diff --git a/builds/any/rootfs/jessie/common/all-base-packages.yml b/builds/any/rootfs/jessie/common/all-base-packages.yml index a3835b56..df5454a8 100644 --- a/builds/any/rootfs/jessie/common/all-base-packages.yml +++ b/builds/any/rootfs/jessie/common/all-base-packages.yml @@ -4,7 +4,6 @@ # ############################################################ - base-files -- sysvinit-core - locales - python - apt diff --git a/builds/any/rootfs/jessie/standard/standard.yml b/builds/any/rootfs/jessie/standard/standard.yml index d5390fb0..2f1d3e24 100644 --- a/builds/any/rootfs/jessie/standard/standard.yml +++ b/builds/any/rootfs/jessie/standard/standard.yml @@ -11,6 +11,7 @@ variables: !include $ONL/make/versions/version-onl.yml Packages: &Packages + - !script $ONL/tools/onl-init-pkgs.py ${INIT} - !include $ONL/builds/any/rootfs/$ONL_DEBIAN_SUITE/common/all-base-packages.yml - !include $ONL/builds/any/rootfs/$ONL_DEBIAN_SUITE/common/${ARCH}-base-packages.yml - !include $ONL/builds/any/rootfs/$ONL_DEBIAN_SUITE/common/${ARCH}-onl-packages.yml diff --git a/builds/any/rootfs/stretch/common/all-base-packages.yml b/builds/any/rootfs/stretch/common/all-base-packages.yml index 9c5db8ff..10b69061 100644 --- a/builds/any/rootfs/stretch/common/all-base-packages.yml +++ b/builds/any/rootfs/stretch/common/all-base-packages.yml @@ -4,7 +4,6 @@ # ############################################################ - base-files -- sysvinit-core - locales - python - apt diff --git a/builds/any/rootfs/stretch/standard/standard.yml b/builds/any/rootfs/stretch/standard/standard.yml index 275b1ce7..6adb7f1c 100644 --- a/builds/any/rootfs/stretch/standard/standard.yml +++ b/builds/any/rootfs/stretch/standard/standard.yml @@ -11,6 +11,7 @@ variables: !include $ONL/make/versions/version-onl.yml Packages: &Packages + - !script $ONL/tools/onl-init-pkgs.py ${INIT} - !include $ONL/builds/any/rootfs/$ONL_DEBIAN_SUITE/common/all-base-packages.yml - !include 
$ONL/builds/any/rootfs/$ONL_DEBIAN_SUITE/common/${ARCH}-base-packages.yml - !include $ONL/builds/any/rootfs/$ONL_DEBIAN_SUITE/common/${ARCH}-onl-packages.yml diff --git a/docs/Building.md b/docs/Building.md index 9261449e..c4cae980 100644 --- a/docs/Building.md +++ b/docs/Building.md @@ -1,4 +1,5 @@ -#How to Build Open Network Linux +How to Build Open Network Linux +============================================================ In case you are not interested in building ONL from scratch (it takes a while) you can download pre-compiled binaries from @@ -55,7 +56,8 @@ and the SWI files (if you want them) are in -#Installing Docker Gotchas +Installing Docker Gotchas +------------------------------------------------------------ Docker installer oneliner (for reference: see docker.com for details) @@ -78,7 +80,7 @@ Consider enabling builds for non-privileged users with: - If you run as non-root without this, you will get errors like `..: dial unix /var/run/docker.sock: permission denied` - Building as root is fine as well (it immediately jumps into a root build shell), so this optional -#Additional Build Details +Additional Build Details ---------------------------------------------------------- The rest of this guide talks about how to build specific diff --git a/make/config.mk b/make/config.mk index c5e02fc4..b7ffb5c0 100644 --- a/make/config.mk +++ b/make/config.mk @@ -26,6 +26,10 @@ export ONL_DEBIAN_SUITE_$(ONL_DEBIAN_SUITE)=1 export BUILD_DIR_BASE=BUILD/$(ONL_DEBIAN_SUITE) +# init system options sysvinit, systemd. default is sysvinit +ifndef INIT + export INIT := sysvinit +endif # Use the new module database tool to resolve dependencies dynamically. 
ifndef BUILDER_MODULE_DATABASE diff --git a/packages/base/all/initrds/loader-initrd-files/src/bin/switchroot b/packages/base/all/initrds/loader-initrd-files/src/bin/switchroot index 16529007..97709a4a 100644 --- a/packages/base/all/initrds/loader-initrd-files/src/bin/switchroot +++ b/packages/base/all/initrds/loader-initrd-files/src/bin/switchroot @@ -62,6 +62,8 @@ mount --move /dev /newroot/dev # Switch to /newroot if possible, else re-execute /init if [ -x /newroot/sbin/init ]; then exec switch_root -c /dev/console /newroot /sbin/init +elif [ -x /newroot/lib/systemd/systemd ]; then + exec switch_root -c /dev/console /newroot /lib/systemd/systemd else exec /init fi diff --git a/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-uboot.yml b/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-uboot.yml index 23953ddd..e187cbe3 100644 --- a/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-uboot.yml +++ b/packages/base/all/vendor-config-onl/src/lib/platform-config-defaults-uboot.yml @@ -46,6 +46,13 @@ default: =: kernel-3.2-lts-arm-iproc-all.bin.gz <<: *arm-iproc-kernel-package + arm-iproc-kernel-4-4-package: &arm-iproc-kernel-4-4-package + package: onl-kernel-4.4-lts-arm-iproc-all:armel + + arm-iproc-kernel-4-4: &arm-iproc-kernel-4-4 + =: kernel-4.4-lts-arm-iproc-all.bin.gz + <<: *arm-iproc-kernel-4-4-package + arm64-kernel-package: &arm64-kernel-package package: onl-kernel-4.9-lts-arm64-all:arm64 diff --git a/packages/base/any/kernels/3.16-lts/configs/powerpc-e500v-all/.gitignore b/packages/base/any/kernels/3.16-lts/configs/powerpc-e500v-all/.gitignore index 5dbdc5b9..0f5e41e7 100644 --- a/packages/base/any/kernels/3.16-lts/configs/powerpc-e500v-all/.gitignore +++ b/packages/base/any/kernels/3.16-lts/configs/powerpc-e500v-all/.gitignore @@ -1,3 +1,3 @@ kernel-3.16* linux-* - +lib/ diff --git a/packages/base/any/kernels/3.16-lts/configs/powerpc-e500v-all/powerpc-e500v-all.config 
b/packages/base/any/kernels/3.16-lts/configs/powerpc-e500v-all/powerpc-e500v-all.config index 2a9d8ae8..2c070d7f 100644 --- a/packages/base/any/kernels/3.16-lts/configs/powerpc-e500v-all/powerpc-e500v-all.config +++ b/packages/base/any/kernels/3.16-lts/configs/powerpc-e500v-all/powerpc-e500v-all.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. -# Linux/powerpc 3.16.53 Kernel Configuration +# Linux/powerpc 3.16.56 Kernel Configuration # # CONFIG_PPC64 is not set diff --git a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/.gitignore b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/.gitignore index 5dbdc5b9..0f5e41e7 100644 --- a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/.gitignore +++ b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/.gitignore @@ -1,3 +1,3 @@ kernel-3.16* linux-* - +lib/ diff --git a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config index c360583d..b4a83cc6 100644 --- a/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config +++ b/packages/base/any/kernels/3.16-lts/configs/x86_64-all/x86_64-all.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/x86_64 3.16.53 Kernel Configuration +# Linux/x86_64 3.16.56 Kernel Configuration # CONFIG_64BIT=y CONFIG_X86_64=y @@ -354,6 +354,7 @@ CONFIG_FREEZER=y CONFIG_ZONE_DMA=y CONFIG_SMP=y CONFIG_X86_MPPARSE=y +CONFIG_RETPOLINE=y # CONFIG_X86_EXTENDED_PLATFORM is not set # CONFIG_X86_INTEL_LPSS is not set CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y @@ -1119,6 +1120,7 @@ CONFIG_FW_LOADER_USER_HELPER=y # CONFIG_SYS_HYPERVISOR is not set # CONFIG_GENERIC_CPU_DEVICES is not set CONFIG_GENERIC_CPU_AUTOPROBE=y +CONFIG_GENERIC_CPU_VULNERABILITIES=y # CONFIG_DMA_SHARED_BUFFER is not set # diff --git a/packages/base/any/kernels/3.16-lts/kconfig.mk b/packages/base/any/kernels/3.16-lts/kconfig.mk index 290135c9..210f15a8 100644 --- a/packages/base/any/kernels/3.16-lts/kconfig.mk +++ b/packages/base/any/kernels/3.16-lts/kconfig.mk @@ -25,6 +25,6 @@ THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) K_MAJOR_VERSION := 3 K_PATCH_LEVEL := 16 -K_SUB_LEVEL := 53 +K_SUB_LEVEL := 56 K_SUFFIX := K_PATCH_DIR := $(THIS_DIR)/patches diff --git a/packages/base/any/kernels/3.16-lts/patches/driver-ixgbe-version-5.2.4.patch b/packages/base/any/kernels/3.16-lts/patches/driver-ixgbe-version-5.2.4.patch new file mode 100644 index 00000000..aa66e02c --- /dev/null +++ b/packages/base/any/kernels/3.16-lts/patches/driver-ixgbe-version-5.2.4.patch @@ -0,0 +1,65204 @@ +From 54268c6d6567bbaa557983faaa50b6cbe0876040 Mon Sep 17 00:00:00 2001 +From: fengkm +Date: Mon, 30 Oct 2017 15:27:15 +0800 +Subject: [PATCH] driver ixgbe version 5.2.4 + +--- + drivers/net/ethernet/intel/ixgbe/Makefile | 48 +- + drivers/net/ethernet/intel/ixgbe/ixgbe.h | 1128 ++- + drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c | 501 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_82598.h | 43 + + drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c | 1553 ++-- + drivers/net/ethernet/intel/ixgbe/ixgbe_82599.h | 55 + + drivers/net/ethernet/intel/ixgbe/ixgbe_api.c | 1624 ++++ + drivers/net/ethernet/intel/ixgbe/ixgbe_api.h | 213 + + 
drivers/net/ethernet/intel/ixgbe/ixgbe_cna.c | 168 + + drivers/net/ethernet/intel/ixgbe/ixgbe_cna.h | 31 + + drivers/net/ethernet/intel/ixgbe/ixgbe_common.c | 2960 +++++-- + drivers/net/ethernet/intel/ixgbe/ixgbe_common.h | 171 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c | 602 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h | 228 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c | 176 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h | 137 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c | 473 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h | 139 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c | 380 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c | 25 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c | 2406 ++++-- + drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c | 350 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h | 32 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.c | 210 + + drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.h | 51 + + drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c | 636 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 8418 ++++++++++++++------ + drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c | 389 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h | 126 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_osdep.h | 200 + + drivers/net/ethernet/intel/ixgbe/ixgbe_osdep2.h | 68 + + drivers/net/ethernet/intel/ixgbe/ixgbe_param.c | 1256 +++ + drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c | 1606 ++-- + drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h | 157 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_procfs.c | 938 +++ + drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c | 964 ++- + drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c | 1178 ++- + drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h | 69 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c | 129 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_type.h | 5427 ++++++++----- + drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c | 861 +- + drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h 
| 58 + + drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c | 4707 +++++++++++ + drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h | 115 + + drivers/net/ethernet/intel/ixgbe/kcompat.c | 2375 ++++++ + drivers/net/ethernet/intel/ixgbe/kcompat.h | 5610 +++++++++++++ + drivers/net/ethernet/intel/ixgbe/kcompat_ethtool.c | 1169 +++ + 47 files changed, 39675 insertions(+), 10485 deletions(-) + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_82598.h + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_82599.h + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_api.c + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_api.h + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_cna.c + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_cna.h + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.c + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.h + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_osdep.h + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_osdep2.h + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_param.c + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_procfs.c + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c + create mode 100644 drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h + create mode 100644 drivers/net/ethernet/intel/ixgbe/kcompat.c + create mode 100644 drivers/net/ethernet/intel/ixgbe/kcompat.h + create mode 100644 drivers/net/ethernet/intel/ixgbe/kcompat_ethtool.c + +diff --git a/drivers/net/ethernet/intel/ixgbe/Makefile b/drivers/net/ethernet/intel/ixgbe/Makefile +index be2989e..e53eb72 100644 +--- a/drivers/net/ethernet/intel/ixgbe/Makefile ++++ b/drivers/net/ethernet/intel/ixgbe/Makefile +@@ -1,7 +1,7 @@ + ################################################################################ + # +-# Intel 10 Gigabit PCI Express Linux driver +-# Copyright(c) 1999 - 2013 
Intel Corporation. ++# Intel(R) 10GbE PCI Express Linux Network Driver ++# Copyright(c) 1999 - 2017 Intel Corporation. + # + # This program is free software; you can redistribute it and/or modify it + # under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + # FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + # more details. + # +-# You should have received a copy of the GNU General Public License along with +-# this program; if not, write to the Free Software Foundation, Inc., +-# 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +-# + # The full GNU General Public License is included in this distribution in + # the file called "COPYING". + # +@@ -27,18 +23,40 @@ + ################################################################################ + + # +-# Makefile for the Intel(R) 10GbE PCI Express ethernet driver ++# Makefile for the Intel(R) 10GbE PCI Express Linux Network Driver + # + + obj-$(CONFIG_IXGBE) += ixgbe.o + +-ixgbe-objs := ixgbe_main.o ixgbe_common.o ixgbe_ethtool.o \ +- ixgbe_82599.o ixgbe_82598.o ixgbe_phy.o ixgbe_sriov.o \ +- ixgbe_mbx.o ixgbe_x540.o ixgbe_lib.o ixgbe_ptp.o ++define ixgbe-y ++ ixgbe_main.o ++ ixgbe_api.o ++ ixgbe_common.o ++ ixgbe_dcb.o ++ ixgbe_dcb_82598.o ++ ixgbe_dcb_82599.o ++ ixgbe_ethtool.o ++ ixgbe_lib.o ++ ixgbe_mbx.o ++ ixgbe_sriov.o ++ ixgbe_param.o ++ ixgbe_phy.o ++ ixgbe_procfs.o ++ ixgbe_82598.o ++ ixgbe_82599.o ++ ixgbe_x540.o ++ ixgbe_x550.o ++endef ++ixgbe-y := $(strip ${ixgbe-y}) ++ ++ixgbe-${CONFIG_DCB} += ixgbe_dcb_nl.o ++ ++ixgbe-${CONFIG_DEBUG_FS} += ixgbe_debugfs.o ++ ++ixgbe-${CONFIG_FCOE:m=y} += ixgbe_fcoe.o ++ ++ixgbe-$(CONFIG_PTP_1588_CLOCK:m=y) += ixgbe_ptp.o + +-ixgbe-$(CONFIG_IXGBE_DCB) += ixgbe_dcb.o ixgbe_dcb_82598.o \ +- ixgbe_dcb_82599.o ixgbe_dcb_nl.o ++ixgbe-${CONFIG_SYSFS} += ixgbe_sysfs.o + +-ixgbe-$(CONFIG_IXGBE_HWMON) += ixgbe_sysfs.o +-ixgbe-$(CONFIG_DEBUG_FS) += ixgbe_debugfs.o +-ixgbe-$(CONFIG_FCOE:m=y) += ixgbe_fcoe.o 
++ixgbe-y += kcompat.o +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe.h +index ac9f214..33be88c 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe.h +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ +@@ -29,68 +25,151 @@ + #ifndef _IXGBE_H_ + #define _IXGBE_H_ + +-#include +-#include ++#include ++ + #include + #include +-#include +-#include +-#include +-#include +- +-#include +-#include +-#include ++#include + +-#include "ixgbe_type.h" +-#include "ixgbe_common.h" +-#include "ixgbe_dcb.h" +-#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) +-#define IXGBE_FCOE +-#include "ixgbe_fcoe.h" +-#endif /* CONFIG_FCOE or CONFIG_FCOE_MODULE */ +-#ifdef CONFIG_IXGBE_DCA ++#ifdef SIOCETHTOOL ++#include ++#endif ++#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ++#include ++#endif ++/* Can't use IS_ENABLED until after kcompat is loaded */ ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++#define IXGBE_DCA + #include + #endif ++#include "ixgbe_dcb.h" + +-#include ++#include "kcompat.h" + + #ifdef CONFIG_NET_RX_BUSY_POLL ++#include ++#ifdef HAVE_NDO_BUSY_POLL + #define BP_EXTENDED_STATS + #endif +-/* common prefix used by pr_<> macros */ +-#undef pr_fmt +-#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt ++#endif /* CONFIG_NET_RX_BUSY_POLL */ + +-/* TX/RX descriptor defines */ +-#define IXGBE_DEFAULT_TXD 512 +-#define IXGBE_DEFAULT_TX_WORK 256 +-#define IXGBE_MAX_TXD 4096 +-#define IXGBE_MIN_TXD 64 ++#ifdef HAVE_SCTP ++#include ++#endif + +-#if (PAGE_SIZE < 8192) +-#define IXGBE_DEFAULT_RXD 512 +-#else +-#define IXGBE_DEFAULT_RXD 128 ++#ifdef HAVE_INCLUDE_LINUX_MDIO_H ++#include ++#endif ++ ++#if IS_ENABLED(CONFIG_FCOE) ++#include "ixgbe_fcoe.h" ++#endif /* CONFIG_FCOE */ ++ ++#include "ixgbe_api.h" ++ ++#include "ixgbe_common.h" ++ ++#define PFX "ixgbe: " ++#define DPRINTK(nlevel, klevel, fmt, args...) 
\ ++ ((void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \ ++ printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \ ++ __func__ , ## args))) ++ ++#ifdef HAVE_PTP_1588_CLOCK ++#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#include ++#endif /* HAVE_INCLUDE_TIMECOUNTER_H */ ++#include ++#include ++#include + #endif +-#define IXGBE_MAX_RXD 4096 +-#define IXGBE_MIN_RXD 64 ++ ++/* TX/RX descriptor defines */ ++#define IXGBE_DEFAULT_TXD 512 ++#define IXGBE_DEFAULT_TX_WORK 256 ++#define IXGBE_MAX_TXD 4096 ++#define IXGBE_MIN_TXD 64 ++ ++#define IXGBE_DEFAULT_RXD 512 ++#define IXGBE_DEFAULT_RX_WORK 256 ++#define IXGBE_MAX_RXD 4096 ++#define IXGBE_MIN_RXD 64 ++ ++#define IXGBE_ETH_P_LLDP 0x88CC + + /* flow control */ +-#define IXGBE_MIN_FCRTL 0x40 ++#define IXGBE_MIN_FCRTL 0x40 + #define IXGBE_MAX_FCRTL 0x7FF80 +-#define IXGBE_MIN_FCRTH 0x600 ++#define IXGBE_MIN_FCRTH 0x600 + #define IXGBE_MAX_FCRTH 0x7FFF0 +-#define IXGBE_DEFAULT_FCPAUSE 0xFFFF +-#define IXGBE_MIN_FCPAUSE 0 +-#define IXGBE_MAX_FCPAUSE 0xFFFF ++#define IXGBE_DEFAULT_FCPAUSE 0xFFFF ++#define IXGBE_MIN_FCPAUSE 0 ++#define IXGBE_MAX_FCPAUSE 0xFFFF + + /* Supported Rx Buffer Sizes */ +-#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ +-#define IXGBE_RXBUFFER_2K 2048 +-#define IXGBE_RXBUFFER_3K 3072 +-#define IXGBE_RXBUFFER_4K 4096 +-#define IXGBE_MAX_RXBUFFER 16384 /* largest size for a single descriptor */ ++#define IXGBE_RXBUFFER_256 256 /* Used for skb receive header */ ++#define IXGBE_RXBUFFER_1536 1536 ++#define IXGBE_RXBUFFER_2K 2048 ++#define IXGBE_RXBUFFER_3K 3072 ++#define IXGBE_RXBUFFER_4K 4096 ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++#define IXGBE_RXBUFFER_7K 7168 ++#define IXGBE_RXBUFFER_8K 8192 ++#define IXGBE_RXBUFFER_15K 15360 ++#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ ++#define IXGBE_MAX_RXBUFFER 16384 /* largest size for single descriptor */ ++ ++/* Attempt to maximize the headroom available for incoming frames. 
We ++ * use a 2K buffer for receives and need 1536/1534 to store the data for ++ * the frame. This leaves us with 512 bytes of room. From that we need ++ * to deduct the space needed for the shared info and the padding needed ++ * to IP align the frame. ++ * ++ * Note: For cache line sizes 256 or larger this value is going to end ++ * up negative. In these cases we should fall back to the 3K ++ * buffers. ++ */ ++#if (PAGE_SIZE < 8192) ++#define IXGBE_MAX_2K_FRAME_BUILD_SKB (IXGBE_RXBUFFER_1536 - NET_IP_ALIGN) ++#define IXGBE_2K_TOO_SMALL_WITH_PADDING \ ++((NET_SKB_PAD + IXGBE_RXBUFFER_1536) > SKB_WITH_OVERHEAD(IXGBE_RXBUFFER_2K)) ++ ++static inline int ixgbe_compute_pad(int rx_buf_len) ++{ ++ int page_size, pad_size; ++ ++ page_size = ALIGN(rx_buf_len, PAGE_SIZE / 2); ++ pad_size = SKB_WITH_OVERHEAD(page_size) - rx_buf_len; ++ ++ return pad_size; ++} ++ ++static inline int ixgbe_skb_pad(void) ++{ ++ int rx_buf_len; ++ ++ /* If a 2K buffer cannot handle a standard Ethernet frame then ++ * optimize padding for a 3K buffer instead of a 1.5K buffer. ++ * ++ * For a 3K buffer we need to add enough padding to allow for ++ * tailroom due to NET_IP_ALIGN possibly shifting us out of ++ * cache-line alignment. ++ */ ++ if (IXGBE_2K_TOO_SMALL_WITH_PADDING) ++ rx_buf_len = IXGBE_RXBUFFER_3K + SKB_DATA_ALIGN(NET_IP_ALIGN); ++ else ++ rx_buf_len = IXGBE_RXBUFFER_1536; ++ ++ /* if needed make room for NET_IP_ALIGN */ ++ rx_buf_len -= NET_IP_ALIGN; ++ ++ return ixgbe_compute_pad(rx_buf_len); ++} ++ ++#define IXGBE_SKB_PAD ixgbe_skb_pad() ++#else ++#define IXGBE_SKB_PAD (NET_SKB_PAD + NET_IP_ALIGN) ++#endif + + /* + * NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we +@@ -100,11 +179,27 @@ + * Since netdev_alloc_skb now allocates a page fragment we can use a value + * of 256 and the resultant skb will have a truesize of 960 or less. 
+ */ +-#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 ++#define IXGBE_RX_HDR_SIZE IXGBE_RXBUFFER_256 ++ ++#define MAXIMUM_ETHERNET_VLAN_SIZE (VLAN_ETH_FRAME_LEN + ETH_FCS_LEN) + + /* How many Rx Buffers do we bundle into one write to the hardware ? */ + #define IXGBE_RX_BUFFER_WRITE 16 /* Must be power of 2 */ + ++#ifdef HAVE_STRUCT_DMA_ATTRS ++#define IXGBE_RX_DMA_ATTR NULL ++#else ++#define IXGBE_RX_DMA_ATTR \ ++ (DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING) ++#endif ++ ++/* assume the kernel supports 8021p to avoid stripping vlan tags */ ++#ifdef IXGBE_DISABLE_8021P_SUPPORT ++#ifndef HAVE_8021P_SUPPORT ++#define HAVE_8021P_SUPPORT ++#endif ++#endif /* IXGBE_DISABLE_8021P_SUPPORT */ ++ + enum ixgbe_tx_flags { + /* cmd_type flags */ + IXGBE_TX_FLAGS_HW_VLAN = 0x01, +@@ -124,31 +219,73 @@ enum ixgbe_tx_flags { + /* VLAN info */ + #define IXGBE_TX_FLAGS_VLAN_MASK 0xffff0000 + #define IXGBE_TX_FLAGS_VLAN_PRIO_MASK 0xe0000000 +-#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 ++#define IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT 29 + #define IXGBE_TX_FLAGS_VLAN_SHIFT 16 + +-#define IXGBE_MAX_VF_MC_ENTRIES 30 +-#define IXGBE_MAX_VF_FUNCTIONS 64 +-#define IXGBE_MAX_VFTA_ENTRIES 128 +-#define MAX_EMULATION_MAC_ADDRS 16 +-#define IXGBE_MAX_PF_MACVLANS 15 +-#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) +-#define IXGBE_82599_VF_DEVICE_ID 0x10ED +-#define IXGBE_X540_VF_DEVICE_ID 0x1515 ++#define IXGBE_MAX_RX_DESC_POLL 10 ++ ++#define IXGBE_MAX_VF_MC_ENTRIES 30 ++#define IXGBE_MAX_VF_FUNCTIONS 64 ++#define IXGBE_MAX_VFTA_ENTRIES 128 ++#define MAX_EMULATION_MAC_ADDRS 16 ++#define IXGBE_MAX_PF_MACVLANS 15 + ++/* must account for pools assigned to VFs. 
*/ ++#ifdef CONFIG_PCI_IOV ++#define VMDQ_P(p) ((p) + adapter->ring_feature[RING_F_VMDQ].offset) ++#else ++#define VMDQ_P(p) (p) ++#endif ++ ++#define UPDATE_VF_COUNTER_32bit(reg, last_counter, counter) \ ++ { \ ++ u32 current_counter = IXGBE_READ_REG(hw, reg); \ ++ if (current_counter < last_counter) \ ++ counter += 0x100000000LL; \ ++ last_counter = current_counter; \ ++ counter &= 0xFFFFFFFF00000000LL; \ ++ counter |= current_counter; \ ++ } ++ ++#define UPDATE_VF_COUNTER_36bit(reg_lsb, reg_msb, last_counter, counter) \ ++ { \ ++ u64 current_counter_lsb = IXGBE_READ_REG(hw, reg_lsb); \ ++ u64 current_counter_msb = IXGBE_READ_REG(hw, reg_msb); \ ++ u64 current_counter = (current_counter_msb << 32) | \ ++ current_counter_lsb; \ ++ if (current_counter < last_counter) \ ++ counter += 0x1000000000LL; \ ++ last_counter = current_counter; \ ++ counter &= 0xFFFFFFF000000000LL; \ ++ counter |= current_counter; \ ++ } ++ ++struct vf_stats { ++ u64 gprc; ++ u64 gorc; ++ u64 gptc; ++ u64 gotc; ++ u64 mprc; ++}; + struct vf_data_storage { ++ struct pci_dev *vfdev; + unsigned char vf_mac_addresses[ETH_ALEN]; + u16 vf_mc_hashes[IXGBE_MAX_VF_MC_ENTRIES]; + u16 num_vf_mc_hashes; +- u16 default_vf_vlan_id; +- u16 vlans_enabled; + bool clear_to_send; ++ struct vf_stats vfstats; ++ struct vf_stats last_vfstats; ++ struct vf_stats saved_rst_vfstats; + bool pf_set_mac; + u16 pf_vlan; /* When set, guest VLAN config not allowed. 
*/ + u16 pf_qos; + u16 tx_rate; +- u16 vlan_count; + u8 spoofchk_enabled; ++#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN ++ bool rss_query_enabled; ++#endif ++ u8 trusted; ++ int xcast_mode; + unsigned int vf_api; + }; + +@@ -164,8 +301,14 @@ struct vf_macvlans { + #define IXGBE_MAX_DATA_PER_TXD (1 << IXGBE_MAX_TXD_PWR) + + /* Tx Descriptors needed, worst case */ +-#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) +-#define DESC_NEEDED (MAX_SKB_FRAGS + 4) ++#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IXGBE_MAX_DATA_PER_TXD) ++#ifndef MAX_SKB_FRAGS ++#define DESC_NEEDED 4 ++#elif (MAX_SKB_FRAGS < 16) ++#define DESC_NEEDED ((MAX_SKB_FRAGS * TXD_USE_COUNT(PAGE_SIZE)) + 4) ++#else ++#define DESC_NEEDED (MAX_SKB_FRAGS + 4) ++#endif + + /* wrapper around a pointer to a socket buffer, + * so a DMA handle can be stored along with the buffer */ +@@ -184,8 +327,15 @@ struct ixgbe_tx_buffer { + struct ixgbe_rx_buffer { + struct sk_buff *skb; + dma_addr_t dma; ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + struct page *page; +- unsigned int page_offset; ++#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536) ++ __u32 page_offset; ++#else ++ __u16 page_offset; ++#endif ++ __u16 pagecnt_bias; ++#endif + }; + + struct ixgbe_queue_stats { +@@ -213,25 +363,27 @@ struct ixgbe_rx_queue_stats { + u64 csum_err; + }; + ++#define IXGBE_TS_HDR_LEN 8 + enum ixgbe_ring_state_t { ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ __IXGBE_RX_3K_BUFFER, ++ __IXGBE_RX_BUILD_SKB_ENABLED, ++#endif ++ __IXGBE_RX_RSC_ENABLED, ++ __IXGBE_RX_CSUM_UDP_ZERO_ERR, ++#if IS_ENABLED(CONFIG_FCOE) ++ __IXGBE_RX_FCOE, ++#endif + __IXGBE_TX_FDIR_INIT_DONE, + __IXGBE_TX_XPS_INIT_DONE, + __IXGBE_TX_DETECT_HANG, + __IXGBE_HANG_CHECK_ARMED, +- __IXGBE_RX_RSC_ENABLED, +- __IXGBE_RX_CSUM_UDP_ZERO_ERR, +- __IXGBE_RX_FCOE, +-}; +- +-struct ixgbe_fwd_adapter { +- unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; +- struct net_device *netdev; +- struct ixgbe_adapter *real_adapter; +- unsigned int tx_base_queue; +- 
unsigned int rx_base_queue; +- int pool; + }; + ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++#define ring_uses_build_skb(ring) \ ++ test_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &(ring)->state) ++#endif + #define check_for_tx_hang(ring) \ + test_bit(__IXGBE_TX_DETECT_HANG, &(ring)->state) + #define set_check_for_tx_hang(ring) \ +@@ -244,12 +396,15 @@ struct ixgbe_fwd_adapter { + set_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) + #define clear_ring_rsc_enabled(ring) \ + clear_bit(__IXGBE_RX_RSC_ENABLED, &(ring)->state) ++#define netdev_ring(ring) (ring->netdev) ++#define ring_queue_index(ring) (ring->queue_index) ++ ++ + struct ixgbe_ring { + struct ixgbe_ring *next; /* pointer to next ring in q_vector */ + struct ixgbe_q_vector *q_vector; /* backpointer to host q_vector */ + struct net_device *netdev; /* netdev ring belongs to */ + struct device *dev; /* device for DMA mapping */ +- struct ixgbe_fwd_adapter *l2_accel_priv; + void *desc; /* descriptor ring memory */ + union { + struct ixgbe_tx_buffer *tx_buffer_info; +@@ -271,8 +426,16 @@ struct ixgbe_ring { + u16 next_to_use; + u16 next_to_clean; + ++#ifdef HAVE_PTP_1588_CLOCK ++ unsigned long last_rx_timestamp; ++ ++#endif + union { ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ u16 rx_buf_len; ++#else + u16 next_to_alloc; ++#endif + struct { + u8 atr_sample_rate; + u8 atr_count; +@@ -281,7 +444,9 @@ struct ixgbe_ring { + + u8 dcb_tc; + struct ixgbe_queue_stats stats; ++#ifdef HAVE_NDO_GET_STATS64 + struct u64_stats_sync syncp; ++#endif + union { + struct ixgbe_tx_queue_stats tx_stats; + struct ixgbe_rx_queue_stats rx_stats; +@@ -293,61 +458,68 @@ enum ixgbe_ring_f_enum { + RING_F_VMDQ, /* SR-IOV uses the same ring feature */ + RING_F_RSS, + RING_F_FDIR, +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + RING_F_FCOE, +-#endif /* IXGBE_FCOE */ +- +- RING_F_ARRAY_SIZE /* must be last in enum set */ ++#endif /* CONFIG_FCOE */ ++ RING_F_ARRAY_SIZE /* must be last in enum set */ + }; + +-#define IXGBE_MAX_RSS_INDICES 16 
+-#define IXGBE_MAX_VMDQ_INDICES 64 +-#define IXGBE_MAX_FDIR_INDICES 63 /* based on q_vector limit */ +-#define IXGBE_MAX_FCOE_INDICES 8 +-#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +-#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) +-#define IXGBE_MAX_L2A_QUEUES 4 +-#define IXGBE_MAX_L2A_QUEUES 4 +-#define IXGBE_BAD_L2A_QUEUE 3 +-#define IXGBE_MAX_MACVLANS 31 +-#define IXGBE_MAX_DCBMACVLANS 8 +- ++#define IXGBE_MAX_DCB_INDICES 8 ++#define IXGBE_MAX_RSS_INDICES 16 ++#define IXGBE_MAX_RSS_INDICES_X550 63 ++#define IXGBE_MAX_VMDQ_INDICES 64 ++#define IXGBE_MAX_FDIR_INDICES 63 ++#if IS_ENABLED(CONFIG_FCOE) ++#define IXGBE_MAX_FCOE_INDICES 8 ++#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) ++#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + IXGBE_MAX_FCOE_INDICES) ++#else ++#define MAX_RX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) ++#define MAX_TX_QUEUES (IXGBE_MAX_FDIR_INDICES + 1) ++#endif /* CONFIG_FCOE */ + struct ixgbe_ring_feature { + u16 limit; /* upper limit on feature indices */ + u16 indices; /* current value of indices */ + u16 mask; /* Mask used for feature to ring mapping */ + u16 offset; /* offset to start of feature */ +-} ____cacheline_internodealigned_in_smp; ++}; + + #define IXGBE_82599_VMDQ_8Q_MASK 0x78 + #define IXGBE_82599_VMDQ_4Q_MASK 0x7C + #define IXGBE_82599_VMDQ_2Q_MASK 0x7E + ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + /* + * FCoE requires that all Rx buffers be over 2200 bytes in length. Since + * this is twice the size of a half page we need to double the page order + * for FCoE enabled Rx queues. + */ +-static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring *ring) ++static inline unsigned int ixgbe_rx_bufsz(struct ixgbe_ring __maybe_unused *ring) + { +-#ifdef IXGBE_FCOE +- if (test_bit(__IXGBE_RX_FCOE, &ring->state)) +- return (PAGE_SIZE < 8192) ? 
IXGBE_RXBUFFER_4K : +- IXGBE_RXBUFFER_3K; ++#if MAX_SKB_FRAGS < 8 ++ return ALIGN(IXGBE_MAX_RXBUFFER / MAX_SKB_FRAGS, 1024); ++#else ++ if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) ++ return IXGBE_RXBUFFER_3K; ++#if (PAGE_SIZE < 8192) ++ if (ring_uses_build_skb(ring)) ++ return IXGBE_MAX_2K_FRAME_BUILD_SKB; + #endif + return IXGBE_RXBUFFER_2K; ++#endif + } + +-static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring *ring) ++static inline unsigned int ixgbe_rx_pg_order(struct ixgbe_ring __maybe_unused *ring) + { +-#ifdef IXGBE_FCOE +- if (test_bit(__IXGBE_RX_FCOE, &ring->state)) +- return (PAGE_SIZE < 8192) ? 1 : 0; ++#if (PAGE_SIZE < 8192) ++ if (test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) ++ return 1; + #endif + return 0; + } + #define ixgbe_rx_pg_size(_ring) (PAGE_SIZE << ixgbe_rx_pg_order(_ring)) + ++#endif + struct ixgbe_ring_container { + struct ixgbe_ring *ring; /* pointer to linked list of rings */ + unsigned int total_bytes; /* total bytes processed this int */ +@@ -361,184 +533,120 @@ struct ixgbe_ring_container { + #define ixgbe_for_each_ring(pos, head) \ + for (pos = (head).ring; pos != NULL; pos = pos->next) + +-#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ +- ? 8 : 1) +-#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS ++#define MAX_RX_PACKET_BUFFERS ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) \ ++ ? 8 : 1) ++#define MAX_TX_PACKET_BUFFERS MAX_RX_PACKET_BUFFERS + +-/* MAX_Q_VECTORS of these are allocated, ++/* MAX_MSIX_Q_VECTORS of these are allocated, + * but we only use one per queue-specific vector. 
+ */ + struct ixgbe_q_vector { + struct ixgbe_adapter *adapter; +-#ifdef CONFIG_IXGBE_DCA +- int cpu; /* CPU for DCA */ +-#endif +- u16 v_idx; /* index of q_vector within array, also used for +- * finding the bit in EICR and friends that +- * represents the vector for this ring */ +- u16 itr; /* Interrupt throttle rate written to EITR */ ++ int cpu; /* CPU for DCA */ ++ u16 v_idx; /* index of q_vector within array, also used for ++ * finding the bit in EICR and friends that ++ * represents the vector for this ring */ ++ u16 itr; /* Interrupt throttle rate written to EITR */ + struct ixgbe_ring_container rx, tx; + + struct napi_struct napi; ++#ifndef HAVE_NETDEV_NAPI_LIST ++ struct net_device poll_dev; ++#endif ++#ifdef HAVE_IRQ_AFFINITY_HINT + cpumask_t affinity_mask; ++#endif + int numa_node; + struct rcu_head rcu; /* to avoid race with update stats on free */ + char name[IFNAMSIZ + 9]; ++ bool netpoll_rx; + +-#ifdef CONFIG_NET_RX_BUSY_POLL +- unsigned int state; +-#define IXGBE_QV_STATE_IDLE 0 +-#define IXGBE_QV_STATE_NAPI 1 /* NAPI owns this QV */ +-#define IXGBE_QV_STATE_POLL 2 /* poll owns this QV */ +-#define IXGBE_QV_STATE_DISABLED 4 /* QV is disabled */ +-#define IXGBE_QV_OWNED (IXGBE_QV_STATE_NAPI | IXGBE_QV_STATE_POLL) +-#define IXGBE_QV_LOCKED (IXGBE_QV_OWNED | IXGBE_QV_STATE_DISABLED) +-#define IXGBE_QV_STATE_NAPI_YIELD 8 /* NAPI yielded this QV */ +-#define IXGBE_QV_STATE_POLL_YIELD 16 /* poll yielded this QV */ +-#define IXGBE_QV_YIELD (IXGBE_QV_STATE_NAPI_YIELD | IXGBE_QV_STATE_POLL_YIELD) +-#define IXGBE_QV_USER_PEND (IXGBE_QV_STATE_POLL | IXGBE_QV_STATE_POLL_YIELD) +- spinlock_t lock; +-#endif /* CONFIG_NET_RX_BUSY_POLL */ ++#ifdef HAVE_NDO_BUSY_POLL ++ atomic_t state; ++#endif /* HAVE_NDO_BUSY_POLL */ + + /* for dynamic allocation of rings associated with this q_vector */ + struct ixgbe_ring ring[0] ____cacheline_internodealigned_in_smp; + }; +-#ifdef CONFIG_NET_RX_BUSY_POLL ++ ++#ifdef HAVE_NDO_BUSY_POLL ++enum ixgbe_qv_state_t { ++ 
IXGBE_QV_STATE_IDLE = 0, ++ IXGBE_QV_STATE_NAPI, ++ IXGBE_QV_STATE_POLL, ++ IXGBE_QV_STATE_DISABLE ++}; ++ + static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) + { +- +- spin_lock_init(&q_vector->lock); +- q_vector->state = IXGBE_QV_STATE_IDLE; ++ /* reset state to idle */ ++ atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); + } + + /* called from the device poll routine to get ownership of a q_vector */ + static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) + { +- int rc = true; +- spin_lock_bh(&q_vector->lock); +- if (q_vector->state & IXGBE_QV_LOCKED) { +- WARN_ON(q_vector->state & IXGBE_QV_STATE_NAPI); +- q_vector->state |= IXGBE_QV_STATE_NAPI_YIELD; +- rc = false; ++ int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, ++ IXGBE_QV_STATE_NAPI); + #ifdef BP_EXTENDED_STATS ++ if (rc != IXGBE_QV_STATE_IDLE) + q_vector->tx.ring->stats.yields++; + #endif +- } else { +- /* we don't care if someone yielded */ +- q_vector->state = IXGBE_QV_STATE_NAPI; +- } +- spin_unlock_bh(&q_vector->lock); +- return rc; ++ ++ return rc == IXGBE_QV_STATE_IDLE; + } + + /* returns true is someone tried to get the qv while napi had it */ +-static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) ++static inline void ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) + { +- int rc = false; +- spin_lock_bh(&q_vector->lock); +- WARN_ON(q_vector->state & (IXGBE_QV_STATE_POLL | +- IXGBE_QV_STATE_NAPI_YIELD)); +- +- if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) +- rc = true; +- /* will reset state to idle, unless QV is disabled */ +- q_vector->state &= IXGBE_QV_STATE_DISABLED; +- spin_unlock_bh(&q_vector->lock); +- return rc; ++ WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_NAPI); ++ ++ /* flush any outstanding Rx frames */ ++ if (q_vector->napi.gro_list) ++ napi_gro_flush(&q_vector->napi, false); ++ ++ /* reset state to idle */ ++ atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); + } + + /* called from 
ixgbe_low_latency_poll() */ + static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) + { +- int rc = true; +- spin_lock_bh(&q_vector->lock); +- if ((q_vector->state & IXGBE_QV_LOCKED)) { +- q_vector->state |= IXGBE_QV_STATE_POLL_YIELD; +- rc = false; ++ int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, ++ IXGBE_QV_STATE_POLL); + #ifdef BP_EXTENDED_STATS +- q_vector->rx.ring->stats.yields++; ++ if (rc != IXGBE_QV_STATE_IDLE) ++ q_vector->tx.ring->stats.yields++; + #endif +- } else { +- /* preserve yield marks */ +- q_vector->state |= IXGBE_QV_STATE_POLL; +- } +- spin_unlock_bh(&q_vector->lock); +- return rc; ++ return rc == IXGBE_QV_STATE_IDLE; + } + + /* returns true if someone tried to get the qv while it was locked */ +-static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) ++static inline void ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) + { +- int rc = false; +- spin_lock_bh(&q_vector->lock); +- WARN_ON(q_vector->state & (IXGBE_QV_STATE_NAPI)); +- +- if (q_vector->state & IXGBE_QV_STATE_POLL_YIELD) +- rc = true; +- /* will reset state to idle, unless QV is disabled */ +- q_vector->state &= IXGBE_QV_STATE_DISABLED; +- spin_unlock_bh(&q_vector->lock); +- return rc; ++ WARN_ON(atomic_read(&q_vector->state) != IXGBE_QV_STATE_POLL); ++ ++ /* reset state to idle */ ++ atomic_set(&q_vector->state, IXGBE_QV_STATE_IDLE); + } + + /* true if a socket is polling, even if it did not get the lock */ + static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) + { +- WARN_ON(!(q_vector->state & IXGBE_QV_OWNED)); +- return q_vector->state & IXGBE_QV_USER_PEND; ++ return atomic_read(&q_vector->state) == IXGBE_QV_STATE_POLL; + } + + /* false if QV is currently owned */ + static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) + { +- int rc = true; +- spin_lock_bh(&q_vector->lock); +- if (q_vector->state & IXGBE_QV_OWNED) +- rc = false; +- q_vector->state |= IXGBE_QV_STATE_DISABLED; +- 
spin_unlock_bh(&q_vector->lock); +- +- return rc; +-} +- +-#else /* CONFIG_NET_RX_BUSY_POLL */ +-static inline void ixgbe_qv_init_lock(struct ixgbe_q_vector *q_vector) +-{ +-} +- +-static inline bool ixgbe_qv_lock_napi(struct ixgbe_q_vector *q_vector) +-{ +- return true; +-} +- +-static inline bool ixgbe_qv_unlock_napi(struct ixgbe_q_vector *q_vector) +-{ +- return false; +-} +- +-static inline bool ixgbe_qv_lock_poll(struct ixgbe_q_vector *q_vector) +-{ +- return false; +-} +- +-static inline bool ixgbe_qv_unlock_poll(struct ixgbe_q_vector *q_vector) +-{ +- return false; +-} +- +-static inline bool ixgbe_qv_busy_polling(struct ixgbe_q_vector *q_vector) +-{ +- return false; +-} ++ int rc = atomic_cmpxchg(&q_vector->state, IXGBE_QV_STATE_IDLE, ++ IXGBE_QV_STATE_DISABLE); + +-static inline bool ixgbe_qv_disable(struct ixgbe_q_vector *q_vector) +-{ +- return true; ++ return rc == IXGBE_QV_STATE_IDLE; + } + +-#endif /* CONFIG_NET_RX_BUSY_POLL */ +- +-#ifdef CONFIG_IXGBE_HWMON ++#endif /* HAVE_NDO_BUSY_POLL */ ++#ifdef IXGBE_HWMON + + #define IXGBE_HWMON_TYPE_LOC 0 + #define IXGBE_HWMON_TYPE_TEMP 1 +@@ -553,13 +661,11 @@ struct hwmon_attr { + }; + + struct hwmon_buff { +- struct attribute_group group; +- const struct attribute_group *groups[2]; +- struct attribute *attrs[IXGBE_MAX_SENSORS * 4 + 1]; +- struct hwmon_attr hwmon_list[IXGBE_MAX_SENSORS * 4]; ++ struct device *device; ++ struct hwmon_attr *hwmon_list; + unsigned int n_hwmon; + }; +-#endif /* CONFIG_IXGBE_HWMON */ ++#endif /* IXGBE_HWMON */ + + /* + * microsecond values for various ITR rates shifted by 2 to fit itr register +@@ -568,8 +674,8 @@ struct hwmon_buff { + #define IXGBE_MIN_RSC_ITR 24 + #define IXGBE_100K_ITR 40 + #define IXGBE_20K_ITR 200 +-#define IXGBE_10K_ITR 400 +-#define IXGBE_8K_ITR 500 ++#define IXGBE_16K_ITR 248 ++#define IXGBE_12K_ITR 336 + + /* ixgbe_test_staterr - tests bits in Rx descriptor status and error fields */ + static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc 
*rx_desc, +@@ -578,6 +684,7 @@ static inline __le32 ixgbe_test_staterr(union ixgbe_adv_rx_desc *rx_desc, + return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits); + } + ++/* ixgbe_desc_unused - calculate if we have unused descriptors */ + static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) + { + u16 ntc = ring->next_to_clean; +@@ -586,53 +693,68 @@ static inline u16 ixgbe_desc_unused(struct ixgbe_ring *ring) + return ((ntc > ntu) ? 0 : ring->count) + ntc - ntu - 1; + } + +-static inline void ixgbe_write_tail(struct ixgbe_ring *ring, u32 value) +-{ +- writel(value, ring->tail); +-} +- +-#define IXGBE_RX_DESC(R, i) \ ++#define IXGBE_RX_DESC(R, i) \ + (&(((union ixgbe_adv_rx_desc *)((R)->desc))[i])) +-#define IXGBE_TX_DESC(R, i) \ ++#define IXGBE_TX_DESC(R, i) \ + (&(((union ixgbe_adv_tx_desc *)((R)->desc))[i])) +-#define IXGBE_TX_CTXTDESC(R, i) \ ++#define IXGBE_TX_CTXTDESC(R, i) \ + (&(((struct ixgbe_adv_tx_context_desc *)((R)->desc))[i])) + +-#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 /* Maximum Supported Size 9.5KB */ +-#ifdef IXGBE_FCOE +-/* Use 3K as the baby jumbo frame size for FCoE */ +-#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 +-#endif /* IXGBE_FCOE */ ++#define IXGBE_MAX_JUMBO_FRAME_SIZE 9728 ++#if IS_ENABLED(CONFIG_FCOE) ++/* use 3K as the baby jumbo frame size for FCoE */ ++#define IXGBE_FCOE_JUMBO_FRAME_SIZE 3072 ++#endif /* CONFIG_FCOE */ + +-#define OTHER_VECTOR 1 +-#define NON_Q_VECTORS (OTHER_VECTOR) ++#define TCP_TIMER_VECTOR 0 ++#define OTHER_VECTOR 1 ++#define NON_Q_VECTORS (OTHER_VECTOR + TCP_TIMER_VECTOR) + +-#define MAX_MSIX_VECTORS_82599 64 +-#define MAX_Q_VECTORS_82599 64 +-#define MAX_MSIX_VECTORS_82598 18 +-#define MAX_Q_VECTORS_82598 16 ++#define IXGBE_MAX_MSIX_Q_VECTORS_82599 64 ++#define IXGBE_MAX_MSIX_Q_VECTORS_82598 16 + + struct ixgbe_mac_addr { + u8 addr[ETH_ALEN]; +- u16 queue; ++ u16 pool; + u16 state; /* bitmask */ + }; ++ + #define IXGBE_MAC_STATE_DEFAULT 0x1 + #define IXGBE_MAC_STATE_MODIFIED 0x2 + #define 
IXGBE_MAC_STATE_IN_USE 0x4 + +-#define MAX_Q_VECTORS MAX_Q_VECTORS_82599 +-#define MAX_MSIX_COUNT MAX_MSIX_VECTORS_82599 ++#ifdef IXGBE_PROCFS ++struct ixgbe_therm_proc_data { ++ struct ixgbe_hw *hw; ++ struct ixgbe_thermal_diode_data *sensor_data; ++}; + +-#define MIN_MSIX_Q_VECTORS 1 +-#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) ++#endif /* IXGBE_PROCFS */ ++/* ++ * Only for array allocations in our adapter struct. On 82598, there will be ++ * unused entries in the array, but that's not a big deal. Also, in 82599, ++ * we can actually assign 64 queue vectors based on our extended-extended ++ * interrupt registers. This is different than 82598, which is limited to 16. ++ */ ++#define MAX_MSIX_Q_VECTORS IXGBE_MAX_MSIX_Q_VECTORS_82599 ++#define MAX_MSIX_COUNT IXGBE_MAX_MSIX_VECTORS_82599 ++ ++#define MIN_MSIX_Q_VECTORS 1 ++#define MIN_MSIX_COUNT (MIN_MSIX_Q_VECTORS + NON_Q_VECTORS) + + /* default to trying for four seconds */ +-#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) ++#define IXGBE_TRY_LINK_TIMEOUT (4 * HZ) ++#define IXGBE_SFP_POLL_JIFFIES (2 * HZ) /* SFP poll every 2 seconds */ + + /* board specific private data structure */ + struct ixgbe_adapter { ++#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ++#ifdef HAVE_VLAN_RX_REGISTER ++ struct vlan_group *vlgrp; /* must be first, see ixgbe_receive_skb */ ++#else + unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)]; ++#endif ++#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ + /* OS defined structs */ + struct net_device *netdev; + struct pci_dev *pdev; +@@ -643,53 +765,95 @@ struct ixgbe_adapter { + * thus the additional *_CAPABLE flags. 
+ */ + u32 flags; +-#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) +-#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) +-#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) +-#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) +-#define IXGBE_FLAG_RX_1BUF_CAPABLE (u32)(1 << 4) +-#define IXGBE_FLAG_RX_PS_CAPABLE (u32)(1 << 5) +-#define IXGBE_FLAG_RX_PS_ENABLED (u32)(1 << 6) +-#define IXGBE_FLAG_IN_NETPOLL (u32)(1 << 7) +-#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 8) +-#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 9) +-#define IXGBE_FLAG_IMIR_ENABLED (u32)(1 << 10) +-#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 11) +-#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 12) +-#define IXGBE_FLAG_VMDQ_CAPABLE (u32)(1 << 13) +-#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 14) +-#define IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 15) +-#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 16) +-#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 17) +-#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 18) +-#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 19) +-#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 20) +-#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 21) +-#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 22) +-#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 23) ++#define IXGBE_FLAG_MSI_CAPABLE (u32)(1 << 0) ++#define IXGBE_FLAG_MSI_ENABLED (u32)(1 << 1) ++#define IXGBE_FLAG_MSIX_CAPABLE (u32)(1 << 2) ++#define IXGBE_FLAG_MSIX_ENABLED (u32)(1 << 3) ++#ifndef IXGBE_NO_LLI ++#define IXGBE_FLAG_LLI_PUSH (u32)(1 << 4) ++#endif ++ ++#if defined(CONFIG_DCA) || defined(CONFIG_DCA_MODULE) ++#define IXGBE_FLAG_DCA_ENABLED (u32)(1 << 6) ++#define IXGBE_FLAG_DCA_CAPABLE (u32)(1 << 7) ++#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)(1 << 8) ++#else ++#define IXGBE_FLAG_DCA_ENABLED (u32)0 ++#define IXGBE_FLAG_DCA_CAPABLE (u32)0 ++#define IXGBE_FLAG_DCA_ENABLED_DATA (u32)0 ++#endif ++#define IXGBE_FLAG_MQ_CAPABLE (u32)(1 << 9) ++#define IXGBE_FLAG_DCB_ENABLED (u32)(1 << 10) ++#define IXGBE_FLAG_VMDQ_ENABLED (u32)(1 << 11) ++#define 
IXGBE_FLAG_FAN_FAIL_CAPABLE (u32)(1 << 12) ++#define IXGBE_FLAG_NEED_LINK_UPDATE (u32)(1 << 13) ++#define IXGBE_FLAG_NEED_LINK_CONFIG (u32)(1 << 14) ++#define IXGBE_FLAG_FDIR_HASH_CAPABLE (u32)(1 << 15) ++#define IXGBE_FLAG_FDIR_PERFECT_CAPABLE (u32)(1 << 16) ++#if IS_ENABLED(CONFIG_FCOE) ++#define IXGBE_FLAG_FCOE_CAPABLE (u32)(1 << 17) ++#define IXGBE_FLAG_FCOE_ENABLED (u32)(1 << 18) ++#endif /* CONFIG_FCOE */ ++#define IXGBE_FLAG_SRIOV_CAPABLE (u32)(1 << 19) ++#define IXGBE_FLAG_SRIOV_ENABLED (u32)(1 << 20) ++#define IXGBE_FLAG_SRIOV_REPLICATION_ENABLE (u32)(1 << 21) ++#define IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE (u32)(1 << 22) ++#define IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE (u32)(1 << 23) ++#define IXGBE_FLAG_RX_HWTSTAMP_ENABLED (u32)(1 << 24) ++#define IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE (u32)(1 << 25) ++#define IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE (u32)(1 << 26) ++#define IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER (u32)(1 << 27) ++#define IXGBE_FLAG_MDD_ENABLED (u32)(1 << 29) ++#define IXGBE_FLAG_DCB_CAPABLE (u32)(1 << 30) ++#define IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE BIT(31) ++ ++/* preset defaults */ ++#define IXGBE_FLAGS_82598_INIT (IXGBE_FLAG_MSI_CAPABLE | \ ++ IXGBE_FLAG_MSIX_CAPABLE | \ ++ IXGBE_FLAG_MQ_CAPABLE) ++ ++#define IXGBE_FLAGS_82599_INIT (IXGBE_FLAGS_82598_INIT | \ ++ IXGBE_FLAG_SRIOV_CAPABLE) ++ ++#define IXGBE_FLAGS_X540_INIT IXGBE_FLAGS_82599_INIT ++ ++#define IXGBE_FLAGS_X550_INIT (IXGBE_FLAGS_82599_INIT | \ ++ IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE) + + u32 flags2; +-#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) +-#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) +-#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 2) +-#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 3) +-#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 4) +-#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 5) +-#define IXGBE_FLAG2_RESET_REQUESTED (u32)(1 << 6) +-#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 7) +-#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 8) +-#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP 
(u32)(1 << 9) +-#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 10) +-#define IXGBE_FLAG2_BRIDGE_MODE_VEB (u32)(1 << 11) ++#define IXGBE_FLAG2_RSC_CAPABLE (u32)(1 << 0) ++#define IXGBE_FLAG2_RSC_ENABLED (u32)(1 << 1) ++#define IXGBE_FLAG2_TEMP_SENSOR_CAPABLE (u32)(1 << 3) ++#define IXGBE_FLAG2_TEMP_SENSOR_EVENT (u32)(1 << 4) ++#define IXGBE_FLAG2_SEARCH_FOR_SFP (u32)(1 << 5) ++#define IXGBE_FLAG2_SFP_NEEDS_RESET (u32)(1 << 6) ++#define IXGBE_FLAG2_FDIR_REQUIRES_REINIT (u32)(1 << 8) ++#define IXGBE_FLAG2_RSS_FIELD_IPV4_UDP (u32)(1 << 9) ++#define IXGBE_FLAG2_RSS_FIELD_IPV6_UDP (u32)(1 << 10) ++#define IXGBE_FLAG2_PTP_PPS_ENABLED (u32)(1 << 11) ++#define IXGBE_FLAG2_EEE_CAPABLE (u32)(1 << 14) ++#define IXGBE_FLAG2_EEE_ENABLED (u32)(1 << 15) ++#define IXGBE_FLAG2_UDP_TUN_REREG_NEEDED (u32)(1 << 16) ++#define IXGBE_FLAG2_PHY_INTERRUPT (u32)(1 << 17) ++#define IXGBE_FLAG2_VLAN_PROMISC (u32)(1 << 18) ++#define IXGBE_FLAG2_RX_LEGACY (u32)(1 << 19) + + /* Tx fast path data */ + int num_tx_queues; + u16 tx_itr_setting; + u16 tx_work_limit; + ++#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ++ __be16 vxlan_port; ++#endif /* HAVE_UDP_ENC_RX_OFFLAD || HAVE_VXLAN_RX_OFFLOAD */ ++#ifdef HAVE_UDP_ENC_RX_OFFLOAD ++ __be16 geneve_port; ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ ++ + /* Rx fast path data */ + int num_rx_queues; + u16 rx_itr_setting; ++ u16 rx_work_limit; + + /* TX */ + struct ixgbe_ring *tx_ring[MAX_TX_QUEUES] ____cacheline_aligned_in_smp; +@@ -700,8 +864,8 @@ struct ixgbe_adapter { + + /* RX */ + struct ixgbe_ring *rx_ring[MAX_RX_QUEUES]; +- int num_rx_pools; /* == num_rx_queues in 82598 */ +- int num_rx_queues_per_pool; /* 1 if 82598, can be many if 82599 */ ++ int num_rx_pools; /* does not include pools assigned to VFs */ ++ int num_rx_queues_per_pool; + u64 hw_csum_rx_error; + u64 hw_rx_no_dma_resources; + u64 rsc_total_count; +@@ -710,37 +874,58 @@ struct ixgbe_adapter { + u32 alloc_rx_page_failed; + u32 alloc_rx_buff_failed; + +- struct 
ixgbe_q_vector *q_vector[MAX_Q_VECTORS]; ++ struct ixgbe_q_vector *q_vector[MAX_MSIX_Q_VECTORS]; + +- /* DCB parameters */ ++#ifdef HAVE_DCBNL_IEEE + struct ieee_pfc *ixgbe_ieee_pfc; + struct ieee_ets *ixgbe_ieee_ets; ++#endif + struct ixgbe_dcb_config dcb_cfg; + struct ixgbe_dcb_config temp_dcb_cfg; + u8 dcb_set_bitmap; + u8 dcbx_cap; ++#ifndef HAVE_MQPRIO ++ u8 dcb_tc; ++#endif + enum ixgbe_fc_mode last_lfc_mode; + + int num_q_vectors; /* current number of q_vectors for device */ +- int max_q_vectors; /* true count of q_vectors for device */ ++ int max_q_vectors; /* upper limit of q_vectors for device */ + struct ixgbe_ring_feature ring_feature[RING_F_ARRAY_SIZE]; + struct msix_entry *msix_entries; + ++#ifndef HAVE_NETDEV_STATS_IN_NETDEV ++ struct net_device_stats net_stats; ++#endif ++ ++#ifdef ETHTOOL_TEST + u32 test_icr; + struct ixgbe_ring test_tx_ring; + struct ixgbe_ring test_rx_ring; ++#endif + + /* structs defined in ixgbe_hw.h */ + struct ixgbe_hw hw; + u16 msg_enable; + struct ixgbe_hw_stats stats; +- ++#ifndef IXGBE_NO_LLI ++ u32 lli_port; ++ u32 lli_size; ++ u32 lli_etype; ++ u32 lli_vlan_pri; ++#endif /* IXGBE_NO_LLI */ ++ ++ u32 *config_space; + u64 tx_busy; + unsigned int tx_ring_count; + unsigned int rx_ring_count; + + u32 link_speed; + bool link_up; ++ ++ bool cloud_mode; ++ ++ unsigned long sfp_poll_time; + unsigned long link_check_timeout; + + struct timer_list service_timer; +@@ -754,21 +939,27 @@ struct ixgbe_adapter { + u32 atr_sample_rate; + spinlock_t fdir_perfect_lock; + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + struct ixgbe_fcoe fcoe; +-#endif /* IXGBE_FCOE */ +- u8 __iomem *io_addr; /* Mainly for iounmap use */ ++#endif /* CONFIG_FCOE */ ++ u8 __iomem *io_addr; /* Mainly for iounmap use */ + u32 wol; + + u16 bd_number; + +- u16 eeprom_verh; +- u16 eeprom_verl; +- u16 eeprom_cap; ++#ifdef HAVE_BRIDGE_ATTRIBS ++ u16 bridge_mode; ++#endif + ++ char eeprom_id[32]; ++ u16 eeprom_cap; ++ bool netdev_registered; + u32 
interrupt_event; ++#ifdef HAVE_ETHTOOL_SET_PHYS_ID + u32 led_reg; ++#endif + ++#ifdef HAVE_PTP_1588_CLOCK + struct ptp_clock *ptp_clock; + struct ptp_clock_info ptp_caps; + struct work_struct ptp_tx_work; +@@ -777,40 +968,88 @@ struct ixgbe_adapter { + unsigned long ptp_tx_start; + unsigned long last_overflow_check; + unsigned long last_rx_ptp_check; +- unsigned long last_rx_timestamp; + spinlock_t tmreg_lock; +- struct cyclecounter cc; +- struct timecounter tc; ++ struct cyclecounter hw_cc; ++ struct timecounter hw_tc; + u32 base_incval; ++ u32 tx_hwtstamp_timeouts; ++ u32 tx_hwtstamp_skipped; ++ u32 rx_hwtstamp_cleared; ++ void (*ptp_setup_sdp) (struct ixgbe_adapter *); ++#endif /* HAVE_PTP_1588_CLOCK */ + +- /* SR-IOV */ + DECLARE_BITMAP(active_vfs, IXGBE_MAX_VF_FUNCTIONS); + unsigned int num_vfs; ++ unsigned int max_vfs; + struct vf_data_storage *vfinfo; + int vf_rate_link_speed; + struct vf_macvlans vf_mvs; + struct vf_macvlans *mv_list; +- ++#ifdef CONFIG_PCI_IOV + u32 timer_event_accumulator; + u32 vferr_refcount; ++#endif + struct ixgbe_mac_addr *mac_table; +- struct kobject *info_kobj; +-#ifdef CONFIG_IXGBE_HWMON +- struct hwmon_buff *ixgbe_hwmon_buff; +-#endif /* CONFIG_IXGBE_HWMON */ +-#ifdef CONFIG_DEBUG_FS ++#ifdef IXGBE_SYSFS ++#ifdef IXGBE_HWMON ++ struct hwmon_buff ixgbe_hwmon_buff; ++#endif /* IXGBE_HWMON */ ++#else /* IXGBE_SYSFS */ ++#ifdef IXGBE_PROCFS ++ struct proc_dir_entry *eth_dir; ++ struct proc_dir_entry *info_dir; ++ u64 old_lsc; ++ struct proc_dir_entry *therm_dir[IXGBE_MAX_SENSORS]; ++ struct ixgbe_therm_proc_data therm_data[IXGBE_MAX_SENSORS]; ++#endif /* IXGBE_PROCFS */ ++#endif /* IXGBE_SYSFS */ ++ ++#ifdef HAVE_IXGBE_DEBUG_FS + struct dentry *ixgbe_dbg_adapter; +-#endif /*CONFIG_DEBUG_FS*/ +- ++#endif /*HAVE_IXGBE_DEBUG_FS*/ + u8 default_up; +- unsigned long fwd_bitmask; /* Bitmask indicating in use pools */ ++ ++/* maximum number of RETA entries among all devices supported by ixgbe ++ * driver: currently it's x550 device in 
non-SRIOV mode ++ */ ++#define IXGBE_MAX_RETA_ENTRIES 512 ++ u8 rss_indir_tbl[IXGBE_MAX_RETA_ENTRIES]; ++ ++#define IXGBE_RSS_KEY_SIZE 40 /* size of RSS Hash Key in bytes */ ++ u32 *rss_key; ++ ++#ifdef HAVE_TX_MQ ++#ifndef HAVE_NETDEV_SELECT_QUEUE ++ unsigned int indices; ++#endif ++#endif ++ bool need_crosstalk_fix; + }; + ++static inline u8 ixgbe_max_rss_indices(struct ixgbe_adapter *adapter) ++{ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ return IXGBE_MAX_RSS_INDICES; ++ break; ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ return IXGBE_MAX_RSS_INDICES_X550; ++ break; ++ default: ++ return 0; ++ break; ++ } ++} ++ + struct ixgbe_fdir_filter { +- struct hlist_node fdir_node; ++ struct hlist_node fdir_node; + union ixgbe_atr_input filter; + u16 sw_idx; +- u16 action; ++ u64 action; + }; + + enum ixgbe_state_t { +@@ -818,43 +1057,65 @@ enum ixgbe_state_t { + __IXGBE_RESETTING, + __IXGBE_DOWN, + __IXGBE_DISABLED, +- __IXGBE_REMOVING, ++ __IXGBE_REMOVE, + __IXGBE_SERVICE_SCHED, + __IXGBE_SERVICE_INITED, + __IXGBE_IN_SFP_INIT, ++#ifdef HAVE_PTP_1588_CLOCK + __IXGBE_PTP_RUNNING, + __IXGBE_PTP_TX_IN_PROGRESS, ++#endif ++ __IXGBE_RESET_REQUESTED, + }; + + struct ixgbe_cb { ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + union { /* Union defining head/tail partner */ + struct sk_buff *head; + struct sk_buff *tail; + }; ++#endif + dma_addr_t dma; +- u16 append_cnt; +- bool page_released; ++#ifdef HAVE_VLAN_RX_REGISTER ++ u16 vid; /* VLAN tag */ ++#endif ++ u16 append_cnt; /* number of skb's appended */ ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ bool page_released; ++#endif + }; + #define IXGBE_CB(skb) ((struct ixgbe_cb *)(skb)->cb) + +-enum ixgbe_boards { +- board_82598, +- board_82599, +- board_X540, +-}; +- +-extern struct ixgbe_info ixgbe_82598_info; +-extern struct ixgbe_info ixgbe_82599_info; +-extern struct ixgbe_info ixgbe_X540_info; +-#ifdef 
CONFIG_IXGBE_DCB +-extern const struct dcbnl_rtnl_ops dcbnl_ops; +-#endif ++/* ESX ixgbe CIM IOCTL definition */ + ++#ifdef IXGBE_SYSFS ++void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); ++int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); ++#endif /* IXGBE_SYSFS */ ++#ifdef IXGBE_PROCFS ++void ixgbe_procfs_exit(struct ixgbe_adapter *adapter); ++int ixgbe_procfs_init(struct ixgbe_adapter *adapter); ++int ixgbe_procfs_topdir_init(void); ++void ixgbe_procfs_topdir_exit(void); ++#endif /* IXGBE_PROCFS */ ++ ++extern struct dcbnl_rtnl_ops ixgbe_dcbnl_ops; ++int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max); ++ ++u8 ixgbe_dcb_txq_to_tc(struct ixgbe_adapter *adapter, u8 index); ++ ++/* needed by ixgbe_main.c */ ++int ixgbe_validate_mac_addr(u8 *mc_addr); ++void ixgbe_check_options(struct ixgbe_adapter *adapter); ++void ixgbe_assign_netdev_ops(struct net_device *netdev); ++ ++/* needed by ixgbe_ethtool.c */ ++#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME + extern char ixgbe_driver_name[]; ++#else ++extern const char ixgbe_driver_name[]; ++#endif + extern const char ixgbe_driver_version[]; +-#ifdef IXGBE_FCOE +-extern char ixgbe_default_device_descr[]; +-#endif /* IXGBE_FCOE */ + + void ixgbe_up(struct ixgbe_adapter *adapter); + void ixgbe_down(struct ixgbe_adapter *adapter); +@@ -865,113 +1126,162 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *); + int ixgbe_setup_tx_resources(struct ixgbe_ring *); + void ixgbe_free_rx_resources(struct ixgbe_ring *); + void ixgbe_free_tx_resources(struct ixgbe_ring *); +-void ixgbe_configure_rx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); +-void ixgbe_configure_tx_ring(struct ixgbe_adapter *, struct ixgbe_ring *); +-void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, struct ixgbe_ring *); ++void ixgbe_configure_rx_ring(struct ixgbe_adapter *, ++ struct ixgbe_ring *); ++void ixgbe_configure_tx_ring(struct ixgbe_adapter *, ++ struct ixgbe_ring *); + void ixgbe_update_stats(struct ixgbe_adapter *adapter); + 
int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter); +-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, +- u16 subdevice_id); +-#ifdef CONFIG_PCI_IOV +-void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); +-#endif +-int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, +- u8 *addr, u16 queue); +-int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, +- u8 *addr, u16 queue); ++void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter); ++void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter); + void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter); +-netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, struct ixgbe_adapter *, +- struct ixgbe_ring *); ++bool ixgbe_is_ixgbe(struct pci_dev *pcidev); ++netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *, ++ struct ixgbe_adapter *, ++ struct ixgbe_ring *); + void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *, +- struct ixgbe_tx_buffer *); ++ struct ixgbe_tx_buffer *); + void ixgbe_alloc_rx_buffers(struct ixgbe_ring *, u16); +-void ixgbe_write_eitr(struct ixgbe_q_vector *); +-int ixgbe_poll(struct napi_struct *napi, int budget); +-int ethtool_ioctl(struct ifreq *ifr); +-s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); +-s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); +-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl); +-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, +- union ixgbe_atr_hash_dword input, +- union ixgbe_atr_hash_dword common, +- u8 queue); +-s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, +- union ixgbe_atr_input *input_mask); +-s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, +- union ixgbe_atr_input *input, +- u16 soft_id, u8 queue); +-s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, +- union ixgbe_atr_input *input, +- u16 soft_id); +-void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, +- union 
ixgbe_atr_input *mask); +-void ixgbe_set_rx_mode(struct net_device *netdev); +-#ifdef CONFIG_IXGBE_DCB +-void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); ++void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *); ++void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *); ++#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ++void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *, u32); + #endif ++void ixgbe_set_rx_mode(struct net_device *netdev); ++int ixgbe_write_mc_addr_list(struct net_device *netdev); + int ixgbe_setup_tc(struct net_device *dev, u8 tc); + void ixgbe_tx_ctxtdesc(struct ixgbe_ring *, u32, u32, u32, u32); + void ixgbe_do_reset(struct net_device *netdev); +-#ifdef CONFIG_IXGBE_HWMON +-void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter); +-int ixgbe_sysfs_init(struct ixgbe_adapter *adapter); +-#endif /* CONFIG_IXGBE_HWMON */ +-#ifdef IXGBE_FCOE ++void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector); ++int ixgbe_poll(struct napi_struct *napi, int budget); ++void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *); ++void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter); ++void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter); ++#ifdef ETHTOOL_OPS_COMPAT ++int ethtool_ioctl(struct ifreq *ifr); ++#endif ++ ++#if IS_ENABLED(CONFIG_FCOE) + void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter); +-int ixgbe_fso(struct ixgbe_ring *tx_ring, struct ixgbe_tx_buffer *first, +- u8 *hdr_len); ++int ixgbe_fso(struct ixgbe_ring *tx_ring, ++ struct ixgbe_tx_buffer *first, ++ u8 *hdr_len); + int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, +- union ixgbe_adv_rx_desc *rx_desc, struct sk_buff *skb); ++ union ixgbe_adv_rx_desc *rx_desc, ++ struct sk_buff *skb); + int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, +- struct scatterlist *sgl, unsigned int sgc); ++ struct scatterlist *sgl, unsigned int sgc); ++#ifdef 
HAVE_NETDEV_OPS_FCOE_DDP_TARGET + int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, +- struct scatterlist *sgl, unsigned int sgc); ++ struct scatterlist *sgl, unsigned int sgc); ++#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */ + int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid); + int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter); + void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter); ++#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE + int ixgbe_fcoe_enable(struct net_device *netdev); + int ixgbe_fcoe_disable(struct net_device *netdev); +-#ifdef CONFIG_IXGBE_DCB +-u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); ++#else ++int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter); ++void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter); ++#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ ++#if IS_ENABLED(CONFIG_DCB) ++#ifdef HAVE_DCBNL_OPS_GETAPP ++u8 ixgbe_fcoe_getapp(struct net_device *netdev); ++#endif /* HAVE_DCBNL_OPS_GETAPP */ + u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); +-#endif /* CONFIG_IXGBE_DCB */ +-int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); +-int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, +- struct netdev_fcoe_hbainfo *info); ++#endif /* CONFIG_DCB */ + u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter); +-#endif /* IXGBE_FCOE */ +-#ifdef CONFIG_DEBUG_FS ++#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN ++int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type); ++#endif ++#endif /* CONFIG_FCOE */ ++ ++#ifdef HAVE_IXGBE_DEBUG_FS + void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter); + void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter); + void ixgbe_dbg_init(void); + void ixgbe_dbg_exit(void); +-#else +-static inline void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) {} +-static inline void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) {} +-static inline void ixgbe_dbg_init(void) {} +-static inline void ixgbe_dbg_exit(void) {} +-#endif /* 
CONFIG_DEBUG_FS */ ++#endif /* HAVE_IXGBE_DEBUG_FS */ ++ ++#if IS_ENABLED(CONFIG_BQL) || defined(HAVE_SKB_XMIT_MORE) + static inline struct netdev_queue *txring_txq(const struct ixgbe_ring *ring) + { + return netdev_get_tx_queue(ring->netdev, ring->queue_index); + } ++#endif ++ ++#if IS_ENABLED(CONFIG_DCB) ++#ifdef HAVE_DCBNL_IEEE ++s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame); ++#endif /* HAVE_DCBNL_IEEE */ ++#endif /* CONFIG_DCB */ + ++bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, ++ u16 subdevice_id); ++void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring); ++int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn); ++void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter); ++int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, ++ const u8 *addr, u16 queue); ++int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, ++ const u8 *addr, u16 queue); ++int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool); ++void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid); ++#ifndef HAVE_VLAN_RX_REGISTER ++void ixgbe_vlan_mode(struct net_device *, u32); ++#else ++#ifdef CONFIG_PCI_IOV ++int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan); ++#endif ++#endif ++ ++#ifdef HAVE_PTP_1588_CLOCK + void ixgbe_ptp_init(struct ixgbe_adapter *adapter); +-void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); + void ixgbe_ptp_stop(struct ixgbe_adapter *adapter); ++void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter); + void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter); + void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter); +-void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb); +-int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); ++void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter); ++void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, ++ struct sk_buff *skb); ++void ixgbe_ptp_rx_rgtstamp(struct 
ixgbe_q_vector *q_vector, ++ struct sk_buff *skb); ++static inline void ixgbe_ptp_rx_hwtstamp(struct ixgbe_ring *rx_ring, ++ union ixgbe_adv_rx_desc *rx_desc, ++ struct sk_buff *skb) ++{ ++ if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_TSIP))) { ++ ixgbe_ptp_rx_pktstamp(rx_ring->q_vector, skb); ++ return; ++ } ++ ++ if (unlikely(!ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) ++ return; ++ ++ ixgbe_ptp_rx_rgtstamp(rx_ring->q_vector, skb); ++ ++ /* Update the last_rx_timestamp timer in order to enable watchdog check ++ * for error case of latched timestamp on a dropped packet. ++ */ ++ rx_ring->last_rx_timestamp = jiffies; ++} ++ + int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); ++int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr); + void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter); + void ixgbe_ptp_reset(struct ixgbe_adapter *adapter); +-void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr); ++void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter); ++#endif /* HAVE_PTP_1588_CLOCK */ + #ifdef CONFIG_PCI_IOV + void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter); + #endif ++u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter); ++void ixgbe_store_key(struct ixgbe_adapter *adapter); ++void ixgbe_store_reta(struct ixgbe_adapter *adapter); + +-netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, +- struct ixgbe_adapter *adapter, +- struct ixgbe_ring *tx_ring); ++void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter); + #endif /* _IXGBE_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +index 1560933..8b7fc59 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- 
Copyright(c) 1999 - 2014 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -26,11 +22,10 @@ + + *******************************************************************************/ + +-#include +-#include +-#include +- +-#include "ixgbe.h" ++#include "ixgbe_type.h" ++#include "ixgbe_82598.h" ++#include "ixgbe_api.h" ++#include "ixgbe_common.h" + #include "ixgbe_phy.h" + + #define IXGBE_82598_MAX_TX_QUEUES 32 +@@ -38,14 +33,30 @@ + #define IXGBE_82598_RAR_ENTRIES 16 + #define IXGBE_82598_MC_TBL_SIZE 128 + #define IXGBE_82598_VFT_TBL_SIZE 128 +-#define IXGBE_82598_RX_PB_SIZE 512 ++#define IXGBE_82598_RX_PB_SIZE 512 + +-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ++STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *autoneg); ++STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw); ++STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, ++ bool autoneg_wait_to_complete); ++STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, bool *link_up, ++ bool link_up_wait_to_complete); ++STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete); ++STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool 
autoneg_wait_to_complete); +-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, +- u8 *eeprom_data); +- ++STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw); ++STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); ++STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw); ++STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, ++ u32 headroom, int strategy); ++STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 *sff8472_data); + /** + * ixgbe_set_pcie_completion_timeout - set pci-e completion timeout + * @hw: pointer to the HW structure +@@ -56,14 +67,11 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + * increase the value to either 10ms to 250ms for capability version 1 config, + * or 16ms to 55ms for version 2. + **/ +-static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) ++void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) + { + u32 gcr = IXGBE_READ_REG(hw, IXGBE_GCR); + u16 pcie_devctl2; + +- if (ixgbe_removed(hw->hw_addr)) +- return; +- + /* only take action if timeout value is defaulted to 0 */ + if (gcr & IXGBE_GCR_CMPL_TMOUT_MASK) + goto out; +@@ -82,31 +90,82 @@ static void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw) + * directly in order to set the completion timeout value for + * 16ms to 55ms + */ +- pcie_devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); ++ pcie_devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); + pcie_devctl2 |= IXGBE_PCI_DEVICE_CONTROL2_16ms; +- ixgbe_write_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); ++ IXGBE_WRITE_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2, pcie_devctl2); + out: + /* disable completion timeout resend */ + gcr &= ~IXGBE_GCR_CMPL_TMOUT_RESEND; + IXGBE_WRITE_REG(hw, IXGBE_GCR, gcr); + } + +-static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) ++/** ++ * ixgbe_init_ops_82598 - Inits func ptrs and MAC type ++ * @hw: pointer 
to hardware structure ++ * ++ * Initialize the function pointers and assign the MAC type for 82598. ++ * Does not touch the hardware. ++ **/ ++s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw) + { + struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ ++ DEBUGFUNC("ixgbe_init_ops_82598"); ++ ++ ret_val = ixgbe_init_phy_ops_generic(hw); ++ ret_val = ixgbe_init_ops_generic(hw); ++ ++ /* PHY */ ++ phy->ops.init = ixgbe_init_phy_ops_82598; ++ ++ /* MAC */ ++ mac->ops.start_hw = ixgbe_start_hw_82598; ++ mac->ops.reset_hw = ixgbe_reset_hw_82598; ++ mac->ops.get_media_type = ixgbe_get_media_type_82598; ++ mac->ops.get_supported_physical_layer = ++ ixgbe_get_supported_physical_layer_82598; ++ mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82598; ++ mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82598; ++ mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie_82598; ++ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82598; ++ ++ /* RAR, Multicast, VLAN */ ++ mac->ops.set_vmdq = ixgbe_set_vmdq_82598; ++ mac->ops.clear_vmdq = ixgbe_clear_vmdq_82598; ++ mac->ops.set_vfta = ixgbe_set_vfta_82598; ++ mac->ops.set_vlvf = NULL; ++ mac->ops.clear_vfta = ixgbe_clear_vfta_82598; ++ ++ /* Flow Control */ ++ mac->ops.fc_enable = ixgbe_fc_enable_82598; ++ ++ mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; ++ mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; ++ mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; ++ mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; ++ mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; ++ mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; ++ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); ++ ++ /* SFP+ Module */ ++ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_82598; ++ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_82598; ++ ++ /* Link */ ++ mac->ops.check_link = ixgbe_check_mac_link_82598; ++ mac->ops.setup_link = ixgbe_setup_mac_link_82598; ++ mac->ops.flap_tx_laser = NULL; ++ mac->ops.get_link_capabilities = 
ixgbe_get_link_capabilities_82598; ++ mac->ops.setup_rxpba = ixgbe_set_rxpba_82598; ++ ++ /* Manageability interface */ ++ mac->ops.set_fw_drv_ver = NULL; ++ ++ mac->ops.get_rtrup2tc = NULL; + +- /* Call PHY identify routine to get the phy type */ +- ixgbe_identify_phy_generic(hw); +- +- mac->mcft_size = IXGBE_82598_MC_TBL_SIZE; +- mac->vft_size = IXGBE_82598_VFT_TBL_SIZE; +- mac->num_rar_entries = IXGBE_82598_RAR_ENTRIES; +- mac->rx_pb_size = IXGBE_82598_RX_PB_SIZE; +- mac->max_rx_queues = IXGBE_82598_MAX_RX_QUEUES; +- mac->max_tx_queues = IXGBE_82598_MAX_TX_QUEUES; +- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); +- +- return 0; ++ return ret_val; + } + + /** +@@ -114,40 +173,42 @@ static s32 ixgbe_get_invariants_82598(struct ixgbe_hw *hw) + * @hw: pointer to hardware structure + * + * Initialize any function pointers that were not able to be +- * set during get_invariants because the PHY/SFP type was ++ * set during init_shared_code because the PHY/SFP type was + * not known. Perform the SFP init if necessary. 
+ * + **/ +-static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) ++s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) + { + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; + u16 list_offset, data_offset; + ++ DEBUGFUNC("ixgbe_init_phy_ops_82598"); ++ + /* Identify the PHY */ + phy->ops.identify(hw); + + /* Overwrite the link function pointers if copper PHY */ + if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { +- mac->ops.setup_link = &ixgbe_setup_copper_link_82598; ++ mac->ops.setup_link = ixgbe_setup_copper_link_82598; + mac->ops.get_link_capabilities = +- &ixgbe_get_copper_link_capabilities_generic; ++ ixgbe_get_copper_link_capabilities_generic; + } + + switch (hw->phy.type) { + case ixgbe_phy_tn: +- phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; +- phy->ops.check_link = &ixgbe_check_phy_link_tnx; ++ phy->ops.setup_link = ixgbe_setup_phy_link_tnx; ++ phy->ops.check_link = ixgbe_check_phy_link_tnx; + phy->ops.get_firmware_version = +- &ixgbe_get_phy_firmware_version_tnx; ++ ixgbe_get_phy_firmware_version_tnx; + break; + case ixgbe_phy_nl: +- phy->ops.reset = &ixgbe_reset_phy_nl; ++ phy->ops.reset = ixgbe_reset_phy_nl; + + /* Call SFP+ identify routine to get the SFP+ module type */ + ret_val = phy->ops.identify_sfp(hw); +- if (ret_val != 0) ++ if (ret_val != IXGBE_SUCCESS) + goto out; + else if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; +@@ -156,9 +217,9 @@ static s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw) + + /* Check to see if SFP+ module is supported */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, +- &list_offset, +- &data_offset); +- if (ret_val != 0) { ++ &list_offset, ++ &data_offset); ++ if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SFP_NOT_SUPPORTED; + goto out; + } +@@ -179,13 +240,17 @@ out: + * Disables relaxed ordering Then set pcie completion timeout + * + **/ +-static s32 
ixgbe_start_hw_82598(struct ixgbe_hw *hw) ++s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) + { + u32 regval; + u32 i; +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; ++ ++ DEBUGFUNC("ixgbe_start_hw_82598"); + + ret_val = ixgbe_start_hw_generic(hw); ++ if (ret_val) ++ return ret_val; + + /* Disable relaxed ordering */ + for (i = 0; ((i < hw->mac.max_tx_queues) && +@@ -204,8 +269,7 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) + } + + /* set the completion timeout for interface */ +- if (ret_val == 0) +- ixgbe_set_pcie_completion_timeout(hw); ++ ixgbe_set_pcie_completion_timeout(hw); + + return ret_val; + } +@@ -218,13 +282,15 @@ static s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw) + * + * Determines the link capabilities by reading the AUTOC register. + **/ +-static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, ++STATIC s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u32 autoc = 0; + ++ DEBUGFUNC("ixgbe_get_link_capabilities_82598"); ++ + /* + * Determine link capabilities based on the stored value of AUTOC, + * which represents EEPROM defaults. If AUTOC value has not been +@@ -275,10 +341,12 @@ static s32 ixgbe_get_link_capabilities_82598(struct ixgbe_hw *hw, + * + * Returns the media type (fiber, copper, backplane) + **/ +-static enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) ++STATIC enum ixgbe_media_type ixgbe_get_media_type_82598(struct ixgbe_hw *hw) + { + enum ixgbe_media_type media_type; + ++ DEBUGFUNC("ixgbe_get_media_type_82598"); ++ + /* Detect if there is a copper PHY attached. */ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: +@@ -326,9 +394,9 @@ out: + * + * Enable flow control according to the current settings. 
+ **/ +-static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) ++s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; + u32 fctrl_reg; + u32 rmcs_reg; + u32 reg; +@@ -337,6 +405,8 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) + int i; + bool link_up; + ++ DEBUGFUNC("ixgbe_fc_enable_82598"); ++ + /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; +@@ -344,12 +414,12 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) + } + + /* Low water mark of zero causes XOFF floods */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { +- hw_dbg(hw, "Invalid water mark configuration\n"); ++ DEBUGOUT("Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } +@@ -427,7 +497,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) + rmcs_reg |= IXGBE_RMCS_TFCE_802_3X; + break; + default: +- hw_dbg(hw, "Flow control param set incorrectly\n"); ++ DEBUGOUT("Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; +@@ -439,7 +509,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) + IXGBE_WRITE_REG(hw, IXGBE_RMCS, rmcs_reg); + + /* Set up and enable Rx high/low water mark thresholds, enable XON. 
*/ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; +@@ -455,7 +525,7 @@ static s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw) + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; +- for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) ++ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ +@@ -472,13 +542,15 @@ out: + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. + **/ +-static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, ++STATIC s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) + { + u32 autoc_reg; + u32 links_reg; + u32 i; +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; ++ ++ DEBUGFUNC("ixgbe_start_mac_link_82598"); + + /* Restart link */ + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); +@@ -496,17 +568,17 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; +- msleep(100); ++ msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; +- hw_dbg(hw, "Autonegotiation did not complete.\n"); ++ DEBUGOUT("Autonegotiation did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ +- msleep(50); ++ msec_delay(50); + + return status; + } +@@ -518,31 +590,32 @@ static s32 ixgbe_start_mac_link_82598(struct ixgbe_hw *hw, + * Function indicates success when phy link is available. If phy is not ready + * within 5 seconds of MAC indicating link, the function returns error. 
+ **/ +-static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) + { + u32 timeout; + u16 an_reg; + + if (hw->device_id != IXGBE_DEV_ID_82598AT2) +- return 0; ++ return IXGBE_SUCCESS; + + for (timeout = 0; + timeout < IXGBE_VALIDATE_LINK_READY_TIMEOUT; timeout++) { +- hw->phy.ops.read_reg(hw, MDIO_STAT1, MDIO_MMD_AN, &an_reg); ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &an_reg); + +- if ((an_reg & MDIO_AN_STAT1_COMPLETE) && +- (an_reg & MDIO_STAT1_LSTATUS)) ++ if ((an_reg & IXGBE_MII_AUTONEG_COMPLETE) && ++ (an_reg & IXGBE_MII_AUTONEG_LINK_UP)) + break; + +- msleep(100); ++ msec_delay(100); + } + + if (timeout == IXGBE_VALIDATE_LINK_READY_TIMEOUT) { +- hw_dbg(hw, "Link was indicated but link is down\n"); ++ DEBUGOUT("Link was indicated but link is down\n"); + return IXGBE_ERR_LINK_SETUP; + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -554,7 +627,7 @@ static s32 ixgbe_validate_link_ready(struct ixgbe_hw *hw) + * + * Reads the links register to determine if link is up and the current speed + **/ +-static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, ++STATIC s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, bool *link_up, + bool link_up_wait_to_complete) + { +@@ -562,19 +635,21 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + u32 i; + u16 link_reg, adapt_comp_reg; + ++ DEBUGFUNC("ixgbe_check_mac_link_82598"); ++ + /* +- * SERDES PHY requires us to read link status from register 0xC79F. +- * Bit 0 set indicates link is up/ready; clear indicates link down. +- * 0xC00C is read to check that the XAUI lanes are active. Bit 0 +- * clear indicates active; set indicates inactive. ++ * SERDES PHY requires us to read link status from undocumented ++ * register 0xC79F. Bit 0 set indicates link is up/ready; clear ++ * indicates link down. OxC00C is read to check that the XAUI lanes ++ * are active. 
Bit 0 clear indicates active; set indicates inactive. + */ + if (hw->phy.type == ixgbe_phy_nl) { +- hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); +- hw->phy.ops.read_reg(hw, 0xC79F, MDIO_MMD_PMAPMD, &link_reg); +- hw->phy.ops.read_reg(hw, 0xC00C, MDIO_MMD_PMAPMD, ++ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); ++ hw->phy.ops.read_reg(hw, 0xC79F, IXGBE_TWINAX_DEV, &link_reg); ++ hw->phy.ops.read_reg(hw, 0xC00C, IXGBE_TWINAX_DEV, + &adapt_comp_reg); + if (link_up_wait_to_complete) { +- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { ++ for (i = 0; i < hw->mac.max_link_up_time; i++) { + if ((link_reg & 1) && + ((adapt_comp_reg & 1) == 0)) { + *link_up = true; +@@ -582,12 +657,12 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + } else { + *link_up = false; + } +- msleep(100); ++ msec_delay(100); + hw->phy.ops.read_reg(hw, 0xC79F, +- MDIO_MMD_PMAPMD, ++ IXGBE_TWINAX_DEV, + &link_reg); + hw->phy.ops.read_reg(hw, 0xC00C, +- MDIO_MMD_PMAPMD, ++ IXGBE_TWINAX_DEV, + &adapt_comp_reg); + } + } else { +@@ -597,20 +672,20 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + *link_up = false; + } + +- if (!*link_up) ++ if (*link_up == false) + goto out; + } + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (link_up_wait_to_complete) { +- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { ++ for (i = 0; i < hw->mac.max_link_up_time; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } +- msleep(100); ++ msec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { +@@ -625,12 +700,12 @@ static s32 ixgbe_check_mac_link_82598(struct ixgbe_hw *hw, + else + *speed = IXGBE_LINK_SPEED_1GB_FULL; + +- if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && *link_up && +- (ixgbe_validate_link_ready(hw) != 0)) ++ if ((hw->device_id == IXGBE_DEV_ID_82598AT2) && (*link_up == true) && ++ (ixgbe_validate_link_ready(hw) != IXGBE_SUCCESS)) + *link_up = false; + + out: +- return 0; ++ 
return IXGBE_SUCCESS; + } + + /** +@@ -641,19 +716,21 @@ out: + * + * Set the link speed in the AUTOC register and restarts link. + **/ +-static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, ++STATIC s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) + { +- bool autoneg = false; +- s32 status = 0; ++ bool autoneg = false; ++ s32 status = IXGBE_SUCCESS; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; +- u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); +- u32 autoc = curr_autoc; +- u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; ++ u32 curr_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ u32 autoc = curr_autoc; ++ u32 link_mode = autoc & IXGBE_AUTOC_LMS_MASK; ++ ++ DEBUGFUNC("ixgbe_setup_mac_link_82598"); + + /* Check to see if speed passed in is supported. */ +- ixgbe_get_link_capabilities_82598(hw, &link_capabilities, &autoneg); ++ ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); + speed &= link_capabilities; + + if (speed == IXGBE_LINK_SPEED_UNKNOWN) +@@ -671,7 +748,7 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, autoc); + } + +- if (status == 0) { ++ if (status == IXGBE_SUCCESS) { + /* + * Setup and restart the link based on the new values in + * ixgbe_hw This will write the AUTOC register based on the new +@@ -693,12 +770,14 @@ static s32 ixgbe_setup_mac_link_82598(struct ixgbe_hw *hw, + * + * Sets the link speed in the AUTOC register in the MAC and restarts link. 
+ **/ +-static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, +- ixgbe_link_speed speed, +- bool autoneg_wait_to_complete) ++STATIC s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete) + { + s32 status; + ++ DEBUGFUNC("ixgbe_setup_copper_link_82598"); ++ + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, + autoneg_wait_to_complete); +@@ -716,19 +795,21 @@ static s32 ixgbe_setup_copper_link_82598(struct ixgbe_hw *hw, + * clears all interrupts, performing a PHY reset, and performing a link (MAC) + * reset. + **/ +-static s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_reset_hw_82598(struct ixgbe_hw *hw) + { +- s32 status = 0; +- s32 phy_status = 0; ++ s32 status = IXGBE_SUCCESS; ++ s32 phy_status = IXGBE_SUCCESS; + u32 ctrl; + u32 gheccr; + u32 i; + u32 autoc; + u8 analog_val; + ++ DEBUGFUNC("ixgbe_reset_hw_82598"); ++ + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* +@@ -789,17 +870,17 @@ mac_reset_top: + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { +- udelay(1); ++ usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST)) + break; + } + if (ctrl & IXGBE_CTRL_RST) { + status = IXGBE_ERR_RESET_FAILED; +- hw_dbg(hw, "Reset polling failed to complete.\n"); ++ DEBUGOUT("Reset polling failed to complete.\n"); + } + +- msleep(50); ++ msec_delay(50); + + /* + * Double resets are required for recovery from certain error +@@ -838,7 +919,7 @@ mac_reset_top: + hw->mac.ops.init_rx_addrs(hw); + + reset_hw_out: +- if (phy_status) ++ if (phy_status != IXGBE_SUCCESS) + status = phy_status; + + return status; +@@ -850,14 +931,16 @@ reset_hw_out: + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq 
set index + **/ +-static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) ++s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) + { + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + ++ DEBUGFUNC("ixgbe_set_vmdq_82598"); ++ + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { +- hw_dbg(hw, "RAR index %d is out of range.\n", rar); ++ DEBUGOUT1("RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + +@@ -865,7 +948,7 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) + rar_high &= ~IXGBE_RAH_VIND_MASK; + rar_high |= ((vmdq << IXGBE_RAH_VIND_SHIFT) & IXGBE_RAH_VIND_MASK); + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -874,15 +957,16 @@ static s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) + * @rar: receive address register index to associate with a VMDq index + * @vmdq: VMDq clear index (not used in 82598, but elsewhere) + **/ +-static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) ++STATIC s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) + { + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + ++ UNREFERENCED_1PARAMETER(vmdq); + + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { +- hw_dbg(hw, "RAR index %d is out of range.\n", rar); ++ DEBUGOUT1("RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + +@@ -892,7 +976,7 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) + IXGBE_WRITE_REG(hw, IXGBE_RAH(rar), rar_high); + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -901,17 +985,22 @@ static s32 ixgbe_clear_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq) + * @vlan: VLAN id to write to VLAN filter + * @vind: VMDq output index that maps queue to VLAN id in VFTA + * @vlan_on: boolean flag to turn on/off VLAN in VFTA ++ * 
@vlvf_bypass: boolean flag - unused + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ +-static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, +- bool vlan_on) ++s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, ++ bool vlan_on, bool vlvf_bypass) + { + u32 regindex; + u32 bitindex; + u32 bits; + u32 vftabyte; + ++ UNREFERENCED_1PARAMETER(vlvf_bypass); ++ ++ DEBUGFUNC("ixgbe_set_vfta_82598"); ++ + if (vlan > 4095) + return IXGBE_ERR_PARAM; + +@@ -940,7 +1029,7 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, + bits &= ~(1 << bitindex); + IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), bits); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -949,11 +1038,13 @@ static s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, + * + * Clears the VLAN filer table, and the VMDq index associated with the filter + **/ +-static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) + { + u32 offset; + u32 vlanbyte; + ++ DEBUGFUNC("ixgbe_clear_vfta_82598"); ++ + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + +@@ -962,7 +1053,7 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) + IXGBE_WRITE_REG(hw, IXGBE_VFTAVIND(vlanbyte, offset), + 0); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -973,18 +1064,20 @@ static s32 ixgbe_clear_vfta_82598(struct ixgbe_hw *hw) + * + * Performs read operation to Atlas analog register specified. 
+ **/ +-static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) ++s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) + { + u32 atlas_ctl; + ++ DEBUGFUNC("ixgbe_read_analog_reg8_82598"); ++ + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, + IXGBE_ATLASCTL_WRITE_CMD | (reg << 8)); + IXGBE_WRITE_FLUSH(hw); +- udelay(10); ++ usec_delay(10); + atlas_ctl = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); + *val = (u8)atlas_ctl; + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -995,16 +1088,18 @@ static s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val) + * + * Performs write operation to Atlas analog register specified. + **/ +-static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) ++s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) + { + u32 atlas_ctl; + ++ DEBUGFUNC("ixgbe_write_analog_reg8_82598"); ++ + atlas_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_ATLASCTL, atlas_ctl); + IXGBE_WRITE_FLUSH(hw); +- udelay(10); ++ usec_delay(10); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -1014,60 +1109,62 @@ static s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val) + * @byte_offset: byte offset to read from dev_addr + * @eeprom_data: value read + * +- * Performs 8 byte read operation to SFP module's data over I2C interface. ++ * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. 
+ **/ +-static s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, ++STATIC s32 ixgbe_read_i2c_phy_82598(struct ixgbe_hw *hw, u8 dev_addr, + u8 byte_offset, u8 *eeprom_data) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u16 sfp_addr = 0; + u16 sfp_data = 0; + u16 sfp_stat = 0; + u16 gssr; + u32 i; + ++ DEBUGFUNC("ixgbe_read_i2c_phy_82598"); ++ + if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) + gssr = IXGBE_GSSR_PHY1_SM; + else + gssr = IXGBE_GSSR_PHY0_SM; + +- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != 0) ++ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) != IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; + + if (hw->phy.type == ixgbe_phy_nl) { + /* +- * phy SDA/SCL registers are at addresses 0xC30A to +- * 0xC30D. These registers are used to talk to the SFP+ ++ * NetLogic phy SDA/SCL registers are at addresses 0xC30A to ++ * 0xC30D. These registers are used to talk to the SFP+ + * module's EEPROM through the SDA/SCL (I2C) interface. + */ + sfp_addr = (dev_addr << 8) + byte_offset; + sfp_addr = (sfp_addr | IXGBE_I2C_EEPROM_READ_MASK); + hw->phy.ops.write_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR, +- MDIO_MMD_PMAPMD, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, + sfp_addr); + + /* Poll status */ + for (i = 0; i < 100; i++) { + hw->phy.ops.read_reg_mdi(hw, + IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT, +- MDIO_MMD_PMAPMD, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &sfp_stat); + sfp_stat = sfp_stat & IXGBE_I2C_EEPROM_STATUS_MASK; + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS) + break; +- usleep_range(10000, 20000); ++ msec_delay(10); + } + + if (sfp_stat != IXGBE_I2C_EEPROM_STATUS_PASS) { +- hw_dbg(hw, "EEPROM read did not pass.\n"); ++ DEBUGOUT("EEPROM read did not pass.\n"); + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + + /* Read data */ + hw->phy.ops.read_reg_mdi(hw, IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA, +- MDIO_MMD_PMAPMD, &sfp_data); ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &sfp_data); + + *eeprom_data = (u8)(sfp_data >> 8); + } else { +@@ -1087,8 +1184,8 
@@ out: + * + * Performs 8 byte read operation to SFP module's EEPROM over I2C interface. + **/ +-static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, +- u8 *eeprom_data) ++s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 *eeprom_data) + { + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR, + byte_offset, eeprom_data); +@@ -1102,8 +1199,8 @@ static s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, + * + * Performs 8 byte read operation to SFP module's SFF-8472 data over I2C + **/ +-static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, +- u8 *sff8472_data) ++STATIC s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 *sff8472_data) + { + return ixgbe_read_i2c_phy_82598(hw, IXGBE_I2C_EEPROM_DEV_ADDR2, + byte_offset, sff8472_data); +@@ -1115,14 +1212,16 @@ static s32 ixgbe_read_i2c_sff8472_82598(struct ixgbe_hw *hw, u8 byte_offset, + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +-static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) ++u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) + { +- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; ++ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 pma_pmd_10g = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; + ++ DEBUGFUNC("ixgbe_get_supported_physical_layer_82598"); ++ + hw->phy.ops.identify(hw); + + /* Copper PHY must be checked before AUTOC LMS to determine correct +@@ -1130,13 +1229,13 @@ static u32 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw) + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: +- hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, +- MDIO_MMD_PMAPMD, &ext_ability); +- if (ext_ability & MDIO_PMA_EXTABLE_10GBT) ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); ++ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; +- if (ext_ability & MDIO_PMA_EXTABLE_1000BT) ++ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; +- if (ext_ability & MDIO_PMA_EXTABLE_100BTX) ++ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: +@@ -1217,12 +1316,14 @@ out: + * Calls common function and corrects issue with some single port devices + * that enable LAN1 but not LAN0. 
+ **/ +-static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) ++void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) + { + struct ixgbe_bus_info *bus = &hw->bus; + u16 pci_gen = 0; + u16 pci_ctrl2 = 0; + ++ DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie_82598"); ++ + ixgbe_set_lan_id_multi_port_pcie(hw); + + /* check if LAN0 is disabled */ +@@ -1248,11 +1349,12 @@ static void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw) + * @headroom: reserve n KB of headroom + * @strategy: packet buffer allocation strategy + **/ +-static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, ++STATIC void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + u32 headroom, int strategy) + { + u32 rxpktsize = IXGBE_RXPBSIZE_64KB; +- u8 i = 0; ++ u8 i = 0; ++ UNREFERENCED_1PARAMETER(headroom); + + if (!num_pb) + return; +@@ -1280,79 +1382,18 @@ static void ixgbe_set_rxpba_82598(struct ixgbe_hw *hw, int num_pb, + IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), IXGBE_TXPBSIZE_40KB); + } + +-static struct ixgbe_mac_operations mac_ops_82598 = { +- .init_hw = &ixgbe_init_hw_generic, +- .reset_hw = &ixgbe_reset_hw_82598, +- .start_hw = &ixgbe_start_hw_82598, +- .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, +- .get_media_type = &ixgbe_get_media_type_82598, +- .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82598, +- .enable_rx_dma = &ixgbe_enable_rx_dma_generic, +- .get_mac_addr = &ixgbe_get_mac_addr_generic, +- .stop_adapter = &ixgbe_stop_adapter_generic, +- .get_bus_info = &ixgbe_get_bus_info_generic, +- .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie_82598, +- .read_analog_reg8 = &ixgbe_read_analog_reg8_82598, +- .write_analog_reg8 = &ixgbe_write_analog_reg8_82598, +- .setup_link = &ixgbe_setup_mac_link_82598, +- .set_rxpba = &ixgbe_set_rxpba_82598, +- .check_link = &ixgbe_check_mac_link_82598, +- .get_link_capabilities = &ixgbe_get_link_capabilities_82598, +- .led_on = &ixgbe_led_on_generic, +- .led_off = 
&ixgbe_led_off_generic, +- .blink_led_start = &ixgbe_blink_led_start_generic, +- .blink_led_stop = &ixgbe_blink_led_stop_generic, +- .set_rar = &ixgbe_set_rar_generic, +- .clear_rar = &ixgbe_clear_rar_generic, +- .set_vmdq = &ixgbe_set_vmdq_82598, +- .clear_vmdq = &ixgbe_clear_vmdq_82598, +- .init_rx_addrs = &ixgbe_init_rx_addrs_generic, +- .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, +- .enable_mc = &ixgbe_enable_mc_generic, +- .disable_mc = &ixgbe_disable_mc_generic, +- .clear_vfta = &ixgbe_clear_vfta_82598, +- .set_vfta = &ixgbe_set_vfta_82598, +- .fc_enable = &ixgbe_fc_enable_82598, +- .set_fw_drv_ver = NULL, +- .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, +- .release_swfw_sync = &ixgbe_release_swfw_sync, +- .get_thermal_sensor_data = NULL, +- .init_thermal_sensor_thresh = NULL, +- .prot_autoc_read = &prot_autoc_read_generic, +- .prot_autoc_write = &prot_autoc_write_generic, +-}; +- +-static struct ixgbe_eeprom_operations eeprom_ops_82598 = { +- .init_params = &ixgbe_init_eeprom_params_generic, +- .read = &ixgbe_read_eerd_generic, +- .write = &ixgbe_write_eeprom_generic, +- .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, +- .read_buffer = &ixgbe_read_eerd_buffer_generic, +- .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, +- .validate_checksum = &ixgbe_validate_eeprom_checksum_generic, +- .update_checksum = &ixgbe_update_eeprom_checksum_generic, +-}; +- +-static struct ixgbe_phy_operations phy_ops_82598 = { +- .identify = &ixgbe_identify_phy_generic, +- .identify_sfp = &ixgbe_identify_module_generic, +- .init = &ixgbe_init_phy_ops_82598, +- .reset = &ixgbe_reset_phy_generic, +- .read_reg = &ixgbe_read_phy_reg_generic, +- .write_reg = &ixgbe_write_phy_reg_generic, +- .read_reg_mdi = &ixgbe_read_phy_reg_mdi, +- .write_reg_mdi = &ixgbe_write_phy_reg_mdi, +- .setup_link = &ixgbe_setup_phy_link_generic, +- .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, +- .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_82598, +- 
.read_i2c_eeprom = &ixgbe_read_i2c_eeprom_82598, +- .check_overtemp = &ixgbe_tn_check_overtemp, +-}; +- +-struct ixgbe_info ixgbe_82598_info = { +- .mac = ixgbe_mac_82598EB, +- .get_invariants = &ixgbe_get_invariants_82598, +- .mac_ops = &mac_ops_82598, +- .eeprom_ops = &eeprom_ops_82598, +- .phy_ops = &phy_ops_82598, +-}; ++/** ++ * ixgbe_enable_rx_dma_82598 - Enable the Rx DMA unit ++ * @hw: pointer to hardware structure ++ * @regval: register value to write to RXCTRL ++ * ++ * Enables the Rx DMA unit ++ **/ ++s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval) ++{ ++ DEBUGFUNC("ixgbe_enable_rx_dma_82598"); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); ++ ++ return IXGBE_SUCCESS; ++} +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.h +new file mode 100644 +index 0000000..1e0c15a +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82598.h +@@ -0,0 +1,43 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_82598_H_ ++#define _IXGBE_82598_H_ ++ ++u32 ixgbe_get_pcie_msix_count_82598(struct ixgbe_hw *hw); ++s32 ixgbe_fc_enable_82598(struct ixgbe_hw *hw); ++s32 ixgbe_start_hw_82598(struct ixgbe_hw *hw); ++s32 ixgbe_set_vmdq_82598(struct ixgbe_hw *hw, u32 rar, u32 vmdq); ++s32 ixgbe_set_vfta_82598(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, ++ bool vlvf_bypass); ++s32 ixgbe_read_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 *val); ++s32 ixgbe_write_analog_reg8_82598(struct ixgbe_hw *hw, u32 reg, u8 val); ++s32 ixgbe_read_i2c_eeprom_82598(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 *eeprom_data); ++u64 ixgbe_get_supported_physical_layer_82598(struct ixgbe_hw *hw); ++s32 ixgbe_init_phy_ops_82598(struct ixgbe_hw *hw); ++void ixgbe_set_lan_id_multi_port_pcie_82598(struct ixgbe_hw *hw); ++void ixgbe_set_pcie_completion_timeout(struct ixgbe_hw *hw); ++s32 ixgbe_enable_rx_dma_82598(struct ixgbe_hw *hw, u32 regval); ++#endif /* _IXGBE_82598_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +index bc7c924..0164233 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2014 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -26,13 +22,11 @@ + + *******************************************************************************/ + +-#include +-#include +-#include +- +-#include "ixgbe.h" ++#include "ixgbe_type.h" ++#include "ixgbe_82599.h" ++#include "ixgbe_api.h" ++#include "ixgbe_common.h" + #include "ixgbe_phy.h" +-#include "ixgbe_mbx.h" + + #define IXGBE_82599_MAX_TX_QUEUES 128 + #define IXGBE_82599_MAX_RX_QUEUES 128 +@@ -41,65 +35,37 @@ + #define IXGBE_82599_VFT_TBL_SIZE 128 + #define IXGBE_82599_RX_PB_SIZE 512 + +-static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +-static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +-static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); +-static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, +- ixgbe_link_speed speed, +- bool autoneg_wait_to_complete); +-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, +- ixgbe_link_speed speed, +- bool autoneg_wait_to_complete); +-static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); +-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, +- bool autoneg_wait_to_complete); +-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, +- ixgbe_link_speed speed, +- bool autoneg_wait_to_complete); +-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ++STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete); +-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); +-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, +- u8 dev_addr, u8 *data); +-static s32 ixgbe_write_i2c_byte_82599(struct 
ixgbe_hw *hw, u8 byte_offset, +- u8 dev_addr, u8 data); +-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); +-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); +- +-bool ixgbe_mng_enabled(struct ixgbe_hw *hw) +-{ +- u32 fwsm, manc, factps; +- +- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); +- if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) +- return false; +- +- manc = IXGBE_READ_REG(hw, IXGBE_MANC); +- if (!(manc & IXGBE_MANC_RCV_TCO_EN)) +- return false; +- +- factps = IXGBE_READ_REG(hw, IXGBE_FACTPS); +- if (factps & IXGBE_FACTPS_MNGCG) +- return false; +- +- return true; +-} +- +-static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw); ++STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, ++ u16 offset, u16 *data); ++STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, ++ u16 words, u16 *data); ++STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data); ++STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data); ++ ++void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) + { + struct ixgbe_mac_info *mac = &hw->mac; + +- /* enable the laser control functions for SFP+ fiber ++ DEBUGFUNC("ixgbe_init_mac_link_ops_82599"); ++ ++ /* ++ * enable the laser control functions for SFP+ fiber + * and MNG not enabled + */ + if ((mac->ops.get_media_type(hw) == ixgbe_media_type_fiber) && + !ixgbe_mng_enabled(hw)) { + mac->ops.disable_tx_laser = +- &ixgbe_disable_tx_laser_multispeed_fiber; ++ ixgbe_disable_tx_laser_multispeed_fiber; + mac->ops.enable_tx_laser = +- &ixgbe_enable_tx_laser_multispeed_fiber; +- mac->ops.flap_tx_laser = &ixgbe_flap_tx_laser_multispeed_fiber; ++ ixgbe_enable_tx_laser_multispeed_fiber; ++ mac->ops.flap_tx_laser = ixgbe_flap_tx_laser_multispeed_fiber; ++ + } else { + mac->ops.disable_tx_laser = NULL; + mac->ops.enable_tx_laser = NULL; 
+@@ -108,24 +74,96 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw) + + if (hw->phy.multispeed_fiber) { + /* Set up dual speed SFP+ support */ +- mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; ++ mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; ++ mac->ops.setup_mac_link = ixgbe_setup_mac_link_82599; ++ mac->ops.set_rate_select_speed = ++ ixgbe_set_hard_rate_select_speed; + } else { +- if ((mac->ops.get_media_type(hw) == +- ixgbe_media_type_backplane) && +- (hw->phy.smart_speed == ixgbe_smart_speed_auto || +- hw->phy.smart_speed == ixgbe_smart_speed_on) && +- !ixgbe_verify_lesm_fw_enabled_82599(hw)) +- mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed; +- else +- mac->ops.setup_link = &ixgbe_setup_mac_link_82599; ++ if ((ixgbe_get_media_type(hw) == ixgbe_media_type_backplane) && ++ (hw->phy.smart_speed == ixgbe_smart_speed_auto || ++ hw->phy.smart_speed == ixgbe_smart_speed_on) && ++ !ixgbe_verify_lesm_fw_enabled_82599(hw)) { ++ mac->ops.setup_link = ixgbe_setup_mac_link_smartspeed; ++ } else { ++ mac->ops.setup_link = ixgbe_setup_mac_link_82599; ++ } ++ } ++} ++ ++/** ++ * ixgbe_init_phy_ops_82599 - PHY/SFP specific init ++ * @hw: pointer to hardware structure ++ * ++ * Initialize any function pointers that were not able to be ++ * set during init_shared_code because the PHY/SFP type was ++ * not known. Perform the SFP init if necessary. ++ * ++ **/ ++s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_phy_info *phy = &hw->phy; ++ s32 ret_val = IXGBE_SUCCESS; ++ u32 esdp; ++ ++ DEBUGFUNC("ixgbe_init_phy_ops_82599"); ++ ++ if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { ++ /* Store flag indicating I2C bus access control unit. 
*/ ++ hw->phy.qsfp_shared_i2c_bus = TRUE; ++ ++ /* Initialize access to QSFP+ I2C bus */ ++ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); ++ esdp |= IXGBE_ESDP_SDP0_DIR; ++ esdp &= ~IXGBE_ESDP_SDP1_DIR; ++ esdp &= ~IXGBE_ESDP_SDP0; ++ esdp &= ~IXGBE_ESDP_SDP0_NATIVE; ++ esdp &= ~IXGBE_ESDP_SDP1_NATIVE; ++ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_82599; ++ phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_82599; ++ } ++ /* Identify the PHY or SFP module */ ++ ret_val = phy->ops.identify(hw); ++ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED) ++ goto init_phy_ops_out; ++ ++ /* Setup function pointers based on detected SFP module and speeds */ ++ ixgbe_init_mac_link_ops_82599(hw); ++ if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) ++ hw->phy.ops.reset = NULL; ++ ++ /* If copper media, overwrite with copper function pointers */ ++ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) { ++ mac->ops.setup_link = ixgbe_setup_copper_link_82599; ++ mac->ops.get_link_capabilities = ++ ixgbe_get_copper_link_capabilities_generic; ++ } ++ ++ /* Set necessary function pointers based on PHY type */ ++ switch (hw->phy.type) { ++ case ixgbe_phy_tn: ++ phy->ops.setup_link = ixgbe_setup_phy_link_tnx; ++ phy->ops.check_link = ixgbe_check_phy_link_tnx; ++ phy->ops.get_firmware_version = ++ ixgbe_get_phy_firmware_version_tnx; ++ break; ++ default: ++ break; + } ++init_phy_ops_out: ++ return ret_val; + } + +-static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) ++s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; + u16 list_offset, data_offset, data_value; + ++ DEBUGFUNC("ixgbe_setup_sfp_modules_82599"); ++ + if (hw->phy.sfp_type != ixgbe_sfp_type_unknown) { + ixgbe_init_mac_link_ops_82599(hw); + +@@ -133,13 +171,13 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) + + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + 
&data_offset); +- if (ret_val != 0) ++ if (ret_val != IXGBE_SUCCESS) + goto setup_sfp_out; + + /* PHY config will finish before releasing the semaphore */ + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); +- if (ret_val != 0) { ++ if (ret_val != IXGBE_SUCCESS) { + ret_val = IXGBE_ERR_SWFW_SYNC; + goto setup_sfp_out; + } +@@ -155,12 +193,10 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) + + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); +- /* +- * Delay obtaining semaphore again to allow FW access, +- * semaphore_delay is in ms usleep_range needs us. ++ /* Delay obtaining semaphore again to allow FW access ++ * prot_autoc_write uses the semaphore too. + */ +- usleep_range(hw->eeprom.semaphore_delay * 1000, +- hw->eeprom.semaphore_delay * 2000); ++ msec_delay(hw->eeprom.semaphore_delay); + + /* Restart DSP and set SFI mode */ + ret_val = hw->mac.ops.prot_autoc_write(hw, +@@ -168,10 +204,11 @@ static s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw) + false); + + if (ret_val) { +- hw_dbg(hw, " sfp module setup not complete\n"); ++ DEBUGOUT("sfp module setup not complete\n"); + ret_val = IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; + goto setup_sfp_out; + } ++ + } + + setup_sfp_out: +@@ -180,13 +217,11 @@ setup_sfp_out: + setup_sfp_err: + /* Release the semaphore */ + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_MAC_CSR_SM); +- /* Delay obtaining semaphore again to allow FW access, +- * semaphore_delay is in ms usleep_range needs us. 
+- */ +- usleep_range(hw->eeprom.semaphore_delay * 1000, +- hw->eeprom.semaphore_delay * 2000); +- hw_err(hw, "eeprom read at offset %d failed\n", data_offset); +- return IXGBE_ERR_SFP_SETUP_NOT_COMPLETE; ++ /* Delay obtaining semaphore again to allow FW access */ ++ msec_delay(hw->eeprom.semaphore_delay); ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", data_offset); ++ return IXGBE_ERR_PHY; + } + + /** +@@ -197,27 +232,25 @@ setup_sfp_err: + * + * For this part (82599) we need to wrap read-modify-writes with a possible + * FW/SW lock. It is assumed this lock will be freed with the next +- * prot_autoc_write_82599(). Note, that locked can only be true in cases +- * where this function doesn't return an error. +- **/ +-static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, +- u32 *reg_val) ++ * prot_autoc_write_82599(). ++ */ ++s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) + { + s32 ret_val; + + *locked = false; +- /* If LESM is on then we need to hold the SW/FW semaphore. */ ++ /* If LESM is on then we need to hold the SW/FW semaphore. */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); +- if (ret_val) ++ if (ret_val != IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; + + *locked = true; + } + + *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -225,14 +258,14 @@ static s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, + * @hw: pointer to hardware structure + * @reg_val: value to write to AUTOC + * @locked: bool to indicate whether the SW/FW lock was already taken by +- * previous proc_autoc_read_82599. ++ * previous proc_autoc_read_82599. + * +- * This part (82599) may need to hold a the SW/FW lock around all writes to ++ * This part (82599) may need to hold the SW/FW lock around all writes to + * AUTOC. Likewise after a write we need to do a pipeline reset. 
+- **/ +-static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) ++ */ ++s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) + { +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; + + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) +@@ -245,7 +278,7 @@ static s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 autoc, bool locked) + if (!locked && ixgbe_verify_lesm_fw_enabled_82599(hw)) { + ret_val = hw->mac.ops.acquire_swfw_sync(hw, + IXGBE_GSSR_MAC_CSR_SM); +- if (ret_val) ++ if (ret_val != IXGBE_SUCCESS) + return IXGBE_ERR_SWFW_SYNC; + + locked = true; +@@ -264,81 +297,95 @@ out: + return ret_val; + } + +-static s32 ixgbe_get_invariants_82599(struct ixgbe_hw *hw) +-{ +- struct ixgbe_mac_info *mac = &hw->mac; +- +- ixgbe_init_mac_link_ops_82599(hw); +- +- mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; +- mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; +- mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; +- mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; +- mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; +- mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; +- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); +- +- return 0; +-} +- + /** +- * ixgbe_init_phy_ops_82599 - PHY/SFP specific init ++ * ixgbe_init_ops_82599 - Inits func ptrs and MAC type + * @hw: pointer to hardware structure + * +- * Initialize any function pointers that were not able to be +- * set during get_invariants because the PHY/SFP type was +- * not known. Perform the SFP init if necessary. +- * ++ * Initialize the function pointers and assign the MAC type for 82599. ++ * Does not touch the hardware. 
+ **/ +-static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) ++ ++s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw) + { + struct ixgbe_mac_info *mac = &hw->mac; + struct ixgbe_phy_info *phy = &hw->phy; +- s32 ret_val = 0; +- u32 esdp; ++ struct ixgbe_eeprom_info *eeprom = &hw->eeprom; ++ s32 ret_val; + +- if (hw->device_id == IXGBE_DEV_ID_82599_QSFP_SF_QP) { +- /* Store flag indicating I2C bus access control unit. */ +- hw->phy.qsfp_shared_i2c_bus = true; ++ DEBUGFUNC("ixgbe_init_ops_82599"); ++ ++ ixgbe_init_phy_ops_generic(hw); ++ ret_val = ixgbe_init_ops_generic(hw); ++ ++ /* PHY */ ++ phy->ops.identify = ixgbe_identify_phy_82599; ++ phy->ops.init = ixgbe_init_phy_ops_82599; ++ ++ /* MAC */ ++ mac->ops.reset_hw = ixgbe_reset_hw_82599; ++ mac->ops.get_media_type = ixgbe_get_media_type_82599; ++ mac->ops.get_supported_physical_layer = ++ ixgbe_get_supported_physical_layer_82599; ++ mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; ++ mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; ++ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_82599; ++ mac->ops.read_analog_reg8 = ixgbe_read_analog_reg8_82599; ++ mac->ops.write_analog_reg8 = ixgbe_write_analog_reg8_82599; ++ mac->ops.start_hw = ixgbe_start_hw_82599; ++ mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; ++ mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; ++ mac->ops.get_device_caps = ixgbe_get_device_caps_generic; ++ mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; ++ mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; ++ mac->ops.prot_autoc_read = prot_autoc_read_82599; ++ mac->ops.prot_autoc_write = prot_autoc_write_82599; ++ ++ /* RAR, Multicast, VLAN */ ++ mac->ops.set_vmdq = ixgbe_set_vmdq_generic; ++ mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; ++ mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; ++ mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; ++ mac->rar_highwater = 1; ++ mac->ops.set_vfta = 
ixgbe_set_vfta_generic; ++ mac->ops.set_vlvf = ixgbe_set_vlvf_generic; ++ mac->ops.clear_vfta = ixgbe_clear_vfta_generic; ++ mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; ++ mac->ops.setup_sfp = ixgbe_setup_sfp_modules_82599; ++ mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; ++ mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; ++ ++ /* Link */ ++ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_82599; ++ mac->ops.check_link = ixgbe_check_mac_link_generic; ++ mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; ++ ixgbe_init_mac_link_ops_82599(hw); + +- /* Initialize access to QSFP+ I2C bus */ +- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); +- esdp |= IXGBE_ESDP_SDP0_DIR; +- esdp &= ~IXGBE_ESDP_SDP1_DIR; +- esdp &= ~IXGBE_ESDP_SDP0; +- esdp &= ~IXGBE_ESDP_SDP0_NATIVE; +- esdp &= ~IXGBE_ESDP_SDP1_NATIVE; +- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); +- IXGBE_WRITE_FLUSH(hw); ++ mac->mcft_size = IXGBE_82599_MC_TBL_SIZE; ++ mac->vft_size = IXGBE_82599_VFT_TBL_SIZE; ++ mac->num_rar_entries = IXGBE_82599_RAR_ENTRIES; ++ mac->rx_pb_size = IXGBE_82599_RX_PB_SIZE; ++ mac->max_rx_queues = IXGBE_82599_MAX_RX_QUEUES; ++ mac->max_tx_queues = IXGBE_82599_MAX_TX_QUEUES; ++ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); + +- phy->ops.read_i2c_byte = &ixgbe_read_i2c_byte_82599; +- phy->ops.write_i2c_byte = &ixgbe_write_i2c_byte_82599; +- } ++ mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) ++ & IXGBE_FWSM_MODE_MASK); + +- /* Identify the PHY or SFP module */ +- ret_val = phy->ops.identify(hw); ++ hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; + +- /* Setup function pointers based on detected SFP module and speeds */ +- ixgbe_init_mac_link_ops_82599(hw); ++ /* EEPROM */ ++ eeprom->ops.read = ixgbe_read_eeprom_82599; ++ eeprom->ops.read_buffer = ixgbe_read_eeprom_buffer_82599; + +- /* If copper media, overwrite with copper function pointers */ +- if (mac->ops.get_media_type(hw) == 
ixgbe_media_type_copper) { +- mac->ops.setup_link = &ixgbe_setup_copper_link_82599; +- mac->ops.get_link_capabilities = +- &ixgbe_get_copper_link_capabilities_generic; +- } ++ /* Manageability interface */ ++ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; + +- /* Set necessary function pointers based on phy type */ +- switch (hw->phy.type) { +- case ixgbe_phy_tn: +- phy->ops.check_link = &ixgbe_check_phy_link_tnx; +- phy->ops.setup_link = &ixgbe_setup_phy_link_tnx; +- phy->ops.get_firmware_version = +- &ixgbe_get_phy_firmware_version_tnx; +- break; +- default: +- break; +- } ++ mac->ops.get_thermal_sensor_data = ++ ixgbe_get_thermal_sensor_data_generic; ++ mac->ops.init_thermal_sensor_thresh = ++ ixgbe_init_thermal_sensor_thresh_generic; ++ ++ mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; + + return ret_val; + } +@@ -351,14 +398,17 @@ static s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw) + * + * Determines the link capabilities by reading the AUTOC register. + **/ +-static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, +- ixgbe_link_speed *speed, +- bool *autoneg) ++s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *autoneg) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u32 autoc = 0; + +- /* Determine 1G link capabilities off of SFP+ type */ ++ DEBUGFUNC("ixgbe_get_link_capabilities_82599"); ++ ++ ++ /* Check if 1G SFP module. */ + if (hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || +@@ -372,8 +422,8 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + + /* + * Determine link capabilities based on the stored value of AUTOC, +- * which represents EEPROM defaults. If AUTOC value has not been +- * stored, use the current register value. ++ * which represents EEPROM defaults. If AUTOC value has not ++ * been stored, use the current register values. 
+ */ + if (hw->mac.orig_link_settings_stored) + autoc = hw->mac.orig_autoc; +@@ -439,7 +489,9 @@ static s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, + *speed |= IXGBE_LINK_SPEED_10GB_FULL | + IXGBE_LINK_SPEED_1GB_FULL; + +- /* QSFP must not enable auto-negotiation */ ++ /* QSFP must not enable full auto-negotiation ++ * Limited autoneg is enabled at 1G ++ */ + if (hw->phy.media_type == ixgbe_media_type_fiber_qsfp) + *autoneg = false; + else +@@ -456,10 +508,12 @@ out: + * + * Returns the media type (fiber, copper, backplane) + **/ +-static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) ++enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw) + { + enum ixgbe_media_type media_type; + ++ DEBUGFUNC("ixgbe_get_media_type_82599"); ++ + /* Detect if there is a copper PHY attached. */ + switch (hw->phy.type) { + case ixgbe_phy_cu_unknown: +@@ -509,24 +563,21 @@ out: + } + + /** +- * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 +- * @hw: pointer to hardware structure ++ * ixgbe_stop_mac_link_on_d3_82599 - Disables link on D3 ++ * @hw: pointer to hardware structure + * +- * Disables link, should be called during D3 power down sequence. ++ * Disables link during D3 power down sequence. 
+ * + **/ +-static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) ++void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) + { +- u32 autoc2_reg, fwsm; ++ u32 autoc2_reg; + u16 ee_ctrl_2 = 0; + +- hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); +- +- /* Check to see if MNG FW could be enabled */ +- fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); ++ DEBUGFUNC("ixgbe_stop_mac_link_on_d3_82599"); ++ ixgbe_read_eeprom(hw, IXGBE_EEPROM_CTRL_2, &ee_ctrl_2); + +- if (((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) && +- !hw->wol_enabled && ++ if (!ixgbe_mng_present(hw) && !hw->wol_enabled && + ee_ctrl_2 & IXGBE_EEPROM_CCD_BIT) { + autoc2_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + autoc2_reg |= IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK; +@@ -542,19 +593,24 @@ static void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw) + * Configures link settings based on values in the ixgbe_hw struct. + * Restarts the link. Performs autonegotiation if needed. + **/ +-static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, ++s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + bool autoneg_wait_to_complete) + { + u32 autoc_reg; + u32 links_reg; + u32 i; +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + bool got_lock = false; + ++ DEBUGFUNC("ixgbe_start_mac_link_82599"); ++ ++ /* reset_pipeline requires us to hold this lock as it writes to ++ * AUTOC. 
++ */ + if (ixgbe_verify_lesm_fw_enabled_82599(hw)) { + status = hw->mac.ops.acquire_swfw_sync(hw, +- IXGBE_GSSR_MAC_CSR_SM); +- if (status) ++ IXGBE_GSSR_MAC_CSR_SM); ++ if (status != IXGBE_SUCCESS) + goto out; + + got_lock = true; +@@ -580,17 +636,17 @@ static s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; +- msleep(100); ++ msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = IXGBE_ERR_AUTONEG_NOT_COMPLETE; +- hw_dbg(hw, "Autoneg did not complete.\n"); ++ DEBUGOUT("Autoneg did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ +- msleep(50); ++ msec_delay(50); + + out: + return status; +@@ -604,7 +660,7 @@ out: + * PHY states. This includes selectively shutting down the Tx + * laser on the PHY, effectively halting physical link. + **/ +-static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) ++void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) + { + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + +@@ -612,11 +668,11 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) + if (ixgbe_check_reset_blocked(hw)) + return; + +- /* Disable tx laser; allow 100us to go dark per spec */ ++ /* Disable Tx laser; allow 100us to go dark per spec */ + esdp_reg |= IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); +- udelay(100); ++ usec_delay(100); + } + + /** +@@ -627,15 +683,15 @@ static void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) + * PHY states. This includes selectively turning on the Tx + * laser on the PHY, effectively starting physical link. 
+ **/ +-static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) ++void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) + { + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); + +- /* Enable tx laser; allow 100ms to light up */ ++ /* Enable Tx laser; allow 100ms to light up */ + esdp_reg &= ~IXGBE_ESDP_SDP3; + IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); + IXGBE_WRITE_FLUSH(hw); +- msleep(100); ++ msec_delay(100); + } + + /** +@@ -645,13 +701,15 @@ static void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) + * When the driver changes the link speeds that it can support, + * it sets autotry_restart to true to indicate that we need to + * initiate a new autotry session with the link partner. To do +- * so, we set the speed then disable and re-enable the tx laser, to ++ * so, we set the speed then disable and re-enable the Tx laser, to + * alert the link partner that it also needs to restart autotry on its + * end. This is consistent with true clause 37 autoneg, which also + * involves a loss of signal. + **/ +-static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) ++void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) + { ++ DEBUGFUNC("ixgbe_flap_tx_laser_multispeed_fiber"); ++ + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + return; +@@ -664,176 +722,32 @@ static void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw) + } + + /** +- * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed ++ * ixgbe_set_hard_rate_select_speed - Set module link speed + * @hw: pointer to hardware structure +- * @speed: new link speed +- * @autoneg_wait_to_complete: true when waiting for completion is needed ++ * @speed: link speed to set + * +- * Set the link speed in the AUTOC register and restarts link. 
+- **/ +-static s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, +- ixgbe_link_speed speed, +- bool autoneg_wait_to_complete) ++ * Set module link speed via RS0/RS1 rate select pins. ++ */ ++void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed) + { +- s32 status = 0; +- ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; +- ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; +- u32 speedcnt = 0; + u32 esdp_reg = IXGBE_READ_REG(hw, IXGBE_ESDP); +- u32 i = 0; +- bool link_up = false; +- bool autoneg = false; +- +- /* Mask off requested but non-supported speeds */ +- status = hw->mac.ops.get_link_capabilities(hw, &link_speed, +- &autoneg); +- if (status != 0) +- return status; +- +- speed &= link_speed; +- +- /* +- * Try each speed one by one, highest priority first. We do this in +- * software because 10gb fiber doesn't support speed autonegotiation. +- */ +- if (speed & IXGBE_LINK_SPEED_10GB_FULL) { +- speedcnt++; +- highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; +- +- /* If we already have link at this speed, just jump out */ +- status = hw->mac.ops.check_link(hw, &link_speed, &link_up, +- false); +- if (status != 0) +- return status; +- +- if ((link_speed == IXGBE_LINK_SPEED_10GB_FULL) && link_up) +- goto out; +- +- /* Set the module link speed */ +- switch (hw->phy.media_type) { +- case ixgbe_media_type_fiber: +- esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); +- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); +- IXGBE_WRITE_FLUSH(hw); +- break; +- case ixgbe_media_type_fiber_qsfp: +- /* QSFP module automatically detects MAC link speed */ +- break; +- default: +- hw_dbg(hw, "Unexpected media type.\n"); +- break; +- } +- +- /* Allow module to change analog characteristics (1G->10G) */ +- msleep(40); +- +- status = ixgbe_setup_mac_link_82599(hw, +- IXGBE_LINK_SPEED_10GB_FULL, +- autoneg_wait_to_complete); +- if (status != 0) +- return status; +- +- /* Flap the tx laser if it has not already been done 
*/ +- if (hw->mac.ops.flap_tx_laser) +- hw->mac.ops.flap_tx_laser(hw); +- +- /* +- * Wait for the controller to acquire link. Per IEEE 802.3ap, +- * Section 73.10.2, we may have to wait up to 500ms if KR is +- * attempted. 82599 uses the same timing for 10g SFI. +- */ +- for (i = 0; i < 5; i++) { +- /* Wait for the link partner to also set speed */ +- msleep(100); +- +- /* If we have link, just jump out */ +- status = hw->mac.ops.check_link(hw, &link_speed, +- &link_up, false); +- if (status != 0) +- return status; +- +- if (link_up) +- goto out; +- } +- } +- +- if (speed & IXGBE_LINK_SPEED_1GB_FULL) { +- speedcnt++; +- if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) +- highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; +- +- /* If we already have link at this speed, just jump out */ +- status = hw->mac.ops.check_link(hw, &link_speed, &link_up, +- false); +- if (status != 0) +- return status; +- +- if ((link_speed == IXGBE_LINK_SPEED_1GB_FULL) && link_up) +- goto out; +- +- /* Set the module link speed */ +- switch (hw->phy.media_type) { +- case ixgbe_media_type_fiber: +- esdp_reg &= ~IXGBE_ESDP_SDP5; +- esdp_reg |= IXGBE_ESDP_SDP5_DIR; +- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); +- IXGBE_WRITE_FLUSH(hw); +- break; +- case ixgbe_media_type_fiber_qsfp: +- /* QSFP module automatically detects MAC link speed */ +- break; +- default: +- hw_dbg(hw, "Unexpected media type.\n"); +- break; +- } +- +- /* Allow module to change analog characteristics (10G->1G) */ +- msleep(40); +- +- status = ixgbe_setup_mac_link_82599(hw, +- IXGBE_LINK_SPEED_1GB_FULL, +- autoneg_wait_to_complete); +- if (status != 0) +- return status; +- +- /* Flap the tx laser if it has not already been done */ +- if (hw->mac.ops.flap_tx_laser) +- hw->mac.ops.flap_tx_laser(hw); +- +- /* Wait for the link partner to also set speed */ +- msleep(100); +- +- /* If we have link, just jump out */ +- status = hw->mac.ops.check_link(hw, &link_speed, &link_up, +- false); +- if (status != 0) +- return status; + 
+- if (link_up) +- goto out; ++ switch (speed) { ++ case IXGBE_LINK_SPEED_10GB_FULL: ++ esdp_reg |= (IXGBE_ESDP_SDP5_DIR | IXGBE_ESDP_SDP5); ++ break; ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ esdp_reg &= ~IXGBE_ESDP_SDP5; ++ esdp_reg |= IXGBE_ESDP_SDP5_DIR; ++ break; ++ default: ++ DEBUGOUT("Invalid fixed module speed\n"); ++ return; + } + +- /* +- * We didn't get link. Configure back to the highest speed we tried, +- * (if there was more than one). We call ourselves back with just the +- * single highest speed that the user requested. +- */ +- if (speedcnt > 1) +- status = ixgbe_setup_mac_link_multispeed_fiber(hw, +- highest_link_speed, +- autoneg_wait_to_complete); +- +-out: +- /* Set autoneg_advertised value based on input link speed */ +- hw->phy.autoneg_advertised = 0; +- +- if (speed & IXGBE_LINK_SPEED_10GB_FULL) +- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; +- +- if (speed & IXGBE_LINK_SPEED_1GB_FULL) +- hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; +- +- return status; ++ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp_reg); ++ IXGBE_WRITE_FLUSH(hw); + } + + /** +@@ -844,16 +758,18 @@ out: + * + * Implements the Intel SmartSpeed algorithm. 
+ **/ +-static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, +- ixgbe_link_speed speed, +- bool autoneg_wait_to_complete) ++s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; + s32 i, j; + bool link_up = false; + u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + ++ DEBUGFUNC("ixgbe_setup_mac_link_smartspeed"); ++ + /* Set autoneg_advertised value based on input link speed */ + hw->phy.autoneg_advertised = 0; + +@@ -878,7 +794,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) { + status = ixgbe_setup_mac_link_82599(hw, speed, + autoneg_wait_to_complete); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto out; + + /* +@@ -888,12 +804,12 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + * Table 9 in the AN MAS. + */ + for (i = 0; i < 5; i++) { +- mdelay(100); ++ msec_delay(100); + + /* If we have link, just jump out */ +- status = hw->mac.ops.check_link(hw, &link_speed, +- &link_up, false); +- if (status != 0) ++ status = ixgbe_check_link(hw, &link_speed, &link_up, ++ false); ++ if (status != IXGBE_SUCCESS) + goto out; + + if (link_up) +@@ -913,7 +829,7 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + hw->phy.smart_speed_active = true; + status = ixgbe_setup_mac_link_82599(hw, speed, + autoneg_wait_to_complete); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto out; + + /* +@@ -923,12 +839,11 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + * connect attempts as defined in the AN MAS table 73-7. 
+ */ + for (i = 0; i < 6; i++) { +- mdelay(100); ++ msec_delay(100); + + /* If we have link, just jump out */ +- status = hw->mac.ops.check_link(hw, &link_speed, +- &link_up, false); +- if (status != 0) ++ status = ixgbe_check_link(hw, &link_speed, &link_up, false); ++ if (status != IXGBE_SUCCESS) + goto out; + + if (link_up) +@@ -942,7 +857,8 @@ static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, + + out: + if (link_up && (link_speed == IXGBE_LINK_SPEED_1GB_FULL)) +- hw_dbg(hw, "Smartspeed has downgraded the link speed from the maximum advertised\n"); ++ DEBUGOUT("Smartspeed has downgraded the link speed " ++ "from the maximum advertised\n"); + return status; + } + +@@ -954,28 +870,27 @@ out: + * + * Set the link speed in the AUTOC register and restarts link. + **/ +-static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, +- ixgbe_link_speed speed, +- bool autoneg_wait_to_complete) ++s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete) + { + bool autoneg = false; +- s32 status = 0; +- u32 pma_pmd_1g, link_mode, links_reg, i; ++ s32 status = IXGBE_SUCCESS; ++ u32 pma_pmd_1g, link_mode; ++ u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); /* holds the value of AUTOC register at this current point in time */ ++ u32 orig_autoc = 0; /* holds the cached value of AUTOC register */ ++ u32 autoc = current_autoc; /* Temporary variable used for comparison purposes */ + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; ++ u32 links_reg; ++ u32 i; + ixgbe_link_speed link_capabilities = IXGBE_LINK_SPEED_UNKNOWN; + +- /* holds the value of AUTOC register at this current point in time */ +- u32 current_autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); +- /* holds the cached value of AUTOC register */ +- u32 orig_autoc = 0; +- /* temporary variable used for comparison purposes */ +- u32 autoc = current_autoc; ++ 
DEBUGFUNC("ixgbe_setup_mac_link_82599"); + + /* Check to see if speed passed in is supported. */ +- status = hw->mac.ops.get_link_capabilities(hw, &link_capabilities, +- &autoneg); +- if (status != 0) ++ status = ixgbe_get_link_capabilities(hw, &link_capabilities, &autoneg); ++ if (status) + goto out; + + speed &= link_capabilities; +@@ -1023,7 +938,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + if ((speed == IXGBE_LINK_SPEED_1GB_FULL) && + (pma_pmd_1g == IXGBE_AUTOC_1G_SFI)) { + autoc &= ~IXGBE_AUTOC_LMS_MASK; +- if (autoneg) ++ if (autoneg || hw->phy.type == ixgbe_phy_qsfp_intel) + autoc |= IXGBE_AUTOC_LMS_1G_AN; + else + autoc |= IXGBE_AUTOC_LMS_1G_LINK_NO_AN; +@@ -1033,7 +948,7 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + if (autoc != current_autoc) { + /* Restart link */ + status = hw->mac.ops.prot_autoc_write(hw, autoc, false); +- if (status) ++ if (status != IXGBE_SUCCESS) + goto out; + + /* Only poll for autoneg to complete if specified to do so */ +@@ -1047,18 +962,18 @@ static s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, + IXGBE_READ_REG(hw, IXGBE_LINKS); + if (links_reg & IXGBE_LINKS_KX_AN_COMP) + break; +- msleep(100); ++ msec_delay(100); + } + if (!(links_reg & IXGBE_LINKS_KX_AN_COMP)) { + status = + IXGBE_ERR_AUTONEG_NOT_COMPLETE; +- hw_dbg(hw, "Autoneg did not complete.\n"); ++ DEBUGOUT("Autoneg did not complete.\n"); + } + } + } + + /* Add delay to filter out noises during initial link setup */ +- msleep(50); ++ msec_delay(50); + } + + out: +@@ -1073,12 +988,14 @@ out: + * + * Restarts link on PHY and MAC based on settings passed in. 
+ **/ +-static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, ++STATIC s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) + { + s32 status; + ++ DEBUGFUNC("ixgbe_setup_copper_link_82599"); ++ + /* Setup the PHY according to input speed */ + status = hw->phy.ops.setup_link_speed(hw, speed, + autoneg_wait_to_complete); +@@ -1096,17 +1013,20 @@ static s32 ixgbe_setup_copper_link_82599(struct ixgbe_hw *hw, + * and clears all interrupts, perform a PHY reset, and perform a link (MAC) + * reset. + **/ +-static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) ++s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) + { + ixgbe_link_speed link_speed; + s32 status; +- u32 ctrl, i, autoc, autoc2; ++ u32 ctrl = 0; ++ u32 i, autoc, autoc2; + u32 curr_lms; + bool link_up = false; + ++ DEBUGFUNC("ixgbe_reset_hw_82599"); ++ + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* flush pending Tx transactions */ +@@ -1138,7 +1058,7 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw) + + mac_reset_top: + /* +- * Issue global reset to the MAC. Needs to be SW reset if link is up. ++ * Issue global reset to the MAC. Needs to be SW reset if link is up. + * If link reset is used when link is up, it might reset the PHY when + * mng is using it. If link is down or the flag to force full link + * reset is set, then perform link reset. 
+@@ -1154,9 +1074,9 @@ mac_reset_top: + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); + +- /* Poll for reset bit to self-clear indicating reset is complete */ ++ /* Poll for reset bit to self-clear meaning reset is complete */ + for (i = 0; i < 10; i++) { +- udelay(1); ++ usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if (!(ctrl & IXGBE_CTRL_RST_MASK)) + break; +@@ -1164,15 +1084,15 @@ mac_reset_top: + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; +- hw_dbg(hw, "Reset polling failed to complete.\n"); ++ DEBUGOUT("Reset polling failed to complete.\n"); + } + +- msleep(50); ++ msec_delay(50); + + /* + * Double resets are required for recovery from certain error +- * conditions. Between resets, it is necessary to stall to allow time +- * for any pending HW events to complete. ++ * conditions. Between resets, it is necessary to stall to ++ * allow time for any pending HW events to complete. + */ + if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { + hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; +@@ -1204,7 +1124,7 @@ mac_reset_top: + * doesn't autoneg with out driver support we need to + * leave LMS in the state it was before we MAC reset. + * Likewise if we support WoL we don't want change the +- * LMS state either. ++ * LMS state. 
+ */ + if ((hw->phy.multispeed_fiber && ixgbe_mng_enabled(hw)) || + hw->wol_enabled) +@@ -1216,7 +1136,7 @@ mac_reset_top: + status = hw->mac.ops.prot_autoc_write(hw, + hw->mac.orig_autoc, + false); +- if (status) ++ if (status != IXGBE_SUCCESS) + goto reset_hw_out; + } + +@@ -1244,49 +1164,70 @@ mac_reset_top: + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ +- if (is_valid_ether_addr(hw->mac.san_addr)) { +- hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, +- hw->mac.san_addr, 0, IXGBE_RAH_AV); +- ++ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + ++ hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, ++ hw->mac.san_addr, 0, IXGBE_RAH_AV); ++ ++ /* clear VMDq pool/queue selection for this RAR */ ++ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, ++ IXGBE_CLEAR_VMDQ_ALL); ++ + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } + + /* Store the alternative WWNN/WWPN prefix */ + hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix, +- &hw->mac.wwpn_prefix); ++ &hw->mac.wwpn_prefix); + + reset_hw_out: + return status; + } + + /** ++ * ixgbe_fdir_check_cmd_complete - poll to check whether FDIRCMD is complete ++ * @hw: pointer to hardware structure ++ * @fdircmd: current value of FDIRCMD register ++ */ ++STATIC s32 ixgbe_fdir_check_cmd_complete(struct ixgbe_hw *hw, u32 *fdircmd) ++{ ++ int i; ++ ++ for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { ++ *fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); ++ if (!(*fdircmd & IXGBE_FDIRCMD_CMD_MASK)) ++ return IXGBE_SUCCESS; ++ usec_delay(10); ++ } ++ ++ return IXGBE_ERR_FDIR_CMD_INCOMPLETE; ++} ++ ++/** + * ixgbe_reinit_fdir_tables_82599 - Reinitialize Flow Director tables. 
+ * @hw: pointer to hardware structure + **/ + s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) + { ++ s32 err; + int i; + u32 fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); +- ++ u32 fdircmd; + fdirctrl &= ~IXGBE_FDIRCTRL_INIT_DONE; + ++ DEBUGFUNC("ixgbe_reinit_fdir_tables_82599"); ++ + /* + * Before starting reinitialization process, + * FDIRCMD.CMD must be zero. + */ +- for (i = 0; i < IXGBE_FDIRCMD_CMD_POLL; i++) { +- if (!(IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & +- IXGBE_FDIRCMD_CMD_MASK)) +- break; +- udelay(10); +- } +- if (i >= IXGBE_FDIRCMD_CMD_POLL) { +- hw_dbg(hw, "Flow Director previous command isn't complete, aborting table re-initialization.\n"); +- return IXGBE_ERR_FDIR_REINIT_FAILED; ++ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); ++ if (err) { ++ DEBUGOUT("Flow Director previous command did not complete, aborting table re-initialization.\n"); ++ return err; + } + + IXGBE_WRITE_REG(hw, IXGBE_FDIRFREE, 0); +@@ -1321,10 +1262,10 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; +- usleep_range(1000, 2000); ++ msec_delay(1); + } + if (i >= IXGBE_FDIR_INIT_DONE_POLL) { +- hw_dbg(hw, "Flow Director Signature poll time exceeded!\n"); ++ DEBUGOUT("Flow Director Signature poll time exceeded!\n"); + return IXGBE_ERR_FDIR_REINIT_FAILED; + } + +@@ -1335,7 +1276,7 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) + IXGBE_READ_REG(hw, IXGBE_FDIRMISS); + IXGBE_READ_REG(hw, IXGBE_FDIRLEN); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -1343,10 +1284,12 @@ s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw) + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register + **/ +-static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) ++STATIC void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) + { + int i; + ++ DEBUGFUNC("ixgbe_fdir_enable_82599"); ++ + /* Prime the keys for hashing 
*/ + IXGBE_WRITE_REG(hw, IXGBE_FDIRHKEY, IXGBE_ATR_BUCKET_HASH_KEY); + IXGBE_WRITE_REG(hw, IXGBE_FDIRSKEY, IXGBE_ATR_SIGNATURE_HASH_KEY); +@@ -1370,21 +1313,23 @@ static void ixgbe_fdir_enable_82599(struct ixgbe_hw *hw, u32 fdirctrl) + if (IXGBE_READ_REG(hw, IXGBE_FDIRCTRL) & + IXGBE_FDIRCTRL_INIT_DONE) + break; +- usleep_range(1000, 2000); ++ msec_delay(1); + } + + if (i >= IXGBE_FDIR_INIT_DONE_POLL) +- hw_dbg(hw, "Flow Director poll time exceeded!\n"); ++ DEBUGOUT("Flow Director poll time exceeded!\n"); + } + + /** + * ixgbe_init_fdir_signature_82599 - Initialize Flow Director signature filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially +- * contains just the value of the Rx packet buffer allocation ++ * contains just the value of the Rx packet buffer allocation + **/ + s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) + { ++ DEBUGFUNC("ixgbe_init_fdir_signature_82599"); ++ + /* + * Continue setup of fdirctrl register bits: + * Move the flexible bytes to use the ethertype - shift 6 words +@@ -1398,22 +1343,26 @@ s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl) + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** + * ixgbe_init_fdir_perfect_82599 - Initialize Flow Director perfect filters + * @hw: pointer to hardware structure + * @fdirctrl: value to write to flow director control register, initially +- * contains just the value of the Rx packet buffer allocation ++ * contains just the value of the Rx packet buffer allocation ++ * @cloud_mode: true - cloud mode, false - other mode + **/ +-s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) ++s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, ++ bool cloud_mode) + { ++ DEBUGFUNC("ixgbe_init_fdir_perfect_82599"); ++ + /* + * Continue setup of fdirctrl register bits: + 
* Turn perfect match filtering on + * Report hash in RSS field of Rx wb descriptor +- * Initialize the drop queue ++ * Initialize the drop queue to queue 127 + * Move the flexible bytes to use the ethertype - shift 6 words + * Set the maximum length per hash bucket to 0xA filters + * Send interrupt when 64 (0x4 * 16) filters are left +@@ -1425,10 +1374,48 @@ s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl) + (0xA << IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT) | + (4 << IXGBE_FDIRCTRL_FULL_THRESH_SHIFT); + ++ if (cloud_mode) ++ fdirctrl |=(IXGBE_FDIRCTRL_FILTERMODE_CLOUD << ++ IXGBE_FDIRCTRL_FILTERMODE_SHIFT); ++ + /* write hashes and fdirctrl register, poll for completion */ + ixgbe_fdir_enable_82599(hw, fdirctrl); + +- return 0; ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_set_fdir_drop_queue_82599 - Set Flow Director drop queue ++ * @hw: pointer to hardware structure ++ * @dropqueue: Rx queue index used for the dropped packets ++ **/ ++void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue) ++{ ++ u32 fdirctrl; ++ ++ DEBUGFUNC("ixgbe_set_fdir_drop_queue_82599"); ++ /* Clear init done bit and drop queue field */ ++ fdirctrl = IXGBE_READ_REG(hw, IXGBE_FDIRCTRL); ++ fdirctrl &= ~(IXGBE_FDIRCTRL_DROP_Q_MASK | IXGBE_FDIRCTRL_INIT_DONE); ++ ++ /* Set drop queue */ ++ fdirctrl |= (dropqueue << IXGBE_FDIRCTRL_DROP_Q_SHIFT); ++ if ((hw->mac.type == ixgbe_mac_X550) || ++ (hw->mac.type == ixgbe_mac_X550EM_x) || ++ (hw->mac.type == ixgbe_mac_X550EM_a)) ++ fdirctrl |= IXGBE_FDIRCTRL_DROP_NO_MATCH; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) | ++ IXGBE_FDIRCMD_CLEARHT)); ++ IXGBE_WRITE_FLUSH(hw); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, ++ (IXGBE_READ_REG(hw, IXGBE_FDIRCMD) & ++ ~IXGBE_FDIRCMD_CLEARHT)); ++ IXGBE_WRITE_FLUSH(hw); ++ ++ /* write hashes and fdirctrl register, poll for completion */ ++ ixgbe_fdir_enable_82599(hw, fdirctrl); + } + + /* +@@ -1460,22 +1447,22 @@ do { \ + * @stream: input bitstream to 
compute the hash on + * + * This function is almost identical to the function above but contains +- * several optomizations such as unwinding all of the loops, letting the ++ * several optimizations such as unwinding all of the loops, letting the + * compiler work out all of the conditional ifs since the keys are static + * defines, and computing two keys at once since the hashed dword stream + * will be the same for both keys. + **/ +-static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, +- union ixgbe_atr_hash_dword common) ++u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, ++ union ixgbe_atr_hash_dword common) + { + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; + u32 sig_hash = 0, bucket_hash = 0, common_hash = 0; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ +- flow_vm_vlan = ntohl(input.dword); ++ flow_vm_vlan = IXGBE_NTOHL(input.dword); + + /* generate common hash dword */ +- hi_hash_dword = ntohl(common.dword); ++ hi_hash_dword = IXGBE_NTOHL(common.dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); +@@ -1489,7 +1476,7 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed +- * so we do not add the vlan until after bit 0 was processed ++ * so we do not add the VLAN until after bit 0 was processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + +@@ -1527,20 +1514,31 @@ static u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, + * @input: unique input dword + * @common: compressed common input dword + * @queue: queue index to direct traffic to ++ * ++ * Note that the tunnel bit in input must not be set when the hardware ++ * tunneling support does not exist. 
+ **/ +-s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, +- union ixgbe_atr_hash_dword input, +- union ixgbe_atr_hash_dword common, +- u8 queue) ++void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, ++ union ixgbe_atr_hash_dword input, ++ union ixgbe_atr_hash_dword common, ++ u8 queue) + { +- u64 fdirhashcmd; +- u32 fdircmd; ++ u64 fdirhashcmd; ++ u8 flow_type; ++ bool tunnel; ++ u32 fdircmd; ++ ++ DEBUGFUNC("ixgbe_fdir_add_signature_filter_82599"); + + /* + * Get the flow_type in order to program FDIRCMD properly + * lowest 2 bits are FDIRCMD.L4TYPE, third lowest bit is FDIRCMD.IPV6 ++ * fifth is FDIRCMD.TUNNEL_FILTER + */ +- switch (input.formatted.flow_type) { ++ tunnel = !!(input.formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK); ++ flow_type = input.formatted.flow_type & ++ (IXGBE_ATR_L4TYPE_TUNNEL_MASK - 1); ++ switch (flow_type) { + case IXGBE_ATR_FLOW_TYPE_TCPV4: + case IXGBE_ATR_FLOW_TYPE_UDPV4: + case IXGBE_ATR_FLOW_TYPE_SCTPV4: +@@ -1549,15 +1547,17 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + case IXGBE_ATR_FLOW_TYPE_SCTPV6: + break; + default: +- hw_dbg(hw, " Error on flow type input\n"); +- return IXGBE_ERR_CONFIG; ++ DEBUGOUT(" Error on flow type input\n"); ++ return; + } + + /* configure FDIRCMD register */ + fdircmd = IXGBE_FDIRCMD_CMD_ADD_FLOW | IXGBE_FDIRCMD_FILTER_UPDATE | + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; +- fdircmd |= input.formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; ++ fdircmd |= (u32)flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; ++ if (tunnel) ++ fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; + + /* + * The lower 32-bits of fdirhashcmd is for FDIRHASH, the upper 32-bits +@@ -1567,9 +1567,9 @@ s32 ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, + fdirhashcmd |= ixgbe_atr_compute_sig_hash_82599(input, common); + IXGBE_WRITE_REG64(hw, IXGBE_FDIRHASH, fdirhashcmd); + +- hw_dbg(hw, "Tx Queue=%x hash=%x\n", queue, 
(u32)fdirhashcmd); ++ DEBUGOUT2("Tx Queue=%x hash=%x\n", queue, (u32)fdirhashcmd); + +- return 0; ++ return; + } + + #define IXGBE_COMPUTE_BKT_HASH_ITERATION(_n) \ +@@ -1586,7 +1586,7 @@ do { \ + * @atr_input: input bitstream to compute the hash on + * @input_mask: mask for the input bitstream + * +- * This function serves two main purposes. First it applys the input_mask ++ * This function serves two main purposes. First it applies the input_mask + * to the atr_input resulting in a cleaned up atr_input data stream. + * Secondly it computes the hash and stores it in the bkt_hash field at + * the end of the input byte stream. This way it will be available for +@@ -1597,20 +1597,21 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + { + + u32 hi_hash_dword, lo_hash_dword, flow_vm_vlan; +- u32 bucket_hash = 0, hi_dword = 0; +- int i; ++ u32 bucket_hash = 0; ++ u32 hi_dword = 0; ++ u32 i = 0; + + /* Apply masks to input data */ +- for (i = 0; i <= 10; i++) +- input->dword_stream[i] &= input_mask->dword_stream[i]; ++ for (i = 0; i < 14; i++) ++ input->dword_stream[i] &= input_mask->dword_stream[i]; + + /* record the flow_vm_vlan bits as they are a key part to the hash */ +- flow_vm_vlan = ntohl(input->dword_stream[0]); ++ flow_vm_vlan = IXGBE_NTOHL(input->dword_stream[0]); + + /* generate common hash dword */ +- for (i = 1; i <= 10; i++) ++ for (i = 1; i <= 13; i++) + hi_dword ^= input->dword_stream[i]; +- hi_hash_dword = ntohl(hi_dword); ++ hi_hash_dword = IXGBE_NTOHL(hi_dword); + + /* low dword is word swapped version of common */ + lo_hash_dword = (hi_hash_dword >> 16) | (hi_hash_dword << 16); +@@ -1624,7 +1625,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + /* + * apply flow ID/VM pool/VLAN ID bits to lo hash dword, we had to + * delay this because bit 0 of the stream should not be processed +- * so we do not add the vlan until after bit 0 was processed ++ * so we do not add the VLAN until after bit 0 was 
processed + */ + lo_hash_dword ^= flow_vm_vlan ^ (flow_vm_vlan << 16); + +@@ -1640,7 +1641,7 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + } + + /** +- * ixgbe_get_fdirtcpm_82599 - generate a tcp port from atr_input_masks ++ * ixgbe_get_fdirtcpm_82599 - generate a TCP port from atr_input_masks + * @input_mask: mask to be bit swapped + * + * The source and destination port masks for flow director are bit swapped +@@ -1648,12 +1649,11 @@ void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, + * generate a correctly swapped value we need to bit swap the mask and that + * is what is accomplished by this function. + **/ +-static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) ++STATIC u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) + { +- u32 mask = ntohs(input_mask->formatted.dst_port); +- ++ u32 mask = IXGBE_NTOHS(input_mask->formatted.dst_port); + mask <<= IXGBE_FDIRTCPM_DPORTM_SHIFT; +- mask |= ntohs(input_mask->formatted.src_port); ++ mask |= IXGBE_NTOHS(input_mask->formatted.src_port); + mask = ((mask & 0x55555555) << 1) | ((mask & 0xAAAAAAAA) >> 1); + mask = ((mask & 0x33333333) << 2) | ((mask & 0xCCCCCCCC) >> 2); + mask = ((mask & 0x0F0F0F0F) << 4) | ((mask & 0xF0F0F0F0) >> 4); +@@ -1672,17 +1672,19 @@ static u32 ixgbe_get_fdirtcpm_82599(union ixgbe_atr_input *input_mask) + (((u32)(_value) & 0x0000FF00) << 8) | ((u32)(_value) << 24)) + + #define IXGBE_WRITE_REG_BE32(a, reg, value) \ +- IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(ntohl(value))) ++ IXGBE_WRITE_REG((a), (reg), IXGBE_STORE_AS_BE32(IXGBE_NTOHL(value))) + + #define IXGBE_STORE_AS_BE16(_value) \ +- ntohs(((u16)(_value) >> 8) | ((u16)(_value) << 8)) ++ IXGBE_NTOHS(((u16)(_value) >> 8) | ((u16)(_value) << 8)) + + s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, +- union ixgbe_atr_input *input_mask) ++ union ixgbe_atr_input *input_mask, bool cloud_mode) + { + /* mask IPv6 since it is currently not supported */ + 
u32 fdirm = IXGBE_FDIRM_DIPv6; + u32 fdirtcpm; ++ u32 fdirip6m; ++ DEBUGFUNC("ixgbe_fdir_set_atr_input_mask_82599"); + + /* + * Program the relevant mask registers. If src/dst_port or src/dst_addr +@@ -1696,7 +1698,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + + /* verify bucket hash is cleared on hash generation */ + if (input_mask->formatted.bkt_hash) +- hw_dbg(hw, " bucket hash should always be 0 in mask\n"); ++ DEBUGOUT(" bucket hash should always be 0 in mask\n"); + + /* Program FDIRM and verify partial masks */ + switch (input_mask->formatted.vm_pool & 0x7F) { +@@ -1705,7 +1707,7 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + case 0x7F: + break; + default: +- hw_dbg(hw, " Error on vm pool mask\n"); ++ DEBUGOUT(" Error on vm pool mask\n"); + return IXGBE_ERR_CONFIG; + } + +@@ -1714,97 +1716,199 @@ s32 ixgbe_fdir_set_input_mask_82599(struct ixgbe_hw *hw, + fdirm |= IXGBE_FDIRM_L4P; + if (input_mask->formatted.dst_port || + input_mask->formatted.src_port) { +- hw_dbg(hw, " Error on src/dst port mask\n"); ++ DEBUGOUT(" Error on src/dst port mask\n"); + return IXGBE_ERR_CONFIG; + } + case IXGBE_ATR_L4TYPE_MASK: + break; + default: +- hw_dbg(hw, " Error on flow type mask\n"); ++ DEBUGOUT(" Error on flow type mask\n"); + return IXGBE_ERR_CONFIG; + } + +- switch (ntohs(input_mask->formatted.vlan_id) & 0xEFFF) { ++ switch (IXGBE_NTOHS(input_mask->formatted.vlan_id) & 0xEFFF) { + case 0x0000: +- /* mask VLAN ID, fall through to mask VLAN priority */ ++ /* mask VLAN ID */ + fdirm |= IXGBE_FDIRM_VLANID; ++ /* fall through */ + case 0x0FFF: + /* mask VLAN priority */ + fdirm |= IXGBE_FDIRM_VLANP; + break; + case 0xE000: +- /* mask VLAN ID only, fall through */ ++ /* mask VLAN ID only */ + fdirm |= IXGBE_FDIRM_VLANID; ++ /* fall through */ + case 0xEFFF: + /* no VLAN fields masked */ + break; + default: +- hw_dbg(hw, " Error on VLAN mask\n"); ++ DEBUGOUT(" Error on VLAN mask\n"); + return IXGBE_ERR_CONFIG; + } + + switch 
(input_mask->formatted.flex_bytes & 0xFFFF) { + case 0x0000: +- /* Mask Flex Bytes, fall through */ ++ /* Mask Flex Bytes */ + fdirm |= IXGBE_FDIRM_FLEX; ++ /* fall through */ + case 0xFFFF: + break; + default: +- hw_dbg(hw, " Error on flexible byte mask\n"); ++ DEBUGOUT(" Error on flexible byte mask\n"); + return IXGBE_ERR_CONFIG; + } + +- /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ +- IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); ++ if (cloud_mode) { ++ fdirm |= IXGBE_FDIRM_L3P; ++ fdirip6m = ((u32) 0xFFFFU << IXGBE_FDIRIP6M_DIPM_SHIFT); ++ fdirip6m |= IXGBE_FDIRIP6M_ALWAYS_MASK; + +- /* store the TCP/UDP port masks, bit reversed from port layout */ +- fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); ++ switch (input_mask->formatted.inner_mac[0] & 0xFF) { ++ case 0x00: ++ /* Mask inner MAC, fall through */ ++ fdirip6m |= IXGBE_FDIRIP6M_INNER_MAC; ++ case 0xFF: ++ break; ++ default: ++ DEBUGOUT(" Error on inner_mac byte mask\n"); ++ return IXGBE_ERR_CONFIG; ++ } + +- /* write both the same so that UDP and TCP use the same mask */ +- IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); +- IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); ++ switch (input_mask->formatted.tni_vni & 0xFFFFFFFF) { ++ case 0x0: ++ /* Mask vxlan id */ ++ fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI; ++ break; ++ case 0x00FFFFFF: ++ fdirip6m |= IXGBE_FDIRIP6M_TNI_VNI_24; ++ break; ++ case 0xFFFFFFFF: ++ break; ++ default: ++ DEBUGOUT(" Error on TNI/VNI byte mask\n"); ++ return IXGBE_ERR_CONFIG; ++ } + +- /* store source and destination IP masks (big-enian) */ +- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, +- ~input_mask->formatted.src_ip[0]); +- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, +- ~input_mask->formatted.dst_ip[0]); ++ switch (input_mask->formatted.tunnel_type & 0xFFFF) { ++ case 0x0: ++ /* Mask turnnel type, fall through */ ++ fdirip6m |= IXGBE_FDIRIP6M_TUNNEL_TYPE; ++ case 0xFFFF: ++ break; ++ default: ++ DEBUGOUT(" Error on tunnel type byte mask\n"); ++ return IXGBE_ERR_CONFIG; ++ } ++ 
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIP6M, fdirip6m); ++ ++ /* Set all bits in FDIRTCPM, FDIRUDPM, FDIRSCTPM, ++ * FDIRSIP4M and FDIRDIP4M in cloud mode to allow ++ * L3/L3 packets to tunnel. ++ */ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, 0xFFFFFFFF); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, 0xFFFFFFFF); ++ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, 0xFFFFFFFF); ++ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, 0xFFFFFFFF); ++ switch (hw->mac.type) { ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, 0xFFFFFFFF); ++ break; ++ default: ++ break; ++ } ++ } ++ ++ /* Now mask VM pool and destination IPv6 - bits 5 and 2 */ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRM, fdirm); ++ ++ if (!cloud_mode) { ++ /* store the TCP/UDP port masks, bit reversed from port ++ * layout */ ++ fdirtcpm = ixgbe_get_fdirtcpm_82599(input_mask); ++ ++ /* write both the same so that UDP and TCP use the same mask */ ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRTCPM, ~fdirtcpm); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRUDPM, ~fdirtcpm); ++ /* also use it for SCTP */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRSCTPM, ~fdirtcpm); ++ break; ++ default: ++ break; ++ } + +- return 0; ++ /* store source and destination IP masks (big-enian) */ ++ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIP4M, ++ ~input_mask->formatted.src_ip[0]); ++ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRDIP4M, ++ ~input_mask->formatted.dst_ip[0]); ++ } ++ return IXGBE_SUCCESS; + } + + s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + union ixgbe_atr_input *input, +- u16 soft_id, u8 queue) ++ u16 soft_id, u8 queue, bool cloud_mode) + { + u32 fdirport, fdirvlan, fdirhash, fdircmd; ++ u32 addr_low, addr_high; ++ u32 cloud_type = 0; ++ s32 err; ++ ++ DEBUGFUNC("ixgbe_fdir_write_perfect_filter_82599"); ++ if (!cloud_mode) { ++ /* currently IPv6 is not supported, must be programmed with 0 */ ++ 
IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), ++ input->formatted.src_ip[0]); ++ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), ++ input->formatted.src_ip[1]); ++ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), ++ input->formatted.src_ip[2]); ++ ++ /* record the source address (big-endian) */ ++ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, ++ input->formatted.src_ip[0]); ++ ++ /* record the first 32 bits of the destination address ++ * (big-endian) */ ++ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, ++ input->formatted.dst_ip[0]); ++ ++ /* record source and destination port (little-endian)*/ ++ fdirport = IXGBE_NTOHS(input->formatted.dst_port); ++ fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; ++ fdirport |= IXGBE_NTOHS(input->formatted.src_port); ++ IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); ++ } + +- /* currently IPv6 is not supported, must be programmed with 0 */ +- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), +- input->formatted.src_ip[0]); +- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), +- input->formatted.src_ip[1]); +- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), +- input->formatted.src_ip[2]); +- +- /* record the source address (big-endian) */ +- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPSA, input->formatted.src_ip[0]); +- +- /* record the first 32 bits of the destination address (big-endian) */ +- IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRIPDA, input->formatted.dst_ip[0]); +- +- /* record source and destination port (little-endian)*/ +- fdirport = ntohs(input->formatted.dst_port); +- fdirport <<= IXGBE_FDIRPORT_DESTINATION_SHIFT; +- fdirport |= ntohs(input->formatted.src_port); +- IXGBE_WRITE_REG(hw, IXGBE_FDIRPORT, fdirport); +- +- /* record vlan (little-endian) and flex_bytes(big-endian) */ ++ /* record VLAN (little-endian) and flex_bytes(big-endian) */ + fdirvlan = IXGBE_STORE_AS_BE16(input->formatted.flex_bytes); + fdirvlan <<= IXGBE_FDIRVLAN_FLEX_SHIFT; +- fdirvlan |= ntohs(input->formatted.vlan_id); ++ fdirvlan |= IXGBE_NTOHS(input->formatted.vlan_id); + 
IXGBE_WRITE_REG(hw, IXGBE_FDIRVLAN, fdirvlan); + ++ if (cloud_mode) { ++ if (input->formatted.tunnel_type != 0) ++ cloud_type = 0x80000000; ++ ++ addr_low = ((u32)input->formatted.inner_mac[0] | ++ ((u32)input->formatted.inner_mac[1] << 8) | ++ ((u32)input->formatted.inner_mac[2] << 16) | ++ ((u32)input->formatted.inner_mac[3] << 24)); ++ addr_high = ((u32)input->formatted.inner_mac[4] | ++ ((u32)input->formatted.inner_mac[5] << 8)); ++ cloud_type |= addr_high; ++ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(0), addr_low); ++ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(1), cloud_type); ++ IXGBE_WRITE_REG_BE32(hw, IXGBE_FDIRSIPv6(2), input->formatted.tni_vni); ++ } ++ + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; + fdirhash |= soft_id << IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT; +@@ -1821,13 +1925,20 @@ s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, + IXGBE_FDIRCMD_LAST | IXGBE_FDIRCMD_QUEUE_EN; + if (queue == IXGBE_FDIR_DROP_QUEUE) + fdircmd |= IXGBE_FDIRCMD_DROP; ++ if (input->formatted.flow_type & IXGBE_ATR_L4TYPE_TUNNEL_MASK) ++ fdircmd |= IXGBE_FDIRCMD_TUNNEL_FILTER; + fdircmd |= input->formatted.flow_type << IXGBE_FDIRCMD_FLOW_TYPE_SHIFT; + fdircmd |= (u32)queue << IXGBE_FDIRCMD_RX_QUEUE_SHIFT; + fdircmd |= (u32)input->formatted.vm_pool << IXGBE_FDIRCMD_VT_POOL_SHIFT; + + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, fdircmd); ++ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); ++ if (err) { ++ DEBUGOUT("Flow Director command did not complete!\n"); ++ return err; ++ } + +- return 0; ++ return IXGBE_SUCCESS; + } + + s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, +@@ -1835,9 +1946,8 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + u16 soft_id) + { + u32 fdirhash; +- u32 fdircmd = 0; +- u32 retry_count; +- s32 err = 0; ++ u32 fdircmd; ++ s32 err; + + /* configure FDIRHASH register */ + fdirhash = input->formatted.bkt_hash; +@@ -1850,18 +1960,12 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw 
*hw, + /* Query if filter is present */ + IXGBE_WRITE_REG(hw, IXGBE_FDIRCMD, IXGBE_FDIRCMD_CMD_QUERY_REM_FILT); + +- for (retry_count = 10; retry_count; retry_count--) { +- /* allow 10us for query to process */ +- udelay(10); +- /* verify query completed successfully */ +- fdircmd = IXGBE_READ_REG(hw, IXGBE_FDIRCMD); +- if (!(fdircmd & IXGBE_FDIRCMD_CMD_MASK)) +- break; ++ err = ixgbe_fdir_check_cmd_complete(hw, &fdircmd); ++ if (err) { ++ DEBUGOUT("Flow Director command did not complete!\n"); ++ return err; + } + +- if (!retry_count) +- err = IXGBE_ERR_FDIR_REINIT_FAILED; +- + /* if filter exists in hardware then remove it */ + if (fdircmd & IXGBE_FDIRCMD_FILTER_VALID) { + IXGBE_WRITE_REG(hw, IXGBE_FDIRHASH, fdirhash); +@@ -1870,7 +1974,72 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + IXGBE_FDIRCMD_CMD_REMOVE_FLOW); + } + +- return err; ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_fdir_add_perfect_filter_82599 - Adds a perfect filter ++ * @hw: pointer to hardware structure ++ * @input: input bitstream ++ * @input_mask: mask for the input bitstream ++ * @soft_id: software index for the filters ++ * @queue: queue index to direct traffic to ++ * ++ * Note that the caller to this function must lock before calling, since the ++ * hardware writes must be protected from one another. 
++ **/ ++s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, ++ union ixgbe_atr_input *input, ++ union ixgbe_atr_input *input_mask, ++ u16 soft_id, u8 queue, bool cloud_mode) ++{ ++ s32 err = IXGBE_ERR_CONFIG; ++ ++ DEBUGFUNC("ixgbe_fdir_add_perfect_filter_82599"); ++ ++ /* ++ * Check flow_type formatting, and bail out before we touch the hardware ++ * if there's a configuration issue ++ */ ++ switch (input->formatted.flow_type) { ++ case IXGBE_ATR_FLOW_TYPE_IPV4: ++ case IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4: ++ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK; ++ if (input->formatted.dst_port || input->formatted.src_port) { ++ DEBUGOUT(" Error on src/dst port\n"); ++ return IXGBE_ERR_CONFIG; ++ } ++ break; ++ case IXGBE_ATR_FLOW_TYPE_SCTPV4: ++ case IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4: ++ if (input->formatted.dst_port || input->formatted.src_port) { ++ DEBUGOUT(" Error on src/dst port\n"); ++ return IXGBE_ERR_CONFIG; ++ } ++ /* fall through */ ++ case IXGBE_ATR_FLOW_TYPE_TCPV4: ++ case IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4: ++ case IXGBE_ATR_FLOW_TYPE_UDPV4: ++ case IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4: ++ input_mask->formatted.flow_type = IXGBE_ATR_L4TYPE_IPV6_MASK | ++ IXGBE_ATR_L4TYPE_MASK; ++ break; ++ default: ++ DEBUGOUT(" Error on flow type input\n"); ++ return err; ++ } ++ ++ /* program input mask into the HW */ ++ err = ixgbe_fdir_set_input_mask_82599(hw, input_mask, cloud_mode); ++ if (err) ++ return err; ++ ++ /* apply mask and compute/store hash */ ++ ixgbe_atr_compute_perfect_hash_82599(input, input_mask); ++ ++ /* program filters to filter memory */ ++ return ixgbe_fdir_write_perfect_filter_82599(hw, input, ++ soft_id, queue, cloud_mode); + } + + /** +@@ -1881,18 +2050,20 @@ s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, + * + * Performs read operation to Omer analog register specified. 
+ **/ +-static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) ++s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) + { + u32 core_ctl; + ++ DEBUGFUNC("ixgbe_read_analog_reg8_82599"); ++ + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, IXGBE_CORECTL_WRITE_CMD | + (reg << 8)); + IXGBE_WRITE_FLUSH(hw); +- udelay(10); ++ usec_delay(10); + core_ctl = IXGBE_READ_REG(hw, IXGBE_CORECTL); + *val = (u8)core_ctl; + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -1903,16 +2074,18 @@ static s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val) + * + * Performs write operation to Omer analog register specified. + **/ +-static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) ++s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) + { + u32 core_ctl; + ++ DEBUGFUNC("ixgbe_write_analog_reg8_82599"); ++ + core_ctl = (reg << 8) | val; + IXGBE_WRITE_REG(hw, IXGBE_CORECTL, core_ctl); + IXGBE_WRITE_FLUSH(hw); +- udelay(10); ++ usec_delay(10); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -1923,22 +2096,24 @@ static s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val) + * and the generation start_hw function. + * Then performs revision-specific operations, if any. 
+ **/ +-static s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) ++s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; ++ ++ DEBUGFUNC("ixgbe_start_hw_82599"); + + ret_val = ixgbe_start_hw_generic(hw); +- if (ret_val != 0) ++ if (ret_val != IXGBE_SUCCESS) + goto out; + + ret_val = ixgbe_start_hw_gen2(hw); +- if (ret_val != 0) ++ if (ret_val != IXGBE_SUCCESS) + goto out; + + /* We need to run link autotry after the driver loads */ + hw->mac.autotry_restart = true; + +- if (ret_val == 0) ++ if (ret_val == IXGBE_SUCCESS) + ret_val = ixgbe_verify_fw_version_82599(hw); + out: + return ret_val; +@@ -1952,16 +2127,18 @@ out: + * If PHY already detected, maintains current PHY type in hw struct, + * otherwise executes the PHY detection routine. + **/ +-static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) ++s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) + { +- s32 status = IXGBE_ERR_PHY_ADDR_INVALID; ++ s32 status; ++ ++ DEBUGFUNC("ixgbe_identify_phy_82599"); + + /* Detect PHY if not unknown - returns success if already detected. */ + status = ixgbe_identify_phy_generic(hw); +- if (status != 0) { ++ if (status != IXGBE_SUCCESS) { + /* 82599 10GBASE-T requires an external PHY */ + if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_copper) +- goto out; ++ return status; + else + status = ixgbe_identify_module_generic(hw); + } +@@ -1969,14 +2146,13 @@ static s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw) + /* Set PHY type none if no PHY detected */ + if (hw->phy.type == ixgbe_phy_unknown) { + hw->phy.type = ixgbe_phy_none; +- status = 0; ++ return IXGBE_SUCCESS; + } + + /* Return error if SFP module has been detected but is not supported */ + if (hw->phy.type == ixgbe_phy_sfp_unsupported) +- status = IXGBE_ERR_SFP_NOT_SUPPORTED; ++ return IXGBE_ERR_SFP_NOT_SUPPORTED; + +-out: + return status; + } + +@@ -1986,30 +2162,30 @@ out: + * + * Determines physical layer capabilities of the current configuration. 
+ **/ +-static u32 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) ++u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw) + { +- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; ++ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u32 autoc = IXGBE_READ_REG(hw, IXGBE_AUTOC); + u32 autoc2 = IXGBE_READ_REG(hw, IXGBE_AUTOC2); + u32 pma_pmd_10g_serial = autoc2 & IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK; + u32 pma_pmd_10g_parallel = autoc & IXGBE_AUTOC_10G_PMA_PMD_MASK; + u32 pma_pmd_1g = autoc & IXGBE_AUTOC_1G_PMA_PMD_MASK; + u16 ext_ability = 0; +- u8 comp_codes_10g = 0; +- u8 comp_codes_1g = 0; ++ ++ DEBUGFUNC("ixgbe_get_support_physical_layer_82599"); + + hw->phy.ops.identify(hw); + + switch (hw->phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_cu_unknown: +- hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, +- &ext_ability); +- if (ext_ability & MDIO_PMA_EXTABLE_10GBT) ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); ++ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; +- if (ext_ability & MDIO_PMA_EXTABLE_1000BT) ++ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; +- if (ext_ability & MDIO_PMA_EXTABLE_100BTX) ++ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + goto out; + default: +@@ -2062,49 +2238,7 @@ sfp_check: + /* SFP check must be done last since DA modules are sometimes used to + * test KR mode - we need to id KR mode correctly before SFP module. 
+ * Call identify_sfp because the pluggable module may have changed */ +- hw->phy.ops.identify_sfp(hw); +- if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) +- goto out; +- +- switch (hw->phy.type) { +- case ixgbe_phy_sfp_passive_tyco: +- case ixgbe_phy_sfp_passive_unknown: +- case ixgbe_phy_qsfp_passive_unknown: +- physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; +- break; +- case ixgbe_phy_sfp_ftl_active: +- case ixgbe_phy_sfp_active_unknown: +- case ixgbe_phy_qsfp_active_unknown: +- physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; +- break; +- case ixgbe_phy_sfp_avago: +- case ixgbe_phy_sfp_ftl: +- case ixgbe_phy_sfp_intel: +- case ixgbe_phy_sfp_unknown: +- hw->phy.ops.read_i2c_eeprom(hw, +- IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); +- hw->phy.ops.read_i2c_eeprom(hw, +- IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); +- if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) +- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; +- else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) +- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; +- else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) +- physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; +- break; +- case ixgbe_phy_qsfp_intel: +- case ixgbe_phy_qsfp_unknown: +- hw->phy.ops.read_i2c_eeprom(hw, +- IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); +- if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) +- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; +- else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) +- physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; +- break; +- default: +- break; +- } +- ++ physical_layer = ixgbe_get_supported_phy_sfp_layer_generic(hw); + out: + return physical_layer; + } +@@ -2116,25 +2250,32 @@ out: + * + * Enables the Rx DMA unit for 82599 + **/ +-static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) ++s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) + { ++ ++ DEBUGFUNC("ixgbe_enable_rx_dma_82599"); ++ + /* + * Workaround for 82599 silicon errata when 
enabling the Rx datapath. + * If traffic is incoming before we enable the Rx unit, it could hang + * the Rx DMA unit. Therefore, make sure the security engine is + * completely disabled prior to enabling the Rx unit. + */ +- hw->mac.ops.disable_rx_buff(hw); + +- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); ++ hw->mac.ops.disable_sec_rx_path(hw); ++ ++ if (regval & IXGBE_RXCTRL_RXEN) ++ ixgbe_enable_rx(hw); ++ else ++ ixgbe_disable_rx(hw); + +- hw->mac.ops.enable_rx_buff(hw); ++ hw->mac.ops.enable_sec_rx_path(hw); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +- * ixgbe_verify_fw_version_82599 - verify fw version for 82599 ++ * ixgbe_verify_fw_version_82599 - verify FW version for 82599 + * @hw: pointer to hardware structure + * + * Verifies that installed the firmware version is 0.6 or higher +@@ -2143,49 +2284,58 @@ static s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval) + * Returns IXGBE_ERR_EEPROM_VERSION if the FW is not present or + * if the FW version is not supported. 
+ **/ +-static s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_verify_fw_version_82599(struct ixgbe_hw *hw) + { + s32 status = IXGBE_ERR_EEPROM_VERSION; + u16 fw_offset, fw_ptp_cfg_offset; +- u16 offset; +- u16 fw_version = 0; ++ u16 fw_version; ++ ++ DEBUGFUNC("ixgbe_verify_fw_version_82599"); + + /* firmware check is only necessary for SFI devices */ + if (hw->phy.media_type != ixgbe_media_type_fiber) { +- status = 0; ++ status = IXGBE_SUCCESS; + goto fw_version_out; + } + + /* get the offset to the Firmware Module block */ +- offset = IXGBE_FW_PTR; +- if (hw->eeprom.ops.read(hw, offset, &fw_offset)) +- goto fw_version_err; ++ if (hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset)) { ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", IXGBE_FW_PTR); ++ return IXGBE_ERR_EEPROM_VERSION; ++ } + + if ((fw_offset == 0) || (fw_offset == 0xFFFF)) + goto fw_version_out; + + /* get the offset to the Pass Through Patch Configuration block */ +- offset = fw_offset + IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR; +- if (hw->eeprom.ops.read(hw, offset, &fw_ptp_cfg_offset)) +- goto fw_version_err; ++ if (hw->eeprom.ops.read(hw, (fw_offset + ++ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR), ++ &fw_ptp_cfg_offset)) { ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", ++ fw_offset + ++ IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR); ++ return IXGBE_ERR_EEPROM_VERSION; ++ } + + if ((fw_ptp_cfg_offset == 0) || (fw_ptp_cfg_offset == 0xFFFF)) + goto fw_version_out; + + /* get the firmware version */ +- offset = fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4; +- if (hw->eeprom.ops.read(hw, offset, &fw_version)) +- goto fw_version_err; ++ if (hw->eeprom.ops.read(hw, (fw_ptp_cfg_offset + ++ IXGBE_FW_PATCH_VERSION_4), &fw_version)) { ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", ++ fw_ptp_cfg_offset + IXGBE_FW_PATCH_VERSION_4); ++ return IXGBE_ERR_EEPROM_VERSION; ++ } + + if (fw_version > 
0x5) +- status = 0; ++ status = IXGBE_SUCCESS; + + fw_version_out: + return status; +- +-fw_version_err: +- hw_err(hw, "eeprom read at offset %d failed\n", offset); +- return IXGBE_ERR_EEPROM_VERSION; + } + + /** +@@ -2195,16 +2345,18 @@ fw_version_err: + * Returns true if the LESM FW module is present and enabled. Otherwise + * returns false. Smart Speed must be disabled if LESM FW module is enabled. + **/ +-static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) ++bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) + { + bool lesm_enabled = false; + u16 fw_offset, fw_lesm_param_offset, fw_lesm_state; + s32 status; + ++ DEBUGFUNC("ixgbe_verify_lesm_fw_enabled_82599"); ++ + /* get the offset to the Firmware Module block */ + status = hw->eeprom.ops.read(hw, IXGBE_FW_PTR, &fw_offset); + +- if ((status != 0) || ++ if ((status != IXGBE_SUCCESS) || + (fw_offset == 0) || (fw_offset == 0xFFFF)) + goto out; + +@@ -2213,16 +2365,16 @@ static bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw) + IXGBE_FW_LESM_PARAMETERS_PTR), + &fw_lesm_param_offset); + +- if ((status != 0) || ++ if ((status != IXGBE_SUCCESS) || + (fw_lesm_param_offset == 0) || (fw_lesm_param_offset == 0xFFFF)) + goto out; + +- /* get the lesm state word */ ++ /* get the LESM state word */ + status = hw->eeprom.ops.read(hw, (fw_lesm_param_offset + + IXGBE_FW_LESM_STATE_1), + &fw_lesm_state); + +- if ((status == 0) && ++ if ((status == IXGBE_SUCCESS) && + (fw_lesm_state & IXGBE_FW_LESM_STATE_ENABLED)) + lesm_enabled = true; + +@@ -2241,12 +2393,14 @@ out: + * + * Retrieves 16 bit word(s) read from EEPROM + **/ +-static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, ++STATIC s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) + { + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + ++ DEBUGFUNC("ixgbe_read_eeprom_buffer_82599"); ++ + /* + * If EEPROM is detected and can be addressed 
using 14 bits, + * use EERD otherwise use bit bang +@@ -2273,12 +2427,14 @@ static s32 ixgbe_read_eeprom_buffer_82599(struct ixgbe_hw *hw, u16 offset, + * + * Reads a 16 bit word from the EEPROM + **/ +-static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, ++STATIC s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + u16 offset, u16 *data) + { + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + s32 ret_val = IXGBE_ERR_CONFIG; + ++ DEBUGFUNC("ixgbe_read_eeprom_82599"); ++ + /* + * If EEPROM is detected and can be addressed using 14 bits, + * use EERD otherwise use bit bang +@@ -2295,13 +2451,12 @@ static s32 ixgbe_read_eeprom_82599(struct ixgbe_hw *hw, + /** + * ixgbe_reset_pipeline_82599 - perform pipeline reset + * +- * @hw: pointer to hardware structure ++ * @hw: pointer to hardware structure + * + * Reset pipeline by asserting Restart_AN together with LMS change to ensure +- * full pipeline reset. Note - We must hold the SW/FW semaphore before writing +- * to AUTOC, so this function assumes the semaphore is held. ++ * full pipeline reset. This function assumes the SW/FW lock is held. 
+ **/ +-static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) ++s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) + { + s32 ret_val; + u32 anlp1_reg = 0; +@@ -2317,26 +2472,24 @@ static s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw) + + autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); + autoc_reg |= IXGBE_AUTOC_AN_RESTART; +- + /* Write AUTOC register with toggled LMS[2] bit and Restart_AN */ + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, + autoc_reg ^ (0x4 << IXGBE_AUTOC_LMS_SHIFT)); +- + /* Wait for AN to leave state 0 */ + for (i = 0; i < 10; i++) { +- usleep_range(4000, 8000); ++ msec_delay(4); + anlp1_reg = IXGBE_READ_REG(hw, IXGBE_ANLP1); + if (anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK) + break; + } + + if (!(anlp1_reg & IXGBE_ANLP1_AN_STATE_MASK)) { +- hw_dbg(hw, "auto negotiation not completed\n"); ++ DEBUGOUT("auto negotiation not completed\n"); + ret_val = IXGBE_ERR_RESET_FAILED; + goto reset_pipeline_out; + } + +- ret_val = 0; ++ ret_val = IXGBE_SUCCESS; + + reset_pipeline_out: + /* Write AUTOC register with original LMS field and Restart_AN */ +@@ -2355,14 +2508,16 @@ reset_pipeline_out: + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +-static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, +- u8 dev_addr, u8 *data) ++STATIC s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data) + { + u32 esdp; + s32 status; + s32 timeout = 200; + +- if (hw->phy.qsfp_shared_i2c_bus == true) { ++ DEBUGFUNC("ixgbe_read_i2c_byte_82599"); ++ ++ if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Acquire I2C bus ownership. 
*/ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; +@@ -2374,12 +2529,13 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + if (esdp & IXGBE_ESDP_SDP1) + break; + +- usleep_range(5000, 10000); ++ msec_delay(5); + timeout--; + } + + if (!timeout) { +- hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); ++ DEBUGOUT("Driver can't access resource," ++ " acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } +@@ -2388,7 +2544,8 @@ static s32 ixgbe_read_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + status = ixgbe_read_i2c_byte_generic(hw, byte_offset, dev_addr, data); + + release_i2c_access: +- if (hw->phy.qsfp_shared_i2c_bus == true) { ++ ++ if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Release I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; +@@ -2408,14 +2565,16 @@ release_i2c_access: + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +-static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, +- u8 dev_addr, u8 data) ++STATIC s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data) + { + u32 esdp; + s32 status; + s32 timeout = 200; + +- if (hw->phy.qsfp_shared_i2c_bus == true) { ++ DEBUGFUNC("ixgbe_write_i2c_byte_82599"); ++ ++ if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Acquire I2C bus ownership. 
*/ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp |= IXGBE_ESDP_SDP0; +@@ -2427,12 +2586,13 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + if (esdp & IXGBE_ESDP_SDP1) + break; + +- usleep_range(5000, 10000); ++ msec_delay(5); + timeout--; + } + + if (!timeout) { +- hw_dbg(hw, "Driver can't access resource, acquiring I2C bus timeout.\n"); ++ DEBUGOUT("Driver can't access resource," ++ " acquiring I2C bus timeout.\n"); + status = IXGBE_ERR_I2C; + goto release_i2c_access; + } +@@ -2441,7 +2601,8 @@ static s32 ixgbe_write_i2c_byte_82599(struct ixgbe_hw *hw, u8 byte_offset, + status = ixgbe_write_i2c_byte_generic(hw, byte_offset, dev_addr, data); + + release_i2c_access: +- if (hw->phy.qsfp_shared_i2c_bus == true) { ++ ++ if (hw->phy.qsfp_shared_i2c_bus == TRUE) { + /* Release I2C bus ownership. */ + esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + esdp &= ~IXGBE_ESDP_SDP0; +@@ -2451,93 +2612,3 @@ release_i2c_access: + + return status; + } +- +-static struct ixgbe_mac_operations mac_ops_82599 = { +- .init_hw = &ixgbe_init_hw_generic, +- .reset_hw = &ixgbe_reset_hw_82599, +- .start_hw = &ixgbe_start_hw_82599, +- .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, +- .get_media_type = &ixgbe_get_media_type_82599, +- .get_supported_physical_layer = &ixgbe_get_supported_physical_layer_82599, +- .enable_rx_dma = &ixgbe_enable_rx_dma_82599, +- .disable_rx_buff = &ixgbe_disable_rx_buff_generic, +- .enable_rx_buff = &ixgbe_enable_rx_buff_generic, +- .get_mac_addr = &ixgbe_get_mac_addr_generic, +- .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, +- .get_device_caps = &ixgbe_get_device_caps_generic, +- .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, +- .stop_adapter = &ixgbe_stop_adapter_generic, +- .get_bus_info = &ixgbe_get_bus_info_generic, +- .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, +- .read_analog_reg8 = &ixgbe_read_analog_reg8_82599, +- .write_analog_reg8 = &ixgbe_write_analog_reg8_82599, +- .stop_link_on_d3 = 
&ixgbe_stop_mac_link_on_d3_82599, +- .setup_link = &ixgbe_setup_mac_link_82599, +- .set_rxpba = &ixgbe_set_rxpba_generic, +- .check_link = &ixgbe_check_mac_link_generic, +- .get_link_capabilities = &ixgbe_get_link_capabilities_82599, +- .led_on = &ixgbe_led_on_generic, +- .led_off = &ixgbe_led_off_generic, +- .blink_led_start = &ixgbe_blink_led_start_generic, +- .blink_led_stop = &ixgbe_blink_led_stop_generic, +- .set_rar = &ixgbe_set_rar_generic, +- .clear_rar = &ixgbe_clear_rar_generic, +- .set_vmdq = &ixgbe_set_vmdq_generic, +- .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, +- .clear_vmdq = &ixgbe_clear_vmdq_generic, +- .init_rx_addrs = &ixgbe_init_rx_addrs_generic, +- .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, +- .enable_mc = &ixgbe_enable_mc_generic, +- .disable_mc = &ixgbe_disable_mc_generic, +- .clear_vfta = &ixgbe_clear_vfta_generic, +- .set_vfta = &ixgbe_set_vfta_generic, +- .fc_enable = &ixgbe_fc_enable_generic, +- .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, +- .init_uta_tables = &ixgbe_init_uta_tables_generic, +- .setup_sfp = &ixgbe_setup_sfp_modules_82599, +- .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, +- .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, +- .acquire_swfw_sync = &ixgbe_acquire_swfw_sync, +- .release_swfw_sync = &ixgbe_release_swfw_sync, +- .get_thermal_sensor_data = &ixgbe_get_thermal_sensor_data_generic, +- .init_thermal_sensor_thresh = &ixgbe_init_thermal_sensor_thresh_generic, +- .prot_autoc_read = &prot_autoc_read_82599, +- .prot_autoc_write = &prot_autoc_write_82599, +-}; +- +-static struct ixgbe_eeprom_operations eeprom_ops_82599 = { +- .init_params = &ixgbe_init_eeprom_params_generic, +- .read = &ixgbe_read_eeprom_82599, +- .read_buffer = &ixgbe_read_eeprom_buffer_82599, +- .write = &ixgbe_write_eeprom_generic, +- .write_buffer = &ixgbe_write_eeprom_buffer_bit_bang_generic, +- .calc_checksum = &ixgbe_calc_eeprom_checksum_generic, +- .validate_checksum = 
&ixgbe_validate_eeprom_checksum_generic, +- .update_checksum = &ixgbe_update_eeprom_checksum_generic, +-}; +- +-static struct ixgbe_phy_operations phy_ops_82599 = { +- .identify = &ixgbe_identify_phy_82599, +- .identify_sfp = &ixgbe_identify_module_generic, +- .init = &ixgbe_init_phy_ops_82599, +- .reset = &ixgbe_reset_phy_generic, +- .read_reg = &ixgbe_read_phy_reg_generic, +- .write_reg = &ixgbe_write_phy_reg_generic, +- .setup_link = &ixgbe_setup_phy_link_generic, +- .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, +- .read_i2c_byte = &ixgbe_read_i2c_byte_generic, +- .write_i2c_byte = &ixgbe_write_i2c_byte_generic, +- .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, +- .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, +- .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, +- .check_overtemp = &ixgbe_tn_check_overtemp, +-}; +- +-struct ixgbe_info ixgbe_82599_info = { +- .mac = ixgbe_mac_82599EB, +- .get_invariants = &ixgbe_get_invariants_82599, +- .mac_ops = &mac_ops_82599, +- .eeprom_ops = &eeprom_ops_82599, +- .phy_ops = &phy_ops_82599, +- .mbx_ops = &mbx_ops_generic, +-}; +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.h +new file mode 100644 +index 0000000..7d928b8 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_82599.h +@@ -0,0 +1,55 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. 
++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_82599_H_ ++#define _IXGBE_82599_H_ ++ ++s32 ixgbe_get_link_capabilities_82599(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, bool *autoneg); ++enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw); ++void ixgbe_disable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); ++void ixgbe_enable_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); ++void ixgbe_flap_tx_laser_multispeed_fiber(struct ixgbe_hw *hw); ++void ixgbe_set_hard_rate_select_speed(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed); ++s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_setup_sfp_modules_82599(struct ixgbe_hw *hw); ++void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw); ++s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw); ++s32 ixgbe_read_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 *val); ++s32 ixgbe_write_analog_reg8_82599(struct ixgbe_hw *hw, u32 reg, u8 val); ++s32 ixgbe_start_hw_82599(struct ixgbe_hw *hw); ++s32 ixgbe_identify_phy_82599(struct ixgbe_hw *hw); ++s32 ixgbe_init_phy_ops_82599(struct ixgbe_hw *hw); ++u64 ixgbe_get_supported_physical_layer_82599(struct ixgbe_hw *hw); ++s32 ixgbe_enable_rx_dma_82599(struct ixgbe_hw *hw, u32 regval); ++s32 prot_autoc_read_82599(struct ixgbe_hw *hw, bool *locked, u32 *reg_val); ++s32 prot_autoc_write_82599(struct ixgbe_hw *hw, u32 reg_val, bool locked); ++#endif /* _IXGBE_82599_H_ */ +diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_api.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_api.c +new file mode 100644 +index 0000000..3251a71 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_api.c +@@ -0,0 +1,1624 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe_api.h" ++#include "ixgbe_common.h" ++ ++#define IXGBE_EMPTY_PARAM ++ ++static const u32 ixgbe_mvals_base[IXGBE_MVALS_IDX_LIMIT] = { ++ IXGBE_MVALS_INIT(IXGBE_EMPTY_PARAM) ++}; ++ ++static const u32 ixgbe_mvals_X540[IXGBE_MVALS_IDX_LIMIT] = { ++ IXGBE_MVALS_INIT(_X540) ++}; ++ ++static const u32 ixgbe_mvals_X550[IXGBE_MVALS_IDX_LIMIT] = { ++ IXGBE_MVALS_INIT(_X550) ++}; ++ ++static const u32 ixgbe_mvals_X550EM_x[IXGBE_MVALS_IDX_LIMIT] = { ++ IXGBE_MVALS_INIT(_X550EM_x) ++}; ++ ++static const u32 ixgbe_mvals_X550EM_a[IXGBE_MVALS_IDX_LIMIT] = { ++ IXGBE_MVALS_INIT(_X550EM_a) ++}; ++ ++/** ++ * ixgbe_dcb_get_rtrup2tc - read rtrup2tc reg ++ * @hw: pointer to hardware structure ++ * @map: pointer to u8 arr for returning map ++ * ++ * Read the rtrup2tc HW register and resolve its content into map ++ **/ ++void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map) ++{ ++ if (hw->mac.ops.get_rtrup2tc) ++ hw->mac.ops.get_rtrup2tc(hw, map); ++} ++ ++/** ++ * ixgbe_init_shared_code - Initialize the shared code ++ * @hw: pointer to hardware structure ++ * ++ * This will assign function pointers and assign the MAC type and PHY code. ++ * Does not touch the hardware. This function must be called prior to any ++ * other function in the shared code. The ixgbe_hw structure should be ++ * memset to 0 prior to calling this function. 
The following fields in ++ * hw structure should be filled in prior to calling this function: ++ * hw_addr, back, device_id, vendor_id, subsystem_device_id, ++ * subsystem_vendor_id, and revision_id ++ **/ ++s32 ixgbe_init_shared_code(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ ++ DEBUGFUNC("ixgbe_init_shared_code"); ++ ++ /* ++ * Set the mac type ++ */ ++ ixgbe_set_mac_type(hw); ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ status = ixgbe_init_ops_82598(hw); ++ break; ++ case ixgbe_mac_82599EB: ++ status = ixgbe_init_ops_82599(hw); ++ break; ++ case ixgbe_mac_X540: ++ status = ixgbe_init_ops_X540(hw); ++ break; ++ case ixgbe_mac_X550: ++ status = ixgbe_init_ops_X550(hw); ++ break; ++ case ixgbe_mac_X550EM_x: ++ status = ixgbe_init_ops_X550EM_x(hw); ++ break; ++ case ixgbe_mac_X550EM_a: ++ status = ixgbe_init_ops_X550EM_a(hw); ++ break; ++ default: ++ status = IXGBE_ERR_DEVICE_NOT_SUPPORTED; ++ break; ++ } ++ hw->mac.max_link_up_time = IXGBE_LINK_UP_TIME; ++ ++ return status; ++} ++ ++/** ++ * ixgbe_set_mac_type - Sets MAC type ++ * @hw: pointer to the HW structure ++ * ++ * This function sets the mac type of the adapter based on the ++ * vendor ID and device ID stored in the hw structure. 
++ **/ ++s32 ixgbe_set_mac_type(struct ixgbe_hw *hw) ++{ ++ s32 ret_val = IXGBE_SUCCESS; ++ ++ DEBUGFUNC("ixgbe_set_mac_type\n"); ++ ++ if (hw->vendor_id != IXGBE_INTEL_VENDOR_ID) { ++ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, ++ "Unsupported vendor id: %x", hw->vendor_id); ++ return IXGBE_ERR_DEVICE_NOT_SUPPORTED; ++ } ++ ++ hw->mvals = ixgbe_mvals_base; ++ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_82598: ++ case IXGBE_DEV_ID_82598_BX: ++ case IXGBE_DEV_ID_82598AF_SINGLE_PORT: ++ case IXGBE_DEV_ID_82598AF_DUAL_PORT: ++ case IXGBE_DEV_ID_82598AT: ++ case IXGBE_DEV_ID_82598AT2: ++ case IXGBE_DEV_ID_82598EB_CX4: ++ case IXGBE_DEV_ID_82598_CX4_DUAL_PORT: ++ case IXGBE_DEV_ID_82598_DA_DUAL_PORT: ++ case IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM: ++ case IXGBE_DEV_ID_82598EB_XF_LR: ++ case IXGBE_DEV_ID_82598EB_SFP_LOM: ++ hw->mac.type = ixgbe_mac_82598EB; ++ break; ++ case IXGBE_DEV_ID_82599_KX4: ++ case IXGBE_DEV_ID_82599_KX4_MEZZ: ++ case IXGBE_DEV_ID_82599_XAUI_LOM: ++ case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: ++ case IXGBE_DEV_ID_82599_KR: ++ case IXGBE_DEV_ID_82599_SFP: ++ case IXGBE_DEV_ID_82599_BACKPLANE_FCOE: ++ case IXGBE_DEV_ID_82599_SFP_FCOE: ++ case IXGBE_DEV_ID_82599_SFP_EM: ++ case IXGBE_DEV_ID_82599_SFP_SF2: ++ case IXGBE_DEV_ID_82599_SFP_SF_QP: ++ case IXGBE_DEV_ID_82599_QSFP_SF_QP: ++ case IXGBE_DEV_ID_82599EN_SFP: ++ case IXGBE_DEV_ID_82599_CX4: ++ case IXGBE_DEV_ID_82599_LS: ++ case IXGBE_DEV_ID_82599_T3_LOM: ++ hw->mac.type = ixgbe_mac_82599EB; ++ break; ++ case IXGBE_DEV_ID_X540T: ++ case IXGBE_DEV_ID_X540T1: ++ hw->mac.type = ixgbe_mac_X540; ++ hw->mvals = ixgbe_mvals_X540; ++ break; ++ case IXGBE_DEV_ID_X550T: ++ case IXGBE_DEV_ID_X550T1: ++ hw->mac.type = ixgbe_mac_X550; ++ hw->mvals = ixgbe_mvals_X550; ++ break; ++ case IXGBE_DEV_ID_X550EM_X_KX4: ++ case IXGBE_DEV_ID_X550EM_X_KR: ++ case IXGBE_DEV_ID_X550EM_X_10G_T: ++ case IXGBE_DEV_ID_X550EM_X_1G_T: ++ case IXGBE_DEV_ID_X550EM_X_SFP: ++ case IXGBE_DEV_ID_X550EM_X_XFI: ++ hw->mac.type = 
ixgbe_mac_X550EM_x; ++ hw->mvals = ixgbe_mvals_X550EM_x; ++ break; ++ case IXGBE_DEV_ID_X550EM_A_KR: ++ case IXGBE_DEV_ID_X550EM_A_KR_L: ++ case IXGBE_DEV_ID_X550EM_A_SFP_N: ++ case IXGBE_DEV_ID_X550EM_A_SGMII: ++ case IXGBE_DEV_ID_X550EM_A_SGMII_L: ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ case IXGBE_DEV_ID_X550EM_A_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_QSFP: ++ case IXGBE_DEV_ID_X550EM_A_QSFP_N: ++ case IXGBE_DEV_ID_X550EM_A_SFP: ++ hw->mac.type = ixgbe_mac_X550EM_a; ++ hw->mvals = ixgbe_mvals_X550EM_a; ++ break; ++ default: ++ ret_val = IXGBE_ERR_DEVICE_NOT_SUPPORTED; ++ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, ++ "Unsupported device id: %x", ++ hw->device_id); ++ break; ++ } ++ ++ DEBUGOUT2("ixgbe_set_mac_type found mac: %d, returns: %d\n", ++ hw->mac.type, ret_val); ++ return ret_val; ++} ++ ++/** ++ * ixgbe_init_hw - Initialize the hardware ++ * @hw: pointer to hardware structure ++ * ++ * Initialize the hardware by resetting and then starting the hardware ++ **/ ++s32 ixgbe_init_hw(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.init_hw, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_reset_hw - Performs a hardware reset ++ * @hw: pointer to hardware structure ++ * ++ * Resets the hardware by resetting the transmit and receive units, masks and ++ * clears all interrupts, performs a PHY reset, and performs a MAC reset ++ **/ ++s32 ixgbe_reset_hw(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.reset_hw, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_start_hw - Prepares hardware for Rx/Tx ++ * @hw: pointer to hardware structure ++ * ++ * Starts the hardware by filling the bus info structure and media type, ++ * clears all on chip counters, initializes receive address registers, ++ * multicast table, VLAN filter table, calls routine to setup link and ++ * flow control settings, and leaves transmit and receive units disabled ++ * and uninitialized. 
++ **/ ++s32 ixgbe_start_hw(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.start_hw, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_clear_hw_cntrs - Clear hardware counters ++ * @hw: pointer to hardware structure ++ * ++ * Clears all hardware statistics counters by reading them from the hardware ++ * Statistics counters are clear on read. ++ **/ ++s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.clear_hw_cntrs, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_media_type - Get media type ++ * @hw: pointer to hardware structure ++ * ++ * Returns the media type (fiber, copper, backplane) ++ **/ ++enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_media_type, (hw), ++ ixgbe_media_type_unknown); ++} ++ ++/** ++ * ixgbe_get_mac_addr - Get MAC address ++ * @hw: pointer to hardware structure ++ * @mac_addr: Adapter MAC address ++ * ++ * Reads the adapter's MAC address from the first Receive Address Register ++ * (RAR0) A reset of the adapter must have been performed prior to calling ++ * this function in order for the MAC address to have been loaded from the ++ * EEPROM into RAR0 ++ **/ ++s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_mac_addr, ++ (hw, mac_addr), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_san_mac_addr - Get SAN MAC address ++ * @hw: pointer to hardware structure ++ * @san_mac_addr: SAN MAC address ++ * ++ * Reads the SAN MAC address from the EEPROM, if it's available. This is ++ * per-port, so set_lan_id() must be called before reading the addresses. 
++ **/ ++s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_san_mac_addr, ++ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_san_mac_addr - Write a SAN MAC address ++ * @hw: pointer to hardware structure ++ * @san_mac_addr: SAN MAC address ++ * ++ * Writes A SAN MAC address to the EEPROM. ++ **/ ++s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.set_san_mac_addr, ++ (hw, san_mac_addr), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_device_caps - Get additional device capabilities ++ * @hw: pointer to hardware structure ++ * @device_caps: the EEPROM word for device capabilities ++ * ++ * Reads the extra device capabilities from the EEPROM ++ **/ ++s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_device_caps, ++ (hw, device_caps), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_wwn_prefix - Get alternative WWNN/WWPN prefix from the EEPROM ++ * @hw: pointer to hardware structure ++ * @wwnn_prefix: the alternative WWNN prefix ++ * @wwpn_prefix: the alternative WWPN prefix ++ * ++ * This function will read the EEPROM from the alternative SAN MAC address ++ * block to check the support for the alternative WWNN/WWPN prefix support. 
++ **/ ++s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, ++ u16 *wwpn_prefix) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_wwn_prefix, ++ (hw, wwnn_prefix, wwpn_prefix), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_fcoe_boot_status - Get FCOE boot status from EEPROM ++ * @hw: pointer to hardware structure ++ * @bs: the fcoe boot status ++ * ++ * This function will read the FCOE boot status from the iSCSI FCOE block ++ **/ ++s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_fcoe_boot_status, ++ (hw, bs), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_bus_info - Set PCI bus info ++ * @hw: pointer to hardware structure ++ * ++ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure ++ **/ ++s32 ixgbe_get_bus_info(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_bus_info, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_num_of_tx_queues - Get Tx queues ++ * @hw: pointer to hardware structure ++ * ++ * Returns the number of transmit queues for the given adapter. ++ **/ ++u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw) ++{ ++ return hw->mac.max_tx_queues; ++} ++ ++/** ++ * ixgbe_get_num_of_rx_queues - Get Rx queues ++ * @hw: pointer to hardware structure ++ * ++ * Returns the number of receive queues for the given adapter. ++ **/ ++u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw) ++{ ++ return hw->mac.max_rx_queues; ++} ++ ++/** ++ * ixgbe_stop_adapter - Disable Rx/Tx units ++ * @hw: pointer to hardware structure ++ * ++ * Sets the adapter_stopped flag within ixgbe_hw struct. Clears interrupts, ++ * disables transmit and receive units. The adapter_stopped flag is used by ++ * the shared code and drivers to determine if the adapter is in a stopped ++ * state and should not touch the hardware. 
++ **/ ++s32 ixgbe_stop_adapter(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.stop_adapter, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_pba_string - Reads part number string from EEPROM ++ * @hw: pointer to hardware structure ++ * @pba_num: stores the part number string from the EEPROM ++ * @pba_num_size: part number string buffer length ++ * ++ * Reads the part number string from the EEPROM. ++ **/ ++s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size) ++{ ++ return ixgbe_read_pba_string_generic(hw, pba_num, pba_num_size); ++} ++ ++/** ++ * ixgbe_identify_phy - Get PHY type ++ * @hw: pointer to hardware structure ++ * ++ * Determines the physical layer module found on the current adapter. ++ **/ ++s32 ixgbe_identify_phy(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_SUCCESS; ++ ++ if (hw->phy.type == ixgbe_phy_unknown) { ++ status = ixgbe_call_func(hw, hw->phy.ops.identify, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_reset_phy - Perform a PHY reset ++ * @hw: pointer to hardware structure ++ **/ ++s32 ixgbe_reset_phy(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_SUCCESS; ++ ++ if (hw->phy.type == ixgbe_phy_unknown) { ++ if (ixgbe_identify_phy(hw) != IXGBE_SUCCESS) ++ status = IXGBE_ERR_PHY; ++ } ++ ++ if (status == IXGBE_SUCCESS) { ++ status = ixgbe_call_func(hw, hw->phy.ops.reset, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++ } ++ return status; ++} ++ ++/** ++ * ixgbe_get_phy_firmware_version - ++ * @hw: pointer to hardware structure ++ * @firmware_version: pointer to firmware version ++ **/ ++s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, u16 *firmware_version) ++{ ++ s32 status = IXGBE_SUCCESS; ++ ++ status = ixgbe_call_func(hw, hw->phy.ops.get_firmware_version, ++ (hw, firmware_version), ++ IXGBE_NOT_IMPLEMENTED); ++ return status; ++} ++ ++/** ++ * ixgbe_read_phy_reg - Read PHY register ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit address 
of PHY register to read ++ * @phy_data: Pointer to read data from PHY register ++ * ++ * Reads a value from a specified PHY register ++ **/ ++s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 *phy_data) ++{ ++ if (hw->phy.id == 0) ++ ixgbe_identify_phy(hw); ++ ++ return ixgbe_call_func(hw, hw->phy.ops.read_reg, (hw, reg_addr, ++ device_type, phy_data), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_phy_reg - Write PHY register ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to write ++ * @phy_data: Data to write to the PHY register ++ * ++ * Writes a value to specified PHY register ++ **/ ++s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 phy_data) ++{ ++ if (hw->phy.id == 0) ++ ixgbe_identify_phy(hw); ++ ++ return ixgbe_call_func(hw, hw->phy.ops.write_reg, (hw, reg_addr, ++ device_type, phy_data), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_setup_phy_link - Restart PHY autoneg ++ * @hw: pointer to hardware structure ++ * ++ * Restart autonegotiation and PHY and waits for completion. ++ **/ ++s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.setup_link, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_setup_internal_phy - Configure integrated PHY ++ * @hw: pointer to hardware structure ++ * ++ * Reconfigure the integrated PHY in order to enable talk to the external PHY. ++ * Returns success if not implemented, since nothing needs to be done in this ++ * case. ++ */ ++s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.setup_internal_link, (hw), ++ IXGBE_SUCCESS); ++} ++ ++/** ++ * ixgbe_check_phy_link - Determine link and speed status ++ * @hw: pointer to hardware structure ++ * ++ * Reads a PHY register to determine if link is up and the current speed for ++ * the PHY. 
++ **/ ++s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.check_link, (hw, speed, ++ link_up), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_setup_phy_link_speed - Set auto advertise ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * ++ * Sets the auto advertised capabilities ++ **/ ++s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.setup_link_speed, (hw, speed, ++ autoneg_wait_to_complete), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_phy_power - Control the phy power state ++ * @hw: pointer to hardware structure ++ * @on: true for on, false for off ++ */ ++s32 ixgbe_set_phy_power(struct ixgbe_hw *hw, bool on) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.set_phy_power, (hw, on), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_check_link - Get link and speed status ++ * @hw: pointer to hardware structure ++ * ++ * Reads the links register to determine if link is up and the current speed ++ **/ ++s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up, bool link_up_wait_to_complete) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.check_link, (hw, speed, ++ link_up, link_up_wait_to_complete), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_disable_tx_laser - Disable Tx laser ++ * @hw: pointer to hardware structure ++ * ++ * If the driver needs to disable the laser on SFI optics. ++ **/ ++void ixgbe_disable_tx_laser(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.disable_tx_laser) ++ hw->mac.ops.disable_tx_laser(hw); ++} ++ ++/** ++ * ixgbe_enable_tx_laser - Enable Tx laser ++ * @hw: pointer to hardware structure ++ * ++ * If the driver needs to enable the laser on SFI optics. 
++ **/ ++void ixgbe_enable_tx_laser(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.enable_tx_laser) ++ hw->mac.ops.enable_tx_laser(hw); ++} ++ ++/** ++ * ixgbe_flap_tx_laser - flap Tx laser to start autotry process ++ * @hw: pointer to hardware structure ++ * ++ * When the driver changes the link speeds that it can support then ++ * flap the tx laser to alert the link partner to start autotry ++ * process on its end. ++ **/ ++void ixgbe_flap_tx_laser(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.flap_tx_laser) ++ hw->mac.ops.flap_tx_laser(hw); ++} ++ ++/** ++ * ixgbe_setup_link - Set link speed ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * ++ * Configures link settings. Restarts the link. ++ * Performs autonegotiation if needed. ++ **/ ++s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.setup_link, (hw, speed, ++ autoneg_wait_to_complete), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_setup_mac_link - Set link speed ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * ++ * Configures link settings. Restarts the link. ++ * Performs autonegotiation if needed. ++ **/ ++s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.setup_mac_link, (hw, speed, ++ autoneg_wait_to_complete), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_link_capabilities - Returns link capabilities ++ * @hw: pointer to hardware structure ++ * ++ * Determines the link capabilities of the current configuration. 
++ **/ ++s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *autoneg) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_link_capabilities, (hw, ++ speed, autoneg), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_led_on - Turn on LEDs ++ * @hw: pointer to hardware structure ++ * @index: led number to turn on ++ * ++ * Turns on the software controllable LEDs. ++ **/ ++s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.led_on, (hw, index), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_led_off - Turn off LEDs ++ * @hw: pointer to hardware structure ++ * @index: led number to turn off ++ * ++ * Turns off the software controllable LEDs. ++ **/ ++s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.led_off, (hw, index), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_blink_led_start - Blink LEDs ++ * @hw: pointer to hardware structure ++ * @index: led number to blink ++ * ++ * Blink LED based on index. ++ **/ ++s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.blink_led_start, (hw, index), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_blink_led_stop - Stop blinking LEDs ++ * @hw: pointer to hardware structure ++ * ++ * Stop blinking LED based on index. ++ **/ ++s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.blink_led_stop, (hw, index), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_init_eeprom_params - Initialize EEPROM parameters ++ * @hw: pointer to hardware structure ++ * ++ * Initializes the EEPROM parameters ixgbe_eeprom_info within the ++ * ixgbe_hw struct in order to set up EEPROM access. 
++ **/ ++s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.init_params, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++ ++/** ++ * ixgbe_write_eeprom - Write word to EEPROM ++ * @hw: pointer to hardware structure ++ * @offset: offset within the EEPROM to be written to ++ * @data: 16 bit word to be written to the EEPROM ++ * ++ * Writes 16 bit value to EEPROM. If ixgbe_eeprom_update_checksum is not ++ * called after this function, the EEPROM will most likely contain an ++ * invalid checksum. ++ **/ ++s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.write, (hw, offset, data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_eeprom_buffer - Write word(s) to EEPROM ++ * @hw: pointer to hardware structure ++ * @offset: offset within the EEPROM to be written to ++ * @data: 16 bit word(s) to be written to the EEPROM ++ * @words: number of words ++ * ++ * Writes 16 bit word(s) to EEPROM. If ixgbe_eeprom_update_checksum is not ++ * called after this function, the EEPROM will most likely contain an ++ * invalid checksum. 
++ **/ ++s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, u16 words, ++ u16 *data) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.write_buffer, ++ (hw, offset, words, data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_eeprom - Read word from EEPROM ++ * @hw: pointer to hardware structure ++ * @offset: offset within the EEPROM to be read ++ * @data: read 16 bit value from EEPROM ++ * ++ * Reads 16 bit value from EEPROM ++ **/ ++s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.read, (hw, offset, data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_eeprom_buffer - Read word(s) from EEPROM ++ * @hw: pointer to hardware structure ++ * @offset: offset within the EEPROM to be read ++ * @data: read 16 bit word(s) from EEPROM ++ * @words: number of words ++ * ++ * Reads 16 bit word(s) from EEPROM ++ **/ ++s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, ++ u16 words, u16 *data) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.read_buffer, ++ (hw, offset, words, data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_validate_eeprom_checksum - Validate EEPROM checksum ++ * @hw: pointer to hardware structure ++ * @checksum_val: calculated checksum ++ * ++ * Performs checksum calculation and validates the EEPROM checksum ++ **/ ++s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.validate_checksum, ++ (hw, checksum_val), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_eeprom_update_checksum - Updates the EEPROM checksum ++ * @hw: pointer to hardware structure ++ **/ ++s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->eeprom.ops.update_checksum, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_insert_mac_addr - Find a RAR for this mac address ++ * @hw: pointer to hardware structure ++ * @addr: Address to put into receive address register ++ 
* @vmdq: VMDq pool to assign ++ * ++ * Puts an ethernet address into a receive address register, or ++ * finds the rar that it is aleady in; adds to the pool list ++ **/ ++s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.insert_mac_addr, ++ (hw, addr, vmdq), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_rar - Set Rx address register ++ * @hw: pointer to hardware structure ++ * @index: Receive address register to write ++ * @addr: Address to put into receive address register ++ * @vmdq: VMDq "set" ++ * @enable_addr: set flag that address is active ++ * ++ * Puts an ethernet address into a receive address register. ++ **/ ++s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, ++ u32 enable_addr) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.set_rar, (hw, index, addr, vmdq, ++ enable_addr), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_clear_rar - Clear Rx address register ++ * @hw: pointer to hardware structure ++ * @index: Receive address register to write ++ * ++ * Puts an ethernet address into a receive address register. 
++ **/ ++s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.clear_rar, (hw, index), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_vmdq - Associate a VMDq index with a receive address ++ * @hw: pointer to hardware structure ++ * @rar: receive address register index to associate with VMDq index ++ * @vmdq: VMDq set or pool index ++ **/ ++s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq, (hw, rar, vmdq), ++ IXGBE_NOT_IMPLEMENTED); ++ ++} ++ ++/** ++ * ixgbe_set_vmdq_san_mac - Associate VMDq index 127 with a receive address ++ * @hw: pointer to hardware structure ++ * @vmdq: VMDq default pool index ++ **/ ++s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.set_vmdq_san_mac, ++ (hw, vmdq), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_clear_vmdq - Disassociate a VMDq index from a receive address ++ * @hw: pointer to hardware structure ++ * @rar: receive address register index to disassociate with VMDq index ++ * @vmdq: VMDq set or pool index ++ **/ ++s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.clear_vmdq, (hw, rar, vmdq), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_init_rx_addrs - Initializes receive address filters. ++ * @hw: pointer to hardware structure ++ * ++ * Places the MAC address in receive address register 0 and clears the rest ++ * of the receive address registers. Clears the multicast table. Assumes ++ * the receiver is in reset when the routine is called. ++ **/ ++s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.init_rx_addrs, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_num_rx_addrs - Returns the number of RAR entries. 
++ * @hw: pointer to hardware structure ++ **/ ++u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw) ++{ ++ return hw->mac.num_rar_entries; ++} ++ ++/** ++ * ixgbe_update_uc_addr_list - Updates the MAC's list of secondary addresses ++ * @hw: pointer to hardware structure ++ * @addr_list: the list of new multicast addresses ++ * @addr_count: number of addresses ++ * @func: iterator function to walk the multicast address list ++ * ++ * The given list replaces any existing list. Clears the secondary addrs from ++ * receive address registers. Uses unused receive address registers for the ++ * first secondary addresses, and falls back to promiscuous mode as needed. ++ **/ ++s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, ++ u32 addr_count, ixgbe_mc_addr_itr func) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.update_uc_addr_list, (hw, ++ addr_list, addr_count, func), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_update_mc_addr_list - Updates the MAC's list of multicast addresses ++ * @hw: pointer to hardware structure ++ * @mc_addr_list: the list of new multicast addresses ++ * @mc_addr_count: number of addresses ++ * @func: iterator function to walk the multicast address list ++ * ++ * The given list replaces any existing list. Clears the MC addrs from receive ++ * address registers and the multicast table. Uses unused receive address ++ * registers for the first multicast addresses, and hashes the rest into the ++ * multicast table. ++ **/ ++s32 ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count, ixgbe_mc_addr_itr func, ++ bool clear) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.update_mc_addr_list, (hw, ++ mc_addr_list, mc_addr_count, func, clear), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_enable_mc - Enable multicast address in RAR ++ * @hw: pointer to hardware structure ++ * ++ * Enables multicast address in RAR and the use of the multicast hash table. 
++ **/ ++s32 ixgbe_enable_mc(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.enable_mc, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_disable_mc - Disable multicast address in RAR ++ * @hw: pointer to hardware structure ++ * ++ * Disables multicast address in RAR and the use of the multicast hash table. ++ **/ ++s32 ixgbe_disable_mc(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.disable_mc, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_clear_vfta - Clear VLAN filter table ++ * @hw: pointer to hardware structure ++ * ++ * Clears the VLAN filer table, and the VMDq index associated with the filter ++ **/ ++s32 ixgbe_clear_vfta(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.clear_vfta, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_vfta - Set VLAN filter table ++ * @hw: pointer to hardware structure ++ * @vlan: VLAN id to write to VLAN filter ++ * @vind: VMDq output index that maps queue to VLAN id in VLVFB ++ * @vlan_on: boolean flag to turn on/off VLAN ++ * @vlvf_bypass: boolean flag indicating updating the default pool is okay ++ * ++ * Turn on/off specified VLAN in the VLAN filter table. ++ **/ ++s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, ++ bool vlvf_bypass) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.set_vfta, (hw, vlan, vind, ++ vlan_on, vlvf_bypass), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_vlvf - Set VLAN Pool Filter ++ * @hw: pointer to hardware structure ++ * @vlan: VLAN id to write to VLAN filter ++ * @vind: VMDq output index that maps queue to VLAN id in VLVFB ++ * @vlan_on: boolean flag to turn on/off VLAN in VLVF ++ * @vfta_delta: pointer to the difference between the current value of VFTA ++ * and the desired value ++ * @vfta: the desired value of the VFTA ++ * @vlvf_bypass: boolean flag indicating updating the default pool is okay ++ * ++ * Turn on/off specified bit in VLVF table. 
++ **/ ++s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, bool vlan_on, ++ u32 *vfta_delta, u32 vfta, bool vlvf_bypass) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.set_vlvf, (hw, vlan, vind, ++ vlan_on, vfta_delta, vfta, vlvf_bypass), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_fc_enable - Enable flow control ++ * @hw: pointer to hardware structure ++ * ++ * Configures the flow control settings based on SW configuration. ++ **/ ++s32 ixgbe_fc_enable(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.fc_enable, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_setup_fc - Set up flow control ++ * @hw: pointer to hardware structure ++ * ++ * Called at init time to set up flow control. ++ **/ ++s32 ixgbe_setup_fc(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.setup_fc, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_fw_drv_ver - Try to send the driver version number FW ++ * @hw: pointer to hardware structure ++ * @maj: driver major number to be sent to firmware ++ * @min: driver minor number to be sent to firmware ++ * @build: driver build number to be sent to firmware ++ * @ver: driver version number to be sent to firmware ++ * @len: length of driver_ver string ++ * @driver_ver: driver string ++ **/ ++s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, ++ u8 ver, u16 len, char *driver_ver) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.set_fw_drv_ver, (hw, maj, min, ++ build, ver, len, driver_ver), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data ++ * @hw: pointer to hardware structure ++ * ++ * Updates the temperatures in mac.thermal_sensor_data ++ **/ ++s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_thermal_sensor_data, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_init_thermal_sensor_thresh - Inits thermal sensor thresholds ++ * @hw: pointer to 
hardware structure ++ * ++ * Inits the thermal sensor thresholds according to the NVM map ++ **/ ++s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.init_thermal_sensor_thresh, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_dmac_config - Configure DMA Coalescing registers. ++ * @hw: pointer to hardware structure ++ * ++ * Configure DMA coalescing. If enabling dmac, dmac is activated. ++ * When disabling dmac, dmac enable dmac bit is cleared. ++ **/ ++s32 ixgbe_dmac_config(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.dmac_config, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_dmac_update_tcs - Configure DMA Coalescing registers. ++ * @hw: pointer to hardware structure ++ * ++ * Disables dmac, updates per TC settings, and then enable dmac. ++ **/ ++s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.dmac_update_tcs, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_dmac_config_tcs - Configure DMA Coalescing registers. ++ * @hw: pointer to hardware structure ++ * ++ * Configure DMA coalescing threshold per TC and set high priority bit for ++ * FCOE TC. The dmac enable bit must be cleared before configuring. ++ **/ ++s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.dmac_config_tcs, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_setup_eee - Enable/disable EEE support ++ * @hw: pointer to the HW structure ++ * @enable_eee: boolean flag to enable EEE ++ * ++ * Enable/disable EEE based on enable_ee flag. ++ * Auto-negotiation must be started after BASE-T EEE bits in PHY register 7.3C ++ * are modified. 
++ * ++ **/ ++s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.setup_eee, (hw, enable_eee), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_set_source_address_pruning - Enable/Disable source address pruning ++ * @hw: pointer to hardware structure ++ * @enbale: enable or disable source address pruning ++ * @pool: Rx pool - Rx pool to toggle source address pruning ++ **/ ++void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, ++ unsigned int pool) ++{ ++ if (hw->mac.ops.set_source_address_pruning) ++ hw->mac.ops.set_source_address_pruning(hw, enable, pool); ++} ++ ++/** ++ * ixgbe_set_ethertype_anti_spoofing - Enable/Disable Ethertype anti-spoofing ++ * @hw: pointer to hardware structure ++ * @enable: enable or disable switch for Ethertype anti-spoofing ++ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing ++ * ++ **/ ++void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) ++{ ++ if (hw->mac.ops.set_ethertype_anti_spoofing) ++ hw->mac.ops.set_ethertype_anti_spoofing(hw, enable, vf); ++} ++ ++/** ++ * ixgbe_read_iosf_sb_reg - Read 32 bit PHY register ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit address of PHY register to read ++ * @device_type: type of device you want to communicate with ++ * @phy_data: Pointer to read data from PHY register ++ * ++ * Reads a value from a specified PHY register ++ **/ ++s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u32 *phy_data) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.read_iosf_sb_reg, (hw, reg_addr, ++ device_type, phy_data), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_iosf_sb_reg - Write 32 bit register through IOSF Sideband ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to write ++ * @device_type: type of device you want to communicate with ++ * @phy_data: Data to write to the PHY register ++ * ++ * Writes a 
value to specified PHY register ++ **/ ++s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u32 phy_data) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.write_iosf_sb_reg, (hw, reg_addr, ++ device_type, phy_data), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_disable_mdd - Disable malicious driver detection ++ * @hw: pointer to hardware structure ++ * ++ **/ ++void ixgbe_disable_mdd(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.disable_mdd) ++ hw->mac.ops.disable_mdd(hw); ++} ++ ++/** ++ * ixgbe_enable_mdd - Enable malicious driver detection ++ * @hw: pointer to hardware structure ++ * ++ **/ ++void ixgbe_enable_mdd(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.enable_mdd) ++ hw->mac.ops.enable_mdd(hw); ++} ++ ++/** ++ * ixgbe_mdd_event - Handle malicious driver detection event ++ * @hw: pointer to hardware structure ++ * @vf_bitmap: vf bitmap of malicious vfs ++ * ++ **/ ++void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap) ++{ ++ if (hw->mac.ops.mdd_event) ++ hw->mac.ops.mdd_event(hw, vf_bitmap); ++} ++ ++/** ++ * ixgbe_restore_mdd_vf - Restore VF that was disabled during malicious driver ++ * detection event ++ * @hw: pointer to hardware structure ++ * @vf: vf index ++ * ++ **/ ++void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf) ++{ ++ if (hw->mac.ops.restore_mdd_vf) ++ hw->mac.ops.restore_mdd_vf(hw, vf); ++} ++ ++/** ++ * ixgbe_enter_lplu - Transition to low power states ++ * @hw: pointer to hardware structure ++ * ++ * Configures Low Power Link Up on transition to low power states ++ * (from D0 to non-D0). ++ **/ ++s32 ixgbe_enter_lplu(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.enter_lplu, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_handle_lasi - Handle external Base T PHY interrupt ++ * @hw: pointer to hardware structure ++ * ++ * Handle external Base T PHY interrupt. 
If high temperature ++ * failure alarm then return error, else if link status change ++ * then setup internal/external PHY link ++ * ++ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature ++ * failure alarm, else return PHY access status. ++ */ ++s32 ixgbe_handle_lasi(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.handle_lasi, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_analog_reg8 - Reads 8 bit analog register ++ * @hw: pointer to hardware structure ++ * @reg: analog register to read ++ * @val: read value ++ * ++ * Performs write operation to analog register specified. ++ **/ ++s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.read_analog_reg8, (hw, reg, ++ val), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_analog_reg8 - Writes 8 bit analog register ++ * @hw: pointer to hardware structure ++ * @reg: analog register to write ++ * @val: value to write ++ * ++ * Performs write operation to Atlas analog register specified. ++ **/ ++s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.write_analog_reg8, (hw, reg, ++ val), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_init_uta_tables - Initializes Unicast Table Arrays. ++ * @hw: pointer to hardware structure ++ * ++ * Initializes the Unicast Table Arrays to zero on device load. This ++ * is part of the Rx init addr execution path. ++ **/ ++s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.init_uta_tables, (hw), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_i2c_byte - Reads 8 bit word over I2C at specified device address ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to read ++ * @dev_addr: I2C bus address to read from ++ * @data: value read ++ * ++ * Performs byte read operation to SFP module's EEPROM over I2C interface. 
++ **/ ++s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, ++ u8 *data) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte, (hw, byte_offset, ++ dev_addr, data), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_i2c_byte_unlocked - Reads 8 bit word via I2C from device address ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to read ++ * @dev_addr: I2C bus address to read from ++ * @data: value read ++ * ++ * Performs byte read operation to SFP module's EEPROM over I2C interface. ++ **/ ++s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_byte_unlocked, ++ (hw, byte_offset, dev_addr, data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_link - Perform read operation on link device ++ * @hw: pointer to the hardware structure ++ * @addr: bus address to read from ++ * @reg: device register to read from ++ * @val: pointer to location to receive read value ++ * ++ * Returns an error code on error. ++ */ ++s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) ++{ ++ return ixgbe_call_func(hw, hw->link.ops.read_link, (hw, addr, ++ reg, val), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_link_unlocked - Perform read operation on link device ++ * @hw: pointer to the hardware structure ++ * @addr: bus address to read from ++ * @reg: device register to read from ++ * @val: pointer to location to receive read value ++ * ++ * Returns an error code on error. 
++ **/ ++s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val) ++{ ++ return ixgbe_call_func(hw, hw->link.ops.read_link_unlocked, ++ (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_i2c_byte - Writes 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @dev_addr: I2C bus address to write to ++ * @data: value to write ++ * ++ * Performs byte write operation to SFP module's EEPROM over I2C interface ++ * at a specified device address. ++ **/ ++s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, ++ u8 data) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte, (hw, byte_offset, ++ dev_addr, data), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_i2c_byte_unlocked - Writes 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @dev_addr: I2C bus address to write to ++ * @data: value to write ++ * ++ * Performs byte write operation to SFP module's EEPROM over I2C interface ++ * at a specified device address. ++ **/ ++s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_byte_unlocked, ++ (hw, byte_offset, dev_addr, data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_link - Perform write operation on link device ++ * @hw: pointer to the hardware structure ++ * @addr: bus address to write to ++ * @reg: device register to write to ++ * @val: value to write ++ * ++ * Returns an error code on error. 
++ */ ++s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) ++{ ++ return ixgbe_call_func(hw, hw->link.ops.write_link, ++ (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_link_unlocked - Perform write operation on link device ++ * @hw: pointer to the hardware structure ++ * @addr: bus address to write to ++ * @reg: device register to write to ++ * @val: value to write ++ * ++ * Returns an error code on error. ++ **/ ++s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val) ++{ ++ return ixgbe_call_func(hw, hw->link.ops.write_link_unlocked, ++ (hw, addr, reg, val), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_write_i2c_eeprom - Writes 8 bit EEPROM word over I2C interface ++ * @hw: pointer to hardware structure ++ * @byte_offset: EEPROM byte offset to write ++ * @eeprom_data: value to write ++ * ++ * Performs byte write operation to SFP module's EEPROM over I2C interface. ++ **/ ++s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, ++ u8 byte_offset, u8 eeprom_data) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.write_i2c_eeprom, ++ (hw, byte_offset, eeprom_data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_read_i2c_eeprom - Reads 8 bit EEPROM word over I2C interface ++ * @hw: pointer to hardware structure ++ * @byte_offset: EEPROM byte offset to read ++ * @eeprom_data: value read ++ * ++ * Performs byte read operation to SFP module's EEPROM over I2C interface. ++ **/ ++s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data) ++{ ++ return ixgbe_call_func(hw, hw->phy.ops.read_i2c_eeprom, ++ (hw, byte_offset, eeprom_data), ++ IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_get_supported_physical_layer - Returns physical layer type ++ * @hw: pointer to hardware structure ++ * ++ * Determines physical layer capabilities of the current configuration. 
++ **/ ++u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.get_supported_physical_layer, ++ (hw), IXGBE_PHYSICAL_LAYER_UNKNOWN); ++} ++ ++/** ++ * ixgbe_enable_rx_dma - Enables Rx DMA unit, dependent on device specifics ++ * @hw: pointer to hardware structure ++ * @regval: bitfield to write to the Rx DMA register ++ * ++ * Enables the Rx DMA unit of the device. ++ **/ ++s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.enable_rx_dma, ++ (hw, regval), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_disable_sec_rx_path - Stops the receive data path ++ * @hw: pointer to hardware structure ++ * ++ * Stops the receive data path. ++ **/ ++s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.disable_sec_rx_path, ++ (hw), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_enable_sec_rx_path - Enables the receive data path ++ * @hw: pointer to hardware structure ++ * ++ * Enables the receive data path. 
++ **/ ++s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.enable_sec_rx_path, ++ (hw), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_acquire_swfw_semaphore - Acquire SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to acquire ++ * ++ * Acquires the SWFW semaphore through SW_FW_SYNC register for the specified ++ * function (CSR, PHY0, PHY1, EEPROM, Flash) ++ **/ ++s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) ++{ ++ return ixgbe_call_func(hw, hw->mac.ops.acquire_swfw_sync, ++ (hw, mask), IXGBE_NOT_IMPLEMENTED); ++} ++ ++/** ++ * ixgbe_release_swfw_semaphore - Release SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to release ++ * ++ * Releases the SWFW semaphore through SW_FW_SYNC register for the specified ++ * function (CSR, PHY0, PHY1, EEPROM, Flash) ++ **/ ++void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask) ++{ ++ if (hw->mac.ops.release_swfw_sync) ++ hw->mac.ops.release_swfw_sync(hw, mask); ++} ++ ++/** ++ * ixgbe_init_swfw_semaphore - Clean up SWFW semaphore ++ * @hw: pointer to hardware structure ++ * ++ * Attempts to acquire the SWFW semaphore through SW_FW_SYNC register. ++ * Regardless of whether is succeeds or not it then release the semaphore. ++ * This is function is called to recover from catastrophic failures that ++ * may have left the semaphore locked. 
++ **/ ++void ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.init_swfw_sync) ++ hw->mac.ops.init_swfw_sync(hw); ++} ++ ++void ixgbe_disable_rx(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.disable_rx) ++ hw->mac.ops.disable_rx(hw); ++} ++ ++void ixgbe_enable_rx(struct ixgbe_hw *hw) ++{ ++ if (hw->mac.ops.enable_rx) ++ hw->mac.ops.enable_rx(hw); ++} ++ ++/** ++ * ixgbe_set_rate_select_speed - Set module link speed ++ * @hw: pointer to hardware structure ++ * @speed: link speed to set ++ * ++ * Set module link speed via the rate select. ++ */ ++void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed) ++{ ++ if (hw->mac.ops.set_rate_select_speed) ++ hw->mac.ops.set_rate_select_speed(hw, speed); ++} +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_api.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_api.h +new file mode 100644 +index 0000000..8016a49 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_api.h +@@ -0,0 +1,213 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_API_H_ ++#define _IXGBE_API_H_ ++ ++#include "ixgbe_type.h" ++ ++void ixgbe_dcb_get_rtrup2tc(struct ixgbe_hw *hw, u8 *map); ++ ++s32 ixgbe_init_shared_code(struct ixgbe_hw *hw); ++ ++extern s32 ixgbe_init_ops_82598(struct ixgbe_hw *hw); ++extern s32 ixgbe_init_ops_82599(struct ixgbe_hw *hw); ++extern s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw); ++extern s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw); ++extern s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw); ++extern s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw); ++extern s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw); ++ ++s32 ixgbe_set_mac_type(struct ixgbe_hw *hw); ++s32 ixgbe_init_hw(struct ixgbe_hw *hw); ++s32 ixgbe_reset_hw(struct ixgbe_hw *hw); ++s32 ixgbe_start_hw(struct ixgbe_hw *hw); ++s32 ixgbe_clear_hw_cntrs(struct ixgbe_hw *hw); ++enum ixgbe_media_type ixgbe_get_media_type(struct ixgbe_hw *hw); ++s32 ixgbe_get_mac_addr(struct ixgbe_hw *hw, u8 *mac_addr); ++s32 ixgbe_get_bus_info(struct ixgbe_hw *hw); ++u32 ixgbe_get_num_of_tx_queues(struct ixgbe_hw *hw); ++u32 ixgbe_get_num_of_rx_queues(struct ixgbe_hw *hw); ++s32 ixgbe_stop_adapter(struct ixgbe_hw *hw); ++s32 ixgbe_read_pba_string(struct ixgbe_hw *hw, u8 *pba_num, u32 pba_num_size); ++ ++s32 ixgbe_identify_phy(struct ixgbe_hw *hw); ++s32 ixgbe_reset_phy(struct ixgbe_hw *hw); ++s32 ixgbe_read_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 *phy_data); ++s32 ixgbe_write_phy_reg(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 phy_data); ++ ++s32 ixgbe_setup_phy_link(struct ixgbe_hw *hw); ++s32 ixgbe_setup_internal_phy(struct ixgbe_hw *hw); ++s32 ixgbe_check_phy_link(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *link_up); ++s32 ixgbe_setup_phy_link_speed(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete); ++s32 
ixgbe_set_phy_power(struct ixgbe_hw *, bool on); ++void ixgbe_disable_tx_laser(struct ixgbe_hw *hw); ++void ixgbe_enable_tx_laser(struct ixgbe_hw *hw); ++void ixgbe_flap_tx_laser(struct ixgbe_hw *hw); ++s32 ixgbe_setup_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_setup_mac_link(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_check_link(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up, bool link_up_wait_to_complete); ++s32 ixgbe_get_link_capabilities(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *autoneg); ++s32 ixgbe_led_on(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_led_off(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_blink_led_start(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_blink_led_stop(struct ixgbe_hw *hw, u32 index); ++ ++s32 ixgbe_init_eeprom_params(struct ixgbe_hw *hw); ++s32 ixgbe_write_eeprom(struct ixgbe_hw *hw, u16 offset, u16 data); ++s32 ixgbe_write_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, ++ u16 words, u16 *data); ++s32 ixgbe_read_eeprom(struct ixgbe_hw *hw, u16 offset, u16 *data); ++s32 ixgbe_read_eeprom_buffer(struct ixgbe_hw *hw, u16 offset, ++ u16 words, u16 *data); ++ ++s32 ixgbe_validate_eeprom_checksum(struct ixgbe_hw *hw, u16 *checksum_val); ++s32 ixgbe_update_eeprom_checksum(struct ixgbe_hw *hw); ++ ++s32 ixgbe_insert_mac_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); ++s32 ixgbe_set_rar(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, ++ u32 enable_addr); ++s32 ixgbe_clear_rar(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_set_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); ++s32 ixgbe_set_vmdq_san_mac(struct ixgbe_hw *hw, u32 vmdq); ++s32 ixgbe_clear_vmdq(struct ixgbe_hw *hw, u32 rar, u32 vmdq); ++s32 ixgbe_init_rx_addrs(struct ixgbe_hw *hw); ++u32 ixgbe_get_num_rx_addrs(struct ixgbe_hw *hw); ++s32 ixgbe_update_uc_addr_list(struct ixgbe_hw *hw, u8 *addr_list, ++ u32 addr_count, ixgbe_mc_addr_itr func); ++s32 
ixgbe_update_mc_addr_list(struct ixgbe_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count, ixgbe_mc_addr_itr func, ++ bool clear); ++void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr_list, u32 vmdq); ++s32 ixgbe_enable_mc(struct ixgbe_hw *hw); ++s32 ixgbe_disable_mc(struct ixgbe_hw *hw); ++s32 ixgbe_clear_vfta(struct ixgbe_hw *hw); ++s32 ixgbe_set_vfta(struct ixgbe_hw *hw, u32 vlan, ++ u32 vind, bool vlan_on, bool vlvf_bypass); ++s32 ixgbe_set_vlvf(struct ixgbe_hw *hw, u32 vlan, u32 vind, ++ bool vlan_on, u32 *vfta_delta, u32 vfta, ++ bool vlvf_bypass); ++s32 ixgbe_fc_enable(struct ixgbe_hw *hw); ++s32 ixgbe_setup_fc(struct ixgbe_hw *hw); ++s32 ixgbe_set_fw_drv_ver(struct ixgbe_hw *hw, u8 maj, u8 min, u8 build, ++ u8 ver, u16 len, char *driver_ver); ++s32 ixgbe_get_thermal_sensor_data(struct ixgbe_hw *hw); ++s32 ixgbe_init_thermal_sensor_thresh(struct ixgbe_hw *hw); ++void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr); ++s32 ixgbe_get_phy_firmware_version(struct ixgbe_hw *hw, ++ u16 *firmware_version); ++s32 ixgbe_read_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 *val); ++s32 ixgbe_write_analog_reg8(struct ixgbe_hw *hw, u32 reg, u8 val); ++s32 ixgbe_init_uta_tables(struct ixgbe_hw *hw); ++s32 ixgbe_read_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 *eeprom_data); ++u64 ixgbe_get_supported_physical_layer(struct ixgbe_hw *hw); ++s32 ixgbe_enable_rx_dma(struct ixgbe_hw *hw, u32 regval); ++s32 ixgbe_disable_sec_rx_path(struct ixgbe_hw *hw); ++s32 ixgbe_enable_sec_rx_path(struct ixgbe_hw *hw); ++s32 ixgbe_mng_fw_enabled(struct ixgbe_hw *hw); ++s32 ixgbe_reinit_fdir_tables_82599(struct ixgbe_hw *hw); ++s32 ixgbe_init_fdir_signature_82599(struct ixgbe_hw *hw, u32 fdirctrl); ++s32 ixgbe_init_fdir_perfect_82599(struct ixgbe_hw *hw, u32 fdirctrl, ++ bool cloud_mode); ++void ixgbe_fdir_add_signature_filter_82599(struct ixgbe_hw *hw, ++ union ixgbe_atr_hash_dword input, ++ union ixgbe_atr_hash_dword common, ++ u8 queue); ++s32 ixgbe_fdir_set_input_mask_82599(struct 
ixgbe_hw *hw, ++ union ixgbe_atr_input *input_mask, bool cloud_mode); ++s32 ixgbe_fdir_write_perfect_filter_82599(struct ixgbe_hw *hw, ++ union ixgbe_atr_input *input, ++ u16 soft_id, u8 queue, bool cloud_mode); ++s32 ixgbe_fdir_erase_perfect_filter_82599(struct ixgbe_hw *hw, ++ union ixgbe_atr_input *input, ++ u16 soft_id); ++s32 ixgbe_fdir_add_perfect_filter_82599(struct ixgbe_hw *hw, ++ union ixgbe_atr_input *input, ++ union ixgbe_atr_input *mask, ++ u16 soft_id, ++ u8 queue, ++ bool cloud_mode); ++void ixgbe_atr_compute_perfect_hash_82599(union ixgbe_atr_input *input, ++ union ixgbe_atr_input *mask); ++u32 ixgbe_atr_compute_sig_hash_82599(union ixgbe_atr_hash_dword input, ++ union ixgbe_atr_hash_dword common); ++bool ixgbe_verify_lesm_fw_enabled_82599(struct ixgbe_hw *hw); ++s32 ixgbe_read_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, ++ u8 *data); ++s32 ixgbe_read_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data); ++s32 ixgbe_read_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); ++s32 ixgbe_read_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 *val); ++s32 ixgbe_write_i2c_byte(struct ixgbe_hw *hw, u8 byte_offset, u8 dev_addr, ++ u8 data); ++void ixgbe_set_fdir_drop_queue_82599(struct ixgbe_hw *hw, u8 dropqueue); ++s32 ixgbe_write_i2c_byte_unlocked(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data); ++s32 ixgbe_write_link(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); ++s32 ixgbe_write_link_unlocked(struct ixgbe_hw *hw, u8 addr, u16 reg, u16 val); ++s32 ixgbe_write_i2c_eeprom(struct ixgbe_hw *hw, u8 byte_offset, u8 eeprom_data); ++s32 ixgbe_get_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); ++s32 ixgbe_set_san_mac_addr(struct ixgbe_hw *hw, u8 *san_mac_addr); ++s32 ixgbe_get_device_caps(struct ixgbe_hw *hw, u16 *device_caps); ++s32 ixgbe_acquire_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); ++void ixgbe_release_swfw_semaphore(struct ixgbe_hw *hw, u32 mask); ++void 
ixgbe_init_swfw_semaphore(struct ixgbe_hw *hw); ++s32 ixgbe_get_wwn_prefix(struct ixgbe_hw *hw, u16 *wwnn_prefix, ++ u16 *wwpn_prefix); ++s32 ixgbe_get_fcoe_boot_status(struct ixgbe_hw *hw, u16 *bs); ++s32 ixgbe_dmac_config(struct ixgbe_hw *hw); ++s32 ixgbe_dmac_update_tcs(struct ixgbe_hw *hw); ++s32 ixgbe_dmac_config_tcs(struct ixgbe_hw *hw); ++s32 ixgbe_setup_eee(struct ixgbe_hw *hw, bool enable_eee); ++void ixgbe_set_source_address_pruning(struct ixgbe_hw *hw, bool enable, ++ unsigned int vf); ++void ixgbe_set_ethertype_anti_spoofing(struct ixgbe_hw *hw, bool enable, ++ int vf); ++s32 ixgbe_read_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u32 *phy_data); ++s32 ixgbe_write_iosf_sb_reg(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u32 phy_data); ++void ixgbe_disable_mdd(struct ixgbe_hw *hw); ++void ixgbe_enable_mdd(struct ixgbe_hw *hw); ++void ixgbe_mdd_event(struct ixgbe_hw *hw, u32 *vf_bitmap); ++void ixgbe_restore_mdd_vf(struct ixgbe_hw *hw, u32 vf); ++s32 ixgbe_enter_lplu(struct ixgbe_hw *hw); ++s32 ixgbe_handle_lasi(struct ixgbe_hw *hw); ++void ixgbe_set_rate_select_speed(struct ixgbe_hw *hw, ixgbe_link_speed speed); ++void ixgbe_disable_rx(struct ixgbe_hw *hw); ++void ixgbe_enable_rx(struct ixgbe_hw *hw); ++s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, ++ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm); ++ ++#endif /* _IXGBE_API_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.c +new file mode 100644 +index 0000000..5f51629 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.c +@@ -0,0 +1,168 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. 
++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++#include "ixgbe.h" ++#include "ixgbe_cna.h" ++#include "ixgbe_vmdq.h" ++ ++static int ixgbe_cna_open(struct net_device *cnadev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(cnadev); ++ strcpy(cnadev->name, adapter->netdev->name); ++ DPRINTK(PROBE, INFO, "CNA pseudo device opened %s\n", cnadev->name); ++ return 0; ++} ++ ++static int ixgbe_cna_close(struct net_device *cnadev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(cnadev); ++ ++ DPRINTK(PROBE, INFO, "CNA pseudo device closed %s\n", cnadev->name); ++ return 0; ++} ++ ++static int ixgbe_cna_change_mtu(struct net_device *cnadev, int new_mtu) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(cnadev); ++ int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; ++ ++ /* MTU < 68 is an error and causes problems on some kernels */ ++ if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) ++ return -EINVAL; ++ ++ DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n", ++ cnadev->mtu, new_mtu); ++ /* must set new MTU before calling down or up */ ++ cnadev->mtu = new_mtu; ++ ++ return 0; ++} ++ ++int ixgbe_cna_enable(struct ixgbe_adapter *adapter) ++{ ++ struct net_device *cnadev; ++ struct net_device *netdev; ++ int err; ++ 
u64 wwpn; ++ u64 wwnn; ++ ++ netdev = adapter->netdev; ++ /* ++ * Oppositely to regular net device, CNA device doesn't have ++ * a private allocated region as we don't want to duplicate ++ * ixgbe_adapter information. Though, the CNA device still need ++ * to access the ixgbe_adapter while allocating queues or such. Thereby, ++ * cnadev->priv needs to point to netdev->priv. ++ */ ++ cnadev = alloc_etherdev_mq(0, MAX_TX_QUEUES); ++ if (!cnadev) { ++ err = -ENOMEM; ++ goto err_alloc_etherdev; ++ } ++ adapter->cnadev = cnadev; ++ SET_MODULE_OWNER(cnadev); ++ ++ cnadev->priv = adapter; ++ ++ cnadev->open = &ixgbe_cna_open; ++ cnadev->stop = &ixgbe_cna_close; ++ cnadev->change_mtu = &ixgbe_cna_change_mtu; ++ cnadev->do_ioctl = netdev->do_ioctl; ++ cnadev->hard_start_xmit = netdev->hard_start_xmit; ++#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ++ cnadev->vlan_rx_register = netdev->vlan_rx_register; ++ cnadev->vlan_rx_add_vid = netdev->vlan_rx_add_vid; ++ cnadev->vlan_rx_kill_vid = netdev->vlan_rx_kill_vid; ++#endif ++ ixgbe_set_ethtool_ops(cnadev); ++ ++#if IS_ENABLED(CONFIG_DCB) ++ cnadev->dcbnl_ops = netdev->dcbnl_ops; ++#endif /* CONFIG_DCB */ ++ ++ cnadev->mtu = netdev->mtu; ++ cnadev->pdev = netdev->pdev; ++ cnadev->gso_max_size = GSO_MAX_SIZE; ++ cnadev->features = netdev->features | NETIF_F_CNA | NETIF_F_HW_VLAN_FILTER; ++ ++ /* set the MAC address to SAN mac address */ ++ if (ixgbe_validate_mac_addr(adapter->hw.mac.san_addr) == 0) ++ memcpy(cnadev->dev_addr, ++ adapter->hw.mac.san_addr, ++ cnadev->addr_len); ++ ++ cnadev->features |= NETIF_F_FCOE_CRC | ++ NETIF_F_FCOE_MTU | ++ NETIF_F_FSO; ++ ++ cnadev->ndo_fcoe_ddp_setup = &ixgbe_fcoe_ddp_get; ++ cnadev->ndo_fcoe_ddp_done = &ixgbe_fcoe_ddp_put; ++ cnadev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; ++ ++ netif_carrier_off(cnadev); ++ netif_tx_stop_all_queues(cnadev); ++ ++ VMKNETDDI_REGISTER_QUEUEOPS(cnadev, ixgbe_netqueue_ops); ++ ++ err = register_netdev(cnadev); ++ if (err) ++ goto 
err_register; ++ ++ DPRINTK(PROBE, INFO, "CNA pseudo device registered %s\n", netdev->name); ++ ++ return err; ++ ++err_register: ++ DPRINTK(PROBE, INFO, "CNA pseudo device cannot be registered %s\n", ++ netdev->name); ++ free_netdev(cnadev); ++err_alloc_etherdev: ++ DPRINTK(PROBE, INFO, "CNA cannot be enabled on %s\n", netdev->name); ++ adapter->flags2 &= ~IXGBE_FLAG2_CNA_ENABLED; ++ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; ++ adapter->ring_feature[RING_F_FCOE].indices = 0; ++ return err; ++} ++ ++void ixgbe_cna_disable(struct ixgbe_adapter *adapter) ++{ ++ if (!(adapter->flags2 & IXGBE_FLAG2_CNA_ENABLED)) ++ return; ++ ++ adapter->flags2 &= ~IXGBE_FLAG2_CNA_ENABLED; ++ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; ++ adapter->ring_feature[RING_F_FCOE].indices = 0; ++ ++ if (adapter->cnadev) { ++ unregister_netdev(adapter->cnadev); ++ DPRINTK(PROBE, INFO, "CNA pseudo device unregistered %s\n", ++ adapter->cnadev->name); ++ ++ free_netdev(adapter->cnadev); ++ adapter->cnadev = NULL; ++ } ++} ++ ++/* ixgbe_cna.c */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.h +new file mode 100644 +index 0000000..ee40480 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_cna.h +@@ -0,0 +1,31 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. 
++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_CNA_H_ ++#define _IXGBE_CNA_H_ ++ ++int ixgbe_cna_enable(struct ixgbe_adapter *adapter); ++void ixgbe_cna_disable(struct ixgbe_adapter *adapter); ++ ++#endif /* _IXGBE_CNA_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +index 4e5385a..b725de4 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2014 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ +@@ -26,45 +22,132 @@ + + *******************************************************************************/ + +-#include +-#include +-#include +-#include +- +-#include "ixgbe.h" + #include "ixgbe_common.h" + #include "ixgbe_phy.h" +- +-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); +-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); +-static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); +-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); +-static void ixgbe_standby_eeprom(struct ixgbe_hw *hw); +-static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, ++#include "ixgbe_dcb.h" ++#include "ixgbe_dcb_82599.h" ++#include "ixgbe_api.h" ++ ++STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw); ++STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw); ++STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw); ++STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw); ++STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw); ++STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count); +-static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); +-static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +-static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); +-static void ixgbe_release_eeprom(struct ixgbe_hw *hw); +- +-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr); +-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); +-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, +- u16 words, u16 *data); +-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, ++STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count); ++STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); ++STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec); ++STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw); ++ ++STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 
*mc_addr); ++STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, ++ u16 *san_mac_offset); ++STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, ++STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, ++ u16 words, u16 *data); ++STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset); +-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); + + /** +- * ixgbe_device_supports_autoneg_fc - Check if phy supports autoneg flow +- * control +- * @hw: pointer to hardware structure ++ * ixgbe_init_ops_generic - Inits function ptrs ++ * @hw: pointer to the hardware structure ++ * ++ * Initialize the function pointers. ++ **/ ++s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_eeprom_info *eeprom = &hw->eeprom; ++ struct ixgbe_mac_info *mac = &hw->mac; ++ u32 eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); ++ ++ DEBUGFUNC("ixgbe_init_ops_generic"); ++ ++ /* EEPROM */ ++ eeprom->ops.init_params = ixgbe_init_eeprom_params_generic; ++ /* If EEPROM is valid (bit 8 = 1), use EERD otherwise use bit bang */ ++ if (eec & IXGBE_EEC_PRES) { ++ eeprom->ops.read = ixgbe_read_eerd_generic; ++ eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_generic; ++ } else { ++ eeprom->ops.read = ixgbe_read_eeprom_bit_bang_generic; ++ eeprom->ops.read_buffer = ++ ixgbe_read_eeprom_buffer_bit_bang_generic; ++ } ++ eeprom->ops.write = ixgbe_write_eeprom_generic; ++ eeprom->ops.write_buffer = ixgbe_write_eeprom_buffer_bit_bang_generic; ++ eeprom->ops.validate_checksum = ++ ixgbe_validate_eeprom_checksum_generic; ++ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_generic; ++ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_generic; ++ ++ /* MAC */ ++ mac->ops.init_hw = ixgbe_init_hw_generic; ++ mac->ops.reset_hw = NULL; ++ mac->ops.start_hw = ixgbe_start_hw_generic; ++ 
mac->ops.clear_hw_cntrs = ixgbe_clear_hw_cntrs_generic; ++ mac->ops.get_media_type = NULL; ++ mac->ops.get_supported_physical_layer = NULL; ++ mac->ops.enable_rx_dma = ixgbe_enable_rx_dma_generic; ++ mac->ops.get_mac_addr = ixgbe_get_mac_addr_generic; ++ mac->ops.stop_adapter = ixgbe_stop_adapter_generic; ++ mac->ops.get_bus_info = ixgbe_get_bus_info_generic; ++ mac->ops.set_lan_id = ixgbe_set_lan_id_multi_port_pcie; ++ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync; ++ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync; ++ mac->ops.prot_autoc_read = prot_autoc_read_generic; ++ mac->ops.prot_autoc_write = prot_autoc_write_generic; ++ ++ /* LEDs */ ++ mac->ops.led_on = ixgbe_led_on_generic; ++ mac->ops.led_off = ixgbe_led_off_generic; ++ mac->ops.blink_led_start = ixgbe_blink_led_start_generic; ++ mac->ops.blink_led_stop = ixgbe_blink_led_stop_generic; ++ mac->ops.init_led_link_act = ixgbe_init_led_link_act_generic; ++ ++ /* RAR, Multicast, VLAN */ ++ mac->ops.set_rar = ixgbe_set_rar_generic; ++ mac->ops.clear_rar = ixgbe_clear_rar_generic; ++ mac->ops.insert_mac_addr = NULL; ++ mac->ops.set_vmdq = NULL; ++ mac->ops.clear_vmdq = NULL; ++ mac->ops.init_rx_addrs = ixgbe_init_rx_addrs_generic; ++ mac->ops.update_uc_addr_list = ixgbe_update_uc_addr_list_generic; ++ mac->ops.update_mc_addr_list = ixgbe_update_mc_addr_list_generic; ++ mac->ops.enable_mc = ixgbe_enable_mc_generic; ++ mac->ops.disable_mc = ixgbe_disable_mc_generic; ++ mac->ops.clear_vfta = NULL; ++ mac->ops.set_vfta = NULL; ++ mac->ops.set_vlvf = NULL; ++ mac->ops.init_uta_tables = NULL; ++ mac->ops.enable_rx = ixgbe_enable_rx_generic; ++ mac->ops.disable_rx = ixgbe_disable_rx_generic; ++ ++ /* Flow Control */ ++ mac->ops.fc_enable = ixgbe_fc_enable_generic; ++ mac->ops.setup_fc = ixgbe_setup_fc_generic; ++ mac->ops.fc_autoneg = ixgbe_fc_autoneg; ++ ++ /* Link */ ++ mac->ops.get_link_capabilities = NULL; ++ mac->ops.setup_link = NULL; ++ mac->ops.check_link = NULL; ++ mac->ops.dmac_config = NULL; ++ 
mac->ops.dmac_update_tcs = NULL; ++ mac->ops.dmac_config_tcs = NULL; ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_device_supports_autoneg_fc - Check if device supports autonegotiation ++ * of flow control ++ * @hw: pointer to hardware structure ++ * ++ * This function returns true if the device supports flow control ++ * autonegotiation, and false if it does not. + * +- * There are several phys that do not support autoneg flow control. This +- * function check the device id to see if the associated phy supports +- * autoneg flow control. + **/ + bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) + { +@@ -72,18 +155,35 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) + ixgbe_link_speed speed; + bool link_up; + ++ DEBUGFUNC("ixgbe_device_supports_autoneg_fc"); ++ + switch (hw->phy.media_type) { ++ case ixgbe_media_type_fiber_qsfp: + case ixgbe_media_type_fiber: +- hw->mac.ops.check_link(hw, &speed, &link_up, false); +- /* if link is down, assume supported */ +- if (link_up) +- supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? ++ /* flow control autoneg black list */ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_A_SFP: ++ case IXGBE_DEV_ID_X550EM_A_SFP_N: ++ case IXGBE_DEV_ID_X550EM_A_QSFP: ++ case IXGBE_DEV_ID_X550EM_A_QSFP_N: ++ supported = false; ++ break; ++ default: ++ hw->mac.ops.check_link(hw, &speed, &link_up, false); ++ /* if link is down, assume supported */ ++ if (link_up) ++ supported = speed == IXGBE_LINK_SPEED_1GB_FULL ? 
+ true : false; +- else +- supported = true; ++ else ++ supported = true; ++ } ++ + break; + case ixgbe_media_type_backplane: +- supported = true; ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_XFI) ++ supported = false; ++ else ++ supported = true; + break; + case ixgbe_media_type_copper: + /* only some copper devices support flow control autoneg */ +@@ -91,37 +191,47 @@ bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw) + case IXGBE_DEV_ID_82599_T3_LOM: + case IXGBE_DEV_ID_X540T: + case IXGBE_DEV_ID_X540T1: ++ case IXGBE_DEV_ID_X550T: ++ case IXGBE_DEV_ID_X550T1: ++ case IXGBE_DEV_ID_X550EM_X_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: + supported = true; + break; + default: +- break; ++ supported = false; + } + default: + break; + } + ++ if (!supported) ++ ERROR_REPORT2(IXGBE_ERROR_UNSUPPORTED, ++ "Device %x does not support flow control autoneg", ++ hw->device_id); + return supported; + } + + /** +- * ixgbe_setup_fc - Set up flow control ++ * ixgbe_setup_fc_generic - Set up flow control + * @hw: pointer to hardware structure + * + * Called at init time to set up flow control. + **/ +-static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) ++s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; + u32 reg = 0, reg_bp = 0; + u16 reg_cu = 0; + bool locked = false; + +- /* +- * Validate the requested mode. Strict IEEE mode does not allow +- * ixgbe_fc_rx_pause because it will cause us to fail at UNH. 
+- */ ++ DEBUGFUNC("ixgbe_setup_fc_generic"); ++ ++ /* Validate the requested mode */ + if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { +- hw_dbg(hw, "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); ++ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, ++ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } +@@ -142,17 +252,18 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) + case ixgbe_media_type_backplane: + /* some MAC's need RMW protection on AUTOC */ + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, ®_bp); +- if (ret_val) ++ if (ret_val != IXGBE_SUCCESS) + goto out; + +- /* only backplane uses autoc so fall though */ ++ /* fall through - only backplane uses autoc */ ++ case ixgbe_media_type_fiber_qsfp: + case ixgbe_media_type_fiber: + reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + + break; + case ixgbe_media_type_copper: +- hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, +- MDIO_MMD_AN, ®_cu); ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®_cu); + break; + default: + break; +@@ -213,13 +324,14 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) + reg_cu |= IXGBE_TAF_SYM_PAUSE | IXGBE_TAF_ASM_PAUSE; + break; + default: +- hw_dbg(hw, "Flow control param set incorrectly\n"); ++ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, ++ "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; + } + +- if (hw->mac.type != ixgbe_mac_X540) { ++ if (hw->mac.type < ixgbe_mac_X540) { + /* + * Enable auto-negotiation between the MAC & PHY; + * the MAC will advertise clause 37 flow control. 
+@@ -232,7 +344,7 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) + reg &= ~IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN; + + IXGBE_WRITE_REG(hw, IXGBE_PCS1GLCTL, reg); +- hw_dbg(hw, "Set up FC; PCS1GLCTL = 0x%08X\n", reg); ++ DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); + } + + /* +@@ -241,21 +353,17 @@ static s32 ixgbe_setup_fc(struct ixgbe_hw *hw) + * + */ + if (hw->phy.media_type == ixgbe_media_type_backplane) { +- /* Need the SW/FW semaphore around AUTOC writes if 82599 and +- * LESM is on, likewise reset_pipeline requries the lock as +- * it also writes AUTOC. +- */ ++ reg_bp |= IXGBE_AUTOC_AN_RESTART; + ret_val = hw->mac.ops.prot_autoc_write(hw, reg_bp, locked); + if (ret_val) + goto out; +- + } else if ((hw->phy.media_type == ixgbe_media_type_copper) && +- ixgbe_device_supports_autoneg_fc(hw)) { +- hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, +- MDIO_MMD_AN, reg_cu); ++ (ixgbe_device_supports_autoneg_fc(hw))) { ++ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg_cu); + } + +- hw_dbg(hw, "Set up FC; IXGBE_AUTOC = 0x%08X\n", reg); ++ DEBUGOUT1("Set up FC; PCS1GLCTL = 0x%08X\n", reg); + out: + return ret_val; + } +@@ -273,12 +381,14 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) + { + s32 ret_val; + u32 ctrl_ext; ++ u16 device_caps; ++ ++ DEBUGFUNC("ixgbe_start_hw_generic"); + + /* Set the media type */ + hw->phy.media_type = hw->mac.ops.get_media_type(hw); + +- /* Identify the PHY */ +- hw->phy.ops.identify(hw); ++ /* PHY ops initialization must be done in reset_hw() */ + + /* Clear the VLAN filter table */ + hw->mac.ops.clear_vfta(hw); +@@ -294,14 +404,31 @@ s32 ixgbe_start_hw_generic(struct ixgbe_hw *hw) + + /* Setup flow control */ + ret_val = ixgbe_setup_fc(hw); +- if (!ret_val) +- goto out; ++ if (ret_val != IXGBE_SUCCESS && ret_val != IXGBE_NOT_IMPLEMENTED) { ++ DEBUGOUT1("Flow control setup failed, returning %d\n", ret_val); ++ return ret_val; ++ } ++ ++ /* Cache bit indicating need for crosstalk fix */ ++ switch 
(hw->mac.type) { ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ hw->mac.ops.get_device_caps(hw, &device_caps); ++ if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) ++ hw->need_crosstalk_fix = false; ++ else ++ hw->need_crosstalk_fix = true; ++ break; ++ default: ++ hw->need_crosstalk_fix = false; ++ break; ++ } + + /* Clear adapter stopped flag */ + hw->adapter_stopped = false; + +-out: +- return ret_val; ++ return IXGBE_SUCCESS; + } + + /** +@@ -340,7 +467,7 @@ s32 ixgbe_start_hw_gen2(struct ixgbe_hw *hw) + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(i), regval); + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -357,14 +484,23 @@ s32 ixgbe_init_hw_generic(struct ixgbe_hw *hw) + { + s32 status; + ++ DEBUGFUNC("ixgbe_init_hw_generic"); ++ + /* Reset the hardware */ + status = hw->mac.ops.reset_hw(hw); + +- if (status == 0) { ++ if (status == IXGBE_SUCCESS || status == IXGBE_ERR_SFP_NOT_PRESENT) { + /* Start the HW */ + status = hw->mac.ops.start_hw(hw); + } + ++ /* Initialize the LED link active for LED blink support */ ++ if (hw->mac.ops.init_led_link_act) ++ hw->mac.ops.init_led_link_act(hw); ++ ++ if (status != IXGBE_SUCCESS) ++ DEBUGOUT1("Failed to initialize HW, STATUS = %d\n", status); ++ + return status; + } + +@@ -379,6 +515,8 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) + { + u16 i = 0; + ++ DEBUGFUNC("ixgbe_clear_hw_cntrs_generic"); ++ + IXGBE_READ_REG(hw, IXGBE_CRCERRS); + IXGBE_READ_REG(hw, IXGBE_ILLERRC); + IXGBE_READ_REG(hw, IXGBE_ERRBC); +@@ -464,16 +602,20 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw) + } + } + +- if (hw->mac.type == ixgbe_mac_X540) { ++ if (hw->mac.type == ixgbe_mac_X550 || hw->mac.type == ixgbe_mac_X540) { + if (hw->phy.id == 0) +- hw->phy.ops.identify(hw); +- hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, MDIO_MMD_PCS, &i); +- hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, MDIO_MMD_PCS, &i); +- hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, MDIO_MMD_PCS, &i); +- 
hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, MDIO_MMD_PCS, &i); ++ ixgbe_identify_phy(hw); ++ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECL, ++ IXGBE_MDIO_PCS_DEV_TYPE, &i); ++ hw->phy.ops.read_reg(hw, IXGBE_PCRC8ECH, ++ IXGBE_MDIO_PCS_DEV_TYPE, &i); ++ hw->phy.ops.read_reg(hw, IXGBE_LDPCECL, ++ IXGBE_MDIO_PCS_DEV_TYPE, &i); ++ hw->phy.ops.read_reg(hw, IXGBE_LDPCECH, ++ IXGBE_MDIO_PCS_DEV_TYPE, &i); + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -493,20 +635,22 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u16 offset; + u16 length; + ++ DEBUGFUNC("ixgbe_read_pba_string_generic"); ++ + if (pba_num == NULL) { +- hw_dbg(hw, "PBA string buffer was null\n"); ++ DEBUGOUT("PBA string buffer was null\n"); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM0_PTR, &data); + if (ret_val) { +- hw_dbg(hw, "NVM Read Error\n"); ++ DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + ret_val = hw->eeprom.ops.read(hw, IXGBE_PBANUM1_PTR, &pba_ptr); + if (ret_val) { +- hw_dbg(hw, "NVM Read Error\n"); ++ DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + +@@ -516,11 +660,11 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + * and we can decode it into an ascii string + */ + if (data != IXGBE_PBANUM_PTR_GUARD) { +- hw_dbg(hw, "NVM PBA number is not stored as string\n"); ++ DEBUGOUT("NVM PBA number is not stored as string\n"); + + /* we will need 11 characters to store the PBA */ + if (pba_num_size < 11) { +- hw_dbg(hw, "PBA string buffer too small\n"); ++ DEBUGOUT("PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + +@@ -547,23 +691,23 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + pba_num[offset] += 'A' - 0xA; + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + ret_val = hw->eeprom.ops.read(hw, pba_ptr, &length); + if (ret_val) { +- hw_dbg(hw, "NVM Read Error\n"); ++ DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + + if (length == 0xFFFF || 
length == 0) { +- hw_dbg(hw, "NVM PBA number section invalid length\n"); ++ DEBUGOUT("NVM PBA number section invalid length\n"); + return IXGBE_ERR_PBA_SECTION; + } + + /* check if pba_num buffer is big enough */ + if (pba_num_size < (((u32)length * 2) - 1)) { +- hw_dbg(hw, "PBA string buffer too small\n"); ++ DEBUGOUT("PBA string buffer too small\n"); + return IXGBE_ERR_NO_SPACE; + } + +@@ -574,7 +718,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + for (offset = 0; offset < length; offset++) { + ret_val = hw->eeprom.ops.read(hw, pba_ptr + offset, &data); + if (ret_val) { +- hw_dbg(hw, "NVM Read Error\n"); ++ DEBUGOUT("NVM Read Error\n"); + return ret_val; + } + pba_num[offset * 2] = (u8)(data >> 8); +@@ -582,7 +726,7 @@ s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + } + pba_num[offset * 2] = '\0'; + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -600,6 +744,8 @@ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) + u32 rar_low; + u16 i; + ++ DEBUGFUNC("ixgbe_get_mac_addr_generic"); ++ + rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(0)); + rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(0)); + +@@ -609,82 +755,111 @@ s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr) + for (i = 0; i < 2; i++) + mac_addr[i+4] = (u8)(rar_high >> (i*8)); + +- return 0; ++ return IXGBE_SUCCESS; + } + +-enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status) ++/** ++ * ixgbe_set_pci_config_data_generic - Generic store PCI bus info ++ * @hw: pointer to hardware structure ++ * @link_status: the link status returned by the PCI config space ++ * ++ * Stores the PCI bus info (speed, width, type) within the ixgbe_hw structure ++ **/ ++void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status) + { ++ struct ixgbe_mac_info *mac = &hw->mac; ++ ++ if (hw->bus.type == ixgbe_bus_type_unknown) ++ hw->bus.type = ixgbe_bus_type_pci_express; ++ + switch (link_status & IXGBE_PCI_LINK_WIDTH) { + case 
IXGBE_PCI_LINK_WIDTH_1: +- return ixgbe_bus_width_pcie_x1; ++ hw->bus.width = ixgbe_bus_width_pcie_x1; ++ break; + case IXGBE_PCI_LINK_WIDTH_2: +- return ixgbe_bus_width_pcie_x2; ++ hw->bus.width = ixgbe_bus_width_pcie_x2; ++ break; + case IXGBE_PCI_LINK_WIDTH_4: +- return ixgbe_bus_width_pcie_x4; ++ hw->bus.width = ixgbe_bus_width_pcie_x4; ++ break; + case IXGBE_PCI_LINK_WIDTH_8: +- return ixgbe_bus_width_pcie_x8; ++ hw->bus.width = ixgbe_bus_width_pcie_x8; ++ break; + default: +- return ixgbe_bus_width_unknown; ++ hw->bus.width = ixgbe_bus_width_unknown; ++ break; + } +-} + +-enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status) +-{ + switch (link_status & IXGBE_PCI_LINK_SPEED) { + case IXGBE_PCI_LINK_SPEED_2500: +- return ixgbe_bus_speed_2500; ++ hw->bus.speed = ixgbe_bus_speed_2500; ++ break; + case IXGBE_PCI_LINK_SPEED_5000: +- return ixgbe_bus_speed_5000; ++ hw->bus.speed = ixgbe_bus_speed_5000; ++ break; + case IXGBE_PCI_LINK_SPEED_8000: +- return ixgbe_bus_speed_8000; ++ hw->bus.speed = ixgbe_bus_speed_8000; ++ break; + default: +- return ixgbe_bus_speed_unknown; ++ hw->bus.speed = ixgbe_bus_speed_unknown; ++ break; + } ++ ++ mac->ops.set_lan_id(hw); + } + + /** + * ixgbe_get_bus_info_generic - Generic set PCI bus info + * @hw: pointer to hardware structure + * +- * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure ++ * Gets the PCI bus info (speed, width, type) then calls helper function to ++ * store this data within the ixgbe_hw structure. 
+ **/ + s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw) + { + u16 link_status; + +- hw->bus.type = ixgbe_bus_type_pci_express; ++ DEBUGFUNC("ixgbe_get_bus_info_generic"); + + /* Get the negotiated link width and speed from PCI config space */ +- link_status = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_LINK_STATUS); +- +- hw->bus.width = ixgbe_convert_bus_width(link_status); +- hw->bus.speed = ixgbe_convert_bus_speed(link_status); ++ link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); + +- hw->mac.ops.set_lan_id(hw); ++ ixgbe_set_pci_config_data_generic(hw, link_status); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** + * ixgbe_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices + * @hw: pointer to the HW structure + * +- * Determines the LAN function id by reading memory-mapped registers +- * and swaps the port value if requested. ++ * Determines the LAN function id by reading memory-mapped registers and swaps ++ * the port value if requested, and set MAC instance for devices that share ++ * CS4227. 
+ **/ + void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw) + { + struct ixgbe_bus_info *bus = &hw->bus; + u32 reg; ++ u16 ee_ctrl_4; ++ ++ DEBUGFUNC("ixgbe_set_lan_id_multi_port_pcie"); + + reg = IXGBE_READ_REG(hw, IXGBE_STATUS); + bus->func = (reg & IXGBE_STATUS_LAN_ID) >> IXGBE_STATUS_LAN_ID_SHIFT; +- bus->lan_id = bus->func; ++ bus->lan_id = (u8)bus->func; + + /* check for a port swap */ +- reg = IXGBE_READ_REG(hw, IXGBE_FACTPS); ++ reg = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); + if (reg & IXGBE_FACTPS_LFS) + bus->func ^= 0x1; ++ ++ /* Get MAC instance from EEPROM for configuring CS4227 */ ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP) { ++ hw->eeprom.ops.read(hw, IXGBE_EEPROM_CTRL_4, &ee_ctrl_4); ++ bus->instance_id = (ee_ctrl_4 & IXGBE_EE_CTRL_4_INST_ID) >> ++ IXGBE_EE_CTRL_4_INST_ID_SHIFT; ++ } + } + + /** +@@ -701,6 +876,8 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) + u32 reg_val; + u16 i; + ++ DEBUGFUNC("ixgbe_stop_adapter_generic"); ++ + /* + * Set the adapter_stopped flag so other driver functions stop touching + * the hardware +@@ -708,7 +885,7 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) + hw->adapter_stopped = true; + + /* Disable the receive unit */ +- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, 0); ++ ixgbe_disable_rx(hw); + + /* Clear interrupt mask to stop interrupts from being generated */ + IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_IRQ_CLEAR_MASK); +@@ -730,16 +907,57 @@ s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw) + + /* flush all queues disables */ + IXGBE_WRITE_FLUSH(hw); +- usleep_range(1000, 2000); ++ msec_delay(2); + + /* +- * Prevent the PCI-E bus from from hanging by disabling PCI-E master ++ * Prevent the PCI-E bus from hanging by disabling PCI-E master + * access and verify no pending requests + */ + return ixgbe_disable_pcie_master(hw); + } + + /** ++ * ixgbe_init_led_link_act_generic - Store the LED index link/activity. 
++ * @hw: pointer to hardware structure ++ * ++ * Store the index for the link active LED. This will be used to support ++ * blinking the LED. ++ **/ ++s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ u32 led_reg, led_mode; ++ u8 i; ++ ++ led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ ++ /* Get LED link active from the LEDCTL register */ ++ for (i = 0; i < 4; i++) { ++ led_mode = led_reg >> IXGBE_LED_MODE_SHIFT(i); ++ ++ if ((led_mode & IXGBE_LED_MODE_MASK_BASE) == ++ IXGBE_LED_LINK_ACTIVE) { ++ mac->led_link_act = i; ++ return IXGBE_SUCCESS; ++ } ++ } ++ ++ /* ++ * If LEDCTL register does not have the LED link active set, then use ++ * known MAC defaults. ++ */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_X550EM_a: ++ case ixgbe_mac_X550EM_x: ++ mac->led_link_act = 1; ++ break; ++ default: ++ mac->led_link_act = 2; ++ } ++ return IXGBE_SUCCESS; ++} ++ ++/** + * ixgbe_led_on_generic - Turns on the software controllable LEDs. + * @hw: pointer to hardware structure + * @index: led number to turn on +@@ -748,13 +966,18 @@ s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index) + { + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ++ DEBUGFUNC("ixgbe_led_on_generic"); ++ ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ + /* To turn on the LED, set mode to ON. */ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_ON << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -766,13 +989,18 @@ s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index) + { + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ++ DEBUGFUNC("ixgbe_led_off_generic"); ++ ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ + /* To turn off the LED, set mode to OFF. 
*/ + led_reg &= ~IXGBE_LED_MODE_MASK(index); + led_reg |= IXGBE_LED_OFF << IXGBE_LED_MODE_SHIFT(index); + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); + IXGBE_WRITE_FLUSH(hw); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -788,6 +1016,8 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) + u32 eec; + u16 eeprom_size; + ++ DEBUGFUNC("ixgbe_init_eeprom_params_generic"); ++ + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->type = ixgbe_eeprom_none; + /* Set default semaphore delay to 10ms which is a well +@@ -800,7 +1030,7 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) + * Check for EEPROM present first. + * If not present leave as none + */ +- eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (eec & IXGBE_EEC_PRES) { + eeprom->type = ixgbe_eeprom_spi; + +@@ -811,25 +1041,26 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + +- IXGBE_EEPROM_WORD_SIZE_SHIFT); ++ IXGBE_EEPROM_WORD_SIZE_SHIFT); + } + + if (eec & IXGBE_EEC_ADDR_SIZE) + eeprom->address_bits = 16; + else + eeprom->address_bits = 8; +- hw_dbg(hw, "Eeprom params: type = %d, size = %d, address bits: %d\n", +- eeprom->type, eeprom->word_size, eeprom->address_bits); ++ DEBUGOUT3("Eeprom params: type = %d, size = %d, address bits: " ++ "%d\n", eeprom->type, eeprom->word_size, ++ eeprom->address_bits); + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** + * ixgbe_write_eeprom_buffer_bit_bang_generic - Write EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to write +- * @words: number of words ++ * @words: number of word(s) + * @data: 16 bit word(s) to write to EEPROM + * + * Reads 16 bit word(s) from EEPROM through bit-bang method +@@ -837,9 +1068,11 @@ s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw) + s32 
ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u16 i, count; + ++ DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang_generic"); ++ + hw->eeprom.ops.init_params(hw); + + if (words == 0) { +@@ -867,11 +1100,11 @@ s32 ixgbe_write_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? +- IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); ++ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + break; + } + +@@ -889,7 +1122,7 @@ out: + * If ixgbe_eeprom_update_checksum is not called after this function, the + * EEPROM will most likely contain an invalid checksum. + **/ +-static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, ++STATIC s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) + { + s32 status; +@@ -898,24 +1131,26 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 i; + u8 write_opcode = IXGBE_EEPROM_WRITE_OPCODE_SPI; + ++ DEBUGFUNC("ixgbe_write_eeprom_buffer_bit_bang"); ++ + /* Prepare the EEPROM for writing */ + status = ixgbe_acquire_eeprom(hw); + +- if (status == 0) { +- if (ixgbe_ready_eeprom(hw) != 0) { ++ if (status == IXGBE_SUCCESS) { ++ if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + +- if (status == 0) { ++ if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + + /* Send the WRITE ENABLE command (8 bit opcode ) */ + ixgbe_shift_out_eeprom_bits(hw, +- IXGBE_EEPROM_WREN_OPCODE_SPI, +- IXGBE_EEPROM_OPCODE_BITS); ++ IXGBE_EEPROM_WREN_OPCODE_SPI, ++ IXGBE_EEPROM_OPCODE_BITS); + + 
ixgbe_standby_eeprom(hw); + +@@ -951,7 +1186,7 @@ static s32 ixgbe_write_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + } while (++i < words); + + ixgbe_standby_eeprom(hw); +- usleep_range(10000, 20000); ++ msec_delay(10); + } + /* Done with writing - release the EEPROM */ + ixgbe_release_eeprom(hw); +@@ -973,6 +1208,8 @@ s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data) + { + s32 status; + ++ DEBUGFUNC("ixgbe_write_eeprom_generic"); ++ + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { +@@ -990,17 +1227,19 @@ out: + * ixgbe_read_eeprom_buffer_bit_bang_generic - Read EEPROM using bit-bang + * @hw: pointer to hardware structure + * @offset: offset within the EEPROM to be read +- * @words: number of word(s) + * @data: read 16 bit words(s) from EEPROM ++ * @words: number of word(s) + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ + s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u16 i, count; + ++ DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang_generic"); ++ + hw->eeprom.ops.init_params(hw); + + if (words == 0) { +@@ -1020,12 +1259,12 @@ s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + */ + for (i = 0; i < words; i += IXGBE_EEPROM_RD_BUFFER_MAX_COUNT) { + count = (words - i) / IXGBE_EEPROM_RD_BUFFER_MAX_COUNT > 0 ? 
+- IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); ++ IXGBE_EEPROM_RD_BUFFER_MAX_COUNT : (words - i); + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset + i, + count, &data[i]); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + break; + } + +@@ -1042,7 +1281,7 @@ out: + * + * Reads 16 bit word(s) from EEPROM through bit-bang method + **/ +-static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, ++STATIC s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) + { + s32 status; +@@ -1050,17 +1289,19 @@ static s32 ixgbe_read_eeprom_buffer_bit_bang(struct ixgbe_hw *hw, u16 offset, + u8 read_opcode = IXGBE_EEPROM_READ_OPCODE_SPI; + u16 i; + ++ DEBUGFUNC("ixgbe_read_eeprom_buffer_bit_bang"); ++ + /* Prepare the EEPROM for reading */ + status = ixgbe_acquire_eeprom(hw); + +- if (status == 0) { +- if (ixgbe_ready_eeprom(hw) != 0) { ++ if (status == IXGBE_SUCCESS) { ++ if (ixgbe_ready_eeprom(hw) != IXGBE_SUCCESS) { + ixgbe_release_eeprom(hw); + status = IXGBE_ERR_EEPROM; + } + } + +- if (status == 0) { ++ if (status == IXGBE_SUCCESS) { + for (i = 0; i < words; i++) { + ixgbe_standby_eeprom(hw); + /* +@@ -1102,6 +1343,8 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + { + s32 status; + ++ DEBUGFUNC("ixgbe_read_eeprom_bit_bang_generic"); ++ + hw->eeprom.ops.init_params(hw); + + if (offset >= hw->eeprom.word_size) { +@@ -1128,18 +1371,22 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) + { + u32 eerd; +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u32 i; + ++ DEBUGFUNC("ixgbe_read_eerd_buffer_generic"); ++ + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; ++ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); + goto out; + } + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; ++ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); + goto out; + } 
+ +@@ -1150,11 +1397,11 @@ s32 ixgbe_read_eerd_buffer_generic(struct ixgbe_hw *hw, u16 offset, + IXGBE_WRITE_REG(hw, IXGBE_EERD, eerd); + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_READ); + +- if (status == 0) { ++ if (status == IXGBE_SUCCESS) { + data[i] = (IXGBE_READ_REG(hw, IXGBE_EERD) >> + IXGBE_EEPROM_RW_REG_DATA); + } else { +- hw_dbg(hw, "Eeprom read timed out\n"); ++ DEBUGOUT("Eeprom read timed out\n"); + goto out; + } + } +@@ -1171,13 +1418,15 @@ out: + * This function is called only when we are writing a new large buffer + * at given offset so the data would be overwritten anyway. + **/ +-static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, ++STATIC s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + u16 offset) + { + u16 data[IXGBE_EEPROM_PAGE_SIZE_MAX]; +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u16 i; + ++ DEBUGFUNC("ixgbe_detect_eeprom_page_size_generic"); ++ + for (i = 0; i < IXGBE_EEPROM_PAGE_SIZE_MAX; i++) + data[i] = i; + +@@ -1185,11 +1434,11 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + status = ixgbe_write_eeprom_buffer_bit_bang(hw, offset, + IXGBE_EEPROM_PAGE_SIZE_MAX, data); + hw->eeprom.word_page_size = 0; +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto out; + + status = ixgbe_read_eeprom_buffer_bit_bang(hw, offset, 1, data); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto out; + + /* +@@ -1198,8 +1447,8 @@ static s32 ixgbe_detect_eeprom_page_size_generic(struct ixgbe_hw *hw, + */ + hw->eeprom.word_page_size = IXGBE_EEPROM_PAGE_SIZE_MAX - data[0]; + +- hw_dbg(hw, "Detected EEPROM page size = %d words.\n", +- hw->eeprom.word_page_size); ++ DEBUGOUT1("Detected EEPROM page size = %d words.", ++ hw->eeprom.word_page_size); + out: + return status; + } +@@ -1221,7 +1470,7 @@ s32 ixgbe_read_eerd_generic(struct ixgbe_hw *hw, u16 offset, u16 *data) + * ixgbe_write_eewr_buffer_generic - Write EEPROM word(s) using EEWR + * @hw: pointer to hardware 
structure + * @offset: offset of word in the EEPROM to write +- * @words: number of words ++ * @words: number of word(s) + * @data: word(s) write to the EEPROM + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. +@@ -1230,37 +1479,41 @@ s32 ixgbe_write_eewr_buffer_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data) + { + u32 eewr; +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u16 i; + ++ DEBUGFUNC("ixgbe_write_eewr_generic"); ++ + hw->eeprom.ops.init_params(hw); + + if (words == 0) { + status = IXGBE_ERR_INVALID_ARGUMENT; ++ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM words"); + goto out; + } + + if (offset >= hw->eeprom.word_size) { + status = IXGBE_ERR_EEPROM; ++ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, "Invalid EEPROM offset"); + goto out; + } + + for (i = 0; i < words; i++) { + eewr = ((offset + i) << IXGBE_EEPROM_RW_ADDR_SHIFT) | +- (data[i] << IXGBE_EEPROM_RW_REG_DATA) | +- IXGBE_EEPROM_RW_REG_START; ++ (data[i] << IXGBE_EEPROM_RW_REG_DATA) | ++ IXGBE_EEPROM_RW_REG_START; + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); +- if (status != 0) { +- hw_dbg(hw, "Eeprom write EEWR timed out\n"); ++ if (status != IXGBE_SUCCESS) { ++ DEBUGOUT("Eeprom write EEWR timed out\n"); + goto out; + } + + IXGBE_WRITE_REG(hw, IXGBE_EEWR, eewr); + + status = ixgbe_poll_eerd_eewr_done(hw, IXGBE_NVM_POLL_WRITE); +- if (status != 0) { +- hw_dbg(hw, "Eeprom write EEWR timed out\n"); ++ if (status != IXGBE_SUCCESS) { ++ DEBUGOUT("Eeprom write EEWR timed out\n"); + goto out; + } + } +@@ -1290,12 +1543,14 @@ s32 ixgbe_write_eewr_generic(struct ixgbe_hw *hw, u16 offset, u16 data) + * Polls the status bit (bit 1) of the EERD or EEWR to determine when the + * read or write is done respectively. 
+ **/ +-static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) ++s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) + { + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + ++ DEBUGFUNC("ixgbe_poll_eerd_eewr_done"); ++ + for (i = 0; i < IXGBE_EERD_EEWR_ATTEMPTS; i++) { + if (ee_reg == IXGBE_NVM_POLL_READ) + reg = IXGBE_READ_REG(hw, IXGBE_EERD); +@@ -1303,11 +1558,16 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) + reg = IXGBE_READ_REG(hw, IXGBE_EEWR); + + if (reg & IXGBE_EEPROM_RW_REG_DONE) { +- status = 0; ++ status = IXGBE_SUCCESS; + break; + } +- udelay(5); ++ usec_delay(5); + } ++ ++ if (i == IXGBE_EERD_EEWR_ATTEMPTS) ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, ++ "EEPROM read/write done polling timed out"); ++ + return status; + } + +@@ -1318,46 +1578,49 @@ static s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg) + * Prepares EEPROM for access using bit-bang method. This function should + * be called before issuing a command to the EEPROM. 
+ **/ +-static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u32 eec; + u32 i; + +- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) != 0) ++ DEBUGFUNC("ixgbe_acquire_eeprom"); ++ ++ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) ++ != IXGBE_SUCCESS) + status = IXGBE_ERR_SWFW_SYNC; + +- if (status == 0) { +- eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ if (status == IXGBE_SUCCESS) { ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + /* Request EEPROM Access */ + eec |= IXGBE_EEC_REQ; +- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + + for (i = 0; i < IXGBE_EEPROM_GRANT_ATTEMPTS; i++) { +- eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (eec & IXGBE_EEC_GNT) + break; +- udelay(5); ++ usec_delay(5); + } + + /* Release if grant not acquired */ + if (!(eec & IXGBE_EEC_GNT)) { + eec &= ~IXGBE_EEC_REQ; +- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); +- hw_dbg(hw, "Could not acquire EEPROM grant\n"); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); ++ DEBUGOUT("Could not acquire EEPROM grant\n"); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + status = IXGBE_ERR_EEPROM; + } + + /* Setup EEPROM for Read/Write */ +- if (status == 0) { ++ if (status == IXGBE_SUCCESS) { + /* Clear CS and SK */ + eec &= ~(IXGBE_EEC_CS | IXGBE_EEC_SK); +- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); +- udelay(1); ++ usec_delay(1); + } + } + return status; +@@ -1369,29 +1632,32 @@ static s32 ixgbe_acquire_eeprom(struct ixgbe_hw *hw) + * + * Sets the hardware semaphores so EEPROM access can occur for bit-bang method + **/ +-static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) + { + s32 status = IXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + 
u32 swsm; + ++ DEBUGFUNC("ixgbe_get_eeprom_semaphore"); ++ + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ +- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) { +- status = 0; ++ status = IXGBE_SUCCESS; + break; + } +- udelay(50); ++ usec_delay(50); + } + + if (i == timeout) { +- hw_dbg(hw, "Driver can't access the Eeprom - SMBI Semaphore not granted.\n"); ++ DEBUGOUT("Driver can't access the Eeprom - SMBI Semaphore " ++ "not granted.\n"); + /* + * this release is particularly important because our attempts + * above to get the semaphore may have succeeded, and if there +@@ -1400,35 +1666,35 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) + */ + ixgbe_release_eeprom_semaphore(hw); + +- udelay(50); ++ usec_delay(50); + /* + * one last try + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ +- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) +- status = 0; ++ status = IXGBE_SUCCESS; + } + + /* Now get the semaphore between SW/FW through the SWESMBI bit */ +- if (status == 0) { ++ if (status == IXGBE_SUCCESS) { + for (i = 0; i < timeout; i++) { +- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + + /* Set the SW EEPROM semaphore bit to request access */ + swsm |= IXGBE_SWSM_SWESMBI; +- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); ++ IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); + + /* + * If we set the bit successfully then we got the + * semaphore. 
+ */ +- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (swsm & IXGBE_SWSM_SWESMBI) + break; + +- udelay(50); ++ usec_delay(50); + } + + /* +@@ -1436,12 +1702,15 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) + * was not granted because we don't have access to the EEPROM + */ + if (i >= timeout) { +- hw_dbg(hw, "SWESMBI Software EEPROM semaphore not granted.\n"); ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, ++ "SWESMBI Software EEPROM semaphore not granted.\n"); + ixgbe_release_eeprom_semaphore(hw); + status = IXGBE_ERR_EEPROM; + } + } else { +- hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, ++ "Software semaphore SMBI between device drivers " ++ "not granted.\n"); + } + + return status; +@@ -1453,10 +1722,12 @@ static s32 ixgbe_get_eeprom_semaphore(struct ixgbe_hw *hw) + * + * This function clears hardware semaphore bits. + **/ +-static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) ++STATIC void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) + { + u32 swsm; + ++ DEBUGFUNC("ixgbe_release_eeprom_semaphore"); ++ + swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); + + /* Release both semaphores by writing 0 to the bits SWESMBI and SMBI */ +@@ -1469,12 +1740,14 @@ static void ixgbe_release_eeprom_semaphore(struct ixgbe_hw *hw) + * ixgbe_ready_eeprom - Polls for EEPROM ready + * @hw: pointer to hardware structure + **/ +-static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u16 i; + u8 spi_stat_reg; + ++ DEBUGFUNC("ixgbe_ready_eeprom"); ++ + /* + * Read "Status Register" repeatedly until the LSB is cleared. 
The + * EEPROM will signal that the command has been completed by clearing +@@ -1488,16 +1761,16 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) + if (!(spi_stat_reg & IXGBE_EEPROM_STATUS_RDY_SPI)) + break; + +- udelay(5); ++ usec_delay(5); + ixgbe_standby_eeprom(hw); +- } ++ }; + + /* + * On some parts, SPI write time could vary from 0-20mSec on 3.3V + * devices (and only 0-5mSec on 5V devices) + */ + if (i >= IXGBE_EEPROM_MAX_RETRY_SPI) { +- hw_dbg(hw, "SPI EEPROM Status error\n"); ++ DEBUGOUT("SPI EEPROM Status error\n"); + status = IXGBE_ERR_EEPROM; + } + +@@ -1508,21 +1781,23 @@ static s32 ixgbe_ready_eeprom(struct ixgbe_hw *hw) + * ixgbe_standby_eeprom - Returns EEPROM to a "standby" state + * @hw: pointer to hardware structure + **/ +-static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) ++STATIC void ixgbe_standby_eeprom(struct ixgbe_hw *hw) + { + u32 eec; + +- eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ DEBUGFUNC("ixgbe_standby_eeprom"); ++ ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + /* Toggle CS to flush commands */ + eec |= IXGBE_EEC_CS; +- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); +- udelay(1); ++ usec_delay(1); + eec &= ~IXGBE_EEC_CS; +- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); +- udelay(1); ++ usec_delay(1); + } + + /** +@@ -1531,14 +1806,16 @@ static void ixgbe_standby_eeprom(struct ixgbe_hw *hw) + * @data: data to send to the EEPROM + * @count: number of bits to shift out + **/ +-static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, ++STATIC void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + u16 count) + { + u32 eec; + u32 mask; + u32 i; + +- eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ DEBUGFUNC("ixgbe_shift_out_eeprom_bits"); ++ ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + /* + * Mask is used to shift "count" bits of "data" out to the EEPROM +@@ -1559,10 
+1836,10 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + else + eec &= ~IXGBE_EEC_DI; + +- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + +- udelay(1); ++ usec_delay(1); + + ixgbe_raise_eeprom_clk(hw, &eec); + ixgbe_lower_eeprom_clk(hw, &eec); +@@ -1572,11 +1849,11 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + * EEPROM + */ + mask = mask >> 1; +- } ++ }; + + /* We leave the "DI" bit set to "0" when we leave this routine. */ + eec &= ~IXGBE_EEC_DI; +- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + } + +@@ -1584,12 +1861,14 @@ static void ixgbe_shift_out_eeprom_bits(struct ixgbe_hw *hw, u16 data, + * ixgbe_shift_in_eeprom_bits - Shift data bits in from the EEPROM + * @hw: pointer to hardware structure + **/ +-static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) ++STATIC u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) + { + u32 eec; + u32 i; + u16 data = 0; + ++ DEBUGFUNC("ixgbe_shift_in_eeprom_bits"); ++ + /* + * In order to read a register from the EEPROM, we need to shift + * 'count' bits in from the EEPROM. Bits are "shifted in" by raising +@@ -1597,7 +1876,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) + * the value of the "DO" bit. During this "shifting in" process the + * "DI" bit should always be clear. 
+ */ +- eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + eec &= ~(IXGBE_EEC_DO | IXGBE_EEC_DI); + +@@ -1605,7 +1884,7 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) + data = data << 1; + ixgbe_raise_eeprom_clk(hw, &eec); + +- eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + eec &= ~(IXGBE_EEC_DI); + if (eec & IXGBE_EEC_DO) +@@ -1622,16 +1901,18 @@ static u16 ixgbe_shift_in_eeprom_bits(struct ixgbe_hw *hw, u16 count) + * @hw: pointer to hardware structure + * @eec: EEC register's current value + **/ +-static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) ++STATIC void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) + { ++ DEBUGFUNC("ixgbe_raise_eeprom_clk"); ++ + /* + * Raise the clock input to the EEPROM + * (setting the SK bit), then delay + */ + *eec = *eec | IXGBE_EEC_SK; +- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); + IXGBE_WRITE_FLUSH(hw); +- udelay(1); ++ usec_delay(1); + } + + /** +@@ -1639,55 +1920,57 @@ static void ixgbe_raise_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) + * @hw: pointer to hardware structure + * @eecd: EECD's current value + **/ +-static void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) ++STATIC void ixgbe_lower_eeprom_clk(struct ixgbe_hw *hw, u32 *eec) + { ++ DEBUGFUNC("ixgbe_lower_eeprom_clk"); ++ + /* + * Lower the clock input to the EEPROM (clearing the SK bit), then + * delay + */ + *eec = *eec & ~IXGBE_EEC_SK; +- IXGBE_WRITE_REG(hw, IXGBE_EEC, *eec); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), *eec); + IXGBE_WRITE_FLUSH(hw); +- udelay(1); ++ usec_delay(1); + } + + /** + * ixgbe_release_eeprom - Release EEPROM, release semaphores + * @hw: pointer to hardware structure + **/ +-static void ixgbe_release_eeprom(struct ixgbe_hw *hw) ++STATIC void ixgbe_release_eeprom(struct ixgbe_hw *hw) + { + u32 eec; + +- eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ 
DEBUGFUNC("ixgbe_release_eeprom"); ++ ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + eec |= IXGBE_EEC_CS; /* Pull CS high */ + eec &= ~IXGBE_EEC_SK; /* Lower SCK */ + +- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + IXGBE_WRITE_FLUSH(hw); + +- udelay(1); ++ usec_delay(1); + + /* Stop requesting EEPROM access */ + eec &= ~IXGBE_EEC_REQ; +- IXGBE_WRITE_REG(hw, IXGBE_EEC, eec); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), eec); + + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + +- /* +- * Delay before attempt to obtain semaphore again to allow FW +- * access. semaphore_delay is in ms we need us for usleep_range +- */ +- usleep_range(hw->eeprom.semaphore_delay * 1000, +- hw->eeprom.semaphore_delay * 2000); ++ /* Delay before attempt to obtain semaphore again to allow FW access */ ++ msec_delay(hw->eeprom.semaphore_delay); + } + + /** + * ixgbe_calc_eeprom_checksum_generic - Calculates and returns the checksum + * @hw: pointer to hardware structure ++ * ++ * Returns a negative error code on error, or the 16-bit checksum + **/ +-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) ++s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) + { + u16 i; + u16 j; +@@ -1696,35 +1979,48 @@ u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw) + u16 pointer = 0; + u16 word = 0; + ++ DEBUGFUNC("ixgbe_calc_eeprom_checksum_generic"); ++ + /* Include 0x0-0x3F in the checksum */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { +- if (hw->eeprom.ops.read(hw, i, &word) != 0) { +- hw_dbg(hw, "EEPROM read failed\n"); +- break; ++ if (hw->eeprom.ops.read(hw, i, &word)) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return IXGBE_ERR_EEPROM; + } + checksum += word; + } + + /* Include all data from pointers except for the fw pointer */ + for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { +- hw->eeprom.ops.read(hw, i, &pointer); ++ if (hw->eeprom.ops.read(hw, i, &pointer)) { ++ DEBUGOUT("EEPROM read failed\n"); ++ 
return IXGBE_ERR_EEPROM; ++ } ++ ++ /* If the pointer seems invalid */ ++ if (pointer == 0xFFFF || pointer == 0) ++ continue; ++ ++ if (hw->eeprom.ops.read(hw, pointer, &length)) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return IXGBE_ERR_EEPROM; ++ } + +- /* Make sure the pointer seems valid */ +- if (pointer != 0xFFFF && pointer != 0) { +- hw->eeprom.ops.read(hw, pointer, &length); ++ if (length == 0xFFFF || length == 0) ++ continue; + +- if (length != 0xFFFF && length != 0) { +- for (j = pointer+1; j <= pointer+length; j++) { +- hw->eeprom.ops.read(hw, j, &word); +- checksum += word; +- } ++ for (j = pointer + 1; j <= pointer + length; j++) { ++ if (hw->eeprom.ops.read(hw, j, &word)) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return IXGBE_ERR_EEPROM; + } ++ checksum += word; + } + } + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + +- return checksum; ++ return (s32)checksum; + } + + /** +@@ -1742,32 +2038,40 @@ s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 checksum; + u16 read_checksum = 0; + +- /* +- * Read the first word from the EEPROM. If this times out or fails, do ++ DEBUGFUNC("ixgbe_validate_eeprom_checksum_generic"); ++ ++ /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); ++ if (status) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return status; ++ } + +- if (status == 0) { +- checksum = hw->eeprom.ops.calc_checksum(hw); +- +- hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); ++ status = hw->eeprom.ops.calc_checksum(hw); ++ if (status < 0) ++ return status; + +- /* +- * Verify read checksum from EEPROM is the same as +- * calculated checksum +- */ +- if (read_checksum != checksum) +- status = IXGBE_ERR_EEPROM_CHECKSUM; ++ checksum = (u16)(status & 0xffff); + +- /* If the user cares, return the calculated checksum */ +- if (checksum_val) +- *checksum_val = checksum; +- } else { +- hw_dbg(hw, "EEPROM read failed\n"); ++ status = hw->eeprom.ops.read(hw, IXGBE_EEPROM_CHECKSUM, &read_checksum); ++ if (status) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return status; + } + ++ /* Verify read checksum from EEPROM is the same as ++ * calculated checksum ++ */ ++ if (read_checksum != checksum) ++ status = IXGBE_ERR_EEPROM_CHECKSUM; ++ ++ /* If the user cares, return the calculated checksum */ ++ if (checksum_val) ++ *checksum_val = checksum; ++ + return status; + } + +@@ -1780,21 +2084,52 @@ s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw) + s32 status; + u16 checksum; + +- /* +- * Read the first word from the EEPROM. If this times out or fails, do ++ DEBUGFUNC("ixgbe_update_eeprom_checksum_generic"); ++ ++ /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); +- +- if (status == 0) { +- checksum = hw->eeprom.ops.calc_checksum(hw); +- status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, +- checksum); +- } else { +- hw_dbg(hw, "EEPROM read failed\n"); ++ if (status) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return status; + } + ++ status = hw->eeprom.ops.calc_checksum(hw); ++ if (status < 0) ++ return status; ++ ++ checksum = (u16)(status & 0xffff); ++ ++ status = hw->eeprom.ops.write(hw, IXGBE_EEPROM_CHECKSUM, checksum); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_validate_mac_addr - Validate MAC address ++ * @mac_addr: pointer to MAC address. ++ * ++ * Tests a MAC address to ensure it is a valid Individual Address. ++ **/ ++s32 ixgbe_validate_mac_addr(u8 *mac_addr) ++{ ++ s32 status = IXGBE_SUCCESS; ++ ++ DEBUGFUNC("ixgbe_validate_mac_addr"); ++ ++ /* Make sure it is not a multicast address */ ++ if (IXGBE_IS_MULTICAST(mac_addr)) { ++ status = IXGBE_ERR_INVALID_MAC_ADDR; ++ /* Not a broadcast address */ ++ } else if (IXGBE_IS_BROADCAST(mac_addr)) { ++ status = IXGBE_ERR_INVALID_MAC_ADDR; ++ /* Reject the zero address */ ++ } else if (mac_addr[0] == 0 && mac_addr[1] == 0 && mac_addr[2] == 0 && ++ mac_addr[3] == 0 && mac_addr[4] == 0 && mac_addr[5] == 0) { ++ status = IXGBE_ERR_INVALID_MAC_ADDR; ++ } + return status; + } + +@@ -1814,9 +2149,12 @@ s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 rar_low, rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + ++ DEBUGFUNC("ixgbe_set_rar_generic"); ++ + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { +- hw_dbg(hw, "RAR index %d is out of range.\n", index); ++ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, ++ "RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + +@@ -1846,7 +2184,7 @@ s32 
ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + IXGBE_WRITE_REG(hw, IXGBE_RAL(index), rar_low); + IXGBE_WRITE_REG(hw, IXGBE_RAH(index), rar_high); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -1861,9 +2199,12 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) + u32 rar_high; + u32 rar_entries = hw->mac.num_rar_entries; + ++ DEBUGFUNC("ixgbe_clear_rar_generic"); ++ + /* Make sure we are using a valid rar index range */ + if (index >= rar_entries) { +- hw_dbg(hw, "RAR index %d is out of range.\n", index); ++ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, ++ "RAR index %d is out of range.\n", index); + return IXGBE_ERR_INVALID_ARGUMENT; + } + +@@ -1881,7 +2222,7 @@ s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index) + /* clear VMDq pool/queue selection for this RAR */ + hw->mac.ops.clear_vmdq(hw, index, IXGBE_CLEAR_VMDQ_ALL); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -1897,32 +2238,44 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) + u32 i; + u32 rar_entries = hw->mac.num_rar_entries; + ++ DEBUGFUNC("ixgbe_init_rx_addrs_generic"); ++ + /* + * If the current mac address is valid, assume it is a software override + * to the permanent address. + * Otherwise, use the permanent address from the eeprom. + */ +- if (!is_valid_ether_addr(hw->mac.addr)) { ++ if (ixgbe_validate_mac_addr(hw->mac.addr) == ++ IXGBE_ERR_INVALID_MAC_ADDR) { + /* Get the MAC address from the RAR0 for later reference */ + hw->mac.ops.get_mac_addr(hw, hw->mac.addr); + +- hw_dbg(hw, " Keeping Current RAR0 Addr =%pM\n", hw->mac.addr); ++ DEBUGOUT3(" Keeping Current RAR0 Addr =%.2X %.2X %.2X ", ++ hw->mac.addr[0], hw->mac.addr[1], ++ hw->mac.addr[2]); ++ DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], ++ hw->mac.addr[4], hw->mac.addr[5]); + } else { + /* Setup the receive address. 
*/ +- hw_dbg(hw, "Overriding MAC Address in RAR[0]\n"); +- hw_dbg(hw, " New MAC Addr =%pM\n", hw->mac.addr); ++ DEBUGOUT("Overriding MAC Address in RAR[0]\n"); ++ DEBUGOUT3(" New MAC Addr =%.2X %.2X %.2X ", ++ hw->mac.addr[0], hw->mac.addr[1], ++ hw->mac.addr[2]); ++ DEBUGOUT3("%.2X %.2X %.2X\n", hw->mac.addr[3], ++ hw->mac.addr[4], hw->mac.addr[5]); + + hw->mac.ops.set_rar(hw, 0, hw->mac.addr, 0, IXGBE_RAH_AV); +- +- /* clear VMDq pool/queue selection for RAR 0 */ +- hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); + } ++ ++ /* clear VMDq pool/queue selection for RAR 0 */ ++ hw->mac.ops.clear_vmdq(hw, 0, IXGBE_CLEAR_VMDQ_ALL); ++ + hw->addr_ctrl.overflow_promisc = 0; + + hw->addr_ctrl.rar_used_count = 1; + + /* Zero out the other receive addresses. */ +- hw_dbg(hw, "Clearing RAR[1-%d]\n", rar_entries - 1); ++ DEBUGOUT1("Clearing RAR[1-%d]\n", rar_entries - 1); + for (i = 1; i < rar_entries; i++) { + IXGBE_WRITE_REG(hw, IXGBE_RAL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_RAH(i), 0); +@@ -1932,14 +2285,116 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) + hw->addr_ctrl.mta_in_use = 0; + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + +- hw_dbg(hw, " Clearing MTA\n"); ++ DEBUGOUT(" Clearing MTA\n"); + for (i = 0; i < hw->mac.mcft_size; i++) + IXGBE_WRITE_REG(hw, IXGBE_MTA(i), 0); + +- if (hw->mac.ops.init_uta_tables) +- hw->mac.ops.init_uta_tables(hw); ++ ixgbe_init_uta_tables(hw); ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_add_uc_addr - Adds a secondary unicast address. ++ * @hw: pointer to hardware structure ++ * @addr: new address ++ * ++ * Adds it to unused receive address register or goes into promiscuous mode. 
++ **/ ++void ixgbe_add_uc_addr(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) ++{ ++ u32 rar_entries = hw->mac.num_rar_entries; ++ u32 rar; ++ ++ DEBUGFUNC("ixgbe_add_uc_addr"); ++ ++ DEBUGOUT6(" UC Addr = %.2X %.2X %.2X %.2X %.2X %.2X\n", ++ addr[0], addr[1], addr[2], addr[3], addr[4], addr[5]); ++ ++ /* ++ * Place this address in the RAR if there is room, ++ * else put the controller into promiscuous mode ++ */ ++ if (hw->addr_ctrl.rar_used_count < rar_entries) { ++ rar = hw->addr_ctrl.rar_used_count; ++ hw->mac.ops.set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); ++ DEBUGOUT1("Added a secondary address to RAR[%d]\n", rar); ++ hw->addr_ctrl.rar_used_count++; ++ } else { ++ hw->addr_ctrl.overflow_promisc++; ++ } ++ ++ DEBUGOUT("ixgbe_add_uc_addr Complete\n"); ++} ++ ++/** ++ * ixgbe_update_uc_addr_list_generic - Updates MAC list of secondary addresses ++ * @hw: pointer to hardware structure ++ * @addr_list: the list of new addresses ++ * @addr_count: number of addresses ++ * @next: iterator function to walk the address list ++ * ++ * The given list replaces any existing list. Clears the secondary addrs from ++ * receive address registers. Uses unused receive address registers for the ++ * first secondary addresses, and falls back to promiscuous mode as needed. ++ * ++ * Drivers using secondary unicast addresses must set user_set_promisc when ++ * manually putting the device into promiscuous mode. 
++ **/ ++s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, ++ u32 addr_count, ixgbe_mc_addr_itr next) ++{ ++ u8 *addr; ++ u32 i; ++ u32 old_promisc_setting = hw->addr_ctrl.overflow_promisc; ++ u32 uc_addr_in_use; ++ u32 fctrl; ++ u32 vmdq; ++ ++ DEBUGFUNC("ixgbe_update_uc_addr_list_generic"); ++ ++ /* ++ * Clear accounting of old secondary address list, ++ * don't count RAR[0] ++ */ ++ uc_addr_in_use = hw->addr_ctrl.rar_used_count - 1; ++ hw->addr_ctrl.rar_used_count -= uc_addr_in_use; ++ hw->addr_ctrl.overflow_promisc = 0; ++ ++ /* Zero out the other receive addresses */ ++ DEBUGOUT1("Clearing RAR[1-%d]\n", uc_addr_in_use+1); ++ for (i = 0; i < uc_addr_in_use; i++) { ++ IXGBE_WRITE_REG(hw, IXGBE_RAL(1+i), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_RAH(1+i), 0); ++ } ++ ++ /* Add the new addresses */ ++ for (i = 0; i < addr_count; i++) { ++ DEBUGOUT(" Adding the secondary addresses:\n"); ++ addr = next(hw, &addr_list, &vmdq); ++ ixgbe_add_uc_addr(hw, addr, vmdq); ++ } ++ ++ if (hw->addr_ctrl.overflow_promisc) { ++ /* enable promisc if not already in overflow or set by user */ ++ if (!old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { ++ DEBUGOUT(" Entering address overflow promisc mode\n"); ++ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++ fctrl |= IXGBE_FCTRL_UPE; ++ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); ++ } ++ } else { ++ /* only disable if set by overflow, not by user */ ++ if (old_promisc_setting && !hw->addr_ctrl.user_set_promisc) { ++ DEBUGOUT(" Leaving address overflow promisc mode\n"); ++ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++ fctrl &= ~IXGBE_FCTRL_UPE; ++ IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); ++ } ++ } + +- return 0; ++ DEBUGOUT("ixgbe_update_uc_addr_list_generic Complete\n"); ++ return IXGBE_SUCCESS; + } + + /** +@@ -1954,10 +2409,12 @@ s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw) + * by the MO field of the MCSTCTRL. The MO field is set during initialization + * to mc_filter_type. 
+ **/ +-static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) ++STATIC s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) + { + u32 vector = 0; + ++ DEBUGFUNC("ixgbe_mta_vector"); ++ + switch (hw->mac.mc_filter_type) { + case 0: /* use bits [47:36] of the address */ + vector = ((mc_addr[4] >> 4) | (((u16)mc_addr[5]) << 4)); +@@ -1972,7 +2429,8 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) + vector = ((mc_addr[4]) | (((u16)mc_addr[5]) << 8)); + break; + default: /* Invalid mc_filter_type */ +- hw_dbg(hw, "MC filter type param set incorrectly\n"); ++ DEBUGOUT("MC filter type param set incorrectly\n"); ++ ASSERT(0); + break; + } + +@@ -1988,16 +2446,18 @@ static s32 ixgbe_mta_vector(struct ixgbe_hw *hw, u8 *mc_addr) + * + * Sets the bit-vector in the multicast table. + **/ +-static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) ++void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) + { + u32 vector; + u32 vector_bit; + u32 vector_reg; + ++ DEBUGFUNC("ixgbe_set_mta"); ++ + hw->addr_ctrl.mta_in_use++; + + vector = ixgbe_mta_vector(hw, mc_addr); +- hw_dbg(hw, " bit-vector = 0x%03X\n", vector); ++ DEBUGOUT1(" bit-vector = 0x%03X\n", vector); + + /* + * The MTA is a register array of 128 32-bit registers. It is treated +@@ -2016,34 +2476,40 @@ static void ixgbe_set_mta(struct ixgbe_hw *hw, u8 *mc_addr) + /** + * ixgbe_update_mc_addr_list_generic - Updates MAC list of multicast addresses + * @hw: pointer to hardware structure +- * @netdev: pointer to net device structure ++ * @mc_addr_list: the list of new multicast addresses ++ * @mc_addr_count: number of addresses ++ * @next: iterator function to walk the multicast address list ++ * @clear: flag, when set clears the table beforehand + * +- * The given list replaces any existing list. Clears the MC addrs from receive +- * address registers and the multicast table. 
Uses unused receive address +- * registers for the first multicast addresses, and hashes the rest into the +- * multicast table. ++ * When the clear flag is set, the given list replaces any existing list. ++ * Hashes the given addresses into the multicast table. + **/ +-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, +- struct net_device *netdev) ++s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count, ixgbe_mc_addr_itr next, ++ bool clear) + { +- struct netdev_hw_addr *ha; + u32 i; ++ u32 vmdq; ++ ++ DEBUGFUNC("ixgbe_update_mc_addr_list_generic"); + + /* + * Set the new number of MC addresses that we are being requested to + * use. + */ +- hw->addr_ctrl.num_mc_addrs = netdev_mc_count(netdev); ++ hw->addr_ctrl.num_mc_addrs = mc_addr_count; + hw->addr_ctrl.mta_in_use = 0; + + /* Clear mta_shadow */ +- hw_dbg(hw, " Clearing MTA\n"); +- memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); ++ if (clear) { ++ DEBUGOUT(" Clearing MTA\n"); ++ memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow)); ++ } + +- /* Update mta shadow */ +- netdev_for_each_mc_addr(ha, netdev) { +- hw_dbg(hw, " Adding the multicast addresses:\n"); +- ixgbe_set_mta(hw, ha->addr); ++ /* Update mta_shadow */ ++ for (i = 0; i < mc_addr_count; i++) { ++ DEBUGOUT(" Adding the multicast addresses:\n"); ++ ixgbe_set_mta(hw, next(hw, &mc_addr_list, &vmdq)); + } + + /* Enable mta */ +@@ -2055,8 +2521,8 @@ s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, + IXGBE_MCSTCTRL_MFE | hw->mac.mc_filter_type); + +- hw_dbg(hw, "ixgbe_update_mc_addr_list_generic Complete\n"); +- return 0; ++ DEBUGOUT("ixgbe_update_mc_addr_list_generic Complete\n"); ++ return IXGBE_SUCCESS; + } + + /** +@@ -2069,11 +2535,13 @@ s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw) + { + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + ++ DEBUGFUNC("ixgbe_enable_mc_generic"); ++ + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, 
IXGBE_MCSTCTRL, IXGBE_MCSTCTRL_MFE | + hw->mac.mc_filter_type); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -2086,10 +2554,12 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) + { + struct ixgbe_addr_filter_info *a = &hw->addr_ctrl; + ++ DEBUGFUNC("ixgbe_disable_mc_generic"); ++ + if (a->mta_in_use > 0) + IXGBE_WRITE_REG(hw, IXGBE_MCSTCTRL, hw->mac.mc_filter_type); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -2100,25 +2570,27 @@ s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw) + **/ + s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; + u32 mflcn_reg, fccfg_reg; + u32 reg; + u32 fcrtl, fcrth; + int i; + +- /* Validate the water mark configuration. */ ++ DEBUGFUNC("ixgbe_fc_enable_generic"); ++ ++ /* Validate the water mark configuration */ + if (!hw->fc.pause_time) { + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } + + /* Low water mark of zero causes XOFF floods */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + if (!hw->fc.low_water[i] || + hw->fc.low_water[i] >= hw->fc.high_water[i]) { +- hw_dbg(hw, "Invalid water mark configuration\n"); ++ DEBUGOUT("Invalid water mark configuration\n"); + ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; + goto out; + } +@@ -2126,7 +2598,7 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + } + + /* Negotiate the fc mode to use */ +- ixgbe_fc_autoneg(hw); ++ hw->mac.ops.fc_autoneg(hw); + + /* Disable any previous flow control settings */ + mflcn_reg = IXGBE_READ_REG(hw, IXGBE_MFLCN); +@@ -2176,7 +2648,8 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + fccfg_reg |= IXGBE_FCCFG_TFCE_802_3X; + break; + default: +- hw_dbg(hw, "Flow control param set incorrectly\n"); ++ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, ++ "Flow control param set incorrectly\n"); + ret_val = IXGBE_ERR_CONFIG; + goto out; + break; +@@ -2187,8 
+2660,9 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, mflcn_reg); + IXGBE_WRITE_REG(hw, IXGBE_FCCFG, fccfg_reg); + ++ + /* Set up and enable Rx high/low water mark thresholds, enable XON. */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if ((hw->fc.current_mode & ixgbe_fc_tx_pause) && + hw->fc.high_water[i]) { + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; +@@ -2199,10 +2673,11 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + /* + * In order to prevent Tx hangs when the internal Tx + * switch is enabled we must set the high water mark +- * to the maximum FCRTH value. This allows the Tx +- * switch to function even under heavy Rx workloads. ++ * to the Rx packet buffer size - 24KB. This allows ++ * the Tx switch to function even under heavy Rx ++ * workloads. + */ +- fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; ++ fcrth = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; + } + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), fcrth); +@@ -2210,9 +2685,10 @@ s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw) + + /* Configure pause time (2 TCs per register) */ + reg = hw->fc.pause_time * 0x00010001; +- for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) ++ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + ++ /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + + out: +@@ -2232,11 +2708,16 @@ out: + * Find the intersection between advertised settings and link partner's + * advertised settings + **/ +-static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, +- u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) ++s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, ++ u32 adv_sym, u32 adv_asm, u32 lp_sym, u32 lp_asm) + { +- if ((!(adv_reg)) || (!(lp_reg))) ++ if ((!(adv_reg)) || (!(lp_reg))) { ++ 
ERROR_REPORT3(IXGBE_ERROR_UNSUPPORTED, ++ "Local or link partner's advertised flow control " ++ "settings are NULL. Local: %x, link partner: %x\n", ++ adv_reg, lp_reg); + return IXGBE_ERR_FC_NOT_NEGOTIATED; ++ } + + if ((adv_reg & adv_sym) && (lp_reg & lp_sym)) { + /* +@@ -2248,24 +2729,24 @@ static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + */ + if (hw->fc.requested_mode == ixgbe_fc_full) { + hw->fc.current_mode = ixgbe_fc_full; +- hw_dbg(hw, "Flow Control = FULL.\n"); ++ DEBUGOUT("Flow Control = FULL.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_rx_pause; +- hw_dbg(hw, "Flow Control=RX PAUSE frames only\n"); ++ DEBUGOUT("Flow Control=RX PAUSE frames only\n"); + } + } else if (!(adv_reg & adv_sym) && (adv_reg & adv_asm) && + (lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_tx_pause; +- hw_dbg(hw, "Flow Control = TX PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = TX PAUSE frames only.\n"); + } else if ((adv_reg & adv_sym) && (adv_reg & adv_asm) && + !(lp_reg & lp_sym) && (lp_reg & lp_asm)) { + hw->fc.current_mode = ixgbe_fc_rx_pause; +- hw_dbg(hw, "Flow Control = RX PAUSE frames only.\n"); ++ DEBUGOUT("Flow Control = RX PAUSE frames only.\n"); + } else { + hw->fc.current_mode = ixgbe_fc_none; +- hw_dbg(hw, "Flow Control = NONE.\n"); ++ DEBUGOUT("Flow Control = NONE.\n"); + } +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -2274,7 +2755,7 @@ static s32 ixgbe_negotiate_fc(struct ixgbe_hw *hw, u32 adv_reg, u32 lp_reg, + * + * Enable flow control according on 1 gig fiber. 
+ **/ +-static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) + { + u32 pcs_anadv_reg, pcs_lpab_reg, linkstat; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; +@@ -2287,17 +2768,19 @@ static s32 ixgbe_fc_autoneg_fiber(struct ixgbe_hw *hw) + + linkstat = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); + if ((!!(linkstat & IXGBE_PCS1GLSTA_AN_COMPLETE) == 0) || +- (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) ++ (!!(linkstat & IXGBE_PCS1GLSTA_AN_TIMED_OUT) == 1)) { ++ DEBUGOUT("Auto-Negotiation did not complete or timed out\n"); + goto out; ++ } + + pcs_anadv_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); + pcs_lpab_reg = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); + + ret_val = ixgbe_negotiate_fc(hw, pcs_anadv_reg, +- pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, +- IXGBE_PCS1GANA_ASM_PAUSE, +- IXGBE_PCS1GANA_SYM_PAUSE, +- IXGBE_PCS1GANA_ASM_PAUSE); ++ pcs_lpab_reg, IXGBE_PCS1GANA_SYM_PAUSE, ++ IXGBE_PCS1GANA_ASM_PAUSE, ++ IXGBE_PCS1GANA_SYM_PAUSE, ++ IXGBE_PCS1GANA_ASM_PAUSE); + + out: + return ret_val; +@@ -2309,7 +2792,7 @@ out: + * + * Enable flow control according to IEEE clause 37. 
+ **/ +-static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) + { + u32 links2, anlp1_reg, autoc_reg, links; + s32 ret_val = IXGBE_ERR_FC_NOT_NEGOTIATED; +@@ -2320,13 +2803,17 @@ static s32 ixgbe_fc_autoneg_backplane(struct ixgbe_hw *hw) + * - we are 82599 and link partner is not AN enabled + */ + links = IXGBE_READ_REG(hw, IXGBE_LINKS); +- if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) ++ if ((links & IXGBE_LINKS_KX_AN_COMP) == 0) { ++ DEBUGOUT("Auto-Negotiation did not complete\n"); + goto out; ++ } + + if (hw->mac.type == ixgbe_mac_82599EB) { + links2 = IXGBE_READ_REG(hw, IXGBE_LINKS2); +- if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) ++ if ((links2 & IXGBE_LINKS2_AN_SUPPORTED) == 0) { ++ DEBUGOUT("Link partner is not AN enabled\n"); + goto out; ++ } + } + /* + * Read the 10g AN autoc and LP ability registers and resolve +@@ -2349,16 +2836,16 @@ out: + * + * Enable flow control according to IEEE clause 37. + **/ +-static s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_fc_autoneg_copper(struct ixgbe_hw *hw) + { + u16 technology_ability_reg = 0; + u16 lp_technology_ability_reg = 0; + +- hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, +- MDIO_MMD_AN, ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_ADVT, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &technology_ability_reg); +- hw->phy.ops.read_reg(hw, MDIO_AN_LPA, +- MDIO_MMD_AN, ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_LP, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &lp_technology_ability_reg); + + return ixgbe_negotiate_fc(hw, (u32)technology_ability_reg, +@@ -2380,24 +2867,29 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) + ixgbe_link_speed speed; + bool link_up; + ++ DEBUGFUNC("ixgbe_fc_autoneg"); ++ + /* + * AN should have completed when the cable was plugged in. + * Look for reasons to bail out. Bail out if: + * - FC autoneg is disabled, or if + * - link is not up. +- * +- * Since we're being called from an LSC, link is already known to be up. 
+- * So use link_up_wait_to_complete=false. + */ +- if (hw->fc.disable_fc_autoneg) ++ if (hw->fc.disable_fc_autoneg) { ++ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, ++ "Flow control autoneg is disabled"); + goto out; ++ } + + hw->mac.ops.check_link(hw, &speed, &link_up, false); +- if (!link_up) ++ if (!link_up) { ++ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); + goto out; ++ } + + switch (hw->phy.media_type) { + /* Autoneg flow control on fiber adapters */ ++ case ixgbe_media_type_fiber_qsfp: + case ixgbe_media_type_fiber: + if (speed == IXGBE_LINK_SPEED_1GB_FULL) + ret_val = ixgbe_fc_autoneg_fiber(hw); +@@ -2419,7 +2911,7 @@ void ixgbe_fc_autoneg(struct ixgbe_hw *hw) + } + + out: +- if (ret_val == 0) { ++ if (ret_val == IXGBE_SUCCESS) { + hw->fc.fc_was_autonegged = true; + } else { + hw->fc.fc_was_autonegged = false; +@@ -2427,46 +2919,46 @@ out: + } + } + +-/** ++/* + * ixgbe_pcie_timeout_poll - Return number of times to poll for completion + * @hw: pointer to hardware structure + * + * System-wide timeout range is encoded in PCIe Device Control2 register. + * +- * Add 10% to specified maximum and return the number of times to poll for +- * completion timeout, in units of 100 microsec. Never return less than +- * 800 = 80 millisec. +- **/ +-static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) ++ * Add 10% to specified maximum and return the number of times to poll for ++ * completion timeout, in units of 100 microsec. Never return less than ++ * 800 = 80 millisec. 
++ */ ++STATIC u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) + { + s16 devctl2; + u32 pollcnt; + +- devctl2 = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_CONTROL2); ++ devctl2 = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_CONTROL2); + devctl2 &= IXGBE_PCIDEVCTRL2_TIMEO_MASK; + + switch (devctl2) { + case IXGBE_PCIDEVCTRL2_65_130ms: +- pollcnt = 1300; /* 130 millisec */ ++ pollcnt = 1300; /* 130 millisec */ + break; + case IXGBE_PCIDEVCTRL2_260_520ms: +- pollcnt = 5200; /* 520 millisec */ ++ pollcnt = 5200; /* 520 millisec */ + break; + case IXGBE_PCIDEVCTRL2_1_2s: +- pollcnt = 20000; /* 2 sec */ ++ pollcnt = 20000; /* 2 sec */ + break; + case IXGBE_PCIDEVCTRL2_4_8s: +- pollcnt = 80000; /* 8 sec */ ++ pollcnt = 80000; /* 8 sec */ + break; + case IXGBE_PCIDEVCTRL2_17_34s: +- pollcnt = 34000; /* 34 sec */ ++ pollcnt = 34000; /* 34 sec */ + break; +- case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ +- case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ +- case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ +- case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ ++ case IXGBE_PCIDEVCTRL2_50_100us: /* 100 microsecs */ ++ case IXGBE_PCIDEVCTRL2_1_2ms: /* 2 millisecs */ ++ case IXGBE_PCIDEVCTRL2_16_32ms: /* 32 millisec */ ++ case IXGBE_PCIDEVCTRL2_16_32ms_def: /* 32 millisec default */ + default: +- pollcnt = 800; /* 80 millisec minimum */ ++ pollcnt = 800; /* 80 millisec minimum */ + break; + } + +@@ -2480,26 +2972,28 @@ static u32 ixgbe_pcie_timeout_poll(struct ixgbe_hw *hw) + * + * Disables PCI-Express master access and verifies there are no pending + * requests. IXGBE_ERR_MASTER_REQUESTS_PENDING is returned if master disable +- * bit hasn't caused the master requests to be disabled, else 0 ++ * bit hasn't caused the master requests to be disabled, else IXGBE_SUCCESS + * is returned signifying master requests disabled. 
+ **/ +-static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) ++s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u32 i, poll; + u16 value; + ++ DEBUGFUNC("ixgbe_disable_pcie_master"); ++ + /* Always set this bit to ensure any future transactions are blocked */ + IXGBE_WRITE_REG(hw, IXGBE_CTRL, IXGBE_CTRL_GIO_DIS); + + /* Exit if master requests are blocked */ + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO) || +- ixgbe_removed(hw->hw_addr)) ++ IXGBE_REMOVED(hw->hw_addr)) + goto out; + + /* Poll for master request bit to clear */ + for (i = 0; i < IXGBE_PCI_MASTER_DISABLE_TIMEOUT; i++) { +- udelay(100); ++ usec_delay(100); + if (!(IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_GIO)) + goto out; + } +@@ -2512,24 +3006,28 @@ static s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw) + * remaining completions from the PCIe bus to trickle in, and then reset + * again to clear out any effects they may have had on our device. + */ +- hw_dbg(hw, "GIO Master Disable bit didn't clear - requesting resets\n"); ++ DEBUGOUT("GIO Master Disable bit didn't clear - requesting resets\n"); + hw->mac.flags |= IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; + ++ if (hw->mac.type >= ixgbe_mac_X550) ++ goto out; ++ + /* + * Before proceeding, make sure that the PCIe block does not have + * transactions pending. 
+ */ + poll = ixgbe_pcie_timeout_poll(hw); + for (i = 0; i < poll; i++) { +- udelay(100); +- value = ixgbe_read_pci_cfg_word(hw, IXGBE_PCI_DEVICE_STATUS); +- if (ixgbe_removed(hw->hw_addr)) ++ usec_delay(100); ++ value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); ++ if (IXGBE_REMOVED(hw->hw_addr)) + goto out; + if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) + goto out; + } + +- hw_dbg(hw, "PCIe transaction pending bit also did not clear.\n"); ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, ++ "PCIe transaction pending bit also did not clear.\n"); + status = IXGBE_ERR_MASTER_REQUESTS_PENDING; + + out: +@@ -2544,7 +3042,7 @@ out: + * Acquires the SWFW semaphore through the GSSR register for the specified + * function (CSR, PHY0, PHY1, EEPROM, Flash) + **/ +-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) ++s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask) + { + u32 gssr = 0; + u32 swmask = mask; +@@ -2552,6 +3050,8 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) + u32 timeout = 200; + u32 i; + ++ DEBUGFUNC("ixgbe_acquire_swfw_sync"); ++ + for (i = 0; i < timeout; i++) { + /* + * SW NVM semaphore bit is used for access to all +@@ -2565,11 +3065,11 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) + gssr |= swmask; + IXGBE_WRITE_REG(hw, IXGBE_GSSR, gssr); + ixgbe_release_eeprom_semaphore(hw); +- return 0; ++ return IXGBE_SUCCESS; + } else { + /* Resource is currently in use by FW or SW */ + ixgbe_release_eeprom_semaphore(hw); +- usleep_range(5000, 10000); ++ msec_delay(5); + } + } + +@@ -2577,7 +3077,7 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) + if (gssr & (fwmask | swmask)) + ixgbe_release_swfw_sync(hw, gssr & (fwmask | swmask)); + +- usleep_range(5000, 10000); ++ msec_delay(5); + return IXGBE_ERR_SWFW_SYNC; + } + +@@ -2589,11 +3089,13 @@ s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask) + * Releases the SWFW semaphore through the GSSR register for the specified + * function (CSR, 
PHY0, PHY1, EEPROM, Flash) + **/ +-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) ++void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask) + { + u32 gssr; + u32 swmask = mask; + ++ DEBUGFUNC("ixgbe_release_swfw_sync"); ++ + ixgbe_get_eeprom_semaphore(hw); + + gssr = IXGBE_READ_REG(hw, IXGBE_GSSR); +@@ -2604,47 +3106,21 @@ void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask) + } + + /** +- * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read +- * @hw: pointer to hardware structure +- * @reg_val: Value we read from AUTOC +- * @locked: bool to indicate whether the SW/FW lock should be taken. Never +- * true in this the generic case. ++ * ixgbe_disable_sec_rx_path_generic - Stops the receive data path ++ * @hw: pointer to hardware structure + * +- * The default case requires no protection so just to the register read. ++ * Stops the receive data path and waits for the HW to internally empty ++ * the Rx security block + **/ +-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) ++s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw) + { +- *locked = false; +- *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); +- return 0; +-} ++#define IXGBE_MAX_SECRX_POLL 40 + +-/** +- * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write +- * @hw: pointer to hardware structure +- * @reg_val: value to write to AUTOC +- * @locked: bool to indicate whether the SW/FW lock was already taken by +- * previous read. +- **/ +-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) +-{ +- IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); +- return 0; +-} +- +-/** +- * ixgbe_disable_rx_buff_generic - Stops the receive data path +- * @hw: pointer to hardware structure +- * +- * Stops the receive data path and waits for the HW to internally +- * empty the Rx security block. 
+- **/ +-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) +-{ +-#define IXGBE_MAX_SECRX_POLL 40 + int i; + int secrxreg; + ++ DEBUGFUNC("ixgbe_disable_sec_rx_path_generic"); ++ + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg |= IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); +@@ -2654,33 +3130,66 @@ s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw) + break; + else + /* Use interrupt-safe sleep just in case */ +- udelay(1000); ++ usec_delay(1000); + } + + /* For informational purposes only */ + if (i >= IXGBE_MAX_SECRX_POLL) +- hw_dbg(hw, "Rx unit being enabled before security path fully disabled. Continuing with init.\n"); ++ DEBUGOUT("Rx unit being enabled before security " ++ "path fully disabled. Continuing with init.\n"); ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * prot_autoc_read_generic - Hides MAC differences needed for AUTOC read ++ * @hw: pointer to hardware structure ++ * @reg_val: Value we read from AUTOC ++ * ++ * The default case requires no protection so just to the register read. ++ */ ++s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *locked, u32 *reg_val) ++{ ++ *locked = false; ++ *reg_val = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ return IXGBE_SUCCESS; ++} + +- return 0; ++/** ++ * prot_autoc_write_generic - Hides MAC differences needed for AUTOC write ++ * @hw: pointer to hardware structure ++ * @reg_val: value to write to AUTOC ++ * @locked: bool to indicate whether the SW/FW lock was already taken by ++ * previous read. ++ * ++ * The default case requires no protection so just to the register write. 
++ */ ++s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked) ++{ ++ UNREFERENCED_1PARAMETER(locked); + ++ IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_val); ++ return IXGBE_SUCCESS; + } + + /** +- * ixgbe_enable_rx_buff - Enables the receive data path ++ * ixgbe_enable_sec_rx_path_generic - Enables the receive data path + * @hw: pointer to hardware structure + * +- * Enables the receive data path ++ * Enables the receive data path. + **/ +-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) ++s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw) + { +- int secrxreg; ++ u32 secrxreg; ++ ++ DEBUGFUNC("ixgbe_enable_sec_rx_path_generic"); + + secrxreg = IXGBE_READ_REG(hw, IXGBE_SECRXCTRL); + secrxreg &= ~IXGBE_SECRXCTRL_RX_DIS; + IXGBE_WRITE_REG(hw, IXGBE_SECRXCTRL, secrxreg); + IXGBE_WRITE_FLUSH(hw); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -2692,9 +3201,14 @@ s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw) + **/ + s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) + { +- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, regval); ++ DEBUGFUNC("ixgbe_enable_rx_dma_generic"); ++ ++ if (regval & IXGBE_RXCTRL_RXEN) ++ ixgbe_enable_rx(hw); ++ else ++ ixgbe_disable_rx(hw); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -2705,12 +3219,17 @@ s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval) + s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) + { + ixgbe_link_speed speed = 0; +- bool link_up = false; +- u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC); ++ bool link_up = 0; ++ u32 autoc_reg = 0; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; + bool locked = false; + ++ DEBUGFUNC("ixgbe_blink_led_start_generic"); ++ ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ + /* + * Link must be up to auto-blink the LEDs; + * Force it if link is down. 
+@@ -2719,19 +3238,18 @@ s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index) + + if (!link_up) { + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); +- if (ret_val) ++ if (ret_val != IXGBE_SUCCESS) + goto out; + + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + autoc_reg |= IXGBE_AUTOC_FLU; + + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); +- if (ret_val) ++ if (ret_val != IXGBE_SUCCESS) + goto out; + + IXGBE_WRITE_FLUSH(hw); +- +- usleep_range(10000, 20000); ++ msec_delay(10); + } + + led_reg &= ~IXGBE_LED_MODE_MASK(index); +@@ -2752,18 +3270,23 @@ s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index) + { + u32 autoc_reg = 0; + u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; + bool locked = false; + ++ DEBUGFUNC("ixgbe_blink_led_stop_generic"); ++ ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ + ret_val = hw->mac.ops.prot_autoc_read(hw, &locked, &autoc_reg); +- if (ret_val) ++ if (ret_val != IXGBE_SUCCESS) + goto out; + + autoc_reg &= ~IXGBE_AUTOC_FLU; + autoc_reg |= IXGBE_AUTOC_AN_RESTART; + + ret_val = hw->mac.ops.prot_autoc_write(hw, autoc_reg, locked); +- if (ret_val) ++ if (ret_val != IXGBE_SUCCESS) + goto out; + + led_reg &= ~IXGBE_LED_MODE_MASK(index); +@@ -2785,20 +3308,24 @@ out: + * pointer, and returns the value at that location. This is used in both + * get and set mac_addr routines. + **/ +-static s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, +- u16 *san_mac_offset) ++STATIC s32 ixgbe_get_san_mac_addr_offset(struct ixgbe_hw *hw, ++ u16 *san_mac_offset) + { + s32 ret_val; + ++ DEBUGFUNC("ixgbe_get_san_mac_addr_offset"); ++ + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. 
+ */ + ret_val = hw->eeprom.ops.read(hw, IXGBE_SAN_MAC_ADDR_PTR, + san_mac_offset); +- if (ret_val) +- hw_err(hw, "eeprom read at offset %d failed\n", +- IXGBE_SAN_MAC_ADDR_PTR); ++ if (ret_val) { ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom at offset %d failed", ++ IXGBE_SAN_MAC_ADDR_PTR); ++ } + + return ret_val; + } +@@ -2819,14 +3346,15 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) + u8 i; + s32 ret_val; + ++ DEBUGFUNC("ixgbe_get_san_mac_addr_generic"); ++ + /* + * First read the EEPROM pointer to see if the MAC addresses are + * available. If they're not, no point in calling set_lan_id() here. + */ + ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); + if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) +- +- goto san_mac_addr_clr; ++ goto san_mac_addr_out; + + /* make sure we know which port we need to program */ + hw->mac.ops.set_lan_id(hw); +@@ -2837,23 +3365,61 @@ s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) + ret_val = hw->eeprom.ops.read(hw, san_mac_offset, + &san_mac_data); + if (ret_val) { +- hw_err(hw, "eeprom read at offset %d failed\n", +- san_mac_offset); +- goto san_mac_addr_clr; ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", ++ san_mac_offset); ++ goto san_mac_addr_out; + } + san_mac_addr[i * 2] = (u8)(san_mac_data); + san_mac_addr[i * 2 + 1] = (u8)(san_mac_data >> 8); + san_mac_offset++; + } +- return 0; ++ return IXGBE_SUCCESS; + +-san_mac_addr_clr: +- /* No addresses available in this EEPROM. It's not necessarily an ++san_mac_addr_out: ++ /* ++ * No addresses available in this EEPROM. It's not an + * error though, so just wipe the local address and return. 
+ */ + for (i = 0; i < 6; i++) + san_mac_addr[i] = 0xFF; +- return ret_val; ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_set_san_mac_addr_generic - Write the SAN MAC address to the EEPROM ++ * @hw: pointer to hardware structure ++ * @san_mac_addr: SAN MAC address ++ * ++ * Write a SAN MAC address to the EEPROM. ++ **/ ++s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr) ++{ ++ s32 ret_val; ++ u16 san_mac_data, san_mac_offset; ++ u8 i; ++ ++ DEBUGFUNC("ixgbe_set_san_mac_addr_generic"); ++ ++ /* Look for SAN mac address pointer. If not defined, return */ ++ ret_val = ixgbe_get_san_mac_addr_offset(hw, &san_mac_offset); ++ if (ret_val || san_mac_offset == 0 || san_mac_offset == 0xFFFF) ++ return IXGBE_ERR_NO_SAN_ADDR_PTR; ++ ++ /* Make sure we know which port we need to write */ ++ hw->mac.ops.set_lan_id(hw); ++ /* Apply the port offset to the address offset */ ++ (hw->bus.func) ? (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT1_OFFSET) : ++ (san_mac_offset += IXGBE_SAN_MAC_ADDR_PORT0_OFFSET); ++ ++ for (i = 0; i < 3; i++) { ++ san_mac_data = (u16)((u16)(san_mac_addr[i * 2 + 1]) << 8); ++ san_mac_data |= (u16)(san_mac_addr[i * 2]); ++ hw->eeprom.ops.write(hw, san_mac_offset, san_mac_data); ++ san_mac_offset++; ++ } ++ ++ return IXGBE_SUCCESS; + } + + /** +@@ -2876,6 +3442,9 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + pcie_offset = IXGBE_PCIE_MSIX_82599_CAPS; + max_msix_count = IXGBE_MAX_MSIX_VECTORS_82599; + break; +@@ -2883,8 +3452,9 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) + return msix_count; + } + +- msix_count = ixgbe_read_pci_cfg_word(hw, pcie_offset); +- if (ixgbe_removed(hw->hw_addr)) ++ DEBUGFUNC("ixgbe_get_pcie_msix_count_generic"); ++ msix_count = IXGBE_READ_PCIE_WORD(hw, pcie_offset); ++ if (IXGBE_REMOVED(hw->hw_addr)) + msix_count = 0; + msix_count &= 
IXGBE_PCIE_MSIX_TBL_SZ_MASK; + +@@ -2898,6 +3468,75 @@ u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw) + } + + /** ++ * ixgbe_insert_mac_addr_generic - Find a RAR for this mac address ++ * @hw: pointer to hardware structure ++ * @addr: Address to put into receive address register ++ * @vmdq: VMDq pool to assign ++ * ++ * Puts an ethernet address into a receive address register, or ++ * finds the rar that it is aleady in; adds to the pool list ++ **/ ++s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq) ++{ ++ static const u32 NO_EMPTY_RAR_FOUND = 0xFFFFFFFF; ++ u32 first_empty_rar = NO_EMPTY_RAR_FOUND; ++ u32 rar; ++ u32 rar_low, rar_high; ++ u32 addr_low, addr_high; ++ ++ DEBUGFUNC("ixgbe_insert_mac_addr_generic"); ++ ++ /* swap bytes for HW little endian */ ++ addr_low = addr[0] | (addr[1] << 8) ++ | (addr[2] << 16) ++ | (addr[3] << 24); ++ addr_high = addr[4] | (addr[5] << 8); ++ ++ /* ++ * Either find the mac_id in rar or find the first empty space. ++ * rar_highwater points to just after the highest currently used ++ * rar in order to shorten the search. It grows when we add a new ++ * rar to the top. 
++ */ ++ for (rar = 0; rar < hw->mac.rar_highwater; rar++) { ++ rar_high = IXGBE_READ_REG(hw, IXGBE_RAH(rar)); ++ ++ if (((IXGBE_RAH_AV & rar_high) == 0) ++ && first_empty_rar == NO_EMPTY_RAR_FOUND) { ++ first_empty_rar = rar; ++ } else if ((rar_high & 0xFFFF) == addr_high) { ++ rar_low = IXGBE_READ_REG(hw, IXGBE_RAL(rar)); ++ if (rar_low == addr_low) ++ break; /* found it already in the rars */ ++ } ++ } ++ ++ if (rar < hw->mac.rar_highwater) { ++ /* already there so just add to the pool bits */ ++ ixgbe_set_vmdq(hw, rar, vmdq); ++ } else if (first_empty_rar != NO_EMPTY_RAR_FOUND) { ++ /* stick it into first empty RAR slot we found */ ++ rar = first_empty_rar; ++ ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); ++ } else if (rar == hw->mac.rar_highwater) { ++ /* add it to the top of the list and inc the highwater mark */ ++ ixgbe_set_rar(hw, rar, addr, vmdq, IXGBE_RAH_AV); ++ hw->mac.rar_highwater++; ++ } else if (rar >= hw->mac.num_rar_entries) { ++ return IXGBE_ERR_INVALID_MAC_ADDR; ++ } ++ ++ /* ++ * If we found rar[0], make sure the default pool bit (we use pool 0) ++ * remains cleared to be sure default pool packets will get delivered ++ */ ++ if (rar == 0) ++ ixgbe_clear_vmdq(hw, rar, 0); ++ ++ return rar; ++} ++ ++/** + * ixgbe_clear_vmdq_generic - Disassociate a VMDq pool index from a rx address + * @hw: pointer to hardware struct + * @rar: receive address register index to disassociate +@@ -2908,16 +3547,19 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) + u32 mpsar_lo, mpsar_hi; + u32 rar_entries = hw->mac.num_rar_entries; + ++ DEBUGFUNC("ixgbe_clear_vmdq_generic"); ++ + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { +- hw_dbg(hw, "RAR index %d is out of range.\n", rar); ++ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, ++ "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + + mpsar_lo = IXGBE_READ_REG(hw, IXGBE_MPSAR_LO(rar)); + mpsar_hi = IXGBE_READ_REG(hw, 
IXGBE_MPSAR_HI(rar)); + +- if (ixgbe_removed(hw->hw_addr)) ++ if (IXGBE_REMOVED(hw->hw_addr)) + goto done; + + if (!mpsar_lo && !mpsar_hi) +@@ -2941,10 +3583,11 @@ s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) + } + + /* was that the last pool using this rar? */ +- if (mpsar_lo == 0 && mpsar_hi == 0 && rar != 0) ++ if (mpsar_lo == 0 && mpsar_hi == 0 && ++ rar != 0 && rar != hw->mac.san_mac_rar_index) + hw->mac.ops.clear_rar(hw, rar); + done: +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -2958,9 +3601,12 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) + u32 mpsar; + u32 rar_entries = hw->mac.num_rar_entries; + ++ DEBUGFUNC("ixgbe_set_vmdq_generic"); ++ + /* Make sure we are using a valid rar index range */ + if (rar >= rar_entries) { +- hw_dbg(hw, "RAR index %d is out of range.\n", rar); ++ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, ++ "RAR index %d is out of range.\n", rar); + return IXGBE_ERR_INVALID_ARGUMENT; + } + +@@ -2973,7 +3619,7 @@ s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq) + mpsar |= 1 << (vmdq - 32); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), mpsar); + } +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -2990,6 +3636,8 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) + { + u32 rar = hw->mac.san_mac_rar_index; + ++ DEBUGFUNC("ixgbe_set_vmdq_san_mac"); ++ + if (vmdq < 32) { + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_LO(rar), 1 << vmdq); + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 0); +@@ -2998,7 +3646,7 @@ s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq) + IXGBE_WRITE_REG(hw, IXGBE_MPSAR_HI(rar), 1 << (vmdq - 32)); + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -3009,10 +3657,13 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) + { + int i; + ++ DEBUGFUNC("ixgbe_init_uta_tables_generic"); ++ DEBUGOUT(" Clearing UTA\n"); ++ + for (i = 0; i < 128; i++) + IXGBE_WRITE_REG(hw, IXGBE_UTA(i), 0); + +- return 0; ++ return IXGBE_SUCCESS; + } + + 
/** +@@ -3023,66 +3674,65 @@ s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw) + * return the VLVF index where this VLAN id should be placed + * + **/ +-static s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan) ++s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass) + { +- u32 bits = 0; +- u32 first_empty_slot = 0; +- s32 regindex; ++ s32 regindex, first_empty_slot; ++ u32 bits; + + /* short cut the special case */ + if (vlan == 0) + return 0; + +- /* +- * Search for the vlan id in the VLVF entries. Save off the first empty +- * slot found along the way +- */ +- for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { ++ /* if vlvf_bypass is set we don't want to use an empty slot, we ++ * will simply bypass the VLVF if there are no entries present in the ++ * VLVF that contain our VLAN ++ */ ++ first_empty_slot = vlvf_bypass ? IXGBE_ERR_NO_SPACE : 0; ++ ++ /* add VLAN enable bit for comparison */ ++ vlan |= IXGBE_VLVF_VIEN; ++ ++ /* Search for the vlan id in the VLVF entries. Save off the first empty ++ * slot found along the way. ++ * ++ * pre-decrement loop covering (IXGBE_VLVF_ENTRIES - 1) .. 1 ++ */ ++ for (regindex = IXGBE_VLVF_ENTRIES; --regindex;) { + bits = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); +- if (!bits && !(first_empty_slot)) ++ if (bits == vlan) ++ return regindex; ++ if (!first_empty_slot && !bits) + first_empty_slot = regindex; +- else if ((bits & 0x0FFF) == vlan) +- break; + } + +- /* +- * If regindex is less than IXGBE_VLVF_ENTRIES, then we found the vlan +- * in the VLVF. Else use the first empty VLVF register for this +- * vlan id. +- */ +- if (regindex >= IXGBE_VLVF_ENTRIES) { +- if (first_empty_slot) +- regindex = first_empty_slot; +- else { +- hw_dbg(hw, "No space in VLVF.\n"); +- regindex = IXGBE_ERR_NO_SPACE; +- } +- } ++ /* If we are here then we didn't find the VLAN. Return first empty ++ * slot we found during our search, else error. 
++ */ ++ if (!first_empty_slot) ++ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "No space in VLVF.\n"); + +- return regindex; ++ return first_empty_slot ? first_empty_slot : IXGBE_ERR_NO_SPACE; + } + + /** + * ixgbe_set_vfta_generic - Set VLAN filter table + * @hw: pointer to hardware structure + * @vlan: VLAN id to write to VLAN filter +- * @vind: VMDq output index that maps queue to VLAN id in VFVFB +- * @vlan_on: boolean flag to turn on/off VLAN in VFVF ++ * @vind: VMDq output index that maps queue to VLAN id in VLVFB ++ * @vlan_on: boolean flag to turn on/off VLAN ++ * @vlvf_bypass: boolean flag indicating updating default pool is okay + * + * Turn on/off specified VLAN in the VLAN filter table. + **/ + s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, +- bool vlan_on) ++ bool vlan_on, bool vlvf_bypass) + { +- s32 regindex; +- u32 bitindex; +- u32 vfta; +- u32 bits; +- u32 vt; +- u32 targetbit; +- bool vfta_changed = false; ++ u32 regidx, vfta_delta, vfta; ++ s32 ret_val; + +- if (vlan > 4095) ++ DEBUGFUNC("ixgbe_set_vfta_generic"); ++ ++ if (vlan > 4095 || vind > 63) + return IXGBE_ERR_PARAM; + + /* +@@ -3097,112 +3747,124 @@ s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, + * bits[11-5]: which register + * bits[4-0]: which bit in the register + */ +- regindex = (vlan >> 5) & 0x7F; +- bitindex = vlan & 0x1F; +- targetbit = (1 << bitindex); +- vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regindex)); +- +- if (vlan_on) { +- if (!(vfta & targetbit)) { +- vfta |= targetbit; +- vfta_changed = true; +- } +- } else { +- if ((vfta & targetbit)) { +- vfta &= ~targetbit; +- vfta_changed = true; +- } +- } ++ regidx = vlan / 32; ++ vfta_delta = 1 << (vlan % 32); ++ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(regidx)); ++ ++ /* ++ * vfta_delta represents the difference between the current value ++ * of vfta and the value we want in the register. Since the diff ++ * is an XOR mask we can just update the vfta using an XOR ++ */ ++ vfta_delta &= vlan_on ? 
~vfta : vfta; ++ vfta ^= vfta_delta; + + /* Part 2 +- * If VT Mode is set ++ * Call ixgbe_set_vlvf_generic to set VLVFB and VLVF ++ */ ++ ret_val = ixgbe_set_vlvf_generic(hw, vlan, vind, vlan_on, &vfta_delta, ++ vfta, vlvf_bypass); ++ if (ret_val != IXGBE_SUCCESS) { ++ if (vlvf_bypass) ++ goto vfta_update; ++ return ret_val; ++ } ++ ++vfta_update: ++ /* Update VFTA now that we are ready for traffic */ ++ if (vfta_delta) ++ IXGBE_WRITE_REG(hw, IXGBE_VFTA(regidx), vfta); ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_set_vlvf_generic - Set VLAN Pool Filter ++ * @hw: pointer to hardware structure ++ * @vlan: VLAN id to write to VLAN filter ++ * @vind: VMDq output index that maps queue to VLAN id in VLVFB ++ * @vlan_on: boolean flag to turn on/off VLAN in VLVF ++ * @vfta_delta: pointer to the difference between the current value of VFTA ++ * and the desired value ++ * @vfta: the desired value of the VFTA ++ * @vlvf_bypass: boolean flag indicating updating default pool is okay ++ * ++ * Turn on/off specified bit in VLVF table. 
++ **/ ++s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, ++ bool vlan_on, u32 *vfta_delta, u32 vfta, ++ bool vlvf_bypass) ++{ ++ u32 bits; ++ s32 vlvf_index; ++ ++ DEBUGFUNC("ixgbe_set_vlvf_generic"); ++ ++ if (vlan > 4095 || vind > 63) ++ return IXGBE_ERR_PARAM; ++ ++ /* If VT Mode is set + * Either vlan_on + * make sure the vlan is in VLVF + * set the vind bit in the matching VLVFB + * Or !vlan_on + * clear the pool bit and possibly the vind + */ +- vt = IXGBE_READ_REG(hw, IXGBE_VT_CTL); +- if (vt & IXGBE_VT_CTL_VT_ENABLE) { +- s32 vlvf_index; +- +- vlvf_index = ixgbe_find_vlvf_slot(hw, vlan); +- if (vlvf_index < 0) +- return vlvf_index; +- +- if (vlan_on) { +- /* set the pool bit */ +- if (vind < 32) { +- bits = IXGBE_READ_REG(hw, +- IXGBE_VLVFB(vlvf_index*2)); +- bits |= (1 << vind); +- IXGBE_WRITE_REG(hw, +- IXGBE_VLVFB(vlvf_index*2), +- bits); +- } else { +- bits = IXGBE_READ_REG(hw, +- IXGBE_VLVFB((vlvf_index*2)+1)); +- bits |= (1 << (vind-32)); +- IXGBE_WRITE_REG(hw, +- IXGBE_VLVFB((vlvf_index*2)+1), +- bits); +- } +- } else { +- /* clear the pool bit */ +- if (vind < 32) { +- bits = IXGBE_READ_REG(hw, +- IXGBE_VLVFB(vlvf_index*2)); +- bits &= ~(1 << vind); +- IXGBE_WRITE_REG(hw, +- IXGBE_VLVFB(vlvf_index*2), +- bits); +- bits |= IXGBE_READ_REG(hw, +- IXGBE_VLVFB((vlvf_index*2)+1)); +- } else { +- bits = IXGBE_READ_REG(hw, +- IXGBE_VLVFB((vlvf_index*2)+1)); +- bits &= ~(1 << (vind-32)); +- IXGBE_WRITE_REG(hw, +- IXGBE_VLVFB((vlvf_index*2)+1), +- bits); +- bits |= IXGBE_READ_REG(hw, +- IXGBE_VLVFB(vlvf_index*2)); +- } +- } ++ if (!(IXGBE_READ_REG(hw, IXGBE_VT_CTL) & IXGBE_VT_CTL_VT_ENABLE)) ++ return IXGBE_SUCCESS; + +- /* +- * If there are still bits set in the VLVFB registers +- * for the VLAN ID indicated we need to see if the +- * caller is requesting that we clear the VFTA entry bit. 
+- * If the caller has requested that we clear the VFTA +- * entry bit but there are still pools/VFs using this VLAN +- * ID entry then ignore the request. We're not worried +- * about the case where we're turning the VFTA VLAN ID +- * entry bit on, only when requested to turn it off as +- * there may be multiple pools and/or VFs using the +- * VLAN ID entry. In that case we cannot clear the +- * VFTA bit until all pools/VFs using that VLAN ID have also +- * been cleared. This will be indicated by "bits" being +- * zero. ++ vlvf_index = ixgbe_find_vlvf_slot(hw, vlan, vlvf_bypass); ++ if (vlvf_index < 0) ++ return vlvf_index; ++ ++ bits = IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32)); ++ ++ /* set the pool bit */ ++ bits |= 1 << (vind % 32); ++ if (vlan_on) ++ goto vlvf_update; ++ ++ /* clear the pool bit */ ++ bits ^= 1 << (vind % 32); ++ ++ if (!bits && ++ !IXGBE_READ_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + 1 - vind / 32))) { ++ /* Clear VFTA first, then disable VLVF. Otherwise ++ * we run the risk of stray packets leaking into ++ * the PF via the default pool + */ +- if (bits) { +- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), +- (IXGBE_VLVF_VIEN | vlan)); +- if (!vlan_on) { +- /* someone wants to clear the vfta entry +- * but some pools/VFs are still using it. +- * Ignore it. */ +- vfta_changed = false; +- } +- } else { +- IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); +- } ++ if (*vfta_delta) ++ IXGBE_WRITE_REG(hw, IXGBE_VFTA(vlan / 32), vfta); ++ ++ /* disable VLVF and clear remaining bit from pool */ ++ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), 0); ++ ++ return IXGBE_SUCCESS; + } + +- if (vfta_changed) +- IXGBE_WRITE_REG(hw, IXGBE_VFTA(regindex), vfta); ++ /* If there are still bits set in the VLVFB registers ++ * for the VLAN ID indicated we need to see if the ++ * caller is requesting that we clear the VFTA entry bit. 
++ * If the caller has requested that we clear the VFTA ++ * entry bit but there are still pools/VFs using this VLAN ++ * ID entry then ignore the request. We're not worried ++ * about the case where we're turning the VFTA VLAN ID ++ * entry bit on, only when requested to turn it off as ++ * there may be multiple pools and/or VFs using the ++ * VLAN ID entry. In that case we cannot clear the ++ * VFTA bit until all pools/VFs using that VLAN ID have also ++ * been cleared. This will be indicated by "bits" being ++ * zero. ++ */ ++ *vfta_delta = 0; ++ ++vlvf_update: ++ /* record pool change and enable VLAN ID if not already enabled */ ++ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(vlvf_index * 2 + vind / 32), bits); ++ IXGBE_WRITE_REG(hw, IXGBE_VLVF(vlvf_index), IXGBE_VLVF_VIEN | vlan); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -3215,16 +3877,44 @@ s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw) + { + u32 offset; + ++ DEBUGFUNC("ixgbe_clear_vfta_generic"); ++ + for (offset = 0; offset < hw->mac.vft_size; offset++) + IXGBE_WRITE_REG(hw, IXGBE_VFTA(offset), 0); + + for (offset = 0; offset < IXGBE_VLVF_ENTRIES; offset++) { + IXGBE_WRITE_REG(hw, IXGBE_VLVF(offset), 0); +- IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset*2), 0); +- IXGBE_WRITE_REG(hw, IXGBE_VLVFB((offset*2)+1), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(offset * 2 + 1), 0); ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_need_crosstalk_fix - Determine if we need to do cross talk fix ++ * @hw: pointer to hardware structure ++ * ++ * Contains the logic to identify if we need to verify link for the ++ * crosstalk fix ++ **/ ++static bool ixgbe_need_crosstalk_fix(struct ixgbe_hw *hw) ++{ ++ ++ /* Does FW say we need the fix */ ++ if (!hw->need_crosstalk_fix) ++ return false; ++ ++ /* Only consider SFP+ PHYs i.e. 
media type fiber */ ++ switch (hw->mac.ops.get_media_type(hw)) { ++ case ixgbe_media_type_fiber: ++ case ixgbe_media_type_fiber_qsfp: ++ break; ++ default: ++ return false; + } + +- return 0; ++ return true; + } + + /** +@@ -3242,25 +3932,56 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + u32 links_reg, links_orig; + u32 i; + ++ DEBUGFUNC("ixgbe_check_mac_link_generic"); ++ ++ /* If Crosstalk fix enabled do the sanity check of making sure ++ * the SFP+ cage is full. ++ */ ++ if (ixgbe_need_crosstalk_fix(hw)) { ++ u32 sfp_cage_full; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82599EB: ++ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & ++ IXGBE_ESDP_SDP2; ++ break; ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & ++ IXGBE_ESDP_SDP0; ++ break; ++ default: ++ /* sanity check - No SFP+ devices here */ ++ sfp_cage_full = false; ++ break; ++ } ++ ++ if (!sfp_cage_full) { ++ *link_up = false; ++ *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ return IXGBE_SUCCESS; ++ } ++ } ++ + /* clear the old state */ + links_orig = IXGBE_READ_REG(hw, IXGBE_LINKS); + + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + + if (links_orig != links_reg) { +- hw_dbg(hw, "LINKS changed from %08X to %08X\n", +- links_orig, links_reg); ++ DEBUGOUT2("LINKS changed from %08X to %08X\n", ++ links_orig, links_reg); + } + + if (link_up_wait_to_complete) { +- for (i = 0; i < IXGBE_LINK_UP_TIME; i++) { ++ for (i = 0; i < hw->mac.max_link_up_time; i++) { + if (links_reg & IXGBE_LINKS_UP) { + *link_up = true; + break; + } else { + *link_up = false; + } +- msleep(100); ++ msec_delay(100); + links_reg = IXGBE_READ_REG(hw, IXGBE_LINKS); + } + } else { +@@ -3270,19 +3991,36 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + *link_up = false; + } + +- if ((links_reg & IXGBE_LINKS_SPEED_82599) == +- IXGBE_LINKS_SPEED_10G_82599) ++ switch (links_reg & IXGBE_LINKS_SPEED_82599) { ++ case 
IXGBE_LINKS_SPEED_10G_82599: + *speed = IXGBE_LINK_SPEED_10GB_FULL; +- else if ((links_reg & IXGBE_LINKS_SPEED_82599) == +- IXGBE_LINKS_SPEED_1G_82599) ++ if (hw->mac.type >= ixgbe_mac_X550) { ++ if (links_reg & IXGBE_LINKS_SPEED_NON_STD) ++ *speed = IXGBE_LINK_SPEED_2_5GB_FULL; ++ } ++ break; ++ case IXGBE_LINKS_SPEED_1G_82599: + *speed = IXGBE_LINK_SPEED_1GB_FULL; +- else if ((links_reg & IXGBE_LINKS_SPEED_82599) == +- IXGBE_LINKS_SPEED_100_82599) ++ break; ++ case IXGBE_LINKS_SPEED_100_82599: + *speed = IXGBE_LINK_SPEED_100_FULL; +- else ++ if (hw->mac.type == ixgbe_mac_X550) { ++ if (links_reg & IXGBE_LINKS_SPEED_NON_STD) ++ *speed = IXGBE_LINK_SPEED_5GB_FULL; ++ } ++ break; ++ case IXGBE_LINKS_SPEED_10_X550EM_A: ++ *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || ++ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { ++ *speed = IXGBE_LINK_SPEED_10_FULL; ++ } ++ break; ++ default: + *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -3296,11 +4034,13 @@ s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + * block to check the support for the alternative WWNN/WWPN prefix support. 
+ **/ + s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, +- u16 *wwpn_prefix) ++ u16 *wwpn_prefix) + { + u16 offset, caps; + u16 alt_san_mac_blk_offset; + ++ DEBUGFUNC("ixgbe_get_wwn_prefix_generic"); ++ + /* clear output first */ + *wwnn_prefix = 0xFFFF; + *wwpn_prefix = 0xFFFF; +@@ -3323,68 +4063,102 @@ s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + + /* get the corresponding prefix for WWNN/WWPN */ + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET; +- if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) +- hw_err(hw, "eeprom read at offset %d failed\n", offset); ++ if (hw->eeprom.ops.read(hw, offset, wwnn_prefix)) { ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", offset); ++ } + + offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET; + if (hw->eeprom.ops.read(hw, offset, wwpn_prefix)) + goto wwn_prefix_err; + + wwn_prefix_out: +- return 0; ++ return IXGBE_SUCCESS; + + wwn_prefix_err: +- hw_err(hw, "eeprom read at offset %d failed\n", offset); +- return 0; ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", offset); ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_get_fcoe_boot_status_generic - Get FCOE boot status from EEPROM ++ * @hw: pointer to hardware structure ++ * @bs: the fcoe boot status ++ * ++ * This function will read the FCOE boot status from the iSCSI FCOE block ++ **/ ++s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs) ++{ ++ u16 offset, caps, flags; ++ s32 status; ++ ++ DEBUGFUNC("ixgbe_get_fcoe_boot_status_generic"); ++ ++ /* clear output first */ ++ *bs = ixgbe_fcoe_bootstatus_unavailable; ++ ++ /* check if FCOE IBA block is present */ ++ offset = IXGBE_FCOE_IBA_CAPS_BLK_PTR; ++ status = hw->eeprom.ops.read(hw, offset, &caps); ++ if (status != IXGBE_SUCCESS) ++ goto out; ++ ++ if (!(caps & IXGBE_FCOE_IBA_CAPS_FCOE)) ++ goto out; ++ ++ /* check if iSCSI FCOE block is populated */ ++ 
status = hw->eeprom.ops.read(hw, IXGBE_ISCSI_FCOE_BLK_PTR, &offset); ++ if (status != IXGBE_SUCCESS) ++ goto out; ++ ++ if ((offset == 0) || (offset == 0xFFFF)) ++ goto out; ++ ++ /* read fcoe flags in iSCSI FCOE block */ ++ offset = offset + IXGBE_ISCSI_FCOE_FLAGS_OFFSET; ++ status = hw->eeprom.ops.read(hw, offset, &flags); ++ if (status != IXGBE_SUCCESS) ++ goto out; ++ ++ if (flags & IXGBE_ISCSI_FCOE_FLAGS_ENABLE) ++ *bs = ixgbe_fcoe_bootstatus_enabled; ++ else ++ *bs = ixgbe_fcoe_bootstatus_disabled; ++ ++out: ++ return status; + } + + /** + * ixgbe_set_mac_anti_spoofing - Enable/Disable MAC anti-spoofing + * @hw: pointer to hardware structure +- * @enable: enable or disable switch for anti-spoofing +- * @pf: Physical Function pool - do not enable anti-spoofing for the PF ++ * @enable: enable or disable switch for MAC anti-spoofing ++ * @vf: Virtual Function pool - VF Pool to set for MAC anti-spoofing + * + **/ +-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf) ++void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) + { +- int j; +- int pf_target_reg = pf >> 3; +- int pf_target_shift = pf % 8; +- u32 pfvfspoof = 0; ++ int vf_target_reg = vf >> 3; ++ int vf_target_shift = vf % 8; ++ u32 pfvfspoof; + + if (hw->mac.type == ixgbe_mac_82598EB) + return; + ++ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); + if (enable) +- pfvfspoof = IXGBE_SPOOF_MACAS_MASK; +- +- /* +- * PFVFSPOOF register array is size 8 with 8 bits assigned to +- * MAC anti-spoof enables in each register array element. +- */ +- for (j = 0; j < pf_target_reg; j++) +- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); +- +- /* +- * The PF should be allowed to spoof so that it can support +- * emulation mode NICs. 
Do not set the bits assigned to the PF +- */ +- pfvfspoof &= (1 << pf_target_shift) - 1; +- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), pfvfspoof); +- +- /* +- * Remaining pools belong to the PF so they do not need to have +- * anti-spoofing enabled. +- */ +- for (j++; j < IXGBE_PFVFSPOOF_REG_COUNT; j++) +- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(j), 0); ++ pfvfspoof |= (1 << vf_target_shift); ++ else ++ pfvfspoof &= ~(1 << vf_target_shift); ++ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); + } + + /** + * ixgbe_set_vlan_anti_spoofing - Enable/Disable VLAN anti-spoofing + * @hw: pointer to hardware structure + * @enable: enable or disable switch for VLAN anti-spoofing +- * @pf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing ++ * @vf: Virtual Function pool - VF Pool to set for VLAN anti-spoofing + * + **/ + void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) +@@ -3414,90 +4188,27 @@ void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf) + **/ + s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps) + { ++ DEBUGFUNC("ixgbe_get_device_caps_generic"); ++ + hw->eeprom.ops.read(hw, IXGBE_DEVICE_CAPS, device_caps); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +- * ixgbe_set_rxpba_generic - Initialize RX packet buffer +- * @hw: pointer to hardware structure +- * @num_pb: number of packet buffers to allocate +- * @headroom: reserve n KB of headroom +- * @strategy: packet buffer allocation strategy ++ * ixgbe_calculate_checksum - Calculate checksum for buffer ++ * @buffer: pointer to EEPROM ++ * @length: size of EEPROM to calculate a checksum for ++ * Calculates the checksum for some buffer on a specified length. The ++ * checksum calculated is returned. 
+ **/ +-void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, +- int num_pb, +- u32 headroom, +- int strategy) +-{ +- u32 pbsize = hw->mac.rx_pb_size; +- int i = 0; +- u32 rxpktsize, txpktsize, txpbthresh; +- +- /* Reserve headroom */ +- pbsize -= headroom; +- +- if (!num_pb) +- num_pb = 1; +- +- /* Divide remaining packet buffer space amongst the number +- * of packet buffers requested using supplied strategy. +- */ +- switch (strategy) { +- case (PBA_STRATEGY_WEIGHTED): +- /* pba_80_48 strategy weight first half of packet buffer with +- * 5/8 of the packet buffer space. +- */ +- rxpktsize = ((pbsize * 5 * 2) / (num_pb * 8)); +- pbsize -= rxpktsize * (num_pb / 2); +- rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; +- for (; i < (num_pb / 2); i++) +- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); +- /* Fall through to configure remaining packet buffers */ +- case (PBA_STRATEGY_EQUAL): +- /* Divide the remaining Rx packet buffer evenly among the TCs */ +- rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; +- for (; i < num_pb; i++) +- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); +- break; +- default: +- break; +- } +- +- /* +- * Setup Tx packet buffer and threshold equally for all TCs +- * TXPBTHRESH register is set in K so divide by 1024 and subtract +- * 10 since the largest packet we support is just over 9K. 
+- */ +- txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; +- txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; +- for (i = 0; i < num_pb; i++) { +- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); +- IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); +- } +- +- /* Clear unused TCs, if any, to zero buffer size*/ +- for (; i < IXGBE_MAX_PB; i++) { +- IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); +- IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); +- IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); +- } +-} +- +-/** +- * ixgbe_calculate_checksum - Calculate checksum for buffer +- * @buffer: pointer to EEPROM +- * @length: size of EEPROM to calculate a checksum for +- * +- * Calculates the checksum for some buffer on a specified length. The +- * checksum calculated is returned. +- **/ +-static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) ++u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) + { + u32 i; + u8 sum = 0; + ++ DEBUGFUNC("ixgbe_calculate_checksum"); ++ + if (!buffer) + return 0; + +@@ -3508,99 +4219,158 @@ static u8 ixgbe_calculate_checksum(u8 *buffer, u32 length) + } + + /** +- * ixgbe_host_interface_command - Issue command to manageability block ++ * ixgbe_hic_unlocked - Issue command to manageability block unlocked + * @hw: pointer to the HW structure +- * @buffer: contains the command to write and where the return status will +- * be placed ++ * @buffer: command to write and where the return status will be placed + * @length: length of buffer, must be multiple of 4 bytes ++ * @timeout: time in ms to wait for command completion ++ * ++ * Communicates with the manageability block. On success return IXGBE_SUCCESS ++ * else returns semaphore error when encountering an error acquiring ++ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + * +- * Communicates with the manageability block. On success return 0 +- * else return IXGBE_ERR_HOST_INTERFACE_COMMAND. 
++ * This function assumes that the IXGBE_GSSR_SW_MNG_SM semaphore is held ++ * by the caller. + **/ +-static s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, +- u32 length) ++s32 ixgbe_hic_unlocked(struct ixgbe_hw *hw, u32 *buffer, u32 length, ++ u32 timeout) + { +- u32 hicr, i, bi; +- u32 hdr_size = sizeof(struct ixgbe_hic_hdr); +- u8 buf_len, dword_len; ++ u32 hicr, i, fwsts; ++ u16 dword_len; + +- s32 ret_val = 0; ++ DEBUGFUNC("ixgbe_hic_unlocked"); + +- if (length == 0 || length & 0x3 || +- length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { +- hw_dbg(hw, "Buffer length failure.\n"); +- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; +- goto out; ++ if (!length || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { ++ DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + ++ /* Set bit 9 of FWSTS clearing FW reset indication */ ++ fwsts = IXGBE_READ_REG(hw, IXGBE_FWSTS); ++ IXGBE_WRITE_REG(hw, IXGBE_FWSTS, fwsts | IXGBE_FWSTS_FWRI); ++ + /* Check that the host interface is enabled. */ + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); +- if ((hicr & IXGBE_HICR_EN) == 0) { +- hw_dbg(hw, "IXGBE_HOST_EN bit disabled.\n"); +- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; +- goto out; ++ if (!(hicr & IXGBE_HICR_EN)) { ++ DEBUGOUT("IXGBE_HOST_EN bit disabled.\n"); ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Calculate length in DWORDs. We must be DWORD aligned */ ++ if (length % sizeof(u32)) { ++ DEBUGOUT("Buffer length failure, not aligned to dword"); ++ return IXGBE_ERR_INVALID_ARGUMENT; + } + +- /* Calculate length in DWORDs */ + dword_len = length >> 2; + +- /* +- * The device driver writes the relevant command block ++ /* The device driver writes the relevant command block + * into the ram area. 
+ */ + for (i = 0; i < dword_len; i++) + IXGBE_WRITE_REG_ARRAY(hw, IXGBE_FLEX_MNG, +- i, cpu_to_le32(buffer[i])); ++ i, IXGBE_CPU_TO_LE32(buffer[i])); + + /* Setting this bit tells the ARC that a new command is pending. */ + IXGBE_WRITE_REG(hw, IXGBE_HICR, hicr | IXGBE_HICR_C); + +- for (i = 0; i < IXGBE_HI_COMMAND_TIMEOUT; i++) { ++ for (i = 0; i < timeout; i++) { + hicr = IXGBE_READ_REG(hw, IXGBE_HICR); + if (!(hicr & IXGBE_HICR_C)) + break; +- usleep_range(1000, 2000); ++ msec_delay(1); + } + +- /* Check command successful completion. */ +- if (i == IXGBE_HI_COMMAND_TIMEOUT || +- (!(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV))) { +- hw_dbg(hw, "Command has failed with no status valid.\n"); +- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; +- goto out; ++ /* Check command completion */ ++ if ((timeout && i == timeout) || ++ !(IXGBE_READ_REG(hw, IXGBE_HICR) & IXGBE_HICR_SV)) { ++ ERROR_REPORT1(IXGBE_ERROR_CAUTION, ++ "Command has failed with no status valid.\n"); ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; + } + ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_host_interface_command - Issue command to manageability block ++ * @hw: pointer to the HW structure ++ * @buffer: contains the command to write and where the return status will ++ * be placed ++ * @length: length of buffer, must be multiple of 4 bytes ++ * @timeout: time in ms to wait for command completion ++ * @return_data: read and return data from the buffer (true) or not (false) ++ * Needed because FW structures are big endian and decoding of ++ * these fields can be 8 bit or 16 bit based on command. Decoding ++ * is not easily understood without making a table of commands. ++ * So we will leave this up to the caller to read back the data ++ * in these cases. ++ * ++ * Communicates with the manageability block. On success return IXGBE_SUCCESS ++ * else returns semaphore error when encountering an error acquiring ++ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
++ **/ ++s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, ++ u32 length, u32 timeout, bool return_data) ++{ ++ u32 hdr_size = sizeof(struct ixgbe_hic_hdr); ++ u16 dword_len; ++ u16 buf_len; ++ s32 status; ++ u32 bi; ++ ++ DEBUGFUNC("ixgbe_host_interface_command"); ++ ++ if (length == 0 || length > IXGBE_HI_MAX_BLOCK_BYTE_LENGTH) { ++ DEBUGOUT1("Buffer length failure buffersize=%d.\n", length); ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; ++ } ++ ++ /* Take management host interface semaphore */ ++ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); ++ if (status) ++ return status; ++ ++ status = ixgbe_hic_unlocked(hw, buffer, length, timeout); ++ if (status) ++ goto rel_out; ++ ++ if (!return_data) ++ goto rel_out; ++ + /* Calculate length in DWORDs */ + dword_len = hdr_size >> 2; + + /* first pull in the header so we know the buffer length */ + for (bi = 0; bi < dword_len; bi++) { + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); +- le32_to_cpus(&buffer[bi]); ++ IXGBE_LE32_TO_CPUS(&buffer[bi]); + } + + /* If there is any thing in data position pull it in */ + buf_len = ((struct ixgbe_hic_hdr *)buffer)->buf_len; +- if (buf_len == 0) +- goto out; ++ if (!buf_len) ++ goto rel_out; + +- if (length < (buf_len + hdr_size)) { +- hw_dbg(hw, "Buffer not large enough for reply message.\n"); +- ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; +- goto out; ++ if (length < buf_len + hdr_size) { ++ DEBUGOUT("Buffer not large enough for reply message.\n"); ++ status = IXGBE_ERR_HOST_INTERFACE_COMMAND; ++ goto rel_out; + } + + /* Calculate length in DWORDs, add 3 for odd lengths */ + dword_len = (buf_len + 3) >> 2; + +- /* Pull in the rest of the buffer (bi is where we left off)*/ ++ /* Pull in the rest of the buffer (bi is where we left off) */ + for (; bi <= dword_len; bi++) { + buffer[bi] = IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, bi); +- le32_to_cpus(&buffer[bi]); ++ IXGBE_LE32_TO_CPUS(&buffer[bi]); + } + +-out: +- return ret_val; 
++rel_out: ++ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); ++ ++ return status; + } + + /** +@@ -3612,21 +4382,20 @@ out: + * @sub: driver version sub build number + * + * Sends driver version number to firmware through the manageability +- * block. On success return 0 ++ * block. On success return IXGBE_SUCCESS + * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring + * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. + **/ + s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, +- u8 build, u8 sub) ++ u8 build, u8 sub, u16 len, ++ const char *driver_ver) + { + struct ixgbe_hic_drv_info fw_cmd; + int i; +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; + +- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM) != 0) { +- ret_val = IXGBE_ERR_SWFW_SYNC; +- goto out; +- } ++ DEBUGFUNC("ixgbe_set_fw_drv_ver_generic"); ++ UNREFERENCED_2PARAMETER(len, driver_ver); + + fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; + fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN; +@@ -3644,25 +4413,84 @@ s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, + + for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { + ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, +- sizeof(fw_cmd)); +- if (ret_val != 0) ++ sizeof(fw_cmd), ++ IXGBE_HI_COMMAND_TIMEOUT, ++ true); ++ if (ret_val != IXGBE_SUCCESS) + continue; + + if (fw_cmd.hdr.cmd_or_resp.ret_status == + FW_CEM_RESP_STATUS_SUCCESS) +- ret_val = 0; ++ ret_val = IXGBE_SUCCESS; + else + ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; + + break; + } + +- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_SW_MNG_SM); +-out: + return ret_val; + } + + /** ++ * ixgbe_set_rxpba_generic - Initialize Rx packet buffer ++ * @hw: pointer to hardware structure ++ * @num_pb: number of packet buffers to allocate ++ * @headroom: reserve n KB of headroom ++ * @strategy: packet buffer allocation strategy ++ **/ ++void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, 
++ int strategy) ++{ ++ u32 pbsize = hw->mac.rx_pb_size; ++ int i = 0; ++ u32 rxpktsize, txpktsize, txpbthresh; ++ ++ /* Reserve headroom */ ++ pbsize -= headroom; ++ ++ if (!num_pb) ++ num_pb = 1; ++ ++ /* Divide remaining packet buffer space amongst the number of packet ++ * buffers requested using supplied strategy. ++ */ ++ switch (strategy) { ++ case PBA_STRATEGY_WEIGHTED: ++ /* ixgbe_dcb_pba_80_48 strategy weight first half of packet ++ * buffer with 5/8 of the packet buffer space. ++ */ ++ rxpktsize = (pbsize * 5) / (num_pb * 4); ++ pbsize -= rxpktsize * (num_pb / 2); ++ rxpktsize <<= IXGBE_RXPBSIZE_SHIFT; ++ for (; i < (num_pb / 2); i++) ++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); ++ /* fall through - configure remaining packet buffers */ ++ case PBA_STRATEGY_EQUAL: ++ rxpktsize = (pbsize / (num_pb - i)) << IXGBE_RXPBSIZE_SHIFT; ++ for (; i < num_pb; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), rxpktsize); ++ break; ++ default: ++ break; ++ } ++ ++ /* Only support an equally distributed Tx packet buffer strategy. 
*/ ++ txpktsize = IXGBE_TXPBSIZE_MAX / num_pb; ++ txpbthresh = (txpktsize / 1024) - IXGBE_TXPKT_SIZE_MAX; ++ for (i = 0; i < num_pb; i++) { ++ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), txpktsize); ++ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), txpbthresh); ++ } ++ ++ /* Clear unused TCs, if any, to zero buffer size*/ ++ for (; i < IXGBE_MAX_PB; i++) { ++ IXGBE_WRITE_REG(hw, IXGBE_RXPBSIZE(i), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_TXPBSIZE(i), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_TXPBTHRESH(i), 0); ++ } ++} ++ ++/** + * ixgbe_clear_tx_pending - Clear pending TX work from the PCIe fifo + * @hw: pointer to the hardware structure + * +@@ -3672,7 +4500,8 @@ out: + **/ + void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) + { +- u32 gcr_ext, hlreg0; ++ u32 gcr_ext, hlreg0, i, poll; ++ u16 value; + + /* + * If double reset is not requested then all transactions should +@@ -3689,6 +4518,25 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0 | IXGBE_HLREG0_LPBK); + ++ /* Wait for a last completion before clearing buffers */ ++ IXGBE_WRITE_FLUSH(hw); ++ msec_delay(3); ++ ++ /* ++ * Before proceeding, make sure that the PCIe block does not have ++ * transactions pending. 
++ */ ++ poll = ixgbe_pcie_timeout_poll(hw); ++ for (i = 0; i < poll; i++) { ++ usec_delay(100); ++ value = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_DEVICE_STATUS); ++ if (IXGBE_REMOVED(hw->hw_addr)) ++ goto out; ++ if (!(value & IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING)) ++ goto out; ++ } ++ ++out: + /* initiate cleaning flow for buffers in the PCIe transaction layer */ + gcr_ext = IXGBE_READ_REG(hw, IXGBE_GCR_EXT); + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, +@@ -3696,20 +4544,20 @@ void ixgbe_clear_tx_pending(struct ixgbe_hw *hw) + + /* Flush all writes and allow 20usec for all transactions to clear */ + IXGBE_WRITE_FLUSH(hw); +- udelay(20); ++ usec_delay(20); + + /* restore previous register values */ + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + } + +-static const u8 ixgbe_emc_temp_data[4] = { ++STATIC const u8 ixgbe_emc_temp_data[4] = { + IXGBE_EMC_INTERNAL_DATA, + IXGBE_EMC_DIODE1_DATA, + IXGBE_EMC_DIODE2_DATA, + IXGBE_EMC_DIODE3_DATA + }; +-static const u8 ixgbe_emc_therm_limit[4] = { ++STATIC const u8 ixgbe_emc_therm_limit[4] = { + IXGBE_EMC_INTERNAL_THERM_LIMIT, + IXGBE_EMC_DIODE1_THERM_LIMIT, + IXGBE_EMC_DIODE2_THERM_LIMIT, +@@ -3717,74 +4565,57 @@ static const u8 ixgbe_emc_therm_limit[4] = { + }; + + /** +- * ixgbe_get_ets_data - Extracts the ETS bit data ++ * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data + * @hw: pointer to hardware structure +- * @ets_cfg: extected ETS data +- * @ets_offset: offset of ETS data ++ * @data: pointer to the thermal sensor data structure + * +- * Returns error code. 
++ * Returns the thermal sensor data structure + **/ +-static s32 ixgbe_get_ets_data(struct ixgbe_hw *hw, u16 *ets_cfg, +- u16 *ets_offset) ++s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; ++ u16 ets_offset; ++ u16 ets_cfg; ++ u16 ets_sensor; ++ u8 num_sensors; ++ u8 sensor_index; ++ u8 sensor_location; ++ u8 i; ++ struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + +- status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, ets_offset); +- if (status) +- goto out; ++ DEBUGFUNC("ixgbe_get_thermal_sensor_data_generic"); + +- if ((*ets_offset == 0x0000) || (*ets_offset == 0xFFFF)) { ++ /* Only support thermal sensors attached to 82599 physical port 0 */ ++ if ((hw->mac.type != ixgbe_mac_82599EB) || ++ (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + +- status = hw->eeprom.ops.read(hw, *ets_offset, ets_cfg); ++ status = hw->eeprom.ops.read(hw, IXGBE_ETS_CFG, &ets_offset); + if (status) + goto out; + +- if ((*ets_cfg & IXGBE_ETS_TYPE_MASK) != IXGBE_ETS_TYPE_EMC_SHIFTED) { ++ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + +-out: +- return status; +-} +- +-/** +- * ixgbe_get_thermal_sensor_data - Gathers thermal sensor data +- * @hw: pointer to hardware structure +- * +- * Returns the thermal sensor data structure +- **/ +-s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw) +-{ +- s32 status = 0; +- u16 ets_offset; +- u16 ets_cfg; +- u16 ets_sensor; +- u8 num_sensors; +- u8 i; +- struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; ++ status = hw->eeprom.ops.read(hw, ets_offset, &ets_cfg); ++ if (status) ++ goto out; + +- /* Only support thermal sensors attached to physical port 0 */ +- if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { ++ if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) ++ != 
IXGBE_ETS_TYPE_EMC) { + status = IXGBE_NOT_IMPLEMENTED; + goto out; + } + +- status = ixgbe_get_ets_data(hw, &ets_cfg, &ets_offset); +- if (status) +- goto out; +- + num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); + if (num_sensors > IXGBE_MAX_SENSORS) + num_sensors = IXGBE_MAX_SENSORS; + + for (i = 0; i < num_sensors; i++) { +- u8 sensor_index; +- u8 sensor_location; +- + status = hw->eeprom.ops.read(hw, (ets_offset + 1 + i), + &ets_sensor); + if (status) +@@ -3809,49 +4640,59 @@ out: + } + + /** +- * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds +- * @hw: pointer to hardware structure ++ * ixgbe_init_thermal_sensor_thresh_generic - Inits thermal sensor thresholds ++ * @hw: pointer to hardware structure + * +- * Inits the thermal sensor thresholds according to the NVM map +- * and save off the threshold and location values into mac.thermal_sensor_data ++ * Inits the thermal sensor thresholds according to the NVM map ++ * and save off the threshold and location values into mac.thermal_sensor_data + **/ + s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; ++ u16 offset; + u16 ets_offset; + u16 ets_cfg; + u16 ets_sensor; + u8 low_thresh_delta; + u8 num_sensors; ++ u8 sensor_index; ++ u8 sensor_location; + u8 therm_limit; + u8 i; + struct ixgbe_thermal_sensor_data *data = &hw->mac.thermal_sensor_data; + ++ DEBUGFUNC("ixgbe_init_thermal_sensor_thresh_generic"); ++ + memset(data, 0, sizeof(struct ixgbe_thermal_sensor_data)); + +- /* Only support thermal sensors attached to physical port 0 */ +- if ((IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) { +- status = IXGBE_NOT_IMPLEMENTED; +- goto out; +- } ++ /* Only support thermal sensors attached to 82599 physical port 0 */ ++ if ((hw->mac.type != ixgbe_mac_82599EB) || ++ (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1)) ++ return IXGBE_NOT_IMPLEMENTED; + +- status = ixgbe_get_ets_data(hw, &ets_cfg, 
&ets_offset); +- if (status) +- goto out; ++ offset = IXGBE_ETS_CFG; ++ if (hw->eeprom.ops.read(hw, offset, &ets_offset)) ++ goto eeprom_err; ++ if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF)) ++ return IXGBE_NOT_IMPLEMENTED; ++ ++ offset = ets_offset; ++ if (hw->eeprom.ops.read(hw, offset, &ets_cfg)) ++ goto eeprom_err; ++ if (((ets_cfg & IXGBE_ETS_TYPE_MASK) >> IXGBE_ETS_TYPE_SHIFT) ++ != IXGBE_ETS_TYPE_EMC) ++ return IXGBE_NOT_IMPLEMENTED; + + low_thresh_delta = ((ets_cfg & IXGBE_ETS_LTHRES_DELTA_MASK) >> + IXGBE_ETS_LTHRES_DELTA_SHIFT); + num_sensors = (ets_cfg & IXGBE_ETS_NUM_SENSORS_MASK); +- if (num_sensors > IXGBE_MAX_SENSORS) +- num_sensors = IXGBE_MAX_SENSORS; + + for (i = 0; i < num_sensors; i++) { +- u8 sensor_index; +- u8 sensor_location; +- +- if (hw->eeprom.ops.read(hw, ets_offset + 1 + i, &ets_sensor)) { +- hw_err(hw, "eeprom read at offset %d failed\n", +- ets_offset + 1 + i); ++ offset = ets_offset + 1 + i; ++ if (hw->eeprom.ops.read(hw, offset, &ets_sensor)) { ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", ++ offset); + continue; + } + sensor_index = ((ets_sensor & IXGBE_ETS_DATA_INDEX_MASK) >> +@@ -3864,14 +4705,441 @@ s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw) + ixgbe_emc_therm_limit[sensor_index], + IXGBE_I2C_THERMAL_SENSOR_ADDR, therm_limit); + +- if (sensor_location == 0) +- continue; ++ if ((i < IXGBE_MAX_SENSORS) && (sensor_location != 0)) { ++ data->sensor[i].location = sensor_location; ++ data->sensor[i].caution_thresh = therm_limit; ++ data->sensor[i].max_op_thresh = therm_limit - ++ low_thresh_delta; ++ } ++ } ++ return status; ++ ++eeprom_err: ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", offset); ++ return IXGBE_NOT_IMPLEMENTED; ++} ++ ++/** ++ * ixgbe_get_orom_version - Return option ROM from EEPROM ++ * ++ * @hw: pointer to hardware structure ++ * @nvm_ver: pointer to output structure ++ * ++ * if valid option ROM version, 
nvm_ver->or_valid set to true ++ * else nvm_ver->or_valid is false. ++ **/ ++void ixgbe_get_orom_version(struct ixgbe_hw *hw, ++ struct ixgbe_nvm_version *nvm_ver) ++{ ++ u16 offset, eeprom_cfg_blkh, eeprom_cfg_blkl; ++ ++ nvm_ver->or_valid = false; ++ /* Option Rom may or may not be present. Start with pointer */ ++ hw->eeprom.ops.read(hw, NVM_OROM_OFFSET, &offset); ++ ++ /* make sure offset is valid */ ++ if ((offset == 0x0) || (offset == NVM_INVALID_PTR)) ++ return; ++ ++ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_HI, &eeprom_cfg_blkh); ++ hw->eeprom.ops.read(hw, offset + NVM_OROM_BLK_LOW, &eeprom_cfg_blkl); ++ ++ /* option rom exists and is valid */ ++ if ((eeprom_cfg_blkl | eeprom_cfg_blkh) == 0x0 || ++ eeprom_cfg_blkl == NVM_VER_INVALID || ++ eeprom_cfg_blkh == NVM_VER_INVALID) ++ return; ++ ++ nvm_ver->or_valid = true; ++ nvm_ver->or_major = eeprom_cfg_blkl >> NVM_OROM_SHIFT; ++ nvm_ver->or_build = (eeprom_cfg_blkl << NVM_OROM_SHIFT) | ++ (eeprom_cfg_blkh >> NVM_OROM_SHIFT); ++ nvm_ver->or_patch = eeprom_cfg_blkh & NVM_OROM_PATCH_MASK; ++} ++ ++/** ++ * ixgbe_get_oem_prod_version - Return OEM Product version ++ * ++ * @hw: pointer to hardware structure ++ * @nvm_ver: pointer to output structure ++ * ++ * if valid OEM product version, nvm_ver->oem_valid set to true ++ * else nvm_ver->oem_valid is false. 
++ **/ ++void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, ++ struct ixgbe_nvm_version *nvm_ver) ++{ ++ u16 rel_num, prod_ver, mod_len, cap, offset; ++ ++ nvm_ver->oem_valid = false; ++ hw->eeprom.ops.read(hw, NVM_OEM_PROD_VER_PTR, &offset); ++ ++ /* Return is offset to OEM Product Version block is invalid */ ++ if (offset == 0x0 && offset == NVM_INVALID_PTR) ++ return; ++ ++ /* Read product version block */ ++ hw->eeprom.ops.read(hw, offset, &mod_len); ++ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_CAP_OFF, &cap); ++ ++ /* Return if OEM product version block is invalid */ ++ if (mod_len != NVM_OEM_PROD_VER_MOD_LEN || ++ (cap & NVM_OEM_PROD_VER_CAP_MASK) != 0x0) ++ return; ++ ++ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_L, &prod_ver); ++ hw->eeprom.ops.read(hw, offset + NVM_OEM_PROD_VER_OFF_H, &rel_num); ++ ++ /* Return if version is invalid */ ++ if ((rel_num | prod_ver) == 0x0 || ++ rel_num == NVM_VER_INVALID || prod_ver == NVM_VER_INVALID) ++ return; ++ ++ nvm_ver->oem_major = prod_ver >> NVM_VER_SHIFT; ++ nvm_ver->oem_minor = prod_ver & NVM_VER_MASK; ++ nvm_ver->oem_release = rel_num; ++ nvm_ver->oem_valid = true; ++} ++ ++/** ++ * ixgbe_get_etk_id - Return Etrack ID from EEPROM ++ * ++ * @hw: pointer to hardware structure ++ * @nvm_ver: pointer to output structure ++ * ++ * word read errors will return 0xFFFF ++ **/ ++void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver) ++{ ++ u16 etk_id_l, etk_id_h; ++ ++ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_LOW, &etk_id_l)) ++ etk_id_l = NVM_VER_INVALID; ++ if (hw->eeprom.ops.read(hw, NVM_ETK_OFF_HI, &etk_id_h)) ++ etk_id_h = NVM_VER_INVALID; + +- data->sensor[i].location = sensor_location; +- data->sensor[i].caution_thresh = therm_limit; +- data->sensor[i].max_op_thresh = therm_limit - low_thresh_delta; ++ /* The word order for the version format is determined by high order ++ * word bit 15. 
++ */ ++ if ((etk_id_h & NVM_ETK_VALID) == 0) { ++ nvm_ver->etk_id = etk_id_h; ++ nvm_ver->etk_id |= (etk_id_l << NVM_ETK_SHIFT); ++ } else { ++ nvm_ver->etk_id = etk_id_l; ++ nvm_ver->etk_id |= (etk_id_h << NVM_ETK_SHIFT); + } ++} ++ ++/** ++ * ixgbe_dcb_get_rtrup2tc_generic - read rtrup2tc reg ++ * @hw: pointer to hardware structure ++ * @map: pointer to u8 arr for returning map ++ * ++ * Read the rtrup2tc HW register and resolve its content into map ++ **/ ++void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map) ++{ ++ u32 reg, i; ++ ++ reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); ++ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) ++ map[i] = IXGBE_RTRUP2TC_UP_MASK & ++ (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); ++ return; ++} ++ ++void ixgbe_disable_rx_generic(struct ixgbe_hw *hw) ++{ ++ u32 pfdtxgswc; ++ u32 rxctrl; ++ ++ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); ++ if (rxctrl & IXGBE_RXCTRL_RXEN) { ++ if (hw->mac.type != ixgbe_mac_82598EB) { ++ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); ++ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { ++ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; ++ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); ++ hw->mac.set_lben = true; ++ } else { ++ hw->mac.set_lben = false; ++ } ++ } ++ rxctrl &= ~IXGBE_RXCTRL_RXEN; ++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); ++ } ++} ++ ++void ixgbe_enable_rx_generic(struct ixgbe_hw *hw) ++{ ++ u32 pfdtxgswc; ++ u32 rxctrl; ++ ++ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); ++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, (rxctrl | IXGBE_RXCTRL_RXEN)); ++ ++ if (hw->mac.type != ixgbe_mac_82598EB) { ++ if (hw->mac.set_lben) { ++ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); ++ pfdtxgswc |= IXGBE_PFDTXGSWC_VT_LBEN; ++ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); ++ hw->mac.set_lben = false; ++ } ++ } ++} ++ ++/** ++ * ixgbe_mng_present - returns true when management capability is present ++ * @hw: pointer to hardware structure ++ */ ++bool ixgbe_mng_present(struct ixgbe_hw *hw) ++{ ++ u32 fwsm; ++ ++ 
if (hw->mac.type < ixgbe_mac_82599EB) ++ return false; ++ ++ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); ++ fwsm &= IXGBE_FWSM_MODE_MASK; ++ return fwsm == IXGBE_FWSM_FW_MODE_PT; ++} ++ ++/** ++ * ixgbe_mng_enabled - Is the manageability engine enabled? ++ * @hw: pointer to hardware structure ++ * ++ * Returns true if the manageability engine is enabled. ++ **/ ++bool ixgbe_mng_enabled(struct ixgbe_hw *hw) ++{ ++ u32 fwsm, manc, factps; ++ ++ fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)); ++ if ((fwsm & IXGBE_FWSM_MODE_MASK) != IXGBE_FWSM_FW_MODE_PT) ++ return false; ++ ++ manc = IXGBE_READ_REG(hw, IXGBE_MANC); ++ if (!(manc & IXGBE_MANC_RCV_TCO_EN)) ++ return false; ++ ++ if (hw->mac.type <= ixgbe_mac_X540) { ++ factps = IXGBE_READ_REG(hw, IXGBE_FACTPS_BY_MAC(hw)); ++ if (factps & IXGBE_FACTPS_MNGCG) ++ return false; ++ } ++ ++ return true; ++} ++ ++/** ++ * ixgbe_setup_mac_link_multispeed_fiber - Set MAC link speed ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * @autoneg_wait_to_complete: true when waiting for completion is needed ++ * ++ * Set the link speed in the MAC and/or PHY register and restarts link. ++ **/ ++s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete) ++{ ++ ixgbe_link_speed link_speed = IXGBE_LINK_SPEED_UNKNOWN; ++ ixgbe_link_speed highest_link_speed = IXGBE_LINK_SPEED_UNKNOWN; ++ s32 status = IXGBE_SUCCESS; ++ u32 speedcnt = 0; ++ u32 i = 0; ++ bool autoneg, link_up = false; ++ ++ DEBUGFUNC("ixgbe_setup_mac_link_multispeed_fiber"); ++ ++ /* Mask off requested but non-supported speeds */ ++ status = ixgbe_get_link_capabilities(hw, &link_speed, &autoneg); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ speed &= link_speed; ++ ++ /* Try each speed one by one, highest priority first. We do this in ++ * software because 10Gb fiber doesn't support speed autonegotiation. 
++ */ ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) { ++ speedcnt++; ++ highest_link_speed = IXGBE_LINK_SPEED_10GB_FULL; ++ ++ /* Set the module link speed */ ++ switch (hw->phy.media_type) { ++ case ixgbe_media_type_fiber: ++ ixgbe_set_rate_select_speed(hw, ++ IXGBE_LINK_SPEED_10GB_FULL); ++ break; ++ case ixgbe_media_type_fiber_qsfp: ++ /* QSFP module automatically detects MAC link speed */ ++ break; ++ default: ++ DEBUGOUT("Unexpected media type.\n"); ++ break; ++ } ++ ++ /* Allow module to change analog characteristics (1G->10G) */ ++ msec_delay(40); ++ ++ status = ixgbe_setup_mac_link(hw, ++ IXGBE_LINK_SPEED_10GB_FULL, ++ autoneg_wait_to_complete); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Flap the Tx laser if it has not already been done */ ++ ixgbe_flap_tx_laser(hw); ++ ++ /* Wait for the controller to acquire link. Per IEEE 802.3ap, ++ * Section 73.10.2, we may have to wait up to 500ms if KR is ++ * attempted. 82599 uses the same timing for 10g SFI. ++ */ ++ for (i = 0; i < 5; i++) { ++ /* Wait for the link partner to also set speed */ ++ msec_delay(100); ++ ++ /* If we have link, just jump out */ ++ status = ixgbe_check_link(hw, &link_speed, ++ &link_up, false); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ if (link_up) ++ goto out; ++ } ++ } ++ ++ if (speed & IXGBE_LINK_SPEED_1GB_FULL) { ++ speedcnt++; ++ if (highest_link_speed == IXGBE_LINK_SPEED_UNKNOWN) ++ highest_link_speed = IXGBE_LINK_SPEED_1GB_FULL; ++ ++ /* Set the module link speed */ ++ switch (hw->phy.media_type) { ++ case ixgbe_media_type_fiber: ++ ixgbe_set_rate_select_speed(hw, ++ IXGBE_LINK_SPEED_1GB_FULL); ++ break; ++ case ixgbe_media_type_fiber_qsfp: ++ /* QSFP module automatically detects link speed */ ++ break; ++ default: ++ DEBUGOUT("Unexpected media type.\n"); ++ break; ++ } ++ ++ /* Allow module to change analog characteristics (10G->1G) */ ++ msec_delay(40); ++ ++ status = ixgbe_setup_mac_link(hw, ++ IXGBE_LINK_SPEED_1GB_FULL, ++ autoneg_wait_to_complete); 
++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Flap the Tx laser if it has not already been done */ ++ ixgbe_flap_tx_laser(hw); ++ ++ /* Wait for the link partner to also set speed */ ++ msec_delay(100); ++ ++ /* If we have link, just jump out */ ++ status = ixgbe_check_link(hw, &link_speed, &link_up, false); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ if (link_up) ++ goto out; ++ } ++ ++ /* We didn't get link. Configure back to the highest speed we tried, ++ * (if there was more than one). We call ourselves back with just the ++ * single highest speed that the user requested. ++ */ ++ if (speedcnt > 1) ++ status = ixgbe_setup_mac_link_multispeed_fiber(hw, ++ highest_link_speed, ++ autoneg_wait_to_complete); ++ + out: ++ /* Set autoneg_advertised value based on input link speed */ ++ hw->phy.autoneg_advertised = 0; ++ ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; ++ ++ if (speed & IXGBE_LINK_SPEED_1GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; ++ + return status; + } + ++/** ++ * ixgbe_set_soft_rate_select_speed - Set module link speed ++ * @hw: pointer to hardware structure ++ * @speed: link speed to set ++ * ++ * Set module link speed via the soft rate select. 
++ */ ++void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed) ++{ ++ s32 status; ++ u8 rs, eeprom_data; ++ ++ switch (speed) { ++ case IXGBE_LINK_SPEED_10GB_FULL: ++ /* one bit mask same as setting on */ ++ rs = IXGBE_SFF_SOFT_RS_SELECT_10G; ++ break; ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ rs = IXGBE_SFF_SOFT_RS_SELECT_1G; ++ break; ++ default: ++ DEBUGOUT("Invalid fixed module speed\n"); ++ return; ++ } ++ ++ /* Set RS0 */ ++ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, ++ IXGBE_I2C_EEPROM_DEV_ADDR2, ++ &eeprom_data); ++ if (status) { ++ DEBUGOUT("Failed to read Rx Rate Select RS0\n"); ++ goto out; ++ } ++ ++ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; ++ ++ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_OSCB, ++ IXGBE_I2C_EEPROM_DEV_ADDR2, ++ eeprom_data); ++ if (status) { ++ DEBUGOUT("Failed to write Rx Rate Select RS0\n"); ++ goto out; ++ } ++ ++ /* Set RS1 */ ++ status = hw->phy.ops.read_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, ++ IXGBE_I2C_EEPROM_DEV_ADDR2, ++ &eeprom_data); ++ if (status) { ++ DEBUGOUT("Failed to read Rx Rate Select RS1\n"); ++ goto out; ++ } ++ ++ eeprom_data = (eeprom_data & ~IXGBE_SFF_SOFT_RS_SELECT_MASK) | rs; ++ ++ status = hw->phy.ops.write_i2c_byte(hw, IXGBE_SFF_SFF_8472_ESCB, ++ IXGBE_I2C_EEPROM_DEV_ADDR2, ++ eeprom_data); ++ if (status) { ++ DEBUGOUT("Failed to write Rx Rate Select RS1\n"); ++ goto out; ++ } ++out: ++ return; ++} +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +index 2ae5d4b..4b65730 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_common.h +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2014 Intel Corporation. 
++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -30,7 +26,8 @@ + #define _IXGBE_COMMON_H_ + + #include "ixgbe_type.h" +-#include "ixgbe.h" ++ ++void ixgbe_dcb_get_rtrup2tc_generic(struct ixgbe_hw *hw, u8 *map); + + u16 ixgbe_get_pcie_msix_count_generic(struct ixgbe_hw *hw); + s32 ixgbe_init_ops_generic(struct ixgbe_hw *hw); +@@ -41,14 +38,14 @@ s32 ixgbe_clear_hw_cntrs_generic(struct ixgbe_hw *hw); + s32 ixgbe_read_pba_string_generic(struct ixgbe_hw *hw, u8 *pba_num, + u32 pba_num_size); + s32 ixgbe_get_mac_addr_generic(struct ixgbe_hw *hw, u8 *mac_addr); +-enum ixgbe_bus_width ixgbe_convert_bus_width(u16 link_status); +-enum ixgbe_bus_speed ixgbe_convert_bus_speed(u16 link_status); + s32 ixgbe_get_bus_info_generic(struct ixgbe_hw *hw); ++void ixgbe_set_pci_config_data_generic(struct ixgbe_hw *hw, u16 link_status); + void ixgbe_set_lan_id_multi_port_pcie(struct ixgbe_hw *hw); + s32 ixgbe_stop_adapter_generic(struct ixgbe_hw *hw); + + s32 ixgbe_led_on_generic(struct ixgbe_hw *hw, u32 index); + s32 ixgbe_led_off_generic(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_init_led_link_act_generic(struct ixgbe_hw *hw); + + s32 ixgbe_init_eeprom_params_generic(struct ixgbe_hw *hw); + s32 ixgbe_write_eeprom_generic(struct ixgbe_hw *hw, u16 offset, u16 data); +@@ -64,57 +61,87 @@ s32 ixgbe_read_eeprom_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 
*data); + s32 ixgbe_read_eeprom_buffer_bit_bang_generic(struct ixgbe_hw *hw, u16 offset, + u16 words, u16 *data); +-u16 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); ++s32 ixgbe_calc_eeprom_checksum_generic(struct ixgbe_hw *hw); + s32 ixgbe_validate_eeprom_checksum_generic(struct ixgbe_hw *hw, + u16 *checksum_val); + s32 ixgbe_update_eeprom_checksum_generic(struct ixgbe_hw *hw); ++s32 ixgbe_poll_eerd_eewr_done(struct ixgbe_hw *hw, u32 ee_reg); + + s32 ixgbe_set_rar_generic(struct ixgbe_hw *hw, u32 index, u8 *addr, u32 vmdq, + u32 enable_addr); + s32 ixgbe_clear_rar_generic(struct ixgbe_hw *hw, u32 index); + s32 ixgbe_init_rx_addrs_generic(struct ixgbe_hw *hw); +-s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, +- struct net_device *netdev); ++s32 ixgbe_update_mc_addr_list_generic(struct ixgbe_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count, ++ ixgbe_mc_addr_itr func, bool clear); ++s32 ixgbe_update_uc_addr_list_generic(struct ixgbe_hw *hw, u8 *addr_list, ++ u32 addr_count, ixgbe_mc_addr_itr func); + s32 ixgbe_enable_mc_generic(struct ixgbe_hw *hw); + s32 ixgbe_disable_mc_generic(struct ixgbe_hw *hw); +-s32 ixgbe_disable_rx_buff_generic(struct ixgbe_hw *hw); +-s32 ixgbe_enable_rx_buff_generic(struct ixgbe_hw *hw); + s32 ixgbe_enable_rx_dma_generic(struct ixgbe_hw *hw, u32 regval); ++s32 ixgbe_disable_sec_rx_path_generic(struct ixgbe_hw *hw); ++s32 ixgbe_enable_sec_rx_path_generic(struct ixgbe_hw *hw); ++ + s32 ixgbe_fc_enable_generic(struct ixgbe_hw *hw); + bool ixgbe_device_supports_autoneg_fc(struct ixgbe_hw *hw); + void ixgbe_fc_autoneg(struct ixgbe_hw *hw); ++s32 ixgbe_setup_fc_generic(struct ixgbe_hw *hw); ++ ++s32 ixgbe_validate_mac_addr(u8 *mac_addr); ++s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u32 mask); ++void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u32 mask); ++s32 ixgbe_disable_pcie_master(struct ixgbe_hw *hw); ++ ++s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); ++s32 prot_autoc_write_generic(struct 
ixgbe_hw *hw, u32 reg_val, bool locked); ++ ++s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); + +-s32 ixgbe_acquire_swfw_sync(struct ixgbe_hw *hw, u16 mask); +-void ixgbe_release_swfw_sync(struct ixgbe_hw *hw, u16 mask); + s32 ixgbe_get_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); ++s32 ixgbe_set_san_mac_addr_generic(struct ixgbe_hw *hw, u8 *san_mac_addr); ++ + s32 ixgbe_set_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); + s32 ixgbe_set_vmdq_san_mac_generic(struct ixgbe_hw *hw, u32 vmdq); + s32 ixgbe_clear_vmdq_generic(struct ixgbe_hw *hw, u32 rar, u32 vmdq); ++s32 ixgbe_insert_mac_addr_generic(struct ixgbe_hw *hw, u8 *addr, u32 vmdq); + s32 ixgbe_init_uta_tables_generic(struct ixgbe_hw *hw); + s32 ixgbe_set_vfta_generic(struct ixgbe_hw *hw, u32 vlan, +- u32 vind, bool vlan_on); ++ u32 vind, bool vlan_on, bool vlvf_bypass); ++s32 ixgbe_set_vlvf_generic(struct ixgbe_hw *hw, u32 vlan, u32 vind, ++ bool vlan_on, u32 *vfta_delta, u32 vfta, ++ bool vlvf_bypass); + s32 ixgbe_clear_vfta_generic(struct ixgbe_hw *hw); ++s32 ixgbe_find_vlvf_slot(struct ixgbe_hw *hw, u32 vlan, bool vlvf_bypass); ++ + s32 ixgbe_check_mac_link_generic(struct ixgbe_hw *hw, +- ixgbe_link_speed *speed, +- bool *link_up, bool link_up_wait_to_complete); ++ ixgbe_link_speed *speed, ++ bool *link_up, bool link_up_wait_to_complete); ++ + s32 ixgbe_get_wwn_prefix_generic(struct ixgbe_hw *hw, u16 *wwnn_prefix, + u16 *wwpn_prefix); + +-s32 prot_autoc_read_generic(struct ixgbe_hw *hw, bool *, u32 *reg_val); +-s32 prot_autoc_write_generic(struct ixgbe_hw *hw, u32 reg_val, bool locked); +- +-s32 ixgbe_blink_led_start_generic(struct ixgbe_hw *hw, u32 index); +-s32 ixgbe_blink_led_stop_generic(struct ixgbe_hw *hw, u32 index); +-void ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int pf); ++s32 ixgbe_get_fcoe_boot_status_generic(struct ixgbe_hw *hw, u16 *bs); ++void 
ixgbe_set_mac_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); + void ixgbe_set_vlan_anti_spoofing(struct ixgbe_hw *hw, bool enable, int vf); + s32 ixgbe_get_device_caps_generic(struct ixgbe_hw *hw, u16 *device_caps); ++void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, u32 headroom, ++ int strategy); + s32 ixgbe_set_fw_drv_ver_generic(struct ixgbe_hw *hw, u8 maj, u8 min, +- u8 build, u8 ver); ++ u8 build, u8 ver, u16 len, const char *str); ++u8 ixgbe_calculate_checksum(u8 *buffer, u32 length); ++s32 ixgbe_host_interface_command(struct ixgbe_hw *hw, u32 *buffer, ++ u32 length, u32 timeout, bool return_data); ++s32 ixgbe_hic_unlocked(struct ixgbe_hw *, u32 *buffer, u32 length, u32 timeout); ++s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *); ++s32 ixgbe_fw_phy_activity(struct ixgbe_hw *, u16 activity, ++ u32 (*data)[FW_PHY_ACT_DATA_COUNT]); + void ixgbe_clear_tx_pending(struct ixgbe_hw *hw); +-bool ixgbe_mng_enabled(struct ixgbe_hw *hw); + +-void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, +- u32 headroom, int strategy); ++extern s32 ixgbe_reset_pipeline_82599(struct ixgbe_hw *hw); ++extern void ixgbe_stop_mac_link_on_d3_82599(struct ixgbe_hw *hw); ++bool ixgbe_mng_present(struct ixgbe_hw *hw); ++bool ixgbe_mng_enabled(struct ixgbe_hw *hw); + + #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 + #define IXGBE_EMC_INTERNAL_DATA 0x00 +@@ -129,78 +156,16 @@ void ixgbe_set_rxpba_generic(struct ixgbe_hw *hw, int num_pb, + s32 ixgbe_get_thermal_sensor_data_generic(struct ixgbe_hw *hw); + s32 ixgbe_init_thermal_sensor_thresh_generic(struct ixgbe_hw *hw); + +-#define IXGBE_FAILED_READ_REG 0xffffffffU +-#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU +-#define IXGBE_FAILED_READ_CFG_WORD 0xffffU +- +-u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); +-void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); +- +-static inline bool ixgbe_removed(void __iomem *addr) +-{ +- return unlikely(!addr); +-} +- +-static inline void 
ixgbe_write_reg(struct ixgbe_hw *hw, u32 reg, u32 value) +-{ +- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); +- +- if (ixgbe_removed(reg_addr)) +- return; +- writel(value, reg_addr + reg); +-} +-#define IXGBE_WRITE_REG(a, reg, value) ixgbe_write_reg((a), (reg), (value)) +- +-#ifndef writeq +-#define writeq writeq +-static inline void writeq(u64 val, void __iomem *addr) +-{ +- writel((u32)val, addr); +- writel((u32)(val >> 32), addr + 4); +-} +-#endif +- +-static inline void ixgbe_write_reg64(struct ixgbe_hw *hw, u32 reg, u64 value) +-{ +- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); +- +- if (ixgbe_removed(reg_addr)) +- return; +- writeq(value, reg_addr + reg); +-} +-#define IXGBE_WRITE_REG64(a, reg, value) ixgbe_write_reg64((a), (reg), (value)) +- +-u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg); +-#define IXGBE_READ_REG(a, reg) ixgbe_read_reg((a), (reg)) +- +-#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \ +- ixgbe_write_reg((a), (reg) + ((offset) << 2), (value)) +- +-#define IXGBE_READ_REG_ARRAY(a, reg, offset) \ +- ixgbe_read_reg((a), (reg) + ((offset) << 2)) +- +-#define IXGBE_WRITE_FLUSH(a) ixgbe_read_reg((a), IXGBE_STATUS) +- +-#define ixgbe_hw_to_netdev(hw) (((struct ixgbe_adapter *)(hw)->back)->netdev) +- +-#define hw_dbg(hw, format, arg...) \ +- netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg) +-#define hw_err(hw, format, arg...) \ +- netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg) +-#define e_dev_info(format, arg...) \ +- dev_info(&adapter->pdev->dev, format, ## arg) +-#define e_dev_warn(format, arg...) \ +- dev_warn(&adapter->pdev->dev, format, ## arg) +-#define e_dev_err(format, arg...) \ +- dev_err(&adapter->pdev->dev, format, ## arg) +-#define e_dev_notice(format, arg...) \ +- dev_notice(&adapter->pdev->dev, format, ## arg) +-#define e_info(msglvl, format, arg...) \ +- netif_info(adapter, msglvl, adapter->netdev, format, ## arg) +-#define e_err(msglvl, format, arg...) 
\ +- netif_err(adapter, msglvl, adapter->netdev, format, ## arg) +-#define e_warn(msglvl, format, arg...) \ +- netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) +-#define e_crit(msglvl, format, arg...) \ +- netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) ++void ixgbe_get_etk_id(struct ixgbe_hw *hw, struct ixgbe_nvm_version *nvm_ver); ++void ixgbe_get_oem_prod_version(struct ixgbe_hw *hw, ++ struct ixgbe_nvm_version *nvm_ver); ++void ixgbe_get_orom_version(struct ixgbe_hw *hw, ++ struct ixgbe_nvm_version *nvm_ver); ++void ixgbe_disable_rx_generic(struct ixgbe_hw *hw); ++void ixgbe_enable_rx_generic(struct ixgbe_hw *hw); ++s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete); ++void ixgbe_set_soft_rate_select_speed(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed); + #endif /* IXGBE_COMMON */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +index a689ee0..3eee95c 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -27,30 +23,29 @@ + *******************************************************************************/ + + +-#include "ixgbe.h" + #include "ixgbe_type.h" + #include "ixgbe_dcb.h" + #include "ixgbe_dcb_82598.h" + #include "ixgbe_dcb_82599.h" + + /** +- * ixgbe_ieee_credits - This calculates the ieee traffic class ++ * ixgbe_dcb_calculate_tc_credits - This calculates the ieee traffic class + * credits from the configured bandwidth percentages. Credits + * are the smallest unit programmable into the underlying + * hardware. The IEEE 802.1Qaz specification do not use bandwidth + * groups so this is much simplified from the CEE case. + */ +-static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, +- __u16 *max, int max_frame) ++s32 ixgbe_dcb_calculate_tc_credits(u8 *bw, u16 *refill, u16 *max, ++ int max_frame_size) + { + int min_percent = 100; + int min_credit, multiplier; + int i; + +- min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / +- DCB_CREDIT_QUANTUM; ++ min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / ++ IXGBE_DCB_CREDIT_QUANTUM; + +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if (bw[i] < min_percent && bw[i]) + min_percent = bw[i]; + } +@@ -58,53 +53,54 @@ static s32 ixgbe_ieee_credits(__u8 *bw, __u16 *refill, + multiplier = (min_credit / min_percent) + 1; + + /* Find out the hw credits for each TC */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { +- int val = min(bw[i] * multiplier, MAX_CREDIT_REFILL); ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { ++ int val = min(bw[i] * multiplier, IXGBE_DCB_MAX_CREDIT_REFILL); + + if (val < min_credit) + val = min_credit; +- refill[i] = val; ++ refill[i] = (u16)val; + +- max[i] = bw[i] ? (bw[i] * MAX_CREDIT)/100 : min_credit; ++ max[i] = bw[i] ? 
(bw[i]*IXGBE_DCB_MAX_CREDIT)/100 : min_credit; + } ++ + return 0; + } + + /** +- * ixgbe_dcb_calculate_tc_credits - Calculates traffic class credits ++ * ixgbe_dcb_calculate_tc_credits_cee - Calculates traffic class credits + * @ixgbe_dcb_config: Struct containing DCB settings. + * @direction: Configuring either Tx or Rx. + * + * This function calculates the credits allocated to each traffic class. + * It should be called only after the rules are checked by +- * ixgbe_dcb_check_config(). ++ * ixgbe_dcb_check_config_cee(). + */ +-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, ++s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *hw, + struct ixgbe_dcb_config *dcb_config, +- int max_frame, u8 direction) ++ u32 max_frame_size, u8 direction) + { +- struct tc_bw_alloc *p; +- int min_credit; +- int min_multiplier; +- int min_percent = 100; +- s32 ret_val = 0; ++ struct ixgbe_dcb_tc_path *p; ++ u32 min_multiplier = 0; ++ u16 min_percent = 100; ++ s32 ret_val = IXGBE_SUCCESS; + /* Initialization values default for Tx settings */ +- u32 credit_refill = 0; +- u32 credit_max = 0; +- u16 link_percentage = 0; +- u8 bw_percent = 0; ++ u32 min_credit = 0; ++ u32 credit_refill = 0; ++ u32 credit_max = 0; ++ u16 link_percentage = 0; ++ u8 bw_percent = 0; + u8 i; + + if (dcb_config == NULL) { +- ret_val = DCB_ERR_CONFIG; ++ ret_val = IXGBE_ERR_CONFIG; + goto out; + } + +- min_credit = ((max_frame / 2) + DCB_CREDIT_QUANTUM - 1) / +- DCB_CREDIT_QUANTUM; ++ min_credit = ((max_frame_size / 2) + IXGBE_DCB_CREDIT_QUANTUM - 1) / ++ IXGBE_DCB_CREDIT_QUANTUM; + + /* Find smallest link percentage */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + link_percentage = p->bwg_percent; +@@ -126,7 +122,7 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, + min_multiplier = (min_credit / min_percent) + 1; + + /* 
Find out the link percentage for each TC first */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + p = &dcb_config->tc_config[i].path[direction]; + bw_percent = dcb_config->bw_percentage[direction][p->bwg_id]; + +@@ -141,34 +137,39 @@ s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *hw, + + /* Calculate credit refill ratio using multiplier */ + credit_refill = min(link_percentage * min_multiplier, +- MAX_CREDIT_REFILL); ++ (u32)IXGBE_DCB_MAX_CREDIT_REFILL); ++ ++ /* Refill at least minimum credit */ ++ if (credit_refill < min_credit) ++ credit_refill = min_credit; ++ + p->data_credits_refill = (u16)credit_refill; + + /* Calculate maximum credit for the TC */ +- credit_max = (link_percentage * MAX_CREDIT) / 100; ++ credit_max = (link_percentage * IXGBE_DCB_MAX_CREDIT) / 100; + + /* + * Adjustment based on rule checking, if the percentage + * of a TC is too small, the maximum credit may not be + * enough to send out a jumbo frame in data plane arbitration. + */ +- if (credit_max && (credit_max < min_credit)) ++ if (credit_max < min_credit) + credit_max = min_credit; + +- if (direction == DCB_TX_CONFIG) { ++ if (direction == IXGBE_DCB_TX_CONFIG) { + /* + * Adjustment based on rule checking, if the + * percentage of a TC is too small, the maximum + * credit may not be enough to send out a TSO + * packet in descriptor plane arbitration. 
+ */ +- if ((hw->mac.type == ixgbe_mac_82598EB) && +- credit_max && +- (credit_max < MINIMUM_CREDIT_FOR_TSO)) +- credit_max = MINIMUM_CREDIT_FOR_TSO; ++ if (credit_max && (credit_max < ++ IXGBE_DCB_MIN_TSO_CREDIT) ++ && (hw->mac.type == ixgbe_mac_82598EB)) ++ credit_max = IXGBE_DCB_MIN_TSO_CREDIT; + + dcb_config->tc_config[i].desc_credits_max = +- (u16)credit_max; ++ (u16)credit_max; + } + + p->data_credits_max = (u16)credit_max; +@@ -178,59 +179,74 @@ out: + return ret_val; + } + +-void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en) ++/** ++ * ixgbe_dcb_unpack_pfc_cee - Unpack dcb_config PFC info ++ * @cfg: dcb configuration to unpack into hardware consumable fields ++ * @map: user priority to traffic class map ++ * @pfc_up: u8 to store user priority PFC bitmask ++ * ++ * This unpacks the dcb configuration PFC info which is stored per ++ * traffic class into a 8bit user priority bitmask that can be ++ * consumed by hardware routines. The priority to tc map must be ++ * updated before calling this routine to use current up-to maps. ++ */ ++void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *cfg, u8 *map, u8 *pfc_up) + { +- struct tc_configuration *tc_config = &cfg->tc_config[0]; +- int tc; ++ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; ++ int up; + +- for (*pfc_en = 0, tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) { +- if (tc_config[tc].dcb_pfc != pfc_disabled) +- *pfc_en |= 1 << tc; ++ /* ++ * If the TC for this user priority has PFC enabled then set the ++ * matching bit in 'pfc_up' to reflect that PFC is enabled. 
++ */ ++ for (*pfc_up = 0, up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) { ++ if (tc_config[map[up]].pfc != ixgbe_dcb_pfc_disabled) ++ *pfc_up |= 1 << up; + } + } + +-void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *cfg, int direction, ++void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *cfg, int direction, + u16 *refill) + { +- struct tc_configuration *tc_config = &cfg->tc_config[0]; ++ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + +- for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) ++ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + refill[tc] = tc_config[tc].path[direction].data_credits_refill; + } + +-void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *cfg, u16 *max) ++void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *cfg, u16 *max) + { +- struct tc_configuration *tc_config = &cfg->tc_config[0]; ++ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + +- for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) ++ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + max[tc] = tc_config[tc].desc_credits_max; + } + +-void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *cfg, int direction, ++void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *cfg, int direction, + u8 *bwgid) + { +- struct tc_configuration *tc_config = &cfg->tc_config[0]; ++ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + +- for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) ++ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) + bwgid[tc] = tc_config[tc].path[direction].bwg_id; + } + +-void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *cfg, int direction, +- u8 *ptype) ++void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *cfg, int direction, ++ u8 *tsa) + { +- struct tc_configuration *tc_config = &cfg->tc_config[0]; ++ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + int tc; + +- for (tc = 0; tc < MAX_TRAFFIC_CLASS; tc++) +- ptype[tc] = tc_config[tc].path[direction].prio_type; ++ for (tc = 0; tc < 
IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) ++ tsa[tc] = tc_config[tc].path[direction].tsa; + } + + u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *cfg, int direction, u8 up) + { +- struct tc_configuration *tc_config = &cfg->tc_config[0]; ++ struct ixgbe_dcb_tc_config *tc_config = &cfg->tc_config[0]; + u8 prio_mask = 1 << up; + u8 tc = cfg->num_tcs.pg_tcs; + +@@ -251,49 +267,240 @@ out: + return tc; + } + +-void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *cfg, int direction, u8 *map) ++void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *cfg, int direction, ++ u8 *map) + { + u8 up; + +- for (up = 0; up < MAX_USER_PRIORITY; up++) ++ for (up = 0; up < IXGBE_DCB_MAX_USER_PRIORITY; up++) + map[up] = ixgbe_dcb_get_tc_from_up(cfg, direction, up); + } + + /** +- * ixgbe_dcb_hw_config - Config and enable DCB ++ * ixgbe_dcb_config - Struct containing DCB settings. ++ * @dcb_config: Pointer to DCB config structure ++ * ++ * This function checks DCB rules for DCB settings. ++ * The following rules are checked: ++ * 1. The sum of bandwidth percentages of all Bandwidth Groups must total 100%. ++ * 2. The sum of bandwidth percentages of all Traffic Classes within a Bandwidth ++ * Group must total 100. ++ * 3. A Traffic Class should not be set to both Link Strict Priority ++ * and Group Strict Priority. ++ * 4. Link strict Bandwidth Groups can only have link strict traffic classes ++ * with zero bandwidth. 
++ */ ++s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *dcb_config) ++{ ++ struct ixgbe_dcb_tc_path *p; ++ s32 ret_val = IXGBE_SUCCESS; ++ u8 i, j, bw = 0, bw_id; ++ u8 bw_sum[2][IXGBE_DCB_MAX_BW_GROUP]; ++ bool link_strict[2][IXGBE_DCB_MAX_BW_GROUP]; ++ ++ memset(bw_sum, 0, sizeof(bw_sum)); ++ memset(link_strict, 0, sizeof(link_strict)); ++ ++ /* First Tx, then Rx */ ++ for (i = 0; i < 2; i++) { ++ /* Check each traffic class for rule violation */ ++ for (j = 0; j < IXGBE_DCB_MAX_TRAFFIC_CLASS; j++) { ++ p = &dcb_config->tc_config[j].path[i]; ++ ++ bw = p->bwg_percent; ++ bw_id = p->bwg_id; ++ ++ if (bw_id >= IXGBE_DCB_MAX_BW_GROUP) { ++ ret_val = IXGBE_ERR_CONFIG; ++ goto err_config; ++ } ++ if (p->tsa == ixgbe_dcb_tsa_strict) { ++ link_strict[i][bw_id] = true; ++ /* Link strict should have zero bandwidth */ ++ if (bw) { ++ ret_val = IXGBE_ERR_CONFIG; ++ goto err_config; ++ } ++ } else if (!bw) { ++ /* ++ * Traffic classes without link strict ++ * should have non-zero bandwidth. ++ */ ++ ret_val = IXGBE_ERR_CONFIG; ++ goto err_config; ++ } ++ bw_sum[i][bw_id] += bw; ++ } ++ ++ bw = 0; ++ ++ /* Check each bandwidth group for rule violation */ ++ for (j = 0; j < IXGBE_DCB_MAX_BW_GROUP; j++) { ++ bw += dcb_config->bw_percentage[i][j]; ++ /* ++ * Sum of bandwidth percentages of all traffic classes ++ * within a Bandwidth Group must total 100 except for ++ * link strict group (zero bandwidth). ++ */ ++ if (link_strict[i][j]) { ++ if (bw_sum[i][j]) { ++ /* ++ * Link strict group should have zero ++ * bandwidth. 
++ */ ++ ret_val = IXGBE_ERR_CONFIG; ++ goto err_config; ++ } ++ } else if (bw_sum[i][j] != IXGBE_DCB_BW_PERCENT && ++ bw_sum[i][j] != 0) { ++ ret_val = IXGBE_ERR_CONFIG; ++ goto err_config; ++ } ++ } ++ ++ if (bw != IXGBE_DCB_BW_PERCENT) { ++ ret_val = IXGBE_ERR_CONFIG; ++ goto err_config; ++ } ++ } ++ ++err_config: ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_dcb_get_tc_stats - Returns status of each traffic class ++ * @hw: pointer to hardware structure ++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the status data for each of the Traffic Classes in use. ++ */ ++s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ s32 ret = IXGBE_NOT_IMPLEMENTED; ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ ret = ixgbe_dcb_get_tc_stats_82598(hw, stats, tc_count); ++ break; ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ ret = ixgbe_dcb_get_tc_stats_82599(hw, stats, tc_count); ++ break; ++ default: ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * ixgbe_dcb_get_pfc_stats - Returns CBFC status of each traffic class ++ * @hw: pointer to hardware structure ++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the CBFC status data for each of the Traffic Classes. 
++ */ ++s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *hw, struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ s32 ret = IXGBE_NOT_IMPLEMENTED; ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ ret = ixgbe_dcb_get_pfc_stats_82598(hw, stats, tc_count); ++ break; ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ ret = ixgbe_dcb_get_pfc_stats_82599(hw, stats, tc_count); ++ break; ++ default: ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * ixgbe_dcb_config_rx_arbiter_cee - Config Rx arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * +- * Configure dcb settings and enable dcb mode. ++ * Configure Rx Data Arbiter and credits for each traffic class. + */ +-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, +- struct ixgbe_dcb_config *dcb_config) ++s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) + { +- s32 ret = 0; +- u8 pfc_en; +- u8 ptype[MAX_TRAFFIC_CLASS]; +- u8 bwgid[MAX_TRAFFIC_CLASS]; +- u8 prio_tc[MAX_TRAFFIC_CLASS]; +- u16 refill[MAX_TRAFFIC_CLASS]; +- u16 max[MAX_TRAFFIC_CLASS]; ++ s32 ret = IXGBE_NOT_IMPLEMENTED; ++ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; ++ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; ++ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; ++ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; ++ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS] = { 0 }; ++ ++ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); ++ ixgbe_dcb_unpack_max_cee(dcb_config, max); ++ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); ++ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); ++ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + +- /* Unpack CEE standard containers */ +- ixgbe_dcb_unpack_pfc(dcb_config, &pfc_en); +- ixgbe_dcb_unpack_refill(dcb_config, DCB_TX_CONFIG, refill); +- ixgbe_dcb_unpack_max(dcb_config, max); +- 
ixgbe_dcb_unpack_bwgid(dcb_config, DCB_TX_CONFIG, bwgid); +- ixgbe_dcb_unpack_prio(dcb_config, DCB_TX_CONFIG, ptype); +- ixgbe_dcb_unpack_map(dcb_config, DCB_TX_CONFIG, prio_tc); ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ ret = ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); ++ break; ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ ret = ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwgid, ++ tsa, map); ++ break; ++ default: ++ break; ++ } ++ return ret; ++} ++ ++/** ++ * ixgbe_dcb_config_tx_desc_arbiter_cee - Config Tx Desc arbiter ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Tx Descriptor Arbiter and credits for each traffic class. ++ */ ++s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ s32 ret = IXGBE_NOT_IMPLEMENTED; ++ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ ++ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); ++ ixgbe_dcb_unpack_max_cee(dcb_config, max); ++ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); ++ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: +- ret = ixgbe_dcb_hw_config_82598(hw, pfc_en, refill, max, +- bwgid, ptype); ++ ret = ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, ++ bwgid, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: +- ret = ixgbe_dcb_hw_config_82599(hw, pfc_en, refill, max, +- bwgid, ptype, prio_tc); ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ ret = ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, ++ bwgid, tsa); + break; + default: + break; +@@ -301,18 +508,42 @@ s32 
ixgbe_dcb_hw_config(struct ixgbe_hw *hw, + return ret; + } + +-/* Helper routines to abstract HW specifics from DCB netlink ops */ +-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) ++/** ++ * ixgbe_dcb_config_tx_data_arbiter_cee - Config Tx data arbiter ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure Tx Data Arbiter and credits for each traffic class. ++ */ ++s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) + { +- int ret = -EINVAL; ++ s32 ret = IXGBE_NOT_IMPLEMENTED; ++ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; ++ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ ++ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); ++ ixgbe_dcb_unpack_max_cee(dcb_config, max); ++ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); ++ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); ++ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: +- ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); ++ ret = ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, ++ bwgid, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: +- ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ ret = ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, ++ bwgid, tsa, ++ map); + break; + default: + break; +@@ -320,85 +551,168 @@ s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) + return ret; + } + +-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) ++/** ++ * ixgbe_dcb_config_pfc_cee - Config priority flow control ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to 
ixgbe_dcb_config structure ++ * ++ * Configure Priority Flow Control for each traffic class. ++ */ ++s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) + { +- __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; +- __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; +- int i; ++ s32 ret = IXGBE_NOT_IMPLEMENTED; ++ u8 pfc_en; ++ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + +- /* naively give each TC a bwg to map onto CEE hardware */ +- __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; ++ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); ++ ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); + +- /* Map TSA onto CEE prio type */ +- for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { +- switch (ets->tc_tsa[i]) { +- case IEEE_8021QAZ_TSA_STRICT: +- prio_type[i] = 2; +- break; +- case IEEE_8021QAZ_TSA_ETS: +- prio_type[i] = 0; +- break; +- default: +- /* Hardware only supports priority strict or +- * ETS transmission selection algorithms if +- * we receive some other value from dcbnl +- * throw an error +- */ +- return -EINVAL; +- } ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); ++ break; ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); ++ break; ++ default: ++ break; + } ++ return ret; ++} + +- ixgbe_ieee_credits(ets->tc_tx_bw, refill, max, max_frame); +- return ixgbe_dcb_hw_ets_config(hw, refill, max, +- bwg_id, prio_type, ets->prio_tc); ++/** ++ * ixgbe_dcb_config_tc_stats - Config traffic class statistics ++ * @hw: pointer to hardware structure ++ * ++ * Configure queue statistics registers, all queues belonging to same traffic ++ * class uses a single set of queue statistics counters. 
++ */ ++s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *hw) ++{ ++ s32 ret = IXGBE_NOT_IMPLEMENTED; ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ ret = ixgbe_dcb_config_tc_stats_82598(hw); ++ break; ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ ret = ixgbe_dcb_config_tc_stats_82599(hw, NULL); ++ break; ++ default: ++ break; ++ } ++ return ret; + } + +-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, +- u16 *refill, u16 *max, u8 *bwg_id, +- u8 *prio_type, u8 *prio_tc) ++/** ++ * ixgbe_dcb_hw_config_cee - Config and enable DCB ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure dcb settings and enable dcb mode. ++ */ ++s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) + { ++ s32 ret = IXGBE_NOT_IMPLEMENTED; ++ u8 pfc_en; ++ u8 tsa[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ u8 bwgid[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ u8 map[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; ++ u16 refill[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ u16 max[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ ++ /* Unpack CEE standard containers */ ++ ixgbe_dcb_unpack_refill_cee(dcb_config, IXGBE_DCB_TX_CONFIG, refill); ++ ixgbe_dcb_unpack_max_cee(dcb_config, max); ++ ixgbe_dcb_unpack_bwgid_cee(dcb_config, IXGBE_DCB_TX_CONFIG, bwgid); ++ ixgbe_dcb_unpack_tsa_cee(dcb_config, IXGBE_DCB_TX_CONFIG, tsa); ++ ixgbe_dcb_unpack_map_cee(dcb_config, IXGBE_DCB_TX_CONFIG, map); ++ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: +- ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, +- prio_type); +- ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, +- bwg_id, prio_type); +- ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, +- bwg_id, prio_type); ++ ret = ixgbe_dcb_hw_config_82598(hw, dcb_config->link_speed, ++ refill, max, bwgid, tsa); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: +- ixgbe_dcb_config_rx_arbiter_82599(hw, refill, 
max, +- bwg_id, prio_type, prio_tc); +- ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, +- bwg_id, prio_type); +- ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, +- prio_type, prio_tc); ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ ixgbe_dcb_config_82599(hw, dcb_config); ++ ret = ixgbe_dcb_hw_config_82599(hw, dcb_config->link_speed, ++ refill, max, bwgid, ++ tsa, map); ++ ++ ixgbe_dcb_config_tc_stats_82599(hw, dcb_config); + break; + default: + break; + } +- return 0; ++ ++ if (!ret && dcb_config->pfc_mode_enable) { ++ ixgbe_dcb_unpack_pfc_cee(dcb_config, map, &pfc_en); ++ ret = ixgbe_dcb_config_pfc(hw, pfc_en, map); ++ } ++ ++ return ret; + } + +-static void ixgbe_dcb_read_rtrup2tc_82599(struct ixgbe_hw *hw, u8 *map) ++/* Helper routines to abstract HW specifics from DCB netlink ops */ ++s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) + { +- u32 reg, i; ++ int ret = IXGBE_ERR_PARAM; + +- reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); +- for (i = 0; i < MAX_USER_PRIORITY; i++) +- map[i] = IXGBE_RTRUP2TC_UP_MASK & +- (reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT)); ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ ret = ixgbe_dcb_config_pfc_82598(hw, pfc_en); ++ break; ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ ret = ixgbe_dcb_config_pfc_82599(hw, pfc_en, map); ++ break; ++ default: ++ break; ++ } ++ return ret; + } + +-void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map) ++s32 ixgbe_dcb_hw_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, ++ u8 *bwg_id, u8 *tsa, u8 *map) + { + switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); ++ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, ++ tsa); ++ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, ++ tsa); ++ break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: +- 
ixgbe_dcb_read_rtrup2tc_82599(hw, map); ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, ++ tsa, map); ++ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, ++ tsa); ++ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, ++ tsa, map); + break; + default: + break; + } ++ return 0; + } +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +index fc0a2dd..d19c3c2 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb.h +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ +@@ -26,146 +22,144 @@ + + *******************************************************************************/ + +-#ifndef _DCB_CONFIG_H_ +-#define _DCB_CONFIG_H_ ++#ifndef _IXGBE_DCB_H_ ++#define _IXGBE_DCB_H_ + +-#include + #include "ixgbe_type.h" + +-/* DCB data structures */ +- +-#define IXGBE_MAX_PACKET_BUFFERS 8 +-#define MAX_USER_PRIORITY 8 +-#define MAX_BW_GROUP 8 +-#define BW_PERCENT 100 +- +-#define DCB_TX_CONFIG 0 +-#define DCB_RX_CONFIG 1 +- +-/* DCB error Codes */ +-#define DCB_SUCCESS 0 +-#define DCB_ERR_CONFIG -1 +-#define DCB_ERR_PARAM -2 +- +-/* Transmit and receive Errors */ +-/* Error in bandwidth group allocation */ +-#define DCB_ERR_BW_GROUP -3 +-/* Error in traffic class bandwidth allocation */ +-#define DCB_ERR_TC_BW -4 +-/* Traffic class has both link strict and group strict enabled */ +-#define DCB_ERR_LS_GS -5 +-/* Link strict traffic class has non zero bandwidth */ +-#define DCB_ERR_LS_BW_NONZERO -6 +-/* Link strict bandwidth group has non zero bandwidth */ +-#define DCB_ERR_LS_BWG_NONZERO -7 +-/* Traffic class has zero bandwidth */ +-#define DCB_ERR_TC_BW_ZERO -8 +- +-#define DCB_NOT_IMPLEMENTED 0x7FFFFFFF +- +-struct dcb_pfc_tc_debug { +- u8 tc; +- u8 pause_status; +- u64 pause_quanta; +-}; ++/* DCB defines */ ++/* DCB credit calculation defines */ ++#define IXGBE_DCB_CREDIT_QUANTUM 64 ++#define IXGBE_DCB_MAX_CREDIT_REFILL 200 /* 200 * 64B = 12800B */ ++#define IXGBE_DCB_MAX_TSO_SIZE (32 * 1024) /* Max TSO pkt size in DCB*/ ++#define IXGBE_DCB_MAX_CREDIT (2 * IXGBE_DCB_MAX_CREDIT_REFILL) + +-enum strict_prio_type { +- prio_none = 0, +- prio_group, +- prio_link +-}; ++/* 513 for 32KB TSO packet */ ++#define IXGBE_DCB_MIN_TSO_CREDIT \ ++ ((IXGBE_DCB_MAX_TSO_SIZE / IXGBE_DCB_CREDIT_QUANTUM) + 1) + +-/* DCB capability definitions */ +-#define IXGBE_DCB_PG_SUPPORT 0x00000001 +-#define IXGBE_DCB_PFC_SUPPORT 0x00000002 +-#define IXGBE_DCB_BCN_SUPPORT 0x00000004 +-#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 +-#define IXGBE_DCB_GSP_SUPPORT 
0x00000010 ++/* DCB configuration defines */ ++#define IXGBE_DCB_MAX_USER_PRIORITY 8 ++#define IXGBE_DCB_MAX_BW_GROUP 8 ++#define IXGBE_DCB_BW_PERCENT 100 + +-#define IXGBE_DCB_8_TC_SUPPORT 0x80 ++#define IXGBE_DCB_TX_CONFIG 0 ++#define IXGBE_DCB_RX_CONFIG 1 + +-struct dcb_support { +- /* DCB capabilities */ +- u32 capabilities; ++/* DCB capability defines */ ++#define IXGBE_DCB_PG_SUPPORT 0x00000001 ++#define IXGBE_DCB_PFC_SUPPORT 0x00000002 ++#define IXGBE_DCB_BCN_SUPPORT 0x00000004 ++#define IXGBE_DCB_UP2TC_SUPPORT 0x00000008 ++#define IXGBE_DCB_GSP_SUPPORT 0x00000010 ++ ++struct ixgbe_dcb_support { ++ u32 capabilities; /* DCB capabilities */ + + /* Each bit represents a number of TCs configurable in the hw. +- * If 8 traffic classes can be configured, the value is 0x80. +- */ +- u8 traffic_classes; +- u8 pfc_traffic_classes; ++ * If 8 traffic classes can be configured, the value is 0x80. */ ++ u8 traffic_classes; ++ u8 pfc_traffic_classes; ++}; ++ ++enum ixgbe_dcb_tsa { ++ ixgbe_dcb_tsa_ets = 0, ++ ixgbe_dcb_tsa_group_strict_cee, ++ ixgbe_dcb_tsa_strict + }; + + /* Traffic class bandwidth allocation per direction */ +-struct tc_bw_alloc { +- u8 bwg_id; /* Bandwidth Group (BWG) ID */ +- u8 bwg_percent; /* % of BWG's bandwidth */ +- u8 link_percent; /* % of link bandwidth */ +- u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ +- u16 data_credits_refill; /* Credit refill amount in 64B granularity */ +- u16 data_credits_max; /* Max credits for a configured packet buffer +- * in 64B granularity.*/ +- enum strict_prio_type prio_type; /* Link or Group Strict Priority */ ++struct ixgbe_dcb_tc_path { ++ u8 bwg_id; /* Bandwidth Group (BWG) ID */ ++ u8 bwg_percent; /* % of BWG's bandwidth */ ++ u8 link_percent; /* % of link bandwidth */ ++ u8 up_to_tc_bitmap; /* User Priority to Traffic Class mapping */ ++ u16 data_credits_refill; /* Credit refill amount in 64B granularity */ ++ u16 data_credits_max; /* Max credits for a configured packet buffer ++ * in 
64B granularity.*/ ++ enum ixgbe_dcb_tsa tsa; /* Link or Group Strict Priority */ + }; + +-enum dcb_pfc_type { +- pfc_disabled = 0, +- pfc_enabled_full, +- pfc_enabled_tx, +- pfc_enabled_rx ++enum ixgbe_dcb_pfc { ++ ixgbe_dcb_pfc_disabled = 0, ++ ixgbe_dcb_pfc_enabled, ++ ixgbe_dcb_pfc_enabled_txonly, ++ ixgbe_dcb_pfc_enabled_rxonly + }; + + /* Traffic class configuration */ +-struct tc_configuration { +- struct tc_bw_alloc path[2]; /* One each for Tx/Rx */ +- enum dcb_pfc_type dcb_pfc; /* Class based flow control setting */ ++struct ixgbe_dcb_tc_config { ++ struct ixgbe_dcb_tc_path path[2]; /* One each for Tx/Rx */ ++ enum ixgbe_dcb_pfc pfc; /* Class based flow control setting */ + + u16 desc_credits_max; /* For Tx Descriptor arbitration */ + u8 tc; /* Traffic class (TC) */ + }; + +-struct dcb_num_tcs { ++enum ixgbe_dcb_pba { ++ /* PBA[0-7] each use 64KB FIFO */ ++ ixgbe_dcb_pba_equal = PBA_STRATEGY_EQUAL, ++ /* PBA[0-3] each use 80KB, PBA[4-7] each use 48KB */ ++ ixgbe_dcb_pba_80_48 = PBA_STRATEGY_WEIGHTED ++}; ++ ++struct ixgbe_dcb_num_tcs { + u8 pg_tcs; + u8 pfc_tcs; + }; + + struct ixgbe_dcb_config { +- struct dcb_support support; +- struct dcb_num_tcs num_tcs; +- struct tc_configuration tc_config[MAX_TRAFFIC_CLASS]; +- u8 bw_percentage[2][MAX_BW_GROUP]; /* One each for Tx/Rx */ +- bool pfc_mode_enable; +- +- u32 dcb_cfg_version; /* Not used...OS-specific? */ +- u32 link_speed; /* For bandwidth allocation validation purpose */ ++ struct ixgbe_dcb_tc_config tc_config[IXGBE_DCB_MAX_TRAFFIC_CLASS]; ++ struct ixgbe_dcb_support support; ++ struct ixgbe_dcb_num_tcs num_tcs; ++ u8 bw_percentage[2][IXGBE_DCB_MAX_BW_GROUP]; /* One each for Tx/Rx */ ++ bool pfc_mode_enable; ++ bool round_robin_enable; ++ ++ enum ixgbe_dcb_pba rx_pba_cfg; ++ ++ u32 dcb_cfg_version; /* Not used...OS-specific? 
*/ ++ u32 link_speed; /* For bandwidth allocation validation purpose */ ++ bool vt_mode; + }; + + /* DCB driver APIs */ +-void ixgbe_dcb_unpack_pfc(struct ixgbe_dcb_config *cfg, u8 *pfc_en); +-void ixgbe_dcb_unpack_refill(struct ixgbe_dcb_config *, int, u16 *); +-void ixgbe_dcb_unpack_max(struct ixgbe_dcb_config *, u16 *); +-void ixgbe_dcb_unpack_bwgid(struct ixgbe_dcb_config *, int, u8 *); +-void ixgbe_dcb_unpack_prio(struct ixgbe_dcb_config *, int, u8 *); +-void ixgbe_dcb_unpack_map(struct ixgbe_dcb_config *, int, u8 *); +-u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); ++ ++/* DCB rule checking */ ++s32 ixgbe_dcb_check_config_cee(struct ixgbe_dcb_config *); + + /* DCB credits calculation */ +-s32 ixgbe_dcb_calculate_tc_credits(struct ixgbe_hw *, +- struct ixgbe_dcb_config *, int, u8); +- +-/* DCB hw initialization */ +-s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max); +-s32 ixgbe_dcb_hw_ets_config(struct ixgbe_hw *hw, u16 *refill, u16 *max, +- u8 *bwg_id, u8 *prio_type, u8 *tc_prio); +-s32 ixgbe_dcb_hw_pfc_config(struct ixgbe_hw *hw, u8 pfc_en, u8 *tc_prio); +-s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, struct ixgbe_dcb_config *); +- +-void ixgbe_dcb_read_rtrup2tc(struct ixgbe_hw *hw, u8 *map); +- +-/* DCB definitions for credit calculation */ +-#define DCB_CREDIT_QUANTUM 64 /* DCB Quantum */ +-#define MAX_CREDIT_REFILL 511 /* 0x1FF * 64B = 32704B */ +-#define DCB_MAX_TSO_SIZE (32*1024) /* MAX TSO packet size supported in DCB mode */ +-#define MINIMUM_CREDIT_FOR_TSO (DCB_MAX_TSO_SIZE/64 + 1) /* 513 for 32KB TSO packet */ +-#define MAX_CREDIT 4095 /* Maximum credit supported: 256KB * 1204 / 64B */ +- +-#endif /* _DCB_CONFIG_H */ ++s32 ixgbe_dcb_calculate_tc_credits(u8 *, u16 *, u16 *, int); ++s32 ixgbe_dcb_calculate_tc_credits_cee(struct ixgbe_hw *, ++ struct ixgbe_dcb_config *, u32, u8); ++ ++/* DCB PFC */ ++s32 ixgbe_dcb_config_pfc(struct ixgbe_hw *, u8, u8 *); ++s32 ixgbe_dcb_config_pfc_cee(struct ixgbe_hw *, struct 
ixgbe_dcb_config *); ++ ++/* DCB stats */ ++s32 ixgbe_dcb_config_tc_stats(struct ixgbe_hw *); ++s32 ixgbe_dcb_get_tc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); ++s32 ixgbe_dcb_get_pfc_stats(struct ixgbe_hw *, struct ixgbe_hw_stats *, u8); ++ ++/* DCB config arbiters */ ++s32 ixgbe_dcb_config_tx_desc_arbiter_cee(struct ixgbe_hw *, ++ struct ixgbe_dcb_config *); ++s32 ixgbe_dcb_config_tx_data_arbiter_cee(struct ixgbe_hw *, ++ struct ixgbe_dcb_config *); ++s32 ixgbe_dcb_config_rx_arbiter_cee(struct ixgbe_hw *, ++ struct ixgbe_dcb_config *); ++ ++/* DCB unpack routines */ ++void ixgbe_dcb_unpack_pfc_cee(struct ixgbe_dcb_config *, u8 *, u8 *); ++void ixgbe_dcb_unpack_refill_cee(struct ixgbe_dcb_config *, int, u16 *); ++void ixgbe_dcb_unpack_max_cee(struct ixgbe_dcb_config *, u16 *); ++void ixgbe_dcb_unpack_bwgid_cee(struct ixgbe_dcb_config *, int, u8 *); ++void ixgbe_dcb_unpack_tsa_cee(struct ixgbe_dcb_config *, int, u8 *); ++void ixgbe_dcb_unpack_map_cee(struct ixgbe_dcb_config *, int, u8 *); ++u8 ixgbe_dcb_get_tc_from_up(struct ixgbe_dcb_config *, int, u8); ++ ++/* DCB initialization */ ++s32 ixgbe_dcb_hw_config(struct ixgbe_hw *, u16 *, u16 *, u8 *, u8 *, u8 *); ++s32 ixgbe_dcb_hw_config_cee(struct ixgbe_hw *, struct ixgbe_dcb_config *); ++#endif /* _IXGBE_DCB_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +index d3ba63f..b1d8df9 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -26,27 +22,96 @@ + + *******************************************************************************/ + +-#include "ixgbe.h" ++ + #include "ixgbe_type.h" + #include "ixgbe_dcb.h" + #include "ixgbe_dcb_82598.h" + + /** ++ * ixgbe_dcb_get_tc_stats_82598 - Return status data for each traffic class ++ * @hw: pointer to hardware structure ++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the status data for each of the Traffic Classes in use. ++ */ ++s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ int tc; ++ ++ DEBUGFUNC("dcb_get_tc_stats"); ++ ++ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) ++ return IXGBE_ERR_PARAM; ++ ++ /* Statistics pertaining to each traffic class */ ++ for (tc = 0; tc < tc_count; tc++) { ++ /* Transmitted Packets */ ++ stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); ++ /* Transmitted Bytes */ ++ stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC(tc)); ++ /* Received Packets */ ++ stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); ++ /* Received Bytes */ ++ stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC(tc)); ++ ++#if 0 ++ /* Can we get rid of these?? Consequently, getting rid ++ * of the tc_stats structure. 
++ */ ++ tc_stats_array[up]->in_overflow_discards = 0; ++ tc_stats_array[up]->out_overflow_discards = 0; ++#endif ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_dcb_get_pfc_stats_82598 - Returns CBFC status data ++ * @hw: pointer to hardware structure ++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the CBFC status data for each of the Traffic Classes. ++ */ ++s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ int tc; ++ ++ DEBUGFUNC("dcb_get_pfc_stats"); ++ ++ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) ++ return IXGBE_ERR_PARAM; ++ ++ for (tc = 0; tc < tc_count; tc++) { ++ /* Priority XOFF Transmitted */ ++ stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); ++ /* Priority XOFF Received */ ++ stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(tc)); ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** + * ixgbe_dcb_config_rx_arbiter_82598 - Config Rx data arbiter + * @hw: pointer to hardware structure + * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Rx Data Arbiter and credits for each traffic class. 
+ */ +-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *prio_type) ++s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, u16 *refill, ++ u16 *max, u8 *tsa) + { +- u32 reg = 0; +- u32 credit_refill = 0; +- u32 credit_max = 0; +- u8 i = 0; ++ u32 reg = 0; ++ u32 credit_refill = 0; ++ u32 credit_max = 0; ++ u8 i = 0; + + reg = IXGBE_READ_REG(hw, IXGBE_RUPPBMR) | IXGBE_RUPPBMR_MQA; + IXGBE_WRITE_REG(hw, IXGBE_RUPPBMR, reg); +@@ -62,13 +127,13 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, + IXGBE_WRITE_REG(hw, IXGBE_RMCS, reg); + + /* Configure traffic class credits and priority */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; +- credit_max = max[i]; ++ credit_max = max[i]; + + reg = credit_refill | (credit_max << IXGBE_RT2CR_MCL_SHIFT); + +- if (prio_type[i] == prio_link) ++ if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RT2CR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RT2CR(i), reg); +@@ -85,7 +150,7 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, + reg &= ~IXGBE_RXCTRL_DMBYPS; + IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -96,13 +161,11 @@ s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, + * Configure Tx Descriptor Arbiter and credits for each traffic class. 
+ */ + s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *bwg_id, +- u8 *prio_type) ++ u16 *refill, u16 *max, u8 *bwg_id, ++ u8 *tsa) + { +- u32 reg, max_credits; +- u8 i; ++ u32 reg, max_credits; ++ u8 i; + + reg = IXGBE_READ_REG(hw, IXGBE_DPMCS); + +@@ -116,22 +179,22 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + IXGBE_WRITE_REG(hw, IXGBE_DPMCS, reg); + + /* Configure traffic class credits and priority */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_TDTQ2TCCR_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_TDTQ2TCCR_BWG_SHIFT; + +- if (prio_type[i] == prio_group) ++ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_TDTQ2TCCR_GSP; + +- if (prio_type[i] == prio_link) ++ if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_TDTQ2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDTQ2TCCR(i), reg); + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -142,10 +205,8 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, + * Configure Tx Data Arbiter and credits for each traffic class. 
+ */ + s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *bwg_id, +- u8 *prio_type) ++ u16 *refill, u16 *max, u8 *bwg_id, ++ u8 *tsa) + { + u32 reg; + u8 i; +@@ -159,15 +220,15 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + IXGBE_WRITE_REG(hw, IXGBE_PDPMCS, reg); + + /* Configure traffic class credits and priority */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_TDPT2TCCR_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_TDPT2TCCR_BWG_SHIFT; + +- if (prio_type[i] == prio_group) ++ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_TDPT2TCCR_GSP; + +- if (prio_type[i] == prio_link) ++ if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_TDPT2TCCR_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_TDPT2TCCR(i), reg); +@@ -178,7 +239,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + reg |= IXGBE_DTXCTL_ENDBUBD; + IXGBE_WRITE_REG(hw, IXGBE_DTXCTL, reg); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -191,7 +252,7 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, + s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) + { + u32 fcrtl, reg; +- u8 i; ++ u8 i; + + /* Enable Transmit Priority Flow Control */ + reg = IXGBE_READ_REG(hw, IXGBE_RMCS); +@@ -209,7 +270,7 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg); + + /* Configure PFC Tx thresholds per TC */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + if (!(pfc_en & (1 << i))) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH(i), 0); +@@ -223,15 +284,14 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) + } + + /* Configure pause time */ +- reg = hw->fc.pause_time * 0x00010001; +- for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) ++ reg = hw->fc.pause_time 
| (hw->fc.pause_time << 16); ++ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + +- +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -241,11 +301,11 @@ s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *hw, u8 pfc_en) + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +-static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) ++s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) + { + u32 reg = 0; +- u8 i = 0; +- u8 j = 0; ++ u8 i = 0; ++ u8 j = 0; + + /* Receive Queues stats setting - 8 queues per statistics reg */ + for (i = 0, j = 0; i < 15 && j < 8; i = i + 2, j++) { +@@ -256,14 +316,14 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) + reg |= ((0x1010101) * j); + IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i + 1), reg); + } +- /* Transmit Queues stats setting - 4 queues per statistics reg */ ++ /* Transmit Queues stats setting - 4 queues per statistics reg*/ + for (i = 0; i < 8; i++) { + reg = IXGBE_READ_REG(hw, IXGBE_TQSMR(i)); + reg |= ((0x1010101) * i); + IXGBE_WRITE_REG(hw, IXGBE_TQSMR(i), reg); + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -273,16 +333,18 @@ static s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *hw) + * + * Configure dcb settings and enable dcb mode. 
+ */ +-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +- u16 *max, u8 *bwg_id, u8 *prio_type) ++s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, int link_speed, ++ u16 *refill, u16 *max, u8 *bwg_id, ++ u8 *tsa) + { +- ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, prio_type); +- ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, +- bwg_id, prio_type); +- ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, +- bwg_id, prio_type); +- ixgbe_dcb_config_pfc_82598(hw, pfc_en); ++ UNREFERENCED_1PARAMETER(link_speed); ++ ++ ixgbe_dcb_config_rx_arbiter_82598(hw, refill, max, tsa); ++ ixgbe_dcb_config_tx_desc_arbiter_82598(hw, refill, max, bwg_id, ++ tsa); ++ ixgbe_dcb_config_tx_data_arbiter_82598(hw, refill, max, bwg_id, ++ tsa); + ixgbe_dcb_config_tc_stats_82598(hw); + +- return 0; ++ return IXGBE_SUCCESS; + } +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h +index 3164f54..d340a69 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82598.h +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ +@@ -26,72 +22,69 @@ + + *******************************************************************************/ + +-#ifndef _DCB_82598_CONFIG_H_ +-#define _DCB_82598_CONFIG_H_ ++#ifndef _IXGBE_DCB_82598_H_ ++#define _IXGBE_DCB_82598_H_ + + /* DCB register definitions */ + +-#define IXGBE_DPMCS_MTSOS_SHIFT 16 +-#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, 1 DFP - Deficit Fixed Priority */ +-#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ +-#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +-#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ +- +-#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ +- +-#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +-#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ +- +-#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet buffers enable */ +-#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores (RSS) enable */ +- +-#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 +-#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 +-#define IXGBE_TDTQ2TCCR_GSP 0x40000000 +-#define IXGBE_TDTQ2TCCR_LSP 0x80000000 +- +-#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 +-#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 +-#define IXGBE_TDPT2TCCR_GSP 0x40000000 +-#define IXGBE_TDPT2TCCR_LSP 0x80000000 +- +-#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, 1 for DFP - Deficit Fixed Priority */ +-#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ +-#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ +- +-#define IXGBE_DTXCTL_ENDBUBD 0x00000004 /* Enable DBU buffer division */ +- +-#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +-#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +-#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +-#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ +- +-#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 +- +-/* DCB hardware-specific driver APIs */ +- 
+-/* DCB PFC functions */ +-s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8 pfc_en); +- +-/* DCB hw initialization */ +-s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *prio_type); +- +-s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *bwg_id, +- u8 *prio_type); +- +-s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *bwg_id, +- u8 *prio_type); +- +-s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +- u16 *max, u8 *bwg_id, u8 *prio_type); +- +-#endif /* _DCB_82598_CONFIG_H */ ++#define IXGBE_DPMCS_MTSOS_SHIFT 16 ++#define IXGBE_DPMCS_TDPAC 0x00000001 /* 0 Round Robin, ++ * 1 DFP - Deficit Fixed Priority */ ++#define IXGBE_DPMCS_TRM 0x00000010 /* Transmit Recycle Mode */ ++#define IXGBE_DPMCS_ARBDIS 0x00000040 /* DCB arbiter disable */ ++#define IXGBE_DPMCS_TSOEF 0x00080000 /* TSO Expand Factor: 0=x4, 1=x2 */ ++ ++#define IXGBE_RUPPBMR_MQA 0x80000000 /* Enable UP to queue mapping */ ++ ++#define IXGBE_RT2CR_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ ++#define IXGBE_RT2CR_LSP 0x80000000 /* LSP enable bit */ ++ ++#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet ++ * buffers enable */ ++#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores ++ * (RSS) enable */ ++ ++#define IXGBE_TDTQ2TCCR_MCL_SHIFT 12 ++#define IXGBE_TDTQ2TCCR_BWG_SHIFT 9 ++#define IXGBE_TDTQ2TCCR_GSP 0x40000000 ++#define IXGBE_TDTQ2TCCR_LSP 0x80000000 ++ ++#define IXGBE_TDPT2TCCR_MCL_SHIFT 12 ++#define IXGBE_TDPT2TCCR_BWG_SHIFT 9 ++#define IXGBE_TDPT2TCCR_GSP 0x40000000 ++#define IXGBE_TDPT2TCCR_LSP 0x80000000 ++ ++#define IXGBE_PDPMCS_TPPAC 0x00000020 /* 0 Round Robin, ++ * 1 DFP - Deficit Fixed Priority */ ++#define IXGBE_PDPMCS_ARBDIS 0x00000040 /* Arbiter disable */ ++#define IXGBE_PDPMCS_TRM 0x00000100 /* Transmit Recycle Mode enable */ ++ ++#define IXGBE_DTXCTL_ENDBUBD 
0x00000004 /* Enable DBU buffer division */ ++ ++#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ ++ ++/* DCB driver APIs */ ++ ++/* DCB PFC */ ++s32 ixgbe_dcb_config_pfc_82598(struct ixgbe_hw *, u8); ++ ++/* DCB stats */ ++s32 ixgbe_dcb_config_tc_stats_82598(struct ixgbe_hw *); ++s32 ixgbe_dcb_get_tc_stats_82598(struct ixgbe_hw *, ++ struct ixgbe_hw_stats *, u8); ++s32 ixgbe_dcb_get_pfc_stats_82598(struct ixgbe_hw *, ++ struct ixgbe_hw_stats *, u8); ++ ++/* DCB config arbiters */ ++s32 ixgbe_dcb_config_tx_desc_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, ++ u8 *, u8 *); ++s32 ixgbe_dcb_config_tx_data_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, ++ u8 *, u8 *); ++s32 ixgbe_dcb_config_rx_arbiter_82598(struct ixgbe_hw *, u16 *, u16 *, u8 *); ++ ++/* DCB initialization */ ++s32 ixgbe_dcb_hw_config_82598(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, u8 *); ++#endif /* _IXGBE_DCB_82958_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +index 3b932fe..b0c5e52 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -26,32 +22,96 @@ + + *******************************************************************************/ + +-#include "ixgbe.h" ++ + #include "ixgbe_type.h" + #include "ixgbe_dcb.h" + #include "ixgbe_dcb_82599.h" + + /** ++ * ixgbe_dcb_get_tc_stats_82599 - Returns status for each traffic class ++ * @hw: pointer to hardware structure ++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the status data for each of the Traffic Classes in use. ++ */ ++s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ int tc; ++ ++ DEBUGFUNC("dcb_get_tc_stats"); ++ ++ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) ++ return IXGBE_ERR_PARAM; ++ ++ /* Statistics pertaining to each traffic class */ ++ for (tc = 0; tc < tc_count; tc++) { ++ /* Transmitted Packets */ ++ stats->qptc[tc] += IXGBE_READ_REG(hw, IXGBE_QPTC(tc)); ++ /* Transmitted Bytes (read low first to prevent missed carry) */ ++ stats->qbtc[tc] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(tc)); ++ stats->qbtc[tc] += ++ (((u64)(IXGBE_READ_REG(hw, IXGBE_QBTC_H(tc)))) << 32); ++ /* Received Packets */ ++ stats->qprc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRC(tc)); ++ /* Received Bytes (read low first to prevent missed carry) */ ++ stats->qbrc[tc] += IXGBE_READ_REG(hw, IXGBE_QBRC_L(tc)); ++ stats->qbrc[tc] += ++ (((u64)(IXGBE_READ_REG(hw, IXGBE_QBRC_H(tc)))) << 32); ++ ++ /* Received Dropped Packet */ ++ stats->qprdc[tc] += IXGBE_READ_REG(hw, IXGBE_QPRDC(tc)); ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_dcb_get_pfc_stats_82599 - Return CBFC status data ++ * @hw: pointer to hardware structure 
++ * @stats: pointer to statistics structure ++ * @tc_count: Number of elements in bwg_array. ++ * ++ * This function returns the CBFC status data for each of the Traffic Classes. ++ */ ++s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *hw, ++ struct ixgbe_hw_stats *stats, ++ u8 tc_count) ++{ ++ int tc; ++ ++ DEBUGFUNC("dcb_get_pfc_stats"); ++ ++ if (tc_count > IXGBE_DCB_MAX_TRAFFIC_CLASS) ++ return IXGBE_ERR_PARAM; ++ ++ for (tc = 0; tc < tc_count; tc++) { ++ /* Priority XOFF Transmitted */ ++ stats->pxofftxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFTXC(tc)); ++ /* Priority XOFF Received */ ++ stats->pxoffrxc[tc] += IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(tc)); ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** + * ixgbe_dcb_config_rx_arbiter_82599 - Config Rx Data arbiter + * @hw: pointer to hardware structure +- * @refill: refill credits index by traffic class +- * @max: max credits index by traffic class +- * @bwg_id: bandwidth grouping indexed by traffic class +- * @prio_type: priority type indexed by traffic class ++ * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Rx Packet Arbiter and credits for each traffic class. + */ +-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *bwg_id, +- u8 *prio_type, +- u8 *prio_tc) ++s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, ++ u16 *max, u8 *bwg_id, u8 *tsa, ++ u8 *map) + { +- u32 reg = 0; +- u32 credit_refill = 0; +- u32 credit_max = 0; +- u8 i = 0; ++ u32 reg = 0; ++ u32 credit_refill = 0; ++ u32 credit_max = 0; ++ u8 i = 0; + + /* + * Disable the arbiter before changing parameters +@@ -60,21 +120,27 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC | IXGBE_RTRPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + +- /* Map all traffic classes to their UP */ ++ /* ++ * map all UPs to TCs. 
up_to_tc_bitmap for each TC has corresponding ++ * bits sets for the UPs that needs to be mappped to that TC. ++ * e.g if priorities 6 and 7 are to be mapped to a TC then the ++ * up_to_tc_bitmap value for that TC will be 11000000 in binary. ++ */ + reg = 0; +- for (i = 0; i < MAX_USER_PRIORITY; i++) +- reg |= (prio_tc[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); ++ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) ++ reg |= (map[i] << (i * IXGBE_RTRUP2TC_UP_SHIFT)); ++ + IXGBE_WRITE_REG(hw, IXGBE_RTRUP2TC, reg); + + /* Configure traffic class credits and priority */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + credit_refill = refill[i]; +- credit_max = max[i]; ++ credit_max = max[i]; + reg = credit_refill | (credit_max << IXGBE_RTRPT4C_MCL_SHIFT); + + reg |= (u32)(bwg_id[i]) << IXGBE_RTRPT4C_BWG_SHIFT; + +- if (prio_type[i] == prio_link) ++ if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RTRPT4C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTRPT4C(i), reg); +@@ -87,27 +153,21 @@ s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, + reg = IXGBE_RTRPCS_RRM | IXGBE_RTRPCS_RAC; + IXGBE_WRITE_REG(hw, IXGBE_RTRPCS, reg); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** + * ixgbe_dcb_config_tx_desc_arbiter_82599 - Config Tx Desc. arbiter + * @hw: pointer to hardware structure +- * @refill: refill credits index by traffic class +- * @max: max credits index by traffic class +- * @bwg_id: bandwidth grouping indexed by traffic class +- * @prio_type: priority type indexed by traffic class ++ * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Descriptor Arbiter and credits for each traffic class. 
+ */ +-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *bwg_id, +- u8 *prio_type) ++s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, ++ u16 *max, u8 *bwg_id, u8 *tsa) + { +- u32 reg, max_credits; +- u8 i; ++ u32 reg, max_credits; ++ u8 i; + + /* Clear the per-Tx queue credits; we use per-TC instead */ + for (i = 0; i < 128; i++) { +@@ -116,16 +176,16 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, + } + + /* Configure traffic class credits and priority */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + max_credits = max[i]; + reg = max_credits << IXGBE_RTTDT2C_MCL_SHIFT; + reg |= refill[i]; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTDT2C_BWG_SHIFT; + +- if (prio_type[i] == prio_group) ++ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_RTTDT2C_GSP; + +- if (prio_type[i] == prio_link) ++ if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RTTDT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTDT2C(i), reg); +@@ -138,25 +198,19 @@ s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, + reg = IXGBE_RTTDCS_TDPAC | IXGBE_RTTDCS_TDRM; + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** + * ixgbe_dcb_config_tx_data_arbiter_82599 - Config Tx Data arbiter + * @hw: pointer to hardware structure +- * @refill: refill credits index by traffic class +- * @max: max credits index by traffic class +- * @bwg_id: bandwidth grouping indexed by traffic class +- * @prio_type: priority type indexed by traffic class ++ * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure Tx Packet Arbiter and credits for each traffic class. 
+ */ +-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *bwg_id, +- u8 *prio_type, +- u8 *prio_tc) ++s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, u16 *refill, ++ u16 *max, u8 *bwg_id, u8 *tsa, ++ u8 *map) + { + u32 reg; + u8 i; +@@ -170,22 +224,28 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, + IXGBE_RTTPCS_ARBDIS; + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + +- /* Map all traffic classes to their UP */ ++ /* ++ * map all UPs to TCs. up_to_tc_bitmap for each TC has corresponding ++ * bits sets for the UPs that needs to be mappped to that TC. ++ * e.g if priorities 6 and 7 are to be mapped to a TC then the ++ * up_to_tc_bitmap value for that TC will be 11000000 in binary. ++ */ + reg = 0; +- for (i = 0; i < MAX_USER_PRIORITY; i++) +- reg |= (prio_tc[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); ++ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) ++ reg |= (map[i] << (i * IXGBE_RTTUP2TC_UP_SHIFT)); ++ + IXGBE_WRITE_REG(hw, IXGBE_RTTUP2TC, reg); + + /* Configure traffic class credits and priority */ +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + reg = refill[i]; + reg |= (u32)(max[i]) << IXGBE_RTTPT2C_MCL_SHIFT; + reg |= (u32)(bwg_id[i]) << IXGBE_RTTPT2C_BWG_SHIFT; + +- if (prio_type[i] == prio_group) ++ if (tsa[i] == ixgbe_dcb_tsa_group_strict_cee) + reg |= IXGBE_RTTPT2C_GSP; + +- if (prio_type[i] == prio_link) ++ if (tsa[i] == ixgbe_dcb_tsa_strict) + reg |= IXGBE_RTTPT2C_LSP; + + IXGBE_WRITE_REG(hw, IXGBE_RTTPT2C(i), reg); +@@ -199,18 +259,18 @@ s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, + (IXGBE_RTTPCS_ARBD_DCB << IXGBE_RTTPCS_ARBD_SHIFT); + IXGBE_WRITE_REG(hw, IXGBE_RTTPCS, reg); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** + * ixgbe_dcb_config_pfc_82599 - Configure priority flow control + * @hw: pointer to hardware structure + * @pfc_en: enabled pfc bitmask +- * @prio_tc: priority to tc assignments 
indexed by priority ++ * @map: priority to tc assignments indexed by priority + * + * Configure Priority Flow Control (PFC) for each traffic class. + */ +-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) ++s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *map) + { + u32 i, j, fcrtl, reg; + u8 max_tc = 0; +@@ -229,7 +289,7 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) + */ + reg &= ~(IXGBE_MFLCN_RPFCE_MASK | IXGBE_MFLCN_RFCE); + +- if (hw->mac.type == ixgbe_mac_X540) ++ if (hw->mac.type >= ixgbe_mac_X540) + reg |= pfc_en << IXGBE_MFLCN_RPFCE_SHIFT; + + if (pfc_en) +@@ -237,9 +297,9 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) + + IXGBE_WRITE_REG(hw, IXGBE_MFLCN, reg); + +- for (i = 0; i < MAX_USER_PRIORITY; i++) { +- if (prio_tc[i] > max_tc) +- max_tc = prio_tc[i]; ++ for (i = 0; i < IXGBE_DCB_MAX_USER_PRIORITY; i++) { ++ if (map[i] > max_tc) ++ max_tc = map[i]; + } + + +@@ -247,8 +307,8 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) + for (i = 0; i <= max_tc; i++) { + int enabled = 0; + +- for (j = 0; j < MAX_USER_PRIORITY; j++) { +- if ((prio_tc[j] == i) && (pfc_en & (1 << j))) { ++ for (j = 0; j < IXGBE_DCB_MAX_USER_PRIORITY; j++) { ++ if ((map[j] == i) && (pfc_en & (1 << j))) { + enabled = 1; + break; + } +@@ -259,27 +319,34 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) + fcrtl = (hw->fc.low_water[i] << 10) | IXGBE_FCRTL_XONE; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), fcrtl); + } else { +- reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 32; ++ /* ++ * In order to prevent Tx hangs when the internal Tx ++ * switch is enabled we must set the high water mark ++ * to the Rx packet buffer size - 24KB. This allows ++ * the Tx switch to function even under heavy Rx ++ * workloads. 
++ */ ++ reg = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)) - 24576; + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + } + + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), reg); + } + +- for (; i < MAX_TRAFFIC_CLASS; i++) { ++ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + IXGBE_WRITE_REG(hw, IXGBE_FCRTL_82599(i), 0); + IXGBE_WRITE_REG(hw, IXGBE_FCRTH_82599(i), 0); + } + + /* Configure pause time (2 TCs per register) */ +- reg = hw->fc.pause_time * 0x00010001; +- for (i = 0; i < (MAX_TRAFFIC_CLASS / 2); i++) ++ reg = hw->fc.pause_time | (hw->fc.pause_time << 16); ++ for (i = 0; i < (IXGBE_DCB_MAX_TRAFFIC_CLASS / 2); i++) + IXGBE_WRITE_REG(hw, IXGBE_FCTTV(i), reg); + + /* Configure flow control refresh threshold value */ + IXGBE_WRITE_REG(hw, IXGBE_FCRTV, hw->fc.pause_time / 2); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -289,75 +356,229 @@ s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc) + * Configure queue statistics registers, all queues belonging to same traffic + * class uses a single set of queue statistics counters. + */ +-static s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw) ++s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) + { + u32 reg = 0; + u8 i = 0; ++ u8 tc_count = 8; ++ bool vt_mode = false; + +- /* +- * Receive Queues stats setting +- * 32 RQSMR registers, each configuring 4 queues. +- * Set all 16 queues of each TC to the same stat +- * with TC 'n' going to stat 'n'. +- */ +- for (i = 0; i < 32; i++) { +- reg = 0x01010101 * (i / 4); +- IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); ++ if (dcb_config != NULL) { ++ tc_count = dcb_config->num_tcs.pg_tcs; ++ vt_mode = dcb_config->vt_mode; + } +- /* +- * Transmit Queues stats setting +- * 32 TQSM registers, each controlling 4 queues. +- * Set all queues of each TC to the same stat +- * with TC 'n' going to stat 'n'. +- * Tx queues are allocated non-uniformly to TCs: +- * 32, 32, 16, 16, 8, 8, 8, 8. 
+- */ +- for (i = 0; i < 32; i++) { +- if (i < 8) +- reg = 0x00000000; +- else if (i < 16) +- reg = 0x01010101; +- else if (i < 20) +- reg = 0x02020202; +- else if (i < 24) +- reg = 0x03030303; +- else if (i < 26) +- reg = 0x04040404; +- else if (i < 28) +- reg = 0x05050505; +- else if (i < 30) +- reg = 0x06060606; ++ ++ if (!((tc_count == 8 && vt_mode == false) || tc_count == 4)) ++ return IXGBE_ERR_PARAM; ++ ++ if (tc_count == 8 && vt_mode == false) { ++ /* ++ * Receive Queues stats setting ++ * 32 RQSMR registers, each configuring 4 queues. ++ * ++ * Set all 16 queues of each TC to the same stat ++ * with TC 'n' going to stat 'n'. ++ */ ++ for (i = 0; i < 32; i++) { ++ reg = 0x01010101 * (i / 4); ++ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); ++ } ++ /* ++ * Transmit Queues stats setting ++ * 32 TQSM registers, each controlling 4 queues. ++ * ++ * Set all queues of each TC to the same stat ++ * with TC 'n' going to stat 'n'. ++ * Tx queues are allocated non-uniformly to TCs: ++ * 32, 32, 16, 16, 8, 8, 8, 8. ++ */ ++ for (i = 0; i < 32; i++) { ++ if (i < 8) ++ reg = 0x00000000; ++ else if (i < 16) ++ reg = 0x01010101; ++ else if (i < 20) ++ reg = 0x02020202; ++ else if (i < 24) ++ reg = 0x03030303; ++ else if (i < 26) ++ reg = 0x04040404; ++ else if (i < 28) ++ reg = 0x05050505; ++ else if (i < 30) ++ reg = 0x06060606; ++ else ++ reg = 0x07070707; ++ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); ++ } ++ } else if (tc_count == 4 && vt_mode == false) { ++ /* ++ * Receive Queues stats setting ++ * 32 RQSMR registers, each configuring 4 queues. ++ * ++ * Set all 16 queues of each TC to the same stat ++ * with TC 'n' going to stat 'n'. ++ */ ++ for (i = 0; i < 32; i++) { ++ if (i % 8 > 3) ++ /* In 4 TC mode, odd 16-queue ranges are ++ * not used. ++ */ ++ continue; ++ reg = 0x01010101 * (i / 8); ++ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), reg); ++ } ++ /* ++ * Transmit Queues stats setting ++ * 32 TQSM registers, each controlling 4 queues. 
++ * ++ * Set all queues of each TC to the same stat ++ * with TC 'n' going to stat 'n'. ++ * Tx queues are allocated non-uniformly to TCs: ++ * 64, 32, 16, 16. ++ */ ++ for (i = 0; i < 32; i++) { ++ if (i < 16) ++ reg = 0x00000000; ++ else if (i < 24) ++ reg = 0x01010101; ++ else if (i < 28) ++ reg = 0x02020202; ++ else ++ reg = 0x03030303; ++ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); ++ } ++ } else if (tc_count == 4 && vt_mode == true) { ++ /* ++ * Receive Queues stats setting ++ * 32 RQSMR registers, each configuring 4 queues. ++ * ++ * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each ++ * pool. Set all 32 queues of each TC across pools to the same ++ * stat with TC 'n' going to stat 'n'. ++ */ ++ for (i = 0; i < 32; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_RQSMR(i), 0x03020100); ++ /* ++ * Transmit Queues stats setting ++ * 32 TQSM registers, each controlling 4 queues. ++ * ++ * Queue Indexing in 32 VF with DCB mode maps 4 TC's to each ++ * pool. Set all 32 queues of each TC across pools to the same ++ * stat with TC 'n' going to stat 'n'. ++ */ ++ for (i = 0; i < 32; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), 0x03020100); ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_dcb_config_82599 - Configure general DCB parameters ++ * @hw: pointer to hardware structure ++ * @dcb_config: pointer to ixgbe_dcb_config structure ++ * ++ * Configure general DCB parameters. 
++ */ ++s32 ixgbe_dcb_config_82599(struct ixgbe_hw *hw, ++ struct ixgbe_dcb_config *dcb_config) ++{ ++ u32 reg; ++ u32 q; ++ ++ /* Disable the Tx desc arbiter so that MTQC can be changed */ ++ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); ++ reg |= IXGBE_RTTDCS_ARBDIS; ++ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); ++ ++ reg = IXGBE_READ_REG(hw, IXGBE_MRQC); ++ if (dcb_config->num_tcs.pg_tcs == 8) { ++ /* Enable DCB for Rx with 8 TCs */ ++ switch (reg & IXGBE_MRQC_MRQE_MASK) { ++ case 0: ++ case IXGBE_MRQC_RT4TCEN: ++ /* RSS disabled cases */ ++ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | ++ IXGBE_MRQC_RT8TCEN; ++ break; ++ case IXGBE_MRQC_RSSEN: ++ case IXGBE_MRQC_RTRSS4TCEN: ++ /* RSS enabled cases */ ++ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | ++ IXGBE_MRQC_RTRSS8TCEN; ++ break; ++ default: ++ /* ++ * Unsupported value, assume stale data, ++ * overwrite no RSS ++ */ ++ ASSERT(0); ++ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | ++ IXGBE_MRQC_RT8TCEN; ++ } ++ } ++ if (dcb_config->num_tcs.pg_tcs == 4) { ++ /* We support both VT-on and VT-off with 4 TCs. */ ++ if (dcb_config->vt_mode) ++ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | ++ IXGBE_MRQC_VMDQRT4TCEN; + else +- reg = 0x07070707; +- IXGBE_WRITE_REG(hw, IXGBE_TQSM(i), reg); ++ reg = (reg & ~IXGBE_MRQC_MRQE_MASK) | ++ IXGBE_MRQC_RTRSS4TCEN; ++ } ++ IXGBE_WRITE_REG(hw, IXGBE_MRQC, reg); ++ ++ /* Enable DCB for Tx with 8 TCs */ ++ if (dcb_config->num_tcs.pg_tcs == 8) ++ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; ++ else { ++ /* We support both VT-on and VT-off with 4 TCs. 
*/ ++ reg = IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; ++ if (dcb_config->vt_mode) ++ reg |= IXGBE_MTQC_VT_ENA; + } ++ IXGBE_WRITE_REG(hw, IXGBE_MTQC, reg); + +- return 0; ++ /* Disable drop for all queues */ ++ for (q = 0; q < 128; q++) ++ IXGBE_WRITE_REG(hw, IXGBE_QDE, ++ (IXGBE_QDE_WRITE | (q << IXGBE_QDE_IDX_SHIFT))); ++ ++ /* Enable the Tx desc arbiter */ ++ reg = IXGBE_READ_REG(hw, IXGBE_RTTDCS); ++ reg &= ~IXGBE_RTTDCS_ARBDIS; ++ IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, reg); ++ ++ /* Enable Security TX Buffer IFG for DCB */ ++ reg = IXGBE_READ_REG(hw, IXGBE_SECTXMINIFG); ++ reg |= IXGBE_SECTX_DCB; ++ IXGBE_WRITE_REG(hw, IXGBE_SECTXMINIFG, reg); ++ ++ return IXGBE_SUCCESS; + } + + /** + * ixgbe_dcb_hw_config_82599 - Configure and enable DCB + * @hw: pointer to hardware structure +- * @refill: refill credits index by traffic class +- * @max: max credits index by traffic class +- * @bwg_id: bandwidth grouping indexed by traffic class +- * @prio_type: priority type indexed by traffic class +- * @pfc_en: enabled pfc bitmask ++ * @dcb_config: pointer to ixgbe_dcb_config structure + * + * Configure dcb settings and enable dcb mode. 
+ */ +-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +- u16 *max, u8 *bwg_id, u8 *prio_type, u8 *prio_tc) ++s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, int link_speed, ++ u16 *refill, u16 *max, u8 *bwg_id, u8 *tsa, ++ u8 *map) + { +- ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, +- prio_type, prio_tc); +- ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, +- bwg_id, prio_type); +- ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, +- bwg_id, prio_type, prio_tc); +- ixgbe_dcb_config_pfc_82599(hw, pfc_en, prio_tc); +- ixgbe_dcb_config_tc_stats_82599(hw); +- +- return 0; ++ UNREFERENCED_1PARAMETER(link_speed); ++ ++ ixgbe_dcb_config_rx_arbiter_82599(hw, refill, max, bwg_id, tsa, ++ map); ++ ixgbe_dcb_config_tx_desc_arbiter_82599(hw, refill, max, bwg_id, ++ tsa); ++ ixgbe_dcb_config_tx_data_arbiter_82599(hw, refill, max, bwg_id, ++ tsa, map); ++ ++ return IXGBE_SUCCESS; + } + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +index 90c3702..24be906 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_82599.h +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -26,100 +22,97 @@ + + *******************************************************************************/ + +-#ifndef _DCB_82599_CONFIG_H_ +-#define _DCB_82599_CONFIG_H_ ++#ifndef _IXGBE_DCB_82599_H_ ++#define _IXGBE_DCB_82599_H_ + + /* DCB register definitions */ +-#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, ++#define IXGBE_RTTDCS_TDPAC 0x00000001 /* 0 Round Robin, + * 1 WSP - Weighted Strict Priority + */ +-#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, ++#define IXGBE_RTTDCS_VMPAC 0x00000002 /* 0 Round Robin, + * 1 WRR - Weighted Round Robin + */ +-#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ +-#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +-#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ +-#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must ++#define IXGBE_RTTDCS_TDRM 0x00000010 /* Transmit Recycle Mode */ ++#define IXGBE_RTTDCS_BDPM 0x00400000 /* Bypass Data Pipe - must clear! */ ++#define IXGBE_RTTDCS_BPBFSM 0x00800000 /* Bypass PB Free Space - must + * clear! 
+ */ +-#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ ++#define IXGBE_RTTDCS_SPEED_CHG 0x80000000 /* Link speed change */ + + /* Receive UP2TC mapping */ +-#define IXGBE_RTRUP2TC_UP_SHIFT 3 ++#define IXGBE_RTRUP2TC_UP_SHIFT 3 + #define IXGBE_RTRUP2TC_UP_MASK 7 + /* Transmit UP2TC mapping */ +-#define IXGBE_RTTUP2TC_UP_SHIFT 3 ++#define IXGBE_RTTUP2TC_UP_SHIFT 3 + +-#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ +-#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ +-#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ +-#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ ++#define IXGBE_RTRPT4C_MCL_SHIFT 12 /* Offset to Max Credit Limit setting */ ++#define IXGBE_RTRPT4C_BWG_SHIFT 9 /* Offset to BWG index */ ++#define IXGBE_RTRPT4C_GSP 0x40000000 /* GSP enable bit */ ++#define IXGBE_RTRPT4C_LSP 0x80000000 /* LSP enable bit */ + +-#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet ++#define IXGBE_RDRXCTL_MPBEN 0x00000010 /* DMA config for multiple packet + * buffers enable + */ +-#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores ++#define IXGBE_RDRXCTL_MCEN 0x00000040 /* DMA config for multiple cores + * (RSS) enable + */ + + /* RTRPCS Bit Masks */ +-#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ ++#define IXGBE_RTRPCS_RRM 0x00000002 /* Receive Recycle Mode enable */ + /* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +-#define IXGBE_RTRPCS_RAC 0x00000004 +-#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ ++#define IXGBE_RTRPCS_RAC 0x00000004 ++#define IXGBE_RTRPCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + + /* RTTDT2C Bit Masks */ +-#define IXGBE_RTTDT2C_MCL_SHIFT 12 +-#define IXGBE_RTTDT2C_BWG_SHIFT 9 +-#define IXGBE_RTTDT2C_GSP 0x40000000 +-#define IXGBE_RTTDT2C_LSP 0x80000000 ++#define IXGBE_RTTDT2C_MCL_SHIFT 12 ++#define IXGBE_RTTDT2C_BWG_SHIFT 9 ++#define IXGBE_RTTDT2C_GSP 0x40000000 ++#define 
IXGBE_RTTDT2C_LSP 0x80000000 + +-#define IXGBE_RTTPT2C_MCL_SHIFT 12 +-#define IXGBE_RTTPT2C_BWG_SHIFT 9 +-#define IXGBE_RTTPT2C_GSP 0x40000000 +-#define IXGBE_RTTPT2C_LSP 0x80000000 ++#define IXGBE_RTTPT2C_MCL_SHIFT 12 ++#define IXGBE_RTTPT2C_BWG_SHIFT 9 ++#define IXGBE_RTTPT2C_GSP 0x40000000 ++#define IXGBE_RTTPT2C_LSP 0x80000000 + + /* RTTPCS Bit Masks */ +-#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, ++#define IXGBE_RTTPCS_TPPAC 0x00000020 /* 0 Round Robin, + * 1 SP - Strict Priority + */ +-#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ +-#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ +-#define IXGBE_RTTPCS_ARBD_SHIFT 22 +-#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ +- +-/* SECTXMINIFG DCB */ +-#define IXGBE_SECTX_DCB 0x00001F00 /* DCB TX Buffer IFG */ +- +- +-/* DCB hardware-specific driver APIs */ ++#define IXGBE_RTTPCS_ARBDIS 0x00000040 /* Arbiter disable */ ++#define IXGBE_RTTPCS_TPRM 0x00000100 /* Transmit Recycle Mode enable */ ++#define IXGBE_RTTPCS_ARBD_SHIFT 22 ++#define IXGBE_RTTPCS_ARBD_DCB 0x4 /* Arbitration delay in DCB mode */ + +-/* DCB PFC functions */ +-s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *hw, u8 pfc_en, u8 *prio_tc); ++#define IXGBE_TXPBTHRESH_DCB 0xA /* THRESH value for DCB mode */ + +-/* DCB hw initialization */ +-s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *bwg_id, +- u8 *prio_type, +- u8 *prio_tc); +- +-s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *bwg_id, +- u8 *prio_type); +- +-s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *hw, +- u16 *refill, +- u16 *max, +- u8 *bwg_id, +- u8 *prio_type, +- u8 *prio_tc); +- +-s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *hw, u8 pfc_en, u16 *refill, +- u16 *max, u8 *bwg_id, u8 *prio_type, +- u8 *prio_tc); +- +-#endif /* _DCB_82599_CONFIG_H */ ++/* SECTXMINIFG DCB */ ++#define IXGBE_SECTX_DCB 0x00001F00 
/* DCB TX Buffer SEC IFG */ ++ ++/* DCB driver APIs */ ++ ++/* DCB PFC */ ++s32 ixgbe_dcb_config_pfc_82599(struct ixgbe_hw *, u8, u8 *); ++ ++/* DCB stats */ ++s32 ixgbe_dcb_config_tc_stats_82599(struct ixgbe_hw *, ++ struct ixgbe_dcb_config *); ++s32 ixgbe_dcb_get_tc_stats_82599(struct ixgbe_hw *, ++ struct ixgbe_hw_stats *, u8); ++s32 ixgbe_dcb_get_pfc_stats_82599(struct ixgbe_hw *, ++ struct ixgbe_hw_stats *, u8); ++ ++/* DCB config arbiters */ ++s32 ixgbe_dcb_config_tx_desc_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, ++ u8 *, u8 *); ++s32 ixgbe_dcb_config_tx_data_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, ++ u8 *, u8 *, u8 *); ++s32 ixgbe_dcb_config_rx_arbiter_82599(struct ixgbe_hw *, u16 *, u16 *, u8 *, ++ u8 *, u8 *); ++ ++/* DCB initialization */ ++s32 ixgbe_dcb_config_82599(struct ixgbe_hw *, ++ struct ixgbe_dcb_config *); ++ ++s32 ixgbe_dcb_hw_config_82599(struct ixgbe_hw *, int, u16 *, u16 *, u8 *, ++ u8 *, u8 *); ++#endif /* _IXGBE_DCB_82959_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +index 5172b6b..20d9d05 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_dcb_nl.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -27,10 +23,11 @@ + *******************************************************************************/ + + #include "ixgbe.h" ++ ++#if IS_ENABLED(CONFIG_DCB) + #include + #include "ixgbe_dcb_82598.h" + #include "ixgbe_dcb_82599.h" +-#include "ixgbe_sriov.h" + + /* Callbacks for DCB netlink in the kernel */ + #define BIT_DCB_MODE 0x01 +@@ -38,40 +35,36 @@ + #define BIT_PG_RX 0x04 + #define BIT_PG_TX 0x08 + #define BIT_APP_UPCHG 0x10 +-#define BIT_LINKSPEED 0x80 ++#define BIT_RESETLINK 0x40 ++#define BIT_LINKSPEED 0x80 + + /* Responses for the DCB_C_SET_ALL command */ +-#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ +-#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ +-#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ ++#define DCB_HW_CHG_RST 0 /* DCB configuration changed with reset */ ++#define DCB_NO_HW_CHG 1 /* DCB configuration did not change */ ++#define DCB_HW_CHG 2 /* DCB configuration changed, no reset */ + +-static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) ++int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) + { + struct ixgbe_dcb_config *scfg = &adapter->temp_dcb_cfg; + struct ixgbe_dcb_config *dcfg = &adapter->dcb_cfg; +- struct tc_configuration *src = NULL; +- struct tc_configuration *dst = NULL; ++ struct ixgbe_dcb_tc_config *src = NULL; ++ struct ixgbe_dcb_tc_config *dst = NULL; + int i, j; +- int tx = DCB_TX_CONFIG; +- int rx = DCB_RX_CONFIG; ++ int tx = IXGBE_DCB_TX_CONFIG; ++ int rx = IXGBE_DCB_RX_CONFIG; + int changes = 0; +-#ifdef IXGBE_FCOE +- struct dcb_app app = { +- .selector = DCB_APP_IDTYPE_ETHTYPE, +- .protocol = ETH_P_FCOE, +- }; +- u8 up = dcb_getapp(adapter->netdev, &app); + +- if (up && !(up & (1 << adapter->fcoe.up))) ++#if IS_ENABLED(CONFIG_FCOE) ++ if (adapter->fcoe.up_set != adapter->fcoe.up) + changes |= BIT_APP_UPCHG; +-#endif ++#endif /* 
CONFIG_FCOE */ + + for (i = DCB_PG_ATTR_TC_0; i < tc_max + DCB_PG_ATTR_TC_0; i++) { + src = &scfg->tc_config[i - DCB_PG_ATTR_TC_0]; + dst = &dcfg->tc_config[i - DCB_PG_ATTR_TC_0]; + +- if (dst->path[tx].prio_type != src->path[tx].prio_type) { +- dst->path[tx].prio_type = src->path[tx].prio_type; ++ if (dst->path[tx].tsa != src->path[tx].tsa) { ++ dst->path[tx].tsa = src->path[tx].tsa; + changes |= BIT_PG_TX; + } + +@@ -86,14 +79,14 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) + } + + if (dst->path[tx].up_to_tc_bitmap != +- src->path[tx].up_to_tc_bitmap) { ++ src->path[tx].up_to_tc_bitmap) { + dst->path[tx].up_to_tc_bitmap = + src->path[tx].up_to_tc_bitmap; + changes |= (BIT_PG_TX | BIT_PFC | BIT_APP_UPCHG); + } + +- if (dst->path[rx].prio_type != src->path[rx].prio_type) { +- dst->path[rx].prio_type = src->path[rx].prio_type; ++ if (dst->path[rx].tsa != src->path[rx].tsa) { ++ dst->path[rx].tsa = src->path[rx].tsa; + changes |= BIT_PG_RX; + } + +@@ -108,7 +101,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) + } + + if (dst->path[rx].up_to_tc_bitmap != +- src->path[rx].up_to_tc_bitmap) { ++ src->path[rx].up_to_tc_bitmap) { + dst->path[rx].up_to_tc_bitmap = + src->path[rx].up_to_tc_bitmap; + changes |= (BIT_PG_RX | BIT_PFC | BIT_APP_UPCHG); +@@ -117,6 +110,7 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) + + for (i = DCB_PG_ATTR_BW_ID_0; i < DCB_PG_ATTR_BW_ID_MAX; i++) { + j = i - DCB_PG_ATTR_BW_ID_0; ++ + if (dcfg->bw_percentage[tx][j] != scfg->bw_percentage[tx][j]) { + dcfg->bw_percentage[tx][j] = scfg->bw_percentage[tx][j]; + changes |= BIT_PG_TX; +@@ -129,8 +123,8 @@ static int ixgbe_copy_dcb_cfg(struct ixgbe_adapter *adapter, int tc_max) + + for (i = DCB_PFC_UP_ATTR_0; i < DCB_PFC_UP_ATTR_MAX; i++) { + j = i - DCB_PFC_UP_ATTR_0; +- if (dcfg->tc_config[j].dcb_pfc != scfg->tc_config[j].dcb_pfc) { +- dcfg->tc_config[j].dcb_pfc = scfg->tc_config[j].dcb_pfc; ++ if 
(dcfg->tc_config[j].pfc != scfg->tc_config[j].pfc) { ++ dcfg->tc_config[j].pfc = scfg->tc_config[j].pfc; + changes |= BIT_PFC; + } + } +@@ -183,6 +177,7 @@ static void ixgbe_dcbnl_get_perm_hw_addr(struct net_device *netdev, + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: + for (j = 0; j < netdev->addr_len; j++, i++) + perm_addr[i] = adapter->hw.mac.san_addr[j]; + break; +@@ -198,7 +193,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_tx(struct net_device *netdev, int tc, + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) +- adapter->temp_dcb_cfg.tc_config[tc].path[0].prio_type = prio; ++ adapter->temp_dcb_cfg.tc_config[tc].path[0].tsa = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[0].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) +@@ -224,7 +219,7 @@ static void ixgbe_dcbnl_set_pg_tc_cfg_rx(struct net_device *netdev, int tc, + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + if (prio != DCB_ATTR_VALUE_UNDEFINED) +- adapter->temp_dcb_cfg.tc_config[tc].path[1].prio_type = prio; ++ adapter->temp_dcb_cfg.tc_config[tc].path[1].tsa = prio; + if (bwg_id != DCB_ATTR_VALUE_UNDEFINED) + adapter->temp_dcb_cfg.tc_config[tc].path[1].bwg_id = bwg_id; + if (bw_pct != DCB_ATTR_VALUE_UNDEFINED) +@@ -249,7 +244,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_tx(struct net_device *netdev, int tc, + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + +- *prio = adapter->dcb_cfg.tc_config[tc].path[0].prio_type; ++ *prio = adapter->dcb_cfg.tc_config[tc].path[0].tsa; + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[0].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[0].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[0].up_to_tc_bitmap; +@@ -269,7 +264,7 @@ static void ixgbe_dcbnl_get_pg_tc_cfg_rx(struct net_device *netdev, int tc, + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + +- *prio = 
adapter->dcb_cfg.tc_config[tc].path[1].prio_type; ++ *prio = adapter->dcb_cfg.tc_config[tc].path[1].tsa; + *bwg_id = adapter->dcb_cfg.tc_config[tc].path[1].bwg_id; + *bw_pct = adapter->dcb_cfg.tc_config[tc].path[1].bwg_percent; + *up_map = adapter->dcb_cfg.tc_config[tc].path[1].up_to_tc_bitmap; +@@ -283,23 +278,22 @@ static void ixgbe_dcbnl_get_pg_bwg_cfg_rx(struct net_device *netdev, int bwg_id, + *bw_pct = adapter->dcb_cfg.bw_percentage[1][bwg_id]; + } + +-static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int priority, +- u8 setting) ++static void ixgbe_dcbnl_set_pfc_cfg(struct net_device *netdev, int up, u8 pfc) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->temp_dcb_cfg, 0, up); + +- adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc = setting; +- if (adapter->temp_dcb_cfg.tc_config[priority].dcb_pfc != +- adapter->dcb_cfg.tc_config[priority].dcb_pfc) ++ adapter->temp_dcb_cfg.tc_config[tc].pfc = pfc; ++ if (adapter->temp_dcb_cfg.tc_config[tc].pfc != ++ adapter->dcb_cfg.tc_config[tc].pfc) + adapter->temp_dcb_cfg.pfc_mode_enable = true; + } + +-static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int priority, +- u8 *setting) ++static void ixgbe_dcbnl_get_pfc_cfg(struct net_device *netdev, int up, u8 *pfc) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); +- +- *setting = adapter->dcb_cfg.tc_config[priority].dcb_pfc; ++ u8 tc = ixgbe_dcb_get_tc_from_up(&adapter->dcb_cfg, 0, up); ++ *pfc = adapter->dcb_cfg.tc_config[tc].pfc; + } + + static void ixgbe_dcbnl_devreset(struct net_device *dev) +@@ -310,13 +304,21 @@ static void ixgbe_dcbnl_devreset(struct net_device *dev) + usleep_range(1000, 2000); + + if (netif_running(dev)) ++#ifdef HAVE_NET_DEVICE_OPS + dev->netdev_ops->ndo_stop(dev); ++#else ++ dev->stop(dev); ++#endif + + ixgbe_clear_interrupt_scheme(adapter); + ixgbe_init_interrupt_scheme(adapter); + + if (netif_running(dev)) ++#ifdef HAVE_NET_DEVICE_OPS + 
dev->netdev_ops->ndo_open(dev); ++#else ++ dev->open(dev); ++#endif + + clear_bit(__IXGBE_RESETTING, &adapter->state); + } +@@ -327,83 +329,80 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) + struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct ixgbe_hw *hw = &adapter->hw; + int ret = DCB_NO_HW_CHG; +- int i; ++ u8 prio_tc[IXGBE_DCB_MAX_USER_PRIORITY] = { 0 }; + + /* Fail command if not in CEE mode */ + if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) + return ret; + + adapter->dcb_set_bitmap |= ixgbe_copy_dcb_cfg(adapter, +- MAX_TRAFFIC_CLASS); ++ IXGBE_DCB_MAX_TRAFFIC_CLASS); + if (!adapter->dcb_set_bitmap) + return ret; + +- if (adapter->dcb_set_bitmap & (BIT_PG_TX|BIT_PG_RX)) { +- u16 refill[MAX_TRAFFIC_CLASS], max[MAX_TRAFFIC_CLASS]; +- u8 bwg_id[MAX_TRAFFIC_CLASS], prio_type[MAX_TRAFFIC_CLASS]; ++ ixgbe_dcb_unpack_map_cee(dcb_cfg, IXGBE_DCB_TX_CONFIG, prio_tc); ++ ++ if (adapter->dcb_set_bitmap & (BIT_PG_TX | BIT_PG_RX)) { + /* Priority to TC mapping in CEE case default to 1:1 */ +- u8 prio_tc[MAX_USER_PRIORITY]; + int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; ++#ifdef HAVE_MQPRIO ++ int i; ++#endif + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + if (adapter->netdev->features & NETIF_F_FCOE_MTU) + max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); + #endif + +- ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, +- DCB_TX_CONFIG); +- ixgbe_dcb_calculate_tc_credits(hw, dcb_cfg, max_frame, +- DCB_RX_CONFIG); ++ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame, ++ IXGBE_DCB_TX_CONFIG); + +- ixgbe_dcb_unpack_refill(dcb_cfg, DCB_TX_CONFIG, refill); +- ixgbe_dcb_unpack_max(dcb_cfg, max); +- ixgbe_dcb_unpack_bwgid(dcb_cfg, DCB_TX_CONFIG, bwg_id); +- ixgbe_dcb_unpack_prio(dcb_cfg, DCB_TX_CONFIG, prio_type); +- ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); ++ ixgbe_dcb_calculate_tc_credits_cee(hw, dcb_cfg, max_frame, ++ IXGBE_DCB_RX_CONFIG); + +- ixgbe_dcb_hw_ets_config(hw, refill, max, bwg_id, 
+- prio_type, prio_tc); ++ ixgbe_dcb_hw_config_cee(hw, dcb_cfg); + ++#ifdef HAVE_MQPRIO + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + netdev_set_prio_tc_map(netdev, i, prio_tc[i]); +- ++#endif /* HAVE_MQPRIO */ + ret = DCB_HW_CHG_RST; + } + + if (adapter->dcb_set_bitmap & BIT_PFC) { + if (dcb_cfg->pfc_mode_enable) { + u8 pfc_en; +- u8 prio_tc[MAX_USER_PRIORITY]; +- +- ixgbe_dcb_unpack_map(dcb_cfg, DCB_TX_CONFIG, prio_tc); +- ixgbe_dcb_unpack_pfc(dcb_cfg, &pfc_en); +- ixgbe_dcb_hw_pfc_config(hw, pfc_en, prio_tc); ++ ixgbe_dcb_unpack_pfc_cee(dcb_cfg, prio_tc, &pfc_en); ++ ixgbe_dcb_config_pfc(hw, pfc_en, prio_tc); + } else { + hw->mac.ops.fc_enable(hw); + } ++ /* This is known driver so disable MDD before updating SRRCTL */ ++ if ((adapter->num_vfs) && (hw->mac.ops.disable_mdd) && ++ (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) ++ hw->mac.ops.disable_mdd(hw); + + ixgbe_set_rx_drop_en(adapter); + +- ret = DCB_HW_CHG; ++ if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && ++ (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) ++ hw->mac.ops.enable_mdd(hw); ++ ++ if (ret != DCB_HW_CHG_RST) ++ ret = DCB_HW_CHG; + } + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* Reprogam FCoE hardware offloads when the traffic class + * FCoE is using changes. This happens if the APP info + * changes or the up2tc mapping is updated. 
+ */ + if (adapter->dcb_set_bitmap & BIT_APP_UPCHG) { +- struct dcb_app app = { +- .selector = DCB_APP_IDTYPE_ETHTYPE, +- .protocol = ETH_P_FCOE, +- }; +- u8 up = dcb_getapp(netdev, &app); +- +- adapter->fcoe.up = ffs(up) - 1; ++ adapter->fcoe.up_set = adapter->fcoe.up; + ixgbe_dcbnl_devreset(netdev); + ret = DCB_HW_CHG_RST; + } +-#endif ++#endif /* CONFIG_FCOE */ + + adapter->dcb_set_bitmap = 0x00; + return ret; +@@ -411,7 +410,9 @@ static u8 ixgbe_dcbnl_set_all(struct net_device *netdev) + + static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) + { ++#ifdef HAVE_DCBNL_IEEE + struct ixgbe_adapter *adapter = netdev_priv(netdev); ++#endif + + switch (capid) { + case DCB_CAP_ATTR_PG: +@@ -435,9 +436,11 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) + case DCB_CAP_ATTR_BCN: + *cap = false; + break; ++#ifdef HAVE_DCBNL_IEEE + case DCB_CAP_ATTR_DCBX: + *cap = adapter->dcbx_cap; + break; ++#endif + default: + *cap = false; + break; +@@ -446,9 +449,14 @@ static u8 ixgbe_dcbnl_getcap(struct net_device *netdev, int capid, u8 *cap) + return 0; + } + ++#ifdef NUMTCS_RETURNS_U8 ++static u8 ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) ++#else + static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) ++#endif + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ u8 rval = 0; + + if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { + switch (tcid) { +@@ -459,19 +467,42 @@ static int ixgbe_dcbnl_getnumtcs(struct net_device *netdev, int tcid, u8 *num) + *num = adapter->dcb_cfg.num_tcs.pfc_tcs; + break; + default: +- return -EINVAL; ++ rval = -EINVAL; + break; + } + } else { +- return -EINVAL; ++ rval = -EINVAL; + } + +- return 0; ++ return rval; + } + ++#ifdef NUMTCS_RETURNS_U8 ++static u8 ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) ++#else + static int ixgbe_dcbnl_setnumtcs(struct net_device *netdev, int tcid, u8 num) ++#endif + { +- return -EINVAL; ++ 
struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ u8 rval = 0; ++ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) { ++ switch (tcid) { ++ case DCB_NUMTCS_ATTR_PG: ++ adapter->dcb_cfg.num_tcs.pg_tcs = num; ++ break; ++ case DCB_NUMTCS_ATTR_PFC: ++ adapter->dcb_cfg.num_tcs.pfc_tcs = num; ++ break; ++ default: ++ rval = -EINVAL; ++ break; ++ } ++ } else { ++ rval = -EINVAL; ++ } ++ ++ return rval; + } + + static u8 ixgbe_dcbnl_getpfcstate(struct net_device *netdev) +@@ -486,8 +517,10 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) + struct ixgbe_adapter *adapter = netdev_priv(netdev); + + adapter->temp_dcb_cfg.pfc_mode_enable = state; ++ return; + } + ++#ifdef HAVE_DCBNL_OPS_GETAPP + /** + * ixgbe_dcbnl_getapp - retrieve the DCBX application user priority + * @netdev : the corresponding netdev +@@ -498,32 +531,96 @@ static void ixgbe_dcbnl_setpfcstate(struct net_device *netdev, u8 state) + * otherwise returns 0 as the invalid user priority bitmap to indicate an + * error. 
+ */ ++#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT ++static int ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) ++#else + static u8 ixgbe_dcbnl_getapp(struct net_device *netdev, u8 idtype, u16 id) ++#endif + { +- struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ u8 rval = 0; ++#ifdef HAVE_DCBNL_IEEE + struct dcb_app app = { +- .selector = idtype, +- .protocol = id, +- }; ++ .selector = idtype, ++ .protocol = id, ++ }; + +- if (!(adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE)) +- return 0; ++ rval = dcb_getapp(netdev, &app); ++#endif ++ ++ switch (idtype) { ++ case DCB_APP_IDTYPE_ETHTYPE: ++#if IS_ENABLED(CONFIG_FCOE) ++ if (id == ETH_P_FCOE) ++ rval = ixgbe_fcoe_getapp(netdev); ++#endif ++ break; ++ case DCB_APP_IDTYPE_PORTNUM: ++ break; ++ default: ++ break; ++ } + +- return dcb_getapp(netdev, &app); ++ return rval; + } + ++/** ++ * ixgbe_dcbnl_setapp - set the DCBX application user priority ++ * @netdev : the corresponding netdev ++ * @idtype : identifies the id as ether type or TCP/UDP port number ++ * @id: id is either ether type or TCP/UDP port number ++ * @up: the 802.1p user priority bitmap ++ * ++ * Returns : 0 on success or 1 on error ++ */ ++#ifdef HAVE_DCBNL_OPS_SETAPP_RETURN_INT ++static int ixgbe_dcbnl_setapp(struct net_device *netdev, ++#else ++static u8 ixgbe_dcbnl_setapp(struct net_device *netdev, ++#endif ++ u8 idtype, u16 id, u8 up) ++{ ++ int err = 0; ++#ifdef HAVE_DCBNL_IEEE ++ struct dcb_app app; ++ ++ app.selector = idtype; ++ app.protocol = id; ++ app.priority = up; ++ err = dcb_setapp(netdev, &app); ++#endif ++ ++ switch (idtype) { ++ case DCB_APP_IDTYPE_ETHTYPE: ++#if IS_ENABLED(CONFIG_FCOE) ++ if (id == ETH_P_FCOE) { ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ adapter->fcoe.up = up ? 
ffs(up) - 1 : IXGBE_FCOE_DEFUP; ++ } ++#endif ++ break; ++ case DCB_APP_IDTYPE_PORTNUM: ++ break; ++ default: ++ break; ++ } ++ ++ return err; ++} ++#endif /* HAVE_DCBNL_OPS_GETAPP */ ++ ++#ifdef HAVE_DCBNL_IEEE + static int ixgbe_dcbnl_ieee_getets(struct net_device *dev, + struct ieee_ets *ets) + { + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ieee_ets *my_ets = adapter->ixgbe_ieee_ets; + +- ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; +- + /* No IEEE PFC settings available */ + if (!my_ets) +- return 0; ++ return -EINVAL; + ++ ets->ets_cap = adapter->dcb_cfg.num_tcs.pg_tcs; + ets->cbs = my_ets->cbs; + memcpy(ets->tc_tx_bw, my_ets->tc_tx_bw, sizeof(ets->tc_tx_bw)); + memcpy(ets->tc_rx_bw, my_ets->tc_rx_bw, sizeof(ets->tc_rx_bw)); +@@ -549,13 +646,13 @@ static int ixgbe_dcbnl_ieee_setets(struct net_device *dev, + GFP_KERNEL); + if (!adapter->ixgbe_ieee_ets) + return -ENOMEM; +- + /* initialize UP2TC mappings to invalid value */ + for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) + adapter->ixgbe_ieee_ets->prio_tc[i] = + IEEE_8021QAZ_MAX_TCS; + /* if possible update UP2TC mappings from HW */ +- ixgbe_dcb_read_rtrup2tc(&adapter->hw, ++ if (adapter->hw.mac.ops.get_rtrup2tc) ++ adapter->hw.mac.ops.get_rtrup2tc(&adapter->hw, + adapter->ixgbe_ieee_ets->prio_tc); + } + +@@ -594,17 +691,16 @@ static int ixgbe_dcbnl_ieee_getpfc(struct net_device *dev, + struct ieee_pfc *my_pfc = adapter->ixgbe_ieee_pfc; + int i; + +- pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; +- + /* No IEEE PFC settings available */ + if (!my_pfc) +- return 0; ++ return -EINVAL; + ++ pfc->pfc_cap = adapter->dcb_cfg.num_tcs.pfc_tcs; + pfc->pfc_en = my_pfc->pfc_en; + pfc->mbc = my_pfc->mbc; + pfc->delay = my_pfc->delay; + +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + pfc->requests[i] = adapter->stats.pxoffrxc[i]; + pfc->indications[i] = adapter->stats.pxofftxc[i]; + } +@@ -633,14 +729,24 @@ static int ixgbe_dcbnl_ieee_setpfc(struct 
net_device *dev, + prio_tc = adapter->ixgbe_ieee_ets->prio_tc; + memcpy(adapter->ixgbe_ieee_pfc, pfc, sizeof(*adapter->ixgbe_ieee_pfc)); + ++ + /* Enable link flow control parameters if PFC is disabled */ + if (pfc->pfc_en) +- err = ixgbe_dcb_hw_pfc_config(hw, pfc->pfc_en, prio_tc); ++ err = ixgbe_dcb_config_pfc(hw, pfc->pfc_en, prio_tc); + else + err = hw->mac.ops.fc_enable(hw); + ++ /* This is known driver so disable MDD before updating SRRCTL */ ++ if ((adapter->num_vfs) && (hw->mac.ops.disable_mdd) && ++ (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) ++ hw->mac.ops.disable_mdd(hw); ++ + ixgbe_set_rx_drop_en(adapter); + ++ if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && ++ (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) ++ hw->mac.ops.enable_mdd(hw); ++ + return err; + } + +@@ -654,11 +760,9 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, + return err; + + err = dcb_ieee_setapp(dev, app); +- if (err) +- return err; + +-#ifdef IXGBE_FCOE +- if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && ++#if IS_ENABLED(CONFIG_FCOE) ++ if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); + +@@ -666,29 +770,14 @@ static int ixgbe_dcbnl_ieee_setapp(struct net_device *dev, + return err; + + adapter->fcoe.up = app->priority; ++ adapter->fcoe.up_set = adapter->fcoe.up; + ixgbe_dcbnl_devreset(dev); + } + #endif +- +- /* VF devices should use default UP when available */ +- if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && +- app->protocol == 0) { +- int vf; +- +- adapter->default_up = app->priority; +- +- for (vf = 0; vf < adapter->num_vfs; vf++) { +- struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; +- +- if (!vfinfo->pf_qos) +- ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, +- app->priority, vf); +- } +- } +- + return 0; + } + ++#ifdef HAVE_DCBNL_IEEE_DELAPP + static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, + struct dcb_app *app) + { +@@ -700,7 +789,7 @@ 
static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, + + err = dcb_ieee_delapp(dev, app); + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + if (!err && app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && + app->protocol == ETH_P_FCOE) { + u8 app_mask = dcb_ieee_getapp_mask(dev, app); +@@ -709,30 +798,13 @@ static int ixgbe_dcbnl_ieee_delapp(struct net_device *dev, + return err; + + adapter->fcoe.up = app_mask ? +- ffs(app_mask) - 1 : IXGBE_FCOE_DEFTC; ++ ffs(app_mask) - 1 : IXGBE_FCOE_DEFUP; + ixgbe_dcbnl_devreset(dev); + } + #endif +- /* IF default priority is being removed clear VF default UP */ +- if (app->selector == IEEE_8021QAZ_APP_SEL_ETHERTYPE && +- app->protocol == 0 && adapter->default_up == app->priority) { +- int vf; +- long unsigned int app_mask = dcb_ieee_getapp_mask(dev, app); +- int qos = app_mask ? find_first_bit(&app_mask, 8) : 0; +- +- adapter->default_up = qos; +- +- for (vf = 0; vf < adapter->num_vfs; vf++) { +- struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; +- +- if (!vfinfo->pf_qos) +- ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, +- qos, vf); +- } +- } +- + return err; + } ++#endif /* HAVE_DCBNL_IEEE_DELAPP */ + + static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) + { +@@ -743,9 +815,8 @@ static u8 ixgbe_dcbnl_getdcbx(struct net_device *dev) + static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) + { + struct ixgbe_adapter *adapter = netdev_priv(dev); +- struct ieee_ets ets = {0}; +- struct ieee_pfc pfc = {0}; +- int err = 0; ++ struct ieee_ets ets = { .ets_cap = 0 }; ++ struct ieee_pfc pfc = { .pfc_en = 0 }; + + /* no support for LLD_MANAGED modes or CEE+IEEE */ + if ((mode & DCB_CAP_DCBX_LLD_MANAGED) || +@@ -766,7 +837,7 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); + } else if (mode & DCB_CAP_DCBX_VER_CEE) { +- u8 mask = BIT_PFC | BIT_PG_TX | BIT_PG_RX | BIT_APP_UPCHG; ++ u8 mask = (BIT_PFC | BIT_PG_TX | BIT_PG_RX | 
BIT_APP_UPCHG); + + adapter->dcb_set_bitmap |= mask; + ixgbe_dcbnl_set_all(dev); +@@ -776,19 +847,25 @@ static u8 ixgbe_dcbnl_setdcbx(struct net_device *dev, u8 mode) + */ + ixgbe_dcbnl_ieee_setets(dev, &ets); + ixgbe_dcbnl_ieee_setpfc(dev, &pfc); +- err = ixgbe_setup_tc(dev, 0); ++ ixgbe_setup_tc(dev, 0); + } + +- return err ? 1 : 0; ++ return 0; + } + +-const struct dcbnl_rtnl_ops dcbnl_ops = { ++#endif ++ ++struct dcbnl_rtnl_ops ixgbe_dcbnl_ops = { ++#ifdef HAVE_DCBNL_IEEE + .ieee_getets = ixgbe_dcbnl_ieee_getets, + .ieee_setets = ixgbe_dcbnl_ieee_setets, + .ieee_getpfc = ixgbe_dcbnl_ieee_getpfc, + .ieee_setpfc = ixgbe_dcbnl_ieee_setpfc, + .ieee_setapp = ixgbe_dcbnl_ieee_setapp, ++#ifdef HAVE_DCBNL_IEEE_DELAPP + .ieee_delapp = ixgbe_dcbnl_ieee_delapp, ++#endif ++#endif + .getstate = ixgbe_dcbnl_get_state, + .setstate = ixgbe_dcbnl_set_state, + .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, +@@ -808,7 +885,14 @@ const struct dcbnl_rtnl_ops dcbnl_ops = { + .setnumtcs = ixgbe_dcbnl_setnumtcs, + .getpfcstate = ixgbe_dcbnl_getpfcstate, + .setpfcstate = ixgbe_dcbnl_setpfcstate, ++#ifdef HAVE_DCBNL_OPS_GETAPP + .getapp = ixgbe_dcbnl_getapp, ++ .setapp = ixgbe_dcbnl_setapp, ++#endif ++#ifdef HAVE_DCBNL_IEEE + .getdcbx = ixgbe_dcbnl_getdcbx, + .setdcbx = ixgbe_dcbnl_setdcbx, ++#endif + }; ++ ++#endif /* CONFIG_DCB */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +index 5e2c1e3..66f52e2 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_debugfs.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -25,11 +21,13 @@ + Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 + + *******************************************************************************/ +-#include +-#include + + #include "ixgbe.h" + ++#ifdef HAVE_IXGBE_DEBUG_FS ++#include ++#include ++ + static struct dentry *ixgbe_dbg_root; + + static char ixgbe_dbg_reg_ops_buf[256] = ""; +@@ -205,7 +203,11 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, + ixgbe_dbg_netdev_ops_buf[len] = '\0'; + + if (strncmp(ixgbe_dbg_netdev_ops_buf, "tx_timeout", 10) == 0) { ++#ifdef HAVE_NET_DEVICE_OPS + adapter->netdev->netdev_ops->ndo_tx_timeout(adapter->netdev); ++#else ++ adapter->netdev->tx_timeout(adapter->netdev); ++#endif /* HAVE_NET_DEVICE_OPS */ + e_dev_info("tx_timeout called\n"); + } else { + e_dev_info("Unknown command: %s\n", ixgbe_dbg_netdev_ops_buf); +@@ -215,7 +217,7 @@ static ssize_t ixgbe_dbg_netdev_ops_write(struct file *filp, + return count; + } + +-static const struct file_operations ixgbe_dbg_netdev_ops_fops = { ++static struct file_operations ixgbe_dbg_netdev_ops_fops = { + .owner = THIS_MODULE, + .open = simple_open, + .read = ixgbe_dbg_netdev_ops_read, +@@ -253,7 +255,8 @@ void ixgbe_dbg_adapter_init(struct ixgbe_adapter *adapter) + **/ + void ixgbe_dbg_adapter_exit(struct ixgbe_adapter *adapter) + { +- debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); ++ if (adapter->ixgbe_dbg_adapter) ++ 
debugfs_remove_recursive(adapter->ixgbe_dbg_adapter); + adapter->ixgbe_dbg_adapter = NULL; + } + +@@ -274,3 +277,5 @@ void ixgbe_dbg_exit(void) + { + debugfs_remove_recursive(ixgbe_dbg_root); + } ++ ++#endif /* HAVE_IXGBE_DEBUG_FS */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +index a452730..1e8762c 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ethtool.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2014 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ +@@ -28,99 +24,122 @@ + + /* ethtool support for ixgbe */ + +-#include + #include + #include +-#include + #include + #include + #include + #include + #include +-#include ++ ++#ifdef SIOCETHTOOL ++#include + + #include "ixgbe.h" ++#ifdef ETHTOOL_GMODULEINFO + #include "ixgbe_phy.h" ++#endif ++#ifdef HAVE_ETHTOOL_GET_TS_INFO ++#include ++#endif + ++#ifndef ETH_GSTRING_LEN ++#define ETH_GSTRING_LEN 32 ++#endif + + #define IXGBE_ALL_RAR_ENTRIES 16 + +-enum {NETDEV_STATS, IXGBE_STATS}; +- ++#ifdef ETHTOOL_OPS_COMPAT ++#include "kcompat_ethtool.c" ++#endif ++#ifdef ETHTOOL_GSTATS + struct ixgbe_stats { + char stat_string[ETH_GSTRING_LEN]; +- int type; + int sizeof_stat; + int stat_offset; + }; + +-#define IXGBE_STAT(m) IXGBE_STATS, \ +- sizeof(((struct ixgbe_adapter *)0)->m), \ +- offsetof(struct ixgbe_adapter, m) +-#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \ +- sizeof(((struct rtnl_link_stats64 *)0)->m), \ +- offsetof(struct rtnl_link_stats64, m) +- +-static const struct ixgbe_stats ixgbe_gstrings_stats[] = { +- {"rx_packets", IXGBE_NETDEV_STAT(rx_packets)}, +- {"tx_packets", IXGBE_NETDEV_STAT(tx_packets)}, +- {"rx_bytes", IXGBE_NETDEV_STAT(rx_bytes)}, +- {"tx_bytes", IXGBE_NETDEV_STAT(tx_bytes)}, +- {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, +- {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, +- {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, +- {"tx_bytes_nic", IXGBE_STAT(stats.gotc)}, +- {"lsc_int", IXGBE_STAT(lsc_int)}, +- {"tx_busy", IXGBE_STAT(tx_busy)}, +- {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, +- {"rx_errors", IXGBE_NETDEV_STAT(rx_errors)}, +- {"tx_errors", IXGBE_NETDEV_STAT(tx_errors)}, +- {"rx_dropped", IXGBE_NETDEV_STAT(rx_dropped)}, +- {"tx_dropped", IXGBE_NETDEV_STAT(tx_dropped)}, +- {"multicast", IXGBE_NETDEV_STAT(multicast)}, +- {"broadcast", IXGBE_STAT(stats.bprc)}, +- {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, +- {"collisions", IXGBE_NETDEV_STAT(collisions)}, +- {"rx_over_errors", IXGBE_NETDEV_STAT(rx_over_errors)}, +- {"rx_crc_errors", 
IXGBE_NETDEV_STAT(rx_crc_errors)}, +- {"rx_frame_errors", IXGBE_NETDEV_STAT(rx_frame_errors)}, +- {"hw_rsc_aggregated", IXGBE_STAT(rsc_total_count)}, +- {"hw_rsc_flushed", IXGBE_STAT(rsc_total_flush)}, +- {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, +- {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, +- {"fdir_overflow", IXGBE_STAT(fdir_overflow)}, +- {"rx_fifo_errors", IXGBE_NETDEV_STAT(rx_fifo_errors)}, +- {"rx_missed_errors", IXGBE_NETDEV_STAT(rx_missed_errors)}, +- {"tx_aborted_errors", IXGBE_NETDEV_STAT(tx_aborted_errors)}, +- {"tx_carrier_errors", IXGBE_NETDEV_STAT(tx_carrier_errors)}, +- {"tx_fifo_errors", IXGBE_NETDEV_STAT(tx_fifo_errors)}, +- {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(tx_heartbeat_errors)}, +- {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, +- {"tx_restart_queue", IXGBE_STAT(restart_queue)}, +- {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, +- {"rx_short_length_errors", IXGBE_STAT(stats.ruc)}, +- {"tx_flow_control_xon", IXGBE_STAT(stats.lxontxc)}, +- {"rx_flow_control_xon", IXGBE_STAT(stats.lxonrxc)}, +- {"tx_flow_control_xoff", IXGBE_STAT(stats.lxofftxc)}, +- {"rx_flow_control_xoff", IXGBE_STAT(stats.lxoffrxc)}, +- {"rx_csum_offload_errors", IXGBE_STAT(hw_csum_rx_error)}, +- {"alloc_rx_page_failed", IXGBE_STAT(alloc_rx_page_failed)}, +- {"alloc_rx_buff_failed", IXGBE_STAT(alloc_rx_buff_failed)}, +- {"rx_no_dma_resources", IXGBE_STAT(hw_rx_no_dma_resources)}, +- {"os2bmc_rx_by_bmc", IXGBE_STAT(stats.o2bgptc)}, +- {"os2bmc_tx_by_bmc", IXGBE_STAT(stats.b2ospc)}, +- {"os2bmc_tx_by_host", IXGBE_STAT(stats.o2bspc)}, +- {"os2bmc_rx_by_host", IXGBE_STAT(stats.b2ogprc)}, +-#ifdef IXGBE_FCOE +- {"fcoe_bad_fccrc", IXGBE_STAT(stats.fccrc)}, +- {"rx_fcoe_dropped", IXGBE_STAT(stats.fcoerpdc)}, +- {"rx_fcoe_packets", IXGBE_STAT(stats.fcoeprc)}, +- {"rx_fcoe_dwords", IXGBE_STAT(stats.fcoedwrc)}, +- {"fcoe_noddp", IXGBE_STAT(stats.fcoe_noddp)}, +- {"fcoe_noddp_ext_buff", IXGBE_STAT(stats.fcoe_noddp_ext_buff)}, +- {"tx_fcoe_packets", 
IXGBE_STAT(stats.fcoeptc)}, +- {"tx_fcoe_dwords", IXGBE_STAT(stats.fcoedwtc)}, +-#endif /* IXGBE_FCOE */ ++#define IXGBE_NETDEV_STAT(_net_stat) { \ ++ .stat_string = #_net_stat, \ ++ .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \ ++ .stat_offset = offsetof(struct net_device_stats, _net_stat) \ ++} ++static const struct ixgbe_stats ixgbe_gstrings_net_stats[] = { ++ IXGBE_NETDEV_STAT(rx_packets), ++ IXGBE_NETDEV_STAT(tx_packets), ++ IXGBE_NETDEV_STAT(rx_bytes), ++ IXGBE_NETDEV_STAT(tx_bytes), ++ IXGBE_NETDEV_STAT(rx_errors), ++ IXGBE_NETDEV_STAT(tx_errors), ++ IXGBE_NETDEV_STAT(rx_dropped), ++ IXGBE_NETDEV_STAT(tx_dropped), ++ IXGBE_NETDEV_STAT(multicast), ++ IXGBE_NETDEV_STAT(collisions), ++ IXGBE_NETDEV_STAT(rx_over_errors), ++ IXGBE_NETDEV_STAT(rx_crc_errors), ++ IXGBE_NETDEV_STAT(rx_frame_errors), ++ IXGBE_NETDEV_STAT(rx_fifo_errors), ++ IXGBE_NETDEV_STAT(rx_missed_errors), ++ IXGBE_NETDEV_STAT(tx_aborted_errors), ++ IXGBE_NETDEV_STAT(tx_carrier_errors), ++ IXGBE_NETDEV_STAT(tx_fifo_errors), ++ IXGBE_NETDEV_STAT(tx_heartbeat_errors), ++}; ++ ++#define IXGBE_STAT(_name, _stat) { \ ++ .stat_string = _name, \ ++ .sizeof_stat = FIELD_SIZEOF(struct ixgbe_adapter, _stat), \ ++ .stat_offset = offsetof(struct ixgbe_adapter, _stat) \ ++} ++static struct ixgbe_stats ixgbe_gstrings_stats[] = { ++ IXGBE_STAT("rx_pkts_nic", stats.gprc), ++ IXGBE_STAT("tx_pkts_nic", stats.gptc), ++ IXGBE_STAT("rx_bytes_nic", stats.gorc), ++ IXGBE_STAT("tx_bytes_nic", stats.gotc), ++ IXGBE_STAT("lsc_int", lsc_int), ++ IXGBE_STAT("tx_busy", tx_busy), ++ IXGBE_STAT("non_eop_descs", non_eop_descs), ++ IXGBE_STAT("broadcast", stats.bprc), ++ IXGBE_STAT("rx_no_buffer_count", stats.rnbc[0]) , ++ IXGBE_STAT("tx_timeout_count", tx_timeout_count), ++ IXGBE_STAT("tx_restart_queue", restart_queue), ++ IXGBE_STAT("rx_long_length_errors", stats.roc), ++ IXGBE_STAT("rx_short_length_errors", stats.ruc), ++ IXGBE_STAT("tx_flow_control_xon", stats.lxontxc), ++ 
IXGBE_STAT("rx_flow_control_xon", stats.lxonrxc), ++ IXGBE_STAT("tx_flow_control_xoff", stats.lxofftxc), ++ IXGBE_STAT("rx_flow_control_xoff", stats.lxoffrxc), ++ IXGBE_STAT("rx_csum_offload_errors", hw_csum_rx_error), ++ IXGBE_STAT("alloc_rx_page_failed", alloc_rx_page_failed), ++ IXGBE_STAT("alloc_rx_buff_failed", alloc_rx_buff_failed), ++ IXGBE_STAT("rx_no_dma_resources", hw_rx_no_dma_resources), ++ IXGBE_STAT("hw_rsc_aggregated", rsc_total_count), ++ IXGBE_STAT("hw_rsc_flushed", rsc_total_flush), ++#ifdef HAVE_TX_MQ ++ IXGBE_STAT("fdir_match", stats.fdirmatch), ++ IXGBE_STAT("fdir_miss", stats.fdirmiss), ++ IXGBE_STAT("fdir_overflow", fdir_overflow), ++#endif /* HAVE_TX_MQ */ ++#if IS_ENABLED(CONFIG_FCOE) ++ IXGBE_STAT("fcoe_bad_fccrc", stats.fccrc), ++ IXGBE_STAT("fcoe_last_errors", stats.fclast), ++ IXGBE_STAT("rx_fcoe_dropped", stats.fcoerpdc), ++ IXGBE_STAT("rx_fcoe_packets", stats.fcoeprc), ++ IXGBE_STAT("rx_fcoe_dwords", stats.fcoedwrc), ++ IXGBE_STAT("fcoe_noddp", stats.fcoe_noddp), ++ IXGBE_STAT("fcoe_noddp_ext_buff", stats.fcoe_noddp_ext_buff), ++ IXGBE_STAT("tx_fcoe_packets", stats.fcoeptc), ++ IXGBE_STAT("tx_fcoe_dwords", stats.fcoedwtc), ++#endif /* CONFIG_FCOE */ ++ IXGBE_STAT("os2bmc_rx_by_bmc", stats.o2bgptc), ++ IXGBE_STAT("os2bmc_tx_by_bmc", stats.b2ospc), ++ IXGBE_STAT("os2bmc_tx_by_host", stats.o2bspc), ++ IXGBE_STAT("os2bmc_rx_by_host", stats.b2ogprc), ++#ifdef HAVE_PTP_1588_CLOCK ++ IXGBE_STAT("tx_hwtstamp_timeouts", tx_hwtstamp_timeouts), ++ IXGBE_STAT("tx_hwtstamp_skipped", tx_hwtstamp_skipped), ++ IXGBE_STAT("rx_hwtstamp_cleared", rx_hwtstamp_cleared), ++#endif /* HAVE_PTP_1588_CLOCK */ + }; + + /* ixgbe allocates num_tx_queues and num_rx_queues symmetrically so +@@ -128,66 +147,335 @@ static const struct ixgbe_stats ixgbe_gstrings_stats[] = { + * used because we do not have a good way to get the max number of + * rx queues with CONFIG_RPS disabled. 
+ */ ++#ifdef HAVE_TX_MQ ++#ifdef HAVE_NETDEV_SELECT_QUEUE + #define IXGBE_NUM_RX_QUEUES netdev->num_tx_queues ++#define IXGBE_NUM_TX_QUEUES netdev->num_tx_queues ++#else ++#define IXGBE_NUM_RX_QUEUES adapter->indices ++#define IXGBE_NUM_TX_QUEUES adapter->indices ++#endif /* HAVE_NETDEV_SELECT_QUEUE */ ++#else /* HAVE_TX_MQ */ ++#define IXGBE_NUM_TX_QUEUES 1 ++#define IXGBE_NUM_RX_QUEUES ( \ ++ ((struct ixgbe_adapter *)netdev_priv(netdev))->num_rx_queues) ++#endif /* HAVE_TX_MQ */ + + #define IXGBE_QUEUE_STATS_LEN ( \ +- (netdev->num_tx_queues + IXGBE_NUM_RX_QUEUES) * \ +- (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) +-#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) ++ (IXGBE_NUM_TX_QUEUES + IXGBE_NUM_RX_QUEUES) * \ ++ (sizeof(struct ixgbe_queue_stats) / sizeof(u64))) ++#define IXGBE_GLOBAL_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_stats) ++#define IXGBE_NETDEV_STATS_LEN ARRAY_SIZE(ixgbe_gstrings_net_stats) + #define IXGBE_PB_STATS_LEN ( \ +- (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ +- sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ +- sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ +- sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ +- / sizeof(u64)) ++ (sizeof(((struct ixgbe_adapter *)0)->stats.pxonrxc) + \ ++ sizeof(((struct ixgbe_adapter *)0)->stats.pxontxc) + \ ++ sizeof(((struct ixgbe_adapter *)0)->stats.pxoffrxc) + \ ++ sizeof(((struct ixgbe_adapter *)0)->stats.pxofftxc)) \ ++ / sizeof(u64)) ++#define IXGBE_VF_STATS_LEN \ ++ ((((struct ixgbe_adapter *)netdev_priv(netdev))->num_vfs) * \ ++ (sizeof(struct vf_stats) / sizeof(u64))) + #define IXGBE_STATS_LEN (IXGBE_GLOBAL_STATS_LEN + \ ++ IXGBE_NETDEV_STATS_LEN + \ + IXGBE_PB_STATS_LEN + \ +- IXGBE_QUEUE_STATS_LEN) ++ IXGBE_QUEUE_STATS_LEN + \ ++ IXGBE_VF_STATS_LEN) + ++#endif /* ETHTOOL_GSTATS */ ++#ifdef ETHTOOL_TEST + static const char ixgbe_gstrings_test[][ETH_GSTRING_LEN] = { + "Register test (offline)", "Eeprom test (offline)", + "Interrupt test 
(offline)", "Loopback test (offline)", + "Link test (on/offline)" + }; +-#define IXGBE_TEST_LEN sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN ++#define IXGBE_TEST_LEN (sizeof(ixgbe_gstrings_test) / ETH_GSTRING_LEN) ++#endif /* ETHTOOL_TEST */ ++ ++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT ++static const char ixgbe_priv_flags_strings[][ETH_GSTRING_LEN] = { ++#define IXGBE_PRIV_FLAGS_FD_ATR BIT(0) ++ "flow-director-atr", ++#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC ++#define IXGBE_PRIV_FLAGS_LEGACY_RX BIT(1) ++ "legacy-rx", ++#endif ++}; ++ ++#define IXGBE_PRIV_FLAGS_STR_LEN ARRAY_SIZE(ixgbe_priv_flags_strings) ++ ++#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ ++/* currently supported speeds for 10G */ ++#define ADVERTISED_MASK_10G (SUPPORTED_10000baseT_Full | SUPPORTED_10000baseKX4_Full | SUPPORTED_10000baseKR_Full) ++ ++#define ixgbe_isbackplane(type) ((type == ixgbe_media_type_backplane)? true : false) ++ ++static __u32 ixgbe_backplane_type(struct ixgbe_hw *hw) ++{ ++ __u32 mode = 0x00; ++ switch(hw->device_id) ++ { ++ case IXGBE_DEV_ID_82598: ++ case IXGBE_DEV_ID_82599_KX4: ++ case IXGBE_DEV_ID_82599_KX4_MEZZ: ++ case IXGBE_DEV_ID_X550EM_X_KX4: ++ mode = SUPPORTED_10000baseKX4_Full; ++ break; ++ case IXGBE_DEV_ID_82598_BX: ++ case IXGBE_DEV_ID_82599_KR: ++ case IXGBE_DEV_ID_X550EM_X_KR: ++ case IXGBE_DEV_ID_X550EM_X_XFI: ++ mode = SUPPORTED_10000baseKR_Full; ++ break; ++ default: ++ mode = (SUPPORTED_10000baseKX4_Full | SUPPORTED_10000baseKR_Full); ++ break; ++ } ++ return mode; ++} ++ ++#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE ++static int ixgbe_get_link_ksettings(struct net_device *netdev, ++ struct ethtool_link_ksettings *cmd) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ ixgbe_link_speed supported_link; ++ bool autoneg = false; ++ u32 supported, advertising; ++ ++ ethtool_convert_link_mode_to_legacy_u32(&supported, ++ cmd->link_modes.supported); ++ ++ hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); ++ 
++ /* set the supported link speeds */ ++ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) ++ supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? ++ ixgbe_backplane_type(hw) : ++ SUPPORTED_10000baseT_Full; ++ if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) ++ supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? ++ SUPPORTED_1000baseKX_Full : ++ SUPPORTED_1000baseT_Full; ++ if (supported_link & IXGBE_LINK_SPEED_100_FULL) ++ supported |= SUPPORTED_100baseT_Full; ++ if (supported_link & IXGBE_LINK_SPEED_10_FULL) ++ supported |= SUPPORTED_10baseT_Full; ++ ++ /* default advertised speed if phy.autoneg_advertised isn't set */ ++ advertising = supported; ++ ++ /* set the advertised speeds */ ++ if (hw->phy.autoneg_advertised) { ++ advertising = 0; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) ++ advertising |= ADVERTISED_10baseT_Full; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) ++ advertising |= ADVERTISED_100baseT_Full; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) ++ advertising |= supported & ADVERTISED_MASK_10G; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { ++ if (supported & SUPPORTED_1000baseKX_Full) ++ advertising |= ADVERTISED_1000baseKX_Full; ++ else ++ advertising |= ADVERTISED_1000baseT_Full; ++ } ++ } else { ++ if (hw->phy.multispeed_fiber && !autoneg) { ++ if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) ++ advertising = ADVERTISED_10000baseT_Full; ++ } ++ } ++ ++ if (autoneg) { ++ supported |= SUPPORTED_Autoneg; ++ advertising |= ADVERTISED_Autoneg; ++ cmd->base.autoneg = AUTONEG_ENABLE; ++ } else { ++ cmd->base.autoneg = AUTONEG_DISABLE; ++ } ++ ++ /* Determine the remaining settings based on the PHY type. 
*/ ++ switch (adapter->hw.phy.type) { ++ case ixgbe_phy_tn: ++ case ixgbe_phy_aq: ++ case ixgbe_phy_x550em_ext_t: ++ case ixgbe_phy_fw: ++ case ixgbe_phy_cu_unknown: ++ supported |= SUPPORTED_TP; ++ advertising |= ADVERTISED_TP; ++ cmd->base.port = PORT_TP; ++ break; ++ case ixgbe_phy_qt: ++ supported |= SUPPORTED_FIBRE; ++ advertising |= ADVERTISED_FIBRE; ++ cmd->base.port = PORT_FIBRE; ++ break; ++ case ixgbe_phy_nl: ++ case ixgbe_phy_sfp_passive_tyco: ++ case ixgbe_phy_sfp_passive_unknown: ++ case ixgbe_phy_sfp_ftl: ++ case ixgbe_phy_sfp_avago: ++ case ixgbe_phy_sfp_intel: ++ case ixgbe_phy_sfp_unknown: ++ case ixgbe_phy_qsfp_passive_unknown: ++ case ixgbe_phy_qsfp_active_unknown: ++ case ixgbe_phy_qsfp_intel: ++ case ixgbe_phy_qsfp_unknown: ++ switch (adapter->hw.phy.sfp_type) { ++ /* SFP+ devices, further checking needed */ ++ case ixgbe_sfp_type_da_cu: ++ case ixgbe_sfp_type_da_cu_core0: ++ case ixgbe_sfp_type_da_cu_core1: ++ supported |= SUPPORTED_FIBRE; ++ advertising |= ADVERTISED_FIBRE; ++ cmd->base.port = PORT_DA; ++ break; ++ case ixgbe_sfp_type_sr: ++ case ixgbe_sfp_type_lr: ++ case ixgbe_sfp_type_srlr_core0: ++ case ixgbe_sfp_type_srlr_core1: ++ case ixgbe_sfp_type_1g_sx_core0: ++ case ixgbe_sfp_type_1g_sx_core1: ++ case ixgbe_sfp_type_1g_lx_core0: ++ case ixgbe_sfp_type_1g_lx_core1: ++ supported |= SUPPORTED_FIBRE; ++ advertising |= ADVERTISED_FIBRE; ++ cmd->base.port = PORT_FIBRE; ++ break; ++ case ixgbe_sfp_type_not_present: ++ supported |= SUPPORTED_FIBRE; ++ advertising |= ADVERTISED_FIBRE; ++ cmd->base.port = PORT_NONE; ++ break; ++ case ixgbe_sfp_type_1g_cu_core0: ++ case ixgbe_sfp_type_1g_cu_core1: ++ supported |= SUPPORTED_TP; ++ advertising |= ADVERTISED_TP; ++ cmd->base.port = PORT_TP; ++ break; ++ case ixgbe_sfp_type_unknown: ++ default: ++ supported |= SUPPORTED_FIBRE; ++ advertising |= ADVERTISED_FIBRE; ++ cmd->base.port = PORT_OTHER; ++ break; ++ } ++ break; ++ case ixgbe_phy_xaui: ++ supported |= SUPPORTED_FIBRE; ++ advertising |= 
ADVERTISED_FIBRE; ++ cmd->base.port = PORT_NONE; ++ break; ++ case ixgbe_phy_unknown: ++ case ixgbe_phy_generic: ++ case ixgbe_phy_sfp_unsupported: ++ default: ++ supported |= SUPPORTED_FIBRE; ++ advertising |= ADVERTISED_FIBRE; ++ cmd->base.port = PORT_OTHER; ++ break; ++ } ++ ++ /* Indicate pause support */ ++ supported |= SUPPORTED_Pause; ++ ++ switch (hw->fc.requested_mode) { ++ case ixgbe_fc_full: ++ advertising |= ADVERTISED_Pause; ++ break; ++ case ixgbe_fc_rx_pause: ++ advertising |= ADVERTISED_Pause | ADVERTISED_Asym_Pause; ++ break; ++ case ixgbe_fc_tx_pause: ++ advertising |= ADVERTISED_Asym_Pause; ++ break; ++ default: ++ advertising &= ~(ADVERTISED_Pause | ADVERTISED_Asym_Pause); ++ } ++ ++ if (netif_carrier_ok(netdev)) { ++ switch (adapter->link_speed) { ++ case IXGBE_LINK_SPEED_10GB_FULL: ++ cmd->base.speed = SPEED_10000; ++ break; ++ case IXGBE_LINK_SPEED_5GB_FULL: ++ cmd->base.speed = SPEED_5000; ++ break; ++#ifdef SUPPORTED_2500baseX_Full ++ case IXGBE_LINK_SPEED_2_5GB_FULL: ++ cmd->base.speed = SPEED_2500; ++ break; ++#endif /* SUPPORTED_2500baseX_Full */ ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ cmd->base.speed = SPEED_1000; ++ break; ++ case IXGBE_LINK_SPEED_100_FULL: ++ cmd->base.speed = SPEED_100; ++ break; ++ case IXGBE_LINK_SPEED_10_FULL: ++ cmd->base.speed = SPEED_10; ++ break; ++ default: ++ break; ++ } ++ cmd->base.duplex = DUPLEX_FULL; ++ } else { ++ cmd->base.speed = SPEED_UNKNOWN; ++ cmd->base.duplex = DUPLEX_UNKNOWN; ++ } ++ ++ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.supported, ++ supported); ++ ethtool_convert_legacy_u32_to_link_mode(cmd->link_modes.advertising, ++ supported); + ++ return 0; ++} ++#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ + static int ixgbe_get_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + ixgbe_link_speed supported_link; +- u32 link_speed = 0; + bool autoneg = false; +- bool 
link_up; + + hw->mac.ops.get_link_capabilities(hw, &supported_link, &autoneg); + + /* set the supported link speeds */ + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) +- ecmd->supported |= SUPPORTED_10000baseT_Full; ++ ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? ++ ixgbe_backplane_type(hw) : ++ SUPPORTED_10000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) +- ecmd->supported |= SUPPORTED_1000baseT_Full; ++ ecmd->supported |= (ixgbe_isbackplane(hw->phy.media_type)) ? ++ SUPPORTED_1000baseKX_Full : ++ SUPPORTED_1000baseT_Full; + if (supported_link & IXGBE_LINK_SPEED_100_FULL) + ecmd->supported |= SUPPORTED_100baseT_Full; ++ if (supported_link & IXGBE_LINK_SPEED_10_FULL) ++ ecmd->supported |= SUPPORTED_10baseT_Full; ++ ++ /* default advertised speed if phy.autoneg_advertised isn't set */ ++ ecmd->advertising = ecmd->supported; + + /* set the advertised speeds */ + if (hw->phy.autoneg_advertised) { ++ ecmd->advertising = 0; ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10_FULL) ++ ecmd->advertising |= ADVERTISED_10baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) + ecmd->advertising |= ADVERTISED_100baseT_Full; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) +- ecmd->advertising |= ADVERTISED_10000baseT_Full; +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) +- ecmd->advertising |= ADVERTISED_1000baseT_Full; ++ ecmd->advertising |= (ecmd->supported & ADVERTISED_MASK_10G); ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) { ++ if (ecmd->supported & SUPPORTED_1000baseKX_Full) ++ ecmd->advertising |= ADVERTISED_1000baseKX_Full; ++ else ++ ecmd->advertising |= ADVERTISED_1000baseT_Full; ++ } + } else { +- /* default modes in case phy.autoneg_advertised isn't set */ +- if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) +- ecmd->advertising |= ADVERTISED_10000baseT_Full; +- if (supported_link & IXGBE_LINK_SPEED_1GB_FULL) +- ecmd->advertising |= ADVERTISED_1000baseT_Full; 
+- if (supported_link & IXGBE_LINK_SPEED_100_FULL) +- ecmd->advertising |= ADVERTISED_100baseT_Full; +- + if (hw->phy.multispeed_fiber && !autoneg) { + if (supported_link & IXGBE_LINK_SPEED_10GB_FULL) + ecmd->advertising = ADVERTISED_10000baseT_Full; +@@ -198,8 +486,9 @@ static int ixgbe_get_settings(struct net_device *netdev, + ecmd->supported |= SUPPORTED_Autoneg; + ecmd->advertising |= ADVERTISED_Autoneg; + ecmd->autoneg = AUTONEG_ENABLE; +- } else ++ } else { + ecmd->autoneg = AUTONEG_DISABLE; ++ } + + ecmd->transceiver = XCVR_EXTERNAL; + +@@ -207,6 +496,8 @@ static int ixgbe_get_settings(struct net_device *netdev, + switch (adapter->hw.phy.type) { + case ixgbe_phy_tn: + case ixgbe_phy_aq: ++ case ixgbe_phy_x550em_ext_t: ++ case ixgbe_phy_fw: + case ixgbe_phy_cu_unknown: + ecmd->supported |= SUPPORTED_TP; + ecmd->advertising |= ADVERTISED_TP; +@@ -224,8 +515,12 @@ static int ixgbe_get_settings(struct net_device *netdev, + case ixgbe_phy_sfp_avago: + case ixgbe_phy_sfp_intel: + case ixgbe_phy_sfp_unknown: +- /* SFP+ devices, further checking needed */ ++ case ixgbe_phy_qsfp_passive_unknown: ++ case ixgbe_phy_qsfp_active_unknown: ++ case ixgbe_phy_qsfp_intel: ++ case ixgbe_phy_qsfp_unknown: + switch (adapter->hw.phy.sfp_type) { ++ /* SFP+ devices, further checking needed */ + case ixgbe_sfp_type_da_cu: + case ixgbe_sfp_type_da_cu_core0: + case ixgbe_sfp_type_da_cu_core1: +@@ -279,18 +574,47 @@ static int ixgbe_get_settings(struct net_device *netdev, + break; + } + +- hw->mac.ops.check_link(hw, &link_speed, &link_up, false); +- if (link_up) { +- switch (link_speed) { ++ /* Indicate pause support */ ++ ecmd->supported |= SUPPORTED_Pause; ++ ++ switch (hw->fc.requested_mode) { ++ case ixgbe_fc_full: ++ ecmd->advertising |= ADVERTISED_Pause; ++ break; ++ case ixgbe_fc_rx_pause: ++ ecmd->advertising |= ADVERTISED_Pause | ++ ADVERTISED_Asym_Pause; ++ break; ++ case ixgbe_fc_tx_pause: ++ ecmd->advertising |= ADVERTISED_Asym_Pause; ++ break; ++ default: ++ 
ecmd->advertising &= ~(ADVERTISED_Pause | ++ ADVERTISED_Asym_Pause); ++ } ++ ++ if (netif_carrier_ok(netdev)) { ++ switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_10GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_10000); + break; ++ case IXGBE_LINK_SPEED_5GB_FULL: ++ ethtool_cmd_speed_set(ecmd, SPEED_5000); ++ break; ++#ifdef SUPPORTED_2500baseX_Full ++ case IXGBE_LINK_SPEED_2_5GB_FULL: ++ ethtool_cmd_speed_set(ecmd, SPEED_2500); ++ break; ++#endif /* SUPPORTED_2500baseX_Full */ + case IXGBE_LINK_SPEED_1GB_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_1000); + break; + case IXGBE_LINK_SPEED_100_FULL: + ethtool_cmd_speed_set(ecmd, SPEED_100); + break; ++ case IXGBE_LINK_SPEED_10_FULL: ++ ethtool_cmd_speed_set(ecmd, SPEED_10); ++ break; + default: + break; + } +@@ -302,7 +626,80 @@ static int ixgbe_get_settings(struct net_device *netdev, + + return 0; + } ++#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ ++ ++#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE ++static int ixgbe_set_link_ksettings(struct net_device *netdev, ++ const struct ethtool_link_ksettings *cmd) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 advertised, old; ++ s32 err = 0; ++ u32 supported, advertising; ++ ++ ethtool_convert_link_mode_to_legacy_u32(&supported, ++ cmd->link_modes.supported); ++ ethtool_convert_link_mode_to_legacy_u32(&advertising, ++ cmd->link_modes.advertising); ++ ++ if ((hw->phy.media_type == ixgbe_media_type_copper) || ++ (hw->phy.multispeed_fiber)) { ++ /* ++ * this function does not support duplex forcing, but can ++ * limit the advertising of the adapter to the specified speed ++ */ ++ if (advertising & ~supported) ++ return -EINVAL; ++ ++ /* only allow one speed at a time if no autoneg */ ++ if (!cmd->base.autoneg && hw->phy.multispeed_fiber) { ++ if (advertising == ++ (ADVERTISED_10000baseT_Full | ++ ADVERTISED_1000baseT_Full)) ++ return -EINVAL; ++ } ++ ++ old = hw->phy.autoneg_advertised; ++ advertised = 0; ++ 
if (advertising & ADVERTISED_10000baseT_Full) ++ advertised |= IXGBE_LINK_SPEED_10GB_FULL; ++ ++ if (advertising & ADVERTISED_1000baseT_Full) ++ advertised |= IXGBE_LINK_SPEED_1GB_FULL; ++ ++ if (advertising & ADVERTISED_100baseT_Full) ++ advertised |= IXGBE_LINK_SPEED_100_FULL; ++ ++ if (advertising & ADVERTISED_10baseT_Full) ++ advertised |= IXGBE_LINK_SPEED_10_FULL; ++ ++ if (old == advertised) ++ return err; ++ /* this sets the link speed and restarts auto-neg */ ++ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) ++ usleep_range(1000, 2000); ++ ++ hw->mac.autotry_restart = true; ++ err = hw->mac.ops.setup_link(hw, advertised, true); ++ if (err) { ++ e_info(probe, "setup link failed with code %d\n", err); ++ hw->mac.ops.setup_link(hw, old, true); ++ } ++ clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); ++ } else { ++ /* in this case we currently only support 10Gb/FULL */ ++ u32 speed = cmd->base.speed; ++ ++ if ((cmd->base.autoneg == AUTONEG_ENABLE) || ++ (advertising != ADVERTISED_10000baseT_Full) || ++ (speed + cmd->base.duplex != SPEED_10000 + DUPLEX_FULL)) ++ return -EINVAL; ++ } + ++ return err; ++} ++#else /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ + static int ixgbe_set_settings(struct net_device *netdev, + struct ethtool_cmd *ecmd) + { +@@ -339,18 +736,27 @@ static int ixgbe_set_settings(struct net_device *netdev, + if (ecmd->advertising & ADVERTISED_100baseT_Full) + advertised |= IXGBE_LINK_SPEED_100_FULL; + ++ if (ecmd->advertising & ADVERTISED_10baseT_Full) ++ advertised |= IXGBE_LINK_SPEED_10_FULL; ++ + if (old == advertised) + return err; + /* this sets the link speed and restarts auto-neg */ ++ while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) ++ usleep_range(1000, 2000); ++ + hw->mac.autotry_restart = true; + err = hw->mac.ops.setup_link(hw, advertised, true); + if (err) { + e_info(probe, "setup link failed with code %d\n", err); + hw->mac.ops.setup_link(hw, old, true); + } +- } else { ++ 
clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); ++ } ++ else { + /* in this case we currently only support 10Gb/FULL */ + u32 speed = ethtool_cmd_speed(ecmd); ++ + if ((ecmd->autoneg == AUTONEG_ENABLE) || + (ecmd->advertising != ADVERTISED_10000baseT_Full) || + (speed + ecmd->duplex != SPEED_10000 + DUPLEX_FULL)) +@@ -359,6 +765,7 @@ static int ixgbe_set_settings(struct net_device *netdev, + + return err; + } ++#endif /* !HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE */ + + static void ixgbe_get_pauseparam(struct net_device *netdev, + struct ethtool_pauseparam *pause) +@@ -394,18 +801,19 @@ static int ixgbe_set_pauseparam(struct net_device *netdev, + (adapter->flags & IXGBE_FLAG_DCB_ENABLED)) + return -EINVAL; + +- /* some devices do not support autoneg of link flow control */ ++ ++ /* some devices do not support autoneg of flow control */ + if ((pause->autoneg == AUTONEG_ENABLE) && + !ixgbe_device_supports_autoneg_fc(hw)) +- return -EINVAL; ++ return -EINVAL; + + fc.disable_fc_autoneg = (pause->autoneg != AUTONEG_ENABLE); + + if ((pause->rx_pause && pause->tx_pause) || pause->autoneg) + fc.requested_mode = ixgbe_fc_full; +- else if (pause->rx_pause && !pause->tx_pause) ++ else if (pause->rx_pause) + fc.requested_mode = ixgbe_fc_rx_pause; +- else if (!pause->rx_pause && pause->tx_pause) ++ else if (pause->tx_pause) + fc.requested_mode = ixgbe_fc_tx_pause; + else + fc.requested_mode = ixgbe_fc_none; +@@ -434,16 +842,16 @@ static void ixgbe_set_msglevel(struct net_device *netdev, u32 data) + adapter->msg_enable = data; + } + +-static int ixgbe_get_regs_len(struct net_device *netdev) ++static int ixgbe_get_regs_len(struct net_device __always_unused *netdev) + { +-#define IXGBE_REGS_LEN 1139 ++#define IXGBE_REGS_LEN 1129 + return IXGBE_REGS_LEN * sizeof(u32); + } + +-#define IXGBE_GET_STAT(_A_, _R_) _A_->stats._R_ ++#define IXGBE_GET_STAT(_A_, _R_) (_A_->stats._R_) + +-static void ixgbe_get_regs(struct net_device *netdev, +- struct ethtool_regs *regs, void *p) ++static void 
ixgbe_get_regs(struct net_device *netdev, struct ethtool_regs *regs, ++ void *p) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; +@@ -456,193 +864,166 @@ static void ixgbe_get_regs(struct net_device *netdev, + hw->device_id; + + /* General Registers */ +- regs_buff[0] = IXGBE_READ_REG(hw, IXGBE_CTRL); +- regs_buff[1] = IXGBE_READ_REG(hw, IXGBE_STATUS); +- regs_buff[2] = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); +- regs_buff[3] = IXGBE_READ_REG(hw, IXGBE_ESDP); +- regs_buff[4] = IXGBE_READ_REG(hw, IXGBE_EODSDP); +- regs_buff[5] = IXGBE_READ_REG(hw, IXGBE_LEDCTL); +- regs_buff[6] = IXGBE_READ_REG(hw, IXGBE_FRTIMER); +- regs_buff[7] = IXGBE_READ_REG(hw, IXGBE_TCPTIMER); ++ regs_buff[0] = IXGBE_R32_Q(hw, IXGBE_CTRL); ++ regs_buff[1] = IXGBE_R32_Q(hw, IXGBE_STATUS); ++ regs_buff[2] = IXGBE_R32_Q(hw, IXGBE_CTRL_EXT); ++ regs_buff[3] = IXGBE_R32_Q(hw, IXGBE_ESDP); ++ regs_buff[4] = IXGBE_R32_Q(hw, IXGBE_EODSDP); ++ regs_buff[5] = IXGBE_R32_Q(hw, IXGBE_LEDCTL); ++ regs_buff[6] = IXGBE_R32_Q(hw, IXGBE_FRTIMER); ++ regs_buff[7] = IXGBE_R32_Q(hw, IXGBE_TCPTIMER); + + /* NVM Register */ +- regs_buff[8] = IXGBE_READ_REG(hw, IXGBE_EEC); +- regs_buff[9] = IXGBE_READ_REG(hw, IXGBE_EERD); +- regs_buff[10] = IXGBE_READ_REG(hw, IXGBE_FLA); +- regs_buff[11] = IXGBE_READ_REG(hw, IXGBE_EEMNGCTL); +- regs_buff[12] = IXGBE_READ_REG(hw, IXGBE_EEMNGDATA); +- regs_buff[13] = IXGBE_READ_REG(hw, IXGBE_FLMNGCTL); +- regs_buff[14] = IXGBE_READ_REG(hw, IXGBE_FLMNGDATA); +- regs_buff[15] = IXGBE_READ_REG(hw, IXGBE_FLMNGCNT); +- regs_buff[16] = IXGBE_READ_REG(hw, IXGBE_FLOP); +- regs_buff[17] = IXGBE_READ_REG(hw, IXGBE_GRC); ++ regs_buff[8] = IXGBE_R32_Q(hw, IXGBE_EEC); ++ regs_buff[9] = IXGBE_R32_Q(hw, IXGBE_EERD); ++ regs_buff[10] = IXGBE_R32_Q(hw, IXGBE_FLA); ++ regs_buff[11] = IXGBE_R32_Q(hw, IXGBE_EEMNGCTL); ++ regs_buff[12] = IXGBE_R32_Q(hw, IXGBE_EEMNGDATA); ++ regs_buff[13] = IXGBE_R32_Q(hw, IXGBE_FLMNGCTL); ++ regs_buff[14] = IXGBE_R32_Q(hw, 
IXGBE_FLMNGDATA); ++ regs_buff[15] = IXGBE_R32_Q(hw, IXGBE_FLMNGCNT); ++ regs_buff[16] = IXGBE_R32_Q(hw, IXGBE_FLOP); ++ regs_buff[17] = IXGBE_R32_Q(hw, IXGBE_GRC); + + /* Interrupt */ + /* don't read EICR because it can clear interrupt causes, instead + * read EICS which is a shadow but doesn't clear EICR */ +- regs_buff[18] = IXGBE_READ_REG(hw, IXGBE_EICS); +- regs_buff[19] = IXGBE_READ_REG(hw, IXGBE_EICS); +- regs_buff[20] = IXGBE_READ_REG(hw, IXGBE_EIMS); +- regs_buff[21] = IXGBE_READ_REG(hw, IXGBE_EIMC); +- regs_buff[22] = IXGBE_READ_REG(hw, IXGBE_EIAC); +- regs_buff[23] = IXGBE_READ_REG(hw, IXGBE_EIAM); +- regs_buff[24] = IXGBE_READ_REG(hw, IXGBE_EITR(0)); +- regs_buff[25] = IXGBE_READ_REG(hw, IXGBE_IVAR(0)); +- regs_buff[26] = IXGBE_READ_REG(hw, IXGBE_MSIXT); +- regs_buff[27] = IXGBE_READ_REG(hw, IXGBE_MSIXPBA); +- regs_buff[28] = IXGBE_READ_REG(hw, IXGBE_PBACL(0)); +- regs_buff[29] = IXGBE_READ_REG(hw, IXGBE_GPIE); ++ regs_buff[18] = IXGBE_R32_Q(hw, IXGBE_EICS); ++ regs_buff[19] = IXGBE_R32_Q(hw, IXGBE_EICS); ++ regs_buff[20] = IXGBE_R32_Q(hw, IXGBE_EIMS); ++ regs_buff[21] = IXGBE_R32_Q(hw, IXGBE_EIMC); ++ regs_buff[22] = IXGBE_R32_Q(hw, IXGBE_EIAC); ++ regs_buff[23] = IXGBE_R32_Q(hw, IXGBE_EIAM); ++ regs_buff[24] = IXGBE_R32_Q(hw, IXGBE_EITR(0)); ++ regs_buff[25] = IXGBE_R32_Q(hw, IXGBE_IVAR(0)); ++ regs_buff[26] = IXGBE_R32_Q(hw, IXGBE_MSIXT); ++ regs_buff[27] = IXGBE_R32_Q(hw, IXGBE_MSIXPBA); ++ regs_buff[28] = IXGBE_R32_Q(hw, IXGBE_PBACL(0)); ++ regs_buff[29] = IXGBE_R32_Q(hw, IXGBE_GPIE); + + /* Flow Control */ +- regs_buff[30] = IXGBE_READ_REG(hw, IXGBE_PFCTOP); +- regs_buff[31] = IXGBE_READ_REG(hw, IXGBE_FCTTV(0)); +- regs_buff[32] = IXGBE_READ_REG(hw, IXGBE_FCTTV(1)); +- regs_buff[33] = IXGBE_READ_REG(hw, IXGBE_FCTTV(2)); +- regs_buff[34] = IXGBE_READ_REG(hw, IXGBE_FCTTV(3)); ++ regs_buff[30] = IXGBE_R32_Q(hw, IXGBE_PFCTOP); ++ regs_buff[31] = IXGBE_R32_Q(hw, IXGBE_FCTTV(0)); ++ regs_buff[32] = IXGBE_R32_Q(hw, IXGBE_FCTTV(1)); ++ regs_buff[33] = 
IXGBE_R32_Q(hw, IXGBE_FCTTV(2)); ++ regs_buff[34] = IXGBE_R32_Q(hw, IXGBE_FCTTV(3)); + for (i = 0; i < 8; i++) { + switch (hw->mac.type) { + case ixgbe_mac_82598EB: +- regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL(i)); +- regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH(i)); ++ regs_buff[35 + i] = IXGBE_R32_Q(hw, IXGBE_FCRTL(i)); ++ regs_buff[43 + i] = IXGBE_R32_Q(hw, IXGBE_FCRTH(i)); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: +- regs_buff[35 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTL_82599(i)); +- regs_buff[43 + i] = IXGBE_READ_REG(hw, IXGBE_FCRTH_82599(i)); ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ regs_buff[35 + i] = IXGBE_R32_Q(hw, ++ IXGBE_FCRTL_82599(i)); ++ regs_buff[43 + i] = IXGBE_R32_Q(hw, ++ IXGBE_FCRTH_82599(i)); + break; + default: + break; + } + } +- regs_buff[51] = IXGBE_READ_REG(hw, IXGBE_FCRTV); +- regs_buff[52] = IXGBE_READ_REG(hw, IXGBE_TFCS); ++ regs_buff[51] = IXGBE_R32_Q(hw, IXGBE_FCRTV); ++ regs_buff[52] = IXGBE_R32_Q(hw, IXGBE_TFCS); + + /* Receive DMA */ + for (i = 0; i < 64; i++) +- regs_buff[53 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); ++ regs_buff[53 + i] = IXGBE_R32_Q(hw, IXGBE_RDBAL(i)); + for (i = 0; i < 64; i++) +- regs_buff[117 + i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); ++ regs_buff[117 + i] = IXGBE_R32_Q(hw, IXGBE_RDBAH(i)); + for (i = 0; i < 64; i++) +- regs_buff[181 + i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); ++ regs_buff[181 + i] = IXGBE_R32_Q(hw, IXGBE_RDLEN(i)); + for (i = 0; i < 64; i++) +- regs_buff[245 + i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); ++ regs_buff[245 + i] = IXGBE_R32_Q(hw, IXGBE_RDH(i)); + for (i = 0; i < 64; i++) +- regs_buff[309 + i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); ++ regs_buff[309 + i] = IXGBE_R32_Q(hw, IXGBE_RDT(i)); + for (i = 0; i < 64; i++) +- regs_buff[373 + i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); ++ regs_buff[373 + i] = IXGBE_R32_Q(hw, IXGBE_RXDCTL(i)); + for (i = 0; i < 16; i++) +- regs_buff[437 + i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); ++ regs_buff[437 + i] = 
IXGBE_R32_Q(hw, IXGBE_SRRCTL(i)); + for (i = 0; i < 16; i++) +- regs_buff[453 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); +- regs_buff[469] = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); ++ regs_buff[453 + i] = IXGBE_R32_Q(hw, IXGBE_DCA_RXCTRL(i)); ++ regs_buff[469] = IXGBE_R32_Q(hw, IXGBE_RDRXCTL); + for (i = 0; i < 8; i++) +- regs_buff[470 + i] = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(i)); +- regs_buff[478] = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +- regs_buff[479] = IXGBE_READ_REG(hw, IXGBE_DROPEN); ++ regs_buff[470 + i] = IXGBE_R32_Q(hw, IXGBE_RXPBSIZE(i)); ++ regs_buff[478] = IXGBE_R32_Q(hw, IXGBE_RXCTRL); ++ regs_buff[479] = IXGBE_R32_Q(hw, IXGBE_DROPEN); + + /* Receive */ +- regs_buff[480] = IXGBE_READ_REG(hw, IXGBE_RXCSUM); +- regs_buff[481] = IXGBE_READ_REG(hw, IXGBE_RFCTL); ++ regs_buff[480] = IXGBE_R32_Q(hw, IXGBE_RXCSUM); ++ regs_buff[481] = IXGBE_R32_Q(hw, IXGBE_RFCTL); + for (i = 0; i < 16; i++) +- regs_buff[482 + i] = IXGBE_READ_REG(hw, IXGBE_RAL(i)); ++ regs_buff[482 + i] = IXGBE_R32_Q(hw, IXGBE_RAL(i)); + for (i = 0; i < 16; i++) +- regs_buff[498 + i] = IXGBE_READ_REG(hw, IXGBE_RAH(i)); +- regs_buff[514] = IXGBE_READ_REG(hw, IXGBE_PSRTYPE(0)); +- regs_buff[515] = IXGBE_READ_REG(hw, IXGBE_FCTRL); +- regs_buff[516] = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); +- regs_buff[517] = IXGBE_READ_REG(hw, IXGBE_MCSTCTRL); +- regs_buff[518] = IXGBE_READ_REG(hw, IXGBE_MRQC); +- regs_buff[519] = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); ++ regs_buff[498 + i] = IXGBE_R32_Q(hw, IXGBE_RAH(i)); ++ regs_buff[514] = IXGBE_R32_Q(hw, IXGBE_PSRTYPE(0)); ++ regs_buff[515] = IXGBE_R32_Q(hw, IXGBE_FCTRL); ++ regs_buff[516] = IXGBE_R32_Q(hw, IXGBE_VLNCTRL); ++ regs_buff[517] = IXGBE_R32_Q(hw, IXGBE_MCSTCTRL); ++ regs_buff[518] = IXGBE_R32_Q(hw, IXGBE_MRQC); ++ regs_buff[519] = IXGBE_R32_Q(hw, IXGBE_VMD_CTL); + for (i = 0; i < 8; i++) +- regs_buff[520 + i] = IXGBE_READ_REG(hw, IXGBE_IMIR(i)); ++ regs_buff[520 + i] = IXGBE_R32_Q(hw, IXGBE_IMIR(i)); + for (i = 0; i < 8; i++) +- regs_buff[528 + i] = 
IXGBE_READ_REG(hw, IXGBE_IMIREXT(i)); +- regs_buff[536] = IXGBE_READ_REG(hw, IXGBE_IMIRVP); ++ regs_buff[528 + i] = IXGBE_R32_Q(hw, IXGBE_IMIREXT(i)); ++ regs_buff[536] = IXGBE_R32_Q(hw, IXGBE_IMIRVP); + + /* Transmit */ + for (i = 0; i < 32; i++) +- regs_buff[537 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); ++ regs_buff[537 + i] = IXGBE_R32_Q(hw, IXGBE_TDBAL(i)); + for (i = 0; i < 32; i++) +- regs_buff[569 + i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); ++ regs_buff[569 + i] = IXGBE_R32_Q(hw, IXGBE_TDBAH(i)); + for (i = 0; i < 32; i++) +- regs_buff[601 + i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); ++ regs_buff[601 + i] = IXGBE_R32_Q(hw, IXGBE_TDLEN(i)); + for (i = 0; i < 32; i++) +- regs_buff[633 + i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); ++ regs_buff[633 + i] = IXGBE_R32_Q(hw, IXGBE_TDH(i)); + for (i = 0; i < 32; i++) +- regs_buff[665 + i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); ++ regs_buff[665 + i] = IXGBE_R32_Q(hw, IXGBE_TDT(i)); + for (i = 0; i < 32; i++) +- regs_buff[697 + i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); ++ regs_buff[697 + i] = IXGBE_R32_Q(hw, IXGBE_TXDCTL(i)); + for (i = 0; i < 32; i++) +- regs_buff[729 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAL(i)); ++ regs_buff[729 + i] = IXGBE_R32_Q(hw, IXGBE_TDWBAL(i)); + for (i = 0; i < 32; i++) +- regs_buff[761 + i] = IXGBE_READ_REG(hw, IXGBE_TDWBAH(i)); +- regs_buff[793] = IXGBE_READ_REG(hw, IXGBE_DTXCTL); ++ regs_buff[761 + i] = IXGBE_R32_Q(hw, IXGBE_TDWBAH(i)); ++ regs_buff[793] = IXGBE_R32_Q(hw, IXGBE_DTXCTL); + for (i = 0; i < 16; i++) +- regs_buff[794 + i] = IXGBE_READ_REG(hw, IXGBE_DCA_TXCTRL(i)); +- regs_buff[810] = IXGBE_READ_REG(hw, IXGBE_TIPG); ++ regs_buff[794 + i] = IXGBE_R32_Q(hw, IXGBE_DCA_TXCTRL(i)); ++ regs_buff[810] = IXGBE_R32_Q(hw, IXGBE_TIPG); + for (i = 0; i < 8; i++) +- regs_buff[811 + i] = IXGBE_READ_REG(hw, IXGBE_TXPBSIZE(i)); +- regs_buff[819] = IXGBE_READ_REG(hw, IXGBE_MNGTXMAP); ++ regs_buff[811 + i] = IXGBE_R32_Q(hw, IXGBE_TXPBSIZE(i)); ++ regs_buff[819] = IXGBE_R32_Q(hw, IXGBE_MNGTXMAP); + + /* 
Wake Up */ +- regs_buff[820] = IXGBE_READ_REG(hw, IXGBE_WUC); +- regs_buff[821] = IXGBE_READ_REG(hw, IXGBE_WUFC); +- regs_buff[822] = IXGBE_READ_REG(hw, IXGBE_WUS); +- regs_buff[823] = IXGBE_READ_REG(hw, IXGBE_IPAV); +- regs_buff[824] = IXGBE_READ_REG(hw, IXGBE_IP4AT); +- regs_buff[825] = IXGBE_READ_REG(hw, IXGBE_IP6AT); +- regs_buff[826] = IXGBE_READ_REG(hw, IXGBE_WUPL); +- regs_buff[827] = IXGBE_READ_REG(hw, IXGBE_WUPM); +- regs_buff[828] = IXGBE_READ_REG(hw, IXGBE_FHFT(0)); ++ regs_buff[820] = IXGBE_R32_Q(hw, IXGBE_WUC); ++ regs_buff[821] = IXGBE_R32_Q(hw, IXGBE_WUFC); ++ regs_buff[822] = IXGBE_R32_Q(hw, IXGBE_WUS); ++ regs_buff[823] = IXGBE_R32_Q(hw, IXGBE_IPAV); ++ regs_buff[824] = IXGBE_R32_Q(hw, IXGBE_IP4AT); ++ regs_buff[825] = IXGBE_R32_Q(hw, IXGBE_IP6AT); ++ regs_buff[826] = IXGBE_R32_Q(hw, IXGBE_WUPL); ++ regs_buff[827] = IXGBE_R32_Q(hw, IXGBE_WUPM); ++ regs_buff[828] = IXGBE_R32_Q(hw, IXGBE_FHFT(0)); + + /* DCB */ +- regs_buff[829] = IXGBE_READ_REG(hw, IXGBE_RMCS); /* same as FCCFG */ +- regs_buff[831] = IXGBE_READ_REG(hw, IXGBE_PDPMCS); /* same as RTTPCS */ +- +- switch (hw->mac.type) { +- case ixgbe_mac_82598EB: +- regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_DPMCS); +- regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RUPPBMR); +- for (i = 0; i < 8; i++) +- regs_buff[833 + i] = +- IXGBE_READ_REG(hw, IXGBE_RT2CR(i)); +- for (i = 0; i < 8; i++) +- regs_buff[841 + i] = +- IXGBE_READ_REG(hw, IXGBE_RT2SR(i)); +- for (i = 0; i < 8; i++) +- regs_buff[849 + i] = +- IXGBE_READ_REG(hw, IXGBE_TDTQ2TCCR(i)); +- for (i = 0; i < 8; i++) +- regs_buff[857 + i] = +- IXGBE_READ_REG(hw, IXGBE_TDTQ2TCSR(i)); +- break; +- case ixgbe_mac_82599EB: +- case ixgbe_mac_X540: +- regs_buff[830] = IXGBE_READ_REG(hw, IXGBE_RTTDCS); +- regs_buff[832] = IXGBE_READ_REG(hw, IXGBE_RTRPCS); +- for (i = 0; i < 8; i++) +- regs_buff[833 + i] = +- IXGBE_READ_REG(hw, IXGBE_RTRPT4C(i)); +- for (i = 0; i < 8; i++) +- regs_buff[841 + i] = +- IXGBE_READ_REG(hw, IXGBE_RTRPT4S(i)); +- for (i = 0; i < 8; i++) 
+- regs_buff[849 + i] = +- IXGBE_READ_REG(hw, IXGBE_RTTDT2C(i)); +- for (i = 0; i < 8; i++) +- regs_buff[857 + i] = +- IXGBE_READ_REG(hw, IXGBE_RTTDT2S(i)); +- break; +- default: +- break; +- } +- ++ regs_buff[829] = IXGBE_R32_Q(hw, IXGBE_RMCS); ++ regs_buff[830] = IXGBE_R32_Q(hw, IXGBE_DPMCS); ++ regs_buff[831] = IXGBE_R32_Q(hw, IXGBE_PDPMCS); ++ regs_buff[832] = IXGBE_R32_Q(hw, IXGBE_RUPPBMR); ++ for (i = 0; i < 8; i++) ++ regs_buff[833 + i] = IXGBE_R32_Q(hw, IXGBE_RT2CR(i)); ++ for (i = 0; i < 8; i++) ++ regs_buff[841 + i] = IXGBE_R32_Q(hw, IXGBE_RT2SR(i)); + for (i = 0; i < 8; i++) +- regs_buff[865 + i] = +- IXGBE_READ_REG(hw, IXGBE_TDPT2TCCR(i)); /* same as RTTPT2C */ ++ regs_buff[849 + i] = IXGBE_R32_Q(hw, IXGBE_TDTQ2TCCR(i)); + for (i = 0; i < 8; i++) +- regs_buff[873 + i] = +- IXGBE_READ_REG(hw, IXGBE_TDPT2TCSR(i)); /* same as RTTPT2S */ ++ regs_buff[857 + i] = IXGBE_R32_Q(hw, IXGBE_TDTQ2TCSR(i)); ++ for (i = 0; i < 8; i++) ++ regs_buff[865 + i] = IXGBE_R32_Q(hw, IXGBE_TDPT2TCCR(i)); ++ for (i = 0; i < 8; i++) ++ regs_buff[873 + i] = IXGBE_R32_Q(hw, IXGBE_TDPT2TCSR(i)); + + /* Statistics */ + regs_buff[881] = IXGBE_GET_STAT(adapter, crcerrs); +@@ -709,93 +1090,77 @@ static void ixgbe_get_regs(struct net_device *netdev, + regs_buff[1022 + i] = IXGBE_GET_STAT(adapter, qbtc[i]); + + /* MAC */ +- regs_buff[1038] = IXGBE_READ_REG(hw, IXGBE_PCS1GCFIG); +- regs_buff[1039] = IXGBE_READ_REG(hw, IXGBE_PCS1GLCTL); +- regs_buff[1040] = IXGBE_READ_REG(hw, IXGBE_PCS1GLSTA); +- regs_buff[1041] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG0); +- regs_buff[1042] = IXGBE_READ_REG(hw, IXGBE_PCS1GDBG1); +- regs_buff[1043] = IXGBE_READ_REG(hw, IXGBE_PCS1GANA); +- regs_buff[1044] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLP); +- regs_buff[1045] = IXGBE_READ_REG(hw, IXGBE_PCS1GANNP); +- regs_buff[1046] = IXGBE_READ_REG(hw, IXGBE_PCS1GANLPNP); +- regs_buff[1047] = IXGBE_READ_REG(hw, IXGBE_HLREG0); +- regs_buff[1048] = IXGBE_READ_REG(hw, IXGBE_HLREG1); +- regs_buff[1049] = IXGBE_READ_REG(hw, 
IXGBE_PAP); +- regs_buff[1050] = IXGBE_READ_REG(hw, IXGBE_MACA); +- regs_buff[1051] = IXGBE_READ_REG(hw, IXGBE_APAE); +- regs_buff[1052] = IXGBE_READ_REG(hw, IXGBE_ARD); +- regs_buff[1053] = IXGBE_READ_REG(hw, IXGBE_AIS); +- regs_buff[1054] = IXGBE_READ_REG(hw, IXGBE_MSCA); +- regs_buff[1055] = IXGBE_READ_REG(hw, IXGBE_MSRWD); +- regs_buff[1056] = IXGBE_READ_REG(hw, IXGBE_MLADD); +- regs_buff[1057] = IXGBE_READ_REG(hw, IXGBE_MHADD); +- regs_buff[1058] = IXGBE_READ_REG(hw, IXGBE_TREG); +- regs_buff[1059] = IXGBE_READ_REG(hw, IXGBE_PCSS1); +- regs_buff[1060] = IXGBE_READ_REG(hw, IXGBE_PCSS2); +- regs_buff[1061] = IXGBE_READ_REG(hw, IXGBE_XPCSS); +- regs_buff[1062] = IXGBE_READ_REG(hw, IXGBE_SERDESC); +- regs_buff[1063] = IXGBE_READ_REG(hw, IXGBE_MACS); +- regs_buff[1064] = IXGBE_READ_REG(hw, IXGBE_AUTOC); +- regs_buff[1065] = IXGBE_READ_REG(hw, IXGBE_LINKS); +- regs_buff[1066] = IXGBE_READ_REG(hw, IXGBE_AUTOC2); +- regs_buff[1067] = IXGBE_READ_REG(hw, IXGBE_AUTOC3); +- regs_buff[1068] = IXGBE_READ_REG(hw, IXGBE_ANLP1); +- regs_buff[1069] = IXGBE_READ_REG(hw, IXGBE_ANLP2); +- regs_buff[1070] = IXGBE_READ_REG(hw, IXGBE_ATLASCTL); ++ regs_buff[1038] = IXGBE_R32_Q(hw, IXGBE_PCS1GCFIG); ++ regs_buff[1039] = IXGBE_R32_Q(hw, IXGBE_PCS1GLCTL); ++ regs_buff[1040] = IXGBE_R32_Q(hw, IXGBE_PCS1GLSTA); ++ regs_buff[1041] = IXGBE_R32_Q(hw, IXGBE_PCS1GDBG0); ++ regs_buff[1042] = IXGBE_R32_Q(hw, IXGBE_PCS1GDBG1); ++ regs_buff[1043] = IXGBE_R32_Q(hw, IXGBE_PCS1GANA); ++ regs_buff[1044] = IXGBE_R32_Q(hw, IXGBE_PCS1GANLP); ++ regs_buff[1045] = IXGBE_R32_Q(hw, IXGBE_PCS1GANNP); ++ regs_buff[1046] = IXGBE_R32_Q(hw, IXGBE_PCS1GANLPNP); ++ regs_buff[1047] = IXGBE_R32_Q(hw, IXGBE_HLREG0); ++ regs_buff[1048] = IXGBE_R32_Q(hw, IXGBE_HLREG1); ++ regs_buff[1049] = IXGBE_R32_Q(hw, IXGBE_PAP); ++ regs_buff[1050] = IXGBE_R32_Q(hw, IXGBE_MACA); ++ regs_buff[1051] = IXGBE_R32_Q(hw, IXGBE_APAE); ++ regs_buff[1052] = IXGBE_R32_Q(hw, IXGBE_ARD); ++ regs_buff[1053] = IXGBE_R32_Q(hw, IXGBE_AIS); ++ 
regs_buff[1054] = IXGBE_R32_Q(hw, IXGBE_MSCA); ++ regs_buff[1055] = IXGBE_R32_Q(hw, IXGBE_MSRWD); ++ regs_buff[1056] = IXGBE_R32_Q(hw, IXGBE_MLADD); ++ regs_buff[1057] = IXGBE_R32_Q(hw, IXGBE_MHADD); ++ regs_buff[1058] = IXGBE_R32_Q(hw, IXGBE_TREG); ++ regs_buff[1059] = IXGBE_R32_Q(hw, IXGBE_PCSS1); ++ regs_buff[1060] = IXGBE_R32_Q(hw, IXGBE_PCSS2); ++ regs_buff[1061] = IXGBE_R32_Q(hw, IXGBE_XPCSS); ++ regs_buff[1062] = IXGBE_R32_Q(hw, IXGBE_SERDESC); ++ regs_buff[1063] = IXGBE_R32_Q(hw, IXGBE_MACS); ++ regs_buff[1064] = IXGBE_R32_Q(hw, IXGBE_AUTOC); ++ regs_buff[1065] = IXGBE_R32_Q(hw, IXGBE_LINKS); ++ regs_buff[1066] = IXGBE_R32_Q(hw, IXGBE_AUTOC2); ++ regs_buff[1067] = IXGBE_R32_Q(hw, IXGBE_AUTOC3); ++ regs_buff[1068] = IXGBE_R32_Q(hw, IXGBE_ANLP1); ++ regs_buff[1069] = IXGBE_R32_Q(hw, IXGBE_ANLP2); ++ regs_buff[1070] = IXGBE_R32_Q(hw, IXGBE_ATLASCTL); + + /* Diagnostic */ +- regs_buff[1071] = IXGBE_READ_REG(hw, IXGBE_RDSTATCTL); ++ regs_buff[1071] = IXGBE_R32_Q(hw, IXGBE_RDSTATCTL); + for (i = 0; i < 8; i++) +- regs_buff[1072 + i] = IXGBE_READ_REG(hw, IXGBE_RDSTAT(i)); +- regs_buff[1080] = IXGBE_READ_REG(hw, IXGBE_RDHMPN); ++ regs_buff[1072 + i] = IXGBE_R32_Q(hw, IXGBE_RDSTAT(i)); ++ regs_buff[1080] = IXGBE_R32_Q(hw, IXGBE_RDHMPN); + for (i = 0; i < 4; i++) +- regs_buff[1081 + i] = IXGBE_READ_REG(hw, IXGBE_RIC_DW(i)); +- regs_buff[1085] = IXGBE_READ_REG(hw, IXGBE_RDPROBE); +- regs_buff[1086] = IXGBE_READ_REG(hw, IXGBE_TDSTATCTL); +- for (i = 0; i < 8; i++) +- regs_buff[1087 + i] = IXGBE_READ_REG(hw, IXGBE_TDSTAT(i)); +- regs_buff[1095] = IXGBE_READ_REG(hw, IXGBE_TDHMPN); ++ regs_buff[1081 + i] = IXGBE_R32_Q(hw, IXGBE_RIC_DW(i)); ++ regs_buff[1085] = IXGBE_R32_Q(hw, IXGBE_RDPROBE); ++ regs_buff[1095] = IXGBE_R32_Q(hw, IXGBE_TDHMPN); + for (i = 0; i < 4; i++) +- regs_buff[1096 + i] = IXGBE_READ_REG(hw, IXGBE_TIC_DW(i)); +- regs_buff[1100] = IXGBE_READ_REG(hw, IXGBE_TDPROBE); +- regs_buff[1101] = IXGBE_READ_REG(hw, IXGBE_TXBUFCTRL); +- regs_buff[1102] = 
IXGBE_READ_REG(hw, IXGBE_TXBUFDATA0); +- regs_buff[1103] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA1); +- regs_buff[1104] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA2); +- regs_buff[1105] = IXGBE_READ_REG(hw, IXGBE_TXBUFDATA3); +- regs_buff[1106] = IXGBE_READ_REG(hw, IXGBE_RXBUFCTRL); +- regs_buff[1107] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA0); +- regs_buff[1108] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA1); +- regs_buff[1109] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA2); +- regs_buff[1110] = IXGBE_READ_REG(hw, IXGBE_RXBUFDATA3); ++ regs_buff[1096 + i] = IXGBE_R32_Q(hw, IXGBE_TIC_DW(i)); ++ regs_buff[1100] = IXGBE_R32_Q(hw, IXGBE_TDPROBE); ++ regs_buff[1101] = IXGBE_R32_Q(hw, IXGBE_TXBUFCTRL); ++ regs_buff[1102] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA0); ++ regs_buff[1103] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA1); ++ regs_buff[1104] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA2); ++ regs_buff[1105] = IXGBE_R32_Q(hw, IXGBE_TXBUFDATA3); ++ regs_buff[1106] = IXGBE_R32_Q(hw, IXGBE_RXBUFCTRL); ++ regs_buff[1107] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA0); ++ regs_buff[1108] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA1); ++ regs_buff[1109] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA2); ++ regs_buff[1110] = IXGBE_R32_Q(hw, IXGBE_RXBUFDATA3); + for (i = 0; i < 8; i++) +- regs_buff[1111 + i] = IXGBE_READ_REG(hw, IXGBE_PCIE_DIAG(i)); +- regs_buff[1119] = IXGBE_READ_REG(hw, IXGBE_RFVAL); +- regs_buff[1120] = IXGBE_READ_REG(hw, IXGBE_MDFTC1); +- regs_buff[1121] = IXGBE_READ_REG(hw, IXGBE_MDFTC2); +- regs_buff[1122] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO1); +- regs_buff[1123] = IXGBE_READ_REG(hw, IXGBE_MDFTFIFO2); +- regs_buff[1124] = IXGBE_READ_REG(hw, IXGBE_MDFTS); +- regs_buff[1125] = IXGBE_READ_REG(hw, IXGBE_PCIEECCCTL); +- regs_buff[1126] = IXGBE_READ_REG(hw, IXGBE_PBTXECC); +- regs_buff[1127] = IXGBE_READ_REG(hw, IXGBE_PBRXECC); ++ regs_buff[1111 + i] = IXGBE_R32_Q(hw, IXGBE_PCIE_DIAG(i)); ++ regs_buff[1119] = IXGBE_R32_Q(hw, IXGBE_RFVAL); ++ regs_buff[1120] = IXGBE_R32_Q(hw, IXGBE_MDFTC1); ++ regs_buff[1121] = IXGBE_R32_Q(hw, IXGBE_MDFTC2); ++ 
regs_buff[1122] = IXGBE_R32_Q(hw, IXGBE_MDFTFIFO1); ++ regs_buff[1123] = IXGBE_R32_Q(hw, IXGBE_MDFTFIFO2); ++ regs_buff[1124] = IXGBE_R32_Q(hw, IXGBE_MDFTS); ++ regs_buff[1125] = IXGBE_R32_Q(hw, IXGBE_PCIEECCCTL); ++ regs_buff[1126] = IXGBE_R32_Q(hw, IXGBE_PBTXECC); ++ regs_buff[1127] = IXGBE_R32_Q(hw, IXGBE_PBRXECC); + + /* 82599 X540 specific registers */ +- regs_buff[1128] = IXGBE_READ_REG(hw, IXGBE_MFLCN); +- +- /* 82599 X540 specific DCB registers */ +- regs_buff[1129] = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); +- regs_buff[1130] = IXGBE_READ_REG(hw, IXGBE_RTTUP2TC); +- for (i = 0; i < 4; i++) +- regs_buff[1131 + i] = IXGBE_READ_REG(hw, IXGBE_TXLLQ(i)); +- regs_buff[1135] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRM); +- /* same as RTTQCNRM */ +- regs_buff[1136] = IXGBE_READ_REG(hw, IXGBE_RTTBCNRD); +- /* same as RTTQCNRR */ ++ regs_buff[1128] = IXGBE_R32_Q(hw, IXGBE_MFLCN); + +- /* X540 specific DCB registers */ +- regs_buff[1137] = IXGBE_READ_REG(hw, IXGBE_RTTQCNCR); +- regs_buff[1138] = IXGBE_READ_REG(hw, IXGBE_RTTQCNTG); + } + + static int ixgbe_get_eeprom_len(struct net_device *netdev) +@@ -828,7 +1193,7 @@ static int ixgbe_get_eeprom(struct net_device *netdev, + return -ENOMEM; + + ret_val = hw->eeprom.ops.read_buffer(hw, first_word, eeprom_len, +- eeprom_buff); ++ eeprom_buff); + + /* Device's eeprom is always little-endian, word addressable */ + for (i = 0; i < eeprom_len; i++) +@@ -845,10 +1210,9 @@ static int ixgbe_set_eeprom(struct net_device *netdev, + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; +- u16 *eeprom_buff; +- void *ptr; + int max_len, first_word, last_word, ret_val = 0; +- u16 i; ++ u16 *eeprom_buff, i; ++ void *ptr; + + if (eeprom->len == 0) + return -EINVAL; +@@ -877,7 +1241,7 @@ static int ixgbe_set_eeprom(struct net_device *netdev, + + ptr++; + } +- if ((eeprom->offset + eeprom->len) & 1) { ++ if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) { + /* + * need read/modify/write of last 
changed EEPROM word + * only the first byte of the word is being modified +@@ -898,8 +1262,8 @@ static int ixgbe_set_eeprom(struct net_device *netdev, + cpu_to_le16s(&eeprom_buff[i]); + + ret_val = hw->eeprom.ops.write_buffer(hw, first_word, +- last_word - first_word + 1, +- eeprom_buff); ++ last_word - first_word + 1, ++ eeprom_buff); + + /* Update the checksum */ + if (ret_val == 0) +@@ -914,35 +1278,35 @@ static void ixgbe_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); +- u32 nvm_track_id; + +- strlcpy(drvinfo->driver, ixgbe_driver_name, sizeof(drvinfo->driver)); +- strlcpy(drvinfo->version, ixgbe_driver_version, +- sizeof(drvinfo->version)); ++ strncpy(drvinfo->driver, ixgbe_driver_name, ++ sizeof(drvinfo->driver) - 1); ++ strncpy(drvinfo->version, ixgbe_driver_version, ++ sizeof(drvinfo->version) - 1); + +- nvm_track_id = (adapter->eeprom_verh << 16) | +- adapter->eeprom_verl; +- snprintf(drvinfo->fw_version, sizeof(drvinfo->fw_version), "0x%08x", +- nvm_track_id); ++ strncpy(drvinfo->fw_version, adapter->eeprom_id, ++ sizeof(drvinfo->fw_version) - 1); ++ strncpy(drvinfo->bus_info, pci_name(adapter->pdev), ++ sizeof(drvinfo->bus_info) - 1); ++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT + +- strlcpy(drvinfo->bus_info, pci_name(adapter->pdev), +- sizeof(drvinfo->bus_info)); +- drvinfo->n_stats = IXGBE_STATS_LEN; +- drvinfo->testinfo_len = IXGBE_TEST_LEN; +- drvinfo->regdump_len = ixgbe_get_regs_len(netdev); ++ drvinfo->n_priv_flags = IXGBE_PRIV_FLAGS_STR_LEN; ++#endif + } + + static void ixgbe_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ring) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); +- struct ixgbe_ring *tx_ring = adapter->tx_ring[0]; +- struct ixgbe_ring *rx_ring = adapter->rx_ring[0]; + + ring->rx_max_pending = IXGBE_MAX_RXD; + ring->tx_max_pending = IXGBE_MAX_TXD; +- ring->rx_pending = rx_ring->count; +- ring->tx_pending = tx_ring->count; ++ 
ring->rx_mini_max_pending = 0; ++ ring->rx_jumbo_max_pending = 0; ++ ring->rx_pending = adapter->rx_ring_count; ++ ring->tx_pending = adapter->tx_ring_count; ++ ring->rx_mini_pending = 0; ++ ring->rx_jumbo_pending = 0; + } + + static int ixgbe_set_ringparam(struct net_device *netdev, +@@ -1041,9 +1405,9 @@ static int ixgbe_set_ringparam(struct net_device *netdev, + } + goto err_setup; + } +- + } + ++ + for (i = 0; i < adapter->num_rx_queues; i++) { + ixgbe_free_rx_resources(adapter->rx_ring[i]); + +@@ -1062,69 +1426,86 @@ clear_reset: + return err; + } + ++#ifndef HAVE_ETHTOOL_GET_SSET_COUNT ++static int ixgbe_get_stats_count(struct net_device *netdev) ++{ ++ return IXGBE_STATS_LEN; ++} ++ ++#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ + static int ixgbe_get_sset_count(struct net_device *netdev, int sset) + { ++#ifdef HAVE_TX_MQ ++#ifndef HAVE_NETDEV_SELECT_QUEUE ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++#endif ++#endif ++ + switch (sset) { + case ETH_SS_TEST: + return IXGBE_TEST_LEN; + case ETH_SS_STATS: + return IXGBE_STATS_LEN; ++ case ETH_SS_PRIV_FLAGS: ++ return IXGBE_PRIV_FLAGS_STR_LEN; + default: + return -EOPNOTSUPP; + } + } + ++#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + static void ixgbe_get_ethtool_stats(struct net_device *netdev, +- struct ethtool_stats *stats, u64 *data) ++ struct ethtool_stats __always_unused *stats, u64 *data) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); +- struct rtnl_link_stats64 temp; +- const struct rtnl_link_stats64 *net_stats; ++#ifdef HAVE_NETDEV_STATS_IN_NETDEV ++ struct net_device_stats *net_stats = &netdev->stats; ++#else ++ struct net_device_stats *net_stats = &adapter->net_stats; ++#endif ++ u64 *queue_stat; ++ int stat_count, k; ++#ifdef HAVE_NDO_GET_STATS64 + unsigned int start; ++#endif + struct ixgbe_ring *ring; + int i, j; +- char *p = NULL; ++ char *p; + + ixgbe_update_stats(adapter); +- net_stats = dev_get_stats(netdev, &temp); +- for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { +- switch 
(ixgbe_gstrings_stats[i].type) { +- case NETDEV_STATS: +- p = (char *) net_stats + +- ixgbe_gstrings_stats[i].stat_offset; +- break; +- case IXGBE_STATS: +- p = (char *) adapter + +- ixgbe_gstrings_stats[i].stat_offset; +- break; +- default: +- data[i] = 0; +- continue; +- } + +- data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == ++ for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) { ++ p = (char *)net_stats + ixgbe_gstrings_net_stats[i].stat_offset; ++ data[i] = (ixgbe_gstrings_net_stats[i].sizeof_stat == ++ sizeof(u64)) ? *(u64 *)p : *(u32 *)p; ++ } ++ for (j = 0; j < IXGBE_GLOBAL_STATS_LEN; j++, i++) { ++ p = (char *)adapter + ixgbe_gstrings_stats[j].stat_offset; ++ data[i] = (ixgbe_gstrings_stats[j].sizeof_stat == + sizeof(u64)) ? *(u64 *)p : *(u32 *)p; + } +- for (j = 0; j < netdev->num_tx_queues; j++) { ++ for (j = 0; j < IXGBE_NUM_TX_QUEUES; j++) { + ring = adapter->tx_ring[j]; + if (!ring) { +- data[i] = 0; +- data[i+1] = 0; +- i += 2; ++ data[i++] = 0; ++ data[i++] = 0; + #ifdef BP_EXTENDED_STATS +- data[i] = 0; +- data[i+1] = 0; +- data[i+2] = 0; +- i += 3; ++ data[i++] = 0; ++ data[i++] = 0; ++ data[i++] = 0; + #endif + continue; + } + ++#ifdef HAVE_NDO_GET_STATS64 + do { + start = u64_stats_fetch_begin_irq(&ring->syncp); ++#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; ++#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); ++#endif + i += 2; + #ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; +@@ -1136,23 +1517,25 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, + for (j = 0; j < IXGBE_NUM_RX_QUEUES; j++) { + ring = adapter->rx_ring[j]; + if (!ring) { +- data[i] = 0; +- data[i+1] = 0; +- i += 2; ++ data[i++] = 0; ++ data[i++] = 0; + #ifdef BP_EXTENDED_STATS +- data[i] = 0; +- data[i+1] = 0; +- data[i+2] = 0; +- i += 3; ++ data[i++] = 0; ++ data[i++] = 0; ++ data[i++] = 0; + #endif + continue; + } + ++#ifdef HAVE_NDO_GET_STATS64 + do { + start = 
u64_stats_fetch_begin_irq(&ring->syncp); ++#endif + data[i] = ring->stats.packets; + data[i+1] = ring->stats.bytes; ++#ifdef HAVE_NDO_GET_STATS64 + } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); ++#endif + i += 2; + #ifdef BP_EXTENDED_STATS + data[i] = ring->stats.yields; +@@ -1161,7 +1544,6 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, + i += 3; + #endif + } +- + for (j = 0; j < IXGBE_MAX_PACKET_BUFFERS; j++) { + data[i++] = adapter->stats.pxontxc[j]; + data[i++] = adapter->stats.pxofftxc[j]; +@@ -1170,28 +1552,42 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev, + data[i++] = adapter->stats.pxonrxc[j]; + data[i++] = adapter->stats.pxoffrxc[j]; + } ++ stat_count = sizeof(struct vf_stats) / sizeof(u64); ++ for (j = 0; j < adapter->num_vfs; j++) { ++ queue_stat = (u64 *)&adapter->vfinfo[j].vfstats; ++ for (k = 0; k < stat_count; k++) ++ data[i + k] = queue_stat[k]; ++ queue_stat = (u64 *)&adapter->vfinfo[j].saved_rst_vfstats; ++ for (k = 0; k < stat_count; k++) ++ data[i + k] += queue_stat[k]; ++ i += k; ++ } + } + + static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) + { ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); + char *p = (char *)data; +- int i; ++ unsigned int i; + + switch (stringset) { + case ETH_SS_TEST: +- for (i = 0; i < IXGBE_TEST_LEN; i++) { +- memcpy(data, ixgbe_gstrings_test[i], ETH_GSTRING_LEN); +- data += ETH_GSTRING_LEN; +- } ++ memcpy(data, *ixgbe_gstrings_test, ++ IXGBE_TEST_LEN * ETH_GSTRING_LEN); + break; + case ETH_SS_STATS: ++ for (i = 0; i < IXGBE_NETDEV_STATS_LEN; i++) { ++ memcpy(p, ixgbe_gstrings_net_stats[i].stat_string, ++ ETH_GSTRING_LEN); ++ p += ETH_GSTRING_LEN; ++ } + for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { + memcpy(p, ixgbe_gstrings_stats[i].stat_string, + ETH_GSTRING_LEN); + p += ETH_GSTRING_LEN; + } +- for (i = 0; i < netdev->num_tx_queues; i++) { ++ for (i = 0; i < IXGBE_NUM_TX_QUEUES; i++) { + sprintf(p, "tx_queue_%u_packets", i); 
+ p += ETH_GSTRING_LEN; + sprintf(p, "tx_queue_%u_bytes", i); +@@ -1231,24 +1627,41 @@ static void ixgbe_get_strings(struct net_device *netdev, u32 stringset, + sprintf(p, "rx_pb_%u_pxoff", i); + p += ETH_GSTRING_LEN; + } +- /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ +- break; +- } +-} +- ++ for (i = 0; i < adapter->num_vfs; i++) { ++ sprintf(p, "VF %u Rx Packets", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "VF %u Rx Bytes", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "VF %u Tx Packets", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "VF %u Tx Bytes", i); ++ p += ETH_GSTRING_LEN; ++ sprintf(p, "VF %u MC Packets", i); ++ p += ETH_GSTRING_LEN; ++ } ++ /* BUG_ON(p - data != IXGBE_STATS_LEN * ETH_GSTRING_LEN); */ ++ break; ++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT ++ case ETH_SS_PRIV_FLAGS: ++ memcpy(data, ixgbe_priv_flags_strings, ++ IXGBE_PRIV_FLAGS_STR_LEN * ETH_GSTRING_LEN); ++ break; ++#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ ++ } ++} ++ + static int ixgbe_link_test(struct ixgbe_adapter *adapter, u64 *data) + { + struct ixgbe_hw *hw = &adapter->hw; + bool link_up; + u32 link_speed = 0; + +- if (ixgbe_removed(hw->hw_addr)) { ++ if (IXGBE_REMOVED(hw->hw_addr)) { + *data = 1; + return 1; + } + *data = 0; +- +- hw->mac.ops.check_link(hw, &link_speed, &link_up, true); ++ hw->mac.ops.check_link(hw, &link_speed, &link_up, true); + if (link_up) + return *data; + else +@@ -1283,7 +1696,7 @@ struct ixgbe_reg_test { + #define TABLE64_TEST_HI 6 + + /* default 82599 register test */ +-static const struct ixgbe_reg_test reg_test_82599[] = { ++static struct ixgbe_reg_test reg_test_82599[] = { + { IXGBE_FCRTL_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCRTH_82599(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +@@ -1303,11 +1716,11 @@ static const struct ixgbe_reg_test reg_test_82599[] = { + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 
0x8001FFFF, 0x800CFFFF }, + { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0 } ++ { .reg = 0 } + }; + + /* default 82598 register test */ +-static const struct ixgbe_reg_test reg_test_82598[] = { ++static struct ixgbe_reg_test reg_test_82598[] = { + { IXGBE_FCRTL(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_FCRTH(0), 1, PATTERN_TEST, 0x8007FFF0, 0x8007FFF0 }, + { IXGBE_PFCTOP, 1, PATTERN_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +@@ -1331,32 +1744,35 @@ static const struct ixgbe_reg_test reg_test_82598[] = { + { IXGBE_RAL(0), 16, TABLE64_TEST_LO, 0xFFFFFFFF, 0xFFFFFFFF }, + { IXGBE_RAL(0), 16, TABLE64_TEST_HI, 0x800CFFFF, 0x800CFFFF }, + { IXGBE_MTA(0), 128, TABLE32_TEST, 0xFFFFFFFF, 0xFFFFFFFF }, +- { 0, 0, 0, 0 } ++ { .reg = 0 } + }; + ++ + static bool reg_pattern_test(struct ixgbe_adapter *adapter, u64 *data, int reg, + u32 mask, u32 write) + { + u32 pat, val, before; + static const u32 test_pattern[] = { +- 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; ++ 0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF ++ }; + +- if (ixgbe_removed(adapter->hw.hw_addr)) { ++ if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; +- return 1; ++ return true; + } + for (pat = 0; pat < ARRAY_SIZE(test_pattern); pat++) { +- before = ixgbe_read_reg(&adapter->hw, reg); +- ixgbe_write_reg(&adapter->hw, reg, test_pattern[pat] & write); +- val = ixgbe_read_reg(&adapter->hw, reg); ++ before = IXGBE_READ_REG(&adapter->hw, reg); ++ IXGBE_WRITE_REG(&adapter->hw, reg, test_pattern[pat] & write); ++ val = IXGBE_READ_REG(&adapter->hw, reg); + if (val != (test_pattern[pat] & write & mask)) { +- e_err(drv, "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", +- reg, val, (test_pattern[pat] & write & mask)); ++ e_err(drv, ++ "pattern test reg %04X failed: got 0x%08X expected 0x%08X\n", ++ reg, val, test_pattern[pat] & write & mask); + *data = reg; +- ixgbe_write_reg(&adapter->hw, reg, before); ++ IXGBE_WRITE_REG(&adapter->hw, reg, before); + return true; + 
} +- ixgbe_write_reg(&adapter->hw, reg, before); ++ IXGBE_WRITE_REG(&adapter->hw, reg, before); + } + return false; + } +@@ -1366,49 +1782,53 @@ static bool reg_set_and_check(struct ixgbe_adapter *adapter, u64 *data, int reg, + { + u32 val, before; + +- if (ixgbe_removed(adapter->hw.hw_addr)) { ++ if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + *data = 1; +- return 1; ++ return true; + } +- before = ixgbe_read_reg(&adapter->hw, reg); +- ixgbe_write_reg(&adapter->hw, reg, write & mask); +- val = ixgbe_read_reg(&adapter->hw, reg); ++ before = IXGBE_READ_REG(&adapter->hw, reg); ++ IXGBE_WRITE_REG(&adapter->hw, reg, write & mask); ++ val = IXGBE_READ_REG(&adapter->hw, reg); + if ((write & mask) != (val & mask)) { +- e_err(drv, "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", ++ e_err(drv, ++ "set/check reg %04X test failed: got 0x%08X expected 0x%08X\n", + reg, (val & mask), (write & mask)); + *data = reg; +- ixgbe_write_reg(&adapter->hw, reg, before); ++ IXGBE_WRITE_REG(&adapter->hw, reg, before); + return true; + } +- ixgbe_write_reg(&adapter->hw, reg, before); ++ IXGBE_WRITE_REG(&adapter->hw, reg, before); + return false; + } + +-static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) ++static bool ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) + { +- const struct ixgbe_reg_test *test; ++ struct ixgbe_reg_test *test; ++ struct ixgbe_hw *hw = &adapter->hw; + u32 value, before, after; + u32 i, toggle; + +- if (ixgbe_removed(adapter->hw.hw_addr)) { ++ if (IXGBE_REMOVED(hw->hw_addr)) { + e_err(drv, "Adapter removed - register test blocked\n"); + *data = 1; +- return 1; ++ return true; + } +- switch (adapter->hw.mac.type) { ++ switch (hw->mac.type) { + case ixgbe_mac_82598EB: + toggle = 0x7FFFF3FF; + test = reg_test_82598; + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + toggle = 0x7FFFF30F; + test = reg_test_82599; + break; + default: + *data = 1; 
+- return 1; +- break; ++ return true; + } + + /* +@@ -1417,18 +1837,19 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) + * tests. Some bits are read-only, some toggle, and some + * are writeable on newer MACs. + */ +- before = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS); +- value = (ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle); +- ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, toggle); +- after = ixgbe_read_reg(&adapter->hw, IXGBE_STATUS) & toggle; ++ before = IXGBE_READ_REG(hw, IXGBE_STATUS); ++ value = IXGBE_READ_REG(hw, IXGBE_STATUS) & toggle; ++ IXGBE_WRITE_REG(hw, IXGBE_STATUS, toggle); ++ after = IXGBE_READ_REG(hw, IXGBE_STATUS) & toggle; + if (value != after) { +- e_err(drv, "failed STATUS register test got: 0x%08X expected: 0x%08X\n", ++ e_err(drv, ++ "failed STATUS register test got: 0x%08X expected: 0x%08X\n", + after, value); + *data = 1; +- return 1; ++ return true; + } + /* restore previous status */ +- ixgbe_write_reg(&adapter->hw, IXGBE_STATUS, before); ++ IXGBE_WRITE_REG(hw, IXGBE_STATUS, before); + + /* + * Perform the remainder of the register test, looping through +@@ -1441,61 +1862,63 @@ static int ixgbe_reg_test(struct ixgbe_adapter *adapter, u64 *data) + switch (test->test_type) { + case PATTERN_TEST: + b = reg_pattern_test(adapter, data, +- test->reg + (i * 0x40), +- test->mask, +- test->write); +- break; +- case SET_READ_TEST: +- b = reg_set_and_check(adapter, data, + test->reg + (i * 0x40), + test->mask, + test->write); + break; ++ case SET_READ_TEST: ++ b = reg_set_and_check(adapter, data, ++ test->reg + (i * 0x40), ++ test->mask, ++ test->write); ++ break; + case WRITE_NO_TEST: +- ixgbe_write_reg(&adapter->hw, +- test->reg + (i * 0x40), ++ IXGBE_WRITE_REG(hw, test->reg + (i * 0x40), + test->write); + break; + case TABLE32_TEST: + b = reg_pattern_test(adapter, data, +- test->reg + (i * 4), +- test->mask, +- test->write); ++ test->reg + (i * 4), ++ test->mask, ++ test->write); + break; + case TABLE64_TEST_LO: + b 
= reg_pattern_test(adapter, data, +- test->reg + (i * 8), +- test->mask, +- test->write); ++ test->reg + (i * 8), ++ test->mask, ++ test->write); + break; + case TABLE64_TEST_HI: + b = reg_pattern_test(adapter, data, +- (test->reg + 4) + (i * 8), +- test->mask, +- test->write); ++ (test->reg + 4) + (i * 8), ++ test->mask, ++ test->write); + break; + } + if (b) +- return 1; ++ return true; + } + test++; + } + + *data = 0; +- return 0; ++ return false; + } + +-static int ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) ++static bool ixgbe_eeprom_test(struct ixgbe_adapter *adapter, u64 *data) + { + struct ixgbe_hw *hw = &adapter->hw; +- if (hw->eeprom.ops.validate_checksum(hw, NULL)) ++ ++ if (hw->eeprom.ops.validate_checksum(hw, NULL)) { + *data = 1; +- else ++ return true; ++ } else { + *data = 0; +- return *data; ++ return false; ++ } + } + +-static irqreturn_t ixgbe_test_intr(int irq, void *data) ++static irqreturn_t ixgbe_test_intr(int __always_unused irq, void *data) + { + struct net_device *netdev = (struct net_device *) data; + struct ixgbe_adapter *adapter = netdev_priv(netdev); +@@ -1511,6 +1934,10 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) + u32 mask, i = 0, shared_int = true; + u32 irq = adapter->pdev->irq; + ++ if (IXGBE_REMOVED(adapter->hw.hw_addr)) { ++ *data = 1; ++ return -1; ++ } + *data = 0; + + /* Hook up test interrupt handler just for this test */ +@@ -1519,21 +1946,21 @@ static int ixgbe_intr_test(struct ixgbe_adapter *adapter, u64 *data) + return 0; + } else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) { + shared_int = false; +- if (request_irq(irq, ixgbe_test_intr, 0, netdev->name, ++ if (request_irq(irq, &ixgbe_test_intr, 0, netdev->name, + netdev)) { + *data = 1; + return -1; + } +- } else if (!request_irq(irq, ixgbe_test_intr, IRQF_PROBE_SHARED, ++ } else if (!request_irq(irq, &ixgbe_test_intr, IRQF_PROBE_SHARED, + netdev->name, netdev)) { + shared_int = false; +- } else if (request_irq(irq, 
ixgbe_test_intr, IRQF_SHARED, ++ } else if (request_irq(irq, &ixgbe_test_intr, IRQF_SHARED, + netdev->name, netdev)) { + *data = 1; + return -1; + } +- e_info(hw, "testing %s interrupt\n", shared_int ? +- "shared" : "unshared"); ++ e_info(hw, "testing %s interrupt\n", ++ (shared_int ? "shared" : "unshared")); + + /* Disable all the interrupts */ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFFFFFF); +@@ -1628,19 +2055,18 @@ static void ixgbe_free_desc_rings(struct ixgbe_adapter *adapter) + /* shut down the DMA engines now so they can be reinitialized later */ + + /* first Rx */ +- reg_ctl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +- reg_ctl &= ~IXGBE_RXCTRL_RXEN; +- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, reg_ctl); ++ ixgbe_disable_rx(hw); + ixgbe_disable_rx_queue(adapter, rx_ring); + + /* now Tx */ +- reg_ctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx)); +- reg_ctl &= ~IXGBE_TXDCTL_ENABLE; +- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), reg_ctl); ++ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(tx_ring->reg_idx), 0); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + reg_ctl = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); + reg_ctl &= ~IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg_ctl); +@@ -1666,7 +2092,7 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) + /* Setup Tx descriptor ring and Tx buffers */ + tx_ring->count = IXGBE_DEFAULT_TXD; + tx_ring->queue_index = 0; +- tx_ring->dev = &adapter->pdev->dev; ++ tx_ring->dev = pci_dev_to_dev(adapter->pdev); + tx_ring->netdev = adapter->netdev; + tx_ring->reg_idx = adapter->tx_ring[0]->reg_idx; + +@@ -1677,6 +2103,9 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + reg_data = IXGBE_READ_REG(&adapter->hw, 
IXGBE_DMATXCTL); + reg_data |= IXGBE_DMATXCTL_TE; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_DMATXCTL, reg_data); +@@ -1690,9 +2119,12 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) + /* Setup Rx Descriptor ring and Rx buffers */ + rx_ring->count = IXGBE_DEFAULT_RXD; + rx_ring->queue_index = 0; +- rx_ring->dev = &adapter->pdev->dev; ++ rx_ring->dev = pci_dev_to_dev(adapter->pdev); + rx_ring->netdev = adapter->netdev; + rx_ring->reg_idx = adapter->rx_ring[0]->reg_idx; ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ rx_ring->rx_buf_len = IXGBE_RXBUFFER_2K; ++#endif + + err = ixgbe_setup_rx_resources(rx_ring); + if (err) { +@@ -1700,13 +2132,14 @@ static int ixgbe_setup_desc_rings(struct ixgbe_adapter *adapter) + goto err_nomem; + } + +- rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl & ~IXGBE_RXCTRL_RXEN); ++ ixgbe_disable_rx(&adapter->hw); + + ixgbe_configure_rx_ring(adapter, rx_ring); + +- rctl |= IXGBE_RXCTRL_RXEN | IXGBE_RXCTRL_DMBYPS; ++ rctl = IXGBE_READ_REG(&adapter->hw, IXGBE_RXCTRL); ++ rctl |= IXGBE_RXCTRL_DMBYPS; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_RXCTRL, rctl); ++ ixgbe_enable_rx(&adapter->hw); + + return 0; + +@@ -1731,11 +2164,16 @@ static int ixgbe_setup_loopback_test(struct ixgbe_adapter *adapter) + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, reg_data); + + /* X540 needs to set the MACC.FLU bit to force link up */ +- if (adapter->hw.mac.type == ixgbe_mac_X540) { ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ case ixgbe_mac_X540: + reg_data = IXGBE_READ_REG(hw, IXGBE_MACC); + reg_data |= IXGBE_MACC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_MACC, reg_data); +- } else { ++ break; ++ default: + if (hw->mac.orig_autoc) { + reg_data = hw->mac.orig_autoc | IXGBE_AUTOC_FLU; + IXGBE_WRITE_REG(hw, IXGBE_AUTOC, reg_data); +@@ -1797,15 +2235,21 @@ static bool ixgbe_check_lbtest_frame(struct ixgbe_rx_buffer *rx_buffer, + + frame_size >>= 1; + 
++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ data = rx_buffer->skb->data; ++#else + data = kmap(rx_buffer->page) + rx_buffer->page_offset; ++#endif + + if (data[3] != 0xFF || + data[frame_size + 10] != 0xBE || + data[frame_size + 12] != 0xAF) + match = false; + ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + kunmap(rx_buffer->page); + ++#endif + return match; + } + +@@ -1814,8 +2258,11 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, + unsigned int size) + { + union ixgbe_adv_rx_desc *rx_desc; +- struct ixgbe_rx_buffer *rx_buffer; +- struct ixgbe_tx_buffer *tx_buffer; ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ const int bufsz = rx_ring->rx_buf_len; ++#else ++ const int bufsz = ixgbe_rx_bufsz(rx_ring); ++#endif + u16 rx_ntc, tx_ntc, count = 0; + + /* initialize next to clean and descriptor values */ +@@ -1823,14 +2270,21 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, + tx_ntc = tx_ring->next_to_clean; + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); + +- while (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) { ++ while (rx_desc->wb.upper.length) { ++ struct ixgbe_rx_buffer *rx_buffer; ++ struct ixgbe_tx_buffer *tx_buffer; ++ ++ /* unmap buffer on Tx side */ ++ tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; ++ ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); ++ + /* check Rx buffer */ + rx_buffer = &rx_ring->rx_buffer_info[rx_ntc]; + + /* sync Rx buffer for CPU read */ + dma_sync_single_for_cpu(rx_ring->dev, + rx_buffer->dma, +- ixgbe_rx_bufsz(rx_ring), ++ bufsz, + DMA_FROM_DEVICE); + + /* verify contents of skb */ +@@ -1840,13 +2294,9 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, + /* sync Rx buffer for device write */ + dma_sync_single_for_device(rx_ring->dev, + rx_buffer->dma, +- ixgbe_rx_bufsz(rx_ring), ++ bufsz, + DMA_FROM_DEVICE); + +- /* unmap buffer on Tx side */ +- tx_buffer = &tx_ring->tx_buffer_info[tx_ntc]; +- ixgbe_unmap_and_free_tx_resource(tx_ring, tx_buffer); +- + /* increment Rx/Tx next to clean 
counters */ + rx_ntc++; + if (rx_ntc == rx_ring->count) +@@ -1859,8 +2309,6 @@ static u16 ixgbe_clean_test_rings(struct ixgbe_ring *rx_ring, + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ntc); + } + +- netdev_tx_reset_queue(txring_txq(tx_ring)); +- + /* re-map buffers to ring, store next to clean values */ + ixgbe_alloc_rx_buffers(rx_ring, count); + rx_ring->next_to_clean = rx_ntc; +@@ -1873,7 +2321,7 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) + { + struct ixgbe_ring *tx_ring = &adapter->test_tx_ring; + struct ixgbe_ring *rx_ring = &adapter->test_rx_ring; +- int i, j, lc, good_cnt, ret_val = 0; ++ int i, j, lc, ret_val = 0; + unsigned int size = 1024; + netdev_tx_t tx_ret_val; + struct sk_buff *skb; +@@ -1903,6 +2351,8 @@ static int ixgbe_run_loopback_test(struct ixgbe_adapter *adapter) + lc = ((rx_ring->count / 64) * 2) + 1; + + for (j = 0; j <= lc; j++) { ++ unsigned int good_cnt; ++ + /* reset count of good packets */ + good_cnt = 0; + +@@ -1955,13 +2405,21 @@ out: + return *data; + } + ++#ifndef HAVE_ETHTOOL_GET_SSET_COUNT ++static int ixgbe_diag_test_count(struct net_device __always_unused *netdev) ++{ ++ return IXGBE_TEST_LEN; ++} ++ ++#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + static void ixgbe_diag_test(struct net_device *netdev, + struct ethtool_test *eth_test, u64 *data) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + bool if_running = netif_running(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; + +- if (ixgbe_removed(adapter->hw.hw_addr)) { ++ if (IXGBE_REMOVED(hw->hw_addr)) { + e_err(hw, "Adapter removed - test blocked\n"); + data[0] = 1; + data[1] = 1; +@@ -1973,13 +2431,14 @@ static void ixgbe_diag_test(struct net_device *netdev, + } + set_bit(__IXGBE_TESTING, &adapter->state); + if (eth_test->flags == ETH_TEST_FL_OFFLINE) { +- struct ixgbe_hw *hw = &adapter->hw; +- + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + int i; + for (i = 0; i < adapter->num_vfs; i++) { + if (adapter->vfinfo[i].clear_to_send) { +- 
netdev_warn(netdev, "offline diagnostic is not supported when VFs are present\n"); ++ e_warn(drv, "Please take active VFS " ++ "offline and restart the " ++ "adapter before running NIC " ++ "diagnostics\n"); + data[0] = 1; + data[1] = 1; + data[2] = 1; +@@ -1997,8 +2456,7 @@ static void ixgbe_diag_test(struct net_device *netdev, + e_info(hw, "offline testing starting\n"); + + /* Link test performed before hardware reset so autoneg doesn't +- * interfere with test result +- */ ++ * interfere with test result */ + if (ixgbe_link_test(adapter, &data[4])) + eth_test->flags |= ETH_TEST_FL_FAILED; + +@@ -2026,7 +2484,7 @@ static void ixgbe_diag_test(struct net_device *netdev, + * loopback diagnostic. */ + if (adapter->flags & (IXGBE_FLAG_SRIOV_ENABLED | + IXGBE_FLAG_VMDQ_ENABLED)) { +- e_info(hw, "Skip MAC loopback diagnostic in VT mode\n"); ++ e_info(hw, "skip MAC loopback diagnostic in VT mode\n"); + data[3] = 0; + goto skip_loopback; + } +@@ -2091,7 +2549,7 @@ static void ixgbe_get_wol(struct net_device *netdev, + wol->wolopts = 0; + + if (ixgbe_wol_exclusion(adapter, wol) || +- !device_can_wakeup(&adapter->pdev->dev)) ++ !device_can_wakeup(pci_dev_to_dev(adapter->pdev))) + return; + + if (adapter->wol & IXGBE_WUFC_EX) +@@ -2107,6 +2565,7 @@ static void ixgbe_get_wol(struct net_device *netdev, + static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; + + if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE)) + return -EOPNOTSUPP; +@@ -2125,7 +2584,9 @@ static int ixgbe_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol) + if (wol->wolopts & WAKE_MAGIC) + adapter->wol |= IXGBE_WUFC_MAG; + +- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); ++ hw->wol_enabled = !!(adapter->wol); ++ ++ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + + return 0; + } +@@ -2140,23 +2601,29 @@ static int 
ixgbe_nway_reset(struct net_device *netdev) + return 0; + } + ++#ifdef HAVE_ETHTOOL_SET_PHYS_ID + static int ixgbe_set_phys_id(struct net_device *netdev, + enum ethtool_phys_id_state state) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + ++ if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) ++ return -EOPNOTSUPP; ++ + switch (state) { + case ETHTOOL_ID_ACTIVE: + adapter->led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + return 2; + + case ETHTOOL_ID_ON: +- hw->mac.ops.led_on(hw, IXGBE_LED_ON); ++ if (hw->mac.ops.led_on(hw, hw->mac.led_link_act)) ++ return -EINVAL; + break; + + case ETHTOOL_ID_OFF: +- hw->mac.ops.led_off(hw, IXGBE_LED_ON); ++ if (hw->mac.ops.led_off(hw, hw->mac.led_link_act)) ++ return -EINVAL; + break; + + case ETHTOOL_ID_INACTIVE: +@@ -2167,12 +2634,42 @@ static int ixgbe_set_phys_id(struct net_device *netdev, + + return 0; + } ++#else ++static int ixgbe_phys_id(struct net_device *netdev, u32 data) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 led_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); ++ u32 i; ++ ++ if (!hw->mac.ops.led_on || !hw->mac.ops.led_off) ++ return -EOPNOTSUPP; ++ ++ if (!data || data > 300) ++ data = 300; ++ ++ for (i = 0; i < (data * 1000); i += 400) { ++ if (hw->mac.ops.led_on(hw, hw->mac.led_link_act)) ++ return -EINVAL; ++ msleep_interruptible(200); ++ if (hw->mac.ops.led_off(hw, hw->mac.led_link_act)) ++ return -EINVAL; ++ msleep_interruptible(200); ++ } ++ ++ /* Restore LED settings */ ++ IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, led_reg); ++ ++ return IXGBE_SUCCESS; ++} ++#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ + + static int ixgbe_get_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + ++ ec->tx_max_coalesced_frames_irq = adapter->tx_work_limit; + /* only valid if in constant ITR mode */ + if (adapter->rx_itr_setting <= 1) + ec->rx_coalesce_usecs = 
adapter->rx_itr_setting; +@@ -2210,7 +2707,8 @@ static bool ixgbe_update_rsc(struct ixgbe_adapter *adapter) + adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; +- e_info(probe, "rx-usecs value high enough to re-enable RSC\n"); ++ e_info(probe, "rx-usecs value high enough " ++ "to re-enable RSC\n"); + return true; + } + /* if interrupt rate is too high then disable RSC */ +@@ -2226,9 +2724,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev, + struct ethtool_coalesce *ec) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); +- struct ixgbe_q_vector *q_vector; + int i; +- u16 tx_itr_param, rx_itr_param, tx_itr_prev; ++ u16 tx_itr_param, rx_itr_param; ++ u16 tx_itr_prev; + bool need_reset = false; + + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) { +@@ -2240,6 +2738,9 @@ static int ixgbe_set_coalesce(struct net_device *netdev, + tx_itr_prev = adapter->tx_itr_setting; + } + ++ if (ec->tx_max_coalesced_frames_irq) ++ adapter->tx_work_limit = ec->tx_max_coalesced_frames_irq; ++ + if ((ec->rx_coalesce_usecs > (IXGBE_MAX_EITR >> 2)) || + (ec->tx_coalesce_usecs > (IXGBE_MAX_EITR >> 2))) + return -EINVAL; +@@ -2260,7 +2761,7 @@ static int ixgbe_set_coalesce(struct net_device *netdev, + adapter->tx_itr_setting = ec->tx_coalesce_usecs; + + if (adapter->tx_itr_setting == 1) +- tx_itr_param = IXGBE_10K_ITR; ++ tx_itr_param = IXGBE_12K_ITR; + else + tx_itr_param = adapter->tx_itr_setting; + +@@ -2268,7 +2769,6 @@ static int ixgbe_set_coalesce(struct net_device *netdev, + if (adapter->q_vector[0]->tx.count && adapter->q_vector[0]->rx.count) + adapter->tx_itr_setting = adapter->rx_itr_setting; + +-#if IS_ENABLED(CONFIG_BQL) + /* detect ITR changes that require update of TXDCTL.WTHRESH */ + if ((adapter->tx_itr_setting != 1) && + (adapter->tx_itr_setting < IXGBE_100K_ITR)) { +@@ -2280,12 +2780,23 @@ static int ixgbe_set_coalesce(struct net_device 
*netdev, + (tx_itr_prev < IXGBE_100K_ITR)) + need_reset = true; + } +-#endif ++ + /* check the old value and enable RSC if necessary */ + need_reset |= ixgbe_update_rsc(adapter); + ++ if (adapter->hw.mac.dmac_config.watchdog_timer && ++ (!adapter->rx_itr_setting && !adapter->tx_itr_setting)) { ++ e_info(probe, ++ "Disabling DMA coalescing because interrupt throttling is disabled\n"); ++ adapter->hw.mac.dmac_config.watchdog_timer = 0; ++ ixgbe_dmac_config(&adapter->hw); ++ } ++ + for (i = 0; i < adapter->num_q_vectors; i++) { +- q_vector = adapter->q_vector[i]; ++ struct ixgbe_q_vector *q_vector = adapter->q_vector[i]; ++ ++ q_vector->tx.work_limit = adapter->tx_work_limit; ++ q_vector->rx.work_limit = adapter->rx_work_limit; + if (q_vector->tx.count && !q_vector->rx.count) + /* tx only */ + q_vector->itr = tx_itr_param; +@@ -2306,6 +2817,253 @@ static int ixgbe_set_coalesce(struct net_device *netdev, + return 0; + } + ++#ifndef HAVE_NDO_SET_FEATURES ++static u32 ixgbe_get_rx_csum(struct net_device *netdev) ++{ ++ return !!(netdev->features & NETIF_F_RXCSUM); ++} ++ ++static int ixgbe_set_rx_csum(struct net_device *netdev, u32 data) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ bool need_reset = false; ++ ++ if (data) ++ netdev->features |= NETIF_F_RXCSUM; ++ else ++ netdev->features &= ~NETIF_F_RXCSUM; ++ ++ /* LRO and RSC both depend on RX checksum to function */ ++ if (!data && (netdev->features & NETIF_F_LRO)) { ++ netdev->features &= ~NETIF_F_LRO; ++ ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) { ++ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; ++ need_reset = true; ++ } ++ } ++ ++#ifdef HAVE_VXLAN_RX_OFFLOAD ++ if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && data) { ++ netdev->hw_enc_features |= NETIF_F_RXCSUM | ++ NETIF_F_IP_CSUM | ++ NETIF_F_IPV6_CSUM; ++ if (!need_reset) ++ adapter->flags2 |= IXGBE_FLAG2_VXLAN_REREG_NEEDED; ++ } else { ++ netdev->hw_enc_features &= ~(NETIF_F_RXCSUM | ++ NETIF_F_IP_CSUM | ++ NETIF_F_IPV6_CSUM); 
++ ixgbe_clear_udp_tunnel_port(adapter, ++ IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); ++ } ++#endif /* HAVE_VXLAN_RX_OFFLOAD */ ++ ++ if (need_reset) ++ ixgbe_do_reset(netdev); ++ ++ return 0; ++} ++ ++static int ixgbe_set_tx_csum(struct net_device *netdev, u32 data) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++#ifdef NETIF_F_IPV6_CSUM ++ u32 feature_list = NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; ++#else ++ u32 feature_list = NETIF_F_IP_CSUM; ++#endif ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++#ifdef HAVE_ENCAP_TSO_OFFLOAD ++ if (data) ++ netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL; ++ else ++ netdev->hw_enc_features &= ~NETIF_F_GSO_UDP_TUNNEL; ++ feature_list |= NETIF_F_GSO_UDP_TUNNEL; ++#endif /* HAVE_ENCAP_TSO_OFFLOAD */ ++ feature_list |= NETIF_F_SCTP_CSUM; ++ break; ++ default: ++ break; ++ } ++ ++ if (data) ++ netdev->features |= feature_list; ++ else ++ netdev->features &= ~feature_list; ++ ++ return 0; ++} ++ ++#ifdef NETIF_F_TSO ++static int ixgbe_set_tso(struct net_device *netdev, u32 data) ++{ ++#ifdef NETIF_F_TSO6 ++ u32 feature_list = NETIF_F_TSO | NETIF_F_TSO6; ++#else ++ u32 feature_list = NETIF_F_TSO; ++#endif ++ ++ if (data) ++ netdev->features |= feature_list; ++ else ++ netdev->features &= ~feature_list; ++ ++#ifndef HAVE_NETDEV_VLAN_FEATURES ++ if (!data) { ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct net_device *v_netdev; ++ int i; ++ ++ /* disable TSO on all VLANs if they're present */ ++ if (!adapter->vlgrp) ++ goto tso_out; ++ ++ for (i = 0; i < VLAN_GROUP_ARRAY_LEN; i++) { ++ v_netdev = vlan_group_get_device(adapter->vlgrp, i); ++ if (!v_netdev) ++ continue; ++ ++ v_netdev->features &= ~feature_list; ++ vlan_group_set_device(adapter->vlgrp, i, v_netdev); ++ } ++ } ++ ++tso_out: ++ ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++ return 0; ++} ++ ++#endif /* NETIF_F_TSO */ ++#ifdef 
ETHTOOL_GFLAGS ++static int ixgbe_set_flags(struct net_device *netdev, u32 data) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ u32 supported_flags = ETH_FLAG_RXVLAN | ETH_FLAG_TXVLAN; ++ u32 changed = netdev->features ^ data; ++ bool need_reset = false; ++ int rc; ++ ++#ifndef HAVE_VLAN_RX_REGISTER ++ if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && ++ !(data & ETH_FLAG_RXVLAN)) ++ return -EINVAL; ++ ++#endif ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ++ supported_flags |= ETH_FLAG_LRO; ++ ++#ifdef ETHTOOL_GRXRINGS ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_82599EB: ++ supported_flags |= ETH_FLAG_NTUPLE; ++ default: ++ break; ++ } ++ ++#endif ++#ifdef NETIF_F_RXHASH ++ supported_flags |= ETH_FLAG_RXHASH; ++ ++#endif ++ rc = ethtool_op_set_flags(netdev, data, supported_flags); ++ if (rc) ++ return rc; ++ ++#ifndef HAVE_VLAN_RX_REGISTER ++ if (changed & ETH_FLAG_RXVLAN) ++ ixgbe_vlan_mode(netdev, netdev->features); ++#endif ++ ++#ifdef HAVE_VXLAN_RX_OFFLOAD ++ if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && ++ netdev->features & NETIF_F_RXCSUM) { ++ vxlan_get_rx_port(netdev); ++ else ++ ixgbe_clear_udp_tunnel_port(adapter, ++ IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); ++ } ++#endif /* HAVE_VXLAN_RX_OFFLOAD */ ++ ++ /* if state changes we need to update adapter->flags and reset */ ++ if (!(netdev->features & NETIF_F_LRO)) { ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) ++ need_reset = true; ++ adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; ++ } else if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && ++ !(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) { ++ if (adapter->rx_itr_setting == 1 || ++ adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { ++ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; ++ need_reset = true; ++ } else if (changed & ETH_FLAG_LRO) { ++ e_info(probe, "rx-usecs set too low, " ++ "disabling RSC\n"); ++ } ++ } ++ ++#ifdef 
ETHTOOL_GRXRINGS ++ /* ++ * Check if Flow Director n-tuple support was enabled or disabled. If ++ * the state changed, we need to reset. ++ */ ++ switch (netdev->features & NETIF_F_NTUPLE) { ++ case NETIF_F_NTUPLE: ++ /* turn off ATR, enable perfect filters and reset */ ++ if (!(adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE)) ++ need_reset = true; ++ ++ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ break; ++ default: ++ /* turn off perfect filters, enable ATR and reset */ ++ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) ++ need_reset = true; ++ ++ adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ ++ /* We cannot enable ATR if VMDq is enabled */ ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ++ break; ++ ++ /* We cannot enable ATR if we have 2 or more traffic classes */ ++ if (netdev_get_num_tc(netdev) > 1) ++ break; ++ ++ /* We cannot enable ATR if RSS is disabled */ ++ if (adapter->ring_feature[RING_F_RSS].limit <= 1) ++ break; ++ ++ /* A sample rate of 0 indicates ATR disabled */ ++ if (!adapter->atr_sample_rate) ++ break; ++ ++ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ break; ++ } ++ ++#endif /* ETHTOOL_GRXRINGS */ ++ if (need_reset) ++ ixgbe_do_reset(netdev); ++ ++ return 0; ++} ++ ++#endif /* ETHTOOL_GFLAGS */ ++#endif /* HAVE_NDO_SET_FEATURES */ ++#ifdef ETHTOOL_GRXRINGS + static int ixgbe_get_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ethtool_rxnfc *cmd) + { +@@ -2408,11 +3166,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, + switch (cmd->flow_type) { + case TCP_V4_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* fallthrough */ ++ /* fall through */ + case UDP_V4_FLOW: + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* fallthrough */ ++ /* fall through */ + case SCTP_V4_FLOW: + case AH_ESP_V4_FLOW: + case AH_V4_FLOW: +@@ -2422,11 +3180,11 @@ static int 
ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, + break; + case TCP_V6_FLOW: + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* fallthrough */ ++ /* fall through */ + case UDP_V6_FLOW: + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + cmd->data |= RXH_L4_B_0_1 | RXH_L4_B_2_3; +- /* fallthrough */ ++ /* fall through */ + case SCTP_V6_FLOW: + case AH_ESP_V6_FLOW: + case AH_V6_FLOW: +@@ -2442,7 +3200,11 @@ static int ixgbe_get_rss_hash_opts(struct ixgbe_adapter *adapter, + } + + static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, ++#ifdef HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS ++ void *rule_locs) ++#else + u32 *rule_locs) ++#endif + { + struct ixgbe_adapter *adapter = netdev_priv(dev); + int ret = -EOPNOTSUPP; +@@ -2460,7 +3222,8 @@ static int ixgbe_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd, + ret = ixgbe_get_ethtool_fdir_entry(adapter, cmd); + break; + case ETHTOOL_GRXCLSRLALL: +- ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, rule_locs); ++ ret = ixgbe_get_ethtool_fdir_all(adapter, cmd, ++ (u32 *)rule_locs); + break; + case ETHTOOL_GRXFH: + ret = ixgbe_get_rss_hash_opts(adapter, cmd); +@@ -2479,7 +3242,8 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + struct ixgbe_hw *hw = &adapter->hw; + struct hlist_node *node2; + struct ixgbe_fdir_filter *rule, *parent; +- int err = -EINVAL; ++ bool deleted = false; ++ s32 err; + + parent = NULL; + rule = NULL; +@@ -2494,31 +3258,39 @@ static int ixgbe_update_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + + /* if there is an old rule occupying our place remove it */ + if (rule && (rule->sw_idx == sw_idx)) { +- if (!input || (rule->filter.formatted.bkt_hash != +- input->filter.formatted.bkt_hash)) { ++ /* hardware filters are only configured when interface is up, ++ * and we should not issue filter commands while the interface ++ * is down ++ */ ++ if (netif_running(adapter->netdev) && ++ (!input || (rule->filter.formatted.bkt_hash != ++ 
input->filter.formatted.bkt_hash))) { + err = ixgbe_fdir_erase_perfect_filter_82599(hw, + &rule->filter, + sw_idx); ++ if (err) ++ return -EINVAL; + } + + hlist_del(&rule->fdir_node); + kfree(rule); + adapter->fdir_filter_count--; ++ deleted = true; + } + +- /* +- * If no input this was a delete, err should be 0 if a rule was +- * successfully found and removed from the list else -EINVAL ++ /* If we weren't given an input, then this was a request to delete a ++ * filter. We should return -EINVAL if the filter wasn't found, but ++ * return 0 if the rule was successfully deleted. + */ + if (!input) +- return err; ++ return deleted ? 0 : -EINVAL; + + /* initialize node and set software index */ + INIT_HLIST_NODE(&input->fdir_node); + + /* add filter to the list */ + if (parent) +- hlist_add_after(&parent->fdir_node, &input->fdir_node); ++ hlist_add_behind(&input->fdir_node, &parent->fdir_node); + else + hlist_add_head(&input->fdir_node, + &adapter->fdir_filter_list); +@@ -2558,6 +3330,7 @@ static int ixgbe_flowspec_to_flow_type(struct ethtool_rx_flow_spec *fsp, + *flow_type = IXGBE_ATR_FLOW_TYPE_IPV4; + break; + } ++ /* fall through */ + default: + return 0; + } +@@ -2651,27 +3424,33 @@ static int ixgbe_add_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + if (hlist_empty(&adapter->fdir_filter_list)) { + /* save mask and program input mask into HW */ + memcpy(&adapter->fdir_mask, &mask, sizeof(mask)); +- err = ixgbe_fdir_set_input_mask_82599(hw, &mask); ++ err = ixgbe_fdir_set_input_mask_82599(hw, &mask, adapter->cloud_mode); + if (err) { + e_err(drv, "Error writing mask\n"); + goto err_out_w_lock; + } + } else if (memcmp(&adapter->fdir_mask, &mask, sizeof(mask))) { +- e_err(drv, "Only one mask supported per port\n"); ++ e_err(drv, "Hardware only supports one mask per port. 
To change the mask you must first delete all the rules.\n"); + goto err_out_w_lock; + } + + /* apply mask and compute/store hash */ + ixgbe_atr_compute_perfect_hash_82599(&input->filter, &mask); + +- /* program filters to filter memory */ +- err = ixgbe_fdir_write_perfect_filter_82599(hw, +- &input->filter, input->sw_idx, +- (input->action == IXGBE_FDIR_DROP_QUEUE) ? +- IXGBE_FDIR_DROP_QUEUE : +- adapter->rx_ring[input->action]->reg_idx); +- if (err) +- goto err_out_w_lock; ++ /* only program filters to hardware if the net device is running, as ++ * we store the filters in the Rx buffer which is not allocated when ++ * the device is down ++ */ ++ if (netif_running(adapter->netdev)) { ++ err = ixgbe_fdir_write_perfect_filter_82599(hw, ++ &input->filter, input->sw_idx, ++ (input->action == IXGBE_FDIR_DROP_QUEUE) ? ++ IXGBE_FDIR_DROP_QUEUE : ++ adapter->rx_ring[input->action]->reg_idx, ++ adapter->cloud_mode); ++ if (err) ++ goto err_out_w_lock; ++ } + + ixgbe_update_ethtool_fdir_entry(adapter, input, input->sw_idx); + +@@ -2699,6 +3478,19 @@ static int ixgbe_del_ethtool_fdir_entry(struct ixgbe_adapter *adapter, + return err; + } + ++#ifdef ETHTOOL_SRXNTUPLE ++/* ++ * We need to keep this around for kernels 2.6.33 - 2.6.39 in order to avoid ++ * a null pointer dereference as it was assumend if the NETIF_F_NTUPLE flag ++ * was defined that this function was present. 
++ */ ++static int ixgbe_set_rx_ntuple(struct net_device __always_unused *dev, ++ struct ethtool_rx_ntuple __always_unused *cmd) ++{ ++ return -EOPNOTSUPP; ++} ++ ++#endif + #define UDP_RSS_FLAGS (IXGBE_FLAG2_RSS_FIELD_IPV4_UDP | \ + IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, +@@ -2774,11 +3566,19 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, + /* if we changed something we need to update flags */ + if (flags2 != adapter->flags2) { + struct ixgbe_hw *hw = &adapter->hw; +- u32 mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); ++ u32 mrqc; ++ unsigned int pf_pool = adapter->num_vfs; ++ ++ if ((hw->mac.type >= ixgbe_mac_X550) && ++ (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) ++ mrqc = IXGBE_READ_REG(hw, IXGBE_PFVFMRQC(pf_pool)); ++ else ++ mrqc = IXGBE_READ_REG(hw, IXGBE_MRQC); + + if ((flags2 & UDP_RSS_FLAGS) && + !(adapter->flags2 & UDP_RSS_FLAGS)) +- e_warn(drv, "enabling UDP RSS: fragmented packets may arrive out of order to the stack above\n"); ++ e_warn(drv, "enabling UDP RSS: fragmented packets" ++ " may arrive out of order to the stack above\n"); + + adapter->flags2 = flags2; + +@@ -2797,7 +3597,11 @@ static int ixgbe_set_rss_hash_opt(struct ixgbe_adapter *adapter, + if (flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) + mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; + +- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); ++ if ((hw->mac.type >= ixgbe_mac_X550) && ++ (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) ++ IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), mrqc); ++ else ++ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); + } + + return 0; +@@ -2825,12 +3629,130 @@ static int ixgbe_set_rxnfc(struct net_device *dev, struct ethtool_rxnfc *cmd) + return ret; + } + ++#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) ++static int ixgbe_rss_indir_tbl_max(struct ixgbe_adapter *adapter) ++{ ++ if (adapter->hw.mac.type < ixgbe_mac_X550) ++ return 16; ++ else ++ return 64; ++} ++ ++static u32 ixgbe_get_rxfh_key_size(struct net_device 
*netdev) ++{ ++ return IXGBE_RSS_KEY_SIZE; ++} ++ ++static u32 ixgbe_rss_indir_size(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ return ixgbe_rss_indir_tbl_entries(adapter); ++} ++ ++static void ixgbe_get_reta(struct ixgbe_adapter *adapter, u32 *indir) ++{ ++ int i, reta_size = ixgbe_rss_indir_tbl_entries(adapter); ++ u16 rss_m = adapter->ring_feature[RING_F_RSS].mask; ++ ++ if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ++ rss_m = adapter->ring_feature[RING_F_RSS].indices - 1; ++ ++ for (i = 0; i < reta_size; i++) ++ indir[i] = adapter->rss_indir_tbl[i] & rss_m; ++} ++ ++#ifdef HAVE_RXFH_HASHFUNC ++static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key, ++ u8 *hfunc) ++#else ++static int ixgbe_get_rxfh(struct net_device *netdev, u32 *indir, u8 *key) ++#endif ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++#ifdef HAVE_RXFH_HASHFUNC ++ if (hfunc) ++ *hfunc = ETH_RSS_HASH_TOP; ++#endif ++ ++ if (indir) ++ ixgbe_get_reta(adapter, indir); ++ ++ if (key) ++ memcpy(key, adapter->rss_key, ixgbe_get_rxfh_key_size(netdev)); ++ ++ return 0; ++} ++ ++#ifdef HAVE_RXFH_HASHFUNC ++static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, ++ const u8 *key, const u8 hfunc) ++#else ++#ifdef HAVE_RXFH_NONCONST ++static int ixgbe_set_rxfh(struct net_device *netdev, u32 *indir, u8 *key) ++#else ++static int ixgbe_set_rxfh(struct net_device *netdev, const u32 *indir, ++ const u8 *key) ++#endif /* HAVE_RXFH_NONCONST */ ++#endif /* HAVE_RXFH_HASHFUNC */ ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ int i; ++ u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); ++ ++#ifdef HAVE_RXFH_HASHFUNC ++ if (hfunc) ++ return -EINVAL; ++#endif ++ ++ /* Fill out the redirection table */ ++ if (indir) { ++ int max_queues = min_t(int, adapter->num_rx_queues, ++ ixgbe_rss_indir_tbl_max(adapter)); ++ ++ /*Allow at least 2 queues w/ SR-IOV.*/ ++ if ((adapter->flags & 
IXGBE_FLAG_SRIOV_ENABLED) && ++ (max_queues < 2)) ++ max_queues = 2; ++ ++ /* Verify user input. */ ++ for (i = 0; i < reta_entries; i++) ++ if (indir[i] >= max_queues) ++ return -EINVAL; ++ ++ for (i = 0; i < reta_entries; i++) ++ adapter->rss_indir_tbl[i] = indir[i]; ++ } ++ ++ /* Fill out the rss hash key */ ++ if (key) { ++ memcpy(adapter->rss_key, key, ixgbe_get_rxfh_key_size(netdev)); ++ ixgbe_store_key(adapter); ++ } ++ ++ ixgbe_store_reta(adapter); ++ ++ return 0; ++} ++#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ ++ ++#ifdef HAVE_ETHTOOL_GET_TS_INFO + static int ixgbe_get_ts_info(struct net_device *dev, + struct ethtool_ts_info *info) + { + struct ixgbe_adapter *adapter = netdev_priv(dev); + ++ /* we always support timestamping disabled */ ++ info->rx_filters = 1 << HWTSTAMP_FILTER_NONE; ++ + switch (adapter->hw.mac.type) { ++#ifdef HAVE_PTP_1588_CLOCK ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ info->rx_filters |= 1 << HWTSTAMP_FILTER_ALL; ++ /* fallthrough */ + case ixgbe_mac_X540: + case ixgbe_mac_82599EB: + info->so_timestamping = +@@ -2850,8 +3772,7 @@ static int ixgbe_get_ts_info(struct net_device *dev, + (1 << HWTSTAMP_TX_OFF) | + (1 << HWTSTAMP_TX_ON); + +- info->rx_filters = +- (1 << HWTSTAMP_FILTER_NONE) | ++ info->rx_filters |= + (1 << HWTSTAMP_FILTER_PTP_V1_L4_SYNC) | + (1 << HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_L2_EVENT) | +@@ -2864,13 +3785,17 @@ static int ixgbe_get_ts_info(struct net_device *dev, + (1 << HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ) | + (1 << HWTSTAMP_FILTER_PTP_V2_EVENT); + break; ++#endif /* HAVE_PTP_1588_CLOCK */ + default: + return ethtool_op_get_ts_info(dev, info); + break; + } + return 0; + } ++#endif /* HAVE_ETHTOOL_GET_TS_INFO */ + ++#endif /* ETHTOOL_GRXRINGS */ ++#ifdef ETHTOOL_SCHANNELS + static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) + { + unsigned int max_combined; +@@ -2880,8 +3805,8 @@ static unsigned int 
ixgbe_max_channels(struct ixgbe_adapter *adapter) + /* We only support one q_vector without MSI-X */ + max_combined = 1; + } else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { +- /* SR-IOV currently only allows one queue on the PF */ +- max_combined = 1; ++ /* Limit value based on the queue mask */ ++ max_combined = adapter->ring_feature[RING_F_RSS].mask + 1; + } else if (tcs > 1) { + /* For DCB report channels per traffic class */ + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { +@@ -2898,8 +3823,8 @@ static unsigned int ixgbe_max_channels(struct ixgbe_adapter *adapter) + /* support up to 64 queues with ATR */ + max_combined = IXGBE_MAX_FDIR_INDICES; + } else { +- /* support up to 16 queues with RSS */ +- max_combined = IXGBE_MAX_RSS_INDICES; ++ /* support up to max allowed queues with RSS */ ++ max_combined = ixgbe_max_rss_indices(adapter); + } + + return max_combined; +@@ -2947,6 +3872,7 @@ static int ixgbe_set_channels(struct net_device *dev, + { + struct ixgbe_adapter *adapter = netdev_priv(dev); + unsigned int count = ch->combined_count; ++ u8 max_rss_indices = ixgbe_max_rss_indices(adapter); + + /* verify they are not requesting separate vectors */ + if (!count || ch->rx_count || ch->tx_count) +@@ -2963,22 +3889,24 @@ static int ixgbe_set_channels(struct net_device *dev, + /* update feature limits from largest to smallest supported values */ + adapter->ring_feature[RING_F_FDIR].limit = count; + +- /* cap RSS limit at 16 */ +- if (count > IXGBE_MAX_RSS_INDICES) +- count = IXGBE_MAX_RSS_INDICES; ++ /* cap RSS limit */ ++ if (count > max_rss_indices) ++ count = max_rss_indices; + adapter->ring_feature[RING_F_RSS].limit = count; + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* cap FCoE limit at 8 */ + if (count > IXGBE_FCRETA_SIZE) + count = IXGBE_FCRETA_SIZE; + adapter->ring_feature[RING_F_FCOE].limit = count; ++#endif /* CONFIG_FCOE */ + +-#endif + /* use setup TC to update any traffic class queue mapping */ + return ixgbe_setup_tc(dev, 
netdev_get_num_tc(dev)); + } ++#endif /* ETHTOOL_SCHANNELS */ + ++#ifdef ETHTOOL_GMODULEINFO + static int ixgbe_get_module_info(struct net_device *dev, + struct ethtool_modinfo *modinfo) + { +@@ -3051,43 +3979,371 @@ static int ixgbe_get_module_eeprom(struct net_device *dev, + + return 0; + } ++#endif /* ETHTOOL_GMODULEINFO */ ++ ++#ifdef ETHTOOL_GEEE ++ ++static const struct { ++ ixgbe_link_speed mac_speed; ++ u32 supported; ++} ixgbe_ls_map[] = { ++ { IXGBE_LINK_SPEED_10_FULL, SUPPORTED_10baseT_Full }, ++ { IXGBE_LINK_SPEED_100_FULL, SUPPORTED_100baseT_Full }, ++ { IXGBE_LINK_SPEED_1GB_FULL, SUPPORTED_1000baseT_Full }, ++ { IXGBE_LINK_SPEED_2_5GB_FULL, SUPPORTED_2500baseX_Full }, ++ { IXGBE_LINK_SPEED_10GB_FULL, SUPPORTED_10000baseT_Full }, ++}; ++ ++static const struct { ++ u32 lp_advertised; ++ u32 mac_speed; ++} ixgbe_lp_map[] = { ++ { FW_PHY_ACT_UD_2_100M_TX_EEE, SUPPORTED_100baseT_Full }, ++ { FW_PHY_ACT_UD_2_1G_T_EEE, SUPPORTED_1000baseT_Full }, ++ { FW_PHY_ACT_UD_2_10G_T_EEE, SUPPORTED_10000baseT_Full }, ++ { FW_PHY_ACT_UD_2_1G_KX_EEE, SUPPORTED_1000baseKX_Full }, ++ { FW_PHY_ACT_UD_2_10G_KX4_EEE, SUPPORTED_10000baseKX4_Full }, ++ { FW_PHY_ACT_UD_2_10G_KR_EEE, SUPPORTED_10000baseKR_Full}, ++}; ++ ++static int ++ixgbe_get_eee_fw(struct ixgbe_adapter *adapter, struct ethtool_eee *edata) ++{ ++ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ struct ixgbe_hw *hw = &adapter->hw; ++ s32 rc; ++ u16 i; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_UD_2, &info); ++ if (rc) ++ return rc; ++ ++ edata->lp_advertised = 0; ++ for (i = 0; i < ARRAY_SIZE(ixgbe_lp_map); ++i) { ++ if (info[0] & ixgbe_lp_map[i].lp_advertised) ++ edata->lp_advertised |= ixgbe_lp_map[i].mac_speed; ++ } ++ ++ edata->supported = 0; ++ for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { ++ if (hw->phy.eee_speeds_supported & ixgbe_ls_map[i].mac_speed) ++ edata->supported |= ixgbe_ls_map[i].supported; ++ } ++ ++ edata->advertised = 0; ++ for (i = 0; i < ARRAY_SIZE(ixgbe_ls_map); ++i) { ++ if 
(hw->phy.eee_speeds_advertised & ixgbe_ls_map[i].mac_speed) ++ edata->advertised |= ixgbe_ls_map[i].supported; ++ } ++ ++ edata->eee_enabled = !!edata->advertised; ++ edata->tx_lpi_enabled = edata->eee_enabled; ++ if (edata->advertised & edata->lp_advertised) ++ edata->eee_active = true; + +-static const struct ethtool_ops ixgbe_ethtool_ops = { +- .get_settings = ixgbe_get_settings, +- .set_settings = ixgbe_set_settings, +- .get_drvinfo = ixgbe_get_drvinfo, +- .get_regs_len = ixgbe_get_regs_len, +- .get_regs = ixgbe_get_regs, +- .get_wol = ixgbe_get_wol, +- .set_wol = ixgbe_set_wol, +- .nway_reset = ixgbe_nway_reset, +- .get_link = ethtool_op_get_link, +- .get_eeprom_len = ixgbe_get_eeprom_len, +- .get_eeprom = ixgbe_get_eeprom, +- .set_eeprom = ixgbe_set_eeprom, +- .get_ringparam = ixgbe_get_ringparam, +- .set_ringparam = ixgbe_set_ringparam, +- .get_pauseparam = ixgbe_get_pauseparam, +- .set_pauseparam = ixgbe_set_pauseparam, +- .get_msglevel = ixgbe_get_msglevel, +- .set_msglevel = ixgbe_set_msglevel, +- .self_test = ixgbe_diag_test, +- .get_strings = ixgbe_get_strings, +- .set_phys_id = ixgbe_set_phys_id, +- .get_sset_count = ixgbe_get_sset_count, ++ return 0; ++} ++ ++static int ixgbe_get_eee(struct net_device *netdev, struct ethtool_eee *edata) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ if (!hw->mac.ops.setup_eee) ++ return -EOPNOTSUPP; ++ ++ if (!(adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) ++ return -EOPNOTSUPP; ++ ++ if (hw->phy.eee_speeds_supported && hw->phy.type == ixgbe_phy_fw) ++ return ixgbe_get_eee_fw(adapter, edata); ++ ++ return -EOPNOTSUPP; ++} ++#endif /* ETHTOOL_GEEE */ ++ ++#ifdef ETHTOOL_SEEE ++static int ixgbe_set_eee(struct net_device *netdev, struct ethtool_eee *edata) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct ethtool_eee eee_data; ++ s32 ret_val; ++ ++ if (!(hw->mac.ops.setup_eee && ++ (adapter->flags2 & 
IXGBE_FLAG2_EEE_CAPABLE))) ++ return -EOPNOTSUPP; ++ ++ memset(&eee_data, 0, sizeof(struct ethtool_eee)); ++ ++ ret_val = ixgbe_get_eee(netdev, &eee_data); ++ if (ret_val) ++ return ret_val; ++ ++ if (eee_data.eee_enabled && !edata->eee_enabled) { ++ if (eee_data.tx_lpi_enabled != edata->tx_lpi_enabled) { ++ e_dev_err("Setting EEE tx-lpi is not supported\n"); ++ return -EINVAL; ++ } ++ ++ if (eee_data.tx_lpi_timer != edata->tx_lpi_timer) { ++ e_dev_err("Setting EEE Tx LPI timer is not supported\n"); ++ return -EINVAL; ++ } ++ ++ if (eee_data.advertised != edata->advertised) { ++ e_dev_err("Setting EEE advertised speeds is not supported\n"); ++ return -EINVAL; ++ } ++ ++ } ++ ++ if (eee_data.eee_enabled != edata->eee_enabled) { ++ ++ if (edata->eee_enabled) { ++ adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; ++ hw->phy.eee_speeds_advertised = ++ hw->phy.eee_speeds_supported; ++ } else { ++ adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; ++ hw->phy.eee_speeds_advertised = 0; ++ } ++ ++ /* reset link */ ++ if (netif_running(netdev)) ++ ixgbe_reinit_locked(adapter); ++ else ++ ixgbe_reset(adapter); ++ } ++ ++ return 0; ++} ++#endif /* ETHTOOL_SEEE */ ++ ++#ifdef HAVE_ETHTOOL_GET_SSET_COUNT ++/** ++ * ixgbe_get_priv_flags - report device private flags ++ * @netdev: network interface device structure ++ * ++ * The get string set count and the string set should be matched for each ++ * flag returned. Add new strings for each flag to the ixgbe_priv_flags_strings ++ * array. ++ * ++ * Returns a u32 bitmap of flags. 
++ **/ ++static u32 ixgbe_get_priv_flags(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ u32 priv_flags = 0; ++ ++ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ++ priv_flags |= IXGBE_PRIV_FLAGS_FD_ATR; ++#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC ++ ++ if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) ++ priv_flags |= IXGBE_PRIV_FLAGS_LEGACY_RX; ++#endif ++ ++ return priv_flags; ++} ++ ++/** ++ * ixgbe_set_priv_flags - set private flags ++ * @netdev: network interface device structure ++ * @flags: bit flags to be set ++ **/ ++static int ixgbe_set_priv_flags(struct net_device *netdev, u32 priv_flags) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC ++ unsigned int flags2 = adapter->flags2; ++#endif ++ unsigned int flags = adapter->flags; ++ ++ /* allow the user to control the state of the Flow ++ * Director ATR (Application Targeted Routing) feature ++ * of the driver ++ */ ++ flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ if (priv_flags & IXGBE_PRIV_FLAGS_FD_ATR) { ++ /* We cannot enable ATR if VMDq is enabled */ ++ if (flags & IXGBE_FLAG_VMDQ_ENABLED) ++ return -EINVAL; ++ /* We cannot enable ATR if we have 2 or more traffic classes */ ++ if (netdev_get_num_tc(netdev) > 1) ++ return -EINVAL; ++ /* We cannot enable ATR if RSS is disabled */ ++ if (adapter->ring_feature[RING_F_RSS].limit <= 1) ++ return -EINVAL; ++ /* A sample rate of 0 indicates ATR disabled */ ++ if (!adapter->atr_sample_rate) ++ return -EINVAL; ++ flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ } ++#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC ++ ++ flags2 &= ~IXGBE_FLAG2_RX_LEGACY; ++ if (priv_flags & IXGBE_PRIV_FLAGS_LEGACY_RX) ++ flags2 |= IXGBE_FLAG2_RX_LEGACY; ++#endif ++ ++ if (flags != adapter->flags) { ++ adapter->flags = flags; ++ ++ /* ATR state change requires a reset */ ++ ixgbe_do_reset(netdev); ++#ifndef HAVE_SWIOTLB_SKIP_CPU_SYNC ++ } ++#else ++ } else if (flags2 != adapter->flags2) { ++ adapter->flags2 = flags2; ++ ++ /* 
reset interface to repopulate queues */ ++ if (netif_running(netdev)) ++ ixgbe_reinit_locked(adapter); ++ } ++#endif ++ ++ return 0; ++} ++ ++#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ ++static struct ethtool_ops ixgbe_ethtool_ops = { ++#ifdef HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE ++ .get_link_ksettings = ixgbe_get_link_ksettings, ++ .set_link_ksettings = ixgbe_set_link_ksettings, ++#else ++ .get_settings = ixgbe_get_settings, ++ .set_settings = ixgbe_set_settings, ++#endif ++ .get_drvinfo = ixgbe_get_drvinfo, ++ .get_regs_len = ixgbe_get_regs_len, ++ .get_regs = ixgbe_get_regs, ++ .get_wol = ixgbe_get_wol, ++ .set_wol = ixgbe_set_wol, ++ .nway_reset = ixgbe_nway_reset, ++ .get_link = ethtool_op_get_link, ++ .get_eeprom_len = ixgbe_get_eeprom_len, ++ .get_eeprom = ixgbe_get_eeprom, ++ .set_eeprom = ixgbe_set_eeprom, ++ .get_ringparam = ixgbe_get_ringparam, ++ .set_ringparam = ixgbe_set_ringparam, ++ .get_pauseparam = ixgbe_get_pauseparam, ++ .set_pauseparam = ixgbe_set_pauseparam, ++ .get_msglevel = ixgbe_get_msglevel, ++ .set_msglevel = ixgbe_set_msglevel, ++#ifndef HAVE_ETHTOOL_GET_SSET_COUNT ++ .self_test_count = ixgbe_diag_test_count, ++#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ ++ .self_test = ixgbe_diag_test, ++ .get_strings = ixgbe_get_strings, ++#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++#ifdef HAVE_ETHTOOL_SET_PHYS_ID ++ .set_phys_id = ixgbe_set_phys_id, ++#else ++ .phys_id = ixgbe_phys_id, ++#endif /* HAVE_ETHTOOL_SET_PHYS_ID */ ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ ++#ifndef HAVE_ETHTOOL_GET_SSET_COUNT ++ .get_stats_count = ixgbe_get_stats_count, ++#else /* HAVE_ETHTOOL_GET_SSET_COUNT */ ++ .get_sset_count = ixgbe_get_sset_count, ++ .get_priv_flags = ixgbe_get_priv_flags, ++ .set_priv_flags = ixgbe_set_priv_flags, ++#endif /* HAVE_ETHTOOL_GET_SSET_COUNT */ + .get_ethtool_stats = ixgbe_get_ethtool_stats, +- .get_coalesce = ixgbe_get_coalesce, +- .set_coalesce = ixgbe_set_coalesce, ++#ifdef HAVE_ETHTOOL_GET_PERM_ADDR ++ .get_perm_addr = 
ethtool_op_get_perm_addr, ++#endif ++ .get_coalesce = ixgbe_get_coalesce, ++ .set_coalesce = ixgbe_set_coalesce, ++#ifndef HAVE_NDO_SET_FEATURES ++ .get_rx_csum = ixgbe_get_rx_csum, ++ .set_rx_csum = ixgbe_set_rx_csum, ++ .get_tx_csum = ethtool_op_get_tx_csum, ++ .set_tx_csum = ixgbe_set_tx_csum, ++ .get_sg = ethtool_op_get_sg, ++ .set_sg = ethtool_op_set_sg, ++#ifdef NETIF_F_TSO ++ .get_tso = ethtool_op_get_tso, ++ .set_tso = ixgbe_set_tso, ++#endif ++#ifdef ETHTOOL_GFLAGS ++ .get_flags = ethtool_op_get_flags, ++ .set_flags = ixgbe_set_flags, ++#endif ++#endif /* HAVE_NDO_SET_FEATURES */ ++#ifdef ETHTOOL_GRXRINGS + .get_rxnfc = ixgbe_get_rxnfc, + .set_rxnfc = ixgbe_set_rxnfc, ++#ifdef ETHTOOL_SRXNTUPLE ++ .set_rx_ntuple = ixgbe_set_rx_ntuple, ++#endif ++#endif /* ETHTOOL_GRXRINGS */ ++#ifndef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++#ifdef ETHTOOL_GEEE ++ .get_eee = ixgbe_get_eee, ++#endif /* ETHTOOL_GEEE */ ++#ifdef ETHTOOL_SEEE ++ .set_eee = ixgbe_set_eee, ++#endif /* ETHTOOL_SEEE */ ++#ifdef ETHTOOL_SCHANNELS + .get_channels = ixgbe_get_channels, + .set_channels = ixgbe_set_channels, ++#endif ++#ifdef ETHTOOL_GMODULEINFO ++ .get_module_info = ixgbe_get_module_info, ++ .get_module_eeprom = ixgbe_get_module_eeprom, ++#endif ++#ifdef HAVE_ETHTOOL_GET_TS_INFO ++ .get_ts_info = ixgbe_get_ts_info, ++#endif ++#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) ++ .get_rxfh_indir_size = ixgbe_rss_indir_size, ++ .get_rxfh_key_size = ixgbe_get_rxfh_key_size, ++ .get_rxfh = ixgbe_get_rxfh, ++ .set_rxfh = ixgbe_set_rxfh, ++#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ ++}; ++ ++#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++static const struct ethtool_ops_ext ixgbe_ethtool_ops_ext = { ++ .size = sizeof(struct ethtool_ops_ext), + .get_ts_info = ixgbe_get_ts_info, ++ .set_phys_id = ixgbe_set_phys_id, ++ .get_channels = ixgbe_get_channels, ++ .set_channels = ixgbe_set_channels, ++#ifdef ETHTOOL_GMODULEINFO + .get_module_info = 
ixgbe_get_module_info, + .get_module_eeprom = ixgbe_get_module_eeprom, ++#endif ++#if defined(ETHTOOL_GRSSH) && defined(ETHTOOL_SRSSH) ++ .get_rxfh_indir_size = ixgbe_rss_indir_size, ++ .get_rxfh_key_size = ixgbe_get_rxfh_key_size, ++ .get_rxfh = ixgbe_get_rxfh, ++ .set_rxfh = ixgbe_set_rxfh, ++#endif /* ETHTOOL_GRSSH && ETHTOOL_SRSSH */ ++#ifdef ETHTOOL_GEEE ++ .get_eee = ixgbe_get_eee, ++#endif /* ETHTOOL_GEEE */ ++#ifdef ETHTOOL_SEEE ++ .set_eee = ixgbe_set_eee, ++#endif /* ETHTOOL_SEEE */ + }; + ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + void ixgbe_set_ethtool_ops(struct net_device *netdev) + { ++#ifndef ETHTOOL_OPS_COMPAT + netdev->ethtool_ops = &ixgbe_ethtool_ops; ++#else ++ SET_ETHTOOL_OPS(netdev, &ixgbe_ethtool_ops); ++#endif ++ ++#ifdef HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++ set_ethtool_ops_ext(netdev, &ixgbe_ethtool_ops_ext); ++#endif /* HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT */ + } ++#endif /* SIOCETHTOOL */ ++ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +index 25a3dfe..abd12a9 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -27,9 +23,12 @@ + *******************************************************************************/ + + #include "ixgbe.h" ++ ++#if IS_ENABLED(CONFIG_FCOE) ++#if IS_ENABLED(CONFIG_DCB) ++#include "ixgbe_dcb_82599.h" ++#endif /* CONFIG_DCB */ + #include +-#include +-#include + #include + #include + #include +@@ -39,7 +38,7 @@ + + /** + * ixgbe_fcoe_clear_ddp - clear the given ddp context +- * @ddp: ptr to the ixgbe_fcoe_ddp ++ * @ddp - ptr to the ixgbe_fcoe_ddp + * + * Returns : none + * +@@ -70,16 +69,18 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) + int len = 0; + struct ixgbe_fcoe *fcoe; + struct ixgbe_adapter *adapter; ++ struct ixgbe_hw *hw; + struct ixgbe_fcoe_ddp *ddp; + u32 fcbuff; + + if (!netdev) + goto out_ddp_put; + +- if (xid >= IXGBE_FCOE_DDP_MAX) ++ if (xid > netdev->fcoe_ddp_xid) + goto out_ddp_put; + + adapter = netdev_priv(netdev); ++ hw = &adapter->hw; + fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (!ddp->udl) +@@ -88,24 +89,53 @@ int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid) + len = ddp->len; + /* if there an error, force to invalidate ddp context */ + if (ddp->err) { +- spin_lock_bh(&fcoe->lock); +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0); +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW, +- (xid | IXGBE_FCFLTRW_WE)); +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0); +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, +- (xid | IXGBE_FCDMARW_WE)); ++ switch (hw->mac.type) { ++ case ixgbe_mac_X550: ++ /* X550 does not require DDP FCoE lock */ ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), ++ (xid | IXGBE_FCFLTRW_WE)); ++ ++ /* program FCBUFF */ ++ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), 0); ++ ++ /* program FCDMARW */ ++ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), ++ (xid | IXGBE_FCDMARW_WE)); ++ ++ /* read FCBUFF to check context invalidated */ ++ 
IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), ++ (xid | IXGBE_FCDMARW_RE)); ++ fcbuff = IXGBE_READ_REG(hw, ++ IXGBE_FCDDC(2, xid)); ++ break; ++ default: ++ /* other hardware requires DDP FCoE lock */ ++ spin_lock_bh(&fcoe->lock); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FCFLT, 0); ++ IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, ++ (xid | IXGBE_FCFLTRW_WE)); ++ IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, 0); ++ IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, ++ (xid | IXGBE_FCDMARW_WE)); ++ ++ /* read FCBUFF to check context invalidated */ ++ IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, ++ (xid | IXGBE_FCDMARW_RE)); ++ fcbuff = IXGBE_READ_REG(hw, IXGBE_FCBUFF); ++ ++ spin_unlock_bh(&fcoe->lock); ++ break; ++ } + + /* guaranteed to be invalidated after 100us */ +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW, +- (xid | IXGBE_FCDMARW_RE)); +- fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF); +- spin_unlock_bh(&fcoe->lock); + if (fcbuff & IXGBE_FCBUFF_VALID) + udelay(100); + } + if (ddp->sgl) +- dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ddp->sgc, ++ dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, ddp->sgc, + DMA_FROM_DEVICE); + if (ddp->pool) { + dma_pool_free(ddp->pool, ddp->udl, ddp->udp); +@@ -147,11 +177,11 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + u32 fcbuff, fcdmarw, fcfltrw, fcrxctl; + dma_addr_t addr = 0; + +- if (!netdev || !sgl) ++ if (!netdev || !sgl || !sgc) + return 0; + + adapter = netdev_priv(netdev); +- if (xid >= IXGBE_FCOE_DDP_MAX) { ++ if (xid > netdev->fcoe_ddp_xid) { + e_warn(drv, "xid=0x%x out-of-range\n", xid); + return 0; + } +@@ -165,7 +195,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + ddp = &fcoe->ddp[xid]; + if (ddp->sgl) { + e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n", +- xid, ddp->sgl, ddp->sgc); ++ xid, ddp->sgl, ddp->sgc); + return 0; + } + ixgbe_fcoe_clear_ddp(ddp); +@@ -183,7 +213,7 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + } + + /* setup dma from scsi command sgl */ +- dmacount = 
dma_map_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); ++ dmacount = dma_map_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE); + if (dmacount == 0) { + e_err(drv, "xid 0x%x DMA map error\n", xid); + goto out_noddp; +@@ -241,8 +271,9 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + lastsize = thisoff + thislen; + + /* +- * lastsize can not be buffer len. ++ * lastsize can not be bufflen. + * If it is then adding another buffer with lastsize = 1. ++ * Since lastsize is 1 there will be no HW access to this buffer. + */ + if (lastsize == bufflen) { + if (j >= IXGBE_BUFFCNT_MAX) { +@@ -273,7 +304,6 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + + /* program DMA context */ + hw = &adapter->hw; +- spin_lock_bh(&fcoe->lock); + + /* turn on last frame indication for target mode as FCP_RSPtarget is + * supposed to send FCP_RSP when it is done. */ +@@ -284,16 +314,41 @@ static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid, + IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl); + } + +- IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); +- IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); +- IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); +- IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); +- /* program filter context */ +- IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); +- IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); +- IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); ++ switch (hw->mac.type) { ++ case ixgbe_mac_X550: ++ /* X550 does not require DDP lock */ ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(0, xid), ++ ddp->udp & DMA_BIT_MASK(32)); ++ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(1, xid), (u64)ddp->udp >> 32); ++ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(2, xid), fcbuff); ++ IXGBE_WRITE_REG(hw, IXGBE_FCDDC(3, xid), fcdmarw); ++ /* program filter context */ ++ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(0, xid), IXGBE_FCFLT_VALID); ++ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(1, xid), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_FCDFC(3, xid), 
fcfltrw); ++ /* ++ * TBD: SMAC and FCID info not available with current ++ * netdev APIs, add code to pull that from skb later ++ * and then program that here before enabling DDP context. ++ */ ++ break; ++ default: ++ /* other devices require DDP lock with direct DDP context access */ ++ spin_lock_bh(&fcoe->lock); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32)); ++ IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32); ++ IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff); ++ IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw); ++ /* program filter context */ ++ IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0); ++ IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID); ++ IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw); + +- spin_unlock_bh(&fcoe->lock); ++ spin_unlock_bh(&fcoe->lock); ++ break; ++ } + + return 1; + +@@ -302,7 +357,7 @@ out_noddp_free: + ixgbe_fcoe_clear_ddp(ddp); + + out_noddp_unmap: +- dma_unmap_sg(&adapter->pdev->dev, sgl, sgc, DMA_FROM_DEVICE); ++ dma_unmap_sg(pci_dev_to_dev(adapter->pdev), sgl, sgc, DMA_FROM_DEVICE); + out_noddp: + put_cpu(); + return 0; +@@ -328,6 +383,7 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0); + } + ++#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET + /** + * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode + * @netdev: the corresponding net_device +@@ -344,11 +400,12 @@ int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid, + * Returns : 1 for success and 0 for no ddp + */ + int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, +- struct scatterlist *sgl, unsigned int sgc) ++ struct scatterlist *sgl, unsigned int sgc) + { + return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1); + } + ++#endif /* HAVE_NETDEV_OPS_FCOE_DDP_TARGET */ + /** + * ixgbe_fcoe_ddp - check ddp status and mark it done + * @adapter: ixgbe adapter +@@ -357,7 +414,7 @@ int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid, + * + * This checks ddp status. 
+ * +- * Returns : < 0 indicates an error or not a FCiE ddp, 0 indicates ++ * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates + * not passing the skb to ULD, > 0 indicates is the length of data + * being ddped. + */ +@@ -365,11 +422,10 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { +- int rc = -EINVAL; +- struct ixgbe_fcoe *fcoe; ++ struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct ixgbe_fcoe_ddp *ddp; + struct fc_frame_header *fh; +- struct fcoe_crc_eof *crc; ++ int rc = -EINVAL, ddp_max; + __le32 fcerr = ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_FCERR); + __le32 ddp_err; + u32 fctl; +@@ -380,23 +436,28 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, + else + skb->ip_summed = CHECKSUM_UNNECESSARY; + +- if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q)) +- fh = (struct fc_frame_header *)(skb->data + +- sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr)); +- else +- fh = (struct fc_frame_header *)(skb->data + +- sizeof(struct fcoe_hdr)); ++ /* verify header contains at least the FCOE header */ ++ BUG_ON(skb_headlen(skb) < FCOE_HEADER_LEN); ++ ++ fh = (struct fc_frame_header *)(skb->data + sizeof(struct fcoe_hdr)); ++ ++ if (skb->protocol == htons(ETH_P_8021Q)) ++ fh = (struct fc_frame_header *)((char *)fh + VLAN_HLEN); + + fctl = ntoh24(fh->fh_f_ctl); + if (fctl & FC_FC_EX_CTX) +- xid = be16_to_cpu(fh->fh_ox_id); ++ xid = ntohs(fh->fh_ox_id); + else +- xid = be16_to_cpu(fh->fh_rx_id); ++ xid = ntohs(fh->fh_rx_id); + +- if (xid >= IXGBE_FCOE_DDP_MAX) ++ ddp_max = IXGBE_FCOE_DDP_MAX; ++ /* X550 has different DDP Max limit */ ++ if (adapter->hw.mac.type == ixgbe_mac_X550) ++ ddp_max = IXGBE_FCOE_DDP_MAX_X550; ++ ++ if (xid >= ddp_max) + goto ddp_out; + +- fcoe = &adapter->fcoe; + ddp = &fcoe->ddp[xid]; + if (!ddp->udl) + goto ddp_out; +@@ -415,7 +476,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, + break; + /* unmap the sg list when FCPRSP is received */ + case 
cpu_to_le32(IXGBE_RXDADV_STAT_FCSTAT_FCPRSP): +- dma_unmap_sg(&adapter->pdev->dev, ddp->sgl, ++ dma_unmap_sg(pci_dev_to_dev(adapter->pdev), ddp->sgl, + ddp->sgc, DMA_FROM_DEVICE); + ddp->err = ddp_err; + ddp->sgl = NULL; +@@ -443,6 +504,7 @@ int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter, + */ + if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) && + (fctl & FC_FC_END_SEQ)) { ++ struct fcoe_crc_eof *crc; + skb_linearize(skb); + crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc)); + crc->fcoe_eof = FC_EOF_T; +@@ -471,13 +533,16 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, + u32 fcoe_sof_eof = 0; + u32 mss_l4len_idx; + u8 sof, eof; ++ u32 type_tucmd = IXGBE_ADVTXT_TUCMD_FCOE; + +- if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) { +- dev_err(tx_ring->dev, "Wrong gso type %d:expecting SKB_GSO_FCOE\n", +- skb_shinfo(skb)->gso_type); ++#ifdef NETIF_F_FSO ++ if (skb_is_gso(skb) && skb_shinfo(skb)->gso_type != SKB_GSO_FCOE) { ++ dev_err(tx_ring->dev, "Wrong gso type %d:expecting " ++ "SKB_GSO_FCOE\n", skb_shinfo(skb)->gso_type); + return -EINVAL; + } + ++#endif + /* resets the header to point fcoe/fc */ + skb_set_network_header(skb, skb->mac_len); + skb_set_transport_header(skb, skb->mac_len + +@@ -546,6 +611,8 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, + skb_shinfo(skb)->gso_size); + first->bytecount += (first->gso_segs - 1) * *hdr_len; + first->tx_flags |= IXGBE_TX_FLAGS_TSO; ++ /* Hardware expects L4T to be RSV for FCoE TSO */ ++ type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_RSV; + } + + /* set flag indicating FCOE to ixgbe_tx_map call */ +@@ -563,7 +630,7 @@ int ixgbe_fso(struct ixgbe_ring *tx_ring, + + /* write context desc */ + ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, fcoe_sof_eof, +- IXGBE_ADVTXT_TUCMD_FCOE, mss_l4len_idx); ++ type_tucmd, mss_l4len_idx); + + return 0; + } +@@ -573,8 +640,7 @@ static void ixgbe_fcoe_dma_pool_free(struct ixgbe_fcoe *fcoe, unsigned int cpu) + struct ixgbe_fcoe_ddp_pool *ddp_pool; + + ddp_pool = per_cpu_ptr(fcoe->ddp_pool, 
cpu); +- if (ddp_pool->pool) +- dma_pool_destroy(ddp_pool->pool); ++ dma_pool_destroy(ddp_pool->pool); + ddp_pool->pool = NULL; + } + +@@ -586,7 +652,7 @@ static int ixgbe_fcoe_dma_pool_alloc(struct ixgbe_fcoe *fcoe, + struct dma_pool *pool; + char pool_name[32]; + +- snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%u", cpu); ++ snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu); + + pool = dma_pool_create(pool_name, dev, IXGBE_FCPTR_MAX, + IXGBE_FCPTR_ALIGN, PAGE_SIZE); +@@ -613,10 +679,12 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) + { + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; + struct ixgbe_hw *hw = &adapter->hw; +- int i, fcoe_q, fcoe_i; ++ int i, fcoe_i; ++ u32 fcoe_q, fcoe_q_h = 0; + u32 etqf; ++ int fcreta_size; + +- /* Minimal functionality for FCoE requires at least CRC offloads */ ++ /* Minimal funcionality for FCoE requires at least CRC offloads */ + if (!(adapter->netdev->features & NETIF_F_FCOE_CRC)) + return; + +@@ -629,15 +697,27 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE), etqf); + IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0); + +- /* leave registers un-configured if FCoE is disabled */ ++ /* leave remaining registers unconfigued if FCoE is disabled */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return; + + /* Use one or more Rx queues for FCoE by redirection table */ +- for (i = 0; i < IXGBE_FCRETA_SIZE; i++) { ++ fcreta_size = IXGBE_FCRETA_SIZE; ++ if (adapter->hw.mac.type == ixgbe_mac_X550) ++ fcreta_size = IXGBE_FCRETA_SIZE_X550; ++ ++ for (i = 0; i < fcreta_size; i++) { ++ if (adapter->hw.mac.type == ixgbe_mac_X550) { ++ int fcoe_i_h = fcoe->offset + ((i + fcreta_size) % ++ fcoe->indices); ++ fcoe_q_h = adapter->rx_ring[fcoe_i_h]->reg_idx; ++ fcoe_q_h = (fcoe_q_h << IXGBE_FCRETA_ENTRY_HIGH_SHIFT) & ++ IXGBE_FCRETA_ENTRY_HIGH_MASK; ++ } + fcoe_i = fcoe->offset + (i % fcoe->indices); + fcoe_i &= 
IXGBE_FCRETA_ENTRY_MASK; + fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx; ++ fcoe_q |= fcoe_q_h; + IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q); + } + IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA); +@@ -673,19 +753,24 @@ void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter) + void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) + { + struct ixgbe_fcoe *fcoe = &adapter->fcoe; +- int cpu, i; ++ int cpu, i, ddp_max; + + /* do nothing if no DDP pools were allocated */ + if (!fcoe->ddp_pool) + return; + +- for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++) ++ ddp_max = IXGBE_FCOE_DDP_MAX; ++ /* X550 has different DDP Max limit */ ++ if (adapter->hw.mac.type == ixgbe_mac_X550) ++ ddp_max = IXGBE_FCOE_DDP_MAX_X550; ++ ++ for (i = 0; i < ddp_max; i++) + ixgbe_fcoe_ddp_put(adapter->netdev, i); + + for_each_possible_cpu(cpu) + ixgbe_fcoe_dma_pool_free(fcoe, cpu); + +- dma_unmap_single(&adapter->pdev->dev, ++ dma_unmap_single(pci_dev_to_dev(adapter->pdev), + fcoe->extra_ddp_buffer_dma, + IXGBE_FCBUFF_MIN, + DMA_FROM_DEVICE); +@@ -706,7 +791,7 @@ void ixgbe_free_fcoe_ddp_resources(struct ixgbe_adapter *adapter) + int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) + { + struct ixgbe_fcoe *fcoe = &adapter->fcoe; +- struct device *dev = &adapter->pdev->dev; ++ struct device *dev = pci_dev_to_dev(adapter->pdev); + void *buffer; + dma_addr_t dma; + unsigned int cpu; +@@ -717,8 +802,10 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) + + /* Extra buffer to be shared by all DDPs for HW work around */ + buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC); +- if (!buffer) ++ if (!buffer) { ++ e_err(drv, "failed to allocate extra DDP buffer\n"); + return -ENOMEM; ++ } + + dma = dma_map_single(dev, buffer, IXGBE_FCBUFF_MIN, DMA_FROM_DEVICE); + if (dma_mapping_error(dev, dma)) { +@@ -744,7 +831,11 @@ int ixgbe_setup_fcoe_ddp_resources(struct ixgbe_adapter *adapter) + return 0; + } + ++#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE ++int 
ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) ++#else + static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) ++#endif + { + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + +@@ -760,10 +851,18 @@ static int ixgbe_fcoe_ddp_enable(struct ixgbe_adapter *adapter) + + adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1; + ++ /* X550 has different DDP Max limit */ ++ if (adapter->hw.mac.type == ixgbe_mac_X550) ++ adapter->netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX_X550 - 1; ++ + return 0; + } + ++#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE ++void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) ++#else + static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) ++#endif + { + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + +@@ -776,6 +875,7 @@ static void ixgbe_fcoe_ddp_disable(struct ixgbe_adapter *adapter) + fcoe->ddp_pool = NULL; + } + ++#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE + /** + * ixgbe_fcoe_enable - turn on FCoE offload feature + * @netdev: the corresponding netdev +@@ -863,7 +963,28 @@ int ixgbe_fcoe_disable(struct net_device *netdev) + + return 0; + } ++#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ + ++#if IS_ENABLED(CONFIG_DCB) ++#ifdef HAVE_DCBNL_OPS_GETAPP ++/** ++ * ixgbe_fcoe_getapp - retrieves current user priority bitmap for FCoE ++ * @netdev: the corresponding net_device ++ * ++ * Finds out the corresponding user priority bitmap from the current ++ * traffic class that FCoE belongs to. Returns 0 as the invalid user ++ * priority bitmap to indicate an error. 
++ * ++ * Returns : 802.1p user priority bitmap for FCoE ++ */ ++u8 ixgbe_fcoe_getapp(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ return 1 << adapter->fcoe.up; ++} ++#endif /* HAVE_DCBNL_OPS_GETAPP */ ++#endif /* CONFIG_DCB */ ++#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN + /** + * ixgbe_fcoe_get_wwn - get world wide name for the node or the port + * @netdev : ixgbe adapter +@@ -908,89 +1029,7 @@ int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type) + return rc; + } + +-/** +- * ixgbe_fcoe_get_hbainfo - get FCoE HBA information +- * @netdev : ixgbe adapter +- * @info : HBA information +- * +- * Returns ixgbe HBA information +- * +- * Returns : 0 on success +- */ +-int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, +- struct netdev_fcoe_hbainfo *info) +-{ +- struct ixgbe_adapter *adapter = netdev_priv(netdev); +- struct ixgbe_hw *hw = &adapter->hw; +- int i, pos; +- u8 buf[8]; +- +- if (!info) +- return -EINVAL; +- +- /* Don't return information on unsupported devices */ +- if (hw->mac.type != ixgbe_mac_82599EB && +- hw->mac.type != ixgbe_mac_X540) +- return -EINVAL; +- +- /* Manufacturer */ +- snprintf(info->manufacturer, sizeof(info->manufacturer), +- "Intel Corporation"); +- +- /* Serial Number */ +- +- /* Get the PCI-e Device Serial Number Capability */ +- pos = pci_find_ext_capability(adapter->pdev, PCI_EXT_CAP_ID_DSN); +- if (pos) { +- pos += 4; +- for (i = 0; i < 8; i++) +- pci_read_config_byte(adapter->pdev, pos + i, &buf[i]); +- +- snprintf(info->serial_number, sizeof(info->serial_number), +- "%02X%02X%02X%02X%02X%02X%02X%02X", +- buf[7], buf[6], buf[5], buf[4], +- buf[3], buf[2], buf[1], buf[0]); +- } else +- snprintf(info->serial_number, sizeof(info->serial_number), +- "Unknown"); +- +- /* Hardware Version */ +- snprintf(info->hardware_version, +- sizeof(info->hardware_version), +- "Rev %d", hw->revision_id); +- /* Driver Name/Version */ +- snprintf(info->driver_version, +- 
sizeof(info->driver_version), +- "%s v%s", +- ixgbe_driver_name, +- ixgbe_driver_version); +- /* Firmware Version */ +- snprintf(info->firmware_version, +- sizeof(info->firmware_version), +- "0x%08x", +- (adapter->eeprom_verh << 16) | +- adapter->eeprom_verl); +- +- /* Model */ +- if (hw->mac.type == ixgbe_mac_82599EB) { +- snprintf(info->model, +- sizeof(info->model), +- "Intel 82599"); +- } else { +- snprintf(info->model, +- sizeof(info->model), +- "Intel X540"); +- } +- +- /* Model Description */ +- snprintf(info->model_description, +- sizeof(info->model_description), +- "%s", +- ixgbe_default_device_descr); +- +- return 0; +-} +- ++#endif /* HAVE_NETDEV_OPS_FCOE_GETWWN */ + /** + * ixgbe_fcoe_get_tc - get the current TC that fcoe is mapped to + * @adapter - pointer to the device adapter structure +@@ -999,9 +1038,6 @@ int ixgbe_fcoe_get_hbainfo(struct net_device *netdev, + */ + u8 ixgbe_fcoe_get_tc(struct ixgbe_adapter *adapter) + { +-#ifdef CONFIG_IXGBE_DCB + return netdev_get_prio_tc_map(adapter->netdev, adapter->fcoe.up); +-#else +- return 0; +-#endif + } ++#endif /* CONFIG_FCOE */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +index 0772b77..08de8d3 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_fcoe.h +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -26,8 +22,10 @@ + + *******************************************************************************/ + +-#ifndef _IXGBE_FCOE_H +-#define _IXGBE_FCOE_H ++#ifndef _IXGBE_FCOE_H_ ++#define _IXGBE_FCOE_H_ ++ ++#if IS_ENABLED(CONFIG_FCOE) + + #include + #include +@@ -38,7 +36,7 @@ + /* ddp user buffer */ + #define IXGBE_BUFFCNT_MAX 256 /* 8 bits bufcnt */ + #define IXGBE_FCPTR_ALIGN 16 +-#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) ++#define IXGBE_FCPTR_MAX (IXGBE_BUFFCNT_MAX * sizeof(dma_addr_t)) + #define IXGBE_FCBUFF_4KB 0x0 + #define IXGBE_FCBUFF_8KB 0x1 + #define IXGBE_FCBUFF_16KB 0x2 +@@ -46,12 +44,18 @@ + #define IXGBE_FCBUFF_MAX 65536 /* 64KB max */ + #define IXGBE_FCBUFF_MIN 4096 /* 4KB min */ + #define IXGBE_FCOE_DDP_MAX 512 /* 9 bits xid */ ++#define IXGBE_FCOE_DDP_MAX_X550 2048 /* 11 bits xid */ + +-/* Default traffic class to use for FCoE */ +-#define IXGBE_FCOE_DEFTC 3 ++/* Default user priority to use for FCoE */ ++#define IXGBE_FCOE_DEFUP 3 + + /* fcerr */ +-#define IXGBE_FCERR_BADCRC 0x00100000 ++#define IXGBE_FCERR_BADCRC 0x00100000 ++#define IXGBE_FCERR_EOFSOF 0x00200000 ++#define IXGBE_FCERR_NOFIRST 0x00300000 ++#define IXGBE_FCERR_OOOSEQ 0x00400000 ++#define IXGBE_FCERR_NODMA 0x00500000 ++#define IXGBE_FCERR_PKTLOST 0x00600000 + + /* FCoE DDP for target mode */ + #define __IXGBE_FCOE_TARGET 1 +@@ -77,11 +81,13 @@ struct ixgbe_fcoe { + struct ixgbe_fcoe_ddp_pool __percpu *ddp_pool; + atomic_t refcnt; + spinlock_t lock; +- struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX]; ++ struct ixgbe_fcoe_ddp ddp[IXGBE_FCOE_DDP_MAX_X550]; + void *extra_ddp_buffer; + dma_addr_t extra_ddp_buffer_dma; + unsigned long mode; + u8 up; 
++ u8 up_set; + }; ++#endif /* CONFIG_FCOE */ + + #endif /* _IXGBE_FCOE_H */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.c +new file mode 100644 +index 0000000..2786463 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.c +@@ -0,0 +1,210 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe_hv_vf.h" ++ ++/** ++ * Hyper-V variant - just a stub. ++ */ ++s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count, ixgbe_mc_addr_itr next, ++ bool clear) ++{ ++ UNREFERENCED_5PARAMETER(hw, mc_addr_list, mc_addr_count, next, clear); ++ ++ return IXGBE_ERR_FEATURE_NOT_SUPPORTED; ++} ++ ++/** ++ * Hyper-V variant - just a stub. ++ */ ++s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode) ++{ ++ UNREFERENCED_2PARAMETER(hw, xcast_mode); ++ ++ return IXGBE_ERR_FEATURE_NOT_SUPPORTED; ++} ++ ++/** ++ * Hyper-V variant - just a stub. 
++ */ ++s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, ++ bool vlan_on, bool vlvf_bypass) ++{ ++ UNREFERENCED_5PARAMETER(hw, vlan, vind, vlan_on, vlvf_bypass); ++ ++ return IXGBE_ERR_FEATURE_NOT_SUPPORTED; ++} ++ ++s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr) ++{ ++ UNREFERENCED_3PARAMETER(hw, index, addr); ++ ++ return IXGBE_ERR_FEATURE_NOT_SUPPORTED; ++} ++ ++/** ++ * Hyper-V variant; there is no mailbox communication. ++ */ ++s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *link_up, ++ bool autoneg_wait_to_complete) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ struct ixgbe_mac_info *mac = &hw->mac; ++ u32 links_reg; ++ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); ++ ++ /* If we were hit with a reset drop the link */ ++ if (!mbx->ops.check_for_rst(hw, 0) || !mbx->timeout) ++ mac->get_link_status = true; ++ ++ if (!mac->get_link_status) ++ goto out; ++ ++ /* if link status is down no point in checking to see if pf is up */ ++ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); ++ if (!(links_reg & IXGBE_LINKS_UP)) ++ goto out; ++ ++ /* for SFP+ modules and DA cables on 82599 it can take up to 500usecs ++ * before the link status is correct ++ */ ++ if (mac->type == ixgbe_mac_82599_vf) { ++ int i; ++ ++ for (i = 0; i < 5; i++) { ++ udelay(100); ++ links_reg = IXGBE_READ_REG(hw, IXGBE_VFLINKS); ++ ++ if (!(links_reg & IXGBE_LINKS_UP)) ++ goto out; ++ } ++ } ++ ++ switch (links_reg & IXGBE_LINKS_SPEED_82599) { ++ case IXGBE_LINKS_SPEED_10G_82599: ++ *speed = IXGBE_LINK_SPEED_10GB_FULL; ++ if (hw->mac.type >= ixgbe_mac_X550) { ++ if (links_reg & IXGBE_LINKS_SPEED_NON_STD) ++ *speed = IXGBE_LINK_SPEED_2_5GB_FULL; ++ } ++ break; ++ case IXGBE_LINKS_SPEED_1G_82599: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ case IXGBE_LINKS_SPEED_100_82599: ++ *speed = IXGBE_LINK_SPEED_100_FULL; ++ if (hw->mac.type == ixgbe_mac_X550) { ++ if (links_reg & IXGBE_LINKS_SPEED_NON_STD) ++ 
*speed = IXGBE_LINK_SPEED_5GB_FULL; ++ } ++ break; ++ case IXGBE_LINKS_SPEED_10_X550EM_A: ++ *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ /* Reserved for pre-x550 devices */ ++ if (hw->mac.type >= ixgbe_mac_X550) ++ *speed = IXGBE_LINK_SPEED_10_FULL; ++ break; ++ default: ++ *speed = IXGBE_LINK_SPEED_UNKNOWN; ++ } ++ ++ /* if we passed all the tests above then the link is up and we no ++ * longer need to check for link ++ */ ++ mac->get_link_status = false; ++ ++out: ++ *link_up = !mac->get_link_status; ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbevf_hv_set_rlpml_vf - Set the maximum receive packet length ++ * @hw: pointer to the HW structure ++ * @max_size: value to assign to max frame size ++ * Hyper-V variant. ++ **/ ++s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size) ++{ ++ u32 reg; ++ ++ /* If we are on Hyper-V, we implement this functionality ++ * differently. ++ */ ++ reg = IXGBE_READ_REG(hw, IXGBE_VFRXDCTL(0)); ++ /* CRC == 4 */ ++ reg |= ((max_size + 4) | IXGBE_RXDCTL_RLPML_EN); ++ IXGBE_WRITE_REG(hw, IXGBE_VFRXDCTL(0), reg); ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbevf_hv_negotiate_api_version_vf - Negotiate supported API version ++ * @hw: pointer to the HW structure ++ * @api: integer containing requested API version ++ * Hyper-V version - only ixgbe_mbox_api_10 supported. ++ **/ ++int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api) ++{ ++ UNREFERENCED_1PARAMETER(hw); ++ ++ /* Hyper-V only supports api version ixgbe_mbox_api_10 */ ++ if (api != ixgbe_mbox_api_10) ++ return IXGBE_ERR_INVALID_ARGUMENT; ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbevf_hv_init_ops_vf - Initialize the pointers for vf ++ * @hw: pointer to hardware structure ++ * ++ * This will assign function pointers, adapter-specific functions can ++ * override the assignment of generic function pointers by assigning ++ * their own adapter-specific function pointers. ++ * Does not touch the hardware. 
++ **/ ++s32 ixgbevf_hv_init_ops_vf(struct ixgbe_hw *hw) ++{ ++ /* Set defaults for VF then override applicable Hyper-V ++ * specific functions ++ */ ++ ixgbe_init_ops_vf(hw); ++ ++ hw->mac.ops.reset_hw = ixgbevf_hv_reset_hw_vf; ++ hw->mac.ops.check_link = ixgbevf_hv_check_mac_link_vf; ++ hw->mac.ops.negotiate_api_version = ixgbevf_hv_negotiate_api_version_vf; ++ hw->mac.ops.set_rar = ixgbevf_hv_set_rar_vf; ++ hw->mac.ops.update_mc_addr_list = ixgbevf_hv_update_mc_addr_list_vf; ++ hw->mac.ops.update_xcast_mode = ixgbevf_hv_update_xcast_mode; ++ hw->mac.ops.set_uc_addr = ixgbevf_hv_set_uc_addr_vf; ++ hw->mac.ops.set_vfta = ixgbevf_hv_set_vfta_vf; ++ hw->mac.ops.set_rlpml = ixgbevf_hv_set_rlpml_vf; ++ ++ return IXGBE_SUCCESS; ++} +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.h +new file mode 100644 +index 0000000..387ed1f +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_hv_vf.h +@@ -0,0 +1,51 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_HV_VF_H_ ++#define _IXGBE_HV_VF_H_ ++ ++/* On Hyper-V, to reset, we need to read from this offset ++ * from the PCI config space. This is the mechanism used on ++ * Hyper-V to support PF/VF communication. ++ */ ++#define IXGBE_HV_RESET_OFFSET 0x201 ++ ++#include "ixgbe_vf.h" ++ ++s32 ixgbevf_hv_check_mac_link_vf(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up, bool autoneg_wait_to_complete); ++s32 ixgbevf_hv_set_uc_addr_vf(struct ixgbe_hw *hw, u32 index, u8 *addr); ++s32 ixgbevf_hv_update_mc_addr_list_vf(struct ixgbe_hw *hw, u8 *mc_addr_list, ++ u32 mc_addr_count, ixgbe_mc_addr_itr, ++ bool clear); ++s32 ixgbevf_hv_update_xcast_mode(struct ixgbe_hw *hw, int xcast_mode); ++s32 ixgbevf_hv_set_vfta_vf(struct ixgbe_hw *hw, u32 vlan, u32 vind, ++ bool vlan_on, bool vlvf_bypass); ++s32 ixgbevf_hv_set_rlpml_vf(struct ixgbe_hw *hw, u16 max_size); ++int ixgbevf_hv_negotiate_api_version_vf(struct ixgbe_hw *hw, int api); ++ ++extern s32 ixgbevf_hv_reset_hw_vf(struct ixgbe_hw *hw); ++extern s32 ixgbevf_hv_set_rar_vf(struct ixgbe_hw *hw, u32 index, u8 *addr, ++ u32 vmdq, u32 enable_addr); ++#endif /* _IXGBE_HV_VF_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +index 2d9451e..2a0d91d 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_lib.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. 
+ + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -29,52 +25,79 @@ + #include "ixgbe.h" + #include "ixgbe_sriov.h" + +-#ifdef CONFIG_IXGBE_DCB ++#ifdef HAVE_TX_MQ + /** +- * ixgbe_cache_ring_dcb_sriov - Descriptor ring to register mapping for SR-IOV ++ * ixgbe_cache_ring_dcb_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * +- * Cache the descriptor ring offsets for SR-IOV to the assigned rings. It ++ * Cache the descriptor ring offsets for VMDq to the assigned rings. It + * will also try to cache the proper offsets if RSS/FCoE are enabled along + * with VMDq. 
+ * + **/ +-static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) ++static bool ixgbe_cache_ring_dcb_vmdq(struct ixgbe_adapter *adapter) + { +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +-#endif /* IXGBE_FCOE */ ++#endif /* CONFIG_FCOE */ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + int i; + u16 reg_idx; + u8 tcs = netdev_get_num_tc(adapter->netdev); + +- /* verify we have DCB queueing enabled before proceeding */ ++ /* verify we have DCB enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ +- if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) ++ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) + return false; + +- /* start at VMDq register offset for SR-IOV enabled setups */ +- reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); +- for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { +- /* If we are greater than indices move to next pool */ +- if ((reg_idx & ~vmdq->mask) >= tcs) +- reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); +- adapter->rx_ring[i]->reg_idx = reg_idx; +- } ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ /* ++ * The bits on the 82598 are reversed compared to the other ++ * adapters. The DCB bits are the higher order bits and the ++ * lower bits belong to the VMDq pool. 
In order to sort ++ * this out we have to swap the bits to get the correct layout ++ */ ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ reg_idx = ((i >> 3) | (i << 3)) & 0x3F; ++ adapter->rx_ring[i]->reg_idx = reg_idx; ++ } ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ reg_idx = ((i >> 4) | (i << 2)) & 0x1F; ++ adapter->tx_ring[i]->reg_idx = reg_idx; ++ } ++ break; ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ /* start at VMDq register offset for SR-IOV enabled setups */ ++ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); ++ for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { ++ /* If we are greater than indices move to next pool */ ++ if ((reg_idx & ~vmdq->mask) >= tcs) ++ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); ++ adapter->rx_ring[i]->reg_idx = reg_idx; ++ } + +- reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); +- for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { +- /* If we are greater than indices move to next pool */ +- if ((reg_idx & ~vmdq->mask) >= tcs) +- reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); +- adapter->tx_ring[i]->reg_idx = reg_idx; ++ reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); ++ for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { ++ /* If we are greater than indices move to next pool */ ++ if ((reg_idx & ~vmdq->mask) >= tcs) ++ reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); ++ adapter->tx_ring[i]->reg_idx = reg_idx; ++ } ++ ++ break; ++ default: ++ break; + } + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* nothing to do if FCoE is disabled */ + if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED)) + return true; +@@ -102,8 +125,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) + reg_idx++; + } + } ++#endif /* CONFIG_FCOE */ + +-#endif /* IXGBE_FCOE */ + return true; + } + +@@ -111,8 +134,8 @@ static bool ixgbe_cache_ring_dcb_sriov(struct ixgbe_adapter *adapter) + static 
void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, + unsigned int *tx, unsigned int *rx) + { +- struct net_device *dev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; ++ struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); + + *tx = 0; +@@ -126,6 +149,9 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++case ixgbe_mac_X550EM_a: + if (num_tcs > 4) { + /* + * TCs : TC0/1 TC2/3 TC4-7 +@@ -165,12 +191,11 @@ static void ixgbe_get_first_reg_idx(struct ixgbe_adapter *adapter, u8 tc, + **/ + static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) + { +- struct net_device *dev = adapter->netdev; +- unsigned int tx_idx, rx_idx; + int tc, offset, rss_i, i; ++ unsigned int tx_idx, rx_idx; ++ struct net_device *dev = adapter->netdev; + u8 num_tcs = netdev_get_num_tc(dev); + +- /* verify we have DCB queueing enabled before proceeding */ + if (num_tcs <= 1) + return false; + +@@ -189,20 +214,21 @@ static bool ixgbe_cache_ring_dcb(struct ixgbe_adapter *adapter) + return true; + } + +-#endif ++#endif /* HAVE_TX_MQ */ + /** +- * ixgbe_cache_ring_sriov - Descriptor ring to register mapping for sriov ++ * ixgbe_cache_ring_vmdq - Descriptor ring to register mapping for VMDq + * @adapter: board private structure to initialize + * +- * SR-IOV doesn't use any descriptor rings but changes the default if +- * no other mapping is used. ++ * Cache the descriptor ring offsets for VMDq to the assigned rings. It ++ * will also try to cache the proper offsets if RSS/FCoE/SRIOV are enabled along ++ * with VMDq. 
+ * +- */ +-static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) ++ **/ ++static bool ixgbe_cache_ring_vmdq(struct ixgbe_adapter *adapter) + { +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + struct ixgbe_ring_feature *fcoe = &adapter->ring_feature[RING_F_FCOE]; +-#endif /* IXGBE_FCOE */ ++#endif /* CONFIG_FCOE */ + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + struct ixgbe_ring_feature *rss = &adapter->ring_feature[RING_F_RSS]; + int i; +@@ -215,26 +241,26 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) + /* start at VMDq register offset for SR-IOV enabled setups */ + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_rx_queues; i++, reg_idx++) { +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +-#endif ++#endif /* CONFIG_FCOE */ + /* If we are greater than indices move to next pool */ + if ((reg_idx & ~vmdq->mask) >= rss->indices) + reg_idx = __ALIGN_MASK(reg_idx, ~vmdq->mask); + adapter->rx_ring[i]->reg_idx = reg_idx; + } + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_rx_queues; i++, reg_idx++) + adapter->rx_ring[i]->reg_idx = reg_idx; ++#endif /* CONFIG_FCOE */ + +-#endif + reg_idx = vmdq->offset * __ALIGN_MASK(1, ~vmdq->mask); + for (i = 0; i < adapter->num_tx_queues; i++, reg_idx++) { +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* Allow first FCoE queue to be mapped as RSS */ + if (fcoe->offset && (i > fcoe->offset)) + break; +@@ -245,12 +271,11 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) + adapter->tx_ring[i]->reg_idx = reg_idx; + } + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* FCoE uses a linear block of queues so just assigning 1:1 */ + for (; i < adapter->num_tx_queues; i++, reg_idx++) + adapter->tx_ring[i]->reg_idx = 
reg_idx; +- +-#endif ++#endif /* CONFIG_FCOE */ + + return true; + } +@@ -259,7 +284,7 @@ static bool ixgbe_cache_ring_sriov(struct ixgbe_adapter *adapter) + * ixgbe_cache_ring_rss - Descriptor ring to register mapping for RSS + * @adapter: board private structure to initialize + * +- * Cache the descriptor ring offsets for RSS to the assigned rings. ++ * Cache the descriptor ring offsets for RSS, ATR, FCoE, and SR-IOV. + * + **/ + static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) +@@ -268,6 +293,7 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) + + for (i = 0; i < adapter->num_rx_queues; i++) + adapter->rx_ring[i]->reg_idx = i; ++ + for (i = 0; i < adapter->num_tx_queues; i++) + adapter->tx_ring[i]->reg_idx = i; + +@@ -287,78 +313,94 @@ static bool ixgbe_cache_ring_rss(struct ixgbe_adapter *adapter) + **/ + static void ixgbe_cache_ring_register(struct ixgbe_adapter *adapter) + { +- /* start with default case */ +- adapter->rx_ring[0]->reg_idx = 0; +- adapter->tx_ring[0]->reg_idx = 0; +- +-#ifdef CONFIG_IXGBE_DCB +- if (ixgbe_cache_ring_dcb_sriov(adapter)) ++#ifdef HAVE_TX_MQ ++ if (ixgbe_cache_ring_dcb_vmdq(adapter)) + return; + + if (ixgbe_cache_ring_dcb(adapter)) + return; + + #endif +- if (ixgbe_cache_ring_sriov(adapter)) ++ if (ixgbe_cache_ring_vmdq(adapter)) + return; + + ixgbe_cache_ring_rss(adapter); + } + ++#define IXGBE_RSS_64Q_MASK 0x3F + #define IXGBE_RSS_16Q_MASK 0xF + #define IXGBE_RSS_8Q_MASK 0x7 + #define IXGBE_RSS_4Q_MASK 0x3 + #define IXGBE_RSS_2Q_MASK 0x1 + #define IXGBE_RSS_DISABLED_MASK 0x0 + +-#ifdef CONFIG_IXGBE_DCB ++#ifdef HAVE_TX_MQ + /** +- * ixgbe_set_dcb_sriov_queues: Allocate queues for SR-IOV devices w/ DCB ++ * ixgbe_set_dcb_vmdq_queues: Allocate queues for VMDq devices w/ DCB + * @adapter: board private structure to initialize + * +- * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues ++ * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools 
where appropriate. Also assign queues based on DCB + * priorities and map accordingly.. + * + **/ +-static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) ++static bool ixgbe_set_dcb_vmdq_queues(struct ixgbe_adapter *adapter) + { + int i; + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + u16 fcoe_i = 0; + #endif + u8 tcs = netdev_get_num_tc(adapter->netdev); + +- /* verify we have DCB queueing enabled before proceeding */ ++ /* verify we have DCB enabled before proceeding */ + if (tcs <= 1) + return false; + + /* verify we have VMDq enabled before proceeding */ +- if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) ++ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) + return false; + +- /* Add starting offset to total pool count */ +- vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ /* 4 pools w/ 8TC per pool */ ++ vmdq_i = min_t(u16, vmdq_i, 4); ++ vmdq_m = 0x7; ++ break; ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ /* Add starting offset to total pool count */ ++ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; ++ ++ /* 16 pools w/ 8 TC per pool */ ++ if (tcs > 4) { ++ vmdq_i = min_t(u16, vmdq_i, 16); ++ vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; ++ /* 32 pools w/ 4 TC per pool */ ++ } else { ++ vmdq_i = min_t(u16, vmdq_i, 32); ++ vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; ++ } + +- /* 16 pools w/ 8 TC per pool */ +- if (tcs > 4) { +- vmdq_i = min_t(u16, vmdq_i, 16); +- vmdq_m = IXGBE_82599_VMDQ_8Q_MASK; +- /* 32 pools w/ 4 TC per pool */ +- } else { +- vmdq_i = min_t(u16, vmdq_i, 32); +- vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; +- } ++#if IS_ENABLED(CONFIG_FCOE) ++ /* queues in the remaining pools are available for FCoE */ ++ fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; ++#endif /* CONFIG_FCOE */ + +-#ifdef IXGBE_FCOE +- /* 
queues in the remaining pools are available for FCoE */ +- fcoe_i = (128 / __ALIGN_MASK(1, ~vmdq_m)) - vmdq_i; ++ /* remove the starting offset from the pool count */ ++ vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + +-#endif +- /* remove the starting offset from the pool count */ +- vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; ++ break; ++ default: ++ /* unknown hardware, only support one pool w/ one queue */ ++ vmdq_i = 1; ++ tcs = 1; ++ break; ++ } + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; +@@ -371,16 +413,16 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) + adapter->ring_feature[RING_F_RSS].indices = 1; + adapter->ring_feature[RING_F_RSS].mask = IXGBE_RSS_DISABLED_MASK; + +- /* disable ATR as it is not supported when VMDq is enabled */ +- adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; +- + adapter->num_rx_pools = vmdq_i; + adapter->num_rx_queues_per_pool = tcs; + + adapter->num_tx_queues = vmdq_i * tcs; + adapter->num_rx_queues = vmdq_i * tcs; + +-#ifdef IXGBE_FCOE ++ /* disable ATR as it is not supported when VMDq is enabled */ ++ adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ ++#if IS_ENABLED(CONFIG_FCOE) + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *fcoe; + +@@ -408,8 +450,8 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) + fcoe->offset = 0; + } + } ++#endif /* CONFIG_FCOE */ + +-#endif /* IXGBE_FCOE */ + /* configure TC to queue mapping */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(adapter->netdev, i, 1, i); +@@ -417,6 +459,17 @@ static bool ixgbe_set_dcb_sriov_queues(struct ixgbe_adapter *adapter) + return true; + } + ++/** ++ * ixgbe_set_dcb_queues: Allocate queues for a DCB-enabled device ++ * @adapter: board private structure to initialize ++ * ++ * When DCB (Data Center Bridging) is enabled, allocate queues for ++ * each traffic class. 
If multiqueue isn't available,then abort DCB ++ * initialization. ++ * ++ * This function handles all combinations of DCB, RSS, and FCoE. ++ * ++ **/ + static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) + { + struct net_device *dev = adapter->netdev; +@@ -427,12 +480,15 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) + /* Map queue offset and counts onto allocated tx queues */ + tcs = netdev_get_num_tc(dev); + +- /* verify we have DCB queueing enabled before proceeding */ + if (tcs <= 1) + return false; + + /* determine the upper limit for our current DCB mode */ ++#ifndef HAVE_NETDEV_SELECT_QUEUE ++ rss_i = adapter->indices; ++#else + rss_i = dev->num_tx_queues / tcs; ++#endif + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + /* 8 TC w/ 4 queues per TC */ + rss_i = min_t(u16, rss_i, 4); +@@ -449,16 +505,17 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) + + /* set RSS mask and indices */ + f = &adapter->ring_feature[RING_F_RSS]; +- rss_i = min_t(int, rss_i, f->limit); ++ rss_i = min_t(u16, rss_i, f->limit); + f->indices = rss_i; + f->mask = rss_m; + +- /* disable ATR as it is not supported when multiple TCs are enabled */ ++ /* disable ATR as it is not supported when DCB is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +-#ifdef IXGBE_FCOE +- /* FCoE enabled queues require special configuration indexed +- * by feature specific indices and offset. Here we map FCoE ++#if IS_ENABLED(CONFIG_FCOE) ++ /* ++ * FCoE enabled queues require special configuration indexed ++ * by feature specific indices and mask. Here we map FCoE + * indices onto the DCB queue pairs allowing FCoE to own + * configuration later. 
+ */ +@@ -469,8 +526,8 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) + f->indices = min_t(u16, rss_i, f->limit); + f->offset = rss_i * tc; + } ++#endif /* CONFIG_FCOE */ + +-#endif /* IXGBE_FCOE */ + for (i = 0; i < tcs; i++) + netdev_set_tc_queue(dev, i, rss_i, rss_i * i); + +@@ -482,54 +539,81 @@ static bool ixgbe_set_dcb_queues(struct ixgbe_adapter *adapter) + + #endif + /** +- * ixgbe_set_sriov_queues - Allocate queues for SR-IOV devices ++ * ixgbe_set_vmdq_queues: Allocate queues for VMDq devices + * @adapter: board private structure to initialize + * +- * When SR-IOV (Single Root IO Virtualiztion) is enabled, allocate queues ++ * When VMDq (Virtual Machine Devices queue) is enabled, allocate queues + * and VM pools where appropriate. If RSS is available, then also try and + * enable RSS and map accordingly. + * + **/ +-static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) ++static bool ixgbe_set_vmdq_queues(struct ixgbe_adapter *adapter) + { + u16 vmdq_i = adapter->ring_feature[RING_F_VMDQ].limit; + u16 vmdq_m = 0; + u16 rss_i = adapter->ring_feature[RING_F_RSS].limit; + u16 rss_m = IXGBE_RSS_DISABLED_MASK; +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + u16 fcoe_i = 0; + #endif +- bool pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); + +- /* only proceed if SR-IOV is enabled */ +- if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) ++ /* only proceed if VMDq is enabled */ ++ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) + return false; + +- /* Add starting offset to total pool count */ +- vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; +- +- /* double check we are limited to maximum pools */ +- vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ vmdq_i = min_t(u16, vmdq_i, 16); ++ /* 16 pool mode with 1 queue per pool */ ++ if ((vmdq_i > 4) || (rss_i == 1)) { ++ vmdq_m = 0x0F; ++ rss_i = 1; ++ /* 4 pool mode with 8 queue per pool */ ++ } 
else { ++ vmdq_m = 0x18; ++ rss_m = IXGBE_RSS_8Q_MASK; ++ rss_i = min_t(u16, rss_i, 8); ++ } ++ break; ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ /* Add starting offset to total pool count */ ++ vmdq_i += adapter->ring_feature[RING_F_VMDQ].offset; ++ ++ /* double check we are limited to maximum pools */ ++ vmdq_i = min_t(u16, IXGBE_MAX_VMDQ_INDICES, vmdq_i); ++ ++ /* 64 pool mode with 2 queues per pool */ ++ if (vmdq_i > 32) { ++ vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; ++ rss_m = IXGBE_RSS_2Q_MASK; ++ rss_i = min_t(u16, rss_i, 2); ++ /* 32 pool mode with up to 4 queues per pool */ ++ } else { ++ vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; ++ rss_m = IXGBE_RSS_4Q_MASK; ++ /* We can support 4, 2, or 1 queues */ ++ rss_i = (rss_i > 3) ? 4 : (rss_i > 1) ? 2 : 1; ++ } + +- /* 64 pool mode with 2 queues per pool */ +- if ((vmdq_i > 32) || (rss_i < 4) || (vmdq_i > 16 && pools)) { +- vmdq_m = IXGBE_82599_VMDQ_2Q_MASK; +- rss_m = IXGBE_RSS_2Q_MASK; +- rss_i = min_t(u16, rss_i, 2); +- /* 32 pool mode with 4 queues per pool */ +- } else { +- vmdq_m = IXGBE_82599_VMDQ_4Q_MASK; +- rss_m = IXGBE_RSS_4Q_MASK; +- rss_i = 4; +- } ++#if IS_ENABLED(CONFIG_FCOE) ++ /* queues in the remaining pools are available for FCoE */ ++ fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); ++#endif + +-#ifdef IXGBE_FCOE +- /* queues in the remaining pools are available for FCoE */ +- fcoe_i = 128 - (vmdq_i * __ALIGN_MASK(1, ~vmdq_m)); ++ /* remove the starting offset from the pool count */ ++ vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; + +-#endif +- /* remove the starting offset from the pool count */ +- vmdq_i -= adapter->ring_feature[RING_F_VMDQ].offset; ++ break; ++ default: ++ /* unknown hardware, support one pool w/ one queue */ ++ vmdq_i = 1; ++ rss_i = 1; ++ break; ++ } + + /* save features for later use */ + adapter->ring_feature[RING_F_VMDQ].indices = vmdq_i; +@@ -543,12 +627,16 @@ static bool 
ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) + adapter->num_rx_queues_per_pool = rss_i; + + adapter->num_rx_queues = vmdq_i * rss_i; ++#ifdef HAVE_TX_MQ + adapter->num_tx_queues = vmdq_i * rss_i; ++#else ++ adapter->num_tx_queues = vmdq_i; ++#endif /* HAVE_TX_MQ */ + + /* disable ATR as it is not supported when VMDq is enabled */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* + * FCoE can use rings from adjacent buffers to allow RSS + * like behavior. To account for this we need to add the +@@ -577,7 +665,6 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) + /* attempt to reserve some queues for just FCoE */ + fcoe->indices = min_t(u16, fcoe_i, fcoe->limit); + fcoe->offset = fcoe_i - fcoe->indices; +- + fcoe_i -= rss_i; + } + +@@ -585,13 +672,13 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) + adapter->num_tx_queues += fcoe_i; + adapter->num_rx_queues += fcoe_i; + } ++#endif /* CONFIG_FCOE */ + +-#endif + return true; + } + + /** +- * ixgbe_set_rss_queues - Allocate queues for RSS ++ * ixgbe_set_rss_queues: Allocate queues for RSS + * @adapter: board private structure to initialize + * + * This is our "base" multiqueue mode. 
RSS (Receive Side Scaling) will try +@@ -600,6 +687,7 @@ static bool ixgbe_set_sriov_queues(struct ixgbe_adapter *adapter) + **/ + static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) + { ++ struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *f; + u16 rss_i; + +@@ -608,7 +696,10 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) + rss_i = f->limit; + + f->indices = rss_i; +- f->mask = IXGBE_RSS_16Q_MASK; ++ if (hw->mac.type < ixgbe_mac_X550) ++ f->mask = IXGBE_RSS_16Q_MASK; ++ else ++ f->mask = IXGBE_RSS_64Q_MASK; + + /* disable ATR by default, it will be configured below */ + adapter->flags &= ~IXGBE_FLAG_FDIR_HASH_CAPABLE; +@@ -627,7 +718,7 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) + adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; + } + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* + * FCoE can exist on the same rings as standard network traffic + * however it is preferred to avoid that if possible. In order +@@ -655,16 +746,18 @@ static bool ixgbe_set_rss_queues(struct ixgbe_adapter *adapter) + f->offset = fcoe_i - f->indices; + rss_i = max_t(u16, fcoe_i, rss_i); + } ++#endif /* CONFIG_FCOE */ + +-#endif /* IXGBE_FCOE */ + adapter->num_rx_queues = rss_i; ++#ifdef HAVE_TX_MQ + adapter->num_tx_queues = rss_i; ++#endif + + return true; + } + +-/** +- * ixgbe_set_num_queues - Allocate queues for device, feature dependent ++/* ++ * ixgbe_set_num_queues: Allocate queues for device, feature dependent + * @adapter: board private structure to initialize + * + * This is the top level queue allocation routine. 
The order here is very +@@ -682,62 +775,102 @@ static void ixgbe_set_num_queues(struct ixgbe_adapter *adapter) + adapter->num_rx_pools = adapter->num_rx_queues; + adapter->num_rx_queues_per_pool = 1; + +-#ifdef CONFIG_IXGBE_DCB +- if (ixgbe_set_dcb_sriov_queues(adapter)) ++#ifdef HAVE_TX_MQ ++ if (ixgbe_set_dcb_vmdq_queues(adapter)) + return; + + if (ixgbe_set_dcb_queues(adapter)) + return; + + #endif +- if (ixgbe_set_sriov_queues(adapter)) ++ if (ixgbe_set_vmdq_queues(adapter)) + return; + + ixgbe_set_rss_queues(adapter); + } + +-static void ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter, +- int vectors) ++/** ++ * ixgbe_acquire_msix_vectors - acquire MSI-X vectors ++ * @adapter: board private structure ++ * ++ * Attempts to acquire a suitable range of MSI-X vector interrupts. Will ++ * return a negative error code if unable to acquire MSI-X vectors for any ++ * reason. ++ */ ++static int ixgbe_acquire_msix_vectors(struct ixgbe_adapter *adapter) + { +- int vector_threshold; ++ struct ixgbe_hw *hw = &adapter->hw; ++ int i, vectors, vector_threshold; + +- /* We'll want at least 2 (vector_threshold): +- * 1) TxQ[0] + RxQ[0] handler +- * 2) Other (Link Status Change, etc.) ++ if (!(adapter->flags & IXGBE_FLAG_MSIX_CAPABLE)) ++ return -EOPNOTSUPP; ++ ++ /* We start by asking for one vector per queue pair */ ++ vectors = max(adapter->num_rx_queues, adapter->num_tx_queues); ++ ++ /* It is easy to be greedy for MSI-X vectors. However, it really ++ * doesn't do much good if we have a lot more vectors than CPUs. We'll ++ * be somewhat conservative and only ask for (roughly) the same number ++ * of vectors as there are CPUs. + */ +- vector_threshold = MIN_MSIX_COUNT; ++ vectors = min_t(int, vectors, num_online_cpus()); + +- /* +- * The more we get, the more we will assign to Tx/Rx Cleanup +- * for the separate queues...where Rx Cleanup >= Tx Cleanup. +- * Right now, we simply care about how many we'll get; we'll +- * set them up later while requesting irq's. 
++ /* Some vectors are necessary for non-queue interrupts */ ++ vectors += NON_Q_VECTORS; ++ ++ /* Hardware can only support a maximum of hw.mac->max_msix_vectors. ++ * With features such as RSS and VMDq, we can easily surpass the ++ * number of Rx and Tx descriptor queues supported by our device. ++ * Thus, we cap the maximum in the rare cases where the CPU count also ++ * exceeds our vector limit + */ ++ vectors = min_t(int, vectors, hw->mac.max_msix_vectors); ++ ++ /* We want a minimum of two MSI-X vectors for (1) a TxQ[0] + RxQ[0] ++ * handler, and (2) an Other (Link Status Change, etc.) handler. ++ */ ++ vector_threshold = MIN_MSIX_COUNT; ++ ++ adapter->msix_entries = kcalloc(vectors, ++ sizeof(struct msix_entry), ++ GFP_KERNEL); ++ if (!adapter->msix_entries) ++ return -ENOMEM; ++ ++ for (i = 0; i < vectors; i++) ++ adapter->msix_entries[i].entry = i; ++ + vectors = pci_enable_msix_range(adapter->pdev, adapter->msix_entries, + vector_threshold, vectors); + + if (vectors < 0) { +- /* Can't allocate enough MSI-X interrupts? Oh well. +- * This just means we'll go with either a single MSI +- * vector or fall back to legacy interrupts. +- */ +- netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, +- "Unable to allocate MSI-X interrupts\n"); ++ /* A negative count of allocated vectors indicates an error in ++ * acquiring within the specified range of MSI-X vectors */ ++ e_dev_warn("Failed to allocate MSI-X interrupts. Err: %d\n", ++ vectors); ++ + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; + kfree(adapter->msix_entries); + adapter->msix_entries = NULL; +- } else { +- adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; /* Woot! */ +- /* +- * Adjust for only the vectors we'll use, which is minimum +- * of max_msix_q_vectors + NON_Q_VECTORS, or the number of +- * vectors we were allocated. 
+- */ +- vectors -= NON_Q_VECTORS; +- adapter->num_q_vectors = min(vectors, adapter->max_q_vectors); ++ ++ return vectors; + } ++ ++ /* we successfully allocated some number of vectors within our ++ * requested range. ++ */ ++ adapter->flags |= IXGBE_FLAG_MSIX_ENABLED; ++ ++ /* Adjust for only the vectors we'll use, which is minimum ++ * of max_q_vectors, or the number of vectors we were allocated. ++ */ ++ vectors -= NON_Q_VECTORS; ++ adapter->num_q_vectors = min_t(int, vectors, adapter->max_q_vectors); ++ ++ return 0; + } + ++ + static void ixgbe_add_ring(struct ixgbe_ring *ring, + struct ixgbe_ring_container *head) + { +@@ -759,23 +892,27 @@ static void ixgbe_add_ring(struct ixgbe_ring *ring, + * We allocate one q_vector. If allocation fails we return -ENOMEM. + **/ + static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, +- int v_count, int v_idx, +- int txr_count, int txr_idx, +- int rxr_count, int rxr_idx) ++ unsigned int v_count, unsigned int v_idx, ++ unsigned int txr_count, unsigned int txr_idx, ++ unsigned int rxr_count, unsigned int rxr_idx) + { + struct ixgbe_q_vector *q_vector; + struct ixgbe_ring *ring; +- int node = NUMA_NO_NODE; ++ int node = -1; ++#ifdef HAVE_IRQ_AFFINITY_HINT + int cpu = -1; +- int ring_count, size; + u8 tcs = netdev_get_num_tc(adapter->netdev); ++#endif ++ int ring_count, size; + ++ /* note this will allocate space for the ring structure as well! 
*/ + ring_count = txr_count + rxr_count; + size = sizeof(struct ixgbe_q_vector) + + (sizeof(struct ixgbe_ring) * ring_count); + ++#ifdef HAVE_IRQ_AFFINITY_HINT + /* customize cpu for Flow Director mapping */ +- if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { ++ if ((tcs <= 1) && !(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) { + u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; + if (rss_i > 1 && adapter->atr_sample_rate) { + if (cpu_online(v_idx)) { +@@ -785,6 +922,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + } + } + ++#endif + /* allocate q_vector and rings */ + q_vector = kzalloc_node(size, GFP_KERNEL, node); + if (!q_vector) +@@ -793,20 +931,29 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + return -ENOMEM; + + /* setup affinity mask and node */ ++#ifdef HAVE_IRQ_AFFINITY_HINT + if (cpu != -1) + cpumask_set_cpu(cpu, &q_vector->affinity_mask); ++#endif + q_vector->numa_node = node; + +-#ifdef CONFIG_IXGBE_DCA + /* initialize CPU for DCA */ + q_vector->cpu = -1; + +-#endif + /* initialize NAPI */ + netif_napi_add(adapter->netdev, &q_vector->napi, + ixgbe_poll, 64); ++#ifndef HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD ++#ifdef HAVE_NDO_BUSY_POLL + napi_hash_add(&q_vector->napi); ++#endif ++#endif + ++#ifdef HAVE_NDO_BUSY_POLL ++ /* initialize busy poll */ ++ atomic_set(&q_vector->state, IXGBE_QV_STATE_DISABLE); ++ ++#endif + /* tie q_vector and adapter together */ + adapter->q_vector[v_idx] = q_vector; + q_vector->adapter = adapter; +@@ -814,6 +961,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + + /* initialize work limits */ + q_vector->tx.work_limit = adapter->tx_work_limit; ++ q_vector->rx.work_limit = adapter->rx_work_limit; + + /* initialize pointer to rings */ + ring = q_vector->ring; +@@ -822,7 +970,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + if (txr_count && !rxr_count) { + /* tx only vector */ + if (adapter->tx_itr_setting == 1) +- q_vector->itr 
= IXGBE_10K_ITR; ++ q_vector->itr = IXGBE_12K_ITR; + else + q_vector->itr = adapter->tx_itr_setting; + } else { +@@ -835,7 +983,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + + while (txr_count) { + /* assign generic ring traits */ +- ring->dev = &adapter->pdev->dev; ++ ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ +@@ -846,11 +994,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + + /* apply Tx specific ring traits */ + ring->count = adapter->tx_ring_count; +- if (adapter->num_rx_pools > 1) +- ring->queue_index = +- txr_idx % adapter->num_rx_queues_per_pool; +- else +- ring->queue_index = txr_idx; ++ ring->queue_index = txr_idx; + + /* assign ring to adapter */ + adapter->tx_ring[txr_idx] = ring; +@@ -865,7 +1009,7 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + + while (rxr_count) { + /* assign generic ring traits */ +- ring->dev = &adapter->pdev->dev; ++ ring->dev = pci_dev_to_dev(adapter->pdev); + ring->netdev = adapter->netdev; + + /* configure backlink on ring */ +@@ -881,23 +1025,21 @@ static int ixgbe_alloc_q_vector(struct ixgbe_adapter *adapter, + if (adapter->hw.mac.type == ixgbe_mac_82599EB) + set_bit(__IXGBE_RX_CSUM_UDP_ZERO_ERR, &ring->state); + +-#ifdef IXGBE_FCOE +- if (adapter->netdev->features & NETIF_F_FCOE_MTU) { ++#if IS_ENABLED(CONFIG_FCOE) ++ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) { + struct ixgbe_ring_feature *f; + f = &adapter->ring_feature[RING_F_FCOE]; ++ + if ((rxr_idx >= f->offset) && +- (rxr_idx < f->offset + f->indices)) ++ (rxr_idx < f->offset + f->indices)) { + set_bit(__IXGBE_RX_FCOE, &ring->state); ++ } + } ++#endif /* CONFIG_FCOE */ + +-#endif /* IXGBE_FCOE */ + /* apply Rx specific ring traits */ + ring->count = adapter->rx_ring_count; +- if (adapter->num_rx_pools > 1) +- ring->queue_index = +- rxr_idx % adapter->num_rx_queues_per_pool; +- else +- ring->queue_index = rxr_idx; ++ 
ring->queue_index = rxr_idx; + + /* assign ring to adapter */ + adapter->rx_ring[rxr_idx] = ring; +@@ -934,13 +1076,10 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) + adapter->rx_ring[ring->queue_index] = NULL; + + adapter->q_vector[v_idx] = NULL; ++#ifdef HAVE_NDO_BUSY_POLL + napi_hash_del(&q_vector->napi); ++#endif + netif_napi_del(&q_vector->napi); +- +- /* +- * ixgbe_get_stats64() might access the rings on this vector, +- * we must wait a grace period before freeing it. +- */ + kfree_rcu(q_vector, rcu); + } + +@@ -953,21 +1092,16 @@ static void ixgbe_free_q_vector(struct ixgbe_adapter *adapter, int v_idx) + **/ + static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) + { +- int q_vectors = adapter->num_q_vectors; +- int rxr_remaining = adapter->num_rx_queues; +- int txr_remaining = adapter->num_tx_queues; +- int rxr_idx = 0, txr_idx = 0, v_idx = 0; ++ unsigned int q_vectors = adapter->num_q_vectors; ++ unsigned int rxr_remaining = adapter->num_rx_queues; ++ unsigned int txr_remaining = adapter->num_tx_queues; ++ unsigned int rxr_idx = 0, txr_idx = 0, v_idx = 0; + int err; + +- /* only one q_vector if MSI-X is disabled. 
*/ +- if (!(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) +- q_vectors = 1; +- + if (q_vectors >= (rxr_remaining + txr_remaining)) { + for (; rxr_remaining; v_idx++) { + err = ixgbe_alloc_q_vector(adapter, q_vectors, v_idx, + 0, 0, 1, rxr_idx); +- + if (err) + goto err_out; + +@@ -994,7 +1128,7 @@ static int ixgbe_alloc_q_vectors(struct ixgbe_adapter *adapter) + txr_idx++; + } + +- return 0; ++ return IXGBE_SUCCESS; + + err_out: + adapter->num_tx_queues = 0; +@@ -1027,7 +1161,7 @@ static void ixgbe_free_q_vectors(struct ixgbe_adapter *adapter) + ixgbe_free_q_vector(adapter, v_idx); + } + +-static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) ++void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) + { + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_MSIX_ENABLED; +@@ -1047,48 +1181,22 @@ static void ixgbe_reset_interrupt_capability(struct ixgbe_adapter *adapter) + * Attempt to configure the interrupts using the best available + * capabilities of the hardware and the kernel. + **/ +-static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) ++void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) + { +- struct ixgbe_hw *hw = &adapter->hw; +- int vector, v_budget, err; ++ int err; + +- /* +- * It's easy to be greedy for MSI-X vectors, but it really +- * doesn't do us much good if we have a lot more vectors +- * than CPU's. So let's be conservative and only ask for +- * (roughly) the same number of vectors as there are CPU's. +- * The default is to use pairs of vectors. +- */ +- v_budget = max(adapter->num_rx_queues, adapter->num_tx_queues); +- v_budget = min_t(int, v_budget, num_online_cpus()); +- v_budget += NON_Q_VECTORS; ++ /* We will try to get MSI-X interrupts first */ ++ if (!ixgbe_acquire_msix_vectors(adapter)) ++ return; + +- /* +- * At the same time, hardware can only support a maximum of +- * hw.mac->max_msix_vectors vectors. 
With features +- * such as RSS and VMDq, we can easily surpass the number of Rx and Tx +- * descriptor queues supported by our device. Thus, we cap it off in +- * those rare cases where the cpu count also exceeds our vector limit. ++ /* At this point, we do not have MSI-X capabilities. We need to ++ * reconfigure or disable various features which require MSI-X ++ * capability. + */ +- v_budget = min_t(int, v_budget, hw->mac.max_msix_vectors); + +- /* A failure in MSI-X entry allocation isn't fatal, but it does +- * mean we disable MSI-X capabilities of the adapter. */ +- adapter->msix_entries = kcalloc(v_budget, +- sizeof(struct msix_entry), GFP_KERNEL); +- if (adapter->msix_entries) { +- for (vector = 0; vector < v_budget; vector++) +- adapter->msix_entries[vector].entry = vector; +- +- ixgbe_acquire_msix_vectors(adapter, v_budget); +- +- if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) +- return; +- } +- +- /* disable DCB if number of TCs exceeds 1 */ ++ /* Disable DCB unless we only have a single traffic class */ + if (netdev_get_num_tc(adapter->netdev) > 1) { +- e_err(probe, "num TCs exceeds number of queues - disabling DCB\n"); ++ e_dev_warn("Number of DCB TCs exceeds number of available queues. 
Disabling DCB support.\n"); + netdev_reset_tc(adapter->netdev); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) +@@ -1098,26 +1206,39 @@ static void ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter) + adapter->temp_dcb_cfg.pfc_mode_enable = false; + adapter->dcb_cfg.pfc_mode_enable = false; + } ++ + adapter->dcb_cfg.num_tcs.pg_tcs = 1; + adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + +- /* disable SR-IOV */ ++ /* Disable VMDq support */ ++ e_dev_warn("Disabling VMQd support\n"); ++ adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; ++ ++#ifdef CONFIG_PCI_IOV ++ /* Disable SR-IOV support */ ++ e_dev_warn("Disabling SR-IOV support\n"); + ixgbe_disable_sriov(adapter); ++#endif /* CONFIG_PCI_IOV */ + +- /* disable RSS */ ++ /* Disable RSS */ ++ e_dev_warn("Disabling RSS support\n"); + adapter->ring_feature[RING_F_RSS].limit = 1; + ++ /* recalculate number of queues now that many features have been ++ * changed or disabled. ++ */ + ixgbe_set_num_queues(adapter); + adapter->num_q_vectors = 1; + +- err = pci_enable_msi(adapter->pdev); +- if (err) { +- netif_printk(adapter, hw, KERN_DEBUG, adapter->netdev, +- "Unable to allocate MSI interrupt, falling back to legacy. Error: %d\n", +- err); ++ if (!(adapter->flags & IXGBE_FLAG_MSI_CAPABLE)) + return; +- } +- adapter->flags |= IXGBE_FLAG_MSI_ENABLED; ++ ++ err = pci_enable_msi(adapter->pdev); ++ if (err) ++ e_dev_warn("Failed to allocate MSI interrupt, falling back to legacy. 
Error: %d\n", ++ err); ++ else ++ adapter->flags |= IXGBE_FLAG_MSI_ENABLED; + } + + /** +@@ -1140,25 +1261,19 @@ int ixgbe_init_interrupt_scheme(struct ixgbe_adapter *adapter) + /* Set interrupt mode */ + ixgbe_set_interrupt_capability(adapter); + ++ /* Allocate memory for queues */ + err = ixgbe_alloc_q_vectors(adapter); + if (err) { +- e_dev_err("Unable to allocate memory for queue vectors\n"); +- goto err_alloc_q_vectors; ++ e_err(probe, "Unable to allocate memory for queue vectors\n"); ++ ixgbe_reset_interrupt_capability(adapter); ++ return err; + } + + ixgbe_cache_ring_register(adapter); + +- e_dev_info("Multiqueue %s: Rx Queue count = %u, Tx Queue count = %u\n", +- (adapter->num_rx_queues > 1) ? "Enabled" : "Disabled", +- adapter->num_rx_queues, adapter->num_tx_queues); +- + set_bit(__IXGBE_DOWN, &adapter->state); + +- return 0; +- +-err_alloc_q_vectors: +- ixgbe_reset_interrupt_capability(adapter); +- return err; ++ return IXGBE_SUCCESS; + } + + /** +@@ -1170,9 +1285,6 @@ err_alloc_q_vectors: + **/ + void ixgbe_clear_interrupt_scheme(struct ixgbe_adapter *adapter) + { +- adapter->num_tx_queues = 0; +- adapter->num_rx_queues = 0; +- + ixgbe_free_q_vectors(adapter); + ixgbe_reset_interrupt_capability(adapter); + } +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +index a125d3c..c7a1499 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2014 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -26,55 +22,70 @@ + + *******************************************************************************/ + ++/****************************************************************************** ++ Copyright (c)2006 - 2007 Myricom, Inc. for some LRO specific code ++******************************************************************************/ + #include + #include + #include + #include + #include ++#include + #include + #include +-#include + #include + #include +-#include + #include + #include +-#include ++#ifdef NETIF_F_TSO + #include ++#ifdef NETIF_F_TSO6 ++#include + #include ++#endif /* NETIF_F_TSO6 */ ++#endif /* NETIF_F_TSO */ ++#ifdef SIOCETHTOOL + #include +-#include +-#include +-#include +-#include +-#include +-#include ++#endif + ++#include + #include "ixgbe.h" +-#include "ixgbe_common.h" ++#ifdef HAVE_UDP_ENC_RX_OFFLOAD ++#include ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ ++#ifdef HAVE_VXLAN_RX_OFFLOAD ++#include ++#endif /* HAVE_VXLAN_RX_OFFLOAD */ ++ + #include "ixgbe_dcb_82599.h" + #include "ixgbe_sriov.h" + ++#define DRV_HW_PERF ++ ++#define FPGA ++ ++#define DRIVERIOV ++ ++#define BYPASS_TAG ++ ++#define RELEASE_TAG ++ ++#define DRV_VERSION "5.2.4" \ ++ DRIVERIOV DRV_HW_PERF FPGA \ ++ BYPASS_TAG RELEASE_TAG ++#define DRV_SUMMARY "Intel(R) 10GbE PCI Express Linux Network Driver" ++const char ixgbe_driver_version[] = DRV_VERSION; ++#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME + char ixgbe_driver_name[] = "ixgbe"; +-static const char ixgbe_driver_string[] = +- "Intel(R) 10 Gigabit PCI Express Network Driver"; +-#ifdef IXGBE_FCOE +-char ixgbe_default_device_descr[] = +- "Intel(R) 10 Gigabit Network 
Connection"; + #else +-static char ixgbe_default_device_descr[] = +- "Intel(R) 10 Gigabit Network Connection"; ++const char ixgbe_driver_name[] = "ixgbe"; + #endif +-#define DRV_VERSION "3.19.1-k" +-const char ixgbe_driver_version[] = DRV_VERSION; +-static const char ixgbe_copyright[] = +- "Copyright (c) 1999-2014 Intel Corporation."; +- +-static const struct ixgbe_info *ixgbe_info_tbl[] = { +- [board_82598] = &ixgbe_82598_info, +- [board_82599] = &ixgbe_82599_info, +- [board_X540] = &ixgbe_X540_info, +-}; ++static const char ixgbe_driver_string[] = DRV_SUMMARY; ++static const char ixgbe_copyright[] = "Copyright(c) 1999 - 2017 Intel Corporation."; ++static const char ixgbe_overheat_msg[] = ++ "Network adapter has been stopped because it has over heated. " ++ "Restart the computer. If the problem persists, " ++ "power off the system and replace the adapter"; + + /* ixgbe_pci_tbl - PCI Device ID Table + * +@@ -84,103 +95,120 @@ static const struct ixgbe_info *ixgbe_info_tbl[] = { + * { Vendor ID, Device ID, SubVendor ID, SubDevice ID, + * Class, Class Mask, private data (not used) } + */ +-static DEFINE_PCI_DEVICE_TABLE(ixgbe_pci_tbl) = { +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), board_82598 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), board_82598 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), board_82598 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), board_82598 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), board_82598 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), board_82598 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), board_82598 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), board_82598 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), board_82598 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), board_82598 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), board_82598 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), board_82598 }, +- {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_82599_KX4), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_XAUI_LOM), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), board_X540 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), board_82599 }, +- {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), board_X540 }, ++static const struct pci_device_id ixgbe_pci_tbl[] = { ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_DUAL_PORT), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AF_SINGLE_PORT), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598AT2), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_CX4), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_CX4_DUAL_PORT), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_DA_DUAL_PORT), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_XF_LR), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598EB_SFP_LOM), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82598_BX), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4), 0}, ++ {PCI_VDEVICE(INTEL, 
IXGBE_DEV_ID_82599_XAUI_LOM), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KR), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_BACKPLANE_FCOE), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_FCOE), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_T3_LOM), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_COMBO_BACKPLANE), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF2), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_LS), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599EN_SFP), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_SF_QP), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_QSFP_SF_QP), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X540T1), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550T1), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KX4), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_XFI), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_KR), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_SFP), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_10G_T), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_X_1G_T), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_KR_L), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP_N), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SGMII_L), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_10G_T), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_QSFP), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_QSFP_N), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_SFP), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T), 0}, ++ {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_X550EM_A_1G_T_L), 0}, + /* required last entry */ +- 
{0, } ++ { .device = 0 } + }; + MODULE_DEVICE_TABLE(pci, ixgbe_pci_tbl); + +-#ifdef CONFIG_IXGBE_DCA ++#if IS_ENABLED(CONFIG_DCA) + static int ixgbe_notify_dca(struct notifier_block *, unsigned long event, + void *p); + static struct notifier_block dca_notifier = { +- .notifier_call = ixgbe_notify_dca, +- .next = NULL, +- .priority = 0 ++ .notifier_call = ixgbe_notify_dca, ++ .next = NULL, ++ .priority = 0 + }; +-#endif +- +-#ifdef CONFIG_PCI_IOV +-static unsigned int max_vfs; +-module_param(max_vfs, uint, 0); +-MODULE_PARM_DESC(max_vfs, +- "Maximum number of virtual functions to allocate per physical function - default is zero and maximum value is 63. (Deprecated)"); +-#endif /* CONFIG_PCI_IOV */ +- +-static unsigned int allow_unsupported_sfp; +-module_param(allow_unsupported_sfp, uint, 0); +-MODULE_PARM_DESC(allow_unsupported_sfp, +- "Allow unsupported and untested SFP+ modules on 82599-based adapters"); +- +-#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK) +-static int debug = -1; +-module_param(debug, int, 0); +-MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)"); ++#endif /* CONFIG_DCA */ ++static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *); + + MODULE_AUTHOR("Intel Corporation, "); +-MODULE_DESCRIPTION("Intel(R) 10 Gigabit PCI Express Network Driver"); ++MODULE_DESCRIPTION(DRV_SUMMARY); + MODULE_LICENSE("GPL"); + MODULE_VERSION(DRV_VERSION); + ++#define DEFAULT_DEBUG_LEVEL_SHIFT 3 ++ ++static struct workqueue_struct *ixgbe_wq; ++ ++static bool ixgbe_is_sfp(struct ixgbe_hw *hw); + static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev); + +-static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_adapter *adapter, ++static int ixgbe_read_pci_cfg_word_parent(struct ixgbe_hw *hw, + u32 reg, u16 *value) + { ++ struct ixgbe_adapter *adapter = hw->back; + struct pci_dev *parent_dev; + struct pci_bus *parent_bus; ++ int pos; + + parent_bus = adapter->pdev->bus->parent; + if (!parent_bus) +- return -1; 
++ return IXGBE_ERR_FEATURE_NOT_SUPPORTED; + + parent_dev = parent_bus->self; + if (!parent_dev) +- return -1; ++ return IXGBE_ERR_FEATURE_NOT_SUPPORTED; + +- if (!pci_is_pcie(parent_dev)) +- return -1; ++ pos = pci_find_capability(parent_dev, PCI_CAP_ID_EXP); ++ if (!pos) ++ return IXGBE_ERR_FEATURE_NOT_SUPPORTED; + +- pcie_capability_read_word(parent_dev, reg, value); ++ pci_read_config_word(parent_dev, pos + reg, value); + if (*value == IXGBE_FAILED_READ_CFG_WORD && +- ixgbe_check_cfg_remove(&adapter->hw, parent_dev)) +- return -1; +- return 0; ++ ixgbe_check_cfg_remove(hw, parent_dev)) ++ return IXGBE_ERR_FEATURE_NOT_SUPPORTED; ++ return IXGBE_SUCCESS; + } + +-static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) ++/** ++ * ixgbe_get_parent_bus_info - Set PCI bus info beyond switch ++ * @hw: pointer to hardware structure ++ * ++ * Sets the PCI bus info (speed, width, type) within the ixgbe_hw structure ++ * when the device is behind a switch. ++ **/ ++static s32 ixgbe_get_parent_bus_info(struct ixgbe_hw *hw) + { +- struct ixgbe_hw *hw = &adapter->hw; + u16 link_status = 0; + int err; + +@@ -189,32 +217,30 @@ static s32 ixgbe_get_parent_bus_info(struct ixgbe_adapter *adapter) + /* Get the negotiated link width and speed from PCI config space of the + * parent, as this device is behind a switch + */ +- err = ixgbe_read_pci_cfg_word_parent(adapter, 18, &link_status); ++ err = ixgbe_read_pci_cfg_word_parent(hw, 18, &link_status); + +- /* assume caller will handle error case */ ++ /* If the read fails, fallback to default */ + if (err) +- return err; ++ link_status = IXGBE_READ_PCIE_WORD(hw, IXGBE_PCI_LINK_STATUS); + +- hw->bus.width = ixgbe_convert_bus_width(link_status); +- hw->bus.speed = ixgbe_convert_bus_speed(link_status); ++ ixgbe_set_pci_config_data_generic(hw, link_status); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +- * ixgbe_check_from_parent - Determine whether PCIe info should come from parent ++ * ixgbe_check_from_parent - 
determine whether to use parent for PCIe info + * @hw: hw specific details + * +- * This function is used by probe to determine whether a device's PCI-Express +- * bandwidth details should be gathered from the parent bus instead of from the +- * device. Used to ensure that various locations all have the correct device ID +- * checks. ++ * This function is used by probe to determine whether a device's PCIe info ++ * (speed, width, etc) should be obtained from the parent bus or directly. This ++ * is useful for specialized device configurations containing PCIe bridges. + */ + static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) + { + switch (hw->device_id) { +- case IXGBE_DEV_ID_82599_SFP_SF_QP: + case IXGBE_DEV_ID_82599_QSFP_SF_QP: ++ case IXGBE_DEV_ID_82599_SFP_SF_QP: + return true; + default: + return false; +@@ -224,13 +250,20 @@ static inline bool ixgbe_pcie_from_parent(struct ixgbe_hw *hw) + static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, + int expected_gts) + { ++ struct ixgbe_hw *hw = &adapter->hw; + int max_gts = 0; + enum pci_bus_speed speed = PCI_SPEED_UNKNOWN; + enum pcie_link_width width = PCIE_LNK_WIDTH_UNKNOWN; + struct pci_dev *pdev; + +- /* determine whether to use the the parent device ++ /* Some devices are not connected over PCIe and thus do not negotiate ++ * speed. These devices do not have valid bus info, and thus any report ++ * we generate may not be correct. 
+ */ ++ if (hw->bus.type == ixgbe_bus_type_internal) ++ return; ++ ++ /* determine whether to use the parent device */ + if (ixgbe_pcie_from_parent(&adapter->hw)) + pdev = adapter->pdev->bus->parent->self; + else +@@ -252,7 +285,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, + max_gts = 4 * width; + break; + case PCIE_SPEED_8_0GT: +- /* 128b/130b encoding reduces throughput by less than 2% */ ++ /* 128b/130b encoding has less than 2% impact on throughput */ + max_gts = 8 * width; + break; + default: +@@ -267,7 +300,7 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, + speed == PCIE_SPEED_5_0GT ? "5.0GT/s" : + speed == PCIE_SPEED_2_5GT ? "2.5GT/s" : + "Unknown"), +- width, ++ hw->bus.width, + (speed == PCIE_SPEED_2_5GT ? "20%" : + speed == PCIE_SPEED_5_0GT ? "20%" : + speed == PCIE_SPEED_8_0GT ? "<2%" : +@@ -281,12 +314,66 @@ static void ixgbe_check_minimum_link(struct ixgbe_adapter *adapter, + } + } + ++/** ++ * ixgbe_enumerate_functions - Get the number of ports this device has ++ * @adapter: adapter structure ++ * ++ * This function enumerates the phsyical functions co-located on a single slot, ++ * in order to determine how many ports a device has. This is most useful in ++ * determining the required GT/s of PCIe bandwidth necessary for optimal ++ * performance. ++ **/ ++static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) ++{ ++ struct pci_dev *entry, *pdev = adapter->pdev; ++ int physfns = 0; ++ ++ /* Some cards can not use the generic count PCIe functions method, ++ * because they are behind a parent switch, so we hardcode these to ++ * correct number of ports. 
++ */ ++ if (ixgbe_pcie_from_parent(&adapter->hw)) { ++ physfns = 4; ++ } else { ++ list_for_each_entry(entry, &pdev->bus->devices, bus_list) { ++#ifdef CONFIG_PCI_IOV ++ /* don't count virtual functions */ ++ if (entry->is_virtfn) ++ continue; ++#endif ++ ++ /* When the devices on the bus don't all match our device ID, ++ * we can't reliably determine the correct number of ++ * functions. This can occur if a function has been direct ++ * attached to a virtual machine using VT-d, for example. In ++ * this case, simply return -1 to indicate this. ++ */ ++ if ((entry->vendor != pdev->vendor) || ++ (entry->device != pdev->device)) ++ return -1; ++ ++ physfns++; ++ } ++ } ++ ++ return physfns; ++} ++ + static void ixgbe_service_event_schedule(struct ixgbe_adapter *adapter) + { + if (!test_bit(__IXGBE_DOWN, &adapter->state) && +- !test_bit(__IXGBE_REMOVING, &adapter->state) && ++ !test_bit(__IXGBE_REMOVE, &adapter->state) && + !test_and_set_bit(__IXGBE_SERVICE_SCHED, &adapter->state)) +- schedule_work(&adapter->service_task); ++ queue_work(ixgbe_wq, &adapter->service_task); ++} ++ ++static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) ++{ ++ BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); ++ ++ /* flush memory to make sure state is correct before next watchog */ ++ smp_mb__before_atomic(); ++ clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); + } + + static void ixgbe_remove_adapter(struct ixgbe_hw *hw) +@@ -315,481 +402,72 @@ static void ixgbe_check_remove(struct ixgbe_hw *hw, u32 reg) + ixgbe_remove_adapter(hw); + return; + } +- value = ixgbe_read_reg(hw, IXGBE_STATUS); ++ value = IXGBE_READ_REG(hw, IXGBE_STATUS); + if (value == IXGBE_FAILED_READ_REG) + ixgbe_remove_adapter(hw); + } + +-/** +- * ixgbe_read_reg - Read from device register +- * @hw: hw specific details +- * @reg: offset of register to read +- * +- * Returns : value read or IXGBE_FAILED_READ_REG if removed +- * +- * This function is used to read device registers. 
It checks for device +- * removal by confirming any read that returns all ones by checking the +- * status register value for all ones. This function avoids reading from +- * the hardware if a removal was previously detected in which case it +- * returns IXGBE_FAILED_READ_REG (all ones). +- */ +-u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg) ++static u32 ++ixgbe_validate_register_read(struct ixgbe_hw *_hw, u32 reg, bool quiet) + { +- u8 __iomem *reg_addr = ACCESS_ONCE(hw->hw_addr); ++ int i; + u32 value; ++ u8 __iomem *reg_addr; ++ struct ixgbe_adapter *adapter = _hw->back; + +- if (ixgbe_removed(reg_addr)) ++ reg_addr = ACCESS_ONCE(_hw->hw_addr); ++ if (IXGBE_REMOVED(reg_addr)) + return IXGBE_FAILED_READ_REG; +- value = readl(reg_addr + reg); +- if (unlikely(value == IXGBE_FAILED_READ_REG)) +- ixgbe_check_remove(hw, reg); +- return value; +-} +- +-static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) +-{ +- u16 value; +- +- pci_read_config_word(pdev, PCI_VENDOR_ID, &value); +- if (value == IXGBE_FAILED_READ_CFG_WORD) { +- ixgbe_remove_adapter(hw); +- return true; ++ for (i = 0; i < IXGBE_DEAD_READ_RETRIES; ++i) { ++ value = readl(reg_addr + reg); ++ if (value != IXGBE_DEAD_READ_REG) ++ break; + } +- return false; +-} +- +-u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) +-{ +- struct ixgbe_adapter *adapter = hw->back; +- u16 value; +- +- if (ixgbe_removed(hw->hw_addr)) +- return IXGBE_FAILED_READ_CFG_WORD; +- pci_read_config_word(adapter->pdev, reg, &value); +- if (value == IXGBE_FAILED_READ_CFG_WORD && +- ixgbe_check_cfg_remove(hw, adapter->pdev)) +- return IXGBE_FAILED_READ_CFG_WORD; ++ if (quiet) ++ return value; ++ if (value == IXGBE_DEAD_READ_REG) ++ e_err(drv, "%s: register %x read unchanged\n", __func__, reg); ++ else ++ e_warn(hw, "%s: register %x read recovered after %d retries\n", ++ __func__, reg, i + 1); + return value; + } + +-#ifdef CONFIG_PCI_IOV +-static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) 
++u32 ixgbe_read_reg(struct ixgbe_hw *hw, u32 reg, bool quiet) + { +- struct ixgbe_adapter *adapter = hw->back; + u32 value; ++ u8 __iomem *reg_addr; + +- if (ixgbe_removed(hw->hw_addr)) +- return IXGBE_FAILED_READ_CFG_DWORD; +- pci_read_config_dword(adapter->pdev, reg, &value); +- if (value == IXGBE_FAILED_READ_CFG_DWORD && +- ixgbe_check_cfg_remove(hw, adapter->pdev)) +- return IXGBE_FAILED_READ_CFG_DWORD; +- return value; +-} +-#endif /* CONFIG_PCI_IOV */ +- +-void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) +-{ +- struct ixgbe_adapter *adapter = hw->back; +- +- if (ixgbe_removed(hw->hw_addr)) +- return; +- pci_write_config_word(adapter->pdev, reg, value); +-} +- +-static void ixgbe_service_event_complete(struct ixgbe_adapter *adapter) +-{ +- BUG_ON(!test_bit(__IXGBE_SERVICE_SCHED, &adapter->state)); +- +- /* flush memory to make sure state is correct before next watchdog */ +- smp_mb__before_atomic(); +- clear_bit(__IXGBE_SERVICE_SCHED, &adapter->state); +-} +- +-struct ixgbe_reg_info { +- u32 ofs; +- char *name; +-}; +- +-static const struct ixgbe_reg_info ixgbe_reg_info_tbl[] = { +- +- /* General Registers */ +- {IXGBE_CTRL, "CTRL"}, +- {IXGBE_STATUS, "STATUS"}, +- {IXGBE_CTRL_EXT, "CTRL_EXT"}, +- +- /* Interrupt Registers */ +- {IXGBE_EICR, "EICR"}, +- +- /* RX Registers */ +- {IXGBE_SRRCTL(0), "SRRCTL"}, +- {IXGBE_DCA_RXCTRL(0), "DRXCTL"}, +- {IXGBE_RDLEN(0), "RDLEN"}, +- {IXGBE_RDH(0), "RDH"}, +- {IXGBE_RDT(0), "RDT"}, +- {IXGBE_RXDCTL(0), "RXDCTL"}, +- {IXGBE_RDBAL(0), "RDBAL"}, +- {IXGBE_RDBAH(0), "RDBAH"}, +- +- /* TX Registers */ +- {IXGBE_TDBAL(0), "TDBAL"}, +- {IXGBE_TDBAH(0), "TDBAH"}, +- {IXGBE_TDLEN(0), "TDLEN"}, +- {IXGBE_TDH(0), "TDH"}, +- {IXGBE_TDT(0), "TDT"}, +- {IXGBE_TXDCTL(0), "TXDCTL"}, +- +- /* List Terminator */ +- {} +-}; +- +- +-/* +- * ixgbe_regdump - register printout routine +- */ +-static void ixgbe_regdump(struct ixgbe_hw *hw, struct ixgbe_reg_info *reginfo) +-{ +- int i = 0, j = 0; +- char rname[16]; +- u32 
regs[64]; +- +- switch (reginfo->ofs) { +- case IXGBE_SRRCTL(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_SRRCTL(i)); +- break; +- case IXGBE_DCA_RXCTRL(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_DCA_RXCTRL(i)); +- break; +- case IXGBE_RDLEN(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_RDLEN(i)); +- break; +- case IXGBE_RDH(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_RDH(i)); +- break; +- case IXGBE_RDT(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_RDT(i)); +- break; +- case IXGBE_RXDCTL(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_RXDCTL(i)); +- break; +- case IXGBE_RDBAL(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAL(i)); +- break; +- case IXGBE_RDBAH(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_RDBAH(i)); +- break; +- case IXGBE_TDBAL(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAL(i)); +- break; +- case IXGBE_TDBAH(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_TDBAH(i)); +- break; +- case IXGBE_TDLEN(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_TDLEN(i)); +- break; +- case IXGBE_TDH(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_TDH(i)); +- break; +- case IXGBE_TDT(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_TDT(i)); +- break; +- case IXGBE_TXDCTL(0): +- for (i = 0; i < 64; i++) +- regs[i] = IXGBE_READ_REG(hw, IXGBE_TXDCTL(i)); +- break; +- default: +- pr_info("%-15s %08x\n", reginfo->name, +- IXGBE_READ_REG(hw, reginfo->ofs)); +- return; +- } +- +- for (i = 0; i < 8; i++) { +- snprintf(rname, 16, "%s[%d-%d]", reginfo->name, i*8, i*8+7); +- pr_err("%-15s", rname); +- for (j = 0; j < 8; j++) +- pr_cont(" %08x", regs[i*8+j]); +- pr_cont("\n"); +- } +- +-} +- +-/* +- * ixgbe_dump - Print registers, tx-rings and rx-rings +- */ +-static 
void ixgbe_dump(struct ixgbe_adapter *adapter) +-{ +- struct net_device *netdev = adapter->netdev; +- struct ixgbe_hw *hw = &adapter->hw; +- struct ixgbe_reg_info *reginfo; +- int n = 0; +- struct ixgbe_ring *tx_ring; +- struct ixgbe_tx_buffer *tx_buffer; +- union ixgbe_adv_tx_desc *tx_desc; +- struct my_u0 { u64 a; u64 b; } *u0; +- struct ixgbe_ring *rx_ring; +- union ixgbe_adv_rx_desc *rx_desc; +- struct ixgbe_rx_buffer *rx_buffer_info; +- u32 staterr; +- int i = 0; +- +- if (!netif_msg_hw(adapter)) +- return; +- +- /* Print netdevice Info */ +- if (netdev) { +- dev_info(&adapter->pdev->dev, "Net device Info\n"); +- pr_info("Device Name state " +- "trans_start last_rx\n"); +- pr_info("%-15s %016lX %016lX %016lX\n", +- netdev->name, +- netdev->state, +- netdev->trans_start, +- netdev->last_rx); +- } +- +- /* Print Registers */ +- dev_info(&adapter->pdev->dev, "Register Dump\n"); +- pr_info(" Register Name Value\n"); +- for (reginfo = (struct ixgbe_reg_info *)ixgbe_reg_info_tbl; +- reginfo->name; reginfo++) { +- ixgbe_regdump(hw, reginfo); +- } +- +- /* Print TX Ring Summary */ +- if (!netdev || !netif_running(netdev)) +- goto exit; +- +- dev_info(&adapter->pdev->dev, "TX Rings Summary\n"); +- pr_info(" %s %s %s %s\n", +- "Queue [NTU] [NTC] [bi(ntc)->dma ]", +- "leng", "ntw", "timestamp"); +- for (n = 0; n < adapter->num_tx_queues; n++) { +- tx_ring = adapter->tx_ring[n]; +- tx_buffer = &tx_ring->tx_buffer_info[tx_ring->next_to_clean]; +- pr_info(" %5d %5X %5X %016llX %08X %p %016llX\n", +- n, tx_ring->next_to_use, tx_ring->next_to_clean, +- (u64)dma_unmap_addr(tx_buffer, dma), +- dma_unmap_len(tx_buffer, len), +- tx_buffer->next_to_watch, +- (u64)tx_buffer->time_stamp); +- } +- +- /* Print TX Rings */ +- if (!netif_msg_tx_done(adapter)) +- goto rx_ring_summary; +- +- dev_info(&adapter->pdev->dev, "TX Rings Dump\n"); +- +- /* Transmit Descriptor Formats +- * +- * 82598 Advanced Transmit Descriptor +- * 
+--------------------------------------------------------------+ +- * 0 | Buffer Address [63:0] | +- * +--------------------------------------------------------------+ +- * 8 | PAYLEN | POPTS | IDX | STA | DCMD |DTYP | RSV | DTALEN | +- * +--------------------------------------------------------------+ +- * 63 46 45 40 39 36 35 32 31 24 23 20 19 0 +- * +- * 82598 Advanced Transmit Descriptor (Write-Back Format) +- * +--------------------------------------------------------------+ +- * 0 | RSV [63:0] | +- * +--------------------------------------------------------------+ +- * 8 | RSV | STA | NXTSEQ | +- * +--------------------------------------------------------------+ +- * 63 36 35 32 31 0 +- * +- * 82599+ Advanced Transmit Descriptor +- * +--------------------------------------------------------------+ +- * 0 | Buffer Address [63:0] | +- * +--------------------------------------------------------------+ +- * 8 |PAYLEN |POPTS|CC|IDX |STA |DCMD |DTYP |MAC |RSV |DTALEN | +- * +--------------------------------------------------------------+ +- * 63 46 45 40 39 38 36 35 32 31 24 23 20 19 18 17 16 15 0 +- * +- * 82599+ Advanced Transmit Descriptor (Write-Back Format) +- * +--------------------------------------------------------------+ +- * 0 | RSV [63:0] | +- * +--------------------------------------------------------------+ +- * 8 | RSV | STA | RSV | +- * +--------------------------------------------------------------+ +- * 63 36 35 32 31 0 +- */ +- +- for (n = 0; n < adapter->num_tx_queues; n++) { +- tx_ring = adapter->tx_ring[n]; +- pr_info("------------------------------------\n"); +- pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index); +- pr_info("------------------------------------\n"); +- pr_info("%s%s %s %s %s %s\n", +- "T [desc] [address 63:0 ] ", +- "[PlPOIdStDDt Ln] [bi->dma ] ", +- "leng", "ntw", "timestamp", "bi->skb"); +- +- for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) { +- tx_desc = IXGBE_TX_DESC(tx_ring, i); +- tx_buffer = 
&tx_ring->tx_buffer_info[i]; +- u0 = (struct my_u0 *)tx_desc; +- if (dma_unmap_len(tx_buffer, len) > 0) { +- pr_info("T [0x%03X] %016llX %016llX %016llX %08X %p %016llX %p", +- i, +- le64_to_cpu(u0->a), +- le64_to_cpu(u0->b), +- (u64)dma_unmap_addr(tx_buffer, dma), +- dma_unmap_len(tx_buffer, len), +- tx_buffer->next_to_watch, +- (u64)tx_buffer->time_stamp, +- tx_buffer->skb); +- if (i == tx_ring->next_to_use && +- i == tx_ring->next_to_clean) +- pr_cont(" NTC/U\n"); +- else if (i == tx_ring->next_to_use) +- pr_cont(" NTU\n"); +- else if (i == tx_ring->next_to_clean) +- pr_cont(" NTC\n"); +- else +- pr_cont("\n"); +- +- if (netif_msg_pktdata(adapter) && +- tx_buffer->skb) +- print_hex_dump(KERN_INFO, "", +- DUMP_PREFIX_ADDRESS, 16, 1, +- tx_buffer->skb->data, +- dma_unmap_len(tx_buffer, len), +- true); ++ reg_addr = ACCESS_ONCE(hw->hw_addr); ++ if (IXGBE_REMOVED(reg_addr)) ++ return IXGBE_FAILED_READ_REG; ++ if (unlikely(hw->phy.nw_mng_if_sel & ++ IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE)) { ++ struct ixgbe_adapter *adapter; ++ int i; ++ ++ for (i = 0; i < 200; ++i) { ++ value = readl(reg_addr + IXGBE_MAC_SGMII_BUSY); ++ if (likely(!value)) ++ goto writes_completed; ++ if (value == IXGBE_FAILED_READ_REG) { ++ ixgbe_remove_adapter(hw); ++ return IXGBE_FAILED_READ_REG; + } ++ udelay(5); + } +- } +- +- /* Print RX Rings Summary */ +-rx_ring_summary: +- dev_info(&adapter->pdev->dev, "RX Rings Summary\n"); +- pr_info("Queue [NTU] [NTC]\n"); +- for (n = 0; n < adapter->num_rx_queues; n++) { +- rx_ring = adapter->rx_ring[n]; +- pr_info("%5d %5X %5X\n", +- n, rx_ring->next_to_use, rx_ring->next_to_clean); +- } +- +- /* Print RX Rings */ +- if (!netif_msg_rx_status(adapter)) +- goto exit; +- +- dev_info(&adapter->pdev->dev, "RX Rings Dump\n"); +- +- /* Receive Descriptor Formats +- * +- * 82598 Advanced Receive Descriptor (Read) Format +- * 63 1 0 +- * +-----------------------------------------------------+ +- * 0 | Packet Buffer Address [63:1] |A0/NSE| +- * 
+----------------------------------------------+------+ +- * 8 | Header Buffer Address [63:1] | DD | +- * +-----------------------------------------------------+ +- * +- * +- * 82598 Advanced Receive Descriptor (Write-Back) Format +- * +- * 63 48 47 32 31 30 21 20 16 15 4 3 0 +- * +------------------------------------------------------+ +- * 0 | RSS Hash / |SPH| HDR_LEN | RSV |Packet| RSS | +- * | Packet | IP | | | | Type | Type | +- * | Checksum | Ident | | | | | | +- * +------------------------------------------------------+ +- * 8 | VLAN Tag | Length | Extended Error | Extended Status | +- * +------------------------------------------------------+ +- * 63 48 47 32 31 20 19 0 +- * +- * 82599+ Advanced Receive Descriptor (Read) Format +- * 63 1 0 +- * +-----------------------------------------------------+ +- * 0 | Packet Buffer Address [63:1] |A0/NSE| +- * +----------------------------------------------+------+ +- * 8 | Header Buffer Address [63:1] | DD | +- * +-----------------------------------------------------+ +- * +- * +- * 82599+ Advanced Receive Descriptor (Write-Back) Format +- * +- * 63 48 47 32 31 30 21 20 17 16 4 3 0 +- * +------------------------------------------------------+ +- * 0 |RSS / Frag Checksum|SPH| HDR_LEN |RSC- |Packet| RSS | +- * |/ RTT / PCoE_PARAM | | | CNT | Type | Type | +- * |/ Flow Dir Flt ID | | | | | | +- * +------------------------------------------------------+ +- * 8 | VLAN Tag | Length |Extended Error| Xtnd Status/NEXTP | +- * +------------------------------------------------------+ +- * 63 48 47 32 31 20 19 0 +- */ +- +- for (n = 0; n < adapter->num_rx_queues; n++) { +- rx_ring = adapter->rx_ring[n]; +- pr_info("------------------------------------\n"); +- pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index); +- pr_info("------------------------------------\n"); +- pr_info("%s%s%s", +- "R [desc] [ PktBuf A0] ", +- "[ HeadBuf DD] [bi->dma ] [bi->skb ] ", +- "<-- Adv Rx Read format\n"); +- pr_info("%s%s%s", +- "RWB[desc] 
[PcsmIpSHl PtRs] ", +- "[vl er S cks ln] ---------------- [bi->skb ] ", +- "<-- Adv Rx Write-Back format\n"); +- +- for (i = 0; i < rx_ring->count; i++) { +- rx_buffer_info = &rx_ring->rx_buffer_info[i]; +- rx_desc = IXGBE_RX_DESC(rx_ring, i); +- u0 = (struct my_u0 *)rx_desc; +- staterr = le32_to_cpu(rx_desc->wb.upper.status_error); +- if (staterr & IXGBE_RXD_STAT_DD) { +- /* Descriptor Done */ +- pr_info("RWB[0x%03X] %016llX " +- "%016llX ---------------- %p", i, +- le64_to_cpu(u0->a), +- le64_to_cpu(u0->b), +- rx_buffer_info->skb); +- } else { +- pr_info("R [0x%03X] %016llX " +- "%016llX %016llX %p", i, +- le64_to_cpu(u0->a), +- le64_to_cpu(u0->b), +- (u64)rx_buffer_info->dma, +- rx_buffer_info->skb); +- +- if (netif_msg_pktdata(adapter) && +- rx_buffer_info->dma) { +- print_hex_dump(KERN_INFO, "", +- DUMP_PREFIX_ADDRESS, 16, 1, +- page_address(rx_buffer_info->page) + +- rx_buffer_info->page_offset, +- ixgbe_rx_bufsz(rx_ring), true); +- } +- } + +- if (i == rx_ring->next_to_use) +- pr_cont(" NTU\n"); +- else if (i == rx_ring->next_to_clean) +- pr_cont(" NTC\n"); +- else +- pr_cont("\n"); +- +- } ++ adapter = hw->back; ++ e_warn(hw, "register writes incomplete %08x\n", value); + } + +-exit: +- return; ++writes_completed: ++ value = readl(reg_addr + reg); ++ if (unlikely(value == IXGBE_FAILED_READ_REG)) ++ ixgbe_check_remove(hw, reg); ++ if (unlikely(value == IXGBE_DEAD_READ_REG)) ++ value = ixgbe_validate_register_read(hw, reg, quiet); ++ return value; + } + + static void ixgbe_release_hw_control(struct ixgbe_adapter *adapter) +@@ -812,7 +490,7 @@ static void ixgbe_get_hw_control(struct ixgbe_adapter *adapter) + ctrl_ext | IXGBE_CTRL_EXT_DRV_LOAD); + } + +-/** ++/* + * ixgbe_set_ivar - set the IVAR registers, mapping interrupt causes to vectors + * @adapter: pointer to adapter struct + * @direction: 0 for Rx, 1 for Tx, -1 for other causes +@@ -838,6 +516,9 @@ static void ixgbe_set_ivar(struct ixgbe_adapter *adapter, s8 direction, + break; + case ixgbe_mac_82599EB: 
+ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + if (direction == -1) { + /* other causes */ + msix_vector |= IXGBE_IVAR_ALLOC_VAL; +@@ -874,6 +555,9 @@ static inline void ixgbe_irq_rearm_queues(struct ixgbe_adapter *adapter, + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + mask = (qmask & 0xFFFFFFFF); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICS_EX(0), mask); + mask = (qmask >> 32); +@@ -903,7 +587,7 @@ void ixgbe_unmap_and_free_tx_resource(struct ixgbe_ring *ring, + tx_buffer->next_to_watch = NULL; + tx_buffer->skb = NULL; + dma_unmap_len_set(tx_buffer, len, 0); +- /* tx_buffer must be completely set up in the transmit path */ ++ /* tx_buffer_info must be completely set up in the transmit path */ + } + + static void ixgbe_update_xoff_rx_lfc(struct ixgbe_adapter *adapter) +@@ -940,13 +624,14 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u32 xoff[8] = {0}; +- u8 tc; + int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + ++#ifdef HAVE_DCBNL_IEEE + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + ++#endif + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED) || !pfc_en) { + ixgbe_update_xoff_rx_lfc(adapter); + return; +@@ -954,27 +639,22 @@ static void ixgbe_update_xoff_received(struct ixgbe_adapter *adapter) + + /* update stats for each tc, only valid with PFC enabled */ + for (i = 0; i < MAX_TX_PACKET_BUFFERS; i++) { +- u32 pxoffrxc; +- + switch (hw->mac.type) { + case ixgbe_mac_82598EB: +- pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); ++ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXC(i)); + break; + default: +- pxoffrxc = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); ++ xoff[i] = IXGBE_READ_REG(hw, IXGBE_PXOFFRXCNT(i)); + } +- hwstats->pxoffrxc[i] += pxoffrxc; +- /* Get the TC 
for given UP */ +- tc = netdev_get_prio_tc_map(adapter->netdev, i); +- xoff[tc] += pxoffrxc; ++ hwstats->pxoffrxc[i] += xoff[i]; + } + + /* disarm tx queues that have received xoff frames */ + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; ++ u8 tc = tx_ring->dcb_tc; + +- tc = tx_ring->dcb_tc; +- if (xoff[tc]) ++ if ((tc <= 7) && (xoff[tc])) + clear_bit(__IXGBE_HANG_CHECK_ARMED, &tx_ring->state); + } + } +@@ -986,27 +666,16 @@ static u64 ixgbe_get_tx_completed(struct ixgbe_ring *ring) + + static u64 ixgbe_get_tx_pending(struct ixgbe_ring *ring) + { +- struct ixgbe_adapter *adapter; +- struct ixgbe_hw *hw; +- u32 head, tail; +- +- if (ring->l2_accel_priv) +- adapter = ring->l2_accel_priv->real_adapter; +- else +- adapter = netdev_priv(ring->netdev); +- +- hw = &adapter->hw; +- head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); +- tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); ++ struct ixgbe_adapter *adapter = ring->q_vector->adapter; ++ struct ixgbe_hw *hw = &adapter->hw; + +- if (head != tail) +- return (head < tail) ? +- tail - head : (tail + ring->count - head); ++ u32 head = IXGBE_READ_REG(hw, IXGBE_TDH(ring->reg_idx)); ++ u32 tail = IXGBE_READ_REG(hw, IXGBE_TDT(ring->reg_idx)); + +- return 0; ++ return ((head <= tail) ? tail : tail + ring->count) - head; + } + +-static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) ++static bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) + { + u32 tx_done = ixgbe_get_tx_completed(tx_ring); + u32 tx_done_old = tx_ring->tx_stats.tx_done_old; +@@ -1023,7 +692,7 @@ static inline bool ixgbe_check_tx_hang(struct ixgbe_ring *tx_ring) + * bit is cleared if a pause frame is received to remove + * false hang detection due to PFC or 802.3x frames. 
By + * requiring this to fail twice we avoid races with +- * pfc clearing the ARMED bit and conditions where we ++ * PFC clearing the ARMED bit and conditions where we + * run the check_tx_hang logic with a transmit completion + * pending but without time to complete it yet. + */ +@@ -1050,13 +719,41 @@ static void ixgbe_tx_timeout_reset(struct ixgbe_adapter *adapter) + + /* Do the reset outside of interrupt context */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { +- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; +- e_warn(drv, "initiating reset due to tx timeout\n"); ++ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + ixgbe_service_event_schedule(adapter); + } + } + + /** ++ * ixgbe_tx_timeout - Respond to a Tx Hang ++ * @netdev: network interface device structure ++ **/ ++static void ixgbe_tx_timeout(struct net_device *netdev) ++{ ++struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ bool real_tx_hang = false; ++ int i; ++ ++#define TX_TIMEO_LIMIT 16000 ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; ++ if (check_for_tx_hang(tx_ring) && ixgbe_check_tx_hang(tx_ring)) ++ real_tx_hang = true; ++ } ++ ++ if (real_tx_hang) { ++ ixgbe_tx_timeout_reset(adapter); ++ } else { ++ e_info(drv, "Fake Tx hang detected with timeout of %d " ++ "seconds\n", netdev->watchdog_timeo/HZ); ++ ++ /* fake Tx hang - increase the kernel timeout */ ++ if (netdev->watchdog_timeo < TX_TIMEO_LIMIT) ++ netdev->watchdog_timeo *= 2; ++ } ++} ++ ++/** + * ixgbe_clean_tx_irq - Reclaim resources after transmit completes + * @q_vector: structure containing interrupt and ring information + * @tx_ring: tx ring to clean +@@ -1166,23 +863,23 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, + " Tx Queue <%d>\n" + " TDH, TDT <%x>, <%x>\n" + " next_to_use <%x>\n" +- " next_to_clean <%x>\n" +- "tx_buffer_info[next_to_clean]\n" +- " time_stamp <%lx>\n" +- " jiffies <%lx>\n", ++ " next_to_clean <%x>\n", + 
tx_ring->queue_index, + IXGBE_READ_REG(hw, IXGBE_TDH(tx_ring->reg_idx)), + IXGBE_READ_REG(hw, IXGBE_TDT(tx_ring->reg_idx)), +- tx_ring->next_to_use, i, ++ tx_ring->next_to_use, i); ++ e_err(drv, "tx_buffer_info[next_to_clean]\n" ++ " time_stamp <%lx>\n" ++ " jiffies <%lx>\n", + tx_ring->tx_buffer_info[i].time_stamp, jiffies); + +- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); ++ netif_stop_subqueue(netdev_ring(tx_ring), ++ ring_queue_index(tx_ring)); + + e_info(probe, + "tx hang %d detected on queue %d, resetting adapter\n", +- adapter->tx_timeout_count + 1, tx_ring->queue_index); ++ adapter->tx_timeout_count + 1, tx_ring->queue_index); + +- /* schedule immediate reset if we believe we hung */ + ixgbe_tx_timeout_reset(adapter); + + /* the adapter is about to reset, no point in enabling stuff */ +@@ -1193,33 +890,44 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector, + total_packets, total_bytes); + + #define TX_WAKE_THRESHOLD (DESC_NEEDED * 2) +- if (unlikely(total_packets && netif_carrier_ok(tx_ring->netdev) && ++ if (unlikely(total_packets && netif_carrier_ok(netdev_ring(tx_ring)) && + (ixgbe_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD))) { + /* Make sure that anybody stopping the queue after this + * sees the new next_to_clean. 
+ */ + smp_mb(); +- if (__netif_subqueue_stopped(tx_ring->netdev, +- tx_ring->queue_index) +- && !test_bit(__IXGBE_DOWN, &adapter->state)) { +- netif_wake_subqueue(tx_ring->netdev, +- tx_ring->queue_index); ++#ifdef HAVE_TX_MQ ++ if (__netif_subqueue_stopped(netdev_ring(tx_ring), ++ ring_queue_index(tx_ring)) ++ && !test_bit(__IXGBE_DOWN, &q_vector->adapter->state)) { ++ netif_wake_subqueue(netdev_ring(tx_ring), ++ ring_queue_index(tx_ring)); ++ ++tx_ring->tx_stats.restart_queue; ++ } ++#else ++ if (netif_queue_stopped(netdev_ring(tx_ring)) && ++ !test_bit(__IXGBE_DOWN, &q_vector->adapter->state)) { ++ netif_wake_queue(netdev_ring(tx_ring)); + ++tx_ring->tx_stats.restart_queue; + } ++#endif + } + + return !!budget; + } + +-#ifdef CONFIG_IXGBE_DCA ++#if IS_ENABLED(CONFIG_DCA) + static void ixgbe_update_tx_dca(struct ixgbe_adapter *adapter, + struct ixgbe_ring *tx_ring, + int cpu) + { + struct ixgbe_hw *hw = &adapter->hw; +- u32 txctrl = dca3_get_tag(tx_ring->dev, cpu); ++ u32 txctrl = 0; + u16 reg_offset; + ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ txctrl = dca3_get_tag(tx_ring->dev, cpu); ++ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + reg_offset = IXGBE_DCA_TXCTRL(tx_ring->reg_idx); +@@ -1251,9 +959,11 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, + int cpu) + { + struct ixgbe_hw *hw = &adapter->hw; +- u32 rxctrl = dca3_get_tag(rx_ring->dev, cpu); ++ u32 rxctrl = 0; + u8 reg_idx = rx_ring->reg_idx; + ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ rxctrl = dca3_get_tag(rx_ring->dev, cpu); + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: +@@ -1270,6 +980,7 @@ static void ixgbe_update_rx_dca(struct ixgbe_adapter *adapter, + * which will cause the DCA tag to be cleared. 
+ */ + rxctrl |= IXGBE_DCA_RXCTRL_DESC_RRO_EN | ++ IXGBE_DCA_RXCTRL_DATA_DCA_EN | + IXGBE_DCA_RXCTRL_DESC_DCA_EN; + + IXGBE_WRITE_REG(hw, IXGBE_DCA_RXCTRL(reg_idx), rxctrl); +@@ -1297,17 +1008,19 @@ out_no_update: + + static void ixgbe_setup_dca(struct ixgbe_adapter *adapter) + { +- int i; +- +- if (!(adapter->flags & IXGBE_FLAG_DCA_ENABLED)) +- return; ++ int v_idx; + + /* always use CB2 mode, difference is masked in the CB driver */ +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, ++ IXGBE_DCA_CTRL_DCA_MODE_CB2); ++ else ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, ++ IXGBE_DCA_CTRL_DCA_DISABLE); + +- for (i = 0; i < adapter->num_q_vectors; i++) { +- adapter->q_vector[i]->cpu = -1; +- ixgbe_update_dca(adapter->q_vector[i]); ++ for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { ++ adapter->q_vector[v_idx]->cpu = -1; ++ ixgbe_update_dca(adapter->q_vector[v_idx]); + } + } + +@@ -1324,12 +1037,12 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) + /* if we're already enabled, don't do it again */ + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + break; +- if (dca_add_requester(dev) == 0) { ++ if (dca_add_requester(dev) == IXGBE_SUCCESS) { + adapter->flags |= IXGBE_FLAG_DCA_ENABLED; +- ixgbe_setup_dca(adapter); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 2); + break; + } +- /* Fall Through since DCA is disabled. 
*/ ++ /* fall through - DCA is disabled */ + case DCA_PROVIDER_REMOVE: + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { + dca_remove_requester(dev); +@@ -1339,21 +1052,41 @@ static int __ixgbe_notify_dca(struct device *dev, void *data) + break; + } + +- return 0; ++ return IXGBE_SUCCESS; + } ++#endif /* CONFIG_DCA */ ++ ++#ifdef NETIF_F_RXHASH ++#define IXGBE_RSS_L4_TYPES_MASK \ ++ ((1ul << IXGBE_RXDADV_RSSTYPE_IPV4_TCP) | \ ++ (1ul << IXGBE_RXDADV_RSSTYPE_IPV4_UDP) | \ ++ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP) | \ ++ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP) | \ ++ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX) | \ ++ (1ul << IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX)) + +-#endif /* CONFIG_IXGBE_DCA */ + static inline void ixgbe_rx_hash(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { +- if (ring->netdev->features & NETIF_F_RXHASH) +- skb_set_hash(skb, +- le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), +- PKT_HASH_TYPE_L3); ++ u16 rss_type; ++ ++ if (!(netdev_ring(ring)->features & NETIF_F_RXHASH)) ++ return; ++ ++ rss_type = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info) & ++ IXGBE_RXDADV_RSSTYPE_MASK; ++ ++ if (!rss_type) ++ return; ++ ++ skb_set_hash(skb, le32_to_cpu(rx_desc->wb.lower.hi_dword.rss), ++ (IXGBE_RSS_L4_TYPES_MASK & (1ul << rss_type)) ? 
++ PKT_HASH_TYPE_L4 : PKT_HASH_TYPE_L3); + } ++#endif /* NETIF_F_RXHASH */ + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /** + * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type + * @ring: structure containing ring specific data +@@ -1371,8 +1104,8 @@ static inline bool ixgbe_rx_is_fcoe(struct ixgbe_ring *ring, + (cpu_to_le16(IXGBE_ETQF_FILTER_FCOE << + IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT))); + } ++#endif /* CONFIG_FCOE */ + +-#endif /* IXGBE_FCOE */ + /** + * ixgbe_rx_checksum - indicate in skb if hw indicated a good cksum + * @ring: structure containing ring specific data +@@ -1383,12 +1116,24 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { ++ __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; ++ bool encap_pkt = false; ++ + skb_checksum_none_assert(skb); + + /* Rx csum disabled */ +- if (!(ring->netdev->features & NETIF_F_RXCSUM)) ++ if (!(netdev_ring(ring)->features & NETIF_F_RXCSUM)) + return; + ++ /* check for VXLAN or Geneve packet type */ ++ if (pkt_info & cpu_to_le16(IXGBE_RXDADV_PKTTYPE_VXLAN)) { ++ encap_pkt = true; ++#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ++ skb->encapsulation = 1; ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ ++ skb->ip_summed = CHECKSUM_NONE; ++ } ++ + /* if IP and error */ + if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_IPCS) && + ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_IPE)) { +@@ -1400,7 +1145,6 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, + return; + + if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_TCPE)) { +- __le16 pkt_info = rx_desc->wb.lower.lo_dword.hs_rss.pkt_info; + + /* + * 82599 errata, UDP frames with a 0 checksum can be marked as +@@ -1416,14 +1160,29 @@ static inline void ixgbe_rx_checksum(struct ixgbe_ring *ring, + + /* It must be a TCP or UDP packet with a valid checksum */ + skb->ip_summed = CHECKSUM_UNNECESSARY; ++ if (encap_pkt) { ++ 
if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_OUTERIPCS)) ++ return; ++ ++ if (ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_ERR_OUTERIPER)) { ++ skb->ip_summed = CHECKSUM_NONE; ++ return; ++ } ++#ifdef HAVE_SKBUFF_CSUM_LEVEL ++ /* If we checked the outer header let the stack know */ ++ skb->csum_level = 1; ++#endif /* HAVE_SKBUFF_CSUM_LEVEL */ ++ } + } + + static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) + { + rx_ring->next_to_use = val; ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + + /* update next to alloc since we have filled the ring */ + rx_ring->next_to_alloc = val; ++#endif + /* + * Force memory writes to complete before letting h/w + * know there are new descriptors to fetch. (Only +@@ -1431,33 +1190,79 @@ static inline void ixgbe_release_rx_desc(struct ixgbe_ring *rx_ring, u32 val) + * such as IA-64). + */ + wmb(); +- ixgbe_write_tail(rx_ring, val); ++ writel(val, rx_ring->tail); ++} ++ ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++static bool ixgbe_alloc_mapped_skb(struct ixgbe_ring *rx_ring, ++ struct ixgbe_rx_buffer *bi) ++{ ++ struct sk_buff *skb = bi->skb; ++ dma_addr_t dma = bi->dma; ++ ++ if (unlikely(dma)) ++ return true; ++ ++ if (likely(!skb)) { ++ skb = netdev_alloc_skb_ip_align(netdev_ring(rx_ring), ++ rx_ring->rx_buf_len); ++ if (unlikely(!skb)) { ++ rx_ring->rx_stats.alloc_rx_buff_failed++; ++ return false; ++ } ++ ++ bi->skb = skb; ++ ++ } ++ ++ dma = dma_map_single(rx_ring->dev, skb->data, ++ rx_ring->rx_buf_len, DMA_FROM_DEVICE); ++ ++ /* ++ * if mapping failed free memory back to system since ++ * there isn't much point in holding memory we can't use ++ */ ++ if (dma_mapping_error(rx_ring->dev, dma)) { ++ dev_kfree_skb_any(skb); ++ bi->skb = NULL; ++ ++ rx_ring->rx_stats.alloc_rx_buff_failed++; ++ return false; ++ } ++ ++ bi->dma = dma; ++ return true; ++} ++ ++#else /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ ++static inline unsigned int ixgbe_rx_offset(struct ixgbe_ring *rx_ring) ++{ ++ return 
ring_uses_build_skb(rx_ring) ? IXGBE_SKB_PAD : 0; + } + + static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *bi) + { + struct page *page = bi->page; +- dma_addr_t dma = bi->dma; ++ dma_addr_t dma; + + /* since we are recycling buffers we should seldom need to alloc */ +- if (likely(dma)) ++ if (likely(page)) + return true; + + /* alloc new page for storage */ +- if (likely(!page)) { +- page = __skb_alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, +- bi->skb, ixgbe_rx_pg_order(rx_ring)); +- if (unlikely(!page)) { +- rx_ring->rx_stats.alloc_rx_page_failed++; +- return false; +- } +- bi->page = page; ++ page = alloc_pages(GFP_ATOMIC | __GFP_COLD | __GFP_COMP, ++ ixgbe_rx_pg_order(rx_ring)); ++ if (unlikely(!page)) { ++ rx_ring->rx_stats.alloc_rx_page_failed++; ++ return false; + } + + /* map page for use */ +- dma = dma_map_page(rx_ring->dev, page, 0, +- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); ++ dma = dma_map_page_attrs(rx_ring->dev, page, 0, ++ ixgbe_rx_pg_size(rx_ring), ++ DMA_FROM_DEVICE, ++ IXGBE_RX_DMA_ATTR); + + /* + * if mapping failed free memory back to system since +@@ -1465,18 +1270,20 @@ static bool ixgbe_alloc_mapped_page(struct ixgbe_ring *rx_ring, + */ + if (dma_mapping_error(rx_ring->dev, dma)) { + __free_pages(page, ixgbe_rx_pg_order(rx_ring)); +- bi->page = NULL; + + rx_ring->rx_stats.alloc_rx_page_failed++; + return false; + } + + bi->dma = dma; +- bi->page_offset = 0; ++ bi->page = page; ++ bi->page_offset = ixgbe_rx_offset(rx_ring); ++ bi->pagecnt_bias = 1; + + return true; + } + ++#endif /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + /** + * ixgbe_alloc_rx_buffers - Replace used receive buffers + * @rx_ring: ring to place buffers on +@@ -1487,6 +1294,9 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) + union ixgbe_adv_rx_desc *rx_desc; + struct ixgbe_rx_buffer *bi; + u16 i = rx_ring->next_to_use; ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ u16 bufsz; ++#endif + + /* nothing 
to do */ + if (!cleaned_count) +@@ -1495,16 +1305,34 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) + rx_desc = IXGBE_RX_DESC(rx_ring, i); + bi = &rx_ring->rx_buffer_info[i]; + i -= rx_ring->count; ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ ++ bufsz = ixgbe_rx_bufsz(rx_ring); ++#endif + + do { ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ if (!ixgbe_alloc_mapped_skb(rx_ring, bi)) ++ break; ++#else + if (!ixgbe_alloc_mapped_page(rx_ring, bi)) + break; + ++ /* sync the buffer for use by the device */ ++ dma_sync_single_range_for_device(rx_ring->dev, bi->dma, ++ bi->page_offset, bufsz, ++ DMA_FROM_DEVICE); ++#endif ++ + /* + * Refresh the desc even if buffer_addrs didn't change + * because each write-back erases this info. + */ ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ rx_desc->read.pkt_addr = cpu_to_le64(bi->dma); ++#else + rx_desc->read.pkt_addr = cpu_to_le64(bi->dma + bi->page_offset); ++#endif + + rx_desc++; + bi++; +@@ -1515,8 +1343,8 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) + i -= rx_ring->count; + } + +- /* clear the hdr_addr for the next_to_use descriptor */ +- rx_desc->read.hdr_addr = 0; ++ /* clear the length for the next_to_use descriptor */ ++ rx_desc->wb.upper.length = 0; + + cleaned_count--; + } while (cleaned_count); +@@ -1527,124 +1355,117 @@ void ixgbe_alloc_rx_buffers(struct ixgbe_ring *rx_ring, u16 cleaned_count) + ixgbe_release_rx_desc(rx_ring, i); + } + ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + /** +- * ixgbe_get_headlen - determine size of header for RSC/LRO/GRO/FCOE +- * @data: pointer to the start of the headers +- * @max_len: total length of section to find headers in ++ * ixgbe_merge_active_tail - merge active tail into lro skb ++ * @tail: pointer to active tail in frag_list + * +- * This function is meant to determine the length of headers that will +- * be recognized by hardware for LRO, GRO, and RSC offloads. 
The main +- * motivation of doing this is to only perform one pull for IPv4 TCP +- * packets so that we can do basic things like calculating the gso_size +- * based on the average data per packet. ++ * This function merges the length and data of an active tail into the ++ * skb containing the frag_list. It resets the tail's pointer to the head, ++ * but it leaves the heads pointer to tail intact. + **/ +-static unsigned int ixgbe_get_headlen(unsigned char *data, +- unsigned int max_len) ++static inline struct sk_buff *ixgbe_merge_active_tail(struct sk_buff *tail) + { +- union { +- unsigned char *network; +- /* l2 headers */ +- struct ethhdr *eth; +- struct vlan_hdr *vlan; +- /* l3 headers */ +- struct iphdr *ipv4; +- struct ipv6hdr *ipv6; +- } hdr; +- __be16 protocol; +- u8 nexthdr = 0; /* default to not TCP */ +- u8 hlen; +- +- /* this should never happen, but better safe than sorry */ +- if (max_len < ETH_HLEN) +- return max_len; +- +- /* initialize network frame pointer */ +- hdr.network = data; ++ struct sk_buff *head = IXGBE_CB(tail)->head; + +- /* set first protocol and move network header forward */ +- protocol = hdr.eth->h_proto; +- hdr.network += ETH_HLEN; ++ if (!head) ++ return tail; + +- /* handle any vlan tag if present */ +- if (protocol == htons(ETH_P_8021Q)) { +- if ((hdr.network - data) > (max_len - VLAN_HLEN)) +- return max_len; ++ head->len += tail->len; ++ head->data_len += tail->len; ++ head->truesize += tail->truesize; + +- protocol = hdr.vlan->h_vlan_encapsulated_proto; +- hdr.network += VLAN_HLEN; +- } +- +- /* handle L3 protocols */ +- if (protocol == htons(ETH_P_IP)) { +- if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) +- return max_len; +- +- /* access ihl as a u8 to avoid unaligned access on ia64 */ +- hlen = (hdr.network[0] & 0x0F) << 2; ++ IXGBE_CB(tail)->head = NULL; + +- /* verify hlen meets minimum size requirements */ +- if (hlen < sizeof(struct iphdr)) +- return hdr.network - data; ++ return head; ++} + +- /* record 
next protocol if header is present */ +- if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) +- nexthdr = hdr.ipv4->protocol; +- } else if (protocol == htons(ETH_P_IPV6)) { +- if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) +- return max_len; ++/** ++ * ixgbe_add_active_tail - adds an active tail into the skb frag_list ++ * @head: pointer to the start of the skb ++ * @tail: pointer to active tail to add to frag_list ++ * ++ * This function adds an active tail to the end of the frag list. This tail ++ * will still be receiving data so we cannot yet ad it's stats to the main ++ * skb. That is done via ixgbe_merge_active_tail. ++ **/ ++static inline void ixgbe_add_active_tail(struct sk_buff *head, ++ struct sk_buff *tail) ++{ ++ struct sk_buff *old_tail = IXGBE_CB(head)->tail; + +- /* record next protocol */ +- nexthdr = hdr.ipv6->nexthdr; +- hlen = sizeof(struct ipv6hdr); +-#ifdef IXGBE_FCOE +- } else if (protocol == htons(ETH_P_FCOE)) { +- if ((hdr.network - data) > (max_len - FCOE_HEADER_LEN)) +- return max_len; +- hlen = FCOE_HEADER_LEN; +-#endif ++ if (old_tail) { ++ ixgbe_merge_active_tail(old_tail); ++ old_tail->next = tail; + } else { +- return hdr.network - data; ++ skb_shinfo(head)->frag_list = tail; + } + +- /* relocate pointer to start of L4 header */ +- hdr.network += hlen; ++ IXGBE_CB(tail)->head = head; ++ IXGBE_CB(head)->tail = tail; ++} ++ ++/** ++ * ixgbe_close_active_frag_list - cleanup pointers on a frag_list skb ++ * @head: pointer to head of an active frag list ++ * ++ * This function will clear the frag_tail_tracker pointer on an active ++ * frag_list and returns true if the pointer was actually set ++ **/ ++static inline bool ixgbe_close_active_frag_list(struct sk_buff *head) ++{ ++ struct sk_buff *tail = IXGBE_CB(head)->tail; ++ ++ if (!tail) ++ return false; + +- /* finally sort out TCP/UDP */ +- if (nexthdr == IPPROTO_TCP) { +- if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) +- return max_len; ++ 
ixgbe_merge_active_tail(tail); + +- /* access doff as a u8 to avoid unaligned access on ia64 */ +- hlen = (hdr.network[12] & 0xF0) >> 2; ++ IXGBE_CB(head)->tail = NULL; + +- /* verify hlen meets minimum size requirements */ +- if (hlen < sizeof(struct tcphdr)) +- return hdr.network - data; ++ return true; ++} + +- hdr.network += hlen; +- } else if (nexthdr == IPPROTO_UDP) { +- if ((hdr.network - data) > (max_len - sizeof(struct udphdr))) +- return max_len; ++#endif ++#ifdef HAVE_VLAN_RX_REGISTER ++/** ++ * ixgbe_receive_skb - Send a completed packet up the stack ++ * @q_vector: structure containing interrupt and ring information ++ * @skb: packet to send up ++ **/ ++static void ixgbe_receive_skb(struct ixgbe_q_vector *q_vector, ++ struct sk_buff *skb) ++{ ++ u16 vlan_tag = IXGBE_CB(skb)->vid; + +- hdr.network += sizeof(struct udphdr); ++#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ++ if (vlan_tag & VLAN_VID_MASK) { ++ /* by placing vlgrp at start of structure we can alias it */ ++ struct vlan_group **vlgrp = netdev_priv(skb->dev); ++ if (!*vlgrp) ++ dev_kfree_skb_any(skb); ++ else if (q_vector->netpoll_rx) ++ vlan_hwaccel_rx(skb, *vlgrp, vlan_tag); ++ else ++ vlan_gro_receive(&q_vector->napi, ++ *vlgrp, vlan_tag, skb); ++ } else { ++#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ ++ if (q_vector->netpoll_rx) ++ netif_rx(skb); ++ else ++ napi_gro_receive(&q_vector->napi, skb); ++#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + } +- +- /* +- * If everything has gone correctly hdr.network should be the +- * data section of the packet and will be the end of the header. +- * If not then it probably represents the end of the last recognized +- * header. 
+- */ +- if ((hdr.network - data) < max_len) +- return hdr.network - data; +- else +- return max_len; ++#endif /* NETIF_F_HW_VLAN_TX || NETIF_F_HW_VLAN_CTAG_TX */ + } + +-static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, ++#endif /* HAVE_VLAN_RX_REGISTER */ ++#ifdef NETIF_F_GSO ++static void ixgbe_set_rsc_gso_size(struct ixgbe_ring __maybe_unused *ring, + struct sk_buff *skb) + { +- u16 hdr_len = skb_headlen(skb); ++ u16 hdr_len = eth_get_headlen(skb->data, skb_headlen(skb)); + + /* set gso_size to avoid messing up TCP MSS */ + skb_shinfo(skb)->gso_size = DIV_ROUND_UP((skb->len - hdr_len), +@@ -1652,6 +1473,7 @@ static void ixgbe_set_rsc_gso_size(struct ixgbe_ring *ring, + skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4; + } + ++#endif /* NETIF_F_GSO */ + static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, + struct sk_buff *skb) + { +@@ -1662,12 +1484,35 @@ static void ixgbe_update_rsc_stats(struct ixgbe_ring *rx_ring, + rx_ring->rx_stats.rsc_count += IXGBE_CB(skb)->append_cnt; + rx_ring->rx_stats.rsc_flush++; + ++#ifdef NETIF_F_GSO + ixgbe_set_rsc_gso_size(rx_ring, skb); + ++#endif + /* gso_size is computed using append_cnt so always clear it last */ + IXGBE_CB(skb)->append_cnt = 0; + } + ++static void ixgbe_rx_vlan(struct ixgbe_ring *ring, ++ union ixgbe_adv_rx_desc *rx_desc, ++ struct sk_buff *skb) ++{ ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ if ((netdev_ring(ring)->features & NETIF_F_HW_VLAN_CTAG_RX) && ++#else ++ if ((netdev_ring(ring)->features & NETIF_F_HW_VLAN_RX) && ++#endif ++ ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) ++#ifndef HAVE_VLAN_RX_REGISTER ++ __vlan_hwaccel_put_tag(skb, ++ htons(ETH_P_8021Q), ++ le16_to_cpu(rx_desc->wb.upper.vlan)); ++#else ++ IXGBE_CB(skb)->vid = le16_to_cpu(rx_desc->wb.upper.vlan); ++ else ++ IXGBE_CB(skb)->vid = 0; ++#endif ++} ++ + /** + * ixgbe_process_skb_fields - Populate skb header fields from Rx descriptor + * @rx_ring: rx descriptor ring packet is being transacted on +@@ -1682,39 +1527,53 @@ static void 
ixgbe_process_skb_fields(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { +- struct net_device *dev = rx_ring->netdev; ++#ifdef HAVE_PTP_1588_CLOCK ++ u32 flags = rx_ring->q_vector->adapter->flags; + ++#endif + ixgbe_update_rsc_stats(rx_ring, skb); + ++#ifdef NETIF_F_RXHASH + ixgbe_rx_hash(rx_ring, rx_desc, skb); + ++#endif /* NETIF_F_RXHASH */ + ixgbe_rx_checksum(rx_ring, rx_desc, skb); ++#ifdef HAVE_PTP_1588_CLOCK ++ if (unlikely(flags & IXGBE_FLAG_RX_HWTSTAMP_ENABLED)) ++ ixgbe_ptp_rx_hwtstamp(rx_ring, rx_desc, skb); + +- if (unlikely(ixgbe_test_staterr(rx_desc, IXGBE_RXDADV_STAT_TS))) +- ixgbe_ptp_rx_hwtstamp(rx_ring->q_vector->adapter, skb); +- +- if ((dev->features & NETIF_F_HW_VLAN_CTAG_RX) && +- ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_VP)) { +- u16 vid = le16_to_cpu(rx_desc->wb.upper.vlan); +- __vlan_hwaccel_put_tag(skb, htons(ETH_P_8021Q), vid); +- } ++#endif ++ ixgbe_rx_vlan(rx_ring, rx_desc, skb); + +- skb_record_rx_queue(skb, rx_ring->queue_index); ++ skb_record_rx_queue(skb, ring_queue_index(rx_ring)); + +- skb->protocol = eth_type_trans(skb, dev); ++ skb->protocol = eth_type_trans(skb, netdev_ring(rx_ring)); + } + + static void ixgbe_rx_skb(struct ixgbe_q_vector *q_vector, ++ struct ixgbe_ring *rx_ring, ++ union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { +- struct ixgbe_adapter *adapter = q_vector->adapter; ++#ifdef HAVE_NDO_BUSY_POLL ++ skb_mark_napi_id(skb, &q_vector->napi); + +- if (ixgbe_qv_busy_polling(q_vector)) ++ if (ixgbe_qv_busy_polling(q_vector) || q_vector->netpoll_rx) { + netif_receive_skb(skb); +- else if (!(adapter->flags & IXGBE_FLAG_IN_NETPOLL)) ++ /* exit early if we busy polled */ ++ return; ++ } ++#endif ++ ++#ifdef HAVE_VLAN_RX_REGISTER ++ ixgbe_receive_skb(q_vector, skb); ++#else + napi_gro_receive(&q_vector->napi, skb); +- else +- netif_rx(skb); ++#endif ++#ifndef NETIF_F_GRO ++ ++ netdev_ring(rx_ring)->last_rx = jiffies; ++#endif + } + + /** +@@ -1732,6 +1591,9 @@ static 
bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ struct sk_buff *next_skb; ++#endif + u32 ntc = rx_ring->next_to_clean + 1; + + /* fetch, update, and store next to clean */ +@@ -1763,15 +1625,22 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, + return false; + + /* place skb in next buffer to be received */ ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ next_skb = rx_ring->rx_buffer_info[ntc].skb; ++ ++ ixgbe_add_active_tail(skb, next_skb); ++ IXGBE_CB(next_skb)->head = skb; ++#else + rx_ring->rx_buffer_info[ntc].skb = skb; ++#endif + rx_ring->rx_stats.non_eop_descs++; + + return true; + } + ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT + /** + * ixgbe_pull_tail - ixgbe specific version of skb_pull_tail +- * @rx_ring: rx descriptor ring packet is being transacted on + * @skb: pointer to current skb being adjusted + * + * This function is an ixgbe specific version of __pskb_pull_tail. The +@@ -1781,8 +1650,7 @@ static bool ixgbe_is_non_eop(struct ixgbe_ring *rx_ring, + * As a result we can do things like drop a frag and maintain an accurate + * truesize for the skb. + */ +-static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, +- struct sk_buff *skb) ++static void ixgbe_pull_tail(struct sk_buff *skb) + { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + unsigned char *va; +@@ -1799,7 +1667,7 @@ static void ixgbe_pull_tail(struct ixgbe_ring *rx_ring, + * we need the header to contain the greater of either ETH_HLEN or + * 60 bytes if the skb->len is less than 60 for skb_pad. 
+ */ +- pull_len = ixgbe_get_headlen(va, IXGBE_RX_HDR_SIZE); ++ pull_len = eth_get_headlen(va, IXGBE_RX_HDR_SIZE); + + /* align pull length to size of long to optimize memcpy performance */ + skb_copy_to_linear_data(skb, va, ALIGN(pull_len, sizeof(long))); +@@ -1826,19 +1694,19 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, + { + /* if the page was released unmap it, else just sync our portion */ + if (unlikely(IXGBE_CB(skb)->page_released)) { +- dma_unmap_page(rx_ring->dev, IXGBE_CB(skb)->dma, +- ixgbe_rx_pg_size(rx_ring), DMA_FROM_DEVICE); +- IXGBE_CB(skb)->page_released = false; ++ dma_unmap_page_attrs(rx_ring->dev, IXGBE_CB(skb)->dma, ++ ixgbe_rx_pg_size(rx_ring), ++ DMA_FROM_DEVICE, ++ IXGBE_RX_DMA_ATTR); + } else { + struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0]; + + dma_sync_single_range_for_cpu(rx_ring->dev, + IXGBE_CB(skb)->dma, + frag->page_offset, +- ixgbe_rx_bufsz(rx_ring), ++ skb_frag_size(frag), + DMA_FROM_DEVICE); + } +- IXGBE_CB(skb)->dma = 0; + } + + /** +@@ -1859,38 +1727,30 @@ static void ixgbe_dma_sync_frag(struct ixgbe_ring *rx_ring, + * + * Returns true if an error was encountered and skb was freed. 
+ **/ +-static bool ixgbe_cleanup_headers(struct ixgbe_ring *rx_ring, ++static bool ixgbe_cleanup_headers(struct ixgbe_ring __maybe_unused *rx_ring, + union ixgbe_adv_rx_desc *rx_desc, + struct sk_buff *skb) + { +- struct net_device *netdev = rx_ring->netdev; +- + /* verify that the packet does not have any known errors */ + if (unlikely(ixgbe_test_staterr(rx_desc, +- IXGBE_RXDADV_ERR_FRAME_ERR_MASK) && +- !(netdev->features & NETIF_F_RXALL))) { ++ IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { + dev_kfree_skb_any(skb); + return true; + } + + /* place header in linear portion of buffer */ +- if (skb_is_nonlinear(skb)) +- ixgbe_pull_tail(rx_ring, skb); ++ if (!skb_headlen(skb)) ++ ixgbe_pull_tail(skb); + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* do not attempt to pad FCoE Frames as this will disrupt DDP */ + if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) + return false; +- + #endif +- /* if skb_pad returns an error the skb was freed */ +- if (unlikely(skb->len < 60)) { +- int pad_len = 60 - skb->len; + +- if (skb_pad(skb, pad_len)) +- return true; +- __skb_put(skb, pad_len); +- } ++ /* if eth_skb_pad returns an error the skb was freed */ ++ if (eth_skb_pad(skb)) ++ return true; + + return false; + } +@@ -1914,16 +1774,70 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, + nta++; + rx_ring->next_to_alloc = (nta < rx_ring->count) ? nta : 0; + +- /* transfer page from old buffer to new buffer */ +- new_buff->page = old_buff->page; +- new_buff->dma = old_buff->dma; +- new_buff->page_offset = old_buff->page_offset; ++ /* Transfer page from old buffer to new buffer. ++ * Move each member individually to avoid possible store ++ * forwarding stalls and unnecessary copy of skb. 
++ */ ++ new_buff->dma = old_buff->dma; ++ new_buff->page = old_buff->page; ++ new_buff->page_offset = old_buff->page_offset; ++ new_buff->pagecnt_bias = old_buff->pagecnt_bias; ++} + +- /* sync the buffer for use by the device */ +- dma_sync_single_range_for_device(rx_ring->dev, new_buff->dma, +- new_buff->page_offset, +- ixgbe_rx_bufsz(rx_ring), +- DMA_FROM_DEVICE); ++static inline bool ixgbe_page_is_reserved(struct page *page) ++{ ++ return (page_to_nid(page) != numa_mem_id()) || page_is_pfmemalloc(page); ++} ++ ++static bool ixgbe_can_reuse_rx_page(struct ixgbe_rx_buffer *rx_buffer) ++{ ++ unsigned int pagecnt_bias = rx_buffer->pagecnt_bias; ++ struct page *page = rx_buffer->page; ++ ++ /* avoid re-using remote pages */ ++ if (unlikely(ixgbe_page_is_reserved(page))) ++ return false; ++ ++#if (PAGE_SIZE < 8192) ++ /* if we are only owner of page we can reuse it */ ++#ifdef HAVE_PAGE_COUNT_BULK_UPDATE ++ if (unlikely((page_ref_count(page) - pagecnt_bias) > 1)) ++#else ++ if (unlikely((page_count(page) - pagecnt_bias) > 1)) ++#endif ++ return false; ++#else ++ /* The last offset is a bit aggressive in that we assume the ++ * worst case of FCoE being enabled and using a 3K buffer. ++ * However this should have minimal impact as the 1K extra is ++ * still less than one buffer in size. ++ */ ++#define IXGBE_LAST_OFFSET \ ++ (SKB_WITH_OVERHEAD(PAGE_SIZE) - IXGBE_RXBUFFER_3K) ++ if (rx_buffer->page_offset > IXGBE_LAST_OFFSET) ++ return false; ++#endif ++ ++#ifdef HAVE_PAGE_COUNT_BULK_UPDATE ++ /* If we have drained the page fragment pool we need to update ++ * the pagecnt_bias and page count so that we fully restock the ++ * number of references the driver holds. ++ */ ++ if (unlikely(!pagecnt_bias)) { ++ page_ref_add(page, USHRT_MAX); ++ rx_buffer->pagecnt_bias = USHRT_MAX; ++ } ++#else ++ /* Even if we own the page, we are not allowed to use atomic_set() ++ * This would break get_page_unless_zero() users. 
++ */ ++ if (likely(!pagecnt_bias)) { ++ page_ref_inc(page); ++ rx_buffer->pagecnt_bias = 1; ++ } ++#endif ++ ++ return true; + } + + /** +@@ -1941,156 +1855,183 @@ static void ixgbe_reuse_rx_page(struct ixgbe_ring *rx_ring, + * The function will then update the page offset if necessary and return + * true if the buffer can be reused by the adapter. + **/ +-static bool ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, ++static void ixgbe_add_rx_frag(struct ixgbe_ring *rx_ring, + struct ixgbe_rx_buffer *rx_buffer, +- union ixgbe_adv_rx_desc *rx_desc, +- struct sk_buff *skb) ++ struct sk_buff *skb, ++ unsigned int size) + { +- struct page *page = rx_buffer->page; +- unsigned int size = le16_to_cpu(rx_desc->wb.upper.length); + #if (PAGE_SIZE < 8192) +- unsigned int truesize = ixgbe_rx_bufsz(rx_ring); ++ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; + #else +- unsigned int truesize = ALIGN(size, L1_CACHE_BYTES); +- unsigned int last_offset = ixgbe_rx_pg_size(rx_ring) - +- ixgbe_rx_bufsz(rx_ring); ++ unsigned int truesize = ring_uses_build_skb(rx_ring) ? 
++ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size) : ++ SKB_DATA_ALIGN(size); + #endif + +- if ((size <= IXGBE_RX_HDR_SIZE) && !skb_is_nonlinear(skb)) { +- unsigned char *va = page_address(page) + rx_buffer->page_offset; +- +- memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); +- +- /* we can reuse buffer as-is, just make sure it is local */ +- if (likely(page_to_nid(page) == numa_node_id())) +- return true; +- +- /* this page cannot be reused so discard it */ +- put_page(page); +- return false; +- } +- +- skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, page, ++ skb_add_rx_frag(skb, skb_shinfo(skb)->nr_frags, rx_buffer->page, + rx_buffer->page_offset, size, truesize); + +- /* avoid re-using remote pages */ +- if (unlikely(page_to_nid(page) != numa_node_id())) +- return false; +- + #if (PAGE_SIZE < 8192) +- /* if we are only owner of page we can reuse it */ +- if (unlikely(page_count(page) != 1)) +- return false; +- +- /* flip page offset to other buffer */ + rx_buffer->page_offset ^= truesize; +- +- /* +- * since we are the only owner of the page and we need to +- * increment it, just set the value to 2 in order to avoid +- * an unecessary locked operation +- */ +- atomic_set(&page->_count, 2); + #else +- /* move offset up to the next cache line */ + rx_buffer->page_offset += truesize; +- +- if (rx_buffer->page_offset > last_offset) +- return false; +- +- /* bump ref count on page before it is given to the stack */ +- get_page(page); + #endif +- +- return true; + } + +-static struct sk_buff *ixgbe_fetch_rx_buffer(struct ixgbe_ring *rx_ring, +- union ixgbe_adv_rx_desc *rx_desc) ++static struct ixgbe_rx_buffer * ++ixgbe_get_rx_buffer(struct ixgbe_ring *rx_ring, ++ union ixgbe_adv_rx_desc *rx_desc, struct sk_buff **skb, ++ const unsigned int size) + { + struct ixgbe_rx_buffer *rx_buffer; +- struct sk_buff *skb; +- struct page *page; + + rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean]; +- page = rx_buffer->page; +- prefetchw(page); ++ 
prefetchw(rx_buffer->page); ++ *skb = rx_buffer->skb; + +- skb = rx_buffer->skb; ++ /* Delay unmapping of the first packet. It carries the header ++ * information, HW may still access the header after the writeback. ++ * Only unmap it when EOP is reached ++ */ ++ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) { ++ if (!*skb) ++ goto skip_sync; ++ } else { ++ if (*skb) ++ ixgbe_dma_sync_frag(rx_ring, *skb); ++ } + +- if (likely(!skb)) { +- void *page_addr = page_address(page) + +- rx_buffer->page_offset; ++ /* we are reusing so sync this buffer for CPU use */ ++ dma_sync_single_range_for_cpu(rx_ring->dev, ++ rx_buffer->dma, ++ rx_buffer->page_offset, ++ size, ++ DMA_FROM_DEVICE); ++skip_sync: ++ rx_buffer->pagecnt_bias--; + +- /* prefetch first cache line of first page */ +- prefetch(page_addr); +-#if L1_CACHE_BYTES < 128 +- prefetch(page_addr + L1_CACHE_BYTES); +-#endif ++ return rx_buffer; ++} + +- /* allocate a skb to store the frags */ +- skb = netdev_alloc_skb_ip_align(rx_ring->netdev, +- IXGBE_RX_HDR_SIZE); +- if (unlikely(!skb)) { +- rx_ring->rx_stats.alloc_rx_buff_failed++; +- return NULL; ++static void ixgbe_put_rx_buffer(struct ixgbe_ring *rx_ring, ++ struct ixgbe_rx_buffer *rx_buffer, ++ struct sk_buff *skb) ++{ ++ if (ixgbe_can_reuse_rx_page(rx_buffer)) { ++ /* hand second half of page back to the ring */ ++ ixgbe_reuse_rx_page(rx_ring, rx_buffer); ++ } else { ++ if (IXGBE_CB(skb)->dma == rx_buffer->dma) { ++ /* the page has been released from the ring */ ++ IXGBE_CB(skb)->page_released = true; ++ } else { ++ /* we are not reusing the buffer so unmap it */ ++ dma_unmap_page_attrs(rx_ring->dev, rx_buffer->dma, ++ ixgbe_rx_pg_size(rx_ring), ++ DMA_FROM_DEVICE, ++ IXGBE_RX_DMA_ATTR); + } ++ __page_frag_cache_drain(rx_buffer->page, ++ rx_buffer->pagecnt_bias); ++ } + +- /* +- * we will be copying header into skb->data in +- * pskb_may_pull so it is in our interest to prefetch +- * it now to avoid a possible cache miss +- */ +- prefetchw(skb->data); ++ 
/* clear contents of rx_buffer */ ++ rx_buffer->page = NULL; ++ rx_buffer->skb = NULL; ++} + +- /* +- * Delay unmapping of the first packet. It carries the +- * header information, HW may still access the header +- * after the writeback. Only unmap it when EOP is +- * reached +- */ +- if (likely(ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP))) +- goto dma_sync; ++static struct sk_buff *ixgbe_construct_skb(struct ixgbe_ring *rx_ring, ++ struct ixgbe_rx_buffer *rx_buffer, ++ union ixgbe_adv_rx_desc *rx_desc, ++ unsigned int size) ++{ ++ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; ++#if (PAGE_SIZE < 8192) ++ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; ++#else ++ unsigned int truesize = SKB_DATA_ALIGN(size); ++#endif ++ struct sk_buff *skb; + +- IXGBE_CB(skb)->dma = rx_buffer->dma; +- } else { +- if (ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) +- ixgbe_dma_sync_frag(rx_ring, skb); ++ /* prefetch first cache line of first page */ ++ prefetch(va); ++#if L1_CACHE_BYTES < 128 ++ prefetch(va + L1_CACHE_BYTES); ++#endif + +-dma_sync: +- /* we are reusing so sync this buffer for CPU use */ +- dma_sync_single_range_for_cpu(rx_ring->dev, +- rx_buffer->dma, +- rx_buffer->page_offset, +- ixgbe_rx_bufsz(rx_ring), +- DMA_FROM_DEVICE); +- } ++ /* allocate a skb to store the frags */ ++ skb = napi_alloc_skb(&rx_ring->q_vector->napi, IXGBE_RX_HDR_SIZE); ++ if (unlikely(!skb)) ++ return NULL; + +- /* pull page into skb */ +- if (ixgbe_add_rx_frag(rx_ring, rx_buffer, rx_desc, skb)) { +- /* hand second half of page back to the ring */ +- ixgbe_reuse_rx_page(rx_ring, rx_buffer); +- } else if (IXGBE_CB(skb)->dma == rx_buffer->dma) { +- /* the page has been released from the ring */ +- IXGBE_CB(skb)->page_released = true; ++ if (size > IXGBE_RX_HDR_SIZE) { ++ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) ++ IXGBE_CB(skb)->dma = rx_buffer->dma; ++ ++ skb_add_rx_frag(skb, 0, rx_buffer->page, ++ rx_buffer->page_offset, ++ size, truesize); ++#if 
(PAGE_SIZE < 8192) ++ rx_buffer->page_offset ^= truesize; ++#else ++ rx_buffer->page_offset += truesize; ++#endif + } else { +- /* we are not reusing the buffer so unmap it */ +- dma_unmap_page(rx_ring->dev, rx_buffer->dma, +- ixgbe_rx_pg_size(rx_ring), +- DMA_FROM_DEVICE); ++ memcpy(__skb_put(skb, size), va, ALIGN(size, sizeof(long))); ++ rx_buffer->pagecnt_bias++; + } + +- /* clear contents of buffer_info */ +- rx_buffer->skb = NULL; +- rx_buffer->dma = 0; +- rx_buffer->page = NULL; +- + return skb; + } + +-/** +- * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf +- * @q_vector: structure containing interrupt and ring information ++#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC ++static struct sk_buff *ixgbe_build_skb(struct ixgbe_ring *rx_ring, ++ struct ixgbe_rx_buffer *rx_buffer, ++ union ixgbe_adv_rx_desc *rx_desc, ++ unsigned int size) ++{ ++ void *va = page_address(rx_buffer->page) + rx_buffer->page_offset; ++#if (PAGE_SIZE < 8192) ++ unsigned int truesize = ixgbe_rx_pg_size(rx_ring) / 2; ++#else ++ unsigned int truesize = SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + ++ SKB_DATA_ALIGN(IXGBE_SKB_PAD + size); ++#endif ++ struct sk_buff *skb; ++ ++ /* prefetch first cache line of first page */ ++ prefetch(va); ++#if L1_CACHE_BYTES < 128 ++ prefetch(va + L1_CACHE_BYTES); ++#endif ++ ++ /* build an skb around the page buffer */ ++ skb = build_skb(va - IXGBE_SKB_PAD, truesize); ++ if (unlikely(!skb)) ++ return NULL; ++ ++ /* update pointers within the skb to store the data */ ++ skb_reserve(skb, IXGBE_SKB_PAD); ++ __skb_put(skb, size); ++ ++ /* record DMA address if this is the start of a chain of buffers */ ++ if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_EOP)) ++ IXGBE_CB(skb)->dma = rx_buffer->dma; ++ ++ /* update buffer offset */ ++#if (PAGE_SIZE < 8192) ++ rx_buffer->page_offset ^= truesize; ++#else ++ rx_buffer->page_offset += truesize; ++#endif ++ ++ return skb; ++} ++ ++#endif /* HAVE_SWIOTLB_SKIP_CPU_SYNC */ ++/** ++ * 
ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - bounce buf ++ * @q_vector: structure containing interrupt and ring information + * @rx_ring: rx descriptor ring to transact packets on + * @budget: Total limit on number of packets to process + * +@@ -2099,23 +2040,24 @@ dma_sync: + * expensive overhead for IOMMU access this provides a means of avoiding + * it by maintaining the mapping of the page to the syste. + * +- * Returns amount of work completed ++ * Returns amount of work completed. + **/ + static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + struct ixgbe_ring *rx_ring, +- const int budget) ++ int budget) + { + unsigned int total_rx_bytes = 0, total_rx_packets = 0; +-#ifdef IXGBE_FCOE +- struct ixgbe_adapter *adapter = q_vector->adapter; ++#if IS_ENABLED(CONFIG_FCOE) + int ddp_bytes; + unsigned int mss = 0; +-#endif /* IXGBE_FCOE */ ++#endif /* CONFIG_FCOE */ + u16 cleaned_count = ixgbe_desc_unused(rx_ring); + + while (likely(total_rx_packets < budget)) { + union ixgbe_adv_rx_desc *rx_desc; ++ struct ixgbe_rx_buffer *rx_buffer; + struct sk_buff *skb; ++ unsigned int size; + + /* return some buffers to hardware, one at a time is too slow */ + if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { +@@ -2124,24 +2066,38 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + } + + rx_desc = IXGBE_RX_DESC(rx_ring, rx_ring->next_to_clean); +- +- if (!ixgbe_test_staterr(rx_desc, IXGBE_RXD_STAT_DD)) ++ size = le16_to_cpu(rx_desc->wb.upper.length); ++ if (!size) + break; + +- /* +- * This memory barrier is needed to keep us from reading ++ /* This memory barrier is needed to keep us from reading + * any other fields out of the rx_desc until we know the +- * RXD_STAT_DD bit is set ++ * descriptor has been written back + */ +- rmb(); ++ dma_rmb(); ++ ++ rx_buffer = ixgbe_get_rx_buffer(rx_ring, rx_desc, &skb, size); + + /* retrieve a buffer from the ring */ +- skb = ixgbe_fetch_rx_buffer(rx_ring, rx_desc); ++ if (skb) ++ 
ixgbe_add_rx_frag(rx_ring, rx_buffer, skb, size); ++#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC ++ else if (ring_uses_build_skb(rx_ring)) ++ skb = ixgbe_build_skb(rx_ring, rx_buffer, ++ rx_desc, size); ++#endif ++ else ++ skb = ixgbe_construct_skb(rx_ring, rx_buffer, rx_desc, ++ size); + + /* exit if we failed to retrieve a buffer */ +- if (!skb) ++ if (!skb) { ++ rx_ring->rx_stats.alloc_rx_buff_failed++; ++ rx_buffer->pagecnt_bias++; + break; ++ } + ++ ixgbe_put_rx_buffer(rx_ring, rx_buffer, skb); + cleaned_count++; + + /* place incomplete frames back on ring for completion */ +@@ -2158,14 +2114,15 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + /* populate checksum, timestamp, VLAN, and protocol */ + ixgbe_process_skb_fields(rx_ring, rx_desc, skb); + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* if ddp, not passing to ULD unless for FCP_RSP or error */ + if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { +- ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb); ++ ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter, ++ rx_desc, skb); + /* include DDPed FCoE data */ + if (ddp_bytes > 0) { + if (!mss) { +- mss = rx_ring->netdev->mtu - ++ mss = netdev_ring(rx_ring)->mtu - + sizeof(struct fcoe_hdr) - + sizeof(struct fc_frame_header) - + sizeof(struct fcoe_crc_eof); +@@ -2178,13 +2135,15 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + } + if (!ddp_bytes) { + dev_kfree_skb_any(skb); ++#ifndef NETIF_F_GRO ++ netdev_ring(rx_ring)->last_rx = jiffies; ++#endif + continue; + } + } ++#endif /* CONFIG_FCOE */ + +-#endif /* IXGBE_FCOE */ +- skb_mark_napi_id(skb, &q_vector->napi); +- ixgbe_rx_skb(q_vector, skb); ++ ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); + + /* update budget accounting */ + total_rx_packets++; +@@ -2197,15 +2156,168 @@ static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, + q_vector->rx.total_packets += total_rx_packets; + q_vector->rx.total_bytes += total_rx_bytes; + ++ return total_rx_packets; ++} ++ ++#else /* 
CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ ++/** ++ * ixgbe_clean_rx_irq - Clean completed descriptors from Rx ring - legacy ++ * @q_vector: structure containing interrupt and ring information ++ * @rx_ring: rx descriptor ring to transact packets on ++ * @budget: Total limit on number of packets to process ++ * ++ * This function provides a legacy approach to Rx interrupt ++ * handling. This version will perform better on systems with a low cost ++ * dma mapping API. ++ * ++ * Returns amount of work completed. ++ **/ ++static int ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector, ++ struct ixgbe_ring *rx_ring, ++ int budget) ++{ ++ unsigned int total_rx_bytes = 0, total_rx_packets = 0; ++#if IS_ENABLED(CONFIG_FCOE) ++ int ddp_bytes; ++ unsigned int mss = 0; ++#endif /* CONFIG_FCOE */ ++ u16 len = 0; ++ u16 cleaned_count = ixgbe_desc_unused(rx_ring); ++ ++ while (likely(total_rx_packets < budget)) { ++ struct ixgbe_rx_buffer *rx_buffer; ++ union ixgbe_adv_rx_desc *rx_desc; ++ struct sk_buff *skb; ++ u16 ntc; ++ ++ /* return some buffers to hardware, one at a time is too slow */ ++ if (cleaned_count >= IXGBE_RX_BUFFER_WRITE) { ++ ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); ++ cleaned_count = 0; ++ } ++ ++ ntc = rx_ring->next_to_clean; ++ rx_desc = IXGBE_RX_DESC(rx_ring, ntc); ++ rx_buffer = &rx_ring->rx_buffer_info[ntc]; ++ ++ if (!rx_desc->wb.upper.length) ++ break; ++ ++ /* This memory barrier is needed to keep us from reading ++ * any other fields out of the rx_desc until we know the ++ * descriptor has been written back ++ */ ++ dma_rmb(); ++ ++ skb = rx_buffer->skb; ++ ++ prefetch(skb->data); ++ ++ len = le16_to_cpu(rx_desc->wb.upper.length); ++ /* pull the header of the skb in */ ++ __skb_put(skb, len); ++ ++ /* ++ * Delay unmapping of the first packet. It carries the ++ * header information, HW may still access the header after ++ * the writeback. 
Only unmap it when EOP is reached ++ */ ++ if (!IXGBE_CB(skb)->head) { ++ IXGBE_CB(skb)->dma = rx_buffer->dma; ++ } else { ++ skb = ixgbe_merge_active_tail(skb); ++ dma_unmap_single(rx_ring->dev, ++ rx_buffer->dma, ++ rx_ring->rx_buf_len, ++ DMA_FROM_DEVICE); ++ } ++ ++ /* clear skb reference in buffer info structure */ ++ rx_buffer->skb = NULL; ++ rx_buffer->dma = 0; ++ ++ cleaned_count++; ++ ++ if (ixgbe_is_non_eop(rx_ring, rx_desc, skb)) ++ continue; ++ ++ dma_unmap_single(rx_ring->dev, ++ IXGBE_CB(skb)->dma, ++ rx_ring->rx_buf_len, ++ DMA_FROM_DEVICE); ++ ++ IXGBE_CB(skb)->dma = 0; ++ ++ if (ixgbe_close_active_frag_list(skb) && ++ !IXGBE_CB(skb)->append_cnt) { ++ /* if we got here without RSC the packet is invalid */ ++ dev_kfree_skb_any(skb); ++ continue; ++ } ++ ++ /* ERR_MASK will only have valid bits if EOP set */ ++ if (unlikely(ixgbe_test_staterr(rx_desc, ++ IXGBE_RXDADV_ERR_FRAME_ERR_MASK))) { ++ dev_kfree_skb_any(skb); ++ continue; ++ } ++ ++ /* probably a little skewed due to removing CRC */ ++ total_rx_bytes += skb->len; ++ ++ /* populate checksum, timestamp, VLAN, and protocol */ ++ ixgbe_process_skb_fields(rx_ring, rx_desc, skb); ++ ++#if IS_ENABLED(CONFIG_FCOE) ++ /* if ddp, not passing to ULD unless for FCP_RSP or error */ ++ if (ixgbe_rx_is_fcoe(rx_ring, rx_desc)) { ++ ddp_bytes = ixgbe_fcoe_ddp(q_vector->adapter, ++ rx_desc, skb); ++ /* include DDPed FCoE data */ ++ if (ddp_bytes > 0) { ++ if (!mss) { ++ mss = netdev_ring(rx_ring)->mtu - ++ sizeof(struct fcoe_hdr) - ++ sizeof(struct fc_frame_header) - ++ sizeof(struct fcoe_crc_eof); ++ if (mss > 512) ++ mss &= ~511; ++ } ++ total_rx_bytes += ddp_bytes; ++ total_rx_packets += DIV_ROUND_UP(ddp_bytes, ++ mss); ++ } ++ if (!ddp_bytes) { ++ dev_kfree_skb_any(skb); ++#ifndef NETIF_F_GRO ++ netdev_ring(rx_ring)->last_rx = jiffies; ++#endif ++ continue; ++ } ++ } ++ ++#endif /* CONFIG_FCOE */ ++ ixgbe_rx_skb(q_vector, rx_ring, rx_desc, skb); ++ ++ /* update budget accounting */ ++ total_rx_packets++; ++ 
} ++ ++ rx_ring->stats.packets += total_rx_packets; ++ rx_ring->stats.bytes += total_rx_bytes; ++ q_vector->rx.total_packets += total_rx_packets; ++ q_vector->rx.total_bytes += total_rx_bytes; ++ + if (cleaned_count) + ixgbe_alloc_rx_buffers(rx_ring, cleaned_count); + + return total_rx_packets; + } + +-#ifdef CONFIG_NET_RX_BUSY_POLL ++#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ ++#ifdef HAVE_NDO_BUSY_POLL + /* must be called with local_bh_disable()d */ +-static int ixgbe_low_latency_recv(struct napi_struct *napi) ++static int ixgbe_busy_poll_recv(struct napi_struct *napi) + { + struct ixgbe_q_vector *q_vector = + container_of(napi, struct ixgbe_q_vector, napi); +@@ -2235,8 +2347,8 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi) + + return found; + } +-#endif /* CONFIG_NET_RX_BUSY_POLL */ + ++#endif /* HAVE_NDO_BUSY_POLL */ + /** + * ixgbe_configure_msix - Configure MSI-X hardware + * @adapter: board private structure +@@ -2246,12 +2358,11 @@ static int ixgbe_low_latency_recv(struct napi_struct *napi) + **/ + static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) + { +- struct ixgbe_q_vector *q_vector; + int v_idx; + u32 mask; + + /* Populate MSIX to EITR Select */ +- if (adapter->num_vfs > 32) { ++ if (adapter->num_vfs >= 32) { + u32 eitrsel = (1 << (adapter->num_vfs - 32)) - 1; + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EITRSEL, eitrsel); + } +@@ -2261,8 +2372,8 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) + * corresponding register. 
+ */ + for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) { ++ struct ixgbe_q_vector *q_vector = adapter->q_vector[v_idx]; + struct ixgbe_ring *ring; +- q_vector = adapter->q_vector[v_idx]; + + ixgbe_for_each_ring(ring, q_vector->rx) + ixgbe_set_ivar(adapter, 0, ring->reg_idx, v_idx); +@@ -2280,6 +2391,9 @@ static void ixgbe_configure_msix(struct ixgbe_adapter *adapter) + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + ixgbe_set_ivar(adapter, -1, 1, v_idx); + break; + default: +@@ -2333,29 +2447,31 @@ static void ixgbe_update_itr(struct ixgbe_q_vector *q_vector, + /* simple throttlerate management + * 0-10MB/s lowest (100000 ints/s) + * 10-20MB/s low (20000 ints/s) +- * 20-1249MB/s bulk (8000 ints/s) ++ * 20-1249MB/s bulk (12000 ints/s) + */ + /* what was last interrupt timeslice? */ + timepassed_us = q_vector->itr >> 2; + if (timepassed_us == 0) + return; +- + bytes_perint = bytes / timepassed_us; /* bytes/usec */ + + switch (itr_setting) { + case lowest_latency: +- if (bytes_perint > 10) ++ if (bytes_perint > 10) { + itr_setting = low_latency; ++ } + break; + case low_latency: +- if (bytes_perint > 20) ++ if (bytes_perint > 20) { + itr_setting = bulk_latency; +- else if (bytes_perint <= 10) ++ } else if (bytes_perint <= 10) { + itr_setting = lowest_latency; ++ } + break; + case bulk_latency: +- if (bytes_perint <= 20) ++ if (bytes_perint <= 20) { + itr_setting = low_latency; ++ } + break; + } + +@@ -2389,6 +2505,9 @@ void ixgbe_write_eitr(struct ixgbe_q_vector *q_vector) + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + /* + * set the WDIS bit to not clear the timer bits and cause an + * immediate assertion of the interrupt +@@ -2420,7 +2539,7 @@ static void ixgbe_set_itr(struct ixgbe_q_vector *q_vector) + new_itr = IXGBE_20K_ITR; + break; + case bulk_latency: +- new_itr = 
IXGBE_8K_ITR; ++ new_itr = IXGBE_12K_ITR; + break; + default: + break; +@@ -2446,12 +2565,12 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr = adapter->interrupt_event; ++ s32 rc; + + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + +- if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) && +- !(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) ++ if (!(adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_EVENT)) + return; + + adapter->flags2 &= ~IXGBE_FLAG2_TEMP_SENSOR_EVENT; +@@ -2484,15 +2603,20 @@ static void ixgbe_check_overtemp_subtask(struct ixgbe_adapter *adapter) + return; + + break; ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ rc = hw->phy.ops.check_overtemp(hw); ++ if (rc != IXGBE_ERR_OVERTEMP) ++ return; ++ break; + default: +- if (!(eicr & IXGBE_EICR_GPI_SDP0)) ++ if (adapter->hw.mac.type >= ixgbe_mac_X540) ++ return; ++ if (!(eicr & IXGBE_EICR_GPI_SDP0_BY_MAC(hw))) + return; + break; + } +- e_crit(drv, +- "Network adapter has been stopped because it has over heated. " +- "Restart the computer. 
If the problem persists, " +- "power off the system and replace the adapter\n"); ++ e_crit(drv, "%s\n", ixgbe_overheat_msg); + + adapter->interrupt_event = 0; + } +@@ -2528,6 +2652,18 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) + return; + } + return; ++ case ixgbe_mac_X550EM_a: ++ if (eicr & IXGBE_EICR_GPI_SDP0_X550EM_a) { ++ adapter->interrupt_event = eicr; ++ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_EVENT; ++ ixgbe_service_event_schedule(adapter); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, ++ IXGBE_EICR_GPI_SDP0_X550EM_a); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_EICR, ++ IXGBE_EICR_GPI_SDP0_X550EM_a); ++ } ++ return; ++ case ixgbe_mac_X550: + case ixgbe_mac_X540: + if (!(eicr & IXGBE_EICR_TS)) + return; +@@ -2536,28 +2672,33 @@ static void ixgbe_check_overtemp_event(struct ixgbe_adapter *adapter, u32 eicr) + return; + } + +- e_crit(drv, +- "Network adapter has been stopped because it has over heated. " +- "Restart the computer. If the problem persists, " +- "power off the system and replace the adapter\n"); ++ e_crit(drv, "%s\n", ixgbe_overheat_msg); + } + + static void ixgbe_check_sfp_event(struct ixgbe_adapter *adapter, u32 eicr) + { + struct ixgbe_hw *hw = &adapter->hw; ++ u32 eicr_mask = IXGBE_EICR_GPI_SDP2_BY_MAC(hw); ++ ++ if (!ixgbe_is_sfp(hw)) ++ return; ++ if (hw->mac.type >= ixgbe_mac_X540) ++ eicr_mask = IXGBE_EICR_GPI_SDP0_X540; + +- if (eicr & IXGBE_EICR_GPI_SDP2) { ++ if (eicr & eicr_mask) { + /* Clear the interrupt */ +- IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP2); ++ IXGBE_WRITE_REG(hw, IXGBE_EICR, eicr_mask); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; ++ adapter->sfp_poll_time = 0; + ixgbe_service_event_schedule(adapter); + } + } + +- if (eicr & IXGBE_EICR_GPI_SDP1) { ++ if (adapter->hw.mac.type == ixgbe_mac_82599EB && ++ (eicr & IXGBE_EICR_GPI_SDP1_BY_MAC(hw))) { + /* Clear the interrupt */ +- IXGBE_WRITE_REG(hw, IXGBE_EICR, 
IXGBE_EICR_GPI_SDP1); ++ IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_GPI_SDP1_BY_MAC(hw)); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + adapter->flags |= IXGBE_FLAG_NEED_LINK_CONFIG; + ixgbe_service_event_schedule(adapter); +@@ -2579,8 +2720,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter) + } + } + +-static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, +- u64 qmask) ++static void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, u64 qmask) + { + u32 mask; + struct ixgbe_hw *hw = &adapter->hw; +@@ -2592,6 +2732,9 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + mask = (qmask & 0xFFFFFFFF); + if (mask) + IXGBE_WRITE_REG(hw, IXGBE_EIMS_EX(0), mask); +@@ -2605,32 +2748,6 @@ static inline void ixgbe_irq_enable_queues(struct ixgbe_adapter *adapter, + /* skip the flush */ + } + +-static inline void ixgbe_irq_disable_queues(struct ixgbe_adapter *adapter, +- u64 qmask) +-{ +- u32 mask; +- struct ixgbe_hw *hw = &adapter->hw; +- +- switch (hw->mac.type) { +- case ixgbe_mac_82598EB: +- mask = (IXGBE_EIMS_RTX_QUEUE & qmask); +- IXGBE_WRITE_REG(hw, IXGBE_EIMC, mask); +- break; +- case ixgbe_mac_82599EB: +- case ixgbe_mac_X540: +- mask = (qmask & 0xFFFFFFFF); +- if (mask) +- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(0), mask); +- mask = (qmask >> 32); +- if (mask) +- IXGBE_WRITE_REG(hw, IXGBE_EIMC_EX(1), mask); +- break; +- default: +- break; +- } +- /* skip the flush */ +-} +- + /** + * ixgbe_irq_enable - Enable default interrupt generation settings + * @adapter: board private structure +@@ -2647,9 +2764,11 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, + if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: +- mask |= IXGBE_EIMS_GPI_SDP0; ++ case ixgbe_mac_X550EM_a: ++ mask |= 
IXGBE_EIMS_GPI_SDP0_BY_MAC(&adapter->hw); + break; + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: + mask |= IXGBE_EIMS_TS; + break; + default: +@@ -2661,17 +2780,28 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, + case ixgbe_mac_82599EB: + mask |= IXGBE_EIMS_GPI_SDP1; + mask |= IXGBE_EIMS_GPI_SDP2; ++ /* fall through */ + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ if (adapter->hw.device_id == IXGBE_DEV_ID_X550EM_X_SFP || ++ adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP || ++ adapter->hw.device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) ++ mask |= IXGBE_EIMS_GPI_SDP0_BY_MAC(&adapter->hw); ++ if (adapter->hw.phy.type == ixgbe_phy_x550em_ext_t) ++ mask |= IXGBE_EICR_GPI_SDP0_X540; + mask |= IXGBE_EIMS_ECC; + mask |= IXGBE_EIMS_MAILBOX; ++#ifdef HAVE_PTP_1588_CLOCK ++ mask |= IXGBE_EIMS_TIMESYNC; ++#endif ++ + break; + default: + break; + } + +- if (adapter->hw.mac.type == ixgbe_mac_X540) +- mask |= IXGBE_EIMS_TIMESYNC; +- + if ((adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) && + !(adapter->flags2 & IXGBE_FLAG2_FDIR_REQUIRES_REINIT)) + mask |= IXGBE_EIMS_FLOW_DIR; +@@ -2683,15 +2813,15 @@ static inline void ixgbe_irq_enable(struct ixgbe_adapter *adapter, bool queues, + IXGBE_WRITE_FLUSH(&adapter->hw); + } + +-static irqreturn_t ixgbe_msix_other(int irq, void *data) ++static irqreturn_t ixgbe_msix_other(int __always_unused irq, void *data) + { + struct ixgbe_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; + u32 eicr; + + /* +- * Workaround for Silicon errata. Use clear-by-write instead +- * of clear-by-read. Reading with EICS will return the ++ * Workaround for Silicon errata #26 on 82598. Use clear-by-write ++ * instead of clear-by-read. Reading with EICS will return the + * interrupt causes without clearing, which later be done + * with the write to EICR. 
+ */ +@@ -2717,29 +2847,47 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ if (hw->phy.type == ixgbe_phy_x550em_ext_t && ++ (eicr & IXGBE_EICR_GPI_SDP0_X540)) { ++ adapter->flags2 |= IXGBE_FLAG2_PHY_INTERRUPT; ++ ixgbe_service_event_schedule(adapter); ++ IXGBE_WRITE_REG(hw, IXGBE_EICR, ++ IXGBE_EICR_GPI_SDP0_X540); ++ } + if (eicr & IXGBE_EICR_ECC) { +- e_info(link, "Received ECC Err, initiating reset\n"); +- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; ++ e_info(link, "Received unrecoverable ECC Err," ++ "initiating reset.\n"); ++ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); + } ++#ifdef HAVE_TX_MQ + /* Handle Flow Director Full threshold interrupt */ + if (eicr & IXGBE_EICR_FLOW_DIR) { + int reinit_count = 0; + int i; + for (i = 0; i < adapter->num_tx_queues; i++) { + struct ixgbe_ring *ring = adapter->tx_ring[i]; +- if (test_and_clear_bit(__IXGBE_TX_FDIR_INIT_DONE, +- &ring->state)) ++ if (test_and_clear_bit( ++ __IXGBE_TX_FDIR_INIT_DONE, ++ &ring->state)) + reinit_count++; + } + if (reinit_count) { +- /* no more flow director interrupts until after init */ +- IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_FLOW_DIR); +- adapter->flags2 |= IXGBE_FLAG2_FDIR_REQUIRES_REINIT; ++ /* no more flow director interrupts until ++ * after init ++ */ ++ IXGBE_WRITE_REG(hw, IXGBE_EIMC, ++ IXGBE_EIMC_FLOW_DIR); ++ adapter->flags2 |= ++ IXGBE_FLAG2_FDIR_REQUIRES_REINIT; + ixgbe_service_event_schedule(adapter); + } + } ++#endif + ixgbe_check_sfp_event(adapter, eicr); + ixgbe_check_overtemp_event(adapter, eicr); + break; +@@ -2749,8 +2897,10 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) + + ixgbe_check_fan_failure(adapter, eicr); + ++#ifdef HAVE_PTP_1588_CLOCK + if (unlikely(eicr & IXGBE_EICR_TIMESYNC)) 
+- ixgbe_ptp_check_pps_event(adapter, eicr); ++ ixgbe_ptp_check_pps_event(adapter); ++#endif + + /* re-enable the original interrupt state, no lsc, no queues */ + if (!test_bit(__IXGBE_DOWN, &adapter->state)) +@@ -2759,44 +2909,54 @@ static irqreturn_t ixgbe_msix_other(int irq, void *data) + return IRQ_HANDLED; + } + +-static irqreturn_t ixgbe_msix_clean_rings(int irq, void *data) ++static irqreturn_t ixgbe_msix_clean_rings(int __always_unused irq, void *data) + { + struct ixgbe_q_vector *q_vector = data; + + /* EIAM disabled interrupts (on this vector) for us */ + + if (q_vector->rx.ring || q_vector->tx.ring) +- napi_schedule(&q_vector->napi); ++ napi_schedule_irqoff(&q_vector->napi); + + return IRQ_HANDLED; + } + + /** +- * ixgbe_poll - NAPI Rx polling callback +- * @napi: structure for representing this polling device +- * @budget: how many packets driver is allowed to clean ++ * ixgbe_poll - NAPI polling RX/TX cleanup routine ++ * @napi: napi struct with our devices info in it ++ * @budget: amount of work driver is allowed to do this pass, in packets + * +- * This function is used for legacy and MSI, NAPI mode ++ * This function will clean all queues associated with a q_vector. 
+ **/ + int ixgbe_poll(struct napi_struct *napi, int budget) + { + struct ixgbe_q_vector *q_vector = +- container_of(napi, struct ixgbe_q_vector, napi); ++ container_of(napi, struct ixgbe_q_vector, napi); + struct ixgbe_adapter *adapter = q_vector->adapter; + struct ixgbe_ring *ring; +- int per_ring_budget; ++ int per_ring_budget, work_done = 0; + bool clean_complete = true; + +-#ifdef CONFIG_IXGBE_DCA ++#if IS_ENABLED(CONFIG_DCA) + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) + ixgbe_update_dca(q_vector); +-#endif ++#endif /* CONFIG_DCA */ + + ixgbe_for_each_ring(ring, q_vector->tx) +- clean_complete &= !!ixgbe_clean_tx_irq(q_vector, ring); ++ clean_complete &= ixgbe_clean_tx_irq(q_vector, ring); ++ ++#ifdef HAVE_NDO_BUSY_POLL ++ if (test_bit(NAPI_STATE_NPSVC, &napi->state)) ++ return budget; + +- if (!ixgbe_qv_lock_napi(q_vector)) ++ /* Exit if we are called by netpoll or busy polling is active */ ++ if ((budget <= 0) || !ixgbe_qv_lock_napi(q_vector)) ++ return budget; ++#else ++ /* Exit if we are called by netpoll */ ++ if (budget <= 0) + return budget; ++#endif + + /* attempt to distribute budget to each queue fairly, but don't allow + * the budget to go below 1 because we'll exit polling */ +@@ -2805,23 +2965,34 @@ int ixgbe_poll(struct napi_struct *napi, int budget) + else + per_ring_budget = budget; + +- ixgbe_for_each_ring(ring, q_vector->rx) +- clean_complete &= (ixgbe_clean_rx_irq(q_vector, ring, +- per_ring_budget) < per_ring_budget); ++ ixgbe_for_each_ring(ring, q_vector->rx) { ++ int cleaned = ixgbe_clean_rx_irq(q_vector, ring, ++ per_ring_budget); ++ work_done += cleaned; ++ clean_complete &= (cleaned < per_ring_budget); ++ } + ++#ifdef HAVE_NDO_BUSY_POLL + ixgbe_qv_unlock_napi(q_vector); ++#endif ++ ++#ifndef HAVE_NETDEV_NAPI_LIST ++ if (!netif_running(adapter->netdev)) ++ clean_complete = true; ++ ++#endif + /* If all work not completed, return budget and keep polling */ + if (!clean_complete) + return budget; + + /* all work done, exit the polling 
mode */ +- napi_complete(napi); +- if (adapter->rx_itr_setting & 1) ++ napi_complete_done(napi, work_done); ++ if (adapter->rx_itr_setting == 1) + ixgbe_set_itr(q_vector); + if (!test_bit(__IXGBE_DOWN, &adapter->state)) + ixgbe_irq_enable_queues(adapter, ((u64)1 << q_vector->v_idx)); + +- return 0; ++ return min(work_done, budget - 1); + } + + /** +@@ -2834,23 +3005,23 @@ int ixgbe_poll(struct napi_struct *napi, int budget) + static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) + { + struct net_device *netdev = adapter->netdev; ++ unsigned int ri = 0, ti = 0; + int vector, err; +- int ri = 0, ti = 0; + + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; + + if (q_vector->tx.ring && q_vector->rx.ring) { +- snprintf(q_vector->name, sizeof(q_vector->name) - 1, +- "%s-%s-%d", netdev->name, "TxRx", ri++); ++ snprintf(q_vector->name, sizeof(q_vector->name), ++ "%s-TxRx-%u", netdev->name, ri++); + ti++; + } else if (q_vector->rx.ring) { +- snprintf(q_vector->name, sizeof(q_vector->name) - 1, +- "%s-%s-%d", netdev->name, "rx", ri++); ++ snprintf(q_vector->name, sizeof(q_vector->name), ++ "%s-rx-%u", netdev->name, ri++); + } else if (q_vector->tx.ring) { +- snprintf(q_vector->name, sizeof(q_vector->name) - 1, +- "%s-%s-%d", netdev->name, "tx", ti++); ++ snprintf(q_vector->name, sizeof(q_vector->name), ++ "%s-tx-%u", netdev->name, ti++); + } else { + /* skip this unused q_vector */ + continue; +@@ -2858,16 +3029,18 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) + err = request_irq(entry->vector, &ixgbe_msix_clean_rings, 0, + q_vector->name, q_vector); + if (err) { +- e_err(probe, "request_irq failed for MSIX interrupt " +- "Error: %d\n", err); ++ e_err(probe, "request_irq failed for MSIX interrupt '%s' " ++ "Error: %d\n", q_vector->name, err); + goto free_queue_irqs; + } ++#ifdef HAVE_IRQ_AFFINITY_HINT + /* 
If Flow Director is enabled, set interrupt affinity */ + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + /* assign the mask for this irq */ + irq_set_affinity_hint(entry->vector, + &q_vector->affinity_mask); + } ++#endif /* HAVE_IRQ_AFFINITY_HINT */ + } + + err = request_irq(adapter->msix_entries[vector].vector, +@@ -2877,13 +3050,15 @@ static int ixgbe_request_msix_irqs(struct ixgbe_adapter *adapter) + goto free_queue_irqs; + } + +- return 0; ++ return IXGBE_SUCCESS; + + free_queue_irqs: + while (vector) { + vector--; ++#ifdef HAVE_IRQ_AFFINITY_HINT + irq_set_affinity_hint(adapter->msix_entries[vector].vector, + NULL); ++#endif + free_irq(adapter->msix_entries[vector].vector, + adapter->q_vector[vector]); + } +@@ -2899,7 +3074,7 @@ free_queue_irqs: + * @irq: interrupt number + * @data: pointer to a network interface device structure + **/ +-static irqreturn_t ixgbe_intr(int irq, void *data) ++static irqreturn_t ixgbe_intr(int __always_unused irq, void *data) + { + struct ixgbe_adapter *adapter = data; + struct ixgbe_hw *hw = &adapter->hw; +@@ -2933,15 +3108,19 @@ static irqreturn_t ixgbe_intr(int irq, void *data) + + switch (hw->mac.type) { + case ixgbe_mac_82599EB: +- ixgbe_check_sfp_event(adapter, eicr); +- /* Fall through */ + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ + if (eicr & IXGBE_EICR_ECC) { +- e_info(link, "Received ECC Err, initiating reset\n"); +- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; ++ e_info(link, "Received unrecoverable ECC Err," ++ "initiating reset.\n"); ++ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + ixgbe_service_event_schedule(adapter); + IXGBE_WRITE_REG(hw, IXGBE_EICR, IXGBE_EICR_ECC); + } ++ ixgbe_check_sfp_event(adapter, eicr); + ixgbe_check_overtemp_event(adapter, eicr); + break; + default: +@@ -2949,11 +3128,13 @@ static irqreturn_t ixgbe_intr(int irq, void *data) + } + + ixgbe_check_fan_failure(adapter, eicr); ++#ifdef HAVE_PTP_1588_CLOCK + if 
(unlikely(eicr & IXGBE_EICR_TIMESYNC)) +- ixgbe_ptp_check_pps_event(adapter, eicr); ++ ixgbe_ptp_check_pps_event(adapter); ++#endif + + /* would disable interrupts here but EIAM disabled it */ +- napi_schedule(&q_vector->napi); ++ napi_schedule_irqoff(&q_vector->napi); + + /* + * re-enable link(maybe) and non-queue interrupts, no flush. +@@ -2980,10 +3161,10 @@ static int ixgbe_request_irq(struct ixgbe_adapter *adapter) + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) + err = ixgbe_request_msix_irqs(adapter); + else if (adapter->flags & IXGBE_FLAG_MSI_ENABLED) +- err = request_irq(adapter->pdev->irq, ixgbe_intr, 0, ++ err = request_irq(adapter->pdev->irq, &ixgbe_intr, 0, + netdev->name, adapter); + else +- err = request_irq(adapter->pdev->irq, ixgbe_intr, IRQF_SHARED, ++ err = request_irq(adapter->pdev->irq, &ixgbe_intr, IRQF_SHARED, + netdev->name, adapter); + + if (err) +@@ -3001,6 +3182,9 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) + return; + } + ++ if (!adapter->msix_entries) ++ return; ++ + for (vector = 0; vector < adapter->num_q_vectors; vector++) { + struct ixgbe_q_vector *q_vector = adapter->q_vector[vector]; + struct msix_entry *entry = &adapter->msix_entries[vector]; +@@ -3009,13 +3193,15 @@ static void ixgbe_free_irq(struct ixgbe_adapter *adapter) + if (!q_vector->rx.ring && !q_vector->tx.ring) + continue; + ++#ifdef HAVE_IRQ_AFFINITY_HINT + /* clear the affinity_mask in the IRQ descriptor */ + irq_set_affinity_hint(entry->vector, NULL); + ++#endif + free_irq(entry->vector, q_vector); + } + +- free_irq(adapter->msix_entries[vector++].vector, adapter); ++ free_irq(adapter->msix_entries[vector].vector, adapter); + } + + /** +@@ -3030,6 +3216,9 @@ static inline void ixgbe_irq_disable(struct ixgbe_adapter *adapter) + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC, 0xFFFF0000); + IXGBE_WRITE_REG(&adapter->hw, 
IXGBE_EIMC_EX(0), ~0); + IXGBE_WRITE_REG(&adapter->hw, IXGBE_EIMC_EX(1), ~0); +@@ -3083,18 +3272,27 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, + u8 reg_idx = ring->reg_idx; + + /* disable queue to avoid issues while updating state */ +- IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_TXDCTL(reg_idx), IXGBE_TXDCTL_SWFLSH); + IXGBE_WRITE_FLUSH(hw); + +- IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), +- (tdba & DMA_BIT_MASK(32))); +- IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), (tdba >> 32)); ++ IXGBE_WRITE_REG(hw, IXGBE_TDBAL(reg_idx), tdba & DMA_BIT_MASK(32)); ++ IXGBE_WRITE_REG(hw, IXGBE_TDBAH(reg_idx), tdba >> 32); + IXGBE_WRITE_REG(hw, IXGBE_TDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_tx_desc)); ++ ++ /* disable head writeback */ ++ IXGBE_WRITE_REG(hw, IXGBE_TDWBAH(reg_idx), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_TDWBAL(reg_idx), 0); ++ ++ /* reset head and tail pointers */ + IXGBE_WRITE_REG(hw, IXGBE_TDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_TDT(reg_idx), 0); + ring->tail = adapter->io_addr + IXGBE_TDT(reg_idx); + ++ /* reset ntu and ntc to place SW in sync with hardwdare */ ++ ring->next_to_clean = 0; ++ ring->next_to_use = 0; ++ + /* + * set WTHRESH to encourage burst writeback, it should not be set + * higher than 1 when: +@@ -3105,11 +3303,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, + * to or less than the number of on chip descriptors, which is + * currently 40. 
+ */ +-#if IS_ENABLED(CONFIG_BQL) + if (!ring->q_vector || (ring->q_vector->itr < IXGBE_100K_ITR)) +-#else +- if (!ring->q_vector || (ring->q_vector->itr < 8)) +-#endif + txdctl |= (1 << 16); /* WTHRESH = 1 */ + else + txdctl |= (8 << 16); /* WTHRESH = 8 */ +@@ -3135,7 +3329,7 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_q_vector *q_vector = ring->q_vector; + + if (q_vector) +- netif_set_xps_queue(ring->netdev, ++ netif_set_xps_queue(adapter->netdev, + &q_vector->affinity_mask, + ring->queue_index); + } +@@ -3152,11 +3346,11 @@ void ixgbe_configure_tx_ring(struct ixgbe_adapter *adapter, + + /* poll to verify queue is enabled */ + do { +- usleep_range(1000, 2000); ++ msleep(1); + txdctl = IXGBE_READ_REG(hw, IXGBE_TXDCTL(reg_idx)); + } while (--wait_loop && !(txdctl & IXGBE_TXDCTL_ENABLE)); + if (!wait_loop) +- e_err(drv, "Could not enable Tx Queue %d\n", reg_idx); ++ hw_dbg(hw, "Could not enable Tx Queue %d\n", reg_idx); + } + + static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) +@@ -3174,13 +3368,14 @@ static void ixgbe_setup_mtqc(struct ixgbe_adapter *adapter) + IXGBE_WRITE_REG(hw, IXGBE_RTTDCS, rttdcs); + + /* set transmit pool layout */ +- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { + mtqc = IXGBE_MTQC_VT_ENA; + if (tcs > 4) + mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_8TC_8TQ; + else if (tcs > 1) + mtqc |= IXGBE_MTQC_RT_ENA | IXGBE_MTQC_4TC_4TQ; +- else if (adapter->ring_feature[RING_F_RSS].indices == 4) ++ else if (adapter->ring_feature[RING_F_VMDQ].mask == ++ IXGBE_82599_VMDQ_4Q_MASK) + mtqc |= IXGBE_MTQC_32VF; + else + mtqc |= IXGBE_MTQC_64VF; +@@ -3219,6 +3414,13 @@ static void ixgbe_configure_tx(struct ixgbe_adapter *adapter) + u32 dmatxctl; + u32 i; + ++#ifdef CONFIG_NETDEVICES_MULTIQUEUE ++ if (adapter->num_tx_queues > 1) ++ adapter->netdev->features |= NETIF_F_MULTI_QUEUE; ++ else ++ adapter->netdev->features &= ~NETIF_F_MULTI_QUEUE; ++ ++#endif + 
ixgbe_setup_mtqc(adapter); + + if (hw->mac.type != ixgbe_mac_82598EB) { +@@ -3257,18 +3459,16 @@ static void ixgbe_disable_rx_drop(struct ixgbe_adapter *adapter, + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); + } + +-#ifdef CONFIG_IXGBE_DCB + void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) +-#else +-static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) +-#endif + { + int i; + bool pfc_en = adapter->dcb_cfg.pfc_mode_enable; + ++#ifdef HAVE_DCBNL_IEEE + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + ++#endif + /* + * We should set the drop enable bit if: + * SR-IOV is enabled +@@ -3288,8 +3488,6 @@ static void ixgbe_set_rx_drop_en(struct ixgbe_adapter *adapter) + } + } + +-#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 +- + static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, + struct ixgbe_ring *rx_ring) + { +@@ -3300,18 +3498,34 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, + if (hw->mac.type == ixgbe_mac_82598EB) { + u16 mask = adapter->ring_feature[RING_F_RSS].mask; + ++ /* program one srrctl register per VMDq index */ ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ++ mask = adapter->ring_feature[RING_F_VMDQ].mask; ++ + /* + * if VMDq is not active we must program one srrctl register + * per RSS queue since we have enabled RDRXCTL.MVMEN + */ + reg_idx &= mask; ++ ++ /* divide by the first bit of the mask to get the indices */ ++ if (reg_idx) ++ reg_idx /= ((~mask) + 1) & mask; + } + + /* configure header buffer length, needed for RSC */ + srrctl = IXGBE_RX_HDR_SIZE << IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT; + + /* configure the packet buffer length */ +- srrctl |= ixgbe_rx_bufsz(rx_ring) >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ srrctl |= ALIGN(rx_ring->rx_buf_len, 1024) >> ++ IXGBE_SRRCTL_BSIZEPKT_SHIFT; ++#else ++ if (test_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state)) ++ srrctl |= IXGBE_RXBUFFER_3K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; ++ else ++ 
srrctl |= IXGBE_RXBUFFER_2K >> IXGBE_SRRCTL_BSIZEPKT_SHIFT; ++#endif + + /* configure descriptor type */ + srrctl |= IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF; +@@ -3319,44 +3533,188 @@ static void ixgbe_configure_srrctl(struct ixgbe_adapter *adapter, + IXGBE_WRITE_REG(hw, IXGBE_SRRCTL(reg_idx), srrctl); + } + +-static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) ++/** ++ * ixgbe_rss_indir_tbl_entries - Return RSS indirection table entries ++ * @adapter: device handle ++ * ++ * - 82598/82599/X540: 128 ++ * - X550(non-SRIOV mode): 512 ++ * - X550(SRIOV mode): 64 ++ */ ++u32 ixgbe_rss_indir_tbl_entries(struct ixgbe_adapter *adapter) + { +- struct ixgbe_hw *hw = &adapter->hw; +- static const u32 seed[10] = { 0xE291D73D, 0x1805EC6C, 0x2A94B30D, +- 0xA54F2BEC, 0xEA49AF7C, 0xE214AD3D, 0xB855AABE, +- 0x6A3E67EA, 0x14364D17, 0x3BED200D}; +- u32 mrqc = 0, reta = 0; +- u32 rxcsum; +- int i, j; +- u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; ++ if (adapter->hw.mac.type < ixgbe_mac_X550) ++ return 128; ++ else if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ++ return 64; ++ else ++ return 512; ++} + +- /* +- * Program table for at least 2 queues w/ SR-IOV so that VFs can +- * make full use of any rings they may have. We will use the +- * PSRTYPE register to control how many rings we use within the PF. +- */ +- if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 2)) +- rss_i = 2; ++/** ++ * ixgbe_store_key - Write the RSS key to HW ++ * @adapter: device handle ++ * ++ * Write the RSS key stored in adapter.rss_key to HW. 
++ */ ++void ixgbe_store_key(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int i; + +- /* Fill out hash function seeds */ + for (i = 0; i < 10; i++) +- IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), seed[i]); ++ IXGBE_WRITE_REG(hw, IXGBE_RSSRK(i), adapter->rss_key[i]); ++} + +- /* Fill out redirection table */ +- for (i = 0, j = 0; i < 128; i++, j++) { +- if (j == rss_i) +- j = 0; +- /* reta = 4-byte sliding window of +- * 0x00..(indices-1)(indices-1)00..etc. */ +- reta = (reta << 8) | (j * 0x11); +- if ((i & 3) == 3) +- IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); +- } ++/** ++ * ixgbe_init_rss_key - Initialize adapter RSS key ++ * @adapter: device handle ++ * ++ * Allocates and initializes the RSS key if it is not allocated. ++ **/ ++static inline int ixgbe_init_rss_key(struct ixgbe_adapter *adapter) ++{ ++ u32 *rss_key; + +- /* Disable indicating checksum in descriptor, enables RSS hash */ +- rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); +- rxcsum |= IXGBE_RXCSUM_PCSD; +- IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); ++ if (!adapter->rss_key) { ++ rss_key = kzalloc(IXGBE_RSS_KEY_SIZE, GFP_KERNEL); ++ if (unlikely(!rss_key)) ++ return -ENOMEM; ++ ++ netdev_rss_key_fill(rss_key, IXGBE_RSS_KEY_SIZE); ++ adapter->rss_key = rss_key; ++ } ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_store_reta - Write the RETA table to HW ++ * @adapter: device handle ++ * ++ * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. ++ */ ++void ixgbe_store_reta(struct ixgbe_adapter *adapter) ++{ ++ u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 reta = 0; ++ u32 indices_multi; ++ u8 *indir_tbl = adapter->rss_indir_tbl; ++ ++ /* Fill out the redirection table as follows: ++ * - 82598: 8 bit wide entries containing pair of 4 bit RSS ++ * indices. 
++ * - 82599/X540: 8 bit wide entries containing 4 bit RSS index ++ * - X550: 8 bit wide entries containing 6 bit RSS index ++ */ ++ if (adapter->hw.mac.type == ixgbe_mac_82598EB) ++ indices_multi = 0x11; ++ else ++ indices_multi = 0x1; ++ ++ /* Write redirection table to HW */ ++ for (i = 0; i < reta_entries; i++) { ++ reta |= indices_multi * indir_tbl[i] << (i & 0x3) * 8; ++ if ((i & 3) == 3) { ++ if (i < 128) ++ IXGBE_WRITE_REG(hw, IXGBE_RETA(i >> 2), reta); ++ else ++ IXGBE_WRITE_REG(hw, IXGBE_ERETA((i >> 2) - 32), ++ reta); ++ reta = 0; ++ } ++ } ++} ++ ++/** ++ * ixgbe_store_vfreta - Write the RETA table to HW (x550 devices in SRIOV mode) ++ * @adapter: device handle ++ * ++ * Write the RSS redirection table stored in adapter.rss_indir_tbl[] to HW. ++ */ ++static void ixgbe_store_vfreta(struct ixgbe_adapter *adapter) ++{ ++ u32 i, reta_entries = ixgbe_rss_indir_tbl_entries(adapter); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 vfreta = 0; ++ unsigned int pf_pool = adapter->num_vfs; ++ ++ /* Write redirection table to HW */ ++ for (i = 0; i < reta_entries; i++) { ++ vfreta |= (u32)adapter->rss_indir_tbl[i] << (i & 0x3) * 8; ++ if ((i & 3) == 3) { ++ IXGBE_WRITE_REG(hw, IXGBE_PFVFRETA(i >> 2, pf_pool), ++ vfreta); ++ vfreta = 0; ++ } ++ } ++} ++ ++static void ixgbe_setup_reta(struct ixgbe_adapter *adapter) ++{ ++ u32 i, j; ++ u32 reta_entries = ixgbe_rss_indir_tbl_entries(adapter); ++ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; ++ ++ /* Program table for at least 4 queues w/ SR-IOV so that VFs can ++ * make full use of any rings they may have. We will use the ++ * PSRTYPE register to control how many rings we use within the PF. 
++ */ ++ if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && (rss_i < 4)) ++ rss_i = 4; ++ ++ /* Fill out hash function seeds */ ++ ixgbe_store_key(adapter); ++ ++ /* Fill out redirection table */ ++ memset(adapter->rss_indir_tbl, 0, sizeof(adapter->rss_indir_tbl)); ++ ++ for (i = 0, j = 0; i < reta_entries; i++, j++) { ++ if (j == rss_i) ++ j = 0; ++ ++ adapter->rss_indir_tbl[i] = j; ++ } ++ ++ ixgbe_store_reta(adapter); ++} ++ ++static void ixgbe_setup_vfreta(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u16 rss_i = adapter->ring_feature[RING_F_RSS].indices; ++ unsigned int pf_pool = adapter->num_vfs; ++ int i, j; ++ ++ /* Fill out hash function seeds */ ++ for (i = 0; i < 10; i++) ++ IXGBE_WRITE_REG(hw, IXGBE_PFVFRSSRK(i, pf_pool), ++ *(adapter->rss_key + i)); ++ ++ /* Fill out the redirection table */ ++ for (i = 0, j = 0; i < 64; i++, j++) { ++ if (j == rss_i) ++ j = 0; ++ ++ adapter->rss_indir_tbl[i] = j; ++ } ++ ++ ixgbe_store_vfreta(adapter); ++} ++ ++ ++static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 rxcsum; ++ u32 mrqc = 0, rss_field = 0; ++ u32 vfmrqc = 0; ++ ++ /* Disable indicating checksum in descriptor, enables RSS hash */ ++ rxcsum = IXGBE_READ_REG(hw, IXGBE_RXCSUM); ++ rxcsum |= IXGBE_RXCSUM_PCSD; ++ IXGBE_WRITE_REG(hw, IXGBE_RXCSUM, rxcsum); + + if (adapter->hw.mac.type == ixgbe_mac_82598EB) { + if (adapter->ring_feature[RING_F_RSS].mask) +@@ -3364,12 +3722,13 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) + } else { + u8 tcs = netdev_get_num_tc(adapter->netdev); + +- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { + if (tcs > 4) + mrqc = IXGBE_MRQC_VMDQRT8TCEN; /* 8 TCs */ + else if (tcs > 1) + mrqc = IXGBE_MRQC_VMDQRT4TCEN; /* 4 TCs */ +- else if (adapter->ring_feature[RING_F_RSS].indices == 4) ++ else if (adapter->ring_feature[RING_F_VMDQ].mask == ++ IXGBE_82599_VMDQ_4Q_MASK) + mrqc = 
IXGBE_MRQC_VMDQRSS32EN; + else + mrqc = IXGBE_MRQC_VMDQRSS64EN; +@@ -3381,29 +3740,68 @@ static void ixgbe_setup_mrqc(struct ixgbe_adapter *adapter) + else + mrqc = IXGBE_MRQC_RSSEN; + } ++ ++ /* Enable L3/L4 for Tx Switched packets */ ++ mrqc |= IXGBE_MRQC_L3L4TXSWEN; + } + + /* Perform hash on these packet types */ +- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4 | +- IXGBE_MRQC_RSS_FIELD_IPV4_TCP | +- IXGBE_MRQC_RSS_FIELD_IPV6 | +- IXGBE_MRQC_RSS_FIELD_IPV6_TCP; ++ rss_field = IXGBE_MRQC_RSS_FIELD_IPV4 | ++ IXGBE_MRQC_RSS_FIELD_IPV4_TCP | ++ IXGBE_MRQC_RSS_FIELD_IPV6 | ++ IXGBE_MRQC_RSS_FIELD_IPV6_TCP; + + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV4_UDP) +- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; ++ rss_field |= IXGBE_MRQC_RSS_FIELD_IPV4_UDP; + if (adapter->flags2 & IXGBE_FLAG2_RSS_FIELD_IPV6_UDP) +- mrqc |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; ++ rss_field |= IXGBE_MRQC_RSS_FIELD_IPV6_UDP; ++ ++ if ((hw->mac.type >= ixgbe_mac_X550) && ++ (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) { ++ unsigned int pf_pool = adapter->num_vfs; ++ ++ /* Enable VF RSS mode */ ++ mrqc |= IXGBE_MRQC_MULTIPLE_RSS; ++ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); ++ ++ /* Setup RSS through the VF registers */ ++ ixgbe_setup_vfreta(adapter); ++ vfmrqc = IXGBE_MRQC_RSSEN; ++ vfmrqc |= rss_field; ++ IXGBE_WRITE_REG(hw, IXGBE_PFVFMRQC(pf_pool), vfmrqc); ++ } else { ++ ixgbe_setup_reta(adapter); ++ mrqc |= rss_field; ++ IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); ++ } ++} ++ ++/** ++ * ixgbe_clear_rscctl - disable RSC for the indicated ring ++ * @adapter: address of board private structure ++ * @ring: structure containing ring specific data ++ **/ ++void ixgbe_clear_rscctl(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *ring) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 rscctrl; ++ u8 reg_idx = ring->reg_idx; ++ ++ rscctrl = IXGBE_READ_REG(hw, IXGBE_RSCCTL(reg_idx)); ++ rscctrl &= ~IXGBE_RSCCTL_RSCEN; ++ IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); + +- IXGBE_WRITE_REG(hw, IXGBE_MRQC, mrqc); ++ 
clear_ring_rsc_enabled(ring); + } + + /** + * ixgbe_configure_rscctl - enable RSC for the indicated ring + * @adapter: address of board private structure +- * @index: index of ring to set ++ * @ring: structure containing ring specific data + **/ +-static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, +- struct ixgbe_ring *ring) ++void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, ++ struct ixgbe_ring *ring) + { + struct ixgbe_hw *hw = &adapter->hw; + u32 rscctrl; +@@ -3419,11 +3817,27 @@ static void ixgbe_configure_rscctl(struct ixgbe_adapter *adapter, + * total size of max desc * buf_len is not greater + * than 65536 + */ ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++#if (MAX_SKB_FRAGS >= 16) + rscctrl |= IXGBE_RSCCTL_MAXDESC_16; ++#elif (MAX_SKB_FRAGS >= 8) ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_8; ++#elif (MAX_SKB_FRAGS >= 4) ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_4; ++#else ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_1; ++#endif ++#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ ++ if (ring->rx_buf_len <= IXGBE_RXBUFFER_4K) ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_16; ++ else if (ring->rx_buf_len <= IXGBE_RXBUFFER_8K) ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_8; ++ else ++ rscctrl |= IXGBE_RSCCTL_MAXDESC_4; ++#endif /* !CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + IXGBE_WRITE_REG(hw, IXGBE_RSCCTL(reg_idx), rscctrl); + } + +-#define IXGBE_MAX_RX_DESC_POLL 10 + static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) + { +@@ -3432,7 +3846,7 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + +- if (ixgbe_removed(hw->hw_addr)) ++ if (IXGBE_REMOVED(hw->hw_addr)) + return; + /* RXDCTL.EN will return 0 on 82598 if link is down, so skip it */ + if (hw->mac.type == ixgbe_mac_82598EB && +@@ -3440,13 +3854,13 @@ static void ixgbe_rx_desc_queue_enable(struct ixgbe_adapter *adapter, + return; + + do { +- usleep_range(1000, 2000); ++ msleep(1); + rxdctl = IXGBE_READ_REG(hw, 
IXGBE_RXDCTL(reg_idx)); + } while (--wait_loop && !(rxdctl & IXGBE_RXDCTL_ENABLE)); + + if (!wait_loop) { +- e_err(drv, "RXDCTL.ENABLE on Rx queue %d not set within " +- "the polling period\n", reg_idx); ++ e_err(drv, "RXDCTL.ENABLE on Rx queue %d " ++ "not set within the polling period\n", reg_idx); + } + } + +@@ -3458,7 +3872,7 @@ void ixgbe_disable_rx_queue(struct ixgbe_adapter *adapter, + u32 rxdctl; + u8 reg_idx = ring->reg_idx; + +- if (ixgbe_removed(hw->hw_addr)) ++ if (IXGBE_REMOVED(hw->hw_addr)) + return; + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + rxdctl &= ~IXGBE_RXDCTL_ENABLE; +@@ -3486,6 +3900,7 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, + struct ixgbe_ring *ring) + { + struct ixgbe_hw *hw = &adapter->hw; ++ union ixgbe_adv_rx_desc *rx_desc; + u64 rdba = ring->dma; + u32 rxdctl; + u8 reg_idx = ring->reg_idx; +@@ -3494,18 +3909,31 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, + rxdctl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + ixgbe_disable_rx_queue(adapter, ring); + +- IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), (rdba & DMA_BIT_MASK(32))); +- IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), (rdba >> 32)); ++ IXGBE_WRITE_REG(hw, IXGBE_RDBAL(reg_idx), rdba & DMA_BIT_MASK(32)); ++ IXGBE_WRITE_REG(hw, IXGBE_RDBAH(reg_idx), rdba >> 32); + IXGBE_WRITE_REG(hw, IXGBE_RDLEN(reg_idx), + ring->count * sizeof(union ixgbe_adv_rx_desc)); ++ /* Force flushing of IXGBE_RDLEN to prevent MDD */ ++ IXGBE_WRITE_FLUSH(hw); ++ ++ /* reset head and tail pointers */ + IXGBE_WRITE_REG(hw, IXGBE_RDH(reg_idx), 0); + IXGBE_WRITE_REG(hw, IXGBE_RDT(reg_idx), 0); + ring->tail = adapter->io_addr + IXGBE_RDT(reg_idx); + ++ /* reset ntu and ntc to place SW in sync with hardwdare */ ++ ring->next_to_clean = 0; ++ ring->next_to_use = 0; ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ ring->next_to_alloc = 0; ++#endif ++ + ixgbe_configure_srrctl(adapter, ring); ++ /* In ESX, RSCCTL configuration is done by on demand */ + 
ixgbe_configure_rscctl(adapter, ring); + +- if (hw->mac.type == ixgbe_mac_82598EB) { ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: + /* + * enable cache line friendly hardware writes: + * PTHRESH=32 descriptors (half the internal cache), +@@ -3515,8 +3943,37 @@ void ixgbe_configure_rx_ring(struct ixgbe_adapter *adapter, + */ + rxdctl &= ~0x3FFFFF; + rxdctl |= 0x080420; ++ break; ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++#if (PAGE_SIZE < 8192) ++ rxdctl &= ~(IXGBE_RXDCTL_RLPMLMASK | ++ IXGBE_RXDCTL_RLPML_EN); ++ ++ /* Limit the maximum frame size so we don't overrun the skb */ ++ if (ring_uses_build_skb(ring) && ++ !test_bit(__IXGBE_RX_3K_BUFFER, &ring->state)) ++ rxdctl |= IXGBE_MAX_2K_FRAME_BUILD_SKB | ++ IXGBE_RXDCTL_RLPML_EN; ++#endif ++#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ ++ /* If operating in IOV mode set RLPML */ ++ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) ++ break; ++ rxdctl |= ring->rx_buf_len | IXGBE_RXDCTL_RLPML_EN; ++#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ ++ break; ++ default: ++ break; + } + ++ /* initialize Rx descriptor 0 */ ++ rx_desc = IXGBE_RX_DESC(ring, 0); ++ rx_desc->wb.upper.length = 0; ++ + /* enable receive descriptor ring */ + rxdctl |= IXGBE_RXDCTL_ENABLE; + IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), rxdctl); +@@ -3529,7 +3986,7 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + int rss_i = adapter->ring_feature[RING_F_RSS].indices; +- u16 pool; ++ int p; + + /* PSRTYPE must be initialized in non 82598 adapters */ + u32 psrtype = IXGBE_PSRTYPE_TCPHDR | +@@ -3546,8 +4003,67 @@ static void ixgbe_setup_psrtype(struct ixgbe_adapter *adapter) + else if (rss_i > 1) + psrtype |= 1 << 29; + +- for_each_set_bit(pool, &adapter->fwd_bitmask, 32) +- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); ++ for (p = 0; p < adapter->num_rx_pools; p++) 
++ IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(p)), psrtype); ++} ++ ++/** ++ * ixgbe_configure_bridge_mode - common settings for configuring bridge mode ++ * @adapter - the private structure ++ * ++ * This function's purpose is to remove code duplication and configure some ++ * settings require to switch bridge modes. ++ **/ ++static void ixgbe_configure_bridge_mode(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw * hw = &adapter->hw; ++ unsigned int p; ++ u32 vmdctl; ++ ++ if (adapter->flags & IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE) { ++ /* disable Tx loopback, rely on switch hairpin mode */ ++ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, 0); ++ ++ /* must enable Rx switching replication to allow multicast ++ * packet reception on all VFs, and to enable source address ++ * pruning. ++ */ ++ vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); ++ vmdctl |= IXGBE_VT_CTL_REPLEN; ++ IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); ++ ++ /* enable Rx source address pruning. Note, this requires ++ * replication to be enabled or else it does nothing. ++ */ ++ for (p = 0; p < (adapter->num_vfs + adapter->num_rx_pools); p++) { ++ if (hw->mac.ops.set_source_address_pruning) ++ hw->mac.ops.set_source_address_pruning(hw, ++ true, ++ p); ++ } ++ } else { ++ /* enable Tx loopback for internal VF/PF communication */ ++ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); ++ ++ /* disable Rx switching replication unless we have SR-IOV ++ * virtual functions ++ */ ++ vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); ++ if (!adapter->num_vfs) ++ vmdctl &= ~IXGBE_VT_CTL_REPLEN; ++ IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); ++ ++ /* disable Rx source address pruning, since we don't expect to ++ * be receiving external loopback of our transmitted frames. 
++ */ ++ for (p = 0; p < (adapter->num_vfs + adapter->num_rx_pools); p++) { ++ if (hw->mac.ops.set_source_address_pruning) ++ hw->mac.ops.set_source_address_pruning(hw, ++ false, ++ p); ++ } ++ } ++ + } + + static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) +@@ -3557,29 +4073,60 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) + u32 gcr_ext, vmdctl; + int i; + +- if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) ++ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED)) + return; + +- vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); +- vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; +- vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; +- vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; +- vmdctl |= IXGBE_VT_CTL_REPLEN; +- IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); +- +- vf_shift = VMDQ_P(0) % 32; +- reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; +- +- /* Enable only the PF's pool for Tx/Rx */ +- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); +- IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); +- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); +- IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); +- if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) +- IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ vmdctl = IXGBE_READ_REG(hw, IXGBE_VMD_CTL); ++ vmdctl |= IXGBE_VMD_CTL_VMDQ_EN; ++ IXGBE_WRITE_REG(hw, IXGBE_VMD_CTL, vmdctl); ++ break; ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ vmdctl = IXGBE_READ_REG(hw, IXGBE_VT_CTL); ++ vmdctl |= IXGBE_VT_CTL_VT_ENABLE; ++ vmdctl &= ~IXGBE_VT_CTL_POOL_MASK; ++ vmdctl |= VMDQ_P(0) << IXGBE_VT_CTL_POOL_SHIFT; ++ if (adapter->num_vfs) ++ vmdctl |= IXGBE_VT_CTL_REPLEN; ++ IXGBE_WRITE_REG(hw, IXGBE_VT_CTL, vmdctl); ++ ++ for (i = 1; i < adapter->num_rx_pools; i++) { ++ u32 vmolr; ++ int pool = VMDQ_P(i); ++ ++ /* 
accept untagged packets until a vlan tag is ++ * specifically set for the VMDQ queue/pool ++ */ ++ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); ++ vmolr |= IXGBE_VMOLR_AUPE; ++ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); ++ } ++ ++ vf_shift = VMDQ_P(0) % 32; ++ reg_offset = (VMDQ_P(0) >= 32) ? 1 : 0; + +- /* Map PF MAC address in RAR Entry 0 to first pool following VFs */ +- hw->mac.ops.set_vmdq(hw, 0, VMDQ_P(0)); ++ /* Enable only the PF pools for Tx/Rx */ ++ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset), (~0) << vf_shift); ++ IXGBE_WRITE_REG(hw, IXGBE_VFRE(reg_offset ^ 1), reg_offset - 1); ++ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), (~0) << vf_shift); ++ IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset ^ 1), reg_offset - 1); ++ ++ /* clear VLAN promisc flag so VFTA ++ * will be updated if necessary ++ */ ++ adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; ++ break; ++ default: ++ break; ++ } ++ ++ if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) ++ return; + + /* + * Set up VF register offsets for selected VT Mode, +@@ -3599,15 +4146,21 @@ static void ixgbe_configure_virtualization(struct ixgbe_adapter *adapter) + + IXGBE_WRITE_REG(hw, IXGBE_GCR_EXT, gcr_ext); + +- +- /* Enable MAC Anti-Spoofing */ +- hw->mac.ops.set_mac_anti_spoofing(hw, (adapter->num_vfs != 0), +- adapter->num_vfs); +- /* For VFs that have spoof checking turned off */ ++ /* configure default bridge settings */ ++ ixgbe_configure_bridge_mode(adapter); ++#if IS_ENABLED(CONFIG_PCI_IOV) + for (i = 0; i < adapter->num_vfs; i++) { +- if (!adapter->vfinfo[i].spoofchk_enabled) +- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, false); ++ /* configure spoof checking */ ++ ixgbe_ndo_set_vf_spoofchk(adapter->netdev, i, ++ adapter->vfinfo[i].spoofchk_enabled); ++ ++#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN ++ /* Enable/Disable RSS query feature */ ++ ixgbe_ndo_set_vf_rss_query_en(adapter->netdev, i, ++ adapter->vfinfo[i].rss_query_enabled); ++#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ + } ++#endif /* CONFIG_PCI_IOV */ 
+ } + + static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) +@@ -3618,14 +4171,25 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) + struct ixgbe_ring *rx_ring; + int i; + u32 mhadd, hlreg0; ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ int rx_buf_len; ++#endif ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ max_frame += IXGBE_TS_HDR_LEN; ++ default: ++ break; ++ } + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* adjust max frame to be able to do baby jumbo for FCoE */ + if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && + (max_frame < IXGBE_FCOE_JUMBO_FRAME_SIZE)) + max_frame = IXGBE_FCOE_JUMBO_FRAME_SIZE; +- +-#endif /* IXGBE_FCOE */ ++#endif /* CONFIG_FCOE */ + + /* adjust max frame to be at least the size of a standard frame */ + if (max_frame < (ETH_FRAME_LEN + ETH_FCS_LEN)) +@@ -3639,8 +4203,32 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) + IXGBE_WRITE_REG(hw, IXGBE_MHADD, mhadd); + } + ++#ifdef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ /* MHADD will allow an extra 4 bytes past for vlan tagged frames */ ++ max_frame += VLAN_HLEN; ++ ++ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) && ++ (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)) { ++ rx_buf_len = MAXIMUM_ETHERNET_VLAN_SIZE; ++ /* ++ * Make best use of allocation by using all but 1K of a ++ * power of 2 allocation that will be used for skb->head. 
++ */ ++ } else if (max_frame <= IXGBE_RXBUFFER_3K) { ++ rx_buf_len = IXGBE_RXBUFFER_3K; ++ } else if (max_frame <= IXGBE_RXBUFFER_7K) { ++ rx_buf_len = IXGBE_RXBUFFER_7K; ++ } else if (max_frame <= IXGBE_RXBUFFER_15K) { ++ rx_buf_len = IXGBE_RXBUFFER_15K; ++ } else { ++ rx_buf_len = IXGBE_MAX_RXBUFFER; ++ } ++ ++#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); +- /* set jumbo enable since MHADD.MFS is keeping size locked at max_frame */ ++ /* set jumbo enable since MHADD.MFS is keeping size locked at ++ * max_frame ++ */ + hlreg0 |= IXGBE_HLREG0_JUMBOEN; + IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); + +@@ -3650,10 +4238,48 @@ static void ixgbe_set_rx_buffer_len(struct ixgbe_adapter *adapter) + */ + for (i = 0; i < adapter->num_rx_queues; i++) { + rx_ring = adapter->rx_ring[i]; ++ ++ clear_ring_rsc_enabled(rx_ring); + if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) + set_ring_rsc_enabled(rx_ring); +- else +- clear_ring_rsc_enabled(rx_ring); ++ ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ clear_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); ++ clear_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); ++#if IS_ENABLED(CONFIG_FCOE) ++ ++ if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state)) ++ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); ++#endif ++#ifdef HAVE_SWIOTLB_SKIP_CPU_SYNC ++ ++ if (adapter->flags2 & IXGBE_FLAG2_RX_LEGACY) ++ continue; ++ ++ set_bit(__IXGBE_RX_BUILD_SKB_ENABLED, &rx_ring->state); ++ ++#if (PAGE_SIZE < 8192) ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) ++ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); ++ ++ if (IXGBE_2K_TOO_SMALL_WITH_PADDING || ++ (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) ++ set_bit(__IXGBE_RX_3K_BUFFER, &rx_ring->state); ++#endif ++#else /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ ++ ++ adapter->flags2 |= IXGBE_FLAG2_RX_LEGACY; ++#endif /* !HAVE_SWIOTLB_SKIP_CPU_SYNC */ ++#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ ++ ++ rx_ring->rx_buf_len = rx_buf_len; ++#if IS_ENABLED(CONFIG_FCOE) ++ 
++ if (test_bit(__IXGBE_RX_FCOE, &rx_ring->state) && ++ (rx_buf_len < IXGBE_FCOE_JUMBO_FRAME_SIZE)) ++ rx_ring->rx_buf_len = IXGBE_FCOE_JUMBO_FRAME_SIZE; ++#endif /* CONFIG_FCOE */ ++#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ + } + } + +@@ -3663,6 +4289,22 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) + u32 rdrxctl = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); + + switch (hw->mac.type) { ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ if (adapter->num_vfs) ++ rdrxctl |= IXGBE_RDRXCTL_PSP; ++ /* fall through */ ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ /* Disable RSC for ACK packets */ ++ IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, ++ (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); ++ rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; ++ /* hardware requires some bits to be set by default */ ++ rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); ++ rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; ++ break; + case ixgbe_mac_82598EB: + /* + * For VMDq support of different descriptor types or +@@ -3676,16 +4318,6 @@ static void ixgbe_setup_rdrxctl(struct ixgbe_adapter *adapter) + */ + rdrxctl |= IXGBE_RDRXCTL_MVMEN; + break; +- case ixgbe_mac_82599EB: +- case ixgbe_mac_X540: +- /* Disable RSC for ACK packets */ +- IXGBE_WRITE_REG(hw, IXGBE_RSCDBU, +- (IXGBE_RSCDBU_RSCACKDIS | IXGBE_READ_REG(hw, IXGBE_RSCDBU))); +- rdrxctl &= ~IXGBE_RDRXCTL_RSCFRSTSIZE; +- /* hardware requires some bits to be set by default */ +- rdrxctl |= (IXGBE_RDRXCTL_RSCACKC | IXGBE_RDRXCTL_FCOE_WRFIX); +- rdrxctl |= IXGBE_RDRXCTL_CRCSTRIP; +- break; + default: + /* We should do nothing since we don't know this hardware */ + return; +@@ -3707,8 +4339,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) + u32 rxctrl, rfctl; + + /* disable receives while setting up the descriptors */ +- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); ++ ixgbe_disable_rx(hw); + + 
ixgbe_setup_psrtype(adapter); + ixgbe_setup_rdrxctl(adapter); +@@ -3718,6 +4349,9 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) + rfctl &= ~IXGBE_RFCTL_RSC_DIS; + if (!(adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED)) + rfctl |= IXGBE_RFCTL_RSC_DIS; ++ ++ /* disable NFS filtering */ ++ rfctl |= (IXGBE_RFCTL_NFSW_DIS | IXGBE_RFCTL_NFSR_DIS); + IXGBE_WRITE_REG(hw, IXGBE_RFCTL, rfctl); + + /* Program registers for the distribution of queues */ +@@ -3733,6 +4367,7 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) + for (i = 0; i < adapter->num_rx_queues; i++) + ixgbe_configure_rx_ring(adapter, adapter->rx_ring[i]); + ++ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); + /* disable drop enable for 82598 parts */ + if (hw->mac.type == ixgbe_mac_82598EB) + rxctrl |= IXGBE_RXCTRL_DMBYPS; +@@ -3742,93 +4377,238 @@ static void ixgbe_configure_rx(struct ixgbe_adapter *adapter) + hw->mac.ops.enable_rx_dma(hw, rxctrl); + } + ++#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID ++#ifdef NETIF_F_HW_VLAN_CTAG_TX + static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, +- __be16 proto, u16 vid) ++ __always_unused __be16 proto, u16 vid) ++#else /* !NETIF_F_HW_VLAN_CTAG_TX */ ++static int ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) ++#endif /* NETIF_F_HW_VLAN_CTAG_TX */ ++#else /* !HAVE_INT_NDO_VLAN_RX_ADD_VID */ ++static void ixgbe_vlan_rx_add_vid(struct net_device *netdev, u16 vid) ++#endif /* HAVE_INT_NDO_VLAN_RX_ADD_VID */ + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; ++ int pool_ndx = VMDQ_P(0); + + /* add VID to filter table */ +- hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), true); +- set_bit(vid, adapter->active_vlans); ++ if (hw->mac.ops.set_vfta) { ++#ifndef HAVE_VLAN_RX_REGISTER ++ if (vid < VLAN_N_VID) ++ set_bit(vid, adapter->active_vlans); ++#endif + +- return 0; +-} ++ if (!vid || !(adapter->flags2 & 
IXGBE_FLAG2_VLAN_PROMISC)) ++ hw->mac.ops.set_vfta(hw, vid, pool_ndx, true, !!vid); + +-static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, +- __be16 proto, u16 vid) +-{ +- struct ixgbe_adapter *adapter = netdev_priv(netdev); +- struct ixgbe_hw *hw = &adapter->hw; ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED && ++ adapter->hw.mac.type != ixgbe_mac_82598EB) { ++ int i; + +- /* remove VID from filter table */ +- hw->mac.ops.set_vfta(&adapter->hw, vid, VMDQ_P(0), false); +- clear_bit(vid, adapter->active_vlans); ++ /* enable vlan id for all pools */ ++ for (i = 1; i < adapter->num_rx_pools; i++) ++ hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), true, ++#ifdef HAVE_VLAN_RX_REGISTER ++ false); ++#else ++ true); ++#endif ++ } ++ } ++#ifndef HAVE_NETDEV_VLAN_FEATURES + ++ /* ++ * Copy feature flags from netdev to the vlan netdev for this vid. ++ * This allows things like TSO to bubble down to our vlan device. ++ * Some vlans, such as VLAN 0 for DCB will not have a v_netdev so ++ * we will not have a netdev that needs updating. 
++ */ ++ if (adapter->vlgrp) { ++ struct vlan_group *vlgrp = adapter->vlgrp; ++ struct net_device *v_netdev = vlan_group_get_device(vlgrp, vid); ++ if (v_netdev) { ++ v_netdev->features |= netdev->features; ++ vlan_group_set_device(vlgrp, vid, v_netdev); ++ } ++ } ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID + return 0; ++#endif + } + +-/** +- * ixgbe_vlan_strip_disable - helper to disable hw vlan stripping +- * @adapter: driver data +- */ +-static void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) ++#if defined(HAVE_VLAN_RX_REGISTER) && defined(CONFIG_PCI_IOV) ++int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) ++#else ++static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) ++#endif + { +- struct ixgbe_hw *hw = &adapter->hw; +- u32 vlnctrl; +- int i, j; ++ u32 vlvf; ++ int idx; + +- switch (hw->mac.type) { +- case ixgbe_mac_82598EB: +- vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); +- vlnctrl &= ~IXGBE_VLNCTRL_VME; +- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); +- break; +- case ixgbe_mac_82599EB: +- case ixgbe_mac_X540: +- for (i = 0; i < adapter->num_rx_queues; i++) { +- struct ixgbe_ring *ring = adapter->rx_ring[i]; ++ /* short cut the special case */ ++ if (vlan == 0) ++ return 0; + +- if (ring->l2_accel_priv) +- continue; +- j = ring->reg_idx; +- vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); +- vlnctrl &= ~IXGBE_RXDCTL_VME; +- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); +- } +- break; +- default: +- break; ++ /* Search for the vlan id in the VLVF entries */ ++ for (idx = IXGBE_VLVF_ENTRIES; --idx;) { ++ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(idx)); ++ if ((vlvf & VLAN_VID_MASK) == vlan) ++ break; + } ++ ++ return idx; + } + +-/** +- * ixgbe_vlan_strip_enable - helper to enable hw vlan stripping +- * @adapter: driver data ++void ixgbe_update_pf_promisc_vlvf(struct ixgbe_adapter *adapter, u32 vid) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 bits, word; ++ int idx; ++ ++ idx = 
ixgbe_find_vlvf_entry(hw, vid); ++ if (!idx) ++ return; ++ ++ /* See if any other pools are set for this VLAN filter ++ * entry other than the PF. ++ */ ++ word = idx * 2 + (VMDQ_P(0) / 32); ++ bits = ~(1 << (VMDQ_P(0)) % 32); ++ bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); ++ ++ /* Disable the filter so this falls into the default pool. */ ++ if (!bits && !IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1))) { ++ if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) ++ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_VLVF(idx), 0); ++ } ++} ++ ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, ++ __always_unused __be16 proto, u16 vid) ++#else /* !NETIF_F_HW_VLAN_CTAG_RX */ ++static int ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#else ++static void ixgbe_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) ++#endif ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ int pool_ndx = VMDQ_P(0); ++ ++ /* User is not allowed to remove vlan ID 0 */ ++ if (!vid) ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID ++ return 0; ++#else ++ return; ++#endif ++ ++#ifdef HAVE_VLAN_RX_REGISTER ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_disable(adapter); ++ ++ vlan_group_set_device(adapter->vlgrp, vid, NULL); ++ ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_enable(adapter, true, true); ++ ++#endif /* HAVE_VLAN_RX_REGISTER */ ++ /* remove VID from filter table */ ++ if (hw->mac.ops.set_vfta) { ++ if (vid && !(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) ++ hw->mac.ops.set_vfta(hw, vid, pool_ndx, false, true); ++ ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED && ++ adapter->hw.mac.type != ixgbe_mac_82598EB) { ++ int i; ++ ++ /* remove vlan id from all pools */ ++ for (i = 1; i < adapter->num_rx_pools; i++) ++ hw->mac.ops.set_vfta(hw, vid, VMDQ_P(i), false, ++ 
true); ++ } ++ } ++#ifndef HAVE_VLAN_RX_REGISTER ++ ++ clear_bit(vid, adapter->active_vlans); ++#endif ++#ifdef HAVE_INT_NDO_VLAN_RX_ADD_VID ++ return 0; ++#endif ++} ++ ++#ifdef HAVE_8021P_SUPPORT ++/** ++ * ixgbe_vlan_strip_disable - helper to disable vlan tag stripping ++ * @adapter: driver data + */ +-static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) ++void ixgbe_vlan_strip_disable(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + u32 vlnctrl; +- int i, j; ++ int i; ++ ++ /* leave vlan tag stripping enabled for DCB */ ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) ++ return; + + switch (hw->mac.type) { + case ixgbe_mac_82598EB: + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); +- vlnctrl |= IXGBE_VLNCTRL_VME; ++ vlnctrl &= ~IXGBE_VLNCTRL_VME; + IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + for (i = 0; i < adapter->num_rx_queues; i++) { +- struct ixgbe_ring *ring = adapter->rx_ring[i]; ++ u8 reg_idx = adapter->rx_ring[i]->reg_idx; ++ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); ++ vlnctrl &= ~IXGBE_RXDCTL_VME; ++ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl); ++ } ++ break; ++ default: ++ break; ++ } ++} + +- if (ring->l2_accel_priv) +- continue; +- j = ring->reg_idx; +- vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(j)); ++#endif /* HAVE_8021P_SUPPORT */ ++/** ++ * ixgbe_vlan_strip_enable - helper to enable vlan tag stripping ++ * @adapter: driver data ++ */ ++void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 vlnctrl; ++ int i; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ++ vlnctrl |= IXGBE_VLNCTRL_VME; ++ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); ++ break; ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case 
ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ u8 reg_idx = adapter->rx_ring[i]->reg_idx; ++ vlnctrl = IXGBE_READ_REG(hw, IXGBE_RXDCTL(reg_idx)); + vlnctrl |= IXGBE_RXDCTL_VME; +- IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(j), vlnctrl); ++ IXGBE_WRITE_REG(hw, IXGBE_RXDCTL(reg_idx), vlnctrl); + } + break; + default: +@@ -3836,14 +4616,249 @@ static void ixgbe_vlan_strip_enable(struct ixgbe_adapter *adapter) + } + } + ++#ifndef HAVE_VLAN_RX_REGISTER ++static void ixgbe_vlan_promisc_enable(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 vlnctrl, i; ++ ++ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ++ ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) { ++ /* we need to keep the VLAN filter on in SRIOV */ ++ vlnctrl |= IXGBE_VLNCTRL_VFE; ++ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); ++ } else { ++ vlnctrl &= ~IXGBE_VLNCTRL_VFE; ++ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); ++ return; ++ } ++ ++ /* Nothing to do for 82598 */ ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ return; ++ ++ /* We are already in VLAN promisc, nothing to do */ ++ if (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC) ++ return; ++ ++ /* Set flag so we don't redo unnecessary work */ ++ adapter->flags2 |= IXGBE_FLAG2_VLAN_PROMISC; ++ ++ /* Add PF to all active pools */ ++ for (i = IXGBE_VLVF_ENTRIES; --i;) { ++ u32 reg_offset = IXGBE_VLVFB(i * 2 + VMDQ_P(0) / 32); ++ u32 vlvfb = IXGBE_READ_REG(hw, reg_offset); ++ ++ vlvfb |= 1 << (VMDQ_P(0) % 32); ++ IXGBE_WRITE_REG(hw, reg_offset, vlvfb); ++ } ++ ++ /* Set all bits in the VLAN filter table array */ ++ for (i = hw->mac.vft_size; i--;) ++ IXGBE_WRITE_REG(hw, IXGBE_VFTA(i), ~0U); ++} ++ ++#define VFTA_BLOCK_SIZE 8 ++static void ixgbe_scrub_vfta(struct ixgbe_adapter *adapter, u32 vfta_offset) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 vfta[VFTA_BLOCK_SIZE] = { 0 }; ++ u32 vid_start = vfta_offset * 32; ++ u32 vid_end = vid_start + (VFTA_BLOCK_SIZE * 32); ++ u32 i, vid, 
word, bits; ++ ++ for (i = IXGBE_VLVF_ENTRIES; --i;) { ++ u32 vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); ++ ++ /* pull VLAN ID from VLVF */ ++ vid = vlvf & VLAN_VID_MASK; ++ ++ /* only concern outselves with a certain range */ ++ if (vid < vid_start || vid >= vid_end) ++ continue; ++ ++ if (vlvf) { ++ /* record VLAN ID in VFTA */ ++ vfta[(vid - vid_start) / 32] |= 1 << (vid % 32); ++ ++ /* if PF is part of this then continue */ ++ if (test_bit(vid, adapter->active_vlans)) ++ continue; ++ } ++ ++ /* remove PF from the pool */ ++ word = i * 2 + VMDQ_P(0) / 32; ++ bits = ~(1 << (VMDQ_P(0) % 32)); ++ bits &= IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); ++ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), bits); ++ } ++ ++ /* extract values from active_vlans and write back to VFTA */ ++ for (i = VFTA_BLOCK_SIZE; i--;) { ++ vid = (vfta_offset + i) * 32; ++ word = vid / BITS_PER_LONG; ++ bits = vid % BITS_PER_LONG; ++ ++ vfta[i] |= adapter->active_vlans[word] >> bits; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_VFTA(vfta_offset + i), vfta[i]); ++ } ++} ++ ++static void ixgbe_vlan_promisc_disable(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 vlnctrl, i; ++ ++ /* configure vlan filtering */ ++ vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ++ vlnctrl |= IXGBE_VLNCTRL_VFE; ++ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); ++ ++ if (!(adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) || ++ hw->mac.type == ixgbe_mac_82598EB) ++ return; ++ ++ /* We are not in VLAN promisc, nothing to do */ ++ if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) ++ return; ++ ++ /* Set flag so we don't redo unnecessary work */ ++ adapter->flags2 &= ~IXGBE_FLAG2_VLAN_PROMISC; ++ ++ for (i = 0; i < hw->mac.vft_size; i += VFTA_BLOCK_SIZE) ++ ixgbe_scrub_vfta(adapter, i); ++} ++#endif /* HAVE_VLAN_RX_REGISTER */ ++ ++#ifdef HAVE_VLAN_RX_REGISTER ++static void ixgbe_vlan_mode(struct net_device *netdev, struct vlan_group *grp) ++#else ++void ixgbe_vlan_mode(struct net_device *netdev, u32 features) ++#endif ++{ 
++#if defined(HAVE_VLAN_RX_REGISTER) || defined(HAVE_8021P_SUPPORT) ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++#endif ++#ifdef HAVE_8021P_SUPPORT ++ bool enable; ++#endif ++ ++#ifdef HAVE_VLAN_RX_REGISTER ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_disable(adapter); ++ ++ adapter->vlgrp = grp; ++ ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ ixgbe_irq_enable(adapter, true, true); ++#endif ++#ifdef HAVE_8021P_SUPPORT ++#ifdef HAVE_VLAN_RX_REGISTER ++ enable = (grp || (adapter->flags & IXGBE_FLAG_DCB_ENABLED)); ++#else ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ enable = !!(features & NETIF_F_HW_VLAN_CTAG_RX); ++#else ++ enable = !!(features & NETIF_F_HW_VLAN_RX); ++#endif /* NETIF_F_HW_VLAN_CTAG_RX */ ++#endif /* HAVE_VLAN_RX_REGISTER */ ++ if (enable) ++ /* enable VLAN tag insert/strip */ ++ ixgbe_vlan_strip_enable(adapter); ++ else ++ /* disable VLAN tag insert/strip */ ++ ixgbe_vlan_strip_disable(adapter); ++ ++#endif /* HAVE_8021P_SUPPORT */ ++} ++ + static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) + { +- u16 vid; ++ u16 vid = 1; ++#ifdef HAVE_VLAN_RX_REGISTER ++ ++ ixgbe_vlan_mode(adapter->netdev, adapter->vlgrp); ++ ++ /* ++ * add vlan ID 0 and enable vlan tag stripping so we ++ * always accept priority-tagged traffic ++ */ ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); ++#else ++ ixgbe_vlan_rx_add_vid(adapter->netdev, 0); ++#endif ++#ifndef HAVE_8021P_SUPPORT ++ ixgbe_vlan_strip_enable(adapter); ++#endif ++ ++ if (adapter->vlgrp) { ++ for (; vid < VLAN_N_VID; vid++) { ++ if (!vlan_group_get_device(adapter->vlgrp, vid)) ++ continue; ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ ixgbe_vlan_rx_add_vid(adapter->netdev, ++ htons(ETH_P_8021Q), vid); ++#else ++ ixgbe_vlan_rx_add_vid(adapter->netdev, vid); ++#endif ++ } ++ } ++#else /* !HAVE_VLAN_RX_REGISTER */ + ++#ifdef NETIF_F_HW_VLAN_CTAG_RX + ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), 0); ++#else ++ 
ixgbe_vlan_rx_add_vid(adapter->netdev, 0); ++#endif + +- for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID) ++ for_each_set_bit_from(vid, adapter->active_vlans, VLAN_N_VID) ++#ifdef NETIF_F_HW_VLAN_CTAG_RX + ixgbe_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid); ++#else ++ ixgbe_vlan_rx_add_vid(adapter->netdev, vid); ++#endif ++#endif /* HAVE_VLAN_RX_REGISTER */ ++} ++ ++#endif ++static u8 *ixgbe_addr_list_itr(struct ixgbe_hw __maybe_unused *hw, u8 **mc_addr_ptr, u32 *vmdq) ++{ ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ struct netdev_hw_addr *mc_ptr; ++#else ++ struct dev_mc_list *mc_ptr; ++#endif ++#ifdef CONFIG_PCI_IOV ++ struct ixgbe_adapter *adapter = hw->back; ++#endif /* CONFIG_PCI_IOV */ ++ u8 *addr = *mc_addr_ptr; ++ ++ /* VMDQ_P implicitely uses the adapter struct when CONFIG_PCI_IOV is ++ * defined, so we have to wrap the pointer above correctly to prevent ++ * a warning. ++ */ ++ *vmdq = VMDQ_P(0); ++ ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ mc_ptr = container_of(addr, struct netdev_hw_addr, addr[0]); ++ if (mc_ptr->list.next) { ++ struct netdev_hw_addr *ha; ++ ++ ha = list_entry(mc_ptr->list.next, struct netdev_hw_addr, list); ++ *mc_addr_ptr = ha->addr; ++ } ++#else ++ mc_ptr = container_of(addr, struct dev_mc_list, dmi_addr[0]); ++ if (mc_ptr->next) ++ *mc_addr_ptr = mc_ptr->next->dmi_addr; ++#endif ++ else ++ *mc_addr_ptr = NULL; ++ ++ return addr; + } + + /** +@@ -3855,149 +4870,204 @@ static void ixgbe_restore_vlan(struct ixgbe_adapter *adapter) + * 0 on no addresses written + * X on writing X addresses to MTA + **/ +-static int ixgbe_write_mc_addr_list(struct net_device *netdev) ++int ixgbe_write_mc_addr_list(struct net_device *netdev) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ struct netdev_hw_addr *ha; ++#endif ++ u8 *addr_list = NULL; ++ int addr_count = 0; ++ ++ if (!hw->mac.ops.update_mc_addr_list) ++ return -ENOMEM; + + if 
(!netif_running(netdev)) + return 0; + +- if (hw->mac.ops.update_mc_addr_list) +- hw->mac.ops.update_mc_addr_list(hw, netdev); +- else +- return -ENOMEM; ++ ++ if (netdev_mc_empty(netdev)) { ++ hw->mac.ops.update_mc_addr_list(hw, NULL, 0, ++ ixgbe_addr_list_itr, true); ++ } else { ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ ha = list_first_entry(&netdev->mc.list, ++ struct netdev_hw_addr, list); ++ addr_list = ha->addr; ++#else ++ addr_list = netdev->mc_list->dmi_addr; ++#endif ++ addr_count = netdev_mc_count(netdev); ++ ++ hw->mac.ops.update_mc_addr_list(hw, addr_list, addr_count, ++ ixgbe_addr_list_itr, true); ++ } + + #ifdef CONFIG_PCI_IOV + ixgbe_restore_vf_multicasts(adapter); + #endif +- +- return netdev_mc_count(netdev); ++ return addr_count; + } + + #ifdef CONFIG_PCI_IOV + void ixgbe_full_sync_mac_table(struct ixgbe_adapter *adapter) + { ++ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + int i; +- for (i = 0; i < hw->mac.num_rar_entries; i++) { +- if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) +- hw->mac.ops.set_rar(hw, i, adapter->mac_table[i].addr, +- adapter->mac_table[i].queue, ++ ++ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { ++ mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; ++ ++ if (mac_table->state & IXGBE_MAC_STATE_IN_USE) ++ hw->mac.ops.set_rar(hw, i, ++ mac_table->addr, ++ mac_table->pool, + IXGBE_RAH_AV); + else + hw->mac.ops.clear_rar(hw, i); +- +- adapter->mac_table[i].state &= ~(IXGBE_MAC_STATE_MODIFIED); + } + } + #endif + + static void ixgbe_sync_mac_table(struct ixgbe_adapter *adapter) + { ++ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + int i; +- for (i = 0; i < hw->mac.num_rar_entries; i++) { +- if (adapter->mac_table[i].state & IXGBE_MAC_STATE_MODIFIED) { +- if (adapter->mac_table[i].state & +- IXGBE_MAC_STATE_IN_USE) +- hw->mac.ops.set_rar(hw, i, +- adapter->mac_table[i].addr, +- adapter->mac_table[i].queue, +- 
IXGBE_RAH_AV); +- else +- hw->mac.ops.clear_rar(hw, i); + +- adapter->mac_table[i].state &= +- ~(IXGBE_MAC_STATE_MODIFIED); +- } +- } +-} ++ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { ++ if (!(mac_table->state & IXGBE_MAC_STATE_MODIFIED)) ++ continue; + +-static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) +-{ +- int i; +- struct ixgbe_hw *hw = &adapter->hw; ++ mac_table->state &= ~IXGBE_MAC_STATE_MODIFIED; + +- for (i = 0; i < hw->mac.num_rar_entries; i++) { +- adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; +- adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; +- memset(adapter->mac_table[i].addr, 0, ETH_ALEN); +- adapter->mac_table[i].queue = 0; ++ if (mac_table->state & IXGBE_MAC_STATE_IN_USE) ++ hw->mac.ops.set_rar(hw, i, ++ mac_table->addr, ++ mac_table->pool, ++ IXGBE_RAH_AV); ++ else ++ hw->mac.ops.clear_rar(hw, i); + } +- ixgbe_sync_mac_table(adapter); + } + +-static int ixgbe_available_rars(struct ixgbe_adapter *adapter) ++int ixgbe_available_rars(struct ixgbe_adapter *adapter, u16 pool) + { ++ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + int i, count = 0; + +- for (i = 0; i < hw->mac.num_rar_entries; i++) { +- if (adapter->mac_table[i].state == 0) +- count++; ++ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { ++ /* do not count default RAR as available */ ++ if (mac_table->state & IXGBE_MAC_STATE_DEFAULT) ++ continue; ++ ++ /* only count unused and addresses that belong to us */ ++ if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { ++ if (mac_table->pool != pool) ++ continue; ++ } ++ ++ count++; + } ++ + return count; + } + + /* this function destroys the first RAR entry */ +-static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter, +- u8 *addr) ++static void ixgbe_mac_set_default_filter(struct ixgbe_adapter *adapter) + { ++ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + +- 
memcpy(&adapter->mac_table[0].addr, addr, ETH_ALEN); +- adapter->mac_table[0].queue = VMDQ_P(0); +- adapter->mac_table[0].state = (IXGBE_MAC_STATE_DEFAULT | +- IXGBE_MAC_STATE_IN_USE); +- hw->mac.ops.set_rar(hw, 0, adapter->mac_table[0].addr, +- adapter->mac_table[0].queue, ++ ether_addr_copy(mac_table->addr, hw->mac.addr); ++ mac_table->pool = VMDQ_P(0); ++ ++ mac_table->state = IXGBE_MAC_STATE_DEFAULT | IXGBE_MAC_STATE_IN_USE; ++ ++ hw->mac.ops.set_rar(hw, 0, mac_table->addr, mac_table->pool, + IXGBE_RAH_AV); + } + +-int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) ++int ixgbe_add_mac_filter(struct ixgbe_adapter *adapter, ++ const u8 *addr, u16 pool) + { ++ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; + int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + +- for (i = 0; i < hw->mac.num_rar_entries; i++) { +- if (adapter->mac_table[i].state & IXGBE_MAC_STATE_IN_USE) ++ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { ++ if (mac_table->state & IXGBE_MAC_STATE_IN_USE) { + continue; +- adapter->mac_table[i].state |= (IXGBE_MAC_STATE_MODIFIED | +- IXGBE_MAC_STATE_IN_USE); +- ether_addr_copy(adapter->mac_table[i].addr, addr); +- adapter->mac_table[i].queue = queue; ++ } ++ ++ ether_addr_copy(mac_table->addr, addr); ++ mac_table->pool = pool; ++ ++ mac_table->state |= IXGBE_MAC_STATE_MODIFIED | ++ IXGBE_MAC_STATE_IN_USE; ++ + ixgbe_sync_mac_table(adapter); ++ + return i; + } ++ + return -ENOMEM; + } + +-int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) ++static void ixgbe_flush_sw_mac_table(struct ixgbe_adapter *adapter) + { +- /* search table for addr, if found, set to 0 and sync */ ++ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; ++ struct ixgbe_hw *hw = &adapter->hw; + int i; ++ ++ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { ++ mac_table->state |= IXGBE_MAC_STATE_MODIFIED; ++ mac_table->state &= 
~IXGBE_MAC_STATE_IN_USE; ++ } ++ ++ ixgbe_sync_mac_table(adapter); ++} ++ ++int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, ++ const u8 *addr, u16 pool) ++{ ++ struct ixgbe_mac_addr *mac_table = &adapter->mac_table[0]; + struct ixgbe_hw *hw = &adapter->hw; ++ int i; + + if (is_zero_ether_addr(addr)) + return -EINVAL; + +- for (i = 0; i < hw->mac.num_rar_entries; i++) { +- if (ether_addr_equal(addr, adapter->mac_table[i].addr) && +- adapter->mac_table[i].queue == queue) { +- adapter->mac_table[i].state |= IXGBE_MAC_STATE_MODIFIED; +- adapter->mac_table[i].state &= ~IXGBE_MAC_STATE_IN_USE; +- memset(adapter->mac_table[i].addr, 0, ETH_ALEN); +- adapter->mac_table[i].queue = 0; +- ixgbe_sync_mac_table(adapter); +- return 0; +- } ++ /* search table for addr, if found clear IN USE flag and sync */ ++ for (i = 0; i < hw->mac.num_rar_entries; i++, mac_table++) { ++ /* we can only delete an entry if it is in use */ ++ if (!(mac_table->state & IXGBE_MAC_STATE_IN_USE)) ++ continue; ++ /* we only care about entries that belong to the given pool */ ++ if (mac_table->pool != pool) ++ continue; ++ /* we only care about a specific MAC address */ ++ if (!ether_addr_equal(addr, mac_table->addr)) ++ continue; ++ ++ mac_table->state |= IXGBE_MAC_STATE_MODIFIED; ++ mac_table->state &= ~IXGBE_MAC_STATE_IN_USE; ++ ++ ixgbe_sync_mac_table(adapter); ++ ++ return 0; + } ++ + return -ENOMEM; + } ++ ++#ifdef HAVE_SET_RX_MODE + /** + * ixgbe_write_uc_addr_list - write unicast addresses to RAR table + * @netdev: network interface device structure +@@ -4007,26 +5077,55 @@ int ixgbe_del_mac_filter(struct ixgbe_adapter *adapter, u8 *addr, u16 queue) + * 0 on no addresses written + * X on writing X addresses to the RAR table + **/ +-static int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) ++int ixgbe_write_uc_addr_list(struct net_device *netdev, int vfn) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int count = 0; + + /* return ENOMEM indicating insufficient 
memory for addresses */ +- if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter)) ++ if (netdev_uc_count(netdev) > ixgbe_available_rars(adapter, vfn)) + return -ENOMEM; + + if (!netdev_uc_empty(netdev)) { ++#ifdef NETDEV_HW_ADDR_T_UNICAST + struct netdev_hw_addr *ha; ++#else ++ struct dev_mc_list *ha; ++#endif + netdev_for_each_uc_addr(ha, netdev) { ++#ifdef NETDEV_HW_ADDR_T_UNICAST + ixgbe_del_mac_filter(adapter, ha->addr, vfn); + ixgbe_add_mac_filter(adapter, ha->addr, vfn); ++#else ++ ixgbe_del_mac_filter(adapter, ha->da_addr, vfn); ++ ixgbe_add_mac_filter(adapter, ha->da_addr, vfn); ++#endif + count++; + } + } + return count; + } + ++static int ixgbe_uc_sync(struct net_device *netdev, const unsigned char *addr) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ int ret; ++ ++ ret = ixgbe_add_mac_filter(adapter, addr, VMDQ_P(0)); ++ ++ return min_t(int, ret, 0); ++} ++ ++static int ixgbe_uc_unsync(struct net_device *netdev, const unsigned char *addr) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ ixgbe_del_mac_filter(adapter, addr, VMDQ_P(0)); ++ ++ return 0; ++} ++ ++#endif + /** + * ixgbe_set_rx_mode - Unicast, Multicast and Promiscuous mode set + * @netdev: network interface device structure +@@ -4041,54 +5140,75 @@ void ixgbe_set_rx_mode(struct net_device *netdev) + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + u32 fctrl, vmolr = IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE; ++#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) + u32 vlnctrl; ++#endif ++#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) || defined(NETIF_F_HW_VLAN_FILTER) ++ netdev_features_t features = netdev->features; ++#endif + int count; + + /* Check for Promiscuous and All Multicast modes */ + fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) + vlnctrl = IXGBE_READ_REG(hw, IXGBE_VLNCTRL); ++#endif + + /* set all bits that we expect to always be set */ +- fctrl &= 
~IXGBE_FCTRL_SBP; /* disable store-bad-packets */ + fctrl |= IXGBE_FCTRL_BAM; + fctrl |= IXGBE_FCTRL_DPF; /* discard pause frames when FC enabled */ + fctrl |= IXGBE_FCTRL_PMCF; + + /* clear the bits we are changing the status of */ + fctrl &= ~(IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); +- vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); ++#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) ++ vlnctrl &= ~(IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); ++#endif + if (netdev->flags & IFF_PROMISC) { + hw->addr_ctrl.user_set_promisc = true; + fctrl |= (IXGBE_FCTRL_UPE | IXGBE_FCTRL_MPE); + vmolr |= IXGBE_VMOLR_MPE; ++#ifdef HAVE_VLAN_RX_REGISTER + /* Only disable hardware filter vlans in promiscuous mode + * if SR-IOV and VMDQ are disabled - otherwise ensure + * that hardware VLAN filters remain enabled. + */ +- if (adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | +- IXGBE_FLAG_SRIOV_ENABLED)) ++ if ((adapter->flags & (IXGBE_FLAG_VMDQ_ENABLED | ++ IXGBE_FLAG_SRIOV_ENABLED))) + vlnctrl |= (IXGBE_VLNCTRL_VFE | IXGBE_VLNCTRL_CFIEN); ++#endif ++#ifdef NETIF_F_HW_VLAN_CTAG_FILTER ++ features &= ~NETIF_F_HW_VLAN_CTAG_FILTER; ++#endif ++#ifdef NETIF_F_HW_VLAN_FILTER ++ features &= ~NETIF_F_HW_VLAN_FILTER; ++#endif + } else { + if (netdev->flags & IFF_ALLMULTI) { + fctrl |= IXGBE_FCTRL_MPE; + vmolr |= IXGBE_VMOLR_MPE; + } +- vlnctrl |= IXGBE_VLNCTRL_VFE; + hw->addr_ctrl.user_set_promisc = false; ++#if defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) ++ /* enable hardware vlan filtering */ ++ vlnctrl |= IXGBE_VLNCTRL_VFE; ++#endif + } + ++#ifdef HAVE_SET_RX_MODE + /* + * Write addresses to available RAR registers, if there is not + * sufficient space to store all the addresses then enable + * unicast promiscuous mode + */ +- count = ixgbe_write_uc_addr_list(netdev, VMDQ_P(0)); +- if (count < 0) { ++ if (__dev_uc_sync(netdev, ixgbe_uc_sync, ixgbe_uc_unsync)) { + fctrl |= IXGBE_FCTRL_UPE; + vmolr |= IXGBE_VMOLR_ROPE; + } + +- /* Write addresses to the MTA, if the attempt fails ++#endif 
++ /* ++ * Write addresses to the MTA, if the attempt fails + * then we should just turn on promiscuous mode so + * that we can at least receive multicast traffic + */ +@@ -4107,92 +5227,236 @@ void ixgbe_set_rx_mode(struct net_device *netdev) + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(VMDQ_P(0)), vmolr); + } + +- /* This is useful for sniffing bad packets. */ +- if (adapter->netdev->features & NETIF_F_RXALL) { +- /* UPE and MPE will be handled by normal PROMISC logic +- * in e1000e_set_rx_mode */ +- fctrl |= (IXGBE_FCTRL_SBP | /* Receive bad packets */ +- IXGBE_FCTRL_BAM | /* RX All Bcast Pkts */ +- IXGBE_FCTRL_PMCF); /* RX All MAC Ctrl Pkts */ +- +- fctrl &= ~(IXGBE_FCTRL_DPF); +- /* NOTE: VLAN filtering is disabled by setting PROMISC */ +- } +- +- IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); + IXGBE_WRITE_REG(hw, IXGBE_FCTRL, fctrl); + +- if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) ++#ifdef HAVE_8021P_SUPPORT ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ if (features & NETIF_F_HW_VLAN_CTAG_RX) ++#else ++ if (features & NETIF_F_HW_VLAN_RX) ++#endif + ixgbe_vlan_strip_enable(adapter); + else + ixgbe_vlan_strip_disable(adapter); ++#endif /* HAVE_8021P_SUPPORT */ ++ ++#if defined(NETIF_F_HW_VLAN_CTAG_FILTER) ++ if (features & NETIF_F_HW_VLAN_CTAG_FILTER) ++ ixgbe_vlan_promisc_disable(adapter); ++ else ++ ixgbe_vlan_promisc_enable(adapter); ++#elif defined(NETIF_F_HW_VLAN_FILTER) && !defined(HAVE_VLAN_RX_REGISTER) ++ if (features & NETIF_F_HW_VLAN_FILTER) ++ ixgbe_vlan_promisc_disable(adapter); ++ else ++ ixgbe_vlan_promisc_enable(adapter); ++#elif defined(HAVE_VLAN_RX_REGISTER) || defined(ESX55) ++ IXGBE_WRITE_REG(hw, IXGBE_VLNCTRL, vlnctrl); ++#endif /* NETIF_F_HW_VLAN_CTAG_FILTER */ + } + + static void ixgbe_napi_enable_all(struct ixgbe_adapter *adapter) + { ++ struct ixgbe_q_vector *q_vector; + int q_idx; + +- for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { +- ixgbe_qv_init_lock(adapter->q_vector[q_idx]); +- napi_enable(&adapter->q_vector[q_idx]->napi); +- } ++ for 
(q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { ++ q_vector = adapter->q_vector[q_idx]; ++#ifdef HAVE_NDO_BUSY_POLL ++ ixgbe_qv_init_lock(adapter->q_vector[q_idx]); ++#endif ++ napi_enable(&q_vector->napi); ++ } ++} ++ ++static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_q_vector *q_vector; ++ int q_idx; ++ ++ for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { ++ q_vector = adapter->q_vector[q_idx]; ++ napi_disable(&q_vector->napi); ++#ifdef HAVE_NDO_BUSY_POLL ++ while(!ixgbe_qv_disable(adapter->q_vector[q_idx])) { ++ pr_info("QV %d locked\n", q_idx); ++ usleep_range(1000, 20000); ++ } ++#endif ++ } ++} ++ ++#ifdef HAVE_DCBNL_IEEE ++s32 ixgbe_dcb_hw_ets(struct ixgbe_hw *hw, struct ieee_ets *ets, int max_frame) ++{ ++ __u16 refill[IEEE_8021QAZ_MAX_TCS], max[IEEE_8021QAZ_MAX_TCS]; ++ __u8 prio_type[IEEE_8021QAZ_MAX_TCS]; ++ int i; ++ ++ /* naively give each TC a bwg to map onto CEE hardware */ ++ __u8 bwg_id[IEEE_8021QAZ_MAX_TCS] = {0, 1, 2, 3, 4, 5, 6, 7}; ++ ++ /* Map TSA onto CEE prio type */ ++ for (i = 0; i < IEEE_8021QAZ_MAX_TCS; i++) { ++ switch (ets->tc_tsa[i]) { ++ case IEEE_8021QAZ_TSA_STRICT: ++ prio_type[i] = 2; ++ break; ++ case IEEE_8021QAZ_TSA_ETS: ++ prio_type[i] = 0; ++ break; ++ default: ++ /* Hardware only supports priority strict or ++ * ETS transmission selection algorithms if ++ * we receive some other value from dcbnl ++ * throw an error ++ */ ++ return -EINVAL; ++ } ++ } ++ ++ ixgbe_dcb_calculate_tc_credits(ets->tc_tx_bw, refill, max, max_frame); ++ return ixgbe_dcb_hw_config(hw, refill, max, ++ bwg_id, prio_type, ets->prio_tc); ++} ++#endif /* HAVE_DCBNL_IEEE */ ++ ++#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ++void ixgbe_clear_udp_tunnel_port(struct ixgbe_adapter *adapter, u32 mask) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 vxlanctrl; ++ ++ if (!(adapter->flags & (IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE | ++ IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE))) ++ return; ++ ++ 
vxlanctrl = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) && ~mask; ++ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, vxlanctrl); ++ ++ if (mask & IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK) ++ adapter->vxlan_port = 0; ++#ifdef HAVE_UDP_ENC_RX_OFFLOAD ++ if (mask & IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK) ++ adapter->geneve_port = 0; ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ + } ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ + +-static void ixgbe_napi_disable_all(struct ixgbe_adapter *adapter) +-{ +- int q_idx; ++#ifdef NETIF_F_GSO_PARTIAL ++/* NETIF_F_GSO_IPXIP4/6 may not be defined in all distributions */ ++#ifndef NETIF_F_GSO_IPXIP4 ++#define NETIF_F_GSO_IPXIP4 0 ++#endif ++#ifndef NETIF_F_GSO_IPXIP6 ++#define NETIF_F_GSO_IPXIP6 0 ++#endif ++#define IXGBE_GSO_PARTIAL_FEATURES (NETIF_F_GSO_GRE | \ ++ NETIF_F_GSO_GRE_CSUM | \ ++ NETIF_F_GSO_IPXIP4 | \ ++ NETIF_F_GSO_IPXIP6 | \ ++ NETIF_F_GSO_UDP_TUNNEL | \ ++ NETIF_F_GSO_UDP_TUNNEL_CSUM) ++#endif /* NETIF_F_GSO_PARTIAL */ ++ ++static inline unsigned long ixgbe_tso_features(void) ++{ ++ unsigned long features = 0; ++ ++#ifdef NETIF_F_TSO ++ features |= NETIF_F_TSO; ++#endif /* NETIF_F_TSO */ ++#ifdef NETIF_F_TSO6 ++ features |= NETIF_F_TSO6; ++#endif /* NETIF_F_TSO6 */ ++#ifdef NETIF_F_GSO_PARTIAL ++ features |= NETIF_F_GSO_PARTIAL | IXGBE_GSO_PARTIAL_FEATURES; ++#endif + +- for (q_idx = 0; q_idx < adapter->num_q_vectors; q_idx++) { +- napi_disable(&adapter->q_vector[q_idx]->napi); +- while (!ixgbe_qv_disable(adapter->q_vector[q_idx])) { +- pr_info("QV %d locked\n", q_idx); +- usleep_range(1000, 20000); +- } +- } ++ return features; + } + +-#ifdef CONFIG_IXGBE_DCB +-/** +- * ixgbe_configure_dcb - Configure DCB hardware ++/* ++ * ixgbe_configure_dcb - Configure DCB hardware support + * @adapter: ixgbe adapter struct + * +- * This is called by the driver on open to configure the DCB hardware. +- * This is also called by the gennetlink interface when reconfiguring +- * the DCB state. 
++ * Called when the driver opens or needs to reconfigure DCB related bits. + */ + static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; +- int max_frame = adapter->netdev->mtu + ETH_HLEN + ETH_FCS_LEN; ++ struct net_device *netdev = adapter->netdev; ++ ++ int max_frame = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; + ++ /* The following workaround for 82598EB was originaly hidden inside a ++ * kcompat definition of netif_set_gso_max_size. This workaround is ++ * necessary as the 82598EB hardware does not support TSO and DCB ++ * unless the stack TSO maximum segment size can be reduced. Older ++ * kernels do not support the requisite interface, and thus need TSO ++ * disabled if we want to support DCB. ++ */ + if (!(adapter->flags & IXGBE_FLAG_DCB_ENABLED)) { +- if (hw->mac.type == ixgbe_mac_82598EB) +- netif_set_gso_max_size(adapter->netdev, 65536); ++ if (hw->mac.type == ixgbe_mac_82598EB) { ++#ifdef NETDEV_CAN_SET_GSO_MAX_SIZE ++ netif_set_gso_max_size(netdev, 65536); ++#else ++ /* We previously disabled TSO, so we should enable it ++ * now. */ ++ netdev->features |= ixgbe_tso_features(); ++#ifdef NETIF_F_GSO_PARTIAL ++ netdev->gso_partial_features = ++ IXGBE_GSO_PARTIAL_FEATURES; ++#endif ++#endif /* NETDEV_CAN_SET_GSO_MAX_SIZE */ ++ } + return; + } + +- if (hw->mac.type == ixgbe_mac_82598EB) +- netif_set_gso_max_size(adapter->netdev, 32768); +- +-#ifdef IXGBE_FCOE +- if (adapter->netdev->features & NETIF_F_FCOE_MTU) +- max_frame = max(max_frame, IXGBE_FCOE_JUMBO_FRAME_SIZE); ++ if (hw->mac.type == ixgbe_mac_82598EB) { ++#ifdef NETDEV_CAN_SET_GSO_MAX_SIZE ++ netif_set_gso_max_size(netdev, 32768); ++#else ++ /* Simply disable TSO since we cannot change the maximum ++ * segment size. 
*/ ++ netdev->features &= ~ixgbe_tso_features(); ++#ifdef NETIF_F_GSO_PARTIAL ++ netdev->gso_partial_features = 0; + #endif ++#endif /* NETDEV_CAN_SET_GSO_MAX_SIZE */ ++ } ++ ++#if IS_ENABLED(CONFIG_FCOE) ++ if (netdev->features & NETIF_F_FCOE_MTU) ++ max_frame = max_t(int, max_frame, ++ IXGBE_FCOE_JUMBO_FRAME_SIZE); ++#endif /* CONFIG_FCOE */ ++ ++#ifdef HAVE_DCBNL_IEEE ++ if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) { ++ if (adapter->ixgbe_ieee_ets) ++ ixgbe_dcb_hw_ets(&adapter->hw, ++ adapter->ixgbe_ieee_ets, ++ max_frame); ++ ++ if (adapter->ixgbe_ieee_pfc && adapter->ixgbe_ieee_ets) { ++ struct ieee_pfc *pfc = adapter->ixgbe_ieee_pfc; ++ u8 *tc = adapter->ixgbe_ieee_ets->prio_tc; + +- /* reconfigure the hardware */ +- if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) { +- ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, +- DCB_TX_CONFIG); +- ixgbe_dcb_calculate_tc_credits(hw, &adapter->dcb_cfg, max_frame, +- DCB_RX_CONFIG); +- ixgbe_dcb_hw_config(hw, &adapter->dcb_cfg); +- } else if (adapter->ixgbe_ieee_ets && adapter->ixgbe_ieee_pfc) { +- ixgbe_dcb_hw_ets(&adapter->hw, +- adapter->ixgbe_ieee_ets, +- max_frame); +- ixgbe_dcb_hw_pfc_config(&adapter->hw, +- adapter->ixgbe_ieee_pfc->pfc_en, +- adapter->ixgbe_ieee_ets->prio_tc); ++ ixgbe_dcb_config_pfc(&adapter->hw, pfc->pfc_en, tc); ++ } ++ } else ++#endif /* HAVE_DCBNL_IEEE */ ++ { ++ ixgbe_dcb_calculate_tc_credits_cee(hw, ++ &adapter->dcb_cfg, ++ max_frame, ++ IXGBE_DCB_TX_CONFIG); ++ ixgbe_dcb_calculate_tc_credits_cee(hw, ++ &adapter->dcb_cfg, ++ max_frame, ++ IXGBE_DCB_RX_CONFIG); ++ ixgbe_dcb_hw_config_cee(hw, &adapter->dcb_cfg); + } + + /* Enable RSS Hash per TC */ +@@ -4209,16 +5473,149 @@ static void ixgbe_configure_dcb(struct ixgbe_adapter *adapter) + IXGBE_WRITE_REG(hw, IXGBE_RQTC, msb * 0x11111111); + } + } +-#endif + ++#ifndef IXGBE_NO_LLI ++static void ixgbe_configure_lli_82599(struct ixgbe_adapter *adapter) ++{ ++ u16 port; ++ ++ if (adapter->lli_etype) { ++ 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), ++ (IXGBE_IMIR_LLI_EN_82599 | ++ IXGBE_IMIR_SIZE_BP_82599 | ++ IXGBE_IMIR_CTRL_BP_82599)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQS(0), IXGBE_ETQS_LLI); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_ETQF(0), ++ (adapter->lli_etype | IXGBE_ETQF_FILTER_EN)); ++ } ++ ++ if (adapter->lli_port) { ++ port = swab16(adapter->lli_port); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), ++ (IXGBE_IMIR_LLI_EN_82599 | ++ IXGBE_IMIR_SIZE_BP_82599 | ++ IXGBE_IMIR_CTRL_BP_82599)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), ++ (IXGBE_FTQF_POOL_MASK_EN | ++ (IXGBE_FTQF_PRIORITY_MASK << ++ IXGBE_FTQF_PRIORITY_SHIFT) | ++ (IXGBE_FTQF_DEST_PORT_MASK << ++ IXGBE_FTQF_5TUPLE_MASK_SHIFT))); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SDPQF(0), (port << 16)); ++ } ++ ++ if (adapter->flags & IXGBE_FLAG_LLI_PUSH) { ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82599EB: ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), ++ (IXGBE_IMIR_LLI_EN_82599 | ++ IXGBE_IMIR_SIZE_BP_82599 | ++ IXGBE_IMIR_CTRL_PSH_82599 | ++ IXGBE_IMIR_CTRL_SYN_82599 | ++ IXGBE_IMIR_CTRL_URG_82599 | ++ IXGBE_IMIR_CTRL_ACK_82599 | ++ IXGBE_IMIR_CTRL_RST_82599 | ++ IXGBE_IMIR_CTRL_FIN_82599)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, ++ 0xfc000000); ++ break; ++ case ixgbe_mac_X540: ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), ++ (IXGBE_IMIR_LLI_EN_82599 | ++ IXGBE_IMIR_SIZE_BP_82599 | ++ IXGBE_IMIR_CTRL_PSH_82599)); ++ break; ++ default: ++ break; ++ } ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), ++ (IXGBE_FTQF_POOL_MASK_EN | ++ (IXGBE_FTQF_PRIORITY_MASK << ++ IXGBE_FTQF_PRIORITY_SHIFT) | ++ (IXGBE_FTQF_5TUPLE_MASK_MASK << ++ IXGBE_FTQF_5TUPLE_MASK_SHIFT))); ++ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_SYNQF, 0x80000100); ++ } ++ ++ if (adapter->lli_size) { ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_L34T_IMIR(0), ++ (IXGBE_IMIR_LLI_EN_82599 | ++ IXGBE_IMIR_CTRL_BP_82599)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_LLITHRESH, ++ adapter->lli_size); ++ 
IXGBE_WRITE_REG(&adapter->hw, IXGBE_FTQF(0), ++ (IXGBE_FTQF_POOL_MASK_EN | ++ (IXGBE_FTQF_PRIORITY_MASK << ++ IXGBE_FTQF_PRIORITY_SHIFT) | ++ (IXGBE_FTQF_5TUPLE_MASK_MASK << ++ IXGBE_FTQF_5TUPLE_MASK_SHIFT))); ++ } ++ ++ if (adapter->lli_vlan_pri) { ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIRVP, ++ (IXGBE_IMIRVP_PRIORITY_EN | ++ adapter->lli_vlan_pri)); ++ } ++} ++ ++static void ixgbe_configure_lli(struct ixgbe_adapter *adapter) ++{ ++ u16 port; ++ ++ /* lli should only be enabled with MSI-X and MSI */ ++ if (!(adapter->flags & IXGBE_FLAG_MSI_ENABLED) && ++ !(adapter->flags & IXGBE_FLAG_MSIX_ENABLED)) ++ return; ++ /* LLI not supported on X550 and X550EM_x*/ ++ if ((adapter->hw.mac.type == ixgbe_mac_X550) || ++ (adapter->hw.mac.type == ixgbe_mac_X550EM_x)) ++ return; ++ /* LLI not supported on X550EM_a */ ++ if (adapter->hw.mac.type == ixgbe_mac_X550EM_a) ++ return; ++ if (adapter->hw.mac.type != ixgbe_mac_82598EB) { ++ ixgbe_configure_lli_82599(adapter); ++ return; ++ } ++ ++ if (adapter->lli_port) { ++ /* use filter 0 for port */ ++ port = swab16(adapter->lli_port); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(0), ++ (port | IXGBE_IMIR_PORT_IM_EN)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(0), ++ (IXGBE_IMIREXT_SIZE_BP | ++ IXGBE_IMIREXT_CTRL_BP)); ++ } ++ ++ if (adapter->flags & IXGBE_FLAG_LLI_PUSH) { ++ /* use filter 1 for push flag */ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(1), ++ (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(1), ++ (IXGBE_IMIREXT_SIZE_BP | ++ IXGBE_IMIREXT_CTRL_PSH)); ++ } ++ ++ if (adapter->lli_size) { ++ /* use filter 2 for size */ ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIR(2), ++ (IXGBE_IMIR_PORT_BP | IXGBE_IMIR_PORT_IM_EN)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_IMIREXT(2), ++ (adapter->lli_size | IXGBE_IMIREXT_CTRL_BP)); ++ } ++} ++ ++#endif /* IXGBE_NO_LLI */ + /* Additional bittime to account for IXGBE framing */ + #define IXGBE_ETH_FRAMING 20 + +-/** ++/* + * ixgbe_hpbthresh - 
calculate high water mark for flow control + * + * @adapter: board private structure to calculate for +- * @pb: packet buffer to calculate ++ * @pb - packet buffer to calculate + */ + static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) + { +@@ -4230,17 +5627,20 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) + /* Calculate max LAN frame size */ + tc = link = dev->mtu + ETH_HLEN + ETH_FCS_LEN + IXGBE_ETH_FRAMING; + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* FCoE traffic class uses FCOE jumbo frames */ + if ((dev->features & NETIF_F_FCOE_MTU) && + (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && +- (pb == ixgbe_fcoe_get_tc(adapter))) ++ (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) + tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; +-#endif ++#endif /* CONFIG_FCOE */ + + /* Calculate delay value for device */ + switch (hw->mac.type) { + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + dv_id = IXGBE_DV_X540(link, tc); + break; + default: +@@ -4264,7 +5664,7 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) + */ + if (marker < 0) { + e_warn(drv, "Packet Buffer(%i) can not provide enough" +- "headroom to support flow control." ++ "headroom to suppport flow control." 
+ "Decrease MTU or number of traffic classes\n", pb); + marker = tc + 1; + } +@@ -4272,13 +5672,13 @@ static int ixgbe_hpbthresh(struct ixgbe_adapter *adapter, int pb) + return marker; + } + +-/** ++/* + * ixgbe_lpbthresh - calculate low water mark for for flow control + * + * @adapter: board private structure to calculate for +- * @pb: packet buffer to calculate ++ * @pb - packet buffer to calculate + */ +-static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) ++static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int __maybe_unused pb) + { + struct ixgbe_hw *hw = &adapter->hw; + struct net_device *dev = adapter->netdev; +@@ -4288,17 +5688,20 @@ static int ixgbe_lpbthresh(struct ixgbe_adapter *adapter, int pb) + /* Calculate max LAN frame size */ + tc = dev->mtu + ETH_HLEN + ETH_FCS_LEN; + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* FCoE traffic class uses FCOE jumbo frames */ + if ((dev->features & NETIF_F_FCOE_MTU) && + (tc < IXGBE_FCOE_JUMBO_FRAME_SIZE) && + (pb == netdev_get_prio_tc_map(dev, adapter->fcoe.up))) + tc = IXGBE_FCOE_JUMBO_FRAME_SIZE; +-#endif ++#endif /* CONFIG_FCOE */ + + /* Calculate delay value for device */ + switch (hw->mac.type) { + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + dv_id = IXGBE_LOW_DV_X540(tc); + break; + default: +@@ -4322,6 +5725,7 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) + if (!num_tc) + num_tc = 1; + ++ + for (i = 0; i < num_tc; i++) { + hw->fc.high_water[i] = ixgbe_hpbthresh(adapter, i); + hw->fc.low_water[i] = ixgbe_lpbthresh(adapter, i); +@@ -4331,7 +5735,7 @@ static void ixgbe_pbthresh_setup(struct ixgbe_adapter *adapter) + hw->fc.low_water[i] = 0; + } + +- for (; i < MAX_TRAFFIC_CLASS; i++) ++ for (; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) + hw->fc.high_water[i] = 0; + } + +@@ -4347,7 +5751,7 @@ static void ixgbe_configure_pb(struct ixgbe_adapter *adapter) + else + hdrm = 0; + +- hw->mac.ops.set_rxpba(hw, tc, 
hdrm, PBA_STRATEGY_EQUAL); ++ hw->mac.ops.setup_rxpba(hw, tc, hdrm, PBA_STRATEGY_EQUAL); + ixgbe_pbthresh_setup(adapter); + } + +@@ -4360,7 +5764,8 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) + spin_lock(&adapter->fdir_perfect_lock); + + if (!hlist_empty(&adapter->fdir_filter_list)) +- ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask); ++ ixgbe_fdir_set_input_mask_82599(hw, &adapter->fdir_mask, ++ adapter->cloud_mode); + + hlist_for_each_entry_safe(filter, node2, + &adapter->fdir_filter_list, fdir_node) { +@@ -4369,232 +5774,20 @@ static void ixgbe_fdir_filter_restore(struct ixgbe_adapter *adapter) + filter->sw_idx, + (filter->action == IXGBE_FDIR_DROP_QUEUE) ? + IXGBE_FDIR_DROP_QUEUE : +- adapter->rx_ring[filter->action]->reg_idx); ++ adapter->rx_ring[filter->action]->reg_idx, ++ adapter->cloud_mode); + } + + spin_unlock(&adapter->fdir_perfect_lock); + } + +-static void ixgbe_macvlan_set_rx_mode(struct net_device *dev, unsigned int pool, +- struct ixgbe_adapter *adapter) +-{ +- struct ixgbe_hw *hw = &adapter->hw; +- u32 vmolr; +- +- /* No unicast promiscuous support for VMDQ devices. 
*/ +- vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(pool)); +- vmolr |= (IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_BAM | IXGBE_VMOLR_AUPE); +- +- /* clear the affected bit */ +- vmolr &= ~IXGBE_VMOLR_MPE; +- +- if (dev->flags & IFF_ALLMULTI) { +- vmolr |= IXGBE_VMOLR_MPE; +- } else { +- vmolr |= IXGBE_VMOLR_ROMPE; +- hw->mac.ops.update_mc_addr_list(hw, dev); +- } +- ixgbe_write_uc_addr_list(adapter->netdev, pool); +- IXGBE_WRITE_REG(hw, IXGBE_VMOLR(pool), vmolr); +-} +- +-static void ixgbe_fwd_psrtype(struct ixgbe_fwd_adapter *vadapter) +-{ +- struct ixgbe_adapter *adapter = vadapter->real_adapter; +- int rss_i = adapter->num_rx_queues_per_pool; +- struct ixgbe_hw *hw = &adapter->hw; +- u16 pool = vadapter->pool; +- u32 psrtype = IXGBE_PSRTYPE_TCPHDR | +- IXGBE_PSRTYPE_UDPHDR | +- IXGBE_PSRTYPE_IPV4HDR | +- IXGBE_PSRTYPE_L2HDR | +- IXGBE_PSRTYPE_IPV6HDR; +- +- if (hw->mac.type == ixgbe_mac_82598EB) +- return; +- +- if (rss_i > 3) +- psrtype |= 2 << 29; +- else if (rss_i > 1) +- psrtype |= 1 << 29; +- +- IXGBE_WRITE_REG(hw, IXGBE_PSRTYPE(VMDQ_P(pool)), psrtype); +-} +- +-/** +- * ixgbe_clean_rx_ring - Free Rx Buffers per Queue +- * @rx_ring: ring to free buffers from +- **/ +-static void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) +-{ +- struct device *dev = rx_ring->dev; +- unsigned long size; +- u16 i; +- +- /* ring already cleared, nothing to do */ +- if (!rx_ring->rx_buffer_info) +- return; +- +- /* Free all the Rx ring sk_buffs */ +- for (i = 0; i < rx_ring->count; i++) { +- struct ixgbe_rx_buffer *rx_buffer; +- +- rx_buffer = &rx_ring->rx_buffer_info[i]; +- if (rx_buffer->skb) { +- struct sk_buff *skb = rx_buffer->skb; +- if (IXGBE_CB(skb)->page_released) { +- dma_unmap_page(dev, +- IXGBE_CB(skb)->dma, +- ixgbe_rx_bufsz(rx_ring), +- DMA_FROM_DEVICE); +- IXGBE_CB(skb)->page_released = false; +- } +- dev_kfree_skb(skb); +- } +- rx_buffer->skb = NULL; +- if (rx_buffer->dma) +- dma_unmap_page(dev, rx_buffer->dma, +- ixgbe_rx_pg_size(rx_ring), +- DMA_FROM_DEVICE); +- 
rx_buffer->dma = 0; +- if (rx_buffer->page) +- __free_pages(rx_buffer->page, +- ixgbe_rx_pg_order(rx_ring)); +- rx_buffer->page = NULL; +- } +- +- size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; +- memset(rx_ring->rx_buffer_info, 0, size); +- +- /* Zero out the descriptor ring */ +- memset(rx_ring->desc, 0, rx_ring->size); +- +- rx_ring->next_to_alloc = 0; +- rx_ring->next_to_clean = 0; +- rx_ring->next_to_use = 0; +-} +- +-static void ixgbe_disable_fwd_ring(struct ixgbe_fwd_adapter *vadapter, +- struct ixgbe_ring *rx_ring) +-{ +- struct ixgbe_adapter *adapter = vadapter->real_adapter; +- int index = rx_ring->queue_index + vadapter->rx_base_queue; +- +- /* shutdown specific queue receive and wait for dma to settle */ +- ixgbe_disable_rx_queue(adapter, rx_ring); +- usleep_range(10000, 20000); +- ixgbe_irq_disable_queues(adapter, ((u64)1 << index)); +- ixgbe_clean_rx_ring(rx_ring); +- rx_ring->l2_accel_priv = NULL; +-} +- +-static int ixgbe_fwd_ring_down(struct net_device *vdev, +- struct ixgbe_fwd_adapter *accel) +-{ +- struct ixgbe_adapter *adapter = accel->real_adapter; +- unsigned int rxbase = accel->rx_base_queue; +- unsigned int txbase = accel->tx_base_queue; +- int i; +- +- netif_tx_stop_all_queues(vdev); +- +- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { +- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); +- adapter->rx_ring[rxbase + i]->netdev = adapter->netdev; +- } +- +- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { +- adapter->tx_ring[txbase + i]->l2_accel_priv = NULL; +- adapter->tx_ring[txbase + i]->netdev = adapter->netdev; +- } +- +- +- return 0; +-} +- +-static int ixgbe_fwd_ring_up(struct net_device *vdev, +- struct ixgbe_fwd_adapter *accel) +-{ +- struct ixgbe_adapter *adapter = accel->real_adapter; +- unsigned int rxbase, txbase, queues; +- int i, baseq, err = 0; +- +- if (!test_bit(accel->pool, &adapter->fwd_bitmask)) +- return 0; +- +- baseq = accel->pool * adapter->num_rx_queues_per_pool; +- 
netdev_dbg(vdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", +- accel->pool, adapter->num_rx_pools, +- baseq, baseq + adapter->num_rx_queues_per_pool, +- adapter->fwd_bitmask); +- +- accel->netdev = vdev; +- accel->rx_base_queue = rxbase = baseq; +- accel->tx_base_queue = txbase = baseq; +- +- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) +- ixgbe_disable_fwd_ring(accel, adapter->rx_ring[rxbase + i]); +- +- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { +- adapter->rx_ring[rxbase + i]->netdev = vdev; +- adapter->rx_ring[rxbase + i]->l2_accel_priv = accel; +- ixgbe_configure_rx_ring(adapter, adapter->rx_ring[rxbase + i]); +- } +- +- for (i = 0; i < adapter->num_rx_queues_per_pool; i++) { +- adapter->tx_ring[txbase + i]->netdev = vdev; +- adapter->tx_ring[txbase + i]->l2_accel_priv = accel; +- } +- +- queues = min_t(unsigned int, +- adapter->num_rx_queues_per_pool, vdev->num_tx_queues); +- err = netif_set_real_num_tx_queues(vdev, queues); +- if (err) +- goto fwd_queue_err; +- +- err = netif_set_real_num_rx_queues(vdev, queues); +- if (err) +- goto fwd_queue_err; +- +- if (is_valid_ether_addr(vdev->dev_addr)) +- ixgbe_add_mac_filter(adapter, vdev->dev_addr, accel->pool); +- +- ixgbe_fwd_psrtype(accel); +- ixgbe_macvlan_set_rx_mode(vdev, accel->pool, adapter); +- return err; +-fwd_queue_err: +- ixgbe_fwd_ring_down(vdev, accel); +- return err; +-} +- +-static void ixgbe_configure_dfwd(struct ixgbe_adapter *adapter) +-{ +- struct net_device *upper; +- struct list_head *iter; +- int err; +- +- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { +- if (netif_is_macvlan(upper)) { +- struct macvlan_dev *dfwd = netdev_priv(upper); +- struct ixgbe_fwd_adapter *vadapter = dfwd->fwd_priv; +- +- if (dfwd->fwd_priv) { +- err = ixgbe_fwd_ring_up(upper, vadapter); +- if (err) +- continue; +- } +- } +- } +-} +- + static void ixgbe_configure(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + + ixgbe_configure_pb(adapter); 
+-#ifdef CONFIG_IXGBE_DCB + ixgbe_configure_dcb(adapter); +-#endif ++ + /* + * We must restore virtualization before VLANs or else + * the VLVF registers will not be populated +@@ -4602,66 +5795,70 @@ static void ixgbe_configure(struct ixgbe_adapter *adapter) + ixgbe_configure_virtualization(adapter); + + ixgbe_set_rx_mode(adapter->netdev); ++#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) + ixgbe_restore_vlan(adapter); ++#endif + +- switch (hw->mac.type) { +- case ixgbe_mac_82599EB: +- case ixgbe_mac_X540: +- hw->mac.ops.disable_rx_buff(hw); +- break; +- default: +- break; +- } ++ if (adapter->hw.mac.type == ixgbe_mac_82599EB || ++ adapter->hw.mac.type == ixgbe_mac_X540) ++ hw->mac.ops.disable_sec_rx_path(hw); + + if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { + ixgbe_init_fdir_signature_82599(&adapter->hw, + adapter->fdir_pballoc); + } else if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) { + ixgbe_init_fdir_perfect_82599(&adapter->hw, +- adapter->fdir_pballoc); ++ adapter->fdir_pballoc, adapter->cloud_mode); + ixgbe_fdir_filter_restore(adapter); + } + +- switch (hw->mac.type) { +- case ixgbe_mac_82599EB: +- case ixgbe_mac_X540: +- hw->mac.ops.enable_rx_buff(hw); +- break; +- default: +- break; ++ if (adapter->hw.mac.type == ixgbe_mac_82599EB || ++ adapter->hw.mac.type == ixgbe_mac_X540) ++ hw->mac.ops.enable_sec_rx_path(hw); ++ ++ /* Enable EEE only when supported and enabled */ ++ if (hw->mac.ops.setup_eee && ++ (adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) { ++ bool eee_enable = !!(adapter->flags2 & IXGBE_FLAG2_EEE_ENABLED); ++ ++ hw->mac.ops.setup_eee(hw, eee_enable); + } + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_DCA) ++ /* configure DCA */ ++ if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) ++ ixgbe_setup_dca(adapter); ++#endif ++ ++#if IS_ENABLED(CONFIG_FCOE) + /* configure FCoE L2 filters, redirection table, and Rx control */ + ixgbe_configure_fcoe(adapter); ++#endif /* CONFIG_FCOE */ + +-#endif /* IXGBE_FCOE */ + 
ixgbe_configure_tx(adapter); + ixgbe_configure_rx(adapter); +- ixgbe_configure_dfwd(adapter); +-} +- +-static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) +-{ +- switch (hw->phy.type) { +- case ixgbe_phy_sfp_avago: +- case ixgbe_phy_sfp_ftl: +- case ixgbe_phy_sfp_intel: +- case ixgbe_phy_sfp_unknown: +- case ixgbe_phy_sfp_passive_tyco: +- case ixgbe_phy_sfp_passive_unknown: +- case ixgbe_phy_sfp_active_unknown: +- case ixgbe_phy_sfp_ftl_active: +- case ixgbe_phy_qsfp_passive_unknown: +- case ixgbe_phy_qsfp_active_unknown: +- case ixgbe_phy_qsfp_intel: +- case ixgbe_phy_qsfp_unknown: +- /* ixgbe_phy_none is set when no SFP module is present */ +- case ixgbe_phy_none: +- return true; +- case ixgbe_phy_nl: +- if (hw->mac.type == ixgbe_mac_82598EB) ++} ++ ++static bool ixgbe_is_sfp(struct ixgbe_hw *hw) ++{ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ if (hw->phy.type == ixgbe_phy_nl) ++ return true; ++ return false; ++ case ixgbe_mac_82599EB: ++ switch (hw->mac.ops.get_media_type(hw)) { ++ case ixgbe_media_type_fiber: ++ case ixgbe_media_type_fiber_qsfp: ++ return true; ++ default: ++ return false; ++ } ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) + return true; ++ return false; + default: + return false; + } +@@ -4674,7 +5871,7 @@ static inline bool ixgbe_is_sfp(struct ixgbe_hw *hw) + static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) + { + /* +- * We are assuming the worst case scenario here, and that ++ * We are assuming the worst case scenerio here, and that + * is that an SFP was inserted/removed after the reset + * but before SFP detection was enabled. 
As such the best + * solution is to just start searching as soon as we start +@@ -4683,6 +5880,7 @@ static void ixgbe_sfp_link_config(struct ixgbe_adapter *adapter) + adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; + + adapter->flags2 |= IXGBE_FLAG2_SFP_NEEDS_RESET; ++ adapter->sfp_poll_time = 0; + } + + /** +@@ -4716,6 +5914,47 @@ link_cfg_out: + return ret; + } + ++/** ++ * ixgbe_clear_vf_stats_counters - Clear out VF stats after reset ++ * @adapter: board private structure ++ * ++ * On a reset we need to clear out the VF stats or accounting gets ++ * messed up because they're not clear on read. ++ **/ ++static void ixgbe_clear_vf_stats_counters(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int i; ++ ++ for (i = 0; i < adapter->num_vfs; i++) { ++ adapter->vfinfo[i].last_vfstats.gprc = ++ IXGBE_READ_REG(hw, IXGBE_PVFGPRC(i)); ++ adapter->vfinfo[i].saved_rst_vfstats.gprc += ++ adapter->vfinfo[i].vfstats.gprc; ++ adapter->vfinfo[i].vfstats.gprc = 0; ++ adapter->vfinfo[i].last_vfstats.gptc = ++ IXGBE_READ_REG(hw, IXGBE_PVFGPTC(i)); ++ adapter->vfinfo[i].saved_rst_vfstats.gptc += ++ adapter->vfinfo[i].vfstats.gptc; ++ adapter->vfinfo[i].vfstats.gptc = 0; ++ adapter->vfinfo[i].last_vfstats.gorc = ++ IXGBE_READ_REG(hw, IXGBE_PVFGORC_LSB(i)); ++ adapter->vfinfo[i].saved_rst_vfstats.gorc += ++ adapter->vfinfo[i].vfstats.gorc; ++ adapter->vfinfo[i].vfstats.gorc = 0; ++ adapter->vfinfo[i].last_vfstats.gotc = ++ IXGBE_READ_REG(hw, IXGBE_PVFGOTC_LSB(i)); ++ adapter->vfinfo[i].saved_rst_vfstats.gotc += ++ adapter->vfinfo[i].vfstats.gotc; ++ adapter->vfinfo[i].vfstats.gotc = 0; ++ adapter->vfinfo[i].last_vfstats.mprc = ++ IXGBE_READ_REG(hw, IXGBE_PVFMPRC(i)); ++ adapter->vfinfo[i].saved_rst_vfstats.mprc += ++ adapter->vfinfo[i].vfstats.mprc; ++ adapter->vfinfo[i].vfstats.mprc = 0; ++ } ++} ++ + static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; +@@ -4735,6 +5974,9 @@ static void 
ixgbe_setup_gpie(struct ixgbe_adapter *adapter) + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + default: + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(0), 0xFFFFFFFF); + IXGBE_WRITE_REG(hw, IXGBE_EIAM_EX(1), 0xFFFFFFFF); +@@ -4746,9 +5988,6 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) + IXGBE_WRITE_REG(hw, IXGBE_EIAM, IXGBE_EICS_RTX_QUEUE); + } + +- /* XXX: to interrupt immediately for EICS writes, enable this */ +- /* gpie |= IXGBE_GPIE_EIMEN; */ +- + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { + gpie &= ~IXGBE_GPIE_VTMODE_MASK; + +@@ -4766,26 +6005,29 @@ static void ixgbe_setup_gpie(struct ixgbe_adapter *adapter) + } + + /* Enable Thermal over heat sensor interrupt */ +- if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) { ++ if (adapter->flags2 & IXGBE_FLAG2_TEMP_SENSOR_CAPABLE) + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + gpie |= IXGBE_SDP0_GPIEN; + break; +- case ixgbe_mac_X540: +- gpie |= IXGBE_EIMS_TS; +- break; + default: + break; + } +- } + + /* Enable fan failure interrupt */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) + gpie |= IXGBE_SDP1_GPIEN; + +- if (hw->mac.type == ixgbe_mac_82599EB) { +- gpie |= IXGBE_SDP1_GPIEN; +- gpie |= IXGBE_SDP2_GPIEN; ++ switch (hw->mac.type) { ++ case ixgbe_mac_82599EB: ++ gpie |= IXGBE_SDP1_GPIEN | IXGBE_SDP2_GPIEN; ++ break; ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ gpie |= IXGBE_SDP0_GPIEN_X540; ++ break; ++ default: ++ break; + } + + IXGBE_WRITE_REG(hw, IXGBE_GPIE, gpie); +@@ -4808,14 +6050,18 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) + /* enable the optics for 82599 SFP+ fiber */ + if (hw->mac.ops.enable_tx_laser) + hw->mac.ops.enable_tx_laser(hw); ++ ixgbe_set_phy_power(hw, true); + + smp_mb__before_atomic(); + clear_bit(__IXGBE_DOWN, &adapter->state); + ixgbe_napi_enable_all(adapter); ++#ifndef IXGBE_NO_LLI ++ ixgbe_configure_lli(adapter); 
++#endif + + if (ixgbe_is_sfp(hw)) { + ixgbe_sfp_link_config(adapter); +- } else { ++ } else if (!hw->phy.reset_disable) { + err = ixgbe_non_sfp_link_config(hw); + if (err) + e_err(probe, "link_config FAILED %d\n", err); +@@ -4835,12 +6081,16 @@ static void ixgbe_up_complete(struct ixgbe_adapter *adapter) + e_crit(drv, "Fan has stopped, replace the adapter\n"); + } + ++ /* enable transmits */ ++ netif_tx_start_all_queues(adapter->netdev); ++ + /* bring the link up in the watchdog, this could race with our first + * link up interrupt but shouldn't be a problem */ + adapter->flags |= IXGBE_FLAG_NEED_LINK_UPDATE; + adapter->link_check_timeout = jiffies; + mod_timer(&adapter->service_timer, jiffies); + ++ ixgbe_clear_vf_stats_counters(adapter); + /* Set PF Reset Done bit so PF/VF Mail Ops can work */ + ctrl_ext = IXGBE_READ_REG(hw, IXGBE_CTRL_EXT); + ctrl_ext |= IXGBE_CTRL_EXT_PFRSTD; +@@ -4851,10 +6101,16 @@ void ixgbe_reinit_locked(struct ixgbe_adapter *adapter) + { + WARN_ON(in_interrupt()); + /* put off any impending NetWatchDogTimeout */ ++#ifdef HAVE_NETIF_TRANS_UPDATE ++ netif_trans_update(adapter->netdev); ++#else + adapter->netdev->trans_start = jiffies; ++#endif + + while (test_and_set_bit(__IXGBE_RESETTING, &adapter->state)) + usleep_range(1000, 2000); ++ if (adapter->hw.phy.type == ixgbe_phy_fw) ++ ixgbe_watchdog_link_is_down(adapter); + ixgbe_down(adapter); + /* + * If SR-IOV enabled then wait a bit before bringing the adapter +@@ -4879,11 +6135,12 @@ void ixgbe_up(struct ixgbe_adapter *adapter) + void ixgbe_reset(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; ++#ifdef HAVE_SET_RX_MODE + struct net_device *netdev = adapter->netdev; ++#endif + int err; +- u8 old_addr[ETH_ALEN]; + +- if (ixgbe_removed(hw->hw_addr)) ++ if (IXGBE_REMOVED(hw->hw_addr)) + return; + /* lock SFP init bit to prevent race conditions with the watchdog */ + while (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) +@@ -4896,7 +6153,7 @@ void 
ixgbe_reset(struct ixgbe_adapter *adapter) + + err = hw->mac.ops.init_hw(hw); + switch (err) { +- case 0: ++ case IXGBE_SUCCESS: + case IXGBE_ERR_SFP_NOT_PRESENT: + case IXGBE_ERR_SFP_NOT_SUPPORTED: + break; +@@ -4906,28 +6163,135 @@ void ixgbe_reset(struct ixgbe_adapter *adapter) + case IXGBE_ERR_EEPROM_VERSION: + /* We are running on a pre-production device, log a warning */ + e_dev_warn("This device is a pre-production adapter/LOM. " +- "Please be aware there may be issues associated with " +- "your hardware. If you are experiencing problems " +- "please contact your Intel or hardware " ++ "Please be aware there may be issues associated " ++ "with your hardware. If you are experiencing " ++ "problems please contact your Intel or hardware " + "representative who provided you with this " + "hardware.\n"); + break; ++ case IXGBE_ERR_OVERTEMP: ++ e_crit(drv, "%s\n", ixgbe_overheat_msg); ++ break; + default: + e_dev_err("Hardware Error: %d\n", err); + } + + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); +- /* do not flush user set addresses */ +- memcpy(old_addr, &adapter->mac_table[0].addr, netdev->addr_len); ++ ++ /* flush entries out of MAC table */ + ixgbe_flush_sw_mac_table(adapter); +- ixgbe_mac_set_default_filter(adapter, old_addr); ++#ifdef HAVE_SET_RX_MODE ++ __dev_uc_unsync(netdev, NULL); ++#endif ++ ++ /* do not flush user set addresses */ ++ ixgbe_mac_set_default_filter(adapter); + + /* update SAN MAC vmdq pool selection */ + if (hw->mac.san_mac_rar_index) + hw->mac.ops.set_vmdq_san_mac(hw, VMDQ_P(0)); + ++ /* Clear saved DMA coalescing values except for watchdog_timer */ ++ hw->mac.dmac_config.fcoe_en = false; ++ hw->mac.dmac_config.link_speed = 0; ++ hw->mac.dmac_config.fcoe_tc = 0; ++ hw->mac.dmac_config.num_tcs = 0; ++ ++#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + ixgbe_ptp_reset(adapter); ++#endif ++ ++ if (!netif_running(adapter->netdev) && !adapter->wol) ++ ixgbe_set_phy_power(hw, false); ++ else ++ 
ixgbe_set_phy_power(hw, true); ++} ++ ++/** ++ * ixgbe_clean_rx_ring - Free Rx Buffers per Queue ++ * @rx_ring: ring to free buffers from ++ **/ ++void ixgbe_clean_rx_ring(struct ixgbe_ring *rx_ring) ++{ ++ struct device *dev = rx_ring->dev; ++ unsigned long size; ++ u16 i; ++ ++ /* ring already cleared, nothing to do */ ++ if (!rx_ring->rx_buffer_info) ++ return; ++ ++ /* Free all the Rx ring sk_buffs */ ++ for (i = 0; i < rx_ring->count; i++) { ++ struct ixgbe_rx_buffer *rx_buffer = &rx_ring->rx_buffer_info[i]; ++ ++ if (rx_buffer->skb) { ++ struct sk_buff *skb = rx_buffer->skb; ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ if (IXGBE_CB(skb)->page_released) ++ dma_unmap_page_attrs(dev, ++ IXGBE_CB(skb)->dma, ++ ixgbe_rx_pg_size(rx_ring), ++ DMA_FROM_DEVICE, ++ IXGBE_RX_DMA_ATTR); ++#else ++ /* We need to clean up RSC frag lists */ ++ skb = ixgbe_merge_active_tail(skb); ++ if (ixgbe_close_active_frag_list(skb)) ++ dma_unmap_single(dev, ++ IXGBE_CB(skb)->dma, ++ rx_ring->rx_buf_len, ++ DMA_FROM_DEVICE); ++ IXGBE_CB(skb)->dma = 0; ++#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ ++ dev_kfree_skb(skb); ++ rx_buffer->skb = NULL; ++ } ++ ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ if (!rx_buffer->page) ++ continue; ++ ++ /* Invalidate cache lines that may have been written to by ++ * device so that we avoid corrupting memory. 
++ */ ++ dma_sync_single_range_for_cpu(rx_ring->dev, ++ rx_buffer->dma, ++ rx_buffer->page_offset, ++ ixgbe_rx_bufsz(rx_ring), ++ DMA_FROM_DEVICE); ++ ++ /* free resources associated with mapping */ ++ dma_unmap_page_attrs(dev, rx_buffer->dma, ++ ixgbe_rx_pg_size(rx_ring), ++ DMA_FROM_DEVICE, ++ IXGBE_RX_DMA_ATTR); ++ ++ __page_frag_cache_drain(rx_buffer->page, ++ rx_buffer->pagecnt_bias); ++ ++ rx_buffer->page = NULL; ++#else /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ ++ if (!rx_buffer->dma) ++ continue; ++ ++ dma_unmap_single(dev, ++ rx_buffer->dma, ++ rx_ring->rx_buf_len, ++ DMA_FROM_DEVICE); ++ rx_buffer->dma = 0; ++#endif /* CONFIG_IXGBE_DISABLE_PACKET_SPLIT */ ++ } ++ ++ size = sizeof(struct ixgbe_rx_buffer) * rx_ring->count; ++ memset(rx_ring->rx_buffer_info, 0, size); ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++ ++ rx_ring->next_to_alloc = 0; ++ rx_ring->next_to_clean = 0; ++ rx_ring->next_to_use = 0; ++#endif + } + + /** +@@ -4957,9 +6321,6 @@ static void ixgbe_clean_tx_ring(struct ixgbe_ring *tx_ring) + + /* Zero out the descriptor ring */ + memset(tx_ring->desc, 0, tx_ring->size); +- +- tx_ring->next_to_use = 0; +- tx_ring->next_to_clean = 0; + } + + /** +@@ -5007,9 +6368,6 @@ void ixgbe_down(struct ixgbe_adapter *adapter) + { + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; +- struct net_device *upper; +- struct list_head *iter; +- u32 rxctrl; + int i; + + /* signal that we are down to the interrupt handler */ +@@ -5017,8 +6375,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) + return; /* do nothing if already down */ + + /* disable receives */ +- rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); +- IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl & ~IXGBE_RXCTRL_RXEN); ++ ixgbe_disable_rx(hw); + + /* disable all enabled rx queues */ + for (i = 0; i < adapter->num_rx_queues; i++) +@@ -5033,25 +6390,12 @@ void ixgbe_down(struct ixgbe_adapter *adapter) + netif_carrier_off(netdev); + netif_tx_disable(netdev); + +- /* disable any 
upper devices */ +- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { +- if (netif_is_macvlan(upper)) { +- struct macvlan_dev *vlan = netdev_priv(upper); +- +- if (vlan->fwd_priv) { +- netif_tx_stop_all_queues(upper); +- netif_carrier_off(upper); +- netif_tx_disable(upper); +- } +- } +- } +- + ixgbe_irq_disable(adapter); + + ixgbe_napi_disable_all(adapter); + +- adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT | +- IXGBE_FLAG2_RESET_REQUESTED); ++ adapter->flags2 &= ~(IXGBE_FLAG2_FDIR_REQUIRES_REINIT); ++ clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + adapter->flags &= ~IXGBE_FLAG_NEED_LINK_UPDATE; + + del_timer_sync(&adapter->service_timer); +@@ -5062,7 +6406,7 @@ void ixgbe_down(struct ixgbe_adapter *adapter) + + /* Mark all the VFs as inactive */ + for (i = 0 ; i < adapter->num_vfs; i++) +- adapter->vfinfo[i].clear_to_send = false; ++ adapter->vfinfo[i].clear_to_send = 0; + + /* ping all the active vfs to let them know we are going down */ + ixgbe_ping_all_vfs(adapter); +@@ -5081,6 +6425,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, + (IXGBE_READ_REG(hw, IXGBE_DMATXCTL) & + ~IXGBE_DMATXCTL_TE)); +@@ -5089,7 +6436,9 @@ void ixgbe_down(struct ixgbe_adapter *adapter) + break; + } + ++#ifdef HAVE_PCI_ERS + if (!pci_channel_offline(adapter->pdev)) ++#endif + ixgbe_reset(adapter); + + /* power down the optics for 82599 SFP+ fiber */ +@@ -5098,24 +6447,74 @@ void ixgbe_down(struct ixgbe_adapter *adapter) + + ixgbe_clean_all_tx_rings(adapter); + ixgbe_clean_all_rx_rings(adapter); +- +-#ifdef CONFIG_IXGBE_DCA +- /* since we reset the hardware DCA settings were cleared */ +- ixgbe_setup_dca(adapter); +-#endif + } + + /** +- * ixgbe_tx_timeout - Respond to a Tx Hang +- * @netdev: network interface device structure ++ * ixgbe_eee_capable - helper 
function to determine EEE support on X550 ++ * + **/ +-static void ixgbe_tx_timeout(struct net_device *netdev) ++static inline void ixgbe_set_eee_capable(struct ixgbe_adapter *adapter) + { +- struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; + +- /* Do the reset outside of interrupt context */ +- ixgbe_tx_timeout_reset(adapter); ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ if (!hw->phy.eee_speeds_supported) ++ break; ++ adapter->flags2 |= IXGBE_FLAG2_EEE_CAPABLE; ++ if (!hw->phy.eee_speeds_advertised) ++ break; ++ adapter->flags2 |= IXGBE_FLAG2_EEE_ENABLED; ++ break; ++ default: ++ adapter->flags2 &= ~IXGBE_FLAG2_EEE_CAPABLE; ++ adapter->flags2 &= ~IXGBE_FLAG2_EEE_ENABLED; ++ break; ++ } ++} ++ ++#if IS_ENABLED(CONFIG_DCB) ++static void ixgbe_init_dcb(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_dcb_tc_config *tc; ++ int j, bwg_pct; ++ ++ /* Configure DCB traffic classes */ ++ bwg_pct = 100 / adapter->dcb_cfg.num_tcs.pg_tcs; ++ for (j = 0; j < adapter->dcb_cfg.num_tcs.pg_tcs; j++) { ++ tc = &adapter->dcb_cfg.tc_config[j]; ++ tc->path[IXGBE_DCB_TX_CONFIG].bwg_id = 0; ++ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; ++ tc->path[IXGBE_DCB_RX_CONFIG].bwg_id = 0; ++ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; ++ tc->pfc = ixgbe_dcb_pfc_disabled; ++ } ++ ++ /* reset back to TC 0 */ ++ tc = &adapter->dcb_cfg.tc_config[0]; ++ ++ /* total of all TCs bandwidth needs to be 100 */ ++ bwg_pct += 100 % adapter->dcb_cfg.num_tcs.pg_tcs; ++ tc->path[IXGBE_DCB_TX_CONFIG].bwg_percent = bwg_pct; ++ tc->path[IXGBE_DCB_RX_CONFIG].bwg_percent = bwg_pct; ++ ++ /* Initialize default user to priority mapping, UPx->TC0 */ ++ tc->path[IXGBE_DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; ++ tc->path[IXGBE_DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; ++ ++ adapter->dcb_cfg.bw_percentage[IXGBE_DCB_TX_CONFIG][0] = 100; ++ adapter->dcb_cfg.bw_percentage[IXGBE_DCB_RX_CONFIG][0] = 100; ++ 
adapter->dcb_cfg.rx_pba_cfg = ixgbe_dcb_pba_equal; ++ adapter->dcb_cfg.pfc_mode_enable = false; ++ adapter->dcb_cfg.round_robin_enable = false; ++ adapter->dcb_set_bitmap = 0x00; ++ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) ++ adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; ++ memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, ++ sizeof(adapter->temp_dcb_cfg)); + } ++#endif /*CONFIG_DCB*/ + + /** + * ixgbe_sw_init - Initialize general software structures (struct ixgbe_adapter) +@@ -5125,156 +6524,188 @@ static void ixgbe_tx_timeout(struct net_device *netdev) + * Fields are initialized based on PCI device information and + * OS network device settings (MTU size). + **/ +-static int ixgbe_sw_init(struct ixgbe_adapter *adapter) ++static int __devinit ixgbe_sw_init(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + struct pci_dev *pdev = adapter->pdev; +- unsigned int rss, fdir; ++ int err; ++ unsigned int fdir; + u32 fwsm; +-#ifdef CONFIG_IXGBE_DCB +- int j; +- struct tc_configuration *tc; +-#endif ++ u16 device_caps; + + /* PCI config space info */ + + hw->vendor_id = pdev->vendor; + hw->device_id = pdev->device; +- hw->revision_id = pdev->revision; ++ pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id); ++ if (hw->revision_id == IXGBE_FAILED_READ_CFG_BYTE && ++ ixgbe_check_cfg_remove(hw, pdev)) { ++ e_err(probe, "read of revision id failed\n"); ++ err = -ENODEV; ++ goto out; ++ } + hw->subsystem_vendor_id = pdev->subsystem_vendor; + hw->subsystem_device_id = pdev->subsystem_device; + ++ err = ixgbe_init_shared_code(hw); ++ if (err) { ++ e_err(probe, "init_shared_code failed: %d\n", err); ++ goto out; ++ } ++ adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * ++ hw->mac.num_rar_entries, ++ GFP_ATOMIC); ++ if (!adapter->mac_table) { ++ err = IXGBE_ERR_OUT_OF_MEM; ++ e_err(probe, "mac_table allocation failed: %d\n", err); ++ goto out; ++ } ++ ++ if (ixgbe_init_rss_key(adapter)) { ++ err = IXGBE_ERR_OUT_OF_MEM; 
++ e_err(probe, "rss_key allocation failed: %d\n", err); ++ goto out; ++ } ++ + /* Set common capability flags and settings */ +- rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); +- adapter->ring_feature[RING_F_RSS].limit = rss; +- adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; +- adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; +- adapter->max_q_vectors = MAX_Q_VECTORS_82599; +- adapter->atr_sample_rate = 20; +- fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); +- adapter->ring_feature[RING_F_FDIR].limit = fdir; +- adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; +-#ifdef CONFIG_IXGBE_DCA ++#if IS_ENABLED(CONFIG_DCA) + adapter->flags |= IXGBE_FLAG_DCA_CAPABLE; + #endif +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_DCB) ++ adapter->flags |= IXGBE_FLAG_DCB_CAPABLE; ++ adapter->flags &= ~IXGBE_FLAG_DCB_ENABLED; ++#endif ++#if IS_ENABLED(CONFIG_FCOE) + adapter->flags |= IXGBE_FLAG_FCOE_CAPABLE; + adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +-#ifdef CONFIG_IXGBE_DCB ++#if IS_ENABLED(CONFIG_DCB) + /* Default traffic class to use for FCoE */ +- adapter->fcoe.up = IXGBE_FCOE_DEFTC; +-#endif /* CONFIG_IXGBE_DCB */ +-#endif /* IXGBE_FCOE */ +- +- adapter->mac_table = kzalloc(sizeof(struct ixgbe_mac_addr) * +- hw->mac.num_rar_entries, +- GFP_ATOMIC); ++ adapter->fcoe.up = IXGBE_FCOE_DEFUP; ++ adapter->fcoe.up_set = IXGBE_FCOE_DEFUP; ++#endif /* CONFIG_DCB */ ++#endif /* CONFIG_FCOE */ ++ adapter->flags2 |= IXGBE_FLAG2_RSC_CAPABLE; ++ fdir = min_t(int, IXGBE_MAX_FDIR_INDICES, num_online_cpus()); ++ adapter->ring_feature[RING_F_FDIR].limit = fdir; ++ adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82599; + + /* Set MAC specific capability flags and exceptions */ + switch (hw->mac.type) { + case ixgbe_mac_82598EB: ++ adapter->flags |= IXGBE_FLAGS_82598_INIT; + adapter->flags2 &= ~IXGBE_FLAG2_RSC_CAPABLE; +- adapter->flags2 &= ~IXGBE_FLAG2_RSC_ENABLED; + + if (hw->device_id == IXGBE_DEV_ID_82598AT) + adapter->flags |= IXGBE_FLAG_FAN_FAIL_CAPABLE; + +- 
adapter->max_q_vectors = MAX_Q_VECTORS_82598; ++ adapter->max_q_vectors = IXGBE_MAX_MSIX_Q_VECTORS_82598; + adapter->ring_feature[RING_F_FDIR].limit = 0; +- adapter->atr_sample_rate = 0; +- adapter->fdir_pballoc = 0; +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; +- adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; +-#ifdef CONFIG_IXGBE_DCB ++#if IS_ENABLED(CONFIG_DCB) + adapter->fcoe.up = 0; ++ adapter->fcoe.up_set = 0; + #endif /* IXGBE_DCB */ +-#endif /* IXGBE_FCOE */ ++#endif /* CONFIG_FCOE */ + break; + case ixgbe_mac_82599EB: ++ adapter->flags |= IXGBE_FLAGS_82599_INIT; + if (hw->device_id == IXGBE_DEV_ID_82599_T3_LOM) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; ++#ifndef IXGBE_NO_SMART_SPEED ++ hw->phy.smart_speed = ixgbe_smart_speed_on; ++#else ++ hw->phy.smart_speed = ixgbe_smart_speed_off; ++#endif + break; + case ixgbe_mac_X540: ++ adapter->flags |= IXGBE_FLAGS_X540_INIT; + fwsm = IXGBE_READ_REG(hw, IXGBE_FWSM); + if (fwsm & IXGBE_FWSM_TS_ENABLED) + adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; + break; ++ case ixgbe_mac_X550EM_a: ++ adapter->flags |= IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE; ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; ++ break; ++ default: ++ break; ++ } ++ /* fall through */ ++ case ixgbe_mac_X550EM_x: ++#if IS_ENABLED(CONFIG_DCB) ++ adapter->flags &= ~IXGBE_FLAG_DCB_CAPABLE; ++#endif ++#if IS_ENABLED(CONFIG_FCOE) ++ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; ++#if IS_ENABLED(CONFIG_DCB) ++ adapter->fcoe.up = 0; ++ adapter->fcoe.up_set = 0; ++#endif /* CONFIG_DCB */ ++#endif /* CONFIG_FCOE */ ++ /* fall through */ ++ case ixgbe_mac_X550: ++ if (hw->mac.type == ixgbe_mac_X550) ++ adapter->flags2 |= IXGBE_FLAG2_TEMP_SENSOR_CAPABLE; ++ ixgbe_set_eee_capable(adapter); ++ adapter->flags |= IXGBE_FLAGS_X550_INIT; ++#if IS_ENABLED(CONFIG_DCA) ++ adapter->flags &= 
~IXGBE_FLAG_DCA_CAPABLE; ++#endif /* CONFIG_DCA */ + default: + break; + } + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* FCoE support exists, always init the FCoE lock */ + spin_lock_init(&adapter->fcoe.lock); ++#endif /* CONFIG_FCOE */ + +-#endif + /* n-tuple support exists, always init our spinlock */ + spin_lock_init(&adapter->fdir_perfect_lock); + +-#ifdef CONFIG_IXGBE_DCB ++#if IS_ENABLED(CONFIG_DCB) + switch (hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ case ixgbe_mac_82599EB: ++ adapter->dcb_cfg.num_tcs.pg_tcs = 8; ++ adapter->dcb_cfg.num_tcs.pfc_tcs = 8; ++ break; + case ixgbe_mac_X540: +- adapter->dcb_cfg.num_tcs.pg_tcs = X540_TRAFFIC_CLASS; +- adapter->dcb_cfg.num_tcs.pfc_tcs = X540_TRAFFIC_CLASS; ++ case ixgbe_mac_X550: ++ adapter->dcb_cfg.num_tcs.pg_tcs = 4; ++ adapter->dcb_cfg.num_tcs.pfc_tcs = 4; + break; ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + default: +- adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; +- adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; ++ adapter->dcb_cfg.num_tcs.pg_tcs = 1; ++ adapter->dcb_cfg.num_tcs.pfc_tcs = 1; + break; + } ++ ixgbe_init_dcb(adapter); + +- /* Configure DCB traffic classes */ +- for (j = 0; j < MAX_TRAFFIC_CLASS; j++) { +- tc = &adapter->dcb_cfg.tc_config[j]; +- tc->path[DCB_TX_CONFIG].bwg_id = 0; +- tc->path[DCB_TX_CONFIG].bwg_percent = 12 + (j & 1); +- tc->path[DCB_RX_CONFIG].bwg_id = 0; +- tc->path[DCB_RX_CONFIG].bwg_percent = 12 + (j & 1); +- tc->dcb_pfc = pfc_disabled; +- } +- +- /* Initialize default user to priority mapping, UPx->TC0 */ +- tc = &adapter->dcb_cfg.tc_config[0]; +- tc->path[DCB_TX_CONFIG].up_to_tc_bitmap = 0xFF; +- tc->path[DCB_RX_CONFIG].up_to_tc_bitmap = 0xFF; +- +- adapter->dcb_cfg.bw_percentage[DCB_TX_CONFIG][0] = 100; +- adapter->dcb_cfg.bw_percentage[DCB_RX_CONFIG][0] = 100; +- adapter->dcb_cfg.pfc_mode_enable = false; +- adapter->dcb_set_bitmap = 0x00; +- adapter->dcbx_cap = DCB_CAP_DCBX_HOST | DCB_CAP_DCBX_VER_CEE; +- 
memcpy(&adapter->temp_dcb_cfg, &adapter->dcb_cfg, +- sizeof(adapter->temp_dcb_cfg)); ++#endif /* CONFIG_DCB */ + +-#endif ++ if (hw->mac.type == ixgbe_mac_82599EB || ++ hw->mac.type == ixgbe_mac_X550 || ++ hw->mac.type == ixgbe_mac_X550EM_x || ++ hw->mac.type == ixgbe_mac_X550EM_a || ++ hw->mac.type == ixgbe_mac_X540) ++ hw->mbx.ops.init_params(hw); + + /* default flow control settings */ + hw->fc.requested_mode = ixgbe_fc_full; + hw->fc.current_mode = ixgbe_fc_full; /* init for ethtool output */ ++ ++ adapter->last_lfc_mode = hw->fc.current_mode; + ixgbe_pbthresh_setup(adapter); + hw->fc.pause_time = IXGBE_DEFAULT_FCPAUSE; + hw->fc.send_xon = true; +- hw->fc.disable_fc_autoneg = ixgbe_device_supports_autoneg_fc(hw); +- +-#ifdef CONFIG_PCI_IOV +- if (max_vfs > 0) +- e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated - please use the pci sysfs interface instead.\n"); +- +- /* assign number of SR-IOV VFs */ +- if (hw->mac.type != ixgbe_mac_82598EB) { +- if (max_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { +- adapter->num_vfs = 0; +- e_dev_warn("max_vfs parameter out of range. 
Not assigning any SR-IOV VFs\n"); +- } else { +- adapter->num_vfs = max_vfs; +- } +- } +-#endif /* CONFIG_PCI_IOV */ +- +- /* enable itr by default in dynamic mode */ +- adapter->rx_itr_setting = 1; +- adapter->tx_itr_setting = 1; ++ hw->fc.disable_fc_autoneg = false; + + /* set default ring sizes */ + adapter->tx_ring_count = IXGBE_DEFAULT_TXD; +@@ -5282,18 +6713,26 @@ static int ixgbe_sw_init(struct ixgbe_adapter *adapter) + + /* set default work limits */ + adapter->tx_work_limit = IXGBE_DEFAULT_TX_WORK; ++ adapter->rx_work_limit = IXGBE_DEFAULT_RX_WORK; + +- /* initialize eeprom parameters */ +- if (ixgbe_init_eeprom_params_generic(hw)) { +- e_dev_err("EEPROM initialization failed\n"); +- return -EIO; ++ /* Cache bit indicating need for crosstalk fix */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ hw->mac.ops.get_device_caps(hw, &device_caps); ++ if (device_caps & IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR) ++ adapter->need_crosstalk_fix = false; ++ else ++ adapter->need_crosstalk_fix = true; ++ break; ++ default: ++ adapter->need_crosstalk_fix = false; ++ break; + } +- +- /* PF holds first pool slot */ +- set_bit(0, &adapter->fwd_bitmask); + set_bit(__IXGBE_DOWN, &adapter->state); +- +- return 0; ++out: ++ return err; + } + + /** +@@ -5320,8 +6759,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) + if (!tx_ring->tx_buffer_info) + goto err; + +- u64_stats_init(&tx_ring->syncp); +- + /* round up to nearest 4K */ + tx_ring->size = tx_ring->count * sizeof(union ixgbe_adv_tx_desc); + tx_ring->size = ALIGN(tx_ring->size, 4096); +@@ -5338,8 +6775,6 @@ int ixgbe_setup_tx_resources(struct ixgbe_ring *tx_ring) + if (!tx_ring->desc) + goto err; + +- tx_ring->next_to_use = 0; +- tx_ring->next_to_clean = 0; + return 0; + + err: +@@ -5364,6 +6799,7 @@ static int ixgbe_setup_all_tx_resources(struct ixgbe_adapter *adapter) + int i, err = 0; + + for (i = 0; i < adapter->num_tx_queues; i++) { ++ + err = 
ixgbe_setup_tx_resources(adapter->tx_ring[i]); + if (!err) + continue; +@@ -5404,8 +6840,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) + if (!rx_ring->rx_buffer_info) + goto err; + +- u64_stats_init(&rx_ring->syncp); +- + /* Round up to nearest 4K */ + rx_ring->size = rx_ring->count * sizeof(union ixgbe_adv_rx_desc); + rx_ring->size = ALIGN(rx_ring->size, 4096); +@@ -5422,9 +6856,6 @@ int ixgbe_setup_rx_resources(struct ixgbe_ring *rx_ring) + if (!rx_ring->desc) + goto err; + +- rx_ring->next_to_clean = 0; +- rx_ring->next_to_use = 0; +- + return 0; + err: + vfree(rx_ring->rx_buffer_info); +@@ -5449,14 +6880,15 @@ static int ixgbe_setup_all_rx_resources(struct ixgbe_adapter *adapter) + + for (i = 0; i < adapter->num_rx_queues; i++) { + err = ixgbe_setup_rx_resources(adapter->rx_ring[i]); +- if (!err) ++ if (!err) { + continue; ++ } + + e_err(probe, "Allocation for Rx Queue %u failed\n", i); + goto err_setup_rx; + } + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + err = ixgbe_setup_fcoe_ddp_resources(adapter); + if (!err) + #endif +@@ -5487,7 +6919,6 @@ void ixgbe_free_tx_resources(struct ixgbe_ring *tx_ring) + + dma_free_coherent(tx_ring->dev, tx_ring->size, + tx_ring->desc, tx_ring->dma); +- + tx_ring->desc = NULL; + } + +@@ -5502,8 +6933,7 @@ static void ixgbe_free_all_tx_resources(struct ixgbe_adapter *adapter) + int i; + + for (i = 0; i < adapter->num_tx_queues; i++) +- if (adapter->tx_ring[i]->desc) +- ixgbe_free_tx_resources(adapter->tx_ring[i]); ++ ixgbe_free_tx_resources(adapter->tx_ring[i]); + } + + /** +@@ -5539,13 +6969,12 @@ static void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) + { + int i; + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + ixgbe_free_fcoe_ddp_resources(adapter); +- + #endif ++ + for (i = 0; i < adapter->num_rx_queues; i++) +- if (adapter->rx_ring[i]->desc) +- ixgbe_free_rx_resources(adapter->rx_ring[i]); ++ ixgbe_free_rx_resources(adapter->rx_ring[i]); + } + + /** +@@ -5558,12 +6987,16 @@ static 
void ixgbe_free_all_rx_resources(struct ixgbe_adapter *adapter) + static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); ++#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; ++#endif + ++#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + /* MTU < 68 is an error and causes problems on some kernels */ + if ((new_mtu < 68) || (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE)) + return -EINVAL; + ++#endif + /* + * For 82599EB we cannot allow legacy VFs to enable their receive + * paths when MTU greater than 1500 is configured. So display a +@@ -5571,7 +7004,11 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) + */ + if ((adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) && + (adapter->hw.mac.type == ixgbe_mac_82599EB) && ++#ifndef HAVE_NETDEVICE_MIN_MAX_MTU + (max_frame > (ETH_FRAME_LEN + ETH_FCS_LEN))) ++#else ++ (new_mtu > ETH_DATA_LEN)) ++#endif + e_warn(probe, "Setting MTU > 1500 will disable legacy VFs\n"); + + e_info(probe, "changing MTU from %d to %d\n", netdev->mtu, new_mtu); +@@ -5600,7 +7037,7 @@ static int ixgbe_change_mtu(struct net_device *netdev, int new_mtu) + static int ixgbe_open(struct net_device *netdev) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); +- int err, queues; ++ int err; + + /* disallow open during test */ + if (test_bit(__IXGBE_TESTING, &adapter->state)) +@@ -5625,34 +7062,40 @@ static int ixgbe_open(struct net_device *netdev) + goto err_req_irq; + + /* Notify the stack of the actual queue counts. */ +- if (adapter->num_rx_pools > 1) +- queues = adapter->num_rx_queues_per_pool; +- else +- queues = adapter->num_tx_queues; +- +- err = netif_set_real_num_tx_queues(netdev, queues); ++ err = netif_set_real_num_tx_queues(netdev, ++ adapter->num_rx_pools > 1 ? 
1 : ++ adapter->num_tx_queues); + if (err) + goto err_set_queues; + +- if (adapter->num_rx_pools > 1 && +- adapter->num_rx_queues > IXGBE_MAX_L2A_QUEUES) +- queues = IXGBE_MAX_L2A_QUEUES; +- else +- queues = adapter->num_rx_queues; +- err = netif_set_real_num_rx_queues(netdev, queues); ++ err = netif_set_real_num_rx_queues(netdev, ++ adapter->num_rx_pools > 1 ? 1 : ++ adapter->num_rx_queues); + if (err) + goto err_set_queues; + ++#ifdef HAVE_PTP_1588_CLOCK + ixgbe_ptp_init(adapter); ++#endif /* HAVE_PTP_1588_CLOCK*/ + + ixgbe_up_complete(adapter); + +- return 0; ++#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ++ ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); ++#endif ++#ifdef HAVE_UDP_ENC_RX_OFFLOAD ++ udp_tunnel_get_rx_info(netdev); ++#elif defined(HAVE_VXLAN_RX_OFFLOAD) ++ vxlan_get_rx_port(netdev); ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ ++ return IXGBE_SUCCESS; + + err_set_queues: + ixgbe_free_irq(adapter); + err_req_irq: + ixgbe_free_all_rx_resources(adapter); ++ if (!adapter->wol) ++ ixgbe_set_phy_power(&adapter->hw, false); + err_setup_rx: + ixgbe_free_all_tx_resources(adapter); + err_setup_tx: +@@ -5661,15 +7104,31 @@ err_setup_tx: + return err; + } + ++/** ++ * ixgbe_close_suspend - actions necessary to both suspend and close flows ++ * @adapter: the private adapter struct ++ * ++ * This function should contain the necessary work common to both suspending ++ * and closing of the device. 
++ */ + static void ixgbe_close_suspend(struct ixgbe_adapter *adapter) + { ++#ifdef HAVE_PTP_1588_CLOCK + ixgbe_ptp_suspend(adapter); ++#endif + +- ixgbe_down(adapter); ++ if (adapter->hw.phy.ops.enter_lplu) { ++ adapter->hw.phy.reset_disable = true; ++ ixgbe_down(adapter); ++ ixgbe_enter_lplu(&adapter->hw); ++ adapter->hw.phy.reset_disable = false; ++ } else { ++ ixgbe_down(adapter); ++ } + ixgbe_free_irq(adapter); + +- ixgbe_free_all_tx_resources(adapter); + ixgbe_free_all_rx_resources(adapter); ++ ixgbe_free_all_tx_resources(adapter); + } + + /** +@@ -5687,9 +7146,12 @@ static int ixgbe_close(struct net_device *netdev) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + ++#ifdef HAVE_PTP_1588_CLOCK + ixgbe_ptp_stop(adapter); ++#endif + +- ixgbe_close_suspend(adapter); ++ if (netif_device_present(netdev)) ++ ixgbe_close_suspend(adapter); + + ixgbe_fdir_filter_exit(adapter); + +@@ -5699,12 +7161,21 @@ static int ixgbe_close(struct net_device *netdev) + } + + #ifdef CONFIG_PM ++#ifndef USE_LEGACY_PM_SUPPORT ++static int ixgbe_resume(struct device *dev) ++#else + static int ixgbe_resume(struct pci_dev *pdev) ++#endif /* USE_LEGACY_PM_SUPPORT */ + { +- struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); +- struct net_device *netdev = adapter->netdev; ++ struct ixgbe_adapter *adapter; ++ struct net_device *netdev; + u32 err; ++#ifndef USE_LEGACY_PM_SUPPORT ++ struct pci_dev *pdev = to_pci_dev(dev); ++#endif + ++ adapter = pci_get_drvdata(pdev); ++ netdev = adapter->netdev; + adapter->hw.hw_addr = adapter->io_addr; + pci_set_power_state(pdev, PCI_D0); + pci_restore_state(pdev); +@@ -5730,21 +7201,90 @@ static int ixgbe_resume(struct pci_dev *pdev) + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + + rtnl_lock(); ++ + err = ixgbe_init_interrupt_scheme(adapter); + if (!err && netif_running(netdev)) + err = ixgbe_open(netdev); + ++ ++ if (!err) ++ netif_device_attach(netdev); ++ + rtnl_unlock(); + +- if (err) +- return err; ++ return err; ++} ++ ++#ifndef 
USE_LEGACY_PM_SUPPORT ++/** ++ * ixgbe_freeze - quiesce the device (no IRQ's or DMA) ++ * @dev: The port's netdev ++ */ ++static int ixgbe_freeze(struct device *dev) ++{ ++ struct ixgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); ++ struct net_device *netdev = adapter->netdev; ++ bool lplu_enabled = !!adapter->hw.phy.ops.enter_lplu; ++ ++ rtnl_lock(); ++ netif_device_detach(netdev); ++ ++ if (netif_running(netdev)) { ++ if (lplu_enabled) { ++ adapter->hw.phy.reset_disable = true; ++ ixgbe_down(adapter); ++ adapter->hw.phy.reset_disable = false; ++ } else { ++ ixgbe_down(adapter); ++ } ++ ixgbe_free_irq(adapter); ++ } ++ ++ ixgbe_reset_interrupt_capability(adapter); ++ rtnl_unlock(); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_thaw - un-quiesce the device ++ * @dev: The port's netdev ++ */ ++static int ixgbe_thaw(struct device *dev) ++{ ++ struct ixgbe_adapter *adapter = pci_get_drvdata(to_pci_dev(dev)); ++ struct net_device *netdev = adapter->netdev; ++ bool lplu_enabled = !!adapter->hw.phy.ops.enter_lplu; ++ ++ ixgbe_set_interrupt_capability(adapter); ++ ++ if (netif_running(netdev)) { ++ u32 err = ixgbe_request_irq(adapter); ++ if (err) ++ return err; ++ ++ if (lplu_enabled) { ++ adapter->hw.phy.reset_disable = true; ++ ixgbe_up(adapter); ++ adapter->hw.phy.reset_disable = false; ++ } else { ++ ixgbe_up(adapter); ++ } ++ } + + netif_device_attach(netdev); + + return 0; + } ++#endif /* USE_LEGACY_PM_SUPPORT */ + #endif /* CONFIG_PM */ + ++/* ++ * __ixgbe_shutdown is not used when power management ++ * is disabled on older kernels (<2.6.12). causes a compile ++ * warning/error, because it is defined and not used. 
++ */ ++#if defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) + static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) + { + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); +@@ -5756,14 +7296,14 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) + int retval = 0; + #endif + ++ rtnl_lock(); + netif_device_detach(netdev); + +- rtnl_lock(); + if (netif_running(netdev)) + ixgbe_close_suspend(adapter); +- rtnl_unlock(); + + ixgbe_clear_interrupt_scheme(adapter); ++ rtnl_unlock(); + + #ifdef CONFIG_PM + retval = pci_save_state(pdev); +@@ -5771,8 +7311,10 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) + return retval; + + #endif +- if (hw->mac.ops.stop_link_on_d3) +- hw->mac.ops.stop_link_on_d3(hw); ++ ++ /* this won't stop link if manageability or WoL is enabled */ ++ if (hw->mac.type == ixgbe_mac_82599EB) ++ ixgbe_stop_mac_link_on_d3_82599(hw); + + if (wufc) { + ixgbe_set_rx_mode(netdev); +@@ -5804,6 +7346,9 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + pci_wake_from_d3(pdev, !!wufc); + break; + default: +@@ -5811,6 +7356,8 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) + } + + *enable_wake = !!wufc; ++ if (!*enable_wake) ++ ixgbe_set_phy_power(hw, false); + + ixgbe_release_hw_control(adapter); + +@@ -5819,12 +7366,21 @@ static int __ixgbe_shutdown(struct pci_dev *pdev, bool *enable_wake) + + return 0; + } ++#endif /* defined(CONFIG_PM) || !defined(USE_REBOOT_NOTIFIER) */ + + #ifdef CONFIG_PM +-static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) ++#ifndef USE_LEGACY_PM_SUPPORT ++static int ixgbe_suspend(struct device *dev) ++#else ++static int ixgbe_suspend(struct pci_dev *pdev, ++ pm_message_t __always_unused state) ++#endif /* USE_LEGACY_PM_SUPPORT */ + { + int retval; + bool wake; ++#ifndef
USE_LEGACY_PM_SUPPORT ++ struct pci_dev *pdev = to_pci_dev(dev); ++#endif + + retval = __ixgbe_shutdown(pdev, &wake); + if (retval) +@@ -5841,6 +7397,7 @@ static int ixgbe_suspend(struct pci_dev *pdev, pm_message_t state) + } + #endif /* CONFIG_PM */ + ++#ifndef USE_REBOOT_NOTIFIER + static void ixgbe_shutdown(struct pci_dev *pdev) + { + bool wake; +@@ -5853,13 +7410,105 @@ static void ixgbe_shutdown(struct pci_dev *pdev) + } + } + ++#endif ++#ifdef HAVE_NDO_GET_STATS64 ++/** ++ * ixgbe_get_stats64 - Get System Network Statistics ++ * @netdev: network interface device structure ++ * @stats: storage space for 64bit statistics ++ * ++ * Returns 64bit statistics, for use in the ndo_get_stats64 callback. This ++ * function replaces ixgbe_get_stats for kernels which support it. ++ */ ++#ifdef HAVE_VOID_NDO_GET_STATS64 ++static void ixgbe_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) ++#else ++static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, ++ struct rtnl_link_stats64 *stats) ++#endif ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ int i; ++ ++ rcu_read_lock(); ++ for (i = 0; i < adapter->num_rx_queues; i++) { ++ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); ++ u64 bytes, packets; ++ unsigned int start; ++ ++ if (ring) { ++ do { ++ start = u64_stats_fetch_begin_irq(&ring->syncp); ++ packets = ring->stats.packets; ++ bytes = ring->stats.bytes; ++ } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); ++ stats->rx_packets += packets; ++ stats->rx_bytes += bytes; ++ } ++ } ++ ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); ++ u64 bytes, packets; ++ unsigned int start; ++ ++ if (ring) { ++ do { ++ start = u64_stats_fetch_begin_irq(&ring->syncp); ++ packets = ring->stats.packets; ++ bytes = ring->stats.bytes; ++ } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); ++ stats->tx_packets += packets; ++ stats->tx_bytes += 
bytes; ++ } ++ } ++ rcu_read_unlock(); ++ /* following stats updated by ixgbe_watchdog_task() */ ++ stats->multicast = netdev->stats.multicast; ++ stats->rx_errors = netdev->stats.rx_errors; ++ stats->rx_length_errors = netdev->stats.rx_length_errors; ++ stats->rx_crc_errors = netdev->stats.rx_crc_errors; ++ stats->rx_missed_errors = netdev->stats.rx_missed_errors; ++#ifndef HAVE_VOID_NDO_GET_STATS64 ++ return stats; ++#endif ++} ++#else ++/** ++ * ixgbe_get_stats - Get System Network Statistics ++ * @netdev: network interface device structure ++ * ++ * Returns the address of the device statistics structure. ++ * The statistics are actually updated from the timer callback. ++ **/ ++static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ /* update the stats data */ ++ ixgbe_update_stats(adapter); ++ ++#ifdef HAVE_NETDEV_STATS_IN_NETDEV ++ /* only return the current stats */ ++ return &netdev->stats; ++#else ++ /* only return the current stats */ ++ return &adapter->net_stats; ++#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ ++} ++#endif + /** + * ixgbe_update_stats - Update the board statistics counters. 
+ * @adapter: board private structure + **/ + void ixgbe_update_stats(struct ixgbe_adapter *adapter) + { +- struct net_device *netdev = adapter->netdev; ++#ifdef HAVE_NETDEV_STATS_IN_NETDEV ++ struct net_device_stats *net_stats = &adapter->netdev->stats; ++#else ++ struct net_device_stats *net_stats = &adapter->net_stats; ++#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ + struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_hw_stats *hwstats = &adapter->stats; + u64 total_mpc = 0; +@@ -5891,13 +7540,14 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) + hw_csum_rx_error += rx_ring->rx_stats.csum_err; + bytes += rx_ring->stats.bytes; + packets += rx_ring->stats.packets; ++ + } + adapter->non_eop_descs = non_eop_descs; + adapter->alloc_rx_page_failed = alloc_rx_page_failed; + adapter->alloc_rx_buff_failed = alloc_rx_buff_failed; + adapter->hw_csum_rx_error = hw_csum_rx_error; +- netdev->stats.rx_bytes = bytes; +- netdev->stats.rx_packets = packets; ++ net_stats->rx_bytes = bytes; ++ net_stats->rx_packets = packets; + + bytes = 0; + packets = 0; +@@ -5911,8 +7561,8 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) + } + adapter->restart_queue = restart_queue; + adapter->tx_busy = tx_busy; +- netdev->stats.tx_bytes = bytes; +- netdev->stats.tx_packets = packets; ++ net_stats->tx_bytes = bytes; ++ net_stats->tx_packets = packets; + + hwstats->crcerrs += IXGBE_READ_REG(hw, IXGBE_CRCERRS); + +@@ -5935,6 +7585,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) + break; + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + hwstats->pxonrxc[i] += + IXGBE_READ_REG(hw, IXGBE_PXONRXCNT(i)); + break; +@@ -5948,6 +7601,9 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) + hwstats->qptc[i] += IXGBE_READ_REG(hw, IXGBE_QPTC(i)); + hwstats->qprc[i] += IXGBE_READ_REG(hw, IXGBE_QPRC(i)); + if ((hw->mac.type == ixgbe_mac_82599EB) || ++ (hw->mac.type == ixgbe_mac_X550) || ++ 
(hw->mac.type == ixgbe_mac_X550EM_x) || ++ (hw->mac.type == ixgbe_mac_X550EM_a) || + (hw->mac.type == ixgbe_mac_X540)) { + hwstats->qbtc[i] += IXGBE_READ_REG(hw, IXGBE_QBTC_L(i)); + IXGBE_READ_REG(hw, IXGBE_QBTC_H(i)); /* to clear */ +@@ -5971,11 +7627,15 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORH); + break; + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + /* OS2BMC stats are X540 only*/ + hwstats->o2bgptc += IXGBE_READ_REG(hw, IXGBE_O2BGPTC); + hwstats->o2bspc += IXGBE_READ_REG(hw, IXGBE_O2BSPC); + hwstats->b2ospc += IXGBE_READ_REG(hw, IXGBE_B2OSPC); + hwstats->b2ogprc += IXGBE_READ_REG(hw, IXGBE_B2OGPRC); ++ /* fall through */ + case ixgbe_mac_82599EB: + for (i = 0; i < 16; i++) + adapter->hw_rx_no_dma_resources += +@@ -5987,16 +7647,19 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) + hwstats->tor += IXGBE_READ_REG(hw, IXGBE_TORL); + IXGBE_READ_REG(hw, IXGBE_TORH); /* to clear */ + hwstats->lxonrxc += IXGBE_READ_REG(hw, IXGBE_LXONRXCNT); ++#ifdef HAVE_TX_MQ + hwstats->fdirmatch += IXGBE_READ_REG(hw, IXGBE_FDIRMATCH); + hwstats->fdirmiss += IXGBE_READ_REG(hw, IXGBE_FDIRMISS); +-#ifdef IXGBE_FCOE ++#endif /* HAVE_TX_MQ */ ++#if IS_ENABLED(CONFIG_FCOE) + hwstats->fccrc += IXGBE_READ_REG(hw, IXGBE_FCCRC); ++ hwstats->fclast += IXGBE_READ_REG(hw, IXGBE_FCLAST); + hwstats->fcoerpdc += IXGBE_READ_REG(hw, IXGBE_FCOERPDC); + hwstats->fcoeprc += IXGBE_READ_REG(hw, IXGBE_FCOEPRC); + hwstats->fcoeptc += IXGBE_READ_REG(hw, IXGBE_FCOEPTC); + hwstats->fcoedwrc += IXGBE_READ_REG(hw, IXGBE_FCOEDWRC); + hwstats->fcoedwtc += IXGBE_READ_REG(hw, IXGBE_FCOEDWTC); +- /* Add up per cpu counters for total ddp aloc fail */ ++ /* Add up per cpu counters for total ddp alloc fail */ + if (adapter->fcoe.ddp_pool) { + struct ixgbe_fcoe *fcoe = &adapter->fcoe; + struct ixgbe_fcoe_ddp_pool *ddp_pool; +@@ -6010,7 +7673,8 @@ void ixgbe_update_stats(struct ixgbe_adapter 
*adapter) + hwstats->fcoe_noddp = noddp; + hwstats->fcoe_noddp_ext_buff = noddp_ext_buff; + } +-#endif /* IXGBE_FCOE */ ++ ++#endif /* CONFIG_FCOE */ + break; + default: + break; +@@ -6053,21 +7717,49 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter) + hwstats->ptc1023 += IXGBE_READ_REG(hw, IXGBE_PTC1023); + hwstats->ptc1522 += IXGBE_READ_REG(hw, IXGBE_PTC1522); + hwstats->bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); +- + /* Fill out the OS statistics structure */ +- netdev->stats.multicast = hwstats->mprc; ++ net_stats->multicast = hwstats->mprc; + + /* Rx Errors */ +- netdev->stats.rx_errors = hwstats->crcerrs + hwstats->rlec; +- netdev->stats.rx_dropped = 0; +- netdev->stats.rx_length_errors = hwstats->rlec; +- netdev->stats.rx_crc_errors = hwstats->crcerrs; +- netdev->stats.rx_missed_errors = total_mpc; ++ net_stats->rx_errors = hwstats->crcerrs + ++ hwstats->rlec; ++ net_stats->rx_dropped = 0; ++ net_stats->rx_length_errors = hwstats->rlec; ++ net_stats->rx_crc_errors = hwstats->crcerrs; ++ net_stats->rx_missed_errors = total_mpc; ++ ++ /* ++ * VF Stats Collection - skip while resetting because these ++ * are not clear on read and otherwise you'll sometimes get ++ * crazy values. 
++ */ ++ if (!test_bit(__IXGBE_RESETTING, &adapter->state)) { ++ for (i = 0; i < adapter->num_vfs; i++) { ++ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPRC(i), \ ++ adapter->vfinfo[i].last_vfstats.gprc, \ ++ adapter->vfinfo[i].vfstats.gprc); ++ UPDATE_VF_COUNTER_32bit(IXGBE_PVFGPTC(i), \ ++ adapter->vfinfo[i].last_vfstats.gptc, \ ++ adapter->vfinfo[i].vfstats.gptc); ++ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGORC_LSB(i), \ ++ IXGBE_PVFGORC_MSB(i), \ ++ adapter->vfinfo[i].last_vfstats.gorc, \ ++ adapter->vfinfo[i].vfstats.gorc); ++ UPDATE_VF_COUNTER_36bit(IXGBE_PVFGOTC_LSB(i), \ ++ IXGBE_PVFGOTC_MSB(i), \ ++ adapter->vfinfo[i].last_vfstats.gotc, \ ++ adapter->vfinfo[i].vfstats.gotc); ++ UPDATE_VF_COUNTER_32bit(IXGBE_PVFMPRC(i), \ ++ adapter->vfinfo[i].last_vfstats.mprc, \ ++ adapter->vfinfo[i].vfstats.mprc); ++ } ++ } + } + ++#ifdef HAVE_TX_MQ + /** + * ixgbe_fdir_reinit_subtask - worker thread to reinit FDIR filter table +- * @adapter: pointer to the device adapter structure ++ * @adapter - pointer to the device adapter structure + **/ + static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) + { +@@ -6089,7 +7781,7 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) + + adapter->fdir_overflow++; + +- if (ixgbe_reinit_fdir_tables_82599(hw) == 0) { ++ if (ixgbe_reinit_fdir_tables_82599(hw) == IXGBE_SUCCESS) { + for (i = 0; i < adapter->num_tx_queues; i++) + set_bit(__IXGBE_TX_FDIR_INIT_DONE, + &(adapter->tx_ring[i]->state)); +@@ -6101,9 +7793,10 @@ static void ixgbe_fdir_reinit_subtask(struct ixgbe_adapter *adapter) + } + } + ++#endif /* HAVE_TX_MQ */ + /** + * ixgbe_check_hang_subtask - check for hung queues and dropped interrupts +- * @adapter: pointer to the device adapter structure ++ * @adapter - pointer to the device adapter structure + * + * This function serves two purposes. First it strobes the interrupt lines + * in order to make certain interrupts are occurring. 
Secondly it sets the +@@ -6116,9 +7809,9 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) + u64 eics = 0; + int i; + +- /* If we're down, removing or resetting, just bail */ ++ /* If we're down or resetting, just bail */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || +- test_bit(__IXGBE_REMOVING, &adapter->state) || ++ test_bit(__IXGBE_REMOVE, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + +@@ -6147,13 +7840,12 @@ static void ixgbe_check_hang_subtask(struct ixgbe_adapter *adapter) + + /* Cause software interrupt to ensure rings are cleaned */ + ixgbe_irq_rearm_queues(adapter, eics); +- + } + + /** + * ixgbe_watchdog_update_link - update the link status +- * @adapter: pointer to the device adapter structure +- * @link_speed: pointer to a u32 to store the link_speed ++ * @adapter - pointer to the device adapter structure ++ * @link_speed - pointer to a u32 to store the link_speed + **/ + static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) + { +@@ -6173,9 +7865,37 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) + link_up = true; + } + ++ /* If Crosstalk fix enabled do the sanity check of making sure ++ * the SFP+ cage is empty. 
++ */ ++ if (adapter->need_crosstalk_fix) { ++ u32 sfp_cage_full; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82599EB: ++ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & ++ IXGBE_ESDP_SDP2; ++ break; ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & ++ IXGBE_ESDP_SDP0; ++ break; ++ default: ++ /* Non-SFP+ system - sanity check */ ++ sfp_cage_full = false; ++ break; ++ } ++ ++ if (ixgbe_is_sfp(hw) && link_up && !sfp_cage_full) ++ link_up = false; ++ } ++ ++#ifdef HAVE_DCBNL_IEEE + if (adapter->ixgbe_ieee_pfc) + pfc_en |= !!(adapter->ixgbe_ieee_pfc->pfc_en); + ++#endif + if (link_up && !((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && pfc_en)) { + hw->mac.ops.fc_enable(hw); + ixgbe_set_rx_drop_en(adapter); +@@ -6191,37 +7911,60 @@ static void ixgbe_watchdog_update_link(struct ixgbe_adapter *adapter) + + adapter->link_up = link_up; + adapter->link_speed = link_speed; ++ if (hw->mac.ops.dmac_config && hw->mac.dmac_config.watchdog_timer) { ++ u8 num_tcs = netdev_get_num_tc(adapter->netdev); ++#if IS_ENABLED(CONFIG_FCOE) ++ u8 fcoe_tc = ixgbe_fcoe_get_tc(adapter); ++ bool fcoe_en = !!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED); ++#endif /* CONFIG_FCOE */ ++ ++ if (hw->mac.dmac_config.link_speed != link_speed || ++#if IS_ENABLED(CONFIG_FCOE) ++ hw->mac.dmac_config.fcoe_tc != fcoe_tc || ++ hw->mac.dmac_config.fcoe_en != fcoe_en || ++#endif /* CONFIG_FCOE */ ++ hw->mac.dmac_config.num_tcs != num_tcs) { ++ hw->mac.dmac_config.link_speed = link_speed; ++ hw->mac.dmac_config.num_tcs = num_tcs; ++#if IS_ENABLED(CONFIG_FCOE) ++ hw->mac.dmac_config.fcoe_en = fcoe_en; ++ hw->mac.dmac_config.fcoe_tc = fcoe_tc; ++#endif /* CONFIG_FCOE */ ++ hw->mac.ops.dmac_config(hw); ++ } ++ } + } + + static void ixgbe_update_default_up(struct ixgbe_adapter *adapter) + { +-#ifdef CONFIG_IXGBE_DCB ++ u8 up = 0; ++#ifdef HAVE_DCBNL_IEEE + struct net_device *netdev = adapter->netdev; + struct dcb_app app = { +- .selector = 
IEEE_8021QAZ_APP_SEL_ETHERTYPE, ++ .selector = DCB_APP_IDTYPE_ETHTYPE, + .protocol = 0, + }; +- u8 up = 0; +- +- if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_IEEE) +- up = dcb_ieee_getapp_mask(netdev, &app); ++ up = dcb_getapp(netdev, &app); ++#endif + ++#if IS_ENABLED(CONFIG_FCOE) + adapter->default_up = (up > 1) ? (ffs(up) - 1) : 0; ++#else ++ adapter->default_up = up; + #endif + } + + /** + * ixgbe_watchdog_link_is_up - update netif_carrier status and + * print link up message +- * @adapter: pointer to the device adapter structure ++ * @adapter - pointer to the device adapter structure + **/ + static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) + { + struct net_device *netdev = adapter->netdev; + struct ixgbe_hw *hw = &adapter->hw; +- struct net_device *upper; +- struct list_head *iter; + u32 link_speed = adapter->link_speed; ++ const char *speed_str; + bool flow_rx, flow_tx; + + /* only continue if link was previously down */ +@@ -6238,8 +7981,11 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) + flow_tx = !!(rmcs & IXGBE_RMCS_TFCE_802_3X); + } + break; +- case ixgbe_mac_X540: +- case ixgbe_mac_82599EB: { ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ case ixgbe_mac_82599EB: ++ case ixgbe_mac_X540: { + u32 mflcn = IXGBE_READ_REG(hw, IXGBE_MFLCN); + u32 fccfg = IXGBE_READ_REG(hw, IXGBE_FCCFG); + flow_rx = !!(mflcn & IXGBE_MFLCN_RFCE); +@@ -6252,40 +7998,51 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) + break; + } + ++#ifdef HAVE_PTP_1588_CLOCK + adapter->last_rx_ptp_check = jiffies; + + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + ixgbe_ptp_start_cyclecounter(adapter); + +- e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", +- (link_speed == IXGBE_LINK_SPEED_10GB_FULL ? +- "10 Gbps" : +- (link_speed == IXGBE_LINK_SPEED_1GB_FULL ? +- "1 Gbps" : +- (link_speed == IXGBE_LINK_SPEED_100_FULL ? 
+- "100 Mbps" : +- "unknown speed"))), ++#endif ++ switch (link_speed) { ++ case IXGBE_LINK_SPEED_10GB_FULL: ++ speed_str = "10 Gbps"; ++ break; ++ case IXGBE_LINK_SPEED_5GB_FULL: ++ speed_str = "5 Gbps"; ++ break; ++ case IXGBE_LINK_SPEED_2_5GB_FULL: ++ speed_str = "2.5 Gbps"; ++ break; ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ speed_str = "1 Gbps"; ++ break; ++ case IXGBE_LINK_SPEED_100_FULL: ++ speed_str = "100 Mbps"; ++ break; ++ case IXGBE_LINK_SPEED_10_FULL: ++ speed_str = "10 Mbps"; ++ break; ++ default: ++ speed_str = "unknown speed"; ++ break; ++ } ++ e_info(drv, "NIC Link is Up %s, Flow Control: %s\n", speed_str, + ((flow_rx && flow_tx) ? "RX/TX" : + (flow_rx ? "RX" : + (flow_tx ? "TX" : "None")))); + + netif_carrier_on(netdev); ++#ifdef IFLA_VF_MAX + ixgbe_check_vf_rate_limit(adapter); ++#endif /* IFLA_VF_MAX */ ++ /* Turn on malicious driver detection */ ++ if ((adapter->num_vfs) && (hw->mac.ops.enable_mdd) && ++ (adapter->flags & IXGBE_FLAG_MDD_ENABLED)) ++ hw->mac.ops.enable_mdd(hw); + +- /* enable transmits */ +- netif_tx_wake_all_queues(adapter->netdev); +- +- /* enable any upper devices */ +- rtnl_lock(); +- netdev_for_each_all_upper_dev_rcu(adapter->netdev, upper, iter) { +- if (netif_is_macvlan(upper)) { +- struct macvlan_dev *vlan = netdev_priv(upper); +- +- if (vlan->fwd_priv) +- netif_tx_wake_all_queues(upper); +- } +- } +- rtnl_unlock(); ++ netif_tx_wake_all_queues(netdev); + + /* update the default user priority for VFs */ + ixgbe_update_default_up(adapter); +@@ -6297,7 +8054,7 @@ static void ixgbe_watchdog_link_is_up(struct ixgbe_adapter *adapter) + /** + * ixgbe_watchdog_link_is_down - update netif_carrier status and + * print link down message +- * @adapter: pointer to the adapter structure ++ * @adapter - pointer to the adapter structure + **/ + static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) + { +@@ -6315,46 +8072,149 @@ static void ixgbe_watchdog_link_is_down(struct ixgbe_adapter *adapter) + if (ixgbe_is_sfp(hw) && 
hw->mac.type == ixgbe_mac_82598EB) + adapter->flags2 |= IXGBE_FLAG2_SEARCH_FOR_SFP; + ++#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + ixgbe_ptp_start_cyclecounter(adapter); + ++#endif + e_info(drv, "NIC Link is Down\n"); + netif_carrier_off(netdev); ++ netif_tx_stop_all_queues(netdev); + + /* ping all the active vfs to let them know link has changed */ + ixgbe_ping_all_vfs(adapter); + } + ++static bool ixgbe_ring_tx_pending(struct ixgbe_adapter *adapter) ++{ ++ int i; ++ ++ for (i = 0; i < adapter->num_tx_queues; i++) { ++ struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; ++ ++ if (tx_ring->next_to_use != tx_ring->next_to_clean) ++ return true; ++ } ++ ++ return false; ++} ++ ++static bool ixgbe_vf_tx_pending(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; ++ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); ++ ++ int i, j; ++ ++ if (!adapter->num_vfs) ++ return false; ++ ++ /* resetting the PF is only needed for MACs < X550 */ ++ if (hw->mac.type >= ixgbe_mac_X550) ++ return false; ++ for (i = 0; i < adapter->num_vfs; i++) { ++ for (j = 0; j < q_per_pool; j++) { ++ u32 h, t; ++ ++ h = IXGBE_READ_REG(hw, IXGBE_PVFTDHn(q_per_pool, i, j)); ++ t = IXGBE_READ_REG(hw, IXGBE_PVFTDTn(q_per_pool, i, j)); ++ ++ if (h != t) ++ return true; ++ } ++ } ++ ++ return false; ++} ++ + /** + * ixgbe_watchdog_flush_tx - flush queues on link down +- * @adapter: pointer to the device adapter structure ++ * @adapter - pointer to the device adapter structure + **/ + static void ixgbe_watchdog_flush_tx(struct ixgbe_adapter *adapter) + { +- int i; +- int some_tx_pending = 0; +- + if (!netif_carrier_ok(adapter->netdev)) { +- for (i = 0; i < adapter->num_tx_queues; i++) { +- struct ixgbe_ring *tx_ring = adapter->tx_ring[i]; +- if (tx_ring->next_to_use != tx_ring->next_to_clean) { +- some_tx_pending = 1; +- break; +- } +- } +- +- if (some_tx_pending) { ++ if 
(ixgbe_ring_tx_pending(adapter) || ++ ixgbe_vf_tx_pending(adapter)) { + /* We've lost link, so the controller stops DMA, + * but we've got queued Tx work that's never going + * to get done, so reset controller to flush Tx. + * (Do the reset outside of interrupt context). + */ +- e_warn(drv, "initiating reset to clear Tx work after link loss\n"); +- adapter->flags2 |= IXGBE_FLAG2_RESET_REQUESTED; ++ e_warn(drv, "initiating reset due to lost link with pending Tx work\n"); ++ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); + } + } + } + ++#ifdef CONFIG_PCI_IOV ++static inline void ixgbe_issue_vf_flr(struct ixgbe_adapter *adapter, ++ struct pci_dev *vfdev) ++{ ++ int pos, i; ++ u16 status; ++ ++ /* wait for pending transactions on the bus */ ++ for (i = 0; i < 4; i++) { ++ if (i) ++ msleep((1 << (i - 1)) * 100); ++ ++ pcie_capability_read_word(vfdev, PCI_EXP_DEVSTA, &status); ++ if (!(status & PCI_EXP_DEVSTA_TRPND)) ++ goto clear; ++ } ++ ++ e_dev_warn("Issuing VFLR with pending transactions\n"); ++ ++clear: ++ pos = pci_find_capability(vfdev, PCI_CAP_ID_EXP); ++ if (!pos) ++ return; ++ ++ e_dev_err("Issuing VFLR for VF %s\n", pci_name(vfdev)); ++ pci_write_config_word(vfdev, pos + PCI_EXP_DEVCTL, ++ PCI_EXP_DEVCTL_BCR_FLR); ++ msleep(100); ++} ++ ++static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct pci_dev *pdev = adapter->pdev; ++ unsigned int vf; ++ u32 gpc; ++ ++ if (!(netif_carrier_ok(adapter->netdev))) ++ return; ++ ++ gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); ++ if (gpc) /* If incrementing then no need for the check below */ ++ return; ++ /* ++ * Check to see if a bad DMA write target from an errant or ++ * malicious VF has caused a PCIe error. If so then we can ++ * issue a VFLR to the offending VF(s) and then resume without ++ * requesting a full slot reset. 
++ */ ++ ++ if (!pdev) ++ return; ++ ++ /* check status reg for all VFs owned by this PF */ ++ for (vf = 0; vf < adapter->num_vfs; ++vf) { ++ struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; ++ u16 status_reg; ++ ++ if (!vfdev) ++ continue; ++ pci_read_config_word(vfdev, PCI_STATUS, &status_reg); ++ if (status_reg != IXGBE_FAILED_READ_CFG_WORD && ++ status_reg & PCI_STATUS_REC_MASTER_ABORT) ++ ixgbe_issue_vf_flr(adapter, vfdev); ++ } ++} ++ + static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) + { + u32 ssvpc; +@@ -6373,18 +8233,20 @@ static void ixgbe_spoof_check(struct ixgbe_adapter *adapter) + if (!ssvpc) + return; + +- e_warn(drv, "%u Spoofed packets detected\n", ssvpc); ++ e_warn(drv, "%d Spoofed packets detected\n", ssvpc); + } + ++#endif /* CONFIG_PCI_IOV */ ++ + /** + * ixgbe_watchdog_subtask - check and bring link up +- * @adapter: pointer to the device adapter structure ++ * @adapter - pointer to the device adapter structure + **/ + static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) + { +- /* if interface is down, removing or resetting, do nothing */ ++ /* if interface is down do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || +- test_bit(__IXGBE_REMOVING, &adapter->state) || ++ test_bit(__IXGBE_REMOVE, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + +@@ -6394,8 +8256,10 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) + ixgbe_watchdog_link_is_up(adapter); + else + ixgbe_watchdog_link_is_down(adapter); +- ++#ifdef CONFIG_PCI_IOV + ixgbe_spoof_check(adapter); ++ ixgbe_check_for_bad_vf(adapter); ++#endif /* CONFIG_PCI_IOV */ + ixgbe_update_stats(adapter); + + ixgbe_watchdog_flush_tx(adapter); +@@ -6403,22 +8267,51 @@ static void ixgbe_watchdog_subtask(struct ixgbe_adapter *adapter) + + /** + * ixgbe_sfp_detection_subtask - poll for SFP+ cable +- * @adapter: the ixgbe adapter structure ++ * @adapter - the ixgbe adapter structure + **/ + static void 
ixgbe_sfp_detection_subtask(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + s32 err; + ++ /* If crosstalk fix enabled verify the SFP+ cage is full */ ++ if (adapter->need_crosstalk_fix) { ++ u32 sfp_cage_full; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_82599EB: ++ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & ++ IXGBE_ESDP_SDP2; ++ break; ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ sfp_cage_full = IXGBE_READ_REG(hw, IXGBE_ESDP) & ++ IXGBE_ESDP_SDP0; ++ break; ++ default: ++ /* Non-SFP+ system - sanity check */ ++ sfp_cage_full = false; ++ break; ++ } ++ if (!sfp_cage_full) ++ return; ++ } ++ + /* not searching for SFP so there is nothing to do here */ + if (!(adapter->flags2 & IXGBE_FLAG2_SEARCH_FOR_SFP) && + !(adapter->flags2 & IXGBE_FLAG2_SFP_NEEDS_RESET)) + return; + ++ if (adapter->sfp_poll_time && ++ time_after(adapter->sfp_poll_time, jiffies)) ++ return; /* If not yet time to poll for SFP */ ++ + /* someone else is in init, wait until next service event */ + if (test_and_set_bit(__IXGBE_IN_SFP_INIT, &adapter->state)) + return; + ++ adapter->sfp_poll_time = jiffies + IXGBE_SFP_POLL_JIFFIES - 1; ++ + err = hw->phy.ops.identify_sfp(hw); + if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) + goto sfp_out; +@@ -6459,18 +8352,19 @@ sfp_out: + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + + if ((err == IXGBE_ERR_SFP_NOT_SUPPORTED) && +- (adapter->netdev->reg_state == NETREG_REGISTERED)) { ++ adapter->netdev_registered) { + e_dev_err("failed to initialize because an unsupported " + "SFP+ module type was detected.\n"); + e_dev_err("Reload the driver after installing a " + "supported module.\n"); + unregister_netdev(adapter->netdev); ++ adapter->netdev_registered = false; + } + } + + /** + * ixgbe_sfp_link_config_subtask - set up link SFP after module install +- * @adapter: the ixgbe adapter structure ++ * @adapter - the ixgbe adapter structure + **/ + static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) + 
{ +@@ -6490,7 +8384,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) + speed = hw->phy.autoneg_advertised; + if ((!speed) && (hw->mac.ops.get_link_capabilities)) { + hw->mac.ops.get_link_capabilities(hw, &speed, &autoneg); +- + /* setup the highest link when no autoneg */ + if (!autoneg) { + if (speed & IXGBE_LINK_SPEED_10GB_FULL) +@@ -6506,51 +8399,6 @@ static void ixgbe_sfp_link_config_subtask(struct ixgbe_adapter *adapter) + clear_bit(__IXGBE_IN_SFP_INIT, &adapter->state); + } + +-#ifdef CONFIG_PCI_IOV +-static void ixgbe_check_for_bad_vf(struct ixgbe_adapter *adapter) +-{ +- int vf; +- struct ixgbe_hw *hw = &adapter->hw; +- struct net_device *netdev = adapter->netdev; +- u32 gpc; +- u32 ciaa, ciad; +- +- gpc = IXGBE_READ_REG(hw, IXGBE_TXDGPC); +- if (gpc) /* If incrementing then no need for the check below */ +- return; +- /* +- * Check to see if a bad DMA write target from an errant or +- * malicious VF has caused a PCIe error. If so then we can +- * issue a VFLR to the offending VF(s) and then resume without +- * requesting a full slot reset. 
+- */ +- +- for (vf = 0; vf < adapter->num_vfs; vf++) { +- ciaa = (vf << 16) | 0x80000000; +- /* 32 bit read so align, we really want status at offset 6 */ +- ciaa |= PCI_COMMAND; +- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); +- ciad = IXGBE_READ_REG(hw, IXGBE_CIAD_82599); +- ciaa &= 0x7FFFFFFF; +- /* disable debug mode asap after reading data */ +- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); +- /* Get the upper 16 bits which will be the PCI status reg */ +- ciad >>= 16; +- if (ciad & PCI_STATUS_REC_MASTER_ABORT) { +- netdev_err(netdev, "VF %d Hung DMA\n", vf); +- /* Issue VFLR */ +- ciaa = (vf << 16) | 0x80000000; +- ciaa |= 0xA8; +- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); +- ciad = 0x00008000; /* VFLR */ +- IXGBE_WRITE_REG(hw, IXGBE_CIAD_82599, ciad); +- ciaa &= 0x7FFFFFFF; +- IXGBE_WRITE_REG(hw, IXGBE_CIAA_82599, ciaa); +- } +- } +-} +- +-#endif + /** + * ixgbe_service_timer - Timer Call-back + * @data: pointer to adapter cast into an unsigned long +@@ -6559,56 +8407,43 @@ static void ixgbe_service_timer(unsigned long data) + { + struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; + unsigned long next_event_offset; +- bool ready = true; + + /* poll faster when waiting for link */ + if (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE) + next_event_offset = HZ / 10; + else +- next_event_offset = HZ * 2; +- +-#ifdef CONFIG_PCI_IOV +- /* +- * don't bother with SR-IOV VF DMA hang check if there are +- * no VFs or the link is down +- */ +- if (!adapter->num_vfs || +- (adapter->flags & IXGBE_FLAG_NEED_LINK_UPDATE)) +- goto normal_timer_service; +- +- /* If we have VFs allocated then we must check for DMA hangs */ +- ixgbe_check_for_bad_vf(adapter); +- next_event_offset = HZ / 50; +- adapter->timer_event_accumulator++; +- +- if (adapter->timer_event_accumulator >= 100) +- adapter->timer_event_accumulator = 0; +- else +- ready = false; ++ next_event_offset = HZ * 2; + +-normal_timer_service: +-#endif + /* Reset the timer */ + 
mod_timer(&adapter->service_timer, next_event_offset + jiffies); + +- if (ready) +- ixgbe_service_event_schedule(adapter); ++ ixgbe_service_event_schedule(adapter); + } + +-static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) ++static void ixgbe_phy_interrupt_subtask(struct ixgbe_adapter *adapter) + { +- if (!(adapter->flags2 & IXGBE_FLAG2_RESET_REQUESTED)) ++ u32 status; ++ ++ if (!(adapter->flags2 & IXGBE_FLAG2_PHY_INTERRUPT)) ++ return; ++ adapter->flags2 &= ~IXGBE_FLAG2_PHY_INTERRUPT; ++ status = ixgbe_handle_lasi(&adapter->hw); ++ if (status != IXGBE_ERR_OVERTEMP) + return; ++ e_crit(drv, "%s\n", ixgbe_overheat_msg); ++} + +- adapter->flags2 &= ~IXGBE_FLAG2_RESET_REQUESTED; ++static void ixgbe_reset_subtask(struct ixgbe_adapter *adapter) ++{ ++ if (!test_and_clear_bit(__IXGBE_RESET_REQUESTED, &adapter->state)) ++ return; + +- /* If we're already down, removing or resetting, just bail */ ++ /* If we're already down or resetting, just bail */ + if (test_bit(__IXGBE_DOWN, &adapter->state) || +- test_bit(__IXGBE_REMOVING, &adapter->state) || ++ test_bit(__IXGBE_REMOVE, &adapter->state) || + test_bit(__IXGBE_RESETTING, &adapter->state)) + return; + +- ixgbe_dump(adapter); + netdev_err(adapter->netdev, "Reset adapter\n"); + adapter->tx_timeout_count++; + +@@ -6626,7 +8461,7 @@ static void ixgbe_service_task(struct work_struct *work) + struct ixgbe_adapter *adapter = container_of(work, + struct ixgbe_adapter, + service_task); +- if (ixgbe_removed(adapter->hw.hw_addr)) { ++ if (IXGBE_REMOVED(adapter->hw.hw_addr)) { + if (!test_bit(__IXGBE_DOWN, &adapter->state)) { + rtnl_lock(); + ixgbe_down(adapter); +@@ -6635,29 +8470,57 @@ static void ixgbe_service_task(struct work_struct *work) + ixgbe_service_event_complete(adapter); + return; + } ++#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ++ if (adapter->flags2 & IXGBE_FLAG2_UDP_TUN_REREG_NEEDED) { ++ rtnl_lock(); ++ adapter->flags2 &= ~IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; ++#ifdef 
HAVE_UDP_ENC_RX_OFFLOAD ++ udp_tunnel_get_rx_info(adapter->netdev); ++#else ++ vxlan_get_rx_port(adapter->netdev); ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ ++ rtnl_unlock(); ++ } ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ + ixgbe_reset_subtask(adapter); ++ ixgbe_phy_interrupt_subtask(adapter); + ixgbe_sfp_detection_subtask(adapter); + ixgbe_sfp_link_config_subtask(adapter); + ixgbe_check_overtemp_subtask(adapter); + ixgbe_watchdog_subtask(adapter); ++#ifdef HAVE_TX_MQ + ixgbe_fdir_reinit_subtask(adapter); ++#endif + ixgbe_check_hang_subtask(adapter); +- ++#ifdef HAVE_PTP_1588_CLOCK + if (test_bit(__IXGBE_PTP_RUNNING, &adapter->state)) { + ixgbe_ptp_overflow_check(adapter); +- ixgbe_ptp_rx_hang(adapter); ++ if (unlikely(adapter->flags & IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER)) ++ ixgbe_ptp_rx_hang(adapter); ++ ixgbe_ptp_tx_hang(adapter); + } ++#endif /* HAVE_PTP_1588_CLOCK */ + + ixgbe_service_event_complete(adapter); + } + ++#ifdef NETIF_F_GSO_PARTIAL + static int ixgbe_tso(struct ixgbe_ring *tx_ring, + struct ixgbe_tx_buffer *first, + u8 *hdr_len) + { ++ u32 vlan_macip_lens, type_tucmd, mss_l4len_idx; + struct sk_buff *skb = first->skb; +- u32 vlan_macip_lens, type_tucmd; +- u32 mss_l4len_idx, l4len; ++ union { ++ struct iphdr *v4; ++ struct ipv6hdr *v6; ++ unsigned char *hdr; ++ } ip; ++ union { ++ struct tcphdr *tcp; ++ unsigned char *hdr; ++ } l4; ++ u32 paylen, l4_offset; + int err; + + if (skb->ip_summed != CHECKSUM_PARTIAL) +@@ -6670,6 +8533,140 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, + if (err < 0) + return err; + ++ ip.hdr = skb_network_header(skb); ++ l4.hdr = skb_checksum_start(skb); ++ ++ /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ ++ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; ++ ++ /* initialize outer IP header fields */ ++ if (ip.v4->version == 4) { ++ /* IP header will have to cancel out any data that ++ * is not a part of the outer IP header ++ */ ++ ip.v4->check = csum_fold(csum_add(lco_csum(skb), ++ 
csum_unfold(l4.tcp->check))); ++ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ++ ++ ip.v4->tot_len = 0; ++ first->tx_flags |= IXGBE_TX_FLAGS_TSO | ++ IXGBE_TX_FLAGS_CSUM | ++ IXGBE_TX_FLAGS_IPV4; ++ } else { ++ ip.v6->payload_len = 0; ++ first->tx_flags |= IXGBE_TX_FLAGS_TSO | ++ IXGBE_TX_FLAGS_CSUM; ++ } ++ ++ /* determine offset of inner transport header */ ++ l4_offset = l4.hdr - skb->data; ++ ++ /* compute length of segmentation header */ ++ *hdr_len = (l4.tcp->doff * 4) + l4_offset; ++ ++ /* remove payload length from inner checksum */ ++ paylen = skb->len - l4_offset; ++ csum_replace_by_diff(&l4.tcp->check, htonl(paylen)); ++ ++ /* update gso size and bytecount with header size */ ++ first->gso_segs = skb_shinfo(skb)->gso_segs; ++ first->bytecount += (first->gso_segs - 1) * *hdr_len; ++ ++ /* mss_l4len_id: use 0 as index for TSO */ ++ mss_l4len_idx = (*hdr_len - l4_offset) << IXGBE_ADVTXD_L4LEN_SHIFT; ++ mss_l4len_idx |= skb_shinfo(skb)->gso_size << IXGBE_ADVTXD_MSS_SHIFT; ++ ++ /* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */ ++ vlan_macip_lens = l4.hdr - ip.hdr; ++ vlan_macip_lens |= (ip.hdr - skb->data) << IXGBE_ADVTXD_MACLEN_SHIFT; ++ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ++ ++ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, ++ mss_l4len_idx); ++ ++ return 1; ++} ++ ++static inline bool ixgbe_ipv6_csum_is_sctp(struct sk_buff *skb) ++{ ++ unsigned int offset = 0; ++ ++ ipv6_find_hdr(skb, &offset, IPPROTO_SCTP, NULL, NULL); ++ ++ return offset == skb_checksum_start_offset(skb); ++} ++ ++static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, ++ struct ixgbe_tx_buffer *first) ++{ ++ struct sk_buff *skb = first->skb; ++ u32 vlan_macip_lens = 0; ++ u32 type_tucmd = 0; ++ ++ if (skb->ip_summed != CHECKSUM_PARTIAL) { ++csum_failed: ++ if (!(first->tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | ++ IXGBE_TX_FLAGS_CC))) ++ return; ++ goto no_csum; ++ } ++ ++ switch (skb->csum_offset) { ++ case offsetof(struct tcphdr, check): ++ type_tucmd = 
IXGBE_ADVTXD_TUCMD_L4T_TCP; ++ /* fall through */ ++ case offsetof(struct udphdr, check): ++ break; ++ case offsetof(struct sctphdr, checksum): ++ /* validate that this is actually an SCTP request */ ++ if (((first->protocol == htons(ETH_P_IP)) && ++ (ip_hdr(skb)->protocol == IPPROTO_SCTP)) || ++ ((first->protocol == htons(ETH_P_IPV6)) && ++ ixgbe_ipv6_csum_is_sctp(skb))) { ++ type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_SCTP; ++ break; ++ } ++ /* fall through */ ++ default: ++ skb_checksum_help(skb); ++ goto csum_failed; ++ } ++ ++ /* update TX checksum flag */ ++ first->tx_flags |= IXGBE_TX_FLAGS_CSUM; ++ vlan_macip_lens = skb_checksum_start_offset(skb) - ++ skb_network_offset(skb); ++no_csum: ++ /* vlan_macip_lens: MACLEN, VLAN tag */ ++ vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; ++ vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; ++ ++ ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, type_tucmd, 0); ++} ++#else ++static int ixgbe_tso(struct ixgbe_ring *tx_ring, ++ struct ixgbe_tx_buffer *first, ++ u8 *hdr_len) ++{ ++#ifndef NETIF_F_TSO ++ return 0; ++#else ++ struct sk_buff *skb = first->skb; ++ u32 vlan_macip_lens, type_tucmd; ++ u32 mss_l4len_idx, l4len; ++ ++ if (skb->ip_summed != CHECKSUM_PARTIAL) ++ return 0; ++ ++ if (!skb_is_gso(skb)) ++ return 0; ++ ++ if (skb_header_cloned(skb)) { ++ int err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC); ++ if (err) ++ return err; ++ } ++ + /* ADV DTYP TUCMD MKRLOC/ISCSIHEDLEN */ + type_tucmd = IXGBE_ADVTXD_TUCMD_L4T_TCP; + +@@ -6685,6 +8682,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM | + IXGBE_TX_FLAGS_IPV4; ++#ifdef NETIF_F_TSO6 + } else if (skb_is_gso_v6(skb)) { + ipv6_hdr(skb)->payload_len = 0; + tcp_hdr(skb)->check = +@@ -6693,6 +8691,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, + 0, IPPROTO_TCP, 0); + first->tx_flags |= IXGBE_TX_FLAGS_TSO | + IXGBE_TX_FLAGS_CSUM; ++#endif /* NETIF_F_TSO6 */ + } + + /* 
compute header lengths */ +@@ -6716,6 +8715,7 @@ static int ixgbe_tso(struct ixgbe_ring *tx_ring, + mss_l4len_idx); + + return 1; ++#endif /* !NETIF_F_TSO */ + } + + static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, +@@ -6730,18 +8730,70 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, + if (!(first->tx_flags & IXGBE_TX_FLAGS_HW_VLAN) && + !(first->tx_flags & IXGBE_TX_FLAGS_CC)) + return; ++ vlan_macip_lens = skb_network_offset(skb) << ++ IXGBE_ADVTXD_MACLEN_SHIFT; + } else { + u8 l4_hdr = 0; ++#ifdef HAVE_ENCAP_TSO_OFFLOAD ++ union { ++ struct iphdr *ipv4; ++ struct ipv6hdr *ipv6; ++ u8 *raw; ++ } network_hdr; ++ union { ++ struct tcphdr *tcphdr; ++ u8 *raw; ++ } transport_hdr; ++ __be16 frag_off; ++ ++ if (skb->encapsulation) { ++ network_hdr.raw = skb_inner_network_header(skb); ++ transport_hdr.raw = skb_inner_transport_header(skb); ++ vlan_macip_lens = skb_inner_network_offset(skb) << ++ IXGBE_ADVTXD_MACLEN_SHIFT; ++ } else { ++ network_hdr.raw = skb_network_header(skb); ++ transport_hdr.raw = skb_transport_header(skb); ++ vlan_macip_lens = skb_network_offset(skb) << ++ IXGBE_ADVTXD_MACLEN_SHIFT; ++ } ++ ++ /* use first 4 bits to determine IP version */ ++ switch (network_hdr.ipv4->version) { ++ case IPVERSION: ++ vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; ++ type_tucmd |= IXGBE_ADVTXD_TUCMD_IPV4; ++ l4_hdr = network_hdr.ipv4->protocol; ++ break; ++ case 6: ++ vlan_macip_lens |= transport_hdr.raw - network_hdr.raw; ++ l4_hdr = network_hdr.ipv6->nexthdr; ++ if (likely((transport_hdr.raw - network_hdr.raw) == ++ sizeof(struct ipv6hdr))) ++ break; ++ ipv6_skip_exthdr(skb, network_hdr.raw - skb->data + ++ sizeof(struct ipv6hdr), ++ &l4_hdr, &frag_off); ++ if (unlikely(frag_off)) ++ l4_hdr = NEXTHDR_FRAGMENT; ++ break; ++ default: ++ break; ++ } ++ ++#else /* HAVE_ENCAP_TSO_OFFLOAD */ + switch (first->protocol) { +- case htons(ETH_P_IP): ++ case __constant_htons(ETH_P_IP): + vlan_macip_lens |= skb_network_header_len(skb); + type_tucmd |= 
IXGBE_ADVTXD_TUCMD_IPV4; + l4_hdr = ip_hdr(skb)->protocol; + break; +- case htons(ETH_P_IPV6): ++#ifdef NETIF_F_IPV6_CSUM ++ case __constant_htons(ETH_P_IPV6): + vlan_macip_lens |= skb_network_header_len(skb); + l4_hdr = ipv6_hdr(skb)->nexthdr; + break; ++#endif /* NETIF_F_IPV6_CSUM */ + default: + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, +@@ -6750,28 +8802,46 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, + } + break; + } ++#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + + switch (l4_hdr) { + case IPPROTO_TCP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_TCP; ++#ifdef HAVE_ENCAP_TSO_OFFLOAD ++ mss_l4len_idx = (transport_hdr.tcphdr->doff * 4) << ++ IXGBE_ADVTXD_L4LEN_SHIFT; ++#else + mss_l4len_idx = tcp_hdrlen(skb) << + IXGBE_ADVTXD_L4LEN_SHIFT; ++#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + break; ++#ifdef HAVE_SCTP + case IPPROTO_SCTP: + type_tucmd |= IXGBE_ADVTXD_TUCMD_L4T_SCTP; + mss_l4len_idx = sizeof(struct sctphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; ++#endif /* HAVE_SCTP */ + case IPPROTO_UDP: + mss_l4len_idx = sizeof(struct udphdr) << + IXGBE_ADVTXD_L4LEN_SHIFT; + break; + default: ++#ifdef HAVE_ENCAP_TSO_OFFLOAD ++ if (unlikely(net_ratelimit())) { ++ dev_warn(tx_ring->dev, ++ "partial checksum, version=%d, l4 proto=%x\n", ++ network_hdr.ipv4->version, l4_hdr); ++ } ++ skb_checksum_help(skb); ++ goto no_csum; ++#else + if (unlikely(net_ratelimit())) { + dev_warn(tx_ring->dev, + "partial checksum but l4 proto=%x!\n", + l4_hdr); + } ++#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + break; + } + +@@ -6779,20 +8849,26 @@ static void ixgbe_tx_csum(struct ixgbe_ring *tx_ring, + first->tx_flags |= IXGBE_TX_FLAGS_CSUM; + } + ++#ifdef HAVE_ENCAP_TSO_OFFLOAD ++no_csum: ++#endif /* HAVE_ENCAP_TSO_OFFLOAD */ + /* vlan_macip_lens: MACLEN, VLAN tag */ ++#ifndef HAVE_ENCAP_TSO_OFFLOAD + vlan_macip_lens |= skb_network_offset(skb) << IXGBE_ADVTXD_MACLEN_SHIFT; ++#endif /* !HAVE_ENCAP_TSO_OFFLOAD */ + vlan_macip_lens |= first->tx_flags & IXGBE_TX_FLAGS_VLAN_MASK; + + 
ixgbe_tx_ctxtdesc(tx_ring, vlan_macip_lens, 0, + type_tucmd, mss_l4len_idx); + } ++#endif /* NETIF_F_GSO_PARTIAL */ + + #define IXGBE_SET_FLAG(_input, _flag, _result) \ + ((_flag <= _result) ? \ + ((u32)(_input & _flag) * (_result / _flag)) : \ + ((u32)(_input & _flag) / (_flag / _result))) + +-static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) ++static u32 ixgbe_tx_cmd_type(u32 tx_flags) + { + /* set type for advanced descriptor with frame checksum insertion */ + u32 cmd_type = IXGBE_ADVTXD_DTYP_DATA | +@@ -6811,9 +8887,6 @@ static u32 ixgbe_tx_cmd_type(struct sk_buff *skb, u32 tx_flags) + cmd_type |= IXGBE_SET_FLAG(tx_flags, IXGBE_TX_FLAGS_TSTAMP, + IXGBE_ADVTXD_MAC_TSTAMP); + +- /* insert frame checksum */ +- cmd_type ^= IXGBE_SET_FLAG(skb->no_fcs, 1, IXGBE_ADVTXD_DCMD_IFCS); +- + return cmd_type; + } + +@@ -6843,12 +8916,42 @@ static void ixgbe_tx_olinfo_status(union ixgbe_adv_tx_desc *tx_desc, + tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); + } + ++static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) ++{ ++ netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); ++ ++ /* Herbert's original patch had: ++ * smp_mb__after_netif_stop_queue(); ++ * but since that doesn't exist yet, just open code it. ++ */ ++ smp_mb(); ++ ++ /* We need to check again in a case another CPU has just ++ * made room available. ++ */ ++ if (likely(ixgbe_desc_unused(tx_ring) < size)) ++ return -EBUSY; ++ ++ /* A reprieve! 
- use start_queue because it doesn't call schedule */ ++ netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); ++ ++tx_ring->tx_stats.restart_queue; ++ return 0; ++} ++ ++static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) ++{ ++ if (likely(ixgbe_desc_unused(tx_ring) >= size)) ++ return 0; ++ ++ return __ixgbe_maybe_stop_tx(tx_ring, size); ++} ++ + #define IXGBE_TXD_CMD (IXGBE_TXD_CMD_EOP | \ + IXGBE_TXD_CMD_RS) + +-static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, +- struct ixgbe_tx_buffer *first, +- const u8 hdr_len) ++static int ixgbe_tx_map(struct ixgbe_ring *tx_ring, ++ struct ixgbe_tx_buffer *first, ++ const u8 hdr_len) + { + struct sk_buff *skb = first->skb; + struct ixgbe_tx_buffer *tx_buffer; +@@ -6857,7 +8960,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, + dma_addr_t dma; + unsigned int data_len, size; + u32 tx_flags = first->tx_flags; +- u32 cmd_type = ixgbe_tx_cmd_type(skb, tx_flags); ++ u32 cmd_type = ixgbe_tx_cmd_type(tx_flags); + u16 i = tx_ring->next_to_use; + + tx_desc = IXGBE_TX_DESC(tx_ring, i); +@@ -6867,7 +8970,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, + size = skb_headlen(skb); + data_len = skb->data_len; + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + if (tx_flags & IXGBE_TX_FLAGS_FCOE) { + if (data_len < sizeof(struct fcoe_crc_eof)) { + size -= sizeof(struct fcoe_crc_eof) - data_len; +@@ -6876,8 +8979,8 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, + data_len -= sizeof(struct fcoe_crc_eof); + } + } ++#endif /* CONFIG_FCOE */ + +-#endif + dma = dma_map_single(tx_ring->dev, skb->data, size, DMA_TO_DEVICE); + + tx_buffer = first; +@@ -6923,7 +9026,7 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, + } + tx_desc->read.olinfo_status = 0; + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + size = min_t(unsigned int, data_len, skb_frag_size(frag)); + #else + size = skb_frag_size(frag); +@@ -6945,6 +9048,9 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, + 
/* set the timestamp */ + first->time_stamp = jiffies; + ++#ifndef HAVE_TRANS_START_IN_QUEUE ++ netdev_ring(tx_ring)->trans_start = first->time_stamp; ++#endif + /* + * Force memory writes to complete before letting h/w know there + * are new descriptors to fetch. (Only applicable for weak-ordered +@@ -6964,12 +9070,31 @@ static void ixgbe_tx_map(struct ixgbe_ring *tx_ring, + + tx_ring->next_to_use = i; + ++ ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); ++ ++#ifdef HAVE_SKB_XMIT_MORE ++ if (netif_xmit_stopped(txring_txq(tx_ring)) || !skb->xmit_more) { ++ writel(i, tx_ring->tail); ++ ++ /* we need this if more than one processor can write to our tail ++ * at a time, it synchronizes IO on IA64/Altix systems ++ */ ++ mmiowb(); ++ } ++#else + /* notify HW of packet */ +- ixgbe_write_tail(tx_ring, i); ++ writel(i, tx_ring->tail); + +- return; ++ /* we need this if more than one processor can write to our tail ++ * at a time, it synchronizes IO on IA64/Altix systems ++ */ ++ mmiowb(); ++#endif /* HAVE_SKB_XMIT_MORE */ ++ ++ return 0; + dma_error: + dev_err(tx_ring->dev, "TX DMA map failed\n"); ++ tx_buffer = &tx_ring->tx_buffer_info[i]; + + /* clear dma mappings for failed tx_buffer_info map */ + for (;;) { +@@ -6983,6 +9108,8 @@ dma_error: + } + + tx_ring->next_to_use = i; ++ ++ return -1; + } + + static void ixgbe_atr(struct ixgbe_ring *ring, +@@ -6997,6 +9124,14 @@ static void ixgbe_atr(struct ixgbe_ring *ring, + struct ipv6hdr *ipv6; + } hdr; + struct tcphdr *th; ++#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ++ struct sk_buff *skb; ++#else ++#define IXGBE_NO_VXLAN ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ ++#ifdef IXGBE_NO_VXLAN ++ unsigned int hlen; ++#endif /* IXGBE_NO_VXLAN */ + __be16 vlan_id; + + /* if ring doesn't have a interrupt vector, cannot perform ATR */ +@@ -7010,17 +9145,82 @@ static void ixgbe_atr(struct ixgbe_ring *ring, + ring->atr_count++; + + /* snag network header to get L4 type and address */ +- 
hdr.network = skb_network_header(first->skb); ++#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ++ skb = first->skb; ++ hdr.network = skb_network_header(skb); ++ th = tcp_hdr(skb); ++ ++ if (skb->encapsulation && ++ first->protocol == htons(ETH_P_IP) && ++ hdr.ipv4->protocol == IPPROTO_UDP) { ++ struct ixgbe_adapter *adapter = q_vector->adapter; ++ ++ /* verify the port is recognized as VXLAN or GENEVE*/ ++ if (adapter->vxlan_port && ++ udp_hdr(skb)->dest == adapter->vxlan_port) { ++ hdr.network = skb_inner_network_header(skb); ++ th = inner_tcp_hdr(skb); ++ } ++ ++#ifdef HAVE_UDP_ENC_RX_OFFLOAD ++ if (adapter->geneve_port && ++ udp_hdr(skb)->dest == adapter->geneve_port) { ++ hdr.network = skb_inner_network_header(skb); ++ th = inner_tcp_hdr(skb); ++ } ++#endif ++ } ++ ++ /* Currently only IPv4/IPv6 with TCP is supported */ ++ switch (hdr.ipv4->version) { ++ case IPVERSION: ++ if (hdr.ipv4->protocol != IPPROTO_TCP) ++ return; ++ break; ++ case 6: ++ if (likely((unsigned char *)th - hdr.network == ++ sizeof(struct ipv6hdr))) { ++ if (hdr.ipv6->nexthdr != IPPROTO_TCP) ++ return; ++ } else { ++ __be16 frag_off; ++ u8 l4_hdr; ++ ++ ipv6_skip_exthdr(skb, hdr.network - skb->data + ++ sizeof(struct ipv6hdr), ++ &l4_hdr, &frag_off); ++ if (unlikely(frag_off)) ++ return; ++ if (l4_hdr != IPPROTO_TCP) ++ return; ++ } ++ break; ++ default: ++ return; ++ } + ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ ++#ifdef IXGBE_NO_VXLAN ++ hdr.network = skb_network_header(first->skb); + /* Currently only IPv4/IPv6 with TCP is supported */ +- if ((first->protocol != htons(ETH_P_IPV6) || +- hdr.ipv6->nexthdr != IPPROTO_TCP) && +- (first->protocol != htons(ETH_P_IP) || +- hdr.ipv4->protocol != IPPROTO_TCP)) ++ if (first->protocol == htons(ETH_P_IP)) { ++ if (hdr.ipv4->protocol != IPPROTO_TCP) ++ return; ++ ++ /* access ihl as a u8 to avoid unaligned access on ia64 */ ++ hlen = (hdr.network[0] & 0x0F) << 2; ++ } else if (first->protocol == 
htons(ETH_P_IPV6)) { ++ if (hdr.ipv6->nexthdr != IPPROTO_TCP) ++ return; ++ ++ hlen = sizeof(struct ipv6hdr); ++ } else { + return; ++ } + +- th = tcp_hdr(first->skb); ++ th = (struct tcphdr *)(hdr.network + hlen); + ++#endif /* IXGBE_NO_VXLAN */ + /* skip this packet since it is invalid or the socket is closing */ + if (!th || th->fin) + return; +@@ -7053,6 +9253,31 @@ static void ixgbe_atr(struct ixgbe_ring *ring, + common.port.src ^= th->dest ^ first->protocol; + common.port.dst ^= th->source; + ++#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ++ switch (hdr.ipv4->version) { ++ case IPVERSION: ++ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; ++ common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; ++ break; ++ case 6: ++ input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV6; ++ common.ip ^= hdr.ipv6->saddr.s6_addr32[0] ^ ++ hdr.ipv6->saddr.s6_addr32[1] ^ ++ hdr.ipv6->saddr.s6_addr32[2] ^ ++ hdr.ipv6->saddr.s6_addr32[3] ^ ++ hdr.ipv6->daddr.s6_addr32[0] ^ ++ hdr.ipv6->daddr.s6_addr32[1] ^ ++ hdr.ipv6->daddr.s6_addr32[2] ^ ++ hdr.ipv6->daddr.s6_addr32[3]; ++ break; ++ default: ++ break; ++ } ++ ++ if (hdr.network != skb_network_header(skb)) ++ input.formatted.flow_type |= IXGBE_ATR_L4TYPE_TUNNEL_MASK; ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ ++#ifdef IXGBE_NO_VXLAN + if (first->protocol == htons(ETH_P_IP)) { + input.formatted.flow_type = IXGBE_ATR_FLOW_TYPE_TCPV4; + common.ip ^= hdr.ipv4->saddr ^ hdr.ipv4->daddr; +@@ -7067,66 +9292,48 @@ static void ixgbe_atr(struct ixgbe_ring *ring, + hdr.ipv6->daddr.s6_addr32[2] ^ + hdr.ipv6->daddr.s6_addr32[3]; + } ++#endif /* IXGBE_NO_VXLAN */ + + /* This assumes the Rx queue and Tx queue are bound to the same CPU */ + ixgbe_fdir_add_signature_filter_82599(&q_vector->adapter->hw, + input, common, ring->queue_index); + } + +-static int __ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +-{ +- netif_stop_subqueue(tx_ring->netdev, tx_ring->queue_index); +- /* Herbert's 
original patch had: +- * smp_mb__after_netif_stop_queue(); +- * but since that doesn't exist yet, just open code it. */ +- smp_mb(); +- +- /* We need to check again in a case another CPU has just +- * made room available. */ +- if (likely(ixgbe_desc_unused(tx_ring) < size)) +- return -EBUSY; +- +- /* A reprieve! - use start_queue because it doesn't call schedule */ +- netif_start_subqueue(tx_ring->netdev, tx_ring->queue_index); +- ++tx_ring->tx_stats.restart_queue; +- return 0; +-} +- +-static inline int ixgbe_maybe_stop_tx(struct ixgbe_ring *tx_ring, u16 size) +-{ +- if (likely(ixgbe_desc_unused(tx_ring) >= size)) +- return 0; +- return __ixgbe_maybe_stop_tx(tx_ring, size); +-} +- ++#ifdef HAVE_NETDEV_SELECT_QUEUE ++#if IS_ENABLED(CONFIG_FCOE) ++#if defined(HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK) ++static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, ++ __always_unused void *accel, ++ select_queue_fallback_t fallback) ++#elif defined(HAVE_NDO_SELECT_QUEUE_ACCEL) + static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, +- void *accel_priv, select_queue_fallback_t fallback) ++ __always_unused void *accel) ++#else ++static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb) ++#endif /* HAVE_NDO_SELECT_QUEUE_ACCEL */ + { +- struct ixgbe_fwd_adapter *fwd_adapter = accel_priv; +-#ifdef IXGBE_FCOE +- struct ixgbe_adapter *adapter; ++ struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_ring_feature *f; + int txq; +-#endif +- +- if (fwd_adapter) +- return skb->queue_mapping + fwd_adapter->tx_base_queue; +- +-#ifdef IXGBE_FCOE + + /* + * only execute the code below if protocol is FCoE + * or FIP and we have FCoE enabled on the adapter + */ + switch (vlan_get_protocol(skb)) { +- case htons(ETH_P_FCOE): +- case htons(ETH_P_FIP): ++ case __constant_htons(ETH_P_FCOE): ++ case __constant_htons(ETH_P_FIP): + adapter = netdev_priv(dev); + + if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) + break; ++ /* fall through 
*/ + default: ++#ifdef HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK + return fallback(dev, skb); ++#else ++ return __netdev_pick_tx(dev, skb); ++#endif + } + + f = &adapter->ring_feature[RING_F_FCOE]; +@@ -7138,14 +9345,13 @@ static u16 ixgbe_select_queue(struct net_device *dev, struct sk_buff *skb, + txq -= f->indices; + + return txq + f->offset; +-#else +- return fallback(dev, skb); +-#endif + } ++#endif /* CONFIG_FCOE */ ++#endif /* HAVE_NETDEV_SELECT_QUEUE */ + + netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, +- struct ixgbe_adapter *adapter, +- struct ixgbe_ring *tx_ring) ++ struct ixgbe_adapter __maybe_unused *adapter, ++ struct ixgbe_ring *tx_ring) + { + struct ixgbe_tx_buffer *first; + int tso; +@@ -7177,8 +9383,8 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + first->gso_segs = 1; + + /* if we have a HW VLAN tag being added default to the HW one */ +- if (vlan_tx_tag_present(skb)) { +- tx_flags |= vlan_tx_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; ++ if (skb_vlan_tag_present(skb)) { ++ tx_flags |= skb_vlan_tag_get(skb) << IXGBE_TX_FLAGS_VLAN_SHIFT; + tx_flags |= IXGBE_TX_FLAGS_HW_VLAN; + /* else if it is a SW VLAN check the next protocol and store the tag */ + } else if (protocol == htons(ETH_P_8021Q)) { +@@ -7193,40 +9399,64 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + tx_flags |= IXGBE_TX_FLAGS_SW_VLAN; + } + +- if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP && +- !test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, +- &adapter->state))) { +- skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; +- tx_flags |= IXGBE_TX_FLAGS_TSTAMP; ++ skb_tx_timestamp(skb); + +- /* schedule check for Tx timestamp */ +- adapter->ptp_tx_skb = skb_get(skb); +- adapter->ptp_tx_start = jiffies; +- schedule_work(&adapter->ptp_tx_work); +- } ++#ifdef HAVE_PTP_1588_CLOCK ++#ifdef SKB_SHARED_TX_IS_UNION ++ if (unlikely(skb_tx(skb)->hardware) && ++ adapter->ptp_clock) { ++ if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, ++ &adapter->state)) { 
++ skb_tx(skb)->in_progress = 1; ++#else ++ if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) && ++ adapter->ptp_clock) { ++ if (!test_and_set_bit_lock(__IXGBE_PTP_TX_IN_PROGRESS, ++ &adapter->state)) { ++ skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; ++#endif ++ tx_flags |= IXGBE_TX_FLAGS_TSTAMP; + +- skb_tx_timestamp(skb); ++ /* schedule check for Tx timestamp */ ++ adapter->ptp_tx_skb = skb_get(skb); ++ adapter->ptp_tx_start = jiffies; ++ schedule_work(&adapter->ptp_tx_work); ++ } else { ++ adapter->tx_hwtstamp_skipped++; ++ } ++ } + ++#endif + #ifdef CONFIG_PCI_IOV + /* + * Use the l2switch_enable flag - would be false if the DMA + * Tx switch had been disabled. + */ +- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) ++ if (adapter->flags & IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE) + tx_flags |= IXGBE_TX_FLAGS_CC; + + #endif +- /* DCB maps skb priorities 0-7 onto 3 bit PCP of VLAN tag. */ ++#ifdef HAVE_TX_MQ + if ((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && + ((tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | IXGBE_TX_FLAGS_SW_VLAN)) || + (skb->priority != TC_PRIO_CONTROL))) { + tx_flags &= ~IXGBE_TX_FLAGS_VLAN_PRIO_MASK; +- tx_flags |= (skb->priority & 0x7) << +- IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; ++#if IS_ENABLED(CONFIG_FCOE) ++ /* for FCoE with DCB, we force the priority to what ++ * was specified by the switch */ ++ if ((adapter->flags & IXGBE_FLAG_FCOE_ENABLED) && ++ ((protocol == htons(ETH_P_FCOE)) || ++ (protocol == htons(ETH_P_FIP)))) ++ tx_flags |= adapter->fcoe.up << ++ IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; ++ else ++#endif /* CONFIG_FCOE */ ++ tx_flags |= skb->priority << ++ IXGBE_TX_FLAGS_VLAN_PRIO_SHIFT; + if (tx_flags & IXGBE_TX_FLAGS_SW_VLAN) { + struct vlan_ethhdr *vhdr; +- +- if (skb_cow_head(skb, 0)) ++ if (skb_header_cloned(skb) && ++ pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) + goto out_drop; + vhdr = (struct vlan_ethhdr *)skb->data; + vhdr->h_vlan_TCI = htons(tx_flags >> +@@ -7236,11 +9466,12 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + } + } + 
++#endif /* HAVE_TX_MQ */ + /* record initial flags and protocol */ + first->tx_flags = tx_flags; + first->protocol = protocol; + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + /* setup tx offload for FCoE */ + if ((protocol == htons(ETH_P_FCOE)) && + (tx_ring->netdev->features & (NETIF_F_FSO | NETIF_F_FCOE_CRC))) { +@@ -7249,9 +9480,25 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + goto out_drop; + + goto xmit_fcoe; ++ } else if (protocol == htons(ETH_P_FIP)) { ++ /* FCoE stack has a bug where it does not set the network ++ * header offset for FIP frames sent resulting into MACLEN ++ * being set to ZERO in the Tx context descriptor. ++ * This will cause MDD events when trying to Tx such frames. ++ */ ++ if (!skb_network_offset(skb)) { ++ if (tx_flags & (IXGBE_TX_FLAGS_HW_VLAN | ++ IXGBE_TX_FLAGS_SW_VLAN)) ++ skb_set_network_header(skb, ++ sizeof(struct ethhdr) + ++ sizeof(struct vlan_hdr)); ++ else ++ skb_set_network_header(skb, ++ sizeof(struct ethhdr)); ++ } + } ++#endif /* CONFIG_FCOE */ + +-#endif /* IXGBE_FCOE */ + tso = ixgbe_tso(tx_ring, first, &hdr_len); + if (tso < 0) + goto out_drop; +@@ -7262,51 +9509,65 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, + if (test_bit(__IXGBE_TX_FDIR_INIT_DONE, &tx_ring->state)) + ixgbe_atr(tx_ring, first); + +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + xmit_fcoe: +-#endif /* IXGBE_FCOE */ ++#endif /* CONFIG_FCOE */ ++#ifdef HAVE_PTP_1588_CLOCK ++ if (ixgbe_tx_map(tx_ring, first, hdr_len)) ++ goto cleanup_tx_tstamp; ++#else + ixgbe_tx_map(tx_ring, first, hdr_len); +- +- ixgbe_maybe_stop_tx(tx_ring, DESC_NEEDED); ++#endif + + return NETDEV_TX_OK; + + out_drop: + dev_kfree_skb_any(first->skb); + first->skb = NULL; ++#ifdef HAVE_PTP_1588_CLOCK ++cleanup_tx_tstamp: ++ if (unlikely(tx_flags & IXGBE_TX_FLAGS_TSTAMP)) { ++ dev_kfree_skb_any(adapter->ptp_tx_skb); ++ adapter->ptp_tx_skb = NULL; ++ cancel_work_sync(&adapter->ptp_tx_work); ++ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, 
&adapter->state); ++ } ++#endif + + return NETDEV_TX_OK; + } + +-static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb, +- struct net_device *netdev, +- struct ixgbe_ring *ring) ++static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, ++ struct net_device *netdev) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_ring *tx_ring; ++#ifdef HAVE_TX_MQ ++ unsigned int r_idx = skb->queue_mapping; ++#endif ++ ++ if (!netif_carrier_ok(netdev)) { ++ dev_kfree_skb_any(skb); ++ return NETDEV_TX_OK; ++ } + + /* + * The minimum packet size for olinfo paylen is 17 so pad the skb + * in order to meet this minimum size requirement. + */ +- if (unlikely(skb->len < 17)) { +- if (skb_pad(skb, 17 - skb->len)) +- return NETDEV_TX_OK; +- skb->len = 17; +- skb_set_tail_pointer(skb, 17); +- } +- +- tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping]; ++ if (skb_put_padto(skb, 17)) ++ return NETDEV_TX_OK; + ++#ifdef HAVE_TX_MQ ++ if (r_idx >= adapter->num_tx_queues) ++ r_idx = r_idx % adapter->num_tx_queues; ++ tx_ring = adapter->tx_ring[r_idx]; ++#else ++ tx_ring = adapter->tx_ring[0]; ++#endif + return ixgbe_xmit_frame_ring(skb, adapter, tx_ring); + } + +-static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb, +- struct net_device *netdev) +-{ +- return __ixgbe_xmit_frame(skb, netdev, NULL); +-} +- + /** + * ixgbe_set_mac - Change the Ethernet Address of the NIC + * @netdev: network interface device structure +@@ -7319,76 +9580,36 @@ static int ixgbe_set_mac(struct net_device *netdev, void *p) + struct ixgbe_adapter *adapter = netdev_priv(netdev); + struct ixgbe_hw *hw = &adapter->hw; + struct sockaddr *addr = p; +- int ret; + + if (!is_valid_ether_addr(addr->sa_data)) + return -EADDRNOTAVAIL; + +- ixgbe_del_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); + memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); + memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); + +- ret = ixgbe_add_mac_filter(adapter, hw->mac.addr, VMDQ_P(0)); +- return ret 
> 0 ? 0 : ret; +-} +- +-static int +-ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, u16 addr) +-{ +- struct ixgbe_adapter *adapter = netdev_priv(netdev); +- struct ixgbe_hw *hw = &adapter->hw; +- u16 value; +- int rc; +- +- if (prtad != hw->phy.mdio.prtad) +- return -EINVAL; +- rc = hw->phy.ops.read_reg(hw, addr, devad, &value); +- if (!rc) +- rc = value; +- return rc; +-} +- +-static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, +- u16 addr, u16 value) +-{ +- struct ixgbe_adapter *adapter = netdev_priv(netdev); +- struct ixgbe_hw *hw = &adapter->hw; +- +- if (prtad != hw->phy.mdio.prtad) +- return -EINVAL; +- return hw->phy.ops.write_reg(hw, addr, devad, value); +-} +- +-static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *req, int cmd) +-{ +- struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ixgbe_mac_set_default_filter(adapter); + +- switch (cmd) { +- case SIOCSHWTSTAMP: +- return ixgbe_ptp_set_ts_config(adapter, req); +- case SIOCGHWTSTAMP: +- return ixgbe_ptp_get_ts_config(adapter, req); +- default: +- return mdio_mii_ioctl(&adapter->hw.phy.mdio, if_mii(req), cmd); +- } ++ return 0; + } + ++#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) + /** + * ixgbe_add_sanmac_netdev - Add the SAN MAC address to the corresponding +- * netdev->dev_addrs ++ * netdev->dev_addr_list + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ + static int ixgbe_add_sanmac_netdev(struct net_device *dev) + { +- int err = 0; ++ int err = IXGBE_SUCCESS; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; + + if (is_valid_ether_addr(hw->mac.san_addr)) { + rtnl_lock(); +- err = dev_addr_add(dev, hw->mac.san_addr, NETDEV_HW_ADDR_T_SAN); ++ err = dev_addr_add(dev, hw->mac.san_addr, ++ NETDEV_HW_ADDR_T_SAN); + rtnl_unlock(); + + /* update SAN MAC vmdq pool selection */ +@@ -7399,14 +9620,14 @@ static int ixgbe_add_sanmac_netdev(struct 
net_device *dev) + + /** + * ixgbe_del_sanmac_netdev - Removes the SAN MAC address to the corresponding +- * netdev->dev_addrs ++ * netdev->dev_addr_list + * @netdev: network interface device structure + * + * Returns non-zero on failure + **/ + static int ixgbe_del_sanmac_netdev(struct net_device *dev) + { +- int err = 0; ++ int err = IXGBE_SUCCESS; + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_mac_info *mac = &adapter->hw.mac; + +@@ -7418,6 +9639,83 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev) + return err; + } + ++#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) */ ++ ++static int ixgbe_mdio_read(struct net_device *netdev, int prtad, int devad, ++ u16 addr) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ u16 value; ++ int rc; ++ ++ if (prtad != hw->phy.addr) ++ return -EINVAL; ++ rc = hw->phy.ops.read_reg(hw, addr, devad, &value); ++ if (!rc) ++ rc = value; ++ return rc; ++} ++ ++static int ixgbe_mdio_write(struct net_device *netdev, int prtad, int devad, ++ u16 addr, u16 value) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ if (prtad != hw->phy.addr) ++ return -EINVAL; ++ return hw->phy.ops.write_reg(hw, addr, devad, value); ++} ++ ++static int ixgbe_mii_ioctl(struct net_device *netdev, struct ifreq *ifr, ++ int cmd) ++{ ++ struct mii_ioctl_data *mii = (struct mii_ioctl_data *) &ifr->ifr_data; ++ int prtad, devad, ret; ++ ++ prtad = (mii->phy_id & MDIO_PHY_ID_PRTAD) >> 5; ++ devad = (mii->phy_id & MDIO_PHY_ID_DEVAD); ++ ++ if (cmd == SIOCGMIIREG) { ++ ret = ixgbe_mdio_read(netdev, prtad, devad, mii->reg_num); ++ if (ret < 0) ++ return ret; ++ mii->val_out = ret; ++ return 0; ++ } else { ++ return ixgbe_mdio_write(netdev, prtad, devad, mii->reg_num, ++ mii->val_in); ++ } ++} ++ ++static int ixgbe_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd) ++{ ++#ifdef HAVE_PTP_1588_CLOCK ++ 
struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++#endif ++ switch (cmd) { ++#ifdef HAVE_PTP_1588_CLOCK ++#ifdef SIOCGHWTSTAMP ++ case SIOCGHWTSTAMP: ++ return ixgbe_ptp_get_ts_config(adapter, ifr); ++#endif ++ case SIOCSHWTSTAMP: ++ return ixgbe_ptp_set_ts_config(adapter, ifr); ++#endif ++#ifdef ETHTOOL_OPS_COMPAT ++ case SIOCETHTOOL: ++ return ethtool_ioctl(ifr); ++#endif ++ case SIOCGMIIREG: ++ case SIOCSMIIREG: ++ return ixgbe_mii_ioctl(netdev, ifr, cmd); ++ default: ++ return -EOPNOTSUPP; ++ } ++} ++ + #ifdef CONFIG_NET_POLL_CONTROLLER + /* + * Polling 'interrupt' - used by things like netconsole to send skbs +@@ -7427,74 +9725,25 @@ static int ixgbe_del_sanmac_netdev(struct net_device *dev) + static void ixgbe_netpoll(struct net_device *netdev) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); +- int i; + + /* if interface is down do nothing */ + if (test_bit(__IXGBE_DOWN, &adapter->state)) + return; + +- adapter->flags |= IXGBE_FLAG_IN_NETPOLL; + if (adapter->flags & IXGBE_FLAG_MSIX_ENABLED) { +- for (i = 0; i < adapter->num_q_vectors; i++) ++ int i; ++ for (i = 0; i < adapter->num_q_vectors; i++) { ++ adapter->q_vector[i]->netpoll_rx = true; + ixgbe_msix_clean_rings(0, adapter->q_vector[i]); +- } else { +- ixgbe_intr(adapter->pdev->irq, netdev); +- } +- adapter->flags &= ~IXGBE_FLAG_IN_NETPOLL; +-} +- +-#endif +-static struct rtnl_link_stats64 *ixgbe_get_stats64(struct net_device *netdev, +- struct rtnl_link_stats64 *stats) +-{ +- struct ixgbe_adapter *adapter = netdev_priv(netdev); +- int i; +- +- rcu_read_lock(); +- for (i = 0; i < adapter->num_rx_queues; i++) { +- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->rx_ring[i]); +- u64 bytes, packets; +- unsigned int start; +- +- if (ring) { +- do { +- start = u64_stats_fetch_begin_irq(&ring->syncp); +- packets = ring->stats.packets; +- bytes = ring->stats.bytes; +- } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +- stats->rx_packets += packets; +- stats->rx_bytes += bytes; +- } +- } 
+- +- for (i = 0; i < adapter->num_tx_queues; i++) { +- struct ixgbe_ring *ring = ACCESS_ONCE(adapter->tx_ring[i]); +- u64 bytes, packets; +- unsigned int start; +- +- if (ring) { +- do { +- start = u64_stats_fetch_begin_irq(&ring->syncp); +- packets = ring->stats.packets; +- bytes = ring->stats.bytes; +- } while (u64_stats_fetch_retry_irq(&ring->syncp, start)); +- stats->tx_packets += packets; +- stats->tx_bytes += bytes; ++ adapter->q_vector[i]->netpoll_rx = false; + } ++ } else { ++ ixgbe_intr(0, adapter); + } +- rcu_read_unlock(); +- /* following stats updated by ixgbe_watchdog_task() */ +- stats->multicast = netdev->stats.multicast; +- stats->rx_errors = netdev->stats.rx_errors; +- stats->rx_length_errors = netdev->stats.rx_length_errors; +- stats->rx_crc_errors = netdev->stats.rx_crc_errors; +- stats->rx_missed_errors = netdev->stats.rx_missed_errors; +- return stats; + } ++#endif /* CONFIG_NET_POLL_CONTROLLER */ + +-#ifdef CONFIG_IXGBE_DCB +-/** +- * ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. ++/* ixgbe_validate_rtr - verify 802.1Qp to Rx packet buffer mapping is valid. 
+ * @adapter: pointer to ixgbe_adapter + * @tc: number of traffic classes currently enabled + * +@@ -7516,7 +9765,7 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) + reg = IXGBE_READ_REG(hw, IXGBE_RTRUP2TC); + rsave = reg; + +- for (i = 0; i < MAX_TRAFFIC_CLASS; i++) { ++ for (i = 0; i < IXGBE_DCB_MAX_TRAFFIC_CLASS; i++) { + u8 up2tc = reg >> (i * IXGBE_RTRUP2TC_UP_SHIFT); + + /* If up2tc is out of bounds default to zero */ +@@ -7536,14 +9785,15 @@ static void ixgbe_validate_rtr(struct ixgbe_adapter *adapter, u8 tc) + * + * Populate the netdev user priority to tc map + */ +-static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) ++static void ixgbe_set_prio_tc_map(struct ixgbe_adapter __maybe_unused *adapter) + { ++#ifdef HAVE_DCBNL_IEEE + struct net_device *dev = adapter->netdev; + struct ixgbe_dcb_config *dcb_cfg = &adapter->dcb_cfg; + struct ieee_ets *ets = adapter->ixgbe_ieee_ets; + u8 prio; + +- for (prio = 0; prio < MAX_USER_PRIORITY; prio++) { ++ for (prio = 0; prio < IXGBE_DCB_MAX_USER_PRIORITY; prio++) { + u8 tc = 0; + + if (adapter->dcbx_cap & DCB_CAP_DCBX_VER_CEE) +@@ -7553,11 +9803,30 @@ static void ixgbe_set_prio_tc_map(struct ixgbe_adapter *adapter) + + netdev_set_prio_tc_map(dev, prio, tc); + } ++#endif ++} ++ ++#ifdef NETIF_F_HW_TC ++static int ++__ixgbe_setup_tc(struct net_device *dev, __always_unused u32 handle, ++ __always_unused __be16 proto, struct tc_to_netdev *tc) ++{ ++ if (tc->type != TC_SETUP_MQPRIO) ++ return -EINVAL; ++ ++#ifdef TC_MQPRIO_HW_OFFLOAD_MAX ++ tc->mqprio->hw = TC_MQPRIO_HW_OFFLOAD_TCS; ++ ++ return ixgbe_setup_tc(dev, tc->mqprio->num_tc); ++#else ++ return ixgbe_setup_tc(dev, tc->tc); ++#endif + } ++#endif /* NETIF_F_HW_TC */ + +-#endif /* CONFIG_IXGBE_DCB */ + /** +- * ixgbe_setup_tc - configure net_device for multiple traffic classes ++ * ixgbe_setup_tc - routine to configure net_device for multiple traffic ++ * classes. 
+ * + * @netdev: net device to configure + * @tc: number of traffic classes to enable +@@ -7566,17 +9835,14 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) + { + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct ixgbe_hw *hw = &adapter->hw; +- bool pools; + + /* Hardware supports up to 8 traffic classes */ +- if (tc > adapter->dcb_cfg.num_tcs.pg_tcs || +- (hw->mac.type == ixgbe_mac_82598EB && +- tc < MAX_TRAFFIC_CLASS)) ++ if (tc > adapter->dcb_cfg.num_tcs.pg_tcs) + return -EINVAL; + +- pools = (find_first_zero_bit(&adapter->fwd_bitmask, 32) > 1); +- if (tc && pools && adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS) +- return -EBUSY; ++ if (tc && hw->mac.type == ixgbe_mac_82598EB && ++ tc < IXGBE_DCB_MAX_TRAFFIC_CLASS) ++ return -EINVAL; + + /* Hardware has to reinitialize queues and interrupts to + * match packet buffer alignment. Unfortunately, the +@@ -7584,9 +9850,11 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) + */ + if (netif_running(dev)) + ixgbe_close(dev); ++ else ++ ixgbe_reset(adapter); ++ + ixgbe_clear_interrupt_scheme(adapter); + +-#ifdef CONFIG_IXGBE_DCB + if (tc) { + netdev_set_num_tc(dev, tc); + ixgbe_set_prio_tc_map(adapter); +@@ -7611,11 +9879,9 @@ int ixgbe_setup_tc(struct net_device *dev, u8 tc) + + ixgbe_validate_rtr(adapter, tc); + +-#endif /* CONFIG_IXGBE_DCB */ + ixgbe_init_interrupt_scheme(adapter); +- + if (netif_running(dev)) +- return ixgbe_open(dev); ++ ixgbe_open(dev); + + return 0; + } +@@ -7629,8 +9895,8 @@ void ixgbe_sriov_reinit(struct ixgbe_adapter *adapter) + ixgbe_setup_tc(netdev, netdev_get_num_tc(netdev)); + rtnl_unlock(); + } +- + #endif ++ + void ixgbe_do_reset(struct net_device *netdev) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); +@@ -7641,11 +9907,25 @@ void ixgbe_do_reset(struct net_device *netdev) + ixgbe_reset(adapter); + } + ++#ifdef HAVE_NDO_SET_FEATURES ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++static u32 ixgbe_fix_features(struct net_device *netdev, u32 features) ++#else + static 
netdev_features_t ixgbe_fix_features(struct net_device *netdev, + netdev_features_t features) ++#endif + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + ++#if IS_ENABLED(CONFIG_DCB) ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) ++#ifdef NETIF_F_HW_VLAN_CTAG_RX ++ features |= NETIF_F_HW_VLAN_CTAG_RX; ++#else ++ features |= NETIF_F_HW_VLAN_RX; ++#endif ++#endif /* CONFIG_DCB */ ++ + /* If Rx checksum is disabled, then RSC/LRO should also be disabled */ + if (!(features & NETIF_F_RXCSUM)) + features &= ~NETIF_F_LRO; +@@ -7657,12 +9937,16 @@ static netdev_features_t ixgbe_fix_features(struct net_device *netdev, + return features; + } + ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++static int ixgbe_set_features(struct net_device *netdev, u32 features) ++#else + static int ixgbe_set_features(struct net_device *netdev, + netdev_features_t features) ++#endif + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); +- netdev_features_t changed = netdev->features ^ features; + bool need_reset = false; ++ netdev_features_t changed = netdev->features ^ features; + + /* Make sure RSC matches LRO, reset if change */ + if (!(features & NETIF_F_LRO)) { +@@ -7675,7 +9959,7 @@ static int ixgbe_set_features(struct net_device *netdev, + adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR) { + adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; + need_reset = true; +- } else if ((changed ^ features) & NETIF_F_LRO) { ++ } else if (changed & NETIF_F_LRO) { + e_info(probe, "rx-usecs set too low, " + "disabling RSC\n"); + } +@@ -7695,89 +9979,302 @@ static int ixgbe_set_features(struct net_device *netdev, + adapter->flags |= IXGBE_FLAG_FDIR_PERFECT_CAPABLE; + break; + default: +- /* turn off perfect filters, enable ATR and reset */ +- if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) +- need_reset = true; ++ /* turn off perfect filters, enable ATR and reset */ ++ if (adapter->flags & IXGBE_FLAG_FDIR_PERFECT_CAPABLE) ++ need_reset = true; ++ ++ adapter->flags &= 
~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ ++ /* We cannot enable ATR if VMDq is enabled */ ++ if (adapter->flags & IXGBE_FLAG_VMDQ_ENABLED) ++ break; ++ ++ /* We cannot enable ATR if we have 2 or more traffic classes */ ++ if (netdev_get_num_tc(netdev) > 1) ++ break; ++ ++ /* We cannot enable ATR if RSS is disabled */ ++ if (adapter->ring_feature[RING_F_RSS].limit <= 1) ++ break; ++ ++ /* A sample rate of 0 indicates ATR disabled */ ++ if (!adapter->atr_sample_rate) ++ break; ++ ++ adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ break; ++ } ++ ++ netdev->features = features; ++ ++#if defined(HAVE_UDP_ENC_RX_OFFLOAD) || defined(HAVE_VXLAN_RX_OFFLOAD) ++ if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE && ++ features & NETIF_F_RXCSUM) { ++ if (!need_reset) ++ adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; ++ } else { ++ u32 port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; ++ ++ ixgbe_clear_udp_tunnel_port(adapter, port_mask); ++ } ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD || HAVE_VXLAN_RX_OFFLOAD */ ++ ++#ifdef HAVE_UDP_ENC_RX_OFFLOAD ++ if (adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE && ++ features & NETIF_F_RXCSUM) { ++ if (!need_reset) ++ adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; ++ } else { ++ u32 port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; ++ ++ ixgbe_clear_udp_tunnel_port(adapter, port_mask); ++ } ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ ++ if (need_reset) ++ ixgbe_do_reset(netdev); ++#ifdef NETIF_F_HW_VLAN_CTAG_FILTER ++ else if (changed & (NETIF_F_HW_VLAN_CTAG_RX | ++ NETIF_F_HW_VLAN_CTAG_FILTER)) ++ ixgbe_set_rx_mode(netdev); ++#endif ++#ifdef NETIF_F_HW_VLAN_FILTER ++ else if (changed & (NETIF_F_HW_VLAN_RX | ++ NETIF_F_HW_VLAN_FILTER)) ++ ixgbe_set_rx_mode(netdev); ++#endif ++ return 0; ++ ++} ++#endif /* HAVE_NDO_SET_FEATURES */ ++ ++#ifdef HAVE_UDP_ENC_RX_OFFLOAD ++/** ++ * ixgbe_add_udp_tunnel_port - Get notifications about adding UDP tunnel ports ++ * @dev: The port's netdev ++ * @ti: Tunnel endpoint information ++ **/ ++static 
void ixgbe_add_udp_tunnel_port(struct net_device *dev, ++ struct udp_tunnel_info *ti) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(dev); ++ struct ixgbe_hw *hw = &adapter->hw; ++ __be16 port = ti->port; ++ u32 port_shift = 0; ++ u32 reg; ++ ++ if (ti->sa_family != AF_INET) ++ return; ++ ++ switch (ti->type) { ++ case UDP_TUNNEL_TYPE_VXLAN: ++ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) ++ return; ++ ++ if (adapter->vxlan_port == port) ++ return; ++ ++ if (adapter->vxlan_port) { ++ netdev_info(dev, ++ "VXLAN port %d set, not adding port %d\n", ++ ntohs(adapter->vxlan_port), ++ ntohs(port)); ++ return; ++ } ++ ++ adapter->vxlan_port = port; ++ break; ++ case UDP_TUNNEL_TYPE_GENEVE: ++ if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) ++ return; ++ ++ if (adapter->geneve_port == port) ++ return; ++ ++ if (adapter->geneve_port) { ++ netdev_info(dev, ++ "GENEVE port %d set, not adding port %d\n", ++ ntohs(adapter->geneve_port), ++ ntohs(port)); ++ return; ++ } ++ ++ port_shift = IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT; ++ adapter->geneve_port = port; ++ break; ++ default: ++ return; ++ } + +- adapter->flags &= ~IXGBE_FLAG_FDIR_PERFECT_CAPABLE; ++ reg = IXGBE_READ_REG(hw, IXGBE_VXLANCTRL) | ntohs(port) << port_shift; ++ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, reg); ++} + +- /* We cannot enable ATR if SR-IOV is enabled */ +- if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) +- break; ++/** ++ * ixgbe_del_udp_tunnel_port - Get notifications about removing UDP tunnel ports ++ * @dev: The port's netdev ++ * @ti: Tunnel endpoint information ++ **/ ++static void ixgbe_del_udp_tunnel_port(struct net_device *dev, ++ struct udp_tunnel_info *ti) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(dev); ++ u32 port_mask; + +- /* We cannot enable ATR if we have 2 or more traffic classes */ +- if (netdev_get_num_tc(netdev) > 1) +- break; ++ if (ti->type != UDP_TUNNEL_TYPE_VXLAN && ++ ti->type != UDP_TUNNEL_TYPE_GENEVE) ++ return; + +- /* We cannot enable ATR if RSS 
is disabled */ +- if (adapter->ring_feature[RING_F_RSS].limit <= 1) +- break; ++ if (ti->sa_family != AF_INET) ++ return; + +- /* A sample rate of 0 indicates ATR disabled */ +- if (!adapter->atr_sample_rate) +- break; ++ switch (ti->type) { ++ case UDP_TUNNEL_TYPE_VXLAN: ++ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) ++ return; + +- adapter->flags |= IXGBE_FLAG_FDIR_HASH_CAPABLE; ++ if (adapter->vxlan_port != ti->port) { ++ netdev_info(dev, "VXLAN port %d not found\n", ++ ntohs(ti->port)); ++ return; ++ } ++ ++ port_mask = IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK; ++ break; ++ case UDP_TUNNEL_TYPE_GENEVE: ++ if (!(adapter->flags & IXGBE_FLAG_GENEVE_OFFLOAD_CAPABLE)) ++ return; ++ ++ if (adapter->geneve_port != ti->port) { ++ netdev_info(dev, "GENEVE port %d not found\n", ++ ntohs(ti->port)); ++ return; ++ } ++ ++ port_mask = IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK; + break; ++ default: ++ return; + } + +- if (features & NETIF_F_HW_VLAN_CTAG_RX) +- ixgbe_vlan_strip_enable(adapter); +- else +- ixgbe_vlan_strip_disable(adapter); ++ ixgbe_clear_udp_tunnel_port(adapter, port_mask); ++ adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; ++} ++#elif defined(HAVE_VXLAN_RX_OFFLOAD) ++/** ++ * ixgbe_add_vxlan_port - Get notifications about VXLAN ports that come up ++ * @dev: The port's netdev ++ * @sa_family: Socket Family that VXLAN is notifiying us about ++ * @port: New UDP port number that VXLAN started listening to ++ * @type: Enumerated type specifying UDP tunnel type ++ */ ++static void ixgbe_add_vxlan_port(struct net_device *dev, sa_family_t sa_family, ++ __be16 port) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(dev); ++ struct ixgbe_hw *hw = &adapter->hw; + +- if (changed & NETIF_F_RXALL) +- need_reset = true; ++ if (sa_family != AF_INET) ++ return; + +- netdev->features = features; +- if (need_reset) +- ixgbe_do_reset(netdev); ++ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)) ++ return; + +- return 0; ++ if (adapter->vxlan_port == port) ++ 
return; ++ ++ if (adapter->vxlan_port) { ++ netdev_info(dev, ++ "Hit Max num of VXLAN ports, not adding port %d\n", ++ ntohs(port)); ++ return; ++ } ++ ++ adapter->vxlan_port = port; ++ IXGBE_WRITE_REG(hw, IXGBE_VXLANCTRL, ntohs(port)); + } + +-static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], +- struct net_device *dev, +- const unsigned char *addr, +- u16 flags) ++/** ++ * ixgbe_del_vxlan_port - Get notifications about VXLAN ports that go away ++ * @dev: The port's netdev ++ * @sa_family: Socket Family that VXLAN is notifying us about ++ * @port: UDP port number that VXLAN stopped listening to ++ * @type: Enumerated type specifying UDP tunnel type ++ */ ++static void ixgbe_del_vxlan_port(struct net_device *dev, sa_family_t sa_family, ++ __be16 port) + { + struct ixgbe_adapter *adapter = netdev_priv(dev); +- int err; + +- if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) +- return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); ++ if (!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE)) ++ return; + +- /* Hardware does not support aging addresses so if a +- * ndm_state is given only allow permanent addresses +- */ +- if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { +- pr_info("%s: FDB only supports static addresses\n", +- ixgbe_driver_name); +- return -EINVAL; ++ if (sa_family != AF_INET) ++ return; ++ ++ if (adapter->vxlan_port != port) { ++ netdev_info(dev, "Port %d was not found, not deleting\n", ++ ntohs(port)); ++ return; + } + ++ ixgbe_clear_udp_tunnel_port(adapter, IXGBE_VXLANCTRL_ALL_UDPPORT_MASK); ++ adapter->flags2 |= IXGBE_FLAG2_UDP_TUN_REREG_NEEDED; ++} ++#endif /* HAVE_VXLAN_RX_OFFLOAD */ ++ ++#ifdef HAVE_NDO_GSO_CHECK ++static bool ++ixgbe_gso_check(struct sk_buff *skb, __always_unused struct net_device *dev) ++{ ++ return vxlan_gso_check(skb); ++} ++#endif /* HAVE_NDO_GSO_CHECK */ ++ ++#ifdef HAVE_FDB_OPS ++#ifdef USE_CONST_DEV_UC_CHAR ++static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], ++ struct 
net_device *dev, ++ const unsigned char *addr, ++#ifdef HAVE_NDO_FDB_ADD_VID ++ u16 vid, ++#endif ++ u16 flags) ++#else ++static int ixgbe_ndo_fdb_add(struct ndmsg *ndm, ++ struct net_device *dev, ++ unsigned char *addr, ++ u16 flags) ++#endif /* USE_CONST_DEV_UC_CHAR */ ++{ ++ /* guarantee we can provide a unique filter for the unicast address */ + if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) { +- u32 rar_uc_entries = IXGBE_MAX_PF_MACVLANS; ++ struct ixgbe_adapter *adapter = netdev_priv(dev); ++ u16 pool = VMDQ_P(0); + +- if (netdev_uc_count(dev) < rar_uc_entries) +- err = dev_uc_add_excl(dev, addr); +- else +- err = -ENOMEM; +- } else if (is_multicast_ether_addr(addr)) { +- err = dev_mc_add_excl(dev, addr); +- } else { +- err = -EINVAL; ++ if (netdev_uc_count(dev) >= ixgbe_available_rars(adapter, pool)) ++ return -ENOMEM; + } + +- /* Only return duplicate errors if NLM_F_EXCL is set */ +- if (err == -EEXIST && !(flags & NLM_F_EXCL)) +- err = 0; +- +- return err; ++#ifdef USE_CONST_DEV_UC_CHAR ++#ifdef HAVE_NDO_FDB_ADD_VID ++ return ndo_dflt_fdb_add(ndm, tb, dev, addr, vid, flags); ++#else ++ return ndo_dflt_fdb_add(ndm, tb, dev, addr, flags); ++#endif /* HAVE_NDO_FDB_ADD_VID */ ++#else ++ return ndo_dflt_fdb_add(ndm, dev, addr, flags); ++#endif /* USE_CONST_DEV_UC_CHAR */ + } + ++#ifdef HAVE_BRIDGE_ATTRIBS ++#ifdef HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS ++static int ixgbe_ndo_bridge_setlink(struct net_device *dev, ++ struct nlmsghdr *nlh, ++ __always_unused u16 flags) ++#else + static int ixgbe_ndo_bridge_setlink(struct net_device *dev, + struct nlmsghdr *nlh) ++#endif /* HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS */ + { + struct ixgbe_adapter *adapter = netdev_priv(dev); + struct nlattr *attr, *br_spec; +@@ -7790,22 +10287,23 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, + + nla_for_each_nested(attr, br_spec, rem) { + __u16 mode; +- u32 reg = 0; + + if (nla_type(attr) != IFLA_BRIDGE_MODE) + continue; + + mode = nla_get_u16(attr); + if 
(mode == BRIDGE_MODE_VEPA) { +- reg = 0; +- adapter->flags2 &= ~IXGBE_FLAG2_BRIDGE_MODE_VEB; ++ adapter->flags |= IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; + } else if (mode == BRIDGE_MODE_VEB) { +- reg = IXGBE_PFDTXGSWC_VT_LBEN; +- adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; +- } else ++ adapter->flags &= ~IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; ++ } else { + return -EINVAL; ++ } ++ ++ adapter->bridge_mode = mode; + +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_PFDTXGSWC, reg); ++ /* re-configure settings related to bridge mode */ ++ ixgbe_configure_bridge_mode(adapter); + + e_info(drv, "enabling bridge mode: %s\n", + mode == BRIDGE_MODE_VEPA ? "VEPA" : "VEB"); +@@ -7814,9 +10312,19 @@ static int ixgbe_ndo_bridge_setlink(struct net_device *dev, + return 0; + } + ++#ifdef HAVE_NDO_BRIDGE_GETLINK_NLFLAGS + static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + struct net_device *dev, +- u32 filter_mask) ++ u32 __maybe_unused filter_mask, ++ int nlflags) ++#elif defined(HAVE_BRIDGE_FILTER) ++static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, ++ struct net_device *dev, ++ u32 __always_unused filter_mask) ++#else ++static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, ++ struct net_device *dev) ++#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ + { + struct ixgbe_adapter *adapter = netdev_priv(dev); + u16 mode; +@@ -7824,180 +10332,264 @@ static int ixgbe_ndo_bridge_getlink(struct sk_buff *skb, u32 pid, u32 seq, + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return 0; + +- if (adapter->flags2 & IXGBE_FLAG2_BRIDGE_MODE_VEB) +- mode = BRIDGE_MODE_VEB; +- else +- mode = BRIDGE_MODE_VEPA; +- ++ mode = adapter->bridge_mode; ++#ifdef HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT ++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags, ++ filter_mask, NULL); ++#elif defined(HAVE_NDO_BRIDGE_GETLINK_NLFLAGS) ++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0, nlflags); ++#elif 
defined(HAVE_NDO_FDB_ADD_VID) || \ ++ defined NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS ++ return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode, 0, 0); ++#else + return ndo_dflt_bridge_getlink(skb, pid, seq, dev, mode); +-} ++#endif /* HAVE_NDO_BRIDGE_GETLINK_NLFLAGS */ ++} ++#endif /* HAVE_BRIDGE_ATTRIBS */ ++#endif /* HAVE_FDB_OPS */ ++ ++#ifdef HAVE_NDO_FEATURES_CHECK ++#define IXGBE_MAX_TUNNEL_HDR_LEN 80 ++#ifdef NETIF_F_GSO_PARTIAL ++#define IXGBE_MAX_MAC_HDR_LEN 127 ++#define IXGBE_MAX_NETWORK_HDR_LEN 511 ++ ++static netdev_features_t ++ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, ++ netdev_features_t features) ++{ ++ unsigned int network_hdr_len, mac_hdr_len; ++ ++ /* Make certain the headers can be described by a context descriptor */ ++ mac_hdr_len = skb_network_header(skb) - skb->data; ++ if (unlikely(mac_hdr_len > IXGBE_MAX_MAC_HDR_LEN)) ++ return features & ~(NETIF_F_HW_CSUM | ++ NETIF_F_SCTP_CRC | ++ NETIF_F_HW_VLAN_CTAG_TX | ++ NETIF_F_TSO | ++ NETIF_F_TSO6); ++ ++ network_hdr_len = skb_checksum_start(skb) - skb_network_header(skb); ++ if (unlikely(network_hdr_len > IXGBE_MAX_NETWORK_HDR_LEN)) ++ return features & ~(NETIF_F_HW_CSUM | ++ NETIF_F_SCTP_CRC | ++ NETIF_F_TSO | ++ NETIF_F_TSO6); ++ ++ /* We can only support IPV4 TSO in tunnels if we can mangle the ++ * inner IP ID field, so strip TSO if MANGLEID is not supported. 
++ */ ++ if (skb->encapsulation && !(features & NETIF_F_TSO_MANGLEID)) ++ features &= ~NETIF_F_TSO; + +-static void *ixgbe_fwd_add(struct net_device *pdev, struct net_device *vdev) ++ return features; ++} ++#else ++static netdev_features_t ++ixgbe_features_check(struct sk_buff *skb, struct net_device *dev, ++ netdev_features_t features) + { +- struct ixgbe_fwd_adapter *fwd_adapter = NULL; +- struct ixgbe_adapter *adapter = netdev_priv(pdev); +- unsigned int limit; +- int pool, err; +- +-#ifdef CONFIG_RPS +- if (vdev->num_rx_queues != vdev->num_tx_queues) { +- netdev_info(pdev, "%s: Only supports a single queue count for TX and RX\n", +- vdev->name); +- return ERR_PTR(-EINVAL); +- } +-#endif +- /* Check for hardware restriction on number of rx/tx queues */ +- if (vdev->num_tx_queues > IXGBE_MAX_L2A_QUEUES || +- vdev->num_tx_queues == IXGBE_BAD_L2A_QUEUE) { +- netdev_info(pdev, +- "%s: Supports RX/TX Queue counts 1,2, and 4\n", +- pdev->name); +- return ERR_PTR(-EINVAL); +- } ++ if (!skb->encapsulation) ++ return features; + +- if (((adapter->flags & IXGBE_FLAG_DCB_ENABLED) && +- adapter->num_rx_pools > IXGBE_MAX_DCBMACVLANS - 1) || +- (adapter->num_rx_pools > IXGBE_MAX_MACVLANS)) +- return ERR_PTR(-EBUSY); ++ if (unlikely(skb_inner_mac_header(skb) - skb_transport_header(skb) > ++ IXGBE_MAX_TUNNEL_HDR_LEN)) ++ return features & ~NETIF_F_CSUM_MASK; + +- fwd_adapter = kcalloc(1, sizeof(struct ixgbe_fwd_adapter), GFP_KERNEL); +- if (!fwd_adapter) +- return ERR_PTR(-ENOMEM); +- +- pool = find_first_zero_bit(&adapter->fwd_bitmask, 32); +- adapter->num_rx_pools++; +- set_bit(pool, &adapter->fwd_bitmask); +- limit = find_last_bit(&adapter->fwd_bitmask, 32); +- +- /* Enable VMDq flag so device will be set in VM mode */ +- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED | IXGBE_FLAG_SRIOV_ENABLED; +- adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; +- adapter->ring_feature[RING_F_RSS].limit = vdev->num_tx_queues; +- +- /* Force reinit of ring allocation with VMDQ enabled */ +- 
err = ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); +- if (err) +- goto fwd_add_err; +- fwd_adapter->pool = pool; +- fwd_adapter->real_adapter = adapter; +- err = ixgbe_fwd_ring_up(vdev, fwd_adapter); +- if (err) +- goto fwd_add_err; +- netif_tx_start_all_queues(vdev); +- return fwd_adapter; +-fwd_add_err: +- /* unwind counter and free adapter struct */ +- netdev_info(pdev, +- "%s: dfwd hardware acceleration failed\n", vdev->name); +- clear_bit(pool, &adapter->fwd_bitmask); +- adapter->num_rx_pools--; +- kfree(fwd_adapter); +- return ERR_PTR(err); +-} +- +-static void ixgbe_fwd_del(struct net_device *pdev, void *priv) +-{ +- struct ixgbe_fwd_adapter *fwd_adapter = priv; +- struct ixgbe_adapter *adapter = fwd_adapter->real_adapter; +- unsigned int limit; +- +- clear_bit(fwd_adapter->pool, &adapter->fwd_bitmask); +- adapter->num_rx_pools--; +- +- limit = find_last_bit(&adapter->fwd_bitmask, 32); +- adapter->ring_feature[RING_F_VMDQ].limit = limit + 1; +- ixgbe_fwd_ring_down(fwd_adapter->netdev, fwd_adapter); +- ixgbe_setup_tc(pdev, netdev_get_num_tc(pdev)); +- netdev_dbg(pdev, "pool %i:%i queues %i:%i VSI bitmask %lx\n", +- fwd_adapter->pool, adapter->num_rx_pools, +- fwd_adapter->rx_base_queue, +- fwd_adapter->rx_base_queue + adapter->num_rx_queues_per_pool, +- adapter->fwd_bitmask); +- kfree(fwd_adapter); ++ return features; + } ++#endif /* NETIF_F_GSO_PARTIAL */ ++#endif /* HAVE_NDO_FEATURES_CHECK */ + ++#ifdef HAVE_NET_DEVICE_OPS + static const struct net_device_ops ixgbe_netdev_ops = { + .ndo_open = ixgbe_open, + .ndo_stop = ixgbe_close, + .ndo_start_xmit = ixgbe_xmit_frame, ++#if IS_ENABLED(CONFIG_FCOE) + .ndo_select_queue = ixgbe_select_queue, ++#else ++#ifndef HAVE_MQPRIO ++ .ndo_select_queue = __netdev_pick_tx, ++#endif ++#endif + .ndo_set_rx_mode = ixgbe_set_rx_mode, + .ndo_validate_addr = eth_validate_addr, + .ndo_set_mac_address = ixgbe_set_mac, + .ndo_change_mtu = ixgbe_change_mtu, + .ndo_tx_timeout = ixgbe_tx_timeout, ++#if defined(NETIF_F_HW_VLAN_TX) 
|| defined(NETIF_F_HW_VLAN_CTAG_TX) + .ndo_vlan_rx_add_vid = ixgbe_vlan_rx_add_vid, + .ndo_vlan_rx_kill_vid = ixgbe_vlan_rx_kill_vid, ++#endif + .ndo_do_ioctl = ixgbe_ioctl, ++#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT ++/* RHEL7 requires this to be defined to enable extended ops. RHEL7 uses the ++ * function get_ndo_ext to retrieve offsets for extended fields from with the ++ * net_device_ops struct and ndo_size is checked to determine whether or not ++ * the offset is valid. ++ */ ++ .ndo_size = sizeof(const struct net_device_ops), ++#endif ++#ifdef IFLA_VF_MAX + .ndo_set_vf_mac = ixgbe_ndo_set_vf_mac, ++#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN ++ .extended.ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, ++#else + .ndo_set_vf_vlan = ixgbe_ndo_set_vf_vlan, ++#endif ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + .ndo_set_vf_rate = ixgbe_ndo_set_vf_bw, ++#else ++ .ndo_set_vf_tx_rate = ixgbe_ndo_set_vf_bw, ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ ++#if defined(HAVE_VF_SPOOFCHK_CONFIGURE) && IS_ENABLED(CONFIG_PCI_IOV) + .ndo_set_vf_spoofchk = ixgbe_ndo_set_vf_spoofchk, ++#endif ++#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN ++ .ndo_set_vf_rss_query_en = ixgbe_ndo_set_vf_rss_query_en, ++#endif ++#ifdef HAVE_NDO_SET_VF_TRUST ++#ifdef HAVE_RHEL7_NET_DEVICE_OPS_EXT ++ .extended.ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, ++#else ++ .ndo_set_vf_trust = ixgbe_ndo_set_vf_trust, ++#endif /* HAVE_RHEL7_NET_DEVICE_OPS_EXT */ ++#endif /* HAVE_NDO_SET_VF_TRUST */ + .ndo_get_vf_config = ixgbe_ndo_get_vf_config, ++#endif /* IFLA_VF_MAX */ ++#ifdef HAVE_NDO_GET_STATS64 + .ndo_get_stats64 = ixgbe_get_stats64, +-#ifdef CONFIG_IXGBE_DCB ++#else ++ .ndo_get_stats = ixgbe_get_stats, ++#endif /* HAVE_NDO_GET_STATS64 */ ++#ifdef HAVE_SETUP_TC ++#ifdef NETIF_F_HW_TC ++ .ndo_setup_tc = __ixgbe_setup_tc, ++#else + .ndo_setup_tc = ixgbe_setup_tc, ++#endif /* NETIF_F_HW_TC */ + #endif + #ifdef CONFIG_NET_POLL_CONTROLLER + .ndo_poll_controller = ixgbe_netpoll, + #endif +-#ifdef CONFIG_NET_RX_BUSY_POLL +- 
.ndo_busy_poll = ixgbe_low_latency_recv, +-#endif +-#ifdef IXGBE_FCOE ++#ifndef HAVE_RHEL6_NET_DEVICE_EXTENDED ++#ifdef HAVE_NDO_BUSY_POLL ++ .ndo_busy_poll = ixgbe_busy_poll_recv, ++#endif /* HAVE_NDO_BUSY_POLL */ ++#endif /* !HAVE_RHEL6_NET_DEVICE_EXTENDED */ ++#if IS_ENABLED(CONFIG_FCOE) + .ndo_fcoe_ddp_setup = ixgbe_fcoe_ddp_get, ++#ifdef HAVE_NETDEV_OPS_FCOE_DDP_TARGET + .ndo_fcoe_ddp_target = ixgbe_fcoe_ddp_target, ++#endif + .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, ++#ifdef HAVE_NETDEV_OPS_FCOE_ENABLE + .ndo_fcoe_enable = ixgbe_fcoe_enable, + .ndo_fcoe_disable = ixgbe_fcoe_disable, ++#endif ++#ifdef HAVE_NETDEV_OPS_FCOE_GETWWN + .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn, +- .ndo_fcoe_get_hbainfo = ixgbe_fcoe_get_hbainfo, +-#endif /* IXGBE_FCOE */ +- .ndo_set_features = ixgbe_set_features, +- .ndo_fix_features = ixgbe_fix_features, ++#endif ++#endif /* CONFIG_FCOE */ ++#ifdef HAVE_VLAN_RX_REGISTER ++ .ndo_vlan_rx_register = &ixgbe_vlan_mode, ++#endif ++#ifdef HAVE_FDB_OPS + .ndo_fdb_add = ixgbe_ndo_fdb_add, ++#ifndef USE_DEFAULT_FDB_DEL_DUMP ++ .ndo_fdb_del = ndo_dflt_fdb_del, ++ .ndo_fdb_dump = ndo_dflt_fdb_dump, ++#endif ++#ifdef HAVE_BRIDGE_ATTRIBS + .ndo_bridge_setlink = ixgbe_ndo_bridge_setlink, + .ndo_bridge_getlink = ixgbe_ndo_bridge_getlink, +- .ndo_dfwd_add_station = ixgbe_fwd_add, +- .ndo_dfwd_del_station = ixgbe_fwd_del, ++#endif /* HAVE_BRIDGE_ATTRIBS */ ++#endif ++#ifdef HAVE_UDP_ENC_RX_OFFLOAD ++#ifdef HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL ++ .extended.ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, ++ .extended.ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, ++#else ++ .ndo_udp_tunnel_add = ixgbe_add_udp_tunnel_port, ++ .ndo_udp_tunnel_del = ixgbe_del_udp_tunnel_port, ++#endif ++#elif defined(HAVE_VXLAN_RX_OFFLOAD) ++ .ndo_add_vxlan_port = ixgbe_add_vxlan_port, ++ .ndo_del_vxlan_port = ixgbe_del_vxlan_port, ++#endif /* HAVE_UDP_ENC_RX_OFFLOAD */ ++#ifdef HAVE_NDO_GSO_CHECK ++ .ndo_gso_check = ixgbe_gso_check, ++#endif /* HAVE_NDO_GSO_CHECK */ 
++#ifdef HAVE_NDO_FEATURES_CHECK ++ .ndo_features_check = ixgbe_features_check, ++#endif /* HAVE_NDO_FEATURES_CHECK */ ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT + }; + +-/** +- * ixgbe_enumerate_functions - Get the number of ports this device has +- * @adapter: adapter structure +- * +- * This function enumerates the phsyical functions co-located on a single slot, +- * in order to determine how many ports a device has. This is most useful in +- * determining the required GT/s of PCIe bandwidth necessary for optimal +- * performance. +- **/ +-static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) +-{ +- struct list_head *entry; +- int physfns = 0; ++/* RHEL6 keeps these operations in a separate structure */ ++static const struct net_device_ops_ext ixgbe_netdev_ops_ext = { ++ .size = sizeof(struct net_device_ops_ext), ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ ++#ifdef HAVE_NDO_SET_FEATURES ++ .ndo_set_features = ixgbe_set_features, ++ .ndo_fix_features = ixgbe_fix_features, ++#endif /* HAVE_NDO_SET_FEATURES */ ++}; ++#endif /* HAVE_NET_DEVICE_OPS */ ++ ++void ixgbe_assign_netdev_ops(struct net_device *dev) ++{ ++#ifdef HAVE_NET_DEVICE_OPS ++ dev->netdev_ops = &ixgbe_netdev_ops; ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ set_netdev_ops_ext(dev, &ixgbe_netdev_ops_ext); ++#endif /* HAVE_RHEL6_NET_DEVICE_OPS_EXT */ ++#else /* HAVE_NET_DEVICE_OPS */ ++ dev->open = &ixgbe_open; ++ dev->stop = &ixgbe_close; ++ dev->hard_start_xmit = &ixgbe_xmit_frame; ++ dev->get_stats = &ixgbe_get_stats; ++#ifdef HAVE_SET_RX_MODE ++ dev->set_rx_mode = &ixgbe_set_rx_mode; ++#endif ++ dev->set_multicast_list = &ixgbe_set_rx_mode; ++ dev->set_mac_address = &ixgbe_set_mac; ++ dev->change_mtu = &ixgbe_change_mtu; ++ dev->do_ioctl = &ixgbe_ioctl; ++#ifdef HAVE_TX_TIMEOUT ++ dev->tx_timeout = &ixgbe_tx_timeout; ++#endif ++#if defined(NETIF_F_HW_VLAN_TX) || defined(NETIF_F_HW_VLAN_CTAG_TX) ++ dev->vlan_rx_register = &ixgbe_vlan_mode; ++ dev->vlan_rx_add_vid = 
&ixgbe_vlan_rx_add_vid; ++ dev->vlan_rx_kill_vid = &ixgbe_vlan_rx_kill_vid; ++#endif ++#ifdef CONFIG_NET_POLL_CONTROLLER ++ dev->poll_controller = &ixgbe_netpoll; ++#endif ++#ifdef HAVE_NETDEV_SELECT_QUEUE ++#if IS_ENABLED(CONFIG_FCOE) ++ dev->select_queue = &ixgbe_select_queue; ++#else ++ dev->select_queue = &__netdev_pick_tx; ++#endif ++#endif /* HAVE_NETDEV_SELECT_QUEUE */ ++#endif /* HAVE_NET_DEVICE_OPS */ + +- /* Some cards can not use the generic count PCIe functions method, +- * because they are behind a parent switch, so we hardcode these with +- * the correct number of functions. +- */ +- if (ixgbe_pcie_from_parent(&adapter->hw)) { +- physfns = 4; +- } else { +- list_for_each(entry, &adapter->pdev->bus_list) { +- struct pci_dev *pdev = +- list_entry(entry, struct pci_dev, bus_list); +- /* don't count virtual functions */ +- if (!pdev->is_virtfn) +- physfns++; +- } +- } ++#ifdef HAVE_RHEL6_NET_DEVICE_EXTENDED ++#ifdef HAVE_NDO_BUSY_POLL ++ netdev_extended(dev)->ndo_busy_poll = ixgbe_busy_poll_recv; ++#endif /* HAVE_NDO_BUSY_POLL */ ++#endif /* HAVE_RHEL6_NET_DEVICE_EXTENDED */ + +- return physfns; ++ ixgbe_set_ethtool_ops(dev); ++ dev->watchdog_timeo = 5 * HZ; + } + + /** + * ixgbe_wol_supported - Check whether device supports WoL +- * @hw: hw specific details ++ * @adapter: the adapter private structure + * @device_id: the device ID + * @subdev_id: the subsystem device ID + * +@@ -8005,59 +10597,144 @@ static inline int ixgbe_enumerate_functions(struct ixgbe_adapter *adapter) + * which devices have WoL support + * + **/ +-int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, +- u16 subdevice_id) ++bool ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, ++ u16 subdevice_id) + { + struct ixgbe_hw *hw = &adapter->hw; + u16 wol_cap = adapter->eeprom_cap & IXGBE_DEVICE_CAPS_WOL_MASK; +- int is_wol_supported = 0; + ++ /* WOL not supported on 82598 */ ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ return false; ++ ++ /* check eeprom 
to see if WOL is enabled for X540 and newer */ ++ if (hw->mac.type >= ixgbe_mac_X540) { ++ if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || ++ ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && ++ (hw->bus.func == 0))) ++ return true; ++ } ++ ++ /* WOL is determined based on device IDs for 82599 MACs */ + switch (device_id) { + case IXGBE_DEV_ID_82599_SFP: + /* Only these subdevices could supports WOL */ + switch (subdevice_id) { +- case IXGBE_SUBDEV_ID_82599_SFP_WOL0: + case IXGBE_SUBDEV_ID_82599_560FLR: ++ case IXGBE_SUBDEV_ID_82599_LOM_SNAP6: ++ case IXGBE_SUBDEV_ID_82599_SFP_WOL0: ++ case IXGBE_SUBDEV_ID_82599_SFP_2OCP: + /* only support first port */ + if (hw->bus.func != 0) + break; ++ /* fall through */ + case IXGBE_SUBDEV_ID_82599_SP_560FLR: + case IXGBE_SUBDEV_ID_82599_SFP: + case IXGBE_SUBDEV_ID_82599_RNDC: + case IXGBE_SUBDEV_ID_82599_ECNA_DP: +- case IXGBE_SUBDEV_ID_82599_LOM_SFP: +- is_wol_supported = 1; +- break; ++ case IXGBE_SUBDEV_ID_82599_SFP_1OCP: ++ case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1: ++ case IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2: ++ return true; + } + break; + case IXGBE_DEV_ID_82599EN_SFP: +- /* Only this subdevice supports WOL */ ++ /* Only these subdevices support WoL */ + switch (subdevice_id) { + case IXGBE_SUBDEV_ID_82599EN_SFP_OCP1: +- is_wol_supported = 1; +- break; ++ return true; + } + break; + case IXGBE_DEV_ID_82599_COMBO_BACKPLANE: + /* All except this subdevice support WOL */ + if (subdevice_id != IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ) +- is_wol_supported = 1; ++ return true; + break; + case IXGBE_DEV_ID_82599_KX4: +- is_wol_supported = 1; ++ return true; ++ default: + break; +- case IXGBE_DEV_ID_X540T: +- case IXGBE_DEV_ID_X540T1: +- /* check eeprom to see if enabled wol */ +- if ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0_1) || +- ((wol_cap == IXGBE_DEVICE_CAPS_WOL_PORT0) && +- (hw->bus.func == 0))) { +- is_wol_supported = 1; ++ } ++ ++ return false; ++} ++ ++/** ++ * ixgbe_set_fw_version - Set FW version ++ * @adapter: the adapter 
private structure ++ * ++ * This function is used by probe and ethtool to determine the FW version to ++ * format to display. The FW version is taken from the EEPROM/NVM. ++ * ++ **/ ++static void ixgbe_set_fw_version(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u16 eeprom_verh = 0, eeprom_verl = 0; ++ u16 offset = 0; ++ u32 etrack_id; ++ ++ /* Check for OEM Product Version block format */ ++ hw->eeprom.ops.read(hw, 0x1b, &offset); ++ ++ /* Make sure offset to OEM Product Version block is valid */ ++ if (!(offset == 0x0) && !(offset == 0xffff)) { ++ u16 mod_len = 0, cap = 0, prod_ver = 0, rel_num = 0; ++ u16 build, major, patch; ++ ++ /* Read product version block */ ++ hw->eeprom.ops.read(hw, offset, &mod_len); ++ hw->eeprom.ops.read(hw, offset + 0x1, &cap); ++ hw->eeprom.ops.read(hw, offset + 0x2, &prod_ver); ++ hw->eeprom.ops.read(hw, offset + 0x3, &rel_num); ++ ++ /* Only display OEM product version if valid block */ ++ if (mod_len == 0x3 && (cap & 0xf) == 0x0) { ++ major = prod_ver >> 8; ++ build = prod_ver & 0xff; ++ patch = rel_num; ++ ++ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), ++ "%x.%x.%x", major, build, patch); ++ return; ++ } ++ } ++ ++ /* ++ * Save off EEPROM version number and Option Rom version which ++ * together make a unique identify for the eeprom ++ */ ++ hw->eeprom.ops.read(hw, 0x2e, &eeprom_verh); ++ hw->eeprom.ops.read(hw, 0x2d, &eeprom_verl); ++ etrack_id = (eeprom_verh << 16) | eeprom_verl; ++ ++ /* Check for SCSI block version format */ ++ hw->eeprom.ops.read(hw, 0x17, &offset); ++ ++ /* Make sure offset to SCSI block is valid */ ++ if (!(offset == 0x0) && !(offset == 0xffff)) { ++ u16 eeprom_cfg_blkh = 0, eeprom_cfg_blkl = 0; ++ u16 build, major, patch; ++ ++ hw->eeprom.ops.read(hw, offset + 0x84, &eeprom_cfg_blkh); ++ hw->eeprom.ops.read(hw, offset + 0x83, &eeprom_cfg_blkl); ++ ++ /* Only display Option Rom if exist */ ++ if (eeprom_cfg_blkl && eeprom_cfg_blkh) { ++ major = eeprom_cfg_blkl 
>> 8; ++ build = (eeprom_cfg_blkl << 8) | (eeprom_cfg_blkh >> 8); ++ patch = eeprom_cfg_blkh & 0x00ff; ++ ++ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), ++ "0x%08x, %d.%d.%d", etrack_id, major, build, ++ patch); ++ return; + } +- break; + } + +- return is_wol_supported; ++ /* Set ETrack ID format */ ++ snprintf(adapter->eeprom_id, sizeof(adapter->eeprom_id), ++ "0x%08x", etrack_id); + } + + /** +@@ -8071,85 +10748,125 @@ int ixgbe_wol_supported(struct ixgbe_adapter *adapter, u16 device_id, + * The OS initialization, configuring of the adapter private structure, + * and a hardware reset occur. + **/ +-static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) ++static int __devinit ixgbe_probe(struct pci_dev *pdev, ++ const struct pci_device_id __always_unused *ent) + { + struct net_device *netdev; + struct ixgbe_adapter *adapter = NULL; +- struct ixgbe_hw *hw; +- const struct ixgbe_info *ii = ixgbe_info_tbl[ent->driver_data]; ++ struct ixgbe_hw *hw = NULL; + static int cards_found; +- int i, err, pci_using_dac, expected_gts; +- unsigned int indices = MAX_TX_QUEUES; ++ int err, pci_using_dac, expected_gts; ++ char *info_string, *i_s_var; + u8 part_str[IXGBE_PBANUM_LENGTH]; ++ enum ixgbe_mac_type mac_type = ixgbe_mac_unknown; ++#ifdef HAVE_TX_MQ ++ unsigned int indices = MAX_TX_QUEUES; ++#endif /* HAVE_TX_MQ */ + bool disable_dev = false; +-#ifdef IXGBE_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + u16 device_caps; + #endif +- u32 eec; +- +- /* Catch broken hardware that put the wrong VF device ID in +- * the PCIe SR-IOV capability. 
+- */ +- if (pdev->is_virtfn) { +- WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n", +- pci_name(pdev), pdev->vendor, pdev->device); +- return -EINVAL; +- } ++#ifndef NETIF_F_GSO_PARTIAL ++#ifdef HAVE_NDO_SET_FEATURES ++#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ netdev_features_t hw_features; ++#else ++ u32 hw_features; ++#endif ++#endif ++#endif /* NETIF_F_GSO_PARTIAL */ + + err = pci_enable_device_mem(pdev); + if (err) + return err; + +- if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) { ++ if (!dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64)) && ++ !dma_set_coherent_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(64))) { + pci_using_dac = 1; + } else { +- err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); ++ err = dma_set_mask(pci_dev_to_dev(pdev), DMA_BIT_MASK(32)); + if (err) { +- dev_err(&pdev->dev, +- "No usable DMA configuration, aborting\n"); +- goto err_dma; ++ err = dma_set_coherent_mask(pci_dev_to_dev(pdev), ++ DMA_BIT_MASK(32)); ++ if (err) { ++ dev_err(pci_dev_to_dev(pdev), "No usable DMA " ++ "configuration, aborting\n"); ++ goto err_dma; ++ } + } + pci_using_dac = 0; + } + +- err = pci_request_selected_regions(pdev, pci_select_bars(pdev, +- IORESOURCE_MEM), ixgbe_driver_name); ++ err = pci_request_mem_regions(pdev, ixgbe_driver_name); + if (err) { +- dev_err(&pdev->dev, ++ dev_err(pci_dev_to_dev(pdev), + "pci_request_selected_regions failed 0x%x\n", err); + goto err_pci_reg; + } + ++ /* ++ * The mac_type is needed before we have the adapter is set up ++ * so rather than maintain two devID -> MAC tables we dummy up ++ * an ixgbe_hw stuct and use ixgbe_set_mac_type. ++ */ ++ hw = vmalloc(sizeof(struct ixgbe_hw)); ++ if (!hw) { ++ pr_info("Unable to allocate memory for early mac " ++ "check\n"); ++ } else { ++ hw->vendor_id = pdev->vendor; ++ hw->device_id = pdev->device; ++ ixgbe_set_mac_type(hw); ++ mac_type = hw->mac.type; ++ vfree(hw); ++ } ++ ++ /* ++ * Workaround of Silicon errata on 82598. 
Disable LOs in the PCI switch ++ * port to which the 82598 is connected to prevent duplicate ++ * completions caused by LOs. We need the mac type so that we only ++ * do this on 82598 devices, ixgbe_set_mac_type does this for us if ++ * we set it's device ID. ++ */ ++ if (mac_type == ixgbe_mac_82598EB) ++ pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S); ++ + pci_enable_pcie_error_reporting(pdev); + + pci_set_master(pdev); +- pci_save_state(pdev); + +- if (ii->mac == ixgbe_mac_82598EB) { +-#ifdef CONFIG_IXGBE_DCB +- /* 8 TC w/ 4 queues per TC */ +- indices = 4 * MAX_TRAFFIC_CLASS; +-#else ++#ifdef HAVE_TX_MQ ++ if (mac_type == ixgbe_mac_82598EB) { ++#if IS_ENABLED(CONFIG_DCB) ++ indices = IXGBE_MAX_DCB_INDICES * 4; ++#else /* CONFIG_DCB */ + indices = IXGBE_MAX_RSS_INDICES; +-#endif ++#endif /* !CONFIG_DCB */ + } + + netdev = alloc_etherdev_mq(sizeof(struct ixgbe_adapter), indices); ++#else /* HAVE_TX_MQ */ ++ netdev = alloc_etherdev(sizeof(struct ixgbe_adapter)); ++#endif /* HAVE_TX_MQ */ + if (!netdev) { + err = -ENOMEM; + goto err_alloc_etherdev; + } + +- SET_NETDEV_DEV(netdev, &pdev->dev); ++ SET_MODULE_OWNER(netdev); ++ SET_NETDEV_DEV(netdev, pci_dev_to_dev(pdev)); + + adapter = netdev_priv(netdev); +- pci_set_drvdata(pdev, adapter); +- ++#ifdef HAVE_TX_MQ ++#ifndef HAVE_NETDEV_SELECT_QUEUE ++ adapter->indices = indices; ++#endif ++#endif + adapter->netdev = netdev; + adapter->pdev = pdev; + hw = &adapter->hw; + hw->back = adapter; +- adapter->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE); ++ adapter->msg_enable = (1 << DEFAULT_DEBUG_LEVEL_SHIFT) - 1; + + hw->hw_addr = ioremap(pci_resource_start(pdev, 0), + pci_resource_len(pdev, 0)); +@@ -8159,50 +10876,29 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + goto err_ioremap; + } + +- netdev->netdev_ops = &ixgbe_netdev_ops; +- ixgbe_set_ethtool_ops(netdev); +- netdev->watchdog_timeo = 5 * HZ; ++ ixgbe_assign_netdev_ops(netdev); ++ + strncpy(netdev->name, pci_name(pdev), 
sizeof(netdev->name) - 1); + + adapter->bd_number = cards_found; + +- /* Setup hw api */ +- memcpy(&hw->mac.ops, ii->mac_ops, sizeof(hw->mac.ops)); +- hw->mac.type = ii->mac; +- +- /* EEPROM */ +- memcpy(&hw->eeprom.ops, ii->eeprom_ops, sizeof(hw->eeprom.ops)); +- eec = IXGBE_READ_REG(hw, IXGBE_EEC); +- if (ixgbe_removed(hw->hw_addr)) { +- err = -EIO; +- goto err_ioremap; +- } +- /* If EEPROM is valid (bit 8 = 1), use default otherwise use bit bang */ +- if (!(eec & (1 << 8))) +- hw->eeprom.ops.read = &ixgbe_read_eeprom_bit_bang_generic; +- +- /* PHY */ +- memcpy(&hw->phy.ops, ii->phy_ops, sizeof(hw->phy.ops)); +- hw->phy.sfp_type = ixgbe_sfp_type_unknown; +- /* ixgbe_identify_phy_generic will set prtad and mmds properly */ +- hw->phy.mdio.prtad = MDIO_PRTAD_NONE; +- hw->phy.mdio.mmds = 0; +- hw->phy.mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22; +- hw->phy.mdio.dev = netdev; +- hw->phy.mdio.mdio_read = ixgbe_mdio_read; +- hw->phy.mdio.mdio_write = ixgbe_mdio_write; +- +- ii->get_invariants(hw); +- ++ ixgbe_get_hw_control(adapter); + /* setup the private structure */ + err = ixgbe_sw_init(adapter); + if (err) + goto err_sw_init; + ++ /* Make sure the SWFW semaphore is in a valid state */ ++ if (hw->mac.ops.init_swfw_sync) ++ hw->mac.ops.init_swfw_sync(hw); ++ + /* Make it possible the adapter to be woken up via WOL */ + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + break; + default: +@@ -8210,8 +10906,8 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + } + + /* +- * If there is a fan on this device and it has failed log the +- * failure. ++ * If we have a fan, this is as early we know, warn if we ++ * have had a failure. 
+ */ + if (adapter->flags & IXGBE_FLAG_FAN_FAIL_CAPABLE) { + u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); +@@ -8219,19 +10915,23 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + e_crit(probe, "Fan has stopped, replace the adapter\n"); + } + +- if (allow_unsupported_sfp) +- hw->allow_unsupported_sfp = allow_unsupported_sfp; ++ /* ++ * check_options must be called before setup_link to set up ++ * hw->fc completely ++ */ ++ ixgbe_check_options(adapter); + + /* reset_hw fills in the perm_addr as well */ + hw->phy.reset_if_overtemp = true; + err = hw->mac.ops.reset_hw(hw); + hw->phy.reset_if_overtemp = false; +- if (err == IXGBE_ERR_SFP_NOT_PRESENT && +- hw->mac.type == ixgbe_mac_82598EB) { +- err = 0; ++ if (err == IXGBE_ERR_SFP_NOT_PRESENT) { ++ err = IXGBE_SUCCESS; + } else if (err == IXGBE_ERR_SFP_NOT_SUPPORTED) { +- e_dev_err("failed to load because an unsupported SFP+ or QSFP module type was detected.\n"); +- e_dev_err("Reload the driver after installing a supported module.\n"); ++ e_dev_err("failed to load because an unsupported SFP+ or QSFP " ++ "module type was detected.\n"); ++ e_dev_err("Reload the driver after installing a supported " ++ "module.\n"); + goto err_sw_init; + } else if (err) { + e_dev_err("HW Init failed: %d\n", err); +@@ -8239,96 +10939,240 @@ static int ixgbe_probe(struct pci_dev *pdev, const struct pci_device_id *ent) + } + + #ifdef CONFIG_PCI_IOV +- /* SR-IOV not supported on the 82598 */ +- if (adapter->hw.mac.type == ixgbe_mac_82598EB) +- goto skip_sriov; +- /* Mailbox */ +- ixgbe_init_mbx_params_pf(hw); +- memcpy(&hw->mbx.ops, ii->mbx_ops, sizeof(hw->mbx.ops)); +- pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); +- ixgbe_enable_sriov(adapter); +-skip_sriov: ++#if defined(HAVE_SRIOV_CONFIGURE) || defined(HAVE_RHEL6_SRIOV_CONFIGURE) ++ if (adapter->max_vfs > 0) { ++ e_dev_warn("Enabling SR-IOV VFs using the max_vfs module parameter is deprecated.\n"); ++ e_dev_warn("Please use the pci sysfs interface 
instead. Ex:\n"); ++ e_dev_warn("echo '%d' > /sys/bus/pci/devices/%04x:%02x:%02x.%1x/sriov_numvfs\n", ++ adapter->max_vfs, ++ pci_domain_nr(pdev->bus), ++ pdev->bus->number, ++ PCI_SLOT(pdev->devfn), ++ PCI_FUNC(pdev->devfn) ++ ); ++ } ++ ++#endif ++ if (adapter->flags & IXGBE_FLAG_SRIOV_CAPABLE) { ++ pci_sriov_set_totalvfs(pdev, IXGBE_MAX_VFS_DRV_LIMIT); ++ ixgbe_enable_sriov(adapter); ++ } ++ ++#endif /* CONFIG_PCI_IOV */ + +-#endif ++#ifdef NETIF_F_GSO_PARTIAL + netdev->features = NETIF_F_SG | +- NETIF_F_IP_CSUM | +- NETIF_F_IPV6_CSUM | +- NETIF_F_HW_VLAN_CTAG_TX | +- NETIF_F_HW_VLAN_CTAG_RX | +- NETIF_F_HW_VLAN_CTAG_FILTER | + NETIF_F_TSO | + NETIF_F_TSO6 | + NETIF_F_RXHASH | +- NETIF_F_RXCSUM; ++ NETIF_F_RXCSUM | ++ NETIF_F_HW_CSUM; ++ ++ netdev->gso_partial_features = IXGBE_GSO_PARTIAL_FEATURES; ++ netdev->features |= NETIF_F_GSO_PARTIAL | ++ IXGBE_GSO_PARTIAL_FEATURES; ++ ++ if (hw->mac.type >= ixgbe_mac_82599EB) ++ netdev->features |= NETIF_F_SCTP_CRC; ++ ++ /* copy netdev features into list of user selectable features */ ++ netdev->hw_features |= netdev->features | ++ NETIF_F_HW_VLAN_CTAG_FILTER | ++ NETIF_F_HW_VLAN_CTAG_RX | ++ NETIF_F_HW_VLAN_CTAG_TX | ++ NETIF_F_RXALL | ++ NETIF_F_HW_L2FW_DOFFLOAD; ++ ++ if (hw->mac.type >= ixgbe_mac_82599EB) ++ netdev->hw_features |= NETIF_F_NTUPLE | ++ NETIF_F_HW_TC; ++ ++ if (pci_using_dac) ++ netdev->features |= NETIF_F_HIGHDMA; ++ ++ netdev->vlan_features |= netdev->features | NETIF_F_TSO_MANGLEID; ++ netdev->hw_enc_features |= netdev->vlan_features; ++ netdev->mpls_features |= NETIF_F_HW_CSUM; ++ ++ /* set this bit last since it cannot be part of vlan_features */ ++ netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER | ++ NETIF_F_HW_VLAN_CTAG_RX | ++ NETIF_F_HW_VLAN_CTAG_TX; ++ ++ netdev->priv_flags |= IFF_UNICAST_FLT; ++ netdev->priv_flags |= IFF_SUPP_NOFCS; ++ ++ /* give us the option of enabling RSC/LRO later */ ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ++ netdev->hw_features |= NETIF_F_LRO; + +- 
netdev->hw_features = netdev->features | NETIF_F_HW_L2FW_DOFFLOAD; ++#else /* NETIF_F_GSO_PARTIAL */ ++ netdev->features |= NETIF_F_SG | ++ NETIF_F_IP_CSUM; ++ ++#ifdef NETIF_F_IPV6_CSUM ++ netdev->features |= NETIF_F_IPV6_CSUM; ++#endif ++ ++#ifdef NETIF_F_HW_VLAN_CTAG_TX ++ netdev->features |= NETIF_F_HW_VLAN_CTAG_TX | ++ NETIF_F_HW_VLAN_CTAG_FILTER | ++ NETIF_F_HW_VLAN_CTAG_RX; ++#endif ++ ++#ifdef NETIF_F_HW_VLAN_TX ++ netdev->features |= NETIF_F_HW_VLAN_TX | ++ NETIF_F_HW_VLAN_FILTER | ++ NETIF_F_HW_VLAN_RX; ++#endif ++ netdev->features |= ixgbe_tso_features(); ++#ifdef NETIF_F_RXHASH ++ netdev->features |= NETIF_F_RXHASH; ++#endif /* NETIF_F_RXHASH */ ++ netdev->features |= NETIF_F_RXCSUM; ++ ++#ifdef HAVE_NDO_SET_FEATURES ++ /* copy netdev features into list of user selectable features */ ++#ifndef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ hw_features = netdev->hw_features; ++#else ++ hw_features = get_netdev_hw_features(netdev); ++#endif ++ hw_features |= netdev->features; ++ ++ /* give us the option of enabling RSC/LRO later */ ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) ++ hw_features |= NETIF_F_LRO; ++ ++#else ++#ifdef NETIF_F_GRO ++ ++ /* this is only needed on kernels prior to 2.6.39 */ ++ netdev->features |= NETIF_F_GRO; ++#endif /* NETIF_F_GRO */ ++#endif /* HAVE_NDO_SET_FEATURES */ + + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: + case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + netdev->features |= NETIF_F_SCTP_CSUM; +- netdev->hw_features |= NETIF_F_SCTP_CSUM | ++#ifdef HAVE_NDO_SET_FEATURES ++ hw_features |= NETIF_F_SCTP_CSUM | + NETIF_F_NTUPLE; ++#endif + break; + default: + break; + } ++#ifdef HAVE_NDO_SET_FEATURES ++#ifdef HAVE_RHEL6_NET_DEVICE_OPS_EXT ++ set_netdev_hw_features(netdev, hw_features); ++#else ++ netdev->hw_features = hw_features; ++#endif ++#endif + +- netdev->hw_features |= NETIF_F_RXALL; +- +- netdev->vlan_features |= NETIF_F_TSO; +- netdev->vlan_features |= 
NETIF_F_TSO6; +- netdev->vlan_features |= NETIF_F_IP_CSUM; +- netdev->vlan_features |= NETIF_F_IPV6_CSUM; +- netdev->vlan_features |= NETIF_F_SG; +- ++#ifdef HAVE_NETDEV_VLAN_FEATURES ++ netdev->vlan_features |= NETIF_F_SG | ++ NETIF_F_IP_CSUM | ++ NETIF_F_IPV6_CSUM | ++ NETIF_F_TSO | ++ NETIF_F_TSO6; ++ ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ ++#ifdef HAVE_ENCAP_CSUM_OFFLOAD ++ netdev->hw_enc_features |= NETIF_F_SG; ++#endif /* HAVE_ENCAP_CSUM_OFFLOAD */ ++#ifdef HAVE_VXLAN_RX_OFFLOAD ++ if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE) { ++ netdev->hw_enc_features |= NETIF_F_IP_CSUM | ++ NETIF_F_IPV6_CSUM; ++ } ++#endif /* NETIF_F_GSO_PARTIAL */ ++ ++#endif /* HAVE_VXLAN_RX_OFFLOAD */ ++ if (netdev->features & NETIF_F_LRO) { ++ if ((adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) && ++ ((adapter->rx_itr_setting == 1) || ++ (adapter->rx_itr_setting > IXGBE_MIN_RSC_ITR))) { ++ adapter->flags2 |= IXGBE_FLAG2_RSC_ENABLED; ++ } else if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) { ++ e_dev_info("InterruptThrottleRate set too high, " ++ "disabling RSC\n"); ++ } ++ } ++#ifdef IFF_UNICAST_FLT + netdev->priv_flags |= IFF_UNICAST_FLT; ++#endif ++#ifdef IFF_SUPP_NOFCS + netdev->priv_flags |= IFF_SUPP_NOFCS; ++#endif ++ ++#ifdef HAVE_NETDEVICE_MIN_MAX_MTU ++ /* MTU range: 68 - 9710 */ ++ netdev->min_mtu = ETH_MIN_MTU; ++ netdev->max_mtu = IXGBE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN); + +-#ifdef CONFIG_IXGBE_DCB +- netdev->dcbnl_ops = &dcbnl_ops; + #endif ++#if IS_ENABLED(CONFIG_DCB) ++ if (adapter->flags & IXGBE_FLAG_DCB_CAPABLE) ++ netdev->dcbnl_ops = &ixgbe_dcbnl_ops; + +-#ifdef IXGBE_FCOE ++#endif /* CONFIG_DCB */ ++#if IS_ENABLED(CONFIG_FCOE) ++#ifdef NETIF_F_FSO + if (adapter->flags & IXGBE_FLAG_FCOE_CAPABLE) { + unsigned int fcoe_l; + +- if (hw->mac.ops.get_device_caps) { +- hw->mac.ops.get_device_caps(hw, &device_caps); +- if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) +- adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; ++ hw->mac.ops.get_device_caps(hw, 
&device_caps); ++ if (device_caps & IXGBE_DEVICE_CAPS_FCOE_OFFLOADS) { ++ adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED; ++ adapter->flags &= ~IXGBE_FLAG_FCOE_CAPABLE; ++ e_dev_info("FCoE offload feature is not available. " ++ "Disabling FCoE offload feature\n"); ++ } else { ++ netdev->features |= NETIF_F_FSO | ++ NETIF_F_FCOE_CRC; ++#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE ++ ixgbe_fcoe_ddp_enable(adapter); ++ adapter->flags |= IXGBE_FLAG_FCOE_ENABLED; ++ netdev->features |= NETIF_F_FCOE_MTU; ++#endif /* HAVE_NETDEV_OPS_FCOE_ENABLE */ + } + +- + fcoe_l = min_t(int, IXGBE_FCRETA_SIZE, num_online_cpus()); + adapter->ring_feature[RING_F_FCOE].limit = fcoe_l; + +- netdev->features |= NETIF_F_FSO | +- NETIF_F_FCOE_CRC; +- ++#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_FSO | + NETIF_F_FCOE_CRC | + NETIF_F_FCOE_MTU; ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ + } +-#endif /* IXGBE_FCOE */ ++#endif /* NETIF_F_FSO */ ++#endif /* CONFIG_FCOE */ + if (pci_using_dac) { + netdev->features |= NETIF_F_HIGHDMA; ++#ifdef HAVE_NETDEV_VLAN_FEATURES + netdev->vlan_features |= NETIF_F_HIGHDMA; ++#endif /* HAVE_NETDEV_VLAN_FEATURES */ + } + +- if (adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE) +- netdev->hw_features |= NETIF_F_LRO; +- if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) +- netdev->features |= NETIF_F_LRO; +- + /* make sure the EEPROM is good */ +- if (hw->eeprom.ops.validate_checksum(hw, NULL) < 0) { ++ if (hw->eeprom.ops.validate_checksum && ++ (hw->eeprom.ops.validate_checksum(hw, NULL) < 0)) { + e_dev_err("The EEPROM Checksum Is Not Valid\n"); + err = -EIO; + goto err_sw_init; + } + + memcpy(netdev->dev_addr, hw->mac.perm_addr, netdev->addr_len); ++#ifdef ETHTOOL_GPERMADDR ++ memcpy(netdev->perm_addr, hw->mac.perm_addr, netdev->addr_len); ++#endif + + if (!is_valid_ether_addr(netdev->dev_addr)) { + e_dev_err("invalid MAC address\n"); +@@ -8336,12 +11180,14 @@ skip_sriov: + goto err_sw_init; + } + +- ixgbe_mac_set_default_filter(adapter, hw->mac.perm_addr); ++ /* 
Set hw->mac.addr to permanent MAC address */ ++ ether_addr_copy(hw->mac.addr, hw->mac.perm_addr); ++ ixgbe_mac_set_default_filter(adapter); + + setup_timer(&adapter->service_timer, &ixgbe_service_timer, + (unsigned long) adapter); + +- if (ixgbe_removed(hw->hw_addr)) { ++ if (IXGBE_REMOVED(hw->hw_addr)) { + err = -EIO; + goto err_sw_init; + } +@@ -8356,49 +11202,14 @@ skip_sriov: + /* WOL not supported for all devices */ + adapter->wol = 0; + hw->eeprom.ops.read(hw, 0x2c, &adapter->eeprom_cap); +- hw->wol_enabled = ixgbe_wol_supported(adapter, pdev->device, +- pdev->subsystem_device); +- if (hw->wol_enabled) ++ if (ixgbe_wol_supported(adapter, pdev->device, pdev->subsystem_device)) + adapter->wol = IXGBE_WUFC_MAG; + +- device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); +- +- /* save off EEPROM version number */ +- hw->eeprom.ops.read(hw, 0x2e, &adapter->eeprom_verh); +- hw->eeprom.ops.read(hw, 0x2d, &adapter->eeprom_verl); +- +- /* pick up the PCI bus settings for reporting later */ +- hw->mac.ops.get_bus_info(hw); +- if (ixgbe_pcie_from_parent(hw)) +- ixgbe_get_parent_bus_info(adapter); +- +- /* calculate the expected PCIe bandwidth required for optimal +- * performance. Note that some older parts will never have enough +- * bandwidth due to being older generation PCIe parts. We clamp these +- * parts to ensure no warning is displayed if it can't be fixed. 
+- */ +- switch (hw->mac.type) { +- case ixgbe_mac_82598EB: +- expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); +- break; +- default: +- expected_gts = ixgbe_enumerate_functions(adapter) * 10; +- break; +- } +- ixgbe_check_minimum_link(adapter, expected_gts); ++ hw->wol_enabled = !!(adapter->wol); + +- err = ixgbe_read_pba_string_generic(hw, part_str, IXGBE_PBANUM_LENGTH); +- if (err) +- strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); +- if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) +- e_dev_info("MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", +- hw->mac.type, hw->phy.type, hw->phy.sfp_type, +- part_str); +- else +- e_dev_info("MAC: %d, PHY: %d, PBA No: %s\n", +- hw->mac.type, hw->phy.type, part_str); ++ device_set_wakeup_enable(pci_dev_to_dev(adapter->pdev), adapter->wol); + +- e_dev_info("%pM\n", netdev->dev_addr); ++ ixgbe_set_fw_version(adapter); + + /* reset the hardware with the new settings */ + err = hw->mac.ops.start_hw(hw); +@@ -8410,50 +11221,171 @@ skip_sriov: + "problems please contact your Intel or hardware " + "representative who provided you with this " + "hardware.\n"); ++ } else if (err == IXGBE_ERR_OVERTEMP) { ++ e_crit(drv, "%s\n", ixgbe_overheat_msg); ++ goto err_register; ++ } else if (err) { ++ e_dev_err("HW init failed\n"); ++ goto err_register; + } ++ ++ /* pick up the PCI bus settings for reporting later */ ++ if (ixgbe_pcie_from_parent(hw)) ++ ixgbe_get_parent_bus_info(hw); ++ else ++ if (hw->mac.ops.get_bus_info) ++ hw->mac.ops.get_bus_info(hw); ++ + strcpy(netdev->name, "eth%d"); + err = register_netdev(netdev); + if (err) + goto err_register; + ++ pci_set_drvdata(pdev, adapter); ++ adapter->netdev_registered = true; ++#ifdef HAVE_PCI_ERS ++ /* ++ * call save state here in standalone driver because it relies on ++ * adapter struct to exist, and needs to call netdev_priv ++ */ ++ pci_save_state(pdev); ++ ++#endif ++ + /* power down the optics for 82599 SFP+ fiber */ + if 
(hw->mac.ops.disable_tx_laser) + hw->mac.ops.disable_tx_laser(hw); + + /* carrier off reporting is important to ethtool even BEFORE open */ + netif_carrier_off(netdev); ++ /* keep stopping all the transmit queues for older kernels */ ++ netif_tx_stop_all_queues(netdev); + +-#ifdef CONFIG_IXGBE_DCA +- if (dca_add_requester(&pdev->dev) == 0) { +- adapter->flags |= IXGBE_FLAG_DCA_ENABLED; +- ixgbe_setup_dca(adapter); ++#if IS_ENABLED(CONFIG_DCA) ++ if (adapter->flags & IXGBE_FLAG_DCA_CAPABLE) { ++ err = dca_add_requester(pci_dev_to_dev(pdev)); ++ switch (err) { ++ case IXGBE_SUCCESS: ++ adapter->flags |= IXGBE_FLAG_DCA_ENABLED; ++ ixgbe_setup_dca(adapter); ++ break; ++ /* -19 is returned from the kernel when no provider is found */ ++ case -19: ++ e_info(rx_err, "No DCA provider found. Please " ++ "start ioatdma for DCA functionality.\n"); ++ break; ++ default: ++ e_info(probe, "DCA registration failed: %d\n", err); ++ break; ++ } ++ } ++#endif ++ ++ /* print all messages at the end so that we use our eth%d name */ ++ ++ /* calculate the expected PCIe bandwidth required for optimal ++ * performance. Note that some older parts will never have enough ++ * bandwidth due to being older generation PCIe parts. We clamp these ++ * parts to ensure that no warning is displayed, as this could confuse ++ * users otherwise. 
*/ ++ switch(hw->mac.type) { ++ case ixgbe_mac_82598EB: ++ expected_gts = min(ixgbe_enumerate_functions(adapter) * 10, 16); ++ break; ++ default: ++ expected_gts = ixgbe_enumerate_functions(adapter) * 10; ++ break; + } ++ ++ /* don't check link if we failed to enumerate functions */ ++ if (expected_gts > 0) ++ ixgbe_check_minimum_link(adapter, expected_gts); ++ ++ /* First try to read PBA as a string */ ++ err = ixgbe_read_pba_string(hw, part_str, IXGBE_PBANUM_LENGTH); ++ if (err) ++ strncpy(part_str, "Unknown", IXGBE_PBANUM_LENGTH); ++ if (ixgbe_is_sfp(hw) && hw->phy.sfp_type != ixgbe_sfp_type_not_present) ++ e_info(probe, "MAC: %d, PHY: %d, SFP+: %d, PBA No: %s\n", ++ hw->mac.type, hw->phy.type, hw->phy.sfp_type, part_str); ++ else ++ e_info(probe, "MAC: %d, PHY: %d, PBA No: %s\n", ++ hw->mac.type, hw->phy.type, part_str); ++ ++ e_dev_info("%02x:%02x:%02x:%02x:%02x:%02x\n", ++ netdev->dev_addr[0], netdev->dev_addr[1], ++ netdev->dev_addr[2], netdev->dev_addr[3], ++ netdev->dev_addr[4], netdev->dev_addr[5]); ++ ++#define INFO_STRING_LEN 255 ++ info_string = kzalloc(INFO_STRING_LEN, GFP_KERNEL); ++ if (!info_string) { ++ e_err(probe, "allocation for info string failed\n"); ++ goto no_info_string; ++ } ++ i_s_var = info_string; ++ i_s_var += sprintf(info_string, "Enabled Features: "); ++ i_s_var += sprintf(i_s_var, "RxQ: %d TxQ: %d ", ++ adapter->num_rx_queues, adapter->num_tx_queues); ++#if IS_ENABLED(CONFIG_FCOE) ++ if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) ++ i_s_var += sprintf(i_s_var, "FCoE "); + #endif ++ if (adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) ++ i_s_var += sprintf(i_s_var, "FdirHash "); ++ if (adapter->flags & IXGBE_FLAG_DCB_ENABLED) ++ i_s_var += sprintf(i_s_var, "DCB "); ++ if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) ++ i_s_var += sprintf(i_s_var, "DCA "); ++ if (adapter->flags2 & IXGBE_FLAG2_RSC_ENABLED) ++ i_s_var += sprintf(i_s_var, "RSC "); ++ if (adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE) ++ i_s_var += sprintf(i_s_var, "vxlan_rx 
"); ++ ++ BUG_ON(i_s_var > (info_string + INFO_STRING_LEN)); ++ /* end features printing */ ++ e_info(probe, "%s\n", info_string); ++ kfree(info_string); ++no_info_string: ++#ifdef CONFIG_PCI_IOV + if (adapter->flags & IXGBE_FLAG_SRIOV_ENABLED) { +- e_info(probe, "IOV is enabled with %d VFs\n", adapter->num_vfs); ++ int i; + for (i = 0; i < adapter->num_vfs; i++) + ixgbe_vf_configuration(pdev, (i | 0x10000000)); + } ++#endif + +- /* firmware requires driver version to be 0xFFFFFFFF +- * since os does not support feature +- */ ++ /* Initialize the LED link active for LED blink support */ ++ if (hw->mac.ops.init_led_link_act) ++ hw->mac.ops.init_led_link_act(hw); ++ ++ /* firmware requires blank numerical version */ + if (hw->mac.ops.set_fw_drv_ver) +- hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, +- 0xFF); ++ hw->mac.ops.set_fw_drv_ver(hw, 0xFF, 0xFF, 0xFF, 0xFF, ++ sizeof(ixgbe_driver_version) - 1, ++ ixgbe_driver_version); + ++#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) + /* add san mac addr to netdev */ + ixgbe_add_sanmac_netdev(netdev); + +- e_dev_info("%s\n", ixgbe_default_device_descr); ++#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ ++ e_info(probe, "Intel(R) 10 Gigabit Network Connection\n"); + cards_found++; + +-#ifdef CONFIG_IXGBE_HWMON ++#ifdef IXGBE_SYSFS + if (ixgbe_sysfs_init(adapter)) + e_err(probe, "failed to allocate sysfs resources\n"); +-#endif /* CONFIG_IXGBE_HWMON */ ++#else ++#ifdef IXGBE_PROCFS ++ if (ixgbe_procfs_init(adapter)) ++ e_err(probe, "failed to allocate procfs resources\n"); ++#endif /* IXGBE_PROCFS */ ++#endif /* IXGBE_SYSFS */ ++#ifdef HAVE_IXGBE_DEBUG_FS + + ixgbe_dbg_adapter_init(adapter); ++#endif /* HAVE_IXGBE_DEBUG_FS */ + + /* setup link for SFP devices with MNG FW, else wait for IXGBE_UP */ + if (ixgbe_mng_enabled(hw) && ixgbe_is_sfp(hw) && hw->mac.ops.setup_link) +@@ -8461,22 +11393,31 @@ skip_sriov: + IXGBE_LINK_SPEED_10GB_FULL | IXGBE_LINK_SPEED_1GB_FULL, + true); + 
++ if (hw->mac.ops.setup_eee && ++ (adapter->flags2 & IXGBE_FLAG2_EEE_CAPABLE)) { ++ bool eee_enable = !!(adapter->flags2 & IXGBE_FLAG2_EEE_ENABLED); ++ ++ hw->mac.ops.setup_eee(hw, eee_enable); ++ } ++ + return 0; + + err_register: +- ixgbe_release_hw_control(adapter); + ixgbe_clear_interrupt_scheme(adapter); + err_sw_init: ++ ixgbe_release_hw_control(adapter); ++#ifdef CONFIG_PCI_IOV + ixgbe_disable_sriov(adapter); ++#endif /* CONFIG_PCI_IOV */ + adapter->flags2 &= ~IXGBE_FLAG2_SEARCH_FOR_SFP; +- iounmap(adapter->io_addr); + kfree(adapter->mac_table); ++ kfree(adapter->rss_key); ++ iounmap(adapter->io_addr); + err_ioremap: + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); + free_netdev(netdev); + err_alloc_etherdev: +- pci_release_selected_regions(pdev, +- pci_select_bars(pdev, IORESOURCE_MEM)); ++ pci_release_mem_regions(pdev); + err_pci_reg: + err_dma: + if (!adapter || disable_dev) +@@ -8493,60 +11434,73 @@ err_dma: + * Hot-Plug event, or because the driver is going to be removed from + * memory. 
+ **/ +-static void ixgbe_remove(struct pci_dev *pdev) ++static void __devexit ixgbe_remove(struct pci_dev *pdev) + { + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); +- struct net_device *netdev = adapter->netdev; ++ struct net_device *netdev; + bool disable_dev; + ++ /* if !adapter then we already cleaned up in probe */ ++ if (!adapter) ++ return; ++ ++ netdev = adapter->netdev; ++#ifdef HAVE_IXGBE_DEBUG_FS + ixgbe_dbg_adapter_exit(adapter); + +- set_bit(__IXGBE_REMOVING, &adapter->state); ++#endif /*HAVE_IXGBE_DEBUG_FS */ ++ set_bit(__IXGBE_REMOVE, &adapter->state); + cancel_work_sync(&adapter->service_task); + +- +-#ifdef CONFIG_IXGBE_DCA ++#if IS_ENABLED(CONFIG_DCA) + if (adapter->flags & IXGBE_FLAG_DCA_ENABLED) { + adapter->flags &= ~IXGBE_FLAG_DCA_ENABLED; +- dca_remove_requester(&pdev->dev); +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, 1); ++ dca_remove_requester(pci_dev_to_dev(pdev)); ++ IXGBE_WRITE_REG(&adapter->hw, IXGBE_DCA_CTRL, ++ IXGBE_DCA_CTRL_DCA_DISABLE); + } ++#endif /* CONFIG_DCA */ + +-#endif +-#ifdef CONFIG_IXGBE_HWMON ++#ifdef IXGBE_SYSFS + ixgbe_sysfs_exit(adapter); +-#endif /* CONFIG_IXGBE_HWMON */ ++#else ++#ifdef IXGBE_PROCFS ++ ixgbe_procfs_exit(adapter); ++#endif /* IXGBE_PROCFS */ ++#endif /* IXGBE-SYSFS */ + ++#if defined(HAVE_NETDEV_STORAGE_ADDRESS) && defined(NETDEV_HW_ADDR_T_SAN) + /* remove the added san mac */ + ixgbe_del_sanmac_netdev(netdev); + +- if (netdev->reg_state == NETREG_REGISTERED) +- unregister_netdev(netdev); ++#endif /* (HAVE_NETDEV_STORAGE_ADDRESS) && (NETDEV_HW_ADDR_T_SAN) */ + + #ifdef CONFIG_PCI_IOV +- /* +- * Only disable SR-IOV on unload if the user specified the now +- * deprecated max_vfs module parameter. 
+- */ +- if (max_vfs) +- ixgbe_disable_sriov(adapter); ++ ixgbe_disable_sriov(adapter); ++#endif /* CONFIG_PCI_IOV */ ++ if (adapter->netdev_registered) { ++ unregister_netdev(netdev); ++ adapter->netdev_registered = false; ++ } ++ ++#if IS_ENABLED(CONFIG_FCOE) ++#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE ++ ixgbe_fcoe_ddp_disable(adapter); + #endif ++#endif /* CONFIG_FCOE */ + ixgbe_clear_interrupt_scheme(adapter); +- + ixgbe_release_hw_control(adapter); + +-#ifdef CONFIG_DCB ++#ifdef HAVE_DCBNL_IEEE + kfree(adapter->ixgbe_ieee_pfc); + kfree(adapter->ixgbe_ieee_ets); + + #endif + iounmap(adapter->io_addr); +- pci_release_selected_regions(pdev, pci_select_bars(pdev, +- IORESOURCE_MEM)); +- +- e_dev_info("complete\n"); ++ pci_release_mem_regions(pdev); + + kfree(adapter->mac_table); ++ kfree(adapter->rss_key); + disable_dev = !test_and_set_bit(__IXGBE_DISABLED, &adapter->state); + free_netdev(netdev); + +@@ -8554,8 +11508,70 @@ static void ixgbe_remove(struct pci_dev *pdev) + + if (disable_dev) + pci_disable_device(pdev); ++ ++} ++ ++static bool ixgbe_check_cfg_remove(struct ixgbe_hw *hw, struct pci_dev *pdev) ++{ ++ u16 value; ++ ++ pci_read_config_word(pdev, PCI_VENDOR_ID, &value); ++ if (value == IXGBE_FAILED_READ_CFG_WORD) { ++ ixgbe_remove_adapter(hw); ++ return true; ++ } ++ return false; ++} ++ ++u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg) ++{ ++ struct ixgbe_adapter *adapter = hw->back; ++ u16 value; ++ ++ if (IXGBE_REMOVED(hw->hw_addr)) ++ return IXGBE_FAILED_READ_CFG_WORD; ++ pci_read_config_word(adapter->pdev, reg, &value); ++ if (value == IXGBE_FAILED_READ_CFG_WORD && ++ ixgbe_check_cfg_remove(hw, adapter->pdev)) ++ return IXGBE_FAILED_READ_CFG_WORD; ++ return value; ++} ++ ++#ifdef HAVE_PCI_ERS ++#ifdef CONFIG_PCI_IOV ++static u32 ixgbe_read_pci_cfg_dword(struct ixgbe_hw *hw, u32 reg) ++{ ++ struct ixgbe_adapter *adapter = hw->back; ++ u32 value; ++ ++ if (IXGBE_REMOVED(hw->hw_addr)) ++ return IXGBE_FAILED_READ_CFG_DWORD; ++ 
pci_read_config_dword(adapter->pdev, reg, &value); ++ if (value == IXGBE_FAILED_READ_CFG_DWORD && ++ ixgbe_check_cfg_remove(hw, adapter->pdev)) ++ return IXGBE_FAILED_READ_CFG_DWORD; ++ return value; ++} ++#endif /* CONFIG_PCI_IOV */ ++#endif /* HAVE_PCI_ERS */ ++ ++void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value) ++{ ++ struct ixgbe_adapter *adapter = hw->back; ++ ++ if (IXGBE_REMOVED(hw->hw_addr)) ++ return; ++ pci_write_config_word(adapter->pdev, reg, value); ++} ++ ++void ewarn(struct ixgbe_hw *hw, const char *st) ++{ ++ struct ixgbe_adapter *adapter = hw->back; ++ ++ netif_warn(adapter, drv, adapter->netdev, "%s", st); + } + ++#ifdef HAVE_PCI_ERS + /** + * ixgbe_io_error_detected - called when PCI error is detected + * @pdev: Pointer to PCI device +@@ -8596,7 +11612,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, + dw1 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 4); + dw2 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 8); + dw3 = ixgbe_read_pci_cfg_dword(hw, pos + PCI_ERR_HEADER_LOG + 12); +- if (ixgbe_removed(hw->hw_addr)) ++ if (IXGBE_REMOVED(hw->hw_addr)) + goto skip_bad_vf_detection; + + req_id = dw1 >> 16; +@@ -8615,10 +11631,19 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, + dw0, dw1, dw2, dw3); + switch (adapter->hw.mac.type) { + case ixgbe_mac_82599EB: +- device_id = IXGBE_82599_VF_DEVICE_ID; ++ device_id = IXGBE_DEV_ID_82599_VF; + break; + case ixgbe_mac_X540: +- device_id = IXGBE_X540_VF_DEVICE_ID; ++ device_id = IXGBE_DEV_ID_X540_VF; ++ break; ++ case ixgbe_mac_X550: ++ device_id = IXGBE_DEV_ID_X550_VF; ++ break; ++ case ixgbe_mac_X550EM_x: ++ device_id = IXGBE_DEV_ID_X550EM_X_VF; ++ break; ++ case ixgbe_mac_X550EM_a: ++ device_id = IXGBE_DEV_ID_X550EM_A_VF; + break; + default: + device_id = 0; +@@ -8639,8 +11664,7 @@ static pci_ers_result_t ixgbe_io_error_detected(struct pci_dev *pdev, + * VFLR. Just clean up the AER in that case. 
+ */ + if (vfdev) { +- e_dev_err("Issuing VFLR to VF %d\n", vf); +- pci_write_config_dword(vfdev, 0xA8, 0x00008000); ++ ixgbe_issue_vf_flr(adapter, vfdev); + /* Free device reference count */ + pci_dev_put(vfdev); + } +@@ -8672,7 +11696,7 @@ skip_bad_vf_detection: + } + + if (netif_running(netdev)) +- ixgbe_down(adapter); ++ ixgbe_close_suspend(adapter); + + if (!test_and_set_bit(__IXGBE_DISABLED, &adapter->state)) + pci_disable_device(pdev); +@@ -8692,7 +11716,6 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) + { + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + pci_ers_result_t result; +- int err; + + if (pci_enable_device_mem(pdev)) { + e_err(probe, "Cannot re-enable PCI device after reset.\n"); +@@ -8703,21 +11726,22 @@ static pci_ers_result_t ixgbe_io_slot_reset(struct pci_dev *pdev) + adapter->hw.hw_addr = adapter->io_addr; + pci_set_master(pdev); + pci_restore_state(pdev); ++ /* ++ * After second error pci->state_saved is false, this ++ * resets it so EEH doesn't break. 
++ */ + pci_save_state(pdev); + + pci_wake_from_d3(pdev, false); + +- ixgbe_reset(adapter); ++ set_bit(__IXGBE_RESET_REQUESTED, &adapter->state); ++ ixgbe_service_event_schedule(adapter); ++ + IXGBE_WRITE_REG(&adapter->hw, IXGBE_WUS, ~0); + result = PCI_ERS_RESULT_RECOVERED; + } + +- err = pci_cleanup_aer_uncorrect_error_status(pdev); +- if (err) { +- e_dev_err("pci_cleanup_aer_uncorrect_error_status " +- "failed 0x%0x\n", err); +- /* non-fatal, continue */ +- } ++ pci_cleanup_aer_uncorrect_error_status(pdev); + + return result; + } +@@ -8742,32 +11766,91 @@ static void ixgbe_io_resume(struct pci_dev *pdev) + } + + #endif ++ rtnl_lock(); + if (netif_running(netdev)) +- ixgbe_up(adapter); ++ ixgbe_open(netdev); + + netif_device_attach(netdev); ++ rtnl_unlock(); + } + ++#ifdef HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS + static const struct pci_error_handlers ixgbe_err_handler = { ++#else ++static struct pci_error_handlers ixgbe_err_handler = { ++#endif + .error_detected = ixgbe_io_error_detected, + .slot_reset = ixgbe_io_slot_reset, + .resume = ixgbe_io_resume, + }; ++#endif /* HAVE_PCI_ERS */ ++ ++struct net_device *ixgbe_hw_to_netdev(const struct ixgbe_hw *hw) ++{ ++ return ((struct ixgbe_adapter *)hw->back)->netdev; ++} ++struct ixgbe_msg *ixgbe_hw_to_msg(const struct ixgbe_hw *hw) ++{ ++ struct ixgbe_adapter *adapter = ++ container_of(hw, struct ixgbe_adapter, hw); ++ return (struct ixgbe_msg *)&adapter->msg_enable; ++} ++ ++#ifdef HAVE_RHEL6_SRIOV_CONFIGURE ++static struct pci_driver_rh ixgbe_driver_rh = { ++ .sriov_configure = ixgbe_pci_sriov_configure, ++}; ++#endif ++ ++#ifdef CONFIG_PM ++#ifndef USE_LEGACY_PM_SUPPORT ++static const struct dev_pm_ops ixgbe_pm_ops = { ++ .suspend = ixgbe_suspend, ++ .resume = ixgbe_resume, ++ .freeze = ixgbe_freeze, ++ .thaw = ixgbe_thaw, ++ .poweroff = ixgbe_suspend, ++ .restore = ixgbe_resume, ++}; ++#endif /* USE_LEGACY_PM_SUPPORT */ ++#endif + + static struct pci_driver ixgbe_driver = { + .name = ixgbe_driver_name, + .id_table 
= ixgbe_pci_tbl, + .probe = ixgbe_probe, +- .remove = ixgbe_remove, ++ .remove = __devexit_p(ixgbe_remove), + #ifdef CONFIG_PM ++#ifndef USE_LEGACY_PM_SUPPORT ++ .driver = { ++ .pm = &ixgbe_pm_ops, ++ }, ++#else + .suspend = ixgbe_suspend, + .resume = ixgbe_resume, ++#endif /* USE_LEGACY_PM_SUPPORT */ + #endif ++#ifndef USE_REBOOT_NOTIFIER + .shutdown = ixgbe_shutdown, ++#endif ++#if defined(HAVE_SRIOV_CONFIGURE) + .sriov_configure = ixgbe_pci_sriov_configure, ++#elif defined(HAVE_RHEL6_SRIOV_CONFIGURE) ++ .rh_reserved = &ixgbe_driver_rh, ++#endif /* HAVE_SRIOV_CONFIGURE */ ++#ifdef HAVE_PCI_ERS + .err_handler = &ixgbe_err_handler ++#endif + }; + ++bool ixgbe_is_ixgbe(struct pci_dev *pcidev) ++{ ++ if (pci_dev_driver(pcidev) != &ixgbe_driver) ++ return false; ++ else ++ return true; ++} ++ + /** + * ixgbe_init_module - Driver Registration Routine + * +@@ -8780,19 +11863,38 @@ static int __init ixgbe_init_module(void) + pr_info("%s - version %s\n", ixgbe_driver_string, ixgbe_driver_version); + pr_info("%s\n", ixgbe_copyright); + ++ ixgbe_wq = create_singlethread_workqueue(ixgbe_driver_name); ++ if (!ixgbe_wq) { ++ pr_err("%s: Failed to create workqueue\n", ixgbe_driver_name); ++ return -ENOMEM; ++ } ++ ++#ifdef IXGBE_PROCFS ++ if (ixgbe_procfs_topdir_init()) ++ pr_info("Procfs failed to initialize topdir\n"); ++#endif ++ ++#ifdef HAVE_IXGBE_DEBUG_FS + ixgbe_dbg_init(); ++#endif /* HAVE_IXGBE_DEBUG_FS */ + + ret = pci_register_driver(&ixgbe_driver); + if (ret) { ++ destroy_workqueue(ixgbe_wq); ++#ifdef HAVE_IXGBE_DEBUG_FS + ixgbe_dbg_exit(); ++#endif /* HAVE_IXGBE_DEBUG_FS */ ++#ifdef IXGBE_PROCFS ++ ixgbe_procfs_topdir_exit(); ++#endif + return ret; +- } ++} ++#if IS_ENABLED(CONFIG_DCA) + +-#ifdef CONFIG_IXGBE_DCA + dca_register_notify(&dca_notifier); + #endif + +- return 0; ++ return ret; + } + + module_init(ixgbe_init_module); +@@ -8805,19 +11907,22 @@ module_init(ixgbe_init_module); + **/ + static void __exit ixgbe_exit_module(void) + { +-#ifdef CONFIG_IXGBE_DCA 
++#if IS_ENABLED(CONFIG_DCA) + dca_unregister_notify(&dca_notifier); + #endif + pci_unregister_driver(&ixgbe_driver); +- ++#ifdef IXGBE_PROCFS ++ ixgbe_procfs_topdir_exit(); ++#endif ++ destroy_workqueue(ixgbe_wq); ++#ifdef HAVE_IXGBE_DEBUG_FS + ixgbe_dbg_exit(); +- +- rcu_barrier(); /* Wait for completion of call_rcu()'s */ ++#endif /* HAVE_IXGBE_DEBUG_FS */ + } + +-#ifdef CONFIG_IXGBE_DCA +-static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, +- void *p) ++#if IS_ENABLED(CONFIG_DCA) ++static int ixgbe_notify_dca(struct notifier_block __always_unused *nb, unsigned long event, ++ void __always_unused *p) + { + int ret_val; + +@@ -8826,9 +11931,8 @@ static int ixgbe_notify_dca(struct notifier_block *nb, unsigned long event, + + return ret_val ? NOTIFY_BAD : NOTIFY_DONE; + } +- +-#endif /* CONFIG_IXGBE_DCA */ +- ++#endif + module_exit(ixgbe_exit_module); + + /* ixgbe_main.c */ ++ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +index 1918e0a..ab3aa32 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -26,9 +22,7 @@ + + *******************************************************************************/ + +-#include +-#include +-#include "ixgbe.h" ++#include "ixgbe_type.h" + #include "ixgbe_mbx.h" + + /** +@@ -38,13 +32,15 @@ + * @size: Length of buffer + * @mbx_id: id of mailbox to read + * +- * returns SUCCESS if it successfully read message from buffer ++ * returns SUCCESS if it successfuly read message from buffer + **/ + s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) + { + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + ++ DEBUGFUNC("ixgbe_read_mbx"); ++ + /* limit read to size of mailbox */ + if (size > mbx->size) + size = mbx->size; +@@ -67,12 +63,15 @@ s32 ixgbe_read_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) + s32 ixgbe_write_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) + { + struct ixgbe_mbx_info *mbx = &hw->mbx; +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; + +- if (size > mbx->size) +- ret_val = IXGBE_ERR_MBX; ++ DEBUGFUNC("ixgbe_write_mbx"); + +- else if (mbx->ops.write) ++ if (size > mbx->size) { ++ ret_val = IXGBE_ERR_MBX; ++ ERROR_REPORT2(IXGBE_ERROR_ARGUMENT, ++ "Invalid mailbox message size %d", size); ++ } else if (mbx->ops.write) + ret_val = mbx->ops.write(hw, msg, size, mbx_id); + + return ret_val; +@@ -90,6 +89,8 @@ s32 ixgbe_check_for_msg(struct ixgbe_hw *hw, u16 mbx_id) + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + ++ DEBUGFUNC("ixgbe_check_for_msg"); ++ + if (mbx->ops.check_for_msg) + ret_val = mbx->ops.check_for_msg(hw, mbx_id); + +@@ -108,6 +109,8 @@ s32 ixgbe_check_for_ack(struct ixgbe_hw *hw, u16 mbx_id) + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + ++ DEBUGFUNC("ixgbe_check_for_ack"); ++ + if (mbx->ops.check_for_ack) + ret_val = mbx->ops.check_for_ack(hw, mbx_id); + +@@ -126,6 +129,8 @@ s32 
ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + ++ DEBUGFUNC("ixgbe_check_for_rst"); ++ + if (mbx->ops.check_for_rst) + ret_val = mbx->ops.check_for_rst(hw, mbx_id); + +@@ -139,11 +144,13 @@ s32 ixgbe_check_for_rst(struct ixgbe_hw *hw, u16 mbx_id) + * + * returns SUCCESS if it successfully received a message notification + **/ +-static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) ++STATIC s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) + { + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + ++ DEBUGFUNC("ixgbe_poll_for_msg"); ++ + if (!countdown || !mbx->ops.check_for_msg) + goto out; + +@@ -151,11 +158,15 @@ static s32 ixgbe_poll_for_msg(struct ixgbe_hw *hw, u16 mbx_id) + countdown--; + if (!countdown) + break; +- udelay(mbx->usec_delay); ++ usec_delay(mbx->usec_delay); + } + ++ if (countdown == 0) ++ ERROR_REPORT2(IXGBE_ERROR_POLLING, ++ "Polling for VF%d mailbox message timedout", mbx_id); ++ + out: +- return countdown ? 0 : IXGBE_ERR_MBX; ++ return countdown ? IXGBE_SUCCESS : IXGBE_ERR_MBX; + } + + /** +@@ -165,11 +176,13 @@ out: + * + * returns SUCCESS if it successfully received a message acknowledgement + **/ +-static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) ++STATIC s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) + { + struct ixgbe_mbx_info *mbx = &hw->mbx; + int countdown = mbx->timeout; + ++ DEBUGFUNC("ixgbe_poll_for_ack"); ++ + if (!countdown || !mbx->ops.check_for_ack) + goto out; + +@@ -177,11 +190,15 @@ static s32 ixgbe_poll_for_ack(struct ixgbe_hw *hw, u16 mbx_id) + countdown--; + if (!countdown) + break; +- udelay(mbx->usec_delay); ++ usec_delay(mbx->usec_delay); + } + ++ if (countdown == 0) ++ ERROR_REPORT2(IXGBE_ERROR_POLLING, ++ "Polling for VF%d mailbox ack timedout", mbx_id); ++ + out: +- return countdown ? 0 : IXGBE_ERR_MBX; ++ return countdown ? 
IXGBE_SUCCESS : IXGBE_ERR_MBX; + } + + /** +@@ -194,12 +211,13 @@ out: + * returns SUCCESS if it successfully received a message notification and + * copied it into the receive buffer. + **/ +-static s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, +- u16 mbx_id) ++s32 ixgbe_read_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, u16 mbx_id) + { + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + ++ DEBUGFUNC("ixgbe_read_posted_mbx"); ++ + if (!mbx->ops.read) + goto out; + +@@ -222,12 +240,14 @@ out: + * returns SUCCESS if it successfully copied message into the buffer and + * received an ack to that message within delay * timeout period + **/ +-static s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, ++s32 ixgbe_write_posted_mbx(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 mbx_id) + { + struct ixgbe_mbx_info *mbx = &hw->mbx; + s32 ret_val = IXGBE_ERR_MBX; + ++ DEBUGFUNC("ixgbe_write_posted_mbx"); ++ + /* exit if either we can't write or there isn't a defined timeout */ + if (!mbx->ops.write || !mbx->timeout) + goto out; +@@ -242,13 +262,265 @@ out: + return ret_val; + } + +-static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) ++/** ++ * ixgbe_init_mbx_ops_generic - Initialize MB function pointers ++ * @hw: pointer to the HW structure ++ * ++ * Setups up the mailbox read and write message function pointers ++ **/ ++void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ ++ mbx->ops.read_posted = ixgbe_read_posted_mbx; ++ mbx->ops.write_posted = ixgbe_write_posted_mbx; ++} ++ ++/** ++ * ixgbe_read_v2p_mailbox - read v2p mailbox ++ * @hw: pointer to the HW structure ++ * ++ * This function is used to read the v2p mailbox without losing the read to ++ * clear status bits. 
++ **/ ++STATIC u32 ixgbe_read_v2p_mailbox(struct ixgbe_hw *hw) ++{ ++ u32 v2p_mailbox = IXGBE_READ_REG(hw, IXGBE_VFMAILBOX); ++ ++ v2p_mailbox |= hw->mbx.v2p_mailbox; ++ hw->mbx.v2p_mailbox |= v2p_mailbox & IXGBE_VFMAILBOX_R2C_BITS; ++ ++ return v2p_mailbox; ++} ++ ++/** ++ * ixgbe_check_for_bit_vf - Determine if a status bit was set ++ * @hw: pointer to the HW structure ++ * @mask: bitmask for bits to be tested and cleared ++ * ++ * This function is used to check for the read to clear bits within ++ * the V2P mailbox. ++ **/ ++STATIC s32 ixgbe_check_for_bit_vf(struct ixgbe_hw *hw, u32 mask) ++{ ++ u32 v2p_mailbox = ixgbe_read_v2p_mailbox(hw); ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ if (v2p_mailbox & mask) ++ ret_val = IXGBE_SUCCESS; ++ ++ hw->mbx.v2p_mailbox &= ~mask; ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_check_for_msg_vf - checks to see if the PF has sent mail ++ * @hw: pointer to the HW structure ++ * @mbx_id: id of mailbox to check ++ * ++ * returns SUCCESS if the PF has set the Status bit or else ERR_MBX ++ **/ ++STATIC s32 ixgbe_check_for_msg_vf(struct ixgbe_hw *hw, u16 mbx_id) ++{ ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ UNREFERENCED_1PARAMETER(mbx_id); ++ DEBUGFUNC("ixgbe_check_for_msg_vf"); ++ ++ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFSTS)) { ++ ret_val = IXGBE_SUCCESS; ++ hw->mbx.stats.reqs++; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_check_for_ack_vf - checks to see if the PF has ACK'd ++ * @hw: pointer to the HW structure ++ * @mbx_id: id of mailbox to check ++ * ++ * returns SUCCESS if the PF has set the ACK bit or else ERR_MBX ++ **/ ++STATIC s32 ixgbe_check_for_ack_vf(struct ixgbe_hw *hw, u16 mbx_id) ++{ ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ UNREFERENCED_1PARAMETER(mbx_id); ++ DEBUGFUNC("ixgbe_check_for_ack_vf"); ++ ++ if (!ixgbe_check_for_bit_vf(hw, IXGBE_VFMAILBOX_PFACK)) { ++ ret_val = IXGBE_SUCCESS; ++ hw->mbx.stats.acks++; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_check_for_rst_vf - checks to see if the PF has 
reset ++ * @hw: pointer to the HW structure ++ * @mbx_id: id of mailbox to check ++ * ++ * returns true if the PF has set the reset done bit or else false ++ **/ ++STATIC s32 ixgbe_check_for_rst_vf(struct ixgbe_hw *hw, u16 mbx_id) ++{ ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ UNREFERENCED_1PARAMETER(mbx_id); ++ DEBUGFUNC("ixgbe_check_for_rst_vf"); ++ ++ if (!ixgbe_check_for_bit_vf(hw, (IXGBE_VFMAILBOX_RSTD | ++ IXGBE_VFMAILBOX_RSTI))) { ++ ret_val = IXGBE_SUCCESS; ++ hw->mbx.stats.rsts++; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_obtain_mbx_lock_vf - obtain mailbox lock ++ * @hw: pointer to the HW structure ++ * ++ * return SUCCESS if we obtained the mailbox lock ++ **/ ++STATIC s32 ixgbe_obtain_mbx_lock_vf(struct ixgbe_hw *hw) ++{ ++ s32 ret_val = IXGBE_ERR_MBX; ++ ++ DEBUGFUNC("ixgbe_obtain_mbx_lock_vf"); ++ ++ /* Take ownership of the buffer */ ++ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_VFU); ++ ++ /* reserve mailbox for vf use */ ++ if (ixgbe_read_v2p_mailbox(hw) & IXGBE_VFMAILBOX_VFU) ++ ret_val = IXGBE_SUCCESS; ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_write_mbx_vf - Write a message to the mailbox ++ * @hw: pointer to the HW structure ++ * @msg: The message buffer ++ * @size: Length of buffer ++ * @mbx_id: id of mailbox to write ++ * ++ * returns SUCCESS if it successfully copied message into the buffer ++ **/ ++STATIC s32 ixgbe_write_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, ++ u16 mbx_id) ++{ ++ s32 ret_val; ++ u16 i; ++ ++ UNREFERENCED_1PARAMETER(mbx_id); ++ ++ DEBUGFUNC("ixgbe_write_mbx_vf"); ++ ++ /* lock the mailbox to prevent pf/vf race condition */ ++ ret_val = ixgbe_obtain_mbx_lock_vf(hw); ++ if (ret_val) ++ goto out_no_write; ++ ++ /* flush msg and acks as we are overwriting the message buffer */ ++ ixgbe_check_for_msg_vf(hw, 0); ++ ixgbe_check_for_ack_vf(hw, 0); ++ ++ /* copy the caller specified message to the mailbox memory buffer */ ++ for (i = 0; i < size; i++) ++ IXGBE_WRITE_REG_ARRAY(hw, IXGBE_VFMBMEM, i, msg[i]); 
++ ++ /* update stats */ ++ hw->mbx.stats.msgs_tx++; ++ ++ /* Drop VFU and interrupt the PF to tell it a message has been sent */ ++ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_REQ); ++ ++out_no_write: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_read_mbx_vf - Reads a message from the inbox intended for vf ++ * @hw: pointer to the HW structure ++ * @msg: The message buffer ++ * @size: Length of buffer ++ * @mbx_id: id of mailbox to read ++ * ++ * returns SUCCESS if it successfuly read message from buffer ++ **/ ++STATIC s32 ixgbe_read_mbx_vf(struct ixgbe_hw *hw, u32 *msg, u16 size, ++ u16 mbx_id) ++{ ++ s32 ret_val = IXGBE_SUCCESS; ++ u16 i; ++ ++ DEBUGFUNC("ixgbe_read_mbx_vf"); ++ UNREFERENCED_1PARAMETER(mbx_id); ++ ++ /* lock the mailbox to prevent pf/vf race condition */ ++ ret_val = ixgbe_obtain_mbx_lock_vf(hw); ++ if (ret_val) ++ goto out_no_read; ++ ++ /* copy the message from the mailbox memory buffer */ ++ for (i = 0; i < size; i++) ++ msg[i] = IXGBE_READ_REG_ARRAY(hw, IXGBE_VFMBMEM, i); ++ ++ /* Acknowledge receipt and release mailbox, then we're done */ ++ IXGBE_WRITE_REG(hw, IXGBE_VFMAILBOX, IXGBE_VFMAILBOX_ACK); ++ ++ /* update stats */ ++ hw->mbx.stats.msgs_rx++; ++ ++out_no_read: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_init_mbx_params_vf - set initial values for vf mailbox ++ * @hw: pointer to the HW structure ++ * ++ * Initializes the hw->mbx struct to correct values for vf mailbox ++ */ ++void ixgbe_init_mbx_params_vf(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mbx_info *mbx = &hw->mbx; ++ ++ /* start mailbox as timed out and let the reset_hw call set the timeout ++ * value to begin communications */ ++ mbx->timeout = 0; ++ mbx->usec_delay = IXGBE_VF_MBX_INIT_DELAY; ++ ++ mbx->size = IXGBE_VFMAILBOX_SIZE; ++ ++ mbx->ops.read = ixgbe_read_mbx_vf; ++ mbx->ops.write = ixgbe_write_mbx_vf; ++ mbx->ops.read_posted = ixgbe_read_posted_mbx; ++ mbx->ops.write_posted = ixgbe_write_posted_mbx; ++ mbx->ops.check_for_msg = ixgbe_check_for_msg_vf; ++ 
mbx->ops.check_for_ack = ixgbe_check_for_ack_vf; ++ mbx->ops.check_for_rst = ixgbe_check_for_rst_vf; ++ ++ mbx->stats.msgs_tx = 0; ++ mbx->stats.msgs_rx = 0; ++ mbx->stats.reqs = 0; ++ mbx->stats.acks = 0; ++ mbx->stats.rsts = 0; ++} ++ ++STATIC s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) + { + u32 mbvficr = IXGBE_READ_REG(hw, IXGBE_MBVFICR(index)); + s32 ret_val = IXGBE_ERR_MBX; + + if (mbvficr & mask) { +- ret_val = 0; ++ ret_val = IXGBE_SUCCESS; + IXGBE_WRITE_REG(hw, IXGBE_MBVFICR(index), mask); + } + +@@ -262,15 +534,17 @@ static s32 ixgbe_check_for_bit_pf(struct ixgbe_hw *hw, u32 mask, s32 index) + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +-static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) ++STATIC s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) + { + s32 ret_val = IXGBE_ERR_MBX; + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + ++ DEBUGFUNC("ixgbe_check_for_msg_pf"); ++ + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFREQ_VF1 << vf_bit, + index)) { +- ret_val = 0; ++ ret_val = IXGBE_SUCCESS; + hw->mbx.stats.reqs++; + } + +@@ -284,15 +558,17 @@ static s32 ixgbe_check_for_msg_pf(struct ixgbe_hw *hw, u16 vf_number) + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +-static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) ++STATIC s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) + { + s32 ret_val = IXGBE_ERR_MBX; + s32 index = IXGBE_MBVFICR_INDEX(vf_number); + u32 vf_bit = vf_number % 16; + ++ DEBUGFUNC("ixgbe_check_for_ack_pf"); ++ + if (!ixgbe_check_for_bit_pf(hw, IXGBE_MBVFICR_VFACK_VF1 << vf_bit, + index)) { +- ret_val = 0; ++ ret_val = IXGBE_SUCCESS; + hw->mbx.stats.acks++; + } + +@@ -306,17 +582,22 @@ static s32 ixgbe_check_for_ack_pf(struct ixgbe_hw *hw, u16 vf_number) + * + * returns SUCCESS if the VF has set the Status bit or else ERR_MBX + **/ +-static s32 
ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) ++STATIC s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) + { + u32 reg_offset = (vf_number < 32) ? 0 : 1; + u32 vf_shift = vf_number % 32; + u32 vflre = 0; + s32 ret_val = IXGBE_ERR_MBX; + ++ DEBUGFUNC("ixgbe_check_for_rst_pf"); ++ + switch (hw->mac.type) { + case ixgbe_mac_82599EB: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLRE(reg_offset)); + break; ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: + case ixgbe_mac_X540: + vflre = IXGBE_READ_REG(hw, IXGBE_VFLREC(reg_offset)); + break; +@@ -325,7 +606,7 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) + } + + if (vflre & (1 << vf_shift)) { +- ret_val = 0; ++ ret_val = IXGBE_SUCCESS; + IXGBE_WRITE_REG(hw, IXGBE_VFLREC(reg_offset), (1 << vf_shift)); + hw->mbx.stats.rsts++; + } +@@ -340,18 +621,24 @@ static s32 ixgbe_check_for_rst_pf(struct ixgbe_hw *hw, u16 vf_number) + * + * return SUCCESS if we obtained the mailbox lock + **/ +-static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) ++STATIC s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) + { + s32 ret_val = IXGBE_ERR_MBX; + u32 p2v_mailbox; + ++ DEBUGFUNC("ixgbe_obtain_mbx_lock_pf"); ++ + /* Take ownership of the buffer */ + IXGBE_WRITE_REG(hw, IXGBE_PFMAILBOX(vf_number), IXGBE_PFMAILBOX_PFU); + + /* reserve mailbox for vf use */ + p2v_mailbox = IXGBE_READ_REG(hw, IXGBE_PFMAILBOX(vf_number)); + if (p2v_mailbox & IXGBE_PFMAILBOX_PFU) +- ret_val = 0; ++ ret_val = IXGBE_SUCCESS; ++ else ++ ERROR_REPORT2(IXGBE_ERROR_POLLING, ++ "Failed to obtain mailbox lock for VF%d", vf_number); ++ + + return ret_val; + } +@@ -365,12 +652,14 @@ static s32 ixgbe_obtain_mbx_lock_pf(struct ixgbe_hw *hw, u16 vf_number) + * + * returns SUCCESS if it successfully copied message into the buffer + **/ +-static s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, ++STATIC s32 ixgbe_write_mbx_pf(struct ixgbe_hw *hw, u32 *msg, 
u16 size, + u16 vf_number) + { + s32 ret_val; + u16 i; + ++ DEBUGFUNC("ixgbe_write_mbx_pf"); ++ + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) +@@ -406,12 +695,14 @@ out_no_write: + * memory buffer. The presumption is that the caller knows that there was + * a message due to a VF request so no polling for message is needed. + **/ +-static s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, ++STATIC s32 ixgbe_read_mbx_pf(struct ixgbe_hw *hw, u32 *msg, u16 size, + u16 vf_number) + { + s32 ret_val; + u16 i; + ++ DEBUGFUNC("ixgbe_read_mbx_pf"); ++ + /* lock the mailbox to prevent pf/vf race condition */ + ret_val = ixgbe_obtain_mbx_lock_pf(hw, vf_number); + if (ret_val) +@@ -431,7 +722,6 @@ out_no_read: + return ret_val; + } + +-#ifdef CONFIG_PCI_IOV + /** + * ixgbe_init_mbx_params_pf - set initial values for pf mailbox + * @hw: pointer to the HW structure +@@ -443,29 +733,28 @@ void ixgbe_init_mbx_params_pf(struct ixgbe_hw *hw) + struct ixgbe_mbx_info *mbx = &hw->mbx; + + if (hw->mac.type != ixgbe_mac_82599EB && ++ hw->mac.type != ixgbe_mac_X550 && ++ hw->mac.type != ixgbe_mac_X550EM_x && ++ hw->mac.type != ixgbe_mac_X550EM_a && + hw->mac.type != ixgbe_mac_X540) + return; + + mbx->timeout = 0; + mbx->usec_delay = 0; + ++ mbx->size = IXGBE_VFMAILBOX_SIZE; ++ ++ mbx->ops.read = ixgbe_read_mbx_pf; ++ mbx->ops.write = ixgbe_write_mbx_pf; ++ mbx->ops.read_posted = ixgbe_read_posted_mbx; ++ mbx->ops.write_posted = ixgbe_write_posted_mbx; ++ mbx->ops.check_for_msg = ixgbe_check_for_msg_pf; ++ mbx->ops.check_for_ack = ixgbe_check_for_ack_pf; ++ mbx->ops.check_for_rst = ixgbe_check_for_rst_pf; ++ + mbx->stats.msgs_tx = 0; + mbx->stats.msgs_rx = 0; + mbx->stats.reqs = 0; + mbx->stats.acks = 0; + mbx->stats.rsts = 0; +- +- mbx->size = IXGBE_VFMAILBOX_SIZE; + } +-#endif /* CONFIG_PCI_IOV */ +- +-struct ixgbe_mbx_operations mbx_ops_generic = { +- .read = ixgbe_read_mbx_pf, +- .write = 
ixgbe_write_mbx_pf, +- .read_posted = ixgbe_read_posted_mbx, +- .write_posted = ixgbe_write_posted_mbx, +- .check_for_msg = ixgbe_check_for_msg_pf, +- .check_for_ack = ixgbe_check_for_ack_pf, +- .check_for_rst = ixgbe_check_for_rst_pf, +-}; +- +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +index a5cb755..b990c32 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_mbx.h +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ +@@ -31,57 +27,69 @@ + + #include "ixgbe_type.h" + +-#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ +-#define IXGBE_ERR_MBX -100 +- +-#define IXGBE_VFMAILBOX 0x002FC +-#define IXGBE_VFMBMEM 0x00200 +- +-#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ +-#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ +-#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ +-#define IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ +-#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ +- +-#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ +-#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ +-#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ +-#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ +- ++#define IXGBE_VFMAILBOX_SIZE 16 /* 16 32 bit words - 64 bytes */ ++#define IXGBE_ERR_MBX -100 ++ ++#define IXGBE_VFMAILBOX 0x002FC ++#define IXGBE_VFMBMEM 0x00200 ++ ++/* Define mailbox register bits */ ++#define IXGBE_VFMAILBOX_REQ 0x00000001 /* Request for PF Ready bit */ ++#define IXGBE_VFMAILBOX_ACK 0x00000002 /* Ack PF message received */ ++#define IXGBE_VFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ ++#define IXGBE_VFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ ++#define IXGBE_VFMAILBOX_PFSTS 0x00000010 /* PF wrote a message in the MB */ ++#define IXGBE_VFMAILBOX_PFACK 0x00000020 /* PF ack the previous VF msg */ ++#define IXGBE_VFMAILBOX_RSTI 0x00000040 /* PF has reset indication */ ++#define IXGBE_VFMAILBOX_RSTD 0x00000080 /* PF has indicated reset done */ ++#define IXGBE_VFMAILBOX_R2C_BITS 0x000000B0 /* All read to clear bits */ ++ ++#define IXGBE_PFMAILBOX_STS 0x00000001 /* Initiate message send to VF */ ++#define IXGBE_PFMAILBOX_ACK 0x00000002 /* Ack message recv'd from VF */ ++#define IXGBE_PFMAILBOX_VFU 0x00000004 /* VF owns the mailbox buffer */ ++#define 
IXGBE_PFMAILBOX_PFU 0x00000008 /* PF owns the mailbox buffer */ ++#define IXGBE_PFMAILBOX_RVFU 0x00000010 /* Reset VFU - used when VF stuck */ ++ ++#define IXGBE_MBVFICR_VFREQ_MASK 0x0000FFFF /* bits for VF messages */ ++#define IXGBE_MBVFICR_VFREQ_VF1 0x00000001 /* bit for VF 1 message */ ++#define IXGBE_MBVFICR_VFACK_MASK 0xFFFF0000 /* bits for VF acks */ ++#define IXGBE_MBVFICR_VFACK_VF1 0x00010000 /* bit for VF 1 ack */ + + /* If it's a IXGBE_VF_* msg then it originates in the VF and is sent to the + * PF. The reverse is true if it is IXGBE_PF_*. + * Message ACK's are the value or'd with 0xF0000000 + */ +-#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with +- * this are the ACK */ +-#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with +- * this are the NACK */ +-#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still +- clear to send requests */ +-#define IXGBE_VT_MSGINFO_SHIFT 16 +-/* bits 23:16 are used for exra info for certain messages */ +-#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) ++#define IXGBE_VT_MSGTYPE_ACK 0x80000000 /* Messages below or'd with ++ * this are the ACK */ ++#define IXGBE_VT_MSGTYPE_NACK 0x40000000 /* Messages below or'd with ++ * this are the NACK */ ++#define IXGBE_VT_MSGTYPE_CTS 0x20000000 /* Indicates that VF is still ++ * clear to send requests */ ++#define IXGBE_VT_MSGINFO_SHIFT 16 ++/* bits 23:16 are used for extra info for certain messages */ ++#define IXGBE_VT_MSGINFO_MASK (0xFF << IXGBE_VT_MSGINFO_SHIFT) + + /* definitions to support mailbox API version negotiation */ + + /* +- * Each element denotes a version of the API; existing numbers may not ++ * each element denotes a version of the API; existing numbers may not + * change; any additions must go at the end + */ + enum ixgbe_pfvf_api_rev { + ixgbe_mbox_api_10, /* API version 1.0, linux/freebsd VF driver */ + ixgbe_mbox_api_20, /* API version 2.0, solaris Phase1 VF driver */ + ixgbe_mbox_api_11, /* API 
version 1.1, linux/freebsd VF driver */ ++ ixgbe_mbox_api_12, /* API version 1.2, linux/freebsd VF driver */ ++ ixgbe_mbox_api_13, /* API version 1.3, linux/freebsd VF driver */ + /* This value should always be last */ + ixgbe_mbox_api_unknown, /* indicates that API version is not known */ + }; + + /* mailbox API, legacy requests */ +-#define IXGBE_VF_RESET 0x01 /* VF requests reset */ +-#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ +-#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ +-#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ ++#define IXGBE_VF_RESET 0x01 /* VF requests reset */ ++#define IXGBE_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ ++#define IXGBE_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ ++#define IXGBE_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ + + /* mailbox API, version 1.0 VF requests */ + #define IXGBE_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ +@@ -91,6 +99,19 @@ enum ixgbe_pfvf_api_rev { + /* mailbox API, version 1.1 VF requests */ + #define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ + ++/* mailbox API, version 1.2 VF requests */ ++#define IXGBE_VF_GET_RETA 0x0a /* VF request for RETA */ ++#define IXGBE_VF_GET_RSS_KEY 0x0b /* get RSS key */ ++#define IXGBE_VF_UPDATE_XCAST_MODE 0x0c ++ ++/* mode choices for IXGBE_VF_UPDATE_XCAST_MODE */ ++enum ixgbevf_xcast_modes { ++ IXGBEVF_XCAST_MODE_NONE = 0, ++ IXGBEVF_XCAST_MODE_MULTI, ++ IXGBEVF_XCAST_MODE_ALLMULTI, ++ IXGBEVF_XCAST_MODE_PROMISC, ++}; ++ + /* GET_QUEUES return data indices within the mailbox */ + #define IXGBE_VF_TX_QUEUES 1 /* number of Tx queues supported */ + #define IXGBE_VF_RX_QUEUES 2 /* number of Rx queues supported */ +@@ -98,24 +119,37 @@ enum ixgbe_pfvf_api_rev { + #define IXGBE_VF_DEF_QUEUE 4 /* Default queue offset */ + + /* length of permanent address message returned from PF */ +-#define IXGBE_VF_PERMADDR_MSG_LEN 4 ++#define IXGBE_VF_PERMADDR_MSG_LEN 4 
+ /* word in permanent address message with the current multicast type */ +-#define IXGBE_VF_MC_TYPE_WORD 3 ++#define IXGBE_VF_MC_TYPE_WORD 3 ++ ++#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ + +-#define IXGBE_PF_CONTROL_MSG 0x0100 /* PF control message */ ++/* mailbox API, version 2.0 VF requests */ ++#define IXGBE_VF_API_NEGOTIATE 0x08 /* negotiate API version */ ++#define IXGBE_VF_GET_QUEUES 0x09 /* get queue configuration */ ++#define IXGBE_VF_ENABLE_MACADDR 0x0A /* enable MAC address */ ++#define IXGBE_VF_DISABLE_MACADDR 0x0B /* disable MAC address */ ++#define IXGBE_VF_GET_MACADDRS 0x0C /* get all configured MAC addrs */ ++#define IXGBE_VF_SET_MCAST_PROMISC 0x0D /* enable multicast promiscuous */ ++#define IXGBE_VF_GET_MTU 0x0E /* get bounds on MTU */ ++#define IXGBE_VF_SET_MTU 0x0F /* set a specific MTU */ + +-#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ +-#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ ++/* mailbox API, version 2.0 PF requests */ ++#define IXGBE_PF_TRANSPARENT_VLAN 0x0101 /* enable transparent vlan */ ++ ++#define IXGBE_VF_MBX_INIT_TIMEOUT 2000 /* number of retries on mailbox */ ++#define IXGBE_VF_MBX_INIT_DELAY 500 /* microseconds between retries */ + + s32 ixgbe_read_mbx(struct ixgbe_hw *, u32 *, u16, u16); + s32 ixgbe_write_mbx(struct ixgbe_hw *, u32 *, u16, u16); ++s32 ixgbe_read_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); ++s32 ixgbe_write_posted_mbx(struct ixgbe_hw *, u32 *, u16, u16); + s32 ixgbe_check_for_msg(struct ixgbe_hw *, u16); + s32 ixgbe_check_for_ack(struct ixgbe_hw *, u16); + s32 ixgbe_check_for_rst(struct ixgbe_hw *, u16); +-#ifdef CONFIG_PCI_IOV ++void ixgbe_init_mbx_ops_generic(struct ixgbe_hw *hw); ++void ixgbe_init_mbx_params_vf(struct ixgbe_hw *); + void ixgbe_init_mbx_params_pf(struct ixgbe_hw *); +-#endif /* CONFIG_PCI_IOV */ +- +-extern struct ixgbe_mbx_operations mbx_ops_generic; + + #endif /* _IXGBE_MBX_H_ */ +diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep.h +new file mode 100644 +index 0000000..2e40048 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep.h +@@ -0,0 +1,200 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++ ++/* glue for the OS independent part of ixgbe ++ * includes register access macros ++ */ ++ ++#ifndef _IXGBE_OSDEP_H_ ++#define _IXGBE_OSDEP_H_ ++ ++#include ++#include ++#include ++#include ++#include ++#include "kcompat.h" ++ ++#define IXGBE_CPU_TO_BE16(_x) cpu_to_be16(_x) ++#define IXGBE_BE16_TO_CPU(_x) be16_to_cpu(_x) ++#define IXGBE_CPU_TO_BE32(_x) cpu_to_be32(_x) ++#define IXGBE_BE32_TO_CPU(_x) be32_to_cpu(_x) ++ ++#define msec_delay(_x) msleep(_x) ++ ++#define usec_delay(_x) udelay(_x) ++ ++#define STATIC static ++ ++#define IOMEM __iomem ++ ++#ifdef DBG ++#define ASSERT(_x) BUG_ON(!(_x)) ++#define DEBUGOUT(S) printk(KERN_DEBUG S) ++#define DEBUGOUT1(S, A...) printk(KERN_DEBUG S, ## A) ++#define DEBUGOUT2(S, A...) printk(KERN_DEBUG S, ## A) ++#define DEBUGOUT3(S, A...) 
printk(KERN_DEBUG S, ## A) ++#define DEBUGOUT4(S, A...) printk(KERN_DEBUG S, ## A) ++#define DEBUGOUT5(S, A...) printk(KERN_DEBUG S, ## A) ++#define DEBUGOUT6(S, A...) printk(KERN_DEBUG S, ## A) ++#else ++#define ASSERT(_x) do {} while (0) ++#define DEBUGOUT(S) do {} while (0) ++#define DEBUGOUT1(S, A...) do {} while (0) ++#define DEBUGOUT2(S, A...) do {} while (0) ++#define DEBUGOUT3(S, A...) do {} while (0) ++#define DEBUGOUT4(S, A...) do {} while (0) ++#define DEBUGOUT5(S, A...) do {} while (0) ++#define DEBUGOUT6(S, A...) do {} while (0) ++#endif ++ ++#define DEBUGFUNC(S) do {} while (0) ++ ++#define IXGBE_SFP_DETECT_RETRIES 2 ++ ++struct ixgbe_hw; ++struct ixgbe_msg { ++ u16 msg_enable; ++}; ++struct net_device *ixgbe_hw_to_netdev(const struct ixgbe_hw *hw); ++struct ixgbe_msg *ixgbe_hw_to_msg(const struct ixgbe_hw *hw); ++ ++#define hw_dbg(hw, format, arg...) \ ++ netdev_dbg(ixgbe_hw_to_netdev(hw), format, ## arg) ++#define hw_err(hw, format, arg...) \ ++ netdev_err(ixgbe_hw_to_netdev(hw), format, ## arg) ++#define e_dev_info(format, arg...) \ ++ dev_info(pci_dev_to_dev(adapter->pdev), format, ## arg) ++#define e_dev_warn(format, arg...) \ ++ dev_warn(pci_dev_to_dev(adapter->pdev), format, ## arg) ++#define e_dev_err(format, arg...) \ ++ dev_err(pci_dev_to_dev(adapter->pdev), format, ## arg) ++#define e_dev_notice(format, arg...) \ ++ dev_notice(pci_dev_to_dev(adapter->pdev), format, ## arg) ++#define e_dbg(msglvl, format, arg...) \ ++ netif_dbg(adapter, msglvl, adapter->netdev, format, ## arg) ++#define e_info(msglvl, format, arg...) \ ++ netif_info(adapter, msglvl, adapter->netdev, format, ## arg) ++#define e_err(msglvl, format, arg...) \ ++ netif_err(adapter, msglvl, adapter->netdev, format, ## arg) ++#define e_warn(msglvl, format, arg...) \ ++ netif_warn(adapter, msglvl, adapter->netdev, format, ## arg) ++#define e_crit(msglvl, format, arg...) 
\ ++ netif_crit(adapter, msglvl, adapter->netdev, format, ## arg) ++ ++#define IXGBE_DEAD_READ_RETRIES 10 ++#define IXGBE_DEAD_READ_REG 0xdeadbeefU ++#define IXGBE_FAILED_READ_REG 0xffffffffU ++#define IXGBE_FAILED_READ_CFG_DWORD 0xffffffffU ++#define IXGBE_FAILED_READ_CFG_WORD 0xffffU ++#define IXGBE_FAILED_READ_CFG_BYTE 0xffU ++ ++#define IXGBE_WRITE_REG_ARRAY(a, reg, offset, value) \ ++ IXGBE_WRITE_REG((a), (reg) + ((offset) << 2), (value)) ++ ++#define IXGBE_READ_REG(h, r) ixgbe_read_reg(h, r, false) ++#define IXGBE_R32_Q(h, r) ixgbe_read_reg(h, r, true) ++ ++#define IXGBE_READ_REG_ARRAY(a, reg, offset) ( \ ++ IXGBE_READ_REG((a), (reg) + ((offset) << 2))) ++ ++#ifndef writeq ++#define writeq(val, addr) do { writel((u32) (val), addr); \ ++ writel((u32) (val >> 32), (addr + 4)); \ ++ } while (0); ++#endif ++ ++#define IXGBE_WRITE_FLUSH(a) IXGBE_READ_REG(a, IXGBE_STATUS) ++ ++u32 ixgbe_read_reg(struct ixgbe_hw *, u32 reg, bool quiet); ++extern u16 ixgbe_read_pci_cfg_word(struct ixgbe_hw *hw, u32 reg); ++extern void ixgbe_write_pci_cfg_word(struct ixgbe_hw *hw, u32 reg, u16 value); ++extern void ewarn(struct ixgbe_hw *hw, const char *str); ++ ++#define IXGBE_READ_PCIE_WORD ixgbe_read_pci_cfg_word ++#define IXGBE_WRITE_PCIE_WORD ixgbe_write_pci_cfg_word ++#define IXGBE_EEPROM_GRANT_ATTEMPS 100 ++#define IXGBE_HTONL(_i) htonl(_i) ++#define IXGBE_NTOHL(_i) ntohl(_i) ++#define IXGBE_NTOHS(_i) ntohs(_i) ++#define IXGBE_CPU_TO_LE32(_i) cpu_to_le32(_i) ++#define IXGBE_CPU_TO_LE16(_i) cpu_to_le16(_i) ++#define IXGBE_LE32_TO_CPU(_i) le32_to_cpu(_i) ++#define IXGBE_LE32_TO_CPUS(_i) le32_to_cpus(_i) ++#define EWARN(H, W) ewarn(H, W) ++ ++enum { ++ IXGBE_ERROR_SOFTWARE, ++ IXGBE_ERROR_POLLING, ++ IXGBE_ERROR_INVALID_STATE, ++ IXGBE_ERROR_UNSUPPORTED, ++ IXGBE_ERROR_ARGUMENT, ++ IXGBE_ERROR_CAUTION, ++}; ++ ++#define ERROR_REPORT(level, format, arg...) 
do { \ ++ switch (level) { \ ++ case IXGBE_ERROR_SOFTWARE: \ ++ case IXGBE_ERROR_CAUTION: \ ++ case IXGBE_ERROR_POLLING: \ ++ netif_warn(ixgbe_hw_to_msg(hw), drv, ixgbe_hw_to_netdev(hw), \ ++ format, ## arg); \ ++ break; \ ++ case IXGBE_ERROR_INVALID_STATE: \ ++ case IXGBE_ERROR_UNSUPPORTED: \ ++ case IXGBE_ERROR_ARGUMENT: \ ++ netif_err(ixgbe_hw_to_msg(hw), hw, ixgbe_hw_to_netdev(hw), \ ++ format, ## arg); \ ++ break; \ ++ default: \ ++ break; \ ++ } \ ++} while (0) ++ ++#define ERROR_REPORT1 ERROR_REPORT ++#define ERROR_REPORT2 ERROR_REPORT ++#define ERROR_REPORT3 ERROR_REPORT ++ ++#define UNREFERENCED_XPARAMETER ++#define UNREFERENCED_1PARAMETER(_p) do { \ ++ uninitialized_var(_p); \ ++} while (0) ++#define UNREFERENCED_2PARAMETER(_p, _q) do { \ ++ uninitialized_var(_p); \ ++ uninitialized_var(_q); \ ++} while (0) ++#define UNREFERENCED_3PARAMETER(_p, _q, _r) do { \ ++ uninitialized_var(_p); \ ++ uninitialized_var(_q); \ ++ uninitialized_var(_r); \ ++} while (0) ++#define UNREFERENCED_4PARAMETER(_p, _q, _r, _s) do { \ ++ uninitialized_var(_p); \ ++ uninitialized_var(_q); \ ++ uninitialized_var(_r); \ ++ uninitialized_var(_s); \ ++} while (0) ++ ++#endif /* _IXGBE_OSDEP_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep2.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep2.h +new file mode 100644 +index 0000000..549b353 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_osdep2.h +@@ -0,0 +1,68 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. 
++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_OSDEP2_H_ ++#define _IXGBE_OSDEP2_H_ ++ ++static inline bool ixgbe_removed(void __iomem *addr) ++{ ++ return unlikely(!addr); ++} ++#define IXGBE_REMOVED(a) ixgbe_removed(a) ++ ++static inline void IXGBE_WRITE_REG(struct ixgbe_hw *hw, u32 reg, u32 value) ++{ ++ u8 __iomem *reg_addr; ++ ++ reg_addr = ACCESS_ONCE(hw->hw_addr); ++ if (IXGBE_REMOVED(reg_addr)) ++ return; ++#ifdef DBG ++ switch (reg) { ++ case IXGBE_EIMS: ++ case IXGBE_EIMC: ++ case IXGBE_EIAM: ++ case IXGBE_EIAC: ++ case IXGBE_EICR: ++ case IXGBE_EICS: ++ printk("%s: Reg - 0x%05X, value - 0x%08X\n", __func__, ++ reg, value); ++ default: ++ break; ++ } ++#endif /* DBG */ ++ writel(value, reg_addr + reg); ++} ++ ++static inline void IXGBE_WRITE_REG64(struct ixgbe_hw *hw, u32 reg, u64 value) ++{ ++ u8 __iomem *reg_addr; ++ ++ reg_addr = ACCESS_ONCE(hw->hw_addr); ++ if (IXGBE_REMOVED(reg_addr)) ++ return; ++ writeq(value, reg_addr + reg); ++} ++ ++#endif /* _IXGBE_OSDEP2_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_param.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_param.c +new file mode 100644 +index 0000000..5efd016 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_param.c +@@ -0,0 +1,1256 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. 
++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include ++#include ++ ++#include "ixgbe.h" ++ ++/* This is the only thing that needs to be changed to adjust the ++ * maximum number of ports that the driver can manage. ++ */ ++ ++#define IXGBE_MAX_NIC 32 ++ ++#define OPTION_UNSET -1 ++#define OPTION_DISABLED 0 ++#define OPTION_ENABLED 1 ++ ++#define STRINGIFY(foo) #foo /* magic for getting defines into strings */ ++#define XSTRINGIFY(bar) STRINGIFY(bar) ++ ++/* All parameters are treated the same, as an integer array of values. ++ * This macro just reduces the need to repeat the same declaration code ++ * over and over (plus this helps to avoid typo bugs). ++ */ ++ ++#define IXGBE_PARAM_INIT { [0 ... IXGBE_MAX_NIC] = OPTION_UNSET } ++#ifndef module_param_array ++/* Module Parameters are always initialized to -1, so that the driver ++ * can tell the difference between no user specified value or the ++ * user asking for the default value. ++ * The true default values are loaded in when ixgbe_check_options is called. ++ * ++ * This is a GCC extension to ANSI C. ++ * See the item "Labelled Elements in Initializers" in the section ++ * "Extensions to the C Language Family" of the GCC documentation. 
++ */ ++ ++#define IXGBE_PARAM(X, desc) \ ++ static const int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \ ++ MODULE_PARM(X, "1-" __MODULE_STRING(IXGBE_MAX_NIC) "i"); \ ++ MODULE_PARM_DESC(X, desc); ++#else ++#define IXGBE_PARAM(X, desc) \ ++ static int __devinitdata X[IXGBE_MAX_NIC+1] = IXGBE_PARAM_INIT; \ ++ static unsigned int num_##X; \ ++ module_param_array_named(X, X, int, &num_##X, 0); \ ++ MODULE_PARM_DESC(X, desc); ++#endif ++ ++IXGBE_PARAM(EEE, "Energy Efficient Ethernet (EEE) ,0=disabled, 1=enabled )" ++ "default EEE disable"); ++/* IntMode (Interrupt Mode) ++ * ++ * Valid Range: 0-2 ++ * - 0 - Legacy Interrupt ++ * - 1 - MSI Interrupt ++ * - 2 - MSI-X Interrupt(s) ++ * ++ * Default Value: 2 ++ */ ++IXGBE_PARAM(InterruptType, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " ++ "default IntMode (deprecated)"); ++IXGBE_PARAM(IntMode, "Change Interrupt Mode (0=Legacy, 1=MSI, 2=MSI-X), " ++ "default 2"); ++#define IXGBE_INT_LEGACY 0 ++#define IXGBE_INT_MSI 1 ++#define IXGBE_INT_MSIX 2 ++ ++/* MQ - Multiple Queue enable/disable ++ * ++ * Valid Range: 0, 1 ++ * - 0 - disables MQ ++ * - 1 - enables MQ ++ * ++ * Default Value: 1 ++ */ ++ ++IXGBE_PARAM(MQ, "Disable or enable Multiple Queues, default 1"); ++ ++#if IS_ENABLED(CONFIG_DCA) ++/* DCA - Direct Cache Access (DCA) Control ++ * ++ * This option allows the device to hint to DCA enabled processors ++ * which CPU should have its cache warmed with the data being ++ * transferred over PCIe. This can increase performance by reducing ++ * cache misses. 
ixgbe hardware supports DCA for: ++ * tx descriptor writeback ++ * rx descriptor writeback ++ * rx data ++ * rx data header only (in packet split mode) ++ * ++ * enabling option 2 can cause cache thrash in some tests, particularly ++ * if the CPU is completely utilized ++ * ++ * Valid Range: 0 - 2 ++ * - 0 - disables DCA ++ * - 1 - enables DCA ++ * - 2 - enables DCA with rx data included ++ * ++ * Default Value: 2 ++ */ ++ ++#define IXGBE_MAX_DCA 2 ++ ++IXGBE_PARAM(DCA, "Disable or enable Direct Cache Access, 0=disabled, " ++ "1=descriptor only, 2=descriptor and data"); ++#endif /* CONFIG_DCA */ ++ ++/* RSS - Receive-Side Scaling (RSS) Descriptor Queues ++ * ++ * Valid Range: 0-16 ++ * - 0 - enables RSS and sets the Desc. Q's to min(16, num_online_cpus()). ++ * - 1-16 - enables RSS and sets the Desc. Q's to the specified value. ++ * ++ * Default Value: 0 ++ */ ++ ++IXGBE_PARAM(RSS, "Number of Receive-Side Scaling Descriptor Queues, " ++ "default 0=number of cpus"); ++ ++/* VMDQ - Virtual Machine Device Queues (VMDQ) ++ * ++ * Valid Range: 1-16 ++ * - 0/1 Disables VMDQ by allocating only a single queue. ++ * - 2-16 - enables VMDQ and sets the Desc. Q's to the specified value. 
++ * ++ * Default Value: 8 ++ */ ++ ++#define IXGBE_DEFAULT_NUM_VMDQ 8 ++ ++IXGBE_PARAM(VMDQ, "Number of Virtual Machine Device Queues: 0/1 = disable (1 queue) " ++ "2-16 enable (default=" XSTRINGIFY(IXGBE_DEFAULT_NUM_VMDQ) ")"); ++ ++#ifdef CONFIG_PCI_IOV ++/* max_vfs - SR I/O Virtualization ++ * ++ * Valid Range: 0-63 ++ * - 0 Disables SR-IOV ++ * - 1-63 - enables SR-IOV and sets the number of VFs enabled ++ * ++ * Default Value: 0 ++ */ ++ ++#define MAX_SRIOV_VFS 63 ++ ++IXGBE_PARAM(max_vfs, "Number of Virtual Functions: 0 = disable (default), " ++ "1-" XSTRINGIFY(MAX_SRIOV_VFS) " = enable " ++ "this many VFs"); ++ ++/* VEPA - Set internal bridge to VEPA mode ++ * ++ * Valid Range: 0-1 ++ * - 0 Set bridge to VEB mode ++ * - 1 Set bridge to VEPA mode ++ * ++ * Default Value: 0 ++ */ ++/* ++ *Note: ++ *===== ++ * This provides ability to ensure VEPA mode on the internal bridge even if ++ * the kernel does not support the netdev bridge setting operations. ++*/ ++IXGBE_PARAM(VEPA, "VEPA Bridge Mode: 0 = VEB (default), 1 = VEPA"); ++#endif ++ ++/* Interrupt Throttle Rate (interrupts/sec) ++ * ++ * Valid Range: 956-488281 (0=off, 1=dynamic) ++ * ++ * Default Value: 1 ++ */ ++#define DEFAULT_ITR 1 ++IXGBE_PARAM(InterruptThrottleRate, "Maximum interrupts per second, per vector, " ++ "(0,1,956-488281), default 1"); ++#define MAX_ITR IXGBE_MAX_INT_RATE ++#define MIN_ITR IXGBE_MIN_INT_RATE ++ ++#ifndef IXGBE_NO_LLI ++ ++/* LLIPort (Low Latency Interrupt TCP Port) ++ * ++ * Valid Range: 0 - 65535 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IXGBE_PARAM(LLIPort, "Low Latency Interrupt TCP Port (0-65535)"); ++ ++#define DEFAULT_LLIPORT 0 ++#define MAX_LLIPORT 0xFFFF ++#define MIN_LLIPORT 0 ++ ++/* LLIPush (Low Latency Interrupt on TCP Push flag) ++ * ++ * Valid Range: 0,1 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IXGBE_PARAM(LLIPush, "Low Latency Interrupt on TCP Push flag (0,1)"); ++ ++#define DEFAULT_LLIPUSH 0 ++#define MAX_LLIPUSH 1 ++#define MIN_LLIPUSH 0 ++ ++/* 
LLISize (Low Latency Interrupt on Packet Size) ++ * ++ * Valid Range: 0 - 1500 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IXGBE_PARAM(LLISize, "Low Latency Interrupt on Packet Size (0-1500)"); ++ ++#define DEFAULT_LLISIZE 0 ++#define MAX_LLISIZE 1500 ++#define MIN_LLISIZE 0 ++ ++/* LLIEType (Low Latency Interrupt Ethernet Type) ++ * ++ * Valid Range: 0 - 0x8fff ++ * ++ * Default Value: 0 (disabled) ++ */ ++IXGBE_PARAM(LLIEType, "Low Latency Interrupt Ethernet Protocol Type"); ++ ++#define DEFAULT_LLIETYPE 0 ++#define MAX_LLIETYPE 0x8fff ++#define MIN_LLIETYPE 0 ++ ++/* LLIVLANP (Low Latency Interrupt on VLAN priority threshold) ++ * ++ * Valid Range: 0 - 7 ++ * ++ * Default Value: 0 (disabled) ++ */ ++IXGBE_PARAM(LLIVLANP, "Low Latency Interrupt on VLAN priority threshold"); ++ ++#define DEFAULT_LLIVLANP 0 ++#define MAX_LLIVLANP 7 ++#define MIN_LLIVLANP 0 ++ ++#endif /* IXGBE_NO_LLI */ ++#ifdef HAVE_TX_MQ ++/* Flow Director packet buffer allocation level ++ * ++ * Valid Range: 1-3 ++ * 1 = 8k hash/2k perfect, ++ * 2 = 16k hash/4k perfect, ++ * 3 = 32k hash/8k perfect ++ * ++ * Default Value: 0 ++ */ ++IXGBE_PARAM(FdirPballoc, "Flow Director packet buffer allocation level:\n" ++ "\t\t\t1 = 8k hash filters or 2k perfect filters\n" ++ "\t\t\t2 = 16k hash filters or 4k perfect filters\n" ++ "\t\t\t3 = 32k hash filters or 8k perfect filters"); ++ ++#define IXGBE_DEFAULT_FDIR_PBALLOC IXGBE_FDIR_PBALLOC_64K ++ ++/* Software ATR packet sample rate ++ * ++ * Valid Range: 0-255 0 = off, 1-255 = rate of Tx packet inspection ++ * ++ * Default Value: 20 ++ */ ++IXGBE_PARAM(AtrSampleRate, "Software ATR Tx packet sample rate"); ++ ++#define IXGBE_MAX_ATR_SAMPLE_RATE 255 ++#define IXGBE_MIN_ATR_SAMPLE_RATE 1 ++#define IXGBE_ATR_SAMPLE_RATE_OFF 0 ++#define IXGBE_DEFAULT_ATR_SAMPLE_RATE 20 ++#endif /* HAVE_TX_MQ */ ++ ++#if IS_ENABLED(CONFIG_FCOE) ++/* FCoE - Fibre Channel over Ethernet Offload Enable/Disable ++ * ++ * Valid Range: 0, 1 ++ * - 0 - disables FCoE Offload ++ * - 1 - 
enables FCoE Offload ++ * ++ * Default Value: 1 ++ */ ++IXGBE_PARAM(FCoE, "Disable or enable FCoE Offload, default 1"); ++#endif /* CONFIG_FCOE */ ++ ++/* Enable/disable Malicious Driver Detection ++ * ++ * Valid Values: 0(off), 1(on) ++ * ++ * Default Value: 1 ++ */ ++IXGBE_PARAM(MDD, "Malicious Driver Detection: (0,1), default 1 = on"); ++ ++/* Enable/disable Large Receive Offload ++ * ++ * Valid Values: 0(off), 1(on) ++ * ++ * Default Value: 1 ++ */ ++IXGBE_PARAM(LRO, "Large Receive Offload (0,1), default 0 = off"); ++ ++/* Enable/disable support for untested SFP+ modules on 82599-based adapters ++ * ++ * Valid Values: 0(Disable), 1(Enable) ++ * ++ * Default Value: 0 ++ */ ++IXGBE_PARAM(allow_unsupported_sfp, "Allow unsupported and untested " ++ "SFP+ modules on 82599 based adapters, default 0 = Disable"); ++ ++/* Enable/disable support for DMA coalescing ++ * ++ * Valid Values: 0(off), 41 - 10000(on) ++ * ++ * Default Value: 0 ++ */ ++IXGBE_PARAM(dmac_watchdog, ++ "DMA coalescing watchdog in microseconds (0,41-10000), default 0 = off"); ++ ++/* Enable/disable support for VXLAN rx checksum offload ++ * ++ * Valid Values: 0(Disable), 1(Enable) ++ * ++ * Default Value: 1 on hardware that supports it ++ */ ++IXGBE_PARAM(vxlan_rx, ++ "VXLAN receive checksum offload (0,1), default 1 = Enable"); ++ ++ ++struct ixgbe_option { ++ enum { enable_option, range_option, list_option } type; ++ const char *name; ++ const char *err; ++ const char *msg; ++ int def; ++ union { ++ struct { /* range_option info */ ++ int min; ++ int max; ++ } r; ++ struct { /* list_option info */ ++ int nr; ++ const struct ixgbe_opt_list { ++ int i; ++ char *str; ++ } *p; ++ } l; ++ } arg; ++}; ++ ++#ifndef IXGBE_NO_LLI ++#ifdef module_param_array ++/** ++ * helper function to determine LLI support ++ * ++ * LLI is only supported for 82599 and X540 ++ * LLIPush is not supported on 82599 ++ **/ ++static bool __devinit ixgbe_lli_supported(struct ixgbe_adapter *adapter, ++ struct ixgbe_option *opt) 
++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ if (hw->mac.type == ixgbe_mac_82599EB) { ++ ++ if (LLIPush[adapter->bd_number] > 0) ++ goto not_supp; ++ ++ return true; ++ } ++ ++ if (hw->mac.type == ixgbe_mac_X540) ++ return true; ++ ++not_supp: ++ DPRINTK(PROBE, INFO, "%s not supported on this HW\n", opt->name); ++ return false; ++} ++#endif /* module_param_array */ ++#endif /* IXGBE_NO_LLI */ ++ ++static int __devinit ixgbe_validate_option(unsigned int *value, ++ struct ixgbe_option *opt) ++{ ++ if (*value == OPTION_UNSET) { ++ printk(KERN_INFO "ixgbe: Invalid %s specified (%d), %s\n", ++ opt->name, *value, opt->err); ++ *value = opt->def; ++ return 0; ++ } ++ ++ switch (opt->type) { ++ case enable_option: ++ switch (*value) { ++ case OPTION_ENABLED: ++ printk(KERN_INFO "ixgbe: %s Enabled\n", opt->name); ++ return 0; ++ case OPTION_DISABLED: ++ printk(KERN_INFO "ixgbe: %s Disabled\n", opt->name); ++ return 0; ++ } ++ break; ++ case range_option: ++ if ((*value >= opt->arg.r.min && *value <= opt->arg.r.max) || ++ *value == opt->def) { ++ if (opt->msg) ++ printk(KERN_INFO "ixgbe: %s set to %d, %s\n", ++ opt->name, *value, opt->msg); ++ else ++ printk(KERN_INFO "ixgbe: %s set to %d\n", ++ opt->name, *value); ++ return 0; ++ } ++ break; ++ case list_option: { ++ int i; ++ ++ for (i = 0; i < opt->arg.l.nr; i++) { ++ const struct ixgbe_opt_list *ent = &opt->arg.l.p[i]; ++ if (*value == ent->i) { ++ if (ent->str[0] != '\0') ++ printk(KERN_INFO "%s\n", ent->str); ++ return 0; ++ } ++ } ++ } ++ break; ++ default: ++ BUG(); ++ } ++ ++ printk(KERN_INFO "ixgbe: Invalid %s specified (%d), %s\n", ++ opt->name, *value, opt->err); ++ *value = opt->def; ++ return -1; ++} ++ ++#define LIST_LEN(l) (sizeof(l) / sizeof(l[0])) ++ ++/** ++ * ixgbe_check_options - Range Checking for Command Line Parameters ++ * @adapter: board private structure ++ * ++ * This routine checks all command line parameters for valid user ++ * input. 
If an invalid value is given, or if no user specified ++ * value exists, a default value is used. The final value is stored ++ * in a variable in the adapter structure. ++ **/ ++void __devinit ixgbe_check_options(struct ixgbe_adapter *adapter) ++{ ++ unsigned int mdd; ++ int bd = adapter->bd_number; ++ u32 *aflags = &adapter->flags; ++ struct ixgbe_ring_feature *feature = adapter->ring_feature; ++ unsigned int vmdq; ++ ++ if (bd >= IXGBE_MAX_NIC) { ++ printk(KERN_NOTICE ++ "Warning: no configuration for board #%d\n", bd); ++ printk(KERN_NOTICE "Using defaults for all values\n"); ++#ifndef module_param_array ++ bd = IXGBE_MAX_NIC; ++#endif ++ } ++ ++ { /* Interrupt Mode */ ++ unsigned int int_mode; ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Interrupt Mode", ++ .err = ++ "using default of " __MODULE_STRING(IXGBE_INT_MSIX), ++ .def = IXGBE_INT_MSIX, ++ .arg = { .r = { .min = IXGBE_INT_LEGACY, ++ .max = IXGBE_INT_MSIX} } ++ }; ++ ++#ifdef module_param_array ++ if (num_IntMode > bd || num_InterruptType > bd) { ++#endif ++ int_mode = IntMode[bd]; ++ if (int_mode == OPTION_UNSET) ++ int_mode = InterruptType[bd]; ++ ixgbe_validate_option(&int_mode, &opt); ++ switch (int_mode) { ++ case IXGBE_INT_MSIX: ++ if (!(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) ++ printk(KERN_INFO ++ "Ignoring MSI-X setting; " ++ "support unavailable\n"); ++ break; ++ case IXGBE_INT_MSI: ++ if (!(*aflags & IXGBE_FLAG_MSI_CAPABLE)) { ++ printk(KERN_INFO ++ "Ignoring MSI setting; " ++ "support unavailable\n"); ++ } else { ++ *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; ++ } ++ break; ++ case IXGBE_INT_LEGACY: ++ default: ++ *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; ++ break; ++ } ++#ifdef module_param_array ++ } else { ++ /* default settings */ ++ if (*aflags & IXGBE_FLAG_MSIX_CAPABLE) { ++ *aflags |= IXGBE_FLAG_MSI_CAPABLE; ++ } else { ++ *aflags &= ~IXGBE_FLAG_MSIX_CAPABLE; ++ *aflags &= ~IXGBE_FLAG_MSI_CAPABLE; ++ } ++ } ++#endif ++ } ++ { /* 
Multiple Queue Support */ ++ static struct ixgbe_option opt = { ++ .type = enable_option, ++ .name = "Multiple Queue Support", ++ .err = "defaulting to Enabled", ++ .def = OPTION_ENABLED ++ }; ++ ++#ifdef module_param_array ++ if (num_MQ > bd) { ++#endif ++ unsigned int mq = MQ[bd]; ++ ixgbe_validate_option(&mq, &opt); ++ if (mq) ++ *aflags |= IXGBE_FLAG_MQ_CAPABLE; ++ else ++ *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; ++#ifdef module_param_array ++ } else { ++ *aflags |= IXGBE_FLAG_MQ_CAPABLE; ++ } ++#endif ++ /* Check Interoperability */ ++ if ((*aflags & IXGBE_FLAG_MQ_CAPABLE) && ++ !(*aflags & IXGBE_FLAG_MSIX_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "Multiple queues are not supported while MSI-X " ++ "is disabled. Disabling Multiple Queues.\n"); ++ *aflags &= ~IXGBE_FLAG_MQ_CAPABLE; ++ } ++ } ++#if IS_ENABLED(CONFIG_DCA) ++ { /* Direct Cache Access (DCA) */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Direct Cache Access (DCA)", ++ .err = "defaulting to Enabled", ++ .def = IXGBE_MAX_DCA, ++ .arg = { .r = { .min = OPTION_DISABLED, ++ .max = IXGBE_MAX_DCA} } ++ }; ++ unsigned int dca = opt.def; ++ ++#ifdef module_param_array ++ if (num_DCA > bd) { ++#endif ++ dca = DCA[bd]; ++ ixgbe_validate_option(&dca, &opt); ++ if (!dca) ++ *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; ++ ++ /* Check Interoperability */ ++ if (!(*aflags & IXGBE_FLAG_DCA_CAPABLE)) { ++ DPRINTK(PROBE, INFO, "DCA is disabled\n"); ++ *aflags &= ~IXGBE_FLAG_DCA_ENABLED; ++ } ++ ++ if (dca == IXGBE_MAX_DCA) { ++ DPRINTK(PROBE, INFO, ++ "DCA enabled for rx data\n"); ++ adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; ++ } ++#ifdef module_param_array ++ } else { ++ /* make sure to clear the capability flag if the ++ * option is disabled by default above */ ++ if (opt.def == OPTION_DISABLED) ++ *aflags &= ~IXGBE_FLAG_DCA_CAPABLE; ++ } ++#endif ++ if (dca == IXGBE_MAX_DCA) ++ adapter->flags |= IXGBE_FLAG_DCA_ENABLED_DATA; ++ } ++#endif /* CONFIG_DCA */ ++ { /* Receive-Side Scaling (RSS) */ ++ 
static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Receive-Side Scaling (RSS)", ++ .err = "using default.", ++ .def = 0, ++ .arg = { .r = { .min = 0, ++ .max = 16} } ++ }; ++ unsigned int rss = RSS[bd]; ++ /* adjust Max allowed RSS queues based on MAC type */ ++ opt.arg.r.max = ixgbe_max_rss_indices(adapter); ++ ++#ifdef module_param_array ++ if (num_RSS > bd) { ++#endif ++ ixgbe_validate_option(&rss, &opt); ++ /* base it off num_online_cpus() with hardware limit */ ++ if (!rss) ++ rss = min_t(int, opt.arg.r.max, ++ num_online_cpus()); ++ else ++ feature[RING_F_FDIR].limit = rss; ++ ++ feature[RING_F_RSS].limit = rss; ++#ifdef module_param_array ++ } else if (opt.def == 0) { ++ rss = min_t(int, ixgbe_max_rss_indices(adapter), ++ num_online_cpus()); ++ feature[RING_F_RSS].limit = rss; ++ } ++#endif ++ /* Check Interoperability */ ++ if (rss > 1) { ++ if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "Multiqueue is disabled. " ++ "Limiting RSS.\n"); ++ feature[RING_F_RSS].limit = 1; ++ } ++ } ++ } ++ { /* Virtual Machine Device Queues (VMDQ) */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Virtual Machine Device Queues (VMDQ)", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED, ++ .arg = { .r = { .min = OPTION_DISABLED, ++ .max = IXGBE_MAX_VMDQ_INDICES ++ } } ++ }; ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_82598EB: ++ /* 82598 only supports up to 16 pools */ ++ opt.arg.r.max = 16; ++ break; ++ default: ++ break; ++ } ++ ++#ifdef module_param_array ++ if (num_VMDQ > bd) { ++#endif ++ vmdq = VMDQ[bd]; ++ ++ ixgbe_validate_option(&vmdq, &opt); ++ ++ /* zero or one both mean disabled from our driver's ++ * perspective */ ++ if (vmdq > 1) { ++ *aflags |= IXGBE_FLAG_VMDQ_ENABLED; ++ } else ++ *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; ++ ++ feature[RING_F_VMDQ].limit = vmdq; ++#ifdef module_param_array ++ } else { ++ if (opt.def == OPTION_DISABLED) ++ *aflags &= 
~IXGBE_FLAG_VMDQ_ENABLED; ++ else ++ *aflags |= IXGBE_FLAG_VMDQ_ENABLED; ++ ++ feature[RING_F_VMDQ].limit = opt.def; ++ } ++#endif ++ /* Check Interoperability */ ++ if (*aflags & IXGBE_FLAG_VMDQ_ENABLED) { ++ if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "VMDQ is not supported while multiple " ++ "queues are disabled. " ++ "Disabling VMDQ.\n"); ++ *aflags &= ~IXGBE_FLAG_VMDQ_ENABLED; ++ feature[RING_F_VMDQ].limit = 0; ++ } ++ } ++ } ++#ifdef CONFIG_PCI_IOV ++ { /* Single Root I/O Virtualization (SR-IOV) */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "I/O Virtualization (IOV)", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED, ++ .arg = { .r = { .min = OPTION_DISABLED, ++ .max = MAX_SRIOV_VFS} } ++ }; ++ ++#ifdef module_param_array ++ if (num_max_vfs > bd) { ++#endif ++ unsigned int vfs = max_vfs[bd]; ++ if (ixgbe_validate_option(&vfs, &opt)) { ++ vfs = 0; ++ DPRINTK(PROBE, INFO, ++ "max_vfs out of range " ++ "Disabling SR-IOV.\n"); ++ } ++ ++ adapter->max_vfs = vfs; ++ ++ if (vfs) ++ *aflags |= IXGBE_FLAG_SRIOV_ENABLED; ++ else ++ *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; ++#ifdef module_param_array ++ } else { ++ if (opt.def == OPTION_DISABLED) { ++ adapter->max_vfs = 0; ++ *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; ++ } else { ++ adapter->max_vfs = opt.def; ++ *aflags |= IXGBE_FLAG_SRIOV_ENABLED; ++ } ++ } ++#endif ++ ++ /* Check Interoperability */ ++ if (*aflags & IXGBE_FLAG_SRIOV_ENABLED) { ++ if (!(*aflags & IXGBE_FLAG_SRIOV_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "IOV is not supported on this " ++ "hardware. Disabling IOV.\n"); ++ *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; ++ adapter->max_vfs = 0; ++ } else if (!(*aflags & IXGBE_FLAG_MQ_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "IOV is not supported while multiple " ++ "queues are disabled. 
" ++ "Disabling IOV.\n"); ++ *aflags &= ~IXGBE_FLAG_SRIOV_ENABLED; ++ adapter->max_vfs = 0; ++ } ++ } ++ } ++ { /* VEPA Bridge Mode enable for SR-IOV mode */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "VEPA Bridge Mode Enable", ++ .err = "defaulting to disabled", ++ .def = OPTION_DISABLED, ++ .arg = { .r = { .min = OPTION_DISABLED, ++ .max = OPTION_ENABLED} } ++ }; ++ ++#ifdef module_param_array ++ if (num_VEPA > bd) { ++#endif ++ unsigned int vepa = VEPA[bd]; ++ ixgbe_validate_option(&vepa, &opt); ++ if (vepa) ++ adapter->flags |= ++ IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; ++#ifdef module_param_array ++ } else { ++ if (opt.def == OPTION_ENABLED) ++ adapter->flags |= ++ IXGBE_FLAG_SRIOV_VEPA_BRIDGE_MODE; ++ } ++#endif ++ } ++#endif /* CONFIG_PCI_IOV */ ++ { /* Interrupt Throttling Rate */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Interrupt Throttling Rate (ints/sec)", ++ .err = "using default of "__MODULE_STRING(DEFAULT_ITR), ++ .def = DEFAULT_ITR, ++ .arg = { .r = { .min = MIN_ITR, ++ .max = MAX_ITR } } ++ }; ++ ++#ifdef module_param_array ++ if (num_InterruptThrottleRate > bd) { ++#endif ++ u32 itr = InterruptThrottleRate[bd]; ++ switch (itr) { ++ case 0: ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ adapter->rx_itr_setting = 0; ++ break; ++ case 1: ++ DPRINTK(PROBE, INFO, "dynamic interrupt " ++ "throttling enabled\n"); ++ adapter->rx_itr_setting = 1; ++ break; ++ default: ++ ixgbe_validate_option(&itr, &opt); ++ /* the first bit is used as control */ ++ adapter->rx_itr_setting = (1000000/itr) << 2; ++ break; ++ } ++ adapter->tx_itr_setting = adapter->rx_itr_setting; ++#ifdef module_param_array ++ } else { ++ adapter->rx_itr_setting = opt.def; ++ adapter->tx_itr_setting = opt.def; ++ } ++#endif ++ } ++#ifndef IXGBE_NO_LLI ++ { /* Low Latency Interrupt TCP Port*/ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Low Latency Interrupt TCP Port", ++ .err = "using 
default of " ++ __MODULE_STRING(DEFAULT_LLIPORT), ++ .def = DEFAULT_LLIPORT, ++ .arg = { .r = { .min = MIN_LLIPORT, ++ .max = MAX_LLIPORT } } ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIPort > bd && ixgbe_lli_supported(adapter, &opt)) { ++#endif ++ adapter->lli_port = LLIPort[bd]; ++ if (adapter->lli_port) { ++ ixgbe_validate_option(&adapter->lli_port, &opt); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_port = opt.def; ++ } ++#endif ++ } ++ { /* Low Latency Interrupt on Packet Size */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Low Latency Interrupt on Packet Size", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_LLISIZE), ++ .def = DEFAULT_LLISIZE, ++ .arg = { .r = { .min = MIN_LLISIZE, ++ .max = MAX_LLISIZE } } ++ }; ++ ++#ifdef module_param_array ++ if (num_LLISize > bd && ixgbe_lli_supported(adapter, &opt)) { ++#endif ++ adapter->lli_size = LLISize[bd]; ++ if (adapter->lli_size) { ++ ixgbe_validate_option(&adapter->lli_size, &opt); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_size = opt.def; ++ } ++#endif ++ } ++ { /*Low Latency Interrupt on TCP Push flag*/ ++ static struct ixgbe_option opt = { ++ .type = enable_option, ++ .name = "Low Latency Interrupt on TCP Push flag", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIPush > bd && ixgbe_lli_supported(adapter, &opt)) { ++#endif ++ unsigned int lli_push = LLIPush[bd]; ++ ++ ixgbe_validate_option(&lli_push, &opt); ++ if (lli_push) ++ *aflags |= IXGBE_FLAG_LLI_PUSH; ++ else ++ *aflags &= ~IXGBE_FLAG_LLI_PUSH; ++#ifdef module_param_array ++ } else { ++ *aflags &= ~IXGBE_FLAG_LLI_PUSH; ++ } ++#endif ++ } ++ { /* Low Latency Interrupt EtherType*/ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Low Latency 
Interrupt on Ethernet Protocol " ++ "Type", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_LLIETYPE), ++ .def = DEFAULT_LLIETYPE, ++ .arg = { .r = { .min = MIN_LLIETYPE, ++ .max = MAX_LLIETYPE } } ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIEType > bd && ixgbe_lli_supported(adapter, &opt)) { ++#endif ++ adapter->lli_etype = LLIEType[bd]; ++ if (adapter->lli_etype) { ++ ixgbe_validate_option(&adapter->lli_etype, ++ &opt); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_etype = opt.def; ++ } ++#endif ++ } ++ { /* LLI VLAN Priority */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Low Latency Interrupt on VLAN priority " ++ "threshold", ++ .err = "using default of " ++ __MODULE_STRING(DEFAULT_LLIVLANP), ++ .def = DEFAULT_LLIVLANP, ++ .arg = { .r = { .min = MIN_LLIVLANP, ++ .max = MAX_LLIVLANP } } ++ }; ++ ++#ifdef module_param_array ++ if (num_LLIVLANP > bd && ixgbe_lli_supported(adapter, &opt)) { ++#endif ++ adapter->lli_vlan_pri = LLIVLANP[bd]; ++ if (adapter->lli_vlan_pri) { ++ ixgbe_validate_option(&adapter->lli_vlan_pri, ++ &opt); ++ } else { ++ DPRINTK(PROBE, INFO, "%s turned off\n", ++ opt.name); ++ } ++#ifdef module_param_array ++ } else { ++ adapter->lli_vlan_pri = opt.def; ++ } ++#endif ++ } ++#endif /* IXGBE_NO_LLI */ ++#ifdef HAVE_TX_MQ ++ { /* Flow Director packet buffer allocation */ ++ unsigned int fdir_pballoc_mode; ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Flow Director packet buffer allocation", ++ .err = "using default of " ++ __MODULE_STRING(IXGBE_DEFAULT_FDIR_PBALLOC), ++ .def = IXGBE_DEFAULT_FDIR_PBALLOC, ++ .arg = {.r = {.min = IXGBE_FDIR_PBALLOC_64K, ++ .max = IXGBE_FDIR_PBALLOC_256K} } ++ }; ++ char pstring[10]; ++ ++ if (adapter->hw.mac.type == ixgbe_mac_82598EB) { ++ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_NONE; ++ } else if (num_FdirPballoc > bd) { ++ fdir_pballoc_mode = 
FdirPballoc[bd]; ++ ixgbe_validate_option(&fdir_pballoc_mode, &opt); ++ switch (fdir_pballoc_mode) { ++ case IXGBE_FDIR_PBALLOC_256K: ++ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_256K; ++ sprintf(pstring, "256kB"); ++ break; ++ case IXGBE_FDIR_PBALLOC_128K: ++ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_128K; ++ sprintf(pstring, "128kB"); ++ break; ++ case IXGBE_FDIR_PBALLOC_64K: ++ default: ++ adapter->fdir_pballoc = IXGBE_FDIR_PBALLOC_64K; ++ sprintf(pstring, "64kB"); ++ break; ++ } ++ DPRINTK(PROBE, INFO, "Flow Director will be allocated " ++ "%s of packet buffer\n", pstring); ++ } else { ++ adapter->fdir_pballoc = opt.def; ++ } ++ ++ } ++ { /* Flow Director ATR Tx sample packet rate */ ++ static struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "Software ATR Tx packet sample rate", ++ .err = "using default of " ++ __MODULE_STRING(IXGBE_DEFAULT_ATR_SAMPLE_RATE), ++ .def = IXGBE_DEFAULT_ATR_SAMPLE_RATE, ++ .arg = {.r = {.min = IXGBE_ATR_SAMPLE_RATE_OFF, ++ .max = IXGBE_MAX_ATR_SAMPLE_RATE} } ++ }; ++ static const char atr_string[] = ++ "ATR Tx Packet sample rate set to"; ++ ++ if (adapter->hw.mac.type == ixgbe_mac_82598EB) { ++ adapter->atr_sample_rate = IXGBE_ATR_SAMPLE_RATE_OFF; ++ } else if (num_AtrSampleRate > bd) { ++ adapter->atr_sample_rate = AtrSampleRate[bd]; ++ ++ if (adapter->atr_sample_rate) { ++ ixgbe_validate_option(&adapter->atr_sample_rate, ++ &opt); ++ DPRINTK(PROBE, INFO, "%s %d\n", atr_string, ++ adapter->atr_sample_rate); ++ } ++ } else { ++ adapter->atr_sample_rate = opt.def; ++ } ++ } ++#endif /* HAVE_TX_MQ */ ++#if IS_ENABLED(CONFIG_FCOE) ++ { ++ *aflags &= ~IXGBE_FLAG_FCOE_CAPABLE; ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_X540: ++ case ixgbe_mac_X550: ++ case ixgbe_mac_82599EB: { ++ struct ixgbe_option opt = { ++ .type = enable_option, ++ .name = "Enabled/Disable FCoE offload", ++ .err = "defaulting to Enabled", ++ .def = OPTION_ENABLED ++ }; ++#ifdef module_param_array ++ if (num_FCoE > bd) { ++#endif ++ 
unsigned int fcoe = FCoE[bd]; ++ ++ ixgbe_validate_option(&fcoe, &opt); ++ if (fcoe) ++ *aflags |= IXGBE_FLAG_FCOE_CAPABLE; ++#ifdef module_param_array ++ } else { ++ if (opt.def == OPTION_ENABLED) ++ *aflags |= IXGBE_FLAG_FCOE_CAPABLE; ++ } ++#endif ++ DPRINTK(PROBE, INFO, "FCoE Offload feature %sabled\n", ++ (*aflags & IXGBE_FLAG_FCOE_CAPABLE) ? ++ "en" : "dis"); ++ } ++ break; ++ default: ++ break; ++ } ++ } ++#endif /* CONFIG_FCOE */ ++ { /* LRO - Set Large Receive Offload */ ++ struct ixgbe_option opt = { ++ .type = enable_option, ++ .name = "LRO - Large Receive Offload", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED ++ }; ++ struct net_device *netdev = adapter->netdev; ++ ++ if (!(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) ++ opt.def = OPTION_DISABLED; ++ ++#ifdef module_param_array ++ if (num_LRO > bd) { ++#endif ++ unsigned int lro = LRO[bd]; ++ ixgbe_validate_option(&lro, &opt); ++ if (lro) ++ netdev->features |= NETIF_F_LRO; ++ else ++ netdev->features &= ~NETIF_F_LRO; ++#ifdef module_param_array ++ } else { ++ netdev->features &= ~NETIF_F_LRO; ++ } ++#endif ++ if ((netdev->features & NETIF_F_LRO) && ++ !(adapter->flags2 & IXGBE_FLAG2_RSC_CAPABLE)) { ++ DPRINTK(PROBE, INFO, ++ "RSC is not supported on this " ++ "hardware. Disabling RSC.\n"); ++ netdev->features &= ~NETIF_F_LRO; ++ } ++ } ++ { /* ++ * allow_unsupported_sfp - Enable/Disable support for unsupported ++ * and untested SFP+ modules. 
++ */ ++ struct ixgbe_option opt = { ++ .type = enable_option, ++ .name = "allow_unsupported_sfp", ++ .err = "defaulting to Disabled", ++ .def = OPTION_DISABLED ++ }; ++#ifdef module_param_array ++ if (num_allow_unsupported_sfp > bd) { ++#endif ++ unsigned int enable_unsupported_sfp = ++ allow_unsupported_sfp[bd]; ++ ixgbe_validate_option(&enable_unsupported_sfp, &opt); ++ if (enable_unsupported_sfp) { ++ adapter->hw.allow_unsupported_sfp = true; ++ } else { ++ adapter->hw.allow_unsupported_sfp = false; ++ } ++#ifdef module_param_array ++ } else { ++ adapter->hw.allow_unsupported_sfp = false; ++ } ++#endif ++ } ++ { /* DMA Coalescing */ ++ struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "dmac_watchdog", ++ .err = "defaulting to 0 (disabled)", ++ .def = 0, ++ .arg = { .r = { .min = 41, .max = 10000 } }, ++ }; ++ const char *cmsg = "DMA coalescing not supported on this hardware"; ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ if (adapter->rx_itr_setting || adapter->tx_itr_setting) ++ break; ++ opt.err = "interrupt throttling disabled also disables DMA coalescing"; ++ opt.arg.r.min = 0; ++ opt.arg.r.max = 0; ++ break; ++ default: ++ opt.err = cmsg; ++ opt.msg = cmsg; ++ opt.arg.r.min = 0; ++ opt.arg.r.max = 0; ++ } ++#ifdef module_param_array ++ if (num_dmac_watchdog > bd) { ++#endif ++ unsigned int dmac_wd = dmac_watchdog[bd]; ++ ++ ixgbe_validate_option(&dmac_wd, &opt); ++ adapter->hw.mac.dmac_config.watchdog_timer = dmac_wd; ++#ifdef module_param_array ++ } else { ++ adapter->hw.mac.dmac_config.watchdog_timer = opt.def; ++ } ++#endif ++ } ++ { /* VXLAN rx offload */ ++ struct ixgbe_option opt = { ++ .type = range_option, ++ .name = "vxlan_rx", ++ .err = "defaulting to 1 (enabled)", ++ .def = 1, ++ .arg = { .r = { .min = 0, .max = 1 } }, ++ }; ++ const char *cmsg = "VXLAN rx offload not supported on this hardware"; ++ const u32 flag = IXGBE_FLAG_VXLAN_OFFLOAD_ENABLE; ++ ++ if 
(!(adapter->flags & IXGBE_FLAG_VXLAN_OFFLOAD_CAPABLE)) { ++ opt.err = cmsg; ++ opt.msg = cmsg; ++ opt.def = 0; ++ opt.arg.r.max = 0; ++ } ++#ifdef module_param_array ++ if (num_vxlan_rx > bd) { ++#endif ++ unsigned int enable_vxlan_rx = vxlan_rx[bd]; ++ ++ ixgbe_validate_option(&enable_vxlan_rx, &opt); ++ if (enable_vxlan_rx) ++ adapter->flags |= flag; ++ else ++ adapter->flags &= ~flag; ++#ifdef module_param_array ++ } else if (opt.def) { ++ adapter->flags |= flag; ++ } else { ++ adapter->flags &= ~flag; ++ } ++#endif ++ } ++ ++ { /* MDD support */ ++ struct ixgbe_option opt = { ++ .type = enable_option, ++ .name = "Malicious Driver Detection", ++ .err = "defaulting to Enabled", ++ .def = OPTION_ENABLED, ++ }; ++ ++ switch (adapter->hw.mac.type) { ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++#ifdef module_param_array ++ if (num_MDD > bd) { ++#endif ++ mdd = MDD[bd]; ++ ixgbe_validate_option(&mdd, &opt); ++ ++ if (mdd){ ++ *aflags |= IXGBE_FLAG_MDD_ENABLED; ++ ++ } else{ ++ *aflags &= ~IXGBE_FLAG_MDD_ENABLED; ++ } ++#ifdef module_param_array ++ } else { ++ *aflags |= IXGBE_FLAG_MDD_ENABLED; ++ } ++#endif ++ break; ++ default: ++ *aflags &= ~IXGBE_FLAG_MDD_ENABLED; ++ break; ++ } ++ } ++ ++} +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +index ff68b7a..442f9a9 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2014 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -26,28 +22,290 @@ + + *******************************************************************************/ + +-#include +-#include +-#include +- +-#include "ixgbe.h" ++#include "ixgbe_api.h" ++#include "ixgbe_common.h" + #include "ixgbe_phy.h" + +-static void ixgbe_i2c_start(struct ixgbe_hw *hw); +-static void ixgbe_i2c_stop(struct ixgbe_hw *hw); +-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); +-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); +-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); +-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); +-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); +-static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +-static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); +-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); +-static bool ixgbe_get_i2c_data(u32 *i2cctl); +-static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); +-static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); +-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); +-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); ++STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw); ++STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw); ++STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data); ++STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data); ++STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw); ++STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data); ++STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data); 
++STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); ++STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl); ++STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data); ++STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl); ++STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 *sff8472_data); ++ ++/** ++ * ixgbe_out_i2c_byte_ack - Send I2C byte with ack ++ * @hw: pointer to the hardware structure ++ * @byte: byte to send ++ * ++ * Returns an error code on error. ++ */ ++STATIC s32 ixgbe_out_i2c_byte_ack(struct ixgbe_hw *hw, u8 byte) ++{ ++ s32 status; ++ ++ status = ixgbe_clock_out_i2c_byte(hw, byte); ++ if (status) ++ return status; ++ return ixgbe_get_i2c_ack(hw); ++} ++ ++/** ++ * ixgbe_in_i2c_byte_ack - Receive an I2C byte and send ack ++ * @hw: pointer to the hardware structure ++ * @byte: pointer to a u8 to receive the byte ++ * ++ * Returns an error code on error. ++ */ ++STATIC s32 ixgbe_in_i2c_byte_ack(struct ixgbe_hw *hw, u8 *byte) ++{ ++ s32 status; ++ ++ status = ixgbe_clock_in_i2c_byte(hw, byte); ++ if (status) ++ return status; ++ /* ACK */ ++ return ixgbe_clock_out_i2c_bit(hw, false); ++} ++ ++/** ++ * ixgbe_ones_comp_byte_add - Perform one's complement addition ++ * @add1 - addend 1 ++ * @add2 - addend 2 ++ * ++ * Returns one's complement 8-bit sum. ++ */ ++STATIC u8 ixgbe_ones_comp_byte_add(u8 add1, u8 add2) ++{ ++ u16 sum = add1 + add2; ++ ++ sum = (sum & 0xFF) + (sum >> 8); ++ return sum & 0xFF; ++} ++ ++/** ++ * ixgbe_read_i2c_combined_generic_int - Perform I2C read combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to read from ++ * @reg: I2C device register to read from ++ * @val: pointer to location to receive read value ++ * @lock: true if to take and release semaphore ++ * ++ * Returns an error code on error. 
++ */ ++s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, ++ u16 *val, bool lock) ++{ ++ u32 swfw_mask = hw->phy.phy_semaphore_mask; ++ int max_retry = 3; ++ int retry = 0; ++ u8 csum_byte; ++ u8 high_bits; ++ u8 low_bits; ++ u8 reg_high; ++ u8 csum; ++ ++ reg_high = ((reg >> 7) & 0xFE) | 1; /* Indicate read combined */ ++ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); ++ csum = ~csum; ++ do { ++ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) ++ return IXGBE_ERR_SWFW_SYNC; ++ ixgbe_i2c_start(hw); ++ /* Device Address and write indication */ ++ if (ixgbe_out_i2c_byte_ack(hw, addr)) ++ goto fail; ++ /* Write bits 14:8 */ ++ if (ixgbe_out_i2c_byte_ack(hw, reg_high)) ++ goto fail; ++ /* Write bits 7:0 */ ++ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) ++ goto fail; ++ /* Write csum */ ++ if (ixgbe_out_i2c_byte_ack(hw, csum)) ++ goto fail; ++ /* Re-start condition */ ++ ixgbe_i2c_start(hw); ++ /* Device Address and read indication */ ++ if (ixgbe_out_i2c_byte_ack(hw, addr | 1)) ++ goto fail; ++ /* Get upper bits */ ++ if (ixgbe_in_i2c_byte_ack(hw, &high_bits)) ++ goto fail; ++ /* Get low bits */ ++ if (ixgbe_in_i2c_byte_ack(hw, &low_bits)) ++ goto fail; ++ /* Get csum */ ++ if (ixgbe_clock_in_i2c_byte(hw, &csum_byte)) ++ goto fail; ++ /* NACK */ ++ if (ixgbe_clock_out_i2c_bit(hw, false)) ++ goto fail; ++ ixgbe_i2c_stop(hw); ++ if (lock) ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ *val = (high_bits << 8) | low_bits; ++ return 0; ++ ++fail: ++ ixgbe_i2c_bus_clear(hw); ++ if (lock) ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ retry++; ++ if (retry < max_retry) ++ DEBUGOUT("I2C byte read combined error - Retrying.\n"); ++ else ++ DEBUGOUT("I2C byte read combined error.\n"); ++ } while (retry < max_retry); ++ ++ return IXGBE_ERR_I2C; ++} ++ ++/** ++ * ixgbe_write_i2c_combined_generic_int - Perform I2C write combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to write to ++ * 
@reg: I2C device register to write to ++ * @val: value to write ++ * @lock: true if to take and release semaphore ++ * ++ * Returns an error code on error. ++ */ ++s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *hw, u8 addr, u16 reg, ++ u16 val, bool lock) ++{ ++ u32 swfw_mask = hw->phy.phy_semaphore_mask; ++ int max_retry = 1; ++ int retry = 0; ++ u8 reg_high; ++ u8 csum; ++ ++ reg_high = (reg >> 7) & 0xFE; /* Indicate write combined */ ++ csum = ixgbe_ones_comp_byte_add(reg_high, reg & 0xFF); ++ csum = ixgbe_ones_comp_byte_add(csum, val >> 8); ++ csum = ixgbe_ones_comp_byte_add(csum, val & 0xFF); ++ csum = ~csum; ++ do { ++ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) ++ return IXGBE_ERR_SWFW_SYNC; ++ ixgbe_i2c_start(hw); ++ /* Device Address and write indication */ ++ if (ixgbe_out_i2c_byte_ack(hw, addr)) ++ goto fail; ++ /* Write bits 14:8 */ ++ if (ixgbe_out_i2c_byte_ack(hw, reg_high)) ++ goto fail; ++ /* Write bits 7:0 */ ++ if (ixgbe_out_i2c_byte_ack(hw, reg & 0xFF)) ++ goto fail; ++ /* Write data 15:8 */ ++ if (ixgbe_out_i2c_byte_ack(hw, val >> 8)) ++ goto fail; ++ /* Write data 7:0 */ ++ if (ixgbe_out_i2c_byte_ack(hw, val & 0xFF)) ++ goto fail; ++ /* Write csum */ ++ if (ixgbe_out_i2c_byte_ack(hw, csum)) ++ goto fail; ++ ixgbe_i2c_stop(hw); ++ if (lock) ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ return 0; ++ ++fail: ++ ixgbe_i2c_bus_clear(hw); ++ if (lock) ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ retry++; ++ if (retry < max_retry) ++ DEBUGOUT("I2C byte write combined error - Retrying.\n"); ++ else ++ DEBUGOUT("I2C byte write combined error.\n"); ++ } while (retry < max_retry); ++ ++ return IXGBE_ERR_I2C; ++} ++ ++/** ++ * ixgbe_init_phy_ops_generic - Inits PHY function ptrs ++ * @hw: pointer to the hardware structure ++ * ++ * Initialize the function pointers. 
++ **/ ++s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_phy_info *phy = &hw->phy; ++ ++ DEBUGFUNC("ixgbe_init_phy_ops_generic"); ++ ++ /* PHY */ ++ phy->ops.identify = ixgbe_identify_phy_generic; ++ phy->ops.reset = ixgbe_reset_phy_generic; ++ phy->ops.read_reg = ixgbe_read_phy_reg_generic; ++ phy->ops.write_reg = ixgbe_write_phy_reg_generic; ++ phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi; ++ phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi; ++ phy->ops.setup_link = ixgbe_setup_phy_link_generic; ++ phy->ops.setup_link_speed = ixgbe_setup_phy_link_speed_generic; ++ phy->ops.check_link = NULL; ++ phy->ops.get_firmware_version = ixgbe_get_phy_firmware_version_generic; ++ phy->ops.read_i2c_byte = ixgbe_read_i2c_byte_generic; ++ phy->ops.write_i2c_byte = ixgbe_write_i2c_byte_generic; ++ phy->ops.read_i2c_sff8472 = ixgbe_read_i2c_sff8472_generic; ++ phy->ops.read_i2c_eeprom = ixgbe_read_i2c_eeprom_generic; ++ phy->ops.write_i2c_eeprom = ixgbe_write_i2c_eeprom_generic; ++ phy->ops.i2c_bus_clear = ixgbe_i2c_bus_clear; ++ phy->ops.identify_sfp = ixgbe_identify_module_generic; ++ phy->sfp_type = ixgbe_sfp_type_unknown; ++ phy->ops.read_i2c_byte_unlocked = ixgbe_read_i2c_byte_generic_unlocked; ++ phy->ops.write_i2c_byte_unlocked = ++ ixgbe_write_i2c_byte_generic_unlocked; ++ phy->ops.check_overtemp = ixgbe_tn_check_overtemp; ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_probe_phy - Probe a single address for a PHY ++ * @hw: pointer to hardware structure ++ * @phy_addr: PHY address to probe ++ * ++ * Returns true if PHY found ++ */ ++static bool ixgbe_probe_phy(struct ixgbe_hw *hw, u16 phy_addr) ++{ ++ u16 ext_ability = 0; ++ ++ if (!ixgbe_validate_phy_addr(hw, phy_addr)) { ++ DEBUGOUT1("Unable to validate PHY address 0x%04X\n", ++ phy_addr); ++ return false; ++ } ++ ++ if (ixgbe_get_phy_id(hw)) ++ return false; ++ ++ hw->phy.type = ixgbe_get_phy_type_from_id(hw->phy.id); ++ ++ if (hw->phy.type == ixgbe_phy_unknown) { ++ hw->phy.ops.read_reg(hw, 
IXGBE_MDIO_PHY_EXT_ABILITY, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); ++ if (ext_ability & ++ (IXGBE_MDIO_PHY_10GBASET_ABILITY | ++ IXGBE_MDIO_PHY_1000BASET_ABILITY)) ++ hw->phy.type = ixgbe_phy_cu_unknown; ++ else ++ hw->phy.type = ixgbe_phy_generic; ++ } ++ ++ return true; ++} + + /** + * ixgbe_identify_phy_generic - Get physical layer module +@@ -58,43 +316,44 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); + s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) + { + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; +- u32 phy_addr; +- u16 ext_ability = 0; ++ u16 phy_addr; + +- if (hw->phy.type == ixgbe_phy_unknown) { +- for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { +- hw->phy.mdio.prtad = phy_addr; +- if (mdio45_probe(&hw->phy.mdio, phy_addr) == 0) { +- ixgbe_get_phy_id(hw); +- hw->phy.type = +- ixgbe_get_phy_type_from_id(hw->phy.id); +- +- if (hw->phy.type == ixgbe_phy_unknown) { +- hw->phy.ops.read_reg(hw, +- MDIO_PMA_EXTABLE, +- MDIO_MMD_PMAPMD, +- &ext_ability); +- if (ext_ability & +- (MDIO_PMA_EXTABLE_10GBT | +- MDIO_PMA_EXTABLE_1000BT)) +- hw->phy.type = +- ixgbe_phy_cu_unknown; +- else +- hw->phy.type = +- ixgbe_phy_generic; +- } ++ DEBUGFUNC("ixgbe_identify_phy_generic"); + +- status = 0; +- break; +- } ++ if (!hw->phy.phy_semaphore_mask) { ++ if (hw->bus.lan_id) ++ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; ++ else ++ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; ++ } ++ ++ if (hw->phy.type != ixgbe_phy_unknown) ++ return IXGBE_SUCCESS; ++ ++ if (hw->phy.nw_mng_if_sel) { ++ phy_addr = (hw->phy.nw_mng_if_sel & ++ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> ++ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; ++ if (ixgbe_probe_phy(hw, phy_addr)) ++ return IXGBE_SUCCESS; ++ else ++ return IXGBE_ERR_PHY_ADDR_INVALID; ++ } ++ ++ for (phy_addr = 0; phy_addr < IXGBE_MAX_PHY_ADDR; phy_addr++) { ++ if (ixgbe_probe_phy(hw, phy_addr)) { ++ status = IXGBE_SUCCESS; ++ break; + } +- /* clear value if nothing found */ +- if (status != 0) 
+- hw->phy.mdio.prtad = 0; +- } else { +- status = 0; + } + ++ /* Certain media types do not have a phy so an address will not ++ * be found and the code will take this path. Caller has to ++ * decide if it is an error or not. ++ */ ++ if (status != IXGBE_SUCCESS) ++ hw->phy.addr = 0; ++ + return status; + } + +@@ -104,20 +363,23 @@ s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw) + * + * This function checks the MMNGC.MNG_VETO bit to see if there are + * any constraints on link from manageability. For MAC's that don't +- * have this bit just return false since the link can not be blocked ++ * have this bit just return faluse since the link can not be blocked + * via this method. + **/ +-bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw) ++s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw) + { + u32 mmngc; + ++ DEBUGFUNC("ixgbe_check_reset_blocked"); ++ + /* If we don't have this bit, it can't be blocking */ + if (hw->mac.type == ixgbe_mac_82598EB) + return false; + + mmngc = IXGBE_READ_REG(hw, IXGBE_MMNGC); + if (mmngc & IXGBE_MMNGC_MNG_VETO) { +- hw_dbg(hw, "MNG_VETO bit detected.\n"); ++ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, ++ "MNG_VETO bit detected.\n"); + return true; + } + +@@ -125,42 +387,77 @@ bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw) + } + + /** ++ * ixgbe_validate_phy_addr - Determines phy address is valid ++ * @hw: pointer to hardware structure ++ * ++ **/ ++bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr) ++{ ++ u16 phy_id = 0; ++ bool valid = false; ++ ++ DEBUGFUNC("ixgbe_validate_phy_addr"); ++ ++ hw->phy.addr = phy_addr; ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_id); ++ ++ if (phy_id != 0xFFFF && phy_id != 0x0) ++ valid = true; ++ ++ DEBUGOUT1("PHY ID HIGH is 0x%04X\n", phy_id); ++ ++ return valid; ++} ++ ++/** + * ixgbe_get_phy_id - Get the phy type + * @hw: pointer to hardware structure + * + **/ +-static s32 ixgbe_get_phy_id(struct ixgbe_hw *hw) ++s32 
ixgbe_get_phy_id(struct ixgbe_hw *hw) + { + u32 status; + u16 phy_id_high = 0; + u16 phy_id_low = 0; + +- status = hw->phy.ops.read_reg(hw, MDIO_DEVID1, MDIO_MMD_PMAPMD, ++ DEBUGFUNC("ixgbe_get_phy_id"); ++ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_HIGH, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_high); + +- if (status == 0) { ++ if (status == IXGBE_SUCCESS) { + hw->phy.id = (u32)(phy_id_high << 16); +- status = hw->phy.ops.read_reg(hw, MDIO_DEVID2, MDIO_MMD_PMAPMD, ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_ID_LOW, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &phy_id_low); + hw->phy.id |= (u32)(phy_id_low & IXGBE_PHY_REVISION_MASK); + hw->phy.revision = (u32)(phy_id_low & ~IXGBE_PHY_REVISION_MASK); + } ++ DEBUGOUT2("PHY_ID_HIGH 0x%04X, PHY_ID_LOW 0x%04X\n", ++ phy_id_high, phy_id_low); ++ + return status; + } + + /** + * ixgbe_get_phy_type_from_id - Get the phy type +- * @hw: pointer to hardware structure ++ * @phy_id: PHY ID information + * + **/ +-static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) ++enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) + { + enum ixgbe_phy_type phy_type; + ++ DEBUGFUNC("ixgbe_get_phy_type_from_id"); ++ + switch (phy_id) { + case TN1010_PHY_ID: + phy_type = ixgbe_phy_tn; + break; ++ case X550_PHY_ID2: ++ case X550_PHY_ID3: + case X540_PHY_ID: + phy_type = ixgbe_phy_aq; + break; +@@ -170,11 +467,18 @@ static enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id) + case ATH_PHY_ID: + phy_type = ixgbe_phy_nl; + break; ++ case X557_PHY_ID: ++ case X557_PHY_ID2: ++ phy_type = ixgbe_phy_x550em_ext_t; ++ break; ++ case IXGBE_M88E1500_E_PHY_ID: ++ case IXGBE_M88E1543_E_PHY_ID: ++ phy_type = ixgbe_phy_ext_1g_t; ++ break; + default: + phy_type = ixgbe_phy_unknown; + break; + } +- + return phy_type; + } + +@@ -186,12 +490,14 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) + { + u32 i; + u16 ctrl = 0; +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; ++ ++ DEBUGFUNC("ixgbe_reset_phy_generic"); + + if 
(hw->phy.type == ixgbe_phy_unknown) + status = ixgbe_identify_phy_generic(hw); + +- if (status != 0 || hw->phy.type == ixgbe_phy_none) ++ if (status != IXGBE_SUCCESS || hw->phy.type == ixgbe_phy_none) + goto out; + + /* Don't reset PHY if it's shut down due to overtemp. */ +@@ -207,9 +513,9 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) + * Perform soft PHY reset to the PHY_XS. + * This will cause a soft reset to the PHY + */ +- hw->phy.ops.write_reg(hw, MDIO_CTRL1, +- MDIO_MMD_PHYXS, +- MDIO_CTRL1_RESET); ++ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, ++ IXGBE_MDIO_PHY_XS_DEV_TYPE, ++ IXGBE_MDIO_PHY_XS_RESET); + + /* + * Poll for reset bit to self-clear indicating reset is complete. +@@ -217,18 +523,38 @@ s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw) + * 1.7 usec delay after the reset is complete. + */ + for (i = 0; i < 30; i++) { +- msleep(100); +- hw->phy.ops.read_reg(hw, MDIO_CTRL1, +- MDIO_MMD_PHYXS, &ctrl); +- if (!(ctrl & MDIO_CTRL1_RESET)) { +- udelay(2); +- break; ++ msec_delay(100); ++ if (hw->phy.type == ixgbe_phy_x550em_ext_t) { ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_TX_VENDOR_ALARMS_3, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, ++ &ctrl); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ if (ctrl & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { ++ usec_delay(2); ++ break; ++ } ++ } else { ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_PHY_XS_CONTROL, ++ IXGBE_MDIO_PHY_XS_DEV_TYPE, ++ &ctrl); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ if (!(ctrl & IXGBE_MDIO_PHY_XS_RESET)) { ++ usec_delay(2); ++ break; ++ } + } + } + +- if (ctrl & MDIO_CTRL1_RESET) { ++ if (ctrl & IXGBE_MDIO_PHY_XS_RESET) { + status = IXGBE_ERR_RESET_FAILED; +- hw_dbg(hw, "PHY reset polling failed to complete.\n"); ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, ++ "PHY reset polling failed to complete.\n"); + } + + out: +@@ -243,52 +569,56 @@ out: + * @phy_data: Pointer to read data from PHY register + **/ + s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 
reg_addr, u32 device_type, +- u16 *phy_data) ++ u16 *phy_data) + { + u32 i, data, command; + + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | +- (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + +- /* Check every 10 usec to see if the address cycle completed. ++ /* ++ * Check every 10 usec to see if the address cycle completed. + * The MDI Command bit will clear when the operation is + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { +- udelay(10); ++ usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) +- break; ++ break; + } + + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { +- hw_dbg(hw, "PHY address command did not complete.\n"); ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address command did not complete.\n"); ++ DEBUGOUT("PHY address command did not complete, returning IXGBE_ERR_PHY\n"); + return IXGBE_ERR_PHY; + } + +- /* Address cycle complete, setup and write the read ++ /* ++ * Address cycle complete, setup and write the read + * command + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | +- (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_READ | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + +- /* Check every 10 usec to see if the address cycle ++ /* ++ * Check every 10 usec to see if the address cycle + * completed. 
The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { +- udelay(10); ++ usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) +@@ -296,18 +626,20 @@ s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { +- hw_dbg(hw, "PHY read command didn't complete\n"); ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY read command didn't complete\n"); ++ DEBUGOUT("PHY read command didn't complete, returning IXGBE_ERR_PHY\n"); + return IXGBE_ERR_PHY; + } + +- /* Read operation is complete. Get the data ++ /* ++ * Read operation is complete. Get the data + * from MSRWD + */ + data = IXGBE_READ_REG(hw, IXGBE_MSRWD); + data >>= IXGBE_MSRWD_READ_DATA_SHIFT; + *phy_data = (u16)(data); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -321,20 +653,16 @@ s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data) + { + s32 status; +- u16 gssr; ++ u32 gssr = hw->phy.phy_semaphore_mask; + +- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) +- gssr = IXGBE_GSSR_PHY1_SM; +- else +- gssr = IXGBE_GSSR_PHY0_SM; ++ DEBUGFUNC("ixgbe_read_phy_reg_generic"); + +- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { +- status = ixgbe_read_phy_reg_mdi(hw, reg_addr, device_type, +- phy_data); +- hw->mac.ops.release_swfw_sync(hw, gssr); +- } else { +- status = IXGBE_ERR_SWFW_SYNC; +- } ++ if (hw->mac.ops.acquire_swfw_sync(hw, gssr)) ++ return IXGBE_ERR_SWFW_SYNC; ++ ++ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); ++ ++ hw->mac.ops.release_swfw_sync(hw, gssr); + + return status; + } +@@ -358,7 +686,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + /* Setup and write the address cycle command */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | +- 
(hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_ADDR_CYCLE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); +@@ -369,7 +697,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + * complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { +- udelay(10); ++ usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) +@@ -377,7 +705,7 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { +- hw_dbg(hw, "PHY address cmd didn't complete\n"); ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY address cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + +@@ -387,17 +715,18 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + */ + command = ((reg_addr << IXGBE_MSCA_NP_ADDR_SHIFT) | + (device_type << IXGBE_MSCA_DEV_TYPE_SHIFT) | +- (hw->phy.mdio.prtad << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | + (IXGBE_MSCA_WRITE | IXGBE_MSCA_MDI_COMMAND)); + + IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); + +- /* Check every 10 usec to see if the address cycle ++ /* ++ * Check every 10 usec to see if the address cycle + * completed. 
The MDI Command bit will clear when the + * operation is complete + */ + for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { +- udelay(10); ++ usec_delay(10); + + command = IXGBE_READ_REG(hw, IXGBE_MSCA); + if ((command & IXGBE_MSCA_MDI_COMMAND) == 0) +@@ -405,11 +734,11 @@ s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, + } + + if ((command & IXGBE_MSCA_MDI_COMMAND) != 0) { +- hw_dbg(hw, "PHY write cmd didn't complete\n"); ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, "PHY write cmd didn't complete\n"); + return IXGBE_ERR_PHY; + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -424,15 +753,12 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data) + { + s32 status; +- u16 gssr; ++ u32 gssr = hw->phy.phy_semaphore_mask; + +- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) +- gssr = IXGBE_GSSR_PHY1_SM; +- else +- gssr = IXGBE_GSSR_PHY0_SM; ++ DEBUGFUNC("ixgbe_write_phy_reg_generic"); + +- if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == 0) { +- status = ixgbe_write_phy_reg_mdi(hw, reg_addr, device_type, ++ if (hw->mac.ops.acquire_swfw_sync(hw, gssr) == IXGBE_SUCCESS) { ++ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, + phy_data); + hw->mac.ops.release_swfw_sync(hw, gssr); + } else { +@@ -443,101 +769,92 @@ s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + } + + /** +- * ixgbe_setup_phy_link_generic - Set and restart autoneg ++ * ixgbe_setup_phy_link_generic - Set and restart auto-neg + * @hw: pointer to hardware structure + * +- * Restart autonegotiation and PHY and waits for completion. ++ * Restart auto-negotiation and PHY and waits for completion. 
+ **/ + s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw) + { +- s32 status = 0; +- u32 time_out; +- u32 max_time_out = 10; ++ s32 status = IXGBE_SUCCESS; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + +- ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); +- +- if (speed & IXGBE_LINK_SPEED_10GB_FULL) { +- /* Set or unset auto-negotiation 10G advertisement */ +- hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, +- MDIO_MMD_AN, +- &autoneg_reg); ++ DEBUGFUNC("ixgbe_setup_phy_link_generic"); + +- autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) +- autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; ++ ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + +- hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, +- MDIO_MMD_AN, +- autoneg_reg); ++ /* Set or unset auto-negotiation 10G advertisement */ ++ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) && ++ (speed & IXGBE_LINK_SPEED_10GB_FULL)) ++ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; ++ ++ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ autoneg_reg); ++ ++ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ if (hw->mac.type == ixgbe_mac_X550) { ++ /* Set or unset auto-negotiation 5G advertisement */ ++ autoneg_reg &= ~IXGBE_MII_5GBASE_T_ADVERTISE; ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_5GB_FULL) && ++ (speed & IXGBE_LINK_SPEED_5GB_FULL)) ++ autoneg_reg |= IXGBE_MII_5GBASE_T_ADVERTISE; ++ ++ /* Set or unset auto-negotiation 2.5G advertisement */ ++ autoneg_reg &= ~IXGBE_MII_2_5GBASE_T_ADVERTISE; ++ if ((hw->phy.autoneg_advertised & ++ IXGBE_LINK_SPEED_2_5GB_FULL) && ++ (speed & 
IXGBE_LINK_SPEED_2_5GB_FULL)) ++ autoneg_reg |= IXGBE_MII_2_5GBASE_T_ADVERTISE; + } + +- if (speed & IXGBE_LINK_SPEED_1GB_FULL) { +- /* Set or unset auto-negotiation 1G advertisement */ +- hw->phy.ops.read_reg(hw, +- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, +- MDIO_MMD_AN, +- &autoneg_reg); ++ /* Set or unset auto-negotiation 1G advertisement */ ++ autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) && ++ (speed & IXGBE_LINK_SPEED_1GB_FULL)) ++ autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; + +- autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE; +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_1GB_FULL) +- autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE; ++ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ autoneg_reg); + +- hw->phy.ops.write_reg(hw, +- IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, +- MDIO_MMD_AN, +- autoneg_reg); +- } ++ /* Set or unset auto-negotiation 100M advertisement */ ++ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); + +- if (speed & IXGBE_LINK_SPEED_100_FULL) { +- /* Set or unset auto-negotiation 100M advertisement */ +- hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, +- MDIO_MMD_AN, +- &autoneg_reg); ++ autoneg_reg &= ~(IXGBE_MII_100BASE_T_ADVERTISE | ++ IXGBE_MII_100BASE_T_ADVERTISE_HALF); ++ if ((hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) && ++ (speed & IXGBE_LINK_SPEED_100_FULL)) ++ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; + +- autoneg_reg &= ~(ADVERTISE_100FULL | +- ADVERTISE_100HALF); +- if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) +- autoneg_reg |= ADVERTISE_100FULL; +- +- hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, +- MDIO_MMD_AN, +- autoneg_reg); +- } ++ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ autoneg_reg); + + /* Blocked by MNG FW so don't reset PHY */ + if 
(ixgbe_check_reset_blocked(hw)) + return status; + +- /* Restart PHY autonegotiation and wait for completion */ +- hw->phy.ops.read_reg(hw, MDIO_CTRL1, +- MDIO_MMD_AN, &autoneg_reg); ++ /* Restart PHY auto-negotiation. */ ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + +- autoneg_reg |= MDIO_AN_CTRL1_RESTART; ++ autoneg_reg |= IXGBE_MII_RESTART; + +- hw->phy.ops.write_reg(hw, MDIO_CTRL1, +- MDIO_MMD_AN, autoneg_reg); +- +- /* Wait for autonegotiation to finish */ +- for (time_out = 0; time_out < max_time_out; time_out++) { +- udelay(10); +- /* Restart PHY autonegotiation and wait for completion */ +- status = hw->phy.ops.read_reg(hw, MDIO_STAT1, +- MDIO_MMD_AN, +- &autoneg_reg); +- +- autoneg_reg &= MDIO_AN_STAT1_COMPLETE; +- if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) { +- break; +- } +- } +- +- if (time_out == max_time_out) { +- status = IXGBE_ERR_LINK_SETUP; +- hw_dbg(hw, "ixgbe_setup_phy_link_generic: time out\n"); +- } ++ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + return status; + } +@@ -551,6 +868,9 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, + bool autoneg_wait_to_complete) + { ++ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); ++ ++ DEBUGFUNC("ixgbe_setup_phy_link_speed_generic"); + + /* + * Clear autoneg_advertised and set new values based on input link +@@ -561,52 +881,91 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + if (speed & IXGBE_LINK_SPEED_10GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL; + ++ if (speed & IXGBE_LINK_SPEED_5GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_5GB_FULL; ++ ++ if (speed & IXGBE_LINK_SPEED_2_5GB_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_2_5GB_FULL; ++ + if (speed & IXGBE_LINK_SPEED_1GB_FULL) + hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL; + + if (speed & IXGBE_LINK_SPEED_100_FULL) + 
hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL; + ++ if (speed & IXGBE_LINK_SPEED_10_FULL) ++ hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10_FULL; ++ + /* Setup link based on the new speed settings */ +- hw->phy.ops.setup_link(hw); ++ ixgbe_setup_phy_link(hw); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +- * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities ++ * ixgbe_get_copper_speeds_supported - Get copper link speeds from phy + * @hw: pointer to hardware structure +- * @speed: pointer to link speed +- * @autoneg: boolean auto-negotiation value + * +- * Determines the link capabilities by reading the AUTOC register. +- */ +-s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, +- ixgbe_link_speed *speed, +- bool *autoneg) ++ * Determines the supported link capabilities by reading the PHY auto ++ * negotiation register. ++ **/ ++static s32 ixgbe_get_copper_speeds_supported(struct ixgbe_hw *hw) + { +- s32 status = IXGBE_ERR_LINK_SETUP; ++ s32 status; + u16 speed_ability; + +- *speed = 0; +- *autoneg = true; +- +- status = hw->phy.ops.read_reg(hw, MDIO_SPEED, MDIO_MMD_PMAPMD, ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_SPEED_ABILITY, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, + &speed_ability); ++ if (status) ++ return status; + +- if (status == 0) { +- if (speed_ability & MDIO_SPEED_10G) +- *speed |= IXGBE_LINK_SPEED_10GB_FULL; +- if (speed_ability & MDIO_PMA_SPEED_1000) +- *speed |= IXGBE_LINK_SPEED_1GB_FULL; +- if (speed_ability & MDIO_PMA_SPEED_100) +- *speed |= IXGBE_LINK_SPEED_100_FULL; ++ if (speed_ability & IXGBE_MDIO_PHY_SPEED_10G) ++ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_10GB_FULL; ++ if (speed_ability & IXGBE_MDIO_PHY_SPEED_1G) ++ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_1GB_FULL; ++ if (speed_ability & IXGBE_MDIO_PHY_SPEED_100M) ++ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_100_FULL; ++ ++ switch (hw->mac.type) { ++ case ixgbe_mac_X550: ++ hw->phy.speeds_supported |= 
IXGBE_LINK_SPEED_2_5GB_FULL; ++ hw->phy.speeds_supported |= IXGBE_LINK_SPEED_5GB_FULL; ++ break; ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ hw->phy.speeds_supported &= ~IXGBE_LINK_SPEED_100_FULL; ++ break; ++ default: ++ break; + } + + return status; + } + + /** ++ * ixgbe_get_copper_link_capabilities_generic - Determines link capabilities ++ * @hw: pointer to hardware structure ++ * @speed: pointer to link speed ++ * @autoneg: boolean auto-negotiation value ++ **/ ++s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *autoneg) ++{ ++ s32 status = IXGBE_SUCCESS; ++ ++ DEBUGFUNC("ixgbe_get_copper_link_capabilities_generic"); ++ ++ *autoneg = true; ++ if (!hw->phy.speeds_supported) ++ status = ixgbe_get_copper_speeds_supported(hw); ++ ++ *speed = hw->phy.speeds_supported; ++ return status; ++} ++ ++/** + * ixgbe_check_phy_link_tnx - Determine link and speed status + * @hw: pointer to hardware structure + * +@@ -616,13 +975,15 @@ s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + bool *link_up) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u32 time_out; + u32 max_time_out = 10; + u16 phy_link = 0; + u16 phy_speed = 0; + u16 phy_data = 0; + ++ DEBUGFUNC("ixgbe_check_phy_link_tnx"); ++ + /* Initialize speed and link to default case */ + *link_up = false; + *speed = IXGBE_LINK_SPEED_10GB_FULL; +@@ -633,15 +994,14 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + * be changed for other copper PHYs. 
+ */ + for (time_out = 0; time_out < max_time_out; time_out++) { +- udelay(10); ++ usec_delay(10); + status = hw->phy.ops.read_reg(hw, +- MDIO_STAT1, +- MDIO_MMD_VEND1, +- &phy_data); +- phy_link = phy_data & +- IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ &phy_data); ++ phy_link = phy_data & IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS; + phy_speed = phy_data & +- IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS; + if (phy_link == IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS) { + *link_up = true; + if (phy_speed == +@@ -655,41 +1015,41 @@ s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, ixgbe_link_speed *speed, + } + + /** +- * ixgbe_setup_phy_link_tnx - Set and restart autoneg ++ * ixgbe_setup_phy_link_tnx - Set and restart auto-neg + * @hw: pointer to hardware structure + * +- * Restart autonegotiation and PHY and waits for completion. ++ * Restart auto-negotiation and PHY and waits for completion. 
+ **/ + s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) + { +- s32 status = 0; +- u32 time_out; +- u32 max_time_out = 10; ++ s32 status = IXGBE_SUCCESS; + u16 autoneg_reg = IXGBE_MII_AUTONEG_REG; + bool autoneg = false; + ixgbe_link_speed speed; + ++ DEBUGFUNC("ixgbe_setup_phy_link_tnx"); ++ + ixgbe_get_copper_link_capabilities_generic(hw, &speed, &autoneg); + + if (speed & IXGBE_LINK_SPEED_10GB_FULL) { + /* Set or unset auto-negotiation 10G advertisement */ +- hw->phy.ops.read_reg(hw, MDIO_AN_10GBT_CTRL, +- MDIO_MMD_AN, ++ hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + +- autoneg_reg &= ~MDIO_AN_10GBT_CTRL_ADV10G; ++ autoneg_reg &= ~IXGBE_MII_10GBASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_10GB_FULL) +- autoneg_reg |= MDIO_AN_10GBT_CTRL_ADV10G; ++ autoneg_reg |= IXGBE_MII_10GBASE_T_ADVERTISE; + +- hw->phy.ops.write_reg(hw, MDIO_AN_10GBT_CTRL, +- MDIO_MMD_AN, ++ hw->phy.ops.write_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_1GB_FULL) { + /* Set or unset auto-negotiation 1G advertisement */ + hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, +- MDIO_MMD_AN, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + + autoneg_reg &= ~IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; +@@ -697,23 +1057,22 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) + autoneg_reg |= IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX; + + hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_XNP_TX_REG, +- MDIO_MMD_AN, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + + if (speed & IXGBE_LINK_SPEED_100_FULL) { + /* Set or unset auto-negotiation 100M advertisement */ +- hw->phy.ops.read_reg(hw, MDIO_AN_ADVERTISE, +- MDIO_MMD_AN, ++ hw->phy.ops.read_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + &autoneg_reg); + +- autoneg_reg &= ~(ADVERTISE_100FULL | +- ADVERTISE_100HALF); ++ autoneg_reg &= 
~IXGBE_MII_100BASE_T_ADVERTISE; + if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_100_FULL) +- autoneg_reg |= ADVERTISE_100FULL; ++ autoneg_reg |= IXGBE_MII_100BASE_T_ADVERTISE; + +- hw->phy.ops.write_reg(hw, MDIO_AN_ADVERTISE, +- MDIO_MMD_AN, ++ hw->phy.ops.write_reg(hw, IXGBE_MII_AUTONEG_ADVERTISE_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, + autoneg_reg); + } + +@@ -721,32 +1080,14 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) + if (ixgbe_check_reset_blocked(hw)) + return status; + +- /* Restart PHY autonegotiation and wait for completion */ +- hw->phy.ops.read_reg(hw, MDIO_CTRL1, +- MDIO_MMD_AN, &autoneg_reg); +- +- autoneg_reg |= MDIO_AN_CTRL1_RESTART; ++ /* Restart PHY auto-negotiation. */ ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, &autoneg_reg); + +- hw->phy.ops.write_reg(hw, MDIO_CTRL1, +- MDIO_MMD_AN, autoneg_reg); ++ autoneg_reg |= IXGBE_MII_RESTART; + +- /* Wait for autonegotiation to finish */ +- for (time_out = 0; time_out < max_time_out; time_out++) { +- udelay(10); +- /* Restart PHY autonegotiation and wait for completion */ +- status = hw->phy.ops.read_reg(hw, MDIO_STAT1, +- MDIO_MMD_AN, +- &autoneg_reg); +- +- autoneg_reg &= MDIO_AN_STAT1_COMPLETE; +- if (autoneg_reg == MDIO_AN_STAT1_COMPLETE) +- break; +- } +- +- if (time_out == max_time_out) { +- status = IXGBE_ERR_LINK_SETUP; +- hw_dbg(hw, "ixgbe_setup_phy_link_tnx: time out\n"); +- } ++ hw->phy.ops.write_reg(hw, IXGBE_MDIO_AUTO_NEG_CONTROL, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, autoneg_reg); + + return status; + } +@@ -759,10 +1100,12 @@ s32 ixgbe_setup_phy_link_tnx(struct ixgbe_hw *hw) + s32 ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + u16 *firmware_version) + { +- s32 status = 0; ++ s32 status; ++ ++ DEBUGFUNC("ixgbe_get_phy_firmware_version_tnx"); + + status = hw->phy.ops.read_reg(hw, TNX_FW_REV, +- MDIO_MMD_VEND1, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +@@ -776,10 +1119,12 @@ s32 
ixgbe_get_phy_firmware_version_tnx(struct ixgbe_hw *hw, + s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version) + { +- s32 status = 0; ++ s32 status; ++ ++ DEBUGFUNC("ixgbe_get_phy_firmware_version_generic"); + + status = hw->phy.ops.read_reg(hw, AQ_FW_REV, +- MDIO_MMD_VEND1, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, + firmware_version); + + return status; +@@ -795,29 +1140,33 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) + bool end_data = false; + u16 list_offset, data_offset; + u16 phy_data = 0; +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; + u32 i; + ++ DEBUGFUNC("ixgbe_reset_phy_nl"); ++ + /* Blocked by MNG FW so bail */ + if (ixgbe_check_reset_blocked(hw)) + goto out; + +- hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, &phy_data); ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, ++ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); + + /* reset the PHY and poll for completion */ +- hw->phy.ops.write_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, +- (phy_data | MDIO_CTRL1_RESET)); ++ hw->phy.ops.write_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, ++ IXGBE_MDIO_PHY_XS_DEV_TYPE, ++ (phy_data | IXGBE_MDIO_PHY_XS_RESET)); + + for (i = 0; i < 100; i++) { +- hw->phy.ops.read_reg(hw, MDIO_CTRL1, MDIO_MMD_PHYXS, +- &phy_data); +- if ((phy_data & MDIO_CTRL1_RESET) == 0) ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_XS_CONTROL, ++ IXGBE_MDIO_PHY_XS_DEV_TYPE, &phy_data); ++ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) == 0) + break; +- usleep_range(10000, 20000); ++ msec_delay(10); + } + +- if ((phy_data & MDIO_CTRL1_RESET) != 0) { +- hw_dbg(hw, "PHY reset did not complete.\n"); ++ if ((phy_data & IXGBE_MDIO_PHY_XS_RESET) != 0) { ++ DEBUGOUT("PHY reset did not complete.\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } +@@ -825,7 +1174,7 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) + /* Get init offsets */ + ret_val = ixgbe_get_sfp_init_sequence_offsets(hw, &list_offset, + &data_offset); +- if (ret_val != 0) ++ if (ret_val != IXGBE_SUCCESS) + goto out; + + 
ret_val = hw->eeprom.ops.read(hw, data_offset, &block_crc); +@@ -843,45 +1192,46 @@ s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw) + switch (control) { + case IXGBE_DELAY_NL: + data_offset++; +- hw_dbg(hw, "DELAY: %d MS\n", edata); +- usleep_range(edata * 1000, edata * 2000); ++ DEBUGOUT1("DELAY: %d MS\n", edata); ++ msec_delay(edata); + break; + case IXGBE_DATA_NL: +- hw_dbg(hw, "DATA:\n"); ++ DEBUGOUT("DATA:\n"); + data_offset++; +- ret_val = hw->eeprom.ops.read(hw, data_offset++, ++ ret_val = hw->eeprom.ops.read(hw, data_offset, + &phy_offset); + if (ret_val) + goto err_eeprom; ++ data_offset++; + for (i = 0; i < edata; i++) { + ret_val = hw->eeprom.ops.read(hw, data_offset, + &eword); + if (ret_val) + goto err_eeprom; + hw->phy.ops.write_reg(hw, phy_offset, +- MDIO_MMD_PMAPMD, eword); +- hw_dbg(hw, "Wrote %4.4x to %4.4x\n", eword, +- phy_offset); ++ IXGBE_TWINAX_DEV, eword); ++ DEBUGOUT2("Wrote %4.4x to %4.4x\n", eword, ++ phy_offset); + data_offset++; + phy_offset++; + } + break; + case IXGBE_CONTROL_NL: + data_offset++; +- hw_dbg(hw, "CONTROL:\n"); ++ DEBUGOUT("CONTROL:\n"); + if (edata == IXGBE_CONTROL_EOL_NL) { +- hw_dbg(hw, "EOL\n"); ++ DEBUGOUT("EOL\n"); + end_data = true; + } else if (edata == IXGBE_CONTROL_SOL_NL) { +- hw_dbg(hw, "SOL\n"); ++ DEBUGOUT("SOL\n"); + } else { +- hw_dbg(hw, "Bad control value\n"); ++ DEBUGOUT("Bad control value\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } + break; + default: +- hw_dbg(hw, "Bad control type\n"); ++ DEBUGOUT("Bad control type\n"); + ret_val = IXGBE_ERR_PHY; + goto out; + } +@@ -891,7 +1241,8 @@ out: + return ret_val; + + err_eeprom: +- hw_err(hw, "eeprom read at offset %d failed\n", data_offset); ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", data_offset); + return IXGBE_ERR_PHY; + } + +@@ -905,13 +1256,17 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) + { + s32 status = IXGBE_ERR_SFP_NOT_PRESENT; + ++ DEBUGFUNC("ixgbe_identify_module_generic"); ++ + switch 
(hw->mac.ops.get_media_type(hw)) { + case ixgbe_media_type_fiber: + status = ixgbe_identify_sfp_module_generic(hw); + break; ++ + case ixgbe_media_type_fiber_qsfp: + status = ixgbe_identify_qsfp_module_generic(hw); + break; ++ + default: + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; +@@ -924,12 +1279,11 @@ s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw) + /** + * ixgbe_identify_sfp_module_generic - Identifies SFP modules + * @hw: pointer to hardware structure +-* ++ * + * Searches for and identifies the SFP module and assigns appropriate PHY type. + **/ + s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) + { +- struct ixgbe_adapter *adapter = hw->back; + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; +@@ -941,22 +1295,24 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) + u8 cable_spec = 0; + u16 enforce_sfp = 0; + ++ DEBUGFUNC("ixgbe_identify_sfp_module_generic"); ++ + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + ++ /* LAN ID is needed for I2C access */ ++ hw->mac.ops.set_lan_id(hw); ++ + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_IDENTIFIER, + &identifier); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + +- /* LAN ID is needed for sfp_type determination */ +- hw->mac.ops.set_lan_id(hw); +- + if (identifier != IXGBE_SFF_IDENTIFIER_SFP) { + hw->phy.type = ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; +@@ -965,20 +1321,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) + IXGBE_SFF_1GBE_COMP_CODES, + &comp_codes_1g); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_10GBE_COMP_CODES, + &comp_codes_10g); + +- if (status != 0) ++ if (status 
!= IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_CABLE_TECHNOLOGY, + &cable_tech); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + /* ID Module +@@ -1006,7 +1362,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) + hw->phy.sfp_type = ixgbe_sfp_type_lr; + else + hw->phy.sfp_type = ixgbe_sfp_type_unknown; +- } else if (hw->mac.type == ixgbe_mac_82599EB) { ++ } else { + if (cable_tech & IXGBE_SFF_DA_PASSIVE_CABLE) { + if (hw->bus.lan_id == 0) + hw->phy.sfp_type = +@@ -1083,21 +1439,21 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) + IXGBE_SFF_VENDOR_OUI_BYTE0, + &oui_bytes[0]); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE1, + &oui_bytes[1]); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_VENDOR_OUI_BYTE2, + &oui_bytes[2]); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + vendor_oui = +@@ -1139,7 +1495,7 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) + /* Allow any DA cable vendor */ + if (cable_tech & (IXGBE_SFF_DA_PASSIVE_CABLE | + IXGBE_SFF_DA_ACTIVE_CABLE)) { +- status = 0; ++ status = IXGBE_SUCCESS; + goto out; + } + +@@ -1158,11 +1514,11 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) + + /* Anything else 82598-based is supported */ + if (hw->mac.type == ixgbe_mac_82598EB) { +- status = 0; ++ status = IXGBE_SUCCESS; + goto out; + } + +- hw->mac.ops.get_device_caps(hw, &enforce_sfp); ++ ixgbe_get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP) && + !(hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core0 || + hw->phy.sfp_type == ixgbe_sfp_type_1g_cu_core1 || +@@ -1172,21 +1528,20 @@ s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw) + 
hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_sfp_intel) { +- status = 0; ++ status = IXGBE_SUCCESS; + } else { +- if (hw->allow_unsupported_sfp) { +- e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); +- status = 0; ++ if (hw->allow_unsupported_sfp == true) { ++ EWARN(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); ++ status = IXGBE_SUCCESS; + } else { +- hw_dbg(hw, +- "SFP+ module not supported\n"); ++ DEBUGOUT("SFP+ module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + } else { +- status = 0; ++ status = IXGBE_SUCCESS; + } + } + +@@ -1203,14 +1558,75 @@ err_read_i2c_eeprom: + } + + /** +- * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules +- * @hw: pointer to hardware structure ++ * ixgbe_get_supported_phy_sfp_layer_generic - Returns physical layer type ++ * @hw: pointer to hardware structure + * +- * Searches for and identifies the QSFP module and assigns appropriate PHY type ++ * Determines physical layer capabilities of the current SFP. 
++ */ ++u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw) ++{ ++ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; ++ u8 comp_codes_10g = 0; ++ u8 comp_codes_1g = 0; ++ ++ DEBUGFUNC("ixgbe_get_supported_phy_sfp_layer_generic"); ++ ++ hw->phy.ops.identify_sfp(hw); ++ if (hw->phy.sfp_type == ixgbe_sfp_type_not_present) ++ return physical_layer; ++ ++ switch (hw->phy.type) { ++ case ixgbe_phy_sfp_passive_tyco: ++ case ixgbe_phy_sfp_passive_unknown: ++ case ixgbe_phy_qsfp_passive_unknown: ++ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU; ++ break; ++ case ixgbe_phy_sfp_ftl_active: ++ case ixgbe_phy_sfp_active_unknown: ++ case ixgbe_phy_qsfp_active_unknown: ++ physical_layer = IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA; ++ break; ++ case ixgbe_phy_sfp_avago: ++ case ixgbe_phy_sfp_ftl: ++ case ixgbe_phy_sfp_intel: ++ case ixgbe_phy_sfp_unknown: ++ hw->phy.ops.read_i2c_eeprom(hw, ++ IXGBE_SFF_1GBE_COMP_CODES, &comp_codes_1g); ++ hw->phy.ops.read_i2c_eeprom(hw, ++ IXGBE_SFF_10GBE_COMP_CODES, &comp_codes_10g); ++ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; ++ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; ++ else if (comp_codes_1g & IXGBE_SFF_1GBASET_CAPABLE) ++ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_T; ++ else if (comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) ++ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_SX; ++ break; ++ case ixgbe_phy_qsfp_intel: ++ case ixgbe_phy_qsfp_unknown: ++ hw->phy.ops.read_i2c_eeprom(hw, ++ IXGBE_SFF_QSFP_10GBE_COMP, &comp_codes_10g); ++ if (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE) ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_SR; ++ else if (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE) ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_LR; ++ break; ++ default: ++ break; ++ } ++ ++ return physical_layer; ++} ++ ++/** ++ * ixgbe_identify_qsfp_module_generic - Identifies QSFP modules ++ * @hw: 
pointer to hardware structure ++ * ++ * Searches for and identifies the QSFP module and assigns appropriate PHY type + **/ +-static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) ++s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) + { +- struct ixgbe_adapter *adapter = hw->back; + s32 status = IXGBE_ERR_PHY_ADDR_INVALID; + u32 vendor_oui = 0; + enum ixgbe_sfp_type stored_sfp_type = hw->phy.sfp_type; +@@ -1224,16 +1640,21 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) + u8 device_tech = 0; + bool active_cable = false; + ++ DEBUGFUNC("ixgbe_identify_qsfp_module_generic"); ++ + if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_fiber_qsfp) { + hw->phy.sfp_type = ixgbe_sfp_type_not_present; + status = IXGBE_ERR_SFP_NOT_PRESENT; + goto out; + } + ++ /* LAN ID is needed for I2C access */ ++ hw->mac.ops.set_lan_id(hw); ++ + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_IDENTIFIER, + &identifier); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + if (identifier != IXGBE_SFF_IDENTIFIER_QSFP_PLUS) { +@@ -1244,19 +1665,16 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) + + hw->phy.id = identifier; + +- /* LAN ID is needed for sfp_type determination */ +- hw->mac.ops.set_lan_id(hw); +- + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_10GBE_COMP, + &comp_codes_10g); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, IXGBE_SFF_QSFP_1GBE_COMP, + &comp_codes_1g); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + if (comp_codes_10g & IXGBE_SFF_QSFP_DA_PASSIVE_CABLE) { +@@ -1277,8 +1695,7 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) + + if (!active_cable) { + /* check for active DA cables that pre-date +- * SFF-8436 v3.6 +- */ ++ * SFF-8436 v3.6 */ + hw->phy.ops.read_i2c_eeprom(hw, + IXGBE_SFF_QSFP_CONNECTOR, + &connector); +@@ 
-1321,64 +1738,63 @@ static s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw) + /* Determine if the QSFP+ PHY is dual speed or not. */ + hw->phy.multispeed_fiber = false; + if (((comp_codes_1g & IXGBE_SFF_1GBASESX_CAPABLE) && +- (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || +- ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && +- (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) ++ (comp_codes_10g & IXGBE_SFF_10GBASESR_CAPABLE)) || ++ ((comp_codes_1g & IXGBE_SFF_1GBASELX_CAPABLE) && ++ (comp_codes_10g & IXGBE_SFF_10GBASELR_CAPABLE))) + hw->phy.multispeed_fiber = true; + + /* Determine PHY vendor for optical modules */ + if (comp_codes_10g & (IXGBE_SFF_10GBASESR_CAPABLE | +- IXGBE_SFF_10GBASELR_CAPABLE)) { ++ IXGBE_SFF_10GBASELR_CAPABLE)) { + status = hw->phy.ops.read_i2c_eeprom(hw, +- IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, +- &oui_bytes[0]); ++ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE0, ++ &oui_bytes[0]); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, +- IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, +- &oui_bytes[1]); ++ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE1, ++ &oui_bytes[1]); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + status = hw->phy.ops.read_i2c_eeprom(hw, +- IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, +- &oui_bytes[2]); ++ IXGBE_SFF_QSFP_VENDOR_OUI_BYTE2, ++ &oui_bytes[2]); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto err_read_i2c_eeprom; + + vendor_oui = +- ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | +- (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | +- (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); ++ ((oui_bytes[0] << IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT) | ++ (oui_bytes[1] << IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT) | ++ (oui_bytes[2] << IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT)); + + if (vendor_oui == IXGBE_SFF_VENDOR_OUI_INTEL) + hw->phy.type = ixgbe_phy_qsfp_intel; + else + hw->phy.type = ixgbe_phy_qsfp_unknown; + +- hw->mac.ops.get_device_caps(hw, 
&enforce_sfp); ++ ixgbe_get_device_caps(hw, &enforce_sfp); + if (!(enforce_sfp & IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP)) { + /* Make sure we're a supported PHY type */ + if (hw->phy.type == ixgbe_phy_qsfp_intel) { +- status = 0; ++ status = IXGBE_SUCCESS; + } else { + if (hw->allow_unsupported_sfp == true) { +- e_warn(drv, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); +- status = 0; ++ EWARN(hw, "WARNING: Intel (R) Network Connections are quality tested using Intel (R) Ethernet Optics. Using untested modules is not supported and may cause unstable operation or damage to the module or the adapter. Intel Corporation is not responsible for any harm caused by using untested modules.\n"); ++ status = IXGBE_SUCCESS; + } else { +- hw_dbg(hw, +- "QSFP module not supported\n"); ++ DEBUGOUT("QSFP module not supported\n"); + hw->phy.type = + ixgbe_phy_sfp_unsupported; + status = IXGBE_ERR_SFP_NOT_SUPPORTED; + } + } + } else { +- status = 0; ++ status = IXGBE_SUCCESS; + } + } + +@@ -1409,6 +1825,8 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 sfp_id; + u16 sfp_type = hw->phy.sfp_type; + ++ DEBUGFUNC("ixgbe_get_sfp_init_sequence_offsets"); ++ + if (hw->phy.sfp_type == ixgbe_sfp_type_unknown) + return IXGBE_ERR_SFP_NOT_SUPPORTED; + +@@ -1436,8 +1854,9 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + + /* Read offset to PHY init contents */ + if (hw->eeprom.ops.read(hw, IXGBE_PHY_INIT_OFFSET_NL, list_offset)) { +- hw_err(hw, "eeprom read at %d failed\n", +- IXGBE_PHY_INIT_OFFSET_NL); ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", ++ IXGBE_PHY_INIT_OFFSET_NL); + return IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT; + } + +@@ -1460,7 +1879,7 @@ s32 
ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + if (hw->eeprom.ops.read(hw, *list_offset, data_offset)) + goto err_phy; + if ((!*data_offset) || (*data_offset == 0xFFFF)) { +- hw_dbg(hw, "SFP+ module not supported\n"); ++ DEBUGOUT("SFP+ module not supported\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } else { + break; +@@ -1473,14 +1892,15 @@ s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + } + + if (sfp_id == IXGBE_PHY_INIT_END_NL) { +- hw_dbg(hw, "No matching SFP+ module found\n"); ++ DEBUGOUT("No matching SFP+ module found\n"); + return IXGBE_ERR_SFP_NOT_SUPPORTED; + } + +- return 0; ++ return IXGBE_SUCCESS; + + err_phy: +- hw_err(hw, "eeprom read at offset %d failed\n", *list_offset); ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "eeprom read at offset %d failed", *list_offset); + return IXGBE_ERR_PHY; + } + +@@ -1495,6 +1915,8 @@ err_phy: + s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data) + { ++ DEBUGFUNC("ixgbe_read_i2c_eeprom_generic"); ++ + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); +@@ -1508,8 +1930,8 @@ s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + * + * Performs byte read operation to SFP module's SFF-8472 data over I2C + **/ +-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, +- u8 *sff8472_data) ++STATIC s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 *sff8472_data) + { + return hw->phy.ops.read_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR2, +@@ -1527,198 +1949,285 @@ s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, + s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data) + { ++ DEBUGFUNC("ixgbe_write_i2c_eeprom_generic"); ++ + return hw->phy.ops.write_i2c_byte(hw, byte_offset, + IXGBE_I2C_EEPROM_DEV_ADDR, + eeprom_data); + } + + /** +- * ixgbe_read_i2c_byte_generic - Reads 8 bit word over 
I2C ++ * ixgbe_is_sfp_probe - Returns true if SFP is being detected ++ * @hw: pointer to hardware structure ++ * @offset: eeprom offset to be read ++ * @addr: I2C address to be read ++ */ ++STATIC bool ixgbe_is_sfp_probe(struct ixgbe_hw *hw, u8 offset, u8 addr) ++{ ++ if (addr == IXGBE_I2C_EEPROM_DEV_ADDR && ++ offset == IXGBE_SFF_IDENTIFIER && ++ hw->phy.sfp_type == ixgbe_sfp_type_not_present) ++ return true; ++ return false; ++} ++ ++/** ++ * ixgbe_read_i2c_byte_generic_int - Reads 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to read + * @data: value read ++ * @lock: true if to take and release semaphore + * + * Performs byte read operation to SFP module's EEPROM over I2C interface at + * a specified device address. + **/ +-s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, +- u8 dev_addr, u8 *data) ++STATIC s32 ixgbe_read_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data, bool lock) + { +- s32 status = 0; ++ s32 status; + u32 max_retry = 10; + u32 retry = 0; +- u16 swfw_mask = 0; +- bool nack = true; ++ u32 swfw_mask = hw->phy.phy_semaphore_mask; ++ bool nack = 1; + *data = 0; + +- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) +- swfw_mask = IXGBE_GSSR_PHY1_SM; +- else +- swfw_mask = IXGBE_GSSR_PHY0_SM; ++ DEBUGFUNC("ixgbe_read_i2c_byte_generic"); ++ ++ if (hw->mac.type >= ixgbe_mac_X550) ++ max_retry = 3; ++ if (ixgbe_is_sfp_probe(hw, byte_offset, dev_addr)) ++ max_retry = IXGBE_SFP_DETECT_RETRIES; + + do { +- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { +- status = IXGBE_ERR_SWFW_SYNC; +- goto read_byte_out; +- } ++ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask)) ++ return IXGBE_ERR_SWFW_SYNC; + + ixgbe_i2c_start(hw); + + /* Device Address and write indication */ + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); +- if (status != 
0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_start(hw); + + /* Device Address and read indication */ + status = ixgbe_clock_out_i2c_byte(hw, (dev_addr | 0x1)); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_in_i2c_byte(hw, data); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_bit(hw, nack); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_stop(hw); +- break; ++ if (lock) ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ return IXGBE_SUCCESS; + + fail: + ixgbe_i2c_bus_clear(hw); +- hw->mac.ops.release_swfw_sync(hw, swfw_mask); +- msleep(100); ++ if (lock) { ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ msec_delay(100); ++ } + retry++; + if (retry < max_retry) +- hw_dbg(hw, "I2C byte read error - Retrying.\n"); ++ DEBUGOUT("I2C byte read error - Retrying.\n"); + else +- hw_dbg(hw, "I2C byte read error.\n"); ++ DEBUGOUT("I2C byte read error.\n"); + + } while (retry < max_retry); + +- hw->mac.ops.release_swfw_sync(hw, swfw_mask); +- +-read_byte_out: + return status; + } + + /** +- * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C ++ * ixgbe_read_i2c_byte_generic - Reads 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to read ++ * @data: value read ++ * ++ * Performs byte read operation to SFP module's EEPROM over I2C interface at ++ * a specified device address. 
++ **/ ++s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data) ++{ ++ return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, ++ data, true); ++} ++ ++/** ++ * ixgbe_read_i2c_byte_generic_unlocked - Reads 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to read ++ * @data: value read ++ * ++ * Performs byte read operation to SFP module's EEPROM over I2C interface at ++ * a specified device address. ++ **/ ++s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data) ++{ ++ return ixgbe_read_i2c_byte_generic_int(hw, byte_offset, dev_addr, ++ data, false); ++} ++ ++/** ++ * ixgbe_write_i2c_byte_generic_int - Writes 8 bit word over I2C + * @hw: pointer to hardware structure + * @byte_offset: byte offset to write + * @data: value to write ++ * @lock: true if to take and release semaphore + * + * Performs byte write operation to SFP module's EEPROM over I2C interface at + * a specified device address. 
+ **/ +-s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, +- u8 dev_addr, u8 data) ++STATIC s32 ixgbe_write_i2c_byte_generic_int(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data, bool lock) + { +- s32 status = 0; ++ s32 status; + u32 max_retry = 1; + u32 retry = 0; +- u16 swfw_mask = 0; ++ u32 swfw_mask = hw->phy.phy_semaphore_mask; + +- if (IXGBE_READ_REG(hw, IXGBE_STATUS) & IXGBE_STATUS_LAN_ID_1) +- swfw_mask = IXGBE_GSSR_PHY1_SM; +- else +- swfw_mask = IXGBE_GSSR_PHY0_SM; ++ DEBUGFUNC("ixgbe_write_i2c_byte_generic"); + +- if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != 0) { +- status = IXGBE_ERR_SWFW_SYNC; +- goto write_byte_out; +- } ++ if (lock && hw->mac.ops.acquire_swfw_sync(hw, swfw_mask) != ++ IXGBE_SUCCESS) ++ return IXGBE_ERR_SWFW_SYNC; + + do { + ixgbe_i2c_start(hw); + + status = ixgbe_clock_out_i2c_byte(hw, dev_addr); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, byte_offset); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_clock_out_i2c_byte(hw, data); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + status = ixgbe_get_i2c_ack(hw); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto fail; + + ixgbe_i2c_stop(hw); +- break; ++ if (lock) ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ return IXGBE_SUCCESS; + + fail: + ixgbe_i2c_bus_clear(hw); + retry++; + if (retry < max_retry) +- hw_dbg(hw, "I2C byte write error - Retrying.\n"); ++ DEBUGOUT("I2C byte write error - Retrying.\n"); + else +- hw_dbg(hw, "I2C byte write error.\n"); ++ DEBUGOUT("I2C byte write error.\n"); + } while (retry < max_retry); + +- hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ if (lock) ++ 
hw->mac.ops.release_swfw_sync(hw, swfw_mask); + +-write_byte_out: + return status; + } + + /** ++ * ixgbe_write_i2c_byte_generic - Writes 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @data: value to write ++ * ++ * Performs byte write operation to SFP module's EEPROM over I2C interface at ++ * a specified device address. ++ **/ ++s32 ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data) ++{ ++ return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, ++ data, true); ++} ++ ++/** ++ * ixgbe_write_i2c_byte_generic_unlocked - Writes 8 bit word over I2C ++ * @hw: pointer to hardware structure ++ * @byte_offset: byte offset to write ++ * @data: value to write ++ * ++ * Performs byte write operation to SFP module's EEPROM over I2C interface at ++ * a specified device address. ++ **/ ++s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data) ++{ ++ return ixgbe_write_i2c_byte_generic_int(hw, byte_offset, dev_addr, ++ data, false); ++} ++ ++/** + * ixgbe_i2c_start - Sets I2C start condition + * @hw: pointer to hardware structure + * + * Sets I2C start condition (High -> Low on SDA while SCL is High) ++ * Set bit-bang mode on X550 hardware. 
+ **/ +-static void ixgbe_i2c_start(struct ixgbe_hw *hw) ++STATIC void ixgbe_i2c_start(struct ixgbe_hw *hw) + { +- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ++ ++ DEBUGFUNC("ixgbe_i2c_start"); ++ ++ i2cctl |= IXGBE_I2C_BB_EN_BY_MAC(hw); + + /* Start condition must begin with data and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 1); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for start condition (4.7us) */ +- udelay(IXGBE_I2C_T_SU_STA); ++ usec_delay(IXGBE_I2C_T_SU_STA); + + ixgbe_set_i2c_data(hw, &i2cctl, 0); + + /* Hold time for start condition (4us) */ +- udelay(IXGBE_I2C_T_HD_STA); ++ usec_delay(IXGBE_I2C_T_HD_STA); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ +- udelay(IXGBE_I2C_T_LOW); ++ usec_delay(IXGBE_I2C_T_LOW); + + } + +@@ -1727,22 +2236,36 @@ static void ixgbe_i2c_start(struct ixgbe_hw *hw) + * @hw: pointer to hardware structure + * + * Sets I2C stop condition (Low -> High on SDA while SCL is High) ++ * Disables bit-bang mode and negates data output enable on X550 ++ * hardware. 
+ **/ +-static void ixgbe_i2c_stop(struct ixgbe_hw *hw) ++STATIC void ixgbe_i2c_stop(struct ixgbe_hw *hw) + { +- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ++ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); ++ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); ++ u32 bb_en_bit = IXGBE_I2C_BB_EN_BY_MAC(hw); ++ ++ DEBUGFUNC("ixgbe_i2c_stop"); + + /* Stop condition must begin with data low and clock high */ + ixgbe_set_i2c_data(hw, &i2cctl, 0); + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Setup time for stop condition (4us) */ +- udelay(IXGBE_I2C_T_SU_STO); ++ usec_delay(IXGBE_I2C_T_SU_STO); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + + /* bus free time between stop and start (4.7us)*/ +- udelay(IXGBE_I2C_T_BUF); ++ usec_delay(IXGBE_I2C_T_BUF); ++ ++ if (bb_en_bit || data_oe_bit || clk_oe_bit) { ++ i2cctl &= ~bb_en_bit; ++ i2cctl |= data_oe_bit | clk_oe_bit; ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); ++ IXGBE_WRITE_FLUSH(hw); ++ } + } + + /** +@@ -1752,17 +2275,20 @@ static void ixgbe_i2c_stop(struct ixgbe_hw *hw) + * + * Clocks in one byte data via I2C data/clock + **/ +-static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) ++STATIC s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) + { + s32 i; +- bool bit = false; ++ bool bit = 0; ++ ++ DEBUGFUNC("ixgbe_clock_in_i2c_byte"); + ++ *data = 0; + for (i = 7; i >= 0; i--) { + ixgbe_clock_in_i2c_bit(hw, &bit); + *data |= bit << i; + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -1772,25 +2298,28 @@ static s32 ixgbe_clock_in_i2c_byte(struct ixgbe_hw *hw, u8 *data) + * + * Clocks out one byte data via I2C data/clock + **/ +-static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) ++STATIC s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + s32 i; + u32 i2cctl; +- bool bit = false; ++ bool bit; ++ ++ DEBUGFUNC("ixgbe_clock_out_i2c_byte"); + + 
for (i = 7; i >= 0; i--) { + bit = (data >> i) & 0x1; + status = ixgbe_clock_out_i2c_bit(hw, bit); + +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + break; + } + + /* Release SDA line (set high) */ +- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); +- i2cctl |= IXGBE_I2C_DATA_OUT; +- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, i2cctl); ++ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ++ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); ++ i2cctl |= IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); + IXGBE_WRITE_FLUSH(hw); + + return status; +@@ -1802,40 +2331,48 @@ static s32 ixgbe_clock_out_i2c_byte(struct ixgbe_hw *hw, u8 data) + * + * Clocks in/out one bit via I2C data/clock + **/ +-static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) + { +- s32 status = 0; ++ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); ++ s32 status = IXGBE_SUCCESS; + u32 i = 0; +- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + u32 timeout = 10; +- bool ack = true; ++ bool ack = 1; + +- ixgbe_raise_i2c_clk(hw, &i2cctl); ++ DEBUGFUNC("ixgbe_get_i2c_ack"); + ++ if (data_oe_bit) { ++ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); ++ i2cctl |= data_oe_bit; ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); ++ IXGBE_WRITE_FLUSH(hw); ++ } ++ ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ +- udelay(IXGBE_I2C_T_HIGH); ++ usec_delay(IXGBE_I2C_T_HIGH); + + /* Poll for ACK. 
Note that ACK in I2C spec is + * transition from 1 to 0 */ + for (i = 0; i < timeout; i++) { +- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); +- ack = ixgbe_get_i2c_data(&i2cctl); ++ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ++ ack = ixgbe_get_i2c_data(hw, &i2cctl); + +- udelay(1); +- if (ack == 0) ++ usec_delay(1); ++ if (!ack) + break; + } + +- if (ack == 1) { +- hw_dbg(hw, "I2C ack was not received.\n"); ++ if (ack) { ++ DEBUGOUT("I2C ack was not received.\n"); + status = IXGBE_ERR_I2C; + } + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ +- udelay(IXGBE_I2C_T_LOW); ++ usec_delay(IXGBE_I2C_T_LOW); + + return status; + } +@@ -1847,24 +2384,33 @@ static s32 ixgbe_get_i2c_ack(struct ixgbe_hw *hw) + * + * Clocks in one bit via I2C data/clock + **/ +-static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) ++STATIC s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) + { +- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ++ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); ++ ++ DEBUGFUNC("ixgbe_clock_in_i2c_bit"); + ++ if (data_oe_bit) { ++ i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); ++ i2cctl |= data_oe_bit; ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), i2cctl); ++ IXGBE_WRITE_FLUSH(hw); ++ } + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ +- udelay(IXGBE_I2C_T_HIGH); ++ usec_delay(IXGBE_I2C_T_HIGH); + +- i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); +- *data = ixgbe_get_i2c_data(&i2cctl); ++ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ++ *data = ixgbe_get_i2c_data(hw, &i2cctl); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us */ +- udelay(IXGBE_I2C_T_LOW); ++ usec_delay(IXGBE_I2C_T_LOW); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -1874,53 +2420,67 @@ static s32 ixgbe_clock_in_i2c_bit(struct ixgbe_hw *hw, bool *data) + * + * Clocks out one bit via I2C 
data/clock + **/ +-static s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) ++STATIC s32 ixgbe_clock_out_i2c_bit(struct ixgbe_hw *hw, bool data) + { + s32 status; +- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ++ ++ DEBUGFUNC("ixgbe_clock_out_i2c_bit"); + + status = ixgbe_set_i2c_data(hw, &i2cctl, data); +- if (status == 0) { ++ if (status == IXGBE_SUCCESS) { + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Minimum high period of clock is 4us */ +- udelay(IXGBE_I2C_T_HIGH); ++ usec_delay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Minimum low period of clock is 4.7 us. + * This also takes care of the data hold time. + */ +- udelay(IXGBE_I2C_T_LOW); ++ usec_delay(IXGBE_I2C_T_LOW); + } else { + status = IXGBE_ERR_I2C; +- hw_dbg(hw, "I2C data was not set to %X\n", data); ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "I2C data was not set to %X\n", data); + } + + return status; + } ++ + /** + * ixgbe_raise_i2c_clk - Raises the I2C SCL clock + * @hw: pointer to hardware structure + * @i2cctl: Current value of I2CCTL register + * + * Raises the I2C clock line '0'->'1' ++ * Negates the I2C clock output enable on X550 hardware. 
+ **/ +-static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) ++STATIC void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) + { ++ u32 clk_oe_bit = IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); + u32 i = 0; + u32 timeout = IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT; + u32 i2cctl_r = 0; + ++ DEBUGFUNC("ixgbe_raise_i2c_clk"); ++ ++ if (clk_oe_bit) { ++ *i2cctl |= clk_oe_bit; ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); ++ } ++ + for (i = 0; i < timeout; i++) { +- *i2cctl |= IXGBE_I2C_CLK_OUT; +- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); ++ *i2cctl |= IXGBE_I2C_CLK_OUT_BY_MAC(hw); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + /* SCL rise time (1000ns) */ +- udelay(IXGBE_I2C_T_RISE); ++ usec_delay(IXGBE_I2C_T_RISE); + +- i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL); +- if (i2cctl_r & IXGBE_I2C_CLK_IN) ++ i2cctl_r = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ++ if (i2cctl_r & IXGBE_I2C_CLK_IN_BY_MAC(hw)) + break; + } + } +@@ -1931,17 +2491,20 @@ static void ixgbe_raise_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) + * @i2cctl: Current value of I2CCTL register + * + * Lowers the I2C clock line '1'->'0' ++ * Asserts the I2C clock output enable on X550 hardware. + **/ +-static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) ++STATIC void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) + { ++ DEBUGFUNC("ixgbe_lower_i2c_clk"); + +- *i2cctl &= ~IXGBE_I2C_CLK_OUT; ++ *i2cctl &= ~(IXGBE_I2C_CLK_OUT_BY_MAC(hw)); ++ *i2cctl &= ~IXGBE_I2C_CLK_OE_N_EN_BY_MAC(hw); + +- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* SCL fall time (300ns) */ +- udelay(IXGBE_I2C_T_FALL); ++ usec_delay(IXGBE_I2C_T_FALL); + } + + /** +@@ -1951,27 +2514,42 @@ static void ixgbe_lower_i2c_clk(struct ixgbe_hw *hw, u32 *i2cctl) + * @data: I2C data value (0 or 1) to set + * + * Sets the I2C data bit ++ * Asserts the I2C data output enable on X550 hardware. 
+ **/ +-static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) ++STATIC s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) + { +- s32 status = 0; ++ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); ++ s32 status = IXGBE_SUCCESS; ++ ++ DEBUGFUNC("ixgbe_set_i2c_data"); + + if (data) +- *i2cctl |= IXGBE_I2C_DATA_OUT; ++ *i2cctl |= IXGBE_I2C_DATA_OUT_BY_MAC(hw); + else +- *i2cctl &= ~IXGBE_I2C_DATA_OUT; ++ *i2cctl &= ~(IXGBE_I2C_DATA_OUT_BY_MAC(hw)); ++ *i2cctl &= ~data_oe_bit; + +- IXGBE_WRITE_REG(hw, IXGBE_I2CCTL, *i2cctl); ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); + IXGBE_WRITE_FLUSH(hw); + + /* Data rise/fall (1000ns/300ns) and set-up time (250ns) */ +- udelay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); ++ usec_delay(IXGBE_I2C_T_RISE + IXGBE_I2C_T_FALL + IXGBE_I2C_T_SU_DATA); ++ ++ if (!data) /* Can't verify data in this case */ ++ return IXGBE_SUCCESS; ++ if (data_oe_bit) { ++ *i2cctl |= data_oe_bit; ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); ++ IXGBE_WRITE_FLUSH(hw); ++ } + + /* Verify data was set correctly */ +- *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); +- if (data != ixgbe_get_i2c_data(i2cctl)) { ++ *i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); ++ if (data != ixgbe_get_i2c_data(hw, i2cctl)) { + status = IXGBE_ERR_I2C; +- hw_dbg(hw, "Error - I2C data was not set to %X.\n", data); ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "Error - I2C data was not set to %X.\n", ++ data); + } + + return status; +@@ -1983,15 +2561,26 @@ static s32 ixgbe_set_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl, bool data) + * @i2cctl: Current value of I2CCTL register + * + * Returns the I2C data bit value ++ * Negates the I2C data output enable on X550 hardware. 
+ **/ +-static bool ixgbe_get_i2c_data(u32 *i2cctl) ++STATIC bool ixgbe_get_i2c_data(struct ixgbe_hw *hw, u32 *i2cctl) + { ++ u32 data_oe_bit = IXGBE_I2C_DATA_OE_N_EN_BY_MAC(hw); + bool data; + +- if (*i2cctl & IXGBE_I2C_DATA_IN) +- data = true; ++ DEBUGFUNC("ixgbe_get_i2c_data"); ++ ++ if (data_oe_bit) { ++ *i2cctl |= data_oe_bit; ++ IXGBE_WRITE_REG(hw, IXGBE_I2CCTL_BY_MAC(hw), *i2cctl); ++ IXGBE_WRITE_FLUSH(hw); ++ usec_delay(IXGBE_I2C_T_FALL); ++ } ++ ++ if (*i2cctl & IXGBE_I2C_DATA_IN_BY_MAC(hw)) ++ data = 1; + else +- data = false; ++ data = 0; + + return data; + } +@@ -2003,12 +2592,15 @@ static bool ixgbe_get_i2c_data(u32 *i2cctl) + * Clears the I2C bus by sending nine clock pulses. + * Used when data line is stuck low. + **/ +-static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) ++void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) + { +- u32 i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL); ++ u32 i2cctl; + u32 i; + ++ DEBUGFUNC("ixgbe_i2c_bus_clear"); ++ + ixgbe_i2c_start(hw); ++ i2cctl = IXGBE_READ_REG(hw, IXGBE_I2CCTL_BY_MAC(hw)); + + ixgbe_set_i2c_data(hw, &i2cctl, 1); + +@@ -2016,12 +2608,12 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) + ixgbe_raise_i2c_clk(hw, &i2cctl); + + /* Min high period of clock is 4us */ +- udelay(IXGBE_I2C_T_HIGH); ++ usec_delay(IXGBE_I2C_T_HIGH); + + ixgbe_lower_i2c_clk(hw, &i2cctl); + + /* Min low period of clock is 4.7us*/ +- udelay(IXGBE_I2C_T_LOW); ++ usec_delay(IXGBE_I2C_T_LOW); + } + + ixgbe_i2c_start(hw); +@@ -2038,20 +2630,56 @@ static void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw) + **/ + s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + u16 phy_data = 0; + ++ DEBUGFUNC("ixgbe_tn_check_overtemp"); ++ + if (hw->device_id != IXGBE_DEV_ID_82599_T3_LOM) + goto out; + + /* Check that the LASI temp alarm status was triggered */ + hw->phy.ops.read_reg(hw, IXGBE_TN_LASI_STATUS_REG, +- MDIO_MMD_PMAPMD, &phy_data); ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &phy_data); + + if 
(!(phy_data & IXGBE_TN_LASI_STATUS_TEMP_ALARM)) + goto out; + + status = IXGBE_ERR_OVERTEMP; ++ ERROR_REPORT1(IXGBE_ERROR_CAUTION, "Device over temperature"); + out: + return status; + } ++ ++/** ++ * ixgbe_set_copper_phy_power - Control power for copper phy ++ * @hw: pointer to hardware structure ++ * @on: true for on, false for off ++ */ ++s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on) ++{ ++ u32 status; ++ u16 reg; ++ ++ if (!on && ixgbe_mng_present(hw)) ++ return 0; ++ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ ®); ++ if (status) ++ return status; ++ ++ if (on) { ++ reg &= ~IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; ++ } else { ++ if (ixgbe_check_reset_blocked(hw)) ++ return 0; ++ reg |= IXGBE_MDIO_PHY_SET_LOW_POWER_MODE; ++ } ++ ++ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ reg); ++ return status; ++} +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +index 54071ed..445394f 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_phy.h +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2014 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. 
+- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -30,8 +26,9 @@ + #define _IXGBE_PHY_H_ + + #include "ixgbe_type.h" +-#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 +-#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 ++#define IXGBE_I2C_EEPROM_DEV_ADDR 0xA0 ++#define IXGBE_I2C_EEPROM_DEV_ADDR2 0xA2 ++#define IXGBE_I2C_EEPROM_BANK_LEN 0xFF + + /* EEPROM byte offsets */ + #define IXGBE_SFF_IDENTIFIER 0x0 +@@ -58,69 +55,111 @@ + #define IXGBE_SFF_QSFP_DEVICE_TECH 0x93 + + /* Bitmasks */ +-#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 +-#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 ++#define IXGBE_SFF_DA_PASSIVE_CABLE 0x4 ++#define IXGBE_SFF_DA_ACTIVE_CABLE 0x8 + #define IXGBE_SFF_DA_SPEC_ACTIVE_LIMITING 0x4 +-#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 +-#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 +-#define IXGBE_SFF_1GBASET_CAPABLE 0x8 +-#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 +-#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 +-#define IXGBE_SFF_ADDRESSING_MODE 0x4 +-#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 +-#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 ++#define IXGBE_SFF_1GBASESX_CAPABLE 0x1 ++#define IXGBE_SFF_1GBASELX_CAPABLE 0x2 ++#define IXGBE_SFF_1GBASET_CAPABLE 0x8 ++#define IXGBE_SFF_10GBASESR_CAPABLE 0x10 ++#define IXGBE_SFF_10GBASELR_CAPABLE 0x20 ++#define IXGBE_SFF_SOFT_RS_SELECT_MASK 0x8 ++#define IXGBE_SFF_SOFT_RS_SELECT_10G 0x8 ++#define IXGBE_SFF_SOFT_RS_SELECT_1G 0x0 ++#define IXGBE_SFF_ADDRESSING_MODE 0x4 ++#define IXGBE_SFF_QSFP_DA_ACTIVE_CABLE 0x1 ++#define IXGBE_SFF_QSFP_DA_PASSIVE_CABLE 0x8 + #define IXGBE_SFF_QSFP_CONNECTOR_NOT_SEPARABLE 0x23 + #define IXGBE_SFF_QSFP_TRANSMITER_850NM_VCSEL 0x0 +-#define IXGBE_I2C_EEPROM_READ_MASK 0x100 +-#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 ++#define IXGBE_I2C_EEPROM_READ_MASK 0x100 ++#define IXGBE_I2C_EEPROM_STATUS_MASK 0x3 + #define IXGBE_I2C_EEPROM_STATUS_NO_OPERATION 0x0 +-#define IXGBE_I2C_EEPROM_STATUS_PASS 0x1 +-#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 ++#define 
IXGBE_I2C_EEPROM_STATUS_PASS 0x1 ++#define IXGBE_I2C_EEPROM_STATUS_FAIL 0x2 + #define IXGBE_I2C_EEPROM_STATUS_IN_PROGRESS 0x3 ++ ++#define IXGBE_CS4227 0xBE /* CS4227 address */ ++#define IXGBE_CS4227_GLOBAL_ID_LSB 0 ++#define IXGBE_CS4227_GLOBAL_ID_MSB 1 ++#define IXGBE_CS4227_SCRATCH 2 ++#define IXGBE_CS4227_GLOBAL_ID_VALUE 0x03E5 ++#define IXGBE_CS4227_EFUSE_PDF_SKU 0x19F ++#define IXGBE_CS4223_SKU_ID 0x0010 /* Quad port */ ++#define IXGBE_CS4227_SKU_ID 0x0014 /* Dual port */ ++#define IXGBE_CS4227_RESET_PENDING 0x1357 ++#define IXGBE_CS4227_RESET_COMPLETE 0x5AA5 ++#define IXGBE_CS4227_RETRIES 15 ++#define IXGBE_CS4227_EFUSE_STATUS 0x0181 ++#define IXGBE_CS4227_LINE_SPARE22_MSB 0x12AD /* Reg to program speed */ ++#define IXGBE_CS4227_LINE_SPARE24_LSB 0x12B0 /* Reg to program EDC */ ++#define IXGBE_CS4227_HOST_SPARE22_MSB 0x1AAD /* Reg to program speed */ ++#define IXGBE_CS4227_HOST_SPARE24_LSB 0x1AB0 /* Reg to program EDC */ ++#define IXGBE_CS4227_EEPROM_STATUS 0x5001 ++#define IXGBE_CS4227_EEPROM_LOAD_OK 0x0001 ++#define IXGBE_CS4227_SPEED_1G 0x8000 ++#define IXGBE_CS4227_SPEED_10G 0 ++#define IXGBE_CS4227_EDC_MODE_CX1 0x0002 ++#define IXGBE_CS4227_EDC_MODE_SR 0x0004 ++#define IXGBE_CS4227_EDC_MODE_DIAG 0x0008 ++#define IXGBE_CS4227_RESET_HOLD 500 /* microseconds */ ++#define IXGBE_CS4227_RESET_DELAY 450 /* milliseconds */ ++#define IXGBE_CS4227_CHECK_DELAY 30 /* milliseconds */ ++#define IXGBE_PE 0xE0 /* Port expander address */ ++#define IXGBE_PE_OUTPUT 1 /* Output register offset */ ++#define IXGBE_PE_CONFIG 3 /* Config register offset */ ++#define IXGBE_PE_BIT1 (1 << 1) ++ + /* Flow control defines */ +-#define IXGBE_TAF_SYM_PAUSE 0x400 +-#define IXGBE_TAF_ASM_PAUSE 0x800 ++#define IXGBE_TAF_SYM_PAUSE 0x400 ++#define IXGBE_TAF_ASM_PAUSE 0x800 + + /* Bit-shift macros */ +-#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 +-#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 +-#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 ++#define IXGBE_SFF_VENDOR_OUI_BYTE0_SHIFT 24 
++#define IXGBE_SFF_VENDOR_OUI_BYTE1_SHIFT 16 ++#define IXGBE_SFF_VENDOR_OUI_BYTE2_SHIFT 8 + + /* Vendor OUIs: format of OUI is 0x[byte0][byte1][byte2][00] */ +-#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 +-#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 +-#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 +-#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 ++#define IXGBE_SFF_VENDOR_OUI_TYCO 0x00407600 ++#define IXGBE_SFF_VENDOR_OUI_FTL 0x00906500 ++#define IXGBE_SFF_VENDOR_OUI_AVAGO 0x00176A00 ++#define IXGBE_SFF_VENDOR_OUI_INTEL 0x001B2100 + + /* I2C SDA and SCL timing parameters for standard mode */ +-#define IXGBE_I2C_T_HD_STA 4 +-#define IXGBE_I2C_T_LOW 5 +-#define IXGBE_I2C_T_HIGH 4 +-#define IXGBE_I2C_T_SU_STA 5 +-#define IXGBE_I2C_T_HD_DATA 5 +-#define IXGBE_I2C_T_SU_DATA 1 +-#define IXGBE_I2C_T_RISE 1 +-#define IXGBE_I2C_T_FALL 1 +-#define IXGBE_I2C_T_SU_STO 4 +-#define IXGBE_I2C_T_BUF 5 +- +-#define IXGBE_TN_LASI_STATUS_REG 0x9005 +-#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 +- +-/* SFP+ SFF-8472 Compliance code */ +-#define IXGBE_SFF_SFF_8472_UNSUP 0x00 ++#define IXGBE_I2C_T_HD_STA 4 ++#define IXGBE_I2C_T_LOW 5 ++#define IXGBE_I2C_T_HIGH 4 ++#define IXGBE_I2C_T_SU_STA 5 ++#define IXGBE_I2C_T_HD_DATA 5 ++#define IXGBE_I2C_T_SU_DATA 1 ++#define IXGBE_I2C_T_RISE 1 ++#define IXGBE_I2C_T_FALL 1 ++#define IXGBE_I2C_T_SU_STO 4 ++#define IXGBE_I2C_T_BUF 5 ++ ++#ifndef IXGBE_SFP_DETECT_RETRIES ++#define IXGBE_SFP_DETECT_RETRIES 10 ++ ++#endif /* IXGBE_SFP_DETECT_RETRIES */ ++#define IXGBE_TN_LASI_STATUS_REG 0x9005 ++#define IXGBE_TN_LASI_STATUS_TEMP_ALARM 0x0008 ++ ++/* SFP+ SFF-8472 Compliance */ ++#define IXGBE_SFF_SFF_8472_UNSUP 0x00 + + s32 ixgbe_init_phy_ops_generic(struct ixgbe_hw *hw); ++bool ixgbe_validate_phy_addr(struct ixgbe_hw *hw, u32 phy_addr); ++enum ixgbe_phy_type ixgbe_get_phy_type_from_id(u32 phy_id); ++s32 ixgbe_get_phy_id(struct ixgbe_hw *hw); + s32 ixgbe_identify_phy_generic(struct ixgbe_hw *hw); + s32 ixgbe_reset_phy_generic(struct ixgbe_hw *hw); 
++s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 *phy_data); ++s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, u32 device_type, ++ u16 phy_data); + s32 ixgbe_read_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 *phy_data); + s32 ixgbe_write_phy_reg_generic(struct ixgbe_hw *hw, u32 reg_addr, + u32 device_type, u16 phy_data); +-s32 ixgbe_read_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, +- u32 device_type, u16 *phy_data); +-s32 ixgbe_write_phy_reg_mdi(struct ixgbe_hw *hw, u32 reg_addr, +- u32 device_type, u16 phy_data); + s32 ixgbe_setup_phy_link_generic(struct ixgbe_hw *hw); + s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + ixgbe_link_speed speed, +@@ -128,7 +167,7 @@ s32 ixgbe_setup_phy_link_speed_generic(struct ixgbe_hw *hw, + s32 ixgbe_get_copper_link_capabilities_generic(struct ixgbe_hw *hw, + ixgbe_link_speed *speed, + bool *autoneg); +-bool ixgbe_check_reset_blocked(struct ixgbe_hw *hw); ++s32 ixgbe_check_reset_blocked(struct ixgbe_hw *hw); + + /* PHY specific */ + s32 ixgbe_check_phy_link_tnx(struct ixgbe_hw *hw, +@@ -141,20 +180,30 @@ s32 ixgbe_get_phy_firmware_version_generic(struct ixgbe_hw *hw, + u16 *firmware_version); + + s32 ixgbe_reset_phy_nl(struct ixgbe_hw *hw); ++s32 ixgbe_set_copper_phy_power(struct ixgbe_hw *hw, bool on); + s32 ixgbe_identify_module_generic(struct ixgbe_hw *hw); + s32 ixgbe_identify_sfp_module_generic(struct ixgbe_hw *hw); ++u64 ixgbe_get_supported_phy_sfp_layer_generic(struct ixgbe_hw *hw); ++s32 ixgbe_identify_qsfp_module_generic(struct ixgbe_hw *hw); + s32 ixgbe_get_sfp_init_sequence_offsets(struct ixgbe_hw *hw, + u16 *list_offset, + u16 *data_offset); + s32 ixgbe_tn_check_overtemp(struct ixgbe_hw *hw); + s32 ixgbe_read_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 *data); ++s32 ixgbe_read_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 *data); + s32 
ixgbe_write_i2c_byte_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 dev_addr, u8 data); ++s32 ixgbe_write_i2c_byte_generic_unlocked(struct ixgbe_hw *hw, u8 byte_offset, ++ u8 dev_addr, u8 data); + s32 ixgbe_read_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 *eeprom_data); +-s32 ixgbe_read_i2c_sff8472_generic(struct ixgbe_hw *hw, u8 byte_offset, +- u8 *sff8472_data); + s32 ixgbe_write_i2c_eeprom_generic(struct ixgbe_hw *hw, u8 byte_offset, + u8 eeprom_data); ++void ixgbe_i2c_bus_clear(struct ixgbe_hw *hw); ++s32 ixgbe_read_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 *val, bool lock); ++s32 ixgbe_write_i2c_combined_generic_int(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 val, bool lock); + #endif /* _IXGBE_PHY_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_procfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_procfs.c +new file mode 100644 +index 0000000..54f0940 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_procfs.c +@@ -0,0 +1,938 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe.h" ++#include "ixgbe_common.h" ++#include "ixgbe_type.h" ++ ++#ifdef IXGBE_PROCFS ++#ifndef IXGBE_SYSFS ++ ++#include ++#include ++#include ++#include ++#include ++ ++static struct proc_dir_entry *ixgbe_top_dir = NULL; ++ ++static struct net_device_stats *procfs_get_stats(struct net_device *netdev) ++{ ++#ifndef HAVE_NETDEV_STATS_IN_NETDEV ++ struct ixgbe_adapter *adapter; ++#endif ++ if (netdev == NULL) ++ return NULL; ++ ++#ifdef HAVE_NETDEV_STATS_IN_NETDEV ++ /* only return the current stats */ ++ return &netdev->stats; ++#else ++ adapter = netdev_priv(netdev); ++ ++ /* only return the current stats */ ++ return &adapter->net_stats; ++#endif /* HAVE_NETDEV_STATS_IN_NETDEV */ ++} ++ ++bool ixgbe_thermal_present(struct ixgbe_adapter *adapter) ++{ ++ s32 status; ++ if (adapter == NULL) ++ return false; ++ status = ixgbe_init_thermal_sensor_thresh_generic(&(adapter->hw)); ++ if (status != IXGBE_SUCCESS) ++ return false; ++ ++ return true; ++} ++ ++static int ixgbe_fwbanner(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ return snprintf(page, count, "%s\n", adapter->eeprom_id); ++} ++ ++static int ixgbe_porttype(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ return snprintf(page, count, "%d\n", ++ test_bit(__IXGBE_DOWN, &adapter->state)); ++} ++ ++static int ixgbe_portspeed(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int 
__always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ int speed = 0; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ switch (adapter->link_speed) { ++ case IXGBE_LINK_SPEED_100_FULL: ++ speed = 1; ++ break; ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ speed = 10; ++ break; ++ case IXGBE_LINK_SPEED_10GB_FULL: ++ speed = 100; ++ break; ++ } ++ return snprintf(page, count, "%d\n", speed); ++} ++ ++static int ixgbe_wqlflag(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ return snprintf(page, count, "%d\n", adapter->wol); ++} ++ ++static int ixgbe_xflowctl(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct ixgbe_hw *hw; ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ ++ return snprintf(page, count, "%d\n", hw->fc.current_mode); ++} ++ ++static int ixgbe_rxdrops(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct net_device_stats *net_stats; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ net_stats = procfs_get_stats(adapter->netdev); ++ if (net_stats == NULL) ++ return snprintf(page, count, "error: no net stats\n"); ++ ++ return snprintf(page, count, "%lu\n", ++ net_stats->rx_dropped); ++} ++ ++static int ixgbe_rxerrors(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter 
*adapter = (struct ixgbe_adapter *)data; ++ struct net_device_stats *net_stats; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ net_stats = procfs_get_stats(adapter->netdev); ++ if (net_stats == NULL) ++ return snprintf(page, count, "error: no net stats\n"); ++ ++ return snprintf(page, count, "%lu\n", net_stats->rx_errors); ++} ++ ++static int ixgbe_rxupacks(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_hw *hw; ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ ++ return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPR)); ++} ++ ++static int ixgbe_rxmpacks(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_hw *hw; ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ ++ return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_MPRC)); ++} ++ ++static int ixgbe_rxbpacks(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_hw *hw; ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ ++ return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPRC)); ++} ++ ++static int ixgbe_txupacks(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_hw *hw; ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = 
&adapter->hw; ++ ++ return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_TPT)); ++} ++ ++static int ixgbe_txmpacks(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_hw *hw; ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ ++ return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_MPTC)); ++} ++ ++static int ixgbe_txbpacks(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_hw *hw; ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ ++ return snprintf(page, count, "%d\n", IXGBE_READ_REG(hw, IXGBE_BPTC)); ++} ++ ++static int ixgbe_txerrors(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct net_device_stats *net_stats; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ net_stats = procfs_get_stats(adapter->netdev); ++ if (net_stats == NULL) ++ return snprintf(page, count, "error: no net stats\n"); ++ ++ return snprintf(page, count, "%lu\n", ++ net_stats->tx_errors); ++} ++ ++static int ixgbe_txdrops(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct net_device_stats *net_stats; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ net_stats = procfs_get_stats(adapter->netdev); ++ if (net_stats == NULL) ++ return snprintf(page, count, "error: no net stats\n"); ++ ++ return 
snprintf(page, count, "%lu\n", ++ net_stats->tx_dropped); ++} ++ ++static int ixgbe_rxframes(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct net_device_stats *net_stats; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ net_stats = procfs_get_stats(adapter->netdev); ++ if (net_stats == NULL) ++ return snprintf(page, count, "error: no net stats\n"); ++ ++ return snprintf(page, count, "%lu\n", ++ net_stats->rx_packets); ++} ++ ++static int ixgbe_rxbytes(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct net_device_stats *net_stats; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ net_stats = procfs_get_stats(adapter->netdev); ++ if (net_stats == NULL) ++ return snprintf(page, count, "error: no net stats\n"); ++ ++ return snprintf(page, count, "%lu\n", ++ net_stats->rx_bytes); ++} ++ ++static int ixgbe_txframes(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct net_device_stats *net_stats; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ net_stats = procfs_get_stats(adapter->netdev); ++ if (net_stats == NULL) ++ return snprintf(page, count, "error: no net stats\n"); ++ ++ return snprintf(page, count, "%lu\n", ++ net_stats->tx_packets); ++} ++ ++static int ixgbe_txbytes(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct net_device_stats *net_stats; ++ ++ if 
(adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ net_stats = procfs_get_stats(adapter->netdev); ++ if (net_stats == NULL) ++ return snprintf(page, count, "error: no net stats\n"); ++ ++ return snprintf(page, count, "%lu\n", ++ net_stats->tx_bytes); ++} ++ ++static int ixgbe_linkstat(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_hw *hw; ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ int bitmask = 0; ++ u32 link_speed; ++ bool link_up = false; ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ ++ if (!test_bit(__IXGBE_DOWN, &adapter->state)) ++ bitmask |= 1; ++ ++ if (hw->mac.ops.check_link) ++ hw->mac.ops.check_link(hw, &link_speed, &link_up, false); ++ else ++ /* always assume link is up, if no check link function */ ++ link_up = true; ++ if (link_up) ++ bitmask |= 2; ++ ++ if (adapter->old_lsc != adapter->lsc_int) { ++ bitmask |= 4; ++ adapter->old_lsc = adapter->lsc_int; ++ } ++ ++ return snprintf(page, count, "0x%X\n", bitmask); ++} ++ ++static int ixgbe_funcid(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct ixgbe_hw *hw; ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ ++ return snprintf(page, count, "0x%X\n", hw->bus.func); ++} ++ ++static int ixgbe_funcvers(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void __always_unused *data) ++{ ++ return snprintf(page, count, "%s\n", ixgbe_driver_version); ++} ++ ++static int ixgbe_macburn(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_hw *hw; ++ struct 
ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ ++ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", ++ (unsigned int)hw->mac.perm_addr[0], ++ (unsigned int)hw->mac.perm_addr[1], ++ (unsigned int)hw->mac.perm_addr[2], ++ (unsigned int)hw->mac.perm_addr[3], ++ (unsigned int)hw->mac.perm_addr[4], ++ (unsigned int)hw->mac.perm_addr[5]); ++} ++ ++static int ixgbe_macadmn(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_hw *hw; ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ ++ return snprintf(page, count, "0x%02X%02X%02X%02X%02X%02X\n", ++ (unsigned int)hw->mac.addr[0], ++ (unsigned int)hw->mac.addr[1], ++ (unsigned int)hw->mac.addr[2], ++ (unsigned int)hw->mac.addr[3], ++ (unsigned int)hw->mac.addr[4], ++ (unsigned int)hw->mac.addr[5]); ++} ++ ++static int ixgbe_maclla1(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct ixgbe_hw *hw; ++ int rc; ++ u16 eeprom_buff[6]; ++ u16 first_word = 0x37; ++ const u16 word_count = ARRAY_SIZE(eeprom_buff); ++ ++ if (!adapter) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ hw = &adapter->hw; ++ ++ rc = hw->eeprom.ops.read_buffer(hw, first_word, 1, &first_word); ++ if (rc != 0) ++ return snprintf(page, count, "error: reading pointer to the EEPROM\n"); ++ ++ if (first_word != 0x0000 && first_word != 0xFFFF) { ++ rc = hw->eeprom.ops.read_buffer(hw, first_word, word_count, ++ eeprom_buff); ++ if (rc != 0) ++ return snprintf(page, count, "error: reading buffer\n"); ++ } else { ++ memset(eeprom_buff, 0, sizeof(eeprom_buff)); ++ } ++ ++ switch 
(hw->bus.func) { ++ case 0: ++ return snprintf(page, count, "0x%04X%04X%04X\n", ++ eeprom_buff[0], ++ eeprom_buff[1], ++ eeprom_buff[2]); ++ case 1: ++ return snprintf(page, count, "0x%04X%04X%04X\n", ++ eeprom_buff[3], ++ eeprom_buff[4], ++ eeprom_buff[5]); ++ } ++ return snprintf(page, count, "unexpected port %d\n", hw->bus.func); ++} ++ ++static int ixgbe_mtusize(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct net_device *netdev; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ netdev = adapter->netdev; ++ if (netdev == NULL) ++ return snprintf(page, count, "error: no net device\n"); ++ ++ return snprintf(page, count, "%d\n", netdev->mtu); ++} ++ ++static int ixgbe_featflag(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ int bitmask = 0; ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct net_device *netdev; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ netdev = adapter->netdev; ++ if (netdev == NULL) ++ return snprintf(page, count, "error: no net device\n"); ++ if (adapter->netdev->features & NETIF_F_RXCSUM) ++ bitmask |= 1; ++ return snprintf(page, count, "%d\n", bitmask); ++} ++ ++static int ixgbe_lsominct(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void __always_unused *data) ++{ ++ return snprintf(page, count, "%d\n", 1); ++} ++ ++static int ixgbe_prommode(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ struct net_device *netdev; ++ ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ 
netdev = adapter->netdev; ++ if (netdev == NULL) ++ return snprintf(page, count, "error: no net device\n"); ++ ++ return snprintf(page, count, "%d\n", ++ netdev->flags & IFF_PROMISC); ++} ++ ++static int ixgbe_txdscqsz(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ return snprintf(page, count, "%d\n", adapter->tx_ring[0]->count); ++} ++ ++static int ixgbe_rxdscqsz(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ return snprintf(page, count, "%d\n", adapter->rx_ring[0]->count); ++} ++ ++static int ixgbe_rxqavg(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ int index; ++ int diff = 0; ++ u16 ntc; ++ u16 ntu; ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ for (index = 0; index < adapter->num_rx_queues; index++) { ++ ntc = adapter->rx_ring[index]->next_to_clean; ++ ntu = adapter->rx_ring[index]->next_to_use; ++ ++ if (ntc >= ntu) ++ diff += (ntc - ntu); ++ else ++ diff += (adapter->rx_ring[index]->count - ntu + ntc); ++ } ++ if (adapter->num_rx_queues <= 0) ++ return snprintf(page, count, ++ "can't calculate, number of queues %d\n", ++ adapter->num_rx_queues); ++ return snprintf(page, count, "%d\n", diff/adapter->num_rx_queues); ++} ++ ++static int ixgbe_txqavg(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ int index; ++ int diff = 0; ++ u16 ntc; ++ 
u16 ntu; ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ for (index = 0; index < adapter->num_tx_queues; index++) { ++ ntc = adapter->tx_ring[index]->next_to_clean; ++ ntu = adapter->tx_ring[index]->next_to_use; ++ ++ if (ntc >= ntu) ++ diff += (ntc - ntu); ++ else ++ diff += (adapter->tx_ring[index]->count - ntu + ntc); ++ } ++ if (adapter->num_tx_queues <= 0) ++ return snprintf(page, count, ++ "can't calculate, number of queues %d\n", ++ adapter->num_tx_queues); ++ return snprintf(page, count, "%d\n", ++ diff/adapter->num_tx_queues); ++} ++ ++static int ixgbe_iovotype(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void __always_unused *data) ++{ ++ return snprintf(page, count, "2\n"); ++} ++ ++static int ixgbe_funcnbr(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ return snprintf(page, count, "%d\n", adapter->num_vfs); ++} ++ ++static int ixgbe_pciebnbr(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_adapter *adapter = (struct ixgbe_adapter *)data; ++ if (adapter == NULL) ++ return snprintf(page, count, "error: no adapter\n"); ++ ++ return snprintf(page, count, "%d\n", adapter->pdev->bus->number); ++} ++ ++static int ixgbe_therm_location(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_therm_proc_data *therm_data = ++ (struct ixgbe_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ return snprintf(page, count, 
"%d\n", therm_data->sensor_data->location); ++} ++ ++ ++static int ixgbe_therm_maxopthresh(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_therm_proc_data *therm_data = ++ (struct ixgbe_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ return snprintf(page, count, "%d\n", ++ therm_data->sensor_data->max_op_thresh); ++} ++ ++ ++static int ixgbe_therm_cautionthresh(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ struct ixgbe_therm_proc_data *therm_data = ++ (struct ixgbe_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ return snprintf(page, count, "%d\n", ++ therm_data->sensor_data->caution_thresh); ++} ++ ++static int ixgbe_therm_temp(char *page, char __always_unused **start, ++ off_t __always_unused off, int count, ++ int __always_unused *eof, void *data) ++{ ++ s32 status; ++ struct ixgbe_therm_proc_data *therm_data = ++ (struct ixgbe_therm_proc_data *)data; ++ ++ if (therm_data == NULL) ++ return snprintf(page, count, "error: no therm_data\n"); ++ ++ status = ixgbe_get_thermal_sensor_data_generic(therm_data->hw); ++ if (status != IXGBE_SUCCESS) ++ snprintf(page, count, "error: status %d returned\n", status); ++ ++ return snprintf(page, count, "%d\n", therm_data->sensor_data->temp); ++} ++ ++ ++struct ixgbe_proc_type { ++ char name[32]; ++ int (*read)(char*, char**, off_t, int, int*, void*); ++}; ++ ++struct ixgbe_proc_type ixgbe_proc_entries[] = { ++ {"fwbanner", &ixgbe_fwbanner}, ++ {"porttype", &ixgbe_porttype}, ++ {"portspeed", &ixgbe_portspeed}, ++ {"wqlflag", &ixgbe_wqlflag}, ++ {"xflowctl", &ixgbe_xflowctl}, ++ {"rxdrops", &ixgbe_rxdrops}, ++ {"rxerrors", &ixgbe_rxerrors}, ++ {"rxupacks", &ixgbe_rxupacks}, ++ {"rxmpacks", &ixgbe_rxmpacks}, ++ 
{"rxbpacks", &ixgbe_rxbpacks}, ++ {"txdrops", &ixgbe_txdrops}, ++ {"txerrors", &ixgbe_txerrors}, ++ {"txupacks", &ixgbe_txupacks}, ++ {"txmpacks", &ixgbe_txmpacks}, ++ {"txbpacks", &ixgbe_txbpacks}, ++ {"rxframes", &ixgbe_rxframes}, ++ {"rxbytes", &ixgbe_rxbytes}, ++ {"txframes", &ixgbe_txframes}, ++ {"txbytes", &ixgbe_txbytes}, ++ {"linkstat", &ixgbe_linkstat}, ++ {"funcid", &ixgbe_funcid}, ++ {"funcvers", &ixgbe_funcvers}, ++ {"macburn", &ixgbe_macburn}, ++ {"macadmn", &ixgbe_macadmn}, ++ {"maclla1", &ixgbe_maclla1}, ++ {"mtusize", &ixgbe_mtusize}, ++ {"featflag", &ixgbe_featflag}, ++ {"lsominct", &ixgbe_lsominct}, ++ {"prommode", &ixgbe_prommode}, ++ {"txdscqsz", &ixgbe_txdscqsz}, ++ {"rxdscqsz", &ixgbe_rxdscqsz}, ++ {"txqavg", &ixgbe_txqavg}, ++ {"rxqavg", &ixgbe_rxqavg}, ++ {"iovotype", &ixgbe_iovotype}, ++ {"funcnbr", &ixgbe_funcnbr}, ++ {"pciebnbr", &ixgbe_pciebnbr}, ++ {"", NULL} ++}; ++ ++struct ixgbe_proc_type ixgbe_internal_entries[] = { ++ {"location", &ixgbe_therm_location}, ++ {"temp", &ixgbe_therm_temp}, ++ {"cautionthresh", &ixgbe_therm_cautionthresh}, ++ {"maxopthresh", &ixgbe_therm_maxopthresh}, ++ {"", NULL} ++}; ++ ++void ixgbe_del_proc_entries(struct ixgbe_adapter *adapter) ++{ ++ int index; ++ int i; ++ char buf[16]; /* much larger than the sensor number will ever be */ ++ ++ if (ixgbe_top_dir == NULL) ++ return; ++ ++ for (i = 0; i < IXGBE_MAX_SENSORS; i++) { ++ if (adapter->therm_dir[i] == NULL) ++ continue; ++ ++ for (index = 0; ; index++) { ++ if (ixgbe_internal_entries[index].read == NULL) ++ break; ++ ++ remove_proc_entry(ixgbe_internal_entries[index].name, ++ adapter->therm_dir[i]); ++ } ++ snprintf(buf, sizeof(buf), "sensor_%d", i); ++ remove_proc_entry(buf, adapter->info_dir); ++ } ++ ++ if (adapter->info_dir != NULL) { ++ for (index = 0; ; index++) { ++ if (ixgbe_proc_entries[index].read == NULL) ++ break; ++ remove_proc_entry(ixgbe_proc_entries[index].name, ++ adapter->info_dir); ++ } ++ remove_proc_entry("info", adapter->eth_dir); 
++ } ++ ++ if (adapter->eth_dir != NULL) ++ remove_proc_entry(pci_name(adapter->pdev), ixgbe_top_dir); ++} ++ ++/* called from ixgbe_main.c */ ++void ixgbe_procfs_exit(struct ixgbe_adapter *adapter) ++{ ++ ixgbe_del_proc_entries(adapter); ++} ++ ++int ixgbe_procfs_topdir_init() ++{ ++ ixgbe_top_dir = proc_mkdir("driver/ixgbe", NULL); ++ if (ixgbe_top_dir == NULL) ++ return -ENOMEM; ++ ++ return 0; ++} ++ ++void ixgbe_procfs_topdir_exit() ++{ ++ remove_proc_entry("driver/ixgbe", NULL); ++} ++ ++/* called from ixgbe_main.c */ ++int ixgbe_procfs_init(struct ixgbe_adapter *adapter) ++{ ++ int rc = 0; ++ int index; ++ int i; ++ char buf[16]; /* much larger than the sensor number will ever be */ ++ ++ adapter->eth_dir = NULL; ++ adapter->info_dir = NULL; ++ for (i = 0; i < IXGBE_MAX_SENSORS; i++) ++ adapter->therm_dir[i] = NULL; ++ ++ if (ixgbe_top_dir == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ ++ adapter->eth_dir = proc_mkdir(pci_name(adapter->pdev), ixgbe_top_dir); ++ if (adapter->eth_dir == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ ++ adapter->info_dir = proc_mkdir("info", adapter->eth_dir); ++ if (adapter->info_dir == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ for (index = 0; ; index++) { ++ if (ixgbe_proc_entries[index].read == NULL) ++ break; ++ if (!(create_proc_read_entry(ixgbe_proc_entries[index].name, ++ 0444, ++ adapter->info_dir, ++ ixgbe_proc_entries[index].read, ++ adapter))) { ++ ++ rc = -ENOMEM; ++ goto fail; ++ } ++ } ++ if (ixgbe_thermal_present(adapter) == false) ++ goto exit; ++ ++ for (i = 0; i < IXGBE_MAX_SENSORS; i++) { ++ ++ if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == ++ 0) ++ continue; ++ ++ snprintf(buf, sizeof(buf), "sensor_%d", i); ++ adapter->therm_dir[i] = proc_mkdir(buf, adapter->info_dir); ++ if (adapter->therm_dir[i] == NULL) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ for (index = 0; ; index++) { ++ if (ixgbe_internal_entries[index].read == NULL) ++ break; ++ /* ++ * therm_data struct contains pointer the 
read func ++ * will be needing ++ */ ++ adapter->therm_data[i].hw = &adapter->hw; ++ adapter->therm_data[i].sensor_data = ++ &adapter->hw.mac.thermal_sensor_data.sensor[i]; ++ ++ if (!(create_proc_read_entry( ++ ixgbe_internal_entries[index].name, ++ 0444, ++ adapter->therm_dir[i], ++ ixgbe_internal_entries[index].read, ++ &adapter->therm_data[i]))) { ++ rc = -ENOMEM; ++ goto fail; ++ } ++ } ++ } ++ goto exit; ++ ++fail: ++ ixgbe_del_proc_entries(adapter); ++exit: ++ return rc; ++} ++ ++#endif /* !IXGBE_SYSFS */ ++#endif /* IXGBE_PROCFS */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +index 68f87ec..0fe1421 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_ptp.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -25,6 +21,7 @@ + Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 + + *******************************************************************************/ ++ + #include "ixgbe.h" + #include + +@@ -93,18 +90,79 @@ + + #define IXGBE_INCVAL_SHIFT_82599 7 + #define IXGBE_INCPER_SHIFT_82599 24 +-#define IXGBE_MAX_TIMEADJ_VALUE 0x7FFFFFFFFFFFFFFFULL + + #define IXGBE_OVERFLOW_PERIOD (HZ * 30) +-#define IXGBE_PTP_TX_TIMEOUT (HZ * 15) ++#define IXGBE_PTP_TX_TIMEOUT (HZ) + +-#ifndef NSECS_PER_SEC +-#define NSECS_PER_SEC 1000000000ULL +-#endif ++/* half of a one second clock period, for use with PPS signal. We have to use ++ * this instead of something pre-defined like IXGBE_PTP_PPS_HALF_SECOND, in ++ * order to force at least 64bits of precision for shifting ++ */ ++#define IXGBE_PTP_PPS_HALF_SECOND 500000000ULL ++ ++/* In contrast, the X550 controller has two registers, SYSTIMEH and SYSTIMEL ++ * which contain measurements of seconds and nanoseconds respectively. This ++ * matches the standard linux representation of time in the kernel. In addition, ++ * the X550 also has a SYSTIMER register which represents residue, or ++ * subnanosecond overflow adjustments. To control clock adjustment, the TIMINCA ++ * register is used, but it is unlike the X540 and 82599 devices. TIMINCA ++ * represents units of 2^-32 nanoseconds, and uses 31 bits for this, with the ++ * high bit representing whether the adjustent is positive or negative. Every ++ * clock cycle, the X550 will add 12.5 ns + TIMINCA which can result in a range ++ * of 12 to 13 nanoseconds adjustment. Unlike the 82599 and X540 devices, the ++ * X550's clock for purposes of SYSTIME generation is constant and not dependant ++ * on the link speed. 
++ * ++ * SYSTIMEH SYSTIMEL SYSTIMER ++ * +--------------+ +--------------+ +-------------+ ++ * X550 | 32 | | 32 | | 32 | ++ * *--------------+ +--------------+ +-------------+ ++ * \____seconds___/ \_nanoseconds_/ \__2^-32 ns__/ ++ * ++ * This results in a full 96 bits to represent the clock, with 32 bits for ++ * seconds, 32 bits for nanoseconds (largest value is 0d999999999 or just under ++ * 1 second) and an additional 32 bits to measure sub nanosecond adjustments for ++ * underflow of adjustments. ++ * ++ * The 32 bits of seconds for the X550 overflows every ++ * 2^32 / ( 365.25 * 24 * 60 * 60 ) = ~136 years. ++ * ++ * In order to adjust the clock frequency for the X550, the TIMINCA register is ++ * provided. This register represents a + or minus nearly 0.5 ns adjustment to ++ * the base frequency. It is measured in 2^-32 ns units, with the high bit being ++ * the sign bit. This register enables software to calculate frequency ++ * adjustments and apply them directly to the clock rate. ++ * ++ * The math for converting ppb into TIMINCA values is fairly straightforward. ++ * TIMINCA value = ( Base_Frequency * ppb ) / 1000000000ULL ++ * ++ * This assumes that ppb is never high enough to create a value bigger than ++ * TIMINCA's 31 bits can store. This is ensured by the stack. Calculating this ++ * value is also simple. ++ * Max ppb = ( Max Adjustment / Base Frequency ) / 1000000000ULL ++ * ++ * For the X550, the Max adjustment is +/- 0.5 ns, and the base frequency is ++ * 12.5 nanoseconds. This means that the Max ppb is 39999999 ++ * Note: We subtract one in order to ensure no overflow, because the TIMINCA ++ * register can only hold slightly under 0.5 nanoseconds. ++ * ++ * Because TIMINCA is measured in 2^-32 ns units, we have to convert 12.5 ns ++ * into 2^-32 units, which is ++ * ++ * 12.5 * 2^32 = C80000000 ++ * ++ * Some revisions of hardware have a faster base frequency than the registers ++ * were defined for. 
To fix this, we use a timecounter structure with the ++ * proper mult and shift to convert the cycles into nanoseconds of time. ++ */ ++#define IXGBE_X550_BASE_PERIOD 0xC80000000ULL ++#define INCVALUE_MASK 0x7FFFFFFF ++#define ISGN 0x80000000 ++#define MAX_TIMADJ 0x7FFFFFFF + + /** +- * ixgbe_ptp_setup_sdp +- * @hw: the hardware private structure ++ * ixgbe_ptp_setup_sdp_X540 ++ * @adapter: the adapter private structure + * + * this function enables or disables the clock out feature on SDP0 for + * the X540 device. It will create a 1second periodic output that can +@@ -114,83 +172,119 @@ + * aligns the start of the PPS signal to that value. The shift is + * necessary because it can change based on the link speed. + */ +-static void ixgbe_ptp_setup_sdp(struct ixgbe_adapter *adapter) ++static void ixgbe_ptp_setup_sdp_X540(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; +- int shift = adapter->cc.shift; ++ int shift = adapter->hw_cc.shift; + u32 esdp, tsauxc, clktiml, clktimh, trgttiml, trgttimh, rem; + u64 ns = 0, clock_edge = 0; + +- if ((adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED) && +- (hw->mac.type == ixgbe_mac_X540)) { ++ /* disable the pin first */ ++ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); ++ IXGBE_WRITE_FLUSH(hw); + +- /* disable the pin first */ +- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); +- IXGBE_WRITE_FLUSH(hw); ++ if (!(adapter->flags2 & IXGBE_FLAG2_PTP_PPS_ENABLED)) ++ return; + +- esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); ++ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); + +- /* +- * enable the SDP0 pin as output, and connected to the +- * native function for Timesync (ClockOut) +- */ +- esdp |= (IXGBE_ESDP_SDP0_DIR | +- IXGBE_ESDP_SDP0_NATIVE); ++ /* ++ * enable the SDP0 pin as output, and connected to the ++ * native function for Timesync (ClockOut) ++ */ ++ esdp |= IXGBE_ESDP_SDP0_DIR | ++ IXGBE_ESDP_SDP0_NATIVE; + +- /* +- * enable the Clock Out feature on SDP0, and allow +- * interrupts to occur when the pin changes +- */ +- tsauxc 
= (IXGBE_TSAUXC_EN_CLK | +- IXGBE_TSAUXC_SYNCLK | +- IXGBE_TSAUXC_SDP0_INT); +- +- /* clock period (or pulse length) */ +- clktiml = (u32)(NSECS_PER_SEC << shift); +- clktimh = (u32)((NSECS_PER_SEC << shift) >> 32); +- +- /* +- * Account for the cyclecounter wrap-around value by +- * using the converted ns value of the current time to +- * check for when the next aligned second would occur. +- */ +- clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); +- clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; +- ns = timecounter_cyc2time(&adapter->tc, clock_edge); ++ /* ++ * enable the Clock Out feature on SDP0, and allow ++ * interrupts to occur when the pin changes ++ */ ++ tsauxc = IXGBE_TSAUXC_EN_CLK | ++ IXGBE_TSAUXC_SYNCLK | ++ IXGBE_TSAUXC_SDP0_INT; + +- div_u64_rem(ns, NSECS_PER_SEC, &rem); +- clock_edge += ((NSECS_PER_SEC - (u64)rem) << shift); ++ /* set to half clock period */ ++ clktiml = (u32)(IXGBE_PTP_PPS_HALF_SECOND << shift); ++ clktimh = (u32)((IXGBE_PTP_PPS_HALF_SECOND << shift) >> 32); + +- /* specify the initial clock start time */ +- trgttiml = (u32)clock_edge; +- trgttimh = (u32)(clock_edge >> 32); ++ /* ++ * Account for the cyclecounter wrap-around value by ++ * using the converted ns value of the current time to ++ * check for when the next aligned second would occur. 
++ */ ++ clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIML); ++ clock_edge |= (u64)IXGBE_READ_REG(hw, IXGBE_SYSTIMH) << 32; ++ ns = timecounter_cyc2time(&adapter->hw_tc, clock_edge); + +- IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); +- IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); +- IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); +- IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); ++ div_u64_rem(ns, IXGBE_PTP_PPS_HALF_SECOND, &rem); ++ clock_edge += ((IXGBE_PTP_PPS_HALF_SECOND - (u64)rem) << shift); + +- IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); +- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); +- } else { +- IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, 0x0); +- } ++ /* specify the initial clock start time */ ++ trgttiml = (u32)clock_edge; ++ trgttimh = (u32)(clock_edge >> 32); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_CLKTIML, clktiml); ++ IXGBE_WRITE_REG(hw, IXGBE_CLKTIMH, clktimh); ++ IXGBE_WRITE_REG(hw, IXGBE_TRGTTIML0, trgttiml); ++ IXGBE_WRITE_REG(hw, IXGBE_TRGTTIMH0, trgttimh); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); ++ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, tsauxc); + + IXGBE_WRITE_FLUSH(hw); + } + + /** +- * ixgbe_ptp_read - read raw cycle counter (to be used by time counter) +- * @cc: the cyclecounter structure ++ * ixgbe_ptp_read_X550 - read cycle counter value ++ * @hw_cc: cyclecounter structure ++ * ++ * This function reads SYSTIME registers. It is called by the cyclecounter ++ * structure to convert from internal representation into nanoseconds. We need ++ * this for X550 since some skews do not have expected clock frequency and ++ * result of SYSTIME is 32bits of "billions of cycles" and 32 bits of ++ * "cycles", rather than seconds and nanoseconds. ++ */ ++static u64 ixgbe_ptp_read_X550(const struct cyclecounter *hw_cc) ++{ ++ struct ixgbe_adapter *adapter = ++ container_of(hw_cc, struct ixgbe_adapter, hw_cc); ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct timespec64 ts; ++ ++ /* storage is 32 bits of 'billions of cycles' and 32 bits of 'cycles'. 
++ * Some revisions of hardware run at a higher frequency and so the ++ * cycles are not guaranteed to be nanoseconds. The timespec64 created ++ * here is used for its math/conversions but does not necessarily ++ * represent nominal time. ++ * ++ * It should be noted that this cyclecounter will overflow at a ++ * non-bitmask field since we have to convert our billions of cycles ++ * into an actual cycles count. This results in some possible weird ++ * situations at high cycle counter stamps. However given that 32 bits ++ * of "seconds" is ~138 years this isn't a problem. Even at the ++ * increased frequency of some revisions, this is still ~103 years. ++ * Since the SYSTIME values start at 0 and we never write them, it is ++ * highly unlikely for the cyclecounter to overflow in practice. ++ */ ++ IXGBE_READ_REG(hw, IXGBE_SYSTIMR); ++ ts.tv_nsec = IXGBE_READ_REG(hw, IXGBE_SYSTIML); ++ ts.tv_sec = IXGBE_READ_REG(hw, IXGBE_SYSTIMH); ++ ++ return (u64)timespec64_to_ns(&ts); ++} ++ ++/** ++ * ixgbe_ptp_read_82599 - read raw cycle counter (to be used by time counter) ++ * @hw_cc: the cyclecounter structure + * + * this function reads the cyclecounter registers and is called by the + * cyclecounter structure used to construct a ns counter from the + * arbitrary fixed point registers + */ +-static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc) ++static u64 ixgbe_ptp_read_82599(const struct cyclecounter *hw_cc) + { + struct ixgbe_adapter *adapter = +- container_of(cc, struct ixgbe_adapter, cc); ++ container_of(hw_cc, struct ixgbe_adapter, hw_cc); + struct ixgbe_hw *hw = &adapter->hw; + u64 stamp = 0; + +@@ -201,20 +295,80 @@ static cycle_t ixgbe_ptp_read(const struct cyclecounter *cc) + } + + /** +- * ixgbe_ptp_adjfreq ++ * ixgbe_ptp_convert_to_hwtstamp - convert register value to hw timestamp ++ * @adapter: private adapter structure ++ * @hwtstamp: stack timestamp structure ++ * @systim: unsigned 64bit system time value ++ * ++ * We need to convert the adapter's 
RX/TXSTMP registers into a hwtstamp value ++ * which can be used by the stack's ptp functions. ++ * ++ * The lock is used to protect consistency of the cyclecounter and the SYSTIME ++ * registers. However, it does not need to protect against the Rx or Tx ++ * timestamp registers, as there can't be a new timestamp until the old one is ++ * unlatched by reading. ++ * ++ * In addition to the timestamp in hardware, some controllers need a software ++ * overflow cyclecounter, and this function takes this into account as well. ++ **/ ++static void ixgbe_ptp_convert_to_hwtstamp(struct ixgbe_adapter *adapter, ++ struct skb_shared_hwtstamps *hwtstamp, ++ u64 timestamp) ++{ ++ unsigned long flags; ++ struct timespec64 systime; ++ u64 ns; ++ ++ memset(hwtstamp, 0, sizeof(*hwtstamp)); ++ ++ switch (adapter->hw.mac.type) { ++ /* X550 and later hardware supposedly represent time using a seconds ++ * and nanoseconds counter, instead of raw 64bits nanoseconds. We need ++ * to convert the timestamp into cycles before it can be fed to the ++ * cyclecounter. We need an actual cyclecounter because some revisions ++ * of hardware run at a higher frequency and thus the counter does ++ * not represent seconds/nanoseconds. Instead it can be thought of as ++ * cycles and billions of cycles. ++ */ ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ /* Upper 32 bits represent billions of cycles, lower 32 bits ++ * represent cycles. However, we use timespec64_to_ns for the ++ * correct math even though the units haven't been corrected ++ * yet. 
++ */ ++ systime.tv_sec = timestamp >> 32; ++ systime.tv_nsec = timestamp & 0xFFFFFFFF; ++ ++ timestamp = timespec64_to_ns(&systime); ++ break; ++ default: ++ break; ++ } ++ ++ spin_lock_irqsave(&adapter->tmreg_lock, flags); ++ ns = timecounter_cyc2time(&adapter->hw_tc, timestamp); ++ spin_unlock_irqrestore(&adapter->tmreg_lock, flags); ++ ++ hwtstamp->hwtstamp = ns_to_ktime(ns); ++} ++ ++/** ++ * ixgbe_ptp_adjfreq_82599 + * @ptp: the ptp clock structure + * @ppb: parts per billion adjustment from base + * + * adjust the frequency of the ptp cycle counter by the + * indicated ppb from the base frequency. + */ +-static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) ++static int ixgbe_ptp_adjfreq_82599(struct ptp_clock_info *ptp, s32 ppb) + { + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + struct ixgbe_hw *hw = &adapter->hw; +- u64 freq; +- u32 diff, incval; ++ u64 freq, incval; ++ u32 diff; + int neg_adj = 0; + + if (ppb < 0) { +@@ -233,12 +387,16 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) + + switch (hw->mac.type) { + case ixgbe_mac_X540: +- IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); ++ if (incval > 0xFFFFFFFFULL) ++ e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); ++ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, (u32)incval); + break; + case ixgbe_mac_82599EB: ++ if (incval > 0x00FFFFFFULL) ++ e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + (1 << IXGBE_INCPER_SHIFT_82599) | +- incval); ++ ((u32)incval & 0x00FFFFFFUL)); + break; + default: + break; +@@ -248,90 +406,143 @@ static int ixgbe_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb) + } + + /** +- * ixgbe_ptp_adjtime ++ * ixgbe_ptp_adjfreq_X550 ++ * @ptp: the ptp clock structure ++ * @ppb: parts per billion adjustment from base ++ * ++ * adjust the frequency of the SYSTIME registers by the indicated ppb from base ++ * frequency ++ */ ++static int ixgbe_ptp_adjfreq_X550(struct 
ptp_clock_info *ptp, s32 ppb) ++{ ++ struct ixgbe_adapter *adapter = ++ container_of(ptp, struct ixgbe_adapter, ptp_caps); ++ struct ixgbe_hw *hw = &adapter->hw; ++ int neg_adj = 0; ++ u64 rate = IXGBE_X550_BASE_PERIOD; ++ u32 inca; ++ ++ if (ppb < 0) { ++ neg_adj = 1; ++ ppb = -ppb; ++ } ++ rate *= ppb; ++ rate = div_u64(rate, 1000000000ULL); ++ ++ /* warn if rate is too large */ ++ if (rate >= INCVALUE_MASK) ++ e_dev_warn("PTP ppb adjusted SYSTIME rate overflowed!\n"); ++ ++ inca = rate & INCVALUE_MASK; ++ if (neg_adj) ++ inca |= ISGN; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, inca); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_ptp_adjtime_timecounter + * @ptp: the ptp clock structure + * @delta: offset to adjust the cycle counter by + * + * adjust the timer by resetting the timecounter structure. + */ +-static int ixgbe_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta) ++static int ixgbe_ptp_adjtime_timecounter(struct ptp_clock_info *ptp, ++ s64 delta) + { + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + unsigned long flags; +- u64 now; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); +- +- now = timecounter_read(&adapter->tc); +- now += delta; +- +- /* reset the timecounter */ +- timecounter_init(&adapter->tc, +- &adapter->cc, +- now); +- ++ timecounter_adjtime(&adapter->hw_tc, delta); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + +- ixgbe_ptp_setup_sdp(adapter); ++ if (adapter->ptp_setup_sdp) ++ adapter->ptp_setup_sdp(adapter); + + return 0; + } + + /** +- * ixgbe_ptp_gettime ++ * ixgbe_ptp_gettime64_timecounter + * @ptp: the ptp clock structure +- * @ts: timespec structure to hold the current time value ++ * @ts: timespec64 structure to hold the current time value + * + * read the timecounter and return the correct value on ns, +- * after converting it into a struct timespec. ++ * after converting it into a struct timespec64. 
+ */ +-static int ixgbe_ptp_gettime(struct ptp_clock_info *ptp, struct timespec *ts) ++static int ixgbe_ptp_gettime64_timecounter(struct ptp_clock_info *ptp, struct timespec64 *ts) + { + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); +- u64 ns; +- u32 remainder; + unsigned long flags; ++ u64 ns; + + spin_lock_irqsave(&adapter->tmreg_lock, flags); +- ns = timecounter_read(&adapter->tc); ++ ns = timecounter_read(&adapter->hw_tc); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + +- ts->tv_sec = div_u64_rem(ns, 1000000000ULL, &remainder); +- ts->tv_nsec = remainder; ++ *ts = ns_to_timespec64(ns); + + return 0; + } + + /** +- * ixgbe_ptp_settime ++ * ixgbe_ptp_settime64_timecounter + * @ptp: the ptp clock structure +- * @ts: the timespec containing the new time for the cycle counter ++ * @ts: the timespec64 containing the new time for the cycle counter + * + * reset the timecounter to use a new base value instead of the kernel + * wall timer value. 
+ */ +-static int ixgbe_ptp_settime(struct ptp_clock_info *ptp, +- const struct timespec *ts) ++static int ixgbe_ptp_settime64_timecounter(struct ptp_clock_info *ptp, ++ const struct timespec64 *ts) + { + struct ixgbe_adapter *adapter = + container_of(ptp, struct ixgbe_adapter, ptp_caps); + u64 ns; + unsigned long flags; + +- ns = ts->tv_sec * 1000000000ULL; +- ns += ts->tv_nsec; ++ ns = timespec64_to_ns(ts); + + /* reset the timecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); +- timecounter_init(&adapter->tc, &adapter->cc, ns); ++ timecounter_init(&adapter->hw_tc, &adapter->hw_cc, ns); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + +- ixgbe_ptp_setup_sdp(adapter); ++ if (adapter->ptp_setup_sdp) ++ adapter->ptp_setup_sdp(adapter); ++ return 0; ++} ++ ++#ifndef HAVE_PTP_CLOCK_INFO_GETTIME64 ++static int ixgbe_ptp_gettime_timecounter(struct ptp_clock_info *ptp, struct timespec *ts) ++{ ++ struct timespec64 ts64; ++ int err; ++ ++ err = ixgbe_ptp_gettime64_timecounter(ptp, &ts64); ++ if (err) ++ return err; ++ ++ *ts = timespec64_to_timespec(ts64); ++ + return 0; + } + ++static int ixgbe_ptp_settime_timecounter(struct ptp_clock_info *ptp, ++ const struct timespec *ts) ++{ ++ struct timespec64 ts64; ++ ++ ts64 = timespec_to_timespec64(*ts); ++ return ixgbe_ptp_settime64_timecounter(ptp, &ts64); ++} ++#endif ++ + /** + * ixgbe_ptp_feature_enable + * @ptp: the ptp clock structure +@@ -353,19 +564,14 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp, + * event when the clock SDP triggers. 
Clear mask when PPS is + * disabled + */ +- if (rq->type == PTP_CLK_REQ_PPS) { +- switch (adapter->hw.mac.type) { +- case ixgbe_mac_X540: +- if (on) +- adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; +- else +- adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; +- +- ixgbe_ptp_setup_sdp(adapter); +- return 0; +- default: +- break; +- } ++ if (rq->type == PTP_CLK_REQ_PPS && adapter->ptp_setup_sdp) { ++ if (on) ++ adapter->flags2 |= IXGBE_FLAG2_PTP_PPS_ENABLED; ++ else ++ adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; ++ ++ adapter->ptp_setup_sdp(adapter); ++ return 0; + } + + return -ENOTSUPP; +@@ -379,7 +585,7 @@ static int ixgbe_ptp_feature_enable(struct ptp_clock_info *ptp, + * This function is called by the interrupt routine when checking for + * interrupts. It will check and handle a pps event. + */ +-void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) ++void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + struct ptp_clock_event event; +@@ -408,16 +614,18 @@ void ixgbe_ptp_check_pps_event(struct ixgbe_adapter *adapter, u32 eicr) + * + * this watchdog task periodically reads the timecounter + * in order to prevent missing when the system time registers wrap +- * around. This needs to be run approximately twice a minute. ++ * around. This needs to be run approximately twice a minute for the fastest ++ * overflowing hardware. We run it for all hardware since it shouldn't have a ++ * large impact. 
+ */ + void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) + { + bool timeout = time_is_before_jiffies(adapter->last_overflow_check + +- IXGBE_OVERFLOW_PERIOD); +- struct timespec ts; ++ IXGBE_OVERFLOW_PERIOD); ++ struct timespec64 ts; + + if (timeout) { +- ixgbe_ptp_gettime(&adapter->ptp_caps, &ts); ++ ixgbe_ptp_gettime64_timecounter(&adapter->ptp_caps, &ts); + adapter->last_overflow_check = jiffies; + } + } +@@ -434,8 +642,10 @@ void ixgbe_ptp_overflow_check(struct ixgbe_adapter *adapter) + void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; ++ struct ixgbe_ring *rx_ring; + u32 tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + unsigned long rx_event; ++ int n; + + /* if we don't have a valid timestamp in the registers, just update the + * timeout counter and exit +@@ -447,15 +657,66 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) + + /* determine the most recent watchdog or rx_timestamp event */ + rx_event = adapter->last_rx_ptp_check; +- if (time_after(adapter->last_rx_timestamp, rx_event)) +- rx_event = adapter->last_rx_timestamp; ++ for (n = 0; n < adapter->num_rx_queues; n++) { ++ rx_ring = adapter->rx_ring[n]; ++ if (time_after(rx_ring->last_rx_timestamp, rx_event)) ++ rx_event = rx_ring->last_rx_timestamp; ++ } + + /* only need to read the high RXSTMP register to clear the lock */ + if (time_is_before_jiffies(rx_event + 5*HZ)) { + IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + adapter->last_rx_ptp_check = jiffies; + +- e_warn(drv, "clearing RX Timestamp hang\n"); ++ adapter->rx_hwtstamp_cleared++; ++ e_warn(drv, "clearing RX Timestamp hang"); ++ } ++} ++ ++/** ++ * ixgbe_ptp_clear_tx_timestamp - utility function to clear Tx timestamp state ++ * @adapter: the private adapter structure ++ * ++ * This function should be called whenever the state related to a Tx timestamp ++ * needs to be cleared. This helps ensure that all related bits are reset for ++ * the next Tx timestamp event. 
++ */ ++static void ixgbe_ptp_clear_tx_timestamp(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ IXGBE_READ_REG(hw, IXGBE_TXSTMPH); ++ if (adapter->ptp_tx_skb) { ++ dev_kfree_skb_any(adapter->ptp_tx_skb); ++ adapter->ptp_tx_skb = NULL; ++ } ++ clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); ++} ++ ++/** ++ * ixgbe_ptp_tx_hang - detect error case where Tx timestamp never finishes ++ * @adapter: private network adapter structure ++ */ ++void ixgbe_ptp_tx_hang(struct ixgbe_adapter *adapter) ++{ ++ bool timeout = time_is_before_jiffies(adapter->ptp_tx_start + ++ IXGBE_PTP_TX_TIMEOUT); ++ ++ if (!adapter->ptp_tx_skb) ++ return; ++ ++ if (!test_bit(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state)) ++ return; ++ ++ /* If we haven't received a timestamp within the timeout, it is ++ * reasonable to assume that it will never occur, so we can unlock the ++ * timestamp bit when this occurs. ++ */ ++ if (timeout) { ++ cancel_work_sync(&adapter->ptp_tx_work); ++ ixgbe_ptp_clear_tx_timestamp(adapter); ++ adapter->tx_hwtstamp_timeouts++; ++ e_warn(drv, "clearing Tx timestamp hang\n"); + } + } + +@@ -469,25 +730,26 @@ void ixgbe_ptp_rx_hang(struct ixgbe_adapter *adapter) + */ + static void ixgbe_ptp_tx_hwtstamp(struct ixgbe_adapter *adapter) + { ++ struct sk_buff *skb = adapter->ptp_tx_skb; + struct ixgbe_hw *hw = &adapter->hw; + struct skb_shared_hwtstamps shhwtstamps; +- u64 regval = 0, ns; +- unsigned long flags; ++ u64 regval = 0; + + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPL); + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_TXSTMPH) << 32; ++ ixgbe_ptp_convert_to_hwtstamp(adapter, &shhwtstamps, regval); + +- spin_lock_irqsave(&adapter->tmreg_lock, flags); +- ns = timecounter_cyc2time(&adapter->tc, regval); +- spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +- +- memset(&shhwtstamps, 0, sizeof(shhwtstamps)); +- shhwtstamps.hwtstamp = ns_to_ktime(ns); +- skb_tstamp_tx(adapter->ptp_tx_skb, &shhwtstamps); +- +- 
dev_kfree_skb_any(adapter->ptp_tx_skb); ++ /* Handle cleanup of the ptp_tx_skb ourselves, and unlock the state ++ * bit prior to notifying the stack via skb_tstamp_tx(). This prevents ++ * well behaved applications from attempting to timestamp again prior ++ * to the lock bit being clear. ++ */ + adapter->ptp_tx_skb = NULL; + clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); ++ ++ /* Notify the stack and then free the skb after we've unlocked */ ++ skb_tstamp_tx(skb, &shhwtstamps); ++ dev_kfree_skb_any(skb); + } + + /** +@@ -507,39 +769,86 @@ static void ixgbe_ptp_tx_hwtstamp_work(struct work_struct *work) + IXGBE_PTP_TX_TIMEOUT); + u32 tsynctxctl; + +- if (timeout) { +- dev_kfree_skb_any(adapter->ptp_tx_skb); +- adapter->ptp_tx_skb = NULL; +- clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); +- e_warn(drv, "clearing Tx Timestamp hang\n"); ++ /* we have to have a valid skb to poll for a timestamp */ ++ if (!adapter->ptp_tx_skb) { ++ ixgbe_ptp_clear_tx_timestamp(adapter); + return; + } + ++ /* stop polling once we have a valid timestamp */ + tsynctxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCTXCTL); +- if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) ++ if (tsynctxctl & IXGBE_TSYNCTXCTL_VALID) { + ixgbe_ptp_tx_hwtstamp(adapter); +- else +- /* reschedule to keep checking if it's not available yet */ ++ return; ++ } ++ ++ /* check timeout last in case timestamp event just occurred */ ++ if (timeout) { ++ ixgbe_ptp_clear_tx_timestamp(adapter); ++ adapter->tx_hwtstamp_timeouts++; ++ e_warn(drv, "clearing Tx Timestamp hang"); ++ } else { ++ /* reschedule to keep checking until we timeout */ + schedule_work(&adapter->ptp_tx_work); ++ } + } + + /** +- * ixgbe_ptp_rx_hwtstamp - utility function which checks for RX time stamp +- * @adapter: pointer to adapter struct ++ * ixgbe_ptp_rx_pktstamp - utility function to get RX time stamp from buffer ++ * @q_vector: structure containing interrupt and ring information ++ * @skb: the packet ++ * ++ * This function will be 
called by the Rx routine of the timestamp for this ++ * packet is stored in the buffer. The value is stored in little endian format ++ * starting at the end of the packet data. ++ */ ++void ixgbe_ptp_rx_pktstamp(struct ixgbe_q_vector *q_vector, ++ struct sk_buff *skb) ++{ ++ __le64 regval; ++ ++ /* copy the bits out of the skb, and then trim the skb length */ ++ skb_copy_bits(skb, skb->len - IXGBE_TS_HDR_LEN, ®val, IXGBE_TS_HDR_LEN); ++ __pskb_trim(skb, skb->len - IXGBE_TS_HDR_LEN); ++ ++ /* The timestamp is recorded in little endian format, and is stored at ++ * the end of the packet. ++ * ++ * DWORD: N N + 1 N + 2 ++ * Field: End of Packet SYSTIMH SYSTIML ++ */ ++ ixgbe_ptp_convert_to_hwtstamp(q_vector->adapter, skb_hwtstamps(skb), ++ le64_to_cpu(regval)); ++} ++ ++/** ++ * ixgbe_ptp_rx_rgtstamp - utility function which checks for RX time stamp ++ * @q_vector: structure containing interrupt and ring information + * @skb: particular skb to send timestamp with + * + * if the timestamp is valid, we convert it into the timecounter ns + * value, then store that result into the shhwtstamps structure which + * is passed up the network stack + */ +-void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb) ++void ixgbe_ptp_rx_rgtstamp(struct ixgbe_q_vector *q_vector, ++ struct sk_buff *skb) + { +- struct ixgbe_hw *hw = &adapter->hw; +- struct skb_shared_hwtstamps *shhwtstamps; +- u64 regval = 0, ns; ++ struct ixgbe_adapter *adapter; ++ struct ixgbe_hw *hw; ++ u64 regval = 0; + u32 tsyncrxctl; +- unsigned long flags; + ++ /* we cannot process timestamps on a ring without a q_vector */ ++ if (!q_vector || !q_vector->adapter) ++ return; ++ ++ adapter = q_vector->adapter; ++ hw = &adapter->hw; ++ ++ /* ++ * Read the tsyncrxctl register afterwards in order to prevent taking an ++ * I/O hit on every packet. 
++ */ + tsyncrxctl = IXGBE_READ_REG(hw, IXGBE_TSYNCRXCTL); + if (!(tsyncrxctl & IXGBE_TSYNCRXCTL_VALID)) + return; +@@ -547,25 +856,24 @@ void ixgbe_ptp_rx_hwtstamp(struct ixgbe_adapter *adapter, struct sk_buff *skb) + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPL); + regval |= (u64)IXGBE_READ_REG(hw, IXGBE_RXSTMPH) << 32; + +- spin_lock_irqsave(&adapter->tmreg_lock, flags); +- ns = timecounter_cyc2time(&adapter->tc, regval); +- spin_unlock_irqrestore(&adapter->tmreg_lock, flags); +- +- shhwtstamps = skb_hwtstamps(skb); +- shhwtstamps->hwtstamp = ns_to_ktime(ns); +- +- /* Update the last_rx_timestamp timer in order to enable watchdog check +- * for error case of latched timestamp on a dropped packet. +- */ +- adapter->last_rx_timestamp = jiffies; ++ ixgbe_ptp_convert_to_hwtstamp(adapter, skb_hwtstamps(skb), regval); + } + ++/** ++ * ixgbe_ptp_get_ts_config - get current hardware timestamping configuration ++ * @adapter: pointer to adapter structure ++ * @ifreq: ioctl data ++ * ++ * This function returns the current timestamping settings. Rather than ++ * attempt to deconstruct registers to fill in the values, simply keep a copy ++ * of the old settings around, and return a copy when requested. ++ */ + int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) + { + struct hwtstamp_config *config = &adapter->tstamp_config; + +- return copy_to_user(ifr->ifr_data, config, +- sizeof(*config)) ? -EFAULT : 0; ++ return copy_to_user(ifr->ifr_data, config, sizeof(*config)) ? ++ -EFAULT : 0; + } + + /** +@@ -594,7 +902,7 @@ int ixgbe_ptp_get_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) + * mode, if required to support the specifically requested mode. 
+ */ + static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, +- struct hwtstamp_config *config) ++ struct hwtstamp_config *config) + { + struct ixgbe_hw *hw = &adapter->hw; + u32 tsync_tx_ctl = IXGBE_TSYNCTXCTL_ENABLED; +@@ -620,14 +928,20 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, + case HWTSTAMP_FILTER_NONE: + tsync_rx_ctl = 0; + tsync_rx_mtrl = 0; ++ adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | ++ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_mtrl |= IXGBE_RXMTRL_V1_SYNC_MSG; ++ adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | ++ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_L4_V1; + tsync_rx_mtrl |= IXGBE_RXMTRL_V1_DELAY_REQ_MSG; ++ adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | ++ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V2_EVENT: + case HWTSTAMP_FILTER_PTP_V2_L2_EVENT: +@@ -641,26 +955,70 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, + tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_EVENT_V2; + is_l2 = true; + config->rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; ++ adapter->flags |= (IXGBE_FLAG_RX_HWTSTAMP_ENABLED | ++ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + break; + case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: ++#ifdef HAVE_HWTSTAMP_FILTER_NTP_ALL ++ case HWTSTAMP_FILTER_NTP_ALL: ++#endif /* HAVE_HWTSTAMP_FILTER_NTP_ALL */ + case HWTSTAMP_FILTER_ALL: ++ /* The X550 controller is capable of timestamping all packets, ++ * which allows it to accept any filter. 
++ */ ++ if (hw->mac.type >= ixgbe_mac_X550) { ++ tsync_rx_ctl |= IXGBE_TSYNCRXCTL_TYPE_ALL; ++ config->rx_filter = HWTSTAMP_FILTER_ALL; ++ adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; ++ break; ++ } ++ /* fall through */ + default: +- /* +- * register RXMTRL must be set in order to do V1 packets, ++ /* register RXMTRL must be set in order to do V1 packets, + * therefore it is not possible to time stamp both V1 Sync and +- * Delay_Req messages and hardware does not support +- * timestamping all packets => return error ++ * Delay_Req messages unless hardware supports timestamping all ++ * packets => return error + */ ++ adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | ++ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + config->rx_filter = HWTSTAMP_FILTER_NONE; + return -ERANGE; + } + + if (hw->mac.type == ixgbe_mac_82598EB) { ++ adapter->flags &= ~(IXGBE_FLAG_RX_HWTSTAMP_ENABLED | ++ IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER); + if (tsync_rx_ctl | tsync_tx_ctl) + return -ERANGE; + return 0; + } + ++ /* Per-packet timestamping only works if the filter is set to all ++ * packets. Since this is desired, always timestamp all packets as long ++ * as any Rx filter was configured. ++ */ ++ switch (hw->mac.type) { ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ /* enable timestamping all packets only if at least some ++ * packets were requested. 
Otherwise, play nice and disable ++ * timestamping */ ++ if (config->rx_filter == HWTSTAMP_FILTER_NONE) ++ break; ++ ++ tsync_rx_ctl = IXGBE_TSYNCRXCTL_ENABLED | ++ IXGBE_TSYNCRXCTL_TYPE_ALL | ++ IXGBE_TSYNCRXCTL_TSIP_UT_EN; ++ config->rx_filter = HWTSTAMP_FILTER_ALL; ++ adapter->flags |= IXGBE_FLAG_RX_HWTSTAMP_ENABLED; ++ adapter->flags &= ~IXGBE_FLAG_RX_HWTSTAMP_IN_REGISTER; ++ is_l2 = true; ++ break; ++ default: ++ break; ++ } ++ + /* define ethertype filter for timestamping L2 packets */ + if (is_l2) + IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_1588), +@@ -687,9 +1045,9 @@ static int ixgbe_ptp_set_timestamp_mode(struct ixgbe_adapter *adapter, + + IXGBE_WRITE_FLUSH(hw); + +- /* clear TX/RX time stamp registers, just to be sure */ +- regval = IXGBE_READ_REG(hw, IXGBE_TXSTMPH); +- regval = IXGBE_READ_REG(hw, IXGBE_RXSTMPH); ++ /* clear TX/RX timestamp state, just to be sure */ ++ ixgbe_ptp_clear_tx_timestamp(adapter); ++ IXGBE_READ_REG(hw, IXGBE_RXSTMPH); + + return 0; + } +@@ -722,23 +1080,9 @@ int ixgbe_ptp_set_ts_config(struct ixgbe_adapter *adapter, struct ifreq *ifr) + -EFAULT : 0; + } + +-/** +- * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw +- * @adapter: pointer to the adapter structure +- * +- * This function should be called to set the proper values for the TIMINCA +- * register and tell the cyclecounter structure what the tick rate of SYSTIME +- * is. It does not directly modify SYSTIME registers or the timecounter +- * structure. It should be called whenever a new TIMINCA value is necessary, +- * such as during initialization or when the link speed changes. 
+- */ +-void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) ++static void ixgbe_ptp_link_speed_adjust(struct ixgbe_adapter *adapter, ++ u32 *shift, u32 *incval) + { +- struct ixgbe_hw *hw = &adapter->hw; +- u32 incval = 0; +- u32 shift = 0; +- unsigned long flags; +- + /** + * Scale the NIC cycle counter by a large factor so that + * relatively small corrections to the frequency can be added +@@ -755,33 +1099,98 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) + */ + switch (adapter->link_speed) { + case IXGBE_LINK_SPEED_100_FULL: +- incval = IXGBE_INCVAL_100; +- shift = IXGBE_INCVAL_SHIFT_100; ++ *shift = IXGBE_INCVAL_SHIFT_100; ++ *incval = IXGBE_INCVAL_100; + break; + case IXGBE_LINK_SPEED_1GB_FULL: +- incval = IXGBE_INCVAL_1GB; +- shift = IXGBE_INCVAL_SHIFT_1GB; ++ *shift = IXGBE_INCVAL_SHIFT_1GB; ++ *incval = IXGBE_INCVAL_1GB; + break; + case IXGBE_LINK_SPEED_10GB_FULL: + default: +- incval = IXGBE_INCVAL_10GB; +- shift = IXGBE_INCVAL_SHIFT_10GB; ++ *shift = IXGBE_INCVAL_SHIFT_10GB; ++ *incval = IXGBE_INCVAL_10GB; + break; + } + +- /** +- * Modify the calculated values to fit within the correct +- * number of bits specified by the hardware. The 82599 doesn't +- * have the same space as the X540, so bitshift the calculated +- * values to fit. ++ return; ++} ++ ++/** ++ * ixgbe_ptp_start_cyclecounter - create the cycle counter from hw ++ * @adapter: pointer to the adapter structure ++ * ++ * This function should be called to set the proper values for the TIMINCA ++ * register and tell the cyclecounter structure what the tick rate of SYSTIME ++ * is. It does not directly modify SYSTIME registers or the timecounter ++ * structure. It should be called whenever a new TIMINCA value is necessary, ++ * such as during initialization or when the link speed changes. 
++ */ ++void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ unsigned long flags; ++ struct cyclecounter cc; ++ u32 incval = 0; ++ u32 tsauxc = 0, fuse0 = 0; ++ ++ /* For some of the boards below this mask is technically incorrect. ++ * The timestamp mask overflows at approximately 61bits. However the ++ * particular hardware does not overflow on an even bitmask value. ++ * Instead, it overflows due to conversion of upper 32bits billions of ++ * cycles. Timecounters are not really intended for this purpose so ++ * they do not properly function if the overflow point isn't 2^N-1. ++ * However, the actual SYSTIME values in question take ~138 years to ++ * overflow. In practice this means they won't actually overflow. A ++ * proper fix to this problem would require modification of the ++ * timecounter delta calculations. + */ ++ cc.mask = CLOCKSOURCE_MASK(64); ++ cc.mult = 1; ++ cc.shift = 0; ++ + switch (hw->mac.type) { ++ case ixgbe_mac_X550EM_x: ++ /* SYSTIME assumes X550EM_x board frequency is 300Mhz, and is ++ * designed to represent seconds and nanoseconds when this is ++ * the case. However, some revisions of hardware have a 400Mhz ++ * clock and we have to compensate for this frequency ++ * variation using corrected mult and shift values. 
++ */ ++ fuse0 = IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)); ++ if (!(fuse0 & IXGBE_FUSES0_300MHZ)) { ++ cc.mult = 3; ++ cc.shift = 2; ++ } ++ /* fallthrough */ ++ case ixgbe_mac_X550EM_a: ++ case ixgbe_mac_X550: ++ cc.read = ixgbe_ptp_read_X550; ++ ++ /* enable SYSTIME counter */ ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMR, 0); ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0); ++ IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0); ++ tsauxc = IXGBE_READ_REG(hw, IXGBE_TSAUXC); ++ IXGBE_WRITE_REG(hw, IXGBE_TSAUXC, ++ tsauxc & ~IXGBE_TSAUXC_DISABLE_SYSTIME); ++ IXGBE_WRITE_REG(hw, IXGBE_TSIM, IXGBE_TSIM_TXTS); ++ IXGBE_WRITE_REG(hw, IXGBE_EIMS, IXGBE_EIMS_TIMESYNC); ++ ++ IXGBE_WRITE_FLUSH(hw); ++ break; + case ixgbe_mac_X540: ++ cc.read = ixgbe_ptp_read_82599; ++ ++ ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, incval); + break; + case ixgbe_mac_82599EB: ++ cc.read = ixgbe_ptp_read_82599; ++ ++ ixgbe_ptp_link_speed_adjust(adapter, &cc.shift, &incval); + incval >>= IXGBE_INCVAL_SHIFT_82599; +- shift -= IXGBE_INCVAL_SHIFT_82599; ++ cc.shift -= IXGBE_INCVAL_SHIFT_82599; + IXGBE_WRITE_REG(hw, IXGBE_TIMINCA, + (1 << IXGBE_INCPER_SHIFT_82599) | + incval); +@@ -797,13 +1206,7 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) + + /* need lock to prevent incorrect read while modifying cyclecounter */ + spin_lock_irqsave(&adapter->tmreg_lock, flags); +- +- memset(&adapter->cc, 0, sizeof(adapter->cc)); +- adapter->cc.read = ixgbe_ptp_read; +- adapter->cc.mask = CLOCKSOURCE_MASK(64); +- adapter->cc.shift = shift; +- adapter->cc.mult = 1; +- ++ memcpy(&adapter->hw_cc, &cc, sizeof(adapter->hw_cc)); + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + } + +@@ -811,55 +1214,55 @@ void ixgbe_ptp_start_cyclecounter(struct ixgbe_adapter *adapter) + * ixgbe_ptp_reset + * @adapter: the ixgbe private board structure + * +- * When the MAC resets, all the hardware bits for timesync are reset. 
This +- * function is used to re-enable the device for PTP based on current settings. +- * We do lose the current clock time, so just reset the cyclecounter to the +- * system real clock time. ++ * When the MAC resets, all of the hardware configuration for timesync is ++ * reset. This function should be called to re-enable the device for PTP, ++ * using the last known settings. However, we do lose the current clock time, ++ * so we fallback to resetting it based on the kernel's realtime clock. + * +- * This function will maintain hwtstamp_config settings, and resets the SDP +- * output if it was enabled. ++ * This function will maintain the hwtstamp_config settings, and it retriggers ++ * the SDP output if it's enabled. + */ + void ixgbe_ptp_reset(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + unsigned long flags; + +- /* set SYSTIME registers to 0 just in case */ +- IXGBE_WRITE_REG(hw, IXGBE_SYSTIML, 0x00000000); +- IXGBE_WRITE_REG(hw, IXGBE_SYSTIMH, 0x00000000); +- IXGBE_WRITE_FLUSH(hw); +- + /* reset the hardware timestamping mode */ + ixgbe_ptp_set_timestamp_mode(adapter, &adapter->tstamp_config); + ++ /* 82598 does not support PTP */ ++ if (hw->mac.type == ixgbe_mac_82598EB) ++ return; ++ + ixgbe_ptp_start_cyclecounter(adapter); + + spin_lock_irqsave(&adapter->tmreg_lock, flags); +- +- /* reset the ns time counter */ +- timecounter_init(&adapter->tc, &adapter->cc, ++ timecounter_init(&adapter->hw_tc, &adapter->hw_cc, + ktime_to_ns(ktime_get_real())); +- + spin_unlock_irqrestore(&adapter->tmreg_lock, flags); + ++ adapter->last_overflow_check = jiffies; ++ + /* + * Now that the shift has been calculated and the systime + * registers reset, (re-)enable the Clock out feature + */ +- ixgbe_ptp_setup_sdp(adapter); ++ if (adapter->ptp_setup_sdp) ++ adapter->ptp_setup_sdp(adapter); + } + + /** + * ixgbe_ptp_create_clock + * @adapter: the ixgbe private adapter structure + * +- * This function performs setup of the user entry point function 
table and +- * initializes the PTP clock device, which is used to access the clock-like +- * features of the PTP core. It will be called by ixgbe_ptp_init, only if +- * there isn't already a clock device (such as after a suspend/resume cycle, +- * where the clock device wasn't destroyed). ++ * This functino performs setup of the user entry point function table and ++ * initalizes the PTP clock device used by userspace to access the clock-like ++ * features of the PTP core. It will be called by ixgbe_ptp_init, and may ++ * re-use a previously initialized clock (such as during a suspend/resume ++ * cycle). + */ +-static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) ++ ++static long ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) + { + struct net_device *netdev = adapter->netdev; + long err; +@@ -879,11 +1282,17 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 1; +- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; +- adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; +- adapter->ptp_caps.gettime = ixgbe_ptp_gettime; +- adapter->ptp_caps.settime = ixgbe_ptp_settime; ++ adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; ++ adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_timecounter; ++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 ++ adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime64_timecounter; ++ adapter->ptp_caps.settime64 = ixgbe_ptp_settime64_timecounter; ++#else ++ adapter->ptp_caps.gettime = ixgbe_ptp_gettime_timecounter; ++ adapter->ptp_caps.settime = ixgbe_ptp_settime_timecounter; ++#endif + adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; ++ adapter->ptp_setup_sdp = ixgbe_ptp_setup_sdp_X540; + break; + case ixgbe_mac_82599EB: + snprintf(adapter->ptp_caps.name, +@@ -895,30 +1304,58 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) + adapter->ptp_caps.n_ext_ts = 0; + adapter->ptp_caps.n_per_out = 0; + adapter->ptp_caps.pps = 
0; +- adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq; +- adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime; +- adapter->ptp_caps.gettime = ixgbe_ptp_gettime; +- adapter->ptp_caps.settime = ixgbe_ptp_settime; ++ adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_82599; ++ adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_timecounter; ++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 ++ adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime64_timecounter; ++ adapter->ptp_caps.settime64 = ixgbe_ptp_settime64_timecounter; ++#else ++ adapter->ptp_caps.gettime = ixgbe_ptp_gettime_timecounter; ++ adapter->ptp_caps.settime = ixgbe_ptp_settime_timecounter; ++#endif + adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; + break; ++ case ixgbe_mac_X550: ++ case ixgbe_mac_X550EM_x: ++ case ixgbe_mac_X550EM_a: ++ snprintf(adapter->ptp_caps.name, 16, "%s", netdev->name); ++ adapter->ptp_caps.owner = THIS_MODULE; ++ adapter->ptp_caps.max_adj = 30000000; ++ adapter->ptp_caps.n_alarm = 0; ++ adapter->ptp_caps.n_ext_ts = 0; ++ adapter->ptp_caps.n_per_out = 0; ++ adapter->ptp_caps.pps = 0; ++ adapter->ptp_caps.adjfreq = ixgbe_ptp_adjfreq_X550; ++ adapter->ptp_caps.adjtime = ixgbe_ptp_adjtime_timecounter; ++#ifdef HAVE_PTP_CLOCK_INFO_GETTIME64 ++ adapter->ptp_caps.gettime64 = ixgbe_ptp_gettime64_timecounter; ++ adapter->ptp_caps.settime64 = ixgbe_ptp_settime64_timecounter; ++#else ++ adapter->ptp_caps.gettime = ixgbe_ptp_gettime_timecounter; ++ adapter->ptp_caps.settime = ixgbe_ptp_settime_timecounter; ++#endif ++ adapter->ptp_caps.enable = ixgbe_ptp_feature_enable; ++ adapter->ptp_setup_sdp = NULL; ++ break; + default: + adapter->ptp_clock = NULL; ++ adapter->ptp_setup_sdp = NULL; + return -EOPNOTSUPP; + } + + adapter->ptp_clock = ptp_clock_register(&adapter->ptp_caps, +- &adapter->pdev->dev); ++ pci_dev_to_dev(adapter->pdev)); + if (IS_ERR(adapter->ptp_clock)) { + err = PTR_ERR(adapter->ptp_clock); + adapter->ptp_clock = NULL; + e_dev_err("ptp_clock_register failed\n"); + return err; +- } else ++ } else if 
(adapter->ptp_clock) + e_dev_info("registered PHC device on %s\n", netdev->name); + +- /* set default timestamp mode to disabled here. We do this in +- * create_clock instead of init, because we don't want to override the +- * previous settings during a resume cycle. ++ /* Set the default timestamp mode to disabled here. We do this in ++ * create_clock instead of initialization, because we don't want to ++ * override the previous settings during a suspend/resume cycle. + */ + adapter->tstamp_config.rx_filter = HWTSTAMP_FILTER_NONE; + adapter->tstamp_config.tx_type = HWTSTAMP_TX_OFF; +@@ -930,26 +1367,25 @@ static int ixgbe_ptp_create_clock(struct ixgbe_adapter *adapter) + * ixgbe_ptp_init + * @adapter: the ixgbe private adapter structure + * +- * This function performs the required steps for enabling PTP +- * support. If PTP support has already been loaded it simply calls the ++ * This function performs the required steps for enabling ptp ++ * support. If ptp support has already been loaded it simply calls the + * cyclecounter init routine and exits. + */ + void ixgbe_ptp_init(struct ixgbe_adapter *adapter) + { +- /* initialize the spin lock first since we can't control when a user +- * will call the entry functions once we have initialized the clock +- * device ++ /* initialize the spin lock first, since the user might call the clock ++ * functions any time after we've initialized the ptp clock device. 
+ */ + spin_lock_init(&adapter->tmreg_lock); + +- /* obtain a PTP device, or re-use an existing device */ ++ /* obtain a ptp clock device, or re-use an existing device */ + if (ixgbe_ptp_create_clock(adapter)) + return; + +- /* we have a clock so we can initialize work now */ ++ /* we have a clock, so we can intialize work for timestamps now */ + INIT_WORK(&adapter->ptp_tx_work, ixgbe_ptp_tx_hwtstamp_work); + +- /* reset the PTP related hardware bits */ ++ /* reset the ptp related hardware bits */ + ixgbe_ptp_reset(adapter); + + /* enter the IXGBE_PTP_RUNNING state */ +@@ -959,45 +1395,39 @@ void ixgbe_ptp_init(struct ixgbe_adapter *adapter) + } + + /** +- * ixgbe_ptp_suspend - stop PTP work items +- * @ adapter: pointer to adapter struct ++ * ixgbe_ptp_suspend - stop ptp work items ++ * @adapter: pointer to adapter struct + * +- * this function suspends PTP activity, and prevents more PTP work from being +- * generated, but does not destroy the PTP clock device. ++ * This function suspends ptp activity, and prevents more work from being ++ * generated, but does not destroy the clock device. + */ + void ixgbe_ptp_suspend(struct ixgbe_adapter *adapter) + { +- /* Leave the IXGBE_PTP_RUNNING state. 
*/ ++ /* leave the IXGBE_PTP_RUNNING STATE */ + if (!test_and_clear_bit(__IXGBE_PTP_RUNNING, &adapter->state)) + return; + +- /* since this might be called in suspend, we don't clear the state, +- * but simply reset the auxiliary PPS signal control register +- */ +- IXGBE_WRITE_REG(&adapter->hw, IXGBE_TSAUXC, 0x0); ++ adapter->flags2 &= ~IXGBE_FLAG2_PTP_PPS_ENABLED; ++ if (adapter->ptp_setup_sdp) ++ adapter->ptp_setup_sdp(adapter); + +- /* ensure that we cancel any pending PTP Tx work item in progress */ + cancel_work_sync(&adapter->ptp_tx_work); +- if (adapter->ptp_tx_skb) { +- dev_kfree_skb_any(adapter->ptp_tx_skb); +- adapter->ptp_tx_skb = NULL; +- clear_bit_unlock(__IXGBE_PTP_TX_IN_PROGRESS, &adapter->state); +- } ++ ixgbe_ptp_clear_tx_timestamp(adapter); + } + + /** +- * ixgbe_ptp_stop - close the PTP device ++ * ixgbe_ptp_stop - destroy the ptp_clock device + * @adapter: pointer to adapter struct + * +- * completely destroy the PTP device, should only be called when the device is +- * being fully closed. ++ * Completely destroy the ptp_clock device, and disable all PTP related ++ * features. Intended to be run when the device is being closed. + */ + void ixgbe_ptp_stop(struct ixgbe_adapter *adapter) + { +- /* first, suspend PTP activity */ ++ /* first, suspend ptp activity */ + ixgbe_ptp_suspend(adapter); + +- /* disable the PTP clock device */ ++ /* now destroy the ptp clock device */ + if (adapter->ptp_clock) { + ptp_clock_unregister(adapter->ptp_clock); + adapter->ptp_clock = NULL; +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +index 16b3a1c..4b996de 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. 
++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -26,6 +22,7 @@ + + *******************************************************************************/ + ++ + #include + #include + #include +@@ -36,85 +33,155 @@ + #include + #include + #include +-#ifdef NETIF_F_HW_VLAN_CTAG_TX +-#include +-#endif + + #include "ixgbe.h" + #include "ixgbe_type.h" + #include "ixgbe_sriov.h" + + #ifdef CONFIG_PCI_IOV +-static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) ++static inline void ixgbe_alloc_vf_macvlans(struct ixgbe_adapter *adapter, ++ unsigned int num_vfs) + { + struct ixgbe_hw *hw = &adapter->hw; +- int num_vf_macvlans, i; + struct vf_macvlans *mv_list; +- +- adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; +- e_info(probe, "SR-IOV enabled with %d VFs\n", adapter->num_vfs); +- +- /* Enable VMDq flag so device will be set in VM mode */ +- adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; +- if (!adapter->ring_feature[RING_F_VMDQ].limit) +- adapter->ring_feature[RING_F_VMDQ].limit = 1; +- adapter->ring_feature[RING_F_VMDQ].offset = adapter->num_vfs; ++ int num_vf_macvlans, i; + + num_vf_macvlans = hw->mac.num_rar_entries - +- (IXGBE_MAX_PF_MACVLANS + 1 + adapter->num_vfs); ++ (IXGBE_MAX_PF_MACVLANS + 1 + num_vfs); ++ if (!num_vf_macvlans) ++ return; + +- adapter->mv_list = mv_list = kcalloc(num_vf_macvlans, +- sizeof(struct vf_macvlans), +- GFP_KERNEL); ++ 
mv_list = kcalloc(num_vf_macvlans, sizeof(struct vf_macvlans), ++ GFP_KERNEL); + if (mv_list) { + /* Initialize list of VF macvlans */ + INIT_LIST_HEAD(&adapter->vf_mvs.l); + for (i = 0; i < num_vf_macvlans; i++) { +- mv_list->vf = -1; +- mv_list->free = true; +- list_add(&mv_list->l, &adapter->vf_mvs.l); +- mv_list++; ++ mv_list[i].vf = -1; ++ mv_list[i].free = true; ++ list_add(&mv_list[i].l, &adapter->vf_mvs.l); + } ++ adapter->mv_list = mv_list; + } ++} ++ ++static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter, ++ unsigned int num_vfs) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int i; ++ ++ adapter->flags |= IXGBE_FLAG_SRIOV_ENABLED; ++ ++ /* Enable VMDq flag so device will be set in VM mode */ ++ adapter->flags |= IXGBE_FLAG_VMDQ_ENABLED; ++ if (!adapter->ring_feature[RING_F_VMDQ].limit) ++ adapter->ring_feature[RING_F_VMDQ].limit = 1; ++ ++ /* Allocate memory for per VF control structures */ ++ adapter->vfinfo = kcalloc(num_vfs, sizeof(struct vf_data_storage), ++ GFP_KERNEL); ++ if (!adapter->vfinfo) ++ return -ENOMEM; + + /* Initialize default switching mode VEB */ + IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, IXGBE_PFDTXGSWC_VT_LBEN); +- adapter->flags2 |= IXGBE_FLAG2_BRIDGE_MODE_VEB; + +- /* If call to enable VFs succeeded then allocate memory +- * for per VF control structures. 
++ /* set adapter->num_vfs only after allocating vfinfo to avoid ++ * NULL pointer issues when accessing adapter->vfinfo + */ +- adapter->vfinfo = +- kcalloc(adapter->num_vfs, +- sizeof(struct vf_data_storage), GFP_KERNEL); +- if (adapter->vfinfo) { +- /* limit trafffic classes based on VFs enabled */ +- if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && +- (adapter->num_vfs < 16)) { +- adapter->dcb_cfg.num_tcs.pg_tcs = MAX_TRAFFIC_CLASS; +- adapter->dcb_cfg.num_tcs.pfc_tcs = MAX_TRAFFIC_CLASS; +- } else if (adapter->num_vfs < 32) { +- adapter->dcb_cfg.num_tcs.pg_tcs = 4; +- adapter->dcb_cfg.num_tcs.pfc_tcs = 4; +- } else { +- adapter->dcb_cfg.num_tcs.pg_tcs = 1; +- adapter->dcb_cfg.num_tcs.pfc_tcs = 1; +- } ++ adapter->num_vfs = num_vfs; ++ ++ ixgbe_alloc_vf_macvlans(adapter, num_vfs); ++ ++ adapter->ring_feature[RING_F_VMDQ].offset = num_vfs; ++ ++ /* enable L2 switch and replication */ ++ adapter->flags |= IXGBE_FLAG_SRIOV_L2SWITCH_ENABLE | ++ IXGBE_FLAG_SRIOV_REPLICATION_ENABLE; ++ ++ /* limit traffic classes based on VFs enabled */ ++ if ((adapter->hw.mac.type == ixgbe_mac_82599EB) && ++ (adapter->num_vfs < 16)) { ++ adapter->dcb_cfg.num_tcs.pg_tcs = ++ IXGBE_DCB_MAX_TRAFFIC_CLASS; ++ adapter->dcb_cfg.num_tcs.pfc_tcs = ++ IXGBE_DCB_MAX_TRAFFIC_CLASS; ++ } else if (adapter->num_vfs < 32) { ++ adapter->dcb_cfg.num_tcs.pg_tcs = 4; ++ adapter->dcb_cfg.num_tcs.pfc_tcs = 4; ++ } else { ++ adapter->dcb_cfg.num_tcs.pg_tcs = 1; ++ adapter->dcb_cfg.num_tcs.pfc_tcs = 1; ++ } ++ adapter->dcb_cfg.vt_mode = true; + +- /* We do not support RSS w/ SR-IOV */ +- adapter->ring_feature[RING_F_RSS].limit = 1; ++#ifdef IXGBE_DISABLE_VF_MQ ++ /* We do not support RSS w/ SR-IOV */ ++ adapter->ring_feature[RING_F_RSS].limit = 1; ++#endif + +- /* Disable RSC when in SR-IOV mode */ +- adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | +- IXGBE_FLAG2_RSC_ENABLED); ++ /* Disable RSC when in SR-IOV mode */ ++ adapter->flags2 &= ~(IXGBE_FLAG2_RSC_CAPABLE | ++ IXGBE_FLAG2_RSC_ENABLED); + ++ for 
(i = 0; i < adapter->num_vfs; i++) { + /* enable spoof checking for all VFs */ +- for (i = 0; i < adapter->num_vfs; i++) +- adapter->vfinfo[i].spoofchk_enabled = true; +- return 0; ++ adapter->vfinfo[i].spoofchk_enabled = true; ++ ++#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN ++ /* We support VF RSS querying only for 82599 and x540 ++ * devices at the moment. These devices share RSS ++ * indirection table and RSS hash key with PF therefore ++ * we want to disable the querying by default. ++ */ ++ adapter->vfinfo[i].rss_query_enabled = 0; ++ ++#endif ++ /* Untrust all VFs */ ++ adapter->vfinfo[i].trusted = false; ++ ++ /* set the default xcast mode */ ++ adapter->vfinfo[i].xcast_mode = IXGBEVF_XCAST_MODE_NONE; + } + +- return -ENOMEM; ++ e_dev_info("SR-IOV enabled with %d VFs\n", num_vfs); ++ if (hw->mac.type < ixgbe_mac_X550) ++ e_dev_info("configure port vlans to keep your VFs secure\n"); ++ ++ return 0; ++} ++ ++/** ++ * ixgbe_get_vfs - Find and take references to all vf devices ++ * @adapter: Pointer to adapter struct ++ */ ++static void ixgbe_get_vfs(struct ixgbe_adapter *adapter) ++{ ++ struct pci_dev *pdev = adapter->pdev; ++ u16 vendor = pdev->vendor; ++ struct pci_dev *vfdev; ++ int vf = 0; ++ u16 vf_id; ++ int pos; ++ ++ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV); ++ if (!pos) ++ return; ++ pci_read_config_word(pdev, pos + PCI_SRIOV_VF_DID, &vf_id); ++ ++ vfdev = pci_get_device(vendor, vf_id, NULL); ++ for (; vfdev; vfdev = pci_get_device(vendor, vf_id, vfdev)) { ++ if (!vfdev->is_virtfn) ++ continue; ++ if (vfdev->physfn != pdev) ++ continue; ++ if (vf >= adapter->num_vfs) ++ continue; ++ pci_dev_get(vfdev); ++ adapter->vfinfo[vf].vfdev = vfdev; ++ ++vf; ++ } + } + + /* Note this function is called when the user wants to enable SR-IOV +@@ -123,9 +190,10 @@ static int __ixgbe_enable_sriov(struct ixgbe_adapter *adapter) + void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) + { + int pre_existing_vfs = 0; ++ unsigned int num_vfs; + + 
pre_existing_vfs = pci_num_vf(adapter->pdev); +- if (!pre_existing_vfs && !adapter->num_vfs) ++ if (!pre_existing_vfs && !adapter->max_vfs) + return; + + /* If there are pre-existing VFs then we have to force +@@ -135,7 +203,7 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) + * have been created via the new PCI SR-IOV sysfs interface. + */ + if (pre_existing_vfs) { +- adapter->num_vfs = pre_existing_vfs; ++ num_vfs = pre_existing_vfs; + dev_warn(&adapter->pdev->dev, + "Virtual Functions already enabled for this device - Please reload all VF drivers to avoid spoofed packet errors\n"); + } else { +@@ -147,38 +215,49 @@ void ixgbe_enable_sriov(struct ixgbe_adapter *adapter) + * physical function. If the user requests greater thn + * 63 VFs then it is an error - reset to default of zero. + */ +- adapter->num_vfs = min_t(unsigned int, adapter->num_vfs, IXGBE_MAX_VFS_DRV_LIMIT); ++ num_vfs = min_t(unsigned int, adapter->max_vfs, ++ IXGBE_MAX_VFS_DRV_LIMIT); + +- err = pci_enable_sriov(adapter->pdev, adapter->num_vfs); ++ err = pci_enable_sriov(adapter->pdev, num_vfs); + if (err) { + e_err(probe, "Failed to enable PCI sriov: %d\n", err); +- adapter->num_vfs = 0; + return; + } + } + +- if (!__ixgbe_enable_sriov(adapter)) ++ if (!__ixgbe_enable_sriov(adapter, num_vfs)) { ++ ixgbe_get_vfs(adapter); + return; ++ } + + /* If we have gotten to this point then there is no memory available + * to manage the VF devices - print message and bail. 
+ */ +- e_err(probe, "Unable to allocate memory for VF Data Storage - " +- "SRIOV disabled\n"); ++ e_err(probe, "Unable to allocate memory for VF Data Storage - SRIOV disabled\n"); + ixgbe_disable_sriov(adapter); + } + +-#endif /* #ifdef CONFIG_PCI_IOV */ ++#endif /* CONFIG_PCI_IOV */ + int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) + { ++ unsigned int num_vfs = adapter->num_vfs, vf; + struct ixgbe_hw *hw = &adapter->hw; + u32 gpie; + u32 vmdctl; +- int rss; + + /* set num VFs to 0 to prevent access to vfinfo */ + adapter->num_vfs = 0; + ++ /* put the reference to all of the vf devices */ ++ for (vf = 0; vf < num_vfs; ++vf) { ++ struct pci_dev *vfdev = adapter->vfinfo[vf].vfdev; ++ ++ if (!vfdev) ++ continue; ++ adapter->vfinfo[vf].vfdev = NULL; ++ pci_dev_put(vfdev); ++ } ++ + /* free VF control structures */ + kfree(adapter->vfinfo); + adapter->vfinfo = NULL; +@@ -191,6 +270,11 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) + if (!(adapter->flags & IXGBE_FLAG_SRIOV_ENABLED)) + return 0; + ++ /* Turn off malicious driver detection */ ++ if ((hw->mac.ops.disable_mdd) && ++ (!(adapter->flags & IXGBE_FLAG_MDD_ENABLED))) ++ hw->mac.ops.disable_mdd(hw); ++ + #ifdef CONFIG_PCI_IOV + /* + * If our VFs are assigned we cannot shut down SR-IOV +@@ -218,30 +302,35 @@ int ixgbe_disable_sriov(struct ixgbe_adapter *adapter) + IXGBE_WRITE_FLUSH(hw); + + /* Disable VMDq flag so device will be set in VM mode */ +- if (adapter->ring_feature[RING_F_VMDQ].limit == 1) { ++ if (adapter->ring_feature[RING_F_VMDQ].limit == 1) + adapter->flags &= ~IXGBE_FLAG_VMDQ_ENABLED; +- adapter->flags &= ~IXGBE_FLAG_SRIOV_ENABLED; +- rss = min_t(int, IXGBE_MAX_RSS_INDICES, num_online_cpus()); +- } else { +- rss = min_t(int, IXGBE_MAX_L2A_QUEUES, num_online_cpus()); +- } + + adapter->ring_feature[RING_F_VMDQ].offset = 0; +- adapter->ring_feature[RING_F_RSS].limit = rss; + + /* take a breather then clean up driver data */ + msleep(100); ++ ++ adapter->flags &= 
~IXGBE_FLAG_SRIOV_ENABLED; + return 0; + } + +-static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) ++static int ixgbe_pci_sriov_enable(struct pci_dev __maybe_unused *dev, int __maybe_unused num_vfs) + { + #ifdef CONFIG_PCI_IOV + struct ixgbe_adapter *adapter = pci_get_drvdata(dev); + int err = 0; ++ u8 num_tc; + int i; + int pre_existing_vfs = pci_num_vf(dev); + ++ if (!(adapter->flags & IXGBE_FLAG_SRIOV_CAPABLE)) { ++ e_dev_warn("SRIOV not supported on this device\n"); ++ return -EOPNOTSUPP; ++ } ++ ++ if (adapter->num_vfs == num_vfs) ++ return -EINVAL; ++ + if (pre_existing_vfs && pre_existing_vfs != num_vfs) + err = ixgbe_disable_sriov(adapter); + else if (pre_existing_vfs && pre_existing_vfs == num_vfs) +@@ -251,31 +340,53 @@ static int ixgbe_pci_sriov_enable(struct pci_dev *dev, int num_vfs) + goto err_out; + + /* While the SR-IOV capability structure reports total VFs to be +- * 64 we limit the actual number that can be allocated to 63 so +- * that some transmit/receive resources can be reserved to the ++ * 64 we limit the actual number that can be allocated as below ++ * so that some transmit/receive resources can be reserved to the + * PF. The PCI bus driver already checks for other values out of + * range. 
++ * Num_TCs MAX_VFs ++ * 1 63 ++ * <=4 31 ++ * >4 15 + */ +- if (num_vfs > IXGBE_MAX_VFS_DRV_LIMIT) { +- err = -EPERM; +- goto err_out; +- } ++ num_tc = netdev_get_num_tc(adapter->netdev); + +- adapter->num_vfs = num_vfs; ++ if (num_tc > 4) { ++ if (num_vfs > IXGBE_MAX_VFS_8TC) { ++ e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_8TC); ++ err = -EPERM; ++ goto err_out; ++ } ++ } else if ((num_tc > 1) && (num_tc <= 4)) { ++ if (num_vfs > IXGBE_MAX_VFS_4TC) { ++ e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_4TC); ++ err = -EPERM; ++ goto err_out; ++ } ++ } else { ++ if (num_vfs > IXGBE_MAX_VFS_1TC) { ++ e_dev_err("Currently the device is configured with %d TCs, Creating more than %d VFs is not allowed\n", num_tc, IXGBE_MAX_VFS_1TC); ++ err = -EPERM; ++ goto err_out; ++ } ++ } + +- err = __ixgbe_enable_sriov(adapter); ++ err = __ixgbe_enable_sriov(adapter, num_vfs); + if (err) + goto err_out; + + for (i = 0; i < adapter->num_vfs; i++) + ixgbe_vf_configuration(dev, (i | 0x10000000)); + ++ /* reset before enabling SRIOV to avoid mailbox issues */ ++ ixgbe_sriov_reinit(adapter); ++ + err = pci_enable_sriov(dev, num_vfs); + if (err) { + e_dev_warn("Failed to enable PCI sriov: %d\n", err); + goto err_out; + } +- ixgbe_sriov_reinit(adapter); ++ ixgbe_get_vfs(adapter); + + out: + return num_vfs; +@@ -294,6 +405,9 @@ static int ixgbe_pci_sriov_disable(struct pci_dev *dev) + u32 current_flags = adapter->flags; + #endif + ++ if (!adapter->num_vfs && !pci_num_vf(dev)) ++ return -EINVAL; ++ + err = ixgbe_disable_sriov(adapter); + + /* Only reinit if no error and state changed */ +@@ -330,20 +444,16 @@ static int ixgbe_set_vf_multicasts(struct ixgbe_adapter *adapter, + /* only so many hash values supported */ + entries = min(entries, IXGBE_MAX_VF_MC_ENTRIES); + +- /* +- * salt away the number of multi cast addresses assigned 
++ /* salt away the number of multi cast addresses assigned + * to this VF for later use to restore when the PF multi cast + * list changes + */ + vfinfo->num_vf_mc_hashes = entries; + +- /* +- * VFs are limited to using the MTA hash table for their multicast +- * addresses +- */ +- for (i = 0; i < entries; i++) { ++ /* VFs are limited to using the MTA hash table for their multicast ++ * addresses */ ++ for (i = 0; i < entries; i++) + vfinfo->vf_mc_hashes[i] = hash_list[i]; +- } + + for (i = 0; i < vfinfo->num_vf_mc_hashes; i++) { + vector_reg = (vfinfo->vf_mc_hashes[i] >> 5) & 0x7F; +@@ -379,7 +489,6 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) + mta_reg |= (1 << vector_bit); + IXGBE_WRITE_REG(hw, IXGBE_MTA(vector_reg), mta_reg); + } +- + if (vfinfo->num_vf_mc_hashes) + vmolr |= IXGBE_VMOLR_ROMPE; + else +@@ -390,22 +499,46 @@ void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter) + /* Restore any VF macvlans */ + ixgbe_full_sync_mac_table(adapter); + } +-#endif ++#endif /* CONFIG_PCI_IOV */ + +-static int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, +- u32 vf) ++int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf) + { +- /* VLAN 0 is a special case, don't allow it to be removed */ +- if (!vid && !add) +- return 0; ++ struct ixgbe_hw *hw = &adapter->hw; ++ int err; + +- return adapter->hw.mac.ops.set_vfta(&adapter->hw, vid, vf, (bool)add); +-} ++#ifndef HAVE_VLAN_RX_REGISTER ++ /* If VLAN overlaps with one the PF is currently monitoring make ++ * sure that we are able to allocate a VLVF entry. This may be ++ * redundant but it guarantees PF will maintain visibility to ++ * the VLAN. 
++ */ ++ if (add && test_bit(vid, adapter->active_vlans)) { ++ err = hw->mac.ops.set_vfta(hw, vid, VMDQ_P(0), true, false); ++ if (err) ++ return err; ++ } ++#endif ++ ++ err = hw->mac.ops.set_vfta(hw, vid, vf, !!add, false); ++#ifndef HAVE_VLAN_RX_REGISTER + +-static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) ++ if (add && !err) ++ return err; ++ ++ /* If we failed to add the VF VLAN or we are removing the VF VLAN ++ * we may need to drop the PF pool bit in order to allow us to free ++ * up the VLVF resources. ++ */ ++ if (test_bit(vid, adapter->active_vlans) || ++ (adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) ++ ixgbe_update_pf_promisc_vlvf(adapter, vid); ++#endif ++ ++ return err; ++} ++static int ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 max_frame, u32 vf) + { + struct ixgbe_hw *hw = &adapter->hw; +- int max_frame = msgbuf[1]; + u32 max_frs; + + /* +@@ -416,29 +549,31 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) + * account before we can enable the VF for receive + */ + if (adapter->hw.mac.type == ixgbe_mac_82599EB) { ++ + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + u32 reg_offset, vf_shift, vfre; + s32 err = 0; + +-#ifdef CONFIG_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); +- + #endif /* CONFIG_FCOE */ ++ + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_11: +- /* +- * Version 1.1 supports jumbo frames on VFs if PF has ++ case ixgbe_mbox_api_12: ++ case ixgbe_mbox_api_13: ++ /* Version 1.1 supports jumbo frames on VFs if PF has + * jumbo frames enabled which means legacy VFs are + * disabled + */ + if (pf_max_frame > ETH_FRAME_LEN) + break; ++ /* fall through */ + default: +- /* +- * If the PF or VF are running w/ jumbo frames enabled ++ /* If the PF or VF are running w/ jumbo frames enabled + * we need to shut down the VF Rx 
path as we cannot + * support jumbo frames on legacy VFs + */ +@@ -466,12 +601,6 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) + } + } + +- /* MTU < 68 is an error and causes problems on some kernels */ +- if (max_frame > IXGBE_MAX_JUMBO_FRAME_SIZE) { +- e_err(drv, "VF max_frame %d out of range\n", max_frame); +- return -EINVAL; +- } +- + /* pull current max frame size from hardware */ + max_frs = IXGBE_READ_REG(hw, IXGBE_MAXFRS); + max_frs &= IXGBE_MHADD_MFS_MASK; +@@ -487,10 +616,10 @@ static s32 ixgbe_set_vf_lpe(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) + return 0; + } + +-static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) ++void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) + { + u32 vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); +- vmolr |= IXGBE_VMOLR_BAM; ++ vmolr |= IXGBE_VMOLR_BAM; + if (aupe) + vmolr |= IXGBE_VMOLR_AUPE; + else +@@ -498,59 +627,81 @@ static void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe) + IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); + } + +-static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) ++static void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, ++ u16 vid, u16 qos, u32 vf) + { + struct ixgbe_hw *hw = &adapter->hw; ++ u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; + +- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); ++ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); + } +-static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) ++ ++static void ixgbe_clear_vmvir(struct ixgbe_adapter *adapter, u32 vf) + { + struct ixgbe_hw *hw = &adapter->hw; +- struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; +- u8 num_tcs = netdev_get_num_tc(adapter->netdev); +- +- /* add PF assigned VLAN or VLAN 0 */ +- ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); +- +- /* reset offloads to defaults */ +- ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); +- +- /* set outgoing tags for VFs */ +- if (!vfinfo->pf_vlan && 
!vfinfo->pf_qos && !num_tcs) { +- ixgbe_clear_vmvir(adapter, vf); +- } else { +- if (vfinfo->pf_qos || !num_tcs) +- ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, +- vfinfo->pf_qos, vf); +- else +- ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, +- adapter->default_up, vf); +- +- if (vfinfo->spoofchk_enabled) +- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); +- } + +- /* reset multicast table array for vf */ +- adapter->vfinfo[vf].num_vf_mc_hashes = 0; +- +- /* Flush and reset the mta with the new values */ +- ixgbe_set_rx_mode(adapter->netdev); +- +- ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); +- +- /* reset VF api back to unknown */ +- adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; ++ IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), 0); + } + +-static int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, +- int vf, unsigned char *mac_addr) ++static void ixgbe_clear_vf_vlans(struct ixgbe_adapter *adapter, u32 vf) + { +- ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); +- memcpy(adapter->vfinfo[vf].vf_mac_addresses, mac_addr, ETH_ALEN); +- ixgbe_add_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); +- +- return 0; ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 vlvfb_mask, pool_mask, i; ++ ++ /* create mask for VF and other pools */ ++ pool_mask = (u32)~BIT(VMDQ_P(0) % 32); ++ vlvfb_mask = BIT(vf % 32); ++ ++ /* post increment loop, covers VLVF_ENTRIES - 1 to 0 */ ++ for (i = IXGBE_VLVF_ENTRIES; i--;) { ++ u32 bits[2], vlvfb, vid, vfta, vlvf; ++ u32 word = i * 2 + vf / 32; ++ u32 mask; ++ ++ vlvfb = IXGBE_READ_REG(hw, IXGBE_VLVFB(word)); ++ ++ /* if our bit isn't set we can skip it */ ++ if (!(vlvfb & vlvfb_mask)) ++ continue; ++ ++ /* clear our bit from vlvfb */ ++ vlvfb ^= vlvfb_mask; ++ ++ /* create 64b mask to check to see if we should clear VLVF */ ++ bits[word % 2] = vlvfb; ++ bits[~word % 2] = IXGBE_READ_REG(hw, IXGBE_VLVFB(word ^ 1)); ++ ++ /* if other pools are present, just remove ourselves */ ++ if 
(bits[(VMDQ_P(0) / 32) ^ 1] || ++ (bits[VMDQ_P(0) / 32] & pool_mask)) ++ goto update_vlvfb; ++ ++ /* if PF is present, leave VFTA */ ++ if (bits[0] || bits[1]) ++ goto update_vlvf; ++ ++ /* if we cannot determine VLAN just remove ourselves */ ++ vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(i)); ++ if (!vlvf) ++ goto update_vlvfb; ++ ++ vid = vlvf & VLAN_VID_MASK; ++ mask = BIT(vid % 32); ++ ++ /* clear bit from VFTA */ ++ vfta = IXGBE_READ_REG(hw, IXGBE_VFTA(vid / 32)); ++ if (vfta & mask) ++ IXGBE_WRITE_REG(hw, IXGBE_VFTA(vid / 32), vfta ^ mask); ++update_vlvf: ++ /* clear POOL selection enable */ ++ IXGBE_WRITE_REG(hw, IXGBE_VLVF(i), 0); ++ ++ if (!(adapter->flags2 & IXGBE_FLAG2_VLAN_PROMISC)) ++ vlvfb = 0; ++update_vlvfb: ++ /* clear pool bits */ ++ IXGBE_WRITE_REG(hw, IXGBE_VLVFB(word), vlvfb); ++ } + } + + static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, +@@ -558,6 +709,7 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, + { + struct list_head *pos; + struct vf_macvlans *entry; ++ s32 retval = 0; + + if (index <= 1) { + list_for_each(pos, &adapter->vf_mvs.l) { +@@ -598,33 +750,115 @@ static int ixgbe_set_vf_macvlan(struct ixgbe_adapter *adapter, + if (!entry || !entry->free) + return -ENOSPC; + +- entry->free = false; +- entry->is_macvlan = true; +- entry->vf = vf; +- memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); ++ retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); ++ if (retval >= 0) { ++ entry->free = false; ++ entry->is_macvlan = true; ++ entry->vf = vf; ++ memcpy(entry->vf_macvlan, mac_addr, ETH_ALEN); ++ } + +- ixgbe_add_mac_filter(adapter, mac_addr, vf); ++ return retval; ++} + +- return 0; ++static inline void ixgbe_vf_reset_event(struct ixgbe_adapter *adapter, u32 vf) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ struct vf_data_storage *vfinfo = &adapter->vfinfo[vf]; ++ u8 num_tcs = netdev_get_num_tc(adapter->netdev); ++ ++ /* remove VLAN filters belonging to this VF */ ++ ixgbe_clear_vf_vlans(adapter, vf); ++ ++ /* add back 
PF assigned VLAN or VLAN 0 */ ++ ixgbe_set_vf_vlan(adapter, true, vfinfo->pf_vlan, vf); ++ ++ /* reset offloads to defaults */ ++ ixgbe_set_vmolr(hw, vf, !vfinfo->pf_vlan); ++ ++ /* set outgoing tags for VFs */ ++ if (!vfinfo->pf_vlan && !vfinfo->pf_qos && !num_tcs) { ++ ixgbe_clear_vmvir(adapter, vf); ++ } else { ++ if (vfinfo->pf_qos || !num_tcs) ++ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, ++ vfinfo->pf_qos, vf); ++ else ++ ixgbe_set_vmvir(adapter, vfinfo->pf_vlan, ++ adapter->default_up, vf); ++ } ++ ++ /* reset multicast table array for vf */ ++ adapter->vfinfo[vf].num_vf_mc_hashes = 0; ++ ++ /* Flush and reset the mta with the new values */ ++ ixgbe_set_rx_mode(adapter->netdev); ++ ++ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); ++ ixgbe_set_vf_macvlan(adapter, vf, 0, NULL); ++ ++ /* reset VF api back to unknown */ ++ adapter->vfinfo[vf].vf_api = ixgbe_mbox_api_10; ++} ++ ++int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, ++ int vf, unsigned char *mac_addr) ++{ ++ s32 retval = 0; ++ ++ ixgbe_del_mac_filter(adapter, adapter->vfinfo[vf].vf_mac_addresses, vf); ++ retval = ixgbe_add_mac_filter(adapter, mac_addr, vf); ++ if (retval >= 0) ++ memcpy(adapter->vfinfo[vf].vf_mac_addresses, ++ mac_addr, ETH_ALEN); ++ else ++ memset(adapter->vfinfo[vf].vf_mac_addresses, 0, ETH_ALEN); ++ ++ return retval; + } + ++#ifdef CONFIG_PCI_IOV + int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask) + { ++ unsigned char vf_mac_addr[6]; + struct ixgbe_adapter *adapter = pci_get_drvdata(pdev); + unsigned int vfn = (event_mask & 0x3f); +- + bool enable = ((event_mask & 0x10000000U) != 0); + +- if (enable) +- eth_zero_addr(adapter->vfinfo[vfn].vf_mac_addresses); ++ if (enable) { ++ memset(vf_mac_addr, 0, ETH_ALEN); ++ memcpy(adapter->vfinfo[vfn].vf_mac_addresses, vf_mac_addr, 6); ++ } + + return 0; + } ++#endif /* CONFIG_PCI_IOV */ + +-static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) ++static inline void 
ixgbe_write_qde(struct ixgbe_adapter *adapter, u32 vf, ++ u32 qde) + { ++ struct ixgbe_hw *hw = &adapter->hw; + struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; ++ u32 q_per_pool = __ALIGN_MASK(1, ~vmdq->mask); ++ u32 reg; ++ int i; ++ ++ for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { ++ /* flush previous write */ ++ IXGBE_WRITE_FLUSH(hw); ++ ++ /* drop enable should always be set in SRIOV mode*/ ++ reg = IXGBE_QDE_WRITE | qde; ++ reg |= i << IXGBE_QDE_IDX_SHIFT; ++ IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); ++ } ++ ++} ++ ++static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) ++{ + struct ixgbe_hw *hw = &adapter->hw; ++ struct ixgbe_ring_feature *vmdq = &adapter->ring_feature[RING_F_VMDQ]; + unsigned char *vf_mac = adapter->vfinfo[vf].vf_mac_addresses; + u32 reg, reg_offset, vf_shift; + u32 msgbuf[4] = {0, 0, 0, 0}; +@@ -650,15 +884,7 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) + IXGBE_WRITE_REG(hw, IXGBE_VFTE(reg_offset), reg); + + /* force drop enable for all VF Rx queues */ +- for (i = vf * q_per_pool; i < ((vf + 1) * q_per_pool); i++) { +- /* flush previous write */ +- IXGBE_WRITE_FLUSH(hw); +- +- /* indicate to hardware that we want to set drop enable */ +- reg = IXGBE_QDE_WRITE | IXGBE_QDE_ENABLE; +- reg |= i << IXGBE_QDE_IDX_SHIFT; +- IXGBE_WRITE_REG(hw, IXGBE_QDE, reg); +- } ++ ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); + + /* enable receive for vf */ + reg = IXGBE_READ_REG(hw, IXGBE_VFRE(reg_offset)); +@@ -671,12 +897,12 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) + struct net_device *dev = adapter->netdev; + int pf_max_frame = dev->mtu + ETH_HLEN; + +-#ifdef CONFIG_FCOE ++#if IS_ENABLED(CONFIG_FCOE) + if (dev->features & NETIF_F_FCOE_MTU) + pf_max_frame = max_t(int, pf_max_frame, + IXGBE_FCOE_JUMBO_FRAME_SIZE); +- + #endif /* CONFIG_FCOE */ ++ + if (pf_max_frame > ETH_FRAME_LEN) + reg &= ~(1 << vf_shift); + } +@@ -685,7 +911,6 @@ static int 
ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) + /* enable VF mailbox for further messages */ + adapter->vfinfo[vf].clear_to_send = true; + +- /* Enable counting of spoofed packets in the SSVPC register */ + reg = IXGBE_READ_REG(hw, IXGBE_VMECM(reg_offset)); + reg |= (1 << vf_shift); + IXGBE_WRITE_REG(hw, IXGBE_VMECM(reg_offset), reg); +@@ -701,12 +926,12 @@ static int ixgbe_vf_reset_msg(struct ixgbe_adapter *adapter, u32 vf) + + /* reply to reset with ack and vf mac address */ + msgbuf[0] = IXGBE_VF_RESET; +- if (!is_zero_ether_addr(vf_mac)) { ++ if (!is_zero_ether_addr(vf_mac) && adapter->vfinfo[vf].pf_set_mac) { + msgbuf[0] |= IXGBE_VT_MSGTYPE_ACK; + memcpy(addr, vf_mac, ETH_ALEN); + } else { + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; +- dev_warn(&adapter->pdev->dev, ++ dev_warn(pci_dev_to_dev(adapter->pdev), + "VF %d has no MAC address assigned, you may have to assign one manually\n", + vf); + } +@@ -731,52 +956,26 @@ static int ixgbe_set_vf_mac_addr(struct ixgbe_adapter *adapter, + return -1; + } + +- if (adapter->vfinfo[vf].pf_set_mac && +- !ether_addr_equal(adapter->vfinfo[vf].vf_mac_addresses, new_mac)) { ++ if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && ++ memcmp(adapter->vfinfo[vf].vf_mac_addresses, new_mac, ++ ETH_ALEN)) { ++ u8 *pm = adapter->vfinfo[vf].vf_mac_addresses; + e_warn(drv, +- "VF %d attempted to override administratively set MAC address\n" +- "Reload the VF driver to resume operations\n", +- vf); ++ "VF %d attempted to set a new MAC address but it already has an administratively set MAC address %2.2X:%2.2X:%2.2X:%2.2X:%2.2X:%2.2X\n", ++ vf, pm[0], pm[1], pm[2], pm[3], pm[4], pm[5]); ++ e_warn(drv, "Check the VF driver and if it is not using the correct MAC address you may need to reload the VF driver\n"); + return -1; + } +- + return ixgbe_set_vf_mac(adapter, vf, new_mac) < 0; + } + +-static int ixgbe_find_vlvf_entry(struct ixgbe_hw *hw, u32 vlan) +-{ +- u32 vlvf; +- s32 regindex; +- +- /* short cut the special 
case */ +- if (vlan == 0) +- return 0; +- +- /* Search for the vlan id in the VLVF entries */ +- for (regindex = 1; regindex < IXGBE_VLVF_ENTRIES; regindex++) { +- vlvf = IXGBE_READ_REG(hw, IXGBE_VLVF(regindex)); +- if ((vlvf & VLAN_VID_MASK) == vlan) +- break; +- } +- +- /* Return a negative value if not found */ +- if (regindex >= IXGBE_VLVF_ENTRIES) +- regindex = -1; +- +- return regindex; +-} +- + static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, + u32 *msgbuf, u32 vf) + { +- struct ixgbe_hw *hw = &adapter->hw; +- int add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; +- int vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); +- int err; +- s32 reg_ndx; +- u32 vlvf; +- u32 bits; ++ u32 add = (msgbuf[0] & IXGBE_VT_MSGINFO_MASK) >> IXGBE_VT_MSGINFO_SHIFT; ++ u32 vid = (msgbuf[1] & IXGBE_VLVF_VLANID_MASK); + u8 tcs = netdev_get_num_tc(adapter->netdev); ++ int err = 0; + + if (adapter->vfinfo[vf].pf_vlan || tcs) { + e_warn(drv, +@@ -786,25 +985,34 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, + return -1; + } + +- if (add) +- adapter->vfinfo[vf].vlan_count++; +- else if (adapter->vfinfo[vf].vlan_count) +- adapter->vfinfo[vf].vlan_count--; ++ /* VLAN 0 is a special case, don't allow it to be removed */ ++ if (!vid && !add) ++ return 0; ++ ++ err = ixgbe_set_vf_vlan(adapter, add, vid, vf); ++ ++ if (err) ++ return err; + ++#ifdef HAVE_VLAN_RX_REGISTER + /* in case of promiscuous mode any VLAN filter set for a VF must + * also have the PF pool added to it. + */ +- if (add && adapter->netdev->flags & IFF_PROMISC) ++ if (add && adapter->netdev->flags & IFF_PROMISC) { + err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); ++ if (err) ++ return err; ++ } + +- err = ixgbe_set_vf_vlan(adapter, add, vid, vf); +- if (!err && adapter->vfinfo[vf].spoofchk_enabled) +- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); +- ++#ifdef CONFIG_PCI_IOV + /* Go through all the checks to see if the VLAN filter should + * be wiped completely. 
+ */ + if (!add && adapter->netdev->flags & IFF_PROMISC) { ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 bits, vlvf; ++ s32 reg_ndx; ++ + reg_ndx = ixgbe_find_vlvf_entry(hw, vid); + if (reg_ndx < 0) + goto out; +@@ -828,13 +1036,15 @@ static int ixgbe_set_vf_vlan_msg(struct ixgbe_adapter *adapter, + * is cleared if the PF only added itself to the pool + * because the PF is in promiscuous mode. + */ +- if ((vlvf & VLAN_VID_MASK) == vid && +- !test_bit(vid, adapter->active_vlans) && !bits) +- ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); ++ if ((vlvf & VLAN_VID_MASK) == vid && !bits) ++ err = ixgbe_set_vf_vlan(adapter, add, vid, VMDQ_P(0)); + } + + out: +- ++#endif /* CONFIG_PCI_IOV */ ++#else /* HAVE_VLAN_RX_REGISTER */ ++ return 0; ++#endif /* HAVE_VLAN_RX_REGISTER */ + return err; + } + +@@ -846,7 +1056,8 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, + IXGBE_VT_MSGINFO_SHIFT; + int err; + +- if (adapter->vfinfo[vf].pf_set_mac && index > 0) { ++ if (adapter->vfinfo[vf].pf_set_mac && !adapter->vfinfo[vf].trusted && ++ index > 0) { + e_warn(drv, + "VF %d requested MACVLAN filter but is administratively denied\n", + vf); +@@ -864,8 +1075,12 @@ static int ixgbe_set_vf_macvlan_msg(struct ixgbe_adapter *adapter, + * If the VF is allowed to set MAC filters then turn off + * anti-spoofing to avoid false positives. 
+ */ +- if (adapter->vfinfo[vf].spoofchk_enabled) +- ixgbe_ndo_set_vf_spoofchk(adapter->netdev, vf, false); ++ if (adapter->vfinfo[vf].spoofchk_enabled) { ++ struct ixgbe_hw *hw = &adapter->hw; ++ ++ hw->mac.ops.set_mac_anti_spoofing(hw, false, vf); ++ hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); ++ } + } + + err = ixgbe_set_vf_macvlan(adapter, vf, index, new_mac); +@@ -885,6 +1100,8 @@ static int ixgbe_negotiate_vf_api(struct ixgbe_adapter *adapter, + switch (api) { + case ixgbe_mbox_api_10: + case ixgbe_mbox_api_11: ++ case ixgbe_mbox_api_12: ++ case ixgbe_mbox_api_13: + adapter->vfinfo[vf].vf_api = api; + return 0; + default: +@@ -908,6 +1125,8 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, + switch (adapter->vfinfo[vf].vf_api) { + case ixgbe_mbox_api_20: + case ixgbe_mbox_api_11: ++ case ixgbe_mbox_api_12: ++ case ixgbe_mbox_api_13: + break; + default: + return -1; +@@ -935,6 +1154,140 @@ static int ixgbe_get_vf_queues(struct ixgbe_adapter *adapter, + return 0; + } + ++#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN ++static int ixgbe_get_vf_reta(struct ixgbe_adapter *adapter, u32 *msgbuf, u32 vf) ++{ ++ u32 i, j; ++ u32 *out_buf = &msgbuf[1]; ++ const u8 *reta = adapter->rss_indir_tbl; ++ u32 reta_size = ixgbe_rss_indir_tbl_entries(adapter); ++ ++ /* Check if operation is permitted */ ++ if (!adapter->vfinfo[vf].rss_query_enabled) ++ return -EPERM; ++ ++ /* verify the PF is supporting the correct API */ ++ switch (adapter->vfinfo[vf].vf_api) { ++ case ixgbe_mbox_api_12: ++ case ixgbe_mbox_api_13: ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ /* This mailbox command is supported (required) only for 82599 and x540 ++ * VFs which support up to 4 RSS queues. Therefore we will compress the ++ * RETA by saving only 2 bits from each entry. This way we will be able ++ * to transfer the whole RETA in a single mailbox operation. 
++ */ ++ for (i = 0; i < reta_size / 16; i++) { ++ out_buf[i] = 0; ++ for (j = 0; j < 16; j++) ++ out_buf[i] |= (u32)(reta[16 * i + j] & 0x3) << (2 * j); ++ } ++ ++ return 0; ++} ++ ++static int ixgbe_get_vf_rss_key(struct ixgbe_adapter *adapter, ++ u32 *msgbuf, u32 vf) ++{ ++ u32 *rss_key = &msgbuf[1]; ++ ++ /* Check if the operation is permitted */ ++ if (!adapter->vfinfo[vf].rss_query_enabled) ++ return -EPERM; ++ ++ /* verify the PF is supporting the correct API */ ++ switch (adapter->vfinfo[vf].vf_api) { ++ case ixgbe_mbox_api_12: ++ case ixgbe_mbox_api_13: ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ memcpy(rss_key, adapter->rss_key, IXGBE_RSS_KEY_SIZE); ++ ++ return 0; ++} ++#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ ++ ++static int ixgbe_update_vf_xcast_mode(struct ixgbe_adapter *adapter, ++ u32 *msgbuf, u32 vf) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int xcast_mode = msgbuf[1]; ++ u32 vmolr, fctrl, disable, enable; ++ ++ /* verify the PF is supporting the correct APIs */ ++ switch (adapter->vfinfo[vf].vf_api) { ++ case ixgbe_mbox_api_12: ++ /* promisc introduced in 1.3 version */ ++ if (xcast_mode == IXGBEVF_XCAST_MODE_PROMISC) ++ return -EOPNOTSUPP; ++ /* Fall threw */ ++ case ixgbe_mbox_api_13: ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ if (xcast_mode > IXGBEVF_XCAST_MODE_MULTI && ++ !adapter->vfinfo[vf].trusted) { ++ xcast_mode = IXGBEVF_XCAST_MODE_MULTI; ++ } ++ ++ if (adapter->vfinfo[vf].xcast_mode == xcast_mode) ++ goto out; ++ ++ switch (xcast_mode) { ++ case IXGBEVF_XCAST_MODE_NONE: ++ disable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | ++ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; ++ enable = 0; ++ break; ++ case IXGBEVF_XCAST_MODE_MULTI: ++ disable = IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; ++ enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE; ++ break; ++ case IXGBEVF_XCAST_MODE_ALLMULTI: ++ disable = IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; ++ enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | IXGBE_VMOLR_MPE; ++ 
break; ++ case IXGBEVF_XCAST_MODE_PROMISC: ++ if (hw->mac.type <= ixgbe_mac_82599EB) ++ return -EOPNOTSUPP; ++ ++ fctrl = IXGBE_READ_REG(hw, IXGBE_FCTRL); ++ if (!(fctrl & IXGBE_FCTRL_UPE)) { ++ /* VF promisc requires PF in promisc */ ++ e_warn(drv, ++ "Enabling VF promisc requires PF in promisc\n"); ++ return -EPERM; ++ } ++ ++ disable = 0; ++ enable = IXGBE_VMOLR_BAM | IXGBE_VMOLR_ROMPE | ++ IXGBE_VMOLR_MPE | IXGBE_VMOLR_UPE | IXGBE_VMOLR_VPE; ++ break; ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ vmolr = IXGBE_READ_REG(hw, IXGBE_VMOLR(vf)); ++ vmolr &= ~disable; ++ vmolr |= enable; ++ IXGBE_WRITE_REG(hw, IXGBE_VMOLR(vf), vmolr); ++ ++ adapter->vfinfo[vf].xcast_mode = xcast_mode; ++ ++out: ++ msgbuf[1] = xcast_mode; ++ ++ return 0; ++} ++ + static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) + { + u32 mbx_size = IXGBE_VFMAILBOX_SIZE; +@@ -963,6 +1316,7 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) + * until the vf completes a virtual function reset it should not be + * allowed to start any configuration. 
+ */ ++ + if (!adapter->vfinfo[vf].clear_to_send) { + msgbuf[0] |= IXGBE_VT_MSGTYPE_NACK; + ixgbe_write_mbx(hw, msgbuf, 1, vf); +@@ -980,7 +1334,11 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) + retval = ixgbe_set_vf_vlan_msg(adapter, msgbuf, vf); + break; + case IXGBE_VF_SET_LPE: +- retval = ixgbe_set_vf_lpe(adapter, msgbuf, vf); ++ if (msgbuf[1] > IXGBE_MAX_JUMBO_FRAME_SIZE) { ++ e_err(drv, "VF max_frame %d out of range\n", msgbuf[1]); ++ return -EINVAL; ++ } ++ retval = ixgbe_set_vf_lpe(adapter, msgbuf[1], vf); + break; + case IXGBE_VF_SET_MACVLAN: + retval = ixgbe_set_vf_macvlan_msg(adapter, msgbuf, vf); +@@ -991,6 +1349,17 @@ static int ixgbe_rcv_msg_from_vf(struct ixgbe_adapter *adapter, u32 vf) + case IXGBE_VF_GET_QUEUES: + retval = ixgbe_get_vf_queues(adapter, msgbuf, vf); + break; ++#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN ++ case IXGBE_VF_GET_RETA: ++ retval = ixgbe_get_vf_reta(adapter, msgbuf, vf); ++ break; ++ case IXGBE_VF_GET_RSS_KEY: ++ retval = ixgbe_get_vf_rss_key(adapter, msgbuf, vf); ++ break; ++#endif /* HAVE_NDO_SET_VF_RSS_QUERY_EN */ ++ case IXGBE_VF_UPDATE_XCAST_MODE: ++ retval = ixgbe_update_vf_xcast_mode(adapter, msgbuf, vf); ++ break; + default: + e_err(drv, "Unhandled Msg %8.8x\n", msgbuf[0]); + retval = IXGBE_ERR_MBX; +@@ -1020,11 +1389,58 @@ static void ixgbe_rcv_ack_from_vf(struct ixgbe_adapter *adapter, u32 vf) + ixgbe_write_mbx(hw, &msg, 1, vf); + } + ++#define Q_BITMAP_DEPTH 2 ++static void ixgbe_check_mdd_event(struct ixgbe_adapter *adapter) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 vf_bitmap[Q_BITMAP_DEPTH] = { 0 }; ++ u32 j, i; ++ u32 ping; ++ ++ if (!hw->mac.ops.mdd_event) ++ return; ++ ++ /* Did we have a malicious event */ ++ hw->mac.ops.mdd_event(hw, vf_bitmap); ++ ++ /* Log any blocked queues and release lock */ ++ for (i = 0; i < Q_BITMAP_DEPTH; i++) { ++ for (j = 0; j < 32 && vf_bitmap[i]; j++) { ++ u32 vf; ++ ++ if (!(vf_bitmap[i] & (1 << j))) ++ continue; ++ ++ /* The VF that malicious 
event occurred on */ ++ vf = j + (i * 32); ++ ++ dev_warn(pci_dev_to_dev(adapter->pdev), ++ "Malicious event on VF %d tx:%x rx:%x\n", vf, ++ IXGBE_READ_REG(hw, IXGBE_LVMMC_TX), ++ IXGBE_READ_REG(hw, IXGBE_LVMMC_RX)); ++ ++ /* restart the vf */ ++ if (hw->mac.ops.restore_mdd_vf) { ++ hw->mac.ops.restore_mdd_vf(hw, vf); ++ ++ /* get the VF to rebuild its queues */ ++ adapter->vfinfo[vf].clear_to_send = 0; ++ ping = IXGBE_PF_CONTROL_MSG | ++ IXGBE_VT_MSGTYPE_CTS; ++ ixgbe_write_mbx(hw, &ping, 1, vf); ++ } ++ } ++ } ++} ++ + void ixgbe_msg_task(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; + u32 vf; + ++ if (adapter->flags & IXGBE_FLAG_MDD_ENABLED && adapter->vfinfo) ++ ixgbe_check_mdd_event(adapter); ++ + for (vf = 0; vf < adapter->num_vfs; vf++) { + /* process any reset requests */ + if (!ixgbe_check_for_rst(hw, vf)) +@@ -1052,6 +1468,19 @@ void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter) + IXGBE_WRITE_REG(hw, IXGBE_VFRE(1), 0); + } + ++#ifdef HAVE_NDO_SET_VF_TRUST ++static inline void ixgbe_ping_vf(struct ixgbe_adapter *adapter, int vf) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ u32 ping; ++ ++ ping = IXGBE_PF_CONTROL_MSG; ++ if (adapter->vfinfo[vf].clear_to_send) ++ ping |= IXGBE_VT_MSGTYPE_CTS; ++ ixgbe_write_mbx(hw, &ping, 1, vf); ++} ++ ++#endif + void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) + { + struct ixgbe_hw *hw = &adapter->hw; +@@ -1066,73 +1495,174 @@ void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter) + } + } + ++#ifdef HAVE_NDO_SET_VF_TRUST ++int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ if (vf >= adapter->num_vfs) ++ return -EINVAL; ++ ++ /* nothing to do */ ++ if (adapter->vfinfo[vf].trusted == setting) ++ return 0; ++ ++ adapter->vfinfo[vf].trusted = setting; ++ ++ /* reset VF to reconfigure features */ ++ adapter->vfinfo[vf].clear_to_send = false; ++ ixgbe_ping_vf(adapter, vf); ++ ++ e_info(drv, "VF %u is 
%strusted\n", vf, setting ? "" : "not "); ++ ++ return 0; ++} ++ ++#endif ++#ifdef IFLA_VF_MAX + int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int vf, u8 *mac) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); +- if (!is_valid_ether_addr(mac) || (vf >= adapter->num_vfs)) ++ s32 retval = 0; ++ ++ if (vf >= adapter->num_vfs) + return -EINVAL; +- adapter->vfinfo[vf].pf_set_mac = true; +- dev_info(&adapter->pdev->dev, "setting MAC %pM on VF %d\n", mac, vf); +- dev_info(&adapter->pdev->dev, "Reload the VF driver to make this" +- " change effective."); ++ ++ if (is_valid_ether_addr(mac)) { ++ dev_info(pci_dev_to_dev(adapter->pdev), "setting MAC %pM on VF %d\n", ++ mac, vf); ++ dev_info(pci_dev_to_dev(adapter->pdev), "Reload the VF driver to make this change effective.\n"); ++ ++ retval = ixgbe_set_vf_mac(adapter, vf, mac); ++ if (retval >= 0) { ++ /* pf_set_mac is used in ESX5.1 and base driver but not in ESX5.5 */ ++ adapter->vfinfo[vf].pf_set_mac = true; ++ ++ if (test_bit(__IXGBE_DOWN, &adapter->state)) { ++ dev_warn(pci_dev_to_dev(adapter->pdev), "The VF MAC address has been set, but the PF device is not up.\n"); ++ dev_warn(pci_dev_to_dev(adapter->pdev), "Bring the PF device up before attempting to use the VF device.\n"); ++ } ++ } else { ++ dev_warn(pci_dev_to_dev(adapter->pdev), "The VF MAC address was NOT set due to invalid or duplicate MAC address.\n"); ++ } ++ } else if (is_zero_ether_addr(mac)) { ++ unsigned char *vf_mac_addr = ++ adapter->vfinfo[vf].vf_mac_addresses; ++ ++ /* nothing to do */ ++ if (is_zero_ether_addr(vf_mac_addr)) ++ return 0; ++ ++ dev_info(pci_dev_to_dev(adapter->pdev), "removing MAC on VF %d\n", ++ vf); ++ ++ retval = ixgbe_del_mac_filter(adapter, vf_mac_addr, vf); ++ if (retval >= 0) { ++ adapter->vfinfo[vf].pf_set_mac = false; ++ memcpy(vf_mac_addr, mac, ETH_ALEN); ++ } else { ++ dev_warn(pci_dev_to_dev(adapter->pdev), "Could NOT remove the VF MAC address.\n"); ++ } ++ } else { ++ retval = -EINVAL; ++ } ++ return retval; 
++} ++ ++static int ixgbe_enable_port_vlan(struct ixgbe_adapter *adapter, ++ int vf, u16 vlan, u8 qos) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int err; ++ ++ err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); ++ if (err) ++ goto out; ++ ++ /* Revoke tagless access via VLAN 0 */ ++ ixgbe_set_vf_vlan(adapter, false, 0, vf); ++ ++ ixgbe_set_vmvir(adapter, vlan, qos, vf); ++ ixgbe_set_vmolr(hw, vf, false); ++ ++ /* enable hide vlan on X550 */ ++ if (hw->mac.type >= ixgbe_mac_X550) ++ ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE | ++ IXGBE_QDE_HIDE_VLAN); ++ adapter->vfinfo[vf].pf_vlan = vlan; ++ adapter->vfinfo[vf].pf_qos = qos; ++ dev_info(pci_dev_to_dev(adapter->pdev), ++ "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); + if (test_bit(__IXGBE_DOWN, &adapter->state)) { +- dev_warn(&adapter->pdev->dev, "The VF MAC address has been set," +- " but the PF device is not up.\n"); +- dev_warn(&adapter->pdev->dev, "Bring the PF device up before" +- " attempting to use the VF device.\n"); ++ dev_warn(pci_dev_to_dev(adapter->pdev), "The VF VLAN has been set, but the PF device is not up.\n"); ++ dev_warn(pci_dev_to_dev(adapter->pdev), "Bring the PF device up before attempting to use the VF device.\n"); + } +- return ixgbe_set_vf_mac(adapter, vf, mac); ++ ++out: ++ return err; + } + ++static int ixgbe_disable_port_vlan(struct ixgbe_adapter *adapter, int vf) ++{ ++ struct ixgbe_hw *hw = &adapter->hw; ++ int err; ++ ++ err = ixgbe_set_vf_vlan(adapter, false, ++ adapter->vfinfo[vf].pf_vlan, vf); ++ /* Restore tagless access via VLAN 0 */ ++ ixgbe_set_vf_vlan(adapter, true, 0, vf); ++ ixgbe_clear_vmvir(adapter, vf); ++ ixgbe_set_vmolr(hw, vf, true); ++ ++ /* disable hide VLAN on X550 */ ++ if (hw->mac.type >= ixgbe_mac_X550) ++ ixgbe_write_qde(adapter, vf, IXGBE_QDE_ENABLE); ++ adapter->vfinfo[vf].pf_vlan = 0; ++ adapter->vfinfo[vf].pf_qos = 0; ++ ++ return err; ++} ++ ++#ifdef IFLA_VF_MAX ++#ifdef IFLA_VF_VLAN_INFO_MAX ++int ixgbe_ndo_set_vf_vlan(struct net_device 
*netdev, int vf, u16 vlan, ++ u8 qos, __be16 vlan_proto) ++#else + int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan, u8 qos) ++#endif + { + int err = 0; + struct ixgbe_adapter *adapter = netdev_priv(netdev); +- struct ixgbe_hw *hw = &adapter->hw; + +- if ((vf >= adapter->num_vfs) || (vlan > 4095) || (qos > 7)) ++ /* VLAN IDs accepted range 0-4094 */ ++ if ((vf >= adapter->num_vfs) || (vlan > VLAN_VID_MASK-1) || (qos > 7)) + return -EINVAL; ++#ifdef IFLA_VF_VLAN_INFO_MAX ++ if (vlan_proto != htons(ETH_P_8021Q)) ++ return -EPROTONOSUPPORT; ++#endif + if (vlan || qos) { ++ /* ++ * Check if there is already a port VLAN set, if so ++ * we have to delete the old one first before we ++ * can set the new one. The usage model had ++ * previously assumed the user would delete the ++ * old port VLAN before setting a new one but this ++ * is not necessarily the case. ++ */ + if (adapter->vfinfo[vf].pf_vlan) +- err = ixgbe_set_vf_vlan(adapter, false, +- adapter->vfinfo[vf].pf_vlan, +- vf); +- if (err) +- goto out; +- err = ixgbe_set_vf_vlan(adapter, true, vlan, vf); ++ err = ixgbe_disable_port_vlan(adapter, vf); + if (err) + goto out; +- ixgbe_set_vmvir(adapter, vlan, qos, vf); +- ixgbe_set_vmolr(hw, vf, false); +- if (adapter->vfinfo[vf].spoofchk_enabled) +- hw->mac.ops.set_vlan_anti_spoofing(hw, true, vf); +- adapter->vfinfo[vf].vlan_count++; +- adapter->vfinfo[vf].pf_vlan = vlan; +- adapter->vfinfo[vf].pf_qos = qos; +- dev_info(&adapter->pdev->dev, +- "Setting VLAN %d, QOS 0x%x on VF %d\n", vlan, qos, vf); +- if (test_bit(__IXGBE_DOWN, &adapter->state)) { +- dev_warn(&adapter->pdev->dev, +- "The VF VLAN has been set," +- " but the PF device is not up.\n"); +- dev_warn(&adapter->pdev->dev, +- "Bring the PF device up before" +- " attempting to use the VF device.\n"); +- } ++ err = ixgbe_enable_port_vlan(adapter, vf, vlan, qos); ++ + } else { +- err = ixgbe_set_vf_vlan(adapter, false, +- adapter->vfinfo[vf].pf_vlan, vf); +- ixgbe_clear_vmvir(adapter, vf); 
+- ixgbe_set_vmolr(hw, vf, true); +- hw->mac.ops.set_vlan_anti_spoofing(hw, false, vf); +- if (adapter->vfinfo[vf].vlan_count) +- adapter->vfinfo[vf].vlan_count--; +- adapter->vfinfo[vf].pf_vlan = 0; +- adapter->vfinfo[vf].pf_qos = 0; ++ err = ixgbe_disable_port_vlan(adapter, vf); + } + out: + return err; + } ++#endif /* IFLA_VF_MAX */ + + static int ixgbe_link_mbps(struct ixgbe_adapter *adapter) + { +@@ -1210,7 +1740,7 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) + + if (ixgbe_link_mbps(adapter) != adapter->vf_rate_link_speed) { + adapter->vf_rate_link_speed = 0; +- dev_info(&adapter->pdev->dev, ++ dev_info(pci_dev_to_dev(adapter->pdev), + "Link speed has been changed. VF Transmit rate is disabled\n"); + } + +@@ -1222,8 +1752,14 @@ void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter) + } + } + +-int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++int ixgbe_ndo_set_vf_bw(struct net_device *netdev, ++ int vf, ++ int __always_unused min_tx_rate, + int max_tx_rate) ++#else ++int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int max_tx_rate) ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); + int link_speed; +@@ -1241,9 +1777,6 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + if (link_speed != 10000) + return -EINVAL; + +- if (min_tx_rate) +- return -EINVAL; +- + /* rate limit cannot be less than 10Mbs or greater than link speed */ + if (max_tx_rate && ((max_tx_rate <= 10) || (max_tx_rate > link_speed))) + return -EINVAL; +@@ -1261,29 +1794,60 @@ int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting) + { + struct ixgbe_adapter *adapter = netdev_priv(netdev); +- int vf_target_reg = vf >> 3; +- int vf_target_shift = vf % 8; + struct ixgbe_hw *hw = &adapter->hw; +- u32 regval; ++ ++ 
if (vf >= adapter->num_vfs) ++ return -EINVAL; + + adapter->vfinfo[vf].spoofchk_enabled = setting; + +- regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); +- regval &= ~(1 << vf_target_shift); +- regval |= (setting << vf_target_shift); +- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval); +- +- if (adapter->vfinfo[vf].vlan_count) { +- vf_target_shift += IXGBE_SPOOF_VLANAS_SHIFT; +- regval = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); +- regval &= ~(1 << vf_target_shift); +- regval |= (setting << vf_target_shift); +- IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), regval); ++ /* configure MAC spoofing */ ++ hw->mac.ops.set_mac_anti_spoofing(hw, setting, vf); ++ ++ /* configure VLAN spoofing */ ++ hw->mac.ops.set_vlan_anti_spoofing(hw, setting, vf); ++ ++ /* Ensure LLDP and FC is set for Ethertype Antispoofing if we will be ++ * calling set_ethertype_anti_spoofing for each VF in loop below ++ */ ++ if (hw->mac.ops.set_ethertype_anti_spoofing) { ++ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_LLDP), ++ (IXGBE_ETQF_FILTER_EN | ++ IXGBE_ETQF_TX_ANTISPOOF | ++ IXGBE_ETH_P_LLDP)); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FC), ++ (IXGBE_ETQF_FILTER_EN | ++ IXGBE_ETQF_TX_ANTISPOOF | ++ ETH_P_PAUSE)); ++ ++ hw->mac.ops.set_ethertype_anti_spoofing(hw, setting, vf); + } ++ return 0; ++} ++ ++#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN ++int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, ++ bool setting) ++{ ++ struct ixgbe_adapter *adapter = netdev_priv(netdev); ++ ++ /* This operation is currently supported only for 82599 and x540 ++ * devices. 
++ */ ++ if (adapter->hw.mac.type < ixgbe_mac_82599EB || ++ adapter->hw.mac.type >= ixgbe_mac_X550) ++ return -EOPNOTSUPP; ++ ++ if (vf >= adapter->num_vfs) ++ return -EINVAL; ++ ++ adapter->vfinfo[vf].rss_query_enabled = setting; + + return 0; + } + ++#endif + int ixgbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi) + { +@@ -1292,10 +1856,26 @@ int ixgbe_ndo_get_vf_config(struct net_device *netdev, + return -EINVAL; + ivi->vf = vf; + memcpy(&ivi->mac, adapter->vfinfo[vf].vf_mac_addresses, ETH_ALEN); ++ ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + ivi->max_tx_rate = adapter->vfinfo[vf].tx_rate; + ivi->min_tx_rate = 0; ++#else ++ ivi->tx_rate = adapter->vfinfo[vf].tx_rate; ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ ++ + ivi->vlan = adapter->vfinfo[vf].pf_vlan; + ivi->qos = adapter->vfinfo[vf].pf_qos; ++#ifdef HAVE_VF_SPOOFCHK_CONFIGURE + ivi->spoofchk = adapter->vfinfo[vf].spoofchk_enabled; ++#endif ++#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN ++ ivi->rss_query_en = adapter->vfinfo[vf].rss_query_enabled; ++#endif ++#ifdef HAVE_NDO_SET_VF_TRUST ++ ivi->trusted = adapter->vfinfo[vf].trusted; ++#endif + return 0; + } ++#endif /* IFLA_VF_MAX */ ++ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +index 32c26d5..5d080ba 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sriov.h +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. 
+ +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -26,44 +22,71 @@ + + *******************************************************************************/ + ++ + #ifndef _IXGBE_SRIOV_H_ + #define _IXGBE_SRIOV_H_ + +-/* ixgbe driver limit the max number of VFs could be enabled to +- * 63 (IXGBE_MAX_VF_FUNCTIONS - 1) ++/* ixgbe driver limit the max number of VFs could be enabled to ++ * 63 (IXGBE_MAX_VF_FUNCTIONS - 1) + */ + #define IXGBE_MAX_VFS_DRV_LIMIT (IXGBE_MAX_VF_FUNCTIONS - 1) ++#define IXGBE_MAX_VFS_1TC IXGBE_MAX_VFS_DRV_LIMIT ++#define IXGBE_MAX_VFS_4TC 31 ++#define IXGBE_MAX_VFS_8TC 15 + +-#ifdef CONFIG_PCI_IOV + void ixgbe_restore_vf_multicasts(struct ixgbe_adapter *adapter); +-#endif ++int ixgbe_set_vf_vlan(struct ixgbe_adapter *adapter, int add, int vid, u32 vf); ++void ixgbe_set_vmolr(struct ixgbe_hw *hw, u32 vf, bool aupe); + void ixgbe_msg_task(struct ixgbe_adapter *adapter); +-int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); ++int ixgbe_set_vf_mac(struct ixgbe_adapter *adapter, ++ int vf, unsigned char *mac_addr); + void ixgbe_disable_tx_rx(struct ixgbe_adapter *adapter); + void ixgbe_ping_all_vfs(struct ixgbe_adapter *adapter); ++#ifdef IFLA_VF_MAX + int ixgbe_ndo_set_vf_mac(struct net_device *netdev, int queue, u8 *mac); ++#ifdef IFLA_VF_VLAN_INFO_MAX + int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, +- u8 qos); ++ u8 qos, __be16 vlan_proto); ++#else ++int ixgbe_ndo_set_vf_vlan(struct net_device *netdev, int queue, u16 vlan, ++ u8 qos); ++#endif ++#ifdef HAVE_NDO_SET_VF_MIN_MAX_TX_RATE + int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int min_tx_rate, + int max_tx_rate); +-int ixgbe_ndo_set_vf_spoofchk(struct net_device 
*netdev, int vf, bool setting); ++#else ++int ixgbe_ndo_set_vf_bw(struct net_device *netdev, int vf, int tx_rate); ++#endif /* HAVE_NDO_SET_VF_MIN_MAX_TX_RATE */ ++#ifdef HAVE_NDO_SET_VF_RSS_QUERY_EN ++int ixgbe_ndo_set_vf_rss_query_en(struct net_device *netdev, int vf, ++ bool setting); ++#endif ++#ifdef HAVE_NDO_SET_VF_TRUST ++int ixgbe_ndo_set_vf_trust(struct net_device *netdev, int vf, bool setting); ++#endif + int ixgbe_ndo_get_vf_config(struct net_device *netdev, + int vf, struct ifla_vf_info *ivi); +-void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); ++#endif /* IFLA_VF_MAX */ + int ixgbe_disable_sriov(struct ixgbe_adapter *adapter); + #ifdef CONFIG_PCI_IOV ++int ixgbe_vf_configuration(struct pci_dev *pdev, unsigned int event_mask); + void ixgbe_enable_sriov(struct ixgbe_adapter *adapter); ++int ixgbe_ndo_set_vf_spoofchk(struct net_device *netdev, int vf, bool setting); + #endif + int ixgbe_pci_sriov_configure(struct pci_dev *dev, int num_vfs); ++#ifdef IFLA_VF_MAX ++void ixgbe_check_vf_rate_limit(struct ixgbe_adapter *adapter); ++#endif /* IFLA_VF_MAX */ ++void ixgbe_dump_registers(struct ixgbe_adapter *adapter); + +-static inline void ixgbe_set_vmvir(struct ixgbe_adapter *adapter, +- u16 vid, u16 qos, u32 vf) +-{ +- struct ixgbe_hw *hw = &adapter->hw; +- u32 vmvir = vid | (qos << VLAN_PRIO_SHIFT) | IXGBE_VMVIR_VLANA_DEFAULT; +- +- IXGBE_WRITE_REG(hw, IXGBE_VMVIR(vf), vmvir); +-} +- ++/* ++ * These are defined in ixgbe_type.h on behalf of the VF driver ++ * but we need them here unwrapped for the PF driver. 
++ */ ++#define IXGBE_DEV_ID_82599_VF 0x10ED ++#define IXGBE_DEV_ID_X540_VF 0x1515 ++#define IXGBE_DEV_ID_X550_VF 0x1565 ++#define IXGBE_DEV_ID_X550EM_X_VF 0x15A8 ++#define IXGBE_DEV_ID_X550EM_A_VF 0x15C5 + #endif /* _IXGBE_SRIOV_H_ */ + +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +index ef6df3d..5d30be5 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_sysfs.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ +@@ -30,16 +26,22 @@ + #include "ixgbe_common.h" + #include "ixgbe_type.h" + ++#ifdef IXGBE_SYSFS ++ + #include + #include + #include + #include + #include + #include ++#include ++#ifdef IXGBE_HWMON + #include ++#endif + ++#ifdef IXGBE_HWMON + /* hwmon callback functions */ +-static ssize_t ixgbe_hwmon_show_location(struct device *dev, ++static ssize_t ixgbe_hwmon_show_location(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) + { +@@ -49,7 +51,7 @@ static ssize_t ixgbe_hwmon_show_location(struct device *dev, + ixgbe_attr->sensor->location); + } + +-static ssize_t ixgbe_hwmon_show_temp(struct device *dev, ++static ssize_t ixgbe_hwmon_show_temp(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) + { +@@ -68,7 +70,7 @@ static ssize_t ixgbe_hwmon_show_temp(struct device *dev, + return sprintf(buf, "%u\n", value); + } + +-static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev, ++static ssize_t ixgbe_hwmon_show_cautionthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) + { +@@ -82,7 +84,7 @@ static ssize_t ixgbe_hwmon_show_cautionthresh(struct device *dev, + return sprintf(buf, "%u\n", value); + } + +-static ssize_t ixgbe_hwmon_show_maxopthresh(struct device *dev, ++static ssize_t ixgbe_hwmon_show_maxopthresh(struct device __always_unused *dev, + struct device_attribute *attr, + char *buf) + { +@@ -112,29 +114,29 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, + unsigned int n_attr; + struct hwmon_attr *ixgbe_attr; + +- n_attr = adapter->ixgbe_hwmon_buff->n_hwmon; +- ixgbe_attr = &adapter->ixgbe_hwmon_buff->hwmon_list[n_attr]; ++ n_attr = adapter->ixgbe_hwmon_buff.n_hwmon; ++ ixgbe_attr = &adapter->ixgbe_hwmon_buff.hwmon_list[n_attr]; + + switch (type) { + case IXGBE_HWMON_TYPE_LOC: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_location; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), +- "temp%u_label", offset + 1); ++ "temp%u_label", 
offset); + break; + case IXGBE_HWMON_TYPE_TEMP: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_temp; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), +- "temp%u_input", offset + 1); ++ "temp%u_input", offset); + break; + case IXGBE_HWMON_TYPE_CAUTION: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_cautionthresh; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), +- "temp%u_max", offset + 1); ++ "temp%u_max", offset); + break; + case IXGBE_HWMON_TYPE_MAX: + ixgbe_attr->dev_attr.show = ixgbe_hwmon_show_maxopthresh; + snprintf(ixgbe_attr->name, sizeof(ixgbe_attr->name), +- "temp%u_crit", offset + 1); ++ "temp%u_crit", offset); + break; + default: + rc = -EPERM; +@@ -148,17 +150,35 @@ static int ixgbe_add_hwmon_attr(struct ixgbe_adapter *adapter, + ixgbe_attr->dev_attr.store = NULL; + ixgbe_attr->dev_attr.attr.mode = S_IRUGO; + ixgbe_attr->dev_attr.attr.name = ixgbe_attr->name; +- sysfs_attr_init(&ixgbe_attr->dev_attr.attr); + +- adapter->ixgbe_hwmon_buff->attrs[n_attr] = &ixgbe_attr->dev_attr.attr; ++ rc = device_create_file(pci_dev_to_dev(adapter->pdev), ++ &ixgbe_attr->dev_attr); + +- ++adapter->ixgbe_hwmon_buff->n_hwmon; ++ if (rc == 0) ++ ++adapter->ixgbe_hwmon_buff.n_hwmon; + +- return 0; ++ return rc; + } ++#endif /* IXGBE_HWMON */ + +-static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter *adapter) ++static void ixgbe_sysfs_del_adapter(struct ixgbe_adapter __maybe_unused *adapter) + { ++#ifdef IXGBE_HWMON ++ int i; ++ ++ if (adapter == NULL) ++ return; ++ ++ for (i = 0; i < adapter->ixgbe_hwmon_buff.n_hwmon; i++) { ++ device_remove_file(pci_dev_to_dev(adapter->pdev), ++ &adapter->ixgbe_hwmon_buff.hwmon_list[i].dev_attr); ++ } ++ ++ kfree(adapter->ixgbe_hwmon_buff.hwmon_list); ++ ++ if (adapter->ixgbe_hwmon_buff.device) ++ hwmon_device_unregister(adapter->ixgbe_hwmon_buff.device); ++#endif /* IXGBE_HWMON */ + } + + /* called from ixgbe_main.c */ +@@ -170,27 +190,43 @@ void ixgbe_sysfs_exit(struct ixgbe_adapter *adapter) + /* called from ixgbe_main.c 
*/ + int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) + { +- struct hwmon_buff *ixgbe_hwmon; +- struct device *hwmon_dev; +- unsigned int i; + int rc = 0; ++#ifdef IXGBE_HWMON ++ struct hwmon_buff *ixgbe_hwmon = &adapter->ixgbe_hwmon_buff; ++ unsigned int i; ++ int n_attrs; + ++#endif /* IXGBE_HWMON */ ++ if (adapter == NULL) ++ goto err; ++ ++#ifdef IXGBE_HWMON + /* If this method isn't defined we don't support thermals */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL) { +- goto exit; ++ goto no_thermal; + } + + /* Don't create thermal hwmon interface if no sensors present */ + if (adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw)) +- goto exit; +- +- ixgbe_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*ixgbe_hwmon), +- GFP_KERNEL); +- if (ixgbe_hwmon == NULL) { ++ goto no_thermal; ++ ++ /* ++ * Allocation space for max attributs ++ * max num sensors * values (loc, temp, max, caution) ++ */ ++ n_attrs = IXGBE_MAX_SENSORS * 4; ++ ixgbe_hwmon->hwmon_list = kcalloc(n_attrs, sizeof(struct hwmon_attr), ++ GFP_KERNEL); ++ if (!ixgbe_hwmon->hwmon_list) { + rc = -ENOMEM; +- goto exit; ++ goto err; ++ } ++ ++ ixgbe_hwmon->device = hwmon_device_register(pci_dev_to_dev(adapter->pdev)); ++ if (IS_ERR(ixgbe_hwmon->device)) { ++ rc = PTR_ERR(ixgbe_hwmon->device); ++ goto err; + } +- adapter->ixgbe_hwmon_buff = ixgbe_hwmon; + + for (i = 0; i < IXGBE_MAX_SENSORS; i++) { + /* +@@ -202,29 +238,20 @@ int ixgbe_sysfs_init(struct ixgbe_adapter *adapter) + + /* Bail if any hwmon attr struct fails to initialize */ + rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_CAUTION); ++ rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC); ++ rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP); ++ rc |= ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX); + if (rc) +- goto exit; +- rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_LOC); +- if (rc) +- goto exit; +- rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_TEMP); +- if 
(rc) +- goto exit; +- rc = ixgbe_add_hwmon_attr(adapter, i, IXGBE_HWMON_TYPE_MAX); +- if (rc) +- goto exit; ++ goto err; + } + +- ixgbe_hwmon->groups[0] = &ixgbe_hwmon->group; +- ixgbe_hwmon->group.attrs = ixgbe_hwmon->attrs; ++no_thermal: ++#endif /* IXGBE_HWMON */ ++ goto exit; + +- hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev, +- "ixgbe", +- ixgbe_hwmon, +- ixgbe_hwmon->groups); +- if (IS_ERR(hwmon_dev)) +- rc = PTR_ERR(hwmon_dev); ++err: ++ ixgbe_sysfs_del_adapter(adapter); + exit: + return rc; + } +- ++#endif /* IXGBE_SYSFS */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +index 9a89f98..b5a5365 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_type.h +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2014 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". + +@@ -29,93 +25,245 @@ + #ifndef _IXGBE_TYPE_H_ + #define _IXGBE_TYPE_H_ + +-#include +-#include +-#include ++/* ++ * The following is a brief description of the error categories used by the ++ * ERROR_REPORT* macros. 
++ * ++ * - IXGBE_ERROR_INVALID_STATE ++ * This category is for errors which represent a serious failure state that is ++ * unexpected, and could be potentially harmful to device operation. It should ++ * not be used for errors relating to issues that can be worked around or ++ * ignored. ++ * ++ * - IXGBE_ERROR_POLLING ++ * This category is for errors related to polling/timeout issues and should be ++ * used in any case where the timeout occured, or a failure to obtain a lock, or ++ * failure to receive data within the time limit. ++ * ++ * - IXGBE_ERROR_CAUTION ++ * This category should be used for reporting issues that may be the cause of ++ * other errors, such as temperature warnings. It should indicate an event which ++ * could be serious, but hasn't necessarily caused problems yet. ++ * ++ * - IXGBE_ERROR_SOFTWARE ++ * This category is intended for errors due to software state preventing ++ * something. The category is not intended for errors due to bad arguments, or ++ * due to unsupported features. It should be used when a state occurs which ++ * prevents action but is not a serious issue. ++ * ++ * - IXGBE_ERROR_ARGUMENT ++ * This category is for when a bad or invalid argument is passed. It should be ++ * used whenever a function is called and error checking has detected the ++ * argument is wrong or incorrect. ++ * ++ * - IXGBE_ERROR_UNSUPPORTED ++ * This category is for errors which are due to unsupported circumstances or ++ * configuration issues. It should not be used when the issue is due to an ++ * invalid argument, but for when something has occurred that is unsupported ++ * (Ex: Flow control autonegotiation or an unsupported SFP+ module.) 
++ */ ++ ++#include "ixgbe_osdep.h" ++ ++/* Override this by setting IOMEM in your ixgbe_osdep.h header */ ++#ifndef IOMEM ++#define IOMEM ++#endif ++ ++/* Vendor ID */ ++#define IXGBE_INTEL_VENDOR_ID 0x8086 + + /* Device IDs */ +-#define IXGBE_DEV_ID_82598 0x10B6 +-#define IXGBE_DEV_ID_82598_BX 0x1508 +-#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 +-#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 +-#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB +-#define IXGBE_DEV_ID_82598AT 0x10C8 +-#define IXGBE_DEV_ID_82598AT2 0x150B +-#define IXGBE_DEV_ID_82598EB_CX4 0x10DD +-#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC +-#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 +-#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 +-#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 +-#define IXGBE_DEV_ID_82599_KX4 0x10F7 +-#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 +-#define IXGBE_DEV_ID_82599_KR 0x1517 +-#define IXGBE_DEV_ID_82599_T3_LOM 0x151C +-#define IXGBE_DEV_ID_82599_CX4 0x10F9 +-#define IXGBE_DEV_ID_82599_SFP 0x10FB +-#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152a +-#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 +-#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 +-#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 +-#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 +-#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 +-#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B +-#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 +-#define IXGBE_SUBDEV_ID_82599_LOM_SFP 0x8976 +-#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 +-#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D +-#define IXGBE_DEV_ID_82599EN_SFP 0x1557 +-#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 +-#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC +-#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 +-#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C +-#define IXGBE_DEV_ID_82599_LS 0x154F +-#define IXGBE_DEV_ID_X540T 0x1528 +-#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A +-#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 +-#define IXGBE_DEV_ID_X540T1 0x1560 +- +-/* VF Device IDs */ +-#define 
IXGBE_DEV_ID_82599_VF 0x10ED +-#define IXGBE_DEV_ID_X540_VF 0x1515 ++#define IXGBE_DEV_ID_82598 0x10B6 ++#define IXGBE_DEV_ID_82598_BX 0x1508 ++#define IXGBE_DEV_ID_82598AF_DUAL_PORT 0x10C6 ++#define IXGBE_DEV_ID_82598AF_SINGLE_PORT 0x10C7 ++#define IXGBE_DEV_ID_82598AT 0x10C8 ++#define IXGBE_DEV_ID_82598AT2 0x150B ++#define IXGBE_DEV_ID_82598EB_SFP_LOM 0x10DB ++#define IXGBE_DEV_ID_82598EB_CX4 0x10DD ++#define IXGBE_DEV_ID_82598_CX4_DUAL_PORT 0x10EC ++#define IXGBE_DEV_ID_82598_DA_DUAL_PORT 0x10F1 ++#define IXGBE_DEV_ID_82598_SR_DUAL_PORT_EM 0x10E1 ++#define IXGBE_DEV_ID_82598EB_XF_LR 0x10F4 ++#define IXGBE_DEV_ID_82599_KX4 0x10F7 ++#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 ++#define IXGBE_DEV_ID_82599_KR 0x1517 ++#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 ++#define IXGBE_SUBDEV_ID_82599_KX4_KR_MEZZ 0x000C ++#define IXGBE_DEV_ID_82599_CX4 0x10F9 ++#define IXGBE_DEV_ID_82599_SFP 0x10FB ++#define IXGBE_SUBDEV_ID_82599_SFP 0x11A9 ++#define IXGBE_SUBDEV_ID_82599_SFP_WOL0 0x1071 ++#define IXGBE_SUBDEV_ID_82599_RNDC 0x1F72 ++#define IXGBE_SUBDEV_ID_82599_560FLR 0x17D0 ++#define IXGBE_SUBDEV_ID_82599_ECNA_DP 0x0470 ++#define IXGBE_SUBDEV_ID_82599_SP_560FLR 0x211B ++#define IXGBE_SUBDEV_ID_82599_LOM_SNAP6 0x2159 ++#define IXGBE_SUBDEV_ID_82599_SFP_1OCP 0x000D ++#define IXGBE_SUBDEV_ID_82599_SFP_2OCP 0x0008 ++#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM1 0x8976 ++#define IXGBE_SUBDEV_ID_82599_SFP_LOM_OEM2 0x06EE ++#define IXGBE_DEV_ID_82599_BACKPLANE_FCOE 0x152A ++#define IXGBE_DEV_ID_82599_SFP_FCOE 0x1529 ++#define IXGBE_DEV_ID_82599_SFP_EM 0x1507 ++#define IXGBE_DEV_ID_82599_SFP_SF2 0x154D ++#define IXGBE_DEV_ID_82599_SFP_SF_QP 0x154A ++#define IXGBE_DEV_ID_82599_QSFP_SF_QP 0x1558 ++#define IXGBE_DEV_ID_82599EN_SFP 0x1557 ++#define IXGBE_SUBDEV_ID_82599EN_SFP_OCP1 0x0001 ++#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC ++#define IXGBE_DEV_ID_82599_T3_LOM 0x151C ++#define IXGBE_DEV_ID_82599_LS 0x154F ++#define IXGBE_DEV_ID_X540T 0x1528 ++#define IXGBE_DEV_ID_X540T1 
0x1560 ++#define IXGBE_DEV_ID_X550T 0x1563 ++#define IXGBE_DEV_ID_X550T1 0x15D1 ++/* Placeholder value, pending official value. */ ++#define IXGBE_DEV_ID_X550EM_A_KR 0x15C2 ++#define IXGBE_DEV_ID_X550EM_A_KR_L 0x15C3 ++#define IXGBE_DEV_ID_X550EM_A_SFP_N 0x15C4 ++#define IXGBE_DEV_ID_X550EM_A_SGMII 0x15C6 ++#define IXGBE_DEV_ID_X550EM_A_SGMII_L 0x15C7 ++#define IXGBE_DEV_ID_X550EM_A_10G_T 0x15C8 ++#define IXGBE_DEV_ID_X550EM_A_QSFP 0x15CA ++#define IXGBE_DEV_ID_X550EM_A_QSFP_N 0x15CC ++#define IXGBE_DEV_ID_X550EM_A_SFP 0x15CE ++#define IXGBE_DEV_ID_X550EM_A_1G_T 0x15E4 ++#define IXGBE_DEV_ID_X550EM_A_1G_T_L 0x15E5 ++#define IXGBE_DEV_ID_X550EM_X_KX4 0x15AA ++#define IXGBE_DEV_ID_X550EM_X_KR 0x15AB ++#define IXGBE_DEV_ID_X550EM_X_SFP 0x15AC ++#define IXGBE_DEV_ID_X550EM_X_10G_T 0x15AD ++#define IXGBE_DEV_ID_X550EM_X_1G_T 0x15AE ++#define IXGBE_DEV_ID_X550EM_X_XFI 0x15B0 ++ ++#define IXGBE_CAT(r,m) IXGBE_##r##m ++ ++#define IXGBE_BY_MAC(_hw, r) ((_hw)->mvals[IXGBE_CAT(r, _IDX)]) + + /* General Registers */ +-#define IXGBE_CTRL 0x00000 +-#define IXGBE_STATUS 0x00008 +-#define IXGBE_CTRL_EXT 0x00018 +-#define IXGBE_ESDP 0x00020 +-#define IXGBE_EODSDP 0x00028 +-#define IXGBE_I2CCTL 0x00028 +-#define IXGBE_LEDCTL 0x00200 +-#define IXGBE_FRTIMER 0x00048 +-#define IXGBE_TCPTIMER 0x0004C +-#define IXGBE_CORESPARE 0x00600 +-#define IXGBE_EXVET 0x05078 ++#define IXGBE_CTRL 0x00000 ++#define IXGBE_STATUS 0x00008 ++#define IXGBE_CTRL_EXT 0x00018 ++#define IXGBE_ESDP 0x00020 ++#define IXGBE_EODSDP 0x00028 ++#define IXGBE_I2CCTL_82599 0x00028 ++#define IXGBE_I2CCTL IXGBE_I2CCTL_82599 ++#define IXGBE_I2CCTL_X540 IXGBE_I2CCTL_82599 ++#define IXGBE_I2CCTL_X550 0x15F5C ++#define IXGBE_I2CCTL_X550EM_x IXGBE_I2CCTL_X550 ++#define IXGBE_I2CCTL_X550EM_a IXGBE_I2CCTL_X550 ++#define IXGBE_I2CCTL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2CCTL) ++#define IXGBE_PHY_GPIO 0x00028 ++#define IXGBE_MAC_GPIO 0x00030 ++#define IXGBE_PHYINT_STATUS0 0x00100 ++#define IXGBE_PHYINT_STATUS1 0x00104 ++#define 
IXGBE_PHYINT_STATUS2 0x00108 ++#define IXGBE_LEDCTL 0x00200 ++#define IXGBE_FRTIMER 0x00048 ++#define IXGBE_TCPTIMER 0x0004C ++#define IXGBE_CORESPARE 0x00600 ++#define IXGBE_EXVET 0x05078 + + /* NVM Registers */ +-#define IXGBE_EEC 0x10010 +-#define IXGBE_EERD 0x10014 +-#define IXGBE_EEWR 0x10018 +-#define IXGBE_FLA 0x1001C +-#define IXGBE_EEMNGCTL 0x10110 +-#define IXGBE_EEMNGDATA 0x10114 +-#define IXGBE_FLMNGCTL 0x10118 +-#define IXGBE_FLMNGDATA 0x1011C +-#define IXGBE_FLMNGCNT 0x10120 +-#define IXGBE_FLOP 0x1013C +-#define IXGBE_GRC 0x10200 ++#define IXGBE_EEC 0x10010 ++#define IXGBE_EEC_X540 IXGBE_EEC ++#define IXGBE_EEC_X550 IXGBE_EEC ++#define IXGBE_EEC_X550EM_x IXGBE_EEC ++#define IXGBE_EEC_X550EM_a 0x15FF8 ++#define IXGBE_EEC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EEC) ++ ++#define IXGBE_EERD 0x10014 ++#define IXGBE_EEWR 0x10018 ++ ++#define IXGBE_FLA 0x1001C ++#define IXGBE_FLA_X540 IXGBE_FLA ++#define IXGBE_FLA_X550 IXGBE_FLA ++#define IXGBE_FLA_X550EM_x IXGBE_FLA ++#define IXGBE_FLA_X550EM_a 0x15F68 ++#define IXGBE_FLA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FLA) ++ ++#define IXGBE_EEMNGCTL 0x10110 ++#define IXGBE_EEMNGDATA 0x10114 ++#define IXGBE_FLMNGCTL 0x10118 ++#define IXGBE_FLMNGDATA 0x1011C ++#define IXGBE_FLMNGCNT 0x10120 ++#define IXGBE_FLOP 0x1013C ++ ++#define IXGBE_GRC 0x10200 ++#define IXGBE_GRC_X540 IXGBE_GRC ++#define IXGBE_GRC_X550 IXGBE_GRC ++#define IXGBE_GRC_X550EM_x IXGBE_GRC ++#define IXGBE_GRC_X550EM_a 0x15F64 ++#define IXGBE_GRC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), GRC) ++ ++#define IXGBE_SRAMREL 0x10210 ++#define IXGBE_SRAMREL_X540 IXGBE_SRAMREL ++#define IXGBE_SRAMREL_X550 IXGBE_SRAMREL ++#define IXGBE_SRAMREL_X550EM_x IXGBE_SRAMREL ++#define IXGBE_SRAMREL_X550EM_a 0x15F6C ++#define IXGBE_SRAMREL_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SRAMREL) ++ ++#define IXGBE_PHYDBG 0x10218 + + /* General Receive Control */ +-#define IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ +-#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ ++#define 
IXGBE_GRC_MNG 0x00000001 /* Manageability Enable */ ++#define IXGBE_GRC_APME 0x00000002 /* APM enabled in EEPROM */ + +-#define IXGBE_VPDDIAG0 0x10204 +-#define IXGBE_VPDDIAG1 0x10208 ++#define IXGBE_VPDDIAG0 0x10204 ++#define IXGBE_VPDDIAG1 0x10208 + + /* I2CCTL Bit Masks */ +-#define IXGBE_I2C_CLK_IN 0x00000001 +-#define IXGBE_I2C_CLK_OUT 0x00000002 +-#define IXGBE_I2C_DATA_IN 0x00000004 +-#define IXGBE_I2C_DATA_OUT 0x00000008 ++#define IXGBE_I2C_CLK_IN 0x00000001 ++#define IXGBE_I2C_CLK_IN_X540 IXGBE_I2C_CLK_IN ++#define IXGBE_I2C_CLK_IN_X550 0x00004000 ++#define IXGBE_I2C_CLK_IN_X550EM_x IXGBE_I2C_CLK_IN_X550 ++#define IXGBE_I2C_CLK_IN_X550EM_a IXGBE_I2C_CLK_IN_X550 ++#define IXGBE_I2C_CLK_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_IN) ++ ++#define IXGBE_I2C_CLK_OUT 0x00000002 ++#define IXGBE_I2C_CLK_OUT_X540 IXGBE_I2C_CLK_OUT ++#define IXGBE_I2C_CLK_OUT_X550 0x00000200 ++#define IXGBE_I2C_CLK_OUT_X550EM_x IXGBE_I2C_CLK_OUT_X550 ++#define IXGBE_I2C_CLK_OUT_X550EM_a IXGBE_I2C_CLK_OUT_X550 ++#define IXGBE_I2C_CLK_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OUT) ++ ++#define IXGBE_I2C_DATA_IN 0x00000004 ++#define IXGBE_I2C_DATA_IN_X540 IXGBE_I2C_DATA_IN ++#define IXGBE_I2C_DATA_IN_X550 0x00001000 ++#define IXGBE_I2C_DATA_IN_X550EM_x IXGBE_I2C_DATA_IN_X550 ++#define IXGBE_I2C_DATA_IN_X550EM_a IXGBE_I2C_DATA_IN_X550 ++#define IXGBE_I2C_DATA_IN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_IN) ++ ++#define IXGBE_I2C_DATA_OUT 0x00000008 ++#define IXGBE_I2C_DATA_OUT_X540 IXGBE_I2C_DATA_OUT ++#define IXGBE_I2C_DATA_OUT_X550 0x00000400 ++#define IXGBE_I2C_DATA_OUT_X550EM_x IXGBE_I2C_DATA_OUT_X550 ++#define IXGBE_I2C_DATA_OUT_X550EM_a IXGBE_I2C_DATA_OUT_X550 ++#define IXGBE_I2C_DATA_OUT_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OUT) ++ ++#define IXGBE_I2C_DATA_OE_N_EN 0 ++#define IXGBE_I2C_DATA_OE_N_EN_X540 IXGBE_I2C_DATA_OE_N_EN ++#define IXGBE_I2C_DATA_OE_N_EN_X550 0x00000800 ++#define IXGBE_I2C_DATA_OE_N_EN_X550EM_x IXGBE_I2C_DATA_OE_N_EN_X550 ++#define 
IXGBE_I2C_DATA_OE_N_EN_X550EM_a IXGBE_I2C_DATA_OE_N_EN_X550 ++#define IXGBE_I2C_DATA_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_DATA_OE_N_EN) ++ ++#define IXGBE_I2C_BB_EN 0 ++#define IXGBE_I2C_BB_EN_X540 IXGBE_I2C_BB_EN ++#define IXGBE_I2C_BB_EN_X550 0x00000100 ++#define IXGBE_I2C_BB_EN_X550EM_x IXGBE_I2C_BB_EN_X550 ++#define IXGBE_I2C_BB_EN_X550EM_a IXGBE_I2C_BB_EN_X550 ++ ++#define IXGBE_I2C_BB_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_BB_EN) ++ ++#define IXGBE_I2C_CLK_OE_N_EN 0 ++#define IXGBE_I2C_CLK_OE_N_EN_X540 IXGBE_I2C_CLK_OE_N_EN ++#define IXGBE_I2C_CLK_OE_N_EN_X550 0x00002000 ++#define IXGBE_I2C_CLK_OE_N_EN_X550EM_x IXGBE_I2C_CLK_OE_N_EN_X550 ++#define IXGBE_I2C_CLK_OE_N_EN_X550EM_a IXGBE_I2C_CLK_OE_N_EN_X550 ++#define IXGBE_I2C_CLK_OE_N_EN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), I2C_CLK_OE_N_EN) + #define IXGBE_I2C_CLOCK_STRETCHING_TIMEOUT 500 + + #define IXGBE_I2C_THERMAL_SENSOR_ADDR 0xF8 +@@ -139,1727 +287,2203 @@ struct ixgbe_thermal_sensor_data { + struct ixgbe_thermal_diode_data sensor[IXGBE_MAX_SENSORS]; + }; + ++#define NVM_OROM_OFFSET 0x17 ++#define NVM_OROM_BLK_LOW 0x83 ++#define NVM_OROM_BLK_HI 0x84 ++#define NVM_OROM_PATCH_MASK 0xFF ++#define NVM_OROM_SHIFT 8 ++ ++#define NVM_VER_MASK 0x00FF /* version mask */ ++#define NVM_VER_SHIFT 8 /* version bit shift */ ++#define NVM_OEM_PROD_VER_PTR 0x1B /* OEM Product version block pointer */ ++#define NVM_OEM_PROD_VER_CAP_OFF 0x1 /* OEM Product version format offset */ ++#define NVM_OEM_PROD_VER_OFF_L 0x2 /* OEM Product version offset low */ ++#define NVM_OEM_PROD_VER_OFF_H 0x3 /* OEM Product version offset high */ ++#define NVM_OEM_PROD_VER_CAP_MASK 0xF /* OEM Product version cap mask */ ++#define NVM_OEM_PROD_VER_MOD_LEN 0x3 /* OEM Product version module length */ ++#define NVM_ETK_OFF_LOW 0x2D /* version low order word */ ++#define NVM_ETK_OFF_HI 0x2E /* version high order word */ ++#define NVM_ETK_SHIFT 16 /* high version word shift */ ++#define NVM_VER_INVALID 0xFFFF ++#define NVM_ETK_VALID 0x8000 
++#define NVM_INVALID_PTR 0xFFFF ++#define NVM_VER_SIZE 32 /* version sting size */ ++ ++struct ixgbe_nvm_version { ++ u32 etk_id; ++ u8 nvm_major; ++ u16 nvm_minor; ++ u8 nvm_id; ++ ++ bool oem_valid; ++ u8 oem_major; ++ u8 oem_minor; ++ u16 oem_release; ++ ++ bool or_valid; ++ u8 or_major; ++ u16 or_build; ++ u8 or_patch; ++ ++}; ++ + /* Interrupt Registers */ +-#define IXGBE_EICR 0x00800 +-#define IXGBE_EICS 0x00808 +-#define IXGBE_EIMS 0x00880 +-#define IXGBE_EIMC 0x00888 +-#define IXGBE_EIAC 0x00810 +-#define IXGBE_EIAM 0x00890 +-#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4) +-#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) +-#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) +-#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) ++#define IXGBE_EICR 0x00800 ++#define IXGBE_EICS 0x00808 ++#define IXGBE_EIMS 0x00880 ++#define IXGBE_EIMC 0x00888 ++#define IXGBE_EIAC 0x00810 ++#define IXGBE_EIAM 0x00890 ++#define IXGBE_EICS_EX(_i) (0x00A90 + (_i) * 4) ++#define IXGBE_EIMS_EX(_i) (0x00AA0 + (_i) * 4) ++#define IXGBE_EIMC_EX(_i) (0x00AB0 + (_i) * 4) ++#define IXGBE_EIAM_EX(_i) (0x00AD0 + (_i) * 4) ++/* 82599 EITR is only 12 bits, with the lower 3 always zero */ + /* + * 82598 EITR is 16 bits but set the limits based on the max +- * supported by all ixgbe hardware. 82599 EITR is only 12 bits, +- * with the lower 3 always zero. ++ * supported by all ixgbe hardware + */ +-#define IXGBE_MAX_INT_RATE 488281 +-#define IXGBE_MIN_INT_RATE 956 +-#define IXGBE_MAX_EITR 0x00000FF8 +-#define IXGBE_MIN_EITR 8 +-#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ +- (0x012300 + (((_i) - 24) * 4))) +-#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 +-#define IXGBE_EITR_LLI_MOD 0x00008000 +-#define IXGBE_EITR_CNT_WDIS 0x80000000 +-#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ +-#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ +-#define IXGBE_EITRSEL 0x00894 +-#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 
0x0000 - 0x01C */ +-#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ +-#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) +-#define IXGBE_GPIE 0x00898 ++#define IXGBE_MAX_INT_RATE 488281 ++#define IXGBE_MIN_INT_RATE 956 ++#define IXGBE_MAX_EITR 0x00000FF8 ++#define IXGBE_MIN_EITR 8 ++#define IXGBE_EITR(_i) (((_i) <= 23) ? (0x00820 + ((_i) * 4)) : \ ++ (0x012300 + (((_i) - 24) * 4))) ++#define IXGBE_EITR_ITR_INT_MASK 0x00000FF8 ++#define IXGBE_EITR_LLI_MOD 0x00008000 ++#define IXGBE_EITR_CNT_WDIS 0x80000000 ++#define IXGBE_IVAR(_i) (0x00900 + ((_i) * 4)) /* 24 at 0x900-0x960 */ ++#define IXGBE_IVAR_MISC 0x00A00 /* misc MSI-X interrupt causes */ ++#define IXGBE_EITRSEL 0x00894 ++#define IXGBE_MSIXT 0x00000 /* MSI-X Table. 0x0000 - 0x01C */ ++#define IXGBE_MSIXPBA 0x02000 /* MSI-X Pending bit array */ ++#define IXGBE_PBACL(_i) (((_i) == 0) ? (0x11068) : (0x110C0 + ((_i) * 4))) ++#define IXGBE_GPIE 0x00898 + + /* Flow Control Registers */ +-#define IXGBE_FCADBUL 0x03210 +-#define IXGBE_FCADBUH 0x03214 +-#define IXGBE_FCAMACL 0x04328 +-#define IXGBE_FCAMACH 0x0432C +-#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_PFCTOP 0x03008 +-#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ +-#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ +-#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ +-#define IXGBE_FCRTV 0x032A0 +-#define IXGBE_FCCFG 0x03D00 +-#define IXGBE_TFCS 0x0CE00 ++#define IXGBE_FCADBUL 0x03210 ++#define IXGBE_FCADBUH 0x03214 ++#define IXGBE_FCAMACL 0x04328 ++#define IXGBE_FCAMACH 0x0432C ++#define IXGBE_FCRTH_82599(_i) (0x03260 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_FCRTL_82599(_i) (0x03220 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_PFCTOP 0x03008 ++#define IXGBE_FCTTV(_i) (0x03200 + ((_i) * 4)) /* 4 of these (0-3) */ 
++#define IXGBE_FCRTL(_i) (0x03220 + ((_i) * 8)) /* 8 of these (0-7) */ ++#define IXGBE_FCRTH(_i) (0x03260 + ((_i) * 8)) /* 8 of these (0-7) */ ++#define IXGBE_FCRTV 0x032A0 ++#define IXGBE_FCCFG 0x03D00 ++#define IXGBE_TFCS 0x0CE00 + + /* Receive DMA Registers */ +-#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ ++#define IXGBE_RDBAL(_i) (((_i) < 64) ? (0x01000 + ((_i) * 0x40)) : \ + (0x0D000 + (((_i) - 64) * 0x40))) +-#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ ++#define IXGBE_RDBAH(_i) (((_i) < 64) ? (0x01004 + ((_i) * 0x40)) : \ + (0x0D004 + (((_i) - 64) * 0x40))) +-#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ ++#define IXGBE_RDLEN(_i) (((_i) < 64) ? (0x01008 + ((_i) * 0x40)) : \ + (0x0D008 + (((_i) - 64) * 0x40))) +-#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ ++#define IXGBE_RDH(_i) (((_i) < 64) ? (0x01010 + ((_i) * 0x40)) : \ + (0x0D010 + (((_i) - 64) * 0x40))) +-#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ ++#define IXGBE_RDT(_i) (((_i) < 64) ? (0x01018 + ((_i) * 0x40)) : \ + (0x0D018 + (((_i) - 64) * 0x40))) +-#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ +- (0x0D028 + (((_i) - 64) * 0x40))) +-#define IXGBE_RSCCTL(_i) (((_i) < 64) ? (0x0102C + ((_i) * 0x40)) : \ +- (0x0D02C + (((_i) - 64) * 0x40))) +-#define IXGBE_RSCDBU 0x03028 +-#define IXGBE_RDDCC 0x02F20 +-#define IXGBE_RXMEMWRAP 0x03190 +-#define IXGBE_STARCTRL 0x03024 ++#define IXGBE_RXDCTL(_i) (((_i) < 64) ? (0x01028 + ((_i) * 0x40)) : \ ++ (0x0D028 + (((_i) - 64) * 0x40))) ++#define IXGBE_RSCCTL(_i) (((_i) < 64) ? 
(0x0102C + ((_i) * 0x40)) : \ ++ (0x0D02C + (((_i) - 64) * 0x40))) ++#define IXGBE_RSCDBU 0x03028 ++#define IXGBE_RDDCC 0x02F20 ++#define IXGBE_RXMEMWRAP 0x03190 ++#define IXGBE_STARCTRL 0x03024 + /* + * Split and Replication Receive Control Registers + * 00-15 : 0x02100 + n*4 + * 16-64 : 0x01014 + n*0x40 + * 64-127: 0x0D014 + (n-64)*0x40 + */ +-#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ +- (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ +- (0x0D014 + (((_i) - 64) * 0x40)))) ++#define IXGBE_SRRCTL(_i) (((_i) <= 15) ? (0x02100 + ((_i) * 4)) : \ ++ (((_i) < 64) ? (0x01014 + ((_i) * 0x40)) : \ ++ (0x0D014 + (((_i) - 64) * 0x40)))) + /* + * Rx DCA Control Register: + * 00-15 : 0x02200 + n*4 + * 16-64 : 0x0100C + n*0x40 + * 64-127: 0x0D00C + (n-64)*0x40 + */ +-#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ ++#define IXGBE_DCA_RXCTRL(_i) (((_i) <= 15) ? (0x02200 + ((_i) * 4)) : \ + (((_i) < 64) ? (0x0100C + ((_i) * 0x40)) : \ + (0x0D00C + (((_i) - 64) * 0x40)))) +-#define IXGBE_RDRXCTL 0x02F00 +-#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) +- /* 8 of these 0x03C00 - 0x03C1C */ +-#define IXGBE_RXCTRL 0x03000 +-#define IXGBE_DROPEN 0x03D04 +-#define IXGBE_RXPBSIZE_SHIFT 10 ++#define IXGBE_RDRXCTL 0x02F00 ++/* 8 of these 0x03C00 - 0x03C1C */ ++#define IXGBE_RXPBSIZE(_i) (0x03C00 + ((_i) * 4)) ++#define IXGBE_RXCTRL 0x03000 ++#define IXGBE_DROPEN 0x03D04 ++#define IXGBE_RXPBSIZE_SHIFT 10 ++#define IXGBE_RXPBSIZE_MASK 0x000FFC00 + + /* Receive Registers */ +-#define IXGBE_RXCSUM 0x05000 +-#define IXGBE_RFCTL 0x05008 +-#define IXGBE_DRECCCTL 0x02F08 +-#define IXGBE_DRECCCTL_DISABLE 0 ++#define IXGBE_RXCSUM 0x05000 ++#define IXGBE_RFCTL 0x05008 ++#define IXGBE_DRECCCTL 0x02F08 ++#define IXGBE_DRECCCTL_DISABLE 0 ++#define IXGBE_DRECCCTL2 0x02F8C ++ + /* Multicast Table Array - 128 entries */ +-#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) +-#define IXGBE_RAL(_i) (((_i) <= 15) ? 
(0x05400 + ((_i) * 8)) : \ +- (0x0A200 + ((_i) * 8))) +-#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ +- (0x0A204 + ((_i) * 8))) +-#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) +-#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) ++#define IXGBE_MTA(_i) (0x05200 + ((_i) * 4)) ++#define IXGBE_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ ++ (0x0A200 + ((_i) * 8))) ++#define IXGBE_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ ++ (0x0A204 + ((_i) * 8))) ++#define IXGBE_MPSAR_LO(_i) (0x0A600 + ((_i) * 8)) ++#define IXGBE_MPSAR_HI(_i) (0x0A604 + ((_i) * 8)) + /* Packet split receive type */ +-#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ +- (0x0EA00 + ((_i) * 4))) ++#define IXGBE_PSRTYPE(_i) (((_i) <= 15) ? (0x05480 + ((_i) * 4)) : \ ++ (0x0EA00 + ((_i) * 4))) + /* array of 4096 1-bit vlan filters */ +-#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) ++#define IXGBE_VFTA(_i) (0x0A000 + ((_i) * 4)) + /*array of 4096 4-bit vlan vmdq indices */ +-#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) +-#define IXGBE_FCTRL 0x05080 +-#define IXGBE_VLNCTRL 0x05088 +-#define IXGBE_MCSTCTRL 0x05090 +-#define IXGBE_MRQC 0x05818 +-#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ +-#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ +-#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. 
Addr Queue Filter */ +-#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ +-#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ +-#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ +-#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ +-#define IXGBE_RQTC 0x0EC70 +-#define IXGBE_MTQC 0x08120 +-#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ +-#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ +-#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ +-#define IXGBE_VT_CTL 0x051B0 +-#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ +-#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) /* 64 Mailboxes, 16 DW each */ +-#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ +-#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ +-#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) +-#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) +-#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) +-#define IXGBE_QDE 0x2F04 +-#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ +-#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ +-#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) +-#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) +-#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) +-#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) +-#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ +-#define IXGBE_RXFECCERR0 0x051B8 +-#define IXGBE_LLITHRESH 0x0EC90 +-#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_IMIRVP 0x05AC0 +-#define IXGBE_VMD_CTL 0x0581C +-#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ +-#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ ++#define IXGBE_VFTAVIND(_j, _i) (0x0A200 + ((_j) * 0x200) + ((_i) * 4)) ++#define IXGBE_FCTRL 0x05080 
++#define IXGBE_VLNCTRL 0x05088 ++#define IXGBE_MCSTCTRL 0x05090 ++#define IXGBE_MRQC 0x05818 ++#define IXGBE_SAQF(_i) (0x0E000 + ((_i) * 4)) /* Source Address Queue Filter */ ++#define IXGBE_DAQF(_i) (0x0E200 + ((_i) * 4)) /* Dest. Address Queue Filter */ ++#define IXGBE_SDPQF(_i) (0x0E400 + ((_i) * 4)) /* Src Dest. Addr Queue Filter */ ++#define IXGBE_FTQF(_i) (0x0E600 + ((_i) * 4)) /* Five Tuple Queue Filter */ ++#define IXGBE_ETQF(_i) (0x05128 + ((_i) * 4)) /* EType Queue Filter */ ++#define IXGBE_ETQS(_i) (0x0EC00 + ((_i) * 4)) /* EType Queue Select */ ++#define IXGBE_SYNQF 0x0EC30 /* SYN Packet Queue Filter */ ++#define IXGBE_RQTC 0x0EC70 ++#define IXGBE_MTQC 0x08120 ++#define IXGBE_VLVF(_i) (0x0F100 + ((_i) * 4)) /* 64 of these (0-63) */ ++#define IXGBE_VLVFB(_i) (0x0F200 + ((_i) * 4)) /* 128 of these (0-127) */ ++#define IXGBE_VMVIR(_i) (0x08000 + ((_i) * 4)) /* 64 of these (0-63) */ ++#define IXGBE_PFFLPL 0x050B0 ++#define IXGBE_PFFLPH 0x050B4 ++#define IXGBE_VT_CTL 0x051B0 ++#define IXGBE_PFMAILBOX(_i) (0x04B00 + (4 * (_i))) /* 64 total */ ++/* 64 Mailboxes, 16 DW each */ ++#define IXGBE_PFMBMEM(_i) (0x13000 + (64 * (_i))) ++#define IXGBE_PFMBICR(_i) (0x00710 + (4 * (_i))) /* 4 total */ ++#define IXGBE_PFMBIMR(_i) (0x00720 + (4 * (_i))) /* 4 total */ ++#define IXGBE_VFRE(_i) (0x051E0 + ((_i) * 4)) ++#define IXGBE_VFTE(_i) (0x08110 + ((_i) * 4)) ++#define IXGBE_VMECM(_i) (0x08790 + ((_i) * 4)) ++#define IXGBE_QDE 0x2F04 ++#define IXGBE_VMTXSW(_i) (0x05180 + ((_i) * 4)) /* 2 total */ ++#define IXGBE_VMOLR(_i) (0x0F000 + ((_i) * 4)) /* 64 total */ ++#define IXGBE_UTA(_i) (0x0F400 + ((_i) * 4)) ++#define IXGBE_MRCTL(_i) (0x0F600 + ((_i) * 4)) ++#define IXGBE_VMRVLAN(_i) (0x0F610 + ((_i) * 4)) ++#define IXGBE_VMRVM(_i) (0x0F630 + ((_i) * 4)) ++#define IXGBE_LVMMC_RX 0x2FA8 ++#define IXGBE_LVMMC_TX 0x8108 ++#define IXGBE_LMVM_RX 0x2FA4 ++#define IXGBE_LMVM_TX 0x8124 ++#define IXGBE_WQBR_RX(_i) (0x2FB0 + ((_i) * 4)) /* 4 total */ ++#define IXGBE_WQBR_TX(_i) 
(0x8130 + ((_i) * 4)) /* 4 total */ ++#define IXGBE_L34T_IMIR(_i) (0x0E800 + ((_i) * 4)) /*128 of these (0-127)*/ ++#define IXGBE_RXFECCERR0 0x051B8 ++#define IXGBE_LLITHRESH 0x0EC90 ++#define IXGBE_IMIR(_i) (0x05A80 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_IMIREXT(_i) (0x05AA0 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_IMIRVP 0x05AC0 ++#define IXGBE_VMD_CTL 0x0581C ++#define IXGBE_RETA(_i) (0x05C00 + ((_i) * 4)) /* 32 of these (0-31) */ ++#define IXGBE_ERETA(_i) (0x0EE80 + ((_i) * 4)) /* 96 of these (0-95) */ ++#define IXGBE_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* 10 of these (0-9) */ ++ ++/* Registers for setting up RSS on X550 with SRIOV ++ * _p - pool number (0..63) ++ * _i - index (0..10 for PFVFRSSRK, 0..15 for PFVFRETA) ++ */ ++#define IXGBE_PFVFMRQC(_p) (0x03400 + ((_p) * 4)) ++#define IXGBE_PFVFRSSRK(_i, _p) (0x018000 + ((_i) * 4) + ((_p) * 0x40)) ++#define IXGBE_PFVFRETA(_i, _p) (0x019000 + ((_i) * 4) + ((_p) * 0x40)) + + /* Flow Director registers */ +-#define IXGBE_FDIRCTRL 0x0EE00 +-#define IXGBE_FDIRHKEY 0x0EE68 +-#define IXGBE_FDIRSKEY 0x0EE6C +-#define IXGBE_FDIRDIP4M 0x0EE3C +-#define IXGBE_FDIRSIP4M 0x0EE40 +-#define IXGBE_FDIRTCPM 0x0EE44 +-#define IXGBE_FDIRUDPM 0x0EE48 +-#define IXGBE_FDIRIP6M 0x0EE74 +-#define IXGBE_FDIRM 0x0EE70 ++#define IXGBE_FDIRCTRL 0x0EE00 ++#define IXGBE_FDIRHKEY 0x0EE68 ++#define IXGBE_FDIRSKEY 0x0EE6C ++#define IXGBE_FDIRDIP4M 0x0EE3C ++#define IXGBE_FDIRSIP4M 0x0EE40 ++#define IXGBE_FDIRTCPM 0x0EE44 ++#define IXGBE_FDIRUDPM 0x0EE48 ++#define IXGBE_FDIRSCTPM 0x0EE78 ++#define IXGBE_FDIRIP6M 0x0EE74 ++#define IXGBE_FDIRM 0x0EE70 + + /* Flow Director Stats registers */ +-#define IXGBE_FDIRFREE 0x0EE38 +-#define IXGBE_FDIRLEN 0x0EE4C +-#define IXGBE_FDIRUSTAT 0x0EE50 +-#define IXGBE_FDIRFSTAT 0x0EE54 +-#define IXGBE_FDIRMATCH 0x0EE58 +-#define IXGBE_FDIRMISS 0x0EE5C ++#define IXGBE_FDIRFREE 0x0EE38 ++#define IXGBE_FDIRLEN 0x0EE4C ++#define IXGBE_FDIRUSTAT 0x0EE50 ++#define IXGBE_FDIRFSTAT 0x0EE54 
++#define IXGBE_FDIRMATCH 0x0EE58 ++#define IXGBE_FDIRMISS 0x0EE5C + + /* Flow Director Programming registers */ + #define IXGBE_FDIRSIPv6(_i) (0x0EE0C + ((_i) * 4)) /* 3 of these (0-2) */ +-#define IXGBE_FDIRIPSA 0x0EE18 +-#define IXGBE_FDIRIPDA 0x0EE1C +-#define IXGBE_FDIRPORT 0x0EE20 +-#define IXGBE_FDIRVLAN 0x0EE24 +-#define IXGBE_FDIRHASH 0x0EE28 +-#define IXGBE_FDIRCMD 0x0EE2C ++#define IXGBE_FDIRIPSA 0x0EE18 ++#define IXGBE_FDIRIPDA 0x0EE1C ++#define IXGBE_FDIRPORT 0x0EE20 ++#define IXGBE_FDIRVLAN 0x0EE24 ++#define IXGBE_FDIRHASH 0x0EE28 ++#define IXGBE_FDIRCMD 0x0EE2C + + /* Transmit DMA registers */ +-#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of these (0-31)*/ +-#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) +-#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) +-#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) +-#define IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) +-#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) +-#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) +-#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) +-#define IXGBE_DTXCTL 0x07E00 +- +-#define IXGBE_DMATXCTL 0x04A80 +-#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ +-#define IXGBE_PFDTXGSWC 0x08220 +-#define IXGBE_DTXMXSZRQ 0x08100 +-#define IXGBE_DTXTCPFLGL 0x04A88 +-#define IXGBE_DTXTCPFLGH 0x04A8C +-#define IXGBE_LBDRPEN 0x0CA00 +-#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ +- +-#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ +-#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ +-#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ +-#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ +- +-#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ ++#define IXGBE_TDBAL(_i) (0x06000 + ((_i) * 0x40)) /* 32 of them (0-31)*/ ++#define IXGBE_TDBAH(_i) (0x06004 + ((_i) * 0x40)) ++#define IXGBE_TDLEN(_i) (0x06008 + ((_i) * 0x40)) ++#define IXGBE_TDH(_i) (0x06010 + ((_i) * 0x40)) ++#define 
IXGBE_TDT(_i) (0x06018 + ((_i) * 0x40)) ++#define IXGBE_TXDCTL(_i) (0x06028 + ((_i) * 0x40)) ++#define IXGBE_TDWBAL(_i) (0x06038 + ((_i) * 0x40)) ++#define IXGBE_TDWBAH(_i) (0x0603C + ((_i) * 0x40)) ++#define IXGBE_DTXCTL 0x07E00 ++ ++#define IXGBE_DMATXCTL 0x04A80 ++#define IXGBE_PFVFSPOOF(_i) (0x08200 + ((_i) * 4)) /* 8 of these 0 - 7 */ ++#define IXGBE_PFDTXGSWC 0x08220 ++#define IXGBE_DTXMXSZRQ 0x08100 ++#define IXGBE_DTXTCPFLGL 0x04A88 ++#define IXGBE_DTXTCPFLGH 0x04A8C ++#define IXGBE_LBDRPEN 0x0CA00 ++#define IXGBE_TXPBTHRESH(_i) (0x04950 + ((_i) * 4)) /* 8 of these 0 - 7 */ ++ ++#define IXGBE_DMATXCTL_TE 0x1 /* Transmit Enable */ ++#define IXGBE_DMATXCTL_NS 0x2 /* No Snoop LSO hdr buffer */ ++#define IXGBE_DMATXCTL_GDV 0x8 /* Global Double VLAN */ ++#define IXGBE_DMATXCTL_MDP_EN 0x20 /* Bit 5 */ ++#define IXGBE_DMATXCTL_MBINTEN 0x40 /* Bit 6 */ ++#define IXGBE_DMATXCTL_VT_SHIFT 16 /* VLAN EtherType */ ++ ++#define IXGBE_PFDTXGSWC_VT_LBEN 0x1 /* Local L2 VT switch enable */ + + /* Anti-spoofing defines */ +-#define IXGBE_SPOOF_MACAS_MASK 0xFF +-#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 +-#define IXGBE_SPOOF_VLANAS_SHIFT 8 +-#define IXGBE_PFVFSPOOF_REG_COUNT 8 +- +-#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) /* 16 of these (0-15) */ ++#define IXGBE_SPOOF_MACAS_MASK 0xFF ++#define IXGBE_SPOOF_VLANAS_MASK 0xFF00 ++#define IXGBE_SPOOF_VLANAS_SHIFT 8 ++#define IXGBE_SPOOF_ETHERTYPEAS 0xFF000000 ++#define IXGBE_SPOOF_ETHERTYPEAS_SHIFT 16 ++#define IXGBE_PFVFSPOOF_REG_COUNT 8 ++/* 16 of these (0-15) */ ++#define IXGBE_DCA_TXCTRL(_i) (0x07200 + ((_i) * 4)) + /* Tx DCA Control register : 128 of these (0-127) */ +-#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) +-#define IXGBE_TIPG 0x0CB00 +-#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ +-#define IXGBE_MNGTXMAP 0x0CD10 +-#define IXGBE_TIPG_FIBER_DEFAULT 3 +-#define IXGBE_TXPBSIZE_SHIFT 10 ++#define IXGBE_DCA_TXCTRL_82599(_i) (0x0600C + ((_i) * 0x40)) ++#define IXGBE_TIPG 0x0CB00 
++#define IXGBE_TXPBSIZE(_i) (0x0CC00 + ((_i) * 4)) /* 8 of these */ ++#define IXGBE_MNGTXMAP 0x0CD10 ++#define IXGBE_TIPG_FIBER_DEFAULT 3 ++#define IXGBE_TXPBSIZE_SHIFT 10 + + /* Wake up registers */ +-#define IXGBE_WUC 0x05800 +-#define IXGBE_WUFC 0x05808 +-#define IXGBE_WUS 0x05810 +-#define IXGBE_IPAV 0x05838 +-#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ +-#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ +- +-#define IXGBE_WUPL 0x05900 +-#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ ++#define IXGBE_WUC 0x05800 ++#define IXGBE_WUFC 0x05808 ++#define IXGBE_WUS 0x05810 ++#define IXGBE_IPAV 0x05838 ++#define IXGBE_IP4AT 0x05840 /* IPv4 table 0x5840-0x5858 */ ++#define IXGBE_IP6AT 0x05880 /* IPv6 table 0x5880-0x588F */ ++ ++#define IXGBE_WUPL 0x05900 ++#define IXGBE_WUPM 0x05A00 /* wake up pkt memory 0x5A00-0x5A7C */ ++#define IXGBE_PROXYS 0x05F60 /* Proxying Status Register */ ++#define IXGBE_PROXYFC 0x05F64 /* Proxying Filter Control Register */ ++#define IXGBE_VXLANCTRL 0x0000507C /* Rx filter VXLAN UDPPORT Register */ ++ ++/* masks for accessing VXLAN and GENEVE UDP ports */ ++#define IXGBE_VXLANCTRL_VXLAN_UDPPORT_MASK 0x0000ffff /* VXLAN port */ ++#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_MASK 0xffff0000 /* GENEVE port */ ++#define IXGBE_VXLANCTRL_ALL_UDPPORT_MASK 0xffffffff /* GENEVE/VXLAN */ ++ ++#define IXGBE_VXLANCTRL_GENEVE_UDPPORT_SHIFT 16 ++ + #define IXGBE_FHFT(_n) (0x09000 + ((_n) * 0x100)) /* Flex host filter table */ +-#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) /* Ext Flexible Host +- * Filter Table */ ++/* Ext Flexible Host Filter Table */ ++#define IXGBE_FHFT_EXT(_n) (0x09800 + ((_n) * 0x100)) ++#define IXGBE_FHFT_EXT_X550(_n) (0x09600 + ((_n) * 0x100)) ++ ++/* Four Flexible Filters are supported */ ++#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 + +-#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX 4 +-#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 ++/* Six Flexible Filters are supported */ ++#define 
IXGBE_FLEXIBLE_FILTER_COUNT_MAX_6 6 ++/* Eight Flexible Filters are supported */ ++#define IXGBE_FLEXIBLE_FILTER_COUNT_MAX_8 8 ++#define IXGBE_EXT_FLEXIBLE_FILTER_COUNT_MAX 2 + + /* Each Flexible Filter is at most 128 (0x80) bytes in length */ +-#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 +-#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ +-#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ ++#define IXGBE_FLEXIBLE_FILTER_SIZE_MAX 128 ++#define IXGBE_FHFT_LENGTH_OFFSET 0xFC /* Length byte in FHFT */ ++#define IXGBE_FHFT_LENGTH_MASK 0x0FF /* Length in lower byte */ + + /* Definitions for power management and wakeup registers */ + /* Wake Up Control */ +-#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ +-#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ +-#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ ++#define IXGBE_WUC_PME_EN 0x00000002 /* PME Enable */ ++#define IXGBE_WUC_PME_STATUS 0x00000004 /* PME Status */ ++#define IXGBE_WUC_WKEN 0x00000010 /* Enable PE_WAKE_N pin assertion */ + + /* Wake Up Filter Control */ +-#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ +-#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ +-#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ +-#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ +-#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ +-#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ +-#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ +-#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ +-#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ +- +-#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ +-#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ +-#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ +-#define IXGBE_WUFC_FLX2 
0x00040000 /* Flexible Filter 2 Enable */ +-#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ +-#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ +-#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ +-#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ +-#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 /* Mask for Ext. flex filters */ +-#define IXGBE_WUFC_ALL_FILTERS 0x003F00FF /* Mask for all wakeup filters */ +-#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ ++#define IXGBE_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */ ++#define IXGBE_WUFC_MAG 0x00000002 /* Magic Packet Wakeup Enable */ ++#define IXGBE_WUFC_EX 0x00000004 /* Directed Exact Wakeup Enable */ ++#define IXGBE_WUFC_MC 0x00000008 /* Directed Multicast Wakeup Enable */ ++#define IXGBE_WUFC_BC 0x00000010 /* Broadcast Wakeup Enable */ ++#define IXGBE_WUFC_ARP 0x00000020 /* ARP Request Packet Wakeup Enable */ ++#define IXGBE_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */ ++#define IXGBE_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */ ++#define IXGBE_WUFC_MNG 0x00000100 /* Directed Mgmt Packet Wakeup Enable */ ++ ++#define IXGBE_WUFC_IGNORE_TCO 0x00008000 /* Ignore WakeOn TCO packets */ ++#define IXGBE_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */ ++#define IXGBE_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */ ++#define IXGBE_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */ ++#define IXGBE_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */ ++#define IXGBE_WUFC_FLX4 0x00100000 /* Flexible Filter 4 Enable */ ++#define IXGBE_WUFC_FLX5 0x00200000 /* Flexible Filter 5 Enable */ ++#define IXGBE_WUFC_FLX_FILTERS 0x000F0000 /* Mask for 4 flex filters */ ++#define IXGBE_WUFC_FLX_FILTERS_6 0x003F0000 /* Mask for 6 flex filters */ ++#define IXGBE_WUFC_FLX_FILTERS_8 0x00FF0000 /* Mask for 8 flex filters */ ++#define IXGBE_WUFC_FW_RST_WK 0x80000000 /* Ena wake on FW reset 
assertion */ ++/* Mask for Ext. flex filters */ ++#define IXGBE_WUFC_EXT_FLX_FILTERS 0x00300000 ++#define IXGBE_WUFC_ALL_FILTERS 0x000F00FF /* Mask all 4 flex filters */ ++#define IXGBE_WUFC_ALL_FILTERS_6 0x003F00FF /* Mask all 6 flex filters */ ++#define IXGBE_WUFC_ALL_FILTERS_8 0x00FF00FF /* Mask all 8 flex filters */ ++#define IXGBE_WUFC_FLX_OFFSET 16 /* Offset to the Flexible Filters bits */ + + /* Wake Up Status */ +-#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC +-#define IXGBE_WUS_MAG IXGBE_WUFC_MAG +-#define IXGBE_WUS_EX IXGBE_WUFC_EX +-#define IXGBE_WUS_MC IXGBE_WUFC_MC +-#define IXGBE_WUS_BC IXGBE_WUFC_BC +-#define IXGBE_WUS_ARP IXGBE_WUFC_ARP +-#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 +-#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 +-#define IXGBE_WUS_MNG IXGBE_WUFC_MNG +-#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 +-#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 +-#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 +-#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 +-#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 +-#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 +-#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS +- +-/* Wake Up Packet Length */ +-#define IXGBE_WUPL_LENGTH_MASK 0xFFFF ++#define IXGBE_WUS_LNKC IXGBE_WUFC_LNKC ++#define IXGBE_WUS_MAG IXGBE_WUFC_MAG ++#define IXGBE_WUS_EX IXGBE_WUFC_EX ++#define IXGBE_WUS_MC IXGBE_WUFC_MC ++#define IXGBE_WUS_BC IXGBE_WUFC_BC ++#define IXGBE_WUS_ARP IXGBE_WUFC_ARP ++#define IXGBE_WUS_IPV4 IXGBE_WUFC_IPV4 ++#define IXGBE_WUS_IPV6 IXGBE_WUFC_IPV6 ++#define IXGBE_WUS_MNG IXGBE_WUFC_MNG ++#define IXGBE_WUS_FLX0 IXGBE_WUFC_FLX0 ++#define IXGBE_WUS_FLX1 IXGBE_WUFC_FLX1 ++#define IXGBE_WUS_FLX2 IXGBE_WUFC_FLX2 ++#define IXGBE_WUS_FLX3 IXGBE_WUFC_FLX3 ++#define IXGBE_WUS_FLX4 IXGBE_WUFC_FLX4 ++#define IXGBE_WUS_FLX5 IXGBE_WUFC_FLX5 ++#define IXGBE_WUS_FLX_FILTERS IXGBE_WUFC_FLX_FILTERS ++#define IXGBE_WUS_FW_RST_WK IXGBE_WUFC_FW_RST_WK ++/* Proxy Status */ ++#define IXGBE_PROXYS_EX 0x00000004 /* Exact packet received */ ++#define IXGBE_PROXYS_ARP_DIR 0x00000020 /* ARP w/filter 
match received */ ++#define IXGBE_PROXYS_NS 0x00000200 /* IPV6 NS received */ ++#define IXGBE_PROXYS_NS_DIR 0x00000400 /* IPV6 NS w/DA match received */ ++#define IXGBE_PROXYS_ARP 0x00000800 /* ARP request packet received */ ++#define IXGBE_PROXYS_MLD 0x00001000 /* IPv6 MLD packet received */ ++ ++/* Proxying Filter Control */ ++#define IXGBE_PROXYFC_ENABLE 0x00000001 /* Port Proxying Enable */ ++#define IXGBE_PROXYFC_EX 0x00000004 /* Directed Exact Proxy Enable */ ++#define IXGBE_PROXYFC_ARP_DIR 0x00000020 /* Directed ARP Proxy Enable */ ++#define IXGBE_PROXYFC_NS 0x00000200 /* IPv6 Neighbor Solicitation */ ++#define IXGBE_PROXYFC_ARP 0x00000800 /* ARP Request Proxy Enable */ ++#define IXGBE_PROXYFC_MLD 0x00000800 /* IPv6 MLD Proxy Enable */ ++#define IXGBE_PROXYFC_NO_TCO 0x00008000 /* Ignore TCO packets */ ++ ++#define IXGBE_WUPL_LENGTH_MASK 0xFFFF + + /* DCB registers */ +-#define MAX_TRAFFIC_CLASS 8 +-#define X540_TRAFFIC_CLASS 4 +-#define IXGBE_RMCS 0x03D00 +-#define IXGBE_DPMCS 0x07F40 +-#define IXGBE_PDPMCS 0x0CD00 +-#define IXGBE_RUPPBMR 0x050A0 +-#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ +-#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ +-#define IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_DCB_MAX_TRAFFIC_CLASS 8 ++#define IXGBE_RMCS 0x03D00 ++#define IXGBE_DPMCS 0x07F40 ++#define IXGBE_PDPMCS 0x0CD00 ++#define IXGBE_RUPPBMR 0x050A0 ++#define IXGBE_RT2CR(_i) (0x03C20 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RT2SR(_i) (0x03C40 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_TDTQ2TCCR(_i) (0x0602C + ((_i) * 0x40)) /* 8 of these (0-7) */ ++#define IXGBE_TDTQ2TCSR(_i) (0x0622C + ((_i) * 0x40)) /* 8 of these (0-7) */ ++#define 
IXGBE_TDPT2TCCR(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_TDPT2TCSR(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ ++ ++/* Power Management */ ++/* DMA Coalescing configuration */ ++struct ixgbe_dmac_config { ++ u16 watchdog_timer; /* usec units */ ++ bool fcoe_en; ++ u32 link_speed; ++ u8 fcoe_tc; ++ u8 num_tcs; ++}; + ++/* ++ * DMA Coalescing threshold Rx PB TC[n] value in Kilobyte by link speed. ++ * DMACRXT = 10Gbps = 10,000 bits / usec = 1250 bytes / usec 70 * 1250 == ++ * 87500 bytes [85KB] ++ */ ++#define IXGBE_DMACRXT_10G 0x55 ++#define IXGBE_DMACRXT_1G 0x09 ++#define IXGBE_DMACRXT_100M 0x01 ++ ++/* DMA Coalescing registers */ ++#define IXGBE_DMCMNGTH 0x15F20 /* Management Threshold */ ++#define IXGBE_DMACR 0x02400 /* Control register */ ++#define IXGBE_DMCTH(_i) (0x03300 + ((_i) * 4)) /* 8 of these */ ++#define IXGBE_DMCTLX 0x02404 /* Time to Lx request */ ++/* DMA Coalescing register fields */ ++#define IXGBE_DMCMNGTH_DMCMNGTH_MASK 0x000FFFF0 /* Mng Threshold mask */ ++#define IXGBE_DMCMNGTH_DMCMNGTH_SHIFT 4 /* Management Threshold shift */ ++#define IXGBE_DMACR_DMACWT_MASK 0x0000FFFF /* Watchdog Timer mask */ ++#define IXGBE_DMACR_HIGH_PRI_TC_MASK 0x00FF0000 ++#define IXGBE_DMACR_HIGH_PRI_TC_SHIFT 16 ++#define IXGBE_DMACR_EN_MNG_IND 0x10000000 /* Enable Mng Indications */ ++#define IXGBE_DMACR_LX_COAL_IND 0x40000000 /* Lx Coalescing indicate */ ++#define IXGBE_DMACR_DMAC_EN 0x80000000 /* DMA Coalescing Enable */ ++#define IXGBE_DMCTH_DMACRXT_MASK 0x000001FF /* Receive Threshold mask */ ++#define IXGBE_DMCTLX_TTLX_MASK 0x00000FFF /* Time to Lx request mask */ ++ ++/* EEE registers */ ++#define IXGBE_EEER 0x043A0 /* EEE register */ ++#define IXGBE_EEE_STAT 0x04398 /* EEE Status */ ++#define IXGBE_EEE_SU 0x04380 /* EEE Set up */ ++#define IXGBE_EEE_SU_TEEE_DLY_SHIFT 26 ++#define IXGBE_TLPIC 0x041F4 /* EEE Tx LPI count */ ++#define IXGBE_RLPIC 0x041F8 /* EEE Rx LPI count */ ++ ++/* EEE register fields */ ++#define 
IXGBE_EEER_TX_LPI_EN 0x00010000 /* Enable EEE LPI TX path */ ++#define IXGBE_EEER_RX_LPI_EN 0x00020000 /* Enable EEE LPI RX path */ ++#define IXGBE_EEE_STAT_NEG 0x20000000 /* EEE support neg on link */ ++#define IXGBE_EEE_RX_LPI_STATUS 0x40000000 /* RX Link in LPI status */ ++#define IXGBE_EEE_TX_LPI_STATUS 0x80000000 /* TX Link in LPI status */ + + /* Security Control Registers */ +-#define IXGBE_SECTXCTRL 0x08800 +-#define IXGBE_SECTXSTAT 0x08804 +-#define IXGBE_SECTXBUFFAF 0x08808 +-#define IXGBE_SECTXMINIFG 0x08810 +-#define IXGBE_SECRXCTRL 0x08D00 +-#define IXGBE_SECRXSTAT 0x08D04 ++#define IXGBE_SECTXCTRL 0x08800 ++#define IXGBE_SECTXSTAT 0x08804 ++#define IXGBE_SECTXBUFFAF 0x08808 ++#define IXGBE_SECTXMINIFG 0x08810 ++#define IXGBE_SECRXCTRL 0x08D00 ++#define IXGBE_SECRXSTAT 0x08D04 + + /* Security Bit Fields and Masks */ +-#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 +-#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 +-#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 ++#define IXGBE_SECTXCTRL_SECTX_DIS 0x00000001 ++#define IXGBE_SECTXCTRL_TX_DIS 0x00000002 ++#define IXGBE_SECTXCTRL_STORE_FORWARD 0x00000004 + +-#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 +-#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 ++#define IXGBE_SECTXSTAT_SECTX_RDY 0x00000001 ++#define IXGBE_SECTXSTAT_ECC_TXERR 0x00000002 + +-#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 +-#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 ++#define IXGBE_SECRXCTRL_SECRX_DIS 0x00000001 ++#define IXGBE_SECRXCTRL_RX_DIS 0x00000002 + +-#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 +-#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 ++#define IXGBE_SECRXSTAT_SECRX_RDY 0x00000001 ++#define IXGBE_SECRXSTAT_ECC_RXERR 0x00000002 + + /* LinkSec (MacSec) Registers */ +-#define IXGBE_LSECTXCAP 0x08A00 +-#define IXGBE_LSECRXCAP 0x08F00 +-#define IXGBE_LSECTXCTRL 0x08A04 +-#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ +-#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ +-#define IXGBE_LSECTXSA 0x08A10 +-#define IXGBE_LSECTXPN0 0x08A14 
+-#define IXGBE_LSECTXPN1 0x08A18 +-#define IXGBE_LSECTXKEY0(_n) (0x08A1C + (4 * (_n))) /* 4 of these (0-3) */ +-#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ +-#define IXGBE_LSECRXCTRL 0x08F04 +-#define IXGBE_LSECRXSCL 0x08F08 +-#define IXGBE_LSECRXSCH 0x08F0C +-#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ +-#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ +-#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) +-#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ +-#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ +-#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ +-#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ +-#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ +-#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ +-#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ +-#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ +-#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ +-#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ +-#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ +-#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ +-#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ +-#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ +-#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ +-#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ +-#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ +-#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ +-#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ ++#define IXGBE_LSECTXCAP 0x08A00 ++#define IXGBE_LSECRXCAP 0x08F00 ++#define IXGBE_LSECTXCTRL 0x08A04 ++#define IXGBE_LSECTXSCL 0x08A08 /* SCI Low */ ++#define IXGBE_LSECTXSCH 0x08A0C /* SCI High */ ++#define IXGBE_LSECTXSA 0x08A10 ++#define IXGBE_LSECTXPN0 0x08A14 ++#define IXGBE_LSECTXPN1 0x08A18 ++#define IXGBE_LSECTXKEY0(_n) (0x08A1C 
+ (4 * (_n))) /* 4 of these (0-3) */ ++#define IXGBE_LSECTXKEY1(_n) (0x08A2C + (4 * (_n))) /* 4 of these (0-3) */ ++#define IXGBE_LSECRXCTRL 0x08F04 ++#define IXGBE_LSECRXSCL 0x08F08 ++#define IXGBE_LSECRXSCH 0x08F0C ++#define IXGBE_LSECRXSA(_i) (0x08F10 + (4 * (_i))) /* 2 of these (0-1) */ ++#define IXGBE_LSECRXPN(_i) (0x08F18 + (4 * (_i))) /* 2 of these (0-1) */ ++#define IXGBE_LSECRXKEY(_n, _m) (0x08F20 + ((0x10 * (_n)) + (4 * (_m)))) ++#define IXGBE_LSECTXUT 0x08A3C /* OutPktsUntagged */ ++#define IXGBE_LSECTXPKTE 0x08A40 /* OutPktsEncrypted */ ++#define IXGBE_LSECTXPKTP 0x08A44 /* OutPktsProtected */ ++#define IXGBE_LSECTXOCTE 0x08A48 /* OutOctetsEncrypted */ ++#define IXGBE_LSECTXOCTP 0x08A4C /* OutOctetsProtected */ ++#define IXGBE_LSECRXUT 0x08F40 /* InPktsUntagged/InPktsNoTag */ ++#define IXGBE_LSECRXOCTD 0x08F44 /* InOctetsDecrypted */ ++#define IXGBE_LSECRXOCTV 0x08F48 /* InOctetsValidated */ ++#define IXGBE_LSECRXBAD 0x08F4C /* InPktsBadTag */ ++#define IXGBE_LSECRXNOSCI 0x08F50 /* InPktsNoSci */ ++#define IXGBE_LSECRXUNSCI 0x08F54 /* InPktsUnknownSci */ ++#define IXGBE_LSECRXUNCH 0x08F58 /* InPktsUnchecked */ ++#define IXGBE_LSECRXDELAY 0x08F5C /* InPktsDelayed */ ++#define IXGBE_LSECRXLATE 0x08F60 /* InPktsLate */ ++#define IXGBE_LSECRXOK(_n) (0x08F64 + (0x04 * (_n))) /* InPktsOk */ ++#define IXGBE_LSECRXINV(_n) (0x08F6C + (0x04 * (_n))) /* InPktsInvalid */ ++#define IXGBE_LSECRXNV(_n) (0x08F74 + (0x04 * (_n))) /* InPktsNotValid */ ++#define IXGBE_LSECRXUNSA 0x08F7C /* InPktsUnusedSa */ ++#define IXGBE_LSECRXNUSA 0x08F80 /* InPktsNotUsingSa */ + + /* LinkSec (MacSec) Bit Fields and Masks */ +-#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 +-#define IXGBE_LSECTXCAP_SUM_SHIFT 16 +-#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 +-#define IXGBE_LSECRXCAP_SUM_SHIFT 16 +- +-#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 +-#define IXGBE_LSECTXCTRL_DISABLE 0x0 +-#define IXGBE_LSECTXCTRL_AUTH 0x1 +-#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 +-#define 
IXGBE_LSECTXCTRL_AISCI 0x00000020 +-#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 +-#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 +- +-#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C +-#define IXGBE_LSECRXCTRL_EN_SHIFT 2 +-#define IXGBE_LSECRXCTRL_DISABLE 0x0 +-#define IXGBE_LSECRXCTRL_CHECK 0x1 +-#define IXGBE_LSECRXCTRL_STRICT 0x2 +-#define IXGBE_LSECRXCTRL_DROP 0x3 +-#define IXGBE_LSECRXCTRL_PLSH 0x00000040 +-#define IXGBE_LSECRXCTRL_RP 0x00000080 +-#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 ++#define IXGBE_LSECTXCAP_SUM_MASK 0x00FF0000 ++#define IXGBE_LSECTXCAP_SUM_SHIFT 16 ++#define IXGBE_LSECRXCAP_SUM_MASK 0x00FF0000 ++#define IXGBE_LSECRXCAP_SUM_SHIFT 16 ++ ++#define IXGBE_LSECTXCTRL_EN_MASK 0x00000003 ++#define IXGBE_LSECTXCTRL_DISABLE 0x0 ++#define IXGBE_LSECTXCTRL_AUTH 0x1 ++#define IXGBE_LSECTXCTRL_AUTH_ENCRYPT 0x2 ++#define IXGBE_LSECTXCTRL_AISCI 0x00000020 ++#define IXGBE_LSECTXCTRL_PNTHRSH_MASK 0xFFFFFF00 ++#define IXGBE_LSECTXCTRL_RSV_MASK 0x000000D8 ++ ++#define IXGBE_LSECRXCTRL_EN_MASK 0x0000000C ++#define IXGBE_LSECRXCTRL_EN_SHIFT 2 ++#define IXGBE_LSECRXCTRL_DISABLE 0x0 ++#define IXGBE_LSECRXCTRL_CHECK 0x1 ++#define IXGBE_LSECRXCTRL_STRICT 0x2 ++#define IXGBE_LSECRXCTRL_DROP 0x3 ++#define IXGBE_LSECRXCTRL_PLSH 0x00000040 ++#define IXGBE_LSECRXCTRL_RP 0x00000080 ++#define IXGBE_LSECRXCTRL_RSV_MASK 0xFFFFFF33 + + /* IpSec Registers */ +-#define IXGBE_IPSTXIDX 0x08900 +-#define IXGBE_IPSTXSALT 0x08904 +-#define IXGBE_IPSTXKEY(_i) (0x08908 + (4 * (_i))) /* 4 of these (0-3) */ +-#define IXGBE_IPSRXIDX 0x08E00 +-#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ +-#define IXGBE_IPSRXSPI 0x08E14 +-#define IXGBE_IPSRXIPIDX 0x08E18 +-#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ +-#define IXGBE_IPSRXSALT 0x08E2C +-#define IXGBE_IPSRXMOD 0x08E30 +- +-#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 ++#define IXGBE_IPSTXIDX 0x08900 ++#define IXGBE_IPSTXSALT 0x08904 ++#define IXGBE_IPSTXKEY(_i) (0x08908 + 
(4 * (_i))) /* 4 of these (0-3) */ ++#define IXGBE_IPSRXIDX 0x08E00 ++#define IXGBE_IPSRXIPADDR(_i) (0x08E04 + (4 * (_i))) /* 4 of these (0-3) */ ++#define IXGBE_IPSRXSPI 0x08E14 ++#define IXGBE_IPSRXIPIDX 0x08E18 ++#define IXGBE_IPSRXKEY(_i) (0x08E1C + (4 * (_i))) /* 4 of these (0-3) */ ++#define IXGBE_IPSRXSALT 0x08E2C ++#define IXGBE_IPSRXMOD 0x08E30 ++ ++#define IXGBE_SECTXCTRL_STORE_FORWARD_ENABLE 0x4 + + /* DCB registers */ +-#define IXGBE_RTRPCS 0x02430 +-#define IXGBE_RTTDCS 0x04900 +-#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ +-#define IXGBE_RTTPCS 0x0CD00 +-#define IXGBE_RTRUP2TC 0x03020 +-#define IXGBE_RTTUP2TC 0x0C800 +-#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ +-#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_RTTDQSEL 0x04904 +-#define IXGBE_RTTDT1C 0x04908 +-#define IXGBE_RTTDT1S 0x0490C +-#define IXGBE_RTTQCNCR 0x08B00 +-#define IXGBE_RTTQCNTG 0x04A90 +-#define IXGBE_RTTBCNRD 0x0498C +-#define IXGBE_RTTQCNRR 0x0498C +-#define IXGBE_RTTDTECC 0x04990 +-#define IXGBE_RTTDTECC_NO_BCN 0x00000100 +-#define IXGBE_RTTBCNRC 0x04984 +-#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 ++#define IXGBE_RTRPCS 0x02430 ++#define IXGBE_RTTDCS 0x04900 ++#define IXGBE_RTTDCS_ARBDIS 0x00000040 /* DCB arbiter disable */ ++#define IXGBE_RTTPCS 0x0CD00 ++#define IXGBE_RTRUP2TC 0x03020 ++#define IXGBE_RTTUP2TC 0x0C800 ++#define IXGBE_RTRPT4C(_i) (0x02140 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_TXLLQ(_i) (0x082E0 + ((_i) * 4)) /* 4 of these (0-3) */ ++#define IXGBE_RTRPT4S(_i) (0x02160 + ((_i) * 4)) /* 8 of these (0-7) 
*/ ++#define IXGBE_RTTDT2C(_i) (0x04910 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RTTDT2S(_i) (0x04930 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RTTPT2C(_i) (0x0CD20 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RTTPT2S(_i) (0x0CD40 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_RTTDQSEL 0x04904 ++#define IXGBE_RTTDT1C 0x04908 ++#define IXGBE_RTTDT1S 0x0490C ++#define IXGBE_RTTDTECC 0x04990 ++#define IXGBE_RTTDTECC_NO_BCN 0x00000100 ++ ++#define IXGBE_RTTBCNRC 0x04984 ++#define IXGBE_RTTBCNRC_RS_ENA 0x80000000 + #define IXGBE_RTTBCNRC_RF_DEC_MASK 0x00003FFF + #define IXGBE_RTTBCNRC_RF_INT_SHIFT 14 +-#define IXGBE_RTTBCNRC_RF_INT_MASK \ ++#define IXGBE_RTTBCNRC_RF_INT_MASK \ + (IXGBE_RTTBCNRC_RF_DEC_MASK << IXGBE_RTTBCNRC_RF_INT_SHIFT) +-#define IXGBE_RTTBCNRM 0x04980 +-#define IXGBE_RTTQCNRM 0x04980 ++#define IXGBE_RTTBCNRM 0x04980 + + /* FCoE DMA Context Registers */ +-#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ +-#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. 
PTR High */ +-#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ +-#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ +-#define IXGBE_FCINVST0 0x03FC0 /* FC Invalid DMA Context Status Reg 0 */ +-#define IXGBE_FCINVST(_i) (IXGBE_FCINVST0 + ((_i) * 4)) +-#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ +-#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ +-#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ +-#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ +-#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ +-#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 +-#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 +-#define IXGBE_FCBUFF_OFFSET_SHIFT 16 +-#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ +-#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ +-#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ +-#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ +-#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 +- ++/* FCoE Direct DMA Context */ ++#define IXGBE_FCDDC(_i, _j) (0x20000 + ((_i) * 0x4) + ((_j) * 0x10)) ++#define IXGBE_FCPTRL 0x02410 /* FC User Desc. PTR Low */ ++#define IXGBE_FCPTRH 0x02414 /* FC USer Desc. 
PTR High */ ++#define IXGBE_FCBUFF 0x02418 /* FC Buffer Control */ ++#define IXGBE_FCDMARW 0x02420 /* FC Receive DMA RW */ ++#define IXGBE_FCBUFF_VALID (1 << 0) /* DMA Context Valid */ ++#define IXGBE_FCBUFF_BUFFSIZE (3 << 3) /* User Buffer Size */ ++#define IXGBE_FCBUFF_WRCONTX (1 << 7) /* 0: Initiator, 1: Target */ ++#define IXGBE_FCBUFF_BUFFCNT 0x0000ff00 /* Number of User Buffers */ ++#define IXGBE_FCBUFF_OFFSET 0xffff0000 /* User Buffer Offset */ ++#define IXGBE_FCBUFF_BUFFSIZE_SHIFT 3 ++#define IXGBE_FCBUFF_BUFFCNT_SHIFT 8 ++#define IXGBE_FCBUFF_OFFSET_SHIFT 16 ++#define IXGBE_FCDMARW_WE (1 << 14) /* Write enable */ ++#define IXGBE_FCDMARW_RE (1 << 15) /* Read enable */ ++#define IXGBE_FCDMARW_FCOESEL 0x000001ff /* FC X_ID: 11 bits */ ++#define IXGBE_FCDMARW_LASTSIZE 0xffff0000 /* Last User Buffer Size */ ++#define IXGBE_FCDMARW_LASTSIZE_SHIFT 16 + /* FCoE SOF/EOF */ +-#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ +-#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ +-#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ +-#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ ++#define IXGBE_TEOFF 0x04A94 /* Tx FC EOF */ ++#define IXGBE_TSOFF 0x04A98 /* Tx FC SOF */ ++#define IXGBE_REOFF 0x05158 /* Rx FC EOF */ ++#define IXGBE_RSOFF 0x051F8 /* Rx FC SOF */ + /* FCoE Filter Context Registers */ +-#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ +-#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ +-#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ +-#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ +-#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ +-#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ +-#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ +-#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ +-#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ +-#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ ++#define IXGBE_FCD_ID 0x05114 /* FCoE D_ID */ ++#define IXGBE_FCSMAC 0x0510C /* FCoE Source MAC */ ++#define 
IXGBE_FCFLTRW_SMAC_HIGH_SHIFT 16 ++/* FCoE Direct Filter Context */ ++#define IXGBE_FCDFC(_i, _j) (0x28000 + ((_i) * 0x4) + ((_j) * 0x10)) ++#define IXGBE_FCDFCD(_i) (0x30000 + ((_i) * 0x4)) ++#define IXGBE_FCFLT 0x05108 /* FC FLT Context */ ++#define IXGBE_FCFLTRW 0x05110 /* FC Filter RW Control */ ++#define IXGBE_FCPARAM 0x051d8 /* FC Offset Parameter */ ++#define IXGBE_FCFLT_VALID (1 << 0) /* Filter Context Valid */ ++#define IXGBE_FCFLT_FIRST (1 << 1) /* Filter First */ ++#define IXGBE_FCFLT_SEQID 0x00ff0000 /* Sequence ID */ ++#define IXGBE_FCFLT_SEQCNT 0xff000000 /* Sequence Count */ ++#define IXGBE_FCFLTRW_RVALDT (1 << 13) /* Fast Re-Validation */ ++#define IXGBE_FCFLTRW_WE (1 << 14) /* Write Enable */ ++#define IXGBE_FCFLTRW_RE (1 << 15) /* Read Enable */ + /* FCoE Receive Control */ +-#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ +-#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ +-#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ +-#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ +-#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ +-#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ +-#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. Header */ +-#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ +-#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ +-#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ +-#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 ++#define IXGBE_FCRXCTRL 0x05100 /* FC Receive Control */ ++#define IXGBE_FCRXCTRL_FCOELLI (1 << 0) /* Low latency interrupt */ ++#define IXGBE_FCRXCTRL_SAVBAD (1 << 1) /* Save Bad Frames */ ++#define IXGBE_FCRXCTRL_FRSTRDH (1 << 2) /* EN 1st Read Header */ ++#define IXGBE_FCRXCTRL_LASTSEQH (1 << 3) /* EN Last Header in Seq */ ++#define IXGBE_FCRXCTRL_ALLH (1 << 4) /* EN All Headers */ ++#define IXGBE_FCRXCTRL_FRSTSEQH (1 << 5) /* EN 1st Seq. 
Header */ ++#define IXGBE_FCRXCTRL_ICRC (1 << 6) /* Ignore Bad FC CRC */ ++#define IXGBE_FCRXCTRL_FCCRCBO (1 << 7) /* FC CRC Byte Ordering */ ++#define IXGBE_FCRXCTRL_FCOEVER 0x00000f00 /* FCoE Version: 4 bits */ ++#define IXGBE_FCRXCTRL_FCOEVER_SHIFT 8 + /* FCoE Redirection */ +-#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ +-#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ +-#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ +-#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ +-#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ +-#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ ++#define IXGBE_FCRECTL 0x0ED00 /* FC Redirection Control */ ++#define IXGBE_FCRETA0 0x0ED10 /* FC Redirection Table 0 */ ++#define IXGBE_FCRETA(_i) (IXGBE_FCRETA0 + ((_i) * 4)) /* FCoE Redir */ ++#define IXGBE_FCRECTL_ENA 0x1 /* FCoE Redir Table Enable */ ++#define IXGBE_FCRETASEL_ENA 0x2 /* FCoE FCRETASEL bit */ ++#define IXGBE_FCRETA_SIZE 8 /* Max entries in FCRETA */ ++#define IXGBE_FCRETA_ENTRY_MASK 0x0000007f /* 7 bits for the queue index */ ++#define IXGBE_FCRETA_SIZE_X550 32 /* Max entries in FCRETA */ ++/* Higher 7 bits for the queue index */ ++#define IXGBE_FCRETA_ENTRY_HIGH_MASK 0x007F0000 ++#define IXGBE_FCRETA_ENTRY_HIGH_SHIFT 16 + + /* Stats registers */ +-#define IXGBE_CRCERRS 0x04000 +-#define IXGBE_ILLERRC 0x04004 +-#define IXGBE_ERRBC 0x04008 +-#define IXGBE_MSPDC 0x04010 +-#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ +-#define IXGBE_MLFC 0x04034 +-#define IXGBE_MRFC 0x04038 +-#define IXGBE_RLEC 0x04040 +-#define IXGBE_LXONTXC 0x03F60 +-#define IXGBE_LXONRXC 0x0CF60 +-#define IXGBE_LXOFFTXC 0x03F68 +-#define IXGBE_LXOFFRXC 0x0CF68 +-#define IXGBE_LXONRXCNT 0x041A4 +-#define IXGBE_LXOFFRXCNT 0x041A8 +-#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ +-#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ +-#define 
IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ +-#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ +-#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ +-#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ +-#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ +-#define IXGBE_PRC64 0x0405C +-#define IXGBE_PRC127 0x04060 +-#define IXGBE_PRC255 0x04064 +-#define IXGBE_PRC511 0x04068 +-#define IXGBE_PRC1023 0x0406C +-#define IXGBE_PRC1522 0x04070 +-#define IXGBE_GPRC 0x04074 +-#define IXGBE_BPRC 0x04078 +-#define IXGBE_MPRC 0x0407C +-#define IXGBE_GPTC 0x04080 +-#define IXGBE_GORCL 0x04088 +-#define IXGBE_GORCH 0x0408C +-#define IXGBE_GOTCL 0x04090 +-#define IXGBE_GOTCH 0x04094 +-#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ +-#define IXGBE_RUC 0x040A4 +-#define IXGBE_RFC 0x040A8 +-#define IXGBE_ROC 0x040AC +-#define IXGBE_RJC 0x040B0 +-#define IXGBE_MNGPRC 0x040B4 +-#define IXGBE_MNGPDC 0x040B8 +-#define IXGBE_MNGPTC 0x0CF90 +-#define IXGBE_TORL 0x040C0 +-#define IXGBE_TORH 0x040C4 +-#define IXGBE_TPR 0x040D0 +-#define IXGBE_TPT 0x040D4 +-#define IXGBE_PTC64 0x040D8 +-#define IXGBE_PTC127 0x040DC +-#define IXGBE_PTC255 0x040E0 +-#define IXGBE_PTC511 0x040E4 +-#define IXGBE_PTC1023 0x040E8 +-#define IXGBE_PTC1522 0x040EC +-#define IXGBE_MPTC 0x040F0 +-#define IXGBE_BPTC 0x040F4 +-#define IXGBE_XEC 0x04120 +-#define IXGBE_SSVPC 0x08780 +- +-#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) +-#define IXGBE_TQSMR(_i) (((_i) <= 7) ? 
(0x07300 + ((_i) * 4)) : \ ++#define IXGBE_CRCERRS 0x04000 ++#define IXGBE_ILLERRC 0x04004 ++#define IXGBE_ERRBC 0x04008 ++#define IXGBE_MSPDC 0x04010 ++#define IXGBE_MPC(_i) (0x03FA0 + ((_i) * 4)) /* 8 of these 3FA0-3FBC*/ ++#define IXGBE_MLFC 0x04034 ++#define IXGBE_MRFC 0x04038 ++#define IXGBE_RLEC 0x04040 ++#define IXGBE_LXONTXC 0x03F60 ++#define IXGBE_LXONRXC 0x0CF60 ++#define IXGBE_LXOFFTXC 0x03F68 ++#define IXGBE_LXOFFRXC 0x0CF68 ++#define IXGBE_LXONRXCNT 0x041A4 ++#define IXGBE_LXOFFRXCNT 0x041A8 ++#define IXGBE_PXONRXCNT(_i) (0x04140 + ((_i) * 4)) /* 8 of these */ ++#define IXGBE_PXOFFRXCNT(_i) (0x04160 + ((_i) * 4)) /* 8 of these */ ++#define IXGBE_PXON2OFFCNT(_i) (0x03240 + ((_i) * 4)) /* 8 of these */ ++#define IXGBE_PXONTXC(_i) (0x03F00 + ((_i) * 4)) /* 8 of these 3F00-3F1C*/ ++#define IXGBE_PXONRXC(_i) (0x0CF00 + ((_i) * 4)) /* 8 of these CF00-CF1C*/ ++#define IXGBE_PXOFFTXC(_i) (0x03F20 + ((_i) * 4)) /* 8 of these 3F20-3F3C*/ ++#define IXGBE_PXOFFRXC(_i) (0x0CF20 + ((_i) * 4)) /* 8 of these CF20-CF3C*/ ++#define IXGBE_PRC64 0x0405C ++#define IXGBE_PRC127 0x04060 ++#define IXGBE_PRC255 0x04064 ++#define IXGBE_PRC511 0x04068 ++#define IXGBE_PRC1023 0x0406C ++#define IXGBE_PRC1522 0x04070 ++#define IXGBE_GPRC 0x04074 ++#define IXGBE_BPRC 0x04078 ++#define IXGBE_MPRC 0x0407C ++#define IXGBE_GPTC 0x04080 ++#define IXGBE_GORCL 0x04088 ++#define IXGBE_GORCH 0x0408C ++#define IXGBE_GOTCL 0x04090 ++#define IXGBE_GOTCH 0x04094 ++#define IXGBE_RNBC(_i) (0x03FC0 + ((_i) * 4)) /* 8 of these 3FC0-3FDC*/ ++#define IXGBE_RUC 0x040A4 ++#define IXGBE_RFC 0x040A8 ++#define IXGBE_ROC 0x040AC ++#define IXGBE_RJC 0x040B0 ++#define IXGBE_MNGPRC 0x040B4 ++#define IXGBE_MNGPDC 0x040B8 ++#define IXGBE_MNGPTC 0x0CF90 ++#define IXGBE_TORL 0x040C0 ++#define IXGBE_TORH 0x040C4 ++#define IXGBE_TPR 0x040D0 ++#define IXGBE_TPT 0x040D4 ++#define IXGBE_PTC64 0x040D8 ++#define IXGBE_PTC127 0x040DC ++#define IXGBE_PTC255 0x040E0 ++#define IXGBE_PTC511 0x040E4 ++#define IXGBE_PTC1023 
0x040E8 ++#define IXGBE_PTC1522 0x040EC ++#define IXGBE_MPTC 0x040F0 ++#define IXGBE_BPTC 0x040F4 ++#define IXGBE_XEC 0x04120 ++#define IXGBE_SSVPC 0x08780 ++ ++#define IXGBE_RQSMR(_i) (0x02300 + ((_i) * 4)) ++#define IXGBE_TQSMR(_i) (((_i) <= 7) ? (0x07300 + ((_i) * 4)) : \ + (0x08600 + ((_i) * 4))) +-#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) +- +-#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ +-#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ +-#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +-#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ +-#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ +-#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ +-#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ +-#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ +-#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ +-#define IXGBE_FCCRC 0x05118 /* Count of Good Eth CRC w/ Bad FC CRC */ +-#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ +-#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ +-#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ +-#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ +-#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ +-#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ +-#define IXGBE_O2BGPTC 0x041C4 +-#define IXGBE_O2BSPC 0x087B0 +-#define IXGBE_B2OSPC 0x041C0 +-#define IXGBE_B2OGPRC 0x02F90 +-#define IXGBE_PCRC8ECL 0x0E810 +-#define IXGBE_PCRC8ECH 0x0E811 +-#define IXGBE_PCRC8ECH_MASK 0x1F +-#define IXGBE_LDPCECL 0x0E820 +-#define IXGBE_LDPCECH 0x0E821 ++#define IXGBE_TQSM(_i) (0x08600 + ((_i) * 4)) ++ ++#define IXGBE_QPRC(_i) (0x01030 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QPTC(_i) (0x06030 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QBRC(_i) (0x01034 + ((_i) * 0x40)) 
/* 16 of these */ ++#define IXGBE_QBTC(_i) (0x06034 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QBRC_L(_i) (0x01034 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QBRC_H(_i) (0x01038 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QPRDC(_i) (0x01430 + ((_i) * 0x40)) /* 16 of these */ ++#define IXGBE_QBTC_L(_i) (0x08700 + ((_i) * 0x8)) /* 16 of these */ ++#define IXGBE_QBTC_H(_i) (0x08704 + ((_i) * 0x8)) /* 16 of these */ ++#define IXGBE_FCCRC 0x05118 /* Num of Good Eth CRC w/ Bad FC CRC */ ++#define IXGBE_FCOERPDC 0x0241C /* FCoE Rx Packets Dropped Count */ ++#define IXGBE_FCLAST 0x02424 /* FCoE Last Error Count */ ++#define IXGBE_FCOEPRC 0x02428 /* Number of FCoE Packets Received */ ++#define IXGBE_FCOEDWRC 0x0242C /* Number of FCoE DWords Received */ ++#define IXGBE_FCOEPTC 0x08784 /* Number of FCoE Packets Transmitted */ ++#define IXGBE_FCOEDWTC 0x08788 /* Number of FCoE DWords Transmitted */ ++#define IXGBE_FCCRC_CNT_MASK 0x0000FFFF /* CRC_CNT: bit 0 - 15 */ ++#define IXGBE_FCLAST_CNT_MASK 0x0000FFFF /* Last_CNT: bit 0 - 15 */ ++#define IXGBE_O2BGPTC 0x041C4 ++#define IXGBE_O2BSPC 0x087B0 ++#define IXGBE_B2OSPC 0x041C0 ++#define IXGBE_B2OGPRC 0x02F90 ++#define IXGBE_BUPRC 0x04180 ++#define IXGBE_BMPRC 0x04184 ++#define IXGBE_BBPRC 0x04188 ++#define IXGBE_BUPTC 0x0418C ++#define IXGBE_BMPTC 0x04190 ++#define IXGBE_BBPTC 0x04194 ++#define IXGBE_BCRCERRS 0x04198 ++#define IXGBE_BXONRXC 0x0419C ++#define IXGBE_BXOFFRXC 0x041E0 ++#define IXGBE_BXONTXC 0x041E4 ++#define IXGBE_BXOFFTXC 0x041E8 + + /* Management */ +-#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_MANC 0x05820 +-#define IXGBE_MFVAL 0x05824 +-#define IXGBE_MANC2H 0x05860 +-#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_MIPAF 0x058B0 +-#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ +-#define IXGBE_MMAH(_i) (0x05914 + ((_i) 
* 8)) /* 4 of these (0-3) */ +-#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ +-#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ +-#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ +-#define IXGBE_LSWFW 0x15014 ++#define IXGBE_MAVTV(_i) (0x05010 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_MFUTP(_i) (0x05030 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_MANC 0x05820 ++#define IXGBE_MFVAL 0x05824 ++#define IXGBE_MANC2H 0x05860 ++#define IXGBE_MDEF(_i) (0x05890 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_MIPAF 0x058B0 ++#define IXGBE_MMAL(_i) (0x05910 + ((_i) * 8)) /* 4 of these (0-3) */ ++#define IXGBE_MMAH(_i) (0x05914 + ((_i) * 8)) /* 4 of these (0-3) */ ++#define IXGBE_FTFT 0x09400 /* 0x9400-0x97FC */ ++#define IXGBE_METF(_i) (0x05190 + ((_i) * 4)) /* 4 of these (0-3) */ ++#define IXGBE_MDEF_EXT(_i) (0x05160 + ((_i) * 4)) /* 8 of these (0-7) */ ++#define IXGBE_LSWFW 0x15F14 ++#define IXGBE_BMCIP(_i) (0x05050 + ((_i) * 4)) /* 0x5050-0x505C */ ++#define IXGBE_BMCIPVAL 0x05060 ++#define IXGBE_BMCIP_IPADDR_TYPE 0x00000001 ++#define IXGBE_BMCIP_IPADDR_VALID 0x00000002 + + /* Management Bit Fields and Masks */ ++#define IXGBE_MANC_MPROXYE 0x40000000 /* Management Proxy Enable */ + #define IXGBE_MANC_RCV_TCO_EN 0x00020000 /* Rcv TCO packet enable */ ++#define IXGBE_MANC_EN_BMC2OS 0x10000000 /* Ena BMC2OS and OS2BMC traffic */ ++#define IXGBE_MANC_EN_BMC2OS_SHIFT 28 + + /* Firmware Semaphore Register */ + #define IXGBE_FWSM_MODE_MASK 0xE ++#define IXGBE_FWSM_TS_ENABLED 0x1 + #define IXGBE_FWSM_FW_MODE_PT 0x4 + + /* ARC Subsystem registers */ +-#define IXGBE_HICR 0x15F00 +-#define IXGBE_FWSTS 0x15F0C +-#define IXGBE_HSMC0R 0x15F04 +-#define IXGBE_HSMC1R 0x15F08 +-#define IXGBE_SWSR 0x15F10 +-#define IXGBE_HFDR 0x15FE8 +-#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ +- +-#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ ++#define IXGBE_HICR 0x15F00 ++#define IXGBE_FWSTS 0x15F0C ++#define 
IXGBE_HSMC0R 0x15F04 ++#define IXGBE_HSMC1R 0x15F08 ++#define IXGBE_SWSR 0x15F10 ++#define IXGBE_HFDR 0x15FE8 ++#define IXGBE_FLEX_MNG 0x15800 /* 0x15800 - 0x15EFC */ ++ ++#define IXGBE_HICR_EN 0x01 /* Enable bit - RO */ + /* Driver sets this bit when done to put command in RAM */ +-#define IXGBE_HICR_C 0x02 +-#define IXGBE_HICR_SV 0x04 /* Status Validity */ +-#define IXGBE_HICR_FW_RESET_ENABLE 0x40 +-#define IXGBE_HICR_FW_RESET 0x80 ++#define IXGBE_HICR_C 0x02 ++#define IXGBE_HICR_SV 0x04 /* Status Validity */ ++#define IXGBE_HICR_FW_RESET_ENABLE 0x40 ++#define IXGBE_HICR_FW_RESET 0x80 + + /* PCI-E registers */ +-#define IXGBE_GCR 0x11000 +-#define IXGBE_GTV 0x11004 +-#define IXGBE_FUNCTAG 0x11008 +-#define IXGBE_GLT 0x1100C +-#define IXGBE_GSCL_1 0x11010 +-#define IXGBE_GSCL_2 0x11014 +-#define IXGBE_GSCL_3 0x11018 +-#define IXGBE_GSCL_4 0x1101C +-#define IXGBE_GSCN_0 0x11020 +-#define IXGBE_GSCN_1 0x11024 +-#define IXGBE_GSCN_2 0x11028 +-#define IXGBE_GSCN_3 0x1102C +-#define IXGBE_FACTPS 0x10150 +-#define IXGBE_PCIEANACTL 0x11040 +-#define IXGBE_SWSM 0x10140 +-#define IXGBE_FWSM 0x10148 +-#define IXGBE_GSSR 0x10160 +-#define IXGBE_MREVID 0x11064 +-#define IXGBE_DCA_ID 0x11070 +-#define IXGBE_DCA_CTRL 0x11074 +-#define IXGBE_SWFW_SYNC IXGBE_GSSR +- +-/* PCIe registers 82599-specific */ +-#define IXGBE_GCR_EXT 0x11050 +-#define IXGBE_GSCL_5_82599 0x11030 +-#define IXGBE_GSCL_6_82599 0x11034 +-#define IXGBE_GSCL_7_82599 0x11038 +-#define IXGBE_GSCL_8_82599 0x1103C +-#define IXGBE_PHYADR_82599 0x11040 +-#define IXGBE_PHYDAT_82599 0x11044 +-#define IXGBE_PHYCTL_82599 0x11048 +-#define IXGBE_PBACLR_82599 0x11068 +-#define IXGBE_CIAA_82599 0x11088 +-#define IXGBE_CIAD_82599 0x1108C +-#define IXGBE_PICAUSE 0x110B0 +-#define IXGBE_PIENA 0x110B8 +-#define IXGBE_CDQ_MBR_82599 0x110B4 +-#define IXGBE_PCIESPARE 0x110BC +-#define IXGBE_MISC_REG_82599 0x110F0 +-#define IXGBE_ECC_CTRL_0_82599 0x11100 +-#define IXGBE_ECC_CTRL_1_82599 0x11104 +-#define IXGBE_ECC_STATUS_82599 
0x110E0 +-#define IXGBE_BAR_CTRL_82599 0x110F4 ++#define IXGBE_GCR 0x11000 ++#define IXGBE_GTV 0x11004 ++#define IXGBE_FUNCTAG 0x11008 ++#define IXGBE_GLT 0x1100C ++#define IXGBE_PCIEPIPEADR 0x11004 ++#define IXGBE_PCIEPIPEDAT 0x11008 ++#define IXGBE_GSCL_1 0x11010 ++#define IXGBE_GSCL_2 0x11014 ++#define IXGBE_GSCL_1_X540 IXGBE_GSCL_1 ++#define IXGBE_GSCL_2_X540 IXGBE_GSCL_2 ++#define IXGBE_GSCL_3 0x11018 ++#define IXGBE_GSCL_4 0x1101C ++#define IXGBE_GSCN_0 0x11020 ++#define IXGBE_GSCN_1 0x11024 ++#define IXGBE_GSCN_2 0x11028 ++#define IXGBE_GSCN_3 0x1102C ++#define IXGBE_GSCN_0_X540 IXGBE_GSCN_0 ++#define IXGBE_GSCN_1_X540 IXGBE_GSCN_1 ++#define IXGBE_GSCN_2_X540 IXGBE_GSCN_2 ++#define IXGBE_GSCN_3_X540 IXGBE_GSCN_3 ++#define IXGBE_FACTPS 0x10150 ++#define IXGBE_FACTPS_X540 IXGBE_FACTPS ++#define IXGBE_GSCL_1_X550 0x11800 ++#define IXGBE_GSCL_2_X550 0x11804 ++#define IXGBE_GSCL_1_X550EM_x IXGBE_GSCL_1_X550 ++#define IXGBE_GSCL_2_X550EM_x IXGBE_GSCL_2_X550 ++#define IXGBE_GSCN_0_X550 0x11820 ++#define IXGBE_GSCN_1_X550 0x11824 ++#define IXGBE_GSCN_2_X550 0x11828 ++#define IXGBE_GSCN_3_X550 0x1182C ++#define IXGBE_GSCN_0_X550EM_x IXGBE_GSCN_0_X550 ++#define IXGBE_GSCN_1_X550EM_x IXGBE_GSCN_1_X550 ++#define IXGBE_GSCN_2_X550EM_x IXGBE_GSCN_2_X550 ++#define IXGBE_GSCN_3_X550EM_x IXGBE_GSCN_3_X550 ++#define IXGBE_FACTPS_X550 IXGBE_FACTPS ++#define IXGBE_FACTPS_X550EM_x IXGBE_FACTPS ++#define IXGBE_GSCL_1_X550EM_a IXGBE_GSCL_1_X550 ++#define IXGBE_GSCL_2_X550EM_a IXGBE_GSCL_2_X550 ++#define IXGBE_GSCN_0_X550EM_a IXGBE_GSCN_0_X550 ++#define IXGBE_GSCN_1_X550EM_a IXGBE_GSCN_1_X550 ++#define IXGBE_GSCN_2_X550EM_a IXGBE_GSCN_2_X550 ++#define IXGBE_GSCN_3_X550EM_a IXGBE_GSCN_3_X550 ++#define IXGBE_FACTPS_X550EM_a 0x15FEC ++#define IXGBE_FACTPS_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FACTPS) ++ ++#define IXGBE_PCIEANACTL 0x11040 ++#define IXGBE_SWSM 0x10140 ++#define IXGBE_SWSM_X540 IXGBE_SWSM ++#define IXGBE_SWSM_X550 IXGBE_SWSM ++#define IXGBE_SWSM_X550EM_x IXGBE_SWSM ++#define 
IXGBE_SWSM_X550EM_a 0x15F70 ++#define IXGBE_SWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWSM) ++ ++#define IXGBE_FWSM 0x10148 ++#define IXGBE_FWSM_X540 IXGBE_FWSM ++#define IXGBE_FWSM_X550 IXGBE_FWSM ++#define IXGBE_FWSM_X550EM_x IXGBE_FWSM ++#define IXGBE_FWSM_X550EM_a 0x15F74 ++#define IXGBE_FWSM_BY_MAC(_hw) IXGBE_BY_MAC((_hw), FWSM) ++ ++#define IXGBE_SWFW_SYNC IXGBE_GSSR ++#define IXGBE_SWFW_SYNC_X540 IXGBE_SWFW_SYNC ++#define IXGBE_SWFW_SYNC_X550 IXGBE_SWFW_SYNC ++#define IXGBE_SWFW_SYNC_X550EM_x IXGBE_SWFW_SYNC ++#define IXGBE_SWFW_SYNC_X550EM_a 0x15F78 ++#define IXGBE_SWFW_SYNC_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SWFW_SYNC) ++ ++#define IXGBE_GSSR 0x10160 ++#define IXGBE_MREVID 0x11064 ++#define IXGBE_DCA_ID 0x11070 ++#define IXGBE_DCA_CTRL 0x11074 ++ ++/* PCI-E registers 82599-Specific */ ++#define IXGBE_GCR_EXT 0x11050 ++#define IXGBE_GSCL_5_82599 0x11030 ++#define IXGBE_GSCL_6_82599 0x11034 ++#define IXGBE_GSCL_7_82599 0x11038 ++#define IXGBE_GSCL_8_82599 0x1103C ++#define IXGBE_GSCL_5_X540 IXGBE_GSCL_5_82599 ++#define IXGBE_GSCL_6_X540 IXGBE_GSCL_6_82599 ++#define IXGBE_GSCL_7_X540 IXGBE_GSCL_7_82599 ++#define IXGBE_GSCL_8_X540 IXGBE_GSCL_8_82599 ++#define IXGBE_PHYADR_82599 0x11040 ++#define IXGBE_PHYDAT_82599 0x11044 ++#define IXGBE_PHYCTL_82599 0x11048 ++#define IXGBE_PBACLR_82599 0x11068 ++#define IXGBE_CIAA 0x11088 ++#define IXGBE_CIAD 0x1108C ++#define IXGBE_CIAA_82599 IXGBE_CIAA ++#define IXGBE_CIAD_82599 IXGBE_CIAD ++#define IXGBE_CIAA_X540 IXGBE_CIAA ++#define IXGBE_CIAD_X540 IXGBE_CIAD ++#define IXGBE_GSCL_5_X550 0x11810 ++#define IXGBE_GSCL_6_X550 0x11814 ++#define IXGBE_GSCL_7_X550 0x11818 ++#define IXGBE_GSCL_8_X550 0x1181C ++#define IXGBE_GSCL_5_X550EM_x IXGBE_GSCL_5_X550 ++#define IXGBE_GSCL_6_X550EM_x IXGBE_GSCL_6_X550 ++#define IXGBE_GSCL_7_X550EM_x IXGBE_GSCL_7_X550 ++#define IXGBE_GSCL_8_X550EM_x IXGBE_GSCL_8_X550 ++#define IXGBE_CIAA_X550 0x11508 ++#define IXGBE_CIAD_X550 0x11510 ++#define IXGBE_CIAA_X550EM_x IXGBE_CIAA_X550 ++#define 
IXGBE_CIAD_X550EM_x IXGBE_CIAD_X550 ++#define IXGBE_GSCL_5_X550EM_a IXGBE_GSCL_5_X550 ++#define IXGBE_GSCL_6_X550EM_a IXGBE_GSCL_6_X550 ++#define IXGBE_GSCL_7_X550EM_a IXGBE_GSCL_7_X550 ++#define IXGBE_GSCL_8_X550EM_a IXGBE_GSCL_8_X550 ++#define IXGBE_CIAA_X550EM_a IXGBE_CIAA_X550 ++#define IXGBE_CIAD_X550EM_a IXGBE_CIAD_X550 ++#define IXGBE_CIAA_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAA) ++#define IXGBE_CIAD_BY_MAC(_hw) IXGBE_BY_MAC((_hw), CIAD) ++#define IXGBE_PICAUSE 0x110B0 ++#define IXGBE_PIENA 0x110B8 ++#define IXGBE_CDQ_MBR_82599 0x110B4 ++#define IXGBE_PCIESPARE 0x110BC ++#define IXGBE_MISC_REG_82599 0x110F0 ++#define IXGBE_ECC_CTRL_0_82599 0x11100 ++#define IXGBE_ECC_CTRL_1_82599 0x11104 ++#define IXGBE_ECC_STATUS_82599 0x110E0 ++#define IXGBE_BAR_CTRL_82599 0x110F4 + + /* PCI Express Control */ +-#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 +-#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 +-#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 +-#define IXGBE_GCR_CAP_VER2 0x00040000 +- +-#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 +-#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000 +-#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 +-#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 +-#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 +-#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ ++#define IXGBE_GCR_CMPL_TMOUT_MASK 0x0000F000 ++#define IXGBE_GCR_CMPL_TMOUT_10ms 0x00001000 ++#define IXGBE_GCR_CMPL_TMOUT_RESEND 0x00010000 ++#define IXGBE_GCR_CAP_VER2 0x00040000 ++ ++#define IXGBE_GCR_EXT_MSIX_EN 0x80000000 ++#define IXGBE_GCR_EXT_BUFFERS_CLEAR 0x40000000 ++#define IXGBE_GCR_EXT_VT_MODE_16 0x00000001 ++#define IXGBE_GCR_EXT_VT_MODE_32 0x00000002 ++#define IXGBE_GCR_EXT_VT_MODE_64 0x00000003 ++#define IXGBE_GCR_EXT_SRIOV (IXGBE_GCR_EXT_MSIX_EN | \ + IXGBE_GCR_EXT_VT_MODE_64) +- ++#define IXGBE_GCR_EXT_VT_MODE_MASK 0x00000003 + /* Time Sync Registers */ +-#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ +-#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control 
register - RW */ +-#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ +-#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ +-#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ +-#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ +-#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ +-#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ +-#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ +-#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ +-#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ +-#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ +-#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ +-#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ +-#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ +-#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ +-#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ +-#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ +-#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ +-#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ +-#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ +-#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ +-#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ +-#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ +-#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ +-#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ +-#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ ++#define IXGBE_TSYNCRXCTL 0x05188 /* Rx Time Sync Control register - RW */ ++#define IXGBE_TSYNCTXCTL 0x08C00 /* Tx Time Sync Control register - RW */ 
++#define IXGBE_RXSTMPL 0x051E8 /* Rx timestamp Low - RO */ ++#define IXGBE_RXSTMPH 0x051A4 /* Rx timestamp High - RO */ ++#define IXGBE_RXSATRL 0x051A0 /* Rx timestamp attribute low - RO */ ++#define IXGBE_RXSATRH 0x051A8 /* Rx timestamp attribute high - RO */ ++#define IXGBE_RXMTRL 0x05120 /* RX message type register low - RW */ ++#define IXGBE_TXSTMPL 0x08C04 /* Tx timestamp value Low - RO */ ++#define IXGBE_TXSTMPH 0x08C08 /* Tx timestamp value High - RO */ ++#define IXGBE_SYSTIML 0x08C0C /* System time register Low - RO */ ++#define IXGBE_SYSTIMH 0x08C10 /* System time register High - RO */ ++#define IXGBE_SYSTIMR 0x08C58 /* System time register Residue - RO */ ++#define IXGBE_TIMINCA 0x08C14 /* Increment attributes register - RW */ ++#define IXGBE_TIMADJL 0x08C18 /* Time Adjustment Offset register Low - RW */ ++#define IXGBE_TIMADJH 0x08C1C /* Time Adjustment Offset register High - RW */ ++#define IXGBE_TSAUXC 0x08C20 /* TimeSync Auxiliary Control register - RW */ ++#define IXGBE_TRGTTIML0 0x08C24 /* Target Time Register 0 Low - RW */ ++#define IXGBE_TRGTTIMH0 0x08C28 /* Target Time Register 0 High - RW */ ++#define IXGBE_TRGTTIML1 0x08C2C /* Target Time Register 1 Low - RW */ ++#define IXGBE_TRGTTIMH1 0x08C30 /* Target Time Register 1 High - RW */ ++#define IXGBE_CLKTIML 0x08C34 /* Clock Out Time Register Low - RW */ ++#define IXGBE_CLKTIMH 0x08C38 /* Clock Out Time Register High - RW */ ++#define IXGBE_FREQOUT0 0x08C34 /* Frequency Out 0 Control register - RW */ ++#define IXGBE_FREQOUT1 0x08C38 /* Frequency Out 1 Control register - RW */ ++#define IXGBE_AUXSTMPL0 0x08C3C /* Auxiliary Time Stamp 0 register Low - RO */ ++#define IXGBE_AUXSTMPH0 0x08C40 /* Auxiliary Time Stamp 0 register High - RO */ ++#define IXGBE_AUXSTMPL1 0x08C44 /* Auxiliary Time Stamp 1 register Low - RO */ ++#define IXGBE_AUXSTMPH1 0x08C48 /* Auxiliary Time Stamp 1 register High - RO */ ++#define IXGBE_TSIM 0x08C68 /* TimeSync Interrupt Mask Register - RW */ ++#define IXGBE_TSICR 
0x08C60 /* TimeSync Interrupt Cause Register - WO */ ++#define IXGBE_TSSDP 0x0003C /* TimeSync SDP Configuration Register - RW */ + + /* Diagnostic Registers */ +-#define IXGBE_RDSTATCTL 0x02C20 +-#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ +-#define IXGBE_RDHMPN 0x02F08 +-#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) +-#define IXGBE_RDPROBE 0x02F20 +-#define IXGBE_RDMAM 0x02F30 +-#define IXGBE_RDMAD 0x02F34 +-#define IXGBE_TDSTATCTL 0x07C20 +-#define IXGBE_TDSTAT(_i) (0x07C00 + ((_i) * 4)) /* 0x07C00 - 0x07C1C */ +-#define IXGBE_TDHMPN 0x07F08 +-#define IXGBE_TDHMPN2 0x082FC +-#define IXGBE_TXDESCIC 0x082CC +-#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) +-#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) +-#define IXGBE_TDPROBE 0x07F20 +-#define IXGBE_TXBUFCTRL 0x0C600 +-#define IXGBE_TXBUFDATA0 0x0C610 +-#define IXGBE_TXBUFDATA1 0x0C614 +-#define IXGBE_TXBUFDATA2 0x0C618 +-#define IXGBE_TXBUFDATA3 0x0C61C +-#define IXGBE_RXBUFCTRL 0x03600 +-#define IXGBE_RXBUFDATA0 0x03610 +-#define IXGBE_RXBUFDATA1 0x03614 +-#define IXGBE_RXBUFDATA2 0x03618 +-#define IXGBE_RXBUFDATA3 0x0361C +-#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ +-#define IXGBE_RFVAL 0x050A4 +-#define IXGBE_MDFTC1 0x042B8 +-#define IXGBE_MDFTC2 0x042C0 +-#define IXGBE_MDFTFIFO1 0x042C4 +-#define IXGBE_MDFTFIFO2 0x042C8 +-#define IXGBE_MDFTS 0x042CC +-#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ +-#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ +-#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ +-#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ +-#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ +-#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ +-#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ +-#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + 
((_i) * 4)) /* 8 of these C730-C73C*/ +-#define IXGBE_PCIEECCCTL 0x1106C +-#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ +-#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ +-#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ +-#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ +-#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ +-#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ +-#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ +-#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ +-#define IXGBE_PCIEECCCTL0 0x11100 +-#define IXGBE_PCIEECCCTL1 0x11104 +-#define IXGBE_RXDBUECC 0x03F70 +-#define IXGBE_TXDBUECC 0x0CF70 +-#define IXGBE_RXDBUEST 0x03F74 +-#define IXGBE_TXDBUEST 0x0CF74 +-#define IXGBE_PBTXECC 0x0C300 +-#define IXGBE_PBRXECC 0x03300 +-#define IXGBE_GHECCR 0x110B0 ++#define IXGBE_RDSTATCTL 0x02C20 ++#define IXGBE_RDSTAT(_i) (0x02C00 + ((_i) * 4)) /* 0x02C00-0x02C1C */ ++#define IXGBE_RDHMPN 0x02F08 ++#define IXGBE_RIC_DW(_i) (0x02F10 + ((_i) * 4)) ++#define IXGBE_RDPROBE 0x02F20 ++#define IXGBE_RDMAM 0x02F30 ++#define IXGBE_RDMAD 0x02F34 ++#define IXGBE_TDHMPN 0x07F08 ++#define IXGBE_TDHMPN2 0x082FC ++#define IXGBE_TXDESCIC 0x082CC ++#define IXGBE_TIC_DW(_i) (0x07F10 + ((_i) * 4)) ++#define IXGBE_TIC_DW2(_i) (0x082B0 + ((_i) * 4)) ++#define IXGBE_TDPROBE 0x07F20 ++#define IXGBE_TXBUFCTRL 0x0C600 ++#define IXGBE_TXBUFDATA0 0x0C610 ++#define IXGBE_TXBUFDATA1 0x0C614 ++#define IXGBE_TXBUFDATA2 0x0C618 ++#define IXGBE_TXBUFDATA3 0x0C61C ++#define IXGBE_RXBUFCTRL 0x03600 ++#define IXGBE_RXBUFDATA0 0x03610 ++#define IXGBE_RXBUFDATA1 0x03614 ++#define IXGBE_RXBUFDATA2 0x03618 ++#define IXGBE_RXBUFDATA3 0x0361C ++#define IXGBE_PCIE_DIAG(_i) (0x11090 + ((_i) * 4)) /* 8 of these */ ++#define IXGBE_RFVAL 0x050A4 ++#define IXGBE_MDFTC1 0x042B8 ++#define IXGBE_MDFTC2 
0x042C0 ++#define IXGBE_MDFTFIFO1 0x042C4 ++#define IXGBE_MDFTFIFO2 0x042C8 ++#define IXGBE_MDFTS 0x042CC ++#define IXGBE_RXDATAWRPTR(_i) (0x03700 + ((_i) * 4)) /* 8 of these 3700-370C*/ ++#define IXGBE_RXDESCWRPTR(_i) (0x03710 + ((_i) * 4)) /* 8 of these 3710-371C*/ ++#define IXGBE_RXDATARDPTR(_i) (0x03720 + ((_i) * 4)) /* 8 of these 3720-372C*/ ++#define IXGBE_RXDESCRDPTR(_i) (0x03730 + ((_i) * 4)) /* 8 of these 3730-373C*/ ++#define IXGBE_TXDATAWRPTR(_i) (0x0C700 + ((_i) * 4)) /* 8 of these C700-C70C*/ ++#define IXGBE_TXDESCWRPTR(_i) (0x0C710 + ((_i) * 4)) /* 8 of these C710-C71C*/ ++#define IXGBE_TXDATARDPTR(_i) (0x0C720 + ((_i) * 4)) /* 8 of these C720-C72C*/ ++#define IXGBE_TXDESCRDPTR(_i) (0x0C730 + ((_i) * 4)) /* 8 of these C730-C73C*/ ++#define IXGBE_PCIEECCCTL 0x1106C ++#define IXGBE_RXWRPTR(_i) (0x03100 + ((_i) * 4)) /* 8 of these 3100-310C*/ ++#define IXGBE_RXUSED(_i) (0x03120 + ((_i) * 4)) /* 8 of these 3120-312C*/ ++#define IXGBE_RXRDPTR(_i) (0x03140 + ((_i) * 4)) /* 8 of these 3140-314C*/ ++#define IXGBE_RXRDWRPTR(_i) (0x03160 + ((_i) * 4)) /* 8 of these 3160-310C*/ ++#define IXGBE_TXWRPTR(_i) (0x0C100 + ((_i) * 4)) /* 8 of these C100-C10C*/ ++#define IXGBE_TXUSED(_i) (0x0C120 + ((_i) * 4)) /* 8 of these C120-C12C*/ ++#define IXGBE_TXRDPTR(_i) (0x0C140 + ((_i) * 4)) /* 8 of these C140-C14C*/ ++#define IXGBE_TXRDWRPTR(_i) (0x0C160 + ((_i) * 4)) /* 8 of these C160-C10C*/ ++#define IXGBE_PCIEECCCTL0 0x11100 ++#define IXGBE_PCIEECCCTL1 0x11104 ++#define IXGBE_RXDBUECC 0x03F70 ++#define IXGBE_TXDBUECC 0x0CF70 ++#define IXGBE_RXDBUEST 0x03F74 ++#define IXGBE_TXDBUEST 0x0CF74 ++#define IXGBE_PBTXECC 0x0C300 ++#define IXGBE_PBRXECC 0x03300 ++#define IXGBE_GHECCR 0x110B0 + + /* MAC Registers */ +-#define IXGBE_PCS1GCFIG 0x04200 +-#define IXGBE_PCS1GLCTL 0x04208 +-#define IXGBE_PCS1GLSTA 0x0420C +-#define IXGBE_PCS1GDBG0 0x04210 +-#define IXGBE_PCS1GDBG1 0x04214 +-#define IXGBE_PCS1GANA 0x04218 +-#define IXGBE_PCS1GANLP 0x0421C +-#define IXGBE_PCS1GANNP 
0x04220 +-#define IXGBE_PCS1GANLPNP 0x04224 +-#define IXGBE_HLREG0 0x04240 +-#define IXGBE_HLREG1 0x04244 +-#define IXGBE_PAP 0x04248 +-#define IXGBE_MACA 0x0424C +-#define IXGBE_APAE 0x04250 +-#define IXGBE_ARD 0x04254 +-#define IXGBE_AIS 0x04258 +-#define IXGBE_MSCA 0x0425C +-#define IXGBE_MSRWD 0x04260 +-#define IXGBE_MLADD 0x04264 +-#define IXGBE_MHADD 0x04268 +-#define IXGBE_MAXFRS 0x04268 +-#define IXGBE_TREG 0x0426C +-#define IXGBE_PCSS1 0x04288 +-#define IXGBE_PCSS2 0x0428C +-#define IXGBE_XPCSS 0x04290 +-#define IXGBE_MFLCN 0x04294 +-#define IXGBE_SERDESC 0x04298 +-#define IXGBE_MACS 0x0429C +-#define IXGBE_AUTOC 0x042A0 +-#define IXGBE_LINKS 0x042A4 +-#define IXGBE_LINKS2 0x04324 +-#define IXGBE_AUTOC2 0x042A8 +-#define IXGBE_AUTOC3 0x042AC +-#define IXGBE_ANLP1 0x042B0 +-#define IXGBE_ANLP2 0x042B4 +-#define IXGBE_MACC 0x04330 +-#define IXGBE_ATLASCTL 0x04800 +-#define IXGBE_MMNGC 0x042D0 +-#define IXGBE_ANLPNP1 0x042D4 +-#define IXGBE_ANLPNP2 0x042D8 +-#define IXGBE_KRPCSFC 0x042E0 +-#define IXGBE_KRPCSS 0x042E4 +-#define IXGBE_FECS1 0x042E8 +-#define IXGBE_FECS2 0x042EC +-#define IXGBE_SMADARCTL 0x14F10 +-#define IXGBE_MPVC 0x04318 +-#define IXGBE_SGMIIC 0x04314 ++#define IXGBE_PCS1GCFIG 0x04200 ++#define IXGBE_PCS1GLCTL 0x04208 ++#define IXGBE_PCS1GLSTA 0x0420C ++#define IXGBE_PCS1GDBG0 0x04210 ++#define IXGBE_PCS1GDBG1 0x04214 ++#define IXGBE_PCS1GANA 0x04218 ++#define IXGBE_PCS1GANLP 0x0421C ++#define IXGBE_PCS1GANNP 0x04220 ++#define IXGBE_PCS1GANLPNP 0x04224 ++#define IXGBE_HLREG0 0x04240 ++#define IXGBE_HLREG1 0x04244 ++#define IXGBE_PAP 0x04248 ++#define IXGBE_MACA 0x0424C ++#define IXGBE_APAE 0x04250 ++#define IXGBE_ARD 0x04254 ++#define IXGBE_AIS 0x04258 ++#define IXGBE_MSCA 0x0425C ++#define IXGBE_MSRWD 0x04260 ++#define IXGBE_MLADD 0x04264 ++#define IXGBE_MHADD 0x04268 ++#define IXGBE_MAXFRS 0x04268 ++#define IXGBE_TREG 0x0426C ++#define IXGBE_PCSS1 0x04288 ++#define IXGBE_PCSS2 0x0428C ++#define IXGBE_XPCSS 0x04290 ++#define IXGBE_MFLCN 
0x04294 ++#define IXGBE_SERDESC 0x04298 ++#define IXGBE_MAC_SGMII_BUSY 0x04298 ++#define IXGBE_MACS 0x0429C ++#define IXGBE_AUTOC 0x042A0 ++#define IXGBE_LINKS 0x042A4 ++#define IXGBE_LINKS2 0x04324 ++#define IXGBE_AUTOC2 0x042A8 ++#define IXGBE_AUTOC3 0x042AC ++#define IXGBE_ANLP1 0x042B0 ++#define IXGBE_ANLP2 0x042B4 ++#define IXGBE_MACC 0x04330 ++#define IXGBE_ATLASCTL 0x04800 ++#define IXGBE_MMNGC 0x042D0 ++#define IXGBE_ANLPNP1 0x042D4 ++#define IXGBE_ANLPNP2 0x042D8 ++#define IXGBE_KRPCSFC 0x042E0 ++#define IXGBE_KRPCSS 0x042E4 ++#define IXGBE_FECS1 0x042E8 ++#define IXGBE_FECS2 0x042EC ++#define IXGBE_SMADARCTL 0x14F10 ++#define IXGBE_MPVC 0x04318 ++#define IXGBE_SGMIIC 0x04314 + + /* Statistics Registers */ +-#define IXGBE_RXNFGPC 0x041B0 +-#define IXGBE_RXNFGBCL 0x041B4 +-#define IXGBE_RXNFGBCH 0x041B8 +-#define IXGBE_RXDGPC 0x02F50 +-#define IXGBE_RXDGBCL 0x02F54 +-#define IXGBE_RXDGBCH 0x02F58 +-#define IXGBE_RXDDGPC 0x02F5C +-#define IXGBE_RXDDGBCL 0x02F60 +-#define IXGBE_RXDDGBCH 0x02F64 +-#define IXGBE_RXLPBKGPC 0x02F68 +-#define IXGBE_RXLPBKGBCL 0x02F6C +-#define IXGBE_RXLPBKGBCH 0x02F70 +-#define IXGBE_RXDLPBKGPC 0x02F74 +-#define IXGBE_RXDLPBKGBCL 0x02F78 +-#define IXGBE_RXDLPBKGBCH 0x02F7C +-#define IXGBE_TXDGPC 0x087A0 +-#define IXGBE_TXDGBCL 0x087A4 +-#define IXGBE_TXDGBCH 0x087A8 +- +-#define IXGBE_RXDSTATCTRL 0x02F40 ++#define IXGBE_RXNFGPC 0x041B0 ++#define IXGBE_RXNFGBCL 0x041B4 ++#define IXGBE_RXNFGBCH 0x041B8 ++#define IXGBE_RXDGPC 0x02F50 ++#define IXGBE_RXDGBCL 0x02F54 ++#define IXGBE_RXDGBCH 0x02F58 ++#define IXGBE_RXDDGPC 0x02F5C ++#define IXGBE_RXDDGBCL 0x02F60 ++#define IXGBE_RXDDGBCH 0x02F64 ++#define IXGBE_RXLPBKGPC 0x02F68 ++#define IXGBE_RXLPBKGBCL 0x02F6C ++#define IXGBE_RXLPBKGBCH 0x02F70 ++#define IXGBE_RXDLPBKGPC 0x02F74 ++#define IXGBE_RXDLPBKGBCL 0x02F78 ++#define IXGBE_RXDLPBKGBCH 0x02F7C ++#define IXGBE_TXDGPC 0x087A0 ++#define IXGBE_TXDGBCL 0x087A4 ++#define IXGBE_TXDGBCH 0x087A8 ++ ++#define IXGBE_RXDSTATCTRL 0x02F40 + 
+ /* Copper Pond 2 link timeout */ + #define IXGBE_VALIDATE_LINK_READY_TIMEOUT 50 + + /* Omer CORECTL */ +-#define IXGBE_CORECTL 0x014F00 ++#define IXGBE_CORECTL 0x014F00 + /* BARCTRL */ +-#define IXGBE_BARCTRL 0x110F4 +-#define IXGBE_BARCTRL_FLSIZE 0x0700 +-#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 +-#define IXGBE_BARCTRL_CSRSIZE 0x2000 ++#define IXGBE_BARCTRL 0x110F4 ++#define IXGBE_BARCTRL_FLSIZE 0x0700 ++#define IXGBE_BARCTRL_FLSIZE_SHIFT 8 ++#define IXGBE_BARCTRL_CSRSIZE 0x2000 + + /* RSCCTL Bit Masks */ +-#define IXGBE_RSCCTL_RSCEN 0x01 +-#define IXGBE_RSCCTL_MAXDESC_1 0x00 +-#define IXGBE_RSCCTL_MAXDESC_4 0x04 +-#define IXGBE_RSCCTL_MAXDESC_8 0x08 +-#define IXGBE_RSCCTL_MAXDESC_16 0x0C ++#define IXGBE_RSCCTL_RSCEN 0x01 ++#define IXGBE_RSCCTL_MAXDESC_1 0x00 ++#define IXGBE_RSCCTL_MAXDESC_4 0x04 ++#define IXGBE_RSCCTL_MAXDESC_8 0x08 ++#define IXGBE_RSCCTL_MAXDESC_16 0x0C ++#define IXGBE_RSCCTL_TS_DIS 0x02 + + /* RSCDBU Bit Masks */ +-#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F +-#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 ++#define IXGBE_RSCDBU_RSCSMALDIS_MASK 0x0000007F ++#define IXGBE_RSCDBU_RSCACKDIS 0x00000080 + + /* RDRXCTL Bit Masks */ +-#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min Threshold Size */ +-#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ +-#define IXGBE_RDRXCTL_MVMEN 0x00000020 +-#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ +-#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ +-#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ +-#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI */ +-#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC enabled */ +-#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC enabled */ ++#define IXGBE_RDRXCTL_RDMTS_1_2 0x00000000 /* Rx Desc Min THLD Size */ ++#define IXGBE_RDRXCTL_CRCSTRIP 0x00000002 /* CRC Strip */ ++#define IXGBE_RDRXCTL_PSP 0x00000004 /* Pad Small Packet */ ++#define 
IXGBE_RDRXCTL_MVMEN 0x00000020 ++#define IXGBE_RDRXCTL_RSC_PUSH_DIS 0x00000020 ++#define IXGBE_RDRXCTL_DMAIDONE 0x00000008 /* DMA init cycle done */ ++#define IXGBE_RDRXCTL_RSC_PUSH 0x00000080 ++#define IXGBE_RDRXCTL_AGGDIS 0x00010000 /* Aggregation disable */ ++#define IXGBE_RDRXCTL_RSCFRSTSIZE 0x003E0000 /* RSC First packet size */ ++#define IXGBE_RDRXCTL_RSCLLIDIS 0x00800000 /* Disable RSC compl on LLI*/ ++#define IXGBE_RDRXCTL_RSCACKC 0x02000000 /* must set 1 when RSC ena */ ++#define IXGBE_RDRXCTL_FCOE_WRFIX 0x04000000 /* must set 1 when RSC ena */ ++#define IXGBE_RDRXCTL_MBINTEN 0x10000000 ++#define IXGBE_RDRXCTL_MDP_EN 0x20000000 + + /* RQTC Bit Masks and Shifts */ +-#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) +-#define IXGBE_RQTC_TC0_MASK (0x7 << 0) +-#define IXGBE_RQTC_TC1_MASK (0x7 << 4) +-#define IXGBE_RQTC_TC2_MASK (0x7 << 8) +-#define IXGBE_RQTC_TC3_MASK (0x7 << 12) +-#define IXGBE_RQTC_TC4_MASK (0x7 << 16) +-#define IXGBE_RQTC_TC5_MASK (0x7 << 20) +-#define IXGBE_RQTC_TC6_MASK (0x7 << 24) +-#define IXGBE_RQTC_TC7_MASK (0x7 << 28) ++#define IXGBE_RQTC_SHIFT_TC(_i) ((_i) * 4) ++#define IXGBE_RQTC_TC0_MASK (0x7 << 0) ++#define IXGBE_RQTC_TC1_MASK (0x7 << 4) ++#define IXGBE_RQTC_TC2_MASK (0x7 << 8) ++#define IXGBE_RQTC_TC3_MASK (0x7 << 12) ++#define IXGBE_RQTC_TC4_MASK (0x7 << 16) ++#define IXGBE_RQTC_TC5_MASK (0x7 << 20) ++#define IXGBE_RQTC_TC6_MASK (0x7 << 24) ++#define IXGBE_RQTC_TC7_MASK (0x7 << 28) + + /* PSRTYPE.RQPL Bit masks and shift */ +-#define IXGBE_PSRTYPE_RQPL_MASK 0x7 +-#define IXGBE_PSRTYPE_RQPL_SHIFT 29 ++#define IXGBE_PSRTYPE_RQPL_MASK 0x7 ++#define IXGBE_PSRTYPE_RQPL_SHIFT 29 + + /* CTRL Bit Masks */ +-#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ +-#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. 
*/ +-#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ +-#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) ++#define IXGBE_CTRL_GIO_DIS 0x00000004 /* Global IO Master Disable bit */ ++#define IXGBE_CTRL_LNK_RST 0x00000008 /* Link Reset. Resets everything. */ ++#define IXGBE_CTRL_RST 0x04000000 /* Reset (SW) */ ++#define IXGBE_CTRL_RST_MASK (IXGBE_CTRL_LNK_RST | IXGBE_CTRL_RST) + + /* FACTPS */ +-#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */ +-#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ ++#define IXGBE_FACTPS_MNGCG 0x20000000 /* Manageblility Clock Gated */ ++#define IXGBE_FACTPS_LFS 0x40000000 /* LAN Function Select */ + + /* MHADD Bit Masks */ +-#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 +-#define IXGBE_MHADD_MFS_SHIFT 16 ++#define IXGBE_MHADD_MFS_MASK 0xFFFF0000 ++#define IXGBE_MHADD_MFS_SHIFT 16 + + /* Extended Device Control */ +-#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ +-#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ +-#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ +-#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ ++#define IXGBE_CTRL_EXT_PFRSTD 0x00004000 /* Physical Function Reset Done */ ++#define IXGBE_CTRL_EXT_NS_DIS 0x00010000 /* No Snoop disable */ ++#define IXGBE_CTRL_EXT_RO_DIS 0x00020000 /* Relaxed Ordering disable */ ++#define IXGBE_CTRL_EXT_DRV_LOAD 0x10000000 /* Driver loaded bit for FW */ + + /* Direct Cache Access (DCA) definitions */ +-#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ +-#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ +- +-#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ +-#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ +- +-#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ +-#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ +-#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID 
Shift */ +-#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */ +-#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */ +-#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */ +-#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */ +-#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ +-#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ +- +-#define IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ +-#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ +-#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ +-#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ +-#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ +-#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ +-#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ +-#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ ++#define IXGBE_DCA_CTRL_DCA_ENABLE 0x00000000 /* DCA Enable */ ++#define IXGBE_DCA_CTRL_DCA_DISABLE 0x00000001 /* DCA Disable */ ++ ++#define IXGBE_DCA_CTRL_DCA_MODE_CB1 0x00 /* DCA Mode CB1 */ ++#define IXGBE_DCA_CTRL_DCA_MODE_CB2 0x02 /* DCA Mode CB2 */ ++ ++#define IXGBE_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */ ++#define IXGBE_DCA_RXCTRL_CPUID_MASK_82599 0xFF000000 /* Rx CPUID Mask */ ++#define IXGBE_DCA_RXCTRL_CPUID_SHIFT_82599 24 /* Rx CPUID Shift */ ++#define IXGBE_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* Rx Desc enable */ ++#define IXGBE_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* Rx Desc header ena */ ++#define IXGBE_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* Rx Desc payload ena */ ++#define IXGBE_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* Rx rd Desc Relax Order */ ++#define IXGBE_DCA_RXCTRL_DATA_WRO_EN (1 << 13) /* Rx wr data Relax Order */ ++#define IXGBE_DCA_RXCTRL_HEAD_WRO_EN (1 << 15) /* Rx wr header RO */ ++ ++#define 
IXGBE_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */ ++#define IXGBE_DCA_TXCTRL_CPUID_MASK_82599 0xFF000000 /* Tx CPUID Mask */ ++#define IXGBE_DCA_TXCTRL_CPUID_SHIFT_82599 24 /* Tx CPUID Shift */ ++#define IXGBE_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */ ++#define IXGBE_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */ ++#define IXGBE_DCA_TXCTRL_DESC_WRO_EN (1 << 11) /* Tx Desc writeback RO bit */ ++#define IXGBE_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */ ++#define IXGBE_DCA_MAX_QUEUES_82598 16 /* DCA regs only on 16 queues */ + + /* MSCA Bit Masks */ +-#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Address (new protocol) */ +-#define IXGBE_MSCA_NP_ADDR_SHIFT 0 +-#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Device Type (new protocol) */ +-#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old protocol */ +-#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ +-#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ +-#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ +-#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ +-#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ +-#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (write) */ +-#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (read) */ +-#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (read, auto inc)*/ +-#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ +-#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ +-#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new protocol) */ +-#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old protocol) */ +-#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ +-#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress enable */ ++#define IXGBE_MSCA_NP_ADDR_MASK 0x0000FFFF /* MDI Addr (new prot) */ ++#define IXGBE_MSCA_NP_ADDR_SHIFT 0 ++#define IXGBE_MSCA_DEV_TYPE_MASK 0x001F0000 /* Dev Type 
(new prot) */ ++#define IXGBE_MSCA_DEV_TYPE_SHIFT 16 /* Register Address (old prot */ ++#define IXGBE_MSCA_PHY_ADDR_MASK 0x03E00000 /* PHY Address mask */ ++#define IXGBE_MSCA_PHY_ADDR_SHIFT 21 /* PHY Address shift*/ ++#define IXGBE_MSCA_OP_CODE_MASK 0x0C000000 /* OP CODE mask */ ++#define IXGBE_MSCA_OP_CODE_SHIFT 26 /* OP CODE shift */ ++#define IXGBE_MSCA_ADDR_CYCLE 0x00000000 /* OP CODE 00 (addr cycle) */ ++#define IXGBE_MSCA_WRITE 0x04000000 /* OP CODE 01 (wr) */ ++#define IXGBE_MSCA_READ 0x0C000000 /* OP CODE 11 (rd) */ ++#define IXGBE_MSCA_READ_AUTOINC 0x08000000 /* OP CODE 10 (rd auto inc)*/ ++#define IXGBE_MSCA_ST_CODE_MASK 0x30000000 /* ST Code mask */ ++#define IXGBE_MSCA_ST_CODE_SHIFT 28 /* ST Code shift */ ++#define IXGBE_MSCA_NEW_PROTOCOL 0x00000000 /* ST CODE 00 (new prot) */ ++#define IXGBE_MSCA_OLD_PROTOCOL 0x10000000 /* ST CODE 01 (old prot) */ ++#define IXGBE_MSCA_MDI_COMMAND 0x40000000 /* Initiate MDI command */ ++#define IXGBE_MSCA_MDI_IN_PROG_EN 0x80000000 /* MDI in progress ena */ + + /* MSRWD bit masks */ +-#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF +-#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 +-#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 +-#define IXGBE_MSRWD_READ_DATA_SHIFT 16 ++#define IXGBE_MSRWD_WRITE_DATA_MASK 0x0000FFFF ++#define IXGBE_MSRWD_WRITE_DATA_SHIFT 0 ++#define IXGBE_MSRWD_READ_DATA_MASK 0xFFFF0000 ++#define IXGBE_MSRWD_READ_DATA_SHIFT 16 + + /* Atlas registers */ +-#define IXGBE_ATLAS_PDN_LPBK 0x24 +-#define IXGBE_ATLAS_PDN_10G 0xB +-#define IXGBE_ATLAS_PDN_1G 0xC +-#define IXGBE_ATLAS_PDN_AN 0xD ++#define IXGBE_ATLAS_PDN_LPBK 0x24 ++#define IXGBE_ATLAS_PDN_10G 0xB ++#define IXGBE_ATLAS_PDN_1G 0xC ++#define IXGBE_ATLAS_PDN_AN 0xD + + /* Atlas bit masks */ +-#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 +-#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 +-#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 +-#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 +-#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 ++#define IXGBE_ATLASCTL_WRITE_CMD 0x00010000 
++#define IXGBE_ATLAS_PDN_TX_REG_EN 0x10 ++#define IXGBE_ATLAS_PDN_TX_10G_QL_ALL 0xF0 ++#define IXGBE_ATLAS_PDN_TX_1G_QL_ALL 0xF0 ++#define IXGBE_ATLAS_PDN_TX_AN_QL_ALL 0xF0 + + /* Omer bit masks */ +-#define IXGBE_CORECTL_WRITE_CMD 0x00010000 +- +-/* MDIO definitions */ +- +-#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ +- +-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Control Reg */ +-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ +-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ +-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0 - 10G, 1 - 1G */ +-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 +-#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 +- +-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ +-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ +-#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ ++#define IXGBE_CORECTL_WRITE_CMD 0x00010000 ++ ++/* Device Type definitions for new protocol MDIO commands */ ++#define IXGBE_MDIO_ZERO_DEV_TYPE 0x0 ++#define IXGBE_MDIO_PMA_PMD_DEV_TYPE 0x1 ++#define IXGBE_MDIO_PCS_DEV_TYPE 0x3 ++#define IXGBE_MDIO_PHY_XS_DEV_TYPE 0x4 ++#define IXGBE_MDIO_AUTO_NEG_DEV_TYPE 0x7 ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE 0x1E /* Device 30 */ ++#define IXGBE_TWINAX_DEV 1 ++ ++#define IXGBE_MDIO_COMMAND_TIMEOUT 100 /* PHY Timeout for 1 GB mode */ ++ ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_CONTROL 0x0 /* VS1 Ctrl Reg */ ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_STATUS 0x1 /* VS1 Status Reg */ ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_LINK_STATUS 0x0008 /* 1 = Link Up */ ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_SPEED_STATUS 0x0010 /* 0-10G, 1-1G */ ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_10G_SPEED 0x0018 ++#define IXGBE_MDIO_VENDOR_SPECIFIC_1_1G_SPEED 0x0010 ++ ++#define IXGBE_MDIO_AUTO_NEG_CONTROL 0x0 /* AUTO_NEG Control Reg */ ++#define 
IXGBE_MDIO_AUTO_NEG_STATUS 0x1 /* AUTO_NEG Status Reg */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STAT 0xC800 /* AUTO_NEG Vendor Status Reg */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM 0xCC00 /* AUTO_NEG Vendor TX Reg */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2 0xCC01 /* AUTO_NEG Vendor Tx Reg */ ++#define IXGBE_MDIO_AUTO_NEG_VEN_LSC 0x1 /* AUTO_NEG Vendor Tx LSC */ ++#define IXGBE_MDIO_AUTO_NEG_ADVT 0x10 /* AUTO_NEG Advt Reg */ ++#define IXGBE_MDIO_AUTO_NEG_LP 0x13 /* AUTO_NEG LP Status Reg */ ++#define IXGBE_MDIO_AUTO_NEG_EEE_ADVT 0x3C /* AUTO_NEG EEE Advt Reg */ ++#define IXGBE_AUTO_NEG_10GBASE_EEE_ADVT 0x8 /* AUTO NEG EEE 10GBaseT Advt */ ++#define IXGBE_AUTO_NEG_1000BASE_EEE_ADVT 0x4 /* AUTO NEG EEE 1000BaseT Advt */ ++#define IXGBE_AUTO_NEG_100BASE_EEE_ADVT 0x2 /* AUTO NEG EEE 100BaseT Advt */ ++#define IXGBE_MDIO_PHY_XS_CONTROL 0x0 /* PHY_XS Control Reg */ ++#define IXGBE_MDIO_PHY_XS_RESET 0x8000 /* PHY_XS Reset */ ++#define IXGBE_MDIO_PHY_ID_HIGH 0x2 /* PHY ID High Reg*/ ++#define IXGBE_MDIO_PHY_ID_LOW 0x3 /* PHY ID Low Reg*/ ++#define IXGBE_MDIO_PHY_SPEED_ABILITY 0x4 /* Speed Ability Reg */ ++#define IXGBE_MDIO_PHY_SPEED_10G 0x0001 /* 10G capable */ ++#define IXGBE_MDIO_PHY_SPEED_1G 0x0010 /* 1G capable */ ++#define IXGBE_MDIO_PHY_SPEED_100M 0x0020 /* 100M capable */ ++#define IXGBE_MDIO_PHY_EXT_ABILITY 0xB /* Ext Ability Reg */ ++#define IXGBE_MDIO_PHY_10GBASET_ABILITY 0x0004 /* 10GBaseT capable */ ++#define IXGBE_MDIO_PHY_1000BASET_ABILITY 0x0020 /* 1000BaseT capable */ ++#define IXGBE_MDIO_PHY_100BASETX_ABILITY 0x0080 /* 100BaseTX capable */ ++#define IXGBE_MDIO_PHY_SET_LOW_POWER_MODE 0x0800 /* Set low power mode */ ++#define IXGBE_AUTO_NEG_LP_STATUS 0xE820 /* AUTO NEG Rx LP Status Reg */ ++#define IXGBE_AUTO_NEG_LP_1000BASE_CAP 0x8000 /* AUTO NEG Rx LP 1000BaseT Cap */ ++#define IXGBE_AUTO_NEG_LP_10GBASE_CAP 0x0800 /* AUTO NEG Rx LP 10GBaseT Cap */ ++#define IXGBE_AUTO_NEG_10GBASET_STAT 0x0021 /* AUTO NEG 10G BaseT Stat */ ++ ++#define 
IXGBE_MDIO_TX_VENDOR_ALARMS_3 0xCC02 /* Vendor Alarms 3 Reg */ ++#define IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK 0x3 /* PHY Reset Complete Mask */ ++#define IXGBE_MDIO_GLOBAL_RES_PR_10 0xC479 /* Global Resv Provisioning 10 Reg */ ++#define IXGBE_MDIO_POWER_UP_STALL 0x8000 /* Power Up Stall */ ++#define IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK 0xFF00 /* int std mask */ ++#define IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG 0xFC00 /* chip std int flag */ ++#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK 0xFF01 /* int chip-wide mask */ ++#define IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG 0xFC01 /* int chip-wide mask */ ++#define IXGBE_MDIO_GLOBAL_ALARM_1 0xCC00 /* Global alarm 1 */ ++#define IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT 0x0010 /* device fault */ ++#define IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL 0x4000 /* high temp failure */ ++#define IXGBE_MDIO_GLOBAL_FAULT_MSG 0xC850 /* Global Fault Message */ ++#define IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP 0x8007 /* high temp failure */ ++#define IXGBE_MDIO_GLOBAL_INT_MASK 0xD400 /* Global int mask */ ++#define IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN 0x1000 /* autoneg vendor alarm int enable */ ++#define IXGBE_MDIO_GLOBAL_ALARM_1_INT 0x4 /* int in Global alarm 1 */ ++#define IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN 0x1 /* vendor alarm int enable */ ++#define IXGBE_MDIO_GLOBAL_STD_ALM2_INT 0x200 /* vendor alarm2 int mask */ ++#define IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN 0x4000 /* int high temp enable */ ++#define IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN 0x0010 /* int dev fault enable */ ++#define IXGBE_MDIO_PMA_PMD_CONTROL_ADDR 0x0000 /* PMA/PMD Control Reg */ ++#define IXGBE_MDIO_PMA_PMD_SDA_SCL_ADDR 0xC30A /* PHY_XS SDA/SCL Addr Reg */ ++#define IXGBE_MDIO_PMA_PMD_SDA_SCL_DATA 0xC30B /* PHY_XS SDA/SCL Data Reg */ ++#define IXGBE_MDIO_PMA_PMD_SDA_SCL_STAT 0xC30C /* PHY_XS SDA/SCL Status Reg */ ++#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK 0xD401 /* PHY TX Vendor LASI */ ++#define IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN 0x1 /* PHY TX Vendor LASI enable */ ++#define 
IXGBE_MDIO_PMD_STD_TX_DISABLE_CNTR 0x9 /* Standard Transmit Dis Reg */ ++#define IXGBE_MDIO_PMD_GLOBAL_TX_DISABLE 0x0001 /* PMD Global Transmit Dis */ ++ ++#define IXGBE_PCRC8ECL 0x0E810 /* PCR CRC-8 Error Count Lo */ ++#define IXGBE_PCRC8ECH 0x0E811 /* PCR CRC-8 Error Count Hi */ ++#define IXGBE_PCRC8ECH_MASK 0x1F ++#define IXGBE_LDPCECL 0x0E820 /* PCR Uncorrected Error Count Lo */ ++#define IXGBE_LDPCECH 0x0E821 /* PCR Uncorrected Error Count Hi */ + + /* MII clause 22/28 definitions */ ++#define IXGBE_MDIO_PHY_LOW_POWER_MODE 0x0800 ++ ++#define IXGBE_MDIO_XENPAK_LASI_STATUS 0x9005 /* XENPAK LASI Status register*/ ++#define IXGBE_XENPAK_LASI_LINK_STATUS_ALARM 0x1 /* Link Status Alarm change */ ++ ++#define IXGBE_MDIO_AUTO_NEG_LINK_STATUS 0x4 /* Indicates if link is up */ ++ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK 0x7 /* Speed/Duplex Mask */ ++#define IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK 0x6 /* Speed Mask */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_HALF 0x0 /* 10Mb/s Half Duplex */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10M_FULL 0x1 /* 10Mb/s Full Duplex */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_HALF 0x2 /* 100Mb/s Half Duplex */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_100M_FULL 0x3 /* 100Mb/s Full Duplex */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_HALF 0x4 /* 1Gb/s Half Duplex */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL 0x5 /* 1Gb/s Full Duplex */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_HALF 0x6 /* 10Gb/s Half Duplex */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL 0x7 /* 10Gb/s Full Duplex */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB 0x4 /* 1Gb/s */ ++#define IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB 0x6 /* 10Gb/s */ ++ ++#define IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG 0x20 /* 10G Control Reg */ + #define IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG 0xC400 /* 1G Provisioning 1 */ +-#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ +-#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 
0x4000 /* full duplex, bit:14*/ +-#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ +-#define IXGBE_MII_AUTONEG_REG 0x0 +- +-#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 +-#define IXGBE_MAX_PHY_ADDR 32 ++#define IXGBE_MII_AUTONEG_XNP_TX_REG 0x17 /* 1G XNP Transmit */ ++#define IXGBE_MII_AUTONEG_ADVERTISE_REG 0x10 /* 100M Advertisement */ ++#define IXGBE_MII_10GBASE_T_ADVERTISE 0x1000 /* full duplex, bit:12*/ ++#define IXGBE_MII_1GBASE_T_ADVERTISE_XNP_TX 0x4000 /* full duplex, bit:14*/ ++#define IXGBE_MII_1GBASE_T_ADVERTISE 0x8000 /* full duplex, bit:15*/ ++#define IXGBE_MII_2_5GBASE_T_ADVERTISE 0x0400 ++#define IXGBE_MII_5GBASE_T_ADVERTISE 0x0800 ++#define IXGBE_MII_100BASE_T_ADVERTISE 0x0100 /* full duplex, bit:8 */ ++#define IXGBE_MII_100BASE_T_ADVERTISE_HALF 0x0080 /* half duplex, bit:7 */ ++#define IXGBE_MII_RESTART 0x200 ++#define IXGBE_MII_AUTONEG_COMPLETE 0x20 ++#define IXGBE_MII_AUTONEG_LINK_UP 0x04 ++#define IXGBE_MII_AUTONEG_REG 0x0 ++ ++#define IXGBE_PHY_REVISION_MASK 0xFFFFFFF0 ++#define IXGBE_MAX_PHY_ADDR 32 + + /* PHY IDs*/ +-#define TN1010_PHY_ID 0x00A19410 +-#define TNX_FW_REV 0xB +-#define X540_PHY_ID 0x01540200 +-#define QT2022_PHY_ID 0x0043A400 +-#define ATH_PHY_ID 0x03429050 +-#define AQ_FW_REV 0x20 ++#define TN1010_PHY_ID 0x00A19410 ++#define TNX_FW_REV 0xB ++#define X540_PHY_ID 0x01540200 ++#define X550_PHY_ID2 0x01540223 ++#define X550_PHY_ID3 0x01540221 ++#define X557_PHY_ID 0x01540240 ++#define X557_PHY_ID2 0x01540250 ++#define AQ_FW_REV 0x20 ++#define QT2022_PHY_ID 0x0043A400 ++#define ATH_PHY_ID 0x03429050 + + /* PHY Types */ +-#define IXGBE_M88E1145_E_PHY_ID 0x01410CD0 ++#define IXGBE_M88E1500_E_PHY_ID 0x01410DD0 ++#define IXGBE_M88E1543_E_PHY_ID 0x01410EA0 + + /* Special PHY Init Routine */ +-#define IXGBE_PHY_INIT_OFFSET_NL 0x002B +-#define IXGBE_PHY_INIT_END_NL 0xFFFF +-#define IXGBE_CONTROL_MASK_NL 0xF000 +-#define IXGBE_DATA_MASK_NL 0x0FFF +-#define IXGBE_CONTROL_SHIFT_NL 12 +-#define IXGBE_DELAY_NL 0 +-#define 
IXGBE_DATA_NL 1 +-#define IXGBE_CONTROL_NL 0x000F +-#define IXGBE_CONTROL_EOL_NL 0x0FFF +-#define IXGBE_CONTROL_SOL_NL 0x0000 ++#define IXGBE_PHY_INIT_OFFSET_NL 0x002B ++#define IXGBE_PHY_INIT_END_NL 0xFFFF ++#define IXGBE_CONTROL_MASK_NL 0xF000 ++#define IXGBE_DATA_MASK_NL 0x0FFF ++#define IXGBE_CONTROL_SHIFT_NL 12 ++#define IXGBE_DELAY_NL 0 ++#define IXGBE_DATA_NL 1 ++#define IXGBE_CONTROL_NL 0x000F ++#define IXGBE_CONTROL_EOL_NL 0x0FFF ++#define IXGBE_CONTROL_SOL_NL 0x0000 + + /* General purpose Interrupt Enable */ +-#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ +-#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ +-#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ +-#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ +-#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ +-#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ +-#define IXGBE_GPIE_EIAME 0x40000000 +-#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 +-#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 +-#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ +-#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ +-#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ +-#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ ++#define IXGBE_SDP0_GPIEN 0x00000001 /* SDP0 */ ++#define IXGBE_SDP1_GPIEN 0x00000002 /* SDP1 */ ++#define IXGBE_SDP2_GPIEN 0x00000004 /* SDP2 */ ++#define IXGBE_SDP0_GPIEN_X540 0x00000002 /* SDP0 on X540 and X550 */ ++#define IXGBE_SDP1_GPIEN_X540 0x00000004 /* SDP1 on X540 and X550 */ ++#define IXGBE_SDP2_GPIEN_X540 0x00000008 /* SDP2 on X540 and X550 */ ++#define IXGBE_SDP0_GPIEN_X550 IXGBE_SDP0_GPIEN_X540 ++#define IXGBE_SDP1_GPIEN_X550 IXGBE_SDP1_GPIEN_X540 ++#define IXGBE_SDP2_GPIEN_X550 IXGBE_SDP2_GPIEN_X540 ++#define IXGBE_SDP0_GPIEN_X550EM_x IXGBE_SDP0_GPIEN_X540 ++#define IXGBE_SDP1_GPIEN_X550EM_x IXGBE_SDP1_GPIEN_X540 ++#define IXGBE_SDP2_GPIEN_X550EM_x IXGBE_SDP2_GPIEN_X540 ++#define IXGBE_SDP0_GPIEN_X550EM_a 
IXGBE_SDP0_GPIEN_X540 ++#define IXGBE_SDP1_GPIEN_X550EM_a IXGBE_SDP1_GPIEN_X540 ++#define IXGBE_SDP2_GPIEN_X550EM_a IXGBE_SDP2_GPIEN_X540 ++#define IXGBE_SDP0_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP0_GPIEN) ++#define IXGBE_SDP1_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP1_GPIEN) ++#define IXGBE_SDP2_GPIEN_BY_MAC(_hw) IXGBE_BY_MAC((_hw), SDP2_GPIEN) ++ ++#define IXGBE_GPIE_MSIX_MODE 0x00000010 /* MSI-X mode */ ++#define IXGBE_GPIE_OCD 0x00000020 /* Other Clear Disable */ ++#define IXGBE_GPIE_EIMEN 0x00000040 /* Immediate Interrupt Enable */ ++#define IXGBE_GPIE_EIAME 0x40000000 ++#define IXGBE_GPIE_PBA_SUPPORT 0x80000000 ++#define IXGBE_GPIE_RSC_DELAY_SHIFT 11 ++#define IXGBE_GPIE_VTMODE_MASK 0x0000C000 /* VT Mode Mask */ ++#define IXGBE_GPIE_VTMODE_16 0x00004000 /* 16 VFs 8 queues per VF */ ++#define IXGBE_GPIE_VTMODE_32 0x00008000 /* 32 VFs 4 queues per VF */ ++#define IXGBE_GPIE_VTMODE_64 0x0000C000 /* 64 VFs 2 queues per VF */ + + /* Packet Buffer Initialization */ +-#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ +-#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ +-#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ +-#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ +-#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ +-#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB Packet Buffer */ +-#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer*/ +-#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer*/ +- +-#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ ++#define IXGBE_MAX_PACKET_BUFFERS 8 ++ ++#define IXGBE_TXPBSIZE_20KB 0x00005000 /* 20KB Packet Buffer */ ++#define IXGBE_TXPBSIZE_40KB 0x0000A000 /* 40KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_48KB 0x0000C000 /* 48KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_64KB 0x00010000 /* 64KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_80KB 0x00014000 /* 80KB Packet Buffer */ ++#define IXGBE_RXPBSIZE_128KB 0x00020000 /* 128KB 
Packet Buffer */ ++#define IXGBE_RXPBSIZE_MAX 0x00080000 /* 512KB Packet Buffer */ ++#define IXGBE_TXPBSIZE_MAX 0x00028000 /* 160KB Packet Buffer */ ++ ++#define IXGBE_TXPKT_SIZE_MAX 0xA /* Max Tx Packet size */ + #define IXGBE_MAX_PB 8 + + /* Packet buffer allocation strategies */ + enum { +- PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ ++ PBA_STRATEGY_EQUAL = 0, /* Distribute PB space equally */ + #define PBA_STRATEGY_EQUAL PBA_STRATEGY_EQUAL +- PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ ++ PBA_STRATEGY_WEIGHTED = 1, /* Weight front half of TCs */ + #define PBA_STRATEGY_WEIGHTED PBA_STRATEGY_WEIGHTED + }; + + /* Transmit Flow Control status */ +-#define IXGBE_TFCS_TXOFF 0x00000001 +-#define IXGBE_TFCS_TXOFF0 0x00000100 +-#define IXGBE_TFCS_TXOFF1 0x00000200 +-#define IXGBE_TFCS_TXOFF2 0x00000400 +-#define IXGBE_TFCS_TXOFF3 0x00000800 +-#define IXGBE_TFCS_TXOFF4 0x00001000 +-#define IXGBE_TFCS_TXOFF5 0x00002000 +-#define IXGBE_TFCS_TXOFF6 0x00004000 +-#define IXGBE_TFCS_TXOFF7 0x00008000 ++#define IXGBE_TFCS_TXOFF 0x00000001 ++#define IXGBE_TFCS_TXOFF0 0x00000100 ++#define IXGBE_TFCS_TXOFF1 0x00000200 ++#define IXGBE_TFCS_TXOFF2 0x00000400 ++#define IXGBE_TFCS_TXOFF3 0x00000800 ++#define IXGBE_TFCS_TXOFF4 0x00001000 ++#define IXGBE_TFCS_TXOFF5 0x00002000 ++#define IXGBE_TFCS_TXOFF6 0x00004000 ++#define IXGBE_TFCS_TXOFF7 0x00008000 + + /* TCP Timer */ +-#define IXGBE_TCPTIMER_KS 0x00000100 +-#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 +-#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 +-#define IXGBE_TCPTIMER_LOOP 0x00000800 +-#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF ++#define IXGBE_TCPTIMER_KS 0x00000100 ++#define IXGBE_TCPTIMER_COUNT_ENABLE 0x00000200 ++#define IXGBE_TCPTIMER_COUNT_FINISH 0x00000400 ++#define IXGBE_TCPTIMER_LOOP 0x00000800 ++#define IXGBE_TCPTIMER_DURATION_MASK 0x000000FF + + /* HLREG0 Bit Masks */ +-#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ +-#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ 
+-#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ +-#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ +-#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ +-#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ +-#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ +-#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ +-#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ +-#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ +-#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ +-#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ +-#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ +-#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ +-#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ ++#define IXGBE_HLREG0_TXCRCEN 0x00000001 /* bit 0 */ ++#define IXGBE_HLREG0_RXCRCSTRP 0x00000002 /* bit 1 */ ++#define IXGBE_HLREG0_JUMBOEN 0x00000004 /* bit 2 */ ++#define IXGBE_HLREG0_TXPADEN 0x00000400 /* bit 10 */ ++#define IXGBE_HLREG0_TXPAUSEEN 0x00001000 /* bit 12 */ ++#define IXGBE_HLREG0_RXPAUSEEN 0x00004000 /* bit 14 */ ++#define IXGBE_HLREG0_LPBK 0x00008000 /* bit 15 */ ++#define IXGBE_HLREG0_MDCSPD 0x00010000 /* bit 16 */ ++#define IXGBE_HLREG0_CONTMDC 0x00020000 /* bit 17 */ ++#define IXGBE_HLREG0_CTRLFLTR 0x00040000 /* bit 18 */ ++#define IXGBE_HLREG0_PREPEND 0x00F00000 /* bits 20-23 */ ++#define IXGBE_HLREG0_PRIPAUSEEN 0x01000000 /* bit 24 */ ++#define IXGBE_HLREG0_RXPAUSERECDA 0x06000000 /* bits 25-26 */ ++#define IXGBE_HLREG0_RXLNGTHERREN 0x08000000 /* bit 27 */ ++#define IXGBE_HLREG0_RXPADSTRIPEN 0x10000000 /* bit 28 */ + + /* VMD_CTL bitmasks */ +-#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 +-#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 ++#define IXGBE_VMD_CTL_VMDQ_EN 0x00000001 ++#define IXGBE_VMD_CTL_VMDQ_FILTER 0x00000002 + + /* VT_CTL bitmasks */ +-#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ +-#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ +-#define IXGBE_VT_CTL_VT_ENABLE 
0x00000001 /* Enable VT Mode */ +-#define IXGBE_VT_CTL_POOL_SHIFT 7 +-#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) ++#define IXGBE_VT_CTL_DIS_DEFPL 0x20000000 /* disable default pool */ ++#define IXGBE_VT_CTL_REPLEN 0x40000000 /* replication enabled */ ++#define IXGBE_VT_CTL_VT_ENABLE 0x00000001 /* Enable VT Mode */ ++#define IXGBE_VT_CTL_POOL_SHIFT 7 ++#define IXGBE_VT_CTL_POOL_MASK (0x3F << IXGBE_VT_CTL_POOL_SHIFT) + + /* VMOLR bitmasks */ +-#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ +-#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ +-#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ +-#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ +-#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ ++#define IXGBE_VMOLR_UPE 0x00400000 /* unicast promiscuous */ ++#define IXGBE_VMOLR_VPE 0x00800000 /* VLAN promiscuous */ ++#define IXGBE_VMOLR_AUPE 0x01000000 /* accept untagged packets */ ++#define IXGBE_VMOLR_ROMPE 0x02000000 /* accept packets in MTA tbl */ ++#define IXGBE_VMOLR_ROPE 0x04000000 /* accept packets in UC tbl */ ++#define IXGBE_VMOLR_BAM 0x08000000 /* accept broadcast packets */ ++#define IXGBE_VMOLR_MPE 0x10000000 /* multicast promiscuous */ + + /* VFRE bitmask */ +-#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF ++#define IXGBE_VFRE_ENABLE_ALL 0xFFFFFFFF + +-#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ ++#define IXGBE_VF_INIT_TIMEOUT 200 /* Number of retries to clear RSTI */ + + /* RDHMPN and TDHMPN bitmasks */ +-#define IXGBE_RDHMPN_RDICADDR 0x007FF800 +-#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 +-#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 +-#define IXGBE_TDHMPN_TDICADDR 0x003FF800 +-#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 +-#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 +- +-#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 +-#define IXGBE_RDMAM_DWORD_SHIFT 9 +-#define IXGBE_RDMAM_DESC_COMP_FIFO 1 +-#define IXGBE_RDMAM_DFC_CMD_FIFO 2 +-#define 
IXGBE_RDMAM_TCN_STATUS_RAM 4 +-#define IXGBE_RDMAM_WB_COLL_FIFO 5 +-#define IXGBE_RDMAM_QSC_CNT_RAM 6 +-#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 +-#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA +-#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 +-#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 +-#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 +-#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 +-#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 +-#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 +-#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 +-#define IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 +-#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 +-#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 +-#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 +-#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 +-#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 +-#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 +- +-#define IXGBE_TXDESCIC_READY 0x80000000 ++#define IXGBE_RDHMPN_RDICADDR 0x007FF800 ++#define IXGBE_RDHMPN_RDICRDREQ 0x00800000 ++#define IXGBE_RDHMPN_RDICADDR_SHIFT 11 ++#define IXGBE_TDHMPN_TDICADDR 0x003FF800 ++#define IXGBE_TDHMPN_TDICRDREQ 0x00800000 ++#define IXGBE_TDHMPN_TDICADDR_SHIFT 11 ++ ++#define IXGBE_RDMAM_MEM_SEL_SHIFT 13 ++#define IXGBE_RDMAM_DWORD_SHIFT 9 ++#define IXGBE_RDMAM_DESC_COMP_FIFO 1 ++#define IXGBE_RDMAM_DFC_CMD_FIFO 2 ++#define IXGBE_RDMAM_RSC_HEADER_ADDR 3 ++#define IXGBE_RDMAM_TCN_STATUS_RAM 4 ++#define IXGBE_RDMAM_WB_COLL_FIFO 5 ++#define IXGBE_RDMAM_QSC_CNT_RAM 6 ++#define IXGBE_RDMAM_QSC_FCOE_RAM 7 ++#define IXGBE_RDMAM_QSC_QUEUE_CNT 8 ++#define IXGBE_RDMAM_QSC_QUEUE_RAM 0xA ++#define IXGBE_RDMAM_QSC_RSC_RAM 0xB ++#define IXGBE_RDMAM_DESC_COM_FIFO_RANGE 135 ++#define IXGBE_RDMAM_DESC_COM_FIFO_COUNT 4 ++#define IXGBE_RDMAM_DFC_CMD_FIFO_RANGE 48 ++#define IXGBE_RDMAM_DFC_CMD_FIFO_COUNT 7 ++#define IXGBE_RDMAM_RSC_HEADER_ADDR_RANGE 32 ++#define IXGBE_RDMAM_RSC_HEADER_ADDR_COUNT 4 ++#define IXGBE_RDMAM_TCN_STATUS_RAM_RANGE 256 ++#define IXGBE_RDMAM_TCN_STATUS_RAM_COUNT 9 ++#define IXGBE_RDMAM_WB_COLL_FIFO_RANGE 8 ++#define 
IXGBE_RDMAM_WB_COLL_FIFO_COUNT 4 ++#define IXGBE_RDMAM_QSC_CNT_RAM_RANGE 64 ++#define IXGBE_RDMAM_QSC_CNT_RAM_COUNT 4 ++#define IXGBE_RDMAM_QSC_FCOE_RAM_RANGE 512 ++#define IXGBE_RDMAM_QSC_FCOE_RAM_COUNT 5 ++#define IXGBE_RDMAM_QSC_QUEUE_CNT_RANGE 32 ++#define IXGBE_RDMAM_QSC_QUEUE_CNT_COUNT 4 ++#define IXGBE_RDMAM_QSC_QUEUE_RAM_RANGE 128 ++#define IXGBE_RDMAM_QSC_QUEUE_RAM_COUNT 8 ++#define IXGBE_RDMAM_QSC_RSC_RAM_RANGE 32 ++#define IXGBE_RDMAM_QSC_RSC_RAM_COUNT 8 ++ ++#define IXGBE_TXDESCIC_READY 0x80000000 + + /* Receive Checksum Control */ +-#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ +-#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ ++#define IXGBE_RXCSUM_IPPCSE 0x00001000 /* IP payload checksum enable */ ++#define IXGBE_RXCSUM_PCSD 0x00002000 /* packet checksum disabled */ + + /* FCRTL Bit Masks */ +-#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ +-#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ ++#define IXGBE_FCRTL_XONE 0x80000000 /* XON enable */ ++#define IXGBE_FCRTH_FCEN 0x80000000 /* Packet buffer fc enable */ + + /* PAP bit masks*/ +-#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ ++#define IXGBE_PAP_TXPAUSECNT_MASK 0x0000FFFF /* Pause counter mask */ + + /* RMCS Bit Masks */ +-#define IXGBE_RMCS_RRM 0x00000002 /* Receive Recycle Mode enable */ ++#define IXGBE_RMCS_RRM 0x00000002 /* Rx Recycle Mode enable */ + /* Receive Arbitration Control: 0 Round Robin, 1 DFP */ +-#define IXGBE_RMCS_RAC 0x00000004 +-#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC /* Deficit Fixed Priority ena */ +-#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ +-#define IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ +-#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ ++#define IXGBE_RMCS_RAC 0x00000004 ++/* Deficit Fixed Prio ena */ ++#define IXGBE_RMCS_DFP IXGBE_RMCS_RAC ++#define IXGBE_RMCS_TFCE_802_3X 0x00000008 /* Tx Priority FC ena */ ++#define 
IXGBE_RMCS_TFCE_PRIORITY 0x00000010 /* Tx Priority FC ena */ ++#define IXGBE_RMCS_ARBDIS 0x00000040 /* Arbitration disable bit */ + + /* FCCFG Bit Masks */ +-#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ +-#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ ++#define IXGBE_FCCFG_TFCE_802_3X 0x00000008 /* Tx link FC enable */ ++#define IXGBE_FCCFG_TFCE_PRIORITY 0x00000010 /* Tx priority FC enable */ + + /* Interrupt register bitmasks */ + + /* Extended Interrupt Cause Read */ +-#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ +-#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ +-#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ +-#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ +-#define IXGBE_EICR_MAILBOX 0x00080000 /* VF to PF Mailbox Interrupt */ +-#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ +-#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ +-#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ +-#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ +-#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ +-#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ +-#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ +-#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ +-#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ +-#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ +-#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ +-#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ +-#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ ++#define IXGBE_EICR_RTX_QUEUE 0x0000FFFF /* RTx Queue Interrupt */ ++#define IXGBE_EICR_FLOW_DIR 0x00010000 /* FDir Exception */ ++#define IXGBE_EICR_RX_MISS 0x00020000 /* Packet Buffer Overrun */ ++#define IXGBE_EICR_PCI 0x00040000 /* PCI Exception */ ++#define IXGBE_EICR_MAILBOX 
0x00080000 /* VF to PF Mailbox Interrupt */ ++#define IXGBE_EICR_LSC 0x00100000 /* Link Status Change */ ++#define IXGBE_EICR_LINKSEC 0x00200000 /* PN Threshold */ ++#define IXGBE_EICR_MNG 0x00400000 /* Manageability Event Interrupt */ ++#define IXGBE_EICR_TS 0x00800000 /* Thermal Sensor Event */ ++#define IXGBE_EICR_TIMESYNC 0x01000000 /* Timesync Event */ ++#define IXGBE_EICR_GPI_SDP0 0x01000000 /* Gen Purpose Interrupt on SDP0 */ ++#define IXGBE_EICR_GPI_SDP1 0x02000000 /* Gen Purpose Interrupt on SDP1 */ ++#define IXGBE_EICR_GPI_SDP2 0x04000000 /* Gen Purpose Interrupt on SDP2 */ ++#define IXGBE_EICR_ECC 0x10000000 /* ECC Error */ ++#define IXGBE_EICR_GPI_SDP0_X540 0x02000000 /* Gen Purpose Interrupt on SDP0 */ ++#define IXGBE_EICR_GPI_SDP1_X540 0x04000000 /* Gen Purpose Interrupt on SDP1 */ ++#define IXGBE_EICR_GPI_SDP2_X540 0x08000000 /* Gen Purpose Interrupt on SDP2 */ ++#define IXGBE_EICR_GPI_SDP0_X550 IXGBE_EICR_GPI_SDP0_X540 ++#define IXGBE_EICR_GPI_SDP1_X550 IXGBE_EICR_GPI_SDP1_X540 ++#define IXGBE_EICR_GPI_SDP2_X550 IXGBE_EICR_GPI_SDP2_X540 ++#define IXGBE_EICR_GPI_SDP0_X550EM_x IXGBE_EICR_GPI_SDP0_X540 ++#define IXGBE_EICR_GPI_SDP1_X550EM_x IXGBE_EICR_GPI_SDP1_X540 ++#define IXGBE_EICR_GPI_SDP2_X550EM_x IXGBE_EICR_GPI_SDP2_X540 ++#define IXGBE_EICR_GPI_SDP0_X550EM_a IXGBE_EICR_GPI_SDP0_X540 ++#define IXGBE_EICR_GPI_SDP1_X550EM_a IXGBE_EICR_GPI_SDP1_X540 ++#define IXGBE_EICR_GPI_SDP2_X550EM_a IXGBE_EICR_GPI_SDP2_X540 ++#define IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP0) ++#define IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP1) ++#define IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) IXGBE_BY_MAC((_hw), EICR_GPI_SDP2) ++ ++#define IXGBE_EICR_PBUR 0x10000000 /* Packet Buffer Handler Error */ ++#define IXGBE_EICR_DHER 0x20000000 /* Descriptor Handler Error */ ++#define IXGBE_EICR_TCP_TIMER 0x40000000 /* TCP Timer */ ++#define IXGBE_EICR_OTHER 0x80000000 /* Interrupt Cause Active */ + + /* Extended Interrupt Cause Set */ +-#define 
IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +-#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +-#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ +-#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ +-#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +-#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ +-#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +-#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +-#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +-#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +-#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +-#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ +-#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +-#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ +-#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +-#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ ++#define IXGBE_EICS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ ++#define IXGBE_EICS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ ++#define IXGBE_EICS_RX_MISS IXGBE_EICR_RX_MISS /* Pkt Buffer Overrun */ ++#define IXGBE_EICS_PCI IXGBE_EICR_PCI /* PCI Exception */ ++#define IXGBE_EICS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ ++#define IXGBE_EICS_LSC IXGBE_EICR_LSC /* Link Status Change */ ++#define IXGBE_EICS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ ++#define IXGBE_EICS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ ++#define IXGBE_EICS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ ++#define IXGBE_EICS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ ++#define IXGBE_EICS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ ++#define IXGBE_EICS_ECC IXGBE_EICR_ECC /* ECC Error */ ++#define IXGBE_EICS_GPI_SDP0_BY_MAC(_hw) 
IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) ++#define IXGBE_EICS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) ++#define IXGBE_EICS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) ++#define IXGBE_EICS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ ++#define IXGBE_EICS_DHER IXGBE_EICR_DHER /* Desc Handler Error */ ++#define IXGBE_EICS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ ++#define IXGBE_EICS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + + /* Extended Interrupt Mask Set */ +-#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +-#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +-#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +-#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ +-#define IXGBE_EIMS_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +-#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ +-#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +-#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermel Sensor Event */ +-#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +-#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +-#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +-#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +-#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ +-#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +-#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ +-#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +-#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ ++#define IXGBE_EIMS_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ ++#define IXGBE_EIMS_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ ++#define IXGBE_EIMS_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ ++#define IXGBE_EIMS_PCI IXGBE_EICR_PCI /* PCI Exception */ ++#define IXGBE_EIMS_MAILBOX 
IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ ++#define IXGBE_EIMS_LSC IXGBE_EICR_LSC /* Link Status Change */ ++#define IXGBE_EIMS_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ ++#define IXGBE_EIMS_TS IXGBE_EICR_TS /* Thermal Sensor Event */ ++#define IXGBE_EIMS_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ ++#define IXGBE_EIMS_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ ++#define IXGBE_EIMS_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ ++#define IXGBE_EIMS_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ ++#define IXGBE_EIMS_ECC IXGBE_EICR_ECC /* ECC Error */ ++#define IXGBE_EIMS_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) ++#define IXGBE_EIMS_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) ++#define IXGBE_EIMS_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) ++#define IXGBE_EIMS_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ ++#define IXGBE_EIMS_DHER IXGBE_EICR_DHER /* Descr Handler Error */ ++#define IXGBE_EIMS_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ ++#define IXGBE_EIMS_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + + /* Extended Interrupt Mask Clear */ +-#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ +-#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ +-#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ +-#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ +-#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ +-#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ +-#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ +-#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ +-#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ +-#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ +-#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ +-#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ +-#define 
IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ +-#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ +-#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ +-#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ ++#define IXGBE_EIMC_RTX_QUEUE IXGBE_EICR_RTX_QUEUE /* RTx Queue Interrupt */ ++#define IXGBE_EIMC_FLOW_DIR IXGBE_EICR_FLOW_DIR /* FDir Exception */ ++#define IXGBE_EIMC_RX_MISS IXGBE_EICR_RX_MISS /* Packet Buffer Overrun */ ++#define IXGBE_EIMC_PCI IXGBE_EICR_PCI /* PCI Exception */ ++#define IXGBE_EIMC_MAILBOX IXGBE_EICR_MAILBOX /* VF to PF Mailbox Int */ ++#define IXGBE_EIMC_LSC IXGBE_EICR_LSC /* Link Status Change */ ++#define IXGBE_EIMC_MNG IXGBE_EICR_MNG /* MNG Event Interrupt */ ++#define IXGBE_EIMC_TIMESYNC IXGBE_EICR_TIMESYNC /* Timesync Event */ ++#define IXGBE_EIMC_GPI_SDP0 IXGBE_EICR_GPI_SDP0 /* SDP0 Gen Purpose Int */ ++#define IXGBE_EIMC_GPI_SDP1 IXGBE_EICR_GPI_SDP1 /* SDP1 Gen Purpose Int */ ++#define IXGBE_EIMC_GPI_SDP2 IXGBE_EICR_GPI_SDP2 /* SDP2 Gen Purpose Int */ ++#define IXGBE_EIMC_ECC IXGBE_EICR_ECC /* ECC Error */ ++#define IXGBE_EIMC_GPI_SDP0_BY_MAC(_hw) IXGBE_EICR_GPI_SDP0_BY_MAC(_hw) ++#define IXGBE_EIMC_GPI_SDP1_BY_MAC(_hw) IXGBE_EICR_GPI_SDP1_BY_MAC(_hw) ++#define IXGBE_EIMC_GPI_SDP2_BY_MAC(_hw) IXGBE_EICR_GPI_SDP2_BY_MAC(_hw) ++#define IXGBE_EIMC_PBUR IXGBE_EICR_PBUR /* Pkt Buf Handler Err */ ++#define IXGBE_EIMC_DHER IXGBE_EICR_DHER /* Desc Handler Err */ ++#define IXGBE_EIMC_TCP_TIMER IXGBE_EICR_TCP_TIMER /* TCP Timer */ ++#define IXGBE_EIMC_OTHER IXGBE_EICR_OTHER /* INT Cause Active */ + + #define IXGBE_EIMS_ENABLE_MASK ( \ +- IXGBE_EIMS_RTX_QUEUE | \ +- IXGBE_EIMS_LSC | \ +- IXGBE_EIMS_TCP_TIMER | \ ++ IXGBE_EIMS_RTX_QUEUE | \ ++ IXGBE_EIMS_LSC | \ ++ IXGBE_EIMS_TCP_TIMER | \ + IXGBE_EIMS_OTHER) + + /* Immediate Interrupt Rx (A.K.A. 
Low Latency Interrupt) */ +-#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ +-#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ +-#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ +-#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ +-#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ +-#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ +-#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ +-#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ +-#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ +-#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ +-#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ +-#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ +-#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ +-#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ +-#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ +-#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ +-#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ +-#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass check of control bits */ +-#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ +-#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ +-#define IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ +-#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ +-#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ +- +-#define IXGBE_MAX_FTQF_FILTERS 128 +-#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 +-#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 +-#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 +-#define IXGBE_FTQF_PROTOCOL_SCTP 2 +-#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 +-#define 
IXGBE_FTQF_PRIORITY_SHIFT 2 +-#define IXGBE_FTQF_POOL_MASK 0x0000003F +-#define IXGBE_FTQF_POOL_SHIFT 8 +-#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F +-#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 +-#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E +-#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D +-#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B +-#define IXGBE_FTQF_DEST_PORT_MASK 0x17 +-#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F +-#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 +-#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 ++#define IXGBE_IMIR_PORT_IM_EN 0x00010000 /* TCP port enable */ ++#define IXGBE_IMIR_PORT_BP 0x00020000 /* TCP port check bypass */ ++#define IXGBE_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */ ++#define IXGBE_IMIREXT_CTRL_URG 0x00002000 /* Check URG bit in header */ ++#define IXGBE_IMIREXT_CTRL_ACK 0x00004000 /* Check ACK bit in header */ ++#define IXGBE_IMIREXT_CTRL_PSH 0x00008000 /* Check PSH bit in header */ ++#define IXGBE_IMIREXT_CTRL_RST 0x00010000 /* Check RST bit in header */ ++#define IXGBE_IMIREXT_CTRL_SYN 0x00020000 /* Check SYN bit in header */ ++#define IXGBE_IMIREXT_CTRL_FIN 0x00040000 /* Check FIN bit in header */ ++#define IXGBE_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of control bits */ ++#define IXGBE_IMIR_SIZE_BP_82599 0x00001000 /* Packet size bypass */ ++#define IXGBE_IMIR_CTRL_URG_82599 0x00002000 /* Check URG bit in header */ ++#define IXGBE_IMIR_CTRL_ACK_82599 0x00004000 /* Check ACK bit in header */ ++#define IXGBE_IMIR_CTRL_PSH_82599 0x00008000 /* Check PSH bit in header */ ++#define IXGBE_IMIR_CTRL_RST_82599 0x00010000 /* Check RST bit in header */ ++#define IXGBE_IMIR_CTRL_SYN_82599 0x00020000 /* Check SYN bit in header */ ++#define IXGBE_IMIR_CTRL_FIN_82599 0x00040000 /* Check FIN bit in header */ ++#define IXGBE_IMIR_CTRL_BP_82599 0x00080000 /* Bypass chk of ctrl bits */ ++#define IXGBE_IMIR_LLI_EN_82599 0x00100000 /* Enables low latency Int */ ++#define IXGBE_IMIR_RX_QUEUE_MASK_82599 0x0000007F /* Rx Queue Mask */ ++#define 
IXGBE_IMIR_RX_QUEUE_SHIFT_82599 21 /* Rx Queue Shift */ ++#define IXGBE_IMIRVP_PRIORITY_MASK 0x00000007 /* VLAN priority mask */ ++#define IXGBE_IMIRVP_PRIORITY_EN 0x00000008 /* VLAN priority enable */ ++ ++#define IXGBE_MAX_FTQF_FILTERS 128 ++#define IXGBE_FTQF_PROTOCOL_MASK 0x00000003 ++#define IXGBE_FTQF_PROTOCOL_TCP 0x00000000 ++#define IXGBE_FTQF_PROTOCOL_UDP 0x00000001 ++#define IXGBE_FTQF_PROTOCOL_SCTP 2 ++#define IXGBE_FTQF_PRIORITY_MASK 0x00000007 ++#define IXGBE_FTQF_PRIORITY_SHIFT 2 ++#define IXGBE_FTQF_POOL_MASK 0x0000003F ++#define IXGBE_FTQF_POOL_SHIFT 8 ++#define IXGBE_FTQF_5TUPLE_MASK_MASK 0x0000001F ++#define IXGBE_FTQF_5TUPLE_MASK_SHIFT 25 ++#define IXGBE_FTQF_SOURCE_ADDR_MASK 0x1E ++#define IXGBE_FTQF_DEST_ADDR_MASK 0x1D ++#define IXGBE_FTQF_SOURCE_PORT_MASK 0x1B ++#define IXGBE_FTQF_DEST_PORT_MASK 0x17 ++#define IXGBE_FTQF_PROTOCOL_COMP_MASK 0x0F ++#define IXGBE_FTQF_POOL_MASK_EN 0x40000000 ++#define IXGBE_FTQF_QUEUE_ENABLE 0x80000000 + + /* Interrupt clear mask */ +-#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF ++#define IXGBE_IRQ_CLEAR_MASK 0xFFFFFFFF + + /* Interrupt Vector Allocation Registers */ +-#define IXGBE_IVAR_REG_NUM 25 +-#define IXGBE_IVAR_REG_NUM_82599 64 +-#define IXGBE_IVAR_TXRX_ENTRY 96 +-#define IXGBE_IVAR_RX_ENTRY 64 +-#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) +-#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) +-#define IXGBE_IVAR_TX_ENTRY 32 ++#define IXGBE_IVAR_REG_NUM 25 ++#define IXGBE_IVAR_REG_NUM_82599 64 ++#define IXGBE_IVAR_TXRX_ENTRY 96 ++#define IXGBE_IVAR_RX_ENTRY 64 ++#define IXGBE_IVAR_RX_QUEUE(_i) (0 + (_i)) ++#define IXGBE_IVAR_TX_QUEUE(_i) (64 + (_i)) ++#define IXGBE_IVAR_TX_ENTRY 32 + +-#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ +-#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ ++#define IXGBE_IVAR_TCP_TIMER_INDEX 96 /* 0 based index */ ++#define IXGBE_IVAR_OTHER_CAUSES_INDEX 97 /* 0 based index */ + +-#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) ++#define IXGBE_MSIX_VECTOR(_i) (0 + (_i)) + 
+-#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ ++#define IXGBE_IVAR_ALLOC_VAL 0x80 /* Interrupt Allocation valid */ + + /* ETYPE Queue Filter/Select Bit Masks */ +-#define IXGBE_MAX_ETQF_FILTERS 8 +-#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ +-#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ +-#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ +-#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ +-#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ ++#define IXGBE_MAX_ETQF_FILTERS 8 ++#define IXGBE_ETQF_FCOE 0x08000000 /* bit 27 */ ++#define IXGBE_ETQF_BCN 0x10000000 /* bit 28 */ ++#define IXGBE_ETQF_TX_ANTISPOOF 0x20000000 /* bit 29 */ ++#define IXGBE_ETQF_1588 0x40000000 /* bit 30 */ ++#define IXGBE_ETQF_FILTER_EN 0x80000000 /* bit 31 */ ++#define IXGBE_ETQF_POOL_ENABLE (1 << 26) /* bit 26 */ + #define IXGBE_ETQF_POOL_SHIFT 20 + +-#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ +-#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 +-#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ +-#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ ++#define IXGBE_ETQS_RX_QUEUE 0x007F0000 /* bits 22:16 */ ++#define IXGBE_ETQS_RX_QUEUE_SHIFT 16 ++#define IXGBE_ETQS_LLI 0x20000000 /* bit 29 */ ++#define IXGBE_ETQS_QUEUE_EN 0x80000000 /* bit 31 */ + + /* + * ETQF filter list: one static filter per filter consumer. This is +- * to avoid filter collisions later. Add new filters +- * here!! ++ * to avoid filter collisions later. Add new filters ++ * here!! 
+ * + * Current filters: +- * EAPOL 802.1x (0x888e): Filter 0 +- * FCoE (0x8906): Filter 2 +- * 1588 (0x88f7): Filter 3 +- * FIP (0x8914): Filter 4 ++ * EAPOL 802.1x (0x888e): Filter 0 ++ * FCoE (0x8906): Filter 2 ++ * 1588 (0x88f7): Filter 3 ++ * FIP (0x8914): Filter 4 ++ * LLDP (0x88CC): Filter 5 ++ * LACP (0x8809): Filter 6 ++ * FC (0x8808): Filter 7 + */ +-#define IXGBE_ETQF_FILTER_EAPOL 0 +-#define IXGBE_ETQF_FILTER_FCOE 2 +-#define IXGBE_ETQF_FILTER_1588 3 +-#define IXGBE_ETQF_FILTER_FIP 4 ++#define IXGBE_ETQF_FILTER_EAPOL 0 ++#define IXGBE_ETQF_FILTER_FCOE 2 ++#define IXGBE_ETQF_FILTER_1588 3 ++#define IXGBE_ETQF_FILTER_FIP 4 ++#define IXGBE_ETQF_FILTER_LLDP 5 ++#define IXGBE_ETQF_FILTER_LACP 6 ++#define IXGBE_ETQF_FILTER_FC 7 + /* VLAN Control Bit Masks */ +-#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ +-#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ +-#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ +-#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ +-#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ ++#define IXGBE_VLNCTRL_VET 0x0000FFFF /* bits 0-15 */ ++#define IXGBE_VLNCTRL_CFI 0x10000000 /* bit 28 */ ++#define IXGBE_VLNCTRL_CFIEN 0x20000000 /* bit 29 */ ++#define IXGBE_VLNCTRL_VFE 0x40000000 /* bit 30 */ ++#define IXGBE_VLNCTRL_VME 0x80000000 /* bit 31 */ + + /* VLAN pool filtering masks */ +-#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ +-#define IXGBE_VLVF_ENTRIES 64 +-#define IXGBE_VLVF_VLANID_MASK 0x00000FFF +- ++#define IXGBE_VLVF_VIEN 0x80000000 /* filter is valid */ ++#define IXGBE_VLVF_ENTRIES 64 ++#define IXGBE_VLVF_VLANID_MASK 0x00000FFF + /* Per VF Port VLAN insertion rules */ +-#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ +-#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ ++#define IXGBE_VMVIR_VLANA_DEFAULT 0x40000000 /* Always use default VLAN */ ++#define IXGBE_VMVIR_VLANA_NEVER 0x80000000 /* Never insert VLAN tag */ + +-#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 
0x8100 /* 802.1q protocol */ ++#define IXGBE_ETHERNET_IEEE_VLAN_TYPE 0x8100 /* 802.1q protocol */ + + /* STATUS Bit Masks */ +-#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ +-#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ +-#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Enable Status */ ++#define IXGBE_STATUS_LAN_ID 0x0000000C /* LAN ID */ ++#define IXGBE_STATUS_LAN_ID_SHIFT 2 /* LAN ID Shift*/ ++#define IXGBE_STATUS_GIO 0x00080000 /* GIO Master Ena Status */ + +-#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ +-#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ ++#define IXGBE_STATUS_LAN_ID_0 0x00000000 /* LAN ID 0 */ ++#define IXGBE_STATUS_LAN_ID_1 0x00000004 /* LAN ID 1 */ + + /* ESDP Bit Masks */ +-#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ +-#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ +-#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ +-#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ +-#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ +-#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ +-#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ +-#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ +-#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ +-#define IXGBE_ESDP_SDP4_DIR 0x00000004 /* SDP4 IO direction */ +-#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ +-#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 Native Function */ +-#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ ++#define IXGBE_ESDP_SDP0 0x00000001 /* SDP0 Data Value */ ++#define IXGBE_ESDP_SDP1 0x00000002 /* SDP1 Data Value */ ++#define IXGBE_ESDP_SDP2 0x00000004 /* SDP2 Data Value */ ++#define IXGBE_ESDP_SDP3 0x00000008 /* SDP3 Data Value */ ++#define IXGBE_ESDP_SDP4 0x00000010 /* SDP4 Data Value */ ++#define IXGBE_ESDP_SDP5 0x00000020 /* SDP5 Data Value */ ++#define IXGBE_ESDP_SDP6 0x00000040 /* SDP6 Data Value */ ++#define IXGBE_ESDP_SDP7 0x00000080 /* 
SDP7 Data Value */ ++#define IXGBE_ESDP_SDP0_DIR 0x00000100 /* SDP0 IO direction */ ++#define IXGBE_ESDP_SDP1_DIR 0x00000200 /* SDP1 IO direction */ ++#define IXGBE_ESDP_SDP2_DIR 0x00000400 /* SDP1 IO direction */ ++#define IXGBE_ESDP_SDP3_DIR 0x00000800 /* SDP3 IO direction */ ++#define IXGBE_ESDP_SDP4_DIR 0x00001000 /* SDP4 IO direction */ ++#define IXGBE_ESDP_SDP5_DIR 0x00002000 /* SDP5 IO direction */ ++#define IXGBE_ESDP_SDP6_DIR 0x00004000 /* SDP6 IO direction */ ++#define IXGBE_ESDP_SDP7_DIR 0x00008000 /* SDP7 IO direction */ ++#define IXGBE_ESDP_SDP0_NATIVE 0x00010000 /* SDP0 IO mode */ ++#define IXGBE_ESDP_SDP1_NATIVE 0x00020000 /* SDP1 IO mode */ ++ + + /* LEDCTL Bit Masks */ +-#define IXGBE_LED_IVRT_BASE 0x00000040 +-#define IXGBE_LED_BLINK_BASE 0x00000080 +-#define IXGBE_LED_MODE_MASK_BASE 0x0000000F +-#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) +-#define IXGBE_LED_MODE_SHIFT(_i) (8 * (_i)) +-#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) +-#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) +-#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) ++#define IXGBE_LED_IVRT_BASE 0x00000040 ++#define IXGBE_LED_BLINK_BASE 0x00000080 ++#define IXGBE_LED_MODE_MASK_BASE 0x0000000F ++#define IXGBE_LED_OFFSET(_base, _i) (_base << (8 * (_i))) ++#define IXGBE_LED_MODE_SHIFT(_i) (8*(_i)) ++#define IXGBE_LED_IVRT(_i) IXGBE_LED_OFFSET(IXGBE_LED_IVRT_BASE, _i) ++#define IXGBE_LED_BLINK(_i) IXGBE_LED_OFFSET(IXGBE_LED_BLINK_BASE, _i) ++#define IXGBE_LED_MODE_MASK(_i) IXGBE_LED_OFFSET(IXGBE_LED_MODE_MASK_BASE, _i) ++#define IXGBE_X557_LED_MANUAL_SET_MASK (1 << 8) ++#define IXGBE_X557_MAX_LED_INDEX 3 ++#define IXGBE_X557_LED_PROVISIONING 0xC430 + + /* LED modes */ +-#define IXGBE_LED_LINK_UP 0x0 +-#define IXGBE_LED_LINK_10G 0x1 +-#define IXGBE_LED_MAC 0x2 +-#define IXGBE_LED_FILTER 0x3 +-#define IXGBE_LED_LINK_ACTIVE 0x4 +-#define IXGBE_LED_LINK_1G 0x5 +-#define IXGBE_LED_ON 0xE +-#define 
IXGBE_LED_OFF 0xF ++#define IXGBE_LED_LINK_UP 0x0 ++#define IXGBE_LED_LINK_10G 0x1 ++#define IXGBE_LED_MAC 0x2 ++#define IXGBE_LED_FILTER 0x3 ++#define IXGBE_LED_LINK_ACTIVE 0x4 ++#define IXGBE_LED_LINK_1G 0x5 ++#define IXGBE_LED_ON 0xE ++#define IXGBE_LED_OFF 0xF + + /* AUTOC Bit Masks */ + #define IXGBE_AUTOC_KX4_KX_SUPP_MASK 0xC0000000 +-#define IXGBE_AUTOC_KX4_SUPP 0x80000000 +-#define IXGBE_AUTOC_KX_SUPP 0x40000000 +-#define IXGBE_AUTOC_PAUSE 0x30000000 +-#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 +-#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 +-#define IXGBE_AUTOC_RF 0x08000000 +-#define IXGBE_AUTOC_PD_TMR 0x06000000 +-#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 +-#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 +-#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 +-#define IXGBE_AUTOC_FECA 0x00040000 +-#define IXGBE_AUTOC_FECR 0x00020000 +-#define IXGBE_AUTOC_KR_SUPP 0x00010000 +-#define IXGBE_AUTOC_AN_RESTART 0x00001000 +-#define IXGBE_AUTOC_FLU 0x00000001 +-#define IXGBE_AUTOC_LMS_SHIFT 13 +-#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) +-#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) +-#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << IXGBE_AUTOC_LMS_SHIFT) +-#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +-#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) +-#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) +-#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) +-#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) +-#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) +-#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) +-#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) +-#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +- +-#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 +-#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 +-#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 +-#define 
IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 +-#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +-#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +-#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) +-#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +-#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +-#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +-#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) +- +-#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 +-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 +-#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 +-#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +-#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +-#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) +-#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 +-#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 +- +-#define IXGBE_MACC_FLU 0x00000001 +-#define IXGBE_MACC_FSV_10G 0x00030000 +-#define IXGBE_MACC_FS 0x00040000 +-#define IXGBE_MAC_RX2TX_LPBK 0x00000002 ++#define IXGBE_AUTOC_KX4_SUPP 0x80000000 ++#define IXGBE_AUTOC_KX_SUPP 0x40000000 ++#define IXGBE_AUTOC_PAUSE 0x30000000 ++#define IXGBE_AUTOC_ASM_PAUSE 0x20000000 ++#define IXGBE_AUTOC_SYM_PAUSE 0x10000000 ++#define IXGBE_AUTOC_RF 0x08000000 ++#define IXGBE_AUTOC_PD_TMR 0x06000000 ++#define IXGBE_AUTOC_AN_RX_LOOSE 0x01000000 ++#define IXGBE_AUTOC_AN_RX_DRIFT 0x00800000 ++#define IXGBE_AUTOC_AN_RX_ALIGN 0x007C0000 ++#define IXGBE_AUTOC_FECA 0x00040000 ++#define IXGBE_AUTOC_FECR 0x00020000 ++#define IXGBE_AUTOC_KR_SUPP 0x00010000 ++#define IXGBE_AUTOC_AN_RESTART 0x00001000 ++#define IXGBE_AUTOC_FLU 0x00000001 ++#define IXGBE_AUTOC_LMS_SHIFT 13 ++#define IXGBE_AUTOC_LMS_10G_SERIAL (0x3 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_KX4_KX_KR (0x4 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_SGMII_1G_100M (0x5 << 
IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_KX4_KX_KR_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_KX4_KX_KR_SGMII (0x7 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_MASK (0x7 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_1G_LINK_NO_AN (0x0 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_10G_LINK_NO_AN (0x1 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_1G_AN (0x2 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_KX4_AN (0x4 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_KX4_AN_1G_AN (0x6 << IXGBE_AUTOC_LMS_SHIFT) ++#define IXGBE_AUTOC_LMS_ATTACH_TYPE (0x7 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) ++ ++#define IXGBE_AUTOC_1G_PMA_PMD_MASK 0x00000200 ++#define IXGBE_AUTOC_1G_PMA_PMD_SHIFT 9 ++#define IXGBE_AUTOC_10G_PMA_PMD_MASK 0x00000180 ++#define IXGBE_AUTOC_10G_PMA_PMD_SHIFT 7 ++#define IXGBE_AUTOC_10G_XAUI (0x0 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_10G_KX4 (0x1 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_10G_CX4 (0x2 << IXGBE_AUTOC_10G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_1G_BX (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_1G_KX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_1G_SFI (0x0 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC_1G_KX_BX (0x1 << IXGBE_AUTOC_1G_PMA_PMD_SHIFT) ++ ++#define IXGBE_AUTOC2_UPPER_MASK 0xFFFF0000 ++#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_MASK 0x00030000 ++#define IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT 16 ++#define IXGBE_AUTOC2_10G_KR (0x0 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC2_10G_XFI (0x1 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC2_10G_SFI (0x2 << IXGBE_AUTOC2_10G_SERIAL_PMA_PMD_SHIFT) ++#define IXGBE_AUTOC2_LINK_DISABLE_ON_D3_MASK 0x50000000 ++#define IXGBE_AUTOC2_LINK_DISABLE_MASK 0x70000000 ++ ++#define IXGBE_MACC_FLU 0x00000001 ++#define IXGBE_MACC_FSV_10G 0x00030000 ++#define IXGBE_MACC_FS 0x00040000 ++#define IXGBE_MAC_RX2TX_LPBK 0x00000002 + + /* Veto Bit definiton */ 
+-#define IXGBE_MMNGC_MNG_VETO 0x00000001 ++#define IXGBE_MMNGC_MNG_VETO 0x00000001 + + /* LINKS Bit Masks */ +-#define IXGBE_LINKS_KX_AN_COMP 0x80000000 +-#define IXGBE_LINKS_UP 0x40000000 +-#define IXGBE_LINKS_SPEED 0x20000000 +-#define IXGBE_LINKS_MODE 0x18000000 +-#define IXGBE_LINKS_RX_MODE 0x06000000 +-#define IXGBE_LINKS_TX_MODE 0x01800000 +-#define IXGBE_LINKS_XGXS_EN 0x00400000 +-#define IXGBE_LINKS_SGMII_EN 0x02000000 +-#define IXGBE_LINKS_PCS_1G_EN 0x00200000 +-#define IXGBE_LINKS_1G_AN_EN 0x00100000 +-#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 +-#define IXGBE_LINKS_1G_SYNC 0x00040000 +-#define IXGBE_LINKS_10G_ALIGN 0x00020000 +-#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 +-#define IXGBE_LINKS_TL_FAULT 0x00001000 +-#define IXGBE_LINKS_SIGNAL 0x00000F00 +- +-#define IXGBE_LINKS_SPEED_82599 0x30000000 +-#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 +-#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 +-#define IXGBE_LINKS_SPEED_100_82599 0x10000000 +-#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ +-#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ +- +-#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 ++#define IXGBE_LINKS_KX_AN_COMP 0x80000000 ++#define IXGBE_LINKS_UP 0x40000000 ++#define IXGBE_LINKS_SPEED 0x20000000 ++#define IXGBE_LINKS_MODE 0x18000000 ++#define IXGBE_LINKS_RX_MODE 0x06000000 ++#define IXGBE_LINKS_TX_MODE 0x01800000 ++#define IXGBE_LINKS_XGXS_EN 0x00400000 ++#define IXGBE_LINKS_SGMII_EN 0x02000000 ++#define IXGBE_LINKS_PCS_1G_EN 0x00200000 ++#define IXGBE_LINKS_1G_AN_EN 0x00100000 ++#define IXGBE_LINKS_KX_AN_IDLE 0x00080000 ++#define IXGBE_LINKS_1G_SYNC 0x00040000 ++#define IXGBE_LINKS_10G_ALIGN 0x00020000 ++#define IXGBE_LINKS_10G_LANE_SYNC 0x00017000 ++#define IXGBE_LINKS_TL_FAULT 0x00001000 ++#define IXGBE_LINKS_SIGNAL 0x00000F00 ++ ++#define IXGBE_LINKS_SPEED_NON_STD 0x08000000 ++#define IXGBE_LINKS_SPEED_82599 0x30000000 ++#define IXGBE_LINKS_SPEED_10G_82599 0x30000000 ++#define IXGBE_LINKS_SPEED_1G_82599 0x20000000 ++#define 
IXGBE_LINKS_SPEED_100_82599 0x10000000 ++#define IXGBE_LINKS_SPEED_10_X550EM_A 0x00000000 ++#define IXGBE_LINK_UP_TIME 90 /* 9.0 Seconds */ ++#define IXGBE_AUTO_NEG_TIME 45 /* 4.5 Seconds */ ++ ++#define IXGBE_LINKS2_AN_SUPPORTED 0x00000040 + + /* PCS1GLSTA Bit Masks */ +-#define IXGBE_PCS1GLSTA_LINK_OK 1 +-#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 +-#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 +-#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 +-#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 +-#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 +-#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 ++#define IXGBE_PCS1GLSTA_LINK_OK 1 ++#define IXGBE_PCS1GLSTA_SYNK_OK 0x10 ++#define IXGBE_PCS1GLSTA_AN_COMPLETE 0x10000 ++#define IXGBE_PCS1GLSTA_AN_PAGE_RX 0x20000 ++#define IXGBE_PCS1GLSTA_AN_TIMED_OUT 0x40000 ++#define IXGBE_PCS1GLSTA_AN_REMOTE_FAULT 0x80000 ++#define IXGBE_PCS1GLSTA_AN_ERROR_RWS 0x100000 + +-#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 +-#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 ++#define IXGBE_PCS1GANA_SYM_PAUSE 0x80 ++#define IXGBE_PCS1GANA_ASM_PAUSE 0x100 + + /* PCS1GLCTL Bit Masks */ +-#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ +-#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 +-#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 +-#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 +-#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 +-#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 ++#define IXGBE_PCS1GLCTL_AN_1G_TIMEOUT_EN 0x00040000 /* PCS 1G autoneg to en */ ++#define IXGBE_PCS1GLCTL_FLV_LINK_UP 1 ++#define IXGBE_PCS1GLCTL_FORCE_LINK 0x20 ++#define IXGBE_PCS1GLCTL_LOW_LINK_LATCH 0x40 ++#define IXGBE_PCS1GLCTL_AN_ENABLE 0x10000 ++#define IXGBE_PCS1GLCTL_AN_RESTART 0x20000 + + /* ANLP1 Bit Masks */ +-#define IXGBE_ANLP1_PAUSE 0x0C00 +-#define IXGBE_ANLP1_SYM_PAUSE 0x0400 +-#define IXGBE_ANLP1_ASM_PAUSE 0x0800 +-#define IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 ++#define IXGBE_ANLP1_PAUSE 0x0C00 ++#define IXGBE_ANLP1_SYM_PAUSE 0x0400 ++#define IXGBE_ANLP1_ASM_PAUSE 0x0800 ++#define 
IXGBE_ANLP1_AN_STATE_MASK 0x000f0000 + + /* SW Semaphore Register bitmasks */ +-#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ +-#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ +-#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ +-#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ ++#define IXGBE_SWSM_SMBI 0x00000001 /* Driver Semaphore bit */ ++#define IXGBE_SWSM_SWESMBI 0x00000002 /* FW Semaphore bit */ ++#define IXGBE_SWSM_WMNG 0x00000004 /* Wake MNG Clock */ ++#define IXGBE_SWFW_REGSMP 0x80000000 /* Register Semaphore bit 31 */ + + /* SW_FW_SYNC/GSSR definitions */ +-#define IXGBE_GSSR_EEP_SM 0x0001 +-#define IXGBE_GSSR_PHY0_SM 0x0002 +-#define IXGBE_GSSR_PHY1_SM 0x0004 +-#define IXGBE_GSSR_MAC_CSR_SM 0x0008 +-#define IXGBE_GSSR_FLASH_SM 0x0010 +-#define IXGBE_GSSR_SW_MNG_SM 0x0400 ++#define IXGBE_GSSR_EEP_SM 0x0001 ++#define IXGBE_GSSR_PHY0_SM 0x0002 ++#define IXGBE_GSSR_PHY1_SM 0x0004 ++#define IXGBE_GSSR_MAC_CSR_SM 0x0008 ++#define IXGBE_GSSR_FLASH_SM 0x0010 ++#define IXGBE_GSSR_NVM_UPDATE_SM 0x0200 ++#define IXGBE_GSSR_SW_MNG_SM 0x0400 ++#define IXGBE_GSSR_TOKEN_SM 0x40000000 /* SW bit for shared access */ ++#define IXGBE_GSSR_SHARED_I2C_SM 0x1806 /* Wait for both phys and both I2Cs */ ++#define IXGBE_GSSR_I2C_MASK 0x1800 ++#define IXGBE_GSSR_NVM_PHY_MASK 0xF + + /* FW Status register bitmask */ +-#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ ++#define IXGBE_FWSTS_FWRI 0x00000200 /* Firmware Reset Indication */ + + /* EEC Register */ +-#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ +-#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ +-#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ +-#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ +-#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ +-#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ +-#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ +-#define IXGBE_EEC_FWE_SHIFT 4 +-#define 
IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ +-#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ +-#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ +-#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ +-#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ +-#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ +-#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ ++#define IXGBE_EEC_SK 0x00000001 /* EEPROM Clock */ ++#define IXGBE_EEC_CS 0x00000002 /* EEPROM Chip Select */ ++#define IXGBE_EEC_DI 0x00000004 /* EEPROM Data In */ ++#define IXGBE_EEC_DO 0x00000008 /* EEPROM Data Out */ ++#define IXGBE_EEC_FWE_MASK 0x00000030 /* FLASH Write Enable */ ++#define IXGBE_EEC_FWE_DIS 0x00000010 /* Disable FLASH writes */ ++#define IXGBE_EEC_FWE_EN 0x00000020 /* Enable FLASH writes */ ++#define IXGBE_EEC_FWE_SHIFT 4 ++#define IXGBE_EEC_REQ 0x00000040 /* EEPROM Access Request */ ++#define IXGBE_EEC_GNT 0x00000080 /* EEPROM Access Grant */ ++#define IXGBE_EEC_PRES 0x00000100 /* EEPROM Present */ ++#define IXGBE_EEC_ARD 0x00000200 /* EEPROM Auto Read Done */ ++#define IXGBE_EEC_FLUP 0x00800000 /* Flash update command */ ++#define IXGBE_EEC_SEC1VAL 0x02000000 /* Sector 1 Valid */ ++#define IXGBE_EEC_FLUDONE 0x04000000 /* Flash update done */ + /* EEPROM Addressing bits based on type (0-small, 1-large) */ +-#define IXGBE_EEC_ADDR_SIZE 0x00000400 +-#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ +-#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. */ ++#define IXGBE_EEC_ADDR_SIZE 0x00000400 ++#define IXGBE_EEC_SIZE 0x00007800 /* EEPROM Size */ ++#define IXGBE_EERD_MAX_ADDR 0x00003FFF /* EERD alows 14 bits for addr. 
*/ + +-#define IXGBE_EEC_SIZE_SHIFT 11 +-#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 +-#define IXGBE_EEPROM_OPCODE_BITS 8 ++#define IXGBE_EEC_SIZE_SHIFT 11 ++#define IXGBE_EEPROM_WORD_SIZE_SHIFT 6 ++#define IXGBE_EEPROM_OPCODE_BITS 8 ++ ++/* FLA Register */ ++#define IXGBE_FLA_LOCKED 0x00000040 + + /* Part Number String Length */ +-#define IXGBE_PBANUM_LENGTH 11 ++#define IXGBE_PBANUM_LENGTH 11 + + /* Checksum and EEPROM pointers */ +-#define IXGBE_PBANUM_PTR_GUARD 0xFAFA +-#define IXGBE_EEPROM_CHECKSUM 0x3F +-#define IXGBE_EEPROM_SUM 0xBABA +-#define IXGBE_PCIE_ANALOG_PTR 0x03 +-#define IXGBE_ATLAS0_CONFIG_PTR 0x04 +-#define IXGBE_PHY_PTR 0x04 +-#define IXGBE_ATLAS1_CONFIG_PTR 0x05 +-#define IXGBE_OPTION_ROM_PTR 0x05 +-#define IXGBE_PCIE_GENERAL_PTR 0x06 +-#define IXGBE_PCIE_CONFIG0_PTR 0x07 +-#define IXGBE_PCIE_CONFIG1_PTR 0x08 +-#define IXGBE_CORE0_PTR 0x09 +-#define IXGBE_CORE1_PTR 0x0A +-#define IXGBE_MAC0_PTR 0x0B +-#define IXGBE_MAC1_PTR 0x0C +-#define IXGBE_CSR0_CONFIG_PTR 0x0D +-#define IXGBE_CSR1_CONFIG_PTR 0x0E +-#define IXGBE_FW_PTR 0x0F +-#define IXGBE_PBANUM0_PTR 0x15 +-#define IXGBE_PBANUM1_PTR 0x16 +-#define IXGBE_FREE_SPACE_PTR 0X3E ++#define IXGBE_PBANUM_PTR_GUARD 0xFAFA ++#define IXGBE_EEPROM_CHECKSUM 0x3F ++#define IXGBE_EEPROM_SUM 0xBABA ++#define IXGBE_EEPROM_CTRL_4 0x45 ++#define IXGBE_EE_CTRL_4_INST_ID 0x10 ++#define IXGBE_EE_CTRL_4_INST_ID_SHIFT 4 ++#define IXGBE_PCIE_ANALOG_PTR 0x03 ++#define IXGBE_ATLAS0_CONFIG_PTR 0x04 ++#define IXGBE_PHY_PTR 0x04 ++#define IXGBE_ATLAS1_CONFIG_PTR 0x05 ++#define IXGBE_OPTION_ROM_PTR 0x05 ++#define IXGBE_PCIE_GENERAL_PTR 0x06 ++#define IXGBE_PCIE_CONFIG0_PTR 0x07 ++#define IXGBE_PCIE_CONFIG1_PTR 0x08 ++#define IXGBE_CORE0_PTR 0x09 ++#define IXGBE_CORE1_PTR 0x0A ++#define IXGBE_MAC0_PTR 0x0B ++#define IXGBE_MAC1_PTR 0x0C ++#define IXGBE_CSR0_CONFIG_PTR 0x0D ++#define IXGBE_CSR1_CONFIG_PTR 0x0E ++#define IXGBE_PCIE_ANALOG_PTR_X550 0x02 ++#define IXGBE_SHADOW_RAM_SIZE_X550 0x4000 ++#define 
IXGBE_IXGBE_PCIE_GENERAL_SIZE 0x24 ++#define IXGBE_PCIE_CONFIG_SIZE 0x08 ++#define IXGBE_EEPROM_LAST_WORD 0x41 ++#define IXGBE_FW_PTR 0x0F ++#define IXGBE_PBANUM0_PTR 0x15 ++#define IXGBE_PBANUM1_PTR 0x16 ++#define IXGBE_ALT_MAC_ADDR_PTR 0x37 ++#define IXGBE_FREE_SPACE_PTR 0X3E + + /* External Thermal Sensor Config */ +-#define IXGBE_ETS_CFG 0x26 +-#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 +-#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 +-#define IXGBE_ETS_TYPE_MASK 0x0038 +-#define IXGBE_ETS_TYPE_SHIFT 3 +-#define IXGBE_ETS_TYPE_EMC 0x000 +-#define IXGBE_ETS_TYPE_EMC_SHIFTED 0x000 +-#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 +-#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 +-#define IXGBE_ETS_DATA_LOC_SHIFT 10 +-#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 +-#define IXGBE_ETS_DATA_INDEX_SHIFT 8 +-#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF +- +-#define IXGBE_SAN_MAC_ADDR_PTR 0x28 +-#define IXGBE_DEVICE_CAPS 0x2C +-#define IXGBE_SERIAL_NUMBER_MAC_ADDR 0x11 +-#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 ++#define IXGBE_ETS_CFG 0x26 ++#define IXGBE_ETS_LTHRES_DELTA_MASK 0x07C0 ++#define IXGBE_ETS_LTHRES_DELTA_SHIFT 6 ++#define IXGBE_ETS_TYPE_MASK 0x0038 ++#define IXGBE_ETS_TYPE_SHIFT 3 ++#define IXGBE_ETS_TYPE_EMC 0x000 ++#define IXGBE_ETS_NUM_SENSORS_MASK 0x0007 ++#define IXGBE_ETS_DATA_LOC_MASK 0x3C00 ++#define IXGBE_ETS_DATA_LOC_SHIFT 10 ++#define IXGBE_ETS_DATA_INDEX_MASK 0x0300 ++#define IXGBE_ETS_DATA_INDEX_SHIFT 8 ++#define IXGBE_ETS_DATA_HTHRESH_MASK 0x00FF ++ ++#define IXGBE_SAN_MAC_ADDR_PTR 0x28 ++#define IXGBE_DEVICE_CAPS 0x2C ++#define IXGBE_82599_SERIAL_NUMBER_MAC_ADDR 0x11 ++#define IXGBE_X550_SERIAL_NUMBER_MAC_ADDR 0x04 ++ ++#define IXGBE_PCIE_MSIX_82599_CAPS 0x72 + #define IXGBE_MAX_MSIX_VECTORS_82599 0x40 +-#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 ++#define IXGBE_PCIE_MSIX_82598_CAPS 0x62 + #define IXGBE_MAX_MSIX_VECTORS_82598 0x13 + + /* MSI-X capability fields masks */ +-#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF ++#define IXGBE_PCIE_MSIX_TBL_SZ_MASK 0x7FF + + /* Legacy 
EEPROM word offsets */ +-#define IXGBE_ISCSI_BOOT_CAPS 0x0033 +-#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 +-#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 ++#define IXGBE_ISCSI_BOOT_CAPS 0x0033 ++#define IXGBE_ISCSI_SETUP_PORT_0 0x0030 ++#define IXGBE_ISCSI_SETUP_PORT_1 0x0034 + + /* EEPROM Commands - SPI */ +-#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ +-#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 +-#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ +-#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ +-#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ +-#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ ++#define IXGBE_EEPROM_MAX_RETRY_SPI 5000 /* Max wait 5ms for RDY signal */ ++#define IXGBE_EEPROM_STATUS_RDY_SPI 0x01 ++#define IXGBE_EEPROM_READ_OPCODE_SPI 0x03 /* EEPROM read opcode */ ++#define IXGBE_EEPROM_WRITE_OPCODE_SPI 0x02 /* EEPROM write opcode */ ++#define IXGBE_EEPROM_A8_OPCODE_SPI 0x08 /* opcode bit-3 = addr bit-8 */ ++#define IXGBE_EEPROM_WREN_OPCODE_SPI 0x06 /* EEPROM set Write Ena latch */ + /* EEPROM reset Write Enable latch */ +-#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 +-#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ +-#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ +-#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ +-#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ +-#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ ++#define IXGBE_EEPROM_WRDI_OPCODE_SPI 0x04 ++#define IXGBE_EEPROM_RDSR_OPCODE_SPI 0x05 /* EEPROM read Status reg */ ++#define IXGBE_EEPROM_WRSR_OPCODE_SPI 0x01 /* EEPROM write Status reg */ ++#define IXGBE_EEPROM_ERASE4K_OPCODE_SPI 0x20 /* EEPROM ERASE 4KB */ ++#define IXGBE_EEPROM_ERASE64K_OPCODE_SPI 0xD8 /* EEPROM ERASE 64KB */ ++#define IXGBE_EEPROM_ERASE256_OPCODE_SPI 0xDB /* EEPROM ERASE 256B */ + + /* EEPROM Read Register 
*/ +-#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ +-#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ +-#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ +-#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ +-#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for write complete */ +-#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for read complete */ +- +-#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 +-#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 512 /* EEPROM words # read in burst */ +-#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* EEPROM words # wr in burst */ +- +-#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ +-#define IXGBE_EEPROM_CCD_BIT 2 /* EEPROM Core Clock Disable bit */ ++#define IXGBE_EEPROM_RW_REG_DATA 16 /* data offset in EEPROM read reg */ ++#define IXGBE_EEPROM_RW_REG_DONE 2 /* Offset to READ done bit */ ++#define IXGBE_EEPROM_RW_REG_START 1 /* First bit to start operation */ ++#define IXGBE_EEPROM_RW_ADDR_SHIFT 2 /* Shift to the address bits */ ++#define IXGBE_NVM_POLL_WRITE 1 /* Flag for polling for wr complete */ ++#define IXGBE_NVM_POLL_READ 0 /* Flag for polling for rd complete */ ++ ++#define NVM_INIT_CTRL_3 0x38 ++#define NVM_INIT_CTRL_3_LPLU 0x8 ++#define NVM_INIT_CTRL_3_D10GMP_PORT0 0x40 ++#define NVM_INIT_CTRL_3_D10GMP_PORT1 0x100 ++ ++#define IXGBE_ETH_LENGTH_OF_ADDRESS 6 ++ ++#define IXGBE_EEPROM_PAGE_SIZE_MAX 128 ++#define IXGBE_EEPROM_RD_BUFFER_MAX_COUNT 256 /* words rd in burst */ ++#define IXGBE_EEPROM_WR_BUFFER_MAX_COUNT 256 /* words wr in burst */ ++#define IXGBE_EEPROM_CTRL_2 1 /* EEPROM CTRL word 2 */ ++#define IXGBE_EEPROM_CCD_BIT 2 + + #ifndef IXGBE_EEPROM_GRANT_ATTEMPTS +-#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */ ++#define IXGBE_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM attempts to gain grant */ + #endif + + #ifndef IXGBE_EERD_EEWR_ATTEMPTS + /* Number of 5 microseconds we wait for EERD read and + * EERW write to 
complete */ +-#define IXGBE_EERD_EEWR_ATTEMPTS 100000 ++#define IXGBE_EERD_EEWR_ATTEMPTS 100000 + #endif + + #ifndef IXGBE_FLUDONE_ATTEMPTS + /* # attempts we wait for flush update to complete */ +-#define IXGBE_FLUDONE_ATTEMPTS 20000 ++#define IXGBE_FLUDONE_ATTEMPTS 20000 + #endif + +-#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ +-#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ +-#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ +-#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ +- +-#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 +-#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 +-#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 +-#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 +-#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 +-#define IXGBE_FW_LESM_STATE_1 0x1 +-#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ +-#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 +-#define IXGBE_FW_PATCH_VERSION_4 0x7 +-#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ +-#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ +-#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ +-#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ +-#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ +-#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */ +-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */ +-#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */ +-#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */ +-#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */ +-#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */ +-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */ +-#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. 
WWN base exists */ +- +-#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ +-#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ +-#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ ++#define IXGBE_PCIE_CTRL2 0x5 /* PCIe Control 2 Offset */ ++#define IXGBE_PCIE_CTRL2_DUMMY_ENABLE 0x8 /* Dummy Function Enable */ ++#define IXGBE_PCIE_CTRL2_LAN_DISABLE 0x2 /* LAN PCI Disable */ ++#define IXGBE_PCIE_CTRL2_DISABLE_SELECT 0x1 /* LAN Disable Select */ ++ ++#define IXGBE_SAN_MAC_ADDR_PORT0_OFFSET 0x0 ++#define IXGBE_SAN_MAC_ADDR_PORT1_OFFSET 0x3 ++#define IXGBE_DEVICE_CAPS_ALLOW_ANY_SFP 0x1 ++#define IXGBE_DEVICE_CAPS_FCOE_OFFLOADS 0x2 ++#define IXGBE_DEVICE_CAPS_NO_CROSSTALK_WR (1 << 7) ++#define IXGBE_FW_LESM_PARAMETERS_PTR 0x2 ++#define IXGBE_FW_LESM_STATE_1 0x1 ++#define IXGBE_FW_LESM_STATE_ENABLED 0x8000 /* LESM Enable bit */ ++#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 ++#define IXGBE_FW_PATCH_VERSION_4 0x7 ++#define IXGBE_FCOE_IBA_CAPS_BLK_PTR 0x33 /* iSCSI/FCOE block */ ++#define IXGBE_FCOE_IBA_CAPS_FCOE 0x20 /* FCOE flags */ ++#define IXGBE_ISCSI_FCOE_BLK_PTR 0x17 /* iSCSI/FCOE block */ ++#define IXGBE_ISCSI_FCOE_FLAGS_OFFSET 0x0 /* FCOE flags */ ++#define IXGBE_ISCSI_FCOE_FLAGS_ENABLE 0x1 /* FCOE flags enable bit */ ++#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. 
SAN MAC block */ ++#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt SAN MAC capability */ ++#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt SAN MAC 0 offset */ ++#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt SAN MAC 1 offset */ ++#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt WWNN prefix offset */ ++#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt WWPN prefix offset */ ++#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt SAN MAC exists */ ++#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt WWN base exists */ ++ ++/* FW header offset */ ++#define IXGBE_X540_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 ++#define IXGBE_X540_FW_MODULE_MASK 0x7FFF ++/* 4KB multiplier */ ++#define IXGBE_X540_FW_MODULE_LENGTH 0x1000 ++/* version word 2 (month & day) */ ++#define IXGBE_X540_FW_PATCH_VERSION_2 0x5 ++/* version word 3 (silicon compatibility & year) */ ++#define IXGBE_X540_FW_PATCH_VERSION_3 0x6 ++/* version word 4 (major & minor numbers) */ ++#define IXGBE_X540_FW_PATCH_VERSION_4 0x7 ++ ++#define IXGBE_DEVICE_CAPS_WOL_PORT0_1 0x4 /* WoL supported on ports 0 & 1 */ ++#define IXGBE_DEVICE_CAPS_WOL_PORT0 0x8 /* WoL supported on port 0 */ ++#define IXGBE_DEVICE_CAPS_WOL_MASK 0xC /* Mask for WoL capabilities */ + + /* PCI Bus Info */ +-#define IXGBE_PCI_DEVICE_STATUS 0xAA +-#define IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 +-#define IXGBE_PCI_LINK_STATUS 0xB2 +-#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 +-#define IXGBE_PCI_LINK_WIDTH 0x3F0 +-#define IXGBE_PCI_LINK_WIDTH_1 0x10 +-#define IXGBE_PCI_LINK_WIDTH_2 0x20 +-#define IXGBE_PCI_LINK_WIDTH_4 0x40 +-#define IXGBE_PCI_LINK_WIDTH_8 0x80 +-#define IXGBE_PCI_LINK_SPEED 0xF +-#define IXGBE_PCI_LINK_SPEED_2500 0x1 +-#define IXGBE_PCI_LINK_SPEED_5000 0x2 +-#define IXGBE_PCI_LINK_SPEED_8000 0x3 +-#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E +-#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 +-#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 ++#define IXGBE_PCI_DEVICE_STATUS 0xAA ++#define 
IXGBE_PCI_DEVICE_STATUS_TRANSACTION_PENDING 0x0020 ++#define IXGBE_PCI_LINK_STATUS 0xB2 ++#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 ++#define IXGBE_PCI_LINK_WIDTH 0x3F0 ++#define IXGBE_PCI_LINK_WIDTH_1 0x10 ++#define IXGBE_PCI_LINK_WIDTH_2 0x20 ++#define IXGBE_PCI_LINK_WIDTH_4 0x40 ++#define IXGBE_PCI_LINK_WIDTH_8 0x80 ++#define IXGBE_PCI_LINK_SPEED 0xF ++#define IXGBE_PCI_LINK_SPEED_2500 0x1 ++#define IXGBE_PCI_LINK_SPEED_5000 0x2 ++#define IXGBE_PCI_LINK_SPEED_8000 0x3 ++#define IXGBE_PCI_HEADER_TYPE_REGISTER 0x0E ++#define IXGBE_PCI_HEADER_TYPE_MULTIFUNC 0x80 ++#define IXGBE_PCI_DEVICE_CONTROL2_16ms 0x0005 + + #define IXGBE_PCIDEVCTRL2_TIMEO_MASK 0xf + #define IXGBE_PCIDEVCTRL2_16_32ms_def 0x0 +@@ -1875,55 +2499,72 @@ enum { + /* Number of 100 microseconds we wait for PCI Express master disable */ + #define IXGBE_PCI_MASTER_DISABLE_TIMEOUT 800 + ++/* Check whether address is multicast. This is little-endian specific check.*/ ++#define IXGBE_IS_MULTICAST(Address) \ ++ (bool)(((u8 *)(Address))[0] & ((u8)0x01)) ++ ++/* Check whether an address is broadcast. 
*/ ++#define IXGBE_IS_BROADCAST(Address) \ ++ ((((u8 *)(Address))[0] == ((u8)0xff)) && \ ++ (((u8 *)(Address))[1] == ((u8)0xff))) ++ + /* RAH */ +-#define IXGBE_RAH_VIND_MASK 0x003C0000 +-#define IXGBE_RAH_VIND_SHIFT 18 +-#define IXGBE_RAH_AV 0x80000000 +-#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF ++#define IXGBE_RAH_VIND_MASK 0x003C0000 ++#define IXGBE_RAH_VIND_SHIFT 18 ++#define IXGBE_RAH_AV 0x80000000 ++#define IXGBE_CLEAR_VMDQ_ALL 0xFFFFFFFF + + /* Header split receive */ +-#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 +-#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E +-#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 ++#define IXGBE_RFCTL_ISCSI_DIS 0x00000001 ++#define IXGBE_RFCTL_ISCSI_DWC_MASK 0x0000003E ++#define IXGBE_RFCTL_ISCSI_DWC_SHIFT 1 + #define IXGBE_RFCTL_RSC_DIS 0x00000020 +-#define IXGBE_RFCTL_NFSW_DIS 0x00000040 +-#define IXGBE_RFCTL_NFSR_DIS 0x00000080 +-#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 +-#define IXGBE_RFCTL_NFS_VER_SHIFT 8 +-#define IXGBE_RFCTL_NFS_VER_2 0 +-#define IXGBE_RFCTL_NFS_VER_3 1 +-#define IXGBE_RFCTL_NFS_VER_4 2 +-#define IXGBE_RFCTL_IPV6_DIS 0x00000400 +-#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 +-#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 +-#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 +-#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 ++#define IXGBE_RFCTL_NFSW_DIS 0x00000040 ++#define IXGBE_RFCTL_NFSR_DIS 0x00000080 ++#define IXGBE_RFCTL_NFS_VER_MASK 0x00000300 ++#define IXGBE_RFCTL_NFS_VER_SHIFT 8 ++#define IXGBE_RFCTL_NFS_VER_2 0 ++#define IXGBE_RFCTL_NFS_VER_3 1 ++#define IXGBE_RFCTL_NFS_VER_4 2 ++#define IXGBE_RFCTL_IPV6_DIS 0x00000400 ++#define IXGBE_RFCTL_IPV6_XSUM_DIS 0x00000800 ++#define IXGBE_RFCTL_IPFRSP_DIS 0x00004000 ++#define IXGBE_RFCTL_IPV6_EX_DIS 0x00010000 ++#define IXGBE_RFCTL_NEW_IPV6_EXT_DIS 0x00020000 + + /* Transmit Config masks */ +-#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Enable specific Tx Queue */ +-#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. 
write-back flushing */ +-#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ ++#define IXGBE_TXDCTL_ENABLE 0x02000000 /* Ena specific Tx Queue */ ++#define IXGBE_TXDCTL_SWFLSH 0x04000000 /* Tx Desc. wr-bk flushing */ ++#define IXGBE_TXDCTL_WTHRESH_SHIFT 16 /* shift to WTHRESH bits */ + /* Enable short packet padding to 64 bytes */ +-#define IXGBE_TX_PAD_ENABLE 0x00000400 +-#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ ++#define IXGBE_TX_PAD_ENABLE 0x00000400 ++#define IXGBE_JUMBO_FRAME_ENABLE 0x00000004 /* Allow jumbo frames */ + /* This allows for 16K packets + 4k for vlan */ +-#define IXGBE_MAX_FRAME_SZ 0x40040000 ++#define IXGBE_MAX_FRAME_SZ 0x40040000 + +-#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ +-#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ ++#define IXGBE_TDWBAL_HEAD_WB_ENABLE 0x1 /* Tx head write-back enable */ ++#define IXGBE_TDWBAL_SEQNUM_WB_ENABLE 0x2 /* Tx seq# write-back enable */ + + /* Receive Config masks */ +-#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ +-#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Descriptor Monitor Bypass */ +-#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Enable specific Rx Queue */ +-#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc. 
write-back flushing */ +-#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* Only supported on the X540 */ +-#define IXGBE_RXDCTL_RLPML_EN 0x00008000 +-#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ +- +-#define IXGBE_TSAUXC_EN_CLK 0x00000004 +-#define IXGBE_TSAUXC_SYNCLK 0x00000008 +-#define IXGBE_TSAUXC_SDP0_INT 0x00000040 ++#define IXGBE_RXCTRL_RXEN 0x00000001 /* Enable Receiver */ ++#define IXGBE_RXCTRL_DMBYPS 0x00000002 /* Desc Monitor Bypass */ ++#define IXGBE_RXDCTL_ENABLE 0x02000000 /* Ena specific Rx Queue */ ++#define IXGBE_RXDCTL_SWFLSH 0x04000000 /* Rx Desc wr-bk flushing */ ++#define IXGBE_RXDCTL_RLPMLMASK 0x00003FFF /* X540 supported only */ ++#define IXGBE_RXDCTL_RLPML_EN 0x00008000 ++#define IXGBE_RXDCTL_VME 0x40000000 /* VLAN mode enable */ ++ ++#define IXGBE_TSAUXC_EN_CLK 0x00000004 ++#define IXGBE_TSAUXC_SYNCLK 0x00000008 ++#define IXGBE_TSAUXC_SDP0_INT 0x00000040 ++#define IXGBE_TSAUXC_EN_TT0 0x00000001 ++#define IXGBE_TSAUXC_EN_TT1 0x00000002 ++#define IXGBE_TSAUXC_ST0 0x00000010 ++#define IXGBE_TSAUXC_DISABLE_SYSTIME 0x80000000 ++ ++#define IXGBE_TSSDP_TS_SDP0_SEL_MASK 0x000000C0 ++#define IXGBE_TSSDP_TS_SDP0_CLK0 0x00000080 ++#define IXGBE_TSSDP_TS_SDP0_EN 0x00000100 + + #define IXGBE_TSYNCTXCTL_VALID 0x00000001 /* Tx timestamp valid */ + #define IXGBE_TSYNCTXCTL_ENABLED 0x00000010 /* Tx timestamping enabled */ +@@ -1933,8 +2574,19 @@ enum { + #define IXGBE_TSYNCRXCTL_TYPE_L2_V2 0x00 + #define IXGBE_TSYNCRXCTL_TYPE_L4_V1 0x02 + #define IXGBE_TSYNCRXCTL_TYPE_L2_L4_V2 0x04 ++#define IXGBE_TSYNCRXCTL_TYPE_ALL 0x08 + #define IXGBE_TSYNCRXCTL_TYPE_EVENT_V2 0x0A + #define IXGBE_TSYNCRXCTL_ENABLED 0x00000010 /* Rx Timestamping enabled */ ++#define IXGBE_TSYNCRXCTL_TSIP_UT_EN 0x00800000 /* Rx Timestamp in Packet */ ++#define IXGBE_TSYNCRXCTL_TSIP_UP_MASK 0xFF000000 /* Rx Timestamp UP Mask */ ++ ++#define IXGBE_TSIM_SYS_WRAP 0x00000001 ++#define IXGBE_TSIM_TXTS 0x00000002 ++#define IXGBE_TSIM_TADJ 0x00000080 ++ ++#define IXGBE_TSICR_SYS_WRAP 
IXGBE_TSIM_SYS_WRAP ++#define IXGBE_TSICR_TXTS IXGBE_TSIM_TXTS ++#define IXGBE_TSICR_TADJ IXGBE_TSIM_TADJ + + #define IXGBE_RXMTRL_V1_CTRLT_MASK 0x000000FF + #define IXGBE_RXMTRL_V1_SYNC_MSG 0x00 +@@ -1943,359 +2595,545 @@ enum { + #define IXGBE_RXMTRL_V1_DELAY_RESP_MSG 0x03 + #define IXGBE_RXMTRL_V1_MGMT_MSG 0x04 + +-#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 +-#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 +-#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 +-#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 +-#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 +-#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 +-#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 +-#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 +-#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 +-#define IXGBE_RXMTRL_V2_SIGNALING_MSG 0x0C00 +-#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 +- +-#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ +-#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ +-#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ +-#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept Mode */ +-#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ +-#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ ++#define IXGBE_RXMTRL_V2_MSGID_MASK 0x0000FF00 ++#define IXGBE_RXMTRL_V2_SYNC_MSG 0x0000 ++#define IXGBE_RXMTRL_V2_DELAY_REQ_MSG 0x0100 ++#define IXGBE_RXMTRL_V2_PDELAY_REQ_MSG 0x0200 ++#define IXGBE_RXMTRL_V2_PDELAY_RESP_MSG 0x0300 ++#define IXGBE_RXMTRL_V2_FOLLOWUP_MSG 0x0800 ++#define IXGBE_RXMTRL_V2_DELAY_RESP_MSG 0x0900 ++#define IXGBE_RXMTRL_V2_PDELAY_FOLLOWUP_MSG 0x0A00 ++#define IXGBE_RXMTRL_V2_ANNOUNCE_MSG 0x0B00 ++#define IXGBE_RXMTRL_V2_SIGNALLING_MSG 0x0C00 ++#define IXGBE_RXMTRL_V2_MGMT_MSG 0x0D00 ++ ++#define IXGBE_FCTRL_SBP 0x00000002 /* Store Bad Packet */ ++#define IXGBE_FCTRL_MPE 0x00000100 /* Multicast Promiscuous Ena*/ ++#define IXGBE_FCTRL_UPE 0x00000200 /* Unicast Promiscuous Ena */ ++#define IXGBE_FCTRL_BAM 0x00000400 /* Broadcast Accept 
Mode */ ++#define IXGBE_FCTRL_PMCF 0x00001000 /* Pass MAC Control Frames */ ++#define IXGBE_FCTRL_DPF 0x00002000 /* Discard Pause Frame */ + /* Receive Priority Flow Control Enable */ +-#define IXGBE_FCTRL_RPFCE 0x00004000 +-#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ +-#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ +-#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ +-#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ +-#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ +-#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Receive FC Mask */ +- +-#define IXGBE_MFLCN_RPFCE_SHIFT 4 ++#define IXGBE_FCTRL_RPFCE 0x00004000 ++#define IXGBE_FCTRL_RFCE 0x00008000 /* Receive Flow Control Ena */ ++#define IXGBE_MFLCN_PMCF 0x00000001 /* Pass MAC Control Frames */ ++#define IXGBE_MFLCN_DPF 0x00000002 /* Discard Pause Frame */ ++#define IXGBE_MFLCN_RPFCE 0x00000004 /* Receive Priority FC Enable */ ++#define IXGBE_MFLCN_RFCE 0x00000008 /* Receive FC Enable */ ++#define IXGBE_MFLCN_RPFCE_MASK 0x00000FF4 /* Rx Priority FC bitmap mask */ ++#define IXGBE_MFLCN_RPFCE_SHIFT 4 /* Rx Priority FC bitmap shift */ + + /* Multiple Receive Queue Control */ +-#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ +-#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ +-#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ +-#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ +-#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ +-#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ +-#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ +-#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ +-#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ +-#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ +-#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ +-#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 +-#define 
IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 +-#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 ++#define IXGBE_MRQC_RSSEN 0x00000001 /* RSS Enable */ ++#define IXGBE_MRQC_MRQE_MASK 0xF /* Bits 3:0 */ ++#define IXGBE_MRQC_RT8TCEN 0x00000002 /* 8 TC no RSS */ ++#define IXGBE_MRQC_RT4TCEN 0x00000003 /* 4 TC no RSS */ ++#define IXGBE_MRQC_RTRSS8TCEN 0x00000004 /* 8 TC w/ RSS */ ++#define IXGBE_MRQC_RTRSS4TCEN 0x00000005 /* 4 TC w/ RSS */ ++#define IXGBE_MRQC_VMDQEN 0x00000008 /* VMDq2 64 pools no RSS */ ++#define IXGBE_MRQC_VMDQRSS32EN 0x0000000A /* VMDq2 32 pools w/ RSS */ ++#define IXGBE_MRQC_VMDQRSS64EN 0x0000000B /* VMDq2 64 pools w/ RSS */ ++#define IXGBE_MRQC_VMDQRT8TCEN 0x0000000C /* VMDq2/RT 16 pool 8 TC */ ++#define IXGBE_MRQC_VMDQRT4TCEN 0x0000000D /* VMDq2/RT 32 pool 4 TC */ ++#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 /* Enable L3/L4 Tx switch */ ++#define IXGBE_MRQC_RSS_FIELD_MASK 0xFFFF0000 ++#define IXGBE_MRQC_RSS_FIELD_IPV4_TCP 0x00010000 ++#define IXGBE_MRQC_RSS_FIELD_IPV4 0x00020000 + #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_TCP 0x00040000 +-#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 +-#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 +-#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 +-#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 +-#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 ++#define IXGBE_MRQC_RSS_FIELD_IPV6_EX 0x00080000 ++#define IXGBE_MRQC_RSS_FIELD_IPV6 0x00100000 ++#define IXGBE_MRQC_RSS_FIELD_IPV6_TCP 0x00200000 ++#define IXGBE_MRQC_RSS_FIELD_IPV4_UDP 0x00400000 ++#define IXGBE_MRQC_RSS_FIELD_IPV6_UDP 0x00800000 + #define IXGBE_MRQC_RSS_FIELD_IPV6_EX_UDP 0x01000000 +-#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 +- +-#define IXGBE_FWSM_TS_ENABLED 0x1 ++#define IXGBE_MRQC_MULTIPLE_RSS 0x00002000 ++#define IXGBE_MRQC_L3L4TXSWEN 0x00008000 + + /* Queue Drop Enable */ + #define IXGBE_QDE_ENABLE 0x00000001 ++#define IXGBE_QDE_HIDE_VLAN 0x00000002 + #define IXGBE_QDE_IDX_MASK 0x00007F00 + #define IXGBE_QDE_IDX_SHIFT 8 + #define IXGBE_QDE_WRITE 0x00010000 +- 
+-#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ +-#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ +-#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ +-#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ +-#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ +-#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ +-#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Descriptor extension (0 = legacy) */ +-#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ +-#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ +- +-#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 +-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 +-#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 +-#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 +-#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 ++#define IXGBE_QDE_READ 0x00020000 ++ ++#define IXGBE_TXD_POPTS_IXSM 0x01 /* Insert IP checksum */ ++#define IXGBE_TXD_POPTS_TXSM 0x02 /* Insert TCP/UDP checksum */ ++#define IXGBE_TXD_CMD_EOP 0x01000000 /* End of Packet */ ++#define IXGBE_TXD_CMD_IFCS 0x02000000 /* Insert FCS (Ethernet CRC) */ ++#define IXGBE_TXD_CMD_IC 0x04000000 /* Insert Checksum */ ++#define IXGBE_TXD_CMD_RS 0x08000000 /* Report Status */ ++#define IXGBE_TXD_CMD_DEXT 0x20000000 /* Desc extension (0 = legacy) */ ++#define IXGBE_TXD_CMD_VLE 0x40000000 /* Add VLAN tag */ ++#define IXGBE_TXD_STAT_DD 0x00000001 /* Descriptor Done */ ++ ++#define IXGBE_RXDADV_IPSEC_STATUS_SECP 0x00020000 ++#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_PROTOCOL 0x08000000 ++#define IXGBE_RXDADV_IPSEC_ERROR_INVALID_LENGTH 0x10000000 ++#define IXGBE_RXDADV_IPSEC_ERROR_AUTH_FAILED 0x18000000 ++#define IXGBE_RXDADV_IPSEC_ERROR_BIT_MASK 0x18000000 + /* Multiple Transmit Queue Command Register */ +-#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ +-#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ +-#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ +-#define 
IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ +-#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ +-#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ +-#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA or 4 TQ if VT_ENA */ ++#define IXGBE_MTQC_RT_ENA 0x1 /* DCB Enable */ ++#define IXGBE_MTQC_VT_ENA 0x2 /* VMDQ2 Enable */ ++#define IXGBE_MTQC_64Q_1PB 0x0 /* 64 queues 1 pack buffer */ ++#define IXGBE_MTQC_32VF 0x8 /* 4 TX Queues per pool w/32VF's */ ++#define IXGBE_MTQC_64VF 0x4 /* 2 TX Queues per pool w/64VF's */ ++#define IXGBE_MTQC_4TC_4TQ 0x8 /* 4 TC if RT_ENA and VT_ENA */ ++#define IXGBE_MTQC_8TC_8TQ 0xC /* 8 TC if RT_ENA or 8 TQ if VT_ENA */ + + /* Receive Descriptor bit definitions */ +-#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ +-#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ +-#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ +-#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ +-#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ +-#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 +-#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ +-#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ +-#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ +-#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ +-#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ +-#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ +-#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ +-#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ +-#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ +-#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ +-#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ +-#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ +-#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ +-#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ +-#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ +-#define 
IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ +-#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ +-#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ +-#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ +-#define IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ +-#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ +-#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ +-#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCoEFe/IPE */ +-#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ +-#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ +-#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ +-#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ +-#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ +-#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ +-#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ +-#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ +-#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ +-#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ +-#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ +-#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ +-#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ +-#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ +-#define IXGBE_RXD_PRI_SHIFT 13 +-#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ +-#define IXGBE_RXD_CFI_SHIFT 12 +- +-#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ +-#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ +-#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ +-#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ +-#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ +-#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ +-#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ 
+-#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ +-#define IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ +-#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ +-#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ +-#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE 1588 Time Stamp */ ++#define IXGBE_RXD_STAT_DD 0x01 /* Descriptor Done */ ++#define IXGBE_RXD_STAT_EOP 0x02 /* End of Packet */ ++#define IXGBE_RXD_STAT_FLM 0x04 /* FDir Match */ ++#define IXGBE_RXD_STAT_VP 0x08 /* IEEE VLAN Packet */ ++#define IXGBE_RXDADV_NEXTP_MASK 0x000FFFF0 /* Next Descriptor Index */ ++#define IXGBE_RXDADV_NEXTP_SHIFT 0x00000004 ++#define IXGBE_RXD_STAT_UDPCS 0x10 /* UDP xsum calculated */ ++#define IXGBE_RXD_STAT_L4CS 0x20 /* L4 xsum calculated */ ++#define IXGBE_RXD_STAT_IPCS 0x40 /* IP xsum calculated */ ++#define IXGBE_RXD_STAT_PIF 0x80 /* passed in-exact filter */ ++#define IXGBE_RXD_STAT_CRCV 0x100 /* Speculative CRC Valid */ ++#define IXGBE_RXD_STAT_OUTERIPCS 0x100 /* Cloud IP xsum calculated */ ++#define IXGBE_RXD_STAT_VEXT 0x200 /* 1st VLAN found */ ++#define IXGBE_RXD_STAT_UDPV 0x400 /* Valid UDP checksum */ ++#define IXGBE_RXD_STAT_DYNINT 0x800 /* Pkt caused INT via DYNINT */ ++#define IXGBE_RXD_STAT_LLINT 0x800 /* Pkt caused Low Latency Interrupt */ ++#define IXGBE_RXD_STAT_TSIP 0x08000 /* Time Stamp in packet buffer */ ++#define IXGBE_RXD_STAT_TS 0x10000 /* Time Stamp */ ++#define IXGBE_RXD_STAT_SECP 0x20000 /* Security Processing */ ++#define IXGBE_RXD_STAT_LB 0x40000 /* Loopback Status */ ++#define IXGBE_RXD_STAT_ACK 0x8000 /* ACK Packet indication */ ++#define IXGBE_RXD_ERR_CE 0x01 /* CRC Error */ ++#define IXGBE_RXD_ERR_LE 0x02 /* Length Error */ ++#define IXGBE_RXD_ERR_PE 0x08 /* Packet Error */ ++#define IXGBE_RXD_ERR_OSE 0x10 /* Oversize Error */ ++#define IXGBE_RXD_ERR_USE 0x20 /* Undersize Error */ ++#define IXGBE_RXD_ERR_TCPE 0x40 /* TCP/UDP Checksum Error */ ++#define 
IXGBE_RXD_ERR_IPE 0x80 /* IP Checksum Error */ ++#define IXGBE_RXDADV_ERR_MASK 0xfff00000 /* RDESC.ERRORS mask */ ++#define IXGBE_RXDADV_ERR_SHIFT 20 /* RDESC.ERRORS shift */ ++#define IXGBE_RXDADV_ERR_OUTERIPER 0x04000000 /* CRC IP Header error */ ++#define IXGBE_RXDADV_ERR_RXE 0x20000000 /* Any MAC Error */ ++#define IXGBE_RXDADV_ERR_FCEOFE 0x80000000 /* FCEOFe/IPE */ ++#define IXGBE_RXDADV_ERR_FCERR 0x00700000 /* FCERR/FDIRERR */ ++#define IXGBE_RXDADV_ERR_FDIR_LEN 0x00100000 /* FDIR Length error */ ++#define IXGBE_RXDADV_ERR_FDIR_DROP 0x00200000 /* FDIR Drop error */ ++#define IXGBE_RXDADV_ERR_FDIR_COLL 0x00400000 /* FDIR Collision error */ ++#define IXGBE_RXDADV_ERR_HBO 0x00800000 /*Header Buffer Overflow */ ++#define IXGBE_RXDADV_ERR_CE 0x01000000 /* CRC Error */ ++#define IXGBE_RXDADV_ERR_LE 0x02000000 /* Length Error */ ++#define IXGBE_RXDADV_ERR_PE 0x08000000 /* Packet Error */ ++#define IXGBE_RXDADV_ERR_OSE 0x10000000 /* Oversize Error */ ++#define IXGBE_RXDADV_ERR_USE 0x20000000 /* Undersize Error */ ++#define IXGBE_RXDADV_ERR_TCPE 0x40000000 /* TCP/UDP Checksum Error */ ++#define IXGBE_RXDADV_ERR_IPE 0x80000000 /* IP Checksum Error */ ++#define IXGBE_RXD_VLAN_ID_MASK 0x0FFF /* VLAN ID is in lower 12 bits */ ++#define IXGBE_RXD_PRI_MASK 0xE000 /* Priority is in upper 3 bits */ ++#define IXGBE_RXD_PRI_SHIFT 13 ++#define IXGBE_RXD_CFI_MASK 0x1000 /* CFI is bit 12 */ ++#define IXGBE_RXD_CFI_SHIFT 12 ++ ++#define IXGBE_RXDADV_STAT_DD IXGBE_RXD_STAT_DD /* Done */ ++#define IXGBE_RXDADV_STAT_EOP IXGBE_RXD_STAT_EOP /* End of Packet */ ++#define IXGBE_RXDADV_STAT_FLM IXGBE_RXD_STAT_FLM /* FDir Match */ ++#define IXGBE_RXDADV_STAT_VP IXGBE_RXD_STAT_VP /* IEEE VLAN Pkt */ ++#define IXGBE_RXDADV_STAT_MASK 0x000fffff /* Stat/NEXTP: bit 0-19 */ ++#define IXGBE_RXDADV_STAT_FCEOFS 0x00000040 /* FCoE EOF/SOF Stat */ ++#define IXGBE_RXDADV_STAT_FCSTAT 0x00000030 /* FCoE Pkt Stat */ ++#define IXGBE_RXDADV_STAT_FCSTAT_NOMTCH 0x00000000 /* 00: No Ctxt Match */ ++#define 
IXGBE_RXDADV_STAT_FCSTAT_NODDP 0x00000010 /* 01: Ctxt w/o DDP */ ++#define IXGBE_RXDADV_STAT_FCSTAT_FCPRSP 0x00000020 /* 10: Recv. FCP_RSP */ ++#define IXGBE_RXDADV_STAT_FCSTAT_DDP 0x00000030 /* 11: Ctxt w/ DDP */ ++#define IXGBE_RXDADV_STAT_TS 0x00010000 /* IEEE1588 Time Stamp */ ++#define IXGBE_RXDADV_STAT_TSIP 0x00008000 /* Time Stamp in packet buffer */ + + /* PSRTYPE bit definitions */ +-#define IXGBE_PSRTYPE_TCPHDR 0x00000010 +-#define IXGBE_PSRTYPE_UDPHDR 0x00000020 +-#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 +-#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 +-#define IXGBE_PSRTYPE_L2HDR 0x00001000 ++#define IXGBE_PSRTYPE_TCPHDR 0x00000010 ++#define IXGBE_PSRTYPE_UDPHDR 0x00000020 ++#define IXGBE_PSRTYPE_IPV4HDR 0x00000100 ++#define IXGBE_PSRTYPE_IPV6HDR 0x00000200 ++#define IXGBE_PSRTYPE_L2HDR 0x00001000 + + /* SRRCTL bit definitions */ +-#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ +-#define IXGBE_SRRCTL_RDMTS_SHIFT 22 +-#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 +-#define IXGBE_SRRCTL_DROP_EN 0x10000000 +-#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F +-#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 +-#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 ++#define IXGBE_SRRCTL_BSIZEPKT_SHIFT 10 /* so many KBs */ ++#define IXGBE_SRRCTL_BSIZEHDRSIZE_SHIFT 2 /* 64byte resolution (>> 6) ++ * + at bit 8 offset (<< 8) ++ * = (<< 2) ++ */ ++#define IXGBE_SRRCTL_RDMTS_SHIFT 22 ++#define IXGBE_SRRCTL_RDMTS_MASK 0x01C00000 ++#define IXGBE_SRRCTL_DROP_EN 0x10000000 ++#define IXGBE_SRRCTL_BSIZEPKT_MASK 0x0000007F ++#define IXGBE_SRRCTL_BSIZEHDR_MASK 0x00003F00 ++#define IXGBE_SRRCTL_DESCTYPE_LEGACY 0x00000000 + #define IXGBE_SRRCTL_DESCTYPE_ADV_ONEBUF 0x02000000 +-#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 ++#define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT 0x04000000 + #define IXGBE_SRRCTL_DESCTYPE_HDR_REPLICATION_LARGE_PKT 0x08000000 + #define IXGBE_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS 0x0A000000 +-#define IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 ++#define 
IXGBE_SRRCTL_DESCTYPE_MASK 0x0E000000 + +-#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 +-#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF ++#define IXGBE_RXDPS_HDRSTAT_HDRSP 0x00008000 ++#define IXGBE_RXDPS_HDRSTAT_HDRLEN_MASK 0x000003FF + +-#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F +-#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 +-#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 +-#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 +-#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 +-#define IXGBE_RXDADV_RSCCNT_SHIFT 17 +-#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 +-#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 +-#define IXGBE_RXDADV_SPH 0x8000 ++#define IXGBE_RXDADV_RSSTYPE_MASK 0x0000000F ++#define IXGBE_RXDADV_PKTTYPE_MASK 0x0000FFF0 ++#define IXGBE_RXDADV_PKTTYPE_MASK_EX 0x0001FFF0 ++#define IXGBE_RXDADV_HDRBUFLEN_MASK 0x00007FE0 ++#define IXGBE_RXDADV_RSCCNT_MASK 0x001E0000 ++#define IXGBE_RXDADV_RSCCNT_SHIFT 17 ++#define IXGBE_RXDADV_HDRBUFLEN_SHIFT 5 ++#define IXGBE_RXDADV_SPLITHEADER_EN 0x00001000 ++#define IXGBE_RXDADV_SPH 0x8000 + + /* RSS Hash results */ +-#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 +-#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 +-#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 +-#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 +-#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 +-#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 ++#define IXGBE_RXDADV_RSSTYPE_NONE 0x00000000 ++#define IXGBE_RXDADV_RSSTYPE_IPV4_TCP 0x00000001 ++#define IXGBE_RXDADV_RSSTYPE_IPV4 0x00000002 ++#define IXGBE_RXDADV_RSSTYPE_IPV6_TCP 0x00000003 ++#define IXGBE_RXDADV_RSSTYPE_IPV6_EX 0x00000004 ++#define IXGBE_RXDADV_RSSTYPE_IPV6 0x00000005 + #define IXGBE_RXDADV_RSSTYPE_IPV6_TCP_EX 0x00000006 +-#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 +-#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 ++#define IXGBE_RXDADV_RSSTYPE_IPV4_UDP 0x00000007 ++#define IXGBE_RXDADV_RSSTYPE_IPV6_UDP 0x00000008 + #define IXGBE_RXDADV_RSSTYPE_IPV6_UDP_EX 0x00000009 + + /* RSS Packet Types as 
indicated in the receive descriptor. */ +-#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 +-#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ +-#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ +-#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ +-#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ +-#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ +-#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ +-#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ +-#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ +-#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ +-#define IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ +-#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ +-#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ +-#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ +-#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ ++#define IXGBE_RXDADV_PKTTYPE_NONE 0x00000000 ++#define IXGBE_RXDADV_PKTTYPE_IPV4 0x00000010 /* IPv4 hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_IPV4_EX 0x00000020 /* IPv4 hdr + extensions */ ++#define IXGBE_RXDADV_PKTTYPE_IPV6 0x00000040 /* IPv6 hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_IPV6_EX 0x00000080 /* IPv6 hdr + extensions */ ++#define IXGBE_RXDADV_PKTTYPE_TCP 0x00000100 /* TCP hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_UDP 0x00000200 /* UDP hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_SCTP 0x00000400 /* SCTP hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_NFS 0x00000800 /* NFS hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_GENEVE 0x00000800 /* GENEVE hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_VXLAN 0x00000800 /* VXLAN hdr present */ ++#define IXGBE_RXDADV_PKTTYPE_TUNNEL 0x00010000 /* Tunnel type */ ++#define IXGBE_RXDADV_PKTTYPE_IPSEC_ESP 0x00001000 /* IPSec ESP */ ++#define 
IXGBE_RXDADV_PKTTYPE_IPSEC_AH 0x00002000 /* IPSec AH */ ++#define IXGBE_RXDADV_PKTTYPE_LINKSEC 0x00004000 /* LinkSec Encap */ ++#define IXGBE_RXDADV_PKTTYPE_ETQF 0x00008000 /* PKTTYPE is ETQF index */ ++#define IXGBE_RXDADV_PKTTYPE_ETQF_MASK 0x00000070 /* ETQF has 8 indices */ ++#define IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT 4 /* Right-shift 4 bits */ + + /* Security Processing bit Indication */ +-#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 +-#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 +-#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 +-#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 +-#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 ++#define IXGBE_RXDADV_LNKSEC_STATUS_SECP 0x00020000 ++#define IXGBE_RXDADV_LNKSEC_ERROR_NO_SA_MATCH 0x08000000 ++#define IXGBE_RXDADV_LNKSEC_ERROR_REPLAY_ERROR 0x10000000 ++#define IXGBE_RXDADV_LNKSEC_ERROR_BIT_MASK 0x18000000 ++#define IXGBE_RXDADV_LNKSEC_ERROR_BAD_SIG 0x18000000 + + /* Masks to determine if packets should be dropped due to frame errors */ + #define IXGBE_RXD_ERR_FRAME_ERR_MASK ( \ +- IXGBE_RXD_ERR_CE | \ +- IXGBE_RXD_ERR_LE | \ +- IXGBE_RXD_ERR_PE | \ +- IXGBE_RXD_ERR_OSE | \ +- IXGBE_RXD_ERR_USE) ++ IXGBE_RXD_ERR_CE | \ ++ IXGBE_RXD_ERR_LE | \ ++ IXGBE_RXD_ERR_PE | \ ++ IXGBE_RXD_ERR_OSE | \ ++ IXGBE_RXD_ERR_USE) + + #define IXGBE_RXDADV_ERR_FRAME_ERR_MASK ( \ +- IXGBE_RXDADV_ERR_CE | \ +- IXGBE_RXDADV_ERR_LE | \ +- IXGBE_RXDADV_ERR_PE | \ +- IXGBE_RXDADV_ERR_OSE | \ +- IXGBE_RXDADV_ERR_USE) ++ IXGBE_RXDADV_ERR_CE | \ ++ IXGBE_RXDADV_ERR_LE | \ ++ IXGBE_RXDADV_ERR_PE | \ ++ IXGBE_RXDADV_ERR_OSE | \ ++ IXGBE_RXDADV_ERR_USE) ++ ++#define IXGBE_RXDADV_ERR_FRAME_ERR_MASK_82599 IXGBE_RXDADV_ERR_RXE + + /* Multicast bit mask */ +-#define IXGBE_MCSTCTRL_MFE 0x4 ++#define IXGBE_MCSTCTRL_MFE 0x4 + + /* Number of Transmit and Receive Descriptors must be a multiple of 8 */ +-#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 +-#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 +-#define 
IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 ++#define IXGBE_REQ_TX_DESCRIPTOR_MULTIPLE 8 ++#define IXGBE_REQ_RX_DESCRIPTOR_MULTIPLE 8 ++#define IXGBE_REQ_TX_BUFFER_GRANULARITY 1024 + + /* Vlan-specific macros */ +-#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ +-#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ +-#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ +-#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT ++#define IXGBE_RX_DESC_SPECIAL_VLAN_MASK 0x0FFF /* VLAN ID in lower 12 bits */ ++#define IXGBE_RX_DESC_SPECIAL_PRI_MASK 0xE000 /* Priority in upper 3 bits */ ++#define IXGBE_RX_DESC_SPECIAL_PRI_SHIFT 0x000D /* Priority in upper 3 of 16 */ ++#define IXGBE_TX_DESC_SPECIAL_PRI_SHIFT IXGBE_RX_DESC_SPECIAL_PRI_SHIFT + + /* SR-IOV specific macros */ +-#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) ++#define IXGBE_MBVFICR_INDEX(vf_number) (vf_number >> 4) + #define IXGBE_MBVFICR(_i) (0x00710 + ((_i) * 4)) +-#define IXGBE_VFLRE(_i) ((((_i) & 1) ? 0x001C0 : 0x00600)) +-#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) ++#define IXGBE_VFLRE(_i) (((_i & 1) ? 0x001C0 : 0x00600)) ++#define IXGBE_VFLREC(_i) (0x00700 + ((_i) * 4)) + /* Translated register #defines */ ++#define IXGBE_PVFCTRL(P) (0x00300 + (4 * (P))) ++#define IXGBE_PVFSTATUS(P) (0x00008 + (0 * (P))) ++#define IXGBE_PVFLINKS(P) (0x042A4 + (0 * (P))) ++#define IXGBE_PVFRTIMER(P) (0x00048 + (0 * (P))) ++#define IXGBE_PVFMAILBOX(P) (0x04C00 + (4 * (P))) ++#define IXGBE_PVFRXMEMWRAP(P) (0x03190 + (0 * (P))) ++#define IXGBE_PVTEICR(P) (0x00B00 + (4 * (P))) ++#define IXGBE_PVTEICS(P) (0x00C00 + (4 * (P))) ++#define IXGBE_PVTEIMS(P) (0x00D00 + (4 * (P))) ++#define IXGBE_PVTEIMC(P) (0x00E00 + (4 * (P))) ++#define IXGBE_PVTEIAC(P) (0x00F00 + (4 * (P))) ++#define IXGBE_PVTEIAM(P) (0x04D00 + (4 * (P))) ++#define IXGBE_PVTEITR(P) (((P) < 24) ? 
(0x00820 + ((P) * 4)) : \ ++ (0x012300 + (((P) - 24) * 4))) ++#define IXGBE_PVTIVAR(P) (0x12500 + (4 * (P))) ++#define IXGBE_PVTIVAR_MISC(P) (0x04E00 + (4 * (P))) ++#define IXGBE_PVTRSCINT(P) (0x12000 + (4 * (P))) ++#define IXGBE_VFPBACL(P) (0x110C8 + (4 * (P))) ++#define IXGBE_PVFRDBAL(P) ((P < 64) ? (0x01000 + (0x40 * (P))) \ ++ : (0x0D000 + (0x40 * ((P) - 64)))) ++#define IXGBE_PVFRDBAH(P) ((P < 64) ? (0x01004 + (0x40 * (P))) \ ++ : (0x0D004 + (0x40 * ((P) - 64)))) ++#define IXGBE_PVFRDLEN(P) ((P < 64) ? (0x01008 + (0x40 * (P))) \ ++ : (0x0D008 + (0x40 * ((P) - 64)))) ++#define IXGBE_PVFRDH(P) ((P < 64) ? (0x01010 + (0x40 * (P))) \ ++ : (0x0D010 + (0x40 * ((P) - 64)))) ++#define IXGBE_PVFRDT(P) ((P < 64) ? (0x01018 + (0x40 * (P))) \ ++ : (0x0D018 + (0x40 * ((P) - 64)))) ++#define IXGBE_PVFRXDCTL(P) ((P < 64) ? (0x01028 + (0x40 * (P))) \ ++ : (0x0D028 + (0x40 * ((P) - 64)))) ++#define IXGBE_PVFSRRCTL(P) ((P < 64) ? (0x01014 + (0x40 * (P))) \ ++ : (0x0D014 + (0x40 * ((P) - 64)))) ++#define IXGBE_PVFPSRTYPE(P) (0x0EA00 + (4 * (P))) ++#define IXGBE_PVFTDBAL(P) (0x06000 + (0x40 * (P))) ++#define IXGBE_PVFTDBAH(P) (0x06004 + (0x40 * (P))) ++#define IXGBE_PVFTDLEN(P) (0x06008 + (0x40 * (P))) ++#define IXGBE_PVFTDH(P) (0x06010 + (0x40 * (P))) ++#define IXGBE_PVFTDT(P) (0x06018 + (0x40 * (P))) ++#define IXGBE_PVFTXDCTL(P) (0x06028 + (0x40 * (P))) + #define IXGBE_PVFTDWBAL(P) (0x06038 + (0x40 * (P))) + #define IXGBE_PVFTDWBAH(P) (0x0603C + (0x40 * (P))) ++#define IXGBE_PVFDCA_RXCTRL(P) (((P) < 64) ? 
(0x0100C + (0x40 * (P))) \ ++ : (0x0D00C + (0x40 * ((P) - 64)))) ++#define IXGBE_PVFDCA_TXCTRL(P) (0x0600C + (0x40 * (P))) ++#define IXGBE_PVFGPRC(x) (0x0101C + (0x40 * (x))) ++#define IXGBE_PVFGPTC(x) (0x08300 + (0x04 * (x))) ++#define IXGBE_PVFGORC_LSB(x) (0x01020 + (0x40 * (x))) ++#define IXGBE_PVFGORC_MSB(x) (0x0D020 + (0x40 * (x))) ++#define IXGBE_PVFGOTC_LSB(x) (0x08400 + (0x08 * (x))) ++#define IXGBE_PVFGOTC_MSB(x) (0x08404 + (0x08 * (x))) ++#define IXGBE_PVFMPRC(x) (0x0D01C + (0x40 * (x))) + + #define IXGBE_PVFTDWBALn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAL((q_per_pool)*(vf_number) + (vf_q_index))) + #define IXGBE_PVFTDWBAHn(q_per_pool, vf_number, vf_q_index) \ + (IXGBE_PVFTDWBAH((q_per_pool)*(vf_number) + (vf_q_index))) + ++#define IXGBE_PVFTDHn(q_per_pool, vf_number, vf_q_index) \ ++ (IXGBE_PVFTDH((q_per_pool)*(vf_number) + (vf_q_index))) ++#define IXGBE_PVFTDTn(q_per_pool, vf_number, vf_q_index) \ ++ (IXGBE_PVFTDT((q_per_pool)*(vf_number) + (vf_q_index))) ++ ++/* Little Endian defines */ ++#ifndef __le16 ++#define __le16 u16 ++#endif ++#ifndef __le32 ++#define __le32 u32 ++#endif ++#ifndef __le64 ++#define __le64 u64 ++ ++#endif ++#ifndef __be16 ++/* Big Endian defines */ ++#define __be16 u16 ++#define __be32 u32 ++#define __be64 u64 ++ ++#endif + enum ixgbe_fdir_pballoc_type { + IXGBE_FDIR_PBALLOC_NONE = 0, + IXGBE_FDIR_PBALLOC_64K = 1, + IXGBE_FDIR_PBALLOC_128K = 2, + IXGBE_FDIR_PBALLOC_256K = 3, + }; +-#define IXGBE_FDIR_PBALLOC_SIZE_SHIFT 16 + + /* Flow Director register values */ +-#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 +-#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 +-#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 +-#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 +-#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 +-#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 +-#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 +-#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 +-#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 +-#define IXGBE_FDIRCTRL_SEARCHLIM 
0x00800000 +-#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 +-#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 +-#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 +- +-#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 +-#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 +-#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 +-#define IXGBE_FDIRM_VLANID 0x00000001 +-#define IXGBE_FDIRM_VLANP 0x00000002 +-#define IXGBE_FDIRM_POOL 0x00000004 +-#define IXGBE_FDIRM_L4P 0x00000008 +-#define IXGBE_FDIRM_FLEX 0x00000010 +-#define IXGBE_FDIRM_DIPv6 0x00000020 +- +-#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF +-#define IXGBE_FDIRFREE_FREE_SHIFT 0 +-#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 +-#define IXGBE_FDIRFREE_COLL_SHIFT 16 +-#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F +-#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 +-#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 +-#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 +-#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF +-#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 +-#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 +-#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 +-#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF +-#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 +-#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 +-#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 +-#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 +-#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 +-#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 +-#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 +- +-#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 +-#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 +-#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 +-#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 +-#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 +-#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 +-#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 +-#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 +-#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 +-#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 +-#define IXGBE_FDIRCMD_IPV6 0x00000080 +-#define IXGBE_FDIRCMD_CLEARHT 0x00000100 +-#define IXGBE_FDIRCMD_DROP 0x00000200 +-#define IXGBE_FDIRCMD_INT 
0x00000400 +-#define IXGBE_FDIRCMD_LAST 0x00000800 +-#define IXGBE_FDIRCMD_COLLISION 0x00001000 +-#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 +-#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 +-#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 +-#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 +-#define IXGBE_FDIR_INIT_DONE_POLL 10 +-#define IXGBE_FDIRCMD_CMD_POLL 10 +- +-#define IXGBE_FDIR_DROP_QUEUE 127 ++#define IXGBE_FDIRCTRL_PBALLOC_64K 0x00000001 ++#define IXGBE_FDIRCTRL_PBALLOC_128K 0x00000002 ++#define IXGBE_FDIRCTRL_PBALLOC_256K 0x00000003 ++#define IXGBE_FDIRCTRL_INIT_DONE 0x00000008 ++#define IXGBE_FDIRCTRL_PERFECT_MATCH 0x00000010 ++#define IXGBE_FDIRCTRL_REPORT_STATUS 0x00000020 ++#define IXGBE_FDIRCTRL_REPORT_STATUS_ALWAYS 0x00000080 ++#define IXGBE_FDIRCTRL_DROP_Q_SHIFT 8 ++#define IXGBE_FDIRCTRL_DROP_Q_MASK 0x00007F00 ++#define IXGBE_FDIRCTRL_FLEX_SHIFT 16 ++#define IXGBE_FDIRCTRL_DROP_NO_MATCH 0x00008000 ++#define IXGBE_FDIRCTRL_FILTERMODE_SHIFT 21 ++#define IXGBE_FDIRCTRL_FILTERMODE_MACVLAN 0x0001 /* bit 23:21, 001b */ ++#define IXGBE_FDIRCTRL_FILTERMODE_CLOUD 0x0002 /* bit 23:21, 010b */ ++#define IXGBE_FDIRCTRL_SEARCHLIM 0x00800000 ++#define IXGBE_FDIRCTRL_FILTERMODE_MASK 0x00E00000 ++#define IXGBE_FDIRCTRL_MAX_LENGTH_SHIFT 24 ++#define IXGBE_FDIRCTRL_FULL_THRESH_MASK 0xF0000000 ++#define IXGBE_FDIRCTRL_FULL_THRESH_SHIFT 28 ++ ++#define IXGBE_FDIRTCPM_DPORTM_SHIFT 16 ++#define IXGBE_FDIRUDPM_DPORTM_SHIFT 16 ++#define IXGBE_FDIRIP6M_DIPM_SHIFT 16 ++#define IXGBE_FDIRM_VLANID 0x00000001 ++#define IXGBE_FDIRM_VLANP 0x00000002 ++#define IXGBE_FDIRM_POOL 0x00000004 ++#define IXGBE_FDIRM_L4P 0x00000008 ++#define IXGBE_FDIRM_FLEX 0x00000010 ++#define IXGBE_FDIRM_DIPv6 0x00000020 ++#define IXGBE_FDIRM_L3P 0x00000040 ++ ++#define IXGBE_FDIRIP6M_INNER_MAC 0x03F0 /* bit 9:4 */ ++#define IXGBE_FDIRIP6M_TUNNEL_TYPE 0x0800 /* bit 11 */ ++#define IXGBE_FDIRIP6M_TNI_VNI 0xF000 /* bit 15:12 */ ++#define IXGBE_FDIRIP6M_TNI_VNI_24 0x1000 /* bit 12 */ ++#define IXGBE_FDIRIP6M_ALWAYS_MASK 0x040F 
/* bit 10, 3:0 */ ++ ++#define IXGBE_FDIRFREE_FREE_MASK 0xFFFF ++#define IXGBE_FDIRFREE_FREE_SHIFT 0 ++#define IXGBE_FDIRFREE_COLL_MASK 0x7FFF0000 ++#define IXGBE_FDIRFREE_COLL_SHIFT 16 ++#define IXGBE_FDIRLEN_MAXLEN_MASK 0x3F ++#define IXGBE_FDIRLEN_MAXLEN_SHIFT 0 ++#define IXGBE_FDIRLEN_MAXHASH_MASK 0x7FFF0000 ++#define IXGBE_FDIRLEN_MAXHASH_SHIFT 16 ++#define IXGBE_FDIRUSTAT_ADD_MASK 0xFFFF ++#define IXGBE_FDIRUSTAT_ADD_SHIFT 0 ++#define IXGBE_FDIRUSTAT_REMOVE_MASK 0xFFFF0000 ++#define IXGBE_FDIRUSTAT_REMOVE_SHIFT 16 ++#define IXGBE_FDIRFSTAT_FADD_MASK 0x00FF ++#define IXGBE_FDIRFSTAT_FADD_SHIFT 0 ++#define IXGBE_FDIRFSTAT_FREMOVE_MASK 0xFF00 ++#define IXGBE_FDIRFSTAT_FREMOVE_SHIFT 8 ++#define IXGBE_FDIRPORT_DESTINATION_SHIFT 16 ++#define IXGBE_FDIRVLAN_FLEX_SHIFT 16 ++#define IXGBE_FDIRHASH_BUCKET_VALID_SHIFT 15 ++#define IXGBE_FDIRHASH_SIG_SW_INDEX_SHIFT 16 ++ ++#define IXGBE_FDIRCMD_CMD_MASK 0x00000003 ++#define IXGBE_FDIRCMD_CMD_ADD_FLOW 0x00000001 ++#define IXGBE_FDIRCMD_CMD_REMOVE_FLOW 0x00000002 ++#define IXGBE_FDIRCMD_CMD_QUERY_REM_FILT 0x00000003 ++#define IXGBE_FDIRCMD_FILTER_VALID 0x00000004 ++#define IXGBE_FDIRCMD_FILTER_UPDATE 0x00000008 ++#define IXGBE_FDIRCMD_IPv6DMATCH 0x00000010 ++#define IXGBE_FDIRCMD_L4TYPE_UDP 0x00000020 ++#define IXGBE_FDIRCMD_L4TYPE_TCP 0x00000040 ++#define IXGBE_FDIRCMD_L4TYPE_SCTP 0x00000060 ++#define IXGBE_FDIRCMD_IPV6 0x00000080 ++#define IXGBE_FDIRCMD_CLEARHT 0x00000100 ++#define IXGBE_FDIRCMD_DROP 0x00000200 ++#define IXGBE_FDIRCMD_INT 0x00000400 ++#define IXGBE_FDIRCMD_LAST 0x00000800 ++#define IXGBE_FDIRCMD_COLLISION 0x00001000 ++#define IXGBE_FDIRCMD_QUEUE_EN 0x00008000 ++#define IXGBE_FDIRCMD_FLOW_TYPE_SHIFT 5 ++#define IXGBE_FDIRCMD_RX_QUEUE_SHIFT 16 ++#define IXGBE_FDIRCMD_TUNNEL_FILTER_SHIFT 23 ++#define IXGBE_FDIRCMD_VT_POOL_SHIFT 24 ++#define IXGBE_FDIR_INIT_DONE_POLL 10 ++#define IXGBE_FDIRCMD_CMD_POLL 10 ++#define IXGBE_FDIRCMD_TUNNEL_FILTER 0x00800000 ++#define IXGBE_FDIR_DROP_QUEUE 127 + + /* 
Manageablility Host Interface defines */ +-#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ +-#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ +-#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ ++#define IXGBE_HI_MAX_BLOCK_BYTE_LENGTH 1792 /* Num of bytes in range */ ++#define IXGBE_HI_MAX_BLOCK_DWORD_LENGTH 448 /* Num of dwords in range */ ++#define IXGBE_HI_COMMAND_TIMEOUT 500 /* Process HI command limit */ ++#define IXGBE_HI_FLASH_ERASE_TIMEOUT 1000 /* Process Erase command limit */ ++#define IXGBE_HI_FLASH_UPDATE_TIMEOUT 5000 /* Process Update command limit */ ++#define IXGBE_HI_FLASH_APPLY_TIMEOUT 0 /* Process Apply command limit */ ++#define IXGBE_HI_PHY_MGMT_REQ_TIMEOUT 2000 /* Wait up to 2 seconds */ + + /* CEM Support */ +-#define FW_CEM_HDR_LEN 0x4 +-#define FW_CEM_CMD_DRIVER_INFO 0xDD +-#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 +-#define FW_CEM_CMD_RESERVED 0x0 +-#define FW_CEM_UNUSED_VER 0x0 +-#define FW_CEM_MAX_RETRIES 3 +-#define FW_CEM_RESP_STATUS_SUCCESS 0x1 ++#define FW_CEM_HDR_LEN 0x4 ++#define FW_CEM_CMD_DRIVER_INFO 0xDD ++#define FW_CEM_CMD_DRIVER_INFO_LEN 0x5 ++#define FW_CEM_CMD_RESERVED 0X0 ++#define FW_CEM_UNUSED_VER 0x0 ++#define FW_CEM_MAX_RETRIES 3 ++#define FW_CEM_RESP_STATUS_SUCCESS 0x1 ++#define FW_CEM_DRIVER_VERSION_SIZE 39 /* +9 would send 48 bytes to fw */ ++#define FW_READ_SHADOW_RAM_CMD 0x31 ++#define FW_READ_SHADOW_RAM_LEN 0x6 ++#define FW_WRITE_SHADOW_RAM_CMD 0x33 ++#define FW_WRITE_SHADOW_RAM_LEN 0xA /* 8 plus 1 WORD to write */ ++#define FW_SHADOW_RAM_DUMP_CMD 0x36 ++#define FW_SHADOW_RAM_DUMP_LEN 0 ++#define FW_DEFAULT_CHECKSUM 0xFF /* checksum always 0xFF */ ++#define FW_NVM_DATA_OFFSET 3 ++#define FW_MAX_READ_BUFFER_SIZE 1024 ++#define FW_DISABLE_RXEN_CMD 0xDE ++#define FW_DISABLE_RXEN_LEN 0x1 ++#define FW_PHY_MGMT_REQ_CMD 0x20 ++#define FW_PHY_TOKEN_REQ_CMD 0xA ++#define FW_PHY_TOKEN_REQ_LEN 2 ++#define FW_PHY_TOKEN_REQ 0 ++#define FW_PHY_TOKEN_REL 1 ++#define 
FW_PHY_TOKEN_OK 1 ++#define FW_PHY_TOKEN_RETRY 0x80 ++#define FW_PHY_TOKEN_DELAY 5 /* milliseconds */ ++#define FW_PHY_TOKEN_WAIT 5 /* seconds */ ++#define FW_PHY_TOKEN_RETRIES ((FW_PHY_TOKEN_WAIT * 1000) / FW_PHY_TOKEN_DELAY) ++#define FW_INT_PHY_REQ_CMD 0xB ++#define FW_INT_PHY_REQ_LEN 10 ++#define FW_INT_PHY_REQ_READ 0 ++#define FW_INT_PHY_REQ_WRITE 1 ++#define FW_PHY_ACT_REQ_CMD 5 ++#define FW_PHY_ACT_DATA_COUNT 4 ++#define FW_PHY_ACT_REQ_LEN (4 + 4 * FW_PHY_ACT_DATA_COUNT) ++#define FW_PHY_ACT_INIT_PHY 1 ++#define FW_PHY_ACT_SETUP_LINK 2 ++#define FW_PHY_ACT_LINK_SPEED_10 (1u << 0) ++#define FW_PHY_ACT_LINK_SPEED_100 (1u << 1) ++#define FW_PHY_ACT_LINK_SPEED_1G (1u << 2) ++#define FW_PHY_ACT_LINK_SPEED_2_5G (1u << 3) ++#define FW_PHY_ACT_LINK_SPEED_5G (1u << 4) ++#define FW_PHY_ACT_LINK_SPEED_10G (1u << 5) ++#define FW_PHY_ACT_LINK_SPEED_20G (1u << 6) ++#define FW_PHY_ACT_LINK_SPEED_25G (1u << 7) ++#define FW_PHY_ACT_LINK_SPEED_40G (1u << 8) ++#define FW_PHY_ACT_LINK_SPEED_50G (1u << 9) ++#define FW_PHY_ACT_LINK_SPEED_100G (1u << 10) ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT 16 ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_MASK (3u << \ ++ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT) ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_NONE 0u ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_TX 1u ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_RX 2u ++#define FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX 3u ++#define FW_PHY_ACT_SETUP_LINK_LP (1u << 18) ++#define FW_PHY_ACT_SETUP_LINK_HP (1u << 19) ++#define FW_PHY_ACT_SETUP_LINK_EEE (1u << 20) ++#define FW_PHY_ACT_SETUP_LINK_AN (1u << 22) ++#define FW_PHY_ACT_SETUP_LINK_RSP_DOWN (1u << 0) ++#define FW_PHY_ACT_GET_LINK_INFO 3 ++#define FW_PHY_ACT_GET_LINK_INFO_EEE (1u << 19) ++#define FW_PHY_ACT_GET_LINK_INFO_FC_TX (1u << 20) ++#define FW_PHY_ACT_GET_LINK_INFO_FC_RX (1u << 21) ++#define FW_PHY_ACT_GET_LINK_INFO_POWER (1u << 22) ++#define FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE (1u << 24) ++#define FW_PHY_ACT_GET_LINK_INFO_TEMP (1u << 25) ++#define 
FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX (1u << 28) ++#define FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX (1u << 29) ++#define FW_PHY_ACT_FORCE_LINK_DOWN 4 ++#define FW_PHY_ACT_FORCE_LINK_DOWN_OFF (1u << 0) ++#define FW_PHY_ACT_PHY_SW_RESET 5 ++#define FW_PHY_ACT_PHY_HW_RESET 6 ++#define FW_PHY_ACT_GET_PHY_INFO 7 ++#define FW_PHY_ACT_UD_2 0x1002 ++#define FW_PHY_ACT_UD_2_10G_KR_EEE (1u << 6) ++#define FW_PHY_ACT_UD_2_10G_KX4_EEE (1u << 5) ++#define FW_PHY_ACT_UD_2_1G_KX_EEE (1u << 4) ++#define FW_PHY_ACT_UD_2_10G_T_EEE (1u << 3) ++#define FW_PHY_ACT_UD_2_1G_T_EEE (1u << 2) ++#define FW_PHY_ACT_UD_2_100M_TX_EEE (1u << 1) ++#define FW_PHY_ACT_RETRIES 50 ++#define FW_PHY_INFO_SPEED_MASK 0xFFFu ++#define FW_PHY_INFO_ID_HI_MASK 0xFFFF0000u ++#define FW_PHY_INFO_ID_LO_MASK 0x0000FFFFu + + /* Host Interface Command Structures */ ++ ++#pragma pack(push, 1) ++ + struct ixgbe_hic_hdr { + u8 cmd; + u8 buf_len; +@@ -2306,6 +3144,25 @@ struct ixgbe_hic_hdr { + u8 checksum; + }; + ++struct ixgbe_hic_hdr2_req { ++ u8 cmd; ++ u8 buf_lenh; ++ u8 buf_lenl; ++ u8 checksum; ++}; ++ ++struct ixgbe_hic_hdr2_rsp { ++ u8 cmd; ++ u8 buf_lenl; ++ u8 buf_lenh_status; /* 7-5: high bits of buf_len, 4-0: status */ ++ u8 checksum; ++}; ++ ++union ixgbe_hic_hdr2 { ++ struct ixgbe_hic_hdr2_req req; ++ struct ixgbe_hic_hdr2_rsp rsp; ++}; ++ + struct ixgbe_hic_drv_info { + struct ixgbe_hic_hdr hdr; + u8 port_num; +@@ -2317,20 +3174,124 @@ struct ixgbe_hic_drv_info { + u16 pad2; /* end spacing to ensure length is mult. 
of dword2 */ + }; + ++struct ixgbe_hic_drv_info2 { ++ struct ixgbe_hic_hdr hdr; ++ u8 port_num; ++ u8 ver_sub; ++ u8 ver_build; ++ u8 ver_min; ++ u8 ver_maj; ++ char driver_string[FW_CEM_DRIVER_VERSION_SIZE]; ++}; ++ ++/* These need to be dword aligned */ ++struct ixgbe_hic_read_shadow_ram { ++ union ixgbe_hic_hdr2 hdr; ++ u32 address; ++ u16 length; ++ u16 pad2; ++ u16 data; ++ u16 pad3; ++}; ++ ++struct ixgbe_hic_write_shadow_ram { ++ union ixgbe_hic_hdr2 hdr; ++ u32 address; ++ u16 length; ++ u16 pad2; ++ u16 data; ++ u16 pad3; ++}; ++ ++struct ixgbe_hic_disable_rxen { ++ struct ixgbe_hic_hdr hdr; ++ u8 port_number; ++ u8 pad2; ++ u16 pad3; ++}; ++ ++struct ixgbe_hic_phy_token_req { ++ struct ixgbe_hic_hdr hdr; ++ u8 port_number; ++ u8 command_type; ++ u16 pad; ++}; ++ ++struct ixgbe_hic_internal_phy_req { ++ struct ixgbe_hic_hdr hdr; ++ u8 port_number; ++ u8 command_type; ++ __be16 address; ++ u16 rsv1; ++ __be32 write_data; ++ u16 pad; ++}; ++ ++struct ixgbe_hic_internal_phy_resp { ++ struct ixgbe_hic_hdr hdr; ++ __be32 read_data; ++}; ++ ++struct ixgbe_hic_phy_activity_req { ++ struct ixgbe_hic_hdr hdr; ++ u8 port_number; ++ u8 pad; ++ __le16 activity_id; ++ __be32 data[FW_PHY_ACT_DATA_COUNT]; ++}; ++ ++struct ixgbe_hic_phy_activity_resp { ++ struct ixgbe_hic_hdr hdr; ++ __be32 data[FW_PHY_ACT_DATA_COUNT]; ++}; ++ ++#pragma pack(pop) ++ ++/* Transmit Descriptor - Legacy */ ++struct ixgbe_legacy_tx_desc { ++ u64 buffer_addr; /* Address of the descriptor's data buffer */ ++ union { ++ __le32 data; ++ struct { ++ __le16 length; /* Data buffer length */ ++ u8 cso; /* Checksum offset */ ++ u8 cmd; /* Descriptor control */ ++ } flags; ++ } lower; ++ union { ++ __le32 data; ++ struct { ++ u8 status; /* Descriptor status */ ++ u8 css; /* Checksum start */ ++ __le16 vlan; ++ } fields; ++ } upper; ++}; ++ + /* Transmit Descriptor - Advanced */ + union ixgbe_adv_tx_desc { + struct { +- __le64 buffer_addr; /* Address of descriptor's data buf */ ++ __le64 buffer_addr; /* 
Address of descriptor's data buf */ + __le32 cmd_type_len; + __le32 olinfo_status; + } read; + struct { +- __le64 rsvd; /* Reserved */ ++ __le64 rsvd; /* Reserved */ + __le32 nxtseq_seed; + __le32 status; + } wb; + }; + ++/* Receive Descriptor - Legacy */ ++struct ixgbe_legacy_rx_desc { ++ __le64 buffer_addr; /* Address of the descriptor's data buffer */ ++ __le16 length; /* Length of data DMAed into data buffer */ ++ __le16 csum; /* Packet checksum */ ++ u8 status; /* Descriptor status */ ++ u8 errors; /* Descriptor Errors */ ++ __le16 vlan; ++}; ++ + /* Receive Descriptor - Advanced */ + union ixgbe_adv_rx_desc { + struct { +@@ -2371,101 +3332,115 @@ struct ixgbe_adv_tx_context_desc { + }; + + /* Adv Transmit Descriptor Config Masks */ +-#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ +-#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ +-#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE 1588 Time Stamp */ +-#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ +-#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ +-#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ +-#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Advanced Context Desc */ +-#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Advanced Data Descriptor */ +-#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ +-#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ +-#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ +-#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ +-#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext (1=Adv) */ +-#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ +-#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ +-#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ +-#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ +-#define IXGBE_ADVTXD_STAT_RSV 
0x0000000C /* STA Reserved */ +-#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ +-#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ +-#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ +-#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ +- IXGBE_ADVTXD_POPTS_SHIFT) +-#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ +- IXGBE_ADVTXD_POPTS_SHIFT) +-#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ +-#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ +-#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ +-#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 /* 1st&Last TSO-full iSCSI PDU */ +-#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ +-#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ +-#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ +-#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ +-#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ +-#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ +-#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ +-#define IXGBE_ADVTXD_TUCMD_L4T_TCP 0x00000800 /* L4 Packet TYPE of TCP */ +-#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ +-#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /*Req requires Markers and CRC*/ +-#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ ++#define IXGBE_ADVTXD_DTALEN_MASK 0x0000FFFF /* Data buf length(bytes) */ ++#define IXGBE_ADVTXD_MAC_LINKSEC 0x00040000 /* Insert LinkSec */ ++#define IXGBE_ADVTXD_MAC_TSTAMP 0x00080000 /* IEEE1588 time stamp */ ++#define IXGBE_ADVTXD_IPSEC_SA_INDEX_MASK 0x000003FF /* IPSec SA index */ ++#define IXGBE_ADVTXD_IPSEC_ESP_LEN_MASK 0x000001FF /* IPSec ESP length */ ++#define IXGBE_ADVTXD_DTYP_MASK 0x00F00000 /* DTYP mask */ ++#define IXGBE_ADVTXD_DTYP_CTXT 0x00200000 /* Adv Context 
Desc */ ++#define IXGBE_ADVTXD_DTYP_DATA 0x00300000 /* Adv Data Descriptor */ ++#define IXGBE_ADVTXD_DCMD_EOP IXGBE_TXD_CMD_EOP /* End of Packet */ ++#define IXGBE_ADVTXD_DCMD_IFCS IXGBE_TXD_CMD_IFCS /* Insert FCS */ ++#define IXGBE_ADVTXD_DCMD_RS IXGBE_TXD_CMD_RS /* Report Status */ ++#define IXGBE_ADVTXD_DCMD_DDTYP_ISCSI 0x10000000 /* DDP hdr type or iSCSI */ ++#define IXGBE_ADVTXD_DCMD_DEXT IXGBE_TXD_CMD_DEXT /* Desc ext 1=Adv */ ++#define IXGBE_ADVTXD_DCMD_VLE IXGBE_TXD_CMD_VLE /* VLAN pkt enable */ ++#define IXGBE_ADVTXD_DCMD_TSE 0x80000000 /* TCP Seg enable */ ++#define IXGBE_ADVTXD_STAT_DD IXGBE_TXD_STAT_DD /* Descriptor Done */ ++#define IXGBE_ADVTXD_STAT_SN_CRC 0x00000002 /* NXTSEQ/SEED pres in WB */ ++#define IXGBE_ADVTXD_STAT_RSV 0x0000000C /* STA Reserved */ ++#define IXGBE_ADVTXD_IDX_SHIFT 4 /* Adv desc Index shift */ ++#define IXGBE_ADVTXD_CC 0x00000080 /* Check Context */ ++#define IXGBE_ADVTXD_POPTS_SHIFT 8 /* Adv desc POPTS shift */ ++#define IXGBE_ADVTXD_POPTS_IXSM (IXGBE_TXD_POPTS_IXSM << \ ++ IXGBE_ADVTXD_POPTS_SHIFT) ++#define IXGBE_ADVTXD_POPTS_TXSM (IXGBE_TXD_POPTS_TXSM << \ ++ IXGBE_ADVTXD_POPTS_SHIFT) ++#define IXGBE_ADVTXD_POPTS_ISCO_1ST 0x00000000 /* 1st TSO of iSCSI PDU */ ++#define IXGBE_ADVTXD_POPTS_ISCO_MDL 0x00000800 /* Middle TSO of iSCSI PDU */ ++#define IXGBE_ADVTXD_POPTS_ISCO_LAST 0x00001000 /* Last TSO of iSCSI PDU */ ++/* 1st&Last TSO-full iSCSI PDU */ ++#define IXGBE_ADVTXD_POPTS_ISCO_FULL 0x00001800 ++#define IXGBE_ADVTXD_POPTS_RSV 0x00002000 /* POPTS Reserved */ ++#define IXGBE_ADVTXD_PAYLEN_SHIFT 14 /* Adv desc PAYLEN shift */ ++#define IXGBE_ADVTXD_MACLEN_SHIFT 9 /* Adv ctxt desc mac len shift */ ++#define IXGBE_ADVTXD_VLAN_SHIFT 16 /* Adv ctxt vlan tag shift */ ++#define IXGBE_ADVTXD_TUCMD_IPV4 0x00000400 /* IP Packet Type: 1=IPv4 */ ++#define IXGBE_ADVTXD_TUCMD_IPV6 0x00000000 /* IP Packet Type: 0=IPv6 */ ++#define IXGBE_ADVTXD_TUCMD_L4T_UDP 0x00000000 /* L4 Packet TYPE of UDP */ ++#define IXGBE_ADVTXD_TUCMD_L4T_TCP 
0x00000800 /* L4 Packet TYPE of TCP */ ++#define IXGBE_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 Packet TYPE of SCTP */ ++#define IXGBE_ADVTXD_TUCMD_L4T_RSV 0x00001800 /* RSV L4 Packet TYPE */ ++#define IXGBE_ADVTXD_TUCMD_MKRREQ 0x00002000 /* req Markers and CRC */ ++#define IXGBE_ADVTXD_POPTS_IPSEC 0x00000400 /* IPSec offload request */ + #define IXGBE_ADVTXD_TUCMD_IPSEC_TYPE_ESP 0x00002000 /* IPSec Type ESP */ + #define IXGBE_ADVTXD_TUCMD_IPSEC_ENCRYPT_EN 0x00004000/* ESP Encrypt Enable */ +-#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ +-#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ +-#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ +-#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ +-#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation: End */ +-#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation: Start */ +-#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ +-#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ +-#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ +-#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ +-#define IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ +-#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ +- ++#define IXGBE_ADVTXT_TUCMD_FCOE 0x00008000 /* FCoE Frame Type */ ++#define IXGBE_ADVTXD_FCOEF_EOF_MASK (0x3 << 10) /* FC EOF index */ ++#define IXGBE_ADVTXD_FCOEF_SOF ((1 << 2) << 10) /* FC SOF index */ ++#define IXGBE_ADVTXD_FCOEF_PARINC ((1 << 3) << 10) /* Rel_Off in F_CTL */ ++#define IXGBE_ADVTXD_FCOEF_ORIE ((1 << 4) << 10) /* Orientation End */ ++#define IXGBE_ADVTXD_FCOEF_ORIS ((1 << 5) << 10) /* Orientation Start */ ++#define IXGBE_ADVTXD_FCOEF_EOF_N (0x0 << 10) /* 00: EOFn */ ++#define IXGBE_ADVTXD_FCOEF_EOF_T (0x1 << 10) /* 01: EOFt */ ++#define IXGBE_ADVTXD_FCOEF_EOF_NI (0x2 << 10) /* 10: EOFni */ ++#define IXGBE_ADVTXD_FCOEF_EOF_A (0x3 << 10) /* 11: EOFa */ ++#define 
IXGBE_ADVTXD_L4LEN_SHIFT 8 /* Adv ctxt L4LEN shift */ ++#define IXGBE_ADVTXD_MSS_SHIFT 16 /* Adv ctxt MSS shift */ ++ ++#define IXGBE_ADVTXD_OUTER_IPLEN 16 /* Adv ctxt OUTERIPLEN shift */ ++#define IXGBE_ADVTXD_TUNNEL_LEN 24 /* Adv ctxt TUNNELLEN shift */ ++#define IXGBE_ADVTXD_TUNNEL_TYPE_SHIFT 16 /* Adv Tx Desc Tunnel Type shift */ ++#define IXGBE_ADVTXD_OUTERIPCS_SHIFT 17 /* Adv Tx Desc OUTERIPCS Shift */ ++#define IXGBE_ADVTXD_TUNNEL_TYPE_NVGRE 1 /* Adv Tx Desc Tunnel Type NVGRE */ ++/* Adv Tx Desc OUTERIPCS Shift for X550EM_a */ ++#define IXGBE_ADVTXD_OUTERIPCS_SHIFT_X550EM_a 26 + /* Autonegotiation advertised speeds */ + typedef u32 ixgbe_autoneg_advertised; + /* Link speed */ + typedef u32 ixgbe_link_speed; +-#define IXGBE_LINK_SPEED_UNKNOWN 0 +-#define IXGBE_LINK_SPEED_100_FULL 0x0008 +-#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 +-#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 +-#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ +- IXGBE_LINK_SPEED_10GB_FULL) +-#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ +- IXGBE_LINK_SPEED_1GB_FULL | \ +- IXGBE_LINK_SPEED_10GB_FULL) +- ++#define IXGBE_LINK_SPEED_UNKNOWN 0 ++#define IXGBE_LINK_SPEED_10_FULL 0x0002 ++#define IXGBE_LINK_SPEED_100_FULL 0x0008 ++#define IXGBE_LINK_SPEED_1GB_FULL 0x0020 ++#define IXGBE_LINK_SPEED_2_5GB_FULL 0x0400 ++#define IXGBE_LINK_SPEED_5GB_FULL 0x0800 ++#define IXGBE_LINK_SPEED_10GB_FULL 0x0080 ++#define IXGBE_LINK_SPEED_82598_AUTONEG (IXGBE_LINK_SPEED_1GB_FULL | \ ++ IXGBE_LINK_SPEED_10GB_FULL) ++#define IXGBE_LINK_SPEED_82599_AUTONEG (IXGBE_LINK_SPEED_100_FULL | \ ++ IXGBE_LINK_SPEED_1GB_FULL | \ ++ IXGBE_LINK_SPEED_10GB_FULL) + + /* Physical layer type */ +-typedef u32 ixgbe_physical_layer; +-#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 +-#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x0001 +-#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x0002 +-#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x0004 +-#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x0008 +-#define 
IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x0010 +-#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x0020 +-#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x0040 +-#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x0080 +-#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x0100 +-#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x0200 +-#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x0400 +-#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x0800 +-#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x1000 +-#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x2000 ++typedef u64 ixgbe_physical_layer; ++#define IXGBE_PHYSICAL_LAYER_UNKNOWN 0 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_T 0x00001 ++#define IXGBE_PHYSICAL_LAYER_1000BASE_T 0x00002 ++#define IXGBE_PHYSICAL_LAYER_100BASE_TX 0x00004 ++#define IXGBE_PHYSICAL_LAYER_SFP_PLUS_CU 0x00008 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_LR 0x00010 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_LRM 0x00020 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_SR 0x00040 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_KX4 0x00080 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_CX4 0x00100 ++#define IXGBE_PHYSICAL_LAYER_1000BASE_KX 0x00200 ++#define IXGBE_PHYSICAL_LAYER_1000BASE_BX 0x00400 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_KR 0x00800 ++#define IXGBE_PHYSICAL_LAYER_10GBASE_XAUI 0x01000 ++#define IXGBE_PHYSICAL_LAYER_SFP_ACTIVE_DA 0x02000 ++#define IXGBE_PHYSICAL_LAYER_1000BASE_SX 0x04000 ++#define IXGBE_PHYSICAL_LAYER_10BASE_T 0x08000 ++#define IXGBE_PHYSICAL_LAYER_2500BASE_KX 0x10000 + + /* Flow Control Data Sheet defined values + * Calculation and defines taken from 802.1bb Annex O + */ + + /* BitTimes (BT) conversion */ +-#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) +-#define IXGBE_B2BT(BT) (BT * 8) ++#define IXGBE_BT2KB(BT) ((BT + (8 * 1024 - 1)) / (8 * 1024)) ++#define IXGBE_B2BT(BT) (BT * 8) + + /* Calculate Delay to respond to PFC */ + #define IXGBE_PFC_D 672 +@@ -2475,8 +3450,8 @@ typedef u32 ixgbe_physical_layer; + #define IXGBE_CABLE_DO 5000 /* Delay Optical */ + + /* Calculate Interface Delay X540 */ +-#define 
IXGBE_PHY_DC 25600 /* Delay 10G BASET */ +-#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ ++#define IXGBE_PHY_DC 25600 /* Delay 10G BASET */ ++#define IXGBE_MAC_DC 8192 /* Delay Copper XAUI interface */ + #define IXGBE_XAUI_DC (2 * 2048) /* Delay Copper Phy */ + + #define IXGBE_ID_X540 (IXGBE_MAC_DC + IXGBE_XAUI_DC + IXGBE_PHY_DC) +@@ -2522,25 +3497,34 @@ typedef u32 ixgbe_physical_layer; + (2 * IXGBE_LOW_DV_X540(_max_frame_tc)) + + /* Software ATR hash keys */ +-#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 +-#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 ++#define IXGBE_ATR_BUCKET_HASH_KEY 0x3DAD14E2 ++#define IXGBE_ATR_SIGNATURE_HASH_KEY 0x174D3614 + + /* Software ATR input stream values and masks */ +-#define IXGBE_ATR_HASH_MASK 0x7fff +-#define IXGBE_ATR_L4TYPE_MASK 0x3 +-#define IXGBE_ATR_L4TYPE_UDP 0x1 +-#define IXGBE_ATR_L4TYPE_TCP 0x2 +-#define IXGBE_ATR_L4TYPE_SCTP 0x3 +-#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 ++#define IXGBE_ATR_HASH_MASK 0x7fff ++#define IXGBE_ATR_L4TYPE_MASK 0x3 ++#define IXGBE_ATR_L4TYPE_UDP 0x1 ++#define IXGBE_ATR_L4TYPE_TCP 0x2 ++#define IXGBE_ATR_L4TYPE_SCTP 0x3 ++#define IXGBE_ATR_L4TYPE_IPV6_MASK 0x4 ++#define IXGBE_ATR_L4TYPE_TUNNEL_MASK 0x10 + enum ixgbe_atr_flow_type { +- IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, +- IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, +- IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, +- IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, +- IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, +- IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, +- IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, +- IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, ++ IXGBE_ATR_FLOW_TYPE_IPV4 = 0x0, ++ IXGBE_ATR_FLOW_TYPE_UDPV4 = 0x1, ++ IXGBE_ATR_FLOW_TYPE_TCPV4 = 0x2, ++ IXGBE_ATR_FLOW_TYPE_SCTPV4 = 0x3, ++ IXGBE_ATR_FLOW_TYPE_IPV6 = 0x4, ++ IXGBE_ATR_FLOW_TYPE_UDPV6 = 0x5, ++ IXGBE_ATR_FLOW_TYPE_TCPV6 = 0x6, ++ IXGBE_ATR_FLOW_TYPE_SCTPV6 = 0x7, ++ IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV4 = 0x10, ++ IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV4 = 0x11, ++ IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV4 = 0x12, ++ IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV4 = 0x13, 
++ IXGBE_ATR_FLOW_TYPE_TUNNELED_IPV6 = 0x14, ++ IXGBE_ATR_FLOW_TYPE_TUNNELED_UDPV6 = 0x15, ++ IXGBE_ATR_FLOW_TYPE_TUNNELED_TCPV6 = 0x16, ++ IXGBE_ATR_FLOW_TYPE_TUNNELED_SCTPV6 = 0x17, + }; + + /* Flow Director ATR input struct. */ +@@ -2548,28 +3532,34 @@ union ixgbe_atr_input { + /* + * Byte layout in order, all values with MSB first: + * +- * vm_pool - 1 byte +- * flow_type - 1 byte +- * vlan_id - 2 bytes +- * src_ip - 16 bytes +- * dst_ip - 16 bytes +- * src_port - 2 bytes +- * dst_port - 2 bytes +- * flex_bytes - 2 bytes +- * bkt_hash - 2 bytes ++ * vm_pool - 1 byte ++ * flow_type - 1 byte ++ * vlan_id - 2 bytes ++ * src_ip - 16 bytes ++ * inner_mac - 6 bytes ++ * cloud_mode - 2 bytes ++ * tni_vni - 4 bytes ++ * dst_ip - 16 bytes ++ * src_port - 2 bytes ++ * dst_port - 2 bytes ++ * flex_bytes - 2 bytes ++ * bkt_hash - 2 bytes + */ + struct { +- u8 vm_pool; +- u8 flow_type; ++ u8 vm_pool; ++ u8 flow_type; + __be16 vlan_id; + __be32 dst_ip[4]; + __be32 src_ip[4]; ++ u8 inner_mac[6]; ++ __be16 tunnel_type; ++ __be32 tni_vni; + __be16 src_port; + __be16 dst_port; + __be16 flex_bytes; + __be16 bkt_hash; + } formatted; +- __be32 dword_stream[11]; ++ __be32 dword_stream[14]; + }; + + /* Flow Director compressed ATR hash input struct */ +@@ -2588,6 +3578,48 @@ union ixgbe_atr_hash_dword { + __be32 dword; + }; + ++#define IXGBE_MVALS_INIT(m) \ ++ IXGBE_CAT(EEC, m), \ ++ IXGBE_CAT(FLA, m), \ ++ IXGBE_CAT(GRC, m), \ ++ IXGBE_CAT(SRAMREL, m), \ ++ IXGBE_CAT(FACTPS, m), \ ++ IXGBE_CAT(SWSM, m), \ ++ IXGBE_CAT(SWFW_SYNC, m), \ ++ IXGBE_CAT(FWSM, m), \ ++ IXGBE_CAT(SDP0_GPIEN, m), \ ++ IXGBE_CAT(SDP1_GPIEN, m), \ ++ IXGBE_CAT(SDP2_GPIEN, m), \ ++ IXGBE_CAT(EICR_GPI_SDP0, m), \ ++ IXGBE_CAT(EICR_GPI_SDP1, m), \ ++ IXGBE_CAT(EICR_GPI_SDP2, m), \ ++ IXGBE_CAT(CIAA, m), \ ++ IXGBE_CAT(CIAD, m), \ ++ IXGBE_CAT(I2C_CLK_IN, m), \ ++ IXGBE_CAT(I2C_CLK_OUT, m), \ ++ IXGBE_CAT(I2C_DATA_IN, m), \ ++ IXGBE_CAT(I2C_DATA_OUT, m), \ ++ IXGBE_CAT(I2C_DATA_OE_N_EN, m), \ ++ 
IXGBE_CAT(I2C_BB_EN, m), \ ++ IXGBE_CAT(I2C_CLK_OE_N_EN, m), \ ++ IXGBE_CAT(I2CCTL, m) ++ ++enum ixgbe_mvals { ++ IXGBE_MVALS_INIT(_IDX), ++ IXGBE_MVALS_IDX_LIMIT ++}; ++ ++/* ++ * Unavailable: The FCoE Boot Option ROM is not present in the flash. ++ * Disabled: Present; boot order is not set for any targets on the port. ++ * Enabled: Present; boot order is set for at least one target on the port. ++ */ ++enum ixgbe_fcoe_boot_status { ++ ixgbe_fcoe_bootstatus_disabled = 0, ++ ixgbe_fcoe_bootstatus_enabled = 1, ++ ixgbe_fcoe_bootstatus_unavailable = 0xFFFF ++}; ++ + enum ixgbe_eeprom_type { + ixgbe_eeprom_uninitialized = 0, + ixgbe_eeprom_spi, +@@ -2600,6 +3632,9 @@ enum ixgbe_mac_type { + ixgbe_mac_82598EB, + ixgbe_mac_82599EB, + ixgbe_mac_X540, ++ ixgbe_mac_X550, ++ ixgbe_mac_X550EM_x, ++ ixgbe_mac_X550EM_a, + ixgbe_num_macs + }; + +@@ -2608,6 +3643,11 @@ enum ixgbe_phy_type { + ixgbe_phy_none, + ixgbe_phy_tn, + ixgbe_phy_aq, ++ ixgbe_phy_x550em_kr, ++ ixgbe_phy_x550em_kx4, ++ ixgbe_phy_x550em_xfi, ++ ixgbe_phy_x550em_ext_t, ++ ixgbe_phy_ext_1g_t, + ixgbe_phy_cu_unknown, + ixgbe_phy_qt, + ixgbe_phy_xaui, +@@ -2624,22 +3664,24 @@ enum ixgbe_phy_type { + ixgbe_phy_qsfp_active_unknown, + ixgbe_phy_qsfp_intel, + ixgbe_phy_qsfp_unknown, +- ixgbe_phy_sfp_unsupported, ++ ixgbe_phy_sfp_unsupported, /*Enforce bit set with unsupported module*/ ++ ixgbe_phy_sgmii, ++ ixgbe_phy_fw, + ixgbe_phy_generic + }; + + /* + * SFP+ module type IDs: + * +- * ID Module Type ++ * ID Module Type + * ============= +- * 0 SFP_DA_CU +- * 1 SFP_SR +- * 2 SFP_LR +- * 3 SFP_DA_CU_CORE0 - 82599-specific +- * 4 SFP_DA_CU_CORE1 - 82599-specific +- * 5 SFP_SR/LR_CORE0 - 82599-specific +- * 6 SFP_SR/LR_CORE1 - 82599-specific ++ * 0 SFP_DA_CU ++ * 1 SFP_SR ++ * 2 SFP_LR ++ * 3 SFP_DA_CU_CORE0 - 82599-specific ++ * 4 SFP_DA_CU_CORE1 - 82599-specific ++ * 5 SFP_SR/LR_CORE0 - 82599-specific ++ * 6 SFP_SR/LR_CORE1 - 82599-specific + */ + enum ixgbe_sfp_type { + ixgbe_sfp_type_da_cu = 0, +@@ -2695,32 
+3737,33 @@ enum ixgbe_bus_type { + ixgbe_bus_type_pci, + ixgbe_bus_type_pcix, + ixgbe_bus_type_pci_express, ++ ixgbe_bus_type_internal, + ixgbe_bus_type_reserved + }; + + /* PCI bus speeds */ + enum ixgbe_bus_speed { +- ixgbe_bus_speed_unknown = 0, +- ixgbe_bus_speed_33 = 33, +- ixgbe_bus_speed_66 = 66, +- ixgbe_bus_speed_100 = 100, +- ixgbe_bus_speed_120 = 120, +- ixgbe_bus_speed_133 = 133, +- ixgbe_bus_speed_2500 = 2500, +- ixgbe_bus_speed_5000 = 5000, +- ixgbe_bus_speed_8000 = 8000, ++ ixgbe_bus_speed_unknown = 0, ++ ixgbe_bus_speed_33 = 33, ++ ixgbe_bus_speed_66 = 66, ++ ixgbe_bus_speed_100 = 100, ++ ixgbe_bus_speed_120 = 120, ++ ixgbe_bus_speed_133 = 133, ++ ixgbe_bus_speed_2500 = 2500, ++ ixgbe_bus_speed_5000 = 5000, ++ ixgbe_bus_speed_8000 = 8000, + ixgbe_bus_speed_reserved + }; + + /* PCI bus widths */ + enum ixgbe_bus_width { +- ixgbe_bus_width_unknown = 0, +- ixgbe_bus_width_pcie_x1 = 1, +- ixgbe_bus_width_pcie_x2 = 2, +- ixgbe_bus_width_pcie_x4 = 4, +- ixgbe_bus_width_pcie_x8 = 8, +- ixgbe_bus_width_32 = 32, +- ixgbe_bus_width_64 = 64, ++ ixgbe_bus_width_unknown = 0, ++ ixgbe_bus_width_pcie_x1 = 1, ++ ixgbe_bus_width_pcie_x2 = 2, ++ ixgbe_bus_width_pcie_x4 = 4, ++ ixgbe_bus_width_pcie_x8 = 8, ++ ixgbe_bus_width_32 = 32, ++ ixgbe_bus_width_64 = 64, + ixgbe_bus_width_reserved + }; + +@@ -2729,7 +3772,6 @@ struct ixgbe_addr_filter_info { + u32 rar_used_count; + u32 mta_in_use; + u32 overflow_promisc; +- bool uc_set_promisc; + bool user_set_promisc; + }; + +@@ -2740,13 +3782,14 @@ struct ixgbe_bus_info { + enum ixgbe_bus_type type; + + u16 func; +- u16 lan_id; ++ u8 lan_id; ++ u16 instance_id; + }; + + /* Flow control parameters */ + struct ixgbe_fc_info { +- u32 high_water[MAX_TRAFFIC_CLASS]; /* Flow Control High-water */ +- u32 low_water[MAX_TRAFFIC_CLASS]; /* Flow Control Low-water */ ++ u32 high_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl High-water */ ++ u32 low_water[IXGBE_DCB_MAX_TRAFFIC_CLASS]; /* Flow Ctrl Low-water */ + u16 pause_time; /* 
Flow Control Pause timer */ + bool send_xon; /* Flow control send XON */ + bool strict_ieee; /* Strict IEEE mode */ +@@ -2807,8 +3850,6 @@ struct ixgbe_hw_stats { + u64 mptc; + u64 bptc; + u64 xec; +- u64 rqsmr[16]; +- u64 tqsmr[8]; + u64 qprc[16]; + u64 qptc[16]; + u64 qbrc[16]; +@@ -2822,6 +3863,7 @@ struct ixgbe_hw_stats { + u64 fdirmatch; + u64 fdirmiss; + u64 fccrc; ++ u64 fclast; + u64 fcoerpdc; + u64 fcoeprc; + u64 fcoeptc; +@@ -2829,6 +3871,8 @@ struct ixgbe_hw_stats { + u64 fcoedwtc; + u64 fcoe_noddp; + u64 fcoe_noddp_ext_buff; ++ u64 ldpcec; ++ u64 pcrc8ec; + u64 b2ospc; + u64 b2ogprc; + u64 o2bgptc; +@@ -2851,7 +3895,7 @@ struct ixgbe_eeprom_operations { + s32 (*write_buffer)(struct ixgbe_hw *, u16, u16, u16 *); + s32 (*validate_checksum)(struct ixgbe_hw *, u16 *); + s32 (*update_checksum)(struct ixgbe_hw *); +- u16 (*calc_checksum)(struct ixgbe_hw *); ++ s32 (*calc_checksum)(struct ixgbe_hw *); + }; + + struct ixgbe_mac_operations { +@@ -2860,22 +3904,25 @@ struct ixgbe_mac_operations { + s32 (*start_hw)(struct ixgbe_hw *); + s32 (*clear_hw_cntrs)(struct ixgbe_hw *); + enum ixgbe_media_type (*get_media_type)(struct ixgbe_hw *); +- u32 (*get_supported_physical_layer)(struct ixgbe_hw *); ++ u64 (*get_supported_physical_layer)(struct ixgbe_hw *); + s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); ++ s32 (*set_san_mac_addr)(struct ixgbe_hw *, u8 *); + s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); + s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *); ++ s32 (*get_fcoe_boot_status)(struct ixgbe_hw *, u16 *); + s32 (*stop_adapter)(struct ixgbe_hw *); + s32 (*get_bus_info)(struct ixgbe_hw *); + void (*set_lan_id)(struct ixgbe_hw *); + s32 (*read_analog_reg8)(struct ixgbe_hw*, u32, u8*); + s32 (*write_analog_reg8)(struct ixgbe_hw*, u32, u8); + s32 (*setup_sfp)(struct ixgbe_hw *); +- s32 (*disable_rx_buff)(struct ixgbe_hw *); +- s32 (*enable_rx_buff)(struct ixgbe_hw *); + s32 (*enable_rx_dma)(struct 
ixgbe_hw *, u32); +- s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u16); +- void (*release_swfw_sync)(struct ixgbe_hw *, u16); ++ s32 (*disable_sec_rx_path)(struct ixgbe_hw *); ++ s32 (*enable_sec_rx_path)(struct ixgbe_hw *); ++ s32 (*acquire_swfw_sync)(struct ixgbe_hw *, u32); ++ void (*release_swfw_sync)(struct ixgbe_hw *, u32); ++ void (*init_swfw_sync)(struct ixgbe_hw *); + s32 (*prot_autoc_read)(struct ixgbe_hw *, bool *, u32 *); + s32 (*prot_autoc_write)(struct ixgbe_hw *, u32, bool); + +@@ -2883,44 +3930,72 @@ struct ixgbe_mac_operations { + void (*disable_tx_laser)(struct ixgbe_hw *); + void (*enable_tx_laser)(struct ixgbe_hw *); + void (*flap_tx_laser)(struct ixgbe_hw *); +- void (*stop_link_on_d3)(struct ixgbe_hw *); + s32 (*setup_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); ++ s32 (*setup_mac_link)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *, bool); + s32 (*get_link_capabilities)(struct ixgbe_hw *, ixgbe_link_speed *, + bool *); ++ void (*set_rate_select_speed)(struct ixgbe_hw *, ixgbe_link_speed); + +- /* Packet Buffer Manipulation */ +- void (*set_rxpba)(struct ixgbe_hw *, int, u32, int); ++ /* Packet Buffer manipulation */ ++ void (*setup_rxpba)(struct ixgbe_hw *, int, u32, int); + + /* LED */ + s32 (*led_on)(struct ixgbe_hw *, u32); + s32 (*led_off)(struct ixgbe_hw *, u32); + s32 (*blink_led_start)(struct ixgbe_hw *, u32); + s32 (*blink_led_stop)(struct ixgbe_hw *, u32); ++ s32 (*init_led_link_act)(struct ixgbe_hw *); + + /* RAR, Multicast, VLAN */ + s32 (*set_rar)(struct ixgbe_hw *, u32, u8 *, u32, u32); ++ s32 (*set_uc_addr)(struct ixgbe_hw *, u32, u8 *); + s32 (*clear_rar)(struct ixgbe_hw *, u32); ++ s32 (*insert_mac_addr)(struct ixgbe_hw *, u8 *, u32); + s32 (*set_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*set_vmdq_san_mac)(struct ixgbe_hw *, u32); + s32 (*clear_vmdq)(struct ixgbe_hw *, u32, u32); + s32 (*init_rx_addrs)(struct ixgbe_hw *); +- s32 (*update_mc_addr_list)(struct 
ixgbe_hw *, struct net_device *); ++ s32 (*update_uc_addr_list)(struct ixgbe_hw *, u8 *, u32, ++ ixgbe_mc_addr_itr); ++ s32 (*update_mc_addr_list)(struct ixgbe_hw *, u8 *, u32, ++ ixgbe_mc_addr_itr, bool clear); + s32 (*enable_mc)(struct ixgbe_hw *); + s32 (*disable_mc)(struct ixgbe_hw *); + s32 (*clear_vfta)(struct ixgbe_hw *); +- s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool); ++ s32 (*set_vfta)(struct ixgbe_hw *, u32, u32, bool, bool); ++ s32 (*set_vlvf)(struct ixgbe_hw *, u32, u32, bool, u32 *, u32, ++ bool); + s32 (*init_uta_tables)(struct ixgbe_hw *); + void (*set_mac_anti_spoofing)(struct ixgbe_hw *, bool, int); + void (*set_vlan_anti_spoofing)(struct ixgbe_hw *, bool, int); + + /* Flow Control */ + s32 (*fc_enable)(struct ixgbe_hw *); ++ s32 (*setup_fc)(struct ixgbe_hw *); ++ void (*fc_autoneg)(struct ixgbe_hw *); + + /* Manageability interface */ +- s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8); ++ s32 (*set_fw_drv_ver)(struct ixgbe_hw *, u8, u8, u8, u8, u16, ++ const char *); + s32 (*get_thermal_sensor_data)(struct ixgbe_hw *); + s32 (*init_thermal_sensor_thresh)(struct ixgbe_hw *hw); ++ void (*get_rtrup2tc)(struct ixgbe_hw *hw, u8 *map); ++ void (*disable_rx)(struct ixgbe_hw *hw); ++ void (*enable_rx)(struct ixgbe_hw *hw); ++ void (*set_source_address_pruning)(struct ixgbe_hw *, bool, ++ unsigned int); ++ void (*set_ethertype_anti_spoofing)(struct ixgbe_hw *, bool, int); ++ s32 (*dmac_update_tcs)(struct ixgbe_hw *hw); ++ s32 (*dmac_config_tcs)(struct ixgbe_hw *hw); ++ s32 (*dmac_config)(struct ixgbe_hw *hw); ++ s32 (*setup_eee)(struct ixgbe_hw *hw, bool enable_eee); ++ s32 (*read_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32 *); ++ s32 (*write_iosf_sb_reg)(struct ixgbe_hw *, u32, u32, u32); ++ void (*disable_mdd)(struct ixgbe_hw *hw); ++ void (*enable_mdd)(struct ixgbe_hw *hw); ++ void (*mdd_event)(struct ixgbe_hw *hw, u32 *vf_bitmap); ++ void (*restore_mdd_vf)(struct ixgbe_hw *hw, u32 vf); + }; + + struct ixgbe_phy_operations { +@@ 
-2933,6 +4008,7 @@ struct ixgbe_phy_operations { + s32 (*read_reg_mdi)(struct ixgbe_hw *, u32, u32, u16 *); + s32 (*write_reg_mdi)(struct ixgbe_hw *, u32, u32, u16); + s32 (*setup_link)(struct ixgbe_hw *); ++ s32 (*setup_internal_link)(struct ixgbe_hw *); + s32 (*setup_link_speed)(struct ixgbe_hw *, ixgbe_link_speed, bool); + s32 (*check_link)(struct ixgbe_hw *, ixgbe_link_speed *, bool *); + s32 (*get_firmware_version)(struct ixgbe_hw *, u16 *); +@@ -2941,78 +4017,113 @@ struct ixgbe_phy_operations { + s32 (*read_i2c_sff8472)(struct ixgbe_hw *, u8 , u8 *); + s32 (*read_i2c_eeprom)(struct ixgbe_hw *, u8 , u8 *); + s32 (*write_i2c_eeprom)(struct ixgbe_hw *, u8, u8); ++ void (*i2c_bus_clear)(struct ixgbe_hw *); + s32 (*check_overtemp)(struct ixgbe_hw *); ++ s32 (*set_phy_power)(struct ixgbe_hw *, bool on); ++ s32 (*enter_lplu)(struct ixgbe_hw *); ++ s32 (*handle_lasi)(struct ixgbe_hw *hw); ++ s32 (*read_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, ++ u8 *value); ++ s32 (*write_i2c_byte_unlocked)(struct ixgbe_hw *, u8 offset, u8 addr, ++ u8 value); ++}; ++ ++struct ixgbe_link_operations { ++ s32 (*read_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 *val); ++ s32 (*read_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 *val); ++ s32 (*write_link)(struct ixgbe_hw *, u8 addr, u16 reg, u16 val); ++ s32 (*write_link_unlocked)(struct ixgbe_hw *, u8 addr, u16 reg, ++ u16 val); ++}; ++ ++struct ixgbe_link_info { ++ struct ixgbe_link_operations ops; ++ u8 addr; + }; + + struct ixgbe_eeprom_info { +- struct ixgbe_eeprom_operations ops; +- enum ixgbe_eeprom_type type; +- u32 semaphore_delay; +- u16 word_size; +- u16 address_bits; +- u16 word_page_size; ++ struct ixgbe_eeprom_operations ops; ++ enum ixgbe_eeprom_type type; ++ u32 semaphore_delay; ++ u16 word_size; ++ u16 address_bits; ++ u16 word_page_size; ++ u16 ctrl_word_3; + }; + + #define IXGBE_FLAGS_DOUBLE_RESET_REQUIRED 0x01 + struct ixgbe_mac_info { +- struct ixgbe_mac_operations ops; +- enum 
ixgbe_mac_type type; +- u8 addr[ETH_ALEN]; +- u8 perm_addr[ETH_ALEN]; +- u8 san_addr[ETH_ALEN]; ++ struct ixgbe_mac_operations ops; ++ enum ixgbe_mac_type type; ++ u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; ++ u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; ++ u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; + /* prefix for World Wide Node Name (WWNN) */ +- u16 wwnn_prefix; ++ u16 wwnn_prefix; + /* prefix for World Wide Port Name (WWPN) */ +- u16 wwpn_prefix; +- u16 max_msix_vectors; ++ u16 wwpn_prefix; + #define IXGBE_MAX_MTA 128 +- u32 mta_shadow[IXGBE_MAX_MTA]; +- s32 mc_filter_type; +- u32 mcft_size; +- u32 vft_size; +- u32 num_rar_entries; +- u32 rar_highwater; +- u32 rx_pb_size; +- u32 max_tx_queues; +- u32 max_rx_queues; +- u32 orig_autoc; +- u32 orig_autoc2; +- bool orig_link_settings_stored; +- bool autotry_restart; +- u8 flags; +- u8 san_mac_rar_index; ++ u32 mta_shadow[IXGBE_MAX_MTA]; ++ s32 mc_filter_type; ++ u32 mcft_size; ++ u32 vft_size; ++ u32 num_rar_entries; ++ u32 rar_highwater; ++ u32 rx_pb_size; ++ u32 max_tx_queues; ++ u32 max_rx_queues; ++ u32 orig_autoc; ++ u8 san_mac_rar_index; ++ bool get_link_status; ++ u32 orig_autoc2; ++ u16 max_msix_vectors; ++ bool arc_subsystem_valid; ++ bool orig_link_settings_stored; ++ bool autotry_restart; ++ u8 flags; + struct ixgbe_thermal_sensor_data thermal_sensor_data; ++ bool thermal_sensor_enabled; ++ struct ixgbe_dmac_config dmac_config; ++ bool set_lben; ++ u32 max_link_up_time; ++ u8 led_link_act; + }; + + struct ixgbe_phy_info { +- struct ixgbe_phy_operations ops; +- struct mdio_if_info mdio; +- enum ixgbe_phy_type type; +- u32 id; +- enum ixgbe_sfp_type sfp_type; +- bool sfp_setup_needed; +- u32 revision; +- enum ixgbe_media_type media_type; +- bool reset_disable; +- ixgbe_autoneg_advertised autoneg_advertised; +- enum ixgbe_smart_speed smart_speed; +- bool smart_speed_active; +- bool multispeed_fiber; +- bool reset_if_overtemp; +- bool qsfp_shared_i2c_bus; ++ struct ixgbe_phy_operations ops; ++ enum ixgbe_phy_type 
type; ++ u32 addr; ++ u32 id; ++ enum ixgbe_sfp_type sfp_type; ++ bool sfp_setup_needed; ++ u32 revision; ++ enum ixgbe_media_type media_type; ++ u32 phy_semaphore_mask; ++ bool reset_disable; ++ ixgbe_autoneg_advertised autoneg_advertised; ++ ixgbe_link_speed speeds_supported; ++ ixgbe_link_speed eee_speeds_supported; ++ ixgbe_link_speed eee_speeds_advertised; ++ enum ixgbe_smart_speed smart_speed; ++ bool smart_speed_active; ++ bool multispeed_fiber; ++ bool reset_if_overtemp; ++ bool qsfp_shared_i2c_bus; ++ u32 nw_mng_if_sel; + }; + + #include "ixgbe_mbx.h" + + struct ixgbe_mbx_operations { +- s32 (*init_params)(struct ixgbe_hw *hw); +- s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); +- s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); +- s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); +- s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); +- s32 (*check_for_msg)(struct ixgbe_hw *, u16); +- s32 (*check_for_ack)(struct ixgbe_hw *, u16); +- s32 (*check_for_rst)(struct ixgbe_hw *, u16); ++ void (*init_params)(struct ixgbe_hw *hw); ++ s32 (*read)(struct ixgbe_hw *, u32 *, u16, u16); ++ s32 (*write)(struct ixgbe_hw *, u32 *, u16, u16); ++ s32 (*read_posted)(struct ixgbe_hw *, u32 *, u16, u16); ++ s32 (*write_posted)(struct ixgbe_hw *, u32 *, u16, u16); ++ s32 (*check_for_msg)(struct ixgbe_hw *, u16); ++ s32 (*check_for_ack)(struct ixgbe_hw *, u16); ++ s32 (*check_for_rst)(struct ixgbe_hw *, u16); + }; + + struct ixgbe_mbx_stats { +@@ -3034,69 +4145,193 @@ struct ixgbe_mbx_info { + }; + + struct ixgbe_hw { +- u8 __iomem *hw_addr; +- void *back; +- struct ixgbe_mac_info mac; +- struct ixgbe_addr_filter_info addr_ctrl; +- struct ixgbe_fc_info fc; +- struct ixgbe_phy_info phy; +- struct ixgbe_eeprom_info eeprom; +- struct ixgbe_bus_info bus; +- struct ixgbe_mbx_info mbx; +- u16 device_id; +- u16 vendor_id; +- u16 subsystem_device_id; +- u16 subsystem_vendor_id; +- u8 revision_id; +- bool adapter_stopped; +- bool force_full_reset; +- bool 
allow_unsupported_sfp; +- bool wol_enabled; +-}; +- +-struct ixgbe_info { +- enum ixgbe_mac_type mac; +- s32 (*get_invariants)(struct ixgbe_hw *); +- struct ixgbe_mac_operations *mac_ops; +- struct ixgbe_eeprom_operations *eeprom_ops; +- struct ixgbe_phy_operations *phy_ops; +- struct ixgbe_mbx_operations *mbx_ops; ++ u8 IOMEM *hw_addr; ++ void *back; ++ struct ixgbe_mac_info mac; ++ struct ixgbe_addr_filter_info addr_ctrl; ++ struct ixgbe_fc_info fc; ++ struct ixgbe_phy_info phy; ++ struct ixgbe_link_info link; ++ struct ixgbe_eeprom_info eeprom; ++ struct ixgbe_bus_info bus; ++ struct ixgbe_mbx_info mbx; ++ const u32 *mvals; ++ u16 device_id; ++ u16 vendor_id; ++ u16 subsystem_device_id; ++ u16 subsystem_vendor_id; ++ u8 revision_id; ++ bool adapter_stopped; ++ int api_version; ++ bool force_full_reset; ++ bool allow_unsupported_sfp; ++ bool wol_enabled; ++ bool need_crosstalk_fix; + }; + ++#define ixgbe_call_func(hw, func, params, error) \ ++ (func != NULL) ? func params : error + + /* Error Codes */ +-#define IXGBE_ERR_EEPROM -1 +-#define IXGBE_ERR_EEPROM_CHECKSUM -2 +-#define IXGBE_ERR_PHY -3 +-#define IXGBE_ERR_CONFIG -4 +-#define IXGBE_ERR_PARAM -5 +-#define IXGBE_ERR_MAC_TYPE -6 +-#define IXGBE_ERR_UNKNOWN_PHY -7 +-#define IXGBE_ERR_LINK_SETUP -8 +-#define IXGBE_ERR_ADAPTER_STOPPED -9 +-#define IXGBE_ERR_INVALID_MAC_ADDR -10 +-#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 +-#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 +-#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 +-#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 +-#define IXGBE_ERR_RESET_FAILED -15 +-#define IXGBE_ERR_SWFW_SYNC -16 +-#define IXGBE_ERR_PHY_ADDR_INVALID -17 +-#define IXGBE_ERR_I2C -18 +-#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 +-#define IXGBE_ERR_SFP_NOT_PRESENT -20 +-#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 +-#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 +-#define IXGBE_ERR_FDIR_REINIT_FAILED -23 +-#define IXGBE_ERR_EEPROM_VERSION -24 +-#define IXGBE_ERR_NO_SPACE -25 +-#define IXGBE_ERR_OVERTEMP -26 
+-#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 +-#define IXGBE_ERR_FC_NOT_SUPPORTED -28 +-#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 +-#define IXGBE_ERR_PBA_SECTION -31 +-#define IXGBE_ERR_INVALID_ARGUMENT -32 +-#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 +-#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF ++#define IXGBE_SUCCESS 0 ++#define IXGBE_ERR_EEPROM -1 ++#define IXGBE_ERR_EEPROM_CHECKSUM -2 ++#define IXGBE_ERR_PHY -3 ++#define IXGBE_ERR_CONFIG -4 ++#define IXGBE_ERR_PARAM -5 ++#define IXGBE_ERR_MAC_TYPE -6 ++#define IXGBE_ERR_UNKNOWN_PHY -7 ++#define IXGBE_ERR_LINK_SETUP -8 ++#define IXGBE_ERR_ADAPTER_STOPPED -9 ++#define IXGBE_ERR_INVALID_MAC_ADDR -10 ++#define IXGBE_ERR_DEVICE_NOT_SUPPORTED -11 ++#define IXGBE_ERR_MASTER_REQUESTS_PENDING -12 ++#define IXGBE_ERR_INVALID_LINK_SETTINGS -13 ++#define IXGBE_ERR_AUTONEG_NOT_COMPLETE -14 ++#define IXGBE_ERR_RESET_FAILED -15 ++#define IXGBE_ERR_SWFW_SYNC -16 ++#define IXGBE_ERR_PHY_ADDR_INVALID -17 ++#define IXGBE_ERR_I2C -18 ++#define IXGBE_ERR_SFP_NOT_SUPPORTED -19 ++#define IXGBE_ERR_SFP_NOT_PRESENT -20 ++#define IXGBE_ERR_SFP_NO_INIT_SEQ_PRESENT -21 ++#define IXGBE_ERR_NO_SAN_ADDR_PTR -22 ++#define IXGBE_ERR_FDIR_REINIT_FAILED -23 ++#define IXGBE_ERR_EEPROM_VERSION -24 ++#define IXGBE_ERR_NO_SPACE -25 ++#define IXGBE_ERR_OVERTEMP -26 ++#define IXGBE_ERR_FC_NOT_NEGOTIATED -27 ++#define IXGBE_ERR_FC_NOT_SUPPORTED -28 ++#define IXGBE_ERR_SFP_SETUP_NOT_COMPLETE -30 ++#define IXGBE_ERR_PBA_SECTION -31 ++#define IXGBE_ERR_INVALID_ARGUMENT -32 ++#define IXGBE_ERR_HOST_INTERFACE_COMMAND -33 ++#define IXGBE_ERR_OUT_OF_MEM -34 ++#define IXGBE_ERR_FEATURE_NOT_SUPPORTED -36 ++#define IXGBE_ERR_EEPROM_PROTECTED_REGION -37 ++#define IXGBE_ERR_FDIR_CMD_INCOMPLETE -38 ++#define IXGBE_ERR_FW_RESP_INVALID -39 ++#define IXGBE_ERR_TOKEN_RETRY -40 ++ ++#define IXGBE_NOT_IMPLEMENTED 0x7FFFFFFF ++ ++#define IXGBE_FUSES0_GROUP(_i) (0x11158 + ((_i) * 4)) ++#define IXGBE_FUSES0_300MHZ (1 << 5) ++#define IXGBE_FUSES0_REV_MASK (3 << 6) ++ 
++#define IXGBE_KRM_PORT_CAR_GEN_CTRL(P) ((P) ? 0x8010 : 0x4010) ++#define IXGBE_KRM_LINK_S1(P) ((P) ? 0x8200 : 0x4200) ++#define IXGBE_KRM_LINK_CTRL_1(P) ((P) ? 0x820C : 0x420C) ++#define IXGBE_KRM_AN_CNTL_1(P) ((P) ? 0x822C : 0x422C) ++#define IXGBE_KRM_AN_CNTL_4(P) ((P) ? 0x8238 : 0x4238) ++#define IXGBE_KRM_AN_CNTL_8(P) ((P) ? 0x8248 : 0x4248) ++#define IXGBE_KRM_PCS_KX_AN(P) ((P) ? 0x9918 : 0x5918) ++#define IXGBE_KRM_PCS_KX_AN_LP(P) ((P) ? 0x991C : 0x591C) ++#define IXGBE_KRM_SGMII_CTRL(P) ((P) ? 0x82A0 : 0x42A0) ++#define IXGBE_KRM_LP_BASE_PAGE_HIGH(P) ((P) ? 0x836C : 0x436C) ++#define IXGBE_KRM_DSP_TXFFE_STATE_4(P) ((P) ? 0x8634 : 0x4634) ++#define IXGBE_KRM_DSP_TXFFE_STATE_5(P) ((P) ? 0x8638 : 0x4638) ++#define IXGBE_KRM_RX_TRN_LINKUP_CTRL(P) ((P) ? 0x8B00 : 0x4B00) ++#define IXGBE_KRM_PMD_DFX_BURNIN(P) ((P) ? 0x8E00 : 0x4E00) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20(P) ((P) ? 0x9054 : 0x5054) ++#define IXGBE_KRM_TX_COEFF_CTRL_1(P) ((P) ? 0x9520 : 0x5520) ++#define IXGBE_KRM_RX_ANA_CTL(P) ((P) ? 
0x9A00 : 0x5A00) ++ ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA ~(0x3 << 20) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR (1u << 20) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_LR (0x2 << 20) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN (1u << 25) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN (1u << 26) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN (1u << 27) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10M ~(0x7 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_100M (1u << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G (0x2 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G (0x3 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN (0x4 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_2_5G (0x7 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK (0x7 << 28) ++#define IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART (1u << 31) ++ ++#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B (1 << 9) ++#define IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS (1 << 11) ++ ++#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK (0x7 << 8) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G (2 << 8) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G (4 << 8) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN (1 << 12) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN (1 << 13) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_FEC_REQ (1 << 14) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_FEC (1 << 15) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX (1 << 16) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR (1 << 18) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KX (1 << 24) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_EEE_CAP_KR (1 << 26) ++#define IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE (1 << 28) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE (1 << 29) ++#define IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART (1 << 31) ++ ++#define IXGBE_KRM_AN_CNTL_1_SYM_PAUSE (1 << 28) ++#define IXGBE_KRM_AN_CNTL_1_ASM_PAUSE (1 << 29) ++#define IXGBE_KRM_PCS_KX_AN_SYM_PAUSE (1 << 1) ++#define IXGBE_KRM_PCS_KX_AN_ASM_PAUSE (1 
<< 2) ++#define IXGBE_KRM_PCS_KX_AN_LP_SYM_PAUSE (1 << 2) ++#define IXGBE_KRM_PCS_KX_AN_LP_ASM_PAUSE (1 << 3) ++#define IXGBE_KRM_AN_CNTL_4_ECSR_AN37_OVER_73 (1 << 29) ++#define IXGBE_KRM_AN_CNTL_8_LINEAR (1 << 0) ++#define IXGBE_KRM_AN_CNTL_8_LIMITING (1 << 1) ++ ++#define IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE (1 << 10) ++#define IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE (1 << 11) ++ ++#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D (1 << 12) ++#define IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D (1 << 19) ++ ++#define IXGBE_KRM_DSP_TXFFE_STATE_C0_EN (1 << 6) ++#define IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN (1 << 15) ++#define IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN (1 << 16) ++ ++#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL (1 << 4) ++#define IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS (1 << 2) ++ ++#define IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK (0x3 << 16) ++ ++#define IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN (1 << 1) ++#define IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN (1 << 2) ++#define IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN (1 << 3) ++#define IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN (1 << 31) ++ ++#define IXGBE_SB_IOSF_INDIRECT_CTRL 0x00011144 ++#define IXGBE_SB_IOSF_INDIRECT_DATA 0x00011148 ++ ++#define IXGBE_SB_IOSF_CTRL_ADDR_SHIFT 0 ++#define IXGBE_SB_IOSF_CTRL_ADDR_MASK 0xFF ++#define IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT 18 ++#define IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK \ ++ (0x3 << IXGBE_SB_IOSF_CTRL_RESP_STAT_SHIFT) ++#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT 20 ++#define IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK \ ++ (0xFF << IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT) ++#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT 28 ++#define IXGBE_SB_IOSF_CTRL_TARGET_SELECT_MASK 0x7 ++#define IXGBE_SB_IOSF_CTRL_BUSY_SHIFT 31 ++#define IXGBE_SB_IOSF_CTRL_BUSY (1 << IXGBE_SB_IOSF_CTRL_BUSY_SHIFT) ++#define IXGBE_SB_IOSF_TARGET_KR_PHY 0 ++ ++#define IXGBE_NW_MNG_IF_SEL 0x00011178 ++#define IXGBE_NW_MNG_IF_SEL_MDIO_ACT (1u << 1) ++#define IXGBE_NW_MNG_IF_SEL_MDIO_IF_MODE (1u << 2) ++#define 
IXGBE_NW_MNG_IF_SEL_EN_SHARED_MDIO (1u << 13) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10M (1u << 17) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_100M (1u << 18) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_1G (1u << 19) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G (1u << 20) ++#define IXGBE_NW_MNG_IF_SEL_PHY_SPEED_10G (1u << 21) ++#define IXGBE_NW_MNG_IF_SEL_SGMII_ENABLE (1u << 25) ++#define IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE (1 << 24) /* X552 reg field only */ ++#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT 3 ++#define IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD \ ++ (0x1F << IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT) ++ ++#include "ixgbe_osdep2.h" + + #endif /* _IXGBE_TYPE_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +index 40dd798..2a62f44 100644 +--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.c +@@ -1,7 +1,7 @@ + /******************************************************************************* + +- Intel 10 Gigabit PCI Express Linux driver +- Copyright(c) 1999 - 2013 Intel Corporation. ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. + + This program is free software; you can redistribute it and/or modify it + under the terms and conditions of the GNU General Public License, +@@ -12,10 +12,6 @@ + FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + more details. + +- You should have received a copy of the GNU General Public License along with +- this program; if not, write to the Free Software Foundation, Inc., +- 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA. +- + The full GNU General Public License is included in this distribution in + the file called "COPYING". 
+ +@@ -26,11 +22,10 @@ + + *******************************************************************************/ + +-#include +-#include +-#include +- +-#include "ixgbe.h" ++#include "ixgbe_x540.h" ++#include "ixgbe_type.h" ++#include "ixgbe_api.h" ++#include "ixgbe_common.h" + #include "ixgbe_phy.h" + + #define IXGBE_X540_MAX_TX_QUEUES 128 +@@ -40,48 +35,154 @@ + #define IXGBE_X540_VFT_TBL_SIZE 128 + #define IXGBE_X540_RX_PB_SIZE 384 + +-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); +-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); +-static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); +-static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask); +-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); +-static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); ++STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw); ++STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw); ++STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw); + +-static enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) ++/** ++ * ixgbe_init_ops_X540 - Inits func ptrs and MAC type ++ * @hw: pointer to hardware structure ++ * ++ * Initialize the function pointers and assign the MAC type for X540. ++ * Does not touch the hardware. 
++ **/ ++s32 ixgbe_init_ops_X540(struct ixgbe_hw *hw) + { +- return ixgbe_media_type_copper; ++ struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_phy_info *phy = &hw->phy; ++ struct ixgbe_eeprom_info *eeprom = &hw->eeprom; ++ s32 ret_val; ++ ++ DEBUGFUNC("ixgbe_init_ops_X540"); ++ ++ ret_val = ixgbe_init_phy_ops_generic(hw); ++ ret_val = ixgbe_init_ops_generic(hw); ++ ++ /* EEPROM */ ++ eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; ++ eeprom->ops.read = ixgbe_read_eerd_X540; ++ eeprom->ops.read_buffer = ixgbe_read_eerd_buffer_X540; ++ eeprom->ops.write = ixgbe_write_eewr_X540; ++ eeprom->ops.write_buffer = ixgbe_write_eewr_buffer_X540; ++ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X540; ++ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X540; ++ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X540; ++ ++ /* PHY */ ++ phy->ops.init = ixgbe_init_phy_ops_generic; ++ phy->ops.reset = NULL; ++ phy->ops.set_phy_power = ixgbe_set_copper_phy_power; ++ ++ /* MAC */ ++ mac->ops.reset_hw = ixgbe_reset_hw_X540; ++ mac->ops.get_media_type = ixgbe_get_media_type_X540; ++ mac->ops.get_supported_physical_layer = ++ ixgbe_get_supported_physical_layer_X540; ++ mac->ops.read_analog_reg8 = NULL; ++ mac->ops.write_analog_reg8 = NULL; ++ mac->ops.start_hw = ixgbe_start_hw_X540; ++ mac->ops.get_san_mac_addr = ixgbe_get_san_mac_addr_generic; ++ mac->ops.set_san_mac_addr = ixgbe_set_san_mac_addr_generic; ++ mac->ops.get_device_caps = ixgbe_get_device_caps_generic; ++ mac->ops.get_wwn_prefix = ixgbe_get_wwn_prefix_generic; ++ mac->ops.get_fcoe_boot_status = ixgbe_get_fcoe_boot_status_generic; ++ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X540; ++ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X540; ++ mac->ops.init_swfw_sync = ixgbe_init_swfw_sync_X540; ++ mac->ops.disable_sec_rx_path = ixgbe_disable_sec_rx_path_generic; ++ mac->ops.enable_sec_rx_path = ixgbe_enable_sec_rx_path_generic; ++ ++ /* RAR, Multicast, VLAN */ ++ 
mac->ops.set_vmdq = ixgbe_set_vmdq_generic; ++ mac->ops.set_vmdq_san_mac = ixgbe_set_vmdq_san_mac_generic; ++ mac->ops.clear_vmdq = ixgbe_clear_vmdq_generic; ++ mac->ops.insert_mac_addr = ixgbe_insert_mac_addr_generic; ++ mac->rar_highwater = 1; ++ mac->ops.set_vfta = ixgbe_set_vfta_generic; ++ mac->ops.set_vlvf = ixgbe_set_vlvf_generic; ++ mac->ops.clear_vfta = ixgbe_clear_vfta_generic; ++ mac->ops.init_uta_tables = ixgbe_init_uta_tables_generic; ++ mac->ops.set_mac_anti_spoofing = ixgbe_set_mac_anti_spoofing; ++ mac->ops.set_vlan_anti_spoofing = ixgbe_set_vlan_anti_spoofing; ++ ++ /* Link */ ++ mac->ops.get_link_capabilities = ++ ixgbe_get_copper_link_capabilities_generic; ++ mac->ops.setup_link = ixgbe_setup_mac_link_X540; ++ mac->ops.setup_rxpba = ixgbe_set_rxpba_generic; ++ mac->ops.check_link = ixgbe_check_mac_link_generic; ++ ++ mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; ++ mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; ++ mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; ++ mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; ++ mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; ++ mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; ++ mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); ++ ++ /* ++ * FWSM register ++ * ARC supported; valid only if manageability features are ++ * enabled. 
++ */ ++ mac->arc_subsystem_valid = !!(IXGBE_READ_REG(hw, IXGBE_FWSM_BY_MAC(hw)) ++ & IXGBE_FWSM_MODE_MASK); ++ ++ hw->mbx.ops.init_params = ixgbe_init_mbx_params_pf; ++ ++ /* LEDs */ ++ mac->ops.blink_led_start = ixgbe_blink_led_start_X540; ++ mac->ops.blink_led_stop = ixgbe_blink_led_stop_X540; ++ ++ /* Manageability interface */ ++ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_generic; ++ ++ mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; ++ ++ return ret_val; + } + +-static s32 ixgbe_get_invariants_X540(struct ixgbe_hw *hw) ++/** ++ * ixgbe_get_link_capabilities_X540 - Determines link capabilities ++ * @hw: pointer to hardware structure ++ * @speed: pointer to link speed ++ * @autoneg: true when autoneg or autotry is enabled ++ * ++ * Determines the link capabilities by reading the AUTOC register. ++ **/ ++s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *autoneg) + { +- struct ixgbe_mac_info *mac = &hw->mac; +- +- /* Call PHY identify routine to get the phy type */ +- ixgbe_identify_phy_generic(hw); ++ ixgbe_get_copper_link_capabilities_generic(hw, speed, autoneg); + +- mac->mcft_size = IXGBE_X540_MC_TBL_SIZE; +- mac->vft_size = IXGBE_X540_VFT_TBL_SIZE; +- mac->num_rar_entries = IXGBE_X540_RAR_ENTRIES; +- mac->rx_pb_size = IXGBE_X540_RX_PB_SIZE; +- mac->max_rx_queues = IXGBE_X540_MAX_RX_QUEUES; +- mac->max_tx_queues = IXGBE_X540_MAX_TX_QUEUES; +- mac->max_msix_vectors = ixgbe_get_pcie_msix_count_generic(hw); ++ return IXGBE_SUCCESS; ++} + +- return 0; ++/** ++ * ixgbe_get_media_type_X540 - Get media type ++ * @hw: pointer to hardware structure ++ * ++ * Returns the media type (fiber, copper, backplane) ++ **/ ++enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw) ++{ ++ UNREFERENCED_1PARAMETER(hw); ++ return ixgbe_media_type_copper; + } + + /** +- * ixgbe_setup_mac_link_X540 - Set the auto advertised capabilitires ++ * ixgbe_setup_mac_link_X540 - Sets the auto advertised capabilities + * 
@hw: pointer to hardware structure + * @speed: new link speed + * @autoneg_wait_to_complete: true when waiting for completion is needed + **/ +-static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, +- ixgbe_link_speed speed, +- bool autoneg_wait_to_complete) ++s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete) + { +- return hw->phy.ops.setup_link_speed(hw, speed, +- autoneg_wait_to_complete); ++ DEBUGFUNC("ixgbe_setup_mac_link_X540"); ++ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); + } + + /** +@@ -89,31 +190,40 @@ static s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, + * @hw: pointer to hardware structure + * + * Resets the hardware by resetting the transmit and receive units, masks +- * and clears all interrupts, perform a PHY reset, and perform a link (MAC) +- * reset. ++ * and clears all interrupts, and perform a reset. + **/ +-static s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) ++s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw) + { + s32 status; + u32 ctrl, i; ++ u32 swfw_mask = hw->phy.phy_semaphore_mask; ++ ++ DEBUGFUNC("ixgbe_reset_hw_X540"); + + /* Call adapter stop to disable tx/rx and clear interrupts */ + status = hw->mac.ops.stop_adapter(hw); +- if (status != 0) ++ if (status != IXGBE_SUCCESS) + goto reset_hw_out; + + /* flush pending Tx transactions */ + ixgbe_clear_tx_pending(hw); + + mac_reset_top: ++ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); ++ if (status != IXGBE_SUCCESS) { ++ ERROR_REPORT2(IXGBE_ERROR_CAUTION, ++ "semaphore failed with %d", status); ++ return IXGBE_ERR_SWFW_SYNC; ++ } + ctrl = IXGBE_CTRL_RST; + ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); + IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); + IXGBE_WRITE_FLUSH(hw); ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); + + /* Poll for reset bit to self-clear indicating reset is complete */ + for (i = 0; i < 10; i++) { +- udelay(1); ++ usec_delay(1); + ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); + if 
(!(ctrl & IXGBE_CTRL_RST_MASK)) + break; +@@ -121,9 +231,10 @@ mac_reset_top: + + if (ctrl & IXGBE_CTRL_RST_MASK) { + status = IXGBE_ERR_RESET_FAILED; +- hw_dbg(hw, "Reset polling failed to complete.\n"); ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, ++ "Reset polling failed to complete.\n"); + } +- msleep(100); ++ msec_delay(100); + + /* + * Double resets are required for recovery from certain error +@@ -146,20 +257,24 @@ mac_reset_top: + * clear the multicast table. Also reset num_rar_entries to 128, + * since we modify this value when programming the SAN MAC address. + */ +- hw->mac.num_rar_entries = IXGBE_X540_MAX_TX_QUEUES; ++ hw->mac.num_rar_entries = 128; + hw->mac.ops.init_rx_addrs(hw); + + /* Store the permanent SAN mac address */ + hw->mac.ops.get_san_mac_addr(hw, hw->mac.san_addr); + + /* Add the SAN MAC address to the RAR only if it's a valid address */ +- if (is_valid_ether_addr(hw->mac.san_addr)) { +- hw->mac.ops.set_rar(hw, hw->mac.num_rar_entries - 1, +- hw->mac.san_addr, 0, IXGBE_RAH_AV); +- ++ if (ixgbe_validate_mac_addr(hw->mac.san_addr) == 0) { + /* Save the SAN MAC RAR index */ + hw->mac.san_mac_rar_index = hw->mac.num_rar_entries - 1; + ++ hw->mac.ops.set_rar(hw, hw->mac.san_mac_rar_index, ++ hw->mac.san_addr, 0, IXGBE_RAH_AV); ++ ++ /* clear VMDq pool/queue selection for this RAR */ ++ hw->mac.ops.clear_vmdq(hw, hw->mac.san_mac_rar_index, ++ IXGBE_CLEAR_VMDQ_ALL); ++ + /* Reserve the last RAR for the SAN MAC address */ + hw->mac.num_rar_entries--; + } +@@ -180,15 +295,18 @@ reset_hw_out: + * and the generation start_hw function. + * Then performs revision-specific operations, if any. 
+ **/ +-static s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) ++s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw) + { +- s32 ret_val = 0; ++ s32 ret_val = IXGBE_SUCCESS; ++ ++ DEBUGFUNC("ixgbe_start_hw_X540"); + + ret_val = ixgbe_start_hw_generic(hw); +- if (ret_val != 0) ++ if (ret_val != IXGBE_SUCCESS) + goto out; + + ret_val = ixgbe_start_hw_gen2(hw); ++ + out: + return ret_val; + } +@@ -199,20 +317,20 @@ out: + * + * Determines physical layer capabilities of the current configuration. + **/ +-static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) ++u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) + { +- u32 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; ++ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; + u16 ext_ability = 0; + +- hw->phy.ops.identify(hw); ++ DEBUGFUNC("ixgbe_get_supported_physical_layer_X540"); + +- hw->phy.ops.read_reg(hw, MDIO_PMA_EXTABLE, MDIO_MMD_PMAPMD, +- &ext_ability); +- if (ext_ability & MDIO_PMA_EXTABLE_10GBT) ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, &ext_ability); ++ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; +- if (ext_ability & MDIO_PMA_EXTABLE_1000BT) ++ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; +- if (ext_ability & MDIO_PMA_EXTABLE_100BTX) ++ if (ext_ability & IXGBE_MDIO_PHY_100BASETX_ABILITY) + physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; + + return physical_layer; +@@ -225,27 +343,29 @@ static u32 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw) + * Initializes the EEPROM parameters ixgbe_eeprom_info within the + * ixgbe_hw struct in order to set up EEPROM access. 
+ **/ +-static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) ++s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) + { + struct ixgbe_eeprom_info *eeprom = &hw->eeprom; + u32 eec; + u16 eeprom_size; + ++ DEBUGFUNC("ixgbe_init_eeprom_params_X540"); ++ + if (eeprom->type == ixgbe_eeprom_uninitialized) { + eeprom->semaphore_delay = 10; + eeprom->type = ixgbe_flash; + +- eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> + IXGBE_EEC_SIZE_SHIFT); + eeprom->word_size = 1 << (eeprom_size + + IXGBE_EEPROM_WORD_SIZE_SHIFT); + +- hw_dbg(hw, "Eeprom params: type = %d, size = %d\n", +- eeprom->type, eeprom->word_size); ++ DEBUGOUT2("Eeprom params: type = %d, size = %d\n", ++ eeprom->type, eeprom->word_size); + } + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -256,22 +376,24 @@ static s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw) + * + * Reads a 16 bit word from the EEPROM using the EERD register. 
+ **/ +-static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) ++s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + ++ DEBUGFUNC("ixgbe_read_eerd_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == +- 0) ++ IXGBE_SUCCESS) { + status = ixgbe_read_eerd_generic(hw, offset, data); +- else ++ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ } else { + status = IXGBE_ERR_SWFW_SYNC; ++ } + +- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; + } + + /** +- * ixgbe_read_eerd_buffer_X540 - Read EEPROM word(s) using EERD ++ * ixgbe_read_eerd_buffer_X540- Read EEPROM word(s) using EERD + * @hw: pointer to hardware structure + * @offset: offset of word in the EEPROM to read + * @words: number of words +@@ -279,19 +401,21 @@ static s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data) + * + * Reads a 16 bit word(s) from the EEPROM using the EERD register. + **/ +-static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, +- u16 offset, u16 words, u16 *data) ++s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, ++ u16 offset, u16 words, u16 *data) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + ++ DEBUGFUNC("ixgbe_read_eerd_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == +- 0) ++ IXGBE_SUCCESS) { + status = ixgbe_read_eerd_buffer_generic(hw, offset, + words, data); +- else ++ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ } else { + status = IXGBE_ERR_SWFW_SYNC; ++ } + +- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; + } + +@@ -303,16 +427,19 @@ static s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, + * + * Write a 16 bit word to the EEPROM using the EEWR register. 
+ **/ +-static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) ++s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + +- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) ++ DEBUGFUNC("ixgbe_write_eewr_X540"); ++ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == ++ IXGBE_SUCCESS) { + status = ixgbe_write_eewr_generic(hw, offset, data); +- else ++ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ } else { + status = IXGBE_ERR_SWFW_SYNC; ++ } + +- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; + } + +@@ -325,19 +452,21 @@ static s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data) + * + * Write a 16 bit word(s) to the EEPROM using the EEWR register. + **/ +-static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, +- u16 offset, u16 words, u16 *data) ++s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, ++ u16 offset, u16 words, u16 *data) + { +- s32 status = 0; ++ s32 status = IXGBE_SUCCESS; + ++ DEBUGFUNC("ixgbe_write_eewr_buffer_X540"); + if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == +- 0) ++ IXGBE_SUCCESS) { + status = ixgbe_write_eewr_buffer_generic(hw, offset, + words, data); +- else ++ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ } else { + status = IXGBE_ERR_SWFW_SYNC; ++ } + +- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + return status; + } + +@@ -348,42 +477,46 @@ static s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, + * be used internally by function which utilize ixgbe_acquire_swfw_sync_X540. 
+ * + * @hw: pointer to hardware structure ++ * ++ * Returns a negative error code on error, or the 16-bit checksum + **/ +-static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) ++s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) + { +- u16 i; +- u16 j; ++ u16 i, j; + u16 checksum = 0; + u16 length = 0; + u16 pointer = 0; + u16 word = 0; ++ u16 ptr_start = IXGBE_PCIE_ANALOG_PTR; + +- /* +- * Do not use hw->eeprom.ops.read because we do not want to take ++ /* Do not use hw->eeprom.ops.read because we do not want to take + * the synchronization semaphores here. Instead use + * ixgbe_read_eerd_generic + */ + +- /* Include 0x0-0x3F in the checksum */ ++ DEBUGFUNC("ixgbe_calc_eeprom_checksum_X540"); ++ ++ /* Include 0x0 up to IXGBE_EEPROM_CHECKSUM; do not include the ++ * checksum itself ++ */ + for (i = 0; i < IXGBE_EEPROM_CHECKSUM; i++) { +- if (ixgbe_read_eerd_generic(hw, i, &word) != 0) { +- hw_dbg(hw, "EEPROM read failed\n"); +- break; ++ if (ixgbe_read_eerd_generic(hw, i, &word)) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return IXGBE_ERR_EEPROM; + } + checksum += word; + } + +- /* +- * Include all data from pointers 0x3, 0x6-0xE. This excludes the ++ /* Include all data from pointers 0x3, 0x6-0xE. This excludes the + * FW, PHY module, and PCIe Expansion/Option ROM pointers. + */ +- for (i = IXGBE_PCIE_ANALOG_PTR; i < IXGBE_FW_PTR; i++) { ++ for (i = ptr_start; i < IXGBE_FW_PTR; i++) { + if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) + continue; + +- if (ixgbe_read_eerd_generic(hw, i, &pointer) != 0) { +- hw_dbg(hw, "EEPROM read failed\n"); +- break; ++ if (ixgbe_read_eerd_generic(hw, i, &pointer)) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return IXGBE_ERR_EEPROM; + } + + /* Skip pointer section if the pointer is invalid. 
*/ +@@ -391,9 +524,9 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) + pointer >= hw->eeprom.word_size) + continue; + +- if (ixgbe_read_eerd_generic(hw, pointer, &length) != 0) { +- hw_dbg(hw, "EEPROM read failed\n"); +- break; ++ if (ixgbe_read_eerd_generic(hw, pointer, &length)) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return IXGBE_ERR_EEPROM; + } + + /* Skip pointer section if length is invalid. */ +@@ -401,10 +534,10 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) + (pointer + length) >= hw->eeprom.word_size) + continue; + +- for (j = pointer+1; j <= pointer+length; j++) { +- if (ixgbe_read_eerd_generic(hw, j, &word) != 0) { +- hw_dbg(hw, "EEPROM read failed\n"); +- break; ++ for (j = pointer + 1; j <= pointer + length; j++) { ++ if (ixgbe_read_eerd_generic(hw, j, &word)) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return IXGBE_ERR_EEPROM; + } + checksum += word; + } +@@ -412,7 +545,7 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) + + checksum = (u16)IXGBE_EEPROM_SUM - checksum; + +- return checksum; ++ return (s32)checksum; + } + + /** +@@ -423,51 +556,58 @@ static u16 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw) + * Performs checksum calculation and validates the EEPROM checksum. If the + * caller does not need checksum_val, the value can be NULL. + **/ +-static s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, +- u16 *checksum_val) ++s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, ++ u16 *checksum_val) + { + s32 status; + u16 checksum; + u16 read_checksum = 0; + +- /* +- * Read the first word from the EEPROM. If this times out or fails, do ++ DEBUGFUNC("ixgbe_validate_eeprom_checksum_X540"); ++ ++ /* Read the first word from the EEPROM. 
If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); +- +- if (status != 0) { +- hw_dbg(hw, "EEPROM read failed\n"); +- goto out; ++ if (status) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return status; + } + +- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { +- checksum = hw->eeprom.ops.calc_checksum(hw); ++ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) ++ return IXGBE_ERR_SWFW_SYNC; + +- /* +- * Do not use hw->eeprom.ops.read because we do not want to take +- * the synchronization semaphores twice here. +- */ +- ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, +- &read_checksum); ++ status = hw->eeprom.ops.calc_checksum(hw); ++ if (status < 0) ++ goto out; + +- /* +- * Verify read checksum from EEPROM is the same as +- * calculated checksum +- */ +- if (read_checksum != checksum) +- status = IXGBE_ERR_EEPROM_CHECKSUM; ++ checksum = (u16)(status & 0xffff); + +- /* If the user cares, return the calculated checksum */ +- if (checksum_val) +- *checksum_val = checksum; +- } else { +- status = IXGBE_ERR_SWFW_SYNC; ++ /* Do not use hw->eeprom.ops.read because we do not want to take ++ * the synchronization semaphores twice here. 
++ */ ++ status = ixgbe_read_eerd_generic(hw, IXGBE_EEPROM_CHECKSUM, ++ &read_checksum); ++ if (status) ++ goto out; ++ ++ /* Verify read checksum from EEPROM is the same as ++ * calculated checksum ++ */ ++ if (read_checksum != checksum) { ++ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, ++ "Invalid EEPROM checksum"); ++ status = IXGBE_ERR_EEPROM_CHECKSUM; + } + +- hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ /* If the user cares, return the calculated checksum */ ++ if (checksum_val) ++ *checksum_val = checksum; ++ + out: ++ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ + return status; + } + +@@ -479,269 +619,360 @@ out: + * checksum and updates the EEPROM and instructs the hardware to update + * the flash. + **/ +-static s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) ++s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw) + { + s32 status; + u16 checksum; + +- /* +- * Read the first word from the EEPROM. If this times out or fails, do ++ DEBUGFUNC("ixgbe_update_eeprom_checksum_X540"); ++ ++ /* Read the first word from the EEPROM. If this times out or fails, do + * not continue or we could be in for a very long wait while every + * EEPROM read fails + */ + status = hw->eeprom.ops.read(hw, 0, &checksum); ++ if (status) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return status; ++ } + +- if (status != 0) +- hw_dbg(hw, "EEPROM read failed\n"); ++ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM)) ++ return IXGBE_ERR_SWFW_SYNC; + +- if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == 0) { +- checksum = hw->eeprom.ops.calc_checksum(hw); ++ status = hw->eeprom.ops.calc_checksum(hw); ++ if (status < 0) ++ goto out; + +- /* +- * Do not use hw->eeprom.ops.write because we do not want to +- * take the synchronization semaphores twice here. 
+- */ +- status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, +- checksum); ++ checksum = (u16)(status & 0xffff); + +- if (status == 0) +- status = ixgbe_update_flash_X540(hw); +- else +- status = IXGBE_ERR_SWFW_SYNC; +- } ++ /* Do not use hw->eeprom.ops.write because we do not want to ++ * take the synchronization semaphores twice here. ++ */ ++ status = ixgbe_write_eewr_generic(hw, IXGBE_EEPROM_CHECKSUM, checksum); ++ if (status) ++ goto out; + ++ status = ixgbe_update_flash_X540(hw); ++ ++out: + hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); + + return status; + } + + /** +- * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device +- * @hw: pointer to hardware structure ++ * ixgbe_update_flash_X540 - Instruct HW to copy EEPROM to Flash device ++ * @hw: pointer to hardware structure + * +- * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy +- * EEPROM from shadow RAM to the flash device. ++ * Set FLUP (bit 23) of the EEC register to instruct Hardware to copy ++ * EEPROM from shadow RAM to the flash device. 
+ **/ +-static s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) ++s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw) + { + u32 flup; +- s32 status = IXGBE_ERR_EEPROM; ++ s32 status; ++ ++ DEBUGFUNC("ixgbe_update_flash_X540"); + + status = ixgbe_poll_flash_update_done_X540(hw); + if (status == IXGBE_ERR_EEPROM) { +- hw_dbg(hw, "Flash update time out\n"); ++ DEBUGOUT("Flash update time out\n"); + goto out; + } + +- flup = IXGBE_READ_REG(hw, IXGBE_EEC) | IXGBE_EEC_FLUP; +- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); ++ flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)) | IXGBE_EEC_FLUP; ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); + + status = ixgbe_poll_flash_update_done_X540(hw); +- if (status == 0) +- hw_dbg(hw, "Flash update complete\n"); ++ if (status == IXGBE_SUCCESS) ++ DEBUGOUT("Flash update complete\n"); + else +- hw_dbg(hw, "Flash update time out\n"); ++ DEBUGOUT("Flash update time out\n"); + +- if (hw->revision_id == 0) { +- flup = IXGBE_READ_REG(hw, IXGBE_EEC); ++ if (hw->mac.type == ixgbe_mac_X540 && hw->revision_id == 0) { ++ flup = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + + if (flup & IXGBE_EEC_SEC1VAL) { + flup |= IXGBE_EEC_FLUP; +- IXGBE_WRITE_REG(hw, IXGBE_EEC, flup); ++ IXGBE_WRITE_REG(hw, IXGBE_EEC_BY_MAC(hw), flup); + } + + status = ixgbe_poll_flash_update_done_X540(hw); +- if (status == 0) +- hw_dbg(hw, "Flash update complete\n"); ++ if (status == IXGBE_SUCCESS) ++ DEBUGOUT("Flash update complete\n"); + else +- hw_dbg(hw, "Flash update time out\n"); ++ DEBUGOUT("Flash update time out\n"); + } + out: + return status; + } + + /** +- * ixgbe_poll_flash_update_done_X540 - Poll flash update status +- * @hw: pointer to hardware structure ++ * ixgbe_poll_flash_update_done_X540 - Poll flash update status ++ * @hw: pointer to hardware structure + * +- * Polls the FLUDONE (bit 26) of the EEC Register to determine when the +- * flash update is done. ++ * Polls the FLUDONE (bit 26) of the EEC Register to determine when the ++ * flash update is done. 
+ **/ +-static s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_poll_flash_update_done_X540(struct ixgbe_hw *hw) + { + u32 i; + u32 reg; + s32 status = IXGBE_ERR_EEPROM; + ++ DEBUGFUNC("ixgbe_poll_flash_update_done_X540"); ++ + for (i = 0; i < IXGBE_FLUDONE_ATTEMPTS; i++) { +- reg = IXGBE_READ_REG(hw, IXGBE_EEC); ++ reg = IXGBE_READ_REG(hw, IXGBE_EEC_BY_MAC(hw)); + if (reg & IXGBE_EEC_FLUDONE) { +- status = 0; ++ status = IXGBE_SUCCESS; + break; + } +- udelay(5); ++ msec_delay(5); + } ++ ++ if (i == IXGBE_FLUDONE_ATTEMPTS) ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, ++ "Flash update status polling timed out"); ++ + return status; + } + + /** +- * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore +- * @hw: pointer to hardware structure +- * @mask: Mask to specify which semaphore to acquire ++ * ixgbe_acquire_swfw_sync_X540 - Acquire SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to acquire + * +- * Acquires the SWFW semaphore thought the SW_FW_SYNC register for +- * the specified function (CSR, PHY0, PHY1, NVM, Flash) ++ * Acquires the SWFW semaphore thought the SW_FW_SYNC register for ++ * the specified function (CSR, PHY0, PHY1, NVM, Flash) + **/ +-static s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) ++s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) + { +- u32 swfw_sync; +- u32 swmask = mask; +- u32 fwmask = mask << 5; +- u32 hwmask = 0; ++ u32 swmask = mask & IXGBE_GSSR_NVM_PHY_MASK; ++ u32 fwmask = swmask << 5; ++ u32 swi2c_mask = mask & IXGBE_GSSR_I2C_MASK; + u32 timeout = 200; ++ u32 hwmask = 0; ++ u32 swfw_sync; + u32 i; + +- if (swmask == IXGBE_GSSR_EEP_SM) +- hwmask = IXGBE_GSSR_FLASH_SM; ++ DEBUGFUNC("ixgbe_acquire_swfw_sync_X540"); ++ ++ if (swmask & IXGBE_GSSR_EEP_SM) ++ hwmask |= IXGBE_GSSR_FLASH_SM; + ++ /* SW only mask doesn't have FW bit pair */ ++ if (mask & IXGBE_GSSR_SW_MNG_SM) ++ swmask |= IXGBE_GSSR_SW_MNG_SM; ++ ++ swmask |= 
swi2c_mask; ++ fwmask |= swi2c_mask << 2; + for (i = 0; i < timeout; i++) { +- /* +- * SW NVM semaphore bit is used for access to all ++ /* SW NVM semaphore bit is used for access to all + * SW_FW_SYNC bits (not just NVM) + */ +- if (ixgbe_get_swfw_sync_semaphore(hw)) ++ if (ixgbe_get_swfw_sync_semaphore(hw)) { ++ DEBUGOUT("Failed to get NVM access and register semaphore, returning IXGBE_ERR_SWFW_SYNC\n"); + return IXGBE_ERR_SWFW_SYNC; ++ } + +- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); ++ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + if (!(swfw_sync & (fwmask | swmask | hwmask))) { + swfw_sync |= swmask; +- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); +- ixgbe_release_swfw_sync_semaphore(hw); +- break; +- } else { +- /* +- * Firmware currently using resource (fwmask), +- * hardware currently using resource (hwmask), +- * or other software thread currently using +- * resource (swmask) +- */ ++ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), ++ swfw_sync); + ixgbe_release_swfw_sync_semaphore(hw); +- usleep_range(5000, 10000); ++ return IXGBE_SUCCESS; + } ++ /* Firmware currently using resource (fwmask), hardware ++ * currently using resource (hwmask), or other software ++ * thread currently using resource (swmask) ++ */ ++ ixgbe_release_swfw_sync_semaphore(hw); ++ msec_delay(5); + } + +- /* +- * If the resource is not released by the FW/HW the SW can assume that +- * the FW/HW malfunctions. In that case the SW should sets the +- * SW bit(s) of the requested resource(s) while ignoring the +- * corresponding FW/HW bits in the SW_FW_SYNC register. ++ /* If the resource is not released by the FW/HW the SW can assume that ++ * the FW/HW malfunctions. In that case the SW should set the SW bit(s) ++ * of the requested resource(s) while ignoring the corresponding FW/HW ++ * bits in the SW_FW_SYNC register. 
+ */ +- if (i >= timeout) { +- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); +- if (swfw_sync & (fwmask | hwmask)) { +- if (ixgbe_get_swfw_sync_semaphore(hw)) +- return IXGBE_ERR_SWFW_SYNC; +- +- swfw_sync |= swmask; +- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); +- ixgbe_release_swfw_sync_semaphore(hw); +- } ++ if (ixgbe_get_swfw_sync_semaphore(hw)) { ++ DEBUGOUT("Failed to get NVM sempahore and register semaphore while forcefully ignoring FW sempahore bit(s) and setting SW semaphore bit(s), returning IXGBE_ERR_SWFW_SYNC\n"); ++ return IXGBE_ERR_SWFW_SYNC; ++ } ++ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); ++ if (swfw_sync & (fwmask | hwmask)) { ++ swfw_sync |= swmask; ++ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); ++ ixgbe_release_swfw_sync_semaphore(hw); ++ msec_delay(5); ++ return IXGBE_SUCCESS; ++ } ++ /* If the resource is not released by other SW the SW can assume that ++ * the other SW malfunctions. In that case the SW should clear all SW ++ * flags that it does not own and then repeat the whole process once ++ * again. 
++ */ ++ if (swfw_sync & swmask) { ++ u32 rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | ++ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | ++ IXGBE_GSSR_SW_MNG_SM; ++ ++ if (swi2c_mask) ++ rmask |= IXGBE_GSSR_I2C_MASK; ++ ixgbe_release_swfw_sync_X540(hw, rmask); ++ ixgbe_release_swfw_sync_semaphore(hw); ++ DEBUGOUT("Resource not released by other SW, returning IXGBE_ERR_SWFW_SYNC\n"); ++ return IXGBE_ERR_SWFW_SYNC; + } ++ ixgbe_release_swfw_sync_semaphore(hw); ++ DEBUGOUT("Returning error IXGBE_ERR_SWFW_SYNC\n"); + +- usleep_range(5000, 10000); +- return 0; ++ return IXGBE_ERR_SWFW_SYNC; + } + + /** +- * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore +- * @hw: pointer to hardware structure +- * @mask: Mask to specify which semaphore to release ++ * ixgbe_release_swfw_sync_X540 - Release SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to release + * +- * Releases the SWFW semaphore through the SW_FW_SYNC register +- * for the specified function (CSR, PHY0, PHY1, EVM, Flash) ++ * Releases the SWFW semaphore through the SW_FW_SYNC register ++ * for the specified function (CSR, PHY0, PHY1, EVM, Flash) + **/ +-static void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u16 mask) ++void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask) + { ++ u32 swmask = mask & (IXGBE_GSSR_NVM_PHY_MASK | IXGBE_GSSR_SW_MNG_SM); + u32 swfw_sync; +- u32 swmask = mask; + ++ DEBUGFUNC("ixgbe_release_swfw_sync_X540"); ++ ++ if (mask & IXGBE_GSSR_I2C_MASK) ++ swmask |= mask & IXGBE_GSSR_I2C_MASK; + ixgbe_get_swfw_sync_semaphore(hw); + +- swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); ++ swfw_sync = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + swfw_sync &= ~swmask; +- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swfw_sync); ++ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swfw_sync); + + ixgbe_release_swfw_sync_semaphore(hw); +- usleep_range(5000, 10000); ++ msec_delay(2); + } + + /** +- * ixgbe_get_nvm_semaphore - Get 
hardware semaphore +- * @hw: pointer to hardware structure ++ * ixgbe_get_swfw_sync_semaphore - Get hardware semaphore ++ * @hw: pointer to hardware structure + * +- * Sets the hardware semaphores so SW/FW can gain control of shared resources ++ * Sets the hardware semaphores so SW/FW can gain control of shared resources + **/ +-static s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) ++STATIC s32 ixgbe_get_swfw_sync_semaphore(struct ixgbe_hw *hw) + { + s32 status = IXGBE_ERR_EEPROM; + u32 timeout = 2000; + u32 i; + u32 swsm; + ++ DEBUGFUNC("ixgbe_get_swfw_sync_semaphore"); ++ + /* Get SMBI software semaphore between device drivers first */ + for (i = 0; i < timeout; i++) { + /* + * If the SMBI bit is 0 when we read it, then the bit will be + * set and we have the semaphore + */ +- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); + if (!(swsm & IXGBE_SWSM_SMBI)) { +- status = 0; ++ status = IXGBE_SUCCESS; + break; + } +- udelay(50); ++ usec_delay(50); + } + + /* Now get the semaphore between SW/FW through the REGSMP bit */ +- if (status) { ++ if (status == IXGBE_SUCCESS) { + for (i = 0; i < timeout; i++) { +- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + if (!(swsm & IXGBE_SWFW_REGSMP)) + break; + +- udelay(50); ++ usec_delay(50); ++ } ++ ++ /* ++ * Release semaphores and return error if SW NVM semaphore ++ * was not granted because we don't have access to the EEPROM ++ */ ++ if (i >= timeout) { ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, ++ "REGSMP Software NVM semaphore not granted.\n"); ++ ixgbe_release_swfw_sync_semaphore(hw); ++ status = IXGBE_ERR_EEPROM; + } + } else { +- hw_dbg(hw, "Software semaphore SMBI between device drivers not granted.\n"); ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, ++ "Software semaphore SMBI between device drivers " ++ "not granted.\n"); + } + + return status; + } + + /** +- * ixgbe_release_nvm_semaphore - Release hardware semaphore +- * 
@hw: pointer to hardware structure ++ * ixgbe_release_swfw_sync_semaphore - Release hardware semaphore ++ * @hw: pointer to hardware structure + * +- * This function clears hardware semaphore bits. ++ * This function clears hardware semaphore bits. + **/ +-static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) ++STATIC void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) + { +- u32 swsm; ++ u32 swsm; + +- /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ ++ DEBUGFUNC("ixgbe_release_swfw_sync_semaphore"); + +- swsm = IXGBE_READ_REG(hw, IXGBE_SWSM); +- swsm &= ~IXGBE_SWSM_SMBI; +- IXGBE_WRITE_REG(hw, IXGBE_SWSM, swsm); ++ /* Release both semaphores by writing 0 to the bits REGSMP and SMBI */ + +- swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC); ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw)); + swsm &= ~IXGBE_SWFW_REGSMP; +- IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC, swsm); ++ IXGBE_WRITE_REG(hw, IXGBE_SWFW_SYNC_BY_MAC(hw), swsm); ++ ++ swsm = IXGBE_READ_REG(hw, IXGBE_SWSM_BY_MAC(hw)); ++ swsm &= ~IXGBE_SWSM_SMBI; ++ IXGBE_WRITE_REG(hw, IXGBE_SWSM_BY_MAC(hw), swsm); + + IXGBE_WRITE_FLUSH(hw); + } + + /** ++ * ixgbe_init_swfw_sync_X540 - Release hardware semaphore ++ * @hw: pointer to hardware structure ++ * ++ * This function reset hardware semaphore bits for a semaphore that may ++ * have be left locked due to a catastrophic failure. ++ **/ ++void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw) ++{ ++ u32 rmask; ++ ++ /* First try to grab the semaphore but we don't need to bother ++ * looking to see whether we got the lock or not since we do ++ * the same thing regardless of whether we got the lock or not. ++ * We got the lock - we release it. ++ * We timeout trying to get the lock - we force its release. ++ */ ++ ixgbe_get_swfw_sync_semaphore(hw); ++ ixgbe_release_swfw_sync_semaphore(hw); ++ ++ /* Acquire and release all software resources. 
*/ ++ rmask = IXGBE_GSSR_EEP_SM | IXGBE_GSSR_PHY0_SM | ++ IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_MAC_CSR_SM | ++ IXGBE_GSSR_SW_MNG_SM; ++ ++ rmask |= IXGBE_GSSR_I2C_MASK; ++ ixgbe_acquire_swfw_sync_X540(hw, rmask); ++ ixgbe_release_swfw_sync_X540(hw, rmask); ++} ++ ++/** + * ixgbe_blink_led_start_X540 - Blink LED based on index. + * @hw: pointer to hardware structure + * @index: led number to blink +@@ -749,20 +980,25 @@ static void ixgbe_release_swfw_sync_semaphore(struct ixgbe_hw *hw) + * Devices that implement the version 2 interface: + * X540 + **/ +-static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) ++s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) + { + u32 macc_reg; + u32 ledctl_reg; + ixgbe_link_speed speed; + bool link_up; + ++ DEBUGFUNC("ixgbe_blink_led_start_X540"); ++ ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ + /* + * Link should be up in order for the blink bit in the LED control + * register to work. Force link and speed in the MAC if link is down. + * This will be reversed when we stop the blinking. 
+ */ + hw->mac.ops.check_link(hw, &speed, &link_up, false); +- if (!link_up) { ++ if (link_up == false) { + macc_reg = IXGBE_READ_REG(hw, IXGBE_MACC); + macc_reg |= IXGBE_MACC_FLU | IXGBE_MACC_FSV_10G | IXGBE_MACC_FS; + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); +@@ -774,7 +1010,7 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) + IXGBE_WRITE_REG(hw, IXGBE_LEDCTL, ledctl_reg); + IXGBE_WRITE_FLUSH(hw); + +- return 0; ++ return IXGBE_SUCCESS; + } + + /** +@@ -785,11 +1021,16 @@ static s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index) + * Devices that implement the version 2 interface: + * X540 + **/ +-static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) ++s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) + { + u32 macc_reg; + u32 ledctl_reg; + ++ if (index > 3) ++ return IXGBE_ERR_PARAM; ++ ++ DEBUGFUNC("ixgbe_blink_led_stop_X540"); ++ + /* Restore the LED to its default value. */ + ledctl_reg = IXGBE_READ_REG(hw, IXGBE_LEDCTL); + ledctl_reg &= ~IXGBE_LED_MODE_MASK(index); +@@ -803,95 +1044,5 @@ static s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index) + IXGBE_WRITE_REG(hw, IXGBE_MACC, macc_reg); + IXGBE_WRITE_FLUSH(hw); + +- return 0; ++ return IXGBE_SUCCESS; + } +-static struct ixgbe_mac_operations mac_ops_X540 = { +- .init_hw = &ixgbe_init_hw_generic, +- .reset_hw = &ixgbe_reset_hw_X540, +- .start_hw = &ixgbe_start_hw_X540, +- .clear_hw_cntrs = &ixgbe_clear_hw_cntrs_generic, +- .get_media_type = &ixgbe_get_media_type_X540, +- .get_supported_physical_layer = +- &ixgbe_get_supported_physical_layer_X540, +- .enable_rx_dma = &ixgbe_enable_rx_dma_generic, +- .get_mac_addr = &ixgbe_get_mac_addr_generic, +- .get_san_mac_addr = &ixgbe_get_san_mac_addr_generic, +- .get_device_caps = &ixgbe_get_device_caps_generic, +- .get_wwn_prefix = &ixgbe_get_wwn_prefix_generic, +- .stop_adapter = &ixgbe_stop_adapter_generic, +- .get_bus_info = &ixgbe_get_bus_info_generic, +- .set_lan_id = 
&ixgbe_set_lan_id_multi_port_pcie, +- .read_analog_reg8 = NULL, +- .write_analog_reg8 = NULL, +- .setup_link = &ixgbe_setup_mac_link_X540, +- .set_rxpba = &ixgbe_set_rxpba_generic, +- .check_link = &ixgbe_check_mac_link_generic, +- .get_link_capabilities = &ixgbe_get_copper_link_capabilities_generic, +- .led_on = &ixgbe_led_on_generic, +- .led_off = &ixgbe_led_off_generic, +- .blink_led_start = &ixgbe_blink_led_start_X540, +- .blink_led_stop = &ixgbe_blink_led_stop_X540, +- .set_rar = &ixgbe_set_rar_generic, +- .clear_rar = &ixgbe_clear_rar_generic, +- .set_vmdq = &ixgbe_set_vmdq_generic, +- .set_vmdq_san_mac = &ixgbe_set_vmdq_san_mac_generic, +- .clear_vmdq = &ixgbe_clear_vmdq_generic, +- .init_rx_addrs = &ixgbe_init_rx_addrs_generic, +- .update_mc_addr_list = &ixgbe_update_mc_addr_list_generic, +- .enable_mc = &ixgbe_enable_mc_generic, +- .disable_mc = &ixgbe_disable_mc_generic, +- .clear_vfta = &ixgbe_clear_vfta_generic, +- .set_vfta = &ixgbe_set_vfta_generic, +- .fc_enable = &ixgbe_fc_enable_generic, +- .set_fw_drv_ver = &ixgbe_set_fw_drv_ver_generic, +- .init_uta_tables = &ixgbe_init_uta_tables_generic, +- .setup_sfp = NULL, +- .set_mac_anti_spoofing = &ixgbe_set_mac_anti_spoofing, +- .set_vlan_anti_spoofing = &ixgbe_set_vlan_anti_spoofing, +- .acquire_swfw_sync = &ixgbe_acquire_swfw_sync_X540, +- .release_swfw_sync = &ixgbe_release_swfw_sync_X540, +- .disable_rx_buff = &ixgbe_disable_rx_buff_generic, +- .enable_rx_buff = &ixgbe_enable_rx_buff_generic, +- .get_thermal_sensor_data = NULL, +- .init_thermal_sensor_thresh = NULL, +- .prot_autoc_read = &prot_autoc_read_generic, +- .prot_autoc_write = &prot_autoc_write_generic, +-}; +- +-static struct ixgbe_eeprom_operations eeprom_ops_X540 = { +- .init_params = &ixgbe_init_eeprom_params_X540, +- .read = &ixgbe_read_eerd_X540, +- .read_buffer = &ixgbe_read_eerd_buffer_X540, +- .write = &ixgbe_write_eewr_X540, +- .write_buffer = &ixgbe_write_eewr_buffer_X540, +- .calc_checksum = &ixgbe_calc_eeprom_checksum_X540, +- 
.validate_checksum = &ixgbe_validate_eeprom_checksum_X540, +- .update_checksum = &ixgbe_update_eeprom_checksum_X540, +-}; +- +-static struct ixgbe_phy_operations phy_ops_X540 = { +- .identify = &ixgbe_identify_phy_generic, +- .identify_sfp = &ixgbe_identify_sfp_module_generic, +- .init = NULL, +- .reset = NULL, +- .read_reg = &ixgbe_read_phy_reg_generic, +- .write_reg = &ixgbe_write_phy_reg_generic, +- .setup_link = &ixgbe_setup_phy_link_generic, +- .setup_link_speed = &ixgbe_setup_phy_link_speed_generic, +- .read_i2c_byte = &ixgbe_read_i2c_byte_generic, +- .write_i2c_byte = &ixgbe_write_i2c_byte_generic, +- .read_i2c_sff8472 = &ixgbe_read_i2c_sff8472_generic, +- .read_i2c_eeprom = &ixgbe_read_i2c_eeprom_generic, +- .write_i2c_eeprom = &ixgbe_write_i2c_eeprom_generic, +- .check_overtemp = &ixgbe_tn_check_overtemp, +- .get_firmware_version = &ixgbe_get_phy_firmware_version_generic, +-}; +- +-struct ixgbe_info ixgbe_X540_info = { +- .mac = ixgbe_mac_X540, +- .get_invariants = &ixgbe_get_invariants_X540, +- .mac_ops = &mac_ops_X540, +- .eeprom_ops = &eeprom_ops_X540, +- .phy_ops = &phy_ops_X540, +- .mbx_ops = &mbx_ops_generic, +-}; +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +new file mode 100644 +index 0000000..4cace85 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x540.h +@@ -0,0 +1,58 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_X540_H_ ++#define _IXGBE_X540_H_ ++ ++#include "ixgbe_type.h" ++ ++s32 ixgbe_get_link_capabilities_X540(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, bool *autoneg); ++enum ixgbe_media_type ixgbe_get_media_type_X540(struct ixgbe_hw *hw); ++s32 ixgbe_setup_mac_link_X540(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool link_up_wait_to_complete); ++s32 ixgbe_reset_hw_X540(struct ixgbe_hw *hw); ++s32 ixgbe_start_hw_X540(struct ixgbe_hw *hw); ++u64 ixgbe_get_supported_physical_layer_X540(struct ixgbe_hw *hw); ++ ++s32 ixgbe_init_eeprom_params_X540(struct ixgbe_hw *hw); ++s32 ixgbe_read_eerd_X540(struct ixgbe_hw *hw, u16 offset, u16 *data); ++s32 ixgbe_read_eerd_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, ++ u16 *data); ++s32 ixgbe_write_eewr_X540(struct ixgbe_hw *hw, u16 offset, u16 data); ++s32 ixgbe_write_eewr_buffer_X540(struct ixgbe_hw *hw, u16 offset, u16 words, ++ u16 *data); ++s32 ixgbe_update_eeprom_checksum_X540(struct ixgbe_hw *hw); ++s32 ixgbe_validate_eeprom_checksum_X540(struct ixgbe_hw *hw, u16 *checksum_val); ++s32 ixgbe_calc_eeprom_checksum_X540(struct ixgbe_hw *hw); ++s32 ixgbe_update_flash_X540(struct ixgbe_hw *hw); ++ ++s32 ixgbe_acquire_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); ++void ixgbe_release_swfw_sync_X540(struct ixgbe_hw *hw, u32 mask); ++void ixgbe_init_swfw_sync_X540(struct ixgbe_hw *hw); ++ ++s32 ixgbe_blink_led_start_X540(struct ixgbe_hw *hw, u32 index); ++s32 ixgbe_blink_led_stop_X540(struct ixgbe_hw *hw, u32 index); ++#endif /* _IXGBE_X540_H_ */ ++ +diff --git 
a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +new file mode 100644 +index 0000000..7c8b72f +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.c +@@ -0,0 +1,4707 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe_x550.h" ++#include "ixgbe_x540.h" ++#include "ixgbe_type.h" ++#include "ixgbe_api.h" ++#include "ixgbe_common.h" ++#include "ixgbe_phy.h" ++ ++STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed); ++STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); ++STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *, u32 mask); ++STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw); ++ ++/** ++ * ixgbe_init_ops_X550 - Inits func ptrs and MAC type ++ * @hw: pointer to hardware structure ++ * ++ * Initialize the function pointers and assign the MAC type for X550. ++ * Does not touch the hardware. 
++ **/ ++s32 ixgbe_init_ops_X550(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_eeprom_info *eeprom = &hw->eeprom; ++ s32 ret_val; ++ ++ DEBUGFUNC("ixgbe_init_ops_X550"); ++ ++ ret_val = ixgbe_init_ops_X540(hw); ++ mac->ops.dmac_config = ixgbe_dmac_config_X550; ++ mac->ops.dmac_config_tcs = ixgbe_dmac_config_tcs_X550; ++ mac->ops.dmac_update_tcs = ixgbe_dmac_update_tcs_X550; ++ mac->ops.setup_eee = NULL; ++ mac->ops.set_source_address_pruning = ++ ixgbe_set_source_address_pruning_X550; ++ mac->ops.set_ethertype_anti_spoofing = ++ ixgbe_set_ethertype_anti_spoofing_X550; ++ ++ mac->ops.get_rtrup2tc = ixgbe_dcb_get_rtrup2tc_generic; ++ eeprom->ops.init_params = ixgbe_init_eeprom_params_X550; ++ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; ++ eeprom->ops.read = ixgbe_read_ee_hostif_X550; ++ eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; ++ eeprom->ops.write = ixgbe_write_ee_hostif_X550; ++ eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; ++ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; ++ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; ++ ++ mac->ops.disable_mdd = ixgbe_disable_mdd_X550; ++ mac->ops.enable_mdd = ixgbe_enable_mdd_X550; ++ mac->ops.mdd_event = ixgbe_mdd_event_X550; ++ mac->ops.restore_mdd_vf = ixgbe_restore_mdd_vf_X550; ++ mac->ops.disable_rx = ixgbe_disable_rx_x550; ++ /* Manageability interface */ ++ mac->ops.set_fw_drv_ver = ixgbe_set_fw_drv_ver_x550; ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_X_1G_T: ++ hw->mac.ops.led_on = NULL; ++ hw->mac.ops.led_off = NULL; ++ break; ++ case IXGBE_DEV_ID_X550EM_X_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_10G_T: ++ hw->mac.ops.led_on = ixgbe_led_on_t_X550em; ++ hw->mac.ops.led_off = ixgbe_led_off_t_X550em; ++ break; ++ default: ++ break; ++ } ++ return ret_val; ++} ++ ++/** ++ * ixgbe_read_cs4227 - Read CS4227 register ++ * @hw: pointer to hardware structure ++ * @reg: register number to 
write ++ * @value: pointer to receive value read ++ * ++ * Returns status code ++ **/ ++STATIC s32 ixgbe_read_cs4227(struct ixgbe_hw *hw, u16 reg, u16 *value) ++{ ++ return hw->link.ops.read_link_unlocked(hw, hw->link.addr, reg, value); ++} ++ ++/** ++ * ixgbe_write_cs4227 - Write CS4227 register ++ * @hw: pointer to hardware structure ++ * @reg: register number to write ++ * @value: value to write to register ++ * ++ * Returns status code ++ **/ ++STATIC s32 ixgbe_write_cs4227(struct ixgbe_hw *hw, u16 reg, u16 value) ++{ ++ return hw->link.ops.write_link_unlocked(hw, hw->link.addr, reg, value); ++} ++ ++/** ++ * ixgbe_read_pe - Read register from port expander ++ * @hw: pointer to hardware structure ++ * @reg: register number to read ++ * @value: pointer to receive read value ++ * ++ * Returns status code ++ **/ ++STATIC s32 ixgbe_read_pe(struct ixgbe_hw *hw, u8 reg, u8 *value) ++{ ++ s32 status; ++ ++ status = ixgbe_read_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); ++ if (status != IXGBE_SUCCESS) ++ ERROR_REPORT2(IXGBE_ERROR_CAUTION, ++ "port expander access failed with %d\n", status); ++ return status; ++} ++ ++/** ++ * ixgbe_write_pe - Write register to port expander ++ * @hw: pointer to hardware structure ++ * @reg: register number to write ++ * @value: value to write ++ * ++ * Returns status code ++ **/ ++STATIC s32 ixgbe_write_pe(struct ixgbe_hw *hw, u8 reg, u8 value) ++{ ++ s32 status; ++ ++ status = ixgbe_write_i2c_byte_unlocked(hw, reg, IXGBE_PE, value); ++ if (status != IXGBE_SUCCESS) ++ ERROR_REPORT2(IXGBE_ERROR_CAUTION, ++ "port expander access failed with %d\n", status); ++ return status; ++} ++ ++/** ++ * ixgbe_reset_cs4227 - Reset CS4227 using port expander ++ * @hw: pointer to hardware structure ++ * ++ * This function assumes that the caller has acquired the proper semaphore. ++ * Returns error code ++ **/ ++STATIC s32 ixgbe_reset_cs4227(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ u32 retry; ++ u16 value; ++ u8 reg; ++ ++ /* Trigger hard reset. 
*/ ++ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg |= IXGBE_PE_BIT1; ++ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ status = ixgbe_read_pe(hw, IXGBE_PE_CONFIG, ®); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg &= ~IXGBE_PE_BIT1; ++ status = ixgbe_write_pe(hw, IXGBE_PE_CONFIG, reg); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg &= ~IXGBE_PE_BIT1; ++ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ usec_delay(IXGBE_CS4227_RESET_HOLD); ++ ++ status = ixgbe_read_pe(hw, IXGBE_PE_OUTPUT, ®); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg |= IXGBE_PE_BIT1; ++ status = ixgbe_write_pe(hw, IXGBE_PE_OUTPUT, reg); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Wait for the reset to complete. 
*/ ++ msec_delay(IXGBE_CS4227_RESET_DELAY); ++ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { ++ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EFUSE_STATUS, ++ &value); ++ if (status == IXGBE_SUCCESS && ++ value == IXGBE_CS4227_EEPROM_LOAD_OK) ++ break; ++ msec_delay(IXGBE_CS4227_CHECK_DELAY); ++ } ++ if (retry == IXGBE_CS4227_RETRIES) { ++ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, ++ "CS4227 reset did not complete."); ++ return IXGBE_ERR_PHY; ++ } ++ ++ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_EEPROM_STATUS, &value); ++ if (status != IXGBE_SUCCESS || ++ !(value & IXGBE_CS4227_EEPROM_LOAD_OK)) { ++ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, ++ "CS4227 EEPROM did not load successfully."); ++ return IXGBE_ERR_PHY; ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_check_cs4227 - Check CS4227 and reset as needed ++ * @hw: pointer to hardware structure ++ **/ ++STATIC void ixgbe_check_cs4227(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_SUCCESS; ++ u32 swfw_mask = hw->phy.phy_semaphore_mask; ++ u16 value = 0; ++ u8 retry; ++ ++ for (retry = 0; retry < IXGBE_CS4227_RETRIES; retry++) { ++ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); ++ if (status != IXGBE_SUCCESS) { ++ ERROR_REPORT2(IXGBE_ERROR_CAUTION, ++ "semaphore failed with %d", status); ++ msec_delay(IXGBE_CS4227_CHECK_DELAY); ++ continue; ++ } ++ ++ /* Get status of reset flow. */ ++ status = ixgbe_read_cs4227(hw, IXGBE_CS4227_SCRATCH, &value); ++ ++ if (status == IXGBE_SUCCESS && ++ value == IXGBE_CS4227_RESET_COMPLETE) ++ goto out; ++ ++ if (status != IXGBE_SUCCESS || ++ value != IXGBE_CS4227_RESET_PENDING) ++ break; ++ ++ /* Reset is pending. Wait and check again. */ ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ msec_delay(IXGBE_CS4227_CHECK_DELAY); ++ } ++ ++ /* If still pending, assume other instance failed. 
*/ ++ if (retry == IXGBE_CS4227_RETRIES) { ++ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); ++ if (status != IXGBE_SUCCESS) { ++ ERROR_REPORT2(IXGBE_ERROR_CAUTION, ++ "semaphore failed with %d", status); ++ return; ++ } ++ } ++ ++ /* Reset the CS4227. */ ++ status = ixgbe_reset_cs4227(hw); ++ if (status != IXGBE_SUCCESS) { ++ ERROR_REPORT2(IXGBE_ERROR_INVALID_STATE, ++ "CS4227 reset failed: %d", status); ++ goto out; ++ } ++ ++ /* Reset takes so long, temporarily release semaphore in case the ++ * other driver instance is waiting for the reset indication. ++ */ ++ ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, ++ IXGBE_CS4227_RESET_PENDING); ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ msec_delay(10); ++ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); ++ if (status != IXGBE_SUCCESS) { ++ ERROR_REPORT2(IXGBE_ERROR_CAUTION, ++ "semaphore failed with %d", status); ++ return; ++ } ++ ++ /* Record completion for next time. */ ++ status = ixgbe_write_cs4227(hw, IXGBE_CS4227_SCRATCH, ++ IXGBE_CS4227_RESET_COMPLETE); ++ ++out: ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ msec_delay(hw->eeprom.semaphore_delay); ++} ++ ++/** ++ * ixgbe_setup_mux_ctl - Setup ESDP register for I2C mux control ++ * @hw: pointer to hardware structure ++ **/ ++STATIC void ixgbe_setup_mux_ctl(struct ixgbe_hw *hw) ++{ ++ u32 esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); ++ ++ if (hw->bus.lan_id) { ++ esdp &= ~(IXGBE_ESDP_SDP1_NATIVE | IXGBE_ESDP_SDP1); ++ esdp |= IXGBE_ESDP_SDP1_DIR; ++ } ++ esdp &= ~(IXGBE_ESDP_SDP0_NATIVE | IXGBE_ESDP_SDP0_DIR); ++ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); ++ IXGBE_WRITE_FLUSH(hw); ++} ++ ++/** ++ * ixgbe_read_phy_reg_mdi_22 - Read from a clause 22 PHY register without lock ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit address of PHY register to read ++ * @dev_type: always unused ++ * @phy_data: Pointer to read data from PHY register ++ */ ++STATIC s32 ixgbe_read_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 dev_type, 
u16 *phy_data) ++{ ++ u32 i, data, command; ++ UNREFERENCED_1PARAMETER(dev_type); ++ ++ /* Setup and write the read command */ ++ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | ++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_READ_AUTOINC | ++ IXGBE_MSCA_MDI_COMMAND; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); ++ ++ /* Check every 10 usec to see if the access completed. ++ * The MDI Command bit will clear when the operation is ++ * complete ++ */ ++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { ++ usec_delay(10); ++ ++ command = IXGBE_READ_REG(hw, IXGBE_MSCA); ++ if (!(command & IXGBE_MSCA_MDI_COMMAND)) ++ break; ++ } ++ ++ if (command & IXGBE_MSCA_MDI_COMMAND) { ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, ++ "PHY read command did not complete.\n"); ++ return IXGBE_ERR_PHY; ++ } ++ ++ /* Read operation is complete. Get the data from MSRWD */ ++ data = IXGBE_READ_REG(hw, IXGBE_MSRWD); ++ data >>= IXGBE_MSRWD_READ_DATA_SHIFT; ++ *phy_data = (u16)data; ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_write_phy_reg_mdi_22 - Write to a clause 22 PHY register without lock ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to write ++ * @dev_type: always unused ++ * @phy_data: Data to write to the PHY register ++ */ ++STATIC s32 ixgbe_write_phy_reg_mdi_22(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 dev_type, u16 phy_data) ++{ ++ u32 i, command; ++ UNREFERENCED_1PARAMETER(dev_type); ++ ++ /* Put the data in the MDI single read and write data register*/ ++ IXGBE_WRITE_REG(hw, IXGBE_MSRWD, (u32)phy_data); ++ ++ /* Setup and write the write command */ ++ command = (reg_addr << IXGBE_MSCA_DEV_TYPE_SHIFT) | ++ (hw->phy.addr << IXGBE_MSCA_PHY_ADDR_SHIFT) | ++ IXGBE_MSCA_OLD_PROTOCOL | IXGBE_MSCA_WRITE | ++ IXGBE_MSCA_MDI_COMMAND; ++ ++ IXGBE_WRITE_REG(hw, IXGBE_MSCA, command); ++ ++ /* Check every 10 usec to see if the access completed. 
++ * The MDI Command bit will clear when the operation is ++ * complete ++ */ ++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { ++ usec_delay(10); ++ ++ command = IXGBE_READ_REG(hw, IXGBE_MSCA); ++ if (!(command & IXGBE_MSCA_MDI_COMMAND)) ++ break; ++ } ++ ++ if (command & IXGBE_MSCA_MDI_COMMAND) { ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, ++ "PHY write cmd didn't complete\n"); ++ return IXGBE_ERR_PHY; ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_identify_phy_x550em - Get PHY type based on device id ++ * @hw: pointer to hardware structure ++ * ++ * Returns error code ++ */ ++STATIC s32 ixgbe_identify_phy_x550em(struct ixgbe_hw *hw) ++{ ++ hw->mac.ops.set_lan_id(hw); ++ ++ ixgbe_read_mng_if_sel_x550em(hw); ++ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_A_SFP: ++ return ixgbe_identify_module_generic(hw); ++ case IXGBE_DEV_ID_X550EM_X_SFP: ++ /* set up for CS4227 usage */ ++ ixgbe_setup_mux_ctl(hw); ++ ixgbe_check_cs4227(hw); ++ /* Fallthrough */ ++ ++ case IXGBE_DEV_ID_X550EM_A_SFP_N: ++ return ixgbe_identify_module_generic(hw); ++ break; ++ case IXGBE_DEV_ID_X550EM_X_KX4: ++ hw->phy.type = ixgbe_phy_x550em_kx4; ++ break; ++ case IXGBE_DEV_ID_X550EM_X_XFI: ++ hw->phy.type = ixgbe_phy_x550em_xfi; ++ break; ++ case IXGBE_DEV_ID_X550EM_X_KR: ++ case IXGBE_DEV_ID_X550EM_A_KR: ++ case IXGBE_DEV_ID_X550EM_A_KR_L: ++ hw->phy.type = ixgbe_phy_x550em_kr; ++ break; ++ case IXGBE_DEV_ID_X550EM_A_10G_T: ++ case IXGBE_DEV_ID_X550EM_X_10G_T: ++ return ixgbe_identify_phy_generic(hw); ++ case IXGBE_DEV_ID_X550EM_X_1G_T: ++ hw->phy.type = ixgbe_phy_ext_1g_t; ++ hw->phy.ops.read_reg = NULL; ++ hw->phy.ops.write_reg = NULL; ++ break; ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ hw->phy.type = ixgbe_phy_fw; ++ hw->phy.ops.read_reg = NULL; ++ hw->phy.ops.write_reg = NULL; ++ if (hw->bus.lan_id) ++ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; ++ else ++ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; ++ break; ++ default: ++ 
break; ++ } ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_fw_phy_activity - Perform an activity on a PHY ++ * @hw: pointer to hardware structure ++ * @activity: activity to perform ++ * @data: Pointer to 4 32-bit words of data ++ */ ++s32 ixgbe_fw_phy_activity(struct ixgbe_hw *hw, u16 activity, ++ u32 (*data)[FW_PHY_ACT_DATA_COUNT]) ++{ ++ union { ++ struct ixgbe_hic_phy_activity_req cmd; ++ struct ixgbe_hic_phy_activity_resp rsp; ++ } hic; ++ u16 retries = FW_PHY_ACT_RETRIES; ++ s32 rc; ++ u16 i; ++ ++ do { ++ memset(&hic, 0, sizeof(hic)); ++ hic.cmd.hdr.cmd = FW_PHY_ACT_REQ_CMD; ++ hic.cmd.hdr.buf_len = FW_PHY_ACT_REQ_LEN; ++ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; ++ hic.cmd.port_number = hw->bus.lan_id; ++ hic.cmd.activity_id = IXGBE_CPU_TO_LE16(activity); ++ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) ++ hic.cmd.data[i] = IXGBE_CPU_TO_BE32((*data)[i]); ++ ++ rc = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, ++ sizeof(hic.cmd), ++ IXGBE_HI_COMMAND_TIMEOUT, ++ true); ++ if (rc != IXGBE_SUCCESS) ++ return rc; ++ if (hic.rsp.hdr.cmd_or_resp.ret_status == ++ FW_CEM_RESP_STATUS_SUCCESS) { ++ for (i = 0; i < FW_PHY_ACT_DATA_COUNT; ++i) ++ (*data)[i] = IXGBE_BE32_TO_CPU(hic.rsp.data[i]); ++ return IXGBE_SUCCESS; ++ } ++ usec_delay(20); ++ --retries; ++ } while (retries > 0); ++ ++ return IXGBE_ERR_HOST_INTERFACE_COMMAND; ++} ++ ++static const struct { ++ u16 fw_speed; ++ ixgbe_link_speed phy_speed; ++} ixgbe_fw_map[] = { ++ { FW_PHY_ACT_LINK_SPEED_10, IXGBE_LINK_SPEED_10_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_100, IXGBE_LINK_SPEED_100_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_1G, IXGBE_LINK_SPEED_1GB_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_2_5G, IXGBE_LINK_SPEED_2_5GB_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_5G, IXGBE_LINK_SPEED_5GB_FULL }, ++ { FW_PHY_ACT_LINK_SPEED_10G, IXGBE_LINK_SPEED_10GB_FULL }, ++}; ++ ++/** ++ * ixgbe_get_phy_id_fw - Get the phy ID via firmware command ++ * @hw: pointer to hardware structure ++ * ++ * Returns error code ++ */ ++static s32 
ixgbe_get_phy_id_fw(struct ixgbe_hw *hw) ++{ ++ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ u16 phy_speeds; ++ u16 phy_id_lo; ++ s32 rc; ++ u16 i; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_PHY_INFO, &info); ++ if (rc) ++ return rc; ++ ++ hw->phy.speeds_supported = 0; ++ phy_speeds = info[0] & FW_PHY_INFO_SPEED_MASK; ++ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) { ++ if (phy_speeds & ixgbe_fw_map[i].fw_speed) ++ hw->phy.speeds_supported |= ixgbe_fw_map[i].phy_speed; ++ } ++ if (!hw->phy.autoneg_advertised) ++ hw->phy.autoneg_advertised = hw->phy.speeds_supported; ++ ++ hw->phy.id = info[0] & FW_PHY_INFO_ID_HI_MASK; ++ phy_id_lo = info[1] & FW_PHY_INFO_ID_LO_MASK; ++ hw->phy.id |= phy_id_lo & IXGBE_PHY_REVISION_MASK; ++ hw->phy.revision = phy_id_lo & ~IXGBE_PHY_REVISION_MASK; ++ if (!hw->phy.id || hw->phy.id == IXGBE_PHY_REVISION_MASK) ++ return IXGBE_ERR_PHY_ADDR_INVALID; ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_identify_phy_fw - Get PHY type based on firmware command ++ * @hw: pointer to hardware structure ++ * ++ * Returns error code ++ */ ++static s32 ixgbe_identify_phy_fw(struct ixgbe_hw *hw) ++{ ++ if (hw->bus.lan_id) ++ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY1_SM; ++ else ++ hw->phy.phy_semaphore_mask = IXGBE_GSSR_PHY0_SM; ++ ++ hw->phy.type = ixgbe_phy_fw; ++ hw->phy.ops.read_reg = NULL; ++ hw->phy.ops.write_reg = NULL; ++ return ixgbe_get_phy_id_fw(hw); ++} ++ ++/** ++ * ixgbe_shutdown_fw_phy - Shutdown a firmware-controlled PHY ++ * @hw: pointer to hardware structure ++ * ++ * Returns error code ++ */ ++s32 ixgbe_shutdown_fw_phy(struct ixgbe_hw *hw) ++{ ++ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ ++ setup[0] = FW_PHY_ACT_FORCE_LINK_DOWN_OFF; ++ return ixgbe_fw_phy_activity(hw, FW_PHY_ACT_FORCE_LINK_DOWN, &setup); ++} ++ ++STATIC s32 ixgbe_read_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 *phy_data) ++{ ++ UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, *phy_data); ++ return 
IXGBE_NOT_IMPLEMENTED; ++} ++ ++STATIC s32 ixgbe_write_phy_reg_x550em(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 phy_data) ++{ ++ UNREFERENCED_4PARAMETER(*hw, reg_addr, device_type, phy_data); ++ return IXGBE_NOT_IMPLEMENTED; ++} ++ ++/** ++ * ixgbe_read_i2c_combined_generic - Perform I2C read combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to read from ++ * @reg: I2C device register to read from ++ * @val: pointer to location to receive read value ++ * ++ * Returns an error code on error. ++ **/ ++STATIC s32 ixgbe_read_i2c_combined_generic(struct ixgbe_hw *hw, u8 addr, ++ u16 reg, u16 *val) ++{ ++ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, true); ++} ++ ++/** ++ * ixgbe_read_i2c_combined_generic_unlocked - Do I2C read combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to read from ++ * @reg: I2C device register to read from ++ * @val: pointer to location to receive read value ++ * ++ * Returns an error code on error. ++ **/ ++STATIC s32 ++ixgbe_read_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, u8 addr, ++ u16 reg, u16 *val) ++{ ++ return ixgbe_read_i2c_combined_generic_int(hw, addr, reg, val, false); ++} ++ ++/** ++ * ixgbe_write_i2c_combined_generic - Perform I2C write combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to write to ++ * @reg: I2C device register to write to ++ * @val: value to write ++ * ++ * Returns an error code on error. ++ **/ ++STATIC s32 ixgbe_write_i2c_combined_generic(struct ixgbe_hw *hw, ++ u8 addr, u16 reg, u16 val) ++{ ++ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, true); ++} ++ ++/** ++ * ixgbe_write_i2c_combined_generic_unlocked - Do I2C write combined operation ++ * @hw: pointer to the hardware structure ++ * @addr: I2C bus address to write to ++ * @reg: I2C device register to write to ++ * @val: value to write ++ * ++ * Returns an error code on error. 
++ **/ ++STATIC s32 ++ixgbe_write_i2c_combined_generic_unlocked(struct ixgbe_hw *hw, ++ u8 addr, u16 reg, u16 val) ++{ ++ return ixgbe_write_i2c_combined_generic_int(hw, addr, reg, val, false); ++} ++ ++/** ++* ixgbe_init_ops_X550EM - Inits func ptrs and MAC type ++* @hw: pointer to hardware structure ++* ++* Initialize the function pointers and for MAC type X550EM. ++* Does not touch the hardware. ++**/ ++s32 ixgbe_init_ops_X550EM(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_eeprom_info *eeprom = &hw->eeprom; ++ struct ixgbe_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ ++ DEBUGFUNC("ixgbe_init_ops_X550EM"); ++ ++ /* Similar to X550 so start there. */ ++ ret_val = ixgbe_init_ops_X550(hw); ++ ++ /* Since this function eventually calls ++ * ixgbe_init_ops_540 by design, we are setting ++ * the pointers to NULL explicitly here to overwrite ++ * the values being set in the x540 function. ++ */ ++ /* Thermal sensor not supported in x550EM */ ++ mac->ops.get_thermal_sensor_data = NULL; ++ mac->ops.init_thermal_sensor_thresh = NULL; ++ mac->thermal_sensor_enabled = false; ++ ++ /* FCOE not supported in x550EM */ ++ mac->ops.get_san_mac_addr = NULL; ++ mac->ops.set_san_mac_addr = NULL; ++ mac->ops.get_wwn_prefix = NULL; ++ mac->ops.get_fcoe_boot_status = NULL; ++ ++ /* IPsec not supported in x550EM */ ++ mac->ops.disable_sec_rx_path = NULL; ++ mac->ops.enable_sec_rx_path = NULL; ++ ++ /* AUTOC register is not present in x550EM. 
*/ ++ mac->ops.prot_autoc_read = NULL; ++ mac->ops.prot_autoc_write = NULL; ++ ++ /* X550EM bus type is internal*/ ++ hw->bus.type = ixgbe_bus_type_internal; ++ mac->ops.get_bus_info = ixgbe_get_bus_info_X550em; ++ ++ ++ mac->ops.get_media_type = ixgbe_get_media_type_X550em; ++ mac->ops.setup_sfp = ixgbe_setup_sfp_modules_X550em; ++ mac->ops.get_link_capabilities = ixgbe_get_link_capabilities_X550em; ++ mac->ops.reset_hw = ixgbe_reset_hw_X550em; ++ mac->ops.get_supported_physical_layer = ++ ixgbe_get_supported_physical_layer_X550em; ++ ++ if (mac->ops.get_media_type(hw) == ixgbe_media_type_copper) ++ mac->ops.setup_fc = ixgbe_setup_fc_generic; ++ else ++ mac->ops.setup_fc = ixgbe_setup_fc_X550em; ++ ++ /* PHY */ ++ phy->ops.init = ixgbe_init_phy_ops_X550em; ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ mac->ops.setup_fc = NULL; ++ phy->ops.identify = ixgbe_identify_phy_fw; ++ phy->ops.set_phy_power = NULL; ++ phy->ops.get_firmware_version = NULL; ++ break; ++ case IXGBE_DEV_ID_X550EM_X_1G_T: ++ mac->ops.setup_fc = NULL; ++ phy->ops.identify = ixgbe_identify_phy_x550em; ++ phy->ops.set_phy_power = NULL; ++ break; ++ default: ++ phy->ops.identify = ixgbe_identify_phy_x550em; ++ } ++ ++ if (mac->ops.get_media_type(hw) != ixgbe_media_type_copper) ++ phy->ops.set_phy_power = NULL; ++ ++ /* EEPROM */ ++ eeprom->ops.init_params = ixgbe_init_eeprom_params_X540; ++ eeprom->ops.read = ixgbe_read_ee_hostif_X550; ++ eeprom->ops.read_buffer = ixgbe_read_ee_hostif_buffer_X550; ++ eeprom->ops.write = ixgbe_write_ee_hostif_X550; ++ eeprom->ops.write_buffer = ixgbe_write_ee_hostif_buffer_X550; ++ eeprom->ops.update_checksum = ixgbe_update_eeprom_checksum_X550; ++ eeprom->ops.validate_checksum = ixgbe_validate_eeprom_checksum_X550; ++ eeprom->ops.calc_checksum = ixgbe_calc_eeprom_checksum_X550; ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_setup_fw_link - Setup firmware-controlled PHYs ++ * @hw: pointer to hardware structure 
++ */ ++static s32 ixgbe_setup_fw_link(struct ixgbe_hw *hw) ++{ ++ u32 setup[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ s32 rc; ++ u16 i; ++ ++ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) ++ return 0; ++ ++ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ++ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, ++ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); ++ return IXGBE_ERR_INVALID_LINK_SETTINGS; ++ } ++ ++ switch (hw->fc.requested_mode) { ++ case ixgbe_fc_full: ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RXTX << ++ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; ++ break; ++ case ixgbe_fc_rx_pause: ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_RX << ++ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; ++ break; ++ case ixgbe_fc_tx_pause: ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_PAUSE_TX << ++ FW_PHY_ACT_SETUP_LINK_PAUSE_SHIFT; ++ break; ++ default: ++ break; ++ } ++ ++ for (i = 0; i < sizeof(ixgbe_fw_map) / sizeof(ixgbe_fw_map[0]); ++i) { ++ if (hw->phy.autoneg_advertised & ixgbe_fw_map[i].phy_speed) ++ setup[0] |= ixgbe_fw_map[i].fw_speed; ++ } ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_HP | FW_PHY_ACT_SETUP_LINK_AN; ++ ++ if (hw->phy.eee_speeds_advertised) ++ setup[0] |= FW_PHY_ACT_SETUP_LINK_EEE; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_SETUP_LINK, &setup); ++ if (rc) ++ return rc; ++ if (setup[0] == FW_PHY_ACT_SETUP_LINK_RSP_DOWN) ++ return IXGBE_ERR_OVERTEMP; ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_fc_autoneg_fw _ Set up flow control for FW-controlled PHYs ++ * @hw: pointer to hardware structure ++ * ++ * Called at init time to set up flow control. ++ */ ++static s32 ixgbe_fc_autoneg_fw(struct ixgbe_hw *hw) ++{ ++ if (hw->fc.requested_mode == ixgbe_fc_default) ++ hw->fc.requested_mode = ixgbe_fc_full; ++ ++ return ixgbe_setup_fw_link(hw); ++} ++ ++/** ++ * ixgbe_setup_eee_fw - Enable/disable EEE support ++ * @hw: pointer to the HW structure ++ * @enable_eee: boolean flag to enable EEE ++ * ++ * Enable/disable EEE based on enable_eee flag. 
++ * This function controls EEE for firmware-based PHY implementations. ++ */ ++static s32 ixgbe_setup_eee_fw(struct ixgbe_hw *hw, bool enable_eee) ++{ ++ if (!!hw->phy.eee_speeds_advertised == enable_eee) ++ return IXGBE_SUCCESS; ++ if (enable_eee) ++ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; ++ else ++ hw->phy.eee_speeds_advertised = 0; ++ return hw->phy.ops.setup_link(hw); ++} ++ ++/** ++* ixgbe_init_ops_X550EM_a - Inits func ptrs and MAC type ++* @hw: pointer to hardware structure ++* ++* Initialize the function pointers and for MAC type X550EM_a. ++* Does not touch the hardware. ++**/ ++s32 ixgbe_init_ops_X550EM_a(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ s32 ret_val; ++ ++ DEBUGFUNC("ixgbe_init_ops_X550EM_a"); ++ ++ /* Start with generic X550EM init */ ++ ret_val = ixgbe_init_ops_X550EM(hw); ++ ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || ++ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) { ++ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; ++ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; ++ } else { ++ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550a; ++ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550a; ++ } ++ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550a; ++ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550a; ++ ++ switch (mac->ops.get_media_type(hw)) { ++ case ixgbe_media_type_fiber: ++ mac->ops.setup_fc = NULL; ++ mac->ops.fc_autoneg = ixgbe_fc_autoneg_fiber_x550em_a; ++ break; ++ case ixgbe_media_type_backplane: ++ mac->ops.fc_autoneg = ixgbe_fc_autoneg_backplane_x550em_a; ++ mac->ops.setup_fc = ixgbe_setup_fc_backplane_x550em_a; ++ break; ++ default: ++ break; ++ } ++ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ mac->ops.fc_autoneg = ixgbe_fc_autoneg_sgmii_x550em_a; ++ mac->ops.setup_fc = ixgbe_fc_autoneg_fw; ++ mac->ops.setup_eee = ixgbe_setup_eee_fw; ++ 
hw->phy.eee_speeds_supported = IXGBE_LINK_SPEED_100_FULL | ++ IXGBE_LINK_SPEED_1GB_FULL; ++ hw->phy.eee_speeds_advertised = hw->phy.eee_speeds_supported; ++ break; ++ default: ++ break; ++ } ++ ++ return ret_val; ++} ++ ++/** ++* ixgbe_init_ops_X550EM_x - Inits func ptrs and MAC type ++* @hw: pointer to hardware structure ++* ++* Initialize the function pointers and for MAC type X550EM_x. ++* Does not touch the hardware. ++**/ ++s32 ixgbe_init_ops_X550EM_x(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ struct ixgbe_link_info *link = &hw->link; ++ s32 ret_val; ++ ++ DEBUGFUNC("ixgbe_init_ops_X550EM_x"); ++ ++ /* Start with generic X550EM init */ ++ ret_val = ixgbe_init_ops_X550EM(hw); ++ ++ mac->ops.read_iosf_sb_reg = ixgbe_read_iosf_sb_reg_x550; ++ mac->ops.write_iosf_sb_reg = ixgbe_write_iosf_sb_reg_x550; ++ mac->ops.acquire_swfw_sync = ixgbe_acquire_swfw_sync_X550em; ++ mac->ops.release_swfw_sync = ixgbe_release_swfw_sync_X550em; ++ link->ops.read_link = ixgbe_read_i2c_combined_generic; ++ link->ops.read_link_unlocked = ixgbe_read_i2c_combined_generic_unlocked; ++ link->ops.write_link = ixgbe_write_i2c_combined_generic; ++ link->ops.write_link_unlocked = ++ ixgbe_write_i2c_combined_generic_unlocked; ++ link->addr = IXGBE_CS4227; ++ ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) { ++ mac->ops.setup_fc = NULL; ++ mac->ops.setup_eee = NULL; ++ mac->ops.init_led_link_act = NULL; ++ } ++ ++ return ret_val; ++} ++ ++/** ++ * ixgbe_dmac_config_X550 ++ * @hw: pointer to hardware structure ++ * ++ * Configure DMA coalescing. If enabling dmac, dmac is activated. ++ * When disabling dmac, dmac enable dmac bit is cleared. 
++ **/ ++s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw) ++{ ++ u32 reg, high_pri_tc; ++ ++ DEBUGFUNC("ixgbe_dmac_config_X550"); ++ ++ /* Disable DMA coalescing before configuring */ ++ reg = IXGBE_READ_REG(hw, IXGBE_DMACR); ++ reg &= ~IXGBE_DMACR_DMAC_EN; ++ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); ++ ++ /* Disable DMA Coalescing if the watchdog timer is 0 */ ++ if (!hw->mac.dmac_config.watchdog_timer) ++ goto out; ++ ++ ixgbe_dmac_config_tcs_X550(hw); ++ ++ /* Configure DMA Coalescing Control Register */ ++ reg = IXGBE_READ_REG(hw, IXGBE_DMACR); ++ ++ /* Set the watchdog timer in units of 40.96 usec */ ++ reg &= ~IXGBE_DMACR_DMACWT_MASK; ++ reg |= (hw->mac.dmac_config.watchdog_timer * 100) / 4096; ++ ++ reg &= ~IXGBE_DMACR_HIGH_PRI_TC_MASK; ++ /* If fcoe is enabled, set high priority traffic class */ ++ if (hw->mac.dmac_config.fcoe_en) { ++ high_pri_tc = 1 << hw->mac.dmac_config.fcoe_tc; ++ reg |= ((high_pri_tc << IXGBE_DMACR_HIGH_PRI_TC_SHIFT) & ++ IXGBE_DMACR_HIGH_PRI_TC_MASK); ++ } ++ reg |= IXGBE_DMACR_EN_MNG_IND; ++ ++ /* Enable DMA coalescing after configuration */ ++ reg |= IXGBE_DMACR_DMAC_EN; ++ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); ++ ++out: ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_dmac_config_tcs_X550 ++ * @hw: pointer to hardware structure ++ * ++ * Configure DMA coalescing threshold per TC. The dmac enable bit must ++ * be cleared before configuring. 
++ **/ ++s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw) ++{ ++ u32 tc, reg, pb_headroom, rx_pb_size, maxframe_size_kb; ++ ++ DEBUGFUNC("ixgbe_dmac_config_tcs_X550"); ++ ++ /* Configure DMA coalescing enabled */ ++ switch (hw->mac.dmac_config.link_speed) { ++ case IXGBE_LINK_SPEED_10_FULL: ++ case IXGBE_LINK_SPEED_100_FULL: ++ pb_headroom = IXGBE_DMACRXT_100M; ++ break; ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ pb_headroom = IXGBE_DMACRXT_1G; ++ break; ++ default: ++ pb_headroom = IXGBE_DMACRXT_10G; ++ break; ++ } ++ ++ maxframe_size_kb = ((IXGBE_READ_REG(hw, IXGBE_MAXFRS) >> ++ IXGBE_MHADD_MFS_SHIFT) / 1024); ++ ++ /* Set the per Rx packet buffer receive threshold */ ++ for (tc = 0; tc < IXGBE_DCB_MAX_TRAFFIC_CLASS; tc++) { ++ reg = IXGBE_READ_REG(hw, IXGBE_DMCTH(tc)); ++ reg &= ~IXGBE_DMCTH_DMACRXT_MASK; ++ ++ if (tc < hw->mac.dmac_config.num_tcs) { ++ /* Get Rx PB size */ ++ rx_pb_size = IXGBE_READ_REG(hw, IXGBE_RXPBSIZE(tc)); ++ rx_pb_size = (rx_pb_size & IXGBE_RXPBSIZE_MASK) >> ++ IXGBE_RXPBSIZE_SHIFT; ++ ++ /* Calculate receive buffer threshold in kilobytes */ ++ if (rx_pb_size > pb_headroom) ++ rx_pb_size = rx_pb_size - pb_headroom; ++ else ++ rx_pb_size = 0; ++ ++ /* Minimum of MFS shall be set for DMCTH */ ++ reg |= (rx_pb_size > maxframe_size_kb) ? ++ rx_pb_size : maxframe_size_kb; ++ } ++ IXGBE_WRITE_REG(hw, IXGBE_DMCTH(tc), reg); ++ } ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_dmac_update_tcs_X550 ++ * @hw: pointer to hardware structure ++ * ++ * Disables dmac, updates per TC settings, and then enables dmac. 
++ **/ ++s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw) ++{ ++ u32 reg; ++ ++ DEBUGFUNC("ixgbe_dmac_update_tcs_X550"); ++ ++ /* Disable DMA coalescing before configuring */ ++ reg = IXGBE_READ_REG(hw, IXGBE_DMACR); ++ reg &= ~IXGBE_DMACR_DMAC_EN; ++ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); ++ ++ ixgbe_dmac_config_tcs_X550(hw); ++ ++ /* Enable DMA coalescing after configuration */ ++ reg = IXGBE_READ_REG(hw, IXGBE_DMACR); ++ reg |= IXGBE_DMACR_DMAC_EN; ++ IXGBE_WRITE_REG(hw, IXGBE_DMACR, reg); ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_init_eeprom_params_X550 - Initialize EEPROM params ++ * @hw: pointer to hardware structure ++ * ++ * Initializes the EEPROM parameters ixgbe_eeprom_info within the ++ * ixgbe_hw struct in order to set up EEPROM access. ++ **/ ++s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_eeprom_info *eeprom = &hw->eeprom; ++ u32 eec; ++ u16 eeprom_size; ++ ++ DEBUGFUNC("ixgbe_init_eeprom_params_X550"); ++ ++ if (eeprom->type == ixgbe_eeprom_uninitialized) { ++ eeprom->semaphore_delay = 10; ++ eeprom->type = ixgbe_flash; ++ ++ eec = IXGBE_READ_REG(hw, IXGBE_EEC); ++ eeprom_size = (u16)((eec & IXGBE_EEC_SIZE) >> ++ IXGBE_EEC_SIZE_SHIFT); ++ eeprom->word_size = 1 << (eeprom_size + ++ IXGBE_EEPROM_WORD_SIZE_SHIFT); ++ ++ DEBUGOUT2("Eeprom params: type = %d, size = %d\n", ++ eeprom->type, eeprom->word_size); ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_set_source_address_pruning_X550 - Enable/Disbale source address pruning ++ * @hw: pointer to hardware structure ++ * @enable: enable or disable source address pruning ++ * @pool: Rx pool to set source address pruning for ++ **/ ++void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, ++ unsigned int pool) ++{ ++ u64 pfflp; ++ ++ /* max rx pool is 63 */ ++ if (pool > 63) ++ return; ++ ++ pfflp = (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPL); ++ pfflp |= (u64)IXGBE_READ_REG(hw, IXGBE_PFFLPH) << 32; ++ ++ if (enable) ++ pfflp |= (1ULL << 
pool); ++ else ++ pfflp &= ~(1ULL << pool); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_PFFLPL, (u32)pfflp); ++ IXGBE_WRITE_REG(hw, IXGBE_PFFLPH, (u32)(pfflp >> 32)); ++} ++ ++/** ++ * ixgbe_set_ethertype_anti_spoofing_X550 - Enable/Disable Ethertype anti-spoofing ++ * @hw: pointer to hardware structure ++ * @enable: enable or disable switch for Ethertype anti-spoofing ++ * @vf: Virtual Function pool - VF Pool to set for Ethertype anti-spoofing ++ * ++ **/ ++void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, ++ bool enable, int vf) ++{ ++ int vf_target_reg = vf >> 3; ++ int vf_target_shift = vf % 8 + IXGBE_SPOOF_ETHERTYPEAS_SHIFT; ++ u32 pfvfspoof; ++ ++ DEBUGFUNC("ixgbe_set_ethertype_anti_spoofing_X550"); ++ ++ pfvfspoof = IXGBE_READ_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg)); ++ if (enable) ++ pfvfspoof |= (1 << vf_target_shift); ++ else ++ pfvfspoof &= ~(1 << vf_target_shift); ++ ++ IXGBE_WRITE_REG(hw, IXGBE_PFVFSPOOF(vf_target_reg), pfvfspoof); ++} ++ ++/** ++ * ixgbe_iosf_wait - Wait for IOSF command completion ++ * @hw: pointer to hardware structure ++ * @ctrl: pointer to location to receive final IOSF control value ++ * ++ * Returns failing status on timeout ++ * ++ * Note: ctrl can be NULL if the IOSF control register value is not needed ++ **/ ++STATIC s32 ixgbe_iosf_wait(struct ixgbe_hw *hw, u32 *ctrl) ++{ ++ u32 i, command = 0; ++ ++ /* Check every 10 usec to see if the address cycle completed. 
++ * The SB IOSF BUSY bit will clear when the operation is ++ * complete ++ */ ++ for (i = 0; i < IXGBE_MDIO_COMMAND_TIMEOUT; i++) { ++ command = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL); ++ if ((command & IXGBE_SB_IOSF_CTRL_BUSY) == 0) ++ break; ++ usec_delay(10); ++ } ++ if (ctrl) ++ *ctrl = command; ++ if (i == IXGBE_MDIO_COMMAND_TIMEOUT) { ++ ERROR_REPORT1(IXGBE_ERROR_POLLING, "Wait timed out\n"); ++ return IXGBE_ERR_PHY; ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_write_iosf_sb_reg_x550 - Writes a value to specified register ++ * of the IOSF device ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to write ++ * @device_type: 3 bit device type ++ * @data: Data to write to the register ++ **/ ++s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u32 data) ++{ ++ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; ++ u32 command, error; ++ s32 ret; ++ ++ ret = ixgbe_acquire_swfw_semaphore(hw, gssr); ++ if (ret != IXGBE_SUCCESS) ++ return ret; ++ ++ ret = ixgbe_iosf_wait(hw, NULL); ++ if (ret != IXGBE_SUCCESS) ++ goto out; ++ ++ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | ++ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); ++ ++ /* Write IOSF control register */ ++ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); ++ ++ /* Write IOSF data register */ ++ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA, data); ++ ++ ret = ixgbe_iosf_wait(hw, &command); ++ ++ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { ++ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> ++ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; ++ ERROR_REPORT2(IXGBE_ERROR_POLLING, ++ "Failed to write, error %x\n", error); ++ ret = IXGBE_ERR_PHY; ++ } ++ ++out: ++ ixgbe_release_swfw_semaphore(hw, gssr); ++ return ret; ++} ++ ++/** ++ * ixgbe_read_iosf_sb_reg_x550 - Reads specified register of the IOSF device ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to 
write ++ * @device_type: 3 bit device type ++ * @data: Pointer to read data from the register ++ **/ ++s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u32 *data) ++{ ++ u32 gssr = IXGBE_GSSR_PHY1_SM | IXGBE_GSSR_PHY0_SM; ++ u32 command, error; ++ s32 ret; ++ ++ ret = ixgbe_acquire_swfw_semaphore(hw, gssr); ++ if (ret != IXGBE_SUCCESS) ++ return ret; ++ ++ ret = ixgbe_iosf_wait(hw, NULL); ++ if (ret != IXGBE_SUCCESS) ++ goto out; ++ ++ command = ((reg_addr << IXGBE_SB_IOSF_CTRL_ADDR_SHIFT) | ++ (device_type << IXGBE_SB_IOSF_CTRL_TARGET_SELECT_SHIFT)); ++ ++ /* Write IOSF control register */ ++ IXGBE_WRITE_REG(hw, IXGBE_SB_IOSF_INDIRECT_CTRL, command); ++ ++ ret = ixgbe_iosf_wait(hw, &command); ++ ++ if ((command & IXGBE_SB_IOSF_CTRL_RESP_STAT_MASK) != 0) { ++ error = (command & IXGBE_SB_IOSF_CTRL_CMPL_ERR_MASK) >> ++ IXGBE_SB_IOSF_CTRL_CMPL_ERR_SHIFT; ++ ERROR_REPORT2(IXGBE_ERROR_POLLING, ++ "Failed to read, error %x\n", error); ++ ret = IXGBE_ERR_PHY; ++ } ++ ++ if (ret == IXGBE_SUCCESS) ++ *data = IXGBE_READ_REG(hw, IXGBE_SB_IOSF_INDIRECT_DATA); ++ ++out: ++ ixgbe_release_swfw_semaphore(hw, gssr); ++ return ret; ++} ++ ++/** ++ * ixgbe_get_phy_token - Get the token for shared phy access ++ * @hw: Pointer to hardware structure ++ */ ++ ++s32 ixgbe_get_phy_token(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_hic_phy_token_req token_cmd; ++ s32 status; ++ ++ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; ++ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; ++ token_cmd.hdr.cmd_or_resp.cmd_resv = 0; ++ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; ++ token_cmd.port_number = hw->bus.lan_id; ++ token_cmd.command_type = FW_PHY_TOKEN_REQ; ++ token_cmd.pad = 0; ++ status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd, ++ sizeof(token_cmd), ++ IXGBE_HI_COMMAND_TIMEOUT, ++ true); ++ if (status) { ++ DEBUGOUT1("Issuing host interface command failed with Status = %d\n", ++ status); ++ return status; ++ } ++ if (token_cmd.hdr.cmd_or_resp.ret_status 
== FW_PHY_TOKEN_OK) ++ return IXGBE_SUCCESS; ++ if (token_cmd.hdr.cmd_or_resp.ret_status != FW_PHY_TOKEN_RETRY) { ++ DEBUGOUT1("Host interface command returned 0x%08x , returning IXGBE_ERR_FW_RESP_INVALID\n", ++ token_cmd.hdr.cmd_or_resp.ret_status); ++ return IXGBE_ERR_FW_RESP_INVALID; ++ } ++ ++ DEBUGOUT("Returning IXGBE_ERR_TOKEN_RETRY\n"); ++ return IXGBE_ERR_TOKEN_RETRY; ++} ++ ++/** ++ * ixgbe_put_phy_token - Put the token for shared phy access ++ * @hw: Pointer to hardware structure ++ */ ++ ++s32 ixgbe_put_phy_token(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_hic_phy_token_req token_cmd; ++ s32 status; ++ ++ token_cmd.hdr.cmd = FW_PHY_TOKEN_REQ_CMD; ++ token_cmd.hdr.buf_len = FW_PHY_TOKEN_REQ_LEN; ++ token_cmd.hdr.cmd_or_resp.cmd_resv = 0; ++ token_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; ++ token_cmd.port_number = hw->bus.lan_id; ++ token_cmd.command_type = FW_PHY_TOKEN_REL; ++ token_cmd.pad = 0; ++ status = ixgbe_host_interface_command(hw, (u32 *)&token_cmd, ++ sizeof(token_cmd), ++ IXGBE_HI_COMMAND_TIMEOUT, ++ true); ++ if (status) ++ return status; ++ if (token_cmd.hdr.cmd_or_resp.ret_status == FW_PHY_TOKEN_OK) ++ return IXGBE_SUCCESS; ++ ++ DEBUGOUT("Put PHY Token host interface command failed"); ++ return IXGBE_ERR_FW_RESP_INVALID; ++} ++ ++/** ++ * ixgbe_write_iosf_sb_reg_x550a - Writes a value to specified register ++ * of the IOSF device ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to write ++ * @device_type: 3 bit device type ++ * @data: Data to write to the register ++ **/ ++s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u32 data) ++{ ++ struct ixgbe_hic_internal_phy_req write_cmd; ++ s32 status; ++ UNREFERENCED_1PARAMETER(device_type); ++ ++ memset(&write_cmd, 0, sizeof(write_cmd)); ++ write_cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; ++ write_cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; ++ write_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; ++ write_cmd.port_number = hw->bus.lan_id; ++ 
write_cmd.command_type = FW_INT_PHY_REQ_WRITE; ++ write_cmd.address = IXGBE_CPU_TO_BE16(reg_addr); ++ write_cmd.write_data = IXGBE_CPU_TO_BE32(data); ++ ++ status = ixgbe_host_interface_command(hw, (u32 *)&write_cmd, ++ sizeof(write_cmd), ++ IXGBE_HI_COMMAND_TIMEOUT, false); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_read_iosf_sb_reg_x550a - Reads specified register of the IOSF device ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to write ++ * @device_type: 3 bit device type ++ * @data: Pointer to read data from the register ++ **/ ++s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u32 *data) ++{ ++ union { ++ struct ixgbe_hic_internal_phy_req cmd; ++ struct ixgbe_hic_internal_phy_resp rsp; ++ } hic; ++ s32 status; ++ UNREFERENCED_1PARAMETER(device_type); ++ ++ memset(&hic, 0, sizeof(hic)); ++ hic.cmd.hdr.cmd = FW_INT_PHY_REQ_CMD; ++ hic.cmd.hdr.buf_len = FW_INT_PHY_REQ_LEN; ++ hic.cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; ++ hic.cmd.port_number = hw->bus.lan_id; ++ hic.cmd.command_type = FW_INT_PHY_REQ_READ; ++ hic.cmd.address = IXGBE_CPU_TO_BE16(reg_addr); ++ ++ status = ixgbe_host_interface_command(hw, (u32 *)&hic.cmd, ++ sizeof(hic.cmd), ++ IXGBE_HI_COMMAND_TIMEOUT, true); ++ ++ /* Extract the register value from the response. 
*/ ++ *data = IXGBE_BE32_TO_CPU(hic.rsp.read_data); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_disable_mdd_X550 ++ * @hw: pointer to hardware structure ++ * ++ * Disable malicious driver detection ++ **/ ++void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw) ++{ ++ u32 reg; ++ ++ DEBUGFUNC("ixgbe_disable_mdd_X550"); ++ ++ /* Disable MDD for TX DMA and interrupt */ ++ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); ++ reg &= ~(IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); ++ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); ++ ++ /* Disable MDD for RX and interrupt */ ++ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); ++ reg &= ~(IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); ++ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); ++} ++ ++/** ++ * ixgbe_enable_mdd_X550 ++ * @hw: pointer to hardware structure ++ * ++ * Enable malicious driver detection ++ **/ ++void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw) ++{ ++ u32 reg; ++ ++ DEBUGFUNC("ixgbe_enable_mdd_X550"); ++ ++ /* Enable MDD for TX DMA and interrupt */ ++ reg = IXGBE_READ_REG(hw, IXGBE_DMATXCTL); ++ reg |= (IXGBE_DMATXCTL_MDP_EN | IXGBE_DMATXCTL_MBINTEN); ++ IXGBE_WRITE_REG(hw, IXGBE_DMATXCTL, reg); ++ ++ /* Enable MDD for RX and interrupt */ ++ reg = IXGBE_READ_REG(hw, IXGBE_RDRXCTL); ++ reg |= (IXGBE_RDRXCTL_MDP_EN | IXGBE_RDRXCTL_MBINTEN); ++ IXGBE_WRITE_REG(hw, IXGBE_RDRXCTL, reg); ++} ++ ++/** ++ * ixgbe_restore_mdd_vf_X550 ++ * @hw: pointer to hardware structure ++ * @vf: vf index ++ * ++ * Restore VF that was disabled during malicious driver detection event ++ **/ ++void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf) ++{ ++ u32 idx, reg, num_qs, start_q, bitmask; ++ ++ DEBUGFUNC("ixgbe_restore_mdd_vf_X550"); ++ ++ /* Map VF to queues */ ++ reg = IXGBE_READ_REG(hw, IXGBE_MRQC); ++ switch (reg & IXGBE_MRQC_MRQE_MASK) { ++ case IXGBE_MRQC_VMDQRT8TCEN: ++ num_qs = 8; /* 16 VFs / pools */ ++ bitmask = 0x000000FF; ++ break; ++ case IXGBE_MRQC_VMDQRSS32EN: ++ case IXGBE_MRQC_VMDQRT4TCEN: ++ num_qs = 4; /* 32 VFs / pools */ ++ 
bitmask = 0x0000000F; ++ break; ++ default: /* 64 VFs / pools */ ++ num_qs = 2; ++ bitmask = 0x00000003; ++ break; ++ } ++ start_q = vf * num_qs; ++ ++ /* Release vf's queues by clearing WQBR_TX and WQBR_RX (RW1C) */ ++ idx = start_q / 32; ++ reg = 0; ++ reg |= (bitmask << (start_q % 32)); ++ IXGBE_WRITE_REG(hw, IXGBE_WQBR_TX(idx), reg); ++ IXGBE_WRITE_REG(hw, IXGBE_WQBR_RX(idx), reg); ++} ++ ++/** ++ * ixgbe_mdd_event_X550 ++ * @hw: pointer to hardware structure ++ * @vf_bitmap: vf bitmap of malicious vfs ++ * ++ * Handle malicious driver detection event. ++ **/ ++void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap) ++{ ++ u32 wqbr; ++ u32 i, j, reg, q, shift, vf, idx; ++ ++ DEBUGFUNC("ixgbe_mdd_event_X550"); ++ ++ /* figure out pool size for mapping to vf's */ ++ reg = IXGBE_READ_REG(hw, IXGBE_MRQC); ++ switch (reg & IXGBE_MRQC_MRQE_MASK) { ++ case IXGBE_MRQC_VMDQRT8TCEN: ++ shift = 3; /* 16 VFs / pools */ ++ break; ++ case IXGBE_MRQC_VMDQRSS32EN: ++ case IXGBE_MRQC_VMDQRT4TCEN: ++ shift = 2; /* 32 VFs / pools */ ++ break; ++ default: ++ shift = 1; /* 64 VFs / pools */ ++ break; ++ } ++ ++ /* Read WQBR_TX and WQBR_RX and check for malicious queues */ ++ for (i = 0; i < 4; i++) { ++ wqbr = IXGBE_READ_REG(hw, IXGBE_WQBR_TX(i)); ++ wqbr |= IXGBE_READ_REG(hw, IXGBE_WQBR_RX(i)); ++ ++ if (!wqbr) ++ continue; ++ ++ /* Get malicious queue */ ++ for (j = 0; j < 32 && wqbr; j++) { ++ ++ if (!(wqbr & (1 << j))) ++ continue; ++ ++ /* Get queue from bitmask */ ++ q = j + (i * 32); ++ ++ /* Map queue to vf */ ++ vf = (q >> shift); ++ ++ /* Set vf bit in vf_bitmap */ ++ idx = vf / 32; ++ vf_bitmap[idx] |= (1 << (vf % 32)); ++ wqbr &= ~(1 << j); ++ } ++ } ++} ++ ++/** ++ * ixgbe_get_media_type_X550em - Get media type ++ * @hw: pointer to hardware structure ++ * ++ * Returns the media type (fiber, copper, backplane) ++ */ ++enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw) ++{ ++ enum ixgbe_media_type media_type; ++ ++ 
DEBUGFUNC("ixgbe_get_media_type_X550em"); ++ ++ /* Detect if there is a copper PHY attached. */ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_X_KR: ++ case IXGBE_DEV_ID_X550EM_X_KX4: ++ case IXGBE_DEV_ID_X550EM_X_XFI: ++ case IXGBE_DEV_ID_X550EM_A_KR: ++ case IXGBE_DEV_ID_X550EM_A_KR_L: ++ media_type = ixgbe_media_type_backplane; ++ break; ++ case IXGBE_DEV_ID_X550EM_X_SFP: ++ case IXGBE_DEV_ID_X550EM_A_SFP: ++ case IXGBE_DEV_ID_X550EM_A_SFP_N: ++ case IXGBE_DEV_ID_X550EM_A_QSFP: ++ case IXGBE_DEV_ID_X550EM_A_QSFP_N: ++ media_type = ixgbe_media_type_fiber; ++ break; ++ case IXGBE_DEV_ID_X550EM_X_1G_T: ++ case IXGBE_DEV_ID_X550EM_X_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_10G_T: ++ media_type = ixgbe_media_type_copper; ++ break; ++ case IXGBE_DEV_ID_X550EM_A_SGMII: ++ case IXGBE_DEV_ID_X550EM_A_SGMII_L: ++ media_type = ixgbe_media_type_backplane; ++ hw->phy.type = ixgbe_phy_sgmii; ++ break; ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ media_type = ixgbe_media_type_copper; ++ break; ++ default: ++ media_type = ixgbe_media_type_unknown; ++ break; ++ } ++ return media_type; ++} ++ ++/** ++ * ixgbe_supported_sfp_modules_X550em - Check if SFP module type is supported ++ * @hw: pointer to hardware structure ++ * @linear: true if SFP module is linear ++ */ ++STATIC s32 ixgbe_supported_sfp_modules_X550em(struct ixgbe_hw *hw, bool *linear) ++{ ++ DEBUGFUNC("ixgbe_supported_sfp_modules_X550em"); ++ ++ switch (hw->phy.sfp_type) { ++ case ixgbe_sfp_type_not_present: ++ return IXGBE_ERR_SFP_NOT_PRESENT; ++ case ixgbe_sfp_type_da_cu_core0: ++ case ixgbe_sfp_type_da_cu_core1: ++ *linear = true; ++ break; ++ case ixgbe_sfp_type_srlr_core0: ++ case ixgbe_sfp_type_srlr_core1: ++ case ixgbe_sfp_type_da_act_lmt_core0: ++ case ixgbe_sfp_type_da_act_lmt_core1: ++ case ixgbe_sfp_type_1g_sx_core0: ++ case ixgbe_sfp_type_1g_sx_core1: ++ case ixgbe_sfp_type_1g_lx_core0: ++ case ixgbe_sfp_type_1g_lx_core1: ++ *linear = false; ++ break; ++ case 
ixgbe_sfp_type_unknown: ++ case ixgbe_sfp_type_1g_cu_core0: ++ case ixgbe_sfp_type_1g_cu_core1: ++ default: ++ return IXGBE_ERR_SFP_NOT_SUPPORTED; ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_identify_sfp_module_X550em - Identifies SFP modules ++ * @hw: pointer to hardware structure ++ * ++ * Searches for and identifies the SFP module and assigns appropriate PHY type. ++ **/ ++s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ bool linear; ++ ++ DEBUGFUNC("ixgbe_identify_sfp_module_X550em"); ++ ++ status = ixgbe_identify_module_generic(hw); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Check if SFP module is supported */ ++ status = ixgbe_supported_sfp_modules_X550em(hw, &linear); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_setup_sfp_modules_X550em - Setup MAC link ops ++ * @hw: pointer to hardware structure ++ */ ++s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ bool linear; ++ ++ DEBUGFUNC("ixgbe_setup_sfp_modules_X550em"); ++ ++ /* Check if SFP module is supported */ ++ status = ixgbe_supported_sfp_modules_X550em(hw, &linear); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ ixgbe_init_mac_link_ops_X550em(hw); ++ hw->phy.ops.reset = NULL; ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++* ixgbe_restart_an_internal_phy_x550em - restart autonegotiation for the ++* internal PHY ++* @hw: pointer to hardware structure ++**/ ++STATIC s32 ixgbe_restart_an_internal_phy_x550em(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ u32 link_ctrl; ++ ++ /* Restart auto-negotiation. 
*/ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_ctrl); ++ ++ if (status) { ++ DEBUGOUT("Auto-negotiation did not complete\n"); ++ return status; ++ } ++ ++ link_ctrl |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_RESTART; ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, link_ctrl); ++ ++ if (hw->mac.type == ixgbe_mac_X550EM_a) { ++ u32 flx_mask_st20; ++ ++ /* Indicate to FW that AN restart has been asserted */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_mask_st20); ++ ++ if (status) { ++ DEBUGOUT("Auto-negotiation did not complete\n"); ++ return status; ++ } ++ ++ flx_mask_st20 |= IXGBE_KRM_PMD_FLX_MASK_ST20_FW_AN_RESTART; ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_mask_st20); ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_setup_sgmii - Set up link for sgmii ++ * @hw: pointer to hardware structure ++ */ ++STATIC s32 ixgbe_setup_sgmii(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg_wait) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ u32 lval, sval, flx_val; ++ s32 rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); ++ if (rc) ++ return rc; ++ ++ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; ++ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; ++ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; ++ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; ++ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, lval); ++ if (rc) ++ return rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &sval); ++ 
if (rc) ++ return rc; ++ ++ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; ++ sval |= IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, sval); ++ if (rc) ++ return rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); ++ if (rc) ++ return rc; ++ ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); ++ if (rc) ++ return rc; ++ ++ rc = ixgbe_restart_an_internal_phy_x550em(hw); ++ if (rc) ++ return rc; ++ ++ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); ++} ++ ++/** ++ * ixgbe_setup_sgmii_fw - Set up link for sgmii with firmware-controlled PHYs ++ * @hw: pointer to hardware structure ++ */ ++STATIC s32 ixgbe_setup_sgmii_fw(struct ixgbe_hw *hw, ixgbe_link_speed speed, ++ bool autoneg_wait) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ u32 lval, sval, flx_val; ++ s32 rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &lval); ++ if (rc) ++ return rc; ++ ++ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; ++ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; ++ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_SGMII_EN; ++ lval |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CLAUSE_37_EN; ++ lval &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, lval); ++ if (rc) ++ return rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, 
&sval); ++ if (rc) ++ return rc; ++ ++ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_10_D; ++ sval &= ~IXGBE_KRM_SGMII_CTRL_MAC_TAR_FORCE_100_D; ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_SGMII_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, sval); ++ if (rc) ++ return rc; ++ ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, lval); ++ if (rc) ++ return rc; ++ ++ rc = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &flx_val); ++ if (rc) ++ return rc; ++ ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; ++ flx_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; ++ flx_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ ++ rc = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, flx_val); ++ if (rc) ++ return rc; ++ ++ rc = ixgbe_restart_an_internal_phy_x550em(hw); ++ ++ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait); ++} ++ ++/** ++ * ixgbe_init_mac_link_ops_X550em - init mac link function pointers ++ * @hw: pointer to hardware structure ++ */ ++void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ ++ DEBUGFUNC("ixgbe_init_mac_link_ops_X550em"); ++ ++ switch (hw->mac.ops.get_media_type(hw)) { ++ case ixgbe_media_type_fiber: ++ /* CS4227 does not support autoneg, so disable the laser control ++ * functions for SFP+ fiber ++ */ ++ mac->ops.disable_tx_laser = NULL; ++ mac->ops.enable_tx_laser = NULL; ++ mac->ops.flap_tx_laser = NULL; ++ mac->ops.setup_link = ixgbe_setup_mac_link_multispeed_fiber; ++ mac->ops.set_rate_select_speed = ++ ixgbe_set_soft_rate_select_speed; ++ ++ if ((hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) || ++ (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP)) ++ mac->ops.setup_mac_link = ++ 
ixgbe_setup_mac_link_sfp_x550a; ++ else ++ mac->ops.setup_mac_link = ++ ixgbe_setup_mac_link_sfp_x550em; ++ break; ++ case ixgbe_media_type_copper: ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_1G_T) ++ break; ++ if (hw->mac.type == ixgbe_mac_X550EM_a) { ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T || ++ hw->device_id == IXGBE_DEV_ID_X550EM_A_1G_T_L) { ++ mac->ops.setup_link = ixgbe_setup_sgmii_fw; ++ mac->ops.check_link = ++ ixgbe_check_mac_link_generic; ++ } else { ++ mac->ops.setup_link = ++ ixgbe_setup_mac_link_t_X550em; ++ } ++ } else { ++ mac->ops.setup_link = ixgbe_setup_mac_link_t_X550em; ++ mac->ops.check_link = ixgbe_check_link_t_X550em; ++ } ++ break; ++ case ixgbe_media_type_backplane: ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII || ++ hw->device_id == IXGBE_DEV_ID_X550EM_A_SGMII_L) ++ mac->ops.setup_link = ixgbe_setup_sgmii; ++ break; ++ default: ++ break; ++ } ++} ++ ++/** ++ * ixgbe_get_link_capabilities_x550em - Determines link capabilities ++ * @hw: pointer to hardware structure ++ * @speed: pointer to link speed ++ * @autoneg: true when autoneg or autotry is enabled ++ */ ++s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, ++ bool *autoneg) ++{ ++ DEBUGFUNC("ixgbe_get_link_capabilities_X550em"); ++ ++ if (hw->phy.type == ixgbe_phy_fw) { ++ *autoneg = true; ++ *speed = hw->phy.speeds_supported; ++ return 0; ++ } ++ ++ /* SFP */ ++ if (hw->phy.media_type == ixgbe_media_type_fiber) { ++ ++ /* CS4227 SFP must not enable auto-negotiation */ ++ *autoneg = false; ++ ++ /* Check if 1G SFP module. 
*/ ++ if (hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core0 || ++ hw->phy.sfp_type == ixgbe_sfp_type_1g_sx_core1 ++ || hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core0 || ++ hw->phy.sfp_type == ixgbe_sfp_type_1g_lx_core1) { ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ return IXGBE_SUCCESS; ++ } ++ ++ /* Link capabilities are based on SFP */ ++ if (hw->phy.multispeed_fiber) ++ *speed = IXGBE_LINK_SPEED_10GB_FULL | ++ IXGBE_LINK_SPEED_1GB_FULL; ++ else ++ *speed = IXGBE_LINK_SPEED_10GB_FULL; ++ } else { ++ switch (hw->phy.type) { ++ case ixgbe_phy_ext_1g_t: ++ case ixgbe_phy_sgmii: ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ case ixgbe_phy_x550em_kr: ++ if (hw->mac.type == ixgbe_mac_X550EM_a) { ++ /* check different backplane modes */ ++ if (hw->phy.nw_mng_if_sel & ++ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { ++ *speed = IXGBE_LINK_SPEED_2_5GB_FULL; ++ break; ++ } else if (hw->device_id == ++ IXGBE_DEV_ID_X550EM_A_KR_L) { ++ *speed = IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ } ++ } ++ /* fall through */ ++ default: ++ *speed = IXGBE_LINK_SPEED_10GB_FULL | ++ IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ } ++ *autoneg = true; ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_get_lasi_ext_t_x550em - Determime external Base T PHY interrupt cause ++ * @hw: pointer to hardware structure ++ * @lsc: pointer to boolean flag which indicates whether external Base T ++ * PHY interrupt is lsc ++ * ++ * Determime if external Base T PHY interrupt cause is high temperature ++ * failure alarm or link status change. ++ * ++ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature ++ * failure alarm, else return PHY access status. 
++ */ ++STATIC s32 ixgbe_get_lasi_ext_t_x550em(struct ixgbe_hw *hw, bool *lsc) ++{ ++ u32 status; ++ u16 reg; ++ ++ *lsc = false; ++ ++ /* Vendor alarm triggered */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ ®); ++ ++ if (status != IXGBE_SUCCESS || ++ !(reg & IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN)) ++ return status; ++ ++ /* Vendor Auto-Neg alarm triggered or Global alarm 1 triggered */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_FLAG, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ ®); ++ ++ if (status != IXGBE_SUCCESS || ++ !(reg & (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | ++ IXGBE_MDIO_GLOBAL_ALARM_1_INT))) ++ return status; ++ ++ /* Global alarm triggered */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_ALARM_1, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ ®); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* If high temperature failure, then return over temp error and exit */ ++ if (reg & IXGBE_MDIO_GLOBAL_ALM_1_HI_TMP_FAIL) { ++ /* power down the PHY in case the PHY FW didn't already */ ++ ixgbe_set_copper_phy_power(hw, false); ++ return IXGBE_ERR_OVERTEMP; ++ } else if (reg & IXGBE_MDIO_GLOBAL_ALM_1_DEV_FAULT) { ++ /* device fault alarm triggered */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_FAULT_MSG, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ ®); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* if device fault was due to high temp alarm handle and exit */ ++ if (reg == IXGBE_MDIO_GLOBAL_FAULT_MSG_HI_TMP) { ++ /* power down the PHY in case the PHY FW didn't */ ++ ixgbe_set_copper_phy_power(hw, false); ++ return IXGBE_ERR_OVERTEMP; ++ } ++ } ++ ++ /* Vendor alarm 2 triggered */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_CHIP_STD_INT_FLAG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); ++ ++ if (status != IXGBE_SUCCESS || ++ !(reg & IXGBE_MDIO_GLOBAL_STD_ALM2_INT)) ++ return status; ++ ++ /* link connect/disconnect event 
occurred */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM2, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Indicate LSC */ ++ if (reg & IXGBE_MDIO_AUTO_NEG_VEN_LSC) ++ *lsc = true; ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_enable_lasi_ext_t_x550em - Enable external Base T PHY interrupts ++ * @hw: pointer to hardware structure ++ * ++ * Enable link status change and temperature failure alarm for the external ++ * Base T PHY ++ * ++ * Returns PHY access status ++ */ ++STATIC s32 ixgbe_enable_lasi_ext_t_x550em(struct ixgbe_hw *hw) ++{ ++ u32 status; ++ u16 reg; ++ bool lsc; ++ ++ /* Clear interrupt flags */ ++ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); ++ ++ /* Enable link status change alarm */ ++ ++ /* Enable the LASI interrupts on X552 devices to receive notifications ++ * of the link configurations of the external PHY and correspondingly ++ * support the configuration of the internal iXFI link, since iXFI does ++ * not support auto-negotiation. This is not required for X553 devices ++ * having KR support, which performs auto-negotiations and which is used ++ * as the internal link to the external PHY. Hence adding a check here ++ * to avoid enabling LASI interrupts for X553 devices. 
++ */ ++ if (hw->mac.type != ixgbe_mac_X550EM_a) { ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ®); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ reg |= IXGBE_MDIO_PMA_TX_VEN_LASI_INT_EN; ++ ++ status = hw->phy.ops.write_reg(hw, ++ IXGBE_MDIO_PMA_TX_VEN_LASI_INT_MASK, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, reg); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ } ++ ++ /* Enable high temperature failure and global fault alarms */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ ®); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ reg |= (IXGBE_MDIO_GLOBAL_INT_HI_TEMP_EN | ++ IXGBE_MDIO_GLOBAL_INT_DEV_FAULT_EN); ++ ++ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_MASK, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ reg); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Enable vendor Auto-Neg alarm and Global Interrupt Mask 1 alarm */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ ®); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ reg |= (IXGBE_MDIO_GLOBAL_AN_VEN_ALM_INT_EN | ++ IXGBE_MDIO_GLOBAL_ALARM_1_INT); ++ ++ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_VEN_MASK, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ reg); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Enable chip-wide vendor alarm */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ ®); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ reg |= IXGBE_MDIO_GLOBAL_VEN_ALM_INT_EN; ++ ++ status = hw->phy.ops.write_reg(hw, IXGBE_MDIO_GLOBAL_INT_CHIP_STD_MASK, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ reg); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_setup_kr_speed_x550em - Configure the KR PHY for link speed. 
++ * @hw: pointer to hardware structure ++ * @speed: link speed ++ * ++ * Configures the integrated KR PHY. ++ **/ ++STATIC s32 ixgbe_setup_kr_speed_x550em(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed) ++{ ++ s32 status; ++ u32 reg_val; ++ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status) ++ return status; ++ ++ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; ++ reg_val &= ~(IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR | ++ IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX); ++ ++ /* Advertise 10G support. */ ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) ++ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KR; ++ ++ /* Advertise 1G support. */ ++ if (speed & IXGBE_LINK_SPEED_1GB_FULL) ++ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_AN_CAP_KX; ++ ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ ++ if (hw->mac.type == ixgbe_mac_X550EM_a) { ++ /* Set lane mode to KR auto negotiation */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ ++ if (status) ++ return status; ++ ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_AN; ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; ++ ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ } ++ ++ return ixgbe_restart_an_internal_phy_x550em(hw); ++} ++ ++/** ++ * ixgbe_reset_phy_fw - Reset firmware-controlled PHYs ++ * @hw: pointer to hardware structure ++ */ ++static s32 ixgbe_reset_phy_fw(struct ixgbe_hw *hw) ++{ ++ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ s32 rc; ++ ++ if (hw->phy.reset_disable || ixgbe_check_reset_blocked(hw)) ++ return IXGBE_SUCCESS; ++ ++ rc 
= ixgbe_fw_phy_activity(hw, FW_PHY_ACT_PHY_SW_RESET, &store); ++ if (rc) ++ return rc; ++ memset(store, 0, sizeof(store)); ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_INIT_PHY, &store); ++ if (rc) ++ return rc; ++ ++ return ixgbe_setup_fw_link(hw); ++} ++ ++/** ++ * ixgbe_check_overtemp_fw - Check firmware-controlled PHYs for overtemp ++ * @hw: pointer to hardware structure ++ */ ++static s32 ixgbe_check_overtemp_fw(struct ixgbe_hw *hw) ++{ ++ u32 store[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ s32 rc; ++ ++ rc = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &store); ++ if (rc) ++ return rc; ++ ++ if (store[0] & FW_PHY_ACT_GET_LINK_INFO_TEMP) { ++ ixgbe_shutdown_fw_phy(hw); ++ return IXGBE_ERR_OVERTEMP; ++ } ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_read_mng_if_sel_x550em - Read NW_MNG_IF_SEL register ++ * @hw: pointer to hardware structure ++ * ++ * Read NW_MNG_IF_SEL register and save field values, and check for valid field ++ * values. ++ **/ ++STATIC s32 ixgbe_read_mng_if_sel_x550em(struct ixgbe_hw *hw) ++{ ++ /* Save NW management interface connected on board. This is used ++ * to determine internal PHY mode. ++ */ ++ hw->phy.nw_mng_if_sel = IXGBE_READ_REG(hw, IXGBE_NW_MNG_IF_SEL); ++ ++ /* If X552 (X550EM_a) and MDIO is connected to external PHY, then set ++ * PHY address. This register field was has only been used for X552. ++ */ ++ if (hw->mac.type == ixgbe_mac_X550EM_a && ++ hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_MDIO_ACT) { ++ hw->phy.addr = (hw->phy.nw_mng_if_sel & ++ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD) >> ++ IXGBE_NW_MNG_IF_SEL_MDIO_PHY_ADD_SHIFT; ++ } ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_init_phy_ops_X550em - PHY/SFP specific init ++ * @hw: pointer to hardware structure ++ * ++ * Initialize any function pointers that were not able to be ++ * set during init_shared_code because the PHY/SFP type was ++ * not known. Perform the SFP init if necessary. 
++ */ ++s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_phy_info *phy = &hw->phy; ++ s32 ret_val; ++ ++ DEBUGFUNC("ixgbe_init_phy_ops_X550em"); ++ ++ hw->mac.ops.set_lan_id(hw); ++ ixgbe_read_mng_if_sel_x550em(hw); ++ ++ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) { ++ phy->phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ++ ixgbe_setup_mux_ctl(hw); ++ phy->ops.identify_sfp = ixgbe_identify_sfp_module_X550em; ++ } ++ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ phy->ops.read_reg_mdi = ixgbe_read_phy_reg_mdi_22; ++ phy->ops.write_reg_mdi = ixgbe_write_phy_reg_mdi_22; ++ hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a; ++ hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a; ++ phy->ops.check_overtemp = ixgbe_check_overtemp_fw; ++ if (hw->bus.lan_id) ++ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; ++ else ++ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; ++ ++ break; ++ case IXGBE_DEV_ID_X550EM_A_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_SFP: ++ hw->phy.ops.read_reg = ixgbe_read_phy_reg_x550a; ++ hw->phy.ops.write_reg = ixgbe_write_phy_reg_x550a; ++ if (hw->bus.lan_id) ++ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY1_SM; ++ else ++ hw->phy.phy_semaphore_mask |= IXGBE_GSSR_PHY0_SM; ++ break; ++ case IXGBE_DEV_ID_X550EM_X_SFP: ++ /* set up for CS4227 usage */ ++ hw->phy.phy_semaphore_mask = IXGBE_GSSR_SHARED_I2C_SM; ++ break; ++ default: ++ break; ++ } ++ ++ /* Identify the PHY or SFP module */ ++ ret_val = phy->ops.identify(hw); ++ if (ret_val == IXGBE_ERR_SFP_NOT_SUPPORTED || ++ ret_val == IXGBE_ERR_PHY_ADDR_INVALID) ++ return ret_val; ++ ++ /* Setup function pointers based on detected hardware */ ++ ixgbe_init_mac_link_ops_X550em(hw); ++ if (phy->sfp_type != ixgbe_sfp_type_unknown) ++ phy->ops.reset = NULL; ++ ++ /* Set functions pointers based on phy type */ ++ switch (hw->phy.type) { ++ case ixgbe_phy_x550em_kx4: ++ phy->ops.setup_link = NULL; ++ phy->ops.read_reg = 
ixgbe_read_phy_reg_x550em; ++ phy->ops.write_reg = ixgbe_write_phy_reg_x550em; ++ break; ++ case ixgbe_phy_x550em_kr: ++ phy->ops.setup_link = ixgbe_setup_kr_x550em; ++ phy->ops.read_reg = ixgbe_read_phy_reg_x550em; ++ phy->ops.write_reg = ixgbe_write_phy_reg_x550em; ++ break; ++ case ixgbe_phy_ext_1g_t: ++ /* link is managed by FW */ ++ phy->ops.setup_link = NULL; ++ phy->ops.reset = NULL; ++ break; ++ case ixgbe_phy_x550em_xfi: ++ /* link is managed by HW */ ++ phy->ops.setup_link = NULL; ++ phy->ops.read_reg = ixgbe_read_phy_reg_x550em; ++ phy->ops.write_reg = ixgbe_write_phy_reg_x550em; ++ break; ++ case ixgbe_phy_x550em_ext_t: ++ /* If internal link mode is XFI, then setup iXFI internal link, ++ * else setup KR now. ++ */ ++ phy->ops.setup_internal_link = ++ ixgbe_setup_internal_phy_t_x550em; ++ ++ /* setup SW LPLU only for first revision of X550EM_x */ ++ if ((hw->mac.type == ixgbe_mac_X550EM_x) && ++ !(IXGBE_FUSES0_REV_MASK & ++ IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) ++ phy->ops.enter_lplu = ixgbe_enter_lplu_t_x550em; ++ ++ phy->ops.handle_lasi = ixgbe_handle_lasi_ext_t_x550em; ++ phy->ops.reset = ixgbe_reset_phy_t_X550em; ++ break; ++ case ixgbe_phy_sgmii: ++ phy->ops.setup_link = NULL; ++ break; ++ case ixgbe_phy_fw: ++ phy->ops.setup_link = ixgbe_setup_fw_link; ++ phy->ops.reset = ixgbe_reset_phy_fw; ++ break; ++ default: ++ break; ++ } ++ return ret_val; ++} ++ ++/** ++ * ixgbe_set_mdio_speed - Set MDIO clock speed ++ * @hw: pointer to hardware structure ++ */ ++STATIC void ixgbe_set_mdio_speed(struct ixgbe_hw *hw) ++{ ++ u32 hlreg0; ++ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_X_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_SGMII: ++ case IXGBE_DEV_ID_X550EM_A_SGMII_L: ++ case IXGBE_DEV_ID_X550EM_A_10G_T: ++ case IXGBE_DEV_ID_X550EM_A_SFP: ++ case IXGBE_DEV_ID_X550EM_A_QSFP: ++ /* Config MDIO clock speed before the first MDIO PHY access */ ++ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); ++ hlreg0 &= ~IXGBE_HLREG0_MDCSPD; ++ IXGBE_WRITE_REG(hw, 
IXGBE_HLREG0, hlreg0); ++ break; ++ case IXGBE_DEV_ID_X550EM_A_1G_T: ++ case IXGBE_DEV_ID_X550EM_A_1G_T_L: ++ /* Select fast MDIO clock speed for these devices */ ++ hlreg0 = IXGBE_READ_REG(hw, IXGBE_HLREG0); ++ hlreg0 |= IXGBE_HLREG0_MDCSPD; ++ IXGBE_WRITE_REG(hw, IXGBE_HLREG0, hlreg0); ++ break; ++ default: ++ break; ++ } ++} ++ ++/** ++ * ixgbe_reset_hw_X550em - Perform hardware reset ++ * @hw: pointer to hardware structure ++ * ++ * Resets the hardware by resetting the transmit and receive units, masks ++ * and clears all interrupts, perform a PHY reset, and perform a link (MAC) ++ * reset. ++ */ ++s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw) ++{ ++ ixgbe_link_speed link_speed; ++ s32 status; ++ u32 ctrl = 0; ++ u32 i; ++ bool link_up = false; ++ u32 swfw_mask = hw->phy.phy_semaphore_mask; ++ ++ DEBUGFUNC("ixgbe_reset_hw_X550em"); ++ ++ /* Call adapter stop to disable Tx/Rx and clear interrupts */ ++ status = hw->mac.ops.stop_adapter(hw); ++ if (status != IXGBE_SUCCESS) { ++ DEBUGOUT1("Failed to stop adapter, STATUS = %d\n", status); ++ return status; ++ } ++ /* flush pending Tx transactions */ ++ ixgbe_clear_tx_pending(hw); ++ ++ ixgbe_set_mdio_speed(hw); ++ ++ /* PHY ops must be identified and initialized prior to reset */ ++ status = hw->phy.ops.init(hw); ++ ++ if (status) ++ DEBUGOUT1("Failed to initialize PHY ops, STATUS = %d\n", ++ status); ++ ++ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED || ++ status == IXGBE_ERR_PHY_ADDR_INVALID) { ++ DEBUGOUT("Returning from reset HW due to PHY init failure\n"); ++ return status; ++ } ++ ++ /* start the external PHY */ ++ if (hw->phy.type == ixgbe_phy_x550em_ext_t) { ++ status = ixgbe_init_ext_t_x550em(hw); ++ if (status) { ++ DEBUGOUT1("Failed to start the external PHY, STATUS = %d\n", ++ status); ++ return status; ++ } ++ } ++ ++ /* Setup SFP module if there is one present. 
*/ ++ if (hw->phy.sfp_setup_needed) { ++ status = hw->mac.ops.setup_sfp(hw); ++ hw->phy.sfp_setup_needed = false; ++ } ++ ++ if (status == IXGBE_ERR_SFP_NOT_SUPPORTED) ++ return status; ++ ++ /* Reset PHY */ ++ if (!hw->phy.reset_disable && hw->phy.ops.reset) { ++ if (hw->phy.ops.reset(hw) == IXGBE_ERR_OVERTEMP) ++ return IXGBE_ERR_OVERTEMP; ++ } ++ ++mac_reset_top: ++ /* Issue global reset to the MAC. Needs to be SW reset if link is up. ++ * If link reset is used when link is up, it might reset the PHY when ++ * mng is using it. If link is down or the flag to force full link ++ * reset is set, then perform link reset. ++ */ ++ ctrl = IXGBE_CTRL_LNK_RST; ++ if (!hw->force_full_reset) { ++ hw->mac.ops.check_link(hw, &link_speed, &link_up, false); ++ if (link_up) ++ ctrl = IXGBE_CTRL_RST; ++ } ++ ++ status = hw->mac.ops.acquire_swfw_sync(hw, swfw_mask); ++ if (status != IXGBE_SUCCESS) { ++ ERROR_REPORT2(IXGBE_ERROR_CAUTION, ++ "semaphore failed with %d", status); ++ return IXGBE_ERR_SWFW_SYNC; ++ } ++ ctrl |= IXGBE_READ_REG(hw, IXGBE_CTRL); ++ IXGBE_WRITE_REG(hw, IXGBE_CTRL, ctrl); ++ IXGBE_WRITE_FLUSH(hw); ++ hw->mac.ops.release_swfw_sync(hw, swfw_mask); ++ ++ /* Poll for reset bit to self-clear meaning reset is complete */ ++ for (i = 0; i < 10; i++) { ++ usec_delay(1); ++ ctrl = IXGBE_READ_REG(hw, IXGBE_CTRL); ++ if (!(ctrl & IXGBE_CTRL_RST_MASK)) ++ break; ++ } ++ ++ if (ctrl & IXGBE_CTRL_RST_MASK) { ++ status = IXGBE_ERR_RESET_FAILED; ++ DEBUGOUT("Reset polling failed to complete.\n"); ++ } ++ ++ msec_delay(50); ++ ++ /* Double resets are required for recovery from certain error ++ * conditions. Between resets, it is necessary to stall to ++ * allow time for any pending HW events to complete. 
++ */ ++ if (hw->mac.flags & IXGBE_FLAGS_DOUBLE_RESET_REQUIRED) { ++ hw->mac.flags &= ~IXGBE_FLAGS_DOUBLE_RESET_REQUIRED; ++ goto mac_reset_top; ++ } ++ ++ /* Store the permanent mac address */ ++ hw->mac.ops.get_mac_addr(hw, hw->mac.perm_addr); ++ ++ /* Store MAC address from RAR0, clear receive address registers, and ++ * clear the multicast table. Also reset num_rar_entries to 128, ++ * since we modify this value when programming the SAN MAC address. ++ */ ++ hw->mac.num_rar_entries = 128; ++ hw->mac.ops.init_rx_addrs(hw); ++ ++ ixgbe_set_mdio_speed(hw); ++ ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_X_SFP) ++ ixgbe_setup_mux_ctl(hw); ++ ++ if (status != IXGBE_SUCCESS) ++ DEBUGOUT1("Reset HW failed, STATUS = %d\n", status); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_init_ext_t_x550em - Start (unstall) the external Base T PHY. ++ * @hw: pointer to hardware structure ++ */ ++s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw) ++{ ++ u32 status; ++ u16 reg; ++ ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_TX_VENDOR_ALARMS_3, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, ++ ®); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* If PHY FW reset completed bit is set then this is the first ++ * SW instance after a power on so the PHY FW must be un-stalled. ++ */ ++ if (reg & IXGBE_MDIO_TX_VENDOR_ALARMS_3_RST_MASK) { ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_GLOBAL_RES_PR_10, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ ®); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ reg &= ~IXGBE_MDIO_POWER_UP_STALL; ++ ++ status = hw->phy.ops.write_reg(hw, ++ IXGBE_MDIO_GLOBAL_RES_PR_10, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, ++ reg); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_setup_kr_x550em - Configure the KR PHY. 
++ * @hw: pointer to hardware structure ++ **/ ++s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw) ++{ ++ /* leave link alone for 2.5G */ ++ if (hw->phy.autoneg_advertised & IXGBE_LINK_SPEED_2_5GB_FULL) ++ return IXGBE_SUCCESS; ++ ++ if (ixgbe_check_reset_blocked(hw)) ++ return 0; ++ ++ return ixgbe_setup_kr_speed_x550em(hw, hw->phy.autoneg_advertised); ++} ++ ++/** ++ * ixgbe_setup_mac_link_sfp_x550em - Setup internal/external the PHY for SFP ++ * @hw: pointer to hardware structure ++ * ++ * Configure the external PHY and the integrated KR PHY for SFP support. ++ **/ ++s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete) ++{ ++ s32 ret_val; ++ u16 reg_slice, reg_val; ++ bool setup_linear = false; ++ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); ++ ++ /* Check if SFP module is supported and linear */ ++ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); ++ ++ /* If no SFP module present, then return success. Return success since ++ * there is no reason to configure CS4227 and SFP not present error is ++ * not excepted in the setup MAC link flow. ++ */ ++ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) ++ return IXGBE_SUCCESS; ++ ++ if (ret_val != IXGBE_SUCCESS) ++ return ret_val; ++ ++ /* Configure internal PHY for KR/KX. */ ++ ixgbe_setup_kr_speed_x550em(hw, speed); ++ ++ /* Configure CS4227 LINE side to proper mode. */ ++ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + ++ (hw->bus.lan_id << 12); ++ if (setup_linear) ++ reg_val = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; ++ else ++ reg_val = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; ++ ret_val = hw->link.ops.write_link(hw, hw->link.addr, reg_slice, ++ reg_val); ++ return ret_val; ++} ++ ++/** ++ * ixgbe_setup_sfi_x550a - Configure the internal PHY for native SFI mode ++ * @hw: pointer to hardware structure ++ * @speed: the link speed to force ++ * ++ * Configures the integrated PHY for native SFI mode. 
Used to connect the ++ * internal PHY directly to an SFP cage, without autonegotiation. ++ **/ ++STATIC s32 ixgbe_setup_sfi_x550a(struct ixgbe_hw *hw, ixgbe_link_speed *speed) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ s32 status; ++ u32 reg_val; ++ ++ /* Disable all AN and force speed to 10G Serial. */ ++ status = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_AN37_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SGMII_EN; ++ reg_val &= ~IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_MASK; ++ ++ /* Select forced link speed for internal PHY. */ ++ switch (*speed) { ++ case IXGBE_LINK_SPEED_10GB_FULL: ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_10G; ++ break; ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ reg_val |= IXGBE_KRM_PMD_FLX_MASK_ST20_SPEED_1G; ++ break; ++ default: ++ /* Other link speeds are not supported by internal PHY. */ ++ return IXGBE_ERR_LINK_SETUP; ++ } ++ ++ status = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ ++ /* Toggle port SW reset by AN reset. */ ++ status = ixgbe_restart_an_internal_phy_x550em(hw); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_setup_mac_link_sfp_x550a - Setup internal PHY for SFP ++ * @hw: pointer to hardware structure ++ * ++ * Configure the the integrated PHY for SFP support. ++ **/ ++s32 ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete) ++{ ++ s32 ret_val; ++ u16 reg_phy_ext; ++ bool setup_linear = false; ++ u32 reg_slice, reg_phy_int, slice_offset; ++ ++ UNREFERENCED_1PARAMETER(autoneg_wait_to_complete); ++ ++ /* Check if SFP module is supported and linear */ ++ ret_val = ixgbe_supported_sfp_modules_X550em(hw, &setup_linear); ++ ++ /* If no SFP module present, then return success. 
Return success since ++ * SFP not present error is not excepted in the setup MAC link flow. ++ */ ++ if (ret_val == IXGBE_ERR_SFP_NOT_PRESENT) ++ return IXGBE_SUCCESS; ++ ++ if (ret_val != IXGBE_SUCCESS) ++ return ret_val; ++ ++ if (hw->device_id == IXGBE_DEV_ID_X550EM_A_SFP_N) { ++ /* Configure internal PHY for native SFI based on module type */ ++ ret_val = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_phy_int); ++ ++ if (ret_val != IXGBE_SUCCESS) ++ return ret_val; ++ ++ reg_phy_int &= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_DA; ++ if (!setup_linear) ++ reg_phy_int |= IXGBE_KRM_PMD_FLX_MASK_ST20_SFI_10G_SR; ++ ++ ret_val = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_FLX_MASK_ST20(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_phy_int); ++ ++ if (ret_val != IXGBE_SUCCESS) ++ return ret_val; ++ ++ /* Setup SFI internal link. */ ++ ret_val = ixgbe_setup_sfi_x550a(hw, &speed); ++ } else { ++ /* Configure internal PHY for KR/KX. */ ++ ixgbe_setup_kr_speed_x550em(hw, speed); ++ ++ if (hw->phy.addr == 0x0 || hw->phy.addr == 0xFFFF) { ++ /* Find Address */ ++ DEBUGOUT("Invalid NW_MNG_IF_SEL.MDIO_PHY_ADD value\n"); ++ return IXGBE_ERR_PHY_ADDR_INVALID; ++ } ++ ++ /* Get external PHY SKU id */ ++ ret_val = hw->phy.ops.read_reg(hw, IXGBE_CS4227_EFUSE_PDF_SKU, ++ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); ++ ++ if (ret_val != IXGBE_SUCCESS) ++ return ret_val; ++ ++ /* When configuring quad port CS4223, the MAC instance is part ++ * of the slice offset. ++ */ ++ if (reg_phy_ext == IXGBE_CS4223_SKU_ID) ++ slice_offset = (hw->bus.lan_id + ++ (hw->bus.instance_id << 1)) << 12; ++ else ++ slice_offset = hw->bus.lan_id << 12; ++ ++ /* Configure CS4227/CS4223 LINE side to proper mode. 
*/ ++ reg_slice = IXGBE_CS4227_LINE_SPARE24_LSB + slice_offset; ++ ++ ret_val = hw->phy.ops.read_reg(hw, reg_slice, ++ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); ++ ++ if (ret_val != IXGBE_SUCCESS) ++ return ret_val; ++ ++ reg_phy_ext &= ~((IXGBE_CS4227_EDC_MODE_CX1 << 1) | ++ (IXGBE_CS4227_EDC_MODE_SR << 1)); ++ ++ if (setup_linear) ++ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_CX1 << 1) | 0x1; ++ else ++ reg_phy_ext = (IXGBE_CS4227_EDC_MODE_SR << 1) | 0x1; ++ ret_val = hw->phy.ops.write_reg(hw, reg_slice, ++ IXGBE_MDIO_ZERO_DEV_TYPE, reg_phy_ext); ++ ++ /* Flush previous write with a read */ ++ ret_val = hw->phy.ops.read_reg(hw, reg_slice, ++ IXGBE_MDIO_ZERO_DEV_TYPE, ®_phy_ext); ++ } ++ return ret_val; ++} ++ ++/** ++ * ixgbe_setup_ixfi_x550em_x - MAC specific iXFI configuration ++ * @hw: pointer to hardware structure ++ * ++ * iXfI configuration needed for ixgbe_mac_X550EM_x devices. ++ **/ ++STATIC s32 ixgbe_setup_ixfi_x550em_x(struct ixgbe_hw *hw) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ s32 status; ++ u32 reg_val; ++ ++ /* Disable training protocol FSM. */ ++ status = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_CONV_WO_PROTOCOL; ++ status = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Disable Flex from training TXFFE. 
*/ ++ status = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; ++ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; ++ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; ++ status = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_DSP_TXFFE_STATE_4(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ status = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_C0_EN; ++ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CP1_CN1_EN; ++ reg_val &= ~IXGBE_KRM_DSP_TXFFE_STATE_CO_ADAPT_EN; ++ status = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_DSP_TXFFE_STATE_5(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Enable override for coefficients. */ ++ status = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_OVRRD_EN; ++ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CZERO_EN; ++ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CPLUS1_OVRRD_EN; ++ reg_val |= IXGBE_KRM_TX_COEFF_CTRL_1_CMINUS1_OVRRD_EN; ++ status = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_TX_COEFF_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ return status; ++} ++ ++/** ++ * ixgbe_setup_ixfi_x550em - Configure the KR PHY for iXFI mode. ++ * @hw: pointer to hardware structure ++ * @speed: the link speed to force ++ * ++ * Configures the integrated KR PHY to use iXFI mode. Used to connect an ++ * internal and external PHY at a specific speed, without autonegotiation. 
++ **/ ++STATIC s32 ixgbe_setup_ixfi_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed) ++{ ++ struct ixgbe_mac_info *mac = &hw->mac; ++ s32 status; ++ u32 reg_val; ++ ++ /* iXFI is only supported with X552 */ ++ if (mac->type != ixgbe_mac_X550EM_x) ++ return IXGBE_ERR_LINK_SETUP; ++ ++ /* Disable AN and force speed to 10G Serial. */ ++ status = mac->ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; ++ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; ++ ++ /* Select forced link speed for internal PHY. */ ++ switch (*speed) { ++ case IXGBE_LINK_SPEED_10GB_FULL: ++ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; ++ break; ++ case IXGBE_LINK_SPEED_1GB_FULL: ++ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_1G; ++ break; ++ default: ++ /* Other link speeds are not supported by internal KR PHY. */ ++ return IXGBE_ERR_LINK_SETUP; ++ } ++ ++ status = mac->ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Additional configuration needed for x550em_x */ ++ if (hw->mac.type == ixgbe_mac_X550EM_x) { ++ status = ixgbe_setup_ixfi_x550em_x(hw); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ } ++ ++ /* Toggle port SW reset by AN reset. */ ++ status = ixgbe_restart_an_internal_phy_x550em(hw); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_ext_phy_t_x550em_get_link - Get ext phy link status ++ * @hw: address of hardware structure ++ * @link_up: address of boolean to indicate link status ++ * ++ * Returns error code if unable to get link status. 
++ */ ++STATIC s32 ixgbe_ext_phy_t_x550em_get_link(struct ixgbe_hw *hw, bool *link_up) ++{ ++ u32 ret; ++ u16 autoneg_status; ++ ++ *link_up = false; ++ ++ /* read this twice back to back to indicate current status */ ++ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_status); ++ if (ret != IXGBE_SUCCESS) ++ return ret; ++ ++ ret = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_status); ++ if (ret != IXGBE_SUCCESS) ++ return ret; ++ ++ *link_up = !!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS); ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_setup_internal_phy_t_x550em - Configure KR PHY to X557 link ++ * @hw: point to hardware structure ++ * ++ * Configures the link between the integrated KR PHY and the external X557 PHY ++ * The driver will call this function when it gets a link status change ++ * interrupt from the X557 PHY. This function configures the link speed ++ * between the PHYs to match the link speed of the BASE-T link. ++ * ++ * A return of a non-zero value indicates an error, and the base driver should ++ * not report link up. 
++ */ ++s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw) ++{ ++ ixgbe_link_speed force_speed; ++ bool link_up; ++ u32 status; ++ u16 speed; ++ ++ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) ++ return IXGBE_ERR_CONFIG; ++ ++ if (hw->mac.type == ixgbe_mac_X550EM_x && ++ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { ++ /* If link is down, there is no setup necessary so return */ ++ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ if (!link_up) ++ return IXGBE_SUCCESS; ++ ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &speed); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* If link is still down - no setup is required so return */ ++ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ if (!link_up) ++ return IXGBE_SUCCESS; ++ ++ /* clear everything but the speed and duplex bits */ ++ speed &= IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_MASK; ++ ++ switch (speed) { ++ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB_FULL: ++ force_speed = IXGBE_LINK_SPEED_10GB_FULL; ++ break; ++ case IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB_FULL: ++ force_speed = IXGBE_LINK_SPEED_1GB_FULL; ++ break; ++ default: ++ /* Internal PHY does not support anything else */ ++ return IXGBE_ERR_INVALID_LINK_SETTINGS; ++ } ++ ++ return ixgbe_setup_ixfi_x550em(hw, &force_speed); ++ } else { ++ speed = IXGBE_LINK_SPEED_10GB_FULL | ++ IXGBE_LINK_SPEED_1GB_FULL; ++ return ixgbe_setup_kr_speed_x550em(hw, speed); ++ } ++} ++ ++/** ++ * ixgbe_setup_phy_loopback_x550em - Configure the KR PHY for loopback. ++ * @hw: pointer to hardware structure ++ * ++ * Configures the integrated KR PHY to use internal loopback mode. ++ **/ ++s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ u32 reg_val; ++ ++ /* Disable AN and force speed to 10G Serial. 
*/ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_AN_ENABLE; ++ reg_val &= ~IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_MASK; ++ reg_val |= IXGBE_KRM_LINK_CTRL_1_TETH_FORCE_SPEED_10G; ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_CTRL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Set near-end loopback clocks. */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_32B; ++ reg_val |= IXGBE_KRM_PORT_CAR_GEN_CTRL_NELB_KRPCS; ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PORT_CAR_GEN_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Set loopback enable. */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg_val |= IXGBE_KRM_PMD_DFX_BURNIN_TX_RX_KR_LB_MASK; ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_PMD_DFX_BURNIN(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Training bypass. 
*/ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ reg_val |= IXGBE_KRM_RX_TRN_LINKUP_CTRL_PROTOCOL_BYPASS; ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_RX_TRN_LINKUP_CTRL(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_read_ee_hostif_X550 - Read EEPROM word using a host interface command ++ * assuming that the semaphore is already obtained. ++ * @hw: pointer to hardware structure ++ * @offset: offset of word in the EEPROM to read ++ * @data: word read from the EEPROM ++ * ++ * Reads a 16 bit word from the EEPROM using the hostif. ++ **/ ++s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, u16 *data) ++{ ++ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; ++ struct ixgbe_hic_read_shadow_ram buffer; ++ s32 status; ++ ++ DEBUGFUNC("ixgbe_read_ee_hostif_X550"); ++ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; ++ buffer.hdr.req.buf_lenh = 0; ++ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; ++ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; ++ ++ /* convert offset from words to bytes */ ++ buffer.address = IXGBE_CPU_TO_BE32(offset * 2); ++ /* one word */ ++ buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); ++ ++ status = hw->mac.ops.acquire_swfw_sync(hw, mask); ++ if (status) ++ return status; ++ ++ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), ++ IXGBE_HI_COMMAND_TIMEOUT); ++ if (!status) { ++ *data = (u16)IXGBE_READ_REG_ARRAY(hw, IXGBE_FLEX_MNG, ++ FW_NVM_DATA_OFFSET); ++ } ++ ++ hw->mac.ops.release_swfw_sync(hw, mask); ++ return status; ++} ++ ++/** ++ * ixgbe_read_ee_hostif_buffer_X550- Read EEPROM word(s) using hostif ++ * @hw: pointer to hardware structure ++ * @offset: offset of word in the EEPROM to read ++ * @words: number of words ++ * @data: word(s) read from the EEPROM ++ * ++ * Reads a 16 bit word(s) from the 
EEPROM using the hostif. ++ **/ ++s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, ++ u16 offset, u16 words, u16 *data) ++{ ++ const u32 mask = IXGBE_GSSR_SW_MNG_SM | IXGBE_GSSR_EEP_SM; ++ struct ixgbe_hic_read_shadow_ram buffer; ++ u32 current_word = 0; ++ u16 words_to_read; ++ s32 status; ++ u32 i; ++ ++ DEBUGFUNC("ixgbe_read_ee_hostif_buffer_X550"); ++ ++ /* Take semaphore for the entire operation. */ ++ status = hw->mac.ops.acquire_swfw_sync(hw, mask); ++ if (status) { ++ DEBUGOUT("EEPROM read buffer - semaphore failed\n"); ++ return status; ++ } ++ ++ while (words) { ++ if (words > FW_MAX_READ_BUFFER_SIZE / 2) ++ words_to_read = FW_MAX_READ_BUFFER_SIZE / 2; ++ else ++ words_to_read = words; ++ ++ buffer.hdr.req.cmd = FW_READ_SHADOW_RAM_CMD; ++ buffer.hdr.req.buf_lenh = 0; ++ buffer.hdr.req.buf_lenl = FW_READ_SHADOW_RAM_LEN; ++ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; ++ ++ /* convert offset from words to bytes */ ++ buffer.address = IXGBE_CPU_TO_BE32((offset + current_word) * 2); ++ buffer.length = IXGBE_CPU_TO_BE16(words_to_read * 2); ++ ++ status = ixgbe_hic_unlocked(hw, (u32 *)&buffer, sizeof(buffer), ++ IXGBE_HI_COMMAND_TIMEOUT); ++ ++ if (status) { ++ DEBUGOUT("Host interface command failed\n"); ++ goto out; ++ } ++ ++ for (i = 0; i < words_to_read; i++) { ++ u32 reg = IXGBE_FLEX_MNG + (FW_NVM_DATA_OFFSET << 2) + ++ 2 * i; ++ u32 value = IXGBE_READ_REG(hw, reg); ++ ++ data[current_word] = (u16)(value & 0xffff); ++ current_word++; ++ i++; ++ if (i < words_to_read) { ++ value >>= 16; ++ data[current_word] = (u16)(value & 0xffff); ++ current_word++; ++ } ++ } ++ words -= words_to_read; ++ } ++ ++out: ++ hw->mac.ops.release_swfw_sync(hw, mask); ++ return status; ++} ++ ++/** ++ * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif ++ * @hw: pointer to hardware structure ++ * @offset: offset of word in the EEPROM to write ++ * @data: word write to the EEPROM ++ * ++ * Write a 16 bit word to the EEPROM using the hostif. 
++ **/ ++s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, ++ u16 data) ++{ ++ s32 status; ++ struct ixgbe_hic_write_shadow_ram buffer; ++ ++ DEBUGFUNC("ixgbe_write_ee_hostif_data_X550"); ++ ++ buffer.hdr.req.cmd = FW_WRITE_SHADOW_RAM_CMD; ++ buffer.hdr.req.buf_lenh = 0; ++ buffer.hdr.req.buf_lenl = FW_WRITE_SHADOW_RAM_LEN; ++ buffer.hdr.req.checksum = FW_DEFAULT_CHECKSUM; ++ ++ /* one word */ ++ buffer.length = IXGBE_CPU_TO_BE16(sizeof(u16)); ++ buffer.data = data; ++ buffer.address = IXGBE_CPU_TO_BE32(offset * 2); ++ ++ status = ixgbe_host_interface_command(hw, (u32 *)&buffer, ++ sizeof(buffer), ++ IXGBE_HI_COMMAND_TIMEOUT, false); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_write_ee_hostif_X550 - Write EEPROM word using hostif ++ * @hw: pointer to hardware structure ++ * @offset: offset of word in the EEPROM to write ++ * @data: word write to the EEPROM ++ * ++ * Write a 16 bit word to the EEPROM using the hostif. ++ **/ ++s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, ++ u16 data) ++{ ++ s32 status = IXGBE_SUCCESS; ++ ++ DEBUGFUNC("ixgbe_write_ee_hostif_X550"); ++ ++ if (hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM) == ++ IXGBE_SUCCESS) { ++ status = ixgbe_write_ee_hostif_data_X550(hw, offset, data); ++ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ } else { ++ DEBUGOUT("write ee hostif failed to get semaphore"); ++ status = IXGBE_ERR_SWFW_SYNC; ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_write_ee_hostif_buffer_X550 - Write EEPROM word(s) using hostif ++ * @hw: pointer to hardware structure ++ * @offset: offset of word in the EEPROM to write ++ * @words: number of words ++ * @data: word(s) write to the EEPROM ++ * ++ * Write a 16 bit word(s) to the EEPROM using the hostif. 
++ **/ ++s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, ++ u16 offset, u16 words, u16 *data) ++{ ++ s32 status = IXGBE_SUCCESS; ++ u32 i = 0; ++ ++ DEBUGFUNC("ixgbe_write_ee_hostif_buffer_X550"); ++ ++ /* Take semaphore for the entire operation. */ ++ status = hw->mac.ops.acquire_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++ if (status != IXGBE_SUCCESS) { ++ DEBUGOUT("EEPROM write buffer - semaphore failed\n"); ++ goto out; ++ } ++ ++ for (i = 0; i < words; i++) { ++ status = ixgbe_write_ee_hostif_data_X550(hw, offset + i, ++ data[i]); ++ ++ if (status != IXGBE_SUCCESS) { ++ DEBUGOUT("Eeprom buffered write failed\n"); ++ break; ++ } ++ } ++ ++ hw->mac.ops.release_swfw_sync(hw, IXGBE_GSSR_EEP_SM); ++out: ++ ++ return status; ++} ++ ++/** ++ * ixgbe_checksum_ptr_x550 - Checksum one pointer region ++ * @hw: pointer to hardware structure ++ * @ptr: pointer offset in eeprom ++ * @size: size of section pointed by ptr, if 0 first word will be used as size ++ * @csum: address of checksum to update ++ * ++ * Returns error status for any failure ++ */ ++STATIC s32 ixgbe_checksum_ptr_x550(struct ixgbe_hw *hw, u16 ptr, ++ u16 size, u16 *csum, u16 *buffer, ++ u32 buffer_size) ++{ ++ u16 buf[256]; ++ s32 status; ++ u16 length, bufsz, i, start; ++ u16 *local_buffer; ++ ++ bufsz = sizeof(buf) / sizeof(buf[0]); ++ ++ /* Read a chunk at the pointer location */ ++ if (!buffer) { ++ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, bufsz, buf); ++ if (status) { ++ DEBUGOUT("Failed to read EEPROM image\n"); ++ return status; ++ } ++ local_buffer = buf; ++ } else { ++ if (buffer_size < ptr) ++ return IXGBE_ERR_PARAM; ++ local_buffer = &buffer[ptr]; ++ } ++ ++ if (size) { ++ start = 0; ++ length = size; ++ } else { ++ start = 1; ++ length = local_buffer[0]; ++ ++ /* Skip pointer section if length is invalid. 
*/ ++ if (length == 0xFFFF || length == 0 || ++ (ptr + length) >= hw->eeprom.word_size) ++ return IXGBE_SUCCESS; ++ } ++ ++ if (buffer && ((u32)start + (u32)length > buffer_size)) ++ return IXGBE_ERR_PARAM; ++ ++ for (i = start; length; i++, length--) { ++ if (i == bufsz && !buffer) { ++ ptr += bufsz; ++ i = 0; ++ if (length < bufsz) ++ bufsz = length; ++ ++ /* Read a chunk at the pointer location */ ++ status = ixgbe_read_ee_hostif_buffer_X550(hw, ptr, ++ bufsz, buf); ++ if (status) { ++ DEBUGOUT("Failed to read EEPROM image\n"); ++ return status; ++ } ++ } ++ *csum += local_buffer[i]; ++ } ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_calc_checksum_X550 - Calculates and returns the checksum ++ * @hw: pointer to hardware structure ++ * @buffer: pointer to buffer containing calculated checksum ++ * @buffer_size: size of buffer ++ * ++ * Returns a negative error code on error, or the 16-bit checksum ++ **/ ++s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size) ++{ ++ u16 eeprom_ptrs[IXGBE_EEPROM_LAST_WORD + 1]; ++ u16 *local_buffer; ++ s32 status; ++ u16 checksum = 0; ++ u16 pointer, i, size; ++ ++ DEBUGFUNC("ixgbe_calc_eeprom_checksum_X550"); ++ ++ hw->eeprom.ops.init_params(hw); ++ ++ if (!buffer) { ++ /* Read pointer area */ ++ status = ixgbe_read_ee_hostif_buffer_X550(hw, 0, ++ IXGBE_EEPROM_LAST_WORD + 1, ++ eeprom_ptrs); ++ if (status) { ++ DEBUGOUT("Failed to read EEPROM image\n"); ++ return status; ++ } ++ local_buffer = eeprom_ptrs; ++ } else { ++ if (buffer_size < IXGBE_EEPROM_LAST_WORD) ++ return IXGBE_ERR_PARAM; ++ local_buffer = buffer; ++ } ++ ++ /* ++ * For X550 hardware include 0x0-0x41 in the checksum, skip the ++ * checksum word itself ++ */ ++ for (i = 0; i <= IXGBE_EEPROM_LAST_WORD; i++) ++ if (i != IXGBE_EEPROM_CHECKSUM) ++ checksum += local_buffer[i]; ++ ++ /* ++ * Include all data from pointers 0x3, 0x6-0xE. This excludes the ++ * FW, PHY module, and PCIe Expansion/Option ROM pointers. 
++ */ ++ for (i = IXGBE_PCIE_ANALOG_PTR_X550; i < IXGBE_FW_PTR; i++) { ++ if (i == IXGBE_PHY_PTR || i == IXGBE_OPTION_ROM_PTR) ++ continue; ++ ++ pointer = local_buffer[i]; ++ ++ /* Skip pointer section if the pointer is invalid. */ ++ if (pointer == 0xFFFF || pointer == 0 || ++ pointer >= hw->eeprom.word_size) ++ continue; ++ ++ switch (i) { ++ case IXGBE_PCIE_GENERAL_PTR: ++ size = IXGBE_IXGBE_PCIE_GENERAL_SIZE; ++ break; ++ case IXGBE_PCIE_CONFIG0_PTR: ++ case IXGBE_PCIE_CONFIG1_PTR: ++ size = IXGBE_PCIE_CONFIG_SIZE; ++ break; ++ default: ++ size = 0; ++ break; ++ } ++ ++ status = ixgbe_checksum_ptr_x550(hw, pointer, size, &checksum, ++ buffer, buffer_size); ++ if (status) ++ return status; ++ } ++ ++ checksum = (u16)IXGBE_EEPROM_SUM - checksum; ++ ++ return (s32)checksum; ++} ++ ++/** ++ * ixgbe_calc_eeprom_checksum_X550 - Calculates and returns the checksum ++ * @hw: pointer to hardware structure ++ * ++ * Returns a negative error code on error, or the 16-bit checksum ++ **/ ++s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw) ++{ ++ return ixgbe_calc_checksum_X550(hw, NULL, 0); ++} ++ ++/** ++ * ixgbe_validate_eeprom_checksum_X550 - Validate EEPROM checksum ++ * @hw: pointer to hardware structure ++ * @checksum_val: calculated checksum ++ * ++ * Performs checksum calculation and validates the EEPROM checksum. If the ++ * caller does not need checksum_val, the value can be NULL. ++ **/ ++s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val) ++{ ++ s32 status; ++ u16 checksum; ++ u16 read_checksum = 0; ++ ++ DEBUGFUNC("ixgbe_validate_eeprom_checksum_X550"); ++ ++ /* Read the first word from the EEPROM. 
If this times out or fails, do ++ * not continue or we could be in for a very long wait while every ++ * EEPROM read fails ++ */ ++ status = hw->eeprom.ops.read(hw, 0, &checksum); ++ if (status) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return status; ++ } ++ ++ status = hw->eeprom.ops.calc_checksum(hw); ++ if (status < 0) ++ return status; ++ ++ checksum = (u16)(status & 0xffff); ++ ++ status = ixgbe_read_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, ++ &read_checksum); ++ if (status) ++ return status; ++ ++ /* Verify read checksum from EEPROM is the same as ++ * calculated checksum ++ */ ++ if (read_checksum != checksum) { ++ status = IXGBE_ERR_EEPROM_CHECKSUM; ++ ERROR_REPORT1(IXGBE_ERROR_INVALID_STATE, ++ "Invalid EEPROM checksum"); ++ } ++ ++ /* If the user cares, return the calculated checksum */ ++ if (checksum_val) ++ *checksum_val = checksum; ++ ++ return status; ++} ++ ++/** ++ * ixgbe_update_eeprom_checksum_X550 - Updates the EEPROM checksum and flash ++ * @hw: pointer to hardware structure ++ * ++ * After writing EEPROM to shadow RAM using EEWR register, software calculates ++ * checksum and updates the EEPROM and instructs the hardware to update ++ * the flash. ++ **/ ++s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ u16 checksum = 0; ++ ++ DEBUGFUNC("ixgbe_update_eeprom_checksum_X550"); ++ ++ /* Read the first word from the EEPROM. 
If this times out or fails, do ++ * not continue or we could be in for a very long wait while every ++ * EEPROM read fails ++ */ ++ status = ixgbe_read_ee_hostif_X550(hw, 0, &checksum); ++ if (status) { ++ DEBUGOUT("EEPROM read failed\n"); ++ return status; ++ } ++ ++ status = ixgbe_calc_eeprom_checksum_X550(hw); ++ if (status < 0) ++ return status; ++ ++ checksum = (u16)(status & 0xffff); ++ ++ status = ixgbe_write_ee_hostif_X550(hw, IXGBE_EEPROM_CHECKSUM, ++ checksum); ++ if (status) ++ return status; ++ ++ status = ixgbe_update_flash_X550(hw); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_update_flash_X550 - Instruct HW to copy EEPROM to Flash device ++ * @hw: pointer to hardware structure ++ * ++ * Issue a shadow RAM dump to FW to copy EEPROM from shadow RAM to the flash. ++ **/ ++s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_SUCCESS; ++ union ixgbe_hic_hdr2 buffer; ++ ++ DEBUGFUNC("ixgbe_update_flash_X550"); ++ ++ buffer.req.cmd = FW_SHADOW_RAM_DUMP_CMD; ++ buffer.req.buf_lenh = 0; ++ buffer.req.buf_lenl = FW_SHADOW_RAM_DUMP_LEN; ++ buffer.req.checksum = FW_DEFAULT_CHECKSUM; ++ ++ status = ixgbe_host_interface_command(hw, (u32 *)&buffer, ++ sizeof(buffer), ++ IXGBE_HI_COMMAND_TIMEOUT, false); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_get_supported_physical_layer_X550em - Returns physical layer type ++ * @hw: pointer to hardware structure ++ * ++ * Determines physical layer capabilities of the current configuration. 
++ **/ ++u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw) ++{ ++ u64 physical_layer = IXGBE_PHYSICAL_LAYER_UNKNOWN; ++ u16 ext_ability = 0; ++ ++ DEBUGFUNC("ixgbe_get_supported_physical_layer_X550em"); ++ ++ hw->phy.ops.identify(hw); ++ ++ switch (hw->phy.type) { ++ case ixgbe_phy_x550em_kr: ++ if (hw->mac.type == ixgbe_mac_X550EM_a) { ++ if (hw->phy.nw_mng_if_sel & ++ IXGBE_NW_MNG_IF_SEL_PHY_SPEED_2_5G) { ++ physical_layer = ++ IXGBE_PHYSICAL_LAYER_2500BASE_KX; ++ break; ++ } else if (hw->device_id == ++ IXGBE_DEV_ID_X550EM_A_KR_L) { ++ physical_layer = ++ IXGBE_PHYSICAL_LAYER_1000BASE_KX; ++ break; ++ } ++ } ++ /* fall through */ ++ case ixgbe_phy_x550em_xfi: ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KR | ++ IXGBE_PHYSICAL_LAYER_1000BASE_KX; ++ break; ++ case ixgbe_phy_x550em_kx4: ++ physical_layer = IXGBE_PHYSICAL_LAYER_10GBASE_KX4 | ++ IXGBE_PHYSICAL_LAYER_1000BASE_KX; ++ break; ++ case ixgbe_phy_x550em_ext_t: ++ hw->phy.ops.read_reg(hw, IXGBE_MDIO_PHY_EXT_ABILITY, ++ IXGBE_MDIO_PMA_PMD_DEV_TYPE, ++ &ext_ability); ++ if (ext_ability & IXGBE_MDIO_PHY_10GBASET_ABILITY) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_10GBASE_T; ++ if (ext_ability & IXGBE_MDIO_PHY_1000BASET_ABILITY) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; ++ break; ++ case ixgbe_phy_fw: ++ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_1GB_FULL) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; ++ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_100_FULL) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_100BASE_TX; ++ if (hw->phy.speeds_supported & IXGBE_LINK_SPEED_10_FULL) ++ physical_layer |= IXGBE_PHYSICAL_LAYER_10BASE_T; ++ break; ++ case ixgbe_phy_sgmii: ++ physical_layer = IXGBE_PHYSICAL_LAYER_1000BASE_KX; ++ break; ++ case ixgbe_phy_ext_1g_t: ++ physical_layer |= IXGBE_PHYSICAL_LAYER_1000BASE_T; ++ break; ++ default: ++ break; ++ } ++ ++ if (hw->mac.ops.get_media_type(hw) == ixgbe_media_type_fiber) ++ physical_layer = 
ixgbe_get_supported_phy_sfp_layer_generic(hw); ++ ++ return physical_layer; ++} ++ ++/** ++ * ixgbe_get_bus_info_x550em - Set PCI bus info ++ * @hw: pointer to hardware structure ++ * ++ * Sets bus link width and speed to unknown because X550em is ++ * not a PCI device. ++ **/ ++s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw) ++{ ++ ++ DEBUGFUNC("ixgbe_get_bus_info_x550em"); ++ ++ hw->bus.width = ixgbe_bus_width_unknown; ++ hw->bus.speed = ixgbe_bus_speed_unknown; ++ ++ hw->mac.ops.set_lan_id(hw); ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_disable_rx_x550 - Disable RX unit ++ * ++ * Enables the Rx DMA unit for x550 ++ **/ ++void ixgbe_disable_rx_x550(struct ixgbe_hw *hw) ++{ ++ u32 rxctrl, pfdtxgswc; ++ s32 status; ++ struct ixgbe_hic_disable_rxen fw_cmd; ++ ++ DEBUGFUNC("ixgbe_enable_rx_dma_x550"); ++ ++ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); ++ if (rxctrl & IXGBE_RXCTRL_RXEN) { ++ pfdtxgswc = IXGBE_READ_REG(hw, IXGBE_PFDTXGSWC); ++ if (pfdtxgswc & IXGBE_PFDTXGSWC_VT_LBEN) { ++ pfdtxgswc &= ~IXGBE_PFDTXGSWC_VT_LBEN; ++ IXGBE_WRITE_REG(hw, IXGBE_PFDTXGSWC, pfdtxgswc); ++ hw->mac.set_lben = true; ++ } else { ++ hw->mac.set_lben = false; ++ } ++ ++ fw_cmd.hdr.cmd = FW_DISABLE_RXEN_CMD; ++ fw_cmd.hdr.buf_len = FW_DISABLE_RXEN_LEN; ++ fw_cmd.hdr.checksum = FW_DEFAULT_CHECKSUM; ++ fw_cmd.port_number = (u8)hw->bus.lan_id; ++ ++ status = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, ++ sizeof(struct ixgbe_hic_disable_rxen), ++ IXGBE_HI_COMMAND_TIMEOUT, true); ++ ++ /* If we fail - disable RX using register write */ ++ if (status) { ++ rxctrl = IXGBE_READ_REG(hw, IXGBE_RXCTRL); ++ if (rxctrl & IXGBE_RXCTRL_RXEN) { ++ rxctrl &= ~IXGBE_RXCTRL_RXEN; ++ IXGBE_WRITE_REG(hw, IXGBE_RXCTRL, rxctrl); ++ } ++ } ++ } ++} ++ ++/** ++ * ixgbe_enter_lplu_x550em - Transition to low power states ++ * @hw: pointer to hardware structure ++ * ++ * Configures Low Power Link Up on transition to low power states ++ * (from D0 to non-D0). 
Link is required to enter LPLU so avoid resetting the ++ * X557 PHY immediately prior to entering LPLU. ++ **/ ++s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw) ++{ ++ u16 an_10g_cntl_reg, autoneg_reg, speed; ++ s32 status; ++ ixgbe_link_speed lcd_speed; ++ u32 save_autoneg; ++ bool link_up; ++ ++ /* SW LPLU not required on later HW revisions. */ ++ if ((hw->mac.type == ixgbe_mac_X550EM_x) && ++ (IXGBE_FUSES0_REV_MASK & ++ IXGBE_READ_REG(hw, IXGBE_FUSES0_GROUP(0)))) ++ return IXGBE_SUCCESS; ++ ++ /* If blocked by MNG FW, then don't restart AN */ ++ if (ixgbe_check_reset_blocked(hw)) ++ return IXGBE_SUCCESS; ++ ++ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ status = ixgbe_read_eeprom(hw, NVM_INIT_CTRL_3, &hw->eeprom.ctrl_word_3); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* If link is down, LPLU disabled in NVM, WoL disabled, or manageability ++ * disabled, then force link down by entering low power mode. ++ */ ++ if (!link_up || !(hw->eeprom.ctrl_word_3 & NVM_INIT_CTRL_3_LPLU) || ++ !(hw->wol_enabled || ixgbe_mng_present(hw))) ++ return ixgbe_set_copper_phy_power(hw, FALSE); ++ ++ /* Determine LCD */ ++ status = ixgbe_get_lcd_t_x550em(hw, &lcd_speed); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* If no valid LCD link speed, then force link down and exit. 
*/ ++ if (lcd_speed == IXGBE_LINK_SPEED_UNKNOWN) ++ return ixgbe_set_copper_phy_power(hw, FALSE); ++ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_STAT, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &speed); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* If no link now, speed is invalid so take link down */ ++ status = ixgbe_ext_phy_t_x550em_get_link(hw, &link_up); ++ if (status != IXGBE_SUCCESS) ++ return ixgbe_set_copper_phy_power(hw, false); ++ ++ /* clear everything but the speed bits */ ++ speed &= IXGBE_MDIO_AUTO_NEG_VEN_STAT_SPEED_MASK; ++ ++ /* If current speed is already LCD, then exit. */ ++ if (((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_1GB) && ++ (lcd_speed == IXGBE_LINK_SPEED_1GB_FULL)) || ++ ((speed == IXGBE_MDIO_AUTO_NEG_VENDOR_STATUS_10GB) && ++ (lcd_speed == IXGBE_LINK_SPEED_10GB_FULL))) ++ return status; ++ ++ /* Clear AN completed indication */ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_VENDOR_TX_ALARM, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ status = hw->phy.ops.read_reg(hw, IXGBE_MII_10GBASE_T_AUTONEG_CTRL_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &an_10g_cntl_reg); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ status = hw->phy.ops.read_reg(hw, ++ IXGBE_MII_AUTONEG_VENDOR_PROVISION_1_REG, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_reg); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ save_autoneg = hw->phy.autoneg_advertised; ++ ++ /* Setup link at least common link speed */ ++ status = hw->mac.ops.setup_link(hw, lcd_speed, false); ++ ++ /* restore autoneg from before setting lplu speed */ ++ hw->phy.autoneg_advertised = save_autoneg; ++ ++ return status; ++} ++ ++/** ++ * ixgbe_get_lcd_x550em - Determine lowest common denominator ++ * @hw: pointer to hardware structure ++ * @lcd_speed: pointer to lowest common link speed ++ * ++ * Determine lowest common link speed with link partner. 
++ **/ ++s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed) ++{ ++ u16 an_lp_status; ++ s32 status; ++ u16 word = hw->eeprom.ctrl_word_3; ++ ++ *lcd_speed = IXGBE_LINK_SPEED_UNKNOWN; ++ ++ status = hw->phy.ops.read_reg(hw, IXGBE_AUTO_NEG_LP_STATUS, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &an_lp_status); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* If link partner advertised 1G, return 1G */ ++ if (an_lp_status & IXGBE_AUTO_NEG_LP_1000BASE_CAP) { ++ *lcd_speed = IXGBE_LINK_SPEED_1GB_FULL; ++ return status; ++ } ++ ++ /* If 10G disabled for LPLU via NVM D10GMP, then return no valid LCD */ ++ if ((hw->bus.lan_id && (word & NVM_INIT_CTRL_3_D10GMP_PORT1)) || ++ (word & NVM_INIT_CTRL_3_D10GMP_PORT0)) ++ return status; ++ ++ /* Link partner not capable of lower speeds, return 10G */ ++ *lcd_speed = IXGBE_LINK_SPEED_10GB_FULL; ++ return status; ++} ++ ++/** ++ * ixgbe_setup_fc_X550em - Set up flow control ++ * @hw: pointer to hardware structure ++ * ++ * Called at init time to set up flow control. ++ **/ ++s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw) ++{ ++ s32 ret_val = IXGBE_SUCCESS; ++ u32 pause, asm_dir, reg_val; ++ ++ DEBUGFUNC("ixgbe_setup_fc_X550em"); ++ ++ /* Validate the requested mode */ ++ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ++ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, ++ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); ++ ret_val = IXGBE_ERR_INVALID_LINK_SETTINGS; ++ goto out; ++ } ++ ++ /* 10gig parts do not have a word in the EEPROM to determine the ++ * default flow control setting, so we explicitly set it to full. ++ */ ++ if (hw->fc.requested_mode == ixgbe_fc_default) ++ hw->fc.requested_mode = ixgbe_fc_full; ++ ++ /* Determine PAUSE and ASM_DIR bits. 
*/ ++ switch (hw->fc.requested_mode) { ++ case ixgbe_fc_none: ++ pause = 0; ++ asm_dir = 0; ++ break; ++ case ixgbe_fc_tx_pause: ++ pause = 0; ++ asm_dir = 1; ++ break; ++ case ixgbe_fc_rx_pause: ++ /* Rx Flow control is enabled and Tx Flow control is ++ * disabled by software override. Since there really ++ * isn't a way to advertise that we are capable of RX ++ * Pause ONLY, we will advertise that we support both ++ * symmetric and asymmetric Rx PAUSE, as such we fall ++ * through to the fc_full statement. Later, we will ++ * disable the adapter's ability to send PAUSE frames. ++ */ ++ case ixgbe_fc_full: ++ pause = 1; ++ asm_dir = 1; ++ break; ++ default: ++ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, ++ "Flow control param set incorrectly\n"); ++ ret_val = IXGBE_ERR_CONFIG; ++ goto out; ++ } ++ ++ switch (hw->device_id) { ++ case IXGBE_DEV_ID_X550EM_X_KR: ++ case IXGBE_DEV_ID_X550EM_A_KR: ++ case IXGBE_DEV_ID_X550EM_A_KR_L: ++ ret_val = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, ®_val); ++ if (ret_val != IXGBE_SUCCESS) ++ goto out; ++ reg_val &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); ++ if (pause) ++ reg_val |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; ++ if (asm_dir) ++ reg_val |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; ++ ret_val = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, reg_val); ++ ++ /* This device does not fully support AN. */ ++ hw->fc.disable_fc_autoneg = true; ++ break; ++ case IXGBE_DEV_ID_X550EM_X_XFI: ++ hw->fc.disable_fc_autoneg = true; ++ break; ++ default: ++ break; ++ } ++ ++out: ++ return ret_val; ++} ++ ++/** ++ * ixgbe_fc_autoneg_backplane_x550em_a - Enable flow control IEEE clause 37 ++ * @hw: pointer to hardware structure ++ * ++ * Enable flow control according to IEEE clause 37. 
++ **/ ++void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw) ++{ ++ u32 link_s1, lp_an_page_low, an_cntl_1; ++ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ ixgbe_link_speed speed; ++ bool link_up; ++ ++ /* AN should have completed when the cable was plugged in. ++ * Look for reasons to bail out. Bail out if: ++ * - FC autoneg is disabled, or if ++ * - link is not up. ++ */ ++ if (hw->fc.disable_fc_autoneg) { ++ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, ++ "Flow control autoneg is disabled"); ++ goto out; ++ } ++ ++ hw->mac.ops.check_link(hw, &speed, &link_up, false); ++ if (!link_up) { ++ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); ++ goto out; ++ } ++ ++ /* Check at auto-negotiation has completed */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LINK_S1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &link_s1); ++ ++ if (status != IXGBE_SUCCESS || ++ (link_s1 & IXGBE_KRM_LINK_S1_MAC_AN_COMPLETE) == 0) { ++ DEBUGOUT("Auto-Negotiation did not complete\n"); ++ status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ goto out; ++ } ++ ++ /* Read the 10g AN autoc and LP ability registers and resolve ++ * local flow control settings accordingly ++ */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl_1); ++ ++ if (status != IXGBE_SUCCESS) { ++ DEBUGOUT("Auto-Negotiation did not complete\n"); ++ goto out; ++ } ++ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_LP_BASE_PAGE_HIGH(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &lp_an_page_low); ++ ++ if (status != IXGBE_SUCCESS) { ++ DEBUGOUT("Auto-Negotiation did not complete\n"); ++ goto out; ++ } ++ ++ status = ixgbe_negotiate_fc(hw, an_cntl_1, lp_an_page_low, ++ IXGBE_KRM_AN_CNTL_1_SYM_PAUSE, ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE, ++ IXGBE_KRM_LP_BASE_PAGE_HIGH_SYM_PAUSE, ++ IXGBE_KRM_LP_BASE_PAGE_HIGH_ASM_PAUSE); ++ ++out: ++ if (status == IXGBE_SUCCESS) { ++ hw->fc.fc_was_autonegged = true; ++ } else { ++ 
hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; ++ } ++} ++ ++/** ++ * ixgbe_fc_autoneg_fiber_x550em_a - passthrough FC settings ++ * @hw: pointer to hardware structure ++ * ++ **/ ++void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw) ++{ ++ hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; ++} ++ ++/** ++ * ixgbe_fc_autoneg_sgmii_x550em_a - Enable flow control IEEE clause 37 ++ * @hw: pointer to hardware structure ++ * ++ * Enable flow control according to IEEE clause 37. ++ **/ ++void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ u32 info[FW_PHY_ACT_DATA_COUNT] = { 0 }; ++ ixgbe_link_speed speed; ++ bool link_up; ++ ++ /* AN should have completed when the cable was plugged in. ++ * Look for reasons to bail out. Bail out if: ++ * - FC autoneg is disabled, or if ++ * - link is not up. ++ */ ++ if (hw->fc.disable_fc_autoneg) { ++ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, ++ "Flow control autoneg is disabled"); ++ goto out; ++ } ++ ++ hw->mac.ops.check_link(hw, &speed, &link_up, false); ++ if (!link_up) { ++ ERROR_REPORT1(IXGBE_ERROR_SOFTWARE, "The link is down"); ++ goto out; ++ } ++ ++ /* Check if auto-negotiation has completed */ ++ status = ixgbe_fw_phy_activity(hw, FW_PHY_ACT_GET_LINK_INFO, &info); ++ if (status != IXGBE_SUCCESS || ++ !(info[0] & FW_PHY_ACT_GET_LINK_INFO_AN_COMPLETE)) { ++ DEBUGOUT("Auto-Negotiation did not complete\n"); ++ status = IXGBE_ERR_FC_NOT_NEGOTIATED; ++ goto out; ++ } ++ ++ /* Negotiate the flow control */ ++ status = ixgbe_negotiate_fc(hw, info[0], info[0], ++ FW_PHY_ACT_GET_LINK_INFO_FC_RX, ++ FW_PHY_ACT_GET_LINK_INFO_FC_TX, ++ FW_PHY_ACT_GET_LINK_INFO_LP_FC_RX, ++ FW_PHY_ACT_GET_LINK_INFO_LP_FC_TX); ++ ++out: ++ if (status == IXGBE_SUCCESS) { ++ hw->fc.fc_was_autonegged = true; ++ } else { ++ hw->fc.fc_was_autonegged = false; ++ hw->fc.current_mode = hw->fc.requested_mode; ++ } ++} ++ ++/** ++ * 
ixgbe_setup_fc_backplane_x550em_a - Set up flow control ++ * @hw: pointer to hardware structure ++ * ++ * Called at init time to set up flow control. ++ **/ ++s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw) ++{ ++ s32 status = IXGBE_SUCCESS; ++ u32 an_cntl = 0; ++ ++ DEBUGFUNC("ixgbe_setup_fc_backplane_x550em_a"); ++ ++ /* Validate the requested mode */ ++ if (hw->fc.strict_ieee && hw->fc.requested_mode == ixgbe_fc_rx_pause) { ++ ERROR_REPORT1(IXGBE_ERROR_UNSUPPORTED, ++ "ixgbe_fc_rx_pause not valid in strict IEEE mode\n"); ++ return IXGBE_ERR_INVALID_LINK_SETTINGS; ++ } ++ ++ if (hw->fc.requested_mode == ixgbe_fc_default) ++ hw->fc.requested_mode = ixgbe_fc_full; ++ ++ /* Set up the 1G and 10G flow control advertisement registers so the ++ * HW will be able to do FC autoneg once the cable is plugged in. If ++ * we link at 10G, the 1G advertisement is harmless and vice versa. ++ */ ++ status = hw->mac.ops.read_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, &an_cntl); ++ ++ if (status != IXGBE_SUCCESS) { ++ DEBUGOUT("Auto-Negotiation did not complete\n"); ++ return status; ++ } ++ ++ /* The possible values of fc.requested_mode are: ++ * 0: Flow control is completely disabled ++ * 1: Rx flow control is enabled (we can receive pause frames, ++ * but not send pause frames). ++ * 2: Tx flow control is enabled (we can send pause frames but ++ * we do not support receiving pause frames). ++ * 3: Both Rx and Tx flow control (symmetric) are enabled. ++ * other: Invalid. ++ */ ++ switch (hw->fc.requested_mode) { ++ case ixgbe_fc_none: ++ /* Flow control completely disabled by software override. */ ++ an_cntl &= ~(IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE); ++ break; ++ case ixgbe_fc_tx_pause: ++ /* Tx Flow control is enabled, and Rx Flow control is ++ * disabled by software override. 
++ */ ++ an_cntl |= IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; ++ an_cntl &= ~IXGBE_KRM_AN_CNTL_1_SYM_PAUSE; ++ break; ++ case ixgbe_fc_rx_pause: ++ /* Rx Flow control is enabled and Tx Flow control is ++ * disabled by software override. Since there really ++ * isn't a way to advertise that we are capable of RX ++ * Pause ONLY, we will advertise that we support both ++ * symmetric and asymmetric Rx PAUSE, as such we fall ++ * through to the fc_full statement. Later, we will ++ * disable the adapter's ability to send PAUSE frames. ++ */ ++ case ixgbe_fc_full: ++ /* Flow control (both Rx and Tx) is enabled by SW override. */ ++ an_cntl |= IXGBE_KRM_AN_CNTL_1_SYM_PAUSE | ++ IXGBE_KRM_AN_CNTL_1_ASM_PAUSE; ++ break; ++ default: ++ ERROR_REPORT1(IXGBE_ERROR_ARGUMENT, ++ "Flow control param set incorrectly\n"); ++ return IXGBE_ERR_CONFIG; ++ } ++ ++ status = hw->mac.ops.write_iosf_sb_reg(hw, ++ IXGBE_KRM_AN_CNTL_1(hw->bus.lan_id), ++ IXGBE_SB_IOSF_TARGET_KR_PHY, an_cntl); ++ ++ /* Restart auto-negotiation. 
*/ ++ status = ixgbe_restart_an_internal_phy_x550em(hw); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_set_mux - Set mux for port 1 access with CS4227 ++ * @hw: pointer to hardware structure ++ * @state: set mux if 1, clear if 0 ++ */ ++STATIC void ixgbe_set_mux(struct ixgbe_hw *hw, u8 state) ++{ ++ u32 esdp; ++ ++ if (!hw->bus.lan_id) ++ return; ++ esdp = IXGBE_READ_REG(hw, IXGBE_ESDP); ++ if (state) ++ esdp |= IXGBE_ESDP_SDP1; ++ else ++ esdp &= ~IXGBE_ESDP_SDP1; ++ IXGBE_WRITE_REG(hw, IXGBE_ESDP, esdp); ++ IXGBE_WRITE_FLUSH(hw); ++} ++ ++/** ++ * ixgbe_acquire_swfw_sync_X550em - Acquire SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to acquire ++ * ++ * Acquires the SWFW semaphore and sets the I2C MUX ++ **/ ++s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) ++{ ++ s32 status; ++ ++ DEBUGFUNC("ixgbe_acquire_swfw_sync_X550em"); ++ ++ status = ixgbe_acquire_swfw_sync_X540(hw, mask); ++ if (status) ++ return status; ++ ++ if (mask & IXGBE_GSSR_I2C_MASK) ++ ixgbe_set_mux(hw, 1); ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_release_swfw_sync_X550em - Release SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to release ++ * ++ * Releases the SWFW semaphore and sets the I2C MUX ++ **/ ++void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask) ++{ ++ DEBUGFUNC("ixgbe_release_swfw_sync_X550em"); ++ ++ if (mask & IXGBE_GSSR_I2C_MASK) ++ ixgbe_set_mux(hw, 0); ++ ++ ixgbe_release_swfw_sync_X540(hw, mask); ++} ++ ++/** ++ * ixgbe_acquire_swfw_sync_X550a - Acquire SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to acquire ++ * ++ * Acquires the SWFW semaphore and get the shared phy token as needed ++ */ ++STATIC s32 ixgbe_acquire_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) ++{ ++ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; ++ int retries = FW_PHY_TOKEN_RETRIES; ++ s32 status = IXGBE_SUCCESS; ++ 
++ DEBUGFUNC("ixgbe_acquire_swfw_sync_X550a"); ++ ++ while (--retries) { ++ status = IXGBE_SUCCESS; ++ if (hmask) ++ status = ixgbe_acquire_swfw_sync_X540(hw, hmask); ++ if (status) { ++ DEBUGOUT1("Could not acquire SWFW semaphore, Status = %d\n", ++ status); ++ return status; ++ } ++ if (!(mask & IXGBE_GSSR_TOKEN_SM)) ++ return IXGBE_SUCCESS; ++ ++ status = ixgbe_get_phy_token(hw); ++ if (status == IXGBE_ERR_TOKEN_RETRY) ++ DEBUGOUT1("Could not acquire PHY token, Status = %d\n", ++ status); ++ ++ if (status == IXGBE_SUCCESS) ++ return IXGBE_SUCCESS; ++ ++ if (hmask) ++ ixgbe_release_swfw_sync_X540(hw, hmask); ++ ++ if (status != IXGBE_ERR_TOKEN_RETRY) { ++ DEBUGOUT1("Unable to retry acquiring the PHY token, Status = %d\n", ++ status); ++ return status; ++ } ++ } ++ ++ DEBUGOUT1("Semaphore acquisition retries failed!: PHY ID = 0x%08X\n", ++ hw->phy.id); ++ return status; ++} ++ ++/** ++ * ixgbe_release_swfw_sync_X550a - Release SWFW semaphore ++ * @hw: pointer to hardware structure ++ * @mask: Mask to specify which semaphore to release ++ * ++ * Releases the SWFW semaphore and puts the shared phy token as needed ++ */ ++STATIC void ixgbe_release_swfw_sync_X550a(struct ixgbe_hw *hw, u32 mask) ++{ ++ u32 hmask = mask & ~IXGBE_GSSR_TOKEN_SM; ++ ++ DEBUGFUNC("ixgbe_release_swfw_sync_X550a"); ++ ++ if (mask & IXGBE_GSSR_TOKEN_SM) ++ ixgbe_put_phy_token(hw); ++ ++ if (hmask) ++ ixgbe_release_swfw_sync_X540(hw, hmask); ++} ++ ++/** ++ * ixgbe_read_phy_reg_x550a - Reads specified PHY register ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit address of PHY register to read ++ * @phy_data: Pointer to read data from PHY register ++ * ++ * Reads a value from a specified PHY register using the SWFW lock and PHY ++ * Token. The PHY Token is needed since the MDIO is shared between to MAC ++ * instances. 
++ **/ ++s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 *phy_data) ++{ ++ s32 status; ++ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; ++ ++ DEBUGFUNC("ixgbe_read_phy_reg_x550a"); ++ ++ if (hw->mac.ops.acquire_swfw_sync(hw, mask)) ++ return IXGBE_ERR_SWFW_SYNC; ++ ++ status = hw->phy.ops.read_reg_mdi(hw, reg_addr, device_type, phy_data); ++ ++ hw->mac.ops.release_swfw_sync(hw, mask); ++ ++ return status; ++} ++ ++/** ++ * ixgbe_write_phy_reg_x550a - Writes specified PHY register ++ * @hw: pointer to hardware structure ++ * @reg_addr: 32 bit PHY register to write ++ * @device_type: 5 bit device type ++ * @phy_data: Data to write to the PHY register ++ * ++ * Writes a value to specified PHY register using the SWFW lock and PHY Token. ++ * The PHY Token is needed since the MDIO is shared between to MAC instances. ++ **/ ++s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 phy_data) ++{ ++ s32 status; ++ u32 mask = hw->phy.phy_semaphore_mask | IXGBE_GSSR_TOKEN_SM; ++ ++ DEBUGFUNC("ixgbe_write_phy_reg_x550a"); ++ ++ if (hw->mac.ops.acquire_swfw_sync(hw, mask) == IXGBE_SUCCESS) { ++ status = hw->phy.ops.write_reg_mdi(hw, reg_addr, device_type, ++ phy_data); ++ hw->mac.ops.release_swfw_sync(hw, mask); ++ } else { ++ status = IXGBE_ERR_SWFW_SYNC; ++ } ++ ++ return status; ++} ++ ++/** ++ * ixgbe_handle_lasi_ext_t_x550em - Handle external Base T PHY interrupt ++ * @hw: pointer to hardware structure ++ * ++ * Handle external Base T PHY interrupt. If high temperature ++ * failure alarm then return error, else if link status change ++ * then setup internal/external PHY link ++ * ++ * Return IXGBE_ERR_OVERTEMP if interrupt is high temperature ++ * failure alarm, else return PHY access status. 
++ */ ++s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw) ++{ ++ bool lsc; ++ u32 status; ++ ++ status = ixgbe_get_lasi_ext_t_x550em(hw, &lsc); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ if (lsc) ++ return ixgbe_setup_internal_phy(hw); ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_setup_mac_link_t_X550em - Sets the auto advertised link speed ++ * @hw: pointer to hardware structure ++ * @speed: new link speed ++ * @autoneg_wait_to_complete: true when waiting for completion is needed ++ * ++ * Setup internal/external PHY link speed based on link speed, then set ++ * external PHY auto advertised link speed. ++ * ++ * Returns error status for any failure ++ **/ ++s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete) ++{ ++ s32 status; ++ ixgbe_link_speed force_speed; ++ ++ DEBUGFUNC("ixgbe_setup_mac_link_t_X550em"); ++ ++ /* Setup internal/external PHY link speed to iXFI (10G), unless ++ * only 1G is auto advertised then setup KX link. ++ */ ++ if (speed & IXGBE_LINK_SPEED_10GB_FULL) ++ force_speed = IXGBE_LINK_SPEED_10GB_FULL; ++ else ++ force_speed = IXGBE_LINK_SPEED_1GB_FULL; ++ ++ /* If X552 and internal link mode is XFI, then setup XFI internal link. ++ */ ++ if (hw->mac.type == ixgbe_mac_X550EM_x && ++ !(hw->phy.nw_mng_if_sel & IXGBE_NW_MNG_IF_SEL_INT_PHY_MODE)) { ++ status = ixgbe_setup_ixfi_x550em(hw, &force_speed); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ } ++ ++ return hw->phy.ops.setup_link_speed(hw, speed, autoneg_wait_to_complete); ++} ++ ++/** ++ * ixgbe_check_link_t_X550em - Determine link and speed status ++ * @hw: pointer to hardware structure ++ * @speed: pointer to link speed ++ * @link_up: true when link is up ++ * @link_up_wait_to_complete: bool used to wait for link up or not ++ * ++ * Check that both the MAC and X557 external PHY have link. 
++ **/ ++s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up, bool link_up_wait_to_complete) ++{ ++ u32 status; ++ u16 i, autoneg_status = 0; ++ ++ if (hw->mac.ops.get_media_type(hw) != ixgbe_media_type_copper) ++ return IXGBE_ERR_CONFIG; ++ ++ status = ixgbe_check_mac_link_generic(hw, speed, link_up, ++ link_up_wait_to_complete); ++ ++ /* If check link fails or MAC link is not up, then return */ ++ if (status != IXGBE_SUCCESS || !(*link_up)) ++ return status; ++ ++ /* MAC link is up, so check external PHY link. ++ * X557 PHY. Link status is latching low, and can only be used to detect ++ * link drop, and not the current status of the link without performing ++ * back-to-back reads. ++ */ ++ for (i = 0; i < 2; i++) { ++ status = hw->phy.ops.read_reg(hw, IXGBE_MDIO_AUTO_NEG_STATUS, ++ IXGBE_MDIO_AUTO_NEG_DEV_TYPE, ++ &autoneg_status); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ } ++ ++ /* If external PHY link is not up, then indicate link not up */ ++ if (!(autoneg_status & IXGBE_MDIO_AUTO_NEG_LINK_STATUS)) ++ *link_up = false; ++ ++ return IXGBE_SUCCESS; ++} ++ ++/** ++ * ixgbe_reset_phy_t_X550em - Performs X557 PHY reset and enables LASI ++ * @hw: pointer to hardware structure ++ **/ ++s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw) ++{ ++ s32 status; ++ ++ status = ixgbe_reset_phy_generic(hw); ++ ++ if (status != IXGBE_SUCCESS) ++ return status; ++ ++ /* Configure Link Status Alarm and Temperature Threshold interrupts */ ++ return ixgbe_enable_lasi_ext_t_x550em(hw); ++} ++ ++/** ++ * ixgbe_led_on_t_X550em - Turns on the software controllable LEDs. ++ * @hw: pointer to hardware structure ++ * @led_idx: led number to turn on ++ **/ ++s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx) ++{ ++ u16 phy_data; ++ ++ DEBUGFUNC("ixgbe_led_on_t_X550em"); ++ ++ if (led_idx >= IXGBE_X557_MAX_LED_INDEX) ++ return IXGBE_ERR_PARAM; ++ ++ /* To turn on the LED, set mode to ON. 
*/ ++ ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); ++ phy_data |= IXGBE_X557_LED_MANUAL_SET_MASK; ++ ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); ++ ++ /* Some designs have the LEDs wired to the MAC */ ++ return ixgbe_led_on_generic(hw, led_idx); ++} ++ ++/** ++ * ixgbe_led_off_t_X550em - Turns off the software controllable LEDs. ++ * @hw: pointer to hardware structure ++ * @led_idx: led number to turn off ++ **/ ++s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx) ++{ ++ u16 phy_data; ++ ++ DEBUGFUNC("ixgbe_led_off_t_X550em"); ++ ++ if (led_idx >= IXGBE_X557_MAX_LED_INDEX) ++ return IXGBE_ERR_PARAM; ++ ++ /* To turn on the LED, set mode to ON. */ ++ ixgbe_read_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, &phy_data); ++ phy_data &= ~IXGBE_X557_LED_MANUAL_SET_MASK; ++ ixgbe_write_phy_reg(hw, IXGBE_X557_LED_PROVISIONING + led_idx, ++ IXGBE_MDIO_VENDOR_SPECIFIC_1_DEV_TYPE, phy_data); ++ ++ /* Some designs have the LEDs wired to the MAC */ ++ return ixgbe_led_off_generic(hw, led_idx); ++} ++ ++/** ++ * ixgbe_set_fw_drv_ver_x550 - Sends driver version to firmware ++ * @hw: pointer to the HW structure ++ * @maj: driver version major number ++ * @min: driver version minor number ++ * @build: driver version build number ++ * @sub: driver version sub build number ++ * @len: length of driver_ver string ++ * @driver_ver: driver string ++ * ++ * Sends driver version number to firmware through the manageability ++ * block. On success return IXGBE_SUCCESS ++ * else returns IXGBE_ERR_SWFW_SYNC when encountering an error acquiring ++ * semaphore or IXGBE_ERR_HOST_INTERFACE_COMMAND when command fails. 
++ **/ ++s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, ++ u8 build, u8 sub, u16 len, const char *driver_ver) ++{ ++ struct ixgbe_hic_drv_info2 fw_cmd; ++ s32 ret_val = IXGBE_SUCCESS; ++ int i; ++ ++ DEBUGFUNC("ixgbe_set_fw_drv_ver_x550"); ++ ++ if ((len == 0) || (driver_ver == NULL) || ++ (len > sizeof(fw_cmd.driver_string))) ++ return IXGBE_ERR_INVALID_ARGUMENT; ++ ++ fw_cmd.hdr.cmd = FW_CEM_CMD_DRIVER_INFO; ++ fw_cmd.hdr.buf_len = FW_CEM_CMD_DRIVER_INFO_LEN + len; ++ fw_cmd.hdr.cmd_or_resp.cmd_resv = FW_CEM_CMD_RESERVED; ++ fw_cmd.port_num = (u8)hw->bus.func; ++ fw_cmd.ver_maj = maj; ++ fw_cmd.ver_min = min; ++ fw_cmd.ver_build = build; ++ fw_cmd.ver_sub = sub; ++ fw_cmd.hdr.checksum = 0; ++ memcpy(fw_cmd.driver_string, driver_ver, len); ++ fw_cmd.hdr.checksum = ixgbe_calculate_checksum((u8 *)&fw_cmd, ++ (FW_CEM_HDR_LEN + fw_cmd.hdr.buf_len)); ++ ++ for (i = 0; i <= FW_CEM_MAX_RETRIES; i++) { ++ ret_val = ixgbe_host_interface_command(hw, (u32 *)&fw_cmd, ++ sizeof(fw_cmd), ++ IXGBE_HI_COMMAND_TIMEOUT, ++ true); ++ if (ret_val != IXGBE_SUCCESS) ++ continue; ++ ++ if (fw_cmd.hdr.cmd_or_resp.ret_status == ++ FW_CEM_RESP_STATUS_SUCCESS) ++ ret_val = IXGBE_SUCCESS; ++ else ++ ret_val = IXGBE_ERR_HOST_INTERFACE_COMMAND; ++ ++ break; ++ } ++ ++ return ret_val; ++} +diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h +new file mode 100644 +index 0000000..ff2c4ea +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_x550.h +@@ -0,0 +1,115 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. 
++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _IXGBE_X550_H_ ++#define _IXGBE_X550_H_ ++ ++#include "ixgbe_type.h" ++ ++s32 ixgbe_dmac_config_X550(struct ixgbe_hw *hw); ++s32 ixgbe_dmac_config_tcs_X550(struct ixgbe_hw *hw); ++s32 ixgbe_dmac_update_tcs_X550(struct ixgbe_hw *hw); ++ ++s32 ixgbe_get_bus_info_X550em(struct ixgbe_hw *hw); ++s32 ixgbe_init_eeprom_params_X550(struct ixgbe_hw *hw); ++s32 ixgbe_update_eeprom_checksum_X550(struct ixgbe_hw *hw); ++s32 ixgbe_calc_eeprom_checksum_X550(struct ixgbe_hw *hw); ++s32 ixgbe_calc_checksum_X550(struct ixgbe_hw *hw, u16 *buffer, u32 buffer_size); ++s32 ixgbe_validate_eeprom_checksum_X550(struct ixgbe_hw *hw, u16 *checksum_val); ++s32 ixgbe_update_flash_X550(struct ixgbe_hw *hw); ++s32 ixgbe_write_ee_hostif_buffer_X550(struct ixgbe_hw *hw, ++ u16 offset, u16 words, u16 *data); ++s32 ixgbe_write_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, ++ u16 data); ++s32 ixgbe_read_ee_hostif_buffer_X550(struct ixgbe_hw *hw, ++ u16 offset, u16 words, u16 *data); ++s32 ixgbe_read_ee_hostif_X550(struct ixgbe_hw *hw, u16 offset, ++u16 *data); ++s32 ixgbe_write_ee_hostif_data_X550(struct ixgbe_hw *hw, u16 offset, ++ u16 data); ++void ixgbe_set_source_address_pruning_X550(struct ixgbe_hw *hw, bool enable, ++ unsigned int pool); ++void ixgbe_set_ethertype_anti_spoofing_X550(struct ixgbe_hw *hw, ++ bool enable, int vf); ++s32 ixgbe_write_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 
device_type, u32 data); ++s32 ixgbe_read_iosf_sb_reg_x550(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u32 *data); ++s32 ixgbe_set_fw_drv_ver_x550(struct ixgbe_hw *hw, u8 maj, u8 min, ++ u8 build, u8 ver, u16 len, const char *str); ++s32 ixgbe_get_phy_token(struct ixgbe_hw *); ++s32 ixgbe_put_phy_token(struct ixgbe_hw *); ++s32 ixgbe_write_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u32 data); ++s32 ixgbe_read_iosf_sb_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u32 *data); ++void ixgbe_disable_mdd_X550(struct ixgbe_hw *hw); ++void ixgbe_enable_mdd_X550(struct ixgbe_hw *hw); ++void ixgbe_mdd_event_X550(struct ixgbe_hw *hw, u32 *vf_bitmap); ++void ixgbe_restore_mdd_vf_X550(struct ixgbe_hw *hw, u32 vf); ++enum ixgbe_media_type ixgbe_get_media_type_X550em(struct ixgbe_hw *hw); ++s32 ixgbe_setup_sfp_modules_X550em(struct ixgbe_hw *hw); ++s32 ixgbe_get_link_capabilities_X550em(struct ixgbe_hw *hw, ++ ixgbe_link_speed *speed, bool *autoneg); ++void ixgbe_init_mac_link_ops_X550em(struct ixgbe_hw *hw); ++s32 ixgbe_reset_hw_X550em(struct ixgbe_hw *hw); ++s32 ixgbe_init_phy_ops_X550em(struct ixgbe_hw *hw); ++s32 ixgbe_setup_kr_x550em(struct ixgbe_hw *hw); ++s32 ixgbe_init_ext_t_x550em(struct ixgbe_hw *hw); ++s32 ixgbe_setup_internal_phy_t_x550em(struct ixgbe_hw *hw); ++s32 ixgbe_setup_phy_loopback_x550em(struct ixgbe_hw *hw); ++u64 ixgbe_get_supported_physical_layer_X550em(struct ixgbe_hw *hw); ++void ixgbe_disable_rx_x550(struct ixgbe_hw *hw); ++s32 ixgbe_get_lcd_t_x550em(struct ixgbe_hw *hw, ixgbe_link_speed *lcd_speed); ++s32 ixgbe_enter_lplu_t_x550em(struct ixgbe_hw *hw); ++s32 ixgbe_acquire_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); ++void ixgbe_release_swfw_sync_X550em(struct ixgbe_hw *hw, u32 mask); ++s32 ixgbe_setup_fc_X550em(struct ixgbe_hw *hw); ++s32 ixgbe_setup_mac_link_sfp_x550em(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete); ++s32 
ixgbe_setup_mac_link_sfp_x550a(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_read_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 *phy_data); ++s32 ixgbe_write_phy_reg_x550a(struct ixgbe_hw *hw, u32 reg_addr, ++ u32 device_type, u16 phy_data); ++s32 ixgbe_setup_fc_fiber_x550em_a(struct ixgbe_hw *hw); ++s32 ixgbe_setup_fc_backplane_x550em_a(struct ixgbe_hw *hw); ++s32 ixgbe_setup_fc_sgmii_x550em_a(struct ixgbe_hw *hw); ++void ixgbe_fc_autoneg_fiber_x550em_a(struct ixgbe_hw *hw); ++void ixgbe_fc_autoneg_backplane_x550em_a(struct ixgbe_hw *hw); ++void ixgbe_fc_autoneg_sgmii_x550em_a(struct ixgbe_hw *hw); ++s32 ixgbe_handle_lasi_ext_t_x550em(struct ixgbe_hw *hw); ++s32 ixgbe_setup_mac_link_t_X550em(struct ixgbe_hw *hw, ++ ixgbe_link_speed speed, ++ bool autoneg_wait_to_complete); ++s32 ixgbe_check_link_t_X550em(struct ixgbe_hw *hw, ixgbe_link_speed *speed, ++ bool *link_up, bool link_up_wait_to_complete); ++s32 ixgbe_reset_phy_t_X550em(struct ixgbe_hw *hw); ++s32 ixgbe_identify_sfp_module_X550em(struct ixgbe_hw *hw); ++s32 ixgbe_led_on_t_X550em(struct ixgbe_hw *hw, u32 led_idx); ++s32 ixgbe_led_off_t_X550em(struct ixgbe_hw *hw, u32 led_idx); ++#endif /* _IXGBE_X550_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/kcompat.c b/drivers/net/ethernet/intel/ixgbe/kcompat.c +new file mode 100644 +index 0000000..b1974fb +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/kcompat.c +@@ -0,0 +1,2375 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. 
++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#include "ixgbe.h" ++#include "kcompat.h" ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) || defined __VMKLNX__ ++/* From lib/vsprintf.c */ ++#include ++ ++static int skip_atoi(const char **s) ++{ ++ int i=0; ++ ++ while (isdigit(**s)) ++ i = i*10 + *((*s)++) - '0'; ++ return i; ++} ++ ++#define _kc_ZEROPAD 1 /* pad with zero */ ++#define _kc_SIGN 2 /* unsigned/signed long */ ++#define _kc_PLUS 4 /* show plus */ ++#define _kc_SPACE 8 /* space if plus */ ++#define _kc_LEFT 16 /* left justified */ ++#define _kc_SPECIAL 32 /* 0x */ ++#define _kc_LARGE 64 /* use 'ABCDEF' instead of 'abcdef' */ ++ ++static char * number(char * buf, char * end, long long num, int base, int size, int precision, int type) ++{ ++ char c,sign,tmp[66]; ++ const char *digits; ++ const char small_digits[] = "0123456789abcdefghijklmnopqrstuvwxyz"; ++ const char large_digits[] = "0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ"; ++ int i; ++ ++ digits = (type & _kc_LARGE) ? large_digits : small_digits; ++ if (type & _kc_LEFT) ++ type &= ~_kc_ZEROPAD; ++ if (base < 2 || base > 36) ++ return 0; ++ c = (type & _kc_ZEROPAD) ? 
'0' : ' '; ++ sign = 0; ++ if (type & _kc_SIGN) { ++ if (num < 0) { ++ sign = '-'; ++ num = -num; ++ size--; ++ } else if (type & _kc_PLUS) { ++ sign = '+'; ++ size--; ++ } else if (type & _kc_SPACE) { ++ sign = ' '; ++ size--; ++ } ++ } ++ if (type & _kc_SPECIAL) { ++ if (base == 16) ++ size -= 2; ++ else if (base == 8) ++ size--; ++ } ++ i = 0; ++ if (num == 0) ++ tmp[i++]='0'; ++ else while (num != 0) ++ tmp[i++] = digits[do_div(num,base)]; ++ if (i > precision) ++ precision = i; ++ size -= precision; ++ if (!(type&(_kc_ZEROPAD+_kc_LEFT))) { ++ while(size-->0) { ++ if (buf <= end) ++ *buf = ' '; ++ ++buf; ++ } ++ } ++ if (sign) { ++ if (buf <= end) ++ *buf = sign; ++ ++buf; ++ } ++ if (type & _kc_SPECIAL) { ++ if (base==8) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ } else if (base==16) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ if (buf <= end) ++ *buf = digits[33]; ++ ++buf; ++ } ++ } ++ if (!(type & _kc_LEFT)) { ++ while (size-- > 0) { ++ if (buf <= end) ++ *buf = c; ++ ++buf; ++ } ++ } ++ while (i < precision--) { ++ if (buf <= end) ++ *buf = '0'; ++ ++buf; ++ } ++ while (i-- > 0) { ++ if (buf <= end) ++ *buf = tmp[i]; ++ ++buf; ++ } ++ while (size-- > 0) { ++ if (buf <= end) ++ *buf = ' '; ++ ++buf; ++ } ++ return buf; ++} ++ ++int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args) ++{ ++ int len; ++ unsigned long long num; ++ int i, base; ++ char *str, *end, c; ++ const char *s; ++ ++ int flags; /* flags to number() */ ++ ++ int field_width; /* width of output field */ ++ int precision; /* min. # of digits for integers; max ++ number of chars for from string */ ++ int qualifier; /* 'h', 'l', or 'L' for integer fields */ ++ /* 'z' support added 23/7/1999 S.H. 
*/ ++ /* 'z' changed to 'Z' --davidm 1/25/99 */ ++ ++ str = buf; ++ end = buf + size - 1; ++ ++ if (end < buf - 1) { ++ end = ((void *) -1); ++ size = end - buf + 1; ++ } ++ ++ for (; *fmt ; ++fmt) { ++ if (*fmt != '%') { ++ if (str <= end) ++ *str = *fmt; ++ ++str; ++ continue; ++ } ++ ++ /* process flags */ ++ flags = 0; ++ repeat: ++ ++fmt; /* this also skips first '%' */ ++ switch (*fmt) { ++ case '-': flags |= _kc_LEFT; goto repeat; ++ case '+': flags |= _kc_PLUS; goto repeat; ++ case ' ': flags |= _kc_SPACE; goto repeat; ++ case '#': flags |= _kc_SPECIAL; goto repeat; ++ case '0': flags |= _kc_ZEROPAD; goto repeat; ++ } ++ ++ /* get field width */ ++ field_width = -1; ++ if (isdigit(*fmt)) ++ field_width = skip_atoi(&fmt); ++ else if (*fmt == '*') { ++ ++fmt; ++ /* it's the next argument */ ++ field_width = va_arg(args, int); ++ if (field_width < 0) { ++ field_width = -field_width; ++ flags |= _kc_LEFT; ++ } ++ } ++ ++ /* get the precision */ ++ precision = -1; ++ if (*fmt == '.') { ++ ++fmt; ++ if (isdigit(*fmt)) ++ precision = skip_atoi(&fmt); ++ else if (*fmt == '*') { ++ ++fmt; ++ /* it's the next argument */ ++ precision = va_arg(args, int); ++ } ++ if (precision < 0) ++ precision = 0; ++ } ++ ++ /* get the conversion qualifier */ ++ qualifier = -1; ++ if (*fmt == 'h' || *fmt == 'l' || *fmt == 'L' || *fmt =='Z') { ++ qualifier = *fmt; ++ ++fmt; ++ } ++ ++ /* default base */ ++ base = 10; ++ ++ switch (*fmt) { ++ case 'c': ++ if (!(flags & _kc_LEFT)) { ++ while (--field_width > 0) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ } ++ c = (unsigned char) va_arg(args, int); ++ if (str <= end) ++ *str = c; ++ ++str; ++ while (--field_width > 0) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ continue; ++ ++ case 's': ++ s = va_arg(args, char *); ++ if (!s) ++ s = ""; ++ ++ len = strnlen(s, precision); ++ ++ if (!(flags & _kc_LEFT)) { ++ while (len < field_width--) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ } ++ for (i = 0; i < len; ++i) 
{ ++ if (str <= end) ++ *str = *s; ++ ++str; ++s; ++ } ++ while (len < field_width--) { ++ if (str <= end) ++ *str = ' '; ++ ++str; ++ } ++ continue; ++ ++ case 'p': ++ if ('M' == *(fmt+1)) { ++ str = get_mac(str, end, va_arg(args, unsigned char *)); ++ fmt++; ++ } else { ++ if (field_width == -1) { ++ field_width = 2*sizeof(void *); ++ flags |= _kc_ZEROPAD; ++ } ++ str = number(str, end, ++ (unsigned long) va_arg(args, void *), ++ 16, field_width, precision, flags); ++ } ++ continue; ++ ++ case 'n': ++ /* FIXME: ++ * What does C99 say about the overflow case here? */ ++ if (qualifier == 'l') { ++ long * ip = va_arg(args, long *); ++ *ip = (str - buf); ++ } else if (qualifier == 'Z') { ++ size_t * ip = va_arg(args, size_t *); ++ *ip = (str - buf); ++ } else { ++ int * ip = va_arg(args, int *); ++ *ip = (str - buf); ++ } ++ continue; ++ ++ case '%': ++ if (str <= end) ++ *str = '%'; ++ ++str; ++ continue; ++ ++ /* integer number formats - set up the flags and "break" */ ++ case 'o': ++ base = 8; ++ break; ++ ++ case 'X': ++ flags |= _kc_LARGE; ++ case 'x': ++ base = 16; ++ break; ++ ++ case 'd': ++ case 'i': ++ flags |= _kc_SIGN; ++ case 'u': ++ break; ++ ++ default: ++ if (str <= end) ++ *str = '%'; ++ ++str; ++ if (*fmt) { ++ if (str <= end) ++ *str = *fmt; ++ ++str; ++ } else { ++ --fmt; ++ } ++ continue; ++ } ++ if (qualifier == 'L') ++ num = va_arg(args, long long); ++ else if (qualifier == 'l') { ++ num = va_arg(args, unsigned long); ++ if (flags & _kc_SIGN) ++ num = (signed long) num; ++ } else if (qualifier == 'Z') { ++ num = va_arg(args, size_t); ++ } else if (qualifier == 'h') { ++ num = (unsigned short) va_arg(args, int); ++ if (flags & _kc_SIGN) ++ num = (signed short) num; ++ } else { ++ num = va_arg(args, unsigned int); ++ if (flags & _kc_SIGN) ++ num = (signed int) num; ++ } ++ str = number(str, end, num, base, ++ field_width, precision, flags); ++ } ++ if (str <= end) ++ *str = '\0'; ++ else if (size > 0) ++ /* don't write out a null byte if the buf 
size is zero */ ++ *end = '\0'; ++ /* the trailing null byte doesn't count towards the total ++ * ++str; ++ */ ++ return str-buf; ++} ++ ++int _kc_snprintf(char * buf, size_t size, const char *fmt, ...) ++{ ++ va_list args; ++ int i; ++ ++ va_start(args, fmt); ++ i = _kc_vsnprintf(buf,size,fmt,args); ++ va_end(args); ++ return i; ++} ++#endif /* < 2.4.8 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) ++ ++/**************************************/ ++/* PCI DMA MAPPING */ ++ ++#if defined(CONFIG_HIGHMEM) ++ ++#ifndef PCI_DRAM_OFFSET ++#define PCI_DRAM_OFFSET 0 ++#endif ++ ++u64 ++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, ++ size_t size, int direction) ++{ ++ return (((u64) (page - mem_map) << PAGE_SHIFT) + offset + ++ PCI_DRAM_OFFSET); ++} ++ ++#else /* CONFIG_HIGHMEM */ ++ ++u64 ++_kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, ++ size_t size, int direction) ++{ ++ return pci_map_single(dev, (void *)page_address(page) + offset, size, ++ direction); ++} ++ ++#endif /* CONFIG_HIGHMEM */ ++ ++void ++_kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, ++ int direction) ++{ ++ return pci_unmap_single(dev, dma_addr, size, direction); ++} ++ ++#endif /* 2.4.13 => 2.4.3 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) ++ ++/**************************************/ ++/* PCI DRIVER API */ ++ ++int ++_kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask) ++{ ++ if (!pci_dma_supported(dev, mask)) ++ return -EIO; ++ dev->dma_mask = mask; ++ return 0; ++} ++ ++int ++_kc_pci_request_regions(struct pci_dev *dev, char *res_name) ++{ ++ int i; ++ ++ for (i = 0; i < 6; i++) { ++ if (pci_resource_len(dev, i) == 0) ++ continue; ++ ++ if (pci_resource_flags(dev, i) & IORESOURCE_IO) { ++ if 
(!request_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { ++ pci_release_regions(dev); ++ return -EBUSY; ++ } ++ } else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) { ++ if (!request_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i), res_name)) { ++ pci_release_regions(dev); ++ return -EBUSY; ++ } ++ } ++ } ++ return 0; ++} ++ ++void ++_kc_pci_release_regions(struct pci_dev *dev) ++{ ++ int i; ++ ++ for (i = 0; i < 6; i++) { ++ if (pci_resource_len(dev, i) == 0) ++ continue; ++ ++ if (pci_resource_flags(dev, i) & IORESOURCE_IO) ++ release_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); ++ ++ else if (pci_resource_flags(dev, i) & IORESOURCE_MEM) ++ release_mem_region(pci_resource_start(dev, i), pci_resource_len(dev, i)); ++ } ++} ++ ++/**************************************/ ++/* NETWORK DRIVER API */ ++ ++struct net_device * ++_kc_alloc_etherdev(int sizeof_priv) ++{ ++ struct net_device *dev; ++ int alloc_size; ++ ++ alloc_size = sizeof(*dev) + sizeof_priv + IFNAMSIZ + 31; ++ dev = kzalloc(alloc_size, GFP_KERNEL); ++ if (!dev) ++ return NULL; ++ ++ if (sizeof_priv) ++ dev->priv = (void *) (((unsigned long)(dev + 1) + 31) & ~31); ++ dev->name[0] = '\0'; ++ ether_setup(dev); ++ ++ return dev; ++} ++ ++int ++_kc_is_valid_ether_addr(u8 *addr) ++{ ++ const char zaddr[6] = { 0, }; ++ ++ return !(addr[0] & 1) && memcmp(addr, zaddr, 6); ++} ++ ++#endif /* 2.4.3 => 2.4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) ++ ++int ++_kc_pci_set_power_state(struct pci_dev *dev, int state) ++{ ++ return 0; ++} ++ ++int ++_kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable) ++{ ++ return 0; ++} ++ ++#endif /* 2.4.6 => 2.4.3 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, 
struct page *page, ++ int off, int size) ++{ ++ skb_frag_t *frag = &skb_shinfo(skb)->frags[i]; ++ frag->page = page; ++ frag->page_offset = off; ++ frag->size = size; ++ skb_shinfo(skb)->nr_frags = i + 1; ++} ++ ++/* ++ * Original Copyright: ++ * find_next_bit.c: fallback find next bit implementation ++ * ++ * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved. ++ * Written by David Howells (dhowells@redhat.com) ++ */ ++ ++/** ++ * find_next_bit - find the next set bit in a memory region ++ * @addr: The address to base the search on ++ * @offset: The bitnumber to start searching at ++ * @size: The maximum size to search ++ */ ++unsigned long find_next_bit(const unsigned long *addr, unsigned long size, ++ unsigned long offset) ++{ ++ const unsigned long *p = addr + BITOP_WORD(offset); ++ unsigned long result = offset & ~(BITS_PER_LONG-1); ++ unsigned long tmp; ++ ++ if (offset >= size) ++ return size; ++ size -= result; ++ offset %= BITS_PER_LONG; ++ if (offset) { ++ tmp = *(p++); ++ tmp &= (~0UL << offset); ++ if (size < BITS_PER_LONG) ++ goto found_first; ++ if (tmp) ++ goto found_middle; ++ size -= BITS_PER_LONG; ++ result += BITS_PER_LONG; ++ } ++ while (size & ~(BITS_PER_LONG-1)) { ++ if ((tmp = *(p++))) ++ goto found_middle; ++ result += BITS_PER_LONG; ++ size -= BITS_PER_LONG; ++ } ++ if (!size) ++ return result; ++ tmp = *p; ++ ++found_first: ++ tmp &= (~0UL >> (BITS_PER_LONG - size)); ++ if (tmp == 0UL) /* Are any bits set? */ ++ return result + size; /* Nope. */ ++found_middle: ++ return result + ffs(tmp); ++} ++ ++size_t _kc_strlcpy(char *dest, const char *src, size_t size) ++{ ++ size_t ret = strlen(src); ++ ++ if (size) { ++ size_t len = (ret >= size) ? 
size - 1 : ret; ++ memcpy(dest, src, len); ++ dest[len] = '\0'; ++ } ++ return ret; ++} ++ ++#ifndef do_div ++#if BITS_PER_LONG == 32 ++uint32_t __attribute__((weak)) _kc__div64_32(uint64_t *n, uint32_t base) ++{ ++ uint64_t rem = *n; ++ uint64_t b = base; ++ uint64_t res, d = 1; ++ uint32_t high = rem >> 32; ++ ++ /* Reduce the thing a bit first */ ++ res = 0; ++ if (high >= base) { ++ high /= base; ++ res = (uint64_t) high << 32; ++ rem -= (uint64_t) (high*base) << 32; ++ } ++ ++ while ((int64_t)b > 0 && b < rem) { ++ b = b+b; ++ d = d+d; ++ } ++ ++ do { ++ if (rem >= b) { ++ rem -= b; ++ res += d; ++ } ++ b >>= 1; ++ d >>= 1; ++ } while (d); ++ ++ *n = res; ++ return rem; ++} ++#endif /* BITS_PER_LONG == 32 */ ++#endif /* do_div */ ++#endif /* 2.6.0 => 2.4.6 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...) ++{ ++ va_list args; ++ int i; ++ ++ va_start(args, fmt); ++ i = vsnprintf(buf, size, fmt, args); ++ va_end(args); ++ return (i >= size) ? 
(size - 1) : i; ++} ++#endif /* < 2.6.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) ++DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES) = {1}; ++#endif /* < 2.6.10 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) ++char *_kc_kstrdup(const char *s, unsigned int gfp) ++{ ++ size_t len; ++ char *buf; ++ ++ if (!s) ++ return NULL; ++ ++ len = strlen(s) + 1; ++ buf = kmalloc(len, gfp); ++ if (buf) ++ memcpy(buf, s, len); ++ return buf; ++} ++#endif /* < 2.6.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) ++void *_kc_kzalloc(size_t size, int flags) ++{ ++ void *ret = kmalloc(size, flags); ++ if (ret) ++ memset(ret, 0, size); ++ return ret; ++} ++#endif /* <= 2.6.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) ++int _kc_skb_pad(struct sk_buff *skb, int pad) ++{ ++ int ntail; ++ ++ /* If the skbuff is non linear tailroom is always zero.. 
*/ ++ if(!skb_cloned(skb) && skb_tailroom(skb) >= pad) { ++ memset(skb->data+skb->len, 0, pad); ++ return 0; ++ } ++ ++ ntail = skb->data_len + pad - (skb->end - skb->tail); ++ if (likely(skb_cloned(skb) || ntail > 0)) { ++ if (pskb_expand_head(skb, 0, ntail, GFP_ATOMIC)) ++ goto free_skb; ++ } ++ ++#ifdef MAX_SKB_FRAGS ++ if (skb_is_nonlinear(skb) && ++ !__pskb_pull_tail(skb, skb->data_len)) ++ goto free_skb; ++ ++#endif ++ memset(skb->data + skb->len, 0, pad); ++ return 0; ++ ++free_skb: ++ kfree_skb(skb); ++ return -ENOMEM; ++} ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) ++int _kc_pci_save_state(struct pci_dev *pdev) ++{ ++ struct adapter_struct *adapter = pci_get_drvdata(pdev); ++ int size = PCI_CONFIG_SPACE_LEN, i; ++ u16 pcie_cap_offset, pcie_link_status; ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) ++ /* no ->dev for 2.4 kernels */ ++ WARN_ON(pdev->dev.driver_data == NULL); ++#endif ++ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); ++ if (pcie_cap_offset) { ++ if (!pci_read_config_word(pdev, ++ pcie_cap_offset + PCIE_LINK_STATUS, ++ &pcie_link_status)) ++ size = PCIE_CONFIG_SPACE_LEN; ++ } ++ pci_config_space_ich8lan(); ++#ifdef HAVE_PCI_ERS ++ if (adapter->config_space == NULL) ++#else ++ WARN_ON(adapter->config_space != NULL); ++#endif ++ adapter->config_space = kmalloc(size, GFP_KERNEL); ++ if (!adapter->config_space) { ++ printk(KERN_ERR "Out of memory in pci_save_state\n"); ++ return -ENOMEM; ++ } ++ for (i = 0; i < (size / 4); i++) ++ pci_read_config_dword(pdev, i * 4, &adapter->config_space[i]); ++ return 0; ++} ++ ++void _kc_pci_restore_state(struct pci_dev *pdev) ++{ ++ struct adapter_struct *adapter = pci_get_drvdata(pdev); ++ int size = PCI_CONFIG_SPACE_LEN, i; ++ u16 pcie_cap_offset; ++ u16 pcie_link_status; ++ ++ if (adapter->config_space != NULL) { ++ pcie_cap_offset = pci_find_capability(pdev, PCI_CAP_ID_EXP); ++ if (pcie_cap_offset && ++ !pci_read_config_word(pdev, ++ 
pcie_cap_offset + PCIE_LINK_STATUS, ++ &pcie_link_status)) ++ size = PCIE_CONFIG_SPACE_LEN; ++ ++ pci_config_space_ich8lan(); ++ for (i = 0; i < (size / 4); i++) ++ pci_write_config_dword(pdev, i * 4, adapter->config_space[i]); ++#ifndef HAVE_PCI_ERS ++ kfree(adapter->config_space); ++ adapter->config_space = NULL; ++#endif ++ } ++} ++#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ ++ ++#ifdef HAVE_PCI_ERS ++void _kc_free_netdev(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ ++ kfree(adapter->config_space); ++#ifdef CONFIG_SYSFS ++ if (netdev->reg_state == NETREG_UNINITIALIZED) { ++ kfree((char *)netdev - netdev->padded); ++ } else { ++ BUG_ON(netdev->reg_state != NETREG_UNREGISTERED); ++ netdev->reg_state = NETREG_RELEASED; ++ class_device_put(&netdev->class_dev); ++ } ++#else ++ kfree((char *)netdev - netdev->padded); ++#endif ++} ++#endif ++ ++void *_kc_kmemdup(const void *src, size_t len, unsigned gfp) ++{ ++ void *p; ++ ++ p = kzalloc(len, gfp); ++ if (p) ++ memcpy(p, src, len); ++ return p; ++} ++#endif /* <= 2.6.19 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) ++struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev) ++{ ++ return ((struct adapter_struct *)netdev_priv(netdev))->pdev; ++} ++#endif /* < 2.6.21 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) ++/* hexdump code taken from lib/hexdump.c */ ++static void _kc_hex_dump_to_buffer(const void *buf, size_t len, int rowsize, ++ int groupsize, unsigned char *linebuf, ++ size_t linebuflen, bool ascii) ++{ ++ const u8 *ptr = buf; ++ u8 ch; ++ int j, lx = 0; ++ int ascii_column; ++ ++ if (rowsize != 16 && rowsize != 32) ++ rowsize = 16; ++ ++ if (!len) ++ goto nil; ++ if (len > rowsize) /* limit to one line at a time */ ++ len = rowsize; ++ if ((len % groupsize) != 0) /* no 
mixed size output */ ++ groupsize = 1; ++ ++ switch (groupsize) { ++ case 8: { ++ const u64 *ptr8 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%16.16llx", j ? " " : "", ++ (unsigned long long)*(ptr8 + j)); ++ ascii_column = 17 * ngroups + 2; ++ break; ++ } ++ ++ case 4: { ++ const u32 *ptr4 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%8.8x", j ? " " : "", *(ptr4 + j)); ++ ascii_column = 9 * ngroups + 2; ++ break; ++ } ++ ++ case 2: { ++ const u16 *ptr2 = buf; ++ int ngroups = len / groupsize; ++ ++ for (j = 0; j < ngroups; j++) ++ lx += scnprintf((char *)(linebuf + lx), linebuflen - lx, ++ "%s%4.4x", j ? " " : "", *(ptr2 + j)); ++ ascii_column = 5 * ngroups + 2; ++ break; ++ } ++ ++ default: ++ for (j = 0; (j < len) && (lx + 3) <= linebuflen; j++) { ++ ch = ptr[j]; ++ linebuf[lx++] = hex_asc(ch >> 4); ++ linebuf[lx++] = hex_asc(ch & 0x0f); ++ linebuf[lx++] = ' '; ++ } ++ if (j) ++ lx--; ++ ++ ascii_column = 3 * rowsize + 2; ++ break; ++ } ++ if (!ascii) ++ goto nil; ++ ++ while (lx < (linebuflen - 1) && lx < (ascii_column - 1)) ++ linebuf[lx++] = ' '; ++ for (j = 0; (j < len) && (lx + 2) < linebuflen; j++) ++ linebuf[lx++] = (isascii(ptr[j]) && isprint(ptr[j])) ? 
ptr[j] ++ : '.'; ++nil: ++ linebuf[lx++] = '\0'; ++} ++ ++void _kc_print_hex_dump(const char *level, ++ const char *prefix_str, int prefix_type, ++ int rowsize, int groupsize, ++ const void *buf, size_t len, bool ascii) ++{ ++ const u8 *ptr = buf; ++ int i, linelen, remaining = len; ++ unsigned char linebuf[200]; ++ ++ if (rowsize != 16 && rowsize != 32) ++ rowsize = 16; ++ ++ for (i = 0; i < len; i += rowsize) { ++ linelen = min(remaining, rowsize); ++ remaining -= rowsize; ++ _kc_hex_dump_to_buffer(ptr + i, linelen, rowsize, groupsize, ++ linebuf, sizeof(linebuf), ascii); ++ ++ switch (prefix_type) { ++ case DUMP_PREFIX_ADDRESS: ++ printk("%s%s%*p: %s\n", level, prefix_str, ++ (int)(2 * sizeof(void *)), ptr + i, linebuf); ++ break; ++ case DUMP_PREFIX_OFFSET: ++ printk("%s%s%.8x: %s\n", level, prefix_str, i, linebuf); ++ break; ++ default: ++ printk("%s%s%s\n", level, prefix_str, linebuf); ++ break; ++ } ++ } ++} ++ ++#endif /* < 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) ++int ixgbe_dcb_netlink_register(void) ++{ ++ return 0; ++} ++ ++int ixgbe_dcb_netlink_unregister(void) ++{ ++ return 0; ++} ++ ++int ixgbe_copy_dcb_cfg(struct ixgbe_adapter __always_unused *adapter, int __always_unused tc_max) ++{ ++ return 0; ++} ++#endif /* < 2.6.23 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) ++#ifdef NAPI ++struct net_device *napi_to_poll_dev(const struct napi_struct *napi) ++{ ++ struct adapter_q_vector *q_vector = container_of(napi, ++ struct adapter_q_vector, ++ napi); ++ return &q_vector->poll_dev; ++} ++ ++int __kc_adapter_clean(struct net_device *netdev, int *budget) ++{ ++ int work_done; ++ int work_to_do = min(*budget, netdev->quota); ++ /* kcompat.h netif_napi_add puts napi struct in "fake netdev->priv" */ ++ struct napi_struct *napi = netdev->priv; ++ work_done = 
napi->poll(napi, work_to_do); ++ *budget -= work_done; ++ netdev->quota -= work_done; ++ return (work_done >= work_to_do) ? 1 : 0; ++} ++#endif /* NAPI */ ++#endif /* <= 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) ++void _kc_pci_disable_link_state(struct pci_dev *pdev, int state) ++{ ++ struct pci_dev *parent = pdev->bus->self; ++ u16 link_state; ++ int pos; ++ ++ if (!parent) ++ return; ++ ++ pos = pci_find_capability(parent, PCI_CAP_ID_EXP); ++ if (pos) { ++ pci_read_config_word(parent, pos + PCI_EXP_LNKCTL, &link_state); ++ link_state &= ~state; ++ pci_write_config_word(parent, pos + PCI_EXP_LNKCTL, link_state); ++ } ++} ++#endif /* < 2.6.26 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) ++#ifdef HAVE_TX_MQ ++void _kc_netif_tx_stop_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_stop_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_stop_subqueue(netdev, i); ++} ++void _kc_netif_tx_wake_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_wake_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_wake_subqueue(netdev, i); ++} ++void _kc_netif_tx_start_all_queues(struct net_device *netdev) ++{ ++ struct adapter_struct *adapter = netdev_priv(netdev); ++ int i; ++ ++ netif_start_queue(netdev); ++ if (netif_is_multiqueue(netdev)) ++ for (i = 0; i < adapter->num_tx_queues; i++) ++ netif_start_subqueue(netdev, i); ++} ++#endif /* HAVE_TX_MQ */ ++ ++void __kc_warn_slowpath(const char *file, int line, const char *fmt, ...) 
++{ ++ va_list args; ++ ++ printk(KERN_WARNING "------------[ cut here ]------------\n"); ++ printk(KERN_WARNING "WARNING: at %s:%d \n", file, line); ++ va_start(args, fmt); ++ vprintk(fmt, args); ++ va_end(args); ++ ++ dump_stack(); ++} ++#endif /* __VMKLNX__ */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) ++ ++int ++_kc_pci_prepare_to_sleep(struct pci_dev *dev) ++{ ++ pci_power_t target_state; ++ int error; ++ ++ target_state = pci_choose_state(dev, PMSG_SUSPEND); ++ ++ pci_enable_wake(dev, target_state, true); ++ ++ error = pci_set_power_state(dev, target_state); ++ ++ if (error) ++ pci_enable_wake(dev, target_state, false); ++ ++ return error; ++} ++ ++int ++_kc_pci_wake_from_d3(struct pci_dev *dev, bool enable) ++{ ++ int err; ++ ++ err = pci_enable_wake(dev, PCI_D3cold, enable); ++ if (err) ++ goto out; ++ ++ err = pci_enable_wake(dev, PCI_D3hot, enable); ++ ++out: ++ return err; ++} ++#endif /* < 2.6.28 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) ++static void __kc_pci_set_master(struct pci_dev *pdev, bool enable) ++{ ++ u16 old_cmd, cmd; ++ ++ pci_read_config_word(pdev, PCI_COMMAND, &old_cmd); ++ if (enable) ++ cmd = old_cmd | PCI_COMMAND_MASTER; ++ else ++ cmd = old_cmd & ~PCI_COMMAND_MASTER; ++ if (cmd != old_cmd) { ++ dev_dbg(pci_dev_to_dev(pdev), "%s bus mastering\n", ++ enable ? 
"enabling" : "disabling"); ++ pci_write_config_word(pdev, PCI_COMMAND, cmd); ++ } ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,7) ) ++ pdev->is_busmaster = enable; ++#endif ++} ++ ++void _kc_pci_clear_master(struct pci_dev *dev) ++{ ++ __kc_pci_set_master(dev, false); ++} ++#endif /* < 2.6.29 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) ++#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) ++int _kc_pci_num_vf(struct pci_dev __maybe_unused *dev) ++{ ++ int num_vf = 0; ++#ifdef CONFIG_PCI_IOV ++ struct pci_dev *vfdev; ++ ++ /* loop through all ethernet devices starting at PF dev */ ++ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, NULL); ++ while (vfdev) { ++ if (vfdev->is_virtfn && vfdev->physfn == dev) ++ num_vf++; ++ ++ vfdev = pci_get_class(PCI_CLASS_NETWORK_ETHERNET << 8, vfdev); ++ } ++ ++#endif ++ return num_vf; ++} ++#endif /* RHEL_RELEASE_CODE */ ++#endif /* < 2.6.34 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) ++#ifdef HAVE_TX_MQ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) ++#ifndef CONFIG_NETDEVICES_MULTIQUEUE ++int _kc_netif_set_real_num_tx_queues(struct net_device *dev, unsigned int txq) ++{ ++ unsigned int real_num = dev->real_num_tx_queues; ++ struct Qdisc *qdisc; ++ int i; ++ ++ if (txq < 1 || txq > dev->num_tx_queues) ++ return -EINVAL; ++ ++ else if (txq > real_num) ++ dev->real_num_tx_queues = txq; ++ else if (txq < real_num) { ++ dev->real_num_tx_queues = txq; ++ for (i = txq; i < dev->num_tx_queues; i++) { ++ qdisc = netdev_get_tx_queue(dev, i)->qdisc; ++ if (qdisc) { ++ spin_lock_bh(qdisc_lock(qdisc)); ++ qdisc_reset(qdisc); ++ spin_unlock_bh(qdisc_lock(qdisc)); ++ } ++ } ++ } ++ ++ return 0; ++} ++#endif /* CONFIG_NETDEVICES_MULTIQUEUE */ ++#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ ++#endif /* HAVE_TX_MQ */ ++ ++ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, ++ const void __user *from, size_t count) ++{ ++ loff_t pos 
= *ppos; ++ size_t res; ++ ++ if (pos < 0) ++ return -EINVAL; ++ if (pos >= available || !count) ++ return 0; ++ if (count > available - pos) ++ count = available - pos; ++ res = copy_from_user(to + pos, from, count); ++ if (res == count) ++ return -EFAULT; ++ count -= res; ++ *ppos = pos + count; ++ return count; ++} ++ ++#endif /* < 2.6.35 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) ++static const u32 _kc_flags_dup_features = ++ (ETH_FLAG_LRO | ETH_FLAG_NTUPLE | ETH_FLAG_RXHASH); ++ ++u32 _kc_ethtool_op_get_flags(struct net_device *dev) ++{ ++ return dev->features & _kc_flags_dup_features; ++} ++ ++int _kc_ethtool_op_set_flags(struct net_device *dev, u32 data, u32 supported) ++{ ++ if (data & ~supported) ++ return -EINVAL; ++ ++ dev->features = ((dev->features & ~_kc_flags_dup_features) | ++ (data & _kc_flags_dup_features)); ++ return 0; ++} ++#endif /* < 2.6.36 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) ++#ifdef HAVE_NETDEV_SELECT_QUEUE ++#include ++#include ++ ++u16 ___kc_skb_tx_hash(struct net_device *dev, const struct sk_buff *skb, ++ u16 num_tx_queues) ++{ ++ u32 hash; ++ u16 qoffset = 0; ++ u16 qcount = num_tx_queues; ++ ++ if (skb_rx_queue_recorded(skb)) { ++ hash = skb_get_rx_queue(skb); ++ while (unlikely(hash >= num_tx_queues)) ++ hash -= num_tx_queues; ++ return hash; ++ } ++ ++ if (netdev_get_num_tc(dev)) { ++ struct adapter_struct *kc_adapter = netdev_priv(dev); ++ ++ if (skb->priority == TC_PRIO_CONTROL) { ++ qoffset = kc_adapter->dcb_tc - 1; ++ } else { ++ qoffset = skb->vlan_tci; ++ qoffset &= IXGBE_TX_FLAGS_VLAN_PRIO_MASK; ++ qoffset >>= 13; ++ } ++ ++ qcount = kc_adapter->ring_feature[RING_F_RSS].indices; ++ qoffset *= qcount; ++ } ++ ++ if (skb->sk && skb->sk->sk_hash) ++ 
hash = skb->sk->sk_hash; ++ else ++#ifdef NETIF_F_RXHASH ++ hash = (__force u16) skb->protocol ^ skb->rxhash; ++#else ++ hash = skb->protocol; ++#endif ++ ++ hash = jhash_1word(hash, _kc_hashrnd); ++ ++ return (u16) (((u64) hash * qcount) >> 32) + qoffset; ++} ++#endif /* HAVE_NETDEV_SELECT_QUEUE */ ++ ++u8 _kc_netdev_get_num_tc(struct net_device *dev) ++{ ++ struct adapter_struct *kc_adapter = netdev_priv(dev); ++ if (kc_adapter->flags & IXGBE_FLAG_DCB_ENABLED) ++ return kc_adapter->dcb_tc; ++ else ++ return 0; ++} ++ ++int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc) ++{ ++ struct adapter_struct *kc_adapter = netdev_priv(dev); ++ ++ if (num_tc > IXGBE_DCB_MAX_TRAFFIC_CLASS) ++ return -EINVAL; ++ ++ kc_adapter->dcb_tc = num_tc; ++ ++ return 0; ++} ++ ++u8 _kc_netdev_get_prio_tc_map(struct net_device __maybe_unused *dev, u8 __maybe_unused up) ++{ ++ struct adapter_struct *kc_adapter = netdev_priv(dev); ++ ++ return ixgbe_dcb_get_tc_from_up(&kc_adapter->dcb_cfg, 0, up); ++} ++ ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ ++#endif /* < 2.6.39 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) ++void _kc_skb_add_rx_frag(struct sk_buff *skb, int i, struct page *page, ++ int off, int size, unsigned int truesize) ++{ ++ skb_fill_page_desc(skb, i, page, off, size); ++ skb->len += size; ++ skb->data_len += size; ++ skb->truesize += truesize; ++} ++ ++#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++int _kc_simple_open(struct inode *inode, struct file *file) ++{ ++ if (inode->i_private) ++ file->private_data = inode->i_private; ++ ++ return 0; ++} ++#endif /* SLE_VERSION < 11,3,0 */ ++ ++#endif /* < 3.4.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) ++static inline int __kc_pcie_cap_version(struct pci_dev *dev) ++{ ++ int pos; ++ u16 
reg16; ++ ++ pos = pci_find_capability(dev, PCI_CAP_ID_EXP); ++ if (!pos) ++ return 0; ++ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, ®16); ++ return reg16 & PCI_EXP_FLAGS_VERS; ++} ++ ++static inline bool __kc_pcie_cap_has_devctl(const struct pci_dev __always_unused *dev) ++{ ++ return true; ++} ++ ++static inline bool __kc_pcie_cap_has_lnkctl(struct pci_dev *dev) ++{ ++ int type = pci_pcie_type(dev); ++ ++ return __kc_pcie_cap_version(dev) > 1 || ++ type == PCI_EXP_TYPE_ROOT_PORT || ++ type == PCI_EXP_TYPE_ENDPOINT || ++ type == PCI_EXP_TYPE_LEG_END; ++} ++ ++static inline bool __kc_pcie_cap_has_sltctl(struct pci_dev *dev) ++{ ++ int type = pci_pcie_type(dev); ++ int pos; ++ u16 pcie_flags_reg; ++ ++ pos = pci_find_capability(dev, PCI_CAP_ID_EXP); ++ if (!pos) ++ return false; ++ pci_read_config_word(dev, pos + PCI_EXP_FLAGS, &pcie_flags_reg); ++ ++ return __kc_pcie_cap_version(dev) > 1 || ++ type == PCI_EXP_TYPE_ROOT_PORT || ++ (type == PCI_EXP_TYPE_DOWNSTREAM && ++ pcie_flags_reg & PCI_EXP_FLAGS_SLOT); ++} ++ ++static inline bool __kc_pcie_cap_has_rtctl(struct pci_dev *dev) ++{ ++ int type = pci_pcie_type(dev); ++ ++ return __kc_pcie_cap_version(dev) > 1 || ++ type == PCI_EXP_TYPE_ROOT_PORT || ++ type == PCI_EXP_TYPE_RC_EC; ++} ++ ++static bool __kc_pcie_capability_reg_implemented(struct pci_dev *dev, int pos) ++{ ++ if (!pci_is_pcie(dev)) ++ return false; ++ ++ switch (pos) { ++ case PCI_EXP_FLAGS_TYPE: ++ return true; ++ case PCI_EXP_DEVCAP: ++ case PCI_EXP_DEVCTL: ++ case PCI_EXP_DEVSTA: ++ return __kc_pcie_cap_has_devctl(dev); ++ case PCI_EXP_LNKCAP: ++ case PCI_EXP_LNKCTL: ++ case PCI_EXP_LNKSTA: ++ return __kc_pcie_cap_has_lnkctl(dev); ++ case PCI_EXP_SLTCAP: ++ case PCI_EXP_SLTCTL: ++ case PCI_EXP_SLTSTA: ++ return __kc_pcie_cap_has_sltctl(dev); ++ case PCI_EXP_RTCTL: ++ case PCI_EXP_RTCAP: ++ case PCI_EXP_RTSTA: ++ return __kc_pcie_cap_has_rtctl(dev); ++ case PCI_EXP_DEVCAP2: ++ case PCI_EXP_DEVCTL2: ++ case PCI_EXP_LNKCAP2: ++ case PCI_EXP_LNKCTL2: 
++ case PCI_EXP_LNKSTA2: ++ return __kc_pcie_cap_version(dev) > 1; ++ default: ++ return false; ++ } ++} ++ ++/* ++ * Note that these accessor functions are only for the "PCI Express ++ * Capability" (see PCIe spec r3.0, sec 7.8). They do not apply to the ++ * other "PCI Express Extended Capabilities" (AER, VC, ACS, MFVC, etc.) ++ */ ++int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val) ++{ ++ int ret; ++ ++ *val = 0; ++ if (pos & 1) ++ return -EINVAL; ++ ++ if (__kc_pcie_capability_reg_implemented(dev, pos)) { ++ ret = pci_read_config_word(dev, pci_pcie_cap(dev) + pos, val); ++ /* ++ * Reset *val to 0 if pci_read_config_word() fails, it may ++ * have been written as 0xFFFF if hardware error happens ++ * during pci_read_config_word(). ++ */ ++ if (ret) ++ *val = 0; ++ return ret; ++ } ++ ++ /* ++ * For Functions that do not implement the Slot Capabilities, ++ * Slot Status, and Slot Control registers, these spaces must ++ * be hardwired to 0b, with the exception of the Presence Detect ++ * State bit in the Slot Status register of Downstream Ports, ++ * which must be hardwired to 1b. 
(PCIe Base Spec 3.0, sec 7.8) ++ */ ++ if (pci_is_pcie(dev) && pos == PCI_EXP_SLTSTA && ++ pci_pcie_type(dev) == PCI_EXP_TYPE_DOWNSTREAM) { ++ *val = PCI_EXP_SLTSTA_PDS; ++ } ++ ++ return 0; ++} ++ ++int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val) ++{ ++ if (pos & 1) ++ return -EINVAL; ++ ++ if (!__kc_pcie_capability_reg_implemented(dev, pos)) ++ return 0; ++ ++ return pci_write_config_word(dev, pci_pcie_cap(dev) + pos, val); ++} ++ ++int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, ++ u16 clear, u16 set) ++{ ++ int ret; ++ u16 val; ++ ++ ret = __kc_pcie_capability_read_word(dev, pos, &val); ++ if (!ret) { ++ val &= ~clear; ++ val |= set; ++ ret = __kc_pcie_capability_write_word(dev, pos, val); ++ } ++ ++ return ret; ++} ++ ++int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, ++ u16 clear) ++{ ++ return __kc_pcie_capability_clear_and_set_word(dev, pos, clear, 0); ++} ++#endif /* < 3.7.0 */ ++ ++/****************************************************************************** ++ * ripped from linux/net/ipv6/exthdrs_core.c, GPL2, no direct copyright, ++ * inferred copyright from kernel ++ */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) ++int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, ++ int target, unsigned short *fragoff, int *flags) ++{ ++ unsigned int start = skb_network_offset(skb) + sizeof(struct ipv6hdr); ++ u8 nexthdr = ipv6_hdr(skb)->nexthdr; ++ unsigned int len; ++ bool found; ++ ++#define __KC_IP6_FH_F_FRAG BIT(0) ++#define __KC_IP6_FH_F_AUTH BIT(1) ++#define __KC_IP6_FH_F_SKIP_RH BIT(2) ++ ++ if (fragoff) ++ *fragoff = 0; ++ ++ if (*offset) { ++ struct ipv6hdr _ip6, *ip6; ++ ++ ip6 = skb_header_pointer(skb, *offset, sizeof(_ip6), &_ip6); ++ if (!ip6 || (ip6->version != 6)) { ++ printk(KERN_ERR "IPv6 header not found\n"); ++ return -EBADMSG; ++ } ++ start = *offset + sizeof(struct ipv6hdr); ++ nexthdr = ip6->nexthdr; ++ } ++ len = skb->len - start; ++ ++ do { 
++ struct ipv6_opt_hdr _hdr, *hp; ++ unsigned int hdrlen; ++ found = (nexthdr == target); ++ ++ if ((!ipv6_ext_hdr(nexthdr)) || nexthdr == NEXTHDR_NONE) { ++ if (target < 0 || found) ++ break; ++ return -ENOENT; ++ } ++ ++ hp = skb_header_pointer(skb, start, sizeof(_hdr), &_hdr); ++ if (!hp) ++ return -EBADMSG; ++ ++ if (nexthdr == NEXTHDR_ROUTING) { ++ struct ipv6_rt_hdr _rh, *rh; ++ ++ rh = skb_header_pointer(skb, start, sizeof(_rh), ++ &_rh); ++ if (!rh) ++ return -EBADMSG; ++ ++ if (flags && (*flags & __KC_IP6_FH_F_SKIP_RH) && ++ rh->segments_left == 0) ++ found = false; ++ } ++ ++ if (nexthdr == NEXTHDR_FRAGMENT) { ++ unsigned short _frag_off; ++ __be16 *fp; ++ ++ if (flags) /* Indicate that this is a fragment */ ++ *flags |= __KC_IP6_FH_F_FRAG; ++ fp = skb_header_pointer(skb, ++ start+offsetof(struct frag_hdr, ++ frag_off), ++ sizeof(_frag_off), ++ &_frag_off); ++ if (!fp) ++ return -EBADMSG; ++ ++ _frag_off = ntohs(*fp) & ~0x7; ++ if (_frag_off) { ++ if (target < 0 && ++ ((!ipv6_ext_hdr(hp->nexthdr)) || ++ hp->nexthdr == NEXTHDR_NONE)) { ++ if (fragoff) ++ *fragoff = _frag_off; ++ return hp->nexthdr; ++ } ++ return -ENOENT; ++ } ++ hdrlen = 8; ++ } else if (nexthdr == NEXTHDR_AUTH) { ++ if (flags && (*flags & __KC_IP6_FH_F_AUTH) && (target < 0)) ++ break; ++ hdrlen = (hp->hdrlen + 2) << 2; ++ } else ++ hdrlen = ipv6_optlen(hp); ++ ++ if (!found) { ++ nexthdr = hp->nexthdr; ++ len -= hdrlen; ++ start += hdrlen; ++ } ++ } while (!found); ++ ++ *offset = start; ++ return nexthdr; ++} ++#endif /* < 3.8.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) ++#ifdef CONFIG_XPS ++#if NR_CPUS < 64 ++#define _KC_MAX_XPS_CPUS NR_CPUS ++#else ++#define _KC_MAX_XPS_CPUS 64 ++#endif ++ ++/* ++ * netdev_queue sysfs structures and functions. 
++ */ ++struct _kc_netdev_queue_attribute { ++ struct attribute attr; ++ ssize_t (*show)(struct netdev_queue *queue, ++ struct _kc_netdev_queue_attribute *attr, char *buf); ++ ssize_t (*store)(struct netdev_queue *queue, ++ struct _kc_netdev_queue_attribute *attr, const char *buf, size_t len); ++}; ++ ++#define to_kc_netdev_queue_attr(_attr) container_of(_attr, \ ++ struct _kc_netdev_queue_attribute, attr) ++ ++int __kc_netif_set_xps_queue(struct net_device *dev, const struct cpumask *mask, ++ u16 index) ++{ ++ struct netdev_queue *txq = netdev_get_tx_queue(dev, index); ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) ++ /* Redhat requires some odd extended netdev structures */ ++ struct netdev_tx_queue_extended *txq_ext = ++ netdev_extended(dev)->_tx_ext + index; ++ struct kobj_type *ktype = txq_ext->kobj.ktype; ++#else ++ struct kobj_type *ktype = txq->kobj.ktype; ++#endif ++ struct _kc_netdev_queue_attribute *xps_attr; ++ struct attribute *attr = NULL; ++ int i, len, err; ++#define _KC_XPS_BUFLEN (DIV_ROUND_UP(_KC_MAX_XPS_CPUS, 32) * 9) ++ char buf[_KC_XPS_BUFLEN]; ++ ++ if (!ktype) ++ return -ENOMEM; ++ ++ /* attempt to locate the XPS attribute in the Tx queue */ ++ for (i = 0; (attr = ktype->default_attrs[i]); i++) { ++ if (!strcmp("xps_cpus", attr->name)) ++ break; ++ } ++ ++ /* if we did not find it return an error */ ++ if (!attr) ++ return -EINVAL; ++ ++ /* copy the mask into a string */ ++ len = bitmap_scnprintf(buf, _KC_XPS_BUFLEN, ++ cpumask_bits(mask), _KC_MAX_XPS_CPUS); ++ if (!len) ++ return -ENOMEM; ++ ++ xps_attr = to_kc_netdev_queue_attr(attr); ++ ++ /* Store the XPS value using the SYSFS store call */ ++ err = xps_attr->store(txq, xps_attr, buf, len); ++ ++ /* we only had an error on err < 0 */ ++ return (err < 0) ? 
err : 0; ++} ++#endif /* CONFIG_XPS */ ++#ifdef HAVE_NETDEV_SELECT_QUEUE ++static inline int kc_get_xps_queue(struct net_device *dev, struct sk_buff *skb) ++{ ++#ifdef CONFIG_XPS ++ struct xps_dev_maps *dev_maps; ++ struct xps_map *map; ++ int queue_index = -1; ++ ++ rcu_read_lock(); ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) ++ /* Redhat requires some odd extended netdev structures */ ++ dev_maps = rcu_dereference(netdev_extended(dev)->xps_maps); ++#else ++ dev_maps = rcu_dereference(dev->xps_maps); ++#endif ++ if (dev_maps) { ++ map = rcu_dereference( ++ dev_maps->cpu_map[raw_smp_processor_id()]); ++ if (map) { ++ if (map->len == 1) ++ queue_index = map->queues[0]; ++ else { ++ u32 hash; ++ if (skb->sk && skb->sk->sk_hash) ++ hash = skb->sk->sk_hash; ++ else ++ hash = (__force u16) skb->protocol ^ ++ skb->rxhash; ++ hash = jhash_1word(hash, _kc_hashrnd); ++ queue_index = map->queues[ ++ ((u64)hash * map->len) >> 32]; ++ } ++ if (unlikely(queue_index >= dev->real_num_tx_queues)) ++ queue_index = -1; ++ } ++ } ++ rcu_read_unlock(); ++ ++ return queue_index; ++#else ++ struct adapter_struct *kc_adapter = netdev_priv(dev); ++ int queue_index = -1; ++ ++ if (kc_adapter->flags & IXGBE_FLAG_FDIR_HASH_CAPABLE) { ++ queue_index = skb_rx_queue_recorded(skb) ? 
++ skb_get_rx_queue(skb) : ++ smp_processor_id(); ++ while (unlikely(queue_index >= dev->real_num_tx_queues)) ++ queue_index -= dev->real_num_tx_queues; ++ return queue_index; ++ } ++ ++ return -1; ++#endif ++} ++ ++u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb) ++{ ++ struct sock *sk = skb->sk; ++ int queue_index = sk_tx_queue_get(sk); ++ int new_index; ++ ++ if (queue_index >= 0 && queue_index < dev->real_num_tx_queues) { ++#ifdef CONFIG_XPS ++ if (!skb->ooo_okay) ++#endif ++ return queue_index; ++ } ++ ++ new_index = kc_get_xps_queue(dev, skb); ++ if (new_index < 0) ++ new_index = skb_tx_hash(dev, skb); ++ ++ if (queue_index != new_index && sk) { ++ struct dst_entry *dst = ++ rcu_dereference(sk->sk_dst_cache); ++ ++ if (dst && skb_dst(skb) == dst) ++ sk_tx_queue_set(sk, new_index); ++ ++ } ++ ++ return new_index; ++} ++ ++#endif /* HAVE_NETDEV_SELECT_QUEUE */ ++#endif /* 3.9.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++#ifdef HAVE_FDB_OPS ++#ifdef USE_CONST_DEV_UC_CHAR ++int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, const unsigned char *addr, ++ u16 flags) ++#else ++int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr, u16 flags) ++#endif ++{ ++ int err = -EINVAL; ++ ++ /* If aging addresses are supported device will need to ++ * implement its own handler for this. 
++ */ ++ if (ndm->ndm_state && !(ndm->ndm_state & NUD_PERMANENT)) { ++ pr_info("%s: FDB only supports static addresses\n", dev->name); ++ return err; ++ } ++ ++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) ++ err = dev_uc_add_excl(dev, addr); ++ else if (is_multicast_ether_addr(addr)) ++ err = dev_mc_add_excl(dev, addr); ++ ++ /* Only return duplicate errors if NLM_F_EXCL is set */ ++ if (err == -EEXIST && !(flags & NLM_F_EXCL)) ++ err = 0; ++ ++ return err; ++} ++ ++#ifdef USE_CONST_DEV_UC_CHAR ++#ifdef HAVE_FDB_DEL_NLATTR ++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, const unsigned char *addr) ++#else ++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ const unsigned char *addr) ++#endif ++#else ++int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr) ++#endif ++{ ++ int err = -EINVAL; ++ ++ /* If aging addresses are supported device will need to ++ * implement its own handler for this. 
++ */ ++ if (!(ndm->ndm_state & NUD_PERMANENT)) { ++ pr_info("%s: FDB only supports static addresses\n", dev->name); ++ return err; ++ } ++ ++ if (is_unicast_ether_addr(addr) || is_link_local_ether_addr(addr)) ++ err = dev_uc_del(dev, addr); ++ else if (is_multicast_ether_addr(addr)) ++ err = dev_mc_del(dev, addr); ++ ++ return err; ++} ++ ++#endif /* HAVE_FDB_OPS */ ++#ifdef CONFIG_PCI_IOV ++int __kc_pci_vfs_assigned(struct pci_dev __maybe_unused *dev) ++{ ++ unsigned int vfs_assigned = 0; ++#ifdef HAVE_PCI_DEV_FLAGS_ASSIGNED ++ int pos; ++ struct pci_dev *vfdev; ++ unsigned short dev_id; ++ ++ /* only search if we are a PF */ ++ if (!dev->is_physfn) ++ return 0; ++ ++ /* find SR-IOV capability */ ++ pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_SRIOV); ++ if (!pos) ++ return 0; ++ ++ /* ++ * determine the device ID for the VFs, the vendor ID will be the ++ * same as the PF so there is no need to check for that one ++ */ ++ pci_read_config_word(dev, pos + PCI_SRIOV_VF_DID, &dev_id); ++ ++ /* loop through all the VFs to see if we own any that are assigned */ ++ vfdev = pci_get_device(dev->vendor, dev_id, NULL); ++ while (vfdev) { ++ /* ++ * It is considered assigned if it is a virtual function with ++ * our dev as the physical function and the assigned bit is set ++ */ ++ if (vfdev->is_virtfn && (vfdev->physfn == dev) && ++ (vfdev->dev_flags & PCI_DEV_FLAGS_ASSIGNED)) ++ vfs_assigned++; ++ ++ vfdev = pci_get_device(dev->vendor, dev_id, vfdev); ++ } ++ ++#endif /* HAVE_PCI_DEV_FLAGS_ASSIGNED */ ++ return vfs_assigned; ++} ++ ++#endif /* CONFIG_PCI_IOV */ ++#endif /* 3.10.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) ++const unsigned char pcie_link_speed[] = { ++ PCI_SPEED_UNKNOWN, /* 0 */ ++ PCIE_SPEED_2_5GT, /* 1 */ ++ PCIE_SPEED_5_0GT, /* 2 */ ++ PCIE_SPEED_8_0GT, /* 3 */ ++ PCI_SPEED_UNKNOWN, /* 4 */ ++ PCI_SPEED_UNKNOWN, /* 5 */ ++ PCI_SPEED_UNKNOWN, /* 6 */ ++ 
PCI_SPEED_UNKNOWN, /* 7 */ ++ PCI_SPEED_UNKNOWN, /* 8 */ ++ PCI_SPEED_UNKNOWN, /* 9 */ ++ PCI_SPEED_UNKNOWN, /* A */ ++ PCI_SPEED_UNKNOWN, /* B */ ++ PCI_SPEED_UNKNOWN, /* C */ ++ PCI_SPEED_UNKNOWN, /* D */ ++ PCI_SPEED_UNKNOWN, /* E */ ++ PCI_SPEED_UNKNOWN /* F */ ++}; ++ ++int __kc_pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed, ++ enum pcie_link_width *width) ++{ ++ int ret; ++ ++ *speed = PCI_SPEED_UNKNOWN; ++ *width = PCIE_LNK_WIDTH_UNKNOWN; ++ ++ while (dev) { ++ u16 lnksta; ++ enum pci_bus_speed next_speed; ++ enum pcie_link_width next_width; ++ ++ ret = pcie_capability_read_word(dev, PCI_EXP_LNKSTA, &lnksta); ++ if (ret) ++ return ret; ++ ++ next_speed = pcie_link_speed[lnksta & PCI_EXP_LNKSTA_CLS]; ++ next_width = (lnksta & PCI_EXP_LNKSTA_NLW) >> ++ PCI_EXP_LNKSTA_NLW_SHIFT; ++ ++ if (next_speed < *speed) ++ *speed = next_speed; ++ ++ if (next_width < *width) ++ *width = next_width; ++ ++ dev = dev->bus->self; ++ } ++ ++ return 0; ++} ++ ++#endif ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) ++int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask) ++{ ++ int err = dma_set_mask(dev, mask); ++ ++ if (!err) ++ /* coherent mask for the same size will always succeed if ++ * dma_set_mask does. However we store the error anyways, due ++ * to some kernels which use gcc's warn_unused_result on their ++ * definition of dma_set_coherent_mask. 
++ */ ++ err = dma_set_coherent_mask(dev, mask); ++ return err; ++} ++ ++void __kc_netdev_rss_key_fill_ixgbe(void *buffer, size_t len) ++{ ++ /* Set of random keys generated using kernel random number generator */ ++ static const u8 seed[NETDEV_RSS_KEY_LEN] = {0xE6, 0xFA, 0x35, 0x62, ++ 0x95, 0x12, 0x3E, 0xA3, 0xFB, 0x46, 0xC1, 0x5F, ++ 0xB1, 0x43, 0x82, 0x5B, 0x6A, 0x49, 0x50, 0x95, ++ 0xCD, 0xAB, 0xD8, 0x11, 0x8F, 0xC5, 0xBD, 0xBC, ++ 0x6A, 0x4A, 0xB2, 0xD4, 0x1F, 0xFE, 0xBC, 0x41, ++ 0xBF, 0xAC, 0xB2, 0x9A, 0x8F, 0x70, 0xE9, 0x2A, ++ 0xD7, 0xB2, 0x80, 0xB6, 0x5B, 0xAA, 0x9D, 0x20}; ++ ++ BUG_ON(len > NETDEV_RSS_KEY_LEN); ++ memcpy(buffer, seed, len); ++} ++#endif /* 3.13.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) ++int __kc_pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, ++ int minvec, int maxvec) ++{ ++ int nvec = maxvec; ++ int rc; ++ ++ if (maxvec < minvec) ++ return -ERANGE; ++ ++ do { ++ rc = pci_enable_msix(dev, entries, nvec); ++ if (rc < 0) { ++ return rc; ++ } else if (rc > 0) { ++ if (rc < minvec) ++ return -ENOSPC; ++ nvec = rc; ++ } ++ } while (rc); ++ ++ return nvec; ++} ++#endif /* 3.14.0 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)) ++char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp) ++{ ++ size_t size; ++ char *buf; ++ ++ if (!s) ++ return NULL; ++ ++ size = strlen(s) + 1; ++ buf = devm_kzalloc(dev, size, gfp); ++ if (buf) ++ memcpy(buf, s, size); ++ return buf; ++} ++#endif /* 3.15.0 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++int __kc_hw_addr_sync_dev(struct netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct netdev_hw_addr *ha, *tmp; ++ int err; ++ ++ /* first go through and flush out any stale entries */ ++ list_for_each_entry_safe(ha, tmp, &list->list, list) 
{ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ if (!ha->synced || ha->refcount != 1) ++#else ++ if (!ha->sync_cnt || ha->refcount != 1) ++#endif ++ continue; ++ ++ if (unsync && unsync(dev, ha->addr)) ++ continue; ++ ++ list_del_rcu(&ha->list); ++ kfree_rcu(ha, rcu_head); ++ list->count--; ++ } ++ ++ /* go through and sync new entries to the list */ ++ list_for_each_entry_safe(ha, tmp, &list->list, list) { ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ if (ha->synced) ++#else ++ if (ha->sync_cnt) ++#endif ++ continue; ++ ++ err = sync(dev, ha->addr); ++ if (err) ++ return err; ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ ha->synced = true; ++#else ++ ha->sync_cnt++; ++#endif ++ ha->refcount++; ++ } ++ ++ return 0; ++} ++ ++void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct netdev_hw_addr *ha, *tmp; ++ ++ list_for_each_entry_safe(ha, tmp, &list->list, list) { ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ if (!ha->synced) ++#else ++ if (!ha->sync_cnt) ++#endif ++ continue; ++ ++ if (unsync && unsync(dev, ha->addr)) ++ continue; ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++ ha->synced = false; ++#else ++ ha->sync_cnt--; ++#endif ++ if (--ha->refcount) ++ continue; ++ ++ list_del_rcu(&ha->list); ++ kfree_rcu(ha, rcu_head); ++ list->count--; ++ } ++} ++ ++#endif /* NETDEV_HW_ADDR_T_UNICAST */ ++#ifndef NETDEV_HW_ADDR_T_MULTICAST ++int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct dev_addr_list *da, **next = list; ++ int err; ++ ++ /* first go through and flush out any stale entries */ ++ while ((da = *next) != NULL) { ++ if (da->da_synced && da->da_users == 1) { ++ if (!unsync || !unsync(dev, da->da_addr)) { ++ *next = 
da->next; ++ kfree(da); ++ (*count)--; ++ continue; ++ } ++ } ++ next = &da->next; ++ } ++ ++ /* go through and sync new entries to the list */ ++ for (da = *list; da != NULL; da = da->next) { ++ if (da->da_synced) ++ continue; ++ ++ err = sync(dev, da->da_addr); ++ if (err) ++ return err; ++ ++ da->da_synced++; ++ da->da_users++; ++ } ++ ++ return 0; ++} ++ ++void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)) ++{ ++ struct dev_addr_list *da; ++ ++ while ((da = *list) != NULL) { ++ if (da->da_synced) { ++ if (!unsync || !unsync(dev, da->da_addr)) { ++ da->da_synced--; ++ if (--da->da_users == 0) { ++ *list = da->next; ++ kfree(da); ++ (*count)--; ++ continue; ++ } ++ } ++ } ++ list = &da->next; ++ } ++} ++#endif /* NETDEV_HW_ADDR_T_MULTICAST */ ++#endif /* HAVE_SET_RX_MODE */ ++void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, ++ unsigned int gfp) ++{ ++ void *p; ++ ++ p = devm_kzalloc(dev, len, gfp); ++ if (p) ++ memcpy(p, src, len); ++ ++ return p; ++} ++#endif /* 3.16.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) ++#endif /* 3.17.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) ++#ifndef NO_PTP_SUPPORT ++static void __kc_sock_efree(struct sk_buff *skb) ++{ ++ sock_put(skb->sk); ++} ++ ++struct sk_buff *__kc_skb_clone_sk_ixgbe(struct sk_buff *skb) ++{ ++ struct sock *sk = skb->sk; ++ struct sk_buff *clone; ++ ++ if (!sk || !atomic_inc_not_zero(&sk->sk_refcnt)) ++ return NULL; ++ ++ clone = skb_clone(skb, GFP_ATOMIC); ++ if (!clone) { ++ sock_put(sk); ++ return NULL; ++ } ++ ++ clone->sk = sk; ++ clone->destructor = __kc_sock_efree; ++ ++ return clone; ++} ++ ++void __kc_skb_complete_tx_timestamp_ixgbe(struct sk_buff *skb, ++ struct 
skb_shared_hwtstamps *hwtstamps) ++{ ++ struct sock_exterr_skb *serr; ++ struct sock *sk = skb->sk; ++ int err; ++ ++ sock_hold(sk); ++ ++ *skb_hwtstamps(skb) = *hwtstamps; ++ ++ serr = SKB_EXT_ERR(skb); ++ memset(serr, 0, sizeof(*serr)); ++ serr->ee.ee_errno = ENOMSG; ++ serr->ee.ee_origin = SO_EE_ORIGIN_TIMESTAMPING; ++ ++ err = sock_queue_err_skb(sk, skb); ++ if (err) ++ kfree_skb(skb); ++ ++ sock_put(sk); ++} ++#endif ++ ++/* include headers needed for get_headlen function */ ++#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++#include ++#endif ++#ifdef HAVE_SCTP ++#include ++#endif ++ ++unsigned int __kc_eth_get_headlen_ixgbe(unsigned char *data, unsigned int max_len) ++{ ++ union { ++ unsigned char *network; ++ /* l2 headers */ ++ struct ethhdr *eth; ++ struct vlan_hdr *vlan; ++ /* l3 headers */ ++ struct iphdr *ipv4; ++ struct ipv6hdr *ipv6; ++ } hdr; ++ __be16 proto; ++ u8 nexthdr = 0; /* default to not TCP */ ++ u8 hlen; ++ ++ /* this should never happen, but better safe than sorry */ ++ if (max_len < ETH_HLEN) ++ return max_len; ++ ++ /* initialize network frame pointer */ ++ hdr.network = data; ++ ++ /* set first protocol and move network header forward */ ++ proto = hdr.eth->h_proto; ++ hdr.network += ETH_HLEN; ++ ++again: ++ switch (proto) { ++ /* handle any vlan tag if present */ ++ case __constant_htons(ETH_P_8021AD): ++ case __constant_htons(ETH_P_8021Q): ++ if ((hdr.network - data) > (max_len - VLAN_HLEN)) ++ return max_len; ++ ++ proto = hdr.vlan->h_vlan_encapsulated_proto; ++ hdr.network += VLAN_HLEN; ++ goto again; ++ /* handle L3 protocols */ ++ case __constant_htons(ETH_P_IP): ++ if ((hdr.network - data) > (max_len - sizeof(struct iphdr))) ++ return max_len; ++ ++ /* access ihl as a u8 to avoid unaligned access on ia64 */ ++ hlen = (hdr.network[0] & 0x0F) << 2; ++ ++ /* verify hlen meets minimum size requirements */ ++ if (hlen < sizeof(struct iphdr)) ++ return hdr.network - data; ++ ++ /* record next protocol if header is present */ 
++ if (!(hdr.ipv4->frag_off & htons(IP_OFFSET))) ++ nexthdr = hdr.ipv4->protocol; ++ ++ hdr.network += hlen; ++ break; ++#ifdef NETIF_F_TSO6 ++ case __constant_htons(ETH_P_IPV6): ++ if ((hdr.network - data) > (max_len - sizeof(struct ipv6hdr))) ++ return max_len; ++ ++ /* record next protocol */ ++ nexthdr = hdr.ipv6->nexthdr; ++ hdr.network += sizeof(struct ipv6hdr); ++ break; ++#endif /* NETIF_F_TSO6 */ ++#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++ case __constant_htons(ETH_P_FCOE): ++ hdr.network += FCOE_HEADER_LEN; ++ break; ++#endif ++ default: ++ return hdr.network - data; ++ } ++ ++ /* finally sort out L4 */ ++ switch (nexthdr) { ++ case IPPROTO_TCP: ++ if ((hdr.network - data) > (max_len - sizeof(struct tcphdr))) ++ return max_len; ++ ++ /* access doff as a u8 to avoid unaligned access on ia64 */ ++ hdr.network += max_t(u8, sizeof(struct tcphdr), ++ (hdr.network[12] & 0xF0) >> 2); ++ ++ break; ++ case IPPROTO_UDP: ++ case IPPROTO_UDPLITE: ++ hdr.network += sizeof(struct udphdr); ++ break; ++#ifdef HAVE_SCTP ++ case IPPROTO_SCTP: ++ hdr.network += sizeof(struct sctphdr); ++ break; ++#endif ++ } ++ ++ /* ++ * If everything has gone correctly hdr.network should be the ++ * data section of the packet and will be the end of the header. ++ * If not then it probably represents the end of the last recognized ++ * header. 
++ */ ++ return min_t(unsigned int, hdr.network - data, max_len); ++} ++ ++#endif /* < 3.18.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) ++#ifdef HAVE_NET_GET_RANDOM_ONCE ++static u8 __kc_netdev_rss_key[NETDEV_RSS_KEY_LEN]; ++ ++void __kc_netdev_rss_key_fill_ixgbe(void *buffer, size_t len) ++{ ++ BUG_ON(len > sizeof(__kc_netdev_rss_key)); ++ net_get_random_once(__kc_netdev_rss_key, sizeof(__kc_netdev_rss_key)); ++ memcpy(buffer, __kc_netdev_rss_key, len); ++} ++#endif ++#endif ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ) ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) ++#ifdef CONFIG_SPARC ++#include ++#include ++#endif ++int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, ++ u8 *mac_addr __maybe_unused) ++{ ++#if (((LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0)) && defined(CONFIG_OF) && \ ++ !defined(HAVE_STRUCT_DEVICE_OF_NODE) || !defined(CONFIG_OF)) && \ ++ !defined(CONFIG_SPARC)) ++ return -ENODEV; ++#else ++ const unsigned char *addr; ++ struct device_node *dp; ++ ++ if (dev_is_pci(dev)) ++ dp = pci_device_to_OF_node(to_pci_dev(dev)); ++ else ++#if defined(HAVE_STRUCT_DEVICE_OF_NODE) && defined(CONFIG_OF) ++ dp = dev->of_node; ++#else ++ dp = NULL; ++#endif ++ ++ addr = NULL; ++ if (dp) ++ addr = of_get_mac_address(dp); ++#ifdef CONFIG_SPARC ++ /* Kernel hasn't implemented arch_get_platform_mac_address, but we ++ * should handle the SPARC case here since it was supported ++ * originally. This is replaced by arch_get_platform_mac_address() ++ * upstream. 
++ */ ++ if (!addr) ++ addr = idprom->id_ethaddr; ++#endif ++ if (!addr) ++ return -ENODEV; ++ ++ ether_addr_copy(mac_addr, addr); ++ return 0; ++#endif ++} ++#endif /* !(RHEL_RELEASE >= 7.3) */ ++#endif +diff --git a/drivers/net/ethernet/intel/ixgbe/kcompat.h b/drivers/net/ethernet/intel/ixgbe/kcompat.h +new file mode 100644 +index 0000000..7d3298f +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/kcompat.h +@@ -0,0 +1,5610 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. ++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. 
Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++#ifndef _KCOMPAT_H_ ++#define _KCOMPAT_H_ ++ ++#ifndef LINUX_VERSION_CODE ++#include ++#else ++#define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c)) ++#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifndef NSEC_PER_MSEC ++#define NSEC_PER_MSEC 1000000L ++#endif ++#include ++/* UTS_RELEASE is in a different header starting in kernel 2.6.18 */ ++#ifndef UTS_RELEASE ++/* utsrelease.h changed locations in 2.6.33 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) ++#include ++#else ++#include ++#endif ++#endif ++ ++/* NAPI enable/disable flags here */ ++#define NAPI ++ ++#define adapter_struct ixgbe_adapter ++#define adapter_q_vector ixgbe_q_vector ++ ++/* and finally set defines so that the code sees the changes */ ++#ifdef NAPI ++#else ++#endif /* NAPI */ ++ ++/* Dynamic LTR and deeper C-State support disable/enable */ ++ ++/* packet split disable/enable */ ++#ifdef DISABLE_PACKET_SPLIT ++#ifndef CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++#define CONFIG_IXGBE_DISABLE_PACKET_SPLIT ++#endif ++#endif /* DISABLE_PACKET_SPLIT */ ++ ++/* MSI compatibility code for all kernels and drivers */ ++#ifdef DISABLE_PCI_MSI ++#undef CONFIG_PCI_MSI ++#endif ++#ifndef CONFIG_PCI_MSI ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) ++struct msix_entry { ++ u16 vector; /* kernel uses to write allocated vector */ ++ u16 entry; /* driver uses to specify entry, OS writes */ ++}; ++#endif ++#undef pci_enable_msi ++#define pci_enable_msi(a) -ENOTSUPP ++#undef pci_disable_msi ++#define pci_disable_msi(a) do {} while (0) ++#undef pci_enable_msix ++#define pci_enable_msix(a, b, c) -ENOTSUPP ++#undef pci_disable_msix 
++#define pci_disable_msix(a) do {} while (0) ++#define msi_remove_pci_irq_vectors(a) do {} while (0) ++#endif /* CONFIG_PCI_MSI */ ++#ifdef DISABLE_PM ++#undef CONFIG_PM ++#endif ++ ++#ifdef DISABLE_NET_POLL_CONTROLLER ++#undef CONFIG_NET_POLL_CONTROLLER ++#endif ++ ++#ifndef PMSG_SUSPEND ++#define PMSG_SUSPEND 3 ++#endif ++ ++/* generic boolean compatibility */ ++#undef TRUE ++#undef FALSE ++#define TRUE true ++#define FALSE false ++#ifdef GCC_VERSION ++#if ( GCC_VERSION < 3000 ) ++#define _Bool char ++#endif ++#else ++#define _Bool char ++#endif ++ ++#undef __always_unused ++#define __always_unused __attribute__((__unused__)) ++ ++#undef __maybe_unused ++#define __maybe_unused __attribute__((__unused__)) ++ ++/* kernels less than 2.4.14 don't have this */ ++#ifndef ETH_P_8021Q ++#define ETH_P_8021Q 0x8100 ++#endif ++ ++#ifndef module_param ++#define module_param(v,t,p) MODULE_PARM(v, "i"); ++#endif ++ ++#ifndef DMA_64BIT_MASK ++#define DMA_64BIT_MASK 0xffffffffffffffffULL ++#endif ++ ++#ifndef DMA_32BIT_MASK ++#define DMA_32BIT_MASK 0x00000000ffffffffULL ++#endif ++ ++#ifndef PCI_CAP_ID_EXP ++#define PCI_CAP_ID_EXP 0x10 ++#endif ++ ++#ifndef uninitialized_var ++#define uninitialized_var(x) x = x ++#endif ++ ++#ifndef PCIE_LINK_STATE_L0S ++#define PCIE_LINK_STATE_L0S 1 ++#endif ++#ifndef PCIE_LINK_STATE_L1 ++#define PCIE_LINK_STATE_L1 2 ++#endif ++ ++#ifndef mmiowb ++#ifdef CONFIG_IA64 ++#define mmiowb() asm volatile ("mf.a" ::: "memory") ++#else ++#define mmiowb() ++#endif ++#endif ++ ++#ifndef SET_NETDEV_DEV ++#define SET_NETDEV_DEV(net, pdev) ++#endif ++ ++#if !defined(HAVE_FREE_NETDEV) && ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) ++#define free_netdev(x) kfree(x) ++#endif ++ ++#ifdef HAVE_POLL_CONTROLLER ++#define CONFIG_NET_POLL_CONTROLLER ++#endif ++ ++#ifndef SKB_DATAREF_SHIFT ++/* if we do not have the infrastructure to detect if skb_header is cloned ++ just return false in all cases */ ++#define skb_header_cloned(x) 0 ++#endif ++ ++#ifndef 
NETIF_F_GSO ++#define gso_size tso_size ++#define gso_segs tso_segs ++#endif ++ ++#ifndef NETIF_F_GRO ++#define vlan_gro_receive(_napi, _vlgrp, _vlan, _skb) \ ++ vlan_hwaccel_receive_skb(_skb, _vlgrp, _vlan) ++#define napi_gro_receive(_napi, _skb) netif_receive_skb(_skb) ++#endif ++ ++#ifndef NETIF_F_SCTP_CSUM ++#define NETIF_F_SCTP_CSUM 0 ++#endif ++ ++#ifndef NETIF_F_LRO ++#define NETIF_F_LRO (1 << 15) ++#endif ++ ++#ifndef NETIF_F_NTUPLE ++#define NETIF_F_NTUPLE (1 << 27) ++#endif ++ ++#ifndef NETIF_F_ALL_FCOE ++#define NETIF_F_ALL_FCOE (NETIF_F_FCOE_CRC | NETIF_F_FCOE_MTU | \ ++ NETIF_F_FSO) ++#endif ++ ++#ifndef IPPROTO_SCTP ++#define IPPROTO_SCTP 132 ++#endif ++ ++#ifndef IPPROTO_UDPLITE ++#define IPPROTO_UDPLITE 136 ++#endif ++ ++#ifndef CHECKSUM_PARTIAL ++#define CHECKSUM_PARTIAL CHECKSUM_HW ++#define CHECKSUM_COMPLETE CHECKSUM_HW ++#endif ++ ++#ifndef __read_mostly ++#define __read_mostly ++#endif ++ ++#ifndef MII_RESV1 ++#define MII_RESV1 0x17 /* Reserved... */ ++#endif ++ ++#ifndef unlikely ++#define unlikely(_x) _x ++#define likely(_x) _x ++#endif ++ ++#ifndef WARN_ON ++#define WARN_ON(x) ++#endif ++ ++#ifndef PCI_DEVICE ++#define PCI_DEVICE(vend,dev) \ ++ .vendor = (vend), .device = (dev), \ ++ .subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID ++#endif ++ ++#ifndef node_online ++#define node_online(node) ((node) == 0) ++#endif ++ ++#ifndef num_online_cpus ++#define num_online_cpus() smp_num_cpus ++#endif ++ ++#ifndef cpu_online ++#define cpu_online(cpuid) test_bit((cpuid), &cpu_online_map) ++#endif ++ ++#ifndef _LINUX_RANDOM_H ++#include ++#endif ++ ++#ifndef DECLARE_BITMAP ++#ifndef BITS_TO_LONGS ++#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG) ++#endif ++#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)] ++#endif ++ ++#ifndef VLAN_HLEN ++#define VLAN_HLEN 4 ++#endif ++ ++#ifndef VLAN_ETH_HLEN ++#define VLAN_ETH_HLEN 18 ++#endif ++ ++#ifndef VLAN_ETH_FRAME_LEN ++#define VLAN_ETH_FRAME_LEN 1518 ++#endif ++ ++#ifndef 
DCA_GET_TAG_TWO_ARGS ++#define dca3_get_tag(a,b) dca_get_tag(b) ++#endif ++ ++#ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#if defined(__i386__) || defined(__x86_64__) ++#define CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#endif ++#endif ++ ++/* taken from 2.6.24 definition in linux/kernel.h */ ++#ifndef IS_ALIGNED ++#define IS_ALIGNED(x,a) (((x) % ((typeof(x))(a))) == 0) ++#endif ++ ++#ifdef IS_ENABLED ++#undef IS_ENABLED ++#undef __ARG_PLACEHOLDER_1 ++#undef config_enabled ++#undef _config_enabled ++#undef __config_enabled ++#undef ___config_enabled ++#endif ++ ++#define __ARG_PLACEHOLDER_1 0, ++#define config_enabled(cfg) _config_enabled(cfg) ++#define _config_enabled(value) __config_enabled(__ARG_PLACEHOLDER_##value) ++#define __config_enabled(arg1_or_junk) ___config_enabled(arg1_or_junk 1, 0) ++#define ___config_enabled(__ignored, val, ...) val ++ ++#define IS_ENABLED(option) \ ++ (config_enabled(option) || config_enabled(option##_MODULE)) ++ ++#if !defined(NETIF_F_HW_VLAN_TX) && !defined(NETIF_F_HW_VLAN_CTAG_TX) ++struct _kc_vlan_ethhdr { ++ unsigned char h_dest[ETH_ALEN]; ++ unsigned char h_source[ETH_ALEN]; ++ __be16 h_vlan_proto; ++ __be16 h_vlan_TCI; ++ __be16 h_vlan_encapsulated_proto; ++}; ++#define vlan_ethhdr _kc_vlan_ethhdr ++struct _kc_vlan_hdr { ++ __be16 h_vlan_TCI; ++ __be16 h_vlan_encapsulated_proto; ++}; ++#define vlan_hdr _kc_vlan_hdr ++#define vlan_tx_tag_present(_skb) 0 ++#define vlan_tx_tag_get(_skb) 0 ++#endif /* NETIF_F_HW_VLAN_TX && NETIF_F_HW_VLAN_CTAG_TX */ ++ ++#ifndef VLAN_PRIO_SHIFT ++#define VLAN_PRIO_SHIFT 13 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS_2_5GB ++#define PCI_EXP_LNKSTA_CLS_2_5GB 0x0001 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS_5_0GB ++#define PCI_EXP_LNKSTA_CLS_5_0GB 0x0002 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_CLS_8_0GB ++#define PCI_EXP_LNKSTA_CLS_8_0GB 0x0003 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X1 ++#define PCI_EXP_LNKSTA_NLW_X1 0x0010 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X2 ++#define PCI_EXP_LNKSTA_NLW_X2 
0x0020 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X4 ++#define PCI_EXP_LNKSTA_NLW_X4 0x0040 ++#endif ++ ++#ifndef PCI_EXP_LNKSTA_NLW_X8 ++#define PCI_EXP_LNKSTA_NLW_X8 0x0080 ++#endif ++ ++#ifndef __GFP_COLD ++#define __GFP_COLD 0 ++#endif ++ ++#ifndef __GFP_COMP ++#define __GFP_COMP 0 ++#endif ++ ++#ifndef IP_OFFSET ++#define IP_OFFSET 0x1FFF /* "Fragment Offset" part */ ++#endif ++ ++/*****************************************************************************/ ++/* Installations with ethtool version without eeprom, adapter id, or statistics ++ * support */ ++ ++#ifndef ETH_GSTRING_LEN ++#define ETH_GSTRING_LEN 32 ++#endif ++ ++#ifndef ETHTOOL_GSTATS ++#define ETHTOOL_GSTATS 0x1d ++#undef ethtool_drvinfo ++#define ethtool_drvinfo k_ethtool_drvinfo ++struct k_ethtool_drvinfo { ++ u32 cmd; ++ char driver[32]; ++ char version[32]; ++ char fw_version[32]; ++ char bus_info[32]; ++ char reserved1[32]; ++ char reserved2[16]; ++ u32 n_stats; ++ u32 testinfo_len; ++ u32 eedump_len; ++ u32 regdump_len; ++}; ++ ++struct ethtool_stats { ++ u32 cmd; ++ u32 n_stats; ++ u64 data[0]; ++}; ++#endif /* ETHTOOL_GSTATS */ ++ ++#ifndef ETHTOOL_PHYS_ID ++#define ETHTOOL_PHYS_ID 0x1c ++#endif /* ETHTOOL_PHYS_ID */ ++ ++#ifndef ETHTOOL_GSTRINGS ++#define ETHTOOL_GSTRINGS 0x1b ++enum ethtool_stringset { ++ ETH_SS_TEST = 0, ++ ETH_SS_STATS, ++}; ++struct ethtool_gstrings { ++ u32 cmd; /* ETHTOOL_GSTRINGS */ ++ u32 string_set; /* string set id e.c. 
ETH_SS_TEST, etc*/ ++ u32 len; /* number of strings in the string set */ ++ u8 data[0]; ++}; ++#endif /* ETHTOOL_GSTRINGS */ ++ ++#ifndef ETHTOOL_TEST ++#define ETHTOOL_TEST 0x1a ++enum ethtool_test_flags { ++ ETH_TEST_FL_OFFLINE = (1 << 0), ++ ETH_TEST_FL_FAILED = (1 << 1), ++}; ++struct ethtool_test { ++ u32 cmd; ++ u32 flags; ++ u32 reserved; ++ u32 len; ++ u64 data[0]; ++}; ++#endif /* ETHTOOL_TEST */ ++ ++#ifndef ETHTOOL_GEEPROM ++#define ETHTOOL_GEEPROM 0xb ++#undef ETHTOOL_GREGS ++struct ethtool_eeprom { ++ u32 cmd; ++ u32 magic; ++ u32 offset; ++ u32 len; ++ u8 data[0]; ++}; ++ ++struct ethtool_value { ++ u32 cmd; ++ u32 data; ++}; ++#endif /* ETHTOOL_GEEPROM */ ++ ++#ifndef ETHTOOL_GLINK ++#define ETHTOOL_GLINK 0xa ++#endif /* ETHTOOL_GLINK */ ++ ++#ifndef ETHTOOL_GWOL ++#define ETHTOOL_GWOL 0x5 ++#define ETHTOOL_SWOL 0x6 ++#define SOPASS_MAX 6 ++struct ethtool_wolinfo { ++ u32 cmd; ++ u32 supported; ++ u32 wolopts; ++ u8 sopass[SOPASS_MAX]; /* SecureOn(tm) password */ ++}; ++#endif /* ETHTOOL_GWOL */ ++ ++#ifndef ETHTOOL_GREGS ++#define ETHTOOL_GREGS 0x00000004 /* Get NIC registers */ ++#define ethtool_regs _kc_ethtool_regs ++/* for passing big chunks of data */ ++struct _kc_ethtool_regs { ++ u32 cmd; ++ u32 version; /* driver-specific, indicates different chips/revs */ ++ u32 len; /* bytes */ ++ u8 data[0]; ++}; ++#endif /* ETHTOOL_GREGS */ ++ ++#ifndef ETHTOOL_GMSGLVL ++#define ETHTOOL_GMSGLVL 0x00000007 /* Get driver message level */ ++#endif ++#ifndef ETHTOOL_SMSGLVL ++#define ETHTOOL_SMSGLVL 0x00000008 /* Set driver msg level, priv. 
*/ ++#endif ++#ifndef ETHTOOL_NWAY_RST ++#define ETHTOOL_NWAY_RST 0x00000009 /* Restart autonegotiation, priv */ ++#endif ++#ifndef ETHTOOL_GLINK ++#define ETHTOOL_GLINK 0x0000000a /* Get link status */ ++#endif ++#ifndef ETHTOOL_GEEPROM ++#define ETHTOOL_GEEPROM 0x0000000b /* Get EEPROM data */ ++#endif ++#ifndef ETHTOOL_SEEPROM ++#define ETHTOOL_SEEPROM 0x0000000c /* Set EEPROM data */ ++#endif ++#ifndef ETHTOOL_GCOALESCE ++#define ETHTOOL_GCOALESCE 0x0000000e /* Get coalesce config */ ++/* for configuring coalescing parameters of chip */ ++#define ethtool_coalesce _kc_ethtool_coalesce ++struct _kc_ethtool_coalesce { ++ u32 cmd; /* ETHTOOL_{G,S}COALESCE */ ++ ++ /* How many usecs to delay an RX interrupt after ++ * a packet arrives. If 0, only rx_max_coalesced_frames ++ * is used. ++ */ ++ u32 rx_coalesce_usecs; ++ ++ /* How many packets to delay an RX interrupt after ++ * a packet arrives. If 0, only rx_coalesce_usecs is ++ * used. It is illegal to set both usecs and max frames ++ * to zero as this would cause RX interrupts to never be ++ * generated. ++ */ ++ u32 rx_max_coalesced_frames; ++ ++ /* Same as above two parameters, except that these values ++ * apply while an IRQ is being serviced by the host. Not ++ * all cards support this feature and the values are ignored ++ * in that case. ++ */ ++ u32 rx_coalesce_usecs_irq; ++ u32 rx_max_coalesced_frames_irq; ++ ++ /* How many usecs to delay a TX interrupt after ++ * a packet is sent. If 0, only tx_max_coalesced_frames ++ * is used. ++ */ ++ u32 tx_coalesce_usecs; ++ ++ /* How many packets to delay a TX interrupt after ++ * a packet is sent. If 0, only tx_coalesce_usecs is ++ * used. It is illegal to set both usecs and max frames ++ * to zero as this would cause TX interrupts to never be ++ * generated. ++ */ ++ u32 tx_max_coalesced_frames; ++ ++ /* Same as above two parameters, except that these values ++ * apply while an IRQ is being serviced by the host. 
Not ++ * all cards support this feature and the values are ignored ++ * in that case. ++ */ ++ u32 tx_coalesce_usecs_irq; ++ u32 tx_max_coalesced_frames_irq; ++ ++ /* How many usecs to delay in-memory statistics ++ * block updates. Some drivers do not have an in-memory ++ * statistic block, and in such cases this value is ignored. ++ * This value must not be zero. ++ */ ++ u32 stats_block_coalesce_usecs; ++ ++ /* Adaptive RX/TX coalescing is an algorithm implemented by ++ * some drivers to improve latency under low packet rates and ++ * improve throughput under high packet rates. Some drivers ++ * only implement one of RX or TX adaptive coalescing. Anything ++ * not implemented by the driver causes these values to be ++ * silently ignored. ++ */ ++ u32 use_adaptive_rx_coalesce; ++ u32 use_adaptive_tx_coalesce; ++ ++ /* When the packet rate (measured in packets per second) ++ * is below pkt_rate_low, the {rx,tx}_*_low parameters are ++ * used. ++ */ ++ u32 pkt_rate_low; ++ u32 rx_coalesce_usecs_low; ++ u32 rx_max_coalesced_frames_low; ++ u32 tx_coalesce_usecs_low; ++ u32 tx_max_coalesced_frames_low; ++ ++ /* When the packet rate is below pkt_rate_high but above ++ * pkt_rate_low (both measured in packets per second) the ++ * normal {rx,tx}_* coalescing parameters are used. ++ */ ++ ++ /* When the packet rate is (measured in packets per second) ++ * is above pkt_rate_high, the {rx,tx}_*_high parameters are ++ * used. ++ */ ++ u32 pkt_rate_high; ++ u32 rx_coalesce_usecs_high; ++ u32 rx_max_coalesced_frames_high; ++ u32 tx_coalesce_usecs_high; ++ u32 tx_max_coalesced_frames_high; ++ ++ /* How often to do adaptive coalescing packet rate sampling, ++ * measured in seconds. Must not be zero. ++ */ ++ u32 rate_sample_interval; ++}; ++#endif /* ETHTOOL_GCOALESCE */ ++ ++#ifndef ETHTOOL_SCOALESCE ++#define ETHTOOL_SCOALESCE 0x0000000f /* Set coalesce config. 
*/ ++#endif ++#ifndef ETHTOOL_GRINGPARAM ++#define ETHTOOL_GRINGPARAM 0x00000010 /* Get ring parameters */ ++/* for configuring RX/TX ring parameters */ ++#define ethtool_ringparam _kc_ethtool_ringparam ++struct _kc_ethtool_ringparam { ++ u32 cmd; /* ETHTOOL_{G,S}RINGPARAM */ ++ ++ /* Read only attributes. These indicate the maximum number ++ * of pending RX/TX ring entries the driver will allow the ++ * user to set. ++ */ ++ u32 rx_max_pending; ++ u32 rx_mini_max_pending; ++ u32 rx_jumbo_max_pending; ++ u32 tx_max_pending; ++ ++ /* Values changeable by the user. The valid values are ++ * in the range 1 to the "*_max_pending" counterpart above. ++ */ ++ u32 rx_pending; ++ u32 rx_mini_pending; ++ u32 rx_jumbo_pending; ++ u32 tx_pending; ++}; ++#endif /* ETHTOOL_GRINGPARAM */ ++ ++#ifndef ETHTOOL_SRINGPARAM ++#define ETHTOOL_SRINGPARAM 0x00000011 /* Set ring parameters, priv. */ ++#endif ++#ifndef ETHTOOL_GPAUSEPARAM ++#define ETHTOOL_GPAUSEPARAM 0x00000012 /* Get pause parameters */ ++/* for configuring link flow control parameters */ ++#define ethtool_pauseparam _kc_ethtool_pauseparam ++struct _kc_ethtool_pauseparam { ++ u32 cmd; /* ETHTOOL_{G,S}PAUSEPARAM */ ++ ++ /* If the link is being auto-negotiated (via ethtool_cmd.autoneg ++ * being true) the user may set 'autoneg' here non-zero to have the ++ * pause parameters be auto-negotiated too. In such a case, the ++ * {rx,tx}_pause values below determine what capabilities are ++ * advertised. ++ * ++ * If 'autoneg' is zero or the link is not being auto-negotiated, ++ * then {rx,tx}_pause force the driver to use/not-use pause ++ * flow control. ++ */ ++ u32 autoneg; ++ u32 rx_pause; ++ u32 tx_pause; ++}; ++#endif /* ETHTOOL_GPAUSEPARAM */ ++ ++#ifndef ETHTOOL_SPAUSEPARAM ++#define ETHTOOL_SPAUSEPARAM 0x00000013 /* Set pause parameters. 
*/ ++#endif ++#ifndef ETHTOOL_GRXCSUM ++#define ETHTOOL_GRXCSUM 0x00000014 /* Get RX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_SRXCSUM ++#define ETHTOOL_SRXCSUM 0x00000015 /* Set RX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_GTXCSUM ++#define ETHTOOL_GTXCSUM 0x00000016 /* Get TX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_STXCSUM ++#define ETHTOOL_STXCSUM 0x00000017 /* Set TX hw csum enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_GSG ++#define ETHTOOL_GSG 0x00000018 /* Get scatter-gather enable ++ * (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_SSG ++#define ETHTOOL_SSG 0x00000019 /* Set scatter-gather enable ++ * (ethtool_value). */ ++#endif ++#ifndef ETHTOOL_TEST ++#define ETHTOOL_TEST 0x0000001a /* execute NIC self-test, priv. */ ++#endif ++#ifndef ETHTOOL_GSTRINGS ++#define ETHTOOL_GSTRINGS 0x0000001b /* get specified string set */ ++#endif ++#ifndef ETHTOOL_PHYS_ID ++#define ETHTOOL_PHYS_ID 0x0000001c /* identify the NIC */ ++#endif ++#ifndef ETHTOOL_GSTATS ++#define ETHTOOL_GSTATS 0x0000001d /* get NIC-specific statistics */ ++#endif ++#ifndef ETHTOOL_GTSO ++#define ETHTOOL_GTSO 0x0000001e /* Get TSO enable (ethtool_value) */ ++#endif ++#ifndef ETHTOOL_STSO ++#define ETHTOOL_STSO 0x0000001f /* Set TSO enable (ethtool_value) */ ++#endif ++ ++#ifndef ETHTOOL_BUSINFO_LEN ++#define ETHTOOL_BUSINFO_LEN 32 ++#endif ++ ++#ifndef SPEED_2500 ++#define SPEED_2500 2500 ++#endif ++#ifndef SPEED_5000 ++#define SPEED_5000 5000 ++#endif ++#ifndef SPEED_25000 ++#define SPEED_25000 25000 ++#endif ++#ifndef SPEED_50000 ++#define SPEED_50000 50000 ++#endif ++#ifndef SPEED_100000 ++#define SPEED_100000 100000 ++#endif ++ ++#ifndef RHEL_RELEASE_VERSION ++#define RHEL_RELEASE_VERSION(a,b) (((a) << 8) + (b)) ++#endif ++#ifndef AX_RELEASE_VERSION ++#define AX_RELEASE_VERSION(a,b) (((a) << 8) + (b)) ++#endif ++ ++#ifndef AX_RELEASE_CODE ++#define AX_RELEASE_CODE 0 ++#endif ++ ++#if (AX_RELEASE_CODE && AX_RELEASE_CODE == 
AX_RELEASE_VERSION(3,0)) ++#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,0) ++#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,1)) ++#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,1) ++#elif (AX_RELEASE_CODE && AX_RELEASE_CODE == AX_RELEASE_VERSION(3,2)) ++#define RHEL_RELEASE_CODE RHEL_RELEASE_VERSION(5,3) ++#endif ++ ++#ifndef RHEL_RELEASE_CODE ++/* NOTE: RHEL_RELEASE_* introduced in RHEL4.5 */ ++#define RHEL_RELEASE_CODE 0 ++#endif ++ ++/* RHEL 7 didn't backport the parameter change in ++ * create_singlethread_workqueue. ++ * If/when RH corrects this we will want to tighten up the version check. ++ */ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) ++#undef create_singlethread_workqueue ++#define create_singlethread_workqueue(name) \ ++ alloc_ordered_workqueue("%s", WQ_MEM_RECLAIM, name) ++#endif ++ ++/* Ubuntu Release ABI is the 4th digit of their kernel version. You can find ++ * it in /usr/src/linux/$(uname -r)/include/generated/utsrelease.h for new ++ * enough versions of Ubuntu. Otherwise you can simply see it in the output of ++ * uname as the 4th digit of the kernel. The UTS_UBUNTU_RELEASE_ABI is not in ++ * the linux-source package, but in the linux-headers package. It begins to ++ * appear in later releases of 14.04 and 14.10. ++ * ++ * Ex: ++ * ++ * $uname -r ++ * 3.13.0-45-generic ++ * ABI is 45 ++ * ++ * ++ * $uname -r ++ * 3.16.0-23-generic ++ * ABI is 23 ++ */ ++#ifndef UTS_UBUNTU_RELEASE_ABI ++#define UTS_UBUNTU_RELEASE_ABI 0 ++#define UBUNTU_VERSION_CODE 0 ++#else ++/* Ubuntu does not provide actual release version macro, so we use the kernel ++ * version plus the ABI to generate a unique version code specific to Ubuntu. ++ * In addition, we mask the lower 8 bits of LINUX_VERSION_CODE in order to ++ * ignore differences in sublevel which are not important since we have the ++ * ABI value. Otherwise, it becomes impossible to correlate ABI to version for ++ * ordering checks. 
++ */ ++#define UBUNTU_VERSION_CODE (((~0xFF & LINUX_VERSION_CODE) << 8) + \ ++ UTS_UBUNTU_RELEASE_ABI) ++ ++#if UTS_UBUNTU_RELEASE_ABI > 255 ++#error UTS_UBUNTU_RELEASE_ABI is too large... ++#endif /* UTS_UBUNTU_RELEASE_ABI > 255 */ ++ ++#if ( LINUX_VERSION_CODE <= KERNEL_VERSION(3,0,0) ) ++/* Our version code scheme does not make sense for non 3.x or newer kernels, ++ * and we have no support in kcompat for this scenario. Thus, treat this as a ++ * non-Ubuntu kernel. Possibly might be better to error here. ++ */ ++#define UTS_UBUNTU_RELEASE_ABI 0 ++#define UBUNTU_VERSION_CODE 0 ++#endif ++ ++#endif ++ ++/* Note that the 3rd digit is always zero, and will be ignored. This is ++ * because Ubuntu kernels are based on x.y.0-ABI values, and while their linux ++ * version codes are 3 digit, this 3rd digit is superseded by the ABI value. ++ */ ++#define UBUNTU_VERSION(a,b,c,d) ((KERNEL_VERSION(a,b,0) << 8) + (d)) ++ ++/* SuSE version macros are the same as Linux kernel version macro */ ++#ifndef SLE_VERSION ++#define SLE_VERSION(a,b,c) KERNEL_VERSION(a,b,c) ++#endif ++#define SLE_LOCALVERSION(a,b,c) KERNEL_VERSION(a,b,c) ++#ifdef CONFIG_SUSE_KERNEL ++#if ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,27) ) ++/* SLES11 GA is 2.6.27 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,0,0) ++#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(2,6,32) ) ++/* SLES11 SP1 is 2.6.32 based */ ++#define SLE_VERSION_CODE SLE_VERSION(11,1,0) ++#elif ( LINUX_VERSION_CODE == KERNEL_VERSION(3,0,13) ) ++/* SLES11 SP2 GA is 3.0.13-0.27 */ ++#define SLE_VERSION_CODE SLE_VERSION(11,2,0) ++#elif ((LINUX_VERSION_CODE == KERNEL_VERSION(3,0,76))) ++/* SLES11 SP3 GA is 3.0.76-0.11 */ ++#define SLE_VERSION_CODE SLE_VERSION(11,3,0) ++#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,0,101)) ++ #if (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(0,8,0)) ++ /* some SLES11sp2 update kernels up to 3.0.101-0.7.x */ ++ #define SLE_VERSION_CODE SLE_VERSION(11,2,0) ++ #elif (SLE_LOCALVERSION_CODE < SLE_LOCALVERSION(63,0,0)) 
++ /* most SLES11sp3 update kernels */ ++ #define SLE_VERSION_CODE SLE_VERSION(11,3,0) ++ #else ++ /* SLES11 SP4 GA (3.0.101-63) and update kernels 3.0.101-63+ */ ++ #define SLE_VERSION_CODE SLE_VERSION(11,4,0) ++ #endif ++#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,28)) ++/* SLES12 GA is 3.12.28-4 ++ * kernel updates 3.12.xx-<33 through 52>[.yy] */ ++#define SLE_VERSION_CODE SLE_VERSION(12,0,0) ++#elif (LINUX_VERSION_CODE == KERNEL_VERSION(3,12,49)) ++/* SLES12 SP1 GA is 3.12.49-11 ++ * updates 3.12.xx-60.yy where xx={51..} */ ++#define SLE_VERSION_CODE SLE_VERSION(12,1,0) ++#elif (LINUX_VERSION_CODE == KERNEL_VERSION(4,4,21)) ++/* SLES12 SP2 GA is 4.4.21-69 */ ++#define SLE_VERSION_CODE SLE_VERSION(12,2,0) ++/* SLES12 SP3 Beta3 is 4.4.68-2 */ ++#elif (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,68)) ++#define SLE_VERSION_CODE SLE_VERSION(12,3,0) ++/* new SLES kernels must be added here with >= based on kernel ++ * the idea is to order from newest to oldest and just catch all ++ * of them using the >= ++ */ ++#endif /* LINUX_VERSION_CODE == KERNEL_VERSION(x,y,z) */ ++#endif /* CONFIG_SUSE_KERNEL */ ++#ifndef SLE_VERSION_CODE ++#define SLE_VERSION_CODE 0 ++#endif /* SLE_VERSION_CODE */ ++#ifndef SLE_LOCALVERSION_CODE ++#define SLE_LOCALVERSION_CODE 0 ++#endif /* SLE_LOCALVERSION_CODE */ ++ ++#ifdef __KLOCWORK__ ++/* The following are not compiled into the binary driver; they are here ++ * only to tune Klocwork scans to workaround false-positive issues. 
++ */ ++#ifdef ARRAY_SIZE ++#undef ARRAY_SIZE ++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ++#endif ++ ++#define memcpy(dest, src, len) memcpy_s(dest, len, src, len) ++ ++static inline int _kc_test_and_clear_bit(int nr, volatile unsigned long *addr) ++{ ++ unsigned long mask = BIT_MASK(nr); ++ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ++ unsigned long old; ++ unsigned long flags = 0; ++ ++ _atomic_spin_lock_irqsave(p, flags); ++ old = *p; ++ *p = old & ~mask; ++ _atomic_spin_unlock_irqrestore(p, flags); ++ ++ return (old & mask) != 0; ++} ++#define test_and_clear_bit(nr, addr) _kc_test_and_clear_bit(nr, addr) ++ ++static inline int _kc_test_and_set_bit(int nr, volatile unsigned long *addr) ++{ ++ unsigned long mask = BIT_MASK(nr); ++ unsigned long *p = ((unsigned long *)addr) + BIT_WORD(nr); ++ unsigned long old; ++ unsigned long flags = 0; ++ ++ _atomic_spin_lock_irqsave(p, flags); ++ old = *p; ++ *p = old | mask; ++ _atomic_spin_unlock_irqrestore(p, flags); ++ ++ return (old & mask) != 0; ++} ++#define test_and_set_bit(nr, addr) _kc_test_and_set_bit(nr, addr) ++ ++#ifdef CONFIG_DYNAMIC_DEBUG ++#undef dev_dbg ++#define dev_dbg(dev, format, arg...) 
dev_printk(KERN_DEBUG, dev, format, ##arg) ++#endif /* CONFIG_DYNAMIC_DEBUG */ ++ ++#endif /* __KLOCWORK__ */ ++ ++/*****************************************************************************/ ++/* 2.4.3 => 2.4.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,3) ) ++ ++/**************************************/ ++/* PCI DRIVER API */ ++ ++#ifndef pci_set_dma_mask ++#define pci_set_dma_mask _kc_pci_set_dma_mask ++extern int _kc_pci_set_dma_mask(struct pci_dev *dev, dma_addr_t mask); ++#endif ++ ++#ifndef pci_request_regions ++#define pci_request_regions _kc_pci_request_regions ++extern int _kc_pci_request_regions(struct pci_dev *pdev, char *res_name); ++#endif ++ ++#ifndef pci_release_regions ++#define pci_release_regions _kc_pci_release_regions ++extern void _kc_pci_release_regions(struct pci_dev *pdev); ++#endif ++ ++/**************************************/ ++/* NETWORK DRIVER API */ ++ ++#ifndef alloc_etherdev ++#define alloc_etherdev _kc_alloc_etherdev ++extern struct net_device * _kc_alloc_etherdev(int sizeof_priv); ++#endif ++ ++#ifndef is_valid_ether_addr ++#define is_valid_ether_addr _kc_is_valid_ether_addr ++extern int _kc_is_valid_ether_addr(u8 *addr); ++#endif ++ ++/**************************************/ ++/* MISCELLANEOUS */ ++ ++#ifndef INIT_TQUEUE ++#define INIT_TQUEUE(_tq, _routine, _data) \ ++ do { \ ++ INIT_LIST_HEAD(&(_tq)->list); \ ++ (_tq)->sync = 0; \ ++ (_tq)->routine = _routine; \ ++ (_tq)->data = _data; \ ++ } while (0) ++#endif ++ ++#endif /* 2.4.3 => 2.4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,5) ) ++/* Generic MII registers. 
*/ ++#define MII_BMCR 0x00 /* Basic mode control register */ ++#define MII_BMSR 0x01 /* Basic mode status register */ ++#define MII_PHYSID1 0x02 /* PHYS ID 1 */ ++#define MII_PHYSID2 0x03 /* PHYS ID 2 */ ++#define MII_ADVERTISE 0x04 /* Advertisement control reg */ ++#define MII_LPA 0x05 /* Link partner ability reg */ ++#define MII_EXPANSION 0x06 /* Expansion register */ ++/* Basic mode control register. */ ++#define BMCR_FULLDPLX 0x0100 /* Full duplex */ ++#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ ++/* Basic mode status register. */ ++#define BMSR_ERCAP 0x0001 /* Ext-reg capability */ ++#define BMSR_ANEGCAPABLE 0x0008 /* Able to do auto-negotiation */ ++#define BMSR_10HALF 0x0800 /* Can do 10mbps, half-duplex */ ++#define BMSR_10FULL 0x1000 /* Can do 10mbps, full-duplex */ ++#define BMSR_100HALF 0x2000 /* Can do 100mbps, half-duplex */ ++#define BMSR_100FULL 0x4000 /* Can do 100mbps, full-duplex */ ++/* Advertisement control register. */ ++#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ ++#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ ++#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ ++#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ ++#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ ++#define ADVERTISE_ALL (ADVERTISE_10HALF | ADVERTISE_10FULL | \ ++ ADVERTISE_100HALF | ADVERTISE_100FULL) ++/* Expansion register for auto-negotiation. 
*/ ++#define EXPANSION_ENABLENPAGE 0x0004 /* This enables npage words */ ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.6 => 2.4.3 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,6) ) ++ ++#ifndef pci_set_power_state ++#define pci_set_power_state _kc_pci_set_power_state ++extern int _kc_pci_set_power_state(struct pci_dev *dev, int state); ++#endif ++ ++#ifndef pci_enable_wake ++#define pci_enable_wake _kc_pci_enable_wake ++extern int _kc_pci_enable_wake(struct pci_dev *pdev, u32 state, int enable); ++#endif ++ ++#ifndef pci_disable_device ++#define pci_disable_device _kc_pci_disable_device ++extern void _kc_pci_disable_device(struct pci_dev *pdev); ++#endif ++ ++/* PCI PM entry point syntax changed, so don't support suspend/resume */ ++#undef CONFIG_PM ++ ++#endif /* 2.4.6 => 2.4.3 */ ++ ++#ifndef HAVE_PCI_SET_MWI ++#define pci_set_mwi(X) pci_write_config_word(X, \ ++ PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \ ++ PCI_COMMAND_INVALIDATE); ++#define pci_clear_mwi(X) pci_write_config_word(X, \ ++ PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \ ++ ~PCI_COMMAND_INVALIDATE); ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.10 => 2.4.9 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,10) ) ++ ++/**************************************/ ++/* MODULE API */ ++ ++#ifndef MODULE_LICENSE ++ #define MODULE_LICENSE(X) ++#endif ++ ++/**************************************/ ++/* OTHER */ ++ ++#undef min ++#define min(x,y) ({ \ ++ const typeof(x) _x = (x); \ ++ const typeof(y) _y = (y); \ ++ (void) (&_x == &_y); \ ++ _x < _y ? _x : _y; }) ++ ++#undef max ++#define max(x,y) ({ \ ++ const typeof(x) _x = (x); \ ++ const typeof(y) _y = (y); \ ++ (void) (&_x == &_y); \ ++ _x > _y ? _x : _y; }) ++ ++#define min_t(type,x,y) ({ \ ++ type _x = (x); \ ++ type _y = (y); \ ++ _x < _y ? 
_x : _y; }) ++ ++#define max_t(type,x,y) ({ \ ++ type _x = (x); \ ++ type _y = (y); \ ++ _x > _y ? _x : _y; }) ++ ++#ifndef list_for_each_safe ++#define list_for_each_safe(pos, n, head) \ ++ for (pos = (head)->next, n = pos->next; pos != (head); \ ++ pos = n, n = pos->next) ++#endif ++ ++#ifndef ____cacheline_aligned_in_smp ++#ifdef CONFIG_SMP ++#define ____cacheline_aligned_in_smp ____cacheline_aligned ++#else ++#define ____cacheline_aligned_in_smp ++#endif /* CONFIG_SMP */ ++#endif ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,8) ) ++extern int _kc_snprintf(char * buf, size_t size, const char *fmt, ...); ++#define snprintf(buf, size, fmt, args...) _kc_snprintf(buf, size, fmt, ##args) ++extern int _kc_vsnprintf(char *buf, size_t size, const char *fmt, va_list args); ++#define vsnprintf(buf, size, fmt, args) _kc_vsnprintf(buf, size, fmt, args) ++#else /* 2.4.8 => 2.4.9 */ ++extern int snprintf(char * buf, size_t size, const char *fmt, ...); ++extern int vsnprintf(char *buf, size_t size, const char *fmt, va_list args); ++#endif ++#endif /* 2.4.10 -> 2.4.6 */ ++ ++ ++/*****************************************************************************/ ++/* 2.4.12 => 2.4.10 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,12) ) ++#ifndef HAVE_NETIF_MSG ++#define HAVE_NETIF_MSG 1 ++enum { ++ NETIF_MSG_DRV = 0x0001, ++ NETIF_MSG_PROBE = 0x0002, ++ NETIF_MSG_LINK = 0x0004, ++ NETIF_MSG_TIMER = 0x0008, ++ NETIF_MSG_IFDOWN = 0x0010, ++ NETIF_MSG_IFUP = 0x0020, ++ NETIF_MSG_RX_ERR = 0x0040, ++ NETIF_MSG_TX_ERR = 0x0080, ++ NETIF_MSG_TX_QUEUED = 0x0100, ++ NETIF_MSG_INTR = 0x0200, ++ NETIF_MSG_TX_DONE = 0x0400, ++ NETIF_MSG_RX_STATUS = 0x0800, ++ NETIF_MSG_PKTDATA = 0x1000, ++ NETIF_MSG_HW = 0x2000, ++ NETIF_MSG_WOL = 0x4000, ++}; ++ ++#define netif_msg_drv(p) ((p)->msg_enable & NETIF_MSG_DRV) ++#define netif_msg_probe(p) ((p)->msg_enable & NETIF_MSG_PROBE) ++#define netif_msg_link(p) ((p)->msg_enable & NETIF_MSG_LINK) ++#define netif_msg_timer(p) ((p)->msg_enable & 
NETIF_MSG_TIMER) ++#define netif_msg_ifdown(p) ((p)->msg_enable & NETIF_MSG_IFDOWN) ++#define netif_msg_ifup(p) ((p)->msg_enable & NETIF_MSG_IFUP) ++#define netif_msg_rx_err(p) ((p)->msg_enable & NETIF_MSG_RX_ERR) ++#define netif_msg_tx_err(p) ((p)->msg_enable & NETIF_MSG_TX_ERR) ++#define netif_msg_tx_queued(p) ((p)->msg_enable & NETIF_MSG_TX_QUEUED) ++#define netif_msg_intr(p) ((p)->msg_enable & NETIF_MSG_INTR) ++#define netif_msg_tx_done(p) ((p)->msg_enable & NETIF_MSG_TX_DONE) ++#define netif_msg_rx_status(p) ((p)->msg_enable & NETIF_MSG_RX_STATUS) ++#define netif_msg_pktdata(p) ((p)->msg_enable & NETIF_MSG_PKTDATA) ++#endif /* !HAVE_NETIF_MSG */ ++#endif /* 2.4.12 => 2.4.10 */ ++ ++/*****************************************************************************/ ++/* 2.4.13 => 2.4.12 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,13) ) ++ ++/**************************************/ ++/* PCI DMA MAPPING */ ++ ++#ifndef virt_to_page ++ #define virt_to_page(v) (mem_map + (virt_to_phys(v) >> PAGE_SHIFT)) ++#endif ++ ++#ifndef pci_map_page ++#define pci_map_page _kc_pci_map_page ++extern u64 _kc_pci_map_page(struct pci_dev *dev, struct page *page, unsigned long offset, size_t size, int direction); ++#endif ++ ++#ifndef pci_unmap_page ++#define pci_unmap_page _kc_pci_unmap_page ++extern void _kc_pci_unmap_page(struct pci_dev *dev, u64 dma_addr, size_t size, int direction); ++#endif ++ ++/* pci_set_dma_mask takes dma_addr_t, which is only 32-bits prior to 2.4.13 */ ++ ++#undef DMA_32BIT_MASK ++#define DMA_32BIT_MASK 0xffffffff ++#undef DMA_64BIT_MASK ++#define DMA_64BIT_MASK 0xffffffff ++ ++/**************************************/ ++/* OTHER */ ++ ++#ifndef cpu_relax ++#define cpu_relax() rep_nop() ++#endif ++ ++struct vlan_ethhdr { ++ unsigned char h_dest[ETH_ALEN]; ++ unsigned char h_source[ETH_ALEN]; ++ unsigned short h_vlan_proto; ++ unsigned short h_vlan_TCI; ++ unsigned short h_vlan_encapsulated_proto; ++}; ++#endif /* 2.4.13 => 2.4.12 */ ++ 
++/*****************************************************************************/ ++/* 2.4.17 => 2.4.12 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,17) ) ++ ++#ifndef __devexit_p ++ #define __devexit_p(x) &(x) ++#endif ++ ++#endif /* 2.4.17 => 2.4.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,18) ) ++#define NETIF_MSG_HW 0x2000 ++#define NETIF_MSG_WOL 0x4000 ++ ++#ifndef netif_msg_hw ++#define netif_msg_hw(p) ((p)->msg_enable & NETIF_MSG_HW) ++#endif ++#ifndef netif_msg_wol ++#define netif_msg_wol(p) ((p)->msg_enable & NETIF_MSG_WOL) ++#endif ++#endif /* 2.4.18 */ ++ ++/*****************************************************************************/ ++ ++/*****************************************************************************/ ++/* 2.4.20 => 2.4.19 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,20) ) ++ ++/* we won't support NAPI on less than 2.4.20 */ ++#ifdef NAPI ++#undef NAPI ++#endif ++ ++#endif /* 2.4.20 => 2.4.19 */ ++ ++/*****************************************************************************/ ++/* 2.4.22 => 2.4.17 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) ++#define pci_name(x) ((x)->slot_name) ++ ++#ifndef SUPPORTED_10000baseT_Full ++#define SUPPORTED_10000baseT_Full (1 << 12) ++#endif ++#ifndef ADVERTISED_10000baseT_Full ++#define ADVERTISED_10000baseT_Full (1 << 12) ++#endif ++#endif ++ ++/*****************************************************************************/ ++/* 2.4.22 => 2.4.17 */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,22) ) ++#endif ++ ++/*****************************************************************************/ ++/*****************************************************************************/ ++/* 2.4.23 => 2.4.22 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,23) ) ++/*****************************************************************************/ ++#ifdef NAPI ++#ifndef netif_poll_disable 
++#define netif_poll_disable(x) _kc_netif_poll_disable(x) ++static inline void _kc_netif_poll_disable(struct net_device *netdev) ++{ ++ while (test_and_set_bit(__LINK_STATE_RX_SCHED, &netdev->state)) { ++ /* No hurry */ ++ current->state = TASK_INTERRUPTIBLE; ++ schedule_timeout(1); ++ } ++} ++#endif ++#ifndef netif_poll_enable ++#define netif_poll_enable(x) _kc_netif_poll_enable(x) ++static inline void _kc_netif_poll_enable(struct net_device *netdev) ++{ ++ clear_bit(__LINK_STATE_RX_SCHED, &netdev->state); ++} ++#endif ++#endif /* NAPI */ ++#ifndef netif_tx_disable ++#define netif_tx_disable(x) _kc_netif_tx_disable(x) ++static inline void _kc_netif_tx_disable(struct net_device *dev) ++{ ++ spin_lock_bh(&dev->xmit_lock); ++ netif_stop_queue(dev); ++ spin_unlock_bh(&dev->xmit_lock); ++} ++#endif ++#else /* 2.4.23 => 2.4.22 */ ++#define HAVE_SCTP ++#endif /* 2.4.23 => 2.4.22 */ ++ ++/*****************************************************************************/ ++/* 2.6.4 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,25) || \ ++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ ++ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ) ++#define ETHTOOL_OPS_COMPAT ++#endif /* 2.6.4 => 2.6.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) ++#define __user ++#endif /* < 2.4.27 */ ++ ++/*****************************************************************************/ ++/* 2.5.71 => 2.4.x */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,71) ) ++#define sk_protocol protocol ++#define pci_get_device pci_find_device ++#endif /* 2.5.70 => 2.4.x */ ++ ++/*****************************************************************************/ ++/* < 2.4.27 or 2.6.0 <= 2.6.5 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) || \ ++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) && \ ++ LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ) ++ ++#ifndef netif_msg_init ++#define netif_msg_init 
_kc_netif_msg_init ++static inline u32 _kc_netif_msg_init(int debug_value, int default_msg_enable_bits) ++{ ++ /* use default */ ++ if (debug_value < 0 || debug_value >= (sizeof(u32) * 8)) ++ return default_msg_enable_bits; ++ if (debug_value == 0) /* no output */ ++ return 0; ++ /* set low N bits */ ++ return (1 << debug_value) -1; ++} ++#endif ++ ++#endif /* < 2.4.27 or 2.6.0 <= 2.6.5 */ ++/*****************************************************************************/ ++#if (( LINUX_VERSION_CODE < KERNEL_VERSION(2,4,27) ) || \ ++ (( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) && \ ++ ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ))) ++#define netdev_priv(x) x->priv ++#endif ++ ++/*****************************************************************************/ ++/* <= 2.5.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,0) ) ++#include ++#undef pci_register_driver ++#define pci_register_driver pci_module_init ++ ++/* ++ * Most of the dma compat code is copied/modifed from the 2.4.37 ++ * /include/linux/libata-compat.h header file ++ */ ++/* These definitions mirror those in pci.h, so they can be used ++ * interchangeably with their PCI_ counterparts */ ++enum dma_data_direction { ++ DMA_BIDIRECTIONAL = 0, ++ DMA_TO_DEVICE = 1, ++ DMA_FROM_DEVICE = 2, ++ DMA_NONE = 3, ++}; ++ ++struct device { ++ struct pci_dev pdev; ++}; ++ ++static inline struct pci_dev *to_pci_dev (struct device *dev) ++{ ++ return (struct pci_dev *) dev; ++} ++static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) ++{ ++ return (struct device *) pdev; ++} ++#define pdev_printk(lvl, pdev, fmt, args...) \ ++ printk("%s %s: " fmt, lvl, pci_name(pdev), ## args) ++#define dev_err(dev, fmt, args...) \ ++ pdev_printk(KERN_ERR, to_pci_dev(dev), fmt, ## args) ++#define dev_info(dev, fmt, args...) \ ++ pdev_printk(KERN_INFO, to_pci_dev(dev), fmt, ## args) ++#define dev_warn(dev, fmt, args...) 
\ ++ pdev_printk(KERN_WARNING, to_pci_dev(dev), fmt, ## args) ++#define dev_notice(dev, fmt, args...) \ ++ pdev_printk(KERN_NOTICE, to_pci_dev(dev), fmt, ## args) ++#define dev_dbg(dev, fmt, args...) \ ++ pdev_printk(KERN_DEBUG, to_pci_dev(dev), fmt, ## args) ++ ++/* NOTE: dangerous! we ignore the 'gfp' argument */ ++#define dma_alloc_coherent(dev,sz,dma,gfp) \ ++ pci_alloc_consistent(to_pci_dev(dev),(sz),(dma)) ++#define dma_free_coherent(dev,sz,addr,dma_addr) \ ++ pci_free_consistent(to_pci_dev(dev),(sz),(addr),(dma_addr)) ++ ++#define dma_map_page(dev,a,b,c,d) \ ++ pci_map_page(to_pci_dev(dev),(a),(b),(c),(d)) ++#define dma_unmap_page(dev,a,b,c) \ ++ pci_unmap_page(to_pci_dev(dev),(a),(b),(c)) ++ ++#define dma_map_single(dev,a,b,c) \ ++ pci_map_single(to_pci_dev(dev),(a),(b),(c)) ++#define dma_unmap_single(dev,a,b,c) \ ++ pci_unmap_single(to_pci_dev(dev),(a),(b),(c)) ++ ++#define dma_map_sg(dev, sg, nents, dir) \ ++ pci_map_sg(to_pci_dev(dev), (sg), (nents), (dir) ++#define dma_unmap_sg(dev, sg, nents, dir) \ ++ pci_unmap_sg(to_pci_dev(dev), (sg), (nents), (dir) ++ ++#define dma_sync_single(dev,a,b,c) \ ++ pci_dma_sync_single(to_pci_dev(dev),(a),(b),(c)) ++ ++/* for range just sync everything, that's all the pci API can do */ ++#define dma_sync_single_range(dev,addr,off,sz,dir) \ ++ pci_dma_sync_single(to_pci_dev(dev),(addr),(off)+(sz),(dir)) ++ ++#define dma_set_mask(dev,mask) \ ++ pci_set_dma_mask(to_pci_dev(dev),(mask)) ++ ++/* hlist_* code - double linked lists */ ++struct hlist_head { ++ struct hlist_node *first; ++}; ++ ++struct hlist_node { ++ struct hlist_node *next, **pprev; ++}; ++ ++static inline void __hlist_del(struct hlist_node *n) ++{ ++ struct hlist_node *next = n->next; ++ struct hlist_node **pprev = n->pprev; ++ *pprev = next; ++ if (next) ++ next->pprev = pprev; ++} ++ ++static inline void hlist_del(struct hlist_node *n) ++{ ++ __hlist_del(n); ++ n->next = NULL; ++ n->pprev = NULL; ++} ++ ++static inline void hlist_add_head(struct hlist_node 
*n, struct hlist_head *h) ++{ ++ struct hlist_node *first = h->first; ++ n->next = first; ++ if (first) ++ first->pprev = &n->next; ++ h->first = n; ++ n->pprev = &h->first; ++} ++ ++static inline int hlist_empty(const struct hlist_head *h) ++{ ++ return !h->first; ++} ++#define HLIST_HEAD_INIT { .first = NULL } ++#define HLIST_HEAD(name) struct hlist_head name = { .first = NULL } ++#define INIT_HLIST_HEAD(ptr) ((ptr)->first = NULL) ++static inline void INIT_HLIST_NODE(struct hlist_node *h) ++{ ++ h->next = NULL; ++ h->pprev = NULL; ++} ++ ++#ifndef might_sleep ++#define might_sleep() ++#endif ++#else ++static inline struct device *pci_dev_to_dev(struct pci_dev *pdev) ++{ ++ return &pdev->dev; ++} ++#endif /* <= 2.5.0 */ ++ ++/*****************************************************************************/ ++/* 2.5.28 => 2.4.23 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) ++ ++#include ++#define work_struct tq_struct ++#undef INIT_WORK ++#define INIT_WORK(a,b) INIT_TQUEUE(a,(void (*)(void *))b,a) ++#undef container_of ++#define container_of list_entry ++#define schedule_work schedule_task ++#define flush_scheduled_work flush_scheduled_tasks ++#define cancel_work_sync(x) flush_scheduled_work() ++ ++#endif /* 2.5.28 => 2.4.17 */ ++ ++/*****************************************************************************/ ++/* 2.6.0 => 2.5.28 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++#ifndef read_barrier_depends ++#define read_barrier_depends() rmb() ++#endif ++ ++#ifndef rcu_head ++struct __kc_callback_head { ++ struct __kc_callback_head *next; ++ void (*func)(struct callback_head *head); ++}; ++#define rcu_head __kc_callback_head ++#endif ++ ++#undef get_cpu ++#define get_cpu() smp_processor_id() ++#undef put_cpu ++#define put_cpu() do { } while(0) ++#define MODULE_INFO(version, _version) ++#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT ++#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1 ++#endif ++#ifndef CONFIG_IGB_DISABLE_PACKET_SPLIT ++#define 
CONFIG_IGB_DISABLE_PACKET_SPLIT 1 ++#endif ++#ifndef CONFIG_IGC_DISABLE_PACKET_SPLIT ++#define CONFIG_IGC_DISABLE_PACKET_SPLIT 1 ++#endif ++ ++#define dma_set_coherent_mask(dev,mask) 1 ++ ++#undef dev_put ++#define dev_put(dev) __dev_put(dev) ++ ++#ifndef skb_fill_page_desc ++#define skb_fill_page_desc _kc_skb_fill_page_desc ++extern void _kc_skb_fill_page_desc(struct sk_buff *skb, int i, struct page *page, int off, int size); ++#endif ++ ++#undef ALIGN ++#define ALIGN(x,a) (((x)+(a)-1)&~((a)-1)) ++ ++#ifndef page_count ++#define page_count(p) atomic_read(&(p)->count) ++#endif ++ ++#ifdef MAX_NUMNODES ++#undef MAX_NUMNODES ++#endif ++#define MAX_NUMNODES 1 ++ ++/* find_first_bit and find_next bit are not defined for most ++ * 2.4 kernels (except for the redhat 2.4.21 kernels ++ */ ++#include ++#define BITOP_WORD(nr) ((nr) / BITS_PER_LONG) ++#undef find_next_bit ++#define find_next_bit _kc_find_next_bit ++extern unsigned long _kc_find_next_bit(const unsigned long *addr, ++ unsigned long size, ++ unsigned long offset); ++#define find_first_bit(addr, size) find_next_bit((addr), (size), 0) ++ ++#ifndef netdev_name ++static inline const char *_kc_netdev_name(const struct net_device *dev) ++{ ++ if (strchr(dev->name, '%')) ++ return "(unregistered net_device)"; ++ return dev->name; ++} ++#define netdev_name(netdev) _kc_netdev_name(netdev) ++#endif /* netdev_name */ ++ ++#ifndef strlcpy ++#define strlcpy _kc_strlcpy ++extern size_t _kc_strlcpy(char *dest, const char *src, size_t size); ++#endif /* strlcpy */ ++ ++#ifndef do_div ++#if BITS_PER_LONG == 64 ++# define do_div(n,base) ({ \ ++ uint32_t __base = (base); \ ++ uint32_t __rem; \ ++ __rem = ((uint64_t)(n)) % __base; \ ++ (n) = ((uint64_t)(n)) / __base; \ ++ __rem; \ ++ }) ++#elif BITS_PER_LONG == 32 ++extern uint32_t _kc__div64_32(uint64_t *dividend, uint32_t divisor); ++# define do_div(n,base) ({ \ ++ uint32_t __base = (base); \ ++ uint32_t __rem; \ ++ if (likely(((n) >> 32) == 0)) { \ ++ __rem = (uint32_t)(n) % 
__base; \ ++ (n) = (uint32_t)(n) / __base; \ ++ } else \ ++ __rem = _kc__div64_32(&(n), __base); \ ++ __rem; \ ++ }) ++#else /* BITS_PER_LONG == ?? */ ++# error do_div() does not yet support the C64 ++#endif /* BITS_PER_LONG */ ++#endif /* do_div */ ++ ++#ifndef NSEC_PER_SEC ++#define NSEC_PER_SEC 1000000000L ++#endif ++ ++#undef HAVE_I2C_SUPPORT ++#else /* 2.6.0 */ ++ ++#endif /* 2.6.0 => 2.5.28 */ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,3) ) ++#define dma_pool pci_pool ++#define dma_pool_destroy pci_pool_destroy ++#define dma_pool_alloc pci_pool_alloc ++#define dma_pool_free pci_pool_free ++ ++#define dma_pool_create(name,dev,size,align,allocation) \ ++ pci_pool_create((name),to_pci_dev(dev),(size),(align),(allocation)) ++#endif /* < 2.6.3 */ ++ ++/*****************************************************************************/ ++/* 2.6.4 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++#define MODULE_VERSION(_version) MODULE_INFO(version, _version) ++#endif /* 2.6.4 => 2.6.0 */ ++ ++/*****************************************************************************/ ++/* 2.6.5 => 2.6.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,5) ) ++#define dma_sync_single_for_cpu dma_sync_single ++#define dma_sync_single_for_device dma_sync_single ++#define dma_sync_single_range_for_cpu dma_sync_single_range ++#define dma_sync_single_range_for_device dma_sync_single_range ++#ifndef pci_dma_mapping_error ++#define pci_dma_mapping_error _kc_pci_dma_mapping_error ++static inline int _kc_pci_dma_mapping_error(dma_addr_t dma_addr) ++{ ++ return dma_addr == 0; ++} ++#endif ++#endif /* 2.6.5 => 2.6.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,4) ) ++extern int _kc_scnprintf(char * buf, size_t size, const char *fmt, ...); ++#define scnprintf(buf, size, fmt, args...) 
_kc_scnprintf(buf, size, fmt, ##args) ++#endif /* < 2.6.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,6) ) ++/* taken from 2.6 include/linux/bitmap.h */ ++#undef bitmap_zero ++#define bitmap_zero _kc_bitmap_zero ++static inline void _kc_bitmap_zero(unsigned long *dst, int nbits) ++{ ++ if (nbits <= BITS_PER_LONG) ++ *dst = 0UL; ++ else { ++ int len = BITS_TO_LONGS(nbits) * sizeof(unsigned long); ++ memset(dst, 0, len); ++ } ++} ++#define page_to_nid(x) 0 ++ ++#endif /* < 2.6.6 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,7) ) ++#undef if_mii ++#define if_mii _kc_if_mii ++static inline struct mii_ioctl_data *_kc_if_mii(struct ifreq *rq) ++{ ++ return (struct mii_ioctl_data *) &rq->ifr_ifru; ++} ++ ++#ifndef __force ++#define __force ++#endif ++#endif /* < 2.6.7 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,8) ) ++#ifndef PCI_EXP_DEVCTL ++#define PCI_EXP_DEVCTL 8 ++#endif ++#ifndef PCI_EXP_DEVCTL_CERE ++#define PCI_EXP_DEVCTL_CERE 0x0001 ++#endif ++#define PCI_EXP_FLAGS 2 /* Capabilities register */ ++#define PCI_EXP_FLAGS_VERS 0x000f /* Capability version */ ++#define PCI_EXP_FLAGS_TYPE 0x00f0 /* Device/Port type */ ++#define PCI_EXP_TYPE_ENDPOINT 0x0 /* Express Endpoint */ ++#define PCI_EXP_TYPE_LEG_END 0x1 /* Legacy Endpoint */ ++#define PCI_EXP_TYPE_ROOT_PORT 0x4 /* Root Port */ ++#define PCI_EXP_TYPE_DOWNSTREAM 0x6 /* Downstream Port */ ++#define PCI_EXP_FLAGS_SLOT 0x0100 /* Slot implemented */ ++#define PCI_EXP_DEVCAP 4 /* Device capabilities */ ++#define PCI_EXP_DEVSTA 10 /* Device Status */ ++#define msleep(x) do { set_current_state(TASK_UNINTERRUPTIBLE); \ ++ schedule_timeout((x * HZ)/1000 + 2); \ ++ } while (0) ++ ++#endif /* < 2.6.8 */ ++ 
++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,9)) ++#include ++#define __iomem ++ ++#ifndef kcalloc ++#define kcalloc(n, size, flags) _kc_kzalloc(((n) * (size)), flags) ++extern void *_kc_kzalloc(size_t size, int flags); ++#endif ++#define MSEC_PER_SEC 1000L ++static inline unsigned int _kc_jiffies_to_msecs(const unsigned long j) ++{ ++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) ++ return (MSEC_PER_SEC / HZ) * j; ++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) ++ return (j + (HZ / MSEC_PER_SEC) - 1)/(HZ / MSEC_PER_SEC); ++#else ++ return (j * MSEC_PER_SEC) / HZ; ++#endif ++} ++static inline unsigned long _kc_msecs_to_jiffies(const unsigned int m) ++{ ++ if (m > _kc_jiffies_to_msecs(MAX_JIFFY_OFFSET)) ++ return MAX_JIFFY_OFFSET; ++#if HZ <= MSEC_PER_SEC && !(MSEC_PER_SEC % HZ) ++ return (m + (MSEC_PER_SEC / HZ) - 1) / (MSEC_PER_SEC / HZ); ++#elif HZ > MSEC_PER_SEC && !(HZ % MSEC_PER_SEC) ++ return m * (HZ / MSEC_PER_SEC); ++#else ++ return (m * HZ + MSEC_PER_SEC - 1) / MSEC_PER_SEC; ++#endif ++} ++ ++#define msleep_interruptible _kc_msleep_interruptible ++static inline unsigned long _kc_msleep_interruptible(unsigned int msecs) ++{ ++ unsigned long timeout = _kc_msecs_to_jiffies(msecs) + 1; ++ ++ while (timeout && !signal_pending(current)) { ++ __set_current_state(TASK_INTERRUPTIBLE); ++ timeout = schedule_timeout(timeout); ++ } ++ return _kc_jiffies_to_msecs(timeout); ++} ++ ++/* Basic mode control register. 
*/ ++#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ ++ ++#ifndef __le16 ++#define __le16 u16 ++#endif ++#ifndef __le32 ++#define __le32 u32 ++#endif ++#ifndef __le64 ++#define __le64 u64 ++#endif ++#ifndef __be16 ++#define __be16 u16 ++#endif ++#ifndef __be32 ++#define __be32 u32 ++#endif ++#ifndef __be64 ++#define __be64 u64 ++#endif ++ ++static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) ++{ ++ return (struct vlan_ethhdr *)skb->mac.raw; ++} ++ ++/* Wake-On-Lan options. */ ++#define WAKE_PHY (1 << 0) ++#define WAKE_UCAST (1 << 1) ++#define WAKE_MCAST (1 << 2) ++#define WAKE_BCAST (1 << 3) ++#define WAKE_ARP (1 << 4) ++#define WAKE_MAGIC (1 << 5) ++#define WAKE_MAGICSECURE (1 << 6) /* only meaningful if WAKE_MAGIC */ ++ ++#define skb_header_pointer _kc_skb_header_pointer ++static inline void *_kc_skb_header_pointer(const struct sk_buff *skb, ++ int offset, int len, void *buffer) ++{ ++ int hlen = skb_headlen(skb); ++ ++ if (hlen - offset >= len) ++ return skb->data + offset; ++ ++#ifdef MAX_SKB_FRAGS ++ if (skb_copy_bits(skb, offset, buffer, len) < 0) ++ return NULL; ++ ++ return buffer; ++#else ++ return NULL; ++#endif ++ ++#ifndef NETDEV_TX_OK ++#define NETDEV_TX_OK 0 ++#endif ++#ifndef NETDEV_TX_BUSY ++#define NETDEV_TX_BUSY 1 ++#endif ++#ifndef NETDEV_TX_LOCKED ++#define NETDEV_TX_LOCKED -1 ++#endif ++} ++ ++#ifndef __bitwise ++#define __bitwise ++#endif ++#endif /* < 2.6.9 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,10) ) ++#ifdef module_param_array_named ++#undef module_param_array_named ++#define module_param_array_named(name, array, type, nump, perm) \ ++ static struct kparam_array __param_arr_##name \ ++ = { ARRAY_SIZE(array), nump, param_set_##type, param_get_##type, \ ++ sizeof(array[0]), array }; \ ++ module_param_call(name, param_array_set, param_array_get, \ ++ &__param_arr_##name, perm) ++#endif /* module_param_array_named */ 
++/* ++ * num_online is broken for all < 2.6.10 kernels. This is needed to support ++ * Node module parameter of ixgbe. ++ */ ++#undef num_online_nodes ++#define num_online_nodes(n) 1 ++extern DECLARE_BITMAP(_kcompat_node_online_map, MAX_NUMNODES); ++#undef node_online_map ++#define node_online_map _kcompat_node_online_map ++#define pci_get_class pci_find_class ++#endif /* < 2.6.10 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,11) ) ++#define PCI_D0 0 ++#define PCI_D1 1 ++#define PCI_D2 2 ++#define PCI_D3hot 3 ++#define PCI_D3cold 4 ++typedef int pci_power_t; ++#define pci_choose_state(pdev,state) state ++#define PMSG_SUSPEND 3 ++#define PCI_EXP_LNKCTL 16 ++ ++#undef NETIF_F_LLTX ++ ++#ifndef ARCH_HAS_PREFETCH ++#define prefetch(X) ++#endif ++ ++#ifndef NET_IP_ALIGN ++#define NET_IP_ALIGN 2 ++#endif ++ ++#define KC_USEC_PER_SEC 1000000L ++#define usecs_to_jiffies _kc_usecs_to_jiffies ++static inline unsigned int _kc_jiffies_to_usecs(const unsigned long j) ++{ ++#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) ++ return (KC_USEC_PER_SEC / HZ) * j; ++#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) ++ return (j + (HZ / KC_USEC_PER_SEC) - 1)/(HZ / KC_USEC_PER_SEC); ++#else ++ return (j * KC_USEC_PER_SEC) / HZ; ++#endif ++} ++static inline unsigned long _kc_usecs_to_jiffies(const unsigned int m) ++{ ++ if (m > _kc_jiffies_to_usecs(MAX_JIFFY_OFFSET)) ++ return MAX_JIFFY_OFFSET; ++#if HZ <= KC_USEC_PER_SEC && !(KC_USEC_PER_SEC % HZ) ++ return (m + (KC_USEC_PER_SEC / HZ) - 1) / (KC_USEC_PER_SEC / HZ); ++#elif HZ > KC_USEC_PER_SEC && !(HZ % KC_USEC_PER_SEC) ++ return m * (HZ / KC_USEC_PER_SEC); ++#else ++ return (m * HZ + KC_USEC_PER_SEC - 1) / KC_USEC_PER_SEC; ++#endif ++} ++ ++#define PCI_EXP_LNKCAP 12 /* Link Capabilities */ ++#define PCI_EXP_LNKSTA 18 /* Link Status */ ++#define PCI_EXP_SLTCAP 20 /* Slot Capabilities */ ++#define PCI_EXP_SLTCTL 24 /* Slot Control */ 
++#define PCI_EXP_SLTSTA 26 /* Slot Status */ ++#define PCI_EXP_RTCTL 28 /* Root Control */ ++#define PCI_EXP_RTCAP 30 /* Root Capabilities */ ++#define PCI_EXP_RTSTA 32 /* Root Status */ ++#endif /* < 2.6.11 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,12) ) ++#include ++#define USE_REBOOT_NOTIFIER ++ ++/* Generic MII registers. */ ++#define MII_CTRL1000 0x09 /* 1000BASE-T control */ ++#define MII_STAT1000 0x0a /* 1000BASE-T status */ ++/* Advertisement control register. */ ++#define ADVERTISE_PAUSE_CAP 0x0400 /* Try for pause */ ++#define ADVERTISE_PAUSE_ASYM 0x0800 /* Try for asymmetric pause */ ++/* Link partner ability register. */ ++#define LPA_PAUSE_CAP 0x0400 /* Can pause */ ++#define LPA_PAUSE_ASYM 0x0800 /* Can pause asymetrically */ ++/* 1000BASE-T Control register */ ++#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ ++#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ ++/* 1000BASE-T Status register */ ++#define LPA_1000LOCALRXOK 0x2000 /* Link partner local receiver status */ ++#define LPA_1000REMRXOK 0x1000 /* Link partner remote receiver status */ ++ ++#ifndef is_zero_ether_addr ++#define is_zero_ether_addr _kc_is_zero_ether_addr ++static inline int _kc_is_zero_ether_addr(const u8 *addr) ++{ ++ return !(addr[0] | addr[1] | addr[2] | addr[3] | addr[4] | addr[5]); ++} ++#endif /* is_zero_ether_addr */ ++#ifndef is_multicast_ether_addr ++#define is_multicast_ether_addr _kc_is_multicast_ether_addr ++static inline int _kc_is_multicast_ether_addr(const u8 *addr) ++{ ++ return addr[0] & 0x01; ++} ++#endif /* is_multicast_ether_addr */ ++#endif /* < 2.6.12 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,13) ) ++#ifndef kstrdup ++#define kstrdup _kc_kstrdup ++extern char *_kc_kstrdup(const char *s, unsigned int gfp); ++#endif ++#endif /* 
< 2.6.13 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14) ) ++#define pm_message_t u32 ++#ifndef kzalloc ++#define kzalloc _kc_kzalloc ++extern void *_kc_kzalloc(size_t size, int flags); ++#endif ++ ++/* Generic MII registers. */ ++#define MII_ESTATUS 0x0f /* Extended Status */ ++/* Basic mode status register. */ ++#define BMSR_ESTATEN 0x0100 /* Extended Status in R15 */ ++/* Extended status register. */ ++#define ESTATUS_1000_TFULL 0x2000 /* Can do 1000BT Full */ ++#define ESTATUS_1000_THALF 0x1000 /* Can do 1000BT Half */ ++ ++#define SUPPORTED_Pause (1 << 13) ++#define SUPPORTED_Asym_Pause (1 << 14) ++#define ADVERTISED_Pause (1 << 13) ++#define ADVERTISED_Asym_Pause (1 << 14) ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,3)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)))) ++#if ((LINUX_VERSION_CODE == KERNEL_VERSION(2,6,9)) && !defined(gfp_t)) ++#define gfp_t unsigned ++#else ++typedef unsigned gfp_t; ++#endif ++#endif /* !RHEL4.3->RHEL5.0 */ ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9) ) ++#ifdef CONFIG_X86_64 ++#define dma_sync_single_range_for_cpu(dev, addr, off, sz, dir) \ ++ dma_sync_single_for_cpu((dev), (addr), (off) + (sz), (dir)) ++#define dma_sync_single_range_for_device(dev, addr, off, sz, dir) \ ++ dma_sync_single_for_device((dev), (addr), (off) + (sz), (dir)) ++#endif ++#endif ++#endif /* < 2.6.14 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,15) ) ++#ifndef kfree_rcu ++/* this is placed here due to a lack of rcu_barrier in previous kernels */ ++#define kfree_rcu(_ptr, _offset) kfree(_ptr) ++#endif /* kfree_rcu */ ++#ifndef vmalloc_node ++#define vmalloc_node(a,b) vmalloc(a) ++#endif /* vmalloc_node*/ ++ ++#define setup_timer(_timer, _function, _data) \ ++do { \ ++ (_timer)->function = _function; \ ++ (_timer)->data 
= _data; \ ++ init_timer(_timer); \ ++} while (0) ++#ifndef device_can_wakeup ++#define device_can_wakeup(dev) (1) ++#endif ++#ifndef device_set_wakeup_enable ++#define device_set_wakeup_enable(dev, val) do{}while(0) ++#endif ++#ifndef device_init_wakeup ++#define device_init_wakeup(dev,val) do {} while (0) ++#endif ++static inline unsigned _kc_compare_ether_addr(const u8 *addr1, const u8 *addr2) ++{ ++ const u16 *a = (const u16 *) addr1; ++ const u16 *b = (const u16 *) addr2; ++ ++ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | (a[2] ^ b[2])) != 0; ++} ++#undef compare_ether_addr ++#define compare_ether_addr(addr1, addr2) _kc_compare_ether_addr(addr1, addr2) ++#endif /* < 2.6.15 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,16) ) ++#undef DEFINE_MUTEX ++#define DEFINE_MUTEX(x) DECLARE_MUTEX(x) ++#define mutex_lock(x) down_interruptible(x) ++#define mutex_unlock(x) up(x) ++ ++#ifndef ____cacheline_internodealigned_in_smp ++#ifdef CONFIG_SMP ++#define ____cacheline_internodealigned_in_smp ____cacheline_aligned_in_smp ++#else ++#define ____cacheline_internodealigned_in_smp ++#endif /* CONFIG_SMP */ ++#endif /* ____cacheline_internodealigned_in_smp */ ++#undef HAVE_PCI_ERS ++#else /* 2.6.16 and above */ ++#undef HAVE_PCI_ERS ++#define HAVE_PCI_ERS ++#if ( SLE_VERSION_CODE && SLE_VERSION_CODE == SLE_VERSION(10,4,0) ) ++#ifdef device_can_wakeup ++#undef device_can_wakeup ++#endif /* device_can_wakeup */ ++#define device_can_wakeup(dev) 1 ++#endif /* SLE_VERSION(10,4,0) */ ++#endif /* < 2.6.16 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,17) ) ++#ifndef dev_notice ++#define dev_notice(dev, fmt, args...) 
\ ++ dev_printk(KERN_NOTICE, dev, fmt, ## args) ++#endif ++ ++#ifndef first_online_node ++#define first_online_node 0 ++#endif ++#ifndef NET_SKB_PAD ++#define NET_SKB_PAD 16 ++#endif ++#endif /* < 2.6.17 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18) ) ++ ++#ifndef IRQ_HANDLED ++#define irqreturn_t void ++#define IRQ_HANDLED ++#define IRQ_NONE ++#endif ++ ++#ifndef IRQF_PROBE_SHARED ++#ifdef SA_PROBEIRQ ++#define IRQF_PROBE_SHARED SA_PROBEIRQ ++#else ++#define IRQF_PROBE_SHARED 0 ++#endif ++#endif ++ ++#ifndef IRQF_SHARED ++#define IRQF_SHARED SA_SHIRQ ++#endif ++ ++#ifndef ARRAY_SIZE ++#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0])) ++#endif ++ ++#ifndef FIELD_SIZEOF ++#define FIELD_SIZEOF(t, f) (sizeof(((t*)0)->f)) ++#endif ++ ++#ifndef skb_is_gso ++#ifdef NETIF_F_TSO ++#define skb_is_gso _kc_skb_is_gso ++static inline int _kc_skb_is_gso(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->gso_size; ++} ++#else ++#define skb_is_gso(a) 0 ++#endif ++#endif ++ ++#ifndef resource_size_t ++#define resource_size_t unsigned long ++#endif ++ ++#ifdef skb_pad ++#undef skb_pad ++#endif ++#define skb_pad(x,y) _kc_skb_pad(x, y) ++int _kc_skb_pad(struct sk_buff *skb, int pad); ++#ifdef skb_padto ++#undef skb_padto ++#endif ++#define skb_padto(x,y) _kc_skb_padto(x, y) ++static inline int _kc_skb_padto(struct sk_buff *skb, unsigned int len) ++{ ++ unsigned int size = skb->len; ++ if(likely(size >= len)) ++ return 0; ++ return _kc_skb_pad(skb, len - size); ++} ++ ++#ifndef DECLARE_PCI_UNMAP_ADDR ++#define DECLARE_PCI_UNMAP_ADDR(ADDR_NAME) \ ++ dma_addr_t ADDR_NAME ++#define DECLARE_PCI_UNMAP_LEN(LEN_NAME) \ ++ u32 LEN_NAME ++#define pci_unmap_addr(PTR, ADDR_NAME) \ ++ ((PTR)->ADDR_NAME) ++#define pci_unmap_addr_set(PTR, ADDR_NAME, VAL) \ ++ (((PTR)->ADDR_NAME) = (VAL)) ++#define pci_unmap_len(PTR, LEN_NAME) \ ++ ((PTR)->LEN_NAME) ++#define pci_unmap_len_set(PTR, LEN_NAME, VAL) \ 
++ (((PTR)->LEN_NAME) = (VAL)) ++#endif /* DECLARE_PCI_UNMAP_ADDR */ ++#endif /* < 2.6.18 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19) ) ++enum pcie_link_width { ++ PCIE_LNK_WIDTH_RESRV = 0x00, ++ PCIE_LNK_X1 = 0x01, ++ PCIE_LNK_X2 = 0x02, ++ PCIE_LNK_X4 = 0x04, ++ PCIE_LNK_X8 = 0x08, ++ PCIE_LNK_X12 = 0x0C, ++ PCIE_LNK_X16 = 0x10, ++ PCIE_LNK_X32 = 0x20, ++ PCIE_LNK_WIDTH_UNKNOWN = 0xFF, ++}; ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,0))) ++#define i_private u.generic_ip ++#endif /* >= RHEL 5.0 */ ++ ++#ifndef DIV_ROUND_UP ++#define DIV_ROUND_UP(n,d) (((n) + (d) - 1) / (d)) ++#endif ++#ifndef __ALIGN_MASK ++#define __ALIGN_MASK(x, mask) (((x) + (mask)) & ~(mask)) ++#endif ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,0) ) ++#if (!((RHEL_RELEASE_CODE && \ ++ ((RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(4,4) && \ ++ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0)) || \ ++ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,0)))))) ++typedef irqreturn_t (*irq_handler_t)(int, void*, struct pt_regs *); ++#endif ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) ++#undef CONFIG_INET_LRO ++#undef CONFIG_INET_LRO_MODULE ++#undef CONFIG_FCOE ++#undef CONFIG_FCOE_MODULE ++#endif ++typedef irqreturn_t (*new_handler_t)(int, void*); ++static inline irqreturn_t _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) ++#else /* 2.4.x */ ++typedef void (*irq_handler_t)(int, void*, struct pt_regs *); ++typedef void (*new_handler_t)(int, void*); ++static inline int _kc_request_irq(unsigned int irq, new_handler_t handler, unsigned long flags, const char *devname, void *dev_id) ++#endif /* >= 2.5.x */ ++{ ++ irq_handler_t new_handler = (irq_handler_t) handler; ++ return request_irq(irq, new_handler, flags, devname, dev_id); ++} ++ ++#undef request_irq ++#define request_irq(irq, 
handler, flags, devname, dev_id) _kc_request_irq((irq), (handler), (flags), (devname), (dev_id)) ++ ++#define irq_handler_t new_handler_t ++/* pci_restore_state and pci_save_state handles MSI/PCIE from 2.6.19 */ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4))) ++#define PCIE_CONFIG_SPACE_LEN 256 ++#define PCI_CONFIG_SPACE_LEN 64 ++#define PCIE_LINK_STATUS 0x12 ++#define pci_config_space_ich8lan() do {} while(0) ++#undef pci_save_state ++extern int _kc_pci_save_state(struct pci_dev *); ++#define pci_save_state(pdev) _kc_pci_save_state(pdev) ++#undef pci_restore_state ++extern void _kc_pci_restore_state(struct pci_dev *); ++#define pci_restore_state(pdev) _kc_pci_restore_state(pdev) ++#endif /* !(RHEL_RELEASE_CODE >= RHEL 5.4) */ ++ ++#ifdef HAVE_PCI_ERS ++#undef free_netdev ++extern void _kc_free_netdev(struct net_device *); ++#define free_netdev(netdev) _kc_free_netdev(netdev) ++#endif ++static inline int pci_enable_pcie_error_reporting(struct pci_dev __always_unused *dev) ++{ ++ return 0; ++} ++#define pci_disable_pcie_error_reporting(dev) do {} while (0) ++#define pci_cleanup_aer_uncorrect_error_status(dev) do {} while (0) ++ ++extern void *_kc_kmemdup(const void *src, size_t len, unsigned gfp); ++#define kmemdup(src, len, gfp) _kc_kmemdup(src, len, gfp) ++#ifndef bool ++#define bool _Bool ++#define true 1 ++#define false 0 ++#endif ++#else /* 2.6.19 */ ++#include ++#include ++ ++#define NEW_SKB_CSUM_HELP ++#endif /* < 2.6.19 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20) ) ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,5,28) ) ++#undef INIT_WORK ++#define INIT_WORK(_work, _func) \ ++do { \ ++ INIT_LIST_HEAD(&(_work)->entry); \ ++ (_work)->pending = 0; \ ++ (_work)->func = (void (*)(void *))_func; \ ++ (_work)->data = _work; \ ++ init_timer(&(_work)->timer); \ ++} while (0) ++#endif ++ ++#ifndef PCI_VDEVICE ++#define PCI_VDEVICE(ven, dev) \ 
++ PCI_VENDOR_ID_##ven, (dev), \ ++ PCI_ANY_ID, PCI_ANY_ID, 0, 0 ++#endif ++ ++#ifndef PCI_VENDOR_ID_INTEL ++#define PCI_VENDOR_ID_INTEL 0x8086 ++#endif ++ ++#ifndef round_jiffies ++#define round_jiffies(x) x ++#endif ++ ++#define csum_offset csum ++ ++#define HAVE_EARLY_VMALLOC_NODE ++#define dev_to_node(dev) -1 ++#undef set_dev_node ++/* remove compiler warning with b=b, for unused variable */ ++#define set_dev_node(a, b) do { (b) = (b); } while(0) ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,6)))) && \ ++ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) ++typedef __u16 __bitwise __sum16; ++typedef __u32 __bitwise __wsum; ++#endif ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(4,7)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(5,0))) || \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,4)))) && \ ++ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(10,2,0))) ++static inline __wsum csum_unfold(__sum16 n) ++{ ++ return (__force __wsum)n; ++} ++#endif ++ ++#else /* < 2.6.20 */ ++#define HAVE_DEVICE_NUMA_NODE ++#endif /* < 2.6.20 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) ++#define to_net_dev(class) container_of(class, struct net_device, class_dev) ++#define NETDEV_CLASS_DEV ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5))) ++#define vlan_group_get_device(vg, id) (vg->vlan_devices[id]) ++#define vlan_group_set_device(vg, id, dev) \ ++ do { \ ++ if (vg) vg->vlan_devices[id] = dev; \ ++ } while (0) ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,5)) */ ++#define pci_channel_offline(pdev) (pdev->error_state && \ ++ pdev->error_state != pci_channel_io_normal) ++#define pci_request_selected_regions(pdev, bars, name) \ ++ 
pci_request_regions(pdev, name) ++#define pci_release_selected_regions(pdev, bars) pci_release_regions(pdev); ++ ++#ifndef __aligned ++#define __aligned(x) __attribute__((aligned(x))) ++#endif ++ ++extern struct pci_dev *_kc_netdev_to_pdev(struct net_device *netdev); ++#define netdev_to_dev(netdev) \ ++ pci_dev_to_dev(_kc_netdev_to_pdev(netdev)) ++#define devm_kzalloc(dev, size, flags) kzalloc(size, flags) ++#define devm_kfree(dev, p) kfree(p) ++#else /* 2.6.21 */ ++static inline struct device *netdev_to_dev(struct net_device *netdev) ++{ ++ return &netdev->dev; ++} ++ ++#endif /* < 2.6.21 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) ++#define tcp_hdr(skb) (skb->h.th) ++#define tcp_hdrlen(skb) (skb->h.th->doff << 2) ++#define skb_transport_offset(skb) (skb->h.raw - skb->data) ++#define skb_transport_header(skb) (skb->h.raw) ++#define ipv6_hdr(skb) (skb->nh.ipv6h) ++#define ip_hdr(skb) (skb->nh.iph) ++#define skb_network_offset(skb) (skb->nh.raw - skb->data) ++#define skb_network_header(skb) (skb->nh.raw) ++#define skb_tail_pointer(skb) skb->tail ++#define skb_reset_tail_pointer(skb) \ ++ do { \ ++ skb->tail = skb->data; \ ++ } while (0) ++#define skb_set_tail_pointer(skb, offset) \ ++ do { \ ++ skb->tail = skb->data + offset; \ ++ } while (0) ++#define skb_copy_to_linear_data(skb, from, len) \ ++ memcpy(skb->data, from, len) ++#define skb_copy_to_linear_data_offset(skb, offset, from, len) \ ++ memcpy(skb->data + offset, from, len) ++#define skb_network_header_len(skb) (skb->h.raw - skb->nh.raw) ++#define pci_register_driver pci_module_init ++#define skb_mac_header(skb) skb->mac.raw ++ ++#ifdef NETIF_F_MULTI_QUEUE ++#ifndef alloc_etherdev_mq ++#define alloc_etherdev_mq(_a, _b) alloc_etherdev(_a) ++#endif ++#endif /* NETIF_F_MULTI_QUEUE */ ++ ++#ifndef ETH_FCS_LEN ++#define ETH_FCS_LEN 4 ++#endif ++#define cancel_work_sync(x) flush_scheduled_work() ++#ifndef udp_hdr 
++#define udp_hdr _udp_hdr ++static inline struct udphdr *_udp_hdr(const struct sk_buff *skb) ++{ ++ return (struct udphdr *)skb_transport_header(skb); ++} ++#endif ++ ++#ifdef cpu_to_be16 ++#undef cpu_to_be16 ++#endif ++#define cpu_to_be16(x) __constant_htons(x) ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1))) ++enum { ++ DUMP_PREFIX_NONE, ++ DUMP_PREFIX_ADDRESS, ++ DUMP_PREFIX_OFFSET ++}; ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(5,1)) */ ++#ifndef hex_asc ++#define hex_asc(x) "0123456789abcdef"[x] ++#endif ++#include ++extern void _kc_print_hex_dump(const char *level, const char *prefix_str, ++ int prefix_type, int rowsize, int groupsize, ++ const void *buf, size_t len, bool ascii); ++#define print_hex_dump(lvl, s, t, r, g, b, l, a) \ ++ _kc_print_hex_dump(lvl, s, t, r, g, b, l, a) ++#ifndef ADVERTISED_2500baseX_Full ++#define ADVERTISED_2500baseX_Full (1 << 15) ++#endif ++#ifndef SUPPORTED_2500baseX_Full ++#define SUPPORTED_2500baseX_Full (1 << 15) ++#endif ++ ++#ifndef ETH_P_PAUSE ++#define ETH_P_PAUSE 0x8808 ++#endif ++ ++static inline int compound_order(struct page *page) ++{ ++ return 0; ++} ++ ++#ifndef SKB_WITH_OVERHEAD ++#define SKB_WITH_OVERHEAD(X) \ ++ ((X) - SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) ++#endif ++#else /* 2.6.22 */ ++#define ETH_TYPE_TRANS_SETS_DEV ++#define HAVE_NETDEV_STATS_IN_NETDEV ++#endif /* < 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,22) ) ++#undef SET_MODULE_OWNER ++#define SET_MODULE_OWNER(dev) do { } while (0) ++#endif /* > 2.6.22 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23) ) ++#define netif_subqueue_stopped(_a, _b) 0 ++#ifndef PTR_ALIGN ++#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a))) ++#endif ++ ++#ifndef CONFIG_PM_SLEEP ++#define CONFIG_PM_SLEEP CONFIG_PM 
++#endif ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,13) ) ++#define HAVE_ETHTOOL_GET_PERM_ADDR ++#endif /* 2.6.14 through 2.6.22 */ ++ ++static inline int __kc_skb_cow_head(struct sk_buff *skb, unsigned int headroom) ++{ ++ int delta = 0; ++ ++ if (headroom > (skb->data - skb->head)) ++ delta = headroom - (skb->data - skb->head); ++ ++ if (delta || skb_header_cloned(skb)) ++ return pskb_expand_head(skb, ALIGN(delta, NET_SKB_PAD), 0, ++ GFP_ATOMIC); ++ return 0; ++} ++#define skb_cow_head(s, h) __kc_skb_cow_head((s), (h)) ++#endif /* < 2.6.23 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) ++#ifndef ETH_FLAG_LRO ++#define ETH_FLAG_LRO NETIF_F_LRO ++#endif ++ ++#ifndef ACCESS_ONCE ++#define ACCESS_ONCE(x) (*(volatile typeof(x) *)&(x)) ++#endif ++ ++/* if GRO is supported then the napi struct must already exist */ ++#ifndef NETIF_F_GRO ++/* NAPI API changes in 2.6.24 break everything */ ++struct napi_struct { ++ /* used to look up the real NAPI polling routine */ ++ int (*poll)(struct napi_struct *, int); ++ struct net_device *dev; ++ int weight; ++}; ++#endif ++ ++#ifdef NAPI ++extern int __kc_adapter_clean(struct net_device *, int *); ++/* The following definitions are multi-queue aware, and thus we have a driver ++ * define list which determines which drivers support multiple queues, and ++ * thus need these stronger defines. If a driver does not support multi-queue ++ * functionality, you don't need to add it to this list. 
++ */ ++extern struct net_device *napi_to_poll_dev(const struct napi_struct *napi); ++ ++static inline void __kc_mq_netif_napi_add(struct net_device *dev, struct napi_struct *napi, ++ int (*poll)(struct napi_struct *, int), int weight) ++{ ++ struct net_device *poll_dev = napi_to_poll_dev(napi); ++ poll_dev->poll = __kc_adapter_clean; ++ poll_dev->priv = napi; ++ poll_dev->weight = weight; ++ set_bit(__LINK_STATE_RX_SCHED, &poll_dev->state); ++ set_bit(__LINK_STATE_START, &poll_dev->state); ++ dev_hold(poll_dev); ++ napi->poll = poll; ++ napi->weight = weight; ++ napi->dev = dev; ++} ++#define netif_napi_add __kc_mq_netif_napi_add ++ ++static inline void __kc_mq_netif_napi_del(struct napi_struct *napi) ++{ ++ struct net_device *poll_dev = napi_to_poll_dev(napi); ++ WARN_ON(!test_bit(__LINK_STATE_RX_SCHED, &poll_dev->state)); ++ dev_put(poll_dev); ++ memset(poll_dev, 0, sizeof(struct net_device)); ++} ++ ++#define netif_napi_del __kc_mq_netif_napi_del ++ ++static inline bool __kc_mq_napi_schedule_prep(struct napi_struct *napi) ++{ ++ return netif_running(napi->dev) && ++ netif_rx_schedule_prep(napi_to_poll_dev(napi)); ++} ++#define napi_schedule_prep __kc_mq_napi_schedule_prep ++ ++static inline void __kc_mq_napi_schedule(struct napi_struct *napi) ++{ ++ if (napi_schedule_prep(napi)) ++ __netif_rx_schedule(napi_to_poll_dev(napi)); ++} ++#define napi_schedule __kc_mq_napi_schedule ++ ++#define napi_enable(_napi) netif_poll_enable(napi_to_poll_dev(_napi)) ++#define napi_disable(_napi) netif_poll_disable(napi_to_poll_dev(_napi)) ++#ifdef CONFIG_SMP ++static inline void napi_synchronize(const struct napi_struct *n) ++{ ++ struct net_device *dev = napi_to_poll_dev(n); ++ ++ while (test_bit(__LINK_STATE_RX_SCHED, &dev->state)) { ++ /* No hurry. 
*/ ++ msleep(1); ++ } ++} ++#else ++#define napi_synchronize(n) barrier() ++#endif /* CONFIG_SMP */ ++#define __napi_schedule(_napi) __netif_rx_schedule(napi_to_poll_dev(_napi)) ++static inline void _kc_napi_complete(struct napi_struct *napi) ++{ ++#ifdef NETIF_F_GRO ++ napi_gro_flush(napi); ++#endif ++ netif_rx_complete(napi_to_poll_dev(napi)); ++} ++#define napi_complete _kc_napi_complete ++#else /* NAPI */ ++ ++/* The following definitions are only used if we don't support NAPI at all. */ ++ ++static inline __kc_netif_napi_add(struct net_device *dev, struct napi_struct *napi, ++ int (*poll)(struct napi_struct *, int), int weight) ++{ ++ dev->poll = poll; ++ dev->weight = weight; ++ napi->poll = poll; ++ napi->weight = weight; ++ napi->dev = dev; ++} ++#define netif_napi_del(_a) do {} while (0) ++#endif /* NAPI */ ++ ++#undef dev_get_by_name ++#define dev_get_by_name(_a, _b) dev_get_by_name(_b) ++#define __netif_subqueue_stopped(_a, _b) netif_subqueue_stopped(_a, _b) ++#ifndef DMA_BIT_MASK ++#define DMA_BIT_MASK(n) (((n) == 64) ? DMA_64BIT_MASK : ((1ULL<<(n))-1)) ++#endif ++ ++#ifdef NETIF_F_TSO6 ++#define skb_is_gso_v6 _kc_skb_is_gso_v6 ++static inline int _kc_skb_is_gso_v6(const struct sk_buff *skb) ++{ ++ return skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6; ++} ++#endif /* NETIF_F_TSO6 */ ++ ++#ifndef KERN_CONT ++#define KERN_CONT "" ++#endif ++#ifndef pr_err ++#define pr_err(fmt, arg...) \ ++ printk(KERN_ERR fmt, ##arg) ++#endif ++ ++#ifndef rounddown_pow_of_two ++#define rounddown_pow_of_two(n) \ ++ __builtin_constant_p(n) ? ( \ ++ (n == 1) ? 
0 : \ ++ (1UL << ilog2(n))) : \ ++ (1UL << (fls_long(n) - 1)) ++#endif ++ ++#ifndef BIT ++#define BIT(nr) (1UL << (nr)) ++#endif ++ ++#else /* < 2.6.24 */ ++#define HAVE_ETHTOOL_GET_SSET_COUNT ++#define HAVE_NETDEV_NAPI_LIST ++#endif /* < 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,24) ) ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) ++#define INCLUDE_PM_QOS_PARAMS_H ++#include ++#else /* >= 3.2.0 */ ++#include ++#endif /* else >= 3.2.0 */ ++#endif /* > 2.6.24 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,25) ) ++#define PM_QOS_CPU_DMA_LATENCY 1 ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,18) ) ++#include ++#define PM_QOS_DEFAULT_VALUE INFINITE_LATENCY ++#define pm_qos_add_requirement(pm_qos_class, name, value) \ ++ set_acceptable_latency(name, value) ++#define pm_qos_remove_requirement(pm_qos_class, name) \ ++ remove_acceptable_latency(name) ++#define pm_qos_update_requirement(pm_qos_class, name, value) \ ++ modify_acceptable_latency(name, value) ++#else ++#define PM_QOS_DEFAULT_VALUE -1 ++#define pm_qos_add_requirement(pm_qos_class, name, value) ++#define pm_qos_remove_requirement(pm_qos_class, name) ++#define pm_qos_update_requirement(pm_qos_class, name, value) { \ ++ if (value != PM_QOS_DEFAULT_VALUE) { \ ++ printk(KERN_WARNING "%s: unable to set PM QoS requirement\n", \ ++ pci_name(adapter->pdev)); \ ++ } \ ++} ++ ++#endif /* > 2.6.18 */ ++ ++#define pci_enable_device_mem(pdev) pci_enable_device(pdev) ++ ++#ifndef DEFINE_PCI_DEVICE_TABLE ++#define DEFINE_PCI_DEVICE_TABLE(_table) struct pci_device_id _table[] ++#endif /* DEFINE_PCI_DEVICE_TABLE */ ++ ++#ifndef strict_strtol ++#define strict_strtol(s, b, r) _kc_strict_strtol(s, b, r) ++static inline int _kc_strict_strtol(const char *buf, unsigned int base, long *res) ++{ ++ /* adapted from strict_strtoul() in 
2.6.25 */ ++ char *tail; ++ long val; ++ size_t len; ++ ++ *res = 0; ++ len = strlen(buf); ++ if (!len) ++ return -EINVAL; ++ val = simple_strtol(buf, &tail, base); ++ if (tail == buf) ++ return -EINVAL; ++ if ((*tail == '\0') || ++ ((len == (size_t)(tail - buf) + 1) && (*tail == '\n'))) { ++ *res = val; ++ return 0; ++ } ++ ++ return -EINVAL; ++} ++#endif ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) ++#ifndef IXGBE_PROCFS ++#define IXGBE_PROCFS ++#endif /* IXGBE_PROCFS */ ++#endif /* >= 2.6.0 */ ++ ++#else /* < 2.6.25 */ ++ ++#ifndef IXGBE_SYSFS ++#define IXGBE_SYSFS ++#endif /* IXGBE_SYSFS */ ++#if IS_ENABLED(CONFIG_HWMON) ++#ifndef IXGBE_HWMON ++#define IXGBE_HWMON ++#endif /* IXGBE_HWMON */ ++#endif /* CONFIG_HWMON */ ++ ++#endif /* < 2.6.25 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) ) ++#ifndef clamp_t ++#define clamp_t(type, val, min, max) ({ \ ++ type __val = (val); \ ++ type __min = (min); \ ++ type __max = (max); \ ++ __val = __val < __min ? __min : __val; \ ++ __val > __max ? 
__max : __val; }) ++#endif /* clamp_t */ ++#undef kzalloc_node ++#define kzalloc_node(_size, _flags, _node) kzalloc(_size, _flags) ++ ++extern void _kc_pci_disable_link_state(struct pci_dev *dev, int state); ++#define pci_disable_link_state(p, s) _kc_pci_disable_link_state(p, s) ++#else /* < 2.6.26 */ ++#define NETDEV_CAN_SET_GSO_MAX_SIZE ++#include ++#define HAVE_NETDEV_VLAN_FEATURES ++#ifndef PCI_EXP_LNKCAP_ASPMS ++#define PCI_EXP_LNKCAP_ASPMS 0x00000c00 /* ASPM Support */ ++#endif /* PCI_EXP_LNKCAP_ASPMS */ ++#endif /* < 2.6.26 */ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,27) ) ++static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, ++ __u32 speed) ++{ ++ ep->speed = (__u16)speed; ++ /* ep->speed_hi = (__u16)(speed >> 16); */ ++} ++#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set ++ ++static inline __u32 _kc_ethtool_cmd_speed(struct ethtool_cmd *ep) ++{ ++ /* no speed_hi before 2.6.27, and probably no need for it yet */ ++ return (__u32)ep->speed; ++} ++#define ethtool_cmd_speed _kc_ethtool_cmd_speed ++ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,15) ) ++#if ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,23)) && defined(CONFIG_PM)) ++#define ANCIENT_PM 1 ++#elif ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,23)) && \ ++ (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26)) && \ ++ defined(CONFIG_PM_SLEEP)) ++#define NEWER_PM 1 ++#endif ++#if defined(ANCIENT_PM) || defined(NEWER_PM) ++#undef device_set_wakeup_enable ++#define device_set_wakeup_enable(dev, val) \ ++ do { \ ++ u16 pmc = 0; \ ++ int pm = pci_find_capability(adapter->pdev, PCI_CAP_ID_PM); \ ++ if (pm) { \ ++ pci_read_config_word(adapter->pdev, pm + PCI_PM_PMC, \ ++ &pmc); \ ++ } \ ++ (dev)->power.can_wakeup = !!(pmc >> 11); \ ++ (dev)->power.should_wakeup = (val && (pmc >> 11)); \ ++ } while (0) ++#endif /* 2.6.15-2.6.22 and CONFIG_PM or 2.6.23-2.6.25 and CONFIG_PM_SLEEP */ ++#endif /* 2.6.15 through 
2.6.27 */ ++#ifndef netif_napi_del ++#define netif_napi_del(_a) do {} while (0) ++#ifdef NAPI ++#ifdef CONFIG_NETPOLL ++#undef netif_napi_del ++#define netif_napi_del(_a) list_del(&(_a)->dev_list); ++#endif ++#endif ++#endif /* netif_napi_del */ ++#ifdef dma_mapping_error ++#undef dma_mapping_error ++#endif ++#define dma_mapping_error(dev, dma_addr) pci_dma_mapping_error(dma_addr) ++ ++#ifdef CONFIG_NETDEVICES_MULTIQUEUE ++#define HAVE_TX_MQ ++#endif ++ ++#ifndef DMA_ATTR_WEAK_ORDERING ++#define DMA_ATTR_WEAK_ORDERING 0 ++#endif ++ ++#ifdef HAVE_TX_MQ ++extern void _kc_netif_tx_stop_all_queues(struct net_device *); ++extern void _kc_netif_tx_wake_all_queues(struct net_device *); ++extern void _kc_netif_tx_start_all_queues(struct net_device *); ++#define netif_tx_stop_all_queues(a) _kc_netif_tx_stop_all_queues(a) ++#define netif_tx_wake_all_queues(a) _kc_netif_tx_wake_all_queues(a) ++#define netif_tx_start_all_queues(a) _kc_netif_tx_start_all_queues(a) ++#undef netif_stop_subqueue ++#define netif_stop_subqueue(_ndev,_qi) do { \ ++ if (netif_is_multiqueue((_ndev))) \ ++ netif_stop_subqueue((_ndev), (_qi)); \ ++ else \ ++ netif_stop_queue((_ndev)); \ ++ } while (0) ++#undef netif_start_subqueue ++#define netif_start_subqueue(_ndev,_qi) do { \ ++ if (netif_is_multiqueue((_ndev))) \ ++ netif_start_subqueue((_ndev), (_qi)); \ ++ else \ ++ netif_start_queue((_ndev)); \ ++ } while (0) ++#else /* HAVE_TX_MQ */ ++#define netif_tx_stop_all_queues(a) netif_stop_queue(a) ++#define netif_tx_wake_all_queues(a) netif_wake_queue(a) ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,12) ) ++#define netif_tx_start_all_queues(a) netif_start_queue(a) ++#else ++#define netif_tx_start_all_queues(a) do {} while (0) ++#endif ++#define netif_stop_subqueue(_ndev,_qi) netif_stop_queue((_ndev)) ++#define netif_start_subqueue(_ndev,_qi) netif_start_queue((_ndev)) ++#endif /* HAVE_TX_MQ */ ++#ifndef NETIF_F_MULTI_QUEUE ++#define NETIF_F_MULTI_QUEUE 0 ++#define netif_is_multiqueue(a) 0 ++#define 
netif_wake_subqueue(a, b) ++#endif /* NETIF_F_MULTI_QUEUE */ ++ ++#ifndef __WARN_printf ++extern void __kc_warn_slowpath(const char *file, const int line, ++ const char *fmt, ...) __attribute__((format(printf, 3, 4))); ++#define __WARN_printf(arg...) __kc_warn_slowpath(__FILE__, __LINE__, arg) ++#endif /* __WARN_printf */ ++ ++#ifndef WARN ++#define WARN(condition, format...) ({ \ ++ int __ret_warn_on = !!(condition); \ ++ if (unlikely(__ret_warn_on)) \ ++ __WARN_printf(format); \ ++ unlikely(__ret_warn_on); \ ++}) ++#endif /* WARN */ ++#undef HAVE_IXGBE_DEBUG_FS ++#undef HAVE_IGB_DEBUG_FS ++#else /* < 2.6.27 */ ++#define ethtool_cmd_speed_set _kc_ethtool_cmd_speed_set ++static inline void _kc_ethtool_cmd_speed_set(struct ethtool_cmd *ep, ++ __u32 speed) ++{ ++ ep->speed = (__u16)(speed & 0xFFFF); ++ ep->speed_hi = (__u16)(speed >> 16); ++} ++#define HAVE_TX_MQ ++#define HAVE_NETDEV_SELECT_QUEUE ++#ifdef CONFIG_DEBUG_FS ++#define HAVE_IXGBE_DEBUG_FS ++#define HAVE_IGB_DEBUG_FS ++#endif /* CONFIG_DEBUG_FS */ ++#endif /* < 2.6.27 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,28) ) ++#define pci_ioremap_bar(pdev, bar) ioremap(pci_resource_start(pdev, bar), \ ++ pci_resource_len(pdev, bar)) ++#define pci_wake_from_d3 _kc_pci_wake_from_d3 ++#define pci_prepare_to_sleep _kc_pci_prepare_to_sleep ++extern int _kc_pci_wake_from_d3(struct pci_dev *dev, bool enable); ++extern int _kc_pci_prepare_to_sleep(struct pci_dev *dev); ++#define netdev_alloc_page(a) alloc_page(GFP_ATOMIC) ++#ifndef __skb_queue_head_init ++static inline void __kc_skb_queue_head_init(struct sk_buff_head *list) ++{ ++ list->prev = list->next = (struct sk_buff *)list; ++ list->qlen = 0; ++} ++#define __skb_queue_head_init(_q) __kc_skb_queue_head_init(_q) ++#endif ++ ++#define PCI_EXP_DEVCAP2 36 /* Device Capabilities 2 */ ++#define PCI_EXP_DEVCTL2 40 /* Device Control 2 */ ++ ++#define PCI_EXP_DEVCAP_FLR 0x10000000 /* 
Function Level Reset */ ++#define PCI_EXP_DEVCTL_BCR_FLR 0x8000 /* Bridge Configuration Retry / FLR */ ++ ++#endif /* < 2.6.28 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,29) ) ++#ifndef swap ++#define swap(a, b) \ ++ do { typeof(a) __tmp = (a); (a) = (b); (b) = __tmp; } while (0) ++#endif ++#define pci_request_selected_regions_exclusive(pdev, bars, name) \ ++ pci_request_selected_regions(pdev, bars, name) ++#ifndef CONFIG_NR_CPUS ++#define CONFIG_NR_CPUS 1 ++#endif /* CONFIG_NR_CPUS */ ++#ifndef pcie_aspm_enabled ++#define pcie_aspm_enabled() (1) ++#endif /* pcie_aspm_enabled */ ++ ++#define PCI_EXP_SLTSTA_PDS 0x0040 /* Presence Detect State */ ++ ++#ifndef PCI_EXP_LNKSTA_CLS ++#define PCI_EXP_LNKSTA_CLS 0x000f /* Current Link Speed */ ++#endif ++#ifndef PCI_EXP_LNKSTA_NLW ++#define PCI_EXP_LNKSTA_NLW 0x03f0 /* Negotiated Link Width */ ++#endif ++ ++#ifndef pci_clear_master ++extern void _kc_pci_clear_master(struct pci_dev *dev); ++#define pci_clear_master(dev) _kc_pci_clear_master(dev) ++#endif ++ ++#ifndef PCI_EXP_LNKCTL_ASPMC ++#define PCI_EXP_LNKCTL_ASPMC 0x0003 /* ASPM Control */ ++#endif ++#else /* < 2.6.29 */ ++#ifndef HAVE_NET_DEVICE_OPS ++#define HAVE_NET_DEVICE_OPS ++#endif ++#ifdef CONFIG_DCB ++#define HAVE_PFC_MODE_ENABLE ++#endif /* CONFIG_DCB */ ++#endif /* < 2.6.29 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30) ) ++#define NO_PTP_SUPPORT ++#define skb_rx_queue_recorded(a) false ++#define skb_get_rx_queue(a) 0 ++#define skb_record_rx_queue(a, b) do {} while (0) ++#define skb_tx_hash(n, s) ___kc_skb_tx_hash((n), (s), (n)->real_num_tx_queues) ++#undef CONFIG_FCOE ++#undef CONFIG_FCOE_MODULE ++#ifndef CONFIG_PCI_IOV ++#undef pci_enable_sriov ++#define pci_enable_sriov(a, b) -ENOTSUPP ++#undef pci_disable_sriov ++#define pci_disable_sriov(a) do {} while (0) ++#endif /* 
CONFIG_PCI_IOV */ ++#ifndef pr_cont ++#define pr_cont(fmt, ...) \ ++ printk(KERN_CONT fmt, ##__VA_ARGS__) ++#endif /* pr_cont */ ++static inline void _kc_synchronize_irq(unsigned int a) ++{ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,5,28) ) ++ synchronize_irq(); ++#else /* < 2.5.28 */ ++ synchronize_irq(a); ++#endif /* < 2.5.28 */ ++} ++#undef synchronize_irq ++#define synchronize_irq(a) _kc_synchronize_irq(a) ++ ++#define PCI_EXP_LNKCTL2 48 /* Link Control 2 */ ++ ++#ifdef nr_cpus_node ++#undef nr_cpus_node ++#define nr_cpus_node(node) cpumask_weight(cpumask_of_node(node)) ++#endif ++ ++#else /* < 2.6.30 */ ++#define HAVE_ASPM_QUIRKS ++#endif /* < 2.6.30 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,31) ) ++#define ETH_P_1588 0x88F7 ++#define ETH_P_FIP 0x8914 ++#ifndef netdev_uc_count ++#define netdev_uc_count(dev) ((dev)->uc_count) ++#endif ++#ifndef netdev_for_each_uc_addr ++#define netdev_for_each_uc_addr(uclist, dev) \ ++ for (uclist = dev->uc_list; uclist; uclist = uclist->next) ++#endif ++#ifndef PORT_OTHER ++#define PORT_OTHER 0xff ++#endif ++#ifndef MDIO_PHY_ID_PRTAD ++#define MDIO_PHY_ID_PRTAD 0x03e0 ++#endif ++#ifndef MDIO_PHY_ID_DEVAD ++#define MDIO_PHY_ID_DEVAD 0x001f ++#endif ++#ifndef skb_dst ++#define skb_dst(s) ((s)->dst) ++#endif ++ ++#ifndef SUPPORTED_1000baseKX_Full ++#define SUPPORTED_1000baseKX_Full (1 << 17) ++#endif ++#ifndef SUPPORTED_10000baseKX4_Full ++#define SUPPORTED_10000baseKX4_Full (1 << 18) ++#endif ++#ifndef SUPPORTED_10000baseKR_Full ++#define SUPPORTED_10000baseKR_Full (1 << 19) ++#endif ++ ++#ifndef ADVERTISED_1000baseKX_Full ++#define ADVERTISED_1000baseKX_Full (1 << 17) ++#endif ++#ifndef ADVERTISED_10000baseKX4_Full ++#define ADVERTISED_10000baseKX4_Full (1 << 18) ++#endif ++#ifndef ADVERTISED_10000baseKR_Full ++#define ADVERTISED_10000baseKR_Full (1 << 19) ++#endif ++ ++static inline unsigned long dev_trans_start(struct net_device 
*dev) ++{ ++ return dev->trans_start; ++} ++#else /* < 2.6.31 */ ++#ifndef HAVE_NETDEV_STORAGE_ADDRESS ++#define HAVE_NETDEV_STORAGE_ADDRESS ++#endif ++#ifndef HAVE_NETDEV_HW_ADDR ++#define HAVE_NETDEV_HW_ADDR ++#endif ++#ifndef HAVE_TRANS_START_IN_QUEUE ++#define HAVE_TRANS_START_IN_QUEUE ++#endif ++#ifndef HAVE_INCLUDE_LINUX_MDIO_H ++#define HAVE_INCLUDE_LINUX_MDIO_H ++#endif ++#include ++#endif /* < 2.6.31 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32) ) ++#undef netdev_tx_t ++#define netdev_tx_t int ++#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++#ifndef NETIF_F_FCOE_MTU ++#define NETIF_F_FCOE_MTU (1 << 26) ++#endif ++#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++static inline int _kc_pm_runtime_get_sync() ++{ ++ return 1; ++} ++#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync() ++#else /* 2.6.0 => 2.6.32 */ ++static inline int _kc_pm_runtime_get_sync(struct device __always_unused *dev) ++{ ++ return 1; ++} ++#ifndef pm_runtime_get_sync ++#define pm_runtime_get_sync(dev) _kc_pm_runtime_get_sync(dev) ++#endif ++#endif /* 2.6.0 => 2.6.32 */ ++#ifndef pm_runtime_put ++#define pm_runtime_put(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_put_sync ++#define pm_runtime_put_sync(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_resume ++#define pm_runtime_resume(dev) do {} while (0) ++#endif ++#ifndef pm_schedule_suspend ++#define pm_schedule_suspend(dev, t) do {} while (0) ++#endif ++#ifndef pm_runtime_set_suspended ++#define pm_runtime_set_suspended(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_disable ++#define pm_runtime_disable(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_put_noidle ++#define pm_runtime_put_noidle(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_set_active ++#define pm_runtime_set_active(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_enable ++#define 
pm_runtime_enable(dev) do {} while (0) ++#endif ++#ifndef pm_runtime_get_noresume ++#define pm_runtime_get_noresume(dev) do {} while (0) ++#endif ++#else /* < 2.6.32 */ ++#if (RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RHEL6_NET_DEVICE_EXTENDED ++#endif /* RHEL >= 6.2 && RHEL < 7.0 */ ++#if (RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RHEL6_NET_DEVICE_OPS_EXT ++#define HAVE_NDO_SET_FEATURES ++#endif /* RHEL >= 6.6 && RHEL < 7.0 */ ++#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++#ifndef HAVE_NETDEV_OPS_FCOE_ENABLE ++#define HAVE_NETDEV_OPS_FCOE_ENABLE ++#endif ++#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ ++#ifdef CONFIG_DCB ++#ifndef HAVE_DCBNL_OPS_GETAPP ++#define HAVE_DCBNL_OPS_GETAPP ++#endif ++#endif /* CONFIG_DCB */ ++#include ++/* IOV bad DMA target work arounds require at least this kernel rev support */ ++#define HAVE_PCIE_TYPE ++#endif /* < 2.6.32 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,33) ) ++#ifndef pci_pcie_cap ++#define pci_pcie_cap(pdev) pci_find_capability(pdev, PCI_CAP_ID_EXP) ++#endif ++#ifndef IPV4_FLOW ++#define IPV4_FLOW 0x10 ++#endif /* IPV4_FLOW */ ++#ifndef IPV6_FLOW ++#define IPV6_FLOW 0x11 ++#endif /* IPV6_FLOW */ ++/* Features back-ported to RHEL6 or SLES11 SP1 after 2.6.32 */ ++#if ( (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) || \ ++ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,1,0)) ) ++#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN ++#define HAVE_NETDEV_OPS_FCOE_GETWWN ++#endif ++#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ ++#endif /* RHEL6 or SLES11 SP1 */ ++#ifndef __percpu ++#define __percpu ++#endif /* __percpu */ ++ ++#ifndef 
PORT_DA ++#define PORT_DA PORT_OTHER ++#endif /* PORT_DA */ ++#ifndef PORT_NONE ++#define PORT_NONE PORT_OTHER ++#endif ++ ++#if ((RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) ++#if !defined(CONFIG_X86_32) && !defined(CONFIG_NEED_DMA_MAP_STATE) ++#undef DEFINE_DMA_UNMAP_ADDR ++#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME) dma_addr_t ADDR_NAME ++#undef DEFINE_DMA_UNMAP_LEN ++#define DEFINE_DMA_UNMAP_LEN(LEN_NAME) __u32 LEN_NAME ++#undef dma_unmap_addr ++#define dma_unmap_addr(PTR, ADDR_NAME) ((PTR)->ADDR_NAME) ++#undef dma_unmap_addr_set ++#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL)) ++#undef dma_unmap_len ++#define dma_unmap_len(PTR, LEN_NAME) ((PTR)->LEN_NAME) ++#undef dma_unmap_len_set ++#define dma_unmap_len_set(PTR, LEN_NAME, VAL) (((PTR)->LEN_NAME) = (VAL)) ++#endif /* CONFIG_X86_64 && !CONFIG_NEED_DMA_MAP_STATE */ ++#endif /* RHEL_RELEASE_CODE */ ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,8)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0))) || \ ++ ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))))) ++static inline bool pci_is_pcie(struct pci_dev *dev) ++{ ++ return !!pci_pcie_cap(dev); ++} ++#endif /* RHEL_RELEASE_CODE */ ++ ++#if (!(RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)))) ++#define sk_tx_queue_get(_sk) (-1) ++#define sk_tx_queue_set(_sk, _tx_queue) do {} while(0) ++#endif /* !(RHEL >= 6.2) */ ++ ++#if (RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RHEL6_ETHTOOL_OPS_EXT_STRUCT ++#define HAVE_ETHTOOL_GRXFHINDIR_SIZE ++#define HAVE_ETHTOOL_SET_PHYS_ID ++#define HAVE_ETHTOOL_GET_TS_INFO ++#if (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,5)) ++#define HAVE_ETHTOOL_GSRSSH ++#define HAVE_RHEL6_SRIOV_CONFIGURE 
++#define HAVE_RXFH_NONCONST ++#endif /* RHEL > 6.5 */ ++#endif /* RHEL >= 6.4 && RHEL < 7.0 */ ++ ++#else /* < 2.6.33 */ ++#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++#ifndef HAVE_NETDEV_OPS_FCOE_GETWWN ++#define HAVE_NETDEV_OPS_FCOE_GETWWN ++#endif ++#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ ++#endif /* < 2.6.33 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,34) ) ++#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,0)) ++#ifndef pci_num_vf ++#define pci_num_vf(pdev) _kc_pci_num_vf(pdev) ++extern int _kc_pci_num_vf(struct pci_dev *dev); ++#endif ++#endif /* RHEL_RELEASE_CODE */ ++ ++#ifndef dev_is_pci ++#define dev_is_pci(d) ((d)->bus == &pci_bus_type) ++#endif ++ ++#ifndef ETH_FLAG_NTUPLE ++#define ETH_FLAG_NTUPLE NETIF_F_NTUPLE ++#endif ++ ++#ifndef netdev_mc_count ++#define netdev_mc_count(dev) ((dev)->mc_count) ++#endif ++#ifndef netdev_mc_empty ++#define netdev_mc_empty(dev) (netdev_mc_count(dev) == 0) ++#endif ++#ifndef netdev_for_each_mc_addr ++#define netdev_for_each_mc_addr(mclist, dev) \ ++ for (mclist = dev->mc_list; mclist; mclist = mclist->next) ++#endif ++#ifndef netdev_uc_count ++#define netdev_uc_count(dev) ((dev)->uc.count) ++#endif ++#ifndef netdev_uc_empty ++#define netdev_uc_empty(dev) (netdev_uc_count(dev) == 0) ++#endif ++#ifndef netdev_for_each_uc_addr ++#define netdev_for_each_uc_addr(ha, dev) \ ++ list_for_each_entry(ha, &dev->uc.list, list) ++#endif ++#ifndef dma_set_coherent_mask ++#define dma_set_coherent_mask(dev,mask) \ ++ pci_set_consistent_dma_mask(to_pci_dev(dev),(mask)) ++#endif ++#ifndef pci_dev_run_wake ++#define pci_dev_run_wake(pdev) (0) ++#endif ++ ++/* netdev logging taken from include/linux/netdevice.h */ ++#ifndef netdev_name ++static inline const char *_kc_netdev_name(const struct net_device *dev) ++{ ++ if (dev->reg_state != NETREG_REGISTERED) ++ return "(unregistered net_device)"; ++ return dev->name; ++} ++#define 
netdev_name(netdev) _kc_netdev_name(netdev) ++#endif /* netdev_name */ ++ ++#undef netdev_printk ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++#define netdev_printk(level, netdev, format, args...) \ ++do { \ ++ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ ++ printk(level "%s: " format, pci_name(pdev), ##args); \ ++} while(0) ++#elif ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21) ) ++#define netdev_printk(level, netdev, format, args...) \ ++do { \ ++ struct pci_dev *pdev = _kc_netdev_to_pdev(netdev); \ ++ struct device *dev = pci_dev_to_dev(pdev); \ ++ dev_printk(level, dev, "%s: " format, \ ++ netdev_name(netdev), ##args); \ ++} while(0) ++#else /* 2.6.21 => 2.6.34 */ ++#define netdev_printk(level, netdev, format, args...) \ ++ dev_printk(level, (netdev)->dev.parent, \ ++ "%s: " format, \ ++ netdev_name(netdev), ##args) ++#endif /* <2.6.0 <2.6.21 <2.6.34 */ ++#undef netdev_emerg ++#define netdev_emerg(dev, format, args...) \ ++ netdev_printk(KERN_EMERG, dev, format, ##args) ++#undef netdev_alert ++#define netdev_alert(dev, format, args...) \ ++ netdev_printk(KERN_ALERT, dev, format, ##args) ++#undef netdev_crit ++#define netdev_crit(dev, format, args...) \ ++ netdev_printk(KERN_CRIT, dev, format, ##args) ++#undef netdev_err ++#define netdev_err(dev, format, args...) \ ++ netdev_printk(KERN_ERR, dev, format, ##args) ++#undef netdev_warn ++#define netdev_warn(dev, format, args...) \ ++ netdev_printk(KERN_WARNING, dev, format, ##args) ++#undef netdev_notice ++#define netdev_notice(dev, format, args...) \ ++ netdev_printk(KERN_NOTICE, dev, format, ##args) ++#undef netdev_info ++#define netdev_info(dev, format, args...) \ ++ netdev_printk(KERN_INFO, dev, format, ##args) ++#undef netdev_dbg ++#if defined(DEBUG) ++#define netdev_dbg(__dev, format, args...) \ ++ netdev_printk(KERN_DEBUG, __dev, format, ##args) ++#elif defined(CONFIG_DYNAMIC_DEBUG) ++#define netdev_dbg(__dev, format, args...) 
\ ++do { \ ++ dynamic_dev_dbg((__dev)->dev.parent, "%s: " format, \ ++ netdev_name(__dev), ##args); \ ++} while (0) ++#else /* DEBUG */ ++#define netdev_dbg(__dev, format, args...) \ ++({ \ ++ if (0) \ ++ netdev_printk(KERN_DEBUG, __dev, format, ##args); \ ++ 0; \ ++}) ++#endif /* DEBUG */ ++ ++#undef netif_printk ++#define netif_printk(priv, type, level, dev, fmt, args...) \ ++do { \ ++ if (netif_msg_##type(priv)) \ ++ netdev_printk(level, (dev), fmt, ##args); \ ++} while (0) ++ ++#undef netif_emerg ++#define netif_emerg(priv, type, dev, fmt, args...) \ ++ netif_level(emerg, priv, type, dev, fmt, ##args) ++#undef netif_alert ++#define netif_alert(priv, type, dev, fmt, args...) \ ++ netif_level(alert, priv, type, dev, fmt, ##args) ++#undef netif_crit ++#define netif_crit(priv, type, dev, fmt, args...) \ ++ netif_level(crit, priv, type, dev, fmt, ##args) ++#undef netif_err ++#define netif_err(priv, type, dev, fmt, args...) \ ++ netif_level(err, priv, type, dev, fmt, ##args) ++#undef netif_warn ++#define netif_warn(priv, type, dev, fmt, args...) \ ++ netif_level(warn, priv, type, dev, fmt, ##args) ++#undef netif_notice ++#define netif_notice(priv, type, dev, fmt, args...) \ ++ netif_level(notice, priv, type, dev, fmt, ##args) ++#undef netif_info ++#define netif_info(priv, type, dev, fmt, args...) \ ++ netif_level(info, priv, type, dev, fmt, ##args) ++#undef netif_dbg ++#define netif_dbg(priv, type, dev, fmt, args...) 
\ ++ netif_level(dbg, priv, type, dev, fmt, ##args) ++ ++#ifdef SET_SYSTEM_SLEEP_PM_OPS ++#define HAVE_SYSTEM_SLEEP_PM_OPS ++#endif ++ ++#ifndef for_each_set_bit ++#define for_each_set_bit(bit, addr, size) \ ++ for ((bit) = find_first_bit((addr), (size)); \ ++ (bit) < (size); \ ++ (bit) = find_next_bit((addr), (size), (bit) + 1)) ++#endif /* for_each_set_bit */ ++ ++#ifndef DEFINE_DMA_UNMAP_ADDR ++#define DEFINE_DMA_UNMAP_ADDR DECLARE_PCI_UNMAP_ADDR ++#define DEFINE_DMA_UNMAP_LEN DECLARE_PCI_UNMAP_LEN ++#define dma_unmap_addr pci_unmap_addr ++#define dma_unmap_addr_set pci_unmap_addr_set ++#define dma_unmap_len pci_unmap_len ++#define dma_unmap_len_set pci_unmap_len_set ++#endif /* DEFINE_DMA_UNMAP_ADDR */ ++ ++#if (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(6,3)) ++#ifdef IGB_HWMON ++#ifdef CONFIG_DEBUG_LOCK_ALLOC ++#define sysfs_attr_init(attr) \ ++ do { \ ++ static struct lock_class_key __key; \ ++ (attr)->key = &__key; \ ++ } while (0) ++#else ++#define sysfs_attr_init(attr) do {} while (0) ++#endif /* CONFIG_DEBUG_LOCK_ALLOC */ ++#endif /* IGB_HWMON */ ++#endif /* RHEL_RELEASE_CODE */ ++ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,0) ) ++static inline bool _kc_pm_runtime_suspended() ++{ ++ return false; ++} ++#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended() ++#else /* 2.6.0 => 2.6.34 */ ++static inline bool _kc_pm_runtime_suspended(struct device __always_unused *dev) ++{ ++ return false; ++} ++#ifndef pm_runtime_suspended ++#define pm_runtime_suspended(dev) _kc_pm_runtime_suspended(dev) ++#endif ++#endif /* 2.6.0 => 2.6.34 */ ++ ++#ifndef pci_bus_speed ++/* override pci_bus_speed introduced in 2.6.19 with an expanded enum type */ ++enum _kc_pci_bus_speed { ++ _KC_PCIE_SPEED_2_5GT = 0x14, ++ _KC_PCIE_SPEED_5_0GT = 0x15, ++ _KC_PCIE_SPEED_8_0GT = 0x16, ++ _KC_PCI_SPEED_UNKNOWN = 0xff, ++}; ++#define pci_bus_speed _kc_pci_bus_speed ++#define PCIE_SPEED_2_5GT _KC_PCIE_SPEED_2_5GT ++#define PCIE_SPEED_5_0GT _KC_PCIE_SPEED_5_0GT ++#define 
PCIE_SPEED_8_0GT _KC_PCIE_SPEED_8_0GT ++#define PCI_SPEED_UNKNOWN _KC_PCI_SPEED_UNKNOWN ++#endif /* pci_bus_speed */ ++ ++#else /* < 2.6.34 */ ++#define HAVE_SYSTEM_SLEEP_PM_OPS ++#ifndef HAVE_SET_RX_MODE ++#define HAVE_SET_RX_MODE ++#endif ++ ++#endif /* < 2.6.34 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ) ++ssize_t _kc_simple_write_to_buffer(void *to, size_t available, loff_t *ppos, ++ const void __user *from, size_t count); ++#define simple_write_to_buffer _kc_simple_write_to_buffer ++ ++#ifndef PCI_EXP_LNKSTA_NLW_SHIFT ++#define PCI_EXP_LNKSTA_NLW_SHIFT 4 ++#endif ++ ++#ifndef numa_node_id ++#define numa_node_id() 0 ++#endif ++#ifndef numa_mem_id ++#define numa_mem_id numa_node_id ++#endif ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0))) ++#ifdef HAVE_TX_MQ ++#include ++#ifndef CONFIG_NETDEVICES_MULTIQUEUE ++int _kc_netif_set_real_num_tx_queues(struct net_device *, unsigned int); ++#else /* CONFIG_NETDEVICES_MULTI_QUEUE */ ++static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, ++ unsigned int txq) ++{ ++ dev->egress_subqueue_count = txq; ++ return 0; ++} ++#endif /* CONFIG_NETDEVICES_MULTI_QUEUE */ ++#else /* HAVE_TX_MQ */ ++static inline int _kc_netif_set_real_num_tx_queues(struct net_device __always_unused *dev, ++ unsigned int __always_unused txq) ++{ ++ return 0; ++} ++#endif /* HAVE_TX_MQ */ ++#define netif_set_real_num_tx_queues(dev, txq) \ ++ _kc_netif_set_real_num_tx_queues(dev, txq) ++#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) */ ++#ifndef ETH_FLAG_RXHASH ++#define ETH_FLAG_RXHASH (1<<28) ++#endif /* ETH_FLAG_RXHASH */ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,0)) ++#define HAVE_IRQ_AFFINITY_HINT ++#endif ++struct device_node; ++#else /* < 2.6.35 */ ++#define HAVE_STRUCT_DEVICE_OF_NODE ++#define HAVE_PM_QOS_REQUEST_LIST ++#define HAVE_IRQ_AFFINITY_HINT 
++#include ++#endif /* < 2.6.35 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) ++extern int _kc_ethtool_op_set_flags(struct net_device *, u32, u32); ++#define ethtool_op_set_flags _kc_ethtool_op_set_flags ++extern u32 _kc_ethtool_op_get_flags(struct net_device *); ++#define ethtool_op_get_flags _kc_ethtool_op_get_flags ++ ++enum { ++ WQ_UNBOUND = 0, ++ WQ_RESCUER = 0, ++}; ++ ++#ifdef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS ++#ifdef NET_IP_ALIGN ++#undef NET_IP_ALIGN ++#endif ++#define NET_IP_ALIGN 0 ++#endif /* CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS */ ++ ++#ifdef NET_SKB_PAD ++#undef NET_SKB_PAD ++#endif ++ ++#if (L1_CACHE_BYTES > 32) ++#define NET_SKB_PAD L1_CACHE_BYTES ++#else ++#define NET_SKB_PAD 32 ++#endif ++ ++static inline struct sk_buff *_kc_netdev_alloc_skb_ip_align(struct net_device *dev, ++ unsigned int length) ++{ ++ struct sk_buff *skb; ++ ++ skb = alloc_skb(length + NET_SKB_PAD + NET_IP_ALIGN, GFP_ATOMIC); ++ if (skb) { ++#if (NET_IP_ALIGN + NET_SKB_PAD) ++ skb_reserve(skb, NET_IP_ALIGN + NET_SKB_PAD); ++#endif ++ skb->dev = dev; ++ } ++ return skb; ++} ++ ++#ifdef netdev_alloc_skb_ip_align ++#undef netdev_alloc_skb_ip_align ++#endif ++#define netdev_alloc_skb_ip_align(n, l) _kc_netdev_alloc_skb_ip_align(n, l) ++ ++#undef netif_level ++#define netif_level(level, priv, type, dev, fmt, args...) 
\ ++do { \ ++ if (netif_msg_##type(priv)) \ ++ netdev_##level(dev, fmt, ##args); \ ++} while (0) ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3))) ++#undef usleep_range ++#define usleep_range(min, max) msleep(DIV_ROUND_UP(min, 1000)) ++#endif ++ ++#define u64_stats_update_begin(a) do { } while(0) ++#define u64_stats_update_end(a) do { } while(0) ++#define u64_stats_fetch_begin(a) do { } while(0) ++#define u64_stats_fetch_retry_bh(a,b) (0) ++#define u64_stats_fetch_begin_bh(a) (0) ++ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,1)) ++#define HAVE_8021P_SUPPORT ++#endif ++ ++/* RHEL6.4 and SLES11sp2 backported skb_tx_timestamp */ ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ ++ !(SLE_VERSION_CODE >= SLE_VERSION(11,2,0))) ++static inline void skb_tx_timestamp(struct sk_buff __always_unused *skb) ++{ ++ return; ++} ++#endif ++ ++#else /* < 2.6.36 */ ++ ++#define msleep(x) do { if (x > 20) \ ++ msleep(x); \ ++ else \ ++ usleep_range(1000 * x, 2000 * x); \ ++ } while (0) ++ ++#define HAVE_PM_QOS_REQUEST_ACTIVE ++#define HAVE_8021P_SUPPORT ++#define HAVE_NDO_GET_STATS64 ++#endif /* < 2.6.36 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,37) ) ++#define HAVE_NON_CONST_PCI_DRIVER_NAME ++#ifndef netif_set_real_num_tx_queues ++static inline int _kc_netif_set_real_num_tx_queues(struct net_device *dev, ++ unsigned int txq) ++{ ++ netif_set_real_num_tx_queues(dev, txq); ++ return 0; ++} ++#define netif_set_real_num_tx_queues(dev, txq) \ ++ _kc_netif_set_real_num_tx_queues(dev, txq) ++#endif ++#ifndef netif_set_real_num_rx_queues ++static inline int __kc_netif_set_real_num_rx_queues(struct net_device __always_unused *dev, ++ unsigned int __always_unused rxq) ++{ ++ return 0; ++} ++#define netif_set_real_num_rx_queues(dev, rxq) \ ++ __kc_netif_set_real_num_rx_queues((dev), (rxq)) ++#endif ++#ifndef 
ETHTOOL_RXNTUPLE_ACTION_CLEAR ++#define ETHTOOL_RXNTUPLE_ACTION_CLEAR (-2) ++#endif ++#ifndef VLAN_N_VID ++#define VLAN_N_VID VLAN_GROUP_ARRAY_LEN ++#endif /* VLAN_N_VID */ ++#ifndef ETH_FLAG_TXVLAN ++#define ETH_FLAG_TXVLAN (1 << 7) ++#endif /* ETH_FLAG_TXVLAN */ ++#ifndef ETH_FLAG_RXVLAN ++#define ETH_FLAG_RXVLAN (1 << 8) ++#endif /* ETH_FLAG_RXVLAN */ ++ ++#define WQ_MEM_RECLAIM WQ_RESCUER ++ ++static inline void _kc_skb_checksum_none_assert(struct sk_buff *skb) ++{ ++ WARN_ON(skb->ip_summed != CHECKSUM_NONE); ++} ++#define skb_checksum_none_assert(skb) _kc_skb_checksum_none_assert(skb) ++ ++static inline void *_kc_vzalloc_node(unsigned long size, int node) ++{ ++ void *addr = vmalloc_node(size, node); ++ if (addr) ++ memset(addr, 0, size); ++ return addr; ++} ++#define vzalloc_node(_size, _node) _kc_vzalloc_node(_size, _node) ++ ++static inline void *_kc_vzalloc(unsigned long size) ++{ ++ void *addr = vmalloc(size); ++ if (addr) ++ memset(addr, 0, size); ++ return addr; ++} ++#define vzalloc(_size) _kc_vzalloc(_size) ++ ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(5,7)) || \ ++ (RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,0))) ++static inline __be16 vlan_get_protocol(const struct sk_buff *skb) ++{ ++ if (vlan_tx_tag_present(skb) || ++ skb->protocol != cpu_to_be16(ETH_P_8021Q)) ++ return skb->protocol; ++ ++ if (skb_headlen(skb) < sizeof(struct vlan_ethhdr)) ++ return 0; ++ ++ return ((struct vlan_ethhdr*)skb->data)->h_vlan_encapsulated_proto; ++} ++#endif /* !RHEL5.7+ || RHEL6.0 */ ++ ++#ifdef HAVE_HW_TIME_STAMP ++#define SKBTX_HW_TSTAMP (1 << 0) ++#define SKBTX_IN_PROGRESS (1 << 2) ++#define SKB_SHARED_TX_IS_UNION ++#endif ++ ++#ifndef device_wakeup_enable ++#define device_wakeup_enable(dev) device_set_wakeup_enable(dev, true) ++#endif ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,18) ) ++#ifndef HAVE_VLAN_RX_REGISTER ++#define HAVE_VLAN_RX_REGISTER ++#endif ++#endif /* > 2.4.18 */ ++#endif /* < 2.6.37 */ ++ 
++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,38) ) ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22) ) ++#define skb_checksum_start_offset(skb) skb_transport_offset(skb) ++#else /* 2.6.22 -> 2.6.37 */ ++static inline int _kc_skb_checksum_start_offset(const struct sk_buff *skb) ++{ ++ return skb->csum_start - skb_headroom(skb); ++} ++#define skb_checksum_start_offset(skb) _kc_skb_checksum_start_offset(skb) ++#endif /* 2.6.22 -> 2.6.37 */ ++#if IS_ENABLED(CONFIG_DCB) ++#ifndef IEEE_8021QAZ_MAX_TCS ++#define IEEE_8021QAZ_MAX_TCS 8 ++#endif ++#ifndef DCB_CAP_DCBX_HOST ++#define DCB_CAP_DCBX_HOST 0x01 ++#endif ++#ifndef DCB_CAP_DCBX_LLD_MANAGED ++#define DCB_CAP_DCBX_LLD_MANAGED 0x02 ++#endif ++#ifndef DCB_CAP_DCBX_VER_CEE ++#define DCB_CAP_DCBX_VER_CEE 0x04 ++#endif ++#ifndef DCB_CAP_DCBX_VER_IEEE ++#define DCB_CAP_DCBX_VER_IEEE 0x08 ++#endif ++#ifndef DCB_CAP_DCBX_STATIC ++#define DCB_CAP_DCBX_STATIC 0x10 ++#endif ++#endif /* CONFIG_DCB */ ++#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,2)) ++#define CONFIG_XPS ++#endif /* RHEL_RELEASE_VERSION(6,2) */ ++#endif /* < 2.6.38 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,39) ) ++#ifndef TC_BITMASK ++#define TC_BITMASK 15 ++#endif ++#ifndef NETIF_F_RXCSUM ++#define NETIF_F_RXCSUM (1 << 29) ++#endif ++#ifndef skb_queue_reverse_walk_safe ++#define skb_queue_reverse_walk_safe(queue, skb, tmp) \ ++ for (skb = (queue)->prev, tmp = skb->prev; \ ++ skb != (struct sk_buff *)(queue); \ ++ skb = tmp, tmp = skb->prev) ++#endif ++#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++#ifndef FCOE_MTU ++#define FCOE_MTU 2158 ++#endif ++#endif ++#if IS_ENABLED(CONFIG_DCB) ++#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE ++#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 ++#endif ++#endif ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4))) 
++#define kstrtoul(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) ++#define kstrtouint(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) ++#define kstrtou32(a, b, c) ((*(c)) = simple_strtoul((a), NULL, (b)), 0) ++#endif /* !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) */ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0))) ++extern u16 ___kc_skb_tx_hash(struct net_device *, const struct sk_buff *, u16); ++#define __skb_tx_hash(n, s, q) ___kc_skb_tx_hash((n), (s), (q)) ++extern u8 _kc_netdev_get_num_tc(struct net_device *dev); ++#define netdev_get_num_tc(dev) _kc_netdev_get_num_tc(dev) ++extern int _kc_netdev_set_num_tc(struct net_device *dev, u8 num_tc); ++#define netdev_set_num_tc(dev, tc) _kc_netdev_set_num_tc((dev), (tc)) ++#define netdev_reset_tc(dev) _kc_netdev_set_num_tc((dev), 0) ++#define netdev_set_tc_queue(dev, tc, cnt, off) do {} while (0) ++extern u8 _kc_netdev_get_prio_tc_map(struct net_device *dev, u8 up); ++#define netdev_get_prio_tc_map(dev, up) _kc_netdev_get_prio_tc_map(dev, up) ++#define netdev_set_prio_tc_map(dev, up, tc) do {} while (0) ++#else /* RHEL6.1 or greater */ ++#ifndef HAVE_MQPRIO ++#define HAVE_MQPRIO ++#endif /* HAVE_MQPRIO */ ++#if IS_ENABLED(CONFIG_DCB) ++#ifndef HAVE_DCBNL_IEEE ++#define HAVE_DCBNL_IEEE ++#ifndef IEEE_8021QAZ_TSA_STRICT ++#define IEEE_8021QAZ_TSA_STRICT 0 ++#endif ++#ifndef IEEE_8021QAZ_TSA_ETS ++#define IEEE_8021QAZ_TSA_ETS 2 ++#endif ++#ifndef IEEE_8021QAZ_APP_SEL_ETHERTYPE ++#define IEEE_8021QAZ_APP_SEL_ETHERTYPE 1 ++#endif ++#endif ++#endif /* CONFIG_DCB */ ++#endif /* !(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(6,0)) */ ++ ++#ifndef udp_csum ++#define udp_csum __kc_udp_csum ++static inline __wsum __kc_udp_csum(struct sk_buff *skb) ++{ ++ __wsum csum = csum_partial(skb_transport_header(skb), ++ sizeof(struct udphdr), skb->csum); ++ ++ for (skb = skb_shinfo(skb)->frag_list; skb; skb = skb->next) { ++ csum = csum_add(csum, skb->csum); ++ } ++ return csum; ++} ++#endif /* 
udp_csum */ ++#else /* < 2.6.39 */ ++#if defined(CONFIG_FCOE) || defined(CONFIG_FCOE_MODULE) ++#ifndef HAVE_NETDEV_OPS_FCOE_DDP_TARGET ++#define HAVE_NETDEV_OPS_FCOE_DDP_TARGET ++#endif ++#endif /* CONFIG_FCOE || CONFIG_FCOE_MODULE */ ++#ifndef HAVE_MQPRIO ++#define HAVE_MQPRIO ++#endif ++#ifndef HAVE_SETUP_TC ++#define HAVE_SETUP_TC ++#endif ++#ifdef CONFIG_DCB ++#ifndef HAVE_DCBNL_IEEE ++#define HAVE_DCBNL_IEEE ++#endif ++#endif /* CONFIG_DCB */ ++#ifndef HAVE_NDO_SET_FEATURES ++#define HAVE_NDO_SET_FEATURES ++#endif ++#define HAVE_IRQ_AFFINITY_NOTIFY ++#endif /* < 2.6.39 */ ++ ++/*****************************************************************************/ ++/* use < 2.6.40 because of a Fedora 15 kernel update where they ++ * updated the kernel version to 2.6.40.x and they back-ported 3.0 features ++ * like set_phys_id for ethtool. ++ */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,40) ) ++#ifdef ETHTOOL_GRXRINGS ++#ifndef FLOW_EXT ++#define FLOW_EXT 0x80000000 ++union _kc_ethtool_flow_union { ++ struct ethtool_tcpip4_spec tcp_ip4_spec; ++ struct ethtool_usrip4_spec usr_ip4_spec; ++ __u8 hdata[60]; ++}; ++struct _kc_ethtool_flow_ext { ++ __be16 vlan_etype; ++ __be16 vlan_tci; ++ __be32 data[2]; ++}; ++struct _kc_ethtool_rx_flow_spec { ++ __u32 flow_type; ++ union _kc_ethtool_flow_union h_u; ++ struct _kc_ethtool_flow_ext h_ext; ++ union _kc_ethtool_flow_union m_u; ++ struct _kc_ethtool_flow_ext m_ext; ++ __u64 ring_cookie; ++ __u32 location; ++}; ++#define ethtool_rx_flow_spec _kc_ethtool_rx_flow_spec ++#endif /* FLOW_EXT */ ++#endif ++ ++#define pci_disable_link_state_locked pci_disable_link_state ++ ++#ifndef PCI_LTR_VALUE_MASK ++#define PCI_LTR_VALUE_MASK 0x000003ff ++#endif ++#ifndef PCI_LTR_SCALE_MASK ++#define PCI_LTR_SCALE_MASK 0x00001c00 ++#endif ++#ifndef PCI_LTR_SCALE_SHIFT ++#define PCI_LTR_SCALE_SHIFT 10 ++#endif ++ ++#else /* < 2.6.40 */ ++#define HAVE_ETHTOOL_SET_PHYS_ID ++#endif /* < 2.6.40 */ ++ 
++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,0,0) ) ++#define USE_LEGACY_PM_SUPPORT ++#ifndef kfree_rcu ++#define kfree_rcu(_ptr, _rcu_head) kfree(_ptr) ++#endif /* kfree_rcu */ ++#ifndef kstrtol_from_user ++#define kstrtol_from_user(s, c, b, r) _kc_kstrtol_from_user(s, c, b, r) ++static inline int _kc_kstrtol_from_user(const char __user *s, size_t count, ++ unsigned int base, long *res) ++{ ++ /* sign, base 2 representation, newline, terminator */ ++ char buf[1 + sizeof(long) * 8 + 1 + 1]; ++ ++ count = min(count, sizeof(buf) - 1); ++ if (copy_from_user(buf, s, count)) ++ return -EFAULT; ++ buf[count] = '\0'; ++ return strict_strtol(buf, base, res); ++} ++#endif ++ ++/* 20000base_blah_full Supported and Advertised Registers */ ++#define SUPPORTED_20000baseMLD2_Full (1 << 21) ++#define SUPPORTED_20000baseKR2_Full (1 << 22) ++#define ADVERTISED_20000baseMLD2_Full (1 << 21) ++#define ADVERTISED_20000baseKR2_Full (1 << 22) ++#endif /* < 3.0.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,1,0) ) ++#ifndef __netdev_alloc_skb_ip_align ++#define __netdev_alloc_skb_ip_align(d,l,_g) netdev_alloc_skb_ip_align(d,l) ++#endif /* __netdev_alloc_skb_ip_align */ ++#define dcb_ieee_setapp(dev, app) dcb_setapp(dev, app) ++#define dcb_ieee_delapp(dev, app) 0 ++#define dcb_ieee_getapp_mask(dev, app) (1 << app->priority) ++ ++/* 1000BASE-T Control register */ ++#define CTL1000_AS_MASTER 0x0800 ++#define CTL1000_ENABLE_MASTER 0x1000 ++ ++/* kernels less than 3.0.0 don't have this */ ++#ifndef ETH_P_8021AD ++#define ETH_P_8021AD 0x88A8 ++#endif ++ ++/* Stub definition for !CONFIG_OF is introduced later */ ++#ifdef CONFIG_OF ++static inline struct device_node * ++pci_device_to_OF_node(struct pci_dev __maybe_unused *pdev) ++{ ++#ifdef HAVE_STRUCT_DEVICE_OF_NODE ++ return pdev ? 
pdev->dev.of_node : NULL; ++#else ++ return NULL; ++#endif /* !HAVE_STRUCT_DEVICE_OF_NODE */ ++} ++#endif /* CONFIG_OF */ ++#else /* < 3.1.0 */ ++#ifndef HAVE_DCBNL_IEEE_DELAPP ++#define HAVE_DCBNL_IEEE_DELAPP ++#endif ++#endif /* < 3.1.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,2,0) ) ++#ifndef dma_zalloc_coherent ++#define dma_zalloc_coherent(d, s, h, f) _kc_dma_zalloc_coherent(d, s, h, f) ++static inline void *_kc_dma_zalloc_coherent(struct device *dev, size_t size, ++ dma_addr_t *dma_handle, gfp_t flag) ++{ ++ void *ret = dma_alloc_coherent(dev, size, dma_handle, flag); ++ if (ret) ++ memset(ret, 0, size); ++ return ret; ++} ++#endif ++#ifdef ETHTOOL_GRXRINGS ++#define HAVE_ETHTOOL_GET_RXNFC_VOID_RULE_LOCS ++#endif /* ETHTOOL_GRXRINGS */ ++ ++#ifndef skb_frag_size ++#define skb_frag_size(frag) _kc_skb_frag_size(frag) ++static inline unsigned int _kc_skb_frag_size(const skb_frag_t *frag) ++{ ++ return frag->size; ++} ++#endif /* skb_frag_size */ ++ ++#ifndef skb_frag_size_sub ++#define skb_frag_size_sub(frag, delta) _kc_skb_frag_size_sub(frag, delta) ++static inline void _kc_skb_frag_size_sub(skb_frag_t *frag, int delta) ++{ ++ frag->size -= delta; ++} ++#endif /* skb_frag_size_sub */ ++ ++#ifndef skb_frag_page ++#define skb_frag_page(frag) _kc_skb_frag_page(frag) ++static inline struct page *_kc_skb_frag_page(const skb_frag_t *frag) ++{ ++ return frag->page; ++} ++#endif /* skb_frag_page */ ++ ++#ifndef skb_frag_address ++#define skb_frag_address(frag) _kc_skb_frag_address(frag) ++static inline void *_kc_skb_frag_address(const skb_frag_t *frag) ++{ ++ return page_address(skb_frag_page(frag)) + frag->page_offset; ++} ++#endif /* skb_frag_address */ ++ ++#ifndef skb_frag_dma_map ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,0) ) ++#include ++#endif ++#define skb_frag_dma_map(dev,frag,offset,size,dir) \ ++ _kc_skb_frag_dma_map(dev,frag,offset,size,dir) ++static inline 
dma_addr_t _kc_skb_frag_dma_map(struct device *dev, ++ const skb_frag_t *frag, ++ size_t offset, size_t size, ++ enum dma_data_direction dir) ++{ ++ return dma_map_page(dev, skb_frag_page(frag), ++ frag->page_offset + offset, size, dir); ++} ++#endif /* skb_frag_dma_map */ ++ ++#ifndef __skb_frag_unref ++#define __skb_frag_unref(frag) __kc_skb_frag_unref(frag) ++static inline void __kc_skb_frag_unref(skb_frag_t *frag) ++{ ++ put_page(skb_frag_page(frag)); ++} ++#endif /* __skb_frag_unref */ ++ ++#ifndef SPEED_UNKNOWN ++#define SPEED_UNKNOWN -1 ++#endif ++#ifndef DUPLEX_UNKNOWN ++#define DUPLEX_UNKNOWN 0xff ++#endif ++#if ((RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,3)) ||\ ++ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) ++#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED ++#define HAVE_PCI_DEV_FLAGS_ASSIGNED ++#endif ++#endif ++#else /* < 3.2.0 */ ++#ifndef HAVE_PCI_DEV_FLAGS_ASSIGNED ++#define HAVE_PCI_DEV_FLAGS_ASSIGNED ++#define HAVE_VF_SPOOFCHK_CONFIGURE ++#endif ++#ifndef HAVE_SKB_L4_RXHASH ++#define HAVE_SKB_L4_RXHASH ++#endif ++#define HAVE_IOMMU_PRESENT ++#define HAVE_PM_QOS_REQUEST_LIST_NEW ++#endif /* < 3.2.0 */ ++ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE == RHEL_RELEASE_VERSION(6,2)) ++#undef ixgbe_get_netdev_tc_txq ++#define ixgbe_get_netdev_tc_txq(dev, tc) (&netdev_extended(dev)->qos_data.tc_to_txq[tc]) ++#endif ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0) ) ++/* NOTE: the order of parameters to _kc_alloc_workqueue() is different than ++ * alloc_workqueue() to avoid compiler warning from -Wvarargs ++ */ ++static inline struct workqueue_struct * __attribute__ ((format(printf, 3, 4))) ++_kc_alloc_workqueue(__maybe_unused int flags, __maybe_unused int max_active, ++ const char *fmt, ...) 
++{ ++ struct workqueue_struct *wq; ++ va_list args, temp; ++ unsigned int len; ++ char *p; ++ ++ va_start(args, fmt); ++ va_copy(temp, args); ++ len = vsnprintf(NULL, 0, fmt, temp); ++ va_end(temp); ++ ++ p = kmalloc(len + 1, GFP_KERNEL); ++ if (!p) { ++ va_end(args); ++ return NULL; ++ } ++ ++ vsnprintf(p, len + 1, fmt, args); ++ va_end(args); ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,36) ) ++ wq = create_workqueue(p); ++#else ++ wq = alloc_workqueue(p, flags, max_active); ++#endif ++ kfree(p); ++ ++ return wq; ++} ++#ifdef alloc_workqueue ++#undef alloc_workqueue ++#endif ++#define alloc_workqueue(fmt, flags, max_active, args...) \ ++ _kc_alloc_workqueue(flags, max_active, fmt, ##args) ++ ++#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) ++typedef u32 netdev_features_t; ++#endif ++#undef PCI_EXP_TYPE_RC_EC ++#define PCI_EXP_TYPE_RC_EC 0xa /* Root Complex Event Collector */ ++#ifndef CONFIG_BQL ++#define netdev_tx_completed_queue(_q, _p, _b) do {} while (0) ++#define netdev_completed_queue(_n, _p, _b) do {} while (0) ++#define netdev_tx_sent_queue(_q, _b) do {} while (0) ++#define netdev_sent_queue(_n, _b) do {} while (0) ++#define netdev_tx_reset_queue(_q) do {} while (0) ++#define netdev_reset_queue(_n) do {} while (0) ++#endif ++#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++#define HAVE_ETHTOOL_GRXFHINDIR_SIZE ++#endif /* SLE_VERSION(11,3,0) */ ++#define netif_xmit_stopped(_q) netif_tx_queue_stopped(_q) ++#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) ++static inline int __kc_ipv6_skip_exthdr(const struct sk_buff *skb, int start, ++ u8 *nexthdrp, ++ __be16 __always_unused *frag_offp) ++{ ++ return ipv6_skip_exthdr(skb, start, nexthdrp); ++} ++#undef ipv6_skip_exthdr ++#define ipv6_skip_exthdr(a,b,c,d) __kc_ipv6_skip_exthdr((a), (b), (c), (d)) ++#endif /* !SLES11sp4 or greater */ ++ ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4)) && \ ++ !(SLE_VERSION_CODE >= SLE_VERSION(11,3,0))) ++static inline 
u32 ethtool_rxfh_indir_default(u32 index, u32 n_rx_rings) ++{ ++ return index % n_rx_rings; ++} ++#endif ++ ++#else /* ! < 3.3.0 */ ++#define HAVE_ETHTOOL_GRXFHINDIR_SIZE ++#define HAVE_INT_NDO_VLAN_RX_ADD_VID ++#ifdef ETHTOOL_SRXNTUPLE ++#undef ETHTOOL_SRXNTUPLE ++#endif ++#endif /* < 3.3.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,4,0) ) ++#ifndef NETIF_F_RXFCS ++#define NETIF_F_RXFCS 0 ++#endif /* NETIF_F_RXFCS */ ++#ifndef NETIF_F_RXALL ++#define NETIF_F_RXALL 0 ++#endif /* NETIF_F_RXALL */ ++ ++#if !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++#define NUMTCS_RETURNS_U8 ++ ++int _kc_simple_open(struct inode *inode, struct file *file); ++#define simple_open _kc_simple_open ++#endif /* !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) */ ++ ++#ifndef skb_add_rx_frag ++#define skb_add_rx_frag _kc_skb_add_rx_frag ++extern void _kc_skb_add_rx_frag(struct sk_buff *, int, struct page *, ++ int, int, unsigned int); ++#endif ++#ifdef NET_ADDR_RANDOM ++#define eth_hw_addr_random(N) do { \ ++ eth_random_addr(N->dev_addr); \ ++ N->addr_assign_type |= NET_ADDR_RANDOM; \ ++ } while (0) ++#else /* NET_ADDR_RANDOM */ ++#define eth_hw_addr_random(N) eth_random_addr(N->dev_addr) ++#endif /* NET_ADDR_RANDOM */ ++ ++#ifndef for_each_set_bit_from ++#define for_each_set_bit_from(bit, addr, size) \ ++ for ((bit) = find_next_bit((addr), (size), (bit)); \ ++ (bit) < (size); \ ++ (bit) = find_next_bit((addr), (size), (bit) + 1)) ++#endif /* for_each_set_bit_from */ ++ ++#else /* < 3.4.0 */ ++#include ++#endif /* >= 3.4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) || \ ++ ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ++#if !defined(NO_PTP_SUPPORT) && IS_ENABLED(CONFIG_PTP_1588_CLOCK) ++#define HAVE_PTP_1588_CLOCK ++#endif /* !NO_PTP_SUPPORT && 
IS_ENABLED(CONFIG_PTP_1588_CLOCK) */ ++#endif /* >= 3.0.0 || RHEL_RELEASE > 6.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,5,0) ) ++ ++#ifndef BITS_PER_LONG_LONG ++#define BITS_PER_LONG_LONG 64 ++#endif ++ ++#ifndef ether_addr_equal ++static inline bool __kc_ether_addr_equal(const u8 *addr1, const u8 *addr2) ++{ ++ return !compare_ether_addr(addr1, addr2); ++} ++#define ether_addr_equal(_addr1, _addr2) __kc_ether_addr_equal((_addr1),(_addr2)) ++#endif ++ ++/* Definitions for !CONFIG_OF_NET are introduced in 3.10 */ ++#ifdef CONFIG_OF_NET ++static inline int of_get_phy_mode(struct device_node __always_unused *np) ++{ ++ return -ENODEV; ++} ++ ++static inline const void * ++of_get_mac_address(struct device_node __always_unused *np) ++{ ++ return NULL; ++} ++#endif ++#else ++#include ++#define HAVE_FDB_OPS ++#define HAVE_ETHTOOL_GET_TS_INFO ++#endif /* < 3.5.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0) ) ++#define PCI_EXP_LNKCAP2 44 /* Link Capability 2 */ ++ ++#ifndef MDIO_EEE_100TX ++#define MDIO_EEE_100TX 0x0002 /* 100TX EEE cap */ ++#endif ++#ifndef MDIO_EEE_1000T ++#define MDIO_EEE_1000T 0x0004 /* 1000T EEE cap */ ++#endif ++#ifndef MDIO_EEE_10GT ++#define MDIO_EEE_10GT 0x0008 /* 10GT EEE cap */ ++#endif ++#ifndef MDIO_EEE_1000KX ++#define MDIO_EEE_1000KX 0x0010 /* 1000KX EEE cap */ ++#endif ++#ifndef MDIO_EEE_10GKX4 ++#define MDIO_EEE_10GKX4 0x0020 /* 10G KX4 EEE cap */ ++#endif ++#ifndef MDIO_EEE_10GKR ++#define MDIO_EEE_10GKR 0x0040 /* 10G KR EEE cap */ ++#endif ++ ++#ifndef __GFP_MEMALLOC ++#define __GFP_MEMALLOC 0 ++#endif ++ ++#ifndef eth_broadcast_addr ++#define eth_broadcast_addr _kc_eth_broadcast_addr ++static inline void _kc_eth_broadcast_addr(u8 *addr) ++{ ++ memset(addr, 0xff, ETH_ALEN); ++} ++#endif ++ ++#ifndef eth_random_addr ++#define eth_random_addr 
_kc_eth_random_addr ++static inline void _kc_eth_random_addr(u8 *addr) ++{ ++ get_random_bytes(addr, ETH_ALEN); ++ addr[0] &= 0xfe; /* clear multicast */ ++ addr[0] |= 0x02; /* set local assignment */ ++} ++#endif /* eth_random_addr */ ++ ++#ifndef DMA_ATTR_SKIP_CPU_SYNC ++#define DMA_ATTR_SKIP_CPU_SYNC 0 ++#endif ++#else /* < 3.6.0 */ ++#define HAVE_STRUCT_PAGE_PFMEMALLOC ++#endif /* < 3.6.0 */ ++ ++/******************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,7,0) ) ++#ifndef ADVERTISED_40000baseKR4_Full ++/* these defines were all added in one commit, so should be safe ++ * to trigger activiation on one define ++ */ ++#define SUPPORTED_40000baseKR4_Full (1 << 23) ++#define SUPPORTED_40000baseCR4_Full (1 << 24) ++#define SUPPORTED_40000baseSR4_Full (1 << 25) ++#define SUPPORTED_40000baseLR4_Full (1 << 26) ++#define ADVERTISED_40000baseKR4_Full (1 << 23) ++#define ADVERTISED_40000baseCR4_Full (1 << 24) ++#define ADVERTISED_40000baseSR4_Full (1 << 25) ++#define ADVERTISED_40000baseLR4_Full (1 << 26) ++#endif ++ ++#ifndef mmd_eee_cap_to_ethtool_sup_t ++/** ++ * mmd_eee_cap_to_ethtool_sup_t ++ * @eee_cap: value of the MMD EEE Capability register ++ * ++ * A small helper function that translates MMD EEE Capability (3.20) bits ++ * to ethtool supported settings. 
++ */ ++static inline u32 __kc_mmd_eee_cap_to_ethtool_sup_t(u16 eee_cap) ++{ ++ u32 supported = 0; ++ ++ if (eee_cap & MDIO_EEE_100TX) ++ supported |= SUPPORTED_100baseT_Full; ++ if (eee_cap & MDIO_EEE_1000T) ++ supported |= SUPPORTED_1000baseT_Full; ++ if (eee_cap & MDIO_EEE_10GT) ++ supported |= SUPPORTED_10000baseT_Full; ++ if (eee_cap & MDIO_EEE_1000KX) ++ supported |= SUPPORTED_1000baseKX_Full; ++ if (eee_cap & MDIO_EEE_10GKX4) ++ supported |= SUPPORTED_10000baseKX4_Full; ++ if (eee_cap & MDIO_EEE_10GKR) ++ supported |= SUPPORTED_10000baseKR_Full; ++ ++ return supported; ++} ++#define mmd_eee_cap_to_ethtool_sup_t(eee_cap) \ ++ __kc_mmd_eee_cap_to_ethtool_sup_t(eee_cap) ++#endif /* mmd_eee_cap_to_ethtool_sup_t */ ++ ++#ifndef mmd_eee_adv_to_ethtool_adv_t ++/** ++ * mmd_eee_adv_to_ethtool_adv_t ++ * @eee_adv: value of the MMD EEE Advertisement/Link Partner Ability registers ++ * ++ * A small helper function that translates the MMD EEE Advertisement (7.60) ++ * and MMD EEE Link Partner Ability (7.61) bits to ethtool advertisement ++ * settings. 
++ */ ++static inline u32 __kc_mmd_eee_adv_to_ethtool_adv_t(u16 eee_adv) ++{ ++ u32 adv = 0; ++ ++ if (eee_adv & MDIO_EEE_100TX) ++ adv |= ADVERTISED_100baseT_Full; ++ if (eee_adv & MDIO_EEE_1000T) ++ adv |= ADVERTISED_1000baseT_Full; ++ if (eee_adv & MDIO_EEE_10GT) ++ adv |= ADVERTISED_10000baseT_Full; ++ if (eee_adv & MDIO_EEE_1000KX) ++ adv |= ADVERTISED_1000baseKX_Full; ++ if (eee_adv & MDIO_EEE_10GKX4) ++ adv |= ADVERTISED_10000baseKX4_Full; ++ if (eee_adv & MDIO_EEE_10GKR) ++ adv |= ADVERTISED_10000baseKR_Full; ++ ++ return adv; ++} ++ ++#define mmd_eee_adv_to_ethtool_adv_t(eee_adv) \ ++ __kc_mmd_eee_adv_to_ethtool_adv_t(eee_adv) ++#endif /* mmd_eee_adv_to_ethtool_adv_t */ ++ ++#ifndef ethtool_adv_to_mmd_eee_adv_t ++/** ++ * ethtool_adv_to_mmd_eee_adv_t ++ * @adv: the ethtool advertisement settings ++ * ++ * A small helper function that translates ethtool advertisement settings ++ * to EEE advertisements for the MMD EEE Advertisement (7.60) and ++ * MMD EEE Link Partner Ability (7.61) registers. 
++ */ ++static inline u16 __kc_ethtool_adv_to_mmd_eee_adv_t(u32 adv) ++{ ++ u16 reg = 0; ++ ++ if (adv & ADVERTISED_100baseT_Full) ++ reg |= MDIO_EEE_100TX; ++ if (adv & ADVERTISED_1000baseT_Full) ++ reg |= MDIO_EEE_1000T; ++ if (adv & ADVERTISED_10000baseT_Full) ++ reg |= MDIO_EEE_10GT; ++ if (adv & ADVERTISED_1000baseKX_Full) ++ reg |= MDIO_EEE_1000KX; ++ if (adv & ADVERTISED_10000baseKX4_Full) ++ reg |= MDIO_EEE_10GKX4; ++ if (adv & ADVERTISED_10000baseKR_Full) ++ reg |= MDIO_EEE_10GKR; ++ ++ return reg; ++} ++#define ethtool_adv_to_mmd_eee_adv_t(adv) __kc_ethtool_adv_to_mmd_eee_adv_t(adv) ++#endif /* ethtool_adv_to_mmd_eee_adv_t */ ++ ++#ifndef pci_pcie_type ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24) ) ++static inline u8 pci_pcie_type(struct pci_dev *pdev) ++{ ++ int pos; ++ u16 reg16; ++ ++ pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); ++ BUG_ON(!pos); ++ pci_read_config_word(pdev, pos + PCI_EXP_FLAGS, ®16); ++ return (reg16 & PCI_EXP_FLAGS_TYPE) >> 4; ++} ++#else /* < 2.6.24 */ ++#define pci_pcie_type(x) (x)->pcie_type ++#endif /* < 2.6.24 */ ++#endif /* pci_pcie_type */ ++ ++#if ( ! ( RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,4) ) ) && \ ++ ( ! ( SLE_VERSION_CODE >= SLE_VERSION(11,3,0) ) ) && \ ++ ( LINUX_VERSION_CODE >= KERNEL_VERSION(3,0,0) ) ++#define ptp_clock_register(caps, args...) 
ptp_clock_register(caps) ++#endif ++ ++#ifndef pcie_capability_read_word ++int __kc_pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val); ++#define pcie_capability_read_word(d,p,v) __kc_pcie_capability_read_word(d,p,v) ++#endif /* pcie_capability_read_word */ ++ ++#ifndef pcie_capability_write_word ++int __kc_pcie_capability_write_word(struct pci_dev *dev, int pos, u16 val); ++#define pcie_capability_write_word(d,p,v) __kc_pcie_capability_write_word(d,p,v) ++#endif /* pcie_capability_write_word */ ++ ++#ifndef pcie_capability_clear_and_set_word ++int __kc_pcie_capability_clear_and_set_word(struct pci_dev *dev, int pos, ++ u16 clear, u16 set); ++#define pcie_capability_clear_and_set_word(d,p,c,s) \ ++ __kc_pcie_capability_clear_and_set_word(d,p,c,s) ++#endif /* pcie_capability_clear_and_set_word */ ++ ++#ifndef pcie_capability_clear_word ++int __kc_pcie_capability_clear_word(struct pci_dev *dev, int pos, ++ u16 clear); ++#define pcie_capability_clear_word(d, p, c) \ ++ __kc_pcie_capability_clear_word(d, p, c) ++#endif /* pcie_capability_clear_word */ ++ ++#ifndef PCI_EXP_LNKSTA2 ++#define PCI_EXP_LNKSTA2 50 /* Link Status 2 */ ++#endif ++ ++#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,3,0)) ++#define USE_CONST_DEV_UC_CHAR ++#endif ++ ++#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8)) ++#define napi_gro_flush(_napi, _flush_old) napi_gro_flush(_napi) ++#endif /* !RHEL6.8+ */ ++ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ++#include ++#else ++ ++#define DEFINE_HASHTABLE(name, bits) \ ++ struct hlist_head name[1 << (bits)] = \ ++ { [0 ... ((1 << (bits)) - 1)] = HLIST_HEAD_INIT } ++ ++#define DEFINE_READ_MOSTLY_HASHTABLE(name, bits) \ ++ struct hlist_head name[1 << (bits)] __read_mostly = \ ++ { [0 ... 
((1 << (bits)) - 1)] = HLIST_HEAD_INIT } ++ ++#define DECLARE_HASHTABLE(name, bits) \ ++ struct hlist_head name[1 << (bits)] ++ ++#define HASH_SIZE(name) (ARRAY_SIZE(name)) ++#define HASH_BITS(name) ilog2(HASH_SIZE(name)) ++ ++/* Use hash_32 when possible to allow for fast 32bit hashing in 64bit kernels. */ ++#define hash_min(val, bits) \ ++ (sizeof(val) <= 4 ? hash_32(val, bits) : hash_long(val, bits)) ++ ++static inline void __hash_init(struct hlist_head *ht, unsigned int sz) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < sz; i++) ++ INIT_HLIST_HEAD(&ht[i]); ++} ++ ++#define hash_init(hashtable) __hash_init(hashtable, HASH_SIZE(hashtable)) ++ ++#define hash_add(hashtable, node, key) \ ++ hlist_add_head(node, &hashtable[hash_min(key, HASH_BITS(hashtable))]) ++ ++static inline bool hash_hashed(struct hlist_node *node) ++{ ++ return !hlist_unhashed(node); ++} ++ ++static inline bool __hash_empty(struct hlist_head *ht, unsigned int sz) ++{ ++ unsigned int i; ++ ++ for (i = 0; i < sz; i++) ++ if (!hlist_empty(&ht[i])) ++ return false; ++ ++ return true; ++} ++ ++#define hash_empty(hashtable) __hash_empty(hashtable, HASH_SIZE(hashtable)) ++ ++static inline void hash_del(struct hlist_node *node) ++{ ++ hlist_del_init(node); ++} ++#endif /* RHEL >= 6.6 */ ++ ++#else /* >= 3.7.0 */ ++#include ++#define HAVE_CONST_STRUCT_PCI_ERROR_HANDLERS ++#define USE_CONST_DEV_UC_CHAR ++#endif /* >= 3.7.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,8,0) ) ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,5)) && \ ++ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) ++#ifndef pci_sriov_set_totalvfs ++static inline int __kc_pci_sriov_set_totalvfs(struct pci_dev __always_unused *dev, u16 __always_unused numvfs) ++{ ++ return 0; ++} ++#define pci_sriov_set_totalvfs(a, b) __kc_pci_sriov_set_totalvfs((a), (b)) ++#endif ++#endif /* !(RHEL_RELEASE_CODE >= 6.5 && 
SLE_VERSION_CODE >= 11.4) */ ++#ifndef PCI_EXP_LNKCTL_ASPM_L0S ++#define PCI_EXP_LNKCTL_ASPM_L0S 0x01 /* L0s Enable */ ++#endif ++#ifndef PCI_EXP_LNKCTL_ASPM_L1 ++#define PCI_EXP_LNKCTL_ASPM_L1 0x02 /* L1 Enable */ ++#endif ++#define HAVE_CONFIG_HOTPLUG ++/* Reserved Ethernet Addresses per IEEE 802.1Q */ ++static const u8 eth_reserved_addr_base[ETH_ALEN] __aligned(2) = { ++ 0x01, 0x80, 0xc2, 0x00, 0x00, 0x00 }; ++ ++#ifndef is_link_local_ether_addr ++static inline bool __kc_is_link_local_ether_addr(const u8 *addr) ++{ ++ __be16 *a = (__be16 *)addr; ++ static const __be16 *b = (const __be16 *)eth_reserved_addr_base; ++ static const __be16 m = cpu_to_be16(0xfff0); ++ ++ return ((a[0] ^ b[0]) | (a[1] ^ b[1]) | ((a[2] ^ b[2]) & m)) == 0; ++} ++#define is_link_local_ether_addr(addr) __kc_is_link_local_ether_addr(addr) ++#endif /* is_link_local_ether_addr */ ++int __kc_ipv6_find_hdr(const struct sk_buff *skb, unsigned int *offset, ++ int target, unsigned short *fragoff, int *flags); ++#define ipv6_find_hdr(a, b, c, d, e) __kc_ipv6_find_hdr((a), (b), (c), (d), (e)) ++ ++#ifndef FLOW_MAC_EXT ++#define FLOW_MAC_EXT 0x40000000 ++#endif /* FLOW_MAC_EXT */ ++ ++#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0)) ++#define HAVE_SRIOV_CONFIGURE ++#endif ++ ++#else /* >= 3.8.0 */ ++#ifndef __devinit ++#define __devinit ++#endif ++ ++#ifndef __devinitdata ++#define __devinitdata ++#endif ++ ++#ifndef __devinitconst ++#define __devinitconst ++#endif ++ ++#ifndef __devexit ++#define __devexit ++#endif ++ ++#ifndef __devexit_p ++#define __devexit_p ++#endif ++ ++#ifndef HAVE_ENCAP_CSUM_OFFLOAD ++#define HAVE_ENCAP_CSUM_OFFLOAD ++#endif ++ ++#ifndef HAVE_GRE_ENCAP_OFFLOAD ++#define HAVE_GRE_ENCAP_OFFLOAD ++#endif ++ ++#ifndef HAVE_SRIOV_CONFIGURE ++#define HAVE_SRIOV_CONFIGURE ++#endif ++ ++#define HAVE_BRIDGE_ATTRIBS ++#ifndef BRIDGE_MODE_VEB ++#define BRIDGE_MODE_VEB 0 /* Default loopback mode */ ++#endif /* BRIDGE_MODE_VEB */ ++#ifndef BRIDGE_MODE_VEPA ++#define 
BRIDGE_MODE_VEPA 1 /* 802.1Qbg defined VEPA mode */ ++#endif /* BRIDGE_MODE_VEPA */ ++#endif /* >= 3.8.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,9,0) ) ++ ++#undef BUILD_BUG_ON ++#ifdef __CHECKER__ ++#define BUILD_BUG_ON(condition) (0) ++#else /* __CHECKER__ */ ++#ifndef __compiletime_warning ++#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) ++#define __compiletime_warning(message) __attribute__((warning(message))) ++#else /* __GNUC__ */ ++#define __compiletime_warning(message) ++#endif /* __GNUC__ */ ++#endif /* __compiletime_warning */ ++#ifndef __compiletime_error ++#if defined(__GNUC__) && ((__GNUC__ * 10000 + __GNUC_MINOR__ * 100) >= 40400) ++#define __compiletime_error(message) __attribute__((error(message))) ++#define __compiletime_error_fallback(condition) do { } while (0) ++#else /* __GNUC__ */ ++#define __compiletime_error(message) ++#define __compiletime_error_fallback(condition) \ ++ do { ((void)sizeof(char[1 - 2 * condition])); } while (0) ++#endif /* __GNUC__ */ ++#else /* __compiletime_error */ ++#define __compiletime_error_fallback(condition) do { } while (0) ++#endif /* __compiletime_error */ ++#define __compiletime_assert(condition, msg, prefix, suffix) \ ++ do { \ ++ bool __cond = !(condition); \ ++ extern void prefix ## suffix(void) __compiletime_error(msg); \ ++ if (__cond) \ ++ prefix ## suffix(); \ ++ __compiletime_error_fallback(__cond); \ ++ } while (0) ++ ++#define _compiletime_assert(condition, msg, prefix, suffix) \ ++ __compiletime_assert(condition, msg, prefix, suffix) ++#define compiletime_assert(condition, msg) \ ++ _compiletime_assert(condition, msg, __compiletime_assert_, __LINE__) ++#define BUILD_BUG_ON_MSG(cond, msg) compiletime_assert(!(cond), msg) ++#ifndef __OPTIMIZE__ ++#define BUILD_BUG_ON(condition) ((void)sizeof(char[1 - 2*!!(condition)])) ++#else /* __OPTIMIZE__ */ ++#define BUILD_BUG_ON(condition) \ 
++ BUILD_BUG_ON_MSG(condition, "BUILD_BUG_ON failed: " #condition) ++#endif /* __OPTIMIZE__ */ ++#endif /* __CHECKER__ */ ++ ++#undef hlist_entry ++#define hlist_entry(ptr, type, member) container_of(ptr,type,member) ++ ++#undef hlist_entry_safe ++#define hlist_entry_safe(ptr, type, member) \ ++ ({ typeof(ptr) ____ptr = (ptr); \ ++ ____ptr ? hlist_entry(____ptr, type, member) : NULL; \ ++ }) ++ ++#undef hlist_for_each_entry ++#define hlist_for_each_entry(pos, head, member) \ ++ for (pos = hlist_entry_safe((head)->first, typeof(*(pos)), member); \ ++ pos; \ ++ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) ++ ++#undef hlist_for_each_entry_safe ++#define hlist_for_each_entry_safe(pos, n, head, member) \ ++ for (pos = hlist_entry_safe((head)->first, typeof(*pos), member); \ ++ pos && ({ n = pos->member.next; 1; }); \ ++ pos = hlist_entry_safe(n, typeof(*pos), member)) ++ ++#undef hlist_for_each_entry_continue ++#define hlist_for_each_entry_continue(pos, member) \ ++ for (pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member);\ ++ pos; \ ++ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) ++ ++#undef hlist_for_each_entry_from ++#define hlist_for_each_entry_from(pos, member) \ ++ for (; pos; \ ++ pos = hlist_entry_safe((pos)->member.next, typeof(*(pos)), member)) ++ ++#undef hash_for_each ++#define hash_for_each(name, bkt, obj, member) \ ++ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ ++ (bkt)++)\ ++ hlist_for_each_entry(obj, &name[bkt], member) ++ ++#undef hash_for_each_safe ++#define hash_for_each_safe(name, bkt, tmp, obj, member) \ ++ for ((bkt) = 0, obj = NULL; obj == NULL && (bkt) < HASH_SIZE(name);\ ++ (bkt)++)\ ++ hlist_for_each_entry_safe(obj, tmp, &name[bkt], member) ++ ++#undef hash_for_each_possible ++#define hash_for_each_possible(name, obj, member, key) \ ++ hlist_for_each_entry(obj, &name[hash_min(key, HASH_BITS(name))], member) ++ ++#undef hash_for_each_possible_safe ++#define 
hash_for_each_possible_safe(name, obj, tmp, member, key) \ ++ hlist_for_each_entry_safe(obj, tmp,\ ++ &name[hash_min(key, HASH_BITS(name))], member) ++ ++#ifdef CONFIG_XPS ++extern int __kc_netif_set_xps_queue(struct net_device *, const struct cpumask *, u16); ++#define netif_set_xps_queue(_dev, _mask, _idx) __kc_netif_set_xps_queue((_dev), (_mask), (_idx)) ++#else /* CONFIG_XPS */ ++#define netif_set_xps_queue(_dev, _mask, _idx) do {} while (0) ++#endif /* CONFIG_XPS */ ++ ++#ifdef HAVE_NETDEV_SELECT_QUEUE ++#define _kc_hashrnd 0xd631614b /* not so random hash salt */ ++extern u16 __kc_netdev_pick_tx(struct net_device *dev, struct sk_buff *skb); ++#define __netdev_pick_tx __kc_netdev_pick_tx ++#endif /* HAVE_NETDEV_SELECT_QUEUE */ ++#else ++#define HAVE_BRIDGE_FILTER ++#define HAVE_FDB_DEL_NLATTR ++#endif /* < 3.9.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0) ) ++#ifndef NAPI_POLL_WEIGHT ++#define NAPI_POLL_WEIGHT 64 ++#endif ++#ifdef CONFIG_PCI_IOV ++extern int __kc_pci_vfs_assigned(struct pci_dev *dev); ++#else ++static inline int __kc_pci_vfs_assigned(struct pci_dev __always_unused *dev) ++{ ++ return 0; ++} ++#endif ++#define pci_vfs_assigned(dev) __kc_pci_vfs_assigned(dev) ++ ++#ifndef list_first_entry_or_null ++#define list_first_entry_or_null(ptr, type, member) \ ++ (!list_empty(ptr) ? 
list_first_entry(ptr, type, member) : NULL) ++#endif ++ ++#ifndef VLAN_TX_COOKIE_MAGIC ++static inline struct sk_buff *__kc__vlan_hwaccel_put_tag(struct sk_buff *skb, ++ u16 vlan_tci) ++{ ++#ifdef VLAN_TAG_PRESENT ++ vlan_tci |= VLAN_TAG_PRESENT; ++#endif ++ skb->vlan_tci = vlan_tci; ++ return skb; ++} ++#define __vlan_hwaccel_put_tag(skb, vlan_proto, vlan_tci) \ ++ __kc__vlan_hwaccel_put_tag(skb, vlan_tci) ++#endif ++ ++#ifdef HAVE_FDB_OPS ++#ifdef USE_CONST_DEV_UC_CHAR ++extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, ++ const unsigned char *addr, u16 flags); ++#ifdef HAVE_FDB_DEL_NLATTR ++extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct nlattr *tb[], ++ struct net_device *dev, ++ const unsigned char *addr); ++#else ++extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ const unsigned char *addr); ++#endif ++#else ++extern int __kc_ndo_dflt_fdb_add(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr, u16 flags); ++extern int __kc_ndo_dflt_fdb_del(struct ndmsg *ndm, struct net_device *dev, ++ unsigned char *addr); ++#endif ++#define ndo_dflt_fdb_add __kc_ndo_dflt_fdb_add ++#define ndo_dflt_fdb_del __kc_ndo_dflt_fdb_del ++#endif /* HAVE_FDB_OPS */ ++ ++#ifndef PCI_DEVID ++#define PCI_DEVID(bus, devfn) ((((u16)(bus)) << 8) | (devfn)) ++#endif ++ ++/* The definitions for these functions when CONFIG_OF_NET is defined are ++ * pulled in from . For kernels older than 3.5 we already have ++ * backports for when CONFIG_OF_NET is true. These are separated and ++ * duplicated in order to cover all cases so that all kernels get either the ++ * real definitions (when CONFIG_OF_NET is defined) or the stub definitions ++ * (when CONFIG_OF_NET is not defined, or the kernel is too old to have real ++ * definitions). 
++ */ ++#ifndef CONFIG_OF_NET ++static inline int of_get_phy_mode(struct device_node __always_unused *np) ++{ ++ return -ENODEV; ++} ++ ++static inline const void * ++of_get_mac_address(struct device_node __always_unused *np) ++{ ++ return NULL; ++} ++#endif ++ ++#else /* >= 3.10.0 */ ++#define HAVE_ENCAP_TSO_OFFLOAD ++#define USE_DEFAULT_FDB_DEL_DUMP ++#define HAVE_SKB_INNER_NETWORK_HEADER ++#if (RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(8,0))) ++#define HAVE_RHEL7_PCI_DRIVER_RH ++#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) ++#define HAVE_RHEL7_PCI_RESET_NOTIFY ++#endif /* RHEL >= 7.2 */ ++#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ++#define HAVE_RHEL7_NET_DEVICE_OPS_EXT ++#define HAVE_GENEVE_RX_OFFLOAD ++#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) ++#define HAVE_UDP_ENC_TUNNEL ++#endif ++#ifdef ETHTOOL_GLINKSETTINGS ++#define HAVE_ETHTOOL_25G_BITS ++#endif /* ETHTOOL_GLINKSETTINGS */ ++#endif /* RHEL >= 7.3 */ ++ ++/* new hooks added to net_device_ops_extended in RHEL7.4 */ ++#if (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ++#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_SET_VF_VLAN ++#define HAVE_RHEL7_NETDEV_OPS_EXT_NDO_UDP_TUNNEL ++#define HAVE_UDP_ENC_RX_OFFLOAD ++#endif /* RHEL >= 7.4 */ ++ ++#endif /* RHEL >= 7.0 && RHEL < 8.0 */ ++#endif /* >= 3.10.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0) ) ++#define netdev_notifier_info_to_dev(ptr) ptr ++#if ((RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,6)) ||\ ++ (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(11,4,0))) ++#define HAVE_NDO_SET_VF_LINK_STATE ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) ++#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK ++#endif ++#else /* >= 3.11.0 */ ++#define HAVE_NDO_SET_VF_LINK_STATE ++#define 
HAVE_SKB_INNER_PROTOCOL ++#define HAVE_MPLS_FEATURES ++#endif /* >= 3.11.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,12,0) ) ++extern int __kc_pcie_get_minimum_link(struct pci_dev *dev, ++ enum pci_bus_speed *speed, ++ enum pcie_link_width *width); ++#ifndef pcie_get_minimum_link ++#define pcie_get_minimum_link(_p, _s, _w) __kc_pcie_get_minimum_link(_p, _s, _w) ++#endif ++#else /* >= 3.12.0 */ ++#if ( SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0)) ++#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK ++#endif ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) ++#define HAVE_VXLAN_RX_OFFLOAD ++#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) ++#define HAVE_UDP_ENC_TUNNEL ++#endif ++#endif /* < 4.8.0 */ ++#define HAVE_NDO_GET_PHYS_PORT_ID ++#endif /* >= 3.12.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0) ) ++#define dma_set_mask_and_coherent(_p, _m) __kc_dma_set_mask_and_coherent(_p, _m) ++extern int __kc_dma_set_mask_and_coherent(struct device *dev, u64 mask); ++#ifndef u64_stats_init ++#define u64_stats_init(a) do { } while(0) ++#endif ++#ifndef BIT_ULL ++#define BIT_ULL(n) (1ULL << (n)) ++#endif ++ ++#if (SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,1,0)) ++#undef HAVE_STRUCT_PAGE_PFMEMALLOC ++#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT ++#endif ++#ifndef list_next_entry ++#define list_next_entry(pos, member) \ ++ list_entry((pos)->member.next, typeof(*(pos)), member) ++#endif ++#ifndef list_prev_entry ++#define list_prev_entry(pos, member) \ ++ list_entry((pos)->member.prev, typeof(*(pos)), member) ++#endif ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,6,20) ) ++#define devm_kcalloc(dev, cnt, size, flags) \ ++ devm_kzalloc(dev, cnt * size, flags) ++#endif /* > 2.6.20 */ ++ ++#else /* >= 3.13.0 */ ++#define HAVE_VXLAN_CHECKS ++#if 
(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,24)) ++#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK ++#else ++#define HAVE_NDO_SELECT_QUEUE_ACCEL ++#endif ++#define HAVE_NET_GET_RANDOM_ONCE ++#define HAVE_HWMON_DEVICE_REGISTER_WITH_GROUPS ++#endif ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0) ) ++ ++#ifndef U16_MAX ++#define U16_MAX ((u16)~0U) ++#endif ++ ++#ifndef U32_MAX ++#define U32_MAX ((u32)~0U) ++#endif ++ ++#define dev_consume_skb_any(x) dev_kfree_skb_any(x) ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,0)) && \ ++ !(SLE_VERSION_CODE && SLE_VERSION_CODE >= SLE_VERSION(12,0,0))) ++ ++/* it isn't expected that this would be a #define unless we made it so */ ++#ifndef skb_set_hash ++ ++#define PKT_HASH_TYPE_NONE 0 ++#define PKT_HASH_TYPE_L2 1 ++#define PKT_HASH_TYPE_L3 2 ++#define PKT_HASH_TYPE_L4 3 ++ ++enum _kc_pkt_hash_types { ++ _KC_PKT_HASH_TYPE_NONE = PKT_HASH_TYPE_NONE, ++ _KC_PKT_HASH_TYPE_L2 = PKT_HASH_TYPE_L2, ++ _KC_PKT_HASH_TYPE_L3 = PKT_HASH_TYPE_L3, ++ _KC_PKT_HASH_TYPE_L4 = PKT_HASH_TYPE_L4, ++}; ++#define pkt_hash_types _kc_pkt_hash_types ++ ++#define skb_set_hash __kc_skb_set_hash ++static inline void __kc_skb_set_hash(struct sk_buff __maybe_unused *skb, ++ u32 __maybe_unused hash, ++ int __maybe_unused type) ++{ ++#ifdef HAVE_SKB_L4_RXHASH ++ skb->l4_rxhash = (type == PKT_HASH_TYPE_L4); ++#endif ++#ifdef NETIF_F_RXHASH ++ skb->rxhash = hash; ++#endif ++} ++#endif /* !skb_set_hash */ ++ ++#else /* RHEL_RELEASE_CODE >= 7.0 || SLE_VERSION_CODE >= 12.0 */ ++ ++#ifndef HAVE_VXLAN_RX_OFFLOAD ++#define HAVE_VXLAN_RX_OFFLOAD ++#endif /* HAVE_VXLAN_RX_OFFLOAD */ ++ ++#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_VXLAN) ++#define HAVE_UDP_ENC_TUNNEL ++#endif ++ ++#ifndef HAVE_VXLAN_CHECKS ++#define HAVE_VXLAN_CHECKS ++#endif /* HAVE_VXLAN_CHECKS */ ++#endif /* !(RHEL_RELEASE_CODE >= 7.0 && 
SLE_VERSION_CODE >= 12.0) */ ++ ++#ifndef pci_enable_msix_range ++extern int __kc_pci_enable_msix_range(struct pci_dev *dev, ++ struct msix_entry *entries, ++ int minvec, int maxvec); ++#define pci_enable_msix_range __kc_pci_enable_msix_range ++#endif ++ ++#ifndef ether_addr_copy ++#define ether_addr_copy __kc_ether_addr_copy ++static inline void __kc_ether_addr_copy(u8 *dst, const u8 *src) ++{ ++#if defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS) ++ *(u32 *)dst = *(const u32 *)src; ++ *(u16 *)(dst + 4) = *(const u16 *)(src + 4); ++#else ++ u16 *a = (u16 *)dst; ++ const u16 *b = (const u16 *)src; ++ ++ a[0] = b[0]; ++ a[1] = b[1]; ++ a[2] = b[2]; ++#endif ++} ++#endif /* ether_addr_copy */ ++ ++#else /* >= 3.14.0 */ ++ ++/* for ndo_dfwd_ ops add_station, del_station and _start_xmit */ ++#ifndef HAVE_NDO_DFWD_OPS ++#define HAVE_NDO_DFWD_OPS ++#endif ++#define HAVE_NDO_SELECT_QUEUE_ACCEL_FALLBACK ++#endif /* 3.14.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0) ) ++ ++#if (!(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) && \ ++ !(UBUNTU_VERSION_CODE && UBUNTU_VERSION_CODE >= UBUNTU_VERSION(3,13,0,30))) ++#define u64_stats_fetch_begin_irq u64_stats_fetch_begin_bh ++#define u64_stats_fetch_retry_irq u64_stats_fetch_retry_bh ++#endif ++ ++char *_kc_devm_kstrdup(struct device *dev, const char *s, gfp_t gfp); ++#define devm_kstrdup(dev, s, gfp) _kc_devm_kstrdup(dev, s, gfp) ++ ++#else ++#define HAVE_PTP_1588_CLOCK_PINS ++#define HAVE_NETDEV_PORT ++#endif /* 3.15.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0) ) ++#ifndef smp_mb__before_atomic ++#define smp_mb__before_atomic() smp_mb() ++#define smp_mb__after_atomic() smp_mb() ++#endif ++#ifndef __dev_uc_sync ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++int __kc_hw_addr_sync_dev(struct 
netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)); ++void __kc_hw_addr_unsync_dev(struct netdev_hw_addr_list *list, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)); ++#endif ++#ifndef NETDEV_HW_ADDR_T_MULTICAST ++int __kc_dev_addr_sync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*sync)(struct net_device *, const unsigned char *), ++ int (*unsync)(struct net_device *, const unsigned char *)); ++void __kc_dev_addr_unsync_dev(struct dev_addr_list **list, int *count, ++ struct net_device *dev, ++ int (*unsync)(struct net_device *, const unsigned char *)); ++#endif ++#endif /* HAVE_SET_RX_MODE */ ++ ++static inline int __kc_dev_uc_sync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*sync)(struct net_device *, const unsigned char *), ++ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ return __kc_hw_addr_sync_dev(&dev->uc, dev, sync, unsync); ++#elif defined(HAVE_SET_RX_MODE) ++ return __kc_dev_addr_sync_dev(&dev->uc_list, &dev->uc_count, ++ dev, sync, unsync); ++#else ++ return 0; ++#endif ++} ++#define __dev_uc_sync __kc_dev_uc_sync ++ ++static inline void __kc_dev_uc_unsync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_UNICAST ++ __kc_hw_addr_unsync_dev(&dev->uc, dev, unsync); ++#else /* NETDEV_HW_ADDR_T_MULTICAST */ ++ __kc_dev_addr_unsync_dev(&dev->uc_list, &dev->uc_count, dev, unsync); ++#endif /* NETDEV_HW_ADDR_T_UNICAST */ ++#endif /* HAVE_SET_RX_MODE */ ++} ++#define __dev_uc_unsync __kc_dev_uc_unsync ++ ++static inline int __kc_dev_mc_sync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*sync)(struct net_device *, const unsigned char *), ++ int 
__maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ return __kc_hw_addr_sync_dev(&dev->mc, dev, sync, unsync); ++#elif defined(HAVE_SET_RX_MODE) ++ return __kc_dev_addr_sync_dev(&dev->mc_list, &dev->mc_count, ++ dev, sync, unsync); ++#else ++ return 0; ++#endif ++ ++} ++#define __dev_mc_sync __kc_dev_mc_sync ++ ++static inline void __kc_dev_mc_unsync(struct net_device __maybe_unused *dev, ++ int __maybe_unused (*unsync)(struct net_device *, const unsigned char *)) ++{ ++#ifdef HAVE_SET_RX_MODE ++#ifdef NETDEV_HW_ADDR_T_MULTICAST ++ __kc_hw_addr_unsync_dev(&dev->mc, dev, unsync); ++#else /* NETDEV_HW_ADDR_T_MULTICAST */ ++ __kc_dev_addr_unsync_dev(&dev->mc_list, &dev->mc_count, dev, unsync); ++#endif /* NETDEV_HW_ADDR_T_MULTICAST */ ++#endif /* HAVE_SET_RX_MODE */ ++} ++#define __dev_mc_unsync __kc_dev_mc_unsync ++#endif /* __dev_uc_sync */ ++ ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++#endif ++ ++#ifndef NETIF_F_GSO_UDP_TUNNEL_CSUM ++/* if someone backports this, hopefully they backport as a #define. 
++ * declare it as zero on older kernels so that if it get's or'd in ++ * it won't effect anything, therefore preventing core driver changes ++ */ ++#define NETIF_F_GSO_UDP_TUNNEL_CSUM 0 ++#define SKB_GSO_UDP_TUNNEL_CSUM 0 ++#endif ++extern void *__kc_devm_kmemdup(struct device *dev, const void *src, size_t len, ++ unsigned int gfp); ++#define devm_kmemdup __kc_devm_kmemdup ++ ++#else ++#define HAVE_PCI_ERROR_HANDLER_RESET_NOTIFY ++#define HAVE_NDO_SET_VF_MIN_MAX_TX_RATE ++#endif /* 3.16.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0) ) ++#if !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,8) && \ ++ RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)) && \ ++ !(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) ++#ifndef timespec64 ++#define timespec64 timespec ++static inline struct timespec64 timespec_to_timespec64(const struct timespec ts) ++{ ++ return ts; ++} ++static inline struct timespec timespec64_to_timespec(const struct timespec64 ts64) ++{ ++ return ts64; ++} ++#define timespec64_equal timespec_equal ++#define timespec64_compare timespec_compare ++#define set_normalized_timespec64 set_normalized_timespec ++#define timespec64_add_safe timespec_add_safe ++#define timespec64_add timespec_add ++#define timespec64_sub timespec_sub ++#define timespec64_valid timespec_valid ++#define timespec64_valid_strict timespec_valid_strict ++#define timespec64_to_ns timespec_to_ns ++#define ns_to_timespec64 ns_to_timespec ++#define ktime_to_timespec64 ktime_to_timespec ++#define timespec64_add_ns timespec_add_ns ++#endif /* timespec64 */ ++#endif /* !(RHEL6.8= RHEL_RELEASE_VERSION(7,4)) ++#define hlist_add_behind(_a, _b) hlist_add_after(_b, _a) ++#endif ++ ++#else ++#define HAVE_DCBNL_OPS_SETAPP_RETURN_INT ++#include ++#endif /* 3.17.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,0) ) ++#ifndef 
NO_PTP_SUPPORT ++#include ++extern struct sk_buff *__kc_skb_clone_sk_ixgbe(struct sk_buff *skb); ++extern void __kc_skb_complete_tx_timestamp_ixgbe(struct sk_buff *skb, ++ struct skb_shared_hwtstamps *hwtstamps); ++#define skb_clone_sk __kc_skb_clone_sk_ixgbe ++#define skb_complete_tx_timestamp __kc_skb_complete_tx_timestamp_ixgbe ++#endif ++extern unsigned int __kc_eth_get_headlen_ixgbe(unsigned char *data, unsigned int max_len); ++#define eth_get_headlen __kc_eth_get_headlen_ixgbe ++#ifndef ETH_P_XDSA ++#define ETH_P_XDSA 0x00F8 ++#endif ++/* RHEL 7.1 backported csum_level, but SLES 12 and 12-SP1 did not */ ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_SKBUFF_CSUM_LEVEL ++#endif /* >= RH 7.1 */ ++ ++#undef GENMASK ++#define GENMASK(h, l) \ ++ (((~0UL) << (l)) & (~0UL >> (BITS_PER_LONG - 1 - (h)))) ++#undef GENMASK_ULL ++#define GENMASK_ULL(h, l) \ ++ (((~0ULL) << (l)) & (~0ULL >> (BITS_PER_LONG_LONG - 1 - (h)))) ++ ++#else /* 3.18.0 */ ++#define HAVE_SKBUFF_CSUM_LEVEL ++#define HAVE_SKB_XMIT_MORE ++#define HAVE_SKB_INNER_PROTOCOL_TYPE ++#endif /* 3.18.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,18,4) ) ++#else ++#define HAVE_NDO_FEATURES_CHECK ++#endif /* 3.18.4 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0) ) ++/* netdev_phys_port_id renamed to netdev_phys_item_id */ ++#define netdev_phys_item_id netdev_phys_port_id ++ ++static inline void _kc_napi_complete_done(struct napi_struct *napi, ++ int __always_unused work_done) { ++ napi_complete(napi); ++} ++#define napi_complete_done _kc_napi_complete_done ++ ++#ifndef NETDEV_RSS_KEY_LEN ++#define NETDEV_RSS_KEY_LEN (13 * 4) ++#endif ++#if ( !(RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(6,7) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0)))) ) ++#define 
netdev_rss_key_fill(buffer, len) __kc_netdev_rss_key_fill_ixgbe(buffer, len) ++#endif /* RHEL_RELEASE_CODE */ ++extern void __kc_netdev_rss_key_fill_ixgbe(void *buffer, size_t len); ++#define SPEED_20000 20000 ++#define SPEED_40000 40000 ++#ifndef dma_rmb ++#define dma_rmb() rmb() ++#endif ++#ifndef dev_alloc_pages ++#define dev_alloc_pages(_order) alloc_pages_node(NUMA_NO_NODE, (GFP_ATOMIC | __GFP_COLD | __GFP_COMP | __GFP_MEMALLOC), (_order)) ++#endif ++#ifndef dev_alloc_page ++#define dev_alloc_page() dev_alloc_pages(0) ++#endif ++#if !defined(eth_skb_pad) && !defined(skb_put_padto) ++/** ++ * __kc_skb_put_padto - increase size and pad an skbuff up to a minimal size ++ * @skb: buffer to pad ++ * @len: minimal length ++ * ++ * Pads up a buffer to ensure the trailing bytes exist and are ++ * blanked. If the buffer already contains sufficient data it ++ * is untouched. Otherwise it is extended. Returns zero on ++ * success. The skb is freed on error. ++ */ ++static inline int __kc_skb_put_padto(struct sk_buff *skb, unsigned int len) ++{ ++ unsigned int size = skb->len; ++ ++ if (unlikely(size < len)) { ++ len -= size; ++ if (skb_pad(skb, len)) ++ return -ENOMEM; ++ __skb_put(skb, len); ++ } ++ return 0; ++} ++#define skb_put_padto(skb, len) __kc_skb_put_padto(skb, len) ++ ++static inline int __kc_eth_skb_pad(struct sk_buff *skb) ++{ ++ return __kc_skb_put_padto(skb, ETH_ZLEN); ++} ++#define eth_skb_pad(skb) __kc_eth_skb_pad(skb) ++#endif /* eth_skb_pad && skb_put_padto */ ++ ++#ifndef SKB_ALLOC_NAPI ++/* RHEL 7.2 backported napi_alloc_skb and friends */ ++static inline struct sk_buff *__kc_napi_alloc_skb(struct napi_struct *napi, unsigned int length) ++{ ++ return netdev_alloc_skb_ip_align(napi->dev, length); ++} ++#define napi_alloc_skb(napi,len) __kc_napi_alloc_skb(napi,len) ++#define __napi_alloc_skb(napi,len,mask) __kc_napi_alloc_skb(napi,len) ++#endif /* SKB_ALLOC_NAPI */ ++#define HAVE_CONFIG_PM_RUNTIME ++#if (RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > 
RHEL_RELEASE_VERSION(6,7)) && \ ++ (RHEL_RELEASE_CODE < RHEL_RELEASE_VERSION(7,0))) ++#define HAVE_RXFH_HASHFUNC ++#endif /* 6.7 < RHEL < 7.0 */ ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_RXFH_HASHFUNC ++#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS ++#endif /* RHEL > 7.1 */ ++#ifndef napi_schedule_irqoff ++#define napi_schedule_irqoff napi_schedule ++#endif ++#ifndef READ_ONCE ++#define READ_ONCE(_x) ACCESS_ONCE(_x) ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) ++#define HAVE_NDO_FDB_ADD_VID ++#endif ++#ifndef ETH_MODULE_SFF_8636 ++#define ETH_MODULE_SFF_8636 0x3 ++#endif ++#ifndef ETH_MODULE_SFF_8636_LEN ++#define ETH_MODULE_SFF_8636_LEN 256 ++#endif ++#ifndef ETH_MODULE_SFF_8436 ++#define ETH_MODULE_SFF_8436 0x4 ++#endif ++#ifndef ETH_MODULE_SFF_8436_LEN ++#define ETH_MODULE_SFF_8436_LEN 256 ++#endif ++#else /* 3.19.0 */ ++#define HAVE_NDO_FDB_ADD_VID ++#define HAVE_RXFH_HASHFUNC ++#define NDO_DFLT_BRIDGE_GETLINK_HAS_BRFLAGS ++#endif /* 3.19.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(3,20,0) ) ++/* vlan_tx_xx functions got renamed to skb_vlan */ ++#ifndef skb_vlan_tag_get ++#define skb_vlan_tag_get vlan_tx_tag_get ++#endif ++#ifndef skb_vlan_tag_present ++#define skb_vlan_tag_present vlan_tx_tag_present ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,1)) ++#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) ++#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS ++#endif ++#else ++#define HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#define HAVE_NDO_BRIDGE_SET_DEL_LINK_FLAGS ++#endif /* 3.20.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0) ) ++/* Definition for CONFIG_OF was introduced earlier */ ++#if 
!defined(CONFIG_OF) && \ ++ !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) ++static inline struct device_node * ++pci_device_to_OF_node(const struct pci_dev __always_unused *pdev) { return NULL; } ++#endif /* !CONFIG_OF && RHEL < 7.3 */ ++#endif /* < 4.0 */ ++ ++/*****************************************************************************/ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,1,0) ) ++#ifndef NO_PTP_SUPPORT ++#ifdef HAVE_INCLUDE_LINUX_TIMECOUNTER_H ++#include ++#else ++#include ++#endif ++static inline void __kc_timecounter_adjtime(struct timecounter *tc, s64 delta) ++{ ++ tc->nsec += delta; ++} ++ ++static inline struct net_device * ++of_find_net_device_by_node(struct device_node __always_unused *np) ++{ ++ return NULL; ++} ++ ++#define timecounter_adjtime __kc_timecounter_adjtime ++#endif ++#if ((RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2))) || \ ++ (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,2,0)))) ++#define HAVE_NDO_SET_VF_RSS_QUERY_EN ++#endif ++#if RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) ++#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS ++#endif ++#else /* >= 4,1,0 */ ++#define HAVE_PTP_CLOCK_INFO_GETTIME64 ++#define HAVE_NDO_BRIDGE_GETLINK_NLFLAGS ++#define HAVE_PASSTHRU_FEATURES_CHECK ++#define HAVE_NDO_SET_VF_RSS_QUERY_EN ++#endif /* 4,1,0 */ ++ ++/*****************************************************************************/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,1,9)) ++#if (!(RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2)) && \ ++ !((SLE_VERSION_CODE == SLE_VERSION(11,3,0)) && \ ++ (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(0,47,71))) && \ ++ !((SLE_VERSION_CODE == SLE_VERSION(11,4,0)) && \ ++ (SLE_LOCALVERSION_CODE >= SLE_LOCALVERSION(65,0,0))) && \ ++ !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) ++static inline bool page_is_pfmemalloc(struct page __maybe_unused *page) ++{ ++#ifdef HAVE_STRUCT_PAGE_PFMEMALLOC ++ return page->pfmemalloc; ++#else ++ return 
false; ++#endif ++} ++#endif /* !RHEL7.2+ && !SLES11sp3(3.0.101-0.47.71+ update) && !SLES11sp4(3.0.101-65+ update) & !SLES12sp1+ */ ++#else ++#undef HAVE_STRUCT_PAGE_PFMEMALLOC ++#endif /* 4.1.9 */ ++ ++/*****************************************************************************/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)) ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,2)) && \ ++ !(SLE_VERSION_CODE >= SLE_VERSION(12,1,0))) ++#define ETHTOOL_RX_FLOW_SPEC_RING 0x00000000FFFFFFFFULL ++#define ETHTOOL_RX_FLOW_SPEC_RING_VF 0x000000FF00000000ULL ++#define ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF 32 ++static inline __u64 ethtool_get_flow_spec_ring(__u64 ring_cookie) ++{ ++ return ETHTOOL_RX_FLOW_SPEC_RING & ring_cookie; ++}; ++ ++static inline __u64 ethtool_get_flow_spec_ring_vf(__u64 ring_cookie) ++{ ++ return (ETHTOOL_RX_FLOW_SPEC_RING_VF & ring_cookie) >> ++ ETHTOOL_RX_FLOW_SPEC_RING_VF_OFF; ++}; ++#endif /* ! RHEL >= 7.2 && ! SLES >= 12.1 */ ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ++#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT ++#endif ++#else ++#define HAVE_NDO_DFLT_BRIDGE_GETLINK_VLAN_SUPPORT ++#endif /* 4.2.0 */ ++ ++/*****************************************************************************/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)) ++#if (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3)) ++#define HAVE_NDO_SET_VF_TRUST ++#endif /* (RHEL_RELEASE >= 7.3) */ ++#ifndef CONFIG_64BIT ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0)) ++#include /* 32-bit readq/writeq */ ++#else /* 3.3.0 => 4.3.x */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) ++#include ++#endif /* 2.6.26 => 3.3.0 */ ++#ifndef readq ++static inline __u64 readq(const volatile void __iomem *addr) ++{ ++ const volatile u32 __iomem *p = addr; ++ u32 low, high; ++ ++ low = readl(p); ++ high = readl(p + 1); ++ ++ return low + ((u64)high << 32); ++} ++#define readq readq ++#endif ++ ++#ifndef writeq ++static inline void 
writeq(__u64 val, volatile void __iomem *addr) ++{ ++ writel(val, addr); ++ writel(val >> 32, addr + 4); ++} ++#define writeq writeq ++#endif ++#endif /* < 3.3.0 */ ++#endif /* !CONFIG_64BIT */ ++#else /* < 4.4.0 */ ++#define HAVE_NDO_SET_VF_TRUST ++ ++#ifndef CONFIG_64BIT ++#include /* 32-bit readq/writeq */ ++#endif /* !CONFIG_64BIT */ ++#endif /* 4.4.0 */ ++ ++/*****************************************************************************/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)) ++/* protect against a likely backport */ ++#ifndef NETIF_F_CSUM_MASK ++#define NETIF_F_CSUM_MASK NETIF_F_ALL_CSUM ++#endif /* NETIF_F_CSUM_MASK */ ++#ifndef NETIF_F_SCTP_CRC ++#define NETIF_F_SCTP_CRC NETIF_F_SCTP_CSUM ++#endif /* NETIF_F_SCTP_CRC */ ++#if (!(RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,3))) ++#define eth_platform_get_mac_address _kc_eth_platform_get_mac_address ++extern int _kc_eth_platform_get_mac_address(struct device *dev __maybe_unused, ++ u8 *mac_addr __maybe_unused); ++#endif /* !(RHEL_RELEASE >= 7.3) */ ++#else /* 4.5.0 */ ++#if ( LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0) ) ++#define HAVE_GENEVE_RX_OFFLOAD ++#if !defined(HAVE_UDP_ENC_TUNNEL) && IS_ENABLED(CONFIG_GENEVE) ++#define HAVE_UDP_ENC_TUNNEL ++#endif ++#endif /* < 4.8.0 */ ++#define HAVE_NETIF_NAPI_ADD_CALLS_NAPI_HASH_ADD ++#endif /* 4.5.0 */ ++ ++/*****************************************************************************/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)) ++#if !(UBUNTU_VERSION_CODE && \ ++ UBUNTU_VERSION_CODE >= UBUNTU_VERSION(4,4,0,21)) && \ ++ !(RHEL_RELEASE_CODE && \ ++ (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) && \ ++ !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) ++static inline void napi_consume_skb(struct sk_buff *skb, ++ int __always_unused budget) ++{ ++ dev_consume_skb_any(skb); ++} ++ ++#endif /* UBUNTU 4,4,0,21, RHEL 7.2, SLES12 SP3 */ ++#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) && \ ++ !(RHEL_RELEASE_CODE && 
RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ++static inline void csum_replace_by_diff(__sum16 *sum, __wsum diff) ++{ ++ * sum = csum_fold(csum_add(diff, ~csum_unfold(*sum))); ++} ++#endif ++ ++#if !(RHEL_RELEASE_CODE && (RHEL_RELEASE_CODE > RHEL_RELEASE_VERSION(7,2))) ++static inline void page_ref_inc(struct page *page) ++{ ++ get_page(page); ++} ++#else ++#define HAVE_PAGE_COUNT_BULK_UPDATE ++#endif ++ ++#else /* 4.6.0 */ ++#define HAVE_PAGE_COUNT_BULK_UPDATE ++#endif /* 4.6.0 */ ++ ++/*****************************************************************************/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)) ++#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) ||\ ++ (RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ++#define HAVE_NETIF_TRANS_UPDATE ++#endif ++#else /* 4.7.0 */ ++#define HAVE_NETIF_TRANS_UPDATE ++#define HAVE_ETHTOOL_CONVERT_U32_AND_LINK_MODE ++#ifdef ETHTOOL_GLINKSETTINGS ++#define HAVE_ETHTOOL_25G_BITS ++#endif /* ETHTOOL_GLINKSETTINGS */ ++#endif /* 4.7.0 */ ++ ++/*****************************************************************************/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)) ++#if !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ++enum udp_parsable_tunnel_type { ++ UDP_TUNNEL_TYPE_VXLAN, ++ UDP_TUNNEL_TYPE_GENEVE, ++}; ++struct udp_tunnel_info { ++ unsigned short type; ++ sa_family_t sa_family; ++ __be16 port; ++}; ++#endif ++ ++#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) &&\ ++ !(RHEL_RELEASE_CODE && RHEL_RELEASE_CODE >= RHEL_RELEASE_VERSION(7,4)) ++static inline int ++#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME ++pci_request_io_regions(struct pci_dev *pdev, char *name) ++#else ++pci_request_io_regions(struct pci_dev *pdev, const char *name) ++#endif ++{ ++ return pci_request_selected_regions(pdev, ++ pci_select_bars(pdev, IORESOURCE_IO), name); ++} ++ ++static inline void ++pci_release_io_regions(struct pci_dev *pdev) ++{ ++ return 
pci_release_selected_regions(pdev, ++ pci_select_bars(pdev, IORESOURCE_IO)); ++} ++ ++static inline int ++#ifdef HAVE_NON_CONST_PCI_DRIVER_NAME ++pci_request_mem_regions(struct pci_dev *pdev, char *name) ++#else ++pci_request_mem_regions(struct pci_dev *pdev, const char *name) ++#endif ++{ ++ return pci_request_selected_regions(pdev, ++ pci_select_bars(pdev, IORESOURCE_MEM), name); ++} ++ ++static inline void ++pci_release_mem_regions(struct pci_dev *pdev) ++{ ++ return pci_release_selected_regions(pdev, ++ pci_select_bars(pdev, IORESOURCE_MEM)); ++} ++#endif /* !SLE_VERSION(12,3,0) */ ++#else ++#define HAVE_UDP_ENC_RX_OFFLOAD ++#endif /* 4.8.0 */ ++ ++/*****************************************************************************/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)) ++#else ++#endif /* 4.9.0 */ ++ ++/*****************************************************************************/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)) ++#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) ++#define HAVE_STRUCT_DMA_ATTRS ++#define HAVE_NETDEVICE_MIN_MAX_MTU ++#endif ++ ++#if !(SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) ++#ifndef dma_map_page_attrs ++#define dma_map_page_attrs __kc_dma_map_page_attrs ++static inline dma_addr_t __kc_dma_map_page_attrs(struct device *dev, ++ struct page *page, ++ size_t offset, size_t size, ++ enum dma_data_direction dir, ++ unsigned long __always_unused attrs) ++{ ++ return dma_map_page(dev, page, offset, size, dir); ++} ++#endif ++ ++#ifndef dma_unmap_page_attrs ++#define dma_unmap_page_attrs __kc_dma_unmap_page_attrs ++static inline void __kc_dma_unmap_page_attrs(struct device *dev, ++ dma_addr_t addr, size_t size, ++ enum dma_data_direction dir, ++ unsigned long __always_unused attrs) ++{ ++ dma_unmap_page(dev, addr, size, dir); ++} ++#endif ++ ++static inline void __page_frag_cache_drain(struct page *page, ++ unsigned int count) ++{ ++#ifdef HAVE_PAGE_COUNT_BULK_UPDATE ++ if 
(!page_ref_sub_and_test(page, count)) ++ return; ++ ++ init_page_count(page); ++#else ++ BUG_ON(count > 1); ++ if (!count) ++ return; ++#endif ++ __free_pages(page, compound_order(page)); ++} ++#endif /* !SLE_VERSION(12,3,0) */ ++#ifndef ETH_MIN_MTU ++#define ETH_MIN_MTU 68 ++#endif /* ETH_MIN_MTU */ ++#else ++#define HAVE_NETDEVICE_MIN_MAX_MTU ++#define HAVE_SWIOTLB_SKIP_CPU_SYNC ++#define HAVE_NETDEV_TC_RESETS_XPS ++#endif /* 4.10.0 */ ++ ++/*****************************************************************************/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)) ++#ifdef CONFIG_NET_RX_BUSY_POLL ++#define HAVE_NDO_BUSY_POLL ++#if (SLE_VERSION_CODE && (SLE_VERSION_CODE >= SLE_VERSION(12,3,0))) ++#define HAVE_VOID_NDO_GET_STATS64 ++#endif ++#endif ++#else /* > 4.11 */ ++#define HAVE_VOID_NDO_GET_STATS64 ++#endif /* 4.11.0 */ ++ ++/*****************************************************************************/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0)) ++#else /* > 4.13 */ ++#define HAVE_HWTSTAMP_FILTER_NTP_ALL ++#endif /* 4.13.0 */ ++ ++#endif /* _KCOMPAT_H_ */ +diff --git a/drivers/net/ethernet/intel/ixgbe/kcompat_ethtool.c b/drivers/net/ethernet/intel/ixgbe/kcompat_ethtool.c +new file mode 100644 +index 0000000..16fbd74 +--- /dev/null ++++ b/drivers/net/ethernet/intel/ixgbe/kcompat_ethtool.c +@@ -0,0 +1,1169 @@ ++/******************************************************************************* ++ ++ Intel(R) 10GbE PCI Express Linux Network Driver ++ Copyright(c) 1999 - 2017 Intel Corporation. ++ ++ This program is free software; you can redistribute it and/or modify it ++ under the terms and conditions of the GNU General Public License, ++ version 2, as published by the Free Software Foundation. ++ ++ This program is distributed in the hope it will be useful, but WITHOUT ++ ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or ++ FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for ++ more details. 
++ ++ The full GNU General Public License is included in this distribution in ++ the file called "COPYING". ++ ++ Contact Information: ++ Linux NICS ++ e1000-devel Mailing List ++ Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 ++ ++*******************************************************************************/ ++ ++/* ++ * net/core/ethtool.c - Ethtool ioctl handler ++ * Copyright (c) 2003 Matthew Wilcox ++ * ++ * This file is where we call all the ethtool_ops commands to get ++ * the information ethtool needs. We fall back to calling do_ioctl() ++ * for drivers which haven't been converted to ethtool_ops yet. ++ * ++ * It's GPL, stupid. ++ * ++ * Modification by sfeldma@pobox.com to work as backward compat ++ * solution for pre-ethtool_ops kernels. ++ * - copied struct ethtool_ops from ethtool.h ++ * - defined SET_ETHTOOL_OPS ++ * - put in some #ifndef NETIF_F_xxx wrappers ++ * - changes refs to dev->ethtool_ops to ethtool_ops ++ * - changed dev_ethtool to ethtool_ioctl ++ * - remove EXPORT_SYMBOL()s ++ * - added _kc_ prefix in built-in ethtool_op_xxx ops. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "kcompat.h" ++ ++#undef SUPPORTED_10000baseT_Full ++#define SUPPORTED_10000baseT_Full (1 << 12) ++#undef ADVERTISED_10000baseT_Full ++#define ADVERTISED_10000baseT_Full (1 << 12) ++#undef SPEED_10000 ++#define SPEED_10000 10000 ++ ++#undef ethtool_ops ++#define ethtool_ops _kc_ethtool_ops ++ ++struct _kc_ethtool_ops { ++ int (*get_settings)(struct net_device *, struct ethtool_cmd *); ++ int (*set_settings)(struct net_device *, struct ethtool_cmd *); ++ void (*get_drvinfo)(struct net_device *, struct ethtool_drvinfo *); ++ int (*get_regs_len)(struct net_device *); ++ void (*get_regs)(struct net_device *, struct ethtool_regs *, void *); ++ void (*get_wol)(struct net_device *, struct ethtool_wolinfo *); ++ int (*set_wol)(struct net_device *, struct ethtool_wolinfo *); ++ u32 (*get_msglevel)(struct net_device *); ++ void (*set_msglevel)(struct net_device *, u32); ++ int (*nway_reset)(struct net_device *); ++ u32 (*get_link)(struct net_device *); ++ int (*get_eeprom_len)(struct net_device *); ++ int (*get_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); ++ int (*set_eeprom)(struct net_device *, struct ethtool_eeprom *, u8 *); ++ int (*get_coalesce)(struct net_device *, struct ethtool_coalesce *); ++ int (*set_coalesce)(struct net_device *, struct ethtool_coalesce *); ++ void (*get_ringparam)(struct net_device *, struct ethtool_ringparam *); ++ int (*set_ringparam)(struct net_device *, struct ethtool_ringparam *); ++ void (*get_pauseparam)(struct net_device *, ++ struct ethtool_pauseparam*); ++ int (*set_pauseparam)(struct net_device *, ++ struct ethtool_pauseparam*); ++ u32 (*get_rx_csum)(struct net_device *); ++ int (*set_rx_csum)(struct net_device *, u32); ++ u32 (*get_tx_csum)(struct net_device *); ++ int (*set_tx_csum)(struct net_device *, u32); ++ u32 (*get_sg)(struct net_device *); ++ int (*set_sg)(struct net_device *, u32); ++ u32 (*get_tso)(struct 
net_device *); ++ int (*set_tso)(struct net_device *, u32); ++ int (*self_test_count)(struct net_device *); ++ void (*self_test)(struct net_device *, struct ethtool_test *, u64 *); ++ void (*get_strings)(struct net_device *, u32 stringset, u8 *); ++ int (*phys_id)(struct net_device *, u32); ++ int (*get_stats_count)(struct net_device *); ++ void (*get_ethtool_stats)(struct net_device *, struct ethtool_stats *, ++ u64 *); ++} *ethtool_ops = NULL; ++ ++#undef SET_ETHTOOL_OPS ++#define SET_ETHTOOL_OPS(netdev, ops) (ethtool_ops = (ops)) ++ ++/* ++ * Some useful ethtool_ops methods that are device independent. If we find that ++ * all drivers want to do the same thing here, we can turn these into dev_() ++ * function calls. ++ */ ++ ++#undef ethtool_op_get_link ++#define ethtool_op_get_link _kc_ethtool_op_get_link ++u32 _kc_ethtool_op_get_link(struct net_device *dev) ++{ ++ return netif_carrier_ok(dev) ? 1 : 0; ++} ++ ++#undef ethtool_op_get_tx_csum ++#define ethtool_op_get_tx_csum _kc_ethtool_op_get_tx_csum ++u32 _kc_ethtool_op_get_tx_csum(struct net_device *dev) ++{ ++#ifdef NETIF_F_IP_CSUM ++ return (dev->features & NETIF_F_IP_CSUM) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_tx_csum ++#define ethtool_op_set_tx_csum _kc_ethtool_op_set_tx_csum ++int _kc_ethtool_op_set_tx_csum(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_IP_CSUM ++ if (data) ++#ifdef NETIF_F_IPV6_CSUM ++ dev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); ++ else ++ dev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); ++#else ++ dev->features |= NETIF_F_IP_CSUM; ++ else ++ dev->features &= ~NETIF_F_IP_CSUM; ++#endif ++#endif ++ ++ return 0; ++} ++ ++#undef ethtool_op_get_sg ++#define ethtool_op_get_sg _kc_ethtool_op_get_sg ++u32 _kc_ethtool_op_get_sg(struct net_device *dev) ++{ ++#ifdef NETIF_F_SG ++ return (dev->features & NETIF_F_SG) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_sg ++#define ethtool_op_set_sg _kc_ethtool_op_set_sg ++int 
_kc_ethtool_op_set_sg(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_SG ++ if (data) ++ dev->features |= NETIF_F_SG; ++ else ++ dev->features &= ~NETIF_F_SG; ++#endif ++ ++ return 0; ++} ++ ++#undef ethtool_op_get_tso ++#define ethtool_op_get_tso _kc_ethtool_op_get_tso ++u32 _kc_ethtool_op_get_tso(struct net_device *dev) ++{ ++#ifdef NETIF_F_TSO ++ return (dev->features & NETIF_F_TSO) != 0; ++#else ++ return 0; ++#endif ++} ++ ++#undef ethtool_op_set_tso ++#define ethtool_op_set_tso _kc_ethtool_op_set_tso ++int _kc_ethtool_op_set_tso(struct net_device *dev, u32 data) ++{ ++#ifdef NETIF_F_TSO ++ if (data) ++ dev->features |= NETIF_F_TSO; ++ else ++ dev->features &= ~NETIF_F_TSO; ++#endif ++ ++ return 0; ++} ++ ++/* Handlers for each ethtool command */ ++ ++static int ethtool_get_settings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_cmd cmd = { ETHTOOL_GSET }; ++ int err; ++ ++ if (!ethtool_ops->get_settings) ++ return -EOPNOTSUPP; ++ ++ err = ethtool_ops->get_settings(dev, &cmd); ++ if (err < 0) ++ return err; ++ ++ if (copy_to_user(useraddr, &cmd, sizeof(cmd))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_settings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_cmd cmd; ++ ++ if (!ethtool_ops->set_settings) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&cmd, useraddr, sizeof(cmd))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_settings(dev, &cmd); ++} ++ ++static int ethtool_get_drvinfo(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_drvinfo info; ++ struct ethtool_ops *ops = ethtool_ops; ++ ++ if (!ops->get_drvinfo) ++ return -EOPNOTSUPP; ++ ++ memset(&info, 0, sizeof(info)); ++ info.cmd = ETHTOOL_GDRVINFO; ++ ops->get_drvinfo(dev, &info); ++ ++ if (ops->self_test_count) ++ info.testinfo_len = ops->self_test_count(dev); ++ if (ops->get_stats_count) ++ info.n_stats = ops->get_stats_count(dev); ++ if (ops->get_regs_len) ++ info.regdump_len = ops->get_regs_len(dev); ++ if 
(ops->get_eeprom_len) ++ info.eedump_len = ops->get_eeprom_len(dev); ++ ++ if (copy_to_user(useraddr, &info, sizeof(info))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_get_regs(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_regs regs; ++ struct ethtool_ops *ops = ethtool_ops; ++ void *regbuf; ++ int reglen, ret; ++ ++ if (!ops->get_regs || !ops->get_regs_len) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(®s, useraddr, sizeof(regs))) ++ return -EFAULT; ++ ++ reglen = ops->get_regs_len(dev); ++ if (regs.len > reglen) ++ regs.len = reglen; ++ ++ regbuf = kmalloc(reglen, GFP_USER); ++ if (!regbuf) ++ return -ENOMEM; ++ ++ ops->get_regs(dev, ®s, regbuf); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, ®s, sizeof(regs))) ++ goto out; ++ useraddr += offsetof(struct ethtool_regs, data); ++ if (copy_to_user(useraddr, regbuf, reglen)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(regbuf); ++ return ret; ++} ++ ++static int ethtool_get_wol(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_wolinfo wol = { ETHTOOL_GWOL }; ++ ++ if (!ethtool_ops->get_wol) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_wol(dev, &wol); ++ ++ if (copy_to_user(useraddr, &wol, sizeof(wol))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_wol(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_wolinfo wol; ++ ++ if (!ethtool_ops->set_wol) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&wol, useraddr, sizeof(wol))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_wol(dev, &wol); ++} ++ ++static int ethtool_get_msglevel(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GMSGLVL }; ++ ++ if (!ethtool_ops->get_msglevel) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_msglevel(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_msglevel(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ 
if (!ethtool_ops->set_msglevel) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ ethtool_ops->set_msglevel(dev, edata.data); ++ return 0; ++} ++ ++static int ethtool_nway_reset(struct net_device *dev) ++{ ++ if (!ethtool_ops->nway_reset) ++ return -EOPNOTSUPP; ++ ++ return ethtool_ops->nway_reset(dev); ++} ++ ++static int ethtool_get_link(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GLINK }; ++ ++ if (!ethtool_ops->get_link) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_link(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_get_eeprom(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_eeprom eeprom; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->get_eeprom || !ops->get_eeprom_len) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) ++ return -EFAULT; ++ ++ /* Check for wrap and zero */ ++ if (eeprom.offset + eeprom.len <= eeprom.offset) ++ return -EINVAL; ++ ++ /* Check for exceeding total eeprom len */ ++ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) ++ return -EINVAL; ++ ++ data = kmalloc(eeprom.len, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ret = -EFAULT; ++ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) ++ goto out; ++ ++ ret = ops->get_eeprom(dev, &eeprom, data); ++ if (ret) ++ goto out; ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &eeprom, sizeof(eeprom))) ++ goto out; ++ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_set_eeprom(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_eeprom eeprom; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->set_eeprom || !ops->get_eeprom_len) ++ return -EOPNOTSUPP; ++ 
++ if (copy_from_user(&eeprom, useraddr, sizeof(eeprom))) ++ return -EFAULT; ++ ++ /* Check for wrap and zero */ ++ if (eeprom.offset + eeprom.len <= eeprom.offset) ++ return -EINVAL; ++ ++ /* Check for exceeding total eeprom len */ ++ if (eeprom.offset + eeprom.len > ops->get_eeprom_len(dev)) ++ return -EINVAL; ++ ++ data = kmalloc(eeprom.len, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ret = -EFAULT; ++ if (copy_from_user(data, useraddr + sizeof(eeprom), eeprom.len)) ++ goto out; ++ ++ ret = ops->set_eeprom(dev, &eeprom, data); ++ if (ret) ++ goto out; ++ ++ if (copy_to_user(useraddr + sizeof(eeprom), data, eeprom.len)) ++ ret = -EFAULT; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_get_coalesce(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_coalesce coalesce = { ETHTOOL_GCOALESCE }; ++ ++ if (!ethtool_ops->get_coalesce) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_coalesce(dev, &coalesce); ++ ++ if (copy_to_user(useraddr, &coalesce, sizeof(coalesce))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_coalesce(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_coalesce coalesce; ++ ++ if (!ethtool_ops->get_coalesce) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&coalesce, useraddr, sizeof(coalesce))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_coalesce(dev, &coalesce); ++} ++ ++static int ethtool_get_ringparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_ringparam ringparam = { ETHTOOL_GRINGPARAM }; ++ ++ if (!ethtool_ops->get_ringparam) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_ringparam(dev, &ringparam); ++ ++ if (copy_to_user(useraddr, &ringparam, sizeof(ringparam))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_ringparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_ringparam ringparam; ++ ++ if (!ethtool_ops->get_ringparam) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&ringparam, useraddr, sizeof(ringparam))) ++ return 
-EFAULT; ++ ++ return ethtool_ops->set_ringparam(dev, &ringparam); ++} ++ ++static int ethtool_get_pauseparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_pauseparam pauseparam = { ETHTOOL_GPAUSEPARAM }; ++ ++ if (!ethtool_ops->get_pauseparam) ++ return -EOPNOTSUPP; ++ ++ ethtool_ops->get_pauseparam(dev, &pauseparam); ++ ++ if (copy_to_user(useraddr, &pauseparam, sizeof(pauseparam))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_pauseparam(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_pauseparam pauseparam; ++ ++ if (!ethtool_ops->get_pauseparam) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&pauseparam, useraddr, sizeof(pauseparam))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_pauseparam(dev, &pauseparam); ++} ++ ++static int ethtool_get_rx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GRXCSUM }; ++ ++ if (!ethtool_ops->get_rx_csum) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_rx_csum(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_rx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_rx_csum) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ ethtool_ops->set_rx_csum(dev, edata.data); ++ return 0; ++} ++ ++static int ethtool_get_tx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GTXCSUM }; ++ ++ if (!ethtool_ops->get_tx_csum) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_tx_csum(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_tx_csum(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_tx_csum) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ 
return -EFAULT; ++ ++ return ethtool_ops->set_tx_csum(dev, edata.data); ++} ++ ++static int ethtool_get_sg(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GSG }; ++ ++ if (!ethtool_ops->get_sg) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_sg(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_sg(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_sg) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_sg(dev, edata.data); ++} ++ ++static int ethtool_get_tso(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata = { ETHTOOL_GTSO }; ++ ++ if (!ethtool_ops->get_tso) ++ return -EOPNOTSUPP; ++ ++ edata.data = ethtool_ops->get_tso(dev); ++ ++ if (copy_to_user(useraddr, &edata, sizeof(edata))) ++ return -EFAULT; ++ return 0; ++} ++ ++static int ethtool_set_tso(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_value edata; ++ ++ if (!ethtool_ops->set_tso) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&edata, useraddr, sizeof(edata))) ++ return -EFAULT; ++ ++ return ethtool_ops->set_tso(dev, edata.data); ++} ++ ++static int ethtool_self_test(struct net_device *dev, char *useraddr) ++{ ++ struct ethtool_test test; ++ struct ethtool_ops *ops = ethtool_ops; ++ u64 *data; ++ int ret; ++ ++ if (!ops->self_test || !ops->self_test_count) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&test, useraddr, sizeof(test))) ++ return -EFAULT; ++ ++ test.len = ops->self_test_count(dev); ++ data = kmalloc(test.len * sizeof(u64), GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->self_test(dev, &test, data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &test, sizeof(test))) ++ goto out; ++ useraddr += sizeof(test); ++ if (copy_to_user(useraddr, data, test.len * sizeof(u64))) ++ goto out; ++ ret = 
0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_get_strings(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_gstrings gstrings; ++ struct ethtool_ops *ops = ethtool_ops; ++ u8 *data; ++ int ret; ++ ++ if (!ops->get_strings) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&gstrings, useraddr, sizeof(gstrings))) ++ return -EFAULT; ++ ++ switch (gstrings.string_set) { ++ case ETH_SS_TEST: ++ if (!ops->self_test_count) ++ return -EOPNOTSUPP; ++ gstrings.len = ops->self_test_count(dev); ++ break; ++ case ETH_SS_STATS: ++ if (!ops->get_stats_count) ++ return -EOPNOTSUPP; ++ gstrings.len = ops->get_stats_count(dev); ++ break; ++ default: ++ return -EINVAL; ++ } ++ ++ data = kmalloc(gstrings.len * ETH_GSTRING_LEN, GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->get_strings(dev, gstrings.string_set, data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &gstrings, sizeof(gstrings))) ++ goto out; ++ useraddr += sizeof(gstrings); ++ if (copy_to_user(useraddr, data, gstrings.len * ETH_GSTRING_LEN)) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++static int ethtool_phys_id(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_value id; ++ ++ if (!ethtool_ops->phys_id) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&id, useraddr, sizeof(id))) ++ return -EFAULT; ++ ++ return ethtool_ops->phys_id(dev, id.data); ++} ++ ++static int ethtool_get_stats(struct net_device *dev, void *useraddr) ++{ ++ struct ethtool_stats stats; ++ struct ethtool_ops *ops = ethtool_ops; ++ u64 *data; ++ int ret; ++ ++ if (!ops->get_ethtool_stats || !ops->get_stats_count) ++ return -EOPNOTSUPP; ++ ++ if (copy_from_user(&stats, useraddr, sizeof(stats))) ++ return -EFAULT; ++ ++ stats.n_stats = ops->get_stats_count(dev); ++ data = kmalloc(stats.n_stats * sizeof(u64), GFP_USER); ++ if (!data) ++ return -ENOMEM; ++ ++ ops->get_ethtool_stats(dev, &stats, data); ++ ++ ret = -EFAULT; ++ if (copy_to_user(useraddr, &stats, 
sizeof(stats))) ++ goto out; ++ useraddr += sizeof(stats); ++ if (copy_to_user(useraddr, data, stats.n_stats * sizeof(u64))) ++ goto out; ++ ret = 0; ++ ++out: ++ kfree(data); ++ return ret; ++} ++ ++/* The main entry point in this file. Called from net/core/dev.c */ ++ ++#define ETHTOOL_OPS_COMPAT ++int ethtool_ioctl(struct ifreq *ifr) ++{ ++ struct net_device *dev = __dev_get_by_name(ifr->ifr_name); ++ void *useraddr = (void *) ifr->ifr_data; ++ u32 ethcmd; ++ ++ /* ++ * XXX: This can be pushed down into the ethtool_* handlers that ++ * need it. Keep existing behavior for the moment. ++ */ ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ ++ if (!dev || !netif_device_present(dev)) ++ return -ENODEV; ++ ++ if (copy_from_user(ðcmd, useraddr, sizeof (ethcmd))) ++ return -EFAULT; ++ ++ switch (ethcmd) { ++ case ETHTOOL_GSET: ++ return ethtool_get_settings(dev, useraddr); ++ case ETHTOOL_SSET: ++ return ethtool_set_settings(dev, useraddr); ++ case ETHTOOL_GDRVINFO: ++ return ethtool_get_drvinfo(dev, useraddr); ++ case ETHTOOL_GREGS: ++ return ethtool_get_regs(dev, useraddr); ++ case ETHTOOL_GWOL: ++ return ethtool_get_wol(dev, useraddr); ++ case ETHTOOL_SWOL: ++ return ethtool_set_wol(dev, useraddr); ++ case ETHTOOL_GMSGLVL: ++ return ethtool_get_msglevel(dev, useraddr); ++ case ETHTOOL_SMSGLVL: ++ return ethtool_set_msglevel(dev, useraddr); ++ case ETHTOOL_NWAY_RST: ++ return ethtool_nway_reset(dev); ++ case ETHTOOL_GLINK: ++ return ethtool_get_link(dev, useraddr); ++ case ETHTOOL_GEEPROM: ++ return ethtool_get_eeprom(dev, useraddr); ++ case ETHTOOL_SEEPROM: ++ return ethtool_set_eeprom(dev, useraddr); ++ case ETHTOOL_GCOALESCE: ++ return ethtool_get_coalesce(dev, useraddr); ++ case ETHTOOL_SCOALESCE: ++ return ethtool_set_coalesce(dev, useraddr); ++ case ETHTOOL_GRINGPARAM: ++ return ethtool_get_ringparam(dev, useraddr); ++ case ETHTOOL_SRINGPARAM: ++ return ethtool_set_ringparam(dev, useraddr); ++ case ETHTOOL_GPAUSEPARAM: ++ return ethtool_get_pauseparam(dev, 
useraddr); ++ case ETHTOOL_SPAUSEPARAM: ++ return ethtool_set_pauseparam(dev, useraddr); ++ case ETHTOOL_GRXCSUM: ++ return ethtool_get_rx_csum(dev, useraddr); ++ case ETHTOOL_SRXCSUM: ++ return ethtool_set_rx_csum(dev, useraddr); ++ case ETHTOOL_GTXCSUM: ++ return ethtool_get_tx_csum(dev, useraddr); ++ case ETHTOOL_STXCSUM: ++ return ethtool_set_tx_csum(dev, useraddr); ++ case ETHTOOL_GSG: ++ return ethtool_get_sg(dev, useraddr); ++ case ETHTOOL_SSG: ++ return ethtool_set_sg(dev, useraddr); ++ case ETHTOOL_GTSO: ++ return ethtool_get_tso(dev, useraddr); ++ case ETHTOOL_STSO: ++ return ethtool_set_tso(dev, useraddr); ++ case ETHTOOL_TEST: ++ return ethtool_self_test(dev, useraddr); ++ case ETHTOOL_GSTRINGS: ++ return ethtool_get_strings(dev, useraddr); ++ case ETHTOOL_PHYS_ID: ++ return ethtool_phys_id(dev, useraddr); ++ case ETHTOOL_GSTATS: ++ return ethtool_get_stats(dev, useraddr); ++ default: ++ return -EOPNOTSUPP; ++ } ++ ++ return -EOPNOTSUPP; ++} ++ ++#define mii_if_info _kc_mii_if_info ++struct _kc_mii_if_info { ++ int phy_id; ++ int advertising; ++ int phy_id_mask; ++ int reg_num_mask; ++ ++ unsigned int full_duplex : 1; /* is full duplex? */ ++ unsigned int force_media : 1; /* is autoneg. disabled? 
*/ ++ ++ struct net_device *dev; ++ int (*mdio_read) (struct net_device *dev, int phy_id, int location); ++ void (*mdio_write) (struct net_device *dev, int phy_id, int location, int val); ++}; ++ ++struct ethtool_cmd; ++struct mii_ioctl_data; ++ ++#undef mii_link_ok ++#define mii_link_ok _kc_mii_link_ok ++#undef mii_nway_restart ++#define mii_nway_restart _kc_mii_nway_restart ++#undef mii_ethtool_gset ++#define mii_ethtool_gset _kc_mii_ethtool_gset ++#undef mii_ethtool_sset ++#define mii_ethtool_sset _kc_mii_ethtool_sset ++#undef mii_check_link ++#define mii_check_link _kc_mii_check_link ++extern int _kc_mii_link_ok (struct mii_if_info *mii); ++extern int _kc_mii_nway_restart (struct mii_if_info *mii); ++extern int _kc_mii_ethtool_gset(struct mii_if_info *mii, ++ struct ethtool_cmd *ecmd); ++extern int _kc_mii_ethtool_sset(struct mii_if_info *mii, ++ struct ethtool_cmd *ecmd); ++extern void _kc_mii_check_link (struct mii_if_info *mii); ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) ++#undef generic_mii_ioctl ++#define generic_mii_ioctl _kc_generic_mii_ioctl ++extern int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, ++ struct mii_ioctl_data *mii_data, int cmd, ++ unsigned int *duplex_changed); ++#endif /* > 2.4.6 */ ++ ++ ++struct _kc_pci_dev_ext { ++ struct pci_dev *dev; ++ void *pci_drvdata; ++ struct pci_driver *driver; ++}; ++ ++struct _kc_net_dev_ext { ++ struct net_device *dev; ++ unsigned int carrier; ++}; ++ ++ ++/**************************************/ ++/* mii support */ ++ ++int _kc_mii_ethtool_gset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) ++{ ++ struct net_device *dev = mii->dev; ++ u32 advert, bmcr, lpa, nego; ++ ++ ecmd->supported = ++ (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | ++ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | ++ SUPPORTED_Autoneg | SUPPORTED_TP | SUPPORTED_MII); ++ ++ /* only supports twisted-pair */ ++ ecmd->port = PORT_MII; ++ ++ /* only supports internal transceiver */ ++ ecmd->transceiver = 
XCVR_INTERNAL; ++ ++ /* this isn't fully supported at higher layers */ ++ ecmd->phy_address = mii->phy_id; ++ ++ ecmd->advertising = ADVERTISED_TP | ADVERTISED_MII; ++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); ++ if (advert & ADVERTISE_10HALF) ++ ecmd->advertising |= ADVERTISED_10baseT_Half; ++ if (advert & ADVERTISE_10FULL) ++ ecmd->advertising |= ADVERTISED_10baseT_Full; ++ if (advert & ADVERTISE_100HALF) ++ ecmd->advertising |= ADVERTISED_100baseT_Half; ++ if (advert & ADVERTISE_100FULL) ++ ecmd->advertising |= ADVERTISED_100baseT_Full; ++ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ lpa = mii->mdio_read(dev, mii->phy_id, MII_LPA); ++ if (bmcr & BMCR_ANENABLE) { ++ ecmd->advertising |= ADVERTISED_Autoneg; ++ ecmd->autoneg = AUTONEG_ENABLE; ++ ++ nego = mii_nway_result(advert & lpa); ++ if (nego == LPA_100FULL || nego == LPA_100HALF) ++ ecmd->speed = SPEED_100; ++ else ++ ecmd->speed = SPEED_10; ++ if (nego == LPA_100FULL || nego == LPA_10FULL) { ++ ecmd->duplex = DUPLEX_FULL; ++ mii->full_duplex = 1; ++ } else { ++ ecmd->duplex = DUPLEX_HALF; ++ mii->full_duplex = 0; ++ } ++ } else { ++ ecmd->autoneg = AUTONEG_DISABLE; ++ ++ ecmd->speed = (bmcr & BMCR_SPEED100) ? SPEED_100 : SPEED_10; ++ ecmd->duplex = (bmcr & BMCR_FULLDPLX) ? 
DUPLEX_FULL : DUPLEX_HALF; ++ } ++ ++ /* ignore maxtxpkt, maxrxpkt for now */ ++ ++ return 0; ++} ++ ++int _kc_mii_ethtool_sset(struct mii_if_info *mii, struct ethtool_cmd *ecmd) ++{ ++ struct net_device *dev = mii->dev; ++ ++ if (ecmd->speed != SPEED_10 && ecmd->speed != SPEED_100) ++ return -EINVAL; ++ if (ecmd->duplex != DUPLEX_HALF && ecmd->duplex != DUPLEX_FULL) ++ return -EINVAL; ++ if (ecmd->port != PORT_MII) ++ return -EINVAL; ++ if (ecmd->transceiver != XCVR_INTERNAL) ++ return -EINVAL; ++ if (ecmd->phy_address != mii->phy_id) ++ return -EINVAL; ++ if (ecmd->autoneg != AUTONEG_DISABLE && ecmd->autoneg != AUTONEG_ENABLE) ++ return -EINVAL; ++ ++ /* ignore supported, maxtxpkt, maxrxpkt */ ++ ++ if (ecmd->autoneg == AUTONEG_ENABLE) { ++ u32 bmcr, advert, tmp; ++ ++ if ((ecmd->advertising & (ADVERTISED_10baseT_Half | ++ ADVERTISED_10baseT_Full | ++ ADVERTISED_100baseT_Half | ++ ADVERTISED_100baseT_Full)) == 0) ++ return -EINVAL; ++ ++ /* advertise only what has been requested */ ++ advert = mii->mdio_read(dev, mii->phy_id, MII_ADVERTISE); ++ tmp = advert & ~(ADVERTISE_ALL | ADVERTISE_100BASE4); ++ if (ADVERTISED_10baseT_Half) ++ tmp |= ADVERTISE_10HALF; ++ if (ADVERTISED_10baseT_Full) ++ tmp |= ADVERTISE_10FULL; ++ if (ADVERTISED_100baseT_Half) ++ tmp |= ADVERTISE_100HALF; ++ if (ADVERTISED_100baseT_Full) ++ tmp |= ADVERTISE_100FULL; ++ if (advert != tmp) { ++ mii->mdio_write(dev, mii->phy_id, MII_ADVERTISE, tmp); ++ mii->advertising = tmp; ++ } ++ ++ /* turn on autonegotiation, and force a renegotiate */ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ bmcr |= (BMCR_ANENABLE | BMCR_ANRESTART); ++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, bmcr); ++ ++ mii->force_media = 0; ++ } else { ++ u32 bmcr, tmp; ++ ++ /* turn off auto negotiation, set speed and duplexity */ ++ bmcr = mii->mdio_read(dev, mii->phy_id, MII_BMCR); ++ tmp = bmcr & ~(BMCR_ANENABLE | BMCR_SPEED100 | BMCR_FULLDPLX); ++ if (ecmd->speed == SPEED_100) ++ tmp |= BMCR_SPEED100; ++ if 
(ecmd->duplex == DUPLEX_FULL) { ++ tmp |= BMCR_FULLDPLX; ++ mii->full_duplex = 1; ++ } else ++ mii->full_duplex = 0; ++ if (bmcr != tmp) ++ mii->mdio_write(dev, mii->phy_id, MII_BMCR, tmp); ++ ++ mii->force_media = 1; ++ } ++ return 0; ++} ++ ++int _kc_mii_link_ok (struct mii_if_info *mii) ++{ ++ /* first, a dummy read, needed to latch some MII phys */ ++ mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR); ++ if (mii->mdio_read(mii->dev, mii->phy_id, MII_BMSR) & BMSR_LSTATUS) ++ return 1; ++ return 0; ++} ++ ++int _kc_mii_nway_restart (struct mii_if_info *mii) ++{ ++ int bmcr; ++ int r = -EINVAL; ++ ++ /* if autoneg is off, it's an error */ ++ bmcr = mii->mdio_read(mii->dev, mii->phy_id, MII_BMCR); ++ ++ if (bmcr & BMCR_ANENABLE) { ++ bmcr |= BMCR_ANRESTART; ++ mii->mdio_write(mii->dev, mii->phy_id, MII_BMCR, bmcr); ++ r = 0; ++ } ++ ++ return r; ++} ++ ++void _kc_mii_check_link (struct mii_if_info *mii) ++{ ++ int cur_link = mii_link_ok(mii); ++ int prev_link = netif_carrier_ok(mii->dev); ++ ++ if (cur_link && !prev_link) ++ netif_carrier_on(mii->dev); ++ else if (prev_link && !cur_link) ++ netif_carrier_off(mii->dev); ++} ++ ++#if ( LINUX_VERSION_CODE > KERNEL_VERSION(2,4,6) ) ++int _kc_generic_mii_ioctl(struct mii_if_info *mii_if, ++ struct mii_ioctl_data *mii_data, int cmd, ++ unsigned int *duplex_chg_out) ++{ ++ int rc = 0; ++ unsigned int duplex_changed = 0; ++ ++ if (duplex_chg_out) ++ *duplex_chg_out = 0; ++ ++ mii_data->phy_id &= mii_if->phy_id_mask; ++ mii_data->reg_num &= mii_if->reg_num_mask; ++ ++ switch(cmd) { ++ case SIOCDEVPRIVATE: /* binary compat, remove in 2.5 */ ++ case SIOCGMIIPHY: ++ mii_data->phy_id = mii_if->phy_id; ++ /* fall through */ ++ ++ case SIOCDEVPRIVATE + 1:/* binary compat, remove in 2.5 */ ++ case SIOCGMIIREG: ++ mii_data->val_out = ++ mii_if->mdio_read(mii_if->dev, mii_data->phy_id, ++ mii_data->reg_num); ++ break; ++ ++ case SIOCDEVPRIVATE + 2:/* binary compat, remove in 2.5 */ ++ case SIOCSMIIREG: { ++ u16 val = mii_data->val_in; 
++ ++ if (!capable(CAP_NET_ADMIN)) ++ return -EPERM; ++ ++ if (mii_data->phy_id == mii_if->phy_id) { ++ switch(mii_data->reg_num) { ++ case MII_BMCR: { ++ unsigned int new_duplex = 0; ++ if (val & (BMCR_RESET|BMCR_ANENABLE)) ++ mii_if->force_media = 0; ++ else ++ mii_if->force_media = 1; ++ if (mii_if->force_media && ++ (val & BMCR_FULLDPLX)) ++ new_duplex = 1; ++ if (mii_if->full_duplex != new_duplex) { ++ duplex_changed = 1; ++ mii_if->full_duplex = new_duplex; ++ } ++ break; ++ } ++ case MII_ADVERTISE: ++ mii_if->advertising = val; ++ break; ++ default: ++ /* do nothing */ ++ break; ++ } ++ } ++ ++ mii_if->mdio_write(mii_if->dev, mii_data->phy_id, ++ mii_data->reg_num, val); ++ break; ++ } ++ ++ default: ++ rc = -EOPNOTSUPP; ++ break; ++ } ++ ++ if ((rc == 0) && (duplex_chg_out) && (duplex_changed)) ++ *duplex_chg_out = 1; ++ ++ return rc; ++} ++#endif /* > 2.4.6 */ ++ +-- +2.1.4 + diff --git a/packages/base/any/kernels/3.16-lts/patches/series b/packages/base/any/kernels/3.16-lts/patches/series index 37e24f77..72485e2d 100644 --- a/packages/base/any/kernels/3.16-lts/patches/series +++ b/packages/base/any/kernels/3.16-lts/patches/series @@ -28,3 +28,4 @@ driver-support-intel-igb-bcm50210-phy.patch driver-igb-netberg-aurora.patch driver-hid-cp2112-mods.patch gcc-no-pie.patch +driver-ixgbe-version-5.2.4.patch diff --git a/packages/base/any/kernels/3.18.25/.gitignore b/packages/base/any/kernels/3.18.25/.gitignore deleted file mode 100644 index b9ef4135..00000000 --- a/packages/base/any/kernels/3.18.25/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -linux-3.18.25 -linux-3.18.25-mbuild -linux-3.18.25-dtbs - diff --git a/packages/base/any/kernels/3.18.25/configs/arm64-all/.gitignore b/packages/base/any/kernels/3.18.25/configs/arm64-all/.gitignore deleted file mode 100644 index 6a57178c..00000000 --- a/packages/base/any/kernels/3.18.25/configs/arm64-all/.gitignore +++ /dev/null @@ -1 +0,0 @@ -kernel-* diff --git a/packages/base/any/kernels/3.18.25/configs/arm64-all/Makefile 
b/packages/base/any/kernels/3.18.25/configs/arm64-all/Makefile deleted file mode 100644 index f8c7663c..00000000 --- a/packages/base/any/kernels/3.18.25/configs/arm64-all/Makefile +++ /dev/null @@ -1,28 +0,0 @@ -############################################################ -# -# Default 3.18.25 configuration for arm64 platforms. -# -############################################################ -THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) -include $(ONL)/make/config.mk - -ifndef K_TARGET_DIR -K_TARGET_DIR := $(THIS_DIR) -endif - -MODSYNCLIST_EXTRA = arch/arm64/include arch/arm64/Makefile arch/arm64/lib -K_PATCH_SERIES=series.arm64 - -include ../../kconfig.mk -K_CONFIG := arm64-all.config -K_BUILD_TARGET := Image Image.gz arm64-nxp-ls2080ardb-r0.dtb arm64-nxp-ls2088ardb-r1.dtb -K_COPY_SRC := arch/arm64/boot/Image -K_COPY_GZIP := 1 -ifndef K_COPY_DST -K_COPY_DST := kernel-3.18.25-arm64-all.bin.gz -endif - -export ARCH=arm64 -DTS_LIST := arm64-nxp-ls2080ardb-r0 - -include $(ONL)/make/kbuild.mk diff --git a/packages/base/any/kernels/3.18.25/configs/x86_64-all/.gitignore b/packages/base/any/kernels/3.18.25/configs/x86_64-all/.gitignore deleted file mode 100644 index c785d46f..00000000 --- a/packages/base/any/kernels/3.18.25/configs/x86_64-all/.gitignore +++ /dev/null @@ -1 +0,0 @@ -kernel-3.18-x86_64-all diff --git a/packages/base/any/kernels/3.18.25/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/3.18.25/configs/x86_64-all/x86_64-all.config deleted file mode 100644 index 40dd2323..00000000 --- a/packages/base/any/kernels/3.18.25/configs/x86_64-all/x86_64-all.config +++ /dev/null @@ -1,3581 +0,0 @@ -# -# Automatically generated file; DO NOT EDIT. 
-# Linux/x86_64 3.18.25 Kernel Configuration -# -CONFIG_64BIT=y -CONFIG_X86_64=y -CONFIG_X86=y -CONFIG_INSTRUCTION_DECODER=y -CONFIG_PERF_EVENTS_INTEL_UNCORE=y -CONFIG_OUTPUT_FORMAT="elf64-x86-64" -CONFIG_ARCH_DEFCONFIG="arch/x86/configs/x86_64_defconfig" -CONFIG_LOCKDEP_SUPPORT=y -CONFIG_STACKTRACE_SUPPORT=y -CONFIG_HAVE_LATENCYTOP_SUPPORT=y -CONFIG_MMU=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_GENERIC_ISA_DMA=y -CONFIG_GENERIC_BUG=y -CONFIG_GENERIC_BUG_RELATIVE_POINTERS=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_ARCH_MAY_HAVE_PC_FDC=y -CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ARCH_HAS_CPU_RELAX=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_HAVE_SETUP_PER_CPU_AREA=y -CONFIG_NEED_PER_CPU_EMBED_FIRST_CHUNK=y -CONFIG_NEED_PER_CPU_PAGE_FIRST_CHUNK=y -CONFIG_ARCH_HIBERNATION_POSSIBLE=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ZONE_DMA32=y -CONFIG_AUDIT_ARCH=y -CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y -CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_X86_64_SMP=y -CONFIG_X86_HT=y -CONFIG_ARCH_HWEIGHT_CFLAGS="-fcall-saved-rdi -fcall-saved-rsi -fcall-saved-rdx -fcall-saved-rcx -fcall-saved-r8 -fcall-saved-r9 -fcall-saved-r10 -fcall-saved-r11" -CONFIG_ARCH_SUPPORTS_UPROBES=y -CONFIG_FIX_EARLYCON_MEM=y -CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" -CONFIG_IRQ_WORK=y -CONFIG_BUILDTIME_EXTABLE_SORT=y - -# -# General setup -# -CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_CROSS_COMPILE="" -# CONFIG_COMPILE_TEST is not set -CONFIG_LOCALVERSION="-OpenNetworkLinux" -# CONFIG_LOCALVERSION_AUTO is not set -CONFIG_HAVE_KERNEL_GZIP=y -CONFIG_HAVE_KERNEL_BZIP2=y -CONFIG_HAVE_KERNEL_LZMA=y -CONFIG_HAVE_KERNEL_XZ=y -CONFIG_HAVE_KERNEL_LZO=y -CONFIG_HAVE_KERNEL_LZ4=y -# CONFIG_KERNEL_GZIP is not set -CONFIG_KERNEL_BZIP2=y -# CONFIG_KERNEL_LZMA is not set -# CONFIG_KERNEL_XZ is not set -# CONFIG_KERNEL_LZO is not set -# CONFIG_KERNEL_LZ4 is not set 
-CONFIG_DEFAULT_HOSTNAME="(none)" -CONFIG_SWAP=y -CONFIG_SYSVIPC=y -CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y -CONFIG_CROSS_MEMORY_ATTACH=y -CONFIG_FHANDLE=y -CONFIG_USELIB=y -CONFIG_AUDIT=y -CONFIG_HAVE_ARCH_AUDITSYSCALL=y -CONFIG_AUDITSYSCALL=y -CONFIG_AUDIT_WATCH=y -CONFIG_AUDIT_TREE=y - -# -# IRQ subsystem -# -CONFIG_GENERIC_IRQ_PROBE=y -CONFIG_GENERIC_IRQ_SHOW=y -CONFIG_GENERIC_IRQ_LEGACY_ALLOC_HWIRQ=y -CONFIG_GENERIC_PENDING_IRQ=y -CONFIG_IRQ_DOMAIN=y -CONFIG_GENERIC_MSI_IRQ=y -# CONFIG_IRQ_DOMAIN_DEBUG is not set -CONFIG_IRQ_FORCED_THREADING=y -CONFIG_SPARSE_IRQ=y -CONFIG_CLOCKSOURCE_WATCHDOG=y -CONFIG_ARCH_CLOCKSOURCE_DATA=y -CONFIG_CLOCKSOURCE_VALIDATE_LAST_CYCLE=y -CONFIG_GENERIC_TIME_VSYSCALL=y -CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y -CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -CONFIG_GENERIC_CLOCKEVENTS_MIN_ADJUST=y -CONFIG_GENERIC_CMOS_UPDATE=y - -# -# Timers subsystem -# -CONFIG_TICK_ONESHOT=y -CONFIG_NO_HZ_COMMON=y -# CONFIG_HZ_PERIODIC is not set -CONFIG_NO_HZ_IDLE=y -# CONFIG_NO_HZ_FULL is not set -CONFIG_NO_HZ=y -CONFIG_HIGH_RES_TIMERS=y - -# -# CPU/Task time and stats accounting -# -CONFIG_TICK_CPU_ACCOUNTING=y -# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set -# CONFIG_IRQ_TIME_ACCOUNTING is not set -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y - -# -# RCU Subsystem -# -CONFIG_TREE_RCU=y -# CONFIG_PREEMPT_RCU is not set -# CONFIG_TASKS_RCU is not set -CONFIG_RCU_STALL_COMMON=y -# CONFIG_RCU_USER_QS is not set -CONFIG_RCU_FANOUT=64 -CONFIG_RCU_FANOUT_LEAF=16 -# CONFIG_RCU_FANOUT_EXACT is not set -CONFIG_RCU_FAST_NO_HZ=y -# CONFIG_TREE_RCU_TRACE is not set -# CONFIG_RCU_NOCB_CPU is not set -CONFIG_BUILD_BIN2C=y -CONFIG_IKCONFIG=y -CONFIG_IKCONFIG_PROC=y -CONFIG_LOG_BUF_SHIFT=17 -CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -CONFIG_HAVE_UNSTABLE_SCHED_CLOCK=y -CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y 
-CONFIG_ARCH_SUPPORTS_INT128=y -CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set -CONFIG_CGROUP_FREEZER=y -CONFIG_CGROUP_DEVICE=y -CONFIG_CPUSETS=y -CONFIG_PROC_PID_CPUSET=y -CONFIG_CGROUP_CPUACCT=y -CONFIG_RESOURCE_COUNTERS=y -CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_SWAP_ENABLED=y -CONFIG_MEMCG_KMEM=y -# CONFIG_CGROUP_HUGETLB is not set -CONFIG_CGROUP_PERF=y -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_CFS_BANDWIDTH is not set -# CONFIG_RT_GROUP_SCHED is not set -CONFIG_BLK_CGROUP=y -# CONFIG_DEBUG_BLK_CGROUP is not set -# CONFIG_CHECKPOINT_RESTORE is not set -CONFIG_NAMESPACES=y -CONFIG_UTS_NS=y -CONFIG_IPC_NS=y -CONFIG_USER_NS=y -CONFIG_PID_NS=y -CONFIG_NET_NS=y -CONFIG_SCHED_AUTOGROUP=y -# CONFIG_SYSFS_DEPRECATED is not set -CONFIG_RELAY=y -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -# CONFIG_RD_LZ4 is not set -CONFIG_CC_OPTIMIZE_FOR_SIZE=y -CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y -CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y -CONFIG_HAVE_PCSPKR_PLATFORM=y -CONFIG_BPF=y -CONFIG_EXPERT=y -CONFIG_UID16=y -CONFIG_SGETMASK_SYSCALL=y -CONFIG_SYSFS_SYSCALL=y -# CONFIG_SYSCTL_SYSCALL is not set -CONFIG_KALLSYMS=y -# CONFIG_KALLSYMS_ALL is not set -CONFIG_PRINTK=y -CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_PCSPKR_PLATFORM=y -CONFIG_BASE_FULL=y -CONFIG_FUTEX=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y -# CONFIG_BPF_SYSCALL is not set -CONFIG_SHMEM=y -CONFIG_AIO=y -CONFIG_ADVISE_SYSCALLS=y -CONFIG_PCI_QUIRKS=y -CONFIG_EMBEDDED=y -CONFIG_HAVE_PERF_EVENTS=y - -# -# Kernel Performance Events And Counters -# -CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -CONFIG_VM_EVENT_COUNTERS=y -# CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y -# CONFIG_SLUB is not set -# CONFIG_SLOB is not set -# CONFIG_SYSTEM_TRUSTED_KEYRING is not set -# CONFIG_PROFILING is not set -CONFIG_TRACEPOINTS=y -CONFIG_HAVE_OPROFILE=y 
-CONFIG_OPROFILE_NMI_TIMER=y -# CONFIG_KPROBES is not set -# CONFIG_JUMP_LABEL is not set -# CONFIG_UPROBES is not set -# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set -CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -CONFIG_ARCH_USE_BUILTIN_BSWAP=y -CONFIG_HAVE_IOREMAP_PROT=y -CONFIG_HAVE_KPROBES=y -CONFIG_HAVE_KRETPROBES=y -CONFIG_HAVE_OPTPROBES=y -CONFIG_HAVE_KPROBES_ON_FTRACE=y -CONFIG_HAVE_ARCH_TRACEHOOK=y -CONFIG_HAVE_DMA_ATTRS=y -CONFIG_HAVE_DMA_CONTIGUOUS=y -CONFIG_GENERIC_SMP_IDLE_THREAD=y -CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y -CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_HW_BREAKPOINT=y -CONFIG_HAVE_MIXED_BREAKPOINTS_REGS=y -CONFIG_HAVE_USER_RETURN_NOTIFIER=y -CONFIG_HAVE_PERF_EVENTS_NMI=y -CONFIG_HAVE_PERF_REGS=y -CONFIG_HAVE_PERF_USER_STACK_DUMP=y -CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y -CONFIG_HAVE_CMPXCHG_LOCAL=y -CONFIG_HAVE_CMPXCHG_DOUBLE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -CONFIG_ARCH_WANT_OLD_COMPAT_IPC=y -CONFIG_HAVE_ARCH_SECCOMP_FILTER=y -CONFIG_SECCOMP_FILTER=y -CONFIG_HAVE_CC_STACKPROTECTOR=y -# CONFIG_CC_STACKPROTECTOR is not set -CONFIG_CC_STACKPROTECTOR_NONE=y -# CONFIG_CC_STACKPROTECTOR_REGULAR is not set -# CONFIG_CC_STACKPROTECTOR_STRONG is not set -CONFIG_HAVE_CONTEXT_TRACKING=y -CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_HAVE_ARCH_SOFT_DIRTY=y -CONFIG_MODULES_USE_ELF_RELA=y -CONFIG_HAVE_IRQ_EXIT_ON_IRQ_STACK=y -CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y - -# -# GCOV-based kernel profiling -# -# CONFIG_GCOV_KERNEL is not set -# CONFIG_HAVE_GENERIC_DMA_COHERENT is not set -CONFIG_SLABINFO=y -CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 -CONFIG_MODULES=y -# CONFIG_MODULE_FORCE_LOAD is not set -CONFIG_MODULE_UNLOAD=y -# CONFIG_MODULE_FORCE_UNLOAD is not set -# CONFIG_MODVERSIONS is not set -# CONFIG_MODULE_SRCVERSION_ALL is not set -# CONFIG_MODULE_SIG is not set -# CONFIG_MODULE_COMPRESS is not set -CONFIG_STOP_MACHINE=y 
-CONFIG_BLOCK=y -CONFIG_BLK_DEV_BSG=y -CONFIG_BLK_DEV_BSGLIB=y -CONFIG_BLK_DEV_INTEGRITY=y -# CONFIG_BLK_DEV_THROTTLING is not set -# CONFIG_BLK_CMDLINE_PARSER is not set - -# -# Partition Types -# -CONFIG_PARTITION_ADVANCED=y -CONFIG_ACORN_PARTITION=y -CONFIG_ACORN_PARTITION_CUMANA=y -CONFIG_ACORN_PARTITION_EESOX=y -CONFIG_ACORN_PARTITION_ICS=y -CONFIG_ACORN_PARTITION_ADFS=y -CONFIG_ACORN_PARTITION_POWERTEC=y -CONFIG_ACORN_PARTITION_RISCIX=y -# CONFIG_AIX_PARTITION is not set -CONFIG_OSF_PARTITION=y -CONFIG_AMIGA_PARTITION=y -CONFIG_ATARI_PARTITION=y -CONFIG_MAC_PARTITION=y -CONFIG_MSDOS_PARTITION=y -CONFIG_BSD_DISKLABEL=y -CONFIG_MINIX_SUBPARTITION=y -CONFIG_SOLARIS_X86_PARTITION=y -CONFIG_UNIXWARE_DISKLABEL=y -CONFIG_LDM_PARTITION=y -# CONFIG_LDM_DEBUG is not set -CONFIG_SGI_PARTITION=y -CONFIG_ULTRIX_PARTITION=y -CONFIG_SUN_PARTITION=y -CONFIG_KARMA_PARTITION=y -CONFIG_EFI_PARTITION=y -# CONFIG_SYSV68_PARTITION is not set -# CONFIG_CMDLINE_PARTITION is not set -CONFIG_BLOCK_COMPAT=y - -# -# IO Schedulers -# -CONFIG_IOSCHED_NOOP=y -CONFIG_IOSCHED_DEADLINE=y -CONFIG_IOSCHED_CFQ=y -CONFIG_CFQ_GROUP_IOSCHED=y -# CONFIG_DEFAULT_DEADLINE is not set -CONFIG_DEFAULT_CFQ=y -# CONFIG_DEFAULT_NOOP is not set -CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_INLINE_SPIN_UNLOCK_IRQ=y -CONFIG_INLINE_READ_UNLOCK=y -CONFIG_INLINE_READ_UNLOCK_IRQ=y -CONFIG_INLINE_WRITE_UNLOCK=y -CONFIG_INLINE_WRITE_UNLOCK_IRQ=y -CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -CONFIG_MUTEX_SPIN_ON_OWNER=y -CONFIG_RWSEM_SPIN_ON_OWNER=y -CONFIG_ARCH_USE_QUEUE_RWLOCK=y -CONFIG_QUEUE_RWLOCK=y -CONFIG_FREEZER=y - -# -# Processor type and features -# -CONFIG_ZONE_DMA=y -CONFIG_SMP=y -CONFIG_X86_FEATURE_NAMES=y -CONFIG_X86_MPPARSE=y -# CONFIG_X86_EXTENDED_PLATFORM is not set -# CONFIG_X86_INTEL_LPSS is not set -# CONFIG_IOSF_MBI is not set -CONFIG_X86_SUPPORTS_MEMORY_FAILURE=y -CONFIG_SCHED_OMIT_FRAME_POINTER=y -# CONFIG_HYPERVISOR_GUEST is not set -CONFIG_NO_BOOTMEM=y -CONFIG_MEMTEST=y -# CONFIG_MK8 is not set -# CONFIG_MPSC 
is not set -# CONFIG_MCORE2 is not set -# CONFIG_MATOM is not set -CONFIG_GENERIC_CPU=y -CONFIG_X86_INTERNODE_CACHE_SHIFT=6 -CONFIG_X86_L1_CACHE_SHIFT=6 -CONFIG_X86_TSC=y -CONFIG_X86_CMPXCHG64=y -CONFIG_X86_CMOV=y -CONFIG_X86_MINIMUM_CPU_FAMILY=64 -CONFIG_X86_DEBUGCTLMSR=y -# CONFIG_PROCESSOR_SELECT is not set -CONFIG_CPU_SUP_INTEL=y -CONFIG_CPU_SUP_AMD=y -CONFIG_CPU_SUP_CENTAUR=y -CONFIG_HPET_TIMER=y -CONFIG_HPET_EMULATE_RTC=y -CONFIG_DMI=y -CONFIG_GART_IOMMU=y -CONFIG_CALGARY_IOMMU=y -CONFIG_CALGARY_IOMMU_ENABLED_BY_DEFAULT=y -CONFIG_SWIOTLB=y -CONFIG_IOMMU_HELPER=y -# CONFIG_MAXSMP is not set -CONFIG_NR_CPUS=512 -CONFIG_SCHED_SMT=y -CONFIG_SCHED_MC=y -# CONFIG_PREEMPT_NONE is not set -CONFIG_PREEMPT_VOLUNTARY=y -# CONFIG_PREEMPT is not set -CONFIG_X86_UP_APIC_MSI=y -CONFIG_X86_LOCAL_APIC=y -CONFIG_X86_IO_APIC=y -CONFIG_X86_REROUTE_FOR_BROKEN_BOOT_IRQS=y -CONFIG_X86_MCE=y -CONFIG_X86_MCE_INTEL=y -CONFIG_X86_MCE_AMD=y -CONFIG_X86_MCE_THRESHOLD=y -# CONFIG_X86_MCE_INJECT is not set -CONFIG_X86_THERMAL_VECTOR=y -# CONFIG_X86_16BIT is not set -# CONFIG_I8K is not set -# CONFIG_MICROCODE is not set -# CONFIG_MICROCODE_INTEL_EARLY is not set -# CONFIG_MICROCODE_AMD_EARLY is not set -CONFIG_X86_MSR=y -CONFIG_X86_CPUID=y -CONFIG_ARCH_PHYS_ADDR_T_64BIT=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_DIRECT_GBPAGES=y -# CONFIG_NUMA is not set -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y -CONFIG_ARCH_MEMORY_PROBE=y -CONFIG_ARCH_PROC_KCORE_TEXT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER=y -CONFIG_SPARSEMEM_VMEMMAP=y -CONFIG_HAVE_MEMBLOCK=y -CONFIG_HAVE_MEMBLOCK_NODE_MAP=y -CONFIG_ARCH_DISCARD_MEMBLOCK=y -CONFIG_MEMORY_ISOLATION=y -CONFIG_HAVE_BOOTMEM_INFO_NODE=y -CONFIG_MEMORY_HOTPLUG=y 
-CONFIG_MEMORY_HOTPLUG_SPARSE=y -CONFIG_MEMORY_HOTREMOVE=y -CONFIG_PAGEFLAGS_EXTENDED=y -CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y -CONFIG_COMPACTION=y -CONFIG_MIGRATION=y -CONFIG_ARCH_ENABLE_HUGEPAGE_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y -CONFIG_NEED_BOUNCE_POOL=y -CONFIG_VIRT_TO_BUS=y -CONFIG_KSM=y -CONFIG_DEFAULT_MMAP_MIN_ADDR=65536 -CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y -CONFIG_MEMORY_FAILURE=y -CONFIG_HWPOISON_INJECT=y -CONFIG_TRANSPARENT_HUGEPAGE=y -# CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS is not set -CONFIG_TRANSPARENT_HUGEPAGE_MADVISE=y -# CONFIG_CLEANCACHE is not set -# CONFIG_FRONTSWAP is not set -# CONFIG_CMA is not set -# CONFIG_ZPOOL is not set -# CONFIG_ZBUD is not set -# CONFIG_ZSMALLOC is not set -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_X86_CHECK_BIOS_CORRUPTION is not set -CONFIG_X86_RESERVE_LOW=64 -CONFIG_MTRR=y -CONFIG_MTRR_SANITIZER=y -CONFIG_MTRR_SANITIZER_ENABLE_DEFAULT=0 -CONFIG_MTRR_SANITIZER_SPARE_REG_NR_DEFAULT=1 -CONFIG_X86_PAT=y -CONFIG_ARCH_USES_PG_UNCACHED=y -CONFIG_ARCH_RANDOM=y -CONFIG_X86_SMAP=y -# CONFIG_EFI is not set -CONFIG_SECCOMP=y -# CONFIG_HZ_100 is not set -CONFIG_HZ_250=y -# CONFIG_HZ_300 is not set -# CONFIG_HZ_1000 is not set -CONFIG_HZ=250 -CONFIG_SCHED_HRTICK=y -CONFIG_KEXEC=y -# CONFIG_KEXEC_FILE is not set -CONFIG_CRASH_DUMP=y -CONFIG_PHYSICAL_START=0x1000000 -CONFIG_RELOCATABLE=y -# CONFIG_RANDOMIZE_BASE is not set -CONFIG_PHYSICAL_ALIGN=0x1000000 -CONFIG_HOTPLUG_CPU=y -# CONFIG_BOOTPARAM_HOTPLUG_CPU0 is not set -# CONFIG_DEBUG_HOTPLUG_CPU0 is not set -# CONFIG_COMPAT_VDSO is not set -# CONFIG_CMDLINE_BOOL is not set -CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y -CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y - -# -# Power management and ACPI options -# -# CONFIG_SUSPEND is not set -# CONFIG_HIBERNATION is not set -# CONFIG_PM_RUNTIME is not set -CONFIG_ACPI=y -CONFIG_ACPI_LEGACY_TABLES_LOOKUP=y -CONFIG_ARCH_MIGHT_HAVE_ACPI_PDC=y -# CONFIG_ACPI_PROCFS_POWER is not set -# 
CONFIG_ACPI_EC_DEBUGFS is not set -CONFIG_ACPI_AC=y -CONFIG_ACPI_BATTERY=y -CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_FAN=y -# CONFIG_ACPI_DOCK is not set -CONFIG_ACPI_PROCESSOR=y -CONFIG_ACPI_HOTPLUG_CPU=y -# CONFIG_ACPI_PROCESSOR_AGGREGATOR is not set -CONFIG_ACPI_THERMAL=y -# CONFIG_ACPI_CUSTOM_DSDT is not set -# CONFIG_ACPI_INITRD_TABLE_OVERRIDE is not set -# CONFIG_ACPI_DEBUG is not set -# CONFIG_ACPI_PCI_SLOT is not set -CONFIG_X86_PM_TIMER=y -CONFIG_ACPI_CONTAINER=y -# CONFIG_ACPI_HOTPLUG_MEMORY is not set -# CONFIG_ACPI_SBS is not set -# CONFIG_ACPI_HED is not set -CONFIG_ACPI_CUSTOM_METHOD=y -# CONFIG_ACPI_REDUCED_HARDWARE_ONLY is not set -CONFIG_HAVE_ACPI_APEI=y -CONFIG_HAVE_ACPI_APEI_NMI=y -# CONFIG_ACPI_APEI is not set -# CONFIG_ACPI_EXTLOG is not set -# CONFIG_SFI is not set - -# -# CPU Frequency scaling -# -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -# CONFIG_CPU_FREQ_STAT_DETAILS is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y - -# -# x86 CPU frequency scaling drivers -# -# CONFIG_X86_INTEL_PSTATE is not set -# CONFIG_X86_PCC_CPUFREQ is not set -# CONFIG_X86_ACPI_CPUFREQ is not set -# CONFIG_X86_SPEEDSTEP_CENTRINO is not set -CONFIG_X86_P4_CLOCKMOD=y - -# -# shared options -# -CONFIG_X86_SPEEDSTEP_LIB=y - -# -# CPU Idle -# -CONFIG_CPU_IDLE=y -CONFIG_CPU_IDLE_GOV_LADDER=y -CONFIG_CPU_IDLE_GOV_MENU=y -# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set -CONFIG_INTEL_IDLE=y - -# -# Memory power savings -# -CONFIG_I7300_IDLE_IOAT_CHANNEL=y -CONFIG_I7300_IDLE=y - -# -# Bus options (PCI etc.) 
-# -CONFIG_PCI=y -CONFIG_PCI_DIRECT=y -CONFIG_PCI_MMCONFIG=y -CONFIG_PCI_DOMAINS=y -# CONFIG_PCI_CNB20LE_QUIRK is not set -CONFIG_PCIEPORTBUS=y -CONFIG_HOTPLUG_PCI_PCIE=y -CONFIG_PCIEAER=y -# CONFIG_PCIE_ECRC is not set -CONFIG_PCIEAER_INJECT=y -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -CONFIG_PCI_MSI=y -# CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -# CONFIG_PCI_STUB is not set -CONFIG_HT_IRQ=y -CONFIG_PCI_ATS=y -CONFIG_PCI_IOV=y -# CONFIG_PCI_PRI is not set -# CONFIG_PCI_PASID is not set -CONFIG_PCI_IOAPIC=y -CONFIG_PCI_LABEL=y - -# -# PCI host controller drivers -# -CONFIG_ISA_DMA_API=y -CONFIG_AMD_NB=y -CONFIG_PCCARD=y -CONFIG_PCMCIA=y -CONFIG_PCMCIA_LOAD_CIS=y -CONFIG_CARDBUS=y - -# -# PC-card bridges -# -# CONFIG_YENTA is not set -CONFIG_PD6729=y -CONFIG_I82092=y -CONFIG_PCCARD_NONSTATIC=y -CONFIG_HOTPLUG_PCI=y -# CONFIG_HOTPLUG_PCI_ACPI is not set -CONFIG_HOTPLUG_PCI_CPCI=y -CONFIG_HOTPLUG_PCI_CPCI_ZT5550=y -CONFIG_HOTPLUG_PCI_CPCI_GENERIC=y -CONFIG_HOTPLUG_PCI_SHPC=y -# CONFIG_RAPIDIO is not set -# CONFIG_X86_SYSFB is not set - -# -# Executable file formats / Emulations -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y -CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS=y -CONFIG_BINFMT_SCRIPT=y -# CONFIG_HAVE_AOUT is not set -CONFIG_BINFMT_MISC=y -CONFIG_COREDUMP=y -CONFIG_IA32_EMULATION=y -CONFIG_IA32_AOUT=y -# CONFIG_X86_X32 is not set -CONFIG_COMPAT=y -CONFIG_COMPAT_FOR_U64_ALIGNMENT=y -CONFIG_SYSVIPC_COMPAT=y -CONFIG_KEYS_COMPAT=y -CONFIG_X86_DEV_DMA_OPS=y -CONFIG_PMC_ATOM=y -CONFIG_NET=y - -# -# Networking options -# -CONFIG_PACKET=y -# CONFIG_PACKET_DIAG is not set -CONFIG_UNIX=y -# CONFIG_UNIX_DIAG is not set -CONFIG_XFRM=y -CONFIG_XFRM_ALGO=y -CONFIG_XFRM_USER=y -CONFIG_XFRM_SUB_POLICY=y -CONFIG_XFRM_MIGRATE=y -# CONFIG_XFRM_STATISTICS is not set -CONFIG_XFRM_IPCOMP=y 
-CONFIG_NET_KEY=y -CONFIG_NET_KEY_MIGRATE=y -CONFIG_INET=y -CONFIG_IP_MULTICAST=y -CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y -CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y -CONFIG_IP_ROUTE_VERBOSE=y -CONFIG_IP_ROUTE_CLASSID=y -# CONFIG_IP_PNP is not set -# CONFIG_NET_IPIP is not set -# CONFIG_NET_IPGRE_DEMUX is not set -CONFIG_NET_IP_TUNNEL=y -# CONFIG_IP_MROUTE is not set -# CONFIG_SYN_COOKIES is not set -# CONFIG_NET_UDP_TUNNEL is not set -# CONFIG_NET_FOU is not set -# CONFIG_GENEVE is not set -# CONFIG_INET_AH is not set -# CONFIG_INET_ESP is not set -# CONFIG_INET_IPCOMP is not set -# CONFIG_INET_XFRM_TUNNEL is not set -CONFIG_INET_TUNNEL=y -# CONFIG_INET_XFRM_MODE_TRANSPORT is not set -# CONFIG_INET_XFRM_MODE_TUNNEL is not set -# CONFIG_INET_XFRM_MODE_BEET is not set -CONFIG_INET_LRO=y -CONFIG_INET_DIAG=y -CONFIG_INET_TCP_DIAG=y -# CONFIG_INET_UDP_DIAG is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=y -CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=y -CONFIG_TCP_CONG_HTCP=y -CONFIG_TCP_CONG_HSTCP=y -CONFIG_TCP_CONG_HYBLA=y -CONFIG_TCP_CONG_VEGAS=y -CONFIG_TCP_CONG_SCALABLE=y -CONFIG_TCP_CONG_LP=y -CONFIG_TCP_CONG_VENO=y -CONFIG_TCP_CONG_YEAH=y -CONFIG_TCP_CONG_ILLINOIS=y -# CONFIG_TCP_CONG_DCTCP is not set -# CONFIG_DEFAULT_BIC is not set -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_HTCP is not set -# CONFIG_DEFAULT_HYBLA is not set -# CONFIG_DEFAULT_VEGAS is not set -# CONFIG_DEFAULT_VENO is not set -# CONFIG_DEFAULT_WESTWOOD is not set -# CONFIG_DEFAULT_RENO is not set -CONFIG_DEFAULT_TCP_CONG="cubic" -CONFIG_TCP_MD5SIG=y -CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_INET6_XFRM_TUNNEL=y -CONFIG_INET6_TUNNEL=y -CONFIG_INET6_XFRM_MODE_TRANSPORT=y -CONFIG_INET6_XFRM_MODE_TUNNEL=y -CONFIG_INET6_XFRM_MODE_BEET=y -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y -# CONFIG_IPV6_VTI is not set 
-CONFIG_IPV6_SIT=y -CONFIG_IPV6_SIT_6RD=y -CONFIG_IPV6_NDISC_NODETYPE=y -CONFIG_IPV6_TUNNEL=y -# CONFIG_IPV6_GRE is not set -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y -CONFIG_IPV6_MROUTE=y -CONFIG_IPV6_MROUTE_MULTIPLE_TABLES=y -CONFIG_IPV6_PIMSM_V2=y -CONFIG_NETWORK_SECMARK=y -CONFIG_NET_PTP_CLASSIFY=y -# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -CONFIG_NETFILTER=y -# CONFIG_NETFILTER_DEBUG is not set -CONFIG_NETFILTER_ADVANCED=y -CONFIG_BRIDGE_NETFILTER=y - -# -# Core Netfilter Configuration -# -CONFIG_NETFILTER_NETLINK=y -CONFIG_NETFILTER_NETLINK_ACCT=y -CONFIG_NETFILTER_NETLINK_QUEUE=y -CONFIG_NETFILTER_NETLINK_LOG=y -CONFIG_NF_CONNTRACK=y -CONFIG_NF_LOG_COMMON=y -CONFIG_NF_CONNTRACK_MARK=y -CONFIG_NF_CONNTRACK_SECMARK=y -CONFIG_NF_CONNTRACK_ZONES=y -CONFIG_NF_CONNTRACK_PROCFS=y -CONFIG_NF_CONNTRACK_EVENTS=y -# CONFIG_NF_CONNTRACK_TIMEOUT is not set -CONFIG_NF_CONNTRACK_TIMESTAMP=y -CONFIG_NF_CONNTRACK_LABELS=y -CONFIG_NF_CT_PROTO_DCCP=y -CONFIG_NF_CT_PROTO_GRE=y -CONFIG_NF_CT_PROTO_SCTP=y -CONFIG_NF_CT_PROTO_UDPLITE=y -CONFIG_NF_CONNTRACK_AMANDA=y -CONFIG_NF_CONNTRACK_FTP=y -CONFIG_NF_CONNTRACK_H323=y -CONFIG_NF_CONNTRACK_IRC=y -CONFIG_NF_CONNTRACK_BROADCAST=y -CONFIG_NF_CONNTRACK_NETBIOS_NS=y -CONFIG_NF_CONNTRACK_SNMP=y -CONFIG_NF_CONNTRACK_PPTP=y -CONFIG_NF_CONNTRACK_SANE=y -CONFIG_NF_CONNTRACK_SIP=y -CONFIG_NF_CONNTRACK_TFTP=y -CONFIG_NF_CT_NETLINK=y -CONFIG_NF_CT_NETLINK_TIMEOUT=y -CONFIG_NF_CT_NETLINK_HELPER=y -CONFIG_NETFILTER_NETLINK_QUEUE_CT=y -CONFIG_NF_NAT=y -CONFIG_NF_NAT_NEEDED=y -CONFIG_NF_NAT_PROTO_DCCP=y -CONFIG_NF_NAT_PROTO_UDPLITE=y -CONFIG_NF_NAT_PROTO_SCTP=y -CONFIG_NF_NAT_AMANDA=y -CONFIG_NF_NAT_FTP=y -CONFIG_NF_NAT_IRC=y -CONFIG_NF_NAT_SIP=y -CONFIG_NF_NAT_TFTP=y -# CONFIG_NF_TABLES is not set -CONFIG_NETFILTER_XTABLES=y - -# -# Xtables combined modules -# -CONFIG_NETFILTER_XT_MARK=y -CONFIG_NETFILTER_XT_CONNMARK=y -CONFIG_NETFILTER_XT_SET=y - -# -# Xtables targets -# -CONFIG_NETFILTER_XT_TARGET_AUDIT=y 
-CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y -CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y -CONFIG_NETFILTER_XT_TARGET_CONNMARK=y -CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -CONFIG_NETFILTER_XT_TARGET_CT=y -CONFIG_NETFILTER_XT_TARGET_DSCP=y -CONFIG_NETFILTER_XT_TARGET_HL=y -CONFIG_NETFILTER_XT_TARGET_HMARK=y -CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y -CONFIG_NETFILTER_XT_TARGET_LOG=y -CONFIG_NETFILTER_XT_TARGET_MARK=y -CONFIG_NETFILTER_XT_NAT=y -CONFIG_NETFILTER_XT_TARGET_NETMAP=y -CONFIG_NETFILTER_XT_TARGET_NFLOG=y -CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y -CONFIG_NETFILTER_XT_TARGET_NOTRACK=y -CONFIG_NETFILTER_XT_TARGET_RATEEST=y -CONFIG_NETFILTER_XT_TARGET_REDIRECT=y -CONFIG_NETFILTER_XT_TARGET_TEE=y -CONFIG_NETFILTER_XT_TARGET_TPROXY=y -CONFIG_NETFILTER_XT_TARGET_TRACE=y -CONFIG_NETFILTER_XT_TARGET_SECMARK=y -CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y - -# -# Xtables matches -# -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y -CONFIG_NETFILTER_XT_MATCH_BPF=y -CONFIG_NETFILTER_XT_MATCH_CGROUP=y -CONFIG_NETFILTER_XT_MATCH_CLUSTER=y -CONFIG_NETFILTER_XT_MATCH_COMMENT=y -CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y -CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y -CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y -CONFIG_NETFILTER_XT_MATCH_CONNMARK=y -CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -CONFIG_NETFILTER_XT_MATCH_CPU=y -CONFIG_NETFILTER_XT_MATCH_DCCP=y -CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y -CONFIG_NETFILTER_XT_MATCH_DSCP=y -CONFIG_NETFILTER_XT_MATCH_ECN=y -CONFIG_NETFILTER_XT_MATCH_ESP=y -CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y -CONFIG_NETFILTER_XT_MATCH_HELPER=y -CONFIG_NETFILTER_XT_MATCH_HL=y -CONFIG_NETFILTER_XT_MATCH_IPCOMP=y -CONFIG_NETFILTER_XT_MATCH_IPRANGE=y -CONFIG_NETFILTER_XT_MATCH_IPVS=y -CONFIG_NETFILTER_XT_MATCH_L2TP=y -CONFIG_NETFILTER_XT_MATCH_LENGTH=y -CONFIG_NETFILTER_XT_MATCH_LIMIT=y -CONFIG_NETFILTER_XT_MATCH_MAC=y -CONFIG_NETFILTER_XT_MATCH_MARK=y -CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y -CONFIG_NETFILTER_XT_MATCH_NFACCT=y -CONFIG_NETFILTER_XT_MATCH_OSF=y 
-CONFIG_NETFILTER_XT_MATCH_OWNER=y -CONFIG_NETFILTER_XT_MATCH_POLICY=y -CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y -CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y -CONFIG_NETFILTER_XT_MATCH_QUOTA=y -CONFIG_NETFILTER_XT_MATCH_RATEEST=y -CONFIG_NETFILTER_XT_MATCH_REALM=y -CONFIG_NETFILTER_XT_MATCH_RECENT=y -CONFIG_NETFILTER_XT_MATCH_SCTP=y -CONFIG_NETFILTER_XT_MATCH_SOCKET=y -CONFIG_NETFILTER_XT_MATCH_STATE=y -CONFIG_NETFILTER_XT_MATCH_STATISTIC=y -CONFIG_NETFILTER_XT_MATCH_STRING=y -CONFIG_NETFILTER_XT_MATCH_TCPMSS=y -CONFIG_NETFILTER_XT_MATCH_TIME=y -CONFIG_NETFILTER_XT_MATCH_U32=y -CONFIG_IP_SET=y -CONFIG_IP_SET_MAX=256 -CONFIG_IP_SET_BITMAP_IP=y -CONFIG_IP_SET_BITMAP_IPMAC=y -CONFIG_IP_SET_BITMAP_PORT=y -CONFIG_IP_SET_HASH_IP=y -# CONFIG_IP_SET_HASH_IPMARK is not set -CONFIG_IP_SET_HASH_IPPORT=y -CONFIG_IP_SET_HASH_IPPORTIP=y -CONFIG_IP_SET_HASH_IPPORTNET=y -# CONFIG_IP_SET_HASH_MAC is not set -# CONFIG_IP_SET_HASH_NETPORTNET is not set -CONFIG_IP_SET_HASH_NET=y -# CONFIG_IP_SET_HASH_NETNET is not set -CONFIG_IP_SET_HASH_NETPORT=y -CONFIG_IP_SET_HASH_NETIFACE=y -CONFIG_IP_SET_LIST_SET=y -CONFIG_IP_VS=y -CONFIG_IP_VS_IPV6=y -# CONFIG_IP_VS_DEBUG is not set -CONFIG_IP_VS_TAB_BITS=12 - -# -# IPVS transport protocol load balancing support -# -CONFIG_IP_VS_PROTO_TCP=y -CONFIG_IP_VS_PROTO_UDP=y -CONFIG_IP_VS_PROTO_AH_ESP=y -CONFIG_IP_VS_PROTO_ESP=y -CONFIG_IP_VS_PROTO_AH=y -CONFIG_IP_VS_PROTO_SCTP=y - -# -# IPVS scheduler -# -CONFIG_IP_VS_RR=y -CONFIG_IP_VS_WRR=y -CONFIG_IP_VS_LC=y -CONFIG_IP_VS_WLC=y -# CONFIG_IP_VS_FO is not set -CONFIG_IP_VS_LBLC=y -CONFIG_IP_VS_LBLCR=y -CONFIG_IP_VS_DH=y -CONFIG_IP_VS_SH=y -CONFIG_IP_VS_SED=y -CONFIG_IP_VS_NQ=y - -# -# IPVS SH scheduler -# -CONFIG_IP_VS_SH_TAB_BITS=8 - -# -# IPVS application helper -# -# CONFIG_IP_VS_FTP is not set -CONFIG_IP_VS_NFCT=y -CONFIG_IP_VS_PE_SIP=y - -# -# IP: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV4=y -CONFIG_NF_CONNTRACK_IPV4=y -CONFIG_NF_CONNTRACK_PROC_COMPAT=y -# CONFIG_NF_LOG_ARP is not set 
-CONFIG_NF_LOG_IPV4=y -CONFIG_NF_REJECT_IPV4=y -CONFIG_NF_NAT_IPV4=y -CONFIG_NF_NAT_MASQUERADE_IPV4=y -CONFIG_NF_NAT_SNMP_BASIC=y -CONFIG_NF_NAT_PROTO_GRE=y -CONFIG_NF_NAT_PPTP=y -CONFIG_NF_NAT_H323=y -CONFIG_IP_NF_IPTABLES=y -CONFIG_IP_NF_MATCH_AH=y -CONFIG_IP_NF_MATCH_ECN=y -# CONFIG_IP_NF_MATCH_RPFILTER is not set -CONFIG_IP_NF_MATCH_TTL=y -CONFIG_IP_NF_FILTER=y -CONFIG_IP_NF_TARGET_REJECT=y -# CONFIG_IP_NF_TARGET_SYNPROXY is not set -CONFIG_IP_NF_NAT=y -CONFIG_IP_NF_TARGET_MASQUERADE=y -# CONFIG_IP_NF_TARGET_NETMAP is not set -# CONFIG_IP_NF_TARGET_REDIRECT is not set -CONFIG_IP_NF_MANGLE=y -CONFIG_IP_NF_TARGET_CLUSTERIP=y -CONFIG_IP_NF_TARGET_ECN=y -CONFIG_IP_NF_TARGET_TTL=y -CONFIG_IP_NF_RAW=y -CONFIG_IP_NF_ARPTABLES=y -CONFIG_IP_NF_ARPFILTER=y -CONFIG_IP_NF_ARP_MANGLE=y - -# -# IPv6: Netfilter Configuration -# -CONFIG_NF_DEFRAG_IPV6=y -CONFIG_NF_CONNTRACK_IPV6=y -CONFIG_NF_REJECT_IPV6=y -CONFIG_NF_LOG_IPV6=y -# CONFIG_NF_NAT_IPV6 is not set -CONFIG_IP6_NF_IPTABLES=y -CONFIG_IP6_NF_MATCH_AH=y -CONFIG_IP6_NF_MATCH_EUI64=y -CONFIG_IP6_NF_MATCH_FRAG=y -CONFIG_IP6_NF_MATCH_OPTS=y -CONFIG_IP6_NF_MATCH_HL=y -CONFIG_IP6_NF_MATCH_IPV6HEADER=y -CONFIG_IP6_NF_MATCH_MH=y -# CONFIG_IP6_NF_MATCH_RPFILTER is not set -CONFIG_IP6_NF_MATCH_RT=y -CONFIG_IP6_NF_TARGET_HL=y -CONFIG_IP6_NF_FILTER=y -CONFIG_IP6_NF_TARGET_REJECT=y -# CONFIG_IP6_NF_TARGET_SYNPROXY is not set -CONFIG_IP6_NF_MANGLE=y -CONFIG_IP6_NF_RAW=y -# CONFIG_IP6_NF_NAT is not set -CONFIG_BRIDGE_NF_EBTABLES=y -CONFIG_BRIDGE_EBT_BROUTE=y -CONFIG_BRIDGE_EBT_T_FILTER=y -CONFIG_BRIDGE_EBT_T_NAT=y -CONFIG_BRIDGE_EBT_802_3=y -CONFIG_BRIDGE_EBT_AMONG=y -CONFIG_BRIDGE_EBT_ARP=y -CONFIG_BRIDGE_EBT_IP=y -CONFIG_BRIDGE_EBT_IP6=y -CONFIG_BRIDGE_EBT_LIMIT=y -CONFIG_BRIDGE_EBT_MARK=y -CONFIG_BRIDGE_EBT_PKTTYPE=y -CONFIG_BRIDGE_EBT_STP=y -CONFIG_BRIDGE_EBT_VLAN=y -CONFIG_BRIDGE_EBT_ARPREPLY=y -CONFIG_BRIDGE_EBT_DNAT=y -CONFIG_BRIDGE_EBT_MARK_T=y -CONFIG_BRIDGE_EBT_REDIRECT=y -CONFIG_BRIDGE_EBT_SNAT=y -CONFIG_BRIDGE_EBT_LOG=y 
-CONFIG_BRIDGE_EBT_NFLOG=y -# CONFIG_IP_DCCP is not set -CONFIG_IP_SCTP=y -# CONFIG_SCTP_DBG_OBJCNT is not set -CONFIG_SCTP_DEFAULT_COOKIE_HMAC_MD5=y -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_SHA1 is not set -# CONFIG_SCTP_DEFAULT_COOKIE_HMAC_NONE is not set -CONFIG_SCTP_COOKIE_HMAC_MD5=y -# CONFIG_SCTP_COOKIE_HMAC_SHA1 is not set -# CONFIG_RDS is not set -# CONFIG_TIPC is not set -# CONFIG_ATM is not set -# CONFIG_L2TP is not set -CONFIG_STP=y -CONFIG_BRIDGE=y -CONFIG_BRIDGE_IGMP_SNOOPING=y -CONFIG_BRIDGE_VLAN_FILTERING=y -CONFIG_HAVE_NET_DSA=y -CONFIG_VLAN_8021Q=y -# CONFIG_VLAN_8021Q_GVRP is not set -# CONFIG_VLAN_8021Q_MVRP is not set -# CONFIG_DECNET is not set -CONFIG_LLC=y -CONFIG_LLC2=y -# CONFIG_IPX is not set -# CONFIG_ATALK is not set -# CONFIG_X25 is not set -# CONFIG_LAPB is not set -# CONFIG_PHONET is not set -# CONFIG_6LOWPAN is not set -# CONFIG_IEEE802154 is not set -# CONFIG_NET_SCHED is not set -# CONFIG_DCB is not set -CONFIG_DNS_RESOLVER=y -# CONFIG_BATMAN_ADV is not set -# CONFIG_OPENVSWITCH is not set -# CONFIG_VSOCKETS is not set -CONFIG_NETLINK_MMAP=y -CONFIG_NETLINK_DIAG=y -# CONFIG_NET_MPLS_GSO is not set -# CONFIG_HSR is not set -CONFIG_RPS=y -CONFIG_RFS_ACCEL=y -CONFIG_XPS=y -# CONFIG_CGROUP_NET_PRIO is not set -CONFIG_CGROUP_NET_CLASSID=y -CONFIG_NET_RX_BUSY_POLL=y -CONFIG_BQL=y -# CONFIG_BPF_JIT is not set -CONFIG_NET_FLOW_LIMIT=y - -# -# Network testing -# -CONFIG_NET_PKTGEN=y -CONFIG_NET_DROP_MONITOR=y -# CONFIG_HAMRADIO is not set -# CONFIG_CAN is not set -# CONFIG_IRDA is not set -# CONFIG_BT is not set -CONFIG_AF_RXRPC=y -# CONFIG_AF_RXRPC_DEBUG is not set -# CONFIG_RXKAD is not set -CONFIG_FIB_RULES=y -CONFIG_WIRELESS=y -# CONFIG_CFG80211 is not set -# CONFIG_LIB80211 is not set - -# -# CFG80211 needs to be enabled for MAC80211 -# -# CONFIG_WIMAX is not set -# CONFIG_RFKILL is not set -# CONFIG_NET_9P is not set -# CONFIG_CAIF is not set -CONFIG_CEPH_LIB=y -# CONFIG_CEPH_LIB_PRETTYDEBUG is not set -# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is 
not set -# CONFIG_NFC is not set -CONFIG_HAVE_BPF_JIT=y - -# -# Device Drivers -# - -# -# Generic Driver Options -# -CONFIG_UEVENT_HELPER=y -CONFIG_UEVENT_HELPER_PATH="" -CONFIG_DEVTMPFS=y -# CONFIG_DEVTMPFS_MOUNT is not set -CONFIG_STANDALONE=y -CONFIG_PREVENT_FIRMWARE_BUILD=y -CONFIG_FW_LOADER=y -# CONFIG_FIRMWARE_IN_KERNEL is not set -CONFIG_EXTRA_FIRMWARE="" -CONFIG_FW_LOADER_USER_HELPER=y -# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set -CONFIG_ALLOW_DEV_COREDUMP=y -# CONFIG_DEBUG_DRIVER is not set -# CONFIG_DEBUG_DEVRES is not set -# CONFIG_SYS_HYPERVISOR is not set -# CONFIG_GENERIC_CPU_DEVICES is not set -CONFIG_GENERIC_CPU_AUTOPROBE=y -# CONFIG_DMA_SHARED_BUFFER is not set - -# -# Bus devices -# -CONFIG_CONNECTOR=y -CONFIG_PROC_EVENTS=y -# CONFIG_MTD is not set -CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y -# CONFIG_PARPORT is not set -CONFIG_PNP=y -CONFIG_PNP_DEBUG_MESSAGES=y - -# -# Protocols -# -CONFIG_PNPACPI=y -CONFIG_BLK_DEV=y -# CONFIG_BLK_DEV_NULL_BLK is not set -# CONFIG_BLK_DEV_FD is not set -# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -# CONFIG_BLK_CPQ_CISS_DA is not set -# CONFIG_BLK_DEV_DAC960 is not set -# CONFIG_BLK_DEV_UMEM is not set -# CONFIG_BLK_DEV_COW_COMMON is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -# CONFIG_BLK_DEV_CRYPTOLOOP is not set -# CONFIG_BLK_DEV_DRBD is not set -CONFIG_BLK_DEV_NBD=y -# CONFIG_BLK_DEV_NVME is not set -# CONFIG_BLK_DEV_SKD is not set -# CONFIG_BLK_DEV_OSD is not set -CONFIG_BLK_DEV_SX8=y -CONFIG_BLK_DEV_RAM=y -CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=65536 -# CONFIG_BLK_DEV_XIP is not set -# CONFIG_CDROM_PKTCDVD is not set -# CONFIG_ATA_OVER_ETH is not set -# CONFIG_BLK_DEV_HD is not set -# CONFIG_BLK_DEV_RBD is not set -# CONFIG_BLK_DEV_RSXX is not set - -# -# Misc devices -# -# CONFIG_SENSORS_LIS3LV02D is not set -# CONFIG_AD525X_DPOT is not set -CONFIG_DUMMY_IRQ=y -# CONFIG_IBM_ASM is not set -# CONFIG_PHANTOM is not set -# CONFIG_SGI_IOC4 is not set -# CONFIG_TIFM_CORE is 
not set -# CONFIG_ICS932S401 is not set -# CONFIG_ENCLOSURE_SERVICES is not set -# CONFIG_HP_ILO is not set -# CONFIG_APDS9802ALS is not set -# CONFIG_ISL29003 is not set -# CONFIG_ISL29020 is not set -# CONFIG_SENSORS_TSL2550 is not set -# CONFIG_SENSORS_BH1780 is not set -# CONFIG_SENSORS_BH1770 is not set -# CONFIG_SENSORS_APDS990X is not set -# CONFIG_HMC6352 is not set -# CONFIG_DS1682 is not set -CONFIG_TI_DAC7512=y -# CONFIG_BMP085_I2C is not set -# CONFIG_BMP085_SPI is not set -# CONFIG_USB_SWITCH_FSA9480 is not set -# CONFIG_LATTICE_ECP3_CONFIG is not set -# CONFIG_SRAM is not set -# CONFIG_C2PORT is not set - -# -# EEPROM support -# -CONFIG_EEPROM_AT24=y -CONFIG_EEPROM_AT25=y -# CONFIG_EEPROM_LEGACY is not set -# CONFIG_EEPROM_MAX6875 is not set -CONFIG_EEPROM_93CX6=y -# CONFIG_EEPROM_93XX46 is not set -CONFIG_CB710_CORE=y -# CONFIG_CB710_DEBUG is not set -CONFIG_CB710_DEBUG_ASSUMPTIONS=y - -# -# Texas Instruments shared transport line discipline -# -# CONFIG_TI_ST is not set -# CONFIG_SENSORS_LIS3_I2C is not set - -# -# Altera FPGA firmware download module -# -# CONFIG_ALTERA_STAPL is not set -# CONFIG_VMWARE_VMCI is not set - -# -# Intel MIC Bus Driver -# -# CONFIG_INTEL_MIC_BUS is not set - -# -# Intel MIC Host Driver -# - -# -# Intel MIC Card Driver -# -# CONFIG_GENWQE is not set -# CONFIG_ECHO is not set -# CONFIG_CXL_BASE is not set -CONFIG_HAVE_IDE=y -# CONFIG_IDE is not set - -# -# SCSI device support -# -CONFIG_SCSI_MOD=y -CONFIG_RAID_ATTRS=y -CONFIG_SCSI=y -CONFIG_SCSI_DMA=y -CONFIG_SCSI_NETLINK=y -# CONFIG_SCSI_MQ_DEFAULT is not set -# CONFIG_SCSI_PROC_FS is not set - -# -# SCSI support type (disk, tape, CD-ROM) -# -CONFIG_BLK_DEV_SD=y -# CONFIG_CHR_DEV_ST is not set -# CONFIG_CHR_DEV_OSST is not set -# CONFIG_BLK_DEV_SR is not set -CONFIG_CHR_DEV_SG=y -# CONFIG_CHR_DEV_SCH is not set -# CONFIG_SCSI_CONSTANTS is not set -# CONFIG_SCSI_LOGGING is not set -# CONFIG_SCSI_SCAN_ASYNC is not set - -# -# SCSI Transports -# -CONFIG_SCSI_SPI_ATTRS=y 
-CONFIG_SCSI_FC_ATTRS=y -CONFIG_SCSI_ISCSI_ATTRS=y -CONFIG_SCSI_SAS_ATTRS=y -CONFIG_SCSI_SAS_LIBSAS=y -CONFIG_SCSI_SAS_ATA=y -CONFIG_SCSI_SAS_HOST_SMP=y -CONFIG_SCSI_SRP_ATTRS=y -CONFIG_SCSI_LOWLEVEL=y -CONFIG_ISCSI_TCP=y -CONFIG_ISCSI_BOOT_SYSFS=y -CONFIG_SCSI_CXGB3_ISCSI=y -CONFIG_SCSI_CXGB4_ISCSI=y -CONFIG_SCSI_BNX2_ISCSI=y -CONFIG_SCSI_BNX2X_FCOE=y -CONFIG_BE2ISCSI=y -CONFIG_BLK_DEV_3W_XXXX_RAID=y -CONFIG_SCSI_HPSA=y -CONFIG_SCSI_3W_9XXX=y -CONFIG_SCSI_3W_SAS=y -CONFIG_SCSI_ACARD=y -CONFIG_SCSI_AACRAID=y -CONFIG_SCSI_AIC7XXX=y -CONFIG_AIC7XXX_CMDS_PER_DEVICE=8 -CONFIG_AIC7XXX_RESET_DELAY_MS=15000 -CONFIG_AIC7XXX_DEBUG_ENABLE=y -CONFIG_AIC7XXX_DEBUG_MASK=0 -CONFIG_AIC7XXX_REG_PRETTY_PRINT=y -CONFIG_SCSI_AIC79XX=y -CONFIG_AIC79XX_CMDS_PER_DEVICE=32 -CONFIG_AIC79XX_RESET_DELAY_MS=15000 -CONFIG_AIC79XX_DEBUG_ENABLE=y -CONFIG_AIC79XX_DEBUG_MASK=0 -CONFIG_AIC79XX_REG_PRETTY_PRINT=y -CONFIG_SCSI_AIC94XX=y -# CONFIG_AIC94XX_DEBUG is not set -CONFIG_SCSI_MVSAS=y -# CONFIG_SCSI_MVSAS_DEBUG is not set -# CONFIG_SCSI_MVSAS_TASKLET is not set -CONFIG_SCSI_MVUMI=y -CONFIG_SCSI_DPT_I2O=y -CONFIG_SCSI_ADVANSYS=y -CONFIG_SCSI_ARCMSR=y -# CONFIG_SCSI_ESAS2R is not set -CONFIG_MEGARAID_NEWGEN=y -CONFIG_MEGARAID_MM=y -CONFIG_MEGARAID_MAILBOX=y -CONFIG_MEGARAID_LEGACY=y -CONFIG_MEGARAID_SAS=y -CONFIG_SCSI_MPT2SAS=y -CONFIG_SCSI_MPT2SAS_MAX_SGE=128 -# CONFIG_SCSI_MPT2SAS_LOGGING is not set -# CONFIG_SCSI_MPT3SAS is not set -# CONFIG_SCSI_UFSHCD is not set -CONFIG_SCSI_HPTIOP=y -CONFIG_SCSI_BUSLOGIC=y -# CONFIG_SCSI_FLASHPOINT is not set -CONFIG_VMWARE_PVSCSI=y -CONFIG_LIBFC=y -CONFIG_LIBFCOE=y -CONFIG_FCOE=y -CONFIG_FCOE_FNIC=y -CONFIG_SCSI_DMX3191D=y -CONFIG_SCSI_EATA=y -CONFIG_SCSI_EATA_TAGGED_QUEUE=y -CONFIG_SCSI_EATA_LINKED_COMMANDS=y -CONFIG_SCSI_EATA_MAX_TAGS=16 -CONFIG_SCSI_FUTURE_DOMAIN=y -CONFIG_SCSI_GDTH=y -CONFIG_SCSI_ISCI=y -CONFIG_SCSI_IPS=y -CONFIG_SCSI_INITIO=y -CONFIG_SCSI_INIA100=y -CONFIG_SCSI_STEX=y -CONFIG_SCSI_SYM53C8XX_2=y 
-CONFIG_SCSI_SYM53C8XX_DMA_ADDRESSING_MODE=1 -CONFIG_SCSI_SYM53C8XX_DEFAULT_TAGS=16 -CONFIG_SCSI_SYM53C8XX_MAX_TAGS=64 -CONFIG_SCSI_SYM53C8XX_MMIO=y -CONFIG_SCSI_IPR=y -# CONFIG_SCSI_IPR_TRACE is not set -# CONFIG_SCSI_IPR_DUMP is not set -CONFIG_SCSI_QLOGIC_1280=y -CONFIG_SCSI_QLA_FC=y -CONFIG_SCSI_QLA_ISCSI=y -CONFIG_SCSI_LPFC=y -# CONFIG_SCSI_LPFC_DEBUG_FS is not set -CONFIG_SCSI_DC395x=y -CONFIG_SCSI_DC390T=y -CONFIG_SCSI_DEBUG=y -CONFIG_SCSI_PMCRAID=y -CONFIG_SCSI_PM8001=y -CONFIG_SCSI_BFA_FC=y -# CONFIG_SCSI_CHELSIO_FCOE is not set -CONFIG_SCSI_LOWLEVEL_PCMCIA=y -# CONFIG_PCMCIA_AHA152X is not set -# CONFIG_PCMCIA_FDOMAIN is not set -# CONFIG_PCMCIA_QLOGIC is not set -# CONFIG_PCMCIA_SYM53C500 is not set -CONFIG_SCSI_DH=y -CONFIG_SCSI_DH_RDAC=y -CONFIG_SCSI_DH_HP_SW=y -CONFIG_SCSI_DH_EMC=y -CONFIG_SCSI_DH_ALUA=y -CONFIG_SCSI_OSD_INITIATOR=y -CONFIG_SCSI_OSD_ULD=y -CONFIG_SCSI_OSD_DPRINT_SENSE=1 -# CONFIG_SCSI_OSD_DEBUG is not set -CONFIG_ATA=y -# CONFIG_ATA_NONSTANDARD is not set -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_ATA_ACPI=y -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=y -CONFIG_SATA_AHCI_PLATFORM=y -# CONFIG_SATA_INIC162X is not set -CONFIG_SATA_ACARD_AHCI=y -CONFIG_SATA_SIL24=y -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -CONFIG_PDC_ADMA=y -CONFIG_SATA_QSTOR=y -CONFIG_SATA_SX4=y -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -CONFIG_ATA_PIIX=y -CONFIG_SATA_MV=y -CONFIG_SATA_NV=y -CONFIG_SATA_PROMISE=y -CONFIG_SATA_SIL=y -CONFIG_SATA_SIS=y -CONFIG_SATA_SVW=y -CONFIG_SATA_ULI=y -CONFIG_SATA_VIA=y -CONFIG_SATA_VITESSE=y - -# -# PATA SFF controllers with BMDMA -# -CONFIG_PATA_ALI=y -CONFIG_PATA_AMD=y -CONFIG_PATA_ARTOP=y -CONFIG_PATA_ATIIXP=y -CONFIG_PATA_ATP867X=y -CONFIG_PATA_CMD64X=y -# CONFIG_PATA_CYPRESS is not set -CONFIG_PATA_EFAR=y -CONFIG_PATA_HPT366=y -CONFIG_PATA_HPT37X=y -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -CONFIG_PATA_IT8213=y 
-CONFIG_PATA_IT821X=y -CONFIG_PATA_JMICRON=y -CONFIG_PATA_MARVELL=y -CONFIG_PATA_NETCELL=y -CONFIG_PATA_NINJA32=y -CONFIG_PATA_NS87415=y -CONFIG_PATA_OLDPIIX=y -# CONFIG_PATA_OPTIDMA is not set -CONFIG_PATA_PDC2027X=y -CONFIG_PATA_PDC_OLD=y -# CONFIG_PATA_RADISYS is not set -CONFIG_PATA_RDC=y -CONFIG_PATA_SCH=y -CONFIG_PATA_SERVERWORKS=y -CONFIG_PATA_SIL680=y -CONFIG_PATA_SIS=y -CONFIG_PATA_TOSHIBA=y -CONFIG_PATA_TRIFLEX=y -CONFIG_PATA_VIA=y -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -CONFIG_PATA_MPIIX=y -CONFIG_PATA_NS87410=y -# CONFIG_PATA_OPTI is not set -CONFIG_PATA_PCMCIA=y -CONFIG_PATA_PLATFORM=y -CONFIG_PATA_RZ1000=y - -# -# Generic fallback / legacy drivers -# -# CONFIG_PATA_ACPI is not set -CONFIG_ATA_GENERIC=y -# CONFIG_PATA_LEGACY is not set -# CONFIG_MD is not set -# CONFIG_TARGET_CORE is not set -# CONFIG_FUSION is not set - -# -# IEEE 1394 (FireWire) support -# -CONFIG_FIREWIRE=y -CONFIG_FIREWIRE_OHCI=y -CONFIG_FIREWIRE_SBP2=y -CONFIG_FIREWIRE_NET=y -CONFIG_FIREWIRE_NOSY=y -# CONFIG_I2O is not set -# CONFIG_MACINTOSH_DRIVERS is not set -CONFIG_NETDEVICES=y -CONFIG_MII=y -CONFIG_NET_CORE=y -# CONFIG_BONDING is not set -CONFIG_DUMMY=y -# CONFIG_EQUALIZER is not set -# CONFIG_NET_FC is not set -# CONFIG_NET_TEAM is not set -CONFIG_MACVLAN=y -CONFIG_MACVTAP=y -# CONFIG_VXLAN is not set -# CONFIG_NETCONSOLE is not set -# CONFIG_NETPOLL is not set -# CONFIG_NET_POLL_CONTROLLER is not set -CONFIG_TUN=y -CONFIG_VETH=y -# CONFIG_NLMON is not set -# CONFIG_ARCNET is not set - -# -# CAIF transport drivers -# - -# -# Distributed Switch Architecture drivers -# -# CONFIG_NET_DSA_MV88E6XXX is not set -# CONFIG_NET_DSA_MV88E6060 is not set -# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set -# CONFIG_NET_DSA_MV88E6131 is not set -# CONFIG_NET_DSA_MV88E6123_61_65 is not set -# CONFIG_NET_DSA_MV88E6171 is not set -# CONFIG_NET_DSA_BCM_SF2 is not set -CONFIG_ETHERNET=y -CONFIG_MDIO=y -# CONFIG_NET_VENDOR_3COM 
is not set -# CONFIG_NET_VENDOR_ADAPTEC is not set -CONFIG_NET_VENDOR_AGERE=y -# CONFIG_ET131X is not set -# CONFIG_NET_VENDOR_ALTEON is not set -# CONFIG_ALTERA_TSE is not set -# CONFIG_NET_VENDOR_AMD is not set -# CONFIG_NET_XGENE is not set -CONFIG_NET_VENDOR_ARC=y -# CONFIG_NET_VENDOR_ATHEROS is not set -CONFIG_NET_VENDOR_BROADCOM=y -CONFIG_B44=y -CONFIG_B44_PCI_AUTOSELECT=y -CONFIG_B44_PCICORE_AUTOSELECT=y -CONFIG_B44_PCI=y -CONFIG_BNX2=y -CONFIG_CNIC=y -CONFIG_TIGON3=y -CONFIG_BNX2X=y -CONFIG_BNX2X_SRIOV=y -# CONFIG_NET_VENDOR_BROCADE is not set -CONFIG_NET_VENDOR_CHELSIO=y -# CONFIG_CHELSIO_T1 is not set -CONFIG_CHELSIO_T3=y -CONFIG_CHELSIO_T4=y -CONFIG_CHELSIO_T4VF=y -# CONFIG_NET_VENDOR_CISCO is not set -# CONFIG_CX_ECAT is not set -# CONFIG_DNET is not set -# CONFIG_NET_VENDOR_DEC is not set -# CONFIG_NET_VENDOR_DLINK is not set -# CONFIG_NET_VENDOR_EMULEX is not set -# CONFIG_NET_VENDOR_EXAR is not set -# CONFIG_NET_VENDOR_FUJITSU is not set -# CONFIG_NET_VENDOR_HP is not set -CONFIG_NET_VENDOR_INTEL=y -# CONFIG_E100 is not set -CONFIG_E1000=y -CONFIG_E1000E=y -CONFIG_IGB=y -CONFIG_IGB_HWMON=y -CONFIG_IGBVF=y -CONFIG_IXGB=y -CONFIG_IXGBE=y -CONFIG_IXGBE_HWMON=y -CONFIG_IXGBEVF=y -# CONFIG_I40E is not set -# CONFIG_I40EVF is not set -# CONFIG_FM10K is not set -CONFIG_NET_VENDOR_I825XX=y -# CONFIG_IP1000 is not set -# CONFIG_JME is not set -# CONFIG_NET_VENDOR_MARVELL is not set -CONFIG_NET_VENDOR_MELLANOX=y -# CONFIG_MLX4_EN is not set -# CONFIG_MLX4_CORE is not set -# CONFIG_MLX5_CORE is not set -# CONFIG_NET_VENDOR_MICREL is not set -CONFIG_NET_VENDOR_MICROCHIP=y -CONFIG_ENC28J60=y -CONFIG_ENC28J60_WRITEVERIFY=y -# CONFIG_NET_VENDOR_MYRI is not set -# CONFIG_FEALNX is not set -# CONFIG_NET_VENDOR_NATSEMI is not set -# CONFIG_NET_VENDOR_NVIDIA is not set -# CONFIG_NET_VENDOR_OKI is not set -# CONFIG_ETHOC is not set -# CONFIG_NET_PACKET_ENGINE is not set -# CONFIG_NET_VENDOR_QLOGIC is not set -CONFIG_NET_VENDOR_QUALCOMM=y -CONFIG_NET_VENDOR_REALTEK=y -# 
CONFIG_8139CP is not set -# CONFIG_8139TOO is not set -CONFIG_R8169=y -# CONFIG_NET_VENDOR_RDC is not set -CONFIG_NET_VENDOR_SAMSUNG=y -# CONFIG_SXGBE_ETH is not set -# CONFIG_NET_VENDOR_SEEQ is not set -# CONFIG_NET_VENDOR_SILAN is not set -# CONFIG_NET_VENDOR_SIS is not set -# CONFIG_SFC is not set -# CONFIG_NET_VENDOR_SMSC is not set -# CONFIG_NET_VENDOR_STMICRO is not set -# CONFIG_NET_VENDOR_SUN is not set -# CONFIG_NET_VENDOR_TEHUTI is not set -# CONFIG_NET_VENDOR_TI is not set -# CONFIG_NET_VENDOR_VIA is not set -CONFIG_NET_VENDOR_WIZNET=y -# CONFIG_WIZNET_W5100 is not set -# CONFIG_WIZNET_W5300 is not set -# CONFIG_NET_VENDOR_XIRCOM is not set -# CONFIG_FDDI is not set -# CONFIG_HIPPI is not set -# CONFIG_NET_SB1000 is not set -CONFIG_PHYLIB=y - -# -# MII PHY device drivers -# -# CONFIG_AT803X_PHY is not set -# CONFIG_AMD_PHY is not set -CONFIG_MARVELL_PHY=y -CONFIG_DAVICOM_PHY=y -CONFIG_QSEMI_PHY=y -CONFIG_LXT_PHY=y -CONFIG_CICADA_PHY=y -CONFIG_VITESSE_PHY=y -CONFIG_SMSC_PHY=y -CONFIG_BROADCOM_PHY=y -# CONFIG_BCM7XXX_PHY is not set -# CONFIG_BCM87XX_PHY is not set -# CONFIG_ICPLUS_PHY is not set -CONFIG_REALTEK_PHY=y -CONFIG_NATIONAL_PHY=y -CONFIG_STE10XP=y -CONFIG_LSI_ET1011C_PHY=y -CONFIG_MICREL_PHY=y -CONFIG_FIXED_PHY=y -CONFIG_MDIO_BITBANG=y -# CONFIG_MDIO_GPIO is not set -# CONFIG_MDIO_BCM_UNIMAC is not set -# CONFIG_MICREL_KS8995MA is not set -CONFIG_PPP=y -# CONFIG_PPP_BSDCOMP is not set -# CONFIG_PPP_DEFLATE is not set -# CONFIG_PPP_FILTER is not set -# CONFIG_PPP_MPPE is not set -# CONFIG_PPP_MULTILINK is not set -# CONFIG_PPPOE is not set -# CONFIG_PPP_ASYNC is not set -# CONFIG_PPP_SYNC_TTY is not set -# CONFIG_SLIP is not set -CONFIG_SLHC=y -CONFIG_USB_NET_DRIVERS=y -# CONFIG_USB_CATC is not set -# CONFIG_USB_KAWETH is not set -# CONFIG_USB_PEGASUS is not set -# CONFIG_USB_RTL8150 is not set -# CONFIG_USB_RTL8152 is not set -CONFIG_USB_USBNET=y -# CONFIG_USB_NET_AX8817X is not set -# CONFIG_USB_NET_AX88179_178A is not set 
-CONFIG_USB_NET_CDCETHER=y -# CONFIG_USB_NET_CDC_EEM is not set -CONFIG_USB_NET_CDC_NCM=y -# CONFIG_USB_NET_HUAWEI_CDC_NCM is not set -# CONFIG_USB_NET_CDC_MBIM is not set -# CONFIG_USB_NET_DM9601 is not set -# CONFIG_USB_NET_SR9700 is not set -# CONFIG_USB_NET_SR9800 is not set -# CONFIG_USB_NET_SMSC75XX is not set -# CONFIG_USB_NET_SMSC95XX is not set -# CONFIG_USB_NET_GL620A is not set -CONFIG_USB_NET_NET1080=y -# CONFIG_USB_NET_PLUSB is not set -# CONFIG_USB_NET_MCS7830 is not set -# CONFIG_USB_NET_RNDIS_HOST is not set -CONFIG_USB_NET_CDC_SUBSET=y -# CONFIG_USB_ALI_M5632 is not set -# CONFIG_USB_AN2720 is not set -CONFIG_USB_BELKIN=y -# CONFIG_USB_ARMLINUX is not set -# CONFIG_USB_EPSON2888 is not set -# CONFIG_USB_KC2190 is not set -CONFIG_USB_NET_ZAURUS=y -# CONFIG_USB_NET_CX82310_ETH is not set -# CONFIG_USB_NET_KALMIA is not set -# CONFIG_USB_NET_QMI_WWAN is not set -# CONFIG_USB_NET_INT51X1 is not set -# CONFIG_USB_IPHETH is not set -# CONFIG_USB_SIERRA_NET is not set -# CONFIG_USB_VL600 is not set -# CONFIG_WLAN is not set - -# -# Enable WiMAX (Networking options) to see the WiMAX drivers -# -# CONFIG_WAN is not set -# CONFIG_VMXNET3 is not set -# CONFIG_ISDN is not set - -# -# Input device support -# -CONFIG_INPUT=y -CONFIG_INPUT_FF_MEMLESS=y -CONFIG_INPUT_POLLDEV=y -CONFIG_INPUT_SPARSEKMAP=y -# CONFIG_INPUT_MATRIXKMAP is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -CONFIG_INPUT_JOYDEV=y -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -# CONFIG_INPUT_KEYBOARD is not set -# CONFIG_INPUT_MOUSE is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -# CONFIG_INPUT_MISC is not set - -# -# Hardware I/O ports -# -# CONFIG_SERIO is not set -CONFIG_ARCH_MIGHT_HAVE_PC_SERIO=y -# CONFIG_GAMEPORT is not set - -# -# Character devices -# 
-CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y -CONFIG_UNIX98_PTYS=y -CONFIG_DEVPTS_MULTIPLE_INSTANCES=y -# CONFIG_LEGACY_PTYS is not set -CONFIG_SERIAL_NONSTANDARD=y -# CONFIG_ROCKETPORT is not set -# CONFIG_CYCLADES is not set -# CONFIG_MOXA_INTELLIO is not set -# CONFIG_MOXA_SMARTIO is not set -# CONFIG_SYNCLINK is not set -# CONFIG_SYNCLINKMP is not set -# CONFIG_SYNCLINK_GT is not set -# CONFIG_NOZOMI is not set -# CONFIG_ISI is not set -# CONFIG_N_HDLC is not set -# CONFIG_N_GSM is not set -# CONFIG_TRACE_SINK is not set -# CONFIG_DEVKMEM is not set - -# -# Serial drivers -# -CONFIG_SERIAL_EARLYCON=y -CONFIG_SERIAL_8250=y -CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y -CONFIG_SERIAL_8250_PNP=y -CONFIG_SERIAL_8250_CONSOLE=y -CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_CS=y -CONFIG_SERIAL_8250_NR_UARTS=32 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -CONFIG_SERIAL_8250_EXTENDED=y -CONFIG_SERIAL_8250_MANY_PORTS=y -CONFIG_SERIAL_8250_SHARE_IRQ=y -# CONFIG_SERIAL_8250_DETECT_IRQ is not set -CONFIG_SERIAL_8250_RSA=y -# CONFIG_SERIAL_8250_DW is not set -# CONFIG_SERIAL_8250_FINTEK is not set - -# -# Non-8250 serial port support -# -# CONFIG_SERIAL_MAX3100 is not set -# CONFIG_SERIAL_MAX310X is not set -CONFIG_SERIAL_MFD_HSU=y -# CONFIG_SERIAL_MFD_HSU_CONSOLE is not set -CONFIG_SERIAL_CORE=y -CONFIG_SERIAL_CORE_CONSOLE=y -CONFIG_SERIAL_JSM=y -# CONFIG_SERIAL_SCCNXP is not set -# CONFIG_SERIAL_SC16IS7XX is not set -# CONFIG_SERIAL_ALTERA_JTAGUART is not set -# CONFIG_SERIAL_ALTERA_UART is not set -# CONFIG_SERIAL_IFX6X60 is not set -# CONFIG_SERIAL_ARC is not set -# CONFIG_SERIAL_RP2 is not set -# CONFIG_SERIAL_FSL_LPUART is not set -# CONFIG_TTY_PRINTK is not set -# CONFIG_IPMI_HANDLER is not set -CONFIG_HW_RANDOM=y -CONFIG_HW_RANDOM_TIMERIOMEM=y -CONFIG_HW_RANDOM_INTEL=y -CONFIG_HW_RANDOM_AMD=y -CONFIG_HW_RANDOM_VIA=y -CONFIG_NVRAM=y -# CONFIG_R3964 is not set -# CONFIG_APPLICOM is not set - -# 
-# PCMCIA character devices -# -CONFIG_SYNCLINK_CS=y -CONFIG_CARDMAN_4000=y -CONFIG_CARDMAN_4040=y -CONFIG_IPWIRELESS=y -# CONFIG_MWAVE is not set -# CONFIG_RAW_DRIVER is not set -# CONFIG_HPET is not set -# CONFIG_HANGCHECK_TIMER is not set -# CONFIG_TCG_TPM is not set -# CONFIG_TELCLOCK is not set -CONFIG_DEVPORT=y -# CONFIG_XILLYBUS is not set - -# -# I2C support -# -CONFIG_I2C=y -CONFIG_ACPI_I2C_OPREGION=y -CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y -CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y - -# -# Multiplexer I2C Chip support -# -CONFIG_I2C_MUX_GPIO=y -CONFIG_I2C_MUX_PCA9541=y -CONFIG_I2C_MUX_PCA954x=y -CONFIG_I2C_HELPER_AUTO=y -CONFIG_I2C_ALGOBIT=y -CONFIG_I2C_ALGOPCA=y - -# -# I2C Hardware Bus support -# - -# -# PC SMBus host controller drivers -# -# CONFIG_I2C_ALI1535 is not set -# CONFIG_I2C_ALI1563 is not set -# CONFIG_I2C_ALI15X3 is not set -# CONFIG_I2C_AMD756 is not set -# CONFIG_I2C_AMD8111 is not set -CONFIG_I2C_I801=y -CONFIG_I2C_ISCH=y -CONFIG_I2C_ISMT=y -# CONFIG_I2C_PIIX4 is not set -# CONFIG_I2C_NFORCE2 is not set -# CONFIG_I2C_SIS5595 is not set -# CONFIG_I2C_SIS630 is not set -# CONFIG_I2C_SIS96X is not set -# CONFIG_I2C_VIA is not set -# CONFIG_I2C_VIAPRO is not set - -# -# ACPI drivers -# -# CONFIG_I2C_SCMI is not set - -# -# I2C system bus drivers (mostly embedded / system-on-chip) -# -# CONFIG_I2C_CBUS_GPIO is not set -# CONFIG_I2C_DESIGNWARE_PCI is not set -# CONFIG_I2C_GPIO is not set -# CONFIG_I2C_OCORES is not set -CONFIG_I2C_PCA_PLATFORM=y -# CONFIG_I2C_PXA_PCI is not set -# CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_XILINX is not set - -# -# External I2C/SMBus adapter drivers -# -# CONFIG_I2C_DIOLAN_U2C is not set -# CONFIG_I2C_PARPORT_LIGHT is not set -# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -# CONFIG_I2C_TAOS_EVM is not set -# CONFIG_I2C_TINY_USB is not set - -# -# Other I2C/SMBus bus drivers -# -# CONFIG_I2C_STUB is not set -# CONFIG_I2C_DEBUG_CORE is not set -# CONFIG_I2C_DEBUG_ALGO is not set -# CONFIG_I2C_DEBUG_BUS is not set 
-CONFIG_SPI=y -# CONFIG_SPI_DEBUG is not set -CONFIG_SPI_MASTER=y - -# -# SPI Master Controller Drivers -# -# CONFIG_SPI_ALTERA is not set -# CONFIG_SPI_BITBANG is not set -# CONFIG_SPI_GPIO is not set -# CONFIG_SPI_OC_TINY is not set -# CONFIG_SPI_PXA2XX is not set -# CONFIG_SPI_PXA2XX_PCI is not set -# CONFIG_SPI_SC18IS602 is not set -# CONFIG_SPI_XCOMM is not set -# CONFIG_SPI_XILINX is not set -# CONFIG_SPI_DESIGNWARE is not set - -# -# SPI Protocol Masters -# -# CONFIG_SPI_SPIDEV is not set -# CONFIG_SPI_TLE62X0 is not set -# CONFIG_SPMI is not set -# CONFIG_HSI is not set - -# -# PPS support -# -CONFIG_PPS=y -# CONFIG_PPS_DEBUG is not set - -# -# PPS clients support -# -# CONFIG_PPS_CLIENT_KTIMER is not set -# CONFIG_PPS_CLIENT_LDISC is not set -# CONFIG_PPS_CLIENT_GPIO is not set - -# -# PPS generators support -# - -# -# PTP clock support -# -CONFIG_PTP_1588_CLOCK=y - -# -# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. -# -CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -CONFIG_GPIOLIB=y -CONFIG_GPIO_DEVRES=y -CONFIG_GPIO_ACPI=y -# CONFIG_DEBUG_GPIO is not set -CONFIG_GPIO_SYSFS=y -CONFIG_GPIO_GENERIC=y -CONFIG_GPIO_MAX730X=y - -# -# Memory mapped GPIO drivers: -# -CONFIG_GPIO_GENERIC_PLATFORM=y -# CONFIG_GPIO_DWAPB is not set -# CONFIG_GPIO_IT8761E is not set -# CONFIG_GPIO_F7188X is not set -# CONFIG_GPIO_SCH311X is not set -CONFIG_GPIO_SCH=y -# CONFIG_GPIO_ICH is not set -# CONFIG_GPIO_VX855 is not set -# CONFIG_GPIO_LYNXPOINT is not set - -# -# I2C GPIO expanders: -# -# CONFIG_GPIO_MAX7300 is not set -# CONFIG_GPIO_MAX732X is not set -CONFIG_GPIO_PCA953X=y -# CONFIG_GPIO_PCA953X_IRQ is not set -CONFIG_GPIO_PCF857X=y -# CONFIG_GPIO_SX150X is not set -# CONFIG_GPIO_ADP5588 is not set - -# -# PCI GPIO expanders: -# -# CONFIG_GPIO_BT8XX is not set -# CONFIG_GPIO_AMD8111 is not set -# CONFIG_GPIO_INTEL_MID is not set -# CONFIG_GPIO_ML_IOH is not set -# CONFIG_GPIO_RDC321X is not set - -# -# SPI GPIO expanders: -# -CONFIG_GPIO_MAX7301=y -# 
CONFIG_GPIO_MCP23S08 is not set -CONFIG_GPIO_MC33880=y - -# -# AC97 GPIO expanders: -# - -# -# LPC GPIO expanders: -# - -# -# MODULbus GPIO expanders: -# - -# -# USB GPIO expanders: -# -# CONFIG_W1 is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -# CONFIG_PDA_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_BATTERY_BQ27x00 is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24190 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_SMB347 is not set -# CONFIG_POWER_RESET is not set -# CONFIG_POWER_AVS is not set -CONFIG_HWMON=y -CONFIG_HWMON_VID=y -# CONFIG_HWMON_DEBUG_CHIP is not set - -# -# Native drivers -# -# CONFIG_SENSORS_ABITUGURU is not set -# CONFIG_SENSORS_ABITUGURU3 is not set -# CONFIG_SENSORS_AD7314 is not set -# CONFIG_SENSORS_AD7414 is not set -# CONFIG_SENSORS_AD7418 is not set -CONFIG_SENSORS_ADM1021=y -# CONFIG_SENSORS_ADM1025 is not set -# CONFIG_SENSORS_ADM1026 is not set -# CONFIG_SENSORS_ADM1029 is not set -# CONFIG_SENSORS_ADM1031 is not set -# CONFIG_SENSORS_ADM9240 is not set -# CONFIG_SENSORS_ADT7310 is not set -# CONFIG_SENSORS_ADT7410 is not set -# CONFIG_SENSORS_ADT7411 is not set -# CONFIG_SENSORS_ADT7462 is not set -# CONFIG_SENSORS_ADT7470 is not set -# CONFIG_SENSORS_ADT7475 is not set -# CONFIG_SENSORS_ASC7621 is not set -# CONFIG_SENSORS_K8TEMP is not set -# CONFIG_SENSORS_K10TEMP is not set -# CONFIG_SENSORS_FAM15H_POWER is not set -# CONFIG_SENSORS_APPLESMC is not set -# CONFIG_SENSORS_ASB100 is not set -# CONFIG_SENSORS_ATXP1 is not set -# CONFIG_SENSORS_DS620 is not set -# CONFIG_SENSORS_DS1621 is not set -# CONFIG_SENSORS_I5K_AMB is not set -# 
CONFIG_SENSORS_F71805F is not set -# CONFIG_SENSORS_F71882FG is not set -# CONFIG_SENSORS_F75375S is not set -# CONFIG_SENSORS_FSCHMD is not set -# CONFIG_SENSORS_GL518SM is not set -# CONFIG_SENSORS_GL520SM is not set -# CONFIG_SENSORS_G760A is not set -# CONFIG_SENSORS_G762 is not set -CONFIG_SENSORS_GPIO_FAN=y -# CONFIG_SENSORS_HIH6130 is not set -CONFIG_SENSORS_CORETEMP=y -# CONFIG_SENSORS_IT87 is not set -# CONFIG_SENSORS_JC42 is not set -# CONFIG_SENSORS_POWR1220 is not set -# CONFIG_SENSORS_LINEAGE is not set -# CONFIG_SENSORS_LTC2945 is not set -CONFIG_SENSORS_LTC4151=y -CONFIG_SENSORS_LTC4215=y -# CONFIG_SENSORS_LTC4222 is not set -CONFIG_SENSORS_LTC4245=y -# CONFIG_SENSORS_LTC4260 is not set -CONFIG_SENSORS_LTC4261=y -# CONFIG_SENSORS_MAX1111 is not set -# CONFIG_SENSORS_MAX16065 is not set -# CONFIG_SENSORS_MAX1619 is not set -# CONFIG_SENSORS_MAX1668 is not set -# CONFIG_SENSORS_MAX197 is not set -# CONFIG_SENSORS_MAX6639 is not set -# CONFIG_SENSORS_MAX6642 is not set -CONFIG_SENSORS_MAX6650=y -# CONFIG_SENSORS_MAX6697 is not set -# CONFIG_SENSORS_HTU21 is not set -# CONFIG_SENSORS_MCP3021 is not set -# CONFIG_SENSORS_ADCXX is not set -# CONFIG_SENSORS_LM63 is not set -# CONFIG_SENSORS_LM70 is not set -# CONFIG_SENSORS_LM73 is not set -CONFIG_SENSORS_LM75=y -# CONFIG_SENSORS_LM77 is not set -# CONFIG_SENSORS_LM78 is not set -# CONFIG_SENSORS_LM80 is not set -# CONFIG_SENSORS_LM83 is not set -CONFIG_SENSORS_LM85=y -# CONFIG_SENSORS_LM87 is not set -CONFIG_SENSORS_LM90=y -# CONFIG_SENSORS_LM92 is not set -# CONFIG_SENSORS_LM93 is not set -# CONFIG_SENSORS_LM95234 is not set -# CONFIG_SENSORS_LM95241 is not set -# CONFIG_SENSORS_LM95245 is not set -# CONFIG_SENSORS_PC87360 is not set -# CONFIG_SENSORS_PC87427 is not set -# CONFIG_SENSORS_NTC_THERMISTOR is not set -# CONFIG_SENSORS_NCT6683 is not set -# CONFIG_SENSORS_NCT6775 is not set -# CONFIG_SENSORS_PCF8591 is not set -# CONFIG_PMBUS is not set -# CONFIG_SENSORS_SHT15 is not set -# 
CONFIG_SENSORS_SHT21 is not set -# CONFIG_SENSORS_SHTC1 is not set -# CONFIG_SENSORS_SIS5595 is not set -# CONFIG_SENSORS_DME1737 is not set -# CONFIG_SENSORS_EMC1403 is not set -# CONFIG_SENSORS_EMC2103 is not set -# CONFIG_SENSORS_EMC6W201 is not set -# CONFIG_SENSORS_SMSC47M1 is not set -# CONFIG_SENSORS_SMSC47M192 is not set -# CONFIG_SENSORS_SMSC47B397 is not set -# CONFIG_SENSORS_SCH56XX_COMMON is not set -# CONFIG_SENSORS_SMM665 is not set -# CONFIG_SENSORS_ADC128D818 is not set -# CONFIG_SENSORS_ADS1015 is not set -# CONFIG_SENSORS_ADS7828 is not set -# CONFIG_SENSORS_ADS7871 is not set -# CONFIG_SENSORS_AMC6821 is not set -# CONFIG_SENSORS_INA209 is not set -# CONFIG_SENSORS_INA2XX is not set -# CONFIG_SENSORS_THMC50 is not set -# CONFIG_SENSORS_TMP102 is not set -# CONFIG_SENSORS_TMP103 is not set -# CONFIG_SENSORS_TMP401 is not set -# CONFIG_SENSORS_TMP421 is not set -# CONFIG_SENSORS_VIA_CPUTEMP is not set -# CONFIG_SENSORS_VIA686A is not set -# CONFIG_SENSORS_VT1211 is not set -# CONFIG_SENSORS_VT8231 is not set -CONFIG_SENSORS_W83781D=y -# CONFIG_SENSORS_W83791D is not set -# CONFIG_SENSORS_W83792D is not set -# CONFIG_SENSORS_W83793 is not set -# CONFIG_SENSORS_W83795 is not set -# CONFIG_SENSORS_W83L785TS is not set -# CONFIG_SENSORS_W83L786NG is not set -# CONFIG_SENSORS_W83627HF is not set -# CONFIG_SENSORS_W83627EHF is not set - -# -# ACPI drivers -# -# CONFIG_SENSORS_ACPI_POWER is not set -# CONFIG_SENSORS_ATK0110 is not set -CONFIG_THERMAL=y -CONFIG_THERMAL_HWMON=y -CONFIG_THERMAL_DEFAULT_GOV_STEP_WISE=y -# CONFIG_THERMAL_DEFAULT_GOV_FAIR_SHARE is not set -# CONFIG_THERMAL_DEFAULT_GOV_USER_SPACE is not set -# CONFIG_THERMAL_GOV_FAIR_SHARE is not set -CONFIG_THERMAL_GOV_STEP_WISE=y -# CONFIG_THERMAL_GOV_BANG_BANG is not set -CONFIG_THERMAL_GOV_USER_SPACE=y -# CONFIG_THERMAL_EMULATION is not set -# CONFIG_INTEL_POWERCLAMP is not set -CONFIG_X86_PKG_TEMP_THERMAL=m -# CONFIG_INT340X_THERMAL is not set - -# -# Texas Instruments thermal drivers -# -# 
CONFIG_WATCHDOG is not set -CONFIG_SSB_POSSIBLE=y - -# -# Sonics Silicon Backplane -# -CONFIG_SSB=y -CONFIG_SSB_SPROM=y -CONFIG_SSB_PCIHOST_POSSIBLE=y -CONFIG_SSB_PCIHOST=y -# CONFIG_SSB_B43_PCI_BRIDGE is not set -CONFIG_SSB_PCMCIAHOST_POSSIBLE=y -CONFIG_SSB_PCMCIAHOST=y -CONFIG_SSB_SDIOHOST_POSSIBLE=y -CONFIG_SSB_SDIOHOST=y -# CONFIG_SSB_SILENT is not set -# CONFIG_SSB_DEBUG is not set -CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y -CONFIG_SSB_DRIVER_PCICORE=y -# CONFIG_SSB_DRIVER_GPIO is not set -CONFIG_BCMA_POSSIBLE=y - -# -# Broadcom specific AMBA -# -CONFIG_BCMA=y -CONFIG_BCMA_HOST_PCI_POSSIBLE=y -CONFIG_BCMA_HOST_PCI=y -# CONFIG_BCMA_HOST_SOC is not set -# CONFIG_BCMA_DRIVER_GMAC_CMN is not set -# CONFIG_BCMA_DRIVER_GPIO is not set -# CONFIG_BCMA_DEBUG is not set - -# -# Multifunction device drivers -# -CONFIG_MFD_CORE=y -# CONFIG_MFD_AS3711 is not set -# CONFIG_PMIC_ADP5520 is not set -# CONFIG_MFD_AAT2870_CORE is not set -# CONFIG_MFD_BCM590XX is not set -# CONFIG_MFD_AXP20X is not set -# CONFIG_MFD_CROS_EC is not set -# CONFIG_PMIC_DA903X is not set -# CONFIG_MFD_DA9052_SPI is not set -# CONFIG_MFD_DA9052_I2C is not set -# CONFIG_MFD_DA9055 is not set -# CONFIG_MFD_DA9063 is not set -# CONFIG_MFD_MC13XXX_SPI is not set -# CONFIG_MFD_MC13XXX_I2C is not set -# CONFIG_HTC_PASIC3 is not set -# CONFIG_HTC_I2CPLD is not set -# CONFIG_LPC_ICH is not set -CONFIG_LPC_SCH=y -# CONFIG_INTEL_SOC_PMIC is not set -# CONFIG_MFD_JANZ_CMODIO is not set -# CONFIG_MFD_KEMPLD is not set -# CONFIG_MFD_88PM800 is not set -# CONFIG_MFD_88PM805 is not set -# CONFIG_MFD_88PM860X is not set -# CONFIG_MFD_MAX14577 is not set -# CONFIG_MFD_MAX77686 is not set -# CONFIG_MFD_MAX77693 is not set -# CONFIG_MFD_MAX8907 is not set -# CONFIG_MFD_MAX8925 is not set -# CONFIG_MFD_MAX8997 is not set -# CONFIG_MFD_MAX8998 is not set -# CONFIG_MFD_MENF21BMC is not set -# CONFIG_EZX_PCAP is not set -# CONFIG_MFD_VIPERBOARD is not set -# CONFIG_MFD_RETU is not set -# CONFIG_MFD_PCF50633 is not set -# 
CONFIG_MFD_RDC321X is not set -# CONFIG_MFD_RTSX_PCI is not set -# CONFIG_MFD_RTSX_USB is not set -# CONFIG_MFD_RC5T583 is not set -# CONFIG_MFD_RN5T618 is not set -# CONFIG_MFD_SEC_CORE is not set -# CONFIG_MFD_SI476X_CORE is not set -# CONFIG_MFD_SM501 is not set -# CONFIG_MFD_SMSC is not set -# CONFIG_ABX500_CORE is not set -# CONFIG_MFD_SYSCON is not set -# CONFIG_MFD_TI_AM335X_TSCADC is not set -# CONFIG_MFD_LP3943 is not set -# CONFIG_MFD_LP8788 is not set -# CONFIG_MFD_PALMAS is not set -# CONFIG_TPS6105X is not set -# CONFIG_TPS65010 is not set -# CONFIG_TPS6507X is not set -# CONFIG_MFD_TPS65090 is not set -# CONFIG_MFD_TPS65217 is not set -# CONFIG_MFD_TPS65218 is not set -# CONFIG_MFD_TPS6586X is not set -# CONFIG_MFD_TPS65910 is not set -# CONFIG_MFD_TPS65912 is not set -# CONFIG_MFD_TPS65912_I2C is not set -# CONFIG_MFD_TPS65912_SPI is not set -# CONFIG_MFD_TPS80031 is not set -# CONFIG_TWL4030_CORE is not set -# CONFIG_TWL6040_CORE is not set -CONFIG_MFD_WL1273_CORE=y -# CONFIG_MFD_LM3533 is not set -# CONFIG_MFD_TC3589X is not set -# CONFIG_MFD_TMIO is not set -# CONFIG_MFD_VX855 is not set -# CONFIG_MFD_ARIZONA_I2C is not set -# CONFIG_MFD_ARIZONA_SPI is not set -# CONFIG_MFD_WM8400 is not set -# CONFIG_MFD_WM831X_I2C is not set -# CONFIG_MFD_WM831X_SPI is not set -# CONFIG_MFD_WM8350_I2C is not set -# CONFIG_MFD_WM8994 is not set -# CONFIG_REGULATOR is not set -# CONFIG_MEDIA_SUPPORT is not set - -# -# Graphics support -# -# CONFIG_AGP is not set -# CONFIG_VGA_ARB is not set -# CONFIG_VGA_SWITCHEROO is not set - -# -# Direct Rendering Manager -# -# CONFIG_DRM is not set - -# -# Frame buffer Devices -# -# CONFIG_FB is not set -# CONFIG_BACKLIGHT_LCD_SUPPORT is not set -# CONFIG_VGASTATE is not set - -# -# Console display driver support -# -CONFIG_VGA_CONSOLE=y -# CONFIG_VGACON_SOFT_SCROLLBACK is not set -CONFIG_DUMMY_CONSOLE=y -# CONFIG_SOUND is not set - -# -# HID support -# -CONFIG_HID=y -# CONFIG_HID_BATTERY_STRENGTH is not set -# CONFIG_HIDRAW 
is not set -# CONFIG_UHID is not set -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# -# CONFIG_HID_A4TECH is not set -# CONFIG_HID_ACRUX is not set -# CONFIG_HID_APPLE is not set -# CONFIG_HID_APPLEIR is not set -# CONFIG_HID_AUREAL is not set -# CONFIG_HID_BELKIN is not set -# CONFIG_HID_CHERRY is not set -# CONFIG_HID_CHICONY is not set -# CONFIG_HID_CP2112 is not set -# CONFIG_HID_CYPRESS is not set -# CONFIG_HID_DRAGONRISE is not set -# CONFIG_HID_EMS_FF is not set -# CONFIG_HID_ELECOM is not set -# CONFIG_HID_ELO is not set -# CONFIG_HID_EZKEY is not set -# CONFIG_HID_HOLTEK is not set -# CONFIG_HID_GT683R is not set -# CONFIG_HID_HUION is not set -# CONFIG_HID_KEYTOUCH is not set -# CONFIG_HID_KYE is not set -# CONFIG_HID_UCLOGIC is not set -# CONFIG_HID_WALTOP is not set -# CONFIG_HID_GYRATION is not set -# CONFIG_HID_ICADE is not set -# CONFIG_HID_TWINHAN is not set -# CONFIG_HID_KENSINGTON is not set -# CONFIG_HID_LCPOWER is not set -# CONFIG_HID_LENOVO is not set -# CONFIG_HID_LOGITECH is not set -# CONFIG_HID_MAGICMOUSE is not set -# CONFIG_HID_MICROSOFT is not set -# CONFIG_HID_MONTEREY is not set -# CONFIG_HID_MULTITOUCH is not set -# CONFIG_HID_NTRIG is not set -# CONFIG_HID_ORTEK is not set -# CONFIG_HID_PANTHERLORD is not set -# CONFIG_HID_PENMOUNT is not set -# CONFIG_HID_PETALYNX is not set -# CONFIG_HID_PICOLCD is not set -# CONFIG_HID_PRIMAX is not set -# CONFIG_HID_ROCCAT is not set -# CONFIG_HID_SAITEK is not set -# CONFIG_HID_SAMSUNG is not set -# CONFIG_HID_SONY is not set -# CONFIG_HID_SPEEDLINK is not set -# CONFIG_HID_STEELSERIES is not set -# CONFIG_HID_SUNPLUS is not set -# CONFIG_HID_RMI is not set -# CONFIG_HID_GREENASIA is not set -# CONFIG_HID_SMARTJOYPLUS is not set -# CONFIG_HID_TIVO is not set -# CONFIG_HID_TOPSEED is not set -# CONFIG_HID_THINGM is not set -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_WACOM is not set -# CONFIG_HID_WIIMOTE is not set -# CONFIG_HID_XINMO is not set -# CONFIG_HID_ZEROPLUS is not set -# 
CONFIG_HID_ZYDACRON is not set -# CONFIG_HID_SENSOR_HUB is not set - -# -# USB HID support -# -CONFIG_USB_HID=y -# CONFIG_HID_PID is not set -# CONFIG_USB_HIDDEV is not set - -# -# I2C HID support -# -# CONFIG_I2C_HID is not set -CONFIG_USB_OHCI_LITTLE_ENDIAN=y -CONFIG_USB_SUPPORT=y -CONFIG_USB_COMMON=y -CONFIG_USB_ARCH_HAS_HCD=y -CONFIG_USB=y -CONFIG_USB_ANNOUNCE_NEW_DEVICES=y - -# -# Miscellaneous USB options -# -CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_DYNAMIC_MINORS is not set -# CONFIG_USB_OTG_WHITELIST is not set -# CONFIG_USB_OTG_BLACKLIST_HUB is not set -# CONFIG_USB_OTG_FSM is not set -# CONFIG_USB_MON is not set -# CONFIG_USB_WUSB_CBAF is not set - -# -# USB Host Controller Drivers -# -# CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_XHCI_PCI=y -CONFIG_USB_EHCI_HCD=y -CONFIG_USB_EHCI_ROOT_HUB_TT=y -CONFIG_USB_EHCI_TT_NEWSCHED=y -CONFIG_USB_EHCI_PCI=y -# CONFIG_USB_EHCI_HCD_PLATFORM is not set -# CONFIG_USB_OXU210HP_HCD is not set -# CONFIG_USB_ISP116X_HCD is not set -# CONFIG_USB_ISP1760_HCD is not set -# CONFIG_USB_ISP1362_HCD is not set -# CONFIG_USB_FUSBH200_HCD is not set -# CONFIG_USB_FOTG210_HCD is not set -# CONFIG_USB_MAX3421_HCD is not set -CONFIG_USB_OHCI_HCD=y -CONFIG_USB_OHCI_HCD_PCI=y -# CONFIG_USB_OHCI_HCD_SSB is not set -# CONFIG_USB_OHCI_HCD_PLATFORM is not set -CONFIG_USB_UHCI_HCD=y -# CONFIG_USB_SL811_HCD is not set -# CONFIG_USB_R8A66597_HCD is not set -# CONFIG_USB_HCD_BCMA is not set -# CONFIG_USB_HCD_SSB is not set -# CONFIG_USB_HCD_TEST_MODE is not set - -# -# USB Device Class drivers -# -# CONFIG_USB_ACM is not set -# CONFIG_USB_PRINTER is not set -# CONFIG_USB_WDM is not set -# CONFIG_USB_TMC is not set - -# -# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -# - -# -# also be needed; see USB_STORAGE Help for more info -# -CONFIG_USB_STORAGE=y -# CONFIG_USB_STORAGE_DEBUG is not set -# CONFIG_USB_STORAGE_REALTEK is not set -# CONFIG_USB_STORAGE_DATAFAB is not set -# CONFIG_USB_STORAGE_FREECOM is not set 
-# CONFIG_USB_STORAGE_ISD200 is not set -# CONFIG_USB_STORAGE_USBAT is not set -# CONFIG_USB_STORAGE_SDDR09 is not set -# CONFIG_USB_STORAGE_SDDR55 is not set -# CONFIG_USB_STORAGE_JUMPSHOT is not set -# CONFIG_USB_STORAGE_ALAUDA is not set -# CONFIG_USB_STORAGE_ONETOUCH is not set -# CONFIG_USB_STORAGE_KARMA is not set -# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set -# CONFIG_USB_STORAGE_ENE_UB6250 is not set -# CONFIG_USB_UAS is not set - -# -# USB Imaging devices -# -# CONFIG_USB_MDC800 is not set -# CONFIG_USB_MICROTEK is not set -# CONFIG_USBIP_CORE is not set -# CONFIG_USB_MUSB_HDRC is not set -# CONFIG_USB_DWC3 is not set -# CONFIG_USB_DWC2 is not set -# CONFIG_USB_CHIPIDEA is not set - -# -# USB port drivers -# -CONFIG_USB_SERIAL=y -CONFIG_USB_SERIAL_CONSOLE=y -# CONFIG_USB_SERIAL_GENERIC is not set -# CONFIG_USB_SERIAL_SIMPLE is not set -# CONFIG_USB_SERIAL_AIRCABLE is not set -# CONFIG_USB_SERIAL_ARK3116 is not set -# CONFIG_USB_SERIAL_BELKIN is not set -# CONFIG_USB_SERIAL_CH341 is not set -# CONFIG_USB_SERIAL_WHITEHEAT is not set -# CONFIG_USB_SERIAL_DIGI_ACCELEPORT is not set -# CONFIG_USB_SERIAL_CP210X is not set -# CONFIG_USB_SERIAL_CYPRESS_M8 is not set -# CONFIG_USB_SERIAL_EMPEG is not set -# CONFIG_USB_SERIAL_FTDI_SIO is not set -# CONFIG_USB_SERIAL_VISOR is not set -# CONFIG_USB_SERIAL_IPAQ is not set -# CONFIG_USB_SERIAL_IR is not set -# CONFIG_USB_SERIAL_EDGEPORT is not set -# CONFIG_USB_SERIAL_EDGEPORT_TI is not set -# CONFIG_USB_SERIAL_F81232 is not set -# CONFIG_USB_SERIAL_GARMIN is not set -# CONFIG_USB_SERIAL_IPW is not set -# CONFIG_USB_SERIAL_IUU is not set -# CONFIG_USB_SERIAL_KEYSPAN_PDA is not set -# CONFIG_USB_SERIAL_KEYSPAN is not set -# CONFIG_USB_SERIAL_KLSI is not set -# CONFIG_USB_SERIAL_KOBIL_SCT is not set -# CONFIG_USB_SERIAL_MCT_U232 is not set -# CONFIG_USB_SERIAL_METRO is not set -# CONFIG_USB_SERIAL_MOS7720 is not set -# CONFIG_USB_SERIAL_MOS7840 is not set -# CONFIG_USB_SERIAL_MXUPORT is not set -# 
CONFIG_USB_SERIAL_NAVMAN is not set -# CONFIG_USB_SERIAL_PL2303 is not set -# CONFIG_USB_SERIAL_OTI6858 is not set -# CONFIG_USB_SERIAL_QCAUX is not set -# CONFIG_USB_SERIAL_QUALCOMM is not set -# CONFIG_USB_SERIAL_SPCP8X5 is not set -# CONFIG_USB_SERIAL_SAFE is not set -# CONFIG_USB_SERIAL_SIERRAWIRELESS is not set -# CONFIG_USB_SERIAL_SYMBOL is not set -# CONFIG_USB_SERIAL_TI is not set -# CONFIG_USB_SERIAL_CYBERJACK is not set -# CONFIG_USB_SERIAL_XIRCOM is not set -# CONFIG_USB_SERIAL_OPTION is not set -# CONFIG_USB_SERIAL_OMNINET is not set -# CONFIG_USB_SERIAL_OPTICON is not set -# CONFIG_USB_SERIAL_XSENS_MT is not set -# CONFIG_USB_SERIAL_WISHBONE is not set -# CONFIG_USB_SERIAL_SSU100 is not set -# CONFIG_USB_SERIAL_QT2 is not set -# CONFIG_USB_SERIAL_DEBUG is not set - -# -# USB Miscellaneous drivers -# -# CONFIG_USB_EMI62 is not set -# CONFIG_USB_EMI26 is not set -# CONFIG_USB_ADUTUX is not set -# CONFIG_USB_SEVSEG is not set -# CONFIG_USB_RIO500 is not set -# CONFIG_USB_LEGOTOWER is not set -# CONFIG_USB_LCD is not set -# CONFIG_USB_LED is not set -# CONFIG_USB_CYPRESS_CY7C63 is not set -# CONFIG_USB_CYTHERM is not set -# CONFIG_USB_IDMOUSE is not set -# CONFIG_USB_FTDI_ELAN is not set -# CONFIG_USB_APPLEDISPLAY is not set -# CONFIG_USB_SISUSBVGA is not set -# CONFIG_USB_LD is not set -# CONFIG_USB_TRANCEVIBRATOR is not set -# CONFIG_USB_IOWARRIOR is not set -# CONFIG_USB_TEST is not set -# CONFIG_USB_EHSET_TEST_FIXTURE is not set -# CONFIG_USB_ISIGHTFW is not set -# CONFIG_USB_YUREX is not set -# CONFIG_USB_EZUSB_FX2 is not set -# CONFIG_USB_HSIC_USB3503 is not set -# CONFIG_USB_LINK_LAYER_TEST is not set - -# -# USB Physical Layer drivers -# -# CONFIG_USB_PHY is not set -# CONFIG_NOP_USB_XCEIV is not set -# CONFIG_USB_GPIO_VBUS is not set -# CONFIG_USB_ISP1301 is not set -# CONFIG_USB_GADGET is not set -# CONFIG_UWB is not set -CONFIG_MMC=y -# CONFIG_MMC_DEBUG is not set -# CONFIG_MMC_CLKGATE is not set - -# -# MMC/SD/SDIO Card Drivers -# 
-CONFIG_MMC_BLOCK=y -CONFIG_MMC_BLOCK_MINORS=8 -CONFIG_MMC_BLOCK_BOUNCE=y -# CONFIG_SDIO_UART is not set -# CONFIG_MMC_TEST is not set - -# -# MMC/SD/SDIO Host Controller Drivers -# -CONFIG_MMC_SDHCI=y -CONFIG_MMC_SDHCI_PCI=y -# CONFIG_MMC_RICOH_MMC is not set -# CONFIG_MMC_SDHCI_ACPI is not set -CONFIG_MMC_SDHCI_PLTFM=y -# CONFIG_MMC_WBSD is not set -# CONFIG_MMC_TIFM_SD is not set -CONFIG_MMC_SPI=y -# CONFIG_MMC_SDRICOH_CS is not set -# CONFIG_MMC_CB710 is not set -# CONFIG_MMC_VIA_SDMMC is not set -# CONFIG_MMC_VUB300 is not set -# CONFIG_MMC_USHC is not set -# CONFIG_MMC_USDHI6ROL0 is not set -# CONFIG_MEMSTICK is not set -CONFIG_NEW_LEDS=y -CONFIG_LEDS_CLASS=y - -# -# LED drivers -# -# CONFIG_LEDS_LM3530 is not set -# CONFIG_LEDS_LM3642 is not set -# CONFIG_LEDS_PCA9532 is not set -# CONFIG_LEDS_GPIO is not set -# CONFIG_LEDS_LP3944 is not set -# CONFIG_LEDS_LP5521 is not set -# CONFIG_LEDS_LP5523 is not set -# CONFIG_LEDS_LP5562 is not set -# CONFIG_LEDS_LP8501 is not set -# CONFIG_LEDS_PCA955X is not set -# CONFIG_LEDS_PCA963X is not set -# CONFIG_LEDS_DAC124S085 is not set -# CONFIG_LEDS_BD2802 is not set -# CONFIG_LEDS_INTEL_SS4200 is not set -# CONFIG_LEDS_LT3593 is not set -# CONFIG_LEDS_TCA6507 is not set -# CONFIG_LEDS_LM355x is not set - -# -# LED driver for blink(1) USB RGB LED is under Special HID drivers (HID_THINGM) -# -# CONFIG_LEDS_BLINKM is not set - -# -# LED Triggers -# -# CONFIG_LEDS_TRIGGERS is not set -# CONFIG_ACCESSIBILITY is not set -# CONFIG_INFINIBAND is not set -# CONFIG_EDAC is not set -CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -CONFIG_RTC_DRV_DS1307=y -CONFIG_RTC_DRV_DS1374=y -CONFIG_RTC_DRV_DS1672=y -CONFIG_RTC_DRV_DS3232=y 
-CONFIG_RTC_DRV_MAX6900=y -CONFIG_RTC_DRV_RS5C372=y -CONFIG_RTC_DRV_ISL1208=y -CONFIG_RTC_DRV_ISL12022=y -# CONFIG_RTC_DRV_ISL12057 is not set -CONFIG_RTC_DRV_X1205=y -# CONFIG_RTC_DRV_PCF2127 is not set -# CONFIG_RTC_DRV_PCF8523 is not set -CONFIG_RTC_DRV_PCF8563=y -# CONFIG_RTC_DRV_PCF85063 is not set -CONFIG_RTC_DRV_PCF8583=y -CONFIG_RTC_DRV_M41T80=y -# CONFIG_RTC_DRV_M41T80_WDT is not set -CONFIG_RTC_DRV_BQ32K=y -CONFIG_RTC_DRV_S35390A=y -CONFIG_RTC_DRV_FM3130=y -CONFIG_RTC_DRV_RX8581=y -CONFIG_RTC_DRV_RX8025=y -# CONFIG_RTC_DRV_EM3027 is not set -# CONFIG_RTC_DRV_RV3029C2 is not set - -# -# SPI RTC drivers -# -# CONFIG_RTC_DRV_M41T93 is not set -# CONFIG_RTC_DRV_M41T94 is not set -# CONFIG_RTC_DRV_DS1305 is not set -# CONFIG_RTC_DRV_DS1343 is not set -# CONFIG_RTC_DRV_DS1347 is not set -# CONFIG_RTC_DRV_DS1390 is not set -# CONFIG_RTC_DRV_MAX6902 is not set -# CONFIG_RTC_DRV_R9701 is not set -# CONFIG_RTC_DRV_RS5C348 is not set -# CONFIG_RTC_DRV_DS3234 is not set -# CONFIG_RTC_DRV_PCF2123 is not set -# CONFIG_RTC_DRV_RX4581 is not set -# CONFIG_RTC_DRV_MCP795 is not set - -# -# Platform RTC drivers -# -CONFIG_RTC_DRV_CMOS=y -CONFIG_RTC_DRV_DS1286=y -CONFIG_RTC_DRV_DS1511=y -CONFIG_RTC_DRV_DS1553=y -CONFIG_RTC_DRV_DS1742=y -# CONFIG_RTC_DRV_DS2404 is not set -CONFIG_RTC_DRV_STK17TA8=y -CONFIG_RTC_DRV_M48T86=y -CONFIG_RTC_DRV_M48T35=y -CONFIG_RTC_DRV_M48T59=y -CONFIG_RTC_DRV_MSM6242=y -CONFIG_RTC_DRV_BQ4802=y -CONFIG_RTC_DRV_RP5C01=y -CONFIG_RTC_DRV_V3020=y - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_XGENE is not set - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set -# CONFIG_DMADEVICES is not set -# CONFIG_AUXDISPLAY is not set -CONFIG_UIO=y -# CONFIG_UIO_CIF is not set -# CONFIG_UIO_PDRV_GENIRQ is not set -# CONFIG_UIO_DMEM_GENIRQ is not set -# CONFIG_UIO_AEC is not set -# CONFIG_UIO_SERCOS3 is not set -# CONFIG_UIO_PCI_GENERIC is not set -# CONFIG_UIO_NETX is not set -# CONFIG_UIO_MF624 is not set -# CONFIG_VIRT_DRIVERS is not 
set - -# -# Virtio drivers -# -# CONFIG_VIRTIO_PCI is not set -# CONFIG_VIRTIO_MMIO is not set - -# -# Microsoft Hyper-V guest support -# -# CONFIG_STAGING is not set -CONFIG_X86_PLATFORM_DEVICES=y -# CONFIG_ACERHDF is not set -# CONFIG_ASUS_LAPTOP is not set -# CONFIG_DELL_SMO8800 is not set -# CONFIG_FUJITSU_TABLET is not set -# CONFIG_HP_WIRELESS is not set -# CONFIG_THINKPAD_ACPI is not set -# CONFIG_SENSORS_HDAPS is not set -# CONFIG_INTEL_MENLOW is not set -# CONFIG_EEEPC_LAPTOP is not set -# CONFIG_ACPI_WMI is not set -# CONFIG_TOPSTAR_LAPTOP is not set -# CONFIG_TOSHIBA_BT_RFKILL is not set -# CONFIG_TOSHIBA_HAPS is not set -# CONFIG_ACPI_CMPC is not set -# CONFIG_INTEL_IPS is not set -# CONFIG_IBM_RTL is not set -# CONFIG_SAMSUNG_Q10 is not set -# CONFIG_INTEL_RST is not set -# CONFIG_INTEL_SMARTCONNECT is not set -# CONFIG_PVPANIC is not set -# CONFIG_CHROME_PLATFORMS is not set - -# -# SOC (System On Chip) specific Drivers -# -# CONFIG_SOC_TI is not set - -# -# Hardware Spinlock drivers -# - -# -# Clock Source drivers -# -CONFIG_CLKEVT_I8253=y -CONFIG_I8253_LOCK=y -CONFIG_CLKBLD_I8253=y -# CONFIG_ATMEL_PIT is not set -# CONFIG_SH_TIMER_CMT is not set -# CONFIG_SH_TIMER_MTU2 is not set -# CONFIG_SH_TIMER_TMU is not set -# CONFIG_EM_TIMER_STI is not set -# CONFIG_MAILBOX is not set -CONFIG_IOMMU_SUPPORT=y - -# -# Generic IOMMU Pagetable Support -# -# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set -# CONFIG_AMD_IOMMU is not set -# CONFIG_INTEL_IOMMU is not set -# CONFIG_IRQ_REMAP is not set - -# -# Remoteproc drivers -# -# CONFIG_STE_MODEM_RPROC is not set - -# -# Rpmsg drivers -# - -# -# SOC (System On Chip) specific Drivers -# -# CONFIG_PM_DEVFREQ is not set -# CONFIG_EXTCON is not set -# CONFIG_MEMORY is not set -# CONFIG_IIO is not set -# CONFIG_NTB is not set -# CONFIG_VME_BUS is not set -# CONFIG_PWM is not set -# CONFIG_IPACK_BUS is not set -# CONFIG_RESET_CONTROLLER is not set -# CONFIG_FMC is not set - -# -# PHY Subsystem -# -CONFIG_GENERIC_PHY=y -# 
CONFIG_BCM_KONA_USB2_PHY is not set -# CONFIG_POWERCAP is not set -# CONFIG_MCB is not set -CONFIG_RAS=y -# CONFIG_THUNDERBOLT is not set - -# -# Firmware Drivers -# -CONFIG_EDD=y -# CONFIG_EDD_OFF is not set -CONFIG_FIRMWARE_MEMMAP=y -CONFIG_DELL_RBU=y -CONFIG_DCDBAS=y -CONFIG_DMIID=y -CONFIG_DMI_SYSFS=y -CONFIG_DMI_SCAN_MACHINE_NON_EFI_FALLBACK=y -CONFIG_ISCSI_IBFT_FIND=y -CONFIG_ISCSI_IBFT=y -# CONFIG_GOOGLE_FIRMWARE is not set - -# -# File systems -# -CONFIG_DCACHE_WORD_ACCESS=y -CONFIG_EXT2_FS=y -CONFIG_EXT2_FS_XATTR=y -CONFIG_EXT2_FS_POSIX_ACL=y -CONFIG_EXT2_FS_SECURITY=y -# CONFIG_EXT2_FS_XIP is not set -CONFIG_EXT3_FS=y -CONFIG_EXT3_DEFAULTS_TO_ORDERED=y -CONFIG_EXT3_FS_XATTR=y -CONFIG_EXT3_FS_POSIX_ACL=y -CONFIG_EXT3_FS_SECURITY=y -CONFIG_EXT4_FS=y -CONFIG_EXT4_FS_POSIX_ACL=y -CONFIG_EXT4_FS_SECURITY=y -# CONFIG_EXT4_DEBUG is not set -CONFIG_JBD=y -# CONFIG_JBD_DEBUG is not set -CONFIG_JBD2=y -# CONFIG_JBD2_DEBUG is not set -CONFIG_FS_MBCACHE=y -# CONFIG_REISERFS_FS is not set -# CONFIG_JFS_FS is not set -# CONFIG_XFS_FS is not set -# CONFIG_GFS2_FS is not set -# CONFIG_OCFS2_FS is not set -CONFIG_BTRFS_FS=y -CONFIG_BTRFS_FS_POSIX_ACL=y -# CONFIG_BTRFS_FS_CHECK_INTEGRITY is not set -# CONFIG_BTRFS_FS_RUN_SANITY_TESTS is not set -# CONFIG_BTRFS_DEBUG is not set -# CONFIG_BTRFS_ASSERT is not set -# CONFIG_NILFS2_FS is not set -CONFIG_FS_POSIX_ACL=y -CONFIG_EXPORTFS=y -CONFIG_FILE_LOCKING=y -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -# CONFIG_QUOTA is not set -# CONFIG_QUOTACTL is not set -# CONFIG_AUTOFS4_FS is not set -# CONFIG_FUSE_FS is not set -CONFIG_OVERLAY_FS=y - -# -# Caches -# -CONFIG_FSCACHE=y -CONFIG_FSCACHE_STATS=y -# CONFIG_FSCACHE_HISTOGRAM is not set -# CONFIG_FSCACHE_DEBUG is not set -# CONFIG_FSCACHE_OBJECT_LIST is not set -CONFIG_CACHEFILES=y -# CONFIG_CACHEFILES_DEBUG is not set -# CONFIG_CACHEFILES_HISTOGRAM is not set - -# -# CD-ROM/DVD Filesystems -# -CONFIG_ISO9660_FS=y -CONFIG_JOLIET=y 
-CONFIG_ZISOFS=y -CONFIG_UDF_FS=y -CONFIG_UDF_NLS=y - -# -# DOS/FAT/NT Filesystems -# -CONFIG_FAT_FS=y -CONFIG_MSDOS_FS=y -CONFIG_VFAT_FS=y -CONFIG_FAT_DEFAULT_CODEPAGE=437 -CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" -# CONFIG_NTFS_FS is not set - -# -# Pseudo filesystems -# -CONFIG_PROC_FS=y -CONFIG_PROC_KCORE=y -CONFIG_PROC_VMCORE=y -CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y -CONFIG_KERNFS=y -CONFIG_SYSFS=y -CONFIG_TMPFS=y -CONFIG_TMPFS_POSIX_ACL=y -CONFIG_TMPFS_XATTR=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -CONFIG_CONFIGFS_FS=y -CONFIG_MISC_FILESYSTEMS=y -# CONFIG_ADFS_FS is not set -# CONFIG_AFFS_FS is not set -# CONFIG_ECRYPT_FS is not set -# CONFIG_HFS_FS is not set -# CONFIG_HFSPLUS_FS is not set -# CONFIG_BEFS_FS is not set -# CONFIG_BFS_FS is not set -# CONFIG_EFS_FS is not set -# CONFIG_LOGFS is not set -# CONFIG_CRAMFS is not set -CONFIG_SQUASHFS=y -CONFIG_SQUASHFS_FILE_CACHE=y -# CONFIG_SQUASHFS_FILE_DIRECT is not set -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -# CONFIG_VXFS_FS is not set -# CONFIG_MINIX_FS is not set -# CONFIG_OMFS_FS is not set -# CONFIG_HPFS_FS is not set -# CONFIG_QNX4FS_FS is not set -# CONFIG_QNX6FS_FS is not set -# CONFIG_ROMFS_FS is not set -# CONFIG_PSTORE is not set -# CONFIG_SYSV_FS is not set -# CONFIG_UFS_FS is not set -# CONFIG_EXOFS_FS is not set -# CONFIG_F2FS_FS is not set -CONFIG_AUFS_FS=y -CONFIG_AUFS_BRANCH_MAX_127=y -# CONFIG_AUFS_BRANCH_MAX_511 is not set -# CONFIG_AUFS_BRANCH_MAX_1023 is not set -# CONFIG_AUFS_BRANCH_MAX_32767 is not set -CONFIG_AUFS_SBILIST=y -# CONFIG_AUFS_HNOTIFY is not set -# CONFIG_AUFS_EXPORT is not set -# CONFIG_AUFS_XATTR is not set -# CONFIG_AUFS_FHSM is not set -# CONFIG_AUFS_RDU is 
not set -# CONFIG_AUFS_SHWH is not set -# CONFIG_AUFS_BR_RAMFS is not set -CONFIG_AUFS_BDEV_LOOP=y -# CONFIG_AUFS_DEBUG is not set -CONFIG_ORE=y -CONFIG_NETWORK_FILESYSTEMS=y -CONFIG_NFS_FS=y -CONFIG_NFS_V2=y -CONFIG_NFS_V3=y -CONFIG_NFS_V3_ACL=y -CONFIG_NFS_V4=y -# CONFIG_NFS_SWAP is not set -CONFIG_NFS_V4_1=y -# CONFIG_NFS_V4_2 is not set -CONFIG_PNFS_FILE_LAYOUT=y -CONFIG_PNFS_OBJLAYOUT=y -CONFIG_NFS_V4_1_IMPLEMENTATION_ID_DOMAIN="kernel.org" -# CONFIG_NFS_V4_1_MIGRATION is not set -# CONFIG_NFS_FSCACHE is not set -# CONFIG_NFS_USE_LEGACY_DNS is not set -CONFIG_NFS_USE_KERNEL_DNS=y -CONFIG_NFSD=y -CONFIG_NFSD_V2_ACL=y -CONFIG_NFSD_V3=y -CONFIG_NFSD_V3_ACL=y -CONFIG_NFSD_V4=y -# CONFIG_NFSD_FAULT_INJECTION is not set -CONFIG_GRACE_PERIOD=y -CONFIG_LOCKD=y -CONFIG_LOCKD_V4=y -CONFIG_NFS_ACL_SUPPORT=y -CONFIG_NFS_COMMON=y -CONFIG_SUNRPC=y -CONFIG_SUNRPC_GSS=y -CONFIG_SUNRPC_BACKCHANNEL=y -# CONFIG_RPCSEC_GSS_KRB5 is not set -# CONFIG_SUNRPC_DEBUG is not set -# CONFIG_CEPH_FS is not set -# CONFIG_CIFS is not set -# CONFIG_NCP_FS is not set -# CONFIG_CODA_FS is not set -# CONFIG_AFS_FS is not set -CONFIG_NLS=y -CONFIG_NLS_DEFAULT="utf8" -CONFIG_NLS_CODEPAGE_437=y -# CONFIG_NLS_CODEPAGE_737 is not set -# CONFIG_NLS_CODEPAGE_775 is not set -# CONFIG_NLS_CODEPAGE_850 is not set -# CONFIG_NLS_CODEPAGE_852 is not set -# CONFIG_NLS_CODEPAGE_855 is not set -# CONFIG_NLS_CODEPAGE_857 is not set -# CONFIG_NLS_CODEPAGE_860 is not set -# CONFIG_NLS_CODEPAGE_861 is not set -# CONFIG_NLS_CODEPAGE_862 is not set -# CONFIG_NLS_CODEPAGE_863 is not set -# CONFIG_NLS_CODEPAGE_864 is not set -# CONFIG_NLS_CODEPAGE_865 is not set -# CONFIG_NLS_CODEPAGE_866 is not set -# CONFIG_NLS_CODEPAGE_869 is not set -# CONFIG_NLS_CODEPAGE_936 is not set -# CONFIG_NLS_CODEPAGE_950 is not set -# CONFIG_NLS_CODEPAGE_932 is not set -# CONFIG_NLS_CODEPAGE_949 is not set -# CONFIG_NLS_CODEPAGE_874 is not set -# CONFIG_NLS_ISO8859_8 is not set -# CONFIG_NLS_CODEPAGE_1250 is not set -# 
CONFIG_NLS_CODEPAGE_1251 is not set -CONFIG_NLS_ASCII=y -CONFIG_NLS_ISO8859_1=y -# CONFIG_NLS_ISO8859_2 is not set -# CONFIG_NLS_ISO8859_3 is not set -# CONFIG_NLS_ISO8859_4 is not set -# CONFIG_NLS_ISO8859_5 is not set -# CONFIG_NLS_ISO8859_6 is not set -# CONFIG_NLS_ISO8859_7 is not set -# CONFIG_NLS_ISO8859_9 is not set -# CONFIG_NLS_ISO8859_13 is not set -# CONFIG_NLS_ISO8859_14 is not set -# CONFIG_NLS_ISO8859_15 is not set -# CONFIG_NLS_KOI8_R is not set -# CONFIG_NLS_KOI8_U is not set -# CONFIG_NLS_MAC_ROMAN is not set -# CONFIG_NLS_MAC_CELTIC is not set -# CONFIG_NLS_MAC_CENTEURO is not set -# CONFIG_NLS_MAC_CROATIAN is not set -# CONFIG_NLS_MAC_CYRILLIC is not set -# CONFIG_NLS_MAC_GAELIC is not set -# CONFIG_NLS_MAC_GREEK is not set -# CONFIG_NLS_MAC_ICELAND is not set -# CONFIG_NLS_MAC_INUIT is not set -# CONFIG_NLS_MAC_ROMANIAN is not set -# CONFIG_NLS_MAC_TURKISH is not set -CONFIG_NLS_UTF8=y -# CONFIG_DLM is not set - -# -# Kernel hacking -# -CONFIG_TRACE_IRQFLAGS_SUPPORT=y - -# -# printk and dmesg options -# -# CONFIG_PRINTK_TIME is not set -CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -CONFIG_BOOT_PRINTK_DELAY=y -# CONFIG_DYNAMIC_DEBUG is not set - -# -# Compile-time checks and compiler options -# -CONFIG_DEBUG_INFO=y -# CONFIG_DEBUG_INFO_REDUCED is not set -# CONFIG_DEBUG_INFO_SPLIT is not set -# CONFIG_DEBUG_INFO_DWARF4 is not set -CONFIG_ENABLE_WARN_DEPRECATED=y -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=2048 -CONFIG_STRIP_ASM_SYMS=y -# CONFIG_READABLE_ASM is not set -CONFIG_UNUSED_SYMBOLS=y -CONFIG_DEBUG_FS=y -# CONFIG_HEADERS_CHECK is not set -# CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_ARCH_WANT_FRAME_POINTERS=y -# CONFIG_FRAME_POINTER is not set -# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -CONFIG_MAGIC_SYSRQ=y -CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -CONFIG_DEBUG_KERNEL=y - -# -# Memory Debugging -# -# CONFIG_DEBUG_PAGEALLOC is not set -# CONFIG_DEBUG_OBJECTS is not set -# CONFIG_DEBUG_SLAB is not set -CONFIG_HAVE_DEBUG_KMEMLEAK=y -# 
CONFIG_DEBUG_KMEMLEAK is not set -# CONFIG_DEBUG_STACK_USAGE is not set -# CONFIG_DEBUG_VM is not set -# CONFIG_DEBUG_VIRTUAL is not set -CONFIG_DEBUG_MEMORY_INIT=y -# CONFIG_DEBUG_PER_CPU_MAPS is not set -CONFIG_HAVE_DEBUG_STACKOVERFLOW=y -# CONFIG_DEBUG_STACKOVERFLOW is not set -CONFIG_HAVE_ARCH_KMEMCHECK=y -# CONFIG_DEBUG_SHIRQ is not set - -# -# Debug Lockups and Hangs -# -CONFIG_LOCKUP_DETECTOR=y -CONFIG_HARDLOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_HARDLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_HARDLOCKUP_PANIC_VALUE=0 -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 -# CONFIG_PANIC_ON_OOPS is not set -CONFIG_PANIC_ON_OOPS_VALUE=0 -CONFIG_PANIC_TIMEOUT=0 -CONFIG_SCHED_DEBUG=y -# CONFIG_SCHEDSTATS is not set -# CONFIG_SCHED_STACK_END_CHECK is not set -CONFIG_TIMER_STATS=y - -# -# Lock Debugging (spinlocks, mutexes, etc...) 
-# -# CONFIG_DEBUG_RT_MUTEXES is not set -# CONFIG_DEBUG_SPINLOCK is not set -# CONFIG_DEBUG_MUTEXES is not set -# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -# CONFIG_DEBUG_LOCK_ALLOC is not set -# CONFIG_PROVE_LOCKING is not set -# CONFIG_LOCK_STAT is not set -# CONFIG_DEBUG_ATOMIC_SLEEP is not set -# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -# CONFIG_LOCK_TORTURE_TEST is not set -CONFIG_STACKTRACE=y -# CONFIG_DEBUG_KOBJECT is not set -CONFIG_DEBUG_BUGVERBOSE=y -# CONFIG_DEBUG_LIST is not set -# CONFIG_DEBUG_PI_LIST is not set -# CONFIG_DEBUG_SG is not set -# CONFIG_DEBUG_NOTIFIERS is not set -# CONFIG_DEBUG_CREDENTIALS is not set - -# -# RCU Debugging -# -# CONFIG_SPARSE_RCU_POINTER is not set -# CONFIG_TORTURE_TEST is not set -# CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=60 -# CONFIG_RCU_CPU_STALL_INFO is not set -# CONFIG_RCU_TRACE is not set -# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -# CONFIG_NOTIFIER_ERROR_INJECTION is not set -# CONFIG_FAULT_INJECTION is not set -# CONFIG_LATENCYTOP is not set -CONFIG_ARCH_HAS_DEBUG_STRICT_USER_COPY_CHECKS=y -# CONFIG_DEBUG_STRICT_USER_COPY_CHECKS is not set -CONFIG_USER_STACKTRACE_SUPPORT=y -CONFIG_NOP_TRACER=y -CONFIG_HAVE_FUNCTION_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -CONFIG_HAVE_FUNCTION_GRAPH_FP_TEST=y -CONFIG_HAVE_DYNAMIC_FTRACE=y -CONFIG_HAVE_DYNAMIC_FTRACE_WITH_REGS=y -CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -CONFIG_HAVE_FENTRY=y -CONFIG_HAVE_C_RECORDMCOUNT=y -CONFIG_TRACE_CLOCK=y -CONFIG_RING_BUFFER=y -CONFIG_EVENT_TRACING=y -CONFIG_CONTEXT_SWITCH_TRACER=y -CONFIG_TRACING=y -CONFIG_GENERIC_TRACER=y -CONFIG_TRACING_SUPPORT=y -CONFIG_FTRACE=y -# CONFIG_FUNCTION_TRACER is not set -# CONFIG_IRQSOFF_TRACER is not set -# CONFIG_SCHED_TRACER is not set -# CONFIG_FTRACE_SYSCALLS is not set -# CONFIG_TRACER_SNAPSHOT is not set -CONFIG_BRANCH_PROFILE_NONE=y -# CONFIG_PROFILE_ANNOTATED_BRANCHES is not set -# CONFIG_PROFILE_ALL_BRANCHES is not set -# 
CONFIG_STACK_TRACER is not set -CONFIG_BLK_DEV_IO_TRACE=y -# CONFIG_UPROBE_EVENT is not set -# CONFIG_PROBE_EVENTS is not set -# CONFIG_FTRACE_STARTUP_TEST is not set -# CONFIG_MMIOTRACE is not set -# CONFIG_TRACEPOINT_BENCHMARK is not set -# CONFIG_RING_BUFFER_BENCHMARK is not set -# CONFIG_RING_BUFFER_STARTUP_TEST is not set - -# -# Runtime Testing -# -# CONFIG_LKDTM is not set -# CONFIG_TEST_LIST_SORT is not set -# CONFIG_BACKTRACE_SELF_TEST is not set -# CONFIG_RBTREE_TEST is not set -# CONFIG_INTERVAL_TREE_TEST is not set -# CONFIG_PERCPU_TEST is not set -# CONFIG_ATOMIC64_SELFTEST is not set -# CONFIG_TEST_STRING_HELPERS is not set -# CONFIG_TEST_KSTRTOX is not set -# CONFIG_TEST_RHASHTABLE is not set -# CONFIG_PROVIDE_OHCI1394_DMA_INIT is not set -# CONFIG_DMA_API_DEBUG is not set -# CONFIG_TEST_LKM is not set -# CONFIG_TEST_USER_COPY is not set -# CONFIG_TEST_BPF is not set -# CONFIG_TEST_FIRMWARE is not set -# CONFIG_TEST_UDELAY is not set -# CONFIG_SAMPLES is not set -CONFIG_HAVE_ARCH_KGDB=y -# CONFIG_KGDB is not set -CONFIG_STRICT_DEVMEM=y -CONFIG_X86_VERBOSE_BOOTUP=y -CONFIG_EARLY_PRINTK=y -# CONFIG_EARLY_PRINTK_DBGP is not set -# CONFIG_X86_PTDUMP is not set -CONFIG_DEBUG_RODATA=y -# CONFIG_DEBUG_RODATA_TEST is not set -# CONFIG_DEBUG_SET_MODULE_RONX is not set -# CONFIG_DEBUG_NX_TEST is not set -CONFIG_DOUBLEFAULT=y -# CONFIG_DEBUG_TLBFLUSH is not set -# CONFIG_IOMMU_DEBUG is not set -# CONFIG_IOMMU_STRESS is not set -CONFIG_HAVE_MMIOTRACE_SUPPORT=y -CONFIG_IO_DELAY_TYPE_0X80=0 -CONFIG_IO_DELAY_TYPE_0XED=1 -CONFIG_IO_DELAY_TYPE_UDELAY=2 -CONFIG_IO_DELAY_TYPE_NONE=3 -CONFIG_IO_DELAY_0X80=y -# CONFIG_IO_DELAY_0XED is not set -# CONFIG_IO_DELAY_UDELAY is not set -# CONFIG_IO_DELAY_NONE is not set -CONFIG_DEFAULT_IO_DELAY_TYPE=0 -# CONFIG_DEBUG_BOOT_PARAMS is not set -# CONFIG_CPA_DEBUG is not set -CONFIG_OPTIMIZE_INLINING=y -# CONFIG_DEBUG_NMI_SELFTEST is not set -# CONFIG_X86_DEBUG_STATIC_CPU_HAS is not set - -# -# Security options -# -CONFIG_KEYS=y -# 
CONFIG_PERSISTENT_KEYRINGS is not set -# CONFIG_BIG_KEYS is not set -# CONFIG_ENCRYPTED_KEYS is not set -# CONFIG_KEYS_DEBUG_PROC_KEYS is not set -# CONFIG_SECURITY_DMESG_RESTRICT is not set -# CONFIG_SECURITY is not set -# CONFIG_SECURITYFS is not set -CONFIG_DEFAULT_SECURITY_DAC=y -CONFIG_DEFAULT_SECURITY="" -CONFIG_XOR_BLOCKS=y -CONFIG_ASYNC_CORE=y -CONFIG_ASYNC_XOR=y -CONFIG_ASYNC_PQ=y -CONFIG_CRYPTO=y - -# -# Crypto core or helper -# -CONFIG_CRYPTO_ALGAPI=y -CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y -CONFIG_CRYPTO_AEAD2=y -CONFIG_CRYPTO_BLKCIPHER=y -CONFIG_CRYPTO_BLKCIPHER2=y -CONFIG_CRYPTO_HASH=y -CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y -CONFIG_CRYPTO_RNG2=y -CONFIG_CRYPTO_PCOMP=y -CONFIG_CRYPTO_PCOMP2=y -CONFIG_CRYPTO_MANAGER=y -CONFIG_CRYPTO_MANAGER2=y -# CONFIG_CRYPTO_USER is not set -# CONFIG_CRYPTO_MANAGER_DISABLE_TESTS is not set -CONFIG_CRYPTO_GF128MUL=y -CONFIG_CRYPTO_NULL=y -# CONFIG_CRYPTO_PCRYPT is not set -CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_CRYPTD=y -# CONFIG_CRYPTO_MCRYPTD is not set -CONFIG_CRYPTO_AUTHENC=y -# CONFIG_CRYPTO_TEST is not set -CONFIG_CRYPTO_ABLK_HELPER=y -CONFIG_CRYPTO_GLUE_HELPER_X86=y - -# -# Authenticated Encryption with Associated Data -# -CONFIG_CRYPTO_CCM=y -CONFIG_CRYPTO_GCM=y -CONFIG_CRYPTO_SEQIV=y - -# -# Block modes -# -CONFIG_CRYPTO_CBC=y -CONFIG_CRYPTO_CTR=y -CONFIG_CRYPTO_CTS=y -CONFIG_CRYPTO_ECB=y -CONFIG_CRYPTO_LRW=y -CONFIG_CRYPTO_PCBC=y -CONFIG_CRYPTO_XTS=y - -# -# Hash modes -# -# CONFIG_CRYPTO_CMAC is not set -CONFIG_CRYPTO_HMAC=y -CONFIG_CRYPTO_XCBC=y -CONFIG_CRYPTO_VMAC=y - -# -# Digest -# -CONFIG_CRYPTO_CRC32C=y -CONFIG_CRYPTO_CRC32C_INTEL=y -# CONFIG_CRYPTO_CRC32 is not set -# CONFIG_CRYPTO_CRC32_PCLMUL is not set -CONFIG_CRYPTO_CRCT10DIF=y -# CONFIG_CRYPTO_CRCT10DIF_PCLMUL is not set -CONFIG_CRYPTO_GHASH=y -CONFIG_CRYPTO_MD4=y -CONFIG_CRYPTO_MD5=y -CONFIG_CRYPTO_MICHAEL_MIC=y -CONFIG_CRYPTO_RMD128=y -CONFIG_CRYPTO_RMD160=y -CONFIG_CRYPTO_RMD256=y -CONFIG_CRYPTO_RMD320=y -CONFIG_CRYPTO_SHA1=y 
-CONFIG_CRYPTO_SHA1_SSSE3=y -# CONFIG_CRYPTO_SHA256_SSSE3 is not set -# CONFIG_CRYPTO_SHA512_SSSE3 is not set -# CONFIG_CRYPTO_SHA1_MB is not set -CONFIG_CRYPTO_SHA256=y -CONFIG_CRYPTO_SHA512=y -CONFIG_CRYPTO_TGR192=y -CONFIG_CRYPTO_WP512=y -CONFIG_CRYPTO_GHASH_CLMUL_NI_INTEL=y - -# -# Ciphers -# -CONFIG_CRYPTO_AES=y -CONFIG_CRYPTO_AES_X86_64=y -CONFIG_CRYPTO_AES_NI_INTEL=y -CONFIG_CRYPTO_ANUBIS=y -CONFIG_CRYPTO_ARC4=y -CONFIG_CRYPTO_BLOWFISH=y -CONFIG_CRYPTO_BLOWFISH_COMMON=y -CONFIG_CRYPTO_BLOWFISH_X86_64=y -CONFIG_CRYPTO_CAMELLIA=y -# CONFIG_CRYPTO_CAMELLIA_X86_64 is not set -# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX_X86_64 is not set -# CONFIG_CRYPTO_CAMELLIA_AESNI_AVX2_X86_64 is not set -CONFIG_CRYPTO_CAST_COMMON=y -CONFIG_CRYPTO_CAST5=y -# CONFIG_CRYPTO_CAST5_AVX_X86_64 is not set -CONFIG_CRYPTO_CAST6=y -# CONFIG_CRYPTO_CAST6_AVX_X86_64 is not set -CONFIG_CRYPTO_DES=y -# CONFIG_CRYPTO_DES3_EDE_X86_64 is not set -CONFIG_CRYPTO_FCRYPT=y -CONFIG_CRYPTO_KHAZAD=y -CONFIG_CRYPTO_SALSA20=y -CONFIG_CRYPTO_SALSA20_X86_64=y -CONFIG_CRYPTO_SEED=y -CONFIG_CRYPTO_SERPENT=y -# CONFIG_CRYPTO_SERPENT_SSE2_X86_64 is not set -# CONFIG_CRYPTO_SERPENT_AVX_X86_64 is not set -# CONFIG_CRYPTO_SERPENT_AVX2_X86_64 is not set -CONFIG_CRYPTO_TEA=y -CONFIG_CRYPTO_TWOFISH=y -CONFIG_CRYPTO_TWOFISH_COMMON=y -CONFIG_CRYPTO_TWOFISH_X86_64=y -CONFIG_CRYPTO_TWOFISH_X86_64_3WAY=y -# CONFIG_CRYPTO_TWOFISH_AVX_X86_64 is not set - -# -# Compression -# -CONFIG_CRYPTO_DEFLATE=y -CONFIG_CRYPTO_ZLIB=y -CONFIG_CRYPTO_LZO=y -# CONFIG_CRYPTO_LZ4 is not set -# CONFIG_CRYPTO_LZ4HC is not set - -# -# Random Number Generation -# -CONFIG_CRYPTO_ANSI_CPRNG=y -# CONFIG_CRYPTO_DRBG_MENU is not set -CONFIG_CRYPTO_USER_API=y -CONFIG_CRYPTO_USER_API_HASH=y -CONFIG_CRYPTO_USER_API_SKCIPHER=y -CONFIG_CRYPTO_HW=y -CONFIG_CRYPTO_DEV_PADLOCK=y -CONFIG_CRYPTO_DEV_PADLOCK_AES=y -CONFIG_CRYPTO_DEV_PADLOCK_SHA=y -# CONFIG_CRYPTO_DEV_CCP is not set -# CONFIG_CRYPTO_DEV_QAT_DH895xCC is not set -# CONFIG_ASYMMETRIC_KEY_TYPE is not 
set -CONFIG_HAVE_KVM=y -# CONFIG_VIRTUALIZATION is not set -CONFIG_BINARY_PRINTF=y - -# -# Library routines -# -CONFIG_RAID6_PQ=y -CONFIG_BITREVERSE=y -CONFIG_GENERIC_STRNCPY_FROM_USER=y -CONFIG_GENERIC_STRNLEN_USER=y -CONFIG_GENERIC_NET_UTILS=y -CONFIG_GENERIC_FIND_FIRST_BIT=y -CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_IOMAP=y -CONFIG_GENERIC_IO=y -CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -CONFIG_ARCH_HAS_FAST_MULTIPLIER=y -CONFIG_CRC_CCITT=y -CONFIG_CRC16=y -CONFIG_CRC_T10DIF=y -CONFIG_CRC_ITU_T=y -CONFIG_CRC32=y -# CONFIG_CRC32_SELFTEST is not set -CONFIG_CRC32_SLICEBY8=y -# CONFIG_CRC32_SLICEBY4 is not set -# CONFIG_CRC32_SARWATE is not set -# CONFIG_CRC32_BIT is not set -CONFIG_CRC7=y -CONFIG_LIBCRC32C=y -CONFIG_CRC8=y -# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set -# CONFIG_RANDOM32_SELFTEST is not set -CONFIG_ZLIB_INFLATE=y -CONFIG_ZLIB_DEFLATE=y -CONFIG_LZO_COMPRESS=y -CONFIG_LZO_DECOMPRESS=y -CONFIG_XZ_DEC=y -CONFIG_XZ_DEC_X86=y -CONFIG_XZ_DEC_POWERPC=y -CONFIG_XZ_DEC_IA64=y -CONFIG_XZ_DEC_ARM=y -CONFIG_XZ_DEC_ARMTHUMB=y -CONFIG_XZ_DEC_SPARC=y -CONFIG_XZ_DEC_BCJ=y -# CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_TEXTSEARCH=y -CONFIG_TEXTSEARCH_KMP=y -CONFIG_TEXTSEARCH_BM=y -CONFIG_TEXTSEARCH_FSM=y -CONFIG_ASSOCIATIVE_ARRAY=y -CONFIG_HAS_IOMEM=y -CONFIG_HAS_IOPORT_MAP=y -CONFIG_HAS_DMA=y -CONFIG_CHECK_SIGNATURE=y -CONFIG_CPU_RMAP=y -CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set -CONFIG_NLATTR=y -CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y -CONFIG_AVERAGE=y -CONFIG_CORDIC=y -# CONFIG_DDR is not set -CONFIG_OID_REGISTRY=y -CONFIG_ARCH_HAS_SG_CHAIN=y diff --git a/packages/base/any/kernels/3.18.25/patches/0001-Patch-set-for-booting-ls2088rdb-with-vfio.patch b/packages/base/any/kernels/3.18.25/patches/0001-Patch-set-for-booting-ls2088rdb-with-vfio.patch deleted file mode 100644 index ad0e4a06..00000000 --- 
a/packages/base/any/kernels/3.18.25/patches/0001-Patch-set-for-booting-ls2088rdb-with-vfio.patch +++ /dev/null @@ -1,101360 +0,0 @@ -From 7ab86f28bfb4d36d4d741a41941a0aa971124d88 Mon Sep 17 00:00:00 2001 -From: "Chenyin.Ha" -Date: Fri, 19 May 2017 13:44:09 +0800 -Subject: [PATCH] Patch set for booting ls2088rdb with vfio - ---- - Documentation/IRQ-domain.txt | 71 + - Documentation/devicetree/bindings/arm/fsl.txt | 15 + - Documentation/devicetree/bindings/arm/gic.txt | 8 +- - .../devicetree/bindings/clock/qoriq-clock.txt | 64 +- - Documentation/devicetree/bindings/i2c/i2c-imx.txt | 11 + - .../devicetree/bindings/i2c/i2c-mux-pca954x.txt | 3 + - .../bindings/memory-controllers/fsl/ifc.txt | 3 + - .../devicetree/bindings/pci/designware-pcie.txt | 3 +- - .../devicetree/bindings/powerpc/fsl/board.txt | 14 +- - Documentation/devicetree/bindings/usb/dwc3.txt | 3 +- - Documentation/devicetree/of_selftest.txt | 20 +- - Documentation/devicetree/todo.txt | 1 - - MAINTAINERS | 60 + - arch/arm/Kconfig | 3 + - arch/arm/Makefile | 8 +- - arch/arm/boot/dts/Makefile | 12 +- - arch/arm/include/asm/dma-mapping.h | 10 +- - arch/arm/include/asm/mach/pci.h | 12 +- - arch/arm/include/asm/pci.h | 7 - - arch/arm/kernel/bios32.c | 39 +- - arch/arm/mach-iop13xx/msi.c | 10 +- - arch/arm64/Kconfig | 8 +- - arch/arm64/Makefile | 11 +- - arch/arm64/boot/dts/Makefile | 2 +- - arch/arm64/boot/dts/Makefile.rej | 10 + - arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts | 249 ++ - arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts | 256 ++ - arch/arm64/boot/dts/fsl-ls2080a.dtsi | 729 +++++ - arch/arm64/boot/dts/fsl-ls2088a.dtsi | 833 ++++++ - arch/arm64/boot/dts/include/dt-bindings | 1 + - arch/arm64/boot/dts/thermal.h | 17 + - arch/arm64/configs/defconfig | 1 + - arch/arm64/configs/nxp_ls2088rdb_config | 3034 ++++++++++++++++++++ - arch/arm64/include/asm/device.h | 1 + - arch/arm64/include/asm/dma-mapping.h | 16 +- - arch/arm64/include/asm/io.h | 1 + - arch/arm64/include/asm/mmu_context.h | 43 + - 
arch/arm64/include/asm/page.h | 6 +- - arch/arm64/include/asm/pgtable-hwdef.h | 7 +- - arch/arm64/include/asm/pgtable.h | 8 + - arch/arm64/kernel/head.S | 37 + - arch/arm64/kernel/smp.c | 1 + - arch/arm64/mm/mmu.c | 7 +- - arch/arm64/mm/proc-macros.S | 10 + - arch/arm64/mm/proc.S | 3 + - arch/ia64/kernel/msi_ia64.c | 8 +- - arch/ia64/sn/kernel/msi_sn.c | 8 +- - arch/mips/pci/msi-octeon.c | 2 +- - arch/mips/pci/msi-xlp.c | 12 +- - arch/mips/pci/pci-xlr.c | 2 +- - arch/powerpc/include/asm/mpc85xx.h | 94 - - arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | 3 +- - arch/powerpc/platforms/85xx/mpc85xx_mds.c | 2 +- - arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 2 +- - arch/powerpc/platforms/85xx/p1022_ds.c | 2 +- - arch/powerpc/platforms/85xx/p1022_rdk.c | 2 +- - arch/powerpc/platforms/85xx/smp.c | 2 +- - arch/powerpc/platforms/85xx/twr_p102x.c | 2 +- - arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 2 +- - arch/powerpc/platforms/cell/axon_msi.c | 8 +- - arch/powerpc/platforms/cell/interrupt.c | 3 +- - arch/powerpc/platforms/embedded6xx/flipper-pic.c | 3 +- - arch/powerpc/platforms/powermac/pic.c | 3 +- - arch/powerpc/platforms/powernv/pci.c | 2 +- - arch/powerpc/platforms/ps3/interrupt.c | 3 +- - arch/powerpc/platforms/pseries/msi.c | 2 +- - arch/powerpc/sysdev/ehv_pic.c | 3 +- - arch/powerpc/sysdev/fsl_msi.c | 6 +- - arch/powerpc/sysdev/i8259.c | 3 +- - arch/powerpc/sysdev/ipic.c | 3 +- - arch/powerpc/sysdev/mpic.c | 3 +- - arch/powerpc/sysdev/mpic_pasemi_msi.c | 6 +- - arch/powerpc/sysdev/mpic_u3msi.c | 6 +- - arch/powerpc/sysdev/ppc4xx_hsta_msi.c | 2 +- - arch/powerpc/sysdev/ppc4xx_msi.c | 2 +- - arch/powerpc/sysdev/qe_lib/qe_ic.c | 3 +- - arch/powerpc/sysdev/xics/ics-opal.c | 2 +- - arch/powerpc/sysdev/xics/ics-rtas.c | 2 +- - arch/powerpc/sysdev/xics/xics-common.c | 3 +- - arch/s390/pci/pci.c | 10 +- - arch/sparc/kernel/pci_msi.c | 10 +- - arch/tile/kernel/pci_gx.c | 8 +- - arch/x86/include/asm/x86_init.h | 3 - - arch/x86/kernel/apic/io_apic.c | 8 +- - 
arch/x86/kernel/x86_init.c | 10 - - arch/x86/pci/bus_numa.c | 4 +- - arch/x86/pci/xen.c | 23 +- - drivers/acpi/acpi_lpss.c | 8 +- - drivers/acpi/acpi_platform.c | 4 +- - drivers/acpi/resource.c | 17 +- - drivers/base/core.c | 3 + - drivers/base/platform.c | 1 + - drivers/block/loop.c | 18 + - drivers/clk/Kconfig | 10 +- - drivers/clk/Makefile | 2 +- - drivers/clk/clk-qoriq.c | 1256 ++++++++ - drivers/cpufreq/Kconfig.powerpc | 2 +- - drivers/dma/acpi-dma.c | 10 +- - drivers/i2c/busses/Kconfig | 4 +- - drivers/i2c/busses/i2c-imx.c | 373 ++- - drivers/i2c/muxes/i2c-mux-pca9541.c | 4 +- - drivers/i2c/muxes/i2c-mux-pca954x.c | 57 +- - drivers/iommu/Kconfig | 34 +- - drivers/iommu/Makefile | 2 + - drivers/iommu/amd_iommu.c | 6 +- - drivers/iommu/arm-smmu.c | 1382 +++++---- - drivers/iommu/exynos-iommu.c | 2 +- - drivers/iommu/fsl_pamu.c | 3 +- - drivers/iommu/intel-iommu.c | 1 + - drivers/iommu/io-pgtable-arm.c | 997 +++++++ - drivers/iommu/io-pgtable.c | 82 + - drivers/iommu/io-pgtable.h | 143 + - drivers/iommu/iommu.c | 111 +- - drivers/iommu/ipmmu-vmsa.c | 2 +- - drivers/iommu/irq_remapping.c | 8 - - drivers/iommu/msm_iommu.c | 1 + - drivers/iommu/of_iommu.c | 95 + - drivers/iommu/omap-iommu.c | 1 + - drivers/iommu/shmobile-iommu.c | 1 + - drivers/iommu/shmobile-ipmmu.c | 1 - - drivers/iommu/tegra-gart.c | 1 - - drivers/iommu/tegra-smmu.c | 2 +- - drivers/irqchip/Kconfig | 12 + - drivers/irqchip/Makefile | 2 + - drivers/irqchip/irq-armada-370-xp.c | 16 +- - drivers/irqchip/irq-atmel-aic.c | 40 +- - drivers/irqchip/irq-atmel-aic5.c | 65 +- - drivers/irqchip/irq-gic-common.c | 18 +- - drivers/irqchip/irq-gic-common.h | 2 +- - drivers/irqchip/irq-gic-v2m.c | 333 +++ - drivers/irqchip/irq-gic-v3-its.c | 1630 +++++++++++ - drivers/irqchip/irq-gic-v3.c | 180 +- - drivers/irqchip/irq-gic.c | 90 +- - drivers/irqchip/irq-hip04.c | 9 +- - drivers/irqchip/irq-sunxi-nmi.c | 4 +- - drivers/irqchip/irq-tb10x.c | 4 +- - drivers/memory/Kconfig | 2 +- - drivers/memory/fsl_ifc.c | 77 
+- - drivers/mfd/vexpress-sysreg.c | 2 +- - drivers/mmc/card/block.c | 4 + - drivers/mmc/host/Kconfig | 10 +- - drivers/mmc/host/sdhci-esdhc.h | 9 +- - drivers/mmc/host/sdhci-of-esdhc.c | 680 ++++- - drivers/mmc/host/sdhci.c | 250 +- - drivers/mmc/host/sdhci.h | 42 + - drivers/mtd/nand/Kconfig | 2 +- - drivers/mtd/nand/fsl_ifc_nand.c | 301 +- - drivers/net/ethernet/freescale/Kconfig | 8 +- - drivers/net/ethernet/freescale/fec_mpc52xx.c | 2 +- - drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 2 +- - .../net/ethernet/freescale/fs_enet/fs_enet-main.c | 4 +- - .../net/ethernet/freescale/fs_enet/mii-bitbang.c | 2 +- - drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 4 +- - drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2 +- - drivers/net/ethernet/freescale/gianfar.c | 8 +- - drivers/net/ethernet/freescale/gianfar_ptp.c | 2 +- - drivers/net/ethernet/freescale/ucc_geth.c | 2 +- - drivers/net/ethernet/freescale/xgmac_mdio.c | 194 +- - drivers/net/ethernet/intel/igb/e1000_82575.c | 6 + - drivers/net/ethernet/intel/igb/e1000_defines.h | 1 + - drivers/net/ethernet/intel/igb/e1000_hw.h | 1 + - drivers/net/ethernet/intel/igb/igb_main.c | 1 + - drivers/net/phy/Kconfig | 19 +- - drivers/net/phy/Makefile | 5 +- - drivers/net/phy/aquantia.c | 201 ++ - drivers/net/phy/at803x.c | 4 + - drivers/net/phy/fixed.c | 336 --- - drivers/net/phy/fixed_phy.c | 370 +++ - drivers/net/phy/fsl_10gkr.c | 1467 ++++++++++ - drivers/net/phy/marvell.c | 11 + - drivers/net/phy/mdio_bus.c | 34 +- - drivers/net/phy/phy.c | 19 +- - drivers/net/phy/phy_device.c | 90 +- - drivers/net/phy/realtek.c | 82 +- - drivers/net/phy/teranetics.c | 135 + - drivers/of/base.c | 53 +- - drivers/of/device.c | 84 + - drivers/of/dynamic.c | 13 - - drivers/of/fdt.c | 30 +- - drivers/of/irq.c | 21 + - drivers/of/of_pci.c | 34 +- - drivers/of/pdt.c | 27 +- - drivers/of/platform.c | 139 +- - drivers/of/selftest.c | 71 +- - drivers/pci/Kconfig | 6 + - drivers/pci/Makefile | 1 + - drivers/pci/access.c | 87 + - 
drivers/pci/bus.c | 18 +- - drivers/pci/host-bridge.c | 22 +- - drivers/pci/host/Kconfig | 19 +- - drivers/pci/host/Makefile | 3 + - drivers/pci/host/pci-dra7xx.c | 8 +- - drivers/pci/host/pci-exynos.c | 5 +- - drivers/pci/host/pci-host-generic.c | 229 +- - drivers/pci/host/pci-keystone-dw.c | 37 +- - drivers/pci/host/pci-keystone.h | 4 +- - drivers/pci/host/pci-layerscape.c | 729 +++++ - drivers/pci/host/pci-layerscape.h | 13 + - drivers/pci/host/pci-mvebu.c | 17 +- - drivers/pci/host/pci-tegra.c | 22 +- - drivers/pci/host/pci-xgene-msi.c | 595 ++++ - drivers/pci/host/pci-xgene.c | 25 +- - drivers/pci/host/pcie-designware.c | 665 ++--- - drivers/pci/host/pcie-designware.h | 24 +- - drivers/pci/host/pcie-rcar.c | 22 +- - drivers/pci/host/pcie-xilinx.c | 64 +- - drivers/pci/msi.c | 533 ++-- - drivers/pci/pci.c | 1 + - drivers/pci/pci.h | 21 + - drivers/pci/pcie/portdrv_core.c | 31 +- - drivers/pci/probe.c | 29 +- - drivers/pci/quirks.c | 10 +- - drivers/pci/remove.c | 2 + - drivers/pci/search.c | 5 +- - drivers/pci/setup-bus.c | 1 + - drivers/pci/setup-irq.c | 1 + - drivers/pci/xen-pcifront.c | 2 +- - drivers/power/reset/Kconfig | 6 + - drivers/power/reset/Makefile | 1 + - drivers/power/reset/ls-reboot.c | 93 + - drivers/soc/Kconfig | 13 + - drivers/soc/Makefile | 1 + - drivers/soc/fsl/Kconfig | 6 + - drivers/soc/fsl/Kconfig.arm | 25 + - drivers/soc/fsl/Makefile | 6 + - drivers/soc/fsl/guts.c | 123 + - drivers/soc/fsl/ls1/Kconfig | 11 + - drivers/soc/fsl/ls1/Makefile | 1 + - drivers/soc/fsl/ls1/ftm_alarm.c | 274 ++ - drivers/staging/Kconfig | 4 + - drivers/staging/Makefile | 2 + - drivers/staging/fsl-dpaa2/Kconfig | 12 + - drivers/staging/fsl-dpaa2/Makefile | 6 + - drivers/staging/fsl-dpaa2/ethernet/Kconfig | 36 + - drivers/staging/fsl-dpaa2/ethernet/Makefile | 21 + - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 317 ++ - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 61 + - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 185 ++ - 
drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2957 +++++++++++++++++++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 397 +++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 732 +++++ - drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 175 ++ - drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 1058 +++++++ - drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1907 ++++++++++++ - drivers/staging/fsl-dpaa2/ethernet/dpni.h | 2581 +++++++++++++++++ - drivers/staging/fsl-dpaa2/mac/Kconfig | 24 + - drivers/staging/fsl-dpaa2/mac/Makefile | 10 + - drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 195 ++ - drivers/staging/fsl-dpaa2/mac/dpmac.c | 422 +++ - drivers/staging/fsl-dpaa2/mac/dpmac.h | 593 ++++ - drivers/staging/fsl-dpaa2/mac/mac.c | 694 +++++ - drivers/staging/fsl-mc/Kconfig | 1 + - drivers/staging/fsl-mc/Makefile | 2 + - drivers/staging/fsl-mc/TODO | 13 + - drivers/staging/fsl-mc/bus/Kconfig | 45 + - drivers/staging/fsl-mc/bus/Makefile | 24 + - drivers/staging/fsl-mc/bus/dpbp.c | 459 +++ - drivers/staging/fsl-mc/bus/dpcon.c | 407 +++ - drivers/staging/fsl-mc/bus/dpio/Makefile | 9 + - drivers/staging/fsl-mc/bus/dpio/dpio-drv.c | 401 +++ - drivers/staging/fsl-mc/bus/dpio/dpio-drv.h | 33 + - drivers/staging/fsl-mc/bus/dpio/dpio.c | 468 +++ - drivers/staging/fsl-mc/bus/dpio/dpio_service.c | 801 ++++++ - drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h | 460 +++ - drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h | 184 ++ - drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h | 123 + - drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h | 753 +++++ - drivers/staging/fsl-mc/bus/dpio/qbman_debug.c | 846 ++++++ - drivers/staging/fsl-mc/bus/dpio/qbman_debug.h | 136 + - drivers/staging/fsl-mc/bus/dpio/qbman_portal.c | 1212 ++++++++ - drivers/staging/fsl-mc/bus/dpio/qbman_portal.h | 261 ++ - drivers/staging/fsl-mc/bus/dpio/qbman_private.h | 173 ++ - drivers/staging/fsl-mc/bus/dpio/qbman_sys.h | 307 ++ - drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h | 86 + - 
drivers/staging/fsl-mc/bus/dpio/qbman_test.c | 664 +++++ - drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 56 + - drivers/staging/fsl-mc/bus/dpmcp.c | 318 ++ - drivers/staging/fsl-mc/bus/dpmcp.h | 323 +++ - drivers/staging/fsl-mc/bus/dpmng-cmd.h | 47 + - drivers/staging/fsl-mc/bus/dpmng.c | 85 + - drivers/staging/fsl-mc/bus/dprc-cmd.h | 87 + - drivers/staging/fsl-mc/bus/dprc-driver.c | 1084 +++++++ - drivers/staging/fsl-mc/bus/dprc.c | 1218 ++++++++ - drivers/staging/fsl-mc/bus/mc-allocator.c | 716 +++++ - drivers/staging/fsl-mc/bus/mc-bus.c | 1347 +++++++++ - drivers/staging/fsl-mc/bus/mc-ioctl.h | 25 + - drivers/staging/fsl-mc/bus/mc-restool.c | 312 ++ - drivers/staging/fsl-mc/bus/mc-sys.c | 677 +++++ - drivers/staging/fsl-mc/include/dpbp-cmd.h | 62 + - drivers/staging/fsl-mc/include/dpbp.h | 438 +++ - drivers/staging/fsl-mc/include/dpcon-cmd.h | 162 ++ - drivers/staging/fsl-mc/include/dpcon.h | 407 +++ - drivers/staging/fsl-mc/include/dpmac-cmd.h | 192 ++ - drivers/staging/fsl-mc/include/dpmac.h | 528 ++++ - drivers/staging/fsl-mc/include/dpmng.h | 80 + - drivers/staging/fsl-mc/include/dprc.h | 990 +++++++ - drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h | 774 +++++ - drivers/staging/fsl-mc/include/fsl_dpaa2_io.h | 619 ++++ - drivers/staging/fsl-mc/include/mc-cmd.h | 133 + - drivers/staging/fsl-mc/include/mc-private.h | 168 ++ - drivers/staging/fsl-mc/include/mc-sys.h | 128 + - drivers/staging/fsl-mc/include/mc.h | 244 ++ - drivers/staging/fsl-mc/include/net.h | 481 ++++ - drivers/usb/core/config.c | 3 +- - drivers/usb/core/driver.c | 6 +- - drivers/usb/core/hcd-pci.c | 9 + - drivers/usb/core/hub.c | 66 +- - drivers/usb/core/quirks.c | 6 + - drivers/usb/dwc3/core.c | 76 +- - drivers/usb/dwc3/core.h | 8 + - drivers/usb/dwc3/host.c | 6 + - drivers/usb/host/xhci-pci.c | 114 +- - drivers/usb/host/xhci-ring.c | 6 +- - drivers/usb/host/xhci.c | 34 +- - drivers/usb/host/xhci.h | 3 + - drivers/vfio/Kconfig | 5 +- - drivers/vfio/Makefile | 1 + - drivers/vfio/fsl-mc/Kconfig | 9 + 
- drivers/vfio/fsl-mc/Makefile | 2 + - drivers/vfio/fsl-mc/vfio_fsl_mc.c | 603 ++++ - drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c | 273 ++ - drivers/vfio/fsl-mc/vfio_fsl_mc_private.h | 43 + - drivers/vfio/pci/vfio_pci_intrs.c | 2 +- - drivers/vfio/vfio_iommu_type1.c | 5 +- - fs/Kconfig | 1 + - fs/Makefile | 1 + - fs/aufs/Kconfig | 185 ++ - fs/aufs/Makefile | 44 + - fs/aufs/aufs.h | 59 + - fs/aufs/branch.c | 1402 +++++++++ - fs/aufs/branch.h | 279 ++ - fs/aufs/conf.mk | 38 + - fs/aufs/cpup.c | 1368 +++++++++ - fs/aufs/cpup.h | 94 + - fs/aufs/dbgaufs.c | 432 +++ - fs/aufs/dbgaufs.h | 48 + - fs/aufs/dcsub.c | 224 ++ - fs/aufs/dcsub.h | 123 + - fs/aufs/debug.c | 436 +++ - fs/aufs/debug.h | 228 ++ - fs/aufs/dentry.c | 1129 ++++++++ - fs/aufs/dentry.h | 234 ++ - fs/aufs/dinfo.c | 544 ++++ - fs/aufs/dir.c | 756 +++++ - fs/aufs/dir.h | 131 + - fs/aufs/dynop.c | 379 +++ - fs/aufs/dynop.h | 76 + - fs/aufs/export.c | 831 ++++++ - fs/aufs/f_op.c | 781 +++++ - fs/aufs/fhsm.c | 426 +++ - fs/aufs/file.c | 857 ++++++ - fs/aufs/file.h | 291 ++ - fs/aufs/finfo.c | 156 + - fs/aufs/fstype.h | 400 +++ - fs/aufs/hfsnotify.c | 288 ++ - fs/aufs/hfsplus.c | 56 + - fs/aufs/hnotify.c | 714 +++++ - fs/aufs/i_op.c | 1460 ++++++++++ - fs/aufs/i_op_add.c | 930 ++++++ - fs/aufs/i_op_del.c | 506 ++++ - fs/aufs/i_op_ren.c | 1013 +++++++ - fs/aufs/iinfo.c | 277 ++ - fs/aufs/inode.c | 522 ++++ - fs/aufs/inode.h | 686 +++++ - fs/aufs/ioctl.c | 219 ++ - fs/aufs/loop.c | 146 + - fs/aufs/loop.h | 52 + - fs/aufs/magic.mk | 30 + - fs/aufs/module.c | 222 ++ - fs/aufs/module.h | 105 + - fs/aufs/mvdown.c | 703 +++++ - fs/aufs/opts.c | 1878 ++++++++++++ - fs/aufs/opts.h | 212 ++ - fs/aufs/plink.c | 506 ++++ - fs/aufs/poll.c | 52 + - fs/aufs/posix_acl.c | 98 + - fs/aufs/procfs.c | 169 ++ - fs/aufs/rdu.c | 388 +++ - fs/aufs/rwsem.h | 191 ++ - fs/aufs/sbinfo.c | 348 +++ - fs/aufs/spl.h | 111 + - fs/aufs/super.c | 1041 +++++++ - fs/aufs/super.h | 626 ++++ - fs/aufs/sysaufs.c | 104 + - fs/aufs/sysaufs.h | 101 + - 
fs/aufs/sysfs.c | 376 +++ - fs/aufs/sysrq.c | 157 + - fs/aufs/vdir.c | 888 ++++++ - fs/aufs/vfsub.c | 864 ++++++ - fs/aufs/vfsub.h | 315 ++ - fs/aufs/wbr_policy.c | 765 +++++ - fs/aufs/whout.c | 1061 +++++++ - fs/aufs/whout.h | 85 + - fs/aufs/wkq.c | 213 ++ - fs/aufs/wkq.h | 91 + - fs/aufs/xattr.c | 344 +++ - fs/aufs/xino.c | 1343 +++++++++ - fs/buffer.c | 2 +- - fs/dcache.c | 2 +- - fs/fcntl.c | 4 +- - fs/inode.c | 2 +- - fs/proc/base.c | 2 +- - fs/proc/nommu.c | 5 +- - fs/proc/task_mmu.c | 7 +- - fs/proc/task_nommu.c | 5 +- - fs/splice.c | 10 +- - include/asm-generic/msi.h | 32 + - include/asm-generic/vmlinux.lds.h | 2 + - include/linux/acpi.h | 6 +- - include/linux/device.h | 24 + - include/linux/dma-mapping.h | 13 +- - include/linux/file.h | 1 + - include/linux/fs.h | 3 + - include/linux/fsl/guts.h | 195 ++ - include/linux/fsl/svr.h | 95 + - include/linux/fsl_ifc.h | 116 +- - include/linux/interrupt.h | 14 + - include/linux/iommu.h | 76 +- - include/linux/iopoll.h | 144 + - include/linux/irq.h | 75 +- - include/linux/irqchip/arm-gic-v3.h | 165 ++ - include/linux/irqchip/arm-gic.h | 2 + - include/linux/irqdomain.h | 127 +- - include/linux/irqhandler.h | 14 + - include/linux/mm.h | 22 + - include/linux/mm_types.h | 2 + - include/linux/mmc/sdhci.h | 16 +- - include/linux/msi.h | 199 +- - include/linux/of.h | 11 +- - include/linux/of_device.h | 3 + - include/linux/of_iommu.h | 25 + - include/linux/of_irq.h | 1 + - include/linux/of_pci.h | 15 +- - include/linux/of_pdt.h | 3 +- - include/linux/of_platform.h | 6 + - include/linux/pci.h | 32 +- - include/linux/phy.h | 1 + - include/linux/phy_fixed.h | 11 +- - include/linux/resource_ext.h | 77 + - include/linux/splice.h | 6 + - include/linux/usb/quirks.h | 3 + - include/trace/events/iommu.h | 31 +- - include/uapi/linux/Kbuild | 1 + - include/uapi/linux/aufs_type.h | 419 +++ - include/uapi/linux/vfio.h | 5 + - kernel/fork.c | 2 +- - kernel/irq/Kconfig | 15 + - kernel/irq/Makefile | 1 + - kernel/irq/chip.c | 163 +- - 
kernel/irq/generic-chip.c | 36 +- - kernel/irq/irqdomain.c | 585 +++- - kernel/irq/manage.c | 93 + - kernel/irq/msi.c | 356 +++ - kernel/resource.c | 25 + - mm/Makefile | 2 +- - mm/filemap.c | 2 +- - mm/fremap.c | 16 +- - mm/memory.c | 2 +- - mm/mmap.c | 12 +- - mm/nommu.c | 10 +- - mm/prfile.c | 86 + - scripts/Kbuild.include | 6 + - scripts/Makefile.dtbinst | 51 + - scripts/Makefile.lib | 12 - - sound/soc/fsl/mpc8610_hpcd.c | 2 +- - sound/soc/fsl/p1022_ds.c | 2 +- - sound/soc/fsl/p1022_rdk.c | 2 +- - 467 files changed, 87181 insertions(+), 3457 deletions(-) - create mode 100644 arch/arm64/boot/dts/Makefile.rej - create mode 100644 arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts - create mode 100644 arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts - create mode 100644 arch/arm64/boot/dts/fsl-ls2080a.dtsi - create mode 100644 arch/arm64/boot/dts/fsl-ls2088a.dtsi - create mode 120000 arch/arm64/boot/dts/include/dt-bindings - create mode 100644 arch/arm64/boot/dts/thermal.h - create mode 100644 arch/arm64/configs/nxp_ls2088rdb_config - delete mode 100644 arch/powerpc/include/asm/mpc85xx.h - create mode 100644 drivers/clk/clk-qoriq.c - create mode 100644 drivers/iommu/io-pgtable-arm.c - create mode 100644 drivers/iommu/io-pgtable.c - create mode 100644 drivers/iommu/io-pgtable.h - create mode 100644 drivers/irqchip/irq-gic-v2m.c - create mode 100644 drivers/irqchip/irq-gic-v3-its.c - create mode 100644 drivers/net/phy/aquantia.c - delete mode 100644 drivers/net/phy/fixed.c - create mode 100644 drivers/net/phy/fixed_phy.c - create mode 100644 drivers/net/phy/fsl_10gkr.c - create mode 100644 drivers/net/phy/teranetics.c - create mode 100644 drivers/pci/host/pci-layerscape.c - create mode 100644 drivers/pci/host/pci-layerscape.h - create mode 100644 drivers/pci/host/pci-xgene-msi.c - create mode 100644 drivers/power/reset/ls-reboot.c - create mode 100644 drivers/soc/fsl/Kconfig - create mode 100644 drivers/soc/fsl/Kconfig.arm - create mode 100644 drivers/soc/fsl/Makefile - 
create mode 100644 drivers/soc/fsl/guts.c - create mode 100644 drivers/soc/fsl/ls1/Kconfig - create mode 100644 drivers/soc/fsl/ls1/Makefile - create mode 100644 drivers/soc/fsl/ls1/ftm_alarm.c - create mode 100644 drivers/staging/fsl-dpaa2/Kconfig - create mode 100644 drivers/staging/fsl-dpaa2/Makefile - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Kconfig - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h - create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig - create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile - create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h - create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c - create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h - create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c - create mode 100644 drivers/staging/fsl-mc/Kconfig - create mode 100644 drivers/staging/fsl-mc/Makefile - create mode 100644 drivers/staging/fsl-mc/TODO - create mode 100644 drivers/staging/fsl-mc/bus/Kconfig - create mode 100644 drivers/staging/fsl-mc/bus/Makefile - create mode 100644 drivers/staging/fsl-mc/bus/dpbp.c - create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.c - create mode 
100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio_service.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_private.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_test.c - create mode 100644 drivers/staging/fsl-mc/bus/dpmcp-cmd.h - create mode 100644 drivers/staging/fsl-mc/bus/dpmcp.c - create mode 100644 drivers/staging/fsl-mc/bus/dpmcp.h - create mode 100644 drivers/staging/fsl-mc/bus/dpmng-cmd.h - create mode 100644 drivers/staging/fsl-mc/bus/dpmng.c - create mode 100644 drivers/staging/fsl-mc/bus/dprc-cmd.h - create mode 100644 drivers/staging/fsl-mc/bus/dprc-driver.c - create mode 100644 drivers/staging/fsl-mc/bus/dprc.c - create mode 100644 drivers/staging/fsl-mc/bus/mc-allocator.c - create mode 100644 drivers/staging/fsl-mc/bus/mc-bus.c - create mode 100644 drivers/staging/fsl-mc/bus/mc-ioctl.h - create mode 100644 drivers/staging/fsl-mc/bus/mc-restool.c - create mode 100644 drivers/staging/fsl-mc/bus/mc-sys.c - create mode 100644 drivers/staging/fsl-mc/include/dpbp-cmd.h - create mode 100644 drivers/staging/fsl-mc/include/dpbp.h - create mode 100644 drivers/staging/fsl-mc/include/dpcon-cmd.h - create mode 100644 drivers/staging/fsl-mc/include/dpcon.h 
- create mode 100644 drivers/staging/fsl-mc/include/dpmac-cmd.h - create mode 100644 drivers/staging/fsl-mc/include/dpmac.h - create mode 100644 drivers/staging/fsl-mc/include/dpmng.h - create mode 100644 drivers/staging/fsl-mc/include/dprc.h - create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h - create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_io.h - create mode 100644 drivers/staging/fsl-mc/include/mc-cmd.h - create mode 100644 drivers/staging/fsl-mc/include/mc-private.h - create mode 100644 drivers/staging/fsl-mc/include/mc-sys.h - create mode 100644 drivers/staging/fsl-mc/include/mc.h - create mode 100644 drivers/staging/fsl-mc/include/net.h - create mode 100644 drivers/vfio/fsl-mc/Kconfig - create mode 100644 drivers/vfio/fsl-mc/Makefile - create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc.c - create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c - create mode 100644 drivers/vfio/fsl-mc/vfio_fsl_mc_private.h - create mode 100644 fs/aufs/Kconfig - create mode 100644 fs/aufs/Makefile - create mode 100644 fs/aufs/aufs.h - create mode 100644 fs/aufs/branch.c - create mode 100644 fs/aufs/branch.h - create mode 100644 fs/aufs/conf.mk - create mode 100644 fs/aufs/cpup.c - create mode 100644 fs/aufs/cpup.h - create mode 100644 fs/aufs/dbgaufs.c - create mode 100644 fs/aufs/dbgaufs.h - create mode 100644 fs/aufs/dcsub.c - create mode 100644 fs/aufs/dcsub.h - create mode 100644 fs/aufs/debug.c - create mode 100644 fs/aufs/debug.h - create mode 100644 fs/aufs/dentry.c - create mode 100644 fs/aufs/dentry.h - create mode 100644 fs/aufs/dinfo.c - create mode 100644 fs/aufs/dir.c - create mode 100644 fs/aufs/dir.h - create mode 100644 fs/aufs/dynop.c - create mode 100644 fs/aufs/dynop.h - create mode 100644 fs/aufs/export.c - create mode 100644 fs/aufs/f_op.c - create mode 100644 fs/aufs/fhsm.c - create mode 100644 fs/aufs/file.c - create mode 100644 fs/aufs/file.h - create mode 100644 fs/aufs/finfo.c - create mode 100644 fs/aufs/fstype.h - 
create mode 100644 fs/aufs/hfsnotify.c - create mode 100644 fs/aufs/hfsplus.c - create mode 100644 fs/aufs/hnotify.c - create mode 100644 fs/aufs/i_op.c - create mode 100644 fs/aufs/i_op_add.c - create mode 100644 fs/aufs/i_op_del.c - create mode 100644 fs/aufs/i_op_ren.c - create mode 100644 fs/aufs/iinfo.c - create mode 100644 fs/aufs/inode.c - create mode 100644 fs/aufs/inode.h - create mode 100644 fs/aufs/ioctl.c - create mode 100644 fs/aufs/loop.c - create mode 100644 fs/aufs/loop.h - create mode 100644 fs/aufs/magic.mk - create mode 100644 fs/aufs/module.c - create mode 100644 fs/aufs/module.h - create mode 100644 fs/aufs/mvdown.c - create mode 100644 fs/aufs/opts.c - create mode 100644 fs/aufs/opts.h - create mode 100644 fs/aufs/plink.c - create mode 100644 fs/aufs/poll.c - create mode 100644 fs/aufs/posix_acl.c - create mode 100644 fs/aufs/procfs.c - create mode 100644 fs/aufs/rdu.c - create mode 100644 fs/aufs/rwsem.h - create mode 100644 fs/aufs/sbinfo.c - create mode 100644 fs/aufs/spl.h - create mode 100644 fs/aufs/super.c - create mode 100644 fs/aufs/super.h - create mode 100644 fs/aufs/sysaufs.c - create mode 100644 fs/aufs/sysaufs.h - create mode 100644 fs/aufs/sysfs.c - create mode 100644 fs/aufs/sysrq.c - create mode 100644 fs/aufs/vdir.c - create mode 100644 fs/aufs/vfsub.c - create mode 100644 fs/aufs/vfsub.h - create mode 100644 fs/aufs/wbr_policy.c - create mode 100644 fs/aufs/whout.c - create mode 100644 fs/aufs/whout.h - create mode 100644 fs/aufs/wkq.c - create mode 100644 fs/aufs/wkq.h - create mode 100644 fs/aufs/xattr.c - create mode 100644 fs/aufs/xino.c - create mode 100644 include/asm-generic/msi.h - create mode 100644 include/linux/fsl/guts.h - create mode 100644 include/linux/fsl/svr.h - create mode 100644 include/linux/iopoll.h - create mode 100644 include/linux/irqhandler.h - create mode 100644 include/linux/resource_ext.h - create mode 100644 include/uapi/linux/aufs_type.h - create mode 100644 kernel/irq/msi.c - create mode 100644 
mm/prfile.c - create mode 100644 scripts/Makefile.dtbinst - -diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt -index 8a8b82c..39cfa72 100644 ---- a/Documentation/IRQ-domain.txt -+++ b/Documentation/IRQ-domain.txt -@@ -151,3 +151,74 @@ used and no descriptor gets allocated it is very important to make sure - that the driver using the simple domain call irq_create_mapping() - before any irq_find_mapping() since the latter will actually work - for the static IRQ assignment case. -+ -+==== Hierarchy IRQ domain ==== -+On some architectures, there may be multiple interrupt controllers -+involved in delivering an interrupt from the device to the target CPU. -+Let's look at a typical interrupt delivering path on x86 platforms: -+ -+Device --> IOAPIC -> Interrupt remapping Controller -> Local APIC -> CPU -+ -+There are three interrupt controllers involved: -+1) IOAPIC controller -+2) Interrupt remapping controller -+3) Local APIC controller -+ -+To support such a hardware topology and make software architecture match -+hardware architecture, an irq_domain data structure is built for each -+interrupt controller and those irq_domains are organized into hierarchy. -+When building irq_domain hierarchy, the irq_domain near to the device is -+child and the irq_domain near to CPU is parent. So a hierarchy structure -+as below will be built for the example above. -+ CPU Vector irq_domain (root irq_domain to manage CPU vectors) -+ ^ -+ | -+ Interrupt Remapping irq_domain (manage irq_remapping entries) -+ ^ -+ | -+ IOAPIC irq_domain (manage IOAPIC delivery entries/pins) -+ -+There are four major interfaces to use hierarchy irq_domain: -+1) irq_domain_alloc_irqs(): allocate IRQ descriptors and interrupt -+ controller related resources to deliver these interrupts. -+2) irq_domain_free_irqs(): free IRQ descriptors and interrupt controller -+ related resources associated with these interrupts. 
-+3) irq_domain_activate_irq(): activate interrupt controller hardware to -+ deliver the interrupt. -+3) irq_domain_deactivate_irq(): deactivate interrupt controller hardware -+ to stop delivering the interrupt. -+ -+Following changes are needed to support hierarchy irq_domain. -+1) a new field 'parent' is added to struct irq_domain; it's used to -+ maintain irq_domain hierarchy information. -+2) a new field 'parent_data' is added to struct irq_data; it's used to -+ build hierarchy irq_data to match hierarchy irq_domains. The irq_data -+ is used to store irq_domain pointer and hardware irq number. -+3) new callbacks are added to struct irq_domain_ops to support hierarchy -+ irq_domain operations. -+ -+With support of hierarchy irq_domain and hierarchy irq_data ready, an -+irq_domain structure is built for each interrupt controller, and an -+irq_data structure is allocated for each irq_domain associated with an -+IRQ. Now we could go one step further to support stacked(hierarchy) -+irq_chip. That is, an irq_chip is associated with each irq_data along -+the hierarchy. A child irq_chip may implement a required action by -+itself or by cooperating with its parent irq_chip. -+ -+With stacked irq_chip, interrupt controller driver only needs to deal -+with the hardware managed by itself and may ask for services from its -+parent irq_chip when needed. So we could achieve a much cleaner -+software architecture. -+ -+For an interrupt controller driver to support hierarchy irq_domain, it -+needs to: -+1) Implement irq_domain_ops.alloc and irq_domain_ops.free -+2) Optionally implement irq_domain_ops.activate and -+ irq_domain_ops.deactivate. -+3) Optionally implement an irq_chip to manage the interrupt controller -+ hardware. -+4) No need to implement irq_domain_ops.map and irq_domain_ops.unmap, -+ they are unused with hierarchy irq_domain. -+ -+Hierarchy irq_domain may also be used to support other architectures, -+such as ARM, ARM64 etc. 
-diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt -index e935d7d..5c9f338 100644 ---- a/Documentation/devicetree/bindings/arm/fsl.txt -+++ b/Documentation/devicetree/bindings/arm/fsl.txt -@@ -74,3 +74,18 @@ Required root node properties: - i.MX6q generic board - Required root node properties: - - compatible = "fsl,imx6q"; -+ -++Freescale ARMv8 based Layerscape SoC family Device Tree Bindings -++---------------------------------------------------------------- -+ -+LS2080A ARMv8 based Simulator model -+Required root node properties: -+ - compatible = "fsl,ls2080a-simu", "fsl,ls2080a"; -+ -+LS2080A ARMv8 based QDS Board -+Required root node properties: -+ - compatible = "fsl,ls2080a-qds", "fsl,ls2080a"; -+ -+LS2080A ARMv8 based RDB Board -+Required root node properties: -+ - compatible = "fsl,ls2080a-rdb", "fsl,ls2080a"; -diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt -index c7d2fa1..e87d3d7 100644 ---- a/Documentation/devicetree/bindings/arm/gic.txt -+++ b/Documentation/devicetree/bindings/arm/gic.txt -@@ -31,12 +31,16 @@ Main node required properties: - The 3rd cell is the flags, encoded as follows: - bits[3:0] trigger type and level flags. - 1 = low-to-high edge triggered -- 2 = high-to-low edge triggered -+ 2 = high-to-low edge triggered (invalid for SPIs) - 4 = active high level-sensitive -- 8 = active low level-sensitive -+ 8 = active low level-sensitive (invalid for SPIs). - bits[15:8] PPI interrupt cpu mask. Each bit corresponds to each of - the 8 possible cpus attached to the GIC. A bit set to '1' indicated - the interrupt is wired to that CPU. Only valid for PPI interrupts. -+ Also note that the configurability of PPI interrupts is IMPLEMENTATION -+ DEFINED and as such not guaranteed to be present (most SoC available -+ in 2014 seem to ignore the setting of this flag and use the hardware -+ default value). 
- - - reg : Specifies base physical address(s) and size of the GIC registers. The - first region is the GIC distributor register base and size. The 2nd region is -diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt -index 5666812..128fc72 100644 ---- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt -+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt -@@ -1,6 +1,6 @@ --* Clock Block on Freescale CoreNet Platforms -+* Clock Block on Freescale QorIQ Platforms - --Freescale CoreNet chips take primary clocking input from the external -+Freescale QorIQ chips take primary clocking input from the external - SYSCLK signal. The SYSCLK input (frequency) is multiplied using - multiple phase locked loops (PLL) to create a variety of frequencies - which can then be passed to a variety of internal logic, including -@@ -13,14 +13,16 @@ which the chip complies. - Chassis Version Example Chips - --------------- ------------- - 1.0 p4080, p5020, p5040 --2.0 t4240, b4860, t1040 -+2.0 t4240, b4860 - - 1. Clock Block Binding - - Required properties: --- compatible: Should contain a specific clock block compatible string -- and a single chassis clock compatible string. -- Clock block strings include, but not limited to, one of the: -+- compatible: Should contain a chip-specific clock block compatible -+ string and (if applicable) may contain a chassis-version clock -+ compatible string. 
-+ -+ Chip-specific strings are of the form "fsl,-clockgen", such as: - * "fsl,p2041-clockgen" - * "fsl,p3041-clockgen" - * "fsl,p4080-clockgen" -@@ -29,15 +31,15 @@ Required properties: - * "fsl,t4240-clockgen" - * "fsl,b4420-clockgen" - * "fsl,b4860-clockgen" -- Chassis clock strings include: -+ * "fsl,ls1021a-clockgen" -+ Chassis-version clock strings include: - * "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks - * "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks - - reg: Describes the address of the device's resources within the - address space defined by its parent bus, and resource zero - represents the clock register set --- clock-frequency: Input system clock frequency - --Recommended properties: -+Optional properties: - - ranges: Allows valid translation between child's address space and - parent's. Must be present if the device has sub-nodes. - - #address-cells: Specifies the number of cells used to represent -@@ -46,8 +48,46 @@ Recommended properties: - - #size-cells: Specifies the number of cells used to represent - the size of an address. Must be present if the device has - sub-nodes and set to 1 if present -+- clock-frequency: Input system clock frequency (SYSCLK) -+- clocks: If clock-frequency is not specified, sysclk may be provided -+ as an input clock. Either clock-frequency or clocks must be -+ provided. -+ -+2. Clock Provider -+ -+The clockgen node should act as a clock provider, though in older device -+trees the children of the clockgen node are the clock providers. -+ -+When the clockgen node is a clock provider, #clock-cells = <2>. -+The first cell of the clock specifier is the clock type, and the -+second cell is the clock index for the specified type. -+ -+ Type# Name Index Cell -+ 0 sysclk must be 0 -+ 1 cmux index (n in CLKCnCSR) -+ 2 hwaccel index (n in CLKCGnHWACSR) -+ 3 fman 0 for fm1, 1 for fm2 -+ 4 platform pll 0=pll, 1=pll/2, 2=pll/3, 3=pll/4 -+ -+3. 
Example -+ -+ clockgen: global-utilities@e1000 { -+ compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; -+ clock-frequency = <133333333>; -+ reg = <0xe1000 0x1000>; -+ #clock-cells = <2>; -+ }; -+ -+ fman@400000 { -+ ... -+ clocks = <&clockgen 3 0>; -+ ... -+ }; -+} -+4. Legacy Child Nodes - --2. Clock Provider/Consumer Binding -+NOTE: These nodes are deprecated. Kernels should continue to support -+device trees with these nodes, but new device trees should not use them. - - Most of the bindings are from the common clock binding[1]. - [1] Documentation/devicetree/bindings/clock/clock-bindings.txt -@@ -79,7 +119,7 @@ Recommended properties: - - reg: Should be the offset and length of clock block base address. - The length should be 4. - --Example for clock block and clock provider: -+Legacy Example: - / { - clockgen: global-utilities@e1000 { - compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; -@@ -131,7 +171,7 @@ Example for clock block and clock provider: - }; - } - --Example for clock consumer: -+Example for legacy clock consumer: - - / { - cpu0: PowerPC,e5500@0 { -diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx.txt b/Documentation/devicetree/bindings/i2c/i2c-imx.txt -index 4a8513e..52d37fd 100644 ---- a/Documentation/devicetree/bindings/i2c/i2c-imx.txt -+++ b/Documentation/devicetree/bindings/i2c/i2c-imx.txt -@@ -11,6 +11,8 @@ Required properties: - Optional properties: - - clock-frequency : Constains desired I2C/HS-I2C bus clock frequency in Hz. - The absence of the propoerty indicates the default frequency 100 kHz. -+- dmas: A list of two dma specifiers, one for each entry in dma-names. -+- dma-names: should contain "tx" and "rx". 
- - Examples: - -@@ -26,3 +28,12 @@ i2c@70038000 { /* HS-I2C on i.MX51 */ - interrupts = <64>; - clock-frequency = <400000>; - }; -+ -+i2c0: i2c@40066000 { /* i2c0 on vf610 */ -+ compatible = "fsl,vf610-i2c"; -+ reg = <0x40066000 0x1000>; -+ interrupts =<0 71 0x04>; -+ dmas = <&edma0 0 50>, -+ <&edma0 0 51>; -+ dma-names = "rx","tx"; -+}; -diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt -index 34a3fb6..cf53d5f 100644 ---- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt -+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt -@@ -16,6 +16,9 @@ Required Properties: - Optional Properties: - - - reset-gpios: Reference to the GPIO connected to the reset input. -+ - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all -+ children in idle state. This is necessary for example, if there are several -+ multiplexers on the bus and the devices behind them use same I2C addresses. - - - Example: -diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt -index d5e3704..89427b0 100644 ---- a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt -+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt -@@ -18,6 +18,8 @@ Properties: - interrupt (NAND_EVTER_STAT). If there is only one, - that interrupt reports both types of event. - -+- little-endian : If this property is absent, the big-endian mode will -+ be in use as default for registers. - - - ranges : Each range corresponds to a single chipselect, and covers - the entire access window as configured. 
-@@ -34,6 +36,7 @@ Example: - #size-cells = <1>; - reg = <0x0 0xffe1e000 0 0x2000>; - interrupts = <16 2 19 2>; -+ little-endian; - - /* NOR, NAND Flashes and CPLD on board */ - ranges = <0x0 0x0 0x0 0xee000000 0x02000000 -diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt -index 9f4faa8..0036ab3 100644 ---- a/Documentation/devicetree/bindings/pci/designware-pcie.txt -+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt -@@ -14,7 +14,6 @@ Required properties: - - interrupt-map-mask and interrupt-map: standard PCI properties - to define the mapping of the PCIe interface to interrupt - numbers. --- num-lanes: number of lanes to use - - clocks: Must contain an entry for each entry in clock-names. - See ../clocks/clock-bindings.txt for details. - - clock-names: Must include the following entries: -@@ -22,6 +21,8 @@ Required properties: - - "pcie_bus" - - Optional properties: -+- num-lanes: number of lanes to use (this property should be specified unless -+ the link is brought already up in BIOS) - - reset-gpio: gpio pin number of power good signal - - bus-range: PCI bus numbers covered (it is recommended for new devicetrees to - specify this property, to keep backwards compatibility a range of 0x00-0xff -diff --git a/Documentation/devicetree/bindings/powerpc/fsl/board.txt b/Documentation/devicetree/bindings/powerpc/fsl/board.txt -index cff38bd..89c90f4 100644 ---- a/Documentation/devicetree/bindings/powerpc/fsl/board.txt -+++ b/Documentation/devicetree/bindings/powerpc/fsl/board.txt -@@ -21,11 +21,14 @@ Example: - - This is the memory-mapped registers for on board FPGA. - --Required properities: -+Required properties: - - compatible: should be a board-specific string followed by a string - indicating the type of FPGA. 
Example: -- "fsl,-fpga", "fsl,fpga-pixis" -+ "fsl,-fpga", "fsl,fpga-pixis" or -+ "fsl,-fpga", "fsl,fpga-qixis" - - reg: should contain the address and the length of the FPGA register set. -+ -+Optional properties: - - interrupt-parent: should specify phandle for the interrupt controller. - - interrupts: should specify event (wakeup) IRQ. - -@@ -38,6 +41,13 @@ Example (P1022DS): - interrupts = <8 8 0 0>; - }; - -+Example (LS2080A-RDB): -+ -+ cpld@3,0 { -+ compatible = "fsl,ls2080ardb-fpga", "fsl,fpga-qixis"; -+ reg = <0x3 0 0x10000>; -+ }; -+ - * Freescale BCSR GPIO banks - - Some BCSR registers act as simple GPIO controllers, each such -diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt -index 471366d..1f9900c 100644 ---- a/Documentation/devicetree/bindings/usb/dwc3.txt -+++ b/Documentation/devicetree/bindings/usb/dwc3.txt -@@ -1,6 +1,7 @@ - synopsys DWC3 CORE - --DWC3- USB3 CONTROLLER -+DWC3- USB3 CONTROLLER. Complies to the generic USB binding properties -+ as described in 'usb/generic.txt' - - Required properties: - - compatible: must be "snps,dwc3" -diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_selftest.txt -index 1e3d5c9..57a808b 100644 ---- a/Documentation/devicetree/of_selftest.txt -+++ b/Documentation/devicetree/of_selftest.txt -@@ -63,7 +63,6 @@ struct device_node { - struct device_node *parent; - struct device_node *child; - struct device_node *sibling; -- struct device_node *allnext; /* next in list of all nodes */ - ... - }; - -@@ -99,12 +98,6 @@ child11 -> sibling12 -> sibling13 -> sibling14 -> null - Figure 1: Generic structure of un-flattened device tree - - --*allnext: it is used to link all the nodes of DT into a list. 
So, for the -- above tree the list would be as follows: -- --root->child1->child11->sibling12->sibling13->child131->sibling14->sibling2-> --child21->sibling22->sibling23->sibling3->child31->sibling32->sibling4->null -- - Before executing OF selftest, it is required to attach the test data to - machine's device tree (if present). So, when selftest_data_add() is called, - at first it reads the flattened device tree data linked into the kernel image -@@ -131,11 +124,6 @@ root ('/') - test-child01 null null null - - --allnext list: -- --root->testcase-data->test-child0->test-child01->test-sibling1->test-sibling2 --->test-sibling3->null -- - Figure 2: Example test data tree to be attached to live tree. - - According to the scenario above, the live tree is already present so it isn't -@@ -204,8 +192,6 @@ detached and then moving up the parent nodes are removed, and eventually the - whole tree). selftest_data_remove() calls detach_node_and_children() that uses - of_detach_node() to detach the nodes from the live device tree. - --To detach a node, of_detach_node() first updates all_next linked list, by --attaching the previous node's allnext to current node's allnext pointer. And --then, it either updates the child pointer of given node's parent to its --sibling or attaches the previous sibling to the given node's sibling, as --appropriate. That is it :) -+To detach a node, of_detach_node() either updates the child pointer of given -+node's parent to its sibling or attaches the previous sibling to the given -+node's sibling, as appropriate. 
That is it :) -diff --git a/Documentation/devicetree/todo.txt b/Documentation/devicetree/todo.txt -index c3cf065..b5139d1 100644 ---- a/Documentation/devicetree/todo.txt -+++ b/Documentation/devicetree/todo.txt -@@ -2,7 +2,6 @@ Todo list for devicetree: - - === General structure === - - Switch from custom lists to (h)list_head for nodes and properties structure --- Remove of_allnodes list and iterate using list of child nodes alone - - === CONFIG_OF_DYNAMIC === - - Switch to RCU for tree updates and get rid of global spinlock -diff --git a/MAINTAINERS b/MAINTAINERS -index c721042..cb2296a 100644 ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -1562,6 +1562,7 @@ M: Will Deacon - L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) - S: Maintained - F: drivers/iommu/arm-smmu.c -+F: drivers/iommu/io-pgtable-arm.c - - ARM64 PORT (AARCH64 ARCHITECTURE) - M: Catalin Marinas -@@ -1795,6 +1796,20 @@ F: include/linux/audit.h - F: include/uapi/linux/audit.h - F: kernel/audit* - -+AUFS (advanced multi layered unification filesystem) FILESYSTEM -+M: "J. R. Okajima" -+L: linux-unionfs@vger.kernel.org -+L: aufs-users@lists.sourceforge.net (members only) -+W: http://aufs.sourceforge.net -+T: git://git.code.sf.net/p/aufs/aufs3-linux -+T: git://github.com/sfjro/aufs3-linux.git -+S: Supported -+F: Documentation/filesystems/aufs/ -+F: Documentation/ABI/testing/debugfs-aufs -+F: Documentation/ABI/testing/sysfs-aufs -+F: fs/aufs/ -+F: include/uapi/linux/aufs_type.h -+ - AUXILIARY DISPLAY DRIVERS - M: Miguel Ojeda Sandonis - W: http://miguelojeda.es/auxdisplay.htm -@@ -3972,6 +3987,33 @@ F: sound/soc/fsl/fsl* - F: sound/soc/fsl/imx* - F: sound/soc/fsl/mpc8610_hpcd.c - -+FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER -+M: J. 
German Rivera -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-mc/ -+ -+FREESCALE DPAA2 ETH DRIVER -+M: Ioana Radulescu -+M: Bogdan Hamciuc -+M: Cristian Sovaiala -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-dpaa2/ethernet/ -+ -+FREESCALE QORIQ MANAGEMENT COMPLEX RESTOOL DRIVER -+M: Lijun Pan -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-mc/bus/mc-ioctl.h -+F: drivers/staging/fsl-mc/bus/mc-restool.c -+ -+FREESCALE DPAA2 MAC/PHY INTERFACE DRIVER -+M: Alex Marginean -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-dpaa2/mac/ -+ - FREEVXFS FILESYSTEM - M: Christoph Hellwig - W: ftp://ftp.openlinux.org/pub/people/hch/vxfs -@@ -7047,6 +7089,16 @@ S: Maintained - F: Documentation/devicetree/bindings/pci/xgene-pci.txt - F: drivers/pci/host/pci-xgene.c - -+PCI DRIVER FOR FREESCALE LAYERSCAPE -+M: Minghuan Lian -+M: Mingkai Hu -+M: Roy Zang -+L: linuxppc-dev@lists.ozlabs.org -+L: linux-pci@vger.kernel.org -+L: linux-arm-kernel@lists.infradead.org -+S: Maintained -+F: drivers/pci/host/*layerscape* -+ - PCI DRIVER FOR IMX6 - M: Richard Zhu - M: Lucas Stach -@@ -7122,6 +7174,14 @@ L: linux-pci@vger.kernel.org - S: Maintained - F: drivers/pci/host/*spear* - -+PCI MSI DRIVER FOR APPLIEDMICRO XGENE -+M: Duc Dang -+L: linux-pci@vger.kernel.org -+L: linux-arm-kernel@lists.infradead.org -+S: Maintained -+F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt -+F: drivers/pci/host/pci-xgene-msi.c -+ - PCMCIA SUBSYSTEM - P: Linux PCMCIA Team - L: linux-pcmcia@lists.infradead.org -diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 89c4b5c..29544f0 100644 ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -1292,6 +1292,9 @@ config PCI_DOMAINS - bool - depends on PCI - -+config PCI_DOMAINS_GENERIC -+ def_bool PCI_DOMAINS -+ - config PCI_NANOENGINE - bool "BSE nanoEngine PCI support" - depends on SA1100_NANOENGINE -diff --git a/arch/arm/Makefile b/arch/arm/Makefile 
-index b5d7988..93a30a2 100644 ---- a/arch/arm/Makefile -+++ b/arch/arm/Makefile -@@ -320,8 +320,12 @@ $(INSTALL_TARGETS): - $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@ - - PHONY += dtbs dtbs_install --dtbs dtbs_install: prepare scripts -- $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $@ -+ -+dtbs: prepare scripts -+ $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) -+ -+dtbs_install: -+ $(Q)$(MAKE) $(dtbinst)=$(boot)/dts MACHINE=$(MACHINE) - - # We use MRPROPER_FILES and CLEAN_FILES now - archclean: -diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile -index 38c89ca..6e784fa 100644 ---- a/arch/arm/boot/dts/Makefile -+++ b/arch/arm/boot/dts/Makefile -@@ -517,15 +517,7 @@ dtb-$(CONFIG_MACH_DOVE) += dove-cm-a510.dtb \ - dove-dove-db.dtb - dtb-$(CONFIG_ARCH_MEDIATEK) += mt6589-aquaris5.dtb - --targets += dtbs dtbs_install --targets += $(dtb-y) - endif - --# *.dtb used to be generated in the directory above. Clean out the --# old build results so people don't accidentally use them. 
--dtbs: $(addprefix $(obj)/, $(dtb-y)) -- $(Q)rm -f $(obj)/../*.dtb -- --clean-files := *.dtb -- --dtbs_install: $(addsuffix _dtbinst_, $(dtb-y)) -+always := $(dtb-y) -+clean-files := *.dtb -diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h -index 85738b2..f3c0d95 100644 ---- a/arch/arm/include/asm/dma-mapping.h -+++ b/arch/arm/include/asm/dma-mapping.h -@@ -121,12 +121,14 @@ static inline unsigned long dma_max_pfn(struct device *dev) - } - #define dma_max_pfn(dev) dma_max_pfn(dev) - --static inline int set_arch_dma_coherent_ops(struct device *dev) -+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, -+ u64 size, struct iommu_ops *iommu, -+ bool coherent) - { -- set_dma_ops(dev, &arm_coherent_dma_ops); -- return 0; -+ if (coherent) -+ set_dma_ops(dev, &arm_coherent_dma_ops); - } --#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev) -+#define arch_setup_dma_ops arch_setup_dma_ops - - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) - { -diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h -index 7fc4278..c074e7a 100644 ---- a/arch/arm/include/asm/mach/pci.h -+++ b/arch/arm/include/asm/mach/pci.h -@@ -19,9 +19,7 @@ struct pci_bus; - struct device; - - struct hw_pci { --#ifdef CONFIG_PCI_DOMAINS -- int domain; --#endif -+ struct msi_controller *msi_ctrl; - struct pci_ops *ops; - int nr_controllers; - void **private_data; -@@ -36,16 +34,14 @@ struct hw_pci { - resource_size_t start, - resource_size_t size, - resource_size_t align); -- void (*add_bus)(struct pci_bus *bus); -- void (*remove_bus)(struct pci_bus *bus); - }; - - /* - * Per-controller structure - */ - struct pci_sys_data { --#ifdef CONFIG_PCI_DOMAINS -- int domain; -+#ifdef CONFIG_PCI_MSI -+ struct msi_controller *msi_ctrl; - #endif - struct list_head node; - int busnr; /* primary bus number */ -@@ -65,8 +61,6 @@ struct pci_sys_data { - resource_size_t start, - resource_size_t size, 
- resource_size_t align); -- void (*add_bus)(struct pci_bus *bus); -- void (*remove_bus)(struct pci_bus *bus); - void *private_data; /* platform controller private data */ - }; - -diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h -index 7e95d85..585dc33 100644 ---- a/arch/arm/include/asm/pci.h -+++ b/arch/arm/include/asm/pci.h -@@ -18,13 +18,6 @@ static inline int pcibios_assign_all_busses(void) - } - - #ifdef CONFIG_PCI_DOMAINS --static inline int pci_domain_nr(struct pci_bus *bus) --{ -- struct pci_sys_data *root = bus->sysdata; -- -- return root->domain; --} -- - static inline int pci_proc_domain(struct pci_bus *bus) - { - return pci_domain_nr(bus); -diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c -index 17a26c1..a5cd259 100644 ---- a/arch/arm/kernel/bios32.c -+++ b/arch/arm/kernel/bios32.c -@@ -18,6 +18,15 @@ - - static int debug_pci; - -+#ifdef CONFIG_PCI_MSI -+struct msi_controller *pcibios_msi_controller(struct pci_dev *dev) -+{ -+ struct pci_sys_data *sysdata = dev->bus->sysdata; -+ -+ return sysdata->msi_ctrl; -+} -+#endif -+ - /* - * We can't use pci_get_device() here since we are - * called from interrupt context. -@@ -360,20 +369,6 @@ void pcibios_fixup_bus(struct pci_bus *bus) - } - EXPORT_SYMBOL(pcibios_fixup_bus); - --void pcibios_add_bus(struct pci_bus *bus) --{ -- struct pci_sys_data *sys = bus->sysdata; -- if (sys->add_bus) -- sys->add_bus(bus); --} -- --void pcibios_remove_bus(struct pci_bus *bus) --{ -- struct pci_sys_data *sys = bus->sysdata; -- if (sys->remove_bus) -- sys->remove_bus(bus); --} -- - /* - * Swizzle the device pin each time we cross a bridge. If a platform does - * not provide a swizzle function, we perform the standard PCI swizzling. 
-@@ -427,17 +422,16 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) - static int pcibios_init_resources(int busnr, struct pci_sys_data *sys) - { - int ret; -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - - if (list_empty(&sys->resources)) { - pci_add_resource_offset(&sys->resources, - &iomem_resource, sys->mem_offset); - } - -- list_for_each_entry(window, &sys->resources, list) { -+ resource_list_for_each_entry(window, &sys->resources) - if (resource_type(window->res) == IORESOURCE_IO) - return 0; -- } - - sys->io_res.start = (busnr * SZ_64K) ? : pcibios_min_io; - sys->io_res.end = (busnr + 1) * SZ_64K - 1; -@@ -468,15 +462,13 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, - if (!sys) - panic("PCI: unable to allocate sys data!"); - --#ifdef CONFIG_PCI_DOMAINS -- sys->domain = hw->domain; -+#ifdef CONFIG_PCI_MSI -+ sys->msi_ctrl = hw->msi_ctrl; - #endif - sys->busnr = busnr; - sys->swizzle = hw->swizzle; - sys->map_irq = hw->map_irq; - sys->align_resource = hw->align_resource; -- sys->add_bus = hw->add_bus; -- sys->remove_bus = hw->remove_bus; - INIT_LIST_HEAD(&sys->resources); - - if (hw->private_data) -@@ -494,8 +486,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, - if (hw->scan) - sys->bus = hw->scan(nr, sys); - else -- sys->bus = pci_scan_root_bus(parent, sys->busnr, -- hw->ops, sys, &sys->resources); -+ sys->bus = pci_scan_root_bus_msi(parent, -+ sys->busnr, hw->ops, sys, -+ &sys->resources, hw->msi_ctrl); - - if (!sys->bus) - panic("PCI: unable to scan bus!"); -diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c -index e7730cf..9f89e76 100644 ---- a/arch/arm/mach-iop13xx/msi.c -+++ b/arch/arm/mach-iop13xx/msi.c -@@ -126,10 +126,10 @@ static void iop13xx_msi_nop(struct irq_data *d) - static struct irq_chip iop13xx_msi_chip = { - .name = "PCI-MSI", - .irq_ack = iop13xx_msi_nop, -- .irq_enable = unmask_msi_irq, -- .irq_disable = 
mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) -@@ -153,7 +153,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) - id = iop13xx_cpu_id(); - msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f); - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq); - - return 0; -diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 00b9c48..329f5f4 100644 ---- a/arch/arm64/Kconfig -+++ b/arch/arm64/Kconfig -@@ -13,7 +13,9 @@ config ARM64 - select ARM_ARCH_TIMER - select ARM_GIC - select AUDIT_ARCH_COMPAT_GENERIC -+ select ARM_GIC_V2M if PCI_MSI - select ARM_GIC_V3 -+ select ARM_GIC_V3_ITS if PCI_MSI - select BUILDTIME_EXTABLE_SORT - select CLONE_BACKWARDS - select COMMON_CLK -@@ -166,6 +168,11 @@ config ARCH_XGENE - help - This enables support for AppliedMicro X-Gene SOC Family - -+config ARCH_LAYERSCAPE -+ bool "ARMv8 based Freescale Layerscape SoC family" -+ help -+ This enables support for the Freescale Layerscape SoC family. 
-+ - endmenu - - menu "Bus support" -@@ -366,7 +373,6 @@ config ARM64_VA_BITS_42 - - config ARM64_VA_BITS_48 - bool "48-bit" -- depends on !ARM_SMMU - - endchoice - -diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile -index 2d54c55..7cf8a29 100644 ---- a/arch/arm64/Makefile -+++ b/arch/arm64/Makefile -@@ -74,8 +74,13 @@ zinstall install: vmlinux - %.dtb: scripts - $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ - --dtbs: scripts -- $(Q)$(MAKE) $(build)=$(boot)/dts dtbs -+PHONY += dtbs dtbs_install -+ -+dtbs: prepare scripts -+ $(Q)$(MAKE) $(build)=$(boot)/dts -+ -+dtbs_install: -+ $(Q)$(MAKE) $(dtbinst)=$(boot)/dts - - PHONY += vdso_install - vdso_install: -@@ -84,11 +89,13 @@ vdso_install: - # We use MRPROPER_FILES and CLEAN_FILES now - archclean: - $(Q)$(MAKE) $(clean)=$(boot) -+ $(Q)$(MAKE) $(clean)=$(boot)/dts - - define archhelp - echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)' - echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' - echo '* dtbs - Build device tree blobs for enabled boards' -+ echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)' - echo ' install - Install uncompressed kernel' - echo ' zinstall - Install compressed kernel' - echo ' Install using (your) ~/bin/installkernel or' -diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile -index f8001a6..2644389 100644 ---- a/arch/arm64/boot/dts/Makefile -+++ b/arch/arm64/boot/dts/Makefile -@@ -1,6 +1,6 @@ - dtb-$(CONFIG_ARCH_THUNDER) += thunder-88xx.dtb - dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb --dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb -+dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb arm64-nxp-ls2088ardb-r1.dtb - - targets += dtbs - targets += $(dtb-y) -diff --git a/arch/arm64/boot/dts/Makefile.rej b/arch/arm64/boot/dts/Makefile.rej -new file mode 100644 -index 0000000..3610e7d ---- /dev/null -+++ b/arch/arm64/boot/dts/Makefile.rej -@@ -0,0 +1,10 @@ -+--- arch/arm64/boot/dts/Makefile -++++ 
arch/arm64/boot/dts/Makefile -+@@ -1,6 +1,7 @@ -+ dtb-$(CONFIG_ARCH_THUNDER) += thunder-88xx.dtb -+ dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb -+ dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb -++dtb-$(CONFIG_ARCH_LAYERSCAPE) += arm64-nxp-ls2080ardb-r0.dtb -+ -+ targets += dtbs -+ targets += $(dtb-y) -diff --git a/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts b/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts -new file mode 100644 -index 0000000..5da2834 ---- /dev/null -+++ b/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts -@@ -0,0 +1,249 @@ -+/* -+ * Device Tree file for NXP LS2080a RDB board -+ * -+ */ -+ -+/dts-v1/; -+ -+#include "fsl-ls2080a.dtsi" -+ -+/ { -+ model = "arm64-nxp-ls2080ardb-r0"; -+ compatible = "fsl,ls2080a-rdb", "fsl,ls2080a"; -+}; -+ -+&esdhc { -+ status = "okay"; -+}; -+ -+&ifc { -+ status = "okay"; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0x5 0x80000000 0x08000000 -+ 0x2 0x0 0x5 0x30000000 0x00010000 -+ 0x3 0x0 0x5 0x20000000 0x00010000>; -+ -+ nor@0,0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "cfi-flash"; -+ reg = <0x0 0x0 0x8000000>; -+ bank-width = <2>; -+ device-width = <1>; -+ -+ partition@0 { -+ /* SoC RCW, this location must not be altered */ -+ reg = <0x0 0x100000>; -+ label = "rcw (RO)"; -+ read-only; -+ }; -+ -+ partition@1 { -+ /* U-Boot image */ -+ reg = <0x100000 0x100000>; -+ label = "uboot"; -+ }; -+ -+ partition@2 { -+ /* U-Boot environment varialbes, 1MB */ -+ reg = <0x200000 0x100000>; -+ label = "uboot-env"; -+ env_size = <0x20000>; -+ }; -+ -+ partition@3 { -+ /* MC firmware, 4MB*/ -+ reg = <0x300000 0x400000>; -+ label = "mc_firmware"; -+ }; -+ -+ partition@4 { -+ /* MC DPL Blob, 1MB */ -+ reg = <0x700000 0x100000>; -+ label = "mc_dpl_blob"; -+ }; -+ -+ partition@5 { -+ /* MC DPC Blob, 1MB */ -+ reg = <0x800000 0x100000>; -+ label = "mc_dpc_blob"; -+ }; -+ -+ partition@6 { -+ /* AIOP FW, 4MB */ -+ reg = <0x900000 0x400000>; -+ label = "aiop_fw"; -+ 
}; -+ -+ partition@7 { -+ /* DebugServerFW, 2MB */ -+ reg = <0xd00000 0x200000>; -+ label = "DebugServer_fw"; -+ }; -+ }; -+ -+ nand@2,0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "fsl,ifc-nand"; -+ reg = <0x2 0x0 0x10000>; -+ }; -+ -+ cpld@3,0 { -+ reg = <0x3 0x0 0x10000>; -+ compatible = "fsl,ls2080a-rdb-qixis", "fsl,fpga-qixis"; -+ }; -+ -+}; -+ -+&i2c0 { -+ status = "okay"; -+ pca9547@75 { -+ compatible = "nxp,pca9547"; -+ reg = <0x75>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ i2c-mux-never-disable; -+ i2c@1 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x01>; -+ rtc@68 { -+ compatible = "dallas,ds3232"; -+ reg = <0x68>; -+ }; -+ }; -+ -+ i2c@3 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x3>; -+ -+ adt7481@4c { -+ compatible = "adi,adt7461"; -+ reg = <0x4c>; -+ }; -+ }; -+ }; -+}; -+ -+&i2c1 { -+ status = "disabled"; -+}; -+ -+&i2c2 { -+ status = "disabled"; -+}; -+ -+&i2c3 { -+ status = "disabled"; -+}; -+ -+&dspi { -+ status = "okay"; -+ dflash0: n25q512a { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "st,m25p80"; -+ spi-max-frequency = <3000000>; -+ reg = <0>; -+ }; -+}; -+ -+&qspi { -+ status = "disabled"; -+}; -+ -+&sata0 { -+ status = "okay"; -+}; -+ -+&sata1 { -+ status = "okay"; -+}; -+ -+&usb0 { -+ status = "okay"; -+}; -+ -+&usb1 { -+ status = "okay"; -+}; -+ -+&emdio1 { -+ status = "disabled"; -+ /* CS4340 PHYs */ -+ mdio1_phy1: emdio1_phy@1 { -+ reg = <0x10>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy2: emdio1_phy@2 { -+ reg = <0x11>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy3: emdio1_phy@3 { -+ reg = <0x12>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy4: emdio1_phy@4 { -+ reg = <0x13>; -+ phy-connection-type = "xfi"; -+ }; -+}; -+ -+&emdio2 { -+ /* AQR405 PHYs */ -+ mdio2_phy1: emdio2_phy@1 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 1 0x4>; /* Level high type */ -+ reg = <0x0>; -+ phy-connection-type = "xfi"; -+ }; -+ 
mdio2_phy2: emdio2_phy@2 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 2 0x4>; /* Level high type */ -+ reg = <0x1>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy3: emdio2_phy@3 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 4 0x4>; /* Level high type */ -+ reg = <0x2>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy4: emdio2_phy@4 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 5 0x4>; /* Level high type */ -+ reg = <0x3>; -+ phy-connection-type = "xfi"; -+ }; -+}; -+ -+/* Update DPMAC connections to external PHYs, under the assumption of -+ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board. -+ */ -+&dpmac1 { -+ phy-handle = <&mdio1_phy1>; -+}; -+&dpmac2 { -+ phy-handle = <&mdio1_phy2>; -+}; -+&dpmac3 { -+ phy-handle = <&mdio1_phy3>; -+}; -+&dpmac4 { -+ phy-handle = <&mdio1_phy4>; -+}; -+&dpmac5 { -+ phy-handle = <&mdio2_phy1>; -+}; -+&dpmac6 { -+ phy-handle = <&mdio2_phy2>; -+}; -+&dpmac7 { -+ phy-handle = <&mdio2_phy3>; -+}; -+&dpmac8 { -+ phy-handle = <&mdio2_phy4>; -+}; -diff --git a/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts b/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts -new file mode 100644 -index 0000000..0433cf2 ---- /dev/null -+++ b/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts -@@ -0,0 +1,256 @@ -+/* -+ * Device Tree file for NXP LS2088a RDB board -+ * -+ * Copyright (C) 2016, Freescale Semiconductor -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+/dts-v1/; -+ -+#include "fsl-ls2088a.dtsi" -+ -+/ { -+ model = "arm64-nxp-ls2088ardb-r1"; -+ compatible = "fsl,ls2088a-rdb", "fsl,ls2088a"; -+}; -+ -+&esdhc { -+ status = "okay"; -+}; -+ -+&ifc { -+ status = "okay"; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0x5 0x80000000 0x08000000 -+ 0x2 0x0 0x5 0x30000000 0x00010000 -+ 0x3 0x0 0x5 0x20000000 0x00010000>; -+ -+ nor@0,0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "cfi-flash"; -+ reg = <0x0 0x0 0x8000000>; -+ bank-width = <2>; -+ device-width = <1>; -+ -+ partition@0 { -+ /* SoC RCW, this location must not be altered */ -+ reg = <0x0 0x100000>; -+ label = "rcw (RO)"; -+ read-only; -+ }; -+ -+ partition@1 { -+ /* U-Boot image */ -+ reg = <0x100000 0x100000>; -+ label = "uboot"; -+ }; -+ -+ partition@2 { -+ /* U-Boot environment varialbes, 1MB */ -+ reg = <0x200000 0x100000>; -+ label = "uboot-env"; -+ env_size = <0x20000>; -+ }; -+ -+ partition@3 { -+ /* MC firmware, 4MB*/ -+ reg = <0x300000 0x400000>; -+ label = "mc_firmware"; -+ }; -+ -+ partition@4 { -+ /* MC DPL Blob, 1MB */ -+ reg = <0x700000 0x100000>; -+ label = "mc_dpl_blob"; -+ }; -+ -+ partition@5 { -+ /* MC DPC Blob, 1MB */ -+ reg = <0x800000 0x100000>; -+ label = "mc_dpc_blob"; -+ }; -+ -+ partition@6 { -+ /* AIOP FW, 4MB */ -+ reg = <0x900000 0x400000>; -+ label = "aiop_fw"; -+ }; -+ -+ partition@7 { -+ /* DebugServerFW, 2MB */ -+ reg = <0xd00000 0x200000>; -+ label = "DebugServer_fw"; -+ }; -+ }; -+ -+ nand@2,0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "fsl,ifc-nand"; -+ reg = <0x2 0x0 0x10000>; -+ }; -+ -+ cpld@3,0 { -+ reg = <0x3 0x0 0x10000>; -+ compatible = "fsl,ls2088a-rdb-qixis", "fsl,fpga-qixis"; -+ }; -+}; -+ -+&ftm0 { -+ status = "okay"; -+}; -+ -+&i2c0 { -+ status = "okay"; -+ pca9547@75 { -+ compatible = "nxp,pca9547"; -+ reg = <0x75>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ i2c-mux-never-disable; -+ i2c@1 { -+ #address-cells = <1>; -+ #size-cells = 
<0>; -+ reg = <0x01>; -+ rtc@68 { -+ compatible = "dallas,ds3232"; -+ reg = <0x68>; -+ }; -+ }; -+ -+ i2c@3 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x3>; -+ -+ adt7481@4c { -+ compatible = "adi,adt7461"; -+ reg = <0x4c>; -+ }; -+ }; -+ }; -+}; -+ -+&i2c1 { -+ status = "disabled"; -+}; -+ -+&i2c2 { -+ status = "disabled"; -+}; -+ -+&i2c3 { -+ status = "disabled"; -+}; -+ -+&dspi { -+ status = "okay"; -+ dflash0: n25q512a { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "st,m25p80"; -+ spi-max-frequency = <3000000>; -+ reg = <0>; -+ }; -+}; -+ -+&qspi { -+ status = "disabled"; -+}; -+ -+&sata0 { -+ status = "okay"; -+}; -+ -+&sata1 { -+ status = "okay"; -+}; -+ -+&usb0 { -+ status = "okay"; -+}; -+ -+&usb1 { -+ status = "okay"; -+}; -+ -+&emdio1 { -+ /* CS4340 PHYs */ -+ mdio1_phy1: emdio1_phy@1 { -+ reg = <0x10>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy2: emdio1_phy@2 { -+ reg = <0x11>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy3: emdio1_phy@3 { -+ reg = <0x12>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy4: emdio1_phy@4 { -+ reg = <0x13>; -+ phy-connection-type = "xfi"; -+ }; -+}; -+ -+&emdio2 { -+ /* AQR405 PHYs */ -+ mdio2_phy1: emdio2_phy@1 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 1 0x4>; /* Level high type */ -+ reg = <0x0>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy2: emdio2_phy@2 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 2 0x4>; /* Level high type */ -+ reg = <0x1>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy3: emdio2_phy@3 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 4 0x4>; /* Level high type */ -+ reg = <0x2>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy4: emdio2_phy@4 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 5 0x4>; /* Level high type */ -+ reg = <0x3>; -+ phy-connection-type = "xfi"; -+ }; -+}; -+ -+/* Update DPMAC connections to external PHYs, under the assumption of -+ * 
SerDes 0x2a_0x41. This is currently the only SerDes supported on the board. -+ */ -+&dpmac1 { -+ phy-handle = <&mdio1_phy1>; -+}; -+&dpmac2 { -+ phy-handle = <&mdio1_phy2>; -+}; -+&dpmac3 { -+ phy-handle = <&mdio1_phy3>; -+}; -+&dpmac4 { -+ phy-handle = <&mdio1_phy4>; -+}; -+&dpmac5 { -+ phy-handle = <&mdio2_phy1>; -+}; -+&dpmac6 { -+ phy-handle = <&mdio2_phy2>; -+}; -+&dpmac7 { -+ phy-handle = <&mdio2_phy3>; -+}; -+&dpmac8 { -+ phy-handle = <&mdio2_phy4>; -+}; -diff --git a/arch/arm64/boot/dts/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/fsl-ls2080a.dtsi -new file mode 100644 -index 0000000..5e53b04 ---- /dev/null -+++ b/arch/arm64/boot/dts/fsl-ls2080a.dtsi -@@ -0,0 +1,729 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-2080A family SoC. -+ * -+ * Copyright (C) 2014-2015, Freescale Semiconductor -+ * -+ * Bhupesh Sharma -+ * Harninder Rai -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#include -+ -+/memreserve/ 0x80000000 0x00010000; -+ -+/ { -+ compatible = "fsl,ls2080a"; -+ interrupt-parent = <&gic>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ cpus { -+ #address-cells = <2>; -+ #size-cells = <0>; -+ -+ /* We have 4 clusters having 2 Cortex-A57 cores each */ -+ cpu0: cpu@0 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x0>; -+ clocks = <&clockgen 1 0>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu1: cpu@1 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x1>; -+ clocks = <&clockgen 1 0>; -+ }; -+ -+ cpu2: cpu@100 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x100>; -+ clocks = <&clockgen 1 1>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu3: cpu@101 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x101>; -+ clocks = <&clockgen 1 1>; -+ }; -+ -+ cpu4: cpu@200 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x200>; -+ clocks = <&clockgen 1 2>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu5: cpu@201 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x201>; -+ clocks = <&clockgen 1 2>; -+ }; -+ -+ cpu6: cpu@300 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x300>; -+ clocks = <&clockgen 1 3>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu7: cpu@301 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x301>; -+ clocks = <&clockgen 1 3>; -+ }; -+ }; -+ -+ pmu { -+ compatible = "arm,armv8-pmuv3"; -+ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */ -+ }; -+ -+ gic: interrupt-controller@6000000 { -+ compatible = "arm,gic-v3"; -+ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */ -+ <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */ -+ <0x0 0x0c0c0000 0 0x2000>, /* GICC */ -+ <0x0 0x0c0d0000 0 0x1000>, /* GICH */ -+ <0x0 0x0c0e0000 0 0x20000>; /* GICV */ -+ #interrupt-cells = <3>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; 
-+ interrupt-controller; -+ interrupts = <1 9 0x4>; -+ -+ its: gic-its@6020000 { -+ compatible = "arm,gic-v3-its"; -+ msi-controller; -+ reg = <0x0 0x6020000 0 0x20000>; -+ }; -+ }; -+ -+ sysclk: sysclk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <100000000>; -+ clock-output-names = "sysclk"; -+ }; -+ -+ clockgen: clocking@1300000 { -+ compatible = "fsl,ls2080a-clockgen"; -+ reg = <0 0x1300000 0 0xa0000>; -+ #clock-cells = <2>; -+ clocks = <&sysclk>; -+ }; -+ -+ tmu: tmu@1f80000 { -+ compatible = "fsl,qoriq-tmu", "fsl,ls2080a-tmu"; -+ reg = <0x0 0x1f80000 0x0 0x10000>; -+ interrupts = <0 23 0x4>; -+ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; -+ fsl,tmu-calibration = <0x00000000 0x00000026 -+ 0x00000001 0x0000002d -+ 0x00000002 0x00000032 -+ 0x00000003 0x00000039 -+ 0x00000004 0x0000003f -+ 0x00000005 0x00000046 -+ 0x00000006 0x0000004d -+ 0x00000007 0x00000054 -+ 0x00000008 0x0000005a -+ 0x00000009 0x00000061 -+ 0x0000000a 0x0000006a -+ 0x0000000b 0x00000071 -+ -+ 0x00010000 0x00000025 -+ 0x00010001 0x0000002c -+ 0x00010002 0x00000035 -+ 0x00010003 0x0000003d -+ 0x00010004 0x00000045 -+ 0x00010005 0x0000004e -+ 0x00010006 0x00000057 -+ 0x00010007 0x00000061 -+ 0x00010008 0x0000006b -+ 0x00010009 0x00000076 -+ -+ 0x00020000 0x00000029 -+ 0x00020001 0x00000033 -+ 0x00020002 0x0000003d -+ 0x00020003 0x00000049 -+ 0x00020004 0x00000056 -+ 0x00020005 0x00000061 -+ 0x00020006 0x0000006d -+ -+ 0x00030000 0x00000021 -+ 0x00030001 0x0000002a -+ 0x00030002 0x0000003c -+ 0x00030003 0x0000004e>; -+ little-endian; -+ #thermal-sensor-cells = <1>; -+ }; -+ -+ thermal-zones { -+ cpu_thermal: cpu-thermal { -+ polling-delay-passive = <1000>; -+ polling-delay = <5000>; -+ -+ thermal-sensors = <&tmu 4>; -+ -+ trips { -+ cpu_alert: cpu-alert { -+ temperature = <75000>; -+ hysteresis = <2000>; -+ type = "passive"; -+ }; -+ cpu_crit: cpu-crit { -+ temperature = <85000>; -+ hysteresis = <2000>; -+ type = "critical"; -+ }; -+ }; -+ -+ cooling-maps { 
-+ map0 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu0 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map1 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu2 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map2 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu4 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map3 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu6 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ }; -+ }; -+ }; -+ -+ serial0: serial@21c0500 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0500 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ -+ serial1: serial@21c0600 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0600 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ -+ gpio0: gpio@2300000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2300000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio1: gpio@2310000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2310000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio2: gpio@2320000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2320000 0x0 0x10000>; -+ interrupts = <0 37 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio3: gpio@2330000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2330000 0x0 0x10000>; -+ interrupts = <0 37 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ /* TODO: WRIOP 
(CCSR?) */ -+ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, E-MDIO1: 0x1_6000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B96000 0x0 0x1000>; -+ device_type = "mdio"; /* TODO: is this necessary? */ -+ little-endian; /* force the driver in LE mode */ -+ -+ /* Not necessary on the QDS, but needed on the RDB */ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, E-MDIO2: 0x1_7000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B97000 0x0 0x1000>; -+ device_type = "mdio"; /* TODO: is this necessary? */ -+ little-endian; /* force the driver in LE mode */ -+ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ ifc: ifc@2240000 { -+ compatible = "fsl,ifc", "simple-bus"; -+ reg = <0x0 0x2240000 0x0 0x20000>; -+ interrupts = <0 21 0x4>; /* Level high type */ -+ little-endian; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ -+ ranges = <0 0 0x5 0x80000000 0x08000000 -+ 2 0 0x5 0x30000000 0x00010000 -+ 3 0 0x5 0x20000000 0x00010000>; -+ }; -+ -+ esdhc: esdhc@2140000 { -+ compatible = "fsl,ls2080a-esdhc", "fsl,esdhc"; -+ reg = <0x0 0x2140000 0x0 0x10000>; -+ interrupts = <0 28 0x4>; /* Level high type */ -+ clock-frequency = <0>; -+ voltage-ranges = <1800 1800 3300 3300>; -+ sdhci,auto-cmd12; -+ little-endian; -+ bus-width = <4>; -+ }; -+ -+ ftm0: ftm0@2800000 { -+ compatible = "fsl,ftm-alarm"; -+ reg = <0x0 0x2800000 0x0 0x10000>; -+ interrupts = <0 44 4>; -+ }; -+ -+ reset: reset@1E60000 { -+ compatible = "fsl,ls-reset"; -+ reg = <0x0 0x1E60000 0x0 0x10000>; -+ }; -+ -+ dspi: dspi@2100000 { -+ compatible = "fsl,ls2085a-dspi", "fsl,ls2080a-dspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2100000 0x0 0x10000>; -+ interrupts = <0 26 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ clock-names = "dspi"; -+ spi-num-chipselects = <5>; -+ bus-num = <0>; -+ }; -+ -+ i2c0: i2c@2000000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = 
<0>; -+ reg = <0x0 0x2000000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c1: i2c@2010000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2010000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c2: i2c@2020000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2020000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c3: i2c@2030000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2030000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ qspi: quadspi@20c0000 { -+ compatible = "fsl,ls2080a-qspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x20c0000 0x0 0x10000>, -+ <0x0 0x20000000 0x0 0x10000000>; -+ reg-names = "QuadSPI", "QuadSPI-memory"; -+ interrupts = <0 25 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "qspi_en", "qspi"; -+ }; -+ -+ pcie@3400000 { -+ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", -+ "snps,dw-pcie"; -+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ -+ 0x10 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 108 0x4>; /* Level high type */ -+ interrupt-names = "intr"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x10 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 
0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>, -+ <0000 0 0 2 &gic 0 0 0 110 4>, -+ <0000 0 0 3 &gic 0 0 0 111 4>, -+ <0000 0 0 4 &gic 0 0 0 112 4>; -+ }; -+ -+ pcie@3500000 { -+ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", -+ "snps,dw-pcie"; -+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ -+ 0x12 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 113 0x4>; /* Level high type */ -+ interrupt-names = "intr"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x12 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>, -+ <0000 0 0 2 &gic 0 0 0 115 4>, -+ <0000 0 0 3 &gic 0 0 0 116 4>, -+ <0000 0 0 4 &gic 0 0 0 117 4>; -+ }; -+ -+ pcie@3600000 { -+ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", -+ "snps,dw-pcie"; -+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ -+ 0x14 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 118 0x4>; /* Level high type */ -+ interrupt-names = "intr"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <8>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x14 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>, -+ <0000 0 0 2 &gic 0 0 0 120 4>, -+ <0000 0 0 3 &gic 0 0 0 121 4>, -+ <0000 0 0 4 &gic 0 0 0 122 4>; -+ }; -+ -+ pcie@3700000 { -+ compatible = "fsl,ls2080a-pcie", 
"fsl,ls2085a-pcie", -+ "snps,dw-pcie"; -+ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */ -+ 0x16 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 123 0x4>; /* Level high type */ -+ interrupt-names = "intr"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x16 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>, -+ <0000 0 0 2 &gic 0 0 0 125 4>, -+ <0000 0 0 3 &gic 0 0 0 126 4>, -+ <0000 0 0 4 &gic 0 0 0 127 4>; -+ }; -+ -+ sata0: sata@3200000 { -+ compatible = "fsl,ls2080a-ahci", "fsl,ls1021a-ahci"; -+ reg = <0x0 0x3200000 0x0 0x10000>; -+ interrupts = <0 133 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ sata1: sata@3210000 { -+ compatible = "fsl,ls2080a-ahci", "fsl,ls1021a-ahci"; -+ reg = <0x0 0x3210000 0x0 0x10000>; -+ interrupts = <0 136 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ usb0: usb3@3100000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3100000 0x0 0x10000>; -+ interrupts = <0 80 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ }; -+ -+ usb1: usb3@3110000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3110000 0x0 0x10000>; -+ interrupts = <0 81 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ }; -+ -+ smmu: iommu@5000000 { -+ compatible = "arm,mmu-500"; -+ reg = <0 0x5000000 0 0x800000>; -+ #global-interrupts = <12>; -+ interrupts = <0 13 4>, /* global secure fault */ -+ <0 14 4>, /* combined secure interrupt */ -+ <0 15 4>, /* global non-secure fault */ -+ <0 16 4>, /* combined non-secure interrupt */ -+ /* performance counter interrupts 0-7 */ -+ <0 211 4>, 
-+ <0 212 4>, -+ <0 213 4>, -+ <0 214 4>, -+ <0 215 4>, -+ <0 216 4>, -+ <0 217 4>, -+ <0 218 4>, -+ /* per context interrupt, 64 interrupts */ -+ <0 146 4>, -+ <0 147 4>, -+ <0 148 4>, -+ <0 149 4>, -+ <0 150 4>, -+ <0 151 4>, -+ <0 152 4>, -+ <0 153 4>, -+ <0 154 4>, -+ <0 155 4>, -+ <0 156 4>, -+ <0 157 4>, -+ <0 158 4>, -+ <0 159 4>, -+ <0 160 4>, -+ <0 161 4>, -+ <0 162 4>, -+ <0 163 4>, -+ <0 164 4>, -+ <0 165 4>, -+ <0 166 4>, -+ <0 167 4>, -+ <0 168 4>, -+ <0 169 4>, -+ <0 170 4>, -+ <0 171 4>, -+ <0 172 4>, -+ <0 173 4>, -+ <0 174 4>, -+ <0 175 4>, -+ <0 176 4>, -+ <0 177 4>, -+ <0 178 4>, -+ <0 179 4>, -+ <0 180 4>, -+ <0 181 4>, -+ <0 182 4>, -+ <0 183 4>, -+ <0 184 4>, -+ <0 185 4>, -+ <0 186 4>, -+ <0 187 4>, -+ <0 188 4>, -+ <0 189 4>, -+ <0 190 4>, -+ <0 191 4>, -+ <0 192 4>, -+ <0 193 4>, -+ <0 194 4>, -+ <0 195 4>, -+ <0 196 4>, -+ <0 197 4>, -+ <0 198 4>, -+ <0 199 4>, -+ <0 200 4>, -+ <0 201 4>, -+ <0 202 4>, -+ <0 203 4>, -+ <0 204 4>, -+ <0 205 4>, -+ <0 206 4>, -+ <0 207 4>, -+ <0 208 4>, -+ <0 209 4>; -+ mmu-masters = <&fsl_mc 0x300 0>; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = <1 13 0x1>, /* Physical Secure PPI, edge triggered */ -+ <1 14 0x1>, /* Physical Non-Secure PPI, edge triggered */ -+ <1 11 0x1>, /* Virtual PPI, edge triggered */ -+ <1 10 0x1>; /* Hypervisor PPI, edge triggered */ -+ arm,reread-timer; -+ }; -+ -+ fsl_mc: fsl-mc@80c000000 { -+ compatible = "fsl,qoriq-mc"; -+ #stream-id-cells = <2>; -+ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ -+ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ -+ msi-parent = <&its>; -+ #address-cells = <3>; -+ #size-cells = <1>; -+ -+ /* -+ * Region type 0x0 - MC portals -+ * Region type 0x1 - QBMAN portals -+ */ -+ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000 -+ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>; -+ -+ /* -+ * Define the maximum number of MACs present on the SoC. 
-+ * They won't necessarily be all probed, since the -+ * Data Path Layout file and the MC firmware can put fewer -+ * actual DPMAC objects on the MC bus. -+ */ -+ dpmacs { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ dpmac1: dpmac@1 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <1>; -+ }; -+ dpmac2: dpmac@2 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <2>; -+ }; -+ dpmac3: dpmac@3 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <3>; -+ }; -+ dpmac4: dpmac@4 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <4>; -+ }; -+ dpmac5: dpmac@5 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <5>; -+ }; -+ dpmac6: dpmac@6 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <6>; -+ }; -+ dpmac7: dpmac@7 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <7>; -+ }; -+ dpmac8: dpmac@8 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <8>; -+ }; -+ dpmac9: dpmac@9 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <9>; -+ }; -+ dpmac10: dpmac@10 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xa>; -+ }; -+ dpmac11: dpmac@11 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xb>; -+ }; -+ dpmac12: dpmac@12 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xc>; -+ }; -+ dpmac13: dpmac@13 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xd>; -+ }; -+ dpmac14: dpmac@14 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xe>; -+ }; -+ dpmac15: dpmac@15 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xf>; -+ }; -+ dpmac16: dpmac@16 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0x10>; -+ }; -+ }; -+ }; -+ -+ ccn@4000000 { -+ compatible = "arm,ccn-504"; -+ reg = <0x0 0x04000000 0x0 0x01000000>; -+ interrupts = <0 12 4>; -+ }; -+ -+ memory@80000000 { -+ device_type = "memory"; -+ reg = <0x00000000 0x80000000 0 0x80000000>; -+ /* DRAM space 1 - 2 GB DRAM */ -+ }; -+}; -diff --git a/arch/arm64/boot/dts/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/fsl-ls2088a.dtsi -new file mode 100644 -index 0000000..2e3529a ---- /dev/null -+++ b/arch/arm64/boot/dts/fsl-ls2088a.dtsi -@@ -0,0 
+1,833 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-2088A family SoC. -+ * -+ * Copyright (C) 2016, Freescale Semiconductor -+ * -+ * Abhimanyu Saini -+ * -+ * This file is dual-licensed: you can use it either under the terms -+ * of the GPLv2 or the X11 license, at your option. Note that this dual -+ * licensing only applies to this file, and not this project as a -+ * whole. -+ * -+ * a) This library is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation; either version 2 of the -+ * License, or (at your option) any later version. -+ * -+ * This library is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * Or, alternatively, -+ * -+ * b) Permission is hereby granted, free of charge, to any person -+ * obtaining a copy of this software and associated documentation -+ * files (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, -+ * copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following -+ * conditions: -+ * -+ * The above copyright notice and this permission notice shall be -+ * included in all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -+ * NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. -+ */ -+ -+#include "thermal.h" -+ -+/memreserve/ 0x80000000 0x00010000; -+ -+/ { -+ compatible = "fsl,ls2088a"; -+ interrupt-parent = <&gic>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ cpus { -+ #address-cells = <2>; -+ #size-cells = <0>; -+ -+ cpu0: cpu@0 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x0>; -+ clocks = <&clockgen 1 0>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu1: cpu@1 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x1>; -+ clocks = <&clockgen 1 0>; -+ }; -+ -+ cpu2: cpu@100 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x100>; -+ clocks = <&clockgen 1 1>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu3: cpu@101 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x101>; -+ clocks = <&clockgen 1 1>; -+ }; -+ -+ cpu4: cpu@200 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x200>; -+ clocks = <&clockgen 1 2>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu5: cpu@201 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x201>; -+ clocks = <&clockgen 1 2>; -+ }; -+ -+ cpu6: cpu@300 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x300>; -+ clocks = <&clockgen 1 3>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu7: cpu@301 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x301>; -+ clocks = <&clockgen 1 3>; -+ }; -+ }; -+ -+ pmu { -+ compatible = "arm,armv8-pmuv3"; -+ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */ -+ }; -+ -+ gic: interrupt-controller@6000000 { -+ compatible = "arm,gic-v3"; -+ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */ -+ <0x0 0x06100000 0 0x100000>, /* GICR 
(RD_base + SGI_base) */ -+ <0x0 0x0c0c0000 0 0x2000>, /* GICC */ -+ <0x0 0x0c0d0000 0 0x1000>, /* GICH */ -+ <0x0 0x0c0e0000 0 0x20000>; /* GICV */ -+ #interrupt-cells = <3>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; -+ interrupt-controller; -+ interrupts = <1 9 0x4>; -+ -+ its: gic-its@6020000 { -+ compatible = "arm,gic-v3-its"; -+ msi-controller; -+ reg = <0x0 0x6020000 0 0x20000>; -+ }; -+ }; -+ -+ sysclk: sysclk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <100000000>; -+ clock-output-names = "sysclk"; -+ }; -+ -+ clockgen: clocking@1300000 { -+ compatible = "fsl,ls2088a-clockgen"; -+ reg = <0 0x1300000 0 0xa0000>; -+ #clock-cells = <2>; -+ clocks = <&sysclk>; -+ }; -+ -+ tmu: tmu@1f80000 { -+ compatible = "fsl,qoriq-tmu", "fsl,ls2080a-tmu", "fsl,ls2088a-tmu"; -+ reg = <0x0 0x1f80000 0x0 0x10000>; -+ interrupts = <0 23 0x4>; -+ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; -+ fsl,tmu-calibration = <0x00000000 0x00000026 -+ 0x00000001 0x0000002d -+ 0x00000002 0x00000032 -+ 0x00000003 0x00000039 -+ 0x00000004 0x0000003f -+ 0x00000005 0x00000046 -+ 0x00000006 0x0000004d -+ 0x00000007 0x00000054 -+ 0x00000008 0x0000005a -+ 0x00000009 0x00000061 -+ 0x0000000a 0x0000006a -+ 0x0000000b 0x00000071 -+ -+ 0x00010000 0x00000025 -+ 0x00010001 0x0000002c -+ 0x00010002 0x00000035 -+ 0x00010003 0x0000003d -+ 0x00010004 0x00000045 -+ 0x00010005 0x0000004e -+ 0x00010006 0x00000057 -+ 0x00010007 0x00000061 -+ 0x00010008 0x0000006b -+ 0x00010009 0x00000076 -+ -+ 0x00020000 0x00000029 -+ 0x00020001 0x00000033 -+ 0x00020002 0x0000003d -+ 0x00020003 0x00000049 -+ 0x00020004 0x00000056 -+ 0x00020005 0x00000061 -+ 0x00020006 0x0000006d -+ -+ 0x00030000 0x00000021 -+ 0x00030001 0x0000002a -+ 0x00030002 0x0000003c -+ 0x00030003 0x0000004e>; -+ little-endian; -+ #thermal-sensor-cells = <1>; -+ }; -+ -+ thermal-zones { -+ cpu_thermal: cpu-thermal { -+ polling-delay-passive = <1000>; -+ polling-delay = <5000>; -+ -+ thermal-sensors = 
<&tmu 4>; -+ -+ trips { -+ cpu_alert: cpu-alert { -+ temperature = <75000>; -+ hysteresis = <2000>; -+ type = "passive"; -+ }; -+ cpu_crit: cpu-crit { -+ temperature = <85000>; -+ hysteresis = <2000>; -+ type = "critical"; -+ }; -+ }; -+ -+ cooling-maps { -+ map0 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu0 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map1 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu2 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map2 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu4 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map3 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu6 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ }; -+ }; -+ }; -+ -+ serial0: serial@21c0500 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0500 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ -+ serial1: serial@21c0600 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0600 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ cluster1_core0_watchdog: wdt@c000000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc000000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster1_core1_watchdog: wdt@c010000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc010000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster2_core0_watchdog: wdt@c100000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc100000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster2_core1_watchdog: wdt@c110000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc110000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 
4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster3_core0_watchdog: wdt@c200000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc200000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster3_core1_watchdog: wdt@c210000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc210000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster4_core0_watchdog: wdt@c300000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc300000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster4_core1_watchdog: wdt@c310000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc310000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ gpio0: gpio@2300000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2300000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio1: gpio@2310000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2310000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio2: gpio@2320000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2320000 0x0 0x10000>; -+ interrupts = <0 37 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio3: gpio@2330000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2330000 0x0 0x10000>; -+ interrupts = <0 37 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ 
#interrupt-cells = <2>; -+ }; -+ -+ /* TODO: WRIOP (CCSR?) */ -+ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, E-MDIO1: 0x1_6000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B96000 0x0 0x1000>; -+ device_type = "mdio"; /* TODO: is this necessary? */ -+ little-endian; /* force the driver in LE mode */ -+ -+ /* Not necessary on the QDS, but needed on the RDB */ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, E-MDIO2: 0x1_7000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B97000 0x0 0x1000>; -+ device_type = "mdio"; /* TODO: is this necessary? */ -+ little-endian; /* force the driver in LE mode */ -+ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ ifc: ifc@2240000 { -+ compatible = "fsl,ifc", "simple-bus"; -+ reg = <0x0 0x2240000 0x0 0x20000>; -+ interrupts = <0 21 0x4>; /* Level high type */ -+ little-endian; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ -+ ranges = <0 0 0x5 0x80000000 0x08000000 -+ 2 0 0x5 0x30000000 0x00010000 -+ 3 0 0x5 0x20000000 0x00010000>; -+ }; -+ -+ esdhc: esdhc@2140000 { -+ compatible = "fsl,ls2088a-esdhc", "fsl,ls2080a-esdhc", -+ "fsl,esdhc"; -+ reg = <0x0 0x2140000 0x0 0x10000>; -+ interrupts = <0 28 0x4>; /* Level high type */ -+ clock-frequency = <0>; -+ voltage-ranges = <1800 1800 3300 3300>; -+ sdhci,auto-cmd12; -+ little-endian; -+ bus-width = <4>; -+ }; -+ -+ ftm0: ftm0@2800000 { -+ compatible = "fsl,ftm-alarm"; -+ reg = <0x0 0x2800000 0x0 0x10000>; -+ interrupts = <0 44 4>; -+ }; -+ -+ reset: reset@1E60000 { -+ compatible = "fsl,ls-reset"; -+ reg = <0x0 0x1E60000 0x0 0x10000>; -+ }; -+ -+ dspi: dspi@2100000 { -+ compatible = "fsl,ls2088a-dspi", "fsl,ls2085a-dspi", -+ "fsl,ls2080a-dspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2100000 0x0 0x10000>; -+ interrupts = <0 26 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ clock-names = "dspi"; -+ spi-num-chipselects = <5>; -+ bus-num = <0>; -+ }; -+ -+ 
i2c0: i2c@2000000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2000000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c1: i2c@2010000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2010000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c2: i2c@2020000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2020000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c3: i2c@2030000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2030000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ qspi: quadspi@20c0000 { -+ compatible = "fsl,ls2088a-qspi", "fsl,ls2080a-qspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x20c0000 0x0 0x10000>, -+ <0x0 0x20000000 0x0 0x10000000>; -+ reg-names = "QuadSPI", "QuadSPI-memory"; -+ interrupts = <0 25 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "qspi_en", "qspi"; -+ }; -+ -+ pcie1: pcie@3400000 { -+ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ -+ 0x20 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 108 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O 
*/ -+ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>, -+ <0000 0 0 2 &gic 0 0 0 110 4>, -+ <0000 0 0 3 &gic 0 0 0 111 4>, -+ <0000 0 0 4 &gic 0 0 0 112 4>; -+ }; -+ -+ pcie2: pcie@3500000 { -+ compatible = "fsl,ls2080a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ -+ 0x28 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 113 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>, -+ <0000 0 0 2 &gic 0 0 0 115 4>, -+ <0000 0 0 3 &gic 0 0 0 116 4>, -+ <0000 0 0 4 &gic 0 0 0 117 4>; -+ }; -+ -+ pcie3: pcie@3600000 { -+ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ -+ 0x30 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 118 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <8>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = 
<&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>, -+ <0000 0 0 2 &gic 0 0 0 120 4>, -+ <0000 0 0 3 &gic 0 0 0 121 4>, -+ <0000 0 0 4 &gic 0 0 0 122 4>; -+ }; -+ -+ pcie4: pcie@3700000 { -+ compatible = "fsl,ls2080a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */ -+ 0x38 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 123 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x38 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x38 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>, -+ <0000 0 0 2 &gic 0 0 0 125 4>, -+ <0000 0 0 3 &gic 0 0 0 126 4>, -+ <0000 0 0 4 &gic 0 0 0 127 4>; -+ }; -+ -+ sata0: sata@3200000 { -+ status = "disabled"; -+ compatible = "fsl,ls2088a-ahci", "fsl,ls2080a-ahci"; -+ reg = <0x0 0x3200000 0x0 0x10000>; -+ interrupts = <0 133 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ sata1: sata@3210000 { -+ status = "disabled"; -+ compatible = "fsl,ls2088a-ahci", "fsl,ls2080a-ahci"; -+ reg = <0x0 0x3210000 0x0 0x10000>; -+ interrupts = <0 136 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ usb0: usb3@3100000 { -+ status = "disabled"; -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3100000 0x0 0x10000>; -+ interrupts = <0 80 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ usb1: usb3@3110000 { -+ status = "disabled"; -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3110000 0x0 0x10000>; 
-+ interrupts = <0 81 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ smmu: iommu@5000000 { -+ compatible = "arm,mmu-500"; -+ reg = <0 0x5000000 0 0x800000>; -+ #global-interrupts = <12>; -+ interrupts = <0 13 4>, /* global secure fault */ -+ <0 14 4>, /* combined secure interrupt */ -+ <0 15 4>, /* global non-secure fault */ -+ <0 16 4>, /* combined non-secure interrupt */ -+ /* performance counter interrupts 0-7 */ -+ <0 211 4>, -+ <0 212 4>, -+ <0 213 4>, -+ <0 214 4>, -+ <0 215 4>, -+ <0 216 4>, -+ <0 217 4>, -+ <0 218 4>, -+ /* per context interrupt, 64 interrupts */ -+ <0 146 4>, -+ <0 147 4>, -+ <0 148 4>, -+ <0 149 4>, -+ <0 150 4>, -+ <0 151 4>, -+ <0 152 4>, -+ <0 153 4>, -+ <0 154 4>, -+ <0 155 4>, -+ <0 156 4>, -+ <0 157 4>, -+ <0 158 4>, -+ <0 159 4>, -+ <0 160 4>, -+ <0 161 4>, -+ <0 162 4>, -+ <0 163 4>, -+ <0 164 4>, -+ <0 165 4>, -+ <0 166 4>, -+ <0 167 4>, -+ <0 168 4>, -+ <0 169 4>, -+ <0 170 4>, -+ <0 171 4>, -+ <0 172 4>, -+ <0 173 4>, -+ <0 174 4>, -+ <0 175 4>, -+ <0 176 4>, -+ <0 177 4>, -+ <0 178 4>, -+ <0 179 4>, -+ <0 180 4>, -+ <0 181 4>, -+ <0 182 4>, -+ <0 183 4>, -+ <0 184 4>, -+ <0 185 4>, -+ <0 186 4>, -+ <0 187 4>, -+ <0 188 4>, -+ <0 189 4>, -+ <0 190 4>, -+ <0 191 4>, -+ <0 192 4>, -+ <0 193 4>, -+ <0 194 4>, -+ <0 195 4>, -+ <0 196 4>, -+ <0 197 4>, -+ <0 198 4>, -+ <0 199 4>, -+ <0 200 4>, -+ <0 201 4>, -+ <0 202 4>, -+ <0 203 4>, -+ <0 204 4>, -+ <0 205 4>, -+ <0 206 4>, -+ <0 207 4>, -+ <0 208 4>, -+ <0 209 4>; -+ mmu-masters = <&fsl_mc 0x300 0>; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = <1 13 0x1>, /* Physical Secure PPI, edge triggered */ -+ <1 14 0x1>, /* Physical Non-Secure PPI, edge triggered */ -+ <1 11 0x1>, /* Virtual PPI, edge triggered */ -+ <1 10 0x1>; /* Hypervisor PPI, edge triggered */ -+ arm,reread-timer; -+ fsl,erratum-a008585; -+ }; -+ -+ fsl_mc: fsl-mc@80c000000 { -+ compatible = "fsl,qoriq-mc"; -+ #stream-id-cells = 
<2>; -+ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ -+ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ -+ msi-parent = <&its>; -+ #address-cells = <3>; -+ #size-cells = <1>; -+ -+ /* -+ * Region type 0x0 - MC portals -+ * Region type 0x1 - QBMAN portals -+ */ -+ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000 -+ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>; -+ -+ /* -+ * Define the maximum number of MACs present on the SoC. -+ * They won't necessarily be all probed, since the -+ * Data Path Layout file and the MC firmware can put fewer -+ * actual DPMAC objects on the MC bus. -+ */ -+ dpmacs { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ dpmac1: dpmac@1 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <1>; -+ }; -+ dpmac2: dpmac@2 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <2>; -+ }; -+ dpmac3: dpmac@3 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <3>; -+ }; -+ dpmac4: dpmac@4 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <4>; -+ }; -+ dpmac5: dpmac@5 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <5>; -+ }; -+ dpmac6: dpmac@6 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <6>; -+ }; -+ dpmac7: dpmac@7 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <7>; -+ }; -+ dpmac8: dpmac@8 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <8>; -+ }; -+ dpmac9: dpmac@9 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <9>; -+ }; -+ dpmac10: dpmac@10 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xa>; -+ }; -+ dpmac11: dpmac@11 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xb>; -+ }; -+ dpmac12: dpmac@12 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xc>; -+ }; -+ dpmac13: dpmac@13 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xd>; -+ }; -+ dpmac14: dpmac@14 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xe>; -+ }; -+ dpmac15: dpmac@15 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xf>; -+ }; -+ dpmac16: dpmac@16 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0x10>; -+ }; -+ }; -+ }; -+ -+ ccn@4000000 { -+ 
compatible = "arm,ccn-504"; -+ reg = <0x0 0x04000000 0x0 0x01000000>; -+ interrupts = <0 12 4>; -+ }; -+ -+ memory@80000000 { -+ device_type = "memory"; -+ reg = <0x00000000 0x80000000 0 0x80000000>; -+ /* DRAM space 1 - 2 GB DRAM */ -+ }; -+}; -diff --git a/arch/arm64/boot/dts/include/dt-bindings b/arch/arm64/boot/dts/include/dt-bindings -new file mode 120000 -index 0000000..08c00e4 ---- /dev/null -+++ b/arch/arm64/boot/dts/include/dt-bindings -@@ -0,0 +1 @@ -+../../../../../include/dt-bindings -\ No newline at end of file -diff --git a/arch/arm64/boot/dts/thermal.h b/arch/arm64/boot/dts/thermal.h -new file mode 100644 -index 0000000..59822a9 ---- /dev/null -+++ b/arch/arm64/boot/dts/thermal.h -@@ -0,0 +1,17 @@ -+/* -+ * This header provides constants for most thermal bindings. -+ * -+ * Copyright (C) 2013 Texas Instruments -+ * Eduardo Valentin -+ * -+ * GPLv2 only -+ */ -+ -+#ifndef _DT_BINDINGS_THERMAL_THERMAL_H -+#define _DT_BINDINGS_THERMAL_THERMAL_H -+ -+/* On cooling devices upper and lower limits */ -+#define THERMAL_NO_LIMIT (-1UL) -+ -+#endif -+ -diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig -index dd301be..3852a77 100644 ---- a/arch/arm64/configs/defconfig -+++ b/arch/arm64/configs/defconfig -@@ -32,6 +32,7 @@ CONFIG_MODULES=y - CONFIG_MODULE_UNLOAD=y - # CONFIG_BLK_DEV_BSG is not set - # CONFIG_IOSCHED_DEADLINE is not set -+CONFIG_ARCH_LAYERSCAPE=y - CONFIG_ARCH_THUNDER=y - CONFIG_ARCH_VEXPRESS=y - CONFIG_ARCH_XGENE=y -diff --git a/arch/arm64/configs/nxp_ls2088rdb_config b/arch/arm64/configs/nxp_ls2088rdb_config -new file mode 100644 -index 0000000..f1127f9 ---- /dev/null -+++ b/arch/arm64/configs/nxp_ls2088rdb_config -@@ -0,0 +1,3034 @@ -+# -+# Automatically generated file; DO NOT EDIT. 
-+# Linux/arm64 3.18.25 Kernel Configuration -+# -+CONFIG_ARM64=y -+CONFIG_64BIT=y -+CONFIG_ARCH_PHYS_ADDR_T_64BIT=y -+CONFIG_MMU=y -+CONFIG_STACKTRACE_SUPPORT=y -+CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 -+CONFIG_LOCKDEP_SUPPORT=y -+CONFIG_TRACE_IRQFLAGS_SUPPORT=y -+CONFIG_RWSEM_XCHGADD_ALGORITHM=y -+CONFIG_GENERIC_HWEIGHT=y -+CONFIG_GENERIC_CSUM=y -+CONFIG_GENERIC_CALIBRATE_DELAY=y -+CONFIG_ZONE_DMA=y -+CONFIG_HAVE_GENERIC_RCU_GUP=y -+CONFIG_ARCH_DMA_ADDR_T_64BIT=y -+CONFIG_NEED_DMA_MAP_STATE=y -+CONFIG_NEED_SG_DMA_LENGTH=y -+CONFIG_SWIOTLB=y -+CONFIG_IOMMU_HELPER=y -+CONFIG_KERNEL_MODE_NEON=y -+CONFIG_FIX_EARLYCON_MEM=y -+CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" -+CONFIG_IRQ_WORK=y -+CONFIG_BUILDTIME_EXTABLE_SORT=y -+ -+# -+# General setup -+# -+CONFIG_INIT_ENV_ARG_LIMIT=32 -+CONFIG_CROSS_COMPILE="aarch64-linux-gnu-" -+# CONFIG_COMPILE_TEST is not set -+CONFIG_LOCALVERSION="" -+CONFIG_LOCALVERSION_AUTO=y -+CONFIG_DEFAULT_HOSTNAME="(none)" -+CONFIG_SWAP=y -+CONFIG_SYSVIPC=y -+CONFIG_SYSVIPC_SYSCTL=y -+CONFIG_POSIX_MQUEUE=y -+CONFIG_POSIX_MQUEUE_SYSCTL=y -+CONFIG_CROSS_MEMORY_ATTACH=y -+# CONFIG_FHANDLE is not set -+CONFIG_USELIB=y -+CONFIG_AUDIT=y -+CONFIG_HAVE_ARCH_AUDITSYSCALL=y -+# CONFIG_AUDITSYSCALL is not set -+ -+# -+# IRQ subsystem -+# -+CONFIG_GENERIC_IRQ_PROBE=y -+CONFIG_GENERIC_IRQ_SHOW=y -+CONFIG_HARDIRQS_SW_RESEND=y -+CONFIG_IRQ_DOMAIN=y -+CONFIG_IRQ_DOMAIN_HIERARCHY=y -+CONFIG_GENERIC_MSI_IRQ=y -+CONFIG_GENERIC_MSI_IRQ_DOMAIN=y -+CONFIG_HANDLE_DOMAIN_IRQ=y -+# CONFIG_IRQ_DOMAIN_DEBUG is not set -+CONFIG_SPARSE_IRQ=y -+CONFIG_GENERIC_TIME_VSYSCALL=y -+CONFIG_GENERIC_CLOCKEVENTS=y -+CONFIG_GENERIC_CLOCKEVENTS_BUILD=y -+CONFIG_ARCH_HAS_TICK_BROADCAST=y -+CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y -+ -+# -+# Timers subsystem -+# -+CONFIG_TICK_ONESHOT=y -+CONFIG_NO_HZ_COMMON=y -+# CONFIG_HZ_PERIODIC is not set -+CONFIG_NO_HZ_IDLE=y -+# CONFIG_NO_HZ_FULL is not set -+# CONFIG_NO_HZ is not set -+CONFIG_HIGH_RES_TIMERS=y -+ -+# -+# 
CPU/Task time and stats accounting -+# -+CONFIG_TICK_CPU_ACCOUNTING=y -+# CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set -+CONFIG_BSD_PROCESS_ACCT=y -+CONFIG_BSD_PROCESS_ACCT_V3=y -+CONFIG_TASKSTATS=y -+CONFIG_TASK_DELAY_ACCT=y -+CONFIG_TASK_XACCT=y -+CONFIG_TASK_IO_ACCOUNTING=y -+ -+# -+# RCU Subsystem -+# -+CONFIG_TREE_PREEMPT_RCU=y -+CONFIG_PREEMPT_RCU=y -+# CONFIG_TASKS_RCU is not set -+CONFIG_RCU_STALL_COMMON=y -+# CONFIG_RCU_USER_QS is not set -+CONFIG_RCU_FANOUT=64 -+CONFIG_RCU_FANOUT_LEAF=16 -+# CONFIG_RCU_FANOUT_EXACT is not set -+# CONFIG_RCU_FAST_NO_HZ is not set -+# CONFIG_TREE_RCU_TRACE is not set -+# CONFIG_RCU_BOOST is not set -+# CONFIG_RCU_NOCB_CPU is not set -+CONFIG_BUILD_BIN2C=y -+CONFIG_IKCONFIG=y -+CONFIG_IKCONFIG_PROC=y -+CONFIG_LOG_BUF_SHIFT=14 -+CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 -+CONFIG_GENERIC_SCHED_CLOCK=y -+CONFIG_CGROUPS=y -+# CONFIG_CGROUP_DEBUG is not set -+# CONFIG_CGROUP_FREEZER is not set -+# CONFIG_CGROUP_DEVICE is not set -+# CONFIG_CPUSETS is not set -+# CONFIG_CGROUP_CPUACCT is not set -+CONFIG_RESOURCE_COUNTERS=y -+CONFIG_MEMCG=y -+CONFIG_MEMCG_SWAP=y -+CONFIG_MEMCG_SWAP_ENABLED=y -+CONFIG_MEMCG_KMEM=y -+CONFIG_CGROUP_HUGETLB=y -+# CONFIG_CGROUP_PERF is not set -+CONFIG_CGROUP_SCHED=y -+CONFIG_FAIR_GROUP_SCHED=y -+# CONFIG_CFS_BANDWIDTH is not set -+# CONFIG_RT_GROUP_SCHED is not set -+# CONFIG_BLK_CGROUP is not set -+# CONFIG_CHECKPOINT_RESTORE is not set -+CONFIG_NAMESPACES=y -+# CONFIG_UTS_NS is not set -+# CONFIG_IPC_NS is not set -+# CONFIG_USER_NS is not set -+# CONFIG_PID_NS is not set -+CONFIG_NET_NS=y -+CONFIG_SCHED_AUTOGROUP=y -+# CONFIG_SYSFS_DEPRECATED is not set -+# CONFIG_RELAY is not set -+CONFIG_BLK_DEV_INITRD=y -+CONFIG_INITRAMFS_SOURCE="" -+CONFIG_RD_GZIP=y -+CONFIG_RD_BZIP2=y -+CONFIG_RD_LZMA=y -+CONFIG_RD_XZ=y -+CONFIG_RD_LZO=y -+CONFIG_RD_LZ4=y -+# CONFIG_CC_OPTIMIZE_FOR_SIZE is not set -+CONFIG_SYSCTL=y -+CONFIG_ANON_INODES=y -+CONFIG_HAVE_UID16=y -+CONFIG_SYSCTL_EXCEPTION_TRACE=y -+CONFIG_BPF=y -+# 
CONFIG_EXPERT is not set -+CONFIG_UID16=y -+# CONFIG_SGETMASK_SYSCALL is not set -+CONFIG_SYSFS_SYSCALL=y -+# CONFIG_SYSCTL_SYSCALL is not set -+CONFIG_KALLSYMS=y -+CONFIG_KALLSYMS_ALL=y -+CONFIG_PRINTK=y -+CONFIG_BUG=y -+CONFIG_ELF_CORE=y -+CONFIG_BASE_FULL=y -+CONFIG_FUTEX=y -+CONFIG_EPOLL=y -+CONFIG_SIGNALFD=y -+CONFIG_TIMERFD=y -+CONFIG_EVENTFD=y -+# CONFIG_BPF_SYSCALL is not set -+CONFIG_SHMEM=y -+CONFIG_AIO=y -+CONFIG_ADVISE_SYSCALLS=y -+CONFIG_PCI_QUIRKS=y -+# CONFIG_EMBEDDED is not set -+CONFIG_HAVE_PERF_EVENTS=y -+CONFIG_PERF_USE_VMALLOC=y -+ -+# -+# Kernel Performance Events And Counters -+# -+CONFIG_PERF_EVENTS=y -+# CONFIG_DEBUG_PERF_USE_VMALLOC is not set -+CONFIG_VM_EVENT_COUNTERS=y -+# CONFIG_COMPAT_BRK is not set -+CONFIG_SLAB=y -+# CONFIG_SLUB is not set -+# CONFIG_SYSTEM_TRUSTED_KEYRING is not set -+CONFIG_PROFILING=y -+CONFIG_JUMP_LABEL=y -+# CONFIG_UPROBES is not set -+# CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set -+CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y -+CONFIG_HAVE_ARCH_TRACEHOOK=y -+CONFIG_HAVE_DMA_ATTRS=y -+CONFIG_HAVE_DMA_CONTIGUOUS=y -+CONFIG_GENERIC_SMP_IDLE_THREAD=y -+CONFIG_HAVE_CLK=y -+CONFIG_HAVE_DMA_API_DEBUG=y -+CONFIG_HAVE_HW_BREAKPOINT=y -+CONFIG_HAVE_PERF_REGS=y -+CONFIG_HAVE_PERF_USER_STACK_DUMP=y -+CONFIG_HAVE_ARCH_JUMP_LABEL=y -+CONFIG_HAVE_RCU_TABLE_FREE=y -+CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y -+CONFIG_HAVE_CC_STACKPROTECTOR=y -+# CONFIG_CC_STACKPROTECTOR is not set -+CONFIG_CC_STACKPROTECTOR_NONE=y -+# CONFIG_CC_STACKPROTECTOR_REGULAR is not set -+# CONFIG_CC_STACKPROTECTOR_STRONG is not set -+CONFIG_HAVE_CONTEXT_TRACKING=y -+CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -+CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -+CONFIG_MODULES_USE_ELF_RELA=y -+CONFIG_CLONE_BACKWARDS=y -+CONFIG_OLD_SIGSUSPEND3=y -+CONFIG_COMPAT_OLD_SIGACTION=y -+ -+# -+# GCOV-based kernel profiling -+# -+# CONFIG_GCOV_KERNEL is not set -+CONFIG_HAVE_GENERIC_DMA_COHERENT=y -+CONFIG_SLABINFO=y -+CONFIG_RT_MUTEXES=y -+CONFIG_BASE_SMALL=0 -+CONFIG_MODULES=y 
-+CONFIG_MODULE_FORCE_LOAD=y -+CONFIG_MODULE_UNLOAD=y -+# CONFIG_MODULE_FORCE_UNLOAD is not set -+CONFIG_MODVERSIONS=y -+# CONFIG_MODULE_SRCVERSION_ALL is not set -+# CONFIG_MODULE_SIG is not set -+# CONFIG_MODULE_COMPRESS is not set -+CONFIG_STOP_MACHINE=y -+CONFIG_BLOCK=y -+# CONFIG_BLK_DEV_BSG is not set -+# CONFIG_BLK_DEV_BSGLIB is not set -+# CONFIG_BLK_DEV_INTEGRITY is not set -+# CONFIG_BLK_CMDLINE_PARSER is not set -+ -+# -+# Partition Types -+# -+CONFIG_PARTITION_ADVANCED=y -+# CONFIG_ACORN_PARTITION is not set -+# CONFIG_AIX_PARTITION is not set -+# CONFIG_OSF_PARTITION is not set -+# CONFIG_AMIGA_PARTITION is not set -+# CONFIG_ATARI_PARTITION is not set -+# CONFIG_MAC_PARTITION is not set -+CONFIG_MSDOS_PARTITION=y -+# CONFIG_BSD_DISKLABEL is not set -+# CONFIG_MINIX_SUBPARTITION is not set -+# CONFIG_SOLARIS_X86_PARTITION is not set -+# CONFIG_UNIXWARE_DISKLABEL is not set -+# CONFIG_LDM_PARTITION is not set -+# CONFIG_SGI_PARTITION is not set -+# CONFIG_ULTRIX_PARTITION is not set -+# CONFIG_SUN_PARTITION is not set -+# CONFIG_KARMA_PARTITION is not set -+CONFIG_EFI_PARTITION=y -+# CONFIG_SYSV68_PARTITION is not set -+# CONFIG_CMDLINE_PARTITION is not set -+CONFIG_BLOCK_COMPAT=y -+ -+# -+# IO Schedulers -+# -+CONFIG_IOSCHED_NOOP=y -+# CONFIG_IOSCHED_DEADLINE is not set -+CONFIG_IOSCHED_CFQ=y -+CONFIG_DEFAULT_CFQ=y -+# CONFIG_DEFAULT_NOOP is not set -+CONFIG_DEFAULT_IOSCHED="cfq" -+CONFIG_PREEMPT_NOTIFIERS=y -+CONFIG_UNINLINE_SPIN_UNLOCK=y -+CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y -+CONFIG_MUTEX_SPIN_ON_OWNER=y -+CONFIG_RWSEM_SPIN_ON_OWNER=y -+CONFIG_FREEZER=y -+ -+# -+# Platform selection -+# -+CONFIG_ARCH_THUNDER=y -+CONFIG_ARCH_VEXPRESS=y -+CONFIG_ARCH_XGENE=y -+CONFIG_ARCH_LAYERSCAPE=y -+ -+# -+# Bus support -+# -+CONFIG_ARM_AMBA=y -+CONFIG_PCI=y -+CONFIG_PCI_DOMAINS=y -+CONFIG_PCI_DOMAINS_GENERIC=y -+CONFIG_PCI_SYSCALL=y -+CONFIG_PCI_MSI=y -+CONFIG_PCI_MSI_IRQ_DOMAIN=y -+# CONFIG_PCI_DEBUG is not set -+# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set -+# 
CONFIG_PCI_STUB is not set -+# CONFIG_PCI_IOV is not set -+# CONFIG_PCI_PRI is not set -+# CONFIG_PCI_PASID is not set -+ -+# -+# PCI host controller drivers -+# -+CONFIG_PCIE_DW=y -+# CONFIG_PCI_HOST_GENERIC is not set -+CONFIG_PCI_XGENE=y -+CONFIG_PCI_XGENE_MSI=y -+CONFIG_PCI_LAYERSCAPE=y -+CONFIG_PCIEPORTBUS=y -+CONFIG_PCIEAER=y -+# CONFIG_PCIE_ECRC is not set -+# CONFIG_PCIEAER_INJECT is not set -+CONFIG_PCIEASPM=y -+# CONFIG_PCIEASPM_DEBUG is not set -+CONFIG_PCIEASPM_DEFAULT=y -+# CONFIG_PCIEASPM_POWERSAVE is not set -+# CONFIG_PCIEASPM_PERFORMANCE is not set -+# CONFIG_HOTPLUG_PCI is not set -+ -+# -+# Kernel Features -+# -+ -+# -+# ARM errata workarounds via the alternatives framework -+# -+CONFIG_ARM64_ERRATUM_826319=y -+CONFIG_ARM64_ERRATUM_827319=y -+CONFIG_ARM64_ERRATUM_824069=y -+CONFIG_ARM64_ERRATUM_819472=y -+CONFIG_ARM64_ERRATUM_832075=y -+CONFIG_ARM64_ERRATUM_845719=y -+CONFIG_ARM64_4K_PAGES=y -+# CONFIG_ARM64_64K_PAGES is not set -+# CONFIG_ARM64_VA_BITS_39 is not set -+CONFIG_ARM64_VA_BITS_48=y -+CONFIG_ARM64_VA_BITS=48 -+CONFIG_ARM64_PGTABLE_LEVELS=4 -+# CONFIG_CPU_BIG_ENDIAN is not set -+CONFIG_SMP=y -+# CONFIG_SCHED_MC is not set -+# CONFIG_SCHED_SMT is not set -+CONFIG_NR_CPUS=64 -+CONFIG_HOTPLUG_CPU=y -+# CONFIG_PREEMPT_NONE is not set -+# CONFIG_PREEMPT_VOLUNTARY is not set -+CONFIG_PREEMPT=y -+CONFIG_PREEMPT_COUNT=y -+CONFIG_HZ=100 -+CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y -+CONFIG_ARCH_SPARSEMEM_ENABLE=y -+CONFIG_ARCH_SPARSEMEM_DEFAULT=y -+CONFIG_ARCH_SELECT_MEMORY_MODEL=y -+CONFIG_HAVE_ARCH_PFN_VALID=y -+CONFIG_HW_PERF_EVENTS=y -+CONFIG_SYS_SUPPORTS_HUGETLBFS=y -+CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -+CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -+CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -+CONFIG_SELECT_MEMORY_MODEL=y -+CONFIG_SPARSEMEM_MANUAL=y -+CONFIG_SPARSEMEM=y -+CONFIG_HAVE_MEMORY_PRESENT=y -+CONFIG_SPARSEMEM_EXTREME=y -+CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -+CONFIG_SPARSEMEM_VMEMMAP=y -+CONFIG_HAVE_MEMBLOCK=y -+CONFIG_NO_BOOTMEM=y 
-+CONFIG_MEMORY_ISOLATION=y -+# CONFIG_HAVE_BOOTMEM_INFO_NODE is not set -+CONFIG_PAGEFLAGS_EXTENDED=y -+CONFIG_SPLIT_PTLOCK_CPUS=4 -+CONFIG_MEMORY_BALLOON=y -+CONFIG_BALLOON_COMPACTION=y -+CONFIG_COMPACTION=y -+CONFIG_MIGRATION=y -+CONFIG_PHYS_ADDR_T_64BIT=y -+CONFIG_ZONE_DMA_FLAG=1 -+CONFIG_BOUNCE=y -+CONFIG_MMU_NOTIFIER=y -+CONFIG_KSM=y -+CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -+CONFIG_TRANSPARENT_HUGEPAGE=y -+CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -+# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set -+# CONFIG_CLEANCACHE is not set -+# CONFIG_FRONTSWAP is not set -+CONFIG_CMA=y -+# CONFIG_CMA_DEBUG is not set -+CONFIG_CMA_AREAS=7 -+# CONFIG_ZPOOL is not set -+# CONFIG_ZBUD is not set -+# CONFIG_ZSMALLOC is not set -+CONFIG_GENERIC_EARLY_IOREMAP=y -+# CONFIG_XEN is not set -+CONFIG_FORCE_MAX_ZONEORDER=11 -+ -+# -+# Boot options -+# -+CONFIG_CMDLINE="console=ttyAMA0" -+# CONFIG_CMDLINE_FORCE is not set -+CONFIG_EFI_STUB=y -+CONFIG_EFI=y -+ -+# -+# Userspace binary formats -+# -+CONFIG_BINFMT_ELF=y -+CONFIG_COMPAT_BINFMT_ELF=y -+CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y -+# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -+CONFIG_BINFMT_SCRIPT=y -+# CONFIG_HAVE_AOUT is not set -+# CONFIG_BINFMT_MISC is not set -+CONFIG_COREDUMP=y -+CONFIG_COMPAT=y -+CONFIG_SYSVIPC_COMPAT=y -+ -+# -+# Power management options -+# -+CONFIG_SUSPEND=y -+CONFIG_SUSPEND_FREEZER=y -+CONFIG_PM_SLEEP=y -+CONFIG_PM_SLEEP_SMP=y -+# CONFIG_PM_AUTOSLEEP is not set -+# CONFIG_PM_WAKELOCKS is not set -+# CONFIG_PM_RUNTIME is not set -+CONFIG_PM=y -+# CONFIG_PM_DEBUG is not set -+CONFIG_PM_CLK=y -+# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -+CONFIG_CPU_PM=y -+CONFIG_ARCH_SUSPEND_POSSIBLE=y -+CONFIG_ARM64_CPU_SUSPEND=y -+ -+# -+# CPU Power Management -+# -+ -+# -+# CPU Idle -+# -+# CONFIG_CPU_IDLE is not set -+# CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set -+ -+# -+# CPU Frequency scaling -+# -+CONFIG_CPU_FREQ=y -+CONFIG_CPU_FREQ_GOV_COMMON=y -+CONFIG_CPU_FREQ_STAT=y -+# CONFIG_CPU_FREQ_STAT_DETAILS is 
not set -+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -+# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -+# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -+# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -+CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -+CONFIG_CPU_FREQ_GOV_POWERSAVE=y -+CONFIG_CPU_FREQ_GOV_USERSPACE=y -+CONFIG_CPU_FREQ_GOV_ONDEMAND=y -+CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -+# CONFIG_CPUFREQ_DT is not set -+ -+# -+# ARM CPU frequency scaling drivers -+# -+# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set -+CONFIG_ARM64_ERRATUM_843419=y -+CONFIG_NET=y -+ -+# -+# Networking options -+# -+CONFIG_PACKET=y -+# CONFIG_PACKET_DIAG is not set -+CONFIG_UNIX=y -+# CONFIG_UNIX_DIAG is not set -+CONFIG_XFRM=y -+CONFIG_XFRM_ALGO=y -+# CONFIG_XFRM_USER is not set -+# CONFIG_XFRM_SUB_POLICY is not set -+# CONFIG_XFRM_MIGRATE is not set -+# CONFIG_XFRM_STATISTICS is not set -+CONFIG_XFRM_IPCOMP=y -+# CONFIG_NET_KEY is not set -+CONFIG_INET=y -+CONFIG_IP_MULTICAST=y -+CONFIG_IP_ADVANCED_ROUTER=y -+CONFIG_IP_FIB_TRIE_STATS=y -+CONFIG_IP_MULTIPLE_TABLES=y -+CONFIG_IP_ROUTE_MULTIPATH=y -+# CONFIG_IP_ROUTE_VERBOSE is not set -+CONFIG_IP_PNP=y -+CONFIG_IP_PNP_DHCP=y -+CONFIG_IP_PNP_BOOTP=y -+# CONFIG_IP_PNP_RARP is not set -+# CONFIG_NET_IPIP is not set -+# CONFIG_NET_IPGRE_DEMUX is not set -+CONFIG_NET_IP_TUNNEL=y -+CONFIG_IP_MROUTE=y -+# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set -+# CONFIG_IP_PIMSM_V1 is not set -+CONFIG_IP_PIMSM_V2=y -+# CONFIG_SYN_COOKIES is not set -+# CONFIG_NET_IPVTI is not set -+# CONFIG_NET_UDP_TUNNEL is not set -+# CONFIG_NET_FOU is not set -+# CONFIG_GENEVE is not set -+# CONFIG_INET_AH is not set -+# CONFIG_INET_ESP is not set -+# CONFIG_INET_IPCOMP is not set -+# CONFIG_INET_XFRM_TUNNEL is not set -+CONFIG_INET_TUNNEL=y -+CONFIG_INET_XFRM_MODE_TRANSPORT=y -+CONFIG_INET_XFRM_MODE_TUNNEL=y -+CONFIG_INET_XFRM_MODE_BEET=y -+# CONFIG_INET_LRO is not set -+CONFIG_INET_DIAG=y -+CONFIG_INET_TCP_DIAG=y -+# CONFIG_INET_UDP_DIAG is not set 
-+CONFIG_TCP_CONG_ADVANCED=y -+CONFIG_TCP_CONG_BIC=y -+CONFIG_TCP_CONG_CUBIC=y -+CONFIG_TCP_CONG_WESTWOOD=y -+CONFIG_TCP_CONG_HTCP=y -+# CONFIG_TCP_CONG_HSTCP is not set -+# CONFIG_TCP_CONG_HYBLA is not set -+# CONFIG_TCP_CONG_VEGAS is not set -+# CONFIG_TCP_CONG_SCALABLE is not set -+# CONFIG_TCP_CONG_LP is not set -+# CONFIG_TCP_CONG_VENO is not set -+# CONFIG_TCP_CONG_YEAH is not set -+# CONFIG_TCP_CONG_ILLINOIS is not set -+# CONFIG_TCP_CONG_DCTCP is not set -+# CONFIG_DEFAULT_BIC is not set -+CONFIG_DEFAULT_CUBIC=y -+# CONFIG_DEFAULT_HTCP is not set -+# CONFIG_DEFAULT_WESTWOOD is not set -+# CONFIG_DEFAULT_RENO is not set -+CONFIG_DEFAULT_TCP_CONG="cubic" -+# CONFIG_TCP_MD5SIG is not set -+CONFIG_IPV6=y -+CONFIG_IPV6_ROUTER_PREF=y -+CONFIG_IPV6_ROUTE_INFO=y -+CONFIG_IPV6_OPTIMISTIC_DAD=y -+CONFIG_INET6_AH=y -+CONFIG_INET6_ESP=y -+CONFIG_INET6_IPCOMP=y -+CONFIG_IPV6_MIP6=y -+CONFIG_INET6_XFRM_TUNNEL=y -+CONFIG_INET6_TUNNEL=y -+CONFIG_INET6_XFRM_MODE_TRANSPORT=y -+CONFIG_INET6_XFRM_MODE_TUNNEL=y -+CONFIG_INET6_XFRM_MODE_BEET=y -+CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y -+# CONFIG_IPV6_VTI is not set -+CONFIG_IPV6_SIT=y -+# CONFIG_IPV6_SIT_6RD is not set -+CONFIG_IPV6_NDISC_NODETYPE=y -+CONFIG_IPV6_TUNNEL=y -+# CONFIG_IPV6_GRE is not set -+CONFIG_IPV6_MULTIPLE_TABLES=y -+CONFIG_IPV6_SUBTREES=y -+# CONFIG_IPV6_MROUTE is not set -+# CONFIG_NETLABEL is not set -+# CONFIG_NETWORK_SECMARK is not set -+CONFIG_NET_PTP_CLASSIFY=y -+# CONFIG_NETWORK_PHY_TIMESTAMPING is not set -+# CONFIG_NETFILTER is not set -+# CONFIG_IP_DCCP is not set -+# CONFIG_IP_SCTP is not set -+# CONFIG_RDS is not set -+# CONFIG_TIPC is not set -+# CONFIG_ATM is not set -+# CONFIG_L2TP is not set -+CONFIG_STP=m -+CONFIG_BRIDGE=m -+CONFIG_BRIDGE_IGMP_SNOOPING=y -+# CONFIG_BRIDGE_VLAN_FILTERING is not set -+CONFIG_HAVE_NET_DSA=y -+CONFIG_VLAN_8021Q=y -+# CONFIG_VLAN_8021Q_GVRP is not set -+# CONFIG_VLAN_8021Q_MVRP is not set -+# CONFIG_DECNET is not set -+CONFIG_LLC=m -+# CONFIG_LLC2 is not set 
-+# CONFIG_IPX is not set -+# CONFIG_ATALK is not set -+# CONFIG_X25 is not set -+# CONFIG_LAPB is not set -+# CONFIG_PHONET is not set -+# CONFIG_6LOWPAN is not set -+# CONFIG_IEEE802154 is not set -+CONFIG_NET_SCHED=y -+ -+# -+# Queueing/Scheduling -+# -+# CONFIG_NET_SCH_CBQ is not set -+# CONFIG_NET_SCH_HTB is not set -+# CONFIG_NET_SCH_HFSC is not set -+# CONFIG_NET_SCH_PRIO is not set -+# CONFIG_NET_SCH_MULTIQ is not set -+# CONFIG_NET_SCH_RED is not set -+# CONFIG_NET_SCH_SFB is not set -+# CONFIG_NET_SCH_SFQ is not set -+# CONFIG_NET_SCH_TEQL is not set -+# CONFIG_NET_SCH_TBF is not set -+# CONFIG_NET_SCH_GRED is not set -+# CONFIG_NET_SCH_DSMARK is not set -+# CONFIG_NET_SCH_NETEM is not set -+# CONFIG_NET_SCH_DRR is not set -+# CONFIG_NET_SCH_MQPRIO is not set -+# CONFIG_NET_SCH_CHOKE is not set -+# CONFIG_NET_SCH_QFQ is not set -+# CONFIG_NET_SCH_CODEL is not set -+# CONFIG_NET_SCH_FQ_CODEL is not set -+# CONFIG_NET_SCH_FQ is not set -+# CONFIG_NET_SCH_HHF is not set -+# CONFIG_NET_SCH_PIE is not set -+# CONFIG_NET_SCH_PLUG is not set -+ -+# -+# Classification -+# -+# CONFIG_NET_CLS_BASIC is not set -+# CONFIG_NET_CLS_TCINDEX is not set -+# CONFIG_NET_CLS_ROUTE4 is not set -+# CONFIG_NET_CLS_FW is not set -+# CONFIG_NET_CLS_U32 is not set -+# CONFIG_NET_CLS_RSVP is not set -+# CONFIG_NET_CLS_RSVP6 is not set -+# CONFIG_NET_CLS_FLOW is not set -+# CONFIG_NET_CLS_CGROUP is not set -+# CONFIG_NET_CLS_BPF is not set -+# CONFIG_NET_EMATCH is not set -+# CONFIG_NET_CLS_ACT is not set -+CONFIG_NET_SCH_FIFO=y -+CONFIG_DCB=y -+CONFIG_DNS_RESOLVER=y -+# CONFIG_BATMAN_ADV is not set -+# CONFIG_OPENVSWITCH is not set -+# CONFIG_VSOCKETS is not set -+# CONFIG_NETLINK_MMAP is not set -+# CONFIG_NETLINK_DIAG is not set -+# CONFIG_NET_MPLS_GSO is not set -+# CONFIG_HSR is not set -+CONFIG_RPS=y -+CONFIG_RFS_ACCEL=y -+CONFIG_XPS=y -+# CONFIG_CGROUP_NET_PRIO is not set -+# CONFIG_CGROUP_NET_CLASSID is not set -+CONFIG_NET_RX_BUSY_POLL=y -+CONFIG_BQL=y -+CONFIG_BPF_JIT=y 
-+CONFIG_NET_FLOW_LIMIT=y -+ -+# -+# Network testing -+# -+# CONFIG_NET_PKTGEN is not set -+# CONFIG_HAMRADIO is not set -+# CONFIG_CAN is not set -+# CONFIG_IRDA is not set -+# CONFIG_BT is not set -+# CONFIG_AF_RXRPC is not set -+CONFIG_FIB_RULES=y -+# CONFIG_WIRELESS is not set -+# CONFIG_WIMAX is not set -+# CONFIG_RFKILL is not set -+# CONFIG_RFKILL_REGULATOR is not set -+CONFIG_NET_9P=y -+CONFIG_NET_9P_VIRTIO=y -+# CONFIG_NET_9P_DEBUG is not set -+# CONFIG_CAIF is not set -+# CONFIG_CEPH_LIB is not set -+# CONFIG_NFC is not set -+CONFIG_HAVE_BPF_JIT=y -+ -+# -+# Device Drivers -+# -+ -+# -+# Generic Driver Options -+# -+CONFIG_UEVENT_HELPER=y -+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" -+CONFIG_DEVTMPFS=y -+CONFIG_DEVTMPFS_MOUNT=y -+CONFIG_STANDALONE=y -+CONFIG_PREVENT_FIRMWARE_BUILD=y -+CONFIG_FW_LOADER=y -+CONFIG_FIRMWARE_IN_KERNEL=y -+CONFIG_EXTRA_FIRMWARE="" -+# CONFIG_FW_LOADER_USER_HELPER_FALLBACK is not set -+CONFIG_ALLOW_DEV_COREDUMP=y -+# CONFIG_DEBUG_DRIVER is not set -+# CONFIG_DEBUG_DEVRES is not set -+# CONFIG_SYS_HYPERVISOR is not set -+# CONFIG_GENERIC_CPU_DEVICES is not set -+CONFIG_GENERIC_CPU_AUTOPROBE=y -+CONFIG_REGMAP=y -+CONFIG_REGMAP_MMIO=y -+# CONFIG_DMA_SHARED_BUFFER is not set -+CONFIG_DMA_CMA=y -+ -+# -+# Default contiguous memory area size: -+# -+CONFIG_CMA_SIZE_MBYTES=16 -+CONFIG_CMA_SIZE_SEL_MBYTES=y -+# CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set -+# CONFIG_CMA_SIZE_SEL_MIN is not set -+# CONFIG_CMA_SIZE_SEL_MAX is not set -+CONFIG_CMA_ALIGNMENT=8 -+ -+# -+# Bus devices -+# -+# CONFIG_ARM_CCN is not set -+CONFIG_VEXPRESS_CONFIG=y -+# CONFIG_CONNECTOR is not set -+CONFIG_MTD=y -+# CONFIG_MTD_TESTS is not set -+# CONFIG_MTD_REDBOOT_PARTS is not set -+CONFIG_MTD_CMDLINE_PARTS=y -+CONFIG_MTD_OF_PARTS=y -+# CONFIG_MTD_AR7_PARTS is not set -+ -+# -+# User Modules And Translation Layers -+# -+CONFIG_MTD_BLKDEVS=y -+CONFIG_MTD_BLOCK=y -+CONFIG_FTL=y -+# CONFIG_NFTL is not set -+# CONFIG_INFTL is not set -+# CONFIG_RFD_FTL is not set -+# 
CONFIG_SSFDC is not set -+# CONFIG_SM_FTL is not set -+# CONFIG_MTD_OOPS is not set -+# CONFIG_MTD_SWAP is not set -+ -+# -+# RAM/ROM/Flash chip drivers -+# -+CONFIG_MTD_CFI=y -+# CONFIG_MTD_JEDECPROBE is not set -+CONFIG_MTD_GEN_PROBE=y -+CONFIG_MTD_CFI_ADV_OPTIONS=y -+CONFIG_MTD_CFI_NOSWAP=y -+# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set -+# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set -+# CONFIG_MTD_CFI_GEOMETRY is not set -+CONFIG_MTD_MAP_BANK_WIDTH_1=y -+CONFIG_MTD_MAP_BANK_WIDTH_2=y -+CONFIG_MTD_MAP_BANK_WIDTH_4=y -+# CONFIG_MTD_MAP_BANK_WIDTH_8 is not set -+# CONFIG_MTD_MAP_BANK_WIDTH_16 is not set -+# CONFIG_MTD_MAP_BANK_WIDTH_32 is not set -+CONFIG_MTD_CFI_I1=y -+CONFIG_MTD_CFI_I2=y -+# CONFIG_MTD_CFI_I4 is not set -+# CONFIG_MTD_CFI_I8 is not set -+# CONFIG_MTD_OTP is not set -+CONFIG_MTD_CFI_INTELEXT=y -+CONFIG_MTD_CFI_AMDSTD=y -+CONFIG_MTD_CFI_STAA=y -+CONFIG_MTD_CFI_UTIL=y -+CONFIG_MTD_RAM=y -+# CONFIG_MTD_ROM is not set -+# CONFIG_MTD_ABSENT is not set -+ -+# -+# Mapping drivers for chip access -+# -+# CONFIG_MTD_COMPLEX_MAPPINGS is not set -+CONFIG_MTD_PHYSMAP=y -+# CONFIG_MTD_PHYSMAP_COMPAT is not set -+CONFIG_MTD_PHYSMAP_OF=y -+# CONFIG_MTD_INTEL_VR_NOR is not set -+CONFIG_MTD_PLATRAM=y -+ -+# -+# Self-contained MTD device drivers -+# -+# CONFIG_MTD_PMC551 is not set -+# CONFIG_MTD_DATAFLASH is not set -+CONFIG_MTD_M25P80=y -+# CONFIG_MTD_SST25L is not set -+# CONFIG_MTD_SLRAM is not set -+# CONFIG_MTD_PHRAM is not set -+# CONFIG_MTD_MTDRAM is not set -+# CONFIG_MTD_BLOCK2MTD is not set -+ -+# -+# Disk-On-Chip Device Drivers -+# -+# CONFIG_MTD_DOCG3 is not set -+CONFIG_MTD_NAND_ECC=y -+# CONFIG_MTD_NAND_ECC_SMC is not set -+CONFIG_MTD_NAND=y -+# CONFIG_MTD_NAND_ECC_BCH is not set -+# CONFIG_MTD_SM_COMMON is not set -+# CONFIG_MTD_NAND_DENALI is not set -+CONFIG_MTD_NAND_GPIO=y -+# CONFIG_MTD_NAND_OMAP_BCH_BUILD is not set -+CONFIG_MTD_NAND_IDS=y -+# CONFIG_MTD_NAND_RICOH is not set -+# CONFIG_MTD_NAND_DISKONCHIP is not set -+# CONFIG_MTD_NAND_DOCG4 is not 
set -+# CONFIG_MTD_NAND_CAFE is not set -+# CONFIG_MTD_NAND_NANDSIM is not set -+# CONFIG_MTD_NAND_PLATFORM is not set -+CONFIG_MTD_NAND_FSL_IFC=y -+# CONFIG_MTD_ONENAND is not set -+ -+# -+# LPDDR & LPDDR2 PCM memory drivers -+# -+# CONFIG_MTD_LPDDR is not set -+CONFIG_MTD_SPI_NOR=y -+CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y -+# CONFIG_MTD_UBI is not set -+CONFIG_DTC=y -+CONFIG_OF=y -+ -+# -+# Device Tree and Open Firmware support -+# -+# CONFIG_OF_SELFTEST is not set -+CONFIG_OF_FLATTREE=y -+CONFIG_OF_EARLY_FLATTREE=y -+CONFIG_OF_ADDRESS=y -+CONFIG_OF_ADDRESS_PCI=y -+CONFIG_OF_IRQ=y -+CONFIG_OF_NET=y -+CONFIG_OF_MDIO=y -+CONFIG_OF_PCI=y -+CONFIG_OF_PCI_IRQ=y -+CONFIG_OF_MTD=y -+CONFIG_OF_RESERVED_MEM=y -+# CONFIG_PARPORT is not set -+CONFIG_BLK_DEV=y -+# CONFIG_BLK_DEV_NULL_BLK is not set -+# CONFIG_BLK_DEV_PCIESSD_MTIP32XX is not set -+# CONFIG_BLK_CPQ_CISS_DA is not set -+# CONFIG_BLK_DEV_DAC960 is not set -+# CONFIG_BLK_DEV_UMEM is not set -+# CONFIG_BLK_DEV_COW_COMMON is not set -+CONFIG_BLK_DEV_LOOP=y -+CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -+# CONFIG_BLK_DEV_CRYPTOLOOP is not set -+# CONFIG_BLK_DEV_DRBD is not set -+# CONFIG_BLK_DEV_NBD is not set -+# CONFIG_BLK_DEV_NVME is not set -+# CONFIG_BLK_DEV_SKD is not set -+# CONFIG_BLK_DEV_SX8 is not set -+CONFIG_BLK_DEV_RAM=y -+CONFIG_BLK_DEV_RAM_COUNT=16 -+CONFIG_BLK_DEV_RAM_SIZE=262144 -+# CONFIG_BLK_DEV_XIP is not set -+# CONFIG_CDROM_PKTCDVD is not set -+# CONFIG_ATA_OVER_ETH is not set -+CONFIG_VIRTIO_BLK=y -+# CONFIG_BLK_DEV_RBD is not set -+# CONFIG_BLK_DEV_RSXX is not set -+ -+# -+# Misc devices -+# -+# CONFIG_SENSORS_LIS3LV02D is not set -+# CONFIG_AD525X_DPOT is not set -+# CONFIG_DUMMY_IRQ is not set -+# CONFIG_PHANTOM is not set -+# CONFIG_SGI_IOC4 is not set -+# CONFIG_TIFM_CORE is not set -+# CONFIG_ICS932S401 is not set -+# CONFIG_ENCLOSURE_SERVICES is not set -+# CONFIG_HP_ILO is not set -+# CONFIG_APDS9802ALS is not set -+# CONFIG_ISL29003 is not set -+# CONFIG_ISL29020 is not set -+# 
CONFIG_SENSORS_TSL2550 is not set -+# CONFIG_SENSORS_BH1780 is not set -+# CONFIG_SENSORS_BH1770 is not set -+# CONFIG_SENSORS_APDS990X is not set -+# CONFIG_HMC6352 is not set -+# CONFIG_DS1682 is not set -+# CONFIG_TI_DAC7512 is not set -+# CONFIG_BMP085_I2C is not set -+# CONFIG_BMP085_SPI is not set -+# CONFIG_USB_SWITCH_FSA9480 is not set -+# CONFIG_LATTICE_ECP3_CONFIG is not set -+# CONFIG_SRAM is not set -+CONFIG_VEXPRESS_SYSCFG=y -+# CONFIG_C2PORT is not set -+ -+# -+# EEPROM support -+# -+CONFIG_EEPROM_AT24=y -+CONFIG_EEPROM_AT25=y -+# CONFIG_EEPROM_LEGACY is not set -+# CONFIG_EEPROM_MAX6875 is not set -+# CONFIG_EEPROM_93CX6 is not set -+# CONFIG_EEPROM_93XX46 is not set -+# CONFIG_CB710_CORE is not set -+ -+# -+# Texas Instruments shared transport line discipline -+# -+# CONFIG_TI_ST is not set -+# CONFIG_SENSORS_LIS3_SPI is not set -+# CONFIG_SENSORS_LIS3_I2C is not set -+ -+# -+# Altera FPGA firmware download module -+# -+# CONFIG_ALTERA_STAPL is not set -+ -+# -+# Intel MIC Bus Driver -+# -+ -+# -+# Intel MIC Host Driver -+# -+ -+# -+# Intel MIC Card Driver -+# -+# CONFIG_GENWQE is not set -+# CONFIG_ECHO is not set -+# CONFIG_CXL_BASE is not set -+ -+# -+# SCSI device support -+# -+CONFIG_SCSI_MOD=y -+# CONFIG_RAID_ATTRS is not set -+CONFIG_SCSI=y -+CONFIG_SCSI_DMA=y -+# CONFIG_SCSI_NETLINK is not set -+# CONFIG_SCSI_MQ_DEFAULT is not set -+CONFIG_SCSI_PROC_FS=y -+ -+# -+# SCSI support type (disk, tape, CD-ROM) -+# -+CONFIG_BLK_DEV_SD=y -+# CONFIG_CHR_DEV_ST is not set -+# CONFIG_CHR_DEV_OSST is not set -+# CONFIG_BLK_DEV_SR is not set -+# CONFIG_CHR_DEV_SG is not set -+# CONFIG_CHR_DEV_SCH is not set -+# CONFIG_SCSI_CONSTANTS is not set -+# CONFIG_SCSI_LOGGING is not set -+# CONFIG_SCSI_SCAN_ASYNC is not set -+ -+# -+# SCSI Transports -+# -+# CONFIG_SCSI_SPI_ATTRS is not set -+# CONFIG_SCSI_FC_ATTRS is not set -+# CONFIG_SCSI_ISCSI_ATTRS is not set -+# CONFIG_SCSI_SAS_ATTRS is not set -+# CONFIG_SCSI_SAS_LIBSAS is not set -+# CONFIG_SCSI_SRP_ATTRS 
is not set -+# CONFIG_SCSI_LOWLEVEL is not set -+# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set -+# CONFIG_SCSI_DH is not set -+# CONFIG_SCSI_OSD_INITIATOR is not set -+CONFIG_HAVE_PATA_PLATFORM=y -+CONFIG_ATA=y -+# CONFIG_ATA_NONSTANDARD is not set -+CONFIG_ATA_VERBOSE_ERROR=y -+CONFIG_SATA_PMP=y -+ -+# -+# Controllers with non-SFF native interface -+# -+CONFIG_SATA_AHCI=y -+CONFIG_SATA_AHCI_PLATFORM=y -+CONFIG_AHCI_XGENE=y -+# CONFIG_SATA_INIC162X is not set -+# CONFIG_SATA_ACARD_AHCI is not set -+# CONFIG_SATA_SIL24 is not set -+CONFIG_ATA_SFF=y -+ -+# -+# SFF controllers with custom DMA interface -+# -+# CONFIG_PDC_ADMA is not set -+# CONFIG_SATA_QSTOR is not set -+# CONFIG_SATA_SX4 is not set -+CONFIG_ATA_BMDMA=y -+ -+# -+# SATA SFF controllers with BMDMA -+# -+# CONFIG_ATA_PIIX is not set -+# CONFIG_SATA_MV is not set -+# CONFIG_SATA_NV is not set -+# CONFIG_SATA_PROMISE is not set -+# CONFIG_SATA_SIL is not set -+# CONFIG_SATA_SIS is not set -+# CONFIG_SATA_SVW is not set -+# CONFIG_SATA_ULI is not set -+# CONFIG_SATA_VIA is not set -+# CONFIG_SATA_VITESSE is not set -+ -+# -+# PATA SFF controllers with BMDMA -+# -+# CONFIG_PATA_ALI is not set -+# CONFIG_PATA_AMD is not set -+# CONFIG_PATA_ARTOP is not set -+# CONFIG_PATA_ATIIXP is not set -+# CONFIG_PATA_ATP867X is not set -+# CONFIG_PATA_CMD64X is not set -+# CONFIG_PATA_CYPRESS is not set -+# CONFIG_PATA_EFAR is not set -+# CONFIG_PATA_HPT366 is not set -+# CONFIG_PATA_HPT37X is not set -+# CONFIG_PATA_HPT3X2N is not set -+# CONFIG_PATA_HPT3X3 is not set -+# CONFIG_PATA_IT8213 is not set -+# CONFIG_PATA_IT821X is not set -+# CONFIG_PATA_JMICRON is not set -+# CONFIG_PATA_MARVELL is not set -+# CONFIG_PATA_NETCELL is not set -+# CONFIG_PATA_NINJA32 is not set -+# CONFIG_PATA_NS87415 is not set -+# CONFIG_PATA_OLDPIIX is not set -+# CONFIG_PATA_OPTIDMA is not set -+# CONFIG_PATA_PDC2027X is not set -+# CONFIG_PATA_PDC_OLD is not set -+# CONFIG_PATA_RADISYS is not set -+# CONFIG_PATA_RDC is not set -+# 
CONFIG_PATA_SCH is not set -+# CONFIG_PATA_SERVERWORKS is not set -+# CONFIG_PATA_SIL680 is not set -+# CONFIG_PATA_SIS is not set -+# CONFIG_PATA_TOSHIBA is not set -+# CONFIG_PATA_TRIFLEX is not set -+# CONFIG_PATA_VIA is not set -+# CONFIG_PATA_WINBOND is not set -+ -+# -+# PIO-only SFF controllers -+# -+# CONFIG_PATA_CMD640_PCI is not set -+# CONFIG_PATA_MPIIX is not set -+# CONFIG_PATA_NS87410 is not set -+# CONFIG_PATA_OPTI is not set -+# CONFIG_PATA_PLATFORM is not set -+# CONFIG_PATA_RZ1000 is not set -+ -+# -+# Generic fallback / legacy drivers -+# -+# CONFIG_ATA_GENERIC is not set -+# CONFIG_PATA_LEGACY is not set -+# CONFIG_MD is not set -+# CONFIG_TARGET_CORE is not set -+# CONFIG_FUSION is not set -+ -+# -+# IEEE 1394 (FireWire) support -+# -+# CONFIG_FIREWIRE is not set -+# CONFIG_FIREWIRE_NOSY is not set -+# CONFIG_I2O is not set -+CONFIG_NETDEVICES=y -+CONFIG_MII=y -+CONFIG_NET_CORE=y -+# CONFIG_BONDING is not set -+# CONFIG_DUMMY is not set -+# CONFIG_EQUALIZER is not set -+# CONFIG_NET_FC is not set -+# CONFIG_NET_TEAM is not set -+CONFIG_MACVLAN=y -+# CONFIG_MACVTAP is not set -+# CONFIG_VXLAN is not set -+# CONFIG_NETCONSOLE is not set -+# CONFIG_NETPOLL is not set -+# CONFIG_NET_POLL_CONTROLLER is not set -+CONFIG_TUN=y -+# CONFIG_VETH is not set -+CONFIG_VIRTIO_NET=y -+# CONFIG_NLMON is not set -+# CONFIG_ARCNET is not set -+ -+# -+# CAIF transport drivers -+# -+ -+# -+# Distributed Switch Architecture drivers -+# -+# CONFIG_NET_DSA_MV88E6XXX is not set -+# CONFIG_NET_DSA_MV88E6060 is not set -+# CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set -+# CONFIG_NET_DSA_MV88E6131 is not set -+# CONFIG_NET_DSA_MV88E6123_61_65 is not set -+# CONFIG_NET_DSA_MV88E6171 is not set -+# CONFIG_NET_DSA_BCM_SF2 is not set -+CONFIG_ETHERNET=y -+CONFIG_NET_VENDOR_3COM=y -+# CONFIG_VORTEX is not set -+# CONFIG_TYPHOON is not set -+CONFIG_NET_VENDOR_ADAPTEC=y -+# CONFIG_ADAPTEC_STARFIRE is not set -+CONFIG_NET_VENDOR_AGERE=y -+# CONFIG_ET131X is not set 
-+CONFIG_NET_VENDOR_ALTEON=y -+# CONFIG_ACENIC is not set -+# CONFIG_ALTERA_TSE is not set -+CONFIG_NET_VENDOR_AMD=y -+# CONFIG_AMD8111_ETH is not set -+# CONFIG_PCNET32 is not set -+# CONFIG_AMD_XGBE is not set -+CONFIG_NET_XGENE=y -+CONFIG_NET_VENDOR_ARC=y -+# CONFIG_ARC_EMAC is not set -+# CONFIG_EMAC_ROCKCHIP is not set -+CONFIG_NET_VENDOR_ATHEROS=y -+# CONFIG_ATL2 is not set -+# CONFIG_ATL1 is not set -+# CONFIG_ATL1E is not set -+# CONFIG_ATL1C is not set -+# CONFIG_ALX is not set -+CONFIG_NET_VENDOR_BROADCOM=y -+# CONFIG_B44 is not set -+# CONFIG_BCMGENET is not set -+# CONFIG_BNX2 is not set -+# CONFIG_CNIC is not set -+# CONFIG_TIGON3 is not set -+# CONFIG_BNX2X is not set -+# CONFIG_SYSTEMPORT is not set -+CONFIG_NET_VENDOR_BROCADE=y -+# CONFIG_BNA is not set -+CONFIG_NET_VENDOR_CHELSIO=y -+# CONFIG_CHELSIO_T1 is not set -+# CONFIG_CHELSIO_T3 is not set -+# CONFIG_CHELSIO_T4 is not set -+# CONFIG_CHELSIO_T4VF is not set -+CONFIG_NET_VENDOR_CISCO=y -+# CONFIG_ENIC is not set -+# CONFIG_DNET is not set -+CONFIG_NET_VENDOR_DEC=y -+# CONFIG_NET_TULIP is not set -+CONFIG_NET_VENDOR_DLINK=y -+# CONFIG_DL2K is not set -+# CONFIG_SUNDANCE is not set -+CONFIG_NET_VENDOR_EMULEX=y -+# CONFIG_BE2NET is not set -+CONFIG_NET_VENDOR_EXAR=y -+# CONFIG_S2IO is not set -+# CONFIG_VXGE is not set -+CONFIG_NET_VENDOR_FREESCALE=y -+# CONFIG_FSL_PQ_MDIO is not set -+CONFIG_FSL_XGMAC_MDIO=y -+CONFIG_NET_VENDOR_HP=y -+# CONFIG_HP100 is not set -+CONFIG_NET_VENDOR_INTEL=y -+# CONFIG_E100 is not set -+CONFIG_E1000=y -+CONFIG_E1000E=y -+# CONFIG_IGB is not set -+# CONFIG_IGBVF is not set -+# CONFIG_IXGB is not set -+# CONFIG_IXGBE is not set -+# CONFIG_IXGBEVF is not set -+# CONFIG_I40E is not set -+# CONFIG_I40EVF is not set -+# CONFIG_FM10K is not set -+CONFIG_NET_VENDOR_I825XX=y -+# CONFIG_IP1000 is not set -+# CONFIG_JME is not set -+CONFIG_NET_VENDOR_MARVELL=y -+# CONFIG_MVMDIO is not set -+# CONFIG_SKGE is not set -+# CONFIG_SKY2 is not set -+CONFIG_NET_VENDOR_MELLANOX=y -+# 
CONFIG_MLX4_EN is not set -+# CONFIG_MLX4_CORE is not set -+# CONFIG_MLX5_CORE is not set -+CONFIG_NET_VENDOR_MICREL=y -+# CONFIG_KS8842 is not set -+# CONFIG_KS8851 is not set -+# CONFIG_KS8851_MLL is not set -+# CONFIG_KSZ884X_PCI is not set -+CONFIG_NET_VENDOR_MICROCHIP=y -+# CONFIG_ENC28J60 is not set -+CONFIG_NET_VENDOR_MYRI=y -+# CONFIG_MYRI10GE is not set -+# CONFIG_FEALNX is not set -+CONFIG_NET_VENDOR_NATSEMI=y -+# CONFIG_NATSEMI is not set -+# CONFIG_NS83820 is not set -+CONFIG_NET_VENDOR_8390=y -+# CONFIG_NE2K_PCI is not set -+CONFIG_NET_VENDOR_NVIDIA=y -+# CONFIG_FORCEDETH is not set -+CONFIG_NET_VENDOR_OKI=y -+# CONFIG_ETHOC is not set -+CONFIG_NET_PACKET_ENGINE=y -+# CONFIG_HAMACHI is not set -+# CONFIG_YELLOWFIN is not set -+CONFIG_NET_VENDOR_QLOGIC=y -+# CONFIG_QLA3XXX is not set -+# CONFIG_QLCNIC is not set -+# CONFIG_QLGE is not set -+# CONFIG_NETXEN_NIC is not set -+CONFIG_NET_VENDOR_QUALCOMM=y -+# CONFIG_QCA7000 is not set -+CONFIG_NET_VENDOR_REALTEK=y -+# CONFIG_8139CP is not set -+# CONFIG_8139TOO is not set -+# CONFIG_R8169 is not set -+CONFIG_NET_VENDOR_RDC=y -+# CONFIG_R6040 is not set -+CONFIG_NET_VENDOR_SAMSUNG=y -+# CONFIG_SXGBE_ETH is not set -+CONFIG_NET_VENDOR_SEEQ=y -+CONFIG_NET_VENDOR_SILAN=y -+# CONFIG_SC92031 is not set -+CONFIG_NET_VENDOR_SIS=y -+# CONFIG_SIS900 is not set -+# CONFIG_SIS190 is not set -+# CONFIG_SFC is not set -+CONFIG_NET_VENDOR_SMSC=y -+CONFIG_SMC91X=y -+# CONFIG_EPIC100 is not set -+CONFIG_SMSC911X=y -+# CONFIG_SMSC911X_ARCH_HOOKS is not set -+# CONFIG_SMSC9420 is not set -+CONFIG_NET_VENDOR_STMICRO=y -+# CONFIG_STMMAC_ETH is not set -+CONFIG_NET_VENDOR_SUN=y -+# CONFIG_HAPPYMEAL is not set -+# CONFIG_SUNGEM is not set -+# CONFIG_CASSINI is not set -+# CONFIG_NIU is not set -+CONFIG_NET_VENDOR_TEHUTI=y -+# CONFIG_TEHUTI is not set -+CONFIG_NET_VENDOR_TI=y -+# CONFIG_TLAN is not set -+CONFIG_NET_VENDOR_VIA=y -+# CONFIG_VIA_RHINE is not set -+# CONFIG_VIA_VELOCITY is not set -+CONFIG_NET_VENDOR_WIZNET=y -+# 
CONFIG_WIZNET_W5100 is not set -+# CONFIG_WIZNET_W5300 is not set -+# CONFIG_FDDI is not set -+# CONFIG_HIPPI is not set -+CONFIG_PHYLIB=y -+ -+# -+# MII PHY device drivers -+# -+CONFIG_AQUANTIA_PHY=y -+# CONFIG_AT803X_PHY is not set -+# CONFIG_AMD_PHY is not set -+# CONFIG_AMD_XGBE_PHY is not set -+# CONFIG_MARVELL_PHY is not set -+# CONFIG_DAVICOM_PHY is not set -+# CONFIG_QSEMI_PHY is not set -+# CONFIG_LXT_PHY is not set -+# CONFIG_CICADA_PHY is not set -+CONFIG_VITESSE_PHY=y -+# CONFIG_TERANETICS_PHY is not set -+CONFIG_SMSC_PHY=y -+CONFIG_BROADCOM_PHY=y -+# CONFIG_BCM7XXX_PHY is not set -+# CONFIG_BCM87XX_PHY is not set -+# CONFIG_ICPLUS_PHY is not set -+CONFIG_REALTEK_PHY=y -+# CONFIG_NATIONAL_PHY is not set -+# CONFIG_STE10XP is not set -+# CONFIG_LSI_ET1011C_PHY is not set -+# CONFIG_MICREL_PHY is not set -+CONFIG_FIXED_PHY=y -+# CONFIG_MDIO_BITBANG is not set -+CONFIG_MDIO_BUS_MUX=y -+# CONFIG_MDIO_BUS_MUX_GPIO is not set -+CONFIG_MDIO_BUS_MUX_MMIOREG=y -+# CONFIG_FSL_10GBASE_KR is not set -+# CONFIG_MDIO_BCM_UNIMAC is not set -+# CONFIG_MICREL_KS8995MA is not set -+# CONFIG_PPP is not set -+# CONFIG_SLIP is not set -+CONFIG_USB_NET_DRIVERS=y -+# CONFIG_USB_CATC is not set -+# CONFIG_USB_KAWETH is not set -+# CONFIG_USB_PEGASUS is not set -+# CONFIG_USB_RTL8150 is not set -+# CONFIG_USB_RTL8152 is not set -+# CONFIG_USB_USBNET is not set -+# CONFIG_USB_IPHETH is not set -+# CONFIG_WLAN is not set -+ -+# -+# Enable WiMAX (Networking options) to see the WiMAX drivers -+# -+# CONFIG_WAN is not set -+# CONFIG_VMXNET3 is not set -+# CONFIG_ISDN is not set -+ -+# -+# Input device support -+# -+CONFIG_INPUT=y -+# CONFIG_INPUT_FF_MEMLESS is not set -+# CONFIG_INPUT_POLLDEV is not set -+# CONFIG_INPUT_SPARSEKMAP is not set -+# CONFIG_INPUT_MATRIXKMAP is not set -+ -+# -+# Userland interfaces -+# -+CONFIG_INPUT_MOUSEDEV=y -+CONFIG_INPUT_MOUSEDEV_PSAUX=y -+CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -+CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -+# CONFIG_INPUT_JOYDEV is not set 
-+CONFIG_INPUT_EVDEV=y -+# CONFIG_INPUT_EVBUG is not set -+ -+# -+# Input Device Drivers -+# -+CONFIG_INPUT_KEYBOARD=y -+# CONFIG_KEYBOARD_ADP5588 is not set -+# CONFIG_KEYBOARD_ADP5589 is not set -+CONFIG_KEYBOARD_ATKBD=y -+# CONFIG_KEYBOARD_QT1070 is not set -+# CONFIG_KEYBOARD_QT2160 is not set -+# CONFIG_KEYBOARD_LKKBD is not set -+# CONFIG_KEYBOARD_GPIO is not set -+# CONFIG_KEYBOARD_GPIO_POLLED is not set -+# CONFIG_KEYBOARD_TCA6416 is not set -+# CONFIG_KEYBOARD_TCA8418 is not set -+# CONFIG_KEYBOARD_MATRIX is not set -+# CONFIG_KEYBOARD_LM8333 is not set -+# CONFIG_KEYBOARD_MAX7359 is not set -+# CONFIG_KEYBOARD_MCS is not set -+# CONFIG_KEYBOARD_MPR121 is not set -+# CONFIG_KEYBOARD_NEWTON is not set -+# CONFIG_KEYBOARD_OPENCORES is not set -+# CONFIG_KEYBOARD_SAMSUNG is not set -+# CONFIG_KEYBOARD_STOWAWAY is not set -+# CONFIG_KEYBOARD_SUNKBD is not set -+# CONFIG_KEYBOARD_OMAP4 is not set -+# CONFIG_KEYBOARD_XTKBD is not set -+# CONFIG_KEYBOARD_CAP1106 is not set -+CONFIG_INPUT_MOUSE=y -+CONFIG_MOUSE_PS2=y -+CONFIG_MOUSE_PS2_ALPS=y -+CONFIG_MOUSE_PS2_LOGIPS2PP=y -+CONFIG_MOUSE_PS2_SYNAPTICS=y -+CONFIG_MOUSE_PS2_CYPRESS=y -+CONFIG_MOUSE_PS2_TRACKPOINT=y -+# CONFIG_MOUSE_PS2_ELANTECH is not set -+# CONFIG_MOUSE_PS2_SENTELIC is not set -+# CONFIG_MOUSE_PS2_TOUCHKIT is not set -+# CONFIG_MOUSE_SERIAL is not set -+# CONFIG_MOUSE_APPLETOUCH is not set -+# CONFIG_MOUSE_BCM5974 is not set -+# CONFIG_MOUSE_CYAPA is not set -+# CONFIG_MOUSE_VSXXXAA is not set -+# CONFIG_MOUSE_GPIO is not set -+# CONFIG_MOUSE_SYNAPTICS_I2C is not set -+# CONFIG_MOUSE_SYNAPTICS_USB is not set -+# CONFIG_INPUT_JOYSTICK is not set -+# CONFIG_INPUT_TABLET is not set -+# CONFIG_INPUT_TOUCHSCREEN is not set -+# CONFIG_INPUT_MISC is not set -+ -+# -+# Hardware I/O ports -+# -+CONFIG_SERIO=y -+# CONFIG_SERIO_SERPORT is not set -+CONFIG_SERIO_AMBAKMI=y -+# CONFIG_SERIO_PCIPS2 is not set -+CONFIG_SERIO_LIBPS2=y -+# CONFIG_SERIO_RAW is not set -+# CONFIG_SERIO_ALTERA_PS2 is not set -+# 
CONFIG_SERIO_PS2MULT is not set -+# CONFIG_SERIO_ARC_PS2 is not set -+# CONFIG_SERIO_APBPS2 is not set -+# CONFIG_GAMEPORT is not set -+ -+# -+# Character devices -+# -+CONFIG_TTY=y -+CONFIG_VT=y -+CONFIG_CONSOLE_TRANSLATIONS=y -+CONFIG_VT_CONSOLE=y -+CONFIG_VT_CONSOLE_SLEEP=y -+CONFIG_HW_CONSOLE=y -+CONFIG_VT_HW_CONSOLE_BINDING=y -+CONFIG_UNIX98_PTYS=y -+# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set -+CONFIG_LEGACY_PTYS=y -+CONFIG_LEGACY_PTY_COUNT=16 -+# CONFIG_SERIAL_NONSTANDARD is not set -+# CONFIG_NOZOMI is not set -+# CONFIG_N_GSM is not set -+# CONFIG_TRACE_SINK is not set -+CONFIG_DEVKMEM=y -+ -+# -+# Serial drivers -+# -+CONFIG_SERIAL_EARLYCON=y -+CONFIG_SERIAL_8250=y -+CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y -+CONFIG_SERIAL_8250_CONSOLE=y -+CONFIG_SERIAL_8250_DMA=y -+CONFIG_SERIAL_8250_PCI=y -+CONFIG_SERIAL_8250_NR_UARTS=4 -+CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -+# CONFIG_SERIAL_8250_EXTENDED is not set -+# CONFIG_SERIAL_8250_DW is not set -+ -+# -+# Non-8250 serial port support -+# -+# CONFIG_SERIAL_AMBA_PL010 is not set -+CONFIG_SERIAL_AMBA_PL011=y -+CONFIG_SERIAL_AMBA_PL011_CONSOLE=y -+# CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set -+# CONFIG_SERIAL_MAX3100 is not set -+# CONFIG_SERIAL_MAX310X is not set -+# CONFIG_SERIAL_MFD_HSU is not set -+CONFIG_SERIAL_CORE=y -+CONFIG_SERIAL_CORE_CONSOLE=y -+# CONFIG_SERIAL_JSM is not set -+CONFIG_SERIAL_OF_PLATFORM=y -+# CONFIG_SERIAL_SCCNXP is not set -+# CONFIG_SERIAL_SC16IS7XX is not set -+# CONFIG_SERIAL_ALTERA_JTAGUART is not set -+# CONFIG_SERIAL_ALTERA_UART is not set -+# CONFIG_SERIAL_IFX6X60 is not set -+# CONFIG_SERIAL_XILINX_PS_UART is not set -+# CONFIG_SERIAL_ARC is not set -+# CONFIG_SERIAL_RP2 is not set -+# CONFIG_SERIAL_FSL_LPUART is not set -+CONFIG_HVC_DRIVER=y -+CONFIG_VIRTIO_CONSOLE=y -+# CONFIG_IPMI_HANDLER is not set -+CONFIG_HW_RANDOM=y -+# CONFIG_HW_RANDOM_TIMERIOMEM is not set -+# CONFIG_HW_RANDOM_VIRTIO is not set -+CONFIG_HW_RANDOM_XGENE=y -+# CONFIG_R3964 is not set -+# CONFIG_APPLICOM 
is not set -+ -+# -+# PCMCIA character devices -+# -+# CONFIG_RAW_DRIVER is not set -+# CONFIG_TCG_TPM is not set -+CONFIG_DEVPORT=y -+# CONFIG_XILLYBUS is not set -+ -+# -+# I2C support -+# -+CONFIG_I2C=y -+CONFIG_I2C_BOARDINFO=y -+CONFIG_I2C_COMPAT=y -+CONFIG_I2C_CHARDEV=y -+CONFIG_I2C_MUX=y -+ -+# -+# Multiplexer I2C Chip support -+# -+# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set -+# CONFIG_I2C_MUX_GPIO is not set -+# CONFIG_I2C_MUX_PCA9541 is not set -+CONFIG_I2C_MUX_PCA954x=y -+CONFIG_I2C_HELPER_AUTO=y -+ -+# -+# I2C Hardware Bus support -+# -+ -+# -+# PC SMBus host controller drivers -+# -+# CONFIG_I2C_ALI1535 is not set -+# CONFIG_I2C_ALI1563 is not set -+# CONFIG_I2C_ALI15X3 is not set -+# CONFIG_I2C_AMD756 is not set -+# CONFIG_I2C_AMD8111 is not set -+# CONFIG_I2C_I801 is not set -+# CONFIG_I2C_ISCH is not set -+# CONFIG_I2C_PIIX4 is not set -+# CONFIG_I2C_NFORCE2 is not set -+# CONFIG_I2C_SIS5595 is not set -+# CONFIG_I2C_SIS630 is not set -+# CONFIG_I2C_SIS96X is not set -+# CONFIG_I2C_VIA is not set -+# CONFIG_I2C_VIAPRO is not set -+ -+# -+# I2C system bus drivers (mostly embedded / system-on-chip) -+# -+# CONFIG_I2C_CBUS_GPIO is not set -+# CONFIG_I2C_DESIGNWARE_PLATFORM is not set -+# CONFIG_I2C_DESIGNWARE_PCI is not set -+# CONFIG_I2C_GPIO is not set -+CONFIG_I2C_IMX=y -+# CONFIG_I2C_NOMADIK is not set -+# CONFIG_I2C_OCORES is not set -+# CONFIG_I2C_PCA_PLATFORM is not set -+# CONFIG_I2C_PXA_PCI is not set -+# CONFIG_I2C_RK3X is not set -+# CONFIG_I2C_SIMTEC is not set -+# CONFIG_I2C_VERSATILE is not set -+# CONFIG_I2C_XILINX is not set -+ -+# -+# External I2C/SMBus adapter drivers -+# -+# CONFIG_I2C_DIOLAN_U2C is not set -+# CONFIG_I2C_PARPORT_LIGHT is not set -+# CONFIG_I2C_ROBOTFUZZ_OSIF is not set -+# CONFIG_I2C_TAOS_EVM is not set -+# CONFIG_I2C_TINY_USB is not set -+ -+# -+# Other I2C/SMBus bus drivers -+# -+# CONFIG_I2C_STUB is not set -+# CONFIG_I2C_DEBUG_CORE is not set -+# CONFIG_I2C_DEBUG_ALGO is not set -+# CONFIG_I2C_DEBUG_BUS is not set 
-+CONFIG_SPI=y -+# CONFIG_SPI_DEBUG is not set -+CONFIG_SPI_MASTER=y -+ -+# -+# SPI Master Controller Drivers -+# -+# CONFIG_SPI_ALTERA is not set -+# CONFIG_SPI_BITBANG is not set -+# CONFIG_SPI_GPIO is not set -+# CONFIG_SPI_FSL_SPI is not set -+# CONFIG_SPI_OC_TINY is not set -+CONFIG_SPI_PL022=y -+# CONFIG_SPI_PXA2XX is not set -+# CONFIG_SPI_PXA2XX_PCI is not set -+# CONFIG_SPI_ROCKCHIP is not set -+# CONFIG_SPI_SC18IS602 is not set -+# CONFIG_SPI_XCOMM is not set -+# CONFIG_SPI_XILINX is not set -+# CONFIG_SPI_DESIGNWARE is not set -+ -+# -+# SPI Protocol Masters -+# -+# CONFIG_SPI_SPIDEV is not set -+# CONFIG_SPI_TLE62X0 is not set -+# CONFIG_SPMI is not set -+# CONFIG_HSI is not set -+ -+# -+# PPS support -+# -+CONFIG_PPS=y -+# CONFIG_PPS_DEBUG is not set -+# CONFIG_NTP_PPS is not set -+ -+# -+# PPS clients support -+# -+# CONFIG_PPS_CLIENT_KTIMER is not set -+# CONFIG_PPS_CLIENT_LDISC is not set -+# CONFIG_PPS_CLIENT_GPIO is not set -+ -+# -+# PPS generators support -+# -+ -+# -+# PTP clock support -+# -+CONFIG_PTP_1588_CLOCK=y -+ -+# -+# Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. 
-+# -+CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y -+CONFIG_ARCH_REQUIRE_GPIOLIB=y -+CONFIG_GPIOLIB=y -+CONFIG_GPIO_DEVRES=y -+CONFIG_OF_GPIO=y -+CONFIG_GPIOLIB_IRQCHIP=y -+# CONFIG_DEBUG_GPIO is not set -+# CONFIG_GPIO_SYSFS is not set -+CONFIG_GPIO_GENERIC=y -+ -+# -+# Memory mapped GPIO drivers: -+# -+CONFIG_GPIO_GENERIC_PLATFORM=y -+# CONFIG_GPIO_DWAPB is not set -+CONFIG_GPIO_PL061=y -+# CONFIG_GPIO_SCH311X is not set -+# CONFIG_GPIO_SYSCON is not set -+CONFIG_GPIO_XGENE=y -+# CONFIG_GPIO_VX855 is not set -+# CONFIG_GPIO_GRGPIO is not set -+ -+# -+# I2C GPIO expanders: -+# -+# CONFIG_GPIO_MAX7300 is not set -+# CONFIG_GPIO_MAX732X is not set -+# CONFIG_GPIO_PCA953X is not set -+# CONFIG_GPIO_PCF857X is not set -+# CONFIG_GPIO_SX150X is not set -+# CONFIG_GPIO_ADP5588 is not set -+# CONFIG_GPIO_ADNP is not set -+ -+# -+# PCI GPIO expanders: -+# -+# CONFIG_GPIO_BT8XX is not set -+# CONFIG_GPIO_AMD8111 is not set -+# CONFIG_GPIO_ML_IOH is not set -+# CONFIG_GPIO_RDC321X is not set -+ -+# -+# SPI GPIO expanders: -+# -+# CONFIG_GPIO_MAX7301 is not set -+# CONFIG_GPIO_MCP23S08 is not set -+# CONFIG_GPIO_MC33880 is not set -+# CONFIG_GPIO_74X164 is not set -+ -+# -+# AC97 GPIO expanders: -+# -+ -+# -+# LPC GPIO expanders: -+# -+ -+# -+# MODULbus GPIO expanders: -+# -+ -+# -+# USB GPIO expanders: -+# -+# CONFIG_W1 is not set -+CONFIG_POWER_SUPPLY=y -+# CONFIG_POWER_SUPPLY_DEBUG is not set -+# CONFIG_PDA_POWER is not set -+# CONFIG_TEST_POWER is not set -+# CONFIG_BATTERY_DS2780 is not set -+# CONFIG_BATTERY_DS2781 is not set -+# CONFIG_BATTERY_DS2782 is not set -+# CONFIG_BATTERY_SBS is not set -+# CONFIG_BATTERY_BQ27x00 is not set -+# CONFIG_BATTERY_MAX17040 is not set -+# CONFIG_BATTERY_MAX17042 is not set -+# CONFIG_CHARGER_MAX8903 is not set -+# CONFIG_CHARGER_LP8727 is not set -+# CONFIG_CHARGER_GPIO is not set -+# CONFIG_CHARGER_MANAGER is not set -+# CONFIG_CHARGER_BQ2415X is not set -+# CONFIG_CHARGER_BQ24190 is not set -+# CONFIG_CHARGER_BQ24735 is not set -+# 
CONFIG_CHARGER_SMB347 is not set -+CONFIG_POWER_RESET=y -+# CONFIG_POWER_RESET_GPIO is not set -+# CONFIG_POWER_RESET_GPIO_RESTART is not set -+# CONFIG_POWER_RESET_LTC2952 is not set -+CONFIG_POWER_RESET_VEXPRESS=y -+# CONFIG_POWER_RESET_XGENE is not set -+# CONFIG_POWER_RESET_SYSCON is not set -+CONFIG_POWER_RESET_LAYERSCAPE=y -+# CONFIG_POWER_AVS is not set -+# CONFIG_HWMON is not set -+# CONFIG_THERMAL is not set -+# CONFIG_WATCHDOG is not set -+CONFIG_SSB_POSSIBLE=y -+ -+# -+# Sonics Silicon Backplane -+# -+# CONFIG_SSB is not set -+CONFIG_BCMA_POSSIBLE=y -+ -+# -+# Broadcom specific AMBA -+# -+# CONFIG_BCMA is not set -+ -+# -+# Multifunction device drivers -+# -+CONFIG_MFD_CORE=y -+# CONFIG_MFD_AS3711 is not set -+# CONFIG_MFD_AS3722 is not set -+# CONFIG_PMIC_ADP5520 is not set -+# CONFIG_MFD_AAT2870_CORE is not set -+# CONFIG_MFD_BCM590XX is not set -+# CONFIG_MFD_AXP20X is not set -+# CONFIG_MFD_CROS_EC is not set -+# CONFIG_PMIC_DA903X is not set -+# CONFIG_MFD_DA9052_SPI is not set -+# CONFIG_MFD_DA9052_I2C is not set -+# CONFIG_MFD_DA9055 is not set -+# CONFIG_MFD_DA9063 is not set -+# CONFIG_MFD_MC13XXX_SPI is not set -+# CONFIG_MFD_MC13XXX_I2C is not set -+# CONFIG_MFD_HI6421_PMIC is not set -+# CONFIG_HTC_PASIC3 is not set -+# CONFIG_HTC_I2CPLD is not set -+# CONFIG_LPC_ICH is not set -+# CONFIG_LPC_SCH is not set -+# CONFIG_INTEL_SOC_PMIC is not set -+# CONFIG_MFD_JANZ_CMODIO is not set -+# CONFIG_MFD_KEMPLD is not set -+# CONFIG_MFD_88PM800 is not set -+# CONFIG_MFD_88PM805 is not set -+# CONFIG_MFD_88PM860X is not set -+# CONFIG_MFD_MAX14577 is not set -+# CONFIG_MFD_MAX77686 is not set -+# CONFIG_MFD_MAX77693 is not set -+# CONFIG_MFD_MAX8907 is not set -+# CONFIG_MFD_MAX8925 is not set -+# CONFIG_MFD_MAX8997 is not set -+# CONFIG_MFD_MAX8998 is not set -+# CONFIG_MFD_MENF21BMC is not set -+# CONFIG_EZX_PCAP is not set -+# CONFIG_MFD_VIPERBOARD is not set -+# CONFIG_MFD_RETU is not set -+# CONFIG_MFD_PCF50633 is not set -+# CONFIG_MFD_RDC321X is 
not set -+# CONFIG_MFD_RTSX_PCI is not set -+# CONFIG_MFD_RTSX_USB is not set -+# CONFIG_MFD_RC5T583 is not set -+# CONFIG_MFD_RK808 is not set -+# CONFIG_MFD_RN5T618 is not set -+# CONFIG_MFD_SEC_CORE is not set -+# CONFIG_MFD_SI476X_CORE is not set -+# CONFIG_MFD_SM501 is not set -+# CONFIG_MFD_SMSC is not set -+# CONFIG_ABX500_CORE is not set -+# CONFIG_MFD_STMPE is not set -+CONFIG_MFD_SYSCON=y -+# CONFIG_MFD_TI_AM335X_TSCADC is not set -+# CONFIG_MFD_LP3943 is not set -+# CONFIG_MFD_LP8788 is not set -+# CONFIG_MFD_PALMAS is not set -+# CONFIG_TPS6105X is not set -+# CONFIG_TPS65010 is not set -+# CONFIG_TPS6507X is not set -+# CONFIG_MFD_TPS65090 is not set -+# CONFIG_MFD_TPS65217 is not set -+# CONFIG_MFD_TPS65218 is not set -+# CONFIG_MFD_TPS6586X is not set -+# CONFIG_MFD_TPS65910 is not set -+# CONFIG_MFD_TPS65912 is not set -+# CONFIG_MFD_TPS65912_I2C is not set -+# CONFIG_MFD_TPS65912_SPI is not set -+# CONFIG_MFD_TPS80031 is not set -+# CONFIG_TWL4030_CORE is not set -+# CONFIG_TWL6040_CORE is not set -+# CONFIG_MFD_WL1273_CORE is not set -+# CONFIG_MFD_LM3533 is not set -+# CONFIG_MFD_TC3589X is not set -+# CONFIG_MFD_TMIO is not set -+# CONFIG_MFD_VX855 is not set -+# CONFIG_MFD_ARIZONA_I2C is not set -+# CONFIG_MFD_ARIZONA_SPI is not set -+# CONFIG_MFD_WM8400 is not set -+# CONFIG_MFD_WM831X_I2C is not set -+# CONFIG_MFD_WM831X_SPI is not set -+# CONFIG_MFD_WM8350_I2C is not set -+# CONFIG_MFD_WM8994 is not set -+CONFIG_MFD_VEXPRESS_SYSREG=y -+CONFIG_REGULATOR=y -+# CONFIG_REGULATOR_DEBUG is not set -+CONFIG_REGULATOR_FIXED_VOLTAGE=y -+# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set -+# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set -+# CONFIG_REGULATOR_ACT8865 is not set -+# CONFIG_REGULATOR_AD5398 is not set -+# CONFIG_REGULATOR_ANATOP is not set -+# CONFIG_REGULATOR_DA9210 is not set -+# CONFIG_REGULATOR_DA9211 is not set -+# CONFIG_REGULATOR_FAN53555 is not set -+# CONFIG_REGULATOR_GPIO is not set -+# CONFIG_REGULATOR_ISL9305 is not set -+# 
CONFIG_REGULATOR_ISL6271A is not set -+# CONFIG_REGULATOR_LP3971 is not set -+# CONFIG_REGULATOR_LP3972 is not set -+# CONFIG_REGULATOR_LP872X is not set -+# CONFIG_REGULATOR_LP8755 is not set -+# CONFIG_REGULATOR_LTC3589 is not set -+# CONFIG_REGULATOR_MAX1586 is not set -+# CONFIG_REGULATOR_MAX8649 is not set -+# CONFIG_REGULATOR_MAX8660 is not set -+# CONFIG_REGULATOR_MAX8952 is not set -+# CONFIG_REGULATOR_MAX8973 is not set -+# CONFIG_REGULATOR_PFUZE100 is not set -+# CONFIG_REGULATOR_TPS51632 is not set -+# CONFIG_REGULATOR_TPS62360 is not set -+# CONFIG_REGULATOR_TPS65023 is not set -+# CONFIG_REGULATOR_TPS6507X is not set -+# CONFIG_REGULATOR_TPS6524X is not set -+# CONFIG_REGULATOR_VEXPRESS is not set -+# CONFIG_MEDIA_SUPPORT is not set -+ -+# -+# Graphics support -+# -+CONFIG_VGA_ARB=y -+CONFIG_VGA_ARB_MAX_GPUS=16 -+ -+# -+# Direct Rendering Manager -+# -+# CONFIG_DRM is not set -+ -+# -+# Frame buffer Devices -+# -+CONFIG_FB=y -+# CONFIG_FIRMWARE_EDID is not set -+CONFIG_FB_CMDLINE=y -+# CONFIG_FB_DDC is not set -+# CONFIG_FB_BOOT_VESA_SUPPORT is not set -+CONFIG_FB_CFB_FILLRECT=y -+CONFIG_FB_CFB_COPYAREA=y -+CONFIG_FB_CFB_IMAGEBLIT=y -+# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -+# CONFIG_FB_SYS_FILLRECT is not set -+# CONFIG_FB_SYS_COPYAREA is not set -+# CONFIG_FB_SYS_IMAGEBLIT is not set -+# CONFIG_FB_FOREIGN_ENDIAN is not set -+# CONFIG_FB_SYS_FOPS is not set -+# CONFIG_FB_SVGALIB is not set -+# CONFIG_FB_MACMODES is not set -+# CONFIG_FB_BACKLIGHT is not set -+CONFIG_FB_MODE_HELPERS=y -+# CONFIG_FB_TILEBLITTING is not set -+ -+# -+# Frame buffer hardware drivers -+# -+# CONFIG_FB_CIRRUS is not set -+# CONFIG_FB_PM2 is not set -+CONFIG_FB_ARMCLCD=y -+# CONFIG_FB_CYBER2000 is not set -+# CONFIG_FB_ASILIANT is not set -+# CONFIG_FB_IMSTT is not set -+# CONFIG_FB_OPENCORES is not set -+# CONFIG_FB_S1D13XXX is not set -+# CONFIG_FB_NVIDIA is not set -+# CONFIG_FB_RIVA is not set -+# CONFIG_FB_I740 is not set -+# CONFIG_FB_MATROX is not set -+# 
CONFIG_FB_RADEON is not set -+# CONFIG_FB_ATY128 is not set -+# CONFIG_FB_ATY is not set -+# CONFIG_FB_S3 is not set -+# CONFIG_FB_SAVAGE is not set -+# CONFIG_FB_SIS is not set -+# CONFIG_FB_NEOMAGIC is not set -+# CONFIG_FB_KYRO is not set -+# CONFIG_FB_3DFX is not set -+# CONFIG_FB_VOODOO1 is not set -+# CONFIG_FB_VT8623 is not set -+# CONFIG_FB_TRIDENT is not set -+# CONFIG_FB_ARK is not set -+# CONFIG_FB_PM3 is not set -+# CONFIG_FB_CARMINE is not set -+# CONFIG_FB_SMSCUFX is not set -+# CONFIG_FB_UDL is not set -+# CONFIG_FB_VIRTUAL is not set -+# CONFIG_FB_METRONOME is not set -+# CONFIG_FB_MB862XX is not set -+# CONFIG_FB_BROADSHEET is not set -+# CONFIG_FB_AUO_K190X is not set -+# CONFIG_FB_SIMPLE is not set -+# CONFIG_FB_SSD1307 is not set -+# CONFIG_BACKLIGHT_LCD_SUPPORT is not set -+# CONFIG_VGASTATE is not set -+CONFIG_VIDEOMODE_HELPERS=y -+ -+# -+# Console display driver support -+# -+CONFIG_DUMMY_CONSOLE=y -+CONFIG_FRAMEBUFFER_CONSOLE=y -+# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set -+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set -+CONFIG_LOGO=y -+# CONFIG_LOGO_LINUX_MONO is not set -+# CONFIG_LOGO_LINUX_VGA16 is not set -+CONFIG_LOGO_LINUX_CLUT224=y -+# CONFIG_SOUND is not set -+ -+# -+# HID support -+# -+CONFIG_HID=y -+# CONFIG_HID_BATTERY_STRENGTH is not set -+# CONFIG_HIDRAW is not set -+# CONFIG_UHID is not set -+CONFIG_HID_GENERIC=y -+ -+# -+# Special HID drivers -+# -+CONFIG_HID_A4TECH=y -+# CONFIG_HID_ACRUX is not set -+CONFIG_HID_APPLE=y -+# CONFIG_HID_APPLEIR is not set -+# CONFIG_HID_AUREAL is not set -+CONFIG_HID_BELKIN=y -+CONFIG_HID_CHERRY=y -+CONFIG_HID_CHICONY=y -+# CONFIG_HID_CP2112 is not set -+CONFIG_HID_CYPRESS=y -+# CONFIG_HID_DRAGONRISE is not set -+# CONFIG_HID_EMS_FF is not set -+# CONFIG_HID_ELECOM is not set -+# CONFIG_HID_ELO is not set -+CONFIG_HID_EZKEY=y -+# CONFIG_HID_HOLTEK is not set -+# CONFIG_HID_HUION is not set -+# CONFIG_HID_KEYTOUCH is not set -+# CONFIG_HID_KYE is not set -+# CONFIG_HID_UCLOGIC is 
not set -+# CONFIG_HID_WALTOP is not set -+# CONFIG_HID_GYRATION is not set -+# CONFIG_HID_ICADE is not set -+# CONFIG_HID_TWINHAN is not set -+CONFIG_HID_KENSINGTON=y -+# CONFIG_HID_LCPOWER is not set -+# CONFIG_HID_LENOVO is not set -+CONFIG_HID_LOGITECH=y -+# CONFIG_HID_LOGITECH_HIDPP is not set -+# CONFIG_LOGITECH_FF is not set -+# CONFIG_LOGIRUMBLEPAD2_FF is not set -+# CONFIG_LOGIG940_FF is not set -+# CONFIG_LOGIWHEELS_FF is not set -+# CONFIG_HID_MAGICMOUSE is not set -+CONFIG_HID_MICROSOFT=y -+CONFIG_HID_MONTEREY=y -+# CONFIG_HID_MULTITOUCH is not set -+# CONFIG_HID_NTRIG is not set -+# CONFIG_HID_ORTEK is not set -+# CONFIG_HID_PANTHERLORD is not set -+# CONFIG_HID_PENMOUNT is not set -+# CONFIG_HID_PETALYNX is not set -+# CONFIG_HID_PICOLCD is not set -+# CONFIG_HID_PRIMAX is not set -+# CONFIG_HID_ROCCAT is not set -+# CONFIG_HID_SAITEK is not set -+# CONFIG_HID_SAMSUNG is not set -+# CONFIG_HID_SPEEDLINK is not set -+# CONFIG_HID_STEELSERIES is not set -+# CONFIG_HID_SUNPLUS is not set -+# CONFIG_HID_RMI is not set -+# CONFIG_HID_GREENASIA is not set -+# CONFIG_HID_SMARTJOYPLUS is not set -+# CONFIG_HID_TIVO is not set -+# CONFIG_HID_TOPSEED is not set -+# CONFIG_HID_THRUSTMASTER is not set -+# CONFIG_HID_WACOM is not set -+# CONFIG_HID_XINMO is not set -+# CONFIG_HID_ZEROPLUS is not set -+# CONFIG_HID_ZYDACRON is not set -+# CONFIG_HID_SENSOR_HUB is not set -+ -+# -+# USB HID support -+# -+CONFIG_USB_HID=y -+# CONFIG_HID_PID is not set -+# CONFIG_USB_HIDDEV is not set -+ -+# -+# I2C HID support -+# -+# CONFIG_I2C_HID is not set -+CONFIG_USB_OHCI_LITTLE_ENDIAN=y -+CONFIG_USB_SUPPORT=y -+CONFIG_USB_COMMON=y -+CONFIG_USB_ARCH_HAS_HCD=y -+CONFIG_USB=y -+# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set -+ -+# -+# Miscellaneous USB options -+# -+CONFIG_USB_DEFAULT_PERSIST=y -+# CONFIG_USB_DYNAMIC_MINORS is not set -+# CONFIG_USB_OTG_WHITELIST is not set -+# CONFIG_USB_OTG_FSM is not set -+# CONFIG_USB_MON is not set -+# CONFIG_USB_WUSB_CBAF is not set -+ -+# 
-+# USB Host Controller Drivers -+# -+# CONFIG_USB_C67X00_HCD is not set -+CONFIG_USB_XHCI_HCD=y -+CONFIG_USB_XHCI_PCI=y -+CONFIG_USB_XHCI_PLATFORM=y -+CONFIG_USB_EHCI_HCD=y -+# CONFIG_USB_EHCI_ROOT_HUB_TT is not set -+CONFIG_USB_EHCI_TT_NEWSCHED=y -+CONFIG_USB_EHCI_PCI=y -+CONFIG_USB_EHCI_HCD_PLATFORM=y -+# CONFIG_USB_OXU210HP_HCD is not set -+# CONFIG_USB_ISP116X_HCD is not set -+CONFIG_USB_ISP1760_HCD=y -+# CONFIG_USB_ISP1362_HCD is not set -+# CONFIG_USB_FUSBH200_HCD is not set -+# CONFIG_USB_FOTG210_HCD is not set -+# CONFIG_USB_MAX3421_HCD is not set -+CONFIG_USB_OHCI_HCD=y -+CONFIG_USB_OHCI_HCD_PCI=y -+CONFIG_USB_OHCI_HCD_PLATFORM=y -+# CONFIG_USB_UHCI_HCD is not set -+# CONFIG_USB_SL811_HCD is not set -+# CONFIG_USB_R8A66597_HCD is not set -+# CONFIG_USB_HCD_TEST_MODE is not set -+ -+# -+# USB Device Class drivers -+# -+# CONFIG_USB_ACM is not set -+# CONFIG_USB_PRINTER is not set -+# CONFIG_USB_WDM is not set -+# CONFIG_USB_TMC is not set -+ -+# -+# NOTE: USB_STORAGE depends on SCSI but BLK_DEV_SD may -+# -+ -+# -+# also be needed; see USB_STORAGE Help for more info -+# -+CONFIG_USB_STORAGE=y -+# CONFIG_USB_STORAGE_DEBUG is not set -+# CONFIG_USB_STORAGE_REALTEK is not set -+# CONFIG_USB_STORAGE_DATAFAB is not set -+# CONFIG_USB_STORAGE_FREECOM is not set -+# CONFIG_USB_STORAGE_ISD200 is not set -+# CONFIG_USB_STORAGE_USBAT is not set -+# CONFIG_USB_STORAGE_SDDR09 is not set -+# CONFIG_USB_STORAGE_SDDR55 is not set -+# CONFIG_USB_STORAGE_JUMPSHOT is not set -+# CONFIG_USB_STORAGE_ALAUDA is not set -+# CONFIG_USB_STORAGE_ONETOUCH is not set -+# CONFIG_USB_STORAGE_KARMA is not set -+# CONFIG_USB_STORAGE_CYPRESS_ATACB is not set -+# CONFIG_USB_STORAGE_ENE_UB6250 is not set -+# CONFIG_USB_UAS is not set -+ -+# -+# USB Imaging devices -+# -+# CONFIG_USB_MDC800 is not set -+# CONFIG_USB_MICROTEK is not set -+# CONFIG_USBIP_CORE is not set -+# CONFIG_USB_MUSB_HDRC is not set -+CONFIG_USB_DWC3=y -+CONFIG_USB_DWC3_HOST=y -+ -+# -+# Platform Glue Driver Support -+# 
-+CONFIG_USB_DWC3_PCI=y -+ -+# -+# Debugging features -+# -+# CONFIG_USB_DWC3_DEBUG is not set -+# CONFIG_DWC3_HOST_USB3_LPM_ENABLE is not set -+# CONFIG_USB_DWC2 is not set -+# CONFIG_USB_CHIPIDEA is not set -+ -+# -+# USB port drivers -+# -+# CONFIG_USB_SERIAL is not set -+ -+# -+# USB Miscellaneous drivers -+# -+# CONFIG_USB_EMI62 is not set -+# CONFIG_USB_EMI26 is not set -+# CONFIG_USB_ADUTUX is not set -+# CONFIG_USB_SEVSEG is not set -+# CONFIG_USB_RIO500 is not set -+# CONFIG_USB_LEGOTOWER is not set -+# CONFIG_USB_LCD is not set -+# CONFIG_USB_LED is not set -+# CONFIG_USB_CYPRESS_CY7C63 is not set -+# CONFIG_USB_CYTHERM is not set -+# CONFIG_USB_IDMOUSE is not set -+# CONFIG_USB_FTDI_ELAN is not set -+# CONFIG_USB_APPLEDISPLAY is not set -+# CONFIG_USB_SISUSBVGA is not set -+# CONFIG_USB_LD is not set -+# CONFIG_USB_TRANCEVIBRATOR is not set -+# CONFIG_USB_IOWARRIOR is not set -+# CONFIG_USB_TEST is not set -+# CONFIG_USB_EHSET_TEST_FIXTURE is not set -+# CONFIG_USB_ISIGHTFW is not set -+# CONFIG_USB_YUREX is not set -+# CONFIG_USB_EZUSB_FX2 is not set -+# CONFIG_USB_HSIC_USB3503 is not set -+# CONFIG_USB_LINK_LAYER_TEST is not set -+ -+# -+# USB Physical Layer drivers -+# -+# CONFIG_USB_PHY is not set -+# CONFIG_NOP_USB_XCEIV is not set -+# CONFIG_USB_GPIO_VBUS is not set -+# CONFIG_USB_ISP1301 is not set -+CONFIG_USB_ULPI=y -+# CONFIG_USB_GADGET is not set -+# CONFIG_UWB is not set -+CONFIG_MMC=y -+# CONFIG_MMC_DEBUG is not set -+# CONFIG_MMC_CLKGATE is not set -+ -+# -+# MMC/SD/SDIO Card Drivers -+# -+CONFIG_MMC_BLOCK=y -+CONFIG_MMC_BLOCK_MINORS=8 -+CONFIG_MMC_BLOCK_BOUNCE=y -+# CONFIG_SDIO_UART is not set -+# CONFIG_MMC_TEST is not set -+ -+# -+# MMC/SD/SDIO Host Controller Drivers -+# -+CONFIG_MMC_ARMMMCI=y -+CONFIG_MMC_SDHCI=y -+CONFIG_MMC_SDHCI_IO_ACCESSORS=y -+# CONFIG_MMC_SDHCI_PCI is not set -+CONFIG_MMC_SDHCI_PLTFM=y -+# CONFIG_MMC_SDHCI_OF_ARASAN is not set -+CONFIG_MMC_SDHCI_OF_ESDHC=y -+# CONFIG_MMC_SDHCI_PXAV3 is not set -+# 
CONFIG_MMC_SDHCI_PXAV2 is not set -+# CONFIG_MMC_TIFM_SD is not set -+CONFIG_MMC_SPI=y -+# CONFIG_MMC_CB710 is not set -+# CONFIG_MMC_VIA_SDMMC is not set -+# CONFIG_MMC_VUB300 is not set -+# CONFIG_MMC_USHC is not set -+# CONFIG_MMC_USDHI6ROL0 is not set -+# CONFIG_MEMSTICK is not set -+# CONFIG_NEW_LEDS is not set -+# CONFIG_ACCESSIBILITY is not set -+# CONFIG_INFINIBAND is not set -+CONFIG_RTC_LIB=y -+CONFIG_RTC_CLASS=y -+CONFIG_RTC_HCTOSYS=y -+CONFIG_RTC_SYSTOHC=y -+CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -+# CONFIG_RTC_DEBUG is not set -+ -+# -+# RTC interfaces -+# -+CONFIG_RTC_INTF_SYSFS=y -+CONFIG_RTC_INTF_PROC=y -+CONFIG_RTC_INTF_DEV=y -+# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -+# CONFIG_RTC_DRV_TEST is not set -+ -+# -+# I2C RTC drivers -+# -+# CONFIG_RTC_DRV_DS1307 is not set -+# CONFIG_RTC_DRV_DS1374 is not set -+# CONFIG_RTC_DRV_DS1672 is not set -+CONFIG_RTC_DRV_DS3232=y -+# CONFIG_RTC_DRV_HYM8563 is not set -+# CONFIG_RTC_DRV_MAX6900 is not set -+# CONFIG_RTC_DRV_RS5C372 is not set -+# CONFIG_RTC_DRV_ISL1208 is not set -+# CONFIG_RTC_DRV_ISL12022 is not set -+# CONFIG_RTC_DRV_ISL12057 is not set -+# CONFIG_RTC_DRV_X1205 is not set -+# CONFIG_RTC_DRV_PCF2127 is not set -+# CONFIG_RTC_DRV_PCF8523 is not set -+# CONFIG_RTC_DRV_PCF8563 is not set -+# CONFIG_RTC_DRV_PCF85063 is not set -+# CONFIG_RTC_DRV_PCF8583 is not set -+# CONFIG_RTC_DRV_M41T80 is not set -+# CONFIG_RTC_DRV_BQ32K is not set -+# CONFIG_RTC_DRV_S35390A is not set -+# CONFIG_RTC_DRV_FM3130 is not set -+# CONFIG_RTC_DRV_RX8581 is not set -+# CONFIG_RTC_DRV_RX8025 is not set -+# CONFIG_RTC_DRV_EM3027 is not set -+# CONFIG_RTC_DRV_RV3029C2 is not set -+ -+# -+# SPI RTC drivers -+# -+# CONFIG_RTC_DRV_M41T93 is not set -+# CONFIG_RTC_DRV_M41T94 is not set -+# CONFIG_RTC_DRV_DS1305 is not set -+# CONFIG_RTC_DRV_DS1343 is not set -+# CONFIG_RTC_DRV_DS1347 is not set -+# CONFIG_RTC_DRV_DS1390 is not set -+# CONFIG_RTC_DRV_MAX6902 is not set -+# CONFIG_RTC_DRV_R9701 is not set -+# 
CONFIG_RTC_DRV_RS5C348 is not set -+# CONFIG_RTC_DRV_DS3234 is not set -+# CONFIG_RTC_DRV_PCF2123 is not set -+# CONFIG_RTC_DRV_RX4581 is not set -+# CONFIG_RTC_DRV_MCP795 is not set -+ -+# -+# Platform RTC drivers -+# -+# CONFIG_RTC_DRV_DS1286 is not set -+# CONFIG_RTC_DRV_DS1511 is not set -+# CONFIG_RTC_DRV_DS1553 is not set -+# CONFIG_RTC_DRV_DS1742 is not set -+# CONFIG_RTC_DRV_DS2404 is not set -+CONFIG_RTC_DRV_EFI=y -+# CONFIG_RTC_DRV_STK17TA8 is not set -+# CONFIG_RTC_DRV_M48T86 is not set -+# CONFIG_RTC_DRV_M48T35 is not set -+# CONFIG_RTC_DRV_M48T59 is not set -+# CONFIG_RTC_DRV_MSM6242 is not set -+# CONFIG_RTC_DRV_BQ4802 is not set -+# CONFIG_RTC_DRV_RP5C01 is not set -+# CONFIG_RTC_DRV_V3020 is not set -+ -+# -+# on-CPU RTC drivers -+# -+# CONFIG_RTC_DRV_PL030 is not set -+# CONFIG_RTC_DRV_PL031 is not set -+# CONFIG_RTC_DRV_SNVS is not set -+CONFIG_RTC_DRV_XGENE=y -+ -+# -+# HID Sensor RTC drivers -+# -+# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set -+CONFIG_DMADEVICES=y -+# CONFIG_DMADEVICES_DEBUG is not set -+ -+# -+# DMA Devices -+# -+# CONFIG_AMBA_PL08X is not set -+# CONFIG_DW_DMAC_CORE is not set -+# CONFIG_DW_DMAC is not set -+# CONFIG_DW_DMAC_PCI is not set -+# CONFIG_PL330_DMA is not set -+# CONFIG_FSL_EDMA is not set -+CONFIG_DMA_ENGINE=y -+CONFIG_DMA_OF=y -+ -+# -+# DMA Clients -+# -+# CONFIG_ASYNC_TX_DMA is not set -+# CONFIG_DMATEST is not set -+# CONFIG_AUXDISPLAY is not set -+# CONFIG_UIO is not set -+# CONFIG_VFIO_IOMMU_TYPE1 is not set -+CONFIG_VFIO=y -+CONFIG_VFIO_PCI=y -+CONFIG_VFIO_FSL_MC=y -+# CONFIG_VIRT_DRIVERS is not set -+CONFIG_VIRTIO=y -+ -+# -+# Virtio drivers -+# -+CONFIG_VIRTIO_PCI=y -+CONFIG_VIRTIO_BALLOON=y -+CONFIG_VIRTIO_MMIO=y -+# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set -+ -+# -+# Microsoft Hyper-V guest support -+# -+CONFIG_STAGING=y -+# CONFIG_COMEDI is not set -+# CONFIG_RTS5208 is not set -+# CONFIG_FB_XGI is not set -+# CONFIG_BCM_WIMAX is not set -+# CONFIG_FT1000 is not set -+ -+# -+# Speakup console speech 
-+# -+# CONFIG_SPEAKUP is not set -+# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set -+# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set -+# CONFIG_STAGING_MEDIA is not set -+ -+# -+# Android -+# -+# CONFIG_ANDROID is not set -+# CONFIG_USB_WPAN_HCD is not set -+# CONFIG_WIMAX_GDM72XX is not set -+# CONFIG_LTE_GDM724X is not set -+# CONFIG_MTD_SPINAND_MT29F is not set -+# CONFIG_LUSTRE_FS is not set -+# CONFIG_DGNC is not set -+# CONFIG_DGAP is not set -+# CONFIG_GS_FPGABOOT is not set -+CONFIG_FSL_MC_BUS=y -+CONFIG_FSL_MC_RESTOOL=y -+CONFIG_FSL_MC_DPIO=y -+# CONFIG_FSL_QBMAN_DEBUG is not set -+CONFIG_FSL_DPAA2=y -+CONFIG_FSL_DPAA2_ETH=y -+# CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE is not set -+CONFIG_FSL_DPAA2_MAC=y -+# CONFIG_FSL_DPAA2_MAC_NETDEVS is not set -+ -+# -+# SOC (System On Chip) specific Drivers -+# -+# CONFIG_SOC_TI is not set -+CONFIG_FSL_SOC_DRIVERS=y -+CONFIG_FSL_GUTS=y -+CONFIG_LS_SOC_DRIVERS=y -+CONFIG_CLKDEV_LOOKUP=y -+CONFIG_HAVE_CLK_PREPARE=y -+CONFIG_COMMON_CLK=y -+ -+# -+# Common Clock Framework -+# -+CONFIG_COMMON_CLK_VERSATILE=y -+CONFIG_CLK_SP810=y -+CONFIG_CLK_VEXPRESS_OSC=y -+# CONFIG_COMMON_CLK_SI5351 is not set -+# CONFIG_COMMON_CLK_SI570 is not set -+CONFIG_CLK_QORIQ=y -+CONFIG_COMMON_CLK_XGENE=y -+# CONFIG_COMMON_CLK_PXA is not set -+# CONFIG_COMMON_CLK_QCOM is not set -+ -+# -+# Hardware Spinlock drivers -+# -+ -+# -+# Clock Source drivers -+# -+CONFIG_CLKSRC_OF=y -+CONFIG_CLKSRC_MMIO=y -+CONFIG_ARM_ARCH_TIMER=y -+CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y -+# CONFIG_ATMEL_PIT is not set -+# CONFIG_SH_TIMER_CMT is not set -+# CONFIG_SH_TIMER_MTU2 is not set -+# CONFIG_SH_TIMER_TMU is not set -+# CONFIG_EM_TIMER_STI is not set -+CONFIG_CLKSRC_VERSATILE=y -+# CONFIG_MAILBOX is not set -+CONFIG_IOMMU_API=y -+CONFIG_IOMMU_SUPPORT=y -+ -+# -+# Generic IOMMU Pagetable Support -+# -+CONFIG_IOMMU_IO_PGTABLE=y -+CONFIG_IOMMU_IO_PGTABLE_LPAE=y -+# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set -+CONFIG_OF_IOMMU=y -+CONFIG_ARM_SMMU=y -+ -+# -+# Remoteproc 
drivers -+# -+# CONFIG_STE_MODEM_RPROC is not set -+ -+# -+# Rpmsg drivers -+# -+ -+# -+# SOC (System On Chip) specific Drivers -+# -+# CONFIG_PM_DEVFREQ is not set -+# CONFIG_EXTCON is not set -+CONFIG_MEMORY=y -+CONFIG_FSL_IFC=y -+# CONFIG_IIO is not set -+# CONFIG_VME_BUS is not set -+# CONFIG_PWM is not set -+CONFIG_IRQCHIP=y -+CONFIG_ARM_GIC=y -+CONFIG_ARM_GIC_V2M=y -+CONFIG_ARM_GIC_V3=y -+CONFIG_ARM_GIC_V3_ITS=y -+# CONFIG_IPACK_BUS is not set -+CONFIG_RESET_CONTROLLER=y -+# CONFIG_FMC is not set -+ -+# -+# PHY Subsystem -+# -+CONFIG_GENERIC_PHY=y -+# CONFIG_BCM_KONA_USB2_PHY is not set -+CONFIG_PHY_XGENE=y -+# CONFIG_POWERCAP is not set -+# CONFIG_MCB is not set -+CONFIG_RAS=y -+# CONFIG_THUNDERBOLT is not set -+ -+# -+# Firmware Drivers -+# -+# CONFIG_FIRMWARE_MEMMAP is not set -+ -+# -+# EFI (Extensible Firmware Interface) Support -+# -+# CONFIG_EFI_VARS is not set -+CONFIG_EFI_PARAMS_FROM_FDT=y -+CONFIG_EFI_RUNTIME_WRAPPERS=y -+CONFIG_EFI_ARMSTUB=y -+ -+# -+# File systems -+# -+CONFIG_DCACHE_WORD_ACCESS=y -+CONFIG_EXT2_FS=y -+# CONFIG_EXT2_FS_XATTR is not set -+# CONFIG_EXT2_FS_XIP is not set -+CONFIG_EXT3_FS=y -+# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -+# CONFIG_EXT3_FS_XATTR is not set -+CONFIG_EXT4_FS=y -+# CONFIG_EXT4_FS_POSIX_ACL is not set -+# CONFIG_EXT4_FS_SECURITY is not set -+# CONFIG_EXT4_DEBUG is not set -+CONFIG_JBD=y -+# CONFIG_JBD_DEBUG is not set -+CONFIG_JBD2=y -+# CONFIG_JBD2_DEBUG is not set -+CONFIG_FS_MBCACHE=y -+# CONFIG_REISERFS_FS is not set -+# CONFIG_JFS_FS is not set -+# CONFIG_XFS_FS is not set -+# CONFIG_GFS2_FS is not set -+# CONFIG_BTRFS_FS is not set -+# CONFIG_NILFS2_FS is not set -+# CONFIG_FS_POSIX_ACL is not set -+CONFIG_FILE_LOCKING=y -+CONFIG_FSNOTIFY=y -+CONFIG_DNOTIFY=y -+CONFIG_INOTIFY_USER=y -+CONFIG_FANOTIFY=y -+CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y -+# CONFIG_QUOTA is not set -+# CONFIG_QUOTACTL is not set -+# CONFIG_AUTOFS4_FS is not set -+CONFIG_FUSE_FS=y -+CONFIG_CUSE=y -+CONFIG_OVERLAY_FS=y -+ -+# -+# 
Caches -+# -+# CONFIG_FSCACHE is not set -+ -+# -+# CD-ROM/DVD Filesystems -+# -+# CONFIG_ISO9660_FS is not set -+# CONFIG_UDF_FS is not set -+ -+# -+# DOS/FAT/NT Filesystems -+# -+CONFIG_FAT_FS=y -+CONFIG_MSDOS_FS=y -+CONFIG_VFAT_FS=y -+CONFIG_FAT_DEFAULT_CODEPAGE=437 -+CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" -+# CONFIG_NTFS_FS is not set -+ -+# -+# Pseudo filesystems -+# -+CONFIG_PROC_FS=y -+# CONFIG_PROC_KCORE is not set -+CONFIG_PROC_SYSCTL=y -+CONFIG_PROC_PAGE_MONITOR=y -+CONFIG_KERNFS=y -+CONFIG_SYSFS=y -+CONFIG_TMPFS=y -+# CONFIG_TMPFS_POSIX_ACL is not set -+CONFIG_TMPFS_XATTR=y -+CONFIG_HUGETLBFS=y -+CONFIG_HUGETLB_PAGE=y -+# CONFIG_CONFIGFS_FS is not set -+CONFIG_MISC_FILESYSTEMS=y -+# CONFIG_ADFS_FS is not set -+# CONFIG_AFFS_FS is not set -+# CONFIG_ECRYPT_FS is not set -+# CONFIG_HFS_FS is not set -+# CONFIG_HFSPLUS_FS is not set -+# CONFIG_BEFS_FS is not set -+# CONFIG_BFS_FS is not set -+# CONFIG_EFS_FS is not set -+CONFIG_JFFS2_FS=y -+CONFIG_JFFS2_FS_DEBUG=0 -+CONFIG_JFFS2_FS_WRITEBUFFER=y -+# CONFIG_JFFS2_FS_WBUF_VERIFY is not set -+CONFIG_JFFS2_SUMMARY=y -+# CONFIG_JFFS2_FS_XATTR is not set -+# CONFIG_JFFS2_COMPRESSION_OPTIONS is not set -+CONFIG_JFFS2_ZLIB=y -+# CONFIG_JFFS2_LZO is not set -+CONFIG_JFFS2_RTIME=y -+# CONFIG_JFFS2_RUBIN is not set -+# CONFIG_LOGFS is not set -+# CONFIG_CRAMFS is not set -+CONFIG_SQUASHFS=y -+CONFIG_SQUASHFS_FILE_CACHE=y -+# CONFIG_SQUASHFS_FILE_DIRECT is not set -+CONFIG_SQUASHFS_DECOMP_SINGLE=y -+# CONFIG_SQUASHFS_DECOMP_MULTI is not set -+# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set -+CONFIG_SQUASHFS_XATTR=y -+CONFIG_SQUASHFS_ZLIB=y -+CONFIG_SQUASHFS_LZO=y -+CONFIG_SQUASHFS_XZ=y -+# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -+# CONFIG_SQUASHFS_EMBEDDED is not set -+CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 -+# CONFIG_VXFS_FS is not set -+# CONFIG_MINIX_FS is not set -+# CONFIG_OMFS_FS is not set -+# CONFIG_HPFS_FS is not set -+# CONFIG_QNX4FS_FS is not set -+# CONFIG_QNX6FS_FS is not set -+# CONFIG_ROMFS_FS is 
not set -+# CONFIG_PSTORE is not set -+# CONFIG_SYSV_FS is not set -+# CONFIG_UFS_FS is not set -+# CONFIG_F2FS_FS is not set -+# CONFIG_EFIVAR_FS is not set -+# CONFIG_AUFS_FS is not set -+CONFIG_NETWORK_FILESYSTEMS=y -+CONFIG_NFS_FS=y -+CONFIG_NFS_V2=y -+CONFIG_NFS_V3=y -+# CONFIG_NFS_V3_ACL is not set -+CONFIG_NFS_V4=y -+# CONFIG_NFS_SWAP is not set -+# CONFIG_NFS_V4_1 is not set -+CONFIG_ROOT_NFS=y -+# CONFIG_NFS_USE_LEGACY_DNS is not set -+CONFIG_NFS_USE_KERNEL_DNS=y -+# CONFIG_NFSD is not set -+CONFIG_GRACE_PERIOD=y -+CONFIG_LOCKD=y -+CONFIG_LOCKD_V4=y -+CONFIG_NFS_COMMON=y -+CONFIG_SUNRPC=y -+CONFIG_SUNRPC_GSS=y -+# CONFIG_SUNRPC_DEBUG is not set -+# CONFIG_CEPH_FS is not set -+# CONFIG_CIFS is not set -+# CONFIG_NCP_FS is not set -+# CONFIG_CODA_FS is not set -+# CONFIG_AFS_FS is not set -+CONFIG_9P_FS=y -+# CONFIG_9P_FS_POSIX_ACL is not set -+# CONFIG_9P_FS_SECURITY is not set -+CONFIG_NLS=y -+CONFIG_NLS_DEFAULT="iso8859-1" -+CONFIG_NLS_CODEPAGE_437=y -+# CONFIG_NLS_CODEPAGE_737 is not set -+# CONFIG_NLS_CODEPAGE_775 is not set -+# CONFIG_NLS_CODEPAGE_850 is not set -+# CONFIG_NLS_CODEPAGE_852 is not set -+# CONFIG_NLS_CODEPAGE_855 is not set -+# CONFIG_NLS_CODEPAGE_857 is not set -+# CONFIG_NLS_CODEPAGE_860 is not set -+# CONFIG_NLS_CODEPAGE_861 is not set -+# CONFIG_NLS_CODEPAGE_862 is not set -+# CONFIG_NLS_CODEPAGE_863 is not set -+# CONFIG_NLS_CODEPAGE_864 is not set -+# CONFIG_NLS_CODEPAGE_865 is not set -+# CONFIG_NLS_CODEPAGE_866 is not set -+# CONFIG_NLS_CODEPAGE_869 is not set -+# CONFIG_NLS_CODEPAGE_936 is not set -+# CONFIG_NLS_CODEPAGE_950 is not set -+# CONFIG_NLS_CODEPAGE_932 is not set -+# CONFIG_NLS_CODEPAGE_949 is not set -+# CONFIG_NLS_CODEPAGE_874 is not set -+# CONFIG_NLS_ISO8859_8 is not set -+# CONFIG_NLS_CODEPAGE_1250 is not set -+# CONFIG_NLS_CODEPAGE_1251 is not set -+CONFIG_NLS_ASCII=y -+CONFIG_NLS_ISO8859_1=y -+# CONFIG_NLS_ISO8859_2 is not set -+# CONFIG_NLS_ISO8859_3 is not set -+# CONFIG_NLS_ISO8859_4 is not set -+# 
CONFIG_NLS_ISO8859_5 is not set -+# CONFIG_NLS_ISO8859_6 is not set -+# CONFIG_NLS_ISO8859_7 is not set -+# CONFIG_NLS_ISO8859_9 is not set -+# CONFIG_NLS_ISO8859_13 is not set -+# CONFIG_NLS_ISO8859_14 is not set -+# CONFIG_NLS_ISO8859_15 is not set -+# CONFIG_NLS_KOI8_R is not set -+# CONFIG_NLS_KOI8_U is not set -+# CONFIG_NLS_MAC_ROMAN is not set -+# CONFIG_NLS_MAC_CELTIC is not set -+# CONFIG_NLS_MAC_CENTEURO is not set -+# CONFIG_NLS_MAC_CROATIAN is not set -+# CONFIG_NLS_MAC_CYRILLIC is not set -+# CONFIG_NLS_MAC_GAELIC is not set -+# CONFIG_NLS_MAC_GREEK is not set -+# CONFIG_NLS_MAC_ICELAND is not set -+# CONFIG_NLS_MAC_INUIT is not set -+# CONFIG_NLS_MAC_ROMANIAN is not set -+# CONFIG_NLS_MAC_TURKISH is not set -+CONFIG_NLS_UTF8=y -+CONFIG_HAVE_KVM_IRQCHIP=y -+CONFIG_KVM_MMIO=y -+CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -+CONFIG_VIRTUALIZATION=y -+CONFIG_KVM=y -+CONFIG_KVM_ARM_HOST=y -+CONFIG_KVM_ARM_MAX_VCPUS=8 -+CONFIG_KVM_ARM_VGIC=y -+CONFIG_KVM_ARM_TIMER=y -+ -+# -+# Kernel hacking -+# -+ -+# -+# printk and dmesg options -+# -+CONFIG_PRINTK_TIME=y -+CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 -+# CONFIG_BOOT_PRINTK_DELAY is not set -+# CONFIG_DYNAMIC_DEBUG is not set -+ -+# -+# Compile-time checks and compiler options -+# -+CONFIG_DEBUG_INFO=y -+# CONFIG_DEBUG_INFO_REDUCED is not set -+# CONFIG_DEBUG_INFO_SPLIT is not set -+# CONFIG_DEBUG_INFO_DWARF4 is not set -+CONFIG_ENABLE_WARN_DEPRECATED=y -+CONFIG_ENABLE_MUST_CHECK=y -+CONFIG_FRAME_WARN=2048 -+# CONFIG_STRIP_ASM_SYMS is not set -+# CONFIG_READABLE_ASM is not set -+# CONFIG_UNUSED_SYMBOLS is not set -+CONFIG_DEBUG_FS=y -+# CONFIG_HEADERS_CHECK is not set -+# CONFIG_DEBUG_SECTION_MISMATCH is not set -+CONFIG_ARCH_WANT_FRAME_POINTERS=y -+CONFIG_FRAME_POINTER=y -+# CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set -+CONFIG_MAGIC_SYSRQ=y -+CONFIG_MAGIC_SYSRQ_DEFAULT_ENABLE=0x1 -+CONFIG_DEBUG_KERNEL=y -+ -+# -+# Memory Debugging -+# -+# CONFIG_DEBUG_PAGEALLOC is not set -+# CONFIG_DEBUG_OBJECTS is not set -+# 
CONFIG_DEBUG_SLAB is not set -+CONFIG_HAVE_DEBUG_KMEMLEAK=y -+# CONFIG_DEBUG_KMEMLEAK is not set -+# CONFIG_DEBUG_STACK_USAGE is not set -+# CONFIG_DEBUG_VM is not set -+CONFIG_DEBUG_MEMORY_INIT=y -+# CONFIG_DEBUG_PER_CPU_MAPS is not set -+# CONFIG_DEBUG_SHIRQ is not set -+ -+# -+# Debug Lockups and Hangs -+# -+CONFIG_LOCKUP_DETECTOR=y -+# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -+CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -+CONFIG_DETECT_HUNG_TASK=y -+CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -+# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -+CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 -+# CONFIG_PANIC_ON_OOPS is not set -+CONFIG_PANIC_ON_OOPS_VALUE=0 -+CONFIG_PANIC_TIMEOUT=0 -+CONFIG_SCHED_DEBUG=y -+# CONFIG_SCHEDSTATS is not set -+# CONFIG_SCHED_STACK_END_CHECK is not set -+# CONFIG_TIMER_STATS is not set -+CONFIG_DEBUG_PREEMPT=y -+ -+# -+# Lock Debugging (spinlocks, mutexes, etc...) -+# -+# CONFIG_DEBUG_RT_MUTEXES is not set -+# CONFIG_DEBUG_SPINLOCK is not set -+# CONFIG_DEBUG_MUTEXES is not set -+# CONFIG_DEBUG_WW_MUTEX_SLOWPATH is not set -+# CONFIG_DEBUG_LOCK_ALLOC is not set -+# CONFIG_PROVE_LOCKING is not set -+# CONFIG_LOCK_STAT is not set -+# CONFIG_DEBUG_ATOMIC_SLEEP is not set -+# CONFIG_DEBUG_LOCKING_API_SELFTESTS is not set -+# CONFIG_LOCK_TORTURE_TEST is not set -+# CONFIG_STACKTRACE is not set -+# CONFIG_DEBUG_KOBJECT is not set -+CONFIG_HAVE_DEBUG_BUGVERBOSE=y -+CONFIG_DEBUG_BUGVERBOSE=y -+# CONFIG_DEBUG_LIST is not set -+# CONFIG_DEBUG_PI_LIST is not set -+# CONFIG_DEBUG_SG is not set -+# CONFIG_DEBUG_NOTIFIERS is not set -+# CONFIG_DEBUG_CREDENTIALS is not set -+ -+# -+# RCU Debugging -+# -+# CONFIG_SPARSE_RCU_POINTER is not set -+# CONFIG_TORTURE_TEST is not set -+# CONFIG_RCU_TORTURE_TEST is not set -+CONFIG_RCU_CPU_STALL_TIMEOUT=21 -+CONFIG_RCU_CPU_STALL_VERBOSE=y -+# CONFIG_RCU_CPU_STALL_INFO is not set -+# CONFIG_RCU_TRACE is not set -+# CONFIG_DEBUG_BLOCK_EXT_DEVT is not set -+# CONFIG_NOTIFIER_ERROR_INJECTION is not set -+# 
CONFIG_FAULT_INJECTION is not set -+CONFIG_HAVE_FUNCTION_TRACER=y -+CONFIG_HAVE_FUNCTION_GRAPH_TRACER=y -+CONFIG_HAVE_DYNAMIC_FTRACE=y -+CONFIG_HAVE_FTRACE_MCOUNT_RECORD=y -+CONFIG_HAVE_SYSCALL_TRACEPOINTS=y -+CONFIG_HAVE_C_RECORDMCOUNT=y -+CONFIG_TRACING_SUPPORT=y -+# CONFIG_FTRACE is not set -+ -+# -+# Runtime Testing -+# -+# CONFIG_LKDTM is not set -+# CONFIG_TEST_LIST_SORT is not set -+# CONFIG_BACKTRACE_SELF_TEST is not set -+# CONFIG_RBTREE_TEST is not set -+# CONFIG_INTERVAL_TREE_TEST is not set -+# CONFIG_PERCPU_TEST is not set -+# CONFIG_ATOMIC64_SELFTEST is not set -+# CONFIG_TEST_STRING_HELPERS is not set -+# CONFIG_TEST_KSTRTOX is not set -+# CONFIG_TEST_RHASHTABLE is not set -+# CONFIG_DMA_API_DEBUG is not set -+# CONFIG_TEST_LKM is not set -+# CONFIG_TEST_USER_COPY is not set -+# CONFIG_TEST_BPF is not set -+# CONFIG_TEST_FIRMWARE is not set -+# CONFIG_TEST_UDELAY is not set -+# CONFIG_SAMPLES is not set -+CONFIG_HAVE_ARCH_KGDB=y -+# CONFIG_KGDB is not set -+# CONFIG_STRICT_DEVMEM is not set -+CONFIG_PID_IN_CONTEXTIDR=y -+# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set -+# CONFIG_DEBUG_SET_MODULE_RONX is not set -+ -+# -+# Security options -+# -+CONFIG_KEYS=y -+# CONFIG_PERSISTENT_KEYRINGS is not set -+# CONFIG_BIG_KEYS is not set -+# CONFIG_ENCRYPTED_KEYS is not set -+# CONFIG_KEYS_DEBUG_PROC_KEYS is not set -+# CONFIG_SECURITY_DMESG_RESTRICT is not set -+CONFIG_SECURITY=y -+# CONFIG_SECURITYFS is not set -+# CONFIG_SECURITY_NETWORK is not set -+# CONFIG_SECURITY_PATH is not set -+# CONFIG_SECURITY_SMACK is not set -+# CONFIG_SECURITY_TOMOYO is not set -+# CONFIG_SECURITY_APPARMOR is not set -+# CONFIG_SECURITY_YAMA is not set -+CONFIG_INTEGRITY=y -+# CONFIG_INTEGRITY_SIGNATURE is not set -+CONFIG_INTEGRITY_AUDIT=y -+# CONFIG_IMA is not set -+# CONFIG_EVM is not set -+CONFIG_DEFAULT_SECURITY_DAC=y -+CONFIG_DEFAULT_SECURITY="" -+CONFIG_CRYPTO=y -+ -+# -+# Crypto core or helper -+# -+CONFIG_CRYPTO_ALGAPI=y -+CONFIG_CRYPTO_ALGAPI2=y 
-+CONFIG_CRYPTO_AEAD=y -+CONFIG_CRYPTO_AEAD2=y -+CONFIG_CRYPTO_BLKCIPHER=y -+CONFIG_CRYPTO_BLKCIPHER2=y -+CONFIG_CRYPTO_HASH=y -+CONFIG_CRYPTO_HASH2=y -+CONFIG_CRYPTO_RNG=y -+CONFIG_CRYPTO_RNG2=y -+CONFIG_CRYPTO_PCOMP2=y -+CONFIG_CRYPTO_MANAGER=y -+CONFIG_CRYPTO_MANAGER2=y -+# CONFIG_CRYPTO_USER is not set -+CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y -+# CONFIG_CRYPTO_GF128MUL is not set -+# CONFIG_CRYPTO_NULL is not set -+# CONFIG_CRYPTO_PCRYPT is not set -+CONFIG_CRYPTO_WORKQUEUE=y -+CONFIG_CRYPTO_CRYPTD=y -+# CONFIG_CRYPTO_MCRYPTD is not set -+CONFIG_CRYPTO_AUTHENC=y -+# CONFIG_CRYPTO_TEST is not set -+CONFIG_CRYPTO_ABLK_HELPER=y -+ -+# -+# Authenticated Encryption with Associated Data -+# -+# CONFIG_CRYPTO_CCM is not set -+# CONFIG_CRYPTO_GCM is not set -+# CONFIG_CRYPTO_SEQIV is not set -+ -+# -+# Block modes -+# -+CONFIG_CRYPTO_CBC=y -+# CONFIG_CRYPTO_CTR is not set -+# CONFIG_CRYPTO_CTS is not set -+# CONFIG_CRYPTO_ECB is not set -+# CONFIG_CRYPTO_LRW is not set -+# CONFIG_CRYPTO_PCBC is not set -+# CONFIG_CRYPTO_XTS is not set -+ -+# -+# Hash modes -+# -+# CONFIG_CRYPTO_CMAC is not set -+CONFIG_CRYPTO_HMAC=y -+# CONFIG_CRYPTO_XCBC is not set -+# CONFIG_CRYPTO_VMAC is not set -+ -+# -+# Digest -+# -+CONFIG_CRYPTO_CRC32C=y -+# CONFIG_CRYPTO_CRC32 is not set -+# CONFIG_CRYPTO_CRCT10DIF is not set -+# CONFIG_CRYPTO_GHASH is not set -+# CONFIG_CRYPTO_MD4 is not set -+CONFIG_CRYPTO_MD5=y -+# CONFIG_CRYPTO_MICHAEL_MIC is not set -+# CONFIG_CRYPTO_RMD128 is not set -+# CONFIG_CRYPTO_RMD160 is not set -+# CONFIG_CRYPTO_RMD256 is not set -+# CONFIG_CRYPTO_RMD320 is not set -+CONFIG_CRYPTO_SHA1=y -+# CONFIG_CRYPTO_SHA256 is not set -+# CONFIG_CRYPTO_SHA512 is not set -+# CONFIG_CRYPTO_TGR192 is not set -+# CONFIG_CRYPTO_WP512 is not set -+ -+# -+# Ciphers -+# -+CONFIG_CRYPTO_AES=y -+# CONFIG_CRYPTO_ANUBIS is not set -+# CONFIG_CRYPTO_ARC4 is not set -+# CONFIG_CRYPTO_BLOWFISH is not set -+# CONFIG_CRYPTO_CAMELLIA is not set -+# CONFIG_CRYPTO_CAST5 is not set -+# 
CONFIG_CRYPTO_CAST6 is not set -+CONFIG_CRYPTO_DES=y -+# CONFIG_CRYPTO_FCRYPT is not set -+# CONFIG_CRYPTO_KHAZAD is not set -+# CONFIG_CRYPTO_SALSA20 is not set -+# CONFIG_CRYPTO_SEED is not set -+# CONFIG_CRYPTO_SERPENT is not set -+# CONFIG_CRYPTO_TEA is not set -+# CONFIG_CRYPTO_TWOFISH is not set -+ -+# -+# Compression -+# -+CONFIG_CRYPTO_DEFLATE=y -+# CONFIG_CRYPTO_ZLIB is not set -+# CONFIG_CRYPTO_LZO is not set -+# CONFIG_CRYPTO_LZ4 is not set -+# CONFIG_CRYPTO_LZ4HC is not set -+ -+# -+# Random Number Generation -+# -+CONFIG_CRYPTO_ANSI_CPRNG=y -+# CONFIG_CRYPTO_DRBG_MENU is not set -+# CONFIG_CRYPTO_USER_API_HASH is not set -+# CONFIG_CRYPTO_USER_API_SKCIPHER is not set -+CONFIG_CRYPTO_HW=y -+# CONFIG_CRYPTO_DEV_CCP is not set -+# CONFIG_ASYMMETRIC_KEY_TYPE is not set -+CONFIG_ARM64_CRYPTO=y -+CONFIG_CRYPTO_SHA1_ARM64_CE=y -+CONFIG_CRYPTO_SHA2_ARM64_CE=y -+CONFIG_CRYPTO_GHASH_ARM64_CE=y -+CONFIG_CRYPTO_AES_ARM64_CE=y -+CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -+CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -+CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y -+# CONFIG_BINARY_PRINTF is not set -+ -+# -+# Library routines -+# -+CONFIG_BITREVERSE=y -+CONFIG_GENERIC_STRNCPY_FROM_USER=y -+CONFIG_GENERIC_STRNLEN_USER=y -+CONFIG_GENERIC_NET_UTILS=y -+CONFIG_GENERIC_PCI_IOMAP=y -+CONFIG_GENERIC_IOMAP=y -+CONFIG_GENERIC_IO=y -+CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y -+# CONFIG_CRC_CCITT is not set -+CONFIG_CRC16=y -+# CONFIG_CRC_T10DIF is not set -+CONFIG_CRC_ITU_T=y -+CONFIG_CRC32=y -+# CONFIG_CRC32_SELFTEST is not set -+CONFIG_CRC32_SLICEBY8=y -+# CONFIG_CRC32_SLICEBY4 is not set -+# CONFIG_CRC32_SARWATE is not set -+# CONFIG_CRC32_BIT is not set -+CONFIG_CRC7=y -+# CONFIG_LIBCRC32C is not set -+# CONFIG_CRC8 is not set -+CONFIG_AUDIT_GENERIC=y -+CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y -+CONFIG_AUDIT_COMPAT_GENERIC=y -+# CONFIG_RANDOM32_SELFTEST is not set -+CONFIG_ZLIB_INFLATE=y -+CONFIG_ZLIB_DEFLATE=y -+CONFIG_LZO_COMPRESS=y -+CONFIG_LZO_DECOMPRESS=y -+CONFIG_LZ4_DECOMPRESS=y -+CONFIG_XZ_DEC=y 
-+CONFIG_XZ_DEC_X86=y -+CONFIG_XZ_DEC_POWERPC=y -+CONFIG_XZ_DEC_IA64=y -+CONFIG_XZ_DEC_ARM=y -+CONFIG_XZ_DEC_ARMTHUMB=y -+CONFIG_XZ_DEC_SPARC=y -+CONFIG_XZ_DEC_BCJ=y -+# CONFIG_XZ_DEC_TEST is not set -+CONFIG_DECOMPRESS_GZIP=y -+CONFIG_DECOMPRESS_BZIP2=y -+CONFIG_DECOMPRESS_LZMA=y -+CONFIG_DECOMPRESS_XZ=y -+CONFIG_DECOMPRESS_LZO=y -+CONFIG_DECOMPRESS_LZ4=y -+CONFIG_GENERIC_ALLOCATOR=y -+CONFIG_ASSOCIATIVE_ARRAY=y -+CONFIG_HAS_IOMEM=y -+CONFIG_HAS_IOPORT_MAP=y -+CONFIG_HAS_DMA=y -+CONFIG_CPU_RMAP=y -+CONFIG_DQL=y -+CONFIG_GLOB=y -+# CONFIG_GLOB_SELFTEST is not set -+CONFIG_NLATTR=y -+CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y -+CONFIG_AVERAGE=y -+# CONFIG_CORDIC is not set -+# CONFIG_DDR is not set -+CONFIG_LIBFDT=y -+CONFIG_OID_REGISTRY=y -+CONFIG_UCS2_STRING=y -+CONFIG_FONT_SUPPORT=y -+# CONFIG_FONTS is not set -+CONFIG_FONT_8x8=y -+CONFIG_FONT_8x16=y -+CONFIG_ARCH_HAS_SG_CHAIN=y -diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h -index cf98b36..243ef25 100644 ---- a/arch/arm64/include/asm/device.h -+++ b/arch/arm64/include/asm/device.h -@@ -21,6 +21,7 @@ struct dev_archdata { - #ifdef CONFIG_IOMMU_API - void *iommu; /* private IOMMU data */ - #endif -+ bool dma_coherent; - }; - - struct pdev_archdata { -diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h -index adeae3f..9ce3e68 100644 ---- a/arch/arm64/include/asm/dma-mapping.h -+++ b/arch/arm64/include/asm/dma-mapping.h -@@ -52,12 +52,20 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) - dev->archdata.dma_ops = ops; - } - --static inline int set_arch_dma_coherent_ops(struct device *dev) -+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, -+ struct iommu_ops *iommu, bool coherent) - { -- set_dma_ops(dev, &coherent_swiotlb_dma_ops); -- return 0; -+ dev->archdata.dma_coherent = coherent; -+ if (coherent) -+ set_dma_ops(dev, &coherent_swiotlb_dma_ops); -+} -+#define arch_setup_dma_ops 
arch_setup_dma_ops -+ -+/* do not use this function in a driver */ -+static inline bool is_device_dma_coherent(struct device *dev) -+{ -+ return dev->archdata.dma_coherent; - } --#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops - - #include - -diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h -index 75825b6..f58e31a 100644 ---- a/arch/arm64/include/asm/io.h -+++ b/arch/arm64/include/asm/io.h -@@ -249,6 +249,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); - #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) - #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) - #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) -+#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NS)) - #define iounmap __iounmap - - #define ARCH_HAS_IOREMAP_WC -diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h -index 101a42b..8ec41e5 100644 ---- a/arch/arm64/include/asm/mmu_context.h -+++ b/arch/arm64/include/asm/mmu_context.h -@@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void) - : "r" (ttbr)); - } - -+/* -+ * TCR.T0SZ value to use when the ID map is active. Usually equals -+ * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in -+ * physical memory, in which case it will be smaller. 
-+ */ -+extern u64 idmap_t0sz; -+ -+static inline bool __cpu_uses_extended_idmap(void) -+{ -+ return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) && -+ unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS))); -+} -+ -+static inline void __cpu_set_tcr_t0sz(u64 t0sz) -+{ -+ unsigned long tcr; -+ -+ if (__cpu_uses_extended_idmap()) -+ asm volatile ( -+ " mrs %0, tcr_el1 ;" -+ " bfi %0, %1, %2, %3 ;" -+ " msr tcr_el1, %0 ;" -+ " isb" -+ : "=&r" (tcr) -+ : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH)); -+} -+ -+/* -+ * Set TCR.T0SZ to the value appropriate for activating the identity map. -+ */ -+static inline void cpu_set_idmap_tcr_t0sz(void) -+{ -+ __cpu_set_tcr_t0sz(idmap_t0sz); -+} -+ -+/* -+ * Set TCR.T0SZ to its default value (based on VA_BITS) -+ */ -+static inline void cpu_set_default_tcr_t0sz(void) -+{ -+ __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)); -+} -+ - static inline void switch_new_context(struct mm_struct *mm) - { - unsigned long flags; -diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h -index 22b1623..3d02b18 100644 ---- a/arch/arm64/include/asm/page.h -+++ b/arch/arm64/include/asm/page.h -@@ -33,7 +33,9 @@ - * image. Both require pgd, pud (4 levels only) and pmd tables to (section) - * map the kernel. With the 64K page configuration, swapper and idmap need to - * map to pte level. The swapper also maps the FDT (see __create_page_tables -- * for more information). -+ * for more information). Note that the number of ID map translation levels -+ * could be increased on the fly if system RAM is out of reach for the default -+ * VA range, so 3 pages are reserved in all cases. 
- */ - #ifdef CONFIG_ARM64_64K_PAGES - #define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS) -@@ -42,7 +44,7 @@ - #endif - - #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) --#define IDMAP_DIR_SIZE (SWAPPER_DIR_SIZE) -+#define IDMAP_DIR_SIZE (3 * PAGE_SIZE) - - #ifndef __ASSEMBLY__ - -diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h -index 88174e0..500b74e 100644 ---- a/arch/arm64/include/asm/pgtable-hwdef.h -+++ b/arch/arm64/include/asm/pgtable-hwdef.h -@@ -142,7 +142,12 @@ - /* - * TCR flags. - */ --#define TCR_TxSZ(x) (((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0)) -+#define TCR_T0SZ_OFFSET 0 -+#define TCR_T1SZ_OFFSET 16 -+#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET) -+#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET) -+#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x)) -+#define TCR_TxSZ_WIDTH 6 - #define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24)) - #define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24)) - #define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24)) -diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h -index 41a43bf..9b417b8 100644 ---- a/arch/arm64/include/asm/pgtable.h -+++ b/arch/arm64/include/asm/pgtable.h -@@ -65,6 +65,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); - #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) - #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC)) - #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)) -+#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)) - - #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) - #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) -@@ -321,6 +322,13 @@ static inline int has_transparent_hugepage(void) - #define 
pgprot_device(prot) \ - __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_DEVICE_nGnRE) | PTE_PXN | PTE_UXN) - #define __HAVE_PHYS_MEM_ACCESS_PROT -+#define pgprot_cached_ns(prot) \ -+ __pgprot(pgprot_val(pgprot_cached(prot)) & ~PTE_SHARED) -+#define pgprot_cached(prot) \ -+ __pgprot_modify(prot, PTE_ATTRINDX_MASK, PTE_ATTRINDX(MT_NORMAL) | \ -+ PTE_PXN | PTE_UXN) -+ -+ - struct file; - extern pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn, - unsigned long size, pgprot_t vma_prot); -diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S -index 2877dd8..ca02239 100644 ---- a/arch/arm64/kernel/head.S -+++ b/arch/arm64/kernel/head.S -@@ -592,6 +592,43 @@ __create_page_tables: - mov x0, x25 // idmap_pg_dir - ldr x3, =KERNEL_START - add x3, x3, x28 // __pa(KERNEL_START) -+ -+#ifndef CONFIG_ARM64_VA_BITS_48 -+#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) -+#define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT)) -+ -+ /* -+ * If VA_BITS < 48, it may be too small to allow for an ID mapping to be -+ * created that covers system RAM if that is located sufficiently high -+ * in the physical address space. So for the ID map, use an extended -+ * virtual range in that case, by configuring an additional translation -+ * level. -+ * First, we have to verify our assumption that the current value of -+ * VA_BITS was chosen such that all translation levels are fully -+ * utilised, and that lowering T0SZ will always result in an additional -+ * translation level to be configured. -+ */ -+#if VA_BITS != EXTRA_SHIFT -+#error "Mismatch between VA_BITS and page size/number of translation levels" -+#endif -+ -+ /* -+ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the -+ * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used), -+ * this number conveniently equals the number of leading zeroes in -+ * the physical address of KERNEL_END. 
-+ */ -+ adrp x5, KERNEL_END -+ clz x5, x5 -+ cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? -+ b.ge 1f // .. then skip additional level -+ -+ str_l x5, idmap_t0sz, x6 -+ -+ create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6 -+1: -+#endif -+ - create_pgd_entry x0, x3, x5, x6 - ldr x6, =KERNEL_END - mov x5, x3 // __pa(KERNEL_START) -diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c -index 0ef8789..5657692 100644 ---- a/arch/arm64/kernel/smp.c -+++ b/arch/arm64/kernel/smp.c -@@ -152,6 +152,7 @@ asmlinkage void secondary_start_kernel(void) - */ - cpu_set_reserved_ttbr0(); - flush_tlb_all(); -+ cpu_set_default_tcr_t0sz(); - - preempt_disable(); - trace_hardirqs_off(); -diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c -index f4f8b50..53bbff9 100644 ---- a/arch/arm64/mm/mmu.c -+++ b/arch/arm64/mm/mmu.c -@@ -37,6 +37,8 @@ - - #include "mm.h" - -+u64 idmap_t0sz = TCR_T0SZ(VA_BITS); -+ - /* - * Empty_zero_page is a special page that is used for zero-initialized data - * and COW. 
-@@ -369,6 +371,7 @@ void __init paging_init(void) - */ - cpu_set_reserved_ttbr0(); - flush_tlb_all(); -+ cpu_set_default_tcr_t0sz(); - } - - /* -@@ -376,8 +379,10 @@ void __init paging_init(void) - */ - void setup_mm_for_reboot(void) - { -- cpu_switch_mm(idmap_pg_dir, &init_mm); -+ cpu_set_reserved_ttbr0(); - flush_tlb_all(); -+ cpu_set_idmap_tcr_t0sz(); -+ cpu_switch_mm(idmap_pg_dir, &init_mm); - } - - /* -diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S -index 005d29e..4c4d93c 100644 ---- a/arch/arm64/mm/proc-macros.S -+++ b/arch/arm64/mm/proc-macros.S -@@ -52,3 +52,13 @@ - mov \reg, #4 // bytes per word - lsl \reg, \reg, \tmp // actual cache line size - .endm -+ -+/* -+ * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map -+ */ -+ .macro tcr_set_idmap_t0sz, valreg, tmpreg -+#ifndef CONFIG_ARM64_VA_BITS_48 -+ ldr_l \tmpreg, idmap_t0sz -+ bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH -+#endif -+ .endm -diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S -index 4e778b1..cbea872 100644 ---- a/arch/arm64/mm/proc.S -+++ b/arch/arm64/mm/proc.S -@@ -156,6 +156,7 @@ ENTRY(cpu_do_resume) - msr cpacr_el1, x6 - msr ttbr0_el1, x1 - msr ttbr1_el1, x7 -+ tcr_set_idmap_t0sz x8, x7 - msr tcr_el1, x8 - msr vbar_el1, x9 - msr mdscr_el1, x10 -@@ -233,6 +234,8 @@ ENTRY(__cpu_setup) - */ - ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ - TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 -+ tcr_set_idmap_t0sz x10, x9 -+ - /* - * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in - * TCR_EL1. 
-diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c -index 8c3730c..8ae36ea 100644 ---- a/arch/ia64/kernel/msi_ia64.c -+++ b/arch/ia64/kernel/msi_ia64.c -@@ -35,7 +35,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata, - data |= MSI_DATA_VECTOR(irq_to_vector(irq)); - msg.data = data; - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - cpumask_copy(idata->affinity, cpumask_of(cpu)); - - return 0; -@@ -71,7 +71,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) - MSI_DATA_DELIVERY_FIXED | - MSI_DATA_VECTOR(vector); - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); - - return 0; -@@ -102,8 +102,8 @@ static int ia64_msi_retrigger_irq(struct irq_data *data) - */ - static struct irq_chip ia64_msi_chip = { - .name = "PCI-MSI", -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - .irq_ack = ia64_ack_msi_irq, - #ifdef CONFIG_SMP - .irq_set_affinity = ia64_set_msi_irq_affinity, -diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c -index 446e779..a0eb27b 100644 ---- a/arch/ia64/sn/kernel/msi_sn.c -+++ b/arch/ia64/sn/kernel/msi_sn.c -@@ -145,7 +145,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) - msg.data = 0x100 + irq; - - irq_set_msi_desc(irq, entry); -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); - - return 0; -@@ -205,7 +205,7 @@ static int sn_set_msi_irq_affinity(struct irq_data *data, - msg.address_hi = (u32)(bus_addr >> 32); - msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - cpumask_copy(data->affinity, cpu_mask); - - return 0; -@@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(struct irq_data *data) - - static struct irq_chip sn_msi_chip = { - 
.name = "PCI-MSI", -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - .irq_ack = sn_ack_msi_irq, - #ifdef CONFIG_SMP - .irq_set_affinity = sn_set_msi_irq_affinity, -diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c -index 63bbe07..cffaaf4 100644 ---- a/arch/mips/pci/msi-octeon.c -+++ b/arch/mips/pci/msi-octeon.c -@@ -178,7 +178,7 @@ msi_irq_allocated: - pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); - - irq_set_msi_desc(irq, desc); -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - return 0; - } - -diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c -index f7ac3ed..6a40f24 100644 ---- a/arch/mips/pci/msi-xlp.c -+++ b/arch/mips/pci/msi-xlp.c -@@ -217,7 +217,7 @@ static void xlp_msix_mask_ack(struct irq_data *d) - - msixvec = nlm_irq_msixvec(d->irq); - link = nlm_irq_msixlink(msixvec); -- mask_msi_irq(d); -+ pci_msi_mask_irq(d); - md = irq_data_get_irq_handler_data(d); - - /* Ack MSI on bridge */ -@@ -239,10 +239,10 @@ static void xlp_msix_mask_ack(struct irq_data *d) - - static struct irq_chip xlp_msix_chip = { - .name = "XLP-MSIX", -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, - .irq_mask_ack = xlp_msix_mask_ack, -- .irq_unmask = unmask_msi_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - void arch_teardown_msi_irq(unsigned int irq) -@@ -345,7 +345,7 @@ static int xlp_setup_msi(uint64_t lnkbase, int node, int link, - if (ret < 0) - return ret; - -- write_msi_msg(xirq, &msg); -+ pci_write_msi_msg(xirq, &msg); - return 0; - } - -@@ -446,7 +446,7 @@ static int xlp_setup_msix(uint64_t lnkbase, int node, int link, - if (ret < 0) - return ret; - -- write_msi_msg(xirq, &msg); -+ pci_write_msi_msg(xirq, &msg); - return 0; - } - -diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c -index 0dde803..26d2dab 100644 ---- 
a/arch/mips/pci/pci-xlr.c -+++ b/arch/mips/pci/pci-xlr.c -@@ -260,7 +260,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) - if (ret < 0) - return ret; - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - return 0; - } - #endif -diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h -deleted file mode 100644 -index 3bef74a..0000000 ---- a/arch/powerpc/include/asm/mpc85xx.h -+++ /dev/null -@@ -1,94 +0,0 @@ --/* -- * MPC85xx cpu type detection -- * -- * Copyright 2011-2012 Freescale Semiconductor, Inc. -- * -- * This is free software; you can redistribute it and/or modify -- * it under the terms of the GNU General Public License as published by -- * the Free Software Foundation; either version 2 of the License, or -- * (at your option) any later version. -- */ -- --#ifndef __ASM_PPC_MPC85XX_H --#define __ASM_PPC_MPC85XX_H -- --#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design resision */ --#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/ --#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/ -- --/* Some parts define SVR[0:23] as the SOC version */ --#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */ -- --#define SVR_8533 0x803400 --#define SVR_8535 0x803701 --#define SVR_8536 0x803700 --#define SVR_8540 0x803000 --#define SVR_8541 0x807200 --#define SVR_8543 0x803200 --#define SVR_8544 0x803401 --#define SVR_8545 0x803102 --#define SVR_8547 0x803101 --#define SVR_8548 0x803100 --#define SVR_8555 0x807100 --#define SVR_8560 0x807000 --#define SVR_8567 0x807501 --#define SVR_8568 0x807500 --#define SVR_8569 0x808000 --#define SVR_8572 0x80E000 --#define SVR_P1010 0x80F100 --#define SVR_P1011 0x80E500 --#define SVR_P1012 0x80E501 --#define SVR_P1013 0x80E700 --#define SVR_P1014 0x80F101 --#define SVR_P1017 0x80F700 --#define SVR_P1020 0x80E400 --#define SVR_P1021 0x80E401 --#define SVR_P1022 0x80E600 --#define SVR_P1023 0x80F600 --#define 
SVR_P1024 0x80E402 --#define SVR_P1025 0x80E403 --#define SVR_P2010 0x80E300 --#define SVR_P2020 0x80E200 --#define SVR_P2040 0x821000 --#define SVR_P2041 0x821001 --#define SVR_P3041 0x821103 --#define SVR_P4040 0x820100 --#define SVR_P4080 0x820000 --#define SVR_P5010 0x822100 --#define SVR_P5020 0x822000 --#define SVR_P5021 0X820500 --#define SVR_P5040 0x820400 --#define SVR_T4240 0x824000 --#define SVR_T4120 0x824001 --#define SVR_T4160 0x824100 --#define SVR_C291 0x850000 --#define SVR_C292 0x850020 --#define SVR_C293 0x850030 --#define SVR_B4860 0X868000 --#define SVR_G4860 0x868001 --#define SVR_G4060 0x868003 --#define SVR_B4440 0x868100 --#define SVR_G4440 0x868101 --#define SVR_B4420 0x868102 --#define SVR_B4220 0x868103 --#define SVR_T1040 0x852000 --#define SVR_T1041 0x852001 --#define SVR_T1042 0x852002 --#define SVR_T1020 0x852100 --#define SVR_T1021 0x852101 --#define SVR_T1022 0x852102 --#define SVR_T2080 0x853000 --#define SVR_T2081 0x853100 -- --#define SVR_8610 0x80A000 --#define SVR_8641 0x809000 --#define SVR_8641D 0x809001 -- --#define SVR_9130 0x860001 --#define SVR_9131 0x860000 --#define SVR_9132 0x861000 --#define SVR_9232 0x861400 -- --#define SVR_Unknown 0xFFFFFF -- --#endif -diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c -index ca3a062..11090ab 100644 ---- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c -+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c -@@ -123,7 +123,8 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc) - } - - static int --cpld_pic_host_match(struct irq_domain *h, struct device_node *node) -+cpld_pic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - return cpld_pic_node == node; - } -diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c -index a392e94..f0be439 100644 ---- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c -+++ 
b/arch/powerpc/platforms/85xx/mpc85xx_mds.c -@@ -34,6 +34,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -51,7 +52,6 @@ - #include - #include - #include --#include - #include "smp.h" - - #include "mpc85xx.h" -diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c -index e358bed..50dcc00 100644 ---- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c -+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -27,7 +28,6 @@ - #include - #include - #include --#include - - #include - #include -diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c -index 6ac986d..371df82 100644 ---- a/arch/powerpc/platforms/85xx/p1022_ds.c -+++ b/arch/powerpc/platforms/85xx/p1022_ds.c -@@ -16,6 +16,7 @@ - * kind, whether express or implied. - */ - -+#include - #include - #include - #include -@@ -25,7 +26,6 @@ - #include - #include - #include --#include - #include - #include "smp.h" - -diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c -index 7a180f0..4f8fc5f 100644 ---- a/arch/powerpc/platforms/85xx/p1022_rdk.c -+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c -@@ -12,6 +12,7 @@ - * kind, whether express or implied. 
- */ - -+#include - #include - #include - #include -@@ -21,7 +22,6 @@ - #include - #include - #include --#include - #include "smp.h" - - #include "mpc85xx.h" -diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c -index d7c1e69..3956455 100644 ---- a/arch/powerpc/platforms/85xx/smp.c -+++ b/arch/powerpc/platforms/85xx/smp.c -@@ -19,6 +19,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -26,7 +27,6 @@ - #include - #include - #include --#include - #include - #include - -diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c -index 1eadb6d..2799120 100644 ---- a/arch/powerpc/platforms/85xx/twr_p102x.c -+++ b/arch/powerpc/platforms/85xx/twr_p102x.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -23,7 +24,6 @@ - #include - #include - #include --#include - - #include - #include -diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c -index 55413a5..437a9c3 100644 ---- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c -+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -38,7 +39,6 @@ - #include - #include - #include --#include - - #include "mpc86xx.h" - -diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c -index 862b327..0883994 100644 ---- a/arch/powerpc/platforms/cell/axon_msi.c -+++ b/arch/powerpc/platforms/cell/axon_msi.c -@@ -279,7 +279,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - - irq_set_msi_desc(virq, entry); - msg.data = virq; -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - } - - return 0; -@@ -301,9 +301,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) - } - - static struct irq_chip msic_irq_chip = { -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -- .irq_shutdown = 
mask_msi_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, -+ .irq_shutdown = pci_msi_mask_irq, - .name = "AXON-MSI", - }; - -diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c -index 28e558d..109d236 100644 ---- a/arch/powerpc/platforms/cell/interrupt.c -+++ b/arch/powerpc/platforms/cell/interrupt.c -@@ -222,7 +222,8 @@ void iic_request_IPIs(void) - #endif /* CONFIG_SMP */ - - --static int iic_host_match(struct irq_domain *h, struct device_node *node) -+static int iic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - return of_device_is_compatible(node, - "IBM,CBEA-Internal-Interrupt-Controller"); -diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c -index 4cde8e7..b7866e0 100644 ---- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c -+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c -@@ -108,7 +108,8 @@ static int flipper_pic_map(struct irq_domain *h, unsigned int virq, - return 0; - } - --static int flipper_pic_match(struct irq_domain *h, struct device_node *np) -+static int flipper_pic_match(struct irq_domain *h, struct device_node *np, -+ enum irq_domain_bus_token bus_token) - { - return 1; - } -diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c -index 4c24bf6..246cab4 100644 ---- a/arch/powerpc/platforms/powermac/pic.c -+++ b/arch/powerpc/platforms/powermac/pic.c -@@ -268,7 +268,8 @@ static struct irqaction gatwick_cascade_action = { - .name = "cascade", - }; - --static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node) -+static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - /* We match all, we don't always have a node anyway */ - return 1; -diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c -index 
9ff55d5..019991d 100644 ---- a/arch/powerpc/platforms/powernv/pci.c -+++ b/arch/powerpc/platforms/powernv/pci.c -@@ -90,7 +90,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) - return rc; - } - irq_set_msi_desc(virq, entry); -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - } - return 0; - } -diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c -index 5f3b232..df0c086 100644 ---- a/arch/powerpc/platforms/ps3/interrupt.c -+++ b/arch/powerpc/platforms/ps3/interrupt.c -@@ -678,7 +678,8 @@ static int ps3_host_map(struct irq_domain *h, unsigned int virq, - return 0; - } - --static int ps3_host_match(struct irq_domain *h, struct device_node *np) -+static int ps3_host_match(struct irq_domain *h, struct device_node *np, -+ enum irq_domain_bus_token bus_token) - { - /* Match all */ - return 1; -diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c -index 8b909e9..691a154 100644 ---- a/arch/powerpc/platforms/pseries/msi.c -+++ b/arch/powerpc/platforms/pseries/msi.c -@@ -476,7 +476,7 @@ again: - irq_set_msi_desc(virq, entry); - - /* Read config space back so we can restore after reset */ -- __read_msi_msg(entry, &msg); -+ __pci_read_msi_msg(entry, &msg); - entry->msg = msg; - } - -diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c -index 2d20f10..eca0b00 100644 ---- a/arch/powerpc/sysdev/ehv_pic.c -+++ b/arch/powerpc/sysdev/ehv_pic.c -@@ -177,7 +177,8 @@ unsigned int ehv_pic_get_irq(void) - return irq_linear_revmap(global_ehv_pic->irqhost, irq); - } - --static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node) -+static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - /* Exact match, unless ehv_pic node is NULL */ - return h->of_node == NULL || h->of_node == node; -diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c -index 
ea6b3a1..f13282c 100644 ---- a/arch/powerpc/sysdev/fsl_msi.c -+++ b/arch/powerpc/sysdev/fsl_msi.c -@@ -82,8 +82,8 @@ static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p) - - - static struct irq_chip fsl_msi_chip = { -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - .irq_ack = fsl_msi_end_irq, - .irq_print_chip = fsl_msi_print_chip, - }; -@@ -243,7 +243,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) - irq_set_msi_desc(virq, entry); - - fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - } - return 0; - -diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c -index 45598da..8c3756c 100644 ---- a/arch/powerpc/sysdev/i8259.c -+++ b/arch/powerpc/sysdev/i8259.c -@@ -162,7 +162,8 @@ static struct resource pic_edgectrl_iores = { - .flags = IORESOURCE_BUSY, - }; - --static int i8259_host_match(struct irq_domain *h, struct device_node *node) -+static int i8259_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - return h->of_node == NULL || h->of_node == node; - } -diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c -index b50f978..1b9b00f 100644 ---- a/arch/powerpc/sysdev/ipic.c -+++ b/arch/powerpc/sysdev/ipic.c -@@ -672,7 +672,8 @@ static struct irq_chip ipic_edge_irq_chip = { - .irq_set_type = ipic_set_irq_type, - }; - --static int ipic_host_match(struct irq_domain *h, struct device_node *node) -+static int ipic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - /* Exact match, unless ipic node is NULL */ - return h->of_node == NULL || h->of_node == node; -diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c -index 89cec0e..bf6f77e 100644 ---- a/arch/powerpc/sysdev/mpic.c -+++ b/arch/powerpc/sysdev/mpic.c -@@ -1009,7 +1009,8 
@@ static struct irq_chip mpic_irq_ht_chip = { - #endif /* CONFIG_MPIC_U3_HT_IRQS */ - - --static int mpic_host_match(struct irq_domain *h, struct device_node *node) -+static int mpic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - /* Exact match, unless mpic node is NULL */ - return h->of_node == NULL || h->of_node == node; -diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c -index a6add4a..5a4c474 100644 ---- a/arch/powerpc/sysdev/mpic_pasemi_msi.c -+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c -@@ -42,7 +42,7 @@ static struct mpic *msi_mpic; - static void mpic_pasemi_msi_mask_irq(struct irq_data *data) - { - pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); -- mask_msi_irq(data); -+ pci_msi_mask_irq(data); - mpic_mask_irq(data); - } - -@@ -50,7 +50,7 @@ static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) - { - pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); - mpic_unmask_irq(data); -- unmask_msi_irq(data); -+ pci_msi_unmask_irq(data); - } - - static struct irq_chip mpic_pasemi_msi_chip = { -@@ -138,7 +138,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) - * register to generate MSI [512...1023] - */ - msg.data = hwirq-0x200; -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - } - - return 0; -diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c -index db35a40..65880cc 100644 ---- a/arch/powerpc/sysdev/mpic_u3msi.c -+++ b/arch/powerpc/sysdev/mpic_u3msi.c -@@ -25,14 +25,14 @@ static struct mpic *msi_mpic; - - static void mpic_u3msi_mask_irq(struct irq_data *data) - { -- mask_msi_irq(data); -+ pci_msi_mask_irq(data); - mpic_mask_irq(data); - } - - static void mpic_u3msi_unmask_irq(struct irq_data *data) - { - mpic_unmask_irq(data); -- unmask_msi_irq(data); -+ pci_msi_unmask_irq(data); - } - - static struct irq_chip mpic_u3msi_chip = { -@@ -172,7 +172,7 @@ static int 
u3msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) - printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", - virq, hwirq, (unsigned long)addr); - msg.data = hwirq; -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - - hwirq++; - } -diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c -index a6a4dbd..908105f 100644 ---- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c -+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c -@@ -85,7 +85,7 @@ static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1); - return -EINVAL; - } -- write_msi_msg(hwirq, &msg); -+ pci_write_msi_msg(hwirq, &msg); - } - - return 0; -diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c -index 85d9c18..c6df3e2 100644 ---- a/arch/powerpc/sysdev/ppc4xx_msi.c -+++ b/arch/powerpc/sysdev/ppc4xx_msi.c -@@ -116,7 +116,7 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - - irq_set_msi_desc(virq, entry); - msg.data = int_no; -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - } - return 0; - } -diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c -index b2b87c3..a433b3d 100644 ---- a/arch/powerpc/sysdev/qe_lib/qe_ic.c -+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c -@@ -245,7 +245,8 @@ static struct irq_chip qe_ic_irq_chip = { - .irq_mask_ack = qe_ic_mask_irq, - }; - --static int qe_ic_host_match(struct irq_domain *h, struct device_node *node) -+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - /* Exact match, unless qe_ic node is NULL */ - return h->of_node == NULL || h->of_node == node; -diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c -index 3c6ee1b..4ba554e 100644 ---- a/arch/powerpc/sysdev/xics/ics-opal.c -+++ b/arch/powerpc/sysdev/xics/ics-opal.c -@@ -73,7 +73,7 @@ static unsigned 
int ics_opal_startup(struct irq_data *d) - * at that level, so we do it here by hand. - */ - if (d->msi_desc) -- unmask_msi_irq(d); -+ pci_msi_unmask_irq(d); - #endif - - /* unmask it */ -diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c -index 936575d..bc81335 100644 ---- a/arch/powerpc/sysdev/xics/ics-rtas.c -+++ b/arch/powerpc/sysdev/xics/ics-rtas.c -@@ -76,7 +76,7 @@ static unsigned int ics_rtas_startup(struct irq_data *d) - * at that level, so we do it here by hand. - */ - if (d->msi_desc) -- unmask_msi_irq(d); -+ pci_msi_unmask_irq(d); - #endif - /* unmask it */ - ics_rtas_unmask_irq(d); -diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c -index fe0cca4..13ab716 100644 ---- a/arch/powerpc/sysdev/xics/xics-common.c -+++ b/arch/powerpc/sysdev/xics/xics-common.c -@@ -300,7 +300,8 @@ int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, - } - #endif /* CONFIG_SMP */ - --static int xics_host_match(struct irq_domain *h, struct device_node *node) -+static int xics_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - struct ics *ics; - -diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c -index 2fa7b14..d59c825 100644 ---- a/arch/s390/pci/pci.c -+++ b/arch/s390/pci/pci.c -@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(zpci_list_lock); - - static struct irq_chip zpci_irq_chip = { - .name = "zPCI", -- .irq_unmask = unmask_msi_irq, -- .irq_mask = mask_msi_irq, -+ .irq_unmask = pci_msi_unmask_irq, -+ .irq_mask = pci_msi_mask_irq, - }; - - static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); -@@ -403,7 +403,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) - msg.data = hwirq; - msg.address_lo = zdev->msi_addr & 0xffffffff; - msg.address_hi = zdev->msi_addr >> 32; -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - airq_iv_set_data(zdev->aibv, hwirq, irq); - hwirq++; - } -@@ -448,9 +448,9 @@ 
void arch_teardown_msi_irqs(struct pci_dev *pdev) - /* Release MSI interrupts */ - list_for_each_entry(msi, &pdev->msi_list, list) { - if (msi->msi_attrib.is_msix) -- default_msix_mask_irq(msi, 1); -+ __pci_msix_desc_mask_irq(msi, 1); - else -- default_msi_mask_irq(msi, 1, 1); -+ __pci_msi_desc_mask_irq(msi, 1, 1); - irq_set_msi_desc(msi->irq, NULL); - irq_free_desc(msi->irq); - msi->msg.address_lo = 0; -diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c -index 580651a..84e16d8 100644 ---- a/arch/sparc/kernel/pci_msi.c -+++ b/arch/sparc/kernel/pci_msi.c -@@ -111,10 +111,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num) - - static struct irq_chip msi_irq = { - .name = "PCI-MSI", -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, - /* XXX affinity XXX */ - }; - -@@ -161,7 +161,7 @@ static int sparc64_setup_msi_irq(unsigned int *irq_p, - msg.data = msi; - - irq_set_msi_desc(*irq_p, entry); -- write_msi_msg(*irq_p, &msg); -+ pci_write_msi_msg(*irq_p, &msg); - - return 0; - -diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c -index e39f9c5..e717af2 100644 ---- a/arch/tile/kernel/pci_gx.c -+++ b/arch/tile/kernel/pci_gx.c -@@ -1453,7 +1453,7 @@ static struct pci_ops tile_cfg_ops = { - static unsigned int tilegx_msi_startup(struct irq_data *d) - { - if (d->msi_desc) -- unmask_msi_irq(d); -+ pci_msi_unmask_irq(d); - - return 0; - } -@@ -1465,14 +1465,14 @@ static void tilegx_msi_ack(struct irq_data *d) - - static void tilegx_msi_mask(struct irq_data *d) - { -- mask_msi_irq(d); -+ pci_msi_mask_irq(d); - __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); - } - - static void tilegx_msi_unmask(struct irq_data *d) - { - __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); -- unmask_msi_irq(d); -+ 
pci_msi_unmask_irq(d); - } - - static struct irq_chip tilegx_msi_chip = { -@@ -1590,7 +1590,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) - msg.address_hi = msi_addr >> 32; - msg.address_lo = msi_addr & 0xffffffff; - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq); - irq_set_handler_data(irq, controller); - -diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h -index e45e4da..f58a9c7 100644 ---- a/arch/x86/include/asm/x86_init.h -+++ b/arch/x86/include/asm/x86_init.h -@@ -172,7 +172,6 @@ struct x86_platform_ops { - - struct pci_dev; - struct msi_msg; --struct msi_desc; - - struct x86_msi_ops { - int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); -@@ -183,8 +182,6 @@ struct x86_msi_ops { - void (*teardown_msi_irqs)(struct pci_dev *dev); - void (*restore_msi_irqs)(struct pci_dev *dev); - int (*setup_hpet_msi)(unsigned int irq, unsigned int id); -- u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag); -- u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag); - }; - - struct IO_APIC_route_entry; -diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c -index 1183d54..7ffe0a2 100644 ---- a/arch/x86/kernel/apic/io_apic.c -+++ b/arch/x86/kernel/apic/io_apic.c -@@ -3158,7 +3158,7 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) - msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; - msg.address_lo |= MSI_ADDR_DEST_ID(dest); - -- __write_msi_msg(data->msi_desc, &msg); -+ __pci_write_msi_msg(data->msi_desc, &msg); - - return IRQ_SET_MASK_OK_NOCOPY; - } -@@ -3169,8 +3169,8 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) - */ - static struct irq_chip msi_chip = { - .name = "PCI-MSI", -- .irq_unmask = unmask_msi_irq, -- .irq_mask = mask_msi_irq, -+ .irq_unmask = pci_msi_unmask_irq, -+ .irq_mask = pci_msi_mask_irq, - .irq_ack = ack_apic_edge, - 
.irq_set_affinity = msi_set_affinity, - .irq_retrigger = ioapic_retrigger_irq, -@@ -3196,7 +3196,7 @@ int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, - * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. - */ - if (!irq_offset) -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - - setup_remapped_irq(irq, irq_cfg(irq), chip); - -diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c -index e48b674..234b072 100644 ---- a/arch/x86/kernel/x86_init.c -+++ b/arch/x86/kernel/x86_init.c -@@ -116,8 +116,6 @@ struct x86_msi_ops x86_msi = { - .teardown_msi_irqs = default_teardown_msi_irqs, - .restore_msi_irqs = default_restore_msi_irqs, - .setup_hpet_msi = default_setup_hpet_msi, -- .msi_mask_irq = default_msi_mask_irq, -- .msix_mask_irq = default_msix_mask_irq, - }; - - /* MSI arch specific hooks */ -@@ -140,14 +138,6 @@ void arch_restore_msi_irqs(struct pci_dev *dev) - { - x86_msi.restore_msi_irqs(dev); - } --u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) --{ -- return x86_msi.msi_mask_irq(desc, mask, flag); --} --u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag) --{ -- return x86_msi.msix_mask_irq(desc, flag); --} - #endif - - struct x86_io_apic_ops x86_io_apic_ops = { -diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c -index f3a2cfc..7bcf06a 100644 ---- a/arch/x86/pci/bus_numa.c -+++ b/arch/x86/pci/bus_numa.c -@@ -31,7 +31,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources) - { - struct pci_root_info *info = x86_find_pci_root_info(bus); - struct pci_root_res *root_res; -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - bool found = false; - - if (!info) -@@ -41,7 +41,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources) - bus); - - /* already added by acpi ? 
*/ -- list_for_each_entry(window, resources, list) -+ resource_list_for_each_entry(window, resources) - if (window->res->flags & IORESOURCE_BUS) { - found = true; - break; -diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c -index 6b3cf7c..878fb8e 100644 ---- a/arch/x86/pci/xen.c -+++ b/arch/x86/pci/xen.c -@@ -229,7 +229,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - return 1; - - list_for_each_entry(msidesc, &dev->msi_list, list) { -- __read_msi_msg(msidesc, &msg); -+ __pci_read_msi_msg(msidesc, &msg); - pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | - ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); - if (msg.data != XEN_PIRQ_MSI_DATA || -@@ -240,7 +240,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - goto error; - } - xen_msi_compose_msg(dev, pirq, &msg); -- __write_msi_msg(msidesc, &msg); -+ __pci_write_msi_msg(msidesc, &msg); - dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); - } else { - dev_dbg(&dev->dev, -@@ -296,12 +296,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - map_irq.entry_nr = nvec; - } else if (type == PCI_CAP_ID_MSIX) { - int pos; -+ unsigned long flags; - u32 table_offset, bir; - - pos = dev->msix_cap; - pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, - &table_offset); - bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); -+ flags = pci_resource_flags(dev, bir); -+ if (!flags || (flags & IORESOURCE_UNSET)) -+ return -EINVAL; - - map_irq.table_base = pci_resource_start(dev, bir); - map_irq.entry_nr = msidesc->msi_attrib.entry_nr; -@@ -394,14 +398,7 @@ static void xen_teardown_msi_irq(unsigned int irq) - { - xen_destroy_irq(irq); - } --static u32 xen_nop_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) --{ -- return 0; --} --static u32 xen_nop_msix_mask_irq(struct msi_desc *desc, u32 flag) --{ -- return 0; --} -+ - #endif - - int __init pci_xen_init(void) -@@ -425,8 +422,7 @@ int __init pci_xen_init(void) - 
x86_msi.setup_msi_irqs = xen_setup_msi_irqs; - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; - x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; -- x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; -- x86_msi.msix_mask_irq = xen_nop_msix_mask_irq; -+ pci_msi_ignore_mask = 1; - #endif - return 0; - } -@@ -460,8 +456,7 @@ int __init pci_xen_initial_domain(void) - x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; - x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; -- x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; -- x86_msi.msix_mask_irq = xen_nop_msix_mask_irq; -+ pci_msi_ignore_mask = 1; - #endif - __acpi_register_gsi = acpi_register_gsi_xen; - /* Pre-allocate legacy irqs */ -diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c -index fdb5701..0ad0ce6 100644 ---- a/drivers/acpi/acpi_lpss.c -+++ b/drivers/acpi/acpi_lpss.c -@@ -325,7 +325,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev, - { - struct lpss_device_desc *dev_desc; - struct lpss_private_data *pdata; -- struct resource_list_entry *rentry; -+ struct resource_entry *rentry; - struct list_head resource_list; - struct platform_device *pdev; - int ret; -@@ -345,12 +345,12 @@ static int acpi_lpss_create_device(struct acpi_device *adev, - goto err_out; - - list_for_each_entry(rentry, &resource_list, node) -- if (resource_type(&rentry->res) == IORESOURCE_MEM) { -+ if (resource_type(rentry->res) == IORESOURCE_MEM) { - if (dev_desc->prv_size_override) - pdata->mmio_size = dev_desc->prv_size_override; - else -- pdata->mmio_size = resource_size(&rentry->res); -- pdata->mmio_base = ioremap(rentry->res.start, -+ pdata->mmio_size = resource_size(rentry->res); -+ pdata->mmio_base = ioremap(rentry->res->start, - pdata->mmio_size); - break; - } -diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c -index 6ba8beb..1284138 100644 ---- a/drivers/acpi/acpi_platform.c -+++ b/drivers/acpi/acpi_platform.c -@@ -45,7 +45,7 @@ 
struct platform_device *acpi_create_platform_device(struct acpi_device *adev) - struct platform_device *pdev = NULL; - struct acpi_device *acpi_parent; - struct platform_device_info pdevinfo; -- struct resource_list_entry *rentry; -+ struct resource_entry *rentry; - struct list_head resource_list; - struct resource *resources = NULL; - int count; -@@ -71,7 +71,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) - } - count = 0; - list_for_each_entry(rentry, &resource_list, node) -- resources[count++] = rentry->res; -+ resources[count++] = *rentry->res; - - acpi_dev_free_resource_list(&resource_list); - } -diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c -index 2ba8f02..e7f4aa0 100644 ---- a/drivers/acpi/resource.c -+++ b/drivers/acpi/resource.c -@@ -415,12 +415,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt); - */ - void acpi_dev_free_resource_list(struct list_head *list) - { -- struct resource_list_entry *rentry, *re; -- -- list_for_each_entry_safe(rentry, re, list, node) { -- list_del(&rentry->node); -- kfree(rentry); -- } -+ resource_list_free(list); - } - EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list); - -@@ -435,15 +430,15 @@ struct res_proc_context { - static acpi_status acpi_dev_new_resource_entry(struct resource *r, - struct res_proc_context *c) - { -- struct resource_list_entry *rentry; -+ struct resource_entry *rentry; - -- rentry = kmalloc(sizeof(*rentry), GFP_KERNEL); -+ rentry = resource_list_create_entry(NULL, 0); - if (!rentry) { - c->error = -ENOMEM; - return AE_NO_MEMORY; - } -- rentry->res = *r; -- list_add_tail(&rentry->node, c->list); -+ *rentry->res = *r; -+ resource_list_add_tail(rentry, c->list); - c->count++; - return AE_OK; - } -@@ -503,7 +498,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, - * returned as the final error code. 
- * - * The resultant struct resource objects are put on the list pointed to by -- * @list, that must be empty initially, as members of struct resource_list_entry -+ * @list, that must be empty initially, as members of struct resource_entry - * objects. Callers of this routine should use %acpi_dev_free_resource_list() to - * free that list. - * -diff --git a/drivers/base/core.c b/drivers/base/core.c -index 842d047..4c7a18f 100644 ---- a/drivers/base/core.c -+++ b/drivers/base/core.c -@@ -661,6 +661,9 @@ void device_initialize(struct device *dev) - INIT_LIST_HEAD(&dev->devres_head); - device_pm_init(dev); - set_dev_node(dev, -1); -+#ifdef CONFIG_GENERIC_MSI_IRQ -+ INIT_LIST_HEAD(&dev->msi_list); -+#endif - } - EXPORT_SYMBOL_GPL(device_initialize); - -diff --git a/drivers/base/platform.c b/drivers/base/platform.c -index 317e0e4..b387fb9 100644 ---- a/drivers/base/platform.c -+++ b/drivers/base/platform.c -@@ -1011,6 +1011,7 @@ int __init platform_bus_init(void) - error = bus_register(&platform_bus_type); - if (error) - device_unregister(&platform_bus); -+ of_platform_register_reconfig_notifier(); - return error; - } - -diff --git a/drivers/block/loop.c b/drivers/block/loop.c -index 6cb1beb..12678be 100644 ---- a/drivers/block/loop.c -+++ b/drivers/block/loop.c -@@ -692,6 +692,24 @@ static inline int is_loop_device(struct file *file) - return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; - } - -+/* -+ * for AUFS -+ * no get/put for file. 
-+ */ -+struct file *loop_backing_file(struct super_block *sb) -+{ -+ struct file *ret; -+ struct loop_device *l; -+ -+ ret = NULL; -+ if (MAJOR(sb->s_dev) == LOOP_MAJOR) { -+ l = sb->s_bdev->bd_disk->private_data; -+ ret = l->lo_backing_file; -+ } -+ return ret; -+} -+EXPORT_SYMBOL_GPL(loop_backing_file); -+ - /* loop sysfs attributes */ - - static ssize_t loop_attr_show(struct device *dev, char *page, -diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig -index 455fd17..38c8814 100644 ---- a/drivers/clk/Kconfig -+++ b/drivers/clk/Kconfig -@@ -101,12 +101,12 @@ config COMMON_CLK_AXI_CLKGEN - Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx - FPGAs. It is commonly used in Analog Devices' reference designs. - --config CLK_PPC_CORENET -- bool "Clock driver for PowerPC corenet platforms" -- depends on PPC_E500MC && OF -+config CLK_QORIQ -+ bool "Clock driver for Freescale QorIQ platforms" -+ depends on (PPC_E500MC || ARM || ARM64) && OF - ---help--- -- This adds the clock driver support for Freescale PowerPC corenet -- platforms using common clock framework. -+ This adds the clock driver support for Freescale QorIQ platforms -+ using common clock framework. 
- - config COMMON_CLK_XGENE - bool "Clock driver for APM XGene SoC" -diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile -index d5fba5b..4ff94cd 100644 ---- a/drivers/clk/Makefile -+++ b/drivers/clk/Makefile -@@ -30,7 +30,7 @@ obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o - obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o - obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o - obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o --obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o -+obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o - obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o - obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o - obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o -diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c -new file mode 100644 -index 0000000..74051c9 ---- /dev/null -+++ b/drivers/clk/clk-qoriq.c -@@ -0,0 +1,1256 @@ -+/* -+ * Copyright 2013 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * clock driver for Freescale QorIQ SoCs. 
-+ */ -+ -+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define PLL_DIV1 0 -+#define PLL_DIV2 1 -+#define PLL_DIV3 2 -+#define PLL_DIV4 3 -+ -+#define PLATFORM_PLL 0 -+#define CGA_PLL1 1 -+#define CGA_PLL2 2 -+#define CGA_PLL3 3 -+#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */ -+#define CGB_PLL1 4 -+#define CGB_PLL2 5 -+ -+struct clockgen_pll_div { -+ struct clk *clk; -+ char name[32]; -+}; -+ -+struct clockgen_pll { -+ struct clockgen_pll_div div[4]; -+}; -+ -+#define CLKSEL_VALID 1 -+#define CLKSEL_80PCT 2 /* Only allowed if PLL <= 80% of max cpu freq */ -+ -+struct clockgen_sourceinfo { -+ u32 flags; /* CLKSEL_xxx */ -+ int pll; /* CGx_PLLn */ -+ int div; /* PLL_DIVn */ -+}; -+ -+#define NUM_MUX_PARENTS 16 -+ -+struct clockgen_muxinfo { -+ struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS]; -+}; -+ -+#define NUM_HWACCEL 5 -+#define NUM_CMUX 8 -+ -+struct clockgen; -+ -+/* -+ * cmux freq must be >= platform pll. 
-+ * If not set, cmux freq must be >= platform pll/2 -+ */ -+#define CG_CMUX_GE_PLAT 1 -+ -+#define CG_PLL_8BIT 2 /* PLLCnGSR[CFG] is 8 bits, not 6 */ -+#define CG_VER3 4 /* version 3 cg: reg layout different */ -+#define CG_LITTLE_ENDIAN 8 -+ -+struct clockgen_chipinfo { -+ const char *compat, *guts_compat; -+ const struct clockgen_muxinfo *cmux_groups[2]; -+ const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL]; -+ void (*init_periph)(struct clockgen *cg); -+ int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */ -+ u32 pll_mask; /* 1 << n bit set if PLL n is valid */ -+ u32 flags; /* CG_xxx */ -+}; -+ -+struct clockgen { -+ struct device_node *node; -+ void __iomem *regs; -+ struct clockgen_chipinfo info; /* mutable copy */ -+ struct clk *sysclk; -+ struct clockgen_pll pll[6]; -+ struct clk *cmux[NUM_CMUX]; -+ struct clk *hwaccel[NUM_HWACCEL]; -+ struct clk *fman[2]; -+ struct ccsr_guts __iomem *guts; -+}; -+ -+static struct clockgen clockgen; -+ -+static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg) -+{ -+ if (cg->info.flags & CG_LITTLE_ENDIAN) -+ iowrite32(val, reg); -+ else -+ iowrite32be(val, reg); -+} -+ -+static u32 cg_in(struct clockgen *cg, u32 __iomem *reg) -+{ -+ u32 val; -+ -+ if (cg->info.flags & CG_LITTLE_ENDIAN) -+ val = ioread32(reg); -+ else -+ val = ioread32be(reg); -+ -+ return val; -+} -+ -+static const struct clockgen_muxinfo p2041_cmux_grp1 = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p2041_cmux_grp2 = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [4] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p5020_cmux_grp1 = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID | CLKSEL_80PCT, 
CGA_PLL2, PLL_DIV1 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p5020_cmux_grp2 = { -+ { -+ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, -+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p5040_cmux_grp1 = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 }, -+ [5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p5040_cmux_grp2 = { -+ { -+ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p4080_cmux_grp1 = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ [8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p4080_cmux_grp2 = { -+ { -+ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, -+ [8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 }, -+ [9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 }, -+ [12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 }, -+ [13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo t1023_cmux = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo t1040_cmux = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ } -+}; -+ -+ -+static const struct clockgen_muxinfo 
clockgen2_cmux_cga = { -+ { -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, -+ {}, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, -+ {}, -+ { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL3, PLL_DIV4 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo clockgen2_cmux_cga12 = { -+ { -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, -+ {}, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo clockgen2_cmux_cgb = { -+ { -+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 }, -+ {}, -+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV1 }, -+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t1023_hwa1 = { -+ { -+ {}, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t1023_hwa2 = { -+ { -+ [6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t2080_hwa1 = { -+ { -+ {}, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, -+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t2080_hwa2 = { -+ { -+ {}, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, -+ { CLKSEL_VALID, 
CGA_PLL2, PLL_DIV4 }, -+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t4240_hwa1 = { -+ { -+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, -+ {}, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t4240_hwa4 = { -+ { -+ [2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, -+ [3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 }, -+ [4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 }, -+ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, -+ [6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t4240_hwa5 = { -+ { -+ [2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, -+ [3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 }, -+ [4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 }, -+ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, -+ [6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, -+ [7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 }, -+ }, -+}; -+ -+#define RCWSR7_FM1_CLK_SEL 0x40000000 -+#define RCWSR7_FM2_CLK_SEL 0x20000000 -+#define RCWSR7_HWA_ASYNC_DIV 0x04000000 -+ -+static void __init p2041_init_periph(struct clockgen *cg) -+{ -+ u32 reg; -+ -+ reg = ioread32be(&cg->guts->rcwsr[7]); -+ -+ if (reg & RCWSR7_FM1_CLK_SEL) -+ cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk; -+ else -+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+} -+ -+static void __init p4080_init_periph(struct clockgen *cg) -+{ -+ u32 reg; -+ -+ reg = ioread32be(&cg->guts->rcwsr[7]); -+ -+ if (reg & RCWSR7_FM1_CLK_SEL) -+ cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk; -+ else -+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+ -+ if (reg & RCWSR7_FM2_CLK_SEL) -+ cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk; -+ else -+ 
cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+} -+ -+static void __init p5020_init_periph(struct clockgen *cg) -+{ -+ u32 reg; -+ int div = PLL_DIV2; -+ -+ reg = ioread32be(&cg->guts->rcwsr[7]); -+ if (reg & RCWSR7_HWA_ASYNC_DIV) -+ div = PLL_DIV4; -+ -+ if (reg & RCWSR7_FM1_CLK_SEL) -+ cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk; -+ else -+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+} -+ -+static void __init p5040_init_periph(struct clockgen *cg) -+{ -+ u32 reg; -+ int div = PLL_DIV2; -+ -+ reg = ioread32be(&cg->guts->rcwsr[7]); -+ if (reg & RCWSR7_HWA_ASYNC_DIV) -+ div = PLL_DIV4; -+ -+ if (reg & RCWSR7_FM1_CLK_SEL) -+ cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk; -+ else -+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+ -+ if (reg & RCWSR7_FM2_CLK_SEL) -+ cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk; -+ else -+ cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+} -+ -+static void __init t1023_init_periph(struct clockgen *cg) -+{ -+ cg->fman[0] = cg->hwaccel[1]; -+} -+ -+static void __init t1040_init_periph(struct clockgen *cg) -+{ -+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk; -+} -+ -+static void __init t2080_init_periph(struct clockgen *cg) -+{ -+ cg->fman[0] = cg->hwaccel[0]; -+} -+ -+static void __init t4240_init_periph(struct clockgen *cg) -+{ -+ cg->fman[0] = cg->hwaccel[3]; -+ cg->fman[1] = cg->hwaccel[4]; -+} -+ -+static const struct clockgen_chipinfo chipinfo[] = { -+ { -+ .compat = "fsl,b4420-clockgen", -+ .guts_compat = "fsl,b4860-device-config", -+ .init_periph = t2080_init_periph, -+ .cmux_groups = { -+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb -+ }, -+ .hwaccel = { -+ &t2080_hwa1 -+ }, -+ .cmux_to_group = { -+ 0, 1, 1, 1, -1 -+ }, -+ .pll_mask = 0x3f, -+ .flags = CG_PLL_8BIT, -+ }, -+ { -+ .compat = "fsl,b4860-clockgen", -+ .guts_compat = "fsl,b4860-device-config", -+ .init_periph = t2080_init_periph, -+ .cmux_groups = { -+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb -+ }, -+ .hwaccel = { -+ 
&t2080_hwa1 -+ }, -+ .cmux_to_group = { -+ 0, 1, 1, 1, -1 -+ }, -+ .pll_mask = 0x3f, -+ .flags = CG_PLL_8BIT, -+ }, -+ { -+ .compat = "fsl,ls1021a-clockgen", -+ .cmux_groups = { -+ &t1023_cmux -+ }, -+ .cmux_to_group = { -+ 0, -1 -+ }, -+ .pll_mask = 0x03, -+ }, -+ { -+ .compat = "fsl,ls2080a-clockgen", -+ .cmux_groups = { -+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, 1, -1 -+ }, -+ .pll_mask = 0x37, -+ .flags = CG_VER3 | CG_LITTLE_ENDIAN, -+ }, -+ { -+ .compat = "fsl,ls2088a-clockgen", -+ .cmux_groups = { -+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, 1, -1 -+ }, -+ .pll_mask = 0x37, -+ .flags = CG_VER3 | CG_LITTLE_ENDIAN, -+ }, -+ { -+ .compat = "fsl,p2041-clockgen", -+ .guts_compat = "fsl,qoriq-device-config-1.0", -+ .init_periph = p2041_init_periph, -+ .cmux_groups = { -+ &p2041_cmux_grp1, &p2041_cmux_grp2 -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, 1, -1 -+ }, -+ .pll_mask = 0x07, -+ }, -+ { -+ .compat = "fsl,p3041-clockgen", -+ .guts_compat = "fsl,qoriq-device-config-1.0", -+ .init_periph = p2041_init_periph, -+ .cmux_groups = { -+ &p2041_cmux_grp1, &p2041_cmux_grp2 -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, 1, -1 -+ }, -+ .pll_mask = 0x07, -+ }, -+ { -+ .compat = "fsl,p4080-clockgen", -+ .guts_compat = "fsl,qoriq-device-config-1.0", -+ .init_periph = p4080_init_periph, -+ .cmux_groups = { -+ &p4080_cmux_grp1, &p4080_cmux_grp2 -+ }, -+ .cmux_to_group = { -+ 0, 0, 0, 0, 1, 1, 1, 1 -+ }, -+ .pll_mask = 0x1f, -+ }, -+ { -+ .compat = "fsl,p5020-clockgen", -+ .guts_compat = "fsl,qoriq-device-config-1.0", -+ .init_periph = p5020_init_periph, -+ .cmux_groups = { -+ &p2041_cmux_grp1, &p2041_cmux_grp2 -+ }, -+ .cmux_to_group = { -+ 0, 1, -1 -+ }, -+ .pll_mask = 0x07, -+ }, -+ { -+ .compat = "fsl,p5040-clockgen", -+ .guts_compat = "fsl,p5040-device-config", -+ .init_periph = p5040_init_periph, -+ .cmux_groups = { -+ &p5040_cmux_grp1, &p5040_cmux_grp2 -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, 1, -1 -+ 
}, -+ .pll_mask = 0x0f, -+ }, -+ { -+ .compat = "fsl,t1023-clockgen", -+ .guts_compat = "fsl,t1023-device-config", -+ .init_periph = t1023_init_periph, -+ .cmux_groups = { -+ &t1023_cmux -+ }, -+ .hwaccel = { -+ &t1023_hwa1, &t1023_hwa2 -+ }, -+ .cmux_to_group = { -+ 0, 0, -1 -+ }, -+ .pll_mask = 0x03, -+ .flags = CG_PLL_8BIT, -+ }, -+ { -+ .compat = "fsl,t1040-clockgen", -+ .guts_compat = "fsl,t1040-device-config", -+ .init_periph = t1040_init_periph, -+ .cmux_groups = { -+ &t1040_cmux -+ }, -+ .cmux_to_group = { -+ 0, 0, 0, 0, -1 -+ }, -+ .pll_mask = 0x07, -+ .flags = CG_PLL_8BIT, -+ }, -+ { -+ .compat = "fsl,t2080-clockgen", -+ .guts_compat = "fsl,t2080-device-config", -+ .init_periph = t2080_init_periph, -+ .cmux_groups = { -+ &clockgen2_cmux_cga12 -+ }, -+ .hwaccel = { -+ &t2080_hwa1, &t2080_hwa2 -+ }, -+ .cmux_to_group = { -+ 0, -1 -+ }, -+ .pll_mask = 0x07, -+ .flags = CG_PLL_8BIT, -+ }, -+ { -+ .compat = "fsl,t4240-clockgen", -+ .guts_compat = "fsl,t4240-device-config", -+ .init_periph = t4240_init_periph, -+ .cmux_groups = { -+ &clockgen2_cmux_cga, &clockgen2_cmux_cgb -+ }, -+ .hwaccel = { -+ &t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5 -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, -1 -+ }, -+ .pll_mask = 0x3f, -+ .flags = CG_PLL_8BIT, -+ }, -+ {}, -+}; -+ -+struct mux_hwclock { -+ struct clk_hw hw; -+ struct clockgen *cg; -+ const struct clockgen_muxinfo *info; -+ u32 __iomem *reg; -+ u8 parent_to_clksel[NUM_MUX_PARENTS]; -+ s8 clksel_to_parent[NUM_MUX_PARENTS]; -+ int num_parents; -+}; -+ -+#define to_mux_hwclock(p) container_of(p, struct mux_hwclock, hw) -+#define CLKSEL_MASK 0x78000000 -+#define CLKSEL_SHIFT 27 -+ -+static int mux_set_parent(struct clk_hw *hw, u8 idx) -+{ -+ struct mux_hwclock *hwc = to_mux_hwclock(hw); -+ u32 clksel; -+ -+ if (idx >= hwc->num_parents) -+ return -EINVAL; -+ -+ clksel = hwc->parent_to_clksel[idx]; -+ cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg); -+ -+ return 0; -+} -+ -+static u8 
mux_get_parent(struct clk_hw *hw) -+{ -+ struct mux_hwclock *hwc = to_mux_hwclock(hw); -+ u32 clksel; -+ s8 ret; -+ -+ clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; -+ -+ ret = hwc->clksel_to_parent[clksel]; -+ if (ret < 0) { -+ pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg); -+ return 0; -+ } -+ -+ return ret; -+} -+ -+static const struct clk_ops cmux_ops = { -+ .get_parent = mux_get_parent, -+ .set_parent = mux_set_parent, -+}; -+ -+/* -+ * Don't allow setting for now, as the clock options haven't been -+ * sanitized for additional restrictions. -+ */ -+static const struct clk_ops hwaccel_ops = { -+ .get_parent = mux_get_parent, -+}; -+ -+static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg, -+ struct mux_hwclock *hwc, -+ int idx) -+{ -+ int pll, div; -+ -+ if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID)) -+ return NULL; -+ -+ pll = hwc->info->clksel[idx].pll; -+ div = hwc->info->clksel[idx].div; -+ -+ return &cg->pll[pll].div[div]; -+} -+ -+static struct clk * __init create_mux_common(struct clockgen *cg, -+ struct mux_hwclock *hwc, -+ const struct clk_ops *ops, -+ unsigned long min_rate, -+ unsigned long pct80_rate, -+ const char *fmt, int idx) -+{ -+ struct clk_init_data init = {}; -+ struct clk *clk; -+ const struct clockgen_pll_div *div; -+ const char *parent_names[NUM_MUX_PARENTS]; -+ char name[32]; -+ int i, j; -+ -+ snprintf(name, sizeof(name), fmt, idx); -+ -+ for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) { -+ unsigned long rate; -+ -+ hwc->clksel_to_parent[i] = -1; -+ -+ div = get_pll_div(cg, hwc, i); -+ if (!div) -+ continue; -+ -+ rate = clk_get_rate(div->clk); -+ -+ if (hwc->info->clksel[i].flags & CLKSEL_80PCT && -+ rate > pct80_rate) -+ continue; -+ if (rate < min_rate) -+ continue; -+ -+ parent_names[j] = div->name; -+ hwc->parent_to_clksel[j] = i; -+ hwc->clksel_to_parent[i] = j; -+ j++; -+ } -+ -+ init.name = name; -+ init.ops = ops; -+ init.parent_names = parent_names; -+ init.num_parents = 
hwc->num_parents = j; -+ init.flags = 0; -+ hwc->hw.init = &init; -+ hwc->cg = cg; -+ -+ clk = clk_register(NULL, &hwc->hw); -+ if (IS_ERR(clk)) { -+ pr_err("%s: Couldn't register %s: %ld\n", __func__, name, -+ PTR_ERR(clk)); -+ kfree(hwc); -+ return NULL; -+ } -+ -+ return clk; -+} -+ -+static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) -+{ -+ struct mux_hwclock *hwc; -+ const struct clockgen_pll_div *div; -+ unsigned long plat_rate, min_rate; -+ u64 pct80_rate; -+ u32 clksel; -+ -+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); -+ if (!hwc) -+ return NULL; -+ -+ if (cg->info.flags & CG_VER3) -+ hwc->reg = cg->regs + 0x70000 + 0x20 * idx; -+ else -+ hwc->reg = cg->regs + 0x20 * idx; -+ -+ hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]]; -+ -+ /* -+ * Find the rate for the default clksel, and treat it as the -+ * maximum rated core frequency. If this is an incorrect -+ * assumption, certain clock options (possibly including the -+ * default clksel) may be inappropriately excluded on certain -+ * chips. 
-+ */ -+ clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; -+ div = get_pll_div(cg, hwc, clksel); -+ if (!div) -+ return NULL; -+ -+ pct80_rate = clk_get_rate(div->clk); -+ pct80_rate *= 8; -+ do_div(pct80_rate, 10); -+ -+ plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); -+ -+ if (cg->info.flags & CG_CMUX_GE_PLAT) -+ min_rate = plat_rate; -+ else -+ min_rate = plat_rate / 2; -+ -+ return create_mux_common(cg, hwc, &cmux_ops, min_rate, -+ pct80_rate, "cg-cmux%d", idx); -+} -+ -+static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx) -+{ -+ struct mux_hwclock *hwc; -+ -+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); -+ if (!hwc) -+ return NULL; -+ -+ hwc->reg = cg->regs + 0x20 * idx + 0x10; -+ hwc->info = cg->info.hwaccel[idx]; -+ -+ return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0, -+ "cg-hwaccel%d", idx); -+} -+ -+static void __init create_muxes(struct clockgen *cg) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) { -+ if (cg->info.cmux_to_group[i] < 0) -+ break; -+ if (cg->info.cmux_to_group[i] >= -+ ARRAY_SIZE(cg->info.cmux_groups)) { -+ WARN_ON_ONCE(1); -+ continue; -+ } -+ -+ cg->cmux[i] = create_one_cmux(cg, i); -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) { -+ if (!cg->info.hwaccel[i]) -+ continue; -+ -+ cg->hwaccel[i] = create_one_hwaccel(cg, i); -+ } -+} -+ -+static void __init clockgen_init(struct device_node *np); -+ -+/* Legacy nodes may get probed before the parent clockgen node */ -+static void __init legacy_init_clockgen(struct device_node *np) -+{ -+ if (!clockgen.node) -+ clockgen_init(of_get_parent(np)); -+} -+ -+/* Legacy node */ -+static void __init core_mux_init(struct device_node *np) -+{ -+ struct clk *clk; -+ struct resource res; -+ int idx, rc; -+ -+ legacy_init_clockgen(np); -+ -+ if (of_address_to_resource(np, 0, &res)) -+ return; -+ -+ idx = (res.start & 0xf0) >> 5; -+ clk = clockgen.cmux[idx]; -+ -+ rc = of_clk_add_provider(np, of_clk_src_simple_get, clk); -+ if 
(rc) { -+ pr_err("%s: Couldn't register clk provider for node %s: %d\n", -+ __func__, np->name, rc); -+ return; -+ } -+} -+ -+static struct clk *sysclk_from_fixed(struct device_node *node, const char *name) -+{ -+ u32 rate; -+ -+ if (of_property_read_u32(node, "clock-frequency", &rate)) -+ return ERR_PTR(-ENODEV); -+ -+ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate); -+} -+ -+static struct clk *sysclk_from_parent(const char *name) -+{ -+ struct clk *clk; -+ const char *parent_name; -+ -+ clk = of_clk_get(clockgen.node, 0); -+ if (IS_ERR(clk)) -+ return clk; -+ -+ /* Register the input clock under the desired name. */ -+ parent_name = __clk_get_name(clk); -+ clk = clk_register_fixed_factor(NULL, name, parent_name, -+ 0, 1, 1); -+ if (IS_ERR(clk)) -+ pr_err("%s: Couldn't register %s: %ld\n", __func__, name, -+ PTR_ERR(clk)); -+ -+ return clk; -+} -+ -+static struct clk * __init create_sysclk(const char *name) -+{ -+ struct device_node *sysclk; -+ struct clk *clk; -+ -+ clk = sysclk_from_fixed(clockgen.node, name); -+ if (!IS_ERR(clk)) -+ return clk; -+ -+ clk = sysclk_from_parent(name); -+ if (!IS_ERR(clk)) -+ return clk; -+ -+ sysclk = of_get_child_by_name(clockgen.node, "sysclk"); -+ if (sysclk) { -+ clk = sysclk_from_fixed(sysclk, name); -+ if (!IS_ERR(clk)) -+ return clk; -+ } -+ -+ pr_err("%s: No input clock\n", __func__); -+ return NULL; -+} -+ -+/* Legacy node */ -+static void __init sysclk_init(struct device_node *node) -+{ -+ struct clk *clk; -+ -+ legacy_init_clockgen(node); -+ -+ clk = clockgen.sysclk; -+ if (clk) -+ of_clk_add_provider(node, of_clk_src_simple_get, clk); -+} -+ -+#define PLL_KILL BIT(31) -+ -+static void __init create_one_pll(struct clockgen *cg, int idx) -+{ -+ u32 __iomem *reg; -+ u32 mult; -+ struct clockgen_pll *pll = &cg->pll[idx]; -+ int i; -+ -+ if (!(cg->info.pll_mask & (1 << idx))) -+ return; -+ -+ if (cg->info.flags & CG_VER3) { -+ switch (idx) { -+ case PLATFORM_PLL: -+ reg = cg->regs + 0x60080; -+ break; -+ 
case CGA_PLL1: -+ reg = cg->regs + 0x80; -+ break; -+ case CGA_PLL2: -+ reg = cg->regs + 0xa0; -+ break; -+ case CGB_PLL1: -+ reg = cg->regs + 0x10080; -+ break; -+ case CGB_PLL2: -+ reg = cg->regs + 0x100a0; -+ break; -+ default: -+ WARN_ONCE(1, "index %d\n", idx); -+ return; -+ } -+ } else { -+ if (idx == PLATFORM_PLL) -+ reg = cg->regs + 0xc00; -+ else -+ reg = cg->regs + 0x800 + 0x20 * (idx - 1); -+ } -+ -+ /* Get the multiple of PLL */ -+ mult = cg_in(cg, reg); -+ -+ /* Check if this PLL is disabled */ -+ if (mult & PLL_KILL) { -+ pr_debug("%s(): pll %p disabled\n", __func__, reg); -+ return; -+ } -+ -+ if ((cg->info.flags & CG_VER3) || -+ ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL)) -+ mult = (mult & GENMASK(8, 1)) >> 1; -+ else -+ mult = (mult & GENMASK(6, 1)) >> 1; -+ -+ for (i = 0; i < ARRAY_SIZE(pll->div); i++) { -+ struct clk *clk; -+ -+ snprintf(pll->div[i].name, sizeof(pll->div[i].name), -+ "cg-pll%d-div%d", idx, i + 1); -+ -+ clk = clk_register_fixed_factor(NULL, -+ pll->div[i].name, "cg-sysclk", 0, mult, i + 1); -+ if (IS_ERR(clk)) { -+ pr_err("%s: %s: register failed %ld\n", -+ __func__, pll->div[i].name, PTR_ERR(clk)); -+ continue; -+ } -+ -+ pll->div[i].clk = clk; -+ } -+} -+ -+static void __init create_plls(struct clockgen *cg) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(cg->pll); i++) -+ create_one_pll(cg, i); -+} -+ -+static void __init legacy_pll_init(struct device_node *np, int idx) -+{ -+ struct clockgen_pll *pll; -+ struct clk_onecell_data *onecell_data; -+ struct clk **subclks; -+ int count, rc; -+ -+ legacy_init_clockgen(np); -+ -+ pll = &clockgen.pll[idx]; -+ count = of_property_count_strings(np, "clock-output-names"); -+ -+ BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4); -+ subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL); -+ if (!subclks) -+ return; -+ -+ onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL); -+ if (!onecell_data) -+ goto err_clks; -+ -+ if (count <= 3) { -+ subclks[0] = pll->div[0].clk; -+ subclks[1] 
= pll->div[1].clk; -+ subclks[2] = pll->div[3].clk; -+ } else { -+ subclks[0] = pll->div[0].clk; -+ subclks[1] = pll->div[1].clk; -+ subclks[2] = pll->div[2].clk; -+ subclks[3] = pll->div[3].clk; -+ } -+ -+ onecell_data->clks = subclks; -+ onecell_data->clk_num = count; -+ -+ rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data); -+ if (rc) { -+ pr_err("%s: Couldn't register clk provider for node %s: %d\n", -+ __func__, np->name, rc); -+ goto err_cell; -+ } -+ -+ return; -+err_cell: -+ kfree(onecell_data); -+err_clks: -+ kfree(subclks); -+} -+ -+/* Legacy node */ -+static void __init pltfrm_pll_init(struct device_node *np) -+{ -+ legacy_pll_init(np, PLATFORM_PLL); -+} -+ -+/* Legacy node */ -+static void __init core_pll_init(struct device_node *np) -+{ -+ struct resource res; -+ int idx; -+ -+ if (of_address_to_resource(np, 0, &res)) -+ return; -+ -+ if ((res.start & 0xfff) == 0xc00) { -+ /* -+ * ls1021a devtree labels the platform PLL -+ * with the core PLL compatible -+ */ -+ pltfrm_pll_init(np); -+ } else { -+ idx = (res.start & 0xf0) >> 5; -+ legacy_pll_init(np, CGA_PLL1 + idx); -+ } -+} -+ -+static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data) -+{ -+ struct clockgen *cg = data; -+ struct clk *clk; -+ struct clockgen_pll *pll; -+ u32 type, idx; -+ -+ if (clkspec->args_count < 2) { -+ pr_err("%s: insufficient phandle args\n", __func__); -+ return ERR_PTR(-EINVAL); -+ } -+ -+ type = clkspec->args[0]; -+ idx = clkspec->args[1]; -+ -+ switch (type) { -+ case 0: -+ if (idx != 0) -+ goto bad_args; -+ clk = cg->sysclk; -+ break; -+ case 1: -+ if (idx >= ARRAY_SIZE(cg->cmux)) -+ goto bad_args; -+ clk = cg->cmux[idx]; -+ break; -+ case 2: -+ if (idx >= ARRAY_SIZE(cg->hwaccel)) -+ goto bad_args; -+ clk = cg->hwaccel[idx]; -+ break; -+ case 3: -+ if (idx >= ARRAY_SIZE(cg->fman)) -+ goto bad_args; -+ clk = cg->fman[idx]; -+ break; -+ case 4: -+ pll = &cg->pll[PLATFORM_PLL]; -+ if (idx >= ARRAY_SIZE(pll->div)) -+ goto bad_args; -+ 
clk = pll->div[idx].clk; -+ break; -+ default: -+ goto bad_args; -+ } -+ -+ if (!clk) -+ return ERR_PTR(-ENOENT); -+ return clk; -+ -+bad_args: -+ pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx); -+ return ERR_PTR(-EINVAL); -+} -+ -+#ifdef CONFIG_PPC -+ -+static const u32 a4510_svrs[] __initconst = { -+ (SVR_P2040 << 8) | 0x10, /* P2040 1.0 */ -+ (SVR_P2040 << 8) | 0x11, /* P2040 1.1 */ -+ (SVR_P2041 << 8) | 0x10, /* P2041 1.0 */ -+ (SVR_P2041 << 8) | 0x11, /* P2041 1.1 */ -+ (SVR_P3041 << 8) | 0x10, /* P3041 1.0 */ -+ (SVR_P3041 << 8) | 0x11, /* P3041 1.1 */ -+ (SVR_P4040 << 8) | 0x20, /* P4040 2.0 */ -+ (SVR_P4080 << 8) | 0x20, /* P4080 2.0 */ -+ (SVR_P5010 << 8) | 0x10, /* P5010 1.0 */ -+ (SVR_P5010 << 8) | 0x20, /* P5010 2.0 */ -+ (SVR_P5020 << 8) | 0x10, /* P5020 1.0 */ -+ (SVR_P5021 << 8) | 0x10, /* P5021 1.0 */ -+ (SVR_P5040 << 8) | 0x10, /* P5040 1.0 */ -+}; -+ -+#define SVR_SECURITY 0x80000 /* The Security (E) bit */ -+ -+static bool __init has_erratum_a4510(void) -+{ -+ u32 svr = mfspr(SPRN_SVR); -+ int i; -+ -+ svr &= ~SVR_SECURITY; -+ -+ for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) { -+ if (svr == a4510_svrs[i]) -+ return true; -+ } -+ -+ return false; -+} -+#else -+static bool __init has_erratum_a4510(void) -+{ -+ return false; -+} -+#endif -+ -+static void __init clockgen_init(struct device_node *np) -+{ -+ int i, ret; -+ bool is_old_ls1021a = false; -+ -+ /* May have already been called by a legacy probe */ -+ if (clockgen.node) -+ return; -+ -+ clockgen.node = np; -+ clockgen.regs = of_iomap(np, 0); -+ if (!clockgen.regs && -+ of_device_is_compatible(of_root, "fsl,ls1021a")) { -+ /* Compatibility hack for old, broken device trees */ -+ clockgen.regs = ioremap(0x1ee1000, 0x1000); -+ is_old_ls1021a = true; -+ } -+ if (!clockgen.regs) { -+ pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name); -+ return; -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(chipinfo); i++) { -+ if (of_device_is_compatible(np, chipinfo[i].compat)) -+ break; -+ if 
(is_old_ls1021a && -+ !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen")) -+ break; -+ } -+ -+ if (i == ARRAY_SIZE(chipinfo)) { -+ pr_err("%s: unknown clockgen node %s\n", __func__, -+ np->full_name); -+ goto err; -+ } -+ clockgen.info = chipinfo[i]; -+ -+ if (clockgen.info.guts_compat) { -+ struct device_node *guts; -+ -+ guts = of_find_compatible_node(NULL, NULL, -+ clockgen.info.guts_compat); -+ if (guts) { -+ clockgen.guts = of_iomap(guts, 0); -+ if (!clockgen.guts) { -+ pr_err("%s: Couldn't map %s regs\n", __func__, -+ guts->full_name); -+ } -+ } -+ -+ } -+ -+ if (has_erratum_a4510()) -+ clockgen.info.flags |= CG_CMUX_GE_PLAT; -+ -+ clockgen.sysclk = create_sysclk("cg-sysclk"); -+ create_plls(&clockgen); -+ create_muxes(&clockgen); -+ -+ if (clockgen.info.init_periph) -+ clockgen.info.init_periph(&clockgen); -+ -+ ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen); -+ if (ret) { -+ pr_err("%s: Couldn't register clk provider for node %s: %d\n", -+ __func__, np->name, ret); -+ } -+ -+ return; -+err: -+ iounmap(clockgen.regs); -+ clockgen.regs = NULL; -+} -+ -+CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init); -+CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init); -+CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init); -+CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init); -+CLK_OF_DECLARE(qoriq_clockgen_ls2088a, "fsl,ls2088a-clockgen", clockgen_init); -+ -+/* Legacy nodes */ -+CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init); -+CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init); -+CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init); -+CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init); -+CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init); -+CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init); -+CLK_OF_DECLARE(qoriq_pltfrm_pll_1, 
"fsl,qoriq-platform-pll-1.0", pltfrm_pll_init); -+CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init); -diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc -index 72564b7..7ea2441 100644 ---- a/drivers/cpufreq/Kconfig.powerpc -+++ b/drivers/cpufreq/Kconfig.powerpc -@@ -26,7 +26,7 @@ config CPU_FREQ_MAPLE - config PPC_CORENET_CPUFREQ - tristate "CPU frequency scaling driver for Freescale E500MC SoCs" - depends on PPC_E500MC && OF && COMMON_CLK -- select CLK_PPC_CORENET -+ select CLK_QORIQ - help - This adds the CPUFreq driver support for Freescale e500mc, - e5500 and e6500 series SoCs which are capable of changing -diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c -index de361a1..5a63564 100644 ---- a/drivers/dma/acpi-dma.c -+++ b/drivers/dma/acpi-dma.c -@@ -43,7 +43,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, - { - const struct acpi_csrt_shared_info *si; - struct list_head resource_list; -- struct resource_list_entry *rentry; -+ struct resource_entry *rentry; - resource_size_t mem = 0, irq = 0; - int ret; - -@@ -56,10 +56,10 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, - return 0; - - list_for_each_entry(rentry, &resource_list, node) { -- if (resource_type(&rentry->res) == IORESOURCE_MEM) -- mem = rentry->res.start; -- else if (resource_type(&rentry->res) == IORESOURCE_IRQ) -- irq = rentry->res.start; -+ if (resource_type(rentry->res) == IORESOURCE_MEM) -+ mem = rentry->res->start; -+ else if (resource_type(rentry->res) == IORESOURCE_IRQ) -+ irq = rentry->res->start; - } - - acpi_dev_free_resource_list(&resource_list); -diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig -index 06e99eb..bbf8ae4 100644 ---- a/drivers/i2c/busses/Kconfig -+++ b/drivers/i2c/busses/Kconfig -@@ -526,10 +526,10 @@ config I2C_IBM_IIC - - config I2C_IMX - tristate "IMX I2C interface" -- depends on ARCH_MXC -+ depends on ARCH_MXC || 
ARCH_LAYERSCAPE - help - Say Y here if you want to use the IIC bus controller on -- the Freescale i.MX/MXC processors. -+ the Freescale i.MX/MXC and layerscape processors. - - This driver can also be built as a module. If so, the module - will be called i2c-imx. -diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c -index e9fb7cf..13f88f9 100644 ---- a/drivers/i2c/busses/i2c-imx.c -+++ b/drivers/i2c/busses/i2c-imx.c -@@ -33,6 +33,10 @@ - *******************************************************************************/ - - #include -+#include -+#include -+#include -+#include - #include - #include - #include -@@ -47,6 +51,7 @@ - #include - #include - #include -+#include - #include - - /** Defines ******************************************************************** -@@ -58,6 +63,15 @@ - /* Default value */ - #define IMX_I2C_BIT_RATE 100000 /* 100kHz */ - -+/* -+ * Enable DMA if transfer byte size is bigger than this threshold. -+ * As the hardware request, it must bigger than 4 bytes.\ -+ * I have set '16' here, maybe it's not the best but I think it's -+ * the appropriate. 
-+ */ -+#define DMA_THRESHOLD 16 -+#define DMA_TIMEOUT 1000 -+ - /* IMX I2C registers: - * the I2C register offset is different between SoCs, - * to provid support for all these chips, split the -@@ -83,6 +97,7 @@ - #define I2SR_IBB 0x20 - #define I2SR_IAAS 0x40 - #define I2SR_ICF 0x80 -+#define I2CR_DMAEN 0x02 - #define I2CR_RSTA 0x04 - #define I2CR_TXAK 0x08 - #define I2CR_MTX 0x10 -@@ -169,6 +184,17 @@ struct imx_i2c_hwdata { - unsigned i2cr_ien_opcode; - }; - -+struct imx_i2c_dma { -+ struct dma_chan *chan_tx; -+ struct dma_chan *chan_rx; -+ struct dma_chan *chan_using; -+ struct completion cmd_complete; -+ dma_addr_t dma_buf; -+ unsigned int dma_len; -+ enum dma_transfer_direction dma_transfer_dir; -+ enum dma_data_direction dma_data_dir; -+}; -+ - struct imx_i2c_struct { - struct i2c_adapter adapter; - struct clk *clk; -@@ -181,6 +207,8 @@ struct imx_i2c_struct { - unsigned int cur_clk; - unsigned int bitrate; - const struct imx_i2c_hwdata *hwdata; -+ -+ struct imx_i2c_dma *dma; - }; - - static const struct imx_i2c_hwdata imx1_i2c_hwdata = { -@@ -251,6 +279,162 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx, - return readb(i2c_imx->base + (reg << i2c_imx->hwdata->regshift)); - } - -+/* Functions for DMA support */ -+static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, -+ dma_addr_t phy_addr) -+{ -+ struct imx_i2c_dma *dma; -+ struct dma_slave_config dma_sconfig; -+ struct device *dev = &i2c_imx->adapter.dev; -+ int ret; -+ -+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); -+ if (!dma) -+ return; -+ -+ dma->chan_tx = dma_request_slave_channel(dev, "tx"); -+ if (!dma->chan_tx) { -+ dev_dbg(dev, "can't request DMA tx channel\n"); -+ goto fail_al; -+ } -+ -+ dma_sconfig.dst_addr = phy_addr + -+ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift); -+ dma_sconfig.dst_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; -+ dma_sconfig.dst_maxburst = 1; -+ dma_sconfig.direction = DMA_MEM_TO_DEV; -+ ret = dmaengine_slave_config(dma->chan_tx, 
&dma_sconfig); -+ if (ret < 0) { -+ dev_dbg(dev, "can't configure tx channel\n"); -+ goto fail_tx; -+ } -+ -+ dma->chan_rx = dma_request_slave_channel(dev, "rx"); -+ if (!dma->chan_rx) { -+ dev_dbg(dev, "can't request DMA rx channel\n"); -+ goto fail_tx; -+ } -+ -+ dma_sconfig.src_addr = phy_addr + -+ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift); -+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; -+ dma_sconfig.src_maxburst = 1; -+ dma_sconfig.direction = DMA_DEV_TO_MEM; -+ ret = dmaengine_slave_config(dma->chan_rx, &dma_sconfig); -+ if (ret < 0) { -+ dev_dbg(dev, "can't configure rx channel\n"); -+ goto fail_rx; -+ } -+ -+ i2c_imx->dma = dma; -+ init_completion(&dma->cmd_complete); -+ dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n", -+ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); -+ -+ return; -+ -+fail_rx: -+ dma_release_channel(dma->chan_rx); -+fail_tx: -+ dma_release_channel(dma->chan_tx); -+fail_al: -+ devm_kfree(dev, dma); -+ dev_info(dev, "can't use DMA\n"); -+} -+ -+static void i2c_imx_dma_callback(void *arg) -+{ -+ struct imx_i2c_struct *i2c_imx = (struct imx_i2c_struct *)arg; -+ struct imx_i2c_dma *dma = i2c_imx->dma; -+ -+ dma_unmap_single(dma->chan_using->device->dev, dma->dma_buf, -+ dma->dma_len, dma->dma_data_dir); -+ complete(&dma->cmd_complete); -+} -+ -+static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx, -+ struct i2c_msg *msgs) -+{ -+ struct imx_i2c_dma *dma = i2c_imx->dma; -+ struct dma_async_tx_descriptor *txdesc; -+ struct device *dev = &i2c_imx->adapter.dev; -+ struct device *chan_dev = dma->chan_using->device->dev; -+ -+ dma->dma_buf = dma_map_single(chan_dev, msgs->buf, -+ dma->dma_len, dma->dma_data_dir); -+ if (dma_mapping_error(chan_dev, dma->dma_buf)) { -+ dev_err(dev, "DMA mapping failed\n"); -+ goto err_map; -+ } -+ -+ txdesc = dmaengine_prep_slave_single(dma->chan_using, dma->dma_buf, -+ dma->dma_len, dma->dma_transfer_dir, -+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK); -+ if (!txdesc) { -+ 
dev_err(dev, "Not able to get desc for DMA xfer\n"); -+ goto err_desc; -+ } -+ -+ txdesc->callback = i2c_imx_dma_callback; -+ txdesc->callback_param = i2c_imx; -+ if (dma_submit_error(dmaengine_submit(txdesc))) { -+ dev_err(dev, "DMA submit failed\n"); -+ goto err_submit; -+ } -+ -+ dma_async_issue_pending(dma->chan_using); -+ return 0; -+ -+err_submit: -+err_desc: -+ dma_unmap_single(chan_dev, dma->dma_buf, -+ dma->dma_len, dma->dma_data_dir); -+err_map: -+ return -EINVAL; -+} -+ -+static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx) -+{ -+ struct imx_i2c_dma *dma = i2c_imx->dma; -+ -+ dma->dma_buf = 0; -+ dma->dma_len = 0; -+ -+ dma_release_channel(dma->chan_tx); -+ dma->chan_tx = NULL; -+ -+ dma_release_channel(dma->chan_rx); -+ dma->chan_rx = NULL; -+ -+ dma->chan_using = NULL; -+} -+ -+/* -+ * When a system reset does not cause all I2C devices to be reset, it is -+ * sometimes necessary to force the I2C module to become the I2C bus master -+ * out of reset and drive SCL A slave can hold bus low to cause bus hang. -+ * Thus, SDA can be driven low by another I2C device while this I2C module -+ * is coming out of reset and will stay low indefinitely. -+ * The I2C master has to generate 9 clock pulses to get the bus free or idle. 
-+ */ -+static void imx_i2c_fixup(struct imx_i2c_struct *i2c_imx) -+{ -+ int k; -+ u32 delay_val = 1000000 / i2c_imx->cur_clk + 1; -+ -+ if (delay_val < 2) -+ delay_val = 2; -+ -+ for (k = 9; k; k--) { -+ imx_i2c_write_reg(I2CR_IEN, i2c_imx, IMX_I2C_I2CR); -+ imx_i2c_write_reg((I2CR_MSTA | I2CR_MTX) & (~I2CR_IEN), -+ i2c_imx, IMX_I2C_I2CR); -+ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); -+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR); -+ udelay(delay_val << 1); -+ } -+} -+ - /** Functions for IMX I2C adapter driver *************************************** - *******************************************************************************/ - -@@ -276,8 +460,15 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) - if (!for_busy && !(temp & I2SR_IBB)) - break; - if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { -+ u8 status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); -+ - dev_dbg(&i2c_imx->adapter.dev, - "<%s> I2C bus is busy\n", __func__); -+ if ((status & (I2SR_ICF | I2SR_IBB | I2CR_TXAK)) != 0) { -+ imx_i2c_write_reg(status & ~I2SR_IAL, i2c_imx, -+ IMX_I2C_I2CR); -+ imx_i2c_fixup(i2c_imx); -+ } - return -ETIMEDOUT; - } - schedule(); -@@ -382,6 +573,7 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) - i2c_imx->stopped = 0; - - temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK; -+ temp &= ~I2CR_DMAEN; - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); - return result; - } -@@ -395,6 +587,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) - dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); - temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); - temp &= ~(I2CR_MSTA | I2CR_MTX); -+ if (i2c_imx->dma) -+ temp &= ~I2CR_DMAEN; - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); - } - if (is_imx1_i2c(i2c_imx)) { -@@ -435,6 +629,157 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) - return IRQ_NONE; - } - -+static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, -+ struct i2c_msg *msgs) -+{ -+ int result; -+ unsigned long 
time_left; -+ unsigned int temp = 0; -+ unsigned long orig_jiffies = jiffies; -+ struct imx_i2c_dma *dma = i2c_imx->dma; -+ struct device *dev = &i2c_imx->adapter.dev; -+ -+ dma->chan_using = dma->chan_tx; -+ dma->dma_transfer_dir = DMA_MEM_TO_DEV; -+ dma->dma_data_dir = DMA_TO_DEVICE; -+ dma->dma_len = msgs->len - 1; -+ result = i2c_imx_dma_xfer(i2c_imx, msgs); -+ if (result) -+ return result; -+ -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp |= I2CR_DMAEN; -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ -+ /* -+ * Write slave address. -+ * The first byte must be transmitted by the CPU. -+ */ -+ imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR); -+ reinit_completion(&i2c_imx->dma->cmd_complete); -+ time_left = wait_for_completion_timeout( -+ &i2c_imx->dma->cmd_complete, -+ msecs_to_jiffies(DMA_TIMEOUT)); -+ if (time_left == 0) { -+ dmaengine_terminate_all(dma->chan_using); -+ return -ETIMEDOUT; -+ } -+ -+ /* Waiting for transfer complete. */ -+ while (1) { -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); -+ if (temp & I2SR_ICF) -+ break; -+ if (time_after(jiffies, orig_jiffies + -+ msecs_to_jiffies(DMA_TIMEOUT))) { -+ dev_dbg(dev, "<%s> Timeout\n", __func__); -+ return -ETIMEDOUT; -+ } -+ schedule(); -+ } -+ -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp &= ~I2CR_DMAEN; -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ -+ /* The last data byte must be transferred by the CPU. 
*/ -+ imx_i2c_write_reg(msgs->buf[msgs->len-1], -+ i2c_imx, IMX_I2C_I2DR); -+ result = i2c_imx_trx_complete(i2c_imx); -+ if (result) -+ return result; -+ -+ return i2c_imx_acked(i2c_imx); -+} -+ -+static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, -+ struct i2c_msg *msgs, bool is_lastmsg) -+{ -+ int result; -+ unsigned long time_left; -+ unsigned int temp; -+ unsigned long orig_jiffies = jiffies; -+ struct imx_i2c_dma *dma = i2c_imx->dma; -+ struct device *dev = &i2c_imx->adapter.dev; -+ -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp |= I2CR_DMAEN; -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ -+ dma->chan_using = dma->chan_rx; -+ dma->dma_transfer_dir = DMA_DEV_TO_MEM; -+ dma->dma_data_dir = DMA_FROM_DEVICE; -+ /* The last two data bytes must be transferred by the CPU. */ -+ dma->dma_len = msgs->len - 2; -+ result = i2c_imx_dma_xfer(i2c_imx, msgs); -+ if (result) -+ return result; -+ -+ reinit_completion(&i2c_imx->dma->cmd_complete); -+ time_left = wait_for_completion_timeout( -+ &i2c_imx->dma->cmd_complete, -+ msecs_to_jiffies(DMA_TIMEOUT)); -+ if (time_left == 0) { -+ dmaengine_terminate_all(dma->chan_using); -+ return -ETIMEDOUT; -+ } -+ -+ /* waiting for transfer complete. 
*/ -+ while (1) { -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); -+ if (temp & I2SR_ICF) -+ break; -+ if (time_after(jiffies, orig_jiffies + -+ msecs_to_jiffies(DMA_TIMEOUT))) { -+ dev_dbg(dev, "<%s> Timeout\n", __func__); -+ return -ETIMEDOUT; -+ } -+ schedule(); -+ } -+ -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp &= ~I2CR_DMAEN; -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ -+ /* read n-1 byte data */ -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp |= I2CR_TXAK; -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ -+ msgs->buf[msgs->len-2] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); -+ /* read n byte data */ -+ result = i2c_imx_trx_complete(i2c_imx); -+ if (result) -+ return result; -+ -+ if (is_lastmsg) { -+ /* -+ * It must generate STOP before read I2DR to prevent -+ * controller from generating another clock cycle -+ */ -+ dev_dbg(dev, "<%s> clear MSTA\n", __func__); -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp &= ~(I2CR_MSTA | I2CR_MTX); -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ i2c_imx_bus_busy(i2c_imx, 0); -+ i2c_imx->stopped = 1; -+ } else { -+ /* -+ * For i2c master receiver repeat restart operation like: -+ * read -> repeat MSTA -> read/write -+ * The controller must set MTX before read the last byte in -+ * the first read operation, otherwise the first read cost -+ * one extra clock cycle. 
-+ */ -+ temp = readb(i2c_imx->base + IMX_I2C_I2CR); -+ temp |= I2CR_MTX; -+ writeb(temp, i2c_imx->base + IMX_I2C_I2CR); -+ } -+ msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); -+ -+ return 0; -+} -+ - static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) - { - int i, result; -@@ -504,6 +849,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo - - dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); - -+ if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data) -+ return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg); -+ - /* read data */ - for (i = 0; i < msgs->len; i++) { - u8 len = 0; -@@ -577,6 +925,13 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, - - dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); - -+ /* workround for ERR010027: ensure that the I2C BUS is idle -+ before switching to master mode and attempting a Start cycle -+ */ -+ result = i2c_imx_bus_busy(i2c_imx, 0); -+ if (result) -+ goto fail0; -+ - /* Start I2C transfer */ - result = i2c_imx_start(i2c_imx); - if (result) -@@ -618,8 +973,12 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, - #endif - if (msgs[i].flags & I2C_M_RD) - result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg); -- else -- result = i2c_imx_write(i2c_imx, &msgs[i]); -+ else { -+ if (i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD) -+ result = i2c_imx_dma_write(i2c_imx, &msgs[i]); -+ else -+ result = i2c_imx_write(i2c_imx, &msgs[i]); -+ } - if (result) - goto fail0; - } -@@ -654,6 +1013,7 @@ static int i2c_imx_probe(struct platform_device *pdev) - struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev); - void __iomem *base; - int irq, ret; -+ dma_addr_t phy_addr; - - dev_dbg(&pdev->dev, "<%s>\n", __func__); - -@@ -668,6 +1028,7 @@ static int i2c_imx_probe(struct platform_device *pdev) - if (IS_ERR(base)) - return PTR_ERR(base); - -+ phy_addr = (dma_addr_t)res->start; - i2c_imx = devm_kzalloc(&pdev->dev, sizeof(struct 
imx_i2c_struct), - GFP_KERNEL); - if (!i2c_imx) -@@ -701,7 +1062,7 @@ static int i2c_imx_probe(struct platform_device *pdev) - return ret; - } - /* Request IRQ */ -- ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, -+ ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED, - pdev->name, i2c_imx); - if (ret) { - dev_err(&pdev->dev, "can't claim irq %d\n", irq); -@@ -743,6 +1104,9 @@ static int i2c_imx_probe(struct platform_device *pdev) - i2c_imx->adapter.name); - dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); - -+ /* Init DMA config if support*/ -+ i2c_imx_dma_request(i2c_imx, phy_addr); -+ - return 0; /* Return OK */ - - clk_disable: -@@ -758,6 +1122,9 @@ static int i2c_imx_remove(struct platform_device *pdev) - dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n"); - i2c_del_adapter(&i2c_imx->adapter); - -+ if (i2c_imx->dma) -+ i2c_imx_dma_free(i2c_imx); -+ - /* setup chip registers to defaults */ - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR); -diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c -index cb77277..0c8d4d2 100644 ---- a/drivers/i2c/muxes/i2c-mux-pca9541.c -+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c -@@ -104,7 +104,7 @@ static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val) - buf[0] = command; - buf[1] = val; - msg.buf = buf; -- ret = adap->algo->master_xfer(adap, &msg, 1); -+ ret = __i2c_transfer(adap, &msg, 1); - } else { - union i2c_smbus_data data; - -@@ -144,7 +144,7 @@ static int pca9541_reg_read(struct i2c_client *client, u8 command) - .buf = &val - } - }; -- ret = adap->algo->master_xfer(adap, msg, 2); -+ ret = __i2c_transfer(adap, msg, 2); - if (ret == 2) - ret = val; - else if (ret >= 0) -diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c -index ec11b40..28540a4 100644 ---- a/drivers/i2c/muxes/i2c-mux-pca954x.c -+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c -@@ -41,6 +41,7 @@ - 
#include - #include - #include -+#include - #include - #include - -@@ -62,6 +63,7 @@ struct pca954x { - struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS]; - - u8 last_chan; /* last register value */ -+ u8 disable_mux; /* do not disable mux if val not 0 */ - }; - - struct chip_desc { -@@ -133,7 +135,7 @@ static int pca954x_reg_write(struct i2c_adapter *adap, - msg.len = 1; - buf[0] = val; - msg.buf = buf; -- ret = adap->algo->master_xfer(adap, &msg, 1); -+ ret = __i2c_transfer(adap, &msg, 1); - } else { - union i2c_smbus_data data; - ret = adap->algo->smbus_xfer(adap, client->addr, -@@ -173,6 +175,13 @@ static int pca954x_deselect_mux(struct i2c_adapter *adap, - { - struct pca954x *data = i2c_get_clientdata(client); - -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ if (data->disable_mux != 0) -+ data->last_chan = chips[data->type].nchans; -+ else -+ data->last_chan = 0; -+ return pca954x_reg_write(adap, client, data->disable_mux); -+#endif - /* Deselect active channel */ - data->last_chan = 0; - return pca954x_reg_write(adap, client, data->last_chan); -@@ -186,6 +195,8 @@ static int pca954x_probe(struct i2c_client *client, - { - struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); - struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev); -+ struct device_node *of_node = client->dev.of_node; -+ bool idle_disconnect_dt; - struct gpio_desc *gpio; - int num, force, class; - struct pca954x *data; -@@ -198,27 +209,55 @@ static int pca954x_probe(struct i2c_client *client, - if (!data) - return -ENOMEM; - -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ /* The point here is that you must not disable a mux if there -+ * are no pullups on the input or you mess up the I2C. This -+ * needs to be put into the DTS really as the kernel cannot -+ * know this otherwise. -+ */ -+ data->type = id->driver_data; -+ data->disable_mux = of_node && -+ of_property_read_bool(of_node, "i2c-mux-never-disable") && -+ chips[data->type].muxtype == pca954x_ismux ? 
-+ chips[data->type].enable : 0; -+ /* force the first selection */ -+ if (data->disable_mux != 0) -+ data->last_chan = chips[data->type].nchans; -+ else -+ data->last_chan = 0; -+#endif - i2c_set_clientdata(client, data); - - /* Get the mux out of reset if a reset GPIO is specified. */ -- gpio = devm_gpiod_get(&client->dev, "reset"); -- if (!IS_ERR(gpio)) -- gpiod_direction_output(gpio, 0); -+ gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_LOW); -+ if (IS_ERR(gpio)) -+ return PTR_ERR(gpio); - - /* Write the mux register at addr to verify - * that the mux is in fact present. This also - * initializes the mux to disconnected state. - */ -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ if (i2c_smbus_write_byte(client, data->disable_mux) < 0) { -+#else - if (i2c_smbus_write_byte(client, 0) < 0) { -+#endif - dev_warn(&client->dev, "probe failed\n"); - return -ENODEV; - } - -+#ifndef CONFIG_ARCH_LAYERSCAPE - data->type = id->driver_data; - data->last_chan = 0; /* force the first selection */ -+#endif -+ -+ idle_disconnect_dt = of_node && -+ of_property_read_bool(of_node, "i2c-mux-idle-disconnect"); - - /* Now create an adapter for each channel */ - for (num = 0; num < chips[data->type].nchans; num++) { -+ bool idle_disconnect_pd = false; -+ - force = 0; /* dynamic adap number */ - class = 0; /* no class by default */ - if (pdata) { -@@ -229,12 +268,13 @@ static int pca954x_probe(struct i2c_client *client, - } else - /* discard unconfigured channels */ - break; -+ idle_disconnect_pd = pdata->modes[num].deselect_on_exit; - } - - data->virt_adaps[num] = - i2c_add_mux_adapter(adap, &client->dev, client, - force, num, class, pca954x_select_chan, -- (pdata && pdata->modes[num].deselect_on_exit) -+ (idle_disconnect_pd || idle_disconnect_dt) - ? 
pca954x_deselect_mux : NULL); - - if (data->virt_adaps[num] == NULL) { -@@ -280,6 +320,13 @@ static int pca954x_resume(struct device *dev) - struct i2c_client *client = to_i2c_client(dev); - struct pca954x *data = i2c_get_clientdata(client); - -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ if (data->disable_mux != 0) -+ data->last_chan = chips[data->type].nchans; -+ else -+ data->last_chan = 0; -+ return i2c_smbus_write_byte(client, data->disable_mux); -+#endif - data->last_chan = 0; - return i2c_smbus_write_byte(client, 0); - } -diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig -index dd51122..2cdcc76 100644 ---- a/drivers/iommu/Kconfig -+++ b/drivers/iommu/Kconfig -@@ -13,9 +13,35 @@ menuconfig IOMMU_SUPPORT - - if IOMMU_SUPPORT - -+menu "Generic IOMMU Pagetable Support" -+ -+# Selected by the actual pagetable implementations -+config IOMMU_IO_PGTABLE -+ bool -+ -+config IOMMU_IO_PGTABLE_LPAE -+ bool "ARMv7/v8 Long Descriptor Format" -+ select IOMMU_IO_PGTABLE -+ help -+ Enable support for the ARM long descriptor pagetable format. -+ This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page -+ sizes at both stage-1 and stage-2, as well as address spaces -+ up to 48-bits in size. -+ -+config IOMMU_IO_PGTABLE_LPAE_SELFTEST -+ bool "LPAE selftests" -+ depends on IOMMU_IO_PGTABLE_LPAE -+ help -+ Enable self-tests for LPAE page table allocator. This performs -+ a series of page-table consistency checks during boot. -+ -+ If unsure, say N here. -+ -+endmenu -+ - config OF_IOMMU - def_bool y -- depends on OF -+ depends on OF && IOMMU_API - - config FSL_PAMU - bool "Freescale IOMMU support" -@@ -291,13 +317,13 @@ config SPAPR_TCE_IOMMU - - config ARM_SMMU - bool "ARM Ltd. System MMU (SMMU) Support" -- depends on ARM64 || (ARM_LPAE && OF) -+ depends on ARM64 || ARM - select IOMMU_API -+ select IOMMU_IO_PGTABLE_LPAE - select ARM_DMA_USE_IOMMU if ARM - help - Support for implementations of the ARM System MMU architecture -- versions 1 and 2. 
The driver supports both v7l and v8l table -- formats with 4k and 64k page sizes. -+ versions 1 and 2. - - Say Y here if your SoC includes an IOMMU device implementing - the ARM SMMU architecture. -diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile -index 16edef7..269cdd8 100644 ---- a/drivers/iommu/Makefile -+++ b/drivers/iommu/Makefile -@@ -1,6 +1,8 @@ - obj-$(CONFIG_IOMMU_API) += iommu.o - obj-$(CONFIG_IOMMU_API) += iommu-traces.o - obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o -+obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o -+obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o - obj-$(CONFIG_OF_IOMMU) += of_iommu.o - obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o - obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o -diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c -index af3daf8..f7131fa 100644 ---- a/drivers/iommu/amd_iommu.c -+++ b/drivers/iommu/amd_iommu.c -@@ -343,8 +343,9 @@ static u16 get_alias(struct device *dev) - */ - if (pci_alias == devid && - PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) { -- pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; -- pdev->dma_alias_devfn = ivrs_alias & 0xff; -+ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; -+ pdev->dma_alias_devid = PCI_DEVID(pdev->bus->number, -+ ivrs_alias & 0xff); - pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n", - PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias), - dev_name(dev)); -@@ -3432,6 +3433,7 @@ static const struct iommu_ops amd_iommu_ops = { - .detach_dev = amd_iommu_detach_device, - .map = amd_iommu_map, - .unmap = amd_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = amd_iommu_iova_to_phys, - .pgsize_bitmap = AMD_IOMMU_PGSIZES, - }; -diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c -index 60558f7..10e584b 100644 ---- a/drivers/iommu/arm-smmu.c -+++ b/drivers/iommu/arm-smmu.c -@@ -23,8 +23,6 @@ - * - Stream-matching and stream-indexing - * - v7/v8 long-descriptor format - * - Non-secure access to the SMMU -- * - 
4k and 64k pages, with contiguous pte hints. -- * - Up to 48-bit addressing (dependent on VA_BITS) - * - Context fault reporting - */ - -@@ -36,7 +34,7 @@ - #include - #include - #include --#include -+#include - #include - #include - #include -@@ -46,6 +44,16 @@ - - #include - -+#include "io-pgtable.h" -+ -+#ifdef CONFIG_FSL_MC_BUS -+#include <../drivers/staging/fsl-mc/include/mc.h> -+#endif -+ -+#ifdef CONFIG_PCI_LAYERSCAPE -+#include <../drivers/pci/host/pci-layerscape.h> -+#endif -+ - #include - - /* Maximum number of stream IDs assigned to a single device */ -@@ -71,40 +79,6 @@ - ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ - ? 0x400 : 0)) - --/* Page table bits */ --#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) --#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) --#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10) --#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8) --#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8) --#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8) --#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) -- --#if PAGE_SIZE == SZ_4K --#define ARM_SMMU_PTE_CONT_ENTRIES 16 --#elif PAGE_SIZE == SZ_64K --#define ARM_SMMU_PTE_CONT_ENTRIES 32 --#else --#define ARM_SMMU_PTE_CONT_ENTRIES 1 --#endif -- --#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) --#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) -- --/* Stage-1 PTE */ --#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) --#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6) --#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2 --#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11) -- --/* Stage-2 PTE */ --#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6) --#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6) --#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6) --#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2) --#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2) --#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2) -- - /* Configuration registers */ - #define ARM_SMMU_GR0_sCR0 
0x0 - #define sCR0_CLIENTPD (1 << 0) -@@ -132,17 +106,12 @@ - #define ARM_SMMU_GR0_sGFSYNR0 0x50 - #define ARM_SMMU_GR0_sGFSYNR1 0x54 - #define ARM_SMMU_GR0_sGFSYNR2 0x58 --#define ARM_SMMU_GR0_PIDR0 0xfe0 --#define ARM_SMMU_GR0_PIDR1 0xfe4 --#define ARM_SMMU_GR0_PIDR2 0xfe8 - - #define ID0_S1TS (1 << 30) - #define ID0_S2TS (1 << 29) - #define ID0_NTS (1 << 28) - #define ID0_SMS (1 << 27) --#define ID0_PTFS_SHIFT 24 --#define ID0_PTFS_MASK 0x2 --#define ID0_PTFS_V8_ONLY 0x2 -+#define ID0_ATOSNS (1 << 26) - #define ID0_CTTW (1 << 14) - #define ID0_NUMIRPT_SHIFT 16 - #define ID0_NUMIRPT_MASK 0xff -@@ -169,11 +138,7 @@ - #define ID2_PTFS_16K (1 << 13) - #define ID2_PTFS_64K (1 << 14) - --#define PIDR2_ARCH_SHIFT 4 --#define PIDR2_ARCH_MASK 0xf -- - /* Global TLB invalidation */ --#define ARM_SMMU_GR0_STLBIALL 0x60 - #define ARM_SMMU_GR0_TLBIVMID 0x64 - #define ARM_SMMU_GR0_TLBIALLNSNH 0x68 - #define ARM_SMMU_GR0_TLBIALLH 0x6c -@@ -231,13 +196,25 @@ - #define ARM_SMMU_CB_TTBCR2 0x10 - #define ARM_SMMU_CB_TTBR0_LO 0x20 - #define ARM_SMMU_CB_TTBR0_HI 0x24 -+#define ARM_SMMU_CB_TTBR1_LO 0x28 -+#define ARM_SMMU_CB_TTBR1_HI 0x2c - #define ARM_SMMU_CB_TTBCR 0x30 - #define ARM_SMMU_CB_S1_MAIR0 0x38 -+#define ARM_SMMU_CB_S1_MAIR1 0x3c -+#define ARM_SMMU_CB_PAR_LO 0x50 -+#define ARM_SMMU_CB_PAR_HI 0x54 - #define ARM_SMMU_CB_FSR 0x58 - #define ARM_SMMU_CB_FAR_LO 0x60 - #define ARM_SMMU_CB_FAR_HI 0x64 - #define ARM_SMMU_CB_FSYNR0 0x68 -+#define ARM_SMMU_CB_S1_TLBIVA 0x600 - #define ARM_SMMU_CB_S1_TLBIASID 0x610 -+#define ARM_SMMU_CB_S1_TLBIVAL 0x620 -+#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 -+#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 -+#define ARM_SMMU_CB_ATS1PR_LO 0x800 -+#define ARM_SMMU_CB_ATS1PR_HI 0x804 -+#define ARM_SMMU_CB_ATSR 0x8f0 - - #define SCTLR_S1_ASIDPNE (1 << 12) - #define SCTLR_CFCFG (1 << 7) -@@ -249,64 +226,17 @@ - #define SCTLR_M (1 << 0) - #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) - --#define RESUME_RETRY (0 << 0) --#define RESUME_TERMINATE (1 << 0) -- 
--#define TTBCR_EAE (1 << 31) -+#define CB_PAR_F (1 << 0) - --#define TTBCR_PASIZE_SHIFT 16 --#define TTBCR_PASIZE_MASK 0x7 -+#define ATSR_ACTIVE (1 << 0) - --#define TTBCR_TG0_4K (0 << 14) --#define TTBCR_TG0_64K (1 << 14) -- --#define TTBCR_SH0_SHIFT 12 --#define TTBCR_SH0_MASK 0x3 --#define TTBCR_SH_NS 0 --#define TTBCR_SH_OS 2 --#define TTBCR_SH_IS 3 -- --#define TTBCR_ORGN0_SHIFT 10 --#define TTBCR_IRGN0_SHIFT 8 --#define TTBCR_RGN_MASK 0x3 --#define TTBCR_RGN_NC 0 --#define TTBCR_RGN_WBWA 1 --#define TTBCR_RGN_WT 2 --#define TTBCR_RGN_WB 3 -- --#define TTBCR_SL0_SHIFT 6 --#define TTBCR_SL0_MASK 0x3 --#define TTBCR_SL0_LVL_2 0 --#define TTBCR_SL0_LVL_1 1 -- --#define TTBCR_T1SZ_SHIFT 16 --#define TTBCR_T0SZ_SHIFT 0 --#define TTBCR_SZ_MASK 0xf -+#define RESUME_RETRY (0 << 0) -+#define RESUME_TERMINATE (1 << 0) - - #define TTBCR2_SEP_SHIFT 15 --#define TTBCR2_SEP_MASK 0x7 -- --#define TTBCR2_PASIZE_SHIFT 0 --#define TTBCR2_PASIZE_MASK 0x7 -- --/* Common definitions for PASize and SEP fields */ --#define TTBCR2_ADDR_32 0 --#define TTBCR2_ADDR_36 1 --#define TTBCR2_ADDR_40 2 --#define TTBCR2_ADDR_42 3 --#define TTBCR2_ADDR_44 4 --#define TTBCR2_ADDR_48 5 -- --#define TTBRn_HI_ASID_SHIFT 16 -- --#define MAIR_ATTR_SHIFT(n) ((n) << 3) --#define MAIR_ATTR_MASK 0xff --#define MAIR_ATTR_DEVICE 0x04 --#define MAIR_ATTR_NC 0x44 --#define MAIR_ATTR_WBRWA 0xff --#define MAIR_ATTR_IDX_NC 0 --#define MAIR_ATTR_IDX_CACHE 1 --#define MAIR_ATTR_IDX_DEV 2 -+#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) -+ -+#define TTBRn_HI_ASID_SHIFT 16 - - #define FSR_MULTI (1 << 31) - #define FSR_SS (1 << 30) -@@ -345,6 +275,7 @@ struct arm_smmu_smr { - struct arm_smmu_master_cfg { - int num_streamids; - u16 streamids[MAX_MASTER_STREAMIDS]; -+ u16 mask; - struct arm_smmu_smr *smrs; - }; - -@@ -366,6 +297,7 @@ struct arm_smmu_device { - #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) - #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) - #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) -+#define 
ARM_SMMU_FEAT_TRANS_OPS (1 << 5) - u32 features; - - #define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) -@@ -380,10 +312,9 @@ struct arm_smmu_device { - u32 num_mapping_groups; - DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); - -- unsigned long s1_input_size; -- unsigned long s1_output_size; -- unsigned long s2_input_size; -- unsigned long s2_output_size; -+ unsigned long va_size; -+ unsigned long ipa_size; -+ unsigned long pa_size; - - u32 num_global_irqs; - u32 num_context_irqs; -@@ -397,19 +328,33 @@ struct arm_smmu_cfg { - u8 cbndx; - u8 irptndx; - u32 cbar; -- pgd_t *pgd; - }; - #define INVALID_IRPTNDX 0xff - - #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) - #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) - -+enum arm_smmu_domain_stage { -+ ARM_SMMU_DOMAIN_S1 = 0, -+ ARM_SMMU_DOMAIN_S2, -+ ARM_SMMU_DOMAIN_NESTED, -+}; -+ - struct arm_smmu_domain { - struct arm_smmu_device *smmu; -+ struct io_pgtable_ops *pgtbl_ops; -+ spinlock_t pgtbl_lock; - struct arm_smmu_cfg cfg; -- spinlock_t lock; -+ enum arm_smmu_domain_stage stage; -+ struct mutex init_mutex; /* Protects smmu pointer */ -+ struct iommu_domain domain; - }; - -+static struct iommu_ops arm_smmu_ops; -+#ifdef CONFIG_FSL_MC_BUS -+static struct iommu_ops arm_fsl_mc_smmu_ops; -+#endif -+ - static DEFINE_SPINLOCK(arm_smmu_devices_lock); - static LIST_HEAD(arm_smmu_devices); - -@@ -422,6 +367,43 @@ static struct arm_smmu_option_prop arm_smmu_options[] = { - { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, - { 0, NULL}, - }; -+#define CONFIG_AIOP_ERRATA -+#ifdef CONFIG_AIOP_ERRATA -+/* -+ * PL = 1, BMT = 1, VA = 1 -+ */ -+#define AIOP_SMR_VALUE 0x380 -+/* -+ * Following should be set: -+ * SHCFG: 0x3 -+ * MTCFG: 0x1 -+ * MemAttr: 0xf -+ * Type: 0x1 -+ * RACFG: 0x2 -+ * WACFG: 0x2 -+ */ -+#define AIOP_S2CR_VALUE 0xA1FB00 -+ -+static void arm_smmu_aiop_attr_trans(struct arm_smmu_device *smmu) -+{ -+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu); -+ u16 mask = 0x7c7f; -+ int index; -+ u32 reg; -+ /* 
reserve one smr group for AIOP */ -+ index = --smmu->num_mapping_groups; -+ -+ reg = SMR_VALID | AIOP_SMR_VALUE << SMR_ID_SHIFT | -+ mask << SMR_MASK_SHIFT; -+ writel(reg, gr0_base + ARM_SMMU_GR0_SMR(index)); -+ writel(AIOP_S2CR_VALUE, gr0_base + ARM_SMMU_GR0_S2CR(index)); -+} -+#endif -+ -+static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) -+{ -+ return container_of(dom, struct arm_smmu_domain, domain); -+} - - static void parse_driver_options(struct arm_smmu_device *smmu) - { -@@ -447,6 +429,16 @@ static struct device_node *dev_get_dev_node(struct device *dev) - return bus->bridge->parent->of_node; - } - -+#ifdef CONFIG_FSL_MC_BUS -+ if (dev->bus == &fsl_mc_bus_type) { -+ /* -+ * Get to the MC device tree node. -+ */ -+ while (dev->bus == &fsl_mc_bus_type) -+ dev = dev->parent; -+ } -+#endif -+ - return dev->of_node; - } - -@@ -590,7 +582,7 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx) - } - - /* Wait for any pending TLB invalidations to complete */ --static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) -+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu) - { - int count = 0; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); -@@ -608,12 +600,19 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) - } - } - --static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) -+static void arm_smmu_tlb_sync(void *cookie) - { -+ struct arm_smmu_domain *smmu_domain = cookie; -+ __arm_smmu_tlb_sync(smmu_domain->smmu); -+} -+ -+static void arm_smmu_tlb_inv_context(void *cookie) -+{ -+ struct arm_smmu_domain *smmu_domain = cookie; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct arm_smmu_device *smmu = smmu_domain->smmu; -- void __iomem *base = ARM_SMMU_GR0(smmu); - bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; -+ void __iomem *base; - - if (stage1) { - base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); -@@ -625,16 +624,83 @@ static void arm_smmu_tlb_inv_context(struct 
arm_smmu_domain *smmu_domain) - base + ARM_SMMU_GR0_TLBIVMID); - } - -- arm_smmu_tlb_sync(smmu); -+ __arm_smmu_tlb_sync(smmu); -+} -+ -+static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, -+ bool leaf, void *cookie) -+{ -+ struct arm_smmu_domain *smmu_domain = cookie; -+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg; -+ struct arm_smmu_device *smmu = smmu_domain->smmu; -+ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; -+ void __iomem *reg; -+ -+ if (stage1) { -+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); -+ reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; -+ -+ if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) { -+ iova &= ~12UL; -+ iova |= ARM_SMMU_CB_ASID(cfg); -+ writel_relaxed(iova, reg); -+#ifdef CONFIG_64BIT -+ } else { -+ iova >>= 12; -+ iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48; -+ writeq_relaxed(iova, reg); -+#endif -+ } -+#ifdef CONFIG_64BIT -+ } else if (smmu->version == ARM_SMMU_V2) { -+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); -+ reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : -+ ARM_SMMU_CB_S2_TLBIIPAS2; -+ writeq_relaxed(iova >> 12, reg); -+#endif -+ } else { -+ reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID; -+ writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg); -+ } -+} -+ -+static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie) -+{ -+ struct arm_smmu_domain *smmu_domain = cookie; -+ struct arm_smmu_device *smmu = smmu_domain->smmu; -+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK; -+ -+ -+ /* Ensure new page tables are visible to the hardware walker */ -+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { -+ dsb(ishst); -+ } else { -+ /* -+ * If the SMMU can't walk tables in the CPU caches, treat them -+ * like non-coherent DMA since we need to flush the new entries -+ * all the way out to memory. There's no possibility of -+ * recursion here as the SMMU table walker will not be wired -+ * through another SMMU. 
-+ */ -+ dma_map_page(smmu->dev, virt_to_page(addr), offset, size, -+ DMA_TO_DEVICE); -+ } - } - -+static struct iommu_gather_ops arm_smmu_gather_ops = { -+ .tlb_flush_all = arm_smmu_tlb_inv_context, -+ .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, -+ .tlb_sync = arm_smmu_tlb_sync, -+ .flush_pgtable = arm_smmu_flush_pgtable, -+}; -+ - static irqreturn_t arm_smmu_context_fault(int irq, void *dev) - { - int flags, ret; - u32 fsr, far, fsynr, resume; - unsigned long iova; - struct iommu_domain *domain = dev; -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *cb_base; -@@ -705,29 +771,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) - return IRQ_HANDLED; - } - --static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, -- size_t size) --{ -- unsigned long offset = (unsigned long)addr & ~PAGE_MASK; -- -- -- /* Ensure new page tables are visible to the hardware walker */ -- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { -- dsb(ishst); -- } else { -- /* -- * If the SMMU can't walk tables in the CPU caches, treat them -- * like non-coherent DMA since we need to flush the new entries -- * all the way out to memory. There's no possibility of -- * recursion here as the SMMU table walker will not be wired -- * through another SMMU. 
-- */ -- dma_map_page(smmu->dev, virt_to_page(addr), offset, size, -- DMA_TO_DEVICE); -- } --} -- --static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) -+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, -+ struct io_pgtable_cfg *pgtbl_cfg) - { - u32 reg; - bool stage1; -@@ -740,6 +785,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) - stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); - -+ if (smmu->version > ARM_SMMU_V1) { -+ /* -+ * CBA2R. -+ * *Must* be initialised before CBAR thanks to VMID16 -+ * architectural oversight affected some implementations. -+ */ -+#ifdef CONFIG_64BIT -+ reg = CBA2R_RW64_64BIT; -+#else -+ reg = CBA2R_RW64_32BIT; -+#endif -+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); -+ } -+ - /* CBAR */ - reg = cfg->cbar; - if (smmu->version == ARM_SMMU_V1) -@@ -757,135 +816,51 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) - } - writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); - -- if (smmu->version > ARM_SMMU_V1) { -- /* CBA2R */ --#ifdef CONFIG_64BIT -- reg = CBA2R_RW64_64BIT; --#else -- reg = CBA2R_RW64_32BIT; --#endif -- writel_relaxed(reg, -- gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); -- -- /* TTBCR2 */ -- switch (smmu->s1_input_size) { -- case 32: -- reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); -- break; -- case 36: -- reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); -- break; -- case 39: -- case 40: -- reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); -- break; -- case 42: -- reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); -- break; -- case 44: -- reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); -- break; -- case 48: -- reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); -- break; -- } -- -- switch (smmu->s1_output_size) { -- case 32: -- reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT); -- break; -- case 36: -- reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); -- 
break; -- case 39: -- case 40: -- reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); -- break; -- case 42: -- reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT); -- break; -- case 44: -- reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT); -- break; -- case 48: -- reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT); -- break; -- } -- -- if (stage1) -- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); -- } -+ /* TTBRs */ -+ if (stage1) { -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32; -+ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); - -- /* TTBR0 */ -- arm_smmu_flush_pgtable(smmu, cfg->pgd, -- PTRS_PER_PGD * sizeof(pgd_t)); -- reg = __pa(cfg->pgd); -- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); -- reg = (phys_addr_t)__pa(cfg->pgd) >> 32; -- if (stage1) -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO); -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32; - reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; -- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI); -+ } else { -+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); -+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); -+ } - -- /* -- * TTBCR -- * We use long descriptor, with inner-shareable WBWA tables in TTBR0. 
-- */ -- if (smmu->version > ARM_SMMU_V1) { -- if (PAGE_SIZE == SZ_4K) -- reg = TTBCR_TG0_4K; -- else -- reg = TTBCR_TG0_64K; -- -- if (!stage1) { -- reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT; -- -- switch (smmu->s2_output_size) { -- case 32: -- reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); -- break; -- case 36: -- reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT); -- break; -- case 40: -- reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT); -- break; -- case 42: -- reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT); -- break; -- case 44: -- reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT); -- break; -- case 48: -- reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT); -- break; -- } -- } else { -- reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT; -+ /* TTBCR */ -+ if (stage1) { -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); -+ if (smmu->version > ARM_SMMU_V1) { -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; -+ reg |= TTBCR2_SEP_UPSTREAM; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); - } - } else { -- reg = 0; -+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); - } - -- reg |= TTBCR_EAE | -- (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | -- (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | -- (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT); -- -- if (!stage1) -- reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); -- -- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); -- -- /* MAIR0 (stage-1 only) */ -+ /* MAIRs (stage-1 only) */ - if (stage1) { -- reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) | -- (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) | -- (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV)); -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1); - } - - /* SCTLR */ -- reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | 
SCTLR_EAE_SBOP; -+ /* Disable stall mode */ -+ reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; - if (stage1) - reg |= SCTLR_S1_ASIDPNE; - #ifdef __BIG_ENDIAN -@@ -898,27 +873,69 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, - struct arm_smmu_device *smmu) - { - int irq, start, ret = 0; -- unsigned long flags; -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ unsigned long ias, oas; -+ struct io_pgtable_ops *pgtbl_ops; -+ struct io_pgtable_cfg pgtbl_cfg; -+ enum io_pgtable_fmt fmt; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - -- spin_lock_irqsave(&smmu_domain->lock, flags); -+ mutex_lock(&smmu_domain->init_mutex); - if (smmu_domain->smmu) - goto out_unlock; - -- if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { -+ /* -+ * Mapping the requested stage onto what we support is surprisingly -+ * complicated, mainly because the spec allows S1+S2 SMMUs without -+ * support for nested translation. That means we end up with the -+ * following table: -+ * -+ * Requested Supported Actual -+ * S1 N S1 -+ * S1 S1+S2 S1 -+ * S1 S2 S2 -+ * S1 S1 S1 -+ * N N N -+ * N S1+S2 S2 -+ * N S2 S2 -+ * N S1 S1 -+ * -+ * Note that you can't actually request stage-2 mappings. -+ */ -+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) -+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2; -+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) -+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1; -+ -+ switch (smmu_domain->stage) { -+ case ARM_SMMU_DOMAIN_S1: -+ cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; -+ start = smmu->num_s2_context_banks; -+ ias = smmu->va_size; -+ oas = smmu->ipa_size; -+ if (IS_ENABLED(CONFIG_64BIT)) -+ fmt = ARM_64_LPAE_S1; -+ else -+ fmt = ARM_32_LPAE_S1; -+ break; -+ case ARM_SMMU_DOMAIN_NESTED: - /* - * We will likely want to change this if/when KVM gets - * involved. 
- */ -- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; -- start = smmu->num_s2_context_banks; -- } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) { -- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; -- start = smmu->num_s2_context_banks; -- } else { -+ case ARM_SMMU_DOMAIN_S2: - cfg->cbar = CBAR_TYPE_S2_TRANS; - start = 0; -+ ias = smmu->ipa_size; -+ oas = smmu->pa_size; -+ if (IS_ENABLED(CONFIG_64BIT)) -+ fmt = ARM_64_LPAE_S2; -+ else -+ fmt = ARM_32_LPAE_S2; -+ break; -+ default: -+ ret = -EINVAL; -+ goto out_unlock; - } - - ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, -@@ -934,10 +951,33 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, - cfg->irptndx = cfg->cbndx; - } - -- ACCESS_ONCE(smmu_domain->smmu) = smmu; -- arm_smmu_init_context_bank(smmu_domain); -- spin_unlock_irqrestore(&smmu_domain->lock, flags); -+ pgtbl_cfg = (struct io_pgtable_cfg) { -+ .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap, -+ .ias = ias, -+ .oas = oas, -+ .tlb = &arm_smmu_gather_ops, -+ }; -+ -+ smmu_domain->smmu = smmu; -+ pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); -+ if (!pgtbl_ops) { -+ ret = -ENOMEM; -+ goto out_clear_smmu; -+ } -+ -+ /* Update our support page sizes to reflect the page table format */ -+ arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; -+#ifdef CONFIG_FSL_MC_BUS -+ arm_fsl_mc_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; -+#endif -+ -+ /* Initialise the context bank with our page table cfg */ -+ arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg); - -+ /* -+ * Request context fault interrupt. Do this last to avoid the -+ * handler seeing a half-initialised domain state. 
-+ */ - irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; - ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, - "arm-smmu-context-fault", domain); -@@ -947,16 +987,22 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, - cfg->irptndx = INVALID_IRPTNDX; - } - -+ mutex_unlock(&smmu_domain->init_mutex); -+ -+ /* Publish page table ops for map/unmap */ -+ smmu_domain->pgtbl_ops = pgtbl_ops; - return 0; - -+out_clear_smmu: -+ smmu_domain->smmu = NULL; - out_unlock: -- spin_unlock_irqrestore(&smmu_domain->lock, flags); -+ mutex_unlock(&smmu_domain->init_mutex); - return ret; - } - - static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) - { -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_device *smmu = smmu_domain->smmu; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - void __iomem *cb_base; -@@ -965,24 +1011,30 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) - if (!smmu) - return; - -- /* Disable the context bank and nuke the TLB before freeing it. */ -+ /* -+ * Disable the context bank and free the page tables before freeing -+ * it. -+ */ - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); - writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); -- arm_smmu_tlb_inv_context(smmu_domain); - - if (cfg->irptndx != INVALID_IRPTNDX) { - irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; - free_irq(irq, domain); - } - -+ if (smmu_domain->pgtbl_ops) -+ free_io_pgtable_ops(smmu_domain->pgtbl_ops); -+ - __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); - } - --static int arm_smmu_domain_init(struct iommu_domain *domain) -+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) - { - struct arm_smmu_domain *smmu_domain; -- pgd_t *pgd; - -+ if (type != IOMMU_DOMAIN_UNMANAGED) -+ return NULL; - /* - * Allocate the domain and initialise some of its data structures. 
- * We can't really do anything meaningful until we've added a -@@ -990,95 +1042,23 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) - */ - smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); - if (!smmu_domain) -- return -ENOMEM; -+ return NULL; - -- pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL); -- if (!pgd) -- goto out_free_domain; -- smmu_domain->cfg.pgd = pgd; -+ mutex_init(&smmu_domain->init_mutex); -+ spin_lock_init(&smmu_domain->pgtbl_lock); - -- spin_lock_init(&smmu_domain->lock); -- domain->priv = smmu_domain; -- return 0; -- --out_free_domain: -- kfree(smmu_domain); -- return -ENOMEM; -+ return &smmu_domain->domain; - } - --static void arm_smmu_free_ptes(pmd_t *pmd) -+static void arm_smmu_domain_free(struct iommu_domain *domain) - { -- pgtable_t table = pmd_pgtable(*pmd); -- -- __free_page(table); --} -- --static void arm_smmu_free_pmds(pud_t *pud) --{ -- int i; -- pmd_t *pmd, *pmd_base = pmd_offset(pud, 0); -- -- pmd = pmd_base; -- for (i = 0; i < PTRS_PER_PMD; ++i) { -- if (pmd_none(*pmd)) -- continue; -- -- arm_smmu_free_ptes(pmd); -- pmd++; -- } -- -- pmd_free(NULL, pmd_base); --} -- --static void arm_smmu_free_puds(pgd_t *pgd) --{ -- int i; -- pud_t *pud, *pud_base = pud_offset(pgd, 0); -- -- pud = pud_base; -- for (i = 0; i < PTRS_PER_PUD; ++i) { -- if (pud_none(*pud)) -- continue; -- -- arm_smmu_free_pmds(pud); -- pud++; -- } -- -- pud_free(NULL, pud_base); --} -- --static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) --{ -- int i; -- struct arm_smmu_cfg *cfg = &smmu_domain->cfg; -- pgd_t *pgd, *pgd_base = cfg->pgd; -- -- /* -- * Recursively free the page tables for this domain. We don't -- * care about speculative TLB filling because the tables should -- * not be active in any context bank at this point (SCTLR.M is 0). 
-- */ -- pgd = pgd_base; -- for (i = 0; i < PTRS_PER_PGD; ++i) { -- if (pgd_none(*pgd)) -- continue; -- arm_smmu_free_puds(pgd); -- pgd++; -- } -- -- kfree(pgd_base); --} -- --static void arm_smmu_domain_destroy(struct iommu_domain *domain) --{ -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - - /* - * Free the domain resources. We assume that all devices have - * already been detached. - */ - arm_smmu_destroy_domain_context(domain); -- arm_smmu_free_pgtables(smmu_domain); - kfree(smmu_domain); - } - -@@ -1113,7 +1093,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, - - smrs[i] = (struct arm_smmu_smr) { - .idx = idx, -- .mask = 0, /* We don't currently share SMRs */ -+ .mask = cfg->mask, - .id = cfg->streamids[i], - }; - } -@@ -1209,8 +1189,8 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, - static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) - { - int ret; -- struct arm_smmu_domain *smmu_domain = domain->priv; -- struct arm_smmu_device *smmu, *dom_smmu; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); -+ struct arm_smmu_device *smmu; - struct arm_smmu_master_cfg *cfg; - - smmu = find_smmu_for_device(dev); -@@ -1224,21 +1204,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) - return -EEXIST; - } - -+ /* Ensure that the domain is finalised */ -+ ret = arm_smmu_init_domain_context(domain, smmu); -+ if (IS_ERR_VALUE(ret)) -+ return ret; -+ - /* - * Sanity check the domain. We don't support domains across - * different SMMUs. 
- */ -- dom_smmu = ACCESS_ONCE(smmu_domain->smmu); -- if (!dom_smmu) { -- /* Now that we have a master, we can finalise the domain */ -- ret = arm_smmu_init_domain_context(domain, smmu); -- if (IS_ERR_VALUE(ret)) -- return ret; -- -- dom_smmu = smmu_domain->smmu; -- } -- -- if (dom_smmu != smmu) { -+ if (smmu_domain->smmu != smmu) { - dev_err(dev, - "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", - dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); -@@ -1258,7 +1233,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) - - static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) - { -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_master_cfg *cfg; - - cfg = find_smmu_master_cfg(dev); -@@ -1269,292 +1244,106 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) - arm_smmu_domain_remove_master(smmu_domain, cfg); - } - --static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, -- unsigned long end) --{ -- return !(addr & ~ARM_SMMU_PTE_CONT_MASK) && -- (addr + ARM_SMMU_PTE_CONT_SIZE <= end); --} -- --static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, -- unsigned long addr, unsigned long end, -- unsigned long pfn, int prot, int stage) --{ -- pte_t *pte, *start; -- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; -- -- if (pmd_none(*pmd)) { -- /* Allocate a new set of tables */ -- pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO); -- -- if (!table) -- return -ENOMEM; -- -- arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE); -- pmd_populate(NULL, pmd, table); -- arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); -- } -- -- if (stage == 1) { -- pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG; -- if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) -- pteval |= ARM_SMMU_PTE_AP_RDONLY; -- -- if (prot & 
IOMMU_CACHE) -- pteval |= (MAIR_ATTR_IDX_CACHE << -- ARM_SMMU_PTE_ATTRINDX_SHIFT); -- } else { -- pteval |= ARM_SMMU_PTE_HAP_FAULT; -- if (prot & IOMMU_READ) -- pteval |= ARM_SMMU_PTE_HAP_READ; -- if (prot & IOMMU_WRITE) -- pteval |= ARM_SMMU_PTE_HAP_WRITE; -- if (prot & IOMMU_CACHE) -- pteval |= ARM_SMMU_PTE_MEMATTR_OIWB; -- else -- pteval |= ARM_SMMU_PTE_MEMATTR_NC; -- } -- -- /* If no access, create a faulting entry to avoid TLB fills */ -- if (prot & IOMMU_EXEC) -- pteval &= ~ARM_SMMU_PTE_XN; -- else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) -- pteval &= ~ARM_SMMU_PTE_PAGE; -- -- pteval |= ARM_SMMU_PTE_SH_IS; -- start = pmd_page_vaddr(*pmd) + pte_index(addr); -- pte = start; -- -- /* -- * Install the page table entries. This is fairly complicated -- * since we attempt to make use of the contiguous hint in the -- * ptes where possible. The contiguous hint indicates a series -- * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically -- * contiguous region with the following constraints: -- * -- * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE -- * - Each pte in the region has the contiguous hint bit set -- * -- * This complicates unmapping (also handled by this code, when -- * neither IOMMU_READ or IOMMU_WRITE are set) because it is -- * possible, yet highly unlikely, that a client may unmap only -- * part of a contiguous range. This requires clearing of the -- * contiguous hint bits in the range before installing the new -- * faulting entries. -- * -- * Note that re-mapping an address range without first unmapping -- * it is not supported, so TLB invalidation is not required here -- * and is instead performed at unmap and domain-init time. 
-- */ -- do { -- int i = 1; -- -- pteval &= ~ARM_SMMU_PTE_CONT; -- -- if (arm_smmu_pte_is_contiguous_range(addr, end)) { -- i = ARM_SMMU_PTE_CONT_ENTRIES; -- pteval |= ARM_SMMU_PTE_CONT; -- } else if (pte_val(*pte) & -- (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) { -- int j; -- pte_t *cont_start; -- unsigned long idx = pte_index(addr); -- -- idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1); -- cont_start = pmd_page_vaddr(*pmd) + idx; -- for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j) -- pte_val(*(cont_start + j)) &= -- ~ARM_SMMU_PTE_CONT; -- -- arm_smmu_flush_pgtable(smmu, cont_start, -- sizeof(*pte) * -- ARM_SMMU_PTE_CONT_ENTRIES); -- } -- -- do { -- *pte = pfn_pte(pfn, __pgprot(pteval)); -- } while (pte++, pfn++, addr += PAGE_SIZE, --i); -- } while (addr != end); -- -- arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start)); -- return 0; --} -- --static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, -- unsigned long addr, unsigned long end, -- phys_addr_t phys, int prot, int stage) -+static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, -+ phys_addr_t paddr, size_t size, int prot) - { - int ret; -- pmd_t *pmd; -- unsigned long next, pfn = __phys_to_pfn(phys); -- --#ifndef __PAGETABLE_PMD_FOLDED -- if (pud_none(*pud)) { -- pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); -- if (!pmd) -- return -ENOMEM; -- -- arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE); -- pud_populate(NULL, pud, pmd); -- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); -- -- pmd += pmd_index(addr); -- } else --#endif -- pmd = pmd_offset(pud, addr); -+ unsigned long flags; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); -+ struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; - -- do { -- next = pmd_addr_end(addr, end); -- ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn, -- prot, stage); -- phys += next - addr; -- pfn = __phys_to_pfn(phys); -- } while (pmd++, addr = next, addr < end); -+ if (!ops) -+ return -ENODEV; - -+ 
spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); -+ ret = ops->map(ops, iova, paddr, size, prot); -+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; - } - --static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, -- unsigned long addr, unsigned long end, -- phys_addr_t phys, int prot, int stage) -+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, -+ size_t size) - { -- int ret = 0; -- pud_t *pud; -- unsigned long next; -- --#ifndef __PAGETABLE_PUD_FOLDED -- if (pgd_none(*pgd)) { -- pud = (pud_t *)get_zeroed_page(GFP_ATOMIC); -- if (!pud) -- return -ENOMEM; -- -- arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE); -- pgd_populate(NULL, pgd, pud); -- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); -- -- pud += pud_index(addr); -- } else --#endif -- pud = pud_offset(pgd, addr); -+ size_t ret; -+ unsigned long flags; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); -+ struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; - -- do { -- next = pud_addr_end(addr, end); -- ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, -- prot, stage); -- phys += next - addr; -- } while (pud++, addr = next, addr < end); -+ if (!ops) -+ return 0; - -+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); -+ ret = ops->unmap(ops, iova, size); -+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; - } - --static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, -- unsigned long iova, phys_addr_t paddr, -- size_t size, int prot) -+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, -+ dma_addr_t iova) - { -- int ret, stage; -- unsigned long end; -- phys_addr_t input_mask, output_mask; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_device *smmu = smmu_domain->smmu; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; -- pgd_t *pgd = cfg->pgd; -- unsigned long flags; -+ struct io_pgtable_ops *ops= 
smmu_domain->pgtbl_ops; -+ struct device *dev = smmu->dev; -+ void __iomem *cb_base; -+ u32 tmp; -+ u64 phys; -+ -+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); - -- if (cfg->cbar == CBAR_TYPE_S2_TRANS) { -- stage = 2; -- input_mask = (1ULL << smmu->s2_input_size) - 1; -- output_mask = (1ULL << smmu->s2_output_size) - 1; -+ if (smmu->version == 1) { -+ u32 reg = iova & ~0xfff; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO); - } else { -- stage = 1; -- input_mask = (1ULL << smmu->s1_input_size) - 1; -- output_mask = (1ULL << smmu->s1_output_size) - 1; -+ u32 reg = iova & ~0xfff; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO); -+ reg = ((u64)iova & ~0xfff) >> 32; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI); - } - -- if (!pgd) -- return -EINVAL; -- -- if (size & ~PAGE_MASK) -- return -EINVAL; -- -- if ((phys_addr_t)iova & ~input_mask) -- return -ERANGE; -- -- if (paddr & ~output_mask) -- return -ERANGE; -- -- spin_lock_irqsave(&smmu_domain->lock, flags); -- pgd += pgd_index(iova); -- end = iova + size; -- do { -- unsigned long next = pgd_addr_end(iova, end); -- -- ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr, -- prot, stage); -- if (ret) -- goto out_unlock; -- -- paddr += next - iova; -- iova = next; -- } while (pgd++, iova != end); -- --out_unlock: -- spin_unlock_irqrestore(&smmu_domain->lock, flags); -- -- return ret; --} -- --static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, -- phys_addr_t paddr, size_t size, int prot) --{ -- struct arm_smmu_domain *smmu_domain = domain->priv; -- -- if (!smmu_domain) -- return -ENODEV; -+ if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, -+ !(tmp & ATSR_ACTIVE), 5, 50)) { -+ dev_err(dev, -+ "iova to phys timed out on 0x%pad. 
Falling back to software table walk.\n", -+ &iova); -+ return ops->iova_to_phys(ops, iova); -+ } - -- return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot); --} -+ phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO); -+ phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32; - --static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, -- size_t size) --{ -- int ret; -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ if (phys & CB_PAR_F) { -+ dev_err(dev, "translation fault!\n"); -+ dev_err(dev, "PAR = 0x%llx\n", phys); -+ return 0; -+ } - -- ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); -- arm_smmu_tlb_inv_context(smmu_domain); -- return ret ? 0 : size; -+ return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff); - } - - static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, -- dma_addr_t iova) -+ dma_addr_t iova) - { -- pgd_t *pgdp, pgd; -- pud_t pud; -- pmd_t pmd; -- pte_t pte; -- struct arm_smmu_domain *smmu_domain = domain->priv; -- struct arm_smmu_cfg *cfg = &smmu_domain->cfg; -- -- pgdp = cfg->pgd; -- if (!pgdp) -- return 0; -- -- pgd = *(pgdp + pgd_index(iova)); -- if (pgd_none(pgd)) -- return 0; -+ phys_addr_t ret; -+ unsigned long flags; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); -+ struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; - -- pud = *pud_offset(&pgd, iova); -- if (pud_none(pud)) -+ if (!ops) - return 0; - -- pmd = *pmd_offset(&pud, iova); -- if (pmd_none(pmd)) -- return 0; -+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); -+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && -+ smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { -+ ret = arm_smmu_iova_to_phys_hard(domain, iova); -+ } else { -+ ret = ops->iova_to_phys(ops, iova); -+ } - -- pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); -- if (pte_none(pte)) -- return 0; -+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - -- return __pfn_to_phys(pte_pfn(pte)) | (iova & 
~PAGE_MASK); -+ return ret; - } - - static bool arm_smmu_capable(enum iommu_cap cap) -@@ -1568,6 +1357,8 @@ static bool arm_smmu_capable(enum iommu_cap cap) - return true; - case IOMMU_CAP_INTR_REMAP: - return true; /* MSIs are just memory writes */ -+ case IOMMU_CAP_NOEXEC: -+ return true; - default: - return false; - } -@@ -1584,81 +1375,248 @@ static void __arm_smmu_release_pci_iommudata(void *data) - kfree(data); - } - --static int arm_smmu_add_device(struct device *dev) -+static int arm_smmu_add_pci_device(struct pci_dev *pdev) - { -- struct arm_smmu_device *smmu; -+ int i, ret; -+ u16 sid; -+ struct iommu_group *group; - struct arm_smmu_master_cfg *cfg; -+#ifdef CONFIG_PCI_LAYERSCAPE -+ u32 streamid; -+#endif -+ -+ group = iommu_group_get_for_dev(&pdev->dev); -+ if (IS_ERR(group)) -+ return PTR_ERR(group); -+ -+ cfg = iommu_group_get_iommudata(group); -+ if (!cfg) { -+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); -+ if (!cfg) { -+ ret = -ENOMEM; -+ goto out_put_group; -+ } -+ -+ iommu_group_set_iommudata(group, cfg, -+ __arm_smmu_release_pci_iommudata); -+ } -+ -+ if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) { -+ ret = -ENOSPC; -+ goto out_put_group; -+ } -+ -+ /* -+ * Assume Stream ID == Requester ID for now. -+ * We need a way to describe the ID mappings in FDT. 
-+ */ -+ pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid); -+ for (i = 0; i < cfg->num_streamids; ++i) -+ if (cfg->streamids[i] == sid) -+ break; -+ -+ /* Avoid duplicate SIDs, as this can lead to SMR conflicts */ -+ if (i == cfg->num_streamids) -+ cfg->streamids[cfg->num_streamids++] = sid; -+ -+#ifdef CONFIG_PCI_LAYERSCAPE -+ streamid = set_pcie_streamid_translation(pdev, sid); -+ if (~streamid == 0) { -+ ret = -ENODEV; -+ goto out_put_group; -+ } -+ cfg->streamids[0] = streamid; -+ cfg->mask = 0x7c00; -+ -+ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; -+ pdev->dma_alias_devid = streamid; -+#endif -+ -+ return 0; -+out_put_group: -+ iommu_group_put(group); -+ return ret; -+} -+ -+static int arm_smmu_add_platform_device(struct device *dev) -+{ - struct iommu_group *group; -- void (*releasefn)(void *) = NULL; -- int ret; -+ struct arm_smmu_master *master; -+ struct arm_smmu_device *smmu = find_smmu_for_device(dev); - -- smmu = find_smmu_for_device(dev); - if (!smmu) - return -ENODEV; - -+ master = find_smmu_master(smmu, dev->of_node); -+ if (!master) -+ return -ENODEV; -+ -+ /* No automatic group creation for platform devices */ - group = iommu_group_alloc(); -- if (IS_ERR(group)) { -- dev_err(dev, "Failed to allocate IOMMU group\n"); -+ if (IS_ERR(group)) - return PTR_ERR(group); -+ -+ iommu_group_set_iommudata(group, &master->cfg, NULL); -+ return iommu_group_add_device(group, dev); -+} -+ -+static int arm_smmu_add_device(struct device *dev) -+{ -+ if (dev_is_pci(dev)) -+ return arm_smmu_add_pci_device(to_pci_dev(dev)); -+ -+ return arm_smmu_add_platform_device(dev); -+} -+ -+static void arm_smmu_remove_device(struct device *dev) -+{ -+ iommu_group_remove_device(dev); -+} -+ -+static int arm_smmu_domain_get_attr(struct iommu_domain *domain, -+ enum iommu_attr attr, void *data) -+{ -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); -+ -+ switch (attr) { -+ case DOMAIN_ATTR_NESTING: -+ *(int *)data = (smmu_domain->stage == 
ARM_SMMU_DOMAIN_NESTED); -+ return 0; -+ default: -+ return -ENODEV; - } -+} - -- if (dev_is_pci(dev)) { -- struct pci_dev *pdev = to_pci_dev(dev); -+static int arm_smmu_domain_set_attr(struct iommu_domain *domain, -+ enum iommu_attr attr, void *data) -+{ -+ int ret = 0; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - -- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); -- if (!cfg) { -- ret = -ENOMEM; -- goto out_put_group; -+ mutex_lock(&smmu_domain->init_mutex); -+ -+ switch (attr) { -+ case DOMAIN_ATTR_NESTING: -+ if (smmu_domain->smmu) { -+ ret = -EPERM; -+ goto out_unlock; - } - -- cfg->num_streamids = 1; -+ if (*(int *)data) -+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; -+ else -+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1; -+ -+ break; -+ default: -+ ret = -ENODEV; -+ } -+ -+out_unlock: -+ mutex_unlock(&smmu_domain->init_mutex); -+ return ret; -+} -+ -+static struct iommu_ops arm_smmu_ops = { -+ .capable = arm_smmu_capable, -+ .domain_alloc = arm_smmu_domain_alloc, -+ .domain_free = arm_smmu_domain_free, -+ .attach_dev = arm_smmu_attach_dev, -+ .detach_dev = arm_smmu_detach_dev, -+ .map = arm_smmu_map, -+ .unmap = arm_smmu_unmap, -+ .iova_to_phys = arm_smmu_iova_to_phys, -+ .add_device = arm_smmu_add_device, -+ .remove_device = arm_smmu_remove_device, -+ .domain_get_attr = arm_smmu_domain_get_attr, -+ .domain_set_attr = arm_smmu_domain_set_attr, -+ .pgsize_bitmap = -1UL, /* Restricted during device attach */ -+}; -+ -+#ifdef CONFIG_FSL_MC_BUS -+ -+static void arm_smmu_release_fsl_mc_iommudata(void *data) -+{ -+ kfree(data); -+} -+ -+/* -+ * IOMMU group creation and stream ID programming for -+ * the LS devices -+ * -+ */ -+static int arm_fsl_mc_smmu_add_device(struct device *dev) -+{ -+ struct device *cont_dev; -+ struct fsl_mc_device *mc_dev; -+ struct iommu_group *group; -+ struct arm_smmu_master_cfg *cfg; -+ int ret = 0; -+ -+ mc_dev = to_fsl_mc_device(dev); -+ if (mc_dev->flags & FSL_MC_IS_DPRC) -+ cont_dev = dev; -+ else -+ cont_dev = 
mc_dev->dev.parent; -+ -+ get_device(cont_dev); -+ group = iommu_group_get(cont_dev); -+ put_device(cont_dev); -+ if (!group) { -+ void (*releasefn)(void *) = NULL; -+ -+ group = iommu_group_alloc(); -+ if (IS_ERR(group)) -+ return PTR_ERR(group); - /* -- * Assume Stream ID == Requester ID for now. -- * We need a way to describe the ID mappings in FDT. -+ * allocate the cfg for the container and associate it with -+ * the iommu group. In the find cfg function we get the cfg -+ * from the iommu group. - */ -- pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, -- &cfg->streamids[0]); -- releasefn = __arm_smmu_release_pci_iommudata; -- } else { -- struct arm_smmu_master *master; -- -- master = find_smmu_master(smmu, dev->of_node); -- if (!master) { -- ret = -ENODEV; -- goto out_put_group; -- } -+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); -+ if (!cfg) -+ return -ENOMEM; - -- cfg = &master->cfg; -+ mc_dev = to_fsl_mc_device(cont_dev); -+ cfg->num_streamids = 1; -+ cfg->streamids[0] = mc_dev->icid; -+ cfg->mask = 0x7c00; -+ releasefn = arm_smmu_release_fsl_mc_iommudata; -+ iommu_group_set_iommudata(group, cfg, releasefn); -+ ret = iommu_group_add_device(group, cont_dev); - } - -- iommu_group_set_iommudata(group, cfg, releasefn); -- ret = iommu_group_add_device(group, dev); -+ if (!ret && cont_dev != dev) -+ ret = iommu_group_add_device(group, dev); - --out_put_group: - iommu_group_put(group); -+ - return ret; - } - --static void arm_smmu_remove_device(struct device *dev) -+static void arm_fsl_mc_smmu_remove_device(struct device *dev) - { - iommu_group_remove_device(dev); -+ - } - --static const struct iommu_ops arm_smmu_ops = { -- .capable = arm_smmu_capable, -- .domain_init = arm_smmu_domain_init, -- .domain_destroy = arm_smmu_domain_destroy, -- .attach_dev = arm_smmu_attach_dev, -- .detach_dev = arm_smmu_detach_dev, -- .map = arm_smmu_map, -- .unmap = arm_smmu_unmap, -- .iova_to_phys = arm_smmu_iova_to_phys, -- .add_device = arm_smmu_add_device, -- .remove_device = 
arm_smmu_remove_device, -- .pgsize_bitmap = (SECTION_SIZE | -- ARM_SMMU_PTE_CONT_SIZE | -- PAGE_SIZE), -+static struct iommu_ops arm_fsl_mc_smmu_ops = { -+ .capable = arm_smmu_capable, -+ .domain_alloc = arm_smmu_domain_alloc, -+ .domain_free = arm_smmu_domain_free, -+ .attach_dev = arm_smmu_attach_dev, -+ .detach_dev = arm_smmu_detach_dev, -+ .map = arm_smmu_map, -+ .unmap = arm_smmu_unmap, -+ .map_sg = default_iommu_map_sg, -+ .iova_to_phys = arm_smmu_iova_to_phys, -+ .add_device = arm_fsl_mc_smmu_add_device, -+ .remove_device = arm_fsl_mc_smmu_remove_device, -+ .domain_get_attr = arm_smmu_domain_get_attr, -+ .domain_set_attr = arm_smmu_domain_set_attr, -+ .pgsize_bitmap = -1UL, /* Restricted during device attach */ - }; -+#endif - - static void arm_smmu_device_reset(struct arm_smmu_device *smmu) - { -@@ -1686,7 +1644,6 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) - } - - /* Invalidate the TLB, just in case */ -- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); - -@@ -1708,7 +1665,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) - reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); - - /* Push the button */ -- arm_smmu_tlb_sync(smmu); -+ __arm_smmu_tlb_sync(smmu); - writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); - } - -@@ -1742,12 +1699,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) - - /* ID0 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); --#ifndef CONFIG_64BIT -- if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) { -- dev_err(smmu->dev, "\tno v7 descriptor support!\n"); -- return -ENODEV; -- } --#endif - - /* Restrict available stages based on module parameter */ - if (force_stage == 1) -@@ -1776,6 +1727,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) - return -ENODEV; - } - -+ if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & 
ID0_ATOSNS))) { -+ smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; -+ dev_notice(smmu->dev, "\taddress translation ops\n"); -+ } -+ - if (id & ID0_CTTW) { - smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; - dev_notice(smmu->dev, "\tcoherent table walk\n"); -@@ -1820,16 +1776,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) - smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; - - /* Check for size mismatch of SMMU address space from mapped region */ -- size = 1 << -- (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); -+ size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); - size *= 2 << smmu->pgshift; - if (smmu->size != size) - dev_warn(smmu->dev, - "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", - size, smmu->size); - -- smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & -- ID1_NUMS2CB_MASK; -+ smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; - smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; - if (smmu->num_s2_context_banks > smmu->num_context_banks) { - dev_err(smmu->dev, "impossible number of S2 context banks!\n"); -@@ -1841,46 +1795,49 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) - /* ID2 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); - size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); -- smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); -- -- /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */ --#ifdef CONFIG_64BIT -- smmu->s2_input_size = min_t(unsigned long, VA_BITS, size); --#else -- smmu->s2_input_size = min(32UL, size); --#endif -+ smmu->ipa_size = size; - -- /* The stage-2 output mask is also applied for bypass */ -+ /* The output mask is also applied for bypass */ - size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); -- smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); -+ smmu->pa_size = size; -+ -+ /* 
-+ * What the page table walker can address actually depends on which -+ * descriptor format is in use, but since a) we don't know that yet, -+ * and b) it can vary per context bank, this will have to do... -+ */ -+ if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size))) -+ dev_warn(smmu->dev, -+ "failed to set DMA mask for table walker\n"); - - if (smmu->version == ARM_SMMU_V1) { -- smmu->s1_input_size = 32; -+ smmu->va_size = smmu->ipa_size; -+ size = SZ_4K | SZ_2M | SZ_1G; - } else { --#ifdef CONFIG_64BIT - size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; -- size = min(VA_BITS, arm_smmu_id_size_to_bits(size)); --#else -- size = 32; -+ smmu->va_size = arm_smmu_id_size_to_bits(size); -+#ifndef CONFIG_64BIT -+ smmu->va_size = min(32UL, smmu->va_size); - #endif -- smmu->s1_input_size = size; -- -- if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) || -- (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) || -- (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) { -- dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n", -- PAGE_SIZE); -- return -ENODEV; -- } -+ size = 0; -+ if (id & ID2_PTFS_4K) -+ size |= SZ_4K | SZ_2M | SZ_1G; -+ if (id & ID2_PTFS_16K) -+ size |= SZ_16K | SZ_32M; -+ if (id & ID2_PTFS_64K) -+ size |= SZ_64K | SZ_512M; - } - -+ arm_smmu_ops.pgsize_bitmap &= size; -+ dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size); -+ - if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) - dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", -- smmu->s1_input_size, smmu->s1_output_size); -+ smmu->va_size, smmu->ipa_size); - - if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) - dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", -- smmu->s2_input_size, smmu->s2_output_size); -+ smmu->ipa_size, smmu->pa_size); - - return 0; - } -@@ -2007,6 +1964,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) - spin_unlock(&arm_smmu_devices_lock); - - arm_smmu_device_reset(smmu); -+ /* AIOP Rev1 errata work around */ -+#ifdef CONFIG_AIOP_ERRATA -+ 
arm_smmu_aiop_attr_trans(smmu); -+#endif - return 0; - - out_free_irqs: -@@ -2062,7 +2023,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev) - - static struct platform_driver arm_smmu_driver = { - .driver = { -- .owner = THIS_MODULE, - .name = "arm-smmu", - .of_match_table = of_match_ptr(arm_smmu_of_match), - }, -@@ -2072,8 +2032,20 @@ static struct platform_driver arm_smmu_driver = { - - static int __init arm_smmu_init(void) - { -+ struct device_node *np; - int ret; - -+ /* -+ * Play nice with systems that don't have an ARM SMMU by checking that -+ * an ARM SMMU exists in the system before proceeding with the driver -+ * and IOMMU bus operation registration. -+ */ -+ np = of_find_matching_node(NULL, arm_smmu_of_match); -+ if (!np) -+ return 0; -+ -+ of_node_put(np); -+ - ret = platform_driver_register(&arm_smmu_driver); - if (ret) - return ret; -@@ -2092,6 +2064,10 @@ static int __init arm_smmu_init(void) - bus_set_iommu(&pci_bus_type, &arm_smmu_ops); - #endif - -+#ifdef CONFIG_FSL_MC_BUS -+ if (!iommu_present(&fsl_mc_bus_type)) -+ bus_set_iommu(&fsl_mc_bus_type, &arm_fsl_mc_smmu_ops); -+#endif - return 0; - } - -diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c -index 7423318..7ce5273 100644 ---- a/drivers/iommu/exynos-iommu.c -+++ b/drivers/iommu/exynos-iommu.c -@@ -684,7 +684,6 @@ static const struct of_device_id sysmmu_of_match[] __initconst = { - static struct platform_driver exynos_sysmmu_driver __refdata = { - .probe = exynos_sysmmu_probe, - .driver = { -- .owner = THIS_MODULE, - .name = "exynos-sysmmu", - .of_match_table = sysmmu_of_match, - } -@@ -1178,6 +1177,7 @@ static const struct iommu_ops exynos_iommu_ops = { - .detach_dev = exynos_iommu_detach_device, - .map = exynos_iommu_map, - .unmap = exynos_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = exynos_iommu_iova_to_phys, - .add_device = exynos_iommu_add_device, - .remove_device = exynos_iommu_remove_device, -diff --git 
a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c -index 2b6ce93..9396c85 100644 ---- a/drivers/iommu/fsl_pamu.c -+++ b/drivers/iommu/fsl_pamu.c -@@ -31,7 +31,7 @@ - #include - #include - #include --#include -+#include - - #include "fsl_pamu.h" - -@@ -1227,7 +1227,6 @@ static const struct of_device_id fsl_of_pamu_ids[] = { - static struct platform_driver fsl_of_pamu_driver = { - .driver = { - .name = "fsl-of-pamu", -- .owner = THIS_MODULE, - }, - .probe = fsl_pamu_probe, - }; -diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c -index 3d1fc73..9e97328 100644 ---- a/drivers/iommu/intel-iommu.c -+++ b/drivers/iommu/intel-iommu.c -@@ -4474,6 +4474,7 @@ static const struct iommu_ops intel_iommu_ops = { - .detach_dev = intel_iommu_detach_device, - .map = intel_iommu_map, - .unmap = intel_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = intel_iommu_iova_to_phys, - .add_device = intel_iommu_add_device, - .remove_device = intel_iommu_remove_device, -diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c -new file mode 100644 -index 0000000..fd6dd22 ---- /dev/null -+++ b/drivers/iommu/io-pgtable-arm.c -@@ -0,0 +1,997 @@ -+/* -+ * CPU-agnostic ARM page table allocator. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ * -+ * Copyright (C) 2014 ARM Limited -+ * -+ * Author: Will Deacon -+ */ -+ -+#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "io-pgtable.h" -+ -+#define ARM_LPAE_MAX_ADDR_BITS 48 -+#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16 -+#define ARM_LPAE_MAX_LEVELS 4 -+ -+/* Struct accessors */ -+#define io_pgtable_to_data(x) \ -+ container_of((x), struct arm_lpae_io_pgtable, iop) -+ -+#define io_pgtable_ops_to_pgtable(x) \ -+ container_of((x), struct io_pgtable, ops) -+ -+#define io_pgtable_ops_to_data(x) \ -+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) -+ -+/* -+ * For consistency with the architecture, we always consider -+ * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0 -+ */ -+#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels) -+ -+/* -+ * Calculate the right shift amount to get to the portion describing level l -+ * in a virtual address mapped by the pagetable in d. -+ */ -+#define ARM_LPAE_LVL_SHIFT(l,d) \ -+ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ -+ * (d)->bits_per_level) + (d)->pg_shift) -+ -+#define ARM_LPAE_PAGES_PER_PGD(d) \ -+ DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) -+ -+/* -+ * Calculate the index at level l used to map virtual address a using the -+ * pagetable in d. -+ */ -+#define ARM_LPAE_PGD_IDX(l,d) \ -+ ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) -+ -+#define ARM_LPAE_LVL_IDX(a,l,d) \ -+ (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ -+ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) -+ -+/* Calculate the block/page mapping size at level l for pagetable in d. 
*/ -+#define ARM_LPAE_BLOCK_SIZE(l,d) \ -+ (1 << (ilog2(sizeof(arm_lpae_iopte)) + \ -+ ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) -+ -+/* Page table bits */ -+#define ARM_LPAE_PTE_TYPE_SHIFT 0 -+#define ARM_LPAE_PTE_TYPE_MASK 0x3 -+ -+#define ARM_LPAE_PTE_TYPE_BLOCK 1 -+#define ARM_LPAE_PTE_TYPE_TABLE 3 -+#define ARM_LPAE_PTE_TYPE_PAGE 3 -+ -+#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) -+#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) -+#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) -+#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8) -+#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8) -+#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8) -+#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5) -+#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0) -+ -+#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2) -+/* Ignore the contiguous bit for block splitting */ -+#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) -+#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \ -+ ARM_LPAE_PTE_ATTR_HI_MASK) -+ -+/* Stage-1 PTE */ -+#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) -+#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) -+#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 -+#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) -+ -+/* Stage-2 PTE */ -+#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6) -+#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6) -+#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6) -+#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2) -+#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2) -+#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2) -+ -+/* Register bits */ -+#define ARM_32_LPAE_TCR_EAE (1 << 31) -+#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) -+ -+#define ARM_LPAE_TCR_EPD1 (1 << 23) -+ -+#define ARM_LPAE_TCR_TG0_4K (0 << 14) -+#define ARM_LPAE_TCR_TG0_64K (1 << 14) -+#define ARM_LPAE_TCR_TG0_16K (2 << 14) -+ -+#define 
ARM_LPAE_TCR_SH0_SHIFT 12 -+#define ARM_LPAE_TCR_SH0_MASK 0x3 -+#define ARM_LPAE_TCR_SH_NS 0 -+#define ARM_LPAE_TCR_SH_OS 2 -+#define ARM_LPAE_TCR_SH_IS 3 -+ -+#define ARM_LPAE_TCR_ORGN0_SHIFT 10 -+#define ARM_LPAE_TCR_IRGN0_SHIFT 8 -+#define ARM_LPAE_TCR_RGN_MASK 0x3 -+#define ARM_LPAE_TCR_RGN_NC 0 -+#define ARM_LPAE_TCR_RGN_WBWA 1 -+#define ARM_LPAE_TCR_RGN_WT 2 -+#define ARM_LPAE_TCR_RGN_WB 3 -+ -+#define ARM_LPAE_TCR_SL0_SHIFT 6 -+#define ARM_LPAE_TCR_SL0_MASK 0x3 -+ -+#define ARM_LPAE_TCR_T0SZ_SHIFT 0 -+#define ARM_LPAE_TCR_SZ_MASK 0xf -+ -+#define ARM_LPAE_TCR_PS_SHIFT 16 -+#define ARM_LPAE_TCR_PS_MASK 0x7 -+ -+#define ARM_LPAE_TCR_IPS_SHIFT 32 -+#define ARM_LPAE_TCR_IPS_MASK 0x7 -+ -+#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL -+#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL -+#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL -+#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL -+#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL -+#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL -+ -+#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) -+#define ARM_LPAE_MAIR_ATTR_MASK 0xff -+#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 -+#define ARM_LPAE_MAIR_ATTR_NC 0x44 -+#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff -+#define ARM_LPAE_MAIR_ATTR_IDX_NC 0 -+#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 -+#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 -+ -+/* IOPTE accessors */ -+#define iopte_deref(pte,d) \ -+ (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \ -+ & ~((1ULL << (d)->pg_shift) - 1))) -+ -+#define iopte_type(pte,l) \ -+ (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) -+ -+#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) -+ -+#define iopte_leaf(pte,l) \ -+ (l == (ARM_LPAE_MAX_LEVELS - 1) ? 
\ -+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \ -+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK)) -+ -+#define iopte_to_pfn(pte,d) \ -+ (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift) -+ -+#define pfn_to_iopte(pfn,d) \ -+ (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) -+ -+struct arm_lpae_io_pgtable { -+ struct io_pgtable iop; -+ -+ int levels; -+ size_t pgd_size; -+ unsigned long pg_shift; -+ unsigned long bits_per_level; -+ -+ void *pgd; -+}; -+ -+typedef u64 arm_lpae_iopte; -+ -+static bool selftest_running = false; -+ -+static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, -+ unsigned long iova, phys_addr_t paddr, -+ arm_lpae_iopte prot, int lvl, -+ arm_lpae_iopte *ptep) -+{ -+ arm_lpae_iopte pte = prot; -+ -+ /* We require an unmap first */ -+ if (iopte_leaf(*ptep, lvl)) { -+ WARN_ON(!selftest_running); -+ return -EEXIST; -+ } -+ -+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) -+ pte |= ARM_LPAE_PTE_NS; -+ -+ if (lvl == ARM_LPAE_MAX_LEVELS - 1) -+ pte |= ARM_LPAE_PTE_TYPE_PAGE; -+ else -+ pte |= ARM_LPAE_PTE_TYPE_BLOCK; -+ -+ pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; -+ pte |= pfn_to_iopte(paddr >> data->pg_shift, data); -+ -+ *ptep = pte; -+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie); -+ return 0; -+} -+ -+static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, -+ phys_addr_t paddr, size_t size, arm_lpae_iopte prot, -+ int lvl, arm_lpae_iopte *ptep) -+{ -+ arm_lpae_iopte *cptep, pte; -+ void *cookie = data->iop.cookie; -+ size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); -+ -+ /* Find our entry at the current level */ -+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); -+ -+ /* If we can install a leaf entry at this level, then do so */ -+ if (size == block_size && (size & data->iop.cfg.pgsize_bitmap)) -+ return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); -+ -+ /* We can't allocate tables at the final level */ -+ if (WARN_ON(lvl >= 
ARM_LPAE_MAX_LEVELS - 1)) -+ return -EINVAL; -+ -+ /* Grab a pointer to the next level */ -+ pte = *ptep; -+ if (!pte) { -+ cptep = alloc_pages_exact(1UL << data->pg_shift, -+ GFP_ATOMIC | __GFP_ZERO); -+ if (!cptep) -+ return -ENOMEM; -+ -+ data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift, -+ cookie); -+ pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE; -+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) -+ pte |= ARM_LPAE_PTE_NSTABLE; -+ *ptep = pte; -+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); -+ } else { -+ cptep = iopte_deref(pte, data); -+ } -+ -+ /* Rinse, repeat */ -+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); -+} -+ -+static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, -+ int prot) -+{ -+ arm_lpae_iopte pte; -+ -+ if (data->iop.fmt == ARM_64_LPAE_S1 || -+ data->iop.fmt == ARM_32_LPAE_S1) { -+ pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG; -+ -+ if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) -+ pte |= ARM_LPAE_PTE_AP_RDONLY; -+ -+ if (prot & IOMMU_CACHE) -+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE -+ << ARM_LPAE_PTE_ATTRINDX_SHIFT); -+ else if (prot & IOMMU_MMIO) -+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV -+ << ARM_LPAE_PTE_ATTRINDX_SHIFT); -+ } else { -+ pte = ARM_LPAE_PTE_HAP_FAULT; -+ if (prot & IOMMU_READ) -+ pte |= ARM_LPAE_PTE_HAP_READ; -+ if (prot & IOMMU_WRITE) -+ pte |= ARM_LPAE_PTE_HAP_WRITE; -+ if (prot & IOMMU_CACHE) -+ pte |= ARM_LPAE_PTE_MEMATTR_OIWB; -+ else if (prot & IOMMU_MMIO) -+ pte |= ARM_LPAE_PTE_MEMATTR_DEV; -+ else -+ pte |= ARM_LPAE_PTE_MEMATTR_NC; -+ } -+ -+ if (prot & IOMMU_NOEXEC) -+ pte |= ARM_LPAE_PTE_XN; -+ -+ return pte; -+} -+ -+static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, -+ phys_addr_t paddr, size_t size, int iommu_prot) -+{ -+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); -+ arm_lpae_iopte *ptep = data->pgd; -+ int lvl = ARM_LPAE_START_LVL(data); -+ arm_lpae_iopte prot; -+ -+ /* If no access, then 
nothing to do */ -+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) -+ return 0; -+ -+ prot = arm_lpae_prot_to_pte(data, iommu_prot); -+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep); -+} -+ -+static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, -+ arm_lpae_iopte *ptep) -+{ -+ arm_lpae_iopte *start, *end; -+ unsigned long table_size; -+ -+ /* Only leaf entries at the last level */ -+ if (lvl == ARM_LPAE_MAX_LEVELS - 1) -+ return; -+ -+ if (lvl == ARM_LPAE_START_LVL(data)) -+ table_size = data->pgd_size; -+ else -+ table_size = 1UL << data->pg_shift; -+ -+ start = ptep; -+ end = (void *)ptep + table_size; -+ -+ while (ptep != end) { -+ arm_lpae_iopte pte = *ptep++; -+ -+ if (!pte || iopte_leaf(pte, lvl)) -+ continue; -+ -+ __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); -+ } -+ -+ free_pages_exact(start, table_size); -+} -+ -+static void arm_lpae_free_pgtable(struct io_pgtable *iop) -+{ -+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); -+ -+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd); -+ kfree(data); -+} -+ -+static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, -+ unsigned long iova, size_t size, -+ arm_lpae_iopte prot, int lvl, -+ arm_lpae_iopte *ptep, size_t blk_size) -+{ -+ unsigned long blk_start, blk_end; -+ phys_addr_t blk_paddr; -+ arm_lpae_iopte table = 0; -+ void *cookie = data->iop.cookie; -+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb; -+ -+ blk_start = iova & ~(blk_size - 1); -+ blk_end = blk_start + blk_size; -+ blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift; -+ -+ for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { -+ arm_lpae_iopte *tablep; -+ -+ /* Unmap! 
*/ -+ if (blk_start == iova) -+ continue; -+ -+ /* __arm_lpae_map expects a pointer to the start of the table */ -+ tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); -+ if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, -+ tablep) < 0) { -+ if (table) { -+ /* Free the table we allocated */ -+ tablep = iopte_deref(table, data); -+ __arm_lpae_free_pgtable(data, lvl + 1, tablep); -+ } -+ return 0; /* Bytes unmapped */ -+ } -+ } -+ -+ *ptep = table; -+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); -+ iova &= ~(blk_size - 1); -+ tlb->tlb_add_flush(iova, blk_size, true, cookie); -+ return size; -+} -+ -+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, -+ unsigned long iova, size_t size, int lvl, -+ arm_lpae_iopte *ptep) -+{ -+ arm_lpae_iopte pte; -+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb; -+ void *cookie = data->iop.cookie; -+ size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); -+ -+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); -+ pte = *ptep; -+ -+ /* Something went horribly wrong and we ran out of page table */ -+ if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS))) -+ return 0; -+ -+ /* If the size matches this level, we're in the right place */ -+ if (size == blk_size) { -+ *ptep = 0; -+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); -+ -+ if (!iopte_leaf(pte, lvl)) { -+ /* Also flush any partial walks */ -+ tlb->tlb_add_flush(iova, size, false, cookie); -+ tlb->tlb_sync(data->iop.cookie); -+ ptep = iopte_deref(pte, data); -+ __arm_lpae_free_pgtable(data, lvl + 1, ptep); -+ } else { -+ tlb->tlb_add_flush(iova, size, true, cookie); -+ } -+ -+ return size; -+ } else if (iopte_leaf(pte, lvl)) { -+ /* -+ * Insert a table at the next level to map the old region, -+ * minus the part we want to unmap -+ */ -+ return arm_lpae_split_blk_unmap(data, iova, size, -+ iopte_prot(pte), lvl, ptep, -+ blk_size); -+ } -+ -+ /* Keep on walkin' */ -+ ptep = iopte_deref(pte, data); -+ return __arm_lpae_unmap(data, iova, size, lvl + 1, 
ptep); -+} -+ -+static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, -+ size_t size) -+{ -+ size_t unmapped; -+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); -+ struct io_pgtable *iop = &data->iop; -+ arm_lpae_iopte *ptep = data->pgd; -+ int lvl = ARM_LPAE_START_LVL(data); -+ -+ unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); -+ if (unmapped) -+ iop->cfg.tlb->tlb_sync(iop->cookie); -+ -+ return unmapped; -+} -+ -+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, -+ unsigned long iova) -+{ -+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); -+ arm_lpae_iopte pte, *ptep = data->pgd; -+ int lvl = ARM_LPAE_START_LVL(data); -+ -+ do { -+ /* Valid IOPTE pointer? */ -+ if (!ptep) -+ return 0; -+ -+ /* Grab the IOPTE we're interested in */ -+ pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data)); -+ -+ /* Valid entry? */ -+ if (!pte) -+ return 0; -+ -+ /* Leaf entry? */ -+ if (iopte_leaf(pte,lvl)) -+ goto found_translation; -+ -+ /* Take it to the next level */ -+ ptep = iopte_deref(pte, data); -+ } while (++lvl < ARM_LPAE_MAX_LEVELS); -+ -+ /* Ran out of page tables to walk */ -+ return 0; -+ -+found_translation: -+ iova &= ((1 << data->pg_shift) - 1); -+ return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova; -+} -+ -+static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) -+{ -+ unsigned long granule; -+ -+ /* -+ * We need to restrict the supported page sizes to match the -+ * translation regime for a particular granule. Aim to match -+ * the CPU page size if possible, otherwise prefer smaller sizes. -+ * While we're at it, restrict the block sizes to match the -+ * chosen granule. 
-+ */ -+ if (cfg->pgsize_bitmap & PAGE_SIZE) -+ granule = PAGE_SIZE; -+ else if (cfg->pgsize_bitmap & ~PAGE_MASK) -+ granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); -+ else if (cfg->pgsize_bitmap & PAGE_MASK) -+ granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); -+ else -+ granule = 0; -+ -+ switch (granule) { -+ case SZ_4K: -+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); -+ break; -+ case SZ_16K: -+ cfg->pgsize_bitmap &= (SZ_16K | SZ_32M); -+ break; -+ case SZ_64K: -+ cfg->pgsize_bitmap &= (SZ_64K | SZ_512M); -+ break; -+ default: -+ cfg->pgsize_bitmap = 0; -+ } -+} -+ -+static struct arm_lpae_io_pgtable * -+arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) -+{ -+ unsigned long va_bits, pgd_bits; -+ struct arm_lpae_io_pgtable *data; -+ -+ arm_lpae_restrict_pgsizes(cfg); -+ -+ if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K))) -+ return NULL; -+ -+ if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS) -+ return NULL; -+ -+ if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) -+ return NULL; -+ -+ data = kmalloc(sizeof(*data), GFP_KERNEL); -+ if (!data) -+ return NULL; -+ -+ data->pg_shift = __ffs(cfg->pgsize_bitmap); -+ data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); -+ -+ va_bits = cfg->ias - data->pg_shift; -+ data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level); -+ -+ /* Calculate the actual size of our pgd (without concatenation) */ -+ pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); -+ data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); -+ -+ data->iop.ops = (struct io_pgtable_ops) { -+ .map = arm_lpae_map, -+ .unmap = arm_lpae_unmap, -+ .iova_to_phys = arm_lpae_iova_to_phys, -+ }; -+ -+ return data; -+} -+ -+static struct io_pgtable * -+arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) -+{ -+ u64 reg; -+ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); -+ -+ if (!data) -+ return NULL; -+ -+ /* TCR */ -+ reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | -+ 
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | -+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); -+ -+ switch (1 << data->pg_shift) { -+ case SZ_4K: -+ reg |= ARM_LPAE_TCR_TG0_4K; -+ break; -+ case SZ_16K: -+ reg |= ARM_LPAE_TCR_TG0_16K; -+ break; -+ case SZ_64K: -+ reg |= ARM_LPAE_TCR_TG0_64K; -+ break; -+ } -+ -+ switch (cfg->oas) { -+ case 32: -+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ case 36: -+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ case 40: -+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ case 42: -+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ case 44: -+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ case 48: -+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ default: -+ goto out_free_data; -+ } -+ -+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; -+ -+ /* Disable speculative walks through TTBR1 */ -+ reg |= ARM_LPAE_TCR_EPD1; -+ cfg->arm_lpae_s1_cfg.tcr = reg; -+ -+ /* MAIRs */ -+ reg = (ARM_LPAE_MAIR_ATTR_NC -+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | -+ (ARM_LPAE_MAIR_ATTR_WBRWA -+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | -+ (ARM_LPAE_MAIR_ATTR_DEVICE -+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); -+ -+ cfg->arm_lpae_s1_cfg.mair[0] = reg; -+ cfg->arm_lpae_s1_cfg.mair[1] = 0; -+ -+ /* Looking good; allocate a pgd */ -+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO); -+ if (!data->pgd) -+ goto out_free_data; -+ -+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie); -+ -+ /* TTBRs */ -+ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd); -+ cfg->arm_lpae_s1_cfg.ttbr[1] = 0; -+ return &data->iop; -+ -+out_free_data: -+ kfree(data); -+ return NULL; -+} -+ -+static struct io_pgtable * -+arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) -+{ -+ u64 reg, sl; -+ struct 
arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); -+ -+ if (!data) -+ return NULL; -+ -+ /* -+ * Concatenate PGDs at level 1 if possible in order to reduce -+ * the depth of the stage-2 walk. -+ */ -+ if (data->levels == ARM_LPAE_MAX_LEVELS) { -+ unsigned long pgd_pages; -+ -+ pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte)); -+ if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) { -+ data->pgd_size = pgd_pages << data->pg_shift; -+ data->levels--; -+ } -+ } -+ -+ /* VTCR */ -+ reg = ARM_64_LPAE_S2_TCR_RES1 | -+ (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | -+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | -+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); -+ -+ sl = ARM_LPAE_START_LVL(data); -+ -+ switch (1 << data->pg_shift) { -+ case SZ_4K: -+ reg |= ARM_LPAE_TCR_TG0_4K; -+ sl++; /* SL0 format is different for 4K granule size */ -+ break; -+ case SZ_16K: -+ reg |= ARM_LPAE_TCR_TG0_16K; -+ break; -+ case SZ_64K: -+ reg |= ARM_LPAE_TCR_TG0_64K; -+ break; -+ } -+ -+ switch (cfg->oas) { -+ case 32: -+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ case 36: -+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ case 40: -+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ case 42: -+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ case 44: -+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ case 48: -+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ default: -+ goto out_free_data; -+ } -+ -+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; -+ reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT; -+ cfg->arm_lpae_s2_cfg.vtcr = reg; -+ -+ /* Allocate pgd pages */ -+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO); -+ if (!data->pgd) -+ goto out_free_data; -+ -+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie); -+ -+ /* VTTBR */ -+ cfg->arm_lpae_s2_cfg.vttbr = 
virt_to_phys(data->pgd); -+ return &data->iop; -+ -+out_free_data: -+ kfree(data); -+ return NULL; -+} -+ -+static struct io_pgtable * -+arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) -+{ -+ struct io_pgtable *iop; -+ -+ if (cfg->ias > 32 || cfg->oas > 40) -+ return NULL; -+ -+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); -+ iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); -+ if (iop) { -+ cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE; -+ cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff; -+ } -+ -+ return iop; -+} -+ -+static struct io_pgtable * -+arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) -+{ -+ struct io_pgtable *iop; -+ -+ if (cfg->ias > 40 || cfg->oas > 40) -+ return NULL; -+ -+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); -+ iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie); -+ if (iop) -+ cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff; -+ -+ return iop; -+} -+ -+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { -+ .alloc = arm_64_lpae_alloc_pgtable_s1, -+ .free = arm_lpae_free_pgtable, -+}; -+ -+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = { -+ .alloc = arm_64_lpae_alloc_pgtable_s2, -+ .free = arm_lpae_free_pgtable, -+}; -+ -+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = { -+ .alloc = arm_32_lpae_alloc_pgtable_s1, -+ .free = arm_lpae_free_pgtable, -+}; -+ -+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = { -+ .alloc = arm_32_lpae_alloc_pgtable_s2, -+ .free = arm_lpae_free_pgtable, -+}; -+ -+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST -+ -+static struct io_pgtable_cfg *cfg_cookie; -+ -+static void dummy_tlb_flush_all(void *cookie) -+{ -+ WARN_ON(cookie != cfg_cookie); -+} -+ -+static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf, -+ void *cookie) -+{ -+ WARN_ON(cookie != cfg_cookie); -+ WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); -+} -+ -+static void dummy_tlb_sync(void *cookie) -+{ -+ WARN_ON(cookie != cfg_cookie); -+} -+ 
-+static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie) -+{ -+ WARN_ON(cookie != cfg_cookie); -+} -+ -+static struct iommu_gather_ops dummy_tlb_ops __initdata = { -+ .tlb_flush_all = dummy_tlb_flush_all, -+ .tlb_add_flush = dummy_tlb_add_flush, -+ .tlb_sync = dummy_tlb_sync, -+ .flush_pgtable = dummy_flush_pgtable, -+}; -+ -+static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) -+{ -+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); -+ struct io_pgtable_cfg *cfg = &data->iop.cfg; -+ -+ pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", -+ cfg->pgsize_bitmap, cfg->ias); -+ pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", -+ data->levels, data->pgd_size, data->pg_shift, -+ data->bits_per_level, data->pgd); -+} -+ -+#define __FAIL(ops, i) ({ \ -+ WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \ -+ arm_lpae_dump_ops(ops); \ -+ selftest_running = false; \ -+ -EFAULT; \ -+}) -+ -+static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) -+{ -+ static const enum io_pgtable_fmt fmts[] = { -+ ARM_64_LPAE_S1, -+ ARM_64_LPAE_S2, -+ }; -+ -+ int i, j; -+ unsigned long iova; -+ size_t size; -+ struct io_pgtable_ops *ops; -+ -+ selftest_running = true; -+ -+ for (i = 0; i < ARRAY_SIZE(fmts); ++i) { -+ cfg_cookie = cfg; -+ ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); -+ if (!ops) { -+ pr_err("selftest: failed to allocate io pgtable ops\n"); -+ return -ENOMEM; -+ } -+ -+ /* -+ * Initial sanity checks. -+ * Empty page tables shouldn't provide any translations. -+ */ -+ if (ops->iova_to_phys(ops, 42)) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, SZ_1G + 42)) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, SZ_2G + 42)) -+ return __FAIL(ops, i); -+ -+ /* -+ * Distinct mappings of different granule sizes. 
-+ */ -+ iova = 0; -+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); -+ while (j != BITS_PER_LONG) { -+ size = 1UL << j; -+ -+ if (ops->map(ops, iova, iova, size, IOMMU_READ | -+ IOMMU_WRITE | -+ IOMMU_NOEXEC | -+ IOMMU_CACHE)) -+ return __FAIL(ops, i); -+ -+ /* Overlapping mappings */ -+ if (!ops->map(ops, iova, iova + size, size, -+ IOMMU_READ | IOMMU_NOEXEC)) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) -+ return __FAIL(ops, i); -+ -+ iova += SZ_1G; -+ j++; -+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); -+ } -+ -+ /* Partial unmap */ -+ size = 1UL << __ffs(cfg->pgsize_bitmap); -+ if (ops->unmap(ops, SZ_1G + size, size) != size) -+ return __FAIL(ops, i); -+ -+ /* Remap of partial unmap */ -+ if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) -+ return __FAIL(ops, i); -+ -+ /* Full unmap */ -+ iova = 0; -+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); -+ while (j != BITS_PER_LONG) { -+ size = 1UL << j; -+ -+ if (ops->unmap(ops, iova, size) != size) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, iova + 42)) -+ return __FAIL(ops, i); -+ -+ /* Remap full block */ -+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) -+ return __FAIL(ops, i); -+ -+ iova += SZ_1G; -+ j++; -+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); -+ } -+ -+ free_io_pgtable_ops(ops); -+ } -+ -+ selftest_running = false; -+ return 0; -+} -+ -+static int __init arm_lpae_do_selftests(void) -+{ -+ static const unsigned long pgsize[] = { -+ SZ_4K | SZ_2M | SZ_1G, -+ SZ_16K | SZ_32M, -+ SZ_64K | SZ_512M, -+ }; -+ -+ static const unsigned int ias[] = { -+ 32, 36, 40, 42, 44, 48, -+ }; -+ -+ int i, j, pass = 0, fail = 0; -+ struct io_pgtable_cfg cfg = { -+ .tlb = &dummy_tlb_ops, -+ .oas = 48, -+ }; -+ -+ for (i = 0; i < 
ARRAY_SIZE(pgsize); ++i) { -+ for (j = 0; j < ARRAY_SIZE(ias); ++j) { -+ cfg.pgsize_bitmap = pgsize[i]; -+ cfg.ias = ias[j]; -+ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n", -+ pgsize[i], ias[j]); -+ if (arm_lpae_run_tests(&cfg)) -+ fail++; -+ else -+ pass++; -+ } -+ } -+ -+ pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail); -+ return fail ? -EFAULT : 0; -+} -+subsys_initcall(arm_lpae_do_selftests); -+#endif -diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c -new file mode 100644 -index 0000000..6436fe2 ---- /dev/null -+++ b/drivers/iommu/io-pgtable.c -@@ -0,0 +1,82 @@ -+/* -+ * Generic page table allocator for IOMMUs. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ * -+ * Copyright (C) 2014 ARM Limited -+ * -+ * Author: Will Deacon -+ */ -+ -+#include -+#include -+#include -+ -+#include "io-pgtable.h" -+ -+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns; -+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns; -+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns; -+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; -+ -+static const struct io_pgtable_init_fns * -+io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = -+{ -+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE -+ [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns, -+ [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns, -+ [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, -+ [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns, -+#endif -+}; -+ -+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, -+ struct io_pgtable_cfg *cfg, -+ void *cookie) -+{ -+ struct io_pgtable *iop; -+ const struct io_pgtable_init_fns *fns; -+ -+ if (fmt >= IO_PGTABLE_NUM_FMTS) -+ return NULL; -+ -+ fns = io_pgtable_init_table[fmt]; -+ if (!fns) -+ return NULL; -+ -+ iop = fns->alloc(cfg, cookie); -+ if (!iop) -+ return NULL; -+ -+ iop->fmt = fmt; -+ iop->cookie = cookie; -+ iop->cfg = *cfg; -+ -+ return &iop->ops; -+} -+ -+/* -+ * It is the IOMMU driver's responsibility to ensure that the page table -+ * is no longer accessible to the walker by this point. 
-+ */ -+void free_io_pgtable_ops(struct io_pgtable_ops *ops) -+{ -+ struct io_pgtable *iop; -+ -+ if (!ops) -+ return; -+ -+ iop = container_of(ops, struct io_pgtable, ops); -+ iop->cfg.tlb->tlb_flush_all(iop->cookie); -+ io_pgtable_init_table[iop->fmt]->free(iop); -+} -diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h -new file mode 100644 -index 0000000..10e32f6 ---- /dev/null -+++ b/drivers/iommu/io-pgtable.h -@@ -0,0 +1,143 @@ -+#ifndef __IO_PGTABLE_H -+#define __IO_PGTABLE_H -+ -+/* -+ * Public API for use by IOMMU drivers -+ */ -+enum io_pgtable_fmt { -+ ARM_32_LPAE_S1, -+ ARM_32_LPAE_S2, -+ ARM_64_LPAE_S1, -+ ARM_64_LPAE_S2, -+ IO_PGTABLE_NUM_FMTS, -+}; -+ -+/** -+ * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management. -+ * -+ * @tlb_flush_all: Synchronously invalidate the entire TLB context. -+ * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range. -+ * @tlb_sync: Ensure any queue TLB invalidation has taken effect. -+ * @flush_pgtable: Ensure page table updates are visible to the IOMMU. -+ * -+ * Note that these can all be called in atomic context and must therefore -+ * not block. -+ */ -+struct iommu_gather_ops { -+ void (*tlb_flush_all)(void *cookie); -+ void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf, -+ void *cookie); -+ void (*tlb_sync)(void *cookie); -+ void (*flush_pgtable)(void *ptr, size_t size, void *cookie); -+}; -+ -+/** -+ * struct io_pgtable_cfg - Configuration data for a set of page tables. -+ * -+ * @quirks: A bitmap of hardware quirks that require some special -+ * action by the low-level page table allocator. -+ * @pgsize_bitmap: A bitmap of page sizes supported by this set of page -+ * tables. -+ * @ias: Input address (iova) size, in bits. -+ * @oas: Output address (paddr) size, in bits. -+ * @tlb: TLB management callbacks for this set of tables. 
-+ */ -+struct io_pgtable_cfg { -+ #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */ -+ int quirks; -+ unsigned long pgsize_bitmap; -+ unsigned int ias; -+ unsigned int oas; -+ const struct iommu_gather_ops *tlb; -+ -+ /* Low-level data specific to the table format */ -+ union { -+ struct { -+ u64 ttbr[2]; -+ u64 tcr; -+ u64 mair[2]; -+ } arm_lpae_s1_cfg; -+ -+ struct { -+ u64 vttbr; -+ u64 vtcr; -+ } arm_lpae_s2_cfg; -+ }; -+}; -+ -+/** -+ * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. -+ * -+ * @map: Map a physically contiguous memory region. -+ * @unmap: Unmap a physically contiguous memory region. -+ * @iova_to_phys: Translate iova to physical address. -+ * -+ * These functions map directly onto the iommu_ops member functions with -+ * the same names. -+ */ -+struct io_pgtable_ops { -+ int (*map)(struct io_pgtable_ops *ops, unsigned long iova, -+ phys_addr_t paddr, size_t size, int prot); -+ int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, -+ size_t size); -+ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, -+ unsigned long iova); -+}; -+ -+/** -+ * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU. -+ * -+ * @fmt: The page table format. -+ * @cfg: The page table configuration. This will be modified to represent -+ * the configuration actually provided by the allocator (e.g. the -+ * pgsize_bitmap may be restricted). -+ * @cookie: An opaque token provided by the IOMMU driver and passed back to -+ * the callback routines in cfg->tlb. -+ */ -+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, -+ struct io_pgtable_cfg *cfg, -+ void *cookie); -+ -+/** -+ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller -+ * *must* ensure that the page table is no longer -+ * live, but the TLB can be dirty. -+ * -+ * @ops: The ops returned from alloc_io_pgtable_ops. 
-+ */ -+void free_io_pgtable_ops(struct io_pgtable_ops *ops); -+ -+ -+/* -+ * Internal structures for page table allocator implementations. -+ */ -+ -+/** -+ * struct io_pgtable - Internal structure describing a set of page tables. -+ * -+ * @fmt: The page table format. -+ * @cookie: An opaque token provided by the IOMMU driver and passed back to -+ * any callback routines. -+ * @cfg: A copy of the page table configuration. -+ * @ops: The page table operations in use for this set of page tables. -+ */ -+struct io_pgtable { -+ enum io_pgtable_fmt fmt; -+ void *cookie; -+ struct io_pgtable_cfg cfg; -+ struct io_pgtable_ops ops; -+}; -+ -+/** -+ * struct io_pgtable_init_fns - Alloc/free a set of page tables for a -+ * particular format. -+ * -+ * @alloc: Allocate a set of page tables described by cfg. -+ * @free: Free the page tables associated with iop. -+ */ -+struct io_pgtable_init_fns { -+ struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie); -+ void (*free)(struct io_pgtable *iop); -+}; -+ -+#endif /* __IO_PGTABLE_H */ -diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c -index ed8b048..8d8e5a7 100644 ---- a/drivers/iommu/iommu.c -+++ b/drivers/iommu/iommu.c -@@ -591,10 +591,10 @@ static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, - continue; - - /* We alias them or they alias us */ -- if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && -- pdev->dma_alias_devfn == tmp->devfn) || -- ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && -- tmp->dma_alias_devfn == pdev->devfn)) { -+ if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID) && -+ (pdev->dma_alias_devid & 0xff) == tmp->devfn) || -+ ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID) && -+ (tmp->dma_alias_devid & 0xff) == pdev->devfn)) { - - group = get_pci_alias_group(tmp, devfns); - if (group) { -@@ -737,7 +737,7 @@ static int add_iommu_group(struct device *dev, void *data) - const struct iommu_ops *ops = cb->ops; - - if (!ops->add_device) -- return -ENODEV; 
-+ return 0; - - WARN_ON(dev->iommu_group); - -@@ -818,7 +818,15 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) - kfree(nb); - return err; - } -- return bus_for_each_dev(bus, NULL, &cb, add_iommu_group); -+ -+ err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group); -+ if (err) { -+ bus_unregister_notifier(bus, nb); -+ kfree(nb); -+ return err; -+ } -+ -+ return 0; - } - - /** -@@ -836,13 +844,19 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) - */ - int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) - { -+ int err; -+ - if (bus->iommu_ops != NULL) - return -EBUSY; - - bus->iommu_ops = ops; - - /* Do IOMMU specific setup for this bus-type */ -- return iommu_bus_init(bus, ops); -+ err = iommu_bus_init(bus, ops); -+ if (err) -+ bus->iommu_ops = NULL; -+ -+ return err; - } - EXPORT_SYMBOL_GPL(bus_set_iommu); - -@@ -887,36 +901,24 @@ EXPORT_SYMBOL_GPL(iommu_set_fault_handler); - struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) - { - struct iommu_domain *domain; -- int ret; - - if (bus == NULL || bus->iommu_ops == NULL) - return NULL; - -- domain = kzalloc(sizeof(*domain), GFP_KERNEL); -+ domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); - if (!domain) - return NULL; - -- domain->ops = bus->iommu_ops; -- -- ret = domain->ops->domain_init(domain); -- if (ret) -- goto out_free; -+ domain->ops = bus->iommu_ops; -+ domain->type = IOMMU_DOMAIN_UNMANAGED; - - return domain; -- --out_free: -- kfree(domain); -- -- return NULL; - } - EXPORT_SYMBOL_GPL(iommu_domain_alloc); - - void iommu_domain_free(struct iommu_domain *domain) - { -- if (likely(domain->ops->domain_destroy != NULL)) -- domain->ops->domain_destroy(domain); -- -- kfree(domain); -+ domain->ops->domain_free(domain); - } - EXPORT_SYMBOL_GPL(iommu_domain_free); - -@@ -943,6 +945,16 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev) - } - EXPORT_SYMBOL_GPL(iommu_detach_device); - 
-+struct iommu_domain *iommu_get_dev_domain(struct device *dev) -+{ -+ const struct iommu_ops *ops = dev->bus->iommu_ops; -+ -+ if (unlikely(ops == NULL || ops->get_dev_iommu_domain == NULL)) -+ return NULL; -+ -+ return ops->get_dev_iommu_domain(dev); -+} -+EXPORT_SYMBOL_GPL(iommu_get_dev_domain); - /* - * IOMMU groups are really the natrual working unit of the IOMMU, but - * the IOMMU API works on domains and devices. Bridge that gap by -@@ -1035,6 +1047,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, - domain->ops->pgsize_bitmap == 0UL)) - return -ENODEV; - -+ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) -+ return -EINVAL; -+ - /* find out the minimum page size supported */ - min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); - -@@ -1070,7 +1085,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, - if (ret) - iommu_unmap(domain, orig_iova, orig_size - size); - else -- trace_map(iova, paddr, size); -+ trace_map(orig_iova, paddr, orig_size); - - return ret; - } -@@ -1080,11 +1095,15 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) - { - size_t unmapped_page, unmapped = 0; - unsigned int min_pagesz; -+ unsigned long orig_iova = iova; - - if (unlikely(domain->ops->unmap == NULL || - domain->ops->pgsize_bitmap == 0UL)) - return -ENODEV; - -+ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) -+ return -EINVAL; -+ - /* find out the minimum page size supported */ - min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); - -@@ -1119,11 +1138,53 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) - unmapped += unmapped_page; - } - -- trace_unmap(iova, 0, size); -+ trace_unmap(orig_iova, size, unmapped); - return unmapped; - } - EXPORT_SYMBOL_GPL(iommu_unmap); - -+size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, -+ struct scatterlist *sg, unsigned int nents, int prot) -+{ -+ struct scatterlist *s; -+ size_t mapped = 0; -+ unsigned 
int i, min_pagesz; -+ int ret; -+ -+ if (unlikely(domain->ops->pgsize_bitmap == 0UL)) -+ return 0; -+ -+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); -+ -+ for_each_sg(sg, s, nents, i) { -+ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; -+ -+ /* -+ * We are mapping on IOMMU page boundaries, so offset within -+ * the page must be 0. However, the IOMMU may support pages -+ * smaller than PAGE_SIZE, so s->offset may still represent -+ * an offset of that boundary within the CPU page. -+ */ -+ if (!IS_ALIGNED(s->offset, min_pagesz)) -+ goto out_err; -+ -+ ret = iommu_map(domain, iova + mapped, phys, s->length, prot); -+ if (ret) -+ goto out_err; -+ -+ mapped += s->length; -+ } -+ -+ return mapped; -+ -+out_err: -+ /* undo mappings already done */ -+ iommu_unmap(domain, iova, mapped); -+ -+ return 0; -+ -+} -+EXPORT_SYMBOL_GPL(default_iommu_map_sg); - - int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t paddr, u64 size, int prot) -diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c -index 7dab5cb..f3c5ab6 100644 ---- a/drivers/iommu/ipmmu-vmsa.c -+++ b/drivers/iommu/ipmmu-vmsa.c -@@ -1127,6 +1127,7 @@ static const struct iommu_ops ipmmu_ops = { - .detach_dev = ipmmu_detach_device, - .map = ipmmu_map, - .unmap = ipmmu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = ipmmu_iova_to_phys, - .add_device = ipmmu_add_device, - .remove_device = ipmmu_remove_device, -@@ -1221,7 +1222,6 @@ static int ipmmu_remove(struct platform_device *pdev) - - static struct platform_driver ipmmu_driver = { - .driver = { -- .owner = THIS_MODULE, - .name = "ipmmu-vmsa", - }, - .probe = ipmmu_probe, -diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c -index 74a1767..2c3f5ad 100644 ---- a/drivers/iommu/irq_remapping.c -+++ b/drivers/iommu/irq_remapping.c -@@ -56,19 +56,13 @@ static int do_setup_msi_irqs(struct pci_dev *dev, int nvec) - unsigned int irq; - struct msi_desc *msidesc; - -- 
WARN_ON(!list_is_singular(&dev->msi_list)); - msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); -- WARN_ON(msidesc->irq); -- WARN_ON(msidesc->msi_attrib.multiple); -- WARN_ON(msidesc->nvec_used); - - irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev)); - if (irq == 0) - return -ENOSPC; - - nvec_pow2 = __roundup_pow_of_two(nvec); -- msidesc->nvec_used = nvec; -- msidesc->msi_attrib.multiple = ilog2(nvec_pow2); - for (sub_handle = 0; sub_handle < nvec; sub_handle++) { - if (!sub_handle) { - index = msi_alloc_remapped_irq(dev, irq, nvec_pow2); -@@ -96,8 +90,6 @@ error: - * IRQs from tearing down again in default_teardown_msi_irqs() - */ - msidesc->irq = 0; -- msidesc->nvec_used = 0; -- msidesc->msi_attrib.multiple = 0; - - return ret; - } -diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c -index 6e3dcc2..1c7b78e 100644 ---- a/drivers/iommu/msm_iommu.c -+++ b/drivers/iommu/msm_iommu.c -@@ -681,6 +681,7 @@ static const struct iommu_ops msm_iommu_ops = { - .detach_dev = msm_iommu_detach_dev, - .map = msm_iommu_map, - .unmap = msm_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = msm_iommu_iova_to_phys, - .pgsize_bitmap = MSM_IOMMU_PGSIZES, - }; -diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c -index e550ccb..43429ab 100644 ---- a/drivers/iommu/of_iommu.c -+++ b/drivers/iommu/of_iommu.c -@@ -18,9 +18,14 @@ - */ - - #include -+#include - #include - #include - #include -+#include -+ -+static const struct of_device_id __iommu_of_table_sentinel -+ __used __section(__iommu_of_table_end); - - /** - * of_get_dma_window - Parse *dma-window property and returns 0 if found. 
-@@ -89,3 +94,93 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index, - return 0; - } - EXPORT_SYMBOL_GPL(of_get_dma_window); -+ -+struct of_iommu_node { -+ struct list_head list; -+ struct device_node *np; -+ struct iommu_ops *ops; -+}; -+static LIST_HEAD(of_iommu_list); -+static DEFINE_SPINLOCK(of_iommu_lock); -+ -+void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops) -+{ -+ struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); -+ -+ if (WARN_ON(!iommu)) -+ return; -+ -+ INIT_LIST_HEAD(&iommu->list); -+ iommu->np = np; -+ iommu->ops = ops; -+ spin_lock(&of_iommu_lock); -+ list_add_tail(&iommu->list, &of_iommu_list); -+ spin_unlock(&of_iommu_lock); -+} -+ -+struct iommu_ops *of_iommu_get_ops(struct device_node *np) -+{ -+ struct of_iommu_node *node; -+ struct iommu_ops *ops = NULL; -+ -+ spin_lock(&of_iommu_lock); -+ list_for_each_entry(node, &of_iommu_list, list) -+ if (node->np == np) { -+ ops = node->ops; -+ break; -+ } -+ spin_unlock(&of_iommu_lock); -+ return ops; -+} -+ -+struct iommu_ops *of_iommu_configure(struct device *dev, -+ struct device_node *master_np) -+{ -+ struct of_phandle_args iommu_spec; -+ struct device_node *np; -+ struct iommu_ops *ops = NULL; -+ int idx = 0; -+ -+ if (dev_is_pci(dev)) { -+ dev_err(dev, "IOMMU is currently not supported for PCI\n"); -+ return NULL; -+ } -+ -+ /* -+ * We don't currently walk up the tree looking for a parent IOMMU. 
-+ * See the `Notes:' section of -+ * Documentation/devicetree/bindings/iommu/iommu.txt -+ */ -+ while (!of_parse_phandle_with_args(master_np, "iommus", -+ "#iommu-cells", idx, -+ &iommu_spec)) { -+ np = iommu_spec.np; -+ ops = of_iommu_get_ops(np); -+ -+ if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec)) -+ goto err_put_node; -+ -+ of_node_put(np); -+ idx++; -+ } -+ -+ return ops; -+ -+err_put_node: -+ of_node_put(np); -+ return NULL; -+} -+ -+void __init of_iommu_init(void) -+{ -+ struct device_node *np; -+ const struct of_device_id *match, *matches = &__iommu_of_table; -+ -+ for_each_matching_node_and_match(np, matches, &match) { -+ const of_iommu_init_fn init_fn = match->data; -+ -+ if (init_fn(np)) -+ pr_err("Failed to initialise IOMMU %s\n", -+ of_node_full_name(np)); -+ } -+} -diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c -index 3627887..18003c0 100644 ---- a/drivers/iommu/omap-iommu.c -+++ b/drivers/iommu/omap-iommu.c -@@ -1288,6 +1288,7 @@ static const struct iommu_ops omap_iommu_ops = { - .detach_dev = omap_iommu_detach_dev, - .map = omap_iommu_map, - .unmap = omap_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = omap_iommu_iova_to_phys, - .add_device = omap_iommu_add_device, - .remove_device = omap_iommu_remove_device, -diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c -index 1333e6f..f1b0077 100644 ---- a/drivers/iommu/shmobile-iommu.c -+++ b/drivers/iommu/shmobile-iommu.c -@@ -361,6 +361,7 @@ static const struct iommu_ops shmobile_iommu_ops = { - .detach_dev = shmobile_iommu_detach_device, - .map = shmobile_iommu_map, - .unmap = shmobile_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = shmobile_iommu_iova_to_phys, - .add_device = shmobile_iommu_add_device, - .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K, -diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c -index bd97ade..951651a 100644 ---- a/drivers/iommu/shmobile-ipmmu.c -+++ 
b/drivers/iommu/shmobile-ipmmu.c -@@ -118,7 +118,6 @@ static int ipmmu_probe(struct platform_device *pdev) - static struct platform_driver ipmmu_driver = { - .probe = ipmmu_probe, - .driver = { -- .owner = THIS_MODULE, - .name = "ipmmu", - }, - }; -diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c -index a6d76ab..f722a0c 100644 ---- a/drivers/iommu/tegra-gart.c -+++ b/drivers/iommu/tegra-gart.c -@@ -425,7 +425,6 @@ static struct platform_driver tegra_gart_driver = { - .probe = tegra_gart_probe, - .remove = tegra_gart_remove, - .driver = { -- .owner = THIS_MODULE, - .name = "tegra-gart", - .pm = &tegra_gart_pm_ops, - .of_match_table = tegra_gart_of_match, -diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c -index 3afdf43..cb0c9bf 100644 ---- a/drivers/iommu/tegra-smmu.c -+++ b/drivers/iommu/tegra-smmu.c -@@ -955,6 +955,7 @@ static const struct iommu_ops smmu_iommu_ops = { - .detach_dev = smmu_iommu_detach_dev, - .map = smmu_iommu_map, - .unmap = smmu_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = smmu_iommu_iova_to_phys, - .pgsize_bitmap = SMMU_IOMMU_PGSIZES, - }; -@@ -1269,7 +1270,6 @@ static struct platform_driver tegra_smmu_driver = { - .probe = tegra_smmu_probe, - .remove = tegra_smmu_remove, - .driver = { -- .owner = THIS_MODULE, - .name = "tegra-smmu", - .pm = &tegra_smmu_pm_ops, - .of_match_table = tegra_smmu_of_match, -diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig -index b21f12f..e72e239 100644 ---- a/drivers/irqchip/Kconfig -+++ b/drivers/irqchip/Kconfig -@@ -5,8 +5,15 @@ config IRQCHIP - config ARM_GIC - bool - select IRQ_DOMAIN -+ select IRQ_DOMAIN_HIERARCHY - select MULTI_IRQ_HANDLER - -+config ARM_GIC_V2M -+ bool -+ depends on ARM_GIC -+ depends on PCI && PCI_MSI -+ select PCI_MSI_IRQ_DOMAIN -+ - config GIC_NON_BANKED - bool - -@@ -14,6 +21,11 @@ config ARM_GIC_V3 - bool - select IRQ_DOMAIN - select MULTI_IRQ_HANDLER -+ select IRQ_DOMAIN_HIERARCHY -+ -+config ARM_GIC_V3_ITS -+ bool 
-+ select PCI_MSI_IRQ_DOMAIN - - config ARM_NVIC - bool -diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile -index 173bb5f..1c4f9a4 100644 ---- a/drivers/irqchip/Makefile -+++ b/drivers/irqchip/Makefile -@@ -19,7 +19,9 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o - obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o - obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o - obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o -+obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o - obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o -+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o - obj-$(CONFIG_ARM_NVIC) += irq-nvic.o - obj-$(CONFIG_ARM_VIC) += irq-vic.o - obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o -diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c -index 41ac85a..615075d 100644 ---- a/drivers/irqchip/irq-armada-370-xp.c -+++ b/drivers/irqchip/irq-armada-370-xp.c -@@ -131,7 +131,7 @@ static void armada_370_xp_free_msi(int hwirq) - mutex_unlock(&msi_used_lock); - } - --static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, -+static int armada_370_xp_setup_msi_irq(struct msi_controller *chip, - struct pci_dev *pdev, - struct msi_desc *desc) - { -@@ -158,11 +158,11 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, - msg.address_hi = 0; - msg.data = 0xf00 | (hwirq + 16); - -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - return 0; - } - --static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, -+static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip, - unsigned int irq) - { - struct irq_data *d = irq_get_irq_data(irq); -@@ -174,10 +174,10 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, - - static struct irq_chip armada_370_xp_msi_irq_chip = { - .name = "armada_370_xp_msi_irq", -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_enable = 
pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq, -@@ -197,7 +197,7 @@ static const struct irq_domain_ops armada_370_xp_msi_irq_ops = { - static int armada_370_xp_msi_init(struct device_node *node, - phys_addr_t main_int_phys_base) - { -- struct msi_chip *msi_chip; -+ struct msi_controller *msi_chip; - u32 reg; - int ret; - -diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c -index 9a2cf3c..27fdd8c 100644 ---- a/drivers/irqchip/irq-atmel-aic.c -+++ b/drivers/irqchip/irq-atmel-aic.c -@@ -65,11 +65,11 @@ aic_handle(struct pt_regs *regs) - u32 irqnr; - u32 irqstat; - -- irqnr = irq_reg_readl(gc->reg_base + AT91_AIC_IVR); -- irqstat = irq_reg_readl(gc->reg_base + AT91_AIC_ISR); -+ irqnr = irq_reg_readl(gc, AT91_AIC_IVR); -+ irqstat = irq_reg_readl(gc, AT91_AIC_ISR); - - if (!irqstat) -- irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR); -+ irq_reg_writel(gc, 0, AT91_AIC_EOICR); - else - handle_domain_irq(aic_domain, irqnr, regs); - } -@@ -80,7 +80,7 @@ static int aic_retrigger(struct irq_data *d) - - /* Enable interrupt on AIC5 */ - irq_gc_lock(gc); -- irq_reg_writel(d->mask, gc->reg_base + AT91_AIC_ISCR); -+ irq_reg_writel(gc, d->mask, AT91_AIC_ISCR); - irq_gc_unlock(gc); - - return 0; -@@ -92,12 +92,12 @@ static int aic_set_type(struct irq_data *d, unsigned type) - unsigned int smr; - int ret; - -- smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(d->hwirq)); -+ smr = irq_reg_readl(gc, AT91_AIC_SMR(d->hwirq)); - ret = aic_common_set_type(d, type, &smr); - if (ret) - return ret; - -- irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(d->hwirq)); -+ irq_reg_writel(gc, smr, AT91_AIC_SMR(d->hwirq)); - - return 0; - } -@@ -108,8 +108,8 @@ static void aic_suspend(struct irq_data *d) - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - - irq_gc_lock(gc); -- 
irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IDCR); -- irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IECR); -+ irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR); -+ irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR); - irq_gc_unlock(gc); - } - -@@ -118,8 +118,8 @@ static void aic_resume(struct irq_data *d) - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - - irq_gc_lock(gc); -- irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IDCR); -- irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IECR); -+ irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR); -+ irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR); - irq_gc_unlock(gc); - } - -@@ -128,8 +128,8 @@ static void aic_pm_shutdown(struct irq_data *d) - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - - irq_gc_lock(gc); -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR); -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); - irq_gc_unlock(gc); - } - #else -@@ -148,24 +148,24 @@ static void __init aic_hw_init(struct irq_domain *domain) - * will not Lock out nIRQ - */ - for (i = 0; i < 8; i++) -- irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR); -+ irq_reg_writel(gc, 0, AT91_AIC_EOICR); - - /* - * Spurious Interrupt ID in Spurious Vector Register. 
- * When there is no current interrupt, the IRQ Vector Register - * reads the value stored in AIC_SPU - */ -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_SPU); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_SPU); - - /* No debugging in AIC: Debug (Protect) Control Register */ -- irq_reg_writel(0, gc->reg_base + AT91_AIC_DCR); -+ irq_reg_writel(gc, 0, AT91_AIC_DCR); - - /* Disable and clear all interrupts initially */ -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR); -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); - - for (i = 0; i < 32; i++) -- irq_reg_writel(i, gc->reg_base + AT91_AIC_SVR(i)); -+ irq_reg_writel(gc, i, AT91_AIC_SVR(i)); - } - - static int aic_irq_domain_xlate(struct irq_domain *d, -@@ -195,10 +195,10 @@ static int aic_irq_domain_xlate(struct irq_domain *d, - gc = dgc->gc[idx]; - - irq_gc_lock(gc); -- smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(*out_hwirq)); -+ smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); - ret = aic_common_set_priority(intspec[2], &smr); - if (!ret) -- irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(*out_hwirq)); -+ irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); - irq_gc_unlock(gc); - - return ret; -diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c -index a11aae8..a2e8c3f 100644 ---- a/drivers/irqchip/irq-atmel-aic5.c -+++ b/drivers/irqchip/irq-atmel-aic5.c -@@ -75,11 +75,11 @@ aic5_handle(struct pt_regs *regs) - u32 irqnr; - u32 irqstat; - -- irqnr = irq_reg_readl(gc->reg_base + AT91_AIC5_IVR); -- irqstat = irq_reg_readl(gc->reg_base + AT91_AIC5_ISR); -+ irqnr = irq_reg_readl(gc, AT91_AIC5_IVR); -+ irqstat = irq_reg_readl(gc, AT91_AIC5_ISR); - - if (!irqstat) -- irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR); -+ irq_reg_writel(gc, 0, AT91_AIC5_EOICR); - else - handle_domain_irq(aic5_domain, irqnr, regs); - } -@@ -92,8 +92,8 @@ static void aic5_mask(struct 
irq_data *d) - - /* Disable interrupt on AIC5 */ - irq_gc_lock(gc); -- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); -- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR); -+ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); -+ irq_reg_writel(gc, 1, AT91_AIC5_IDCR); - gc->mask_cache &= ~d->mask; - irq_gc_unlock(gc); - } -@@ -106,8 +106,8 @@ static void aic5_unmask(struct irq_data *d) - - /* Enable interrupt on AIC5 */ - irq_gc_lock(gc); -- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); -- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IECR); -+ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); -+ irq_reg_writel(gc, 1, AT91_AIC5_IECR); - gc->mask_cache |= d->mask; - irq_gc_unlock(gc); - } -@@ -120,8 +120,8 @@ static int aic5_retrigger(struct irq_data *d) - - /* Enable interrupt on AIC5 */ - irq_gc_lock(gc); -- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); -- irq_reg_writel(1, gc->reg_base + AT91_AIC5_ISCR); -+ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); -+ irq_reg_writel(gc, 1, AT91_AIC5_ISCR); - irq_gc_unlock(gc); - - return 0; -@@ -136,11 +136,11 @@ static int aic5_set_type(struct irq_data *d, unsigned type) - int ret; - - irq_gc_lock(gc); -- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); -- smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR); -+ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); -+ smr = irq_reg_readl(gc, AT91_AIC5_SMR); - ret = aic_common_set_type(d, type, &smr); - if (!ret) -- irq_reg_writel(smr, gc->reg_base + AT91_AIC5_SMR); -+ irq_reg_writel(gc, smr, AT91_AIC5_SMR); - irq_gc_unlock(gc); - - return ret; -@@ -162,12 +162,11 @@ static void aic5_suspend(struct irq_data *d) - if ((mask & gc->mask_cache) == (mask & gc->wake_active)) - continue; - -- irq_reg_writel(i + gc->irq_base, -- bgc->reg_base + AT91_AIC5_SSR); -+ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); - if (mask & gc->wake_active) -- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_IECR); - else -- irq_reg_writel(1, 
bgc->reg_base + AT91_AIC5_IDCR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); - } - irq_gc_unlock(bgc); - } -@@ -187,12 +186,11 @@ static void aic5_resume(struct irq_data *d) - if ((mask & gc->mask_cache) == (mask & gc->wake_active)) - continue; - -- irq_reg_writel(i + gc->irq_base, -- bgc->reg_base + AT91_AIC5_SSR); -+ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); - if (mask & gc->mask_cache) -- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_IECR); - else -- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); - } - irq_gc_unlock(bgc); - } -@@ -207,10 +205,9 @@ static void aic5_pm_shutdown(struct irq_data *d) - - irq_gc_lock(bgc); - for (i = 0; i < dgc->irqs_per_chip; i++) { -- irq_reg_writel(i + gc->irq_base, -- bgc->reg_base + AT91_AIC5_SSR); -- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR); -- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_ICCR); -+ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_ICCR); - } - irq_gc_unlock(bgc); - } -@@ -230,24 +227,24 @@ static void __init aic5_hw_init(struct irq_domain *domain) - * will not Lock out nIRQ - */ - for (i = 0; i < 8; i++) -- irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR); -+ irq_reg_writel(gc, 0, AT91_AIC5_EOICR); - - /* - * Spurious Interrupt ID in Spurious Vector Register. 
- * When there is no current interrupt, the IRQ Vector Register - * reads the value stored in AIC_SPU - */ -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC5_SPU); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC5_SPU); - - /* No debugging in AIC: Debug (Protect) Control Register */ -- irq_reg_writel(0, gc->reg_base + AT91_AIC5_DCR); -+ irq_reg_writel(gc, 0, AT91_AIC5_DCR); - - /* Disable and clear all interrupts initially */ - for (i = 0; i < domain->revmap_size; i++) { -- irq_reg_writel(i, gc->reg_base + AT91_AIC5_SSR); -- irq_reg_writel(i, gc->reg_base + AT91_AIC5_SVR); -- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR); -- irq_reg_writel(1, gc->reg_base + AT91_AIC5_ICCR); -+ irq_reg_writel(gc, i, AT91_AIC5_SSR); -+ irq_reg_writel(gc, i, AT91_AIC5_SVR); -+ irq_reg_writel(gc, 1, AT91_AIC5_IDCR); -+ irq_reg_writel(gc, 1, AT91_AIC5_ICCR); - } - } - -@@ -273,11 +270,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, - gc = dgc->gc[0]; - - irq_gc_lock(gc); -- irq_reg_writel(*out_hwirq, gc->reg_base + AT91_AIC5_SSR); -- smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR); -+ irq_reg_writel(gc, *out_hwirq, AT91_AIC5_SSR); -+ smr = irq_reg_readl(gc, AT91_AIC5_SMR); - ret = aic_common_set_priority(intspec[2], &smr); - if (!ret) -- irq_reg_writel(intspec[2] | smr, gc->reg_base + AT91_AIC5_SMR); -+ irq_reg_writel(gc, intspec[2] | smr, AT91_AIC5_SMR); - irq_gc_unlock(gc); - - return ret; -diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c -index 61541ff..ad96ebb 100644 ---- a/drivers/irqchip/irq-gic-common.c -+++ b/drivers/irqchip/irq-gic-common.c -@@ -21,7 +21,7 @@ - - #include "irq-gic-common.h" - --void gic_configure_irq(unsigned int irq, unsigned int type, -+int gic_configure_irq(unsigned int irq, unsigned int type, - void __iomem *base, void (*sync_access)(void)) - { - u32 enablemask = 1 << (irq % 32); -@@ -29,16 +29,17 @@ void gic_configure_irq(unsigned int irq, unsigned int type, - u32 confmask = 0x2 << ((irq % 16) * 2); - u32 
confoff = (irq / 16) * 4; - bool enabled = false; -- u32 val; -+ u32 val, oldval; -+ int ret = 0; - - /* - * Read current configuration register, and insert the config - * for "irq", depending on "type". - */ -- val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); -- if (type == IRQ_TYPE_LEVEL_HIGH) -+ val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff); -+ if (type & IRQ_TYPE_LEVEL_MASK) - val &= ~confmask; -- else if (type == IRQ_TYPE_EDGE_RISING) -+ else if (type & IRQ_TYPE_EDGE_BOTH) - val |= confmask; - - /* -@@ -54,15 +55,20 @@ void gic_configure_irq(unsigned int irq, unsigned int type, - - /* - * Write back the new configuration, and possibly re-enable -- * the interrupt. -+ * the interrupt. If we tried to write a new configuration and failed, -+ * return an error. - */ - writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); -+ if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval) -+ ret = -EINVAL; - - if (enabled) - writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); - - if (sync_access) - sync_access(); -+ -+ return ret; - } - - void __init gic_dist_config(void __iomem *base, int gic_irqs, -diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h -index b41f024..35a9884 100644 ---- a/drivers/irqchip/irq-gic-common.h -+++ b/drivers/irqchip/irq-gic-common.h -@@ -20,7 +20,7 @@ - #include - #include - --void gic_configure_irq(unsigned int irq, unsigned int type, -+int gic_configure_irq(unsigned int irq, unsigned int type, - void __iomem *base, void (*sync_access)(void)); - void gic_dist_config(void __iomem *base, int gic_irqs, - void (*sync_access)(void)); -diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c -new file mode 100644 -index 0000000..fdf7065 ---- /dev/null -+++ b/drivers/irqchip/irq-gic-v2m.c -@@ -0,0 +1,333 @@ -+/* -+ * ARM GIC v2m MSI(-X) support -+ * Support for Message Signaled Interrupts for systems that -+ * implement ARM Generic Interrupt 
Controller: GICv2m. -+ * -+ * Copyright (C) 2014 Advanced Micro Devices, Inc. -+ * Authors: Suravee Suthikulpanit -+ * Harish Kasiviswanathan -+ * Brandon Anderson -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. -+ */ -+ -+#define pr_fmt(fmt) "GICv2m: " fmt -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+* MSI_TYPER: -+* [31:26] Reserved -+* [25:16] lowest SPI assigned to MSI -+* [15:10] Reserved -+* [9:0] Numer of SPIs assigned to MSI -+*/ -+#define V2M_MSI_TYPER 0x008 -+#define V2M_MSI_TYPER_BASE_SHIFT 16 -+#define V2M_MSI_TYPER_BASE_MASK 0x3FF -+#define V2M_MSI_TYPER_NUM_MASK 0x3FF -+#define V2M_MSI_SETSPI_NS 0x040 -+#define V2M_MIN_SPI 32 -+#define V2M_MAX_SPI 1019 -+ -+#define V2M_MSI_TYPER_BASE_SPI(x) \ -+ (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) -+ -+#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK) -+ -+struct v2m_data { -+ spinlock_t msi_cnt_lock; -+ struct msi_controller mchip; -+ struct resource res; /* GICv2m resource */ -+ void __iomem *base; /* GICv2m virt address */ -+ u32 spi_start; /* The SPI number that MSIs start */ -+ u32 nr_spis; /* The number of SPIs for MSIs */ -+ unsigned long *bm; /* MSI vector bitmap */ -+ struct irq_domain *domain; -+}; -+ -+static void gicv2m_mask_msi_irq(struct irq_data *d) -+{ -+ pci_msi_mask_irq(d); -+ irq_chip_mask_parent(d); -+} -+ -+static void gicv2m_unmask_msi_irq(struct irq_data *d) -+{ -+ pci_msi_unmask_irq(d); -+ irq_chip_unmask_parent(d); -+} -+ -+static struct irq_chip gicv2m_msi_irq_chip = { -+ .name = "MSI", -+ .irq_mask = gicv2m_mask_msi_irq, -+ .irq_unmask = gicv2m_unmask_msi_irq, -+ .irq_eoi = irq_chip_eoi_parent, -+ .irq_write_msi_msg = pci_msi_domain_write_msg, -+}; -+ -+static struct msi_domain_info gicv2m_msi_domain_info = { -+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | 
MSI_FLAG_USE_DEF_CHIP_OPS | -+ MSI_FLAG_PCI_MSIX), -+ .chip = &gicv2m_msi_irq_chip, -+}; -+ -+static int gicv2m_set_affinity(struct irq_data *irq_data, -+ const struct cpumask *mask, bool force) -+{ -+ int ret; -+ -+ ret = irq_chip_set_affinity_parent(irq_data, mask, force); -+ if (ret == IRQ_SET_MASK_OK) -+ ret = IRQ_SET_MASK_OK_DONE; -+ -+ return ret; -+} -+ -+static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -+{ -+ struct v2m_data *v2m = irq_data_get_irq_chip_data(data); -+ phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS; -+ -+ msg->address_hi = (u32) (addr >> 32); -+ msg->address_lo = (u32) (addr); -+ msg->data = data->hwirq; -+} -+ -+static struct irq_chip gicv2m_irq_chip = { -+ .name = "GICv2m", -+ .irq_mask = irq_chip_mask_parent, -+ .irq_unmask = irq_chip_unmask_parent, -+ .irq_eoi = irq_chip_eoi_parent, -+ .irq_set_affinity = gicv2m_set_affinity, -+ .irq_compose_msi_msg = gicv2m_compose_msi_msg, -+}; -+ -+static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, -+ unsigned int virq, -+ irq_hw_number_t hwirq) -+{ -+ struct of_phandle_args args; -+ struct irq_data *d; -+ int err; -+ -+ args.np = domain->parent->of_node; -+ args.args_count = 3; -+ args.args[0] = 0; -+ args.args[1] = hwirq - 32; -+ args.args[2] = IRQ_TYPE_EDGE_RISING; -+ -+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); -+ if (err) -+ return err; -+ -+ /* Configure the interrupt line to be edge */ -+ d = irq_domain_get_irq_data(domain->parent, virq); -+ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); -+ return 0; -+} -+ -+static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq) -+{ -+ int pos; -+ -+ pos = hwirq - v2m->spi_start; -+ if (pos < 0 || pos >= v2m->nr_spis) { -+ pr_err("Failed to teardown msi. 
Invalid hwirq %d\n", hwirq); -+ return; -+ } -+ -+ spin_lock(&v2m->msi_cnt_lock); -+ __clear_bit(pos, v2m->bm); -+ spin_unlock(&v2m->msi_cnt_lock); -+} -+ -+static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *args) -+{ -+ struct v2m_data *v2m = domain->host_data; -+ int hwirq, offset, err = 0; -+ -+ spin_lock(&v2m->msi_cnt_lock); -+ offset = find_first_zero_bit(v2m->bm, v2m->nr_spis); -+ if (offset < v2m->nr_spis) -+ __set_bit(offset, v2m->bm); -+ else -+ err = -ENOSPC; -+ spin_unlock(&v2m->msi_cnt_lock); -+ -+ if (err) -+ return err; -+ -+ hwirq = v2m->spi_start + offset; -+ -+ err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); -+ if (err) { -+ gicv2m_unalloc_msi(v2m, hwirq); -+ return err; -+ } -+ -+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, -+ &gicv2m_irq_chip, v2m); -+ -+ return 0; -+} -+ -+static void gicv2m_irq_domain_free(struct irq_domain *domain, -+ unsigned int virq, unsigned int nr_irqs) -+{ -+ struct irq_data *d = irq_domain_get_irq_data(domain, virq); -+ struct v2m_data *v2m = irq_data_get_irq_chip_data(d); -+ -+ BUG_ON(nr_irqs != 1); -+ gicv2m_unalloc_msi(v2m, d->hwirq); -+ irq_domain_free_irqs_parent(domain, virq, nr_irqs); -+} -+ -+static const struct irq_domain_ops gicv2m_domain_ops = { -+ .alloc = gicv2m_irq_domain_alloc, -+ .free = gicv2m_irq_domain_free, -+}; -+ -+static bool is_msi_spi_valid(u32 base, u32 num) -+{ -+ if (base < V2M_MIN_SPI) { -+ pr_err("Invalid MSI base SPI (base:%u)\n", base); -+ return false; -+ } -+ -+ if ((num == 0) || (base + num > V2M_MAX_SPI)) { -+ pr_err("Number of SPIs (%u) exceed maximum (%u)\n", -+ num, V2M_MAX_SPI - V2M_MIN_SPI + 1); -+ return false; -+ } -+ -+ return true; -+} -+ -+static int __init gicv2m_init_one(struct device_node *node, -+ struct irq_domain *parent) -+{ -+ int ret; -+ struct v2m_data *v2m; -+ -+ v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL); -+ if (!v2m) { -+ pr_err("Failed to allocate struct v2m_data.\n"); -+ return 
-ENOMEM; -+ } -+ -+ ret = of_address_to_resource(node, 0, &v2m->res); -+ if (ret) { -+ pr_err("Failed to allocate v2m resource.\n"); -+ goto err_free_v2m; -+ } -+ -+ v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res)); -+ if (!v2m->base) { -+ pr_err("Failed to map GICv2m resource\n"); -+ ret = -ENOMEM; -+ goto err_free_v2m; -+ } -+ -+ if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) && -+ !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) { -+ pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n", -+ v2m->spi_start, v2m->nr_spis); -+ } else { -+ u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER); -+ -+ v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer); -+ v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer); -+ } -+ -+ if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) { -+ ret = -EINVAL; -+ goto err_iounmap; -+ } -+ -+ v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis), -+ GFP_KERNEL); -+ if (!v2m->bm) { -+ ret = -ENOMEM; -+ goto err_iounmap; -+ } -+ -+ v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m); -+ if (!v2m->domain) { -+ pr_err("Failed to create GICv2m domain\n"); -+ ret = -ENOMEM; -+ goto err_free_bm; -+ } -+ -+ v2m->domain->parent = parent; -+ v2m->mchip.of_node = node; -+ v2m->mchip.domain = pci_msi_create_irq_domain(node, -+ &gicv2m_msi_domain_info, -+ v2m->domain); -+ if (!v2m->mchip.domain) { -+ pr_err("Failed to create MSI domain\n"); -+ ret = -ENOMEM; -+ goto err_free_domains; -+ } -+ -+ spin_lock_init(&v2m->msi_cnt_lock); -+ -+ ret = of_pci_msi_chip_add(&v2m->mchip); -+ if (ret) { -+ pr_err("Failed to add msi_chip.\n"); -+ goto err_free_domains; -+ } -+ -+ pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name, -+ (unsigned long)v2m->res.start, (unsigned long)v2m->res.end, -+ v2m->spi_start, (v2m->spi_start + v2m->nr_spis)); -+ -+ return 0; -+ -+err_free_domains: -+ if (v2m->mchip.domain) -+ irq_domain_remove(v2m->mchip.domain); -+ if (v2m->domain) -+ 
irq_domain_remove(v2m->domain); -+err_free_bm: -+ kfree(v2m->bm); -+err_iounmap: -+ iounmap(v2m->base); -+err_free_v2m: -+ kfree(v2m); -+ return ret; -+} -+ -+static struct of_device_id gicv2m_device_id[] = { -+ { .compatible = "arm,gic-v2m-frame", }, -+ {}, -+}; -+ -+int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent) -+{ -+ int ret = 0; -+ struct device_node *child; -+ -+ for (child = of_find_matching_node(node, gicv2m_device_id); child; -+ child = of_find_matching_node(child, gicv2m_device_id)) { -+ if (!of_find_property(child, "msi-controller", NULL)) -+ continue; -+ -+ ret = gicv2m_init_one(child, parent); -+ if (ret) { -+ of_node_put(node); -+ break; -+ } -+ } -+ -+ return ret; -+} -diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c -new file mode 100644 -index 0000000..d689158 ---- /dev/null -+++ b/drivers/irqchip/irq-gic-v3-its.c -@@ -0,0 +1,1630 @@ -+/* -+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. -+ * Author: Marc Zyngier -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include -+#include -+#include -+ -+#include "irqchip.h" -+ -+#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0) -+ -+#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) -+ -+/* -+ * Collection structure - just an ID, and a redistributor address to -+ * ping. We use one per CPU as a bag of interrupts assigned to this -+ * CPU. -+ */ -+struct its_collection { -+ u64 target_address; -+ u16 col_id; -+}; -+ -+/* -+ * The ITS structure - contains most of the infrastructure, with the -+ * msi_controller, the command queue, the collections, and the list of -+ * devices writing to it. -+ */ -+struct its_node { -+ raw_spinlock_t lock; -+ struct list_head entry; -+ struct msi_controller msi_chip; -+ struct irq_domain *domain; -+ void __iomem *base; -+ unsigned long phys_base; -+ struct its_cmd_block *cmd_base; -+ struct its_cmd_block *cmd_write; -+ void *tables[GITS_BASER_NR_REGS]; -+ struct its_collection *collections; -+ struct list_head its_device_list; -+ u64 flags; -+ u32 ite_size; -+}; -+ -+#define ITS_ITT_ALIGN SZ_256 -+ -+struct event_lpi_map { -+ unsigned long *lpi_map; -+ u16 *col_map; -+ irq_hw_number_t lpi_base; -+ int nr_lpis; -+}; -+ -+/* -+ * The ITS view of a device - belongs to an ITS, a collection, owns an -+ * interrupt translation table, and a list of interrupts. 
-+ */ -+struct its_device { -+ struct list_head entry; -+ struct its_node *its; -+ struct event_lpi_map event_map; -+ void *itt; -+ u32 nr_ites; -+ u32 device_id; -+}; -+ -+static LIST_HEAD(its_nodes); -+static DEFINE_SPINLOCK(its_lock); -+static struct device_node *gic_root_node; -+static struct rdists *gic_rdists; -+ -+#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) -+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) -+ -+static struct its_collection *dev_event_to_col(struct its_device *its_dev, -+ u32 event) -+{ -+ struct its_node *its = its_dev->its; -+ -+ return its->collections + its_dev->event_map.col_map[event]; -+} -+ -+/* -+ * ITS command descriptors - parameters to be encoded in a command -+ * block. -+ */ -+struct its_cmd_desc { -+ union { -+ struct { -+ struct its_device *dev; -+ u32 event_id; -+ } its_inv_cmd; -+ -+ struct { -+ struct its_device *dev; -+ u32 event_id; -+ } its_int_cmd; -+ -+ struct { -+ struct its_device *dev; -+ int valid; -+ } its_mapd_cmd; -+ -+ struct { -+ struct its_collection *col; -+ int valid; -+ } its_mapc_cmd; -+ -+ struct { -+ struct its_device *dev; -+ u32 phys_id; -+ u32 event_id; -+ } its_mapvi_cmd; -+ -+ struct { -+ struct its_device *dev; -+ struct its_collection *col; -+ u32 event_id; -+ } its_movi_cmd; -+ -+ struct { -+ struct its_device *dev; -+ u32 event_id; -+ } its_discard_cmd; -+ -+ struct { -+ struct its_collection *col; -+ } its_invall_cmd; -+ }; -+}; -+ -+/* -+ * The ITS command block, which is what the ITS actually parses. 
-+ */ -+struct its_cmd_block { -+ u64 raw_cmd[4]; -+}; -+ -+#define ITS_CMD_QUEUE_SZ SZ_64K -+#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) -+ -+typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, -+ struct its_cmd_desc *); -+ -+static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) -+{ -+ cmd->raw_cmd[0] &= ~0xffUL; -+ cmd->raw_cmd[0] |= cmd_nr; -+} -+ -+static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) -+{ -+ cmd->raw_cmd[0] &= BIT_ULL(32) - 1; -+ cmd->raw_cmd[0] |= ((u64)devid) << 32; -+} -+ -+static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) -+{ -+ cmd->raw_cmd[1] &= ~0xffffffffUL; -+ cmd->raw_cmd[1] |= id; -+} -+ -+static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) -+{ -+ cmd->raw_cmd[1] &= 0xffffffffUL; -+ cmd->raw_cmd[1] |= ((u64)phys_id) << 32; -+} -+ -+static void its_encode_size(struct its_cmd_block *cmd, u8 size) -+{ -+ cmd->raw_cmd[1] &= ~0x1fUL; -+ cmd->raw_cmd[1] |= size & 0x1f; -+} -+ -+static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) -+{ -+ cmd->raw_cmd[2] &= ~0xffffffffffffUL; -+ cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL; -+} -+ -+static void its_encode_valid(struct its_cmd_block *cmd, int valid) -+{ -+ cmd->raw_cmd[2] &= ~(1UL << 63); -+ cmd->raw_cmd[2] |= ((u64)!!valid) << 63; -+} -+ -+static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) -+{ -+ cmd->raw_cmd[2] &= ~(0xffffffffUL << 16); -+ cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16)); -+} -+ -+static void its_encode_collection(struct its_cmd_block *cmd, u16 col) -+{ -+ cmd->raw_cmd[2] &= ~0xffffUL; -+ cmd->raw_cmd[2] |= col; -+} -+ -+static inline void its_fixup_cmd(struct its_cmd_block *cmd) -+{ -+ /* Let's fixup BE commands */ -+ cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); -+ cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); -+ cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); -+ cmd->raw_cmd[3] = 
cpu_to_le64(cmd->raw_cmd[3]); -+} -+ -+static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ unsigned long itt_addr; -+ u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); -+ -+ itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); -+ itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); -+ -+ its_encode_cmd(cmd, GITS_CMD_MAPD); -+ its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); -+ its_encode_size(cmd, size - 1); -+ its_encode_itt(cmd, itt_addr); -+ its_encode_valid(cmd, desc->its_mapd_cmd.valid); -+ -+ its_fixup_cmd(cmd); -+ -+ return NULL; -+} -+ -+static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ its_encode_cmd(cmd, GITS_CMD_MAPC); -+ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); -+ its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); -+ its_encode_valid(cmd, desc->its_mapc_cmd.valid); -+ -+ its_fixup_cmd(cmd); -+ -+ return desc->its_mapc_cmd.col; -+} -+ -+static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ struct its_collection *col; -+ -+ col = dev_event_to_col(desc->its_mapvi_cmd.dev, -+ desc->its_mapvi_cmd.event_id); -+ -+ its_encode_cmd(cmd, GITS_CMD_MAPVI); -+ its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id); -+ its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id); -+ its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id); -+ its_encode_collection(cmd, col->col_id); -+ -+ its_fixup_cmd(cmd); -+ -+ return col; -+} -+ -+static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ struct its_collection *col; -+ -+ col = dev_event_to_col(desc->its_movi_cmd.dev, -+ desc->its_movi_cmd.event_id); -+ -+ its_encode_cmd(cmd, GITS_CMD_MOVI); -+ its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); -+ its_encode_event_id(cmd, desc->its_movi_cmd.event_id); -+ its_encode_collection(cmd, 
desc->its_movi_cmd.col->col_id); -+ -+ its_fixup_cmd(cmd); -+ -+ return col; -+} -+ -+static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ struct its_collection *col; -+ -+ col = dev_event_to_col(desc->its_discard_cmd.dev, -+ desc->its_discard_cmd.event_id); -+ -+ its_encode_cmd(cmd, GITS_CMD_DISCARD); -+ its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); -+ its_encode_event_id(cmd, desc->its_discard_cmd.event_id); -+ -+ its_fixup_cmd(cmd); -+ -+ return col; -+} -+ -+static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ struct its_collection *col; -+ -+ col = dev_event_to_col(desc->its_inv_cmd.dev, -+ desc->its_inv_cmd.event_id); -+ -+ its_encode_cmd(cmd, GITS_CMD_INV); -+ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); -+ its_encode_event_id(cmd, desc->its_inv_cmd.event_id); -+ -+ its_fixup_cmd(cmd); -+ -+ return col; -+} -+ -+static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ its_encode_cmd(cmd, GITS_CMD_INVALL); -+ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); -+ -+ its_fixup_cmd(cmd); -+ -+ return NULL; -+} -+ -+static u64 its_cmd_ptr_to_offset(struct its_node *its, -+ struct its_cmd_block *ptr) -+{ -+ return (ptr - its->cmd_base) * sizeof(*ptr); -+} -+ -+static int its_queue_full(struct its_node *its) -+{ -+ int widx; -+ int ridx; -+ -+ widx = its->cmd_write - its->cmd_base; -+ ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); -+ -+ /* This is incredibly unlikely to happen, unless the ITS locks up. */ -+ if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) -+ return 1; -+ -+ return 0; -+} -+ -+static struct its_cmd_block *its_allocate_entry(struct its_node *its) -+{ -+ struct its_cmd_block *cmd; -+ u32 count = 1000000; /* 1s! 
*/ -+ -+ while (its_queue_full(its)) { -+ count--; -+ if (!count) { -+ pr_err_ratelimited("ITS queue not draining\n"); -+ return NULL; -+ } -+ cpu_relax(); -+ udelay(1); -+ } -+ -+ cmd = its->cmd_write++; -+ -+ /* Handle queue wrapping */ -+ if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) -+ its->cmd_write = its->cmd_base; -+ -+ return cmd; -+} -+ -+static struct its_cmd_block *its_post_commands(struct its_node *its) -+{ -+ u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); -+ -+ writel_relaxed(wr, its->base + GITS_CWRITER); -+ -+ return its->cmd_write; -+} -+ -+static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) -+{ -+ /* -+ * Make sure the commands written to memory are observable by -+ * the ITS. -+ */ -+ if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) -+ __flush_dcache_area(cmd, sizeof(*cmd)); -+ else -+ dsb(ishst); -+} -+ -+static void its_wait_for_range_completion(struct its_node *its, -+ struct its_cmd_block *from, -+ struct its_cmd_block *to) -+{ -+ u64 rd_idx, from_idx, to_idx; -+ u32 count = 1000000; /* 1s! */ -+ -+ from_idx = its_cmd_ptr_to_offset(its, from); -+ to_idx = its_cmd_ptr_to_offset(its, to); -+ -+ while (1) { -+ rd_idx = readl_relaxed(its->base + GITS_CREADR); -+ if (rd_idx >= to_idx || rd_idx < from_idx) -+ break; -+ -+ count--; -+ if (!count) { -+ pr_err_ratelimited("ITS queue timeout\n"); -+ return; -+ } -+ cpu_relax(); -+ udelay(1); -+ } -+} -+ -+static void its_send_single_command(struct its_node *its, -+ its_cmd_builder_t builder, -+ struct its_cmd_desc *desc) -+{ -+ struct its_cmd_block *cmd, *sync_cmd, *next_cmd; -+ struct its_collection *sync_col; -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&its->lock, flags); -+ -+ cmd = its_allocate_entry(its); -+ if (!cmd) { /* We're soooooo screewed... 
*/ -+ pr_err_ratelimited("ITS can't allocate, dropping command\n"); -+ raw_spin_unlock_irqrestore(&its->lock, flags); -+ return; -+ } -+ sync_col = builder(cmd, desc); -+ its_flush_cmd(its, cmd); -+ -+ if (sync_col) { -+ sync_cmd = its_allocate_entry(its); -+ if (!sync_cmd) { -+ pr_err_ratelimited("ITS can't SYNC, skipping\n"); -+ goto post; -+ } -+ its_encode_cmd(sync_cmd, GITS_CMD_SYNC); -+ its_encode_target(sync_cmd, sync_col->target_address); -+ its_fixup_cmd(sync_cmd); -+ its_flush_cmd(its, sync_cmd); -+ } -+ -+post: -+ next_cmd = its_post_commands(its); -+ raw_spin_unlock_irqrestore(&its->lock, flags); -+ -+ its_wait_for_range_completion(its, cmd, next_cmd); -+} -+ -+static void its_send_inv(struct its_device *dev, u32 event_id) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_inv_cmd.dev = dev; -+ desc.its_inv_cmd.event_id = event_id; -+ -+ its_send_single_command(dev->its, its_build_inv_cmd, &desc); -+} -+ -+static void its_send_mapd(struct its_device *dev, int valid) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_mapd_cmd.dev = dev; -+ desc.its_mapd_cmd.valid = !!valid; -+ -+ its_send_single_command(dev->its, its_build_mapd_cmd, &desc); -+} -+ -+static void its_send_mapc(struct its_node *its, struct its_collection *col, -+ int valid) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_mapc_cmd.col = col; -+ desc.its_mapc_cmd.valid = !!valid; -+ -+ its_send_single_command(its, its_build_mapc_cmd, &desc); -+} -+ -+static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_mapvi_cmd.dev = dev; -+ desc.its_mapvi_cmd.phys_id = irq_id; -+ desc.its_mapvi_cmd.event_id = id; -+ -+ its_send_single_command(dev->its, its_build_mapvi_cmd, &desc); -+} -+ -+static void its_send_movi(struct its_device *dev, -+ struct its_collection *col, u32 id) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_movi_cmd.dev = dev; -+ desc.its_movi_cmd.col = col; -+ desc.its_movi_cmd.event_id = id; -+ -+ 
its_send_single_command(dev->its, its_build_movi_cmd, &desc); -+} -+ -+static void its_send_discard(struct its_device *dev, u32 id) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_discard_cmd.dev = dev; -+ desc.its_discard_cmd.event_id = id; -+ -+ its_send_single_command(dev->its, its_build_discard_cmd, &desc); -+} -+ -+static void its_send_invall(struct its_node *its, struct its_collection *col) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_invall_cmd.col = col; -+ -+ its_send_single_command(its, its_build_invall_cmd, &desc); -+} -+ -+/* -+ * irqchip functions - assumes MSI, mostly. -+ */ -+ -+static inline u32 its_get_event_id(struct irq_data *d) -+{ -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ return d->hwirq - its_dev->event_map.lpi_base; -+} -+ -+static void lpi_set_config(struct irq_data *d, bool enable) -+{ -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ irq_hw_number_t hwirq = d->hwirq; -+ u32 id = its_get_event_id(d); -+ u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192; -+ -+ if (enable) -+ *cfg |= LPI_PROP_ENABLED; -+ else -+ *cfg &= ~LPI_PROP_ENABLED; -+ -+ /* -+ * Make the above write visible to the redistributors. -+ * And yes, we're flushing exactly: One. Single. Byte. -+ * Humpf... 
-+ */ -+ if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) -+ __flush_dcache_area(cfg, sizeof(*cfg)); -+ else -+ dsb(ishst); -+ its_send_inv(its_dev, id); -+} -+ -+static void its_mask_irq(struct irq_data *d) -+{ -+ lpi_set_config(d, false); -+} -+ -+static void its_unmask_irq(struct irq_data *d) -+{ -+ lpi_set_config(d, true); -+} -+ -+static void its_eoi_irq(struct irq_data *d) -+{ -+ gic_write_eoir(d->hwirq); -+} -+ -+static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, -+ bool force) -+{ -+ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ struct its_collection *target_col; -+ u32 id = its_get_event_id(d); -+ -+ if (cpu >= nr_cpu_ids) -+ return -EINVAL; -+ -+ target_col = &its_dev->its->collections[cpu]; -+ its_send_movi(its_dev, target_col, id); -+ its_dev->event_map.col_map[id] = cpu; -+ -+ return IRQ_SET_MASK_OK_DONE; -+} -+ -+static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) -+{ -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ struct its_node *its; -+ u64 addr; -+ -+ its = its_dev->its; -+ addr = its->phys_base + GITS_TRANSLATER; -+ -+ msg->address_lo = addr & ((1UL << 32) - 1); -+ msg->address_hi = addr >> 32; -+ msg->data = its_get_event_id(d); -+} -+ -+static struct irq_chip its_irq_chip = { -+ .name = "ITS", -+ .irq_mask = its_mask_irq, -+ .irq_unmask = its_unmask_irq, -+ .irq_eoi = its_eoi_irq, -+ .irq_set_affinity = its_set_affinity, -+ .irq_compose_msi_msg = its_irq_compose_msi_msg, -+}; -+ -+static void its_mask_msi_irq(struct irq_data *d) -+{ -+ pci_msi_mask_irq(d); -+ irq_chip_mask_parent(d); -+} -+ -+static void its_unmask_msi_irq(struct irq_data *d) -+{ -+ pci_msi_unmask_irq(d); -+ irq_chip_unmask_parent(d); -+} -+ -+static struct irq_chip its_msi_irq_chip = { -+ .name = "ITS-MSI", -+ .irq_unmask = its_unmask_msi_irq, -+ .irq_mask = its_mask_msi_irq, -+ .irq_eoi = irq_chip_eoi_parent, -+ 
.irq_write_msi_msg = pci_msi_domain_write_msg, -+}; -+ -+/* -+ * How we allocate LPIs: -+ * -+ * The GIC has id_bits bits for interrupt identifiers. From there, we -+ * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as -+ * we allocate LPIs by chunks of 32, we can shift the whole thing by 5 -+ * bits to the right. -+ * -+ * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. -+ */ -+#define IRQS_PER_CHUNK_SHIFT 5 -+#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) -+ -+static unsigned long *lpi_bitmap; -+static u32 lpi_chunks; -+static DEFINE_SPINLOCK(lpi_lock); -+ -+static int its_lpi_to_chunk(int lpi) -+{ -+ return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT; -+} -+ -+static int its_chunk_to_lpi(int chunk) -+{ -+ return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; -+} -+ -+static int its_lpi_init(u32 id_bits) -+{ -+ lpi_chunks = its_lpi_to_chunk(1UL << id_bits); -+ -+ lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long), -+ GFP_KERNEL); -+ if (!lpi_bitmap) { -+ lpi_chunks = 0; -+ return -ENOMEM; -+ } -+ -+ pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks); -+ return 0; -+} -+ -+static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) -+{ -+ unsigned long *bitmap = NULL; -+ int chunk_id; -+ int nr_chunks; -+ int i; -+ -+ nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK); -+ -+ spin_lock(&lpi_lock); -+ -+ do { -+ chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks, -+ 0, nr_chunks, 0); -+ if (chunk_id < lpi_chunks) -+ break; -+ -+ nr_chunks--; -+ } while (nr_chunks > 0); -+ -+ if (!nr_chunks) -+ goto out; -+ -+ bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long), -+ GFP_ATOMIC); -+ if (!bitmap) -+ goto out; -+ -+ for (i = 0; i < nr_chunks; i++) -+ set_bit(chunk_id + i, lpi_bitmap); -+ -+ *base = its_chunk_to_lpi(chunk_id); -+ *nr_ids = nr_chunks * IRQS_PER_CHUNK; -+ -+out: -+ spin_unlock(&lpi_lock); -+ -+ if (!bitmap) -+ *base = *nr_ids = 0; -+ -+ return bitmap; -+} 
-+ -+static void its_lpi_free(struct event_lpi_map *map) -+{ -+ int base = map->lpi_base; -+ int nr_ids = map->nr_lpis; -+ int lpi; -+ -+ spin_lock(&lpi_lock); -+ -+ for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { -+ int chunk = its_lpi_to_chunk(lpi); -+ BUG_ON(chunk > lpi_chunks); -+ if (test_bit(chunk, lpi_bitmap)) { -+ clear_bit(chunk, lpi_bitmap); -+ } else { -+ pr_err("Bad LPI chunk %d\n", chunk); -+ } -+ } -+ -+ spin_unlock(&lpi_lock); -+ -+ kfree(map->lpi_map); -+ kfree(map->col_map); -+} -+ -+/* -+ * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to -+ * deal with (one configuration byte per interrupt). PENDBASE has to -+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). -+ */ -+#define LPI_PROPBASE_SZ SZ_64K -+#define LPI_PENDBASE_SZ (LPI_PROPBASE_SZ / 8 + SZ_1K) -+ -+/* -+ * This is how many bits of ID we need, including the useless ones. -+ */ -+#define LPI_NRBITS ilog2(LPI_PROPBASE_SZ + SZ_8K) -+ -+#define LPI_PROP_DEFAULT_PRIO 0xa0 -+ -+static int __init its_alloc_lpi_tables(void) -+{ -+ phys_addr_t paddr; -+ -+ gic_rdists->prop_page = alloc_pages(GFP_NOWAIT, -+ get_order(LPI_PROPBASE_SZ)); -+ if (!gic_rdists->prop_page) { -+ pr_err("Failed to allocate PROPBASE\n"); -+ return -ENOMEM; -+ } -+ -+ paddr = page_to_phys(gic_rdists->prop_page); -+ pr_info("GIC: using LPI property table @%pa\n", &paddr); -+ -+ /* Priority 0xa0, Group-1, disabled */ -+ memset(page_address(gic_rdists->prop_page), -+ LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, -+ LPI_PROPBASE_SZ); -+ -+ /* Make sure the GIC will observe the written configuration */ -+ __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ); -+ -+ return 0; -+} -+ -+static const char *its_base_type_string[] = { -+ [GITS_BASER_TYPE_DEVICE] = "Devices", -+ [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", -+ [GITS_BASER_TYPE_CPU] = "Physical CPUs", -+ [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", -+ [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", -+ 
[GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", -+ [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", -+}; -+ -+static void its_free_tables(struct its_node *its) -+{ -+ int i; -+ -+ for (i = 0; i < GITS_BASER_NR_REGS; i++) { -+ if (its->tables[i]) { -+ free_page((unsigned long)its->tables[i]); -+ its->tables[i] = NULL; -+ } -+ } -+} -+ -+static int its_alloc_tables(struct its_node *its) -+{ -+ int err; -+ int i; -+ int psz = SZ_64K; -+ u64 shr = GITS_BASER_InnerShareable; -+ u64 cache = GITS_BASER_WaWb; -+ -+ for (i = 0; i < GITS_BASER_NR_REGS; i++) { -+ u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); -+ u64 type = GITS_BASER_TYPE(val); -+ u64 entry_size = GITS_BASER_ENTRY_SIZE(val); -+ int order = get_order(psz); -+ int alloc_size; -+ u64 tmp; -+ void *base; -+ -+ if (type == GITS_BASER_TYPE_NONE) -+ continue; -+ -+ /* -+ * Allocate as many entries as required to fit the -+ * range of device IDs that the ITS can grok... The ID -+ * space being incredibly sparse, this results in a -+ * massive waste of memory. -+ * -+ * For other tables, only allocate a single page. -+ */ -+ if (type == GITS_BASER_TYPE_DEVICE) { -+ u64 typer = readq_relaxed(its->base + GITS_TYPER); -+ u32 ids = GITS_TYPER_DEVBITS(typer); -+ -+ /* -+ * 'order' was initialized earlier to the default page -+ * granule of the the ITS. We can't have an allocation -+ * smaller than that. If the requested allocation -+ * is smaller, round up to the default page granule. 
-+ */ -+ order = max(get_order((1UL << ids) * entry_size), -+ order); -+ if (order >= MAX_ORDER) { -+ order = MAX_ORDER - 1; -+ pr_warn("%s: Device Table too large, reduce its page order to %u\n", -+ its->msi_chip.of_node->full_name, order); -+ } -+ } -+ -+ alloc_size = (1 << order) * PAGE_SIZE; -+ base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); -+ if (!base) { -+ err = -ENOMEM; -+ goto out_free; -+ } -+ -+ its->tables[i] = base; -+ -+retry_baser: -+ val = (virt_to_phys(base) | -+ (type << GITS_BASER_TYPE_SHIFT) | -+ ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | -+ cache | -+ shr | -+ GITS_BASER_VALID); -+ -+ switch (psz) { -+ case SZ_4K: -+ val |= GITS_BASER_PAGE_SIZE_4K; -+ break; -+ case SZ_16K: -+ val |= GITS_BASER_PAGE_SIZE_16K; -+ break; -+ case SZ_64K: -+ val |= GITS_BASER_PAGE_SIZE_64K; -+ break; -+ } -+ -+ val |= (alloc_size / psz) - 1; -+ -+ writeq_relaxed(val, its->base + GITS_BASER + i * 8); -+ tmp = readq_relaxed(its->base + GITS_BASER + i * 8); -+ -+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { -+ /* -+ * Shareability didn't stick. Just use -+ * whatever the read reported, which is likely -+ * to be the only thing this redistributor -+ * supports. If that's zero, make it -+ * non-cacheable as well. -+ */ -+ shr = tmp & GITS_BASER_SHAREABILITY_MASK; -+ if (!shr) { -+ cache = GITS_BASER_nC; -+ __flush_dcache_area(base, alloc_size); -+ } -+ goto retry_baser; -+ } -+ -+ if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { -+ /* -+ * Page size didn't stick. Let's try a smaller -+ * size and retry. If we reach 4K, then -+ * something is horribly wrong... 
-+ */ -+ switch (psz) { -+ case SZ_16K: -+ psz = SZ_4K; -+ goto retry_baser; -+ case SZ_64K: -+ psz = SZ_16K; -+ goto retry_baser; -+ } -+ } -+ -+ if (val != tmp) { -+ pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n", -+ its->msi_chip.of_node->full_name, i, -+ (unsigned long) val, (unsigned long) tmp); -+ err = -ENXIO; -+ goto out_free; -+ } -+ -+ pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", -+ (int)(alloc_size / entry_size), -+ its_base_type_string[type], -+ (unsigned long)virt_to_phys(base), -+ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); -+ } -+ -+ return 0; -+ -+out_free: -+ its_free_tables(its); -+ -+ return err; -+} -+ -+static int its_alloc_collections(struct its_node *its) -+{ -+ its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections), -+ GFP_KERNEL); -+ if (!its->collections) -+ return -ENOMEM; -+ -+ return 0; -+} -+ -+static void its_cpu_init_lpis(void) -+{ -+ void __iomem *rbase = gic_data_rdist_rd_base(); -+ struct page *pend_page; -+ u64 val, tmp; -+ -+ /* If we didn't allocate the pending table yet, do it now */ -+ pend_page = gic_data_rdist()->pend_page; -+ if (!pend_page) { -+ phys_addr_t paddr; -+ /* -+ * The pending pages have to be at least 64kB aligned, -+ * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. 
-+ */ -+ pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO, -+ get_order(max(LPI_PENDBASE_SZ, SZ_64K))); -+ if (!pend_page) { -+ pr_err("Failed to allocate PENDBASE for CPU%d\n", -+ smp_processor_id()); -+ return; -+ } -+ -+ /* Make sure the GIC will observe the zero-ed page */ -+ __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ); -+ -+ paddr = page_to_phys(pend_page); -+ pr_info("CPU%d: using LPI pending table @%pa\n", -+ smp_processor_id(), &paddr); -+ gic_data_rdist()->pend_page = pend_page; -+ } -+ -+ /* Disable LPIs */ -+ val = readl_relaxed(rbase + GICR_CTLR); -+ val &= ~GICR_CTLR_ENABLE_LPIS; -+ writel_relaxed(val, rbase + GICR_CTLR); -+ -+ /* -+ * Make sure any change to the table is observable by the GIC. -+ */ -+ dsb(sy); -+ -+ /* set PROPBASE */ -+ val = (page_to_phys(gic_rdists->prop_page) | -+ GICR_PROPBASER_InnerShareable | -+ GICR_PROPBASER_WaWb | -+ ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); -+ -+ writeq_relaxed(val, rbase + GICR_PROPBASER); -+ tmp = readq_relaxed(rbase + GICR_PROPBASER); -+ -+ if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { -+ if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { -+ /* -+ * The HW reports non-shareable, we must -+ * remove the cacheability attributes as -+ * well. -+ */ -+ val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | -+ GICR_PROPBASER_CACHEABILITY_MASK); -+ val |= GICR_PROPBASER_nC; -+ writeq_relaxed(val, rbase + GICR_PROPBASER); -+ } -+ pr_info_once("GIC: using cache flushing for LPI property table\n"); -+ gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; -+ } -+ -+ /* set PENDBASE */ -+ val = (page_to_phys(pend_page) | -+ GICR_PENDBASER_InnerShareable | -+ GICR_PENDBASER_WaWb); -+ -+ writeq_relaxed(val, rbase + GICR_PENDBASER); -+ tmp = readq_relaxed(rbase + GICR_PENDBASER); -+ -+ if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { -+ /* -+ * The HW reports non-shareable, we must remove the -+ * cacheability attributes as well. 
-+ */ -+ val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | -+ GICR_PENDBASER_CACHEABILITY_MASK); -+ val |= GICR_PENDBASER_nC; -+ writeq_relaxed(val, rbase + GICR_PENDBASER); -+ } -+ -+ /* Enable LPIs */ -+ val = readl_relaxed(rbase + GICR_CTLR); -+ val |= GICR_CTLR_ENABLE_LPIS; -+ writel_relaxed(val, rbase + GICR_CTLR); -+ -+ /* Make sure the GIC has seen the above */ -+ dsb(sy); -+} -+ -+static void its_cpu_init_collection(void) -+{ -+ struct its_node *its; -+ int cpu; -+ -+ spin_lock(&its_lock); -+ cpu = smp_processor_id(); -+ -+ list_for_each_entry(its, &its_nodes, entry) { -+ u64 target; -+ -+ /* -+ * We now have to bind each collection to its target -+ * redistributor. -+ */ -+ if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) { -+ /* -+ * This ITS wants the physical address of the -+ * redistributor. -+ */ -+ target = gic_data_rdist()->phys_base; -+ } else { -+ /* -+ * This ITS wants a linear CPU number. -+ */ -+ target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); -+ target = GICR_TYPER_CPU_NUMBER(target) << 16; -+ } -+ -+ /* Perform collection mapping */ -+ its->collections[cpu].target_address = target; -+ its->collections[cpu].col_id = cpu; -+ -+ its_send_mapc(its, &its->collections[cpu], 1); -+ its_send_invall(its, &its->collections[cpu]); -+ } -+ -+ spin_unlock(&its_lock); -+} -+ -+static struct its_device *its_find_device(struct its_node *its, u32 dev_id) -+{ -+ struct its_device *its_dev = NULL, *tmp; -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&its->lock, flags); -+ -+ list_for_each_entry(tmp, &its->its_device_list, entry) { -+ if (tmp->device_id == dev_id) { -+ its_dev = tmp; -+ break; -+ } -+ } -+ -+ raw_spin_unlock_irqrestore(&its->lock, flags); -+ -+ return its_dev; -+} -+ -+static struct its_device *its_create_device(struct its_node *its, u32 dev_id, -+ int nvecs) -+{ -+ struct its_device *dev; -+ unsigned long *lpi_map; -+ unsigned long flags; -+ u16 *col_map = NULL; -+ void *itt; -+ int lpi_base; -+ int nr_lpis; -+ int 
nr_ites; -+ int sz; -+ -+ dev = kzalloc(sizeof(*dev), GFP_KERNEL); -+ /* -+ * At least one bit of EventID is being used, hence a minimum -+ * of two entries. No, the architecture doesn't let you -+ * express an ITT with a single entry. -+ */ -+ nr_ites = max(2UL, roundup_pow_of_two(nvecs)); -+ sz = nr_ites * its->ite_size; -+ sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; -+ itt = kzalloc(sz, GFP_KERNEL); -+ lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); -+ if (lpi_map) -+ col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL); -+ -+ if (!dev || !itt || !lpi_map || !col_map) { -+ kfree(dev); -+ kfree(itt); -+ kfree(lpi_map); -+ kfree(col_map); -+ return NULL; -+ } -+ -+ __flush_dcache_area(itt, sz); -+ -+ dev->its = its; -+ dev->itt = itt; -+ dev->nr_ites = nr_ites; -+ dev->event_map.lpi_map = lpi_map; -+ dev->event_map.col_map = col_map; -+ dev->event_map.lpi_base = lpi_base; -+ dev->event_map.nr_lpis = nr_lpis; -+ dev->device_id = dev_id; -+ INIT_LIST_HEAD(&dev->entry); -+ -+ raw_spin_lock_irqsave(&its->lock, flags); -+ list_add(&dev->entry, &its->its_device_list); -+ raw_spin_unlock_irqrestore(&its->lock, flags); -+ -+ /* Map device to its ITT */ -+ its_send_mapd(dev, 1); -+ -+ return dev; -+} -+ -+static void its_free_device(struct its_device *its_dev) -+{ -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&its_dev->its->lock, flags); -+ list_del(&its_dev->entry); -+ raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); -+ kfree(its_dev->itt); -+ kfree(its_dev); -+} -+ -+static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) -+{ -+ int idx; -+ -+ idx = find_first_zero_bit(dev->event_map.lpi_map, -+ dev->event_map.nr_lpis); -+ if (idx == dev->event_map.nr_lpis) -+ return -ENOSPC; -+ -+ *hwirq = dev->event_map.lpi_base + idx; -+ set_bit(idx, dev->event_map.lpi_map); -+ -+ return 0; -+} -+ -+struct its_pci_alias { -+ struct pci_dev *pdev; -+ u32 dev_id; -+ u32 count; -+}; -+ -+static int its_pci_msi_vec_count(struct 
pci_dev *pdev) -+{ -+ int msi, msix; -+ -+ msi = max(pci_msi_vec_count(pdev), 0); -+ msix = max(pci_msix_vec_count(pdev), 0); -+ -+ return max(msi, msix); -+} -+ -+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) -+{ -+ struct its_pci_alias *dev_alias = data; -+ -+ dev_alias->dev_id = alias; -+ if (pdev != dev_alias->pdev) -+ dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); -+ -+ return 0; -+} -+ -+int __its_msi_prepare(struct irq_domain *domain, u32 dev_id, -+ struct device *dev, int nvec, msi_alloc_info_t *info) -+{ -+ struct its_node *its; -+ struct its_device *its_dev; -+ -+ its = domain->parent->host_data; -+ -+ its_dev = its_find_device(its, dev_id); -+ if (its_dev) { -+ /* -+ * We already have seen this ID, probably through -+ * another alias (PCI bridge of some sort). No need to -+ * create the device. -+ */ -+ dev_dbg(dev, "Reusing ITT for devID %x\n", dev_id); -+ goto out; -+ } -+ -+ its_dev = its_create_device(its, dev_id, nvec); -+ if (!its_dev) -+ return -ENOMEM; -+ -+ dev_dbg(dev, "ITT %d entries, %d bits\n", -+ nvec, ilog2(nvec)); -+out: -+ info->scratchpad[0].ptr = its_dev; -+ info->scratchpad[1].ptr = dev; -+ -+ return 0; -+} -+ -+static int its_msi_prepare(struct irq_domain *domain, struct device *dev, -+ int nvec, msi_alloc_info_t *info) -+{ -+ struct pci_dev *pdev; -+ struct its_pci_alias dev_alias; -+ u32 dev_id; -+ -+ if (!dev_is_pci(dev)) -+ return -EINVAL; -+ -+ pdev = to_pci_dev(dev); -+ dev_alias.pdev = pdev; -+ dev_alias.count = nvec; -+ -+ pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); -+ -+ dev_dbg(dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); -+ dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); -+ return __its_msi_prepare(domain, dev_alias.dev_id, -+ dev, dev_alias.count, info); -+} -+ -+static struct msi_domain_ops its_pci_msi_ops = { -+ .msi_prepare = its_msi_prepare, -+}; -+ -+static struct msi_domain_info its_pci_msi_domain_info = { -+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | 
MSI_FLAG_USE_DEF_CHIP_OPS | -+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), -+ .ops = &its_pci_msi_ops, -+ .chip = &its_msi_irq_chip, -+}; -+ -+static int its_irq_gic_domain_alloc(struct irq_domain *domain, -+ unsigned int virq, -+ irq_hw_number_t hwirq) -+{ -+ struct of_phandle_args args; -+ -+ args.np = domain->parent->of_node; -+ args.args_count = 3; -+ args.args[0] = GIC_IRQ_TYPE_LPI; -+ args.args[1] = hwirq; -+ args.args[2] = IRQ_TYPE_EDGE_RISING; -+ -+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &args); -+} -+ -+static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *args) -+{ -+ msi_alloc_info_t *info = args; -+ struct its_device *its_dev = info->scratchpad[0].ptr; -+ irq_hw_number_t hwirq; -+ int err; -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ err = its_alloc_device_irq(its_dev, &hwirq); -+ if (err) -+ return err; -+ -+ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); -+ if (err) -+ return err; -+ -+ irq_domain_set_hwirq_and_chip(domain, virq + i, -+ hwirq, &its_irq_chip, its_dev); -+ dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n", -+ (int)(hwirq - its_dev->event_map.lpi_base), -+ (int)hwirq, virq + i); -+ } -+ -+ return 0; -+} -+ -+static void its_irq_domain_activate(struct irq_domain *domain, -+ struct irq_data *d) -+{ -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ u32 event = its_get_event_id(d); -+ -+ /* Bind the LPI to the first possible CPU */ -+ its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); -+ -+ /* Map the GIC IRQ and event to the device */ -+ its_send_mapvi(its_dev, d->hwirq, event); -+} -+ -+static void its_irq_domain_deactivate(struct irq_domain *domain, -+ struct irq_data *d) -+{ -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ u32 event = its_get_event_id(d); -+ -+ /* Stop the delivery of interrupts */ -+ its_send_discard(its_dev, event); -+} -+ -+static void its_irq_domain_free(struct irq_domain 
*domain, unsigned int virq, -+ unsigned int nr_irqs) -+{ -+ struct irq_data *d = irq_domain_get_irq_data(domain, virq); -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ struct irq_data *data = irq_domain_get_irq_data(domain, -+ virq + i); -+ u32 event = its_get_event_id(data); -+ -+ /* Mark interrupt index as unused */ -+ clear_bit(event, its_dev->event_map.lpi_map); -+ -+ /* Nuke the entry in the domain */ -+ irq_domain_reset_irq_data(data); -+ } -+ -+ /* If all interrupts have been freed, start mopping the floor */ -+ if (bitmap_empty(its_dev->event_map.lpi_map, -+ its_dev->event_map.nr_lpis)) { -+ its_lpi_free(&its_dev->event_map); -+ -+ /* Unmap device/itt */ -+ its_send_mapd(its_dev, 0); -+ its_free_device(its_dev); -+ } -+ -+ irq_domain_free_irqs_parent(domain, virq, nr_irqs); -+} -+ -+static const struct irq_domain_ops its_domain_ops = { -+ .alloc = its_irq_domain_alloc, -+ .free = its_irq_domain_free, -+ .activate = its_irq_domain_activate, -+ .deactivate = its_irq_domain_deactivate, -+}; -+ -+static int its_force_quiescent(void __iomem *base) -+{ -+ u32 count = 1000000; /* 1s */ -+ u32 val; -+ -+ val = readl_relaxed(base + GITS_CTLR); -+ if (val & GITS_CTLR_QUIESCENT) -+ return 0; -+ -+ /* Disable the generation of all interrupts to this ITS */ -+ val &= ~GITS_CTLR_ENABLE; -+ writel_relaxed(val, base + GITS_CTLR); -+ -+ /* Poll GITS_CTLR and wait until ITS becomes quiescent */ -+ while (1) { -+ val = readl_relaxed(base + GITS_CTLR); -+ if (val & GITS_CTLR_QUIESCENT) -+ return 0; -+ -+ count--; -+ if (!count) -+ return -EBUSY; -+ -+ cpu_relax(); -+ udelay(1); -+ } -+} -+ -+static int its_probe(struct device_node *node, struct irq_domain *parent) -+{ -+ struct resource res; -+ struct its_node *its; -+ void __iomem *its_base; -+ u32 val; -+ u64 baser, tmp; -+ int err; -+ -+ err = of_address_to_resource(node, 0, &res); -+ if (err) { -+ pr_warn("%s: no regs?\n", node->full_name); -+ return -ENXIO; 
-+ } -+ -+ its_base = ioremap(res.start, resource_size(&res)); -+ if (!its_base) { -+ pr_warn("%s: unable to map registers\n", node->full_name); -+ return -ENOMEM; -+ } -+ -+ val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; -+ if (val != 0x30 && val != 0x40) { -+ pr_warn("%s: no ITS detected, giving up\n", node->full_name); -+ err = -ENODEV; -+ goto out_unmap; -+ } -+ -+ err = its_force_quiescent(its_base); -+ if (err) { -+ pr_warn("%s: failed to quiesce, giving up\n", -+ node->full_name); -+ goto out_unmap; -+ } -+ -+ pr_info("ITS: %s\n", node->full_name); -+ -+ its = kzalloc(sizeof(*its), GFP_KERNEL); -+ if (!its) { -+ err = -ENOMEM; -+ goto out_unmap; -+ } -+ -+ raw_spin_lock_init(&its->lock); -+ INIT_LIST_HEAD(&its->entry); -+ INIT_LIST_HEAD(&its->its_device_list); -+ its->base = its_base; -+ its->phys_base = res.start; -+ its->msi_chip.of_node = node; -+ its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; -+ -+ its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); -+ if (!its->cmd_base) { -+ err = -ENOMEM; -+ goto out_free_its; -+ } -+ its->cmd_write = its->cmd_base; -+ -+ err = its_alloc_tables(its); -+ if (err) -+ goto out_free_cmd; -+ -+ err = its_alloc_collections(its); -+ if (err) -+ goto out_free_tables; -+ -+ baser = (virt_to_phys(its->cmd_base) | -+ GITS_CBASER_WaWb | -+ GITS_CBASER_InnerShareable | -+ (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | -+ GITS_CBASER_VALID); -+ -+ writeq_relaxed(baser, its->base + GITS_CBASER); -+ tmp = readq_relaxed(its->base + GITS_CBASER); -+ -+ if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { -+ if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { -+ /* -+ * The HW reports non-shareable, we must -+ * remove the cacheability attributes as -+ * well. 
-+ */ -+ baser &= ~(GITS_CBASER_SHAREABILITY_MASK | -+ GITS_CBASER_CACHEABILITY_MASK); -+ baser |= GITS_CBASER_nC; -+ writeq_relaxed(baser, its->base + GITS_CBASER); -+ } -+ pr_info("ITS: using cache flushing for cmd queue\n"); -+ its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; -+ } -+ -+ writeq_relaxed(0, its->base + GITS_CWRITER); -+ writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); -+ -+ if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { -+ its->domain = irq_domain_add_tree(node, &its_domain_ops, its); -+ if (!its->domain) { -+ err = -ENOMEM; -+ goto out_free_tables; -+ } -+ -+ its->domain->parent = parent; -+ its->domain->bus_token = DOMAIN_BUS_NEXUS; -+ -+ its->msi_chip.domain = pci_msi_create_irq_domain(node, -+ &its_pci_msi_domain_info, -+ its->domain); -+ if (!its->msi_chip.domain) { -+ err = -ENOMEM; -+ goto out_free_domains; -+ } -+ -+ err = of_pci_msi_chip_add(&its->msi_chip); -+ if (err) -+ goto out_free_domains; -+ } -+ -+ spin_lock(&its_lock); -+ list_add(&its->entry, &its_nodes); -+ spin_unlock(&its_lock); -+ -+ return 0; -+ -+out_free_domains: -+ if (its->msi_chip.domain) -+ irq_domain_remove(its->msi_chip.domain); -+ if (its->domain) -+ irq_domain_remove(its->domain); -+out_free_tables: -+ its_free_tables(its); -+out_free_cmd: -+ kfree(its->cmd_base); -+out_free_its: -+ kfree(its); -+out_unmap: -+ iounmap(its_base); -+ pr_err("ITS: failed probing %s (%d)\n", node->full_name, err); -+ return err; -+} -+ -+static bool gic_rdists_supports_plpis(void) -+{ -+ return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); -+} -+ -+int its_cpu_init(void) -+{ -+ if (!list_empty(&its_nodes)) { -+ if (!gic_rdists_supports_plpis()) { -+ pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); -+ return -ENXIO; -+ } -+ its_cpu_init_lpis(); -+ its_cpu_init_collection(); -+ } -+ -+ return 0; -+} -+ -+static struct of_device_id its_device_id[] = { -+ { .compatible = "arm,gic-v3-its", }, -+ {}, -+}; -+ -+int 
its_init(struct device_node *node, struct rdists *rdists, -+ struct irq_domain *parent_domain) -+{ -+ struct device_node *np; -+ -+ for (np = of_find_matching_node(node, its_device_id); np; -+ np = of_find_matching_node(np, its_device_id)) { -+ its_probe(np, parent_domain); -+ } -+ -+ if (list_empty(&its_nodes)) { -+ pr_warn("ITS: No ITS available, not enabling LPIs\n"); -+ return -ENXIO; -+ } -+ -+ gic_rdists = rdists; -+ gic_root_node = node; -+ -+ its_alloc_lpi_tables(); -+ its_lpi_init(rdists->id_bits); -+ -+ return 0; -+} -diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c -index aa17ae8..fd8850d 100644 ---- a/drivers/irqchip/irq-gic-v3.c -+++ b/drivers/irqchip/irq-gic-v3.c -@@ -34,20 +34,25 @@ - #include "irq-gic-common.h" - #include "irqchip.h" - -+struct redist_region { -+ void __iomem *redist_base; -+ phys_addr_t phys_base; -+}; -+ - struct gic_chip_data { - void __iomem *dist_base; -- void __iomem **redist_base; -- void __iomem * __percpu *rdist; -+ struct redist_region *redist_regions; -+ struct rdists rdists; - struct irq_domain *domain; - u64 redist_stride; -- u32 redist_regions; -+ u32 nr_redist_regions; - unsigned int irq_nr; - }; - - static struct gic_chip_data gic_data __read_mostly; - --#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist)) --#define gic_data_rdist_rd_base() (*gic_data_rdist()) -+#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) -+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) - #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) - - /* Our default, arbitrary priority value. Linux only uses one anyway. */ -@@ -71,9 +76,6 @@ static inline void __iomem *gic_dist_base(struct irq_data *d) - if (d->hwirq <= 1023) /* SPI -> dist_base */ - return gic_data.dist_base; - -- if (d->hwirq >= 8192) -- BUG(); /* LPI Detected!!! 
*/ -- - return NULL; - } - -@@ -236,7 +238,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type) - if (irq < 16) - return -EINVAL; - -- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) -+ /* SPIs have restrictions on the supported types */ -+ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && -+ type != IRQ_TYPE_EDGE_RISING) - return -EINVAL; - - if (gic_irq_in_rdist(d)) { -@@ -247,9 +251,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) - rwp_wait = gic_dist_wait_for_rwp; - } - -- gic_configure_irq(irq, type, base, rwp_wait); -- -- return 0; -+ return gic_configure_irq(irq, type, base, rwp_wait); - } - - static u64 gic_mpidr_to_affinity(u64 mpidr) -@@ -271,11 +273,11 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs - do { - irqnr = gic_read_iar(); - -- if (likely(irqnr > 15 && irqnr < 1020)) { -+ if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { - int err; - err = handle_domain_irq(gic_data.domain, irqnr, regs); - if (err) { -- WARN_ONCE(true, "Unexpected SPI received!\n"); -+ WARN_ONCE(true, "Unexpected interrupt received!\n"); - gic_write_eoir(irqnr); - } - continue; -@@ -333,8 +335,8 @@ static int gic_populate_rdist(void) - MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | - MPIDR_AFFINITY_LEVEL(mpidr, 0)); - -- for (i = 0; i < gic_data.redist_regions; i++) { -- void __iomem *ptr = gic_data.redist_base[i]; -+ for (i = 0; i < gic_data.nr_redist_regions; i++) { -+ void __iomem *ptr = gic_data.redist_regions[i].redist_base; - u32 reg; - - reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; -@@ -347,10 +349,13 @@ static int gic_populate_rdist(void) - do { - typer = readq_relaxed(ptr + GICR_TYPER); - if ((typer >> 32) == aff) { -+ u64 offset = ptr - gic_data.redist_regions[i].redist_base; - gic_data_rdist_rd_base() = ptr; -- pr_info("CPU%d: found redistributor %llx @%p\n", -+ gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset; -+ pr_info("CPU%d: found 
redistributor %llx region %d:%pa\n", - smp_processor_id(), -- (unsigned long long)mpidr, ptr); -+ (unsigned long long)mpidr, -+ i, &gic_data_rdist()->phys_base); - return 0; - } - -@@ -385,6 +390,11 @@ static void gic_cpu_sys_reg_init(void) - gic_write_grpen1(1); - } - -+static int gic_dist_supports_lpis(void) -+{ -+ return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS); -+} -+ - static void gic_cpu_init(void) - { - void __iomem *rbase; -@@ -399,6 +409,10 @@ static void gic_cpu_init(void) - - gic_cpu_config(rbase, gic_redist_wait_for_rwp); - -+ /* Give LPIs a spin */ -+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) -+ its_cpu_init(); -+ - /* initialise system registers */ - gic_cpu_sys_reg_init(); - } -@@ -452,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, - tlist |= 1 << (mpidr & 0xf); - - cpu = cpumask_next(cpu, mask); -- if (cpu == nr_cpu_ids) -+ if (cpu >= nr_cpu_ids) - goto out; - - mpidr = cpu_logical_map(cpu); -@@ -467,15 +481,19 @@ out: - return tlist; - } - -+#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ -+ (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ -+ << ICC_SGI1R_AFFINITY_## level ##_SHIFT) -+ - static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) - { - u64 val; - -- val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 | -- MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 | -- irq << 24 | -- MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 | -- tlist); -+ val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | -+ MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | -+ irq << ICC_SGI1R_SGI_ID_SHIFT | -+ MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | -+ tlist << ICC_SGI1R_TARGET_LIST_SHIFT); - - pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); - gic_write_sgi1r(val); -@@ -585,26 +603,43 @@ static struct irq_chip gic_chip = { - .irq_set_affinity = gic_set_affinity, - }; - -+#define GIC_ID_NR (1U << gic_data.rdists.id_bits) -+ - static int gic_irq_domain_map(struct irq_domain *d, 
unsigned int irq, - irq_hw_number_t hw) - { - /* SGIs are private to the core kernel */ - if (hw < 16) - return -EPERM; -+ /* Nothing here */ -+ if (hw >= gic_data.irq_nr && hw < 8192) -+ return -EPERM; -+ /* Off limits */ -+ if (hw >= GIC_ID_NR) -+ return -EPERM; -+ - /* PPIs */ - if (hw < 32) { - irq_set_percpu_devid(irq); -- irq_set_chip_and_handler(irq, &gic_chip, -- handle_percpu_devid_irq); -+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, -+ handle_percpu_devid_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); - } - /* SPIs */ - if (hw >= 32 && hw < gic_data.irq_nr) { -- irq_set_chip_and_handler(irq, &gic_chip, -- handle_fasteoi_irq); -+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, -+ handle_fasteoi_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); - } -- irq_set_chip_data(irq, d->host_data); -+ /* LPIs */ -+ if (hw >= 8192 && hw < GIC_ID_NR) { -+ if (!gic_dist_supports_lpis()) -+ return -EPERM; -+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, -+ handle_fasteoi_irq, NULL, NULL); -+ set_irq_flags(irq, IRQF_VALID); -+ } -+ - return 0; - } - -@@ -625,6 +660,9 @@ static int gic_irq_domain_xlate(struct irq_domain *d, - case 1: /* PPI */ - *out_hwirq = intspec[1] + 16; - break; -+ case GIC_IRQ_TYPE_LPI: /* LPI */ -+ *out_hwirq = intspec[1]; -+ break; - default: - return -EINVAL; - } -@@ -633,17 +671,50 @@ static int gic_irq_domain_xlate(struct irq_domain *d, - return 0; - } - -+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *arg) -+{ -+ int i, ret; -+ irq_hw_number_t hwirq; -+ unsigned int type = IRQ_TYPE_NONE; -+ struct of_phandle_args *irq_data = arg; -+ -+ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, -+ irq_data->args_count, &hwirq, &type); -+ if (ret) -+ return ret; -+ -+ for (i = 0; i < nr_irqs; i++) -+ gic_irq_domain_map(domain, virq + i, hwirq + i); -+ -+ return 0; -+} -+ -+static void gic_irq_domain_free(struct 
irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs) -+{ -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); -+ irq_set_handler(virq + i, NULL); -+ irq_domain_reset_irq_data(d); -+ } -+} -+ - static const struct irq_domain_ops gic_irq_domain_ops = { -- .map = gic_irq_domain_map, - .xlate = gic_irq_domain_xlate, -+ .alloc = gic_irq_domain_alloc, -+ .free = gic_irq_domain_free, - }; - - static int __init gic_of_init(struct device_node *node, struct device_node *parent) - { - void __iomem *dist_base; -- void __iomem **redist_base; -+ struct redist_region *rdist_regs; - u64 redist_stride; -- u32 redist_regions; -+ u32 nr_redist_regions; -+ u32 typer; - u32 reg; - int gic_irqs; - int err; -@@ -664,54 +735,63 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare - goto out_unmap_dist; - } - -- if (of_property_read_u32(node, "#redistributor-regions", &redist_regions)) -- redist_regions = 1; -+ if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) -+ nr_redist_regions = 1; - -- redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL); -- if (!redist_base) { -+ rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL); -+ if (!rdist_regs) { - err = -ENOMEM; - goto out_unmap_dist; - } - -- for (i = 0; i < redist_regions; i++) { -- redist_base[i] = of_iomap(node, 1 + i); -- if (!redist_base[i]) { -+ for (i = 0; i < nr_redist_regions; i++) { -+ struct resource res; -+ int ret; -+ -+ ret = of_address_to_resource(node, 1 + i, &res); -+ rdist_regs[i].redist_base = of_iomap(node, 1 + i); -+ if (ret || !rdist_regs[i].redist_base) { - pr_err("%s: couldn't map region %d\n", - node->full_name, i); - err = -ENODEV; - goto out_unmap_rdist; - } -+ rdist_regs[i].phys_base = res.start; - } - - if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) - redist_stride = 0; - - gic_data.dist_base = dist_base; -- 
gic_data.redist_base = redist_base; -- gic_data.redist_regions = redist_regions; -+ gic_data.redist_regions = rdist_regs; -+ gic_data.nr_redist_regions = nr_redist_regions; - gic_data.redist_stride = redist_stride; - - /* - * Find out how many interrupts are supported. - * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) - */ -- gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f; -- gic_irqs = (gic_irqs + 1) * 32; -+ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); -+ gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer); -+ gic_irqs = GICD_TYPER_IRQS(typer); - if (gic_irqs > 1020) - gic_irqs = 1020; - gic_data.irq_nr = gic_irqs; - - gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops, - &gic_data); -- gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist)); -+ gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); - -- if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) { -+ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { - err = -ENOMEM; - goto out_free; - } - - set_handle_irq(gic_handle_irq); - -+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) -+ its_init(node, &gic_data.rdists, gic_data.domain); -+ - gic_smp_init(); - gic_dist_init(); - gic_cpu_init(); -@@ -722,12 +802,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare - out_free: - if (gic_data.domain) - irq_domain_remove(gic_data.domain); -- free_percpu(gic_data.rdist); -+ free_percpu(gic_data.rdists.rdist); - out_unmap_rdist: -- for (i = 0; i < redist_regions; i++) -- if (redist_base[i]) -- iounmap(redist_base[i]); -- kfree(redist_base); -+ for (i = 0; i < nr_redist_regions; i++) -+ if (rdist_regs[i].redist_base) -+ iounmap(rdist_regs[i].redist_base); -+ kfree(rdist_regs); - out_unmap_dist: - iounmap(dist_base); - return err; -diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c -index 38493ff..ab0b1cb 100644 ---- a/drivers/irqchip/irq-gic.c -+++ 
b/drivers/irqchip/irq-gic.c -@@ -188,12 +188,15 @@ static int gic_set_type(struct irq_data *d, unsigned int type) - { - void __iomem *base = gic_dist_base(d); - unsigned int gicirq = gic_irq(d); -+ int ret; - - /* Interrupt configuration for SGIs can't be changed */ - if (gicirq < 16) - return -EINVAL; - -- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) -+ /* SPIs have restrictions on the supported types */ -+ if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && -+ type != IRQ_TYPE_EDGE_RISING) - return -EINVAL; - - raw_spin_lock(&irq_controller_lock); -@@ -201,11 +204,11 @@ static int gic_set_type(struct irq_data *d, unsigned int type) - if (gic_arch_extn.irq_set_type) - gic_arch_extn.irq_set_type(d, type); - -- gic_configure_irq(gicirq, type, base, NULL); -+ ret = gic_configure_irq(gicirq, type, base, NULL); - - raw_spin_unlock(&irq_controller_lock); - -- return 0; -+ return ret; - } - - static int gic_retrigger(struct irq_data *d) -@@ -788,17 +791,16 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, - { - if (hw < 32) { - irq_set_percpu_devid(irq); -- irq_set_chip_and_handler(irq, &gic_chip, -- handle_percpu_devid_irq); -+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, -+ handle_percpu_devid_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); - } else { -- irq_set_chip_and_handler(irq, &gic_chip, -- handle_fasteoi_irq); -+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, -+ handle_fasteoi_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); - - gic_routable_irq_domain_ops->map(d, irq, hw); - } -- irq_set_chip_data(irq, d->host_data); - return 0; - } - -@@ -858,6 +860,31 @@ static struct notifier_block gic_cpu_notifier = { - }; - #endif - -+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *arg) -+{ -+ int i, ret; -+ irq_hw_number_t hwirq; -+ unsigned int type = IRQ_TYPE_NONE; -+ struct of_phandle_args *irq_data = arg; -+ 
-+ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, -+ irq_data->args_count, &hwirq, &type); -+ if (ret) -+ return ret; -+ -+ for (i = 0; i < nr_irqs; i++) -+ gic_irq_domain_map(domain, virq + i, hwirq + i); -+ -+ return 0; -+} -+ -+static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = { -+ .xlate = gic_irq_domain_xlate, -+ .alloc = gic_irq_domain_alloc, -+ .free = irq_domain_free_irqs_top, -+}; -+ - static const struct irq_domain_ops gic_irq_domain_ops = { - .map = gic_irq_domain_map, - .unmap = gic_irq_domain_unmap, -@@ -948,18 +975,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, - gic_cpu_map[i] = 0xff; - - /* -- * For primary GICs, skip over SGIs. -- * For secondary GICs, skip over PPIs, too. -- */ -- if (gic_nr == 0 && (irq_start & 31) > 0) { -- hwirq_base = 16; -- if (irq_start != -1) -- irq_start = (irq_start & ~31) + 16; -- } else { -- hwirq_base = 32; -- } -- -- /* - * Find out how many interrupts are supported. - * The GIC only supports up to 1020 interrupt sources. - */ -@@ -969,10 +984,31 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, - gic_irqs = 1020; - gic->gic_irqs = gic_irqs; - -- gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ -+ if (node) { /* DT case */ -+ const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops; -+ -+ if (!of_property_read_u32(node, "arm,routable-irqs", -+ &nr_routable_irqs)) { -+ ops = &gic_irq_domain_ops; -+ gic_irqs = nr_routable_irqs; -+ } -+ -+ gic->domain = irq_domain_add_linear(node, gic_irqs, ops, gic); -+ } else { /* Non-DT case */ -+ /* -+ * For primary GICs, skip over SGIs. -+ * For secondary GICs, skip over PPIs, too. 
-+ */ -+ if (gic_nr == 0 && (irq_start & 31) > 0) { -+ hwirq_base = 16; -+ if (irq_start != -1) -+ irq_start = (irq_start & ~31) + 16; -+ } else { -+ hwirq_base = 32; -+ } -+ -+ gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ - -- if (of_property_read_u32(node, "arm,routable-irqs", -- &nr_routable_irqs)) { - irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, - numa_node_id()); - if (IS_ERR_VALUE(irq_base)) { -@@ -983,10 +1019,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, - - gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, - hwirq_base, &gic_irq_domain_ops, gic); -- } else { -- gic->domain = irq_domain_add_linear(node, nr_routable_irqs, -- &gic_irq_domain_ops, -- gic); - } - - if (WARN_ON(!gic->domain)) -@@ -1037,6 +1069,10 @@ gic_of_init(struct device_node *node, struct device_node *parent) - irq = irq_of_parse_and_map(node, 0); - gic_cascade_irq(gic_cnt, irq); - } -+ -+ if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) -+ gicv2m_of_init(node, gic_data[gic_cnt].domain); -+ - gic_cnt++; - return 0; - } -diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c -index 9c8f833..5507a0c 100644 ---- a/drivers/irqchip/irq-hip04.c -+++ b/drivers/irqchip/irq-hip04.c -@@ -120,21 +120,24 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type) - { - void __iomem *base = hip04_dist_base(d); - unsigned int irq = hip04_irq(d); -+ int ret; - - /* Interrupt configuration for SGIs can't be changed */ - if (irq < 16) - return -EINVAL; - -- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) -+ /* SPIs have restrictions on the supported types */ -+ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && -+ type != IRQ_TYPE_EDGE_RISING) - return -EINVAL; - - raw_spin_lock(&irq_controller_lock); - -- gic_configure_irq(irq, type, base, NULL); -+ ret = gic_configure_irq(irq, type, base, NULL); - - raw_spin_unlock(&irq_controller_lock); - -- return 0; -+ return ret; - } - - #ifdef CONFIG_SMP -diff --git 
a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c -index eb9b59e..6b2b582 100644 ---- a/drivers/irqchip/irq-sunxi-nmi.c -+++ b/drivers/irqchip/irq-sunxi-nmi.c -@@ -50,12 +50,12 @@ static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = { - static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, - u32 val) - { -- irq_reg_writel(val, gc->reg_base + off); -+ irq_reg_writel(gc, val, off); - } - - static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off) - { -- return irq_reg_readl(gc->reg_base + off); -+ return irq_reg_readl(gc, off); - } - - static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc) -diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c -index 7c44c99..accc200 100644 ---- a/drivers/irqchip/irq-tb10x.c -+++ b/drivers/irqchip/irq-tb10x.c -@@ -43,12 +43,12 @@ - static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg, - u32 val) - { -- irq_reg_writel(val, gc->reg_base + reg); -+ irq_reg_writel(gc, val, reg); - } - - static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg) - { -- return irq_reg_readl(gc->reg_base + reg); -+ return irq_reg_readl(gc, reg); - } - - static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) -diff --git a/drivers/memory/Kconfig b/drivers/memory/Kconfig -index 6d91c27..d6af99f 100644 ---- a/drivers/memory/Kconfig -+++ b/drivers/memory/Kconfig -@@ -83,6 +83,6 @@ config FSL_CORENET_CF - - config FSL_IFC - bool -- depends on FSL_SOC -+ depends on FSL_SOC || ARCH_LAYERSCAPE - - endif -diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c -index 3d5d792..1b182b1 100644 ---- a/drivers/memory/fsl_ifc.c -+++ b/drivers/memory/fsl_ifc.c -@@ -22,6 +22,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -30,7 +31,9 @@ - #include - #include - #include --#include -+#include -+#include -+#include - - struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; - 
EXPORT_SYMBOL(fsl_ifc_ctrl_dev); -@@ -58,11 +61,11 @@ int fsl_ifc_find(phys_addr_t addr_base) - { - int i = 0; - -- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) -+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs) - return -ENODEV; - -- for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) { -- u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); -+ for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) { -+ u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr); - if (cspr & CSPR_V && (cspr & CSPR_BA) == - convert_ifc_address(addr_base)) - return i; -@@ -74,21 +77,21 @@ EXPORT_SYMBOL(fsl_ifc_find); - - static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) - { -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_global __iomem *ifc = ctrl->gregs; - - /* - * Clear all the common status and event registers - */ -- if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER) -- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); -+ if (ifc_in32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER) -+ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat); - - /* enable all error and events */ -- out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN); -+ ifc_out32(IFC_CM_EVTER_EN_CSEREN, &ifc->cm_evter_en); - - /* enable all error and event interrupts */ -- out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN); -- out_be32(&ifc->cm_erattr0, 0x0); -- out_be32(&ifc->cm_erattr1, 0x0); -+ ifc_out32(IFC_CM_EVTER_INTR_EN_CSERIREN, &ifc->cm_evter_intr_en); -+ ifc_out32(0x0, &ifc->cm_erattr0); -+ ifc_out32(0x0, &ifc->cm_erattr1); - - return 0; - } -@@ -103,7 +106,7 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev) - irq_dispose_mapping(ctrl->nand_irq); - irq_dispose_mapping(ctrl->irq); - -- iounmap(ctrl->regs); -+ iounmap(ctrl->gregs); - - dev_set_drvdata(&dev->dev, NULL); - kfree(ctrl); -@@ -121,15 +124,15 @@ static DEFINE_SPINLOCK(nand_irq_lock); - - static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl) - { -- struct 
fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - unsigned long flags; - u32 stat; - - spin_lock_irqsave(&nand_irq_lock, flags); - -- stat = in_be32(&ifc->ifc_nand.nand_evter_stat); -+ stat = ifc_in32(&ifc->ifc_nand.nand_evter_stat); - if (stat) { -- out_be32(&ifc->ifc_nand.nand_evter_stat, stat); -+ ifc_out32(stat, &ifc->ifc_nand.nand_evter_stat); - ctrl->nand_stat = stat; - wake_up(&ctrl->nand_wait); - } -@@ -156,21 +159,21 @@ static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data) - static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) - { - struct fsl_ifc_ctrl *ctrl = data; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_global __iomem *ifc = ctrl->gregs; - u32 err_axiid, err_srcid, status, cs_err, err_addr; - irqreturn_t ret = IRQ_NONE; - - /* read for chip select error */ -- cs_err = in_be32(&ifc->cm_evter_stat); -+ cs_err = ifc_in32(&ifc->cm_evter_stat); - if (cs_err) { - dev_err(ctrl->dev, "transaction sent to IFC is not mapped to" - "any memory bank 0x%08X\n", cs_err); - /* clear the chip select error */ -- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); -+ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat); - - /* read error attribute registers print the error information */ -- status = in_be32(&ifc->cm_erattr0); -- err_addr = in_be32(&ifc->cm_erattr1); -+ status = ifc_in32(&ifc->cm_erattr0); -+ err_addr = ifc_in32(&ifc->cm_erattr1); - - if (status & IFC_CM_ERATTR0_ERTYP_READ) - dev_err(ctrl->dev, "Read transaction error" -@@ -213,7 +216,8 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) - static int fsl_ifc_ctrl_probe(struct platform_device *dev) - { - int ret = 0; -- -+ int version, banks; -+ void __iomem *addr; - - dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); - -@@ -224,16 +228,41 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) - dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); - - /* IOMAP the entire IFC region */ -- 
fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); -- if (!fsl_ifc_ctrl_dev->regs) { -+ fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0); -+ if (!fsl_ifc_ctrl_dev->gregs) { - dev_err(&dev->dev, "failed to get memory region\n"); - ret = -ENODEV; - goto err; - } - -+ if (of_property_read_bool(dev->dev.of_node, "little-endian")) { -+ fsl_ifc_ctrl_dev->little_endian = true; -+ dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n"); -+ } else { -+ fsl_ifc_ctrl_dev->little_endian = false; -+ dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n"); -+ } -+ -+ version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) & -+ FSL_IFC_VERSION_MASK; -+ -+ banks = (version == FSL_IFC_VERSION_1_0_0) ? 4 : 8; -+ dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", -+ version >> 24, (version >> 16) & 0xf, banks); -+ -+ fsl_ifc_ctrl_dev->version = version; -+ fsl_ifc_ctrl_dev->banks = banks; -+ -+ addr = fsl_ifc_ctrl_dev->gregs; -+ if (version >= FSL_IFC_VERSION_2_0_0) -+ addr += PGOFFSET_64K; -+ else -+ addr += PGOFFSET_4K; -+ fsl_ifc_ctrl_dev->rregs = addr; -+ - /* get the Controller level irq */ - fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); -- if (fsl_ifc_ctrl_dev->irq == NO_IRQ) { -+ if (fsl_ifc_ctrl_dev->irq == 0) { - dev_err(&dev->dev, "failed to get irq resource " - "for IFC\n"); - ret = -ENODEV; -diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c -index 9e21e4f..8f43ab8 100644 ---- a/drivers/mfd/vexpress-sysreg.c -+++ b/drivers/mfd/vexpress-sysreg.c -@@ -223,7 +223,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) - vexpress_config_set_master(vexpress_sysreg_get_master()); - - /* Confirm board type against DT property, if available */ -- if (of_property_read_u32(of_allnodes, "arm,hbi", &dt_hbi) == 0) { -+ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) { - u32 id = vexpress_get_procid(VEXPRESS_SITE_MASTER); - u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK; - -diff --git 
a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c -index 10ecc0a..d356dbc 100644 ---- a/drivers/mmc/card/block.c -+++ b/drivers/mmc/card/block.c -@@ -2402,6 +2402,10 @@ static const struct mmc_fixup blk_fixups[] = - * - * N.B. This doesn't affect SD cards. - */ -+ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, -+ MMC_QUIRK_BLK_NO_CMD23), -+ MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, -+ MMC_QUIRK_BLK_NO_CMD23), - MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, - MMC_QUIRK_BLK_NO_CMD23), - MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, -diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig -index 1386065..b8c9b73 100644 ---- a/drivers/mmc/host/Kconfig -+++ b/drivers/mmc/host/Kconfig -@@ -66,7 +66,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER - has the effect of scrambling the addresses and formats of data - accessed in sizes other than the datum size. - -- This is the case for the Freescale eSDHC and Nintendo Wii SDHCI. -+ This is the case for the Nintendo Wii SDHCI. - - config MMC_SDHCI_PCI - tristate "SDHCI support on PCI bus" -@@ -130,8 +130,10 @@ config MMC_SDHCI_OF_ARASAN - config MMC_SDHCI_OF_ESDHC - tristate "SDHCI OF support for the Freescale eSDHC controller" - depends on MMC_SDHCI_PLTFM -- depends on PPC_OF -- select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER -+ depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE -+ select MMC_SDHCI_IO_ACCESSORS -+ select FSL_SOC_DRIVERS -+ select FSL_GUTS - help - This selects the Freescale eSDHC controller support. 
- -@@ -142,7 +144,7 @@ config MMC_SDHCI_OF_ESDHC - config MMC_SDHCI_OF_HLWD - tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers" - depends on MMC_SDHCI_PLTFM -- depends on PPC_OF -+ depends on PPC - select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER - help - This selects the Secure Digital Host Controller Interface (SDHCI) -diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h -index a870c42..f2baede 100644 ---- a/drivers/mmc/host/sdhci-esdhc.h -+++ b/drivers/mmc/host/sdhci-esdhc.h -@@ -21,16 +21,23 @@ - #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ - SDHCI_QUIRK_NO_BUSY_IRQ | \ - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ -- SDHCI_QUIRK_PIO_NEEDS_DELAY) -+ SDHCI_QUIRK_PIO_NEEDS_DELAY | \ -+ SDHCI_QUIRK_NO_HISPD_BIT) -+ -+#define ESDHC_PROCTL 0x28 - - #define ESDHC_SYSTEM_CONTROL 0x2c - #define ESDHC_CLOCK_MASK 0x0000fff0 - #define ESDHC_PREDIV_SHIFT 8 - #define ESDHC_DIVIDER_SHIFT 4 -+#define ESDHC_CLOCK_CRDEN 0x00000008 - #define ESDHC_CLOCK_PEREN 0x00000004 - #define ESDHC_CLOCK_HCKEN 0x00000002 - #define ESDHC_CLOCK_IPGEN 0x00000001 - -+#define ESDHC_PRESENT_STATE 0x24 -+#define ESDHC_CLOCK_STABLE 0x00000008 -+ - /* pltfm-specific */ - #define ESDHC_HOST_CONTROL_LE 0x20 - -diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c -index 8872c85..4a4a693 100644 ---- a/drivers/mmc/host/sdhci-of-esdhc.c -+++ b/drivers/mmc/host/sdhci-of-esdhc.c -@@ -18,128 +18,334 @@ - #include - #include - #include -+#include -+#include - #include - #include "sdhci-pltfm.h" - #include "sdhci-esdhc.h" - - #define VENDOR_V_22 0x12 - #define VENDOR_V_23 0x13 --static u32 esdhc_readl(struct sdhci_host *host, int reg) -+ -+struct sdhci_esdhc { -+ u8 vendor_ver; -+ u8 spec_ver; -+ u32 soc_ver; -+ u8 soc_rev; -+}; -+ -+/** -+ * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register -+ * to make it compatible with SD spec. 
-+ * -+ * @host: pointer to sdhci_host -+ * @spec_reg: SD spec register address -+ * @value: 32bit eSDHC register value on spec_reg address -+ * -+ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC -+ * registers are 32 bits. There are differences in register size, register -+ * address, register function, bit position and function between eSDHC spec -+ * and SD spec. -+ * -+ * Return a fixed up register value -+ */ -+static u32 esdhc_readl_fixup(struct sdhci_host *host, -+ int spec_reg, u32 value) - { -+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -+ struct sdhci_esdhc *esdhc = pltfm_host->priv; - u32 ret; - -- ret = in_be32(host->ioaddr + reg); - /* - * The bit of ADMA flag in eSDHC is not compatible with standard - * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is - * supported by eSDHC. - * And for many FSL eSDHC controller, the reset value of field -- * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA, -+ * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA, - * only these vendor version is greater than 2.2/0x12 support ADMA. -- * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the -- * the verdor version number, oxFE is SDHCI_HOST_VERSION. 
- */ -- if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) { -- u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); -- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; -- if (tmp > VENDOR_V_22) -- ret |= SDHCI_CAN_DO_ADMA2; -+ if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) { -+ if (esdhc->vendor_ver > VENDOR_V_22) { -+ ret = value | SDHCI_CAN_DO_ADMA2; -+ return ret; -+ } - } -- -+ ret = value; - return ret; - } - --static u16 esdhc_readw(struct sdhci_host *host, int reg) -+static u16 esdhc_readw_fixup(struct sdhci_host *host, -+ int spec_reg, u32 value) - { - u16 ret; -- int base = reg & ~0x3; -- int shift = (reg & 0x2) * 8; -+ int shift = (spec_reg & 0x2) * 8; - -- if (unlikely(reg == SDHCI_HOST_VERSION)) -- ret = in_be32(host->ioaddr + base) & 0xffff; -+ if (spec_reg == SDHCI_HOST_VERSION) -+ ret = value & 0xffff; - else -- ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff; -+ ret = (value >> shift) & 0xffff; - return ret; - } - --static u8 esdhc_readb(struct sdhci_host *host, int reg) -+static u8 esdhc_readb_fixup(struct sdhci_host *host, -+ int spec_reg, u32 value) - { -- int base = reg & ~0x3; -- int shift = (reg & 0x3) * 8; -- u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff; -+ u8 ret; -+ u8 dma_bits; -+ int shift = (spec_reg & 0x3) * 8; -+ -+ ret = (value >> shift) & 0xff; - - /* - * "DMA select" locates at offset 0x28 in SD specification, but on - * P5020 or P3041, it locates at 0x29. 
- */ -- if (reg == SDHCI_HOST_CONTROL) { -- u32 dma_bits; -- -- dma_bits = in_be32(host->ioaddr + reg); -+ if (spec_reg == SDHCI_HOST_CONTROL) { - /* DMA select is 22,23 bits in Protocol Control Register */ -- dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK; -- -+ dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK; - /* fixup the result */ - ret &= ~SDHCI_CTRL_DMA_MASK; - ret |= dma_bits; - } -- - return ret; - } - --static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) -+/** -+ * esdhc_write*_fixup - Fixup the SD spec register value so that it could be -+ * written into eSDHC register. -+ * -+ * @host: pointer to sdhci_host -+ * @spec_reg: SD spec register address -+ * @value: 8/16/32bit SD spec register value that would be written -+ * @old_value: 32bit eSDHC register value on spec_reg address -+ * -+ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC -+ * registers are 32 bits. There are differences in register size, register -+ * address, register function, bit position and function between eSDHC spec -+ * and SD spec. -+ * -+ * Return a fixed up register value -+ */ -+static u32 esdhc_writel_fixup(struct sdhci_host *host, -+ int spec_reg, u32 value, u32 old_value) - { -+ u32 ret; -+ - /* -- * Enable IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] -- * when SYSCTL[RSTD]) is set for some special operations. -- * No any impact other operation. -+ * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] -+ * when SYSCTL[RSTD] is set for some special operations. -+ * No any impact on other operation. 
- */ -- if (reg == SDHCI_INT_ENABLE) -- val |= SDHCI_INT_BLK_GAP; -- sdhci_be32bs_writel(host, val, reg); -+ if (spec_reg == SDHCI_INT_ENABLE) -+ ret = value | SDHCI_INT_BLK_GAP; -+ else -+ ret = value; -+ -+ return ret; - } - --static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) -+static u32 esdhc_writew_fixup(struct sdhci_host *host, -+ int spec_reg, u16 value, u32 old_value) - { -- if (reg == SDHCI_BLOCK_SIZE) { -+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -+ int shift = (spec_reg & 0x2) * 8; -+ u32 ret; -+ -+ switch (spec_reg) { -+ case SDHCI_TRANSFER_MODE: -+ /* -+ * Postpone this write, we must do it together with a -+ * command write that is down below. Return old value. -+ */ -+ pltfm_host->xfer_mode_shadow = value; -+ return old_value; -+ case SDHCI_COMMAND: -+ ret = (value << 16) | pltfm_host->xfer_mode_shadow; -+ return ret; -+ } -+ -+ ret = old_value & (~(0xffff << shift)); -+ ret |= (value << shift); -+ -+ if (spec_reg == SDHCI_BLOCK_SIZE) { - /* - * Two last DMA bits are reserved, and first one is used for - * non-standard blksz of 4096 bytes that we don't support - * yet. So clear the DMA boundary bits. - */ -- val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); -+ ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0)); - } -- sdhci_be32bs_writew(host, val, reg); -+ return ret; - } - --static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) -+static u32 esdhc_writeb_fixup(struct sdhci_host *host, -+ int spec_reg, u8 value, u32 old_value) - { -+ u32 ret; -+ u32 dma_bits; -+ u8 tmp; -+ int shift = (spec_reg & 0x3) * 8; -+ -+ /* -+ * eSDHC doesn't have a standard power control register, so we do -+ * nothing here to avoid incorrect operation. -+ */ -+ if (spec_reg == SDHCI_POWER_CONTROL) -+ return old_value; - /* - * "DMA select" location is offset 0x28 in SD specification, but on - * P5020 or P3041, it's located at 0x29. 
- */ -- if (reg == SDHCI_HOST_CONTROL) { -- u32 dma_bits; -- -+ if (spec_reg == SDHCI_HOST_CONTROL) { - /* - * If host control register is not standard, exit - * this function - */ - if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL) -- return; -+ return old_value; - - /* DMA select is 22,23 bits in Protocol Control Register */ -- dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5; -- clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5, -- dma_bits); -- val &= ~SDHCI_CTRL_DMA_MASK; -- val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK; -+ dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5; -+ ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits; -+ tmp = (value & (~SDHCI_CTRL_DMA_MASK)) | -+ (old_value & SDHCI_CTRL_DMA_MASK); -+ ret = (ret & (~0xff)) | tmp; -+ -+ /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */ -+ ret &= ~ESDHC_HOST_CONTROL_RES; -+ return ret; - } - -- /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */ -- if (reg == SDHCI_HOST_CONTROL) -- val &= ~ESDHC_HOST_CONTROL_RES; -- sdhci_be32bs_writeb(host, val, reg); -+ ret = (old_value & (~(0xff << shift))) | (value << shift); -+ return ret; -+} -+ -+static u32 esdhc_be_readl(struct sdhci_host *host, int reg) -+{ -+ u32 ret; -+ u32 value; -+ -+ value = ioread32be(host->ioaddr + reg); -+ ret = esdhc_readl_fixup(host, reg, value); -+ -+ return ret; -+} -+ -+static u32 esdhc_le_readl(struct sdhci_host *host, int reg) -+{ -+ u32 ret; -+ u32 value; -+ -+ value = ioread32(host->ioaddr + reg); -+ ret = esdhc_readl_fixup(host, reg, value); -+ -+ return ret; -+} -+ -+static u16 esdhc_be_readw(struct sdhci_host *host, int reg) -+{ -+ u16 ret; -+ u32 value; -+ int base = reg & ~0x3; -+ -+ value = ioread32be(host->ioaddr + base); -+ ret = esdhc_readw_fixup(host, reg, value); -+ return ret; -+} -+ -+static u16 esdhc_le_readw(struct sdhci_host *host, int reg) -+{ -+ u16 ret; -+ u32 value; -+ int base = reg & ~0x3; -+ -+ value = ioread32(host->ioaddr + base); -+ ret = 
esdhc_readw_fixup(host, reg, value); -+ return ret; -+} -+ -+static u8 esdhc_be_readb(struct sdhci_host *host, int reg) -+{ -+ u8 ret; -+ u32 value; -+ int base = reg & ~0x3; -+ -+ value = ioread32be(host->ioaddr + base); -+ ret = esdhc_readb_fixup(host, reg, value); -+ return ret; -+} -+ -+static u8 esdhc_le_readb(struct sdhci_host *host, int reg) -+{ -+ u8 ret; -+ u32 value; -+ int base = reg & ~0x3; -+ -+ value = ioread32(host->ioaddr + base); -+ ret = esdhc_readb_fixup(host, reg, value); -+ return ret; -+} -+ -+static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg) -+{ -+ u32 value; -+ -+ value = esdhc_writel_fixup(host, reg, val, 0); -+ iowrite32be(value, host->ioaddr + reg); -+} -+ -+static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg) -+{ -+ u32 value; -+ -+ value = esdhc_writel_fixup(host, reg, val, 0); -+ iowrite32(value, host->ioaddr + reg); -+} -+ -+static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg) -+{ -+ int base = reg & ~0x3; -+ u32 value; -+ u32 ret; -+ -+ value = ioread32be(host->ioaddr + base); -+ ret = esdhc_writew_fixup(host, reg, val, value); -+ if (reg != SDHCI_TRANSFER_MODE) -+ iowrite32be(ret, host->ioaddr + base); -+} -+ -+static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg) -+{ -+ int base = reg & ~0x3; -+ u32 value; -+ u32 ret; -+ -+ value = ioread32(host->ioaddr + base); -+ ret = esdhc_writew_fixup(host, reg, val, value); -+ if (reg != SDHCI_TRANSFER_MODE) -+ iowrite32(ret, host->ioaddr + base); -+} -+ -+static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg) -+{ -+ int base = reg & ~0x3; -+ u32 value; -+ u32 ret; -+ -+ value = ioread32be(host->ioaddr + base); -+ ret = esdhc_writeb_fixup(host, reg, val, value); -+ iowrite32be(ret, host->ioaddr + base); -+} -+ -+static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg) -+{ -+ int base = reg & ~0x3; -+ u32 value; -+ u32 ret; -+ -+ value = ioread32(host->ioaddr + base); -+ ret = 
esdhc_writeb_fixup(host, reg, val, value); -+ iowrite32(ret, host->ioaddr + base); - } - - /* -@@ -149,37 +355,116 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) - * For Continue, apply soft reset for data(SYSCTL[RSTD]); - * and re-issue the entire read transaction from beginning. - */ --static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask) -+static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask) - { -- u32 tmp; -+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -+ struct sdhci_esdhc *esdhc = pltfm_host->priv; - bool applicable; - dma_addr_t dmastart; - dma_addr_t dmanow; - -- tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); -- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; -- - applicable = (intmask & SDHCI_INT_DATA_END) && -- (intmask & SDHCI_INT_BLK_GAP) && -- (tmp == VENDOR_V_23); -- if (!applicable) -+ (intmask & SDHCI_INT_BLK_GAP) && -+ (esdhc->vendor_ver == VENDOR_V_23); -+ if (applicable) { -+ -+ sdhci_reset(host, SDHCI_RESET_DATA); -+ host->data->error = 0; -+ dmastart = sg_dma_address(host->data->sg); -+ dmanow = dmastart + host->data->bytes_xfered; -+ /* -+ * Force update to the next DMA block boundary. -+ */ -+ dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + -+ SDHCI_DEFAULT_BOUNDARY_SIZE; -+ host->data->bytes_xfered = dmanow - dmastart; -+ sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); -+ - return; -+ } - -- host->data->error = 0; -- dmastart = sg_dma_address(host->data->sg); -- dmanow = dmastart + host->data->bytes_xfered; - /* -- * Force update to the next DMA block boundary. 
-+ * Check for A-004388: eSDHC DMA might not stop if error -+ * occurs on system transaction -+ * Impact list: -+ * T4240-4160-R1.0 B4860-4420-R1.0-R2.0 P1010-1014-R1.0 -+ * P3041-R1.0-R2.0-R1.1 P2041-2040-R1.0-R1.1-R2.0 -+ * P5020-5010-R2.0-R1.0 P5040-5021-R2.0-R2.1 - */ -- dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + -- SDHCI_DEFAULT_BOUNDARY_SIZE; -- host->data->bytes_xfered = dmanow - dmastart; -- sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); -+ if (!(((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_P1010) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_P1014) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_P3041) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P2041) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P2040) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P5020) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P5010) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P5040) && (esdhc->soc_rev <= 0x21)) || -+ ((esdhc->soc_ver == SVR_P5021) && (esdhc->soc_rev <= 0x21)))) -+ return; -+ -+ sdhci_reset(host, SDHCI_RESET_DATA); -+ -+ if (host->flags & SDHCI_USE_ADMA) { -+ u32 mod, i, offset; -+ u8 *desc; -+ dma_addr_t addr; -+ struct scatterlist *sg; -+ __le32 *dataddr; -+ __le32 *cmdlen; -+ -+ /* -+ * If block count was enabled, in case read transfer there -+ * is no data was corrupted -+ */ -+ mod = sdhci_readl(host, SDHCI_TRANSFER_MODE); -+ if ((mod & SDHCI_TRNS_BLK_CNT_EN) && -+ (host->data->flags & MMC_DATA_READ)) -+ host->data->error = 0; -+ -+ BUG_ON(!host->data); -+ desc = host->adma_table; -+ 
for_each_sg(host->data->sg, sg, host->sg_count, i) { -+ addr = sg_dma_address(sg); -+ offset = (4 - (addr & 0x3)) & 0x3; -+ if (offset) -+ desc += 8; -+ desc += 8; -+ } -+ -+ /* -+ * Add an extra zero descriptor next to the -+ * terminating descriptor. -+ */ -+ desc += 8; -+ WARN_ON((desc - (u8 *)(host->adma_table)) > (128 * 2 + 1) * 4); -+ -+ dataddr = (__le32 __force *)(desc + 4); -+ cmdlen = (__le32 __force *)desc; -+ -+ cmdlen[0] = cpu_to_le32(0); -+ dataddr[0] = cpu_to_le32(0); -+ } -+ -+ if ((host->flags & SDHCI_USE_SDMA) && -+ (host->data->flags & MMC_DATA_READ)) -+ host->data->error = 0; -+ -+ return; - } - - static int esdhc_of_enable_dma(struct sdhci_host *host) - { -- setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); -+ u32 value; -+ -+ value = sdhci_readl(host, ESDHC_DMA_SYSCTL); -+ value |= ESDHC_DMA_SNOOP; -+ sdhci_writel(host, value, ESDHC_DMA_SYSCTL); - return 0; - } - -@@ -199,15 +484,22 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) - - static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) - { -- int pre_div = 2; -+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -+ struct sdhci_esdhc *esdhc = pltfm_host->priv; -+ int pre_div = 1; - int div = 1; - u32 temp; -+ u32 timeout; - - host->mmc->actual_clock = 0; - - if (clock == 0) - return; - -+ /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ -+ if (esdhc->vendor_ver < VENDOR_V_23) -+ pre_div = 2; -+ - /* Workaround to reduce the clock frequency for p1010 esdhc */ - if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { - if (clock > 20000000) -@@ -218,7 +510,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN -- | ESDHC_CLOCK_MASK); -+ | ESDHC_CLOCK_CRDEN | ESDHC_CLOCK_MASK); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - - while (host->max_clk / pre_div / 16 > 
clock && pre_div < 256) -@@ -229,7 +521,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) - - dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", - clock, host->max_clk / pre_div / div); -- -+ host->mmc->actual_clock = host->max_clk / pre_div / div; - pre_div >>= 1; - div--; - -@@ -238,70 +530,117 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) - | (div << ESDHC_DIVIDER_SHIFT) - | (pre_div << ESDHC_PREDIV_SHIFT)); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); -- mdelay(1); --} - --static void esdhc_of_platform_init(struct sdhci_host *host) --{ -- u32 vvn; -- -- vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); -- vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; -- if (vvn == VENDOR_V_22) -- host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; -+ /* Wait max 20 ms */ -+ timeout = 20; -+ while (!(sdhci_readl(host, ESDHC_PRESENT_STATE) & ESDHC_CLOCK_STABLE)) { -+ if (timeout == 0) { -+ pr_err("%s: Internal clock never stabilised.\n", -+ mmc_hostname(host->mmc)); -+ return; -+ } -+ timeout--; -+ mdelay(1); -+ } - -- if (vvn > VENDOR_V_22) -- host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; -+ temp |= ESDHC_CLOCK_CRDEN; -+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - } - - static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) - { - u32 ctrl; - -+ ctrl = sdhci_readl(host, ESDHC_PROCTL); -+ ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK); - switch (width) { - case MMC_BUS_WIDTH_8: -- ctrl = ESDHC_CTRL_8BITBUS; -+ ctrl |= ESDHC_CTRL_8BITBUS; - break; - - case MMC_BUS_WIDTH_4: -- ctrl = ESDHC_CTRL_4BITBUS; -+ ctrl |= ESDHC_CTRL_4BITBUS; - break; - - default: -- ctrl = 0; - break; - } - -- clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL, -- ESDHC_CTRL_BUSWIDTH_MASK, ctrl); -+ sdhci_writel(host, ctrl, ESDHC_PROCTL); - } - --static const struct sdhci_ops sdhci_esdhc_ops = { -- .read_l = esdhc_readl, -- .read_w = esdhc_readw, -- .read_b = esdhc_readb, -- .write_l = esdhc_writel, -- 
.write_w = esdhc_writew, -- .write_b = esdhc_writeb, -- .set_clock = esdhc_of_set_clock, -- .enable_dma = esdhc_of_enable_dma, -- .get_max_clock = esdhc_of_get_max_clock, -- .get_min_clock = esdhc_of_get_min_clock, -- .platform_init = esdhc_of_platform_init, -- .adma_workaround = esdhci_of_adma_workaround, -- .set_bus_width = esdhc_pltfm_set_bus_width, -- .reset = sdhci_reset, -- .set_uhs_signaling = sdhci_set_uhs_signaling, --}; -+/* -+ * A-003980: SDHC: Glitch is generated on the card clock with software reset -+ * or clock divider change -+ * Workaround: -+ * A simple workaround is to disable the SD card clock before the software -+ * reset, and enable it when the module resumes normal operation. The Host -+ * and the SD card are in a master-slave relationship. The Host provides -+ * clock and control transfer across the interface. Therefore, any existing -+ * operation is discarded when the Host controller is reset. -+ */ -+static int esdhc_of_reset_workaround(struct sdhci_host *host, u8 mask) -+{ -+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -+ struct sdhci_esdhc *esdhc = pltfm_host->priv; -+ bool disable_clk_before_reset = false; -+ u32 temp; - --#ifdef CONFIG_PM -+ /* -+ * Check for A-003980 -+ * Impact list: -+ * T4240-4160-R1.0-R2.0 B4860-4420-R1.0-R2.0 P5040-5021-R1.0-R2.0-R2.1 -+ * P5020-5010-R1.0-R2.0 P3041-R1.0-R1.1-R2.0 P2041-2040-R1.0-R1.1-R2.0 -+ * P1010-1014-R1.0 -+ */ -+ if (((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_P5040) && 
(esdhc->soc_rev <= 0x21)) || -+ ((esdhc->soc_ver == SVR_P5021) && (esdhc->soc_rev <= 0x21)) || -+ ((esdhc->soc_ver == SVR_P5020) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P5010) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P3041) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P2041) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P2040) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P1014) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_P1010) && (esdhc->soc_rev == 0x10))) -+ disable_clk_before_reset = true; -+ -+ if (disable_clk_before_reset && (mask & SDHCI_RESET_ALL)) { -+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); -+ temp &= ~ESDHC_CLOCK_CRDEN; -+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); -+ sdhci_reset(host, mask); -+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); -+ temp |= ESDHC_CLOCK_CRDEN; -+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); -+ return 1; -+ } -+ return 0; -+} -+ -+static void esdhc_reset(struct sdhci_host *host, u8 mask) -+{ -+ if (!esdhc_of_reset_workaround(host, mask)) -+ sdhci_reset(host, mask); - -+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); -+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); -+} -+ -+#ifdef CONFIG_PM - static u32 esdhc_proctl; - static int esdhc_of_suspend(struct device *dev) - { - struct sdhci_host *host = dev_get_drvdata(dev); - -- esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); -+ esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL); - - return sdhci_suspend_host(host); - } -@@ -311,11 +650,8 @@ static int esdhc_of_resume(struct device *dev) - struct sdhci_host *host = dev_get_drvdata(dev); - int ret = sdhci_resume_host(host); - -- if (ret == 0) { -- /* Isn't this already done by sdhci_resume_host() ? 
--rmk */ -- esdhc_of_enable_dma(host); -- sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); -- } -+ if (ret == 0) -+ sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); - - return ret; - } -@@ -329,30 +665,120 @@ static const struct dev_pm_ops esdhc_pmops = { - #define ESDHC_PMOPS NULL - #endif - --static const struct sdhci_pltfm_data sdhci_esdhc_pdata = { -- /* -- * card detection could be handled via GPIO -- * eSDHC cannot support End Attribute in NOP ADMA descriptor -- */ -+static const struct sdhci_ops sdhci_esdhc_be_ops = { -+ .read_l = esdhc_be_readl, -+ .read_w = esdhc_be_readw, -+ .read_b = esdhc_be_readb, -+ .write_l = esdhc_be_writel, -+ .write_w = esdhc_be_writew, -+ .write_b = esdhc_be_writeb, -+ .set_clock = esdhc_of_set_clock, -+ .enable_dma = esdhc_of_enable_dma, -+ .get_max_clock = esdhc_of_get_max_clock, -+ .get_min_clock = esdhc_of_get_min_clock, -+ .adma_workaround = esdhc_of_adma_workaround, -+ .set_bus_width = esdhc_pltfm_set_bus_width, -+ .reset = esdhc_reset, -+ .set_uhs_signaling = sdhci_set_uhs_signaling, -+}; -+ -+static const struct sdhci_ops sdhci_esdhc_le_ops = { -+ .read_l = esdhc_le_readl, -+ .read_w = esdhc_le_readw, -+ .read_b = esdhc_le_readb, -+ .write_l = esdhc_le_writel, -+ .write_w = esdhc_le_writew, -+ .write_b = esdhc_le_writeb, -+ .set_clock = esdhc_of_set_clock, -+ .enable_dma = esdhc_of_enable_dma, -+ .get_max_clock = esdhc_of_get_max_clock, -+ .get_min_clock = esdhc_of_get_min_clock, -+ .adma_workaround = esdhc_of_adma_workaround, -+ .set_bus_width = esdhc_pltfm_set_bus_width, -+ .reset = esdhc_reset, -+ .set_uhs_signaling = sdhci_set_uhs_signaling, -+}; -+ -+static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = { - .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION - | SDHCI_QUIRK_NO_CARD_NO_RESET - | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, -- .ops = &sdhci_esdhc_ops, -+ .ops = &sdhci_esdhc_be_ops, - }; - -+static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = { -+ .quirks = 
ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION -+ | SDHCI_QUIRK_NO_CARD_NO_RESET -+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, -+ .ops = &sdhci_esdhc_le_ops, -+}; -+ -+static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) -+{ -+ struct sdhci_pltfm_host *pltfm_host; -+ struct sdhci_esdhc *esdhc; -+ u16 host_ver; -+ u32 svr; -+ -+ pltfm_host = sdhci_priv(host); -+ esdhc = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_esdhc), -+ GFP_KERNEL); -+ pltfm_host->priv = esdhc; -+ -+ svr = guts_get_svr(); -+ esdhc->soc_ver = SVR_SOC_VER(svr); -+ esdhc->soc_rev = SVR_REV(svr); -+ -+ host_ver = sdhci_readw(host, SDHCI_HOST_VERSION); -+ esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> -+ SDHCI_VENDOR_VER_SHIFT; -+ esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK; -+} -+ - static int sdhci_esdhc_probe(struct platform_device *pdev) - { - struct sdhci_host *host; - struct device_node *np; -+ struct sdhci_pltfm_host *pltfm_host; -+ struct sdhci_esdhc *esdhc; - int ret; - -- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0); -+ np = pdev->dev.of_node; -+ -+ if (of_get_property(np, "little-endian", NULL)) -+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 0); -+ else -+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, 0); -+ - if (IS_ERR(host)) - return PTR_ERR(host); - -+ esdhc_init(pdev, host); -+ - sdhci_get_of_property(pdev); - -- np = pdev->dev.of_node; -+ pltfm_host = sdhci_priv(host); -+ esdhc = pltfm_host->priv; -+ if (esdhc->vendor_ver == VENDOR_V_22) -+ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; -+ -+ if (esdhc->vendor_ver > VENDOR_V_22) -+ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; -+ -+ if (of_device_is_compatible(np, "fsl,p5040-esdhc") || -+ of_device_is_compatible(np, "fsl,p5020-esdhc") || -+ of_device_is_compatible(np, "fsl,p4080-esdhc") || -+ of_device_is_compatible(np, "fsl,p1020-esdhc") || -+ of_device_is_compatible(np, "fsl,t1040-esdhc") || -+ of_device_is_compatible(np, "fsl,ls1021a-esdhc") || -+ 
of_device_is_compatible(np, "fsl,ls2080a-esdhc") || -+ of_device_is_compatible(np, "fsl,ls2085a-esdhc") || -+ of_device_is_compatible(np, "fsl,ls1043a-esdhc")) -+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; -+ -+ if (of_device_is_compatible(np, "fsl,ls1021a-esdhc")) -+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; -+ - if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { - /* - * Freescale messed up with P2020 as it has a non-standard -@@ -362,13 +788,19 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) - } - - /* call to generic mmc_of_parse to support additional capabilities */ -- mmc_of_parse(host->mmc); -+ ret = mmc_of_parse(host->mmc); -+ if (ret) -+ goto err; -+ - mmc_of_parse_voltage(np, &host->ocr_mask); - - ret = sdhci_add_host(host); - if (ret) -- sdhci_pltfm_free(pdev); -+ goto err; - -+ return 0; -+ err: -+ sdhci_pltfm_free(pdev); - return ret; - } - -diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c -index 023c201..8af38a6 100644 ---- a/drivers/mmc/host/sdhci.c -+++ b/drivers/mmc/host/sdhci.c -@@ -44,8 +44,6 @@ - - #define MAX_TUNING_LOOP 40 - --#define ADMA_SIZE ((128 * 2 + 1) * 4) -- - static unsigned int debug_quirks = 0; - static unsigned int debug_quirks2; - -@@ -119,10 +117,17 @@ static void sdhci_dumpregs(struct sdhci_host *host) - pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n", - sdhci_readw(host, SDHCI_HOST_CONTROL2)); - -- if (host->flags & SDHCI_USE_ADMA) -- pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", -- readl(host->ioaddr + SDHCI_ADMA_ERROR), -- readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); -+ if (host->flags & SDHCI_USE_ADMA) { -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", -+ readl(host->ioaddr + SDHCI_ADMA_ERROR), -+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI), -+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); -+ else -+ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", -+ readl(host->ioaddr + 
SDHCI_ADMA_ERROR), -+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); -+ } - - pr_debug(DRIVER_NAME ": ===========================================\n"); - } -@@ -231,6 +236,9 @@ static void sdhci_init(struct sdhci_host *host, int soft) - SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | - SDHCI_INT_RESPONSE; - -+ if (host->flags & SDHCI_AUTO_CMD12) -+ host->ier |= SDHCI_INT_ACMD12ERR; -+ - sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); - sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); - -@@ -448,18 +456,26 @@ static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) - local_irq_restore(*flags); - } - --static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd) -+static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc, -+ dma_addr_t addr, int len, unsigned cmd) - { -- __le32 *dataddr = (__le32 __force *)(desc + 4); -- __le16 *cmdlen = (__le16 __force *)desc; -+ struct sdhci_adma2_64_desc *dma_desc = desc; -+ -+ /* 32-bit and 64-bit descriptors have these members in same position */ -+ dma_desc->cmd = cpu_to_le16(cmd); -+ dma_desc->len = cpu_to_le16(len); -+ dma_desc->addr_lo = cpu_to_le32((u32)addr); - -- /* SDHCI specification says ADMA descriptors should be 4 byte -- * aligned, so using 16 or 32bit operations should be safe. 
*/ -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32); -+} - -- cmdlen[0] = cpu_to_le16(cmd); -- cmdlen[1] = cpu_to_le16(len); -+static void sdhci_adma_mark_end(void *desc) -+{ -+ struct sdhci_adma2_64_desc *dma_desc = desc; - -- dataddr[0] = cpu_to_le32(addr); -+ /* 32-bit and 64-bit descriptors have 'cmd' in same position */ -+ dma_desc->cmd |= cpu_to_le16(ADMA2_END); - } - - static int sdhci_adma_table_pre(struct sdhci_host *host, -@@ -467,8 +483,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - { - int direction; - -- u8 *desc; -- u8 *align; -+ void *desc; -+ void *align; - dma_addr_t addr; - dma_addr_t align_addr; - int len, offset; -@@ -489,17 +505,17 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - direction = DMA_TO_DEVICE; - - host->align_addr = dma_map_single(mmc_dev(host->mmc), -- host->align_buffer, 128 * 4, direction); -+ host->align_buffer, host->align_buffer_sz, direction); - if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) - goto fail; -- BUG_ON(host->align_addr & 0x3); -+ BUG_ON(host->align_addr & host->align_mask); - - host->sg_count = dma_map_sg(mmc_dev(host->mmc), - data->sg, data->sg_len, direction); - if (host->sg_count == 0) - goto unmap_align; - -- desc = host->adma_desc; -+ desc = host->adma_table; - align = host->align_buffer; - - align_addr = host->align_addr; -@@ -515,24 +531,27 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - * the (up to three) bytes that screw up the - * alignment. 
- */ -- offset = (4 - (addr & 0x3)) & 0x3; -+ offset = (host->align_sz - (addr & host->align_mask)) & -+ host->align_mask; - if (offset) { - if (data->flags & MMC_DATA_WRITE) { - buffer = sdhci_kmap_atomic(sg, &flags); -- WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); -+ WARN_ON(((long)buffer & (PAGE_SIZE - 1)) > -+ (PAGE_SIZE - offset)); - memcpy(align, buffer, offset); - sdhci_kunmap_atomic(buffer, &flags); - } - - /* tran, valid */ -- sdhci_set_adma_desc(desc, align_addr, offset, 0x21); -+ sdhci_adma_write_desc(host, desc, align_addr, offset, -+ ADMA2_TRAN_VALID); - - BUG_ON(offset > 65536); - -- align += 4; -- align_addr += 4; -+ align += host->align_sz; -+ align_addr += host->align_sz; - -- desc += 8; -+ desc += host->desc_sz; - - addr += offset; - len -= offset; -@@ -541,23 +560,23 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - BUG_ON(len > 65536); - - /* tran, valid */ -- sdhci_set_adma_desc(desc, addr, len, 0x21); -- desc += 8; -+ sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID); -+ desc += host->desc_sz; - - /* - * If this triggers then we have a calculation bug - * somewhere. 
:/ - */ -- WARN_ON((desc - host->adma_desc) > ADMA_SIZE); -+ WARN_ON((desc - host->adma_table) >= host->adma_table_sz); - } - - if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { - /* - * Mark the last descriptor as the terminating descriptor - */ -- if (desc != host->adma_desc) { -- desc -= 8; -- desc[0] |= 0x2; /* end */ -+ if (desc != host->adma_table) { -+ desc -= host->desc_sz; -+ sdhci_adma_mark_end(desc); - } - } else { - /* -@@ -565,7 +584,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - */ - - /* nop, end, valid */ -- sdhci_set_adma_desc(desc, 0, 0, 0x3); -+ sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID); - } - - /* -@@ -573,14 +592,14 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - */ - if (data->flags & MMC_DATA_WRITE) { - dma_sync_single_for_device(mmc_dev(host->mmc), -- host->align_addr, 128 * 4, direction); -+ host->align_addr, host->align_buffer_sz, direction); - } - - return 0; - - unmap_align: - dma_unmap_single(mmc_dev(host->mmc), host->align_addr, -- 128 * 4, direction); -+ host->align_buffer_sz, direction); - fail: - return -EINVAL; - } -@@ -592,7 +611,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host, - - struct scatterlist *sg; - int i, size; -- u8 *align; -+ void *align; - char *buffer; - unsigned long flags; - bool has_unaligned; -@@ -603,12 +622,12 @@ static void sdhci_adma_table_post(struct sdhci_host *host, - direction = DMA_TO_DEVICE; - - dma_unmap_single(mmc_dev(host->mmc), host->align_addr, -- 128 * 4, direction); -+ host->align_buffer_sz, direction); - - /* Do a quick scan of the SG list for any unaligned mappings */ - has_unaligned = false; - for_each_sg(data->sg, sg, host->sg_count, i) -- if (sg_dma_address(sg) & 3) { -+ if (sg_dma_address(sg) & host->align_mask) { - has_unaligned = true; - break; - } -@@ -620,15 +639,17 @@ static void sdhci_adma_table_post(struct sdhci_host *host, - align = host->align_buffer; - - for_each_sg(data->sg, sg, host->sg_count, i) { -- if 
(sg_dma_address(sg) & 0x3) { -- size = 4 - (sg_dma_address(sg) & 0x3); -+ if (sg_dma_address(sg) & host->align_mask) { -+ size = host->align_sz - -+ (sg_dma_address(sg) & host->align_mask); - - buffer = sdhci_kmap_atomic(sg, &flags); -- WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); -+ WARN_ON(((long)buffer & (PAGE_SIZE - 1)) > -+ (PAGE_SIZE - size)); - memcpy(buffer, align, size); - sdhci_kunmap_atomic(buffer, &flags); - -- align += 4; -+ align += host->align_sz; - } - } - } -@@ -822,6 +843,10 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) - } else { - sdhci_writel(host, host->adma_addr, - SDHCI_ADMA_ADDRESS); -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ sdhci_writel(host, -+ (u64)host->adma_addr >> 32, -+ SDHCI_ADMA_ADDRESS_HI); - } - } else { - int sg_cnt; -@@ -855,10 +880,14 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - ctrl &= ~SDHCI_CTRL_DMA_MASK; - if ((host->flags & SDHCI_REQ_USE_DMA) && -- (host->flags & SDHCI_USE_ADMA)) -- ctrl |= SDHCI_CTRL_ADMA32; -- else -+ (host->flags & SDHCI_USE_ADMA)) { -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ ctrl |= SDHCI_CTRL_ADMA64; -+ else -+ ctrl |= SDHCI_CTRL_ADMA32; -+ } else { - ctrl |= SDHCI_CTRL_SDMA; -+ } - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - } - -@@ -1797,6 +1826,10 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, - ctrl |= SDHCI_CTRL_VDD_180; - sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - -+ /* Some controller need to do more when switching */ -+ if (host->ops->voltage_switch) -+ host->ops->voltage_switch(host); -+ - /* 1.8V regulator output should be stable within 5 ms */ - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); - if (ctrl & SDHCI_CTRL_VDD_180) -@@ -2250,7 +2283,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) - if (intmask & SDHCI_INT_TIMEOUT) - host->cmd->error = -ETIMEDOUT; - else if (intmask & 
(SDHCI_INT_CRC | SDHCI_INT_END_BIT | -- SDHCI_INT_INDEX)) -+ SDHCI_INT_INDEX | SDHCI_INT_ACMD12ERR)) - host->cmd->error = -EILSEQ; - - if (host->cmd->error) { -@@ -2292,32 +2325,36 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) - } - - #ifdef CONFIG_MMC_DEBUG --static void sdhci_show_adma_error(struct sdhci_host *host) -+static void sdhci_adma_show_error(struct sdhci_host *host) - { - const char *name = mmc_hostname(host->mmc); -- u8 *desc = host->adma_desc; -- __le32 *dma; -- __le16 *len; -- u8 attr; -+ void *desc = host->adma_table; - - sdhci_dumpregs(host); - - while (true) { -- dma = (__le32 *)(desc + 4); -- len = (__le16 *)(desc + 2); -- attr = *desc; -- -- DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", -- name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr); -+ struct sdhci_adma2_64_desc *dma_desc = desc; -+ -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", -+ name, desc, le32_to_cpu(dma_desc->addr_hi), -+ le32_to_cpu(dma_desc->addr_lo), -+ le16_to_cpu(dma_desc->len), -+ le16_to_cpu(dma_desc->cmd)); -+ else -+ DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", -+ name, desc, le32_to_cpu(dma_desc->addr_lo), -+ le16_to_cpu(dma_desc->len), -+ le16_to_cpu(dma_desc->cmd)); - -- desc += 8; -+ desc += host->desc_sz; - -- if (attr & 2) -+ if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) - break; - } - } - #else --static void sdhci_show_adma_error(struct sdhci_host *host) { } -+static void sdhci_adma_show_error(struct sdhci_host *host) { } - #endif - - static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) -@@ -2380,7 +2417,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) - host->data->error = -EILSEQ; - else if (intmask & SDHCI_INT_ADMA_ERROR) { - pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); -- sdhci_show_adma_error(host); -+ sdhci_adma_show_error(host); - host->data->error = -EIO; - if (host->ops->adma_workaround) - 
host->ops->adma_workaround(host, intmask); -@@ -2859,6 +2896,16 @@ int sdhci_add_host(struct sdhci_host *host) - host->flags &= ~SDHCI_USE_ADMA; - } - -+ /* -+ * It is assumed that a 64-bit capable device has set a 64-bit DMA mask -+ * and *must* do 64-bit DMA. A driver has the opportunity to change -+ * that during the first call to ->enable_dma(). Similarly -+ * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to -+ * implement. -+ */ -+ if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) -+ host->flags |= SDHCI_USE_64_BIT_DMA; -+ - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { - if (host->ops->enable_dma) { - if (host->ops->enable_dma(host)) { -@@ -2870,33 +2917,59 @@ int sdhci_add_host(struct sdhci_host *host) - } - } - -+ /* SDMA does not support 64-bit DMA */ -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ host->flags &= ~SDHCI_USE_SDMA; -+ - if (host->flags & SDHCI_USE_ADMA) { - /* -- * We need to allocate descriptors for all sg entries -- * (128) and potentially one alignment transfer for -- * each of those entries. -+ * The DMA descriptor table size is calculated as the maximum -+ * number of segments times 2, to allow for an alignment -+ * descriptor for each segment, plus 1 for a nop end descriptor, -+ * all multipled by the descriptor size. 
- */ -- host->adma_desc = dma_alloc_coherent(mmc_dev(mmc), -- ADMA_SIZE, &host->adma_addr, -- GFP_KERNEL); -- host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); -- if (!host->adma_desc || !host->align_buffer) { -- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, -- host->adma_desc, host->adma_addr); -+ if (host->flags & SDHCI_USE_64_BIT_DMA) { -+ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * -+ SDHCI_ADMA2_64_DESC_SZ; -+ host->align_buffer_sz = SDHCI_MAX_SEGS * -+ SDHCI_ADMA2_64_ALIGN; -+ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ; -+ host->align_sz = SDHCI_ADMA2_64_ALIGN; -+ host->align_mask = SDHCI_ADMA2_64_ALIGN - 1; -+ } else { -+ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * -+ SDHCI_ADMA2_32_DESC_SZ; -+ host->align_buffer_sz = SDHCI_MAX_SEGS * -+ SDHCI_ADMA2_32_ALIGN; -+ host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; -+ host->align_sz = SDHCI_ADMA2_32_ALIGN; -+ host->align_mask = SDHCI_ADMA2_32_ALIGN - 1; -+ } -+ host->adma_table = dma_alloc_coherent(mmc_dev(mmc), -+ host->adma_table_sz, -+ &host->adma_addr, -+ GFP_KERNEL); -+ host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); -+ if (!host->adma_table || !host->align_buffer) { -+ if (host->adma_table) -+ dma_free_coherent(mmc_dev(mmc), -+ host->adma_table_sz, -+ host->adma_table, -+ host->adma_addr); - kfree(host->align_buffer); - pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", - mmc_hostname(mmc)); - host->flags &= ~SDHCI_USE_ADMA; -- host->adma_desc = NULL; -+ host->adma_table = NULL; - host->align_buffer = NULL; -- } else if (host->adma_addr & 3) { -+ } else if (host->adma_addr & host->align_mask) { - pr_warn("%s: unable to allocate aligned ADMA descriptor\n", - mmc_hostname(mmc)); - host->flags &= ~SDHCI_USE_ADMA; -- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, -- host->adma_desc, host->adma_addr); -+ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, -+ host->adma_table, host->adma_addr); - kfree(host->align_buffer); -- host->adma_desc = NULL; -+ host->adma_table = 
NULL; - host->align_buffer = NULL; - } - } -@@ -2995,7 +3068,8 @@ int sdhci_add_host(struct sdhci_host *host) - /* Auto-CMD23 stuff only works in ADMA or PIO. */ - if ((host->version >= SDHCI_SPEC_300) && - ((host->flags & SDHCI_USE_ADMA) || -- !(host->flags & SDHCI_USE_SDMA))) { -+ !(host->flags & SDHCI_USE_SDMA)) && -+ !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { - host->flags |= SDHCI_AUTO_CMD23; - DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc)); - } else { -@@ -3152,13 +3226,14 @@ int sdhci_add_host(struct sdhci_host *host) - SDHCI_MAX_CURRENT_MULTIPLIER; - } - -- /* If OCR set by external regulators, use it instead */ -+ /* If OCR set by host, use it instead. */ -+ if (host->ocr_mask) -+ ocr_avail = host->ocr_mask; -+ -+ /* If OCR set by external regulators, give it highest prio. */ - if (mmc->ocr_avail) - ocr_avail = mmc->ocr_avail; - -- if (host->ocr_mask) -- ocr_avail &= host->ocr_mask; -- - mmc->ocr_avail = ocr_avail; - mmc->ocr_avail_sdio = ocr_avail; - if (host->ocr_avail_sdio) -@@ -3185,11 +3260,11 @@ int sdhci_add_host(struct sdhci_host *host) - * can do scatter/gather or not. - */ - if (host->flags & SDHCI_USE_ADMA) -- mmc->max_segs = 128; -+ mmc->max_segs = SDHCI_MAX_SEGS; - else if (host->flags & SDHCI_USE_SDMA) - mmc->max_segs = 1; - else /* PIO */ -- mmc->max_segs = 128; -+ mmc->max_segs = SDHCI_MAX_SEGS; - - /* - * Maximum number of sectors in one transfer. Limited by DMA boundary -@@ -3287,7 +3362,8 @@ int sdhci_add_host(struct sdhci_host *host) - - pr_info("%s: SDHCI controller on %s [%s] using %s\n", - mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), -- (host->flags & SDHCI_USE_ADMA) ? "ADMA" : -+ (host->flags & SDHCI_USE_ADMA) ? -+ (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : - (host->flags & SDHCI_USE_SDMA) ? 
"DMA" : "PIO"); - - sdhci_enable_card_detection(host); -@@ -3355,12 +3431,12 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) - if (!IS_ERR(mmc->supply.vqmmc)) - regulator_disable(mmc->supply.vqmmc); - -- if (host->adma_desc) -- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, -- host->adma_desc, host->adma_addr); -+ if (host->adma_table) -+ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, -+ host->adma_table, host->adma_addr); - kfree(host->align_buffer); - -- host->adma_desc = NULL; -+ host->adma_table = NULL; - host->align_buffer = NULL; - } - -diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h -index 31896a7..5220f36 100644 ---- a/drivers/mmc/host/sdhci.h -+++ b/drivers/mmc/host/sdhci.h -@@ -227,6 +227,7 @@ - /* 55-57 reserved */ - - #define SDHCI_ADMA_ADDRESS 0x58 -+#define SDHCI_ADMA_ADDRESS_HI 0x5C - - /* 60-FB reserved */ - -@@ -266,6 +267,46 @@ - #define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024) - #define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12) - -+/* ADMA2 32-bit DMA descriptor size */ -+#define SDHCI_ADMA2_32_DESC_SZ 8 -+ -+/* ADMA2 32-bit DMA alignment */ -+#define SDHCI_ADMA2_32_ALIGN 4 -+ -+/* ADMA2 32-bit descriptor */ -+struct sdhci_adma2_32_desc { -+ __le16 cmd; -+ __le16 len; -+ __le32 addr; -+} __packed __aligned(SDHCI_ADMA2_32_ALIGN); -+ -+/* ADMA2 64-bit DMA descriptor size */ -+#define SDHCI_ADMA2_64_DESC_SZ 12 -+ -+/* ADMA2 64-bit DMA alignment */ -+#define SDHCI_ADMA2_64_ALIGN 8 -+ -+/* -+ * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte -+ * aligned. -+ */ -+struct sdhci_adma2_64_desc { -+ __le16 cmd; -+ __le16 len; -+ __le32 addr_lo; -+ __le32 addr_hi; -+} __packed __aligned(4); -+ -+#define ADMA2_TRAN_VALID 0x21 -+#define ADMA2_NOP_END_VALID 0x3 -+#define ADMA2_END 0x2 -+ -+/* -+ * Maximum segments assuming a 512KiB maximum requisition size and a minimum -+ * 4KiB page size. 
-+ */ -+#define SDHCI_MAX_SEGS 128 -+ - struct sdhci_ops { - #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS - u32 (*read_l)(struct sdhci_host *host, int reg); -@@ -296,6 +337,7 @@ struct sdhci_ops { - void (*adma_workaround)(struct sdhci_host *host, u32 intmask); - void (*platform_init)(struct sdhci_host *host); - void (*card_event)(struct sdhci_host *host); -+ void (*voltage_switch)(struct sdhci_host *host); - }; - - #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS -diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig -index dd10646..34ce759 100644 ---- a/drivers/mtd/nand/Kconfig -+++ b/drivers/mtd/nand/Kconfig -@@ -429,7 +429,7 @@ config MTD_NAND_FSL_ELBC - - config MTD_NAND_FSL_IFC - tristate "NAND support for Freescale IFC controller" -- depends on MTD_NAND && FSL_SOC -+ depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE) - select FSL_IFC - select MEMORY - help -diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c -index 2338124..c8be272 100644 ---- a/drivers/mtd/nand/fsl_ifc_nand.c -+++ b/drivers/mtd/nand/fsl_ifc_nand.c -@@ -31,7 +31,6 @@ - #include - #include - --#define FSL_IFC_V1_1_0 0x01010000 - #define ERR_BYTE 0xFF /* Value returned for read - bytes when read failed */ - #define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait -@@ -234,13 +233,13 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) - struct nand_chip *chip = mtd->priv; - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - int buf_num; - - ifc_nand_ctrl->page = page_addr; - /* Program ROW0/COL0 */ -- iowrite32be(page_addr, &ifc->ifc_nand.row0); -- iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0); -+ ifc_out32(page_addr, &ifc->ifc_nand.row0); -+ ifc_out32((oob ? 
IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0); - - buf_num = page_addr & priv->bufnum_mask; - -@@ -297,28 +296,28 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; - struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - u32 eccstat[4]; - int i; - - /* set the chip select for NAND Transaction */ -- iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT, -- &ifc->ifc_nand.nand_csel); -+ ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT, -+ &ifc->ifc_nand.nand_csel); - - dev_vdbg(priv->dev, - "%s: fir0=%08x fcr0=%08x\n", - __func__, -- ioread32be(&ifc->ifc_nand.nand_fir0), -- ioread32be(&ifc->ifc_nand.nand_fcr0)); -+ ifc_in32(&ifc->ifc_nand.nand_fir0), -+ ifc_in32(&ifc->ifc_nand.nand_fcr0)); - - ctrl->nand_stat = 0; - - /* start read/write seq */ -- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); -+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); - - /* wait for command complete flag or timeout */ - wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, -- IFC_TIMEOUT_MSECS * HZ/1000); -+ msecs_to_jiffies(IFC_TIMEOUT_MSECS)); - - /* ctrl->nand_stat will be updated from IRQ context */ - if (!ctrl->nand_stat) -@@ -337,7 +336,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) - int sector_end = sector + chip->ecc.steps - 1; - - for (i = sector / 4; i <= sector_end / 4; i++) -- eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]); -+ eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]); - - for (i = sector; i <= sector_end; i++) { - errors = check_read_ecc(mtd, ctrl, eccstat, i); -@@ -373,37 +372,37 @@ static void fsl_ifc_do_read(struct nand_chip *chip, - { - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - 
- /* Program FIR/IFC_NAND_FCR0 for Small/Large page */ - if (mtd->writesize > 512) { -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | -- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1); -- -- iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | -- (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT), -- &ifc->ifc_nand.nand_fcr0); -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | -+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1); -+ -+ ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | -+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT), -+ &ifc->ifc_nand.nand_fcr0); - } else { -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1); -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1); - - if (oob) -- iowrite32be(NAND_CMD_READOOB << -- IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(NAND_CMD_READOOB << -+ IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc->ifc_nand.nand_fcr0); - else -- iowrite32be(NAND_CMD_READ0 << -- IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(NAND_CMD_READ0 << -+ IFC_NAND_FCR0_CMD0_SHIFT, -+ 
&ifc->ifc_nand.nand_fcr0); - } - } - -@@ -413,7 +412,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - struct nand_chip *chip = mtd->priv; - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - - /* clear the read buffer */ - ifc_nand_ctrl->read_bytes = 0; -@@ -423,7 +422,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - switch (command) { - /* READ0 read the entire buffer to use hardware ECC. */ - case NAND_CMD_READ0: -- iowrite32be(0, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); - set_addr(mtd, 0, page_addr, 0); - - ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; -@@ -438,7 +437,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - - /* READOOB reads only the OOB because no ECC is performed. */ - case NAND_CMD_READOOB: -- iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr); - set_addr(mtd, column, page_addr, 1); - - ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; -@@ -454,19 +453,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - if (command == NAND_CMD_PARAM) - timing = IFC_FIR_OP_RBCD; - -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | -- (timing << IFC_NAND_FIR0_OP2_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -- iowrite32be(column, &ifc->ifc_nand.row3); -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | -+ (timing << IFC_NAND_FIR0_OP2_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(column, &ifc->ifc_nand.row3); - - /* - * although 
currently it's 8 bytes for READID, we always read - * the maximum 256 bytes(for PARAM) - */ -- iowrite32be(256, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(256, &ifc->ifc_nand.nand_fbcr); - ifc_nand_ctrl->read_bytes = 256; - - set_addr(mtd, 0, 0, 0); -@@ -481,16 +480,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - - /* ERASE2 uses the block and page address from ERASE1 */ - case NAND_CMD_ERASE2: -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | -- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT), -- &ifc->ifc_nand.nand_fir0); -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | -+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT), -+ &ifc->ifc_nand.nand_fir0); - -- iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | -- (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT), -- &ifc->ifc_nand.nand_fcr0); -+ ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | -+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT), -+ &ifc->ifc_nand.nand_fcr0); - -- iowrite32be(0, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); - ifc_nand_ctrl->read_bytes = 0; - fsl_ifc_run_command(mtd); - return; -@@ -507,19 +506,18 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) | - (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT); - -- iowrite32be( -- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | -- (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be( -- (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) | -- (IFC_FIR_OP_RDSTAT << -- IFC_NAND_FIR1_OP6_SHIFT) | -- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT), -- &ifc->ifc_nand.nand_fir1); -+ ifc_out32( -+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) 
| -+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | -+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32( -+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) | -+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) | -+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT), -+ &ifc->ifc_nand.nand_fir1); - } else { - nand_fcr0 = ((NAND_CMD_PAGEPROG << - IFC_NAND_FCR0_CMD1_SHIFT) | -@@ -528,20 +526,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - (NAND_CMD_STATUS << - IFC_NAND_FCR0_CMD3_SHIFT)); - -- iowrite32be( -+ ifc_out32( - (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | - (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) | - (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) | - (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) | - (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT), - &ifc->ifc_nand.nand_fir0); -- iowrite32be( -- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) | -- (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) | -- (IFC_FIR_OP_RDSTAT << -- IFC_NAND_FIR1_OP7_SHIFT) | -- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT), -- &ifc->ifc_nand.nand_fir1); -+ ifc_out32( -+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) | -+ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) | -+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) | -+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT), -+ &ifc->ifc_nand.nand_fir1); - - if (column >= mtd->writesize) - nand_fcr0 |= -@@ -556,7 +553,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - column -= mtd->writesize; - ifc_nand_ctrl->oob = 1; - } -- iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0); - set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob); - return; - } -@@ -564,24 +561,26 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ - case 
NAND_CMD_PAGEPROG: { - if (ifc_nand_ctrl->oob) { -- iowrite32be(ifc_nand_ctrl->index - -- ifc_nand_ctrl->column, -- &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(ifc_nand_ctrl->index - -+ ifc_nand_ctrl->column, -+ &ifc->ifc_nand.nand_fbcr); - } else { -- iowrite32be(0, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); - } - - fsl_ifc_run_command(mtd); - return; - } - -- case NAND_CMD_STATUS: -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -- iowrite32be(1, &ifc->ifc_nand.nand_fbcr); -+ case NAND_CMD_STATUS: { -+ void __iomem *addr; -+ -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr); - set_addr(mtd, 0, 0, 0); - ifc_nand_ctrl->read_bytes = 1; - -@@ -591,17 +590,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - * The chip always seems to report that it is - * write-protected, even when it is not. 
- */ -+ addr = ifc_nand_ctrl->addr; - if (chip->options & NAND_BUSWIDTH_16) -- setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP); -+ ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr); - else -- setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP); -+ ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr); - return; -+ } - - case NAND_CMD_RESET: -- iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT, -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT, -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc->ifc_nand.nand_fcr0); - fsl_ifc_run_command(mtd); - return; - -@@ -659,7 +660,7 @@ static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd) - */ - if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { - offset = ifc_nand_ctrl->index++; -- return in_8(ifc_nand_ctrl->addr + offset); -+ return ifc_in8(ifc_nand_ctrl->addr + offset); - } - - dev_err(priv->dev, "%s: beyond end of buffer\n", __func__); -@@ -681,7 +682,7 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd) - * next byte. 
- */ - if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { -- data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index); -+ data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index); - ifc_nand_ctrl->index += 2; - return (uint8_t) data; - } -@@ -723,22 +724,22 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) - { - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - u32 nand_fsr; - - /* Use READ_STATUS command, but wait for the device to be ready */ -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -- iowrite32be(1, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr); - set_addr(mtd, 0, 0, 0); - ifc_nand_ctrl->read_bytes = 1; - - fsl_ifc_run_command(mtd); - -- nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr); -+ nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); - - /* - * The chip always seems to report that it is -@@ -825,67 +826,72 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd) - static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) - { - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; -+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; - uint32_t csor = 0, csor_8k = 0, csor_ext = 0; - uint32_t cs = priv->bank; - - /* Save CSOR and CSOR_ext */ -- csor = ioread32be(&ifc->csor_cs[cs].csor); -- csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext); -+ csor = 
ifc_in32(&ifc_global->csor_cs[cs].csor); -+ csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext); - - /* chage PageSize 8K and SpareSize 1K*/ - csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000; -- iowrite32be(csor_8k, &ifc->csor_cs[cs].csor); -- iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext); -+ ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor); -+ ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext); - - /* READID */ -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | - (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | - (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -- iowrite32be(0x0, &ifc->ifc_nand.row3); -+ &ifc_runtime->ifc_nand.nand_fir0); -+ ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc_runtime->ifc_nand.nand_fcr0); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row3); - -- iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr); - - /* Program ROW0/COL0 */ -- iowrite32be(0x0, &ifc->ifc_nand.row0); -- iowrite32be(0x0, &ifc->ifc_nand.col0); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row0); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.col0); - - /* set the chip select for NAND Transaction */ -- iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel); -+ ifc_out32(cs << IFC_NAND_CSEL_SHIFT, -+ &ifc_runtime->ifc_nand.nand_csel); - - /* start read seq */ -- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); -+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, -+ &ifc_runtime->ifc_nand.nandseq_strt); - - /* wait for command complete flag or timeout */ - wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, -- IFC_TIMEOUT_MSECS * HZ/1000); -+ msecs_to_jiffies(IFC_TIMEOUT_MSECS)); - - if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) - printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n"); - - /* Restore CSOR and 
CSOR_ext */ -- iowrite32be(csor, &ifc->csor_cs[cs].csor); -- iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext); -+ ifc_out32(csor, &ifc_global->csor_cs[cs].csor); -+ ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext); - } - - static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) - { - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; -+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; - struct nand_chip *chip = &priv->chip; - struct nand_ecclayout *layout; -- u32 csor, ver; -+ u32 csor; - - /* Fill in fsl_ifc_mtd structure */ - priv->mtd.priv = chip; -- priv->mtd.owner = THIS_MODULE; -+ priv->mtd.dev.parent = priv->dev; - - /* fill in nand_chip structure */ - /* set up function call table */ -- if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16) -+ if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)) -+ & CSPR_PORT_SIZE_16) - chip->read_byte = fsl_ifc_read_byte16; - else - chip->read_byte = fsl_ifc_read_byte; -@@ -899,13 +905,14 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) - chip->bbt_td = &bbt_main_descr; - chip->bbt_md = &bbt_mirror_descr; - -- iowrite32be(0x0, &ifc->ifc_nand.ncfgr); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr); - - /* set up nand options */ - chip->bbt_options = NAND_BBT_USE_FLASH; - chip->options = NAND_NO_SUBPAGE_WRITE; - -- if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) { -+ if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr) -+ & CSPR_PORT_SIZE_16) { - chip->read_byte = fsl_ifc_read_byte16; - chip->options |= NAND_BUSWIDTH_16; - } else { -@@ -918,7 +925,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) - chip->ecc.read_page = fsl_ifc_read_page; - chip->ecc.write_page = fsl_ifc_write_page; - -- csor = ioread32be(&ifc->csor_cs[priv->bank].csor); -+ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor); - - /* Hardware generates ECC per 512 Bytes */ - chip->ecc.size 
= 512; -@@ -984,8 +991,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) - chip->ecc.mode = NAND_ECC_SOFT; - } - -- ver = ioread32be(&ifc->ifc_rev); -- if (ver == FSL_IFC_V1_1_0) -+ if (ctrl->version == FSL_IFC_VERSION_1_1_0) - fsl_ifc_sram_init(priv); - - return 0; -@@ -1005,10 +1011,10 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv) - return 0; - } - --static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank, -+static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank, - phys_addr_t addr) - { -- u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr); -+ u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr); - - if (!(cspr & CSPR_V)) - return 0; -@@ -1022,7 +1028,7 @@ static DEFINE_MUTEX(fsl_ifc_nand_mutex); - - static int fsl_ifc_nand_probe(struct platform_device *dev) - { -- struct fsl_ifc_regs __iomem *ifc; -+ struct fsl_ifc_runtime __iomem *ifc; - struct fsl_ifc_mtd *priv; - struct resource res; - static const char *part_probe_types[] -@@ -1033,9 +1039,9 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) - struct mtd_part_parser_data ppdata; - - ppdata.of_node = dev->dev.of_node; -- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) -+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs) - return -ENODEV; -- ifc = fsl_ifc_ctrl_dev->regs; -+ ifc = fsl_ifc_ctrl_dev->rregs; - - /* get, allocate and map the memory resource */ - ret = of_address_to_resource(node, 0, &res); -@@ -1045,12 +1051,12 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) - } - - /* find which chip select it is connected to */ -- for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) { -- if (match_bank(ifc, bank, res.start)) -+ for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) { -+ if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start)) - break; - } - -- if (bank >= FSL_IFC_BANK_COUNT) { -+ if (bank >= fsl_ifc_ctrl_dev->banks) { - dev_err(&dev->dev, "%s: address did not match any chip selects\n", - __func__); - return -ENODEV; -@@ 
-1094,16 +1100,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) - - dev_set_drvdata(priv->dev, priv); - -- iowrite32be(IFC_NAND_EVTER_EN_OPC_EN | -- IFC_NAND_EVTER_EN_FTOER_EN | -- IFC_NAND_EVTER_EN_WPER_EN, -- &ifc->ifc_nand.nand_evter_en); -+ ifc_out32(IFC_NAND_EVTER_EN_OPC_EN | -+ IFC_NAND_EVTER_EN_FTOER_EN | -+ IFC_NAND_EVTER_EN_WPER_EN, -+ &ifc->ifc_nand.nand_evter_en); - - /* enable NAND Machine Interrupts */ -- iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN | -- IFC_NAND_EVTER_INTR_FTOERIR_EN | -- IFC_NAND_EVTER_INTR_WPERIR_EN, -- &ifc->ifc_nand.nand_evter_intr_en); -+ ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN | -+ IFC_NAND_EVTER_INTR_FTOERIR_EN | -+ IFC_NAND_EVTER_INTR_WPERIR_EN, -+ &ifc->ifc_nand.nand_evter_intr_en); - priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start); - if (!priv->mtd.name) { - ret = -ENOMEM; -@@ -1163,6 +1169,7 @@ static const struct of_device_id fsl_ifc_nand_match[] = { - }, - {} - }; -+MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match); - - static struct platform_driver fsl_ifc_nand_driver = { - .driver = { -diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig -index 2703083..0c1c97d 100644 ---- a/drivers/net/ethernet/freescale/Kconfig -+++ b/drivers/net/ethernet/freescale/Kconfig -@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE - default y - depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ - M523x || M527x || M5272 || M528x || M520x || M532x || \ -- ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) -+ ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \ -+ ARCH_LAYERSCAPE - ---help--- - If you have a network (Ethernet) card belonging to this class, say Y - and read the Ethernet-HOWTO, available from -@@ -58,18 +59,17 @@ source "drivers/net/ethernet/freescale/fs_enet/Kconfig" - - config FSL_PQ_MDIO - tristate "Freescale PQ MDIO" -- depends on FSL_SOC - select PHYLIB - ---help--- - This driver supports the MDIO bus used by the gianfar and UCC drivers. 
- - config FSL_XGMAC_MDIO - tristate "Freescale XGMAC MDIO" -- depends on FSL_SOC - select PHYLIB - select OF_MDIO - ---help--- -- This driver supports the MDIO bus on the Fman 10G Ethernet MACs. -+ This driver supports the MDIO bus on the Fman 10G Ethernet MACs and -+ on mEMAC (which supports both Clauses 22 and 45) - - config UCC_GETH - tristate "Freescale QE Gigabit Ethernet" -diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c -index ff55fbb..76ff046 100644 ---- a/drivers/net/ethernet/freescale/fec_mpc52xx.c -+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c -@@ -1057,7 +1057,7 @@ static int mpc52xx_fec_of_resume(struct platform_device *op) - } - #endif - --static struct of_device_id mpc52xx_fec_match[] = { -+static const struct of_device_id mpc52xx_fec_match[] = { - { .compatible = "fsl,mpc5200b-fec", }, - { .compatible = "fsl,mpc5200-fec", }, - { .compatible = "mpc5200-fec", }, -diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c -index e052890..1e647be 100644 ---- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c -+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c -@@ -134,7 +134,7 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of) - return 0; - } - --static struct of_device_id mpc52xx_fec_mdio_match[] = { -+static const struct of_device_id mpc52xx_fec_mdio_match[] = { - { .compatible = "fsl,mpc5200b-mdio", }, - { .compatible = "fsl,mpc5200-mdio", }, - { .compatible = "mpc5200b-fec-phy", }, -diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c -index c92c3b7..dc0da6c 100644 ---- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c -+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c -@@ -886,7 +886,7 @@ static const struct net_device_ops fs_enet_netdev_ops = { - #endif - }; - --static struct of_device_id fs_enet_match[]; -+static const struct 
of_device_id fs_enet_match[]; - static int fs_enet_probe(struct platform_device *ofdev) - { - const struct of_device_id *match; -@@ -1047,7 +1047,7 @@ static int fs_enet_remove(struct platform_device *ofdev) - return 0; - } - --static struct of_device_id fs_enet_match[] = { -+static const struct of_device_id fs_enet_match[] = { - #ifdef CONFIG_FS_ENET_HAS_SCC - { - .compatible = "fsl,cpm1-scc-enet", -diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c -index 3d3fde6..9ec396b 100644 ---- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c -+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c -@@ -213,7 +213,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) - return 0; - } - --static struct of_device_id fs_enet_mdio_bb_match[] = { -+static const struct of_device_id fs_enet_mdio_bb_match[] = { - { - .compatible = "fsl,cpm2-mdio-bitbang", - }, -diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c -index ebf5d64..72205b0 100644 ---- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c -+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c -@@ -95,7 +95,7 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, - - } - --static struct of_device_id fs_enet_mdio_fec_match[]; -+static const struct of_device_id fs_enet_mdio_fec_match[]; - static int fs_enet_mdio_probe(struct platform_device *ofdev) - { - const struct of_device_id *match; -@@ -208,7 +208,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) - return 0; - } - --static struct of_device_id fs_enet_mdio_fec_match[] = { -+static const struct of_device_id fs_enet_mdio_fec_match[] = { - { - .compatible = "fsl,pq1-fec-mdio", - }, -diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c -index 964c6bf..f94fa63 100644 ---- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c -+++ 
b/drivers/net/ethernet/freescale/fsl_pq_mdio.c -@@ -294,7 +294,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) - - #endif - --static struct of_device_id fsl_pq_mdio_match[] = { -+static const struct of_device_id fsl_pq_mdio_match[] = { - #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) - { - .compatible = "fsl,gianfar-tbi", -diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c -index 4fdf0aa..0359cfd 100644 ---- a/drivers/net/ethernet/freescale/gianfar.c -+++ b/drivers/net/ethernet/freescale/gianfar.c -@@ -86,11 +86,11 @@ - #include - #include - #include -+#include - - #include - #ifdef CONFIG_PPC - #include --#include - #endif - #include - #include -@@ -1720,8 +1720,10 @@ static void gfar_configure_serdes(struct net_device *dev) - * everything for us? Resetting it takes the link down and requires - * several seconds for it to come back. - */ -- if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) -+ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { -+ put_device(&tbiphy->dev); - return; -+ } - - /* Single clk mode, mii mode off(for serdes communication) */ - phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); -@@ -3455,7 +3457,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) - phy_print_status(phydev); - } - --static struct of_device_id gfar_match[] = -+static const struct of_device_id gfar_match[] = - { - { - .type = "network", -diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c -index bb56800..c7c75de 100644 ---- a/drivers/net/ethernet/freescale/gianfar_ptp.c -+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c -@@ -554,7 +554,7 @@ static int gianfar_ptp_remove(struct platform_device *dev) - return 0; - } - --static struct of_device_id match_table[] = { -+static const struct of_device_id match_table[] = { - { .compatible = "fsl,etsec-ptp" }, - {}, - }; -diff --git a/drivers/net/ethernet/freescale/ucc_geth.c 
b/drivers/net/ethernet/freescale/ucc_geth.c -index 3cf0478..741a7d4 100644 ---- a/drivers/net/ethernet/freescale/ucc_geth.c -+++ b/drivers/net/ethernet/freescale/ucc_geth.c -@@ -3930,7 +3930,7 @@ static int ucc_geth_remove(struct platform_device* ofdev) - return 0; - } - --static struct of_device_id ucc_geth_match[] = { -+static const struct of_device_id ucc_geth_match[] = { - { - .type = "network", - .compatible = "ucc_geth", -diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c -index 6e7db66..7b8fe86 100644 ---- a/drivers/net/ethernet/freescale/xgmac_mdio.c -+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c -@@ -32,31 +32,62 @@ struct tgec_mdio_controller { - __be32 mdio_addr; /* MDIO address */ - } __packed; - -+#define MDIO_STAT_ENC BIT(6) - #define MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8) --#define MDIO_STAT_BSY (1 << 0) --#define MDIO_STAT_RD_ER (1 << 1) -+#define MDIO_STAT_BSY BIT(0) -+#define MDIO_STAT_RD_ER BIT(1) - #define MDIO_CTL_DEV_ADDR(x) (x & 0x1f) - #define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5) --#define MDIO_CTL_PRE_DIS (1 << 10) --#define MDIO_CTL_SCAN_EN (1 << 11) --#define MDIO_CTL_POST_INC (1 << 14) --#define MDIO_CTL_READ (1 << 15) -+#define MDIO_CTL_PRE_DIS BIT(10) -+#define MDIO_CTL_SCAN_EN BIT(11) -+#define MDIO_CTL_POST_INC BIT(14) -+#define MDIO_CTL_READ BIT(15) - - #define MDIO_DATA(x) (x & 0xffff) --#define MDIO_DATA_BSY (1 << 31) -+#define MDIO_DATA_BSY BIT(31) -+ -+struct mdio_fsl_priv { -+ struct tgec_mdio_controller __iomem *mdio_base; -+ bool is_little_endian; -+}; -+ -+static u32 xgmac_read32(void __iomem *regs, -+ bool is_little_endian) -+{ -+ if (is_little_endian) -+ return ioread32(regs); -+ else -+ return ioread32be(regs); -+} -+ -+static void xgmac_write32(u32 value, -+ void __iomem *regs, -+ bool is_little_endian) -+{ -+ if (is_little_endian) -+ iowrite32(value, regs); -+ else -+ iowrite32be(value, regs); -+} - - /* - * Wait until the MDIO bus is free - */ - static int 
xgmac_wait_until_free(struct device *dev, -- struct tgec_mdio_controller __iomem *regs) -+ struct tgec_mdio_controller __iomem *regs, -+ bool is_little_endian) - { -- uint32_t status; -+ unsigned int timeout; - - /* Wait till the bus is free */ -- status = spin_event_timeout( -- !((in_be32(®s->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0); -- if (!status) { -+ timeout = TIMEOUT; -+ while ((xgmac_read32(®s->mdio_stat, is_little_endian) & -+ MDIO_STAT_BSY) && timeout) { -+ cpu_relax(); -+ timeout--; -+ } -+ -+ if (!timeout) { - dev_err(dev, "timeout waiting for bus to be free\n"); - return -ETIMEDOUT; - } -@@ -68,14 +99,20 @@ static int xgmac_wait_until_free(struct device *dev, - * Wait till the MDIO read or write operation is complete - */ - static int xgmac_wait_until_done(struct device *dev, -- struct tgec_mdio_controller __iomem *regs) -+ struct tgec_mdio_controller __iomem *regs, -+ bool is_little_endian) - { -- uint32_t status; -+ unsigned int timeout; - - /* Wait till the MDIO write is complete */ -- status = spin_event_timeout( -- !((in_be32(®s->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0); -- if (!status) { -+ timeout = TIMEOUT; -+ while ((xgmac_read32(®s->mdio_stat, is_little_endian) & -+ MDIO_STAT_BSY) && timeout) { -+ cpu_relax(); -+ timeout--; -+ } -+ -+ if (!timeout) { - dev_err(dev, "timeout waiting for operation to complete\n"); - return -ETIMEDOUT; - } -@@ -90,32 +127,47 @@ static int xgmac_wait_until_done(struct device *dev, - */ - static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) - { -- struct tgec_mdio_controller __iomem *regs = bus->priv; -- uint16_t dev_addr = regnum >> 16; -+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; -+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base; -+ uint16_t dev_addr; -+ u32 mdio_ctl, mdio_stat; - int ret; -+ bool endian = priv->is_little_endian; -+ -+ mdio_stat = xgmac_read32(®s->mdio_stat, endian); -+ if (regnum & MII_ADDR_C45) { -+ /* Clause 45 (ie 10G) */ -+ 
dev_addr = (regnum >> 16) & 0x1f; -+ mdio_stat |= MDIO_STAT_ENC; -+ } else { -+ /* Clause 22 (ie 1G) */ -+ dev_addr = regnum & 0x1f; -+ mdio_stat &= ~MDIO_STAT_ENC; -+ } - -- /* Setup the MII Mgmt clock speed */ -- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); -+ xgmac_write32(mdio_stat, ®s->mdio_stat, endian); - -- ret = xgmac_wait_until_free(&bus->dev, regs); -+ ret = xgmac_wait_until_free(&bus->dev, regs, endian); - if (ret) - return ret; - - /* Set the port and dev addr */ -- out_be32(®s->mdio_ctl, -- MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr)); -+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); -+ xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); - - /* Set the register address */ -- out_be32(®s->mdio_addr, regnum & 0xffff); -+ if (regnum & MII_ADDR_C45) { -+ xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); - -- ret = xgmac_wait_until_free(&bus->dev, regs); -- if (ret) -- return ret; -+ ret = xgmac_wait_until_free(&bus->dev, regs, endian); -+ if (ret) -+ return ret; -+ } - - /* Write the value to the register */ -- out_be32(®s->mdio_data, MDIO_DATA(value)); -+ xgmac_write32(MDIO_DATA(value), ®s->mdio_data, endian); - -- ret = xgmac_wait_until_done(&bus->dev, regs); -+ ret = xgmac_wait_until_done(&bus->dev, regs, endian); - if (ret) - return ret; - -@@ -129,74 +181,70 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val - */ - static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) - { -- struct tgec_mdio_controller __iomem *regs = bus->priv; -- uint16_t dev_addr = regnum >> 16; -+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; -+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base; -+ uint16_t dev_addr; -+ uint32_t mdio_stat; - uint32_t mdio_ctl; - uint16_t value; - int ret; -+ bool endian = priv->is_little_endian; -+ -+ mdio_stat = xgmac_read32(®s->mdio_stat, endian); -+ if (regnum & MII_ADDR_C45) { -+ dev_addr = (regnum >> 16) & 0x1f; -+ mdio_stat 
|= MDIO_STAT_ENC; -+ } else { -+ dev_addr = regnum & 0x1f; -+ mdio_stat &= ~MDIO_STAT_ENC; -+ } - -- /* Setup the MII Mgmt clock speed */ -- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); -+ xgmac_write32(mdio_stat, ®s->mdio_stat, endian); - -- ret = xgmac_wait_until_free(&bus->dev, regs); -+ ret = xgmac_wait_until_free(&bus->dev, regs, endian); - if (ret) - return ret; - - /* Set the Port and Device Addrs */ - mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); -- out_be32(®s->mdio_ctl, mdio_ctl); -+ xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); - - /* Set the register address */ -- out_be32(®s->mdio_addr, regnum & 0xffff); -+ if (regnum & MII_ADDR_C45) { -+ xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); - -- ret = xgmac_wait_until_free(&bus->dev, regs); -- if (ret) -- return ret; -+ ret = xgmac_wait_until_free(&bus->dev, regs, endian); -+ if (ret) -+ return ret; -+ } - - /* Initiate the read */ -- out_be32(®s->mdio_ctl, mdio_ctl | MDIO_CTL_READ); -+ xgmac_write32(mdio_ctl | MDIO_CTL_READ, ®s->mdio_ctl, endian); - -- ret = xgmac_wait_until_done(&bus->dev, regs); -+ ret = xgmac_wait_until_done(&bus->dev, regs, endian); - if (ret) - return ret; - - /* Return all Fs if nothing was there */ -- if (in_be32(®s->mdio_stat) & MDIO_STAT_RD_ER) { -+ if (xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) { - dev_err(&bus->dev, - "Error while reading PHY%d reg at %d.%hhu\n", - phy_id, dev_addr, regnum); - return 0xffff; - } - -- value = in_be32(®s->mdio_data) & 0xffff; -+ value = xgmac_read32(®s->mdio_data, endian) & 0xffff; - dev_dbg(&bus->dev, "read %04x\n", value); - - return value; - } - --/* Reset the MIIM registers, and wait for the bus to free */ --static int xgmac_mdio_reset(struct mii_bus *bus) --{ -- struct tgec_mdio_controller __iomem *regs = bus->priv; -- int ret; -- -- mutex_lock(&bus->mdio_lock); -- -- /* Setup the MII Mgmt clock speed */ -- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); -- -- ret = 
xgmac_wait_until_free(&bus->dev, regs); -- -- mutex_unlock(&bus->mdio_lock); -- -- return ret; --} -- - static int xgmac_mdio_probe(struct platform_device *pdev) - { - struct device_node *np = pdev->dev.of_node; - struct mii_bus *bus; - struct resource res; -+ struct mdio_fsl_priv *priv; - int ret; - - ret = of_address_to_resource(np, 0, &res); -@@ -205,25 +253,30 @@ static int xgmac_mdio_probe(struct platform_device *pdev) - return ret; - } - -- bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int)); -+ bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv)); - if (!bus) - return -ENOMEM; - - bus->name = "Freescale XGMAC MDIO Bus"; - bus->read = xgmac_mdio_read; - bus->write = xgmac_mdio_write; -- bus->reset = xgmac_mdio_reset; -- bus->irq = bus->priv; - bus->parent = &pdev->dev; - snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", (unsigned long long)res.start); - - /* Set the PHY base address */ -- bus->priv = of_iomap(np, 0); -- if (!bus->priv) { -+ priv = bus->priv; -+ priv->mdio_base = of_iomap(np, 0); -+ if (!priv->mdio_base) { - ret = -ENOMEM; - goto err_ioremap; - } - -+ if (of_get_property(pdev->dev.of_node, -+ "little-endian", NULL)) -+ priv->is_little_endian = true; -+ else -+ priv->is_little_endian = false; -+ - ret = of_mdiobus_register(bus, np); - if (ret) { - dev_err(&pdev->dev, "cannot register MDIO bus\n"); -@@ -235,7 +288,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) - return 0; - - err_registration: -- iounmap(bus->priv); -+ iounmap(priv->mdio_base); - - err_ioremap: - mdiobus_free(bus); -@@ -254,10 +307,13 @@ static int xgmac_mdio_remove(struct platform_device *pdev) - return 0; - } - --static struct of_device_id xgmac_mdio_match[] = { -+static const struct of_device_id xgmac_mdio_match[] = { - { - .compatible = "fsl,fman-xmdio", - }, -+ { -+ .compatible = "fsl,fman-memac-mdio", -+ }, - {}, - }; - MODULE_DEVICE_TABLE(of, xgmac_mdio_match); -diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c 
b/drivers/net/ethernet/intel/igb/e1000_82575.c -index 051ea94..2a04baa 100644 ---- a/drivers/net/ethernet/intel/igb/e1000_82575.c -+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c -@@ -286,6 +286,9 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; - break; -+ case BCM54616_E_PHY_ID: -+ phy->type = e1000_phy_bcm54616; -+ break; - default: - ret_val = -E1000_ERR_PHY; - goto out; -@@ -1550,6 +1553,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) - case e1000_i350: - case e1000_i210: - case e1000_i211: -+ case e1000_i354: - phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); - phpm_reg &= ~E1000_82580_PM_GO_LINKD; - wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); -@@ -1593,6 +1597,8 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) - case e1000_phy_82580: - ret_val = igb_copper_link_setup_82580(hw); - break; -+ case e1000_phy_bcm54616: -+ break; - default: - ret_val = -E1000_ERR_PHY; - break; -diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h -index 217f813..5322fbf 100644 ---- a/drivers/net/ethernet/intel/igb/e1000_defines.h -+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h -@@ -860,6 +860,7 @@ - #define M88_VENDOR 0x0141 - #define I210_I_PHY_ID 0x01410C00 - #define M88E1543_E_PHY_ID 0x01410EA0 -+#define BCM54616_E_PHY_ID 0x3625D10 - - /* M88E1000 Specific Registers */ - #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ -diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h -index 2003b37..d82c96b 100644 ---- a/drivers/net/ethernet/intel/igb/e1000_hw.h -+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h -@@ -128,6 +128,7 @@ enum e1000_phy_type { - e1000_phy_ife, - e1000_phy_82580, - e1000_phy_i210, -+ e1000_phy_bcm54616, - }; - - enum e1000_bus_type { -diff --git 
a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c -index e0f3664..013c1f1 100644 ---- a/drivers/net/ethernet/intel/igb/igb_main.c -+++ b/drivers/net/ethernet/intel/igb/igb_main.c -@@ -108,6 +108,7 @@ static const struct pci_device_id igb_pci_tbl[] = { - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII), board_82575 }, - /* required last entry */ - {0, } - }; -diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig -index 75472cf..cdc9f8a 100644 ---- a/drivers/net/phy/Kconfig -+++ b/drivers/net/phy/Kconfig -@@ -14,6 +14,11 @@ if PHYLIB - - comment "MII PHY device drivers" - -+config AQUANTIA_PHY -+ tristate "Drivers for the Aquantia PHYs" -+ ---help--- -+ Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 -+ - config AT803X_PHY - tristate "Drivers for Atheros AT803X PHYs" - ---help--- -@@ -60,6 +65,11 @@ config VITESSE_PHY - ---help--- - Currently supports the vsc8244 - -+config TERANETICS_PHY -+ tristate "Drivers for the Teranetics PHYs" -+ ---help--- -+ Currently supports the Teranetics TN2020 -+ - config SMSC_PHY - tristate "Drivers for SMSC PHYs" - ---help--- -@@ -119,8 +129,8 @@ config MICREL_PHY - Supports the KSZ9021, VSC8201, KS8001 PHYs. - - config FIXED_PHY -- bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" -- depends on PHYLIB=y -+ tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" -+ depends on PHYLIB - ---help--- - Adds the platform "fixed" MDIO Bus to cover the boards that use - PHYs that are not connected to the real MDIO bus. -@@ -202,6 +212,11 @@ config MDIO_BUS_MUX_MMIOREG - the FPGA's registers. - - Currently, only 8-bit registers are supported. 
-+config FSL_10GBASE_KR -+ tristate "Support for 10GBASE-KR on Freescale XFI interface" -+ depends on OF_MDIO -+ help -+ This module provides a driver for Freescale XFI's 10GBASE-KR. - - config MDIO_BCM_UNIMAC - tristate "Broadcom UniMAC MDIO bus controller" -diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile -index eb3b18b..8ad4ac6 100644 ---- a/drivers/net/phy/Makefile -+++ b/drivers/net/phy/Makefile -@@ -3,12 +3,14 @@ - libphy-objs := phy.o phy_device.o mdio_bus.o - - obj-$(CONFIG_PHYLIB) += libphy.o -+obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o - obj-$(CONFIG_MARVELL_PHY) += marvell.o - obj-$(CONFIG_DAVICOM_PHY) += davicom.o - obj-$(CONFIG_CICADA_PHY) += cicada.o - obj-$(CONFIG_LXT_PHY) += lxt.o - obj-$(CONFIG_QSEMI_PHY) += qsemi.o - obj-$(CONFIG_SMSC_PHY) += smsc.o -+obj-$(CONFIG_TERANETICS_PHY) += teranetics.o - obj-$(CONFIG_VITESSE_PHY) += vitesse.o - obj-$(CONFIG_BROADCOM_PHY) += broadcom.o - obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o -@@ -17,7 +19,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o - obj-$(CONFIG_ICPLUS_PHY) += icplus.o - obj-$(CONFIG_REALTEK_PHY) += realtek.o - obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o --obj-$(CONFIG_FIXED_PHY) += fixed.o -+obj-$(CONFIG_FIXED_PHY) += fixed_phy.o - obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o - obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o - obj-$(CONFIG_NATIONAL_PHY) += national.o -@@ -31,6 +33,7 @@ obj-$(CONFIG_AMD_PHY) += amd.o - obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o - obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o - obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o -+obj-$(CONFIG_FSL_10GBASE_KR) += fsl_10gkr.o - obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o - obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o - obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o -diff --git a/drivers/net/phy/aquantia.c b/drivers/net/phy/aquantia.c -new file mode 100644 -index 0000000..d6111af ---- /dev/null -+++ b/drivers/net/phy/aquantia.c -@@ -0,0 +1,201 @@ -+/* -+ * Driver for Aquantia PHY -+ * -+ * Author: Shaohui Xie -+ * -+ * 
Copyright 2015 Freescale Semiconductor, Inc. -+ * -+ * This file is licensed under the terms of the GNU General Public License -+ * version 2. This program is licensed "as is" without any warranty of any -+ * kind, whether express or implied. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define PHY_ID_AQ1202 0x03a1b445 -+#define PHY_ID_AQ2104 0x03a1b460 -+#define PHY_ID_AQR105 0x03a1b4a2 -+#define PHY_ID_AQR405 0x03a1b4b0 -+ -+#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \ -+ SUPPORTED_1000baseT_Full | \ -+ SUPPORTED_100baseT_Full | \ -+ PHY_DEFAULT_FEATURES) -+ -+static int aquantia_config_aneg(struct phy_device *phydev) -+{ -+ phydev->supported = PHY_AQUANTIA_FEATURES; -+ phydev->advertising = phydev->supported; -+ -+ return 0; -+} -+ -+static int aquantia_aneg_done(struct phy_device *phydev) -+{ -+ int reg; -+ -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); -+ return (reg < 0) ? reg : (reg & BMSR_ANEGCOMPLETE); -+} -+ -+static int aquantia_config_intr(struct phy_device *phydev) -+{ -+ int err; -+ -+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { -+ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1); -+ if (err < 0) -+ return err; -+ -+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1); -+ if (err < 0) -+ return err; -+ -+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001); -+ } else { -+ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0); -+ if (err < 0) -+ return err; -+ -+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0); -+ if (err < 0) -+ return err; -+ -+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0); -+ } -+ -+ return err; -+} -+ -+static int aquantia_ack_interrupt(struct phy_device *phydev) -+{ -+ int reg; -+ -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01); -+ return (reg < 0) ? 
reg : 0; -+} -+ -+static int aquantia_read_status(struct phy_device *phydev) -+{ -+ int reg; -+ -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); -+ if (reg & MDIO_STAT1_LSTATUS) -+ phydev->link = 1; -+ else -+ phydev->link = 0; -+ -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); -+ mdelay(10); -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); -+ -+ switch (reg) { -+ case 0x9: -+ phydev->speed = SPEED_2500; -+ break; -+ case 0x5: -+ phydev->speed = SPEED_1000; -+ break; -+ case 0x3: -+ phydev->speed = SPEED_100; -+ break; -+ case 0x7: -+ default: -+ phydev->speed = SPEED_10000; -+ break; -+ } -+ phydev->duplex = DUPLEX_FULL; -+ -+ return 0; -+} -+ -+static struct phy_driver aquantia_driver[] = { -+{ -+ .phy_id = PHY_ID_AQ1202, -+ .phy_id_mask = 0xfffffff0, -+ .name = "Aquantia AQ1202", -+ .features = PHY_AQUANTIA_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .aneg_done = aquantia_aneg_done, -+ .config_aneg = aquantia_config_aneg, -+ .config_intr = aquantia_config_intr, -+ .ack_interrupt = aquantia_ack_interrupt, -+ .read_status = aquantia_read_status, -+ .driver = { .owner = THIS_MODULE,}, -+}, -+{ -+ .phy_id = PHY_ID_AQ2104, -+ .phy_id_mask = 0xfffffff0, -+ .name = "Aquantia AQ2104", -+ .features = PHY_AQUANTIA_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .aneg_done = aquantia_aneg_done, -+ .config_aneg = aquantia_config_aneg, -+ .config_intr = aquantia_config_intr, -+ .ack_interrupt = aquantia_ack_interrupt, -+ .read_status = aquantia_read_status, -+ .driver = { .owner = THIS_MODULE,}, -+}, -+{ -+ .phy_id = PHY_ID_AQR105, -+ .phy_id_mask = 0xfffffff0, -+ .name = "Aquantia AQR105", -+ .features = PHY_AQUANTIA_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .aneg_done = aquantia_aneg_done, -+ .config_aneg = aquantia_config_aneg, -+ .config_intr = aquantia_config_intr, -+ .ack_interrupt = aquantia_ack_interrupt, -+ .read_status = aquantia_read_status, -+ .driver = { .owner = THIS_MODULE,}, -+}, -+{ -+ 
.phy_id = PHY_ID_AQR405, -+ .phy_id_mask = 0xfffffff0, -+ .name = "Aquantia AQR405", -+ .features = PHY_AQUANTIA_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .aneg_done = aquantia_aneg_done, -+ .config_aneg = aquantia_config_aneg, -+ .config_intr = aquantia_config_intr, -+ .ack_interrupt = aquantia_ack_interrupt, -+ .read_status = aquantia_read_status, -+ .driver = { .owner = THIS_MODULE,}, -+}, -+}; -+ -+static int __init aquantia_init(void) -+{ -+ return phy_drivers_register(aquantia_driver, -+ ARRAY_SIZE(aquantia_driver)); -+} -+ -+static void __exit aquantia_exit(void) -+{ -+ return phy_drivers_unregister(aquantia_driver, -+ ARRAY_SIZE(aquantia_driver)); -+} -+ -+module_init(aquantia_init); -+module_exit(aquantia_exit); -+ -+static struct mdio_device_id __maybe_unused aquantia_tbl[] = { -+ { PHY_ID_AQ1202, 0xfffffff0 }, -+ { PHY_ID_AQ2104, 0xfffffff0 }, -+ { PHY_ID_AQR105, 0xfffffff0 }, -+ { PHY_ID_AQR405, 0xfffffff0 }, -+ { } -+}; -+ -+MODULE_DEVICE_TABLE(mdio, aquantia_tbl); -+ -+MODULE_DESCRIPTION("Aquantia PHY driver"); -+MODULE_AUTHOR("Shaohui Xie "); -+MODULE_LICENSE("GPL v2"); -diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c -index fdc1b41..a4f0886 100644 ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -307,6 +307,8 @@ static struct phy_driver at803x_driver[] = { - .flags = PHY_HAS_INTERRUPT, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, -+ .ack_interrupt = at803x_ack_interrupt, -+ .config_intr = at803x_config_intr, - .driver = { - .owner = THIS_MODULE, - }, -@@ -326,6 +328,8 @@ static struct phy_driver at803x_driver[] = { - .flags = PHY_HAS_INTERRUPT, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, -+ .ack_interrupt = at803x_ack_interrupt, -+ .config_intr = at803x_config_intr, - .driver = { - .owner = THIS_MODULE, - }, -diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c -deleted file mode 100644 -index 47872ca..0000000 ---- a/drivers/net/phy/fixed.c 
-+++ /dev/null -@@ -1,336 +0,0 @@ --/* -- * Fixed MDIO bus (MDIO bus emulation with fixed PHYs) -- * -- * Author: Vitaly Bordug -- * Anton Vorontsov -- * -- * Copyright (c) 2006-2007 MontaVista Software, Inc. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms of the GNU General Public License as published by the -- * Free Software Foundation; either version 2 of the License, or (at your -- * option) any later version. -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include -- --#define MII_REGS_NUM 29 -- --struct fixed_mdio_bus { -- int irqs[PHY_MAX_ADDR]; -- struct mii_bus *mii_bus; -- struct list_head phys; --}; -- --struct fixed_phy { -- int addr; -- u16 regs[MII_REGS_NUM]; -- struct phy_device *phydev; -- struct fixed_phy_status status; -- int (*link_update)(struct net_device *, struct fixed_phy_status *); -- struct list_head node; --}; -- --static struct platform_device *pdev; --static struct fixed_mdio_bus platform_fmb = { -- .phys = LIST_HEAD_INIT(platform_fmb.phys), --}; -- --static int fixed_phy_update_regs(struct fixed_phy *fp) --{ -- u16 bmsr = BMSR_ANEGCAPABLE; -- u16 bmcr = 0; -- u16 lpagb = 0; -- u16 lpa = 0; -- -- if (fp->status.duplex) { -- bmcr |= BMCR_FULLDPLX; -- -- switch (fp->status.speed) { -- case 1000: -- bmsr |= BMSR_ESTATEN; -- bmcr |= BMCR_SPEED1000; -- lpagb |= LPA_1000FULL; -- break; -- case 100: -- bmsr |= BMSR_100FULL; -- bmcr |= BMCR_SPEED100; -- lpa |= LPA_100FULL; -- break; -- case 10: -- bmsr |= BMSR_10FULL; -- lpa |= LPA_10FULL; -- break; -- default: -- pr_warn("fixed phy: unknown speed\n"); -- return -EINVAL; -- } -- } else { -- switch (fp->status.speed) { -- case 1000: -- bmsr |= BMSR_ESTATEN; -- bmcr |= BMCR_SPEED1000; -- lpagb |= LPA_1000HALF; -- break; -- case 100: -- bmsr |= BMSR_100HALF; -- bmcr |= BMCR_SPEED100; -- lpa |= LPA_100HALF; -- break; -- case 10: -- bmsr |= BMSR_10HALF; -- lpa |= LPA_10HALF; -- 
break; -- default: -- pr_warn("fixed phy: unknown speed\n"); -- return -EINVAL; -- } -- } -- -- if (fp->status.link) -- bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; -- -- if (fp->status.pause) -- lpa |= LPA_PAUSE_CAP; -- -- if (fp->status.asym_pause) -- lpa |= LPA_PAUSE_ASYM; -- -- fp->regs[MII_PHYSID1] = 0; -- fp->regs[MII_PHYSID2] = 0; -- -- fp->regs[MII_BMSR] = bmsr; -- fp->regs[MII_BMCR] = bmcr; -- fp->regs[MII_LPA] = lpa; -- fp->regs[MII_STAT1000] = lpagb; -- -- return 0; --} -- --static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) --{ -- struct fixed_mdio_bus *fmb = bus->priv; -- struct fixed_phy *fp; -- -- if (reg_num >= MII_REGS_NUM) -- return -1; -- -- /* We do not support emulating Clause 45 over Clause 22 register reads -- * return an error instead of bogus data. -- */ -- switch (reg_num) { -- case MII_MMD_CTRL: -- case MII_MMD_DATA: -- return -1; -- default: -- break; -- } -- -- list_for_each_entry(fp, &fmb->phys, node) { -- if (fp->addr == phy_addr) { -- /* Issue callback if user registered it. */ -- if (fp->link_update) { -- fp->link_update(fp->phydev->attached_dev, -- &fp->status); -- fixed_phy_update_regs(fp); -- } -- return fp->regs[reg_num]; -- } -- } -- -- return 0xFFFF; --} -- --static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num, -- u16 val) --{ -- return 0; --} -- --/* -- * If something weird is required to be done with link/speed, -- * network driver is able to assign a function to implement this. -- * May be useful for PHY's that need to be software-driven. 
-- */ --int fixed_phy_set_link_update(struct phy_device *phydev, -- int (*link_update)(struct net_device *, -- struct fixed_phy_status *)) --{ -- struct fixed_mdio_bus *fmb = &platform_fmb; -- struct fixed_phy *fp; -- -- if (!link_update || !phydev || !phydev->bus) -- return -EINVAL; -- -- list_for_each_entry(fp, &fmb->phys, node) { -- if (fp->addr == phydev->addr) { -- fp->link_update = link_update; -- fp->phydev = phydev; -- return 0; -- } -- } -- -- return -ENOENT; --} --EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); -- --int fixed_phy_add(unsigned int irq, int phy_addr, -- struct fixed_phy_status *status) --{ -- int ret; -- struct fixed_mdio_bus *fmb = &platform_fmb; -- struct fixed_phy *fp; -- -- fp = kzalloc(sizeof(*fp), GFP_KERNEL); -- if (!fp) -- return -ENOMEM; -- -- memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); -- -- fmb->irqs[phy_addr] = irq; -- -- fp->addr = phy_addr; -- fp->status = *status; -- -- ret = fixed_phy_update_regs(fp); -- if (ret) -- goto err_regs; -- -- list_add_tail(&fp->node, &fmb->phys); -- -- return 0; -- --err_regs: -- kfree(fp); -- return ret; --} --EXPORT_SYMBOL_GPL(fixed_phy_add); -- --void fixed_phy_del(int phy_addr) --{ -- struct fixed_mdio_bus *fmb = &platform_fmb; -- struct fixed_phy *fp, *tmp; -- -- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { -- if (fp->addr == phy_addr) { -- list_del(&fp->node); -- kfree(fp); -- return; -- } -- } --} --EXPORT_SYMBOL_GPL(fixed_phy_del); -- --static int phy_fixed_addr; --static DEFINE_SPINLOCK(phy_fixed_addr_lock); -- --struct phy_device *fixed_phy_register(unsigned int irq, -- struct fixed_phy_status *status, -- struct device_node *np) --{ -- struct fixed_mdio_bus *fmb = &platform_fmb; -- struct phy_device *phy; -- int phy_addr; -- int ret; -- -- /* Get the next available PHY address, up to PHY_MAX_ADDR */ -- spin_lock(&phy_fixed_addr_lock); -- if (phy_fixed_addr == PHY_MAX_ADDR) { -- spin_unlock(&phy_fixed_addr_lock); -- return ERR_PTR(-ENOSPC); -- } -- phy_addr = 
phy_fixed_addr++; -- spin_unlock(&phy_fixed_addr_lock); -- -- ret = fixed_phy_add(PHY_POLL, phy_addr, status); -- if (ret < 0) -- return ERR_PTR(ret); -- -- phy = get_phy_device(fmb->mii_bus, phy_addr, false); -- if (!phy || IS_ERR(phy)) { -- fixed_phy_del(phy_addr); -- return ERR_PTR(-EINVAL); -- } -- -- of_node_get(np); -- phy->dev.of_node = np; -- -- ret = phy_device_register(phy); -- if (ret) { -- phy_device_free(phy); -- of_node_put(np); -- fixed_phy_del(phy_addr); -- return ERR_PTR(ret); -- } -- -- return phy; --} -- --static int __init fixed_mdio_bus_init(void) --{ -- struct fixed_mdio_bus *fmb = &platform_fmb; -- int ret; -- -- pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); -- if (IS_ERR(pdev)) { -- ret = PTR_ERR(pdev); -- goto err_pdev; -- } -- -- fmb->mii_bus = mdiobus_alloc(); -- if (fmb->mii_bus == NULL) { -- ret = -ENOMEM; -- goto err_mdiobus_reg; -- } -- -- snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0"); -- fmb->mii_bus->name = "Fixed MDIO Bus"; -- fmb->mii_bus->priv = fmb; -- fmb->mii_bus->parent = &pdev->dev; -- fmb->mii_bus->read = &fixed_mdio_read; -- fmb->mii_bus->write = &fixed_mdio_write; -- fmb->mii_bus->irq = fmb->irqs; -- -- ret = mdiobus_register(fmb->mii_bus); -- if (ret) -- goto err_mdiobus_alloc; -- -- return 0; -- --err_mdiobus_alloc: -- mdiobus_free(fmb->mii_bus); --err_mdiobus_reg: -- platform_device_unregister(pdev); --err_pdev: -- return ret; --} --module_init(fixed_mdio_bus_init); -- --static void __exit fixed_mdio_bus_exit(void) --{ -- struct fixed_mdio_bus *fmb = &platform_fmb; -- struct fixed_phy *fp, *tmp; -- -- mdiobus_unregister(fmb->mii_bus); -- mdiobus_free(fmb->mii_bus); -- platform_device_unregister(pdev); -- -- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { -- list_del(&fp->node); -- kfree(fp); -- } --} --module_exit(fixed_mdio_bus_exit); -- --MODULE_DESCRIPTION("Fixed MDIO bus (MDIO bus emulation with fixed PHYs)"); --MODULE_AUTHOR("Vitaly Bordug"); --MODULE_LICENSE("GPL"); -diff 
--git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c -new file mode 100644 -index 0000000..88b8194 ---- /dev/null -+++ b/drivers/net/phy/fixed_phy.c -@@ -0,0 +1,370 @@ -+/* -+ * Fixed MDIO bus (MDIO bus emulation with fixed PHYs) -+ * -+ * Author: Vitaly Bordug -+ * Anton Vorontsov -+ * -+ * Copyright (c) 2006-2007 MontaVista Software, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MII_REGS_NUM 29 -+ -+struct fixed_mdio_bus { -+ int irqs[PHY_MAX_ADDR]; -+ struct mii_bus *mii_bus; -+ struct list_head phys; -+}; -+ -+struct fixed_phy { -+ int addr; -+ u16 regs[MII_REGS_NUM]; -+ struct phy_device *phydev; -+ struct fixed_phy_status status; -+ int (*link_update)(struct net_device *, struct fixed_phy_status *); -+ struct list_head node; -+}; -+ -+static struct platform_device *pdev; -+static struct fixed_mdio_bus platform_fmb = { -+ .phys = LIST_HEAD_INIT(platform_fmb.phys), -+}; -+ -+static int fixed_phy_update_regs(struct fixed_phy *fp) -+{ -+ u16 bmsr = BMSR_ANEGCAPABLE; -+ u16 bmcr = 0; -+ u16 lpagb = 0; -+ u16 lpa = 0; -+ -+ if (fp->status.duplex) { -+ bmcr |= BMCR_FULLDPLX; -+ -+ switch (fp->status.speed) { -+ case 10000: -+ break; -+ case 1000: -+ bmsr |= BMSR_ESTATEN; -+ bmcr |= BMCR_SPEED1000; -+ lpagb |= LPA_1000FULL; -+ break; -+ case 100: -+ bmsr |= BMSR_100FULL; -+ bmcr |= BMCR_SPEED100; -+ lpa |= LPA_100FULL; -+ break; -+ case 10: -+ bmsr |= BMSR_10FULL; -+ lpa |= LPA_10FULL; -+ break; -+ default: -+ pr_warn("fixed phy: unknown speed\n"); -+ return -EINVAL; -+ } -+ } else { -+ switch (fp->status.speed) { -+ case 10000: -+ break; -+ case 1000: -+ bmsr |= BMSR_ESTATEN; -+ bmcr |= BMCR_SPEED1000; 
-+ lpagb |= LPA_1000HALF; -+ break; -+ case 100: -+ bmsr |= BMSR_100HALF; -+ bmcr |= BMCR_SPEED100; -+ lpa |= LPA_100HALF; -+ break; -+ case 10: -+ bmsr |= BMSR_10HALF; -+ lpa |= LPA_10HALF; -+ break; -+ default: -+ pr_warn("fixed phy: unknown speed\n"); -+ return -EINVAL; -+ } -+ } -+ -+ if (fp->status.link) -+ bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; -+ -+ if (fp->status.pause) -+ lpa |= LPA_PAUSE_CAP; -+ -+ if (fp->status.asym_pause) -+ lpa |= LPA_PAUSE_ASYM; -+ -+ fp->regs[MII_PHYSID1] = 0; -+ fp->regs[MII_PHYSID2] = 0; -+ -+ fp->regs[MII_BMSR] = bmsr; -+ fp->regs[MII_BMCR] = bmcr; -+ fp->regs[MII_LPA] = lpa; -+ fp->regs[MII_STAT1000] = lpagb; -+ -+ return 0; -+} -+ -+static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) -+{ -+ struct fixed_mdio_bus *fmb = bus->priv; -+ struct fixed_phy *fp; -+ -+ if (reg_num >= MII_REGS_NUM) -+ return -1; -+ -+ /* We do not support emulating Clause 45 over Clause 22 register reads -+ * return an error instead of bogus data. -+ */ -+ switch (reg_num) { -+ case MII_MMD_CTRL: -+ case MII_MMD_DATA: -+ return -1; -+ default: -+ break; -+ } -+ -+ list_for_each_entry(fp, &fmb->phys, node) { -+ if (fp->addr == phy_addr) { -+ /* Issue callback if user registered it. */ -+ if (fp->link_update) { -+ fp->link_update(fp->phydev->attached_dev, -+ &fp->status); -+ fixed_phy_update_regs(fp); -+ } -+ return fp->regs[reg_num]; -+ } -+ } -+ -+ return 0xFFFF; -+} -+ -+static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num, -+ u16 val) -+{ -+ return 0; -+} -+ -+/* -+ * If something weird is required to be done with link/speed, -+ * network driver is able to assign a function to implement this. -+ * May be useful for PHY's that need to be software-driven. 
-+ */ -+int fixed_phy_set_link_update(struct phy_device *phydev, -+ int (*link_update)(struct net_device *, -+ struct fixed_phy_status *)) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct fixed_phy *fp; -+ -+ if (!phydev || !phydev->bus) -+ return -EINVAL; -+ -+ list_for_each_entry(fp, &fmb->phys, node) { -+ if (fp->addr == phydev->addr) { -+ fp->link_update = link_update; -+ fp->phydev = phydev; -+ return 0; -+ } -+ } -+ -+ return -ENOENT; -+} -+EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); -+ -+int fixed_phy_update_state(struct phy_device *phydev, -+ const struct fixed_phy_status *status, -+ const struct fixed_phy_status *changed) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct fixed_phy *fp; -+ -+ if (!phydev || !phydev->bus) -+ return -EINVAL; -+ -+ list_for_each_entry(fp, &fmb->phys, node) { -+ if (fp->addr == phydev->addr) { -+#define _UPD(x) if (changed->x) \ -+ fp->status.x = status->x -+ _UPD(link); -+ _UPD(speed); -+ _UPD(duplex); -+ _UPD(pause); -+ _UPD(asym_pause); -+#undef _UPD -+ fixed_phy_update_regs(fp); -+ return 0; -+ } -+ } -+ -+ return -ENOENT; -+} -+EXPORT_SYMBOL(fixed_phy_update_state); -+ -+int fixed_phy_add(unsigned int irq, int phy_addr, -+ struct fixed_phy_status *status) -+{ -+ int ret; -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct fixed_phy *fp; -+ -+ fp = kzalloc(sizeof(*fp), GFP_KERNEL); -+ if (!fp) -+ return -ENOMEM; -+ -+ memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); -+ -+ fmb->irqs[phy_addr] = irq; -+ -+ fp->addr = phy_addr; -+ fp->status = *status; -+ -+ ret = fixed_phy_update_regs(fp); -+ if (ret) -+ goto err_regs; -+ -+ list_add_tail(&fp->node, &fmb->phys); -+ -+ return 0; -+ -+err_regs: -+ kfree(fp); -+ return ret; -+} -+EXPORT_SYMBOL_GPL(fixed_phy_add); -+ -+void fixed_phy_del(int phy_addr) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct fixed_phy *fp, *tmp; -+ -+ list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { -+ if (fp->addr == phy_addr) { -+ 
list_del(&fp->node); -+ kfree(fp); -+ return; -+ } -+ } -+} -+EXPORT_SYMBOL_GPL(fixed_phy_del); -+ -+static int phy_fixed_addr; -+static DEFINE_SPINLOCK(phy_fixed_addr_lock); -+ -+struct phy_device *fixed_phy_register(unsigned int irq, -+ struct fixed_phy_status *status, -+ struct device_node *np) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct phy_device *phy; -+ int phy_addr; -+ int ret; -+ -+ /* Get the next available PHY address, up to PHY_MAX_ADDR */ -+ spin_lock(&phy_fixed_addr_lock); -+ if (phy_fixed_addr == PHY_MAX_ADDR) { -+ spin_unlock(&phy_fixed_addr_lock); -+ return ERR_PTR(-ENOSPC); -+ } -+ phy_addr = phy_fixed_addr++; -+ spin_unlock(&phy_fixed_addr_lock); -+ -+ ret = fixed_phy_add(PHY_POLL, phy_addr, status); -+ if (ret < 0) -+ return ERR_PTR(ret); -+ -+ phy = get_phy_device(fmb->mii_bus, phy_addr, false); -+ if (!phy || IS_ERR(phy)) { -+ fixed_phy_del(phy_addr); -+ return ERR_PTR(-EINVAL); -+ } -+ -+ of_node_get(np); -+ phy->dev.of_node = np; -+ -+ ret = phy_device_register(phy); -+ if (ret) { -+ phy_device_free(phy); -+ of_node_put(np); -+ fixed_phy_del(phy_addr); -+ return ERR_PTR(ret); -+ } -+ -+ return phy; -+} -+EXPORT_SYMBOL_GPL(fixed_phy_register); -+ -+static int __init fixed_mdio_bus_init(void) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ int ret; -+ -+ pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); -+ if (IS_ERR(pdev)) { -+ ret = PTR_ERR(pdev); -+ goto err_pdev; -+ } -+ -+ fmb->mii_bus = mdiobus_alloc(); -+ if (fmb->mii_bus == NULL) { -+ ret = -ENOMEM; -+ goto err_mdiobus_reg; -+ } -+ -+ snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0"); -+ fmb->mii_bus->name = "Fixed MDIO Bus"; -+ fmb->mii_bus->priv = fmb; -+ fmb->mii_bus->parent = &pdev->dev; -+ fmb->mii_bus->read = &fixed_mdio_read; -+ fmb->mii_bus->write = &fixed_mdio_write; -+ fmb->mii_bus->irq = fmb->irqs; -+ -+ ret = mdiobus_register(fmb->mii_bus); -+ if (ret) -+ goto err_mdiobus_alloc; -+ -+ return 0; -+ -+err_mdiobus_alloc: -+ 
mdiobus_free(fmb->mii_bus); -+err_mdiobus_reg: -+ platform_device_unregister(pdev); -+err_pdev: -+ return ret; -+} -+module_init(fixed_mdio_bus_init); -+ -+static void __exit fixed_mdio_bus_exit(void) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct fixed_phy *fp, *tmp; -+ -+ mdiobus_unregister(fmb->mii_bus); -+ mdiobus_free(fmb->mii_bus); -+ platform_device_unregister(pdev); -+ -+ list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { -+ list_del(&fp->node); -+ kfree(fp); -+ } -+} -+module_exit(fixed_mdio_bus_exit); -+ -+MODULE_DESCRIPTION("Fixed MDIO bus (MDIO bus emulation with fixed PHYs)"); -+MODULE_AUTHOR("Vitaly Bordug"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/net/phy/fsl_10gkr.c b/drivers/net/phy/fsl_10gkr.c -new file mode 100644 -index 0000000..3713726 ---- /dev/null -+++ b/drivers/net/phy/fsl_10gkr.c -@@ -0,0 +1,1467 @@ -+/* Freescale XFI 10GBASE-KR driver. -+ * Author: Shaohui Xie -+ * -+ * Copyright 2014 Freescale Semiconductor, Inc. -+ * -+ * Licensed under the GPL-2 or later. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define FSL_XFI_PCS_PHY_ID 0x7C000012 -+#define FSL_XFI_PCS_PHY_ID2 0x0083e400 -+ -+/* Freescale XFI PCS MMD */ -+#define FSL_XFI_PMD 0x1 -+#define FSL_XFI_PCS 0x3 -+#define FSL_XFI_AN 0x7 -+#define FSL_XFI_VS1 0x1e -+ -+/* Freescale XFI PMD registers */ -+#define FSL_XFI_PMD_CTRL 0x0 -+#define FSL_XFI_KR_PMD_CTRL 0x0096 -+#define FSL_XFI_KR_PMD_STATUS 0x0097 -+#define FSL_XFI_KR_LP_CU 0x0098 -+#define FSL_XFI_KR_LP_STATUS 0x0099 -+#define FSL_XFI_KR_LD_CU 0x009a -+#define FSL_XFI_KR_LD_STATUS 0x009b -+ -+/* PMD define */ -+#define PMD_RESET 0x1 -+#define PMD_STATUS_SUP_STAT 0x4 -+#define PMD_STATUS_FRAME_LOCK 0x2 -+#define TRAIN_EN 0x3 -+#define TRAIN_DISABLE 0x1 -+#define RX_STAT 0x1 -+ -+/* Freescale XFI PCS registers */ -+#define FSL_XFI_PCS_CTRL 0x0 -+#define FSL_XFI_PCS_STATUS 0x1 -+ -+/* Freescale XFI Auto-Negotiation Registers */ -+#define FSL_XFI_AN_CTRL 0x0000 -+#define FSL_XFI_LNK_STATUS 0x0001 -+#define FSL_XFI_AN_AD_1 0x0011 -+#define FSL_XFI_BP_STATUS 0x0030 -+ -+#define XFI_AN_AD1 0x85 -+#define XF_AN_RESTART 0x1200 -+#define XFI_AN_LNK_STAT_UP 0x4 -+ -+/* Freescale XFI Vendor-Specific 1 Registers */ -+#define FSL_XFI_PCS_INTR_EVENT 0x0002 -+#define FSL_XFI_PCS_INTR_MASK 0x0003 -+#define FSL_XFI_AN_INTR_EVENT 0x0004 -+#define FSL_XFI_AN_INTR_MASK 0x0005 -+#define FSL_XFI_LT_INTR_EVENT 0x0006 -+#define FSL_XFI_LT_INTR_MASK 0x0007 -+ -+/* C(-1) */ -+#define BIN_M1 0 -+/* C(1) */ -+#define BIN_LONG 1 -+#define BIN_M1_SEL 6 -+#define BIN_Long_SEL 7 -+#define CDR_SEL_MASK 0x00070000 -+#define BIN_SNAPSHOT_NUM 5 -+#define BIN_M1_THRESHOLD 3 -+#define BIN_LONG_THRESHOLD 2 -+ -+#define PRE_COE_MASK 0x03c00000 -+#define POST_COE_MASK 0x001f0000 -+#define ZERO_COE_MASK 0x00003f00 -+#define PRE_COE_SHIFT 22 -+#define POST_COE_SHIFT 16 -+#define ZERO_COE_SHIFT 8 -+ -+#define PRE_COE_MAX 0x0 -+#define PRE_COE_MIN 0x8 -+#define POST_COE_MAX 0x0 
-+#define POST_COE_MIN 0x10 -+#define ZERO_COE_MAX 0x30 -+#define ZERO_COE_MIN 0x0 -+ -+#define TECR0_INIT 0x24200000 -+#define RATIO_PREQ 0x3 -+#define RATIO_PST1Q 0xd -+#define RATIO_EQ 0x20 -+ -+#define GCR1_CTL_SNP_START_MASK 0x00002000 -+#define GCR1_SNP_START_MASK 0x00000040 -+#define RECR1_SNP_DONE_MASK 0x00000004 -+#define RECR1_CTL_SNP_DONE_MASK 0x00000002 -+#define TCSR1_SNP_DATA_MASK 0x0000ffc0 -+#define TCSR1_SNP_DATA_SHIFT 6 -+#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100 -+ -+#define RECR1_GAINK2_MASK 0x0f000000 -+#define RECR1_GAINK2_SHIFT 24 -+#define RECR1_GAINK3_MASK 0x000f0000 -+#define RECR1_GAINK3_SHIFT 16 -+#define RECR1_OFFSET_MASK 0x00003f80 -+#define RECR1_OFFSET_SHIFT 7 -+#define RECR1_BLW_MASK 0x00000f80 -+#define RECR1_BLW_SHIFT 7 -+#define EYE_CTRL_SHIFT 12 -+#define BASE_WAND_SHIFT 10 -+ -+#define XGKR_TIMEOUT 1050 -+#define AN_ABILITY_MASK 0x9 -+#define AN_10GKR_MASK 0x8 -+#define LT_10GKR_MASK 0x4 -+#define TRAIN_FAIL 0x8 -+ -+#define INCREMENT 1 -+#define DECREMENT 2 -+#define TIMEOUT_LONG 3 -+#define TIMEOUT_M1 3 -+ -+#define RX_READY_MASK 0x8000 -+#define PRESET_MASK 0x2000 -+#define INIT_MASK 0x1000 -+#define COP1_MASK 0x30 -+#define COP1_SHIFT 4 -+#define COZ_MASK 0xc -+#define COZ_SHIFT 2 -+#define COM1_MASK 0x3 -+#define COM1_SHIFT 0 -+#define REQUEST_MASK 0x3f -+#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \ -+ COP1_MASK | COZ_MASK | COM1_MASK) -+ -+#define FSL_SERDES_INSTANCE1_BASE 0xffe0ea000 -+#define FSL_SERDES_INSTANCE2_BASE 0xffe0eb000 -+#define FSL_LANE_A_BASE 0x800 -+#define FSL_LANE_B_BASE 0x840 -+#define FSL_LANE_C_BASE 0x880 -+#define FSL_LANE_D_BASE 0x8C0 -+#define FSL_LANE_E_BASE 0x900 -+#define FSL_LANE_F_BASE 0x940 -+#define FSL_LANE_G_BASE 0x980 -+#define FSL_LANE_H_BASE 0x9C0 -+#define GCR0_RESET_MASK 0x600000 -+ -+#define NEW_ALGORITHM_TRAIN_TX -+#ifdef NEW_ALGORITHM_TRAIN_TX -+#define FORCE_INC_COP1_NUMBER 0 -+#define FORCE_INC_COM1_NUMBER 1 -+#endif -+ -+enum fsl_xgkr_driver { -+ FSL_XGKR_REV1, -+ 
FSL_XGKR_REV2, -+ FSL_XGKR_INV -+}; -+ -+static struct phy_driver fsl_xgkr_driver[FSL_XGKR_INV]; -+ -+enum coe_filed { -+ COE_COP1, -+ COE_COZ, -+ COE_COM -+}; -+ -+enum coe_update { -+ COE_NOTUPDATED, -+ COE_UPDATED, -+ COE_MIN, -+ COE_MAX, -+ COE_INV -+}; -+ -+enum serdes_inst { -+ SERDES_1, -+ SERDES_2, -+ SERDES_MAX -+}; -+ -+enum lane_inst { -+ LANE_A, -+ LANE_B, -+ LANE_C, -+ LANE_D, -+ LANE_E, -+ LANE_F, -+ LANE_G, -+ LANE_H, -+ LANE_MAX -+}; -+ -+struct serdes_map { -+ const char *serdes_name; -+ unsigned long serdes_base; -+}; -+ -+struct lane_map { -+ const char *lane_name; -+ unsigned long lane_base; -+}; -+ -+const struct serdes_map s_map[SERDES_MAX] = { -+ {"serdes-1", FSL_SERDES_INSTANCE1_BASE}, -+ {"serdes-2", FSL_SERDES_INSTANCE2_BASE} -+}; -+ -+const struct lane_map l_map[LANE_MAX] = { -+ {"lane-a", FSL_LANE_A_BASE}, -+ {"lane-b", FSL_LANE_B_BASE}, -+ {"lane-c", FSL_LANE_C_BASE}, -+ {"lane-d", FSL_LANE_D_BASE}, -+ {"lane-e", FSL_LANE_E_BASE}, -+ {"lane-f", FSL_LANE_F_BASE}, -+ {"lane-g", FSL_LANE_G_BASE}, -+ {"lane-h", FSL_LANE_H_BASE} -+}; -+ -+struct per_lane_ctrl_status { -+ __be32 gcr0; /* 0x.000 - General Control Register 0 */ -+ __be32 gcr1; /* 0x.004 - General Control Register 1 */ -+ __be32 gcr2; /* 0x.008 - General Control Register 2 */ -+ __be32 resv1; /* 0x.00C - Reserved */ -+ __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */ -+ __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */ -+ __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */ -+ __be32 resv2; /* 0x.01C - Reserved */ -+ __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */ -+ __be32 tlcr1; /* 0x.024 - TTL Control Register 1 */ -+ __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */ -+ __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */ -+ __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */ -+ __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */ -+ __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */ -+ 
__be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */ -+}; -+ -+struct training_state_machine { -+ bool bin_m1_late_early; -+ bool bin_long_late_early; -+ bool bin_m1_stop; -+ bool bin_long_stop; -+ bool tx_complete; -+ bool an_ok; -+ bool link_up; -+ bool running; -+ bool sent_init; -+ int m1_min_max_cnt; -+ int long_min_max_cnt; -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ int pre_inc; -+ int post_inc; -+#endif -+}; -+ -+struct fsl_xgkr_inst { -+ void *reg_base; -+ struct mii_bus *bus; -+ struct phy_device *phydev; -+ struct training_state_machine t_s_m; -+ u32 ld_update; -+ u32 ld_status; -+ u32 ratio_preq; -+ u32 ratio_pst1q; -+ u32 adpt_eq; -+}; -+ -+struct fsl_xgkr_wk { -+ struct work_struct xgkr_wk; -+ struct list_head xgkr_list; -+ struct fsl_xgkr_inst *xgkr_inst; -+}; -+ -+LIST_HEAD(fsl_xgkr_list); -+ -+static struct timer_list xgkr_timer; -+static int fire_timer; -+static struct workqueue_struct *xgkr_wq; -+ -+static void init_state_machine(struct training_state_machine *s_m) -+{ -+ s_m->bin_m1_late_early = true; -+ s_m->bin_long_late_early = false; -+ s_m->bin_m1_stop = false; -+ s_m->bin_long_stop = false; -+ s_m->tx_complete = false; -+ s_m->an_ok = false; -+ s_m->link_up = false; -+ s_m->running = false; -+ s_m->sent_init = false; -+ s_m->m1_min_max_cnt = 0; -+ s_m->long_min_max_cnt = 0; -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ s_m->pre_inc = FORCE_INC_COM1_NUMBER; -+ s_m->post_inc = FORCE_INC_COP1_NUMBER; -+#endif -+} -+ -+void tune_tecr0(struct fsl_xgkr_inst *inst) -+{ -+ struct per_lane_ctrl_status *reg_base; -+ u32 val; -+ -+ reg_base = (struct per_lane_ctrl_status *)inst->reg_base; -+ -+ val = TECR0_INIT | -+ inst->adpt_eq << ZERO_COE_SHIFT | -+ inst->ratio_preq << PRE_COE_SHIFT | -+ inst->ratio_pst1q << POST_COE_SHIFT; -+ -+ /* reset the lane */ -+ iowrite32be(ioread32be(®_base->gcr0) & ~GCR0_RESET_MASK, -+ ®_base->gcr0); -+ udelay(1); -+ iowrite32be(val, ®_base->tecr0); -+ udelay(1); -+ /* unreset the lane */ -+ iowrite32be(ioread32be(®_base->gcr0) | 
GCR0_RESET_MASK, -+ ®_base->gcr0); -+ udelay(1); -+} -+ -+static void start_lt(struct phy_device *phydev) -+{ -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_EN); -+} -+ -+static void stop_lt(struct phy_device *phydev) -+{ -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_DISABLE); -+} -+ -+static void reset_gcr0(struct fsl_xgkr_inst *inst) -+{ -+ struct per_lane_ctrl_status *reg_base; -+ -+ reg_base = (struct per_lane_ctrl_status *)inst->reg_base; -+ -+ iowrite32be(ioread32be(®_base->gcr0) & ~GCR0_RESET_MASK, -+ ®_base->gcr0); -+ udelay(1); -+ iowrite32be(ioread32be(®_base->gcr0) | GCR0_RESET_MASK, -+ ®_base->gcr0); -+ udelay(1); -+} -+ -+static void reset_lt(struct phy_device *phydev) -+{ -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_PMD_CTRL, PMD_RESET); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_DISABLE); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LD_CU, 0); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LD_STATUS, 0); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_STATUS, 0); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_CU, 0); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_STATUS, 0); -+} -+ -+static void start_an(struct phy_device *phydev) -+{ -+ reset_lt(phydev); -+ phy_write_mmd(phydev, FSL_XFI_AN, FSL_XFI_AN_AD_1, XFI_AN_AD1); -+ phy_write_mmd(phydev, FSL_XFI_AN, FSL_XFI_AN_CTRL, XF_AN_RESTART); -+} -+ -+static void ld_coe_status(struct fsl_xgkr_inst *inst) -+{ -+ phy_write_mmd(inst->phydev, FSL_XFI_PMD, -+ FSL_XFI_KR_LD_STATUS, inst->ld_status); -+} -+ -+static void ld_coe_update(struct fsl_xgkr_inst *inst) -+{ -+ phy_write_mmd(inst->phydev, FSL_XFI_PMD, -+ FSL_XFI_KR_LD_CU, inst->ld_update); -+} -+ -+static void init_inst(struct fsl_xgkr_inst *inst, int reset) -+{ -+ if (reset) { -+ inst->ratio_preq = RATIO_PREQ; -+ inst->ratio_pst1q = RATIO_PST1Q; -+ inst->adpt_eq = RATIO_EQ; -+ tune_tecr0(inst); -+ } -+ -+ inst->ld_status &= RX_READY_MASK; -+ ld_coe_status(inst); -+ -+ /* 
init state machine */ -+ init_state_machine(&inst->t_s_m); -+ -+ inst->ld_update = 0; -+ ld_coe_update(inst); -+ -+ inst->ld_status &= ~RX_READY_MASK; -+ ld_coe_status(inst); -+} -+ -+#ifdef NEW_ALGORITHM_TRAIN_TX -+static int get_median_gaink2(u32 *reg) -+{ -+ int gaink2_snap_shot[BIN_SNAPSHOT_NUM]; -+ u32 rx_eq_snp; -+ struct per_lane_ctrl_status *reg_base; -+ int timeout; -+ int i, j, tmp, pos; -+ -+ reg_base = (struct per_lane_ctrl_status *)reg; -+ -+ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) { -+ /* wait RECR1_CTL_SNP_DONE_MASK has cleared */ -+ timeout = 100; -+ while (ioread32be(®_base->recr1) & -+ RECR1_CTL_SNP_DONE_MASK) { -+ udelay(1); -+ timeout--; -+ if (timeout == 0) -+ break; -+ } -+ -+ /* start snap shot */ -+ iowrite32be((ioread32be(®_base->gcr1) | -+ GCR1_CTL_SNP_START_MASK), -+ ®_base->gcr1); -+ -+ /* wait for SNP done */ -+ timeout = 100; -+ while (!(ioread32be(®_base->recr1) & -+ RECR1_CTL_SNP_DONE_MASK)) { -+ udelay(1); -+ timeout--; -+ if (timeout == 0) -+ break; -+ } -+ -+ /* read and save the snap shot */ -+ rx_eq_snp = ioread32be(®_base->recr1); -+ gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >> -+ RECR1_GAINK2_SHIFT; -+ -+ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */ -+ iowrite32be((ioread32be(®_base->gcr1) & -+ ~GCR1_CTL_SNP_START_MASK), -+ ®_base->gcr1); -+ } -+ -+ /* get median of the 5 snap shot */ -+ for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) { -+ tmp = gaink2_snap_shot[i]; -+ pos = i; -+ for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) { -+ if (gaink2_snap_shot[j] < tmp) { -+ tmp = gaink2_snap_shot[j]; -+ pos = j; -+ } -+ } -+ -+ gaink2_snap_shot[pos] = gaink2_snap_shot[i]; -+ gaink2_snap_shot[i] = tmp; -+ } -+ -+ return gaink2_snap_shot[2]; -+} -+#endif -+ -+static bool is_bin_early(int bin_sel, void __iomem *reg) -+{ -+ bool early = false; -+ int bin_snap_shot[BIN_SNAPSHOT_NUM]; -+ int i, negative_count = 0; -+ struct per_lane_ctrl_status *reg_base; -+ int timeout; -+ -+ reg_base = (struct per_lane_ctrl_status *)reg; 
-+ -+ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) { -+ /* wait RECR1_SNP_DONE_MASK has cleared */ -+ timeout = 100; -+ while ((ioread32be(®_base->recr1) & RECR1_SNP_DONE_MASK)) { -+ udelay(1); -+ timeout--; -+ if (timeout == 0) -+ break; -+ } -+ -+ /* set TCSR1[CDR_SEL] to BinM1/BinLong */ -+ if (bin_sel == BIN_M1) { -+ iowrite32be((ioread32be(®_base->tcsr1) & -+ ~CDR_SEL_MASK) | BIN_M1_SEL, -+ ®_base->tcsr1); -+ } else { -+ iowrite32be((ioread32be(®_base->tcsr1) & -+ ~CDR_SEL_MASK) | BIN_Long_SEL, -+ ®_base->tcsr1); -+ } -+ -+ /* start snap shot */ -+ iowrite32be(ioread32be(®_base->gcr1) | GCR1_SNP_START_MASK, -+ ®_base->gcr1); -+ -+ /* wait for SNP done */ -+ timeout = 100; -+ while (!(ioread32be(®_base->recr1) & RECR1_SNP_DONE_MASK)) { -+ udelay(1); -+ timeout--; -+ if (timeout == 0) -+ break; -+ } -+ -+ /* read and save the snap shot */ -+ bin_snap_shot[i] = (ioread32be(®_base->tcsr1) & -+ TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT; -+ if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK) -+ negative_count++; -+ -+ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */ -+ iowrite32be(ioread32be(®_base->gcr1) & ~GCR1_SNP_START_MASK, -+ ®_base->gcr1); -+ } -+ -+ if (((bin_sel == BIN_M1) && negative_count > BIN_M1_THRESHOLD) || -+ ((bin_sel == BIN_LONG && negative_count > BIN_LONG_THRESHOLD))) { -+ early = true; -+ } -+ -+ return early; -+} -+ -+static void train_tx(struct fsl_xgkr_inst *inst) -+{ -+ struct phy_device *phydev = inst->phydev; -+ struct training_state_machine *s_m = &inst->t_s_m; -+ bool bin_m1_early, bin_long_early; -+ u32 lp_status, old_ld_update; -+ u32 status_cop1, status_coz, status_com1; -+ u32 req_cop1, req_coz, req_com1, req_preset, req_init; -+ u32 temp; -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ u32 median_gaink2; -+#endif -+ -+recheck: -+ if (s_m->bin_long_stop && s_m->bin_m1_stop) { -+ s_m->tx_complete = true; -+ inst->ld_status |= RX_READY_MASK; -+ ld_coe_status(inst); -+ /* tell LP we are ready */ -+ phy_write_mmd(phydev, FSL_XFI_PMD, -+ 
FSL_XFI_KR_PMD_STATUS, RX_STAT); -+ return; -+ } -+ -+ /* We start by checking the current LP status. If we got any responses, -+ * we can clear up the appropriate update request so that the -+ * subsequent code may easily issue new update requests if needed. -+ */ -+ lp_status = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_STATUS) & -+ REQUEST_MASK; -+ status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT; -+ status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT; -+ status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT; -+ -+ old_ld_update = inst->ld_update; -+ req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT; -+ req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT; -+ req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT; -+ req_preset = old_ld_update & PRESET_MASK; -+ req_init = old_ld_update & INIT_MASK; -+ -+ /* IEEE802.3-2008, 72.6.10.2.3.1 -+ * We may clear PRESET when all coefficients show UPDATED or MAX. -+ */ -+ if (req_preset) { -+ if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) && -+ (status_coz == COE_UPDATED || status_coz == COE_MAX) && -+ (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) { -+ inst->ld_update &= ~PRESET_MASK; -+ } -+ } -+ -+ /* IEEE802.3-2008, 72.6.10.2.3.2 -+ * We may clear INITIALIZE when no coefficients show NOT UPDATED. -+ */ -+ if (req_init) { -+ if (status_cop1 != COE_NOTUPDATED && -+ status_coz != COE_NOTUPDATED && -+ status_com1 != COE_NOTUPDATED) { -+ inst->ld_update &= ~INIT_MASK; -+ } -+ } -+ -+ /* IEEE802.3-2008, 72.6.10.2.3.2 -+ * we send initialize to the other side to ensure default settings -+ * for the LP. Naturally, we should do this only once. -+ */ -+ if (!s_m->sent_init) { -+ if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) { -+ inst->ld_update |= INIT_MASK; -+ s_m->sent_init = true; -+ } -+ } -+ -+ /* IEEE802.3-2008, 72.6.10.2.3.3 -+ * We set coefficient requests to HOLD when we get the information -+ * about any updates On clearing our prior response, we also update -+ * our internal status. 
-+ */ -+ if (status_cop1 != COE_NOTUPDATED) { -+ if (req_cop1) { -+ inst->ld_update &= ~COP1_MASK; -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ if (s_m->post_inc) { -+ if (req_cop1 == INCREMENT && -+ status_cop1 == COE_MAX) { -+ s_m->post_inc = 0; -+ s_m->bin_long_stop = true; -+ s_m->bin_m1_stop = true; -+ } else { -+ s_m->post_inc -= 1; -+ } -+ -+ ld_coe_update(inst); -+ goto recheck; -+ } -+#endif -+ if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) || -+ (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) { -+ s_m->long_min_max_cnt++; -+ if (s_m->long_min_max_cnt >= TIMEOUT_LONG) { -+ s_m->bin_long_stop = true; -+ ld_coe_update(inst); -+ goto recheck; -+ } -+ } -+ } -+ } -+ -+ if (status_coz != COE_NOTUPDATED) { -+ if (req_coz) -+ inst->ld_update &= ~COZ_MASK; -+ } -+ -+ if (status_com1 != COE_NOTUPDATED) { -+ if (req_com1) { -+ inst->ld_update &= ~COM1_MASK; -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ if (s_m->pre_inc) { -+ if (req_com1 == INCREMENT && -+ status_com1 == COE_MAX) -+ s_m->pre_inc = 0; -+ else -+ s_m->pre_inc -= 1; -+ -+ ld_coe_update(inst); -+ goto recheck; -+ } -+#endif -+ /* Stop If we have reached the limit for a parameter. */ -+ if ((req_com1 == DECREMENT && status_com1 == COE_MIN) || -+ (req_com1 == INCREMENT && status_com1 == COE_MAX)) { -+ s_m->m1_min_max_cnt++; -+ if (s_m->m1_min_max_cnt >= TIMEOUT_M1) { -+ s_m->bin_m1_stop = true; -+ ld_coe_update(inst); -+ goto recheck; -+ } -+ } -+ } -+ } -+ -+ if (old_ld_update != inst->ld_update) { -+ ld_coe_update(inst); -+ /* Redo these status checks and updates until we have no more -+ * changes, to speed up the overall process. -+ */ -+ goto recheck; -+ } -+ -+ /* Do nothing if we have pending request. */ -+ if ((req_coz || req_com1 || req_cop1)) -+ return; -+ else if (lp_status) -+ /* No pending request but LP status was not reverted to -+ * not updated. 
-+ */ -+ return; -+ -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) { -+ if (s_m->pre_inc) { -+ inst->ld_update = INCREMENT << COM1_SHIFT; -+ ld_coe_update(inst); -+ return; -+ } -+ -+ if (status_cop1 != COE_MAX) { -+ median_gaink2 = get_median_gaink2(inst->reg_base); -+ if (median_gaink2 == 0xf) { -+ s_m->post_inc = 1; -+ } else { -+ /* Gaink2 median lower than "F" */ -+ s_m->bin_m1_stop = true; -+ s_m->bin_long_stop = true; -+ goto recheck; -+ } -+ } else { -+ /* C1 MAX */ -+ s_m->bin_m1_stop = true; -+ s_m->bin_long_stop = true; -+ goto recheck; -+ } -+ -+ if (s_m->post_inc) { -+ inst->ld_update = INCREMENT << COP1_SHIFT; -+ ld_coe_update(inst); -+ return; -+ } -+ } -+#endif -+ -+ /* snapshot and select bin */ -+ bin_m1_early = is_bin_early(BIN_M1, inst->reg_base); -+ bin_long_early = is_bin_early(BIN_LONG, inst->reg_base); -+ -+ if (!s_m->bin_m1_stop && !s_m->bin_m1_late_early && bin_m1_early) { -+ s_m->bin_m1_stop = true; -+ goto recheck; -+ } -+ -+ if (!s_m->bin_long_stop && -+ s_m->bin_long_late_early && !bin_long_early) { -+ s_m->bin_long_stop = true; -+ goto recheck; -+ } -+ -+ /* IEEE802.3-2008, 72.6.10.2.3.3 -+ * We only request coefficient updates when no PRESET/INITIALIZE is -+ * pending! We also only request coefficient updates when the -+ * corresponding status is NOT UPDATED and nothing is pending. -+ */ -+ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) { -+ if (!s_m->bin_long_stop) { -+ /* BinM1 correction means changing COM1 */ -+ if (!status_com1 && !(inst->ld_update & COM1_MASK)) { -+ /* Avoid BinM1Late by requesting an -+ * immediate decrement. 
-+ */ -+ if (!bin_m1_early) { -+ /* request decrement c(-1) */ -+ temp = DECREMENT << COM1_SHIFT; -+ inst->ld_update |= temp; -+ ld_coe_update(inst); -+ s_m->bin_m1_late_early = bin_m1_early; -+ return; -+ } -+ } -+ -+ /* BinLong correction means changing COP1 */ -+ if (!status_cop1 && !(inst->ld_update & COP1_MASK)) { -+ /* Locate BinLong transition point (if any) -+ * while avoiding BinM1Late. -+ */ -+ if (bin_long_early) { -+ /* request increment c(1) */ -+ temp = INCREMENT << COP1_SHIFT; -+ inst->ld_update |= temp; -+ } else { -+ /* request decrement c(1) */ -+ temp = DECREMENT << COP1_SHIFT; -+ inst->ld_update |= temp; -+ } -+ -+ ld_coe_update(inst); -+ s_m->bin_long_late_early = bin_long_early; -+ } -+ /* We try to finish BinLong before we do BinM1 */ -+ return; -+ } -+ -+ if (!s_m->bin_m1_stop) { -+ /* BinM1 correction means changing COM1 */ -+ if (!status_com1 && !(inst->ld_update & COM1_MASK)) { -+ /* Locate BinM1 transition point (if any) */ -+ if (bin_m1_early) { -+ /* request increment c(-1) */ -+ temp = INCREMENT << COM1_SHIFT; -+ inst->ld_update |= temp; -+ } else { -+ /* request decrement c(-1) */ -+ temp = DECREMENT << COM1_SHIFT; -+ inst->ld_update |= temp; -+ } -+ -+ ld_coe_update(inst); -+ s_m->bin_m1_late_early = bin_m1_early; -+ } -+ } -+ } -+} -+ -+static int check_an_link(struct phy_device *phydev) -+{ -+ int val; -+ int timeout = 100; -+ -+ while (timeout--) { -+ val = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); -+ if (val & XFI_AN_LNK_STAT_UP) -+ return 1; -+ usleep_range(100, 500); -+ } -+ -+ return 0; -+} -+ -+static int is_link_training_fail(struct phy_device *phydev) -+{ -+ int val; -+ -+ val = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_STATUS); -+ if (!(val & TRAIN_FAIL) && (val & RX_STAT)) { -+ /* check LNK_STAT for sure */ -+ if (check_an_link(phydev)) -+ return 0; -+ return 1; -+ } -+ return 1; -+} -+ -+static int check_rx(struct phy_device *phydev) -+{ -+ return phy_read_mmd(phydev, FSL_XFI_PMD, 
FSL_XFI_KR_LP_STATUS) & -+ RX_READY_MASK; -+} -+ -+/* Coefficient values have hardware restrictions */ -+static int is_ld_valid(u32 *ld_coe) -+{ -+ u32 ratio_pst1q = *ld_coe; -+ u32 adpt_eq = *(ld_coe + 1); -+ u32 ratio_preq = *(ld_coe + 2); -+ -+ if ((ratio_pst1q + adpt_eq + ratio_preq) > 48) -+ return 0; -+ -+ if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >= -+ ((adpt_eq - ratio_pst1q - ratio_preq) * 17)) -+ return 0; -+ -+ if (ratio_preq > ratio_pst1q) -+ return 0; -+ -+ if (ratio_preq > 8) -+ return 0; -+ -+ if (adpt_eq < 26) -+ return 0; -+ -+ if (ratio_pst1q > 16) -+ return 0; -+ -+ return 1; -+} -+ -+#define VAL_INVALID 0xff -+ -+static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5, -+ 0x7, 0x9, 0xb, 0xc, VAL_INVALID}; -+static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, -+ 0x7, 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID}; -+ -+static int is_value_allowed(const u32 *val_table, u32 val) -+{ -+ int i; -+ -+ for (i = 0;; i++) { -+ if (*(val_table + i) == VAL_INVALID) -+ return 0; -+ if (*(val_table + i) == val) -+ return 1; -+ } -+} -+ -+static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request) -+{ -+ u32 ld_limit[3], ld_coe[3], step[3]; -+ -+ ld_coe[0] = inst->ratio_pst1q; -+ ld_coe[1] = inst->adpt_eq; -+ ld_coe[2] = inst->ratio_preq; -+ -+ /* Information specific to the Freescale SerDes for 10GBase-KR: -+ * Incrementing C(+1) means *decrementing* RATIO_PST1Q -+ * Incrementing C(0) means incrementing ADPT_EQ -+ * Incrementing C(-1) means *decrementing* RATIO_PREQ -+ */ -+ step[0] = -1; -+ step[1] = 1; -+ step[2] = -1; -+ -+ switch (request) { -+ case INCREMENT: -+ ld_limit[0] = POST_COE_MAX; -+ ld_limit[1] = ZERO_COE_MAX; -+ ld_limit[2] = PRE_COE_MAX; -+ if (ld_coe[field] != ld_limit[field]) -+ ld_coe[field] += step[field]; -+ else -+ /* MAX */ -+ return 2; -+ break; -+ case DECREMENT: -+ ld_limit[0] = POST_COE_MIN; -+ ld_limit[1] = ZERO_COE_MIN; -+ ld_limit[2] = PRE_COE_MIN; -+ if (ld_coe[field] != ld_limit[field]) -+ ld_coe[field] -= step[field]; 
-+ else -+ /* MIN */ -+ return 1; -+ break; -+ default: -+ break; -+ } -+ -+ if (is_ld_valid(ld_coe)) { -+ /* accept new ld */ -+ inst->ratio_pst1q = ld_coe[0]; -+ inst->adpt_eq = ld_coe[1]; -+ inst->ratio_preq = ld_coe[2]; -+ /* only some values for preq and pst1q can be used. -+ * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc. -+ * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10. -+ */ -+ if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) { -+ dev_dbg(&inst->phydev->dev, -+ "preq skipped value: %d.\n", ld_coe[2]); -+ return 0; -+ } -+ -+ if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) { -+ dev_dbg(&inst->phydev->dev, -+ "pst1q skipped value: %d.\n", ld_coe[0]); -+ return 0; -+ } -+ -+ tune_tecr0(inst); -+ } else { -+ if (request == DECREMENT) -+ /* MIN */ -+ return 1; -+ if (request == INCREMENT) -+ /* MAX */ -+ return 2; -+ } -+ -+ return 0; -+} -+ -+static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld) -+{ -+ u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX}; -+ u32 mask, val; -+ -+ switch (field) { -+ case COE_COP1: -+ mask = COP1_MASK; -+ val = ld_coe[new_ld] << COP1_SHIFT; -+ break; -+ case COE_COZ: -+ mask = COZ_MASK; -+ val = ld_coe[new_ld] << COZ_SHIFT; -+ break; -+ case COE_COM: -+ mask = COM1_MASK; -+ val = ld_coe[new_ld] << COM1_SHIFT; -+ break; -+ default: -+ return; -+ break; -+ } -+ -+ inst->ld_status &= ~mask; -+ inst->ld_status |= val; -+} -+ -+static void check_request(struct fsl_xgkr_inst *inst, int request) -+{ -+ int cop1_req, coz_req, com_req; -+ int old_status, new_ld_sta; -+ -+ cop1_req = (request & COP1_MASK) >> COP1_SHIFT; -+ coz_req = (request & COZ_MASK) >> COZ_SHIFT; -+ com_req = (request & COM1_MASK) >> COM1_SHIFT; -+ -+ /* IEEE802.3-2008, 72.6.10.2.5 -+ * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED! 
-+ */ -+ old_status = inst->ld_status; -+ -+ if (cop1_req && !(inst->ld_status & COP1_MASK)) { -+ new_ld_sta = inc_dec(inst, COE_COP1, cop1_req); -+ min_max_updated(inst, COE_COP1, new_ld_sta); -+ } -+ -+ if (coz_req && !(inst->ld_status & COZ_MASK)) { -+ new_ld_sta = inc_dec(inst, COE_COZ, coz_req); -+ min_max_updated(inst, COE_COZ, new_ld_sta); -+ } -+ -+ if (com_req && !(inst->ld_status & COM1_MASK)) { -+ new_ld_sta = inc_dec(inst, COE_COM, com_req); -+ min_max_updated(inst, COE_COM, new_ld_sta); -+ } -+ -+ if (old_status != inst->ld_status) -+ ld_coe_status(inst); -+ -+} -+ -+static void preset(struct fsl_xgkr_inst *inst) -+{ -+ /* These are all MAX values from the IEEE802.3 perspective! */ -+ inst->ratio_pst1q = POST_COE_MAX; -+ inst->adpt_eq = ZERO_COE_MAX; -+ inst->ratio_preq = PRE_COE_MAX; -+ -+ tune_tecr0(inst); -+ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); -+ inst->ld_status |= COE_MAX << COP1_SHIFT | -+ COE_MAX << COZ_SHIFT | -+ COE_MAX << COM1_SHIFT; -+ ld_coe_status(inst); -+} -+ -+static void initialize(struct fsl_xgkr_inst *inst) -+{ -+ inst->ratio_preq = RATIO_PREQ; -+ inst->ratio_pst1q = RATIO_PST1Q; -+ inst->adpt_eq = RATIO_EQ; -+ -+ tune_tecr0(inst); -+ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); -+ inst->ld_status |= COE_UPDATED << COP1_SHIFT | -+ COE_UPDATED << COZ_SHIFT | -+ COE_UPDATED << COM1_SHIFT; -+ ld_coe_status(inst); -+} -+ -+static void train_rx(struct fsl_xgkr_inst *inst) -+{ -+ struct phy_device *phydev = inst->phydev; -+ int request, old_ld_status; -+ -+ /* get request from LP */ -+ request = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_CU) & -+ (LD_ALL_MASK); -+ old_ld_status = inst->ld_status; -+ -+ /* IEEE802.3-2008, 72.6.10.2.5 -+ * Ensure we always go to NOT UDPATED for status reporting in -+ * response to HOLD requests. -+ * IEEE802.3-2008, 72.6.10.2.3.1/2 -+ * ... but only if PRESET/INITIALIZE are not active to ensure -+ * we keep status until they are released! 
-+ */ -+ if (!(request & (PRESET_MASK | INIT_MASK))) { -+ if (!(request & COP1_MASK)) -+ inst->ld_status &= ~COP1_MASK; -+ -+ if (!(request & COZ_MASK)) -+ inst->ld_status &= ~COZ_MASK; -+ -+ if (!(request & COM1_MASK)) -+ inst->ld_status &= ~COM1_MASK; -+ -+ if (old_ld_status != inst->ld_status) -+ ld_coe_status(inst); -+ -+ } -+ -+ /* As soon as the LP shows ready, no need to do any more updates. */ -+ if (check_rx(phydev)) { -+ /* LP receiver is ready */ -+ if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) { -+ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); -+ ld_coe_status(inst); -+ } -+ } else { -+ /* IEEE802.3-2008, 72.6.10.2.3.1/2 -+ * only act on PRESET/INITIALIZE if all status is NOT UPDATED. -+ */ -+ if (request & (PRESET_MASK | INIT_MASK)) { -+ if (!(inst->ld_status & -+ (COP1_MASK | COZ_MASK | COM1_MASK))) { -+ if (request & PRESET_MASK) -+ preset(inst); -+ -+ if (request & INIT_MASK) -+ initialize(inst); -+ } -+ } -+ -+ /* LP Coefficient are not in HOLD */ -+ if (request & REQUEST_MASK) -+ check_request(inst, request & REQUEST_MASK); -+ } -+} -+ -+static void xgkr_wq_state_machine(struct work_struct *work) -+{ -+ struct fsl_xgkr_wk *wk = container_of(work, -+ struct fsl_xgkr_wk, xgkr_wk); -+ struct fsl_xgkr_inst *inst = wk->xgkr_inst; -+ struct training_state_machine *s_m = &inst->t_s_m; -+ struct phy_device *phydev = inst->phydev; -+ int val = 0, i; -+ int an_state, lt_state; -+ unsigned long dead_line; -+ int rx_ok, tx_ok; -+ -+ if (s_m->link_up) { -+ /* check abnormal link down events when link is up, for ex. -+ * the cable is pulled out or link partner is down. 
-+ */ -+ an_state = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); -+ if (!(an_state & XFI_AN_LNK_STAT_UP)) { -+ dev_info(&phydev->dev, -+ "Detect hotplug, restart training!\n"); -+ init_inst(inst, 1); -+ start_an(phydev); -+ } -+ s_m->running = false; -+ return; -+ } -+ -+ if (!s_m->an_ok) { -+ an_state = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_BP_STATUS); -+ if (!(an_state & AN_10GKR_MASK)) { -+ s_m->running = false; -+ return; -+ } else -+ s_m->an_ok = true; -+ } -+ -+ dev_info(&phydev->dev, "is training.\n"); -+ -+ start_lt(phydev); -+ for (i = 0; i < 2;) { -+ /* i < 1 also works, but start one more try immediately when -+ * failed can adjust our training frequency to match other -+ * devices. This can help the link being established more -+ * quickly. -+ */ -+ dead_line = jiffies + msecs_to_jiffies(500); -+ while (time_before(jiffies, dead_line)) { -+ val = phy_read_mmd(phydev, FSL_XFI_PMD, -+ FSL_XFI_KR_PMD_STATUS); -+ if (val & TRAIN_FAIL) { -+ /* LT failed already, reset lane to avoid -+ * it run into hanging, then start LT again. -+ */ -+ reset_gcr0(inst); -+ start_lt(phydev); -+ } else if (val & PMD_STATUS_SUP_STAT && -+ val & PMD_STATUS_FRAME_LOCK) -+ break; -+ usleep_range(100, 500); -+ } -+ -+ if (!(val & PMD_STATUS_FRAME_LOCK && -+ val & PMD_STATUS_SUP_STAT)) { -+ i++; -+ continue; -+ } -+ -+ /* init process */ -+ rx_ok = tx_ok = false; -+ /* the LT should be finished in 500ms, failed or OK. 
*/ -+ dead_line = jiffies + msecs_to_jiffies(500); -+ -+ while (time_before(jiffies, dead_line)) { -+ /* check if the LT is already failed */ -+ lt_state = phy_read_mmd(phydev, FSL_XFI_PMD, -+ FSL_XFI_KR_PMD_STATUS); -+ if (lt_state & TRAIN_FAIL) { -+ reset_gcr0(inst); -+ break; -+ } -+ -+ rx_ok = check_rx(phydev); -+ tx_ok = s_m->tx_complete; -+ -+ if (rx_ok && tx_ok) -+ break; -+ -+ if (!rx_ok) -+ train_rx(inst); -+ -+ if (!tx_ok) -+ train_tx(inst); -+ usleep_range(100, 500); -+ } -+ -+ i++; -+ /* check LT result */ -+ if (is_link_training_fail(phydev)) { -+ /* reset state machine */ -+ init_inst(inst, 0); -+ continue; -+ } else { -+ stop_lt(phydev); -+ s_m->running = false; -+ s_m->link_up = true; -+ dev_info(&phydev->dev, "LT training is SUCCEEDED!\n"); -+ break; -+ } -+ } -+ -+ if (!s_m->link_up) { -+ /* reset state machine */ -+ init_inst(inst, 0); -+ } -+} -+ -+static void xgkr_timer_handle(unsigned long arg) -+{ -+ struct list_head *pos; -+ struct fsl_xgkr_wk *wk; -+ struct fsl_xgkr_inst *xgkr_inst; -+ struct phy_device *phydev; -+ struct training_state_machine *s_m; -+ -+ list_for_each(pos, &fsl_xgkr_list) { -+ wk = list_entry(pos, struct fsl_xgkr_wk, xgkr_list); -+ xgkr_inst = wk->xgkr_inst; -+ phydev = xgkr_inst->phydev; -+ s_m = &xgkr_inst->t_s_m; -+ -+ if (!s_m->running && (!s_m->an_ok || s_m->link_up)) { -+ s_m->running = true; -+ queue_work(xgkr_wq, (struct work_struct *)wk); -+ } -+ } -+ -+ if (!list_empty(&fsl_xgkr_list)) -+ mod_timer(&xgkr_timer, -+ jiffies + msecs_to_jiffies(XGKR_TIMEOUT)); -+} -+ -+static int fsl_xgkr_bind_serdes(const char *lane_name, -+ struct phy_device *phydev) -+{ -+ unsigned long serdes_base; -+ unsigned long lane_base; -+ int i; -+ -+ for (i = 0; i < SERDES_MAX; i++) { -+ if (strstr(lane_name, s_map[i].serdes_name)) { -+ serdes_base = s_map[i].serdes_base; -+ break; -+ } -+ } -+ -+ if (i == SERDES_MAX) -+ goto serdes_err; -+ -+ for (i = 0; i < LANE_MAX; i++) { -+ if (strstr(lane_name, l_map[i].lane_name)) { -+ lane_base = 
l_map[i].lane_base; -+ break; -+ } -+ } -+ -+ if (i == LANE_MAX) -+ goto lane_err; -+ -+ phydev->priv = ioremap(serdes_base + lane_base, -+ sizeof(struct per_lane_ctrl_status)); -+ if (!phydev->priv) -+ return -ENOMEM; -+ -+ return 0; -+ -+serdes_err: -+ dev_err(&phydev->dev, "Unknown SerDes name"); -+ return -EINVAL; -+lane_err: -+ dev_err(&phydev->dev, "Unknown Lane name"); -+ return -EINVAL; -+} -+ -+static int fsl_xgkr_probe(struct phy_device *phydev) -+{ -+ struct fsl_xgkr_inst *xgkr_inst; -+ struct fsl_xgkr_wk *xgkr_wk; -+ struct device_node *child; -+ const char *lane_name; -+ int len; -+ -+ child = phydev->dev.of_node; -+ -+ /* if there is lane-instance property, 10G-KR need to run */ -+ lane_name = of_get_property(child, "lane-instance", &len); -+ if (!lane_name || (fsl_xgkr_bind_serdes(lane_name, phydev))) -+ return 0; -+ -+ xgkr_inst = kzalloc(sizeof(struct fsl_xgkr_inst), GFP_KERNEL); -+ if (!xgkr_inst) -+ goto mem_err1; -+ -+ xgkr_inst->reg_base = phydev->priv; -+ -+ xgkr_inst->bus = phydev->bus; -+ -+ xgkr_inst->phydev = phydev; -+ -+ init_inst(xgkr_inst, 1); -+ -+ xgkr_wk = kzalloc(sizeof(struct fsl_xgkr_wk), GFP_KERNEL); -+ if (!xgkr_wk) -+ goto mem_err2; -+ -+ xgkr_wk->xgkr_inst = xgkr_inst; -+ phydev->priv = xgkr_wk; -+ -+ list_add(&xgkr_wk->xgkr_list, &fsl_xgkr_list); -+ -+ if (!fire_timer) { -+ setup_timer(&xgkr_timer, xgkr_timer_handle, -+ (unsigned long)&fsl_xgkr_list); -+ mod_timer(&xgkr_timer, -+ jiffies + msecs_to_jiffies(XGKR_TIMEOUT)); -+ fire_timer = 1; -+ xgkr_wq = create_workqueue("fsl_xgkr"); -+ } -+ INIT_WORK((struct work_struct *)xgkr_wk, xgkr_wq_state_machine); -+ -+ /* start auto-negotiation to detect link partner */ -+ start_an(phydev); -+ -+ return 0; -+mem_err2: -+ kfree(xgkr_inst); -+mem_err1: -+ dev_err(&phydev->dev, "failed to allocate memory!\n"); -+ return -ENOMEM; -+} -+ -+static int fsl_xgkr_config_init(struct phy_device *phydev) -+{ -+ return 0; -+} -+ -+static int fsl_xgkr_config_aneg(struct phy_device *phydev) -+{ -+ 
return 0; -+} -+ -+static void fsl_xgkr_remove(struct phy_device *phydev) -+{ -+ struct fsl_xgkr_wk *wk = (struct fsl_xgkr_wk *)phydev->priv; -+ struct fsl_xgkr_inst *xgkr_inst = wk->xgkr_inst; -+ struct list_head *this, *next; -+ struct fsl_xgkr_wk *tmp; -+ -+ list_for_each_safe(this, next, &fsl_xgkr_list) { -+ tmp = list_entry(this, struct fsl_xgkr_wk, xgkr_list); -+ if (tmp == wk) { -+ cancel_work_sync((struct work_struct *)wk); -+ list_del(this); -+ } -+ } -+ -+ if (list_empty(&fsl_xgkr_list)) -+ del_timer(&xgkr_timer); -+ -+ if (xgkr_inst->reg_base) -+ iounmap(xgkr_inst->reg_base); -+ -+ kfree(xgkr_inst); -+ kfree(wk); -+} -+ -+static int fsl_xgkr_read_status(struct phy_device *phydev) -+{ -+ int val = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); -+ -+ phydev->speed = SPEED_10000; -+ phydev->duplex = 1; -+ -+ if (val & XFI_AN_LNK_STAT_UP) -+ phydev->link = 1; -+ else -+ phydev->link = 0; -+ -+ return 0; -+} -+ -+static int fsl_xgkr_match_phy_device(struct phy_device *phydev) -+{ -+ return phydev->c45_ids.device_ids[3] == FSL_XFI_PCS_PHY_ID; -+} -+ -+static int fsl_xgkr_match_phy_device2(struct phy_device *phydev) -+{ -+ return phydev->c45_ids.device_ids[3] == FSL_XFI_PCS_PHY_ID2; -+} -+ -+static struct phy_driver fsl_xgkr_driver[] = { -+ { -+ .phy_id = FSL_XFI_PCS_PHY_ID, -+ .name = "Freescale 10G KR Rev1", -+ .phy_id_mask = 0xffffffff, -+ .features = PHY_GBIT_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .probe = fsl_xgkr_probe, -+ .config_init = &fsl_xgkr_config_init, -+ .config_aneg = &fsl_xgkr_config_aneg, -+ .read_status = &fsl_xgkr_read_status, -+ .match_phy_device = fsl_xgkr_match_phy_device, -+ .remove = fsl_xgkr_remove, -+ .driver = { .owner = THIS_MODULE,}, -+ }, -+ { -+ .phy_id = FSL_XFI_PCS_PHY_ID2, -+ .name = "Freescale 10G KR Rev2", -+ .phy_id_mask = 0xffffffff, -+ .features = PHY_GBIT_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .probe = fsl_xgkr_probe, -+ .config_init = &fsl_xgkr_config_init, -+ .config_aneg = &fsl_xgkr_config_aneg, -+ 
.read_status = &fsl_xgkr_read_status, -+ .match_phy_device = fsl_xgkr_match_phy_device2, -+ .remove = fsl_xgkr_remove, -+ .driver = { .owner = THIS_MODULE,}, -+ }, -+}; -+ -+static int __init fsl_xgkr_init(void) -+{ -+ return phy_drivers_register(fsl_xgkr_driver, -+ ARRAY_SIZE(fsl_xgkr_driver)); -+} -+ -+static void __exit fsl_xgkr_exit(void) -+{ -+ phy_drivers_unregister(fsl_xgkr_driver, -+ ARRAY_SIZE(fsl_xgkr_driver)); -+} -+ -+module_init(fsl_xgkr_init); -+module_exit(fsl_xgkr_exit); -+ -+static struct mdio_device_id __maybe_unused freescale_tbl[] = { -+ { FSL_XFI_PCS_PHY_ID, 0xffffffff }, -+ { FSL_XFI_PCS_PHY_ID2, 0xffffffff }, -+ { } -+}; -+ -+MODULE_DEVICE_TABLE(mdio, freescale_tbl); -diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c -index 225c033..969a198 100644 ---- a/drivers/net/phy/marvell.c -+++ b/drivers/net/phy/marvell.c -@@ -50,6 +50,7 @@ - #define MII_M1011_PHY_SCR 0x10 - #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 - -+#define MII_M1145_PHY_EXT_ADDR_PAGE 0x16 - #define MII_M1145_PHY_EXT_SR 0x1b - #define MII_M1145_PHY_EXT_CR 0x14 - #define MII_M1145_RGMII_RX_DELAY 0x0080 -@@ -495,6 +496,16 @@ static int m88e1111_config_init(struct phy_device *phydev) - err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); - if (err < 0) - return err; -+ -+ /* make sure copper is selected */ -+ err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE); -+ if (err < 0) -+ return err; -+ -+ err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, -+ err & (~0xff)); -+ if (err < 0) -+ return err; - } - - if (phydev->interface == PHY_INTERFACE_MODE_RTBI) { -diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c -index 50051f2..accd605 100644 ---- a/drivers/net/phy/mdio_bus.c -+++ b/drivers/net/phy/mdio_bus.c -@@ -288,8 +288,11 @@ int mdiobus_register(struct mii_bus *bus) - - error: - while (--i >= 0) { -- if (bus->phy_map[i]) -- device_unregister(&bus->phy_map[i]->dev); -+ struct phy_device *phydev = bus->phy_map[i]; -+ if (phydev) { -+ 
phy_device_remove(phydev); -+ phy_device_free(phydev); -+ } - } - device_del(&bus->dev); - return err; -@@ -305,9 +308,11 @@ void mdiobus_unregister(struct mii_bus *bus) - - device_del(&bus->dev); - for (i = 0; i < PHY_MAX_ADDR; i++) { -- if (bus->phy_map[i]) -- device_unregister(&bus->phy_map[i]->dev); -- bus->phy_map[i] = NULL; -+ struct phy_device *phydev = bus->phy_map[i]; -+ if (phydev) { -+ phy_device_remove(phydev); -+ phy_device_free(phydev); -+ } - } - } - EXPORT_SYMBOL(mdiobus_unregister); -@@ -421,6 +426,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) - { - struct phy_device *phydev = to_phy_device(dev); - struct phy_driver *phydrv = to_phy_driver(drv); -+ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); -+ int i; - - if (of_driver_match_device(dev, drv)) - return 1; -@@ -428,8 +435,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) - if (phydrv->match_phy_device) - return phydrv->match_phy_device(phydev); - -- return (phydrv->phy_id & phydrv->phy_id_mask) == -- (phydev->phy_id & phydrv->phy_id_mask); -+ if (phydev->is_c45) { -+ for (i = 1; i < num_ids; i++) { -+ if (!(phydev->c45_ids.devices_in_package & (1 << i))) -+ continue; -+ -+ if ((phydrv->phy_id & phydrv->phy_id_mask) == -+ (phydev->c45_ids.device_ids[i] & -+ phydrv->phy_id_mask)) -+ return 1; -+ } -+ return 0; -+ } else { -+ return (phydrv->phy_id & phydrv->phy_id_mask) == -+ (phydev->phy_id & phydrv->phy_id_mask); -+ } - } - - #ifdef CONFIG_PM -diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c -index 91d6d03..840075e 100644 ---- a/drivers/net/phy/phy.c -+++ b/drivers/net/phy/phy.c -@@ -768,6 +768,7 @@ void phy_state_machine(struct work_struct *work) - container_of(dwork, struct phy_device, state_queue); - bool needs_aneg = false, do_suspend = false, do_resume = false; - int err = 0; -+ int old_link; - - mutex_lock(&phydev->lock); - -@@ -814,6 +815,9 @@ void phy_state_machine(struct work_struct *work) - needs_aneg = 
true; - break; - case PHY_NOLINK: -+ if (phy_interrupt_is_valid(phydev)) -+ break; -+ - err = phy_read_status(phydev); - if (err) - break; -@@ -851,11 +855,18 @@ void phy_state_machine(struct work_struct *work) - phydev->adjust_link(phydev->attached_dev); - break; - case PHY_RUNNING: -- /* Only register a CHANGE if we are -- * polling or ignoring interrupts -+ /* Only register a CHANGE if we are polling or ignoring -+ * interrupts and link changed since latest checking. - */ -- if (!phy_interrupt_is_valid(phydev)) -- phydev->state = PHY_CHANGELINK; -+ if (!phy_interrupt_is_valid(phydev)) { -+ old_link = phydev->link; -+ err = phy_read_status(phydev); -+ if (err) -+ break; -+ -+ if (old_link != phydev->link) -+ phydev->state = PHY_CHANGELINK; -+ } - break; - case PHY_CHANGELINK: - err = phy_read_status(phydev); -diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c -index 70a0d88..07b1aa9 100644 ---- a/drivers/net/phy/phy_device.c -+++ b/drivers/net/phy/phy_device.c -@@ -205,6 +205,37 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, - } - EXPORT_SYMBOL(phy_device_create); - -+/* get_phy_c45_devs_in_pkg - reads a MMD's devices in package registers. -+ * @bus: the target MII bus -+ * @addr: PHY address on the MII bus -+ * @dev_addr: MMD address in the PHY. -+ * @devices_in_package: where to store the devices in package information. -+ * -+ * Description: reads devices in package registers of a MMD at @dev_addr -+ * from PHY at @addr on @bus. -+ * -+ * Returns: 0 on success, -EIO on failure. 
-+ */ -+static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, -+ u32 *devices_in_package) -+{ -+ int phy_reg, reg_addr; -+ -+ reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS2; -+ phy_reg = mdiobus_read(bus, addr, reg_addr); -+ if (phy_reg < 0) -+ return -EIO; -+ *devices_in_package = (phy_reg & 0xffff) << 16; -+ -+ reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS1; -+ phy_reg = mdiobus_read(bus, addr, reg_addr); -+ if (phy_reg < 0) -+ return -EIO; -+ *devices_in_package |= (phy_reg & 0xffff); -+ -+ return 0; -+} -+ - /** - * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs. - * @bus: the target MII bus -@@ -223,31 +254,32 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, - int phy_reg; - int i, reg_addr; - const int num_ids = ARRAY_SIZE(c45_ids->device_ids); -+ u32 *devs = &c45_ids->devices_in_package; - -- /* Find first non-zero Devices In package. Device -- * zero is reserved, so don't probe it. -+ /* Find first non-zero Devices In package. Device zero is reserved -+ * for 802.3 c45 complied PHYs, so don't probe it at first. - */ -- for (i = 1; -- i < num_ids && c45_ids->devices_in_package == 0; -- i++) { -- reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2; -- phy_reg = mdiobus_read(bus, addr, reg_addr); -- if (phy_reg < 0) -- return -EIO; -- c45_ids->devices_in_package = (phy_reg & 0xffff) << 16; -- -- reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS1; -- phy_reg = mdiobus_read(bus, addr, reg_addr); -+ for (i = 1; i < num_ids && *devs == 0; i++) { -+ phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, devs); - if (phy_reg < 0) - return -EIO; -- c45_ids->devices_in_package |= (phy_reg & 0xffff); - -- /* If mostly Fs, there is no device there, -- * let's get out of here. 
-- */ -- if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) { -- *phy_id = 0xffffffff; -- return 0; -+ if ((*devs & 0x1fffffff) == 0x1fffffff) { -+ /* If mostly Fs, there is no device there, -+ * then let's continue to probe more, as some -+ * 10G PHYs have zero Devices In package, -+ * e.g. Cortina CS4315/CS4340 PHY. -+ */ -+ phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, devs); -+ if (phy_reg < 0) -+ return -EIO; -+ /* no device there, let's get out of here */ -+ if ((*devs & 0x1fffffff) == 0x1fffffff) { -+ *phy_id = 0xffffffff; -+ return 0; -+ } else { -+ break; -+ } - } - } - -@@ -376,6 +408,24 @@ int phy_device_register(struct phy_device *phydev) - EXPORT_SYMBOL(phy_device_register); - - /** -+ * phy_device_remove - Remove a previously registered phy device from the MDIO bus -+ * @phydev: phy_device structure to remove -+ * -+ * This doesn't free the phy_device itself, it merely reverses the effects -+ * of phy_device_register(). Use phy_device_free() to free the device -+ * after calling this function. 
-+ */ -+void phy_device_remove(struct phy_device *phydev) -+{ -+ struct mii_bus *bus = phydev->bus; -+ int addr = phydev->addr; -+ -+ device_del(&phydev->dev); -+ bus->phy_map[addr] = NULL; -+} -+EXPORT_SYMBOL(phy_device_remove); -+ -+/** - * phy_find_first - finds the first PHY device on the bus - * @bus: the target MII bus - */ -diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c -index 45483fd..badcf24 100644 ---- a/drivers/net/phy/realtek.c -+++ b/drivers/net/phy/realtek.c -@@ -22,8 +22,12 @@ - #define RTL821x_INER 0x12 - #define RTL821x_INER_INIT 0x6400 - #define RTL821x_INSR 0x13 -+#define RTL8211E_INER_LINK_STATUS 0x400 - --#define RTL8211E_INER_LINK_STATUS 0x400 -+#define RTL8211F_INER_LINK_STATUS 0x0010 -+#define RTL8211F_INSR 0x1d -+#define RTL8211F_PAGE_SELECT 0x1f -+#define RTL8211F_TX_DELAY 0x100 - - MODULE_DESCRIPTION("Realtek PHY driver"); - MODULE_AUTHOR("Johnson Leung"); -@@ -38,6 +42,18 @@ static int rtl821x_ack_interrupt(struct phy_device *phydev) - return (err < 0) ? err : 0; - } - -+static int rtl8211f_ack_interrupt(struct phy_device *phydev) -+{ -+ int err; -+ -+ phy_write(phydev, RTL8211F_PAGE_SELECT, 0xa43); -+ err = phy_read(phydev, RTL8211F_INSR); -+ /* restore to default page 0 */ -+ phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); -+ -+ return (err < 0) ? 
err : 0; -+} -+ - static int rtl8211b_config_intr(struct phy_device *phydev) - { - int err; -@@ -64,6 +80,41 @@ static int rtl8211e_config_intr(struct phy_device *phydev) - return err; - } - -+static int rtl8211f_config_intr(struct phy_device *phydev) -+{ -+ int err; -+ -+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) -+ err = phy_write(phydev, RTL821x_INER, -+ RTL8211F_INER_LINK_STATUS); -+ else -+ err = phy_write(phydev, RTL821x_INER, 0); -+ -+ return err; -+} -+ -+static int rtl8211f_config_init(struct phy_device *phydev) -+{ -+ int ret; -+ u16 reg; -+ -+ ret = genphy_config_init(phydev); -+ if (ret < 0) -+ return ret; -+ -+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { -+ /* enable TXDLY */ -+ phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08); -+ reg = phy_read(phydev, 0x11); -+ reg |= RTL8211F_TX_DELAY; -+ phy_write(phydev, 0x11, reg); -+ /* restore to default page 0 */ -+ phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); -+ } -+ -+ return 0; -+} -+ - static struct phy_driver realtek_drvs[] = { - { - .phy_id = 0x00008201, -@@ -86,6 +137,19 @@ static struct phy_driver realtek_drvs[] = { - .config_intr = &rtl8211b_config_intr, - .driver = { .owner = THIS_MODULE,}, - }, { -+ .phy_id = 0x001cc914, -+ .name = "RTL8211DN Gigabit Ethernet", -+ .phy_id_mask = 0x001fffff, -+ .features = PHY_GBIT_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .config_aneg = genphy_config_aneg, -+ .read_status = genphy_read_status, -+ .ack_interrupt = rtl821x_ack_interrupt, -+ .config_intr = rtl8211e_config_intr, -+ .suspend = genphy_suspend, -+ .resume = genphy_resume, -+ .driver = { .owner = THIS_MODULE,}, -+ }, { - .phy_id = 0x001cc915, - .name = "RTL8211E Gigabit Ethernet", - .phy_id_mask = 0x001fffff, -@@ -98,6 +162,20 @@ static struct phy_driver realtek_drvs[] = { - .suspend = genphy_suspend, - .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, -+ }, { -+ .phy_id = 0x001cc916, -+ .name = "RTL8211F Gigabit Ethernet", -+ .phy_id_mask = 0x001fffff, -+ .features = 
PHY_GBIT_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .config_aneg = &genphy_config_aneg, -+ .config_init = &rtl8211f_config_init, -+ .read_status = &genphy_read_status, -+ .ack_interrupt = &rtl8211f_ack_interrupt, -+ .config_intr = &rtl8211f_config_intr, -+ .suspend = genphy_suspend, -+ .resume = genphy_resume, -+ .driver = { .owner = THIS_MODULE }, - }, - }; - -@@ -116,7 +194,9 @@ module_exit(realtek_exit); - - static struct mdio_device_id __maybe_unused realtek_tbl[] = { - { 0x001cc912, 0x001fffff }, -+ { 0x001cc914, 0x001fffff }, - { 0x001cc915, 0x001fffff }, -+ { 0x001cc916, 0x001fffff }, - { } - }; - -diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c -new file mode 100644 -index 0000000..91e1bec ---- /dev/null -+++ b/drivers/net/phy/teranetics.c -@@ -0,0 +1,135 @@ -+/* -+ * Driver for Teranetics PHY -+ * -+ * Author: Shaohui Xie -+ * -+ * Copyright 2015 Freescale Semiconductor, Inc. -+ * -+ * This file is licensed under the terms of the GNU General Public License -+ * version 2. This program is licensed "as is" without any warranty of any -+ * kind, whether express or implied. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+MODULE_DESCRIPTION("Teranetics PHY driver"); -+MODULE_AUTHOR("Shaohui Xie "); -+MODULE_LICENSE("GPL v2"); -+ -+#define PHY_ID_TN2020 0x00a19410 -+#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001 -+#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002 -+#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004 -+#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008 -+#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000 -+ -+#define MDIO_PHYXS_LANE_READY (MDIO_PHYXS_LNSTAT_SYNC0 | \ -+ MDIO_PHYXS_LNSTAT_SYNC1 | \ -+ MDIO_PHYXS_LNSTAT_SYNC2 | \ -+ MDIO_PHYXS_LNSTAT_SYNC3 | \ -+ MDIO_PHYXS_LNSTAT_ALIGN) -+ -+static int teranetics_config_init(struct phy_device *phydev) -+{ -+ phydev->supported = SUPPORTED_10000baseT_Full; -+ phydev->advertising = SUPPORTED_10000baseT_Full; -+ -+ return 0; -+} -+ -+static int teranetics_soft_reset(struct phy_device *phydev) -+{ -+ return 0; -+} -+ -+static int teranetics_aneg_done(struct phy_device *phydev) -+{ -+ int reg; -+ -+ /* auto negotiation state can only be checked when using copper -+ * port, if using fiber port, just lie it's done. -+ */ -+ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); -+ return (reg < 0) ? 
reg : (reg & BMSR_ANEGCOMPLETE); -+ } -+ -+ return 1; -+} -+ -+static int teranetics_config_aneg(struct phy_device *phydev) -+{ -+ return 0; -+} -+ -+static int teranetics_read_status(struct phy_device *phydev) -+{ -+ int reg; -+ -+ phydev->link = 1; -+ -+ phydev->speed = SPEED_10000; -+ phydev->duplex = DUPLEX_FULL; -+ -+ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { -+ reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT); -+ if (reg < 0 || -+ !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) { -+ phydev->link = 0; -+ return 0; -+ } -+ -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); -+ if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS)) -+ phydev->link = 0; -+ } -+ -+ return 0; -+} -+ -+static int teranetics_match_phy_device(struct phy_device *phydev) -+{ -+ return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020; -+} -+ -+static struct phy_driver teranetics_driver[] = { -+{ -+ .phy_id = PHY_ID_TN2020, -+ .phy_id_mask = 0xffffffff, -+ .name = "Teranetics TN2020", -+ .soft_reset = teranetics_soft_reset, -+ .aneg_done = teranetics_aneg_done, -+ .config_init = teranetics_config_init, -+ .config_aneg = teranetics_config_aneg, -+ .read_status = teranetics_read_status, -+ .match_phy_device = teranetics_match_phy_device, -+ .driver = { .owner = THIS_MODULE,}, -+}, -+}; -+ -+static int __init teranetics_init(void) -+{ -+ return phy_drivers_register(teranetics_driver, -+ ARRAY_SIZE(teranetics_driver)); -+} -+ -+static void __exit teranetics_exit(void) -+{ -+ return phy_drivers_unregister(teranetics_driver, -+ ARRAY_SIZE(teranetics_driver)); -+} -+ -+module_init(teranetics_init); -+module_exit(teranetics_exit); -+ -+static struct mdio_device_id __maybe_unused teranetics_tbl[] = { -+ { PHY_ID_TN2020, 0xffffffff }, -+ { } -+}; -+ -+MODULE_DEVICE_TABLE(mdio, teranetics_tbl); -diff --git a/drivers/of/base.c b/drivers/of/base.c -index 469d2b7..210c876 100644 ---- a/drivers/of/base.c -+++ b/drivers/of/base.c -@@ -32,8 +32,8 @@ - - LIST_HEAD(aliases_lookup); - 
--struct device_node *of_allnodes; --EXPORT_SYMBOL(of_allnodes); -+struct device_node *of_root; -+EXPORT_SYMBOL(of_root); - struct device_node *of_chosen; - struct device_node *of_aliases; - struct device_node *of_stdout; -@@ -48,7 +48,7 @@ struct kset *of_kset; - */ - DEFINE_MUTEX(of_mutex); - --/* use when traversing tree through the allnext, child, sibling, -+/* use when traversing tree through the child, sibling, - * or parent members of struct device_node. - */ - DEFINE_RAW_SPINLOCK(devtree_lock); -@@ -204,7 +204,7 @@ static int __init of_init(void) - mutex_unlock(&of_mutex); - - /* Symlink in /proc as required by userspace ABI */ -- if (of_allnodes) -+ if (of_root) - proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); - - return 0; -@@ -245,6 +245,23 @@ struct property *of_find_property(const struct device_node *np, - } - EXPORT_SYMBOL(of_find_property); - -+struct device_node *__of_find_all_nodes(struct device_node *prev) -+{ -+ struct device_node *np; -+ if (!prev) { -+ np = of_root; -+ } else if (prev->child) { -+ np = prev->child; -+ } else { -+ /* Walk back up looking for a sibling, or the end of the structure */ -+ np = prev; -+ while (np->parent && !np->sibling) -+ np = np->parent; -+ np = np->sibling; /* Might be null at the end of the tree */ -+ } -+ return np; -+} -+ - /** - * of_find_all_nodes - Get next node in global list - * @prev: Previous node or NULL to start iteration -@@ -259,10 +276,8 @@ struct device_node *of_find_all_nodes(struct device_node *prev) - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = prev ? 
prev->allnext : of_allnodes; -- for (; np != NULL; np = np->allnext) -- if (of_node_get(np)) -- break; -+ np = __of_find_all_nodes(prev); -+ of_node_get(np); - of_node_put(prev); - raw_spin_unlock_irqrestore(&devtree_lock, flags); - return np; -@@ -736,7 +751,7 @@ struct device_node *of_find_node_by_path(const char *path) - unsigned long flags; - - if (strcmp(path, "/") == 0) -- return of_node_get(of_allnodes); -+ return of_node_get(of_root); - - /* The path could begin with an alias */ - if (*path != '/') { -@@ -761,7 +776,7 @@ struct device_node *of_find_node_by_path(const char *path) - /* Step down the tree matching path components */ - raw_spin_lock_irqsave(&devtree_lock, flags); - if (!np) -- np = of_node_get(of_allnodes); -+ np = of_node_get(of_root); - while (np && *path == '/') { - path++; /* Increment past '/' delimiter */ - np = __of_find_node_by_path(np, path); -@@ -790,8 +805,7 @@ struct device_node *of_find_node_by_name(struct device_node *from, - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = from ? from->allnext : of_allnodes; -- for (; np; np = np->allnext) -+ for_each_of_allnodes_from(from, np) - if (np->name && (of_node_cmp(np->name, name) == 0) - && of_node_get(np)) - break; -@@ -820,8 +834,7 @@ struct device_node *of_find_node_by_type(struct device_node *from, - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = from ? from->allnext : of_allnodes; -- for (; np; np = np->allnext) -+ for_each_of_allnodes_from(from, np) - if (np->type && (of_node_cmp(np->type, type) == 0) - && of_node_get(np)) - break; -@@ -852,12 +865,10 @@ struct device_node *of_find_compatible_node(struct device_node *from, - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = from ? 
from->allnext : of_allnodes; -- for (; np; np = np->allnext) { -+ for_each_of_allnodes_from(from, np) - if (__of_device_is_compatible(np, compatible, type, NULL) && - of_node_get(np)) - break; -- } - of_node_put(from); - raw_spin_unlock_irqrestore(&devtree_lock, flags); - return np; -@@ -884,8 +895,7 @@ struct device_node *of_find_node_with_property(struct device_node *from, - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = from ? from->allnext : of_allnodes; -- for (; np; np = np->allnext) { -+ for_each_of_allnodes_from(from, np) { - for (pp = np->properties; pp; pp = pp->next) { - if (of_prop_cmp(pp->name, prop_name) == 0) { - of_node_get(np); -@@ -967,8 +977,7 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from, - *match = NULL; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = from ? from->allnext : of_allnodes; -- for (; np; np = np->allnext) { -+ for_each_of_allnodes_from(from, np) { - m = __of_match_node(matches, np); - if (m && of_node_get(np)) { - if (match) -@@ -1025,7 +1034,7 @@ struct device_node *of_find_node_by_phandle(phandle handle) - return NULL; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- for (np = of_allnodes; np; np = np->allnext) -+ for_each_of_allnodes(np) - if (np->phandle == handle) - break; - of_node_get(np); -diff --git a/drivers/of/device.c b/drivers/of/device.c -index 46d6c75..20c1332 100644 ---- a/drivers/of/device.c -+++ b/drivers/of/device.c -@@ -2,6 +2,9 @@ - #include - #include - #include -+#include -+#include -+#include - #include - #include - #include -@@ -66,6 +69,87 @@ int of_device_add(struct platform_device *ofdev) - return device_add(&ofdev->dev); - } - -+/** -+ * of_dma_configure - Setup DMA configuration -+ * @dev: Device to apply DMA configuration -+ * @np: Pointer to OF node having DMA configuration -+ * -+ * Try to get devices's DMA configuration from DT and update it -+ * accordingly. 
-+ * -+ * If platform code needs to use its own special DMA configuration, it -+ * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events -+ * to fix up DMA configuration. -+ */ -+void of_dma_configure(struct device *dev, struct device_node *np) -+{ -+ u64 dma_addr, paddr, size; -+ int ret; -+ bool coherent; -+ unsigned long offset; -+ struct iommu_ops *iommu; -+ -+ /* -+ * Set default coherent_dma_mask to 32 bit. Drivers are expected to -+ * setup the correct supported mask. -+ */ -+ if (!dev->coherent_dma_mask) -+ dev->coherent_dma_mask = DMA_BIT_MASK(32); -+ -+ /* -+ * Set it to coherent_dma_mask by default if the architecture -+ * code has not set it. -+ */ -+ if (!dev->dma_mask) -+ dev->dma_mask = &dev->coherent_dma_mask; -+ -+ ret = of_dma_get_range(np, &dma_addr, &paddr, &size); -+ if (ret < 0) { -+ dma_addr = offset = 0; -+ size = dev->coherent_dma_mask + 1; -+ } else { -+ offset = PFN_DOWN(paddr - dma_addr); -+ -+ /* -+ * Add a work around to treat the size as mask + 1 in case -+ * it is defined in DT as a mask. -+ */ -+ if (size & 1) { -+ dev_warn(dev, "Invalid size 0x%llx for dma-range\n", -+ size); -+ size = size + 1; -+ } -+ -+ if (!size) { -+ dev_err(dev, "Adjusted size 0x%llx invalid\n", size); -+ return; -+ } -+ dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); -+ } -+ -+ dev->dma_pfn_offset = offset; -+ -+ /* -+ * Limit coherent and dma mask based on size and default mask -+ * set by the driver. -+ */ -+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, -+ DMA_BIT_MASK(ilog2(dma_addr + size))); -+ *dev->dma_mask = min((*dev->dma_mask), -+ DMA_BIT_MASK(ilog2(dma_addr + size))); -+ -+ coherent = of_dma_is_coherent(np); -+ dev_dbg(dev, "device is%sdma coherent\n", -+ coherent ? " " : " not "); -+ -+ iommu = of_iommu_configure(dev, np); -+ dev_dbg(dev, "device is%sbehind an iommu\n", -+ iommu ? 
" " : " not "); -+ -+ arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent); -+} -+EXPORT_SYMBOL_GPL(of_dma_configure); -+ - int of_device_register(struct platform_device *pdev) - { - device_initialize(&pdev->dev); -diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c -index d499417..d43f305 100644 ---- a/drivers/of/dynamic.c -+++ b/drivers/of/dynamic.c -@@ -117,8 +117,6 @@ void __of_attach_node(struct device_node *np) - - np->child = NULL; - np->sibling = np->parent->child; -- np->allnext = np->parent->allnext; -- np->parent->allnext = np; - np->parent->child = np; - of_node_clear_flag(np, OF_DETACHED); - } -@@ -154,17 +152,6 @@ void __of_detach_node(struct device_node *np) - if (WARN_ON(!parent)) - return; - -- if (of_allnodes == np) -- of_allnodes = np->allnext; -- else { -- struct device_node *prev; -- for (prev = of_allnodes; -- prev->allnext != np; -- prev = prev->allnext) -- ; -- prev->allnext = np->allnext; -- } -- - if (parent->child == np) - parent->child = np->sibling; - else { -diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c -index d134710..f6eda02 100644 ---- a/drivers/of/fdt.c -+++ b/drivers/of/fdt.c -@@ -145,15 +145,15 @@ static void *unflatten_dt_alloc(void **mem, unsigned long size, - * @mem: Memory chunk to use for allocating device nodes and properties - * @p: pointer to node in flat tree - * @dad: Parent struct device_node -- * @allnextpp: pointer to ->allnext from last allocated device_node - * @fpsize: Size of the node path up at the current depth. 
- */ - static void * unflatten_dt_node(void *blob, - void *mem, - int *poffset, - struct device_node *dad, -- struct device_node ***allnextpp, -- unsigned long fpsize) -+ struct device_node **nodepp, -+ unsigned long fpsize, -+ bool dryrun) - { - const __be32 *p; - struct device_node *np; -@@ -200,7 +200,7 @@ static void * unflatten_dt_node(void *blob, - - np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, - __alignof__(struct device_node)); -- if (allnextpp) { -+ if (!dryrun) { - char *fn; - of_node_init(np); - np->full_name = fn = ((char *)np) + sizeof(*np); -@@ -222,8 +222,6 @@ static void * unflatten_dt_node(void *blob, - memcpy(fn, pathp, l); - - prev_pp = &np->properties; -- **allnextpp = np; -- *allnextpp = &np->allnext; - if (dad != NULL) { - np->parent = dad; - /* we temporarily use the next field as `last_child'*/ -@@ -254,7 +252,7 @@ static void * unflatten_dt_node(void *blob, - has_name = 1; - pp = unflatten_dt_alloc(&mem, sizeof(struct property), - __alignof__(struct property)); -- if (allnextpp) { -+ if (!dryrun) { - /* We accept flattened tree phandles either in - * ePAPR-style "phandle" properties, or the - * legacy "linux,phandle" properties. 
If both -@@ -296,7 +294,7 @@ static void * unflatten_dt_node(void *blob, - sz = (pa - ps) + 1; - pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, - __alignof__(struct property)); -- if (allnextpp) { -+ if (!dryrun) { - pp->name = "name"; - pp->length = sz; - pp->value = pp + 1; -@@ -308,7 +306,7 @@ static void * unflatten_dt_node(void *blob, - (char *)pp->value); - } - } -- if (allnextpp) { -+ if (!dryrun) { - *prev_pp = NULL; - np->name = of_get_property(np, "name", NULL); - np->type = of_get_property(np, "device_type", NULL); -@@ -324,11 +322,13 @@ static void * unflatten_dt_node(void *blob, - if (depth < 0) - depth = 0; - while (*poffset > 0 && depth > old_depth) -- mem = unflatten_dt_node(blob, mem, poffset, np, allnextpp, -- fpsize); -+ mem = unflatten_dt_node(blob, mem, poffset, np, NULL, -+ fpsize, dryrun); - - if (*poffset < 0 && *poffset != -FDT_ERR_NOTFOUND) - pr_err("unflatten: error %d processing FDT\n", *poffset); -+ if (nodepp) -+ *nodepp = np; - - return mem; - } -@@ -352,7 +352,6 @@ static void __unflatten_device_tree(void *blob, - unsigned long size; - int start; - void *mem; -- struct device_node **allnextp = mynodes; - - pr_debug(" -> unflatten_device_tree()\n"); - -@@ -373,7 +372,7 @@ static void __unflatten_device_tree(void *blob, - - /* First pass, scan for size */ - start = 0; -- size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0); -+ size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0, true); - size = ALIGN(size, 4); - - pr_debug(" size is %lx, allocating...\n", size); -@@ -388,11 +387,10 @@ static void __unflatten_device_tree(void *blob, - - /* Second pass, do actual unflattening */ - start = 0; -- unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0); -+ unflatten_dt_node(blob, mem, &start, NULL, mynodes, 0, false); - if (be32_to_cpup(mem + size) != 0xdeadbeef) - pr_warning("End of tree marker overwritten: %08x\n", - be32_to_cpup(mem + size)); -- *allnextp = NULL; - - pr_debug(" <- 
unflatten_device_tree()\n"); - } -@@ -1039,7 +1037,7 @@ bool __init early_init_dt_scan(void *params) - */ - void __init unflatten_device_tree(void) - { -- __unflatten_device_tree(initial_boot_params, &of_allnodes, -+ __unflatten_device_tree(initial_boot_params, &of_root, - early_init_dt_alloc_memory_arch); - - /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */ -diff --git a/drivers/of/irq.c b/drivers/of/irq.c -index b97363a..4419e62 100644 ---- a/drivers/of/irq.c -+++ b/drivers/of/irq.c -@@ -18,6 +18,7 @@ - * driver. - */ - -+#include - #include - #include - #include -@@ -576,3 +577,23 @@ err: - kfree(desc); - } - } -+ -+/** -+ * of_msi_configure - Set the msi_domain field of a device -+ * @dev: device structure to associate with an MSI irq domain -+ * @np: device node for that device -+ */ -+void of_msi_configure(struct device *dev, struct device_node *np) -+{ -+ struct device_node *msi_np; -+ struct irq_domain *d; -+ -+ msi_np = of_parse_phandle(np, "msi-parent", 0); -+ if (!msi_np) -+ return; -+ -+ d = irq_find_matching_host(msi_np, DOMAIN_BUS_PLATFORM_MSI); -+ if (!d) -+ d = irq_find_host(msi_np); -+ dev_set_msi_domain(dev, d); -+} -diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c -index ecc5fa5..5751dc5 100644 ---- a/drivers/of/of_pci.c -+++ b/drivers/of/of_pci.c -@@ -2,6 +2,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -116,6 +117,26 @@ int of_get_pci_domain_nr(struct device_node *node) - } - EXPORT_SYMBOL_GPL(of_get_pci_domain_nr); - -+/** -+ * of_pci_dma_configure - Setup DMA configuration -+ * @dev: ptr to pci_dev struct of the PCI device -+ * -+ * Function to update PCI devices's DMA configuration using the same -+ * info from the OF node of host bridge's parent (if any). 
-+ */ -+void of_pci_dma_configure(struct pci_dev *pci_dev) -+{ -+ struct device *dev = &pci_dev->dev; -+ struct device *bridge = pci_get_host_bridge_device(pci_dev); -+ -+ if (!bridge->parent) -+ return; -+ -+ of_dma_configure(dev, bridge->parent->of_node); -+ pci_put_host_bridge_device(bridge); -+} -+EXPORT_SYMBOL_GPL(of_pci_dma_configure); -+ - #if defined(CONFIG_OF_ADDRESS) - /** - * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT -@@ -140,7 +161,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, - unsigned char busno, unsigned char bus_max, - struct list_head *resources, resource_size_t *io_base) - { -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - struct resource *res; - struct resource *bus_range; - struct of_pci_range range; -@@ -226,10 +247,9 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, - conversion_failed: - kfree(res); - parse_failed: -- list_for_each_entry(window, resources, list) -+ resource_list_for_each_entry(window, resources) - kfree(window->res); - pci_free_resource_list(resources); -- kfree(bus_range); - return err; - } - EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); -@@ -240,7 +260,7 @@ EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); - static LIST_HEAD(of_pci_msi_chip_list); - static DEFINE_MUTEX(of_pci_msi_chip_mutex); - --int of_pci_msi_chip_add(struct msi_chip *chip) -+int of_pci_msi_chip_add(struct msi_controller *chip) - { - if (!of_property_read_bool(chip->of_node, "msi-controller")) - return -EINVAL; -@@ -253,7 +273,7 @@ int of_pci_msi_chip_add(struct msi_chip *chip) - } - EXPORT_SYMBOL_GPL(of_pci_msi_chip_add); - --void of_pci_msi_chip_remove(struct msi_chip *chip) -+void of_pci_msi_chip_remove(struct msi_controller *chip) - { - mutex_lock(&of_pci_msi_chip_mutex); - list_del(&chip->list); -@@ -261,9 +281,9 @@ void of_pci_msi_chip_remove(struct msi_chip *chip) - } - EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove); - --struct msi_chip 
*of_pci_find_msi_chip_by_node(struct device_node *of_node) -+struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node) - { -- struct msi_chip *c; -+ struct msi_controller *c; - - mutex_lock(&of_pci_msi_chip_mutex); - list_for_each_entry(c, &of_pci_msi_chip_list, list) { -diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c -index 36b4035..d2acae8 100644 ---- a/drivers/of/pdt.c -+++ b/drivers/of/pdt.c -@@ -25,8 +25,7 @@ - - static struct of_pdt_ops *of_pdt_prom_ops __initdata; - --void __initdata (*of_pdt_build_more)(struct device_node *dp, -- struct device_node ***nextp); -+void __initdata (*of_pdt_build_more)(struct device_node *dp); - - #if defined(CONFIG_SPARC) - unsigned int of_pdt_unique_id __initdata; -@@ -192,8 +191,7 @@ static struct device_node * __init of_pdt_create_node(phandle node, - } - - static struct device_node * __init of_pdt_build_tree(struct device_node *parent, -- phandle node, -- struct device_node ***nextp) -+ phandle node) - { - struct device_node *ret = NULL, *prev_sibling = NULL; - struct device_node *dp; -@@ -210,16 +208,12 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent, - ret = dp; - prev_sibling = dp; - -- *(*nextp) = dp; -- *nextp = &dp->allnext; -- - dp->full_name = of_pdt_build_full_name(dp); - -- dp->child = of_pdt_build_tree(dp, -- of_pdt_prom_ops->getchild(node), nextp); -+ dp->child = of_pdt_build_tree(dp, of_pdt_prom_ops->getchild(node)); - - if (of_pdt_build_more) -- of_pdt_build_more(dp, nextp); -+ of_pdt_build_more(dp); - - node = of_pdt_prom_ops->getsibling(node); - } -@@ -234,20 +228,17 @@ static void * __init kernel_tree_alloc(u64 size, u64 align) - - void __init of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops) - { -- struct device_node **nextp; -- - BUG_ON(!ops); - of_pdt_prom_ops = ops; - -- of_allnodes = of_pdt_create_node(root_node, NULL); -+ of_root = of_pdt_create_node(root_node, NULL); - #if defined(CONFIG_SPARC) -- 
of_allnodes->path_component_name = ""; -+ of_root->path_component_name = ""; - #endif -- of_allnodes->full_name = "/"; -+ of_root->full_name = "/"; - -- nextp = &of_allnodes->allnext; -- of_allnodes->child = of_pdt_build_tree(of_allnodes, -- of_pdt_prom_ops->getchild(of_allnodes->phandle), &nextp); -+ of_root->child = of_pdt_build_tree(of_root, -+ of_pdt_prom_ops->getchild(of_root->phandle)); - - /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */ - of_alias_scan(kernel_tree_alloc); -diff --git a/drivers/of/platform.c b/drivers/of/platform.c -index 3b64d0b..8a002d6 100644 ---- a/drivers/of/platform.c -+++ b/drivers/of/platform.c -@@ -25,6 +25,7 @@ - - const struct of_device_id of_default_bus_match_table[] = { - { .compatible = "simple-bus", }, -+ { .compatible = "simple-mfd", }, - #ifdef CONFIG_ARM_AMBA - { .compatible = "arm,amba-bus", }, - #endif /* CONFIG_ARM_AMBA */ -@@ -138,7 +139,7 @@ struct platform_device *of_device_alloc(struct device_node *np, - } - - dev->dev.of_node = of_node_get(np); -- dev->dev.parent = parent; -+ dev->dev.parent = parent ? : &platform_bus; - - if (bus_id) - dev_set_name(&dev->dev, "%s", bus_id); -@@ -149,57 +150,9 @@ struct platform_device *of_device_alloc(struct device_node *np, - } - EXPORT_SYMBOL(of_device_alloc); - --/** -- * of_dma_configure - Setup DMA configuration -- * @dev: Device to apply DMA configuration -- * -- * Try to get devices's DMA configuration from DT and update it -- * accordingly. -- * -- * In case if platform code need to use own special DMA configuration,it -- * can use Platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE event -- * to fix up DMA configuration. -- */ --static void of_dma_configure(struct device *dev) -+static void of_dma_deconfigure(struct device *dev) - { -- u64 dma_addr, paddr, size; -- int ret; -- -- /* -- * Set default dma-mask to 32 bit. Drivers are expected to setup -- * the correct supported dma_mask. 
-- */ -- dev->coherent_dma_mask = DMA_BIT_MASK(32); -- -- /* -- * Set it to coherent_dma_mask by default if the architecture -- * code has not set it. -- */ -- if (!dev->dma_mask) -- dev->dma_mask = &dev->coherent_dma_mask; -- -- /* -- * if dma-coherent property exist, call arch hook to setup -- * dma coherent operations. -- */ -- if (of_dma_is_coherent(dev->of_node)) { -- set_arch_dma_coherent_ops(dev); -- dev_dbg(dev, "device is dma coherent\n"); -- } -- -- /* -- * if dma-ranges property doesn't exist - just return else -- * setup the dma offset -- */ -- ret = of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size); -- if (ret < 0) { -- dev_dbg(dev, "no dma range information to setup\n"); -- return; -- } -- -- /* DMA ranges found. Calculate and set dma_pfn_offset */ -- dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr); -- dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset); -+ arch_teardown_dma_ops(dev); - } - - /** -@@ -228,16 +181,13 @@ static struct platform_device *of_platform_device_create_pdata( - if (!dev) - goto err_clear_flag; - -- of_dma_configure(&dev->dev); - dev->dev.bus = &platform_bus_type; - dev->dev.platform_data = platform_data; -- -- /* We do not fill the DMA ops for platform devices by default. -- * This is currently the responsibility of the platform code -- * to do such, possibly using a device notifier -- */ -+ of_dma_configure(&dev->dev, dev->dev.of_node); -+ of_msi_configure(&dev->dev, dev->dev.of_node); - - if (of_device_add(dev) != 0) { -+ of_dma_deconfigure(&dev->dev); - platform_device_put(dev); - goto err_clear_flag; - } -@@ -291,13 +241,13 @@ static struct amba_device *of_amba_device_create(struct device_node *node, - - /* setup generic device info */ - dev->dev.of_node = of_node_get(node); -- dev->dev.parent = parent; -+ dev->dev.parent = parent ? 
: &platform_bus; - dev->dev.platform_data = platform_data; - if (bus_id) - dev_set_name(&dev->dev, "%s", bus_id); - else - of_device_make_bus_id(&dev->dev); -- of_dma_configure(&dev->dev); -+ of_dma_configure(&dev->dev, dev->dev.of_node); - - /* Allow the HW Peripheral ID to be overridden */ - prop = of_get_property(node, "arm,primecell-periphid", NULL); -@@ -500,6 +450,7 @@ int of_platform_populate(struct device_node *root, - if (rc) - break; - } -+ of_node_set_flag(root, OF_POPULATED_BUS); - - of_node_put(root); - return rc; -@@ -523,6 +474,7 @@ static int of_platform_device_destroy(struct device *dev, void *data) - amba_device_unregister(to_amba_device(dev)); - #endif - -+ of_dma_deconfigure(dev); - of_node_clear_flag(dev->of_node, OF_POPULATED); - of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); - return 0; -@@ -542,8 +494,75 @@ static int of_platform_device_destroy(struct device *dev, void *data) - */ - void of_platform_depopulate(struct device *parent) - { -- device_for_each_child(parent, NULL, of_platform_device_destroy); -+ if (parent->of_node && of_node_check_flag(parent->of_node, OF_POPULATED_BUS)) { -+ device_for_each_child(parent, NULL, of_platform_device_destroy); -+ of_node_clear_flag(parent->of_node, OF_POPULATED_BUS); -+ } - } - EXPORT_SYMBOL_GPL(of_platform_depopulate); - -+#ifdef CONFIG_OF_DYNAMIC -+static int of_platform_notify(struct notifier_block *nb, -+ unsigned long action, void *arg) -+{ -+ struct of_reconfig_data *rd = arg; -+ struct platform_device *pdev_parent, *pdev; -+ bool children_left; -+ -+ switch (of_reconfig_get_state_change(action, rd)) { -+ case OF_RECONFIG_CHANGE_ADD: -+ /* verify that the parent is a bus */ -+ if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS)) -+ return NOTIFY_OK; /* not for us */ -+ -+ /* already populated? 
(driver using of_populate manually) */ -+ if (of_node_check_flag(rd->dn, OF_POPULATED)) -+ return NOTIFY_OK; -+ -+ /* pdev_parent may be NULL when no bus platform device */ -+ pdev_parent = of_find_device_by_node(rd->dn->parent); -+ pdev = of_platform_device_create(rd->dn, NULL, -+ pdev_parent ? &pdev_parent->dev : NULL); -+ of_dev_put(pdev_parent); -+ -+ if (pdev == NULL) { -+ pr_err("%s: failed to create for '%s'\n", -+ __func__, rd->dn->full_name); -+ /* of_platform_device_create tosses the error code */ -+ return notifier_from_errno(-EINVAL); -+ } -+ break; -+ -+ case OF_RECONFIG_CHANGE_REMOVE: -+ -+ /* already depopulated? */ -+ if (!of_node_check_flag(rd->dn, OF_POPULATED)) -+ return NOTIFY_OK; -+ -+ /* find our device by node */ -+ pdev = of_find_device_by_node(rd->dn); -+ if (pdev == NULL) -+ return NOTIFY_OK; /* no? not meant for us */ -+ -+ /* unregister takes one ref away */ -+ of_platform_device_destroy(&pdev->dev, &children_left); -+ -+ /* and put the reference of the find */ -+ of_dev_put(pdev); -+ break; -+ } -+ -+ return NOTIFY_OK; -+} -+ -+static struct notifier_block platform_of_notifier = { -+ .notifier_call = of_platform_notify, -+}; -+ -+void of_platform_register_reconfig_notifier(void) -+{ -+ WARN_ON(of_reconfig_notifier_register(&platform_of_notifier)); -+} -+#endif /* CONFIG_OF_DYNAMIC */ -+ - #endif /* CONFIG_OF_ADDRESS */ -diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c -index e2d79af..e40089e 100644 ---- a/drivers/of/selftest.c -+++ b/drivers/of/selftest.c -@@ -148,7 +148,7 @@ static void __init of_selftest_dynamic(void) - - static int __init of_selftest_check_node_linkage(struct device_node *np) - { -- struct device_node *child, *allnext_index = np; -+ struct device_node *child; - int count = 0, rc; - - for_each_child_of_node(np, child) { -@@ -158,14 +158,6 @@ static int __init of_selftest_check_node_linkage(struct device_node *np) - return -EINVAL; - } - -- while (allnext_index && allnext_index != child) -- allnext_index = 
allnext_index->allnext; -- if (allnext_index != child) { -- pr_err("Node %s is ordered differently in sibling and allnode lists\n", -- child->name); -- return -EINVAL; -- } -- - rc = of_selftest_check_node_linkage(child); - if (rc < 0) - return rc; -@@ -180,12 +172,12 @@ static void __init of_selftest_check_tree_linkage(void) - struct device_node *np; - int allnode_count = 0, child_count; - -- if (!of_allnodes) -+ if (!of_root) - return; - - for_each_of_allnodes(np) - allnode_count++; -- child_count = of_selftest_check_node_linkage(of_allnodes); -+ child_count = of_selftest_check_node_linkage(of_root); - - selftest(child_count > 0, "Device node data structure is corrupted\n"); - selftest(child_count == allnode_count, "allnodes list size (%i) doesn't match" -@@ -775,33 +767,29 @@ static void update_node_properties(struct device_node *np, - */ - static int attach_node_and_children(struct device_node *np) - { -- struct device_node *next, *root = np, *dup; -+ struct device_node *next, *dup, *child; - -- /* skip root node */ -- np = np->child; -- /* storing a copy in temporary node */ -- dup = np; -+ dup = of_find_node_by_path(np->full_name); -+ if (dup) { -+ update_node_properties(np, dup); -+ return 0; -+ } - -- while (dup) { -+ /* Children of the root need to be remembered for removal */ -+ if (np->parent == of_root) { - if (WARN_ON(last_node_index >= NO_OF_NODES)) - return -EINVAL; -- nodes[last_node_index++] = dup; -- dup = dup->sibling; -+ nodes[last_node_index++] = np; - } -- dup = NULL; - -- while (np) { -- next = np->allnext; -- dup = of_find_node_by_path(np->full_name); -- if (dup) -- update_node_properties(np, dup); -- else { -- np->child = NULL; -- if (np->parent == root) -- np->parent = of_allnodes; -- of_attach_node(np); -- } -- np = next; -+ child = np->child; -+ np->child = NULL; -+ np->sibling = NULL; -+ of_attach_node(np); -+ while (child) { -+ next = child->sibling; -+ attach_node_and_children(child); -+ child = next; - } - - return 0; -@@ -846,10 
+834,10 @@ static int __init selftest_data_add(void) - return -EINVAL; - } - -- if (!of_allnodes) { -+ if (!of_root) { - /* enabling flag for removing nodes */ - selftest_live_tree = true; -- of_allnodes = selftest_data_node; -+ of_root = selftest_data_node; - - for_each_of_allnodes(np) - __of_attach_node_sysfs(np); -@@ -859,7 +847,14 @@ static int __init selftest_data_add(void) - } - - /* attach the sub-tree to live tree */ -- return attach_node_and_children(selftest_data_node); -+ np = selftest_data_node->child; -+ while (np) { -+ struct device_node *next = np->sibling; -+ np->parent = of_root; -+ attach_node_and_children(np); -+ np = next; -+ } -+ return 0; - } - - /** -@@ -889,10 +884,10 @@ static void selftest_data_remove(void) - of_node_put(of_chosen); - of_aliases = NULL; - of_chosen = NULL; -- for_each_child_of_node(of_allnodes, np) -+ for_each_child_of_node(of_root, np) - detach_node_and_children(np); -- __of_detach_node_sysfs(of_allnodes); -- of_allnodes = NULL; -+ __of_detach_node_sysfs(of_root); -+ of_root = NULL; - return; - } - -diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig -index 893503f..cced842 100644 ---- a/drivers/pci/Kconfig -+++ b/drivers/pci/Kconfig -@@ -4,6 +4,7 @@ - config PCI_MSI - bool "Message Signaled Interrupts (MSI and MSI-X)" - depends on PCI -+ select GENERIC_MSI_IRQ - help - This allows device drivers to enable MSI (Message Signaled - Interrupts). Message Signaled Interrupts enable a device to -@@ -16,6 +17,11 @@ config PCI_MSI - - If you don't know what to do here, say Y. 
- -+config PCI_MSI_IRQ_DOMAIN -+ bool -+ depends on PCI_MSI -+ select GENERIC_MSI_IRQ_DOMAIN -+ - config PCI_DEBUG - bool "PCI Debugging" - depends on PCI && DEBUG_KERNEL -diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile -index e04fe2d..e9815ac 100644 ---- a/drivers/pci/Makefile -+++ b/drivers/pci/Makefile -@@ -35,6 +35,7 @@ obj-$(CONFIG_PCI_IOV) += iov.o - # - obj-$(CONFIG_ALPHA) += setup-irq.o - obj-$(CONFIG_ARM) += setup-irq.o -+obj-$(CONFIG_ARM64) += setup-irq.o - obj-$(CONFIG_UNICORE32) += setup-irq.o - obj-$(CONFIG_SUPERH) += setup-irq.o - obj-$(CONFIG_MIPS) += setup-irq.o -diff --git a/drivers/pci/access.c b/drivers/pci/access.c -index 7f249b9..b965c12 100644 ---- a/drivers/pci/access.c -+++ b/drivers/pci/access.c -@@ -67,6 +67,93 @@ EXPORT_SYMBOL(pci_bus_write_config_byte); - EXPORT_SYMBOL(pci_bus_write_config_word); - EXPORT_SYMBOL(pci_bus_write_config_dword); - -+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 *val) -+{ -+ void __iomem *addr; -+ -+ addr = bus->ops->map_bus(bus, devfn, where); -+ if (!addr) { -+ *val = ~0; -+ return PCIBIOS_DEVICE_NOT_FOUND; -+ } -+ -+ if (size == 1) -+ *val = readb(addr); -+ else if (size == 2) -+ *val = readw(addr); -+ else -+ *val = readl(addr); -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+EXPORT_SYMBOL_GPL(pci_generic_config_read); -+ -+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 val) -+{ -+ void __iomem *addr; -+ -+ addr = bus->ops->map_bus(bus, devfn, where); -+ if (!addr) -+ return PCIBIOS_DEVICE_NOT_FOUND; -+ -+ if (size == 1) -+ writeb(val, addr); -+ else if (size == 2) -+ writew(val, addr); -+ else -+ writel(val, addr); -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+EXPORT_SYMBOL_GPL(pci_generic_config_write); -+ -+int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 *val) -+{ -+ void __iomem *addr; -+ -+ addr = bus->ops->map_bus(bus, devfn, where & ~0x3); -+ if (!addr) { 
-+ *val = ~0; -+ return PCIBIOS_DEVICE_NOT_FOUND; -+ } -+ -+ *val = readl(addr); -+ -+ if (size <= 2) -+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+EXPORT_SYMBOL_GPL(pci_generic_config_read32); -+ -+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 val) -+{ -+ void __iomem *addr; -+ u32 mask, tmp; -+ -+ addr = bus->ops->map_bus(bus, devfn, where & ~0x3); -+ if (!addr) -+ return PCIBIOS_DEVICE_NOT_FOUND; -+ -+ if (size == 4) { -+ writel(val, addr); -+ return PCIBIOS_SUCCESSFUL; -+ } else { -+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); -+ } -+ -+ tmp = readl(addr) & mask; -+ tmp |= val << ((where & 0x3) * 8); -+ writel(tmp, addr); -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+EXPORT_SYMBOL_GPL(pci_generic_config_write32); -+ - /** - * pci_bus_set_ops - Set raw operations of pci bus - * @bus: pci bus struct -diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c -index 8fb1618..90fa3a7 100644 ---- a/drivers/pci/bus.c -+++ b/drivers/pci/bus.c -@@ -20,17 +20,16 @@ - void pci_add_resource_offset(struct list_head *resources, struct resource *res, - resource_size_t offset) - { -- struct pci_host_bridge_window *window; -+ struct resource_entry *entry; - -- window = kzalloc(sizeof(struct pci_host_bridge_window), GFP_KERNEL); -- if (!window) { -+ entry = resource_list_create_entry(res, 0); -+ if (!entry) { - printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res); - return; - } - -- window->res = res; -- window->offset = offset; -- list_add_tail(&window->list, resources); -+ entry->offset = offset; -+ resource_list_add_tail(entry, resources); - } - EXPORT_SYMBOL(pci_add_resource_offset); - -@@ -42,12 +41,7 @@ EXPORT_SYMBOL(pci_add_resource); - - void pci_free_resource_list(struct list_head *resources) - { -- struct pci_host_bridge_window *window, *tmp; -- -- list_for_each_entry_safe(window, tmp, resources, list) { -- list_del(&window->list); -- 
kfree(window); -- } -+ resource_list_free(resources); - } - EXPORT_SYMBOL(pci_free_resource_list); - -diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c -index 0e5f3c9..3e5bbf9 100644 ---- a/drivers/pci/host-bridge.c -+++ b/drivers/pci/host-bridge.c -@@ -23,6 +23,20 @@ static struct pci_host_bridge *find_pci_host_bridge(struct pci_bus *bus) - return to_pci_host_bridge(root_bus->bridge); - } - -+struct device *pci_get_host_bridge_device(struct pci_dev *dev) -+{ -+ struct pci_bus *root_bus = find_pci_root_bus(dev->bus); -+ struct device *bridge = root_bus->bridge; -+ -+ kobject_get(&bridge->kobj); -+ return bridge; -+} -+ -+void pci_put_host_bridge_device(struct device *dev) -+{ -+ kobject_put(&dev->kobj); -+} -+ - void pci_set_host_bridge_release(struct pci_host_bridge *bridge, - void (*release_fn)(struct pci_host_bridge *), - void *release_data) -@@ -35,10 +49,10 @@ void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region, - struct resource *res) - { - struct pci_host_bridge *bridge = find_pci_host_bridge(bus); -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - resource_size_t offset = 0; - -- list_for_each_entry(window, &bridge->windows, list) { -+ resource_list_for_each_entry(window, &bridge->windows) { - if (resource_contains(window->res, res)) { - offset = window->offset; - break; -@@ -60,10 +74,10 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res, - struct pci_bus_region *region) - { - struct pci_host_bridge *bridge = find_pci_host_bridge(bus); -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - resource_size_t offset = 0; - -- list_for_each_entry(window, &bridge->windows, list) { -+ resource_list_for_each_entry(window, &bridge->windows) { - struct pci_bus_region bus_region; - - if (resource_type(res) != resource_type(window->res)) -diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig -index 3dc25fa..dafa3dc 100644 ---- 
a/drivers/pci/host/Kconfig -+++ b/drivers/pci/host/Kconfig -@@ -50,7 +50,7 @@ config PCI_RCAR_GEN2_PCIE - - config PCI_HOST_GENERIC - bool "Generic PCI host controller" -- depends on ARM && OF -+ depends on (ARM || ARM64) && OF - help - Say Y here if you want to support a simple generic PCI host - controller, such as the one emulated by kvmtool. -@@ -86,9 +86,26 @@ config PCI_XGENE - depends on ARCH_XGENE - depends on OF - select PCIEPORTBUS -+ select PCI_MSI_IRQ_DOMAIN if PCI_MSI - help - Say Y here if you want internal PCI support on APM X-Gene SoC. - There are 5 internal PCIe ports available. Each port is GEN3 capable - and have varied lanes from x1 to x8. - -+config PCI_XGENE_MSI -+ bool "X-Gene v1 PCIe MSI feature" -+ depends on PCI_XGENE && PCI_MSI -+ default y -+ help -+ Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. -+ This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC. -+ -+config PCI_LAYERSCAPE -+ bool "Freescale Layerscape PCIe controller" -+ depends on OF && (ARM || ARCH_LAYERSCAPE) -+ select PCIE_DW -+ select MFD_SYSCON -+ help -+ Say Y here if you want PCIe controller support on Layerscape SoCs. 
-+ - endmenu -diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile -index 26b3461..029685e 100644 ---- a/drivers/pci/host/Makefile -+++ b/drivers/pci/host/Makefile -@@ -1,3 +1,4 @@ -+obj-$(CONFIG_PCIE_DW_BASE) += pcie-designware-base.o - obj-$(CONFIG_PCIE_DW) += pcie-designware.o - obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o - obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o -@@ -11,3 +12,5 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o - obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o - obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o - obj-$(CONFIG_PCI_XGENE) += pci-xgene.o -+obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o -+obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o -diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c -index 52b34fe..84a45cf 100644 ---- a/drivers/pci/host/pci-dra7xx.c -+++ b/drivers/pci/host/pci-dra7xx.c -@@ -61,6 +61,7 @@ - - #define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C - #define LINK_UP BIT(16) -+#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF - - struct dra7xx_pcie { - void __iomem *base; -@@ -144,6 +145,12 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) - static void dra7xx_pcie_host_init(struct pcie_port *pp) - { - dw_pcie_setup_rc(pp); -+ -+ pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR; -+ pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR; -+ pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR; -+ pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR; -+ - dra7xx_pcie_establish_link(pp); - if (IS_ENABLED(CONFIG_PCI_MSI)) - dw_pcie_msi_init(pp); -@@ -160,7 +167,6 @@ static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - { - irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - return 0; - } -diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c -index c5d0ca3..2fd6b4e 100644 ---- a/drivers/pci/host/pci-exynos.c -+++ b/drivers/pci/host/pci-exynos.c -@@ -466,7 +466,7 @@ static int 
exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, - int ret; - - exynos_pcie_sideband_dbi_r_mode(pp, true); -- ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); -+ ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val); - exynos_pcie_sideband_dbi_r_mode(pp, false); - return ret; - } -@@ -477,8 +477,7 @@ static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, - int ret; - - exynos_pcie_sideband_dbi_w_mode(pp, true); -- ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), -- where, size, val); -+ ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val); - exynos_pcie_sideband_dbi_w_mode(pp, false); - return ret; - } -diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c -index 3d2076f..83fb705 100644 ---- a/drivers/pci/host/pci-host-generic.c -+++ b/drivers/pci/host/pci-host-generic.c -@@ -32,13 +32,22 @@ struct gen_pci_cfg_bus_ops { - - struct gen_pci_cfg_windows { - struct resource res; -- struct resource bus_range; -+ struct resource *bus_range; - void __iomem **win; - - const struct gen_pci_cfg_bus_ops *ops; - }; - -+/* -+ * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI -+ * sysdata. Add pci_sys_data as the first element in struct gen_pci so -+ * that when we use a gen_pci pointer as sysdata, it is also a pointer to -+ * a struct pci_sys_data. 
-+ */ - struct gen_pci { -+#ifdef CONFIG_ARM -+ struct pci_sys_data sys; -+#endif - struct pci_host_bridge host; - struct gen_pci_cfg_windows cfg; - struct list_head resources; -@@ -48,9 +57,8 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus, - unsigned int devfn, - int where) - { -- struct pci_sys_data *sys = bus->sysdata; -- struct gen_pci *pci = sys->private_data; -- resource_size_t idx = bus->number - pci->cfg.bus_range.start; -+ struct gen_pci *pci = bus->sysdata; -+ resource_size_t idx = bus->number - pci->cfg.bus_range->start; - - return pci->cfg.win[idx] + ((devfn << 8) | where); - } -@@ -64,9 +72,8 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus, - unsigned int devfn, - int where) - { -- struct pci_sys_data *sys = bus->sysdata; -- struct gen_pci *pci = sys->private_data; -- resource_size_t idx = bus->number - pci->cfg.bus_range.start; -+ struct gen_pci *pci = bus->sysdata; -+ resource_size_t idx = bus->number - pci->cfg.bus_range->start; - - return pci->cfg.win[idx] + ((devfn << 12) | where); - } -@@ -76,55 +83,9 @@ static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = { - .map_bus = gen_pci_map_cfg_bus_ecam, - }; - --static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn, -- int where, int size, u32 *val) --{ -- void __iomem *addr; -- struct pci_sys_data *sys = bus->sysdata; -- struct gen_pci *pci = sys->private_data; -- -- addr = pci->cfg.ops->map_bus(bus, devfn, where); -- -- switch (size) { -- case 1: -- *val = readb(addr); -- break; -- case 2: -- *val = readw(addr); -- break; -- default: -- *val = readl(addr); -- } -- -- return PCIBIOS_SUCCESSFUL; --} -- --static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn, -- int where, int size, u32 val) --{ -- void __iomem *addr; -- struct pci_sys_data *sys = bus->sysdata; -- struct gen_pci *pci = sys->private_data; -- -- addr = pci->cfg.ops->map_bus(bus, devfn, where); -- -- switch (size) { -- case 1: -- writeb(val, addr); -- 
break; -- case 2: -- writew(val, addr); -- break; -- default: -- writel(val, addr); -- } -- -- return PCIBIOS_SUCCESSFUL; --} -- - static struct pci_ops gen_pci_ops = { -- .read = gen_pci_config_read, -- .write = gen_pci_config_write, -+ .read = pci_generic_config_read, -+ .write = pci_generic_config_write, - }; - - static const struct of_device_id gen_pci_of_match[] = { -@@ -138,106 +99,50 @@ static const struct of_device_id gen_pci_of_match[] = { - }; - MODULE_DEVICE_TABLE(of, gen_pci_of_match); - --static int gen_pci_calc_io_offset(struct device *dev, -- struct of_pci_range *range, -- struct resource *res, -- resource_size_t *offset) --{ -- static atomic_t wins = ATOMIC_INIT(0); -- int err, idx, max_win; -- unsigned int window; -- -- if (!PAGE_ALIGNED(range->cpu_addr)) -- return -EINVAL; -- -- max_win = (IO_SPACE_LIMIT + 1) / SZ_64K; -- idx = atomic_inc_return(&wins); -- if (idx > max_win) -- return -ENOSPC; -- -- window = (idx - 1) * SZ_64K; -- err = pci_ioremap_io(window, range->cpu_addr); -- if (err) -- return err; -- -- of_pci_range_to_resource(range, dev->of_node, res); -- res->start = window; -- res->end = res->start + range->size - 1; -- *offset = window - range->pci_addr; -- return 0; --} -- --static int gen_pci_calc_mem_offset(struct device *dev, -- struct of_pci_range *range, -- struct resource *res, -- resource_size_t *offset) --{ -- of_pci_range_to_resource(range, dev->of_node, res); -- *offset = range->cpu_addr - range->pci_addr; -- return 0; --} -- - static void gen_pci_release_of_pci_ranges(struct gen_pci *pci) - { -- struct pci_host_bridge_window *win; -- -- list_for_each_entry(win, &pci->resources, list) -- release_resource(win->res); -- - pci_free_resource_list(&pci->resources); - } - - static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci) - { -- struct of_pci_range range; -- struct of_pci_range_parser parser; - int err, res_valid = 0; - struct device *dev = pci->host.dev.parent; - struct device_node *np = dev->of_node; -+ 
resource_size_t iobase; -+ struct resource_entry *win; - -- if (of_pci_range_parser_init(&parser, np)) { -- dev_err(dev, "missing \"ranges\" property\n"); -- return -EINVAL; -- } -- -- for_each_of_pci_range(&parser, &range) { -- struct resource *parent, *res; -- resource_size_t offset; -- u32 restype = range.flags & IORESOURCE_TYPE_BITS; -+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, -+ &iobase); -+ if (err) -+ return err; - -- res = devm_kmalloc(dev, sizeof(*res), GFP_KERNEL); -- if (!res) { -- err = -ENOMEM; -- goto out_release_res; -- } -+ resource_list_for_each_entry(win, &pci->resources) { -+ struct resource *parent, *res = win->res; - -- switch (restype) { -+ switch (resource_type(res)) { - case IORESOURCE_IO: - parent = &ioport_resource; -- err = gen_pci_calc_io_offset(dev, &range, res, &offset); -+ err = pci_remap_iospace(res, iobase); -+ if (err) { -+ dev_warn(dev, "error %d: failed to map resource %pR\n", -+ err, res); -+ continue; -+ } - break; - case IORESOURCE_MEM: - parent = &iomem_resource; -- err = gen_pci_calc_mem_offset(dev, &range, res, &offset); -- res_valid |= !(res->flags & IORESOURCE_PREFETCH || err); -+ res_valid |= !(res->flags & IORESOURCE_PREFETCH); - break; -+ case IORESOURCE_BUS: -+ pci->cfg.bus_range = res; - default: -- err = -EINVAL; - continue; - } - -- if (err) { -- dev_warn(dev, -- "error %d: failed to add resource [type 0x%x, %lld bytes]\n", -- err, restype, range.size); -- continue; -- } -- -- err = request_resource(parent, res); -+ err = devm_request_resource(dev, parent, res); - if (err) - goto out_release_res; -- -- pci_add_resource_offset(&pci->resources, res, offset); - } - - if (!res_valid) { -@@ -262,38 +167,30 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) - struct device *dev = pci->host.dev.parent; - struct device_node *np = dev->of_node; - -- if (of_pci_parse_bus_range(np, &pci->cfg.bus_range)) -- pci->cfg.bus_range = (struct resource) { -- .name = np->name, -- .start = 0, -- 
.end = 0xff, -- .flags = IORESOURCE_BUS, -- }; -- - err = of_address_to_resource(np, 0, &pci->cfg.res); - if (err) { - dev_err(dev, "missing \"reg\" property\n"); - return err; - } - -- pci->cfg.win = devm_kcalloc(dev, resource_size(&pci->cfg.bus_range), -+ /* Limit the bus-range to fit within reg */ -+ bus_max = pci->cfg.bus_range->start + -+ (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1; -+ pci->cfg.bus_range->end = min_t(resource_size_t, -+ pci->cfg.bus_range->end, bus_max); -+ -+ pci->cfg.win = devm_kcalloc(dev, resource_size(pci->cfg.bus_range), - sizeof(*pci->cfg.win), GFP_KERNEL); - if (!pci->cfg.win) - return -ENOMEM; - -- /* Limit the bus-range to fit within reg */ -- bus_max = pci->cfg.bus_range.start + -- (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1; -- pci->cfg.bus_range.end = min_t(resource_size_t, pci->cfg.bus_range.end, -- bus_max); -- - /* Map our Configuration Space windows */ - if (!devm_request_mem_region(dev, pci->cfg.res.start, - resource_size(&pci->cfg.res), - "Configuration Space")) - return -ENOMEM; - -- bus_range = &pci->cfg.bus_range; -+ bus_range = pci->cfg.bus_range; - for (busn = bus_range->start; busn <= bus_range->end; ++busn) { - u32 idx = busn - bus_range->start; - u32 sz = 1 << pci->cfg.ops->bus_shift; -@@ -305,18 +202,9 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) - return -ENOMEM; - } - -- /* Register bus resource */ -- pci_add_resource(&pci->resources, bus_range); - return 0; - } - --static int gen_pci_setup(int nr, struct pci_sys_data *sys) --{ -- struct gen_pci *pci = sys->private_data; -- list_splice_init(&pci->resources, &sys->resources); -- return 1; --} -- - static int gen_pci_probe(struct platform_device *pdev) - { - int err; -@@ -326,13 +214,7 @@ static int gen_pci_probe(struct platform_device *pdev) - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); -- struct hw_pci hw = { -- 
.nr_controllers = 1, -- .private_data = (void **)&pci, -- .setup = gen_pci_setup, -- .map_irq = of_irq_parse_and_map_pci, -- .ops = &gen_pci_ops, -- }; -+ struct pci_bus *bus, *child; - - if (!pci) - return -ENOMEM; -@@ -353,6 +235,7 @@ static int gen_pci_probe(struct platform_device *pdev) - - of_id = of_match_node(gen_pci_of_match, np); - pci->cfg.ops = of_id->data; -+ gen_pci_ops.map_bus = pci->cfg.ops->map_bus; - pci->host.dev.parent = dev; - INIT_LIST_HEAD(&pci->host.windows); - INIT_LIST_HEAD(&pci->resources); -@@ -369,7 +252,27 @@ static int gen_pci_probe(struct platform_device *pdev) - return err; - } - -- pci_common_init_dev(dev, &hw); -+ /* Do not reassign resources if probe only */ -+ if (!pci_has_flag(PCI_PROBE_ONLY)) -+ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); -+ -+ bus = pci_scan_root_bus(dev, 0, &gen_pci_ops, pci, &pci->resources); -+ if (!bus) { -+ dev_err(dev, "Scanning rootbus failed"); -+ return -ENODEV; -+ } -+ -+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); -+ -+ if (!pci_has_flag(PCI_PROBE_ONLY)) { -+ pci_bus_size_bridges(bus); -+ pci_bus_assign_resources(bus); -+ -+ list_for_each_entry(child, &bus->children, node) -+ pcie_bus_configure_settings(child); -+ } -+ -+ pci_bus_add_devices(bus); - return 0; - } - -diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c -index 34086ce..c1b5980 100644 ---- a/drivers/pci/host/pci-keystone-dw.c -+++ b/drivers/pci/host/pci-keystone-dw.c -@@ -70,7 +70,7 @@ static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, - *bit_pos = offset >> 3; - } - --u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp) -+phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp) - { - struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); - -@@ -104,14 +104,13 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d) - { - u32 offset, reg_offset, bit_pos; - struct keystone_pcie *ks_pcie; -- unsigned int irq = d->irq; - struct msi_desc *msi; - struct 
pcie_port *pp; - -- msi = irq_get_msi_desc(irq); -- pp = sys_to_pcie(msi->dev->bus->sysdata); -+ msi = irq_data_get_msi_desc(d); -+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); - ks_pcie = to_keystone_pcie(pp); -- offset = irq - irq_linear_revmap(pp->irq_domain, 0); -+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); - update_reg_offset_bit_pos(offset, ®_offset, &bit_pos); - - writel(BIT(bit_pos), -@@ -142,20 +141,19 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) - static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) - { - struct keystone_pcie *ks_pcie; -- unsigned int irq = d->irq; - struct msi_desc *msi; - struct pcie_port *pp; - u32 offset; - -- msi = irq_get_msi_desc(irq); -- pp = sys_to_pcie(msi->dev->bus->sysdata); -+ msi = irq_data_get_msi_desc(d); -+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); - ks_pcie = to_keystone_pcie(pp); -- offset = irq - irq_linear_revmap(pp->irq_domain, 0); -+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); - - /* Mask the end point if PVM implemented */ - if (IS_ENABLED(CONFIG_PCI_MSI)) { - if (msi->msi_attrib.maskbit) -- mask_msi_irq(d); -+ pci_msi_mask_irq(d); - } - - ks_dw_pcie_msi_clear_irq(pp, offset); -@@ -164,20 +162,19 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) - static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d) - { - struct keystone_pcie *ks_pcie; -- unsigned int irq = d->irq; - struct msi_desc *msi; - struct pcie_port *pp; - u32 offset; - -- msi = irq_get_msi_desc(irq); -- pp = sys_to_pcie(msi->dev->bus->sysdata); -+ msi = irq_data_get_msi_desc(d); -+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); - ks_pcie = to_keystone_pcie(pp); -- offset = irq - irq_linear_revmap(pp->irq_domain, 0); -+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); - - /* Mask the end point if PVM implemented */ - if (IS_ENABLED(CONFIG_PCI_MSI)) { - if (msi->msi_attrib.maskbit) -- unmask_msi_irq(d); -+ pci_msi_unmask_irq(d); - } - - ks_dw_pcie_msi_set_irq(pp, offset); -@@ 
-196,7 +193,6 @@ static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, - irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip, - handle_level_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - return 0; - } -@@ -205,7 +201,7 @@ const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = { - .map = ks_dw_pcie_msi_map, - }; - --int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip) -+int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip) - { - struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); - int i; -@@ -277,7 +273,6 @@ static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d, - irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip, - handle_level_irq); - irq_set_chip_data(irq, d->host_data); -- set_irq_flags(irq, IRQF_VALID); - - return 0; - } -@@ -327,7 +322,7 @@ static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt) - void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) - { - struct pcie_port *pp = &ks_pcie->pp; -- u32 start = pp->mem.start, end = pp->mem.end; -+ u32 start = pp->mem->start, end = pp->mem->end; - int i, tr_size; - - /* Disable BARs for inbound access */ -@@ -403,7 +398,7 @@ int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, - - addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); - -- return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val); -+ return dw_pcie_cfg_read(addr + where, size, val); - } - - int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, -@@ -415,7 +410,7 @@ int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, - - addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); - -- return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val); -+ return dw_pcie_cfg_write(addr + where, size, val); - } - - /** -diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h -index 1fc1fce..f0944e8 100644 ---- 
a/drivers/pci/host/pci-keystone.h -+++ b/drivers/pci/host/pci-keystone.h -@@ -37,7 +37,7 @@ struct keystone_pcie { - - /* Keystone DW specific MSI controller APIs/definitions */ - void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset); --u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp); -+phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); - - /* Keystone specific PCI controller APIs */ - void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); -@@ -55,4 +55,4 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq); - void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); - void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); - int ks_dw_pcie_msi_host_init(struct pcie_port *pp, -- struct msi_chip *chip); -+ struct msi_controller *chip); -diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c -new file mode 100644 -index 0000000..baa1232 ---- /dev/null -+++ b/drivers/pci/host/pci-layerscape.c -@@ -0,0 +1,729 @@ -+/* -+ * PCIe host controller driver for Freescale Layerscape SoCs -+ * -+ * Copyright (C) 2014 Freescale Semiconductor. -+ * -+ * Author: Minghuan Lian -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "pcie-designware.h" -+ -+/* PEX1/2 Misc Ports Status Register */ -+#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4) -+#define SCFG_PEXPMWRCR(pex_idx) (0x5c + (pex_idx) * 0x64) -+#define LTSSM_STATE_SHIFT 20 -+#define LTSSM_STATE_MASK 0x3f -+#define LTSSM_PCIE_L0 0x11 /* L0 state */ -+#define LTSSM_PCIE_L2_IDLE 0x15 /* L2 idle state */ -+ -+#define PCIE_SRIOV_OFFSET 0x178 -+ -+/* CS2 */ -+#define PCIE_CS2_OFFSET 0x1000 /* For PCIe without SR-IOV */ -+#define PCIE_ENABLE_CS2 0x80000000 /* For PCIe with SR-IOV */ -+ -+/* PEX Internal Configuration Registers */ -+#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ -+#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ -+#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */ -+#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */ -+ -+/* PEX LUT registers */ -+#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */ -+#define PCIE_LUT_CTRL0 0x7f8 -+#define PCIE_LUT_UDR(n) (0x800 + (n) * 8) -+#define PCIE_LUT_LDR(n) (0x804 + (n) * 8) -+#define PCIE_LUT_MASK_ALL 0xffff -+#define PCIE_LUT_DR_NUM 32 -+#define PCIE_LUT_ENABLE (1 << 31) -+ -+#define PCIE_PM_SCR 0x44 -+#define PCIE_PM_SCR_PMEEN 0x10 -+#define PCIE_PM_SCR_PMEPS_D0 0xfffc -+#define PCIE_PM_SCR_PMEPS_D3 0x3 -+#define PCIE_PM_SCR_PME_STATE 0x8000 -+ -+#define PCIE_PEX_DCR 0x78 -+#define PCIE_PEX_DCR_AUXPOWEREN 0x0400 -+ -+#define PCIE_PEX_SSR 0x8a -+#define PCIE_PEX_SSR_PDS 0x40 -+ -+#define PCIE_PEX_RCR 0x8c -+#define PCIE_PEX_RCR_PMEIE 0x0008 -+ -+#define PCIE_PEX_RSR 0x90 -+#define PCIE_PEX_PMES 0x00010000 -+ -+#define QIXIS_RST_FORCE_3 0x45 -+#define QIXIS_RST_FORCE_3_PCIESLOT 0xe0 -+ -+#define CPLD_RST_PCIE_SLOT 0x14 -+#define CPLD_RST_PCIESLOT 0x3 -+ -+#define PCIE_IATU_NUM 6 -+ -+struct 
ls_pcie; -+ -+struct ls_pcie_pm_data { -+ void __iomem *fpga; -+ void __iomem *cpld; -+}; -+ -+struct ls_pcie_pm_ops { -+ u32 (*get_link_state)(struct ls_pcie *pcie); -+ int (*send_turn_off_message)(struct ls_pcie *pcie); -+ void (*clear_turn_off_message)(struct ls_pcie *pcie); -+ void (*reset_slot)(struct ls_pcie *pcie, -+ struct ls_pcie_pm_data *pm_data); -+}; -+ -+struct ls_pcie_drvdata { -+ u32 lut_offset; -+ u32 ltssm_shift; -+ struct pcie_host_ops *ops; -+ struct ls_pcie_pm_ops *pm; -+}; -+ -+struct ls_pcie { -+ struct list_head list_node; -+ void __iomem *dbi; -+ void __iomem *lut; -+ struct regmap *scfg; -+ struct pcie_port pp; -+ const struct ls_pcie_drvdata *drvdata; -+ struct ls_pcie_pm_data pm_data; -+ int index; -+ const u32 *avail_streamids; -+ int streamid_index; -+ int pme_irq; -+ bool in_slot; -+}; -+ -+#define to_ls_pcie(x) container_of(x, struct ls_pcie, pp) -+ -+static void ls_pcie_host_init(struct pcie_port *pp); -+ -+u32 set_pcie_streamid_translation(struct pci_dev *pdev, u32 devid) -+{ -+ u32 index, streamid; -+ struct pcie_port *pp = pdev->bus->sysdata; -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ -+ if (!pcie->avail_streamids || !pcie->streamid_index) -+ return ~(u32)0; -+ -+ index = --pcie->streamid_index; -+ /* mask is set as all zeroes, want to match all bits */ -+ iowrite32((devid << 16), pcie->lut + PCIE_LUT_UDR(index)); -+ streamid = be32_to_cpup(&pcie->avail_streamids[index]); -+ iowrite32(streamid | PCIE_LUT_ENABLE, pcie->lut + PCIE_LUT_LDR(index)); -+ -+ return streamid; -+} -+ -+LIST_HEAD(hose_list); -+ -+static bool ls_pcie_is_bridge(struct ls_pcie *pcie) -+{ -+ u32 header_type; -+ -+ header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE); -+ header_type &= 0x7f; -+ -+ return header_type == PCI_HEADER_TYPE_BRIDGE; -+} -+ -+/* Clear multi-function bit */ -+static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) -+{ -+ iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE); -+} -+ -+/* Fix class value */ -+static void 
ls_pcie_fix_class(struct ls_pcie *pcie) -+{ -+ iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); -+} -+ -+/* Drop MSG TLP except for Vendor MSG */ -+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) -+{ -+ u32 val; -+ -+ val = ioread32(pcie->dbi + PCIE_STRFMR1); -+ val &= 0xDFFFFFFF; -+ iowrite32(val, pcie->dbi + PCIE_STRFMR1); -+} -+ -+/* Disable all bars in RC mode */ -+static void ls_pcie_disable_bars(struct ls_pcie *pcie) -+{ -+ u32 header; -+ -+ header = ioread32(pcie->dbi + PCIE_SRIOV_OFFSET); -+ if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_SRIOV) { -+ iowrite32(PCIE_ENABLE_CS2, pcie->lut + PCIE_LUT_CTRL0); -+ iowrite32(0, pcie->dbi + PCI_BASE_ADDRESS_0); -+ iowrite32(0, pcie->dbi + PCI_BASE_ADDRESS_1); -+ iowrite32(0, pcie->dbi + PCI_ROM_ADDRESS1); -+ iowrite32(0, pcie->lut + PCIE_LUT_CTRL0); -+ } else { -+ iowrite32(0, -+ pcie->dbi + PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0); -+ iowrite32(0, -+ pcie->dbi + PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1); -+ iowrite32(0, -+ pcie->dbi + PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1); -+ } -+} -+ -+static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) -+{ -+ int i; -+ -+ for (i = 0; i < PCIE_IATU_NUM; i++) -+ dw_pcie_disable_outbound_atu(&pcie->pp, i); -+} -+ -+/* Forward error response of outbound non-posted requests */ -+static void ls_pcie_fix_error_response(struct ls_pcie *pcie) -+{ -+ iowrite32(PCIE_ABSERR_SETTING, pcie->dbi + PCIE_ABSERR); -+} -+ -+static int ls1021_pcie_link_up(struct pcie_port *pp) -+{ -+ u32 state; -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ -+ if (!pcie->scfg) -+ return 0; -+ -+ regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); -+ state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; -+ -+ if (state < LTSSM_PCIE_L0) -+ return 0; -+ -+ return 1; -+} -+ -+static u32 ls1021_pcie_get_link_state(struct ls_pcie *pcie) -+{ -+ u32 state; -+ -+ if (!pcie->scfg) -+ return 0; -+ -+ regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); -+ state = (state >> 
LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; -+ -+ return state; -+} -+ -+static int ls1021_pcie_send_turn_off_message(struct ls_pcie *pcie) -+{ -+ u32 val; -+ -+ if (!pcie->scfg) -+ return -EINVAL; -+ -+ /* Send Turn_off message */ -+ regmap_read(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), &val); -+ val |= 0x80000000; -+ regmap_write(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), val); -+ -+ return 0; -+} -+ -+static void ls1021_pcie_clear_turn_off_message(struct ls_pcie *pcie) -+{ -+ u32 val; -+ -+ if (!pcie->scfg) -+ return; -+ -+ /* Clear Turn_off message */ -+ regmap_read(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), &val); -+ val &= 0x00000000; -+ regmap_write(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), val); -+} -+ -+static void ls1021_pcie_reset_slot(struct ls_pcie *pcie, -+ struct ls_pcie_pm_data *pm_data) -+{ -+ u8 val; -+ -+ /* Try to reset PCIe slot to relink EP */ -+ if (pm_data->fpga) { -+ /* PULL DOWN PCIe RST# */ -+ val = ioread8(pm_data->fpga + QIXIS_RST_FORCE_3); -+ val |= QIXIS_RST_FORCE_3_PCIESLOT; -+ iowrite8(val, pm_data->fpga + QIXIS_RST_FORCE_3); -+ -+ /* PULL ON PCIe RST# */ -+ val = ioread8(pm_data->fpga + QIXIS_RST_FORCE_3); -+ val &= 0x0; -+ iowrite8(val, pm_data->fpga + QIXIS_RST_FORCE_3); -+ } -+ -+ if (pm_data->cpld) { -+ /* PULL DOWN PCIe RST# */ -+ val = ioread8(pm_data->cpld + CPLD_RST_PCIE_SLOT); -+ val &= 0x0; -+ iowrite8(val, pm_data->cpld + CPLD_RST_PCIE_SLOT); -+ -+ /* PULL ON PCIe RST# */ -+ val = ioread8(pm_data->cpld + CPLD_RST_PCIE_SLOT); -+ val |= CPLD_RST_PCIESLOT; -+ iowrite8(val, pm_data->cpld + CPLD_RST_PCIE_SLOT); -+ } -+} -+ -+static void ls1021_pcie_host_init(struct pcie_port *pp) -+{ -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ u32 index[2]; -+ -+ pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node, -+ "fsl,pcie-scfg"); -+ if (IS_ERR(pcie->scfg)) { -+ dev_err(pp->dev, "No syscfg phandle specified\n"); -+ pcie->scfg = NULL; -+ return; -+ } -+ -+ if (of_property_read_u32_array(pp->dev->of_node, -+ "fsl,pcie-scfg", index, 2)) { 
-+ pcie->scfg = NULL; -+ return; -+ } -+ pcie->index = index[1]; -+ -+ ls_pcie_host_init(pp); -+ -+ dw_pcie_setup_rc(pp); -+} -+ -+static int ls_pcie_link_up(struct pcie_port *pp) -+{ -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ u32 state, offset; -+ -+ if (of_get_property(pp->dev->of_node, "fsl,lut_diff", NULL)) -+ offset = 0x407fc; -+ else -+ offset = PCIE_LUT_DBG; -+ -+ state = (ioread32(pcie->lut + offset) >> -+ pcie->drvdata->ltssm_shift) & -+ LTSSM_STATE_MASK; -+ -+ if (state < LTSSM_PCIE_L0) -+ return 0; -+ -+ return 1; -+} -+ -+static u32 ls_pcie_get_link_state(struct ls_pcie *pcie) -+{ -+ return (ioread32(pcie->lut + PCIE_LUT_DBG) >> -+ pcie->drvdata->ltssm_shift) & -+ LTSSM_STATE_MASK; -+} -+ -+static void ls_pcie_host_init(struct pcie_port *pp) -+{ -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ -+ iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); -+ ls_pcie_fix_class(pcie); -+ ls_pcie_clear_multifunction(pcie); -+ ls_pcie_drop_msg_tlp(pcie); -+ iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); -+ -+ ls_pcie_disable_bars(pcie); -+ ls_pcie_disable_outbound_atus(pcie); -+ ls_pcie_fix_error_response(pcie); -+} -+ -+static int ls_pcie_msi_host_init(struct pcie_port *pp, -+ struct msi_controller *chip) -+{ -+ struct device_node *msi_node; -+ struct device_node *np = pp->dev->of_node; -+ -+ /* -+ * The MSI domain is set by the generic of_msi_configure(). This -+ * .msi_host_init() function keeps us from doing the default MSI -+ * domain setup in dw_pcie_host_init() and also enforces the -+ * requirement that "msi-parent" exists. 
-+ */ -+ msi_node = of_parse_phandle(np, "msi-parent", 0); -+ if (!msi_node) { -+ dev_err(pp->dev, "failed to find msi-parent\n"); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static struct pcie_host_ops ls1021_pcie_host_ops = { -+ .link_up = ls1021_pcie_link_up, -+ .host_init = ls1021_pcie_host_init, -+ .msi_host_init = ls_pcie_msi_host_init, -+}; -+ -+static struct ls_pcie_pm_ops ls1021_pcie_host_pm_ops = { -+ .get_link_state = &ls1021_pcie_get_link_state, -+ .send_turn_off_message = &ls1021_pcie_send_turn_off_message, -+ .clear_turn_off_message = &ls1021_pcie_clear_turn_off_message, -+ .reset_slot = &ls1021_pcie_reset_slot, -+}; -+ -+static struct pcie_host_ops ls_pcie_host_ops = { -+ .link_up = ls_pcie_link_up, -+ .host_init = ls_pcie_host_init, -+ .msi_host_init = ls_pcie_msi_host_init, -+}; -+ -+static struct ls_pcie_pm_ops ls_pcie_host_pm_ops = { -+ .get_link_state = &ls_pcie_get_link_state, -+}; -+ -+static struct ls_pcie_drvdata ls1021_drvdata = { -+ .ops = &ls1021_pcie_host_ops, -+ .pm = &ls1021_pcie_host_pm_ops, -+}; -+ -+static struct ls_pcie_drvdata ls1043_drvdata = { -+ .lut_offset = 0x10000, -+ .ltssm_shift = 24, -+ .ops = &ls_pcie_host_ops, -+ .pm = &ls_pcie_host_pm_ops, -+}; -+ -+static struct ls_pcie_drvdata ls2080_drvdata = { -+ .lut_offset = 0x80000, -+ .ltssm_shift = 0, -+ .ops = &ls_pcie_host_ops, -+ .pm = &ls_pcie_host_pm_ops, -+}; -+ -+static const struct of_device_id ls_pcie_of_match[] = { -+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, -+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, -+ { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, -+ { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, -+ { }, -+}; -+MODULE_DEVICE_TABLE(of, ls_pcie_of_match); -+ -+static void ls_pcie_host_hack_pm_init(struct ls_pcie *pcie) -+{ -+ struct device_node *np; -+ struct ls_pcie_pm_data *pm_data = &pcie->pm_data; -+ -+ np = of_find_compatible_node(NULL, NULL, "fsl,ls1021aqds-fpga"); -+ if (np) -+ 
pm_data->fpga = of_iomap(np, 0); -+ -+ of_node_put(np); -+ -+ np = of_find_compatible_node(NULL, NULL, "fsl,ls1021atwr-cpld"); -+ if (np) -+ pm_data->cpld = of_iomap(np, 0); -+ -+ of_node_put(np); -+} -+ -+static irqreturn_t ls_pcie_pme_irq_handler(int irq, void *data) -+{ -+ struct pcie_port *pp = data; -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ u32 val; -+ -+ if (pcie->drvdata->pm->clear_turn_off_message) -+ pcie->drvdata->pm->clear_turn_off_message(pcie); -+ -+ /* Clear Host root PME_STATE bit */ -+ val = ioread32(pcie->dbi + PCIE_PEX_RSR); -+ val |= PCIE_PEX_PMES; -+ iowrite32(val, pcie->dbi + PCIE_PEX_RSR); -+ -+ return IRQ_HANDLED; -+} -+ -+static int ls_pcie_host_pme_init(struct ls_pcie *pcie, -+ struct platform_device *pdev) -+{ -+ struct pcie_port *pp; -+ int ret; -+ u16 val; -+ -+ pp = &pcie->pp; -+ -+ if (dw_pcie_link_up(&pcie->pp)) -+ pcie->in_slot = true; -+ else -+ pcie->in_slot = false; -+ -+ pcie->pme_irq = platform_get_irq_byname(pdev, "pme"); -+ if (pcie->pme_irq < 0) { -+ dev_err(&pdev->dev, -+ "failed to get PME IRQ: %d\n", pcie->pme_irq); -+ return pcie->pme_irq; -+ } -+ -+ ret = devm_request_irq(pp->dev, pcie->pme_irq, ls_pcie_pme_irq_handler, -+ IRQF_SHARED, "ls-pcie-pme", pp); -+ if (ret) { -+ dev_err(pp->dev, "Failed to request pme irq\n"); -+ return ret; -+ } -+ -+ ls_pcie_host_hack_pm_init(pcie); -+ -+ /* AUX Power PM Enable */ -+ val = ioread16(pcie->dbi + PCIE_PEX_DCR); -+ val |= PCIE_PEX_DCR_AUXPOWEREN; -+ iowrite16(val, pcie->dbi + PCIE_PEX_DCR); -+ -+ /* Enable PME message */ -+ val = ioread16(pcie->dbi + PCIE_PM_SCR); -+ val |= PCIE_PM_SCR_PMEEN; -+ iowrite16(val, pcie->dbi + PCIE_PM_SCR); -+ -+ /* Clear Host PME_STATE bit */ -+ val = ioread16(pcie->dbi + PCIE_PM_SCR); -+ val |= PCIE_PM_SCR_PME_STATE; -+ iowrite16(val, pcie->dbi + PCIE_PM_SCR); -+ -+ /* Enable Host %d interrupt */ -+ val = ioread16(pcie->dbi + PCIE_PEX_RCR); -+ val |= PCIE_PEX_RCR_PMEIE; -+ iowrite16(val, pcie->dbi + PCIE_PEX_RCR); -+ -+ return 0; -+} -+ -+static 
int __init ls_add_pcie_port(struct pcie_port *pp, -+ struct platform_device *pdev) -+{ -+ int ret; -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ -+ pp->dev = &pdev->dev; -+ pp->dbi_base = pcie->dbi; -+ pp->ops = pcie->drvdata->ops; -+ -+ ret = dw_pcie_host_init(pp); -+ if (ret) { -+ dev_err(pp->dev, "failed to initialize host\n"); -+ return ret; -+ } -+ -+ ret = ls_pcie_host_pme_init(pcie, pdev); -+ if (ret) -+ dev_warn(pp->dev, "failed to initialize PME\n"); -+ -+ return 0; -+} -+ -+static int ls_pcie_probe(struct platform_device *pdev) -+{ -+ const struct of_device_id *match; -+ struct ls_pcie *pcie; -+ struct resource *dbi_base; -+ int ret; -+ -+ match = of_match_device(ls_pcie_of_match, &pdev->dev); -+ if (!match) -+ return -ENODEV; -+ -+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); -+ if (!pcie) -+ return -ENOMEM; -+ -+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); -+ pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base); -+ if (IS_ERR(pcie->dbi)) { -+ dev_err(&pdev->dev, "missing *regs* space\n"); -+ return PTR_ERR(pcie->dbi); -+ } -+ -+ pcie->drvdata = match->data; -+ pcie->lut = pcie->dbi + pcie->drvdata->lut_offset; -+ /* Disable LDR zero */ -+ iowrite32(0, pcie->lut + PCIE_LUT_LDR(0)); -+ -+ if (!ls_pcie_is_bridge(pcie)) -+ return -ENODEV; -+ -+ if (of_device_is_compatible(pdev->dev.of_node, "fsl,ls2085a-pcie") || -+ of_device_is_compatible(pdev->dev.of_node, "fsl,ls2080a-pcie")) { -+ int len; -+ const u32 *prop; -+ struct device_node *np; -+ -+ np = pdev->dev.of_node; -+ prop = (u32 *)of_get_property(np, "available-stream-ids", &len); -+ if (prop) { -+ pcie->avail_streamids = prop; -+ pcie->streamid_index = len/sizeof(u32); -+ } else -+ dev_err(&pdev->dev, "PCIe endpoint partitioning not possible\n"); -+ } -+ -+ ret = ls_add_pcie_port(&pcie->pp, pdev); -+ if (ret < 0) -+ return ret; -+ -+ list_add_tail(&pcie->list_node, &hose_list); -+ -+ platform_set_drvdata(pdev, pcie); -+ -+ return 0; -+} -+ -+#ifdef 
CONFIG_PM_SLEEP -+static int ls_pcie_pm_do_suspend(struct ls_pcie *pcie) -+{ -+ u32 state; -+ int i = 0; -+ int ret; -+ u16 val; -+ -+ if (!pcie->in_slot) -+ return 0; -+ -+ if (!pcie->drvdata->pm->send_turn_off_message) -+ return 0; -+ -+ ret = pcie->drvdata->pm->send_turn_off_message(pcie); -+ if (ret) -+ return -EINVAL; -+ -+ while (i < 100) { -+ state = pcie->drvdata->pm->get_link_state(pcie); -+ if (state == LTSSM_PCIE_L2_IDLE) -+ break; -+ i++; -+ mdelay(1); -+ } -+ -+ /* Put RC in D3 */ -+ val = ioread16(pcie->dbi + PCIE_PM_SCR); -+ val |= PCIE_PM_SCR_PMEPS_D3; -+ iowrite16(val, pcie->dbi + PCIE_PM_SCR); -+ -+ mdelay(10); -+ -+ return 0; -+} -+ -+static int ls_pcie_pm_do_resume(struct ls_pcie *pcie) -+{ -+ u32 state; -+ int i = 0; -+ u16 val; -+ struct pcie_port *pp = &pcie->pp; -+ -+ if (!pcie->in_slot) -+ return 0; -+ -+ dw_pcie_setup_rc(pp); -+ ls_pcie_host_init(pp); -+ -+ /* Put RC in D0 */ -+ val = ioread16(pcie->dbi + PCIE_PM_SCR); -+ val &= PCIE_PM_SCR_PMEPS_D0; -+ iowrite16(val, pcie->dbi + PCIE_PM_SCR); -+ -+ mdelay(10); -+ -+ state = pcie->drvdata->pm->get_link_state(pcie); -+ if (state == LTSSM_PCIE_L0) -+ return 0; -+ -+ if (!pcie->drvdata->pm->reset_slot) -+ return -EINVAL; -+ -+ pcie->drvdata->pm->reset_slot(pcie, &pcie->pm_data); -+ -+ while (i < 100) { -+ state = pcie->drvdata->pm->get_link_state(pcie); -+ if (state == LTSSM_PCIE_L0) -+ return 0; -+ i++; -+ mdelay(1); -+ } -+ -+ return -EINVAL; -+} -+ -+static int ls_pcie_pm_suspend(void) -+{ -+ struct ls_pcie *hose, *tmp; -+ -+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) -+ ls_pcie_pm_do_suspend(hose); -+ -+ return 0; -+} -+ -+static void ls_pcie_pm_resume(void) -+{ -+ struct ls_pcie *hose, *tmp; -+ -+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) -+ ls_pcie_pm_do_resume(hose); -+} -+ -+static struct syscore_ops ls_pcie_syscore_pm_ops = { -+ .suspend = ls_pcie_pm_suspend, -+ .resume = ls_pcie_pm_resume, -+}; -+#endif /* CONFIG_PM_SLEEP */ -+ -+static struct 
platform_driver ls_pcie_driver = { -+ .probe = ls_pcie_probe, -+ .driver = { -+ .name = "layerscape-pcie", -+ .of_match_table = ls_pcie_of_match, -+ }, -+}; -+ -+static int __init fsl_pci_init(void) -+{ -+#ifdef CONFIG_PM_SLEEP -+ register_syscore_ops(&ls_pcie_syscore_pm_ops); -+#endif -+ return platform_driver_register(&ls_pcie_driver); -+} -+module_init(fsl_pci_init); -+ -+MODULE_AUTHOR("Minghuan Lian "); -+MODULE_DESCRIPTION("Freescale Layerscape PCIe host controller driver"); -+MODULE_LICENSE("GPL v2"); -diff --git a/drivers/pci/host/pci-layerscape.h b/drivers/pci/host/pci-layerscape.h -new file mode 100644 -index 0000000..e90e114 ---- /dev/null -+++ b/drivers/pci/host/pci-layerscape.h -@@ -0,0 +1,13 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ */ -+ -+#ifndef _PCI_LAYERSCAPE_H -+#define _PCI_LAYERSCAPE_H -+ -+/* function for setting up stream id to device id translation */ -+u32 set_pcie_streamid_translation(struct pci_dev *pdev, u32 devid); -+ -+#endif /* _PCI_LAYERSCAPE_H */ -diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c -index b1315e1..94b42d1 100644 ---- a/drivers/pci/host/pci-mvebu.c -+++ b/drivers/pci/host/pci-mvebu.c -@@ -99,11 +99,9 @@ struct mvebu_pcie_port; - struct mvebu_pcie { - struct platform_device *pdev; - struct mvebu_pcie_port *ports; -- struct msi_chip *msi; -+ struct msi_controller *msi; - struct resource io; -- char io_name[30]; - struct resource realio; -- char mem_name[30]; - struct resource mem; - struct resource busn; - int nports; -@@ -722,18 +720,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) - { - struct mvebu_pcie *pcie = sys_to_pcie(sys); - int i; -- int domain = 0; - --#ifdef CONFIG_PCI_DOMAINS -- domain = sys->domain; --#endif -- -- snprintf(pcie->mem_name, sizeof(pcie->mem_name), "PCI MEM %04x", -- domain); -- pcie->mem.name 
= pcie->mem_name; -- -- snprintf(pcie->io_name, sizeof(pcie->io_name), "PCI I/O %04x", domain); -- pcie->realio.name = pcie->io_name; -+ pcie->mem.name = "PCI MEM"; -+ pcie->realio.name = "PCI I/O"; - - if (request_resource(&iomem_resource, &pcie->mem)) - return 0; -diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c -index 19bb19c..971d8d7 100644 ---- a/drivers/pci/host/pci-tegra.c -+++ b/drivers/pci/host/pci-tegra.c -@@ -238,7 +238,7 @@ - ) - - struct tegra_msi { -- struct msi_chip chip; -+ struct msi_controller chip; - DECLARE_BITMAP(used, INT_PCI_MSI_NR); - struct irq_domain *domain; - unsigned long pages; -@@ -259,7 +259,7 @@ struct tegra_pcie_soc_data { - bool has_gen2; - }; - --static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip) -+static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip) - { - return container_of(chip, struct tegra_msi, chip); - } -@@ -1280,8 +1280,8 @@ static irqreturn_t tegra_pcie_msi_irq(int irq, void *data) - return processed > 0 ? 
IRQ_HANDLED : IRQ_NONE; - } - --static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, -- struct msi_desc *desc) -+static int tegra_msi_setup_irq(struct msi_controller *chip, -+ struct pci_dev *pdev, struct msi_desc *desc) - { - struct tegra_msi *msi = to_tegra_msi(chip); - struct msi_msg msg; -@@ -1305,12 +1305,13 @@ static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, - msg.address_hi = 0; - msg.data = hwirq; - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - - return 0; - } - --static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) -+static void tegra_msi_teardown_irq(struct msi_controller *chip, -+ unsigned int irq) - { - struct tegra_msi *msi = to_tegra_msi(chip); - struct irq_data *d = irq_get_irq_data(irq); -@@ -1322,10 +1323,10 @@ static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) - - static struct irq_chip tegra_msi_irq_chip = { - .name = "Tegra PCIe MSI", -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, -@@ -1333,7 +1334,6 @@ static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, - { - irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - tegra_cpuidle_pcie_irqs_in_use(); - -diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c -new file mode 100644 -index 0000000..8e559d1 ---- /dev/null -+++ b/drivers/pci/host/pci-xgene-msi.c -@@ -0,0 +1,595 @@ -+/* -+ * APM X-Gene MSI Driver -+ * -+ * Copyright (c) 2014, Applied Micro Circuits Corporation -+ * Author: Tanmay Inamdar -+ * Duc Dang -+ * -+ * This program is free 
software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MSI_IR0 0x000000 -+#define MSI_INT0 0x800000 -+#define IDX_PER_GROUP 8 -+#define IRQS_PER_IDX 16 -+#define NR_HW_IRQS 16 -+#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) -+ -+struct xgene_msi_group { -+ struct xgene_msi *msi; -+ int gic_irq; -+ u32 msi_grp; -+}; -+ -+struct xgene_msi { -+ struct device_node *node; -+ struct msi_controller mchip; -+ struct irq_domain *domain; -+ u64 msi_addr; -+ void __iomem *msi_regs; -+ unsigned long *bitmap; -+ struct mutex bitmap_lock; -+ struct xgene_msi_group *msi_groups; -+ int num_cpus; -+}; -+ -+/* Global data */ -+static struct xgene_msi xgene_msi_ctrl; -+ -+static struct irq_chip xgene_msi_top_irq_chip = { -+ .name = "X-Gene1 MSI", -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, -+}; -+ -+static struct msi_domain_info xgene_msi_domain_info = { -+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | -+ MSI_FLAG_PCI_MSIX), -+ .chip = &xgene_msi_top_irq_chip, -+}; -+ -+/* -+ * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where -+ * n is group number (0..F), x is index of registers in each group (0..7) -+ * The register layout is as follows: -+ * MSI0IR0 base_addr -+ * MSI0IR1 base_addr + 0x10000 -+ * ... ... 
-+ * MSI0IR6 base_addr + 0x60000 -+ * MSI0IR7 base_addr + 0x70000 -+ * MSI1IR0 base_addr + 0x80000 -+ * MSI1IR1 base_addr + 0x90000 -+ * ... ... -+ * MSI1IR7 base_addr + 0xF0000 -+ * MSI2IR0 base_addr + 0x100000 -+ * ... ... -+ * MSIFIR0 base_addr + 0x780000 -+ * MSIFIR1 base_addr + 0x790000 -+ * ... ... -+ * MSIFIR7 base_addr + 0x7F0000 -+ * MSIINT0 base_addr + 0x800000 -+ * MSIINT1 base_addr + 0x810000 -+ * ... ... -+ * MSIINTF base_addr + 0x8F0000 -+ * -+ * Each index register supports 16 MSI vectors (0..15) to generate interrupt. -+ * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination -+ * registers. -+ * -+ * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate -+ * the MSI pending status caused by 1 of its 8 index registers. -+ */ -+ -+/* MSInIRx read helper */ -+static u32 xgene_msi_ir_read(struct xgene_msi *msi, -+ u32 msi_grp, u32 msir_idx) -+{ -+ return readl_relaxed(msi->msi_regs + MSI_IR0 + -+ (msi_grp << 19) + (msir_idx << 16)); -+} -+ -+/* MSIINTn read helper */ -+static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) -+{ -+ return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); -+} -+ -+/* -+ * With 2048 MSI vectors supported, the MSI message can be constructed using -+ * following scheme: -+ * - Divide into 8 256-vector groups -+ * Group 0: 0-255 -+ * Group 1: 256-511 -+ * Group 2: 512-767 -+ * ... -+ * Group 7: 1792-2047 -+ * - Each 256-vector group is divided into 16 16-vector groups -+ * As an example: 16 16-vector groups for 256-vector group 0-255 is -+ * Group 0: 0-15 -+ * Group 1: 16-32 -+ * ... 
-+ * Group 15: 240-255 -+ * - The termination address of MSI vector in 256-vector group n and 16-vector -+ * group x is the address of MSIxIRn -+ * - The data for MSI vector in 16-vector group x is x -+ */ -+static u32 hwirq_to_reg_set(unsigned long hwirq) -+{ -+ return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); -+} -+ -+static u32 hwirq_to_group(unsigned long hwirq) -+{ -+ return (hwirq % NR_HW_IRQS); -+} -+ -+static u32 hwirq_to_msi_data(unsigned long hwirq) -+{ -+ return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); -+} -+ -+static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -+{ -+ struct xgene_msi *msi = irq_data_get_irq_chip_data(data); -+ u32 reg_set = hwirq_to_reg_set(data->hwirq); -+ u32 group = hwirq_to_group(data->hwirq); -+ u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); -+ -+ msg->address_hi = upper_32_bits(target_addr); -+ msg->address_lo = lower_32_bits(target_addr); -+ msg->data = hwirq_to_msi_data(data->hwirq); -+} -+ -+/* -+ * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain -+ * the expected behaviour of .set_affinity for each MSI interrupt, the 16 -+ * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs -+ * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another -+ * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a -+ * consequence, the total MSI vectors that X-Gene v1 supports will be -+ * reduced to 256 (2048/8) vectors. 
-+ */ -+static int hwirq_to_cpu(unsigned long hwirq) -+{ -+ return (hwirq % xgene_msi_ctrl.num_cpus); -+} -+ -+static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) -+{ -+ return (hwirq - hwirq_to_cpu(hwirq)); -+} -+ -+static int xgene_msi_set_affinity(struct irq_data *irqdata, -+ const struct cpumask *mask, bool force) -+{ -+ int target_cpu = cpumask_first(mask); -+ int curr_cpu; -+ -+ curr_cpu = hwirq_to_cpu(irqdata->hwirq); -+ if (curr_cpu == target_cpu) -+ return IRQ_SET_MASK_OK_DONE; -+ -+ /* Update MSI number to target the new CPU */ -+ irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; -+ -+ return IRQ_SET_MASK_OK; -+} -+ -+static struct irq_chip xgene_msi_bottom_irq_chip = { -+ .name = "MSI", -+ .irq_set_affinity = xgene_msi_set_affinity, -+ .irq_compose_msi_msg = xgene_compose_msi_msg, -+}; -+ -+static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *args) -+{ -+ struct xgene_msi *msi = domain->host_data; -+ int msi_irq; -+ -+ mutex_lock(&msi->bitmap_lock); -+ -+ msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, -+ msi->num_cpus, 0); -+ if (msi_irq < NR_MSI_VEC) -+ bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); -+ else -+ msi_irq = -ENOSPC; -+ -+ mutex_unlock(&msi->bitmap_lock); -+ -+ if (msi_irq < 0) -+ return msi_irq; -+ -+ irq_domain_set_info(domain, virq, msi_irq, -+ &xgene_msi_bottom_irq_chip, domain->host_data, -+ handle_simple_irq, NULL, NULL); -+ -+ return 0; -+} -+ -+static void xgene_irq_domain_free(struct irq_domain *domain, -+ unsigned int virq, unsigned int nr_irqs) -+{ -+ struct irq_data *d = irq_domain_get_irq_data(domain, virq); -+ struct xgene_msi *msi = irq_data_get_irq_chip_data(d); -+ u32 hwirq; -+ -+ mutex_lock(&msi->bitmap_lock); -+ -+ hwirq = hwirq_to_canonical_hwirq(d->hwirq); -+ bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); -+ -+ mutex_unlock(&msi->bitmap_lock); -+ -+ irq_domain_free_irqs_parent(domain, virq, nr_irqs); -+} 
-+ -+static const struct irq_domain_ops msi_domain_ops = { -+ .alloc = xgene_irq_domain_alloc, -+ .free = xgene_irq_domain_free, -+}; -+ -+static int xgene_allocate_domains(struct xgene_msi *msi) -+{ -+ msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC, -+ &msi_domain_ops, msi); -+ if (!msi->domain) -+ return -ENOMEM; -+ -+ msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node, -+ &xgene_msi_domain_info, -+ msi->domain); -+ -+ if (!msi->mchip.domain) { -+ irq_domain_remove(msi->domain); -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+static void xgene_free_domains(struct xgene_msi *msi) -+{ -+ if (msi->mchip.domain) -+ irq_domain_remove(msi->mchip.domain); -+ if (msi->domain) -+ irq_domain_remove(msi->domain); -+} -+ -+static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) -+{ -+ int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long); -+ -+ xgene_msi->bitmap = kzalloc(size, GFP_KERNEL); -+ if (!xgene_msi->bitmap) -+ return -ENOMEM; -+ -+ mutex_init(&xgene_msi->bitmap_lock); -+ -+ xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, -+ sizeof(struct xgene_msi_group), -+ GFP_KERNEL); -+ if (!xgene_msi->msi_groups) -+ return -ENOMEM; -+ -+ return 0; -+} -+ -+static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc) -+{ -+ struct irq_chip *chip = irq_desc_get_chip(desc); -+ struct xgene_msi_group *msi_groups; -+ struct xgene_msi *xgene_msi; -+ unsigned int virq; -+ int msir_index, msir_val, hw_irq; -+ u32 intr_index, grp_select, msi_grp; -+ -+ chained_irq_enter(chip, desc); -+ -+ msi_groups = irq_desc_get_handler_data(desc); -+ xgene_msi = msi_groups->msi; -+ msi_grp = msi_groups->msi_grp; -+ -+ /* -+ * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt -+ * If bit x of this register is set (x is 0..7), one or more interupts -+ * corresponding to MSInIRx is set. 
-+ */ -+ grp_select = xgene_msi_int_read(xgene_msi, msi_grp); -+ while (grp_select) { -+ msir_index = ffs(grp_select) - 1; -+ /* -+ * Calculate MSInIRx address to read to check for interrupts -+ * (refer to termination address and data assignment -+ * described in xgene_compose_msi_msg() ) -+ */ -+ msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); -+ while (msir_val) { -+ intr_index = ffs(msir_val) - 1; -+ /* -+ * Calculate MSI vector number (refer to the termination -+ * address and data assignment described in -+ * xgene_compose_msi_msg function) -+ */ -+ hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * -+ NR_HW_IRQS) + msi_grp; -+ /* -+ * As we have multiple hw_irq that maps to single MSI, -+ * always look up the virq using the hw_irq as seen from -+ * CPU0 -+ */ -+ hw_irq = hwirq_to_canonical_hwirq(hw_irq); -+ virq = irq_find_mapping(xgene_msi->domain, hw_irq); -+ WARN_ON(!virq); -+ if (virq != 0) -+ generic_handle_irq(virq); -+ msir_val &= ~(1 << intr_index); -+ } -+ grp_select &= ~(1 << msir_index); -+ -+ if (!grp_select) { -+ /* -+ * We handled all interrupts happened in this group, -+ * resample this group MSI_INTx register in case -+ * something else has been made pending in the meantime -+ */ -+ grp_select = xgene_msi_int_read(xgene_msi, msi_grp); -+ } -+ } -+ -+ chained_irq_exit(chip, desc); -+} -+ -+static int xgene_msi_remove(struct platform_device *pdev) -+{ -+ int virq, i; -+ struct xgene_msi *msi = platform_get_drvdata(pdev); -+ -+ for (i = 0; i < NR_HW_IRQS; i++) { -+ virq = msi->msi_groups[i].gic_irq; -+ if (virq != 0) { -+ irq_set_chained_handler(virq, NULL); -+ irq_set_handler_data(virq, NULL); -+ } -+ } -+ kfree(msi->msi_groups); -+ -+ kfree(msi->bitmap); -+ msi->bitmap = NULL; -+ -+ xgene_free_domains(msi); -+ -+ return 0; -+} -+ -+static int xgene_msi_hwirq_alloc(unsigned int cpu) -+{ -+ struct xgene_msi *msi = &xgene_msi_ctrl; -+ struct xgene_msi_group *msi_group; -+ cpumask_var_t mask; -+ int i; -+ int err; -+ -+ for (i = 
cpu; i < NR_HW_IRQS; i += msi->num_cpus) { -+ msi_group = &msi->msi_groups[i]; -+ if (!msi_group->gic_irq) -+ continue; -+ -+ irq_set_chained_handler(msi_group->gic_irq, -+ xgene_msi_isr); -+ err = irq_set_handler_data(msi_group->gic_irq, msi_group); -+ if (err) { -+ pr_err("failed to register GIC IRQ handler\n"); -+ return -EINVAL; -+ } -+ /* -+ * Statically allocate MSI GIC IRQs to each CPU core. -+ * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated -+ * to each core. -+ */ -+ if (alloc_cpumask_var(&mask, GFP_KERNEL)) { -+ cpumask_clear(mask); -+ cpumask_set_cpu(cpu, mask); -+ err = irq_set_affinity(msi_group->gic_irq, mask); -+ if (err) -+ pr_err("failed to set affinity for GIC IRQ"); -+ free_cpumask_var(mask); -+ } else { -+ pr_err("failed to alloc CPU mask for affinity\n"); -+ err = -EINVAL; -+ } -+ -+ if (err) { -+ irq_set_chained_handler(msi_group->gic_irq, NULL); -+ irq_set_handler_data(msi_group->gic_irq, NULL); -+ return err; -+ } -+ } -+ -+ return 0; -+} -+ -+static void xgene_msi_hwirq_free(unsigned int cpu) -+{ -+ struct xgene_msi *msi = &xgene_msi_ctrl; -+ struct xgene_msi_group *msi_group; -+ int i; -+ -+ for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { -+ msi_group = &msi->msi_groups[i]; -+ if (!msi_group->gic_irq) -+ continue; -+ -+ irq_set_chained_handler(msi_group->gic_irq, NULL); -+ irq_set_handler_data(msi_group->gic_irq, NULL); -+ } -+} -+ -+static int xgene_msi_cpu_callback(struct notifier_block *nfb, -+ unsigned long action, void *hcpu) -+{ -+ unsigned cpu = (unsigned long)hcpu; -+ -+ switch (action) { -+ case CPU_ONLINE: -+ case CPU_ONLINE_FROZEN: -+ xgene_msi_hwirq_alloc(cpu); -+ break; -+ case CPU_DEAD: -+ case CPU_DEAD_FROZEN: -+ xgene_msi_hwirq_free(cpu); -+ break; -+ default: -+ break; -+ } -+ -+ return NOTIFY_OK; -+} -+ -+static struct notifier_block xgene_msi_cpu_notifier = { -+ .notifier_call = xgene_msi_cpu_callback, -+}; -+ -+static const struct of_device_id xgene_msi_match_table[] = { -+ {.compatible = "apm,xgene1-msi"}, -+ 
{}, -+}; -+ -+static int xgene_msi_probe(struct platform_device *pdev) -+{ -+ struct resource *res; -+ int rc, irq_index; -+ struct xgene_msi *xgene_msi; -+ unsigned int cpu; -+ int virt_msir; -+ u32 msi_val, msi_idx; -+ -+ xgene_msi = &xgene_msi_ctrl; -+ -+ platform_set_drvdata(pdev, xgene_msi); -+ -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res); -+ if (IS_ERR(xgene_msi->msi_regs)) { -+ dev_err(&pdev->dev, "no reg space\n"); -+ rc = -EINVAL; -+ goto error; -+ } -+ xgene_msi->msi_addr = res->start; -+ -+ xgene_msi->num_cpus = num_possible_cpus(); -+ -+ rc = xgene_msi_init_allocator(xgene_msi); -+ if (rc) { -+ dev_err(&pdev->dev, "Error allocating MSI bitmap\n"); -+ goto error; -+ } -+ -+ rc = xgene_allocate_domains(xgene_msi); -+ if (rc) { -+ dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); -+ goto error; -+ } -+ -+ for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { -+ virt_msir = platform_get_irq(pdev, irq_index); -+ if (virt_msir < 0) { -+ dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", -+ irq_index); -+ rc = -EINVAL; -+ goto error; -+ } -+ xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; -+ xgene_msi->msi_groups[irq_index].msi_grp = irq_index; -+ xgene_msi->msi_groups[irq_index].msi = xgene_msi; -+ } -+ -+ /* -+ * MSInIRx registers are read-to-clear; before registering -+ * interrupt handlers, read all of them to clear spurious -+ * interrupts that may occur before the driver is probed. 
-+ */ -+ for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { -+ for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) -+ msi_val = xgene_msi_ir_read(xgene_msi, irq_index, -+ msi_idx); -+ /* Read MSIINTn to confirm */ -+ msi_val = xgene_msi_int_read(xgene_msi, irq_index); -+ if (msi_val) { -+ dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); -+ rc = -EINVAL; -+ goto error; -+ } -+ } -+ -+ cpu_notifier_register_begin(); -+ -+ for_each_online_cpu(cpu) -+ if (xgene_msi_hwirq_alloc(cpu)) { -+ dev_err(&pdev->dev, "failed to register MSI handlers\n"); -+ cpu_notifier_register_done(); -+ goto error; -+ } -+ -+ rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier); -+ if (rc) { -+ dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); -+ cpu_notifier_register_done(); -+ goto error; -+ } -+ -+ cpu_notifier_register_done(); -+ -+ xgene_msi->mchip.of_node = pdev->dev.of_node; -+ rc = of_pci_msi_chip_add(&xgene_msi->mchip); -+ if (rc) { -+ dev_err(&pdev->dev, "failed to add MSI controller chip\n"); -+ goto error_notifier; -+ } -+ -+ dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); -+ -+ return 0; -+ -+error_notifier: -+ unregister_hotcpu_notifier(&xgene_msi_cpu_notifier); -+error: -+ xgene_msi_remove(pdev); -+ return rc; -+} -+ -+static struct platform_driver xgene_msi_driver = { -+ .driver = { -+ .name = "xgene-msi", -+ .owner = THIS_MODULE, -+ .of_match_table = xgene_msi_match_table, -+ }, -+ .probe = xgene_msi_probe, -+ .remove = xgene_msi_remove, -+}; -+ -+static int __init xgene_pcie_msi_init(void) -+{ -+ return platform_driver_register(&xgene_msi_driver); -+} -+subsys_initcall(xgene_pcie_msi_init); -diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c -index 2988fe1..0dac1fb 100644 ---- a/drivers/pci/host/pci-xgene.c -+++ b/drivers/pci/host/pci-xgene.c -@@ -401,11 +401,11 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, - struct list_head *res, - resource_size_t io_base) - { -- struct pci_host_bridge_window 
*window; -+ struct resource_entry *window; - struct device *dev = port->dev; - int ret; - -- list_for_each_entry(window, res, list) { -+ resource_list_for_each_entry(window, res) { - struct resource *res = window->res; - u64 restype = resource_type(res); - -@@ -600,6 +600,23 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, - return 0; - } - -+static int xgene_pcie_msi_enable(struct pci_bus *bus) -+{ -+ struct device_node *msi_node; -+ -+ msi_node = of_parse_phandle(bus->dev.of_node, -+ "msi-parent", 0); -+ if (!msi_node) -+ return -ENODEV; -+ -+ bus->msi = of_pci_find_msi_chip_by_node(msi_node); -+ if (!bus->msi) -+ return -ENODEV; -+ -+ bus->msi->dev = &bus->dev; -+ return 0; -+} -+ - static int xgene_pcie_probe_bridge(struct platform_device *pdev) - { - struct device_node *dn = pdev->dev.of_node; -@@ -636,6 +653,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) - if (!bus) - return -ENOMEM; - -+ if (IS_ENABLED(CONFIG_PCI_MSI)) -+ if (xgene_pcie_msi_enable(bus)) -+ dev_info(port->dev, "failed to enable MSI\n"); -+ - pci_scan_child_bus(bus); - pci_assign_unassigned_bus_resources(bus); - pci_bus_add_devices(bus); -diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c -index f69b0d0..0961ffc 100644 ---- a/drivers/pci/host/pcie-designware.c -+++ b/drivers/pci/host/pcie-designware.c -@@ -15,7 +15,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -31,6 +30,7 @@ - #define PORT_LINK_MODE_1_LANES (0x1 << 16) - #define PORT_LINK_MODE_2_LANES (0x3 << 16) - #define PORT_LINK_MODE_4_LANES (0x7 << 16) -+#define PORT_LINK_MODE_8_LANES (0xf << 16) - - #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C - #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) -@@ -38,12 +38,7 @@ - #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) - #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) - #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) -- --#define PCIE_MSI_ADDR_LO 0x820 --#define PCIE_MSI_ADDR_HI 0x824 
--#define PCIE_MSI_INTR0_ENABLE 0x828 --#define PCIE_MSI_INTR0_MASK 0x82C --#define PCIE_MSI_INTR0_STATUS 0x830 -+#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) - - #define PCIE_ATU_VIEWPORT 0x900 - #define PCIE_ATU_REGION_INBOUND (0x1 << 31) -@@ -67,39 +62,40 @@ - #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) - #define PCIE_ATU_UPPER_TARGET 0x91C - --static struct hw_pci dw_pci; -- --static unsigned long global_io_offset; -- --static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) --{ -- BUG_ON(!sys->private_data); -- -- return sys->private_data; --} -+static struct pci_ops dw_pcie_ops; - --int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val) -+int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val) - { -- *val = readl(addr); -+ if ((uintptr_t)addr & (size - 1)) { -+ *val = 0; -+ return PCIBIOS_BAD_REGISTER_NUMBER; -+ } - -- if (size == 1) -- *val = (*val >> (8 * (where & 3))) & 0xff; -+ if (size == 4) -+ *val = readl(addr); - else if (size == 2) -- *val = (*val >> (8 * (where & 3))) & 0xffff; -- else if (size != 4) -+ *val = readw(addr); -+ else if (size == 1) -+ *val = readb(addr); -+ else { -+ *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; -+ } - - return PCIBIOS_SUCCESSFUL; - } - --int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val) -+int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val) - { -+ if ((uintptr_t)addr & (size - 1)) -+ return PCIBIOS_BAD_REGISTER_NUMBER; -+ - if (size == 4) - writel(val, addr); - else if (size == 2) -- writew(val, addr + (where & 2)); -+ writew(val, addr); - else if (size == 1) -- writeb(val, addr + (where & 3)); -+ writeb(val, addr); - else - return PCIBIOS_BAD_REGISTER_NUMBER; - -@@ -130,8 +126,7 @@ static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, - if (pp->ops->rd_own_conf) - ret = pp->ops->rd_own_conf(pp, where, size, val); - else -- ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, -- size, val); -+ ret = 
dw_pcie_cfg_read(pp->dbi_base + where, size, val); - - return ret; - } -@@ -144,182 +139,33 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, - if (pp->ops->wr_own_conf) - ret = pp->ops->wr_own_conf(pp, where, size, val); - else -- ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where, -- size, val); -- -- return ret; --} -- --static struct irq_chip dw_msi_irq_chip = { -- .name = "PCI-MSI", -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, --}; -- --/* MSI int handler */ --irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) --{ -- unsigned long val; -- int i, pos, irq; -- irqreturn_t ret = IRQ_NONE; -- -- for (i = 0; i < MAX_MSI_CTRLS; i++) { -- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, -- (u32 *)&val); -- if (val) { -- ret = IRQ_HANDLED; -- pos = 0; -- while ((pos = find_next_bit(&val, 32, pos)) != 32) { -- irq = irq_find_mapping(pp->irq_domain, -- i * 32 + pos); -- dw_pcie_wr_own_conf(pp, -- PCIE_MSI_INTR0_STATUS + i * 12, -- 4, 1 << pos); -- generic_handle_irq(irq); -- pos++; -- } -- } -- } -+ ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val); - - return ret; - } - --void dw_pcie_msi_init(struct pcie_port *pp) --{ -- pp->msi_data = __get_free_pages(GFP_KERNEL, 0); -- -- /* program the msi_data */ -- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, -- virt_to_phys((void *)pp->msi_data)); -- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0); --} -- --static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) --{ -- unsigned int res, bit, val; -- -- res = (irq / 32) * 12; -- bit = irq % 32; -- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); -- val &= ~(1 << bit); -- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); --} -- --static void clear_irq_range(struct pcie_port *pp, unsigned int irq_base, -- unsigned int nvec, unsigned int pos) --{ -- unsigned int i; -- -- for (i = 0; i < nvec; i++) { -- 
irq_set_msi_desc_off(irq_base, i, NULL); -- /* Disable corresponding interrupt on MSI controller */ -- if (pp->ops->msi_clear_irq) -- pp->ops->msi_clear_irq(pp, pos + i); -- else -- dw_pcie_msi_clear_irq(pp, pos + i); -- } -- -- bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec)); --} -- --static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) --{ -- unsigned int res, bit, val; -- -- res = (irq / 32) * 12; -- bit = irq % 32; -- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); -- val |= 1 << bit; -- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); --} -- --static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) -+static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, -+ int type, u64 cpu_addr, u64 pci_addr, u32 size) - { -- int irq, pos0, i; -- struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata); -- -- pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS, -- order_base_2(no_irqs)); -- if (pos0 < 0) -- goto no_valid_irq; -- -- irq = irq_find_mapping(pp->irq_domain, pos0); -- if (!irq) -- goto no_valid_irq; -- -- /* -- * irq_create_mapping (called from dw_pcie_host_init) pre-allocates -- * descs so there is no need to allocate descs here. We can therefore -- * assume that if irq_find_mapping above returns non-zero, then the -- * descs are also successfully allocated. 
-- */ -- -- for (i = 0; i < no_irqs; i++) { -- if (irq_set_msi_desc_off(irq, i, desc) != 0) { -- clear_irq_range(pp, irq, i, pos0); -- goto no_valid_irq; -- } -- /*Enable corresponding interrupt in MSI interrupt controller */ -- if (pp->ops->msi_set_irq) -- pp->ops->msi_set_irq(pp, pos0 + i); -- else -- dw_pcie_msi_set_irq(pp, pos0 + i); -- } -- -- *pos = pos0; -- return irq; -- --no_valid_irq: -- *pos = pos0; -- return -ENOSPC; --} -- --static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, -- struct msi_desc *desc) --{ -- int irq, pos; -- struct msi_msg msg; -- struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); -- -- if (desc->msi_attrib.is_msix) -- return -EINVAL; -- -- irq = assign_irq(1, desc, &pos); -- if (irq < 0) -- return irq; -- -- if (pp->ops->get_msi_addr) -- msg.address_lo = pp->ops->get_msi_addr(pp); -- else -- msg.address_lo = virt_to_phys((void *)pp->msi_data); -- msg.address_hi = 0x0; -- -- if (pp->ops->get_msi_data) -- msg.data = pp->ops->get_msi_data(pp, pos); -- else -- msg.data = pos; -- -- write_msi_msg(irq, &msg); -- -- return 0; -+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, -+ PCIE_ATU_VIEWPORT); -+ dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE); -+ dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE); -+ dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1), -+ PCIE_ATU_LIMIT); -+ dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET); -+ dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET); -+ dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1); -+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); - } - --static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) -+void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index) - { -- struct irq_data *data = irq_get_irq_data(irq); -- struct msi_desc *msi = irq_data_get_msi(data); -- struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata); -- -- clear_irq_range(pp, irq, 
1, data->hwirq); -+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, -+ PCIE_ATU_VIEWPORT); -+ dw_pcie_writel_rc(pp, 0, PCIE_ATU_CR2); - } - --static struct msi_chip dw_pcie_msi_chip = { -- .setup_irq = dw_msi_setup_irq, -- .teardown_irq = dw_msi_teardown_irq, --}; -- - int dw_pcie_link_up(struct pcie_port *pp) - { - if (pp->ops->link_up) -@@ -328,36 +174,42 @@ int dw_pcie_link_up(struct pcie_port *pp) - return 0; - } - --static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, -- irq_hw_number_t hwirq) -+static int dw_pcie_msi_ctrl_init(struct pcie_port *pp) - { -- irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq); -- irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); -+ struct device_node *msi_node; -+ -+ if (!IS_ENABLED(CONFIG_PCI_MSI)) { -+ pp->msi = NULL; -+ return 0; -+ } -+ -+ if (pp->msi) -+ return 0; -+ -+ msi_node = of_parse_phandle(pp->dev->of_node, "msi-parent", 0); -+ if (msi_node) { -+ pp->msi = of_pci_find_msi_chip_by_node(msi_node); -+ if (!pp->msi) { -+ dev_err(pp->dev, "Cannot find msi chip of %s\n", -+ msi_node->full_name); -+ return -ENODEV; -+ } else -+ return 0; -+ } - - return 0; - } - --static const struct irq_domain_ops msi_domain_ops = { -- .map = dw_pcie_msi_map, --}; -- - int dw_pcie_host_init(struct pcie_port *pp) - { - struct device_node *np = pp->dev->of_node; - struct platform_device *pdev = to_platform_device(pp->dev); -- struct of_pci_range range; -- struct of_pci_range_parser parser; -+ struct pci_bus *bus, *child; - struct resource *cfg_res; -- u32 val, na, ns; -- const __be32 *addrp; -- int i, index, ret; -- -- /* Find the address cell size and the number of cells in order to get -- * the untranslated address. 
-- */ -- of_property_read_u32(np, "#address-cells", &na); -- ns = of_n_size_cells(np); -+ u32 val; -+ int ret; -+ LIST_HEAD(res); -+ struct resource_entry *win; - - cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); - if (cfg_res) { -@@ -365,87 +217,61 @@ int dw_pcie_host_init(struct pcie_port *pp) - pp->cfg1_size = resource_size(cfg_res)/2; - pp->cfg0_base = cfg_res->start; - pp->cfg1_base = cfg_res->start + pp->cfg0_size; -- -- /* Find the untranslated configuration space address */ -- index = of_property_match_string(np, "reg-names", "config"); -- addrp = of_get_address(np, index, NULL, NULL); -- pp->cfg0_mod_base = of_read_number(addrp, ns); -- pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size; -- } else { -+ } else if (!pp->va_cfg0_base) { - dev_err(pp->dev, "missing *config* reg space\n"); - } - -- if (of_pci_range_parser_init(&parser, np)) { -- dev_err(pp->dev, "missing ranges property\n"); -- return -EINVAL; -- } -+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base); -+ if (ret) -+ return ret; - - /* Get the I/O and memory ranges from DT */ -- for_each_of_pci_range(&parser, &range) { -- unsigned long restype = range.flags & IORESOURCE_TYPE_BITS; -- if (restype == IORESOURCE_IO) { -- of_pci_range_to_resource(&range, np, &pp->io); -- pp->io.name = "I/O"; -- pp->io.start = max_t(resource_size_t, -- PCIBIOS_MIN_IO, -- range.pci_addr + global_io_offset); -- pp->io.end = min_t(resource_size_t, -- IO_SPACE_LIMIT, -- range.pci_addr + range.size -- + global_io_offset - 1); -- pp->io_size = resource_size(&pp->io); -- pp->io_bus_addr = range.pci_addr; -- pp->io_base = range.cpu_addr; -- -- /* Find the untranslated IO space address */ -- pp->io_mod_base = of_read_number(parser.range - -- parser.np + na, ns); -- } -- if (restype == IORESOURCE_MEM) { -- of_pci_range_to_resource(&range, np, &pp->mem); -- pp->mem.name = "MEM"; -- pp->mem_size = resource_size(&pp->mem); -- pp->mem_bus_addr = range.pci_addr; -- -- /* Find the 
untranslated MEM space address */ -- pp->mem_mod_base = of_read_number(parser.range - -- parser.np + na, ns); -- } -- if (restype == 0) { -- of_pci_range_to_resource(&range, np, &pp->cfg); -- pp->cfg0_size = resource_size(&pp->cfg)/2; -- pp->cfg1_size = resource_size(&pp->cfg)/2; -- pp->cfg0_base = pp->cfg.start; -- pp->cfg1_base = pp->cfg.start + pp->cfg0_size; -- -- /* Find the untranslated configuration space address */ -- pp->cfg0_mod_base = of_read_number(parser.range - -- parser.np + na, ns); -- pp->cfg1_mod_base = pp->cfg0_mod_base + -- pp->cfg0_size; -+ resource_list_for_each_entry(win, &res) { -+ switch (resource_type(win->res)) { -+ case IORESOURCE_IO: -+ pp->io = win->res; -+ pp->io->name = "I/O"; -+ pp->io_size = resource_size(pp->io); -+ pp->io_bus_addr = pp->io->start - win->offset; -+ ret = pci_remap_iospace(pp->io, pp->io_base); -+ if (ret) { -+ dev_warn(pp->dev, "error %d: failed to map resource %pR\n", -+ ret, pp->io); -+ continue; -+ } -+ pp->io_base = pp->io->start; -+ break; -+ case IORESOURCE_MEM: -+ pp->mem = win->res; -+ pp->mem->name = "MEM"; -+ pp->mem_size = resource_size(pp->mem); -+ pp->mem_bus_addr = pp->mem->start - win->offset; -+ break; -+ case 0: -+ pp->cfg = win->res; -+ pp->cfg0_size = resource_size(pp->cfg)/2; -+ pp->cfg1_size = resource_size(pp->cfg)/2; -+ pp->cfg0_base = pp->cfg->start; -+ pp->cfg1_base = pp->cfg->start + pp->cfg0_size; -+ break; -+ case IORESOURCE_BUS: -+ pp->busn = win->res; -+ break; -+ default: -+ continue; - } - } - -- ret = of_pci_parse_bus_range(np, &pp->busn); -- if (ret < 0) { -- pp->busn.name = np->name; -- pp->busn.start = 0; -- pp->busn.end = 0xff; -- pp->busn.flags = IORESOURCE_BUS; -- dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n", -- ret, &pp->busn); -- } -- - if (!pp->dbi_base) { -- pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start, -- resource_size(&pp->cfg)); -+ pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start, -+ resource_size(pp->cfg)); - if 
(!pp->dbi_base) { - dev_err(pp->dev, "error with ioremap\n"); - return -ENOMEM; - } - } - -- pp->mem_base = pp->mem.start; -+ pp->mem_base = pp->mem->start; - - if (!pp->va_cfg0_base) { - pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base, -@@ -465,33 +291,18 @@ int dw_pcie_host_init(struct pcie_port *pp) - } - } - -- if (of_property_read_u32(np, "num-lanes", &pp->lanes)) { -- dev_err(pp->dev, "Failed to parse the number of lanes\n"); -- return -EINVAL; -- } -- -- if (IS_ENABLED(CONFIG_PCI_MSI)) { -- if (!pp->ops->msi_host_init) { -- pp->irq_domain = irq_domain_add_linear(pp->dev->of_node, -- MAX_MSI_IRQS, &msi_domain_ops, -- &dw_pcie_msi_chip); -- if (!pp->irq_domain) { -- dev_err(pp->dev, "irq domain init failed\n"); -- return -ENXIO; -- } -- -- for (i = 0; i < MAX_MSI_IRQS; i++) -- irq_create_mapping(pp->irq_domain, i); -- } else { -- ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip); -- if (ret < 0) -- return ret; -- } -- } -+ ret = of_property_read_u32(np, "num-lanes", &pp->lanes); -+ if (ret) -+ pp->lanes = 0; - - if (pp->ops->host_init) - pp->ops->host_init(pp); - -+ if (!pp->ops->rd_other_conf) -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, -+ PCIE_ATU_TYPE_MEM, pp->mem_base, -+ pp->mem_bus_addr, pp->mem_size); -+ - dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); - - /* program correct class for RC */ -@@ -501,126 +312,113 @@ int dw_pcie_host_init(struct pcie_port *pp) - val |= PORT_LOGIC_SPEED_CHANGE; - dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); - -- dw_pci.nr_controllers = 1; -- dw_pci.private_data = (void **)&pp; -+ pp->root_bus_nr = pp->busn->start; -+#if 0 -+ bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops, -+ pp, &res); -+ if (!bus) -+ return -ENOMEM; -+#else -+ bus = pci_create_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops, -+ pp, &res); -+ if (!bus) -+ return -ENODEV; -+ -+ ret = dw_pcie_msi_ctrl_init(pp); -+ if (ret) -+ return ret; -+ -+ bus->msi = pp->msi; - -- pci_common_init_dev(pp->dev, 
&dw_pci); --#ifdef CONFIG_PCI_DOMAINS -- dw_pci.domain++; -+ pci_scan_child_bus(bus); - #endif - -- return 0; --} -+ if (pp->ops->scan_bus) -+ pp->ops->scan_bus(pp); - --static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) --{ -- /* Program viewport 0 : OUTBOUND : CFG0 */ -- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, -- PCIE_ATU_VIEWPORT); -- dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE); -- dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE); -- dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1, -- PCIE_ATU_LIMIT); -- dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); -- dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); -- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1); -- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); --} -+#ifdef CONFIG_ARM -+ /* support old dtbs that incorrectly describe IRQs */ -+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); -+#endif - --static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) --{ -- /* Program viewport 1 : OUTBOUND : CFG1 */ -- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, -- PCIE_ATU_VIEWPORT); -- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); -- dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE); -- dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE); -- dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1, -- PCIE_ATU_LIMIT); -- dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); -- dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); -- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); --} -+ if (!pci_has_flag(PCI_PROBE_ONLY)) { -+ pci_bus_size_bridges(bus); -+ pci_bus_assign_resources(bus); - --static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) --{ -- /* Program viewport 0 : OUTBOUND : MEM */ -- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, -- 
PCIE_ATU_VIEWPORT); -- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); -- dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE); -- dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE); -- dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1, -- PCIE_ATU_LIMIT); -- dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET); -- dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr), -- PCIE_ATU_UPPER_TARGET); -- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); --} -+ list_for_each_entry(child, &bus->children, node) -+ pcie_bus_configure_settings(child); -+ } - --static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) --{ -- /* Program viewport 1 : OUTBOUND : IO */ -- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, -- PCIE_ATU_VIEWPORT); -- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); -- dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE); -- dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE); -- dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1, -- PCIE_ATU_LIMIT); -- dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET); -- dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr), -- PCIE_ATU_UPPER_TARGET); -- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -+ pci_bus_add_devices(bus); -+ -+ return 0; - } - - static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, - u32 devfn, int where, int size, u32 *val) - { -- int ret = PCIBIOS_SUCCESSFUL; -- u32 address, busdev; -+ int ret, type; -+ u32 busdev, cfg_size; -+ u64 cpu_addr; -+ void __iomem *va_cfg_base; - - busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | - PCIE_ATU_FUNC(PCI_FUNC(devfn)); -- address = where & ~0x3; - - if (bus->parent->number == pp->root_bus_nr) { -- dw_pcie_prog_viewport_cfg0(pp, busdev); -- ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size, -- val); -- dw_pcie_prog_viewport_mem_outbound(pp); -+ type = 
PCIE_ATU_TYPE_CFG0; -+ cpu_addr = pp->cfg0_base; -+ cfg_size = pp->cfg0_size; -+ va_cfg_base = pp->va_cfg0_base; - } else { -- dw_pcie_prog_viewport_cfg1(pp, busdev); -- ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size, -- val); -- dw_pcie_prog_viewport_io_outbound(pp); -+ type = PCIE_ATU_TYPE_CFG1; -+ cpu_addr = pp->cfg1_base; -+ cfg_size = pp->cfg1_size; -+ va_cfg_base = pp->va_cfg1_base; - } - -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, -+ type, cpu_addr, -+ busdev, cfg_size); -+ ret = dw_pcie_cfg_read(va_cfg_base + where, size, val); -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, -+ PCIE_ATU_TYPE_IO, pp->io_base, -+ pp->io_bus_addr, pp->io_size); -+ - return ret; - } - - static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, - u32 devfn, int where, int size, u32 val) - { -- int ret = PCIBIOS_SUCCESSFUL; -- u32 address, busdev; -+ int ret, type; -+ u32 busdev, cfg_size; -+ u64 cpu_addr; -+ void __iomem *va_cfg_base; - - busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | - PCIE_ATU_FUNC(PCI_FUNC(devfn)); -- address = where & ~0x3; - - if (bus->parent->number == pp->root_bus_nr) { -- dw_pcie_prog_viewport_cfg0(pp, busdev); -- ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size, -- val); -- dw_pcie_prog_viewport_mem_outbound(pp); -+ type = PCIE_ATU_TYPE_CFG0; -+ cpu_addr = pp->cfg0_base; -+ cfg_size = pp->cfg0_size; -+ va_cfg_base = pp->va_cfg0_base; - } else { -- dw_pcie_prog_viewport_cfg1(pp, busdev); -- ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size, -- val); -- dw_pcie_prog_viewport_io_outbound(pp); -+ type = PCIE_ATU_TYPE_CFG1; -+ cpu_addr = pp->cfg1_base; -+ cfg_size = pp->cfg1_size; -+ va_cfg_base = pp->va_cfg1_base; - } - -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, -+ type, cpu_addr, -+ busdev, cfg_size); -+ ret = dw_pcie_cfg_write(va_cfg_base + where, size, val); -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, -+ PCIE_ATU_TYPE_IO, 
pp->io_base, -+ pp->io_bus_addr, pp->io_size); -+ - return ret; - } - -@@ -650,7 +448,7 @@ static int dw_pcie_valid_config(struct pcie_port *pp, - static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, - int size, u32 *val) - { -- struct pcie_port *pp = sys_to_pcie(bus->sysdata); -+ struct pcie_port *pp = bus->sysdata; - int ret; - - if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { -@@ -674,7 +472,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, - static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, - int where, int size, u32 val) - { -- struct pcie_port *pp = sys_to_pcie(bus->sysdata); -+ struct pcie_port *pp = bus->sysdata; - int ret; - - if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) -@@ -698,81 +496,19 @@ static struct pci_ops dw_pcie_ops = { - .write = dw_pcie_wr_conf, - }; - --static int dw_pcie_setup(int nr, struct pci_sys_data *sys) --{ -- struct pcie_port *pp; -- -- pp = sys_to_pcie(sys); -- -- if (global_io_offset < SZ_1M && pp->io_size > 0) { -- sys->io_offset = global_io_offset - pp->io_bus_addr; -- pci_ioremap_io(global_io_offset, pp->io_base); -- global_io_offset += SZ_64K; -- pci_add_resource_offset(&sys->resources, &pp->io, -- sys->io_offset); -- } -- -- sys->mem_offset = pp->mem.start - pp->mem_bus_addr; -- pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset); -- pci_add_resource(&sys->resources, &pp->busn); -- -- return 1; --} -- --static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) --{ -- struct pci_bus *bus; -- struct pcie_port *pp = sys_to_pcie(sys); -- -- pp->root_bus_nr = sys->busnr; -- bus = pci_create_root_bus(pp->dev, sys->busnr, -- &dw_pcie_ops, sys, &sys->resources); -- if (!bus) -- return NULL; -- -- pci_scan_child_bus(bus); -- -- if (bus && pp->ops->scan_bus) -- pp->ops->scan_bus(pp); -- -- return bus; --} -- --static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) --{ -- struct pcie_port *pp = 
sys_to_pcie(dev->bus->sysdata); -- int irq; -- -- irq = of_irq_parse_and_map_pci(dev, slot, pin); -- if (!irq) -- irq = pp->irq; -- -- return irq; --} -- --static void dw_pcie_add_bus(struct pci_bus *bus) --{ -- if (IS_ENABLED(CONFIG_PCI_MSI)) { -- struct pcie_port *pp = sys_to_pcie(bus->sysdata); -- -- dw_pcie_msi_chip.dev = pp->dev; -- bus->msi = &dw_pcie_msi_chip; -- } --} -- --static struct hw_pci dw_pci = { -- .setup = dw_pcie_setup, -- .scan = dw_pcie_scan_bus, -- .map_irq = dw_pcie_map_irq, -- .add_bus = dw_pcie_add_bus, --}; -- - void dw_pcie_setup_rc(struct pcie_port *pp) - { - u32 val; - u32 membase; - u32 memlimit; - -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, -+ PCIE_ATU_TYPE_IO, pp->io_base, -+ pp->io_bus_addr, pp->io_size); -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, -+ PCIE_ATU_TYPE_MEM, pp->mem_base, -+ pp->mem_bus_addr, pp->mem_size); -+ - /* set the number of lanes */ - dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val); - val &= ~PORT_LINK_MODE_MASK; -@@ -786,6 +522,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp) - case 4: - val |= PORT_LINK_MODE_4_LANES; - break; -+ case 8: -+ val |= PORT_LINK_MODE_8_LANES; -+ break; -+ default: -+ dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes); -+ return; - } - dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL); - -@@ -802,6 +544,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp) - case 4: - val |= PORT_LOGIC_LINK_WIDTH_4_LANES; - break; -+ case 8: -+ val |= PORT_LOGIC_LINK_WIDTH_8_LANES; -+ break; - } - dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL); - -diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h -index c625675..fcd6431 100644 ---- a/drivers/pci/host/pcie-designware.h -+++ b/drivers/pci/host/pcie-designware.h -@@ -27,28 +27,25 @@ struct pcie_port { - u8 root_bus_nr; - void __iomem *dbi_base; - u64 cfg0_base; -- u64 cfg0_mod_base; - void __iomem *va_cfg0_base; - u32 cfg0_size; - u64 cfg1_base; -- u64 cfg1_mod_base; - 
void __iomem *va_cfg1_base; - u32 cfg1_size; -- u64 io_base; -- u64 io_mod_base; -+ resource_size_t io_base; - phys_addr_t io_bus_addr; - u32 io_size; - u64 mem_base; -- u64 mem_mod_base; - phys_addr_t mem_bus_addr; - u32 mem_size; -- struct resource cfg; -- struct resource io; -- struct resource mem; -- struct resource busn; -+ struct resource *cfg; -+ struct resource *io; -+ struct resource *mem; -+ struct resource *busn; - int irq; - u32 lanes; - struct pcie_host_ops *ops; -+ struct msi_controller *msi; - int msi_irq; - struct irq_domain *irq_domain; - unsigned long msi_data; -@@ -70,18 +67,19 @@ struct pcie_host_ops { - void (*host_init)(struct pcie_port *pp); - void (*msi_set_irq)(struct pcie_port *pp, int irq); - void (*msi_clear_irq)(struct pcie_port *pp, int irq); -- u32 (*get_msi_addr)(struct pcie_port *pp); -+ phys_addr_t (*get_msi_addr)(struct pcie_port *pp); - u32 (*get_msi_data)(struct pcie_port *pp, int pos); - void (*scan_bus)(struct pcie_port *pp); -- int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip); -+ int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip); - }; - --int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); --int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val); -+int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val); -+int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val); - irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); - void dw_pcie_msi_init(struct pcie_port *pp); - int dw_pcie_link_up(struct pcie_port *pp); - void dw_pcie_setup_rc(struct pcie_port *pp); - int dw_pcie_host_init(struct pcie_port *pp); -+void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index); - - #endif /* _PCIE_DESIGNWARE_H */ -diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c -index 61158e0..f8ec96d 100644 ---- a/drivers/pci/host/pcie-rcar.c -+++ b/drivers/pci/host/pcie-rcar.c -@@ -111,14 +111,14 @@ - struct rcar_msi { - DECLARE_BITMAP(used, 
INT_PCI_MSI_NR); - struct irq_domain *domain; -- struct msi_chip chip; -+ struct msi_controller chip; - unsigned long pages; - struct mutex lock; - int irq1; - int irq2; - }; - --static inline struct rcar_msi *to_rcar_msi(struct msi_chip *chip) -+static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) - { - return container_of(chip, struct rcar_msi, chip); - } -@@ -404,9 +404,6 @@ static void rcar_pcie_enable(struct rcar_pcie *pcie) - rcar_pci.private_data = (void **)&pcie; - - pci_common_init_dev(&pdev->dev, &rcar_pci); --#ifdef CONFIG_PCI_DOMAINS -- rcar_pci.domain++; --#endif - } - - static int phy_wait_for_ack(struct rcar_pcie *pcie) -@@ -622,7 +619,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) - return IRQ_HANDLED; - } - --static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, -+static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, - struct msi_desc *desc) - { - struct rcar_msi *msi = to_rcar_msi(chip); -@@ -647,12 +644,12 @@ static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, - msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); - msg.data = hwirq; - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - - return 0; - } - --static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) -+static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) - { - struct rcar_msi *msi = to_rcar_msi(chip); - struct irq_data *d = irq_get_irq_data(irq); -@@ -662,10 +659,10 @@ static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) - - static struct irq_chip rcar_msi_irq_chip = { - .name = "R-Car PCIe MSI", -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - static int rcar_msi_map(struct 
irq_domain *domain, unsigned int irq, -@@ -673,7 +670,6 @@ static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, - { - irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - return 0; - } -diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c -index ccc496b..eef849c 100644 ---- a/drivers/pci/host/pcie-xilinx.c -+++ b/drivers/pci/host/pcie-xilinx.c -@@ -297,18 +297,16 @@ static struct pci_ops xilinx_pcie_ops = { - */ - static void xilinx_pcie_destroy_msi(unsigned int irq) - { -- struct irq_desc *desc; - struct msi_desc *msi; - struct xilinx_pcie_port *port; - -- desc = irq_to_desc(irq); -- msi = irq_desc_get_msi_desc(desc); -- port = sys_to_pcie(msi->dev->bus->sysdata); -- -- if (!test_bit(irq, msi_irq_in_use)) -+ if (!test_bit(irq, msi_irq_in_use)) { -+ msi = irq_get_msi_desc(irq); -+ port = sys_to_pcie(msi_desc_to_pci_sys_data(msi)); - dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); -- else -+ } else { - clear_bit(irq, msi_irq_in_use); -+ } - } - - /** -@@ -335,7 +333,8 @@ static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port) - * @chip: MSI Chip descriptor - * @irq: MSI IRQ to destroy - */ --static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) -+static void xilinx_msi_teardown_irq(struct msi_controller *chip, -+ unsigned int irq) - { - xilinx_pcie_destroy_msi(irq); - } -@@ -348,7 +347,7 @@ static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) - * - * Return: '0' on success and error value on failure - */ --static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip, -+static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip, - struct pci_dev *pdev, - struct msi_desc *desc) - { -@@ -374,13 +373,13 @@ static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip, - msg.address_lo = msg_addr; - msg.data = irq; - -- write_msi_msg(irq, &msg); -+ 
pci_write_msi_msg(irq, &msg); - - return 0; - } - - /* MSI Chip Descriptor */ --static struct msi_chip xilinx_pcie_msi_chip = { -+static struct msi_controller xilinx_pcie_msi_chip = { - .setup_irq = xilinx_pcie_msi_setup_irq, - .teardown_irq = xilinx_msi_teardown_irq, - }; -@@ -388,10 +387,10 @@ static struct msi_chip xilinx_pcie_msi_chip = { - /* HW Interrupt Chip Descriptor */ - static struct irq_chip xilinx_msi_irq_chip = { - .name = "Xilinx PCIe MSI", -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - /** -@@ -407,7 +406,6 @@ static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq, - { - irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - return 0; - } -@@ -431,20 +429,6 @@ static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) - pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); - } - --/** -- * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus -- * @bus: PCIe bus -- */ --static void xilinx_pcie_add_bus(struct pci_bus *bus) --{ -- if (IS_ENABLED(CONFIG_PCI_MSI)) { -- struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); -- -- xilinx_pcie_msi_chip.dev = port->dev; -- bus->msi = &xilinx_pcie_msi_chip; -- } --} -- - /* INTx Functions */ - - /** -@@ -460,7 +444,6 @@ static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - { - irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - return 0; - } -@@ -730,9 +713,15 @@ static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys) - struct pci_bus *bus; - - port->root_busno = sys->busnr; -- bus = 
pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops, -- sys, &sys->resources); - -+ if (IS_ENABLED(CONFIG_PCI_MSI)) -+ bus = pci_scan_root_bus_msi(port->dev, sys->busnr, -+ &xilinx_pcie_ops, sys, -+ &sys->resources, -+ &xilinx_pcie_msi_chip); -+ else -+ bus = pci_scan_root_bus(port->dev, sys->busnr, -+ &xilinx_pcie_ops, sys, &sys->resources); - return bus; - } - -@@ -750,7 +739,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port) - resource_size_t offset; - struct of_pci_range_parser parser; - struct of_pci_range range; -- struct pci_host_bridge_window *win; -+ struct resource_entry *win; - int err = 0, mem_resno = 0; - - /* Get the ranges */ -@@ -820,7 +809,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port) - - free_resources: - release_child_resources(&iomem_resource); -- list_for_each_entry(win, &port->resources, list) -+ resource_list_for_each_entry(win, &port->resources) - devm_kfree(dev, win->res); - pci_free_resource_list(&port->resources); - -@@ -924,10 +913,13 @@ static int xilinx_pcie_probe(struct platform_device *pdev) - .private_data = (void **)&port, - .setup = xilinx_pcie_setup, - .map_irq = of_irq_parse_and_map_pci, -- .add_bus = xilinx_pcie_add_bus, - .scan = xilinx_pcie_scan_bus, - .ops = &xilinx_pcie_ops, - }; -+ -+#ifdef CONFIG_PCI_MSI -+ xilinx_pcie_msi_chip.dev = port->dev; -+#endif - pci_common_init_dev(dev, &hw); - - return 0; -diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c -index 084587d..5e64d37 100644 ---- a/drivers/pci/msi.c -+++ b/drivers/pci/msi.c -@@ -19,19 +19,81 @@ - #include - #include - #include -+#include - - #include "pci.h" - - static int pci_msi_enable = 1; -+int pci_msi_ignore_mask; - - #define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) - -+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN -+static struct irq_domain *pci_msi_default_domain; -+static DEFINE_MUTEX(pci_msi_domain_lock); -+ -+struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev) -+{ -+ 
return pci_msi_default_domain; -+} -+ -+static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev) -+{ -+ struct irq_domain *domain; -+ -+ domain = dev_get_msi_domain(&dev->dev); -+ if (domain) -+ return domain; -+ -+ return arch_get_pci_msi_domain(dev); -+} -+ -+static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -+{ -+ struct irq_domain *domain; -+ -+ domain = pci_msi_get_domain(dev); -+ if (domain) -+ return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); -+ -+ return arch_setup_msi_irqs(dev, nvec, type); -+} -+ -+static void pci_msi_teardown_msi_irqs(struct pci_dev *dev) -+{ -+ struct irq_domain *domain; -+ -+ domain = pci_msi_get_domain(dev); -+ if (domain) -+ pci_msi_domain_free_irqs(domain, dev); -+ else -+ arch_teardown_msi_irqs(dev); -+} -+#else -+#define pci_msi_setup_msi_irqs arch_setup_msi_irqs -+#define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs -+#endif - - /* Arch hooks */ - -+struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev) -+{ -+ return NULL; -+} -+ -+static struct msi_controller *pci_msi_controller(struct pci_dev *dev) -+{ -+ struct msi_controller *msi_ctrl = dev->bus->msi; -+ -+ if (msi_ctrl) -+ return msi_ctrl; -+ -+ return pcibios_msi_controller(dev); -+} -+ - int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) - { -- struct msi_chip *chip = dev->bus->msi; -+ struct msi_controller *chip = pci_msi_controller(dev); - int err; - - if (!chip || !chip->setup_irq) -@@ -48,7 +110,7 @@ int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) - - void __weak arch_teardown_msi_irq(unsigned int irq) - { -- struct msi_chip *chip = irq_get_chip_data(irq); -+ struct msi_controller *chip = irq_get_chip_data(irq); - - if (!chip || !chip->teardown_irq) - return; -@@ -58,9 +120,12 @@ void __weak arch_teardown_msi_irq(unsigned int irq) - - int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - { -+ struct msi_controller *chip = 
dev->bus->msi; - struct msi_desc *entry; - int ret; - -+ if (chip && chip->setup_irqs) -+ return chip->setup_irqs(chip, dev, nvec, type); - /* - * If an architecture wants to support multiple MSI, it needs to - * override arch_setup_msi_irqs() -@@ -68,7 +133,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - if (type == PCI_CAP_ID_MSI && nvec > 1) - return 1; - -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - ret = arch_setup_msi_irq(dev, entry); - if (ret < 0) - return ret; -@@ -85,19 +150,13 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - */ - void default_teardown_msi_irqs(struct pci_dev *dev) - { -+ int i; - struct msi_desc *entry; - -- list_for_each_entry(entry, &dev->msi_list, list) { -- int i, nvec; -- if (entry->irq == 0) -- continue; -- if (entry->nvec_used) -- nvec = entry->nvec_used; -- else -- nvec = 1 << entry->msi_attrib.multiple; -- for (i = 0; i < nvec; i++) -- arch_teardown_msi_irq(entry->irq + i); -- } -+ for_each_pci_msi_entry(entry, dev) -+ if (entry->irq) -+ for (i = 0; i < entry->nvec_used; i++) -+ arch_teardown_msi_irq(entry->irq + i); - } - - void __weak arch_teardown_msi_irqs(struct pci_dev *dev) -@@ -111,7 +170,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq) - - entry = NULL; - if (dev->msix_enabled) { -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - if (irq == entry->irq) - break; - } -@@ -120,7 +179,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq) - } - - if (entry) -- __write_msi_msg(entry, &entry->msg); -+ __pci_write_msi_msg(entry, &entry->msg); - } - - void __weak arch_restore_msi_irqs(struct pci_dev *dev) -@@ -128,27 +187,6 @@ void __weak arch_restore_msi_irqs(struct pci_dev *dev) - return default_restore_msi_irqs(dev); - } - --static void msi_set_enable(struct pci_dev *dev, int enable) --{ -- u16 control; -- -- pci_read_config_word(dev, 
dev->msi_cap + PCI_MSI_FLAGS, &control); -- control &= ~PCI_MSI_FLAGS_ENABLE; -- if (enable) -- control |= PCI_MSI_FLAGS_ENABLE; -- pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); --} -- --static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) --{ -- u16 ctrl; -- -- pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); -- ctrl &= ~clear; -- ctrl |= set; -- pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); --} -- - static inline __attribute_const__ u32 msi_mask(unsigned x) - { - /* Don't shift by >= width of type */ -@@ -163,28 +201,24 @@ static inline __attribute_const__ u32 msi_mask(unsigned x) - * reliably as devices without an INTx disable bit will then generate a - * level IRQ which will never be cleared. - */ --u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) -+u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) - { - u32 mask_bits = desc->masked; - -- if (!desc->msi_attrib.maskbit) -+ if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) - return 0; - - mask_bits &= ~mask; - mask_bits |= flag; -- pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits); -+ pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, -+ mask_bits); - - return mask_bits; - } - --__weak u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) --{ -- return default_msi_mask_irq(desc, mask, flag); --} -- - static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) - { -- desc->masked = arch_msi_mask_irq(desc, mask, flag); -+ desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); - } - - /* -@@ -194,11 +228,15 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) - * file. This saves a few milliseconds when initialising devices with lots - * of MSI-X interrupts. 
- */ --u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag) -+u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag) - { - u32 mask_bits = desc->masked; - unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL; -+ -+ if (pci_msi_ignore_mask) -+ return 0; -+ - mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; - if (flag) - mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; -@@ -207,19 +245,14 @@ u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag) - return mask_bits; - } - --__weak u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag) --{ -- return default_msix_mask_irq(desc, flag); --} -- - static void msix_mask_irq(struct msi_desc *desc, u32 flag) - { -- desc->masked = arch_msix_mask_irq(desc, flag); -+ desc->masked = __pci_msix_desc_mask_irq(desc, flag); - } - - static void msi_set_mask_bit(struct irq_data *data, u32 flag) - { -- struct msi_desc *desc = irq_data_get_msi(data); -+ struct msi_desc *desc = irq_data_get_msi_desc(data); - - if (desc->msi_attrib.is_msix) { - msix_mask_irq(desc, flag); -@@ -230,12 +263,20 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag) - } - } - --void mask_msi_irq(struct irq_data *data) -+/** -+ * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts -+ * @data: pointer to irqdata associated to that interrupt -+ */ -+void pci_msi_mask_irq(struct irq_data *data) - { - msi_set_mask_bit(data, 1); - } - --void unmask_msi_irq(struct irq_data *data) -+/** -+ * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts -+ * @data: pointer to irqdata associated to that interrupt -+ */ -+void pci_msi_unmask_irq(struct irq_data *data) - { - msi_set_mask_bit(data, 0); - } -@@ -244,14 +285,15 @@ void default_restore_msi_irqs(struct pci_dev *dev) - { - struct msi_desc *entry; - -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) - default_restore_msi_irq(dev, entry->irq); -- } - } - --void __read_msi_msg(struct 
msi_desc *entry, struct msi_msg *msg) -+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - { -- BUG_ON(entry->dev->current_state != PCI_D0); -+ struct pci_dev *dev = msi_desc_to_pci_dev(entry); -+ -+ BUG_ON(dev->current_state != PCI_D0); - - if (entry->msi_attrib.is_msix) { - void __iomem *base = entry->mask_base + -@@ -261,7 +303,6 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); - msg->data = readl(base + PCI_MSIX_ENTRY_DATA); - } else { -- struct pci_dev *dev = entry->dev; - int pos = dev->msi_cap; - u16 data; - -@@ -279,34 +320,11 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - } - } - --void read_msi_msg(unsigned int irq, struct msi_msg *msg) --{ -- struct msi_desc *entry = irq_get_msi_desc(irq); -- -- __read_msi_msg(entry, msg); --} -- --void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) --{ -- /* Assert that the cache is valid, assuming that -- * valid messages are not all-zeroes. 
*/ -- BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | -- entry->msg.data)); -- -- *msg = entry->msg; --} -- --void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) -+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - { -- struct msi_desc *entry = irq_get_msi_desc(irq); -- -- __get_cached_msi_msg(entry, msg); --} --EXPORT_SYMBOL_GPL(get_cached_msi_msg); -+ struct pci_dev *dev = msi_desc_to_pci_dev(entry); - --void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) --{ -- if (entry->dev->current_state != PCI_D0) { -+ if (dev->current_state != PCI_D0) { - /* Don't touch the hardware now */ - } else if (entry->msi_attrib.is_msix) { - void __iomem *base; -@@ -317,7 +335,6 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); - writel(msg->data, base + PCI_MSIX_ENTRY_DATA); - } else { -- struct pci_dev *dev = entry->dev; - int pos = dev->msi_cap; - u16 msgctl; - -@@ -341,38 +358,32 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - entry->msg = *msg; - } - --void write_msi_msg(unsigned int irq, struct msi_msg *msg) -+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) - { - struct msi_desc *entry = irq_get_msi_desc(irq); - -- __write_msi_msg(entry, msg); -+ __pci_write_msi_msg(entry, msg); - } --EXPORT_SYMBOL_GPL(write_msi_msg); -+EXPORT_SYMBOL_GPL(pci_write_msi_msg); - - static void free_msi_irqs(struct pci_dev *dev) - { -+ struct list_head *msi_list = dev_to_msi_list(&dev->dev); - struct msi_desc *entry, *tmp; - struct attribute **msi_attrs; - struct device_attribute *dev_attr; -- int count = 0; -+ int i, count = 0; - -- list_for_each_entry(entry, &dev->msi_list, list) { -- int i, nvec; -- if (!entry->irq) -- continue; -- if (entry->nvec_used) -- nvec = entry->nvec_used; -- else -- nvec = 1 << entry->msi_attrib.multiple; -- for (i = 0; i < nvec; i++) -- BUG_ON(irq_has_action(entry->irq + i)); -- } -+ 
for_each_pci_msi_entry(entry, dev) -+ if (entry->irq) -+ for (i = 0; i < entry->nvec_used; i++) -+ BUG_ON(irq_has_action(entry->irq + i)); - -- arch_teardown_msi_irqs(dev); -+ pci_msi_teardown_msi_irqs(dev); - -- list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { -+ list_for_each_entry_safe(entry, tmp, msi_list, list) { - if (entry->msi_attrib.is_msix) { -- if (list_is_last(&entry->list, &dev->msi_list)) -+ if (list_is_last(&entry->list, msi_list)) - iounmap(entry->mask_base); - } - -@@ -397,18 +408,6 @@ static void free_msi_irqs(struct pci_dev *dev) - } - } - --static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) --{ -- struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); -- if (!desc) -- return NULL; -- -- INIT_LIST_HEAD(&desc->list); -- desc->dev = dev; -- -- return desc; --} -- - static void pci_intx_for_msi(struct pci_dev *dev, int enable) - { - if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG)) -@@ -426,7 +425,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev) - entry = irq_get_msi_desc(dev->irq); - - pci_intx_for_msi(dev, 0); -- msi_set_enable(dev, 0); -+ pci_msi_set_enable(dev, 0); - arch_restore_msi_irqs(dev); - - pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); -@@ -443,19 +442,18 @@ static void __pci_restore_msix_state(struct pci_dev *dev) - - if (!dev->msix_enabled) - return; -- BUG_ON(list_empty(&dev->msi_list)); -+ BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); - - /* route the table */ - pci_intx_for_msi(dev, 0); -- msix_clear_and_set_ctrl(dev, 0, -+ pci_msix_clear_and_set_ctrl(dev, 0, - PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); - - arch_restore_msi_irqs(dev); -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) - msix_mask_irq(entry, entry->masked); -- } - -- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); -+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); - } - - void pci_restore_msi_state(struct pci_dev *dev) -@@ 
-497,9 +495,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev) - int count = 0; - - /* Determine how many msi entries we have */ -- list_for_each_entry(entry, &pdev->msi_list, list) { -+ for_each_pci_msi_entry(entry, pdev) - ++num_msi; -- } - if (!num_msi) - return 0; - -@@ -507,7 +504,7 @@ static int populate_msi_sysfs(struct pci_dev *pdev) - msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL); - if (!msi_attrs) - return -ENOMEM; -- list_for_each_entry(entry, &pdev->msi_list, list) { -+ for_each_pci_msi_entry(entry, pdev) { - msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); - if (!msi_dev_attr) - goto error_attrs; -@@ -559,13 +556,13 @@ error_attrs: - return ret; - } - --static struct msi_desc *msi_setup_entry(struct pci_dev *dev) -+static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec) - { - u16 control; - struct msi_desc *entry; - - /* MSI Entry Initialization */ -- entry = alloc_msi_entry(dev); -+ entry = alloc_msi_entry(&dev->dev); - if (!entry) - return NULL; - -@@ -577,6 +574,8 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev) - entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); - entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ - entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; -+ entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); -+ entry->nvec_used = nvec; - - if (control & PCI_MSI_FLAGS_64BIT) - entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; -@@ -594,7 +593,7 @@ static int msi_verify_entries(struct pci_dev *dev) - { - struct msi_desc *entry; - -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - if (!dev->no_64bit_msi || !entry->msg.address_hi) - continue; - dev_err(&dev->dev, "Device has broken 64-bit MSI but arch" -@@ -621,9 +620,9 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) - int ret; - unsigned mask; - -- msi_set_enable(dev, 0); /* Disable MSI during set up */ -+ 
pci_msi_set_enable(dev, 0); /* Disable MSI during set up */ - -- entry = msi_setup_entry(dev); -+ entry = msi_setup_entry(dev, nvec); - if (!entry) - return -ENOMEM; - -@@ -631,10 +630,10 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) - mask = msi_mask(entry->msi_attrib.multi_cap); - msi_mask_irq(entry, mask, mask); - -- list_add_tail(&entry->list, &dev->msi_list); -+ list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); - - /* Configure MSI capability structure */ -- ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); -+ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); - if (ret) { - msi_mask_irq(entry, mask, ~mask); - free_msi_irqs(dev); -@@ -657,7 +656,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) - - /* Set MSI enabled bits */ - pci_intx_for_msi(dev, 0); -- msi_set_enable(dev, 1); -+ pci_msi_set_enable(dev, 1); - dev->msi_enabled = 1; - - dev->irq = entry->irq; -@@ -668,11 +667,16 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) - { - resource_size_t phys_addr; - u32 table_offset; -+ unsigned long flags; - u8 bir; - - pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, - &table_offset); - bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); -+ flags = pci_resource_flags(dev, bir); -+ if (!flags || (flags & IORESOURCE_UNSET)) -+ return NULL; -+ - table_offset &= PCI_MSIX_TABLE_OFFSET; - phys_addr = pci_resource_start(dev, bir) + table_offset; - -@@ -686,7 +690,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, - int i; - - for (i = 0; i < nvec; i++) { -- entry = alloc_msi_entry(dev); -+ entry = alloc_msi_entry(&dev->dev); - if (!entry) { - if (!i) - iounmap(base); -@@ -701,8 +705,9 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, - entry->msi_attrib.entry_nr = entries[i].entry; - entry->msi_attrib.default_irq = dev->irq; - entry->mask_base = base; -+ entry->nvec_used = 1; - -- list_add_tail(&entry->list, &dev->msi_list); -+ 
list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); - } - - return 0; -@@ -714,12 +719,11 @@ static void msix_program_entries(struct pci_dev *dev, - struct msi_desc *entry; - int i = 0; - -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL; - - entries[i].vector = entry->irq; -- irq_set_msi_desc(entry->irq, entry); - entry->masked = readl(entry->mask_base + offset); - msix_mask_irq(entry, 1); - i++; -@@ -744,7 +748,7 @@ static int msix_capability_init(struct pci_dev *dev, - void __iomem *base; - - /* Ensure MSI-X is disabled while it is set up */ -- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); -+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); - - pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); - /* Request & Map MSI-X table region */ -@@ -756,7 +760,7 @@ static int msix_capability_init(struct pci_dev *dev, - if (ret) - return ret; - -- ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); -+ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); - if (ret) - goto out_avail; - -@@ -770,7 +774,7 @@ static int msix_capability_init(struct pci_dev *dev, - * MSI-X registers. We need to mask all the vectors to prevent - * interrupts coming in before they're fully set up. 
- */ -- msix_clear_and_set_ctrl(dev, 0, -+ pci_msix_clear_and_set_ctrl(dev, 0, - PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); - - msix_program_entries(dev, entries); -@@ -783,7 +787,7 @@ static int msix_capability_init(struct pci_dev *dev, - pci_intx_for_msi(dev, 0); - dev->msix_enabled = 1; - -- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); -+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); - - return 0; - -@@ -796,7 +800,7 @@ out_avail: - struct msi_desc *entry; - int avail = 0; - -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - if (entry->irq != 0) - avail++; - } -@@ -885,17 +889,17 @@ void pci_msi_shutdown(struct pci_dev *dev) - if (!pci_msi_enable || !dev || !dev->msi_enabled) - return; - -- BUG_ON(list_empty(&dev->msi_list)); -- desc = list_first_entry(&dev->msi_list, struct msi_desc, list); -+ BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); -+ desc = first_pci_msi_entry(dev); - -- msi_set_enable(dev, 0); -+ pci_msi_set_enable(dev, 0); - pci_intx_for_msi(dev, 1); - dev->msi_enabled = 0; - - /* Return the device with MSI unmasked as initial states */ - mask = msi_mask(desc->msi_attrib.multi_cap); - /* Keep cached state to be restored */ -- arch_msi_mask_irq(desc, mask, ~mask); -+ __pci_msi_desc_mask_irq(desc, mask, ~mask); - - /* Restore dev->irq to its default pin-assertion irq */ - dev->irq = desc->msi_attrib.default_irq; -@@ -991,12 +995,12 @@ void pci_msix_shutdown(struct pci_dev *dev) - return; - - /* Return the device with MSI-X masked as initial states */ -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - /* Keep cached states to be restored */ -- arch_msix_mask_irq(entry, 1); -+ __pci_msix_desc_mask_irq(entry, 1); - } - -- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); -+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); - pci_intx_for_msi(dev, 1); - dev->msix_enabled = 0; - } -@@ -1030,19 +1034,6 @@ 
EXPORT_SYMBOL(pci_msi_enabled); - - void pci_msi_init_pci_dev(struct pci_dev *dev) - { -- INIT_LIST_HEAD(&dev->msi_list); -- -- /* Disable the msi hardware to avoid screaming interrupts -- * during boot. This is the power on reset default so -- * usually this should be a noop. -- */ -- dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); -- if (dev->msi_cap) -- msi_set_enable(dev, 0); -- -- dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); -- if (dev->msix_cap) -- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); - } - - /** -@@ -1138,3 +1129,217 @@ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, - return nvec; - } - EXPORT_SYMBOL(pci_enable_msix_range); -+ -+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) -+{ -+ return to_pci_dev(desc->dev); -+} -+ -+void *msi_desc_to_pci_sysdata(struct msi_desc *desc) -+{ -+ struct pci_dev *dev = msi_desc_to_pci_dev(desc); -+ -+ return dev->bus->sysdata; -+} -+EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata); -+ -+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN -+/** -+ * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space -+ * @irq_data: Pointer to interrupt data of the MSI interrupt -+ * @msg: Pointer to the message -+ */ -+void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg) -+{ -+ struct msi_desc *desc = irq_data->msi_desc; -+ -+ /* -+ * For MSI-X desc->irq is always equal to irq_data->irq. For -+ * MSI only the first interrupt of MULTI MSI passes the test. -+ */ -+ if (desc->irq == irq_data->irq) -+ __pci_write_msi_msg(desc, msg); -+} -+ -+/** -+ * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source -+ * @dev: Pointer to the PCI device -+ * @desc: Pointer to the msi descriptor -+ * -+ * The ID number is only used within the irqdomain. 
-+ */ -+irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, -+ struct msi_desc *desc) -+{ -+ return (irq_hw_number_t)desc->msi_attrib.entry_nr | -+ PCI_DEVID(dev->bus->number, dev->devfn) << 11 | -+ (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; -+} -+ -+static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc) -+{ -+ return !desc->msi_attrib.is_msix && desc->nvec_used > 1; -+} -+ -+/** -+ * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev -+ * @domain: The interrupt domain to check -+ * @info: The domain info for verification -+ * @dev: The device to check -+ * -+ * Returns: -+ * 0 if the functionality is supported -+ * 1 if Multi MSI is requested, but the domain does not support it -+ * -ENOTSUPP otherwise -+ */ -+int pci_msi_domain_check_cap(struct irq_domain *domain, -+ struct msi_domain_info *info, struct device *dev) -+{ -+ struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev)); -+ -+ /* Special handling to support pci_enable_msi_range() */ -+ if (pci_msi_desc_is_multi_msi(desc) && -+ !(info->flags & MSI_FLAG_MULTI_PCI_MSI)) -+ return 1; -+ else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX)) -+ return -ENOTSUPP; -+ -+ return 0; -+} -+ -+static int pci_msi_domain_handle_error(struct irq_domain *domain, -+ struct msi_desc *desc, int error) -+{ -+ /* Special handling to support pci_enable_msi_range() */ -+ if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC) -+ return 1; -+ -+ return error; -+} -+ -+#ifdef GENERIC_MSI_DOMAIN_OPS -+static void pci_msi_domain_set_desc(msi_alloc_info_t *arg, -+ struct msi_desc *desc) -+{ -+ arg->desc = desc; -+ arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc), -+ desc); -+} -+#else -+#define pci_msi_domain_set_desc NULL -+#endif -+ -+static struct msi_domain_ops pci_msi_domain_ops_default = { -+ .set_desc = pci_msi_domain_set_desc, -+ .msi_check = pci_msi_domain_check_cap, -+ .handle_error = pci_msi_domain_handle_error, -+}; -+ 
-+static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info) -+{ -+ struct msi_domain_ops *ops = info->ops; -+ -+ if (ops == NULL) { -+ info->ops = &pci_msi_domain_ops_default; -+ } else { -+ if (ops->set_desc == NULL) -+ ops->set_desc = pci_msi_domain_set_desc; -+ if (ops->msi_check == NULL) -+ ops->msi_check = pci_msi_domain_check_cap; -+ if (ops->handle_error == NULL) -+ ops->handle_error = pci_msi_domain_handle_error; -+ } -+} -+ -+static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) -+{ -+ struct irq_chip *chip = info->chip; -+ -+ BUG_ON(!chip); -+ if (!chip->irq_write_msi_msg) -+ chip->irq_write_msi_msg = pci_msi_domain_write_msg; -+} -+ -+/** -+ * pci_msi_create_irq_domain - Creat a MSI interrupt domain -+ * @node: Optional device-tree node of the interrupt controller -+ * @info: MSI domain info -+ * @parent: Parent irq domain -+ * -+ * Updates the domain and chip ops and creates a MSI interrupt domain. -+ * -+ * Returns: -+ * A domain pointer or NULL in case of failure. 
-+ */ -+struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, -+ struct msi_domain_info *info, -+ struct irq_domain *parent) -+{ -+ struct irq_domain *domain; -+ -+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) -+ pci_msi_domain_update_dom_ops(info); -+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) -+ pci_msi_domain_update_chip_ops(info); -+ -+ domain = msi_create_irq_domain(node, info, parent); -+ if (!domain) -+ return NULL; -+ -+ domain->bus_token = DOMAIN_BUS_PCI_MSI; -+ return domain; -+} -+ -+/** -+ * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain -+ * @domain: The interrupt domain to allocate from -+ * @dev: The device for which to allocate -+ * @nvec: The number of interrupts to allocate -+ * @type: Unused to allow simpler migration from the arch_XXX interfaces -+ * -+ * Returns: -+ * A virtual interrupt number or an error code in case of failure -+ */ -+int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, -+ int nvec, int type) -+{ -+ return msi_domain_alloc_irqs(domain, &dev->dev, nvec); -+} -+ -+/** -+ * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain -+ * @domain: The interrupt domain -+ * @dev: The device for which to free interrupts -+ */ -+void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev) -+{ -+ msi_domain_free_irqs(domain, &dev->dev); -+} -+ -+/** -+ * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain -+ * @node: Optional device-tree node of the interrupt controller -+ * @info: MSI domain info -+ * @parent: Parent irq domain -+ * -+ * Returns: A domain pointer or NULL in case of failure. If successful -+ * the default PCI/MSI irqdomain pointer is updated. 
-+ */ -+struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, -+ struct msi_domain_info *info, struct irq_domain *parent) -+{ -+ struct irq_domain *domain; -+ -+ mutex_lock(&pci_msi_domain_lock); -+ if (pci_msi_default_domain) { -+ pr_err("PCI: default irq domain for PCI MSI has already been created.\n"); -+ domain = NULL; -+ } else { -+ domain = pci_msi_create_irq_domain(node, info, parent); -+ pci_msi_default_domain = domain; -+ } -+ mutex_unlock(&pci_msi_domain_lock); -+ -+ return domain; -+} -+#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ -diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c -index ce0aa47..a6783a5 100644 ---- a/drivers/pci/pci.c -+++ b/drivers/pci/pci.c -@@ -2467,6 +2467,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) - *pinp = pin; - return PCI_SLOT(dev->devfn); - } -+EXPORT_SYMBOL_GPL(pci_common_swizzle); - - /** - * pci_release_region - Release a PCI bar -diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h -index b5defca..df2169e 100644 ---- a/drivers/pci/pci.h -+++ b/drivers/pci/pci.h -@@ -140,6 +140,27 @@ static inline void pci_no_msi(void) { } - static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } - #endif - -+static inline void pci_msi_set_enable(struct pci_dev *dev, int enable) -+{ -+ u16 control; -+ -+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); -+ control &= ~PCI_MSI_FLAGS_ENABLE; -+ if (enable) -+ control |= PCI_MSI_FLAGS_ENABLE; -+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); -+} -+ -+static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) -+{ -+ u16 ctrl; -+ -+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); -+ ctrl &= ~clear; -+ ctrl |= set; -+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); -+} -+ - void pci_realloc_get_opt(char *); - - static inline int pci_no_d1d2(struct pci_dev *dev) -diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c -index 
2f0ce66..95ef171 100644 ---- a/drivers/pci/pcie/portdrv_core.c -+++ b/drivers/pci/pcie/portdrv_core.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - - #include "../pci.h" - #include "portdrv.h" -@@ -199,6 +200,28 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) - static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) - { - int i, irq = -1; -+ int ret; -+ struct device_node *np = NULL; -+ -+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) -+ irqs[i] = 0; -+ -+ if (dev->bus->dev.of_node) -+ np = dev->bus->dev.of_node; -+ -+ /* If root port doesn't support MSI/MSI-X/INTx in RC mode, -+ * request irq for aer -+ */ -+ if (IS_ENABLED(CONFIG_OF_IRQ) && np && -+ (mask & PCIE_PORT_SERVICE_PME)) { -+ ret = of_irq_get_byname(np, "aer"); -+ if (ret > 0) { -+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret; -+ if (dev->irq) -+ irq = dev->irq; -+ goto no_msi; -+ } -+ } - - /* - * If MSI cannot be used for PCIe PME or hotplug, we have to use -@@ -224,11 +247,13 @@ static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) - irq = dev->irq; - - no_msi: -- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) -- irqs[i] = irq; -+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { -+ if (!irqs[i]) -+ irqs[i] = irq; -+ } - irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; - -- if (irq < 0) -+ if (irq < 0 && irqs[PCIE_PORT_SERVICE_AER_SHIFT] < 0) - return -ENODEV; - return 0; - } -diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c -index 3010ffc..0b16384 100644 ---- a/drivers/pci/probe.c -+++ b/drivers/pci/probe.c -@@ -1097,6 +1097,22 @@ int pci_cfg_space_size(struct pci_dev *dev) - - #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) - -+static void pci_msi_setup_pci_dev(struct pci_dev *dev) -+{ -+ /* -+ * Disable the MSI hardware to avoid screaming interrupts -+ * during boot. This is the power on reset default so -+ * usually this should be a noop. 
-+ */ -+ dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); -+ if (dev->msi_cap) -+ pci_msi_set_enable(dev, 0); -+ -+ dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); -+ if (dev->msix_cap) -+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); -+} -+ - /** - * pci_setup_device - fill in class and map information of a device - * @dev: the device structure to fill -@@ -1152,6 +1168,8 @@ int pci_setup_device(struct pci_dev *dev) - /* "Unknown power state" */ - dev->current_state = PCI_UNKNOWN; - -+ pci_msi_setup_pci_dev(dev); -+ - /* Early fixups, before probing the BARs */ - pci_fixup_device(pci_fixup_early, dev); - /* device class may be changed after fixup */ -@@ -1908,7 +1926,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, - int error; - struct pci_host_bridge *bridge; - struct pci_bus *b, *b2; -- struct pci_host_bridge_window *window, *n; -+ struct resource_entry *window, *n; - struct resource *res; - resource_size_t offset; - char bus_addr[64]; -@@ -1972,8 +1990,8 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, - printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev)); - - /* Add initial resources to the bus */ -- list_for_each_entry_safe(window, n, resources, list) { -- list_move_tail(&window->list, &bridge->windows); -+ resource_list_for_each_entry_safe(window, n, resources) { -+ list_move_tail(&window->node, &bridge->windows); - res = window->res; - offset = window->offset; - if (res->flags & IORESOURCE_BUS) -@@ -2006,6 +2024,7 @@ err_out: - kfree(b); - return NULL; - } -+EXPORT_SYMBOL_GPL(pci_create_root_bus); - - int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) - { -@@ -2073,12 +2092,12 @@ void pci_bus_release_busn_res(struct pci_bus *b) - struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, - struct pci_ops *ops, void *sysdata, struct list_head *resources) - { -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - bool 
found = false; - struct pci_bus *b; - int max; - -- list_for_each_entry(window, resources, list) -+ resource_list_for_each_entry(window, resources) - if (window->res->flags & IORESOURCE_BUS) { - found = true; - break; -diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c -index b6d646a..f3681e2 100644 ---- a/drivers/pci/quirks.c -+++ b/drivers/pci/quirks.c -@@ -3516,8 +3516,9 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe) - static void quirk_dma_func0_alias(struct pci_dev *dev) - { - if (PCI_FUNC(dev->devfn) != 0) { -- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); -- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; -+ dev->dma_alias_devid = PCI_DEVID(dev->bus->number, -+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); -+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; - } - } - -@@ -3532,8 +3533,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias); - static void quirk_dma_func1_alias(struct pci_dev *dev) - { - if (PCI_FUNC(dev->devfn) != 1) { -- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1); -- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; -+ dev->dma_alias_devid = PCI_DEVID(dev->bus->number, -+ PCI_DEVFN(PCI_SLOT(dev->devfn), 1)); -+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; - } - } - -diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c -index 8bd76c9..8a280e9 100644 ---- a/drivers/pci/remove.c -+++ b/drivers/pci/remove.c -@@ -139,6 +139,7 @@ void pci_stop_root_bus(struct pci_bus *bus) - /* stop the host bridge */ - device_release_driver(&host_bridge->dev); - } -+EXPORT_SYMBOL_GPL(pci_stop_root_bus); - - void pci_remove_root_bus(struct pci_bus *bus) - { -@@ -158,3 +159,4 @@ void pci_remove_root_bus(struct pci_bus *bus) - /* remove the host bridge */ - device_unregister(&host_bridge->dev); - } -+EXPORT_SYMBOL_GPL(pci_remove_root_bus); -diff --git a/drivers/pci/search.c b/drivers/pci/search.c -index a81f413..a00924f 100644 ---- a/drivers/pci/search.c -+++ b/drivers/pci/search.c -@@ -40,9 
+40,8 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, - * If the device is broken and uses an alias requester ID for - * DMA, iterate over that too. - */ -- if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) { -- ret = fn(pdev, PCI_DEVID(pdev->bus->number, -- pdev->dma_alias_devfn), data); -+ if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID)) { -+ ret = fn(pdev, pdev->dma_alias_devid, data); - if (ret) - return ret; - } -diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c -index e3e17f3..8169597 100644 ---- a/drivers/pci/setup-bus.c -+++ b/drivers/pci/setup-bus.c -@@ -1750,3 +1750,4 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus) - __pci_bus_assign_resources(bus, &add_list, NULL); - BUG_ON(!list_empty(&add_list)); - } -+EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources); -diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c -index 4e2d595..95c225b 100644 ---- a/drivers/pci/setup-irq.c -+++ b/drivers/pci/setup-irq.c -@@ -65,3 +65,4 @@ void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *), - for_each_pci_dev(dev) - pdev_fixup_irq(dev, swizzle, map_irq); - } -+EXPORT_SYMBOL_GPL(pci_fixup_irqs); -diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c -index 116ca37..37d4218 100644 ---- a/drivers/pci/xen-pcifront.c -+++ b/drivers/pci/xen-pcifront.c -@@ -267,7 +267,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev, - } - - i = 0; -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - op.msix_entries[i].entry = entry->msi_attrib.entry_nr; - /* Vector is useless at this point. */ - op.msix_entries[i].vector = -1; -diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig -index f65ff49..b56b084 100644 ---- a/drivers/power/reset/Kconfig -+++ b/drivers/power/reset/Kconfig -@@ -150,5 +150,11 @@ config POWER_RESET_SYSCON - help - Reboot support for generic SYSCON mapped register reset. 
- -+config POWER_RESET_LAYERSCAPE -+ bool "Freescale LayerScape reset driver" -+ depends on ARCH_LAYERSCAPE -+ help -+ Reboot support for the Freescale LayerScape SoCs. -+ - endif - -diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile -index 76ce1c5..d924bdb 100644 ---- a/drivers/power/reset/Makefile -+++ b/drivers/power/reset/Makefile -@@ -17,3 +17,4 @@ obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o - obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o - obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o - obj-$(CONFIG_POWER_RESET_SYSCON) += syscon-reboot.o -+obj-$(CONFIG_POWER_RESET_LAYERSCAPE) += ls-reboot.o -diff --git a/drivers/power/reset/ls-reboot.c b/drivers/power/reset/ls-reboot.c -new file mode 100644 -index 0000000..fa1152c ---- /dev/null -+++ b/drivers/power/reset/ls-reboot.c -@@ -0,0 +1,93 @@ -+/* -+ * Freescale LayerScape reboot driver -+ * -+ * Copyright (c) 2015, Freescale Semiconductor. -+ * Author: Pankaj Chauhan -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct ls_reboot_priv { -+ struct device *dev; -+ u32 *rstcr; -+}; -+ -+static struct ls_reboot_priv *ls_reboot_priv; -+ -+static void ls_reboot(enum reboot_mode reboot_mode, const char *cmd) -+{ -+ struct ls_reboot_priv *priv = ls_reboot_priv; -+ u32 val; -+ unsigned long timeout; -+ -+ if (ls_reboot_priv) { -+ val = readl(priv->rstcr); -+ val |= 0x02; -+ writel(val, priv->rstcr); -+ } -+ -+ timeout = jiffies + HZ; -+ while (time_before(jiffies, timeout)) -+ cpu_relax(); -+ -+} -+ -+static int ls_reboot_probe(struct platform_device *pdev) -+{ -+ ls_reboot_priv = devm_kzalloc(&pdev->dev, -+ sizeof(*ls_reboot_priv), GFP_KERNEL); -+ if (!ls_reboot_priv) { -+ dev_err(&pdev->dev, "out of memory for context\n"); -+ return -ENODEV; -+ } -+ -+ ls_reboot_priv->rstcr = of_iomap(pdev->dev.of_node, 0); -+ if (!ls_reboot_priv->rstcr) { -+ devm_kfree(&pdev->dev, ls_reboot_priv); -+ dev_err(&pdev->dev, "can not map resource\n"); -+ return -ENODEV; -+ } -+ -+ ls_reboot_priv->dev = &pdev->dev; -+ -+ arm_pm_restart = ls_reboot; -+ -+ return 0; -+} -+ -+static struct of_device_id ls_reboot_of_match[] = { -+ { .compatible = "fsl,ls-reset" }, -+ {} -+}; -+ -+static struct platform_driver ls_reboot_driver = { -+ .probe = ls_reboot_probe, -+ .driver = { -+ .name = "ls-reset", -+ .of_match_table = ls_reboot_of_match, -+ }, -+}; -+ -+static int __init ls_reboot_init(void) -+{ -+ return platform_driver_register(&ls_reboot_driver); -+} -+device_initcall(ls_reboot_init); -diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig -index 76d6bd4..d4bcacf 100644 ---- a/drivers/soc/Kconfig -+++ b/drivers/soc/Kconfig -@@ -4,4 +4,17 @@ source "drivers/soc/qcom/Kconfig" - source "drivers/soc/ti/Kconfig" - source "drivers/soc/versatile/Kconfig" - -+config FSL_SOC_DRIVERS -+ bool "Freescale Soc Drivers" -+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE -+ default n -+ help -+ Say y here to enable Freescale Soc 
Device Drivers support. -+ The Soc Drivers provides the device driver that is a specific block -+ or feature on Freescale platform. -+ -+if FSL_SOC_DRIVERS -+ source "drivers/soc/fsl/Kconfig" -+endif -+ - endmenu -diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile -index 063113d..ef82e45 100644 ---- a/drivers/soc/Makefile -+++ b/drivers/soc/Makefile -@@ -6,3 +6,4 @@ obj-$(CONFIG_ARCH_QCOM) += qcom/ - obj-$(CONFIG_ARCH_TEGRA) += tegra/ - obj-$(CONFIG_SOC_TI) += ti/ - obj-$(CONFIG_PLAT_VERSATILE) += versatile/ -+obj-$(CONFIG_FSL_SOC_DRIVERS) += fsl/ -diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig -new file mode 100644 -index 0000000..92a085e ---- /dev/null -+++ b/drivers/soc/fsl/Kconfig -@@ -0,0 +1,6 @@ -+config FSL_GUTS -+ bool -+ -+if ARM || ARM64 -+source "drivers/soc/fsl/Kconfig.arm" -+endif -diff --git a/drivers/soc/fsl/Kconfig.arm b/drivers/soc/fsl/Kconfig.arm -new file mode 100644 -index 0000000..5f2d214 ---- /dev/null -+++ b/drivers/soc/fsl/Kconfig.arm -@@ -0,0 +1,25 @@ -+# -+# Freescale ARM SOC Drivers -+# -+ -+config LS1_SOC_DRIVERS -+ bool "LS1021A Soc Drivers" -+ depends on SOC_LS1021A -+ default n -+ help -+ Say y here to enable Freescale LS1021A Soc Device Drivers support. -+ The Soc Drivers provides the device driver that is a specific block -+ or feature on LS1021A platform. -+ -+config LS_SOC_DRIVERS -+ bool "Layerscape Soc Drivers" -+ depends on ARCH_LAYERSCAPE -+ default n -+ help -+ Say y here to enable Freescale Layerscape Soc Device Drivers support. -+ The Soc Drivers provides the device driver that is a specific block -+ or feature on Layerscape platform. -+ -+if LS1_SOC_DRIVERS -+ source "drivers/soc/fsl/ls1/Kconfig" -+endif -diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile -new file mode 100644 -index 0000000..9fc17b3 ---- /dev/null -+++ b/drivers/soc/fsl/Makefile -@@ -0,0 +1,6 @@ -+# -+# Makefile for Freescale Soc specific device drivers. 
-+# -+ -+obj-$(CONFIG_LS1_SOC_DRIVERS) += ls1/ -+obj-$(CONFIG_FSL_GUTS) += guts.o -diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c -new file mode 100644 -index 0000000..11065c2 ---- /dev/null -+++ b/drivers/soc/fsl/guts.c -@@ -0,0 +1,123 @@ -+/* -+ * Freescale QorIQ Platforms GUTS Driver -+ * -+ * Copyright (C) 2016 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct guts { -+ struct ccsr_guts __iomem *regs; -+ bool little_endian; -+}; -+ -+static struct guts *guts; -+ -+u32 guts_get_svr(void) -+{ -+ u32 svr = 0; -+ -+ if ((!guts) || (!(guts->regs))) { -+#ifdef CONFIG_PPC -+ svr = mfspr(SPRN_SVR); -+#endif -+ return svr; -+ } -+ -+ if (guts->little_endian) -+ svr = ioread32(&guts->regs->svr); -+ else -+ svr = ioread32be(&guts->regs->svr); -+ -+ return svr; -+} -+EXPORT_SYMBOL_GPL(guts_get_svr); -+ -+static int guts_probe(struct platform_device *pdev) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ -+ guts = kzalloc(sizeof(*guts), GFP_KERNEL); -+ if (!guts) -+ return -ENOMEM; -+ -+ if (of_property_read_bool(np, "little-endian")) -+ guts->little_endian = true; -+ else -+ guts->little_endian = false; -+ -+ guts->regs = of_iomap(np, 0); -+ if (!(guts->regs)) -+ return -ENOMEM; -+ -+ of_node_put(np); -+ return 0; -+} -+ -+static int guts_remove(struct platform_device *pdev) -+{ -+ iounmap(guts->regs); -+ kfree(guts); -+ return 0; -+} -+ -+/* -+ * Table for matching compatible strings, for device tree -+ * guts node, for Freescale QorIQ SOCs. 
-+ */ -+static const struct of_device_id guts_of_match[] = { -+ /* For T4 & B4 SOCs */ -+ { .compatible = "fsl,qoriq-device-config-1.0", }, -+ /* For P Series SOCs */ -+ { .compatible = "fsl,qoriq-device-config-2.0", }, -+ { .compatible = "fsl,p1010-guts", }, -+ { .compatible = "fsl,p1020-guts", }, -+ { .compatible = "fsl,p1021-guts", }, -+ { .compatible = "fsl,p1022-guts", }, -+ { .compatible = "fsl,p1023-guts", }, -+ { .compatible = "fsl,p2020-guts", }, -+ /* For BSC Series SOCs */ -+ { .compatible = "fsl,bsc9131-guts", }, -+ { .compatible = "fsl,bsc9132-guts", }, -+ /* For Layerscape Series SOCs */ -+ { .compatible = "fsl,ls1021a-dcfg", }, -+ { .compatible = "fsl,ls1043a-dcfg", }, -+ { .compatible = "fsl,ls2080a-dcfg", }, -+ {} -+}; -+MODULE_DEVICE_TABLE(of, guts_of_match); -+ -+static struct platform_driver guts_driver = { -+ .driver = { -+ .name = "fsl-guts", -+ .of_match_table = guts_of_match, -+ }, -+ .probe = guts_probe, -+ .remove = guts_remove, -+}; -+ -+static int __init guts_drv_init(void) -+{ -+ return platform_driver_register(&guts_driver); -+} -+subsys_initcall(guts_drv_init); -+ -+static void __exit guts_drv_exit(void) -+{ -+ platform_driver_unregister(&guts_driver); -+} -+module_exit(guts_drv_exit); -+ -+MODULE_AUTHOR("Freescale Semiconductor, Inc."); -+MODULE_DESCRIPTION("Freescale QorIQ Platforms GUTS Driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/soc/fsl/ls1/Kconfig b/drivers/soc/fsl/ls1/Kconfig -new file mode 100644 -index 0000000..c9b04c4 ---- /dev/null -+++ b/drivers/soc/fsl/ls1/Kconfig -@@ -0,0 +1,11 @@ -+# -+# LS-1 Soc drivers -+# -+config FTM_ALARM -+ bool "FTM alarm driver" -+ depends on SOC_LS1021A -+ default n -+ help -+ Say y here to enable FTM alarm support. The FTM alarm provides -+ alarm functions for wakeup system from deep sleep. There is only -+ one FTM can be used in ALARM(FTM 0). 
-diff --git a/drivers/soc/fsl/ls1/Makefile b/drivers/soc/fsl/ls1/Makefile -new file mode 100644 -index 0000000..6299aa1 ---- /dev/null -+++ b/drivers/soc/fsl/ls1/Makefile -@@ -0,0 +1 @@ -+obj-$(CONFIG_FTM_ALARM) += ftm_alarm.o -diff --git a/drivers/soc/fsl/ls1/ftm_alarm.c b/drivers/soc/fsl/ls1/ftm_alarm.c -new file mode 100644 -index 0000000..c42b26b ---- /dev/null -+++ b/drivers/soc/fsl/ls1/ftm_alarm.c -@@ -0,0 +1,274 @@ -+/* -+ * Freescale FlexTimer Module (FTM) Alarm driver. -+ * -+ * Copyright 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define FTM_SC 0x00 -+#define FTM_SC_CLK_SHIFT 3 -+#define FTM_SC_CLK_MASK (0x3 << FTM_SC_CLK_SHIFT) -+#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_SHIFT) -+#define FTM_SC_PS_MASK 0x7 -+#define FTM_SC_TOIE BIT(6) -+#define FTM_SC_TOF BIT(7) -+ -+#define FTM_SC_CLKS_FIXED_FREQ 0x02 -+ -+#define FTM_CNT 0x04 -+#define FTM_MOD 0x08 -+#define FTM_CNTIN 0x4C -+ -+#define FIXED_FREQ_CLK 32000 -+#define MAX_FREQ_DIV (1 << FTM_SC_PS_MASK) -+#define MAX_COUNT_VAL 0xffff -+ -+static void __iomem *ftm1_base; -+static u32 alarm_freq; -+static bool big_endian; -+ -+static inline u32 ftm_readl(void __iomem *addr) -+{ -+ if (big_endian) -+ return ioread32be(addr); -+ -+ return ioread32(addr); -+} -+ -+static inline void ftm_writel(u32 val, void __iomem *addr) -+{ -+ if (big_endian) -+ iowrite32be(val, addr); -+ else -+ iowrite32(val, addr); -+} -+ -+static inline void ftm_counter_enable(void __iomem *base) -+{ -+ u32 val; -+ -+ /* select and enable counter clock source */ -+ val = ftm_readl(base + FTM_SC); -+ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); -+ val |= (FTM_SC_PS_MASK | 
FTM_SC_CLK(FTM_SC_CLKS_FIXED_FREQ)); -+ ftm_writel(val, base + FTM_SC); -+} -+ -+static inline void ftm_counter_disable(void __iomem *base) -+{ -+ u32 val; -+ -+ /* disable counter clock source */ -+ val = ftm_readl(base + FTM_SC); -+ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); -+ ftm_writel(val, base + FTM_SC); -+} -+ -+static inline void ftm_irq_acknowledge(void __iomem *base) -+{ -+ u32 val; -+ -+ val = ftm_readl(base + FTM_SC); -+ val &= ~FTM_SC_TOF; -+ ftm_writel(val, base + FTM_SC); -+} -+ -+static inline void ftm_irq_enable(void __iomem *base) -+{ -+ u32 val; -+ -+ val = ftm_readl(base + FTM_SC); -+ val |= FTM_SC_TOIE; -+ ftm_writel(val, base + FTM_SC); -+} -+ -+static inline void ftm_irq_disable(void __iomem *base) -+{ -+ u32 val; -+ -+ val = ftm_readl(base + FTM_SC); -+ val &= ~FTM_SC_TOIE; -+ ftm_writel(val, base + FTM_SC); -+} -+ -+static inline void ftm_reset_counter(void __iomem *base) -+{ -+ /* -+ * The CNT register contains the FTM counter value. -+ * Reset clears the CNT register. Writing any value to COUNT -+ * updates the counter with its initial value, CNTIN. -+ */ -+ ftm_writel(0x00, base + FTM_CNT); -+} -+ -+static u32 time_to_cycle(unsigned long time) -+{ -+ u32 cycle; -+ -+ cycle = time * alarm_freq; -+ if (cycle > MAX_COUNT_VAL) { -+ pr_err("Out of alarm range.\n"); -+ cycle = 0; -+ } -+ -+ return cycle; -+} -+ -+static u32 cycle_to_time(u32 cycle) -+{ -+ return cycle / alarm_freq + 1; -+} -+ -+static void ftm_clean_alarm(void) -+{ -+ ftm_counter_disable(ftm1_base); -+ -+ ftm_writel(0x00, ftm1_base + FTM_CNTIN); -+ ftm_writel(~0UL, ftm1_base + FTM_MOD); -+ -+ ftm_reset_counter(ftm1_base); -+} -+ -+static int ftm_set_alarm(u64 cycle) -+{ -+ ftm_irq_disable(ftm1_base); -+ -+ /* -+ * The counter increments until the value of MOD is reached, -+ * at which point the counter is reloaded with the value of CNTIN. -+ * The TOF (the overflow flag) bit is set when the FTM counter -+ * changes from MOD to CNTIN. So we should using the cycle - 1. 
-+ */ -+ ftm_writel(cycle - 1, ftm1_base + FTM_MOD); -+ -+ ftm_counter_enable(ftm1_base); -+ -+ ftm_irq_enable(ftm1_base); -+ -+ return 0; -+} -+ -+static irqreturn_t ftm_alarm_interrupt(int irq, void *dev_id) -+{ -+ ftm_irq_acknowledge(ftm1_base); -+ ftm_irq_disable(ftm1_base); -+ ftm_clean_alarm(); -+ -+ return IRQ_HANDLED; -+} -+ -+static ssize_t ftm_alarm_show(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ u32 count, val; -+ -+ count = ftm_readl(ftm1_base + FTM_MOD); -+ val = ftm_readl(ftm1_base + FTM_CNT); -+ val = (count & MAX_COUNT_VAL) - val; -+ val = cycle_to_time(val); -+ -+ return sprintf(buf, "%u\n", val); -+} -+ -+static ssize_t ftm_alarm_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ u32 cycle; -+ unsigned long time; -+ -+ if (kstrtoul(buf, 0, &time)) -+ return -EINVAL; -+ -+ ftm_clean_alarm(); -+ -+ cycle = time_to_cycle(time); -+ if (!cycle) -+ return -EINVAL; -+ -+ ftm_set_alarm(cycle); -+ -+ return count; -+} -+ -+static struct device_attribute ftm_alarm_attributes = __ATTR(ftm_alarm, 0644, -+ ftm_alarm_show, ftm_alarm_store); -+ -+static int ftm_alarm_probe(struct platform_device *pdev) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ struct resource *r; -+ int irq; -+ int ret; -+ -+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (!r) -+ return -ENODEV; -+ -+ ftm1_base = devm_ioremap_resource(&pdev->dev, r); -+ if (IS_ERR(ftm1_base)) -+ return PTR_ERR(ftm1_base); -+ -+ irq = irq_of_parse_and_map(np, 0); -+ if (irq <= 0) { -+ pr_err("ftm: unable to get IRQ from DT, %d\n", irq); -+ return -EINVAL; -+ } -+ -+ big_endian = of_property_read_bool(np, "big-endian"); -+ -+ ret = devm_request_irq(&pdev->dev, irq, ftm_alarm_interrupt, -+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), NULL); -+ if (ret < 0) { -+ dev_err(&pdev->dev, "failed to request irq\n"); -+ return ret; -+ } -+ -+ ret = device_create_file(&pdev->dev, &ftm_alarm_attributes); -+ if (ret) { -+ 
dev_err(&pdev->dev, "create sysfs fail.\n"); -+ return ret; -+ } -+ -+ alarm_freq = (u32)FIXED_FREQ_CLK / (u32)MAX_FREQ_DIV; -+ -+ ftm_clean_alarm(); -+ -+ device_init_wakeup(&pdev->dev, true); -+ -+ return ret; -+} -+ -+static const struct of_device_id ftm_alarm_match[] = { -+ { .compatible = "fsl,ftm-alarm", }, -+ { .compatible = "fsl,ftm-timer", }, -+ { }, -+}; -+ -+static struct platform_driver ftm_alarm_driver = { -+ .probe = ftm_alarm_probe, -+ .driver = { -+ .name = "ftm-alarm", -+ .owner = THIS_MODULE, -+ .of_match_table = ftm_alarm_match, -+ }, -+}; -+ -+static int __init ftm_alarm_init(void) -+{ -+ return platform_driver_register(&ftm_alarm_driver); -+} -+device_initcall(ftm_alarm_init); -diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig -index 4690ae9..43ff2b5 100644 ---- a/drivers/staging/Kconfig -+++ b/drivers/staging/Kconfig -@@ -108,4 +108,8 @@ source "drivers/staging/skein/Kconfig" - - source "drivers/staging/unisys/Kconfig" - -+source "drivers/staging/fsl-mc/Kconfig" -+ -+source "drivers/staging/fsl-dpaa2/Kconfig" -+ - endif # STAGING -diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile -index c780a0e..a9bd303 100644 ---- a/drivers/staging/Makefile -+++ b/drivers/staging/Makefile -@@ -46,3 +46,5 @@ obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/ - obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/ - obj-$(CONFIG_CRYPTO_SKEIN) += skein/ - obj-$(CONFIG_UNISYSSPAR) += unisys/ -+obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/ -+obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/ -diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig -new file mode 100644 -index 0000000..3fe47bc ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/Kconfig -@@ -0,0 +1,12 @@ -+# -+# Freescale device configuration -+# -+ -+config FSL_DPAA2 -+ bool "Freescale DPAA2 devices" -+ depends on FSL_MC_BUS -+ ---help--- -+ Build drivers for Freescale DataPath Acceleration Architecture (DPAA2) family of SoCs. -+# TODO move DPIO driver in-here? 
-+source "drivers/staging/fsl-dpaa2/ethernet/Kconfig" -+source "drivers/staging/fsl-dpaa2/mac/Kconfig" -diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile -new file mode 100644 -index 0000000..bc687a1 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/Makefile -@@ -0,0 +1,6 @@ -+# -+# Makefile for the Freescale network device drivers. -+# -+ -+obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/ -+obj-$(CONFIG_FSL_DPAA2_MAC) += mac/ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/Kconfig b/drivers/staging/fsl-dpaa2/ethernet/Kconfig -new file mode 100644 -index 0000000..df91da2 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/Kconfig -@@ -0,0 +1,36 @@ -+# -+# Freescale DPAA Ethernet driver configuration -+# -+# Copyright (C) 2014-2015 Freescale Semiconductor, Inc. -+# -+# This file is released under the GPLv2 -+# -+ -+menuconfig FSL_DPAA2_ETH -+ tristate "Freescale DPAA2 Ethernet" -+ depends on FSL_DPAA2 && FSL_MC_BUS && FSL_MC_DPIO -+ select FSL_DPAA2_MAC -+ default y -+ ---help--- -+ Freescale Data Path Acceleration Architecture Ethernet -+ driver, using the Freescale MC bus driver. -+ -+if FSL_DPAA2_ETH -+ -+config FSL_DPAA2_ETH_USE_ERR_QUEUE -+ bool "Enable Rx error queue" -+ default n -+ ---help--- -+ Allow Rx error frames to be enqueued on an error queue -+ and processed by the driver (by default they are dropped -+ in hardware). -+ This may impact performance, recommended for debugging -+ purposes only. -+ -+config FSL_DPAA2_ETH_DEBUGFS -+ depends on DEBUG_FS && FSL_QBMAN_DEBUG -+ bool "Enable debugfs support" -+ default n -+ ---help--- -+ Enable advanced statistics through debugfs interface. 
-+endif -diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile -new file mode 100644 -index 0000000..74bff15 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile -@@ -0,0 +1,21 @@ -+# -+# Makefile for the Freescale DPAA Ethernet controllers -+# -+# Copyright (C) 2014-2015 Freescale Semiconductor, Inc. -+# -+# This file is released under the GPLv2 -+# -+ -+ccflags-y += -DVERSION=\"\" -+ -+obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o -+ -+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o -+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o -+ -+#Needed by the tracing framework -+CFLAGS_dpaa2-eth.o := -I$(src) -+ -+ifeq ($(CONFIG_FSL_DPAA2_ETH_GCOV),y) -+ GCOV_PROFILE := y -+endif -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c -new file mode 100644 -index 0000000..c397983 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c -@@ -0,0 +1,317 @@ -+ -+/* Copyright 2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include "dpaa2-eth.h" -+#include "dpaa2-eth-debugfs.h" -+ -+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth" -+ -+static struct dentry *dpaa2_dbg_root; -+ -+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; -+ struct rtnl_link_stats64 *stats; -+ struct dpaa2_eth_drv_stats *extras; -+ int i; -+ -+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name); -+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n", -+ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf", -+ "Tx SG", "Enq busy"); -+ -+ for_each_online_cpu(i) { -+ stats = per_cpu_ptr(priv->percpu_stats, i); -+ extras = per_cpu_ptr(priv->percpu_extras, i); -+ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", -+ i, -+ stats->rx_packets, -+ stats->rx_errors, -+ extras->rx_sg_frames, -+ stats->tx_packets, -+ stats->tx_errors, -+ extras->tx_conf_frames, -+ extras->tx_sg_frames, -+ extras->tx_portal_busy); -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; -+ -+ err = single_open(file, dpaa2_dbg_cpu_show, priv); -+ if (err < 0) -+ netdev_err(priv->net_dev, "single_open() failed\n"); -+ -+ return err; -+} -+ -+static const struct file_operations dpaa2_dbg_cpu_ops = { -+ .open = dpaa2_dbg_cpu_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static char *fq_type_to_str(struct dpaa2_eth_fq *fq) -+{ -+ switch (fq->type) { -+ case DPAA2_RX_FQ: -+ return "Rx"; -+ case DPAA2_TX_CONF_FQ: -+ return "Tx conf"; -+ case DPAA2_RX_ERR_FQ: -+ return "Rx err"; -+ default: -+ return "N/A"; -+ } -+} -+ -+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; -+ struct dpaa2_eth_fq *fq; -+ u32 fcnt, bcnt; -+ 
int i, err; -+ -+ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name); -+ seq_printf(file, "%s%16s%16s%16s%16s\n", -+ "VFQID", "CPU", "Type", "Frames", "Pending frames"); -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); -+ if (err) -+ fcnt = 0; -+ -+ seq_printf(file, "%5d%16d%16s%16llu%16u\n", -+ fq->fqid, -+ fq->target_cpu, -+ fq_type_to_str(fq), -+ fq->stats.frames, -+ fcnt); -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; -+ -+ err = single_open(file, dpaa2_dbg_fqs_show, priv); -+ if (err < 0) -+ netdev_err(priv->net_dev, "single_open() failed\n"); -+ -+ return err; -+} -+ -+static const struct file_operations dpaa2_dbg_fq_ops = { -+ .open = dpaa2_dbg_fqs_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); -+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n", -+ "CHID", "CPU", "Deq busy", "Frames", "CDANs", -+ "Avg frm/CDAN"); -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n", -+ ch->ch_id, -+ ch->nctx.desired_cpu, -+ ch->stats.dequeue_portal_busy, -+ ch->stats.frames, -+ ch->stats.cdan, -+ ch->stats.frames / ch->stats.cdan); -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; -+ -+ err = single_open(file, dpaa2_dbg_ch_show, priv); -+ if (err < 0) -+ netdev_err(priv->net_dev, "single_open() failed\n"); -+ -+ return err; -+} -+ 
-+static const struct file_operations dpaa2_dbg_ch_ops = { -+ .open = dpaa2_dbg_ch_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf, -+ size_t count, loff_t *offset) -+{ -+ struct dpaa2_eth_priv *priv = file->private_data; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_drv_stats *percpu_extras; -+ struct dpaa2_eth_fq *fq; -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ for_each_online_cpu(i) { -+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i); -+ memset(percpu_stats, 0, sizeof(*percpu_stats)); -+ -+ percpu_extras = per_cpu_ptr(priv->percpu_extras, i); -+ memset(percpu_extras, 0, sizeof(*percpu_extras)); -+ } -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ memset(&fq->stats, 0, sizeof(fq->stats)); -+ } -+ -+ for_each_cpu(i, &priv->dpio_cpumask) { -+ ch = priv->channel[i]; -+ memset(&ch->stats, 0, sizeof(ch->stats)); -+ } -+ -+ return count; -+} -+ -+static const struct file_operations dpaa2_dbg_reset_ops = { -+ .open = simple_open, -+ .write = dpaa2_dbg_reset_write, -+}; -+ -+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) -+{ -+ if (!dpaa2_dbg_root) -+ return; -+ -+ /* Create a directory for the interface */ -+ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name, -+ dpaa2_dbg_root); -+ if (!priv->dbg.dir) { -+ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n"); -+ return; -+ } -+ -+ /* per-cpu stats file */ -+ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", S_IRUGO, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_cpu_ops); -+ if (!priv->dbg.cpu_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_cpu_stats; -+ } -+ -+ /* per-fq stats file */ -+ priv->dbg.fq_stats = debugfs_create_file("fq_stats", S_IRUGO, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_fq_ops); -+ if (!priv->dbg.fq_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_fq_stats; 
-+ } -+ -+ /* per-fq stats file */ -+ priv->dbg.ch_stats = debugfs_create_file("ch_stats", S_IRUGO, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_ch_ops); -+ if (!priv->dbg.fq_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_ch_stats; -+ } -+ -+ /* reset stats */ -+ priv->dbg.reset_stats = debugfs_create_file("reset_stats", S_IWUSR, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_reset_ops); -+ if (!priv->dbg.reset_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_reset_stats; -+ } -+ -+ return; -+ -+err_reset_stats: -+ debugfs_remove(priv->dbg.ch_stats); -+err_ch_stats: -+ debugfs_remove(priv->dbg.fq_stats); -+err_fq_stats: -+ debugfs_remove(priv->dbg.cpu_stats); -+err_cpu_stats: -+ debugfs_remove(priv->dbg.dir); -+} -+ -+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) -+{ -+ debugfs_remove(priv->dbg.reset_stats); -+ debugfs_remove(priv->dbg.fq_stats); -+ debugfs_remove(priv->dbg.ch_stats); -+ debugfs_remove(priv->dbg.cpu_stats); -+ debugfs_remove(priv->dbg.dir); -+} -+ -+void dpaa2_eth_dbg_init(void) -+{ -+ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL); -+ if (!dpaa2_dbg_root) { -+ pr_err("DPAA2-ETH: debugfs create failed\n"); -+ return; -+ } -+ -+ pr_info("DPAA2-ETH: debugfs created\n"); -+} -+ -+void __exit dpaa2_eth_dbg_exit(void) -+{ -+ debugfs_remove(dpaa2_dbg_root); -+} -+ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h -new file mode 100644 -index 0000000..7ba706c ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h -@@ -0,0 +1,61 @@ -+/* Copyright 2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef DPAA2_ETH_DEBUGFS_H -+#define DPAA2_ETH_DEBUGFS_H -+ -+#include -+#include "dpaa2-eth.h" -+ -+extern struct dpaa2_eth_priv *priv; -+ -+struct dpaa2_debugfs { -+ struct dentry *dir; -+ struct dentry *fq_stats; -+ struct dentry *ch_stats; -+ struct dentry *cpu_stats; -+ struct dentry *reset_stats; -+}; -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS -+void dpaa2_eth_dbg_init(void); -+void dpaa2_eth_dbg_exit(void); -+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv); -+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv); -+#else -+static inline void dpaa2_eth_dbg_init(void) {} -+static inline void dpaa2_eth_dbg_exit(void) {} -+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {} -+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {} -+#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */ -+ -+#endif /* DPAA2_ETH_DEBUGFS_H */ -+ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h -new file mode 100644 -index 0000000..3b040e8 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h -@@ -0,0 +1,185 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#undef TRACE_SYSTEM -+#define TRACE_SYSTEM dpaa2_eth -+ -+#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -+#define _DPAA2_ETH_TRACE_H -+ -+#include -+#include -+#include "dpaa2-eth.h" -+#include -+ -+#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u" -+/* trace_printk format for raw buffer event class */ -+#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d" -+ -+/* This is used to declare a class of events. -+ * individual events of this type will be defined below. -+ */ -+ -+/* Store details about a frame descriptor */ -+DECLARE_EVENT_CLASS(dpaa2_eth_fd, -+ /* Trace function prototype */ -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ /* Repeat argument list here */ -+ TP_ARGS(netdev, fd), -+ -+ /* A structure containing the relevant information we want -+ * to record. Declare name and type for each normal element, -+ * name, type and size for arrays. 
Use __string for variable -+ * length strings. -+ */ -+ TP_STRUCT__entry( -+ __field(u64, fd_addr) -+ __field(u32, fd_len) -+ __field(u16, fd_offset) -+ __string(name, netdev->name) -+ ), -+ -+ /* The function that assigns values to the above declared -+ * fields -+ */ -+ TP_fast_assign( -+ __entry->fd_addr = dpaa2_fd_get_addr(fd); -+ __entry->fd_len = dpaa2_fd_get_len(fd); -+ __entry->fd_offset = dpaa2_fd_get_offset(fd); -+ __assign_str(name, netdev->name); -+ ), -+ -+ /* This is what gets printed when the trace event is -+ * triggered. -+ */ -+ TP_printk(TR_FMT, -+ __get_str(name), -+ __entry->fd_addr, -+ __entry->fd_len, -+ __entry->fd_offset) -+); -+ -+/* Now declare events of the above type. Format is: -+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class -+ */ -+ -+/* Tx (egress) fd */ -+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ TP_ARGS(netdev, fd) -+); -+ -+/* Rx fd */ -+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ TP_ARGS(netdev, fd) -+); -+ -+/* Tx confirmation fd */ -+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ TP_ARGS(netdev, fd) -+); -+ -+/* Log data about raw buffers. Useful for tracing DPBP content. */ -+TRACE_EVENT(dpaa2_eth_buf_seed, -+ /* Trace function prototype */ -+ TP_PROTO(struct net_device *netdev, -+ /* virtual address and size */ -+ void *vaddr, -+ size_t size, -+ /* dma map address and size */ -+ dma_addr_t dma_addr, -+ size_t map_size, -+ /* buffer pool id, if relevant */ -+ u16 bpid), -+ -+ /* Repeat argument list here */ -+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), -+ -+ /* A structure containing the relevant information we want -+ * to record. Declare name and type for each normal element, -+ * name, type and size for arrays. Use __string for variable -+ * length strings. 
-+ */ -+ TP_STRUCT__entry( -+ __field(void *, vaddr) -+ __field(size_t, size) -+ __field(dma_addr_t, dma_addr) -+ __field(size_t, map_size) -+ __field(u16, bpid) -+ __string(name, netdev->name) -+ ), -+ -+ /* The function that assigns values to the above declared -+ * fields -+ */ -+ TP_fast_assign( -+ __entry->vaddr = vaddr; -+ __entry->size = size; -+ __entry->dma_addr = dma_addr; -+ __entry->map_size = map_size; -+ __entry->bpid = bpid; -+ __assign_str(name, netdev->name); -+ ), -+ -+ /* This is what gets printed when the trace event is -+ * triggered. -+ */ -+ TP_printk(TR_BUF_FMT, -+ __get_str(name), -+ __entry->vaddr, -+ __entry->size, -+ &__entry->dma_addr, -+ __entry->map_size, -+ __entry->bpid) -+); -+ -+/* If only one event of a certain type needs to be declared, use TRACE_EVENT(). -+ * The syntax is the same as for DECLARE_EVENT_CLASS(). -+ */ -+ -+#endif /* _DPAA2_ETH_TRACE_H */ -+ -+/* This must be outside ifdef _DPAA2_ETH_TRACE_H */ -+#undef TRACE_INCLUDE_PATH -+#define TRACE_INCLUDE_PATH . -+#undef TRACE_INCLUDE_FILE -+#define TRACE_INCLUDE_FILE dpaa2-eth-trace -+#include -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -new file mode 100644 -index 0000000..cb52ede ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -@@ -0,0 +1,2957 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "../../fsl-mc/include/mc.h" -+#include "../../fsl-mc/include/mc-sys.h" -+#include "dpaa2-eth.h" -+ -+/* CREATE_TRACE_POINTS only needs to be defined once. 
Other dpa files -+ * using trace events only need to #include -+ */ -+#define CREATE_TRACE_POINTS -+#include "dpaa2-eth-trace.h" -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_AUTHOR("Freescale Semiconductor, Inc"); -+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); -+ -+static void validate_rx_csum(struct dpaa2_eth_priv *priv, -+ u32 fd_status, -+ struct sk_buff *skb) -+{ -+ skb_checksum_none_assert(skb); -+ -+ /* HW checksum validation is disabled, nothing to do here */ -+ if (!(priv->net_dev->features & NETIF_F_RXCSUM)) -+ return; -+ -+ /* Read checksum validation bits */ -+ if (!((fd_status & DPAA2_FAS_L3CV) && -+ (fd_status & DPAA2_FAS_L4CV))) -+ return; -+ -+ /* Inform the stack there's no need to compute L3/L4 csum anymore */ -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+} -+ -+/* Free a received FD. -+ * Not to be used for Tx conf FDs or on any other paths. -+ */ -+static void free_rx_fd(struct dpaa2_eth_priv *priv, -+ const struct dpaa2_fd *fd, -+ void *vaddr) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ dma_addr_t addr = dpaa2_fd_get_addr(fd); -+ u8 fd_format = dpaa2_fd_get_format(fd); -+ struct dpaa2_sg_entry *sgt; -+ void *sg_vaddr; -+ int i; -+ -+ /* If single buffer frame, just free the data buffer */ -+ if (fd_format == dpaa2_fd_single) -+ goto free_buf; -+ -+ /* For S/G frames, we first need to free all SG entries */ -+ sgt = vaddr + dpaa2_fd_get_offset(fd); -+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { -+ dpaa2_sg_le_to_cpu(&sgt[i]); -+ -+ addr = dpaa2_sg_get_addr(&sgt[i]); -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, -+ DMA_FROM_DEVICE); -+ -+ sg_vaddr = phys_to_virt(addr); -+ put_page(virt_to_head_page(sg_vaddr)); -+ -+ if (dpaa2_sg_is_final(&sgt[i])) -+ break; -+ } -+ -+free_buf: -+ put_page(virt_to_head_page(vaddr)); -+} -+ -+/* Build a linear skb based on a single-buffer frame descriptor */ -+static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct 
dpaa2_fd *fd, -+ void *fd_vaddr) -+{ -+ struct sk_buff *skb = NULL; -+ u16 fd_offset = dpaa2_fd_get_offset(fd); -+ u32 fd_length = dpaa2_fd_get_len(fd); -+ -+ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE + -+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); -+ if (unlikely(!skb)) -+ return NULL; -+ -+ skb_reserve(skb, fd_offset); -+ skb_put(skb, fd_length); -+ -+ ch->buf_count--; -+ -+ return skb; -+} -+ -+/* Build a non linear (fragmented) skb based on a S/G table */ -+static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ struct dpaa2_sg_entry *sgt) -+{ -+ struct sk_buff *skb = NULL; -+ struct device *dev = priv->net_dev->dev.parent; -+ void *sg_vaddr; -+ dma_addr_t sg_addr; -+ u16 sg_offset; -+ u32 sg_length; -+ struct page *page, *head_page; -+ int page_offset; -+ int i; -+ -+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { -+ struct dpaa2_sg_entry *sge = &sgt[i]; -+ -+ dpaa2_sg_le_to_cpu(sge); -+ -+ /* NOTE: We only support SG entries in dpaa2_sg_single format, -+ * but this is the only format we may receive from HW anyway -+ */ -+ -+ /* Get the address and length from the S/G entry */ -+ sg_addr = dpaa2_sg_get_addr(sge); -+ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, -+ DMA_FROM_DEVICE); -+ -+ sg_vaddr = phys_to_virt(sg_addr); -+ sg_length = dpaa2_sg_get_len(sge); -+ -+ if (i == 0) { -+ /* We build the skb around the first data buffer */ -+ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE + -+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); -+ if (unlikely(!skb)) -+ return NULL; -+ -+ sg_offset = dpaa2_sg_get_offset(sge); -+ skb_reserve(skb, sg_offset); -+ skb_put(skb, sg_length); -+ } else { -+ /* Rest of the data buffers are stored as skb frags */ -+ page = virt_to_page(sg_vaddr); -+ head_page = virt_to_head_page(sg_vaddr); -+ -+ /* Offset in page (which may be compound). 
-+ * Data in subsequent SG entries is stored from the -+ * beginning of the buffer, so we don't need to add the -+ * sg_offset. -+ */ -+ page_offset = ((unsigned long)sg_vaddr & -+ (PAGE_SIZE - 1)) + -+ (page_address(page) - page_address(head_page)); -+ -+ skb_add_rx_frag(skb, i - 1, head_page, page_offset, -+ sg_length, DPAA2_ETH_RX_BUF_SIZE); -+ } -+ -+ if (dpaa2_sg_is_final(sge)) -+ break; -+ } -+ -+ /* Count all data buffers + SG table buffer */ -+ ch->buf_count -= i + 2; -+ -+ return skb; -+} -+ -+/* Main Rx frame processing routine */ -+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ struct napi_struct *napi) -+{ -+ dma_addr_t addr = dpaa2_fd_get_addr(fd); -+ u8 fd_format = dpaa2_fd_get_format(fd); -+ void *vaddr; -+ struct sk_buff *skb; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_drv_stats *percpu_extras; -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpaa2_fas *fas; -+ u32 status = 0; -+ -+ /* Tracing point */ -+ trace_dpaa2_rx_fd(priv->net_dev, fd); -+ -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); -+ vaddr = phys_to_virt(addr); -+ -+ prefetch(vaddr + priv->buf_layout.private_data_size); -+ prefetch(vaddr + dpaa2_fd_get_offset(fd)); -+ -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); -+ percpu_extras = this_cpu_ptr(priv->percpu_extras); -+ -+ if (fd_format == dpaa2_fd_single) { -+ skb = build_linear_skb(priv, ch, fd, vaddr); -+ } else if (fd_format == dpaa2_fd_sg) { -+ struct dpaa2_sg_entry *sgt = -+ vaddr + dpaa2_fd_get_offset(fd); -+ skb = build_frag_skb(priv, ch, sgt); -+ put_page(virt_to_head_page(vaddr)); -+ percpu_extras->rx_sg_frames++; -+ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); -+ } else { -+ /* We don't support any other format */ -+ goto err_frame_format; -+ } -+ -+ if (unlikely(!skb)) -+ goto err_build_skb; -+ -+ prefetch(skb->data); -+ -+ /* Get the timestamp value */ -+ if (priv->ts_rx_en) { -+ struct 
skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); -+ u64 *ns = (u64 *)(vaddr + -+ priv->buf_layout.private_data_size + -+ sizeof(struct dpaa2_fas)); -+ -+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); -+ memset(shhwtstamps, 0, sizeof(*shhwtstamps)); -+ shhwtstamps->hwtstamp = ns_to_ktime(*ns); -+ } -+ -+ /* Check if we need to validate the L4 csum */ -+ if (likely(fd->simple.frc & DPAA2_FD_FRC_FASV)) { -+ fas = (struct dpaa2_fas *) -+ (vaddr + priv->buf_layout.private_data_size); -+ status = le32_to_cpu(fas->status); -+ validate_rx_csum(priv, status, skb); -+ } -+ -+ skb->protocol = eth_type_trans(skb, priv->net_dev); -+ -+ percpu_stats->rx_packets++; -+ percpu_stats->rx_bytes += skb->len; -+ -+ if (priv->net_dev->features & NETIF_F_GRO) -+ napi_gro_receive(napi, skb); -+ else -+ netif_receive_skb(skb); -+ -+ return; -+err_frame_format: -+err_build_skb: -+ free_rx_fd(priv, fd, vaddr); -+ percpu_stats->rx_dropped++; -+} -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+/* Processing of Rx frames received on the error FQ -+ * We check and print the error bits and then free the frame -+ */ -+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ struct napi_struct *napi __always_unused) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ dma_addr_t addr = dpaa2_fd_get_addr(fd); -+ void *vaddr; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_fas *fas; -+ u32 status = 0; -+ -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); -+ vaddr = phys_to_virt(addr); -+ -+ if (fd->simple.frc & DPAA2_FD_FRC_FASV) { -+ fas = (struct dpaa2_fas *) -+ (vaddr + priv->buf_layout.private_data_size); -+ status = le32_to_cpu(fas->status); -+ if (net_ratelimit()) -+ netdev_warn(priv->net_dev, "Rx frame error: 0x%08x\n", -+ status & DPAA2_ETH_RX_ERR_MASK); -+ } -+ free_rx_fd(priv, fd, vaddr); -+ -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); -+ 
percpu_stats->rx_errors++; -+} -+#endif -+ -+/* Consume all frames pull-dequeued into the store. This is the simplest way to -+ * make sure we don't accidentally issue another volatile dequeue which would -+ * overwrite (leak) frames already in the store. -+ * -+ * Observance of NAPI budget is not our concern, leaving that to the caller. -+ */ -+static int consume_frames(struct dpaa2_eth_channel *ch) -+{ -+ struct dpaa2_eth_priv *priv = ch->priv; -+ struct dpaa2_eth_fq *fq; -+ struct dpaa2_dq *dq; -+ const struct dpaa2_fd *fd; -+ int cleaned = 0; -+ int is_last; -+ -+ do { -+ dq = dpaa2_io_store_next(ch->store, &is_last); -+ if (unlikely(!dq)) { -+ /* If we're here, we *must* have placed a -+ * volatile dequeue comnmand, so keep reading through -+ * the store until we get some sort of valid response -+ * token (either a valid frame or an "empty dequeue") -+ */ -+ continue; -+ } -+ -+ fd = dpaa2_dq_fd(dq); -+ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq); -+ fq->stats.frames++; -+ -+ fq->consume(priv, ch, fd, &ch->napi); -+ cleaned++; -+ } while (!is_last); -+ -+ return cleaned; -+} -+ -+/* Configure the egress frame annotation for timestamp update */ -+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *hwa_start) -+{ -+ struct dpaa2_faead *faead; -+ u32 ctrl; -+ u32 frc; -+ -+ /* Mark the egress frame annotation area as valid */ -+ frc = dpaa2_fd_get_frc(fd); -+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); -+ -+ /* enable UPD (update prepanded data) bit in FAEAD field of -+ * hardware frame annotation area -+ */ -+ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; -+ faead = hwa_start + DPAA2_FAEAD_OFFSET; -+ faead->ctrl = cpu_to_le32(ctrl); -+} -+ -+/* Create a frame descriptor based on a fragmented skb */ -+static int build_sg_fd(struct dpaa2_eth_priv *priv, -+ struct sk_buff *skb, -+ struct dpaa2_fd *fd) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ void *sgt_buf = NULL; -+ void *hwa; -+ dma_addr_t addr; -+ int nr_frags = 
skb_shinfo(skb)->nr_frags; -+ struct dpaa2_sg_entry *sgt; -+ int i, j, err; -+ int sgt_buf_size; -+ struct scatterlist *scl, *crt_scl; -+ int num_sg; -+ int num_dma_bufs; -+ struct dpaa2_eth_swa *swa; -+ -+ /* Create and map scatterlist. -+ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have -+ * to go beyond nr_frags+1. -+ * Note: We don't support chained scatterlists -+ */ -+ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) -+ return -EINVAL; -+ -+ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); -+ if (unlikely(!scl)) -+ return -ENOMEM; -+ -+ sg_init_table(scl, nr_frags + 1); -+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len); -+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE); -+ if (unlikely(!num_dma_bufs)) { -+ err = -ENOMEM; -+ goto dma_map_sg_failed; -+ } -+ -+ /* Prepare the HW SGT structure */ -+ sgt_buf_size = priv->tx_data_offset + -+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); -+ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC); -+ if (unlikely(!sgt_buf)) { -+ err = -ENOMEM; -+ goto sgt_buf_alloc_failed; -+ } -+ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); -+ -+ /* PTA from egress side is passed as is to the confirmation side so -+ * we need to clear some fields here in order to find consistent values -+ * on TX confirmation. We are clearing FAS (Frame Annotation Status) -+ * field here. -+ */ -+ hwa = sgt_buf + priv->buf_layout.private_data_size; -+ memset(hwa, 0, 8); -+ -+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); -+ -+ /* Fill in the HW SGT structure. 
-+ * -+ * sgt_buf is zeroed out, so the following fields are implicit -+ * in all sgt entries: -+ * - offset is 0 -+ * - format is 'dpaa2_sg_single' -+ */ -+ for_each_sg(scl, crt_scl, num_dma_bufs, i) { -+ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); -+ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); -+ } -+ dpaa2_sg_set_final(&sgt[i - 1], true); -+ -+ /* Store the skb backpointer in the SGT buffer. -+ * Fit the scatterlist and the number of buffers alongside the -+ * skb backpointer in the SWA. We'll need all of them on Tx Conf. -+ */ -+ swa = (struct dpaa2_eth_swa *)sgt_buf; -+ swa->skb = skb; -+ swa->scl = scl; -+ swa->num_sg = num_sg; -+ swa->num_dma_bufs = num_dma_bufs; -+ -+ /* Hardware expects the SG table to be in little endian format */ -+ for (j = 0; j < i; j++) -+ dpaa2_sg_cpu_to_le(&sgt[j]); -+ -+ /* Separately map the SGT buffer */ -+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(dev, addr))) { -+ err = -ENOMEM; -+ goto dma_map_single_failed; -+ } -+ dpaa2_fd_set_offset(fd, priv->tx_data_offset); -+ dpaa2_fd_set_format(fd, dpaa2_fd_sg); -+ dpaa2_fd_set_addr(fd, addr); -+ dpaa2_fd_set_len(fd, skb->len); -+ -+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | -+ DPAA2_FD_CTRL_PTV1; -+ -+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) -+ enable_tx_tstamp(fd, hwa); -+ -+ return 0; -+ -+dma_map_single_failed: -+ kfree(sgt_buf); -+sgt_buf_alloc_failed: -+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); -+dma_map_sg_failed: -+ kfree(scl); -+ return err; -+} -+ -+/* Create a frame descriptor based on a linear skb */ -+static int build_single_fd(struct dpaa2_eth_priv *priv, -+ struct sk_buff *skb, -+ struct dpaa2_fd *fd) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ u8 *buffer_start; -+ struct sk_buff **skbh; -+ dma_addr_t addr; -+ void *hwa; -+ -+ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset - -+ DPAA2_ETH_TX_BUF_ALIGN, -+ DPAA2_ETH_TX_BUF_ALIGN); 
-+ -+ /* PTA from egress side is passed as is to the confirmation side so -+ * we need to clear some fields here in order to find consistent values -+ * on TX confirmation. We are clearing FAS (Frame Annotation Status) -+ * field here -+ */ -+ hwa = buffer_start + priv->buf_layout.private_data_size; -+ memset(hwa, 0, 8); -+ -+ /* Store a backpointer to the skb at the beginning of the buffer -+ * (in the private data area) such that we can release it -+ * on Tx confirm -+ */ -+ skbh = (struct sk_buff **)buffer_start; -+ *skbh = skb; -+ -+ addr = dma_map_single(dev, buffer_start, -+ skb_tail_pointer(skb) - buffer_start, -+ DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(dev, addr))) -+ return -ENOMEM; -+ -+ dpaa2_fd_set_addr(fd, addr); -+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); -+ dpaa2_fd_set_len(fd, skb->len); -+ dpaa2_fd_set_format(fd, dpaa2_fd_single); -+ -+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | -+ DPAA2_FD_CTRL_PTV1; -+ -+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) -+ enable_tx_tstamp(fd, hwa); -+ -+ return 0; -+} -+ -+/* FD freeing routine on the Tx path -+ * -+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb -+ * back-pointed to is also freed. -+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of -+ * dpaa2_eth_tx(). -+ * Optionally, return the frame annotation status word (FAS), which needs -+ * to be checked if we're on the confirmation path. 
-+ */ -+static void free_tx_fd(const struct dpaa2_eth_priv *priv, -+ const struct dpaa2_fd *fd, -+ u32 *status) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ dma_addr_t fd_addr; -+ struct sk_buff **skbh, *skb; -+ unsigned char *buffer_start; -+ int unmap_size; -+ struct scatterlist *scl; -+ int num_sg, num_dma_bufs; -+ struct dpaa2_eth_swa *swa; -+ bool fd_single; -+ struct dpaa2_fas *fas; -+ -+ fd_addr = dpaa2_fd_get_addr(fd); -+ skbh = phys_to_virt(fd_addr); -+ fd_single = (dpaa2_fd_get_format(fd) == dpaa2_fd_single); -+ -+ if (fd_single) { -+ skb = *skbh; -+ buffer_start = (unsigned char *)skbh; -+ /* Accessing the skb buffer is safe before dma unmap, because -+ * we didn't map the actual skb shell. -+ */ -+ dma_unmap_single(dev, fd_addr, -+ skb_tail_pointer(skb) - buffer_start, -+ DMA_TO_DEVICE); -+ } else { -+ swa = (struct dpaa2_eth_swa *)skbh; -+ skb = swa->skb; -+ scl = swa->scl; -+ num_sg = swa->num_sg; -+ num_dma_bufs = swa->num_dma_bufs; -+ -+ /* Unmap the scatterlist */ -+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); -+ kfree(scl); -+ -+ /* Unmap the SGT buffer */ -+ unmap_size = priv->tx_data_offset + -+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); -+ dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE); -+ } -+ -+ /* Get the timestamp value */ -+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { -+ struct skb_shared_hwtstamps shhwtstamps; -+ u64 *ns; -+ -+ memset(&shhwtstamps, 0, sizeof(shhwtstamps)); -+ -+ ns = (u64 *)((void *)skbh + -+ priv->buf_layout.private_data_size + -+ sizeof(struct dpaa2_fas)); -+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); -+ shhwtstamps.hwtstamp = ns_to_ktime(*ns); -+ skb_tstamp_tx(skb, &shhwtstamps); -+ } -+ -+ /* Read the status from the Frame Annotation after we unmap the first -+ * buffer but before we free it. The caller function is responsible -+ * for checking the status value. 
-+ */ -+ if (status && (fd->simple.frc & DPAA2_FD_FRC_FASV)) { -+ fas = (struct dpaa2_fas *) -+ ((void *)skbh + priv->buf_layout.private_data_size); -+ *status = le32_to_cpu(fas->status); -+ } -+ -+ /* Free SGT buffer kmalloc'ed on tx */ -+ if (!fd_single) -+ kfree(skbh); -+ -+ /* Move on with skb release */ -+ dev_kfree_skb(skb); -+} -+ -+static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct dpaa2_fd fd; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_drv_stats *percpu_extras; -+ u16 queue_mapping, flow_id; -+ int err, i; -+ -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); -+ percpu_extras = this_cpu_ptr(priv->percpu_extras); -+ -+ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) { -+ struct sk_buff *ns; -+ -+ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv)); -+ if (unlikely(!ns)) { -+ percpu_stats->tx_dropped++; -+ goto err_alloc_headroom; -+ } -+ dev_kfree_skb(skb); -+ skb = ns; -+ } -+ -+ /* We'll be holding a back-reference to the skb until Tx Confirmation; -+ * we don't want that overwritten by a concurrent Tx with a cloned skb. -+ */ -+ skb = skb_unshare(skb, GFP_ATOMIC); -+ if (unlikely(!skb)) { -+ /* skb_unshare() has already freed the skb */ -+ percpu_stats->tx_dropped++; -+ return NETDEV_TX_OK; -+ } -+ -+ /* Setup the FD fields */ -+ memset(&fd, 0, sizeof(fd)); -+ -+ if (skb_is_nonlinear(skb)) { -+ err = build_sg_fd(priv, skb, &fd); -+ percpu_extras->tx_sg_frames++; -+ percpu_extras->tx_sg_bytes += skb->len; -+ } else { -+ err = build_single_fd(priv, skb, &fd); -+ } -+ -+ if (unlikely(err)) { -+ percpu_stats->tx_dropped++; -+ goto err_build_fd; -+ } -+ -+ /* Tracing point */ -+ trace_dpaa2_tx_fd(net_dev, &fd); -+ -+ /* TxConf FQ selection primarily based on cpu affinity; this is -+ * non-migratable context, so it's safe to call smp_processor_id(). 
-+ */ -+ queue_mapping = smp_processor_id() % priv->dpni_attrs.max_senders; -+ flow_id = priv->fq[queue_mapping].flowid; -+ for (i = 0; i < (DPAA2_ETH_MAX_TX_QUEUES << 1); i++) { -+ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0, -+ flow_id, &fd); -+ if (err != -EBUSY) -+ break; -+ } -+ percpu_extras->tx_portal_busy += i; -+ if (unlikely(err < 0)) { -+ percpu_stats->tx_errors++; -+ /* Clean up everything, including freeing the skb */ -+ free_tx_fd(priv, &fd, NULL); -+ } else { -+ percpu_stats->tx_packets++; -+ percpu_stats->tx_bytes += skb->len; -+ } -+ -+ return NETDEV_TX_OK; -+ -+err_build_fd: -+err_alloc_headroom: -+ dev_kfree_skb(skb); -+ -+ return NETDEV_TX_OK; -+} -+ -+/* Tx confirmation frame processing routine */ -+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ struct napi_struct *napi __always_unused) -+{ -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_drv_stats *percpu_extras; -+ u32 status = 0; -+ -+ /* Tracing point */ -+ trace_dpaa2_tx_conf_fd(priv->net_dev, fd); -+ -+ percpu_extras = this_cpu_ptr(priv->percpu_extras); -+ percpu_extras->tx_conf_frames++; -+ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); -+ -+ free_tx_fd(priv, fd, &status); -+ -+ if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) { -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); -+ /* Tx-conf logically pertains to the egress path. 
*/ -+ percpu_stats->tx_errors++; -+ } -+} -+ -+static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) -+{ -+ int err; -+ -+ err = dpni_set_l3_chksum_validation(priv->mc_io, 0, priv->mc_token, -+ enable); -+ if (err) { -+ netdev_err(priv->net_dev, -+ "dpni_set_l3_chksum_validation() failed\n"); -+ return err; -+ } -+ -+ err = dpni_set_l4_chksum_validation(priv->mc_io, 0, priv->mc_token, -+ enable); -+ if (err) { -+ netdev_err(priv->net_dev, -+ "dpni_set_l4_chksum_validation failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) -+{ -+ struct dpaa2_eth_fq *fq; -+ struct dpni_tx_flow_cfg tx_flow_cfg; -+ int err; -+ int i; -+ -+ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg)); -+ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN | -+ DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN; -+ tx_flow_cfg.l3_chksum_gen = enable; -+ tx_flow_cfg.l4_chksum_gen = enable; -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ if (fq->type != DPAA2_TX_CONF_FQ) -+ continue; -+ -+ /* The Tx flowid is kept in the corresponding TxConf FQ. 
*/ -+ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token, -+ &fq->flowid, &tx_flow_cfg); -+ if (err) { -+ netdev_err(priv->net_dev, "dpni_set_tx_flow failed\n"); -+ return err; -+ } -+ } -+ -+ return 0; -+} -+ -+/* Perform a single release command to add buffers -+ * to the specified buffer pool -+ */ -+static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; -+ void *buf; -+ dma_addr_t addr; -+ int i; -+ -+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { -+ /* Allocate buffer visible to WRIOP + skb shared info + -+ * alignment padding -+ */ -+ buf = netdev_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); -+ if (unlikely(!buf)) -+ goto err_alloc; -+ -+ buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN); -+ -+ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, -+ DMA_FROM_DEVICE); -+ if (unlikely(dma_mapping_error(dev, addr))) -+ goto err_map; -+ -+ buf_array[i] = addr; -+ -+ /* tracing point */ -+ trace_dpaa2_eth_buf_seed(priv->net_dev, -+ buf, DPAA2_ETH_BUF_RAW_SIZE, -+ addr, DPAA2_ETH_RX_BUF_SIZE, -+ bpid); -+ } -+ -+release_bufs: -+ /* In case the portal is busy, retry until successful. -+ * The buffer release function would only fail if the QBMan portal -+ * was busy, which implies portal contention (i.e. more CPUs than -+ * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes, -+ * there is little we can realistically do, short of giving up - -+ * in which case we'd risk depleting the buffer pool and never again -+ * receiving the Rx interrupt which would kick-start the refill logic. -+ * So just keep retrying, at the risk of being moved to ksoftirqd. 
-+ */ -+ while (dpaa2_io_service_release(NULL, bpid, buf_array, i)) -+ cpu_relax(); -+ return i; -+ -+err_map: -+ put_page(virt_to_head_page(buf)); -+err_alloc: -+ if (i) -+ goto release_bufs; -+ -+ return 0; -+} -+ -+static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) -+{ -+ int i, j; -+ int new_count; -+ -+ /* This is the lazy seeding of Rx buffer pools. -+ * dpaa2_add_bufs() is also used on the Rx hotpath and calls -+ * napi_alloc_frag(). The trouble with that is that it in turn ends up -+ * calling this_cpu_ptr(), which mandates execution in atomic context. -+ * Rather than splitting up the code, do a one-off preempt disable. -+ */ -+ preempt_disable(); -+ for (j = 0; j < priv->num_channels; j++) { -+ for (i = 0; i < DPAA2_ETH_NUM_BUFS; -+ i += DPAA2_ETH_BUFS_PER_CMD) { -+ new_count = add_bufs(priv, bpid); -+ priv->channel[j]->buf_count += new_count; -+ -+ if (new_count < DPAA2_ETH_BUFS_PER_CMD) { -+ preempt_enable(); -+ return -ENOMEM; -+ } -+ } -+ } -+ preempt_enable(); -+ -+ return 0; -+} -+ -+/** -+ * Drain the specified number of buffers from the DPNI's private buffer pool. 
-+ * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD -+ */ -+static void drain_bufs(struct dpaa2_eth_priv *priv, int count) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; -+ void *vaddr; -+ int ret, i; -+ -+ do { -+ ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid, -+ buf_array, count); -+ if (ret < 0) { -+ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); -+ return; -+ } -+ for (i = 0; i < ret; i++) { -+ /* Same logic as on regular Rx path */ -+ dma_unmap_single(dev, buf_array[i], -+ DPAA2_ETH_RX_BUF_SIZE, -+ DMA_FROM_DEVICE); -+ vaddr = phys_to_virt(buf_array[i]); -+ put_page(virt_to_head_page(vaddr)); -+ } -+ } while (ret); -+} -+ -+static void drain_pool(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ -+ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); -+ drain_bufs(priv, 1); -+ -+ for (i = 0; i < priv->num_channels; i++) -+ priv->channel[i]->buf_count = 0; -+} -+ -+/* Function is called from softirq context only, so we don't need to guard -+ * the access to percpu count -+ */ -+static int refill_pool(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ u16 bpid) -+{ -+ int new_count; -+ -+ if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) -+ return 0; -+ -+ do { -+ new_count = add_bufs(priv, bpid); -+ if (unlikely(!new_count)) { -+ /* Out of memory; abort for now, we'll try later on */ -+ break; -+ } -+ ch->buf_count += new_count; -+ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); -+ -+ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) -+ return -ENOMEM; -+ -+ return 0; -+} -+ -+static int pull_channel(struct dpaa2_eth_channel *ch) -+{ -+ int err; -+ int dequeues = -1; -+ -+ /* Retry while portal is busy */ -+ do { -+ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store); -+ dequeues++; -+ cpu_relax(); -+ } while (err == -EBUSY); -+ -+ ch->stats.dequeue_portal_busy += dequeues; -+ if (unlikely(err)) -+ ch->stats.pull_err++; -+ -+ return err; -+} -+ -+/* NAPI poll routine 
-+ * -+ * Frames are dequeued from the QMan channel associated with this NAPI context. -+ * Rx, Tx confirmation and (if configured) Rx error frames all count -+ * towards the NAPI budget. -+ */ -+static int dpaa2_eth_poll(struct napi_struct *napi, int budget) -+{ -+ struct dpaa2_eth_channel *ch; -+ int cleaned = 0, store_cleaned; -+ struct dpaa2_eth_priv *priv; -+ int err; -+ -+ ch = container_of(napi, struct dpaa2_eth_channel, napi); -+ priv = ch->priv; -+ -+ while (cleaned < budget) { -+ err = pull_channel(ch); -+ if (unlikely(err)) -+ break; -+ -+ /* Refill pool if appropriate */ -+ refill_pool(priv, ch, priv->dpbp_attrs.bpid); -+ -+ store_cleaned = consume_frames(ch); -+ cleaned += store_cleaned; -+ -+ /* If we have enough budget left for a full store, -+ * try a new pull dequeue, otherwise we're done here -+ */ -+ if (store_cleaned == 0 || -+ cleaned > budget - DPAA2_ETH_STORE_SIZE) -+ break; -+ } -+ -+ if (cleaned < budget) { -+ napi_complete(napi); -+ /* Re-enable data available notifications */ -+ do { -+ err = dpaa2_io_service_rearm(NULL, &ch->nctx); -+ cpu_relax(); -+ } while (err == -EBUSY); -+ } -+ -+ ch->stats.frames += cleaned; -+ -+ return cleaned; -+} -+ -+static void enable_ch_napi(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ napi_enable(&ch->napi); -+ } -+} -+ -+static void disable_ch_napi(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ napi_disable(&ch->napi); -+ } -+} -+ -+static int link_state_update(struct dpaa2_eth_priv *priv) -+{ -+ struct dpni_link_state state; -+ int err; -+ -+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); -+ if (unlikely(err)) { -+ netdev_err(priv->net_dev, -+ "dpni_get_link_state() failed\n"); -+ return err; -+ } -+ -+ /* Chech link state; speed / duplex changes are not treated yet */ -+ 
if (priv->link_state.up == state.up) -+ return 0; -+ -+ priv->link_state = state; -+ if (state.up) { -+ netif_carrier_on(priv->net_dev); -+ netif_tx_start_all_queues(priv->net_dev); -+ } else { -+ netif_tx_stop_all_queues(priv->net_dev); -+ netif_carrier_off(priv->net_dev); -+ } -+ -+ netdev_info(priv->net_dev, "Link Event: state %s", -+ state.up ? "up" : "down"); -+ -+ return 0; -+} -+ -+static int dpaa2_eth_open(struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err; -+ -+ err = seed_pool(priv, priv->dpbp_attrs.bpid); -+ if (err) { -+ /* Not much to do; the buffer pool, though not filled up, -+ * may still contain some buffers which would enable us -+ * to limp on. -+ */ -+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", -+ priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid); -+ } -+ -+ /* We'll only start the txqs when the link is actually ready; make sure -+ * we don't race against the link up notification, which may come -+ * immediately after dpni_enable(); -+ */ -+ netif_tx_stop_all_queues(net_dev); -+ enable_ch_napi(priv); -+ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will -+ * return true and cause 'ip link show' to report the LOWER_UP flag, -+ * even though the link notification wasn't even received. -+ */ -+ netif_carrier_off(net_dev); -+ -+ err = dpni_enable(priv->mc_io, 0, priv->mc_token); -+ if (err < 0) { -+ netdev_err(net_dev, "dpni_enable() failed\n"); -+ goto enable_err; -+ } -+ -+ /* If the DPMAC object has already processed the link up interrupt, -+ * we have to learn the link state ourselves. -+ */ -+ err = link_state_update(priv); -+ if (err < 0) { -+ netdev_err(net_dev, "Can't update link state\n"); -+ goto link_state_err; -+ } -+ -+ return 0; -+ -+link_state_err: -+enable_err: -+ disable_ch_napi(priv); -+ drain_pool(priv); -+ return err; -+} -+ -+/* The DPIO store must be empty when we call this, -+ * at the end of every NAPI cycle. 
-+ */ -+static u32 drain_channel(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch) -+{ -+ u32 drained = 0, total = 0; -+ -+ do { -+ pull_channel(ch); -+ drained = consume_frames(ch); -+ total += drained; -+ } while (drained); -+ -+ return total; -+} -+ -+static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_eth_channel *ch; -+ int i; -+ u32 drained = 0; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ drained += drain_channel(priv, ch); -+ } -+ -+ return drained; -+} -+ -+static int dpaa2_eth_stop(struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int dpni_enabled; -+ int retries = 10; -+ u32 drained; -+ -+ netif_tx_stop_all_queues(net_dev); -+ netif_carrier_off(net_dev); -+ -+ /* Loop while dpni_disable() attempts to drain the egress FQs -+ * and confirm them back to us. -+ */ -+ do { -+ dpni_disable(priv->mc_io, 0, priv->mc_token); -+ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); -+ if (dpni_enabled) -+ /* Allow the MC some slack */ -+ msleep(100); -+ } while (dpni_enabled && --retries); -+ if (!retries) { -+ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); -+ /* Must go on and disable NAPI nonetheless, so we don't crash at -+ * the next "ifconfig up" -+ */ -+ } -+ -+ /* Wait for NAPI to complete on every core and disable it. -+ * In particular, this will also prevent NAPI from being rescheduled if -+ * a new CDAN is serviced, effectively discarding the CDAN. We therefore -+ * don't even need to disarm the channels, except perhaps for the case -+ * of a huge coalescing value. 
-+ */ -+ disable_ch_napi(priv); -+ -+ /* Manually drain the Rx and TxConf queues */ -+ drained = drain_ingress_frames(priv); -+ if (drained) -+ netdev_dbg(net_dev, "Drained %d frames.\n", drained); -+ -+ /* Empty the buffer pool */ -+ drain_pool(priv); -+ -+ return 0; -+} -+ -+static int dpaa2_eth_init(struct net_device *net_dev) -+{ -+ u64 supported = 0; -+ u64 not_supported = 0; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ u32 options = priv->dpni_attrs.options; -+ -+ /* Capabilities listing */ -+ supported |= IFF_LIVE_ADDR_CHANGE | IFF_PROMISC | IFF_ALLMULTI; -+ -+ if (options & DPNI_OPT_UNICAST_FILTER) -+ supported |= IFF_UNICAST_FLT; -+ else -+ not_supported |= IFF_UNICAST_FLT; -+ -+ if (options & DPNI_OPT_MULTICAST_FILTER) -+ supported |= IFF_MULTICAST; -+ else -+ not_supported |= IFF_MULTICAST; -+ -+ net_dev->priv_flags |= supported; -+ net_dev->priv_flags &= ~not_supported; -+ -+ /* Features */ -+ net_dev->features = NETIF_F_RXCSUM | -+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -+ NETIF_F_SG | NETIF_F_HIGHDMA | -+ NETIF_F_LLTX; -+ net_dev->hw_features = net_dev->features; -+ -+ return 0; -+} -+ -+static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct device *dev = net_dev->dev.parent; -+ int err; -+ -+ err = eth_mac_addr(net_dev, addr); -+ if (err < 0) { -+ dev_err(dev, "eth_mac_addr() failed with error %d\n", err); -+ return err; -+ } -+ -+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, -+ net_dev->dev_addr); -+ if (err) { -+ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); -+ return err; -+ } -+ -+ return 0; -+} -+ -+/** Fill in counters maintained by the GPP driver. These may be different from -+ * the hardware counters obtained by ethtool. 
-+ */ -+static struct rtnl_link_stats64 -+*dpaa2_eth_get_stats(struct net_device *net_dev, -+ struct rtnl_link_stats64 *stats) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct rtnl_link_stats64 *percpu_stats; -+ u64 *cpustats; -+ u64 *netstats = (u64 *)stats; -+ int i, j; -+ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); -+ -+ for_each_possible_cpu(i) { -+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i); -+ cpustats = (u64 *)percpu_stats; -+ for (j = 0; j < num; j++) -+ netstats[j] += cpustats[j]; -+ } -+ -+ return stats; -+} -+ -+static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err; -+ -+ if (mtu < 68 || mtu > DPAA2_ETH_MAX_MTU) { -+ netdev_err(net_dev, "Invalid MTU %d. Valid range is: 68..%d\n", -+ mtu, DPAA2_ETH_MAX_MTU); -+ return -EINVAL; -+ } -+ -+ /* Set the maximum Rx frame length to match the transmit side; -+ * account for L2 headers when computing the MFL -+ */ -+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, -+ (u16)DPAA2_ETH_L2_MAX_FRM(mtu)); -+ if (err) { -+ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n"); -+ return err; -+ } -+ -+ net_dev->mtu = mtu; -+ return 0; -+} -+ -+/* Copy mac unicast addresses from @net_dev to @priv. -+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. -+ */ -+static void add_uc_hw_addr(const struct net_device *net_dev, -+ struct dpaa2_eth_priv *priv) -+{ -+ struct netdev_hw_addr *ha; -+ int err; -+ -+ netdev_for_each_uc_addr(ha, net_dev) { -+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, -+ ha->addr); -+ if (err) -+ netdev_warn(priv->net_dev, -+ "Could not add ucast MAC %pM to the filtering table (err %d)\n", -+ ha->addr, err); -+ } -+} -+ -+/* Copy mac multicast addresses from @net_dev to @priv -+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
-+ */ -+static void add_mc_hw_addr(const struct net_device *net_dev, -+ struct dpaa2_eth_priv *priv) -+{ -+ struct netdev_hw_addr *ha; -+ int err; -+ -+ netdev_for_each_mc_addr(ha, net_dev) { -+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, -+ ha->addr); -+ if (err) -+ netdev_warn(priv->net_dev, -+ "Could not add mcast MAC %pM to the filtering table (err %d)\n", -+ ha->addr, err); -+ } -+} -+ -+static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int uc_count = netdev_uc_count(net_dev); -+ int mc_count = netdev_mc_count(net_dev); -+ u8 max_uc = priv->dpni_attrs.max_unicast_filters; -+ u8 max_mc = priv->dpni_attrs.max_multicast_filters; -+ u32 options = priv->dpni_attrs.options; -+ u16 mc_token = priv->mc_token; -+ struct fsl_mc_io *mc_io = priv->mc_io; -+ int err; -+ -+ /* Basic sanity checks; these probably indicate a misconfiguration */ -+ if (!(options & DPNI_OPT_UNICAST_FILTER) && max_uc != 0) -+ netdev_info(net_dev, -+ "max_unicast_filters=%d, DPNI_OPT_UNICAST_FILTER option must be enabled\n", -+ max_uc); -+ if (!(options & DPNI_OPT_MULTICAST_FILTER) && max_mc != 0) -+ netdev_info(net_dev, -+ "max_multicast_filters=%d, DPNI_OPT_MULTICAST_FILTER option must be enabled\n", -+ max_mc); -+ -+ /* Force promiscuous if the uc or mc counts exceed our capabilities. */ -+ if (uc_count > max_uc) { -+ netdev_info(net_dev, -+ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", -+ uc_count, max_uc); -+ goto force_promisc; -+ } -+ if (mc_count > max_mc) { -+ netdev_info(net_dev, -+ "Multicast addr count reached %d, max allowed is %d; forcing promisc\n", -+ mc_count, max_mc); -+ goto force_mc_promisc; -+ } -+ -+ /* Adjust promisc settings due to flag combinations */ -+ if (net_dev->flags & IFF_PROMISC) -+ goto force_promisc; -+ if (net_dev->flags & IFF_ALLMULTI) { -+ /* First, rebuild unicast filtering table. 
This should be done -+ * in promisc mode, in order to avoid frame loss while we -+ * progressively add entries to the table. -+ * We don't know whether we had been in promisc already, and -+ * making an MC call to find out is expensive; so set uc promisc -+ * nonetheless. -+ */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't set uc promisc\n"); -+ -+ /* Actual uc table reconstruction. */ -+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); -+ if (err) -+ netdev_warn(net_dev, "Can't clear uc filters\n"); -+ add_uc_hw_addr(net_dev, priv); -+ -+ /* Finally, clear uc promisc and set mc promisc as requested. */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); -+ if (err) -+ netdev_warn(net_dev, "Can't clear uc promisc\n"); -+ goto force_mc_promisc; -+ } -+ -+ /* Neither unicast, nor multicast promisc will be on... eventually. -+ * For now, rebuild mac filtering tables while forcing both of them on. -+ */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); -+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); -+ -+ /* Actual mac filtering tables reconstruction */ -+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't clear mac filters\n"); -+ add_mc_hw_addr(net_dev, priv); -+ add_uc_hw_addr(net_dev, priv); -+ -+ /* Now we can clear both ucast and mcast promisc, without risking -+ * to drop legitimate frames anymore. 
-+ */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); -+ if (err) -+ netdev_warn(net_dev, "Can't clear ucast promisc\n"); -+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); -+ if (err) -+ netdev_warn(net_dev, "Can't clear mcast promisc\n"); -+ -+ return; -+ -+force_promisc: -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't set ucast promisc\n"); -+force_mc_promisc: -+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't set mcast promisc\n"); -+} -+ -+static int dpaa2_eth_set_features(struct net_device *net_dev, -+ netdev_features_t features) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ netdev_features_t changed = features ^ net_dev->features; -+ bool enable; -+ int err; -+ -+ if (changed & NETIF_F_RXCSUM) { -+ enable = !!(features & NETIF_F_RXCSUM); -+ err = set_rx_csum(priv, enable); -+ if (err) -+ return err; -+ } -+ -+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { -+ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); -+ err = set_tx_csum(priv, enable); -+ if (err) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(dev); -+ struct hwtstamp_config config; -+ -+ if (copy_from_user(&config, rq->ifr_data, sizeof(config))) -+ return -EFAULT; -+ -+ switch (config.tx_type) { -+ case HWTSTAMP_TX_OFF: -+ priv->ts_tx_en = false; -+ break; -+ case HWTSTAMP_TX_ON: -+ priv->ts_tx_en = true; -+ break; -+ default: -+ return -ERANGE; -+ } -+ -+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) { -+ priv->ts_rx_en = false; -+ } else { -+ priv->ts_rx_en = true; -+ /* TS is set for all frame types, not only those requested */ -+ config.rx_filter = HWTSTAMP_FILTER_ALL; -+ } -+ -+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
-+ -EFAULT : 0; -+} -+ -+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -+{ -+ if (cmd == SIOCSHWTSTAMP) -+ return dpaa2_eth_ts_ioctl(dev, rq, cmd); -+ -+ return -EINVAL; -+} -+ -+static const struct net_device_ops dpaa2_eth_ops = { -+ .ndo_open = dpaa2_eth_open, -+ .ndo_start_xmit = dpaa2_eth_tx, -+ .ndo_stop = dpaa2_eth_stop, -+ .ndo_init = dpaa2_eth_init, -+ .ndo_set_mac_address = dpaa2_eth_set_addr, -+ .ndo_get_stats64 = dpaa2_eth_get_stats, -+ .ndo_change_mtu = dpaa2_eth_change_mtu, -+ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, -+ .ndo_set_features = dpaa2_eth_set_features, -+ .ndo_do_ioctl = dpaa2_eth_ioctl, -+}; -+ -+static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) -+{ -+ struct dpaa2_eth_channel *ch; -+ -+ ch = container_of(ctx, struct dpaa2_eth_channel, nctx); -+ -+ /* Update NAPI statistics */ -+ ch->stats.cdan++; -+ -+ napi_schedule(&ch->napi); -+} -+ -+/* Allocate and configure a DPCON object */ -+static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) -+{ -+ struct fsl_mc_device *dpcon; -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpcon_attr attrs; -+ int err; -+ -+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), -+ FSL_MC_POOL_DPCON, &dpcon); -+ if (err) { -+ dev_info(dev, "Not enough DPCONs, will go on as-is\n"); -+ return NULL; -+ } -+ -+ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); -+ if (err) { -+ dev_err(dev, "dpcon_open() failed\n"); -+ goto err_open; -+ } -+ -+ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); -+ if (err) { -+ dev_err(dev, "dpcon_reset() failed\n"); -+ goto err_reset; -+ } -+ -+ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); -+ if (err) { -+ dev_err(dev, "dpcon_get_attributes() failed\n"); -+ goto err_get_attr; -+ } -+ -+ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); -+ if (err) { -+ dev_err(dev, "dpcon_enable() failed\n"); -+ goto err_enable; -+ } -+ -+ return dpcon; -+ -+err_enable: 
-+err_get_attr: -+err_reset: -+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); -+err_open: -+ fsl_mc_object_free(dpcon); -+ -+ return NULL; -+} -+ -+static void free_dpcon(struct dpaa2_eth_priv *priv, -+ struct fsl_mc_device *dpcon) -+{ -+ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); -+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); -+ fsl_mc_object_free(dpcon); -+} -+ -+static struct dpaa2_eth_channel * -+alloc_channel(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_eth_channel *channel; -+ struct dpcon_attr attr; -+ struct device *dev = priv->net_dev->dev.parent; -+ int err; -+ -+ channel = kzalloc(sizeof(*channel), GFP_ATOMIC); -+ if (!channel) -+ return NULL; -+ -+ channel->dpcon = setup_dpcon(priv); -+ if (!channel->dpcon) -+ goto err_setup; -+ -+ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, -+ &attr); -+ if (err) { -+ dev_err(dev, "dpcon_get_attributes() failed\n"); -+ goto err_get_attr; -+ } -+ -+ channel->dpcon_id = attr.id; -+ channel->ch_id = attr.qbman_ch_id; -+ channel->priv = priv; -+ -+ return channel; -+ -+err_get_attr: -+ free_dpcon(priv, channel->dpcon); -+err_setup: -+ kfree(channel); -+ return NULL; -+} -+ -+static void free_channel(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *channel) -+{ -+ free_dpcon(priv, channel->dpcon); -+ kfree(channel); -+} -+ -+/* DPIO setup: allocate and configure QBMan channels, setup core affinity -+ * and register data availability notifications -+ */ -+static int setup_dpio(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_io_notification_ctx *nctx; -+ struct dpaa2_eth_channel *channel; -+ struct dpcon_notification_cfg dpcon_notif_cfg; -+ struct device *dev = priv->net_dev->dev.parent; -+ int i, err; -+ -+ /* Don't allocate more channels than strictly necessary and assign -+ * them to cores starting from the first one available in -+ * cpu_online_mask. 
-+ * If the number of channels is lower than the number of cores, -+ * there will be no rx/tx conf processing on the last cores in the mask. -+ */ -+ cpumask_clear(&priv->dpio_cpumask); -+ for_each_online_cpu(i) { -+ /* Try to allocate a channel */ -+ channel = alloc_channel(priv); -+ if (!channel) -+ goto err_alloc_ch; -+ -+ priv->channel[priv->num_channels] = channel; -+ -+ nctx = &channel->nctx; -+ nctx->is_cdan = 1; -+ nctx->cb = cdan_cb; -+ nctx->id = channel->ch_id; -+ nctx->desired_cpu = i; -+ -+ /* Register the new context */ -+ err = dpaa2_io_service_register(NULL, nctx); -+ if (err) { -+ dev_info(dev, "No affine DPIO for core %d\n", i); -+ /* This core doesn't have an affine DPIO, but there's -+ * a chance another one does, so keep trying -+ */ -+ free_channel(priv, channel); -+ continue; -+ } -+ -+ /* Register DPCON notification with MC */ -+ dpcon_notif_cfg.dpio_id = nctx->dpio_id; -+ dpcon_notif_cfg.priority = 0; -+ dpcon_notif_cfg.user_ctx = nctx->qman64; -+ err = dpcon_set_notification(priv->mc_io, 0, -+ channel->dpcon->mc_handle, -+ &dpcon_notif_cfg); -+ if (err) { -+ dev_err(dev, "dpcon_set_notification failed()\n"); -+ goto err_set_cdan; -+ } -+ -+ /* If we managed to allocate a channel and also found an affine -+ * DPIO for this core, add it to the final mask -+ */ -+ cpumask_set_cpu(i, &priv->dpio_cpumask); -+ priv->num_channels++; -+ -+ if (priv->num_channels == dpaa2_eth_max_channels(priv)) -+ break; -+ } -+ -+ /* Tx confirmation queues can only be serviced by cpus -+ * with an affine DPIO/channel -+ */ -+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); -+ -+ return 0; -+ -+err_set_cdan: -+ dpaa2_io_service_deregister(NULL, nctx); -+ free_channel(priv, channel); -+err_alloc_ch: -+ if (cpumask_empty(&priv->dpio_cpumask)) { -+ dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); -+ return -ENODEV; -+ } -+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); -+ -+ return 0; -+} -+ -+static void free_dpio(struct dpaa2_eth_priv *priv) 
-+{ -+ int i; -+ struct dpaa2_eth_channel *ch; -+ -+ /* deregister CDAN notifications and free channels */ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ dpaa2_io_service_deregister(NULL, &ch->nctx); -+ free_channel(priv, ch); -+ } -+} -+ -+static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, -+ int cpu) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) -+ if (priv->channel[i]->nctx.desired_cpu == cpu) -+ return priv->channel[i]; -+ -+ /* We should never get here. Issue a warning and return -+ * the first channel, because it's still better than nothing -+ */ -+ dev_warn(dev, "No affine channel found for cpu %d\n", cpu); -+ -+ return priv->channel[0]; -+} -+ -+static void set_fq_affinity(struct dpaa2_eth_priv *priv) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpaa2_eth_fq *fq; -+ int rx_cpu, txc_cpu; -+ int i; -+ -+ /* For each FQ, pick one channel/CPU to deliver frames to. -+ * This may well change at runtime, either through irqbalance or -+ * through direct user intervention. 
-+ */ -+ rx_cpu = cpumask_first(&priv->dpio_cpumask); -+ txc_cpu = cpumask_first(&priv->txconf_cpumask); -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ switch (fq->type) { -+ case DPAA2_RX_FQ: -+ case DPAA2_RX_ERR_FQ: -+ fq->target_cpu = rx_cpu; -+ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); -+ if (rx_cpu >= nr_cpu_ids) -+ rx_cpu = cpumask_first(&priv->dpio_cpumask); -+ break; -+ case DPAA2_TX_CONF_FQ: -+ fq->target_cpu = txc_cpu; -+ txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask); -+ if (txc_cpu >= nr_cpu_ids) -+ txc_cpu = cpumask_first(&priv->txconf_cpumask); -+ break; -+ default: -+ dev_err(dev, "Unknown FQ type: %d\n", fq->type); -+ } -+ fq->channel = get_affine_channel(priv, fq->target_cpu); -+ } -+} -+ -+static void setup_fqs(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ -+ /* We have one TxConf FQ per Tx flow */ -+ for (i = 0; i < priv->dpni_attrs.max_senders; i++) { -+ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; -+ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; -+ priv->fq[priv->num_fqs++].flowid = DPNI_NEW_FLOW_ID; -+ } -+ -+ /* The number of Rx queues (Rx distribution width) may be different from -+ * the number of cores. -+ * We only support one traffic class for now. 
-+ */ -+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { -+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; -+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; -+ priv->fq[priv->num_fqs++].flowid = (u16)i; -+ } -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+ /* We have exactly one Rx error queue per DPNI */ -+ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; -+ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; -+#endif -+ -+ /* For each FQ, decide on which core to process incoming frames */ -+ set_fq_affinity(priv); -+} -+ -+/* Allocate and configure one buffer pool for each interface */ -+static int setup_dpbp(struct dpaa2_eth_priv *priv) -+{ -+ int err; -+ struct fsl_mc_device *dpbp_dev; -+ struct device *dev = priv->net_dev->dev.parent; -+ -+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, -+ &dpbp_dev); -+ if (err) { -+ dev_err(dev, "DPBP device allocation failed\n"); -+ return err; -+ } -+ -+ priv->dpbp_dev = dpbp_dev; -+ -+ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, -+ &dpbp_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpbp_open() failed\n"); -+ goto err_open; -+ } -+ -+ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpbp_reset() failed\n"); -+ goto err_reset; -+ } -+ -+ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpbp_enable() failed\n"); -+ goto err_enable; -+ } -+ -+ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, -+ &priv->dpbp_attrs); -+ if (err) { -+ dev_err(dev, "dpbp_get_attributes() failed\n"); -+ goto err_get_attr; -+ } -+ -+ return 0; -+ -+err_get_attr: -+ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); -+err_enable: -+err_reset: -+ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); -+err_open: -+ fsl_mc_object_free(dpbp_dev); -+ -+ return err; -+} -+ -+static void free_dpbp(struct dpaa2_eth_priv *priv) -+{ -+ drain_pool(priv); -+ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); -+ 
dpbp_close(priv->mc_io, 0, priv->dpbp_dev->mc_handle); -+ fsl_mc_object_free(priv->dpbp_dev); -+} -+ -+/* Configure the DPNI object this interface is associated with */ -+static int setup_dpni(struct fsl_mc_device *ls_dev) -+{ -+ struct device *dev = &ls_dev->dev; -+ struct dpaa2_eth_priv *priv; -+ struct net_device *net_dev; -+ void *dma_mem; -+ int err; -+ -+ net_dev = dev_get_drvdata(dev); -+ priv = netdev_priv(net_dev); -+ -+ priv->dpni_id = ls_dev->obj_desc.id; -+ -+ /* get a handle for the DPNI object */ -+ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token); -+ if (err) { -+ dev_err(dev, "dpni_open() failed\n"); -+ goto err_open; -+ } -+ -+ ls_dev->mc_io = priv->mc_io; -+ ls_dev->mc_handle = priv->mc_token; -+ -+ err = dpni_reset(priv->mc_io, 0, priv->mc_token); -+ if (err) { -+ dev_err(dev, "dpni_reset() failed\n"); -+ goto err_reset; -+ } -+ -+ /* Map a memory region which will be used by MC to pass us an -+ * attribute structure -+ */ -+ dma_mem = kzalloc(DPAA2_EXT_CFG_SIZE, GFP_DMA | GFP_KERNEL); -+ if (!dma_mem) -+ goto err_alloc; -+ -+ priv->dpni_attrs.ext_cfg_iova = dma_map_single(dev, dma_mem, -+ DPAA2_EXT_CFG_SIZE, -+ DMA_FROM_DEVICE); -+ if (dma_mapping_error(dev, priv->dpni_attrs.ext_cfg_iova)) { -+ dev_err(dev, "dma mapping for dpni_ext_cfg failed\n"); -+ goto err_dma_map; -+ } -+ -+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, -+ &priv->dpni_attrs); -+ -+ /* We'll check the return code after unmapping, as we need to -+ * do this anyway -+ */ -+ dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova, -+ DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE); -+ -+ if (err) { -+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); -+ goto err_get_attr; -+ } -+ -+ memset(&priv->dpni_ext_cfg, 0, sizeof(priv->dpni_ext_cfg)); -+ err = dpni_extract_extended_cfg(&priv->dpni_ext_cfg, dma_mem); -+ if (err) { -+ dev_err(dev, "dpni_extract_extended_cfg() failed\n"); -+ goto err_extract; -+ } -+ -+ /* Configure our buffers' layout */ -+ 
priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | -+ DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | -+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | -+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN; -+ priv->buf_layout.pass_parser_result = true; -+ priv->buf_layout.pass_frame_status = true; -+ priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; -+ /* HW erratum mandates data alignment in multiples of 256 */ -+ priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN; -+ -+ /* rx buffer */ -+ err = dpni_set_rx_buffer_layout(priv->mc_io, 0, priv->mc_token, -+ &priv->buf_layout); -+ if (err) { -+ dev_err(dev, "dpni_set_rx_buffer_layout() failed"); -+ goto err_buf_layout; -+ } -+ /* tx buffer: remove Rx-only options */ -+ priv->buf_layout.options &= ~(DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | -+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT); -+ err = dpni_set_tx_buffer_layout(priv->mc_io, 0, priv->mc_token, -+ &priv->buf_layout); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_buffer_layout() failed"); -+ goto err_buf_layout; -+ } -+ /* tx-confirm: same options as tx */ -+ priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; -+ priv->buf_layout.options |= DPNI_BUF_LAYOUT_OPT_TIMESTAMP; -+ priv->buf_layout.pass_timestamp = 1; -+ err = dpni_set_tx_conf_buffer_layout(priv->mc_io, 0, priv->mc_token, -+ &priv->buf_layout); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_conf_buffer_layout() failed"); -+ goto err_buf_layout; -+ } -+ /* Now that we've set our tx buffer layout, retrieve the minimum -+ * required tx data offset. -+ */ -+ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, -+ &priv->tx_data_offset); -+ if (err) { -+ dev_err(dev, "dpni_get_tx_data_offset() failed\n"); -+ goto err_data_offset; -+ } -+ -+ if ((priv->tx_data_offset % 64) != 0) -+ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B", -+ priv->tx_data_offset); -+ -+ /* Accommodate SWA space. 
*/ -+ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE; -+ -+ /* allocate classification rule space */ -+ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) * -+ DPAA2_CLASSIFIER_ENTRY_COUNT, GFP_KERNEL); -+ if (!priv->cls_rule) -+ goto err_cls_rule; -+ -+ kfree(dma_mem); -+ -+ return 0; -+ -+err_cls_rule: -+err_data_offset: -+err_buf_layout: -+err_extract: -+err_get_attr: -+err_dma_map: -+ kfree(dma_mem); -+err_alloc: -+err_reset: -+ dpni_close(priv->mc_io, 0, priv->mc_token); -+err_open: -+ return err; -+} -+ -+static void free_dpni(struct dpaa2_eth_priv *priv) -+{ -+ int err; -+ -+ err = dpni_reset(priv->mc_io, 0, priv->mc_token); -+ if (err) -+ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", -+ err); -+ -+ dpni_close(priv->mc_io, 0, priv->mc_token); -+} -+ -+static int setup_rx_flow(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpni_queue_attr rx_queue_attr; -+ struct dpni_queue_cfg queue_cfg; -+ int err; -+ -+ memset(&queue_cfg, 0, sizeof(queue_cfg)); -+ queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST | -+ DPNI_QUEUE_OPT_TAILDROP_THRESHOLD; -+ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; -+ queue_cfg.dest_cfg.priority = 1; -+ queue_cfg.user_ctx = (u64)fq; -+ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; -+ queue_cfg.tail_drop_threshold = DPAA2_ETH_TAILDROP_THRESH; -+ err = dpni_set_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid, -+ &queue_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_rx_flow() failed\n"); -+ return err; -+ } -+ -+ /* Get the actual FQID that was assigned by MC */ -+ err = dpni_get_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid, -+ &rx_queue_attr); -+ if (err) { -+ dev_err(dev, "dpni_get_rx_flow() failed\n"); -+ return err; -+ } -+ fq->fqid = rx_queue_attr.fqid; -+ -+ return 0; -+} -+ -+static int setup_tx_flow(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ 
struct dpni_tx_flow_cfg tx_flow_cfg; -+ struct dpni_tx_conf_cfg tx_conf_cfg; -+ struct dpni_tx_conf_attr tx_conf_attr; -+ int err; -+ -+ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg)); -+ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_TX_CONF_ERROR; -+ tx_flow_cfg.use_common_tx_conf_queue = 0; -+ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token, -+ &fq->flowid, &tx_flow_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_flow() failed\n"); -+ return err; -+ } -+ -+ tx_conf_cfg.errors_only = 0; -+ tx_conf_cfg.queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | -+ DPNI_QUEUE_OPT_DEST; -+ tx_conf_cfg.queue_cfg.user_ctx = (u64)fq; -+ tx_conf_cfg.queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; -+ tx_conf_cfg.queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; -+ tx_conf_cfg.queue_cfg.dest_cfg.priority = 0; -+ -+ err = dpni_set_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid, -+ &tx_conf_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_conf() failed\n"); -+ return err; -+ } -+ -+ err = dpni_get_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid, -+ &tx_conf_attr); -+ if (err) { -+ dev_err(dev, "dpni_get_tx_conf() failed\n"); -+ return err; -+ } -+ -+ fq->fqid = tx_conf_attr.queue_attr.fqid; -+ -+ return 0; -+} -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) -+{ -+ struct dpni_queue_attr queue_attr; -+ struct dpni_queue_cfg queue_cfg; -+ int err; -+ -+ /* Configure the Rx error queue to generate CDANs, -+ * just like the Rx queues -+ */ -+ queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; -+ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; -+ queue_cfg.dest_cfg.priority = 1; -+ queue_cfg.user_ctx = (u64)fq; -+ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; -+ err = dpni_set_rx_err_queue(priv->mc_io, 0, priv->mc_token, &queue_cfg); -+ if (err) { -+ netdev_err(priv->net_dev, "dpni_set_rx_err_queue() failed\n"); -+ return err; -+ } -+ -+ /* Get the FQID */ -+ err = 
dpni_get_rx_err_queue(priv->mc_io, 0, priv->mc_token, -+ &queue_attr); -+ if (err) { -+ netdev_err(priv->net_dev, "dpni_get_rx_err_queue() failed\n"); -+ return err; -+ } -+ fq->fqid = queue_attr.fqid; -+ -+ return 0; -+} -+#endif -+ -+/* default hash key fields */ -+static struct dpaa2_eth_hash_fields default_hash_fields[] = { -+ { -+ /* L2 header */ -+ .rxnfc_field = RXH_L2DA, -+ .cls_prot = NET_PROT_ETH, -+ .cls_field = NH_FLD_ETH_DA, -+ .size = 6, -+ }, { -+ .cls_prot = NET_PROT_ETH, -+ .cls_field = NH_FLD_ETH_SA, -+ .size = 6, -+ }, { -+ /* This is the last ethertype field parsed: -+ * depending on frame format, it can be the MAC ethertype -+ * or the VLAN etype. -+ */ -+ .cls_prot = NET_PROT_ETH, -+ .cls_field = NH_FLD_ETH_TYPE, -+ .size = 2, -+ }, { -+ /* VLAN header */ -+ .rxnfc_field = RXH_VLAN, -+ .cls_prot = NET_PROT_VLAN, -+ .cls_field = NH_FLD_VLAN_TCI, -+ .size = 2, -+ }, { -+ /* IP header */ -+ .rxnfc_field = RXH_IP_SRC, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_SRC, -+ .size = 4, -+ }, { -+ .rxnfc_field = RXH_IP_DST, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_DST, -+ .size = 4, -+ }, { -+ .rxnfc_field = RXH_L3_PROTO, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_PROTO, -+ .size = 1, -+ }, { -+ /* Using UDP ports, this is functionally equivalent to raw -+ * byte pairs from L4 header. 
-+ */ -+ .rxnfc_field = RXH_L4_B_0_1, -+ .cls_prot = NET_PROT_UDP, -+ .cls_field = NH_FLD_UDP_PORT_SRC, -+ .size = 2, -+ }, { -+ .rxnfc_field = RXH_L4_B_2_3, -+ .cls_prot = NET_PROT_UDP, -+ .cls_field = NH_FLD_UDP_PORT_DST, -+ .size = 2, -+ }, -+}; -+ -+/* Set RX hash options */ -+int set_hash(struct dpaa2_eth_priv *priv) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpkg_profile_cfg cls_cfg; -+ struct dpni_rx_tc_dist_cfg dist_cfg; -+ u8 *dma_mem; -+ int i; -+ int err = 0; -+ -+ memset(&cls_cfg, 0, sizeof(cls_cfg)); -+ -+ for (i = 0; i < priv->num_hash_fields; i++) { -+ struct dpkg_extract *key = -+ &cls_cfg.extracts[cls_cfg.num_extracts]; -+ -+ key->type = DPKG_EXTRACT_FROM_HDR; -+ key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot; -+ key->extract.from_hdr.type = DPKG_FULL_FIELD; -+ key->extract.from_hdr.field = priv->hash_fields[i].cls_field; -+ cls_cfg.num_extracts++; -+ -+ priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field; -+ } -+ -+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); -+ if (!dma_mem) -+ return -ENOMEM; -+ -+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); -+ if (err) { -+ dev_err(dev, "dpni_prepare_key_cfg error %d", err); -+ return err; -+ } -+ -+ memset(&dist_cfg, 0, sizeof(dist_cfg)); -+ -+ /* Prepare for setting the rx dist */ -+ dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, -+ DPAA2_CLASSIFIER_DMA_SIZE, -+ DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) { -+ dev_err(dev, "DMA mapping failed\n"); -+ kfree(dma_mem); -+ return -ENOMEM; -+ } -+ -+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv); -+ if (dpaa2_eth_fs_enabled(priv)) { -+ dist_cfg.dist_mode = DPNI_DIST_MODE_FS; -+ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; -+ } else { -+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; -+ } -+ -+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); -+ dma_unmap_single(dev, dist_cfg.key_cfg_iova, -+ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); -+ 
kfree(dma_mem); -+ if (err) { -+ dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); -+ return err; -+ } -+ -+ return 0; -+} -+ -+/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, -+ * frame queues and channels -+ */ -+static int bind_dpni(struct dpaa2_eth_priv *priv) -+{ -+ struct net_device *net_dev = priv->net_dev; -+ struct device *dev = net_dev->dev.parent; -+ struct dpni_pools_cfg pools_params; -+ struct dpni_error_cfg err_cfg; -+ int err = 0; -+ int i; -+ -+ pools_params.num_dpbp = 1; -+ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; -+ pools_params.pools[0].backup_pool = 0; -+ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; -+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); -+ if (err) { -+ dev_err(dev, "dpni_set_pools() failed\n"); -+ return err; -+ } -+ -+ /* Verify classification options and disable hashing and/or -+ * flow steering support in case of invalid configuration values -+ */ -+ check_cls_support(priv); -+ -+ /* have the interface implicitly distribute traffic based on -+ * a static hash key -+ */ -+ if (dpaa2_eth_hash_enabled(priv)) { -+ priv->hash_fields = default_hash_fields; -+ priv->num_hash_fields = ARRAY_SIZE(default_hash_fields); -+ err = set_hash(priv); -+ if (err) { -+ dev_err(dev, "Hashing configuration failed\n"); -+ return err; -+ } -+ } -+ -+ /* Configure handling of error frames */ -+ err_cfg.errors = DPAA2_ETH_RX_ERR_MASK; -+ err_cfg.set_frame_annotation = 1; -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; -+#else -+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; -+#endif -+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, -+ &err_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_errors_behavior failed\n"); -+ return err; -+ } -+ -+ /* Configure Rx and Tx conf queues to generate CDANs */ -+ for (i = 0; i < priv->num_fqs; i++) { -+ switch (priv->fq[i].type) { -+ case DPAA2_RX_FQ: -+ 
err = setup_rx_flow(priv, &priv->fq[i]); -+ break; -+ case DPAA2_TX_CONF_FQ: -+ err = setup_tx_flow(priv, &priv->fq[i]); -+ break; -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+ case DPAA2_RX_ERR_FQ: -+ err = setup_rx_err_flow(priv, &priv->fq[i]); -+ break; -+#endif -+ default: -+ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); -+ return -EINVAL; -+ } -+ if (err) -+ return err; -+ } -+ -+ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, &priv->tx_qdid); -+ if (err) { -+ dev_err(dev, "dpni_get_qdid() failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+/* Allocate rings for storing incoming frame descriptors */ -+static int alloc_rings(struct dpaa2_eth_priv *priv) -+{ -+ struct net_device *net_dev = priv->net_dev; -+ struct device *dev = net_dev->dev.parent; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ priv->channel[i]->store = -+ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); -+ if (!priv->channel[i]->store) { -+ netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); -+ goto err_ring; -+ } -+ } -+ -+ return 0; -+ -+err_ring: -+ for (i = 0; i < priv->num_channels; i++) { -+ if (!priv->channel[i]->store) -+ break; -+ dpaa2_io_store_destroy(priv->channel[i]->store); -+ } -+ -+ return -ENOMEM; -+} -+ -+static void free_rings(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) -+ dpaa2_io_store_destroy(priv->channel[i]->store); -+} -+ -+static int netdev_init(struct net_device *net_dev) -+{ -+ int err; -+ struct device *dev = net_dev->dev.parent; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ u8 mac_addr[ETH_ALEN]; -+ u8 bcast_addr[ETH_ALEN]; -+ -+ net_dev->netdev_ops = &dpaa2_eth_ops; -+ -+ /* If the DPNI attributes contain an all-0 mac_addr, -+ * set a random hardware address -+ */ -+ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, -+ mac_addr); -+ if (err) { -+ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)", err); -+ return err; -+ } -+ if 
(is_zero_ether_addr(mac_addr)) { -+ /* Fills in net_dev->dev_addr, as required by -+ * register_netdevice() -+ */ -+ eth_hw_addr_random(net_dev); -+ /* Make the user aware, without cluttering the boot log */ -+ pr_info_once(KBUILD_MODNAME " device(s) have all-zero hwaddr, replaced with random"); -+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, -+ net_dev->dev_addr); -+ if (err) { -+ dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err); -+ return err; -+ } -+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all -+ * practical purposes, this will be our "permanent" mac address, -+ * at least until the next reboot. This move will also permit -+ * register_netdevice() to properly fill up net_dev->perm_addr. -+ */ -+ net_dev->addr_assign_type = NET_ADDR_PERM; -+ } else { -+ /* NET_ADDR_PERM is default, all we have to do is -+ * fill in the device addr. -+ */ -+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); -+ } -+ -+ /* Explicitly add the broadcast address to the MAC filtering table; -+ * the MC won't do that for us. -+ */ -+ eth_broadcast_addr(bcast_addr); -+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); -+ if (err) { -+ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err); -+ /* Won't return an error; at least, we'd have egress traffic */ -+ } -+ -+ /* Reserve enough space to align buffer as per hardware requirement; -+ * NOTE: priv->tx_data_offset MUST be initialized at this point. 
-+ */ -+ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv); -+ -+ /* Our .ndo_init will be called herein */ -+ err = register_netdev(net_dev); -+ if (err < 0) { -+ dev_err(dev, "register_netdev() = %d\n", err); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int poll_link_state(void *arg) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; -+ int err; -+ -+ while (!kthread_should_stop()) { -+ err = link_state_update(priv); -+ if (unlikely(err)) -+ return err; -+ -+ msleep(DPAA2_ETH_LINK_STATE_REFRESH); -+ } -+ -+ return 0; -+} -+ -+static irqreturn_t dpni_irq0_handler(int irq_num, void *arg) -+{ -+ return IRQ_WAKE_THREAD; -+} -+ -+static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) -+{ -+ u8 irq_index = DPNI_IRQ_INDEX; -+ u32 status, clear = 0; -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); -+ struct net_device *net_dev = dev_get_drvdata(dev); -+ int err; -+ -+ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, -+ irq_index, &status); -+ if (unlikely(err)) { -+ netdev_err(net_dev, "Can't get irq status (err %d)", err); -+ clear = 0xffffffff; -+ goto out; -+ } -+ -+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { -+ clear |= DPNI_IRQ_EVENT_LINK_CHANGED; -+ link_state_update(netdev_priv(net_dev)); -+ } -+ -+out: -+ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, -+ irq_index, clear); -+ return IRQ_HANDLED; -+} -+ -+static int setup_irqs(struct fsl_mc_device *ls_dev) -+{ -+ int err = 0; -+ struct fsl_mc_device_irq *irq; -+ u8 irq_index = DPNI_IRQ_INDEX; -+ u32 mask = DPNI_IRQ_EVENT_LINK_CHANGED; -+ -+ err = fsl_mc_allocate_irqs(ls_dev); -+ if (err) { -+ dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); -+ return err; -+ } -+ -+ irq = ls_dev->irqs[0]; -+ err = devm_request_threaded_irq(&ls_dev->dev, irq->irq_number, -+ dpni_irq0_handler, -+ dpni_irq0_handler_thread, -+ IRQF_NO_SUSPEND | IRQF_ONESHOT, -+ dev_name(&ls_dev->dev), 
&ls_dev->dev); -+ if (err < 0) { -+ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err); -+ goto free_mc_irq; -+ } -+ -+ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, -+ irq_index, mask); -+ if (err < 0) { -+ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err); -+ goto free_irq; -+ } -+ -+ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, -+ irq_index, 1); -+ if (err < 0) { -+ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err); -+ goto free_irq; -+ } -+ -+ return 0; -+ -+free_irq: -+ devm_free_irq(&ls_dev->dev, irq->irq_number, &ls_dev->dev); -+free_mc_irq: -+ fsl_mc_free_irqs(ls_dev); -+ -+ return err; -+} -+ -+static void add_ch_napi(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ struct dpaa2_eth_channel *ch; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ -+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, -+ NAPI_POLL_WEIGHT); -+ } -+} -+ -+static void del_ch_napi(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ struct dpaa2_eth_channel *ch; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ netif_napi_del(&ch->napi); -+ } -+} -+ -+/* SysFS support */ -+static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ /* No MC API for getting the shaping config. We're stateful. 
*/ -+ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg; -+ -+ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size); -+} -+ -+static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count) -+{ -+ int err, items; -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ struct dpni_tx_shaping_cfg scfg; -+ -+ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size); -+ if (items != 2) { -+ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n"); -+ return -EINVAL; -+ } -+ /* Size restriction as per MC API documentation */ -+ if (scfg.max_burst_size > 64000) { -+ pr_err("max_burst_size must be <= 64000, thanks.\n"); -+ return -EINVAL; -+ } -+ -+ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_shaping() failed\n"); -+ return -EPERM; -+ } -+ /* If successful, save the current configuration for future inquiries */ -+ priv->shaping_cfg = scfg; -+ -+ return count; -+} -+ -+static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ -+ return cpumask_scnprintf(buf, PAGE_SIZE, &priv->txconf_cpumask); -+} -+ -+static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ struct dpaa2_eth_fq *fq; -+ bool running = netif_running(priv->net_dev); -+ int i, err; -+ -+ err = cpulist_parse(buf, &priv->txconf_cpumask); -+ if (err) -+ return err; -+ -+ /* Only accept CPUs that have an affine DPIO */ -+ if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) { -+ netdev_info(priv->net_dev, -+ "cpumask must be a subset of 0x%lx\n", -+ *cpumask_bits(&priv->dpio_cpumask)); -+ cpumask_and(&priv->txconf_cpumask, 
&priv->dpio_cpumask, -+ &priv->txconf_cpumask); -+ } -+ -+ /* Rewiring the TxConf FQs requires interface shutdown. -+ */ -+ if (running) { -+ err = dpaa2_eth_stop(priv->net_dev); -+ if (err) -+ return -ENODEV; -+ } -+ -+ /* Set the new TxConf FQ affinities */ -+ set_fq_affinity(priv); -+ -+ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit -+ * link up notification is received. Give the polling thread enough time -+ * to detect the link state change, or else we'll end up with the -+ * transmission side forever shut down. -+ */ -+ if (priv->do_link_poll) -+ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH); -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ if (fq->type != DPAA2_TX_CONF_FQ) -+ continue; -+ setup_tx_flow(priv, fq); -+ } -+ -+ if (running) { -+ err = dpaa2_eth_open(priv->net_dev); -+ if (err) -+ return -ENODEV; -+ } -+ -+ return count; -+} -+ -+static struct device_attribute dpaa2_eth_attrs[] = { -+ __ATTR(txconf_cpumask, -+ S_IRUSR | S_IWUSR, -+ dpaa2_eth_show_txconf_cpumask, -+ dpaa2_eth_write_txconf_cpumask), -+ -+ __ATTR(tx_shaping, -+ S_IRUSR | S_IWUSR, -+ dpaa2_eth_show_tx_shaping, -+ dpaa2_eth_write_tx_shaping), -+}; -+ -+void dpaa2_eth_sysfs_init(struct device *dev) -+{ -+ int i, err; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) { -+ err = device_create_file(dev, &dpaa2_eth_attrs[i]); -+ if (err) { -+ dev_err(dev, "ERROR creating sysfs file\n"); -+ goto undo; -+ } -+ } -+ return; -+ -+undo: -+ while (i > 0) -+ device_remove_file(dev, &dpaa2_eth_attrs[--i]); -+} -+ -+void dpaa2_eth_sysfs_remove(struct device *dev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) -+ device_remove_file(dev, &dpaa2_eth_attrs[i]); -+} -+ -+static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) -+{ -+ struct device *dev; -+ struct net_device *net_dev = NULL; -+ struct dpaa2_eth_priv *priv = NULL; -+ int err = 0; -+ -+ dev = &dpni_dev->dev; -+ -+ /* Net device */ -+ net_dev = 
alloc_etherdev_mq(sizeof(*priv), DPAA2_ETH_MAX_TX_QUEUES); -+ if (!net_dev) { -+ dev_err(dev, "alloc_etherdev_mq() failed\n"); -+ return -ENOMEM; -+ } -+ -+ SET_NETDEV_DEV(net_dev, dev); -+ dev_set_drvdata(dev, net_dev); -+ -+ priv = netdev_priv(net_dev); -+ priv->net_dev = net_dev; -+ -+ /* Obtain a MC portal */ -+ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, -+ &priv->mc_io); -+ if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); -+ goto err_portal_alloc; -+ } -+ -+ /* MC objects initialization and configuration */ -+ err = setup_dpni(dpni_dev); -+ if (err) -+ goto err_dpni_setup; -+ -+ err = setup_dpio(priv); -+ if (err) -+ goto err_dpio_setup; -+ -+ setup_fqs(priv); -+ -+ err = setup_dpbp(priv); -+ if (err) -+ goto err_dpbp_setup; -+ -+ err = bind_dpni(priv); -+ if (err) -+ goto err_bind; -+ -+ /* Add a NAPI context for each channel */ -+ add_ch_napi(priv); -+ -+ /* Percpu statistics */ -+ priv->percpu_stats = alloc_percpu(*priv->percpu_stats); -+ if (!priv->percpu_stats) { -+ dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); -+ err = -ENOMEM; -+ goto err_alloc_percpu_stats; -+ } -+ priv->percpu_extras = alloc_percpu(*priv->percpu_extras); -+ if (!priv->percpu_extras) { -+ dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); -+ err = -ENOMEM; -+ goto err_alloc_percpu_extras; -+ } -+ -+ snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id); -+ if (!dev_valid_name(net_dev->name)) { -+ dev_warn(&net_dev->dev, -+ "netdevice name \"%s\" cannot be used, reverting to default..\n", -+ net_dev->name); -+ dev_alloc_name(net_dev, "eth%d"); -+ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name); -+ } -+ -+ err = netdev_init(net_dev); -+ if (err) -+ goto err_netdev_init; -+ -+ /* Configure checksum offload based on current interface flags */ -+ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); -+ if (err) -+ goto err_csum; -+ -+ err = set_tx_csum(priv, !!(net_dev->features & -+ (NETIF_F_IP_CSUM | 
NETIF_F_IPV6_CSUM))); -+ if (err) -+ goto err_csum; -+ -+ err = alloc_rings(priv); -+ if (err) -+ goto err_alloc_rings; -+ -+ net_dev->ethtool_ops = &dpaa2_ethtool_ops; -+ -+ err = setup_irqs(dpni_dev); -+ if (err) { -+ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); -+ priv->poll_thread = kthread_run(poll_link_state, priv, -+ "%s_poll_link", net_dev->name); -+ if (IS_ERR(priv->poll_thread)) { -+ netdev_err(net_dev, "Error starting polling thread\n"); -+ goto err_poll_thread; -+ } -+ priv->do_link_poll = true; -+ } -+ -+ dpaa2_eth_sysfs_init(&net_dev->dev); -+ dpaa2_dbg_add(priv); -+ -+ dev_info(dev, "Probed interface %s\n", net_dev->name); -+ return 0; -+ -+err_poll_thread: -+ free_rings(priv); -+err_alloc_rings: -+err_csum: -+ unregister_netdev(net_dev); -+err_netdev_init: -+ free_percpu(priv->percpu_extras); -+err_alloc_percpu_extras: -+ free_percpu(priv->percpu_stats); -+err_alloc_percpu_stats: -+ del_ch_napi(priv); -+err_bind: -+ free_dpbp(priv); -+err_dpbp_setup: -+ free_dpio(priv); -+err_dpio_setup: -+ kfree(priv->cls_rule); -+ dpni_close(priv->mc_io, 0, priv->mc_token); -+err_dpni_setup: -+ fsl_mc_portal_free(priv->mc_io); -+err_portal_alloc: -+ dev_set_drvdata(dev, NULL); -+ free_netdev(net_dev); -+ -+ return err; -+} -+ -+static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) -+{ -+ struct device *dev; -+ struct net_device *net_dev; -+ struct dpaa2_eth_priv *priv; -+ -+ dev = &ls_dev->dev; -+ net_dev = dev_get_drvdata(dev); -+ priv = netdev_priv(net_dev); -+ -+ dpaa2_dbg_remove(priv); -+ dpaa2_eth_sysfs_remove(&net_dev->dev); -+ -+ unregister_netdev(net_dev); -+ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); -+ -+ free_dpio(priv); -+ free_rings(priv); -+ del_ch_napi(priv); -+ free_dpbp(priv); -+ free_dpni(priv); -+ -+ fsl_mc_portal_free(priv->mc_io); -+ -+ free_percpu(priv->percpu_stats); -+ free_percpu(priv->percpu_extras); -+ -+ if (priv->do_link_poll) -+ kthread_stop(priv->poll_thread); -+ else 
-+ fsl_mc_free_irqs(ls_dev); -+ -+ kfree(priv->cls_rule); -+ -+ dev_set_drvdata(dev, NULL); -+ free_netdev(net_dev); -+ -+ return 0; -+} -+ -+static const struct fsl_mc_device_match_id dpaa2_eth_match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpni", -+ .ver_major = DPNI_VER_MAJOR, -+ .ver_minor = DPNI_VER_MINOR -+ }, -+ { .vendor = 0x0 } -+}; -+ -+static struct fsl_mc_driver dpaa2_eth_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpaa2_eth_probe, -+ .remove = dpaa2_eth_remove, -+ .match_id_table = dpaa2_eth_match_id_table -+}; -+ -+static int __init dpaa2_eth_driver_init(void) -+{ -+ int err; -+ -+ dpaa2_eth_dbg_init(); -+ -+ err = fsl_mc_driver_register(&dpaa2_eth_driver); -+ if (err) { -+ dpaa2_eth_dbg_exit(); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static void __exit dpaa2_eth_driver_exit(void) -+{ -+ fsl_mc_driver_unregister(&dpaa2_eth_driver); -+ dpaa2_eth_dbg_exit(); -+} -+ -+module_init(dpaa2_eth_driver_init); -+module_exit(dpaa2_eth_driver_exit); -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h -new file mode 100644 -index 0000000..bdcdbd6 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h -@@ -0,0 +1,397 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef __DPAA2_ETH_H -+#define __DPAA2_ETH_H -+ -+#include -+#include -+#include "../../fsl-mc/include/fsl_dpaa2_io.h" -+#include "../../fsl-mc/include/fsl_dpaa2_fd.h" -+#include "../../fsl-mc/include/dpbp.h" -+#include "../../fsl-mc/include/dpbp-cmd.h" -+#include "../../fsl-mc/include/dpcon.h" -+#include "../../fsl-mc/include/dpcon-cmd.h" -+#include "dpni.h" -+#include "dpni-cmd.h" -+ -+#include "dpaa2-eth-trace.h" -+#include "dpaa2-eth-debugfs.h" -+ -+#define DPAA2_ETH_STORE_SIZE 16 -+ -+/* Maximum number of scatter-gather entries in an ingress frame, -+ * considering the maximum receive frame size is 64K -+ */ -+#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) -+ -+/* Maximum acceptable MTU value. 
It is in direct relation with the hardware -+ * enforced Max Frame Length (currently 10k). -+ */ -+#define DPAA2_ETH_MFL (10 * 1024) -+#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) -+/* Convert L3 MTU to L2 MFL */ -+#define DPAA2_ETH_L2_MAX_FRM(mtu) (mtu + VLAN_ETH_HLEN) -+ -+/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo -+ * frames in the Rx queues (length of the current frame is not -+ * taken into account when making the taildrop decision) -+ */ -+#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) -+ -+/* Buffer quota per queue. Must be large enough such that for minimum sized -+ * frames taildrop kicks in before the bpool gets depleted, so we compute -+ * how many 64B frames fit inside the taildrop threshold and add a margin -+ * to accommodate the buffer refill delay. -+ */ -+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) -+#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) -+#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE -+ -+/* Maximum number of buffers that can be acquired/released through a single -+ * QBMan command -+ */ -+#define DPAA2_ETH_BUFS_PER_CMD 7 -+ -+/* Hardware requires alignment for ingress/egress buffer addresses -+ * and ingress buffer lengths. 
-+ */ -+#define DPAA2_ETH_RX_BUF_SIZE 2048 -+#define DPAA2_ETH_TX_BUF_ALIGN 64 -+#define DPAA2_ETH_RX_BUF_ALIGN 256 -+#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \ -+ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN) -+ -+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress -+ * buffers large enough to allow building an skb around them and also account -+ * for alignment restrictions -+ */ -+#define DPAA2_ETH_BUF_RAW_SIZE \ -+ (DPAA2_ETH_RX_BUF_SIZE + \ -+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ -+ DPAA2_ETH_RX_BUF_ALIGN) -+ -+/* PTP nominal frequency 1GHz */ -+#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1 -+ -+/* We are accommodating a skb backpointer and some S/G info -+ * in the frame's software annotation. The hardware -+ * options are either 0 or 64, so we choose the latter. -+ */ -+#define DPAA2_ETH_SWA_SIZE 64 -+ -+/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ -+struct dpaa2_eth_swa { -+ struct sk_buff *skb; -+ struct scatterlist *scl; -+ int num_sg; -+ int num_dma_bufs; -+}; -+ -+/* Annotation valid bits in FD FRC */ -+#define DPAA2_FD_FRC_FASV 0x8000 -+#define DPAA2_FD_FRC_FAEADV 0x4000 -+#define DPAA2_FD_FRC_FAPRV 0x2000 -+#define DPAA2_FD_FRC_FAIADV 0x1000 -+#define DPAA2_FD_FRC_FASWOV 0x0800 -+#define DPAA2_FD_FRC_FAICFDV 0x0400 -+ -+/* Annotation bits in FD CTRL */ -+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ -+#define DPAA2_FD_CTRL_PTA 0x00800000 -+#define DPAA2_FD_CTRL_PTV1 0x00400000 -+ -+/* Frame annotation status */ -+struct dpaa2_fas { -+ u8 reserved; -+ u8 ppid; -+ __le16 ifpid; -+ __le32 status; -+} __packed; -+ -+/* Frame annotation egress action descriptor */ -+#define DPAA2_FAEAD_OFFSET 0x58 -+ -+struct dpaa2_faead { -+ __le32 conf_fqid; -+ __le32 ctrl; -+}; -+ -+#define DPAA2_FAEAD_A2V 0x20000000 -+#define DPAA2_FAEAD_UPDV 0x00001000 -+#define DPAA2_FAEAD_UPD 0x00000010 -+ -+/* Error and status bits in the frame annotation status word */ -+/* Debug frame, otherwise supposed to be 
discarded */ -+#define DPAA2_FAS_DISC 0x80000000 -+/* MACSEC frame */ -+#define DPAA2_FAS_MS 0x40000000 -+#define DPAA2_FAS_PTP 0x08000000 -+/* Ethernet multicast frame */ -+#define DPAA2_FAS_MC 0x04000000 -+/* Ethernet broadcast frame */ -+#define DPAA2_FAS_BC 0x02000000 -+#define DPAA2_FAS_KSE 0x00040000 -+#define DPAA2_FAS_EOFHE 0x00020000 -+#define DPAA2_FAS_MNLE 0x00010000 -+#define DPAA2_FAS_TIDE 0x00008000 -+#define DPAA2_FAS_PIEE 0x00004000 -+/* Frame length error */ -+#define DPAA2_FAS_FLE 0x00002000 -+/* Frame physical error */ -+#define DPAA2_FAS_FPE 0x00001000 -+#define DPAA2_FAS_PTE 0x00000080 -+#define DPAA2_FAS_ISP 0x00000040 -+#define DPAA2_FAS_PHE 0x00000020 -+#define DPAA2_FAS_BLE 0x00000010 -+/* L3 csum validation performed */ -+#define DPAA2_FAS_L3CV 0x00000008 -+/* L3 csum error */ -+#define DPAA2_FAS_L3CE 0x00000004 -+/* L4 csum validation performed */ -+#define DPAA2_FAS_L4CV 0x00000002 -+/* L4 csum error */ -+#define DPAA2_FAS_L4CE 0x00000001 -+/* Possible errors on the ingress path */ -+#define DPAA2_ETH_RX_ERR_MASK (DPAA2_FAS_KSE | \ -+ DPAA2_FAS_EOFHE | \ -+ DPAA2_FAS_MNLE | \ -+ DPAA2_FAS_TIDE | \ -+ DPAA2_FAS_PIEE | \ -+ DPAA2_FAS_FLE | \ -+ DPAA2_FAS_FPE | \ -+ DPAA2_FAS_PTE | \ -+ DPAA2_FAS_ISP | \ -+ DPAA2_FAS_PHE | \ -+ DPAA2_FAS_BLE | \ -+ DPAA2_FAS_L3CE | \ -+ DPAA2_FAS_L4CE) -+/* Tx errors */ -+#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_FAS_KSE | \ -+ DPAA2_FAS_EOFHE | \ -+ DPAA2_FAS_MNLE | \ -+ DPAA2_FAS_TIDE) -+ -+/* Time in milliseconds between link state updates */ -+#define DPAA2_ETH_LINK_STATE_REFRESH 1000 -+ -+/* Driver statistics, other than those in struct rtnl_link_stats64. -+ * These are usually collected per-CPU and aggregated by ethtool. 
-+ */ -+struct dpaa2_eth_drv_stats { -+ __u64 tx_conf_frames; -+ __u64 tx_conf_bytes; -+ __u64 tx_sg_frames; -+ __u64 tx_sg_bytes; -+ __u64 rx_sg_frames; -+ __u64 rx_sg_bytes; -+ /* Enqueues retried due to portal busy */ -+ __u64 tx_portal_busy; -+}; -+ -+/* Per-FQ statistics */ -+struct dpaa2_eth_fq_stats { -+ /* Number of frames received on this queue */ -+ __u64 frames; -+}; -+ -+/* Per-channel statistics */ -+struct dpaa2_eth_ch_stats { -+ /* Volatile dequeues retried due to portal busy */ -+ __u64 dequeue_portal_busy; -+ /* Number of CDANs; useful to estimate avg NAPI len */ -+ __u64 cdan; -+ /* Number of frames received on queues from this channel */ -+ __u64 frames; -+ /* Pull errors */ -+ __u64 pull_err; -+}; -+ -+/* Maximum number of queues associated with a DPNI */ -+#define DPAA2_ETH_MAX_RX_QUEUES 16 -+#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS -+#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1 -+#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ -+ DPAA2_ETH_MAX_TX_QUEUES + \ -+ DPAA2_ETH_MAX_RX_ERR_QUEUES) -+ -+#define DPAA2_ETH_MAX_DPCONS NR_CPUS -+ -+enum dpaa2_eth_fq_type { -+ DPAA2_RX_FQ = 0, -+ DPAA2_TX_CONF_FQ, -+ DPAA2_RX_ERR_FQ -+}; -+ -+struct dpaa2_eth_priv; -+ -+struct dpaa2_eth_fq { -+ u32 fqid; -+ u16 flowid; -+ int target_cpu; -+ struct dpaa2_eth_channel *channel; -+ enum dpaa2_eth_fq_type type; -+ -+ void (*consume)(struct dpaa2_eth_priv *, -+ struct dpaa2_eth_channel *, -+ const struct dpaa2_fd *, -+ struct napi_struct *); -+ struct dpaa2_eth_fq_stats stats; -+}; -+ -+struct dpaa2_eth_channel { -+ struct dpaa2_io_notification_ctx nctx; -+ struct fsl_mc_device *dpcon; -+ int dpcon_id; -+ int ch_id; -+ int dpio_id; -+ struct napi_struct napi; -+ struct dpaa2_io_store *store; -+ struct dpaa2_eth_priv *priv; -+ int buf_count; -+ struct dpaa2_eth_ch_stats stats; -+}; -+ -+struct dpaa2_eth_cls_rule { -+ struct ethtool_rx_flow_spec fs; -+ bool in_use; -+}; -+ -+struct dpaa2_eth_hash_fields { -+ u64 rxnfc_field; -+ enum net_prot cls_prot; -+ int 
cls_field; -+ int offset; -+ int size; -+}; -+ -+/* Driver private data */ -+struct dpaa2_eth_priv { -+ struct net_device *net_dev; -+ -+ u8 num_fqs; -+ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; -+ -+ u8 num_channels; -+ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; -+ -+ int dpni_id; -+ struct dpni_attr dpni_attrs; -+ struct dpni_extended_cfg dpni_ext_cfg; -+ /* Insofar as the MC is concerned, we're using one layout on all 3 types -+ * of buffers (Rx, Tx, Tx-Conf). -+ */ -+ struct dpni_buffer_layout buf_layout; -+ u16 tx_data_offset; -+ -+ struct fsl_mc_device *dpbp_dev; -+ struct dpbp_attr dpbp_attrs; -+ -+ u16 tx_qdid; -+ struct fsl_mc_io *mc_io; -+ /* SysFS-controlled affinity mask for TxConf FQs */ -+ struct cpumask txconf_cpumask; -+ /* Cores which have an affine DPIO/DPCON. -+ * This is the cpu set on which Rx frames are processed; -+ * Tx confirmation frames are processed on a subset of this, -+ * depending on user settings. -+ */ -+ struct cpumask dpio_cpumask; -+ -+ /* Standard statistics */ -+ struct rtnl_link_stats64 __percpu *percpu_stats; -+ /* Extra stats, in addition to the ones known by the kernel */ -+ struct dpaa2_eth_drv_stats __percpu *percpu_extras; -+ -+ u16 mc_token; -+ -+ struct dpni_link_state link_state; -+ bool do_link_poll; -+ struct task_struct *poll_thread; -+ -+ struct dpaa2_eth_hash_fields *hash_fields; -+ u8 num_hash_fields; -+ /* enabled ethtool hashing bits */ -+ u64 rx_flow_hash; -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS -+ struct dpaa2_debugfs dbg; -+#endif -+ -+ /* array of classification rules */ -+ struct dpaa2_eth_cls_rule *cls_rule; -+ -+ struct dpni_tx_shaping_cfg shaping_cfg; -+ -+ bool ts_tx_en; /* Tx timestamping enabled */ -+ bool ts_rx_en; /* Rx timestamping enabled */ -+}; -+ -+#define dpaa2_eth_hash_enabled(priv) \ -+ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_HASH) -+ -+#define dpaa2_eth_fs_enabled(priv) \ -+ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_FS) -+ -+#define 
dpaa2_eth_fs_mask_enabled(priv) \ -+ ((priv)->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT) -+ -+#define DPAA2_CLASSIFIER_ENTRY_COUNT 16 -+ -+/* Required by struct dpni_attr::ext_cfg_iova */ -+#define DPAA2_EXT_CFG_SIZE 256 -+ -+/* size of DMA memory used to pass configuration to classifier, in bytes */ -+#define DPAA2_CLASSIFIER_DMA_SIZE 256 -+ -+extern const struct ethtool_ops dpaa2_ethtool_ops; -+ -+static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) -+{ -+ if (!dpaa2_eth_hash_enabled(priv)) -+ return 1; -+ -+ return priv->dpni_ext_cfg.tc_cfg[0].max_dist; -+} -+ -+static inline int dpaa2_eth_max_channels(struct dpaa2_eth_priv *priv) -+{ -+ /* Ideally, we want a number of channels large enough -+ * to accommodate both the Rx distribution size -+ * and the max number of Tx confirmation queues -+ */ -+ return max_t(int, dpaa2_eth_queue_count(priv), -+ priv->dpni_attrs.max_senders); -+} -+ -+void check_cls_support(struct dpaa2_eth_priv *priv); -+ -+#endif /* __DPAA2_H */ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -new file mode 100644 -index 0000000..1d792cd ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -@@ -0,0 +1,732 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include "dpni.h" /* DPNI_LINK_OPT_* */ -+#include "dpaa2-eth.h" -+ -+/* To be kept in sync with 'enum dpni_counter' */ -+char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { -+ "rx frames", -+ "rx bytes", -+ /* rx frames filtered/policed */ -+ "rx filtered frames", -+ /* rx frames dropped with errors */ -+ "rx discarded frames", -+ "rx mcast frames", -+ "rx mcast bytes", -+ "rx bcast frames", -+ "rx bcast bytes", -+ "tx frames", -+ "tx bytes", -+ /* tx frames dropped with errors */ -+ "tx discarded frames", -+}; -+ -+#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) -+ -+/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */ -+char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { -+ /* per-cpu stats */ -+ -+ "tx conf frames", -+ "tx conf bytes", -+ "tx sg frames", -+ "tx sg bytes", -+ "rx sg frames", -+ "rx sg bytes", -+ /* how many times we had to retry the enqueue command */ -+ "enqueue portal busy", -+ -+ /* Channel stats */ -+ /* How many times we had to retry the volatile dequeue command */ -+ "dequeue portal busy", -+ "channel pull errors", -+ /* Number of notifications received */ -+ "cdan", -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+ /* FQ stats */ -+ "rx pending frames", -+ "rx pending bytes", -+ "tx conf pending frames", -+ "tx conf pending bytes", -+ "buffer count" -+#endif -+}; -+ -+#define DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) -+ -+static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, -+ struct ethtool_drvinfo *drvinfo) -+{ -+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); -+ strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version)); -+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); -+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), -+ sizeof(drvinfo->bus_info)); -+} -+ -+static int dpaa2_eth_get_settings(struct net_device *net_dev, -+ struct ethtool_cmd *cmd) -+{ -+ struct dpni_link_state state = {0}; -+ int err = 0; -+ struct dpaa2_eth_priv *priv = 
netdev_priv(net_dev); -+ -+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); -+ if (err) { -+ netdev_err(net_dev, "ERROR %d getting link state", err); -+ goto out; -+ } -+ -+ /* At the moment, we have no way of interrogating the DPMAC -+ * from the DPNI side - and for that matter there may exist -+ * no DPMAC at all. So for now we just don't report anything -+ * beyond the DPNI attributes. -+ */ -+ if (state.options & DPNI_LINK_OPT_AUTONEG) -+ cmd->autoneg = AUTONEG_ENABLE; -+ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) -+ cmd->duplex = DUPLEX_FULL; -+ ethtool_cmd_speed_set(cmd, state.rate); -+ -+out: -+ return err; -+} -+ -+static int dpaa2_eth_set_settings(struct net_device *net_dev, -+ struct ethtool_cmd *cmd) -+{ -+ struct dpni_link_cfg cfg = {0}; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err = 0; -+ -+ netdev_dbg(net_dev, "Setting link parameters..."); -+ -+ /* Due to a temporary MC limitation, the DPNI must be down -+ * in order to be able to change link settings. Taking steps to let -+ * the user know that. 
-+ */ -+ if (netif_running(net_dev)) { -+ netdev_info(net_dev, "Sorry, interface must be brought down first.\n"); -+ return -EACCES; -+ } -+ -+ cfg.rate = ethtool_cmd_speed(cmd); -+ if (cmd->autoneg == AUTONEG_ENABLE) -+ cfg.options |= DPNI_LINK_OPT_AUTONEG; -+ else -+ cfg.options &= ~DPNI_LINK_OPT_AUTONEG; -+ if (cmd->duplex == DUPLEX_HALF) -+ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; -+ else -+ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; -+ -+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); -+ if (err) -+ /* ethtool will be loud enough if we return an error; no point -+ * in putting our own error message on the console by default -+ */ -+ netdev_dbg(net_dev, "ERROR %d setting link cfg", err); -+ -+ return err; -+} -+ -+static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, -+ u8 *data) -+{ -+ u8 *p = data; -+ int i; -+ -+ switch (stringset) { -+ case ETH_SS_STATS: -+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { -+ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); -+ p += ETH_GSTRING_LEN; -+ } -+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { -+ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); -+ p += ETH_GSTRING_LEN; -+ } -+ break; -+ } -+} -+ -+static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) -+{ -+ switch (sset) { -+ case ETH_SS_STATS: /* ethtool_get_stats(), ethtool_get_drvinfo() */ -+ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+/** Fill in hardware counters, as returned by MC. 
-+ */ -+static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, -+ struct ethtool_stats *stats, -+ u64 *data) -+{ -+ int i; /* Current index in the data array */ -+ int j, k, err; -+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+ u32 fcnt, bcnt; -+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0; -+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0; -+ u32 buf_cnt; -+#endif -+ u64 cdan = 0; -+ u64 portal_busy = 0, pull_err = 0; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct dpaa2_eth_drv_stats *extras; -+ struct dpaa2_eth_ch_stats *ch_stats; -+ -+ memset(data, 0, -+ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); -+ -+ /* Print standard counters, from DPNI statistics */ -+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { -+ err = dpni_get_counter(priv->mc_io, 0, priv->mc_token, i, -+ data + i); -+ if (err != 0) -+ netdev_warn(net_dev, "Err %d getting DPNI counter %d", -+ err, i); -+ } -+ -+ /* Print per-cpu extra stats */ -+ for_each_online_cpu(k) { -+ extras = per_cpu_ptr(priv->percpu_extras, k); -+ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) -+ *((__u64 *)data + i + j) += *((__u64 *)extras + j); -+ } -+ i += j; -+ -+ /* We may be using fewer DPIOs than actual CPUs */ -+ for_each_cpu(j, &priv->dpio_cpumask) { -+ ch_stats = &priv->channel[j]->stats; -+ cdan += ch_stats->cdan; -+ portal_busy += ch_stats->dequeue_portal_busy; -+ pull_err += ch_stats->pull_err; -+ } -+ -+ *(data + i++) = portal_busy; -+ *(data + i++) = pull_err; -+ *(data + i++) = cdan; -+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+ for (j = 0; j < priv->num_fqs; j++) { -+ /* Print FQ instantaneous counts */ -+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid, -+ &fcnt, &bcnt); -+ if (err) { -+ netdev_warn(net_dev, "FQ query error %d", err); -+ return; -+ } -+ -+ if (priv->fq[j].type == DPAA2_TX_CONF_FQ) { -+ fcnt_tx_total += fcnt; -+ bcnt_tx_total += bcnt; -+ } else { -+ fcnt_rx_total += fcnt; -+ bcnt_rx_total += bcnt; -+ } -+ } -+ *(data + i++) = fcnt_rx_total; -+ *(data + i++) = 
bcnt_rx_total; -+ *(data + i++) = fcnt_tx_total; -+ *(data + i++) = bcnt_tx_total; -+ -+ err = dpaa2_io_query_bp_count(NULL, priv->dpbp_attrs.bpid, &buf_cnt); -+ if (err) { -+ netdev_warn(net_dev, "Buffer count query error %d\n", err); -+ return; -+ } -+ *(data + i++) = buf_cnt; -+#endif -+} -+ -+static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field) -+{ -+ int i, off = 0; -+ -+ for (i = 0; i < priv->num_hash_fields; i++) { -+ if (priv->hash_fields[i].cls_prot == prot && -+ priv->hash_fields[i].cls_field == field) -+ return off; -+ off += priv->hash_fields[i].size; -+ } -+ -+ return -1; -+} -+ -+static u8 cls_key_size(struct dpaa2_eth_priv *priv) -+{ -+ u8 i, size = 0; -+ -+ for (i = 0; i < priv->num_hash_fields; i++) -+ size += priv->hash_fields[i].size; -+ -+ return size; -+} -+ -+void check_cls_support(struct dpaa2_eth_priv *priv) -+{ -+ u8 key_size = cls_key_size(priv); -+ struct device *dev = priv->net_dev->dev.parent; -+ -+ if (dpaa2_eth_hash_enabled(priv)) { -+ if (priv->dpni_attrs.max_dist_key_size < key_size) { -+ dev_dbg(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n", -+ priv->dpni_attrs.max_dist_key_size, -+ key_size); -+ goto disable_cls; -+ } -+ if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) { -+ dev_dbg(dev, "Too many key fields (max = %d). Hashing and steering are disabled\n", -+ DPKG_MAX_NUM_OF_EXTRACTS); -+ goto disable_cls; -+ } -+ } -+ -+ if (dpaa2_eth_fs_enabled(priv)) { -+ if (!dpaa2_eth_hash_enabled(priv)) { -+ dev_dbg(dev, "DPNI_OPT_DIST_HASH option missing. Steering is disabled\n"); -+ goto disable_cls; -+ } -+ if (!dpaa2_eth_fs_mask_enabled(priv)) { -+ dev_dbg(dev, "Key masks not supported. 
Steering is disabled\n"); -+ goto disable_fs; -+ } -+ } -+ -+ return; -+ -+disable_cls: -+ priv->dpni_attrs.options &= ~DPNI_OPT_DIST_HASH; -+disable_fs: -+ priv->dpni_attrs.options &= ~(DPNI_OPT_DIST_FS | -+ DPNI_OPT_FS_MASK_SUPPORT); -+} -+ -+static int prep_l4_rule(struct dpaa2_eth_priv *priv, -+ struct ethtool_tcpip4_spec *l4_value, -+ struct ethtool_tcpip4_spec *l4_mask, -+ void *key, void *mask, u8 l4_proto) -+{ -+ int offset; -+ -+ if (l4_mask->tos) { -+ netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ if (l4_mask->ip4src) { -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); -+ *(u32 *)(key + offset) = l4_value->ip4src; -+ *(u32 *)(mask + offset) = l4_mask->ip4src; -+ } -+ -+ if (l4_mask->ip4dst) { -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); -+ *(u32 *)(key + offset) = l4_value->ip4dst; -+ *(u32 *)(mask + offset) = l4_mask->ip4dst; -+ } -+ -+ if (l4_mask->psrc) { -+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); -+ *(u32 *)(key + offset) = l4_value->psrc; -+ *(u32 *)(mask + offset) = l4_mask->psrc; -+ } -+ -+ if (l4_mask->pdst) { -+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); -+ *(u32 *)(key + offset) = l4_value->pdst; -+ *(u32 *)(mask + offset) = l4_mask->pdst; -+ } -+ -+ /* Only apply the rule for the user-specified L4 protocol -+ * and if ethertype matches IPv4 -+ */ -+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); -+ *(u16 *)(key + offset) = htons(ETH_P_IP); -+ *(u16 *)(mask + offset) = 0xFFFF; -+ -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); -+ *(u8 *)(key + offset) = l4_proto; -+ *(u8 *)(mask + offset) = 0xFF; -+ -+ /* TODO: check IP version */ -+ -+ return 0; -+} -+ -+static int prep_eth_rule(struct dpaa2_eth_priv *priv, -+ struct ethhdr *eth_value, struct ethhdr *eth_mask, -+ void *key, void *mask) -+{ -+ int offset; -+ -+ if (eth_mask->h_proto) { -+ netdev_err(priv->net_dev, "Ethertype is not supported!\n"); -+ 
return -EOPNOTSUPP; -+ } -+ -+ if (!is_zero_ether_addr(eth_mask->h_source)) { -+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA); -+ ether_addr_copy(key + offset, eth_value->h_source); -+ ether_addr_copy(mask + offset, eth_mask->h_source); -+ } -+ -+ if (!is_zero_ether_addr(eth_mask->h_dest)) { -+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); -+ ether_addr_copy(key + offset, eth_value->h_dest); -+ ether_addr_copy(mask + offset, eth_mask->h_dest); -+ } -+ -+ return 0; -+} -+ -+static int prep_user_ip_rule(struct dpaa2_eth_priv *priv, -+ struct ethtool_usrip4_spec *uip_value, -+ struct ethtool_usrip4_spec *uip_mask, -+ void *key, void *mask) -+{ -+ int offset; -+ -+ if (uip_mask->tos) -+ return -EOPNOTSUPP; -+ -+ if (uip_mask->ip4src) { -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); -+ *(u32 *)(key + offset) = uip_value->ip4src; -+ *(u32 *)(mask + offset) = uip_mask->ip4src; -+ } -+ -+ if (uip_mask->ip4dst) { -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); -+ *(u32 *)(key + offset) = uip_value->ip4dst; -+ *(u32 *)(mask + offset) = uip_mask->ip4dst; -+ } -+ -+ if (uip_mask->proto) { -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); -+ *(u32 *)(key + offset) = uip_value->proto; -+ *(u32 *)(mask + offset) = uip_mask->proto; -+ } -+ if (uip_mask->l4_4_bytes) { -+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); -+ *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16; -+ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16; -+ -+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); -+ *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF; -+ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF; -+ } -+ -+ /* Ethertype must be IP */ -+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); -+ *(u16 *)(key + offset) = htons(ETH_P_IP); -+ *(u16 *)(mask + offset) = 0xFFFF; -+ -+ return 0; -+} -+ -+static int prep_ext_rule(struct dpaa2_eth_priv *priv, -+ struct ethtool_flow_ext *ext_value, -+ 
struct ethtool_flow_ext *ext_mask, -+ void *key, void *mask) -+{ -+ int offset; -+ -+ if (ext_mask->vlan_etype) -+ return -EOPNOTSUPP; -+ -+ if (ext_mask->vlan_tci) { -+ offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI); -+ *(u16 *)(key + offset) = ext_value->vlan_tci; -+ *(u16 *)(mask + offset) = ext_mask->vlan_tci; -+ } -+ -+ return 0; -+} -+ -+static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv, -+ struct ethtool_flow_ext *ext_value, -+ struct ethtool_flow_ext *ext_mask, -+ void *key, void *mask) -+{ -+ int offset; -+ -+ if (!is_zero_ether_addr(ext_mask->h_dest)) { -+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); -+ ether_addr_copy(key + offset, ext_value->h_dest); -+ ether_addr_copy(mask + offset, ext_mask->h_dest); -+ } -+ -+ return 0; -+} -+ -+static int prep_cls_rule(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs, -+ void *key) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ const u8 key_size = cls_key_size(priv); -+ void *msk = key + key_size; -+ int err; -+ -+ memset(key, 0, key_size * 2); -+ -+ switch (fs->flow_type & 0xff) { -+ case TCP_V4_FLOW: -+ err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec, -+ &fs->m_u.tcp_ip4_spec, key, msk, -+ IPPROTO_TCP); -+ break; -+ case UDP_V4_FLOW: -+ err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec, -+ &fs->m_u.udp_ip4_spec, key, msk, -+ IPPROTO_UDP); -+ break; -+ case SCTP_V4_FLOW: -+ err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec, -+ &fs->m_u.sctp_ip4_spec, key, msk, -+ IPPROTO_SCTP); -+ break; -+ case ETHER_FLOW: -+ err = prep_eth_rule(priv, &fs->h_u.ether_spec, -+ &fs->m_u.ether_spec, key, msk); -+ break; -+ case IP_USER_FLOW: -+ err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec, -+ &fs->m_u.usr_ip4_spec, key, msk); -+ break; -+ default: -+ /* TODO: AH, ESP */ -+ return -EOPNOTSUPP; -+ } -+ if (err) -+ return err; -+ -+ if (fs->flow_type & FLOW_EXT) { -+ err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); -+ if (err) -+ return err; -+ } -+ -+ if 
(fs->flow_type & FLOW_MAC_EXT) { -+ err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); -+ if (err) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int do_cls(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs, -+ bool add) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct device *dev = net_dev->dev.parent; -+ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; -+ struct dpni_rule_cfg rule_cfg; -+ void *dma_mem; -+ int err = 0; -+ -+ if (!dpaa2_eth_fs_enabled(priv)) { -+ netdev_err(net_dev, "dev does not support steering!\n"); -+ /* dev doesn't support steering */ -+ return -EOPNOTSUPP; -+ } -+ -+ if ((fs->ring_cookie != RX_CLS_FLOW_DISC && -+ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) || -+ fs->location >= rule_cnt) -+ return -EINVAL; -+ -+ memset(&rule_cfg, 0, sizeof(rule_cfg)); -+ rule_cfg.key_size = cls_key_size(priv); -+ -+ /* allocate twice the key size, for the actual key and for mask */ -+ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); -+ if (!dma_mem) -+ return -ENOMEM; -+ -+ err = prep_cls_rule(net_dev, fs, dma_mem); -+ if (err) -+ goto err_free_mem; -+ -+ rule_cfg.key_iova = dma_map_single(dev, dma_mem, -+ rule_cfg.key_size * 2, -+ DMA_TO_DEVICE); -+ -+ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size; -+ -+ /* No way to control rule order in firmware */ -+ if (add) -+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, -+ &rule_cfg, (u16)fs->ring_cookie); -+ else -+ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, -+ &rule_cfg); -+ -+ dma_unmap_single(dev, rule_cfg.key_iova, -+ rule_cfg.key_size * 2, DMA_TO_DEVICE); -+ if (err) { -+ netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err); -+ goto err_free_mem; -+ } -+ -+ priv->cls_rule[fs->location].fs = *fs; -+ priv->cls_rule[fs->location].in_use = true; -+ -+err_free_mem: -+ kfree(dma_mem); -+ -+ return err; -+} -+ -+static int add_cls(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs) 
-+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err; -+ -+ err = do_cls(net_dev, fs, true); -+ if (err) -+ return err; -+ -+ priv->cls_rule[fs->location].in_use = true; -+ priv->cls_rule[fs->location].fs = *fs; -+ -+ return 0; -+} -+ -+static int del_cls(struct net_device *net_dev, int location) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err; -+ -+ err = do_cls(net_dev, &priv->cls_rule[location].fs, false); -+ if (err) -+ return err; -+ -+ priv->cls_rule[location].in_use = false; -+ -+ return 0; -+} -+ -+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, -+ struct ethtool_rxnfc *rxnfc) -+{ -+ int err = 0; -+ -+ switch (rxnfc->cmd) { -+ case ETHTOOL_SRXCLSRLINS: -+ err = add_cls(net_dev, &rxnfc->fs); -+ break; -+ -+ case ETHTOOL_SRXCLSRLDEL: -+ err = del_cls(net_dev, rxnfc->fs.location); -+ break; -+ -+ default: -+ err = -EOPNOTSUPP; -+ } -+ -+ return err; -+} -+ -+static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, -+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; -+ int i, j; -+ -+ switch (rxnfc->cmd) { -+ case ETHTOOL_GRXFH: -+ /* we purposely ignore cmd->flow_type, because the hashing key -+ * is the same (and fixed) for all protocols -+ */ -+ rxnfc->data = priv->rx_flow_hash; -+ break; -+ -+ case ETHTOOL_GRXRINGS: -+ rxnfc->data = dpaa2_eth_queue_count(priv); -+ break; -+ -+ case ETHTOOL_GRXCLSRLCNT: -+ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++) -+ if (priv->cls_rule[i].in_use) -+ rxnfc->rule_cnt++; -+ rxnfc->data = rule_cnt; -+ break; -+ -+ case ETHTOOL_GRXCLSRULE: -+ if (!priv->cls_rule[rxnfc->fs.location].in_use) -+ return -EINVAL; -+ -+ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs; -+ break; -+ -+ case ETHTOOL_GRXCLSRLALL: -+ for (i = 0, j = 0; i < rule_cnt; i++) { -+ if (!priv->cls_rule[i].in_use) -+ continue; -+ if (j == rxnfc->rule_cnt) -+ return -EMSGSIZE; -+ rule_locs[j++] = 
i; -+ } -+ rxnfc->rule_cnt = j; -+ rxnfc->data = rule_cnt; -+ break; -+ -+ default: -+ return -EOPNOTSUPP; -+ } -+ -+ return 0; -+} -+ -+const struct ethtool_ops dpaa2_ethtool_ops = { -+ .get_drvinfo = dpaa2_eth_get_drvinfo, -+ .get_link = ethtool_op_get_link, -+ .get_settings = dpaa2_eth_get_settings, -+ .set_settings = dpaa2_eth_set_settings, -+ .get_sset_count = dpaa2_eth_get_sset_count, -+ .get_ethtool_stats = dpaa2_eth_get_ethtool_stats, -+ .get_strings = dpaa2_eth_get_strings, -+ .get_rxnfc = dpaa2_eth_get_rxnfc, -+ .set_rxnfc = dpaa2_eth_set_rxnfc, -+}; -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h -new file mode 100644 -index 0000000..92ec12b ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h -@@ -0,0 +1,175 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPKG_H_ -+#define __FSL_DPKG_H_ -+ -+#include -+#include "../../fsl-mc/include/net.h" -+ -+/* Data Path Key Generator API -+ * Contains initialization APIs and runtime APIs for the Key Generator -+ */ -+ -+/** Key Generator properties */ -+ -+/** -+ * Number of masks per key extraction -+ */ -+#define DPKG_NUM_OF_MASKS 4 -+/** -+ * Number of extractions per key profile -+ */ -+#define DPKG_MAX_NUM_OF_EXTRACTS 10 -+ -+/** -+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types -+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset -+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field -+ * @DPKG_FULL_FIELD: Extract a full field -+ */ -+enum dpkg_extract_from_hdr_type { -+ DPKG_FROM_HDR = 0, -+ DPKG_FROM_FIELD = 1, -+ DPKG_FULL_FIELD = 2 -+}; -+ -+/** -+ * enum dpkg_extract_type - Enumeration for selecting extraction type -+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header -+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header -+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; -+ * e.g. 
can be used to extract header existence; -+ * please refer to 'Parse Result definition' section in the parser BG -+ */ -+enum dpkg_extract_type { -+ DPKG_EXTRACT_FROM_HDR = 0, -+ DPKG_EXTRACT_FROM_DATA = 1, -+ DPKG_EXTRACT_FROM_PARSE = 3 -+}; -+ -+/** -+ * struct dpkg_mask - A structure for defining a single extraction mask -+ * @mask: Byte mask for the extracted content -+ * @offset: Offset within the extracted content -+ */ -+struct dpkg_mask { -+ uint8_t mask; -+ uint8_t offset; -+}; -+ -+/** -+ * struct dpkg_extract - A structure for defining a single extraction -+ * @type: Determines how the union below is interpreted: -+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; -+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; -+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' -+ * @extract: Selects extraction method -+ * @num_of_byte_masks: Defines the number of valid entries in the array below; -+ * This is also the number of bytes to be used as masks -+ * @masks: Masks parameters -+ */ -+struct dpkg_extract { -+ enum dpkg_extract_type type; -+ /** -+ * union extract - Selects extraction method -+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' -+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' -+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' -+ */ -+ union { -+ /** -+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' -+ * @prot: Any of the supported headers -+ * @type: Defines the type of header extraction: -+ * DPKG_FROM_HDR: use size & offset below; -+ * DPKG_FROM_FIELD: use field, size and offset below; -+ * DPKG_FULL_FIELD: use field below -+ * @field: One of the supported fields (NH_FLD_) -+ * -+ * @size: Size in bytes -+ * @offset: Byte offset -+ * @hdr_index: Clear for cases not listed below; -+ * Used for protocols that may have more than a single -+ * header, 0 indicates an outer header; -+ * Supported protocols (possible values): -+ * NET_PROT_VLAN (0, HDR_INDEX_LAST); -+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); -+ * 
NET_PROT_IP(0, HDR_INDEX_LAST); -+ * NET_PROT_IPv4(0, HDR_INDEX_LAST); -+ * NET_PROT_IPv6(0, HDR_INDEX_LAST); -+ */ -+ -+ struct { -+ enum net_prot prot; -+ enum dpkg_extract_from_hdr_type type; -+ uint32_t field; -+ uint8_t size; -+ uint8_t offset; -+ uint8_t hdr_index; -+ } from_hdr; -+ /** -+ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' -+ * @size: Size in bytes -+ * @offset: Byte offset -+ */ -+ struct { -+ uint8_t size; -+ uint8_t offset; -+ } from_data; -+ -+ /** -+ * struct from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' -+ * @size: Size in bytes -+ * @offset: Byte offset -+ */ -+ struct { -+ uint8_t size; -+ uint8_t offset; -+ } from_parse; -+ } extract; -+ -+ uint8_t num_of_byte_masks; -+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; -+}; -+ -+/** -+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation -+ * profile (rule) -+ * @num_extracts: Defines the number of valid entries in the array below -+ * @extracts: Array of required extractions -+ */ -+struct dpkg_profile_cfg { -+ uint8_t num_extracts; -+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; -+}; -+ -+#endif /* __FSL_DPKG_H_ */ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h -new file mode 100644 -index 0000000..c0f8af0 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h -@@ -0,0 +1,1058 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPNI_CMD_H -+#define _FSL_DPNI_CMD_H -+ -+/* DPNI Version */ -+#define DPNI_VER_MAJOR 6 -+#define DPNI_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPNI_CMDID_OPEN 0x801 -+#define DPNI_CMDID_CLOSE 0x800 -+#define DPNI_CMDID_CREATE 0x901 -+#define DPNI_CMDID_DESTROY 0x900 -+ -+#define DPNI_CMDID_ENABLE 0x002 -+#define DPNI_CMDID_DISABLE 0x003 -+#define DPNI_CMDID_GET_ATTR 0x004 -+#define DPNI_CMDID_RESET 0x005 -+#define DPNI_CMDID_IS_ENABLED 0x006 -+ -+#define DPNI_CMDID_SET_IRQ 0x010 -+#define DPNI_CMDID_GET_IRQ 0x011 -+#define DPNI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPNI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPNI_CMDID_SET_IRQ_MASK 0x014 -+#define DPNI_CMDID_GET_IRQ_MASK 0x015 -+#define DPNI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPNI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPNI_CMDID_SET_POOLS 0x200 -+#define DPNI_CMDID_GET_RX_BUFFER_LAYOUT 0x201 -+#define DPNI_CMDID_SET_RX_BUFFER_LAYOUT 0x202 -+#define DPNI_CMDID_GET_TX_BUFFER_LAYOUT 0x203 -+#define DPNI_CMDID_SET_TX_BUFFER_LAYOUT 0x204 -+#define DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT 0x205 -+#define DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT 0x206 -+#define DPNI_CMDID_SET_L3_CHKSUM_VALIDATION 0x207 -+#define DPNI_CMDID_GET_L3_CHKSUM_VALIDATION 0x208 -+#define DPNI_CMDID_SET_L4_CHKSUM_VALIDATION 0x209 -+#define DPNI_CMDID_GET_L4_CHKSUM_VALIDATION 0x20A -+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR 0x20B -+#define DPNI_CMDID_SET_TX_CONF_REVOKE 0x20C -+ -+#define DPNI_CMDID_GET_QDID 0x210 -+#define DPNI_CMDID_GET_SP_INFO 0x211 -+#define DPNI_CMDID_GET_TX_DATA_OFFSET 0x212 -+#define DPNI_CMDID_GET_COUNTER 0x213 -+#define DPNI_CMDID_SET_COUNTER 0x214 -+#define DPNI_CMDID_GET_LINK_STATE 0x215 -+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH 0x216 -+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH 0x217 -+#define DPNI_CMDID_SET_MTU 0x218 -+#define DPNI_CMDID_GET_MTU 0x219 -+#define DPNI_CMDID_SET_LINK_CFG 0x21A -+#define DPNI_CMDID_SET_TX_SHAPING 0x21B -+ -+#define DPNI_CMDID_SET_MCAST_PROMISC 0x220 -+#define 
DPNI_CMDID_GET_MCAST_PROMISC 0x221 -+#define DPNI_CMDID_SET_UNICAST_PROMISC 0x222 -+#define DPNI_CMDID_GET_UNICAST_PROMISC 0x223 -+#define DPNI_CMDID_SET_PRIM_MAC 0x224 -+#define DPNI_CMDID_GET_PRIM_MAC 0x225 -+#define DPNI_CMDID_ADD_MAC_ADDR 0x226 -+#define DPNI_CMDID_REMOVE_MAC_ADDR 0x227 -+#define DPNI_CMDID_CLR_MAC_FILTERS 0x228 -+ -+#define DPNI_CMDID_SET_VLAN_FILTERS 0x230 -+#define DPNI_CMDID_ADD_VLAN_ID 0x231 -+#define DPNI_CMDID_REMOVE_VLAN_ID 0x232 -+#define DPNI_CMDID_CLR_VLAN_FILTERS 0x233 -+ -+#define DPNI_CMDID_SET_RX_TC_DIST 0x235 -+#define DPNI_CMDID_SET_TX_FLOW 0x236 -+#define DPNI_CMDID_GET_TX_FLOW 0x237 -+#define DPNI_CMDID_SET_RX_FLOW 0x238 -+#define DPNI_CMDID_GET_RX_FLOW 0x239 -+#define DPNI_CMDID_SET_RX_ERR_QUEUE 0x23A -+#define DPNI_CMDID_GET_RX_ERR_QUEUE 0x23B -+ -+#define DPNI_CMDID_SET_RX_TC_POLICING 0x23E -+#define DPNI_CMDID_SET_RX_TC_EARLY_DROP 0x23F -+ -+#define DPNI_CMDID_SET_QOS_TBL 0x240 -+#define DPNI_CMDID_ADD_QOS_ENT 0x241 -+#define DPNI_CMDID_REMOVE_QOS_ENT 0x242 -+#define DPNI_CMDID_CLR_QOS_TBL 0x243 -+#define DPNI_CMDID_ADD_FS_ENT 0x244 -+#define DPNI_CMDID_REMOVE_FS_ENT 0x245 -+#define DPNI_CMDID_CLR_FS_ENT 0x246 -+#define DPNI_CMDID_SET_VLAN_INSERTION 0x247 -+#define DPNI_CMDID_SET_VLAN_REMOVAL 0x248 -+#define DPNI_CMDID_SET_IPR 0x249 -+#define DPNI_CMDID_SET_IPF 0x24A -+ -+#define DPNI_CMDID_SET_TX_SELECTION 0x250 -+#define DPNI_CMDID_GET_RX_TC_POLICING 0x251 -+#define DPNI_CMDID_GET_RX_TC_EARLY_DROP 0x252 -+#define DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION 0x253 -+#define DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION 0x254 -+#define DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION 0x255 -+#define DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION 0x256 -+#define DPNI_CMDID_SET_TX_CONF 0x257 -+#define DPNI_CMDID_GET_TX_CONF 0x258 -+#define DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION 0x259 -+#define DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION 0x25A -+#define DPNI_CMDID_SET_TX_TC_EARLY_DROP 0x25B -+#define 
DPNI_CMDID_GET_TX_TC_EARLY_DROP 0x25C -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_OPEN(cmd, dpni_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id) -+ -+#define DPNI_PREP_EXTENDED_CFG(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ -+ MC_PREP_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ -+ MC_PREP_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ -+ MC_PREP_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ -+ MC_PREP_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ -+ MC_PREP_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ -+ MC_PREP_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ -+ MC_PREP_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ -+ MC_PREP_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ -+ MC_PREP_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ -+ MC_PREP_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ -+ MC_PREP_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ -+ MC_PREP_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ -+ MC_PREP_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ -+ MC_PREP_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ -+ MC_PREP_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ -+ MC_PREP_OP(ext, 4, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv4); \ -+ MC_PREP_OP(ext, 4, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv6); \ -+ MC_PREP_OP(ext, 4, 32, 16, uint16_t, \ -+ cfg->ipr_cfg.max_reass_frm_size); \ -+ MC_PREP_OP(ext, 5, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv4); \ -+ MC_PREP_OP(ext, 5, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv6); \ -+} while (0) -+ -+#define DPNI_EXT_EXTENDED_CFG(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ -+ MC_EXT_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ -+ MC_EXT_OP(ext, 0, 
32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ -+ MC_EXT_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ -+ MC_EXT_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ -+ MC_EXT_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ -+ MC_EXT_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ -+ MC_EXT_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ -+ MC_EXT_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ -+ MC_EXT_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ -+ MC_EXT_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ -+ MC_EXT_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ -+ MC_EXT_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ -+ MC_EXT_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ -+ MC_EXT_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ -+ MC_EXT_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ -+ MC_EXT_OP(ext, 4, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv4); \ -+ MC_EXT_OP(ext, 4, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv6); \ -+ MC_EXT_OP(ext, 4, 32, 16, uint16_t, \ -+ cfg->ipr_cfg.max_reass_frm_size); \ -+ MC_EXT_OP(ext, 5, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv4); \ -+ MC_EXT_OP(ext, 5, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv6); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->adv.max_tcs); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->adv.max_senders); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->adv.options); \ -+ 
MC_CMD_OP(cmd, 2, 0, 8, uint8_t, cfg->adv.max_unicast_filters); \ -+ MC_CMD_OP(cmd, 2, 8, 8, uint8_t, cfg->adv.max_multicast_filters); \ -+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, cfg->adv.max_vlan_filters); \ -+ MC_CMD_OP(cmd, 2, 24, 8, uint8_t, cfg->adv.max_qos_entries); \ -+ MC_CMD_OP(cmd, 2, 32, 8, uint8_t, cfg->adv.max_qos_key_size); \ -+ MC_CMD_OP(cmd, 2, 48, 8, uint8_t, cfg->adv.max_dist_key_size); \ -+ MC_CMD_OP(cmd, 2, 56, 8, enum net_prot, cfg->adv.start_hdr); \ -+ MC_CMD_OP(cmd, 4, 48, 8, uint8_t, cfg->adv.max_policers); \ -+ MC_CMD_OP(cmd, 4, 56, 8, uint8_t, cfg->adv.max_congestion_ctrl); \ -+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, cfg->adv.ext_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_POOLS(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \ -+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 
0, 32, int, cfg->pools[5].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\ -+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, 
uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_ATTR(cmd, attr) \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, attr->ext_cfg_iova) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->max_tcs); \ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, attr->max_senders); \ -+ MC_RSP_OP(cmd, 0, 48, 8, enum net_prot, attr->start_hdr); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options); \ -+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->max_unicast_filters); \ -+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->max_multicast_filters);\ -+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->max_vlan_filters); \ -+ MC_RSP_OP(cmd, 2, 24, 8, uint8_t, attr->max_qos_entries); \ -+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->max_qos_key_size); \ -+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->max_dist_key_size); \ -+ MC_RSP_OP(cmd, 4, 48, 8, uint8_t, attr->max_policers); \ -+ MC_RSP_OP(cmd, 4, 
56, 8, uint8_t, attr->max_congestion_ctrl); \ -+ MC_RSP_OP(cmd, 5, 32, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 5, 48, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \ -+ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, 
layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, 
layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_QDID(cmd, qdid) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_SP_INFO(cmd, sp_info) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, sp_info->spids[0]); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, sp_info->spids[1]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_DATA_OFFSET(cmd, data_offset) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, data_offset) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_COUNTER(cmd, counter) \ -+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_COUNTER(cmd, value) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, value) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_COUNTER(cmd, counter, value) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, value); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ -+ 
MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_LINK_STATE(cmd, state) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, tx_shaper->max_burst_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, tx_shaper->rate_limit);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MTU(cmd, mtu) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, mtu) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MTU(cmd, mtu) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, mtu) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, 
mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \ -+ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_FILTERS(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name 
*/ -+#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_SELECTION(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[0].mode); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[1].mode); \ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 1, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[2].mode); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 1, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[3].mode); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[4].mode); \ -+ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[5].mode); \ -+ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[6].mode); \ -+ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[7].mode); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \ -+ MC_CMD_OP(cmd, 0, 28, 4, enum 
dpni_fs_miss_action, \ -+ cfg->fs_cfg.miss_action); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_FLOW(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 43, 1, int, cfg->l3_chksum_gen);\ -+ MC_CMD_OP(cmd, 0, 44, 1, int, cfg->l4_chksum_gen);\ -+ MC_CMD_OP(cmd, 0, 45, 1, int, cfg->use_common_tx_conf_queue);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_SET_TX_FLOW(cmd, flow_id) \ -+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_TX_FLOW(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_FLOW(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 43, 1, int, attr->l3_chksum_gen);\ -+ MC_RSP_OP(cmd, 0, 44, 1, int, attr->l4_chksum_gen);\ -+ MC_RSP_OP(cmd, 0, 45, 1, int, attr->use_common_tx_conf_queue);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.frame_data_size);\ -+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ 
-+ cfg->flc_cfg.flow_context_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ -+ MC_CMD_OP(cmd, 5, 0, 32, uint32_t, cfg->tail_drop_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_FLOW(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ -+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.frame_data_size);\ -+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.flow_context_size);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_CMD_OP(cmd, 3, 0, 4, enum 
dpni_flc_type, cfg->flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.frame_data_size);\ -+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.flow_context_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ -+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.frame_data_size);\ -+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.flow_context_size);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, revoke) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_QOS_TABLE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->default_tc); \ -+ MC_CMD_OP(cmd, 0, 40, 1, int, cfg->discard_on_miss); \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, 
uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_INSERTION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_REMOVAL(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IPR(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IPF(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ -+ 
MC_CMD_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ -+ MC_CMD_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ -+ MC_RSP_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_PREP_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ -+ MC_PREP_OP(ext, 0, 2, 2, \ -+ enum dpni_congestion_unit, cfg->units); \ -+ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_PREP_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+ MC_PREP_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ 
-+ MC_PREP_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ -+ MC_PREP_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_EXT_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ -+ MC_EXT_OP(ext, 0, 2, 2, \ -+ enum dpni_congestion_unit, cfg->units); \ -+ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+ MC_EXT_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ -+ MC_EXT_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ -+ MC_EXT_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, 
uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) -+ -+#define DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, 
uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) -+ -+#define DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->queue_cfg.dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, \ -+ cfg->queue_cfg.dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->errors_only); \ -+ MC_CMD_OP(cmd, 0, 46, 1, int, cfg->queue_cfg.order_preservation_en); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->queue_cfg.user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->queue_cfg.options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->queue_cfg.dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 32, uint32_t, \ -+ cfg->queue_cfg.tail_drop_threshold); \ -+ MC_CMD_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ -+ cfg->queue_cfg.flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ -+ cfg->queue_cfg.flc_cfg.frame_data_size); \ -+ MC_CMD_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ -+ 
cfg->queue_cfg.flc_cfg.flow_context_size); \ -+ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->queue_cfg.flc_cfg.options); \ -+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, \ -+ cfg->queue_cfg.flc_cfg.flow_context); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_CONF(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+#define DPNI_RSP_GET_TX_CONF(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, \ -+ attr->queue_attr.dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, \ -+ attr->queue_attr.dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->errors_only); \ -+ MC_RSP_OP(cmd, 0, 46, 1, int, \ -+ attr->queue_attr.order_preservation_en); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->queue_attr.user_ctx); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, attr->queue_attr.dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, \ -+ attr->queue_attr.tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->queue_attr.fqid); \ -+ MC_RSP_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ -+ attr->queue_attr.flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ -+ attr->queue_attr.flc_cfg.frame_data_size); \ -+ MC_RSP_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ -+ attr->queue_attr.flc_cfg.flow_context_size); \ -+ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, attr->queue_attr.flc_cfg.options); \ -+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, \ -+ attr->queue_attr.flc_cfg.flow_context); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, 
cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+#define DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#endif /* _FSL_DPNI_CMD_H */ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c -new file mode 100644 -index 0000000..c228ce5 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c -@@ -0,0 +1,1907 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include "../../fsl-mc/include/mc-sys.h" -+#include "../../fsl-mc/include/mc-cmd.h" -+#include "dpni.h" -+#include "dpni-cmd.h" -+ -+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, -+ uint8_t *key_cfg_buf) -+{ -+ int i, j; -+ int offset = 0; -+ int param = 1; -+ uint64_t *params = (uint64_t *)key_cfg_buf; -+ -+ if (!key_cfg_buf || !cfg) -+ return -EINVAL; -+ -+ params[0] |= mc_enc(0, 8, cfg->num_extracts); -+ params[0] = cpu_to_le64(params[0]); -+ -+ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) -+ return -EINVAL; -+ -+ for (i = 0; i < cfg->num_extracts; i++) { -+ switch (cfg->extracts[i].type) { -+ case DPKG_EXTRACT_FROM_HDR: -+ params[param] |= mc_enc(0, 8, -+ cfg->extracts[i].extract.from_hdr.prot); -+ params[param] |= mc_enc(8, 4, -+ cfg->extracts[i].extract.from_hdr.type); -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract.from_hdr.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. -+ from_hdr.offset); -+ params[param] |= mc_enc(32, 32, -+ cfg->extracts[i].extract. -+ from_hdr.field); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ params[param] |= mc_enc(0, 8, -+ cfg->extracts[i].extract. -+ from_hdr.hdr_index); -+ break; -+ case DPKG_EXTRACT_FROM_DATA: -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract. -+ from_data.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. -+ from_data.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ case DPKG_EXTRACT_FROM_PARSE: -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract. -+ from_parse.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. 
-+ from_parse.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ default: -+ return -EINVAL; -+ } -+ params[param] |= mc_enc( -+ 24, 8, cfg->extracts[i].num_of_byte_masks); -+ params[param] |= mc_enc(32, 4, cfg->extracts[i].type); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ for (offset = 0, j = 0; -+ j < DPKG_NUM_OF_MASKS; -+ offset += 16, j++) { -+ params[param] |= mc_enc( -+ (offset), 8, cfg->extracts[i].masks[j].mask); -+ params[param] |= mc_enc( -+ (offset + 8), 8, -+ cfg->extracts[i].masks[j].offset); -+ } -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ } -+ return 0; -+} -+ -+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, -+ uint8_t *ext_cfg_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)ext_cfg_buf; -+ -+ DPNI_PREP_EXTENDED_CFG(ext_params, cfg); -+ -+ return 0; -+} -+ -+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, -+ const uint8_t *ext_cfg_buf) -+{ -+ const uint64_t *ext_params = (const uint64_t *)ext_cfg_buf; -+ -+ DPNI_EXT_EXTENDED_CFG(ext_params, cfg); -+ -+ return 0; -+} -+ -+int dpni_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpni_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPNI_CMD_OPEN(cmd, dpni_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpni_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpni_cfg *cfg, -+ 
uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPNI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpni_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_pools_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_POOLS(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send 
command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpni_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpni_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpni_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ 
-+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpni_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpni_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t 
token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_ATTR(cmd, attr); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_error_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPNI_CMDID_SET_RX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ 
DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L3_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L3_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L4_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L4_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_qdid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, 
-+ uint16_t token, -+ uint16_t *qdid) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_QDID(cmd, *qdid); -+ -+ return 0; -+} -+ -+int dpni_get_sp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_sp_info *sp_info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SP_INFO, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_SP_INFO(cmd, sp_info); -+ -+ return 0; -+} -+ -+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *data_offset) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_DATA_OFFSET(cmd, *data_offset); -+ -+ return 0; -+} -+ -+int dpni_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t *value) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_COUNTER(cmd, counter); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_COUNTER(cmd, *value); -+ -+ return 0; -+} -+ -+int dpni_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ 
uint16_t token, -+ enum dpni_counter counter, -+ uint64_t value) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_COUNTER, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_COUNTER(cmd, counter, value); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_LINK_CFG(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_link_state *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_LINK_STATE(cmd, state); -+ -+ return 0; -+} -+ -+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_shaping_cfg *tx_shaper) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length); -+ -+ /* 
send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length); -+ -+ return 0; -+} -+ -+int dpni_set_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t mtu) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MTU, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MTU(cmd, mtu); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *mtu) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MTU, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MTU(cmd, *mtu); -+ -+ return 0; -+} -+ -+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ 
DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr); -+ -+ return 0; -+} -+ -+int dpni_add_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_FILTERS, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_VLAN_FILTERS(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ 
return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_selection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SELECTION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_SELECTION(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_dist_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *flow_id, -+ const struct dpni_tx_flow_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_FLOW(cmd, *flow_id, cfg); -+ -+ /* send command to mc*/ -+ 
err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_SET_TX_FLOW(cmd, *flow_id); -+ -+ return 0; -+} -+ -+int dpni_get_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_flow_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_FLOW(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_FLOW(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ const struct dpni_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ struct dpni_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_FLOW(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_ERR_QUEUE, -+ 
cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_ERR_QUEUE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int revoke) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_REVOKE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_qos_tbl_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_QOS_TABLE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg, -+ uint8_t tc_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg) -+{ -+ struct 
mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg, -+ uint16_t flow_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare 
command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_INSERTION, -+ cmd_flags, token); -+ DPNI_CMD_SET_VLAN_INSERTION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_REMOVAL, -+ cmd_flags, token); -+ DPNI_CMD_SET_VLAN_REMOVAL(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_ipr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPR, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IPR(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_ipf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPF, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IPF(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_policing_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_POLICING, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_rx_tc_policing_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_POLICING, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg); -+ -+ return 0; -+} -+ -+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)early_drop_buf; -+ -+ DPNI_PREP_EARLY_DROP(ext_params, cfg); -+} -+ -+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, -+ const uint8_t *early_drop_buf) -+{ -+ const uint64_t *ext_params = (const uint64_t *)early_drop_buf; -+ -+ DPNI_EXT_EARLY_DROP(ext_params, cfg); -+} -+ -+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command 
to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ 
DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_tx_conf_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_conf_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_CONF(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_CONF(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ 
DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h -new file mode 100644 -index 0000000..fca426d ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h -@@ -0,0 +1,2581 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_DPNI_H -+#define __FSL_DPNI_H -+ -+#include "dpkg.h" -+ -+struct fsl_mc_io; -+ -+/** -+ * Data Path Network Interface API -+ * Contains initialization APIs and runtime control APIs for DPNI -+ */ -+ -+/** General DPNI macros */ -+ -+/** -+ * Maximum number of traffic classes -+ */ -+#define DPNI_MAX_TC 8 -+/** -+ * Maximum number of buffer pools per DPNI -+ */ -+#define DPNI_MAX_DPBP 8 -+/** -+ * Maximum number of storage-profiles per DPNI -+ */ -+#define DPNI_MAX_SP 2 -+ -+/** -+ * All traffic classes considered; see dpni_set_rx_flow() -+ */ -+#define DPNI_ALL_TCS (uint8_t)(-1) -+/** -+ * All flows within traffic class considered; see dpni_set_rx_flow() -+ */ -+#define DPNI_ALL_TC_FLOWS (uint16_t)(-1) -+/** -+ * Generate new flow ID; see dpni_set_tx_flow() -+ */ -+#define DPNI_NEW_FLOW_ID (uint16_t)(-1) -+/* use for common tx-conf queue; see dpni_set_tx_conf_() */ -+#define DPNI_COMMON_TX_CONF (uint16_t)(-1) -+ -+/** -+ * dpni_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpni_id: DPNI unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpni_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpni_id, -+ uint16_t *token); -+ -+/** -+ * dpni_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/* DPNI configuration options */ -+ -+/** -+ * Allow different distribution key profiles for different traffic classes; -+ * if not set, a single key profile is assumed -+ */ -+#define DPNI_OPT_ALLOW_DIST_KEY_PER_TC 0x00000001 -+ -+/** -+ * Disable all non-error transmit confirmation; error frames are reported -+ * back to a common Tx error queue -+ */ -+#define DPNI_OPT_TX_CONF_DISABLED 0x00000002 -+ -+/** -+ * Disable per-sender private Tx confirmation/error queue -+ */ -+#define DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED 0x00000004 -+ -+/** -+ * Support distribution based on hashed key; -+ * allows statistical distribution over receive queues in a traffic class -+ */ -+#define DPNI_OPT_DIST_HASH 0x00000010 -+ -+/** -+ * DEPRECATED - if this flag is selected and and all new 'max_fs_entries' are -+ * '0' then backward compatibility is preserved; -+ * Support distribution based on flow steering; -+ * allows explicit control of distribution over receive queues in a traffic -+ * class -+ */ -+#define DPNI_OPT_DIST_FS 0x00000020 -+ -+/** -+ * Unicast filtering support -+ */ -+#define DPNI_OPT_UNICAST_FILTER 0x00000080 -+/** -+ * Multicast filtering support -+ */ -+#define DPNI_OPT_MULTICAST_FILTER 0x00000100 -+/** -+ * VLAN filtering support -+ */ -+#define DPNI_OPT_VLAN_FILTER 0x00000200 -+/** -+ * Support IP reassembly on received packets -+ */ -+#define DPNI_OPT_IPR 
0x00000800 -+/** -+ * Support IP fragmentation on transmitted packets -+ */ -+#define DPNI_OPT_IPF 0x00001000 -+/** -+ * VLAN manipulation support -+ */ -+#define DPNI_OPT_VLAN_MANIPULATION 0x00010000 -+/** -+ * Support masking of QoS lookup keys -+ */ -+#define DPNI_OPT_QOS_MASK_SUPPORT 0x00020000 -+/** -+ * Support masking of Flow Steering lookup keys -+ */ -+#define DPNI_OPT_FS_MASK_SUPPORT 0x00040000 -+ -+/** -+ * struct dpni_extended_cfg - Structure representing extended DPNI configuration -+ * @tc_cfg: TCs configuration -+ * @ipr_cfg: IP reassembly configuration -+ */ -+struct dpni_extended_cfg { -+ /** -+ * struct tc_cfg - TC configuration -+ * @max_dist: Maximum distribution size for Rx traffic class; -+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, -+ * 112,128,192,224,256,384,448,512,768,896,1024; -+ * value '0' will be treated as '1'. -+ * other unsupported values will be round down to the nearest -+ * supported value. -+ * @max_fs_entries: Maximum FS entries for Rx traffic class; -+ * '0' means no support for this TC; -+ */ -+ struct { -+ uint16_t max_dist; -+ uint16_t max_fs_entries; -+ } tc_cfg[DPNI_MAX_TC]; -+ /** -+ * struct ipr_cfg - Structure representing IP reassembly configuration -+ * @max_reass_frm_size: Maximum size of the reassembled frame -+ * @min_frag_size_ipv4: Minimum fragment size of IPv4 fragments -+ * @min_frag_size_ipv6: Minimum fragment size of IPv6 fragments -+ * @max_open_frames_ipv4: Maximum concurrent IPv4 packets in reassembly -+ * process -+ * @max_open_frames_ipv6: Maximum concurrent IPv6 packets in reassembly -+ * process -+ */ -+ struct { -+ uint16_t max_reass_frm_size; -+ uint16_t min_frag_size_ipv4; -+ uint16_t min_frag_size_ipv6; -+ uint16_t max_open_frames_ipv4; -+ uint16_t max_open_frames_ipv6; -+ } ipr_cfg; -+}; -+ -+/** -+ * dpni_prepare_extended_cfg() - function prepare extended parameters -+ * @cfg: extended structure -+ * @ext_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ 
* This function has to be called before dpni_create() -+ */ -+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, -+ uint8_t *ext_cfg_buf); -+ -+/** -+ * struct dpni_cfg - Structure representing DPNI configuration -+ * @mac_addr: Primary MAC address -+ * @adv: Advanced parameters; default is all zeros; -+ * use this structure to change default settings -+ */ -+struct dpni_cfg { -+ uint8_t mac_addr[6]; -+ /** -+ * struct adv - Advanced parameters -+ * @options: Mask of available options; use 'DPNI_OPT_' values -+ * @start_hdr: Selects the packet starting header for parsing; -+ * 'NET_PROT_NONE' is treated as default: 'NET_PROT_ETH' -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; Non-power-of-2 values are rounded -+ * up to the next power-of-2 value as hardware demands it; -+ * '0' will be treated as '1' -+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx); -+ * '0' will e treated as '1' -+ * @max_unicast_filters: Maximum number of unicast filters; -+ * '0' is treated as '16' -+ * @max_multicast_filters: Maximum number of multicast filters; -+ * '0' is treated as '64' -+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in -+ * the QoS table; '0' is treated as '64' -+ * @max_qos_key_size: Maximum key size for the QoS look-up; -+ * '0' is treated as '24' which is enough for IPv4 -+ * 5-tuple -+ * @max_dist_key_size: Maximum key size for the distribution; -+ * '0' is treated as '24' which is enough for IPv4 5-tuple -+ * @max_policers: Maximum number of policers; -+ * should be between '0' and max_tcs -+ * @max_congestion_ctrl: Maximum number of congestion control groups -+ * (CGs); covers early drop and congestion notification -+ * requirements; -+ * should be between '0' and ('max_tcs' + 'max_senders') -+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory -+ * filled with the extended configuration by calling -+ * dpni_prepare_extended_cfg() -+ */ -+ 
struct { -+ uint32_t options; -+ enum net_prot start_hdr; -+ uint8_t max_senders; -+ uint8_t max_tcs; -+ uint8_t max_unicast_filters; -+ uint8_t max_multicast_filters; -+ uint8_t max_vlan_filters; -+ uint8_t max_qos_entries; -+ uint8_t max_qos_key_size; -+ uint8_t max_dist_key_size; -+ uint8_t max_policers; -+ uint8_t max_congestion_ctrl; -+ uint64_t ext_cfg_iova; -+ } adv; -+}; -+ -+/** -+ * dpni_create() - Create the DPNI object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPNI object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpni_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpni_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpni_destroy() - Destroy the DPNI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpni_pools_cfg - Structure representing buffer pools configuration -+ * @num_dpbp: Number of DPBPs -+ * @pools: Array of buffer pools parameters; The number of valid entries -+ * must match 'num_dpbp' value -+ */ -+struct dpni_pools_cfg { -+ uint8_t num_dpbp; -+ /** -+ * struct pools - Buffer pools parameters -+ * @dpbp_id: DPBP object ID -+ * @buffer_size: Buffer size -+ * @backup_pool: Backup pool -+ */ -+ struct { -+ int dpbp_id; -+ uint16_t buffer_size; -+ int backup_pool; -+ } pools[DPNI_MAX_DPBP]; -+}; -+ -+/** -+ * dpni_set_pools() - Set buffer pools configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Buffer pools configuration -+ * -+ * mandatory for DPNI operation -+ * warning:Allowed only when DPNI is disabled -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_pools_cfg *cfg); -+ -+/** -+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_is_enabled() - Check if the DPNI is enabled. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_reset() - Reset the DPNI, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPNI IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPNI_IRQ_INDEX 0 -+/** -+ * IRQ event - indicates a change in link state -+ */ -+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 -+ -+/** -+ * struct dpni_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpni_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpni_set_irq() - Set IRQ information for the DPNI to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpni_irq_cfg *irq_cfg); -+ -+/** -+ * dpni_get_irq() - Get IRQ information from the DPNI. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpni_irq_cfg *irq_cfg); -+ -+/** -+ * dpni_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state: - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpni_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpni_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpni_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpni_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpni_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpni_attr - Structure representing DPNI attributes -+ * @id: DPNI object ID -+ * @version: DPNI version -+ * @start_hdr: Indicates the packet starting header for parsing -+ * @options: Mask of available options; reflects the value as was given in -+ * object's creation -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; -+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx) -+ * @max_unicast_filters: Maximum number of unicast filters -+ * @max_multicast_filters: Maximum number of multicast filters -+ * @max_vlan_filters: Maximum number of VLAN filters -+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in QoS table -+ * @max_qos_key_size: Maximum key size for the QoS look-up -+ * @max_dist_key_size: Maximum key size for the distribution look-up -+ * @max_policers: Maximum number of policers; -+ * @max_congestion_ctrl: Maximum number of congestion control groups (CGs); -+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory; -+ * call dpni_extract_extended_cfg() to extract the extended configuration -+ */ -+struct dpni_attr { -+ int id; -+ /** -+ * struct version - DPNI version -+ * @major: DPNI major version -+ * @minor: DPNI minor version -+ */ -+ 
struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ enum net_prot start_hdr; -+ uint32_t options; -+ uint8_t max_senders; -+ uint8_t max_tcs; -+ uint8_t max_unicast_filters; -+ uint8_t max_multicast_filters; -+ uint8_t max_vlan_filters; -+ uint8_t max_qos_entries; -+ uint8_t max_qos_key_size; -+ uint8_t max_dist_key_size; -+ uint8_t max_policers; -+ uint8_t max_congestion_ctrl; -+ uint64_t ext_cfg_iova; -+}; -+ -+/** -+ * dpni_get_attributes() - Retrieve DPNI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @attr: Object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_attr *attr); -+ -+/** -+ * dpni_extract_extended_cfg() - extract the extended parameters -+ * @cfg: extended structure -+ * @ext_cfg_buf: 256 bytes of DMA-able memory -+ * -+ * This function has to be called after dpni_get_attributes() -+ */ -+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, -+ const uint8_t *ext_cfg_buf); -+ -+/** -+ * DPNI errors -+ */ -+ -+/** -+ * Extract out of frame header error -+ */ -+#define DPNI_ERROR_EOFHE 0x00020000 -+/** -+ * Frame length error -+ */ -+#define DPNI_ERROR_FLE 0x00002000 -+/** -+ * Frame physical error -+ */ -+#define DPNI_ERROR_FPE 0x00001000 -+/** -+ * Parsing header error -+ */ -+#define DPNI_ERROR_PHE 0x00000020 -+/** -+ * Parser L3 checksum error -+ */ -+#define DPNI_ERROR_L3CE 0x00000004 -+/** -+ * Parser L3 checksum error -+ */ -+#define DPNI_ERROR_L4CE 0x00000001 -+ -+/** -+ * enum dpni_error_action - Defines DPNI behavior for errors -+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame -+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow -+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue -+ */ -+enum dpni_error_action { -+ DPNI_ERROR_ACTION_DISCARD 
= 0, -+ DPNI_ERROR_ACTION_CONTINUE = 1, -+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2 -+}; -+ -+/** -+ * struct dpni_error_cfg - Structure representing DPNI errors treatment -+ * @errors: Errors mask; use 'DPNI_ERROR__ -+ * @error_action: The desired action for the errors mask -+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation -+ * status (FAS); relevant only for the non-discard action -+ */ -+struct dpni_error_cfg { -+ uint32_t errors; -+ enum dpni_error_action error_action; -+ int set_frame_annotation; -+}; -+ -+/** -+ * dpni_set_errors_behavior() - Set errors behavior -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Errors configuration -+ * -+ * this function may be called numerous times with different -+ * error masks -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_error_cfg *cfg); -+ -+/** -+ * DPNI buffer layout modification options -+ */ -+ -+/** -+ * Select to modify the time-stamp setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 -+/** -+ * Select to modify the parser-result setting; not applicable for Tx -+ */ -+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 -+/** -+ * Select to modify the frame-status setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 -+/** -+ * Select to modify the private-data-size setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 -+/** -+ * Select to modify the data-alignment setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 -+/** -+ * Select to modify the data-head-room setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 -+/** -+ * Select to modify the data-tail-room setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 -+ -+/** -+ * struct dpni_buffer_layout - Structure 
representing DPNI buffer layout -+ * @options: Flags representing the suggested modifications to the buffer -+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_' flags -+ * @pass_timestamp: Pass timestamp value -+ * @pass_parser_result: Pass parser results -+ * @pass_frame_status: Pass frame status -+ * @private_data_size: Size kept for private data (in bytes) -+ * @data_align: Data alignment -+ * @data_head_room: Data head room -+ * @data_tail_room: Data tail room -+ */ -+struct dpni_buffer_layout { -+ uint32_t options; -+ int pass_timestamp; -+ int pass_parser_result; -+ int pass_frame_status; -+ uint16_t private_data_size; -+ uint16_t data_align; -+ uint16_t data_head_room; -+ uint16_t data_tail_room; -+}; -+ -+/** -+ * dpni_get_rx_buffer_layout() - Retrieve Rx buffer layout attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_rx_buffer_layout() - Set Rx buffer layout configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_get_tx_buffer_layout() - Retrieve Tx buffer layout attributes. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_tx_buffer_layout() - Set Tx buffer layout configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_get_tx_conf_buffer_layout() - Retrieve Tx confirmation buffer layout -+ * attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_tx_conf_buffer_layout() - Set Tx confirmation buffer layout -+ * configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_l3_chksum_validation() - Enable/disable L3 checksum validation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_l3_chksum_validation() - Get L3 checksum validation mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_l4_chksum_validation() - Enable/disable L4 checksum validation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_l4_chksum_validation() - Get L4 checksum validation mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used -+ * for enqueue operations -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @qdid: Returned virtual QDID value that should be used as an argument -+ * in all enqueue operations -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_qdid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *qdid); -+ -+/** -+ * struct dpni_sp_info - Structure representing DPNI storage-profile information -+ * (relevant only for DPNI owned by AIOP) -+ * @spids: array of storage-profiles -+ */ -+struct dpni_sp_info { -+ uint16_t spids[DPNI_MAX_SP]; -+}; -+ -+/** -+ * dpni_get_spids() - Get the AIOP storage profile IDs associated with the DPNI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @sp_info: Returned AIOP storage-profile information -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Only relevant for DPNI that belongs to AIOP container. -+ */ -+int dpni_get_sp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_sp_info *sp_info); -+ -+/** -+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @data_offset: Tx data offset (from start of buffer) -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *data_offset); -+ -+/** -+ * enum dpni_counter - DPNI counter types -+ * @DPNI_CNT_ING_FRAME: Counts ingress frames -+ * @DPNI_CNT_ING_BYTE: Counts ingress bytes -+ * @DPNI_CNT_ING_FRAME_DROP: Counts ingress frames dropped due to explicit -+ * 'drop' setting -+ * @DPNI_CNT_ING_FRAME_DISCARD: Counts ingress frames discarded due to errors -+ * @DPNI_CNT_ING_MCAST_FRAME: Counts ingress multicast frames -+ * @DPNI_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes -+ * @DPNI_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames -+ * @DPNI_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes -+ * @DPNI_CNT_EGR_FRAME: Counts egress frames -+ * @DPNI_CNT_EGR_BYTE: Counts egress bytes -+ * @DPNI_CNT_EGR_FRAME_DISCARD: Counts egress frames discarded due to errors -+ */ -+enum dpni_counter { -+ DPNI_CNT_ING_FRAME = 0x0, -+ DPNI_CNT_ING_BYTE = 0x1, -+ DPNI_CNT_ING_FRAME_DROP = 0x2, -+ DPNI_CNT_ING_FRAME_DISCARD = 0x3, -+ DPNI_CNT_ING_MCAST_FRAME = 0x4, -+ DPNI_CNT_ING_MCAST_BYTE = 0x5, -+ DPNI_CNT_ING_BCAST_FRAME = 0x6, -+ DPNI_CNT_ING_BCAST_BYTES = 0x7, -+ DPNI_CNT_EGR_FRAME = 0x8, -+ DPNI_CNT_EGR_BYTE = 0x9, -+ DPNI_CNT_EGR_FRAME_DISCARD = 0xa -+}; -+ -+/** -+ * dpni_get_counter() - Read a specific DPNI counter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @counter: The requested counter -+ * @value: Returned counter's current value -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t *value); -+ -+/** -+ * dpni_set_counter() - Set (or clear) a specific DPNI counter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @counter: The requested counter -+ * @value: New counter value; typically pass '0' for resetting -+ * the counter. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t value); -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct - Structure representing DPNI link configuration -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values -+ */ -+struct dpni_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpni_set_link_cfg() - set the link configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_link_cfg *cfg); -+ -+/** -+ * struct dpni_link_state - Structure representing DPNI link state -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values -+ * @up: Link state; '0' for down, '1' for up -+ */ -+struct dpni_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpni_get_link_state() - Return the link state (either up or down) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @state: Returned link state; -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_link_state *state); -+ -+/** -+ * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration -+ * @rate_limit: rate in Mbps -+ * @max_burst_size: burst size in bytes (up to 64KB) -+ */ -+struct dpni_tx_shaping_cfg { -+ uint32_t rate_limit; -+ uint16_t max_burst_size; -+}; -+ -+/** -+ * dpni_set_tx_shaping() - Set the transmit shaping -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tx_shaper: tx shaping configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_shaping_cfg *tx_shaper); -+ -+/** -+ * dpni_set_max_frame_length() - Set the maximum received frame length. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @max_frame_length: Maximum received frame length (in -+ * bytes); frame is discarded if its -+ * length exceeds this value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length); -+ -+/** -+ * dpni_get_max_frame_length() - Get the maximum received frame length. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @max_frame_length: Maximum received frame length (in -+ * bytes); frame is discarded if its -+ * length exceeds this value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *max_frame_length); -+ -+/** -+ * dpni_set_mtu() - Set the MTU for the interface. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mtu: MTU length (in bytes) -+ * -+ * MTU determines the maximum fragment size for performing IP -+ * fragmentation on egress packets. -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t mtu); -+ -+/** -+ * dpni_get_mtu() - Get the MTU. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mtu: Returned MTU length (in bytes) -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *mtu); -+ -+/** -+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_primary_mac_addr() - Set the primary MAC address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to set as primary address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_get_primary_mac_addr() - Get the primary MAC address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: Returned MAC address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t mac_addr[6]); -+ -+/** -+ * dpni_add_mac_addr() - Add MAC address filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to add -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_remove_mac_addr() - Remove MAC address filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to remove -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @unicast: Set to '1' to clear unicast addresses -+ * @multicast: Set to '1' to clear multicast addresses -+ * -+ * The primary MAC address is not cleared by this operation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast); -+ -+/** -+ * dpni_set_vlan_filters() - Enable/disable VLAN filtering mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_add_vlan_id() - Add VLAN ID filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @vlan_id: VLAN ID to add -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * dpni_remove_vlan_id() - Remove VLAN ID filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @vlan_id: VLAN ID to remove -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * dpni_clear_vlan_filters() - Clear all VLAN filters -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode -+ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority -+ * @DPNI_TX_SCHED_WEIGHTED: weighted based scheduling -+ */ -+enum dpni_tx_schedule_mode { -+ DPNI_TX_SCHED_STRICT_PRIORITY, -+ DPNI_TX_SCHED_WEIGHTED, -+}; -+ -+/** -+ * struct dpni_tx_schedule_cfg - Structure representing Tx -+ * scheduling configuration -+ * @mode: scheduling mode -+ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000; -+ * not applicable for 'strict-priority' mode; -+ */ -+struct dpni_tx_schedule_cfg { -+ enum dpni_tx_schedule_mode mode; -+ uint16_t delta_bandwidth; -+}; -+ -+/** -+ * struct dpni_tx_selection_cfg - Structure representing transmission -+ * selection configuration -+ * @tc_sched: an array of traffic-classes -+ */ -+struct dpni_tx_selection_cfg { -+ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC]; -+}; -+ -+/** -+ * dpni_set_tx_selection() - Set transmission selection configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: transmission selection configuration -+ * -+ * warning: Allowed only when DPNI is disabled -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_selection_cfg *cfg); -+ -+/** -+ * enum dpni_dist_mode - DPNI distribution mode -+ * @DPNI_DIST_MODE_NONE: No distribution -+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if -+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation -+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if -+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation -+ */ -+enum dpni_dist_mode { -+ DPNI_DIST_MODE_NONE = 0, -+ DPNI_DIST_MODE_HASH = 1, -+ DPNI_DIST_MODE_FS = 2 -+}; -+ -+/** -+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action -+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame -+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id -+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash -+ */ -+enum dpni_fs_miss_action { -+ DPNI_FS_MISS_DROP = 0, -+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1, -+ DPNI_FS_MISS_HASH = 2 -+}; -+ -+/** -+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration -+ * @miss_action: Miss action selection -+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID' -+ */ -+struct dpni_fs_tbl_cfg { -+ enum dpni_fs_miss_action miss_action; -+ uint16_t default_flow_id; -+}; -+ -+/** -+ * dpni_prepare_key_cfg() - function prepare extract parameters -+ * @cfg: defining a full Key Generation profile (rule) -+ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before the following functions: -+ * - dpni_set_rx_tc_dist() -+ * - dpni_set_qos_table() -+ */ -+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, -+ uint8_t *key_cfg_buf); -+ -+/** -+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration -+ * @dist_size: Set the distribution size; -+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, -+ * 
112,128,192,224,256,384,448,512,768,896,1024 -+ * @dist_mode: Distribution mode -+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with -+ * the extractions to be used for the distribution key by calling -+ * dpni_prepare_key_cfg() relevant only when -+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' -+ * @fs_cfg: Flow Steering table configuration; only relevant if -+ * 'dist_mode = DPNI_DIST_MODE_FS' -+ */ -+struct dpni_rx_tc_dist_cfg { -+ uint16_t dist_size; -+ enum dpni_dist_mode dist_mode; -+ uint64_t key_cfg_iova; -+ struct dpni_fs_tbl_cfg fs_cfg; -+}; -+ -+/** -+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class distribution configuration -+ * -+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg() -+ * first to prepare the key_cfg_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_dist_cfg *cfg); -+ -+/** -+ * Set to select color aware mode (otherwise - color blind) -+ */ -+#define DPNI_POLICER_OPT_COLOR_AWARE 0x00000001 -+/** -+ * Set to discard frame with RED color -+ */ -+#define DPNI_POLICER_OPT_DISCARD_RED 0x00000002 -+ -+/** -+ * enum dpni_policer_mode - selecting the policer mode -+ * @DPNI_POLICER_MODE_NONE: Policer is disabled -+ * @DPNI_POLICER_MODE_PASS_THROUGH: Policer pass through -+ * @DPNI_POLICER_MODE_RFC_2698: Policer algorithm RFC 2698 -+ * @DPNI_POLICER_MODE_RFC_4115: Policer algorithm RFC 4115 -+ */ -+enum dpni_policer_mode { -+ DPNI_POLICER_MODE_NONE = 0, -+ DPNI_POLICER_MODE_PASS_THROUGH, -+ DPNI_POLICER_MODE_RFC_2698, -+ DPNI_POLICER_MODE_RFC_4115 -+}; -+ -+/** -+ * enum dpni_policer_unit - DPNI policer units -+ * @DPNI_POLICER_UNIT_BYTES: bytes units -+ * @DPNI_POLICER_UNIT_FRAMES: frames units -+ */ -+enum dpni_policer_unit { -+ DPNI_POLICER_UNIT_BYTES = 0, -+ DPNI_POLICER_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpni_policer_color - selecting the policer color -+ * @DPNI_POLICER_COLOR_GREEN: Green color -+ * @DPNI_POLICER_COLOR_YELLOW: Yellow color -+ * @DPNI_POLICER_COLOR_RED: Red color -+ */ -+enum dpni_policer_color { -+ DPNI_POLICER_COLOR_GREEN = 0, -+ DPNI_POLICER_COLOR_YELLOW, -+ DPNI_POLICER_COLOR_RED -+}; -+ -+/** -+ * struct dpni_rx_tc_policing_cfg - Policer configuration -+ * @options: Mask of available options; use 'DPNI_POLICER_OPT_' values -+ * @mode: policer mode -+ * @default_color: For pass-through mode the policer re-colors with this -+ * color any incoming packets. For Color aware non-pass-through mode: -+ * policer re-colors with this color all packets with FD[DROPP]>2. 
-+ * @units: Bytes or Packets -+ * @cir: Committed information rate (CIR) in Kbps or packets/second -+ * @cbs: Committed burst size (CBS) in bytes or packets -+ * @eir: Peak information rate (PIR, rfc2698) in Kbps or packets/second -+ * Excess information rate (EIR, rfc4115) in Kbps or packets/second -+ * @ebs: Peak burst size (PBS, rfc2698) in bytes or packets -+ * Excess burst size (EBS, rfc4115) in bytes or packets -+ */ -+struct dpni_rx_tc_policing_cfg { -+ uint32_t options; -+ enum dpni_policer_mode mode; -+ enum dpni_policer_unit units; -+ enum dpni_policer_color default_color; -+ uint32_t cir; -+ uint32_t cbs; -+ uint32_t eir; -+ uint32_t ebs; -+}; -+ -+/** -+ * dpni_set_rx_tc_policing() - Set Rx traffic class policing configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class policing configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_policing_cfg *cfg); -+ -+/** -+ * dpni_get_rx_tc_policing() - Get Rx traffic class policing configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class policing configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_rx_tc_policing_cfg *cfg); -+ -+/** -+ * enum dpni_congestion_unit - DPNI congestion units -+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units -+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units -+ */ -+enum dpni_congestion_unit { -+ DPNI_CONGESTION_UNIT_BYTES = 0, -+ DPNI_CONGESTION_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpni_early_drop_mode - DPNI early drop mode -+ * @DPNI_EARLY_DROP_MODE_NONE: early drop is disabled -+ * @DPNI_EARLY_DROP_MODE_TAIL: early drop in taildrop mode -+ * @DPNI_EARLY_DROP_MODE_WRED: early drop in WRED mode -+ */ -+enum dpni_early_drop_mode { -+ DPNI_EARLY_DROP_MODE_NONE = 0, -+ DPNI_EARLY_DROP_MODE_TAIL, -+ DPNI_EARLY_DROP_MODE_WRED -+}; -+ -+/** -+ * struct dpni_wred_cfg - WRED configuration -+ * @max_threshold: maximum threshold that packets may be discarded. Above this -+ * threshold all packets are discarded; must be less than 2^39; -+ * approximated to be expressed as (x+256)*2^(y-1) due to HW -+ * implementation. -+ * @min_threshold: minimum threshold that packets may be discarded at -+ * @drop_probability: probability that a packet will be discarded (1-100, -+ * associated with the max_threshold). -+ */ -+struct dpni_wred_cfg { -+ uint64_t max_threshold; -+ uint64_t min_threshold; -+ uint8_t drop_probability; -+}; -+ -+/** -+ * struct dpni_early_drop_cfg - early-drop configuration -+ * @mode: drop mode -+ * @units: units type -+ * @green: WRED - 'green' configuration -+ * @yellow: WRED - 'yellow' configuration -+ * @red: WRED - 'red' configuration -+ * @tail_drop_threshold: tail drop threshold -+ */ -+struct dpni_early_drop_cfg { -+ enum dpni_early_drop_mode mode; -+ enum dpni_congestion_unit units; -+ -+ struct dpni_wred_cfg green; -+ struct dpni_wred_cfg yellow; -+ struct dpni_wred_cfg red; -+ -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * dpni_prepare_early_drop() - prepare an early drop. 
-+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpni_set_rx_tc_early_drop or -+ * dpni_set_tx_tc_early_drop -+ * -+ */ -+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf); -+ -+/** -+ * dpni_extract_early_drop() - extract the early drop configuration. -+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called after dpni_get_rx_tc_early_drop or -+ * dpni_get_tx_tc_early_drop -+ * -+ */ -+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, -+ const uint8_t *early_drop_buf); -+ -+/** -+ * dpni_set_rx_tc_early_drop() - Set Rx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled -+ * with the early-drop configuration by calling dpni_prepare_early_drop() -+ * -+ * warning: Before calling this function, call dpni_prepare_early_drop() to -+ * prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_get_rx_tc_early_drop() - Get Rx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory -+ * -+ * warning: After calling this function, call dpni_extract_early_drop() to -+ * get the early drop configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_set_tx_tc_early_drop() - Set Tx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled -+ * with the early-drop configuration by calling dpni_prepare_early_drop() -+ * -+ * warning: Before calling this function, call dpni_prepare_early_drop() to -+ * prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_get_tx_tc_early_drop() - Get Tx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory -+ * -+ * warning: After calling this function, call dpni_extract_early_drop() to -+ * get the early drop configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * enum dpni_dest - DPNI destination types -+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and -+ * does not generate FQDAN notifications; user is expected to -+ * dequeue from the queue based on polling or other user-defined -+ * method -+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to dequeue -+ * from the queue only after notification is received -+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified DPCON -+ * object; user is expected to dequeue from the DPCON channel -+ */ -+enum dpni_dest { -+ DPNI_DEST_NONE = 0, -+ DPNI_DEST_DPIO = 1, -+ DPNI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 
'DPNI_DEST_NONE' option -+ */ -+struct dpni_dest_cfg { -+ enum dpni_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/* DPNI congestion options */ -+ -+/** -+ * CSCN message is written to message_iova once entering a -+ * congestion state (see 'threshold_entry') -+ */ -+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001 -+/** -+ * CSCN message is written to message_iova once exiting a -+ * congestion state (see 'threshold_exit') -+ */ -+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002 -+/** -+ * CSCN write will attempt to allocate into a cache (coherent write); -+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is selected -+ */ -+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to -+ * DPIO/DPCON's WQ channel once entering a congestion state -+ * (see 'threshold_entry') -+ */ -+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to -+ * DPIO/DPCON's WQ channel once exiting a congestion state -+ * (see 'threshold_exit') -+ */ -+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the -+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) -+ */ -+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 -+ -+/** -+ * struct dpni_congestion_notification_cfg - congestion notification -+ * configuration -+ * @units: units type -+ * @threshold_entry: above this threshold we enter a congestion state. -+ * set it to '0' to disable it -+ * @threshold_exit: below this threshold we exit the congestion state. 
-+ * @message_ctx: The context that will be part of the CSCN message -+ * @message_iova: I/O virtual address (must be in DMA-able memory), -+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is -+ * contained in 'options' -+ * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel -+ * @options: Mask of available options; use 'DPNI_CONG_OPT_' values -+ */ -+ -+struct dpni_congestion_notification_cfg { -+ enum dpni_congestion_unit units; -+ uint32_t threshold_entry; -+ uint32_t threshold_exit; -+ uint64_t message_ctx; -+ uint64_t message_iova; -+ struct dpni_dest_cfg dest_cfg; -+ uint16_t options; -+}; -+ -+/** -+ * dpni_set_rx_tc_congestion_notification() - Set Rx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_rx_tc_congestion_notification() - Get Rx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_set_tx_tc_congestion_notification() - Set Tx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_tx_tc_congestion_notification() - Get Tx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * enum dpni_flc_type - DPNI FLC types -+ * @DPNI_FLC_USER_DEFINED: select the FLC to be used for user defined value -+ * @DPNI_FLC_STASH: select the FLC to be used for stash control -+ */ -+enum dpni_flc_type { -+ DPNI_FLC_USER_DEFINED = 0, -+ DPNI_FLC_STASH = 1, -+}; -+ -+/** -+ * enum dpni_stash_size - DPNI FLC stashing size -+ * @DPNI_STASH_SIZE_0B: no stash -+ * @DPNI_STASH_SIZE_64B: stashes 64 bytes -+ * @DPNI_STASH_SIZE_128B: stashes 128 bytes -+ * @DPNI_STASH_SIZE_192B: stashes 192 bytes -+ */ -+enum dpni_stash_size { -+ DPNI_STASH_SIZE_0B = 0, -+ DPNI_STASH_SIZE_64B = 1, -+ DPNI_STASH_SIZE_128B = 2, -+ DPNI_STASH_SIZE_192B = 3, -+}; -+ -+/* DPNI FLC stash options */ -+ -+/** -+ * stashes the whole annotation area (up to 192 bytes) -+ */ -+#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001 -+ -+/** -+ * struct dpni_flc_cfg - Structure representing DPNI FLC configuration -+ * @flc_type: FLC type -+ * @options: Mask of available options; -+ * use 'DPNI_FLC_STASH_' values -+ * @frame_data_size: Size of frame data to be stashed -+ * @flow_context_size: Size of flow context to be stashed -+ * @flow_context: 1. In case flc_type is 'DPNI_FLC_USER_DEFINED': -+ * this value will be provided in the frame descriptor -+ * (FD[FLC]) -+ * 2. 
In case flc_type is 'DPNI_FLC_STASH': -+ * this value will be I/O virtual address of the -+ * flow-context; -+ * Must be cacheline-aligned and DMA-able memory -+ */ -+struct dpni_flc_cfg { -+ enum dpni_flc_type flc_type; -+ uint32_t options; -+ enum dpni_stash_size frame_data_size; -+ enum dpni_stash_size flow_context_size; -+ uint64_t flow_context; -+}; -+ -+/** -+ * DPNI queue modification options -+ */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPNI_QUEUE_OPT_DEST 0x00000002 -+/** Select to modify the flow-context parameters; -+ * not applicable for Tx-conf/Err queues as the FD comes from the user -+ */ -+#define DPNI_QUEUE_OPT_FLC 0x00000004 -+/** -+ * Select to modify the queue's order preservation -+ */ -+#define DPNI_QUEUE_OPT_ORDER_PRESERVATION 0x00000008 -+/* Select to modify the queue's tail-drop threshold */ -+#define DPNI_QUEUE_OPT_TAILDROP_THRESHOLD 0x00000010 -+ -+/** -+ * struct dpni_queue_cfg - Structure representing queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPNI_QUEUE_OPT_' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; valid only if 'DPNI_QUEUE_OPT_USER_CTX' -+ * is contained in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPNI_QUEUE_OPT_DEST' is contained in 'options' -+ * @flc_cfg: Flow context configuration; in case the TC's distribution -+ * is either NONE or HASH the FLC's settings of flow#0 are used. -+ * in the case of FS (flow-steering) the flow's FLC settings -+ * are used. 
-+ * valid only if 'DPNI_QUEUE_OPT_FLC' is contained in 'options' -+ * @order_preservation_en: enable/disable order preservation; -+ * valid only if 'DPNI_QUEUE_OPT_ORDER_PRESERVATION' is contained -+ * in 'options' -+ * @tail_drop_threshold: set the queue's tail drop threshold in bytes; -+ * '0' value disable the threshold; maximum value is 0xE000000; -+ * valid only if 'DPNI_QUEUE_OPT_TAILDROP_THRESHOLD' is contained -+ * in 'options' -+ */ -+struct dpni_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpni_dest_cfg dest_cfg; -+ struct dpni_flc_cfg flc_cfg; -+ int order_preservation_en; -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * struct dpni_queue_attr - Structure representing queue attributes -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @flc_cfg: Flow context configuration -+ * @order_preservation_en: enable/disable order preservation -+ * @tail_drop_threshold: queue's tail drop threshold in bytes; -+ * @fqid: Virtual fqid value to be used for dequeue operations -+ */ -+struct dpni_queue_attr { -+ uint64_t user_ctx; -+ struct dpni_dest_cfg dest_cfg; -+ struct dpni_flc_cfg flc_cfg; -+ int order_preservation_en; -+ uint32_t tail_drop_threshold; -+ -+ uint32_t fqid; -+}; -+ -+/** -+ * DPNI Tx flow modification options -+ */ -+ -+/** -+ * Select to modify the settings for dedicate Tx confirmation/error -+ */ -+#define DPNI_TX_FLOW_OPT_TX_CONF_ERROR 0x00000001 -+/** -+ * Select to modify the L3 checksum generation setting -+ */ -+#define DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN 0x00000010 -+/** -+ * Select to modify the L4 checksum generation setting -+ */ -+#define DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN 0x00000020 -+ -+/** -+ * struct dpni_tx_flow_cfg - Structure representing Tx flow configuration -+ * @options: Flags representing the suggested modifications to the Tx flow; -+ * Use any combination 'DPNI_TX_FLOW_OPT_' flags -+ * @use_common_tx_conf_queue: 
Set to '1' to use the common (default) Tx -+ * confirmation and error queue; Set to '0' to use the private -+ * Tx confirmation and error queue; valid only if -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' wasn't set at DPNI creation -+ * and 'DPNI_TX_FLOW_OPT_TX_CONF_ERROR' is contained in 'options' -+ * @l3_chksum_gen: Set to '1' to enable L3 checksum generation; '0' to disable; -+ * valid only if 'DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN' is contained in 'options' -+ * @l4_chksum_gen: Set to '1' to enable L4 checksum generation; '0' to disable; -+ * valid only if 'DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN' is contained in 'options' -+ */ -+struct dpni_tx_flow_cfg { -+ uint32_t options; -+ int use_common_tx_conf_queue; -+ int l3_chksum_gen; -+ int l4_chksum_gen; -+}; -+ -+/** -+ * dpni_set_tx_flow() - Set Tx flow configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: Provides (or returns) the sender's flow ID; -+ * for each new sender set (*flow_id) to 'DPNI_NEW_FLOW_ID' to generate -+ * a new flow_id; this ID should be used as the QDBIN argument -+ * in enqueue operations -+ * @cfg: Tx flow configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *flow_id, -+ const struct dpni_tx_flow_cfg *cfg); -+ -+/** -+ * struct dpni_tx_flow_attr - Structure representing Tx flow attributes -+ * @use_common_tx_conf_queue: '1' if using common (default) Tx confirmation and -+ * error queue; '0' if using private Tx confirmation and error queue -+ * @l3_chksum_gen: '1' if L3 checksum generation is enabled; '0' if disabled -+ * @l4_chksum_gen: '1' if L4 checksum generation is enabled; '0' if disabled -+ */ -+struct dpni_tx_flow_attr { -+ int use_common_tx_conf_queue; -+ int l3_chksum_gen; -+ int l4_chksum_gen; -+}; -+ -+/** -+ * dpni_get_tx_flow() - Get Tx flow attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function -+ * @attr: Returned Tx flow attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_flow_attr *attr); -+ -+/** -+ * struct dpni_tx_conf_cfg - Structure representing Tx conf configuration -+ * @errors_only: Set to '1' to report back only error frames; -+ * Set to '0' to confirm transmission/error for all transmitted frames; -+ * @queue_cfg: Queue configuration -+ */ -+struct dpni_tx_conf_cfg { -+ int errors_only; -+ struct dpni_queue_cfg queue_cfg; -+}; -+ -+/** -+ * dpni_set_tx_conf() - Set Tx confirmation and error queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: Queue configuration -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * if 'DPNI_OPT_TX_CONF_DISABLED' was selected, only error frames are reported -+ * back - successfully transmitted frames are not confirmed. Otherwise, all -+ * transmitted frames are sent for confirmation. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_tx_conf_cfg *cfg); -+ -+/** -+ * struct dpni_tx_conf_attr - Structure representing Tx conf attributes -+ * @errors_only: '1' if only error frames are reported back; '0' if all -+ * transmitted frames are confirmed -+ * @queue_attr: Queue attributes -+ */ -+struct dpni_tx_conf_attr { -+ int errors_only; -+ struct dpni_queue_attr queue_attr; -+}; -+ -+/** -+ * dpni_get_tx_conf() - Get Tx confirmation and error queue attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @attr: Returned tx-conf attributes -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_conf_attr *attr); -+ -+/** -+ * dpni_set_tx_conf_congestion_notification() - Set Tx conf congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: congestion notification configuration -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_tx_conf_congestion_notification() - Get Tx conf congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: congestion notification -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_set_tx_conf_revoke() - Tx confirmation revocation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @revoke: revoke or not -+ * -+ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not -+ * selected at DPNI creation. -+ * Calling this function with 'revoke' set to '1' disables all transmit -+ * confirmation (including the private confirmation queues), regardless of -+ * previous settings; Note that in this case, Tx error frames are still -+ * enqueued to the general transmit errors queue. -+ * Calling this function with 'revoke' set to '0' restores the previous -+ * settings for both general and private transmit confirmation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int revoke); -+ -+/** -+ * dpni_set_rx_flow() - Set Rx flow configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7); -+ * use 'DPNI_ALL_TCS' to set all TCs and all flows -+ * @flow_id: Rx flow id within the traffic class; use -+ * 'DPNI_ALL_TC_FLOWS' to set all flows within -+ * this tc_id; ignored if tc_id is set to -+ * 'DPNI_ALL_TCS'; -+ * @cfg: Rx flow configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ const struct dpni_queue_cfg *cfg); -+ -+/** -+ * dpni_get_rx_flow() - Get Rx flow attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @flow_id: Rx flow id within the traffic class -+ * @attr: Returned Rx flow attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ struct dpni_queue_attr *attr); -+ -+/** -+ * dpni_set_rx_err_queue() - Set Rx error queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_queue_cfg *cfg); -+ -+/** -+ * dpni_get_rx_err_queue() - Get Rx error queue attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @attr: Returned Queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_queue_attr *attr); -+ -+/** -+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration -+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with -+ * key extractions to be used as the QoS criteria by calling -+ * dpni_prepare_key_cfg() -+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss); -+ * '0' to use the 'default_tc' in such cases -+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0 -+ */ -+struct dpni_qos_tbl_cfg { -+ uint64_t key_cfg_iova; -+ int discard_on_miss; -+ uint8_t default_tc; -+}; -+ -+/** -+ * dpni_set_qos_table() - Set QoS mapping table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS table configuration -+ * -+ * This function and all QoS-related functions require that -+ *'max_tcs > 1' was set at DPNI creation. -+ * -+ * warning: Before calling this function, call dpni_prepare_key_cfg() to -+ * prepare the key_cfg_iova parameter -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_qos_tbl_cfg *cfg); -+ -+/** -+ * struct dpni_rule_cfg - Rule configuration for table lookup -+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory) -+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory) -+ * @key_size: key and mask size (in bytes) -+ */ -+struct dpni_rule_cfg { -+ uint64_t key_iova; -+ uint64_t mask_iova; -+ uint8_t key_size; -+}; -+ -+/** -+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS rule to add -+ * @tc_id: Traffic class selection (0-7) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg, -+ uint8_t tc_id); -+ -+/** -+ * dpni_remove_qos_entry() - Remove QoS mapping entry -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS rule to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg); -+ -+/** -+ * dpni_clear_qos_table() - Clear all QoS mapping entries -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Following this function call, all frames are directed to -+ * the default traffic class (0) -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_clear_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class -+ * (to select a flow ID) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Flow steering rule to add -+ * @flow_id: Flow id selection (must be smaller than the -+ * distribution size of the traffic class) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg, -+ uint16_t flow_id); -+ -+/** -+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific -+ * traffic class -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Flow steering rule to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg); -+ -+/** -+ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific -+ * traffic class -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id); -+ -+/** -+ * dpni_set_vlan_insertion() - Enable/disable VLAN insertion for egress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set -+ * at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_vlan_removal() - Enable/disable VLAN removal for ingress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set -+ * at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_ipr() - Enable/disable IP reassembly of ingress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_IPR' option is set at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_ipr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_ipf() - Enable/disable IP fragmentation of egress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_IPF' option is set at DPNI -+ * creation. Fragmentation is performed according to MTU value -+ * set by dpni_set_mtu() function -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_ipf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+#endif /* __FSL_DPNI_H */ -diff --git a/drivers/staging/fsl-dpaa2/mac/Kconfig b/drivers/staging/fsl-dpaa2/mac/Kconfig -new file mode 100644 -index 0000000..174a9cd ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/Kconfig -@@ -0,0 +1,24 @@ -+config FSL_DPAA2_MAC -+ tristate "DPAA2 MAC / PHY interface" -+ depends on FSL_MC_BUS && FSL_DPAA2 -+ select MDIO_BUS_MUX_MMIOREG -+ select FSL_XGMAC_MDIO -+ select FIXED_PHY -+ ---help--- -+ Prototype driver for DPAA2 MAC / PHY interface object. -+ This driver works as a proxy between phylib including phy drivers and -+ the MC firmware. It receives updates on link state changes from PHY -+ lib and forwards them to MC and receives interrupt from MC whenever -+ a request is made to change the link state. -+ -+ -+config FSL_DPAA2_MAC_NETDEVS -+ bool "Expose net interfaces for PHYs" -+ default n -+ depends on FSL_DPAA2_MAC -+ ---help--- -+ Exposes macX net interfaces which allow direct control over MACs and -+ PHYs. -+ . -+ Leave disabled if unsure. 
-+ -diff --git a/drivers/staging/fsl-dpaa2/mac/Makefile b/drivers/staging/fsl-dpaa2/mac/Makefile -new file mode 100644 -index 0000000..bda9410 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/Makefile -@@ -0,0 +1,10 @@ -+ -+obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o -+ -+dpaa2-mac-objs := mac.o dpmac.o -+ -+all: -+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules -+ -+clean: -+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean -diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h -new file mode 100644 -index 0000000..dc00590 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h -@@ -0,0 +1,195 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPMAC_CMD_H -+#define _FSL_DPMAC_CMD_H -+ -+/* DPMAC Version */ -+#define DPMAC_VER_MAJOR 3 -+#define DPMAC_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPMAC_CMDID_CLOSE 0x800 -+#define DPMAC_CMDID_OPEN 0x80c -+#define DPMAC_CMDID_CREATE 0x90c -+#define DPMAC_CMDID_DESTROY 0x900 -+ -+#define DPMAC_CMDID_GET_ATTR 0x004 -+#define DPMAC_CMDID_RESET 0x005 -+ -+#define DPMAC_CMDID_SET_IRQ 0x010 -+#define DPMAC_CMDID_GET_IRQ 0x011 -+#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPMAC_CMDID_SET_IRQ_MASK 0x014 -+#define DPMAC_CMDID_GET_IRQ_MASK 0x015 -+#define DPMAC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPMAC_CMDID_MDIO_READ 0x0c0 -+#define DPMAC_CMDID_MDIO_WRITE 0x0c1 -+#define DPMAC_CMDID_GET_LINK_CFG 0x0c2 -+#define DPMAC_CMDID_SET_LINK_STATE 0x0c3 -+#define DPMAC_CMDID_GET_COUNTER 0x0c4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_OPEN(cmd, dpmac_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, 
irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ 
-+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\ -+ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_READ(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_MDIO_READ(cmd, data) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_COUNTER(cmd, type) 
\ -+ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+#endif /* _FSL_DPMAC_CMD_H */ -diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.c b/drivers/staging/fsl-dpaa2/mac/dpmac.c -new file mode 100644 -index 0000000..fc23b40 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c -@@ -0,0 +1,422 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "../../fsl-mc/include/mc-sys.h" -+#include "../../fsl-mc/include/mc-cmd.h" -+#include "dpmac.h" -+#include "dpmac-cmd.h" -+ -+int dpmac_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmac_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPMAC_CMD_OPEN(cmd, dpmac_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpmac_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPMAC_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 
0; -+} -+ -+int dpmac_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmac_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmac_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, 
-+ token); -+ DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_READ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_MDIO_READ(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_MDIO_READ(cmd, cfg->data); -+ -+ return 0; -+} -+ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_WRITE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_MDIO_WRITE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ 
DPMAC_RSP_GET_LINK_CFG(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_state *link_state) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_LINK_STATE(cmd, link_state); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_COUNTER(cmd, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPMAC_RSP_GET_COUNTER(cmd, *counter); -+ -+ return 0; -+} -diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.h b/drivers/staging/fsl-dpaa2/mac/dpmac.h -new file mode 100644 -index 0000000..ad27772 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h -@@ -0,0 +1,593 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMAC_H -+#define __FSL_DPMAC_H -+ -+/* Data Path MAC API -+ * Contains initialization APIs and runtime control APIs for DPMAC -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpmac_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpmac_id: DPMAC unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpmac_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmac_id, -+ uint16_t *token); -+ -+/** -+ * dpmac_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpmac_link_type - DPMAC link type -+ * @DPMAC_LINK_TYPE_NONE: No link -+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type -+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID -+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type -+ */ -+enum dpmac_link_type { -+ DPMAC_LINK_TYPE_NONE, -+ DPMAC_LINK_TYPE_FIXED, -+ DPMAC_LINK_TYPE_PHY, -+ DPMAC_LINK_TYPE_BACKPLANE -+}; -+ -+/** -+ * enum dpmac_eth_if - DPMAC Ethrnet interface -+ * @DPMAC_ETH_IF_MII: MII interface -+ * @DPMAC_ETH_IF_RMII: RMII interface -+ * @DPMAC_ETH_IF_SMII: SMII interface -+ * @DPMAC_ETH_IF_GMII: GMII interface -+ * @DPMAC_ETH_IF_RGMII: RGMII interface -+ * @DPMAC_ETH_IF_SGMII: SGMII interface -+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface -+ * @DPMAC_ETH_IF_XAUI: XAUI interface -+ * @DPMAC_ETH_IF_XFI: XFI interface -+ */ -+enum dpmac_eth_if { -+ DPMAC_ETH_IF_MII, -+ DPMAC_ETH_IF_RMII, -+ DPMAC_ETH_IF_SMII, -+ DPMAC_ETH_IF_GMII, -+ DPMAC_ETH_IF_RGMII, -+ DPMAC_ETH_IF_SGMII, -+ DPMAC_ETH_IF_QSGMII, -+ DPMAC_ETH_IF_XAUI, -+ DPMAC_ETH_IF_XFI -+}; -+ -+/** -+ * struct dpmac_cfg - Structure representing DPMAC configuration -+ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP, -+ * the MAC IDs are continuous. -+ * For example: 2 WRIOPs, 16 MACs in each: -+ * MAC IDs for the 1st WRIOP: 1-16, -+ * MAC IDs for the 2nd WRIOP: 17-32. 
-+ */ -+struct dpmac_cfg { -+ int mac_id; -+}; -+ -+/** -+ * dpmac_create() - Create the DPMAC object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPMAC object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpmac_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpmac_destroy() - Destroy the DPMAC object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpmac_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPMAC IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPMAC_IRQ_INDEX 0 -+/** -+ * IRQ event - indicates a change in link state -+ */ -+#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 -+/** -+ * IRQ event - Indicates that the link state changed -+ */ -+#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 -+ -+/** -+ * struct dpmac_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpmac_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmac_irq_cfg *irq_cfg); -+ -+/** -+ * dpmac_get_irq() - Get IRQ information from the DPMAC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmac_irq_cfg *irq_cfg); -+ -+/** -+ * dpmac_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpmac_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpmac_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpmac_get_irq_mask() - Get interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpmac_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpmac_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpmac_attr - Structure representing DPMAC attributes -+ * @id: DPMAC object ID -+ * @phy_id: PHY ID -+ * @link_type: link type -+ * @eth_if: Ethernet interface -+ * @max_rate: Maximum supported rate - in Mbps -+ * @version: DPMAC version -+ */ -+struct dpmac_attr { -+ int id; -+ int phy_id; -+ enum dpmac_link_type link_type; -+ enum dpmac_eth_if eth_if; -+ uint32_t max_rate; -+ /** -+ * struct version - Structure representing DPMAC version -+ * @major: DPMAC major version -+ * @minor: DPMAC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpmac_get_attributes - Retrieve DPMAC attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_attr *attr); -+ -+/** -+ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters -+ * @phy_addr: MDIO device address -+ * @reg: Address of the register within the Clause 45 PHY device from which data -+ * is to be read -+ * @data: Data read/write from/to MDIO -+ */ -+struct dpmac_mdio_cfg { -+ uint8_t phy_addr; -+ uint8_t reg; -+ uint16_t data; -+}; -+ -+/** -+ * dpmac_mdio_read() - Perform MDIO read transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/** -+ * dpmac_mdio_write() - Perform MDIO write transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/** -+ * DPMAC link configuration/state options -+ */ -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration -+ * @rate: Link's rate - in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ */ -+struct dpmac_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpmac_get_link_cfg() - Get Ethernet link configuration -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Returned structure with the link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_cfg *cfg); -+ -+/** -+ * struct dpmac_link_state - DPMAC link configuration request -+ * @rate: Rate in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ * @up: Link state -+ */ -+struct dpmac_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpmac_set_link_state() - Set the Ethernet link status -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @link_state: Link state configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_state *link_state); -+ -+/** -+ * enum dpmac_counter - DPMAC counter types -+ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger -+ * (up to max frame length specified), -+ * good or bad. -+ * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received -+ * with a wrong CRC -+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length -+ * specified, with a bad frame check sequence. -+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors. -+ * Occurs when a receive FIFO overflows. -+ * Includes also frames truncated as a result of -+ * the receive FIFO overflow. 
-+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error -+ * (optional used for wrong SFD). -+ * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that was less than 64 -+ * bytes long with a good CRC. -+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length -+ * specified, with a good frame check sequence. -+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC) -+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted -+ * (regular and PFC). -+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid -+ * frames and valid pause frames. -+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames. -+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames. -+ * @DPMAC_CNT_ING_ALL_FRAME: counts each good or bad frames received. -+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames. -+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error -+ * (except for undersized/fragment frame). -+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid -+ * frames and valid pause frames transmitted. -+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames. -+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames. -+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames. -+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error. -+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including -+ * pause frames. -+ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including -+ * pause frames. 
-+ */ -+enum dpmac_counter { -+ DPMAC_CNT_ING_FRAME_64, -+ DPMAC_CNT_ING_FRAME_127, -+ DPMAC_CNT_ING_FRAME_255, -+ DPMAC_CNT_ING_FRAME_511, -+ DPMAC_CNT_ING_FRAME_1023, -+ DPMAC_CNT_ING_FRAME_1518, -+ DPMAC_CNT_ING_FRAME_1519_MAX, -+ DPMAC_CNT_ING_FRAG, -+ DPMAC_CNT_ING_JABBER, -+ DPMAC_CNT_ING_FRAME_DISCARD, -+ DPMAC_CNT_ING_ALIGN_ERR, -+ DPMAC_CNT_EGR_UNDERSIZED, -+ DPMAC_CNT_ING_OVERSIZED, -+ DPMAC_CNT_ING_VALID_PAUSE_FRAME, -+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, -+ DPMAC_CNT_ING_BYTE, -+ DPMAC_CNT_ING_MCAST_FRAME, -+ DPMAC_CNT_ING_BCAST_FRAME, -+ DPMAC_CNT_ING_ALL_FRAME, -+ DPMAC_CNT_ING_UCAST_FRAME, -+ DPMAC_CNT_ING_ERR_FRAME, -+ DPMAC_CNT_EGR_BYTE, -+ DPMAC_CNT_EGR_MCAST_FRAME, -+ DPMAC_CNT_EGR_BCAST_FRAME, -+ DPMAC_CNT_EGR_UCAST_FRAME, -+ DPMAC_CNT_EGR_ERR_FRAME, -+ DPMAC_CNT_ING_GOOD_FRAME, -+ DPMAC_CNT_ENG_GOOD_FRAME -+}; -+ -+/** -+ * dpmac_get_counter() - Read a specific DPMAC counter -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @type: The requested counter -+ * @counter: Returned counter value -+ * -+ * Return: The requested counter; '0' otherwise. -+ */ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter); -+ -+#endif /* __FSL_DPMAC_H */ -diff --git a/drivers/staging/fsl-dpaa2/mac/mac.c b/drivers/staging/fsl-dpaa2/mac/mac.c -new file mode 100644 -index 0000000..fe16b8b ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/mac.c -@@ -0,0 +1,694 @@ -+/* Copyright 2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "../../fsl-mc/include/mc.h" -+#include "../../fsl-mc/include/mc-sys.h" -+ -+#include "dpmac.h" -+#include "dpmac-cmd.h" -+ -+#define DPAA2_SUPPORTED_DPMAC_VERSION 3 -+ -+struct dpaa2_mac_priv { -+ struct net_device *netdev; -+ struct fsl_mc_device *mc_dev; -+ struct dpmac_attr attr; -+ struct dpmac_link_state old_state; -+}; -+ -+/* TODO: fix the 10G modes, mapping can't be right: -+ * XGMII is paralel -+ * XAUI is serial, using 8b/10b encoding -+ * XFI is also serial but using 64b/66b encoding -+ * they can't all map to XGMII... -+ * -+ * This must be kept in sync with enum dpmac_eth_if. -+ */ -+static phy_interface_t dpaa2_mac_iface_mode[] = { -+ /* DPMAC_ETH_IF_MII */ -+ PHY_INTERFACE_MODE_MII, -+ /* DPMAC_ETH_IF_RMII */ -+ PHY_INTERFACE_MODE_RMII, -+ /* DPMAC_ETH_IF_SMII */ -+ PHY_INTERFACE_MODE_SMII, -+ /* DPMAC_ETH_IF_GMII */ -+ PHY_INTERFACE_MODE_GMII, -+ /* DPMAC_ETH_IF_RGMII */ -+ PHY_INTERFACE_MODE_RGMII, -+ /* DPMAC_ETH_IF_SGMII */ -+ PHY_INTERFACE_MODE_SGMII, -+ /* DPMAC_ETH_IF_QSGMII */ -+ PHY_INTERFACE_MODE_QSGMII, -+ /* DPMAC_ETH_IF_XAUI */ -+ PHY_INTERFACE_MODE_XGMII, -+ /* DPMAC_ETH_IF_XFI */ -+ PHY_INTERFACE_MODE_XGMII, -+}; -+ -+static void dpaa2_mac_link_changed(struct net_device *netdev) -+{ -+ struct phy_device *phydev; -+ struct dpmac_link_state state = { 0 }; -+ struct dpaa2_mac_priv *priv = netdev_priv(netdev); -+ int err; -+ -+ /* the PHY just notified us of link state change */ -+ phydev = netdev->phydev; -+ -+ state.up = !!phydev->link; -+ if (phydev->link) { -+ state.rate = phydev->speed; -+ -+ if (!phydev->duplex) -+ state.options |= DPMAC_LINK_OPT_HALF_DUPLEX; -+ if (phydev->autoneg) -+ state.options |= DPMAC_LINK_OPT_AUTONEG; -+ -+ netif_carrier_on(netdev); -+ } else { -+ netif_carrier_off(netdev); -+ } -+ -+ if (priv->old_state.up != state.up || -+ priv->old_state.rate != 
state.rate || -+ priv->old_state.options != state.options) { -+ priv->old_state = state; -+ phy_print_status(phydev); -+ } -+ -+ /* We must interrogate MC at all times, because we don't know -+ * when and whether a potential DPNI may have read the link state. -+ */ -+ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0, -+ priv->mc_dev->mc_handle, &state); -+ if (unlikely(err)) -+ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err); -+} -+ -+/* IRQ bits that we handle */ -+static const u32 dpmac_irq_mask = DPMAC_IRQ_EVENT_LINK_CFG_REQ; -+ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb, -+ struct net_device *dev) -+{ -+ /* we don't support I/O for now, drop the frame */ -+ dev_kfree_skb_any(skb); -+ return NETDEV_TX_OK; -+} -+ -+static int dpaa2_mac_open(struct net_device *netdev) -+{ -+ /* start PHY state machine */ -+ phy_start(netdev->phydev); -+ -+ return 0; -+} -+ -+static int dpaa2_mac_stop(struct net_device *netdev) -+{ -+ if (!netdev->phydev) -+ goto done; -+ -+ /* stop PHY state machine */ -+ phy_stop(netdev->phydev); -+ -+ /* signal link down to firmware */ -+ netdev->phydev->link = 0; -+ dpaa2_mac_link_changed(netdev); -+ -+done: -+ return 0; -+} -+ -+static int dpaa2_mac_get_settings(struct net_device *netdev, -+ struct ethtool_cmd *cmd) -+{ -+ return phy_ethtool_gset(netdev->phydev, cmd); -+} -+ -+static int dpaa2_mac_set_settings(struct net_device *netdev, -+ struct ethtool_cmd *cmd) -+{ -+ return phy_ethtool_sset(netdev->phydev, cmd); -+} -+ -+static struct rtnl_link_stats64 -+*dpaa2_mac_get_stats(struct net_device *netdev, -+ struct rtnl_link_stats64 *storage) -+{ -+ struct dpaa2_mac_priv *priv = netdev_priv(netdev); -+ u64 tmp; -+ int err; -+ -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_MCAST_FRAME, -+ &storage->tx_packets); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ 
DPMAC_CNT_EGR_BCAST_FRAME, &tmp); -+ if (err) -+ goto error; -+ storage->tx_packets += tmp; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_UCAST_FRAME, &tmp); -+ if (err) -+ goto error; -+ storage->tx_packets += tmp; -+ -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_BYTE, &storage->tx_bytes); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors); -+ if (err) -+ goto error; -+ -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_FRAME_DISCARD, -+ &storage->rx_dropped); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_OVERSIZED, &tmp); -+ if (err) -+ goto error; -+ storage->rx_errors += tmp; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_BYTE, &storage->rx_bytes); -+ if (err) -+ goto error; -+ -+ return storage; -+ -+error: -+ netdev_err(netdev, "dpmac_get_counter err %d\n", err); -+ return storage; -+} -+ -+static struct { -+ enum dpmac_counter id; -+ char name[ETH_GSTRING_LEN]; -+} dpaa2_mac_counters[] = { -+ {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"}, -+ {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"}, -+ 
{DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"}, -+ {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"}, -+ {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"}, -+ {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"}, -+ {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"}, -+ {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"}, -+ {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"}, -+ {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"}, -+ {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"}, -+ {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"}, -+ {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"}, -+ {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"}, -+ {DPMAC_CNT_ING_FRAG, "rx frags"}, -+ {DPMAC_CNT_ING_JABBER, "rx jabber"}, -+ {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"}, -+ {DPMAC_CNT_ING_OVERSIZED, "rx oversized"}, -+ {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"}, -+ {DPMAC_CNT_ING_BYTE, "rx bytes"}, -+ {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"}, -+ {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"}, -+ {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"}, -+ {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"}, -+ {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"}, -+ {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"}, -+ {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"}, -+ {DPMAC_CNT_EGR_BYTE, "tx bytes"}, -+ -+}; -+ -+static void dpaa2_mac_get_strings(struct net_device *netdev, -+ u32 stringset, u8 *data) -+{ -+ int i; -+ -+ switch (stringset) { -+ case ETH_SS_STATS: -+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) -+ memcpy(data + i * ETH_GSTRING_LEN, -+ dpaa2_mac_counters[i].name, -+ ETH_GSTRING_LEN); -+ break; -+ } -+} -+ -+static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev, -+ struct ethtool_stats *stats, -+ u64 *data) -+{ -+ struct dpaa2_mac_priv *priv = netdev_priv(netdev); -+ int i; -+ int err; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) { -+ err = dpmac_get_counter(priv->mc_dev->mc_io, -+ 0, -+ priv->mc_dev->mc_handle, -+ dpaa2_mac_counters[i].id, &data[i]); -+ if (err) -+ netdev_err(netdev, "dpmac_get_counter[%s] err %d\n", -+ 
dpaa2_mac_counters[i].name, err); -+ } -+} -+ -+static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset) -+{ -+ switch (sset) { -+ case ETH_SS_STATS: -+ return ARRAY_SIZE(dpaa2_mac_counters); -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+static const struct net_device_ops dpaa2_mac_ndo_ops = { -+ .ndo_start_xmit = &dpaa2_mac_drop_frame, -+ .ndo_open = &dpaa2_mac_open, -+ .ndo_stop = &dpaa2_mac_stop, -+ .ndo_get_stats64 = &dpaa2_mac_get_stats, -+}; -+ -+static const struct ethtool_ops dpaa2_mac_ethtool_ops = { -+ .get_settings = &dpaa2_mac_get_settings, -+ .set_settings = &dpaa2_mac_set_settings, -+ .get_strings = &dpaa2_mac_get_strings, -+ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats, -+ .get_sset_count = &dpaa2_mac_get_sset_count, -+}; -+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ -+static void configure_link(struct dpaa2_mac_priv *priv, -+ struct dpmac_link_cfg *cfg) -+{ -+ struct phy_device *phydev = priv->netdev->phydev; -+ -+ if (unlikely(!phydev)) -+ return; -+ -+ phydev->speed = cfg->rate; -+ phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX); -+ -+ if (cfg->options & DPMAC_LINK_OPT_AUTONEG) { -+ phydev->autoneg = 1; -+ phydev->advertising |= ADVERTISED_Autoneg; -+ } else { -+ phydev->autoneg = 0; -+ phydev->advertising &= ~ADVERTISED_Autoneg; -+ } -+ -+ phy_start_aneg(phydev); -+} -+ -+static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg) -+{ -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); -+ struct dpmac_link_cfg link_cfg; -+ u32 status; -+ int err; -+ -+ err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, &status); -+ if (unlikely(err || !status)) -+ return IRQ_NONE; -+ -+ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */ -+ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) { -+ err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ &link_cfg); -+ 
if (unlikely(err)) -+ goto out; -+ -+ configure_link(priv, &link_cfg); -+ } -+ -+out: -+ dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, status); -+ -+ return IRQ_HANDLED; -+} -+ -+static int setup_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int err; -+ -+ err = fsl_mc_allocate_irqs(mc_dev); -+ if (err) { -+ dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err); -+ return err; -+ } -+ -+ err = devm_request_threaded_irq(&mc_dev->dev, -+ mc_dev->irqs[0]->irq_number, -+ NULL, &dpaa2_mac_irq_handler, -+ IRQF_NO_SUSPEND | IRQF_ONESHOT, -+ dev_name(&mc_dev->dev), &mc_dev->dev); -+ if (err) { -+ dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n", -+ err); -+ goto free_irq; -+ } -+ -+ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, dpmac_irq_mask); -+ if (err) { -+ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); -+ goto free_irq; -+ } -+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, 1); -+ if (err) { -+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); -+ goto free_irq; -+ } -+ -+ return 0; -+ -+free_irq: -+ fsl_mc_free_irqs(mc_dev); -+ -+ return err; -+} -+ -+static void teardown_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int err; -+ -+ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, dpmac_irq_mask); -+ if (err) -+ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); -+ -+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, 0); -+ if (err) -+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); -+ -+ fsl_mc_free_irqs(mc_dev); -+} -+ -+static struct device_node *lookup_node(struct device *dev, int dpmac_id) -+{ -+ struct device_node *dpmacs, *dpmac = NULL; -+ struct device_node *mc_node = dev->of_node; -+ u32 id; -+ int err; -+ -+ dpmacs = of_find_node_by_name(mc_node, "dpmacs"); -+ if (!dpmacs) { -+ dev_err(dev, "No dpmacs subnode in device-tree\n"); -+ 
return NULL; -+ } -+ -+ while ((dpmac = of_get_next_child(dpmacs, dpmac))) { -+ err = of_property_read_u32(dpmac, "reg", &id); -+ if (err) -+ continue; -+ if (id == dpmac_id) -+ return dpmac; -+ } -+ -+ return NULL; -+} -+ -+static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev) -+{ -+ struct device *dev; -+ struct dpaa2_mac_priv *priv = NULL; -+ struct device_node *phy_node, *dpmac_node; -+ struct net_device *netdev; -+ phy_interface_t if_mode; -+ int err = 0; -+ -+ dev = &mc_dev->dev; -+ -+ /* prepare a net_dev structure to make the phy lib API happy */ -+ netdev = alloc_etherdev(sizeof(*priv)); -+ if (!netdev) { -+ dev_err(dev, "alloc_etherdev error\n"); -+ err = -ENOMEM; -+ goto err_exit; -+ } -+ priv = netdev_priv(netdev); -+ priv->mc_dev = mc_dev; -+ priv->netdev = netdev; -+ -+ SET_NETDEV_DEV(netdev, dev); -+ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id); -+#endif -+ -+ dev_set_drvdata(dev, priv); -+ -+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); -+ if (err || !mc_dev->mc_io) { -+ dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err); -+ err = -ENODEV; -+ goto err_free_netdev; -+ } -+ -+ err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, -+ &mc_dev->mc_handle); -+ if (err || !mc_dev->mc_handle) { -+ dev_err(dev, "dpmac_open error: %d\n", err); -+ err = -ENODEV; -+ goto err_free_mcp; -+ } -+ -+ err = dpmac_get_attributes(mc_dev->mc_io, 0, -+ mc_dev->mc_handle, &priv->attr); -+ if (err) { -+ dev_err(dev, "dpmac_get_attributes err %d\n", err); -+ err = -EINVAL; -+ goto err_close; -+ } -+ -+ dev_warn(dev, "Using DPMAC API %d.%d\n", -+ priv->attr.version.major, priv->attr.version.minor); -+ -+ /* Look up the DPMAC node in the device-tree. 
*/ -+ dpmac_node = lookup_node(dev, priv->attr.id); -+ if (!dpmac_node) { -+ dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id); -+ err = -ENODEV; -+ goto err_close; -+ } -+ -+ err = setup_irqs(mc_dev); -+ if (err) { -+ err = -EFAULT; -+ goto err_close; -+ } -+ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ /* OPTIONAL, register netdev just to make it visible to the user */ -+ netdev->netdev_ops = &dpaa2_mac_ndo_ops; -+ netdev->ethtool_ops = &dpaa2_mac_ethtool_ops; -+ -+ /* phy starts up enabled so netdev should be up too */ -+ netdev->flags |= IFF_UP; -+ -+ err = register_netdev(priv->netdev); -+ if (err < 0) { -+ dev_err(dev, "register_netdev error %d\n", err); -+ err = -ENODEV; -+ goto err_free_irq; -+ } -+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ -+ /* probe the PHY as a fixed-link if the link type declared in DPC -+ * explicitly mandates this -+ */ -+ if (priv->attr.link_type == DPMAC_LINK_TYPE_FIXED) -+ goto probe_fixed_link; -+ -+ if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) { -+ if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if]; -+ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n", -+ phy_modes(if_mode), priv->attr.eth_if); -+ } else { -+ dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n", -+ priv->attr.eth_if); -+ goto probe_fixed_link; -+ } -+ -+ /* try to connect to the PHY */ -+ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0); -+ if (!phy_node) { -+ if (!phy_node) { -+ dev_err(dev, "dpmac node has no phy-handle property\n"); -+ err = -ENODEV; -+ goto err_no_phy; -+ } -+ } -+ netdev->phydev = of_phy_connect(netdev, phy_node, -+ &dpaa2_mac_link_changed, 0, if_mode); -+ if (!netdev->phydev) { -+ /* No need for dev_err(); the kernel's loud enough as it is. */ -+ dev_dbg(dev, "Can't of_phy_connect() now.\n"); -+ /* We might be waiting for the MDIO MUX to probe, so defer -+ * our own probing. 
-+ */ -+ err = -EPROBE_DEFER; -+ goto err_defer; -+ } -+ dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode)); -+ -+probe_fixed_link: -+ if (!netdev->phydev) { -+ struct fixed_phy_status status = { -+ .link = 1, -+ /* fixed-phys don't support 10Gbps speed for now */ -+ .speed = 1000, -+ .duplex = 1, -+ }; -+ -+ /* try to register a fixed link phy */ -+ netdev->phydev = fixed_phy_register(PHY_POLL, &status, NULL); -+ if (!netdev->phydev || IS_ERR(netdev->phydev)) { -+ dev_err(dev, "error trying to register fixed PHY\n"); -+ /* So we don't crash unregister_netdev() later on */ -+ netdev->phydev = NULL; -+ err = -EFAULT; -+ goto err_no_phy; -+ } -+ dev_info(dev, "Registered fixed PHY.\n"); -+ } -+ -+ /* start PHY state machine */ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ dpaa2_mac_open(netdev); -+#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ phy_start(netdev->phydev); -+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ return 0; -+ -+err_defer: -+err_no_phy: -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ unregister_netdev(netdev); -+err_free_irq: -+#endif -+ teardown_irqs(mc_dev); -+err_close: -+ dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -+err_free_mcp: -+ fsl_mc_portal_free(mc_dev->mc_io); -+err_free_netdev: -+ free_netdev(netdev); -+err_exit: -+ return err; -+} -+ -+static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev) -+{ -+ struct device *dev = &mc_dev->dev; -+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); -+ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ unregister_netdev(priv->netdev); -+#endif -+ teardown_irqs(priv->mc_dev); -+ dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle); -+ fsl_mc_portal_free(priv->mc_dev->mc_io); -+ free_netdev(priv->netdev); -+ -+ dev_set_drvdata(dev, NULL); -+ kfree(priv); -+ -+ return 0; -+} -+ -+static const struct fsl_mc_device_match_id dpaa2_mac_match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpmac", -+ .ver_major = DPMAC_VER_MAJOR, -+ .ver_minor = DPMAC_VER_MINOR, -+ }, -+ {} -+}; 
-+ -+static struct fsl_mc_driver dpaa2_mac_drv = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpaa2_mac_probe, -+ .remove = dpaa2_mac_remove, -+ .match_id_table = dpaa2_mac_match_id_table, -+}; -+ -+module_fsl_mc_driver(dpaa2_mac_drv); -+ -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver"); -diff --git a/drivers/staging/fsl-mc/Kconfig b/drivers/staging/fsl-mc/Kconfig -new file mode 100644 -index 0000000..32df07b ---- /dev/null -+++ b/drivers/staging/fsl-mc/Kconfig -@@ -0,0 +1 @@ -+source "drivers/staging/fsl-mc/bus/Kconfig" -diff --git a/drivers/staging/fsl-mc/Makefile b/drivers/staging/fsl-mc/Makefile -new file mode 100644 -index 0000000..9c6a001 ---- /dev/null -+++ b/drivers/staging/fsl-mc/Makefile -@@ -0,0 +1,2 @@ -+# Freescale Management Complex (MC) bus drivers -+obj-$(CONFIG_FSL_MC_BUS) += bus/ -diff --git a/drivers/staging/fsl-mc/TODO b/drivers/staging/fsl-mc/TODO -new file mode 100644 -index 0000000..d78288b ---- /dev/null -+++ b/drivers/staging/fsl-mc/TODO -@@ -0,0 +1,13 @@ -+* Add README file (with ASCII art) describing relationships between -+ DPAA2 objects and how combine them to make a NIC, an LS2 switch, etc. -+ Also, define all acronyms used. -+ -+* Decide if multiple root fsl-mc buses will be supported per Linux instance, -+ and if so add support for this. -+ -+* Add at least one device driver for a DPAA2 object (child device of the -+ fsl-mc bus). -+ -+Please send any patches to Greg Kroah-Hartman , -+german.rivera@freescale.com, devel@driverdev.osuosl.org, -+linux-kernel@vger.kernel.org -diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig -new file mode 100644 -index 0000000..8bef5b8 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/Kconfig -@@ -0,0 +1,45 @@ -+# -+# Freescale Management Complex (MC) bus drivers -+# -+# Copyright (C) 2014 Freescale Semiconductor, Inc. 
-+# -+# This file is released under the GPLv2 -+# -+ -+config FSL_MC_BUS -+ tristate "Freescale Management Complex (MC) bus driver" -+ depends on OF && ARM64 -+ help -+ Driver to enable the bus infrastructure for the Freescale -+ QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware -+ module of the QorIQ LS2 SoCs, that does resource management -+ for hardware building-blocks in the SoC that can be used -+ to dynamically create networking hardware objects such as -+ network interfaces (NICs), crypto accelerator instances, -+ or L2 switches. -+ -+ Only enable this option when building the kernel for -+ Freescale QorQIQ LS2xxxx SoCs. -+ -+config FSL_MC_RESTOOL -+ tristate "Freescale Management Complex (MC) restool driver" -+ depends on FSL_MC_BUS -+ help -+ Driver that provides kernel support for the Freescale Management -+ Complex resource manager user-space tool. -+ -+config FSL_MC_DPIO -+ tristate "Freescale Data Path I/O (DPIO) driver" -+ depends on FSL_MC_BUS -+ help -+ Driver for Freescale Data Path I/O (DPIO) devices. -+ A DPIO device provides queue and buffer management facilities -+ for software to interact with other Data Path devices. This -+ driver does not expose the DPIO device individually, but -+ groups them under a service layer API. -+ -+config FSL_QBMAN_DEBUG -+ tristate "Freescale QBMAN Debug APIs" -+ depends on FSL_MC_DPIO -+ help -+ QBMan debug assistant APIs. -diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile -new file mode 100644 -index 0000000..f29399c ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/Makefile -@@ -0,0 +1,24 @@ -+# -+# Freescale Management Complex (MC) bus drivers -+# -+# Copyright (C) 2014 Freescale Semiconductor, Inc. 
-+# -+# This file is released under the GPLv2 -+# -+obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o -+ -+mc-bus-driver-objs := mc-bus.o \ -+ mc-sys.o \ -+ dprc.o \ -+ dpmng.o \ -+ dprc-driver.o \ -+ mc-allocator.o \ -+ dpmcp.o \ -+ dpbp.o \ -+ dpcon.o -+ -+# MC restool kernel support -+obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o -+ -+# MC DPIO driver -+obj-$(CONFIG_FSL_MC_DPIO) += dpio/ -diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c -new file mode 100644 -index 0000000..f183121 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpbp.c -@@ -0,0 +1,459 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+* -+* Redistribution and use in source and binary forms, with or without -+* modification, are permitted provided that the following conditions are met: -+* * Redistributions of source code must retain the above copyright -+* notice, this list of conditions and the following disclaimer. -+* * Redistributions in binary form must reproduce the above copyright -+* notice, this list of conditions and the following disclaimer in the -+* documentation and/or other materials provided with the distribution. -+* * Neither the name of the above-listed copyright holders nor the -+* names of any contributors may be used to endorse or promote products -+* derived from this software without specific prior written permission. -+* -+* -+* ALTERNATIVELY, this software may be distributed under the terms of the -+* GNU General Public License ("GPL") as published by the Free Software -+* Foundation, either version 2 of that License or (at your option) any -+* later version. -+* -+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+* POSSIBILITY OF SUCH DAMAGE. -+*/ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dpbp.h" -+#include "../include/dpbp-cmd.h" -+ -+int dpbp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpbp_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ -+ cmd.params[0] |= mc_enc(0, 32, dpbp_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+EXPORT_SYMBOL(dpbp_open); -+ -+int dpbp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpbp_close); -+ -+int dpbp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpbp_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ (void)(cfg); /* unused */ -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ 
*token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpbp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpbp_enable); -+ -+int dpbp_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpbp_disable); -+ -+int dpbp_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *en = (int)mc_dec(cmd.params[0], 0, 1); -+ -+ return 0; -+} -+ -+int dpbp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpbp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 
}; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 8, irq_index); -+ cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); -+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpbp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ irq_cfg->addr = (uint64_t)mc_dec(cmd.params[1], 0, 64); -+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); -+ *type = (int)mc_dec(cmd.params[2], 32, 32); -+ -+ return 0; -+} -+ -+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 8, en); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, 
&cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *en = (uint8_t)mc_dec(cmd.params[0], 0, 8); -+ return 0; -+} -+ -+int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 32, mask); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *mask = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ return 0; -+} -+ -+int dpbp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 32, *status); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *status = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ return 0; -+} -+ -+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 32, status); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ attr->bpid = (uint16_t)mc_dec(cmd.params[0], 16, 16); -+ attr->id = (int)mc_dec(cmd.params[0], 32, 32); -+ attr->version.major = (uint16_t)mc_dec(cmd.params[1], 0, 16); -+ attr->version.minor = (uint16_t)mc_dec(cmd.params[1], 16, 16); -+ return 0; -+} -+EXPORT_SYMBOL(dpbp_get_attributes); -+ -+int dpbp_set_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry); -+ cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit); -+ cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry); -+ cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit); -+ cmd.params[2] |= mc_enc(0, 16, cfg->options); -+ cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx); -+ cmd.params[4] |= mc_enc(0, 64, cfg->message_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS, -+ 
cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ cfg->depletion_entry = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ cfg->depletion_exit = (uint32_t)mc_dec(cmd.params[0], 32, 32); -+ cfg->surplus_entry = (uint32_t)mc_dec(cmd.params[1], 0, 32); -+ cfg->surplus_exit = (uint32_t)mc_dec(cmd.params[1], 32, 32); -+ cfg->options = (uint16_t)mc_dec(cmd.params[2], 0, 16); -+ cfg->message_ctx = (uint64_t)mc_dec(cmd.params[3], 0, 64); -+ cfg->message_iova = (uint64_t)mc_dec(cmd.params[4], 0, 64); -+ -+ return 0; -+} -diff --git a/drivers/staging/fsl-mc/bus/dpcon.c b/drivers/staging/fsl-mc/bus/dpcon.c -new file mode 100644 -index 0000000..7965284 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpcon.c -@@ -0,0 +1,407 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dpcon.h" -+#include "../include/dpcon-cmd.h" -+ -+int dpcon_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpcon_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPCON_CMD_OPEN(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpcon_open); -+ -+int dpcon_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_close); -+ -+int dpcon_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpcon_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ 
/* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPCON_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpcon_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_enable); -+ -+int dpcon_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_disable); -+ -+int dpcon_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpcon_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPCON_CMDID_RESET, -+ cmd_flags, token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpcon_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpcon_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response 
parameters */ -+ DPCON_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpcon_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return 
mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpcon_get_attributes); -+ -+int dpcon_set_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_NOTIFICATION(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_set_notification); -+ -diff --git a/drivers/staging/fsl-mc/bus/dpio/Makefile b/drivers/staging/fsl-mc/bus/dpio/Makefile -new file mode 100644 -index 0000000..c20356b ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/Makefile -@@ -0,0 +1,9 @@ -+# -+# Freescale DPIO driver -+# -+ -+obj-$(CONFIG_FSL_MC_BUS) += fsl-dpio-drv.o -+ -+fsl-dpio-drv-objs := dpio-drv.o dpio_service.o dpio.o qbman_portal.o -+ -+obj-$(CONFIG_FSL_QBMAN_DEBUG) += qbman_debug.o -diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c -new file mode 100644 -index 0000000..80add27 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c -@@ -0,0 +1,401 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "../../include/mc.h" -+#include "../../include/fsl_dpaa2_io.h" -+ -+#include "fsl_qbman_portal.h" -+#include "fsl_dpio.h" -+#include "fsl_dpio_cmd.h" -+ -+#include "dpio-drv.h" -+ -+#define DPIO_DESCRIPTION "DPIO Driver" -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_AUTHOR("Freescale Semiconductor, Inc"); -+MODULE_DESCRIPTION(DPIO_DESCRIPTION); -+ -+#define MAX_DPIO_IRQ_NAME 16 /* Big enough for "FSL DPIO %d" */ -+ -+struct dpio_priv { -+ struct dpaa2_io *io; -+ char irq_name[MAX_DPIO_IRQ_NAME]; -+ struct task_struct *thread; -+}; -+ -+static int dpio_thread(void *data) -+{ -+ struct dpaa2_io *io = data; -+ -+ while (!kthread_should_stop()) { -+ int err = dpaa2_io_poll(io); -+ -+ if (err) { -+ pr_err("dpaa2_io_poll() failed\n"); -+ return err; -+ } -+ msleep(50); -+ } -+ return 0; -+} -+ -+static irqreturn_t dpio_irq_handler(int irq_num, void *arg) -+{ -+ struct device *dev = (struct device *)arg; -+ struct dpio_priv *priv = dev_get_drvdata(dev); -+ -+ return dpaa2_io_irq(priv->io); -+} -+ -+static void unregister_dpio_irq_handlers(struct fsl_mc_device *ls_dev) -+{ -+ int i; -+ struct fsl_mc_device_irq *irq; -+ int irq_count = ls_dev->obj_desc.irq_count; -+ -+ for (i = 0; i < irq_count; i++) { -+ irq = ls_dev->irqs[i]; -+ devm_free_irq(&ls_dev->dev, irq->irq_number, &ls_dev->dev); -+ } -+} -+ -+static int register_dpio_irq_handlers(struct fsl_mc_device *ls_dev, int cpu) -+{ -+ struct dpio_priv *priv; -+ unsigned int i; -+ int error; -+ struct fsl_mc_device_irq *irq; -+ unsigned int num_irq_handlers_registered = 0; -+ int irq_count = ls_dev->obj_desc.irq_count; -+ cpumask_t mask; -+ -+ priv = dev_get_drvdata(&ls_dev->dev); -+ -+ if (WARN_ON(irq_count != 1)) -+ return -EINVAL; -+ -+ for (i = 0; i < irq_count; i++) { -+ irq = ls_dev->irqs[i]; -+ error = devm_request_irq(&ls_dev->dev, -+ irq->irq_number, -+ dpio_irq_handler, -+ 0, -+ priv->irq_name, -+ 
&ls_dev->dev); -+ if (error < 0) { -+ dev_err(&ls_dev->dev, -+ "devm_request_irq() failed: %d\n", -+ error); -+ goto error_unregister_irq_handlers; -+ } -+ -+ /* Set the IRQ affinity */ -+ cpumask_clear(&mask); -+ cpumask_set_cpu(cpu, &mask); -+ if (irq_set_affinity(irq->irq_number, &mask)) -+ pr_err("irq_set_affinity failed irq %d cpu %d\n", -+ irq->irq_number, cpu); -+ -+ num_irq_handlers_registered++; -+ } -+ -+ return 0; -+ -+error_unregister_irq_handlers: -+ for (i = 0; i < num_irq_handlers_registered; i++) { -+ irq = ls_dev->irqs[i]; -+ devm_free_irq(&ls_dev->dev, irq->irq_number, -+ &ls_dev->dev); -+ } -+ -+ return error; -+} -+ -+static int __cold -+dpaa2_dpio_probe(struct fsl_mc_device *ls_dev) -+{ -+ struct dpio_attr dpio_attrs; -+ struct dpaa2_io_desc desc; -+ struct dpio_priv *priv; -+ int err = -ENOMEM; -+ struct device *dev = &ls_dev->dev; -+ struct dpaa2_io *defservice; -+ bool irq_allocated = false; -+ static int next_cpu; -+ -+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); -+ if (!priv) -+ goto err_priv_alloc; -+ -+ dev_set_drvdata(dev, priv); -+ -+ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io); -+ if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); -+ err = -EPROBE_DEFER; -+ goto err_mcportal; -+ } -+ -+ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id, -+ &ls_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpio_open() failed\n"); -+ goto err_open; -+ } -+ -+ err = dpio_get_attributes(ls_dev->mc_io, 0, ls_dev->mc_handle, -+ &dpio_attrs); -+ if (err) { -+ dev_err(dev, "dpio_get_attributes() failed %d\n", err); -+ goto err_get_attr; -+ } -+ err = dpio_enable(ls_dev->mc_io, 0, ls_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpio_enable() failed %d\n", err); -+ goto err_get_attr; -+ } -+ pr_info("ce_paddr=0x%llx, ci_paddr=0x%llx, portalid=%d, prios=%d\n", -+ ls_dev->regions[0].start, -+ ls_dev->regions[1].start, -+ dpio_attrs.qbman_portal_id, -+ dpio_attrs.num_priorities); -+ -+ pr_info("ce_size=0x%llx, 
ci_size=0x%llx\n", -+ resource_size(&ls_dev->regions[0]), -+ resource_size(&ls_dev->regions[1])); -+ -+ desc.qman_version = dpio_attrs.qbman_version; -+ /* Build DPIO driver object out of raw MC object */ -+ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0; -+ desc.has_irq = 1; -+ desc.will_poll = 1; -+ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0; -+ desc.cpu = next_cpu; -+ desc.stash_affinity = next_cpu; -+ next_cpu = (next_cpu + 1) % num_active_cpus(); -+ desc.dpio_id = ls_dev->obj_desc.id; -+ desc.regs_cena = ioremap_cache_ns(ls_dev->regions[0].start, -+ resource_size(&ls_dev->regions[0])); -+ desc.regs_cinh = ioremap(ls_dev->regions[1].start, -+ resource_size(&ls_dev->regions[1])); -+ -+ err = fsl_mc_allocate_irqs(ls_dev); -+ if (err) { -+ dev_err(dev, "DPIO fsl_mc_allocate_irqs failed\n"); -+ desc.has_irq = 0; -+ } else { -+ irq_allocated = true; -+ -+ snprintf(priv->irq_name, MAX_DPIO_IRQ_NAME, "FSL DPIO %d", -+ desc.dpio_id); -+ -+ err = register_dpio_irq_handlers(ls_dev, desc.cpu); -+ if (err) -+ desc.has_irq = 0; -+ } -+ -+ priv->io = dpaa2_io_create(&desc); -+ if (!priv->io) { -+ dev_err(dev, "DPIO setup failed\n"); -+ goto err_dpaa2_io_create; -+ } -+ -+ /* If no irq then go to poll mode */ -+ if (desc.has_irq == 0) { -+ dev_info(dev, "Using polling mode for DPIO %d\n", -+ desc.dpio_id); -+ /* goto err_register_dpio_irq; */ -+ /* TEMP: Start polling if IRQ could not -+ be registered. 
This will go away once -+ KVM support for MSI is present */ -+ if (irq_allocated == true) -+ fsl_mc_free_irqs(ls_dev); -+ -+ if (desc.stash_affinity) -+ priv->thread = kthread_create_on_cpu(dpio_thread, -+ priv->io, -+ desc.cpu, -+ "dpio_aff%u"); -+ else -+ priv->thread = -+ kthread_create(dpio_thread, -+ priv->io, -+ "dpio_non%u", -+ dpio_attrs.qbman_portal_id); -+ if (IS_ERR(priv->thread)) { -+ dev_err(dev, "DPIO thread failure\n"); -+ err = PTR_ERR(priv->thread); -+ goto err_dpaa_thread; -+ } -+ wake_up_process(priv->thread); -+ } -+ -+ defservice = dpaa2_io_default_service(); -+ err = dpaa2_io_service_add(defservice, priv->io); -+ dpaa2_io_down(defservice); -+ if (err) { -+ dev_err(dev, "DPIO add-to-service failed\n"); -+ goto err_dpaa2_io_add; -+ } -+ -+ dev_info(dev, "dpio: probed object %d\n", ls_dev->obj_desc.id); -+ dev_info(dev, " receives_notifications = %d\n", -+ desc.receives_notifications); -+ dev_info(dev, " has_irq = %d\n", desc.has_irq); -+ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); -+ fsl_mc_portal_free(ls_dev->mc_io); -+ return 0; -+ -+err_dpaa2_io_add: -+ unregister_dpio_irq_handlers(ls_dev); -+/* TEMP: To be restored once polling is removed -+ err_register_dpio_irq: -+ fsl_mc_free_irqs(ls_dev); -+*/ -+err_dpaa_thread: -+err_dpaa2_io_create: -+ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle); -+err_get_attr: -+ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); -+err_open: -+ fsl_mc_portal_free(ls_dev->mc_io); -+err_mcportal: -+ dev_set_drvdata(dev, NULL); -+ devm_kfree(dev, priv); -+err_priv_alloc: -+ return err; -+} -+ -+/* -+ * Tear down interrupts for a given DPIO object -+ */ -+static void dpio_teardown_irqs(struct fsl_mc_device *ls_dev) -+{ -+ /* (void)disable_dpio_irqs(ls_dev); */ -+ unregister_dpio_irq_handlers(ls_dev); -+ fsl_mc_free_irqs(ls_dev); -+} -+ -+static int __cold -+dpaa2_dpio_remove(struct fsl_mc_device *ls_dev) -+{ -+ struct device *dev; -+ struct dpio_priv *priv; -+ int err; -+ -+ dev = &ls_dev->dev; -+ priv = 
dev_get_drvdata(dev); -+ -+ /* there is no implementation yet for pulling a DPIO object out of a -+ * running service (and they're currently always running). -+ */ -+ dev_crit(dev, "DPIO unplugging is broken, the service holds onto it\n"); -+ -+ if (priv->thread) -+ kthread_stop(priv->thread); -+ else -+ dpio_teardown_irqs(ls_dev); -+ -+ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io); -+ if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); -+ goto err_mcportal; -+ } -+ -+ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id, -+ &ls_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpio_open() failed\n"); -+ goto err_open; -+ } -+ -+ dev_set_drvdata(dev, NULL); -+ dpaa2_io_down(priv->io); -+ -+ err = 0; -+ -+ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle); -+ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); -+err_open: -+ fsl_mc_portal_free(ls_dev->mc_io); -+err_mcportal: -+ return err; -+} -+ -+static const struct fsl_mc_device_match_id dpaa2_dpio_match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpio", -+ .ver_major = DPIO_VER_MAJOR, -+ .ver_minor = DPIO_VER_MINOR -+ }, -+ { .vendor = 0x0 } -+}; -+ -+static struct fsl_mc_driver dpaa2_dpio_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpaa2_dpio_probe, -+ .remove = dpaa2_dpio_remove, -+ .match_id_table = dpaa2_dpio_match_id_table -+}; -+ -+static int dpio_driver_init(void) -+{ -+ int err; -+ -+ err = dpaa2_io_service_driver_init(); -+ if (!err) { -+ err = fsl_mc_driver_register(&dpaa2_dpio_driver); -+ if (err) -+ dpaa2_io_service_driver_exit(); -+ } -+ return err; -+} -+static void dpio_driver_exit(void) -+{ -+ fsl_mc_driver_unregister(&dpaa2_dpio_driver); -+ dpaa2_io_service_driver_exit(); -+} -+module_init(dpio_driver_init); -+module_exit(dpio_driver_exit); -diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h -new file mode 100644 -index 0000000..fe8d40b ---- 
/dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h -@@ -0,0 +1,33 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+int dpaa2_io_service_driver_init(void); -+void dpaa2_io_service_driver_exit(void); -diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.c b/drivers/staging/fsl-mc/bus/dpio/dpio.c -new file mode 100644 -index 0000000..b63edd6 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c -@@ -0,0 +1,468 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "../../include/mc-sys.h" -+#include "../../include/mc-cmd.h" -+#include "fsl_dpio.h" -+#include "fsl_dpio_cmd.h" -+ -+int dpio_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpio_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPIO_CMD_OPEN(cmd, dpio_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpio_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpio_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPIO_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int 
dpio_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpio_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpio_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ(cmd, 
irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpio_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpio_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpio_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, 
&cmd); -+} -+ -+int dpio_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpio_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpio_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpio_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_ATTR(cmd, attr); -+ -+ return 
0; -+} -+ -+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t sdest) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_STASHING_DEST(cmd, sdest); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t *sdest) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_STASHING_DEST(cmd, *sdest); -+ -+ return 0; -+} -+ -+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ uint8_t *channel_index) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL, -+ cmd_flags, -+ token); -+ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index); -+ -+ return 0; -+} -+ -+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL, -+ cmd_flags, -+ token); -+ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -diff --git 
a/drivers/staging/fsl-mc/bus/dpio/dpio_service.c b/drivers/staging/fsl-mc/bus/dpio/dpio_service.c -new file mode 100644 -index 0000000..ebcfd59 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio_service.c -@@ -0,0 +1,801 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include "fsl_qbman_portal.h" -+#include "../../include/mc.h" -+#include "../../include/fsl_dpaa2_io.h" -+#include "fsl_dpio.h" -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "dpio-drv.h" -+#include "qbman_debug.h" -+ -+#define UNIMPLEMENTED() pr_err("FOO: %s unimplemented!\n", __func__) -+ -+#define MAGIC_SERVICE 0xabcd9876 -+#define MAGIC_OBJECT 0x1234fedc -+ -+struct dpaa2_io { -+ /* If MAGIC_SERVICE, this is a group of objects, use the 'service' part -+ * of the union. If MAGIC_OBJECT, use the 'object' part of the union. If -+ * it's neither, something got corrupted. This is mainly to satisfy -+ * dpaa2_io_from_registration(), which dereferences a caller- -+ * instantiated struct and so warrants a bug-checking step - hence the -+ * magic rather than a boolean. -+ */ -+ unsigned int magic; -+ atomic_t refs; -+ union { -+ struct dpaa2_io_service { -+ spinlock_t lock; -+ struct list_head list; -+ /* for targeted dpaa2_io selection */ -+ struct dpaa2_io *objects_by_cpu[NR_CPUS]; -+ cpumask_t cpus_notifications; -+ cpumask_t cpus_stashing; -+ int has_nonaffine; -+ /* slight hack. record the special case of the -+ * "default service", because that's the case where we -+ * need to avoid a kfree() ... 
*/ -+ int is_defservice; -+ } service; -+ struct dpaa2_io_object { -+ struct dpaa2_io_desc dpio_desc; -+ struct qbman_swp_desc swp_desc; -+ struct qbman_swp *swp; -+ /* If the object is part of a service, this is it (and -+ * 'node' is linked into the service's list) */ -+ struct dpaa2_io *service; -+ struct list_head node; -+ /* Interrupt mask, as used with -+ * qbman_swp_interrupt_[gs]et_vanish(). This isn't -+ * locked, because the higher layer is driving all -+ * "ingress" processing. */ -+ uint32_t irq_mask; -+ /* As part of simplifying assumptions, we provide an -+ * irq-safe lock for each type of DPIO operation that -+ * isn't innately lockless. The selection algorithms -+ * (which are simplified) require this, whereas -+ * eventually adherence to cpu-affinity will presumably -+ * relax the locking requirements. */ -+ spinlock_t lock_mgmt_cmd; -+ spinlock_t lock_notifications; -+ struct list_head notifications; -+ } object; -+ }; -+}; -+ -+struct dpaa2_io_store { -+ unsigned int max; -+ dma_addr_t paddr; -+ struct dpaa2_dq *vaddr; -+ void *alloced_addr; /* the actual return from kmalloc as it may -+ be adjusted for alignment purposes */ -+ unsigned int idx; /* position of the next-to-be-returned entry */ -+ struct qbman_swp *swp; /* portal used to issue VDQCR */ -+ struct device *dev; /* device used for DMA mapping */ -+}; -+ -+static struct dpaa2_io def_serv; -+ -+/**********************/ -+/* Internal functions */ -+/**********************/ -+ -+static void service_init(struct dpaa2_io *d, int is_defservice) -+{ -+ struct dpaa2_io_service *s = &d->service; -+ -+ d->magic = MAGIC_SERVICE; -+ atomic_set(&d->refs, 1); -+ spin_lock_init(&s->lock); -+ INIT_LIST_HEAD(&s->list); -+ cpumask_clear(&s->cpus_notifications); -+ cpumask_clear(&s->cpus_stashing); -+ s->has_nonaffine = 0; -+ s->is_defservice = is_defservice; -+} -+ -+/* Selection algorithms, stupid ones at that. 
These are to handle the case where -+ * the given dpaa2_io is a service, by choosing the non-service dpaa2_io within -+ * it to use. -+ */ -+static struct dpaa2_io *_service_select_by_cpu_slow(struct dpaa2_io_service *ss, -+ int cpu) -+{ -+ struct dpaa2_io *o; -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(&ss->lock, irqflags); -+ /* TODO: this is about the dumbest and slowest selection algorithm you -+ * could imagine. (We're looking for something working first, and -+ * something efficient second...) -+ */ -+ list_for_each_entry(o, &ss->list, object.node) -+ if (o->object.dpio_desc.cpu == cpu) -+ goto found; -+ -+ /* No joy. Try the first nonaffine portal (bleurgh) */ -+ if (ss->has_nonaffine) -+ list_for_each_entry(o, &ss->list, object.node) -+ if (!o->object.dpio_desc.stash_affinity) -+ goto found; -+ -+ /* No joy. Try the first object. Told you it was horrible. */ -+ if (!list_empty(&ss->list)) -+ o = list_entry(ss->list.next, struct dpaa2_io, object.node); -+ else -+ o = NULL; -+ -+found: -+ spin_unlock_irqrestore(&ss->lock, irqflags); -+ return o; -+} -+ -+static struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d, int cpu) -+{ -+ struct dpaa2_io_service *ss; -+ unsigned long irqflags; -+ -+ if (!d) -+ d = &def_serv; -+ else if (d->magic == MAGIC_OBJECT) -+ return d; -+ BUG_ON(d->magic != MAGIC_SERVICE); -+ -+ ss = &d->service; -+ -+ /* If cpu==-1, choose the current cpu, with no guarantees about -+ * potentially being migrated away. 
-+ */ -+ if (unlikely(cpu < 0)) { -+ spin_lock_irqsave(&ss->lock, irqflags); -+ cpu = smp_processor_id(); -+ spin_unlock_irqrestore(&ss->lock, irqflags); -+ -+ return _service_select_by_cpu_slow(ss, cpu); -+ } -+ -+ /* If a specific cpu was requested, pick it up immediately */ -+ return ss->objects_by_cpu[cpu]; -+} -+ -+static inline struct dpaa2_io *service_select_any(struct dpaa2_io *d) -+{ -+ struct dpaa2_io_service *ss; -+ struct dpaa2_io *o; -+ unsigned long irqflags; -+ -+ if (!d) -+ d = &def_serv; -+ else if (d->magic == MAGIC_OBJECT) -+ return d; -+ BUG_ON(d->magic != MAGIC_SERVICE); -+ -+ /* -+ * Lock the service, looking for the first DPIO object in the list, -+ * ignore everything else about that DPIO, and choose it to do the -+ * operation! As a post-selection step, move the DPIO to the end of -+ * the list. It should improve load-balancing a little, although it -+ * might also incur a performance hit, given that the lock is *global* -+ * and this may be called on the fast-path... -+ */ -+ ss = &d->service; -+ spin_lock_irqsave(&ss->lock, irqflags); -+ if (!list_empty(&ss->list)) { -+ o = list_entry(ss->list.next, struct dpaa2_io, object.node); -+ list_del(&o->object.node); -+ list_add_tail(&o->object.node, &ss->list); -+ } else -+ o = NULL; -+ spin_unlock_irqrestore(&ss->lock, irqflags); -+ return o; -+} -+ -+/* If the context is not preemptible, select the service affine to the -+ * current cpu. Otherwise, "select any". 
-+ */ -+static inline struct dpaa2_io *_service_select(struct dpaa2_io *d) -+{ -+ struct dpaa2_io *temp = d; -+ -+ if (likely(!preemptible())) { -+ d = service_select_by_cpu(d, smp_processor_id()); -+ if (likely(d)) -+ return d; -+ } -+ return service_select_any(temp); -+} -+ -+/**********************/ -+/* Exported functions */ -+/**********************/ -+ -+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) -+{ -+ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL); -+ struct dpaa2_io_object *o = &ret->object; -+ -+ if (!ret) -+ return NULL; -+ ret->magic = MAGIC_OBJECT; -+ atomic_set(&ret->refs, 1); -+ o->dpio_desc = *desc; -+ o->swp_desc.cena_bar = o->dpio_desc.regs_cena; -+ o->swp_desc.cinh_bar = o->dpio_desc.regs_cinh; -+ o->swp_desc.qman_version = o->dpio_desc.qman_version; -+ o->swp = qbman_swp_init(&o->swp_desc); -+ o->service = NULL; -+ if (!o->swp) { -+ kfree(ret); -+ return NULL; -+ } -+ INIT_LIST_HEAD(&o->node); -+ spin_lock_init(&o->lock_mgmt_cmd); -+ spin_lock_init(&o->lock_notifications); -+ INIT_LIST_HEAD(&o->notifications); -+ if (!o->dpio_desc.has_irq) -+ qbman_swp_interrupt_set_vanish(o->swp, 0xffffffff); -+ else { -+ /* For now only enable DQRR interrupts */ -+ qbman_swp_interrupt_set_trigger(o->swp, -+ QBMAN_SWP_INTERRUPT_DQRI); -+ } -+ qbman_swp_interrupt_clear_status(o->swp, 0xffffffff); -+ if (o->dpio_desc.receives_notifications) -+ qbman_swp_push_set(o->swp, 0, 1); -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_create); -+ -+struct dpaa2_io *dpaa2_io_create_service(void) -+{ -+ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL); -+ -+ if (ret) -+ service_init(ret, 0); -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_create_service); -+ -+struct dpaa2_io *dpaa2_io_default_service(void) -+{ -+ atomic_inc(&def_serv.refs); -+ return &def_serv; -+} -+EXPORT_SYMBOL(dpaa2_io_default_service); -+ -+void dpaa2_io_down(struct dpaa2_io *d) -+{ -+ if (!atomic_dec_and_test(&d->refs)) -+ return; -+ if (d->magic == MAGIC_SERVICE) { -+ 
BUG_ON(!list_empty(&d->service.list)); -+ if (d->service.is_defservice) -+ /* avoid the kfree()! */ -+ return; -+ } else { -+ BUG_ON(d->magic != MAGIC_OBJECT); -+ BUG_ON(d->object.service); -+ BUG_ON(!list_empty(&d->object.notifications)); -+ } -+ kfree(d); -+} -+EXPORT_SYMBOL(dpaa2_io_down); -+ -+int dpaa2_io_service_add(struct dpaa2_io *s, struct dpaa2_io *o) -+{ -+ struct dpaa2_io_service *ss = &s->service; -+ struct dpaa2_io_object *oo = &o->object; -+ int res = -EINVAL; -+ -+ if ((s->magic != MAGIC_SERVICE) || (o->magic != MAGIC_OBJECT)) -+ return res; -+ atomic_inc(&o->refs); -+ atomic_inc(&s->refs); -+ spin_lock(&ss->lock); -+ /* 'obj' must not already be associated with a service */ -+ if (!oo->service) { -+ oo->service = s; -+ list_add(&oo->node, &ss->list); -+ if (oo->dpio_desc.receives_notifications) { -+ cpumask_set_cpu(oo->dpio_desc.cpu, -+ &ss->cpus_notifications); -+ /* Update the fast-access array */ -+ ss->objects_by_cpu[oo->dpio_desc.cpu] = -+ container_of(oo, struct dpaa2_io, object); -+ } -+ if (oo->dpio_desc.stash_affinity) -+ cpumask_set_cpu(oo->dpio_desc.cpu, -+ &ss->cpus_stashing); -+ if (!oo->dpio_desc.stash_affinity) -+ ss->has_nonaffine = 1; -+ /* success */ -+ res = 0; -+ } -+ spin_unlock(&ss->lock); -+ if (res) { -+ dpaa2_io_down(s); -+ dpaa2_io_down(o); -+ } -+ return res; -+} -+EXPORT_SYMBOL(dpaa2_io_service_add); -+ -+int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc) -+{ -+ if (obj->magic == MAGIC_SERVICE) -+ return -EINVAL; -+ BUG_ON(obj->magic != MAGIC_OBJECT); -+ *desc = obj->object.dpio_desc; -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_get_descriptor); -+ -+#define DPAA_POLL_MAX 32 -+ -+int dpaa2_io_poll(struct dpaa2_io *obj) -+{ -+ const struct dpaa2_dq *dq; -+ struct qbman_swp *swp; -+ int max = 0; -+ -+ if (obj->magic != MAGIC_OBJECT) -+ return -EINVAL; -+ swp = obj->object.swp; -+ dq = qbman_swp_dqrr_next(swp); -+ while (dq) { -+ if (qbman_result_is_SCN(dq)) { -+ struct dpaa2_io_notification_ctx 
*ctx; -+ uint64_t q64; -+ -+ q64 = qbman_result_SCN_ctx(dq); -+ ctx = (void *)q64; -+ ctx->cb(ctx); -+ } else -+ pr_crit("Unrecognised/ignored DQRR entry\n"); -+ qbman_swp_dqrr_consume(swp, dq); -+ ++max; -+ if (max > DPAA_POLL_MAX) -+ return 0; -+ dq = qbman_swp_dqrr_next(swp); -+ } -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_poll); -+ -+int dpaa2_io_irq(struct dpaa2_io *obj) -+{ -+ struct qbman_swp *swp; -+ uint32_t status; -+ -+ if (obj->magic != MAGIC_OBJECT) -+ return -EINVAL; -+ swp = obj->object.swp; -+ status = qbman_swp_interrupt_read_status(swp); -+ if (!status) -+ return IRQ_NONE; -+ dpaa2_io_poll(obj); -+ qbman_swp_interrupt_clear_status(swp, status); -+ qbman_swp_interrupt_set_inhibit(swp, 0); -+ return IRQ_HANDLED; -+} -+EXPORT_SYMBOL(dpaa2_io_irq); -+ -+int dpaa2_io_pause_poll(struct dpaa2_io *obj) -+{ -+ UNIMPLEMENTED(); -+ return -EINVAL; -+} -+EXPORT_SYMBOL(dpaa2_io_pause_poll); -+ -+int dpaa2_io_resume_poll(struct dpaa2_io *obj) -+{ -+ UNIMPLEMENTED(); -+ return -EINVAL; -+} -+EXPORT_SYMBOL(dpaa2_io_resume_poll); -+ -+void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask) -+{ -+ struct dpaa2_io_service *ss = &s->service; -+ -+ BUG_ON(s->magic != MAGIC_SERVICE); -+ cpumask_copy(mask, &ss->cpus_notifications); -+} -+EXPORT_SYMBOL(dpaa2_io_service_notifications); -+ -+void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask) -+{ -+ struct dpaa2_io_service *ss = &s->service; -+ -+ BUG_ON(s->magic != MAGIC_SERVICE); -+ cpumask_copy(mask, &ss->cpus_stashing); -+} -+EXPORT_SYMBOL(dpaa2_io_service_stashing); -+ -+int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s) -+{ -+ struct dpaa2_io_service *ss = &s->service; -+ -+ BUG_ON(s->magic != MAGIC_SERVICE); -+ return ss->has_nonaffine; -+} -+EXPORT_SYMBOL(dpaa2_io_service_has_nonaffine); -+ -+int dpaa2_io_service_register(struct dpaa2_io *d, -+ struct dpaa2_io_notification_ctx *ctx) -+{ -+ unsigned long irqflags; -+ -+ d = service_select_by_cpu(d, ctx->desired_cpu); -+ if (!d) -+ 
return -ENODEV; -+ ctx->dpio_id = d->object.dpio_desc.dpio_id; -+ ctx->qman64 = (uint64_t)ctx; -+ ctx->dpio_private = d; -+ spin_lock_irqsave(&d->object.lock_notifications, irqflags); -+ list_add(&ctx->node, &d->object.notifications); -+ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); -+ if (ctx->is_cdan) -+ /* Enable the generation of CDAN notifications */ -+ qbman_swp_CDAN_set_context_enable(d->object.swp, -+ (uint16_t)ctx->id, -+ ctx->qman64); -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_service_register); -+ -+int dpaa2_io_service_deregister(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx) -+{ -+ struct dpaa2_io *d = ctx->dpio_private; -+ unsigned long irqflags; -+ -+ if (!service) -+ service = &def_serv; -+ BUG_ON((service != d) && (service != d->object.service)); -+ if (ctx->is_cdan) -+ qbman_swp_CDAN_disable(d->object.swp, -+ (uint16_t)ctx->id); -+ spin_lock_irqsave(&d->object.lock_notifications, irqflags); -+ list_del(&ctx->node); -+ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_service_deregister); -+ -+int dpaa2_io_service_rearm(struct dpaa2_io *d, -+ struct dpaa2_io_notification_ctx *ctx) -+{ -+ unsigned long irqflags; -+ int err; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ if (ctx->is_cdan) -+ err = qbman_swp_CDAN_enable(d->object.swp, (uint16_t)ctx->id); -+ else -+ err = qbman_swp_fq_schedule(d->object.swp, ctx->id); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_rearm); -+ -+int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx, -+ struct dpaa2_io **io) -+{ -+ struct dpaa2_io_notification_ctx *tmp; -+ struct dpaa2_io *d = ctx->dpio_private; -+ unsigned long irqflags; -+ int ret = 0; -+ -+ BUG_ON(d->magic != MAGIC_OBJECT); -+ /* Iterate the notifications associated with 'd' looking for a match. 
If -+ * not, we've been passed an unregistered ctx! */ -+ spin_lock_irqsave(&d->object.lock_notifications, irqflags); -+ list_for_each_entry(tmp, &d->object.notifications, node) -+ if (tmp == ctx) -+ goto found; -+ ret = -EINVAL; -+found: -+ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); -+ if (!ret) { -+ atomic_inc(&d->refs); -+ *io = d; -+ } -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_from_registration); -+ -+int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu, -+ struct dpaa2_io **ret) -+{ -+ if (cpu == -1) -+ *ret = service_select_any(service); -+ else -+ *ret = service_select_by_cpu(service, cpu); -+ if (*ret) { -+ atomic_inc(&(*ret)->refs); -+ return 0; -+ } -+ return -ENODEV; -+} -+EXPORT_SYMBOL(dpaa2_io_service_get_persistent); -+ -+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid, -+ struct dpaa2_io_store *s) -+{ -+ struct qbman_pull_desc pd; -+ int err; -+ -+ qbman_pull_desc_clear(&pd); -+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); -+ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max); -+ qbman_pull_desc_set_fq(&pd, fqid); -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ s->swp = d->object.swp; -+ err = qbman_swp_pull(d->object.swp, &pd); -+ if (err) -+ s->swp = NULL; -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_pull_fq); -+ -+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid, -+ struct dpaa2_io_store *s) -+{ -+ struct qbman_pull_desc pd; -+ int err; -+ -+ qbman_pull_desc_clear(&pd); -+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); -+ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max); -+ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio); -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ s->swp = d->object.swp; -+ err = qbman_swp_pull(d->object.swp, &pd); -+ if (err) -+ s->swp = NULL; -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_pull_channel); -+ -+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, -+ 
uint32_t fqid, -+ const struct dpaa2_fd *fd) -+{ -+ struct qbman_eq_desc ed; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ qbman_eq_desc_clear(&ed); -+ qbman_eq_desc_set_no_orp(&ed, 0); -+ qbman_eq_desc_set_fq(&ed, fqid); -+ return qbman_swp_enqueue(d->object.swp, &ed, -+ (const struct qbman_fd *)fd); -+} -+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq); -+ -+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, -+ uint32_t qdid, uint8_t prio, uint16_t qdbin, -+ const struct dpaa2_fd *fd) -+{ -+ struct qbman_eq_desc ed; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ qbman_eq_desc_clear(&ed); -+ qbman_eq_desc_set_no_orp(&ed, 0); -+ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio); -+ return qbman_swp_enqueue(d->object.swp, &ed, -+ (const struct qbman_fd *)fd); -+} -+EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd); -+ -+int dpaa2_io_service_release(struct dpaa2_io *d, -+ uint32_t bpid, -+ const uint64_t *buffers, -+ unsigned int num_buffers) -+{ -+ struct qbman_release_desc rd; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ qbman_release_desc_clear(&rd); -+ qbman_release_desc_set_bpid(&rd, bpid); -+ return qbman_swp_release(d->object.swp, &rd, buffers, num_buffers); -+} -+EXPORT_SYMBOL(dpaa2_io_service_release); -+ -+int dpaa2_io_service_acquire(struct dpaa2_io *d, -+ uint32_t bpid, -+ uint64_t *buffers, -+ unsigned int num_buffers) -+{ -+ unsigned long irqflags; -+ int err; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ err = qbman_swp_acquire(d->object.swp, bpid, buffers, num_buffers); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_acquire); -+ -+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, -+ struct device *dev) -+{ -+ struct dpaa2_io_store *ret = kmalloc(sizeof(*ret), GFP_KERNEL); -+ size_t size; -+ -+ BUG_ON(!max_frames || (max_frames > 16)); -+ if (!ret) -+ return 
NULL; -+ ret->max = max_frames; -+ size = max_frames * sizeof(struct dpaa2_dq) + 64; -+ ret->alloced_addr = kmalloc(size, GFP_KERNEL); -+ if (!ret->alloced_addr) { -+ kfree(ret); -+ return NULL; -+ } -+ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64); -+ ret->paddr = dma_map_single(dev, ret->vaddr, -+ sizeof(struct dpaa2_dq) * max_frames, -+ DMA_FROM_DEVICE); -+ if (dma_mapping_error(dev, ret->paddr)) { -+ kfree(ret->alloced_addr); -+ kfree(ret); -+ return NULL; -+ } -+ ret->idx = 0; -+ ret->dev = dev; -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_store_create); -+ -+void dpaa2_io_store_destroy(struct dpaa2_io_store *s) -+{ -+ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max, -+ DMA_FROM_DEVICE); -+ kfree(s->alloced_addr); -+ kfree(s); -+} -+EXPORT_SYMBOL(dpaa2_io_store_destroy); -+ -+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last) -+{ -+ int match; -+ struct dpaa2_dq *ret = &s->vaddr[s->idx]; -+ -+ match = qbman_result_has_new_result(s->swp, ret); -+ if (!match) { -+ *is_last = 0; -+ return NULL; -+ } -+ BUG_ON(!qbman_result_is_DQ(ret)); -+ s->idx++; -+ if (dpaa2_dq_is_pull_complete(ret)) { -+ *is_last = 1; -+ s->idx = 0; -+ /* If we get an empty dequeue result to terminate a zero-results -+ * vdqcr, return NULL to the caller rather than expecting him to -+ * check non-NULL results every time. 
*/ -+ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME)) -+ ret = NULL; -+ } else -+ *is_last = 0; -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_store_next); -+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid, -+ uint32_t *fcnt, uint32_t *bcnt) -+{ -+ struct qbman_attr state; -+ struct qbman_swp *swp; -+ unsigned long irqflags; -+ int ret; -+ -+ d = service_select_any(d); -+ if (!d) -+ return -ENODEV; -+ -+ swp = d->object.swp; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ ret = qbman_fq_query_state(swp, fqid, &state); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ if (ret) -+ return ret; -+ *fcnt = qbman_fq_state_frame_count(&state); -+ *bcnt = qbman_fq_state_byte_count(&state); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_query_fq_count); -+ -+int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, -+ uint32_t *num) -+{ -+ struct qbman_attr state; -+ struct qbman_swp *swp; -+ unsigned long irqflags; -+ int ret; -+ -+ d = service_select_any(d); -+ if (!d) -+ return -ENODEV; -+ -+ swp = d->object.swp; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ ret = qbman_bp_query(swp, bpid, &state); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ if (ret) -+ return ret; -+ *num = qbman_bp_info_num_free_bufs(&state); -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_query_bp_count); -+ -+#endif -+ -+/* module init/exit hooks called from dpio-drv.c. These are declared in -+ * dpio-drv.h. 
-+ */ -+int dpaa2_io_service_driver_init(void) -+{ -+ service_init(&def_serv, 1); -+ return 0; -+} -+ -+void dpaa2_io_service_driver_exit(void) -+{ -+ if (atomic_read(&def_serv.refs) != 1) -+ pr_err("default DPIO service leaves dangling DPIO objects!\n"); -+} -diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h -new file mode 100644 -index 0000000..88a492f ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h -@@ -0,0 +1,460 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPIO_H -+#define __FSL_DPIO_H -+ -+/* Data Path I/O Portal API -+ * Contains initialization APIs and runtime control APIs for DPIO -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpio_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpio_id: DPIO unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpio_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpio_id, -+ uint16_t *token); -+ -+/** -+ * dpio_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpio_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpio_channel_mode - DPIO notification channel mode -+ * @DPIO_NO_CHANNEL: No support for notification channel -+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a -+ * dedicated channel in the DPIO; user should point the queue's -+ * destination in the relevant interface to this DPIO -+ */ -+enum dpio_channel_mode { -+ DPIO_NO_CHANNEL = 0, -+ DPIO_LOCAL_CHANNEL = 1, -+}; -+ -+/** -+ * struct dpio_cfg - Structure representing DPIO configuration -+ * @channel_mode: Notification channel mode -+ * @num_priorities: Number of priorities for the notification channel (1-8); -+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' -+ */ -+struct dpio_cfg { -+ enum dpio_channel_mode channel_mode; -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpio_create() - Create the DPIO object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPIO object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpio_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpio_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpio_destroy() - Destroy the DPIO object and release all its resources. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_enable() - Enable the DPIO, allow I/O portal operations. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_is_enabled() - Check if the DPIO is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpio_reset() - Reset the DPIO, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_set_stashing_destination() - Set the stashing destination. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @sdest: stashing destination value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t sdest); -+ -+/** -+ * dpio_get_stashing_destination() - Get the stashing destination.. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @sdest: Returns the stashing destination value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t *sdest); -+ -+/** -+ * dpio_add_static_dequeue_channel() - Add a static dequeue channel. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @dpcon_id: DPCON object ID -+ * @channel_index: Returned channel index to be used in qbman API -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ uint8_t *channel_index); -+ -+/** -+ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @dpcon_id: DPCON object ID -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id); -+ -+/** -+ * DPIO IRQ Index and Events -+ */ -+ -+/** -+ * Irq software-portal index -+ */ -+#define DPIO_IRQ_SWP_INDEX 0 -+ -+/** -+ * struct dpio_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpio_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpio_set_irq() - Set IRQ information for the DPIO to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpio_irq_cfg *irq_cfg); -+ -+/** -+ * dpio_get_irq() - Get IRQ information from the DPIO. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpio_irq_cfg *irq_cfg); -+ -+/** -+ * dpio_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpio_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpio_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpio_get_irq_mask() - Get interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpio_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpio_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpio_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpio_attr - Structure representing DPIO attributes -+ * @id: DPIO object ID -+ * @version: DPIO version -+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area -+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area -+ * @qbman_portal_id: Software portal ID -+ * @channel_mode: Notification channel mode -+ * @num_priorities: Number of priorities for the notification channel (1-8); -+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' -+ * @qbman_version: QBMAN version -+ */ -+struct dpio_attr { -+ int id; -+ /** -+ * struct version - DPIO version -+ * @major: DPIO major version -+ * @minor: DPIO minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint64_t qbman_portal_ce_offset; -+ uint64_t qbman_portal_ci_offset; -+ uint16_t qbman_portal_id; -+ enum dpio_channel_mode channel_mode; -+ uint8_t num_priorities; -+ uint32_t qbman_version; -+}; -+ -+/** -+ * dpio_get_attributes() - Retrieve DPIO attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpio_attr *attr); -+#endif /* __FSL_DPIO_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h -new file mode 100644 -index 0000000..f339cd6 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h -@@ -0,0 +1,184 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPIO_CMD_H -+#define _FSL_DPIO_CMD_H -+ -+/* DPIO Version */ -+#define DPIO_VER_MAJOR 3 -+#define DPIO_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPIO_CMDID_CLOSE 0x800 -+#define DPIO_CMDID_OPEN 0x803 -+#define DPIO_CMDID_CREATE 0x903 -+#define DPIO_CMDID_DESTROY 0x900 -+ -+#define DPIO_CMDID_ENABLE 0x002 -+#define DPIO_CMDID_DISABLE 0x003 -+#define DPIO_CMDID_GET_ATTR 0x004 -+#define DPIO_CMDID_RESET 0x005 -+#define DPIO_CMDID_IS_ENABLED 0x006 -+ -+#define DPIO_CMDID_SET_IRQ 0x010 -+#define DPIO_CMDID_GET_IRQ 0x011 -+#define DPIO_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPIO_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPIO_CMDID_SET_IRQ_MASK 0x014 -+#define DPIO_CMDID_GET_IRQ_MASK 0x015 -+#define DPIO_CMDID_GET_IRQ_STATUS 0x016 -+#define DPIO_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPIO_CMDID_SET_STASHING_DEST 0x120 -+#define DPIO_CMDID_GET_STASHING_DEST 0x121 -+#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x122 -+#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x123 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_OPEN(cmd, dpio_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \ -+ cfg->channel_mode);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, 
irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while 
(0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_portal_id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ -+ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode, attr->channel_mode);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->qbman_portal_ce_offset);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, attr->qbman_portal_ci_offset);\ -+ MC_RSP_OP(cmd, 3, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 3, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->qbman_version);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+#endif /* _FSL_DPIO_CMD_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h -new file mode 100644 -index 0000000..2874ff8 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h -@@ -0,0 +1,123 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_QBMAN_BASE_H -+#define _FSL_QBMAN_BASE_H -+ -+/** -+ * struct qbman_block_desc - qbman block descriptor structure -+ * -+ * Descriptor for a QBMan instance on the SoC. 
On partitions/targets that do not -+ * control this QBMan instance, these values may simply be place-holders. The -+ * idea is simply that we be able to distinguish between them, eg. so that SWP -+ * descriptors can identify which QBMan instance they belong to. -+ */ -+struct qbman_block_desc { -+ void *ccsr_reg_bar; /* CCSR register map */ -+ int irq_rerr; /* Recoverable error interrupt line */ -+ int irq_nrerr; /* Non-recoverable error interrupt line */ -+}; -+ -+/** -+ * struct qbman_swp_desc - qbman software portal descriptor structure -+ * -+ * Descriptor for a QBMan software portal, expressed in terms that make sense to -+ * the user context. Ie. on MC, this information is likely to be true-physical, -+ * and instantiated statically at compile-time. On GPP, this information is -+ * likely to be obtained via "discovery" over a partition's "layerscape bus" -+ * (ie. in response to a MC portal command), and would take into account any -+ * virtualisation of the GPP user's address space and/or interrupt numbering. -+ */ -+struct qbman_swp_desc { -+ const struct qbman_block_desc *block; /* The QBMan instance */ -+ void *cena_bar; /* Cache-enabled portal register map */ -+ void *cinh_bar; /* Cache-inhibited portal register map */ -+ uint32_t qman_version; -+}; -+ -+/* Driver object for managing a QBMan portal */ -+struct qbman_swp; -+ -+/** -+ * struct qbman_fd - basci structure for qbman frame descriptor -+ * -+ * Place-holder for FDs, we represent it via the simplest form that we need for -+ * now. Different overlays may be needed to support different options, etc. (It -+ * is impractical to define One True Struct, because the resulting encoding -+ * routines (lots of read-modify-writes) would be worst-case performance whether -+ * or not circumstances required them.) 
-+ * -+ * Note, as with all data-structures exchanged between software and hardware (be -+ * they located in the portal register map or DMA'd to and from main-memory), -+ * the driver ensures that the caller of the driver API sees the data-structures -+ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words -+ * contained within this structure are represented in host-endianness, even if -+ * hardware always treats them as little-endian. As such, if any of these fields -+ * are interpreted in a binary (rather than numerical) fashion by hardware -+ * blocks (eg. accelerators), then the user should be careful. We illustrate -+ * with an example; -+ * -+ * Suppose the desired behaviour of an accelerator is controlled by the "frc" -+ * field of the FDs that are sent to it. Suppose also that the behaviour desired -+ * by the user corresponds to an "frc" value which is expressed as the literal -+ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit -+ * value in which 0xfe is the first byte and 0xba is the last byte, and as -+ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If -+ * the software is little-endian also, this can simply be achieved by setting -+ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set -+ * frc=0xfeedabba! The best away of avoiding trouble with this sort of thing is -+ * to treat the 32-bit words as numerical values, in which the offset of a field -+ * from the beginning of the first byte (as required or generated by hardware) -+ * is numerically encoded by a left-shift (ie. by raising the field to a -+ * corresponding power of 2). Ie. 
in the current example, software could set -+ * "frc" in the following way, and it would work correctly on both little-endian -+ * and big-endian operation; -+ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24); -+ */ -+struct qbman_fd { -+ union { -+ uint32_t words[8]; -+ struct qbman_fd_simple { -+ uint32_t addr_lo; -+ uint32_t addr_hi; -+ uint32_t len; -+ /* offset in the MS 16 bits, BPID in the LS 16 bits */ -+ uint32_t bpid_offset; -+ uint32_t frc; /* frame context */ -+ /* "err", "va", "cbmt", "asal", [...] */ -+ uint32_t ctrl; -+ /* flow context */ -+ uint32_t flc_lo; -+ uint32_t flc_hi; -+ } simple; -+ }; -+}; -+ -+#endif /* !_FSL_QBMAN_BASE_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h -new file mode 100644 -index 0000000..c9e543e ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h -@@ -0,0 +1,753 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_QBMAN_PORTAL_H -+#define _FSL_QBMAN_PORTAL_H -+ -+#include "fsl_qbman_base.h" -+ -+/** -+ * qbman_swp_init() - Create a functional object representing the given -+ * QBMan portal descriptor. -+ * @d: the given qbman swp descriptor -+ * -+ * Return qbman_swp portal object for success, NULL if the object cannot -+ * be created. -+ */ -+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d); -+/** -+ * qbman_swp_finish() - Create and destroy a functional object representing -+ * the given QBMan portal descriptor. -+ * @p: the qbman_swp object to be destroyed. -+ * -+ */ -+void qbman_swp_finish(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_get_desc() - Get the descriptor of the given portal object. -+ * @p: the given portal object. -+ * -+ * Return the descriptor for this portal. -+ */ -+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p); -+ -+ /**************/ -+ /* Interrupts */ -+ /**************/ -+ -+/* See the QBMan driver API documentation for details on the interrupt -+ * mechanisms. 
*/ -+#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001) -+#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002) -+#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004) -+#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008) -+#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010) -+#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020) -+ -+/** -+ * qbman_swp_interrupt_get_vanish() -+ * qbman_swp_interrupt_set_vanish() - Get/Set the data in software portal -+ * interrupt status disable register. -+ * @p: the given software portal object. -+ * @mask: The mask to set in SWP_IDSR register. -+ * -+ * Return the settings in SWP_ISDR register for Get function. -+ */ -+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p); -+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_read_status() -+ * qbman_swp_interrupt_clear_status() - Get/Set the data in software portal -+ * interrupt status register. -+ * @p: the given software portal object. -+ * @mask: The mask to set in SWP_ISR register. -+ * -+ * Return the settings in SWP_ISR register for Get function. -+ * -+ */ -+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p); -+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_get_trigger() -+ * qbman_swp_interrupt_set_trigger() - Get/Set the data in software portal -+ * interrupt enable register. -+ * @p: the given software portal object. -+ * @mask: The mask to set in SWP_IER register. -+ * -+ * Return the settings in SWP_IER register for Get function. -+ */ -+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p); -+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_get_inhibit() -+ * qbman_swp_interrupt_set_inhibit() - Set/Set the data in software portal -+ * interrupt inhibit register. -+ * @p: the given software portal object. 
-+ * @mask: The mask to set in SWP_IIR register. -+ * -+ * Return the settings in SWP_IIR register for Get function. -+ */ -+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p); -+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit); -+ -+ /************/ -+ /* Dequeues */ -+ /************/ -+ -+/* See the QBMan driver API documentation for details on the enqueue -+ * mechanisms. NB: the use of a 'dpaa2_' prefix for this type is because it is -+ * primarily used by the "DPIO" layer that sits above (and hides) the QBMan -+ * driver. The structure is defined in the DPIO interface, but to avoid circular -+ * dependencies we just pre/re-declare it here opaquely. */ -+struct dpaa2_dq; -+ -+/* ------------------- */ -+/* Push-mode dequeuing */ -+/* ------------------- */ -+ -+/** -+ * qbman_swp_push_get() - Get the push dequeue setup. -+ * @p: the software portal object. -+ * @channel_idx: the channel index to query. -+ * @enabled: returned boolean to show whether the push dequeue is enabled for -+ * the given channel. -+ */ -+void qbman_swp_push_get(struct qbman_swp *, uint8_t channel_idx, int *enabled); -+/** -+ * qbman_swp_push_set() - Enable or disable push dequeue. -+ * @p: the software portal object. -+ * @channel_idx: the channel index.. -+ * @enable: enable or disable push dequeue. -+ * -+ * The user of a portal can enable and disable push-mode dequeuing of up to 16 -+ * channels independently. It does not specify this toggling by channel IDs, but -+ * rather by specifying the index (from 0 to 15) that has been mapped to the -+ * desired channel. 
-+ */ -+void qbman_swp_push_set(struct qbman_swp *, uint8_t channel_idx, int enable); -+ -+/* ------------------- */ -+/* Pull-mode dequeuing */ -+/* ------------------- */ -+ -+/** -+ * struct qbman_pull_desc - the structure for pull dequeue descriptor -+ */ -+struct qbman_pull_desc { -+ uint32_t dont_manipulate_directly[6]; -+}; -+ -+enum qbman_pull_type_e { -+ /* dequeue with priority precedence, respect intra-class scheduling */ -+ qbman_pull_type_prio = 1, -+ /* dequeue with active FQ precedence, respect ICS */ -+ qbman_pull_type_active, -+ /* dequeue with active FQ precedence, no ICS */ -+ qbman_pull_type_active_noics -+}; -+ -+/** -+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to -+ * default/starting state. -+ * @d: the pull dequeue descriptor to be cleared. -+ */ -+void qbman_pull_desc_clear(struct qbman_pull_desc *d); -+ -+/** -+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage -+ * @d: the pull dequeue descriptor to be set. -+ * @storage: the pointer of the memory to store the dequeue result. -+ * @storage_phys: the physical address of the storage memory. -+ * @stash: to indicate whether write allocate is enabled. -+ * -+ * If not called, or if called with 'storage' as NULL, the result pull dequeues -+ * will produce results to DQRR. If 'storage' is non-NULL, then results are -+ * produced to the given memory location (using the physical/DMA address which -+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not -+ * those writes to main-memory express a cache-warming attribute. -+ */ -+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, -+ struct dpaa2_dq *storage, -+ dma_addr_t storage_phys, -+ int stash); -+/** -+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued. -+ * @d: the pull dequeue descriptor to be set. -+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive. 
-+ */ -+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *, uint8_t numframes); -+ -+/** -+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues. -+ * @fqid: the frame queue index of the given FQ. -+ * -+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues. -+ * @wqid: composed of channel id and wqid within the channel. -+ * @dct: the dequeue command type. -+ * -+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command -+ * dequeues. -+ * @chid: the channel id to be dequeued. -+ * @dct: the dequeue command type. -+ * -+ * Exactly one of the following descriptor "actions" should be set. (Calling any -+ * one of these will replace the effect of any prior call to one of these.) -+ * - pull dequeue from the given frame queue (FQ) -+ * - pull dequeue from any FQ in the given work queue (WQ) -+ * - pull dequeue from any FQ in any WQ in the given channel -+ */ -+void qbman_pull_desc_set_fq(struct qbman_pull_desc *, uint32_t fqid); -+void qbman_pull_desc_set_wq(struct qbman_pull_desc *, uint32_t wqid, -+ enum qbman_pull_type_e dct); -+void qbman_pull_desc_set_channel(struct qbman_pull_desc *, uint32_t chid, -+ enum qbman_pull_type_e dct); -+ -+/** -+ * qbman_swp_pull() - Issue the pull dequeue command -+ * @s: the software portal object. -+ * @d: the software portal descriptor which has been configured with -+ * the set of qbman_pull_desc_set_*() calls. -+ * -+ * Return 0 for success, and -EBUSY if the software portal is not ready -+ * to do pull dequeue. -+ */ -+int qbman_swp_pull(struct qbman_swp *, struct qbman_pull_desc *d); -+ -+/* -------------------------------- */ -+/* Polling DQRR for dequeue results */ -+/* -------------------------------- */ -+ -+/** -+ * qbman_swp_dqrr_next() - Get an valid DQRR entry. -+ * @s: the software portal object. -+ * -+ * Return NULL if there are no unconsumed DQRR entries. 
Return a DQRR entry -+ * only once, so repeated calls can return a sequence of DQRR entries, without -+ * requiring they be consumed immediately or in any particular order. -+ */ -+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s); -+ -+/** -+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from -+ * qbman_swp_dqrr_next(). -+ * @s: the software portal object. -+ * @dq: the DQRR entry to be consumed. -+ */ -+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq); -+ -+/* ------------------------------------------------- */ -+/* Polling user-provided storage for dequeue results */ -+/* ------------------------------------------------- */ -+/** -+ * qbman_result_has_new_result() - Check and get the dequeue response from the -+ * dq storage memory set in pull dequeue command -+ * @s: the software portal object. -+ * @dq: the dequeue result read from the memory. -+ * -+ * Only used for user-provided storage of dequeue results, not DQRR. For -+ * efficiency purposes, the driver will perform any required endianness -+ * conversion to ensure that the user's dequeue result storage is in host-endian -+ * format (whether or not that is the same as the little-endian format that -+ * hardware DMA'd to the user's storage). As such, once the user has called -+ * qbman_result_has_new_result() and been returned a valid dequeue result, -+ * they should not call it again on the same memory location (except of course -+ * if another dequeue command has been executed to produce a new result to that -+ * location). -+ * -+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid -+ * dequeue result. 
-+ */ -+int qbman_result_has_new_result(struct qbman_swp *, -+ const struct dpaa2_dq *); -+ -+/* -------------------------------------------------------- */ -+/* Parsing dequeue entries (DQRR and user-provided storage) */ -+/* -------------------------------------------------------- */ -+ -+/** -+ * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not -+ * @dq: the dequeue result to be checked. -+ * -+ * DQRR entries may contain non-dequeue results, ie. notifications -+ */ -+int qbman_result_is_DQ(const struct dpaa2_dq *); -+ -+/** -+ * qbman_result_is_SCN() - Check the dequeue result is notification or not -+ * @dq: the dequeue result to be checked. -+ * -+ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change -+ * notifications" of one type or another. Some APIs apply to all of them, of the -+ * form qbman_result_SCN_***(). -+ */ -+static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq) -+{ -+ return !qbman_result_is_DQ(dq); -+} -+ -+/** -+ * Recognise different notification types, only required if the user allows for -+ * these to occur, and cares about them when they do. 
-+ */ -+int qbman_result_is_FQDAN(const struct dpaa2_dq *); -+ /* FQ Data Availability */ -+int qbman_result_is_CDAN(const struct dpaa2_dq *); -+ /* Channel Data Availability */ -+int qbman_result_is_CSCN(const struct dpaa2_dq *); -+ /* Congestion State Change */ -+int qbman_result_is_BPSCN(const struct dpaa2_dq *); -+ /* Buffer Pool State Change */ -+int qbman_result_is_CGCU(const struct dpaa2_dq *); -+ /* Congestion Group Count Update */ -+/* Frame queue state change notifications; (FQDAN in theory counts too as it -+ * leaves a FQ parked, but it is primarily a data availability notification) */ -+int qbman_result_is_FQRN(const struct dpaa2_dq *); /* Retirement */ -+int qbman_result_is_FQRNI(const struct dpaa2_dq *); -+ /* Retirement Immediate */ -+int qbman_result_is_FQPN(const struct dpaa2_dq *); /* Park */ -+ -+/* NB: for parsing dequeue results (when "is_DQ" is TRUE), use the higher-layer -+ * dpaa2_dq_*() functions. */ -+ -+/* State-change notifications (FQDAN/CDAN/CSCN/...). */ -+/** -+ * qbman_result_SCN_state() - Get the state field in State-change notification -+ */ -+uint8_t qbman_result_SCN_state(const struct dpaa2_dq *); -+/** -+ * qbman_result_SCN_rid() - Get the resource id in State-change notification -+ */ -+uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *); -+/** -+ * qbman_result_SCN_ctx() - Get the context data in State-change notification -+ */ -+uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *); -+/** -+ * qbman_result_SCN_state_in_mem() - Get the state field in State-change -+ * notification which is written to memory instead of DQRR. -+ */ -+uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *); -+/** -+ * qbman_result_SCN_rid_in_mem() - Get the resource id in State-change -+ * notification which is written to memory instead of DQRR. -+ */ -+uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *); -+ -+/* Type-specific "resource IDs". 
Mainly for illustration purposes, though it -+ * also gives the appropriate type widths. */ -+#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq) -+#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq) -+#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq) -+#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq) -+#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq)) -+#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq)) -+ -+/** -+ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN -+ * -+ * Return the buffer pool id. -+ */ -+uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *); -+/** -+ * qbman_result_bpscn_has_free_bufs() - Check whether there are free -+ * buffers in the pool from BPSCN. -+ * -+ * Return the number of free buffers. -+ */ -+int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *); -+/** -+ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the -+ * buffer pool is depleted. -+ * -+ * Return the status of buffer pool depletion. -+ */ -+int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *); -+/** -+ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer -+ * pool is surplus or not. -+ * -+ * Return the status of buffer pool surplus. -+ */ -+int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *); -+/** -+ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message -+ * -+ * Return the BPSCN context. -+ */ -+uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *); -+ -+/* Parsing CGCU */ -+/** -+ * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid -+ * -+ * Return the CGCU resource id. -+ */ -+uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *); -+/** -+ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU -+ * -+ * Return instantaneous count in the CGCU notification. 
-+ */ -+uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *); -+ -+ /************/ -+ /* Enqueues */ -+ /************/ -+/** -+ * struct qbman_eq_desc - structure of enqueue descriptor -+ */ -+struct qbman_eq_desc { -+ uint32_t dont_manipulate_directly[8]; -+}; -+ -+/** -+ * struct qbman_eq_response - structure of enqueue response -+ */ -+struct qbman_eq_response { -+ uint32_t dont_manipulate_directly[16]; -+}; -+ -+/** -+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to -+ * default/starting state. -+ */ -+void qbman_eq_desc_clear(struct qbman_eq_desc *); -+ -+/* Exactly one of the following descriptor "actions" should be set. (Calling -+ * any one of these will replace the effect of any prior call to one of these.) -+ * - enqueue without order-restoration -+ * - enqueue with order-restoration -+ * - fill a hole in the order-restoration sequence, without any enqueue -+ * - advance NESN (Next Expected Sequence Number), without any enqueue -+ * 'respond_success' indicates whether an enqueue response should be DMA'd -+ * after success (otherwise a response is DMA'd only after failure). -+ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to -+ * be enqueued. -+ */ -+/** -+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp -+ * @d: the enqueue descriptor. -+ * @response_success: 1 = enqueue with response always; 0 = enqueue with -+ * rejections returned on a FQ. -+ */ -+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success); -+ -+/** -+ * qbman_eq_desc_set_orp() - Set order-resotration in the enqueue descriptor -+ * @d: the enqueue descriptor. -+ * @response_success: 1 = enqueue with response always; 0 = enqueue with -+ * rejections returned on a FQ. -+ * @opr_id: the order point record id. -+ * @seqnum: the order restoration sequence number. -+ * @incomplete: indiates whether this is the last fragments using the same -+ * sequeue number. 
-+ */ -+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success, -+ uint32_t opr_id, uint32_t seqnum, int incomplete); -+ -+/** -+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence -+ * without any enqueue -+ * @d: the enqueue descriptor. -+ * @opr_id: the order point record id. -+ * @seqnum: the order restoration sequence number. -+ */ -+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum); -+ -+/** -+ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number) -+ * without any enqueue -+ * @d: the enqueue descriptor. -+ * @opr_id: the order point record id. -+ * @seqnum: the order restoration sequence number. -+ */ -+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum); -+ -+/** -+ * qbman_eq_desc_set_response() - Set the enqueue response info. -+ * @d: the enqueue descriptor -+ * @storage_phys: the physical address of the enqueue response in memory. -+ * @stash: indicate that the write allocation enabled or not. -+ * -+ * In the case where an enqueue response is DMA'd, this determines where that -+ * response should go. (The physical/DMA address is given for hardware's -+ * benefit, but software should interpret it as a "struct qbman_eq_response" -+ * data structure.) 'stash' controls whether or not the write to main-memory -+ * expresses a cache-warming attribute. -+ */ -+void qbman_eq_desc_set_response(struct qbman_eq_desc *d, -+ dma_addr_t storage_phys, -+ int stash); -+/** -+ * qbman_eq_desc_set_token() - Set token for the enqueue command -+ * @d: the enqueue descriptor -+ * @token: the token to be set. -+ * -+ * token is the value that shows up in an enqueue response that can be used to -+ * detect when the results have been published. The easiest technique is to zero -+ * result "storage" before issuing an enqueue, and use any non-zero 'token' -+ * value. 
-+ */ -+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token); -+ -+/** -+ * qbman_eq_desc_set_fq() -+ * qbman_eq_desc_set_qd() - Set eithe FQ or Queuing Destination for the enqueue -+ * command. -+ * @d: the enqueue descriptor -+ * @fqid: the id of the frame queue to be enqueued. -+ * @qdid: the id of the queuing destination to be enqueued. -+ * @qd_bin: the queuing destination bin -+ * @qd_prio: the queuing destination priority. -+ * -+ * Exactly one of the following descriptor "targets" should be set. (Calling any -+ * one of these will replace the effect of any prior call to one of these.) -+ * - enqueue to a frame queue -+ * - enqueue to a queuing destination -+ * Note, that none of these will have any affect if the "action" type has been -+ * set to "orp_hole" or "orp_nesn". -+ */ -+void qbman_eq_desc_set_fq(struct qbman_eq_desc *, uint32_t fqid); -+void qbman_eq_desc_set_qd(struct qbman_eq_desc *, uint32_t qdid, -+ uint32_t qd_bin, uint32_t qd_prio); -+ -+/** -+ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt -+ * @d: the enqueue descriptor -+ * @enable: boolean to enable/disable EQDI -+ * -+ * Determines whether or not the portal's EQDI interrupt source should be -+ * asserted after the enqueue command is completed. -+ */ -+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *, int enable); -+ -+/** -+ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command. -+ * @d: the enqueue descriptor. -+ * @enable: enabled/disable DCA mode. -+ * @dqrr_idx: DCAP_CI, the DCAP consumer index. -+ * @park: determine the whether park the FQ or not -+ * -+ * Determines whether or not a portal DQRR entry should be consumed once the -+ * enqueue command is completed. (And if so, and the DQRR entry corresponds -+ * to a held-active (order-preserving) FQ, whether the FQ should be parked -+ * instead of being rescheduled.) 
-+ */ -+void qbman_eq_desc_set_dca(struct qbman_eq_desc *, int enable, -+ uint32_t dqrr_idx, int park); -+ -+/** -+ * qbman_swp_enqueue() - Issue an enqueue command. -+ * @s: the software portal used for enqueue. -+ * @d: the enqueue descriptor. -+ * @fd: the frame descriptor to be enqueued. -+ * -+ * Please note that 'fd' should only be NULL if the "action" of the -+ * descriptor is "orp_hole" or "orp_nesn". -+ * -+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. -+ */ -+int qbman_swp_enqueue(struct qbman_swp *, const struct qbman_eq_desc *, -+ const struct qbman_fd *fd); -+ -+/** -+ * qbman_swp_enqueue_thresh() - Set the threshold for EQRI interrupt. -+ * -+ * An EQRI interrupt can be generated when the fill-level of EQCR falls below -+ * the 'thresh' value set here. Setting thresh==0 (the default) disables. -+ */ -+int qbman_swp_enqueue_thresh(struct qbman_swp *, unsigned int thresh); -+ -+ /*******************/ -+ /* Buffer releases */ -+ /*******************/ -+/** -+ * struct qbman_release_desc - The structure for buffer release descriptor -+ */ -+struct qbman_release_desc { -+ uint32_t dont_manipulate_directly[1]; -+}; -+ -+/** -+ * qbman_release_desc_clear() - Clear the contents of a descriptor to -+ * default/starting state. -+ */ -+void qbman_release_desc_clear(struct qbman_release_desc *); -+ -+/** -+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to -+ */ -+void qbman_release_desc_set_bpid(struct qbman_release_desc *, uint32_t bpid); -+ -+/** -+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI -+ * interrupt source should be asserted after the release command is completed. -+ */ -+void qbman_release_desc_set_rcdi(struct qbman_release_desc *, int enable); -+ -+/** -+ * qbman_swp_release() - Issue a buffer release command. -+ * @s: the software portal object. -+ * @d: the release descriptor. -+ * @buffers: a pointer pointing to the buffer address to be released. 
-+ * @num_buffers: number of buffers to be released, must be less than 8. -+ * -+ * Return 0 for success, -EBUSY if the release command ring is not ready. -+ */ -+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, -+ const uint64_t *buffers, unsigned int num_buffers); -+ -+ /*******************/ -+ /* Buffer acquires */ -+ /*******************/ -+ -+/** -+ * qbman_swp_acquire() - Issue a buffer acquire command. -+ * @s: the software portal object. -+ * @bpid: the buffer pool index. -+ * @buffers: a pointer pointing to the acquired buffer address|es. -+ * @num_buffers: number of buffers to be acquired, must be less than 8. -+ * -+ * Return 0 for success, or negative error code if the acquire command -+ * fails. -+ */ -+int qbman_swp_acquire(struct qbman_swp *, uint32_t bpid, uint64_t *buffers, -+ unsigned int num_buffers); -+ -+ /*****************/ -+ /* FQ management */ -+ /*****************/ -+ -+/** -+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state. -+ * @s: the software portal object. -+ * @fqid: the index of frame queue to be scheduled. -+ * -+ * There are a couple of different ways that a FQ can end up parked state, -+ * This schedules it. -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid); -+ -+/** -+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state. -+ * @s: the software portal object. -+ * @fqid: the index of frame queue to be forced. -+ * -+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled -+ * and thus be available for selection by any channel-dequeuing behaviour (push -+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still -+ * empty at the time this happens, the resulting dq_entry will have no FD. -+ * (qbman_result_DQ_fd() will return NULL.) -+ * -+ * Return 0 for success, or negative error code for failure. 
-+ */ -+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid); -+ -+/** -+ * qbman_swp_fq_xon() -+ * qbman_swp_fq_xoff() - XON/XOFF the frame queue. -+ * @s: the software portal object. -+ * @fqid: the index of frame queue. -+ * -+ * These functions change the FQ flow-control stuff between XON/XOFF. (The -+ * default is XON.) This setting doesn't affect enqueues to the FQ, just -+ * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when -+ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is -+ * changed to XOFF after it had already become truly-scheduled to a channel, and -+ * a pull dequeue of that channel occurs that selects that FQ for dequeuing, -+ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will -+ * return NULL.) -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid); -+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid); -+ -+ /**********************/ -+ /* Channel management */ -+ /**********************/ -+ -+/* If the user has been allocated a channel object that is going to generate -+ * CDANs to another channel, then these functions will be necessary. -+ * CDAN-enabled channels only generate a single CDAN notification, after which -+ * it they need to be reenabled before they'll generate another. (The idea is -+ * that pull dequeuing will occur in reaction to the CDAN, followed by a -+ * reenable step.) Each function generates a distinct command to hardware, so a -+ * combination function is provided if the user wishes to modify the "context" -+ * (which shows up in each CDAN message) each time they reenable, as a single -+ * command to hardware. */ -+/** -+ * qbman_swp_CDAN_set_context() - Set CDAN context -+ * @s: the software portal object. -+ * @channelid: the channel index. -+ * @ctx: the context to be set in CDAN. -+ * -+ * Return 0 for success, or negative error code for failure. 
-+ */ -+int qbman_swp_CDAN_set_context(struct qbman_swp *, uint16_t channelid, -+ uint64_t ctx); -+ -+/** -+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel. -+ * @s: the software portal object. -+ * @channelid: the index of the channel to generate CDAN. -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_CDAN_enable(struct qbman_swp *, uint16_t channelid); -+ -+/** -+ * qbman_swp_CDAN_disable() - disable CDAN for the channel. -+ * @s: the software portal object. -+ * @channelid: the index of the channel to generate CDAN. -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_CDAN_disable(struct qbman_swp *, uint16_t channelid); -+ -+/** -+ * qbman_swp_CDAN_set_context_enable() - Set CDAN contest and enable CDAN -+ * @s: the software portal object. -+ * @channelid: the index of the channel to generate CDAN. -+ * @ctx: the context set in CDAN. -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *, uint16_t channelid, -+ uint64_t ctx); -+ -+#endif /* !_FSL_QBMAN_PORTAL_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c -new file mode 100644 -index 0000000..12e33d3 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c -@@ -0,0 +1,846 @@ -+/* Copyright (C) 2015 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include "qbman_portal.h" -+#include "qbman_debug.h" -+#include "fsl_qbman_portal.h" -+ -+/* QBMan portal management command code */ -+#define QBMAN_BP_QUERY 0x32 -+#define QBMAN_FQ_QUERY 0x44 -+#define QBMAN_FQ_QUERY_NP 0x45 -+#define QBMAN_CGR_QUERY 0x51 -+#define QBMAN_WRED_QUERY 0x54 -+#define QBMAN_CGR_STAT_QUERY 0x55 -+#define QBMAN_CGR_STAT_QUERY_CLR 0x56 -+ -+enum qbman_attr_usage_e { -+ qbman_attr_usage_fq, -+ qbman_attr_usage_bpool, -+ qbman_attr_usage_cgr, -+}; -+ -+struct int_qbman_attr { -+ uint32_t words[32]; -+ enum qbman_attr_usage_e usage; -+}; -+ -+#define attr_type_set(a, e) \ -+{ \ -+ struct qbman_attr *__attr = a; \ -+ enum qbman_attr_usage_e __usage = e; \ -+ ((struct int_qbman_attr *)__attr)->usage = __usage; \ -+} -+ -+#define ATTR32(d) (&(d)->dont_manipulate_directly[0]) -+#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16]) -+ -+static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1); -+static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1); -+static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1); -+static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16); -+static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16); -+static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16); -+static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16); -+static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16); -+static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16); -+static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14); -+static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15); -+static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1); -+static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32); -+static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32); -+static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32); -+static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32); 
-+static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16); -+static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3); -+static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32); -+static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32); -+static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8); -+static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 1, 8); -+static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 2, 8); -+ -+void qbman_bp_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_bpool); -+} -+ -+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, -+ struct qbman_attr *a) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t *attr = ATTR32(a); -+ -+ qbman_bp_attr_clear(a); -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_bp_bpid, p, bpid); -+ -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_BP_QUERY); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != QBMAN_BP_QUERY); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt); -+ return -EIO; -+ } -+ -+ /* For the query, word[0] of the result contains only the -+ * verb/rslt fields, so skip word[0]. 
-+ */ -+ word_copy(&attr[1], &p[1], 15); -+ return 0; -+} -+ -+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p); -+ *va = !!qb_attr_code_decode(&code_bp_va, p); -+ *wae = !!qb_attr_code_decode(&code_bp_wae, p); -+} -+ -+static uint32_t qbman_bp_thresh_to_value(uint32_t val) -+{ -+ return (val & 0xff) << ((val & 0xf00) >> 8); -+} -+ -+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet, -+ p)); -+} -+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt, -+ p)); -+} -+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet, -+ p)); -+} -+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt, -+ p)); -+} -+ -+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset, -+ p)); -+} -+ -+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt, -+ p)); -+} -+ -+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p); -+} -+ -+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *icid = qb_attr_code_decode(&code_bp_icid, p); -+ *pl = !!qb_attr_code_decode(&code_bp_pl, p); -+} -+ -+void 
qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bpscn_addr = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_hi, -+ p) << 32) | -+ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_lo, -+ p); -+} -+ -+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bpscn_ctx = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p) -+ << 32) | -+ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_lo, -+ p); -+} -+ -+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p); -+} -+ -+int qbman_bp_info_has_free_bufs(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1); -+} -+ -+int qbman_bp_info_is_depleted(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2); -+} -+ -+int qbman_bp_info_is_surplus(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4); -+} -+ -+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_fill, p); -+} -+ -+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_hdptr, p); -+} -+ -+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_sdcnt, p); -+} -+ -+uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_hdcnt, p); -+} -+ -+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_sscnt, p); -+} -+ -+static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24); -+static struct 
qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16); -+static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15); -+static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8); -+static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15); -+static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13); -+static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12); -+static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1); -+static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1); -+static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1); -+static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1); -+static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1); -+static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1); -+static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32); -+static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32); -+static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15); -+static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1); -+static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24); -+static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24); -+ -+void qbman_fq_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_fq); -+} -+ -+/* FQ query function for programmable fields */ -+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, struct qbman_attr *desc) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t *d = ATTR32(desc); -+ -+ qbman_fq_attr_clear(desc); -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ qb_attr_code_encode(&code_fq_fqid, p, fqid); -+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != QBMAN_FQ_QUERY); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query of FQID 0x%x 
failed, code=0x%02x\n", -+ fqid, rslt); -+ return -EIO; -+ } -+ /* For the configure, word[0] of the command contains only the WE-mask. -+ * For the query, word[0] of the result contains only the verb/rslt -+ * fields. Skip word[0] in the latter case. */ -+ word_copy(&d[1], &p[1], 15); -+ return 0; -+} -+ -+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p); -+} -+ -+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p); -+} -+ -+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *destwq = qb_attr_code_decode(&code_fq_destwq, p); -+} -+ -+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *icscred = qb_attr_code_decode(&code_fq_icscred, p); -+} -+ -+static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5); -+static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8); -+static uint32_t qbman_thresh_to_value(uint32_t val) -+{ -+ uint32_t m, e; -+ -+ m = qb_attr_code_decode(&code_tdthresh_mant, &val); -+ e = qb_attr_code_decode(&code_tdthresh_exp, &val); -+ return m << e; -+} -+ -+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh, -+ p)); -+} -+ -+void qbman_fq_attr_get_oa(struct qbman_attr *d, -+ int *oa_ics, int *oa_cgr, int32_t *oa_len) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p); -+ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p); -+ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len, -+ qb_attr_code_decode(&code_fq_oa_len, p)); -+} -+ -+void qbman_fq_attr_get_mctl(struct qbman_attr *d, -+ int *bdi, int *ff, int *va, int *ps) -+{ -+ uint32_t *p = 
ATTR32(d); -+ -+ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p); -+ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p); -+ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p); -+ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p); -+} -+ -+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p); -+ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p); -+} -+ -+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *icid = qb_attr_code_decode(&code_fq_icid, p); -+ *pl = !!qb_attr_code_decode(&code_fq_pl, p); -+} -+ -+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p); -+} -+ -+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p); -+} -+ -+/* Query FQ Non-Programmalbe Fields */ -+static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3); -+static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1); -+static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1); -+static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1); -+static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1); -+static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24); -+static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32); -+ -+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *state) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t *d = ATTR32(state); -+ -+ qbman_fq_attr_clear(state); -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ qb_attr_code_encode(&code_fq_fqid, p, fqid); -+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = 
qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != QBMAN_FQ_QUERY_NP); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n", -+ fqid, rslt); -+ return -EIO; -+ } -+ word_copy(&d[0], &p[0], 16); -+ return 0; -+} -+ -+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_state, p); -+} -+ -+int qbman_fq_state_force_eligible(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_fe, p); -+} -+ -+int qbman_fq_state_xoff(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_x, p); -+} -+ -+int qbman_fq_state_retirement_pending(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_r, p); -+} -+ -+int qbman_fq_state_overflow_error(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_oe, p); -+} -+ -+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_frm_cnt, p); -+} -+ -+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_byte_cnt, p); -+} -+ -+/* Query CGR */ -+static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1); -+static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1); -+static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1); -+static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2); -+static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1); -+static struct qb_attr_code 
code_cgr_cscn_bdi = QB_CODE(3, 19, 1); -+static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1); -+static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1); -+static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1); -+static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1); -+static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1); -+static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1); -+static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5); -+static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1); -+static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13); -+static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13); -+static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13); -+static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16); -+static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16); -+static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16); -+static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15); -+static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1); -+static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32); -+static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32); -+static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32); -+static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32); -+ -+void qbman_cgr_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_cgr); -+} -+ -+int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, struct qbman_attr *attr) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t *d[2]; -+ int i; -+ uint32_t query_verb; -+ -+ d[0] = ATTR32(attr); -+ d[1] = ATTR32_1(attr); -+ -+ qbman_cgr_attr_clear(attr); -+ -+ for (i = 0; i < 2; i++) { -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ query_verb = i ? 
QBMAN_WRED_QUERY : QBMAN_CGR_QUERY; -+ -+ qb_attr_code_encode(&code_cgr_cgid, p, cgid); -+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != query_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query CGID 0x%x failed,", cgid); -+ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt); -+ return -EIO; -+ } -+ /* For the configure, word[0] of the command contains only the -+ * verb/cgid. For the query, word[0] of the result contains -+ * only the verb/rslt fields. Skip word[0] in the latter case. -+ */ -+ word_copy(&d[i][1], &p[1], 15); -+ } -+ return 0; -+} -+ -+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, -+ int *cscn_wq_en_exit, int *cscn_wq_icd) -+ { -+ uint32_t *p = ATTR32(d); -+ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter, -+ p); -+ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p); -+ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p); -+} -+ -+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, -+ int *rej_cnt_mode, int *cscn_bdi) -+{ -+ uint32_t *p = ATTR32(d); -+ *mode = qb_attr_code_decode(&code_cgr_mode, p); -+ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p); -+ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p); -+} -+ -+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, -+ int *cscn_wr_en_exit, int *cg_wr_ae, -+ int *cscn_dcp_en, int *cg_wr_va) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter, -+ p); -+ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p); -+ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p); -+ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p); -+ *cg_wr_va = 
!!qb_attr_code_decode(&code_cgr_cg_wr_va, p); -+} -+ -+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, -+ uint32_t *i_cnt_wr_bnd) -+{ -+ uint32_t *p = ATTR32(d); -+ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p); -+ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p); -+} -+ -+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en) -+{ -+ uint32_t *p = ATTR32(d); -+ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p); -+} -+ -+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres) -+{ -+ uint32_t *p = ATTR32(d); -+ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_cs_thres, p)); -+} -+ -+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, -+ uint32_t *cs_thres_x) -+{ -+ uint32_t *p = ATTR32(d); -+ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_cs_thres_x, p)); -+} -+ -+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres) -+{ -+ uint32_t *p = ATTR32(d); -+ *td_thres = qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_td_thres, p)); -+} -+ -+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p); -+} -+ -+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p); -+} -+ -+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, -+ uint32_t *cscn_vcgid) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p); -+} -+ -+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, -+ int *pl) -+{ -+ uint32_t *p = ATTR32(d); -+ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p); -+ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p); -+} -+ -+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, -+ uint64_t *cg_wr_addr) -+{ -+ uint32_t *p = ATTR32(d); -+ 
*cg_wr_addr = ((uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi, -+ p) << 32) | -+ (uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo, -+ p); -+} -+ -+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_ctx = ((uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p) -+ << 32) | -+ (uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p); -+} -+ -+#define WRED_EDP_WORD(n) (18 + n/4) -+#define WRED_EDP_OFFSET(n) (8 * (n % 4)) -+#define WRED_PARM_DP_WORD(n) (n + 20) -+#define WRED_WE_EDP(n) (16 + n * 2) -+#define WRED_WE_PARM_DP(n) (17 + n * 2) -+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, -+ int *edp) -+{ -+ uint32_t *p = ATTR32(d); -+ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx), -+ WRED_EDP_OFFSET(idx), 8); -+ *edp = (int)qb_attr_code_decode(&code_wred_edp, p); -+} -+ -+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, -+ uint64_t *maxth, uint8_t *maxp) -+{ -+ uint8_t ma, mn, step_i, step_s, pn; -+ -+ ma = (uint8_t)(dp >> 24); -+ mn = (uint8_t)(dp >> 19) & 0x1f; -+ step_i = (uint8_t)(dp >> 11); -+ step_s = (uint8_t)(dp >> 6) & 0x1f; -+ pn = (uint8_t)dp & 0x3f; -+ -+ *maxp = ((pn<<2) * 100)/256; -+ -+ if (mn == 0) -+ *maxth = ma; -+ else -+ *maxth = ((ma+256) * (1<<(mn-1))); -+ -+ if (step_s == 0) -+ *minth = *maxth - step_i; -+ else -+ *minth = *maxth - (256 + step_i) * (1<<(step_s - 1)); -+} -+ -+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, -+ uint32_t *dp) -+{ -+ uint32_t *p = ATTR32(d); -+ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx), -+ 0, 8); -+ *dp = qb_attr_code_decode(&code_wred_parm_dp, p); -+} -+ -+/* Query CGR/CCGR/CQ statistics */ -+static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8); -+static struct 
qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32); -+static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16); -+static int qbman_cgr_statistics_query(struct qbman_swp *s, uint32_t cgid, -+ int clear, uint32_t command_type, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t query_verb; -+ uint32_t hi, lo; -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ qb_attr_code_encode(&code_cgr_cgid, p, cgid); -+ if (command_type < 2) -+ qb_attr_code_encode(&code_cgr_stat_ct, p, command_type); -+ query_verb = clear ? -+ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY; -+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != query_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query statistics of CGID 0x%x failed,", cgid); -+ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt); -+ return -EIO; -+ } -+ -+ if (*frame_cnt) { -+ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p); -+ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p); -+ *frame_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; -+ } -+ if (*byte_cnt) { -+ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p); -+ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p); -+ *byte_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; -+ } -+ -+ return 0; -+} -+ -+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 0xff, -+ frame_cnt, byte_cnt); -+} -+ -+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 1, -+ frame_cnt, byte_cnt); -+} -+ -+int 
qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 0, -+ frame_cnt, byte_cnt); -+} -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h -new file mode 100644 -index 0000000..1e6b002 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h -@@ -0,0 +1,136 @@ -+/* Copyright (C) 2015 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+struct qbman_attr { -+ uint32_t dont_manipulate_directly[40]; -+}; -+ -+/* Buffer pool query commands */ -+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, -+ struct qbman_attr *a); -+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae); -+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet); -+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt); -+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet); -+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt); -+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset); -+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt); -+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid); -+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl); -+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr); -+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx); -+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ); -+int qbman_bp_info_has_free_bufs(struct qbman_attr *a); -+int qbman_bp_info_is_depleted(struct qbman_attr *a); -+int qbman_bp_info_is_surplus(struct qbman_attr *a); -+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a); -+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a); -+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a); -+uint32_t qbman_bp_info_hdcnt(struct 
qbman_attr *a); -+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a); -+ -+/* FQ query function for programmable fields */ -+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *desc); -+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl); -+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid); -+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq); -+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred); -+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh); -+void qbman_fq_attr_get_oa(struct qbman_attr *d, -+ int *oa_ics, int *oa_cgr, int32_t *oa_len); -+void qbman_fq_attr_get_mctl(struct qbman_attr *d, -+ int *bdi, int *ff, int *va, int *ps); -+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo); -+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl); -+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid); -+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid); -+ -+/* FQ query command for non-programmable fields*/ -+enum qbman_fq_schedstate_e { -+ qbman_fq_schedstate_oos = 0, -+ qbman_fq_schedstate_retired, -+ qbman_fq_schedstate_tentatively_scheduled, -+ qbman_fq_schedstate_truly_scheduled, -+ qbman_fq_schedstate_parked, -+ qbman_fq_schedstate_held_active, -+}; -+ -+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *state); -+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state); -+int qbman_fq_state_force_eligible(const struct qbman_attr *state); -+int qbman_fq_state_xoff(const struct qbman_attr *state); -+int qbman_fq_state_retirement_pending(const struct qbman_attr *state); -+int qbman_fq_state_overflow_error(const struct qbman_attr *state); -+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state); -+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state); -+ -+/* CGR query */ -+int 
qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, -+ struct qbman_attr *attr); -+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, -+ int *cscn_wq_en_exit, int *cscn_wq_icd); -+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, -+ int *rej_cnt_mode, int *cscn_bdi); -+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, -+ int *cscn_wr_en_exit, int *cg_wr_ae, -+ int *cscn_dcp_en, int *cg_wr_va); -+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, -+ uint32_t *i_cnt_wr_bnd); -+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en); -+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres); -+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, -+ uint32_t *cs_thres_x); -+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres); -+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp); -+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid); -+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, -+ uint32_t *cscn_vcgid); -+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, -+ int *pl); -+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, -+ uint64_t *cg_wr_addr); -+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx); -+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, -+ int *edp); -+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, -+ uint64_t *maxth, uint8_t *maxp); -+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, -+ uint32_t *dp); -+ -+/* CGR/CCGR/CQ statistics query */ -+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -+int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t 
cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c -new file mode 100644 -index 0000000..6c5638b ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c -@@ -0,0 +1,1212 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "qbman_portal.h" -+ -+/* QBMan portal management command codes */ -+#define QBMAN_MC_ACQUIRE 0x30 -+#define QBMAN_WQCHAN_CONFIGURE 0x46 -+ -+/* CINH register offsets */ -+#define QBMAN_CINH_SWP_EQAR 0x8c0 -+#define QBMAN_CINH_SWP_DQPI 0xa00 -+#define QBMAN_CINH_SWP_DCAP 0xac0 -+#define QBMAN_CINH_SWP_SDQCR 0xb00 -+#define QBMAN_CINH_SWP_RAR 0xcc0 -+#define QBMAN_CINH_SWP_ISR 0xe00 -+#define QBMAN_CINH_SWP_IER 0xe40 -+#define QBMAN_CINH_SWP_ISDR 0xe80 -+#define QBMAN_CINH_SWP_IIR 0xec0 -+ -+/* CENA register offsets */ -+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_CR 0x600 -+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1)) -+#define QBMAN_CENA_SWP_VDQCR 0x780 -+ -+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */ -+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6) -+ -+/* QBMan FQ management command codes */ -+#define QBMAN_FQ_SCHEDULE 0x48 -+#define QBMAN_FQ_FORCE 0x49 -+#define QBMAN_FQ_XON 0x4d -+#define QBMAN_FQ_XOFF 0x4e -+ -+/*******************************/ -+/* Pre-defined attribute codes */ -+/*******************************/ -+ -+struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7); -+struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8); -+ -+/*************************/ -+/* SDQCR attribute codes */ 
-+/*************************/ -+ -+/* we put these here because at least some of them are required by -+ * qbman_swp_init() */ -+struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2); -+struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1); -+struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8); -+#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1) -+enum qbman_sdqcr_dct { -+ qbman_sdqcr_dct_null = 0, -+ qbman_sdqcr_dct_prio_ics, -+ qbman_sdqcr_dct_active_ics, -+ qbman_sdqcr_dct_active -+}; -+enum qbman_sdqcr_fc { -+ qbman_sdqcr_fc_one = 0, -+ qbman_sdqcr_fc_up_to_3 = 1 -+}; -+struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16); -+ -+/*********************************/ -+/* Portal constructor/destructor */ -+/*********************************/ -+ -+/* Software portals should always be in the power-on state when we initialise, -+ * due to the CCSR-based portal reset functionality that MC has. -+ * -+ * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR -+ * valid-bits, so we need to support a workaround where we don't trust -+ * valid-bits when detecting new entries until any stale ring entries have been -+ * overwritten at least once. The idea is that we read PI for the first few -+ * entries, then switch to valid-bit after that. The trick is to clear the -+ * bug-work-around boolean once the PI wraps around the ring for the first time. -+ * -+ * Note: this still carries a slight additional cost once the decrementer hits -+ * zero, so ideally the workaround should only be compiled in if the compiled -+ * image needs to support affected chips. We use WORKAROUND_DQRR_RESET_BUG for -+ * this. 
-+ */ -+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) -+{ -+ int ret; -+ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL); -+ -+ if (!p) -+ return NULL; -+ p->desc = d; -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_start; -+#endif -+ p->mc.valid_bit = QB_VALID_BIT; -+ p->sdq = 0; -+ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics); -+ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3); -+ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb); -+ atomic_set(&p->vdq.busy, 1); -+ p->vdq.valid_bit = QB_VALID_BIT; -+ p->dqrr.next_idx = 0; -+ p->dqrr.valid_bit = QB_VALID_BIT; -+ /* TODO: should also read PI/CI type registers and check that they're on -+ * PoR values. If we're asked to initialise portals that aren't in reset -+ * state, bad things will follow. */ -+#ifdef WORKAROUND_DQRR_RESET_BUG -+ p->dqrr.reset_bug = 1; -+#endif -+ if ((p->desc->qman_version & 0xFFFF0000) < QMAN_REV_4100) -+ p->dqrr.dqrr_size = 4; -+ else -+ p->dqrr.dqrr_size = 8; -+ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size); -+ if (ret) { -+ kfree(p); -+ pr_err("qbman_swp_sys_init() failed %d\n", ret); -+ return NULL; -+ } -+ /* SDQCR needs to be initialized to 0 when no channels are -+ being dequeued from or else the QMan HW will indicate an -+ error. 
The values that were calculated above will be -+ applied when dequeues from a specific channel are enabled */ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0); -+ return p; -+} -+ -+void qbman_swp_finish(struct qbman_swp *p) -+{ -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_start); -+#endif -+ qbman_swp_sys_finish(&p->sys); -+ kfree(p); -+} -+ -+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p) -+{ -+ return p->desc; -+} -+ -+/**************/ -+/* Interrupts */ -+/**************/ -+ -+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR); -+} -+ -+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask); -+} -+ -+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR); -+} -+ -+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask); -+} -+ -+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER); -+} -+ -+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask); -+} -+ -+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR); -+} -+ -+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0); -+} -+ -+/***********************/ -+/* Management commands */ -+/***********************/ -+ -+/* -+ * Internal code common to all types of management commands. 
-+ */ -+ -+void *qbman_swp_mc_start(struct qbman_swp *p) -+{ -+ void *ret; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_start); -+#endif -+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR); -+#ifdef QBMAN_CHECKING -+ if (!ret) -+ p->mc.check = swp_mc_can_submit; -+#endif -+ return ret; -+} -+ -+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb) -+{ -+ uint32_t *v = cmd; -+#ifdef QBMAN_CHECKING -+ BUG_ON(!p->mc.check != swp_mc_can_submit); -+#endif -+ /* TBD: "|=" is going to hurt performance. Need to move as many fields -+ * out of word zero, and for those that remain, the "OR" needs to occur -+ * at the caller side. This debug check helps to catch cases where the -+ * caller wants to OR but has forgotten to do so. */ -+ BUG_ON((*v & cmd_verb) != *v); -+ *v = cmd_verb | p->mc.valid_bit; -+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd); -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_poll; -+#endif -+} -+ -+void *qbman_swp_mc_result(struct qbman_swp *p) -+{ -+ uint32_t *ret, verb; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_poll); -+#endif -+ qbman_cena_invalidate_prefetch(&p->sys, -+ QBMAN_CENA_SWP_RR(p->mc.valid_bit)); -+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); -+ /* Remove the valid-bit - command completed iff the rest is non-zero */ -+ verb = ret[0] & ~QB_VALID_BIT; -+ if (!verb) -+ return NULL; -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_start; -+#endif -+ p->mc.valid_bit ^= QB_VALID_BIT; -+ return ret; -+} -+ -+/***********/ -+/* Enqueue */ -+/***********/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2); -+static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1); -+static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1); -+static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1); -+static struct qb_attr_code code_eq_dca_idx = QB_CODE(0, 8, 2); -+static struct qb_attr_code 
code_eq_orp_en = QB_CODE(0, 2, 1); -+static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1); -+static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1); -+static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14); -+static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16); -+static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24); -+/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */ -+static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1); -+static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16); -+static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4); -+static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1); -+static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8); -+static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32); -+ -+enum qbman_eq_cmd_e { -+ /* No enqueue, primarily for plugging ORP gaps for dropped frames */ -+ qbman_eq_cmd_empty, -+ /* DMA an enqueue response once complete */ -+ qbman_eq_cmd_respond, -+ /* DMA an enqueue response only if the enqueue fails */ -+ qbman_eq_cmd_respond_reject -+}; -+ -+void qbman_eq_desc_clear(struct qbman_eq_desc *d) -+{ -+ memset(d, 0, sizeof(*d)); -+} -+ -+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 0); -+ qb_attr_code_encode(&code_eq_cmd, cl, -+ respond_success ? qbman_eq_cmd_respond : -+ qbman_eq_cmd_respond_reject); -+} -+ -+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success, -+ uint32_t opr_id, uint32_t seqnum, int incomplete) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, -+ respond_success ? 
qbman_eq_cmd_respond : -+ qbman_eq_cmd_respond_reject); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete); -+} -+ -+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); -+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0); -+} -+ -+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); -+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1); -+} -+ -+void qbman_eq_desc_set_response(struct qbman_eq_desc *d, -+ dma_addr_t storage_phys, -+ int stash) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys); -+ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash); -+} -+ -+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token); -+} -+ -+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_qd_en, cl, 0); -+ qb_attr_code_encode(&code_eq_tgt_id, cl, fqid); -+} -+ -+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid, -+ uint32_t qd_bin, uint32_t qd_prio) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_qd_en, cl, 
1); -+ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid); -+ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin); -+ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio); -+} -+ -+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_eqdi, cl, !!enable); -+} -+ -+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable, -+ uint32_t dqrr_idx, int park) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_dca_en, cl, !!enable); -+ if (enable) { -+ qb_attr_code_encode(&code_eq_dca_pk, cl, !!park); -+ qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx); -+ } -+} -+ -+#define EQAR_IDX(eqar) ((eqar) & 0x7) -+#define EQAR_VB(eqar) ((eqar) & 0x80) -+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100) -+ -+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR); -+ -+ pr_debug("EQAR=%08x\n", eqar); -+ if (!EQAR_SUCCESS(eqar)) -+ return -EBUSY; -+ p = qbman_cena_write_start(&s->sys, -+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); -+ word_copy(&p[1], &cl[1], 7); -+ word_copy(&p[8], fd, sizeof(*fd) >> 2); -+ /* Set the verb byte, have to substitute in the valid-bit */ -+ p[0] = cl[0] | EQAR_VB(eqar); -+ qbman_cena_write_complete(&s->sys, -+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)), -+ p); -+ return 0; -+} -+ -+/*************************/ -+/* Static (push) dequeue */ -+/*************************/ -+ -+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled) -+{ -+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); -+ -+ BUG_ON(channel_idx > 15); -+ *enabled = (int)qb_attr_code_decode(&code, &s->sdq); -+} -+ -+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable) -+{ -+ uint16_t dqsrc; -+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); -+ -+ BUG_ON(channel_idx > 15); -+ 
qb_attr_code_encode(&code, &s->sdq, !!enable); -+ /* Read make the complete src map. If no channels are enabled -+ the SDQCR must be 0 or else QMan will assert errors */ -+ dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq); -+ if (dqsrc != 0) -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq); -+ else -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0); -+} -+ -+/***************************/ -+/* Volatile (pull) dequeue */ -+/***************************/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2); -+static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2); -+static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1); -+static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1); -+static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4); -+static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8); -+static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32); -+ -+enum qb_pull_dt_e { -+ qb_pull_dt_channel, -+ qb_pull_dt_workqueue, -+ qb_pull_dt_framequeue -+}; -+ -+void qbman_pull_desc_clear(struct qbman_pull_desc *d) -+{ -+ memset(d, 0, sizeof(*d)); -+} -+ -+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, -+ struct dpaa2_dq *storage, -+ dma_addr_t storage_phys, -+ int stash) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ /* Squiggle the pointer 'storage' into the extra 2 words of the -+ * descriptor (which aren't copied to the hw command) */ -+ *(void **)&cl[4] = storage; -+ if (!storage) { -+ qb_attr_code_encode(&code_pull_rls, cl, 0); -+ return; -+ } -+ qb_attr_code_encode(&code_pull_rls, cl, 1); -+ qb_attr_code_encode(&code_pull_stash, cl, !!stash); -+ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys); -+} -+ -+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ BUG_ON(!numframes || 
(numframes > 16)); -+ qb_attr_code_encode(&code_pull_numframes, cl, -+ (uint32_t)(numframes - 1)); -+} -+ -+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_token, cl, token); -+} -+ -+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, 1); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue); -+ qb_attr_code_encode(&code_pull_dqsource, cl, fqid); -+} -+ -+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid, -+ enum qbman_pull_type_e dct) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, dct); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue); -+ qb_attr_code_encode(&code_pull_dqsource, cl, wqid); -+} -+ -+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid, -+ enum qbman_pull_type_e dct) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, dct); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel); -+ qb_attr_code_encode(&code_pull_dqsource, cl, chid); -+} -+ -+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) -+{ -+ uint32_t *p; -+ uint32_t *cl = qb_cl(d); -+ -+ if (!atomic_dec_and_test(&s->vdq.busy)) { -+ atomic_inc(&s->vdq.busy); -+ return -EBUSY; -+ } -+ s->vdq.storage = *(void **)&cl[4]; -+ qb_attr_code_encode(&code_pull_token, cl, 1); -+ p = qbman_cena_write_start(&s->sys, QBMAN_CENA_SWP_VDQCR); -+ word_copy(&p[1], &cl[1], 3); -+ /* Set the verb byte, have to substitute in the valid-bit */ -+ p[0] = cl[0] | s->vdq.valid_bit; -+ s->vdq.valid_bit ^= QB_VALID_BIT; -+ qbman_cena_write_complete(&s->sys, QBMAN_CENA_SWP_VDQCR, p); -+ return 0; -+} -+ -+/****************/ -+/* Polling DQRR */ -+/****************/ -+ -+static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8); -+static struct qb_attr_code code_dqrr_response = QB_CODE(0, 
0, 7); -+static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8); -+static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14); -+static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16); -+/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */ -+static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24); -+static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24); -+static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32); -+ -+#define QBMAN_RESULT_DQ 0x60 -+#define QBMAN_RESULT_FQRN 0x21 -+#define QBMAN_RESULT_FQRNI 0x22 -+#define QBMAN_RESULT_FQPN 0x24 -+#define QBMAN_RESULT_FQDAN 0x25 -+#define QBMAN_RESULT_CDAN 0x26 -+#define QBMAN_RESULT_CSCN_MEM 0x27 -+#define QBMAN_RESULT_CGCU 0x28 -+#define QBMAN_RESULT_BPSCN 0x29 -+#define QBMAN_RESULT_CSCN_WQ 0x2a -+ -+static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4); -+ -+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry -+ * only once, so repeated calls can return a sequence of DQRR entries, without -+ * requiring they be consumed immediately or in any particular order. */ -+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) -+{ -+ uint32_t verb; -+ uint32_t response_verb; -+ uint32_t flags; -+ const struct dpaa2_dq *dq; -+ const uint32_t *p; -+ -+ /* Before using valid-bit to detect if something is there, we have to -+ * handle the case of the DQRR reset bug... */ -+#ifdef WORKAROUND_DQRR_RESET_BUG -+ if (unlikely(s->dqrr.reset_bug)) { -+ /* We pick up new entries by cache-inhibited producer index, -+ * which means that a non-coherent mapping would require us to -+ * invalidate and read *only* once that PI has indicated that -+ * there's an entry here. The first trip around the DQRR ring -+ * will be much less efficient than all subsequent trips around -+ * it... 
-+ */ -+ uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI); -+ uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi); -+ /* there are new entries iff pi != next_idx */ -+ if (pi == s->dqrr.next_idx) -+ return NULL; -+ /* if next_idx is/was the last ring index, and 'pi' is -+ * different, we can disable the workaround as all the ring -+ * entries have now been DMA'd to so valid-bit checking is -+ * repaired. Note: this logic needs to be based on next_idx -+ * (which increments one at a time), rather than on pi (which -+ * can burst and wrap-around between our snapshots of it). -+ */ -+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) { -+ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n", -+ s->dqrr.next_idx, pi); -+ s->dqrr.reset_bug = 0; -+ } -+ qbman_cena_invalidate_prefetch(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ } -+#endif -+ -+ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ p = qb_cl(dq); -+ verb = qb_attr_code_decode(&code_dqrr_verb, p); -+ -+ /* If the valid-bit isn't of the expected polarity, nothing there. Note, -+ * in the DQRR reset bug workaround, we shouldn't need to skip these -+ * check, because we've already determined that a new entry is available -+ * and we've invalidated the cacheline before reading it, so the -+ * valid-bit behaviour is repaired and should tell us what we already -+ * knew from reading PI. -+ */ -+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) { -+ qbman_cena_invalidate_prefetch(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ return NULL; -+ } -+ /* There's something there. Move "next_idx" attention to the next ring -+ * entry (and prefetch it) before returning what we found. */ -+ s->dqrr.next_idx++; -+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */ -+ /* TODO: it's possible to do all this without conditionals, optimise it -+ * later. 
*/ -+ if (!s->dqrr.next_idx) -+ s->dqrr.valid_bit ^= QB_VALID_BIT; -+ -+ /* If this is the final response to a volatile dequeue command -+ indicate that the vdq is no longer busy */ -+ flags = dpaa2_dq_flags(dq); -+ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb); -+ if ((response_verb == QBMAN_RESULT_DQ) && -+ (flags & DPAA2_DQ_STAT_VOLATILE) && -+ (flags & DPAA2_DQ_STAT_EXPIRED)) -+ atomic_inc(&s->vdq.busy); -+ -+ qbman_cena_invalidate_prefetch(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ return dq; -+} -+ -+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */ -+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq) -+{ -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq)); -+} -+ -+/*********************************/ -+/* Polling user-provided storage */ -+/*********************************/ -+ -+int qbman_result_has_new_result(struct qbman_swp *s, -+ const struct dpaa2_dq *dq) -+{ -+ /* To avoid converting the little-endian DQ entry to host-endian prior -+ * to us knowing whether there is a valid entry or not (and run the -+ * risk of corrupting the incoming hardware LE write), we detect in -+ * hardware endianness rather than host. This means we need a different -+ * "code" depending on whether we are BE or LE in software, which is -+ * where DQRR_TOK_OFFSET comes in... */ -+ static struct qb_attr_code code_dqrr_tok_detect = -+ QB_CODE(0, DQRR_TOK_OFFSET, 8); -+ /* The user trying to poll for a result treats "dq" as const. It is -+ * however the same address that was provided to us non-const in the -+ * first place, for directing hardware DMA to. So we can cast away the -+ * const because it is mutable from our perspective. 
*/ -+ uint32_t *p = qb_cl((struct dpaa2_dq *)dq); -+ uint32_t token; -+ -+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]); -+ if (token != 1) -+ return 0; -+ qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0); -+ -+ /* Only now do we convert from hardware to host endianness. Also, as we -+ * are returning success, the user has promised not to call us again, so -+ * there's no risk of us converting the endianness twice... */ -+ make_le32_n(p, 16); -+ -+ /* VDQCR "no longer busy" hook - not quite the same as DQRR, because the -+ * fact "VDQCR" shows busy doesn't mean that the result we're looking at -+ * is from the same command. Eg. we may be looking at our 10th dequeue -+ * result from our first VDQCR command, yet the second dequeue command -+ * could have been kicked off already, after seeing the 1st result. Ie. -+ * the result we're looking at is not necessarily proof that we can -+ * reset "busy". We instead base the decision on whether the current -+ * result is sitting at the first 'storage' location of the busy -+ * command. 
*/ -+ if (s->vdq.storage == dq) { -+ s->vdq.storage = NULL; -+ atomic_inc(&s->vdq.busy); -+ } -+ return 1; -+} -+ -+/********************************/ -+/* Categorising qbman_result */ -+/********************************/ -+ -+static struct qb_attr_code code_result_in_mem = -+ QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7); -+ -+static inline int __qbman_result_is_x(const struct dpaa2_dq *dq, uint32_t x) -+{ -+ const uint32_t *p = qb_cl(dq); -+ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p); -+ -+ return response_verb == x; -+} -+ -+static inline int __qbman_result_is_x_in_mem(const struct dpaa2_dq *dq, -+ uint32_t x) -+{ -+ const uint32_t *p = qb_cl(dq); -+ uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p); -+ -+ return (response_verb == x); -+} -+ -+int qbman_result_is_DQ(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ); -+} -+ -+int qbman_result_is_FQDAN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN); -+} -+ -+int qbman_result_is_CDAN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN); -+} -+ -+int qbman_result_is_CSCN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) || -+ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ); -+} -+ -+int qbman_result_is_BPSCN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN); -+} -+ -+int qbman_result_is_CGCU(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU); -+} -+ -+int qbman_result_is_FQRN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN); -+} -+ -+int qbman_result_is_FQRNI(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI); -+} -+ -+int qbman_result_is_FQPN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN); -+} -+ 
-+/*********************************/ -+/* Parsing frame dequeue results */ -+/*********************************/ -+ -+/* These APIs assume qbman_result_is_DQ() is TRUE */ -+ -+uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_stat, p); -+} -+ -+uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p); -+} -+ -+uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p); -+} -+ -+uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_fqid, p); -+} -+ -+uint32_t dpaa2_dq_byte_count(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_byte_count, p); -+} -+ -+uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_frame_count, p); -+} -+ -+uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq) -+{ -+ const uint64_t *p = (uint64_t *)qb_cl(dq); -+ -+ return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p); -+} -+EXPORT_SYMBOL(dpaa2_dq_fqd_ctx); -+ -+const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (const struct dpaa2_fd *)&p[8]; -+} -+EXPORT_SYMBOL(dpaa2_dq_fd); -+ -+/**************************************/ -+/* Parsing state-change notifications */ -+/**************************************/ -+ -+static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8); -+static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_scn_state_in_mem = -+ QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8); -+static struct qb_attr_code code_scn_rid_in_mem = -+ QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24); -+static struct 
qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32); -+ -+uint8_t qbman_result_SCN_state(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return (uint8_t)qb_attr_code_decode(&code_scn_state, p); -+} -+ -+uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return qb_attr_code_decode(&code_scn_rid, p); -+} -+ -+uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *scn) -+{ -+ const uint64_t *p = (uint64_t *)qb_cl(scn); -+ -+ return qb_attr_code_decode_64(&code_scn_ctx_lo, p); -+} -+ -+uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p); -+} -+ -+uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ uint32_t result_rid; -+ -+ result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p); -+ return make_le24(result_rid); -+} -+ -+/*****************/ -+/* Parsing BPSCN */ -+/*****************/ -+uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *scn) -+{ -+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF; -+} -+ -+int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *scn) -+{ -+ return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1); -+} -+ -+int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *scn) -+{ -+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2); -+} -+ -+int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *scn) -+{ -+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4); -+} -+ -+uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *scn) -+{ -+ return qbman_result_SCN_ctx(scn); -+} -+ -+/*****************/ -+/* Parsing CGCU */ -+/*****************/ -+uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *scn) -+{ -+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF; -+} -+ -+uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *scn) -+{ -+ return qbman_result_SCN_ctx(scn) 
& 0xFFFFFFFFFF; -+} -+ -+/******************/ -+/* Buffer release */ -+/******************/ -+ -+/* These should be const, eventually */ -+/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */ -+static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1); -+static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1); -+static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16); -+ -+void qbman_release_desc_clear(struct qbman_release_desc *d) -+{ -+ uint32_t *cl; -+ -+ memset(d, 0, sizeof(*d)); -+ cl = qb_cl(d); -+ qb_attr_code_encode(&code_release_set_me, cl, 1); -+} -+ -+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_release_bpid, cl, bpid); -+} -+ -+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_release_rcdi, cl, !!enable); -+} -+ -+#define RAR_IDX(rar) ((rar) & 0x7) -+#define RAR_VB(rar) ((rar) & 0x80) -+#define RAR_SUCCESS(rar) ((rar) & 0x100) -+ -+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, -+ const uint64_t *buffers, unsigned int num_buffers) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR); -+ -+ pr_debug("RAR=%08x\n", rar); -+ if (!RAR_SUCCESS(rar)) -+ return -EBUSY; -+ BUG_ON(!num_buffers || (num_buffers > 7)); -+ /* Start the release command */ -+ p = qbman_cena_write_start(&s->sys, -+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); -+ /* Copy the caller's buffer pointers to the command */ -+ u64_to_le32_copy(&p[2], buffers, num_buffers); -+ /* Set the verb byte, have to substitute in the valid-bit and the number -+ * of buffers. 
*/ -+ p[0] = cl[0] | RAR_VB(rar) | num_buffers; -+ qbman_cena_write_complete(&s->sys, -+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)), -+ p); -+ return 0; -+} -+ -+/*******************/ -+/* Buffer acquires */ -+/*******************/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3); -+static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3); -+ -+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers, -+ unsigned int num_buffers) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt, num; -+ -+ BUG_ON(!num_buffers || (num_buffers > 7)); -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_acquire_bpid, p, bpid); -+ qb_attr_code_encode(&code_acquire_num, p, num_buffers); -+ -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ num = qb_attr_code_decode(&code_acquire_r_num, p); -+ BUG_ON(verb != QBMAN_MC_ACQUIRE); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n", -+ bpid, rslt); -+ return -EIO; -+ } -+ BUG_ON(num > num_buffers); -+ /* Copy the acquired buffers to the caller's array */ -+ u64_from_le32_copy(buffers, &p[2], num); -+ return (int)num; -+} -+ -+/*****************/ -+/* FQ management */ -+/*****************/ -+ -+static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32); -+ -+static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid, -+ uint8_t alt_fq_verb) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return 
-EBUSY; -+ -+ qb_attr_code_encode(&code_fqalt_fqid, p, fqid); -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != alt_fq_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n", -+ fqid, alt_fq_verb, rslt); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE); -+} -+ -+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE); -+} -+ -+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON); -+} -+ -+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF); -+} -+ -+/**********************/ -+/* Channel management */ -+/**********************/ -+ -+static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12); -+static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8); -+static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1); -+static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32); -+ -+/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it -+ * would be irresponsible to expose it. 
*/ -+#define CODE_CDAN_WE_EN 0x1 -+#define CODE_CDAN_WE_CTX 0x4 -+ -+static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid, -+ uint8_t we_mask, uint8_t cdan_en, -+ uint64_t ctx) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_cdan_cid, p, channelid); -+ qb_attr_code_encode(&code_cdan_we, p, we_mask); -+ qb_attr_code_encode(&code_cdan_en, p, cdan_en); -+ qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx); -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != QBMAN_WQCHAN_CONFIGURE); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("CDAN cQID %d failed: code = 0x%02x\n", -+ channelid, rslt); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_CTX, -+ 0, ctx); -+} -+ -+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN, -+ 1, 0); -+} -+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN, -+ 0, 0); -+} -+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX, -+ 1, ctx); -+} -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h -new file mode 100644 -index 0000000..65ebf3f ---- /dev/null -+++ 
b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h -@@ -0,0 +1,261 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include "qbman_private.h" -+#include "fsl_qbman_portal.h" -+#include "../../include/fsl_dpaa2_fd.h" -+ -+/* All QBMan command and result structures use this "valid bit" encoding */ -+#define QB_VALID_BIT ((uint32_t)0x80) -+ -+/* Management command result codes */ -+#define QBMAN_MC_RSLT_OK 0xf0 -+ -+/* TBD: as of QBMan 4.1, DQRR will be 8 rather than 4! */ -+#define QBMAN_DQRR_SIZE 4 -+ -+/* DQRR valid-bit reset bug. See qbman_portal.c::qbman_swp_init(). */ -+#define WORKAROUND_DQRR_RESET_BUG -+ -+/* --------------------- */ -+/* portal data structure */ -+/* --------------------- */ -+ -+struct qbman_swp { -+ const struct qbman_swp_desc *desc; -+ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it -+ * needs in here. */ -+ struct qbman_swp_sys sys; -+ /* Management commands */ -+ struct { -+#ifdef QBMAN_CHECKING -+ enum swp_mc_check { -+ swp_mc_can_start, /* call __qbman_swp_mc_start() */ -+ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */ -+ swp_mc_can_poll, /* call __qbman_swp_mc_result() */ -+ } check; -+#endif -+ uint32_t valid_bit; /* 0x00 or 0x80 */ -+ } mc; -+ /* Push dequeues */ -+ uint32_t sdq; -+ /* Volatile dequeues */ -+ struct { -+ /* VDQCR supports a "1 deep pipeline", meaning that if you know -+ * the last-submitted command is already executing in the -+ * hardware (as evidenced by at least 1 valid dequeue result), -+ * you can write another dequeue command to the register, the -+ * hardware will start executing it as soon as the -+ * already-executing command terminates. (This minimises latency -+ * and stalls.) With that in mind, this "busy" variable refers -+ * to whether or not a command can be submitted, not whether or -+ * not a previously-submitted command is still executing. In -+ * other words, once proof is seen that the previously-submitted -+ * command is executing, "vdq" is no longer "busy". 
-+ */ -+ atomic_t busy; -+ uint32_t valid_bit; /* 0x00 or 0x80 */ -+ /* We need to determine when vdq is no longer busy. This depends -+ * on whether the "busy" (last-submitted) dequeue command is -+ * targeting DQRR or main-memory, and detected is based on the -+ * presence of the dequeue command's "token" showing up in -+ * dequeue entries in DQRR or main-memory (respectively). */ -+ struct dpaa2_dq *storage; /* NULL if DQRR */ -+ } vdq; -+ /* DQRR */ -+ struct { -+ uint32_t next_idx; -+ uint32_t valid_bit; -+ uint8_t dqrr_size; -+#ifdef WORKAROUND_DQRR_RESET_BUG -+ int reset_bug; -+#endif -+ } dqrr; -+}; -+ -+/* -------------------------- */ -+/* portal management commands */ -+/* -------------------------- */ -+ -+/* Different management commands all use this common base layer of code to issue -+ * commands and poll for results. The first function returns a pointer to where -+ * the caller should fill in their MC command (though they should ignore the -+ * verb byte), the second function commits merges in the caller-supplied command -+ * verb (which should not include the valid-bit) and submits the command to -+ * hardware, and the third function checks for a completed response (returns -+ * non-NULL if only if the response is complete). 
*/ -+void *qbman_swp_mc_start(struct qbman_swp *p); -+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb); -+void *qbman_swp_mc_result(struct qbman_swp *p); -+ -+/* Wraps up submit + poll-for-result */ -+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd, -+ uint32_t cmd_verb) -+{ -+ int loopvar; -+ -+ qbman_swp_mc_submit(swp, cmd, cmd_verb); -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ cmd = qbman_swp_mc_result(swp); -+ } while (!cmd); -+ return cmd; -+} -+ -+/* ------------ */ -+/* qb_attr_code */ -+/* ------------ */ -+ -+/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which -+ * is either serving as a configuration command or a query result. The -+ * representation is inherently little-endian, as the indexing of the words is -+ * itself little-endian in nature and layerscape is little endian for anything -+ * that crosses a word boundary too (64-bit fields are the obvious examples). -+ */ -+struct qb_attr_code { -+ unsigned int word; /* which uint32_t[] array member encodes the field */ -+ unsigned int lsoffset; /* encoding offset from ls-bit */ -+ unsigned int width; /* encoding width. (bool must be 1.) */ -+}; -+ -+/* Some pre-defined codes */ -+extern struct qb_attr_code code_generic_verb; -+extern struct qb_attr_code code_generic_rslt; -+ -+/* Macros to define codes */ -+#define QB_CODE(a, b, c) { a, b, c} -+#define QB_CODE_NULL \ -+ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1) -+ -+/* Rotate a code "ms", meaning that it moves from less-significant bytes to -+ * more-significant, from less-significant words to more-significant, etc. The -+ * "ls" version does the inverse, from more-significant towards -+ * less-significant. 
-+ */ -+static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code, -+ unsigned int bits) -+{ -+ code->lsoffset += bits; -+ while (code->lsoffset > 31) { -+ code->word++; -+ code->lsoffset -= 32; -+ } -+} -+static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code, -+ unsigned int bits) -+{ -+ /* Don't be fooled, this trick should work because the types are -+ * unsigned. So the case that interests the while loop (the rotate has -+ * gone too far and the word count needs to compensate for it), is -+ * manifested when lsoffset is negative. But that equates to a really -+ * large unsigned value, starting with lots of "F"s. As such, we can -+ * continue adding 32 back to it until it wraps back round above zero, -+ * to a value of 31 or less... -+ */ -+ code->lsoffset -= bits; -+ while (code->lsoffset > 31) { -+ code->word--; -+ code->lsoffset += 32; -+ } -+} -+/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */ -+#define qb_attr_code_for_ms(code, bits, expr) \ -+ for (; expr; qb_attr_code_rotate_ms(code, bits)) -+#define qb_attr_code_for_ls(code, bits, expr) \ -+ for (; expr; qb_attr_code_rotate_ls(code, bits)) -+ -+/* decode a field from a cacheline */ -+static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code, -+ const uint32_t *cacheline) -+{ -+ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]); -+} -+static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code, -+ const uint64_t *cacheline) -+{ -+ uint64_t res; -+ u64_from_le32_copy(&res, &cacheline[code->word/2], 1); -+ return res; -+} -+ -+/* encode a field to a cacheline */ -+static inline void qb_attr_code_encode(const struct qb_attr_code *code, -+ uint32_t *cacheline, uint32_t val) -+{ -+ cacheline[code->word] = -+ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word]) -+ | e32_uint32_t(code->lsoffset, code->width, val); -+} -+static inline void qb_attr_code_encode_64(const struct qb_attr_code 
*code, -+ uint64_t *cacheline, uint64_t val) -+{ -+ u64_to_le32_copy(&cacheline[code->word/2], &val, 1); -+} -+ -+/* Small-width signed values (two's-complement) will decode into medium-width -+ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to -+ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value -+ * 249. Likewise -120 would decode as 136.) This function allows the caller to -+ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit -+ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t). -+ */ -+static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code, -+ uint32_t val) -+{ -+ BUG_ON(val >= (1 << code->width)); -+ /* If the high bit was set, it was encoding a negative */ -+ if (val >= (1 << (code->width - 1))) -+ return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) - -+ val); -+ /* Otherwise, it was encoding a positive */ -+ return (int32_t)val; -+} -+ -+/* ---------------------- */ -+/* Descriptors/cachelines */ -+/* ---------------------- */ -+ -+/* To avoid needless dynamic allocation, the driver API often gives the caller -+ * a "descriptor" type that the caller can instantiate however they like. -+ * Ultimately though, it is just a cacheline of binary storage (or something -+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for -+ * holding pre-formatted pieces of hardware commands. The performance-critical -+ * code can then copy these descriptors directly into hardware command -+ * registers more efficiently than trying to construct/format commands -+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in -+ * order for the compiler to know its size, but the internal details are not -+ * exposed. The following macro is used within the driver for converting *any* -+ * descriptor pointer to a usable array pointer. 
The use of a macro (instead of -+ * an inline) is necessary to work with different descriptor types and to work -+ * correctly with const and non-const inputs (and similarly-qualified outputs). -+ */ -+#define qb_cl(d) (&(d)->dont_manipulate_directly[0]) -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_private.h b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h -new file mode 100644 -index 0000000..e376b80 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h -@@ -0,0 +1,173 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+*/ -+ -+/* Perform extra checking */ -+#define QBMAN_CHECKING -+ -+/* To maximise the amount of logic that is common between the Linux driver and -+ * other targets (such as the embedded MC firmware), we pivot here between the -+ * inclusion of two platform-specific headers. -+ * -+ * The first, qbman_sys_decl.h, includes any and all required system headers as -+ * well as providing any definitions for the purposes of compatibility. The -+ * second, qbman_sys.h, is where platform-specific routines go. -+ * -+ * The point of the split is that the platform-independent code (including this -+ * header) may depend on platform-specific declarations, yet other -+ * platform-specific routines may depend on platform-independent definitions. -+ */ -+ -+#include "qbman_sys_decl.h" -+ -+#define QMAN_REV_4000 0x04000000 -+#define QMAN_REV_4100 0x04010000 -+#define QMAN_REV_4101 0x04010001 -+ -+/* When things go wrong, it is a convenient trick to insert a few FOO() -+ * statements in the code to trace progress. TODO: remove this once we are -+ * hacking the code less actively. -+ */ -+#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__) -+ -+/* Any time there is a register interface which we poll on, this provides a -+ * "break after x iterations" scheme for it. It's handy for debugging, eg. 
-+ * where you don't want millions of lines of log output from a polling loop -+ * that won't, because such things tend to drown out the earlier log output -+ * that might explain what caused the problem. (NB: put ";" after each macro!) -+ * TODO: we should probably remove this once we're done sanitising the -+ * simulator... -+ */ -+#define DBG_POLL_START(loopvar) (loopvar = 10) -+#define DBG_POLL_CHECK(loopvar) \ -+ do {if (!(loopvar--)) BUG_ON(1); } while (0) -+ -+/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets -+ * and widths, these macro-generated encode/decode/isolate/remove inlines can -+ * be used. -+ * -+ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type), -+ * where the field is located 3 bits "up" from the least-significant bit of the -+ * register (ie. the field location within the 32-bit register corresponds to a -+ * mask of 0x0001fff8), you would do; -+ * uint16_t field = d32_uint16_t(3, 14, reg_value); -+ * -+ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE, -+ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!" -+ * operator) into a register at bit location 0x00080000 (19 bits "in" from the -+ * LS bit), do; -+ * reg_value |= e32_int(19, 1, !!field); -+ * -+ * If you wish to read-modify-write a register, such that you leave the 14-bit -+ * field as-is but have all other fields set to zero, then "i"solate the 14-bit -+ * value using; -+ * reg_value = i32_uint16_t(3, 14, reg_value); -+ * -+ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to -+ * zero) but leaving all other fields as-is; -+ * reg_val = r32_int(19, 1, reg_value); -+ * -+ */ -+#define MAKE_MASK32(width) (width == 32 ? 
0xffffffff : \ -+ (uint32_t)((1 << width) - 1)) -+#define DECLARE_CODEC32(t) \ -+static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \ -+} \ -+static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \ -+} \ -+static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \ -+ uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \ -+} \ -+static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \ -+ uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return ~(MAKE_MASK32(width) << lsoffset) & val; \ -+} -+DECLARE_CODEC32(uint32_t) -+DECLARE_CODEC32(uint16_t) -+DECLARE_CODEC32(uint8_t) -+DECLARE_CODEC32(int) -+ -+ /*********************/ -+ /* Debugging assists */ -+ /*********************/ -+ -+static inline void __hexdump(unsigned long start, unsigned long end, -+ unsigned long p, size_t sz, const unsigned char *c) -+{ -+ while (start < end) { -+ unsigned int pos = 0; -+ char buf[64]; -+ int nl = 0; -+ -+ pos += sprintf(buf + pos, "%08lx: ", start); -+ do { -+ if ((start < p) || (start >= (p + sz))) -+ pos += sprintf(buf + pos, ".."); -+ else -+ pos += sprintf(buf + pos, "%02x", *(c++)); -+ if (!(++start & 15)) { -+ buf[pos++] = '\n'; -+ nl = 1; -+ } else { -+ nl = 0; -+ if (!(start & 1)) -+ buf[pos++] = ' '; -+ if (!(start & 3)) -+ buf[pos++] = ' '; -+ } -+ } while (start & 15); -+ if (!nl) -+ buf[pos++] = '\n'; -+ buf[pos] = '\0'; -+ pr_info("%s", buf); -+ } -+} -+static inline void hexdump(const void *ptr, size_t sz) -+{ -+ unsigned long p = (unsigned long)ptr; -+ unsigned long start = p & ~(unsigned long)15; -+ unsigned long end = (p + sz + 15) & ~(unsigned long)15; -+ const unsigned char *c = ptr; -+ -+ __hexdump(start, end, 
p, sz, c); -+} -+ -+#include "qbman_sys.h" -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h b/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h -new file mode 100644 -index 0000000..4849212 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h -@@ -0,0 +1,307 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the -+ * driver. They are only included via qbman_private.h, which is itself a -+ * platform-independent file and is included by all the other driver source. -+ * -+ * qbman_sys_decl.h is included prior to all other declarations and logic, and -+ * it exists to provide compatibility with any linux interfaces our -+ * single-source driver code is dependent on (eg. kmalloc). Ie. this file -+ * provides linux compatibility. -+ * -+ * This qbman_sys.h header, on the other hand, is included *after* any common -+ * and platform-neutral declarations and logic in qbman_private.h, and exists to -+ * implement any platform-specific logic of the qbman driver itself. Ie. it is -+ * *not* to provide linux compatibility. -+ */ -+ -+/* Trace the 3 different classes of read/write access to QBMan. #undef as -+ * required. */ -+#undef QBMAN_CCSR_TRACE -+#undef QBMAN_CINH_TRACE -+#undef QBMAN_CENA_TRACE -+ -+static inline void word_copy(void *d, const void *s, unsigned int cnt) -+{ -+ uint32_t *dd = d; -+ const uint32_t *ss = s; -+ -+ while (cnt--) -+ *(dd++) = *(ss++); -+} -+ -+/* Currently, the CENA support code expects each 32-bit word to be written in -+ * host order, and these are converted to hardware (little-endian) order on -+ * command submission. 
However, 64-bit quantities are must be written (and read) -+ * as two 32-bit words with the least-significant word first, irrespective of -+ * host endianness. */ -+static inline void u64_to_le32_copy(void *d, const uint64_t *s, -+ unsigned int cnt) -+{ -+ uint32_t *dd = d; -+ const uint32_t *ss = (const uint32_t *)s; -+ -+ while (cnt--) { -+ /* TBD: the toolchain was choking on the use of 64-bit types up -+ * until recently so this works entirely with 32-bit variables. -+ * When 64-bit types become usable again, investigate better -+ * ways of doing this. */ -+#if defined(__BIG_ENDIAN) -+ *(dd++) = ss[1]; -+ *(dd++) = ss[0]; -+ ss += 2; -+#else -+ *(dd++) = *(ss++); -+ *(dd++) = *(ss++); -+#endif -+ } -+} -+static inline void u64_from_le32_copy(uint64_t *d, const void *s, -+ unsigned int cnt) -+{ -+ const uint32_t *ss = s; -+ uint32_t *dd = (uint32_t *)d; -+ -+ while (cnt--) { -+#if defined(__BIG_ENDIAN) -+ dd[1] = *(ss++); -+ dd[0] = *(ss++); -+ dd += 2; -+#else -+ *(dd++) = *(ss++); -+ *(dd++) = *(ss++); -+#endif -+ } -+} -+ -+/* Convert a host-native 32bit value into little endian */ -+#if defined(__BIG_ENDIAN) -+static inline uint32_t make_le32(uint32_t val) -+{ -+ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) | -+ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24); -+} -+static inline uint32_t make_le24(uint32_t val) -+{ -+ return (((val & 0xff) << 16) | (val & 0xff00) | -+ ((val & 0xff0000) >> 16)); -+} -+#else -+#define make_le32(val) (val) -+#define make_le24(val) (val) -+#endif -+static inline void make_le32_n(uint32_t *val, unsigned int num) -+{ -+ while (num--) { -+ *val = make_le32(*val); -+ val++; -+ } -+} -+ -+ /******************/ -+ /* Portal access */ -+ /******************/ -+struct qbman_swp_sys { -+ /* On GPP, the sys support for qbman_swp is here. 
The CENA region isi -+ * not an mmap() of the real portal registers, but an allocated -+ * place-holder, because the actual writes/reads to/from the portal are -+ * marshalled from these allocated areas using QBMan's "MC access -+ * registers". CINH accesses are atomic so there's no need for a -+ * place-holder. */ -+ void *cena; -+ void __iomem *addr_cena; -+ void __iomem *addr_cinh; -+}; -+ -+/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal -+ * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH) -+ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index -+ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal) -+ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE) -+ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete) -+ */ -+ -+static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset, -+ uint32_t val) -+{ -+ -+ writel_relaxed(val, s->addr_cinh + offset); -+#ifdef QBMAN_CINH_TRACE -+ pr_info("qbman_cinh_write(%p:0x%03x) 0x%08x\n", -+ s->addr_cinh, offset, val); -+#endif -+} -+ -+static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset) -+{ -+ uint32_t reg = readl_relaxed(s->addr_cinh + offset); -+ -+#ifdef QBMAN_CINH_TRACE -+ pr_info("qbman_cinh_read(%p:0x%03x) 0x%08x\n", -+ s->addr_cinh, offset, reg); -+#endif -+ return reg; -+} -+ -+static inline void *qbman_cena_write_start(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ void *shadow = s->cena + offset; -+ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_start(%p:0x%03x) %p\n", -+ s->addr_cena, offset, shadow); -+#endif -+ BUG_ON(offset & 63); -+ dcbz(shadow); -+ return shadow; -+} -+ -+static inline void qbman_cena_write_complete(struct qbman_swp_sys *s, -+ uint32_t offset, void *cmd) -+{ -+ const uint32_t *shadow = cmd; -+ int loop; -+ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_complete(%p:0x%03x) %p\n", -+ s->addr_cena, offset, shadow); -+ hexdump(cmd, 64); -+#endif -+ for (loop = 15; 
loop >= 1; loop--) -+ writel_relaxed(shadow[loop], s->addr_cena + -+ offset + loop * 4); -+ lwsync(); -+ writel_relaxed(shadow[0], s->addr_cena + offset); -+ dcbf(s->addr_cena + offset); -+} -+ -+static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset) -+{ -+ uint32_t *shadow = s->cena + offset; -+ unsigned int loop; -+ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_read(%p:0x%03x) %p\n", -+ s->addr_cena, offset, shadow); -+#endif -+ -+ for (loop = 0; loop < 16; loop++) -+ shadow[loop] = readl_relaxed(s->addr_cena + offset -+ + loop * 4); -+#ifdef QBMAN_CENA_TRACE -+ hexdump(shadow, 64); -+#endif -+ return shadow; -+} -+ -+static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ dcivac(s->addr_cena + offset); -+ prefetch_for_load(s->addr_cena + offset); -+} -+ -+ /******************/ -+ /* Portal support */ -+ /******************/ -+ -+/* The SWP_CFG portal register is special, in that it is used by the -+ * platform-specific code rather than the platform-independent code in -+ * qbman_portal.c. So use of it is declared locally here. 
*/ -+#define QBMAN_CINH_SWP_CFG 0xd00 -+ -+/* For MC portal use, we always configure with -+ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4) -+ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x0) -+ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3) -+ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2) -+ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x3) -+ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- FALSE) -+ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE) -+ * SE is (SWP_CFG,3,1) - memory stashing enable (<- 0x0) -+ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE) -+ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- 0x0) -+ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- FALSE) -+ */ -+static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn, -+ uint8_t est, uint8_t rpm, uint8_t dcm, -+ uint8_t epm, int sd, int sp, int se, -+ int dp, int de, int ep) -+{ -+ uint32_t reg; -+ -+ reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) | -+ e32_uint8_t(16, 3, est) | e32_uint8_t(12, 2, rpm) | -+ e32_uint8_t(10, 2, dcm) | e32_uint8_t(8, 2, epm) | -+ e32_int(5, 1, sd) | e32_int(4, 1, sp) | e32_int(3, 1, se) | -+ e32_int(2, 1, dp) | e32_int(1, 1, de) | e32_int(0, 1, ep) | -+ e32_uint8_t(14, 1, wn); -+ return reg; -+} -+ -+static inline int qbman_swp_sys_init(struct qbman_swp_sys *s, -+ const struct qbman_swp_desc *d, -+ uint8_t dqrr_size) -+{ -+ uint32_t reg; -+ -+ s->addr_cena = d->cena_bar; -+ s->addr_cinh = d->cinh_bar; -+ s->cena = (void *)get_zeroed_page(GFP_KERNEL); -+ if (!s->cena) { -+ pr_err("Could not allocate page for cena shadow\n"); -+ return -1; -+ } -+ -+#ifdef QBMAN_CHECKING -+ /* We should never be asked to initialise for a portal that isn't in -+ * the power-on state. (Ie. don't forget to reset portals when they are -+ * decommissioned!) 
-+ */ -+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); -+ BUG_ON(reg); -+#endif -+ reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 0, 1, 0, 1, 0, 0); -+ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg); -+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); -+ if (!reg) { -+ pr_err("The portal is not enabled!\n"); -+ kfree(s->cena); -+ return -1; -+ } -+ return 0; -+} -+ -+static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s) -+{ -+ free_page((unsigned long)s->cena); -+} -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h b/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h -new file mode 100644 -index 0000000..5b3a224 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h -@@ -0,0 +1,86 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "fsl_qbman_base.h" -+ -+/* The platform-independent code shouldn't need endianness, except for -+ * weird/fast-path cases like qbman_result_has_token(), which needs to -+ * perform a passive and endianness-specific test on a read-only data structure -+ * very quickly. It's an exception, and this symbol is used for that case. 
*/ -+#if defined(__BIG_ENDIAN) -+#define DQRR_TOK_OFFSET 0 -+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24 -+#define SCN_STATE_OFFSET_IN_MEM 8 -+#define SCN_RID_OFFSET_IN_MEM 8 -+#else -+#define DQRR_TOK_OFFSET 24 -+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0 -+#define SCN_STATE_OFFSET_IN_MEM 16 -+#define SCN_RID_OFFSET_IN_MEM 0 -+#endif -+ -+/* Similarly-named functions */ -+#define upper32(a) upper_32_bits(a) -+#define lower32(a) lower_32_bits(a) -+ -+ /****************/ -+ /* arch assists */ -+ /****************/ -+ -+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } -+#define lwsync() { asm volatile("dmb st" : : : "memory"); } -+#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); } -+#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); } -+static inline void prefetch_for_load(void *p) -+{ -+ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); -+} -+static inline void prefetch_for_store(void *p) -+{ -+ asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p)); -+} -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_test.c b/drivers/staging/fsl-mc/bus/dpio/qbman_test.c -new file mode 100644 -index 0000000..28396e7 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_test.c -@@ -0,0 +1,664 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+ -+#include "qbman_private.h" -+#include "fsl_qbman_portal.h" -+#include "qbman_debug.h" -+#include "../../include/fsl_dpaa2_fd.h" -+ -+#define QBMAN_SWP_CENA_BASE 0x818000000 -+#define QBMAN_SWP_CINH_BASE 0x81c000000 -+ -+#define QBMAN_PORTAL_IDX 2 -+#define QBMAN_TEST_FQID 19 -+#define QBMAN_TEST_BPID 23 -+#define QBMAN_USE_QD -+#ifdef QBMAN_USE_QD -+#define QBMAN_TEST_QDID 1 -+#endif -+#define QBMAN_TEST_LFQID 0xf00010 -+ -+#define NUM_EQ_FRAME 10 -+#define NUM_DQ_FRAME 10 -+#define NUM_DQ_IN_DQRR 5 -+#define NUM_DQ_IN_MEM (NUM_DQ_FRAME - NUM_DQ_IN_DQRR) -+ -+static struct qbman_swp *swp; -+static struct qbman_eq_desc eqdesc; -+static struct qbman_pull_desc pulldesc; -+static struct qbman_release_desc releasedesc; -+static struct qbman_eq_response eq_storage[1]; -+static struct dpaa2_dq dq_storage[NUM_DQ_IN_MEM] __aligned(64); -+static dma_addr_t eq_storage_phys; -+static dma_addr_t dq_storage_phys; -+ -+/* FQ ctx attribute values for the test code. 
*/ -+#define FQCTX_HI 0xabbaf00d -+#define FQCTX_LO 0x98765432 -+#define FQ_VFQID 0x123456 -+ -+/* Sample frame descriptor */ -+static struct qbman_fd_simple fd = { -+ .addr_lo = 0xbabaf33d, -+ .addr_hi = 0x01234567, -+ .len = 0x7777, -+ .frc = 0xdeadbeef, -+ .flc_lo = 0xcafecafe, -+ .flc_hi = 0xbeadabba -+}; -+ -+static void fd_inc(struct qbman_fd_simple *_fd) -+{ -+ _fd->addr_lo += _fd->len; -+ _fd->flc_lo += 0x100; -+ _fd->frc += 0x10; -+} -+ -+static int fd_cmp(struct qbman_fd *fda, struct qbman_fd *fdb) -+{ -+ int i; -+ -+ for (i = 0; i < 8; i++) -+ if (fda->words[i] - fdb->words[i]) -+ return 1; -+ return 0; -+} -+ -+struct qbman_fd fd_eq[NUM_EQ_FRAME]; -+struct qbman_fd fd_dq[NUM_DQ_FRAME]; -+ -+/* "Buffers" to be released (and storage for buffers to be acquired) */ -+static uint64_t rbufs[320]; -+static uint64_t abufs[320]; -+ -+static void do_enqueue(struct qbman_swp *swp) -+{ -+ int i, j, ret; -+ -+#ifdef QBMAN_USE_QD -+ pr_info("*****QBMan_test: Enqueue %d frames to QD %d\n", -+ NUM_EQ_FRAME, QBMAN_TEST_QDID); -+#else -+ pr_info("*****QBMan_test: Enqueue %d frames to FQ %d\n", -+ NUM_EQ_FRAME, QBMAN_TEST_FQID); -+#endif -+ for (i = 0; i < NUM_EQ_FRAME; i++) { -+ /*********************************/ -+ /* Prepare a enqueue descriptor */ -+ /*********************************/ -+ memset(eq_storage, 0, sizeof(eq_storage)); -+ eq_storage_phys = virt_to_phys(eq_storage); -+ qbman_eq_desc_clear(&eqdesc); -+ qbman_eq_desc_set_no_orp(&eqdesc, 0); -+ qbman_eq_desc_set_response(&eqdesc, eq_storage_phys, 0); -+ qbman_eq_desc_set_token(&eqdesc, 0x99); -+#ifdef QBMAN_USE_QD -+ /**********************************/ -+ /* Prepare a Queueing Destination */ -+ /**********************************/ -+ qbman_eq_desc_set_qd(&eqdesc, QBMAN_TEST_QDID, 0, 3); -+#else -+ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_FQID); -+#endif -+ -+ /******************/ -+ /* Try an enqueue */ -+ /******************/ -+ ret = qbman_swp_enqueue(swp, &eqdesc, -+ (const struct qbman_fd *)&fd); -+ 
BUG_ON(ret); -+ for (j = 0; j < 8; j++) -+ fd_eq[i].words[j] = *((uint32_t *)&fd + j); -+ fd_inc(&fd); -+ } -+} -+ -+static void do_push_dequeue(struct qbman_swp *swp) -+{ -+ int i, j; -+ const struct dpaa2_dq *dq_storage1; -+ const struct qbman_fd *__fd; -+ int loopvar; -+ -+ pr_info("*****QBMan_test: Start push dequeue\n"); -+ for (i = 0; i < NUM_DQ_FRAME; i++) { -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ dq_storage1 = qbman_swp_dqrr_next(swp); -+ } while (!dq_storage1); -+ if (dq_storage1) { -+ __fd = (const struct qbman_fd *) -+ dpaa2_dq_fd(dq_storage1); -+ for (j = 0; j < 8; j++) -+ fd_dq[i].words[j] = __fd->words[j]; -+ if (fd_cmp(&fd_eq[i], &fd_dq[i])) { -+ pr_info("enqueue FD is\n"); -+ hexdump(&fd_eq[i], 32); -+ pr_info("dequeue FD is\n"); -+ hexdump(&fd_dq[i], 32); -+ } -+ qbman_swp_dqrr_consume(swp, dq_storage1); -+ } else { -+ pr_info("The push dequeue fails\n"); -+ } -+ } -+} -+ -+static void do_pull_dequeue(struct qbman_swp *swp) -+{ -+ int i, j, ret; -+ const struct dpaa2_dq *dq_storage1; -+ const struct qbman_fd *__fd; -+ int loopvar; -+ -+ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in DQRR\n", -+ NUM_DQ_IN_DQRR); -+ for (i = 0; i < NUM_DQ_IN_DQRR; i++) { -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_storage(&pulldesc, NULL, 0, 0); -+ qbman_pull_desc_set_numframes(&pulldesc, 1); -+ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID); -+ -+ ret = qbman_swp_pull(swp, &pulldesc); -+ BUG_ON(ret); -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ dq_storage1 = qbman_swp_dqrr_next(swp); -+ } while (!dq_storage1); -+ -+ if (dq_storage1) { -+ __fd = (const struct qbman_fd *) -+ dpaa2_dq_fd(dq_storage1); -+ for (j = 0; j < 8; j++) -+ fd_dq[i].words[j] = __fd->words[j]; -+ if (fd_cmp(&fd_eq[i], &fd_dq[i])) { -+ pr_info("enqueue FD is\n"); -+ hexdump(&fd_eq[i], 32); -+ pr_info("dequeue FD is\n"); -+ hexdump(&fd_dq[i], 32); -+ } -+ qbman_swp_dqrr_consume(swp, dq_storage1); -+ } else { -+ 
pr_info("Dequeue with dq entry in DQRR fails\n"); -+ } -+ } -+ -+ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in memory\n", -+ NUM_DQ_IN_MEM); -+ for (i = 0; i < NUM_DQ_IN_MEM; i++) { -+ dq_storage_phys = virt_to_phys(&dq_storage[i]); -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_storage(&pulldesc, &dq_storage[i], -+ dq_storage_phys, 1); -+ qbman_pull_desc_set_numframes(&pulldesc, 1); -+ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID); -+ ret = qbman_swp_pull(swp, &pulldesc); -+ BUG_ON(ret); -+ -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ ret = qbman_result_has_new_result(swp, -+ &dq_storage[i]); -+ } while (!ret); -+ -+ if (ret) { -+ for (j = 0; j < 8; j++) -+ fd_dq[i + NUM_DQ_IN_DQRR].words[j] = -+ dq_storage[i].dont_manipulate_directly[j + 8]; -+ j = i + NUM_DQ_IN_DQRR; -+ if (fd_cmp(&fd_eq[j], &fd_dq[j])) { -+ pr_info("enqueue FD is\n"); -+ hexdump(&fd_eq[i + NUM_DQ_IN_DQRR], 32); -+ pr_info("dequeue FD is\n"); -+ hexdump(&fd_dq[i + NUM_DQ_IN_DQRR], 32); -+ hexdump(&dq_storage[i], 64); -+ } -+ } else { -+ pr_info("Dequeue with dq entry in memory fails\n"); -+ } -+ } -+} -+ -+static void release_buffer(struct qbman_swp *swp, unsigned int num) -+{ -+ int ret; -+ unsigned int i, j; -+ -+ qbman_release_desc_clear(&releasedesc); -+ qbman_release_desc_set_bpid(&releasedesc, QBMAN_TEST_BPID); -+ pr_info("*****QBMan_test: Release %d buffers to BP %d\n", -+ num, QBMAN_TEST_BPID); -+ for (i = 0; i < (num / 7 + 1); i++) { -+ j = ((num - i * 7) > 7) ? 7 : (num - i * 7); -+ ret = qbman_swp_release(swp, &releasedesc, &rbufs[i * 7], j); -+ BUG_ON(ret); -+ } -+} -+ -+static void acquire_buffer(struct qbman_swp *swp, unsigned int num) -+{ -+ int ret; -+ unsigned int i, j; -+ -+ pr_info("*****QBMan_test: Acquire %d buffers from BP %d\n", -+ num, QBMAN_TEST_BPID); -+ -+ for (i = 0; i < (num / 7 + 1); i++) { -+ j = ((num - i * 7) > 7) ? 
7 : (num - i * 7); -+ ret = qbman_swp_acquire(swp, QBMAN_TEST_BPID, &abufs[i * 7], j); -+ BUG_ON(ret != j); -+ } -+} -+ -+static void buffer_pool_test(struct qbman_swp *swp) -+{ -+ struct qbman_attr info; -+ struct dpaa2_dq *bpscn_message; -+ dma_addr_t bpscn_phys; -+ uint64_t bpscn_ctx; -+ uint64_t ctx = 0xbbccddaadeadbeefull; -+ int i, ret; -+ uint32_t hw_targ; -+ -+ pr_info("*****QBMan_test: test buffer pool management\n"); -+ ret = qbman_bp_query(swp, QBMAN_TEST_BPID, &info); -+ qbman_bp_attr_get_bpscn_addr(&info, &bpscn_phys); -+ pr_info("The bpscn is %llx, info_phys is %llx\n", bpscn_phys, -+ virt_to_phys(&info)); -+ bpscn_message = phys_to_virt(bpscn_phys); -+ -+ for (i = 0; i < 320; i++) -+ rbufs[i] = 0xf00dabba01234567ull + i * 0x40; -+ -+ release_buffer(swp, 320); -+ -+ pr_info("QBMan_test: query the buffer pool\n"); -+ qbman_bp_query(swp, QBMAN_TEST_BPID, &info); -+ hexdump(&info, 64); -+ qbman_bp_attr_get_hw_targ(&info, &hw_targ); -+ pr_info("hw_targ is %d\n", hw_targ); -+ -+ /* Acquire buffers to trigger BPSCN */ -+ acquire_buffer(swp, 300); -+ /* BPSCN should be written to the memory */ -+ qbman_bp_query(swp, QBMAN_TEST_BPID, &info); -+ hexdump(&info, 64); -+ hexdump(bpscn_message, 64); -+ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); -+ /* There should be free buffers in the pool */ -+ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); -+ /* Buffer pool is depleted */ -+ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message)); -+ /* The ctx should match */ -+ bpscn_ctx = qbman_result_bpscn_ctx(bpscn_message); -+ pr_info("BPSCN test: ctx %llx, bpscn_ctx %llx\n", ctx, bpscn_ctx); -+ BUG_ON(ctx != bpscn_ctx); -+ memset(bpscn_message, 0, sizeof(struct dpaa2_dq)); -+ -+ /* Re-seed the buffer pool to trigger BPSCN */ -+ release_buffer(swp, 240); -+ /* BPSCN should be written to the memory */ -+ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); -+ /* There should be free buffers in the pool */ -+ 
BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); -+ /* Buffer pool is not depleted */ -+ BUG_ON(qbman_result_bpscn_is_depleted(bpscn_message)); -+ memset(bpscn_message, 0, sizeof(struct dpaa2_dq)); -+ -+ acquire_buffer(swp, 260); -+ /* BPSCN should be written to the memory */ -+ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); -+ /* There should be free buffers in the pool while BPSCN generated */ -+ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); -+ /* Buffer pool is depletion */ -+ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message)); -+} -+ -+static void ceetm_test(struct qbman_swp *swp) -+{ -+ int i, j, ret; -+ -+ qbman_eq_desc_clear(&eqdesc); -+ qbman_eq_desc_set_no_orp(&eqdesc, 0); -+ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_LFQID); -+ pr_info("*****QBMan_test: Enqueue to LFQID %x\n", -+ QBMAN_TEST_LFQID); -+ for (i = 0; i < NUM_EQ_FRAME; i++) { -+ ret = qbman_swp_enqueue(swp, &eqdesc, -+ (const struct qbman_fd *)&fd); -+ BUG_ON(ret); -+ for (j = 0; j < 8; j++) -+ fd_eq[i].words[j] = *((uint32_t *)&fd + j); -+ fd_inc(&fd); -+ } -+} -+ -+int qbman_test(void) -+{ -+ struct qbman_swp_desc pd; -+ uint32_t reg; -+ -+ pd.cena_bar = ioremap_cache_ns(QBMAN_SWP_CENA_BASE + -+ QBMAN_PORTAL_IDX * 0x10000, 0x10000); -+ pd.cinh_bar = ioremap(QBMAN_SWP_CINH_BASE + -+ QBMAN_PORTAL_IDX * 0x10000, 0x10000); -+ -+ /* Detect whether the mc image is the test image with GPP setup */ -+ reg = readl_relaxed(pd.cena_bar + 0x4); -+ if (reg != 0xdeadbeef) { -+ pr_err("The MC image doesn't have GPP test setup, stop!\n"); -+ iounmap(pd.cena_bar); -+ iounmap(pd.cinh_bar); -+ return -1; -+ } -+ -+ pr_info("*****QBMan_test: Init QBMan SWP %d\n", QBMAN_PORTAL_IDX); -+ swp = qbman_swp_init(&pd); -+ if (!swp) { -+ iounmap(pd.cena_bar); -+ iounmap(pd.cinh_bar); -+ return -1; -+ } -+ -+ /*******************/ -+ /* Enqueue frames */ -+ /*******************/ -+ do_enqueue(swp); -+ -+ /*******************/ -+ /* Do pull dequeue */ -+ /*******************/ -+ 
do_pull_dequeue(swp); -+ -+ /*******************/ -+ /* Enqueue frames */ -+ /*******************/ -+ qbman_swp_push_set(swp, 0, 1); -+ qbman_swp_fq_schedule(swp, QBMAN_TEST_FQID); -+ do_enqueue(swp); -+ -+ /*******************/ -+ /* Do push dequeue */ -+ /*******************/ -+ do_push_dequeue(swp); -+ -+ /**************************/ -+ /* Test buffer pool funcs */ -+ /**************************/ -+ buffer_pool_test(swp); -+ -+ /******************/ -+ /* CEETM test */ -+ /******************/ -+ ceetm_test(swp); -+ -+ qbman_swp_finish(swp); -+ pr_info("*****QBMan_test: Kernel test Passed\n"); -+ return 0; -+} -+ -+/* user-space test-case, definitions: -+ * -+ * 1 portal only, using portal index 3. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define QBMAN_TEST_US_SWP 3 /* portal index for user space */ -+ -+#define QBMAN_TEST_MAGIC 'q' -+struct qbman_test_swp_ioctl { -+ unsigned long portal1_cinh; -+ unsigned long portal1_cena; -+}; -+struct qbman_test_dma_ioctl { -+ unsigned long ptr; -+ uint64_t phys_addr; -+}; -+ -+struct qbman_test_priv { -+ int has_swp_map; -+ int has_dma_map; -+ unsigned long pgoff; -+}; -+ -+#define QBMAN_TEST_SWP_MAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x01, struct qbman_test_swp_ioctl) -+#define QBMAN_TEST_SWP_UNMAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x02, struct qbman_test_swp_ioctl) -+#define QBMAN_TEST_DMA_MAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x03, struct qbman_test_dma_ioctl) -+#define QBMAN_TEST_DMA_UNMAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x04, struct qbman_test_dma_ioctl) -+ -+#define TEST_PORTAL1_CENA_PGOFF ((QBMAN_SWP_CENA_BASE + QBMAN_TEST_US_SWP * \ -+ 0x10000) >> PAGE_SHIFT) -+#define TEST_PORTAL1_CINH_PGOFF ((QBMAN_SWP_CINH_BASE + QBMAN_TEST_US_SWP * \ -+ 0x10000) >> PAGE_SHIFT) -+ -+static int qbman_test_open(struct inode *inode, struct file *filp) -+{ -+ struct qbman_test_priv *priv; -+ -+ priv = kmalloc(sizeof(struct qbman_test_priv), GFP_KERNEL); -+ if (!priv) -+ return -EIO; -+ filp->private_data = 
priv; -+ priv->has_swp_map = 0; -+ priv->has_dma_map = 0; -+ priv->pgoff = 0; -+ return 0; -+} -+ -+static int qbman_test_mmap(struct file *filp, struct vm_area_struct *vma) -+{ -+ int ret; -+ struct qbman_test_priv *priv = filp->private_data; -+ -+ BUG_ON(!priv); -+ -+ if (vma->vm_pgoff == TEST_PORTAL1_CINH_PGOFF) -+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -+ else if (vma->vm_pgoff == TEST_PORTAL1_CENA_PGOFF) -+ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot); -+ else if (vma->vm_pgoff == priv->pgoff) -+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); -+ else { -+ pr_err("Damn, unrecognised pg_off!!\n"); -+ return -EINVAL; -+ } -+ ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, -+ vma->vm_end - vma->vm_start, -+ vma->vm_page_prot); -+ return ret; -+} -+ -+static long qbman_test_ioctl(struct file *fp, unsigned int cmd, -+ unsigned long arg) -+{ -+ void __user *a = (void __user *)arg; -+ unsigned long longret, populate; -+ int ret = 0; -+ struct qbman_test_priv *priv = fp->private_data; -+ -+ BUG_ON(!priv); -+ -+ switch (cmd) { -+ case QBMAN_TEST_SWP_MAP: -+ { -+ struct qbman_test_swp_ioctl params; -+ -+ if (priv->has_swp_map) -+ return -EINVAL; -+ down_write(¤t->mm->mmap_sem); -+ /* Map portal1 CINH */ -+ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000, -+ PROT_READ | PROT_WRITE, MAP_SHARED, -+ TEST_PORTAL1_CINH_PGOFF, &populate); -+ if (longret & ~PAGE_MASK) { -+ ret = (int)longret; -+ goto out; -+ } -+ params.portal1_cinh = longret; -+ /* Map portal1 CENA */ -+ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000, -+ PROT_READ | PROT_WRITE, MAP_SHARED, -+ TEST_PORTAL1_CENA_PGOFF, &populate); -+ if (longret & ~PAGE_MASK) { -+ ret = (int)longret; -+ goto out; -+ } -+ params.portal1_cena = longret; -+ priv->has_swp_map = 1; -+out: -+ up_write(¤t->mm->mmap_sem); -+ if (!ret && copy_to_user(a, ¶ms, sizeof(params))) -+ return -EFAULT; -+ return ret; -+ } -+ case QBMAN_TEST_SWP_UNMAP: -+ { -+ struct qbman_test_swp_ioctl params; -+ -+ if 
(!priv->has_swp_map) -+ return -EINVAL; -+ -+ if (copy_from_user(¶ms, a, sizeof(params))) -+ return -EFAULT; -+ down_write(¤t->mm->mmap_sem); -+ do_munmap(current->mm, params.portal1_cena, 0x10000); -+ do_munmap(current->mm, params.portal1_cinh, 0x10000); -+ up_write(¤t->mm->mmap_sem); -+ priv->has_swp_map = 0; -+ return 0; -+ } -+ case QBMAN_TEST_DMA_MAP: -+ { -+ struct qbman_test_dma_ioctl params; -+ void *vaddr; -+ -+ if (priv->has_dma_map) -+ return -EINVAL; -+ vaddr = (void *)get_zeroed_page(GFP_KERNEL); -+ params.phys_addr = virt_to_phys(vaddr); -+ priv->pgoff = (unsigned long)params.phys_addr >> PAGE_SHIFT; -+ down_write(¤t->mm->mmap_sem); -+ longret = do_mmap_pgoff(fp, PAGE_SIZE, PAGE_SIZE, -+ PROT_READ | PROT_WRITE, MAP_SHARED, -+ priv->pgoff, &populate); -+ if (longret & ~PAGE_MASK) { -+ ret = (int)longret; -+ return ret; -+ } -+ params.ptr = longret; -+ priv->has_dma_map = 1; -+ up_write(¤t->mm->mmap_sem); -+ if (copy_to_user(a, ¶ms, sizeof(params))) -+ return -EFAULT; -+ return 0; -+ } -+ case QBMAN_TEST_DMA_UNMAP: -+ { -+ struct qbman_test_dma_ioctl params; -+ -+ if (!priv->has_dma_map) -+ return -EINVAL; -+ if (copy_from_user(¶ms, a, sizeof(params))) -+ return -EFAULT; -+ down_write(¤t->mm->mmap_sem); -+ do_munmap(current->mm, params.ptr, PAGE_SIZE); -+ up_write(¤t->mm->mmap_sem); -+ free_page((unsigned long)phys_to_virt(params.phys_addr)); -+ priv->has_dma_map = 0; -+ return 0; -+ } -+ default: -+ pr_err("Bad ioctl cmd!\n"); -+ } -+ return -EINVAL; -+} -+ -+static const struct file_operations qbman_fops = { -+ .open = qbman_test_open, -+ .mmap = qbman_test_mmap, -+ .unlocked_ioctl = qbman_test_ioctl -+}; -+ -+static struct miscdevice qbman_miscdev = { -+ .name = "qbman-test", -+ .fops = &qbman_fops, -+ .minor = MISC_DYNAMIC_MINOR, -+}; -+ -+static int qbman_miscdev_init; -+ -+static int test_init(void) -+{ -+ int ret = qbman_test(); -+ -+ if (!ret) { -+ /* MC image supports the test cases, so instantiate the -+ * character devic that the user-space 
test case will use to do -+ * its memory mappings. */ -+ ret = misc_register(&qbman_miscdev); -+ if (ret) { -+ pr_err("qbman-test: failed to register misc device\n"); -+ return ret; -+ } -+ pr_info("qbman-test: misc device registered!\n"); -+ qbman_miscdev_init = 1; -+ } -+ return 0; -+} -+ -+static void test_exit(void) -+{ -+ if (qbman_miscdev_init) { -+ misc_deregister(&qbman_miscdev); -+ qbman_miscdev_init = 0; -+ } -+} -+ -+module_init(test_init); -+module_exit(test_exit); -diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h -new file mode 100644 -index 0000000..c9b52dd ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h -@@ -0,0 +1,56 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPMCP_CMD_H -+#define _FSL_DPMCP_CMD_H -+ -+/* Minimal supported DPMCP Version */ -+#define DPMCP_MIN_VER_MAJOR 3 -+#define DPMCP_MIN_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPMCP_CMDID_CLOSE 0x800 -+#define DPMCP_CMDID_OPEN 0x80b -+#define DPMCP_CMDID_CREATE 0x90b -+#define DPMCP_CMDID_DESTROY 0x900 -+ -+#define DPMCP_CMDID_GET_ATTR 0x004 -+#define DPMCP_CMDID_RESET 0x005 -+ -+#define DPMCP_CMDID_SET_IRQ 0x010 -+#define DPMCP_CMDID_GET_IRQ 0x011 -+#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPMCP_CMDID_SET_IRQ_MASK 0x014 -+#define DPMCP_CMDID_GET_IRQ_MASK 0x015 -+#define DPMCP_CMDID_GET_IRQ_STATUS 0x016 -+ -+#endif /* _FSL_DPMCP_CMD_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c -new file mode 100644 -index 0000000..e23592a ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpmcp.c -@@ -0,0 +1,318 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "dpmcp.h" -+#include "dpmcp-cmd.h" -+ -+int dpmcp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmcp_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ cmd.params[0] |= mc_enc(0, 32, dpmcp_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpmcp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmcp_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ cmd.params[0] |= mc_enc(0, 32, cfg->portal_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpmcp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET, -+ 
cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmcp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 8, irq_index); -+ cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); -+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmcp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ irq_cfg->paddr = (uint64_t)mc_dec(cmd.params[1], 0, 64); -+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); -+ *type = (int)mc_dec(cmd.params[2], 32, 32); -+ return 0; -+} -+ -+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 8, en); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ 
struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *en = (uint8_t)mc_dec(cmd.params[0], 0, 8); -+ return 0; -+} -+ -+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, mask); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *mask = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ return 0; -+} -+ -+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *status = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ return 0; -+} -+ -+int 
dpmcp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmcp_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ attr->id = (int)mc_dec(cmd.params[0], 32, 32); -+ attr->version.major = (uint16_t)mc_dec(cmd.params[1], 0, 16); -+ attr->version.minor = (uint16_t)mc_dec(cmd.params[1], 16, 16); -+ return 0; -+} -diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h -new file mode 100644 -index 0000000..e434a24 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpmcp.h -@@ -0,0 +1,323 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMCP_H -+#define __FSL_DPMCP_H -+ -+/* Data Path Management Command Portal API -+ * Contains initialization APIs and runtime control APIs for DPMCP -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpmcp_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpmcp_id: DPMCP unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpmcp_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmcp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmcp_id, -+ uint16_t *token); -+ -+/* Get portal ID from pool */ -+#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1) -+ -+/** -+ * dpmcp_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpmcp_cfg - Structure representing DPMCP configuration -+ * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID -+ * from pool -+ */ -+struct dpmcp_cfg { -+ int portal_id; -+}; -+ -+/** -+ * dpmcp_create() - Create the DPMCP object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPMCP object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpmcp_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmcp_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpmcp_destroy() - Destroy the DPMCP object and release all its resources. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpmcp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/* IRQ */ -+/* IRQ Index */ -+#define DPMCP_IRQ_INDEX 0 -+/* irq event - Indicates that the link state changed */ -+#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001 -+ -+/** -+ * struct dpmcp_irq_cfg - IRQ configuration -+ * @paddr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpmcp_irq_cfg { -+ uint64_t paddr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmcp_irq_cfg *irq_cfg); -+ -+/** -+ * dpmcp_get_irq() - Get IRQ information from the DPMCP. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmcp_irq_cfg *irq_cfg); -+ -+/** -+ * dpmcp_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpmcp_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpmcp_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpmcp_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpmcp_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * struct dpmcp_attr - Structure representing DPMCP attributes -+ * @id: DPMCP object ID -+ * @version: DPMCP version -+ */ -+struct dpmcp_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPMCP version -+ * @major: DPMCP major version -+ * @minor: DPMCP minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpmcp_get_attributes - Retrieve DPMCP attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmcp_attr *attr); -+ -+#endif /* __FSL_DPMCP_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h -new file mode 100644 -index 0000000..ba8cfa9 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h -@@ -0,0 +1,47 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+/*************************************************************************//* -+ dpmng-cmd.h -+ -+ defines portal commands -+ -+ *//**************************************************************************/ -+ -+#ifndef __FSL_DPMNG_CMD_H -+#define __FSL_DPMNG_CMD_H -+ -+/* Command IDs */ -+#define DPMNG_CMDID_GET_CONT_ID 0x830 -+#define DPMNG_CMDID_GET_VERSION 0x831 -+ -+#endif /* __FSL_DPMNG_CMD_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c -new file mode 100644 -index 0000000..387390b ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpmng.c -@@ -0,0 +1,85 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. 
-+* -+* Redistribution and use in source and binary forms, with or without -+* modification, are permitted provided that the following conditions are met: -+* * Redistributions of source code must retain the above copyright -+* notice, this list of conditions and the following disclaimer. -+* * Redistributions in binary form must reproduce the above copyright -+* notice, this list of conditions and the following disclaimer in the -+* documentation and/or other materials provided with the distribution. -+* * Neither the name of the above-listed copyright holders nor the -+* names of any contributors may be used to endorse or promote products -+* derived from this software without specific prior written permission. -+* -+* -+* ALTERNATIVELY, this software may be distributed under the terms of the -+* GNU General Public License ("GPL") as published by the Free Software -+* Foundation, either version 2 of that License or (at your option) any -+* later version. -+* -+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+* POSSIBILITY OF SUCH DAMAGE. 
-+*/ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dpmng.h" -+#include "dpmng-cmd.h" -+ -+int mc_get_version(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ struct mc_version *mc_ver_info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ mc_ver_info->revision = mc_dec(cmd.params[0], 0, 32); -+ mc_ver_info->major = mc_dec(cmd.params[0], 32, 32); -+ mc_ver_info->minor = mc_dec(cmd.params[1], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(mc_get_version); -+ -+int dpmng_get_container_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int *container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *container_id = mc_dec(cmd.params[0], 0, 32); -+ -+ return 0; -+} -+ -diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h -new file mode 100644 -index 0000000..9b854fa ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h -@@ -0,0 +1,87 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+/*************************************************************************//* -+ dprc-cmd.h -+ -+ defines dprc portal commands -+ -+ *//**************************************************************************/ -+ -+#ifndef _FSL_DPRC_CMD_H -+#define _FSL_DPRC_CMD_H -+ -+/* Minimal supported DPRC Version */ -+#define DPRC_MIN_VER_MAJOR 5 -+#define DPRC_MIN_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPRC_CMDID_CLOSE 0x800 -+#define DPRC_CMDID_OPEN 0x805 -+#define DPRC_CMDID_CREATE 0x905 -+ -+#define DPRC_CMDID_GET_ATTR 0x004 -+#define DPRC_CMDID_RESET_CONT 0x005 -+ -+#define DPRC_CMDID_SET_IRQ 0x010 -+#define DPRC_CMDID_GET_IRQ 0x011 -+#define DPRC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPRC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPRC_CMDID_SET_IRQ_MASK 0x014 -+#define DPRC_CMDID_GET_IRQ_MASK 0x015 -+#define DPRC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPRC_CMDID_CREATE_CONT 0x151 -+#define DPRC_CMDID_DESTROY_CONT 0x152 -+#define DPRC_CMDID_SET_RES_QUOTA 0x155 -+#define DPRC_CMDID_GET_RES_QUOTA 0x156 -+#define DPRC_CMDID_ASSIGN 0x157 -+#define DPRC_CMDID_UNASSIGN 0x158 -+#define DPRC_CMDID_GET_OBJ_COUNT 0x159 -+#define DPRC_CMDID_GET_OBJ 0x15A -+#define DPRC_CMDID_GET_RES_COUNT 0x15B -+#define DPRC_CMDID_GET_RES_IDS 0x15C -+#define DPRC_CMDID_GET_OBJ_REG 0x15E -+#define DPRC_CMDID_SET_OBJ_IRQ 0x15F -+#define DPRC_CMDID_GET_OBJ_IRQ 0x160 -+#define DPRC_CMDID_SET_OBJ_LABEL 0x161 -+#define DPRC_CMDID_GET_OBJ_DESC 0x162 -+ -+#define DPRC_CMDID_CONNECT 0x167 -+#define DPRC_CMDID_DISCONNECT 0x168 -+#define DPRC_CMDID_GET_POOL 0x169 -+#define DPRC_CMDID_GET_POOL_COUNT 0x16A -+ -+#define DPRC_CMDID_GET_CONNECTION 0x16C -+ -+#endif /* _FSL_DPRC_CMD_H */ -diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c -new file mode 100644 -index 0000000..5b6fa1c ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -0,0 +1,1084 @@ -+/* -+ * Freescale data path resource 
container (DPRC) driver -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#include "../include/mc-private.h" -+#include "../include/mc-sys.h" -+#include -+#include -+#include -+#include "dprc-cmd.h" -+#include "dpmcp.h" -+ -+struct dprc_child_objs { -+ int child_count; -+ struct dprc_obj_desc *child_array; -+}; -+ -+static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data) -+{ -+ int i; -+ struct dprc_child_objs *objs; -+ struct fsl_mc_device *mc_dev; -+ -+ WARN_ON(!dev); -+ WARN_ON(!data); -+ mc_dev = to_fsl_mc_device(dev); -+ objs = data; -+ -+ for (i = 0; i < objs->child_count; i++) { -+ struct dprc_obj_desc *obj_desc = &objs->child_array[i]; -+ -+ if (strlen(obj_desc->type) != 0 && -+ FSL_MC_DEVICE_MATCH(mc_dev, obj_desc)) -+ break; -+ } -+ -+ if (i == objs->child_count) -+ fsl_mc_device_remove(mc_dev); -+ -+ return 0; -+} -+ -+static int __fsl_mc_device_remove(struct device *dev, void *data) -+{ -+ WARN_ON(!dev); -+ WARN_ON(data); -+ fsl_mc_device_remove(to_fsl_mc_device(dev)); -+ return 0; -+} -+ -+/** -+ * dprc_remove_devices - Removes devices for objects removed from a DPRC -+ * -+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object -+ * @obj_desc_array: array of object descriptors for child objects currently -+ * present in the DPRC in the MC. -+ * @num_child_objects_in_mc: number of entries in obj_desc_array -+ * -+ * Synchronizes the state of the Linux bus driver with the actual state of -+ * the MC by removing devices that represent MC objects that have -+ * been dynamically removed in the physical DPRC. 
-+ */ -+static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev, -+ struct dprc_obj_desc *obj_desc_array, -+ int num_child_objects_in_mc) -+{ -+ if (num_child_objects_in_mc != 0) { -+ /* -+ * Remove child objects that are in the DPRC in Linux, -+ * but not in the MC: -+ */ -+ struct dprc_child_objs objs; -+ -+ objs.child_count = num_child_objects_in_mc; -+ objs.child_array = obj_desc_array; -+ device_for_each_child(&mc_bus_dev->dev, &objs, -+ __fsl_mc_device_remove_if_not_in_mc); -+ } else { -+ /* -+ * There are no child objects for this DPRC in the MC. -+ * So, remove all the child devices from Linux: -+ */ -+ device_for_each_child(&mc_bus_dev->dev, NULL, -+ __fsl_mc_device_remove); -+ } -+} -+ -+static int __fsl_mc_device_match(struct device *dev, void *data) -+{ -+ struct dprc_obj_desc *obj_desc = data; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ -+ return FSL_MC_DEVICE_MATCH(mc_dev, obj_desc); -+} -+ -+static struct fsl_mc_device *fsl_mc_device_lookup(struct dprc_obj_desc -+ *obj_desc, -+ struct fsl_mc_device -+ *mc_bus_dev) -+{ -+ struct device *dev; -+ -+ dev = device_find_child(&mc_bus_dev->dev, obj_desc, -+ __fsl_mc_device_match); -+ -+ return dev ? to_fsl_mc_device(dev) : NULL; -+} -+ -+/** -+ * check_plugged_state_change - Check change in an MC object's plugged state -+ * -+ * @mc_dev: pointer to the fsl-mc device for a given MC object -+ * @obj_desc: pointer to the MC object's descriptor in the MC -+ * -+ * If the plugged state has changed from unplugged to plugged, the fsl-mc -+ * device is bound to the corresponding device driver. -+ * If the plugged state has changed from plugged to unplugged, the fsl-mc -+ * device is unbound from the corresponding device driver. 
-+ */ -+static void check_plugged_state_change(struct fsl_mc_device *mc_dev, -+ struct dprc_obj_desc *obj_desc) -+{ -+ int error; -+ uint32_t plugged_flag_at_mc = -+ (obj_desc->state & DPRC_OBJ_STATE_PLUGGED); -+ -+ if (plugged_flag_at_mc != -+ (mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) { -+ if (plugged_flag_at_mc) { -+ mc_dev->obj_desc.state |= DPRC_OBJ_STATE_PLUGGED; -+ error = device_attach(&mc_dev->dev); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "device_attach() failed: %d\n", -+ error); -+ } -+ } else { -+ mc_dev->obj_desc.state &= ~DPRC_OBJ_STATE_PLUGGED; -+ device_release_driver(&mc_dev->dev); -+ } -+ } -+} -+ -+/** -+ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC -+ * -+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object -+ * @driver_override: driver override to apply to new objects found in the DPRC, -+ * or NULL, if none. -+ * @obj_desc_array: array of device descriptors for child devices currently -+ * present in the physical DPRC. -+ * @num_child_objects_in_mc: number of entries in obj_desc_array -+ * -+ * Synchronizes the state of the Linux bus driver with the actual -+ * state of the MC by adding objects that have been newly discovered -+ * in the physical DPRC. 
-+ */ -+static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev, -+ const char *driver_override, -+ struct dprc_obj_desc *obj_desc_array, -+ int num_child_objects_in_mc) -+{ -+ int error; -+ int i; -+ -+ for (i = 0; i < num_child_objects_in_mc; i++) { -+ struct fsl_mc_device *child_dev; -+ struct dprc_obj_desc *obj_desc = &obj_desc_array[i]; -+ -+ if (strlen(obj_desc->type) == 0) -+ continue; -+ -+ /* -+ * Check if device is already known to Linux: -+ */ -+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev); -+ if (child_dev) { -+ check_plugged_state_change(child_dev, obj_desc); -+ continue; -+ } -+ -+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev, -+ driver_override, &child_dev); -+ if (error < 0) -+ continue; -+ } -+} -+ -+void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev) -+{ -+ int pool_type; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ -+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) { -+ struct fsl_mc_resource_pool *res_pool = -+ &mc_bus->resource_pools[pool_type]; -+ -+ res_pool->type = pool_type; -+ res_pool->max_count = 0; -+ res_pool->free_count = 0; -+ res_pool->mc_bus = mc_bus; -+ INIT_LIST_HEAD(&res_pool->free_list); -+ mutex_init(&res_pool->mutex); -+ } -+} -+ -+static void dprc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev, -+ enum fsl_mc_pool_type pool_type) -+{ -+ struct fsl_mc_resource *resource; -+ struct fsl_mc_resource *next; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ struct fsl_mc_resource_pool *res_pool = -+ &mc_bus->resource_pools[pool_type]; -+ int free_count = 0; -+ -+ WARN_ON(res_pool->type != pool_type); -+ WARN_ON(res_pool->free_count != res_pool->max_count); -+ -+ list_for_each_entry_safe(resource, next, &res_pool->free_list, node) { -+ free_count++; -+ WARN_ON(resource->type != res_pool->type); -+ WARN_ON(resource->parent_pool != res_pool); -+ devm_kfree(&mc_bus_dev->dev, resource); -+ } -+ -+ WARN_ON(free_count != 
res_pool->free_count); -+} -+ -+/* -+ * Clean up all resource pools other than the IRQ pool -+ */ -+void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev) -+{ -+ int pool_type; -+ -+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) { -+ if (pool_type != FSL_MC_POOL_IRQ) -+ dprc_cleanup_resource_pool(mc_bus_dev, pool_type); -+ } -+} -+ -+/** -+ * dprc_scan_objects - Discover objects in a DPRC -+ * -+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object -+ * @driver_override: driver override to apply to new objects found in the DPRC, -+ * or NULL, if none. -+ * @total_irq_count: total number of IRQs needed by objects in the DPRC. -+ * -+ * Detects objects added and removed from a DPRC and synchronizes the -+ * state of the Linux bus driver, MC by adding and removing -+ * devices accordingly. -+ * Two types of devices can be found in a DPRC: allocatable objects (e.g., -+ * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni). -+ * All allocatable devices needed to be probed before all non-allocatable -+ * devices, to ensure that device drivers for non-allocatable -+ * devices can allocate any type of allocatable devices. -+ * That is, we need to ensure that the corresponding resource pools are -+ * populated before they can get allocation requests from probe callbacks -+ * of the device drivers for the non-allocatable devices. 
-+ */ -+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, -+ const char *driver_override, -+ unsigned int *total_irq_count) -+{ -+ int num_child_objects; -+ int dprc_get_obj_failures; -+ int error; -+ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count; -+ struct dprc_obj_desc *child_obj_desc_array = NULL; -+ -+ error = dprc_get_obj_count(mc_bus_dev->mc_io, -+ 0, -+ mc_bus_dev->mc_handle, -+ &num_child_objects); -+ if (error < 0) { -+ dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ if (num_child_objects != 0) { -+ int i; -+ -+ child_obj_desc_array = -+ devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects, -+ sizeof(*child_obj_desc_array), -+ GFP_KERNEL); -+ if (!child_obj_desc_array) -+ return -ENOMEM; -+ -+ /* -+ * Discover objects currently present in the physical DPRC: -+ */ -+ dprc_get_obj_failures = 0; -+ for (i = 0; i < num_child_objects; i++) { -+ struct dprc_obj_desc *obj_desc = -+ &child_obj_desc_array[i]; -+ -+ error = dprc_get_obj(mc_bus_dev->mc_io, -+ 0, -+ mc_bus_dev->mc_handle, -+ i, obj_desc); -+ -+ /* -+ * -ENXIO means object index was invalid. -+ * This is caused when the DPRC was changed at -+ * the MC during the scan. In this case, -+ * abort the current scan. -+ */ -+ if (error == -ENXIO) -+ return error; -+ -+ if (error < 0) { -+ dev_err(&mc_bus_dev->dev, -+ "dprc_get_obj(i=%d) failed: %d\n", -+ i, error); -+ /* -+ * Mark the obj entry as "invalid", by using the -+ * empty string as obj type: -+ */ -+ obj_desc->type[0] = '\0'; -+ obj_desc->id = error; -+ dprc_get_obj_failures++; -+ continue; -+ } -+ -+ /* -+ * for DPRC versions that do not support the -+ * shareability attribute, make simplifying assumption -+ * that only SEC is not shareable. 
-+ */ -+ if ((strcmp(obj_desc->type, "dpseci") == 0) && -+ (obj_desc->ver_major < 4)) -+ obj_desc->flags |= -+ DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY; -+ -+ irq_count += obj_desc->irq_count; -+ dev_dbg(&mc_bus_dev->dev, -+ "Discovered object: type %s, id %d\n", -+ obj_desc->type, obj_desc->id); -+ } -+ -+ if (dprc_get_obj_failures != 0) { -+ dev_err(&mc_bus_dev->dev, -+ "%d out of %d devices could not be retrieved\n", -+ dprc_get_obj_failures, num_child_objects); -+ } -+ } -+ -+ *total_irq_count = irq_count; -+ dprc_remove_devices(mc_bus_dev, child_obj_desc_array, -+ num_child_objects); -+ -+ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array, -+ num_child_objects); -+ -+ if (child_obj_desc_array) -+ devm_kfree(&mc_bus_dev->dev, child_obj_desc_array); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(dprc_scan_objects); -+ -+/** -+ * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state -+ * -+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object -+ * -+ * Scans the physical DPRC and synchronizes the state of the Linux -+ * bus driver with the actual state of the MC by adding and removing -+ * devices as appropriate. 
-+ */ -+static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev) -+{ -+ int error; -+ unsigned int irq_count; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ -+ dprc_init_all_resource_pools(mc_bus_dev); -+ -+ /* -+ * Discover objects in the DPRC: -+ */ -+ mutex_lock(&mc_bus->scan_mutex); -+ error = dprc_scan_objects(mc_bus_dev, NULL, &irq_count); -+ mutex_unlock(&mc_bus->scan_mutex); -+ if (error < 0) -+ goto error; -+ -+ if (fsl_mc_interrupts_supported() && !mc_bus->irq_resources) { -+ irq_count += FSL_MC_IRQ_POOL_MAX_EXTRA_IRQS; -+ error = fsl_mc_populate_irq_pool(mc_bus, irq_count); -+ if (error < 0) -+ goto error; -+ } -+ -+ return 0; -+error: -+ device_for_each_child(&mc_bus_dev->dev, NULL, __fsl_mc_device_remove); -+ dprc_cleanup_all_resource_pools(mc_bus_dev); -+ return error; -+} -+ -+/** -+ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0 -+ * -+ * @irq: IRQ number of the interrupt being handled -+ * @arg: Pointer to device structure -+ */ -+static irqreturn_t dprc_irq0_handler(int irq_num, void *arg) -+{ -+ return IRQ_WAKE_THREAD; -+} -+ -+/** -+ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0 -+ * -+ * @irq: IRQ number of the interrupt being handled -+ * @arg: Pointer to device structure -+ */ -+static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg) -+{ -+ int error; -+ uint32_t status; -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); -+ struct fsl_mc_io *mc_io = mc_dev->mc_io; -+ int irq_index = 0; -+ -+ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n", -+ irq_num, smp_processor_id()); -+ if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC))) -+ return IRQ_HANDLED; -+ -+ mutex_lock(&mc_bus->scan_mutex); -+ if (WARN_ON(mc_dev->irqs[irq_index]->irq_number != (uint32_t)irq_num)) -+ goto out; -+ -+ status = 0; -+ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, irq_index, -+ &status); 
-+ if (error < 0) { -+ dev_err(dev, -+ "dprc_get_irq_status() failed: %d\n", error); -+ goto out; -+ } -+ -+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, irq_index, -+ status); -+ if (error < 0) { -+ dev_err(dev, -+ "dprc_clear_irq_status() failed: %d\n", error); -+ goto out; -+ } -+ -+ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED | -+ DPRC_IRQ_EVENT_OBJ_REMOVED | -+ DPRC_IRQ_EVENT_CONTAINER_DESTROYED | -+ DPRC_IRQ_EVENT_OBJ_DESTROYED | -+ DPRC_IRQ_EVENT_OBJ_CREATED)) { -+ unsigned int irq_count; -+ -+ error = dprc_scan_objects(mc_dev, NULL, &irq_count); -+ if (error < 0) { -+ if (error != -ENXIO) /* don't need to report aborted scan */ -+ dev_err(dev, "dprc_scan_objects() failed: %d\n", error); -+ goto out; -+ } -+ -+ WARN_ON((int16_t)irq_count < 0); -+ -+ if ((int16_t)irq_count > -+ mc_bus->resource_pools[FSL_MC_POOL_IRQ].max_count) { -+ dev_warn(dev, -+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n", -+ irq_count, -+ mc_bus->resource_pools[FSL_MC_POOL_IRQ]. -+ max_count); -+ } -+ } -+ -+out: -+ mutex_unlock(&mc_bus->scan_mutex); -+ return IRQ_HANDLED; -+} -+ -+/* -+ * Disable and clear interrupts for a given DPRC object -+ */ -+static int disable_dprc_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ int error; -+ struct fsl_mc_io *mc_io = mc_dev->mc_io; -+ int irq_count = mc_dev->obj_desc.irq_count; -+ -+ if (WARN_ON(irq_count == 0)) -+ return -EINVAL; -+ -+ for (i = 0; i < irq_count; i++) { -+ /* -+ * Disable generation of interrupt i, while we configure it: -+ */ -+ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, i, 0); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Disabling DPRC IRQ %d failed: dprc_set_irq_enable() failed: %d\n", -+ i, error); -+ -+ return error; -+ } -+ -+ /* -+ * Disable all interrupt causes for interrupt i: -+ */ -+ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, i, 0x0); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Disabling DPRC IRQ %d failed: dprc_set_irq_mask() failed: %d\n", -+ i, error); -+ -+ 
return error; -+ } -+ -+ /* -+ * Clear any leftover interrupt i: -+ */ -+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, i, -+ ~0x0U); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Disabling DPRC IRQ %d failed: dprc_clear_irq_status() failed: %d\n", -+ i, error); -+ -+ return error; -+ } -+ } -+ -+ return 0; -+} -+ -+static void unregister_dprc_irq_handlers(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ struct fsl_mc_device_irq *irq; -+ int irq_count = mc_dev->obj_desc.irq_count; -+ -+ for (i = 0; i < irq_count; i++) { -+ irq = mc_dev->irqs[i]; -+ devm_free_irq(&mc_dev->dev, irq->irq_number, -+ &mc_dev->dev); -+ } -+} -+ -+static int register_dprc_irq_handlers(struct fsl_mc_device *mc_dev) -+{ -+ static const struct irq_handler { -+ irq_handler_t irq_handler; -+ irq_handler_t irq_handler_thread; -+ const char *irq_name; -+ } irq_handlers[] = { -+ [0] = { -+ .irq_handler = dprc_irq0_handler, -+ .irq_handler_thread = dprc_irq0_handler_thread, -+ .irq_name = "FSL MC DPRC irq0", -+ }, -+ }; -+ -+ unsigned int i; -+ int error; -+ struct fsl_mc_device_irq *irq; -+ unsigned int num_irq_handlers_registered = 0; -+ int irq_count = mc_dev->obj_desc.irq_count; -+ -+ if (WARN_ON(irq_count != ARRAY_SIZE(irq_handlers))) -+ return -EINVAL; -+ -+ for (i = 0; i < ARRAY_SIZE(irq_handlers); i++) { -+ irq = mc_dev->irqs[i]; -+ -+ /* -+ * NOTE: devm_request_threaded_irq() invokes the device-specific -+ * function that programs the MSI physically in the device -+ */ -+ error = devm_request_threaded_irq(&mc_dev->dev, -+ irq->irq_number, -+ irq_handlers[i].irq_handler, -+ irq_handlers[i]. 
-+ irq_handler_thread, -+ IRQF_NO_SUSPEND | -+ IRQF_ONESHOT, -+ irq_handlers[i].irq_name, -+ &mc_dev->dev); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "devm_request_threaded_irq() failed: %d\n", -+ error); -+ goto error_unregister_irq_handlers; -+ } -+ -+ num_irq_handlers_registered++; -+ } -+ -+ return 0; -+ -+error_unregister_irq_handlers: -+ for (i = 0; i < num_irq_handlers_registered; i++) { -+ irq = mc_dev->irqs[i]; -+ devm_free_irq(&mc_dev->dev, irq->irq_number, -+ &mc_dev->dev); -+ } -+ -+ return error; -+} -+ -+static int enable_dprc_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ int error; -+ int irq_count = mc_dev->obj_desc.irq_count; -+ -+ for (i = 0; i < irq_count; i++) { -+ /* -+ * Enable all interrupt causes for the interrupt: -+ */ -+ error = dprc_set_irq_mask(mc_dev->mc_io, -+ 0, -+ mc_dev->mc_handle, -+ i, -+ ~0x0u); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Enabling DPRC IRQ %d failed: dprc_set_irq_mask() failed: %d\n", -+ i, error); -+ -+ return error; -+ } -+ -+ /* -+ * Enable generation of the interrupt: -+ */ -+ error = dprc_set_irq_enable(mc_dev->mc_io, -+ 0, -+ mc_dev->mc_handle, -+ i, 1); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Enabling DPRC IRQ %d failed: dprc_set_irq_enable() failed: %d\n", -+ i, error); -+ -+ return error; -+ } -+ } -+ -+ return 0; -+} -+ -+/* -+ * Setup interrupts for a given DPRC device -+ */ -+static int dprc_setup_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ -+ error = fsl_mc_allocate_irqs(mc_dev); -+ if (error < 0) -+ return error; -+ -+ error = disable_dprc_irqs(mc_dev); -+ if (error < 0) -+ goto error_free_irqs; -+ -+ error = register_dprc_irq_handlers(mc_dev); -+ if (error < 0) -+ goto error_free_irqs; -+ -+ error = enable_dprc_irqs(mc_dev); -+ if (error < 0) -+ goto error_unregister_irq_handlers; -+ -+ return 0; -+ -+error_unregister_irq_handlers: -+ unregister_dprc_irq_handlers(mc_dev); -+ -+error_free_irqs: -+ fsl_mc_free_irqs(mc_dev); -+ return error; -+} -+ -+/* -+ * 
Creates a DPMCP for a DPRC's built-in MC portal -+ */ -+static int dprc_create_dpmcp(struct fsl_mc_device *dprc_dev) -+{ -+ int error; -+ struct dpmcp_cfg dpmcp_cfg; -+ uint16_t dpmcp_handle; -+ struct dprc_res_req res_req; -+ struct dpmcp_attr dpmcp_attr; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(dprc_dev); -+ -+ dpmcp_cfg.portal_id = mc_bus->dprc_attr.portal_id; -+ error = dpmcp_create(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ &dpmcp_cfg, -+ &dpmcp_handle); -+ if (error < 0) { -+ dev_err(&dprc_dev->dev, "dpmcp_create() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ /* -+ * Set the state of the newly created DPMCP object to be "plugged": -+ */ -+ -+ error = dpmcp_get_attributes(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_handle, -+ &dpmcp_attr); -+ if (error < 0) { -+ dev_err(&dprc_dev->dev, "dpmcp_get_attributes() failed: %d\n", -+ error); -+ goto error_destroy_dpmcp; -+ } -+ -+ if (WARN_ON(dpmcp_attr.id != mc_bus->dprc_attr.portal_id)) { -+ error = -EINVAL; -+ goto error_destroy_dpmcp; -+ } -+ -+ strcpy(res_req.type, "dpmcp"); -+ res_req.num = 1; -+ res_req.options = -+ (DPRC_RES_REQ_OPT_EXPLICIT | DPRC_RES_REQ_OPT_PLUGGED); -+ res_req.id_base_align = dpmcp_attr.id; -+ -+ error = dprc_assign(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dprc_dev->mc_handle, -+ dprc_dev->obj_desc.id, -+ &res_req); -+ -+ if (error < 0) { -+ dev_err(&dprc_dev->dev, "dprc_assign() failed: %d\n", error); -+ goto error_destroy_dpmcp; -+ } -+ -+ (void)dpmcp_close(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_handle); -+ return 0; -+ -+error_destroy_dpmcp: -+ (void)dpmcp_destroy(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_handle); -+ return error; -+} -+ -+/* -+ * Destroys the DPMCP for a DPRC's built-in MC portal -+ */ -+static void dprc_destroy_dpmcp(struct fsl_mc_device *dprc_dev) -+{ -+ int error; -+ uint16_t dpmcp_handle; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(dprc_dev); -+ -+ if (WARN_ON(!dprc_dev->mc_io || dprc_dev->mc_io->dpmcp_dev)) -+ 
return; -+ -+ error = dpmcp_open(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ mc_bus->dprc_attr.portal_id, -+ &dpmcp_handle); -+ if (error < 0) { -+ dev_err(&dprc_dev->dev, "dpmcp_open() failed: %d\n", -+ error); -+ return; -+ } -+ -+ error = dpmcp_destroy(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_handle); -+ if (error < 0) { -+ dev_err(&dprc_dev->dev, "dpmcp_destroy() failed: %d\n", -+ error); -+ return; -+ } -+} -+ -+/** -+ * dprc_probe - callback invoked when a DPRC is being bound to this driver -+ * -+ * @mc_dev: Pointer to fsl-mc device representing a DPRC -+ * -+ * It opens the physical DPRC in the MC. -+ * It scans the DPRC to discover the MC objects contained in it. -+ * It creates the interrupt pool for the MC bus associated with the DPRC. -+ * It configures the interrupts for the DPRC device itself. -+ */ -+static int dprc_probe(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ size_t region_size; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); -+ bool mc_io_created = false; -+ bool dev_root_set = false; -+ -+ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) -+ return -EINVAL; -+ -+ if (mc_dev->mc_io) { -+ /* -+ * This is the root DPRC -+ */ -+ if (WARN_ON(fsl_mc_bus_type.dev_root)) -+ return -EINVAL; -+ -+ fsl_mc_bus_type.dev_root = &mc_dev->dev; -+ dev_root_set = true; -+ } else { -+ /* -+ * This is a child DPRC -+ */ -+ if (WARN_ON(!fsl_mc_bus_type.dev_root)) -+ return -EINVAL; -+ -+ if (WARN_ON(mc_dev->obj_desc.region_count == 0)) -+ return -EINVAL; -+ -+ region_size = mc_dev->regions[0].end - -+ mc_dev->regions[0].start + 1; -+ -+ error = fsl_create_mc_io(&mc_dev->dev, -+ mc_dev->regions[0].start, -+ region_size, -+ NULL, 0, &mc_dev->mc_io); -+ if (error < 0) -+ return error; -+ -+ mc_io_created = true; -+ } -+ -+ error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, -+ &mc_dev->mc_handle); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error); -+ goto error_cleanup_mc_io; -+ } -+ -+ error = 
dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ &mc_bus->dprc_attr); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n", -+ error); -+ goto error_cleanup_open; -+ } -+ -+ if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR || -+ (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR && -+ mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) { -+ dev_err(&mc_dev->dev, -+ "ERROR: DPRC version %d.%d not supported\n", -+ mc_bus->dprc_attr.version.major, -+ mc_bus->dprc_attr.version.minor); -+ error = -ENOTSUPP; -+ goto error_cleanup_open; -+ } -+ -+ if (fsl_mc_interrupts_supported()) { -+ /* -+ * Create DPMCP for the DPRC's built-in portal: -+ */ -+ error = dprc_create_dpmcp(mc_dev); -+ if (error < 0) -+ goto error_cleanup_open; -+ } -+ -+ mutex_init(&mc_bus->scan_mutex); -+ -+ /* -+ * Discover MC objects in the DPRC object: -+ */ -+ error = dprc_scan_container(mc_dev); -+ if (error < 0) -+ goto error_destroy_dpmcp; -+ -+ if (fsl_mc_interrupts_supported()) { -+ /* -+ * The fsl_mc_device object associated with the DPMCP object -+ * created above was created as part of the -+ * dprc_scan_container() call above: -+ */ -+ if (WARN_ON(!mc_dev->mc_io->dpmcp_dev)) { -+ error = -EINVAL; -+ goto error_cleanup_dprc_scan; -+ } -+ -+ /* -+ * Allocate MC portal to be used in atomic context -+ * (e.g., to program MSIs from program_msi_at_mc()) -+ */ -+ error = fsl_mc_portal_allocate(NULL, -+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, -+ &mc_bus->atomic_mc_io); -+ if (error < 0) -+ goto error_cleanup_dprc_scan; -+ -+ pr_info("fsl-mc: Allocated dpmcp.%d to dprc.%d for atomic MC I/O\n", -+ mc_bus->atomic_mc_io->dpmcp_dev->obj_desc.id, -+ mc_dev->obj_desc.id); -+ -+ /* -+ * Open DPRC handle to be used with mc_bus->atomic_mc_io: -+ */ -+ error = dprc_open(mc_bus->atomic_mc_io, 0, mc_dev->obj_desc.id, -+ &mc_bus->atomic_dprc_handle); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", -+ error); -+ goto 
error_cleanup_atomic_mc_io; -+ } -+ -+ /* -+ * Configure interrupt for the DPMCP object associated with the -+ * DPRC object's built-in portal: -+ * -+ * NOTE: We have to do this after calling dprc_scan_container(), -+ * since dprc_scan_container() populates the IRQ pool for -+ * this DPRC. -+ */ -+ error = fsl_mc_io_setup_dpmcp_irq(mc_dev->mc_io); -+ if (error < 0) -+ goto error_cleanup_atomic_dprc_handle; -+ -+ /* -+ * Configure interrupts for the DPRC object associated with -+ * this MC bus: -+ */ -+ error = dprc_setup_irqs(mc_dev); -+ if (error < 0) -+ goto error_cleanup_atomic_dprc_handle; -+ } -+ -+ dev_info(&mc_dev->dev, "DPRC device bound to driver"); -+ return 0; -+ -+error_cleanup_atomic_dprc_handle: -+ (void)dprc_close(mc_bus->atomic_mc_io, 0, mc_bus->atomic_dprc_handle); -+ -+error_cleanup_atomic_mc_io: -+ fsl_mc_portal_free(mc_bus->atomic_mc_io); -+ -+error_cleanup_dprc_scan: -+ fsl_mc_io_unset_dpmcp(mc_dev->mc_io); -+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); -+ dprc_cleanup_all_resource_pools(mc_dev); -+ if (fsl_mc_interrupts_supported()) -+ fsl_mc_cleanup_irq_pool(mc_bus); -+ -+error_destroy_dpmcp: -+ dprc_destroy_dpmcp(mc_dev); -+ -+error_cleanup_open: -+ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -+ -+error_cleanup_mc_io: -+ if (mc_io_created) { -+ fsl_destroy_mc_io(mc_dev->mc_io); -+ mc_dev->mc_io = NULL; -+ } -+ -+ if (dev_root_set) -+ fsl_mc_bus_type.dev_root = NULL; -+ -+ return error; -+} -+ -+/* -+ * Tear down interrupts for a given DPRC object -+ */ -+static void dprc_teardown_irqs(struct fsl_mc_device *mc_dev) -+{ -+ (void)disable_dprc_irqs(mc_dev); -+ unregister_dprc_irq_handlers(mc_dev); -+ fsl_mc_free_irqs(mc_dev); -+} -+ -+/** -+ * dprc_remove - callback invoked when a DPRC is being unbound from this driver -+ * -+ * @mc_dev: Pointer to fsl-mc device representing the DPRC -+ * -+ * It removes the DPRC's child objects from Linux (not from the MC) and -+ * closes the DPRC device in the MC. 
-+ * It tears down the interrupts that were configured for the DPRC device. -+ * It destroys the interrupt pool associated with this MC bus. -+ */ -+static int dprc_remove(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); -+ -+ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) -+ return -EINVAL; -+ if (WARN_ON(!mc_dev->mc_io)) -+ return -EINVAL; -+ -+ if (WARN_ON(!mc_bus->irq_resources)) -+ return -EINVAL; -+ -+ if (fsl_mc_interrupts_supported()) { -+ dprc_teardown_irqs(mc_dev); -+ error = dprc_close(mc_bus->atomic_mc_io, 0, -+ mc_bus->atomic_dprc_handle); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", -+ error); -+ } -+ -+ fsl_mc_portal_free(mc_bus->atomic_mc_io); -+ } -+ -+ fsl_mc_io_unset_dpmcp(mc_dev->mc_io); -+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); -+ dprc_cleanup_all_resource_pools(mc_dev); -+ dprc_destroy_dpmcp(mc_dev); -+ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -+ if (error < 0) -+ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error); -+ -+ if (fsl_mc_interrupts_supported()) -+ fsl_mc_cleanup_irq_pool(mc_bus); -+ -+ fsl_destroy_mc_io(mc_dev->mc_io); -+ mc_dev->mc_io = NULL; -+ -+ if (&mc_dev->dev == fsl_mc_bus_type.dev_root) -+ fsl_mc_bus_type.dev_root = NULL; -+ -+ dev_info(&mc_dev->dev, "DPRC device unbound from driver"); -+ return 0; -+} -+ -+static const struct fsl_mc_device_match_id match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dprc"}, -+ {.vendor = 0x0}, -+}; -+ -+static struct fsl_mc_driver dprc_driver = { -+ .driver = { -+ .name = FSL_MC_DPRC_DRIVER_NAME, -+ .owner = THIS_MODULE, -+ .pm = NULL, -+ }, -+ .match_id_table = match_id_table, -+ .probe = dprc_probe, -+ .remove = dprc_remove, -+}; -+ -+int __init dprc_driver_init(void) -+{ -+ return fsl_mc_driver_register(&dprc_driver); -+} -+ -+void dprc_driver_exit(void) -+{ -+ fsl_mc_driver_unregister(&dprc_driver); -+} -diff --git 
a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c -new file mode 100644 -index 0000000..4d86438 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dprc.c -@@ -0,0 +1,1218 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+* -+* Redistribution and use in source and binary forms, with or without -+* modification, are permitted provided that the following conditions are met: -+* * Redistributions of source code must retain the above copyright -+* notice, this list of conditions and the following disclaimer. -+* * Redistributions in binary form must reproduce the above copyright -+* notice, this list of conditions and the following disclaimer in the -+* documentation and/or other materials provided with the distribution. -+* * Neither the name of the above-listed copyright holders nor the -+* names of any contributors may be used to endorse or promote products -+* derived from this software without specific prior written permission. -+* -+* -+* ALTERNATIVELY, this software may be distributed under the terms of the -+* GNU General Public License ("GPL") as published by the Free Software -+* Foundation, either version 2 of that License or (at your option) any -+* later version. -+* -+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+* POSSIBILITY OF SUCH DAMAGE. -+*/ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dprc.h" -+#include "dprc-cmd.h" -+ -+int dprc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int container_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags, -+ 0); -+ cmd.params[0] |= mc_enc(0, 32, container_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_open); -+ -+int dprc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_close); -+ -+int dprc_create_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_cfg *cfg, -+ int *child_container_id, -+ uint64_t *child_portal_offset) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.params[0] |= mc_enc(32, 16, cfg->icid); -+ cmd.params[0] |= mc_enc(0, 32, cfg->options); -+ cmd.params[1] |= mc_enc(32, 32, cfg->portal_id); -+ cmd.params[2] |= mc_enc(0, 8, 
cfg->label[0]); -+ cmd.params[2] |= mc_enc(8, 8, cfg->label[1]); -+ cmd.params[2] |= mc_enc(16, 8, cfg->label[2]); -+ cmd.params[2] |= mc_enc(24, 8, cfg->label[3]); -+ cmd.params[2] |= mc_enc(32, 8, cfg->label[4]); -+ cmd.params[2] |= mc_enc(40, 8, cfg->label[5]); -+ cmd.params[2] |= mc_enc(48, 8, cfg->label[6]); -+ cmd.params[2] |= mc_enc(56, 8, cfg->label[7]); -+ cmd.params[3] |= mc_enc(0, 8, cfg->label[8]); -+ cmd.params[3] |= mc_enc(8, 8, cfg->label[9]); -+ cmd.params[3] |= mc_enc(16, 8, cfg->label[10]); -+ cmd.params[3] |= mc_enc(24, 8, cfg->label[11]); -+ cmd.params[3] |= mc_enc(32, 8, cfg->label[12]); -+ cmd.params[3] |= mc_enc(40, 8, cfg->label[13]); -+ cmd.params[3] |= mc_enc(48, 8, cfg->label[14]); -+ cmd.params[3] |= mc_enc(56, 8, cfg->label[15]); -+ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *child_container_id = mc_dec(cmd.params[1], 0, 32); -+ *child_portal_offset = mc_dec(cmd.params[2], 0, 64); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_create_container); -+ -+int dprc_destroy_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, child_container_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_destroy_container); -+ -+int dprc_reset_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, child_container_id); -+ -+ /* send command to mc*/ -+ return 
mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_reset_container); -+ -+int dprc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ irq_cfg->val = mc_dec(cmd.params[0], 0, 32); -+ irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64); -+ irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32); -+ *type = mc_dec(cmd.params[2], 32, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_irq); -+ -+int dprc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); -+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_irq); -+ -+int dprc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *en = mc_dec(cmd.params[0], 0, 8); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_irq_enable); -+ 
-+int dprc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 8, en); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_irq_enable); -+ -+int dprc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *mask = mc_dec(cmd.params[0], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_irq_mask); -+ -+int dprc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, mask); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_irq_mask); -+ -+int dprc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, *status); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = 
mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *status = mc_dec(cmd.params[0], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_irq_status); -+ -+int dprc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, status); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_clear_irq_status); -+ -+int dprc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_attributes *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ attr->container_id = mc_dec(cmd.params[0], 0, 32); -+ attr->icid = mc_dec(cmd.params[0], 32, 16); -+ attr->options = mc_dec(cmd.params[1], 0, 32); -+ attr->portal_id = mc_dec(cmd.params[1], 32, 32); -+ attr->version.major = mc_dec(cmd.params[2], 0, 16); -+ attr->version.minor = mc_dec(cmd.params[2], 16, 16); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_attributes); -+ -+int dprc_set_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t quota) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, child_container_id); -+ cmd.params[0] |= mc_enc(32, 16, quota); -+ cmd.params[1] |= mc_enc(0, 8, type[0]); -+ cmd.params[1] |= mc_enc(8, 8, type[1]); -+ 
cmd.params[1] |= mc_enc(16, 8, type[2]); -+ cmd.params[1] |= mc_enc(24, 8, type[3]); -+ cmd.params[1] |= mc_enc(32, 8, type[4]); -+ cmd.params[1] |= mc_enc(40, 8, type[5]); -+ cmd.params[1] |= mc_enc(48, 8, type[6]); -+ cmd.params[1] |= mc_enc(56, 8, type[7]); -+ cmd.params[2] |= mc_enc(0, 8, type[8]); -+ cmd.params[2] |= mc_enc(8, 8, type[9]); -+ cmd.params[2] |= mc_enc(16, 8, type[10]); -+ cmd.params[2] |= mc_enc(24, 8, type[11]); -+ cmd.params[2] |= mc_enc(32, 8, type[12]); -+ cmd.params[2] |= mc_enc(40, 8, type[13]); -+ cmd.params[2] |= mc_enc(48, 8, type[14]); -+ cmd.params[2] |= mc_enc(56, 8, '\0'); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_res_quota); -+ -+int dprc_get_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t *quota) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, child_container_id); -+ cmd.params[1] |= mc_enc(0, 8, type[0]); -+ cmd.params[1] |= mc_enc(8, 8, type[1]); -+ cmd.params[1] |= mc_enc(16, 8, type[2]); -+ cmd.params[1] |= mc_enc(24, 8, type[3]); -+ cmd.params[1] |= mc_enc(32, 8, type[4]); -+ cmd.params[1] |= mc_enc(40, 8, type[5]); -+ cmd.params[1] |= mc_enc(48, 8, type[6]); -+ cmd.params[1] |= mc_enc(56, 8, type[7]); -+ cmd.params[2] |= mc_enc(0, 8, type[8]); -+ cmd.params[2] |= mc_enc(8, 8, type[9]); -+ cmd.params[2] |= mc_enc(16, 8, type[10]); -+ cmd.params[2] |= mc_enc(24, 8, type[11]); -+ cmd.params[2] |= mc_enc(32, 8, type[12]); -+ cmd.params[2] |= mc_enc(40, 8, type[13]); -+ cmd.params[2] |= mc_enc(48, 8, type[14]); -+ cmd.params[2] |= mc_enc(56, 8, '\0'); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *quota = mc_dec(cmd.params[0], 32, 16); -+ -+ return 0; 
-+} -+EXPORT_SYMBOL(dprc_get_res_quota); -+ -+int dprc_assign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int container_id, -+ struct dprc_res_req *res_req) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, container_id); -+ cmd.params[0] |= mc_enc(32, 32, res_req->options); -+ cmd.params[1] |= mc_enc(0, 32, res_req->num); -+ cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align); -+ cmd.params[2] |= mc_enc(0, 8, res_req->type[0]); -+ cmd.params[2] |= mc_enc(8, 8, res_req->type[1]); -+ cmd.params[2] |= mc_enc(16, 8, res_req->type[2]); -+ cmd.params[2] |= mc_enc(24, 8, res_req->type[3]); -+ cmd.params[2] |= mc_enc(32, 8, res_req->type[4]); -+ cmd.params[2] |= mc_enc(40, 8, res_req->type[5]); -+ cmd.params[2] |= mc_enc(48, 8, res_req->type[6]); -+ cmd.params[2] |= mc_enc(56, 8, res_req->type[7]); -+ cmd.params[3] |= mc_enc(0, 8, res_req->type[8]); -+ cmd.params[3] |= mc_enc(8, 8, res_req->type[9]); -+ cmd.params[3] |= mc_enc(16, 8, res_req->type[10]); -+ cmd.params[3] |= mc_enc(24, 8, res_req->type[11]); -+ cmd.params[3] |= mc_enc(32, 8, res_req->type[12]); -+ cmd.params[3] |= mc_enc(40, 8, res_req->type[13]); -+ cmd.params[3] |= mc_enc(48, 8, res_req->type[14]); -+ cmd.params[3] |= mc_enc(56, 8, res_req->type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_assign); -+ -+int dprc_unassign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ struct dprc_res_req *res_req) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, child_container_id); -+ cmd.params[0] |= mc_enc(32, 32, res_req->options); -+ cmd.params[1] |= mc_enc(0, 32, res_req->num); -+ cmd.params[1] |= mc_enc(32, 32, 
res_req->id_base_align); -+ cmd.params[2] |= mc_enc(0, 8, res_req->type[0]); -+ cmd.params[2] |= mc_enc(8, 8, res_req->type[1]); -+ cmd.params[2] |= mc_enc(16, 8, res_req->type[2]); -+ cmd.params[2] |= mc_enc(24, 8, res_req->type[3]); -+ cmd.params[2] |= mc_enc(32, 8, res_req->type[4]); -+ cmd.params[2] |= mc_enc(40, 8, res_req->type[5]); -+ cmd.params[2] |= mc_enc(48, 8, res_req->type[6]); -+ cmd.params[2] |= mc_enc(56, 8, res_req->type[7]); -+ cmd.params[3] |= mc_enc(0, 8, res_req->type[8]); -+ cmd.params[3] |= mc_enc(8, 8, res_req->type[9]); -+ cmd.params[3] |= mc_enc(16, 8, res_req->type[10]); -+ cmd.params[3] |= mc_enc(24, 8, res_req->type[11]); -+ cmd.params[3] |= mc_enc(32, 8, res_req->type[12]); -+ cmd.params[3] |= mc_enc(40, 8, res_req->type[13]); -+ cmd.params[3] |= mc_enc(48, 8, res_req->type[14]); -+ cmd.params[3] |= mc_enc(56, 8, res_req->type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_unassign); -+ -+int dprc_get_pool_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *pool_count) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *pool_count = mc_dec(cmd.params[0], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_pool_count); -+ -+int dprc_get_pool(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int pool_index, -+ char *type) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, pool_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ type[0] = 
mc_dec(cmd.params[1], 0, 8); -+ type[1] = mc_dec(cmd.params[1], 8, 8); -+ type[2] = mc_dec(cmd.params[1], 16, 8); -+ type[3] = mc_dec(cmd.params[1], 24, 8); -+ type[4] = mc_dec(cmd.params[1], 32, 8); -+ type[5] = mc_dec(cmd.params[1], 40, 8); -+ type[6] = mc_dec(cmd.params[1], 48, 8); -+ type[7] = mc_dec(cmd.params[1], 56, 8); -+ type[8] = mc_dec(cmd.params[2], 0, 8); -+ type[9] = mc_dec(cmd.params[2], 8, 8); -+ type[10] = mc_dec(cmd.params[2], 16, 8); -+ type[11] = mc_dec(cmd.params[2], 24, 8); -+ type[12] = mc_dec(cmd.params[2], 32, 8); -+ type[13] = mc_dec(cmd.params[2], 40, 8); -+ type[14] = mc_dec(cmd.params[2], 48, 8); -+ type[15] = '\0'; -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_pool); -+ -+int dprc_get_obj_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *obj_count) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *obj_count = mc_dec(cmd.params[0], 32, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_obj_count); -+ -+int dprc_get_obj(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int obj_index, -+ struct dprc_obj_desc *obj_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, obj_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ obj_desc->id = mc_dec(cmd.params[0], 32, 32); -+ obj_desc->vendor = mc_dec(cmd.params[1], 0, 16); -+ obj_desc->irq_count = mc_dec(cmd.params[1], 16, 8); -+ obj_desc->region_count = mc_dec(cmd.params[1], 24, 8); -+ obj_desc->state = mc_dec(cmd.params[1], 32, 32); -+ obj_desc->ver_major 
= mc_dec(cmd.params[2], 0, 16); -+ obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16); -+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16); -+ obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8); -+ obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8); -+ obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8); -+ obj_desc->type[3] = mc_dec(cmd.params[3], 24, 8); -+ obj_desc->type[4] = mc_dec(cmd.params[3], 32, 8); -+ obj_desc->type[5] = mc_dec(cmd.params[3], 40, 8); -+ obj_desc->type[6] = mc_dec(cmd.params[3], 48, 8); -+ obj_desc->type[7] = mc_dec(cmd.params[3], 56, 8); -+ obj_desc->type[8] = mc_dec(cmd.params[4], 0, 8); -+ obj_desc->type[9] = mc_dec(cmd.params[4], 8, 8); -+ obj_desc->type[10] = mc_dec(cmd.params[4], 16, 8); -+ obj_desc->type[11] = mc_dec(cmd.params[4], 24, 8); -+ obj_desc->type[12] = mc_dec(cmd.params[4], 32, 8); -+ obj_desc->type[13] = mc_dec(cmd.params[4], 40, 8); -+ obj_desc->type[14] = mc_dec(cmd.params[4], 48, 8); -+ obj_desc->type[15] = '\0'; -+ obj_desc->label[0] = mc_dec(cmd.params[5], 0, 8); -+ obj_desc->label[1] = mc_dec(cmd.params[5], 8, 8); -+ obj_desc->label[2] = mc_dec(cmd.params[5], 16, 8); -+ obj_desc->label[3] = mc_dec(cmd.params[5], 24, 8); -+ obj_desc->label[4] = mc_dec(cmd.params[5], 32, 8); -+ obj_desc->label[5] = mc_dec(cmd.params[5], 40, 8); -+ obj_desc->label[6] = mc_dec(cmd.params[5], 48, 8); -+ obj_desc->label[7] = mc_dec(cmd.params[5], 56, 8); -+ obj_desc->label[8] = mc_dec(cmd.params[6], 0, 8); -+ obj_desc->label[9] = mc_dec(cmd.params[6], 8, 8); -+ obj_desc->label[10] = mc_dec(cmd.params[6], 16, 8); -+ obj_desc->label[11] = mc_dec(cmd.params[6], 24, 8); -+ obj_desc->label[12] = mc_dec(cmd.params[6], 32, 8); -+ obj_desc->label[13] = mc_dec(cmd.params[6], 40, 8); -+ obj_desc->label[14] = mc_dec(cmd.params[6], 48, 8); -+ obj_desc->label[15] = '\0'; -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_obj); -+ -+int dprc_get_obj_desc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ struct 
dprc_obj_desc *obj_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, obj_id); -+ cmd.params[1] |= mc_enc(0, 8, obj_type[0]); -+ cmd.params[1] |= mc_enc(8, 8, obj_type[1]); -+ cmd.params[1] |= mc_enc(16, 8, obj_type[2]); -+ cmd.params[1] |= mc_enc(24, 8, obj_type[3]); -+ cmd.params[1] |= mc_enc(32, 8, obj_type[4]); -+ cmd.params[1] |= mc_enc(40, 8, obj_type[5]); -+ cmd.params[1] |= mc_enc(48, 8, obj_type[6]); -+ cmd.params[1] |= mc_enc(56, 8, obj_type[7]); -+ cmd.params[2] |= mc_enc(0, 8, obj_type[8]); -+ cmd.params[2] |= mc_enc(8, 8, obj_type[9]); -+ cmd.params[2] |= mc_enc(16, 8, obj_type[10]); -+ cmd.params[2] |= mc_enc(24, 8, obj_type[11]); -+ cmd.params[2] |= mc_enc(32, 8, obj_type[12]); -+ cmd.params[2] |= mc_enc(40, 8, obj_type[13]); -+ cmd.params[2] |= mc_enc(48, 8, obj_type[14]); -+ cmd.params[2] |= mc_enc(56, 8, obj_type[15]); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ obj_desc->id = (int)mc_dec(cmd.params[0], 32, 32); -+ obj_desc->vendor = (uint16_t)mc_dec(cmd.params[1], 0, 16); -+ obj_desc->vendor = (uint8_t)mc_dec(cmd.params[1], 16, 8); -+ obj_desc->region_count = (uint8_t)mc_dec(cmd.params[1], 24, 8); -+ obj_desc->state = (uint32_t)mc_dec(cmd.params[1], 32, 32); -+ obj_desc->ver_major = (uint16_t)mc_dec(cmd.params[2], 0, 16); -+ obj_desc->ver_minor = (uint16_t)mc_dec(cmd.params[2], 16, 16); -+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16); -+ obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8); -+ obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8); -+ obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8); -+ obj_desc->type[3] = (char)mc_dec(cmd.params[3], 24, 8); -+ obj_desc->type[4] = (char)mc_dec(cmd.params[3], 32, 8); -+ obj_desc->type[5] = (char)mc_dec(cmd.params[3], 40, 8); -+ 
obj_desc->type[6] = (char)mc_dec(cmd.params[3], 48, 8); -+ obj_desc->type[7] = (char)mc_dec(cmd.params[3], 56, 8); -+ obj_desc->type[8] = (char)mc_dec(cmd.params[4], 0, 8); -+ obj_desc->type[9] = (char)mc_dec(cmd.params[4], 8, 8); -+ obj_desc->type[10] = (char)mc_dec(cmd.params[4], 16, 8); -+ obj_desc->type[11] = (char)mc_dec(cmd.params[4], 24, 8); -+ obj_desc->type[12] = (char)mc_dec(cmd.params[4], 32, 8); -+ obj_desc->type[13] = (char)mc_dec(cmd.params[4], 40, 8); -+ obj_desc->type[14] = (char)mc_dec(cmd.params[4], 48, 8); -+ obj_desc->type[15] = (char)mc_dec(cmd.params[4], 56, 8); -+ obj_desc->label[0] = (char)mc_dec(cmd.params[5], 0, 8); -+ obj_desc->label[1] = (char)mc_dec(cmd.params[5], 8, 8); -+ obj_desc->label[2] = (char)mc_dec(cmd.params[5], 16, 8); -+ obj_desc->label[3] = (char)mc_dec(cmd.params[5], 24, 8); -+ obj_desc->label[4] = (char)mc_dec(cmd.params[5], 32, 8); -+ obj_desc->label[5] = (char)mc_dec(cmd.params[5], 40, 8); -+ obj_desc->label[6] = (char)mc_dec(cmd.params[5], 48, 8); -+ obj_desc->label[7] = (char)mc_dec(cmd.params[5], 56, 8); -+ obj_desc->label[8] = (char)mc_dec(cmd.params[6], 0, 8); -+ obj_desc->label[9] = (char)mc_dec(cmd.params[6], 8, 8); -+ obj_desc->label[10] = (char)mc_dec(cmd.params[6], 16, 8); -+ obj_desc->label[11] = (char)mc_dec(cmd.params[6], 24, 8); -+ obj_desc->label[12] = (char)mc_dec(cmd.params[6], 32, 8); -+ obj_desc->label[13] = (char)mc_dec(cmd.params[6], 40, 8); -+ obj_desc->label[14] = (char)mc_dec(cmd.params[6], 48, 8); -+ obj_desc->label[15] = (char)mc_dec(cmd.params[6], 56, 8); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_obj_desc); -+ -+int dprc_set_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ 
cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); -+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); -+ cmd.params[2] |= mc_enc(32, 32, obj_id); -+ cmd.params[3] |= mc_enc(0, 8, obj_type[0]); -+ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); -+ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); -+ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); -+ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); -+ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); -+ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); -+ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); -+ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); -+ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); -+ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); -+ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); -+ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); -+ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); -+ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); -+ cmd.params[4] |= mc_enc(56, 8, obj_type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_obj_irq); -+ -+int dprc_get_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, obj_id); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ cmd.params[1] |= mc_enc(0, 8, obj_type[0]); -+ cmd.params[1] |= mc_enc(8, 8, obj_type[1]); -+ cmd.params[1] |= mc_enc(16, 8, obj_type[2]); -+ cmd.params[1] |= mc_enc(24, 8, obj_type[3]); -+ cmd.params[1] |= mc_enc(32, 8, obj_type[4]); -+ cmd.params[1] |= mc_enc(40, 8, obj_type[5]); -+ cmd.params[1] |= mc_enc(48, 8, obj_type[6]); -+ cmd.params[1] |= mc_enc(56, 8, obj_type[7]); -+ cmd.params[2] |= mc_enc(0, 8, obj_type[8]); -+ cmd.params[2] |= mc_enc(8, 8, obj_type[9]); 
-+ cmd.params[2] |= mc_enc(16, 8, obj_type[10]); -+ cmd.params[2] |= mc_enc(24, 8, obj_type[11]); -+ cmd.params[2] |= mc_enc(32, 8, obj_type[12]); -+ cmd.params[2] |= mc_enc(40, 8, obj_type[13]); -+ cmd.params[2] |= mc_enc(48, 8, obj_type[14]); -+ cmd.params[2] |= mc_enc(56, 8, obj_type[15]); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ irq_cfg->paddr = (uint64_t)mc_dec(cmd.params[1], 0, 64); -+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); -+ *type = (int)mc_dec(cmd.params[2], 32, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_obj_irq); -+ -+int dprc_get_res_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ int *res_count) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ *res_count = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT, -+ cmd_flags, -+ token); -+ cmd.params[1] |= mc_enc(0, 8, type[0]); -+ cmd.params[1] |= mc_enc(8, 8, type[1]); -+ cmd.params[1] |= mc_enc(16, 8, type[2]); -+ cmd.params[1] |= mc_enc(24, 8, type[3]); -+ cmd.params[1] |= mc_enc(32, 8, type[4]); -+ cmd.params[1] |= mc_enc(40, 8, type[5]); -+ cmd.params[1] |= mc_enc(48, 8, type[6]); -+ cmd.params[1] |= mc_enc(56, 8, type[7]); -+ cmd.params[2] |= mc_enc(0, 8, type[8]); -+ cmd.params[2] |= mc_enc(8, 8, type[9]); -+ cmd.params[2] |= mc_enc(16, 8, type[10]); -+ cmd.params[2] |= mc_enc(24, 8, type[11]); -+ cmd.params[2] |= mc_enc(32, 8, type[12]); -+ cmd.params[2] |= mc_enc(40, 8, type[13]); -+ cmd.params[2] |= mc_enc(48, 8, type[14]); -+ cmd.params[2] |= mc_enc(56, 8, '\0'); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *res_count = mc_dec(cmd.params[0], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_res_count); -+ -+int dprc_get_res_ids(struct 
fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ struct dprc_res_ids_range_desc *range_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(42, 7, range_desc->iter_status); -+ cmd.params[1] |= mc_enc(0, 32, range_desc->base_id); -+ cmd.params[1] |= mc_enc(32, 32, range_desc->last_id); -+ cmd.params[2] |= mc_enc(0, 8, type[0]); -+ cmd.params[2] |= mc_enc(8, 8, type[1]); -+ cmd.params[2] |= mc_enc(16, 8, type[2]); -+ cmd.params[2] |= mc_enc(24, 8, type[3]); -+ cmd.params[2] |= mc_enc(32, 8, type[4]); -+ cmd.params[2] |= mc_enc(40, 8, type[5]); -+ cmd.params[2] |= mc_enc(48, 8, type[6]); -+ cmd.params[2] |= mc_enc(56, 8, type[7]); -+ cmd.params[3] |= mc_enc(0, 8, type[8]); -+ cmd.params[3] |= mc_enc(8, 8, type[9]); -+ cmd.params[3] |= mc_enc(16, 8, type[10]); -+ cmd.params[3] |= mc_enc(24, 8, type[11]); -+ cmd.params[3] |= mc_enc(32, 8, type[12]); -+ cmd.params[3] |= mc_enc(40, 8, type[13]); -+ cmd.params[3] |= mc_enc(48, 8, type[14]); -+ cmd.params[3] |= mc_enc(56, 8, '\0'); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ range_desc->iter_status = mc_dec(cmd.params[0], 42, 7); -+ range_desc->base_id = mc_dec(cmd.params[1], 0, 32); -+ range_desc->last_id = mc_dec(cmd.params[1], 32, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_res_ids); -+ -+int dprc_get_obj_region(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t region_index, -+ struct dprc_region_desc *region_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, obj_id); -+ cmd.params[0] |= mc_enc(48, 8, region_index); -+ cmd.params[3] |= 
mc_enc(0, 8, obj_type[0]); -+ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); -+ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); -+ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); -+ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); -+ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); -+ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); -+ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); -+ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); -+ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); -+ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); -+ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); -+ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); -+ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); -+ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); -+ cmd.params[4] |= mc_enc(56, 8, '\0'); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ region_desc->base_offset = mc_dec(cmd.params[1], 0, 64); -+ region_desc->size = mc_dec(cmd.params[2], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_obj_region); -+ -+int dprc_set_obj_label(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ char *label) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 32, obj_id); -+ cmd.params[1] |= mc_enc(0, 8, label[0]); -+ cmd.params[1] |= mc_enc(8, 8, label[1]); -+ cmd.params[1] |= mc_enc(16, 8, label[2]); -+ cmd.params[1] |= mc_enc(24, 8, label[3]); -+ cmd.params[1] |= mc_enc(32, 8, label[4]); -+ cmd.params[1] |= mc_enc(40, 8, label[5]); -+ cmd.params[1] |= mc_enc(48, 8, label[6]); -+ cmd.params[1] |= mc_enc(56, 8, label[7]); -+ cmd.params[2] |= mc_enc(0, 8, label[8]); -+ cmd.params[2] |= mc_enc(8, 8, label[9]); -+ cmd.params[2] |= mc_enc(16, 8, label[10]); -+ cmd.params[2] |= mc_enc(24, 8, label[11]); -+ cmd.params[2] |= mc_enc(32, 8, label[12]); -+ cmd.params[2] |= 
mc_enc(40, 8, label[13]); -+ cmd.params[2] |= mc_enc(48, 8, label[14]); -+ cmd.params[2] |= mc_enc(56, 8, label[15]); -+ cmd.params[3] |= mc_enc(0, 8, obj_type[0]); -+ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); -+ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); -+ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); -+ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); -+ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); -+ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); -+ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); -+ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); -+ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); -+ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); -+ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); -+ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); -+ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); -+ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); -+ cmd.params[4] |= mc_enc(56, 8, obj_type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_obj_label); -+ -+int dprc_connect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ const struct dprc_endpoint *endpoint2, -+ const struct dprc_connection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, endpoint1->id); -+ cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id); -+ cmd.params[1] |= mc_enc(0, 32, endpoint2->id); -+ cmd.params[1] |= mc_enc(32, 32, endpoint2->if_id); -+ cmd.params[2] |= mc_enc(0, 8, endpoint1->type[0]); -+ cmd.params[2] |= mc_enc(8, 8, endpoint1->type[1]); -+ cmd.params[2] |= mc_enc(16, 8, endpoint1->type[2]); -+ cmd.params[2] |= mc_enc(24, 8, endpoint1->type[3]); -+ cmd.params[2] |= mc_enc(32, 8, endpoint1->type[4]); -+ cmd.params[2] |= mc_enc(40, 8, endpoint1->type[5]); -+ cmd.params[2] |= mc_enc(48, 8, endpoint1->type[6]); -+ cmd.params[2] |= mc_enc(56, 8, 
endpoint1->type[7]); -+ cmd.params[3] |= mc_enc(0, 8, endpoint1->type[8]); -+ cmd.params[3] |= mc_enc(8, 8, endpoint1->type[9]); -+ cmd.params[3] |= mc_enc(16, 8, endpoint1->type[10]); -+ cmd.params[3] |= mc_enc(24, 8, endpoint1->type[11]); -+ cmd.params[3] |= mc_enc(32, 8, endpoint1->type[12]); -+ cmd.params[3] |= mc_enc(40, 8, endpoint1->type[13]); -+ cmd.params[3] |= mc_enc(48, 8, endpoint1->type[14]); -+ cmd.params[3] |= mc_enc(56, 8, endpoint1->type[15]); -+ cmd.params[4] |= mc_enc(0, 32, cfg->max_rate); -+ cmd.params[4] |= mc_enc(32, 32, cfg->committed_rate); -+ cmd.params[5] |= mc_enc(0, 8, endpoint2->type[0]); -+ cmd.params[5] |= mc_enc(8, 8, endpoint2->type[1]); -+ cmd.params[5] |= mc_enc(16, 8, endpoint2->type[2]); -+ cmd.params[5] |= mc_enc(24, 8, endpoint2->type[3]); -+ cmd.params[5] |= mc_enc(32, 8, endpoint2->type[4]); -+ cmd.params[5] |= mc_enc(40, 8, endpoint2->type[5]); -+ cmd.params[5] |= mc_enc(48, 8, endpoint2->type[6]); -+ cmd.params[5] |= mc_enc(56, 8, endpoint2->type[7]); -+ cmd.params[6] |= mc_enc(0, 8, endpoint2->type[8]); -+ cmd.params[6] |= mc_enc(8, 8, endpoint2->type[9]); -+ cmd.params[6] |= mc_enc(16, 8, endpoint2->type[10]); -+ cmd.params[6] |= mc_enc(24, 8, endpoint2->type[11]); -+ cmd.params[6] |= mc_enc(32, 8, endpoint2->type[12]); -+ cmd.params[6] |= mc_enc(40, 8, endpoint2->type[13]); -+ cmd.params[6] |= mc_enc(48, 8, endpoint2->type[14]); -+ cmd.params[6] |= mc_enc(56, 8, endpoint2->type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_connect); -+ -+int dprc_disconnect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, endpoint->id); -+ cmd.params[0] |= mc_enc(32, 32, endpoint->if_id); -+ cmd.params[1] |= mc_enc(0, 8, endpoint->type[0]); 
-+ cmd.params[1] |= mc_enc(8, 8, endpoint->type[1]); -+ cmd.params[1] |= mc_enc(16, 8, endpoint->type[2]); -+ cmd.params[1] |= mc_enc(24, 8, endpoint->type[3]); -+ cmd.params[1] |= mc_enc(32, 8, endpoint->type[4]); -+ cmd.params[1] |= mc_enc(40, 8, endpoint->type[5]); -+ cmd.params[1] |= mc_enc(48, 8, endpoint->type[6]); -+ cmd.params[1] |= mc_enc(56, 8, endpoint->type[7]); -+ cmd.params[2] |= mc_enc(0, 8, endpoint->type[8]); -+ cmd.params[2] |= mc_enc(8, 8, endpoint->type[9]); -+ cmd.params[2] |= mc_enc(16, 8, endpoint->type[10]); -+ cmd.params[2] |= mc_enc(24, 8, endpoint->type[11]); -+ cmd.params[2] |= mc_enc(32, 8, endpoint->type[12]); -+ cmd.params[2] |= mc_enc(40, 8, endpoint->type[13]); -+ cmd.params[2] |= mc_enc(48, 8, endpoint->type[14]); -+ cmd.params[2] |= mc_enc(56, 8, endpoint->type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_disconnect); -+ -+int dprc_get_connection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ struct dprc_endpoint *endpoint2, -+ int *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, endpoint1->id); -+ cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id); -+ cmd.params[1] |= mc_enc(0, 8, endpoint1->type[0]); -+ cmd.params[1] |= mc_enc(8, 8, endpoint1->type[1]); -+ cmd.params[1] |= mc_enc(16, 8, endpoint1->type[2]); -+ cmd.params[1] |= mc_enc(24, 8, endpoint1->type[3]); -+ cmd.params[1] |= mc_enc(32, 8, endpoint1->type[4]); -+ cmd.params[1] |= mc_enc(40, 8, endpoint1->type[5]); -+ cmd.params[1] |= mc_enc(48, 8, endpoint1->type[6]); -+ cmd.params[1] |= mc_enc(56, 8, endpoint1->type[7]); -+ cmd.params[2] |= mc_enc(0, 8, endpoint1->type[8]); -+ cmd.params[2] |= mc_enc(8, 8, endpoint1->type[9]); -+ cmd.params[2] |= mc_enc(16, 8, endpoint1->type[10]); -+ cmd.params[2] 
|= mc_enc(24, 8, endpoint1->type[11]); -+ cmd.params[2] |= mc_enc(32, 8, endpoint1->type[12]); -+ cmd.params[2] |= mc_enc(40, 8, endpoint1->type[13]); -+ cmd.params[2] |= mc_enc(48, 8, endpoint1->type[14]); -+ cmd.params[2] |= mc_enc(56, 8, endpoint1->type[15]); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ endpoint2->id = mc_dec(cmd.params[3], 0, 32); -+ endpoint2->if_id = mc_dec(cmd.params[3], 32, 32); -+ endpoint2->type[0] = mc_dec(cmd.params[4], 0, 8); -+ endpoint2->type[1] = mc_dec(cmd.params[4], 8, 8); -+ endpoint2->type[2] = mc_dec(cmd.params[4], 16, 8); -+ endpoint2->type[3] = mc_dec(cmd.params[4], 24, 8); -+ endpoint2->type[4] = mc_dec(cmd.params[4], 32, 8); -+ endpoint2->type[5] = mc_dec(cmd.params[4], 40, 8); -+ endpoint2->type[6] = mc_dec(cmd.params[4], 48, 8); -+ endpoint2->type[7] = mc_dec(cmd.params[4], 56, 8); -+ endpoint2->type[8] = mc_dec(cmd.params[5], 0, 8); -+ endpoint2->type[9] = mc_dec(cmd.params[5], 8, 8); -+ endpoint2->type[10] = mc_dec(cmd.params[5], 16, 8); -+ endpoint2->type[11] = mc_dec(cmd.params[5], 24, 8); -+ endpoint2->type[12] = mc_dec(cmd.params[5], 32, 8); -+ endpoint2->type[13] = mc_dec(cmd.params[5], 40, 8); -+ endpoint2->type[14] = mc_dec(cmd.params[5], 48, 8); -+ endpoint2->type[15] = mc_dec(cmd.params[5], 56, 8); -+ *state = mc_dec(cmd.params[6], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_connection); -diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c -new file mode 100644 -index 0000000..a3940a0 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c -@@ -0,0 +1,716 @@ -+/* -+ * Freescale MC object device allocator driver -+ * -+ * Copyright (C) 2013 Freescale Semiconductor, Inc. -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. 
This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#include "../include/mc-private.h" -+#include "../include/mc-sys.h" -+#include -+#include "../include/dpbp-cmd.h" -+#include "../include/dpcon-cmd.h" -+#include "dpmcp-cmd.h" -+#include "dpmcp.h" -+ -+/** -+ * fsl_mc_resource_pool_add_device - add allocatable device to a resource -+ * pool of a given MC bus -+ * -+ * @mc_bus: pointer to the MC bus -+ * @pool_type: MC bus pool type -+ * @mc_dev: Pointer to allocatable MC object device -+ * -+ * It adds an allocatable MC object device to a container's resource pool of -+ * the given resource type -+ */ -+static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus -+ *mc_bus, -+ enum fsl_mc_pool_type -+ pool_type, -+ struct fsl_mc_device -+ *mc_dev) -+{ -+ struct fsl_mc_resource_pool *res_pool; -+ struct fsl_mc_resource *resource; -+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; -+ int error = -EINVAL; -+ bool mutex_locked = false; -+ -+ if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) -+ goto out; -+ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) -+ goto out; -+ if (WARN_ON(mc_dev->resource)) -+ goto out; -+ -+ res_pool = &mc_bus->resource_pools[pool_type]; -+ if (WARN_ON(res_pool->type != pool_type)) -+ goto out; -+ if (WARN_ON(res_pool->mc_bus != mc_bus)) -+ goto out; -+ -+ mutex_lock(&res_pool->mutex); -+ mutex_locked = true; -+ -+ if (WARN_ON(res_pool->max_count < 0)) -+ goto out; -+ if (WARN_ON(res_pool->free_count < 0 || -+ res_pool->free_count > res_pool->max_count)) -+ goto out; -+ -+ resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource), -+ GFP_KERNEL); -+ if (!resource) { -+ error = -ENOMEM; -+ dev_err(&mc_bus_dev->dev, -+ "Failed to allocate memory for fsl_mc_resource\n"); -+ goto out; -+ } -+ -+ resource->type = pool_type; -+ resource->id = mc_dev->obj_desc.id; -+ resource->data = mc_dev; -+ resource->parent_pool = res_pool; -+ 
INIT_LIST_HEAD(&resource->node); -+ list_add_tail(&resource->node, &res_pool->free_list); -+ mc_dev->resource = resource; -+ res_pool->free_count++; -+ res_pool->max_count++; -+ error = 0; -+out: -+ if (mutex_locked) -+ mutex_unlock(&res_pool->mutex); -+ -+ return error; -+} -+ -+/** -+ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a -+ * resource pool -+ * -+ * @mc_dev: Pointer to allocatable MC object device -+ * -+ * It permanently removes an allocatable MC object device from the resource -+ * pool, the device is currently in, as long as it is in the pool's free list. -+ */ -+static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device -+ *mc_dev) -+{ -+ struct fsl_mc_device *mc_bus_dev; -+ struct fsl_mc_bus *mc_bus; -+ struct fsl_mc_resource_pool *res_pool; -+ struct fsl_mc_resource *resource; -+ int error = -EINVAL; -+ bool mutex_locked = false; -+ -+ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) -+ goto out; -+ -+ resource = mc_dev->resource; -+ if (WARN_ON(!resource || resource->data != mc_dev)) -+ goto out; -+ -+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); -+ mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ res_pool = resource->parent_pool; -+ if (WARN_ON(res_pool != &mc_bus->resource_pools[resource->type])) -+ goto out; -+ -+ mutex_lock(&res_pool->mutex); -+ mutex_locked = true; -+ -+ if (WARN_ON(res_pool->max_count <= 0)) -+ goto out; -+ if (WARN_ON(res_pool->free_count <= 0 || -+ res_pool->free_count > res_pool->max_count)) -+ goto out; -+ -+ /* -+ * If the device is currently allocated, its resource is not -+ * in the free list and thus, the device cannot be removed. 
-+ */ -+ if (list_empty(&resource->node)) { -+ error = -EBUSY; -+ dev_err(&mc_bus_dev->dev, -+ "Device %s cannot be removed from resource pool\n", -+ dev_name(&mc_dev->dev)); -+ goto out; -+ } -+ -+ list_del(&resource->node); -+ INIT_LIST_HEAD(&resource->node); -+ res_pool->free_count--; -+ res_pool->max_count--; -+ -+ devm_kfree(&mc_bus_dev->dev, resource); -+ mc_dev->resource = NULL; -+ error = 0; -+out: -+ if (mutex_locked) -+ mutex_unlock(&res_pool->mutex); -+ -+ return error; -+} -+ -+static const char *const fsl_mc_pool_type_strings[] = { -+ [FSL_MC_POOL_DPMCP] = "dpmcp", -+ [FSL_MC_POOL_DPBP] = "dpbp", -+ [FSL_MC_POOL_DPCON] = "dpcon", -+ [FSL_MC_POOL_IRQ] = "irq", -+}; -+ -+static int __must_check object_type_to_pool_type(const char *object_type, -+ enum fsl_mc_pool_type -+ *pool_type) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) { -+ if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) { -+ *pool_type = i; -+ return 0; -+ } -+ } -+ -+ return -EINVAL; -+} -+ -+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, -+ enum fsl_mc_pool_type pool_type, -+ struct fsl_mc_resource **new_resource) -+{ -+ struct fsl_mc_resource_pool *res_pool; -+ struct fsl_mc_resource *resource; -+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; -+ int error = -EINVAL; -+ bool mutex_locked = false; -+ -+ BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) != -+ FSL_MC_NUM_POOL_TYPES); -+ -+ *new_resource = NULL; -+ if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) -+ goto error; -+ -+ res_pool = &mc_bus->resource_pools[pool_type]; -+ if (WARN_ON(res_pool->mc_bus != mc_bus)) -+ goto error; -+ -+ mutex_lock(&res_pool->mutex); -+ mutex_locked = true; -+ resource = list_first_entry_or_null(&res_pool->free_list, -+ struct fsl_mc_resource, node); -+ -+ if (!resource) { -+ WARN_ON(res_pool->free_count != 0); -+ error = -ENXIO; -+ dev_err(&mc_bus_dev->dev, -+ "No more resources of type %s left\n", -+ 
fsl_mc_pool_type_strings[pool_type]); -+ goto error; -+ } -+ -+ if (WARN_ON(resource->type != pool_type)) -+ goto error; -+ if (WARN_ON(resource->parent_pool != res_pool)) -+ goto error; -+ if (WARN_ON(res_pool->free_count <= 0 || -+ res_pool->free_count > res_pool->max_count)) -+ goto error; -+ -+ list_del(&resource->node); -+ INIT_LIST_HEAD(&resource->node); -+ -+ res_pool->free_count--; -+ mutex_unlock(&res_pool->mutex); -+ *new_resource = resource; -+ return 0; -+error: -+ if (mutex_locked) -+ mutex_unlock(&res_pool->mutex); -+ -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate); -+ -+void fsl_mc_resource_free(struct fsl_mc_resource *resource) -+{ -+ struct fsl_mc_resource_pool *res_pool; -+ bool mutex_locked = false; -+ -+ res_pool = resource->parent_pool; -+ if (WARN_ON(resource->type != res_pool->type)) -+ goto out; -+ -+ mutex_lock(&res_pool->mutex); -+ mutex_locked = true; -+ if (WARN_ON(res_pool->free_count < 0 || -+ res_pool->free_count >= res_pool->max_count)) -+ goto out; -+ -+ if (WARN_ON(!list_empty(&resource->node))) -+ goto out; -+ -+ list_add_tail(&resource->node, &res_pool->free_list); -+ res_pool->free_count++; -+out: -+ if (mutex_locked) -+ mutex_unlock(&res_pool->mutex); -+} -+EXPORT_SYMBOL_GPL(fsl_mc_resource_free); -+ -+/** -+ * fsl_mc_portal_allocate - Allocates an MC portal -+ * -+ * @mc_dev: MC device for which the MC portal is to be allocated -+ * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated -+ * MC portal. -+ * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object -+ * that wraps the allocated MC portal is to be returned -+ * -+ * This function allocates an MC portal from the device's parent DPRC, -+ * from the corresponding MC bus' pool of MC portals and wraps -+ * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the -+ * portal is allocated from its own MC bus. 
-+ */ -+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, -+ uint16_t mc_io_flags, -+ struct fsl_mc_io **new_mc_io) -+{ -+ struct fsl_mc_device *mc_bus_dev; -+ struct fsl_mc_bus *mc_bus; -+ phys_addr_t mc_portal_phys_addr; -+ size_t mc_portal_size; -+ struct fsl_mc_device *dpmcp_dev; -+ int error = -EINVAL; -+ struct fsl_mc_resource *resource = NULL; -+ struct fsl_mc_io *mc_io = NULL; -+ -+ if (!mc_dev) { -+ if (WARN_ON(!fsl_mc_bus_type.dev_root)) -+ return error; -+ -+ mc_bus_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); -+ } else if (mc_dev->flags & FSL_MC_IS_DPRC) { -+ mc_bus_dev = mc_dev; -+ } else { -+ if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type)) -+ return error; -+ -+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); -+ } -+ -+ mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ *new_mc_io = NULL; -+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource); -+ if (error < 0) -+ return error; -+ -+ error = -EINVAL; -+ dpmcp_dev = resource->data; -+ if (WARN_ON(!dpmcp_dev || -+ strcmp(dpmcp_dev->obj_desc.type, "dpmcp") != 0)) -+ goto error_cleanup_resource; -+ -+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR || -+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR && -+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) { -+ dev_err(&dpmcp_dev->dev, -+ "ERROR: Version %d.%d of DPMCP not supported.\n", -+ dpmcp_dev->obj_desc.ver_major, -+ dpmcp_dev->obj_desc.ver_minor); -+ error = -ENOTSUPP; -+ goto error_cleanup_resource; -+ } -+ -+ if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0)) -+ goto error_cleanup_resource; -+ -+ mc_portal_phys_addr = dpmcp_dev->regions[0].start; -+ mc_portal_size = dpmcp_dev->regions[0].end - -+ dpmcp_dev->regions[0].start + 1; -+ -+ if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size)) -+ goto error_cleanup_resource; -+ -+ error = fsl_create_mc_io(&mc_bus_dev->dev, -+ mc_portal_phys_addr, -+ mc_portal_size, dpmcp_dev, -+ mc_io_flags, &mc_io); -+ if (error < 0) -+ goto 
error_cleanup_resource; -+ -+ *new_mc_io = mc_io; -+ return 0; -+ -+error_cleanup_resource: -+ fsl_mc_resource_free(resource); -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate); -+ -+/** -+ * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals -+ * of a given MC bus -+ * -+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free -+ */ -+void fsl_mc_portal_free(struct fsl_mc_io *mc_io) -+{ -+ struct fsl_mc_device *dpmcp_dev; -+ struct fsl_mc_resource *resource; -+ -+ /* -+ * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed -+ * to have a DPMCP object associated with. -+ */ -+ dpmcp_dev = mc_io->dpmcp_dev; -+ if (WARN_ON(!dpmcp_dev)) -+ return; -+ if (WARN_ON(strcmp(dpmcp_dev->obj_desc.type, "dpmcp") != 0)) -+ return; -+ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) -+ return; -+ -+ resource = dpmcp_dev->resource; -+ if (WARN_ON(!resource || resource->type != FSL_MC_POOL_DPMCP)) -+ return; -+ -+ if (WARN_ON(resource->data != dpmcp_dev)) -+ return; -+ -+ fsl_destroy_mc_io(mc_io); -+ fsl_mc_resource_free(resource); -+} -+EXPORT_SYMBOL_GPL(fsl_mc_portal_free); -+ -+/** -+ * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object -+ * -+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free -+ */ -+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io) -+{ -+ int error; -+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; -+ -+ if (WARN_ON(!dpmcp_dev)) -+ return -EINVAL; -+ -+ error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error); -+ return error; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_portal_reset); -+ -+/** -+ * fsl_mc_object_allocate - Allocates a MC object device of the given -+ * pool type from a given MC bus -+ * -+ * @mc_dev: MC device for which the MC object device is to be allocated -+ * @pool_type: MC bus resource pool type -+ * @new_mc_dev: Pointer to area where 
the pointer to the allocated -+ * MC object device is to be returned -+ * -+ * This function allocates a MC object device from the device's parent DPRC, -+ * from the corresponding MC bus' pool of allocatable MC object devices of -+ * the given resource type. mc_dev cannot be a DPRC itself. -+ * -+ * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC -+ * portals are allocated using fsl_mc_portal_allocate(), instead of -+ * this function. -+ */ -+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, -+ enum fsl_mc_pool_type pool_type, -+ struct fsl_mc_device **new_mc_adev) -+{ -+ struct fsl_mc_device *mc_bus_dev; -+ struct fsl_mc_bus *mc_bus; -+ struct fsl_mc_device *mc_adev; -+ int error = -EINVAL; -+ struct fsl_mc_resource *resource = NULL; -+ -+ *new_mc_adev = NULL; -+ if (WARN_ON(mc_dev->flags & FSL_MC_IS_DPRC)) -+ goto error; -+ -+ if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type)) -+ goto error; -+ -+ if (WARN_ON(pool_type == FSL_MC_POOL_DPMCP)) -+ goto error; -+ -+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); -+ mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource); -+ if (error < 0) -+ goto error; -+ -+ mc_adev = resource->data; -+ if (WARN_ON(!mc_adev)) -+ goto error; -+ -+ *new_mc_adev = mc_adev; -+ return 0; -+error: -+ if (resource) -+ fsl_mc_resource_free(resource); -+ -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_object_allocate); -+ -+/** -+ * fsl_mc_object_free - Returns an allocatable MC object device to the -+ * corresponding resource pool of a given MC bus. 
-+ * -+ * @mc_adev: Pointer to the MC object device -+ */ -+void fsl_mc_object_free(struct fsl_mc_device *mc_adev) -+{ -+ struct fsl_mc_resource *resource; -+ -+ resource = mc_adev->resource; -+ if (WARN_ON(resource->type == FSL_MC_POOL_DPMCP)) -+ return; -+ if (WARN_ON(resource->data != mc_adev)) -+ return; -+ -+ fsl_mc_resource_free(resource); -+} -+EXPORT_SYMBOL_GPL(fsl_mc_object_free); -+ -+/** -+ * It allocates the IRQs required by a given MC object device. The -+ * IRQs are allocated from the interrupt pool associated with the -+ * MC bus that contains the device, if the device is not a DPRC device. -+ * Otherwise, the IRQs are allocated from the interrupt pool associated -+ * with the MC bus that represents the DPRC device itself. -+ */ -+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ int irq_count; -+ int res_allocated_count = 0; -+ int error = -EINVAL; -+ struct fsl_mc_device_irq **irqs = NULL; -+ struct fsl_mc_bus *mc_bus; -+ struct fsl_mc_resource_pool *res_pool; -+ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); -+ -+ if (!mc->gic_supported) -+ return -ENOTSUPP; -+ -+ if (WARN_ON(mc_dev->irqs)) -+ goto error; -+ -+ irq_count = mc_dev->obj_desc.irq_count; -+ if (WARN_ON(irq_count == 0)) -+ goto error; -+ -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) -+ mc_bus = to_fsl_mc_bus(mc_dev); -+ else -+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); -+ -+ if (WARN_ON(!mc_bus->irq_resources)) -+ goto error; -+ -+ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; -+ if (res_pool->free_count < irq_count) { -+ dev_err(&mc_dev->dev, -+ "Not able to allocate %u irqs for device\n", irq_count); -+ error = -ENOSPC; -+ goto error; -+ } -+ -+ irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]), -+ GFP_KERNEL); -+ if (!irqs) { -+ error = -ENOMEM; -+ dev_err(&mc_dev->dev, "No memory to allocate irqs[]\n"); -+ goto error; -+ } -+ -+ for (i = 0; i < irq_count; i++) { -+ struct 
fsl_mc_resource *resource; -+ -+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ, -+ &resource); -+ if (error < 0) -+ goto error; -+ -+ irqs[i] = to_fsl_mc_irq(resource); -+ res_allocated_count++; -+ -+ WARN_ON(irqs[i]->mc_dev); -+ irqs[i]->mc_dev = mc_dev; -+ irqs[i]->dev_irq_index = i; -+ } -+ -+ mc_dev->irqs = irqs; -+ return 0; -+error: -+ for (i = 0; i < res_allocated_count; i++) { -+ irqs[i]->mc_dev = NULL; -+ fsl_mc_resource_free(&irqs[i]->resource); -+ } -+ -+ if (irqs) -+ devm_kfree(&mc_dev->dev, irqs); -+ -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs); -+ -+/* -+ * It frees the IRQs that were allocated for a MC object device, by -+ * returning them to the corresponding interrupt pool. -+ */ -+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ int irq_count; -+ struct fsl_mc_bus *mc_bus; -+ struct fsl_mc_device_irq **irqs = mc_dev->irqs; -+ -+ if (WARN_ON(!irqs)) -+ return; -+ -+ irq_count = mc_dev->obj_desc.irq_count; -+ -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) -+ mc_bus = to_fsl_mc_bus(mc_dev); -+ else -+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); -+ -+ if (WARN_ON(!mc_bus->irq_resources)) -+ return; -+ -+ for (i = 0; i < irq_count; i++) { -+ WARN_ON(!irqs[i]->mc_dev); -+ irqs[i]->mc_dev = NULL; -+ fsl_mc_resource_free(&irqs[i]->resource); -+ } -+ -+ devm_kfree(&mc_dev->dev, mc_dev->irqs); -+ mc_dev->irqs = NULL; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_free_irqs); -+ -+/** -+ * fsl_mc_allocator_probe - callback invoked when an allocatable device is -+ * being added to the system -+ */ -+static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev) -+{ -+ enum fsl_mc_pool_type pool_type; -+ struct fsl_mc_device *mc_bus_dev; -+ struct fsl_mc_bus *mc_bus; -+ int error = -EINVAL; -+ -+ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) -+ goto error; -+ -+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); -+ if (WARN_ON(mc_bus_dev->dev.bus != &fsl_mc_bus_type)) -+ goto error; -+ -+ 
mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ -+ /* -+ * If mc_dev is the DPMCP object for the parent DPRC's built-in -+ * portal, we don't add this DPMCP to the DPMCP object pool, -+ * but instead allocate it directly to the parent DPRC (mc_bus_dev): -+ */ -+ if (strcmp(mc_dev->obj_desc.type, "dpmcp") == 0 && -+ mc_dev->obj_desc.id == mc_bus->dprc_attr.portal_id) { -+ error = fsl_mc_io_set_dpmcp(mc_bus_dev->mc_io, mc_dev); -+ if (error < 0) -+ goto error; -+ } else { -+ error = object_type_to_pool_type(mc_dev->obj_desc.type, -+ &pool_type); -+ if (error < 0) -+ goto error; -+ -+ error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, -+ mc_dev); -+ if (error < 0) -+ goto error; -+ } -+ -+ dev_dbg(&mc_dev->dev, -+ "Allocatable MC object device bound to fsl_mc_allocator driver"); -+ return 0; -+error: -+ -+ return error; -+} -+ -+/** -+ * fsl_mc_allocator_remove - callback invoked when an allocatable device is -+ * being removed from the system -+ */ -+static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ -+ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) -+ return -EINVAL; -+ -+ if (mc_dev->resource) { -+ error = fsl_mc_resource_pool_remove_device(mc_dev); -+ if (error < 0) -+ return error; -+ } -+ -+ dev_dbg(&mc_dev->dev, -+ "Allocatable MC object device unbound from fsl_mc_allocator driver"); -+ return 0; -+} -+ -+static const struct fsl_mc_device_match_id match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpbp", -+ }, -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpmcp", -+ }, -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpcon", -+ }, -+ {.vendor = 0x0}, -+}; -+ -+static struct fsl_mc_driver fsl_mc_allocator_driver = { -+ .driver = { -+ .name = "fsl_mc_allocator", -+ .owner = THIS_MODULE, -+ .pm = NULL, -+ }, -+ .match_id_table = match_id_table, -+ .probe = fsl_mc_allocator_probe, -+ .remove = fsl_mc_allocator_remove, -+}; -+ -+int __init 
fsl_mc_allocator_driver_init(void) -+{ -+ return fsl_mc_driver_register(&fsl_mc_allocator_driver); -+} -+ -+void __exit fsl_mc_allocator_driver_exit(void) -+{ -+ fsl_mc_driver_unregister(&fsl_mc_allocator_driver); -+} -diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c -new file mode 100644 -index 0000000..f173b35 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -0,0 +1,1347 @@ -+/* -+ * Freescale Management Complex (MC) bus driver -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#include "../include/mc-private.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "../include/dpmng.h" -+#include "../include/mc-sys.h" -+#include "dprc-cmd.h" -+ -+/* -+ * IOMMU stream ID flags -+ */ -+#define STREAM_ID_PL_MASK BIT(9) /* privilege level */ -+#define STREAM_ID_BMT_MASK BIT(8) /* bypass memory translation */ -+#define STREAM_ID_VA_MASK BIT(7) /* virtual address translation -+ * (two-stage translation) */ -+#define STREAM_ID_ICID_MASK (BIT(7) - 1) /* isolation context ID -+ * (translation context) */ -+ -+#define MAX_STREAM_ID_ICID STREAM_ID_ICID_MASK -+ -+static struct kmem_cache *mc_dev_cache; -+ -+/** -+ * fsl_mc_bus_match - device to driver matching callback -+ * @dev: the MC object device structure to match against -+ * @drv: the device driver to search for matching MC object device id -+ * structures -+ * -+ * Returns 1 on success, 0 otherwise. 
-+ */ -+static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv) -+{ -+ const struct fsl_mc_device_match_id *id; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv); -+ bool found = false; -+ -+ /* When driver_override is set, only bind to the matching driver */ -+ if (mc_dev->driver_override) { -+ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name); -+ goto out; -+ } -+ -+ if (!mc_drv->match_id_table) -+ goto out; -+ -+ /* -+ * If the object is not 'plugged' don't match. -+ * Only exception is the root DPRC, which is a special case. -+ * -+ * NOTE: Only when this function is invoked for the root DPRC, -+ * mc_dev->mc_io is not NULL -+ */ -+ if ((mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED) == 0 && -+ !mc_dev->mc_io) -+ goto out; -+ -+ /* -+ * Traverse the match_id table of the given driver, trying to find -+ * a matching for the given MC object device. -+ */ -+ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) { -+ if (id->vendor == mc_dev->obj_desc.vendor && -+ strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) { -+ found = true; -+ -+ break; -+ } -+ } -+ -+out: -+ dev_dbg(dev, "%smatched\n", found ? 
"" : "not "); -+ return found; -+} -+ -+/** -+ * fsl_mc_bus_uevent - callback invoked when a device is added -+ */ -+static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env) -+{ -+ pr_debug("%s invoked\n", __func__); -+ return 0; -+} -+ -+static ssize_t driver_override_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ const char *driver_override, *old = mc_dev->driver_override; -+ char *cp; -+ -+ if (WARN_ON(dev->bus != &fsl_mc_bus_type)) -+ return -EINVAL; -+ -+ if (count > PATH_MAX) -+ return -EINVAL; -+ -+ driver_override = kstrndup(buf, count, GFP_KERNEL); -+ if (!driver_override) -+ return -ENOMEM; -+ -+ cp = strchr(driver_override, '\n'); -+ if (cp) -+ *cp = '\0'; -+ -+ if (strlen(driver_override)) { -+ mc_dev->driver_override = driver_override; -+ } else { -+ kfree(driver_override); -+ mc_dev->driver_override = NULL; -+ } -+ -+ kfree(old); -+ -+ return count; -+} -+ -+static ssize_t driver_override_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ -+ return sprintf(buf, "%s\n", mc_dev->driver_override); -+} -+ -+static DEVICE_ATTR_RW(driver_override); -+ -+static ssize_t rescan_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ unsigned long val; -+ unsigned int irq_count; -+ struct fsl_mc_device *root_mc_dev; -+ struct fsl_mc_bus *root_mc_bus; -+ -+ if (!is_root_dprc(dev)) -+ return -EINVAL; -+ -+ root_mc_dev = to_fsl_mc_device(dev); -+ root_mc_bus = to_fsl_mc_bus(root_mc_dev); -+ -+ if (kstrtoul(buf, 0, &val) < 0) -+ return -EINVAL; -+ -+ if (val) { -+ mutex_lock(&root_mc_bus->scan_mutex); -+ dprc_scan_objects(root_mc_dev, NULL, &irq_count); -+ mutex_unlock(&root_mc_bus->scan_mutex); -+ } -+ -+ return count; -+} -+ -+static DEVICE_ATTR_WO(rescan); -+ -+static struct attribute *fsl_mc_dev_attrs[] = 
{ -+ &dev_attr_driver_override.attr, -+ &dev_attr_rescan.attr, -+ NULL, -+}; -+ -+static const struct attribute_group fsl_mc_dev_group = { -+ .attrs = fsl_mc_dev_attrs, -+}; -+ -+static const struct attribute_group *fsl_mc_dev_groups[] = { -+ &fsl_mc_dev_group, -+ NULL, -+}; -+ -+static int scan_fsl_mc_bus(struct device *dev, void *data) -+{ -+ unsigned int irq_count; -+ struct fsl_mc_device *root_mc_dev; -+ struct fsl_mc_bus *root_mc_bus; -+ -+ if (is_root_dprc(dev)) { -+ root_mc_dev = to_fsl_mc_device(dev); -+ root_mc_bus = to_fsl_mc_bus(root_mc_dev); -+ mutex_lock(&root_mc_bus->scan_mutex); -+ dprc_scan_objects(root_mc_dev, NULL, &irq_count); -+ mutex_unlock(&root_mc_bus->scan_mutex); -+ } -+ -+ return 0; -+} -+ -+static ssize_t bus_rescan_store(struct bus_type *bus, -+ const char *buf, size_t count) -+{ -+ unsigned long val; -+ -+ if (kstrtoul(buf, 0, &val) < 0) -+ return -EINVAL; -+ -+ if (val) -+ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus); -+ -+ return count; -+} -+static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store); -+ -+static struct attribute *fsl_mc_bus_attrs[] = { -+ &bus_attr_rescan.attr, -+ NULL, -+}; -+ -+static const struct attribute_group fsl_mc_bus_group = { -+ .attrs = fsl_mc_bus_attrs, -+}; -+ -+static const struct attribute_group *fsl_mc_bus_groups[] = { -+ &fsl_mc_bus_group, -+ NULL, -+}; -+ -+struct bus_type fsl_mc_bus_type = { -+ .name = "fsl-mc", -+ .match = fsl_mc_bus_match, -+ .uevent = fsl_mc_bus_uevent, -+ .dev_groups = fsl_mc_dev_groups, -+ .bus_groups = fsl_mc_bus_groups, -+}; -+EXPORT_SYMBOL_GPL(fsl_mc_bus_type); -+ -+static int fsl_mc_driver_probe(struct device *dev) -+{ -+ struct fsl_mc_driver *mc_drv; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ int error; -+ -+ if (WARN_ON(!dev->driver)) -+ return -EINVAL; -+ -+ mc_drv = to_fsl_mc_driver(dev->driver); -+ if (WARN_ON(!mc_drv->probe)) -+ return -EINVAL; -+ -+ error = mc_drv->probe(mc_dev); -+ if (error < 0) { -+ dev_err(dev, "MC object device 
probe callback failed: %d\n", -+ error); -+ return error; -+ } -+ -+ return 0; -+} -+ -+static int fsl_mc_driver_remove(struct device *dev) -+{ -+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ int error; -+ -+ if (WARN_ON(!dev->driver)) -+ return -EINVAL; -+ -+ error = mc_drv->remove(mc_dev); -+ if (error < 0) { -+ dev_err(dev, -+ "MC object device remove callback failed: %d\n", -+ error); -+ return error; -+ } -+ -+ return 0; -+} -+ -+static void fsl_mc_driver_shutdown(struct device *dev) -+{ -+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ -+ mc_drv->shutdown(mc_dev); -+} -+ -+/** -+ * __fsl_mc_driver_register - registers a child device driver with the -+ * MC bus -+ * -+ * This function is implicitly invoked from the registration function of -+ * fsl_mc device drivers, which is generated by the -+ * module_fsl_mc_driver() macro. -+ */ -+int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver, -+ struct module *owner) -+{ -+ int error; -+ -+ mc_driver->driver.owner = owner; -+ mc_driver->driver.bus = &fsl_mc_bus_type; -+ -+ if (mc_driver->probe) -+ mc_driver->driver.probe = fsl_mc_driver_probe; -+ -+ if (mc_driver->remove) -+ mc_driver->driver.remove = fsl_mc_driver_remove; -+ -+ if (mc_driver->shutdown) -+ mc_driver->driver.shutdown = fsl_mc_driver_shutdown; -+ -+ error = driver_register(&mc_driver->driver); -+ if (error < 0) { -+ pr_err("driver_register() failed for %s: %d\n", -+ mc_driver->driver.name, error); -+ return error; -+ } -+ -+ pr_info("MC object device driver %s registered\n", -+ mc_driver->driver.name); -+ return 0; -+} -+EXPORT_SYMBOL_GPL(__fsl_mc_driver_register); -+ -+/** -+ * fsl_mc_driver_unregister - unregisters a device driver from the -+ * MC bus -+ */ -+void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver) -+{ -+ driver_unregister(&mc_driver->driver); -+} 
-+EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister); -+ -+bool fsl_mc_interrupts_supported(void) -+{ -+ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); -+ -+ return mc->gic_supported; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_interrupts_supported); -+ -+static int get_dprc_attr(struct fsl_mc_io *mc_io, -+ int container_id, struct dprc_attributes *attr) -+{ -+ uint16_t dprc_handle; -+ int error; -+ -+ error = dprc_open(mc_io, 0, container_id, &dprc_handle); -+ if (error < 0) { -+ pr_err("dprc_open() failed: %d\n", error); -+ return error; -+ } -+ -+ memset(attr, 0, sizeof(struct dprc_attributes)); -+ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr); -+ if (error < 0) { -+ pr_err("dprc_get_attributes() failed: %d\n", error); -+ goto common_cleanup; -+ } -+ -+ error = 0; -+ -+common_cleanup: -+ (void)dprc_close(mc_io, 0, dprc_handle); -+ return error; -+} -+ -+static int get_dprc_icid(struct fsl_mc_io *mc_io, -+ int container_id, uint16_t *icid) -+{ -+ struct dprc_attributes attr; -+ int error; -+ -+ error = get_dprc_attr(mc_io, container_id, &attr); -+ if (error == 0) -+ *icid = attr.icid; -+ -+ return error; -+} -+ -+static int get_dprc_version(struct fsl_mc_io *mc_io, -+ int container_id, uint16_t *major, uint16_t *minor) -+{ -+ struct dprc_attributes attr; -+ int error; -+ -+ error = get_dprc_attr(mc_io, container_id, &attr); -+ if (error == 0) { -+ *major = attr.version.major; -+ *minor = attr.version.minor; -+ } -+ -+ return error; -+} -+ -+static int translate_mc_addr(enum fsl_mc_region_types mc_region_type, -+ uint64_t mc_offset, phys_addr_t *phys_addr) -+{ -+ int i; -+ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); -+ -+ if (mc->num_translation_ranges == 0) { -+ /* -+ * Do identity mapping: -+ */ -+ *phys_addr = mc_offset; -+ return 0; -+ } -+ -+ for (i = 0; i < mc->num_translation_ranges; i++) { -+ struct fsl_mc_addr_translation_range *range = -+ &mc->translation_ranges[i]; -+ -+ if (mc_region_type == 
range->mc_region_type && -+ mc_offset >= range->start_mc_offset && -+ mc_offset < range->end_mc_offset) { -+ *phys_addr = range->start_phys_addr + -+ (mc_offset - range->start_mc_offset); -+ return 0; -+ } -+ } -+ -+ return -EFAULT; -+} -+ -+static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev, -+ struct fsl_mc_device *mc_bus_dev) -+{ -+ int i; -+ int error; -+ struct resource *regions; -+ struct dprc_obj_desc *obj_desc = &mc_dev->obj_desc; -+ struct device *parent_dev = mc_dev->dev.parent; -+ enum fsl_mc_region_types mc_region_type; -+ -+ if (strcmp(obj_desc->type, "dprc") == 0 || -+ strcmp(obj_desc->type, "dpmcp") == 0) { -+ mc_region_type = FSL_MC_PORTAL; -+ } else if (strcmp(obj_desc->type, "dpio") == 0) { -+ mc_region_type = FSL_QBMAN_PORTAL; -+ } else { -+ /* -+ * This function should not have been called for this MC object -+ * type, as this object type is not supposed to have MMIO -+ * regions -+ */ -+ WARN_ON(true); -+ return -EINVAL; -+ } -+ -+ regions = kmalloc_array(obj_desc->region_count, -+ sizeof(regions[0]), GFP_KERNEL); -+ if (!regions) -+ return -ENOMEM; -+ -+ for (i = 0; i < obj_desc->region_count; i++) { -+ struct dprc_region_desc region_desc; -+ -+ error = dprc_get_obj_region(mc_bus_dev->mc_io, -+ 0, -+ mc_bus_dev->mc_handle, -+ obj_desc->type, -+ obj_desc->id, i, ®ion_desc); -+ if (error < 0) { -+ dev_err(parent_dev, -+ "dprc_get_obj_region() failed: %d\n", error); -+ goto error_cleanup_regions; -+ } -+ -+ WARN_ON(region_desc.size == 0); -+ error = translate_mc_addr(mc_region_type, -+ region_desc.base_offset, -+ ®ions[i].start); -+ if (error < 0) { -+ dev_err(parent_dev, -+ "Invalid MC offset: %#x (for %s.%d\'s region %d)\n", -+ region_desc.base_offset, -+ obj_desc->type, obj_desc->id, i); -+ goto error_cleanup_regions; -+ } -+ -+ regions[i].end = regions[i].start + region_desc.size - 1; -+ regions[i].name = "fsl-mc object MMIO region"; -+ regions[i].flags = IORESOURCE_IO; -+ if (region_desc.flags & DPRC_REGION_CACHEABLE) -+ 
regions[i].flags |= IORESOURCE_CACHEABLE; -+ } -+ -+ mc_dev->regions = regions; -+ return 0; -+ -+error_cleanup_regions: -+ kfree(regions); -+ return error; -+} -+ -+/** -+ * Add a newly discovered MC object device to be visible in Linux -+ */ -+int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, -+ struct fsl_mc_io *mc_io, -+ struct device *parent_dev, -+ const char *driver_override, -+ struct fsl_mc_device **new_mc_dev) -+{ -+ int error; -+ struct fsl_mc_device *mc_dev = NULL; -+ struct fsl_mc_bus *mc_bus = NULL; -+ struct fsl_mc_device *parent_mc_dev; -+ -+ if (parent_dev->bus == &fsl_mc_bus_type) -+ parent_mc_dev = to_fsl_mc_device(parent_dev); -+ else -+ parent_mc_dev = NULL; -+ -+ if (strcmp(obj_desc->type, "dprc") == 0) { -+ /* -+ * Allocate an MC bus device object: -+ */ -+ mc_bus = devm_kzalloc(parent_dev, sizeof(*mc_bus), GFP_KERNEL); -+ if (!mc_bus) -+ return -ENOMEM; -+ -+ mc_dev = &mc_bus->mc_dev; -+ } else { -+ /* -+ * Allocate a regular fsl_mc_device object: -+ */ -+ mc_dev = kmem_cache_zalloc(mc_dev_cache, GFP_KERNEL); -+ if (!mc_dev) -+ return -ENOMEM; -+ } -+ -+ mc_dev->obj_desc = *obj_desc; -+ mc_dev->mc_io = mc_io; -+ if (driver_override) { -+ /* -+ * We trust driver_override, so we don't need to use -+ * kstrndup() here -+ */ -+ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL); -+ if (!mc_dev->driver_override) { -+ error = -ENOMEM; -+ goto error_cleanup_dev; -+ } -+ } -+ -+ device_initialize(&mc_dev->dev); -+ INIT_LIST_HEAD(&mc_dev->dev.msi_list); -+ mc_dev->dev.parent = parent_dev; -+ mc_dev->dev.bus = &fsl_mc_bus_type; -+ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id); -+ -+ if (strcmp(obj_desc->type, "dprc") == 0) { -+ struct fsl_mc_io *mc_io2; -+ -+ mc_dev->flags |= FSL_MC_IS_DPRC; -+ -+ /* -+ * To get the DPRC's ICID, we need to open the DPRC -+ * in get_dprc_icid(). 
For child DPRCs, we do so using the -+ * parent DPRC's MC portal instead of the child DPRC's MC -+ * portal, in case the child DPRC is already opened with -+ * its own portal (e.g., the DPRC used by AIOP). -+ * -+ * NOTE: There cannot be more than one active open for a -+ * given MC object, using the same MC portal. -+ */ -+ if (parent_mc_dev) { -+ /* -+ * device being added is a child DPRC device -+ */ -+ mc_io2 = parent_mc_dev->mc_io; -+ } else { -+ /* -+ * device being added is the root DPRC device -+ */ -+ if (WARN_ON(!mc_io)) { -+ error = -EINVAL; -+ goto error_cleanup_dev; -+ } -+ -+ mc_io2 = mc_io; -+ } -+ -+ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid); -+ if (error < 0) -+ goto error_cleanup_dev; -+ } else { -+ /* -+ * A non-DPRC MC object device has to be a child of another -+ * MC object (specifically a DPRC object) -+ */ -+ mc_dev->icid = parent_mc_dev->icid; -+ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK; -+ mc_dev->dev.dma_mask = &mc_dev->dma_mask; -+ } -+ -+ /* -+ * Get MMIO regions for the device from the MC: -+ * -+ * NOTE: the root DPRC is a special case as its MMIO region is -+ * obtained from the device tree -+ */ -+ if (parent_mc_dev && obj_desc->region_count != 0) { -+ error = fsl_mc_device_get_mmio_regions(mc_dev, -+ parent_mc_dev); -+ if (error < 0) -+ goto error_cleanup_dev; -+ } -+ -+ /* -+ * Objects are coherent, unless 'no shareability' flag set. 
-+ * FIXME: fill up @dma_base, @size, @iommu -+ */ -+ if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY)) -+ arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true); -+ -+ /* -+ * The device-specific probe callback will get invoked by device_add() -+ */ -+ error = device_add(&mc_dev->dev); -+ if (error < 0) { -+ dev_err(parent_dev, -+ "device_add() failed for device %s: %d\n", -+ dev_name(&mc_dev->dev), error); -+ goto error_cleanup_dev; -+ } -+ -+ (void)get_device(&mc_dev->dev); -+ dev_dbg(parent_dev, "Added MC object device %s\n", -+ dev_name(&mc_dev->dev)); -+ -+ *new_mc_dev = mc_dev; -+ return 0; -+ -+error_cleanup_dev: -+ kfree(mc_dev->regions); -+ if (mc_bus) -+ devm_kfree(parent_dev, mc_bus); -+ else -+ kmem_cache_free(mc_dev_cache, mc_dev); -+ -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_device_add); -+ -+/** -+ * fsl_mc_device_remove - Remove a MC object device from being visible to -+ * Linux -+ * -+ * @mc_dev: Pointer to a MC object device object -+ */ -+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev) -+{ -+ struct fsl_mc_bus *mc_bus = NULL; -+ -+ kfree(mc_dev->regions); -+ -+ /* -+ * The device-specific remove callback will get invoked by device_del() -+ */ -+ device_del(&mc_dev->dev); -+ put_device(&mc_dev->dev); -+ -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) { -+ mc_bus = to_fsl_mc_bus(mc_dev); -+ -+ if (&mc_dev->dev == fsl_mc_bus_type.dev_root) -+ fsl_mc_bus_type.dev_root = NULL; -+ } else -+ WARN_ON(mc_dev->mc_io != NULL); -+ -+ kfree(mc_dev->driver_override); -+ mc_dev->driver_override = NULL; -+ if (mc_bus) -+ devm_kfree(mc_dev->dev.parent, mc_bus); -+ else -+ kmem_cache_free(mc_dev_cache, mc_dev); -+} -+EXPORT_SYMBOL_GPL(fsl_mc_device_remove); -+ -+static int mc_bus_msi_prepare(struct irq_domain *domain, struct device *dev, -+ int nvec, msi_alloc_info_t *info) -+{ -+ int error; -+ u32 its_dev_id; -+ struct dprc_attributes dprc_attr; -+ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(dev); -+ -+ if 
(WARN_ON(!(mc_bus_dev->flags & FSL_MC_IS_DPRC))) -+ return -EINVAL; -+ -+ error = dprc_get_attributes(mc_bus_dev->mc_io, -+ 0, -+ mc_bus_dev->mc_handle, &dprc_attr); -+ if (error < 0) { -+ dev_err(&mc_bus_dev->dev, -+ "dprc_get_attributes() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ /* -+ * Build the device Id to be passed to the GIC-ITS: -+ * -+ * NOTE: This device id corresponds to the IOMMU stream ID -+ * associated with the DPRC object. -+ */ -+ its_dev_id = mc_bus_dev->icid; -+ if (its_dev_id > STREAM_ID_ICID_MASK) { -+ dev_err(&mc_bus_dev->dev, -+ "Invalid ICID: %#x\n", its_dev_id); -+ return -ERANGE; -+ } -+ -+ if (dprc_attr.options & DPRC_CFG_OPT_AIOP) -+ its_dev_id |= STREAM_ID_PL_MASK | STREAM_ID_BMT_MASK; -+ -+ return __its_msi_prepare(domain, its_dev_id, dev, nvec, info); -+} -+ -+static void mc_bus_mask_msi_irq(struct irq_data *d) -+{ -+ /* Bus specefic Mask */ -+ irq_chip_mask_parent(d); -+} -+ -+static void mc_bus_unmask_msi_irq(struct irq_data *d) -+{ -+ /* Bus specefic unmask */ -+ irq_chip_unmask_parent(d); -+} -+ -+static void program_msi_at_mc(struct fsl_mc_device *mc_bus_dev, -+ struct fsl_mc_device_irq *irq) -+{ -+ int error; -+ struct fsl_mc_device *owner_mc_dev = irq->mc_dev; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ struct dprc_irq_cfg irq_cfg; -+ -+ /* -+ * irq->msi_paddr is 0x0 when this function is invoked in the -+ * free_irq() code path. In this case, for the MC, we don't -+ * really need to "unprogram" the MSI, so we just return. -+ * This helps avoid subtle ordering problems in the MC -+ * bus IRQ teardown logic. 
-+ * FIXME: evaluate whether there is a better way to address -+ * the underlying issue (upstreamability concern) -+ */ -+ if (irq->msi_paddr == 0x0) -+ return; -+ -+ if (WARN_ON(!owner_mc_dev)) -+ return; -+ -+ irq_cfg.paddr = irq->msi_paddr; -+ irq_cfg.val = irq->msi_value; -+ irq_cfg.irq_num = irq->irq_number; -+ -+ if (owner_mc_dev == mc_bus_dev) { -+ /* -+ * IRQ is for the mc_bus_dev's DPRC itself -+ */ -+ error = dprc_set_irq(mc_bus->atomic_mc_io, -+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, -+ mc_bus->atomic_dprc_handle, -+ irq->dev_irq_index, -+ &irq_cfg); -+ if (error < 0) { -+ dev_err(&owner_mc_dev->dev, -+ "dprc_set_irq() failed: %d\n", error); -+ } -+ } else { -+ error = dprc_set_obj_irq(mc_bus->atomic_mc_io, -+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, -+ mc_bus->atomic_dprc_handle, -+ owner_mc_dev->obj_desc.type, -+ owner_mc_dev->obj_desc.id, -+ irq->dev_irq_index, -+ &irq_cfg); -+ if (error < 0) { -+ dev_err(&owner_mc_dev->dev, -+ "dprc_obj_set_irq() failed: %d\n", error); -+ } -+ } -+} -+ -+/* -+ * This function is invoked from devm_request_irq(), -+ * devm_request_threaded_irq(), dev_free_irq() -+ */ -+static void mc_bus_msi_domain_write_msg(struct irq_data *irq_data, -+ struct msi_msg *msg) -+{ -+ struct msi_desc *msi_entry = irq_data->msi_desc; -+ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_entry->dev); -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ struct fsl_mc_device_irq *irq_res = -+ &mc_bus->irq_resources[msi_entry->msi_attrib.entry_nr]; -+ -+ /* -+ * NOTE: This function is invoked with interrupts disabled -+ */ -+ -+ if (irq_res->irq_number == irq_data->irq) { -+ irq_res->msi_paddr = -+ ((u64)msg->address_hi << 32) | msg->address_lo; -+ -+ irq_res->msi_value = msg->data; -+ -+ /* -+ * Program the MSI (paddr, value) pair in the device: -+ */ -+ program_msi_at_mc(mc_bus_dev, irq_res); -+ } -+} -+ -+static struct irq_chip mc_bus_msi_irq_chip = { -+ .name = "fsl-mc-bus-msi", -+ .irq_unmask = mc_bus_unmask_msi_irq, -+ 
.irq_mask = mc_bus_mask_msi_irq, -+ .irq_eoi = irq_chip_eoi_parent, -+ .irq_write_msi_msg = mc_bus_msi_domain_write_msg, -+}; -+ -+static struct msi_domain_ops mc_bus_msi_ops = { -+ .msi_prepare = mc_bus_msi_prepare, -+}; -+ -+static struct msi_domain_info mc_bus_msi_domain_info = { -+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | -+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), -+ .ops = &mc_bus_msi_ops, -+ .chip = &mc_bus_msi_irq_chip, -+}; -+ -+static int create_mc_irq_domain(struct platform_device *mc_pdev, -+ struct irq_domain **new_irq_domain) -+{ -+ int error; -+ struct device_node *its_of_node; -+ struct irq_domain *its_domain; -+ struct irq_domain *irq_domain; -+ struct device_node *mc_of_node = mc_pdev->dev.of_node; -+ -+ its_of_node = of_parse_phandle(mc_of_node, "msi-parent", 0); -+ if (!its_of_node) { -+ dev_err(&mc_pdev->dev, -+ "msi-parent phandle missing for %s\n", -+ mc_of_node->full_name); -+ return -ENOENT; -+ } -+ -+ /* -+ * Extract MSI parent node: -+ */ -+ its_domain = irq_find_host(its_of_node); -+ if (!its_domain) { -+ dev_err(&mc_pdev->dev, "Unable to find parent domain\n"); -+ error = -ENOENT; -+ goto cleanup_its_of_node; -+ } -+ -+ irq_domain = msi_create_irq_domain(mc_of_node, &mc_bus_msi_domain_info, -+ its_domain->parent); -+ if (!irq_domain) { -+ dev_err(&mc_pdev->dev, "Failed to allocate msi_domain\n"); -+ error = -ENOMEM; -+ goto cleanup_its_of_node; -+ } -+ -+ dev_dbg(&mc_pdev->dev, "Allocated MSI domain\n"); -+ *new_irq_domain = irq_domain; -+ return 0; -+ -+cleanup_its_of_node: -+ of_node_put(its_of_node); -+ return error; -+} -+ -+/* -+ * Initialize the interrupt pool associated with a MC bus. 
-+ * It allocates a block of IRQs from the GIC-ITS -+ */ -+int __must_check fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, -+ unsigned int irq_count) -+{ -+ unsigned int i; -+ struct msi_desc *msi_entry; -+ struct msi_desc *next_msi_entry; -+ struct fsl_mc_device_irq *irq_resources; -+ struct fsl_mc_device_irq *irq_res; -+ int error; -+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; -+ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); -+ struct fsl_mc_resource_pool *res_pool = -+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; -+ -+ /* -+ * Detect duplicate invocations of this function: -+ */ -+ if (WARN_ON(!list_empty(&mc_bus_dev->dev.msi_list))) -+ return -EINVAL; -+ -+ if (WARN_ON(irq_count == 0 || -+ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)) -+ return -EINVAL; -+ -+ irq_resources = -+ devm_kzalloc(&mc_bus_dev->dev, -+ sizeof(*irq_resources) * irq_count, -+ GFP_KERNEL); -+ if (!irq_resources) -+ return -ENOMEM; -+ -+ for (i = 0; i < irq_count; i++) { -+ irq_res = &irq_resources[i]; -+ msi_entry = alloc_msi_entry(&mc_bus_dev->dev); -+ if (!msi_entry) { -+ dev_err(&mc_bus_dev->dev, "Failed to allocate msi entry\n"); -+ error = -ENOMEM; -+ goto cleanup_msi_entries; -+ } -+ -+ msi_entry->msi_attrib.is_msix = 1; -+ msi_entry->msi_attrib.is_64 = 1; -+ msi_entry->msi_attrib.entry_nr = i; -+ msi_entry->nvec_used = 1; -+ list_add_tail(&msi_entry->list, &mc_bus_dev->dev.msi_list); -+ -+ /* -+ * NOTE: irq_res->msi_paddr will be set by the -+ * mc_bus_msi_domain_write_msg() callback -+ */ -+ irq_res->resource.type = res_pool->type; -+ irq_res->resource.data = irq_res; -+ irq_res->resource.parent_pool = res_pool; -+ INIT_LIST_HEAD(&irq_res->resource.node); -+ list_add_tail(&irq_res->resource.node, &res_pool->free_list); -+ } -+ -+ /* -+ * NOTE: Calling this function will trigger the invocation of the -+ * mc_bus_msi_prepare() callback -+ */ -+ error = msi_domain_alloc_irqs(mc->irq_domain, -+ &mc_bus_dev->dev, irq_count); -+ -+ if (error) { -+ 
dev_err(&mc_bus_dev->dev, "Failed to allocate IRQs\n"); -+ goto cleanup_msi_entries; -+ } -+ -+ for_each_msi_entry(msi_entry, &mc_bus_dev->dev) { -+ u32 irq_num = msi_entry->irq; -+ -+ irq_res = &irq_resources[msi_entry->msi_attrib.entry_nr]; -+ irq_res->irq_number = irq_num; -+ irq_res->resource.id = irq_num; -+ } -+ -+ res_pool->max_count = irq_count; -+ res_pool->free_count = irq_count; -+ mc_bus->irq_resources = irq_resources; -+ return 0; -+ -+cleanup_msi_entries: -+ list_for_each_entry_safe(msi_entry, next_msi_entry, -+ &mc_bus_dev->dev.msi_list, list) { -+ list_del(&msi_entry->list); -+ kfree(msi_entry); -+ } -+ -+ devm_kfree(&mc_bus_dev->dev, irq_resources); -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool); -+ -+/** -+ * Teardown the interrupt pool associated with an MC bus. -+ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS. -+ */ -+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus) -+{ -+ struct msi_desc *msi_entry; -+ struct msi_desc *next_msi_entry; -+ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); -+ struct fsl_mc_resource_pool *res_pool = -+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; -+ -+ if (WARN_ON(!mc_bus->irq_resources)) -+ return; -+ -+ if (WARN_ON(res_pool->max_count == 0)) -+ return; -+ -+ if (WARN_ON(res_pool->free_count != res_pool->max_count)) -+ return; -+ -+ msi_domain_free_irqs(mc->irq_domain, &mc_bus->mc_dev.dev); -+ list_for_each_entry_safe(msi_entry, next_msi_entry, -+ &mc_bus->mc_dev.dev.msi_list, list) { -+ list_del(&msi_entry->list); -+ kfree(msi_entry); -+ } -+ -+ devm_kfree(&mc_bus->mc_dev.dev, mc_bus->irq_resources); -+ res_pool->max_count = 0; -+ res_pool->free_count = 0; -+ mc_bus->irq_resources = NULL; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool); -+ -+static int parse_mc_ranges(struct device *dev, -+ int *paddr_cells, -+ int *mc_addr_cells, -+ int *mc_size_cells, -+ const __be32 **ranges_start, -+ uint8_t *num_ranges) -+{ -+ const __be32 *prop; -+ int 
range_tuple_cell_count; -+ int ranges_len; -+ int tuple_len; -+ struct device_node *mc_node = dev->of_node; -+ -+ *ranges_start = of_get_property(mc_node, "ranges", &ranges_len); -+ if (!(*ranges_start) || !ranges_len) { -+ dev_warn(dev, -+ "missing or empty ranges property for device tree node '%s'\n", -+ mc_node->name); -+ -+ *num_ranges = 0; -+ return 0; -+ } -+ -+ *paddr_cells = of_n_addr_cells(mc_node); -+ -+ prop = of_get_property(mc_node, "#address-cells", NULL); -+ if (prop) -+ *mc_addr_cells = be32_to_cpup(prop); -+ else -+ *mc_addr_cells = *paddr_cells; -+ -+ prop = of_get_property(mc_node, "#size-cells", NULL); -+ if (prop) -+ *mc_size_cells = be32_to_cpup(prop); -+ else -+ *mc_size_cells = of_n_size_cells(mc_node); -+ -+ range_tuple_cell_count = *paddr_cells + *mc_addr_cells + -+ *mc_size_cells; -+ -+ tuple_len = range_tuple_cell_count * sizeof(__be32); -+ if (ranges_len % tuple_len != 0) { -+ dev_err(dev, "malformed ranges property '%s'\n", mc_node->name); -+ return -EINVAL; -+ } -+ -+ *num_ranges = ranges_len / tuple_len; -+ return 0; -+} -+ -+static int get_mc_addr_translation_ranges(struct device *dev, -+ struct fsl_mc_addr_translation_range -+ **ranges, -+ uint8_t *num_ranges) -+{ -+ int error; -+ int paddr_cells; -+ int mc_addr_cells; -+ int mc_size_cells; -+ int i; -+ const __be32 *ranges_start; -+ const __be32 *cell; -+ -+ error = parse_mc_ranges(dev, -+ &paddr_cells, -+ &mc_addr_cells, -+ &mc_size_cells, -+ &ranges_start, -+ num_ranges); -+ if (error < 0) -+ return error; -+ -+ if (!(*num_ranges)) { -+ /* -+ * Missing or empty ranges property ("ranges;") for the -+ * 'fsl,qoriq-mc' node. In this case, identity mapping -+ * will be used. 
-+ */ -+ *ranges = NULL; -+ return 0; -+ } -+ -+ *ranges = devm_kcalloc(dev, *num_ranges, -+ sizeof(struct fsl_mc_addr_translation_range), -+ GFP_KERNEL); -+ if (!(*ranges)) -+ return -ENOMEM; -+ -+ cell = ranges_start; -+ for (i = 0; i < *num_ranges; ++i) { -+ struct fsl_mc_addr_translation_range *range = &(*ranges)[i]; -+ -+ range->mc_region_type = of_read_number(cell, 1); -+ range->start_mc_offset = of_read_number(cell + 1, -+ mc_addr_cells - 1); -+ cell += mc_addr_cells; -+ range->start_phys_addr = of_read_number(cell, paddr_cells); -+ cell += paddr_cells; -+ range->end_mc_offset = range->start_mc_offset + -+ of_read_number(cell, mc_size_cells); -+ -+ cell += mc_size_cells; -+ } -+ -+ return 0; -+} -+ -+/** -+ * fsl_mc_bus_probe - callback invoked when the root MC bus is being -+ * added -+ */ -+static int fsl_mc_bus_probe(struct platform_device *pdev) -+{ -+ struct dprc_obj_desc obj_desc; -+ int error; -+ struct fsl_mc *mc; -+ struct fsl_mc_device *mc_bus_dev = NULL; -+ struct fsl_mc_io *mc_io = NULL; -+ int container_id; -+ phys_addr_t mc_portal_phys_addr; -+ uint32_t mc_portal_size; -+ struct mc_version mc_version; -+ struct resource res; -+ -+ dev_info(&pdev->dev, "Root MC bus device probed"); -+ -+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); -+ if (!mc) -+ return -ENOMEM; -+ -+ platform_set_drvdata(pdev, mc); -+ error = create_mc_irq_domain(pdev, &mc->irq_domain); -+ if (error < 0) { -+ dev_warn(&pdev->dev, -+ "WARNING: MC bus driver will run without interrupt support\n"); -+ } else { -+ mc->gic_supported = true; -+ } -+ -+ /* -+ * Get physical address of MC portal for the root DPRC: -+ */ -+ error = of_address_to_resource(pdev->dev.of_node, 0, &res); -+ if (error < 0) { -+ dev_err(&pdev->dev, -+ "of_address_to_resource() failed for %s\n", -+ pdev->dev.of_node->full_name); -+ goto error_cleanup_irq_domain; -+ } -+ -+ mc_portal_phys_addr = res.start; -+ mc_portal_size = resource_size(&res); -+ error = fsl_create_mc_io(&pdev->dev, 
mc_portal_phys_addr, -+ mc_portal_size, NULL, 0, &mc_io); -+ if (error < 0) -+ goto error_cleanup_irq_domain; -+ -+ error = mc_get_version(mc_io, 0, &mc_version); -+ if (error != 0) { -+ dev_err(&pdev->dev, -+ "mc_get_version() failed with error %d\n", error); -+ goto error_cleanup_mc_io; -+ } -+ -+ dev_info(&pdev->dev, -+ "Freescale Management Complex Firmware version: %u.%u.%u\n", -+ mc_version.major, mc_version.minor, mc_version.revision); -+ -+ error = get_mc_addr_translation_ranges(&pdev->dev, -+ &mc->translation_ranges, -+ &mc->num_translation_ranges); -+ if (error < 0) -+ goto error_cleanup_mc_io; -+ -+ error = dpmng_get_container_id(mc_io, 0, &container_id); -+ if (error < 0) { -+ dev_err(&pdev->dev, -+ "dpmng_get_container_id() failed: %d\n", error); -+ goto error_cleanup_mc_io; -+ } -+ -+ memset(&obj_desc, 0, sizeof(struct dprc_obj_desc)); -+ error = get_dprc_version(mc_io, container_id, -+ &obj_desc.ver_major, &obj_desc.ver_minor); -+ if (error < 0) -+ goto error_cleanup_mc_io; -+ -+ obj_desc.vendor = FSL_MC_VENDOR_FREESCALE; -+ strcpy(obj_desc.type, "dprc"); -+ obj_desc.id = container_id; -+ obj_desc.irq_count = 1; -+ obj_desc.region_count = 0; -+ -+ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL, -+ &mc_bus_dev); -+ if (error < 0) -+ goto error_cleanup_mc_io; -+ -+ mc->root_mc_bus_dev = mc_bus_dev; -+ return 0; -+ -+error_cleanup_mc_io: -+ fsl_destroy_mc_io(mc_io); -+ -+error_cleanup_irq_domain: -+ if (mc->gic_supported) -+ irq_domain_remove(mc->irq_domain); -+ -+ return error; -+} -+ -+/** -+ * fsl_mc_bus_remove - callback invoked when the root MC bus is being -+ * removed -+ */ -+static int fsl_mc_bus_remove(struct platform_device *pdev) -+{ -+ struct fsl_mc *mc = platform_get_drvdata(pdev); -+ -+ if (WARN_ON(&mc->root_mc_bus_dev->dev != fsl_mc_bus_type.dev_root)) -+ return -EINVAL; -+ -+ if (mc->gic_supported) -+ irq_domain_remove(mc->irq_domain); -+ -+ fsl_mc_device_remove(mc->root_mc_bus_dev); -+ 
fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); -+ mc->root_mc_bus_dev->mc_io = NULL; -+ -+ dev_info(&pdev->dev, "Root MC bus device removed"); -+ return 0; -+} -+ -+static const struct of_device_id fsl_mc_bus_match_table[] = { -+ {.compatible = "fsl,qoriq-mc",}, -+ {}, -+}; -+ -+MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table); -+ -+static struct platform_driver fsl_mc_bus_driver = { -+ .driver = { -+ .name = "fsl_mc_bus", -+ .owner = THIS_MODULE, -+ .pm = NULL, -+ .of_match_table = fsl_mc_bus_match_table, -+ }, -+ .probe = fsl_mc_bus_probe, -+ .remove = fsl_mc_bus_remove, -+}; -+ -+static int __init fsl_mc_bus_driver_init(void) -+{ -+ int error; -+ -+ mc_dev_cache = kmem_cache_create("fsl_mc_device", -+ sizeof(struct fsl_mc_device), 0, 0, -+ NULL); -+ if (!mc_dev_cache) { -+ pr_err("Could not create fsl_mc_device cache\n"); -+ return -ENOMEM; -+ } -+ -+ error = bus_register(&fsl_mc_bus_type); -+ if (error < 0) { -+ pr_err("fsl-mc bus type registration failed: %d\n", error); -+ goto error_cleanup_cache; -+ } -+ -+ pr_info("fsl-mc bus type registered\n"); -+ -+ error = platform_driver_register(&fsl_mc_bus_driver); -+ if (error < 0) { -+ pr_err("platform_driver_register() failed: %d\n", error); -+ goto error_cleanup_bus; -+ } -+ -+ error = dprc_driver_init(); -+ if (error < 0) -+ goto error_cleanup_driver; -+ -+ error = fsl_mc_allocator_driver_init(); -+ if (error < 0) -+ goto error_cleanup_dprc_driver; -+ -+ return 0; -+ -+error_cleanup_dprc_driver: -+ dprc_driver_exit(); -+ -+error_cleanup_driver: -+ platform_driver_unregister(&fsl_mc_bus_driver); -+ -+error_cleanup_bus: -+ bus_unregister(&fsl_mc_bus_type); -+ -+error_cleanup_cache: -+ kmem_cache_destroy(mc_dev_cache); -+ return error; -+} -+ -+postcore_initcall(fsl_mc_bus_driver_init); -+ -+static void __exit fsl_mc_bus_driver_exit(void) -+{ -+ if (WARN_ON(!mc_dev_cache)) -+ return; -+ -+ fsl_mc_allocator_driver_exit(); -+ dprc_driver_exit(); -+ platform_driver_unregister(&fsl_mc_bus_driver); -+ 
bus_unregister(&fsl_mc_bus_type); -+ kmem_cache_destroy(mc_dev_cache); -+ pr_info("MC bus unregistered\n"); -+} -+ -+module_exit(fsl_mc_bus_driver_exit); -+ -+MODULE_AUTHOR("Freescale Semiconductor Inc."); -+MODULE_DESCRIPTION("Freescale Management Complex (MC) bus driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/staging/fsl-mc/bus/mc-ioctl.h b/drivers/staging/fsl-mc/bus/mc-ioctl.h -new file mode 100644 -index 0000000..d5c1bc3 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/mc-ioctl.h -@@ -0,0 +1,25 @@ -+/* -+ * Freescale Management Complex (MC) ioclt interface -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * Lijun Pan -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+#ifndef _FSL_MC_IOCTL_H_ -+#define _FSL_MC_IOCTL_H_ -+ -+#include -+ -+#define RESTOOL_IOCTL_TYPE 'R' -+ -+#define RESTOOL_GET_ROOT_DPRC_INFO \ -+ _IOR(RESTOOL_IOCTL_TYPE, 0x1, uint32_t) -+ -+#define RESTOOL_SEND_MC_COMMAND \ -+ _IOWR(RESTOOL_IOCTL_TYPE, 0x4, struct mc_command) -+ -+#endif /* _FSL_MC_IOCTL_H_ */ -diff --git a/drivers/staging/fsl-mc/bus/mc-restool.c b/drivers/staging/fsl-mc/bus/mc-restool.c -new file mode 100644 -index 0000000..d261c1a ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/mc-restool.c -@@ -0,0 +1,312 @@ -+/* -+ * Freescale Management Complex (MC) restool driver -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * Lijun Pan -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#include "../include/mc-private.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "mc-ioctl.h" -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dpmng.h" -+ -+/** -+ * Maximum number of DPRCs that can be opened at the same time -+ */ -+#define MAX_DPRC_HANDLES 64 -+ -+/** -+ * struct fsl_mc_restool - Management Complex (MC) resource manager object -+ * @tool_mc_io: pointer to the MC I/O object used by the restool -+ */ -+struct fsl_mc_restool { -+ struct fsl_mc_io *tool_mc_io; -+}; -+ -+/** -+ * struct global_state - indicating the number of static and dynamic instance -+ * @dynamic_instance_count - number of dynamically created instances -+ * @static_instance_in_use - static instance is in use or not -+ * @mutex - mutex lock to serialze the operations -+ */ -+struct global_state { -+ uint32_t dynamic_instance_count; -+ bool static_instance_in_use; -+ struct mutex mutex; -+}; -+ -+static struct fsl_mc_restool fsl_mc_restool = { 0 }; -+static struct global_state global_state = { 0 }; -+ -+static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep) -+{ -+ struct fsl_mc_device *root_mc_dev; -+ int error = 0; -+ struct fsl_mc_restool *fsl_mc_restool_new = NULL; -+ -+ mutex_lock(&global_state.mutex); -+ -+ if (WARN_ON(fsl_mc_bus_type.dev_root == NULL)) { -+ error = -EINVAL; -+ goto error; -+ } -+ -+ if (!global_state.static_instance_in_use) { -+ global_state.static_instance_in_use = true; -+ filep->private_data = &fsl_mc_restool; -+ } else { -+ fsl_mc_restool_new = kmalloc(sizeof(struct fsl_mc_restool), -+ GFP_KERNEL); -+ if (fsl_mc_restool_new == NULL) { -+ error = -ENOMEM; -+ goto error; -+ } -+ memset(fsl_mc_restool_new, 0, sizeof(*fsl_mc_restool_new)); -+ -+ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); -+ error = fsl_mc_portal_allocate(root_mc_dev, 0, -+ &fsl_mc_restool_new->tool_mc_io); -+ if (error < 0) { -+ pr_err("Not able to allocate MC 
portal\n"); -+ goto error; -+ } -+ ++global_state.dynamic_instance_count; -+ filep->private_data = fsl_mc_restool_new; -+ } -+ -+ mutex_unlock(&global_state.mutex); -+ return 0; -+error: -+ if (fsl_mc_restool_new != NULL && -+ fsl_mc_restool_new->tool_mc_io != NULL) { -+ fsl_mc_portal_free(fsl_mc_restool_new->tool_mc_io); -+ fsl_mc_restool_new->tool_mc_io = NULL; -+ } -+ -+ kfree(fsl_mc_restool_new); -+ mutex_unlock(&global_state.mutex); -+ return error; -+} -+ -+static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep) -+{ -+ struct fsl_mc_restool *fsl_mc_restool_local = filep->private_data; -+ -+ if (WARN_ON(filep->private_data == NULL)) -+ return -EINVAL; -+ -+ mutex_lock(&global_state.mutex); -+ -+ if (WARN_ON(global_state.dynamic_instance_count == 0 && -+ !global_state.static_instance_in_use)) { -+ mutex_unlock(&global_state.mutex); -+ return -EINVAL; -+ } -+ -+ /* Globally clean up opened/untracked handles */ -+ fsl_mc_portal_reset(fsl_mc_restool_local->tool_mc_io); -+ -+ pr_debug("dynamic instance count: %d\n", -+ global_state.dynamic_instance_count); -+ pr_debug("static instance count: %d\n", -+ global_state.static_instance_in_use); -+ -+ /* -+ * must check -+ * whether fsl_mc_restool_local is dynamic or global instance -+ * Otherwise it will free up the reserved portal by accident -+ * or even not free up the dynamic allocated portal -+ * if 2 or more instances running concurrently -+ */ -+ if (fsl_mc_restool_local == &fsl_mc_restool) { -+ pr_debug("this is reserved portal"); -+ pr_debug("reserved portal not in use\n"); -+ global_state.static_instance_in_use = false; -+ } else { -+ pr_debug("this is dynamically allocated portal"); -+ pr_debug("free one dynamically allocated portal\n"); -+ fsl_mc_portal_free(fsl_mc_restool_local->tool_mc_io); -+ kfree(filep->private_data); -+ --global_state.dynamic_instance_count; -+ } -+ -+ filep->private_data = NULL; -+ mutex_unlock(&global_state.mutex); -+ return 0; -+} -+ -+static int 
restool_get_root_dprc_info(unsigned long arg) -+{ -+ int error = -EINVAL; -+ uint32_t root_dprc_id; -+ struct fsl_mc_device *root_mc_dev; -+ -+ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); -+ root_dprc_id = root_mc_dev->obj_desc.id; -+ error = copy_to_user((void __user *)arg, &root_dprc_id, -+ sizeof(root_dprc_id)); -+ if (error < 0) { -+ pr_err("copy_to_user() failed with error %d\n", error); -+ goto error; -+ } -+ -+ return 0; -+error: -+ return error; -+} -+ -+static int restool_send_mc_command(unsigned long arg, -+ struct fsl_mc_restool *fsl_mc_restool) -+{ -+ int error = -EINVAL; -+ struct mc_command mc_cmd; -+ -+ error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd)); -+ if (error < 0) { -+ pr_err("copy_to_user() failed with error %d\n", error); -+ goto error; -+ } -+ -+ /* -+ * Send MC command to the MC: -+ */ -+ error = mc_send_command(fsl_mc_restool->tool_mc_io, &mc_cmd); -+ if (error < 0) -+ goto error; -+ -+ error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd)); -+ if (error < 0) { -+ pr_err("copy_to_user() failed with error %d\n", error); -+ goto error; -+ } -+ -+ return 0; -+error: -+ return error; -+} -+ -+static long -+fsl_mc_restool_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ int error = -EINVAL; -+ -+ if (WARN_ON(fsl_mc_bus_type.dev_root == NULL)) -+ goto out; -+ -+ switch (cmd) { -+ case RESTOOL_GET_ROOT_DPRC_INFO: -+ error = restool_get_root_dprc_info(arg); -+ break; -+ -+ case RESTOOL_SEND_MC_COMMAND: -+ error = restool_send_mc_command(arg, file->private_data); -+ break; -+ default: -+ error = -EINVAL; -+ } -+out: -+ return error; -+} -+ -+static const struct file_operations fsl_mc_restool_dev_fops = { -+ .owner = THIS_MODULE, -+ .open = fsl_mc_restool_dev_open, -+ .release = fsl_mc_restool_dev_release, -+ .unlocked_ioctl = fsl_mc_restool_dev_ioctl, -+ .compat_ioctl = fsl_mc_restool_dev_ioctl, -+}; -+ -+static struct miscdevice fsl_mc_restool_dev = { -+ .minor = 
MISC_DYNAMIC_MINOR, -+ .name = "mc_restool", -+ .fops = &fsl_mc_restool_dev_fops -+}; -+ -+static int __init fsl_mc_restool_driver_init(void) -+{ -+ struct fsl_mc_device *root_mc_dev; -+ int error = -EINVAL; -+ bool restool_dev_registered = false; -+ -+ mutex_init(&global_state.mutex); -+ -+ if (WARN_ON(fsl_mc_restool.tool_mc_io != NULL)) -+ goto error; -+ -+ if (WARN_ON(global_state.dynamic_instance_count != 0)) -+ goto error; -+ -+ if (WARN_ON(global_state.static_instance_in_use)) -+ goto error; -+ -+ if (fsl_mc_bus_type.dev_root == NULL) { -+ pr_err("fsl-mc bus not found, restool driver registration failed\n"); -+ goto error; -+ } -+ -+ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); -+ error = fsl_mc_portal_allocate(root_mc_dev, 0, -+ &fsl_mc_restool.tool_mc_io); -+ if (error < 0) { -+ pr_err("Not able to allocate MC portal\n"); -+ goto error; -+ } -+ -+ error = misc_register(&fsl_mc_restool_dev); -+ if (error < 0) { -+ pr_err("misc_register() failed: %d\n", error); -+ goto error; -+ } -+ -+ restool_dev_registered = true; -+ pr_info("%s driver registered\n", fsl_mc_restool_dev.name); -+ return 0; -+error: -+ if (restool_dev_registered) -+ misc_deregister(&fsl_mc_restool_dev); -+ -+ if (fsl_mc_restool.tool_mc_io != NULL) { -+ fsl_mc_portal_free(fsl_mc_restool.tool_mc_io); -+ fsl_mc_restool.tool_mc_io = NULL; -+ } -+ -+ return error; -+} -+ -+module_init(fsl_mc_restool_driver_init); -+ -+static void __exit fsl_mc_restool_driver_exit(void) -+{ -+ if (WARN_ON(fsl_mc_restool.tool_mc_io == NULL)) -+ return; -+ -+ if (WARN_ON(global_state.dynamic_instance_count != 0)) -+ return; -+ -+ if (WARN_ON(global_state.static_instance_in_use)) -+ return; -+ -+ misc_deregister(&fsl_mc_restool_dev); -+ fsl_mc_portal_free(fsl_mc_restool.tool_mc_io); -+ fsl_mc_restool.tool_mc_io = NULL; -+ pr_info("%s driver unregistered\n", fsl_mc_restool_dev.name); -+} -+ -+module_exit(fsl_mc_restool_driver_exit); -+ -+MODULE_AUTHOR("Freescale Semiconductor Inc."); 
-+MODULE_DESCRIPTION("Freescale's MC restool driver"); -+MODULE_LICENSE("GPL"); -+ -diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c -new file mode 100644 -index 0000000..d3b6940 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/mc-sys.c -@@ -0,0 +1,677 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+ * -+ * I/O services to send MC commands to the MC hardware -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/mc.h" -+#include -+#include -+#include -+#include -+#include -+#include "dpmcp.h" -+ -+/** -+ * Timeout in milliseconds to wait for the completion of an MC command -+ * 5000 ms is barely enough for dpsw/dpdmux creation -+ * TODO: if MC firmware could response faster, we should decrease this value -+ */ -+#define MC_CMD_COMPLETION_TIMEOUT_MS 5000 -+ -+/* -+ * usleep_range() min and max values used to throttle down polling -+ * iterations while waiting for MC command completion -+ */ -+#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10 -+#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500 -+ -+#define MC_CMD_HDR_READ_CMDID(_hdr) \ -+ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S)) -+ -+/** -+ * dpmcp_irq0_handler - Regular ISR for DPMCP interrupt 0 -+ * -+ * @irq: IRQ number of the interrupt being handled -+ * @arg: Pointer to device structure -+ */ -+static irqreturn_t dpmcp_irq0_handler(int irq_num, void *arg) -+{ -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *dpmcp_dev = to_fsl_mc_device(dev); -+ struct fsl_mc_io *mc_io = dpmcp_dev->mc_io; -+ -+ dev_dbg(dev, "DPMCP IRQ %d triggered on CPU %u\n", irq_num, -+ smp_processor_id()); -+ -+ if (WARN_ON(dpmcp_dev->irqs[0]->irq_number != (uint32_t)irq_num)) -+ goto out; -+ -+ if (WARN_ON(!mc_io)) -+ goto out; -+ -+ 
complete(&mc_io->mc_command_done_completion); -+out: -+ return IRQ_HANDLED; -+} -+ -+/* -+ * Disable and clear interrupts for a given DPMCP object -+ */ -+static int disable_dpmcp_irq(struct fsl_mc_device *dpmcp_dev) -+{ -+ int error; -+ -+ /* -+ * Disable generation of the DPMCP interrupt: -+ */ -+ error = dpmcp_set_irq_enable(dpmcp_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_dev->mc_handle, -+ DPMCP_IRQ_INDEX, 0); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, -+ "dpmcp_set_irq_enable() failed: %d\n", error); -+ -+ return error; -+ } -+ -+ /* -+ * Disable all DPMCP interrupt causes: -+ */ -+ error = dpmcp_set_irq_mask(dpmcp_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_dev->mc_handle, -+ DPMCP_IRQ_INDEX, 0x0); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, -+ "dpmcp_set_irq_mask() failed: %d\n", error); -+ -+ return error; -+ } -+ -+ return 0; -+} -+ -+static void unregister_dpmcp_irq_handler(struct fsl_mc_device *dpmcp_dev) -+{ -+ struct fsl_mc_device_irq *irq = dpmcp_dev->irqs[DPMCP_IRQ_INDEX]; -+ -+ devm_free_irq(&dpmcp_dev->dev, irq->irq_number, &dpmcp_dev->dev); -+} -+ -+static int register_dpmcp_irq_handler(struct fsl_mc_device *dpmcp_dev) -+{ -+ int error; -+ struct fsl_mc_device_irq *irq = dpmcp_dev->irqs[DPMCP_IRQ_INDEX]; -+ -+ error = devm_request_irq(&dpmcp_dev->dev, -+ irq->irq_number, -+ dpmcp_irq0_handler, -+ IRQF_NO_SUSPEND | IRQF_ONESHOT, -+ "FSL MC DPMCP irq0", -+ &dpmcp_dev->dev); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, -+ "devm_request_irq() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ return 0; -+} -+ -+static int enable_dpmcp_irq(struct fsl_mc_device *dpmcp_dev) -+{ -+ int error; -+ -+ /* -+ * Enable MC command completion event to trigger DPMCP interrupt: -+ */ -+ error = dpmcp_set_irq_mask(dpmcp_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_dev->mc_handle, -+ DPMCP_IRQ_INDEX, -+ DPMCP_IRQ_EVENT_CMD_DONE); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, -+ "dpmcp_set_irq_mask() failed: %d\n", error); -+ -+ return error; 
-+ } -+ -+ /* -+ * Enable generation of the interrupt: -+ */ -+ error = dpmcp_set_irq_enable(dpmcp_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_dev->mc_handle, -+ DPMCP_IRQ_INDEX, 1); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, -+ "dpmcp_set_irq_enable() failed: %d\n", error); -+ -+ return error; -+ } -+ -+ return 0; -+} -+ -+/* -+ * Setup MC command completion interrupt for the DPMCP device associated with a -+ * given fsl_mc_io object -+ */ -+int fsl_mc_io_setup_dpmcp_irq(struct fsl_mc_io *mc_io) -+{ -+ int error; -+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; -+ -+ if (WARN_ON(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) -+ return -EINVAL; -+ -+ if (WARN_ON(!dpmcp_dev)) -+ return -EINVAL; -+ -+ if (WARN_ON(!fsl_mc_interrupts_supported())) -+ return -EINVAL; -+ -+ if (WARN_ON(dpmcp_dev->obj_desc.irq_count != 1)) -+ return -EINVAL; -+ -+ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) -+ return -EINVAL; -+ -+ error = fsl_mc_allocate_irqs(dpmcp_dev); -+ if (error < 0) -+ return error; -+ -+ error = disable_dpmcp_irq(dpmcp_dev); -+ if (error < 0) -+ goto error_free_irqs; -+ -+ error = register_dpmcp_irq_handler(dpmcp_dev); -+ if (error < 0) -+ goto error_free_irqs; -+ -+ error = enable_dpmcp_irq(dpmcp_dev); -+ if (error < 0) -+ goto error_unregister_irq_handler; -+ -+ mc_io->mc_command_done_irq_armed = true; -+ return 0; -+ -+error_unregister_irq_handler: -+ unregister_dpmcp_irq_handler(dpmcp_dev); -+ -+error_free_irqs: -+ fsl_mc_free_irqs(dpmcp_dev); -+ -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_io_setup_dpmcp_irq); -+ -+/* -+ * Tear down interrupts for the DPMCP device associated with a given fsl_mc_io -+ * object -+ */ -+static void teardown_dpmcp_irq(struct fsl_mc_io *mc_io) -+{ -+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; -+ -+ if (WARN_ON(!dpmcp_dev)) -+ return; -+ if (WARN_ON(!fsl_mc_interrupts_supported())) -+ return; -+ if (WARN_ON(!dpmcp_dev->irqs)) -+ return; -+ -+ mc_io->mc_command_done_irq_armed = false; -+ 
(void)disable_dpmcp_irq(dpmcp_dev); -+ unregister_dpmcp_irq_handler(dpmcp_dev); -+ fsl_mc_free_irqs(dpmcp_dev); -+} -+ -+/** -+ * Creates an MC I/O object -+ * -+ * @dev: device to be associated with the MC I/O object -+ * @mc_portal_phys_addr: physical address of the MC portal to use -+ * @mc_portal_size: size in bytes of the MC portal -+ * @resource: Pointer to MC bus object allocator resource associated -+ * with this MC I/O object or NULL if none. -+ * @flags: flags for the new MC I/O object -+ * @new_mc_io: Area to return pointer to newly created MC I/O object -+ * -+ * Returns '0' on Success; Error code otherwise. -+ */ -+int __must_check fsl_create_mc_io(struct device *dev, -+ phys_addr_t mc_portal_phys_addr, -+ uint32_t mc_portal_size, -+ struct fsl_mc_device *dpmcp_dev, -+ uint32_t flags, struct fsl_mc_io **new_mc_io) -+{ -+ int error; -+ struct fsl_mc_io *mc_io; -+ void __iomem *mc_portal_virt_addr; -+ struct resource *res; -+ -+ mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL); -+ if (!mc_io) -+ return -ENOMEM; -+ -+ mc_io->dev = dev; -+ mc_io->flags = flags; -+ mc_io->portal_phys_addr = mc_portal_phys_addr; -+ mc_io->portal_size = mc_portal_size; -+ mc_io->mc_command_done_irq_armed = false; -+ if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) { -+ spin_lock_init(&mc_io->spinlock); -+ } else { -+ mutex_init(&mc_io->mutex); -+ init_completion(&mc_io->mc_command_done_completion); -+ } -+ -+ res = devm_request_mem_region(dev, -+ mc_portal_phys_addr, -+ mc_portal_size, -+ "mc_portal"); -+ if (!res) { -+ dev_err(dev, -+ "devm_request_mem_region failed for MC portal %#llx\n", -+ mc_portal_phys_addr); -+ return -EBUSY; -+ } -+ -+ mc_portal_virt_addr = devm_ioremap_nocache(dev, -+ mc_portal_phys_addr, -+ mc_portal_size); -+ if (!mc_portal_virt_addr) { -+ dev_err(dev, -+ "devm_ioremap_nocache failed for MC portal %#llx\n", -+ mc_portal_phys_addr); -+ return -ENXIO; -+ } -+ -+ mc_io->portal_virt_addr = mc_portal_virt_addr; -+ if (dpmcp_dev) { -+ error = 
fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev); -+ if (error < 0) -+ goto error_destroy_mc_io; -+ -+ if (!(flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) && -+ fsl_mc_interrupts_supported()) { -+ error = fsl_mc_io_setup_dpmcp_irq(mc_io); -+ if (error < 0) -+ goto error_destroy_mc_io; -+ } -+ } -+ -+ *new_mc_io = mc_io; -+ return 0; -+ -+error_destroy_mc_io: -+ fsl_destroy_mc_io(mc_io); -+ return error; -+ -+} -+EXPORT_SYMBOL_GPL(fsl_create_mc_io); -+ -+/** -+ * Destroys an MC I/O object -+ * -+ * @mc_io: MC I/O object to destroy -+ */ -+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io) -+{ -+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; -+ -+ if (dpmcp_dev) -+ fsl_mc_io_unset_dpmcp(mc_io); -+ -+ devm_iounmap(mc_io->dev, mc_io->portal_virt_addr); -+ devm_release_mem_region(mc_io->dev, -+ mc_io->portal_phys_addr, -+ mc_io->portal_size); -+ -+ mc_io->portal_virt_addr = NULL; -+ devm_kfree(mc_io->dev, mc_io); -+} -+EXPORT_SYMBOL_GPL(fsl_destroy_mc_io); -+ -+int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io, -+ struct fsl_mc_device *dpmcp_dev) -+{ -+ int error; -+ -+ if (WARN_ON(!dpmcp_dev)) -+ return -EINVAL; -+ -+ if (WARN_ON(mc_io->dpmcp_dev)) -+ return -EINVAL; -+ -+ if (WARN_ON(dpmcp_dev->mc_io)) -+ return -EINVAL; -+ -+ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) { -+ error = dpmcp_open(mc_io, -+ 0, -+ dpmcp_dev->obj_desc.id, -+ &dpmcp_dev->mc_handle); -+ if (error < 0) -+ return error; -+ } -+ -+ mc_io->dpmcp_dev = dpmcp_dev; -+ dpmcp_dev->mc_io = mc_io; -+ return 0; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_io_set_dpmcp); -+ -+void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io) -+{ -+ int error; -+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; -+ -+ if (WARN_ON(!dpmcp_dev)) -+ return; -+ -+ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) -+ return; -+ -+ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) { -+ if (dpmcp_dev->irqs) -+ teardown_dpmcp_irq(mc_io); -+ -+ error = dpmcp_close(mc_io, -+ 0, -+ dpmcp_dev->mc_handle); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, 
"dpmcp_close() failed: %d\n", -+ error); -+ } -+ } -+ -+ mc_io->dpmcp_dev = NULL; -+ dpmcp_dev->mc_io = NULL; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_io_unset_dpmcp); -+ -+static int mc_status_to_error(enum mc_cmd_status status) -+{ -+ static const int mc_status_to_error_map[] = { -+ [MC_CMD_STATUS_OK] = 0, -+ [MC_CMD_STATUS_AUTH_ERR] = -EACCES, -+ [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM, -+ [MC_CMD_STATUS_DMA_ERR] = -EIO, -+ [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO, -+ [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT, -+ [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL, -+ [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM, -+ [MC_CMD_STATUS_BUSY] = -EBUSY, -+ [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP, -+ [MC_CMD_STATUS_INVALID_STATE] = -ENODEV, -+ }; -+ -+ if (WARN_ON((u32)status >= ARRAY_SIZE(mc_status_to_error_map))) -+ return -EINVAL; -+ -+ return mc_status_to_error_map[status]; -+} -+ -+static const char *mc_status_to_string(enum mc_cmd_status status) -+{ -+ static const char *const status_strings[] = { -+ [MC_CMD_STATUS_OK] = "Command completed successfully", -+ [MC_CMD_STATUS_READY] = "Command ready to be processed", -+ [MC_CMD_STATUS_AUTH_ERR] = "Authentication error", -+ [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege", -+ [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error", -+ [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error", -+ [MC_CMD_STATUS_TIMEOUT] = "Operation timed out", -+ [MC_CMD_STATUS_NO_RESOURCE] = "No resources", -+ [MC_CMD_STATUS_NO_MEMORY] = "No memory available", -+ [MC_CMD_STATUS_BUSY] = "Device is busy", -+ [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation", -+ [MC_CMD_STATUS_INVALID_STATE] = "Invalid state" -+ }; -+ -+ if ((unsigned int)status >= ARRAY_SIZE(status_strings)) -+ return "Unknown MC error"; -+ -+ return status_strings[status]; -+} -+ -+/** -+ * mc_write_command - writes a command to a Management Complex (MC) portal -+ * -+ * @portal: pointer to an MC portal -+ * @cmd: pointer to a filled command -+ */ -+static inline void mc_write_command(struct mc_command __iomem *portal, -+ 
struct mc_command *cmd) -+{ -+ int i; -+ -+ /* copy command parameters into the portal */ -+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) -+ writeq(cmd->params[i], &portal->params[i]); -+ -+ /* submit the command by writing the header */ -+ writeq(cmd->header, &portal->header); -+} -+ -+/** -+ * mc_read_response - reads the response for the last MC command from a -+ * Management Complex (MC) portal -+ * -+ * @portal: pointer to an MC portal -+ * @resp: pointer to command response buffer -+ * -+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise. -+ */ -+static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem * -+ portal, -+ struct mc_command *resp) -+{ -+ int i; -+ enum mc_cmd_status status; -+ -+ /* Copy command response header from MC portal: */ -+ resp->header = readq(&portal->header); -+ status = MC_CMD_HDR_READ_STATUS(resp->header); -+ if (status != MC_CMD_STATUS_OK) -+ return status; -+ -+ /* Copy command response data from MC portal: */ -+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) -+ resp->params[i] = readq(&portal->params[i]); -+ -+ return status; -+} -+ -+static int mc_completion_wait(struct fsl_mc_io *mc_io, struct mc_command *cmd, -+ enum mc_cmd_status *mc_status) -+{ -+ enum mc_cmd_status status; -+ unsigned long jiffies_left; -+ unsigned long timeout_jiffies = -+ msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); -+ -+ if (WARN_ON(!mc_io->dpmcp_dev)) -+ return -EINVAL; -+ -+ if (WARN_ON(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) -+ return -EINVAL; -+ -+ for (;;) { -+ status = mc_read_response(mc_io->portal_virt_addr, cmd); -+ if (status != MC_CMD_STATUS_READY) -+ break; -+ -+ jiffies_left = wait_for_completion_timeout( -+ &mc_io->mc_command_done_completion, -+ timeout_jiffies); -+ if (jiffies_left == 0) -+ return -ETIMEDOUT; -+ } -+ -+ *mc_status = status; -+ return 0; -+} -+ -+static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io, -+ struct mc_command *cmd, -+ enum mc_cmd_status *mc_status) -+{ -+ enum 
mc_cmd_status status; -+ unsigned long jiffies_until_timeout = -+ jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); -+ -+ for (;;) { -+ status = mc_read_response(mc_io->portal_virt_addr, cmd); -+ if (status != MC_CMD_STATUS_READY) -+ break; -+ -+ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS, -+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); -+ -+ if (time_after_eq(jiffies, jiffies_until_timeout)) -+ return -ETIMEDOUT; -+ } -+ -+ *mc_status = status; -+ return 0; -+} -+ -+static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io, -+ struct mc_command *cmd, -+ enum mc_cmd_status *mc_status) -+{ -+ enum mc_cmd_status status; -+ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000; -+ -+ BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) % -+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0); -+ -+ for (;;) { -+ status = mc_read_response(mc_io->portal_virt_addr, cmd); -+ if (status != MC_CMD_STATUS_READY) -+ break; -+ -+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); -+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; -+ if (timeout_usecs == 0) -+ return -ETIMEDOUT; -+ } -+ -+ *mc_status = status; -+ return 0; -+} -+ -+/** -+ * Sends a command to the MC device using the given MC I/O object -+ * -+ * @mc_io: MC I/O object to be used -+ * @cmd: command to be sent -+ * -+ * Returns '0' on Success; Error code otherwise. 
-+ */ -+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) -+{ -+ int error; -+ enum mc_cmd_status status; -+ unsigned long irq_flags = 0; -+ bool dpmcp_completion_intr_disabled = -+ (MC_CMD_HDR_READ_FLAGS(cmd->header) & MC_CMD_FLAG_INTR_DIS); -+ -+ if (WARN_ON(in_irq() && -+ (!dpmcp_completion_intr_disabled || -+ !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)))) -+ return -EINVAL; -+ -+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) -+ spin_lock_irqsave(&mc_io->spinlock, irq_flags); -+ else -+ mutex_lock(&mc_io->mutex); -+ -+ /* -+ * Send command to the MC hardware: -+ */ -+ mc_write_command(mc_io->portal_virt_addr, cmd); -+ -+ /* -+ * Wait for response from the MC hardware: -+ */ -+ if (mc_io->mc_command_done_irq_armed && !dpmcp_completion_intr_disabled) -+ error = mc_completion_wait(mc_io, cmd, &status); -+ else if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) -+ error = mc_polling_wait_preemptible(mc_io, cmd, &status); -+ else -+ error = mc_polling_wait_atomic(mc_io, cmd, &status); -+ -+ if (error < 0) { -+ if (error == -ETIMEDOUT) { -+ pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", -+ mc_io->portal_phys_addr, -+ (unsigned int) -+ MC_CMD_HDR_READ_TOKEN(cmd->header), -+ (unsigned int) -+ MC_CMD_HDR_READ_CMDID(cmd->header)); -+ } -+ goto common_exit; -+ -+ } -+ -+ if (status != MC_CMD_STATUS_OK) { -+ pr_debug("MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", -+ mc_io->portal_phys_addr, -+ (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header), -+ (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header), -+ mc_status_to_string(status), -+ (unsigned int)status); -+ -+ error = mc_status_to_error(status); -+ goto common_exit; -+ } -+ -+ error = 0; -+ -+common_exit: -+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) -+ spin_unlock_irqrestore(&mc_io->spinlock, irq_flags); -+ else -+ mutex_unlock(&mc_io->mutex); -+ -+ return error; -+} -+EXPORT_SYMBOL(mc_send_command); -diff 
--git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h -new file mode 100644 -index 0000000..1ec04e4 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h -@@ -0,0 +1,62 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPBP_CMD_H -+#define _FSL_DPBP_CMD_H -+ -+/* DPBP Version */ -+#define DPBP_VER_MAJOR 2 -+#define DPBP_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPBP_CMDID_CLOSE 0x800 -+#define DPBP_CMDID_OPEN 0x804 -+#define DPBP_CMDID_CREATE 0x904 -+#define DPBP_CMDID_DESTROY 0x900 -+ -+#define DPBP_CMDID_ENABLE 0x002 -+#define DPBP_CMDID_DISABLE 0x003 -+#define DPBP_CMDID_GET_ATTR 0x004 -+#define DPBP_CMDID_RESET 0x005 -+#define DPBP_CMDID_IS_ENABLED 0x006 -+ -+#define DPBP_CMDID_SET_IRQ 0x010 -+#define DPBP_CMDID_GET_IRQ 0x011 -+#define DPBP_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPBP_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPBP_CMDID_SET_IRQ_MASK 0x014 -+#define DPBP_CMDID_GET_IRQ_MASK 0x015 -+#define DPBP_CMDID_GET_IRQ_STATUS 0x016 -+#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0 -+#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1 -+#endif /* _FSL_DPBP_CMD_H */ -diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h -new file mode 100644 -index 0000000..9856bb8 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpbp.h -@@ -0,0 +1,438 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_DPBP_H -+#define __FSL_DPBP_H -+ -+/* Data Path Buffer Pool API -+ * Contains initialization APIs and runtime control APIs for DPBP -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpbp_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpbp_id: DPBP unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpbp_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpbp_id, -+ uint16_t *token); -+ -+/** -+ * dpbp_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpbp_cfg - Structure representing DPBP configuration -+ * @options: place holder -+ */ -+struct dpbp_cfg { -+ uint32_t options; -+}; -+ -+/** -+ * dpbp_create() - Create the DPBP object. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPBP object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpbp_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpbp_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpbp_destroy() - Destroy the DPBP object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpbp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_enable() - Enable the DPBP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_disable() - Disable the DPBP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpbp_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_is_enabled() - Check if the DPBP is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpbp_reset() - Reset the DPBP, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpbp_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpbp_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpbp_irq_cfg *irq_cfg); -+ -+/** -+ * dpbp_get_irq() - Get IRQ information from the DPBP. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpbp_irq_cfg *irq_cfg); -+ -+/** -+ * dpbp_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpbp_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpbp_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpbp_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpbp_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpbp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpbp_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpbp_attr - Structure representing DPBP attributes -+ * @id: DPBP object ID -+ * @version: DPBP version -+ * @bpid: Hardware buffer pool ID; should be used as an argument in -+ * acquire/release operations on buffers -+ */ -+struct dpbp_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPBP version -+ * @major: DPBP major version -+ * @minor: DPBP minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint16_t bpid; -+}; -+ -+/** -+ * dpbp_get_attributes - Retrieve DPBP attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpbp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_attr *attr); -+ -+/** -+ * DPBP notifications options -+ */ -+ -+/** -+ * BPSCN write will attempt to allocate into a cache (coherent write) -+ */ -+#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001 -+ -+/** -+ * struct dpbp_notification_cfg - Structure representing DPBP notifications -+ * towards software -+ * @depletion_entry: below this threshold the pool is "depleted"; -+ * set it to '0' to disable it -+ * @depletion_exit: greater than or equal to this threshold the pool exit its -+ * "depleted" state -+ * @surplus_entry: above this threshold the pool is in "surplus" state; -+ * set it to '0' to disable it -+ * @surplus_exit: less than or equal to this threshold the pool exit its -+ * "surplus" state -+ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry' -+ * is not '0' (enable); I/O virtual address (must be in DMA-able memory), -+ * must be 16B aligned. -+ * @message_ctx: The context that will be part of the BPSCN message and will -+ * be written to 'message_iova' -+ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_' values -+ */ -+struct dpbp_notification_cfg { -+ uint32_t depletion_entry; -+ uint32_t depletion_exit; -+ uint32_t surplus_entry; -+ uint32_t surplus_exit; -+ uint64_t message_iova; -+ uint64_t message_ctx; -+ uint16_t options; -+}; -+ -+/** -+ * dpbp_set_notifications() - Set notifications towards software -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @cfg: notifications configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpbp_set_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg); -+ -+/** -+ * dpbp_get_notifications() - Get the notifications configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @cfg: notifications configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg); -+ -+#endif /* __FSL_DPBP_H */ -diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/include/dpcon-cmd.h -new file mode 100644 -index 0000000..ecb40d0 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpcon-cmd.h -@@ -0,0 +1,162 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPCON_CMD_H -+#define _FSL_DPCON_CMD_H -+ -+/* DPCON Version */ -+#define DPCON_VER_MAJOR 2 -+#define DPCON_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPCON_CMDID_CLOSE 0x800 -+#define DPCON_CMDID_OPEN 0x808 -+#define DPCON_CMDID_CREATE 0x908 -+#define DPCON_CMDID_DESTROY 0x900 -+ -+#define DPCON_CMDID_ENABLE 0x002 -+#define DPCON_CMDID_DISABLE 0x003 -+#define DPCON_CMDID_GET_ATTR 0x004 -+#define DPCON_CMDID_RESET 0x005 -+#define DPCON_CMDID_IS_ENABLED 0x006 -+ -+#define DPCON_CMDID_SET_IRQ 0x010 -+#define DPCON_CMDID_GET_IRQ 0x011 -+#define DPCON_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPCON_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPCON_CMDID_SET_IRQ_MASK 0x014 -+#define DPCON_CMDID_GET_IRQ_MASK 0x015 -+#define DPCON_CMDID_GET_IRQ_STATUS 0x016 -+#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPCON_CMDID_SET_NOTIFICATION 0x100 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_OPEN(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities) -+ -+/* cmd, param, offset, 
width, type, arg_name */ -+#define DPCON_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, 
width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\ -+} while (0) -+ -+#endif /* _FSL_DPCON_CMD_H */ -diff --git a/drivers/staging/fsl-mc/include/dpcon.h b/drivers/staging/fsl-mc/include/dpcon.h -new file mode 100644 -index 0000000..2555be5 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpcon.h -@@ -0,0 +1,407 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_DPCON_H -+#define __FSL_DPCON_H -+ -+/* Data Path Concentrator API -+ * Contains initialization APIs and runtime control APIs for DPCON -+ */ -+ -+struct fsl_mc_io; -+ -+/** General DPCON macros */ -+ -+/** -+ * Use it to disable notifications; see dpcon_set_notification() -+ */ -+#define DPCON_INVALID_DPIO_ID (int)(-1) -+ -+/** -+ * dpcon_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpcon_id: DPCON unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpcon_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpcon_id, -+ uint16_t *token); -+ -+/** -+ * dpcon_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpcon_cfg - Structure representing DPCON configuration -+ * @num_priorities: Number of priorities for the DPCON channel (1-8) -+ */ -+struct dpcon_cfg { -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpcon_create() - Create the DPCON object. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPCON object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpcon_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpcon_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpcon_destroy() - Destroy the DPCON object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpcon_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_enable() - Enable the DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_disable() - Disable the DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_is_enabled() - Check if the DPCON is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpcon_reset() - Reset the DPCON, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpcon_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpcon_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpcon_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpcon_set_irq() - Set IRQ information for the DPCON to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpcon_irq_cfg *irq_cfg); -+ -+/** -+ * dpcon_get_irq() - Get IRQ information from the DPCON. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpcon_irq_cfg *irq_cfg); -+ -+/** -+ * dpcon_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. 
-+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpcon_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpcon_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpcon_get_irq_mask() - Get interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpcon_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @status: interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpcon_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpcon_attr - Structure representing DPCON attributes -+ * @id: DPCON object ID -+ * @version: DPCON version -+ * @qbman_ch_id: Channel ID to be used by dequeue operation -+ * @num_priorities: Number of priorities for the DPCON channel (1-8) -+ */ -+struct dpcon_attr { -+ int id; -+ /** -+ * struct version - DPCON version -+ * @major: DPCON major version -+ * @minor: DPCON minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint16_t qbman_ch_id; -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpcon_get_attributes() - Retrieve DPCON attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @attr: Object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_attr *attr); -+ -+/** -+ * struct dpcon_notification_cfg - Structure representing notification parameters -+ * @dpio_id: DPIO object ID; must be configured with a notification channel; -+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID'; -+ * @priority: Priority selection within the DPIO channel; valid values -+ * are 0-7, depending on the number of priorities in that channel -+ * @user_ctx: User context value provided with each CDAN message -+ */ -+struct dpcon_notification_cfg { -+ int dpio_id; -+ uint8_t priority; -+ uint64_t user_ctx; -+}; -+ -+/** -+ * dpcon_set_notification() - Set DPCON notification destination -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @cfg: Notification parameters -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int 
dpcon_set_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_notification_cfg *cfg); -+ -+#endif /* __FSL_DPCON_H */ -diff --git a/drivers/staging/fsl-mc/include/dpmac-cmd.h b/drivers/staging/fsl-mc/include/dpmac-cmd.h -new file mode 100644 -index 0000000..c123aab ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpmac-cmd.h -@@ -0,0 +1,192 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPMAC_CMD_H -+#define _FSL_DPMAC_CMD_H -+ -+/* DPMAC Version */ -+#define DPMAC_VER_MAJOR 3 -+#define DPMAC_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPMAC_CMDID_CLOSE 0x800 -+#define DPMAC_CMDID_OPEN 0x80c -+#define DPMAC_CMDID_CREATE 0x90c -+#define DPMAC_CMDID_DESTROY 0x900 -+ -+#define DPMAC_CMDID_GET_ATTR 0x004 -+#define DPMAC_CMDID_RESET 0x005 -+ -+#define DPMAC_CMDID_SET_IRQ 0x010 -+#define DPMAC_CMDID_GET_IRQ 0x011 -+#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPMAC_CMDID_SET_IRQ_MASK 0x014 -+#define DPMAC_CMDID_GET_IRQ_MASK 0x015 -+#define DPMAC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPMAC_CMDID_MDIO_READ 0x0c0 -+#define DPMAC_CMDID_MDIO_WRITE 0x0c1 -+#define DPMAC_CMDID_GET_LINK_CFG 0x0c2 -+#define DPMAC_CMDID_SET_LINK_STATE 0x0c3 -+#define DPMAC_CMDID_GET_COUNTER 0x0c4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_OPEN(cmd, dpmac_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_addr, irq_val, user_irq_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_val);\ -+ MC_CMD_OP(cmd, 1, 
0, 64, uint64_t, irq_addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, user_irq_id); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ(cmd, type, irq_addr, irq_val, user_irq_id) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, user_irq_id); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define 
DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\ -+ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_READ(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_MDIO_READ(cmd, data) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_COUNTER(cmd, type) \ -+ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type) -+ 
-+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+#endif /* _FSL_DPMAC_CMD_H */ -diff --git a/drivers/staging/fsl-mc/include/dpmac.h b/drivers/staging/fsl-mc/include/dpmac.h -new file mode 100644 -index 0000000..88091b5 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpmac.h -@@ -0,0 +1,528 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMAC_H -+#define __FSL_DPMAC_H -+ -+/* Data Path MAC API -+ * Contains initialization APIs and runtime control APIs for DPMAC -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpmac_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @dpmac_id: DPMAC unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpmac_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_open(struct fsl_mc_io *mc_io, int dpmac_id, uint16_t *token); -+ -+/** -+ * dpmac_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_close(struct fsl_mc_io *mc_io, uint16_t token); -+ -+/** -+ * enum dpmac_link_type - DPMAC link type -+ * @DPMAC_LINK_TYPE_NONE: No link -+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type -+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID -+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type -+ */ -+enum dpmac_link_type { -+ DPMAC_LINK_TYPE_NONE, -+ DPMAC_LINK_TYPE_FIXED, -+ DPMAC_LINK_TYPE_PHY, -+ DPMAC_LINK_TYPE_BACKPLANE -+}; -+ -+/** -+ * enum dpmac_eth_if - DPMAC Ethrnet interface -+ * @DPMAC_ETH_IF_MII: MII interface -+ * @DPMAC_ETH_IF_RMII: RMII interface -+ * @DPMAC_ETH_IF_SMII: SMII interface -+ * @DPMAC_ETH_IF_GMII: GMII interface -+ * @DPMAC_ETH_IF_RGMII: RGMII interface -+ * @DPMAC_ETH_IF_SGMII: SGMII interface -+ * @DPMAC_ETH_IF_XGMII: XGMII interface -+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface -+ * @DPMAC_ETH_IF_XAUI: XAUI interface -+ * @DPMAC_ETH_IF_XFI: XFI interface -+ */ -+enum dpmac_eth_if { -+ DPMAC_ETH_IF_MII, -+ DPMAC_ETH_IF_RMII, -+ DPMAC_ETH_IF_SMII, -+ DPMAC_ETH_IF_GMII, -+ DPMAC_ETH_IF_RGMII, -+ DPMAC_ETH_IF_SGMII, -+ DPMAC_ETH_IF_XGMII, -+ DPMAC_ETH_IF_QSGMII, -+ DPMAC_ETH_IF_XAUI, -+ DPMAC_ETH_IF_XFI -+}; -+ -+/** -+ * struct dpmac_cfg() - Structure representing DPMAC configuration -+ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP, -+ * the MAC IDs are continuous. -+ * For example: 2 WRIOPs, 16 MACs in each: -+ * MAC IDs for the 1st WRIOP: 1-16, -+ * MAC IDs for the 2nd WRIOP: 17-32. -+ */ -+struct dpmac_cfg { -+ int mac_id; -+}; -+ -+/** -+ * dpmac_create() - Create the DPMAC object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPMAC object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. 
-+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpmac_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpmac_destroy() - Destroy the DPMAC object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpmac_destroy(struct fsl_mc_io *mc_io, uint16_t token); -+ -+/* DPMAC IRQ Index and Events */ -+ -+/* IRQ index */ -+#define DPMAC_IRQ_INDEX 0 -+/* IRQ event - indicates a change in link state */ -+#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 -+/* irq event - Indicates that the link state changed */ -+#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 -+ -+/** -+ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_addr: Address that must be written to -+ * signal a message-based interrupt -+ * @irq_val: Value to write into irq_addr address -+ * @user_irq_id: A user defined number associated with this IRQ -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint64_t irq_addr, -+ uint32_t irq_val, -+ int user_irq_id); -+ -+/** -+ * dpmac_get_irq() - Get IRQ information from the DPMAC. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_addr: Returned address that must be written to -+ * signal the message-based interrupt -+ * @irq_val: Value to write into irq_addr address -+ * @user_irq_id: A user defined number associated with this IRQ -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ uint64_t *irq_addr, -+ uint32_t *irq_val, -+ int *user_irq_id); -+ -+/** -+ * dpmac_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpmac_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpmac_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpmac_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpmac_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpmac_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpmac_attr - Structure representing DPMAC attributes -+ * @id: DPMAC object ID -+ * @phy_id: PHY ID -+ * @link_type: link type -+ * @eth_if: Ethernet interface -+ * @max_rate: Maximum supported rate - in Mbps -+ * @version: DPMAC version -+ */ -+struct dpmac_attr { -+ int id; -+ int phy_id; -+ enum dpmac_link_type link_type; -+ enum dpmac_eth_if eth_if; -+ uint32_t max_rate; -+ /** -+ * struct version - Structure representing DPMAC version -+ * @major: DPMAC major version -+ * @minor: DPMAC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpmac_get_attributes - Retrieve DPMAC attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ struct dpmac_attr *attr); -+ -+/** -+ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters -+ * @phy_addr: MDIO device address -+ * @reg: Address of the register within the Clause 45 PHY device from which data -+ * is to be read -+ * @data: Data read/write from/to MDIO -+ */ -+struct dpmac_mdio_cfg { -+ uint8_t phy_addr; -+ uint8_t reg; -+ uint16_t data; -+}; -+ -+/** -+ * dpmac_mdio_read() - Perform MDIO read transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+ -+/** -+ * dpmac_mdio_write() - Perform MDIO write transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/* DPMAC link configuration/state options */ -+ -+/* Enable auto-negotiation */ -+#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/* Enable half-duplex mode */ -+#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/* Enable pause frames */ -+#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL -+/* Enable a-symmetric pause frames */ -+#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration -+ * @rate: Link's rate - in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ */ -+struct dpmac_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpmac_get_link_cfg() - Get Ethernet link configuration -+ * @mc_io: Pointer to opaque I/O object -+ * @token: Token of DPMAC object -+ * @cfg: Returned structure with the link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, uint16_t token, -+ struct dpmac_link_cfg *cfg); -+ -+/** -+ * struct dpmac_link_state - DPMAC link configuration request -+ * @rate: Rate in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ * @up: Link state -+ */ -+struct dpmac_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpmac_set_link_state() - Set the Ethernet link status -+ * @mc_io: Pointer to opaque I/O object -+ * @token: Token of DPMAC object -+ * @link_state: Link state configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, uint16_t token, -+ struct dpmac_link_state *link_state); -+ -+/** -+ * enum dpni_counter - DPNI counter types -+ * @DPMAC_CNT_ING_FRAME_64: counts 64-octet frame, good or bad. -+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-octet frame, good or bad. 
-+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-octet frame, good or bad. -+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-octet frame, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-octet frame, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-octet frame, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-octet frame and larger -+ * (up to max frame length specified), -+ * good or bad. -+ * @DPMAC_CNT_ING_FRAG: counts packet which is shorter than 64 octets received -+ * with a wrong CRC -+ * @DPMAC_CNT_ING_JABBER: counts packet longer than the maximum frame length -+ * specified, with a bad frame check sequence. -+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped packet due to internal errors. -+ * Occurs when a receive FIFO overflows. -+ * Includes also packets truncated as a result of -+ * the receive FIFO overflow. -+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frame with an alignment error -+ * (optional used for wrong SFD) -+ * @DPMAC_CNT_EGR_UNDERSIZED: counts packet transmitted that was less than 64 -+ * octets long with a good CRC. -+ * @DPMAC_CNT_ING_OVERSIZED: counts packet longer than the maximum frame length -+ * specified, with a good frame check sequence. -+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frame (regular and PFC). -+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frame transmitted -+ * (regular and PFC). -+ * @DPMAC_CNT_ING_BYTE: counts octet received except preamble for all valid -+ frames and valid pause frames. -+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frame -+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frame -+ * @DPMAC_CNT_ING_ALL_FRAME: counts each good or bad packet received. 
-+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frame -+ * @DPMAC_CNT_ING_ERR_FRAME: counts frame received with an error -+ * (except for undersized/fragment frame) -+ * @DPMAC_CNT_EGR_BYTE: counts octet transmitted except preamble for all valid -+ * frames and valid pause frames transmitted. -+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frame -+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frame -+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frame -+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frame transmitted with an error -+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frame received without error, including -+ * pause frames. -+ */ -+enum dpmac_counter { -+ DPMAC_CNT_ING_FRAME_64, -+ DPMAC_CNT_ING_FRAME_127, -+ DPMAC_CNT_ING_FRAME_255, -+ DPMAC_CNT_ING_FRAME_511, -+ DPMAC_CNT_ING_FRAME_1023, -+ DPMAC_CNT_ING_FRAME_1518, -+ DPMAC_CNT_ING_FRAME_1519_MAX, -+ DPMAC_CNT_ING_FRAG, -+ DPMAC_CNT_ING_JABBER, -+ DPMAC_CNT_ING_FRAME_DISCARD, -+ DPMAC_CNT_ING_ALIGN_ERR, -+ DPMAC_CNT_EGR_UNDERSIZED, -+ DPMAC_CNT_ING_OVERSIZED, -+ DPMAC_CNT_ING_VALID_PAUSE_FRAME, -+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, -+ DPMAC_CNT_ING_BYTE, -+ DPMAC_CNT_ING_MCAST_FRAME, -+ DPMAC_CNT_ING_BCAST_FRAME, -+ DPMAC_CNT_ING_ALL_FRAME, -+ DPMAC_CNT_ING_UCAST_FRAME, -+ DPMAC_CNT_ING_ERR_FRAME, -+ DPMAC_CNT_EGR_BYTE, -+ DPMAC_CNT_EGR_MCAST_FRAME, -+ DPMAC_CNT_EGR_BCAST_FRAME, -+ DPMAC_CNT_EGR_UCAST_FRAME, -+ DPMAC_CNT_EGR_ERR_FRAME, -+ DPMAC_CNT_ING_GOOD_FRAME -+}; -+ -+/** -+ * dpmac_get_counter() - Read a specific DPMAC counter -+ * @mc_io: Pointer to opaque I/O object -+ * @token: Token of DPMAC object -+ * @type: The requested counter -+ * @counter: Returned counter value -+ * -+ * Return: The requested counter; '0' otherwise. 
-+ */ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter); -+ -+#endif /* __FSL_DPMAC_H */ -diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h -new file mode 100644 -index 0000000..d1c4588 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpmng.h -@@ -0,0 +1,80 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMNG_H -+#define __FSL_DPMNG_H -+ -+/* Management Complex General API -+ * Contains general API for the Management Complex firmware -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * struct mc_version -+ * @major: Major version number: incremented on API compatibility changes -+ * @minor: Minor version number: incremented on API additions (that are -+ * backward compatible); reset when major version is incremented -+ * @revision: Internal revision number: incremented on implementation changes -+ * and/or bug fixes that have no impact on API -+ */ -+struct mc_version { -+ uint32_t major; -+ uint32_t minor; -+ uint32_t revision; -+}; -+ -+/** -+ * mc_get_version() - Retrieves the Management Complex firmware -+ * version information -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @mc_ver_info: Returned version information structure -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int mc_get_version(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ struct mc_version *mc_ver_info); -+ -+/** -+ * dpmng_get_container_id() - Get container ID associated with a given portal. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @container_id: Requested container ID -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmng_get_container_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int *container_id); -+ -+#endif /* __FSL_DPMNG_H */ -diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h -new file mode 100644 -index 0000000..810ded0 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dprc.h -@@ -0,0 +1,990 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPRC_H -+#define _FSL_DPRC_H -+ -+#include "mc-cmd.h" -+ -+/* Data Path Resource Container API -+ * Contains DPRC API for managing and querying DPAA resources -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * Set this value as the icid value in dprc_cfg structure when creating a -+ * container, in case the ICID is not selected by the user and should be -+ * allocated by the DPRC from the pool of ICIDs. -+ */ -+#define DPRC_GET_ICID_FROM_POOL (uint16_t)(~(0)) -+ -+/** -+ * Set this value as the portal_id value in dprc_cfg structure when creating a -+ * container, in case the portal ID is not specifically selected by the -+ * user and should be allocated by the DPRC from the pool of portal ids. -+ */ -+#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0)) -+ -+/** -+ * dprc_open() - Open DPRC object for use -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @container_id: Container ID to open -+ * @token: Returned token of DPRC object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Required before any operation on the object. 
-+ */ -+int dprc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int container_id, -+ uint16_t *token); -+ -+/** -+ * dprc_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * Container general options -+ * -+ * These options may be selected at container creation by the container creator -+ * and can be retrieved using dprc_get_attributes() -+ */ -+ -+/* Spawn Policy Option allowed - Indicates that the new container is allowed -+ * to spawn and have its own child containers. -+ */ -+#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001 -+ -+/* General Container allocation policy - Indicates that the new container is -+ * allowed to allocate requested resources from its parent container; if not -+ * set, the container is only allowed to use resources in its own pools; Note -+ * that this is a container's global policy, but the parent container may -+ * override it and set specific quota per resource type. -+ */ -+#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002 -+ -+/* Object initialization allowed - software context associated with this -+ * container is allowed to invoke object initialization operations. -+ */ -+#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004 -+ -+/* Topology change allowed - software context associated with this -+ * container is allowed to invoke topology operations, such as attach/detach -+ * of network objects. -+ */ -+#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008 -+ -+/* AIOP - Indicates that container belongs to AIOP. 
*/ -+#define DPRC_CFG_OPT_AIOP 0x00000020 -+ -+/* IRQ Config - Indicates that the container allowed to configure its IRQs. */ -+#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040 -+ -+/** -+ * struct dprc_cfg - Container configuration options -+ * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free -+ * ICID value is allocated by the DPRC -+ * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free -+ * portal ID is allocated by the DPRC -+ * @options: Combination of 'DPRC_CFG_OPT_' options -+ * @label: Object's label -+ */ -+struct dprc_cfg { -+ uint16_t icid; -+ int portal_id; -+ uint64_t options; -+ char label[16]; -+}; -+ -+/** -+ * dprc_create_container() - Create child container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @cfg: Child container configuration -+ * @child_container_id: Returned child container ID -+ * @child_portal_offset: Returned child portal offset from MC portal base -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_create_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_cfg *cfg, -+ int *child_container_id, -+ uint64_t *child_portal_offset); -+ -+/** -+ * dprc_destroy_container() - Destroy child container. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the container to destroy -+ * -+ * This function terminates the child container, so following this call the -+ * child container ID becomes invalid. -+ * -+ * Notes: -+ * - All resources and objects of the destroyed container are returned to the -+ * parent container or destroyed if were created be the destroyed container. -+ * - This function destroy all the child containers of the specified -+ * container prior to destroying the container itself. 
-+ * -+ * warning: Only the parent container is allowed to destroy a child policy -+ * Container 0 can't be destroyed -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ */ -+int dprc_destroy_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id); -+ -+/** -+ * dprc_reset_container - Reset child container. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the container to reset -+ * -+ * In case a software context crashes or becomes non-responsive, the parent -+ * may wish to reset its resources container before the software context is -+ * restarted. -+ * -+ * This routine informs all objects assigned to the child container that the -+ * container is being reset, so they may perform any cleanup operations that are -+ * needed. All objects handles that were owned by the child container shall be -+ * closed. -+ * -+ * Note that such request may be submitted even if the child software context -+ * has not crashed, but the resulting object cleanup operations will not be -+ * aware of that. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_reset_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id); -+ -+/* IRQ */ -+ -+/* IRQ index */ -+#define DPRC_IRQ_INDEX 0 -+ -+/* Number of dprc's IRQs */ -+#define DPRC_NUM_OF_IRQS 1 -+ -+/* DPRC IRQ events */ -+ -+/* IRQ event - Indicates that a new object added to the container */ -+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001 -+ -+/* IRQ event - Indicates that an object was removed from the container */ -+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002 -+ -+/* IRQ event - Indicates that resources added to the container */ -+#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004 -+ -+/* IRQ event - Indicates that resources removed from the container */ -+#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008 -+ -+/* IRQ event - Indicates that one of the descendant containers that opened by -+ * this container is destroyed -+ */ -+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010 -+ -+/* IRQ event - Indicates that on one of the container's opened object is -+ * destroyed -+ */ -+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020 -+ -+/* Irq event - Indicates that object is created at the container */ -+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040 -+ -+/** -+ * struct dprc_irq_cfg - IRQ configuration -+ * @paddr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dprc_irq_cfg { -+ uint64_t paddr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_irq() - Get IRQ information from the DPRC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dprc_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dprc_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting irq -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dprc_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dprc_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dprc_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dprc_attributes - Container attributes -+ * @container_id: Container's ID -+ * @icid: Container's ICID -+ * @portal_id: Container's portal ID -+ * @options: Container's options as set at container's creation -+ * @version: DPRC version -+ */ -+struct dprc_attributes { -+ int container_id; -+ uint16_t icid; -+ int portal_id; -+ uint64_t options; -+ /** -+ * struct version - DPRC version -+ * @major: DPRC major version -+ * @minor: DPRC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dprc_get_attributes() - Obtains container attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @attributes: Returned container attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_attributes *attributes); -+ -+/** -+ * dprc_set_res_quota() - Set allocation policy for a specific resource/object -+ * type in a child container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the child container -+ * @type: Resource/object type -+ * @quota: Sets the maximum number of resources of the selected type -+ * that the child container is allowed to allocate from its parent; -+ * when quota is set to -1, the policy is the same as container's -+ * general policy. -+ * -+ * Allocation policy determines whether or not a container may allocate -+ * resources from its parent. Each container has a 'global' allocation policy -+ * that is set when the container is created. -+ * -+ * This function sets allocation policy for a specific resource type. -+ * The default policy for all resource types matches the container's 'global' -+ * allocation policy. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Only the parent container is allowed to change a child policy. -+ */ -+int dprc_set_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t quota); -+ -+/** -+ * dprc_get_res_quota() - Gets the allocation policy of a specific -+ * resource/object type in a child container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the child container -+ * @type: resource/object type -+ * @quota: Returnes the maximum number of resources of the selected type -+ * that the child container is allowed to allocate from the parent; -+ * when quota is set to -1, the policy is the same as container's -+ * general policy. 
-+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t *quota); -+ -+/* Resource request options */ -+ -+/* Explicit resource ID request - The requested objects/resources -+ * are explicit and sequential (in case of resources). -+ * The base ID is given at res_req at base_align field -+ */ -+#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001 -+ -+/* Aligned resources request - Relevant only for resources -+ * request (and not objects). Indicates that resources base ID should be -+ * sequential and aligned to the value given at dprc_res_req base_align field -+ */ -+#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002 -+ -+/* Plugged Flag - Relevant only for object assignment request. -+ * Indicates that after all objects assigned. An interrupt will be invoked at -+ * the relevant GPP. The assigned object will be marked as plugged. -+ * plugged objects can't be assigned from their container -+ */ -+#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004 -+ -+/** -+ * struct dprc_res_req - Resource request descriptor, to be used in assignment -+ * or un-assignment of resources and objects. -+ * @type: Resource/object type: Represent as a NULL terminated string. 
-+ * This string may received by using dprc_get_pool() to get resource -+ * type and dprc_get_obj() to get object type; -+ * Note: it is not possible to assign/un-assign DPRC objects -+ * @num: Number of resources -+ * @options: Request options: combination of DPRC_RES_REQ_OPT_ options -+ * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT -+ * is set at option), this field represents the required base ID -+ * for resource allocation; In case of aligned assignment -+ * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field -+ * indicates the required alignment for the resource ID(s) - -+ * use 0 if there is no alignment or explicit ID requirements -+ */ -+struct dprc_res_req { -+ char type[16]; -+ uint32_t num; -+ uint32_t options; -+ int id_base_align; -+}; -+ -+/** -+ * dprc_assign() - Assigns objects or resource to a child container. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @container_id: ID of the child container -+ * @res_req: Describes the type and amount of resources to -+ * assign to the given container -+ * -+ * Assignment is usually done by a parent (this DPRC) to one of its child -+ * containers. -+ * -+ * According to the DPRC allocation policy, the assigned resources may be taken -+ * (allocated) from the container's ancestors, if not enough resources are -+ * available in the container itself. -+ * -+ * The type of assignment depends on the dprc_res_req options, as follows: -+ * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have -+ * the explicit base ID specified at the id_base_align field of res_req. -+ * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be -+ * aligned to the value given at id_base_align field of res_req. -+ * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment, -+ * and indicates that the object must be set to the plugged state. 
-+ * -+ * A container may use this function with its own ID in order to change a -+ * object state to plugged or unplugged. -+ * -+ * If IRQ information has been set in the child DPRC, it will signal an -+ * interrupt following every change in its object assignment. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_assign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int container_id, -+ struct dprc_res_req *res_req); -+ -+/** -+ * dprc_unassign() - Un-assigns objects or resources from a child container -+ * and moves them into this (parent) DPRC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the child container -+ * @res_req: Describes the type and amount of resources to un-assign from -+ * the child container -+ * -+ * Un-assignment of objects can succeed only if the object is not in the -+ * plugged or opened state. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_unassign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ struct dprc_res_req *res_req); -+ -+/** -+ * dprc_get_pool_count() - Get the number of dprc's pools -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPRC object -+ * @pool_count: Returned number of resource pools in the dprc -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_get_pool_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *pool_count); -+ -+/** -+ * dprc_get_pool() - Get the type (string) of a certain dprc's pool -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @pool_index: Index of the pool to be queried (< pool_count) -+ * @type: The type of the pool -+ * -+ * The pool types retrieved one by one by incrementing -+ * pool_index up to (not including) the value of pool_count returned -+ * from dprc_get_pool_count(). dprc_get_pool_count() must -+ * be called prior to dprc_get_pool(). -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_pool(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int pool_index, -+ char *type); -+ -+/** -+ * dprc_get_obj_count() - Obtains the number of objects in the DPRC -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_count: Number of objects assigned to the DPRC -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *obj_count); -+ -+/* Objects Attributes Flags */ -+ -+/* Opened state - Indicates that an object is open by at least one owner */ -+#define DPRC_OBJ_STATE_OPEN 0x00000001 -+/* Plugged state - Indicates that the object is plugged */ -+#define DPRC_OBJ_STATE_PLUGGED 0x00000002 -+ -+/** -+ * Shareability flag - Object flag indicating no memory shareability. -+ * the object generates memory accesses that are non coherent with other -+ * masters; -+ * user is responsible for proper memory handling through IOMMU configuration. 
-+ */ -+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 -+ -+/** -+ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj() -+ * @type: Type of object: NULL terminated string -+ * @id: ID of logical object resource -+ * @vendor: Object vendor identifier -+ * @ver_major: Major version number -+ * @ver_minor: Minor version number -+ * @irq_count: Number of interrupts supported by the object -+ * @region_count: Number of mappable regions supported by the object -+ * @state: Object state: combination of DPRC_OBJ_STATE_ states -+ * @label: Object label -+ * @flags: Object's flags -+ */ -+struct dprc_obj_desc { -+ char type[16]; -+ int id; -+ uint16_t vendor; -+ uint16_t ver_major; -+ uint16_t ver_minor; -+ uint8_t irq_count; -+ uint8_t region_count; -+ uint32_t state; -+ char label[16]; -+ uint16_t flags; -+}; -+ -+/** -+ * dprc_get_obj() - Get general information on an object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_index: Index of the object to be queried (< obj_count) -+ * @obj_desc: Returns the requested object descriptor -+ * -+ * The object descriptors are retrieved one by one by incrementing -+ * obj_index up to (not including) the value of obj_count returned -+ * from dprc_get_obj_count(). dprc_get_obj_count() must -+ * be called prior to dprc_get_obj(). -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int obj_index, -+ struct dprc_obj_desc *obj_desc); -+ -+/** -+ * dprc_get_obj_desc() - Get object descriptor. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: The type of the object to get its descriptor. 
-+ * @obj_id: The id of the object to get its descriptor -+ * @obj_desc: The returned descriptor to fill and return to the user -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ */ -+int dprc_get_obj_desc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ struct dprc_obj_desc *obj_desc); -+ -+/** -+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Type of the object to set its IRQ -+ * @obj_id: ID of the object to set its IRQ -+ * @irq_index: The interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_obj_irq() - Get IRQ information from object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Type od the object to get its IRQ -+ * @obj_id: ID of the object to get its IRQ -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: The returned IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_get_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_res_count() - Obtains the number of free resources that are assigned -+ * to this container, by pool type -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @type: pool type -+ * @res_count: Returned number of free resources of the given -+ * resource type that are assigned to this DPRC -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_res_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ int *res_count); -+ -+/** -+ * enum dprc_iter_status - Iteration status -+ * @DPRC_ITER_STATUS_FIRST: Perform first iteration -+ * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed -+ * @DPRC_ITER_STATUS_LAST: Indicates last iteration -+ */ -+enum dprc_iter_status { -+ DPRC_ITER_STATUS_FIRST = 0, -+ DPRC_ITER_STATUS_MORE = 1, -+ DPRC_ITER_STATUS_LAST = 2 -+}; -+ -+/** -+ * struct dprc_res_ids_range_desc - Resource ID range descriptor -+ * @base_id: Base resource ID of this range -+ * @last_id: Last resource ID of this range -+ * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at -+ * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE, -+ * additional iterations are needed, until the returned marker is -+ * DPRC_ITER_STATUS_LAST -+ */ -+struct dprc_res_ids_range_desc { -+ int base_id; -+ int last_id; -+ enum dprc_iter_status iter_status; -+}; -+ -+/** -+ * dprc_get_res_ids() - Obtains IDs of free resources in the container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @type: pool type -+ * @range_desc: range descriptor -+ * -+ * Return: '0' on 
Success; Error code otherwise. -+ */ -+int dprc_get_res_ids(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ struct dprc_res_ids_range_desc *range_desc); -+ -+/* Region flags */ -+/* Cacheable - Indicates that region should be mapped as cacheable */ -+#define DPRC_REGION_CACHEABLE 0x00000001 -+ -+/** -+ * enum dprc_region_type - Region type -+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region -+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region -+ */ -+enum dprc_region_type { -+ DPRC_REGION_TYPE_MC_PORTAL, -+ DPRC_REGION_TYPE_QBMAN_PORTAL -+}; -+ -+/** -+ * struct dprc_region_desc - Mappable region descriptor -+ * @base_offset: Region offset from region's base address. -+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals -+ * base address; For DPIO, region base is offset from SoC QMan portals -+ * base address -+ * @size: Region size (in bytes) -+ * @flags: Region attributes -+ * @type: Portal region type -+ */ -+struct dprc_region_desc { -+ uint32_t base_offset; -+ uint32_t size; -+ uint32_t flags; -+ enum dprc_region_type type; -+}; -+ -+/** -+ * dprc_get_obj_region() - Get region information for a specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Object type as returned in dprc_get_obj() -+ * @obj_id: Unique object instance as returned in dprc_get_obj() -+ * @region_index: The specific region to query -+ * @region_desc: Returns the requested region descriptor -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj_region(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t region_index, -+ struct dprc_region_desc *region_desc); -+ -+/** -+ * dprc_set_obj_label() - Set object label. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Object's type -+ * @obj_id: Object's ID -+ * @label: The required label. The maximum length is 16 chars. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_obj_label(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ char *label); -+ -+/** -+ * struct dprc_endpoint - Endpoint description for link connect/disconnect -+ * operations -+ * @type: Endpoint object type: NULL terminated string -+ * @id: Endpoint object ID -+ * @if_id: Interface ID; should be set for endpoints with multiple -+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0 -+ */ -+struct dprc_endpoint { -+ char type[16]; -+ int id; -+ int if_id; -+}; -+ -+/** -+ * struct dprc_connection_cfg - Connection configuration. -+ * Used for virtual connections only -+ * @committed_rate: Committed rate (Mbits/s) -+ * @max_rate: Maximum rate (Mbits/s) -+ */ -+struct dprc_connection_cfg { -+ uint32_t committed_rate; -+ uint32_t max_rate; -+}; -+ -+/** -+ * dprc_connect() - Connect two endpoints to create a network link between them -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @endpoint1: Endpoint 1 configuration parameters -+ * @endpoint2: Endpoint 2 configuration parameters -+ * @cfg: Connection configuration. The connection configuration is ignored for -+ * connections made to DPMAC objects, where rate is retrieved from the -+ * MAC configuration. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_connect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ const struct dprc_endpoint *endpoint2, -+ const struct dprc_connection_cfg *cfg); -+ -+/** -+ * dprc_disconnect() - Disconnect one endpoint to remove its network connection -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @endpoint: Endpoint configuration parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_disconnect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint); -+ -+/** -+* dprc_get_connection() - Get connected endpoint and link status if connection -+* exists. -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPRC object -+* @endpoint1: Endpoint 1 configuration parameters -+* @endpoint2: Returned endpoint 2 configuration parameters -+* @state: Returned link state: -+* 1 - link is up; -+* 0 - link is down; -+* -1 - no connection (endpoint2 information is irrelevant) -+* -+* Return: '0' on Success; -ENAVAIL if connection does not exist. -+*/ -+int dprc_get_connection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ struct dprc_endpoint *endpoint2, -+ int *state); -+ -+#endif /* _FSL_DPRC_H */ -+ -diff --git a/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h b/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h -new file mode 100644 -index 0000000..3e9af59 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h -@@ -0,0 +1,774 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPAA2_FD_H -+#define __FSL_DPAA2_FD_H -+ -+/** -+ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2 -+ * -+ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2. 
-+ * Frames can be enqueued and dequeued to Frame Queues which are consumed -+ * by the various DPAA accelerators (WRIOP, SEC, PME, DCE) -+ * -+ * There are three types of frames: Single, Scatter Gather and Frame Lists. -+ * -+ * The set of APIs in this file must be used to create, manipulate and -+ * query Frame Descriptor. -+ * -+ */ -+ -+/** -+ * struct dpaa2_fd - Place-holder for FDs. -+ * @words: for easier/faster copying the whole FD structure. -+ * @addr_lo: the lower 32 bits of the address in FD. -+ * @addr_hi: the upper 32 bits of the address in FD. -+ * @len: the length field in FD. -+ * @bpid_offset: represent the bpid and offset fields in FD -+ * @frc: frame context -+ * @ctrl: the 32bit control bits including dd, sc,... va, err. -+ * @flc_lo: the lower 32bit of flow context. -+ * @flc_hi: the upper 32bits of flow context. -+ * -+ * This structure represents the basic Frame Descriptor used in the system. -+ * We represent it via the simplest form that we need for now. Different -+ * overlays may be needed to support different options, etc. (It is impractical -+ * to define One True Struct, because the resulting encoding routines (lots of -+ * read-modify-writes) would be worst-case performance whether or not -+ * circumstances required them.) -+ */ -+struct dpaa2_fd { -+ union { -+ u32 words[8]; -+ struct dpaa2_fd_simple { -+ u32 addr_lo; -+ u32 addr_hi; -+ u32 len; -+ /* offset in the MS 16 bits, BPID in the LS 16 bits */ -+ u32 bpid_offset; -+ u32 frc; /* frame context */ -+ /* "err", "va", "cbmt", "asal", [...] */ -+ u32 ctrl; -+ /* flow context */ -+ u32 flc_lo; -+ u32 flc_hi; -+ } simple; -+ }; -+}; -+ -+enum dpaa2_fd_format { -+ dpaa2_fd_single = 0, -+ dpaa2_fd_list, -+ dpaa2_fd_sg -+}; -+ -+/* Accessors for SG entry fields -+ * -+ * These setters and getters assume little endian format. 
For converting -+ * between LE and cpu endianness, the specific conversion functions must be -+ * called before the SGE contents are accessed by the core (on Rx), -+ * respectively before the SG table is sent to hardware (on Tx) -+ */ -+ -+/** -+ * dpaa2_fd_get_addr() - get the addr field of frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the address in the frame descriptor. -+ */ -+static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd) -+{ -+ return (dma_addr_t)((((uint64_t)fd->simple.addr_hi) << 32) -+ + fd->simple.addr_lo); -+} -+ -+/** -+ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor -+ * @fd: the given frame descriptor. -+ * @addr: the address needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr) -+{ -+ fd->simple.addr_hi = upper_32_bits(addr); -+ fd->simple.addr_lo = lower_32_bits(addr); -+} -+ -+/** -+ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the frame context field in the frame descriptor. -+ */ -+static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd) -+{ -+ return fd->simple.frc; -+} -+ -+/** -+ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor -+ * @fd: the given frame descriptor. -+ * @frc: the frame context needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc) -+{ -+ fd->simple.frc = frc; -+} -+ -+/** -+ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the flow context in the frame descriptor. -+ */ -+static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd) -+{ -+ return (dma_addr_t)((((uint64_t)fd->simple.flc_hi) << 32) + -+ fd->simple.flc_lo); -+} -+ -+/** -+ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor -+ * @fd: the given frame descriptor. 
-+ * @flc_addr: the flow context needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr) -+{ -+ fd->simple.flc_hi = upper_32_bits(flc_addr); -+ fd->simple.flc_lo = lower_32_bits(flc_addr); -+} -+ -+/** -+ * dpaa2_fd_get_len() - Get the length in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the length field in the frame descriptor. -+ */ -+static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd) -+{ -+ return fd->simple.len; -+} -+ -+/** -+ * dpaa2_fd_set_len() - Set the length field of frame descriptor -+ * @fd: the given frame descriptor. -+ * @len: the length needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len) -+{ -+ fd->simple.len = len; -+} -+ -+/** -+ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the offset. -+ */ -+static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd) -+{ -+ return (uint16_t)(fd->simple.bpid_offset >> 16) & 0x0FFF; -+} -+ -+/** -+ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor -+ * -+ * @fd: the given frame descriptor. -+ * @offset: the offset needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset) -+{ -+ fd->simple.bpid_offset &= 0xF000FFFF; -+ fd->simple.bpid_offset |= (u32)offset << 16; -+} -+ -+/** -+ * dpaa2_fd_get_format() - Get the format field in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the format. -+ */ -+static inline enum dpaa2_fd_format dpaa2_fd_get_format( -+ const struct dpaa2_fd *fd) -+{ -+ return (enum dpaa2_fd_format)((fd->simple.bpid_offset >> 28) & 0x3); -+} -+ -+/** -+ * dpaa2_fd_set_format() - Set the format field of frame descriptor -+ * -+ * @fd: the given frame descriptor. -+ * @format: the format needs to be set in frame descriptor. 
-+ */ -+static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd, -+ enum dpaa2_fd_format format) -+{ -+ fd->simple.bpid_offset &= 0xCFFFFFFF; -+ fd->simple.bpid_offset |= (u32)format << 28; -+} -+ -+/** -+ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the bpid. -+ */ -+static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd) -+{ -+ return (uint16_t)(fd->simple.bpid_offset & 0xFFFF); -+} -+ -+/** -+ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor -+ * -+ * @fd: the given frame descriptor. -+ * @bpid: the bpid needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid) -+{ -+ fd->simple.bpid_offset &= 0xFFFF0000; -+ fd->simple.bpid_offset |= (u32)bpid; -+} -+ -+/** -+ * struct dpaa2_sg_entry - the scatter-gathering structure -+ * @addr_lo: the lower 32bit of address -+ * @addr_hi: the upper 32bit of address -+ * @len: the length in this sg entry. -+ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits. -+ */ -+struct dpaa2_sg_entry { -+ u32 addr_lo; -+ u32 addr_hi; -+ u32 len; -+ u32 bpid_offset; -+}; -+ -+enum dpaa2_sg_format { -+ dpaa2_sg_single = 0, -+ dpaa2_sg_frame_data, -+ dpaa2_sg_sgt_ext -+}; -+ -+/** -+ * dpaa2_sg_get_addr() - Get the address from SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the address. -+ */ -+static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg) -+{ -+ return (dma_addr_t)((((u64)sg->addr_hi) << 32) + sg->addr_lo); -+} -+ -+/** -+ * dpaa2_sg_set_addr() - Set the address in SG entry -+ * @sg: the given scatter-gathering object. -+ * @addr: the address to be set. 
-+ */ -+static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr) -+{ -+ sg->addr_hi = upper_32_bits(addr); -+ sg->addr_lo = lower_32_bits(addr); -+} -+ -+ -+static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg) -+{ -+ return (sg->bpid_offset >> 30) & 0x1; -+} -+ -+/** -+ * dpaa2_sg_get_len() - Get the length in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the length. -+ */ -+static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg) -+{ -+ if (dpaa2_sg_short_len(sg)) -+ return sg->len & 0x1FFFF; -+ return sg->len; -+} -+ -+/** -+ * dpaa2_sg_set_len() - Set the length in SG entry -+ * @sg: the given scatter-gathering object. -+ * @len: the length to be set. -+ */ -+static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len) -+{ -+ sg->len = len; -+} -+ -+/** -+ * dpaa2_sg_get_offset() - Get the offset in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the offset. -+ */ -+static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg) -+{ -+ return (u16)(sg->bpid_offset >> 16) & 0x0FFF; -+} -+ -+/** -+ * dpaa2_sg_set_offset() - Set the offset in SG entry -+ * @sg: the given scatter-gathering object. -+ * @offset: the offset to be set. -+ */ -+static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg, -+ u16 offset) -+{ -+ sg->bpid_offset &= 0xF000FFFF; -+ sg->bpid_offset |= (u32)offset << 16; -+} -+ -+/** -+ * dpaa2_sg_get_format() - Get the SG format in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the format. -+ */ -+static inline enum dpaa2_sg_format -+ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg) -+{ -+ return (enum dpaa2_sg_format)((sg->bpid_offset >> 28) & 0x3); -+} -+ -+/** -+ * dpaa2_sg_set_format() - Set the SG format in SG entry -+ * @sg: the given scatter-gathering object. -+ * @format: the format to be set. 
-+ */ -+static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg, -+ enum dpaa2_sg_format format) -+{ -+ sg->bpid_offset &= 0xCFFFFFFF; -+ sg->bpid_offset |= (u32)format << 28; -+} -+ -+/** -+ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the bpid. -+ */ -+static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg) -+{ -+ return (u16)(sg->bpid_offset & 0x3FFF); -+} -+ -+/** -+ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry -+ * @sg: the given scatter-gathering object. -+ * @bpid: the bpid to be set. -+ */ -+static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid) -+{ -+ sg->bpid_offset &= 0xFFFFC000; -+ sg->bpid_offset |= (u32)bpid; -+} -+ -+/** -+ * dpaa2_sg_is_final() - Check final bit in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return bool. -+ */ -+static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg) -+{ -+ return !!(sg->bpid_offset >> 31); -+} -+ -+/** -+ * dpaa2_sg_set_final() - Set the final bit in SG entry -+ * @sg: the given scatter-gathering object. -+ * @final: the final boolean to be set. -+ */ -+static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final) -+{ -+ sg->bpid_offset &= 0x7FFFFFFF; -+ sg->bpid_offset |= (u32)final << 31; -+} -+ -+/* Endianness conversion helper functions -+ * The accelerator drivers which construct / read scatter gather entries -+ * need to call these in order to account for endianness mismatches between -+ * hardware and cpu -+ */ -+#ifdef __BIG_ENDIAN -+/** -+ * dpaa2_sg_cpu_to_le() - convert scatter gather entry from native cpu -+ * format little endian format. -+ * @sg: the given scatter gather entry. 
-+ */ -+static inline void dpaa2_sg_cpu_to_le(struct dpaa2_sg_entry *sg) -+{ -+ uint32_t *p = (uint32_t *)sg; -+ int i; -+ -+ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++) -+ cpu_to_le32s(p++); -+} -+ -+/** -+ * dpaa2_sg_le_to_cpu() - convert scatter gather entry from little endian -+ * format to native cpu format. -+ * @sg: the given scatter gather entry. -+ */ -+static inline void dpaa2_sg_le_to_cpu(struct dpaa2_sg_entry *sg) -+{ -+ uint32_t *p = (uint32_t *)sg; -+ int i; -+ -+ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++) -+ le32_to_cpus(p++); -+} -+#else -+#define dpaa2_sg_cpu_to_le(sg) -+#define dpaa2_sg_le_to_cpu(sg) -+#endif /* __BIG_ENDIAN */ -+ -+ -+/** -+ * struct dpaa2_fl_entry - structure for frame list entry. -+ * @addr_lo: the lower 32bit of address -+ * @addr_hi: the upper 32bit of address -+ * @len: the length in this sg entry. -+ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits. -+ * @frc: frame context -+ * @ctrl: the 32bit control bits including dd, sc,... va, err. -+ * @flc_lo: the lower 32bit of flow context. -+ * @flc_hi: the upper 32bits of flow context. -+ * -+ * Frame List Entry (FLE) -+ * Identical to dpaa2_fd.simple layout, but some bits are different -+ */ -+struct dpaa2_fl_entry { -+ u32 addr_lo; -+ u32 addr_hi; -+ u32 len; -+ u32 bpid_offset; -+ u32 frc; -+ u32 ctrl; -+ u32 flc_lo; -+ u32 flc_hi; -+}; -+ -+enum dpaa2_fl_format { -+ dpaa2_fl_single = 0, -+ dpaa2_fl_res, -+ dpaa2_fl_sg -+}; -+ -+/** -+ * dpaa2_fl_get_addr() - Get address in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return address for the get function. -+ */ -+static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle) -+{ -+ return (dma_addr_t)((((uint64_t)fle->addr_hi) << 32) + fle->addr_lo); -+} -+ -+/** -+ * dpaa2_fl_set_addr() - Set the address in the frame list entry -+ * @fle: the given frame list entry. -+ * @addr: the address needs to be set. 
-+ * -+ */ -+static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle, -+ dma_addr_t addr) -+{ -+ fle->addr_hi = upper_32_bits(addr); -+ fle->addr_lo = lower_32_bits(addr); -+} -+ -+/** -+ * dpaa2_fl_get_flc() - Get the flow context in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return flow context for the get function. -+ */ -+static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle) -+{ -+ return (dma_addr_t)((((uint64_t)fle->flc_hi) << 32) + fle->flc_lo); -+} -+ -+/** -+ * dpaa2_fl_set_flc() - Set the flow context in the frame list entry -+ * @fle: the given frame list entry. -+ * @flc_addr: the flow context address needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle, -+ dma_addr_t flc_addr) -+{ -+ fle->flc_hi = upper_32_bits(flc_addr); -+ fle->flc_lo = lower_32_bits(flc_addr); -+} -+ -+/** -+ * dpaa2_fl_get_len() - Get the length in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return length for the get function. -+ */ -+static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle) -+{ -+ return fle->len; -+} -+ -+/** -+ * dpaa2_fl_set_len() - Set the length in the frame list entry -+ * @fle: the given frame list entry. -+ * @len: the length needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len) -+{ -+ fle->len = len; -+} -+ -+/** -+ * dpaa2_fl_get_offset() - Get/Set the offset in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return offset for the get function. -+ */ -+static inline uint16_t dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle) -+{ -+ return (uint16_t)(fle->bpid_offset >> 16) & 0x0FFF; -+} -+ -+/** -+ * dpaa2_fl_set_offset() - Set the offset in the frame list entry -+ * @fle: the given frame list entry. -+ * @offset: the offset needs to be set. 
-+ * -+ */ -+static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, -+ uint16_t offset) -+{ -+ fle->bpid_offset &= 0xF000FFFF; -+ fle->bpid_offset |= (u32)(offset & 0x0FFF) << 16; -+} -+ -+/** -+ * dpaa2_fl_get_format() - Get the format in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return frame list format for the get function. -+ */ -+static inline enum dpaa2_fl_format dpaa2_fl_get_format( -+ const struct dpaa2_fl_entry *fle) -+{ -+ return (enum dpaa2_fl_format)((fle->bpid_offset >> 28) & 0x3); -+} -+ -+/** -+ * dpaa2_fl_set_format() - Set the format in the frame list entry -+ * @fle: the given frame list entry. -+ * @format: the frame list format needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle, -+ enum dpaa2_fl_format format) -+{ -+ fle->bpid_offset &= 0xCFFFFFFF; -+ fle->bpid_offset |= (u32)(format & 0x3) << 28; -+} -+ -+/** -+ * dpaa2_fl_get_bpid() - Get the buffer pool id in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return bpid for the get function. -+ */ -+static inline uint16_t dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle) -+{ -+ return (uint16_t)(fle->bpid_offset & 0x3FFF); -+} -+ -+/** -+ * dpaa2_fl_set_bpid() - Set the buffer pool id in the frame list entry -+ * @fle: the given frame list entry. -+ * @bpid: the buffer pool id needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, uint16_t bpid) -+{ -+ fle->bpid_offset &= 0xFFFFC000; -+ fle->bpid_offset |= (u32)bpid; -+} -+ -+/** dpaa2_fl_is_final() - check the final bit is set or not in the frame list. -+ * @fle: the given frame list entry. -+ * -+ * Return final bit settting. -+ */ -+static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle) -+{ -+ return !!(fle->bpid_offset >> 31); -+} -+ -+/** -+ * dpaa2_fl_set_final() - Set the final bit in the frame list entry -+ * @fle: the given frame list entry. 
-+ * @final: the final bit needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final) -+{ -+ fle->bpid_offset &= 0x7FFFFFFF; -+ fle->bpid_offset |= (u32)final << 31; -+} -+ -+/** -+ * struct dpaa2_dq - the qman result structure -+ * @dont_manipulate_directly: the 16 32bit data to represent the whole -+ * possible qman dequeue result. -+ * -+ * When frames are dequeued, the FDs show up inside "dequeue" result structures -+ * (if at all, not all dequeue results contain valid FDs). This structure type -+ * is intentionally defined without internal detail, and the only reason it -+ * isn't declared opaquely (without size) is to allow the user to provide -+ * suitably-sized (and aligned) memory for these entries. -+ */ -+struct dpaa2_dq { -+ uint32_t dont_manipulate_directly[16]; -+}; -+ -+/* Parsing frame dequeue results */ -+/* FQ empty */ -+#define DPAA2_DQ_STAT_FQEMPTY 0x80 -+/* FQ held active */ -+#define DPAA2_DQ_STAT_HELDACTIVE 0x40 -+/* FQ force eligible */ -+#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20 -+/* Valid frame */ -+#define DPAA2_DQ_STAT_VALIDFRAME 0x10 -+/* FQ ODP enable */ -+#define DPAA2_DQ_STAT_ODPVALID 0x04 -+/* Volatile dequeue */ -+#define DPAA2_DQ_STAT_VOLATILE 0x02 -+/* volatile dequeue command is expired */ -+#define DPAA2_DQ_STAT_EXPIRED 0x01 -+ -+/** -+ * dpaa2_dq_flags() - Get the stat field of dequeue response -+ * @dq: the dequeue result. -+ */ -+uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull -+ * command. -+ * @dq: the dequeue result. -+ * -+ * Return 1 for volatile(pull) dequeue, 0 for static dequeue. -+ */ -+static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq) -+{ -+ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE); -+} -+ -+/** -+ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed. -+ * @dq: the dequeue result. -+ * -+ * Return boolean. 
-+ */ -+static inline int dpaa2_dq_is_pull_complete( -+ const struct dpaa2_dq *dq) -+{ -+ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED); -+} -+ -+/** -+ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response -+ * seqnum is valid only if VALIDFRAME flag is TRUE -+ * @dq: the dequeue result. -+ * -+ * Return seqnum. -+ */ -+uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_odpid() - Get the seqnum field in dequeue response -+ * odpid is valid only if ODPVAILD flag is TRUE. -+ * @dq: the dequeue result. -+ * -+ * Return odpid. -+ */ -+uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_fqid() - Get the fqid in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return fqid. -+ */ -+uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_byte_count() - Get the byte count in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the byte count remaining in the FQ. -+ */ -+uint32_t dpaa2_dq_byte_count(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_frame_count() - Get the frame count in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame count remaining in the FQ. -+ */ -+uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_fd_ctx() - Get the frame queue context in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame queue context. -+ */ -+uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame descriptor. 
-+ */ -+const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq); -+ -+#endif /* __FSL_DPAA2_FD_H */ -diff --git a/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h b/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h -new file mode 100644 -index 0000000..6ea2ff9 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h -@@ -0,0 +1,619 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPAA2_IO_H -+#define __FSL_DPAA2_IO_H -+ -+#include "fsl_dpaa2_fd.h" -+ -+struct dpaa2_io; -+struct dpaa2_io_store; -+ -+/** -+ * DOC: DPIO Service Management -+ * -+ * The DPIO service provides APIs for users to interact with the datapath -+ * by enqueueing and dequeing frame descriptors. -+ * -+ * The following set of APIs can be used to enqueue and dequeue frames -+ * as well as producing notification callbacks when data is available -+ * for dequeue. -+ */ -+ -+/** -+ * struct dpaa2_io_desc - The DPIO descriptor. -+ * @receives_notifications: Use notificaton mode. -+ * @has_irq: use irq-based proessing. -+ * @will_poll: use poll processing. -+ * @has_8prio: set for channel with 8 priority WQs. -+ * @cpu: the cpu index that at least interrupt handlers will execute on. -+ * @stash_affinity: the stash affinity for this portal favour 'cpu' -+ * @regs_cena: the cache enabled regs. -+ * @regs_cinh: the cache inhibited regs. -+ * @dpio_id: The dpio index. -+ * @qman_version: the qman version -+ * -+ * Describe the attributes and features of the DPIO object. -+ */ -+struct dpaa2_io_desc { -+ /* non-zero iff the DPIO has a channel */ -+ int receives_notifications; -+ /* non-zero if the DPIO portal interrupt is handled. If so, the -+ * caller/OS handles the interrupt and calls dpaa2_io_service_irq(). 
*/ -+ int has_irq; -+ /* non-zero if the caller/OS is prepared to called the -+ * dpaa2_io_service_poll() routine as part of its run-to-completion (or -+ * scheduling) loop. If so, the DPIO service may dynamically switch some -+ * of its processing between polling-based and irq-based. It is illegal -+ * combination to have (!has_irq && !will_poll). */ -+ int will_poll; -+ /* ignored unless 'receives_notifications'. Non-zero iff the channel has -+ * 8 priority WQs, otherwise the channel has 2. */ -+ int has_8prio; -+ /* the cpu index that at least interrupt handlers will execute on. And -+ * if 'stash_affinity' is non-zero, the cache targeted by stash -+ * transactions is affine to this cpu. */ -+ int cpu; -+ /* non-zero if stash transactions for this portal favour 'cpu' over -+ * other CPUs. (Eg. zero if there's no stashing, or stashing is to -+ * shared cache.) */ -+ int stash_affinity; -+ /* Caller-provided flags, determined by bus-scanning and/or creation of -+ * DPIO objects via MC commands. */ -+ void *regs_cena; -+ void *regs_cinh; -+ int dpio_id; -+ uint32_t qman_version; -+}; -+ -+/** -+ * dpaa2_io_create() - create a dpaa2_io object. -+ * @desc: the dpaa2_io descriptor -+ * -+ * Activates a "struct dpaa2_io" corresponding to the given config of an actual -+ * DPIO object. This handle can be used on it's own (like a one-portal "DPIO -+ * service") or later be added to a service-type "struct dpaa2_io" object. Note, -+ * the information required on 'cfg' is copied so the caller is free to do as -+ * they wish with the input parameter upon return. -+ * -+ * Return a valid dpaa2_io object for success, or NULL for failure. -+ */ -+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc); -+ -+/** -+ * dpaa2_io_create_service() - Create an (initially empty) DPIO service. -+ * -+ * Return a valid dpaa2_io object for success, or NULL for failure. 
-+ */ -+struct dpaa2_io *dpaa2_io_create_service(void); -+ -+/** -+ * dpaa2_io_default_service() - Use the driver's own global (and initially -+ * empty) DPIO service. -+ * -+ * This increments the reference count, so don't forget to use dpaa2_io_down() -+ * for each time this function is called. -+ * -+ * Return a valid dpaa2_io object for success, or NULL for failure. -+ */ -+struct dpaa2_io *dpaa2_io_default_service(void); -+ -+/** -+ * dpaa2_io_down() - release the dpaa2_io object. -+ * @d: the dpaa2_io object to be released. -+ * -+ * The "struct dpaa2_io" type can represent an individual DPIO object (as -+ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service", -+ * which can be used to group/encapsulate multiple DPIO objects. In all cases, -+ * each handle obtained should be released using this function. -+ */ -+void dpaa2_io_down(struct dpaa2_io *d); -+ -+/** -+ * dpaa2_io_service_add() - Add the given DPIO object to the given DPIO service. -+ * @service: the given DPIO service. -+ * @obj: the given DPIO object. -+ * -+ * 'service' must have been created by dpaa2_io_create_service() and 'obj' -+ * must have been created by dpaa2_io_create(). This increments the reference -+ * count on the object that 'obj' refers to, so the user could call -+ * dpaa2_io_down(obj) after this and the object will persist within the service -+ * (and will be destroyed when the service is destroyed). -+ * -+ * Return 0 for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_service_add(struct dpaa2_io *service, struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_get_descriptor() - Get the DPIO descriptor of the given DPIO object. -+ * @obj: the given DPIO object. -+ * @desc: the returned DPIO descriptor. -+ * -+ * This function will return failure if the given dpaa2_io struct represents a -+ * service rather than an individual DPIO object, otherwise it returns zero and -+ * the given 'cfg' structure is filled in. 
-+ * -+ * Return 0 for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc); -+ -+/** -+ * dpaa2_io_poll() - Process any notifications and h/w-initiated events that -+ * are polling-driven. -+ * @obj: the given DPIO object. -+ * -+ * Obligatory for DPIO objects that have dpaa2_io_desc::will_poll non-zero. -+ * -+ * Return 0 for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_poll(struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_irq() - Process any notifications and h/w-initiated events that are -+ * irq-driven. -+ * @obj: the given DPIO object. -+ * -+ * Obligatory for DPIO objects that have dpaa2_io_desc::has_irq non-zero. -+ * -+ * Return IRQ_HANDLED for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_irq(struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_pause_poll() - Used to stop polling. -+ * @obj: the given DPIO object. -+ * -+ * If a polling application is going to stop polling for a period of time and -+ * supports interrupt processing, it can call this function to convert all -+ * processing to IRQ. (Eg. when sleeping.) -+ * -+ * Return -EINVAL. -+ */ -+int dpaa2_io_pause_poll(struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_resume_poll() - Resume polling -+ * @obj: the given DPIO object. -+ * -+ * Return -EINVAL. -+ */ -+int dpaa2_io_resume_poll(struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_service_notifications() - Get a mask of cpus that the DPIO service -+ * can receive notifications on. -+ * @s: the given DPIO object. -+ * @mask: the mask of cpus. -+ * -+ * Note that this is a run-time snapshot. If things like cpu-hotplug are -+ * supported in the target system, then an attempt to register notifications -+ * for a cpu that appears present in the given mask might fail if that cpu has -+ * gone offline in the mean time. 
-+ */ -+void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask); -+ -+/** -+ * dpaa2_io_service_stashing - Get a mask of cpus that the DPIO service has stash -+ * affinity to. -+ * @s: the given DPIO object. -+ * @mask: the mask of cpus. -+ */ -+void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask); -+ -+/** -+ * dpaa2_io_service_nonaffine() - Check the DPIO service's cpu affinity -+ * for stashing. -+ * @s: the given DPIO object. -+ * -+ * Return a boolean, whether or not the DPIO service has resources that have no -+ * particular cpu affinity for stashing. (Useful to know if you wish to operate -+ * on CPUs that the service has no affinity to, you would choose to use -+ * resources that are neutral, rather than affine to a different CPU.) Unlike -+ * other service-specific APIs, this one doesn't return an error if it is passed -+ * a non-service object. So don't do it. -+ */ -+int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s); -+ -+/*************************/ -+/* Notification handling */ -+/*************************/ -+ -+/** -+ * struct dpaa2_io_notification_ctx - The DPIO notification context structure. -+ * @cb: the callback to be invoked when the notification arrives. -+ * @is_cdan: Zero/FALSE for FQDAN, non-zero/TRUE for CDAN. -+ * @id: FQID or channel ID, needed for rearm. -+ * @desired_cpu: the cpu on which the notifications will show up. -+ * @actual_cpu: the cpu the notification actually shows up. -+ * @migration_cb: callback function used for migration. -+ * @dpio_id: the dpio index. -+ * @qman64: the 64-bit context value shows up in the FQDAN/CDAN. -+ * @node: the list node. -+ * @dpio_private: the dpio object internal to dpio_service. -+ * -+ * When a FQDAN/CDAN registration is made (eg. by DPNI/DPCON/DPAI code), a -+ * context of the following type is used. 
The caller can embed it within a -+ * larger structure in order to add state that is tracked along with the -+ * notification (this may be useful when callbacks are invoked that pass this -+ * notification context as a parameter). -+ */ -+struct dpaa2_io_notification_ctx { -+ void (*cb)(struct dpaa2_io_notification_ctx *); -+ int is_cdan; -+ uint32_t id; -+ /* This specifies which cpu the user wants notifications to show up on -+ * (ie. to execute 'cb'). If notification-handling on that cpu is not -+ * available at the time of notification registration, the registration -+ * will fail. */ -+ int desired_cpu; -+ /* If the target platform supports cpu-hotplug or other features -+ * (related to power-management, one would expect) that can migrate IRQ -+ * handling of a given DPIO object, then this value will potentially be -+ * different to 'desired_cpu' at run-time. */ -+ int actual_cpu; -+ /* And if migration does occur and this callback is non-NULL, it will -+ * be invoked prior to any futher notification callbacks executing on -+ * 'newcpu'. Note that 'oldcpu' is what 'actual_cpu' was prior to the -+ * migration, and 'newcpu' is what it is now. Both could conceivably be -+ * different to 'desired_cpu'. */ -+ void (*migration_cb)(struct dpaa2_io_notification_ctx *, -+ int oldcpu, int newcpu); -+ /* These are returned from dpaa2_io_service_register(). -+ * 'dpio_id' is the dpaa2_io_desc::dpio_id value of the DPIO object that -+ * has been selected by the service for receiving the notifications. The -+ * caller can use this value in the MC command that attaches the FQ (or -+ * channel) of their DPNI (or DPCON, respectively) to this DPIO for -+ * notification-generation. 
-+ * 'qman64' is the 64-bit context value that needs to be sent in the -+ * same MC command in order to be programmed into the FQ or channel - -+ * this is the 64-bit value that shows up in the FQDAN/CDAN messages to -+ * the DPIO object, and the DPIO service specifies this value back to -+ * the caller so that the notifications that show up will be -+ * comprensible/demux-able to the DPIO service. */ -+ int dpio_id; -+ uint64_t qman64; -+ /* These fields are internal to the DPIO service once the context is -+ * registered. TBD: may require more internal state fields. */ -+ struct list_head node; -+ void *dpio_private; -+}; -+ -+/** -+ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN -+ * notifications on the given DPIO service. -+ * @service: the given DPIO service. -+ * @ctx: the notification context. -+ * -+ * The MC command to attach the caller's DPNI/DPCON/DPAI device to a -+ * DPIO object is performed after this function is called. In that way, (a) the -+ * DPIO service is "ready" to handle a notification arrival (which might happen -+ * before the "attach" command to MC has returned control of execution back to -+ * the caller), and (b) the DPIO service can provide back to the caller the -+ * 'dpio_id' and 'qman64' parameters that it should pass along in the MC command -+ * in order for the DPNI/DPCON/DPAI resources to be configured to produce the -+ * right notification fields to the DPIO service. -+ * -+ * Return 0 for success, or -ENODEV for failure. -+ */ -+int dpaa2_io_service_register(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx); -+ -+/** -+ * dpaa2_io_service_deregister - The opposite of 'register'. -+ * @service: the given DPIO service. -+ * @ctx: the notification context. 
-+ * -+ * Note that 'register' should be called *before* -+ * making the MC call to attach the notification-producing device to the -+ * notification-handling DPIO service, the 'unregister' function should be -+ * called *after* making the MC call to detach the notification-producing -+ * device. -+ * -+ * Return 0 for success. -+ */ -+int dpaa2_io_service_deregister(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx); -+ -+/** -+ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service. -+ * @service: the given DPIO service. -+ * @ctx: the notification context. -+ * -+ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is -+ * considered "disarmed". Ie. the user can issue pull dequeue operations on that -+ * traffic source for as long as it likes. Eventually it may wish to "rearm" -+ * that source to allow it to produce another FQDAN/CDAN, that's what this -+ * function achieves. -+ * -+ * Return 0 for success, or -ENODEV if no service available, -EBUSY/-EIO for not -+ * being able to implement the rearm the notifiaton due to setting CDAN or -+ * scheduling fq. -+ */ -+int dpaa2_io_service_rearm(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx); -+ -+/** -+ * dpaa2_io_from_registration() - Get the DPIO object from the given notification -+ * context. -+ * @ctx: the given notifiation context. -+ * @ret: the returned DPIO object. -+ * -+ * Like 'dpaa2_io_service_get_persistent()' (see below), except that the -+ * returned handle is not selected based on a 'cpu' argument, but is the same -+ * DPIO object that the given notification context is registered against. The -+ * returned handle carries a reference count, so a corresponding dpaa2_io_down() -+ * would be required when the reference is no longer needed. -+ * -+ * Return 0 for success, or -EINVAL for failure. 
-+ */ -+int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx, -+ struct dpaa2_io **ret); -+ -+/**********************************/ -+/* General usage of DPIO services */ -+/**********************************/ -+ -+/** -+ * dpaa2_io_service_get_persistent() - Get the DPIO resource from the given -+ * notification context and cpu. -+ * @service: the DPIO service. -+ * @cpu: the cpu that the DPIO resource has stashing affinity to. -+ * @ret: the returned DPIO resource. -+ * -+ * The various DPIO interfaces can accept a "struct dpaa2_io" handle that refers -+ * to an individual DPIO object or to a whole service. In the latter case, an -+ * internal choice is made for each operation. This function supports the former -+ * case, by selecting an individual DPIO object *from* the service in order for -+ * it to be used multiple times to provide "persistence". The returned handle -+ * also carries a reference count, so a corresponding dpaa2_io_down() would be -+ * required when the reference is no longer needed. Note, a parameter of -1 for -+ * 'cpu' will select a DPIO resource that has no particular stashing affinity to -+ * any cpu (eg. one that stashes to platform cache). -+ * -+ * Return 0 for success, or -ENODEV for failure. -+ */ -+int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu, -+ struct dpaa2_io **ret); -+ -+/*****************/ -+/* Pull dequeues */ -+/*****************/ -+ -+/** -+ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq. -+ * @d: the given DPIO service. -+ * @fqid: the given frame queue id. -+ * @s: the dpaa2_io_store object for the result. -+ * -+ * To support DCA/order-preservation, it will be necessary to support an -+ * alternative form, because they must ultimately dequeue to DQRR rather than a -+ * user-supplied dpaa2_io_store. 
Furthermore, those dequeue results will -+ * "complete" using a caller-provided callback (from DQRR processing) rather -+ * than the caller explicitly looking at their dpaa2_io_store for results. Eg. -+ * the alternative form will likely take a callback parameter rather than a -+ * store parameter. Ignoring it for now to keep the picture clearer. -+ * -+ * Return 0 for success, or error code for failure. -+ */ -+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid, -+ struct dpaa2_io_store *s); -+ -+/** -+ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel. -+ * @d: the given DPIO service. -+ * @channelid: the given channel id. -+ * @s: the dpaa2_io_store object for the result. -+ * -+ * To support DCA/order-preservation, it will be necessary to support an -+ * alternative form, because they must ultimately dequeue to DQRR rather than a -+ * user-supplied dpaa2_io_store. Furthermore, those dequeue results will -+ * "complete" using a caller-provided callback (from DQRR processing) rather -+ * than the caller explicitly looking at their dpaa2_io_store for results. Eg. -+ * the alternative form will likely take a callback parameter rather than a -+ * store parameter. Ignoring it for now to keep the picture clearer. -+ * -+ * Return 0 for success, or error code for failure. -+ */ -+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid, -+ struct dpaa2_io_store *s); -+ -+/************/ -+/* Enqueues */ -+/************/ -+ -+/** -+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue. -+ * @d: the given DPIO service. -+ * @fqid: the given frame queue id. -+ * @fd: the frame descriptor which is enqueued. -+ * -+ * This definition bypasses some features that are not expected to be priority-1 -+ * features, and may not be needed at all via current assumptions (QBMan's -+ * feature set is wider than the MC object model is intendeding to support, -+ * initially at least). 
Plus, keeping them out (for now) keeps the API view -+ * simpler. Missing features are; -+ * - enqueue confirmation (results DMA'd back to the user) -+ * - ORP -+ * - DCA/order-preservation (see note in "pull dequeues") -+ * - enqueue consumption interrupts -+ * -+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready, -+ * or -ENODEV if there is no dpio service. -+ */ -+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, -+ uint32_t fqid, -+ const struct dpaa2_fd *fd); -+ -+/** -+ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD. -+ * @d: the given DPIO service. -+ * @qdid: the given queuing destination id. -+ * @prio: the given queuing priority. -+ * @qdbin: the given queuing destination bin. -+ * @fd: the frame descriptor which is enqueued. -+ * -+ * This definition bypasses some features that are not expected to be priority-1 -+ * features, and may not be needed at all via current assumptions (QBMan's -+ * feature set is wider than the MC object model is intendeding to support, -+ * initially at least). Plus, keeping them out (for now) keeps the API view -+ * simpler. Missing features are; -+ * - enqueue confirmation (results DMA'd back to the user) -+ * - ORP -+ * - DCA/order-preservation (see note in "pull dequeues") -+ * - enqueue consumption interrupts -+ * -+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready, -+ * or -ENODEV if there is no dpio service. -+ */ -+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, -+ uint32_t qdid, uint8_t prio, uint16_t qdbin, -+ const struct dpaa2_fd *fd); -+ -+/*******************/ -+/* Buffer handling */ -+/*******************/ -+ -+/** -+ * dpaa2_io_service_release() - Release buffers to a buffer pool. -+ * @d: the given DPIO object. -+ * @bpid: the buffer pool id. -+ * @buffers: the buffers to be released. -+ * @num_buffers: the number of the buffers to be released. -+ * -+ * Return 0 for success, and negative error code for failure. 
-+ */ -+int dpaa2_io_service_release(struct dpaa2_io *d, -+ uint32_t bpid, -+ const uint64_t *buffers, -+ unsigned int num_buffers); -+ -+/** -+ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool. -+ * @d: the given DPIO object. -+ * @bpid: the buffer pool id. -+ * @buffers: the buffer addresses for acquired buffers. -+ * @num_buffers: the expected number of the buffers to acquire. -+ * -+ * Return a negative error code if the command failed, otherwise it returns -+ * the number of buffers acquired, which may be less than the number requested. -+ * Eg. if the buffer pool is empty, this will return zero. -+ */ -+int dpaa2_io_service_acquire(struct dpaa2_io *d, -+ uint32_t bpid, -+ uint64_t *buffers, -+ unsigned int num_buffers); -+ -+/***************/ -+/* DPIO stores */ -+/***************/ -+ -+/* These are reusable memory blocks for retrieving dequeue results into, and to -+ * assist with parsing those results once they show up. They also hide the -+ * details of how to use "tokens" to make detection of DMA results possible (ie. -+ * comparing memory before the DMA and after it) while minimising the needless -+ * clearing/rewriting of those memory locations between uses. -+ */ -+ -+/** -+ * dpaa2_io_store_create() - Create the dma memory storage for dequeue -+ * result. -+ * @max_frames: the maximum number of dequeued result for frames, must be <= 16. -+ * @dev: the device to allow mapping/unmapping the DMAable region. -+ * -+ * Constructor - max_frames must be <= 16. The user provides the -+ * device struct to allow mapping/unmapping of the DMAable region. Area for -+ * storage will be allocated during create. The size of this storage is -+ * "max_frames*sizeof(struct dpaa2_dq)". The 'dpaa2_io_store' returned is a -+ * wrapper structure allocated within the DPIO code, which owns and manages -+ * allocated store. 
-+ * -+ * Return dpaa2_io_store struct for successfuly created storage memory, or NULL -+ * if not getting the stroage for dequeue result in create API. -+ */ -+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, -+ struct device *dev); -+ -+/** -+ * dpaa2_io_store_destroy() - Destroy the dma memory storage for dequeue -+ * result. -+ * @s: the storage memory to be destroyed. -+ * -+ * Frees to specified storage memory. -+ */ -+void dpaa2_io_store_destroy(struct dpaa2_io_store *s); -+ -+/** -+ * dpaa2_io_store_next() - Determine when the next dequeue result is available. -+ * @s: the dpaa2_io_store object. -+ * @is_last: indicate whether this is the last frame in the pull command. -+ * -+ * Once dpaa2_io_store has been passed to a function that performs dequeues to -+ * it, like dpaa2_ni_rx(), this function can be used to determine when the next -+ * frame result is available. Once this function returns non-NULL, a subsequent -+ * call to it will try to find the *next* dequeue result. -+ * -+ * Note that if a pull-dequeue has a null result because the target FQ/channel -+ * was empty, then this function will return NULL rather than expect the caller -+ * to always check for this on his own side. As such, "is_last" can be used to -+ * differentiate between "end-of-empty-dequeue" and "still-waiting". -+ * -+ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue. -+ */ -+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last); -+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+/** -+ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq. -+ * @d: the given DPIO object. -+ * @fqid: the id of frame queue to be queried. -+ * @fcnt: the queried frame count. -+ * @bcnt: the queried byte count. -+ * -+ * Knowing the FQ count at run-time can be useful in debugging situations. -+ * The instantaneous frame- and byte-count are hereby returned. 
-+ * -+ * Return 0 for a successful query, and negative error code if query fails. -+ */ -+int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid, -+ uint32_t *fcnt, uint32_t *bcnt); -+ -+/** -+ * dpaa2_io_query_bp_count() - Query the number of buffers currenty in a -+ * buffer pool. -+ * @d: the given DPIO object. -+ * @bpid: the index of buffer pool to be queried. -+ * @num: the queried number of buffers in the buffer pool. -+ * -+ * Return 0 for a sucessful query, and negative error code if query fails. -+ */ -+int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, -+ uint32_t *num); -+#endif -+#endif /* __FSL_DPAA2_IO_H */ -diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h -new file mode 100644 -index 0000000..00f0b74 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/mc-cmd.h -@@ -0,0 +1,133 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_MC_CMD_H -+#define __FSL_MC_CMD_H -+ -+#define MC_CMD_NUM_OF_PARAMS 7 -+ -+#define MAKE_UMASK64(_width) \ -+ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \ -+ (uint64_t)-1)) -+ -+static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val) -+{ -+ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset); -+} -+ -+static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width) -+{ -+ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width)); -+} -+ -+struct mc_command { -+ uint64_t header; -+ uint64_t params[MC_CMD_NUM_OF_PARAMS]; -+}; -+ -+enum mc_cmd_status { -+ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */ -+ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */ -+ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */ -+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */ -+ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */ -+ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */ -+ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */ -+ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */ -+ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */ -+ MC_CMD_STATUS_BUSY = 0xA, /* 
Device is busy */ -+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */ -+ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */ -+}; -+ -+/* -+ * MC command flags -+ */ -+ -+/* High priority flag */ -+#define MC_CMD_FLAG_PRI 0x00008000 -+/* Command completion flag */ -+#define MC_CMD_FLAG_INTR_DIS 0x01000000 -+ -+/* TODO Remove following two defines after completion of flib 8.0.0 -+integration */ -+#define MC_CMD_PRI_LOW 0 /*!< Low Priority command indication */ -+#define MC_CMD_PRI_HIGH 1 /*!< High Priority command indication */ -+ -+#define MC_CMD_HDR_CMDID_O 52 /* Command ID field offset */ -+#define MC_CMD_HDR_CMDID_S 12 /* Command ID field size */ -+#define MC_CMD_HDR_TOKEN_O 38 /* Token field offset */ -+#define MC_CMD_HDR_TOKEN_S 10 /* Token field size */ -+#define MC_CMD_HDR_STATUS_O 16 /* Status field offset */ -+#define MC_CMD_HDR_STATUS_S 8 /* Status field size*/ -+#define MC_CMD_HDR_FLAGS_O 0 /* Flags field offset */ -+#define MC_CMD_HDR_FLAGS_S 32 /* Flags field size*/ -+#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00 /* Command flags mask */ -+ -+#define MC_CMD_HDR_READ_STATUS(_hdr) \ -+ ((enum mc_cmd_status)mc_dec((_hdr), \ -+ MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S)) -+ -+#define MC_CMD_HDR_READ_TOKEN(_hdr) \ -+ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S)) -+ -+#define MC_CMD_HDR_READ_FLAGS(_hdr) \ -+ ((uint32_t)mc_dec((_hdr), MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S)) -+ -+#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \ -+ ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg))) -+ -+#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \ -+ (_arg = (_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width))) -+ -+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ -+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) -+ -+#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \ -+ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width))) -+ 
-+static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ uint64_t hdr; -+ -+ hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id); -+ hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S, -+ (cmd_flags & MC_CMD_HDR_FLAGS_MASK)); -+ hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token); -+ hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S, -+ MC_CMD_STATUS_READY); -+ -+ return hdr; -+} -+ -+#endif /* __FSL_MC_CMD_H */ -diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h -new file mode 100644 -index 0000000..58ed441 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/mc-private.h -@@ -0,0 +1,168 @@ -+/* -+ * Freescale Management Complex (MC) bus private declarations -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+#ifndef _FSL_MC_PRIVATE_H_ -+#define _FSL_MC_PRIVATE_H_ -+ -+#include "../include/mc.h" -+#include -+#include -+ -+#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc" -+ -+#define FSL_MC_DEVICE_MATCH(_mc_dev, _obj_desc) \ -+ (strcmp((_mc_dev)->obj_desc.type, (_obj_desc)->type) == 0 && \ -+ (_mc_dev)->obj_desc.id == (_obj_desc)->id) -+ -+#define FSL_MC_IS_ALLOCATABLE(_obj_type) \ -+ (strcmp(_obj_type, "dpbp") == 0 || \ -+ strcmp(_obj_type, "dpmcp") == 0 || \ -+ strcmp(_obj_type, "dpcon") == 0) -+ -+/** -+ * Maximum number of total IRQs that can be pre-allocated for an MC bus' -+ * IRQ pool -+ */ -+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256 -+ -+/** -+ * Maximum number of extra IRQs pre-reallocated for an MC bus' IRQ pool, -+ * to be used by dynamically created MC objects -+ */ -+#define FSL_MC_IRQ_POOL_MAX_EXTRA_IRQS 64 -+ -+/** -+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device -+ * @root_mc_bus_dev: MC object device representing the root DPRC -+ * @irq_domain: IRQ domain for the fsl-mc bus type -+ * @gic_supported: boolean flag that indicates if the GIC interrupt controller -+ * is supported. 
-+ * @num_translation_ranges: number of entries in addr_translation_ranges -+ * @addr_translation_ranges: array of bus to system address translation ranges -+ */ -+struct fsl_mc { -+ struct fsl_mc_device *root_mc_bus_dev; -+ struct irq_domain *irq_domain; -+ bool gic_supported; -+ uint8_t num_translation_ranges; -+ struct fsl_mc_addr_translation_range *translation_ranges; -+}; -+ -+/** -+ * enum mc_region_types - Types of MC MMIO regions -+ */ -+enum fsl_mc_region_types { -+ FSL_MC_PORTAL = 0x0, -+ FSL_QBMAN_PORTAL, -+ -+ /* -+ * New offset types must be added above this entry -+ */ -+ FSL_NUM_MC_OFFSET_TYPES -+}; -+ -+/** -+ * struct fsl_mc_addr_translation_range - bus to system address translation -+ * range -+ * @mc_region_type: Type of MC region for the range being translated -+ * @start_mc_offset: Start MC offset of the range being translated -+ * @end_mc_offset: MC offset of the first byte after the range (last MC -+ * offset of the range is end_mc_offset - 1) -+ * @start_phys_addr: system physical address corresponding to start_mc_addr -+ */ -+struct fsl_mc_addr_translation_range { -+ enum fsl_mc_region_types mc_region_type; -+ uint64_t start_mc_offset; -+ uint64_t end_mc_offset; -+ phys_addr_t start_phys_addr; -+}; -+ -+/** -+ * struct fsl_mc_resource_pool - Pool of MC resources of a given -+ * type -+ * @type: type of resources in the pool -+ * @max_count: maximum number of resources in the pool -+ * @free_count: number of free resources in the pool -+ * @mutex: mutex to serialize access to the pool's free list -+ * @free_list: anchor node of list of free resources in the pool -+ * @mc_bus: pointer to the MC bus that owns this resource pool -+ */ -+struct fsl_mc_resource_pool { -+ enum fsl_mc_pool_type type; -+ int16_t max_count; -+ int16_t free_count; -+ struct mutex mutex; /* serializes access to free_list */ -+ struct list_head free_list; -+ struct fsl_mc_bus *mc_bus; -+}; -+ -+/** -+ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC 
-+ * @mc_dev: fsl-mc device for the bus device itself. -+ * @resource_pools: array of resource pools (one pool per resource type) -+ * for this MC bus. These resources represent allocatable entities -+ * from the physical DPRC. -+ * @atomic_mc_io: mc_io object to be used to send DPRC commands to the MC -+ * in atomic context (e.g., when programming MSIs in program_msi_at_mc()). -+ * @atomic_dprc_handle: DPRC handle opened using the atomic_mc_io's portal. -+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool. -+ * @scan_mutex: Serializes bus scanning -+ * @dprc_attr: DPRC attributes -+ */ -+struct fsl_mc_bus { -+ struct fsl_mc_device mc_dev; -+ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES]; -+ struct fsl_mc_device_irq *irq_resources; -+ struct fsl_mc_io *atomic_mc_io; -+ uint16_t atomic_dprc_handle; -+ struct mutex scan_mutex; /* serializes bus scanning */ -+ struct dprc_attributes dprc_attr; -+}; -+ -+#define to_fsl_mc_bus(_mc_dev) \ -+ container_of(_mc_dev, struct fsl_mc_bus, mc_dev) -+ -+int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc, -+ struct fsl_mc_io *mc_io, -+ struct device *parent_dev, -+ const char *driver_override, -+ struct fsl_mc_device **new_mc_dev); -+ -+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev); -+ -+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, -+ const char *driver_override, -+ unsigned int *total_irq_count); -+ -+int __init dprc_driver_init(void); -+ -+void dprc_driver_exit(void); -+ -+int __init fsl_mc_allocator_driver_init(void); -+ -+void __exit fsl_mc_allocator_driver_exit(void); -+ -+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, -+ enum fsl_mc_pool_type pool_type, -+ struct fsl_mc_resource -+ **new_resource); -+ -+void fsl_mc_resource_free(struct fsl_mc_resource *resource); -+ -+int __must_check fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, -+ unsigned int irq_count); -+ -+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus); -+ 
-+void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev); -+ -+void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev); -+ -+#endif /* _FSL_MC_PRIVATE_H_ */ -diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h -new file mode 100644 -index 0000000..b08df85 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/mc-sys.h -@@ -0,0 +1,128 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+ * -+ * Interface of the I/O services to send MC commands to the MC hardware -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef _FSL_MC_SYS_H -+#define _FSL_MC_SYS_H -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/** -+ * Bit masks for a MC I/O object (struct fsl_mc_io) flags -+ */ -+#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001 -+ -+struct fsl_mc_resource; -+struct mc_command; -+ -+/** -+ * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command() -+ * @dev: device associated with this Mc I/O object -+ * @flags: flags for mc_send_command() -+ * @portal_size: MC command portal size in bytes -+ * @portal_phys_addr: MC command portal physical address -+ * @portal_virt_addr: MC command portal virtual address -+ * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal. -+ * @mc_command_done_irq_armed: Flag indicating that the MC command done IRQ -+ * is currently armed. -+ * @mc_command_done_completion: Completion variable to be signaled when an MC -+ * command sent to the MC fw is completed. -+ * -+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not -+ * set: -+ * @mutex: Mutex to serialize mc_send_command() calls that use the same MC -+ * portal, if the fsl_mc_io object was created with the -+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this -+ * fsl_mc_io object must be made only from non-atomic context. 
-+ * @mc_command_done_completion: Linux completion variable to be signaled -+ * when a DPMCP command completion interrupts is received. -+ * @mc_command_done_irq_armed: Boolean flag that indicates if interrupts have -+ * been successfully configured for the corresponding DPMCP object. -+ * -+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is -+ * set: -+ * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC -+ * portal, if the fsl_mc_io object was created with the -+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this -+ * fsl_mc_io object can be made from atomic or non-atomic context. -+ */ -+struct fsl_mc_io { -+ struct device *dev; -+ uint16_t flags; -+ uint16_t portal_size; -+ phys_addr_t portal_phys_addr; -+ void __iomem *portal_virt_addr; -+ struct fsl_mc_device *dpmcp_dev; -+ union { -+ /* -+ * These fields are only meaningful if the -+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set -+ */ -+ struct { -+ struct mutex mutex; /* serializes mc_send_command() */ -+ struct completion mc_command_done_completion; -+ bool mc_command_done_irq_armed; -+ }; -+ -+ /* -+ * This field is only meaningful if the -+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set -+ */ -+ spinlock_t spinlock; /* serializes mc_send_command() */ -+ }; -+}; -+ -+int __must_check fsl_create_mc_io(struct device *dev, -+ phys_addr_t mc_portal_phys_addr, -+ uint32_t mc_portal_size, -+ struct fsl_mc_device *dpmcp_dev, -+ uint32_t flags, struct fsl_mc_io **new_mc_io); -+ -+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io); -+ -+int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io, -+ struct fsl_mc_device *dpmcp_dev); -+ -+void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io); -+ -+int fsl_mc_io_setup_dpmcp_irq(struct fsl_mc_io *mc_io); -+ -+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd); -+ -+#endif /* _FSL_MC_SYS_H */ -diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h -new 
file mode 100644 -index 0000000..bbeb121 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/mc.h -@@ -0,0 +1,244 @@ -+/* -+ * Freescale Management Complex (MC) bus public interface -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+#ifndef _FSL_MC_H_ -+#define _FSL_MC_H_ -+ -+#include -+#include -+#include -+#include -+#include -+#include "../include/dprc.h" -+ -+#define FSL_MC_VENDOR_FREESCALE 0x1957 -+ -+struct fsl_mc_device; -+struct fsl_mc_io; -+ -+/** -+ * struct fsl_mc_driver - MC object device driver object -+ * @driver: Generic device driver -+ * @match_id_table: table of supported device matching Ids -+ * @probe: Function called when a device is added -+ * @remove: Function called when a device is removed -+ * @shutdown: Function called at shutdown time to quiesce the device -+ * @suspend: Function called when a device is stopped -+ * @resume: Function called when a device is resumed -+ * -+ * Generic DPAA device driver object for device drivers that are registered -+ * with a DPRC bus. This structure is to be embedded in each device-specific -+ * driver structure. 
-+ */ -+struct fsl_mc_driver { -+ struct device_driver driver; -+ const struct fsl_mc_device_match_id *match_id_table; -+ int (*probe)(struct fsl_mc_device *dev); -+ int (*remove)(struct fsl_mc_device *dev); -+ void (*shutdown)(struct fsl_mc_device *dev); -+ int (*suspend)(struct fsl_mc_device *dev, pm_message_t state); -+ int (*resume)(struct fsl_mc_device *dev); -+}; -+ -+#define to_fsl_mc_driver(_drv) \ -+ container_of(_drv, struct fsl_mc_driver, driver) -+ -+/** -+ * struct fsl_mc_device_match_id - MC object device Id entry for driver matching -+ * @vendor: vendor ID -+ * @obj_type: MC object type -+ * @ver_major: MC object version major number -+ * @ver_minor: MC object version minor number -+ * -+ * Type of entries in the "device Id" table for MC object devices supported by -+ * a MC object device driver. The last entry of the table has vendor set to 0x0 -+ */ -+struct fsl_mc_device_match_id { -+ uint16_t vendor; -+ const char obj_type[16]; -+ uint32_t ver_major; -+ uint32_t ver_minor; -+}; -+ -+/** -+ * enum fsl_mc_pool_type - Types of allocatable MC bus resources -+ * -+ * Entries in these enum are used as indices in the array of resource -+ * pools of an fsl_mc_bus object. -+ */ -+enum fsl_mc_pool_type { -+ FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */ -+ FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */ -+ FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */ -+ FSL_MC_POOL_IRQ, -+ -+ /* -+ * NOTE: New resource pool types must be added before this entry -+ */ -+ FSL_MC_NUM_POOL_TYPES -+}; -+ -+/** -+ * struct fsl_mc_resource - MC generic resource -+ * @type: type of resource -+ * @id: unique MC resource Id within the resources of the same type -+ * @data: pointer to resource-specific data if the resource is currently -+ * allocated, or NULL if the resource is not currently allocated. -+ * @parent_pool: pointer to the parent resource pool from which this -+ * resource is allocated from. 
-+ * @node: Node in the free list of the corresponding resource pool -+ * -+ * NOTE: This structure is to be embedded as a field of specific -+ * MC resource structures. -+ */ -+struct fsl_mc_resource { -+ enum fsl_mc_pool_type type; -+ int32_t id; -+ void *data; -+ struct fsl_mc_resource_pool *parent_pool; -+ struct list_head node; -+}; -+ -+/** -+ * struct fsl_mc_device_irq - MC object device message-based interrupt -+ * @msi_paddr: message-based interrupt physical address -+ * @msi_value: message-based interrupt data value -+ * @irq_number: Linux IRQ number assigned to the interrupt -+ * @mc_dev: MC object device that owns this interrupt -+ * @dev_irq_index: device-relative IRQ index -+ * @resource: MC generic resource associated with the interrupt -+ */ -+struct fsl_mc_device_irq { -+ phys_addr_t msi_paddr; -+ uint32_t msi_value; -+ uint32_t irq_number; -+ struct fsl_mc_device *mc_dev; -+ uint8_t dev_irq_index; -+ struct fsl_mc_resource resource; -+}; -+ -+#define to_fsl_mc_irq(_mc_resource) \ -+ container_of(_mc_resource, struct fsl_mc_device_irq, resource) -+ -+/** -+ * Bit masks for a MC object device (struct fsl_mc_device) flags -+ */ -+#define FSL_MC_IS_DPRC 0x0001 -+ -+/** -+ * root dprc's parent is a platform device -+ * that platform device's bus type is platform_bus_type. -+ */ -+#define is_root_dprc(dev) \ -+ ((to_fsl_mc_device(dev)->flags & FSL_MC_IS_DPRC) && \ -+ ((dev)->bus == &fsl_mc_bus_type) && \ -+ ((dev)->parent->bus == &platform_bus_type)) -+ -+/** -+ * Default DMA mask for devices on a fsl-mc bus -+ */ -+#define FSL_MC_DEFAULT_DMA_MASK (~0ULL) -+ -+/** -+ * struct fsl_mc_device - MC object device object -+ * @dev: Linux driver model device object -+ * @dma_mask: Default DMA mask -+ * @flags: MC object device flags -+ * @icid: Isolation context ID for the device -+ * @mc_handle: MC handle for the corresponding MC object opened -+ * @mc_io: Pointer to MC IO object assigned to this device or -+ * NULL if none. 
-+ * @obj_desc: MC description of the DPAA device -+ * @regions: pointer to array of MMIO region entries -+ * @irqs: pointer to array of pointers to interrupts allocated to this device -+ * @resource: generic resource associated with this MC object device, if any. -+ * @driver_override: Driver name to force a match -+ * -+ * Generic device object for MC object devices that are "attached" to a -+ * MC bus. -+ * -+ * NOTES: -+ * - For a non-DPRC object its icid is the same as its parent DPRC's icid. -+ * - The SMMU notifier callback gets invoked after device_add() has been -+ * called for an MC object device, but before the device-specific probe -+ * callback gets called. -+ * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC -+ * portals. For all other MC objects, their device drivers are responsible for -+ * allocating MC portals for them by calling fsl_mc_portal_allocate(). -+ * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are -+ * treated as resources that can be allocated/deallocated from the -+ * corresponding resource pool in the object's parent DPRC, using the -+ * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects -+ * are known as "allocatable" objects. For them, the corresponding -+ * fsl_mc_device's 'resource' points to the associated resource object. -+ * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI), -+ * 'resource' is NULL. -+ */ -+struct fsl_mc_device { -+ struct device dev; -+ uint64_t dma_mask; -+ uint16_t flags; -+ uint16_t icid; -+ uint16_t mc_handle; -+ struct fsl_mc_io *mc_io; -+ struct dprc_obj_desc obj_desc; -+ struct resource *regions; -+ struct fsl_mc_device_irq **irqs; -+ struct fsl_mc_resource *resource; -+ const char *driver_override; -+}; -+ -+#define to_fsl_mc_device(_dev) \ -+ container_of(_dev, struct fsl_mc_device, dev) -+ -+/* -+ * module_fsl_mc_driver() - Helper macro for drivers that don't do -+ * anything special in module init/exit. 
This eliminates a lot of -+ * boilerplate. Each module may only use this macro once, and -+ * calling it replaces module_init() and module_exit() -+ */ -+#define module_fsl_mc_driver(__fsl_mc_driver) \ -+ module_driver(__fsl_mc_driver, fsl_mc_driver_register, \ -+ fsl_mc_driver_unregister) -+ -+/* -+ * Macro to avoid include chaining to get THIS_MODULE -+ */ -+#define fsl_mc_driver_register(drv) \ -+ __fsl_mc_driver_register(drv, THIS_MODULE) -+ -+int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver, -+ struct module *owner); -+ -+void fsl_mc_driver_unregister(struct fsl_mc_driver *driver); -+ -+bool fsl_mc_interrupts_supported(void); -+ -+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, -+ uint16_t mc_io_flags, -+ struct fsl_mc_io **new_mc_io); -+ -+void fsl_mc_portal_free(struct fsl_mc_io *mc_io); -+ -+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io); -+ -+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, -+ enum fsl_mc_pool_type pool_type, -+ struct fsl_mc_device **new_mc_adev); -+ -+void fsl_mc_object_free(struct fsl_mc_device *mc_adev); -+ -+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); -+ -+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); -+ -+extern struct bus_type fsl_mc_bus_type; -+ -+#endif /* _FSL_MC_H_ */ -diff --git a/drivers/staging/fsl-mc/include/net.h b/drivers/staging/fsl-mc/include/net.h -new file mode 100644 -index 0000000..7480f6a ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/net.h -@@ -0,0 +1,481 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_NET_H -+#define __FSL_NET_H -+ -+#define LAST_HDR_INDEX 0xFFFFFFFF -+ -+/*****************************************************************************/ -+/* Protocol fields */ -+/*****************************************************************************/ -+ -+/************************* Ethernet fields *********************************/ -+#define NH_FLD_ETH_DA (1) -+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) -+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) -+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) -+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) -+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) -+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) -+ -+#define NH_FLD_ETH_ADDR_SIZE 6 -+ -+/*************************** VLAN fields ***********************************/ -+#define NH_FLD_VLAN_VPRI (1) -+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) -+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) -+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) -+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) -+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) -+ -+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ -+ NH_FLD_VLAN_CFI | \ -+ NH_FLD_VLAN_VID) -+ -+/************************ IP (generic) fields ******************************/ -+#define NH_FLD_IP_VER (1) -+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) -+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) -+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) -+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) -+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) -+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) -+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) -+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) -+ -+#define NH_FLD_IP_PROTO_SIZE 1 -+ -+/***************************** IPV4 fields *********************************/ -+#define NH_FLD_IPV4_VER (1) -+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) -+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) -+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 
3) -+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4) -+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) -+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) -+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) -+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) -+#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) -+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) -+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) -+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) -+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) -+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) -+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) -+ -+#define NH_FLD_IPV4_ADDR_SIZE 4 -+#define NH_FLD_IPV4_PROTO_SIZE 1 -+ -+/***************************** IPV6 fields *********************************/ -+#define NH_FLD_IPV6_VER (1) -+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1) -+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2) -+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3) -+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4) -+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5) -+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6) -+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7) -+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1) -+ -+#define NH_FLD_IPV6_ADDR_SIZE 16 -+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1 -+ -+/***************************** ICMP fields *********************************/ -+#define NH_FLD_ICMP_TYPE (1) -+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1) -+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2) -+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3) -+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4) -+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1) -+ -+#define NH_FLD_ICMP_CODE_SIZE 1 -+#define NH_FLD_ICMP_TYPE_SIZE 1 -+ -+/***************************** IGMP fields *********************************/ -+#define NH_FLD_IGMP_VERSION (1) -+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1) -+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2) 
-+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3) -+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1) -+ -+/***************************** TCP fields **********************************/ -+#define NH_FLD_TCP_PORT_SRC (1) -+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1) -+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2) -+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3) -+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4) -+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5) -+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6) -+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7) -+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8) -+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9) -+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10) -+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1) -+ -+#define NH_FLD_TCP_PORT_SIZE 2 -+ -+/***************************** UDP fields **********************************/ -+#define NH_FLD_UDP_PORT_SRC (1) -+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1) -+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2) -+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3) -+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1) -+ -+#define NH_FLD_UDP_PORT_SIZE 2 -+ -+/*************************** UDP-lite fields *******************************/ -+#define NH_FLD_UDP_LITE_PORT_SRC (1) -+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1) -+#define NH_FLD_UDP_LITE_ALL_FIELDS \ -+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1) -+ -+#define NH_FLD_UDP_LITE_PORT_SIZE 2 -+ -+/*************************** UDP-encap-ESP fields **************************/ -+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1) -+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1) -+#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2) -+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3) -+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4) -+#define 
NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5) -+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \ -+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1) -+ -+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2 -+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4 -+ -+/***************************** SCTP fields *********************************/ -+#define NH_FLD_SCTP_PORT_SRC (1) -+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1) -+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2) -+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3) -+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1) -+ -+#define NH_FLD_SCTP_PORT_SIZE 2 -+ -+/***************************** DCCP fields *********************************/ -+#define NH_FLD_DCCP_PORT_SRC (1) -+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1) -+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1) -+ -+#define NH_FLD_DCCP_PORT_SIZE 2 -+ -+/***************************** IPHC fields *********************************/ -+#define NH_FLD_IPHC_CID (1) -+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1) -+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2) -+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3) -+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4) -+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1) -+ -+/***************************** SCTP fields *********************************/ -+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1) -+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1) -+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2) -+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3) -+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4) -+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5) -+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6) -+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7) -+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING 
(NH_FLD_SCTP_CHUNK_DATA_TYPE << 8) -+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9) -+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \ -+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1) -+ -+/*************************** L2TPV2 fields *********************************/ -+#define NH_FLD_L2TPV2_TYPE_BIT (1) -+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1) -+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2) -+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3) -+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4) -+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5) -+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6) -+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7) -+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8) -+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9) -+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10) -+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11) -+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12) -+#define NH_FLD_L2TPV2_ALL_FIELDS \ -+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1) -+ -+/*************************** L2TPV3 fields *********************************/ -+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1) -+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1) -+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2) -+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3) -+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4) -+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5) -+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6) -+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7) -+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8) -+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \ -+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1) -+ -+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1) 
-+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1) -+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2) -+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3) -+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \ -+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1) -+ -+/**************************** PPP fields ***********************************/ -+#define NH_FLD_PPP_PID (1) -+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1) -+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1) -+ -+/************************** PPPoE fields ***********************************/ -+#define NH_FLD_PPPOE_VER (1) -+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1) -+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2) -+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3) -+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4) -+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5) -+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6) -+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1) -+ -+/************************* PPP-Mux fields **********************************/ -+#define NH_FLD_PPPMUX_PID (1) -+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1) -+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2) -+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1) -+ -+/*********************** PPP-Mux sub-frame fields **************************/ -+#define NH_FLD_PPPMUX_SUBFRM_PFF (1) -+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1) -+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2) -+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3) -+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4) -+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \ -+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1) -+ -+/*************************** LLC fields ************************************/ -+#define NH_FLD_LLC_DSAP (1) -+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1) -+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2) 
-+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1) -+ -+/*************************** NLPID fields **********************************/ -+#define NH_FLD_NLPID_NLPID (1) -+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1) -+ -+/*************************** SNAP fields ***********************************/ -+#define NH_FLD_SNAP_OUI (1) -+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1) -+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1) -+ -+/*************************** LLC SNAP fields *******************************/ -+#define NH_FLD_LLC_SNAP_TYPE (1) -+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1) -+ -+#define NH_FLD_ARP_HTYPE (1) -+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1) -+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2) -+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3) -+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4) -+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5) -+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6) -+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7) -+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8) -+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1) -+ -+/*************************** RFC2684 fields ********************************/ -+#define NH_FLD_RFC2684_LLC (1) -+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1) -+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2) -+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3) -+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4) -+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5) -+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1) -+ -+/*************************** User defined fields ***************************/ -+#define NH_FLD_USER_DEFINED_SRCPORT (1) -+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1) -+#define NH_FLD_USER_DEFINED_ALL_FIELDS \ -+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1) -+ -+/*************************** Payload fields ********************************/ -+#define 
NH_FLD_PAYLOAD_BUFFER (1) -+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1) -+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2) -+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3) -+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4) -+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5) -+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1) -+ -+/*************************** GRE fields ************************************/ -+#define NH_FLD_GRE_TYPE (1) -+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1) -+ -+/*************************** MINENCAP fields *******************************/ -+#define NH_FLD_MINENCAP_SRC_IP (1) -+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1) -+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2) -+#define NH_FLD_MINENCAP_ALL_FIELDS \ -+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1) -+ -+/*************************** IPSEC AH fields *******************************/ -+#define NH_FLD_IPSEC_AH_SPI (1) -+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1) -+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1) -+ -+/*************************** IPSEC ESP fields ******************************/ -+#define NH_FLD_IPSEC_ESP_SPI (1) -+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1) -+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1) -+ -+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4 -+ -+/*************************** MPLS fields ***********************************/ -+#define NH_FLD_MPLS_LABEL_STACK (1) -+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \ -+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1) -+ -+/*************************** MACSEC fields *********************************/ -+#define NH_FLD_MACSEC_SECTAG (1) -+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1) -+ -+/*************************** GTP fields ************************************/ -+#define NH_FLD_GTP_TEID (1) -+ -+ -+/* Protocol options */ -+ -+/* Ethernet options */ 
-+#define NH_OPT_ETH_BROADCAST 1 -+#define NH_OPT_ETH_MULTICAST 2 -+#define NH_OPT_ETH_UNICAST 3 -+#define NH_OPT_ETH_BPDU 4 -+ -+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01) -+/* also applicable for broadcast */ -+ -+/* VLAN options */ -+#define NH_OPT_VLAN_CFI 1 -+ -+/* IPV4 options */ -+#define NH_OPT_IPV4_UNICAST 1 -+#define NH_OPT_IPV4_MULTICAST 2 -+#define NH_OPT_IPV4_BROADCAST 3 -+#define NH_OPT_IPV4_OPTION 4 -+#define NH_OPT_IPV4_FRAG 5 -+#define NH_OPT_IPV4_INITIAL_FRAG 6 -+ -+/* IPV6 options */ -+#define NH_OPT_IPV6_UNICAST 1 -+#define NH_OPT_IPV6_MULTICAST 2 -+#define NH_OPT_IPV6_OPTION 3 -+#define NH_OPT_IPV6_FRAG 4 -+#define NH_OPT_IPV6_INITIAL_FRAG 5 -+ -+/* General IP options (may be used for any version) */ -+#define NH_OPT_IP_FRAG 1 -+#define NH_OPT_IP_INITIAL_FRAG 2 -+#define NH_OPT_IP_OPTION 3 -+ -+/* Minenc. options */ -+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1 -+ -+/* GRE. options */ -+#define NH_OPT_GRE_ROUTING_PRESENT 1 -+ -+/* TCP options */ -+#define NH_OPT_TCP_OPTIONS 1 -+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2 -+#define NH_OPT_TCP_CONTROL_LOW_BITS 3 -+ -+/* CAPWAP options */ -+#define NH_OPT_CAPWAP_DTLS 1 -+ -+enum net_prot { -+ NET_PROT_NONE = 0, -+ NET_PROT_PAYLOAD, -+ NET_PROT_ETH, -+ NET_PROT_VLAN, -+ NET_PROT_IPV4, -+ NET_PROT_IPV6, -+ NET_PROT_IP, -+ NET_PROT_TCP, -+ NET_PROT_UDP, -+ NET_PROT_UDP_LITE, -+ NET_PROT_IPHC, -+ NET_PROT_SCTP, -+ NET_PROT_SCTP_CHUNK_DATA, -+ NET_PROT_PPPOE, -+ NET_PROT_PPP, -+ NET_PROT_PPPMUX, -+ NET_PROT_PPPMUX_SUBFRM, -+ NET_PROT_L2TPV2, -+ NET_PROT_L2TPV3_CTRL, -+ NET_PROT_L2TPV3_SESS, -+ NET_PROT_LLC, -+ NET_PROT_LLC_SNAP, -+ NET_PROT_NLPID, -+ NET_PROT_SNAP, -+ NET_PROT_MPLS, -+ NET_PROT_IPSEC_AH, -+ NET_PROT_IPSEC_ESP, -+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ -+ NET_PROT_MACSEC, -+ NET_PROT_GRE, -+ NET_PROT_MINENCAP, -+ NET_PROT_DCCP, -+ NET_PROT_ICMP, -+ NET_PROT_IGMP, -+ NET_PROT_ARP, -+ NET_PROT_CAPWAP_DATA, -+ NET_PROT_CAPWAP_CTRL, -+ NET_PROT_RFC2684, -+ NET_PROT_ICMPV6, -+ 
NET_PROT_FCOE, -+ NET_PROT_FIP, -+ NET_PROT_ISCSI, -+ NET_PROT_GTP, -+ NET_PROT_USER_DEFINED_L2, -+ NET_PROT_USER_DEFINED_L3, -+ NET_PROT_USER_DEFINED_L4, -+ NET_PROT_USER_DEFINED_L5, -+ NET_PROT_USER_DEFINED_SHIM1, -+ NET_PROT_USER_DEFINED_SHIM2, -+ -+ NET_PROT_DUMMY_LAST -+}; -+ -+/*! IEEE8021.Q */ -+#define NH_IEEE8021Q_ETYPE 0x8100 -+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \ -+ ((((uint32_t)(etype & 0xFFFF)) << 16) | \ -+ (((uint32_t)(pcp & 0x07)) << 13) | \ -+ (((uint32_t)(dei & 0x01)) << 12) | \ -+ (((uint32_t)(vlan_id & 0xFFF)))) -+ -+#endif /* __FSL_NET_H */ -diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c -index b9ddf0c..894894f 100644 ---- a/drivers/usb/core/config.c -+++ b/drivers/usb/core/config.c -@@ -115,7 +115,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, - USB_SS_MULT(desc->bmAttributes) > 3) { - dev_warn(ddev, "Isoc endpoint has Mult of %d in " - "config %d interface %d altsetting %d ep %d: " -- "setting to 3\n", desc->bmAttributes + 1, -+ "setting to 3\n", -+ USB_SS_MULT(desc->bmAttributes), - cfgno, inum, asnum, ep->desc.bEndpointAddress); - ep->ss_ep_comp.bmAttributes = 2; - } -diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c -index d7a6d8b..66be3b4 100644 ---- a/drivers/usb/core/driver.c -+++ b/drivers/usb/core/driver.c -@@ -499,11 +499,15 @@ static int usb_unbind_interface(struct device *dev) - int usb_driver_claim_interface(struct usb_driver *driver, - struct usb_interface *iface, void *priv) - { -- struct device *dev = &iface->dev; -+ struct device *dev; - struct usb_device *udev; - int retval = 0; - int lpm_disable_error; - -+ if (!iface) -+ return -ENODEV; -+ -+ dev = &iface->dev; - if (dev->driver) - return -EBUSY; - -diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c -index efc9531..a4c0b85 100644 ---- a/drivers/usb/core/hcd-pci.c -+++ b/drivers/usb/core/hcd-pci.c -@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, 
struct usb_hcd *hcd, - if (companion->bus != pdev->bus || - PCI_SLOT(companion->devfn) != slot) - continue; -+ -+ /* -+ * Companion device should be either UHCI,OHCI or EHCI host -+ * controller, otherwise skip. -+ */ -+ if (companion->class != CL_UHCI && companion->class != CL_OHCI && -+ companion->class != CL_EHCI) -+ continue; -+ - companion_hcd = pci_get_drvdata(companion); - if (!companion_hcd || !companion_hcd->self.root_hub) - continue; -diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c -index 2222899..d8e1d5c 100644 ---- a/drivers/usb/core/hub.c -+++ b/drivers/usb/core/hub.c -@@ -124,6 +124,10 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) - - static int usb_device_supports_lpm(struct usb_device *udev) - { -+ /* Some devices have trouble with LPM */ -+ if (udev->quirks & USB_QUIRK_NO_LPM) -+ return 0; -+ - /* USB 2.1 (and greater) devices indicate LPM support through - * their USB 2.0 Extended Capabilities BOS descriptor. - */ -@@ -1030,10 +1034,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) - unsigned delay; - - /* Continue a partial initialization */ -- if (type == HUB_INIT2) -- goto init2; -- if (type == HUB_INIT3) -+ if (type == HUB_INIT2 || type == HUB_INIT3) { -+ device_lock(hub->intfdev); -+ -+ /* Was the hub disconnected while we were waiting? 
*/ -+ if (hub->disconnected) { -+ device_unlock(hub->intfdev); -+ kref_put(&hub->kref, hub_release); -+ return; -+ } -+ if (type == HUB_INIT2) -+ goto init2; - goto init3; -+ } -+ kref_get(&hub->kref); - - /* The superspeed hub except for root hub has to use Hub Depth - * value as an offset into the route string to locate the bits -@@ -1231,6 +1245,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) - queue_delayed_work(system_power_efficient_wq, - &hub->init_work, - msecs_to_jiffies(delay)); -+ device_unlock(hub->intfdev); - return; /* Continues at init3: below */ - } else { - msleep(delay); -@@ -1252,6 +1267,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) - /* Allow autosuspend if it was suppressed */ - if (type <= HUB_INIT3) - usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); -+ -+ if (type == HUB_INIT2 || type == HUB_INIT3) -+ device_unlock(hub->intfdev); -+ -+ kref_put(&hub->kref, hub_release); - } - - /* Implement the continuations for the delays above */ -@@ -4222,7 +4242,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - { - struct usb_device *hdev = hub->hdev; - struct usb_hcd *hcd = bus_to_hcd(hdev->bus); -- int i, j, retval; -+ int retries, operations, retval, i; - unsigned delay = HUB_SHORT_RESET_TIME; - enum usb_device_speed oldspeed = udev->speed; - const char *speed; -@@ -4324,7 +4344,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - * first 8 bytes of the device descriptor to get the ep0 maxpacket - * value. - */ -- for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) { -+ for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) { - bool did_new_scheme = false; - - if (use_new_scheme(udev, retry_counter)) { -@@ -4351,7 +4371,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - * 255 is for WUSB devices, we actually need to use - * 512 (WUSB1.0[4.8.1]). 
- */ -- for (j = 0; j < 3; ++j) { -+ for (operations = 0; operations < 3; ++operations) { - buf->bMaxPacketSize0 = 0; - r = usb_control_msg(udev, usb_rcvaddr0pipe(), - USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, -@@ -4371,7 +4391,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - r = -EPROTO; - break; - } -- if (r == 0) -+ /* -+ * Some devices time out if they are powered on -+ * when already connected. They need a second -+ * reset. But only on the first attempt, -+ * lest we get into a time out/reset loop -+ */ -+ if (r == 0 || (r == -ETIMEDOUT && retries == 0)) - break; - } - udev->descriptor.bMaxPacketSize0 = -@@ -4403,7 +4429,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - * authorization will assign the final address. - */ - if (udev->wusb == 0) { -- for (j = 0; j < SET_ADDRESS_TRIES; ++j) { -+ for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) { - retval = hub_set_address(udev, devnum); - if (retval >= 0) - break; -@@ -4498,6 +4524,8 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - goto fail; - } - -+ usb_detect_quirks(udev); -+ - if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { - retval = usb_get_bos_descriptor(udev); - if (!retval) { -@@ -4692,7 +4720,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, - if (status < 0) - goto loop; - -- usb_detect_quirks(udev); - if (udev->quirks & USB_QUIRK_DELAY_INIT) - msleep(1000); - -@@ -5324,9 +5351,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev) - if (udev->usb2_hw_lpm_enabled == 1) - usb_set_usb2_hardware_lpm(udev, 0); - -- bos = udev->bos; -- udev->bos = NULL; -- - /* Disable LPM and LTM while we reset the device and reinstall the alt - * settings. 
Device-initiated LPM settings, and system exit latency - * settings are cleared when the device is reset, so we have to set -@@ -5335,15 +5359,17 @@ static int usb_reset_and_verify_device(struct usb_device *udev) - ret = usb_unlocked_disable_lpm(udev); - if (ret) { - dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__); -- goto re_enumerate; -+ goto re_enumerate_no_bos; - } - ret = usb_disable_ltm(udev); - if (ret) { - dev_err(&udev->dev, "%s Failed to disable LTM\n.", - __func__); -- goto re_enumerate; -+ goto re_enumerate_no_bos; - } - -+ bos = udev->bos; -+ - for (i = 0; i < SET_CONFIG_TRIES; ++i) { - - /* ep0 maxpacket size may change; let the HCD know about it. -@@ -5435,15 +5461,19 @@ done: - usb_set_usb2_hardware_lpm(udev, 1); - usb_unlocked_enable_lpm(udev); - usb_enable_ltm(udev); -- usb_release_bos_descriptor(udev); -- udev->bos = bos; -+ /* release the new BOS descriptor allocated by hub_port_init() */ -+ if (udev->bos != bos) { -+ usb_release_bos_descriptor(udev); -+ udev->bos = bos; -+ } - return 0; - - re_enumerate: -- /* LPM state doesn't matter when we're about to destroy the device. */ -- hub_port_logical_disconnect(parent_hub, port1); - usb_release_bos_descriptor(udev); - udev->bos = bos; -+re_enumerate_no_bos: -+ /* LPM state doesn't matter when we're about to destroy the device. 
*/ -+ hub_port_logical_disconnect(parent_hub, port1); - return -ENODEV; - } - -diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c -index 8a77a41..6b53fc3 100644 ---- a/drivers/usb/core/quirks.c -+++ b/drivers/usb/core/quirks.c -@@ -196,6 +196,12 @@ static const struct usb_device_id usb_quirk_list[] = { - { USB_DEVICE(0x1a0a, 0x0200), .driver_info = - USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, - -+ /* Blackmagic Design Intensity Shuttle */ -+ { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM }, -+ -+ /* Blackmagic Design UltraStudio SDI */ -+ { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, -+ - { } /* terminating entry must be last */ - }; - -diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c -index b0f4d52..17eeab8 100644 ---- a/drivers/usb/dwc3/core.c -+++ b/drivers/usb/dwc3/core.c -@@ -673,22 +673,20 @@ static int dwc3_probe(struct platform_device *pdev) - * since it will be requested by the xhci-plat driver. - */ - regs = devm_ioremap_resource(dev, res); -- if (IS_ERR(regs)) -- return PTR_ERR(regs); -+ if (IS_ERR(regs)) { -+ ret = PTR_ERR(regs); -+ goto err0; -+ } - - dwc->regs = regs; - dwc->regs_size = resource_size(res); -- /* -- * restore res->start back to its original value so that, -- * in case the probe is deferred, we don't end up getting error in -- * request the memory region the next time probe is called. 
-- */ -- res->start -= DWC3_GLOBALS_REGS_START; - - if (node) { - dwc->maximum_speed = of_usb_get_maximum_speed(node); - - dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize"); -+ dwc->configure_gfladj = -+ of_property_read_bool(node, "configure-gfladj"); - dwc->dr_mode = of_usb_get_dr_mode(node); - } else if (pdata) { - dwc->maximum_speed = pdata->maximum_speed; -@@ -703,7 +701,7 @@ static int dwc3_probe(struct platform_device *pdev) - - ret = dwc3_core_get_phy(dwc); - if (ret) -- return ret; -+ goto err0; - - spin_lock_init(&dwc->lock); - platform_set_drvdata(pdev, dwc); -@@ -722,7 +720,25 @@ static int dwc3_probe(struct platform_device *pdev) - if (ret) { - dev_err(dwc->dev, "failed to allocate event buffers\n"); - ret = -ENOMEM; -- goto err0; -+ goto err1; -+ } -+ -+ /* Adjust Frame Length */ -+ if (dwc->configure_gfladj) -+ dwc3_writel(dwc->regs, DWC3_GFLADJ, GFLADJ_30MHZ_REG_SEL | -+ GFLADJ_30MHZ(GFLADJ_30MHZ_DEFAULT)); -+ -+ /* Change burst beat and outstanding pipelined transfers requests */ -+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, -+ (dwc3_readl(dwc->regs, DWC3_GSBUSCFG0) & ~0xff) | 0xf); -+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG1, -+ dwc3_readl(dwc->regs, DWC3_GSBUSCFG1) | 0xf00); -+ -+ /* Enable Snooping */ -+ if (node && of_dma_is_coherent(node)) { -+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, -+ dwc3_readl(dwc->regs, DWC3_GSBUSCFG0) | 0x22220000); -+ dev_dbg(dev, "enabled snooping for usb\n"); - } - - if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) -@@ -736,65 +752,81 @@ static int dwc3_probe(struct platform_device *pdev) - ret = dwc3_core_init(dwc); - if (ret) { - dev_err(dev, "failed to initialize core\n"); -- goto err0; -+ goto err1; - } - - usb_phy_set_suspend(dwc->usb2_phy, 0); - usb_phy_set_suspend(dwc->usb3_phy, 0); - ret = phy_power_on(dwc->usb2_generic_phy); - if (ret < 0) -- goto err1; -+ goto err2; - - ret = phy_power_on(dwc->usb3_generic_phy); - if (ret < 0) -- goto err_usb2phy_power; -+ goto err3; - - ret = 
dwc3_event_buffers_setup(dwc); - if (ret) { - dev_err(dwc->dev, "failed to setup event buffers\n"); -- goto err_usb3phy_power; -+ goto err4; - } - - ret = dwc3_core_init_mode(dwc); - if (ret) -- goto err2; -+ goto err5; - - ret = dwc3_debugfs_init(dwc); - if (ret) { - dev_err(dev, "failed to initialize debugfs\n"); -- goto err3; -+ goto err6; - } - - pm_runtime_allow(dev); - - return 0; - --err3: -+err6: - dwc3_core_exit_mode(dwc); - --err2: -+err5: - dwc3_event_buffers_cleanup(dwc); - --err_usb3phy_power: -+err4: - phy_power_off(dwc->usb3_generic_phy); - --err_usb2phy_power: -+err3: - phy_power_off(dwc->usb2_generic_phy); - --err1: -+err2: - usb_phy_set_suspend(dwc->usb2_phy, 1); - usb_phy_set_suspend(dwc->usb3_phy, 1); - dwc3_core_exit(dwc); - --err0: -+err1: - dwc3_free_event_buffers(dwc); - -+err0: -+ /* -+ * restore res->start back to its original value so that, in case the -+ * probe is deferred, we don't end up getting error in request the -+ * memory region the next time probe is called. -+ */ -+ res->start -= DWC3_GLOBALS_REGS_START; -+ - return ret; - } - - static int dwc3_remove(struct platform_device *pdev) - { - struct dwc3 *dwc = platform_get_drvdata(pdev); -+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ -+ /* -+ * restore res->start back to its original value so that, in case the -+ * probe is deferred, we don't end up getting error in request the -+ * memory region the next time probe is called. 
-+ */ -+ res->start -= DWC3_GLOBALS_REGS_START; - - dwc3_debugfs_exit(dwc); - dwc3_core_exit_mode(dwc); -diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h -index 66f6256..aec8953 100644 ---- a/drivers/usb/dwc3/core.h -+++ b/drivers/usb/dwc3/core.h -@@ -26,6 +26,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -123,6 +124,7 @@ - #define DWC3_GEVNTCOUNT(n) (0xc40c + (n * 0x10)) - - #define DWC3_GHWPARAMS8 0xc600 -+#define DWC3_GFLADJ 0xc630 - - /* Device Registers */ - #define DWC3_DCFG 0xc700 -@@ -210,6 +212,11 @@ - #define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n) (((n) & (0x0f << 13)) >> 13) - #define DWC3_MAX_HIBER_SCRATCHBUFS 15 - -+/* Global Frame Length Adjustment Register */ -+#define GFLADJ_30MHZ_REG_SEL (1 << 7) -+#define GFLADJ_30MHZ(n) ((n) & 0x3f) -+#define GFLADJ_30MHZ_DEFAULT 0x20 -+ - /* Device Configuration Register */ - #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3) - #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f) -@@ -766,6 +773,7 @@ struct dwc3 { - unsigned has_hibernation:1; - unsigned is_selfpowered:1; - unsigned needs_fifo_resize:1; -+ unsigned configure_gfladj:1; - unsigned pullups_connected:1; - unsigned resize_fifos:1; - unsigned setup_packet_pending:1; -diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c -index dcb8ca0..c41d46c 100644 ---- a/drivers/usb/dwc3/host.c -+++ b/drivers/usb/dwc3/host.c -@@ -39,6 +39,12 @@ int dwc3_host_init(struct dwc3 *dwc) - xhci->dev.dma_mask = dwc->dev->dma_mask; - xhci->dev.dma_parms = dwc->dev->dma_parms; - -+ /* set DMA operations */ -+ if (dwc->dev->of_node && of_dma_is_coherent(dwc->dev->of_node)) { -+ xhci->dev.archdata.dma_ops = dwc->dev->archdata.dma_ops; -+ dev_dbg(dwc->dev, "set dma_ops for usb\n"); -+ } -+ - dwc->xhci = xhci; - - ret = platform_device_add_resources(xhci, dwc->xhci_resources, -diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c -index 7e5c90e..c6027ac 100644 ---- a/drivers/usb/host/xhci-pci.c -+++ 
b/drivers/usb/host/xhci-pci.c -@@ -23,10 +23,17 @@ - #include - #include - #include -+#include - - #include "xhci.h" - #include "xhci-trace.h" - -+#define SSIC_PORT_NUM 2 -+#define SSIC_PORT_CFG2 0x880c -+#define SSIC_PORT_CFG2_OFFSET 0x30 -+#define PROG_DONE (1 << 30) -+#define SSIC_PORT_UNUSED (1 << 31) -+ - /* Device for a quirk */ - #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 - #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 -@@ -40,6 +47,8 @@ - #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 - #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f - #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f -+#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 -+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 - - static const char hcd_name[] = "xhci_hcd"; - -@@ -140,9 +149,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) - if (pdev->vendor == PCI_VENDOR_ID_INTEL && - (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || -- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) { -+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || -+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || -+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) { - xhci->quirks |= XHCI_PME_STUCK_QUIRK; - } -+ if (pdev->vendor == PCI_VENDOR_ID_INTEL && -+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { -+ xhci->quirks |= XHCI_SSIC_PORT_UNUSED; -+ } - if (pdev->vendor == PCI_VENDOR_ID_ETRON && - pdev->device == PCI_DEVICE_ID_EJ168) { - xhci->quirks |= XHCI_RESET_ON_RESUME; -@@ -169,20 +184,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) - "QUIRK: Resetting on resume"); - } - --/* -- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear -- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 -- */ --static void xhci_pme_quirk(struct xhci_hcd *xhci) -+#ifdef CONFIG_ACPI -+static void 
xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) - { -- u32 val; -- void __iomem *reg; -- -- reg = (void __iomem *) xhci->cap_regs + 0x80a4; -- val = readl(reg); -- writel(val | BIT(28), reg); -- readl(reg); -+ static const u8 intel_dsm_uuid[] = { -+ 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45, -+ 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23, -+ }; -+ acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL); - } -+#else -+ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } -+#endif /* CONFIG_ACPI */ - - /* called during probe() after chip reset completes */ - static int xhci_pci_setup(struct usb_hcd *hcd) -@@ -263,6 +276,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) - HCC_MAX_PSA(xhci->hcc_params) >= 4) - xhci->shared_hcd->can_do_streams = 1; - -+ if (xhci->quirks & XHCI_PME_STUCK_QUIRK) -+ xhci_pme_acpi_rtd3_enable(dev); -+ - /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ - pm_runtime_put_noidle(&dev->dev); - -@@ -282,6 +298,7 @@ static void xhci_pci_remove(struct pci_dev *dev) - struct xhci_hcd *xhci; - - xhci = hcd_to_xhci(pci_get_drvdata(dev)); -+ xhci->xhc_state |= XHCI_STATE_REMOVING; - if (xhci->shared_hcd) { - usb_remove_hcd(xhci->shared_hcd); - usb_put_hcd(xhci->shared_hcd); -@@ -296,10 +313,65 @@ static void xhci_pci_remove(struct pci_dev *dev) - } - - #ifdef CONFIG_PM -+/* -+ * In some Intel xHCI controllers, in order to get D3 working, -+ * through a vendor specific SSIC CONFIG register at offset 0x883c, -+ * SSIC PORT need to be marked as "unused" before putting xHCI -+ * into D3. After D3 exit, the SSIC port need to be marked as "used". -+ * Without this change, xHCI might not enter D3 state. 
-+ */ -+static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend) -+{ -+ struct xhci_hcd *xhci = hcd_to_xhci(hcd); -+ u32 val; -+ void __iomem *reg; -+ int i; -+ -+ for (i = 0; i < SSIC_PORT_NUM; i++) { -+ reg = (void __iomem *) xhci->cap_regs + -+ SSIC_PORT_CFG2 + -+ i * SSIC_PORT_CFG2_OFFSET; -+ -+ /* Notify SSIC that SSIC profile programming is not done. */ -+ val = readl(reg) & ~PROG_DONE; -+ writel(val, reg); -+ -+ /* Mark SSIC port as unused(suspend) or used(resume) */ -+ val = readl(reg); -+ if (suspend) -+ val |= SSIC_PORT_UNUSED; -+ else -+ val &= ~SSIC_PORT_UNUSED; -+ writel(val, reg); -+ -+ /* Notify SSIC that SSIC profile programming is done */ -+ val = readl(reg) | PROG_DONE; -+ writel(val, reg); -+ readl(reg); -+ } -+} -+ -+/* -+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear -+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 -+ */ -+static void xhci_pme_quirk(struct usb_hcd *hcd) -+{ -+ struct xhci_hcd *xhci = hcd_to_xhci(hcd); -+ void __iomem *reg; -+ u32 val; -+ -+ reg = (void __iomem *) xhci->cap_regs + 0x80a4; -+ val = readl(reg); -+ writel(val | BIT(28), reg); -+ readl(reg); -+} -+ - static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) - { - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); -+ int ret; - - /* - * Systems with the TI redriver that loses port status change events -@@ -309,9 +381,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) - pdev->no_d3cold = true; - - if (xhci->quirks & XHCI_PME_STUCK_QUIRK) -- xhci_pme_quirk(xhci); -+ xhci_pme_quirk(hcd); -+ -+ if (xhci->quirks & XHCI_SSIC_PORT_UNUSED) -+ xhci_ssic_port_unused_quirk(hcd, true); - -- return xhci_suspend(xhci, do_wakeup); -+ ret = xhci_suspend(xhci, do_wakeup); -+ if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED)) -+ xhci_ssic_port_unused_quirk(hcd, false); -+ -+ return ret; - } - - static int xhci_pci_resume(struct 
usb_hcd *hcd, bool hibernated) -@@ -341,8 +420,11 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) - if (pdev->vendor == PCI_VENDOR_ID_INTEL) - usb_enable_intel_xhci_ports(pdev); - -+ if (xhci->quirks & XHCI_SSIC_PORT_UNUSED) -+ xhci_ssic_port_unused_quirk(hcd, false); -+ - if (xhci->quirks & XHCI_PME_STUCK_QUIRK) -- xhci_pme_quirk(xhci); -+ xhci_pme_quirk(hcd); - - retval = xhci_resume(xhci, hibernated); - return retval; -diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c -index 1e5fb8c..04e7525 100644 ---- a/drivers/usb/host/xhci-ring.c -+++ b/drivers/usb/host/xhci-ring.c -@@ -3840,8 +3840,12 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, - { - int reserved_trbs = xhci->cmd_ring_reserved_trbs; - int ret; -- if (xhci->xhc_state & XHCI_STATE_DYING) -+ -+ if ((xhci->xhc_state & XHCI_STATE_DYING) || -+ (xhci->xhc_state & XHCI_STATE_HALTED)) { -+ xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); - return -ESHUTDOWN; -+ } - - if (!command_must_succeed) - reserved_trbs++; -diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c -index 98380fa..600a137 100644 ---- a/drivers/usb/host/xhci.c -+++ b/drivers/usb/host/xhci.c -@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci) - "waited %u microseconds.\n", - XHCI_MAX_HALT_USEC); - if (!ret) -- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING); -+ /* clear state flags. Including dying, halted or removing */ -+ xhci->xhc_state = 0; - - return ret; - } -@@ -1102,8 +1103,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) - /* Resume root hubs only when have pending events. */ - status = readl(&xhci->op_regs->status); - if (status & STS_EINT) { -- usb_hcd_resume_root_hub(hcd); - usb_hcd_resume_root_hub(xhci->shared_hcd); -+ usb_hcd_resume_root_hub(hcd); - } - } - -@@ -1118,10 +1119,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) - - /* Re-enable port polling. 
*/ - xhci_dbg(xhci, "%s: starting port polling.\n", __func__); -- set_bit(HCD_FLAG_POLL_RH, &hcd->flags); -- usb_hcd_poll_rh_status(hcd); - set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); - usb_hcd_poll_rh_status(xhci->shared_hcd); -+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags); -+ usb_hcd_poll_rh_status(hcd); - - return retval; - } -@@ -1548,7 +1549,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "HW died, freeing TD."); - urb_priv = urb->hcpriv; -- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { -+ for (i = urb_priv->td_cnt; -+ i < urb_priv->length && xhci->devs[urb->dev->slot_id]; -+ i++) { - td = urb_priv->td[i]; - if (!list_empty(&td->td_list)) - list_del_init(&td->td_list); -@@ -1682,8 +1685,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, - cpu_to_le32(EP_STATE_DISABLED)) || - le32_to_cpu(ctrl_ctx->drop_flags) & - xhci_get_endpoint_flag(&ep->desc)) { -- xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", -- __func__, ep); -+ /* Do not warn when called after a usb_device_reset */ -+ if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) -+ xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", -+ __func__, ep); - return 0; - } - -@@ -2751,7 +2756,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) - if (ret <= 0) - return ret; - xhci = hcd_to_xhci(hcd); -- if (xhci->xhc_state & XHCI_STATE_DYING) -+ if ((xhci->xhc_state & XHCI_STATE_DYING) || -+ (xhci->xhc_state & XHCI_STATE_REMOVING)) - return -ENODEV; - - xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); -@@ -3793,7 +3799,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, - u64 temp_64; - struct xhci_command *command; - -- if (xhci->xhc_state) /* dying or halted */ -+ if (xhci->xhc_state) /* dying, removing or halted */ - return -EINVAL; - - if (!udev->slot_id) { -@@ -4912,6 +4918,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, 
xhci_get_quirks_t get_quirks) - goto error; - xhci_dbg(xhci, "Reset complete\n"); - -+ /* -+ * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) -+ * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit -+ * address memory pointers actually. So, this driver clears the AC64 -+ * bit of xhci->hcc_params to call dma_set_coherent_mask(dev, -+ * DMA_BIT_MASK(32)) in this xhci_gen_setup(). -+ */ -+ if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) -+ xhci->hcc_params &= ~BIT(0); -+ - /* Set dma_mask and coherent_dma_mask to 64-bits, - * if xHC supports 64-bit addressing */ - if (HCC_64BIT_ADDR(xhci->hcc_params) && -diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h -index 54f386f..3850cb2 100644 ---- a/drivers/usb/host/xhci.h -+++ b/drivers/usb/host/xhci.h -@@ -1531,6 +1531,7 @@ struct xhci_hcd { - */ - #define XHCI_STATE_DYING (1 << 0) - #define XHCI_STATE_HALTED (1 << 1) -+#define XHCI_STATE_REMOVING (1 << 2) - /* Statistics */ - int error_bitmask; - unsigned int quirks; -@@ -1565,6 +1566,8 @@ struct xhci_hcd { - /* For controllers with a broken beyond repair streams implementation */ - #define XHCI_BROKEN_STREAMS (1 << 19) - #define XHCI_PME_STUCK_QUIRK (1 << 20) -+#define XHCI_SSIC_PORT_UNUSED (1 << 22) -+#define XHCI_NO_64BIT_SUPPORT (1 << 23) - unsigned int num_active_eps; - unsigned int limit_active_eps; - /* There are two roothubs to keep track of bus suspend info for */ -diff --git a/drivers/vfio/Kconfig b/drivers/vfio/Kconfig -index d8c5763..db494c0 100644 ---- a/drivers/vfio/Kconfig -+++ b/drivers/vfio/Kconfig -@@ -16,7 +16,7 @@ config VFIO_SPAPR_EEH - menuconfig VFIO - tristate "VFIO Non-Privileged userspace driver framework" - depends on IOMMU_API -- select VFIO_IOMMU_TYPE1 if X86 -+ select VFIO_IOMMU_TYPE1 if (X86 || ARM_SMMU) - select VFIO_IOMMU_SPAPR_TCE if (PPC_POWERNV || PPC_PSERIES) - select VFIO_SPAPR_EEH if (PPC_POWERNV || PPC_PSERIES) - select ANON_INODES -@@ -27,3 +27,6 @@ menuconfig VFIO - If you don't know what to 
do here, say N. - - source "drivers/vfio/pci/Kconfig" -+#source "drivers/vfio/platform/Kconfig" -+source "drivers/vfio/fsl-mc/Kconfig" -+ -diff --git a/drivers/vfio/Makefile b/drivers/vfio/Makefile -index 0b035b1..69bcd84 100644 ---- a/drivers/vfio/Makefile -+++ b/drivers/vfio/Makefile -@@ -3,3 +3,4 @@ obj-$(CONFIG_VFIO_IOMMU_TYPE1) += vfio_iommu_type1.o - obj-$(CONFIG_VFIO_IOMMU_SPAPR_TCE) += vfio_iommu_spapr_tce.o - obj-$(CONFIG_VFIO_SPAPR_EEH) += vfio_spapr_eeh.o - obj-$(CONFIG_VFIO_PCI) += pci/ -+obj-$(CONFIG_VFIO_FSL_MC) += fsl-mc/ -diff --git a/drivers/vfio/fsl-mc/Kconfig b/drivers/vfio/fsl-mc/Kconfig -new file mode 100644 -index 0000000..eb6ba2b ---- /dev/null -+++ b/drivers/vfio/fsl-mc/Kconfig -@@ -0,0 +1,9 @@ -+config VFIO_FSL_MC -+ tristate "VFIO support for Freescale Management Complex devices" -+ depends on VFIO && FSL_MC_BUS && EVENTFD -+ help -+ Support for the Freescale Management Complex(MC) VFIO driver. -+ This is required to passthrough Freescale MC devices using the -+ VFIO framework. -+ -+ If you don't know what to do here, say N. -diff --git a/drivers/vfio/fsl-mc/Makefile b/drivers/vfio/fsl-mc/Makefile -new file mode 100644 -index 0000000..2aca75a ---- /dev/null -+++ b/drivers/vfio/fsl-mc/Makefile -@@ -0,0 +1,2 @@ -+vfio-fsl_mc-y := vfio_fsl_mc.o -+obj-$(CONFIG_VFIO_FSL_MC) += vfio_fsl_mc.o vfio_fsl_mc_intr.o -diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc.c b/drivers/vfio/fsl-mc/vfio_fsl_mc.c -new file mode 100644 -index 0000000..ffbe845 ---- /dev/null -+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc.c -@@ -0,0 +1,603 @@ -+/* -+ * Freescale Management Complex (MC) device passthrough using VFIO -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: Bharat Bhushan -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "../../staging/fsl-mc/include/mc.h" -+#include "../../staging/fsl-mc/include/mc-sys.h" -+#include "../../staging/fsl-mc/include/mc-private.h" -+ -+#include "vfio_fsl_mc_private.h" -+struct fsl_mc_io *vfio_mc_io = NULL; -+struct fsl_mc_io *vfio_atomic_mc_io = NULL; -+ -+static DEFINE_MUTEX(driver_lock); -+ -+/* Validate that requested address range falls in one of container's -+ * device region. -+ */ -+static bool vfio_validate_mmap_addr(struct vfio_fsl_mc_device *vdev, -+ unsigned long addr, unsigned long size) -+{ -+ struct fsl_mc_device *mc_dev = vdev->mc_dev; -+ phys_addr_t region_addr; -+ size_t region_size; -+ int idx; -+ -+ /* Do not try to validate if address range wraps */ -+ if ((addr + size) < addr) -+ return false; -+ -+ /* Hack to allow mmap GITS_TRANSLATOR Register Page */ -+ if (addr == 0x6030000) -+ return true; -+ -+ for (idx = 0; idx < mc_dev->obj_desc.region_count; idx++) { -+ region_addr = mc_dev->regions[idx].start; -+ region_size = mc_dev->regions[idx].end - -+ mc_dev->regions[idx].start + 1; -+ -+ /* -+ * Align search to minimum mappable size of PAGE_SIZE. -+ * Thanks to our hardware that even though the -+ * region_size is less then PAGE_SIZE but there -+ * is no other device maps in this address range. -+ * So no security threat/issue in mapping PAGE_SIZE. 
-+ */ -+ if (region_size < PAGE_SIZE) -+ region_size = PAGE_SIZE; -+ -+ if (addr >= region_addr && -+ ((addr + size) <= (region_addr + region_size))) -+ return true; -+ } -+ -+ return false; -+} -+ -+static long vfio_fsl_mc_ioctl(void *device_data, -+ unsigned int cmd, unsigned long arg) -+{ -+ struct vfio_fsl_mc_device *vdev = device_data; -+ struct fsl_mc_device *mc_dev = vdev->mc_dev; -+ struct device *dev = &mc_dev->dev; -+ unsigned long minsz; -+ int ret; -+ -+ if (WARN_ON(!mc_dev)) -+ return -ENODEV; -+ -+ switch (cmd) { -+ case VFIO_DEVICE_GET_INFO: -+ { -+ struct vfio_device_info info; -+ -+ minsz = offsetofend(struct vfio_device_info, num_irqs); -+ -+ if (copy_from_user(&info, (void __user *)arg, minsz)) -+ return -EFAULT; -+ -+ if (info.argsz < minsz) -+ return -EINVAL; -+ -+ info.flags = VFIO_DEVICE_FLAGS_FSL_MC; -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) -+ info.flags |= VFIO_DEVICE_FLAGS_RESET; -+ -+ info.num_regions = mc_dev->obj_desc.region_count; -+ info.num_irqs = mc_dev->obj_desc.irq_count; -+ -+ return copy_to_user((void __user *)arg, &info, minsz); -+ } -+ case VFIO_DEVICE_GET_REGION_INFO: -+ { -+ struct vfio_region_info info; -+ -+ minsz = offsetofend(struct vfio_region_info, offset); -+ -+ if (copy_from_user(&info, (void __user *)arg, minsz)) -+ return -EFAULT; -+ -+ if (info.argsz < minsz) -+ return -EINVAL; -+ -+ info.offset = mc_dev->regions[info.index].start; -+ info.size = mc_dev->regions[info.index].end - -+ mc_dev->regions[info.index].start + 1; -+ info.flags = VFIO_REGION_INFO_FLAG_READ | -+ VFIO_REGION_INFO_FLAG_WRITE | -+ VFIO_REGION_INFO_FLAG_MMAP; -+ -+ return copy_to_user((void __user *)arg, &info, minsz); -+ } -+ case VFIO_DEVICE_GET_IRQ_INFO: -+ { -+ struct vfio_irq_info info; -+ -+ minsz = offsetofend(struct vfio_irq_info, count); -+ if (copy_from_user(&info, (void __user *)arg, minsz)) -+ return -EFAULT; -+ -+ if (info.argsz < minsz) -+ return -EINVAL; -+ -+ if (info.index >= mc_dev->obj_desc.irq_count) -+ return 
-EINVAL; -+ -+ if (vdev->mc_irqs[info.index].irq_initialized) { -+ info.flags = vdev->mc_irqs[info.index].flags; -+ info.count = vdev->mc_irqs[info.index].count; -+ } else { -+ /* -+ * If IRQs are not initialized then these can not -+ * be configuted and used by user-space/ -+ */ -+ info.flags = 0; -+ info.count = 0; -+ } -+ -+ return copy_to_user((void __user *)arg, &info, minsz); -+ } -+ case VFIO_DEVICE_SET_IRQS: -+ { -+ struct vfio_irq_set hdr; -+ u8 *data = NULL; -+ int ret = 0; -+ -+ minsz = offsetofend(struct vfio_irq_set, count); -+ -+ if (copy_from_user(&hdr, (void __user *)arg, minsz)) -+ return -EFAULT; -+ -+ if (hdr.argsz < minsz) -+ return -EINVAL; -+ -+ if (hdr.index >= mc_dev->obj_desc.irq_count) -+ return -EINVAL; -+ -+ if (hdr.start != 0 || hdr.count > 1) -+ return -EINVAL; -+ -+ if (hdr.count == 0 && -+ (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE) || -+ !(hdr.flags & VFIO_IRQ_SET_ACTION_TRIGGER))) -+ return -EINVAL; -+ -+ if (hdr.flags & ~(VFIO_IRQ_SET_DATA_TYPE_MASK | -+ VFIO_IRQ_SET_ACTION_TYPE_MASK)) -+ return -EINVAL; -+ -+ if (!(hdr.flags & VFIO_IRQ_SET_DATA_NONE)) { -+ size_t size; -+ -+ if (hdr.flags & VFIO_IRQ_SET_DATA_BOOL) -+ size = sizeof(uint8_t); -+ else if (hdr.flags & VFIO_IRQ_SET_DATA_EVENTFD) -+ size = sizeof(int32_t); -+ else -+ return -EINVAL; -+ -+ if (hdr.argsz - minsz < hdr.count * size) -+ return -EINVAL; -+ -+ data = memdup_user((void __user *)(arg + minsz), -+ hdr.count * size); -+ if (IS_ERR(data)) -+ return PTR_ERR(data); -+ } -+ -+ ret = vfio_fsl_mc_set_irqs_ioctl(vdev, hdr.flags, -+ hdr.index, hdr.start, -+ hdr.count, data); -+ return ret; -+ } -+ case VFIO_DEVICE_RESET: -+ { -+ if (strcmp(mc_dev->obj_desc.type, "dprc") != 0) -+ return -EINVAL; -+ -+ ret = dprc_reset_container(mc_dev->mc_io, 0, -+ mc_dev->mc_handle, -+ mc_dev->obj_desc.id); -+ if (ret) { -+ dev_err(dev, "Error in resetting container %d\n", ret); -+ return ret; -+ } -+ -+ ret = 0; -+ break; -+ } -+ default: -+ ret = -EINVAL; -+ } -+ -+ return ret; -+} -+ 
-+static ssize_t vfio_fsl_mc_read(void *device_data, char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ struct vfio_fsl_mc_device *vdev = device_data; -+ struct fsl_mc_device *mc_dev = vdev->mc_dev; -+ struct device *dev = &mc_dev->dev; -+ -+ dev_err(dev, "%s: Unimplemented\n", __func__); -+ return -EFAULT; -+} -+ -+static ssize_t vfio_fsl_mc_write(void *device_data, const char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ struct vfio_fsl_mc_device *vdev = device_data; -+ struct fsl_mc_device *mc_dev = vdev->mc_dev; -+ struct device *dev = &mc_dev->dev; -+ -+ dev_err(dev, "%s: Unimplemented\n", __func__); -+ return -EFAULT; -+} -+ -+/* Allows mmaping fsl_mc device regions in assigned DPRC */ -+static int vfio_fsl_mc_mmap(void *device_data, struct vm_area_struct *vma) -+{ -+ struct vfio_fsl_mc_device *vdev = device_data; -+ struct fsl_mc_device *mc_dev = vdev->mc_dev; -+ unsigned long size = vma->vm_end - vma->vm_start; -+ unsigned long addr = vma->vm_pgoff << PAGE_SHIFT; -+ int ret; -+ -+ if (vma->vm_end < vma->vm_start) -+ return -EINVAL; -+ if (vma->vm_start & ~PAGE_MASK) -+ return -EINVAL; -+ if (vma->vm_end & ~PAGE_MASK) -+ return -EINVAL; -+ if ((vma->vm_flags & VM_SHARED) == 0) -+ return -EINVAL; -+ -+ if (!vfio_validate_mmap_addr(vdev, addr, size)) -+ return -EINVAL; -+ -+ vma->vm_private_data = mc_dev; -+ -+#define QBMAN_SWP_CENA_BASE 0x818000000ULL -+ if ((addr & 0xFFF000000) == QBMAN_SWP_CENA_BASE) -+ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot); -+ else -+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -+ -+ ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, -+ size, vma->vm_page_prot); -+ return ret; -+} -+ -+static void vfio_fsl_mc_release(void *device_data) -+{ -+ struct vfio_fsl_mc_device *vdev = device_data; -+ struct fsl_mc_device *mc_dev = vdev->mc_dev; -+ -+ if (WARN_ON(mc_dev == NULL)) -+ return; -+ -+ mutex_lock(&driver_lock); -+ vdev->refcnt--; -+ -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) -+ 
dprc_reset_container(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ mc_dev->obj_desc.id); -+ else -+ vfio_fsl_mc_unconfigure_irqs(vdev); -+ -+ mutex_unlock(&driver_lock); -+ -+ module_put(THIS_MODULE); -+} -+ -+static int vfio_fsl_mc_open(void *device_data) -+{ -+ struct vfio_fsl_mc_device *vdev = device_data; -+ -+ if (!try_module_get(THIS_MODULE)) -+ return -ENODEV; -+ -+ mutex_lock(&driver_lock); -+ vdev->refcnt++; -+ mutex_unlock(&driver_lock); -+ -+ return 0; -+} -+ -+static const struct vfio_device_ops vfio_fsl_mc_ops = { -+ .name = "vfio-fsl-mc", -+ .open = vfio_fsl_mc_open, -+ .release = vfio_fsl_mc_release, -+ .ioctl = vfio_fsl_mc_ioctl, -+ .read = vfio_fsl_mc_read, -+ .write = vfio_fsl_mc_write, -+ .mmap = vfio_fsl_mc_mmap, -+}; -+ -+static int vfio_fsl_mc_device_remove(struct device *dev, void *data) -+{ -+ struct fsl_mc_device *mc_dev; -+ WARN_ON(dev == NULL); -+ -+ mc_dev = to_fsl_mc_device(dev); -+ if (WARN_ON(mc_dev == NULL)) -+ return -ENODEV; -+ -+ fsl_mc_device_remove(mc_dev); -+ return 0; -+} -+ -+static int vfio_fsl_mc_probe(struct fsl_mc_device *mc_dev) -+{ -+ struct vfio_fsl_mc_device *vdev; -+ struct iommu_group *group; -+ struct device *dev = &mc_dev->dev; -+ struct fsl_mc_bus *mc_bus; -+ unsigned int irq_count; -+ int ret; -+ -+ dev_info(dev, "Binding with vfio-fsl_mc driver\n"); -+ -+ group = iommu_group_get(dev); -+ if (!group) { -+ dev_err(dev, "%s: VFIO: No IOMMU group\n", __func__); -+ return -EINVAL; -+ } -+ -+ vdev = kzalloc(sizeof(*vdev), GFP_KERNEL); -+ if (!vdev) { -+ iommu_group_put(group); -+ return -ENOMEM; -+ } -+ -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) { -+ vdev->mc_dev = mc_dev; -+ -+ /* Free inbuilt dprc MC portal if exists */ -+ if (mc_dev->mc_io && (mc_dev->mc_io != vfio_mc_io)) -+ fsl_destroy_mc_io(mc_dev->mc_io); -+ -+ /* Use New Allocated MC Portal (DPMCP object) */ -+ mc_dev->mc_io = vfio_mc_io; -+ -+ ret = dprc_open(mc_dev->mc_io, -+ 0, -+ mc_dev->obj_desc.id, -+ &mc_dev->mc_handle); -+ if (ret) { -+ dev_err(dev, 
"dprc_open() failed: error = %d\n", ret); -+ goto free_vfio_device; -+ } -+ -+ /* Initialize resource pool */ -+ dprc_init_all_resource_pools(mc_dev); -+ -+ mc_bus = to_fsl_mc_bus(mc_dev); -+ mutex_init(&mc_bus->scan_mutex); -+ -+ mc_bus->atomic_mc_io = vfio_atomic_mc_io; -+ ret = dprc_open(mc_bus->atomic_mc_io, 0, mc_dev->obj_desc.id, -+ &mc_bus->atomic_dprc_handle); -+ if (ret < 0) { -+ dev_err(dev, "fail to open dprc with atomic io (%d)\n", ret); -+ goto clean_resource_pool; -+ } -+ -+ if (fsl_mc_interrupts_supported() && !mc_bus->irq_resources) { -+ irq_count = FSL_MC_IRQ_POOL_MAX_EXTRA_IRQS; -+ ret = fsl_mc_populate_irq_pool(mc_bus, irq_count); -+ if (ret < 0) { -+ dev_err(dev, "%s: Failed to init irq-pool\n", -+ __func__); -+ goto free_open_dprc; -+ } -+ } -+ -+ mutex_lock(&mc_bus->scan_mutex); -+ ret = dprc_scan_objects(mc_dev, mc_dev->driver_override, -+ &irq_count); -+ mutex_unlock(&mc_bus->scan_mutex); -+ if (ret) { -+ dev_err(dev, "dprc_scan_objects() fails (%d)\n", ret); -+ goto clean_irq_pool; -+ } -+ -+ ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev); -+ if (ret) { -+ dev_err(dev, "%s: Failed to add to vfio group\n", -+ __func__); -+ goto dprc_clean_scan_objects; -+ } -+ -+ ret = vfio_fsl_mc_init_irqs(vdev); -+ if (ret) { -+ dev_err(dev, "%s: Failed to setup irqs\n", -+ __func__); -+ vfio_del_group_dev(dev); -+ goto dprc_clean_scan_objects; -+ } -+ } else { -+ vdev->mc_dev = mc_dev; -+ -+ /* Use New Allocated MC Portal (DPMCP object) */ -+ mc_dev->mc_io = vfio_mc_io; -+ -+ ret = vfio_add_group_dev(dev, &vfio_fsl_mc_ops, vdev); -+ if (ret) { -+ dev_err(dev, "%s: Failed to add to vfio group\n", -+ __func__); -+ goto free_vfio_device; -+ } -+ -+ if (mc_dev->obj_desc.irq_count) { -+ ret = vfio_fsl_mc_init_irqs(vdev); -+ if (ret) { -+ dev_err(dev, "%s: Failed to setup irqs\n", -+ __func__); -+ vfio_del_group_dev(dev); -+ goto free_vfio_device; -+ } -+ } -+ } -+ -+ return 0; -+ -+dprc_clean_scan_objects: -+ fsl_mc_cleanup_irq_pool(mc_bus); -+ 
device_for_each_child(&mc_dev->dev, NULL, vfio_fsl_mc_device_remove); -+ -+clean_irq_pool: -+ fsl_mc_cleanup_irq_pool(mc_bus); -+ -+free_open_dprc: -+ dprc_close(vfio_atomic_mc_io, 0, mc_dev->mc_handle); -+ -+clean_resource_pool: -+ dprc_cleanup_all_resource_pools(mc_dev); -+ dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -+ -+free_vfio_device: -+ kfree(vdev); -+ iommu_group_put(group); -+ return ret; -+} -+ -+static int vfio_fsl_mc_remove(struct fsl_mc_device *mc_dev) -+{ -+ struct vfio_fsl_mc_device *vdev; -+ struct fsl_mc_bus *mc_bus; -+ struct device *dev = &mc_dev->dev; -+ int ret; -+ -+ dev_info(dev, "Un-binding with vfio-fsl-mc driver\n"); -+ -+ vdev = vfio_del_group_dev(dev); -+ if (!vdev) -+ return -EINVAL; -+ -+ /* Only FSL-MC DPRC device can be unbound */ -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) { -+ device_for_each_child(dev, NULL, vfio_fsl_mc_device_remove); -+ -+ vfio_fsl_mc_free_irqs(vdev); -+ dprc_cleanup_all_resource_pools(mc_dev); -+ mc_bus = to_fsl_mc_bus(mc_dev); -+ -+ if (fsl_mc_interrupts_supported()) -+ fsl_mc_cleanup_irq_pool(mc_bus); -+ -+ ret = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -+ if (ret < 0) -+ dev_err(dev, "dprc_close() fails %d\n", ret); -+ -+ ret = dprc_close(mc_bus->atomic_mc_io, 0, -+ mc_bus->atomic_dprc_handle); -+ if (ret < 0) -+ dev_err(dev, "dprc_close(atomic-io) fails %d\n", ret); -+ } else { -+ if (mc_dev->obj_desc.irq_count) -+ vfio_fsl_mc_free_irqs(vdev); -+ -+ mc_dev->mc_io = NULL; -+ } -+ -+ iommu_group_put(mc_dev->dev.iommu_group); -+ kfree(vdev); -+ -+ return 0; -+} -+ -+/* -+ * vfio-fsl_mc is a meta-driver, so use driver_override interface to -+ * bind a fsl_mc container with this driver and match_id_table is NULL. 
-+ */ -+static struct fsl_mc_driver vfio_fsl_mc_driver = { -+ .probe = vfio_fsl_mc_probe, -+ .remove = vfio_fsl_mc_remove, -+ .match_id_table = NULL, -+ .driver = { -+ .name = "vfio-fsl-mc", -+ .owner = THIS_MODULE, -+ }, -+}; -+ -+static int __init vfio_fsl_mc_driver_init(void) -+{ -+ int err; -+ struct fsl_mc_device *root_mc_dev; -+ -+ if (fsl_mc_bus_type.dev_root == NULL) { -+ pr_err("%s: Driver registration fails as no fsl_mc_bus found\n", -+ __func__); -+ return -ENODEV; -+ } -+ -+ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); -+ -+ /* Allocate a new MC portal (DPMCP object) */ -+ err = fsl_mc_portal_allocate(root_mc_dev, 0, &vfio_mc_io); -+ if (err < 0) -+ goto err; -+ -+ /* Reset MCP before move on */ -+ err = fsl_mc_portal_reset(vfio_mc_io); -+ if (err < 0) -+ return err; -+ -+ /* Allocate a new MC portal (DPMCP object) */ -+ err = fsl_mc_portal_allocate(root_mc_dev, -+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, -+ &vfio_atomic_mc_io); -+ if (err < 0) -+ goto err; -+ -+ err = fsl_mc_driver_register(&vfio_fsl_mc_driver); -+ if (err < 0) -+ goto err; -+ -+ return 0; -+err: -+ if (vfio_mc_io) -+ fsl_mc_portal_free(vfio_mc_io); -+ -+ if (vfio_atomic_mc_io) -+ fsl_mc_portal_free(vfio_atomic_mc_io); -+ -+ vfio_atomic_mc_io = NULL; -+ vfio_mc_io = NULL; -+ return err; -+} -+ -+static void __exit vfio_fsl_mc_driver_exit(void) -+{ -+ fsl_mc_portal_free(vfio_mc_io); -+ fsl_mc_portal_free(vfio_atomic_mc_io); -+ fsl_mc_driver_unregister(&vfio_fsl_mc_driver); -+} -+ -+module_init(vfio_fsl_mc_driver_init); -+module_exit(vfio_fsl_mc_driver_exit); -+ -+MODULE_AUTHOR("Bharat Bhushan "); -+MODULE_LICENSE("GPL v2"); -+MODULE_DESCRIPTION("VFIO for FSL MC devices - User Level meta-driver"); -diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c -new file mode 100644 -index 0000000..a4db758 ---- /dev/null -+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_intr.c -@@ -0,0 +1,273 @@ -+/* -+ * Freescale Management Complex (MC) device passthrough using 
VFIO -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: Bharat Bhushan -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "../../staging/fsl-mc/include/mc.h" -+#include "../../staging/fsl-mc/include/mc-sys.h" -+#include "../../staging/fsl-mc/include/mc-private.h" -+#include -+ -+#include "vfio_fsl_mc_private.h" -+ -+static irqreturn_t vfio_fsl_mc_irq_handler(int irq_num, void *arg) -+{ -+ struct vfio_fsl_mc_irq *mc_irq = (struct vfio_fsl_mc_irq *)arg; -+ -+ eventfd_signal(mc_irq->trigger, 1); -+ return IRQ_HANDLED; -+} -+ -+int vfio_fsl_mc_configure_irq(struct vfio_fsl_mc_device *vdev, -+ int irq_index) -+{ -+ int error; -+ struct fsl_mc_device *mc_dev = vdev->mc_dev; -+ struct fsl_mc_device_irq *irq = mc_dev->irqs[irq_index]; -+ struct vfio_fsl_mc_irq *mc_irq = &vdev->mc_irqs[irq_index]; -+ struct device *dev = &mc_dev->dev; -+ -+ if (WARN_ON(!mc_irq->irq_initialized)) -+ return -EOPNOTSUPP; -+ -+ if (WARN_ON(mc_irq->irq_configured)) -+ return -EINVAL; -+ -+ mc_irq->name = kasprintf(GFP_KERNEL, "%s-%s-%d", "vfio-fsl-mc", -+ dev_name(dev), irq->irq_number); -+ -+ error = request_irq(irq->irq_number, vfio_fsl_mc_irq_handler, -+ 0, mc_irq->name, mc_irq); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "IRQ registration fails with error: %d\n", error); -+ kfree(mc_irq->name); -+ return error; -+ } -+ -+ mc_irq->irq_configured = true; -+ return 0; -+} -+ -+static void vfio_fsl_mc_unconfigure_irq(struct vfio_fsl_mc_device *vdev, -+ int irq_index) -+{ -+ struct fsl_mc_device_irq *irq = vdev->mc_dev->irqs[irq_index]; -+ struct vfio_fsl_mc_irq *mc_irq = &vdev->mc_irqs[irq_index]; -+ -+ if (!vdev->mc_irqs[irq_index].irq_configured) -+ return; -+ -+ 
free_irq(irq->irq_number, mc_irq); -+ kfree(vdev->mc_irqs[irq_index].name); -+ vdev->mc_irqs[irq_index].irq_configured = false; -+} -+ -+static int vfio_fsl_mc_setup_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int ret; -+ int irq_count = mc_dev->obj_desc.irq_count; -+ int hwirq; -+ int i; -+ -+ /* Allocate IRQs */ -+ ret = fsl_mc_allocate_irqs(mc_dev); -+ if (ret) -+ return ret; -+ -+ /* Disable IRQs */ -+ for (i = 0; i < irq_count; i++) { -+ hwirq = mc_dev->irqs[i]->irq_number; -+ disable_irq_nosync(hwirq); -+ } -+ -+ return 0; -+} -+ -+int vfio_fsl_mc_init_irqs(struct vfio_fsl_mc_device *vdev) -+{ -+ struct fsl_mc_device *mc_dev = vdev->mc_dev; -+ struct device *dev = &mc_dev->dev; -+ int irq_count = mc_dev->obj_desc.irq_count; -+ struct vfio_fsl_mc_irq *mc_irq; -+ int ret, i; -+ -+ mc_irq = kcalloc(irq_count, sizeof(*mc_irq), GFP_KERNEL); -+ if (mc_irq == NULL) -+ return -ENOMEM; -+ -+ ret = vfio_fsl_mc_setup_irqs(mc_dev); -+ if (ret) { -+ kfree(mc_irq); -+ dev_err(dev, "vfio_fsl_mc_setup_irqs Fails %d\n", ret); -+ return ret; -+ } -+ -+ for (i = 0; i < irq_count; i++) { -+ mc_irq[i].count = 1; -+ mc_irq[i].flags = VFIO_IRQ_INFO_EVENTFD | -+ VFIO_IRQ_INFO_MASKABLE; -+ mc_irq[i].irq_initialized = true; -+ } -+ -+ vdev->mc_irqs = mc_irq; -+ -+ return 0; -+} -+ -+int vfio_fsl_mc_unconfigure_irqs(struct vfio_fsl_mc_device *vdev) -+{ -+ struct fsl_mc_device *mc_dev = vdev->mc_dev; -+ int i; -+ -+ for (i = 0; i < mc_dev->obj_desc.irq_count; i++) { -+ if (!vdev->mc_irqs[i].irq_initialized) -+ continue; -+ -+ vfio_fsl_mc_unconfigure_irq(vdev, i); -+ } -+ return 0; -+} -+ -+/* Free All IRQs for the given MC object */ -+void vfio_fsl_mc_free_irqs(struct vfio_fsl_mc_device *vdev) -+{ -+ struct fsl_mc_device *mc_dev = vdev->mc_dev; -+ -+ vfio_fsl_mc_unconfigure_irqs(vdev); -+ fsl_mc_free_irqs(mc_dev); -+ -+ kfree(vdev->mc_irqs); -+} -+ -+static int vfio_fsl_mc_irq_mask(struct vfio_fsl_mc_device *vdev, -+ unsigned index, unsigned start, -+ unsigned count, uint32_t flags, void 
*data, -+ uint32_t mask) -+{ -+ uint8_t arr; -+ -+ if (start != 0 || count != 1) -+ return -EINVAL; -+ -+ switch (flags & VFIO_IRQ_SET_DATA_TYPE_MASK) { -+ case VFIO_IRQ_SET_DATA_BOOL: -+ arr = *(uint8_t *) data; -+ if (arr != 0x1) -+ return -EINVAL; -+ -+ case VFIO_IRQ_SET_DATA_NONE: -+ return -ENOTTY; /* To be Implemented */ -+ case VFIO_IRQ_SET_DATA_EVENTFD: -+ return -ENOTTY; /* To be Implemented */ -+ -+ default: -+ return -ENOTTY; -+ } -+ -+ return 0; -+} -+ -+static int vfio_fsl_mc_config_irq_signal(struct vfio_fsl_mc_device *vdev, -+ int irq_index, int32_t fd) -+{ -+ struct eventfd_ctx *trigger; -+ struct vfio_fsl_mc_irq *mc_irq = &vdev->mc_irqs[irq_index]; -+ int ret; -+ -+ if (vdev->mc_irqs[irq_index].trigger) { -+ eventfd_ctx_put(vdev->mc_irqs[irq_index].trigger); -+ vdev->mc_irqs[irq_index].trigger = NULL; -+ } -+ -+ if (fd < 0) -+ return 0; -+ -+ trigger = eventfd_ctx_fdget(fd); -+ if (IS_ERR(trigger)) -+ return PTR_ERR(trigger); -+ -+ /* If IRQ not configured the configure */ -+ if (!mc_irq->irq_configured) { -+ ret = vfio_fsl_mc_configure_irq(vdev, irq_index); -+ if (ret) { -+ eventfd_ctx_put(trigger); -+ return ret; -+ } -+ } -+ -+ vdev->mc_irqs[irq_index].trigger = trigger; -+ return 0; -+} -+ -+static int vfio_fsl_mc_set_irq_trigger(struct vfio_fsl_mc_device *vdev, -+ unsigned index, unsigned start, -+ unsigned count, uint32_t flags, void *data) -+{ -+ struct fsl_mc_device *mc_dev = vdev->mc_dev; -+ int32_t fd; -+ int hwirq; -+ -+ /* If count = 0 and DATA_NONE, disable interrupt */ -+ if (!count && (flags & VFIO_IRQ_SET_DATA_NONE)) { -+ hwirq = mc_dev->irqs[index]->irq_number; -+ disable_irq_nosync(hwirq); -+ return 0; -+ } -+ -+ if (flags & VFIO_IRQ_SET_DATA_BOOL) -+ fd = *(int8_t *)data; -+ else if (flags & VFIO_IRQ_SET_DATA_EVENTFD) -+ fd = *(int32_t *)data; -+ else -+ return -EINVAL; -+ -+ if (start != 0 || count != 1) -+ return -EINVAL; -+ -+ return vfio_fsl_mc_config_irq_signal(vdev, index, fd); -+} -+ -+int vfio_fsl_mc_set_irqs_ioctl(struct 
vfio_fsl_mc_device *vdev, -+ uint32_t flags, unsigned index, unsigned start, -+ unsigned count, void *data) -+{ -+ int ret = -ENOTTY; -+ -+ switch (flags & VFIO_IRQ_SET_ACTION_TYPE_MASK) { -+ case VFIO_IRQ_SET_ACTION_MASK: -+ /* mask all sources */ -+ ret = vfio_fsl_mc_irq_mask(vdev, index, start, -+ count, flags, data, 0); -+ break; -+ case VFIO_IRQ_SET_ACTION_UNMASK: -+ /* unmask all sources */ -+ ret = vfio_fsl_mc_irq_mask(vdev, index, start, -+ count, flags, data, ~0); -+ break; -+ case VFIO_IRQ_SET_ACTION_TRIGGER: -+ ret = vfio_fsl_mc_set_irq_trigger(vdev, index, start, -+ count, flags, data); -+ break; -+ } -+ -+ return ret; -+} -diff --git a/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h -new file mode 100644 -index 0000000..8980536 ---- /dev/null -+++ b/drivers/vfio/fsl-mc/vfio_fsl_mc_private.h -@@ -0,0 +1,43 @@ -+/* -+ * Freescale Management Complex VFIO private declarations -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: Bharat Bhushan -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#include "../../staging/fsl-mc/include/mc.h" -+ -+#ifndef VFIO_FSL_MC_PRIVATE_H -+#define VFIO_FSL_MC_PRIVATE_H -+ -+struct vfio_fsl_mc_irq { -+ struct eventfd_ctx *trigger; -+ u32 flags; -+ u32 count; -+ char *name; -+ bool irq_initialized; -+ bool irq_configured; -+}; -+ -+struct vfio_fsl_mc_device { -+ struct fsl_mc_device *mc_dev; -+ int refcnt; -+ struct vfio_fsl_mc_irq *mc_irqs; -+}; -+ -+int vfio_fsl_mc_init_irqs(struct vfio_fsl_mc_device *vdev); -+ -+void vfio_fsl_mc_free_irqs(struct vfio_fsl_mc_device *vdev); -+ -+int vfio_fsl_mc_configure_irq(struct vfio_fsl_mc_device *vdev, int irq_idx); -+ -+int vfio_fsl_mc_unconfigure_irqs(struct vfio_fsl_mc_device *vdev); -+ -+int vfio_fsl_mc_set_irqs_ioctl(struct vfio_fsl_mc_device *vdev, -+ uint32_t flags, unsigned index, unsigned start, -+ unsigned count, void *data); -+#endif /* VFIO_PCI_PRIVATE_H */ -diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c -index 553212f..e8d695b 100644 ---- a/drivers/vfio/pci/vfio_pci_intrs.c -+++ b/drivers/vfio/pci/vfio_pci_intrs.c -@@ -560,7 +560,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, - struct msi_msg msg; - - get_cached_msi_msg(irq, &msg); -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - } - - ret = request_irq(irq, vfio_msihandler, 0, -diff --git a/drivers/vfio/vfio_iommu_type1.c b/drivers/vfio/vfio_iommu_type1.c -index 4a9d666..d795e07 100644 ---- a/drivers/vfio/vfio_iommu_type1.c -+++ b/drivers/vfio/vfio_iommu_type1.c -@@ -547,6 +547,8 @@ static int vfio_dma_do_map(struct vfio_iommu *iommu, - prot |= IOMMU_WRITE; - if (map->flags & VFIO_DMA_MAP_FLAG_READ) - prot |= IOMMU_READ; -+ if (map->flags & VFIO_DMA_MAP_FLAG_MMIO) -+ prot |= IOMMU_MMIO; - - if (!prot || !size || (size | iova | vaddr) & mask) - return -EINVAL; -@@ -933,7 +935,8 @@ static long vfio_iommu_type1_ioctl(void *iommu_data, - } else if (cmd == VFIO_IOMMU_MAP_DMA) { - struct vfio_iommu_type1_dma_map map; - uint32_t mask = 
VFIO_DMA_MAP_FLAG_READ | -- VFIO_DMA_MAP_FLAG_WRITE; -+ VFIO_DMA_MAP_FLAG_WRITE | -+ VFIO_DMA_MAP_FLAG_MMIO; - - minsz = offsetofend(struct vfio_iommu_type1_dma_map, size); - -diff --git a/fs/Kconfig b/fs/Kconfig -index 664991a..1481093 100644 ---- a/fs/Kconfig -+++ b/fs/Kconfig -@@ -210,6 +210,7 @@ source "fs/ufs/Kconfig" - source "fs/exofs/Kconfig" - source "fs/f2fs/Kconfig" - source "fs/efivarfs/Kconfig" -+source "fs/aufs/Kconfig" - - endif # MISC_FILESYSTEMS - -diff --git a/fs/Makefile b/fs/Makefile -index da0bbb4..c8bc724 100644 ---- a/fs/Makefile -+++ b/fs/Makefile -@@ -126,3 +126,4 @@ obj-y += exofs/ # Multiple modules - obj-$(CONFIG_CEPH_FS) += ceph/ - obj-$(CONFIG_PSTORE) += pstore/ - obj-$(CONFIG_EFIVAR_FS) += efivarfs/ -+obj-$(CONFIG_AUFS_FS) += aufs/ -diff --git a/fs/aufs/Kconfig b/fs/aufs/Kconfig -new file mode 100644 -index 0000000..63560ce ---- /dev/null -+++ b/fs/aufs/Kconfig -@@ -0,0 +1,185 @@ -+config AUFS_FS -+ tristate "Aufs (Advanced multi layered unification filesystem) support" -+ help -+ Aufs is a stackable unification filesystem such as Unionfs, -+ which unifies several directories and provides a merged single -+ directory. -+ In the early days, aufs was entirely re-designed and -+ re-implemented Unionfs Version 1.x series. Introducing many -+ original ideas, approaches and improvements, it becomes totally -+ different from Unionfs while keeping the basic features. -+ -+if AUFS_FS -+choice -+ prompt "Maximum number of branches" -+ default AUFS_BRANCH_MAX_127 -+ help -+ Specifies the maximum number of branches (or member directories) -+ in a single aufs. The larger value consumes more system -+ resources and has a minor impact to performance. -+config AUFS_BRANCH_MAX_127 -+ bool "127" -+ help -+ Specifies the maximum number of branches (or member directories) -+ in a single aufs. The larger value consumes more system -+ resources and has a minor impact to performance. 
-+config AUFS_BRANCH_MAX_511 -+ bool "511" -+ help -+ Specifies the maximum number of branches (or member directories) -+ in a single aufs. The larger value consumes more system -+ resources and has a minor impact to performance. -+config AUFS_BRANCH_MAX_1023 -+ bool "1023" -+ help -+ Specifies the maximum number of branches (or member directories) -+ in a single aufs. The larger value consumes more system -+ resources and has a minor impact to performance. -+config AUFS_BRANCH_MAX_32767 -+ bool "32767" -+ help -+ Specifies the maximum number of branches (or member directories) -+ in a single aufs. The larger value consumes more system -+ resources and has a minor impact to performance. -+endchoice -+ -+config AUFS_SBILIST -+ bool -+ depends on AUFS_MAGIC_SYSRQ || PROC_FS -+ default y -+ help -+ Automatic configuration for internal use. -+ When aufs supports Magic SysRq or /proc, enabled automatically. -+ -+config AUFS_HNOTIFY -+ bool "Detect direct branch access (bypassing aufs)" -+ help -+ If you want to modify files on branches directly, eg. bypassing aufs, -+ and want aufs to detect the changes of them fully, then enable this -+ option and use 'udba=notify' mount option. -+ Currently there is only one available configuration, "fsnotify". -+ It will have a negative impact to the performance. -+ See detail in aufs.5. -+ -+choice -+ prompt "method" if AUFS_HNOTIFY -+ default AUFS_HFSNOTIFY -+config AUFS_HFSNOTIFY -+ bool "fsnotify" -+ select FSNOTIFY -+endchoice -+ -+config AUFS_EXPORT -+ bool "NFS-exportable aufs" -+ depends on EXPORTFS -+ help -+ If you want to export your mounted aufs via NFS, then enable this -+ option. There are several requirements for this configuration. -+ See detail in aufs.5. -+ -+config AUFS_INO_T_64 -+ bool -+ depends on AUFS_EXPORT -+ depends on 64BIT && !(ALPHA || S390) -+ default y -+ help -+ Automatic configuration for internal use. 
-+ /* typedef unsigned long/int __kernel_ino_t */ -+ /* alpha and s390x are int */ -+ -+config AUFS_XATTR -+ bool "support for XATTR/EA (including Security Labels)" -+ help -+ If your branch fs supports XATTR/EA and you want to make them -+ available in aufs too, then enable this opsion and specify the -+ branch attributes for EA. -+ See detail in aufs.5. -+ -+config AUFS_FHSM -+ bool "File-based Hierarchical Storage Management" -+ help -+ Hierarchical Storage Management (or HSM) is a well-known feature -+ in the storage world. Aufs provides this feature as file-based. -+ with multiple branches. -+ These multiple branches are prioritized, ie. the topmost one -+ should be the fastest drive and be used heavily. -+ -+config AUFS_RDU -+ bool "Readdir in userspace" -+ help -+ Aufs has two methods to provide a merged view for a directory, -+ by a user-space library and by kernel-space natively. The latter -+ is always enabled but sometimes large and slow. -+ If you enable this option, install the library in aufs2-util -+ package, and set some environment variables for your readdir(3), -+ then the work will be handled in user-space which generally -+ shows better performance in most cases. -+ See detail in aufs.5. -+ -+config AUFS_SHWH -+ bool "Show whiteouts" -+ help -+ If you want to make the whiteouts in aufs visible, then enable -+ this option and specify 'shwh' mount option. Although it may -+ sounds like philosophy or something, but in technically it -+ simply shows the name of whiteout with keeping its behaviour. -+ -+config AUFS_BR_RAMFS -+ bool "Ramfs (initramfs/rootfs) as an aufs branch" -+ help -+ If you want to use ramfs as an aufs branch fs, then enable this -+ option. Generally tmpfs is recommended. -+ Aufs prohibited them to be a branch fs by default, because -+ initramfs becomes unusable after switch_root or something -+ generally. 
If you sets initramfs as an aufs branch and boot your -+ system by switch_root, you will meet a problem easily since the -+ files in initramfs may be inaccessible. -+ Unless you are going to use ramfs as an aufs branch fs without -+ switch_root or something, leave it N. -+ -+config AUFS_BR_FUSE -+ bool "Fuse fs as an aufs branch" -+ depends on FUSE_FS -+ select AUFS_POLL -+ help -+ If you want to use fuse-based userspace filesystem as an aufs -+ branch fs, then enable this option. -+ It implements the internal poll(2) operation which is -+ implemented by fuse only (curretnly). -+ -+config AUFS_POLL -+ bool -+ help -+ Automatic configuration for internal use. -+ -+config AUFS_BR_HFSPLUS -+ bool "Hfsplus as an aufs branch" -+ depends on HFSPLUS_FS -+ default y -+ help -+ If you want to use hfsplus fs as an aufs branch fs, then enable -+ this option. This option introduces a small overhead at -+ copying-up a file on hfsplus. -+ -+config AUFS_BDEV_LOOP -+ bool -+ depends on BLK_DEV_LOOP -+ default y -+ help -+ Automatic configuration for internal use. -+ Convert =[ym] into =y. -+ -+config AUFS_DEBUG -+ bool "Debug aufs" -+ help -+ Enable this to compile aufs internal debug code. -+ It will have a negative impact to the performance. -+ -+config AUFS_MAGIC_SYSRQ -+ bool -+ depends on AUFS_DEBUG && MAGIC_SYSRQ -+ default y -+ help -+ Automatic configuration for internal use. -+ When aufs supports Magic SysRq, enabled automatically. -+endif -diff --git a/fs/aufs/Makefile b/fs/aufs/Makefile -new file mode 100644 -index 0000000..c7a501e ---- /dev/null -+++ b/fs/aufs/Makefile -@@ -0,0 +1,44 @@ -+ -+include ${src}/magic.mk -+ifeq (${CONFIG_AUFS_FS},m) -+include ${src}/conf.mk -+endif -+-include ${src}/priv_def.mk -+ -+# cf. 
include/linux/kernel.h -+# enable pr_debug -+ccflags-y += -DDEBUG -+# sparse requires the full pathname -+ifdef M -+ccflags-y += -include ${M}/../../include/uapi/linux/aufs_type.h -+else -+ccflags-y += -include ${srctree}/include/uapi/linux/aufs_type.h -+endif -+ -+obj-$(CONFIG_AUFS_FS) += aufs.o -+aufs-y := module.o sbinfo.o super.o branch.o xino.o sysaufs.o opts.o \ -+ wkq.o vfsub.o dcsub.o \ -+ cpup.o whout.o wbr_policy.o \ -+ dinfo.o dentry.o \ -+ dynop.o \ -+ finfo.o file.o f_op.o \ -+ dir.o vdir.o \ -+ iinfo.o inode.o i_op.o i_op_add.o i_op_del.o i_op_ren.o \ -+ mvdown.o ioctl.o -+ -+# all are boolean -+aufs-$(CONFIG_PROC_FS) += procfs.o plink.o -+aufs-$(CONFIG_SYSFS) += sysfs.o -+aufs-$(CONFIG_DEBUG_FS) += dbgaufs.o -+aufs-$(CONFIG_AUFS_BDEV_LOOP) += loop.o -+aufs-$(CONFIG_AUFS_HNOTIFY) += hnotify.o -+aufs-$(CONFIG_AUFS_HFSNOTIFY) += hfsnotify.o -+aufs-$(CONFIG_AUFS_EXPORT) += export.o -+aufs-$(CONFIG_AUFS_XATTR) += xattr.o -+aufs-$(CONFIG_FS_POSIX_ACL) += posix_acl.o -+aufs-$(CONFIG_AUFS_FHSM) += fhsm.o -+aufs-$(CONFIG_AUFS_POLL) += poll.o -+aufs-$(CONFIG_AUFS_RDU) += rdu.o -+aufs-$(CONFIG_AUFS_BR_HFSPLUS) += hfsplus.o -+aufs-$(CONFIG_AUFS_DEBUG) += debug.o -+aufs-$(CONFIG_AUFS_MAGIC_SYSRQ) += sysrq.o -diff --git a/fs/aufs/aufs.h b/fs/aufs/aufs.h -new file mode 100644 -index 0000000..e48d268 ---- /dev/null -+++ b/fs/aufs/aufs.h -@@ -0,0 +1,59 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * all header files -+ */ -+ -+#ifndef __AUFS_H__ -+#define __AUFS_H__ -+ -+#ifdef __KERNEL__ -+ -+#define AuStub(type, name, body, ...) \ -+ static inline type name(__VA_ARGS__) { body; } -+ -+#define AuStubVoid(name, ...) \ -+ AuStub(void, name, , __VA_ARGS__) -+#define AuStubInt0(name, ...) \ -+ AuStub(int, name, return 0, __VA_ARGS__) -+ -+#include "debug.h" -+ -+#include "branch.h" -+#include "cpup.h" -+#include "dcsub.h" -+#include "dbgaufs.h" -+#include "dentry.h" -+#include "dir.h" -+#include "dynop.h" -+#include "file.h" -+#include "fstype.h" -+#include "inode.h" -+#include "loop.h" -+#include "module.h" -+#include "opts.h" -+#include "rwsem.h" -+#include "spl.h" -+#include "super.h" -+#include "sysaufs.h" -+#include "vfsub.h" -+#include "whout.h" -+#include "wkq.h" -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_H__ */ -diff --git a/fs/aufs/branch.c b/fs/aufs/branch.c -new file mode 100644 -index 0000000..17210b2 ---- /dev/null -+++ b/fs/aufs/branch.c -@@ -0,0 +1,1402 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * branch management -+ */ -+ -+#include -+#include -+#include "aufs.h" -+ -+/* -+ * free a single branch -+ */ -+static void au_br_do_free(struct au_branch *br) -+{ -+ int i; -+ struct au_wbr *wbr; -+ struct au_dykey **key; -+ -+ au_hnotify_fin_br(br); -+ -+ if (br->br_xino.xi_file) -+ fput(br->br_xino.xi_file); -+ mutex_destroy(&br->br_xino.xi_nondir_mtx); -+ -+ AuDebugOn(atomic_read(&br->br_count)); -+ -+ wbr = br->br_wbr; -+ if (wbr) { -+ for (i = 0; i < AuBrWh_Last; i++) -+ dput(wbr->wbr_wh[i]); -+ AuDebugOn(atomic_read(&wbr->wbr_wh_running)); -+ AuRwDestroy(&wbr->wbr_wh_rwsem); -+ } -+ -+ if (br->br_fhsm) { -+ au_br_fhsm_fin(br->br_fhsm); -+ kfree(br->br_fhsm); -+ } -+ -+ key = br->br_dykey; -+ for (i = 0; i < AuBrDynOp; i++, key++) -+ if (*key) -+ au_dy_put(*key); -+ else -+ break; -+ -+ /* recursive lock, s_umount of branch's */ -+ lockdep_off(); -+ path_put(&br->br_path); -+ lockdep_on(); -+ kfree(wbr); -+ kfree(br); -+} -+ -+/* -+ * frees all branches -+ */ -+void au_br_free(struct au_sbinfo *sbinfo) -+{ -+ aufs_bindex_t bmax; -+ struct au_branch **br; -+ -+ AuRwMustWriteLock(&sbinfo->si_rwsem); -+ -+ bmax = sbinfo->si_bend + 1; -+ br = sbinfo->si_branch; -+ while (bmax--) -+ au_br_do_free(*br++); -+} -+ -+/* -+ * find the index of a branch which is specified by @br_id. 
-+ */ -+int au_br_index(struct super_block *sb, aufs_bindex_t br_id) -+{ -+ aufs_bindex_t bindex, bend; -+ -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) -+ if (au_sbr_id(sb, bindex) == br_id) -+ return bindex; -+ return -1; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * add a branch -+ */ -+ -+static int test_overlap(struct super_block *sb, struct dentry *h_adding, -+ struct dentry *h_root) -+{ -+ if (unlikely(h_adding == h_root -+ || au_test_loopback_overlap(sb, h_adding))) -+ return 1; -+ if (h_adding->d_sb != h_root->d_sb) -+ return 0; -+ return au_test_subdir(h_adding, h_root) -+ || au_test_subdir(h_root, h_adding); -+} -+ -+/* -+ * returns a newly allocated branch. @new_nbranch is a number of branches -+ * after adding a branch. -+ */ -+static struct au_branch *au_br_alloc(struct super_block *sb, int new_nbranch, -+ int perm) -+{ -+ struct au_branch *add_branch; -+ struct dentry *root; -+ int err; -+ -+ err = -ENOMEM; -+ root = sb->s_root; -+ add_branch = kzalloc(sizeof(*add_branch), GFP_NOFS); -+ if (unlikely(!add_branch)) -+ goto out; -+ -+ err = au_hnotify_init_br(add_branch, perm); -+ if (unlikely(err)) -+ goto out_br; -+ -+ if (au_br_writable(perm)) { -+ /* may be freed separately at changing the branch permission */ -+ add_branch->br_wbr = kzalloc(sizeof(*add_branch->br_wbr), -+ GFP_NOFS); -+ if (unlikely(!add_branch->br_wbr)) -+ goto out_hnotify; -+ } -+ -+ if (au_br_fhsm(perm)) { -+ err = au_fhsm_br_alloc(add_branch); -+ if (unlikely(err)) -+ goto out_wbr; -+ } -+ -+ err = au_sbr_realloc(au_sbi(sb), new_nbranch); -+ if (!err) -+ err = au_di_realloc(au_di(root), new_nbranch); -+ if (!err) -+ err = au_ii_realloc(au_ii(root->d_inode), new_nbranch); -+ if (!err) -+ return add_branch; /* success */ -+ -+out_wbr: -+ kfree(add_branch->br_wbr); -+out_hnotify: -+ au_hnotify_fin_br(add_branch); -+out_br: -+ kfree(add_branch); -+out: -+ return ERR_PTR(err); -+} -+ -+/* -+ * test if the 
branch permission is legal or not. -+ */ -+static int test_br(struct inode *inode, int brperm, char *path) -+{ -+ int err; -+ -+ err = (au_br_writable(brperm) && IS_RDONLY(inode)); -+ if (!err) -+ goto out; -+ -+ err = -EINVAL; -+ pr_err("write permission for readonly mount or inode, %s\n", path); -+ -+out: -+ return err; -+} -+ -+/* -+ * returns: -+ * 0: success, the caller will add it -+ * plus: success, it is already unified, the caller should ignore it -+ * minus: error -+ */ -+static int test_add(struct super_block *sb, struct au_opt_add *add, int remount) -+{ -+ int err; -+ aufs_bindex_t bend, bindex; -+ struct dentry *root; -+ struct inode *inode, *h_inode; -+ -+ root = sb->s_root; -+ bend = au_sbend(sb); -+ if (unlikely(bend >= 0 -+ && au_find_dbindex(root, add->path.dentry) >= 0)) { -+ err = 1; -+ if (!remount) { -+ err = -EINVAL; -+ pr_err("%s duplicated\n", add->pathname); -+ } -+ goto out; -+ } -+ -+ err = -ENOSPC; /* -E2BIG; */ -+ if (unlikely(AUFS_BRANCH_MAX <= add->bindex -+ || AUFS_BRANCH_MAX - 1 <= bend)) { -+ pr_err("number of branches exceeded %s\n", add->pathname); -+ goto out; -+ } -+ -+ err = -EDOM; -+ if (unlikely(add->bindex < 0 || bend + 1 < add->bindex)) { -+ pr_err("bad index %d\n", add->bindex); -+ goto out; -+ } -+ -+ inode = add->path.dentry->d_inode; -+ err = -ENOENT; -+ if (unlikely(!inode->i_nlink)) { -+ pr_err("no existence %s\n", add->pathname); -+ goto out; -+ } -+ -+ err = -EINVAL; -+ if (unlikely(inode->i_sb == sb)) { -+ pr_err("%s must be outside\n", add->pathname); -+ goto out; -+ } -+ -+ if (unlikely(au_test_fs_unsuppoted(inode->i_sb))) { -+ pr_err("unsupported filesystem, %s (%s)\n", -+ add->pathname, au_sbtype(inode->i_sb)); -+ goto out; -+ } -+ -+ if (unlikely(inode->i_sb->s_stack_depth)) { -+ pr_err("already stacked, %s (%s)\n", -+ add->pathname, au_sbtype(inode->i_sb)); -+ goto out; -+ } -+ -+ err = test_br(add->path.dentry->d_inode, add->perm, add->pathname); -+ if (unlikely(err)) -+ goto out; -+ -+ if (bend < 0) -+ 
return 0; /* success */ -+ -+ err = -EINVAL; -+ for (bindex = 0; bindex <= bend; bindex++) -+ if (unlikely(test_overlap(sb, add->path.dentry, -+ au_h_dptr(root, bindex)))) { -+ pr_err("%s is overlapped\n", add->pathname); -+ goto out; -+ } -+ -+ err = 0; -+ if (au_opt_test(au_mntflags(sb), WARN_PERM)) { -+ h_inode = au_h_dptr(root, 0)->d_inode; -+ if ((h_inode->i_mode & S_IALLUGO) != (inode->i_mode & S_IALLUGO) -+ || !uid_eq(h_inode->i_uid, inode->i_uid) -+ || !gid_eq(h_inode->i_gid, inode->i_gid)) -+ pr_warn("uid/gid/perm %s %u/%u/0%o, %u/%u/0%o\n", -+ add->pathname, -+ i_uid_read(inode), i_gid_read(inode), -+ (inode->i_mode & S_IALLUGO), -+ i_uid_read(h_inode), i_gid_read(h_inode), -+ (h_inode->i_mode & S_IALLUGO)); -+ } -+ -+out: -+ return err; -+} -+ -+/* -+ * initialize or clean the whiteouts for an adding branch -+ */ -+static int au_br_init_wh(struct super_block *sb, struct au_branch *br, -+ int new_perm) -+{ -+ int err, old_perm; -+ aufs_bindex_t bindex; -+ struct mutex *h_mtx; -+ struct au_wbr *wbr; -+ struct au_hinode *hdir; -+ -+ err = vfsub_mnt_want_write(au_br_mnt(br)); -+ if (unlikely(err)) -+ goto out; -+ -+ wbr = br->br_wbr; -+ old_perm = br->br_perm; -+ br->br_perm = new_perm; -+ hdir = NULL; -+ h_mtx = NULL; -+ bindex = au_br_index(sb, br->br_id); -+ if (0 <= bindex) { -+ hdir = au_hi(sb->s_root->d_inode, bindex); -+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT); -+ } else { -+ h_mtx = &au_br_dentry(br)->d_inode->i_mutex; -+ mutex_lock_nested(h_mtx, AuLsc_I_PARENT); -+ } -+ if (!wbr) -+ err = au_wh_init(br, sb); -+ else { -+ wbr_wh_write_lock(wbr); -+ err = au_wh_init(br, sb); -+ wbr_wh_write_unlock(wbr); -+ } -+ if (hdir) -+ au_hn_imtx_unlock(hdir); -+ else -+ mutex_unlock(h_mtx); -+ vfsub_mnt_drop_write(au_br_mnt(br)); -+ br->br_perm = old_perm; -+ -+ if (!err && wbr && !au_br_writable(new_perm)) { -+ kfree(wbr); -+ br->br_wbr = NULL; -+ } -+ -+out: -+ return err; -+} -+ -+static int au_wbr_init(struct au_branch *br, struct super_block *sb, -+ 
int perm) -+{ -+ int err; -+ struct kstatfs kst; -+ struct au_wbr *wbr; -+ -+ wbr = br->br_wbr; -+ au_rw_init(&wbr->wbr_wh_rwsem); -+ atomic_set(&wbr->wbr_wh_running, 0); -+ -+ /* -+ * a limit for rmdir/rename a dir -+ * cf. AUFS_MAX_NAMELEN in include/uapi/linux/aufs_type.h -+ */ -+ err = vfs_statfs(&br->br_path, &kst); -+ if (unlikely(err)) -+ goto out; -+ err = -EINVAL; -+ if (kst.f_namelen >= NAME_MAX) -+ err = au_br_init_wh(sb, br, perm); -+ else -+ pr_err("%pd(%s), unsupported namelen %ld\n", -+ au_br_dentry(br), -+ au_sbtype(au_br_dentry(br)->d_sb), kst.f_namelen); -+ -+out: -+ return err; -+} -+ -+/* initialize a new branch */ -+static int au_br_init(struct au_branch *br, struct super_block *sb, -+ struct au_opt_add *add) -+{ -+ int err; -+ -+ err = 0; -+ mutex_init(&br->br_xino.xi_nondir_mtx); -+ br->br_perm = add->perm; -+ br->br_path = add->path; /* set first, path_get() later */ -+ spin_lock_init(&br->br_dykey_lock); -+ atomic_set(&br->br_count, 0); -+ atomic_set(&br->br_xino_running, 0); -+ br->br_id = au_new_br_id(sb); -+ AuDebugOn(br->br_id < 0); -+ -+ if (au_br_writable(add->perm)) { -+ err = au_wbr_init(br, sb, add->perm); -+ if (unlikely(err)) -+ goto out_err; -+ } -+ -+ if (au_opt_test(au_mntflags(sb), XINO)) { -+ err = au_xino_br(sb, br, add->path.dentry->d_inode->i_ino, -+ au_sbr(sb, 0)->br_xino.xi_file, /*do_test*/1); -+ if (unlikely(err)) { -+ AuDebugOn(br->br_xino.xi_file); -+ goto out_err; -+ } -+ } -+ -+ sysaufs_br_init(br); -+ path_get(&br->br_path); -+ goto out; /* success */ -+ -+out_err: -+ memset(&br->br_path, 0, sizeof(br->br_path)); -+out: -+ return err; -+} -+ -+static void au_br_do_add_brp(struct au_sbinfo *sbinfo, aufs_bindex_t bindex, -+ struct au_branch *br, aufs_bindex_t bend, -+ aufs_bindex_t amount) -+{ -+ struct au_branch **brp; -+ -+ AuRwMustWriteLock(&sbinfo->si_rwsem); -+ -+ brp = sbinfo->si_branch + bindex; -+ memmove(brp + 1, brp, sizeof(*brp) * amount); -+ *brp = br; -+ sbinfo->si_bend++; -+ if (unlikely(bend < 0)) -+ 
sbinfo->si_bend = 0; -+} -+ -+static void au_br_do_add_hdp(struct au_dinfo *dinfo, aufs_bindex_t bindex, -+ aufs_bindex_t bend, aufs_bindex_t amount) -+{ -+ struct au_hdentry *hdp; -+ -+ AuRwMustWriteLock(&dinfo->di_rwsem); -+ -+ hdp = dinfo->di_hdentry + bindex; -+ memmove(hdp + 1, hdp, sizeof(*hdp) * amount); -+ au_h_dentry_init(hdp); -+ dinfo->di_bend++; -+ if (unlikely(bend < 0)) -+ dinfo->di_bstart = 0; -+} -+ -+static void au_br_do_add_hip(struct au_iinfo *iinfo, aufs_bindex_t bindex, -+ aufs_bindex_t bend, aufs_bindex_t amount) -+{ -+ struct au_hinode *hip; -+ -+ AuRwMustWriteLock(&iinfo->ii_rwsem); -+ -+ hip = iinfo->ii_hinode + bindex; -+ memmove(hip + 1, hip, sizeof(*hip) * amount); -+ hip->hi_inode = NULL; -+ au_hn_init(hip); -+ iinfo->ii_bend++; -+ if (unlikely(bend < 0)) -+ iinfo->ii_bstart = 0; -+} -+ -+static void au_br_do_add(struct super_block *sb, struct au_branch *br, -+ aufs_bindex_t bindex) -+{ -+ struct dentry *root, *h_dentry; -+ struct inode *root_inode; -+ aufs_bindex_t bend, amount; -+ -+ root = sb->s_root; -+ root_inode = root->d_inode; -+ bend = au_sbend(sb); -+ amount = bend + 1 - bindex; -+ h_dentry = au_br_dentry(br); -+ au_sbilist_lock(); -+ au_br_do_add_brp(au_sbi(sb), bindex, br, bend, amount); -+ au_br_do_add_hdp(au_di(root), bindex, bend, amount); -+ au_br_do_add_hip(au_ii(root_inode), bindex, bend, amount); -+ au_set_h_dptr(root, bindex, dget(h_dentry)); -+ au_set_h_iptr(root_inode, bindex, au_igrab(h_dentry->d_inode), -+ /*flags*/0); -+ au_sbilist_unlock(); -+} -+ -+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount) -+{ -+ int err; -+ aufs_bindex_t bend, add_bindex; -+ struct dentry *root, *h_dentry; -+ struct inode *root_inode; -+ struct au_branch *add_branch; -+ -+ root = sb->s_root; -+ root_inode = root->d_inode; -+ IMustLock(root_inode); -+ err = test_add(sb, add, remount); -+ if (unlikely(err < 0)) -+ goto out; -+ if (err) { -+ err = 0; -+ goto out; /* success */ -+ } -+ -+ bend = au_sbend(sb); -+ 
add_branch = au_br_alloc(sb, bend + 2, add->perm); -+ err = PTR_ERR(add_branch); -+ if (IS_ERR(add_branch)) -+ goto out; -+ -+ err = au_br_init(add_branch, sb, add); -+ if (unlikely(err)) { -+ au_br_do_free(add_branch); -+ goto out; -+ } -+ -+ add_bindex = add->bindex; -+ if (!remount) -+ au_br_do_add(sb, add_branch, add_bindex); -+ else { -+ sysaufs_brs_del(sb, add_bindex); -+ au_br_do_add(sb, add_branch, add_bindex); -+ sysaufs_brs_add(sb, add_bindex); -+ } -+ -+ h_dentry = add->path.dentry; -+ if (!add_bindex) { -+ au_cpup_attr_all(root_inode, /*force*/1); -+ sb->s_maxbytes = h_dentry->d_sb->s_maxbytes; -+ } else -+ au_add_nlink(root_inode, h_dentry->d_inode); -+ -+ /* -+ * this test/set prevents aufs from handling unnecesary notify events -+ * of xino files, in case of re-adding a writable branch which was -+ * once detached from aufs. -+ */ -+ if (au_xino_brid(sb) < 0 -+ && au_br_writable(add_branch->br_perm) -+ && !au_test_fs_bad_xino(h_dentry->d_sb) -+ && add_branch->br_xino.xi_file -+ && add_branch->br_xino.xi_file->f_dentry->d_parent == h_dentry) -+ au_xino_brid_set(sb, add_branch->br_id); -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static unsigned long long au_farray_cb(void *a, -+ unsigned long long max __maybe_unused, -+ void *arg) -+{ -+ unsigned long long n; -+ struct file **p, *f; -+ struct au_sphlhead *files; -+ struct au_finfo *finfo; -+ struct super_block *sb = arg; -+ -+ n = 0; -+ p = a; -+ files = &au_sbi(sb)->si_files; -+ spin_lock(&files->spin); -+ hlist_for_each_entry(finfo, &files->head, fi_hlist) { -+ f = finfo->fi_file; -+ if (file_count(f) -+ && !special_file(file_inode(f)->i_mode)) { -+ get_file(f); -+ *p++ = f; -+ n++; -+ AuDebugOn(n > max); -+ } -+ } -+ spin_unlock(&files->spin); -+ -+ return n; -+} -+ -+static struct file **au_farray_alloc(struct super_block *sb, -+ unsigned long long *max) -+{ -+ *max = atomic_long_read(&au_sbi(sb)->si_nfiles); -+ return 
au_array_alloc(max, au_farray_cb, sb); -+} -+ -+static void au_farray_free(struct file **a, unsigned long long max) -+{ -+ unsigned long long ull; -+ -+ for (ull = 0; ull < max; ull++) -+ if (a[ull]) -+ fput(a[ull]); -+ kvfree(a); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * delete a branch -+ */ -+ -+/* to show the line number, do not make it inlined function */ -+#define AuVerbose(do_info, fmt, ...) do { \ -+ if (do_info) \ -+ pr_info(fmt, ##__VA_ARGS__); \ -+} while (0) -+ -+static int au_test_ibusy(struct inode *inode, aufs_bindex_t bstart, -+ aufs_bindex_t bend) -+{ -+ return (inode && !S_ISDIR(inode->i_mode)) || bstart == bend; -+} -+ -+static int au_test_dbusy(struct dentry *dentry, aufs_bindex_t bstart, -+ aufs_bindex_t bend) -+{ -+ return au_test_ibusy(dentry->d_inode, bstart, bend); -+} -+ -+/* -+ * test if the branch is deletable or not. -+ */ -+static int test_dentry_busy(struct dentry *root, aufs_bindex_t bindex, -+ unsigned int sigen, const unsigned int verbose) -+{ -+ int err, i, j, ndentry; -+ aufs_bindex_t bstart, bend; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct dentry *d; -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_dcsub_pages(&dpages, root, NULL, NULL); -+ if (unlikely(err)) -+ goto out_dpages; -+ -+ for (i = 0; !err && i < dpages.ndpage; i++) { -+ dpage = dpages.dpages + i; -+ ndentry = dpage->ndentry; -+ for (j = 0; !err && j < ndentry; j++) { -+ d = dpage->dentries[j]; -+ AuDebugOn(au_dcount(d) <= 0); -+ if (!au_digen_test(d, sigen)) { -+ di_read_lock_child(d, AuLock_IR); -+ if (unlikely(au_dbrange_test(d))) { -+ di_read_unlock(d, AuLock_IR); -+ continue; -+ } -+ } else { -+ di_write_lock_child(d); -+ if (unlikely(au_dbrange_test(d))) { -+ di_write_unlock(d); -+ continue; -+ } -+ err = au_reval_dpath(d, sigen); -+ if (!err) -+ di_downgrade_lock(d, AuLock_IR); -+ else { -+ di_write_unlock(d); -+ break; -+ } -+ } -+ 
-+ /* AuDbgDentry(d); */ -+ bstart = au_dbstart(d); -+ bend = au_dbend(d); -+ if (bstart <= bindex -+ && bindex <= bend -+ && au_h_dptr(d, bindex) -+ && au_test_dbusy(d, bstart, bend)) { -+ err = -EBUSY; -+ AuVerbose(verbose, "busy %pd\n", d); -+ AuDbgDentry(d); -+ } -+ di_read_unlock(d, AuLock_IR); -+ } -+ } -+ -+out_dpages: -+ au_dpages_free(&dpages); -+out: -+ return err; -+} -+ -+static int test_inode_busy(struct super_block *sb, aufs_bindex_t bindex, -+ unsigned int sigen, const unsigned int verbose) -+{ -+ int err; -+ unsigned long long max, ull; -+ struct inode *i, **array; -+ aufs_bindex_t bstart, bend; -+ -+ array = au_iarray_alloc(sb, &max); -+ err = PTR_ERR(array); -+ if (IS_ERR(array)) -+ goto out; -+ -+ err = 0; -+ AuDbg("b%d\n", bindex); -+ for (ull = 0; !err && ull < max; ull++) { -+ i = array[ull]; -+ if (unlikely(!i)) -+ break; -+ if (i->i_ino == AUFS_ROOT_INO) -+ continue; -+ -+ /* AuDbgInode(i); */ -+ if (au_iigen(i, NULL) == sigen) -+ ii_read_lock_child(i); -+ else { -+ ii_write_lock_child(i); -+ err = au_refresh_hinode_self(i); -+ au_iigen_dec(i); -+ if (!err) -+ ii_downgrade_lock(i); -+ else { -+ ii_write_unlock(i); -+ break; -+ } -+ } -+ -+ bstart = au_ibstart(i); -+ bend = au_ibend(i); -+ if (bstart <= bindex -+ && bindex <= bend -+ && au_h_iptr(i, bindex) -+ && au_test_ibusy(i, bstart, bend)) { -+ err = -EBUSY; -+ AuVerbose(verbose, "busy i%lu\n", i->i_ino); -+ AuDbgInode(i); -+ } -+ ii_read_unlock(i); -+ } -+ au_iarray_free(array, max); -+ -+out: -+ return err; -+} -+ -+static int test_children_busy(struct dentry *root, aufs_bindex_t bindex, -+ const unsigned int verbose) -+{ -+ int err; -+ unsigned int sigen; -+ -+ sigen = au_sigen(root->d_sb); -+ DiMustNoWaiters(root); -+ IiMustNoWaiters(root->d_inode); -+ di_write_unlock(root); -+ err = test_dentry_busy(root, bindex, sigen, verbose); -+ if (!err) -+ err = test_inode_busy(root->d_sb, bindex, sigen, verbose); -+ di_write_lock_child(root); /* aufs_write_lock() calls ..._child() */ -+ -+ 
return err; -+} -+ -+static int test_dir_busy(struct file *file, aufs_bindex_t br_id, -+ struct file **to_free, int *idx) -+{ -+ int err; -+ unsigned char matched, root; -+ aufs_bindex_t bindex, bend; -+ struct au_fidir *fidir; -+ struct au_hfile *hfile; -+ -+ err = 0; -+ root = IS_ROOT(file->f_dentry); -+ if (root) { -+ get_file(file); -+ to_free[*idx] = file; -+ (*idx)++; -+ goto out; -+ } -+ -+ matched = 0; -+ fidir = au_fi(file)->fi_hdir; -+ AuDebugOn(!fidir); -+ bend = au_fbend_dir(file); -+ for (bindex = au_fbstart(file); bindex <= bend; bindex++) { -+ hfile = fidir->fd_hfile + bindex; -+ if (!hfile->hf_file) -+ continue; -+ -+ if (hfile->hf_br->br_id == br_id) { -+ matched = 1; -+ break; -+ } -+ } -+ if (matched) -+ err = -EBUSY; -+ -+out: -+ return err; -+} -+ -+static int test_file_busy(struct super_block *sb, aufs_bindex_t br_id, -+ struct file **to_free, int opened) -+{ -+ int err, idx; -+ unsigned long long ull, max; -+ aufs_bindex_t bstart; -+ struct file *file, **array; -+ struct dentry *root; -+ struct au_hfile *hfile; -+ -+ array = au_farray_alloc(sb, &max); -+ err = PTR_ERR(array); -+ if (IS_ERR(array)) -+ goto out; -+ -+ err = 0; -+ idx = 0; -+ root = sb->s_root; -+ di_write_unlock(root); -+ for (ull = 0; ull < max; ull++) { -+ file = array[ull]; -+ if (unlikely(!file)) -+ break; -+ -+ /* AuDbg("%pD\n", file); */ -+ fi_read_lock(file); -+ bstart = au_fbstart(file); -+ if (!d_is_dir(file->f_path.dentry)) { -+ hfile = &au_fi(file)->fi_htop; -+ if (hfile->hf_br->br_id == br_id) -+ err = -EBUSY; -+ } else -+ err = test_dir_busy(file, br_id, to_free, &idx); -+ fi_read_unlock(file); -+ if (unlikely(err)) -+ break; -+ } -+ di_write_lock_child(root); -+ au_farray_free(array, max); -+ AuDebugOn(idx > opened); -+ -+out: -+ return err; -+} -+ -+static void br_del_file(struct file **to_free, unsigned long long opened, -+ aufs_bindex_t br_id) -+{ -+ unsigned long long ull; -+ aufs_bindex_t bindex, bstart, bend, bfound; -+ struct file *file; -+ struct au_fidir 
*fidir; -+ struct au_hfile *hfile; -+ -+ for (ull = 0; ull < opened; ull++) { -+ file = to_free[ull]; -+ if (unlikely(!file)) -+ break; -+ -+ /* AuDbg("%pD\n", file); */ -+ AuDebugOn(!d_is_dir(file->f_path.dentry)); -+ bfound = -1; -+ fidir = au_fi(file)->fi_hdir; -+ AuDebugOn(!fidir); -+ fi_write_lock(file); -+ bstart = au_fbstart(file); -+ bend = au_fbend_dir(file); -+ for (bindex = bstart; bindex <= bend; bindex++) { -+ hfile = fidir->fd_hfile + bindex; -+ if (!hfile->hf_file) -+ continue; -+ -+ if (hfile->hf_br->br_id == br_id) { -+ bfound = bindex; -+ break; -+ } -+ } -+ AuDebugOn(bfound < 0); -+ au_set_h_fptr(file, bfound, NULL); -+ if (bfound == bstart) { -+ for (bstart++; bstart <= bend; bstart++) -+ if (au_hf_dir(file, bstart)) { -+ au_set_fbstart(file, bstart); -+ break; -+ } -+ } -+ fi_write_unlock(file); -+ } -+} -+ -+static void au_br_do_del_brp(struct au_sbinfo *sbinfo, -+ const aufs_bindex_t bindex, -+ const aufs_bindex_t bend) -+{ -+ struct au_branch **brp, **p; -+ -+ AuRwMustWriteLock(&sbinfo->si_rwsem); -+ -+ brp = sbinfo->si_branch + bindex; -+ if (bindex < bend) -+ memmove(brp, brp + 1, sizeof(*brp) * (bend - bindex)); -+ sbinfo->si_branch[0 + bend] = NULL; -+ sbinfo->si_bend--; -+ -+ p = krealloc(sbinfo->si_branch, sizeof(*p) * bend, AuGFP_SBILIST); -+ if (p) -+ sbinfo->si_branch = p; -+ /* harmless error */ -+} -+ -+static void au_br_do_del_hdp(struct au_dinfo *dinfo, const aufs_bindex_t bindex, -+ const aufs_bindex_t bend) -+{ -+ struct au_hdentry *hdp, *p; -+ -+ AuRwMustWriteLock(&dinfo->di_rwsem); -+ -+ hdp = dinfo->di_hdentry; -+ if (bindex < bend) -+ memmove(hdp + bindex, hdp + bindex + 1, -+ sizeof(*hdp) * (bend - bindex)); -+ hdp[0 + bend].hd_dentry = NULL; -+ dinfo->di_bend--; -+ -+ p = krealloc(hdp, sizeof(*p) * bend, AuGFP_SBILIST); -+ if (p) -+ dinfo->di_hdentry = p; -+ /* harmless error */ -+} -+ -+static void au_br_do_del_hip(struct au_iinfo *iinfo, const aufs_bindex_t bindex, -+ const aufs_bindex_t bend) -+{ -+ struct au_hinode 
*hip, *p; -+ -+ AuRwMustWriteLock(&iinfo->ii_rwsem); -+ -+ hip = iinfo->ii_hinode + bindex; -+ if (bindex < bend) -+ memmove(hip, hip + 1, sizeof(*hip) * (bend - bindex)); -+ iinfo->ii_hinode[0 + bend].hi_inode = NULL; -+ au_hn_init(iinfo->ii_hinode + bend); -+ iinfo->ii_bend--; -+ -+ p = krealloc(iinfo->ii_hinode, sizeof(*p) * bend, AuGFP_SBILIST); -+ if (p) -+ iinfo->ii_hinode = p; -+ /* harmless error */ -+} -+ -+static void au_br_do_del(struct super_block *sb, aufs_bindex_t bindex, -+ struct au_branch *br) -+{ -+ aufs_bindex_t bend; -+ struct au_sbinfo *sbinfo; -+ struct dentry *root, *h_root; -+ struct inode *inode, *h_inode; -+ struct au_hinode *hinode; -+ -+ SiMustWriteLock(sb); -+ -+ root = sb->s_root; -+ inode = root->d_inode; -+ sbinfo = au_sbi(sb); -+ bend = sbinfo->si_bend; -+ -+ h_root = au_h_dptr(root, bindex); -+ hinode = au_hi(inode, bindex); -+ h_inode = au_igrab(hinode->hi_inode); -+ au_hiput(hinode); -+ -+ au_sbilist_lock(); -+ au_br_do_del_brp(sbinfo, bindex, bend); -+ au_br_do_del_hdp(au_di(root), bindex, bend); -+ au_br_do_del_hip(au_ii(inode), bindex, bend); -+ au_sbilist_unlock(); -+ -+ dput(h_root); -+ iput(h_inode); -+ au_br_do_free(br); -+} -+ -+static unsigned long long empty_cb(void *array, unsigned long long max, -+ void *arg) -+{ -+ return max; -+} -+ -+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount) -+{ -+ int err, rerr, i; -+ unsigned long long opened; -+ unsigned int mnt_flags; -+ aufs_bindex_t bindex, bend, br_id; -+ unsigned char do_wh, verbose; -+ struct au_branch *br; -+ struct au_wbr *wbr; -+ struct dentry *root; -+ struct file **to_free; -+ -+ err = 0; -+ opened = 0; -+ to_free = NULL; -+ root = sb->s_root; -+ bindex = au_find_dbindex(root, del->h_path.dentry); -+ if (bindex < 0) { -+ if (remount) -+ goto out; /* success */ -+ err = -ENOENT; -+ pr_err("%s no such branch\n", del->pathname); -+ goto out; -+ } -+ AuDbg("bindex b%d\n", bindex); -+ -+ err = -EBUSY; -+ mnt_flags = au_mntflags(sb); -+ 
verbose = !!au_opt_test(mnt_flags, VERBOSE); -+ bend = au_sbend(sb); -+ if (unlikely(!bend)) { -+ AuVerbose(verbose, "no more branches left\n"); -+ goto out; -+ } -+ br = au_sbr(sb, bindex); -+ AuDebugOn(!path_equal(&br->br_path, &del->h_path)); -+ -+ br_id = br->br_id; -+ opened = atomic_read(&br->br_count); -+ if (unlikely(opened)) { -+ to_free = au_array_alloc(&opened, empty_cb, NULL); -+ err = PTR_ERR(to_free); -+ if (IS_ERR(to_free)) -+ goto out; -+ -+ err = test_file_busy(sb, br_id, to_free, opened); -+ if (unlikely(err)) { -+ AuVerbose(verbose, "%llu file(s) opened\n", opened); -+ goto out; -+ } -+ } -+ -+ wbr = br->br_wbr; -+ do_wh = wbr && (wbr->wbr_whbase || wbr->wbr_plink || wbr->wbr_orph); -+ if (do_wh) { -+ /* instead of WbrWhMustWriteLock(wbr) */ -+ SiMustWriteLock(sb); -+ for (i = 0; i < AuBrWh_Last; i++) { -+ dput(wbr->wbr_wh[i]); -+ wbr->wbr_wh[i] = NULL; -+ } -+ } -+ -+ err = test_children_busy(root, bindex, verbose); -+ if (unlikely(err)) { -+ if (do_wh) -+ goto out_wh; -+ goto out; -+ } -+ -+ err = 0; -+ if (to_free) { -+ /* -+ * now we confirmed the branch is deletable. -+ * let's free the remaining opened dirs on the branch. -+ */ -+ di_write_unlock(root); -+ br_del_file(to_free, opened, br_id); -+ di_write_lock_child(root); -+ } -+ -+ if (!remount) -+ au_br_do_del(sb, bindex, br); -+ else { -+ sysaufs_brs_del(sb, bindex); -+ au_br_do_del(sb, bindex, br); -+ sysaufs_brs_add(sb, bindex); -+ } -+ -+ if (!bindex) { -+ au_cpup_attr_all(root->d_inode, /*force*/1); -+ sb->s_maxbytes = au_sbr_sb(sb, 0)->s_maxbytes; -+ } else -+ au_sub_nlink(root->d_inode, del->h_path.dentry->d_inode); -+ if (au_opt_test(mnt_flags, PLINK)) -+ au_plink_half_refresh(sb, br_id); -+ -+ if (au_xino_brid(sb) == br_id) -+ au_xino_brid_set(sb, -1); -+ goto out; /* success */ -+ -+out_wh: -+ /* revert */ -+ rerr = au_br_init_wh(sb, br, br->br_perm); -+ if (rerr) -+ pr_warn("failed re-creating base whiteout, %s. 
(%d)\n", -+ del->pathname, rerr); -+out: -+ if (to_free) -+ au_farray_free(to_free, opened); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_ibusy(struct super_block *sb, struct aufs_ibusy __user *arg) -+{ -+ int err; -+ aufs_bindex_t bstart, bend; -+ struct aufs_ibusy ibusy; -+ struct inode *inode, *h_inode; -+ -+ err = -EPERM; -+ if (unlikely(!capable(CAP_SYS_ADMIN))) -+ goto out; -+ -+ err = copy_from_user(&ibusy, arg, sizeof(ibusy)); -+ if (!err) -+ err = !access_ok(VERIFY_WRITE, &arg->h_ino, sizeof(arg->h_ino)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ goto out; -+ } -+ -+ err = -EINVAL; -+ si_read_lock(sb, AuLock_FLUSH); -+ if (unlikely(ibusy.bindex < 0 || ibusy.bindex > au_sbend(sb))) -+ goto out_unlock; -+ -+ err = 0; -+ ibusy.h_ino = 0; /* invalid */ -+ inode = ilookup(sb, ibusy.ino); -+ if (!inode -+ || inode->i_ino == AUFS_ROOT_INO -+ || is_bad_inode(inode)) -+ goto out_unlock; -+ -+ ii_read_lock_child(inode); -+ bstart = au_ibstart(inode); -+ bend = au_ibend(inode); -+ if (bstart <= ibusy.bindex && ibusy.bindex <= bend) { -+ h_inode = au_h_iptr(inode, ibusy.bindex); -+ if (h_inode && au_test_ibusy(inode, bstart, bend)) -+ ibusy.h_ino = h_inode->i_ino; -+ } -+ ii_read_unlock(inode); -+ iput(inode); -+ -+out_unlock: -+ si_read_unlock(sb); -+ if (!err) { -+ err = __put_user(ibusy.h_ino, &arg->h_ino); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ } -+ } -+out: -+ return err; -+} -+ -+long au_ibusy_ioctl(struct file *file, unsigned long arg) -+{ -+ return au_ibusy(file->f_dentry->d_sb, (void __user *)arg); -+} -+ -+#ifdef CONFIG_COMPAT -+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg) -+{ -+ return au_ibusy(file->f_dentry->d_sb, compat_ptr(arg)); -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * change a branch permission -+ */ -+ -+static void au_warn_ima(void) -+{ 
-+#ifdef CONFIG_IMA -+ /* since it doesn't support mark_files_ro() */ -+ AuWarn1("RW -> RO makes IMA to produce wrong message\n"); -+#endif -+} -+ -+static int do_need_sigen_inc(int a, int b) -+{ -+ return au_br_whable(a) && !au_br_whable(b); -+} -+ -+static int need_sigen_inc(int old, int new) -+{ -+ return do_need_sigen_inc(old, new) -+ || do_need_sigen_inc(new, old); -+} -+ -+static int au_br_mod_files_ro(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ int err, do_warn; -+ unsigned int mnt_flags; -+ unsigned long long ull, max; -+ aufs_bindex_t br_id; -+ unsigned char verbose, writer; -+ struct file *file, *hf, **array; -+ struct inode *inode; -+ struct au_hfile *hfile; -+ -+ mnt_flags = au_mntflags(sb); -+ verbose = !!au_opt_test(mnt_flags, VERBOSE); -+ -+ array = au_farray_alloc(sb, &max); -+ err = PTR_ERR(array); -+ if (IS_ERR(array)) -+ goto out; -+ -+ do_warn = 0; -+ br_id = au_sbr_id(sb, bindex); -+ for (ull = 0; ull < max; ull++) { -+ file = array[ull]; -+ if (unlikely(!file)) -+ break; -+ -+ /* AuDbg("%pD\n", file); */ -+ fi_read_lock(file); -+ if (unlikely(au_test_mmapped(file))) { -+ err = -EBUSY; -+ AuVerbose(verbose, "mmapped %pD\n", file); -+ AuDbgFile(file); -+ FiMustNoWaiters(file); -+ fi_read_unlock(file); -+ goto out_array; -+ } -+ -+ inode = file_inode(file); -+ hfile = &au_fi(file)->fi_htop; -+ hf = hfile->hf_file; -+ if (!S_ISREG(inode->i_mode) -+ || !(file->f_mode & FMODE_WRITE) -+ || hfile->hf_br->br_id != br_id -+ || !(hf->f_mode & FMODE_WRITE)) -+ array[ull] = NULL; -+ else { -+ do_warn = 1; -+ get_file(file); -+ } -+ -+ FiMustNoWaiters(file); -+ fi_read_unlock(file); -+ fput(file); -+ } -+ -+ err = 0; -+ if (do_warn) -+ au_warn_ima(); -+ -+ for (ull = 0; ull < max; ull++) { -+ file = array[ull]; -+ if (!file) -+ continue; -+ -+ /* todo: already flushed? 
*/ -+ /* -+ * fs/super.c:mark_files_ro() is gone, but aufs keeps its -+ * approach which resets f_mode and calls mnt_drop_write() and -+ * file_release_write() for each file, because the branch -+ * attribute in aufs world is totally different from the native -+ * fs rw/ro mode. -+ */ -+ /* fi_read_lock(file); */ -+ hfile = &au_fi(file)->fi_htop; -+ hf = hfile->hf_file; -+ /* fi_read_unlock(file); */ -+ spin_lock(&hf->f_lock); -+ writer = !!(hf->f_mode & FMODE_WRITER); -+ hf->f_mode &= ~(FMODE_WRITE | FMODE_WRITER); -+ spin_unlock(&hf->f_lock); -+ if (writer) { -+ put_write_access(file_inode(hf)); -+ __mnt_drop_write(hf->f_path.mnt); -+ } -+ } -+ -+out_array: -+ au_farray_free(array, max); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount, -+ int *do_refresh) -+{ -+ int err, rerr; -+ aufs_bindex_t bindex; -+ struct dentry *root; -+ struct au_branch *br; -+ struct au_br_fhsm *bf; -+ -+ root = sb->s_root; -+ bindex = au_find_dbindex(root, mod->h_root); -+ if (bindex < 0) { -+ if (remount) -+ return 0; /* success */ -+ err = -ENOENT; -+ pr_err("%s no such branch\n", mod->path); -+ goto out; -+ } -+ AuDbg("bindex b%d\n", bindex); -+ -+ err = test_br(mod->h_root->d_inode, mod->perm, mod->path); -+ if (unlikely(err)) -+ goto out; -+ -+ br = au_sbr(sb, bindex); -+ AuDebugOn(mod->h_root != au_br_dentry(br)); -+ if (br->br_perm == mod->perm) -+ return 0; /* success */ -+ -+ /* pre-allocate for non-fhsm --> fhsm */ -+ bf = NULL; -+ if (!au_br_fhsm(br->br_perm) && au_br_fhsm(mod->perm)) { -+ err = au_fhsm_br_alloc(br); -+ if (unlikely(err)) -+ goto out; -+ bf = br->br_fhsm; -+ br->br_fhsm = NULL; -+ } -+ -+ if (au_br_writable(br->br_perm)) { -+ /* remove whiteout base */ -+ err = au_br_init_wh(sb, br, mod->perm); -+ if (unlikely(err)) -+ goto out_bf; -+ -+ if (!au_br_writable(mod->perm)) { -+ /* rw --> ro, file might be mmapped */ -+ DiMustNoWaiters(root); -+ IiMustNoWaiters(root->d_inode); -+ 
di_write_unlock(root); -+ err = au_br_mod_files_ro(sb, bindex); -+ /* aufs_write_lock() calls ..._child() */ -+ di_write_lock_child(root); -+ -+ if (unlikely(err)) { -+ rerr = -ENOMEM; -+ br->br_wbr = kzalloc(sizeof(*br->br_wbr), -+ GFP_NOFS); -+ if (br->br_wbr) -+ rerr = au_wbr_init(br, sb, br->br_perm); -+ if (unlikely(rerr)) { -+ AuIOErr("nested error %d (%d)\n", -+ rerr, err); -+ br->br_perm = mod->perm; -+ } -+ } -+ } -+ } else if (au_br_writable(mod->perm)) { -+ /* ro --> rw */ -+ err = -ENOMEM; -+ br->br_wbr = kzalloc(sizeof(*br->br_wbr), GFP_NOFS); -+ if (br->br_wbr) { -+ err = au_wbr_init(br, sb, mod->perm); -+ if (unlikely(err)) { -+ kfree(br->br_wbr); -+ br->br_wbr = NULL; -+ } -+ } -+ } -+ if (unlikely(err)) -+ goto out_bf; -+ -+ if (au_br_fhsm(br->br_perm)) { -+ if (!au_br_fhsm(mod->perm)) { -+ /* fhsm --> non-fhsm */ -+ au_br_fhsm_fin(br->br_fhsm); -+ kfree(br->br_fhsm); -+ br->br_fhsm = NULL; -+ } -+ } else if (au_br_fhsm(mod->perm)) -+ /* non-fhsm --> fhsm */ -+ br->br_fhsm = bf; -+ -+ *do_refresh |= need_sigen_inc(br->br_perm, mod->perm); -+ br->br_perm = mod->perm; -+ goto out; /* success */ -+ -+out_bf: -+ kfree(bf); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs) -+{ -+ int err; -+ struct kstatfs kstfs; -+ -+ err = vfs_statfs(&br->br_path, &kstfs); -+ if (!err) { -+ stfs->f_blocks = kstfs.f_blocks; -+ stfs->f_bavail = kstfs.f_bavail; -+ stfs->f_files = kstfs.f_files; -+ stfs->f_ffree = kstfs.f_ffree; -+ } -+ -+ return err; -+} -diff --git a/fs/aufs/branch.h b/fs/aufs/branch.h -new file mode 100644 -index 0000000..6ae006e ---- /dev/null -+++ b/fs/aufs/branch.h -@@ -0,0 +1,279 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * branch filesystems and xino for them -+ */ -+ -+#ifndef __AUFS_BRANCH_H__ -+#define __AUFS_BRANCH_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include "dynop.h" -+#include "rwsem.h" -+#include "super.h" -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* a xino file */ -+struct au_xino_file { -+ struct file *xi_file; -+ struct mutex xi_nondir_mtx; -+ -+ /* todo: make xino files an array to support huge inode number */ -+ -+#ifdef CONFIG_DEBUG_FS -+ struct dentry *xi_dbgaufs; -+#endif -+}; -+ -+/* File-based Hierarchical Storage Management */ -+struct au_br_fhsm { -+#ifdef CONFIG_AUFS_FHSM -+ struct mutex bf_lock; -+ unsigned long bf_jiffy; -+ struct aufs_stfs bf_stfs; -+ int bf_readable; -+#endif -+}; -+ -+/* members for writable branch only */ -+enum {AuBrWh_BASE, AuBrWh_PLINK, AuBrWh_ORPH, AuBrWh_Last}; -+struct au_wbr { -+ struct au_rwsem wbr_wh_rwsem; -+ struct dentry *wbr_wh[AuBrWh_Last]; -+ atomic_t wbr_wh_running; -+#define wbr_whbase wbr_wh[AuBrWh_BASE] /* whiteout base */ -+#define wbr_plink wbr_wh[AuBrWh_PLINK] /* pseudo-link dir */ -+#define wbr_orph wbr_wh[AuBrWh_ORPH] /* dir for orphans */ -+ -+ /* mfs mode */ -+ unsigned long long wbr_bytes; -+}; -+ -+/* ext2 has 3 types of operations at least, ext3 has 4 */ -+#define AuBrDynOp (AuDyLast * 4) -+ 
-+#ifdef CONFIG_AUFS_HFSNOTIFY -+/* support for asynchronous destruction */ -+struct au_br_hfsnotify { -+ struct fsnotify_group *hfsn_group; -+}; -+#endif -+ -+/* sysfs entries */ -+struct au_brsysfs { -+ char name[16]; -+ struct attribute attr; -+}; -+ -+enum { -+ AuBrSysfs_BR, -+ AuBrSysfs_BRID, -+ AuBrSysfs_Last -+}; -+ -+/* protected by superblock rwsem */ -+struct au_branch { -+ struct au_xino_file br_xino; -+ -+ aufs_bindex_t br_id; -+ -+ int br_perm; -+ struct path br_path; -+ spinlock_t br_dykey_lock; -+ struct au_dykey *br_dykey[AuBrDynOp]; -+ atomic_t br_count; -+ -+ struct au_wbr *br_wbr; -+ struct au_br_fhsm *br_fhsm; -+ -+ /* xino truncation */ -+ atomic_t br_xino_running; -+ -+#ifdef CONFIG_AUFS_HFSNOTIFY -+ struct au_br_hfsnotify *br_hfsn; -+#endif -+ -+#ifdef CONFIG_SYSFS -+ /* entries under sysfs per mount-point */ -+ struct au_brsysfs br_sysfs[AuBrSysfs_Last]; -+#endif -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct vfsmount *au_br_mnt(struct au_branch *br) -+{ -+ return br->br_path.mnt; -+} -+ -+static inline struct dentry *au_br_dentry(struct au_branch *br) -+{ -+ return br->br_path.dentry; -+} -+ -+static inline struct super_block *au_br_sb(struct au_branch *br) -+{ -+ return au_br_mnt(br)->mnt_sb; -+} -+ -+static inline int au_br_rdonly(struct au_branch *br) -+{ -+ return ((au_br_sb(br)->s_flags & MS_RDONLY) -+ || !au_br_writable(br->br_perm)) -+ ? 
-EROFS : 0; -+} -+ -+static inline int au_br_hnotifyable(int brperm __maybe_unused) -+{ -+#ifdef CONFIG_AUFS_HNOTIFY -+ return !(brperm & AuBrPerm_RR); -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_br_test_oflag(int oflag, struct au_branch *br) -+{ -+ int err, exec_flag; -+ -+ err = 0; -+ exec_flag = oflag & __FMODE_EXEC; -+ if (unlikely(exec_flag && (au_br_mnt(br)->mnt_flags & MNT_NOEXEC))) -+ err = -EACCES; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* branch.c */ -+struct au_sbinfo; -+void au_br_free(struct au_sbinfo *sinfo); -+int au_br_index(struct super_block *sb, aufs_bindex_t br_id); -+struct au_opt_add; -+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount); -+struct au_opt_del; -+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount); -+long au_ibusy_ioctl(struct file *file, unsigned long arg); -+#ifdef CONFIG_COMPAT -+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg); -+#endif -+struct au_opt_mod; -+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount, -+ int *do_refresh); -+struct aufs_stfs; -+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs); -+ -+/* xino.c */ -+static const loff_t au_loff_max = LLONG_MAX; -+ -+int au_xib_trunc(struct super_block *sb); -+ssize_t xino_fread(au_readf_t func, struct file *file, void *buf, size_t size, -+ loff_t *pos); -+ssize_t xino_fwrite(au_writef_t func, struct file *file, void *buf, size_t size, -+ loff_t *pos); -+struct file *au_xino_create2(struct file *base_file, struct file *copy_src); -+struct file *au_xino_create(struct super_block *sb, char *fname, int silent); -+ino_t au_xino_new_ino(struct super_block *sb); -+void au_xino_delete_inode(struct inode *inode, const int unlinked); -+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino, -+ ino_t ino); -+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t 
h_ino, -+ ino_t *ino); -+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t hino, -+ struct file *base_file, int do_test); -+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex); -+ -+struct au_opt_xino; -+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount); -+void au_xino_clr(struct super_block *sb); -+struct file *au_xino_def(struct super_block *sb); -+int au_xino_path(struct seq_file *seq, struct file *file); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* Superblock to branch */ -+static inline -+aufs_bindex_t au_sbr_id(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ return au_sbr(sb, bindex)->br_id; -+} -+ -+static inline -+struct vfsmount *au_sbr_mnt(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ return au_br_mnt(au_sbr(sb, bindex)); -+} -+ -+static inline -+struct super_block *au_sbr_sb(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ return au_br_sb(au_sbr(sb, bindex)); -+} -+ -+static inline void au_sbr_put(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ atomic_dec(&au_sbr(sb, bindex)->br_count); -+} -+ -+static inline int au_sbr_perm(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ return au_sbr(sb, bindex)->br_perm; -+} -+ -+static inline int au_sbr_whable(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ return au_br_whable(au_sbr_perm(sb, bindex)); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * wbr_wh_read_lock, wbr_wh_write_lock -+ * wbr_wh_read_unlock, wbr_wh_write_unlock, wbr_wh_downgrade_lock -+ */ -+AuSimpleRwsemFuncs(wbr_wh, struct au_wbr *wbr, &wbr->wbr_wh_rwsem); -+ -+#define WbrWhMustNoWaiters(wbr) AuRwMustNoWaiters(&wbr->wbr_wh_rwsem) -+#define WbrWhMustAnyLock(wbr) AuRwMustAnyLock(&wbr->wbr_wh_rwsem) -+#define WbrWhMustWriteLock(wbr) AuRwMustWriteLock(&wbr->wbr_wh_rwsem) -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef 
CONFIG_AUFS_FHSM -+static inline void au_br_fhsm_init(struct au_br_fhsm *brfhsm) -+{ -+ mutex_init(&brfhsm->bf_lock); -+ brfhsm->bf_jiffy = 0; -+ brfhsm->bf_readable = 0; -+} -+ -+static inline void au_br_fhsm_fin(struct au_br_fhsm *brfhsm) -+{ -+ mutex_destroy(&brfhsm->bf_lock); -+} -+#else -+AuStubVoid(au_br_fhsm_init, struct au_br_fhsm *brfhsm) -+AuStubVoid(au_br_fhsm_fin, struct au_br_fhsm *brfhsm) -+#endif -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_BRANCH_H__ */ -diff --git a/fs/aufs/conf.mk b/fs/aufs/conf.mk -new file mode 100644 -index 0000000..0bbb2d3 ---- /dev/null -+++ b/fs/aufs/conf.mk -@@ -0,0 +1,38 @@ -+ -+AuConfStr = CONFIG_AUFS_FS=${CONFIG_AUFS_FS} -+ -+define AuConf -+ifdef ${1} -+AuConfStr += ${1}=${${1}} -+endif -+endef -+ -+AuConfAll = BRANCH_MAX_127 BRANCH_MAX_511 BRANCH_MAX_1023 BRANCH_MAX_32767 \ -+ SBILIST \ -+ HNOTIFY HFSNOTIFY \ -+ EXPORT INO_T_64 \ -+ XATTR \ -+ FHSM \ -+ RDU \ -+ SHWH \ -+ BR_RAMFS \ -+ BR_FUSE POLL \ -+ BR_HFSPLUS \ -+ BDEV_LOOP \ -+ DEBUG MAGIC_SYSRQ -+$(foreach i, ${AuConfAll}, \ -+ $(eval $(call AuConf,CONFIG_AUFS_${i}))) -+ -+AuConfName = ${obj}/conf.str -+${AuConfName}.tmp: FORCE -+ @echo ${AuConfStr} | tr ' ' '\n' | sed -e 's/^/"/' -e 's/$$/\\n"/' > $@ -+${AuConfName}: ${AuConfName}.tmp -+ @diff -q $< $@ > /dev/null 2>&1 || { \ -+ echo ' GEN ' $@; \ -+ cp -p $< $@; \ -+ } -+FORCE: -+clean-files += ${AuConfName} ${AuConfName}.tmp -+${obj}/sysfs.o: ${AuConfName} -+ -+-include ${srctree}/${src}/conf_priv.mk -diff --git a/fs/aufs/cpup.c b/fs/aufs/cpup.c -new file mode 100644 -index 0000000..9d8b767 ---- /dev/null -+++ b/fs/aufs/cpup.c -@@ -0,0 +1,1368 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * copy-up functions, see wbr_policy.c for copy-down -+ */ -+ -+#include -+#include -+#include -+#include "aufs.h" -+ -+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags) -+{ -+ const unsigned int mask = S_DEAD | S_SWAPFILE | S_PRIVATE -+ | S_NOATIME | S_NOCMTIME | S_AUTOMOUNT; -+ -+ BUILD_BUG_ON(sizeof(iflags) != sizeof(dst->i_flags)); -+ -+ dst->i_flags |= iflags & ~mask; -+ if (au_test_fs_notime(dst->i_sb)) -+ dst->i_flags |= S_NOATIME | S_NOCMTIME; -+} -+ -+void au_cpup_attr_timesizes(struct inode *inode) -+{ -+ struct inode *h_inode; -+ -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ fsstack_copy_attr_times(inode, h_inode); -+ fsstack_copy_inode_size(inode, h_inode); -+} -+ -+void au_cpup_attr_nlink(struct inode *inode, int force) -+{ -+ struct inode *h_inode; -+ struct super_block *sb; -+ aufs_bindex_t bindex, bend; -+ -+ sb = inode->i_sb; -+ bindex = au_ibstart(inode); -+ h_inode = au_h_iptr(inode, bindex); -+ if (!force -+ && !S_ISDIR(h_inode->i_mode) -+ && au_opt_test(au_mntflags(sb), PLINK) -+ && au_plink_test(inode)) -+ return; -+ -+ /* -+ * 0 can happen in revalidating. -+ * h_inode->i_mutex may not be held here, but it is harmless since once -+ * i_nlink reaches 0, it will never become positive except O_TMPFILE -+ * case. -+ * todo: O_TMPFILE+linkat(AT_SYMLINK_FOLLOW) bypassing aufs may cause -+ * the incorrect link count. -+ */ -+ set_nlink(inode, h_inode->i_nlink); -+ -+ /* -+ * fewer nlink makes find(1) noisy, but larger nlink doesn't. -+ * it may includes whplink directory. 
-+ */ -+ if (S_ISDIR(h_inode->i_mode)) { -+ bend = au_ibend(inode); -+ for (bindex++; bindex <= bend; bindex++) { -+ h_inode = au_h_iptr(inode, bindex); -+ if (h_inode) -+ au_add_nlink(inode, h_inode); -+ } -+ } -+} -+ -+void au_cpup_attr_changeable(struct inode *inode) -+{ -+ struct inode *h_inode; -+ -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ inode->i_mode = h_inode->i_mode; -+ inode->i_uid = h_inode->i_uid; -+ inode->i_gid = h_inode->i_gid; -+ au_cpup_attr_timesizes(inode); -+ au_cpup_attr_flags(inode, h_inode->i_flags); -+} -+ -+void au_cpup_igen(struct inode *inode, struct inode *h_inode) -+{ -+ struct au_iinfo *iinfo = au_ii(inode); -+ -+ IiMustWriteLock(inode); -+ -+ iinfo->ii_higen = h_inode->i_generation; -+ iinfo->ii_hsb1 = h_inode->i_sb; -+} -+ -+void au_cpup_attr_all(struct inode *inode, int force) -+{ -+ struct inode *h_inode; -+ -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ au_cpup_attr_changeable(inode); -+ if (inode->i_nlink > 0) -+ au_cpup_attr_nlink(inode, force); -+ inode->i_rdev = h_inode->i_rdev; -+ inode->i_blkbits = h_inode->i_blkbits; -+ au_cpup_igen(inode, h_inode); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* Note: dt_dentry and dt_h_dentry are not dget/dput-ed */ -+ -+/* keep the timestamps of the parent dir when cpup */ -+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry, -+ struct path *h_path) -+{ -+ struct inode *h_inode; -+ -+ dt->dt_dentry = dentry; -+ dt->dt_h_path = *h_path; -+ h_inode = h_path->dentry->d_inode; -+ dt->dt_atime = h_inode->i_atime; -+ dt->dt_mtime = h_inode->i_mtime; -+ /* smp_mb(); */ -+} -+ -+void au_dtime_revert(struct au_dtime *dt) -+{ -+ struct iattr attr; -+ int err; -+ -+ attr.ia_atime = dt->dt_atime; -+ attr.ia_mtime = dt->dt_mtime; -+ attr.ia_valid = ATTR_FORCE | ATTR_MTIME | ATTR_MTIME_SET -+ | ATTR_ATIME | ATTR_ATIME_SET; -+ -+ /* no delegation since this is a directory */ -+ err = vfsub_notify_change(&dt->dt_h_path, &attr, 
/*delegated*/NULL); -+ if (unlikely(err)) -+ pr_warn("restoring timestamps failed(%d). ignored\n", err); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* internal use only */ -+struct au_cpup_reg_attr { -+ int valid; -+ struct kstat st; -+ unsigned int iflags; /* inode->i_flags */ -+}; -+ -+static noinline_for_stack -+int cpup_iattr(struct dentry *dst, aufs_bindex_t bindex, struct dentry *h_src, -+ struct au_cpup_reg_attr *h_src_attr) -+{ -+ int err, sbits, icex; -+ unsigned int mnt_flags; -+ unsigned char verbose; -+ struct iattr ia; -+ struct path h_path; -+ struct inode *h_isrc, *h_idst; -+ struct kstat *h_st; -+ struct au_branch *br; -+ -+ h_path.dentry = au_h_dptr(dst, bindex); -+ h_idst = h_path.dentry->d_inode; -+ br = au_sbr(dst->d_sb, bindex); -+ h_path.mnt = au_br_mnt(br); -+ h_isrc = h_src->d_inode; -+ ia.ia_valid = ATTR_FORCE | ATTR_UID | ATTR_GID -+ | ATTR_ATIME | ATTR_MTIME -+ | ATTR_ATIME_SET | ATTR_MTIME_SET; -+ if (h_src_attr && h_src_attr->valid) { -+ h_st = &h_src_attr->st; -+ ia.ia_uid = h_st->uid; -+ ia.ia_gid = h_st->gid; -+ ia.ia_atime = h_st->atime; -+ ia.ia_mtime = h_st->mtime; -+ if (h_idst->i_mode != h_st->mode -+ && !S_ISLNK(h_idst->i_mode)) { -+ ia.ia_valid |= ATTR_MODE; -+ ia.ia_mode = h_st->mode; -+ } -+ sbits = !!(h_st->mode & (S_ISUID | S_ISGID)); -+ au_cpup_attr_flags(h_idst, h_src_attr->iflags); -+ } else { -+ ia.ia_uid = h_isrc->i_uid; -+ ia.ia_gid = h_isrc->i_gid; -+ ia.ia_atime = h_isrc->i_atime; -+ ia.ia_mtime = h_isrc->i_mtime; -+ if (h_idst->i_mode != h_isrc->i_mode -+ && !S_ISLNK(h_idst->i_mode)) { -+ ia.ia_valid |= ATTR_MODE; -+ ia.ia_mode = h_isrc->i_mode; -+ } -+ sbits = !!(h_isrc->i_mode & (S_ISUID | S_ISGID)); -+ au_cpup_attr_flags(h_idst, h_isrc->i_flags); -+ } -+ /* no delegation since it is just created */ -+ err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL); -+ -+ /* is this nfs only? 
*/ -+ if (!err && sbits && au_test_nfs(h_path.dentry->d_sb)) { -+ ia.ia_valid = ATTR_FORCE | ATTR_MODE; -+ ia.ia_mode = h_isrc->i_mode; -+ err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL); -+ } -+ -+ icex = br->br_perm & AuBrAttr_ICEX; -+ if (!err) { -+ mnt_flags = au_mntflags(dst->d_sb); -+ verbose = !!au_opt_test(mnt_flags, VERBOSE); -+ err = au_cpup_xattr(h_path.dentry, h_src, icex, verbose); -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_do_copy_file(struct file *dst, struct file *src, loff_t len, -+ char *buf, unsigned long blksize) -+{ -+ int err; -+ size_t sz, rbytes, wbytes; -+ unsigned char all_zero; -+ char *p, *zp; -+ struct mutex *h_mtx; -+ /* reduce stack usage */ -+ struct iattr *ia; -+ -+ zp = page_address(ZERO_PAGE(0)); -+ if (unlikely(!zp)) -+ return -ENOMEM; /* possible? */ -+ -+ err = 0; -+ all_zero = 0; -+ while (len) { -+ AuDbg("len %lld\n", len); -+ sz = blksize; -+ if (len < blksize) -+ sz = len; -+ -+ rbytes = 0; -+ /* todo: signal_pending? */ -+ while (!rbytes || err == -EAGAIN || err == -EINTR) { -+ rbytes = vfsub_read_k(src, buf, sz, &src->f_pos); -+ err = rbytes; -+ } -+ if (unlikely(err < 0)) -+ break; -+ -+ all_zero = 0; -+ if (len >= rbytes && rbytes == blksize) -+ all_zero = !memcmp(buf, zp, rbytes); -+ if (!all_zero) { -+ wbytes = rbytes; -+ p = buf; -+ while (wbytes) { -+ size_t b; -+ -+ b = vfsub_write_k(dst, p, wbytes, &dst->f_pos); -+ err = b; -+ /* todo: signal_pending? 
*/ -+ if (unlikely(err == -EAGAIN || err == -EINTR)) -+ continue; -+ if (unlikely(err < 0)) -+ break; -+ wbytes -= b; -+ p += b; -+ } -+ if (unlikely(err < 0)) -+ break; -+ } else { -+ loff_t res; -+ -+ AuLabel(hole); -+ res = vfsub_llseek(dst, rbytes, SEEK_CUR); -+ err = res; -+ if (unlikely(res < 0)) -+ break; -+ } -+ len -= rbytes; -+ err = 0; -+ } -+ -+ /* the last block may be a hole */ -+ if (!err && all_zero) { -+ AuLabel(last hole); -+ -+ err = 1; -+ if (au_test_nfs(dst->f_dentry->d_sb)) { -+ /* nfs requires this step to make last hole */ -+ /* is this only nfs? */ -+ do { -+ /* todo: signal_pending? */ -+ err = vfsub_write_k(dst, "\0", 1, &dst->f_pos); -+ } while (err == -EAGAIN || err == -EINTR); -+ if (err == 1) -+ dst->f_pos--; -+ } -+ -+ if (err == 1) { -+ ia = (void *)buf; -+ ia->ia_size = dst->f_pos; -+ ia->ia_valid = ATTR_SIZE | ATTR_FILE; -+ ia->ia_file = dst; -+ h_mtx = &file_inode(dst)->i_mutex; -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD2); -+ /* no delegation since it is just created */ -+ err = vfsub_notify_change(&dst->f_path, ia, -+ /*delegated*/NULL); -+ mutex_unlock(h_mtx); -+ } -+ } -+ -+ return err; -+} -+ -+int au_copy_file(struct file *dst, struct file *src, loff_t len) -+{ -+ int err; -+ unsigned long blksize; -+ unsigned char do_kfree; -+ char *buf; -+ -+ err = -ENOMEM; -+ blksize = dst->f_dentry->d_sb->s_blocksize; -+ if (!blksize || PAGE_SIZE < blksize) -+ blksize = PAGE_SIZE; -+ AuDbg("blksize %lu\n", blksize); -+ do_kfree = (blksize != PAGE_SIZE && blksize >= sizeof(struct iattr *)); -+ if (do_kfree) -+ buf = kmalloc(blksize, GFP_NOFS); -+ else -+ buf = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!buf)) -+ goto out; -+ -+ if (len > (1 << 22)) -+ AuDbg("copying a large file %lld\n", (long long)len); -+ -+ src->f_pos = 0; -+ dst->f_pos = 0; -+ err = au_do_copy_file(dst, src, len, buf, blksize); -+ if (do_kfree) -+ kfree(buf); -+ else -+ free_page((unsigned long)buf); -+ -+out: -+ return err; -+} -+ -+/* -+ * to support a 
sparse file which is opened with O_APPEND, -+ * we need to close the file. -+ */ -+static int au_cp_regular(struct au_cp_generic *cpg) -+{ -+ int err, i; -+ enum { SRC, DST }; -+ struct { -+ aufs_bindex_t bindex; -+ unsigned int flags; -+ struct dentry *dentry; -+ int force_wr; -+ struct file *file; -+ void *label; -+ } *f, file[] = { -+ { -+ .bindex = cpg->bsrc, -+ .flags = O_RDONLY | O_NOATIME | O_LARGEFILE, -+ .label = &&out -+ }, -+ { -+ .bindex = cpg->bdst, -+ .flags = O_WRONLY | O_NOATIME | O_LARGEFILE, -+ .force_wr = !!au_ftest_cpup(cpg->flags, RWDST), -+ .label = &&out_src -+ } -+ }; -+ struct super_block *sb; -+ struct task_struct *tsk = current; -+ -+ /* bsrc branch can be ro/rw. */ -+ sb = cpg->dentry->d_sb; -+ f = file; -+ for (i = 0; i < 2; i++, f++) { -+ f->dentry = au_h_dptr(cpg->dentry, f->bindex); -+ f->file = au_h_open(cpg->dentry, f->bindex, f->flags, -+ /*file*/NULL, f->force_wr); -+ err = PTR_ERR(f->file); -+ if (IS_ERR(f->file)) -+ goto *f->label; -+ } -+ -+ /* try stopping to update while we copyup */ -+ IMustLock(file[SRC].dentry->d_inode); -+ err = au_copy_file(file[DST].file, file[SRC].file, cpg->len); -+ -+ /* i wonder if we had O_NO_DELAY_FPUT flag */ -+ if (tsk->flags & PF_KTHREAD) -+ __fput_sync(file[DST].file); -+ else { -+ WARN(1, "%pD\nPlease report this warning to aufs-users ML", -+ file[DST].file); -+ fput(file[DST].file); -+ /* -+ * too bad. -+ * we have to call both since we don't know which place the file -+ * was added to. 
-+ */ -+ task_work_run(); -+ flush_delayed_fput(); -+ } -+ au_sbr_put(sb, file[DST].bindex); -+ -+out_src: -+ fput(file[SRC].file); -+ au_sbr_put(sb, file[SRC].bindex); -+out: -+ return err; -+} -+ -+static int au_do_cpup_regular(struct au_cp_generic *cpg, -+ struct au_cpup_reg_attr *h_src_attr) -+{ -+ int err, rerr; -+ loff_t l; -+ struct path h_path; -+ struct inode *h_src_inode, *h_dst_inode; -+ -+ err = 0; -+ h_src_inode = au_h_iptr(cpg->dentry->d_inode, cpg->bsrc); -+ l = i_size_read(h_src_inode); -+ if (cpg->len == -1 || l < cpg->len) -+ cpg->len = l; -+ if (cpg->len) { -+ /* try stopping to update while we are referencing */ -+ mutex_lock_nested(&h_src_inode->i_mutex, AuLsc_I_CHILD); -+ au_pin_hdir_unlock(cpg->pin); -+ -+ h_path.dentry = au_h_dptr(cpg->dentry, cpg->bsrc); -+ h_path.mnt = au_sbr_mnt(cpg->dentry->d_sb, cpg->bsrc); -+ h_src_attr->iflags = h_src_inode->i_flags; -+ err = vfs_getattr(&h_path, &h_src_attr->st); -+ if (unlikely(err)) { -+ mutex_unlock(&h_src_inode->i_mutex); -+ goto out; -+ } -+ h_src_attr->valid = 1; -+ err = au_cp_regular(cpg); -+ mutex_unlock(&h_src_inode->i_mutex); -+ rerr = au_pin_hdir_relock(cpg->pin); -+ if (!err && rerr) -+ err = rerr; -+ } -+ if (!err && (h_src_inode->i_state & I_LINKABLE)) { -+ h_path.dentry = au_h_dptr(cpg->dentry, cpg->bdst); -+ h_dst_inode = h_path.dentry->d_inode; -+ spin_lock(&h_dst_inode->i_lock); -+ h_dst_inode->i_state |= I_LINKABLE; -+ spin_unlock(&h_dst_inode->i_lock); -+ } -+ -+out: -+ return err; -+} -+ -+static int au_do_cpup_symlink(struct path *h_path, struct dentry *h_src, -+ struct inode *h_dir) -+{ -+ int err, symlen; -+ mm_segment_t old_fs; -+ union { -+ char *k; -+ char __user *u; -+ } sym; -+ -+ err = -ENOSYS; -+ if (unlikely(!h_src->d_inode->i_op->readlink)) -+ goto out; -+ -+ err = -ENOMEM; -+ sym.k = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!sym.k)) -+ goto out; -+ -+ /* unnecessary to support mmap_sem since symlink is not mmap-able */ -+ old_fs = get_fs(); -+ 
set_fs(KERNEL_DS); -+ symlen = h_src->d_inode->i_op->readlink(h_src, sym.u, PATH_MAX); -+ err = symlen; -+ set_fs(old_fs); -+ -+ if (symlen > 0) { -+ sym.k[symlen] = 0; -+ err = vfsub_symlink(h_dir, h_path, sym.k); -+ } -+ free_page((unsigned long)sym.k); -+ -+out: -+ return err; -+} -+ -+/* -+ * regardless 'acl' option, reset all ACL. -+ * All ACL will be copied up later from the original entry on the lower branch. -+ */ -+static int au_reset_acl(struct inode *h_dir, struct path *h_path, umode_t mode) -+{ -+ int err; -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ -+ h_dentry = h_path->dentry; -+ h_inode = h_dentry->d_inode; -+ /* forget_all_cached_acls(h_inode)); */ -+ err = vfsub_removexattr(h_dentry, XATTR_NAME_POSIX_ACL_ACCESS); -+ AuTraceErr(err); -+ if (err == -EOPNOTSUPP) -+ err = 0; -+ if (!err) -+ err = vfsub_acl_chmod(h_inode, mode); -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_do_cpup_dir(struct au_cp_generic *cpg, struct dentry *dst_parent, -+ struct inode *h_dir, struct path *h_path) -+{ -+ int err; -+ struct inode *dir; -+ -+ err = vfsub_removexattr(h_path->dentry, XATTR_NAME_POSIX_ACL_DEFAULT); -+ AuTraceErr(err); -+ if (err == -EOPNOTSUPP) -+ err = 0; -+ if (unlikely(err)) -+ goto out; -+ -+ /* -+ * strange behaviour from the users view, -+ * particularry setattr case -+ */ -+ dir = dst_parent->d_inode; -+ if (au_ibstart(dir) == cpg->bdst) -+ au_cpup_attr_nlink(dir, /*force*/1); -+ au_cpup_attr_nlink(cpg->dentry->d_inode, /*force*/1); -+ -+out: -+ return err; -+} -+ -+static noinline_for_stack -+int cpup_entry(struct au_cp_generic *cpg, struct dentry *dst_parent, -+ struct au_cpup_reg_attr *h_src_attr) -+{ -+ int err; -+ umode_t mode; -+ unsigned int mnt_flags; -+ unsigned char isdir, isreg, force; -+ const unsigned char do_dt = !!au_ftest_cpup(cpg->flags, DTIME); -+ struct au_dtime dt; -+ struct path h_path; -+ struct dentry *h_src, *h_dst, *h_parent; -+ struct inode *h_inode, *h_dir; -+ struct super_block *sb; -+ -+ /* 
bsrc branch can be ro/rw. */ -+ h_src = au_h_dptr(cpg->dentry, cpg->bsrc); -+ h_inode = h_src->d_inode; -+ AuDebugOn(h_inode != au_h_iptr(cpg->dentry->d_inode, cpg->bsrc)); -+ -+ /* try stopping to be referenced while we are creating */ -+ h_dst = au_h_dptr(cpg->dentry, cpg->bdst); -+ if (au_ftest_cpup(cpg->flags, RENAME)) -+ AuDebugOn(strncmp(h_dst->d_name.name, AUFS_WH_PFX, -+ AUFS_WH_PFX_LEN)); -+ h_parent = h_dst->d_parent; /* dir inode is locked */ -+ h_dir = h_parent->d_inode; -+ IMustLock(h_dir); -+ AuDebugOn(h_parent != h_dst->d_parent); -+ -+ sb = cpg->dentry->d_sb; -+ h_path.mnt = au_sbr_mnt(sb, cpg->bdst); -+ if (do_dt) { -+ h_path.dentry = h_parent; -+ au_dtime_store(&dt, dst_parent, &h_path); -+ } -+ h_path.dentry = h_dst; -+ -+ isreg = 0; -+ isdir = 0; -+ mode = h_inode->i_mode; -+ switch (mode & S_IFMT) { -+ case S_IFREG: -+ isreg = 1; -+ err = vfsub_create(h_dir, &h_path, S_IRUSR | S_IWUSR, -+ /*want_excl*/true); -+ if (!err) -+ err = au_do_cpup_regular(cpg, h_src_attr); -+ break; -+ case S_IFDIR: -+ isdir = 1; -+ err = vfsub_mkdir(h_dir, &h_path, mode); -+ if (!err) -+ err = au_do_cpup_dir(cpg, dst_parent, h_dir, &h_path); -+ break; -+ case S_IFLNK: -+ err = au_do_cpup_symlink(&h_path, h_src, h_dir); -+ break; -+ case S_IFCHR: -+ case S_IFBLK: -+ AuDebugOn(!capable(CAP_MKNOD)); -+ /*FALLTHROUGH*/ -+ case S_IFIFO: -+ case S_IFSOCK: -+ err = vfsub_mknod(h_dir, &h_path, mode, h_inode->i_rdev); -+ break; -+ default: -+ AuIOErr("Unknown inode type 0%o\n", mode); -+ err = -EIO; -+ } -+ if (!err) -+ err = au_reset_acl(h_dir, &h_path, mode); -+ -+ mnt_flags = au_mntflags(sb); -+ if (!au_opt_test(mnt_flags, UDBA_NONE) -+ && !isdir -+ && au_opt_test(mnt_flags, XINO) -+ && (h_inode->i_nlink == 1 -+ || (h_inode->i_state & I_LINKABLE)) -+ /* todo: unnecessary? 
*/ -+ /* && cpg->dentry->d_inode->i_nlink == 1 */ -+ && cpg->bdst < cpg->bsrc -+ && !au_ftest_cpup(cpg->flags, KEEPLINO)) -+ au_xino_write(sb, cpg->bsrc, h_inode->i_ino, /*ino*/0); -+ /* ignore this error */ -+ -+ if (!err) { -+ force = 0; -+ if (isreg) { -+ force = !!cpg->len; -+ if (cpg->len == -1) -+ force = !!i_size_read(h_inode); -+ } -+ au_fhsm_wrote(sb, cpg->bdst, force); -+ } -+ -+ if (do_dt) -+ au_dtime_revert(&dt); -+ return err; -+} -+ -+static int au_do_ren_after_cpup(struct au_cp_generic *cpg, struct path *h_path) -+{ -+ int err; -+ struct dentry *dentry, *h_dentry, *h_parent, *parent; -+ struct inode *h_dir; -+ aufs_bindex_t bdst; -+ -+ dentry = cpg->dentry; -+ bdst = cpg->bdst; -+ h_dentry = au_h_dptr(dentry, bdst); -+ if (!au_ftest_cpup(cpg->flags, OVERWRITE)) { -+ dget(h_dentry); -+ au_set_h_dptr(dentry, bdst, NULL); -+ err = au_lkup_neg(dentry, bdst, /*wh*/0); -+ if (!err) -+ h_path->dentry = dget(au_h_dptr(dentry, bdst)); -+ au_set_h_dptr(dentry, bdst, h_dentry); -+ } else { -+ err = 0; -+ parent = dget_parent(dentry); -+ h_parent = au_h_dptr(parent, bdst); -+ dput(parent); -+ h_path->dentry = vfsub_lkup_one(&dentry->d_name, h_parent); -+ if (IS_ERR(h_path->dentry)) -+ err = PTR_ERR(h_path->dentry); -+ } -+ if (unlikely(err)) -+ goto out; -+ -+ h_parent = h_dentry->d_parent; /* dir inode is locked */ -+ h_dir = h_parent->d_inode; -+ IMustLock(h_dir); -+ AuDbg("%pd %pd\n", h_dentry, h_path->dentry); -+ /* no delegation since it is just created */ -+ err = vfsub_rename(h_dir, h_dentry, h_dir, h_path, /*delegated*/NULL); -+ dput(h_path->dentry); -+ -+out: -+ return err; -+} -+ -+/* -+ * copyup the @dentry from @bsrc to @bdst. -+ * the caller must set the both of lower dentries. -+ * @len is for truncating when it is -1 copyup the entire file. -+ * in link/rename cases, @dst_parent may be different from the real one. -+ * basic->bsrc can be larger than basic->bdst. 
-+ */ -+static int au_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent) -+{ -+ int err, rerr; -+ aufs_bindex_t old_ibstart; -+ unsigned char isdir, plink; -+ struct dentry *h_src, *h_dst, *h_parent; -+ struct inode *dst_inode, *h_dir, *inode, *delegated; -+ struct super_block *sb; -+ struct au_branch *br; -+ /* to reuduce stack size */ -+ struct { -+ struct au_dtime dt; -+ struct path h_path; -+ struct au_cpup_reg_attr h_src_attr; -+ } *a; -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ a->h_src_attr.valid = 0; -+ -+ sb = cpg->dentry->d_sb; -+ br = au_sbr(sb, cpg->bdst); -+ a->h_path.mnt = au_br_mnt(br); -+ h_dst = au_h_dptr(cpg->dentry, cpg->bdst); -+ h_parent = h_dst->d_parent; /* dir inode is locked */ -+ h_dir = h_parent->d_inode; -+ IMustLock(h_dir); -+ -+ h_src = au_h_dptr(cpg->dentry, cpg->bsrc); -+ inode = cpg->dentry->d_inode; -+ -+ if (!dst_parent) -+ dst_parent = dget_parent(cpg->dentry); -+ else -+ dget(dst_parent); -+ -+ plink = !!au_opt_test(au_mntflags(sb), PLINK); -+ dst_inode = au_h_iptr(inode, cpg->bdst); -+ if (dst_inode) { -+ if (unlikely(!plink)) { -+ err = -EIO; -+ AuIOErr("hi%lu(i%lu) exists on b%d " -+ "but plink is disabled\n", -+ dst_inode->i_ino, inode->i_ino, cpg->bdst); -+ goto out_parent; -+ } -+ -+ if (dst_inode->i_nlink) { -+ const int do_dt = au_ftest_cpup(cpg->flags, DTIME); -+ -+ h_src = au_plink_lkup(inode, cpg->bdst); -+ err = PTR_ERR(h_src); -+ if (IS_ERR(h_src)) -+ goto out_parent; -+ if (unlikely(!h_src->d_inode)) { -+ err = -EIO; -+ AuIOErr("i%lu exists on b%d " -+ "but not pseudo-linked\n", -+ inode->i_ino, cpg->bdst); -+ dput(h_src); -+ goto out_parent; -+ } -+ -+ if (do_dt) { -+ a->h_path.dentry = h_parent; -+ au_dtime_store(&a->dt, dst_parent, &a->h_path); -+ } -+ -+ a->h_path.dentry = h_dst; -+ delegated = NULL; -+ err = vfsub_link(h_src, h_dir, &a->h_path, &delegated); -+ if (!err && au_ftest_cpup(cpg->flags, RENAME)) -+ err = au_do_ren_after_cpup(cpg, 
&a->h_path); -+ if (do_dt) -+ au_dtime_revert(&a->dt); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ dput(h_src); -+ goto out_parent; -+ } else -+ /* todo: cpup_wh_file? */ -+ /* udba work */ -+ au_update_ibrange(inode, /*do_put_zero*/1); -+ } -+ -+ isdir = S_ISDIR(inode->i_mode); -+ old_ibstart = au_ibstart(inode); -+ err = cpup_entry(cpg, dst_parent, &a->h_src_attr); -+ if (unlikely(err)) -+ goto out_rev; -+ dst_inode = h_dst->d_inode; -+ mutex_lock_nested(&dst_inode->i_mutex, AuLsc_I_CHILD2); -+ /* todo: necessary? */ -+ /* au_pin_hdir_unlock(cpg->pin); */ -+ -+ err = cpup_iattr(cpg->dentry, cpg->bdst, h_src, &a->h_src_attr); -+ if (unlikely(err)) { -+ /* todo: necessary? */ -+ /* au_pin_hdir_relock(cpg->pin); */ /* ignore an error */ -+ mutex_unlock(&dst_inode->i_mutex); -+ goto out_rev; -+ } -+ -+ if (cpg->bdst < old_ibstart) { -+ if (S_ISREG(inode->i_mode)) { -+ err = au_dy_iaop(inode, cpg->bdst, dst_inode); -+ if (unlikely(err)) { -+ /* ignore an error */ -+ /* au_pin_hdir_relock(cpg->pin); */ -+ mutex_unlock(&dst_inode->i_mutex); -+ goto out_rev; -+ } -+ } -+ au_set_ibstart(inode, cpg->bdst); -+ } else -+ au_set_ibend(inode, cpg->bdst); -+ au_set_h_iptr(inode, cpg->bdst, au_igrab(dst_inode), -+ au_hi_flags(inode, isdir)); -+ -+ /* todo: necessary? 
*/ -+ /* err = au_pin_hdir_relock(cpg->pin); */ -+ mutex_unlock(&dst_inode->i_mutex); -+ if (unlikely(err)) -+ goto out_rev; -+ -+ if (!isdir -+ && (h_src->d_inode->i_nlink > 1 -+ || h_src->d_inode->i_state & I_LINKABLE) -+ && plink) -+ au_plink_append(inode, cpg->bdst, h_dst); -+ -+ if (au_ftest_cpup(cpg->flags, RENAME)) { -+ a->h_path.dentry = h_dst; -+ err = au_do_ren_after_cpup(cpg, &a->h_path); -+ } -+ if (!err) -+ goto out_parent; /* success */ -+ -+ /* revert */ -+out_rev: -+ a->h_path.dentry = h_parent; -+ au_dtime_store(&a->dt, dst_parent, &a->h_path); -+ a->h_path.dentry = h_dst; -+ rerr = 0; -+ if (h_dst->d_inode) { -+ if (!isdir) { -+ /* no delegation since it is just created */ -+ rerr = vfsub_unlink(h_dir, &a->h_path, -+ /*delegated*/NULL, /*force*/0); -+ } else -+ rerr = vfsub_rmdir(h_dir, &a->h_path); -+ } -+ au_dtime_revert(&a->dt); -+ if (rerr) { -+ AuIOErr("failed removing broken entry(%d, %d)\n", err, rerr); -+ err = -EIO; -+ } -+out_parent: -+ dput(dst_parent); -+ kfree(a); -+out: -+ return err; -+} -+ -+#if 0 /* reserved */ -+struct au_cpup_single_args { -+ int *errp; -+ struct au_cp_generic *cpg; -+ struct dentry *dst_parent; -+}; -+ -+static void au_call_cpup_single(void *args) -+{ -+ struct au_cpup_single_args *a = args; -+ -+ au_pin_hdir_acquire_nest(a->cpg->pin); -+ *a->errp = au_cpup_single(a->cpg, a->dst_parent); -+ au_pin_hdir_release(a->cpg->pin); -+} -+#endif -+ -+/* -+ * prevent SIGXFSZ in copy-up. -+ * testing CAP_MKNOD is for generic fs, -+ * but CAP_FSETID is for xfs only, currently. 
-+ */ -+static int au_cpup_sio_test(struct au_pin *pin, umode_t mode) -+{ -+ int do_sio; -+ struct super_block *sb; -+ struct inode *h_dir; -+ -+ do_sio = 0; -+ sb = au_pinned_parent(pin)->d_sb; -+ if (!au_wkq_test() -+ && (!au_sbi(sb)->si_plink_maint_pid -+ || au_plink_maint(sb, AuLock_NOPLM))) { -+ switch (mode & S_IFMT) { -+ case S_IFREG: -+ /* no condition about RLIMIT_FSIZE and the file size */ -+ do_sio = 1; -+ break; -+ case S_IFCHR: -+ case S_IFBLK: -+ do_sio = !capable(CAP_MKNOD); -+ break; -+ } -+ if (!do_sio) -+ do_sio = ((mode & (S_ISUID | S_ISGID)) -+ && !capable(CAP_FSETID)); -+ /* this workaround may be removed in the future */ -+ if (!do_sio) { -+ h_dir = au_pinned_h_dir(pin); -+ do_sio = h_dir->i_mode & S_ISVTX; -+ } -+ } -+ -+ return do_sio; -+} -+ -+#if 0 /* reserved */ -+int au_sio_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent) -+{ -+ int err, wkq_err; -+ struct dentry *h_dentry; -+ -+ h_dentry = au_h_dptr(cpg->dentry, cpg->bsrc); -+ if (!au_cpup_sio_test(pin, h_dentry->d_inode->i_mode)) -+ err = au_cpup_single(cpg, dst_parent); -+ else { -+ struct au_cpup_single_args args = { -+ .errp = &err, -+ .cpg = cpg, -+ .dst_parent = dst_parent -+ }; -+ wkq_err = au_wkq_wait(au_call_cpup_single, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ return err; -+} -+#endif -+ -+/* -+ * copyup the @dentry from the first active lower branch to @bdst, -+ * using au_cpup_single(). 
-+ */ -+static int au_cpup_simple(struct au_cp_generic *cpg) -+{ -+ int err; -+ unsigned int flags_orig; -+ struct dentry *dentry; -+ -+ AuDebugOn(cpg->bsrc < 0); -+ -+ dentry = cpg->dentry; -+ DiMustWriteLock(dentry); -+ -+ err = au_lkup_neg(dentry, cpg->bdst, /*wh*/1); -+ if (!err) { -+ flags_orig = cpg->flags; -+ au_fset_cpup(cpg->flags, RENAME); -+ err = au_cpup_single(cpg, NULL); -+ cpg->flags = flags_orig; -+ if (!err) -+ return 0; /* success */ -+ -+ /* revert */ -+ au_set_h_dptr(dentry, cpg->bdst, NULL); -+ au_set_dbstart(dentry, cpg->bsrc); -+ } -+ -+ return err; -+} -+ -+struct au_cpup_simple_args { -+ int *errp; -+ struct au_cp_generic *cpg; -+}; -+ -+static void au_call_cpup_simple(void *args) -+{ -+ struct au_cpup_simple_args *a = args; -+ -+ au_pin_hdir_acquire_nest(a->cpg->pin); -+ *a->errp = au_cpup_simple(a->cpg); -+ au_pin_hdir_release(a->cpg->pin); -+} -+ -+static int au_do_sio_cpup_simple(struct au_cp_generic *cpg) -+{ -+ int err, wkq_err; -+ struct dentry *dentry, *parent; -+ struct file *h_file; -+ struct inode *h_dir; -+ -+ dentry = cpg->dentry; -+ h_file = NULL; -+ if (au_ftest_cpup(cpg->flags, HOPEN)) { -+ AuDebugOn(cpg->bsrc < 0); -+ h_file = au_h_open_pre(dentry, cpg->bsrc, /*force_wr*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ } -+ -+ parent = dget_parent(dentry); -+ h_dir = au_h_iptr(parent->d_inode, cpg->bdst); -+ if (!au_test_h_perm_sio(h_dir, MAY_EXEC | MAY_WRITE) -+ && !au_cpup_sio_test(cpg->pin, dentry->d_inode->i_mode)) -+ err = au_cpup_simple(cpg); -+ else { -+ struct au_cpup_simple_args args = { -+ .errp = &err, -+ .cpg = cpg -+ }; -+ wkq_err = au_wkq_wait(au_call_cpup_simple, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ dput(parent); -+ if (h_file) -+ au_h_open_post(dentry, cpg->bsrc, h_file); -+ -+out: -+ return err; -+} -+ -+int au_sio_cpup_simple(struct au_cp_generic *cpg) -+{ -+ aufs_bindex_t bsrc, bend; -+ struct dentry *dentry, *h_dentry; -+ -+ if (cpg->bsrc < 0) { -+ dentry = 
cpg->dentry; -+ bend = au_dbend(dentry); -+ for (bsrc = cpg->bdst + 1; bsrc <= bend; bsrc++) { -+ h_dentry = au_h_dptr(dentry, bsrc); -+ if (h_dentry) { -+ AuDebugOn(!h_dentry->d_inode); -+ break; -+ } -+ } -+ AuDebugOn(bsrc > bend); -+ cpg->bsrc = bsrc; -+ } -+ AuDebugOn(cpg->bsrc <= cpg->bdst); -+ return au_do_sio_cpup_simple(cpg); -+} -+ -+int au_sio_cpdown_simple(struct au_cp_generic *cpg) -+{ -+ AuDebugOn(cpg->bdst <= cpg->bsrc); -+ return au_do_sio_cpup_simple(cpg); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * copyup the deleted file for writing. -+ */ -+static int au_do_cpup_wh(struct au_cp_generic *cpg, struct dentry *wh_dentry, -+ struct file *file) -+{ -+ int err; -+ unsigned int flags_orig; -+ aufs_bindex_t bsrc_orig; -+ struct dentry *h_d_dst, *h_d_start; -+ struct au_dinfo *dinfo; -+ struct au_hdentry *hdp; -+ -+ dinfo = au_di(cpg->dentry); -+ AuRwMustWriteLock(&dinfo->di_rwsem); -+ -+ bsrc_orig = cpg->bsrc; -+ cpg->bsrc = dinfo->di_bstart; -+ hdp = dinfo->di_hdentry; -+ h_d_dst = hdp[0 + cpg->bdst].hd_dentry; -+ dinfo->di_bstart = cpg->bdst; -+ hdp[0 + cpg->bdst].hd_dentry = wh_dentry; -+ h_d_start = NULL; -+ if (file) { -+ h_d_start = hdp[0 + cpg->bsrc].hd_dentry; -+ hdp[0 + cpg->bsrc].hd_dentry = au_hf_top(file)->f_dentry; -+ } -+ flags_orig = cpg->flags; -+ cpg->flags = !AuCpup_DTIME; -+ err = au_cpup_single(cpg, /*h_parent*/NULL); -+ cpg->flags = flags_orig; -+ if (file) { -+ if (!err) -+ err = au_reopen_nondir(file); -+ hdp[0 + cpg->bsrc].hd_dentry = h_d_start; -+ } -+ hdp[0 + cpg->bdst].hd_dentry = h_d_dst; -+ dinfo->di_bstart = cpg->bsrc; -+ cpg->bsrc = bsrc_orig; -+ -+ return err; -+} -+ -+static int au_cpup_wh(struct au_cp_generic *cpg, struct file *file) -+{ -+ int err; -+ aufs_bindex_t bdst; -+ struct au_dtime dt; -+ struct dentry *dentry, *parent, *h_parent, *wh_dentry; -+ struct au_branch *br; -+ struct path h_path; -+ -+ dentry = cpg->dentry; -+ bdst = cpg->bdst; -+ br = 
au_sbr(dentry->d_sb, bdst); -+ parent = dget_parent(dentry); -+ h_parent = au_h_dptr(parent, bdst); -+ wh_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out; -+ -+ h_path.dentry = h_parent; -+ h_path.mnt = au_br_mnt(br); -+ au_dtime_store(&dt, parent, &h_path); -+ err = au_do_cpup_wh(cpg, wh_dentry, file); -+ if (unlikely(err)) -+ goto out_wh; -+ -+ dget(wh_dentry); -+ h_path.dentry = wh_dentry; -+ if (!d_is_dir(wh_dentry)) { -+ /* no delegation since it is just created */ -+ err = vfsub_unlink(h_parent->d_inode, &h_path, -+ /*delegated*/NULL, /*force*/0); -+ } else -+ err = vfsub_rmdir(h_parent->d_inode, &h_path); -+ if (unlikely(err)) { -+ AuIOErr("failed remove copied-up tmp file %pd(%d)\n", -+ wh_dentry, err); -+ err = -EIO; -+ } -+ au_dtime_revert(&dt); -+ au_set_hi_wh(dentry->d_inode, bdst, wh_dentry); -+ -+out_wh: -+ dput(wh_dentry); -+out: -+ dput(parent); -+ return err; -+} -+ -+struct au_cpup_wh_args { -+ int *errp; -+ struct au_cp_generic *cpg; -+ struct file *file; -+}; -+ -+static void au_call_cpup_wh(void *args) -+{ -+ struct au_cpup_wh_args *a = args; -+ -+ au_pin_hdir_acquire_nest(a->cpg->pin); -+ *a->errp = au_cpup_wh(a->cpg, a->file); -+ au_pin_hdir_release(a->cpg->pin); -+} -+ -+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file) -+{ -+ int err, wkq_err; -+ aufs_bindex_t bdst; -+ struct dentry *dentry, *parent, *h_orph, *h_parent; -+ struct inode *dir, *h_dir, *h_tmpdir; -+ struct au_wbr *wbr; -+ struct au_pin wh_pin, *pin_orig; -+ -+ dentry = cpg->dentry; -+ bdst = cpg->bdst; -+ parent = dget_parent(dentry); -+ dir = parent->d_inode; -+ h_orph = NULL; -+ h_parent = NULL; -+ h_dir = au_igrab(au_h_iptr(dir, bdst)); -+ h_tmpdir = h_dir; -+ pin_orig = NULL; -+ if (!h_dir->i_nlink) { -+ wbr = au_sbr(dentry->d_sb, bdst)->br_wbr; -+ h_orph = wbr->wbr_orph; -+ -+ h_parent = dget(au_h_dptr(parent, bdst)); -+ au_set_h_dptr(parent, bdst, dget(h_orph)); -+ h_tmpdir = 
h_orph->d_inode; -+ au_set_h_iptr(dir, bdst, au_igrab(h_tmpdir), /*flags*/0); -+ -+ mutex_lock_nested(&h_tmpdir->i_mutex, AuLsc_I_PARENT3); -+ /* todo: au_h_open_pre()? */ -+ -+ pin_orig = cpg->pin; -+ au_pin_init(&wh_pin, dentry, bdst, AuLsc_DI_PARENT, -+ AuLsc_I_PARENT3, cpg->pin->udba, AuPin_DI_LOCKED); -+ cpg->pin = &wh_pin; -+ } -+ -+ if (!au_test_h_perm_sio(h_tmpdir, MAY_EXEC | MAY_WRITE) -+ && !au_cpup_sio_test(cpg->pin, dentry->d_inode->i_mode)) -+ err = au_cpup_wh(cpg, file); -+ else { -+ struct au_cpup_wh_args args = { -+ .errp = &err, -+ .cpg = cpg, -+ .file = file -+ }; -+ wkq_err = au_wkq_wait(au_call_cpup_wh, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ if (h_orph) { -+ mutex_unlock(&h_tmpdir->i_mutex); -+ /* todo: au_h_open_post()? */ -+ au_set_h_iptr(dir, bdst, au_igrab(h_dir), /*flags*/0); -+ au_set_h_dptr(parent, bdst, h_parent); -+ AuDebugOn(!pin_orig); -+ cpg->pin = pin_orig; -+ } -+ iput(h_dir); -+ dput(parent); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * generic routine for both of copy-up and copy-down. -+ */ -+/* cf. 
revalidate function in file.c */ -+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst, -+ int (*cp)(struct dentry *dentry, aufs_bindex_t bdst, -+ struct au_pin *pin, -+ struct dentry *h_parent, void *arg), -+ void *arg) -+{ -+ int err; -+ struct au_pin pin; -+ struct dentry *d, *parent, *h_parent, *real_parent; -+ -+ err = 0; -+ parent = dget_parent(dentry); -+ if (IS_ROOT(parent)) -+ goto out; -+ -+ au_pin_init(&pin, dentry, bdst, AuLsc_DI_PARENT2, AuLsc_I_PARENT2, -+ au_opt_udba(dentry->d_sb), AuPin_MNT_WRITE); -+ -+ /* do not use au_dpage */ -+ real_parent = parent; -+ while (1) { -+ dput(parent); -+ parent = dget_parent(dentry); -+ h_parent = au_h_dptr(parent, bdst); -+ if (h_parent) -+ goto out; /* success */ -+ -+ /* find top dir which is necessary to cpup */ -+ do { -+ d = parent; -+ dput(parent); -+ parent = dget_parent(d); -+ di_read_lock_parent3(parent, !AuLock_IR); -+ h_parent = au_h_dptr(parent, bdst); -+ di_read_unlock(parent, !AuLock_IR); -+ } while (!h_parent); -+ -+ if (d != real_parent) -+ di_write_lock_child3(d); -+ -+ /* somebody else might create while we were sleeping */ -+ if (!au_h_dptr(d, bdst) || !au_h_dptr(d, bdst)->d_inode) { -+ if (au_h_dptr(d, bdst)) -+ au_update_dbstart(d); -+ -+ au_pin_set_dentry(&pin, d); -+ err = au_do_pin(&pin); -+ if (!err) { -+ err = cp(d, bdst, &pin, h_parent, arg); -+ au_unpin(&pin); -+ } -+ } -+ -+ if (d != real_parent) -+ di_write_unlock(d); -+ if (unlikely(err)) -+ break; -+ } -+ -+out: -+ dput(parent); -+ return err; -+} -+ -+static int au_cpup_dir(struct dentry *dentry, aufs_bindex_t bdst, -+ struct au_pin *pin, -+ struct dentry *h_parent __maybe_unused, -+ void *arg __maybe_unused) -+{ -+ struct au_cp_generic cpg = { -+ .dentry = dentry, -+ .bdst = bdst, -+ .bsrc = -1, -+ .len = 0, -+ .pin = pin, -+ .flags = AuCpup_DTIME -+ }; -+ return au_sio_cpup_simple(&cpg); -+} -+ -+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst) -+{ -+ return au_cp_dirs(dentry, bdst, au_cpup_dir, NULL); -+} -+ 
-+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst) -+{ -+ int err; -+ struct dentry *parent; -+ struct inode *dir; -+ -+ parent = dget_parent(dentry); -+ dir = parent->d_inode; -+ err = 0; -+ if (au_h_iptr(dir, bdst)) -+ goto out; -+ -+ di_read_unlock(parent, AuLock_IR); -+ di_write_lock_parent(parent); -+ /* someone else might change our inode while we were sleeping */ -+ if (!au_h_iptr(dir, bdst)) -+ err = au_cpup_dirs(dentry, bdst); -+ di_downgrade_lock(parent, AuLock_IR); -+ -+out: -+ dput(parent); -+ return err; -+} -diff --git a/fs/aufs/cpup.h b/fs/aufs/cpup.h -new file mode 100644 -index 0000000..7721429 ---- /dev/null -+++ b/fs/aufs/cpup.h -@@ -0,0 +1,94 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * copy-up/down functions -+ */ -+ -+#ifndef __AUFS_CPUP_H__ -+#define __AUFS_CPUP_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+ -+struct inode; -+struct file; -+struct au_pin; -+ -+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags); -+void au_cpup_attr_timesizes(struct inode *inode); -+void au_cpup_attr_nlink(struct inode *inode, int force); -+void au_cpup_attr_changeable(struct inode *inode); -+void au_cpup_igen(struct inode *inode, struct inode *h_inode); -+void au_cpup_attr_all(struct inode *inode, int force); -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_cp_generic { -+ struct dentry *dentry; -+ aufs_bindex_t bdst, bsrc; -+ loff_t len; -+ struct au_pin *pin; -+ unsigned int flags; -+}; -+ -+/* cpup flags */ -+#define AuCpup_DTIME 1 /* do dtime_store/revert */ -+#define AuCpup_KEEPLINO (1 << 1) /* do not clear the lower xino, -+ for link(2) */ -+#define AuCpup_RENAME (1 << 2) /* rename after cpup */ -+#define AuCpup_HOPEN (1 << 3) /* call h_open_pre/post() in -+ cpup */ -+#define AuCpup_OVERWRITE (1 << 4) /* allow overwriting the -+ existing entry */ -+#define AuCpup_RWDST (1 << 5) /* force write target even if -+ the branch is marked as RO */ -+ -+#define au_ftest_cpup(flags, name) ((flags) & AuCpup_##name) -+#define au_fset_cpup(flags, name) \ -+ do { (flags) |= AuCpup_##name; } while (0) -+#define au_fclr_cpup(flags, name) \ -+ do { (flags) &= ~AuCpup_##name; } while (0) -+ -+int au_copy_file(struct file *dst, struct file *src, loff_t len); -+int au_sio_cpup_simple(struct au_cp_generic *cpg); -+int au_sio_cpdown_simple(struct au_cp_generic *cpg); -+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file); -+ -+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst, -+ int (*cp)(struct dentry *dentry, aufs_bindex_t bdst, -+ struct au_pin *pin, -+ struct dentry *h_parent, void *arg), -+ void *arg); -+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst); -+int 
au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* keep timestamps when copyup */ -+struct au_dtime { -+ struct dentry *dt_dentry; -+ struct path dt_h_path; -+ struct timespec dt_atime, dt_mtime; -+}; -+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry, -+ struct path *h_path); -+void au_dtime_revert(struct au_dtime *dt); -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_CPUP_H__ */ -diff --git a/fs/aufs/dbgaufs.c b/fs/aufs/dbgaufs.c -new file mode 100644 -index 0000000..b4fdc25 ---- /dev/null -+++ b/fs/aufs/dbgaufs.c -@@ -0,0 +1,432 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * debugfs interface -+ */ -+ -+#include -+#include "aufs.h" -+ -+#ifndef CONFIG_SYSFS -+#error DEBUG_FS depends upon SYSFS -+#endif -+ -+static struct dentry *dbgaufs; -+static const mode_t dbgaufs_mode = S_IRUSR | S_IRGRP | S_IROTH; -+ -+/* 20 is max digits length of ulong 64 */ -+struct dbgaufs_arg { -+ int n; -+ char a[20 * 4]; -+}; -+ -+/* -+ * common function for all XINO files -+ */ -+static int dbgaufs_xi_release(struct inode *inode __maybe_unused, -+ struct file *file) -+{ -+ kfree(file->private_data); -+ return 0; -+} -+ -+static int dbgaufs_xi_open(struct file *xf, struct file *file, int do_fcnt) -+{ -+ int err; -+ struct kstat st; -+ struct dbgaufs_arg *p; -+ -+ err = -ENOMEM; -+ p = kmalloc(sizeof(*p), GFP_NOFS); -+ if (unlikely(!p)) -+ goto out; -+ -+ err = 0; -+ p->n = 0; -+ file->private_data = p; -+ if (!xf) -+ goto out; -+ -+ err = vfs_getattr(&xf->f_path, &st); -+ if (!err) { -+ if (do_fcnt) -+ p->n = snprintf -+ (p->a, sizeof(p->a), "%ld, %llux%lu %lld\n", -+ (long)file_count(xf), st.blocks, st.blksize, -+ (long long)st.size); -+ else -+ p->n = snprintf(p->a, sizeof(p->a), "%llux%lu %lld\n", -+ st.blocks, st.blksize, -+ (long long)st.size); -+ AuDebugOn(p->n >= sizeof(p->a)); -+ } else { -+ p->n = snprintf(p->a, sizeof(p->a), "err %d\n", err); -+ err = 0; -+ } -+ -+out: -+ return err; -+ -+} -+ -+static ssize_t dbgaufs_xi_read(struct file *file, char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ struct dbgaufs_arg *p; -+ -+ p = file->private_data; -+ return simple_read_from_buffer(buf, count, ppos, p->a, p->n); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct dbgaufs_plink_arg { -+ int n; -+ char a[]; -+}; -+ -+static int dbgaufs_plink_release(struct inode *inode __maybe_unused, -+ struct file *file) -+{ -+ free_page((unsigned long)file->private_data); -+ return 0; -+} -+ -+static int dbgaufs_plink_open(struct inode *inode, struct file *file) -+{ -+ int err, i, limit; -+ 
unsigned long n, sum; -+ struct dbgaufs_plink_arg *p; -+ struct au_sbinfo *sbinfo; -+ struct super_block *sb; -+ struct au_sphlhead *sphl; -+ -+ err = -ENOMEM; -+ p = (void *)get_zeroed_page(GFP_NOFS); -+ if (unlikely(!p)) -+ goto out; -+ -+ err = -EFBIG; -+ sbinfo = inode->i_private; -+ sb = sbinfo->si_sb; -+ si_noflush_read_lock(sb); -+ if (au_opt_test(au_mntflags(sb), PLINK)) { -+ limit = PAGE_SIZE - sizeof(p->n); -+ -+ /* the number of buckets */ -+ n = snprintf(p->a + p->n, limit, "%d\n", AuPlink_NHASH); -+ p->n += n; -+ limit -= n; -+ -+ sum = 0; -+ for (i = 0, sphl = sbinfo->si_plink; -+ i < AuPlink_NHASH; -+ i++, sphl++) { -+ n = au_sphl_count(sphl); -+ sum += n; -+ -+ n = snprintf(p->a + p->n, limit, "%lu ", n); -+ p->n += n; -+ limit -= n; -+ if (unlikely(limit <= 0)) -+ goto out_free; -+ } -+ p->a[p->n - 1] = '\n'; -+ -+ /* the sum of plinks */ -+ n = snprintf(p->a + p->n, limit, "%lu\n", sum); -+ p->n += n; -+ limit -= n; -+ if (unlikely(limit <= 0)) -+ goto out_free; -+ } else { -+#define str "1\n0\n0\n" -+ p->n = sizeof(str) - 1; -+ strcpy(p->a, str); -+#undef str -+ } -+ si_read_unlock(sb); -+ -+ err = 0; -+ file->private_data = p; -+ goto out; /* success */ -+ -+out_free: -+ free_page((unsigned long)p); -+out: -+ return err; -+} -+ -+static ssize_t dbgaufs_plink_read(struct file *file, char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ struct dbgaufs_plink_arg *p; -+ -+ p = file->private_data; -+ return simple_read_from_buffer(buf, count, ppos, p->a, p->n); -+} -+ -+static const struct file_operations dbgaufs_plink_fop = { -+ .owner = THIS_MODULE, -+ .open = dbgaufs_plink_open, -+ .release = dbgaufs_plink_release, -+ .read = dbgaufs_plink_read -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int dbgaufs_xib_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ struct super_block *sb; -+ -+ sbinfo = inode->i_private; -+ sb = sbinfo->si_sb; -+ 
si_noflush_read_lock(sb); -+ err = dbgaufs_xi_open(sbinfo->si_xib, file, /*do_fcnt*/0); -+ si_read_unlock(sb); -+ return err; -+} -+ -+static const struct file_operations dbgaufs_xib_fop = { -+ .owner = THIS_MODULE, -+ .open = dbgaufs_xib_open, -+ .release = dbgaufs_xi_release, -+ .read = dbgaufs_xi_read -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define DbgaufsXi_PREFIX "xi" -+ -+static int dbgaufs_xino_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ long l; -+ struct au_sbinfo *sbinfo; -+ struct super_block *sb; -+ struct file *xf; -+ struct qstr *name; -+ -+ err = -ENOENT; -+ xf = NULL; -+ name = &file->f_dentry->d_name; -+ if (unlikely(name->len < sizeof(DbgaufsXi_PREFIX) -+ || memcmp(name->name, DbgaufsXi_PREFIX, -+ sizeof(DbgaufsXi_PREFIX) - 1))) -+ goto out; -+ err = kstrtol(name->name + sizeof(DbgaufsXi_PREFIX) - 1, 10, &l); -+ if (unlikely(err)) -+ goto out; -+ -+ sbinfo = inode->i_private; -+ sb = sbinfo->si_sb; -+ si_noflush_read_lock(sb); -+ if (l <= au_sbend(sb)) { -+ xf = au_sbr(sb, (aufs_bindex_t)l)->br_xino.xi_file; -+ err = dbgaufs_xi_open(xf, file, /*do_fcnt*/1); -+ } else -+ err = -ENOENT; -+ si_read_unlock(sb); -+ -+out: -+ return err; -+} -+ -+static const struct file_operations dbgaufs_xino_fop = { -+ .owner = THIS_MODULE, -+ .open = dbgaufs_xino_open, -+ .release = dbgaufs_xi_release, -+ .read = dbgaufs_xi_read -+}; -+ -+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ aufs_bindex_t bend; -+ struct au_branch *br; -+ struct au_xino_file *xi; -+ -+ if (!au_sbi(sb)->si_dbgaufs) -+ return; -+ -+ bend = au_sbend(sb); -+ for (; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ xi = &br->br_xino; -+ debugfs_remove(xi->xi_dbgaufs); -+ xi->xi_dbgaufs = NULL; -+ } -+} -+ -+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ struct au_sbinfo *sbinfo; -+ struct dentry *parent; -+ struct au_branch *br; -+ struct au_xino_file *xi; -+ 
aufs_bindex_t bend; -+ char name[sizeof(DbgaufsXi_PREFIX) + 5]; /* "xi" bindex NULL */ -+ -+ sbinfo = au_sbi(sb); -+ parent = sbinfo->si_dbgaufs; -+ if (!parent) -+ return; -+ -+ bend = au_sbend(sb); -+ for (; bindex <= bend; bindex++) { -+ snprintf(name, sizeof(name), DbgaufsXi_PREFIX "%d", bindex); -+ br = au_sbr(sb, bindex); -+ xi = &br->br_xino; -+ AuDebugOn(xi->xi_dbgaufs); -+ xi->xi_dbgaufs = debugfs_create_file(name, dbgaufs_mode, parent, -+ sbinfo, &dbgaufs_xino_fop); -+ /* ignore an error */ -+ if (unlikely(!xi->xi_dbgaufs)) -+ AuWarn1("failed %s under debugfs\n", name); -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_EXPORT -+static int dbgaufs_xigen_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ struct super_block *sb; -+ -+ sbinfo = inode->i_private; -+ sb = sbinfo->si_sb; -+ si_noflush_read_lock(sb); -+ err = dbgaufs_xi_open(sbinfo->si_xigen, file, /*do_fcnt*/0); -+ si_read_unlock(sb); -+ return err; -+} -+ -+static const struct file_operations dbgaufs_xigen_fop = { -+ .owner = THIS_MODULE, -+ .open = dbgaufs_xigen_open, -+ .release = dbgaufs_xi_release, -+ .read = dbgaufs_xi_read -+}; -+ -+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo) -+{ -+ int err; -+ -+ /* -+ * This function is a dynamic '__init' function actually, -+ * so the tiny check for si_rwsem is unnecessary. 
-+ */ -+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */ -+ -+ err = -EIO; -+ sbinfo->si_dbgaufs_xigen = debugfs_create_file -+ ("xigen", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo, -+ &dbgaufs_xigen_fop); -+ if (sbinfo->si_dbgaufs_xigen) -+ err = 0; -+ -+ return err; -+} -+#else -+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo) -+{ -+ return 0; -+} -+#endif /* CONFIG_AUFS_EXPORT */ -+ -+/* ---------------------------------------------------------------------- */ -+ -+void dbgaufs_si_fin(struct au_sbinfo *sbinfo) -+{ -+ /* -+ * This function is a dynamic '__fin' function actually, -+ * so the tiny check for si_rwsem is unnecessary. -+ */ -+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */ -+ -+ debugfs_remove_recursive(sbinfo->si_dbgaufs); -+ sbinfo->si_dbgaufs = NULL; -+ kobject_put(&sbinfo->si_kobj); -+} -+ -+int dbgaufs_si_init(struct au_sbinfo *sbinfo) -+{ -+ int err; -+ char name[SysaufsSiNameLen]; -+ -+ /* -+ * This function is a dynamic '__init' function actually, -+ * so the tiny check for si_rwsem is unnecessary. 
-+ */ -+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */ -+ -+ err = -ENOENT; -+ if (!dbgaufs) { -+ AuErr1("/debug/aufs is uninitialized\n"); -+ goto out; -+ } -+ -+ err = -EIO; -+ sysaufs_name(sbinfo, name); -+ sbinfo->si_dbgaufs = debugfs_create_dir(name, dbgaufs); -+ if (unlikely(!sbinfo->si_dbgaufs)) -+ goto out; -+ kobject_get(&sbinfo->si_kobj); -+ -+ sbinfo->si_dbgaufs_xib = debugfs_create_file -+ ("xib", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo, -+ &dbgaufs_xib_fop); -+ if (unlikely(!sbinfo->si_dbgaufs_xib)) -+ goto out_dir; -+ -+ sbinfo->si_dbgaufs_plink = debugfs_create_file -+ ("plink", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo, -+ &dbgaufs_plink_fop); -+ if (unlikely(!sbinfo->si_dbgaufs_plink)) -+ goto out_dir; -+ -+ err = dbgaufs_xigen_init(sbinfo); -+ if (!err) -+ goto out; /* success */ -+ -+out_dir: -+ dbgaufs_si_fin(sbinfo); -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void dbgaufs_fin(void) -+{ -+ debugfs_remove(dbgaufs); -+} -+ -+int __init dbgaufs_init(void) -+{ -+ int err; -+ -+ err = -EIO; -+ dbgaufs = debugfs_create_dir(AUFS_NAME, NULL); -+ if (dbgaufs) -+ err = 0; -+ return err; -+} -diff --git a/fs/aufs/dbgaufs.h b/fs/aufs/dbgaufs.h -new file mode 100644 -index 0000000..d1e09bd ---- /dev/null -+++ b/fs/aufs/dbgaufs.h -@@ -0,0 +1,48 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * debugfs interface -+ */ -+ -+#ifndef __DBGAUFS_H__ -+#define __DBGAUFS_H__ -+ -+#ifdef __KERNEL__ -+ -+struct super_block; -+struct au_sbinfo; -+ -+#ifdef CONFIG_DEBUG_FS -+/* dbgaufs.c */ -+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex); -+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex); -+void dbgaufs_si_fin(struct au_sbinfo *sbinfo); -+int dbgaufs_si_init(struct au_sbinfo *sbinfo); -+void dbgaufs_fin(void); -+int __init dbgaufs_init(void); -+#else -+AuStubVoid(dbgaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex) -+AuStubVoid(dbgaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex) -+AuStubVoid(dbgaufs_si_fin, struct au_sbinfo *sbinfo) -+AuStubInt0(dbgaufs_si_init, struct au_sbinfo *sbinfo) -+AuStubVoid(dbgaufs_fin, void) -+AuStubInt0(__init dbgaufs_init, void) -+#endif /* CONFIG_DEBUG_FS */ -+ -+#endif /* __KERNEL__ */ -+#endif /* __DBGAUFS_H__ */ -diff --git a/fs/aufs/dcsub.c b/fs/aufs/dcsub.c -new file mode 100644 -index 0000000..832baa4 ---- /dev/null -+++ b/fs/aufs/dcsub.c -@@ -0,0 +1,224 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * sub-routines for dentry cache -+ */ -+ -+#include "aufs.h" -+ -+static void au_dpage_free(struct au_dpage *dpage) -+{ -+ int i; -+ struct dentry **p; -+ -+ p = dpage->dentries; -+ for (i = 0; i < dpage->ndentry; i++) -+ dput(*p++); -+ free_page((unsigned long)dpage->dentries); -+} -+ -+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp) -+{ -+ int err; -+ void *p; -+ -+ err = -ENOMEM; -+ dpages->dpages = kmalloc(sizeof(*dpages->dpages), gfp); -+ if (unlikely(!dpages->dpages)) -+ goto out; -+ -+ p = (void *)__get_free_page(gfp); -+ if (unlikely(!p)) -+ goto out_dpages; -+ -+ dpages->dpages[0].ndentry = 0; -+ dpages->dpages[0].dentries = p; -+ dpages->ndpage = 1; -+ return 0; /* success */ -+ -+out_dpages: -+ kfree(dpages->dpages); -+out: -+ return err; -+} -+ -+void au_dpages_free(struct au_dcsub_pages *dpages) -+{ -+ int i; -+ struct au_dpage *p; -+ -+ p = dpages->dpages; -+ for (i = 0; i < dpages->ndpage; i++) -+ au_dpage_free(p++); -+ kfree(dpages->dpages); -+} -+ -+static int au_dpages_append(struct au_dcsub_pages *dpages, -+ struct dentry *dentry, gfp_t gfp) -+{ -+ int err, sz; -+ struct au_dpage *dpage; -+ void *p; -+ -+ dpage = dpages->dpages + dpages->ndpage - 1; -+ sz = PAGE_SIZE / sizeof(dentry); -+ if (unlikely(dpage->ndentry >= sz)) { -+ AuLabel(new dpage); -+ err = -ENOMEM; -+ sz = dpages->ndpage * sizeof(*dpages->dpages); -+ p = au_kzrealloc(dpages->dpages, sz, -+ sz + sizeof(*dpages->dpages), gfp); -+ if (unlikely(!p)) -+ goto out; -+ -+ dpages->dpages = p; -+ dpage = dpages->dpages + dpages->ndpage; -+ p = (void *)__get_free_page(gfp); -+ if (unlikely(!p)) -+ goto out; -+ -+ dpage->ndentry = 0; -+ dpage->dentries = p; -+ dpages->ndpage++; -+ } -+ -+ AuDebugOn(au_dcount(dentry) <= 0); -+ dpage->dentries[dpage->ndentry++] = dget_dlock(dentry); -+ return 0; /* success */ -+ -+out: -+ return err; -+} -+ -+/* todo: BAD approach */ -+/* copied from linux/fs/dcache.c */ -+enum d_walk_ret { -+ D_WALK_CONTINUE, -+ D_WALK_QUIT, -+ 
D_WALK_NORETRY, -+ D_WALK_SKIP, -+}; -+ -+extern void d_walk(struct dentry *parent, void *data, -+ enum d_walk_ret (*enter)(void *, struct dentry *), -+ void (*finish)(void *)); -+ -+struct ac_dpages_arg { -+ int err; -+ struct au_dcsub_pages *dpages; -+ struct super_block *sb; -+ au_dpages_test test; -+ void *arg; -+}; -+ -+static enum d_walk_ret au_call_dpages_append(void *_arg, struct dentry *dentry) -+{ -+ enum d_walk_ret ret; -+ struct ac_dpages_arg *arg = _arg; -+ -+ ret = D_WALK_CONTINUE; -+ if (dentry->d_sb == arg->sb -+ && !IS_ROOT(dentry) -+ && au_dcount(dentry) > 0 -+ && au_di(dentry) -+ && (!arg->test || arg->test(dentry, arg->arg))) { -+ arg->err = au_dpages_append(arg->dpages, dentry, GFP_ATOMIC); -+ if (unlikely(arg->err)) -+ ret = D_WALK_QUIT; -+ } -+ -+ return ret; -+} -+ -+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root, -+ au_dpages_test test, void *arg) -+{ -+ struct ac_dpages_arg args = { -+ .err = 0, -+ .dpages = dpages, -+ .sb = root->d_sb, -+ .test = test, -+ .arg = arg -+ }; -+ -+ d_walk(root, &args, au_call_dpages_append, NULL); -+ -+ return args.err; -+} -+ -+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry, -+ int do_include, au_dpages_test test, void *arg) -+{ -+ int err; -+ -+ err = 0; -+ write_seqlock(&rename_lock); -+ spin_lock(&dentry->d_lock); -+ if (do_include -+ && au_dcount(dentry) > 0 -+ && (!test || test(dentry, arg))) -+ err = au_dpages_append(dpages, dentry, GFP_ATOMIC); -+ spin_unlock(&dentry->d_lock); -+ if (unlikely(err)) -+ goto out; -+ -+ /* -+ * RCU for vfsmount is unnecessary since this is a traverse in a single -+ * mount -+ */ -+ while (!IS_ROOT(dentry)) { -+ dentry = dentry->d_parent; /* rename_lock is locked */ -+ spin_lock(&dentry->d_lock); -+ if (au_dcount(dentry) > 0 -+ && (!test || test(dentry, arg))) -+ err = au_dpages_append(dpages, dentry, GFP_ATOMIC); -+ spin_unlock(&dentry->d_lock); -+ if (unlikely(err)) -+ break; -+ } -+ -+out: -+ 
write_sequnlock(&rename_lock); -+ return err; -+} -+ -+static inline int au_dcsub_dpages_aufs(struct dentry *dentry, void *arg) -+{ -+ return au_di(dentry) && dentry->d_sb == arg; -+} -+ -+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages, -+ struct dentry *dentry, int do_include) -+{ -+ return au_dcsub_pages_rev(dpages, dentry, do_include, -+ au_dcsub_dpages_aufs, dentry->d_sb); -+} -+ -+int au_test_subdir(struct dentry *d1, struct dentry *d2) -+{ -+ struct path path[2] = { -+ { -+ .dentry = d1 -+ }, -+ { -+ .dentry = d2 -+ } -+ }; -+ -+ return path_is_under(path + 0, path + 1); -+} -diff --git a/fs/aufs/dcsub.h b/fs/aufs/dcsub.h -new file mode 100644 -index 0000000..7997944 ---- /dev/null -+++ b/fs/aufs/dcsub.h -@@ -0,0 +1,123 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * sub-routines for dentry cache -+ */ -+ -+#ifndef __AUFS_DCSUB_H__ -+#define __AUFS_DCSUB_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+ -+struct au_dpage { -+ int ndentry; -+ struct dentry **dentries; -+}; -+ -+struct au_dcsub_pages { -+ int ndpage; -+ struct au_dpage *dpages; -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* dcsub.c */ -+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp); -+void au_dpages_free(struct au_dcsub_pages *dpages); -+typedef int (*au_dpages_test)(struct dentry *dentry, void *arg); -+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root, -+ au_dpages_test test, void *arg); -+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry, -+ int do_include, au_dpages_test test, void *arg); -+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages, -+ struct dentry *dentry, int do_include); -+int au_test_subdir(struct dentry *d1, struct dentry *d2); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * todo: in linux-3.13, several similar (but faster) helpers are added to -+ * include/linux/dcache.h. Try them (in the future). 
-+ */ -+ -+static inline int au_d_hashed_positive(struct dentry *d) -+{ -+ int err; -+ struct inode *inode = d->d_inode; -+ -+ err = 0; -+ if (unlikely(d_unhashed(d) || !inode || !inode->i_nlink)) -+ err = -ENOENT; -+ return err; -+} -+ -+static inline int au_d_linkable(struct dentry *d) -+{ -+ int err; -+ struct inode *inode = d->d_inode; -+ -+ err = au_d_hashed_positive(d); -+ if (err -+ && inode -+ && (inode->i_state & I_LINKABLE)) -+ err = 0; -+ return err; -+} -+ -+static inline int au_d_alive(struct dentry *d) -+{ -+ int err; -+ struct inode *inode; -+ -+ err = 0; -+ if (!IS_ROOT(d)) -+ err = au_d_hashed_positive(d); -+ else { -+ inode = d->d_inode; -+ if (unlikely(d_unlinked(d) || !inode || !inode->i_nlink)) -+ err = -ENOENT; -+ } -+ return err; -+} -+ -+static inline int au_alive_dir(struct dentry *d) -+{ -+ int err; -+ -+ err = au_d_alive(d); -+ if (unlikely(err || IS_DEADDIR(d->d_inode))) -+ err = -ENOENT; -+ return err; -+} -+ -+static inline int au_qstreq(struct qstr *a, struct qstr *b) -+{ -+ return a->len == b->len -+ && !memcmp(a->name, b->name, a->len); -+} -+ -+static inline int au_dcount(struct dentry *d) -+{ -+ return (int)d_count(d); -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_DCSUB_H__ */ -diff --git a/fs/aufs/debug.c b/fs/aufs/debug.c -new file mode 100644 -index 0000000..2747d13 ---- /dev/null -+++ b/fs/aufs/debug.c -@@ -0,0 +1,436 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * debug print functions -+ */ -+ -+#include "aufs.h" -+ -+/* Returns 0, or -errno. arg is in kp->arg. */ -+static int param_atomic_t_set(const char *val, const struct kernel_param *kp) -+{ -+ int err, n; -+ -+ err = kstrtoint(val, 0, &n); -+ if (!err) { -+ if (n > 0) -+ au_debug_on(); -+ else -+ au_debug_off(); -+ } -+ return err; -+} -+ -+/* Returns length written or -errno. Buffer is 4k (ie. be short!) */ -+static int param_atomic_t_get(char *buffer, const struct kernel_param *kp) -+{ -+ atomic_t *a; -+ -+ a = kp->arg; -+ return sprintf(buffer, "%d", atomic_read(a)); -+} -+ -+static struct kernel_param_ops param_ops_atomic_t = { -+ .set = param_atomic_t_set, -+ .get = param_atomic_t_get -+ /* void (*free)(void *arg) */ -+}; -+ -+atomic_t aufs_debug = ATOMIC_INIT(0); -+MODULE_PARM_DESC(debug, "debug print"); -+module_param_named(debug, aufs_debug, atomic_t, S_IRUGO | S_IWUSR | S_IWGRP); -+ -+DEFINE_MUTEX(au_dbg_mtx); /* just to serialize the dbg msgs */ -+char *au_plevel = KERN_DEBUG; -+#define dpri(fmt, ...) 
do { \ -+ if ((au_plevel \ -+ && strcmp(au_plevel, KERN_DEBUG)) \ -+ || au_debug_test()) \ -+ printk("%s" fmt, au_plevel, ##__VA_ARGS__); \ -+} while (0) -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_dpri_whlist(struct au_nhash *whlist) -+{ -+ unsigned long ul, n; -+ struct hlist_head *head; -+ struct au_vdir_wh *pos; -+ -+ n = whlist->nh_num; -+ head = whlist->nh_head; -+ for (ul = 0; ul < n; ul++) { -+ hlist_for_each_entry(pos, head, wh_hash) -+ dpri("b%d, %.*s, %d\n", -+ pos->wh_bindex, -+ pos->wh_str.len, pos->wh_str.name, -+ pos->wh_str.len); -+ head++; -+ } -+} -+ -+void au_dpri_vdir(struct au_vdir *vdir) -+{ -+ unsigned long ul; -+ union au_vdir_deblk_p p; -+ unsigned char *o; -+ -+ if (!vdir || IS_ERR(vdir)) { -+ dpri("err %ld\n", PTR_ERR(vdir)); -+ return; -+ } -+ -+ dpri("deblk %u, nblk %lu, deblk %p, last{%lu, %p}, ver %lu\n", -+ vdir->vd_deblk_sz, vdir->vd_nblk, vdir->vd_deblk, -+ vdir->vd_last.ul, vdir->vd_last.p.deblk, vdir->vd_version); -+ for (ul = 0; ul < vdir->vd_nblk; ul++) { -+ p.deblk = vdir->vd_deblk[ul]; -+ o = p.deblk; -+ dpri("[%lu]: %p\n", ul, o); -+ } -+} -+ -+static int do_pri_inode(aufs_bindex_t bindex, struct inode *inode, int hn, -+ struct dentry *wh) -+{ -+ char *n = NULL; -+ int l = 0; -+ -+ if (!inode || IS_ERR(inode)) { -+ dpri("i%d: err %ld\n", bindex, PTR_ERR(inode)); -+ return -1; -+ } -+ -+ /* the type of i_blocks depends upon CONFIG_LBDAF */ -+ BUILD_BUG_ON(sizeof(inode->i_blocks) != sizeof(unsigned long) -+ && sizeof(inode->i_blocks) != sizeof(u64)); -+ if (wh) { -+ n = (void *)wh->d_name.name; -+ l = wh->d_name.len; -+ } -+ -+ dpri("i%d: %p, i%lu, %s, cnt %d, nl %u, 0%o, sz %llu, blk %llu," -+ " hn %d, ct %lld, np %lu, st 0x%lx, f 0x%x, v %llu, g %x%s%.*s\n", -+ bindex, inode, -+ inode->i_ino, inode->i_sb ? 
au_sbtype(inode->i_sb) : "??", -+ atomic_read(&inode->i_count), inode->i_nlink, inode->i_mode, -+ i_size_read(inode), (unsigned long long)inode->i_blocks, -+ hn, (long long)timespec_to_ns(&inode->i_ctime) & 0x0ffff, -+ inode->i_mapping ? inode->i_mapping->nrpages : 0, -+ inode->i_state, inode->i_flags, inode->i_version, -+ inode->i_generation, -+ l ? ", wh " : "", l, n); -+ return 0; -+} -+ -+void au_dpri_inode(struct inode *inode) -+{ -+ struct au_iinfo *iinfo; -+ aufs_bindex_t bindex; -+ int err, hn; -+ -+ err = do_pri_inode(-1, inode, -1, NULL); -+ if (err || !au_test_aufs(inode->i_sb)) -+ return; -+ -+ iinfo = au_ii(inode); -+ if (!iinfo) -+ return; -+ dpri("i-1: bstart %d, bend %d, gen %d\n", -+ iinfo->ii_bstart, iinfo->ii_bend, au_iigen(inode, NULL)); -+ if (iinfo->ii_bstart < 0) -+ return; -+ hn = 0; -+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; bindex++) { -+ hn = !!au_hn(iinfo->ii_hinode + bindex); -+ do_pri_inode(bindex, iinfo->ii_hinode[0 + bindex].hi_inode, hn, -+ iinfo->ii_hinode[0 + bindex].hi_whdentry); -+ } -+} -+ -+void au_dpri_dalias(struct inode *inode) -+{ -+ struct dentry *d; -+ -+ spin_lock(&inode->i_lock); -+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) -+ au_dpri_dentry(d); -+ spin_unlock(&inode->i_lock); -+} -+ -+static int do_pri_dentry(aufs_bindex_t bindex, struct dentry *dentry) -+{ -+ struct dentry *wh = NULL; -+ int hn; -+ struct au_iinfo *iinfo; -+ -+ if (!dentry || IS_ERR(dentry)) { -+ dpri("d%d: err %ld\n", bindex, PTR_ERR(dentry)); -+ return -1; -+ } -+ /* do not call dget_parent() here */ -+ /* note: access d_xxx without d_lock */ -+ dpri("d%d: %p, %pd2?, %s, cnt %d, flags 0x%x, %shashed\n", -+ bindex, dentry, dentry, -+ dentry->d_sb ? au_sbtype(dentry->d_sb) : "??", -+ au_dcount(dentry), dentry->d_flags, -+ d_unhashed(dentry) ? 
"un" : ""); -+ hn = -1; -+ if (bindex >= 0 && dentry->d_inode && au_test_aufs(dentry->d_sb)) { -+ iinfo = au_ii(dentry->d_inode); -+ if (iinfo) { -+ hn = !!au_hn(iinfo->ii_hinode + bindex); -+ wh = iinfo->ii_hinode[0 + bindex].hi_whdentry; -+ } -+ } -+ do_pri_inode(bindex, dentry->d_inode, hn, wh); -+ return 0; -+} -+ -+void au_dpri_dentry(struct dentry *dentry) -+{ -+ struct au_dinfo *dinfo; -+ aufs_bindex_t bindex; -+ int err; -+ struct au_hdentry *hdp; -+ -+ err = do_pri_dentry(-1, dentry); -+ if (err || !au_test_aufs(dentry->d_sb)) -+ return; -+ -+ dinfo = au_di(dentry); -+ if (!dinfo) -+ return; -+ dpri("d-1: bstart %d, bend %d, bwh %d, bdiropq %d, gen %d, tmp %d\n", -+ dinfo->di_bstart, dinfo->di_bend, -+ dinfo->di_bwh, dinfo->di_bdiropq, au_digen(dentry), -+ dinfo->di_tmpfile); -+ if (dinfo->di_bstart < 0) -+ return; -+ hdp = dinfo->di_hdentry; -+ for (bindex = dinfo->di_bstart; bindex <= dinfo->di_bend; bindex++) -+ do_pri_dentry(bindex, hdp[0 + bindex].hd_dentry); -+} -+ -+static int do_pri_file(aufs_bindex_t bindex, struct file *file) -+{ -+ char a[32]; -+ -+ if (!file || IS_ERR(file)) { -+ dpri("f%d: err %ld\n", bindex, PTR_ERR(file)); -+ return -1; -+ } -+ a[0] = 0; -+ if (bindex < 0 -+ && !IS_ERR_OR_NULL(file->f_dentry) -+ && au_test_aufs(file->f_dentry->d_sb) -+ && au_fi(file)) -+ snprintf(a, sizeof(a), ", gen %d, mmapped %d", -+ au_figen(file), atomic_read(&au_fi(file)->fi_mmapped)); -+ dpri("f%d: mode 0x%x, flags 0%o, cnt %ld, v %llu, pos %llu%s\n", -+ bindex, file->f_mode, file->f_flags, (long)file_count(file), -+ file->f_version, file->f_pos, a); -+ if (!IS_ERR_OR_NULL(file->f_dentry)) -+ do_pri_dentry(bindex, file->f_dentry); -+ return 0; -+} -+ -+void au_dpri_file(struct file *file) -+{ -+ struct au_finfo *finfo; -+ struct au_fidir *fidir; -+ struct au_hfile *hfile; -+ aufs_bindex_t bindex; -+ int err; -+ -+ err = do_pri_file(-1, file); -+ if (err -+ || IS_ERR_OR_NULL(file->f_dentry) -+ || !au_test_aufs(file->f_dentry->d_sb)) -+ return; -+ -+ 
finfo = au_fi(file); -+ if (!finfo) -+ return; -+ if (finfo->fi_btop < 0) -+ return; -+ fidir = finfo->fi_hdir; -+ if (!fidir) -+ do_pri_file(finfo->fi_btop, finfo->fi_htop.hf_file); -+ else -+ for (bindex = finfo->fi_btop; -+ bindex >= 0 && bindex <= fidir->fd_bbot; -+ bindex++) { -+ hfile = fidir->fd_hfile + bindex; -+ do_pri_file(bindex, hfile ? hfile->hf_file : NULL); -+ } -+} -+ -+static int do_pri_br(aufs_bindex_t bindex, struct au_branch *br) -+{ -+ struct vfsmount *mnt; -+ struct super_block *sb; -+ -+ if (!br || IS_ERR(br)) -+ goto out; -+ mnt = au_br_mnt(br); -+ if (!mnt || IS_ERR(mnt)) -+ goto out; -+ sb = mnt->mnt_sb; -+ if (!sb || IS_ERR(sb)) -+ goto out; -+ -+ dpri("s%d: {perm 0x%x, id %d, cnt %d, wbr %p}, " -+ "%s, dev 0x%02x%02x, flags 0x%lx, cnt %d, active %d, " -+ "xino %d\n", -+ bindex, br->br_perm, br->br_id, atomic_read(&br->br_count), -+ br->br_wbr, au_sbtype(sb), MAJOR(sb->s_dev), MINOR(sb->s_dev), -+ sb->s_flags, sb->s_count, -+ atomic_read(&sb->s_active), !!br->br_xino.xi_file); -+ return 0; -+ -+out: -+ dpri("s%d: err %ld\n", bindex, PTR_ERR(br)); -+ return -1; -+} -+ -+void au_dpri_sb(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ aufs_bindex_t bindex; -+ int err; -+ /* to reuduce stack size */ -+ struct { -+ struct vfsmount mnt; -+ struct au_branch fake; -+ } *a; -+ -+ /* this function can be called from magic sysrq */ -+ a = kzalloc(sizeof(*a), GFP_ATOMIC); -+ if (unlikely(!a)) { -+ dpri("no memory\n"); -+ return; -+ } -+ -+ a->mnt.mnt_sb = sb; -+ a->fake.br_path.mnt = &a->mnt; -+ atomic_set(&a->fake.br_count, 0); -+ smp_mb(); /* atomic_set */ -+ err = do_pri_br(-1, &a->fake); -+ kfree(a); -+ dpri("dev 0x%x\n", sb->s_dev); -+ if (err || !au_test_aufs(sb)) -+ return; -+ -+ sbinfo = au_sbi(sb); -+ if (!sbinfo) -+ return; -+ dpri("nw %d, gen %u, kobj %d\n", -+ atomic_read(&sbinfo->si_nowait.nw_len), sbinfo->si_generation, -+ atomic_read(&sbinfo->si_kobj.kref.refcount)); -+ for (bindex = 0; bindex <= sbinfo->si_bend; bindex++) 
-+ do_pri_br(bindex, sbinfo->si_branch[0 + bindex]); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line) -+{ -+ struct inode *h_inode, *inode = dentry->d_inode; -+ struct dentry *h_dentry; -+ aufs_bindex_t bindex, bend, bi; -+ -+ if (!inode /* || au_di(dentry)->di_lsc == AuLsc_DI_TMP */) -+ return; -+ -+ bend = au_dbend(dentry); -+ bi = au_ibend(inode); -+ if (bi < bend) -+ bend = bi; -+ bindex = au_dbstart(dentry); -+ bi = au_ibstart(inode); -+ if (bi > bindex) -+ bindex = bi; -+ -+ for (; bindex <= bend; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ h_inode = au_h_iptr(inode, bindex); -+ if (unlikely(h_inode != h_dentry->d_inode)) { -+ au_debug_on(); -+ AuDbg("b%d, %s:%d\n", bindex, func, line); -+ AuDbgDentry(dentry); -+ AuDbgInode(inode); -+ au_debug_off(); -+ BUG(); -+ } -+ } -+} -+ -+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen) -+{ -+ int err, i, j; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct dentry **dentries; -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ AuDebugOn(err); -+ err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/1); -+ AuDebugOn(err); -+ for (i = dpages.ndpage - 1; !err && i >= 0; i--) { -+ dpage = dpages.dpages + i; -+ dentries = dpage->dentries; -+ for (j = dpage->ndentry - 1; !err && j >= 0; j--) -+ AuDebugOn(au_digen_test(dentries[j], sigen)); -+ } -+ au_dpages_free(&dpages); -+} -+ -+void au_dbg_verify_kthread(void) -+{ -+ if (au_wkq_test()) { -+ au_dbg_blocked(); -+ /* -+ * It may be recursive, but udba=notify between two aufs mounts, -+ * where a single ro branch is shared, is not a problem. 
-+ */ -+ /* WARN_ON(1); */ -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int __init au_debug_init(void) -+{ -+ aufs_bindex_t bindex; -+ struct au_vdir_destr destr; -+ -+ bindex = -1; -+ AuDebugOn(bindex >= 0); -+ -+ destr.len = -1; -+ AuDebugOn(destr.len < NAME_MAX); -+ -+#ifdef CONFIG_4KSTACKS -+ pr_warn("CONFIG_4KSTACKS is defined.\n"); -+#endif -+ -+ return 0; -+} -diff --git a/fs/aufs/debug.h b/fs/aufs/debug.h -new file mode 100644 -index 0000000..039e6f8 ---- /dev/null -+++ b/fs/aufs/debug.h -@@ -0,0 +1,228 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * debug print functions -+ */ -+ -+#ifndef __AUFS_DEBUG_H__ -+#define __AUFS_DEBUG_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+#include -+#include -+ -+#ifdef CONFIG_AUFS_DEBUG -+#define AuDebugOn(a) BUG_ON(a) -+ -+/* module parameter */ -+extern atomic_t aufs_debug; -+static inline void au_debug_on(void) -+{ -+ atomic_inc(&aufs_debug); -+} -+static inline void au_debug_off(void) -+{ -+ atomic_dec_if_positive(&aufs_debug); -+} -+ -+static inline int au_debug_test(void) -+{ -+ return atomic_read(&aufs_debug) > 0; -+} -+#else -+#define AuDebugOn(a) do {} while (0) -+AuStubVoid(au_debug_on, void) -+AuStubVoid(au_debug_off, void) -+AuStubInt0(au_debug_test, void) -+#endif /* CONFIG_AUFS_DEBUG */ -+ -+#define param_check_atomic_t(name, p) __param_check(name, p, atomic_t) -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* debug print */ -+ -+#define AuDbg(fmt, ...) do { \ -+ if (au_debug_test()) \ -+ pr_debug("DEBUG: " fmt, ##__VA_ARGS__); \ -+} while (0) -+#define AuLabel(l) AuDbg(#l "\n") -+#define AuIOErr(fmt, ...) pr_err("I/O Error, " fmt, ##__VA_ARGS__) -+#define AuWarn1(fmt, ...) do { \ -+ static unsigned char _c; \ -+ if (!_c++) \ -+ pr_warn(fmt, ##__VA_ARGS__); \ -+} while (0) -+ -+#define AuErr1(fmt, ...) do { \ -+ static unsigned char _c; \ -+ if (!_c++) \ -+ pr_err(fmt, ##__VA_ARGS__); \ -+} while (0) -+ -+#define AuIOErr1(fmt, ...) do { \ -+ static unsigned char _c; \ -+ if (!_c++) \ -+ AuIOErr(fmt, ##__VA_ARGS__); \ -+} while (0) -+ -+#define AuUnsupportMsg "This operation is not supported." \ -+ " Please report this application to aufs-users ML." -+#define AuUnsupport(fmt, ...) 
do { \ -+ pr_err(AuUnsupportMsg "\n" fmt, ##__VA_ARGS__); \ -+ dump_stack(); \ -+} while (0) -+ -+#define AuTraceErr(e) do { \ -+ if (unlikely((e) < 0)) \ -+ AuDbg("err %d\n", (int)(e)); \ -+} while (0) -+ -+#define AuTraceErrPtr(p) do { \ -+ if (IS_ERR(p)) \ -+ AuDbg("err %ld\n", PTR_ERR(p)); \ -+} while (0) -+ -+/* dirty macros for debug print, use with "%.*s" and caution */ -+#define AuLNPair(qstr) (qstr)->len, (qstr)->name -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct dentry; -+#ifdef CONFIG_AUFS_DEBUG -+extern struct mutex au_dbg_mtx; -+extern char *au_plevel; -+struct au_nhash; -+void au_dpri_whlist(struct au_nhash *whlist); -+struct au_vdir; -+void au_dpri_vdir(struct au_vdir *vdir); -+struct inode; -+void au_dpri_inode(struct inode *inode); -+void au_dpri_dalias(struct inode *inode); -+void au_dpri_dentry(struct dentry *dentry); -+struct file; -+void au_dpri_file(struct file *filp); -+struct super_block; -+void au_dpri_sb(struct super_block *sb); -+ -+#define au_dbg_verify_dinode(d) __au_dbg_verify_dinode(d, __func__, __LINE__) -+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line); -+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen); -+void au_dbg_verify_kthread(void); -+ -+int __init au_debug_init(void); -+ -+#define AuDbgWhlist(w) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#w "\n"); \ -+ au_dpri_whlist(w); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgVdir(v) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#v "\n"); \ -+ au_dpri_vdir(v); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgInode(i) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#i "\n"); \ -+ au_dpri_inode(i); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgDAlias(i) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#i "\n"); \ -+ au_dpri_dalias(i); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgDentry(d) do { \ -+ 
mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#d "\n"); \ -+ au_dpri_dentry(d); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgFile(f) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#f "\n"); \ -+ au_dpri_file(f); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgSb(sb) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#sb "\n"); \ -+ au_dpri_sb(sb); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgSym(addr) do { \ -+ char sym[KSYM_SYMBOL_LEN]; \ -+ sprint_symbol(sym, (unsigned long)addr); \ -+ AuDbg("%s\n", sym); \ -+} while (0) -+#else -+AuStubVoid(au_dbg_verify_dinode, struct dentry *dentry) -+AuStubVoid(au_dbg_verify_dir_parent, struct dentry *dentry, unsigned int sigen) -+AuStubVoid(au_dbg_verify_nondir_parent, struct dentry *dentry, -+ unsigned int sigen) -+AuStubVoid(au_dbg_verify_gen, struct dentry *parent, unsigned int sigen) -+AuStubVoid(au_dbg_verify_kthread, void) -+AuStubInt0(__init au_debug_init, void) -+ -+#define AuDbgWhlist(w) do {} while (0) -+#define AuDbgVdir(v) do {} while (0) -+#define AuDbgInode(i) do {} while (0) -+#define AuDbgDAlias(i) do {} while (0) -+#define AuDbgDentry(d) do {} while (0) -+#define AuDbgFile(f) do {} while (0) -+#define AuDbgSb(sb) do {} while (0) -+#define AuDbgSym(addr) do {} while (0) -+#endif /* CONFIG_AUFS_DEBUG */ -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_MAGIC_SYSRQ -+int __init au_sysrq_init(void); -+void au_sysrq_fin(void); -+ -+#ifdef CONFIG_HW_CONSOLE -+#define au_dbg_blocked() do { \ -+ WARN_ON(1); \ -+ handle_sysrq('w'); \ -+} while (0) -+#else -+AuStubVoid(au_dbg_blocked, void) -+#endif -+ -+#else -+AuStubInt0(__init au_sysrq_init, void) -+AuStubVoid(au_sysrq_fin, void) -+AuStubVoid(au_dbg_blocked, void) -+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */ -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_DEBUG_H__ */ -diff --git a/fs/aufs/dentry.c b/fs/aufs/dentry.c -new file mode 100644 -index 0000000..ed56947 ---- 
/dev/null -+++ b/fs/aufs/dentry.c -@@ -0,0 +1,1129 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * lookup and dentry operations -+ */ -+ -+#include -+#include "aufs.h" -+ -+#define AuLkup_ALLOW_NEG 1 -+#define AuLkup_IGNORE_PERM (1 << 1) -+#define au_ftest_lkup(flags, name) ((flags) & AuLkup_##name) -+#define au_fset_lkup(flags, name) \ -+ do { (flags) |= AuLkup_##name; } while (0) -+#define au_fclr_lkup(flags, name) \ -+ do { (flags) &= ~AuLkup_##name; } while (0) -+ -+struct au_do_lookup_args { -+ unsigned int flags; -+ mode_t type; -+}; -+ -+/* -+ * returns positive/negative dentry, NULL or an error. -+ * NULL means whiteout-ed or not-found. 
-+ */ -+static struct dentry* -+au_do_lookup(struct dentry *h_parent, struct dentry *dentry, -+ aufs_bindex_t bindex, struct qstr *wh_name, -+ struct au_do_lookup_args *args) -+{ -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ struct au_branch *br; -+ int wh_found, opq; -+ unsigned char wh_able; -+ const unsigned char allow_neg = !!au_ftest_lkup(args->flags, ALLOW_NEG); -+ const unsigned char ignore_perm = !!au_ftest_lkup(args->flags, -+ IGNORE_PERM); -+ -+ wh_found = 0; -+ br = au_sbr(dentry->d_sb, bindex); -+ wh_able = !!au_br_whable(br->br_perm); -+ if (wh_able) -+ wh_found = au_wh_test(h_parent, wh_name, /*try_sio*/0); -+ h_dentry = ERR_PTR(wh_found); -+ if (!wh_found) -+ goto real_lookup; -+ if (unlikely(wh_found < 0)) -+ goto out; -+ -+ /* We found a whiteout */ -+ /* au_set_dbend(dentry, bindex); */ -+ au_set_dbwh(dentry, bindex); -+ if (!allow_neg) -+ return NULL; /* success */ -+ -+real_lookup: -+ if (!ignore_perm) -+ h_dentry = vfsub_lkup_one(&dentry->d_name, h_parent); -+ else -+ h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent); -+ if (IS_ERR(h_dentry)) { -+ if (PTR_ERR(h_dentry) == -ENAMETOOLONG -+ && !allow_neg) -+ h_dentry = NULL; -+ goto out; -+ } -+ -+ h_inode = h_dentry->d_inode; -+ if (!h_inode) { -+ if (!allow_neg) -+ goto out_neg; -+ } else if (wh_found -+ || (args->type && args->type != (h_inode->i_mode & S_IFMT))) -+ goto out_neg; -+ -+ if (au_dbend(dentry) <= bindex) -+ au_set_dbend(dentry, bindex); -+ if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry)) -+ au_set_dbstart(dentry, bindex); -+ au_set_h_dptr(dentry, bindex, h_dentry); -+ -+ if (!d_is_dir(h_dentry) -+ || !wh_able -+ || (d_is_positive(dentry) && !d_is_dir(dentry))) -+ goto out; /* success */ -+ -+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD); -+ opq = au_diropq_test(h_dentry); -+ mutex_unlock(&h_inode->i_mutex); -+ if (opq > 0) -+ au_set_dbdiropq(dentry, bindex); -+ else if (unlikely(opq < 0)) { -+ au_set_h_dptr(dentry, bindex, NULL); -+ h_dentry = 
ERR_PTR(opq); -+ } -+ goto out; -+ -+out_neg: -+ dput(h_dentry); -+ h_dentry = NULL; -+out: -+ return h_dentry; -+} -+ -+static int au_test_shwh(struct super_block *sb, const struct qstr *name) -+{ -+ if (unlikely(!au_opt_test(au_mntflags(sb), SHWH) -+ && !strncmp(name->name, AUFS_WH_PFX, AUFS_WH_PFX_LEN))) -+ return -EPERM; -+ return 0; -+} -+ -+/* -+ * returns the number of lower positive dentries, -+ * otherwise an error. -+ * can be called at unlinking with @type is zero. -+ */ -+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type) -+{ -+ int npositive, err; -+ aufs_bindex_t bindex, btail, bdiropq; -+ unsigned char isdir, dirperm1; -+ struct qstr whname; -+ struct au_do_lookup_args args = { -+ .flags = 0, -+ .type = type -+ }; -+ const struct qstr *name = &dentry->d_name; -+ struct dentry *parent; -+ struct inode *inode; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ err = au_test_shwh(sb, name); -+ if (unlikely(err)) -+ goto out; -+ -+ err = au_wh_name_alloc(&whname, name); -+ if (unlikely(err)) -+ goto out; -+ -+ inode = dentry->d_inode; -+ isdir = !!d_is_dir(dentry); -+ if (!type) -+ au_fset_lkup(args.flags, ALLOW_NEG); -+ dirperm1 = !!au_opt_test(au_mntflags(sb), DIRPERM1); -+ -+ npositive = 0; -+ parent = dget_parent(dentry); -+ btail = au_dbtaildir(parent); -+ for (bindex = bstart; bindex <= btail; bindex++) { -+ struct dentry *h_parent, *h_dentry; -+ struct inode *h_inode, *h_dir; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry) { -+ if (h_dentry->d_inode) -+ npositive++; -+ if (type != S_IFDIR) -+ break; -+ continue; -+ } -+ h_parent = au_h_dptr(parent, bindex); -+ if (!h_parent || !d_is_dir(h_parent)) -+ continue; -+ -+ h_dir = h_parent->d_inode; -+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT); -+ h_dentry = au_do_lookup(h_parent, dentry, bindex, &whname, -+ &args); -+ mutex_unlock(&h_dir->i_mutex); -+ err = PTR_ERR(h_dentry); -+ if (IS_ERR(h_dentry)) -+ goto out_parent; -+ if (h_dentry) -+ 
au_fclr_lkup(args.flags, ALLOW_NEG); -+ if (dirperm1) -+ au_fset_lkup(args.flags, IGNORE_PERM); -+ -+ if (au_dbwh(dentry) == bindex) -+ break; -+ if (!h_dentry) -+ continue; -+ h_inode = h_dentry->d_inode; -+ if (!h_inode) -+ continue; -+ npositive++; -+ if (!args.type) -+ args.type = h_inode->i_mode & S_IFMT; -+ if (args.type != S_IFDIR) -+ break; -+ else if (isdir) { -+ /* the type of lower may be different */ -+ bdiropq = au_dbdiropq(dentry); -+ if (bdiropq >= 0 && bdiropq <= bindex) -+ break; -+ } -+ } -+ -+ if (npositive) { -+ AuLabel(positive); -+ au_update_dbstart(dentry); -+ } -+ err = npositive; -+ if (unlikely(!au_opt_test(au_mntflags(sb), UDBA_NONE) -+ && au_dbstart(dentry) < 0)) { -+ err = -EIO; -+ AuIOErr("both of real entry and whiteout found, %pd, err %d\n", -+ dentry, err); -+ } -+ -+out_parent: -+ dput(parent); -+ kfree(whname.name); -+out: -+ return err; -+} -+ -+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent) -+{ -+ struct dentry *dentry; -+ int wkq_err; -+ -+ if (!au_test_h_perm_sio(parent->d_inode, MAY_EXEC)) -+ dentry = vfsub_lkup_one(name, parent); -+ else { -+ struct vfsub_lkup_one_args args = { -+ .errp = &dentry, -+ .name = name, -+ .parent = parent -+ }; -+ -+ wkq_err = au_wkq_wait(vfsub_call_lkup_one, &args); -+ if (unlikely(wkq_err)) -+ dentry = ERR_PTR(wkq_err); -+ } -+ -+ return dentry; -+} -+ -+/* -+ * lookup @dentry on @bindex which should be negative. 
-+ */ -+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh) -+{ -+ int err; -+ struct dentry *parent, *h_parent, *h_dentry; -+ struct au_branch *br; -+ -+ parent = dget_parent(dentry); -+ h_parent = au_h_dptr(parent, bindex); -+ br = au_sbr(dentry->d_sb, bindex); -+ if (wh) -+ h_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name); -+ else -+ h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent); -+ err = PTR_ERR(h_dentry); -+ if (IS_ERR(h_dentry)) -+ goto out; -+ if (unlikely(h_dentry->d_inode)) { -+ err = -EIO; -+ AuIOErr("%pd should be negative on b%d.\n", h_dentry, bindex); -+ dput(h_dentry); -+ goto out; -+ } -+ -+ err = 0; -+ if (bindex < au_dbstart(dentry)) -+ au_set_dbstart(dentry, bindex); -+ if (au_dbend(dentry) < bindex) -+ au_set_dbend(dentry, bindex); -+ au_set_h_dptr(dentry, bindex, h_dentry); -+ -+out: -+ dput(parent); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* subset of struct inode */ -+struct au_iattr { -+ unsigned long i_ino; -+ /* unsigned int i_nlink; */ -+ kuid_t i_uid; -+ kgid_t i_gid; -+ u64 i_version; -+/* -+ loff_t i_size; -+ blkcnt_t i_blocks; -+*/ -+ umode_t i_mode; -+}; -+ -+static void au_iattr_save(struct au_iattr *ia, struct inode *h_inode) -+{ -+ ia->i_ino = h_inode->i_ino; -+ /* ia->i_nlink = h_inode->i_nlink; */ -+ ia->i_uid = h_inode->i_uid; -+ ia->i_gid = h_inode->i_gid; -+ ia->i_version = h_inode->i_version; -+/* -+ ia->i_size = h_inode->i_size; -+ ia->i_blocks = h_inode->i_blocks; -+*/ -+ ia->i_mode = (h_inode->i_mode & S_IFMT); -+} -+ -+static int au_iattr_test(struct au_iattr *ia, struct inode *h_inode) -+{ -+ return ia->i_ino != h_inode->i_ino -+ /* || ia->i_nlink != h_inode->i_nlink */ -+ || !uid_eq(ia->i_uid, h_inode->i_uid) -+ || !gid_eq(ia->i_gid, h_inode->i_gid) -+ || ia->i_version != h_inode->i_version -+/* -+ || ia->i_size != h_inode->i_size -+ || ia->i_blocks != h_inode->i_blocks -+*/ -+ || ia->i_mode != (h_inode->i_mode & S_IFMT); 
-+} -+ -+static int au_h_verify_dentry(struct dentry *h_dentry, struct dentry *h_parent, -+ struct au_branch *br) -+{ -+ int err; -+ struct au_iattr ia; -+ struct inode *h_inode; -+ struct dentry *h_d; -+ struct super_block *h_sb; -+ -+ err = 0; -+ memset(&ia, -1, sizeof(ia)); -+ h_sb = h_dentry->d_sb; -+ h_inode = h_dentry->d_inode; -+ if (h_inode) -+ au_iattr_save(&ia, h_inode); -+ else if (au_test_nfs(h_sb) || au_test_fuse(h_sb)) -+ /* nfs d_revalidate may return 0 for negative dentry */ -+ /* fuse d_revalidate always return 0 for negative dentry */ -+ goto out; -+ -+ /* main purpose is namei.c:cached_lookup() and d_revalidate */ -+ h_d = vfsub_lkup_one(&h_dentry->d_name, h_parent); -+ err = PTR_ERR(h_d); -+ if (IS_ERR(h_d)) -+ goto out; -+ -+ err = 0; -+ if (unlikely(h_d != h_dentry -+ || h_d->d_inode != h_inode -+ || (h_inode && au_iattr_test(&ia, h_inode)))) -+ err = au_busy_or_stale(); -+ dput(h_d); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir, -+ struct dentry *h_parent, struct au_branch *br) -+{ -+ int err; -+ -+ err = 0; -+ if (udba == AuOpt_UDBA_REVAL -+ && !au_test_fs_remote(h_dentry->d_sb)) { -+ IMustLock(h_dir); -+ err = (h_dentry->d_parent->d_inode != h_dir); -+ } else if (udba != AuOpt_UDBA_NONE) -+ err = au_h_verify_dentry(h_dentry, h_parent, br); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_do_refresh_hdentry(struct dentry *dentry, struct dentry *parent) -+{ -+ int err; -+ aufs_bindex_t new_bindex, bindex, bend, bwh, bdiropq; -+ struct au_hdentry tmp, *p, *q; -+ struct au_dinfo *dinfo; -+ struct super_block *sb; -+ -+ DiMustWriteLock(dentry); -+ -+ sb = dentry->d_sb; -+ dinfo = au_di(dentry); -+ bend = dinfo->di_bend; -+ bwh = dinfo->di_bwh; -+ bdiropq = dinfo->di_bdiropq; -+ p = dinfo->di_hdentry + dinfo->di_bstart; -+ for (bindex = dinfo->di_bstart; bindex <= bend; bindex++, p++) { 
-+ if (!p->hd_dentry) -+ continue; -+ -+ new_bindex = au_br_index(sb, p->hd_id); -+ if (new_bindex == bindex) -+ continue; -+ -+ if (dinfo->di_bwh == bindex) -+ bwh = new_bindex; -+ if (dinfo->di_bdiropq == bindex) -+ bdiropq = new_bindex; -+ if (new_bindex < 0) { -+ au_hdput(p); -+ p->hd_dentry = NULL; -+ continue; -+ } -+ -+ /* swap two lower dentries, and loop again */ -+ q = dinfo->di_hdentry + new_bindex; -+ tmp = *q; -+ *q = *p; -+ *p = tmp; -+ if (tmp.hd_dentry) { -+ bindex--; -+ p--; -+ } -+ } -+ -+ dinfo->di_bwh = -1; -+ if (bwh >= 0 && bwh <= au_sbend(sb) && au_sbr_whable(sb, bwh)) -+ dinfo->di_bwh = bwh; -+ -+ dinfo->di_bdiropq = -1; -+ if (bdiropq >= 0 -+ && bdiropq <= au_sbend(sb) -+ && au_sbr_whable(sb, bdiropq)) -+ dinfo->di_bdiropq = bdiropq; -+ -+ err = -EIO; -+ dinfo->di_bstart = -1; -+ dinfo->di_bend = -1; -+ bend = au_dbend(parent); -+ p = dinfo->di_hdentry; -+ for (bindex = 0; bindex <= bend; bindex++, p++) -+ if (p->hd_dentry) { -+ dinfo->di_bstart = bindex; -+ break; -+ } -+ -+ if (dinfo->di_bstart >= 0) { -+ p = dinfo->di_hdentry + bend; -+ for (bindex = bend; bindex >= 0; bindex--, p--) -+ if (p->hd_dentry) { -+ dinfo->di_bend = bindex; -+ err = 0; -+ break; -+ } -+ } -+ -+ return err; -+} -+ -+static void au_do_hide(struct dentry *dentry) -+{ -+ struct inode *inode; -+ -+ inode = dentry->d_inode; -+ if (inode) { -+ if (!S_ISDIR(inode->i_mode)) { -+ if (inode->i_nlink && !d_unhashed(dentry)) -+ drop_nlink(inode); -+ } else { -+ clear_nlink(inode); -+ /* stop next lookup */ -+ inode->i_flags |= S_DEAD; -+ } -+ smp_mb(); /* necessary? 
*/ -+ } -+ d_drop(dentry); -+} -+ -+static int au_hide_children(struct dentry *parent) -+{ -+ int err, i, j, ndentry; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct dentry *dentry; -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_dcsub_pages(&dpages, parent, NULL, NULL); -+ if (unlikely(err)) -+ goto out_dpages; -+ -+ /* in reverse order */ -+ for (i = dpages.ndpage - 1; i >= 0; i--) { -+ dpage = dpages.dpages + i; -+ ndentry = dpage->ndentry; -+ for (j = ndentry - 1; j >= 0; j--) { -+ dentry = dpage->dentries[j]; -+ if (dentry != parent) -+ au_do_hide(dentry); -+ } -+ } -+ -+out_dpages: -+ au_dpages_free(&dpages); -+out: -+ return err; -+} -+ -+static void au_hide(struct dentry *dentry) -+{ -+ int err; -+ -+ AuDbgDentry(dentry); -+ if (d_is_dir(dentry)) { -+ /* shrink_dcache_parent(dentry); */ -+ err = au_hide_children(dentry); -+ if (unlikely(err)) -+ AuIOErr("%pd, failed hiding children, ignored %d\n", -+ dentry, err); -+ } -+ au_do_hide(dentry); -+} -+ -+/* -+ * By adding a dirty branch, a cached dentry may be affected in various ways. 
-+ * -+ * a dirty branch is added -+ * - on the top of layers -+ * - in the middle of layers -+ * - to the bottom of layers -+ * -+ * on the added branch there exists -+ * - a whiteout -+ * - a diropq -+ * - a same named entry -+ * + exist -+ * * negative --> positive -+ * * positive --> positive -+ * - type is unchanged -+ * - type is changed -+ * + doesn't exist -+ * * negative --> negative -+ * * positive --> negative (rejected by au_br_del() for non-dir case) -+ * - none -+ */ -+static int au_refresh_by_dinfo(struct dentry *dentry, struct au_dinfo *dinfo, -+ struct au_dinfo *tmp) -+{ -+ int err; -+ aufs_bindex_t bindex, bend; -+ struct { -+ struct dentry *dentry; -+ struct inode *inode; -+ mode_t mode; -+ } orig_h, tmp_h = { -+ .dentry = NULL -+ }; -+ struct au_hdentry *hd; -+ struct inode *inode, *h_inode; -+ struct dentry *h_dentry; -+ -+ err = 0; -+ AuDebugOn(dinfo->di_bstart < 0); -+ orig_h.dentry = dinfo->di_hdentry[dinfo->di_bstart].hd_dentry; -+ orig_h.inode = orig_h.dentry->d_inode; -+ orig_h.mode = 0; -+ if (orig_h.inode) -+ orig_h.mode = orig_h.inode->i_mode & S_IFMT; -+ if (tmp->di_bstart >= 0) { -+ tmp_h.dentry = tmp->di_hdentry[tmp->di_bstart].hd_dentry; -+ tmp_h.inode = tmp_h.dentry->d_inode; -+ if (tmp_h.inode) -+ tmp_h.mode = tmp_h.inode->i_mode & S_IFMT; -+ } -+ -+ inode = dentry->d_inode; -+ if (!orig_h.inode) { -+ AuDbg("nagative originally\n"); -+ if (inode) { -+ au_hide(dentry); -+ goto out; -+ } -+ AuDebugOn(inode); -+ AuDebugOn(dinfo->di_bstart != dinfo->di_bend); -+ AuDebugOn(dinfo->di_bdiropq != -1); -+ -+ if (!tmp_h.inode) { -+ AuDbg("negative --> negative\n"); -+ /* should have only one negative lower */ -+ if (tmp->di_bstart >= 0 -+ && tmp->di_bstart < dinfo->di_bstart) { -+ AuDebugOn(tmp->di_bstart != tmp->di_bend); -+ AuDebugOn(dinfo->di_bstart != dinfo->di_bend); -+ au_set_h_dptr(dentry, dinfo->di_bstart, NULL); -+ au_di_cp(dinfo, tmp); -+ hd = tmp->di_hdentry + tmp->di_bstart; -+ au_set_h_dptr(dentry, tmp->di_bstart, -+ 
dget(hd->hd_dentry)); -+ } -+ au_dbg_verify_dinode(dentry); -+ } else { -+ AuDbg("negative --> positive\n"); -+ /* -+ * similar to the behaviour of creating with bypassing -+ * aufs. -+ * unhash it in order to force an error in the -+ * succeeding create operation. -+ * we should not set S_DEAD here. -+ */ -+ d_drop(dentry); -+ /* au_di_swap(tmp, dinfo); */ -+ au_dbg_verify_dinode(dentry); -+ } -+ } else { -+ AuDbg("positive originally\n"); -+ /* inode may be NULL */ -+ AuDebugOn(inode && (inode->i_mode & S_IFMT) != orig_h.mode); -+ if (!tmp_h.inode) { -+ AuDbg("positive --> negative\n"); -+ /* or bypassing aufs */ -+ au_hide(dentry); -+ if (tmp->di_bwh >= 0 && tmp->di_bwh <= dinfo->di_bstart) -+ dinfo->di_bwh = tmp->di_bwh; -+ if (inode) -+ err = au_refresh_hinode_self(inode); -+ au_dbg_verify_dinode(dentry); -+ } else if (orig_h.mode == tmp_h.mode) { -+ AuDbg("positive --> positive, same type\n"); -+ if (!S_ISDIR(orig_h.mode) -+ && dinfo->di_bstart > tmp->di_bstart) { -+ /* -+ * similar to the behaviour of removing and -+ * creating. 
-+ */ -+ au_hide(dentry); -+ if (inode) -+ err = au_refresh_hinode_self(inode); -+ au_dbg_verify_dinode(dentry); -+ } else { -+ /* fill empty slots */ -+ if (dinfo->di_bstart > tmp->di_bstart) -+ dinfo->di_bstart = tmp->di_bstart; -+ if (dinfo->di_bend < tmp->di_bend) -+ dinfo->di_bend = tmp->di_bend; -+ dinfo->di_bwh = tmp->di_bwh; -+ dinfo->di_bdiropq = tmp->di_bdiropq; -+ hd = tmp->di_hdentry; -+ bend = dinfo->di_bend; -+ for (bindex = tmp->di_bstart; bindex <= bend; -+ bindex++) { -+ if (au_h_dptr(dentry, bindex)) -+ continue; -+ h_dentry = hd[bindex].hd_dentry; -+ if (!h_dentry) -+ continue; -+ h_inode = h_dentry->d_inode; -+ AuDebugOn(!h_inode); -+ AuDebugOn(orig_h.mode -+ != (h_inode->i_mode -+ & S_IFMT)); -+ au_set_h_dptr(dentry, bindex, -+ dget(h_dentry)); -+ } -+ err = au_refresh_hinode(inode, dentry); -+ au_dbg_verify_dinode(dentry); -+ } -+ } else { -+ AuDbg("positive --> positive, different type\n"); -+ /* similar to the behaviour of removing and creating */ -+ au_hide(dentry); -+ if (inode) -+ err = au_refresh_hinode_self(inode); -+ au_dbg_verify_dinode(dentry); -+ } -+ } -+ -+out: -+ return err; -+} -+ -+void au_refresh_dop(struct dentry *dentry, int force_reval) -+{ -+ const struct dentry_operations *dop -+ = force_reval ? 
&aufs_dop : dentry->d_sb->s_d_op; -+ static const unsigned int mask -+ = DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE; -+ -+ BUILD_BUG_ON(sizeof(mask) != sizeof(dentry->d_flags)); -+ -+ if (dentry->d_op == dop) -+ return; -+ -+ AuDbg("%pd\n", dentry); -+ spin_lock(&dentry->d_lock); -+ if (dop == &aufs_dop) -+ dentry->d_flags |= mask; -+ else -+ dentry->d_flags &= ~mask; -+ dentry->d_op = dop; -+ spin_unlock(&dentry->d_lock); -+} -+ -+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent) -+{ -+ int err, ebrange; -+ unsigned int sigen; -+ struct au_dinfo *dinfo, *tmp; -+ struct super_block *sb; -+ struct inode *inode; -+ -+ DiMustWriteLock(dentry); -+ AuDebugOn(IS_ROOT(dentry)); -+ AuDebugOn(!parent->d_inode); -+ -+ sb = dentry->d_sb; -+ inode = dentry->d_inode; -+ sigen = au_sigen(sb); -+ err = au_digen_test(parent, sigen); -+ if (unlikely(err)) -+ goto out; -+ -+ dinfo = au_di(dentry); -+ err = au_di_realloc(dinfo, au_sbend(sb) + 1); -+ if (unlikely(err)) -+ goto out; -+ ebrange = au_dbrange_test(dentry); -+ if (!ebrange) -+ ebrange = au_do_refresh_hdentry(dentry, parent); -+ -+ if (d_unhashed(dentry) || ebrange /* || dinfo->di_tmpfile */) { -+ AuDebugOn(au_dbstart(dentry) < 0 && au_dbend(dentry) >= 0); -+ if (inode) -+ err = au_refresh_hinode_self(inode); -+ au_dbg_verify_dinode(dentry); -+ if (!err) -+ goto out_dgen; /* success */ -+ goto out; -+ } -+ -+ /* temporary dinfo */ -+ AuDbgDentry(dentry); -+ err = -ENOMEM; -+ tmp = au_di_alloc(sb, AuLsc_DI_TMP); -+ if (unlikely(!tmp)) -+ goto out; -+ au_di_swap(tmp, dinfo); -+ /* returns the number of positive dentries */ -+ /* -+ * if current working dir is removed, it returns an error. -+ * but the dentry is legal. 
-+ */ -+ err = au_lkup_dentry(dentry, /*bstart*/0, /*type*/0); -+ AuDbgDentry(dentry); -+ au_di_swap(tmp, dinfo); -+ if (err == -ENOENT) -+ err = 0; -+ if (err >= 0) { -+ /* compare/refresh by dinfo */ -+ AuDbgDentry(dentry); -+ err = au_refresh_by_dinfo(dentry, dinfo, tmp); -+ au_dbg_verify_dinode(dentry); -+ AuTraceErr(err); -+ } -+ au_rw_write_unlock(&tmp->di_rwsem); -+ au_di_free(tmp); -+ if (unlikely(err)) -+ goto out; -+ -+out_dgen: -+ au_update_digen(dentry); -+out: -+ if (unlikely(err && !(dentry->d_flags & DCACHE_NFSFS_RENAMED))) { -+ AuIOErr("failed refreshing %pd, %d\n", dentry, err); -+ AuDbgDentry(dentry); -+ } -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_do_h_d_reval(struct dentry *h_dentry, unsigned int flags, -+ struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ int err, valid; -+ -+ err = 0; -+ if (!(h_dentry->d_flags & DCACHE_OP_REVALIDATE)) -+ goto out; -+ -+ AuDbg("b%d\n", bindex); -+ /* -+ * gave up supporting LOOKUP_CREATE/OPEN for lower fs, -+ * due to whiteout and branch permission. 
-+ */ -+ flags &= ~(/*LOOKUP_PARENT |*/ LOOKUP_OPEN | LOOKUP_CREATE -+ | LOOKUP_FOLLOW | LOOKUP_EXCL); -+ /* it may return tri-state */ -+ valid = h_dentry->d_op->d_revalidate(h_dentry, flags); -+ -+ if (unlikely(valid < 0)) -+ err = valid; -+ else if (!valid) -+ err = -EINVAL; -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* todo: remove this */ -+static int h_d_revalidate(struct dentry *dentry, struct inode *inode, -+ unsigned int flags, int do_udba) -+{ -+ int err; -+ umode_t mode, h_mode; -+ aufs_bindex_t bindex, btail, bstart, ibs, ibe; -+ unsigned char plus, unhashed, is_root, h_plus, h_nfs, tmpfile; -+ struct inode *h_inode, *h_cached_inode; -+ struct dentry *h_dentry; -+ struct qstr *name, *h_name; -+ -+ err = 0; -+ plus = 0; -+ mode = 0; -+ ibs = -1; -+ ibe = -1; -+ unhashed = !!d_unhashed(dentry); -+ is_root = !!IS_ROOT(dentry); -+ name = &dentry->d_name; -+ tmpfile = au_di(dentry)->di_tmpfile; -+ -+ /* -+ * Theoretically, REVAL test should be unnecessary in case of -+ * {FS,I}NOTIFY. -+ * But {fs,i}notify doesn't fire some necessary events, -+ * IN_ATTRIB for atime/nlink/pageio -+ * Let's do REVAL test too. 
-+ */ -+ if (do_udba && inode) { -+ mode = (inode->i_mode & S_IFMT); -+ plus = (inode->i_nlink > 0); -+ ibs = au_ibstart(inode); -+ ibe = au_ibend(inode); -+ } -+ -+ bstart = au_dbstart(dentry); -+ btail = bstart; -+ if (inode && S_ISDIR(inode->i_mode)) -+ btail = au_dbtaildir(dentry); -+ for (bindex = bstart; bindex <= btail; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ -+ AuDbg("b%d, %pd\n", bindex, h_dentry); -+ h_nfs = !!au_test_nfs(h_dentry->d_sb); -+ spin_lock(&h_dentry->d_lock); -+ h_name = &h_dentry->d_name; -+ if (unlikely(do_udba -+ && !is_root -+ && ((!h_nfs -+ && (unhashed != !!d_unhashed(h_dentry) -+ || (!tmpfile -+ && !au_qstreq(name, h_name)) -+ )) -+ || (h_nfs -+ && !(flags & LOOKUP_OPEN) -+ && (h_dentry->d_flags -+ & DCACHE_NFSFS_RENAMED))) -+ )) { -+ int h_unhashed; -+ -+ h_unhashed = d_unhashed(h_dentry); -+ spin_unlock(&h_dentry->d_lock); -+ AuDbg("unhash 0x%x 0x%x, %pd %pd\n", -+ unhashed, h_unhashed, dentry, h_dentry); -+ goto err; -+ } -+ spin_unlock(&h_dentry->d_lock); -+ -+ err = au_do_h_d_reval(h_dentry, flags, dentry, bindex); -+ if (unlikely(err)) -+ /* do not goto err, to keep the errno */ -+ break; -+ -+ /* todo: plink too? 
*/ -+ if (!do_udba) -+ continue; -+ -+ /* UDBA tests */ -+ h_inode = h_dentry->d_inode; -+ if (unlikely(!!inode != !!h_inode)) -+ goto err; -+ -+ h_plus = plus; -+ h_mode = mode; -+ h_cached_inode = h_inode; -+ if (h_inode) { -+ h_mode = (h_inode->i_mode & S_IFMT); -+ h_plus = (h_inode->i_nlink > 0); -+ } -+ if (inode && ibs <= bindex && bindex <= ibe) -+ h_cached_inode = au_h_iptr(inode, bindex); -+ -+ if (!h_nfs) { -+ if (unlikely(plus != h_plus && !tmpfile)) -+ goto err; -+ } else { -+ if (unlikely(!(h_dentry->d_flags & DCACHE_NFSFS_RENAMED) -+ && !is_root -+ && !IS_ROOT(h_dentry) -+ && unhashed != d_unhashed(h_dentry))) -+ goto err; -+ } -+ if (unlikely(mode != h_mode -+ || h_cached_inode != h_inode)) -+ goto err; -+ continue; -+ -+err: -+ err = -EINVAL; -+ break; -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+/* todo: consolidate with do_refresh() and au_reval_for_attr() */ -+static int simple_reval_dpath(struct dentry *dentry, unsigned int sigen) -+{ -+ int err; -+ struct dentry *parent; -+ -+ if (!au_digen_test(dentry, sigen)) -+ return 0; -+ -+ parent = dget_parent(dentry); -+ di_read_lock_parent(parent, AuLock_IR); -+ AuDebugOn(au_digen_test(parent, sigen)); -+ au_dbg_verify_gen(parent, sigen); -+ err = au_refresh_dentry(dentry, parent); -+ di_read_unlock(parent, AuLock_IR); -+ dput(parent); -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_reval_dpath(struct dentry *dentry, unsigned int sigen) -+{ -+ int err; -+ struct dentry *d, *parent; -+ struct inode *inode; -+ -+ if (!au_ftest_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR)) -+ return simple_reval_dpath(dentry, sigen); -+ -+ /* slow loop, keep it simple and stupid */ -+ /* cf: au_cpup_dirs() */ -+ err = 0; -+ parent = NULL; -+ while (au_digen_test(dentry, sigen)) { -+ d = dentry; -+ while (1) { -+ dput(parent); -+ parent = dget_parent(d); -+ if (!au_digen_test(parent, sigen)) -+ break; -+ d = parent; -+ } -+ -+ inode = d->d_inode; -+ if (d != dentry) -+ di_write_lock_child2(d); -+ -+ /* someone 
might update our dentry while we were sleeping */ -+ if (au_digen_test(d, sigen)) { -+ /* -+ * todo: consolidate with simple_reval_dpath(), -+ * do_refresh() and au_reval_for_attr(). -+ */ -+ di_read_lock_parent(parent, AuLock_IR); -+ err = au_refresh_dentry(d, parent); -+ di_read_unlock(parent, AuLock_IR); -+ } -+ -+ if (d != dentry) -+ di_write_unlock(d); -+ dput(parent); -+ if (unlikely(err)) -+ break; -+ } -+ -+ return err; -+} -+ -+/* -+ * if valid returns 1, otherwise 0. -+ */ -+static int aufs_d_revalidate(struct dentry *dentry, unsigned int flags) -+{ -+ int valid, err; -+ unsigned int sigen; -+ unsigned char do_udba; -+ struct super_block *sb; -+ struct inode *inode; -+ -+ /* todo: support rcu-walk? */ -+ if (flags & LOOKUP_RCU) -+ return -ECHILD; -+ -+ valid = 0; -+ if (unlikely(!au_di(dentry))) -+ goto out; -+ -+ valid = 1; -+ sb = dentry->d_sb; -+ /* -+ * todo: very ugly -+ * i_mutex of parent dir may be held, -+ * but we should not return 'invalid' due to busy. -+ */ -+ err = aufs_read_lock(dentry, AuLock_FLUSH | AuLock_DW | AuLock_NOPLM); -+ if (unlikely(err)) { -+ valid = err; -+ AuTraceErr(err); -+ goto out; -+ } -+ inode = dentry->d_inode; -+ if (unlikely(inode && is_bad_inode(inode))) { -+ err = -EINVAL; -+ AuTraceErr(err); -+ goto out_dgrade; -+ } -+ if (unlikely(au_dbrange_test(dentry))) { -+ err = -EINVAL; -+ AuTraceErr(err); -+ goto out_dgrade; -+ } -+ -+ sigen = au_sigen(sb); -+ if (au_digen_test(dentry, sigen)) { -+ AuDebugOn(IS_ROOT(dentry)); -+ err = au_reval_dpath(dentry, sigen); -+ if (unlikely(err)) { -+ AuTraceErr(err); -+ goto out_dgrade; -+ } -+ } -+ di_downgrade_lock(dentry, AuLock_IR); -+ -+ err = -EINVAL; -+ if (!(flags & (LOOKUP_OPEN | LOOKUP_EMPTY)) -+ && inode -+ && !(inode->i_state && I_LINKABLE) -+ && (IS_DEADDIR(inode) || !inode->i_nlink)) { -+ AuTraceErr(err); -+ goto out_inval; -+ } -+ -+ do_udba = !au_opt_test(au_mntflags(sb), UDBA_NONE); -+ if (do_udba && inode) { -+ aufs_bindex_t bstart = au_ibstart(inode); -+ struct 
inode *h_inode; -+ -+ if (bstart >= 0) { -+ h_inode = au_h_iptr(inode, bstart); -+ if (h_inode && au_test_higen(inode, h_inode)) { -+ AuTraceErr(err); -+ goto out_inval; -+ } -+ } -+ } -+ -+ err = h_d_revalidate(dentry, inode, flags, do_udba); -+ if (unlikely(!err && do_udba && au_dbstart(dentry) < 0)) { -+ err = -EIO; -+ AuDbg("both of real entry and whiteout found, %p, err %d\n", -+ dentry, err); -+ } -+ goto out_inval; -+ -+out_dgrade: -+ di_downgrade_lock(dentry, AuLock_IR); -+out_inval: -+ aufs_read_unlock(dentry, AuLock_IR); -+ AuTraceErr(err); -+ valid = !err; -+out: -+ if (!valid) { -+ AuDbg("%pd invalid, %d\n", dentry, valid); -+ d_drop(dentry); -+ } -+ return valid; -+} -+ -+static void aufs_d_release(struct dentry *dentry) -+{ -+ if (au_di(dentry)) { -+ au_di_fin(dentry); -+ au_hn_di_reinit(dentry); -+ } -+} -+ -+const struct dentry_operations aufs_dop = { -+ .d_revalidate = aufs_d_revalidate, -+ .d_weak_revalidate = aufs_d_revalidate, -+ .d_release = aufs_d_release -+}; -+ -+/* aufs_dop without d_revalidate */ -+const struct dentry_operations aufs_dop_noreval = { -+ .d_release = aufs_d_release -+}; -diff --git a/fs/aufs/dentry.h b/fs/aufs/dentry.h -new file mode 100644 -index 0000000..4006484 ---- /dev/null -+++ b/fs/aufs/dentry.h -@@ -0,0 +1,234 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * lookup and dentry operations -+ */ -+ -+#ifndef __AUFS_DENTRY_H__ -+#define __AUFS_DENTRY_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include "rwsem.h" -+ -+struct au_hdentry { -+ struct dentry *hd_dentry; -+ aufs_bindex_t hd_id; -+}; -+ -+struct au_dinfo { -+ atomic_t di_generation; -+ -+ struct au_rwsem di_rwsem; -+ aufs_bindex_t di_bstart, di_bend, di_bwh, di_bdiropq; -+ unsigned char di_tmpfile; /* to allow the different name */ -+ struct au_hdentry *di_hdentry; -+} ____cacheline_aligned_in_smp; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* dentry.c */ -+extern const struct dentry_operations aufs_dop, aufs_dop_noreval; -+struct au_branch; -+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent); -+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir, -+ struct dentry *h_parent, struct au_branch *br); -+ -+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type); -+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh); -+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent); -+int au_reval_dpath(struct dentry *dentry, unsigned int sigen); -+void au_refresh_dop(struct dentry *dentry, int force_reval); -+ -+/* dinfo.c */ -+void au_di_init_once(void *_di); -+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc); -+void au_di_free(struct au_dinfo *dinfo); -+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b); -+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src); -+int au_di_init(struct dentry *dentry); -+void au_di_fin(struct dentry *dentry); -+int au_di_realloc(struct au_dinfo *dinfo, int nbr); -+ -+void di_read_lock(struct dentry *d, int flags, unsigned int lsc); -+void di_read_unlock(struct dentry *d, int flags); -+void di_downgrade_lock(struct dentry *d, int flags); -+void di_write_lock(struct dentry *d, unsigned int lsc); -+void di_write_unlock(struct dentry *d); -+void 
di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir); -+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir); -+void di_write_unlock2(struct dentry *d1, struct dentry *d2); -+ -+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex); -+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex); -+aufs_bindex_t au_dbtail(struct dentry *dentry); -+aufs_bindex_t au_dbtaildir(struct dentry *dentry); -+ -+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_dentry); -+int au_digen_test(struct dentry *dentry, unsigned int sigen); -+int au_dbrange_test(struct dentry *dentry); -+void au_update_digen(struct dentry *dentry); -+void au_update_dbrange(struct dentry *dentry, int do_put_zero); -+void au_update_dbstart(struct dentry *dentry); -+void au_update_dbend(struct dentry *dentry); -+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct au_dinfo *au_di(struct dentry *dentry) -+{ -+ return dentry->d_fsdata; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* lock subclass for dinfo */ -+enum { -+ AuLsc_DI_CHILD, /* child first */ -+ AuLsc_DI_CHILD2, /* rename(2), link(2), and cpup at hnotify */ -+ AuLsc_DI_CHILD3, /* copyup dirs */ -+ AuLsc_DI_PARENT, -+ AuLsc_DI_PARENT2, -+ AuLsc_DI_PARENT3, -+ AuLsc_DI_TMP /* temp for replacing dinfo */ -+}; -+ -+/* -+ * di_read_lock_child, di_write_lock_child, -+ * di_read_lock_child2, di_write_lock_child2, -+ * di_read_lock_child3, di_write_lock_child3, -+ * di_read_lock_parent, di_write_lock_parent, -+ * di_read_lock_parent2, di_write_lock_parent2, -+ * di_read_lock_parent3, di_write_lock_parent3, -+ */ -+#define AuReadLockFunc(name, lsc) \ -+static inline void di_read_lock_##name(struct dentry *d, int flags) \ -+{ di_read_lock(d, flags, AuLsc_DI_##lsc); } -+ -+#define 
AuWriteLockFunc(name, lsc) \ -+static inline void di_write_lock_##name(struct dentry *d) \ -+{ di_write_lock(d, AuLsc_DI_##lsc); } -+ -+#define AuRWLockFuncs(name, lsc) \ -+ AuReadLockFunc(name, lsc) \ -+ AuWriteLockFunc(name, lsc) -+ -+AuRWLockFuncs(child, CHILD); -+AuRWLockFuncs(child2, CHILD2); -+AuRWLockFuncs(child3, CHILD3); -+AuRWLockFuncs(parent, PARENT); -+AuRWLockFuncs(parent2, PARENT2); -+AuRWLockFuncs(parent3, PARENT3); -+ -+#undef AuReadLockFunc -+#undef AuWriteLockFunc -+#undef AuRWLockFuncs -+ -+#define DiMustNoWaiters(d) AuRwMustNoWaiters(&au_di(d)->di_rwsem) -+#define DiMustAnyLock(d) AuRwMustAnyLock(&au_di(d)->di_rwsem) -+#define DiMustWriteLock(d) AuRwMustWriteLock(&au_di(d)->di_rwsem) -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* todo: memory barrier? */ -+static inline unsigned int au_digen(struct dentry *d) -+{ -+ return atomic_read(&au_di(d)->di_generation); -+} -+ -+static inline void au_h_dentry_init(struct au_hdentry *hdentry) -+{ -+ hdentry->hd_dentry = NULL; -+} -+ -+static inline void au_hdput(struct au_hdentry *hd) -+{ -+ if (hd) -+ dput(hd->hd_dentry); -+} -+ -+static inline aufs_bindex_t au_dbstart(struct dentry *dentry) -+{ -+ DiMustAnyLock(dentry); -+ return au_di(dentry)->di_bstart; -+} -+ -+static inline aufs_bindex_t au_dbend(struct dentry *dentry) -+{ -+ DiMustAnyLock(dentry); -+ return au_di(dentry)->di_bend; -+} -+ -+static inline aufs_bindex_t au_dbwh(struct dentry *dentry) -+{ -+ DiMustAnyLock(dentry); -+ return au_di(dentry)->di_bwh; -+} -+ -+static inline aufs_bindex_t au_dbdiropq(struct dentry *dentry) -+{ -+ DiMustAnyLock(dentry); -+ return au_di(dentry)->di_bdiropq; -+} -+ -+/* todo: hard/soft set? 
*/ -+static inline void au_set_dbstart(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ DiMustWriteLock(dentry); -+ au_di(dentry)->di_bstart = bindex; -+} -+ -+static inline void au_set_dbend(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ DiMustWriteLock(dentry); -+ au_di(dentry)->di_bend = bindex; -+} -+ -+static inline void au_set_dbwh(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ DiMustWriteLock(dentry); -+ /* dbwh can be outside of bstart - bend range */ -+ au_di(dentry)->di_bwh = bindex; -+} -+ -+static inline void au_set_dbdiropq(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ DiMustWriteLock(dentry); -+ au_di(dentry)->di_bdiropq = bindex; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_HNOTIFY -+static inline void au_digen_dec(struct dentry *d) -+{ -+ atomic_dec(&au_di(d)->di_generation); -+} -+ -+static inline void au_hn_di_reinit(struct dentry *dentry) -+{ -+ dentry->d_fsdata = NULL; -+} -+#else -+AuStubVoid(au_hn_di_reinit, struct dentry *dentry __maybe_unused) -+#endif /* CONFIG_AUFS_HNOTIFY */ -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_DENTRY_H__ */ -diff --git a/fs/aufs/dinfo.c b/fs/aufs/dinfo.c -new file mode 100644 -index 0000000..28c02b3 ---- /dev/null -+++ b/fs/aufs/dinfo.c -@@ -0,0 +1,544 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * dentry private data -+ */ -+ -+#include "aufs.h" -+ -+void au_di_init_once(void *_dinfo) -+{ -+ struct au_dinfo *dinfo = _dinfo; -+ static struct lock_class_key aufs_di; -+ -+ au_rw_init(&dinfo->di_rwsem); -+ au_rw_class(&dinfo->di_rwsem, &aufs_di); -+} -+ -+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc) -+{ -+ struct au_dinfo *dinfo; -+ int nbr, i; -+ -+ dinfo = au_cache_alloc_dinfo(); -+ if (unlikely(!dinfo)) -+ goto out; -+ -+ nbr = au_sbend(sb) + 1; -+ if (nbr <= 0) -+ nbr = 1; -+ dinfo->di_hdentry = kcalloc(nbr, sizeof(*dinfo->di_hdentry), GFP_NOFS); -+ if (dinfo->di_hdentry) { -+ au_rw_write_lock_nested(&dinfo->di_rwsem, lsc); -+ dinfo->di_bstart = -1; -+ dinfo->di_bend = -1; -+ dinfo->di_bwh = -1; -+ dinfo->di_bdiropq = -1; -+ dinfo->di_tmpfile = 0; -+ for (i = 0; i < nbr; i++) -+ dinfo->di_hdentry[i].hd_id = -1; -+ goto out; -+ } -+ -+ au_cache_free_dinfo(dinfo); -+ dinfo = NULL; -+ -+out: -+ return dinfo; -+} -+ -+void au_di_free(struct au_dinfo *dinfo) -+{ -+ struct au_hdentry *p; -+ aufs_bindex_t bend, bindex; -+ -+ /* dentry may not be revalidated */ -+ bindex = dinfo->di_bstart; -+ if (bindex >= 0) { -+ bend = dinfo->di_bend; -+ p = dinfo->di_hdentry + bindex; -+ while (bindex++ <= bend) -+ au_hdput(p++); -+ } -+ kfree(dinfo->di_hdentry); -+ au_cache_free_dinfo(dinfo); -+} -+ -+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b) -+{ -+ struct au_hdentry *p; -+ aufs_bindex_t bi; -+ -+ AuRwMustWriteLock(&a->di_rwsem); -+ AuRwMustWriteLock(&b->di_rwsem); -+ -+#define DiSwap(v, name) \ -+ do { \ -+ v = a->di_##name; \ -+ a->di_##name = b->di_##name; \ -+ b->di_##name = v; \ -+ } while (0) -+ -+ DiSwap(p, hdentry); -+ DiSwap(bi, bstart); -+ DiSwap(bi, bend); -+ DiSwap(bi, bwh); -+ DiSwap(bi, bdiropq); -+ /* smp_mb(); */ -+ -+#undef DiSwap -+} -+ -+void au_di_cp(struct au_dinfo *dst, struct 
au_dinfo *src) -+{ -+ AuRwMustWriteLock(&dst->di_rwsem); -+ AuRwMustWriteLock(&src->di_rwsem); -+ -+ dst->di_bstart = src->di_bstart; -+ dst->di_bend = src->di_bend; -+ dst->di_bwh = src->di_bwh; -+ dst->di_bdiropq = src->di_bdiropq; -+ /* smp_mb(); */ -+} -+ -+int au_di_init(struct dentry *dentry) -+{ -+ int err; -+ struct super_block *sb; -+ struct au_dinfo *dinfo; -+ -+ err = 0; -+ sb = dentry->d_sb; -+ dinfo = au_di_alloc(sb, AuLsc_DI_CHILD); -+ if (dinfo) { -+ atomic_set(&dinfo->di_generation, au_sigen(sb)); -+ /* smp_mb(); */ /* atomic_set */ -+ dentry->d_fsdata = dinfo; -+ } else -+ err = -ENOMEM; -+ -+ return err; -+} -+ -+void au_di_fin(struct dentry *dentry) -+{ -+ struct au_dinfo *dinfo; -+ -+ dinfo = au_di(dentry); -+ AuRwDestroy(&dinfo->di_rwsem); -+ au_di_free(dinfo); -+} -+ -+int au_di_realloc(struct au_dinfo *dinfo, int nbr) -+{ -+ int err, sz; -+ struct au_hdentry *hdp; -+ -+ AuRwMustWriteLock(&dinfo->di_rwsem); -+ -+ err = -ENOMEM; -+ sz = sizeof(*hdp) * (dinfo->di_bend + 1); -+ if (!sz) -+ sz = sizeof(*hdp); -+ hdp = au_kzrealloc(dinfo->di_hdentry, sz, sizeof(*hdp) * nbr, GFP_NOFS); -+ if (hdp) { -+ dinfo->di_hdentry = hdp; -+ err = 0; -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void do_ii_write_lock(struct inode *inode, unsigned int lsc) -+{ -+ switch (lsc) { -+ case AuLsc_DI_CHILD: -+ ii_write_lock_child(inode); -+ break; -+ case AuLsc_DI_CHILD2: -+ ii_write_lock_child2(inode); -+ break; -+ case AuLsc_DI_CHILD3: -+ ii_write_lock_child3(inode); -+ break; -+ case AuLsc_DI_PARENT: -+ ii_write_lock_parent(inode); -+ break; -+ case AuLsc_DI_PARENT2: -+ ii_write_lock_parent2(inode); -+ break; -+ case AuLsc_DI_PARENT3: -+ ii_write_lock_parent3(inode); -+ break; -+ default: -+ BUG(); -+ } -+} -+ -+static void do_ii_read_lock(struct inode *inode, unsigned int lsc) -+{ -+ switch (lsc) { -+ case AuLsc_DI_CHILD: -+ ii_read_lock_child(inode); -+ break; -+ case AuLsc_DI_CHILD2: -+ 
ii_read_lock_child2(inode); -+ break; -+ case AuLsc_DI_CHILD3: -+ ii_read_lock_child3(inode); -+ break; -+ case AuLsc_DI_PARENT: -+ ii_read_lock_parent(inode); -+ break; -+ case AuLsc_DI_PARENT2: -+ ii_read_lock_parent2(inode); -+ break; -+ case AuLsc_DI_PARENT3: -+ ii_read_lock_parent3(inode); -+ break; -+ default: -+ BUG(); -+ } -+} -+ -+void di_read_lock(struct dentry *d, int flags, unsigned int lsc) -+{ -+ au_rw_read_lock_nested(&au_di(d)->di_rwsem, lsc); -+ if (d->d_inode) { -+ if (au_ftest_lock(flags, IW)) -+ do_ii_write_lock(d->d_inode, lsc); -+ else if (au_ftest_lock(flags, IR)) -+ do_ii_read_lock(d->d_inode, lsc); -+ } -+} -+ -+void di_read_unlock(struct dentry *d, int flags) -+{ -+ if (d->d_inode) { -+ if (au_ftest_lock(flags, IW)) { -+ au_dbg_verify_dinode(d); -+ ii_write_unlock(d->d_inode); -+ } else if (au_ftest_lock(flags, IR)) { -+ au_dbg_verify_dinode(d); -+ ii_read_unlock(d->d_inode); -+ } -+ } -+ au_rw_read_unlock(&au_di(d)->di_rwsem); -+} -+ -+void di_downgrade_lock(struct dentry *d, int flags) -+{ -+ if (d->d_inode && au_ftest_lock(flags, IR)) -+ ii_downgrade_lock(d->d_inode); -+ au_rw_dgrade_lock(&au_di(d)->di_rwsem); -+} -+ -+void di_write_lock(struct dentry *d, unsigned int lsc) -+{ -+ au_rw_write_lock_nested(&au_di(d)->di_rwsem, lsc); -+ if (d->d_inode) -+ do_ii_write_lock(d->d_inode, lsc); -+} -+ -+void di_write_unlock(struct dentry *d) -+{ -+ au_dbg_verify_dinode(d); -+ if (d->d_inode) -+ ii_write_unlock(d->d_inode); -+ au_rw_write_unlock(&au_di(d)->di_rwsem); -+} -+ -+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir) -+{ -+ AuDebugOn(d1 == d2 -+ || d1->d_inode == d2->d_inode -+ || d1->d_sb != d2->d_sb); -+ -+ if (isdir && au_test_subdir(d1, d2)) { -+ di_write_lock_child(d1); -+ di_write_lock_child2(d2); -+ } else { -+ /* there should be no races */ -+ di_write_lock_child(d2); -+ di_write_lock_child2(d1); -+ } -+} -+ -+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir) -+{ -+ AuDebugOn(d1 
== d2 -+ || d1->d_inode == d2->d_inode -+ || d1->d_sb != d2->d_sb); -+ -+ if (isdir && au_test_subdir(d1, d2)) { -+ di_write_lock_parent(d1); -+ di_write_lock_parent2(d2); -+ } else { -+ /* there should be no races */ -+ di_write_lock_parent(d2); -+ di_write_lock_parent2(d1); -+ } -+} -+ -+void di_write_unlock2(struct dentry *d1, struct dentry *d2) -+{ -+ di_write_unlock(d1); -+ if (d1->d_inode == d2->d_inode) -+ au_rw_write_unlock(&au_di(d2)->di_rwsem); -+ else -+ di_write_unlock(d2); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ struct dentry *d; -+ -+ DiMustAnyLock(dentry); -+ -+ if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry)) -+ return NULL; -+ AuDebugOn(bindex < 0); -+ d = au_di(dentry)->di_hdentry[0 + bindex].hd_dentry; -+ AuDebugOn(d && au_dcount(d) <= 0); -+ return d; -+} -+ -+/* -+ * extended version of au_h_dptr(). -+ * returns a hashed and positive (or linkable) h_dentry in bindex, NULL, or -+ * error. 
-+ */ -+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ struct dentry *h_dentry; -+ struct inode *inode, *h_inode; -+ -+ inode = dentry->d_inode; -+ AuDebugOn(!inode); -+ -+ h_dentry = NULL; -+ if (au_dbstart(dentry) <= bindex -+ && bindex <= au_dbend(dentry)) -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry && !au_d_linkable(h_dentry)) { -+ dget(h_dentry); -+ goto out; /* success */ -+ } -+ -+ AuDebugOn(bindex < au_ibstart(inode)); -+ AuDebugOn(au_ibend(inode) < bindex); -+ h_inode = au_h_iptr(inode, bindex); -+ h_dentry = d_find_alias(h_inode); -+ if (h_dentry) { -+ if (!IS_ERR(h_dentry)) { -+ if (!au_d_linkable(h_dentry)) -+ goto out; /* success */ -+ dput(h_dentry); -+ } else -+ goto out; -+ } -+ -+ if (au_opt_test(au_mntflags(dentry->d_sb), PLINK)) { -+ h_dentry = au_plink_lkup(inode, bindex); -+ AuDebugOn(!h_dentry); -+ if (!IS_ERR(h_dentry)) { -+ if (!au_d_hashed_positive(h_dentry)) -+ goto out; /* success */ -+ dput(h_dentry); -+ h_dentry = NULL; -+ } -+ } -+ -+out: -+ AuDbgDentry(h_dentry); -+ return h_dentry; -+} -+ -+aufs_bindex_t au_dbtail(struct dentry *dentry) -+{ -+ aufs_bindex_t bend, bwh; -+ -+ bend = au_dbend(dentry); -+ if (0 <= bend) { -+ bwh = au_dbwh(dentry); -+ if (!bwh) -+ return bwh; -+ if (0 < bwh && bwh < bend) -+ return bwh - 1; -+ } -+ return bend; -+} -+ -+aufs_bindex_t au_dbtaildir(struct dentry *dentry) -+{ -+ aufs_bindex_t bend, bopq; -+ -+ bend = au_dbtail(dentry); -+ if (0 <= bend) { -+ bopq = au_dbdiropq(dentry); -+ if (0 <= bopq && bopq < bend) -+ bend = bopq; -+ } -+ return bend; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_dentry) -+{ -+ struct au_hdentry *hd = au_di(dentry)->di_hdentry + bindex; -+ struct au_branch *br; -+ -+ DiMustWriteLock(dentry); -+ -+ au_hdput(hd); -+ hd->hd_dentry = h_dentry; -+ if (h_dentry) { -+ br = au_sbr(dentry->d_sb, bindex); -+ 
hd->hd_id = br->br_id; -+ } -+} -+ -+int au_dbrange_test(struct dentry *dentry) -+{ -+ int err; -+ aufs_bindex_t bstart, bend; -+ -+ err = 0; -+ bstart = au_dbstart(dentry); -+ bend = au_dbend(dentry); -+ if (bstart >= 0) -+ AuDebugOn(bend < 0 && bstart > bend); -+ else { -+ err = -EIO; -+ AuDebugOn(bend >= 0); -+ } -+ -+ return err; -+} -+ -+int au_digen_test(struct dentry *dentry, unsigned int sigen) -+{ -+ int err; -+ -+ err = 0; -+ if (unlikely(au_digen(dentry) != sigen -+ || au_iigen_test(dentry->d_inode, sigen))) -+ err = -EIO; -+ -+ return err; -+} -+ -+void au_update_digen(struct dentry *dentry) -+{ -+ atomic_set(&au_di(dentry)->di_generation, au_sigen(dentry->d_sb)); -+ /* smp_mb(); */ /* atomic_set */ -+} -+ -+void au_update_dbrange(struct dentry *dentry, int do_put_zero) -+{ -+ struct au_dinfo *dinfo; -+ struct dentry *h_d; -+ struct au_hdentry *hdp; -+ -+ DiMustWriteLock(dentry); -+ -+ dinfo = au_di(dentry); -+ if (!dinfo || dinfo->di_bstart < 0) -+ return; -+ -+ hdp = dinfo->di_hdentry; -+ if (do_put_zero) { -+ aufs_bindex_t bindex, bend; -+ -+ bend = dinfo->di_bend; -+ for (bindex = dinfo->di_bstart; bindex <= bend; bindex++) { -+ h_d = hdp[0 + bindex].hd_dentry; -+ if (h_d && !h_d->d_inode) -+ au_set_h_dptr(dentry, bindex, NULL); -+ } -+ } -+ -+ dinfo->di_bstart = -1; -+ while (++dinfo->di_bstart <= dinfo->di_bend) -+ if (hdp[0 + dinfo->di_bstart].hd_dentry) -+ break; -+ if (dinfo->di_bstart > dinfo->di_bend) { -+ dinfo->di_bstart = -1; -+ dinfo->di_bend = -1; -+ return; -+ } -+ -+ dinfo->di_bend++; -+ while (0 <= --dinfo->di_bend) -+ if (hdp[0 + dinfo->di_bend].hd_dentry) -+ break; -+ AuDebugOn(dinfo->di_bstart > dinfo->di_bend || dinfo->di_bend < 0); -+} -+ -+void au_update_dbstart(struct dentry *dentry) -+{ -+ aufs_bindex_t bindex, bend; -+ struct dentry *h_dentry; -+ -+ bend = au_dbend(dentry); -+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ if 
(h_dentry->d_inode) { -+ au_set_dbstart(dentry, bindex); -+ return; -+ } -+ au_set_h_dptr(dentry, bindex, NULL); -+ } -+} -+ -+void au_update_dbend(struct dentry *dentry) -+{ -+ aufs_bindex_t bindex, bstart; -+ struct dentry *h_dentry; -+ -+ bstart = au_dbstart(dentry); -+ for (bindex = au_dbend(dentry); bindex >= bstart; bindex--) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ if (h_dentry->d_inode) { -+ au_set_dbend(dentry, bindex); -+ return; -+ } -+ au_set_h_dptr(dentry, bindex, NULL); -+ } -+} -+ -+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry) -+{ -+ aufs_bindex_t bindex, bend; -+ -+ bend = au_dbend(dentry); -+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) -+ if (au_h_dptr(dentry, bindex) == h_dentry) -+ return bindex; -+ return -1; -+} -diff --git a/fs/aufs/dir.c b/fs/aufs/dir.c -new file mode 100644 -index 0000000..3d61b05 ---- /dev/null -+++ b/fs/aufs/dir.c -@@ -0,0 +1,756 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * directory operations -+ */ -+ -+#include -+#include "aufs.h" -+ -+void au_add_nlink(struct inode *dir, struct inode *h_dir) -+{ -+ unsigned int nlink; -+ -+ AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode)); -+ -+ nlink = dir->i_nlink; -+ nlink += h_dir->i_nlink - 2; -+ if (h_dir->i_nlink < 2) -+ nlink += 2; -+ smp_mb(); /* for i_nlink */ -+ /* 0 can happen in revaliding */ -+ set_nlink(dir, nlink); -+} -+ -+void au_sub_nlink(struct inode *dir, struct inode *h_dir) -+{ -+ unsigned int nlink; -+ -+ AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode)); -+ -+ nlink = dir->i_nlink; -+ nlink -= h_dir->i_nlink - 2; -+ if (h_dir->i_nlink < 2) -+ nlink -= 2; -+ smp_mb(); /* for i_nlink */ -+ /* nlink == 0 means the branch-fs is broken */ -+ set_nlink(dir, nlink); -+} -+ -+loff_t au_dir_size(struct file *file, struct dentry *dentry) -+{ -+ loff_t sz; -+ aufs_bindex_t bindex, bend; -+ struct file *h_file; -+ struct dentry *h_dentry; -+ -+ sz = 0; -+ if (file) { -+ AuDebugOn(!d_is_dir(file->f_path.dentry)); -+ -+ bend = au_fbend_dir(file); -+ for (bindex = au_fbstart(file); -+ bindex <= bend && sz < KMALLOC_MAX_SIZE; -+ bindex++) { -+ h_file = au_hf_dir(file, bindex); -+ if (h_file && file_inode(h_file)) -+ sz += vfsub_f_size_read(h_file); -+ } -+ } else { -+ AuDebugOn(!dentry); -+ AuDebugOn(!d_is_dir(dentry)); -+ -+ bend = au_dbtaildir(dentry); -+ for (bindex = au_dbstart(dentry); -+ bindex <= bend && sz < KMALLOC_MAX_SIZE; -+ bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry && h_dentry->d_inode) -+ sz += i_size_read(h_dentry->d_inode); -+ } -+ } -+ if (sz < KMALLOC_MAX_SIZE) -+ sz = roundup_pow_of_two(sz); -+ if (sz > KMALLOC_MAX_SIZE) -+ sz = KMALLOC_MAX_SIZE; -+ else if (sz < NAME_MAX) { -+ BUILD_BUG_ON(AUFS_RDBLK_DEF < NAME_MAX); -+ sz = AUFS_RDBLK_DEF; -+ } -+ return sz; -+} -+ -+struct au_dir_ts_arg { -+ struct dentry *dentry; -+ aufs_bindex_t brid; -+}; -+ -+static void au_do_dir_ts(void *arg) -+{ -+ struct 
au_dir_ts_arg *a = arg; -+ struct au_dtime dt; -+ struct path h_path; -+ struct inode *dir, *h_dir; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct au_hinode *hdir; -+ int err; -+ aufs_bindex_t bstart, bindex; -+ -+ sb = a->dentry->d_sb; -+ dir = a->dentry->d_inode; -+ if (!dir) -+ goto out; -+ /* no dir->i_mutex lock */ -+ aufs_read_lock(a->dentry, AuLock_DW); /* noflush */ -+ -+ bstart = au_ibstart(dir); -+ bindex = au_br_index(sb, a->brid); -+ if (bindex < bstart) -+ goto out_unlock; -+ -+ br = au_sbr(sb, bindex); -+ h_path.dentry = au_h_dptr(a->dentry, bindex); -+ if (!h_path.dentry) -+ goto out_unlock; -+ h_path.mnt = au_br_mnt(br); -+ au_dtime_store(&dt, a->dentry, &h_path); -+ -+ br = au_sbr(sb, bstart); -+ if (!au_br_writable(br->br_perm)) -+ goto out_unlock; -+ h_path.dentry = au_h_dptr(a->dentry, bstart); -+ h_path.mnt = au_br_mnt(br); -+ err = vfsub_mnt_want_write(h_path.mnt); -+ if (err) -+ goto out_unlock; -+ hdir = au_hi(dir, bstart); -+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT); -+ h_dir = au_h_iptr(dir, bstart); -+ if (h_dir->i_nlink -+ && timespec_compare(&h_dir->i_mtime, &dt.dt_mtime) < 0) { -+ dt.dt_h_path = h_path; -+ au_dtime_revert(&dt); -+ } -+ au_hn_imtx_unlock(hdir); -+ vfsub_mnt_drop_write(h_path.mnt); -+ au_cpup_attr_timesizes(dir); -+ -+out_unlock: -+ aufs_read_unlock(a->dentry, AuLock_DW); -+out: -+ dput(a->dentry); -+ au_nwt_done(&au_sbi(sb)->si_nowait); -+ kfree(arg); -+} -+ -+void au_dir_ts(struct inode *dir, aufs_bindex_t bindex) -+{ -+ int perm, wkq_err; -+ aufs_bindex_t bstart; -+ struct au_dir_ts_arg *arg; -+ struct dentry *dentry; -+ struct super_block *sb; -+ -+ IMustLock(dir); -+ -+ dentry = d_find_any_alias(dir); -+ AuDebugOn(!dentry); -+ sb = dentry->d_sb; -+ bstart = au_ibstart(dir); -+ if (bstart == bindex) { -+ au_cpup_attr_timesizes(dir); -+ goto out; -+ } -+ -+ perm = au_sbr_perm(sb, bstart); -+ if (!au_br_writable(perm)) -+ goto out; -+ -+ arg = kmalloc(sizeof(*arg), GFP_NOFS); -+ if (!arg) -+ goto 
out; -+ -+ arg->dentry = dget(dentry); /* will be dput-ted by au_do_dir_ts() */ -+ arg->brid = au_sbr_id(sb, bindex); -+ wkq_err = au_wkq_nowait(au_do_dir_ts, arg, sb, /*flags*/0); -+ if (unlikely(wkq_err)) { -+ pr_err("wkq %d\n", wkq_err); -+ dput(dentry); -+ kfree(arg); -+ } -+ -+out: -+ dput(dentry); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int reopen_dir(struct file *file) -+{ -+ int err; -+ unsigned int flags; -+ aufs_bindex_t bindex, btail, bstart; -+ struct dentry *dentry, *h_dentry; -+ struct file *h_file; -+ -+ /* open all lower dirs */ -+ dentry = file->f_dentry; -+ bstart = au_dbstart(dentry); -+ for (bindex = au_fbstart(file); bindex < bstart; bindex++) -+ au_set_h_fptr(file, bindex, NULL); -+ au_set_fbstart(file, bstart); -+ -+ btail = au_dbtaildir(dentry); -+ for (bindex = au_fbend_dir(file); btail < bindex; bindex--) -+ au_set_h_fptr(file, bindex, NULL); -+ au_set_fbend_dir(file, btail); -+ -+ flags = vfsub_file_flags(file); -+ for (bindex = bstart; bindex <= btail; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ h_file = au_hf_dir(file, bindex); -+ if (h_file) -+ continue; -+ -+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; /* close all? */ -+ au_set_h_fptr(file, bindex, h_file); -+ } -+ au_update_figen(file); -+ /* todo: necessary? 
*/ -+ /* file->f_ra = h_file->f_ra; */ -+ err = 0; -+ -+out: -+ return err; -+} -+ -+static int do_open_dir(struct file *file, int flags, struct file *h_file) -+{ -+ int err; -+ aufs_bindex_t bindex, btail; -+ struct dentry *dentry, *h_dentry; -+ struct vfsmount *mnt; -+ -+ FiMustWriteLock(file); -+ AuDebugOn(h_file); -+ -+ err = 0; -+ mnt = file->f_path.mnt; -+ dentry = file->f_dentry; -+ file->f_version = dentry->d_inode->i_version; -+ bindex = au_dbstart(dentry); -+ au_set_fbstart(file, bindex); -+ btail = au_dbtaildir(dentry); -+ au_set_fbend_dir(file, btail); -+ for (; !err && bindex <= btail; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ -+ err = vfsub_test_mntns(mnt, h_dentry->d_sb); -+ if (unlikely(err)) -+ break; -+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0); -+ if (IS_ERR(h_file)) { -+ err = PTR_ERR(h_file); -+ break; -+ } -+ au_set_h_fptr(file, bindex, h_file); -+ } -+ au_update_figen(file); -+ /* todo: necessary? */ -+ /* file->f_ra = h_file->f_ra; */ -+ if (!err) -+ return 0; /* success */ -+ -+ /* close all */ -+ for (bindex = au_fbstart(file); bindex <= btail; bindex++) -+ au_set_h_fptr(file, bindex, NULL); -+ au_set_fbstart(file, -1); -+ au_set_fbend_dir(file, -1); -+ -+ return err; -+} -+ -+static int aufs_open_dir(struct inode *inode __maybe_unused, -+ struct file *file) -+{ -+ int err; -+ struct super_block *sb; -+ struct au_fidir *fidir; -+ -+ err = -ENOMEM; -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ fidir = au_fidir_alloc(sb); -+ if (fidir) { -+ struct au_do_open_args args = { -+ .open = do_open_dir, -+ .fidir = fidir -+ }; -+ err = au_do_open(file, &args); -+ if (unlikely(err)) -+ kfree(fidir); -+ } -+ si_read_unlock(sb); -+ return err; -+} -+ -+static int aufs_release_dir(struct inode *inode __maybe_unused, -+ struct file *file) -+{ -+ struct au_vdir *vdir_cache; -+ struct au_finfo *finfo; -+ struct au_fidir *fidir; -+ aufs_bindex_t bindex, bend; -+ -+ finfo 
= au_fi(file); -+ fidir = finfo->fi_hdir; -+ if (fidir) { -+ au_sphl_del(&finfo->fi_hlist, -+ &au_sbi(file->f_dentry->d_sb)->si_files); -+ vdir_cache = fidir->fd_vdir_cache; /* lock-free */ -+ if (vdir_cache) -+ au_vdir_free(vdir_cache); -+ -+ bindex = finfo->fi_btop; -+ if (bindex >= 0) { -+ /* -+ * calls fput() instead of filp_close(), -+ * since no dnotify or lock for the lower file. -+ */ -+ bend = fidir->fd_bbot; -+ for (; bindex <= bend; bindex++) -+ au_set_h_fptr(file, bindex, NULL); -+ } -+ kfree(fidir); -+ finfo->fi_hdir = NULL; -+ } -+ au_finfo_fin(file); -+ return 0; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_do_flush_dir(struct file *file, fl_owner_t id) -+{ -+ int err; -+ aufs_bindex_t bindex, bend; -+ struct file *h_file; -+ -+ err = 0; -+ bend = au_fbend_dir(file); -+ for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) { -+ h_file = au_hf_dir(file, bindex); -+ if (h_file) -+ err = vfsub_flush(h_file, id); -+ } -+ return err; -+} -+ -+static int aufs_flush_dir(struct file *file, fl_owner_t id) -+{ -+ return au_do_flush(file, id, au_do_flush_dir); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_do_fsync_dir_no_file(struct dentry *dentry, int datasync) -+{ -+ int err; -+ aufs_bindex_t bend, bindex; -+ struct inode *inode; -+ struct super_block *sb; -+ -+ err = 0; -+ sb = dentry->d_sb; -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ bend = au_dbend(dentry); -+ for (bindex = au_dbstart(dentry); !err && bindex <= bend; bindex++) { -+ struct path h_path; -+ -+ if (au_test_ro(sb, bindex, inode)) -+ continue; -+ h_path.dentry = au_h_dptr(dentry, bindex); -+ if (!h_path.dentry) -+ continue; -+ -+ h_path.mnt = au_sbr_mnt(sb, bindex); -+ err = vfsub_fsync(NULL, &h_path, datasync); -+ } -+ -+ return err; -+} -+ -+static int au_do_fsync_dir(struct file *file, int datasync) -+{ -+ int err; -+ aufs_bindex_t bend, bindex; -+ struct 
file *h_file; -+ struct super_block *sb; -+ struct inode *inode; -+ -+ err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1); -+ if (unlikely(err)) -+ goto out; -+ -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ bend = au_fbend_dir(file); -+ for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) { -+ h_file = au_hf_dir(file, bindex); -+ if (!h_file || au_test_ro(sb, bindex, inode)) -+ continue; -+ -+ err = vfsub_fsync(h_file, &h_file->f_path, datasync); -+ } -+ -+out: -+ return err; -+} -+ -+/* -+ * @file may be NULL -+ */ -+static int aufs_fsync_dir(struct file *file, loff_t start, loff_t end, -+ int datasync) -+{ -+ int err; -+ struct dentry *dentry; -+ struct super_block *sb; -+ struct mutex *mtx; -+ -+ err = 0; -+ dentry = file->f_dentry; -+ mtx = &dentry->d_inode->i_mutex; -+ mutex_lock(mtx); -+ sb = dentry->d_sb; -+ si_noflush_read_lock(sb); -+ if (file) -+ err = au_do_fsync_dir(file, datasync); -+ else { -+ di_write_lock_child(dentry); -+ err = au_do_fsync_dir_no_file(dentry, datasync); -+ } -+ au_cpup_attr_timesizes(dentry->d_inode); -+ di_write_unlock(dentry); -+ if (file) -+ fi_write_unlock(file); -+ -+ si_read_unlock(sb); -+ mutex_unlock(mtx); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_iterate(struct file *file, struct dir_context *ctx) -+{ -+ int err; -+ struct dentry *dentry; -+ struct inode *inode, *h_inode; -+ struct super_block *sb; -+ -+ AuDbg("%pD, ctx{%pf, %llu}\n", file, ctx->actor, ctx->pos); -+ -+ dentry = file->f_dentry; -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ -+ sb = dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1); -+ if (unlikely(err)) -+ goto out; -+ err = au_alive_dir(dentry); -+ if (!err) -+ err = au_vdir_init(file); -+ di_downgrade_lock(dentry, AuLock_IR); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ if 
(!au_test_nfsd()) { -+ err = au_vdir_fill_de(file, ctx); -+ fsstack_copy_attr_atime(inode, h_inode); -+ } else { -+ /* -+ * nfsd filldir may call lookup_one_len(), vfs_getattr(), -+ * encode_fh() and others. -+ */ -+ atomic_inc(&h_inode->i_count); -+ di_read_unlock(dentry, AuLock_IR); -+ si_read_unlock(sb); -+ err = au_vdir_fill_de(file, ctx); -+ fsstack_copy_attr_atime(inode, h_inode); -+ fi_write_unlock(file); -+ iput(h_inode); -+ -+ AuTraceErr(err); -+ return err; -+ } -+ -+out_unlock: -+ di_read_unlock(dentry, AuLock_IR); -+ fi_write_unlock(file); -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define AuTestEmpty_WHONLY 1 -+#define AuTestEmpty_CALLED (1 << 1) -+#define AuTestEmpty_SHWH (1 << 2) -+#define au_ftest_testempty(flags, name) ((flags) & AuTestEmpty_##name) -+#define au_fset_testempty(flags, name) \ -+ do { (flags) |= AuTestEmpty_##name; } while (0) -+#define au_fclr_testempty(flags, name) \ -+ do { (flags) &= ~AuTestEmpty_##name; } while (0) -+ -+#ifndef CONFIG_AUFS_SHWH -+#undef AuTestEmpty_SHWH -+#define AuTestEmpty_SHWH 0 -+#endif -+ -+struct test_empty_arg { -+ struct dir_context ctx; -+ struct au_nhash *whlist; -+ unsigned int flags; -+ int err; -+ aufs_bindex_t bindex; -+}; -+ -+static int test_empty_cb(struct dir_context *ctx, const char *__name, -+ int namelen, loff_t offset __maybe_unused, u64 ino, -+ unsigned int d_type) -+{ -+ struct test_empty_arg *arg = container_of(ctx, struct test_empty_arg, -+ ctx); -+ char *name = (void *)__name; -+ -+ arg->err = 0; -+ au_fset_testempty(arg->flags, CALLED); -+ /* smp_mb(); */ -+ if (name[0] == '.' -+ && (namelen == 1 || (name[1] == '.' 
&& namelen == 2))) -+ goto out; /* success */ -+ -+ if (namelen <= AUFS_WH_PFX_LEN -+ || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) { -+ if (au_ftest_testempty(arg->flags, WHONLY) -+ && !au_nhash_test_known_wh(arg->whlist, name, namelen)) -+ arg->err = -ENOTEMPTY; -+ goto out; -+ } -+ -+ name += AUFS_WH_PFX_LEN; -+ namelen -= AUFS_WH_PFX_LEN; -+ if (!au_nhash_test_known_wh(arg->whlist, name, namelen)) -+ arg->err = au_nhash_append_wh -+ (arg->whlist, name, namelen, ino, d_type, arg->bindex, -+ au_ftest_testempty(arg->flags, SHWH)); -+ -+out: -+ /* smp_mb(); */ -+ AuTraceErr(arg->err); -+ return arg->err; -+} -+ -+static int do_test_empty(struct dentry *dentry, struct test_empty_arg *arg) -+{ -+ int err; -+ struct file *h_file; -+ -+ h_file = au_h_open(dentry, arg->bindex, -+ O_RDONLY | O_NONBLOCK | O_DIRECTORY | O_LARGEFILE, -+ /*file*/NULL, /*force_wr*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = 0; -+ if (!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE) -+ && !file_inode(h_file)->i_nlink) -+ goto out_put; -+ -+ do { -+ arg->err = 0; -+ au_fclr_testempty(arg->flags, CALLED); -+ /* smp_mb(); */ -+ err = vfsub_iterate_dir(h_file, &arg->ctx); -+ if (err >= 0) -+ err = arg->err; -+ } while (!err && au_ftest_testempty(arg->flags, CALLED)); -+ -+out_put: -+ fput(h_file); -+ au_sbr_put(dentry->d_sb, arg->bindex); -+out: -+ return err; -+} -+ -+struct do_test_empty_args { -+ int *errp; -+ struct dentry *dentry; -+ struct test_empty_arg *arg; -+}; -+ -+static void call_do_test_empty(void *args) -+{ -+ struct do_test_empty_args *a = args; -+ *a->errp = do_test_empty(a->dentry, a->arg); -+} -+ -+static int sio_test_empty(struct dentry *dentry, struct test_empty_arg *arg) -+{ -+ int err, wkq_err; -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ -+ h_dentry = au_h_dptr(dentry, arg->bindex); -+ h_inode = h_dentry->d_inode; -+ /* todo: i_mode changes anytime? 
*/ -+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD); -+ err = au_test_h_perm_sio(h_inode, MAY_EXEC | MAY_READ); -+ mutex_unlock(&h_inode->i_mutex); -+ if (!err) -+ err = do_test_empty(dentry, arg); -+ else { -+ struct do_test_empty_args args = { -+ .errp = &err, -+ .dentry = dentry, -+ .arg = arg -+ }; -+ unsigned int flags = arg->flags; -+ -+ wkq_err = au_wkq_wait(call_do_test_empty, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ arg->flags = flags; -+ } -+ -+ return err; -+} -+ -+int au_test_empty_lower(struct dentry *dentry) -+{ -+ int err; -+ unsigned int rdhash; -+ aufs_bindex_t bindex, bstart, btail; -+ struct au_nhash whlist; -+ struct test_empty_arg arg = { -+ .ctx = { -+ .actor = au_diractor(test_empty_cb) -+ } -+ }; -+ int (*test_empty)(struct dentry *dentry, struct test_empty_arg *arg); -+ -+ SiMustAnyLock(dentry->d_sb); -+ -+ rdhash = au_sbi(dentry->d_sb)->si_rdhash; -+ if (!rdhash) -+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, dentry)); -+ err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ -+ arg.flags = 0; -+ arg.whlist = &whlist; -+ bstart = au_dbstart(dentry); -+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)) -+ au_fset_testempty(arg.flags, SHWH); -+ test_empty = do_test_empty; -+ if (au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1)) -+ test_empty = sio_test_empty; -+ arg.bindex = bstart; -+ err = test_empty(dentry, &arg); -+ if (unlikely(err)) -+ goto out_whlist; -+ -+ au_fset_testempty(arg.flags, WHONLY); -+ btail = au_dbtaildir(dentry); -+ for (bindex = bstart + 1; !err && bindex <= btail; bindex++) { -+ struct dentry *h_dentry; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry && h_dentry->d_inode) { -+ arg.bindex = bindex; -+ err = test_empty(dentry, &arg); -+ } -+ } -+ -+out_whlist: -+ au_nhash_wh_free(&whlist); -+out: -+ return err; -+} -+ -+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist) -+{ -+ int err; -+ struct test_empty_arg arg = { -+ .ctx = { -+ 
.actor = au_diractor(test_empty_cb) -+ } -+ }; -+ aufs_bindex_t bindex, btail; -+ -+ err = 0; -+ arg.whlist = whlist; -+ arg.flags = AuTestEmpty_WHONLY; -+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)) -+ au_fset_testempty(arg.flags, SHWH); -+ btail = au_dbtaildir(dentry); -+ for (bindex = au_dbstart(dentry); !err && bindex <= btail; bindex++) { -+ struct dentry *h_dentry; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry && h_dentry->d_inode) { -+ arg.bindex = bindex; -+ err = sio_test_empty(dentry, &arg); -+ } -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+const struct file_operations aufs_dir_fop = { -+ .owner = THIS_MODULE, -+ .llseek = default_llseek, -+ .read = generic_read_dir, -+ .iterate = aufs_iterate, -+ .unlocked_ioctl = aufs_ioctl_dir, -+#ifdef CONFIG_COMPAT -+ .compat_ioctl = aufs_compat_ioctl_dir, -+#endif -+ .open = aufs_open_dir, -+ .release = aufs_release_dir, -+ .flush = aufs_flush_dir, -+ .fsync = aufs_fsync_dir -+}; -diff --git a/fs/aufs/dir.h b/fs/aufs/dir.h -new file mode 100644 -index 0000000..16821f9 ---- /dev/null -+++ b/fs/aufs/dir.h -@@ -0,0 +1,131 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * directory operations -+ */ -+ -+#ifndef __AUFS_DIR_H__ -+#define __AUFS_DIR_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* need to be faster and smaller */ -+ -+struct au_nhash { -+ unsigned int nh_num; -+ struct hlist_head *nh_head; -+}; -+ -+struct au_vdir_destr { -+ unsigned char len; -+ unsigned char name[0]; -+} __packed; -+ -+struct au_vdir_dehstr { -+ struct hlist_node hash; -+ struct au_vdir_destr *str; -+} ____cacheline_aligned_in_smp; -+ -+struct au_vdir_de { -+ ino_t de_ino; -+ unsigned char de_type; -+ /* caution: packed */ -+ struct au_vdir_destr de_str; -+} __packed; -+ -+struct au_vdir_wh { -+ struct hlist_node wh_hash; -+#ifdef CONFIG_AUFS_SHWH -+ ino_t wh_ino; -+ aufs_bindex_t wh_bindex; -+ unsigned char wh_type; -+#else -+ aufs_bindex_t wh_bindex; -+#endif -+ /* caution: packed */ -+ struct au_vdir_destr wh_str; -+} __packed; -+ -+union au_vdir_deblk_p { -+ unsigned char *deblk; -+ struct au_vdir_de *de; -+}; -+ -+struct au_vdir { -+ unsigned char **vd_deblk; -+ unsigned long vd_nblk; -+ struct { -+ unsigned long ul; -+ union au_vdir_deblk_p p; -+ } vd_last; -+ -+ unsigned long vd_version; -+ unsigned int vd_deblk_sz; -+ unsigned long vd_jiffy; -+} ____cacheline_aligned_in_smp; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* dir.c */ -+extern const struct file_operations aufs_dir_fop; -+void au_add_nlink(struct inode *dir, struct inode *h_dir); -+void au_sub_nlink(struct inode *dir, struct inode *h_dir); -+loff_t au_dir_size(struct file *file, struct dentry *dentry); -+void au_dir_ts(struct inode *dir, aufs_bindex_t bsrc); -+int au_test_empty_lower(struct dentry *dentry); -+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist); -+ -+/* vdir.c */ -+unsigned int au_rdhash_est(loff_t sz); -+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp); -+void 
au_nhash_wh_free(struct au_nhash *whlist); -+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt, -+ int limit); -+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen); -+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino, -+ unsigned int d_type, aufs_bindex_t bindex, -+ unsigned char shwh); -+void au_vdir_free(struct au_vdir *vdir); -+int au_vdir_init(struct file *file); -+int au_vdir_fill_de(struct file *file, struct dir_context *ctx); -+ -+/* ioctl.c */ -+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg); -+ -+#ifdef CONFIG_AUFS_RDU -+/* rdu.c */ -+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -+#ifdef CONFIG_COMPAT -+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd, -+ unsigned long arg); -+#endif -+#else -+AuStub(long, au_rdu_ioctl, return -EINVAL, struct file *file, -+ unsigned int cmd, unsigned long arg) -+#ifdef CONFIG_COMPAT -+AuStub(long, au_rdu_compat_ioctl, return -EINVAL, struct file *file, -+ unsigned int cmd, unsigned long arg) -+#endif -+#endif -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_DIR_H__ */ -diff --git a/fs/aufs/dynop.c b/fs/aufs/dynop.c -new file mode 100644 -index 0000000..d758805 ---- /dev/null -+++ b/fs/aufs/dynop.c -@@ -0,0 +1,379 @@ -+/* -+ * Copyright (C) 2010-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * dynamically customizable operations for regular files -+ */ -+ -+#include "aufs.h" -+ -+#define DyPrSym(key) AuDbgSym(key->dk_op.dy_hop) -+ -+/* -+ * How large will these lists be? -+ * Usually just a few elements, 20-30 at most for each, I guess. -+ */ -+static struct au_splhead dynop[AuDyLast]; -+ -+static struct au_dykey *dy_gfind_get(struct au_splhead *spl, const void *h_op) -+{ -+ struct au_dykey *key, *tmp; -+ struct list_head *head; -+ -+ key = NULL; -+ head = &spl->head; -+ rcu_read_lock(); -+ list_for_each_entry_rcu(tmp, head, dk_list) -+ if (tmp->dk_op.dy_hop == h_op) { -+ key = tmp; -+ kref_get(&key->dk_kref); -+ break; -+ } -+ rcu_read_unlock(); -+ -+ return key; -+} -+ -+static struct au_dykey *dy_bradd(struct au_branch *br, struct au_dykey *key) -+{ -+ struct au_dykey **k, *found; -+ const void *h_op = key->dk_op.dy_hop; -+ int i; -+ -+ found = NULL; -+ k = br->br_dykey; -+ for (i = 0; i < AuBrDynOp; i++) -+ if (k[i]) { -+ if (k[i]->dk_op.dy_hop == h_op) { -+ found = k[i]; -+ break; -+ } -+ } else -+ break; -+ if (!found) { -+ spin_lock(&br->br_dykey_lock); -+ for (; i < AuBrDynOp; i++) -+ if (k[i]) { -+ if (k[i]->dk_op.dy_hop == h_op) { -+ found = k[i]; -+ break; -+ } -+ } else { -+ k[i] = key; -+ break; -+ } -+ spin_unlock(&br->br_dykey_lock); -+ BUG_ON(i == AuBrDynOp); /* expand the array */ -+ } -+ -+ return found; -+} -+ -+/* kref_get() if @key is already added */ -+static struct au_dykey *dy_gadd(struct au_splhead *spl, struct au_dykey *key) -+{ -+ struct au_dykey *tmp, *found; -+ struct list_head *head; -+ const void *h_op = key->dk_op.dy_hop; -+ -+ found = NULL; -+ head = &spl->head; -+ spin_lock(&spl->spin); -+ list_for_each_entry(tmp, head, dk_list) -+ if (tmp->dk_op.dy_hop == h_op) { -+ kref_get(&tmp->dk_kref); -+ found = tmp; -+ break; -+ } -+ if (!found) -+ list_add_rcu(&key->dk_list, head); -+ 
spin_unlock(&spl->spin); -+ -+ if (!found) -+ DyPrSym(key); -+ return found; -+} -+ -+static void dy_free_rcu(struct rcu_head *rcu) -+{ -+ struct au_dykey *key; -+ -+ key = container_of(rcu, struct au_dykey, dk_rcu); -+ DyPrSym(key); -+ kfree(key); -+} -+ -+static void dy_free(struct kref *kref) -+{ -+ struct au_dykey *key; -+ struct au_splhead *spl; -+ -+ key = container_of(kref, struct au_dykey, dk_kref); -+ spl = dynop + key->dk_op.dy_type; -+ au_spl_del_rcu(&key->dk_list, spl); -+ call_rcu(&key->dk_rcu, dy_free_rcu); -+} -+ -+void au_dy_put(struct au_dykey *key) -+{ -+ kref_put(&key->dk_kref, dy_free); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define DyDbgSize(cnt, op) AuDebugOn(cnt != sizeof(op)/sizeof(void *)) -+ -+#ifdef CONFIG_AUFS_DEBUG -+#define DyDbgDeclare(cnt) unsigned int cnt = 0 -+#define DyDbgInc(cnt) do { cnt++; } while (0) -+#else -+#define DyDbgDeclare(cnt) do {} while (0) -+#define DyDbgInc(cnt) do {} while (0) -+#endif -+ -+#define DySet(func, dst, src, h_op, h_sb) do { \ -+ DyDbgInc(cnt); \ -+ if (h_op->func) { \ -+ if (src.func) \ -+ dst.func = src.func; \ -+ else \ -+ AuDbg("%s %s\n", au_sbtype(h_sb), #func); \ -+ } \ -+} while (0) -+ -+#define DySetForce(func, dst, src) do { \ -+ AuDebugOn(!src.func); \ -+ DyDbgInc(cnt); \ -+ dst.func = src.func; \ -+} while (0) -+ -+#define DySetAop(func) \ -+ DySet(func, dyaop->da_op, aufs_aop, h_aop, h_sb) -+#define DySetAopForce(func) \ -+ DySetForce(func, dyaop->da_op, aufs_aop) -+ -+static void dy_aop(struct au_dykey *key, const void *h_op, -+ struct super_block *h_sb __maybe_unused) -+{ -+ struct au_dyaop *dyaop = (void *)key; -+ const struct address_space_operations *h_aop = h_op; -+ DyDbgDeclare(cnt); -+ -+ AuDbg("%s\n", au_sbtype(h_sb)); -+ -+ DySetAop(writepage); -+ DySetAopForce(readpage); /* force */ -+ DySetAop(writepages); -+ DySetAop(set_page_dirty); -+ DySetAop(readpages); -+ DySetAop(write_begin); -+ DySetAop(write_end); -+ DySetAop(bmap); 
-+ DySetAop(invalidatepage); -+ DySetAop(releasepage); -+ DySetAop(freepage); -+ /* these two will be changed according to an aufs mount option */ -+ DySetAop(direct_IO); -+ DySetAop(get_xip_mem); -+ DySetAop(migratepage); -+ DySetAop(launder_page); -+ DySetAop(is_partially_uptodate); -+ DySetAop(is_dirty_writeback); -+ DySetAop(error_remove_page); -+ DySetAop(swap_activate); -+ DySetAop(swap_deactivate); -+ -+ DyDbgSize(cnt, *h_aop); -+ dyaop->da_get_xip_mem = h_aop->get_xip_mem; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void dy_bug(struct kref *kref) -+{ -+ BUG(); -+} -+ -+static struct au_dykey *dy_get(struct au_dynop *op, struct au_branch *br) -+{ -+ struct au_dykey *key, *old; -+ struct au_splhead *spl; -+ struct op { -+ unsigned int sz; -+ void (*set)(struct au_dykey *key, const void *h_op, -+ struct super_block *h_sb __maybe_unused); -+ }; -+ static const struct op a[] = { -+ [AuDy_AOP] = { -+ .sz = sizeof(struct au_dyaop), -+ .set = dy_aop -+ } -+ }; -+ const struct op *p; -+ -+ spl = dynop + op->dy_type; -+ key = dy_gfind_get(spl, op->dy_hop); -+ if (key) -+ goto out_add; /* success */ -+ -+ p = a + op->dy_type; -+ key = kzalloc(p->sz, GFP_NOFS); -+ if (unlikely(!key)) { -+ key = ERR_PTR(-ENOMEM); -+ goto out; -+ } -+ -+ key->dk_op.dy_hop = op->dy_hop; -+ kref_init(&key->dk_kref); -+ p->set(key, op->dy_hop, au_br_sb(br)); -+ old = dy_gadd(spl, key); -+ if (old) { -+ kfree(key); -+ key = old; -+ } -+ -+out_add: -+ old = dy_bradd(br, key); -+ if (old) -+ /* its ref-count should never be zero here */ -+ kref_put(&key->dk_kref, dy_bug); -+out: -+ return key; -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * Aufs prohibits O_DIRECT by defaut even if the branch supports it. -+ * This behaviour is necessary to return an error from open(O_DIRECT) instead -+ * of the succeeding I/O. 
The dio mount option enables O_DIRECT and makes -+ * open(O_DIRECT) always succeed, but the succeeding I/O may return an error. -+ * See the aufs manual in detail. -+ * -+ * To keep this behaviour, aufs has to set NULL to ->get_xip_mem too, and the -+ * performance of fadvise() and madvise() may be affected. -+ */ -+static void dy_adx(struct au_dyaop *dyaop, int do_dx) -+{ -+ if (!do_dx) { -+ dyaop->da_op.direct_IO = NULL; -+ dyaop->da_op.get_xip_mem = NULL; -+ } else { -+ dyaop->da_op.direct_IO = aufs_aop.direct_IO; -+ dyaop->da_op.get_xip_mem = aufs_aop.get_xip_mem; -+ if (!dyaop->da_get_xip_mem) -+ dyaop->da_op.get_xip_mem = NULL; -+ } -+} -+ -+static struct au_dyaop *dy_aget(struct au_branch *br, -+ const struct address_space_operations *h_aop, -+ int do_dx) -+{ -+ struct au_dyaop *dyaop; -+ struct au_dynop op; -+ -+ op.dy_type = AuDy_AOP; -+ op.dy_haop = h_aop; -+ dyaop = (void *)dy_get(&op, br); -+ if (IS_ERR(dyaop)) -+ goto out; -+ dy_adx(dyaop, do_dx); -+ -+out: -+ return dyaop; -+} -+ -+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex, -+ struct inode *h_inode) -+{ -+ int err, do_dx; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct au_dyaop *dyaop; -+ -+ AuDebugOn(!S_ISREG(h_inode->i_mode)); -+ IiMustWriteLock(inode); -+ -+ sb = inode->i_sb; -+ br = au_sbr(sb, bindex); -+ do_dx = !!au_opt_test(au_mntflags(sb), DIO); -+ dyaop = dy_aget(br, h_inode->i_mapping->a_ops, do_dx); -+ err = PTR_ERR(dyaop); -+ if (IS_ERR(dyaop)) -+ /* unnecessary to call dy_fput() */ -+ goto out; -+ -+ err = 0; -+ inode->i_mapping->a_ops = &dyaop->da_op; -+ -+out: -+ return err; -+} -+ -+/* -+ * Is it safe to replace a_ops during the inode/file is in operation? -+ * Yes, I hope so. 
-+ */ -+int au_dy_irefresh(struct inode *inode) -+{ -+ int err; -+ aufs_bindex_t bstart; -+ struct inode *h_inode; -+ -+ err = 0; -+ if (S_ISREG(inode->i_mode)) { -+ bstart = au_ibstart(inode); -+ h_inode = au_h_iptr(inode, bstart); -+ err = au_dy_iaop(inode, bstart, h_inode); -+ } -+ return err; -+} -+ -+void au_dy_arefresh(int do_dx) -+{ -+ struct au_splhead *spl; -+ struct list_head *head; -+ struct au_dykey *key; -+ -+ spl = dynop + AuDy_AOP; -+ head = &spl->head; -+ spin_lock(&spl->spin); -+ list_for_each_entry(key, head, dk_list) -+ dy_adx((void *)key, do_dx); -+ spin_unlock(&spl->spin); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void __init au_dy_init(void) -+{ -+ int i; -+ -+ /* make sure that 'struct au_dykey *' can be any type */ -+ BUILD_BUG_ON(offsetof(struct au_dyaop, da_key)); -+ -+ for (i = 0; i < AuDyLast; i++) -+ au_spl_init(dynop + i); -+} -+ -+void au_dy_fin(void) -+{ -+ int i; -+ -+ for (i = 0; i < AuDyLast; i++) -+ WARN_ON(!list_empty(&dynop[i].head)); -+} -diff --git a/fs/aufs/dynop.h b/fs/aufs/dynop.h -new file mode 100644 -index 0000000..cdf1499 ---- /dev/null -+++ b/fs/aufs/dynop.h -@@ -0,0 +1,76 @@ -+/* -+ * Copyright (C) 2010-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * dynamically customizable operations (for regular files only) -+ */ -+ -+#ifndef __AUFS_DYNOP_H__ -+#define __AUFS_DYNOP_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+ -+enum {AuDy_AOP, AuDyLast}; -+ -+struct au_dynop { -+ int dy_type; -+ union { -+ const void *dy_hop; -+ const struct address_space_operations *dy_haop; -+ }; -+}; -+ -+struct au_dykey { -+ union { -+ struct list_head dk_list; -+ struct rcu_head dk_rcu; -+ }; -+ struct au_dynop dk_op; -+ -+ /* -+ * during I am in the branch local array, kref is gotten. when the -+ * branch is removed, kref is put. -+ */ -+ struct kref dk_kref; -+}; -+ -+/* stop unioning since their sizes are very different from each other */ -+struct au_dyaop { -+ struct au_dykey da_key; -+ struct address_space_operations da_op; /* not const */ -+ int (*da_get_xip_mem)(struct address_space *, pgoff_t, int, -+ void **, unsigned long *); -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* dynop.c */ -+struct au_branch; -+void au_dy_put(struct au_dykey *key); -+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex, -+ struct inode *h_inode); -+int au_dy_irefresh(struct inode *inode); -+void au_dy_arefresh(int do_dio); -+ -+void __init au_dy_init(void); -+void au_dy_fin(void); -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_DYNOP_H__ */ -diff --git a/fs/aufs/export.c b/fs/aufs/export.c -new file mode 100644 -index 0000000..c5bfa76 ---- /dev/null -+++ b/fs/aufs/export.c -@@ -0,0 +1,831 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * export via nfs -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include "../fs/mount.h" -+#include "aufs.h" -+ -+union conv { -+#ifdef CONFIG_AUFS_INO_T_64 -+ __u32 a[2]; -+#else -+ __u32 a[1]; -+#endif -+ ino_t ino; -+}; -+ -+static ino_t decode_ino(__u32 *a) -+{ -+ union conv u; -+ -+ BUILD_BUG_ON(sizeof(u.ino) != sizeof(u.a)); -+ u.a[0] = a[0]; -+#ifdef CONFIG_AUFS_INO_T_64 -+ u.a[1] = a[1]; -+#endif -+ return u.ino; -+} -+ -+static void encode_ino(__u32 *a, ino_t ino) -+{ -+ union conv u; -+ -+ u.ino = ino; -+ a[0] = u.a[0]; -+#ifdef CONFIG_AUFS_INO_T_64 -+ a[1] = u.a[1]; -+#endif -+} -+ -+/* NFS file handle */ -+enum { -+ Fh_br_id, -+ Fh_sigen, -+#ifdef CONFIG_AUFS_INO_T_64 -+ /* support 64bit inode number */ -+ Fh_ino1, -+ Fh_ino2, -+ Fh_dir_ino1, -+ Fh_dir_ino2, -+#else -+ Fh_ino1, -+ Fh_dir_ino1, -+#endif -+ Fh_igen, -+ Fh_h_type, -+ Fh_tail, -+ -+ Fh_ino = Fh_ino1, -+ Fh_dir_ino = Fh_dir_ino1 -+}; -+ -+static int au_test_anon(struct dentry *dentry) -+{ -+ /* note: read d_flags without d_lock */ -+ return !!(dentry->d_flags & DCACHE_DISCONNECTED); -+} -+ -+int au_test_nfsd(void) -+{ -+ int ret; -+ struct task_struct *tsk = current; -+ char comm[sizeof(tsk->comm)]; -+ -+ ret = 0; -+ if (tsk->flags & PF_KTHREAD) { -+ get_task_comm(comm, tsk); -+ ret = !strcmp(comm, "nfsd"); -+ } -+ -+ return ret; -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* inode generation external table */ -+ -+void au_xigen_inc(struct inode *inode) -+{ -+ loff_t pos; -+ ssize_t sz; -+ __u32 igen; -+ struct super_block *sb; -+ 
struct au_sbinfo *sbinfo; -+ -+ sb = inode->i_sb; -+ AuDebugOn(!au_opt_test(au_mntflags(sb), XINO)); -+ -+ sbinfo = au_sbi(sb); -+ pos = inode->i_ino; -+ pos *= sizeof(igen); -+ igen = inode->i_generation + 1; -+ sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xigen, &igen, -+ sizeof(igen), &pos); -+ if (sz == sizeof(igen)) -+ return; /* success */ -+ -+ if (unlikely(sz >= 0)) -+ AuIOErr("xigen error (%zd)\n", sz); -+} -+ -+int au_xigen_new(struct inode *inode) -+{ -+ int err; -+ loff_t pos; -+ ssize_t sz; -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ struct file *file; -+ -+ err = 0; -+ /* todo: dirty, at mount time */ -+ if (inode->i_ino == AUFS_ROOT_INO) -+ goto out; -+ sb = inode->i_sb; -+ SiMustAnyLock(sb); -+ if (unlikely(!au_opt_test(au_mntflags(sb), XINO))) -+ goto out; -+ -+ err = -EFBIG; -+ pos = inode->i_ino; -+ if (unlikely(au_loff_max / sizeof(inode->i_generation) - 1 < pos)) { -+ AuIOErr1("too large i%lld\n", pos); -+ goto out; -+ } -+ pos *= sizeof(inode->i_generation); -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ file = sbinfo->si_xigen; -+ BUG_ON(!file); -+ -+ if (vfsub_f_size_read(file) -+ < pos + sizeof(inode->i_generation)) { -+ inode->i_generation = atomic_inc_return(&sbinfo->si_xigen_next); -+ sz = xino_fwrite(sbinfo->si_xwrite, file, &inode->i_generation, -+ sizeof(inode->i_generation), &pos); -+ } else -+ sz = xino_fread(sbinfo->si_xread, file, &inode->i_generation, -+ sizeof(inode->i_generation), &pos); -+ if (sz == sizeof(inode->i_generation)) -+ goto out; /* success */ -+ -+ err = sz; -+ if (unlikely(sz >= 0)) { -+ err = -EIO; -+ AuIOErr("xigen error (%zd)\n", sz); -+ } -+ -+out: -+ return err; -+} -+ -+int au_xigen_set(struct super_block *sb, struct file *base) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ struct file *file; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ file = au_xino_create2(base, sbinfo->si_xigen); -+ err = PTR_ERR(file); -+ if (IS_ERR(file)) -+ goto out; -+ err = 0; -+ if (sbinfo->si_xigen) -+ 
fput(sbinfo->si_xigen); -+ sbinfo->si_xigen = file; -+ -+out: -+ return err; -+} -+ -+void au_xigen_clr(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ if (sbinfo->si_xigen) { -+ fput(sbinfo->si_xigen); -+ sbinfo->si_xigen = NULL; -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct dentry *decode_by_ino(struct super_block *sb, ino_t ino, -+ ino_t dir_ino) -+{ -+ struct dentry *dentry, *d; -+ struct inode *inode; -+ unsigned int sigen; -+ -+ dentry = NULL; -+ inode = ilookup(sb, ino); -+ if (!inode) -+ goto out; -+ -+ dentry = ERR_PTR(-ESTALE); -+ sigen = au_sigen(sb); -+ if (unlikely(is_bad_inode(inode) -+ || IS_DEADDIR(inode) -+ || sigen != au_iigen(inode, NULL))) -+ goto out_iput; -+ -+ dentry = NULL; -+ if (!dir_ino || S_ISDIR(inode->i_mode)) -+ dentry = d_find_alias(inode); -+ else { -+ spin_lock(&inode->i_lock); -+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) { -+ spin_lock(&d->d_lock); -+ if (!au_test_anon(d) -+ && d->d_parent->d_inode->i_ino == dir_ino) { -+ dentry = dget_dlock(d); -+ spin_unlock(&d->d_lock); -+ break; -+ } -+ spin_unlock(&d->d_lock); -+ } -+ spin_unlock(&inode->i_lock); -+ } -+ if (unlikely(dentry && au_digen_test(dentry, sigen))) { -+ /* need to refresh */ -+ dput(dentry); -+ dentry = NULL; -+ } -+ -+out_iput: -+ iput(inode); -+out: -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* todo: dirty? 
*/ -+/* if exportfs_decode_fh() passed vfsmount*, we could be happy */ -+ -+struct au_compare_mnt_args { -+ /* input */ -+ struct super_block *sb; -+ -+ /* output */ -+ struct vfsmount *mnt; -+}; -+ -+static int au_compare_mnt(struct vfsmount *mnt, void *arg) -+{ -+ struct au_compare_mnt_args *a = arg; -+ -+ if (mnt->mnt_sb != a->sb) -+ return 0; -+ a->mnt = mntget(mnt); -+ return 1; -+} -+ -+static struct vfsmount *au_mnt_get(struct super_block *sb) -+{ -+ int err; -+ struct path root; -+ struct au_compare_mnt_args args = { -+ .sb = sb -+ }; -+ -+ get_fs_root(current->fs, &root); -+ rcu_read_lock(); -+ err = iterate_mounts(au_compare_mnt, &args, root.mnt); -+ rcu_read_unlock(); -+ path_put(&root); -+ AuDebugOn(!err); -+ AuDebugOn(!args.mnt); -+ return args.mnt; -+} -+ -+struct au_nfsd_si_lock { -+ unsigned int sigen; -+ aufs_bindex_t bindex, br_id; -+ unsigned char force_lock; -+}; -+ -+static int si_nfsd_read_lock(struct super_block *sb, -+ struct au_nfsd_si_lock *nsi_lock) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ -+ si_read_lock(sb, AuLock_FLUSH); -+ -+ /* branch id may be wrapped around */ -+ err = 0; -+ bindex = au_br_index(sb, nsi_lock->br_id); -+ if (bindex >= 0 && nsi_lock->sigen + AUFS_BRANCH_MAX > au_sigen(sb)) -+ goto out; /* success */ -+ -+ err = -ESTALE; -+ bindex = -1; -+ if (!nsi_lock->force_lock) -+ si_read_unlock(sb); -+ -+out: -+ nsi_lock->bindex = bindex; -+ return err; -+} -+ -+struct find_name_by_ino { -+ struct dir_context ctx; -+ int called, found; -+ ino_t ino; -+ char *name; -+ int namelen; -+}; -+ -+static int -+find_name_by_ino(struct dir_context *ctx, const char *name, int namelen, -+ loff_t offset, u64 ino, unsigned int d_type) -+{ -+ struct find_name_by_ino *a = container_of(ctx, struct find_name_by_ino, -+ ctx); -+ -+ a->called++; -+ if (a->ino != ino) -+ return 0; -+ -+ memcpy(a->name, name, namelen); -+ a->namelen = namelen; -+ a->found = 1; -+ return 1; -+} -+ -+static struct dentry *au_lkup_by_ino(struct path *path, ino_t ino, 
-+ struct au_nfsd_si_lock *nsi_lock) -+{ -+ struct dentry *dentry, *parent; -+ struct file *file; -+ struct inode *dir; -+ struct find_name_by_ino arg = { -+ .ctx = { -+ .actor = au_diractor(find_name_by_ino) -+ } -+ }; -+ int err; -+ -+ parent = path->dentry; -+ if (nsi_lock) -+ si_read_unlock(parent->d_sb); -+ file = vfsub_dentry_open(path, au_dir_roflags); -+ dentry = (void *)file; -+ if (IS_ERR(file)) -+ goto out; -+ -+ dentry = ERR_PTR(-ENOMEM); -+ arg.name = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!arg.name)) -+ goto out_file; -+ arg.ino = ino; -+ arg.found = 0; -+ do { -+ arg.called = 0; -+ /* smp_mb(); */ -+ err = vfsub_iterate_dir(file, &arg.ctx); -+ } while (!err && !arg.found && arg.called); -+ dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out_name; -+ /* instead of ENOENT */ -+ dentry = ERR_PTR(-ESTALE); -+ if (!arg.found) -+ goto out_name; -+ -+ /* do not call vfsub_lkup_one() */ -+ dir = parent->d_inode; -+ mutex_lock(&dir->i_mutex); -+ dentry = vfsub_lookup_one_len(arg.name, parent, arg.namelen); -+ mutex_unlock(&dir->i_mutex); -+ AuTraceErrPtr(dentry); -+ if (IS_ERR(dentry)) -+ goto out_name; -+ AuDebugOn(au_test_anon(dentry)); -+ if (unlikely(!dentry->d_inode)) { -+ dput(dentry); -+ dentry = ERR_PTR(-ENOENT); -+ } -+ -+out_name: -+ free_page((unsigned long)arg.name); -+out_file: -+ fput(file); -+out: -+ if (unlikely(nsi_lock -+ && si_nfsd_read_lock(parent->d_sb, nsi_lock) < 0)) -+ if (!IS_ERR(dentry)) { -+ dput(dentry); -+ dentry = ERR_PTR(-ESTALE); -+ } -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+static struct dentry *decode_by_dir_ino(struct super_block *sb, ino_t ino, -+ ino_t dir_ino, -+ struct au_nfsd_si_lock *nsi_lock) -+{ -+ struct dentry *dentry; -+ struct path path; -+ -+ if (dir_ino != AUFS_ROOT_INO) { -+ path.dentry = decode_by_ino(sb, dir_ino, 0); -+ dentry = path.dentry; -+ if (!path.dentry || IS_ERR(path.dentry)) -+ goto out; -+ AuDebugOn(au_test_anon(path.dentry)); -+ } else -+ path.dentry = 
dget(sb->s_root); -+ -+ path.mnt = au_mnt_get(sb); -+ dentry = au_lkup_by_ino(&path, ino, nsi_lock); -+ path_put(&path); -+ -+out: -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int h_acceptable(void *expv, struct dentry *dentry) -+{ -+ return 1; -+} -+ -+static char *au_build_path(struct dentry *h_parent, struct path *h_rootpath, -+ char *buf, int len, struct super_block *sb) -+{ -+ char *p; -+ int n; -+ struct path path; -+ -+ p = d_path(h_rootpath, buf, len); -+ if (IS_ERR(p)) -+ goto out; -+ n = strlen(p); -+ -+ path.mnt = h_rootpath->mnt; -+ path.dentry = h_parent; -+ p = d_path(&path, buf, len); -+ if (IS_ERR(p)) -+ goto out; -+ if (n != 1) -+ p += n; -+ -+ path.mnt = au_mnt_get(sb); -+ path.dentry = sb->s_root; -+ p = d_path(&path, buf, len - strlen(p)); -+ mntput(path.mnt); -+ if (IS_ERR(p)) -+ goto out; -+ if (n != 1) -+ p[strlen(p)] = '/'; -+ -+out: -+ AuTraceErrPtr(p); -+ return p; -+} -+ -+static -+struct dentry *decode_by_path(struct super_block *sb, ino_t ino, __u32 *fh, -+ int fh_len, struct au_nfsd_si_lock *nsi_lock) -+{ -+ struct dentry *dentry, *h_parent, *root; -+ struct super_block *h_sb; -+ char *pathname, *p; -+ struct vfsmount *h_mnt; -+ struct au_branch *br; -+ int err; -+ struct path path; -+ -+ br = au_sbr(sb, nsi_lock->bindex); -+ h_mnt = au_br_mnt(br); -+ h_sb = h_mnt->mnt_sb; -+ /* todo: call lower fh_to_dentry()? fh_to_parent()? 
*/ -+ h_parent = exportfs_decode_fh(h_mnt, (void *)(fh + Fh_tail), -+ fh_len - Fh_tail, fh[Fh_h_type], -+ h_acceptable, /*context*/NULL); -+ dentry = h_parent; -+ if (unlikely(!h_parent || IS_ERR(h_parent))) { -+ AuWarn1("%s decode_fh failed, %ld\n", -+ au_sbtype(h_sb), PTR_ERR(h_parent)); -+ goto out; -+ } -+ dentry = NULL; -+ if (unlikely(au_test_anon(h_parent))) { -+ AuWarn1("%s decode_fh returned a disconnected dentry\n", -+ au_sbtype(h_sb)); -+ goto out_h_parent; -+ } -+ -+ dentry = ERR_PTR(-ENOMEM); -+ pathname = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!pathname)) -+ goto out_h_parent; -+ -+ root = sb->s_root; -+ path.mnt = h_mnt; -+ di_read_lock_parent(root, !AuLock_IR); -+ path.dentry = au_h_dptr(root, nsi_lock->bindex); -+ di_read_unlock(root, !AuLock_IR); -+ p = au_build_path(h_parent, &path, pathname, PAGE_SIZE, sb); -+ dentry = (void *)p; -+ if (IS_ERR(p)) -+ goto out_pathname; -+ -+ si_read_unlock(sb); -+ err = vfsub_kern_path(p, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); -+ dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out_relock; -+ -+ dentry = ERR_PTR(-ENOENT); -+ AuDebugOn(au_test_anon(path.dentry)); -+ if (unlikely(!path.dentry->d_inode)) -+ goto out_path; -+ -+ if (ino != path.dentry->d_inode->i_ino) -+ dentry = au_lkup_by_ino(&path, ino, /*nsi_lock*/NULL); -+ else -+ dentry = dget(path.dentry); -+ -+out_path: -+ path_put(&path); -+out_relock: -+ if (unlikely(si_nfsd_read_lock(sb, nsi_lock) < 0)) -+ if (!IS_ERR(dentry)) { -+ dput(dentry); -+ dentry = ERR_PTR(-ESTALE); -+ } -+out_pathname: -+ free_page((unsigned long)pathname); -+out_h_parent: -+ dput(h_parent); -+out: -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct dentry * -+aufs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, -+ int fh_type) -+{ -+ struct dentry *dentry; -+ __u32 *fh = fid->raw; -+ struct au_branch *br; -+ ino_t ino, dir_ino; -+ struct 
au_nfsd_si_lock nsi_lock = { -+ .force_lock = 0 -+ }; -+ -+ dentry = ERR_PTR(-ESTALE); -+ /* it should never happen, but the file handle is unreliable */ -+ if (unlikely(fh_len < Fh_tail)) -+ goto out; -+ nsi_lock.sigen = fh[Fh_sigen]; -+ nsi_lock.br_id = fh[Fh_br_id]; -+ -+ /* branch id may be wrapped around */ -+ br = NULL; -+ if (unlikely(si_nfsd_read_lock(sb, &nsi_lock))) -+ goto out; -+ nsi_lock.force_lock = 1; -+ -+ /* is this inode still cached? */ -+ ino = decode_ino(fh + Fh_ino); -+ /* it should never happen */ -+ if (unlikely(ino == AUFS_ROOT_INO)) -+ goto out_unlock; -+ -+ dir_ino = decode_ino(fh + Fh_dir_ino); -+ dentry = decode_by_ino(sb, ino, dir_ino); -+ if (IS_ERR(dentry)) -+ goto out_unlock; -+ if (dentry) -+ goto accept; -+ -+ /* is the parent dir cached? */ -+ br = au_sbr(sb, nsi_lock.bindex); -+ atomic_inc(&br->br_count); -+ dentry = decode_by_dir_ino(sb, ino, dir_ino, &nsi_lock); -+ if (IS_ERR(dentry)) -+ goto out_unlock; -+ if (dentry) -+ goto accept; -+ -+ /* lookup path */ -+ dentry = decode_by_path(sb, ino, fh, fh_len, &nsi_lock); -+ if (IS_ERR(dentry)) -+ goto out_unlock; -+ if (unlikely(!dentry)) -+ /* todo?: make it ESTALE */ -+ goto out_unlock; -+ -+accept: -+ if (!au_digen_test(dentry, au_sigen(sb)) -+ && dentry->d_inode->i_generation == fh[Fh_igen]) -+ goto out_unlock; /* success */ -+ -+ dput(dentry); -+ dentry = ERR_PTR(-ESTALE); -+out_unlock: -+ if (br) -+ atomic_dec(&br->br_count); -+ si_read_unlock(sb); -+out: -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+#if 0 /* reserved for future use */ -+/* support subtreecheck option */ -+static struct dentry *aufs_fh_to_parent(struct super_block *sb, struct fid *fid, -+ int fh_len, int fh_type) -+{ -+ struct dentry *parent; -+ __u32 *fh = fid->raw; -+ ino_t dir_ino; -+ -+ dir_ino = decode_ino(fh + Fh_dir_ino); -+ parent = decode_by_ino(sb, dir_ino, 0); -+ if (IS_ERR(parent)) -+ goto out; -+ if (!parent) -+ parent = decode_by_path(sb, au_br_index(sb, fh[Fh_br_id]), -+ dir_ino, fh, 
fh_len); -+ -+out: -+ AuTraceErrPtr(parent); -+ return parent; -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_encode_fh(struct inode *inode, __u32 *fh, int *max_len, -+ struct inode *dir) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct super_block *sb, *h_sb; -+ struct dentry *dentry, *parent, *h_parent; -+ struct inode *h_dir; -+ struct au_branch *br; -+ -+ err = -ENOSPC; -+ if (unlikely(*max_len <= Fh_tail)) { -+ AuWarn1("NFSv2 client (max_len %d)?\n", *max_len); -+ goto out; -+ } -+ -+ err = FILEID_ROOT; -+ if (inode->i_ino == AUFS_ROOT_INO) { -+ AuDebugOn(inode->i_ino != AUFS_ROOT_INO); -+ goto out; -+ } -+ -+ h_parent = NULL; -+ sb = inode->i_sb; -+ err = si_read_lock(sb, AuLock_FLUSH); -+ if (unlikely(err)) -+ goto out; -+ -+#ifdef CONFIG_AUFS_DEBUG -+ if (unlikely(!au_opt_test(au_mntflags(sb), XINO))) -+ AuWarn1("NFS-exporting requires xino\n"); -+#endif -+ err = -EIO; -+ parent = NULL; -+ ii_read_lock_child(inode); -+ bindex = au_ibstart(inode); -+ if (!dir) { -+ dentry = d_find_any_alias(inode); -+ if (unlikely(!dentry)) -+ goto out_unlock; -+ AuDebugOn(au_test_anon(dentry)); -+ parent = dget_parent(dentry); -+ dput(dentry); -+ if (unlikely(!parent)) -+ goto out_unlock; -+ dir = parent->d_inode; -+ } -+ -+ ii_read_lock_parent(dir); -+ h_dir = au_h_iptr(dir, bindex); -+ ii_read_unlock(dir); -+ if (unlikely(!h_dir)) -+ goto out_parent; -+ h_parent = d_find_any_alias(h_dir); -+ if (unlikely(!h_parent)) -+ goto out_hparent; -+ -+ err = -EPERM; -+ br = au_sbr(sb, bindex); -+ h_sb = au_br_sb(br); -+ if (unlikely(!h_sb->s_export_op)) { -+ AuErr1("%s branch is not exportable\n", au_sbtype(h_sb)); -+ goto out_hparent; -+ } -+ -+ fh[Fh_br_id] = br->br_id; -+ fh[Fh_sigen] = au_sigen(sb); -+ encode_ino(fh + Fh_ino, inode->i_ino); -+ encode_ino(fh + Fh_dir_ino, dir->i_ino); -+ fh[Fh_igen] = inode->i_generation; -+ -+ *max_len -= Fh_tail; -+ fh[Fh_h_type] = exportfs_encode_fh(h_parent, (void *)(fh + 
Fh_tail), -+ max_len, -+ /*connectable or subtreecheck*/0); -+ err = fh[Fh_h_type]; -+ *max_len += Fh_tail; -+ /* todo: macros? */ -+ if (err != FILEID_INVALID) -+ err = 99; -+ else -+ AuWarn1("%s encode_fh failed\n", au_sbtype(h_sb)); -+ -+out_hparent: -+ dput(h_parent); -+out_parent: -+ dput(parent); -+out_unlock: -+ ii_read_unlock(inode); -+ si_read_unlock(sb); -+out: -+ if (unlikely(err < 0)) -+ err = FILEID_INVALID; -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_commit_metadata(struct inode *inode) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct super_block *sb; -+ struct inode *h_inode; -+ int (*f)(struct inode *inode); -+ -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ ii_write_lock_child(inode); -+ bindex = au_ibstart(inode); -+ AuDebugOn(bindex < 0); -+ h_inode = au_h_iptr(inode, bindex); -+ -+ f = h_inode->i_sb->s_export_op->commit_metadata; -+ if (f) -+ err = f(h_inode); -+ else { -+ struct writeback_control wbc = { -+ .sync_mode = WB_SYNC_ALL, -+ .nr_to_write = 0 /* metadata only */ -+ }; -+ -+ err = sync_inode(h_inode, &wbc); -+ } -+ -+ au_cpup_attr_timesizes(inode); -+ ii_write_unlock(inode); -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct export_operations aufs_export_op = { -+ .fh_to_dentry = aufs_fh_to_dentry, -+ /* .fh_to_parent = aufs_fh_to_parent, */ -+ .encode_fh = aufs_encode_fh, -+ .commit_metadata = aufs_commit_metadata -+}; -+ -+void au_export_init(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ __u32 u; -+ -+ sb->s_export_op = &aufs_export_op; -+ sbinfo = au_sbi(sb); -+ sbinfo->si_xigen = NULL; -+ get_random_bytes(&u, sizeof(u)); -+ BUILD_BUG_ON(sizeof(u) != sizeof(int)); -+ atomic_set(&sbinfo->si_xigen_next, u); -+} -diff --git a/fs/aufs/f_op.c b/fs/aufs/f_op.c -new file mode 100644 -index 0000000..b08981a ---- /dev/null -+++ 
b/fs/aufs/f_op.c -@@ -0,0 +1,781 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * file and vm operations -+ */ -+ -+#include -+#include -+#include -+#include -+#include "aufs.h" -+ -+int au_do_open_nondir(struct file *file, int flags, struct file *h_file) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct dentry *dentry, *h_dentry; -+ struct au_finfo *finfo; -+ struct inode *h_inode; -+ -+ FiMustWriteLock(file); -+ -+ err = 0; -+ dentry = file->f_dentry; -+ AuDebugOn(IS_ERR_OR_NULL(dentry)); -+ finfo = au_fi(file); -+ memset(&finfo->fi_htop, 0, sizeof(finfo->fi_htop)); -+ atomic_set(&finfo->fi_mmapped, 0); -+ bindex = au_dbstart(dentry); -+ if (!h_file) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ err = vfsub_test_mntns(file->f_path.mnt, h_dentry->d_sb); -+ if (unlikely(err)) -+ goto out; -+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0); -+ } else { -+ h_dentry = h_file->f_dentry; -+ err = vfsub_test_mntns(file->f_path.mnt, h_dentry->d_sb); -+ if (unlikely(err)) -+ goto out; -+ get_file(h_file); -+ } -+ if (IS_ERR(h_file)) -+ err = PTR_ERR(h_file); -+ else { -+ if ((flags & __O_TMPFILE) -+ && !(flags & O_EXCL)) { -+ h_inode = file_inode(h_file); -+ spin_lock(&h_inode->i_lock); -+ h_inode->i_state |= I_LINKABLE; -+ spin_unlock(&h_inode->i_lock); -+ } -+ au_set_fbstart(file, bindex); -+ 
au_set_h_fptr(file, bindex, h_file); -+ au_update_figen(file); -+ /* todo: necessary? */ -+ /* file->f_ra = h_file->f_ra; */ -+ } -+ -+out: -+ return err; -+} -+ -+static int aufs_open_nondir(struct inode *inode __maybe_unused, -+ struct file *file) -+{ -+ int err; -+ struct super_block *sb; -+ struct au_do_open_args args = { -+ .open = au_do_open_nondir -+ }; -+ -+ AuDbg("%pD, f_flags 0x%x, f_mode 0x%x\n", -+ file, vfsub_file_flags(file), file->f_mode); -+ -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ err = au_do_open(file, &args); -+ si_read_unlock(sb); -+ return err; -+} -+ -+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file) -+{ -+ struct au_finfo *finfo; -+ aufs_bindex_t bindex; -+ -+ finfo = au_fi(file); -+ au_sphl_del(&finfo->fi_hlist, &au_sbi(file->f_dentry->d_sb)->si_files); -+ bindex = finfo->fi_btop; -+ if (bindex >= 0) -+ au_set_h_fptr(file, bindex, NULL); -+ -+ au_finfo_fin(file); -+ return 0; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_do_flush_nondir(struct file *file, fl_owner_t id) -+{ -+ int err; -+ struct file *h_file; -+ -+ err = 0; -+ h_file = au_hf_top(file); -+ if (h_file) -+ err = vfsub_flush(h_file, id); -+ return err; -+} -+ -+static int aufs_flush_nondir(struct file *file, fl_owner_t id) -+{ -+ return au_do_flush(file, id, au_do_flush_nondir); -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * read and write functions acquire [fdi]_rwsem once, but release before -+ * mmap_sem. This is because to stop a race condition between mmap(2). -+ * Releasing these aufs-rwsem should be safe, no branch-mamagement (by keeping -+ * si_rwsem), no harmful copy-up should happen. Actually copy-up may happen in -+ * read functions after [fdi]_rwsem are released, but it should be harmless. 
-+ */ -+ -+/* Callers should call au_read_post() or fput() in the end */ -+struct file *au_read_pre(struct file *file, int keep_fi) -+{ -+ struct file *h_file; -+ int err; -+ -+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0); -+ if (!err) { -+ di_read_unlock(file->f_dentry, AuLock_IR); -+ h_file = au_hf_top(file); -+ get_file(h_file); -+ if (!keep_fi) -+ fi_read_unlock(file); -+ } else -+ h_file = ERR_PTR(err); -+ -+ return h_file; -+} -+ -+static void au_read_post(struct inode *inode, struct file *h_file) -+{ -+ /* update without lock, I don't think it a problem */ -+ fsstack_copy_attr_atime(inode, file_inode(h_file)); -+ fput(h_file); -+} -+ -+struct au_write_pre { -+ blkcnt_t blks; -+ aufs_bindex_t bstart; -+}; -+ -+/* -+ * return with iinfo is write-locked -+ * callers should call au_write_post() or iinfo_write_unlock() + fput() in the -+ * end -+ */ -+static struct file *au_write_pre(struct file *file, int do_ready, -+ struct au_write_pre *wpre) -+{ -+ struct file *h_file; -+ struct dentry *dentry; -+ int err; -+ struct au_pin pin; -+ -+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1); -+ h_file = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ dentry = file->f_dentry; -+ if (do_ready) { -+ err = au_ready_to_write(file, -1, &pin); -+ if (unlikely(err)) { -+ h_file = ERR_PTR(err); -+ di_write_unlock(dentry); -+ goto out_fi; -+ } -+ } -+ -+ di_downgrade_lock(dentry, /*flags*/0); -+ if (wpre) -+ wpre->bstart = au_fbstart(file); -+ h_file = au_hf_top(file); -+ get_file(h_file); -+ if (wpre) -+ wpre->blks = file_inode(h_file)->i_blocks; -+ if (do_ready) -+ au_unpin(&pin); -+ di_read_unlock(dentry, /*flags*/0); -+ -+out_fi: -+ fi_write_unlock(file); -+out: -+ return h_file; -+} -+ -+static void au_write_post(struct inode *inode, struct file *h_file, -+ struct au_write_pre *wpre, ssize_t written) -+{ -+ struct inode *h_inode; -+ -+ au_cpup_attr_timesizes(inode); -+ AuDebugOn(au_ibstart(inode) != wpre->bstart); -+ h_inode = 
file_inode(h_file); -+ inode->i_mode = h_inode->i_mode; -+ ii_write_unlock(inode); -+ fput(h_file); -+ -+ /* AuDbg("blks %llu, %llu\n", (u64)blks, (u64)h_inode->i_blocks); */ -+ if (written > 0) -+ au_fhsm_wrote(inode->i_sb, wpre->bstart, -+ /*force*/h_inode->i_blocks > wpre->blks); -+} -+ -+static ssize_t aufs_read(struct file *file, char __user *buf, size_t count, -+ loff_t *ppos) -+{ -+ ssize_t err; -+ struct inode *inode; -+ struct file *h_file; -+ struct super_block *sb; -+ -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ /* filedata may be obsoleted by concurrent copyup, but no problem */ -+ err = vfsub_read_u(h_file, buf, count, ppos); -+ /* todo: necessary? */ -+ /* file->f_ra = h_file->f_ra; */ -+ au_read_post(inode, h_file); -+ -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* -+ * todo: very ugly -+ * it locks both of i_mutex and si_rwsem for read in safe. -+ * if the plink maintenance mode continues forever (that is the problem), -+ * may loop forever. 
-+ */ -+static void au_mtx_and_read_lock(struct inode *inode) -+{ -+ int err; -+ struct super_block *sb = inode->i_sb; -+ -+ while (1) { -+ mutex_lock(&inode->i_mutex); -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (!err) -+ break; -+ mutex_unlock(&inode->i_mutex); -+ si_read_lock(sb, AuLock_NOPLMW); -+ si_read_unlock(sb); -+ } -+} -+ -+static ssize_t aufs_write(struct file *file, const char __user *ubuf, -+ size_t count, loff_t *ppos) -+{ -+ ssize_t err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *h_file; -+ char __user *buf = (char __user *)ubuf; -+ -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = vfsub_write_u(h_file, buf, count, ppos); -+ au_write_post(inode, h_file, &wpre, err); -+ -+out: -+ si_read_unlock(inode->i_sb); -+ mutex_unlock(&inode->i_mutex); -+ return err; -+} -+ -+static ssize_t au_do_iter(struct file *h_file, int rw, struct kiocb *kio, -+ struct iov_iter *iov_iter) -+{ -+ ssize_t err; -+ struct file *file; -+ ssize_t (*iter)(struct kiocb *, struct iov_iter *); -+ ssize_t (*aio)(struct kiocb *, const struct iovec *, unsigned long, -+ loff_t); -+ -+ err = security_file_permission(h_file, rw); -+ if (unlikely(err)) -+ goto out; -+ -+ err = -ENOSYS; -+ iter = NULL; -+ aio = NULL; -+ if (rw == MAY_READ) { -+ iter = h_file->f_op->read_iter; -+ aio = h_file->f_op->aio_read; -+ } else if (rw == MAY_WRITE) { -+ iter = h_file->f_op->write_iter; -+ aio = h_file->f_op->aio_write; -+ } -+ -+ file = kio->ki_filp; -+ kio->ki_filp = h_file; -+ if (iter) { -+ lockdep_off(); -+ err = iter(kio, iov_iter); -+ lockdep_on(); -+ } else if (aio) { -+ lockdep_off(); -+ err = aio(kio, iov_iter->iov, iov_iter->nr_segs, kio->ki_pos); -+ lockdep_on(); -+ } else -+ /* currently there is no such fs */ -+ WARN_ON_ONCE(1); -+ kio->ki_filp = file; -+ -+out: -+ return err; -+} -+ -+static ssize_t 
aufs_read_iter(struct kiocb *kio, struct iov_iter *iov_iter) -+{ -+ ssize_t err; -+ struct file *file, *h_file; -+ struct inode *inode; -+ struct super_block *sb; -+ -+ file = kio->ki_filp; -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = au_do_iter(h_file, MAY_READ, kio, iov_iter); -+ /* todo: necessary? */ -+ /* file->f_ra = h_file->f_ra; */ -+ au_read_post(inode, h_file); -+ -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+static ssize_t aufs_write_iter(struct kiocb *kio, struct iov_iter *iov_iter) -+{ -+ ssize_t err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *file, *h_file; -+ -+ file = kio->ki_filp; -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = au_do_iter(h_file, MAY_WRITE, kio, iov_iter); -+ au_write_post(inode, h_file, &wpre, err); -+ -+out: -+ si_read_unlock(inode->i_sb); -+ mutex_unlock(&inode->i_mutex); -+ return err; -+} -+ -+static ssize_t aufs_splice_read(struct file *file, loff_t *ppos, -+ struct pipe_inode_info *pipe, size_t len, -+ unsigned int flags) -+{ -+ ssize_t err; -+ struct file *h_file; -+ struct inode *inode; -+ struct super_block *sb; -+ -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/1); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ if (au_test_loopback_kthread()) { -+ au_warn_loopback(h_file->f_dentry->d_sb); -+ if (file->f_mapping != h_file->f_mapping) { -+ file->f_mapping = h_file->f_mapping; -+ smp_mb(); /* unnecessary? */ -+ } -+ } -+ fi_read_unlock(file); -+ -+ err = vfsub_splice_to(h_file, ppos, pipe, len, flags); -+ /* todo: necessasry? 
*/ -+ /* file->f_ra = h_file->f_ra; */ -+ au_read_post(inode, h_file); -+ -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+static ssize_t -+aufs_splice_write(struct pipe_inode_info *pipe, struct file *file, loff_t *ppos, -+ size_t len, unsigned int flags) -+{ -+ ssize_t err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *h_file; -+ -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = vfsub_splice_from(pipe, h_file, ppos, len, flags); -+ au_write_post(inode, h_file, &wpre, err); -+ -+out: -+ si_read_unlock(inode->i_sb); -+ mutex_unlock(&inode->i_mutex); -+ return err; -+} -+ -+static long aufs_fallocate(struct file *file, int mode, loff_t offset, -+ loff_t len) -+{ -+ long err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *h_file; -+ -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ lockdep_off(); -+ err = do_fallocate(h_file, mode, offset, len); -+ lockdep_on(); -+ au_write_post(inode, h_file, &wpre, /*written*/1); -+ -+out: -+ si_read_unlock(inode->i_sb); -+ mutex_unlock(&inode->i_mutex); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * The locking order around current->mmap_sem. -+ * - in most and regular cases -+ * file I/O syscall -- aufs_read() or something -+ * -- si_rwsem for read -- mmap_sem -+ * (Note that [fdi]i_rwsem are released before mmap_sem). -+ * - in mmap case -+ * mmap(2) -- mmap_sem -- aufs_mmap() -- si_rwsem for read -- [fdi]i_rwsem -+ * This AB-BA order is definitly bad, but is not a problem since "si_rwsem for -+ * read" allows muliple processes to acquire it and [fdi]i_rwsem are not held in -+ * file I/O. Aufs needs to stop lockdep in aufs_mmap() though. 
-+ * It means that when aufs acquires si_rwsem for write, the process should never -+ * acquire mmap_sem. -+ * -+ * Actually aufs_iterate() holds [fdi]i_rwsem before mmap_sem, but this is not a -+ * problem either since any directory is not able to be mmap-ed. -+ * The similar scenario is applied to aufs_readlink() too. -+ */ -+ -+#if 0 /* stop calling security_file_mmap() */ -+/* cf. linux/include/linux/mman.h: calc_vm_prot_bits() */ -+#define AuConv_VM_PROT(f, b) _calc_vm_trans(f, VM_##b, PROT_##b) -+ -+static unsigned long au_arch_prot_conv(unsigned long flags) -+{ -+ /* currently ppc64 only */ -+#ifdef CONFIG_PPC64 -+ /* cf. linux/arch/powerpc/include/asm/mman.h */ -+ AuDebugOn(arch_calc_vm_prot_bits(-1) != VM_SAO); -+ return AuConv_VM_PROT(flags, SAO); -+#else -+ AuDebugOn(arch_calc_vm_prot_bits(-1)); -+ return 0; -+#endif -+} -+ -+static unsigned long au_prot_conv(unsigned long flags) -+{ -+ return AuConv_VM_PROT(flags, READ) -+ | AuConv_VM_PROT(flags, WRITE) -+ | AuConv_VM_PROT(flags, EXEC) -+ | au_arch_prot_conv(flags); -+} -+ -+/* cf. 
linux/include/linux/mman.h: calc_vm_flag_bits() */ -+#define AuConv_VM_MAP(f, b) _calc_vm_trans(f, VM_##b, MAP_##b) -+ -+static unsigned long au_flag_conv(unsigned long flags) -+{ -+ return AuConv_VM_MAP(flags, GROWSDOWN) -+ | AuConv_VM_MAP(flags, DENYWRITE) -+ | AuConv_VM_MAP(flags, LOCKED); -+} -+#endif -+ -+static int aufs_mmap(struct file *file, struct vm_area_struct *vma) -+{ -+ int err; -+ const unsigned char wlock -+ = (file->f_mode & FMODE_WRITE) && (vma->vm_flags & VM_SHARED); -+ struct super_block *sb; -+ struct file *h_file; -+ struct inode *inode; -+ -+ AuDbgVmRegion(file, vma); -+ -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ lockdep_off(); -+ si_read_lock(sb, AuLock_NOPLMW); -+ -+ h_file = au_write_pre(file, wlock, /*wpre*/NULL); -+ lockdep_on(); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = 0; -+ au_set_mmapped(file); -+ au_vm_file_reset(vma, h_file); -+ /* -+ * we cannot call security_mmap_file() here since it may acquire -+ * mmap_sem or i_mutex. -+ * -+ * err = security_mmap_file(h_file, au_prot_conv(vma->vm_flags), -+ * au_flag_conv(vma->vm_flags)); -+ */ -+ if (!err) -+ err = h_file->f_op->mmap(h_file, vma); -+ if (!err) { -+ au_vm_prfile_set(vma, file); -+ fsstack_copy_attr_atime(inode, file_inode(h_file)); -+ goto out_fput; /* success */ -+ } -+ au_unset_mmapped(file); -+ au_vm_file_reset(vma, file); -+ -+out_fput: -+ lockdep_off(); -+ ii_write_unlock(inode); -+ lockdep_on(); -+ fput(h_file); -+out: -+ lockdep_off(); -+ si_read_unlock(sb); -+ lockdep_on(); -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_fsync_nondir(struct file *file, loff_t start, loff_t end, -+ int datasync) -+{ -+ int err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *h_file; -+ -+ err = 0; /* -EBADF; */ /* posix? 
*/ -+ if (unlikely(!(file->f_mode & FMODE_WRITE))) -+ goto out; -+ -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out_unlock; -+ -+ err = vfsub_fsync(h_file, &h_file->f_path, datasync); -+ au_write_post(inode, h_file, &wpre, /*written*/0); -+ -+out_unlock: -+ si_read_unlock(inode->i_sb); -+ mutex_unlock(&inode->i_mutex); -+out: -+ return err; -+} -+ -+/* no one supports this operation, currently */ -+#if 0 -+static int aufs_aio_fsync_nondir(struct kiocb *kio, int datasync) -+{ -+ int err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *file, *h_file; -+ -+ err = 0; /* -EBADF; */ /* posix? */ -+ if (unlikely(!(file->f_mode & FMODE_WRITE))) -+ goto out; -+ -+ file = kio->ki_filp; -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out_unlock; -+ -+ err = -ENOSYS; -+ h_file = au_hf_top(file); -+ if (h_file->f_op->aio_fsync) { -+ struct mutex *h_mtx; -+ -+ h_mtx = &file_inode(h_file)->i_mutex; -+ if (!is_sync_kiocb(kio)) { -+ get_file(h_file); -+ fput(file); -+ } -+ kio->ki_filp = h_file; -+ err = h_file->f_op->aio_fsync(kio, datasync); -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD); -+ if (!err) -+ vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL); -+ /*ignore*/ -+ mutex_unlock(h_mtx); -+ } -+ au_write_post(inode, h_file, &wpre, /*written*/0); -+ -+out_unlock: -+ si_read_unlock(inode->sb); -+ mutex_unlock(&inode->i_mutex); -+out: -+ return err; -+} -+#endif -+ -+static int aufs_fasync(int fd, struct file *file, int flag) -+{ -+ int err; -+ struct file *h_file; -+ struct super_block *sb; -+ -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ if 
(h_file->f_op->fasync) -+ err = h_file->f_op->fasync(fd, h_file, flag); -+ fput(h_file); /* instead of au_read_post() */ -+ -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+static int aufs_setfl(struct file *file, unsigned long arg) -+{ -+ int err; -+ struct file *h_file; -+ struct super_block *sb; -+ -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ arg |= vfsub_file_flags(file) & FASYNC; /* stop calling h_file->fasync */ -+ err = setfl(/*unused fd*/-1, h_file, arg); -+ fput(h_file); /* instead of au_read_post() */ -+ -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* no one supports this operation, currently */ -+#if 0 -+static ssize_t aufs_sendpage(struct file *file, struct page *page, int offset, -+ size_t len, loff_t *pos, int more) -+{ -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+const struct file_operations aufs_file_fop = { -+ .owner = THIS_MODULE, -+ -+ .llseek = default_llseek, -+ -+ .read = aufs_read, -+ .write = aufs_write, -+ .read_iter = aufs_read_iter, -+ .write_iter = aufs_write_iter, -+ -+#ifdef CONFIG_AUFS_POLL -+ .poll = aufs_poll, -+#endif -+ .unlocked_ioctl = aufs_ioctl_nondir, -+#ifdef CONFIG_COMPAT -+ .compat_ioctl = aufs_compat_ioctl_nondir, -+#endif -+ .mmap = aufs_mmap, -+ .open = aufs_open_nondir, -+ .flush = aufs_flush_nondir, -+ .release = aufs_release_nondir, -+ .fsync = aufs_fsync_nondir, -+ /* .aio_fsync = aufs_aio_fsync_nondir, */ -+ .fasync = aufs_fasync, -+ /* .sendpage = aufs_sendpage, */ -+ .setfl = aufs_setfl, -+ .splice_write = aufs_splice_write, -+ .splice_read = aufs_splice_read, -+#if 0 -+ .aio_splice_write = aufs_aio_splice_write, -+ .aio_splice_read = aufs_aio_splice_read, -+#endif -+ .fallocate = aufs_fallocate -+}; -diff --git 
a/fs/aufs/fhsm.c b/fs/aufs/fhsm.c -new file mode 100644 -index 0000000..5b3ad74 ---- /dev/null -+++ b/fs/aufs/fhsm.c -@@ -0,0 +1,426 @@ -+/* -+ * Copyright (C) 2011-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ */ -+ -+/* -+ * File-based Hierarchy Storage Management -+ */ -+ -+#include -+#include -+#include -+#include -+#include "aufs.h" -+ -+static aufs_bindex_t au_fhsm_bottom(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ SiMustAnyLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ AuDebugOn(!fhsm); -+ return fhsm->fhsm_bottom; -+} -+ -+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ AuDebugOn(!fhsm); -+ fhsm->fhsm_bottom = bindex; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_fhsm_test_jiffy(struct au_sbinfo *sbinfo, struct au_branch *br) -+{ -+ struct au_br_fhsm *bf; -+ -+ bf = br->br_fhsm; -+ MtxMustLock(&bf->bf_lock); -+ -+ return !bf->bf_readable -+ || time_after(jiffies, -+ bf->bf_jiffy + sbinfo->si_fhsm.fhsm_expire); -+} -+ -+/* 
---------------------------------------------------------------------- */ -+ -+static void au_fhsm_notify(struct super_block *sb, int val) -+{ -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ SiMustAnyLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ if (au_fhsm_pid(fhsm) -+ && atomic_read(&fhsm->fhsm_readable) != -1) { -+ atomic_set(&fhsm->fhsm_readable, val); -+ if (val) -+ wake_up(&fhsm->fhsm_wqh); -+ } -+} -+ -+static int au_fhsm_stfs(struct super_block *sb, aufs_bindex_t bindex, -+ struct aufs_stfs *rstfs, int do_lock, int do_notify) -+{ -+ int err; -+ struct au_branch *br; -+ struct au_br_fhsm *bf; -+ -+ br = au_sbr(sb, bindex); -+ AuDebugOn(au_br_rdonly(br)); -+ bf = br->br_fhsm; -+ AuDebugOn(!bf); -+ -+ if (do_lock) -+ mutex_lock(&bf->bf_lock); -+ else -+ MtxMustLock(&bf->bf_lock); -+ -+ /* sb->s_root for NFS is unreliable */ -+ err = au_br_stfs(br, &bf->bf_stfs); -+ if (unlikely(err)) { -+ AuErr1("FHSM failed (%d), b%d, ignored.\n", bindex, err); -+ goto out; -+ } -+ -+ bf->bf_jiffy = jiffies; -+ bf->bf_readable = 1; -+ if (do_notify) -+ au_fhsm_notify(sb, /*val*/1); -+ if (rstfs) -+ *rstfs = bf->bf_stfs; -+ -+out: -+ if (do_lock) -+ mutex_unlock(&bf->bf_lock); -+ au_fhsm_notify(sb, /*val*/1); -+ -+ return err; -+} -+ -+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ struct au_branch *br; -+ struct au_br_fhsm *bf; -+ -+ AuDbg("b%d, force %d\n", bindex, force); -+ SiMustAnyLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ if (!au_ftest_si(sbinfo, FHSM) -+ || fhsm->fhsm_bottom == bindex) -+ return; -+ -+ br = au_sbr(sb, bindex); -+ bf = br->br_fhsm; -+ AuDebugOn(!bf); -+ mutex_lock(&bf->bf_lock); -+ if (force -+ || au_fhsm_pid(fhsm) -+ || au_fhsm_test_jiffy(sbinfo, br)) -+ err = au_fhsm_stfs(sb, bindex, /*rstfs*/NULL, /*do_lock*/0, -+ /*do_notify*/1); -+ mutex_unlock(&bf->bf_lock); -+} -+ -+void 
au_fhsm_wrote_all(struct super_block *sb, int force) -+{ -+ aufs_bindex_t bindex, bend; -+ struct au_branch *br; -+ -+ /* exclude the bottom */ -+ bend = au_fhsm_bottom(sb); -+ for (bindex = 0; bindex < bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (au_br_fhsm(br->br_perm)) -+ au_fhsm_wrote(sb, bindex, force); -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static unsigned int au_fhsm_poll(struct file *file, -+ struct poll_table_struct *wait) -+{ -+ unsigned int mask; -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ mask = 0; -+ sbinfo = file->private_data; -+ fhsm = &sbinfo->si_fhsm; -+ poll_wait(file, &fhsm->fhsm_wqh, wait); -+ if (atomic_read(&fhsm->fhsm_readable)) -+ mask = POLLIN /* | POLLRDNORM */; -+ -+ AuTraceErr((int)mask); -+ return mask; -+} -+ -+static int au_fhsm_do_read_one(struct aufs_stbr __user *stbr, -+ struct aufs_stfs *stfs, __s16 brid) -+{ -+ int err; -+ -+ err = copy_to_user(&stbr->stfs, stfs, sizeof(*stfs)); -+ if (!err) -+ err = __put_user(brid, &stbr->brid); -+ if (unlikely(err)) -+ err = -EFAULT; -+ -+ return err; -+} -+ -+static ssize_t au_fhsm_do_read(struct super_block *sb, -+ struct aufs_stbr __user *stbr, size_t count) -+{ -+ ssize_t err; -+ int nstbr; -+ aufs_bindex_t bindex, bend; -+ struct au_branch *br; -+ struct au_br_fhsm *bf; -+ -+ /* except the bottom branch */ -+ err = 0; -+ nstbr = 0; -+ bend = au_fhsm_bottom(sb); -+ for (bindex = 0; !err && bindex < bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (!au_br_fhsm(br->br_perm)) -+ continue; -+ -+ bf = br->br_fhsm; -+ mutex_lock(&bf->bf_lock); -+ if (bf->bf_readable) { -+ err = -EFAULT; -+ if (count >= sizeof(*stbr)) -+ err = au_fhsm_do_read_one(stbr++, &bf->bf_stfs, -+ br->br_id); -+ if (!err) { -+ bf->bf_readable = 0; -+ count -= sizeof(*stbr); -+ nstbr++; -+ } -+ } -+ mutex_unlock(&bf->bf_lock); -+ } -+ if (!err) -+ err = sizeof(*stbr) * nstbr; -+ -+ return err; -+} -+ -+static ssize_t au_fhsm_read(struct 
file *file, char __user *buf, size_t count, -+ loff_t *pos) -+{ -+ ssize_t err; -+ int readable; -+ aufs_bindex_t nfhsm, bindex, bend; -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ struct au_branch *br; -+ struct super_block *sb; -+ -+ err = 0; -+ sbinfo = file->private_data; -+ fhsm = &sbinfo->si_fhsm; -+need_data: -+ spin_lock_irq(&fhsm->fhsm_wqh.lock); -+ if (!atomic_read(&fhsm->fhsm_readable)) { -+ if (vfsub_file_flags(file) & O_NONBLOCK) -+ err = -EAGAIN; -+ else -+ err = wait_event_interruptible_locked_irq -+ (fhsm->fhsm_wqh, -+ atomic_read(&fhsm->fhsm_readable)); -+ } -+ spin_unlock_irq(&fhsm->fhsm_wqh.lock); -+ if (unlikely(err)) -+ goto out; -+ -+ /* sb may already be dead */ -+ au_rw_read_lock(&sbinfo->si_rwsem); -+ readable = atomic_read(&fhsm->fhsm_readable); -+ if (readable > 0) { -+ sb = sbinfo->si_sb; -+ AuDebugOn(!sb); -+ /* exclude the bottom branch */ -+ nfhsm = 0; -+ bend = au_fhsm_bottom(sb); -+ for (bindex = 0; bindex < bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (au_br_fhsm(br->br_perm)) -+ nfhsm++; -+ } -+ err = -EMSGSIZE; -+ if (nfhsm * sizeof(struct aufs_stbr) <= count) { -+ atomic_set(&fhsm->fhsm_readable, 0); -+ err = au_fhsm_do_read(sbinfo->si_sb, (void __user *)buf, -+ count); -+ } -+ } -+ au_rw_read_unlock(&sbinfo->si_rwsem); -+ if (!readable) -+ goto need_data; -+ -+out: -+ return err; -+} -+ -+static int au_fhsm_release(struct inode *inode, struct file *file) -+{ -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ /* sb may already be dead */ -+ sbinfo = file->private_data; -+ fhsm = &sbinfo->si_fhsm; -+ spin_lock(&fhsm->fhsm_spin); -+ fhsm->fhsm_pid = 0; -+ spin_unlock(&fhsm->fhsm_spin); -+ kobject_put(&sbinfo->si_kobj); -+ -+ return 0; -+} -+ -+static const struct file_operations au_fhsm_fops = { -+ .owner = THIS_MODULE, -+ .llseek = noop_llseek, -+ .read = au_fhsm_read, -+ .poll = au_fhsm_poll, -+ .release = au_fhsm_release -+}; -+ -+int au_fhsm_fd(struct super_block *sb, int oflags) -+{ -+ int err, fd; 
-+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ err = -EPERM; -+ if (unlikely(!capable(CAP_SYS_ADMIN))) -+ goto out; -+ -+ err = -EINVAL; -+ if (unlikely(oflags & ~(O_CLOEXEC | O_NONBLOCK))) -+ goto out; -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ spin_lock(&fhsm->fhsm_spin); -+ if (!fhsm->fhsm_pid) -+ fhsm->fhsm_pid = current->pid; -+ else -+ err = -EBUSY; -+ spin_unlock(&fhsm->fhsm_spin); -+ if (unlikely(err)) -+ goto out; -+ -+ oflags |= O_RDONLY; -+ /* oflags |= FMODE_NONOTIFY; */ -+ fd = anon_inode_getfd("[aufs_fhsm]", &au_fhsm_fops, sbinfo, oflags); -+ err = fd; -+ if (unlikely(fd < 0)) -+ goto out_pid; -+ -+ /* succeed reglardless 'fhsm' status */ -+ kobject_get(&sbinfo->si_kobj); -+ si_noflush_read_lock(sb); -+ if (au_ftest_si(sbinfo, FHSM)) -+ au_fhsm_wrote_all(sb, /*force*/0); -+ si_read_unlock(sb); -+ goto out; /* success */ -+ -+out_pid: -+ spin_lock(&fhsm->fhsm_spin); -+ fhsm->fhsm_pid = 0; -+ spin_unlock(&fhsm->fhsm_spin); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_fhsm_br_alloc(struct au_branch *br) -+{ -+ int err; -+ -+ err = 0; -+ br->br_fhsm = kmalloc(sizeof(*br->br_fhsm), GFP_NOFS); -+ if (br->br_fhsm) -+ au_br_fhsm_init(br->br_fhsm); -+ else -+ err = -ENOMEM; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_fhsm_fin(struct super_block *sb) -+{ -+ au_fhsm_notify(sb, /*val*/-1); -+} -+ -+void au_fhsm_init(struct au_sbinfo *sbinfo) -+{ -+ struct au_fhsm *fhsm; -+ -+ fhsm = &sbinfo->si_fhsm; -+ spin_lock_init(&fhsm->fhsm_spin); -+ init_waitqueue_head(&fhsm->fhsm_wqh); -+ atomic_set(&fhsm->fhsm_readable, 0); -+ fhsm->fhsm_expire -+ = msecs_to_jiffies(AUFS_FHSM_CACHE_DEF_SEC * MSEC_PER_SEC); -+ fhsm->fhsm_bottom = -1; -+} -+ -+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec) -+{ -+ sbinfo->si_fhsm.fhsm_expire -+ = msecs_to_jiffies(sec * 
MSEC_PER_SEC); -+} -+ -+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo) -+{ -+ unsigned int u; -+ -+ if (!au_ftest_si(sbinfo, FHSM)) -+ return; -+ -+ u = jiffies_to_msecs(sbinfo->si_fhsm.fhsm_expire) / MSEC_PER_SEC; -+ if (u != AUFS_FHSM_CACHE_DEF_SEC) -+ seq_printf(seq, ",fhsm_sec=%u", u); -+} -diff --git a/fs/aufs/file.c b/fs/aufs/file.c -new file mode 100644 -index 0000000..12c7620 ---- /dev/null -+++ b/fs/aufs/file.c -@@ -0,0 +1,857 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * handling file/dir, and address_space operation -+ */ -+ -+#ifdef CONFIG_AUFS_DEBUG -+#include -+#endif -+#include -+#include "aufs.h" -+ -+/* drop flags for writing */ -+unsigned int au_file_roflags(unsigned int flags) -+{ -+ flags &= ~(O_WRONLY | O_RDWR | O_APPEND | O_CREAT | O_TRUNC); -+ flags |= O_RDONLY | O_NOATIME; -+ return flags; -+} -+ -+/* common functions to regular file and dir */ -+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags, -+ struct file *file, int force_wr) -+{ -+ struct file *h_file; -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct path h_path; -+ int err; -+ -+ /* a race condition can happen between open and unlink/rmdir */ -+ h_file = ERR_PTR(-ENOENT); -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (au_test_nfsd() && !h_dentry) -+ goto out; -+ h_inode = h_dentry->d_inode; -+ if (au_test_nfsd() && !h_inode) -+ goto out; -+ spin_lock(&h_dentry->d_lock); -+ err = (!d_unhashed(dentry) && d_unlinked(h_dentry)) -+ || !h_inode -+ /* || !dentry->d_inode->i_nlink */ -+ ; -+ spin_unlock(&h_dentry->d_lock); -+ if (unlikely(err)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ br = au_sbr(sb, bindex); -+ err = au_br_test_oflag(flags, br); -+ h_file = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ /* drop flags for writing */ -+ if (au_test_ro(sb, bindex, dentry->d_inode)) { -+ if (force_wr && !(flags & O_WRONLY)) -+ force_wr = 0; -+ flags = au_file_roflags(flags); -+ if (force_wr) { -+ h_file = ERR_PTR(-EROFS); -+ flags = au_file_roflags(flags); -+ if (unlikely(vfsub_native_ro(h_inode) -+ || IS_APPEND(h_inode))) -+ goto out; -+ flags &= ~O_ACCMODE; -+ flags |= O_WRONLY; -+ } -+ } -+ flags &= ~O_CREAT; -+ atomic_inc(&br->br_count); -+ h_path.dentry = h_dentry; -+ h_path.mnt = au_br_mnt(br); -+ h_file = vfsub_dentry_open(&h_path, flags); -+ if (IS_ERR(h_file)) -+ goto out_br; -+ -+ if (flags & __FMODE_EXEC) { -+ err = 
deny_write_access(h_file); -+ if (unlikely(err)) { -+ fput(h_file); -+ h_file = ERR_PTR(err); -+ goto out_br; -+ } -+ } -+ fsnotify_open(h_file); -+ goto out; /* success */ -+ -+out_br: -+ atomic_dec(&br->br_count); -+out: -+ return h_file; -+} -+ -+static int au_cmoo(struct dentry *dentry) -+{ -+ int err, cmoo; -+ unsigned int udba; -+ struct path h_path; -+ struct au_pin pin; -+ struct au_cp_generic cpg = { -+ .dentry = dentry, -+ .bdst = -1, -+ .bsrc = -1, -+ .len = -1, -+ .pin = &pin, -+ .flags = AuCpup_DTIME | AuCpup_HOPEN -+ }; -+ struct inode *inode, *delegated; -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ pid_t pid; -+ struct au_branch *br; -+ struct dentry *parent; -+ struct au_hinode *hdir; -+ -+ DiMustWriteLock(dentry); -+ inode = dentry->d_inode; -+ IiMustWriteLock(inode); -+ -+ err = 0; -+ if (IS_ROOT(dentry)) -+ goto out; -+ cpg.bsrc = au_dbstart(dentry); -+ if (!cpg.bsrc) -+ goto out; -+ -+ sb = dentry->d_sb; -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ pid = au_fhsm_pid(fhsm); -+ if (pid -+ && (current->pid == pid -+ || current->real_parent->pid == pid)) -+ goto out; -+ -+ br = au_sbr(sb, cpg.bsrc); -+ cmoo = au_br_cmoo(br->br_perm); -+ if (!cmoo) -+ goto out; -+ if (!S_ISREG(inode->i_mode)) -+ cmoo &= AuBrAttr_COO_ALL; -+ if (!cmoo) -+ goto out; -+ -+ parent = dget_parent(dentry); -+ di_write_lock_parent(parent); -+ err = au_wbr_do_copyup_bu(dentry, cpg.bsrc - 1); -+ cpg.bdst = err; -+ if (unlikely(err < 0)) { -+ err = 0; /* there is no upper writable branch */ -+ goto out_dgrade; -+ } -+ AuDbg("bsrc %d, bdst %d\n", cpg.bsrc, cpg.bdst); -+ -+ /* do not respect the coo attrib for the target branch */ -+ err = au_cpup_dirs(dentry, cpg.bdst); -+ if (unlikely(err)) -+ goto out_dgrade; -+ -+ di_downgrade_lock(parent, AuLock_IR); -+ udba = au_opt_udba(sb); -+ err = au_pin(&pin, dentry, cpg.bdst, udba, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ err = 
au_sio_cpup_simple(&cpg); -+ au_unpin(&pin); -+ if (unlikely(err)) -+ goto out_parent; -+ if (!(cmoo & AuBrWAttr_MOO)) -+ goto out_parent; /* success */ -+ -+ err = au_pin(&pin, dentry, cpg.bsrc, udba, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ h_path.mnt = au_br_mnt(br); -+ h_path.dentry = au_h_dptr(dentry, cpg.bsrc); -+ hdir = au_hi(parent->d_inode, cpg.bsrc); -+ delegated = NULL; -+ err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated, /*force*/1); -+ au_unpin(&pin); -+ /* todo: keep h_dentry or not? */ -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ if (unlikely(err)) { -+ pr_err("unlink %pd after coo failed (%d), ignored\n", -+ dentry, err); -+ err = 0; -+ } -+ goto out_parent; /* success */ -+ -+out_dgrade: -+ di_downgrade_lock(parent, AuLock_IR); -+out_parent: -+ di_read_unlock(parent, AuLock_IR); -+ dput(parent); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_do_open(struct file *file, struct au_do_open_args *args) -+{ -+ int err, no_lock = args->no_lock; -+ struct dentry *dentry; -+ struct au_finfo *finfo; -+ -+ if (!no_lock) -+ err = au_finfo_init(file, args->fidir); -+ else { -+ lockdep_off(); -+ err = au_finfo_init(file, args->fidir); -+ lockdep_on(); -+ } -+ if (unlikely(err)) -+ goto out; -+ -+ dentry = file->f_dentry; -+ AuDebugOn(IS_ERR_OR_NULL(dentry)); -+ if (!no_lock) { -+ di_write_lock_child(dentry); -+ err = au_cmoo(dentry); -+ di_downgrade_lock(dentry, AuLock_IR); -+ if (!err) -+ err = args->open(file, vfsub_file_flags(file), NULL); -+ di_read_unlock(dentry, AuLock_IR); -+ } else { -+ err = au_cmoo(dentry); -+ if (!err) -+ err = args->open(file, vfsub_file_flags(file), -+ args->h_file); -+ if (!err && au_fbstart(file) != au_dbstart(dentry)) -+ /* -+ * cmoo happens after h_file was opened. -+ * need to refresh file later. 
-+ */ -+ atomic_dec(&au_fi(file)->fi_generation); -+ } -+ -+ finfo = au_fi(file); -+ if (!err) { -+ finfo->fi_file = file; -+ au_sphl_add(&finfo->fi_hlist, -+ &au_sbi(file->f_dentry->d_sb)->si_files); -+ } -+ if (!no_lock) -+ fi_write_unlock(file); -+ else { -+ lockdep_off(); -+ fi_write_unlock(file); -+ lockdep_on(); -+ } -+ if (unlikely(err)) { -+ finfo->fi_hdir = NULL; -+ au_finfo_fin(file); -+ } -+ -+out: -+ return err; -+} -+ -+int au_reopen_nondir(struct file *file) -+{ -+ int err; -+ aufs_bindex_t bstart; -+ struct dentry *dentry; -+ struct file *h_file, *h_file_tmp; -+ -+ dentry = file->f_dentry; -+ bstart = au_dbstart(dentry); -+ h_file_tmp = NULL; -+ if (au_fbstart(file) == bstart) { -+ h_file = au_hf_top(file); -+ if (file->f_mode == h_file->f_mode) -+ return 0; /* success */ -+ h_file_tmp = h_file; -+ get_file(h_file_tmp); -+ au_set_h_fptr(file, bstart, NULL); -+ } -+ AuDebugOn(au_fi(file)->fi_hdir); -+ /* -+ * it can happen -+ * file exists on both of rw and ro -+ * open --> dbstart and fbstart are both 0 -+ * prepend a branch as rw, "rw" become ro -+ * remove rw/file -+ * delete the top branch, "rw" becomes rw again -+ * --> dbstart is 1, fbstart is still 0 -+ * write --> fbstart is 0 but dbstart is 1 -+ */ -+ /* AuDebugOn(au_fbstart(file) < bstart); */ -+ -+ h_file = au_h_open(dentry, bstart, vfsub_file_flags(file) & ~O_TRUNC, -+ file, /*force_wr*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) { -+ if (h_file_tmp) { -+ atomic_inc(&au_sbr(dentry->d_sb, bstart)->br_count); -+ au_set_h_fptr(file, bstart, h_file_tmp); -+ h_file_tmp = NULL; -+ } -+ goto out; /* todo: close all? */ -+ } -+ -+ err = 0; -+ au_set_fbstart(file, bstart); -+ au_set_h_fptr(file, bstart, h_file); -+ au_update_figen(file); -+ /* todo: necessary? 
*/ -+ /* file->f_ra = h_file->f_ra; */ -+ -+out: -+ if (h_file_tmp) -+ fput(h_file_tmp); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_reopen_wh(struct file *file, aufs_bindex_t btgt, -+ struct dentry *hi_wh) -+{ -+ int err; -+ aufs_bindex_t bstart; -+ struct au_dinfo *dinfo; -+ struct dentry *h_dentry; -+ struct au_hdentry *hdp; -+ -+ dinfo = au_di(file->f_dentry); -+ AuRwMustWriteLock(&dinfo->di_rwsem); -+ -+ bstart = dinfo->di_bstart; -+ dinfo->di_bstart = btgt; -+ hdp = dinfo->di_hdentry; -+ h_dentry = hdp[0 + btgt].hd_dentry; -+ hdp[0 + btgt].hd_dentry = hi_wh; -+ err = au_reopen_nondir(file); -+ hdp[0 + btgt].hd_dentry = h_dentry; -+ dinfo->di_bstart = bstart; -+ -+ return err; -+} -+ -+static int au_ready_to_write_wh(struct file *file, loff_t len, -+ aufs_bindex_t bcpup, struct au_pin *pin) -+{ -+ int err; -+ struct inode *inode, *h_inode; -+ struct dentry *h_dentry, *hi_wh; -+ struct au_cp_generic cpg = { -+ .dentry = file->f_dentry, -+ .bdst = bcpup, -+ .bsrc = -1, -+ .len = len, -+ .pin = pin -+ }; -+ -+ au_update_dbstart(cpg.dentry); -+ inode = cpg.dentry->d_inode; -+ h_inode = NULL; -+ if (au_dbstart(cpg.dentry) <= bcpup -+ && au_dbend(cpg.dentry) >= bcpup) { -+ h_dentry = au_h_dptr(cpg.dentry, bcpup); -+ if (h_dentry) -+ h_inode = h_dentry->d_inode; -+ } -+ hi_wh = au_hi_wh(inode, bcpup); -+ if (!hi_wh && !h_inode) -+ err = au_sio_cpup_wh(&cpg, file); -+ else -+ /* already copied-up after unlink */ -+ err = au_reopen_wh(file, bcpup, hi_wh); -+ -+ if (!err -+ && (inode->i_nlink > 1 -+ || (inode->i_state & I_LINKABLE)) -+ && au_opt_test(au_mntflags(cpg.dentry->d_sb), PLINK)) -+ au_plink_append(inode, bcpup, au_h_dptr(cpg.dentry, bcpup)); -+ -+ return err; -+} -+ -+/* -+ * prepare the @file for writing. 
-+ */ -+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin) -+{ -+ int err; -+ aufs_bindex_t dbstart; -+ struct dentry *parent; -+ struct inode *inode; -+ struct super_block *sb; -+ struct file *h_file; -+ struct au_cp_generic cpg = { -+ .dentry = file->f_dentry, -+ .bdst = -1, -+ .bsrc = -1, -+ .len = len, -+ .pin = pin, -+ .flags = AuCpup_DTIME -+ }; -+ -+ sb = cpg.dentry->d_sb; -+ inode = cpg.dentry->d_inode; -+ cpg.bsrc = au_fbstart(file); -+ err = au_test_ro(sb, cpg.bsrc, inode); -+ if (!err && (au_hf_top(file)->f_mode & FMODE_WRITE)) { -+ err = au_pin(pin, cpg.dentry, cpg.bsrc, AuOpt_UDBA_NONE, -+ /*flags*/0); -+ goto out; -+ } -+ -+ /* need to cpup or reopen */ -+ parent = dget_parent(cpg.dentry); -+ di_write_lock_parent(parent); -+ err = AuWbrCopyup(au_sbi(sb), cpg.dentry); -+ cpg.bdst = err; -+ if (unlikely(err < 0)) -+ goto out_dgrade; -+ err = 0; -+ -+ if (!d_unhashed(cpg.dentry) && !au_h_dptr(parent, cpg.bdst)) { -+ err = au_cpup_dirs(cpg.dentry, cpg.bdst); -+ if (unlikely(err)) -+ goto out_dgrade; -+ } -+ -+ err = au_pin(pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (unlikely(err)) -+ goto out_dgrade; -+ -+ dbstart = au_dbstart(cpg.dentry); -+ if (dbstart <= cpg.bdst) -+ cpg.bsrc = cpg.bdst; -+ -+ if (dbstart <= cpg.bdst /* just reopen */ -+ || !d_unhashed(cpg.dentry) /* copyup and reopen */ -+ ) { -+ h_file = au_h_open_pre(cpg.dentry, cpg.bsrc, /*force_wr*/0); -+ if (IS_ERR(h_file)) -+ err = PTR_ERR(h_file); -+ else { -+ di_downgrade_lock(parent, AuLock_IR); -+ if (dbstart > cpg.bdst) -+ err = au_sio_cpup_simple(&cpg); -+ if (!err) -+ err = au_reopen_nondir(file); -+ au_h_open_post(cpg.dentry, cpg.bsrc, h_file); -+ } -+ } else { /* copyup as wh and reopen */ -+ /* -+ * since writable hfsplus branch is not supported, -+ * h_open_pre/post() are unnecessary. 
-+ */ -+ err = au_ready_to_write_wh(file, len, cpg.bdst, pin); -+ di_downgrade_lock(parent, AuLock_IR); -+ } -+ -+ if (!err) { -+ au_pin_set_parent_lflag(pin, /*lflag*/0); -+ goto out_dput; /* success */ -+ } -+ au_unpin(pin); -+ goto out_unlock; -+ -+out_dgrade: -+ di_downgrade_lock(parent, AuLock_IR); -+out_unlock: -+ di_read_unlock(parent, AuLock_IR); -+out_dput: -+ dput(parent); -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_do_flush(struct file *file, fl_owner_t id, -+ int (*flush)(struct file *file, fl_owner_t id)) -+{ -+ int err; -+ struct super_block *sb; -+ struct inode *inode; -+ -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ si_noflush_read_lock(sb); -+ fi_read_lock(file); -+ ii_read_lock_child(inode); -+ -+ err = flush(file, id); -+ au_cpup_attr_timesizes(inode); -+ -+ ii_read_unlock(inode); -+ fi_read_unlock(file); -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_file_refresh_by_inode(struct file *file, int *need_reopen) -+{ -+ int err; -+ struct au_pin pin; -+ struct au_finfo *finfo; -+ struct dentry *parent, *hi_wh; -+ struct inode *inode; -+ struct super_block *sb; -+ struct au_cp_generic cpg = { -+ .dentry = file->f_dentry, -+ .bdst = -1, -+ .bsrc = -1, -+ .len = -1, -+ .pin = &pin, -+ .flags = AuCpup_DTIME -+ }; -+ -+ FiMustWriteLock(file); -+ -+ err = 0; -+ finfo = au_fi(file); -+ sb = cpg.dentry->d_sb; -+ inode = cpg.dentry->d_inode; -+ cpg.bdst = au_ibstart(inode); -+ if (cpg.bdst == finfo->fi_btop || IS_ROOT(cpg.dentry)) -+ goto out; -+ -+ parent = dget_parent(cpg.dentry); -+ if (au_test_ro(sb, cpg.bdst, inode)) { -+ di_read_lock_parent(parent, !AuLock_IR); -+ err = AuWbrCopyup(au_sbi(sb), cpg.dentry); -+ cpg.bdst = err; -+ di_read_unlock(parent, !AuLock_IR); -+ if (unlikely(err < 0)) -+ goto out_parent; -+ err = 0; -+ } -+ -+ di_read_lock_parent(parent, AuLock_IR); -+ 
hi_wh = au_hi_wh(inode, cpg.bdst); -+ if (!S_ISDIR(inode->i_mode) -+ && au_opt_test(au_mntflags(sb), PLINK) -+ && au_plink_test(inode) -+ && !d_unhashed(cpg.dentry) -+ && cpg.bdst < au_dbstart(cpg.dentry)) { -+ err = au_test_and_cpup_dirs(cpg.dentry, cpg.bdst); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ /* always superio. */ -+ err = au_pin(&pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (!err) { -+ err = au_sio_cpup_simple(&cpg); -+ au_unpin(&pin); -+ } -+ } else if (hi_wh) { -+ /* already copied-up after unlink */ -+ err = au_reopen_wh(file, cpg.bdst, hi_wh); -+ *need_reopen = 0; -+ } -+ -+out_unlock: -+ di_read_unlock(parent, AuLock_IR); -+out_parent: -+ dput(parent); -+out: -+ return err; -+} -+ -+static void au_do_refresh_dir(struct file *file) -+{ -+ aufs_bindex_t bindex, bend, new_bindex, brid; -+ struct au_hfile *p, tmp, *q; -+ struct au_finfo *finfo; -+ struct super_block *sb; -+ struct au_fidir *fidir; -+ -+ FiMustWriteLock(file); -+ -+ sb = file->f_dentry->d_sb; -+ finfo = au_fi(file); -+ fidir = finfo->fi_hdir; -+ AuDebugOn(!fidir); -+ p = fidir->fd_hfile + finfo->fi_btop; -+ brid = p->hf_br->br_id; -+ bend = fidir->fd_bbot; -+ for (bindex = finfo->fi_btop; bindex <= bend; bindex++, p++) { -+ if (!p->hf_file) -+ continue; -+ -+ new_bindex = au_br_index(sb, p->hf_br->br_id); -+ if (new_bindex == bindex) -+ continue; -+ if (new_bindex < 0) { -+ au_set_h_fptr(file, bindex, NULL); -+ continue; -+ } -+ -+ /* swap two lower inode, and loop again */ -+ q = fidir->fd_hfile + new_bindex; -+ tmp = *q; -+ *q = *p; -+ *p = tmp; -+ if (tmp.hf_file) { -+ bindex--; -+ p--; -+ } -+ } -+ -+ p = fidir->fd_hfile; -+ if (!au_test_mmapped(file) && !d_unlinked(file->f_dentry)) { -+ bend = au_sbend(sb); -+ for (finfo->fi_btop = 0; finfo->fi_btop <= bend; -+ finfo->fi_btop++, p++) -+ if (p->hf_file) { -+ if (file_inode(p->hf_file)) -+ break; -+ au_hfput(p, file); -+ } -+ } else { -+ bend = au_br_index(sb, brid); -+ for 
(finfo->fi_btop = 0; finfo->fi_btop < bend; -+ finfo->fi_btop++, p++) -+ if (p->hf_file) -+ au_hfput(p, file); -+ bend = au_sbend(sb); -+ } -+ -+ p = fidir->fd_hfile + bend; -+ for (fidir->fd_bbot = bend; fidir->fd_bbot >= finfo->fi_btop; -+ fidir->fd_bbot--, p--) -+ if (p->hf_file) { -+ if (file_inode(p->hf_file)) -+ break; -+ au_hfput(p, file); -+ } -+ AuDebugOn(fidir->fd_bbot < finfo->fi_btop); -+} -+ -+/* -+ * after branch manipulating, refresh the file. -+ */ -+static int refresh_file(struct file *file, int (*reopen)(struct file *file)) -+{ -+ int err, need_reopen; -+ aufs_bindex_t bend, bindex; -+ struct dentry *dentry; -+ struct au_finfo *finfo; -+ struct au_hfile *hfile; -+ -+ dentry = file->f_dentry; -+ finfo = au_fi(file); -+ if (!finfo->fi_hdir) { -+ hfile = &finfo->fi_htop; -+ AuDebugOn(!hfile->hf_file); -+ bindex = au_br_index(dentry->d_sb, hfile->hf_br->br_id); -+ AuDebugOn(bindex < 0); -+ if (bindex != finfo->fi_btop) -+ au_set_fbstart(file, bindex); -+ } else { -+ err = au_fidir_realloc(finfo, au_sbend(dentry->d_sb) + 1); -+ if (unlikely(err)) -+ goto out; -+ au_do_refresh_dir(file); -+ } -+ -+ err = 0; -+ need_reopen = 1; -+ if (!au_test_mmapped(file)) -+ err = au_file_refresh_by_inode(file, &need_reopen); -+ if (!err && need_reopen && !d_unlinked(dentry)) -+ err = reopen(file); -+ if (!err) { -+ au_update_figen(file); -+ goto out; /* success */ -+ } -+ -+ /* error, close all lower files */ -+ if (finfo->fi_hdir) { -+ bend = au_fbend_dir(file); -+ for (bindex = au_fbstart(file); bindex <= bend; bindex++) -+ au_set_h_fptr(file, bindex, NULL); -+ } -+ -+out: -+ return err; -+} -+ -+/* common function to regular file and dir */ -+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file), -+ int wlock) -+{ -+ int err; -+ unsigned int sigen, figen; -+ aufs_bindex_t bstart; -+ unsigned char pseudo_link; -+ struct dentry *dentry; -+ struct inode *inode; -+ -+ err = 0; -+ dentry = file->f_dentry; -+ inode = dentry->d_inode; -+ sigen = 
au_sigen(dentry->d_sb); -+ fi_write_lock(file); -+ figen = au_figen(file); -+ di_write_lock_child(dentry); -+ bstart = au_dbstart(dentry); -+ pseudo_link = (bstart != au_ibstart(inode)); -+ if (sigen == figen && !pseudo_link && au_fbstart(file) == bstart) { -+ if (!wlock) { -+ di_downgrade_lock(dentry, AuLock_IR); -+ fi_downgrade_lock(file); -+ } -+ goto out; /* success */ -+ } -+ -+ AuDbg("sigen %d, figen %d\n", sigen, figen); -+ if (au_digen_test(dentry, sigen)) { -+ err = au_reval_dpath(dentry, sigen); -+ AuDebugOn(!err && au_digen_test(dentry, sigen)); -+ } -+ -+ if (!err) -+ err = refresh_file(file, reopen); -+ if (!err) { -+ if (!wlock) { -+ di_downgrade_lock(dentry, AuLock_IR); -+ fi_downgrade_lock(file); -+ } -+ } else { -+ di_write_unlock(dentry); -+ fi_write_unlock(file); -+ } -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* cf. aufs_nopage() */ -+/* for madvise(2) */ -+static int aufs_readpage(struct file *file __maybe_unused, struct page *page) -+{ -+ unlock_page(page); -+ return 0; -+} -+ -+/* it will never be called, but necessary to support O_DIRECT */ -+static ssize_t aufs_direct_IO(int rw, struct kiocb *iocb, -+ struct iov_iter *iter, loff_t offset) -+{ BUG(); return 0; } -+ -+/* -+ * it will never be called, but madvise and fadvise behaves differently -+ * when get_xip_mem is defined -+ */ -+static int aufs_get_xip_mem(struct address_space *mapping, pgoff_t pgoff, -+ int create, void **kmem, unsigned long *pfn) -+{ BUG(); return 0; } -+ -+/* they will never be called. 
*/ -+#ifdef CONFIG_AUFS_DEBUG -+static int aufs_write_begin(struct file *file, struct address_space *mapping, -+ loff_t pos, unsigned len, unsigned flags, -+ struct page **pagep, void **fsdata) -+{ AuUnsupport(); return 0; } -+static int aufs_write_end(struct file *file, struct address_space *mapping, -+ loff_t pos, unsigned len, unsigned copied, -+ struct page *page, void *fsdata) -+{ AuUnsupport(); return 0; } -+static int aufs_writepage(struct page *page, struct writeback_control *wbc) -+{ AuUnsupport(); return 0; } -+ -+static int aufs_set_page_dirty(struct page *page) -+{ AuUnsupport(); return 0; } -+static void aufs_invalidatepage(struct page *page, unsigned int offset, -+ unsigned int length) -+{ AuUnsupport(); } -+static int aufs_releasepage(struct page *page, gfp_t gfp) -+{ AuUnsupport(); return 0; } -+#if 0 /* called by memory compaction regardless file */ -+static int aufs_migratepage(struct address_space *mapping, struct page *newpage, -+ struct page *page, enum migrate_mode mode) -+{ AuUnsupport(); return 0; } -+#endif -+static int aufs_launder_page(struct page *page) -+{ AuUnsupport(); return 0; } -+static int aufs_is_partially_uptodate(struct page *page, -+ unsigned long from, -+ unsigned long count) -+{ AuUnsupport(); return 0; } -+static void aufs_is_dirty_writeback(struct page *page, bool *dirty, -+ bool *writeback) -+{ AuUnsupport(); } -+static int aufs_error_remove_page(struct address_space *mapping, -+ struct page *page) -+{ AuUnsupport(); return 0; } -+static int aufs_swap_activate(struct swap_info_struct *sis, struct file *file, -+ sector_t *span) -+{ AuUnsupport(); return 0; } -+static void aufs_swap_deactivate(struct file *file) -+{ AuUnsupport(); } -+#endif /* CONFIG_AUFS_DEBUG */ -+ -+const struct address_space_operations aufs_aop = { -+ .readpage = aufs_readpage, -+ .direct_IO = aufs_direct_IO, -+ .get_xip_mem = aufs_get_xip_mem, -+#ifdef CONFIG_AUFS_DEBUG -+ .writepage = aufs_writepage, -+ /* no writepages, because of writepage */ -+ 
.set_page_dirty = aufs_set_page_dirty, -+ /* no readpages, because of readpage */ -+ .write_begin = aufs_write_begin, -+ .write_end = aufs_write_end, -+ /* no bmap, no block device */ -+ .invalidatepage = aufs_invalidatepage, -+ .releasepage = aufs_releasepage, -+ /* is fallback_migrate_page ok? */ -+ /* .migratepage = aufs_migratepage, */ -+ .launder_page = aufs_launder_page, -+ .is_partially_uptodate = aufs_is_partially_uptodate, -+ .is_dirty_writeback = aufs_is_dirty_writeback, -+ .error_remove_page = aufs_error_remove_page, -+ .swap_activate = aufs_swap_activate, -+ .swap_deactivate = aufs_swap_deactivate -+#endif /* CONFIG_AUFS_DEBUG */ -+}; -diff --git a/fs/aufs/file.h b/fs/aufs/file.h -new file mode 100644 -index 0000000..564be91 ---- /dev/null -+++ b/fs/aufs/file.h -@@ -0,0 +1,291 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * file operations -+ */ -+ -+#ifndef __AUFS_FILE_H__ -+#define __AUFS_FILE_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+#include -+#include "rwsem.h" -+ -+struct au_branch; -+struct au_hfile { -+ struct file *hf_file; -+ struct au_branch *hf_br; -+}; -+ -+struct au_vdir; -+struct au_fidir { -+ aufs_bindex_t fd_bbot; -+ aufs_bindex_t fd_nent; -+ struct au_vdir *fd_vdir_cache; -+ struct au_hfile fd_hfile[]; -+}; -+ -+static inline int au_fidir_sz(int nent) -+{ -+ AuDebugOn(nent < 0); -+ return sizeof(struct au_fidir) + sizeof(struct au_hfile) * nent; -+} -+ -+struct au_finfo { -+ atomic_t fi_generation; -+ -+ struct au_rwsem fi_rwsem; -+ aufs_bindex_t fi_btop; -+ -+ /* do not union them */ -+ struct { /* for non-dir */ -+ struct au_hfile fi_htop; -+ atomic_t fi_mmapped; -+ }; -+ struct au_fidir *fi_hdir; /* for dir only */ -+ -+ struct hlist_node fi_hlist; -+ struct file *fi_file; /* very ugly */ -+} ____cacheline_aligned_in_smp; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* file.c */ -+extern const struct address_space_operations aufs_aop; -+unsigned int au_file_roflags(unsigned int flags); -+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags, -+ struct file *file, int force_wr); -+struct au_do_open_args { -+ int no_lock; -+ int (*open)(struct file *file, int flags, -+ struct file *h_file); -+ struct au_fidir *fidir; -+ struct file *h_file; -+}; -+int au_do_open(struct file *file, struct au_do_open_args *args); -+int au_reopen_nondir(struct file *file); -+struct au_pin; -+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin); -+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file), -+ int wlock); -+int au_do_flush(struct file *file, fl_owner_t id, -+ int (*flush)(struct file *file, fl_owner_t id)); -+ -+/* poll.c */ -+#ifdef CONFIG_AUFS_POLL -+unsigned int aufs_poll(struct file *file, poll_table *wait); -+#endif -+ -+#ifdef 
CONFIG_AUFS_BR_HFSPLUS -+/* hfsplus.c */ -+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex, -+ int force_wr); -+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex, -+ struct file *h_file); -+#else -+AuStub(struct file *, au_h_open_pre, return NULL, struct dentry *dentry, -+ aufs_bindex_t bindex, int force_wr) -+AuStubVoid(au_h_open_post, struct dentry *dentry, aufs_bindex_t bindex, -+ struct file *h_file); -+#endif -+ -+/* f_op.c */ -+extern const struct file_operations aufs_file_fop; -+int au_do_open_nondir(struct file *file, int flags, struct file *h_file); -+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file); -+struct file *au_read_pre(struct file *file, int keep_fi); -+ -+/* finfo.c */ -+void au_hfput(struct au_hfile *hf, struct file *file); -+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex, -+ struct file *h_file); -+ -+void au_update_figen(struct file *file); -+struct au_fidir *au_fidir_alloc(struct super_block *sb); -+int au_fidir_realloc(struct au_finfo *finfo, int nbr); -+ -+void au_fi_init_once(void *_fi); -+void au_finfo_fin(struct file *file); -+int au_finfo_init(struct file *file, struct au_fidir *fidir); -+ -+/* ioctl.c */ -+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg); -+#ifdef CONFIG_COMPAT -+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd, -+ unsigned long arg); -+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd, -+ unsigned long arg); -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct au_finfo *au_fi(struct file *file) -+{ -+ return file->private_data; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * fi_read_lock, fi_write_lock, -+ * fi_read_unlock, fi_write_unlock, fi_downgrade_lock -+ */ -+AuSimpleRwsemFuncs(fi, struct file *f, &au_fi(f)->fi_rwsem); -+ -+#define FiMustNoWaiters(f) 
AuRwMustNoWaiters(&au_fi(f)->fi_rwsem) -+#define FiMustAnyLock(f) AuRwMustAnyLock(&au_fi(f)->fi_rwsem) -+#define FiMustWriteLock(f) AuRwMustWriteLock(&au_fi(f)->fi_rwsem) -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* todo: hard/soft set? */ -+static inline aufs_bindex_t au_fbstart(struct file *file) -+{ -+ FiMustAnyLock(file); -+ return au_fi(file)->fi_btop; -+} -+ -+static inline aufs_bindex_t au_fbend_dir(struct file *file) -+{ -+ FiMustAnyLock(file); -+ AuDebugOn(!au_fi(file)->fi_hdir); -+ return au_fi(file)->fi_hdir->fd_bbot; -+} -+ -+static inline struct au_vdir *au_fvdir_cache(struct file *file) -+{ -+ FiMustAnyLock(file); -+ AuDebugOn(!au_fi(file)->fi_hdir); -+ return au_fi(file)->fi_hdir->fd_vdir_cache; -+} -+ -+static inline void au_set_fbstart(struct file *file, aufs_bindex_t bindex) -+{ -+ FiMustWriteLock(file); -+ au_fi(file)->fi_btop = bindex; -+} -+ -+static inline void au_set_fbend_dir(struct file *file, aufs_bindex_t bindex) -+{ -+ FiMustWriteLock(file); -+ AuDebugOn(!au_fi(file)->fi_hdir); -+ au_fi(file)->fi_hdir->fd_bbot = bindex; -+} -+ -+static inline void au_set_fvdir_cache(struct file *file, -+ struct au_vdir *vdir_cache) -+{ -+ FiMustWriteLock(file); -+ AuDebugOn(!au_fi(file)->fi_hdir); -+ au_fi(file)->fi_hdir->fd_vdir_cache = vdir_cache; -+} -+ -+static inline struct file *au_hf_top(struct file *file) -+{ -+ FiMustAnyLock(file); -+ AuDebugOn(au_fi(file)->fi_hdir); -+ return au_fi(file)->fi_htop.hf_file; -+} -+ -+static inline struct file *au_hf_dir(struct file *file, aufs_bindex_t bindex) -+{ -+ FiMustAnyLock(file); -+ AuDebugOn(!au_fi(file)->fi_hdir); -+ return au_fi(file)->fi_hdir->fd_hfile[0 + bindex].hf_file; -+} -+ -+/* todo: memory barrier? 
*/ -+static inline unsigned int au_figen(struct file *f) -+{ -+ return atomic_read(&au_fi(f)->fi_generation); -+} -+ -+static inline void au_set_mmapped(struct file *f) -+{ -+ if (atomic_inc_return(&au_fi(f)->fi_mmapped)) -+ return; -+ pr_warn("fi_mmapped wrapped around\n"); -+ while (!atomic_inc_return(&au_fi(f)->fi_mmapped)) -+ ; -+} -+ -+static inline void au_unset_mmapped(struct file *f) -+{ -+ atomic_dec(&au_fi(f)->fi_mmapped); -+} -+ -+static inline int au_test_mmapped(struct file *f) -+{ -+ return atomic_read(&au_fi(f)->fi_mmapped); -+} -+ -+/* customize vma->vm_file */ -+ -+static inline void au_do_vm_file_reset(struct vm_area_struct *vma, -+ struct file *file) -+{ -+ struct file *f; -+ -+ f = vma->vm_file; -+ get_file(file); -+ vma->vm_file = file; -+ fput(f); -+} -+ -+#ifdef CONFIG_MMU -+#define AuDbgVmRegion(file, vma) do {} while (0) -+ -+static inline void au_vm_file_reset(struct vm_area_struct *vma, -+ struct file *file) -+{ -+ au_do_vm_file_reset(vma, file); -+} -+#else -+#define AuDbgVmRegion(file, vma) \ -+ AuDebugOn((vma)->vm_region && (vma)->vm_region->vm_file != (file)) -+ -+static inline void au_vm_file_reset(struct vm_area_struct *vma, -+ struct file *file) -+{ -+ struct file *f; -+ -+ au_do_vm_file_reset(vma, file); -+ f = vma->vm_region->vm_file; -+ get_file(file); -+ vma->vm_region->vm_file = file; -+ fput(f); -+} -+#endif /* CONFIG_MMU */ -+ -+/* handle vma->vm_prfile */ -+static inline void au_vm_prfile_set(struct vm_area_struct *vma, -+ struct file *file) -+{ -+ get_file(file); -+ vma->vm_prfile = file; -+#ifndef CONFIG_MMU -+ get_file(file); -+ vma->vm_region->vm_prfile = file; -+#endif -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_FILE_H__ */ -diff --git a/fs/aufs/finfo.c b/fs/aufs/finfo.c -new file mode 100644 -index 0000000..7e25db3 ---- /dev/null -+++ b/fs/aufs/finfo.c -@@ -0,0 +1,156 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * file private data -+ */ -+ -+#include "aufs.h" -+ -+void au_hfput(struct au_hfile *hf, struct file *file) -+{ -+ /* todo: direct access f_flags */ -+ if (vfsub_file_flags(file) & __FMODE_EXEC) -+ allow_write_access(hf->hf_file); -+ fput(hf->hf_file); -+ hf->hf_file = NULL; -+ atomic_dec(&hf->hf_br->br_count); -+ hf->hf_br = NULL; -+} -+ -+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex, struct file *val) -+{ -+ struct au_finfo *finfo = au_fi(file); -+ struct au_hfile *hf; -+ struct au_fidir *fidir; -+ -+ fidir = finfo->fi_hdir; -+ if (!fidir) { -+ AuDebugOn(finfo->fi_btop != bindex); -+ hf = &finfo->fi_htop; -+ } else -+ hf = fidir->fd_hfile + bindex; -+ -+ if (hf && hf->hf_file) -+ au_hfput(hf, file); -+ if (val) { -+ FiMustWriteLock(file); -+ AuDebugOn(IS_ERR_OR_NULL(file->f_dentry)); -+ hf->hf_file = val; -+ hf->hf_br = au_sbr(file->f_dentry->d_sb, bindex); -+ } -+} -+ -+void au_update_figen(struct file *file) -+{ -+ atomic_set(&au_fi(file)->fi_generation, au_digen(file->f_dentry)); -+ /* smp_mb(); */ /* atomic_set */ -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_fidir *au_fidir_alloc(struct super_block *sb) -+{ -+ struct au_fidir *fidir; -+ int nbr; -+ -+ nbr = au_sbend(sb) + 1; -+ if (nbr < 2) -+ nbr = 2; /* initial allocate for 2 
branches */ -+ fidir = kzalloc(au_fidir_sz(nbr), GFP_NOFS); -+ if (fidir) { -+ fidir->fd_bbot = -1; -+ fidir->fd_nent = nbr; -+ } -+ -+ return fidir; -+} -+ -+int au_fidir_realloc(struct au_finfo *finfo, int nbr) -+{ -+ int err; -+ struct au_fidir *fidir, *p; -+ -+ AuRwMustWriteLock(&finfo->fi_rwsem); -+ fidir = finfo->fi_hdir; -+ AuDebugOn(!fidir); -+ -+ err = -ENOMEM; -+ p = au_kzrealloc(fidir, au_fidir_sz(fidir->fd_nent), au_fidir_sz(nbr), -+ GFP_NOFS); -+ if (p) { -+ p->fd_nent = nbr; -+ finfo->fi_hdir = p; -+ err = 0; -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_finfo_fin(struct file *file) -+{ -+ struct au_finfo *finfo; -+ -+ au_nfiles_dec(file->f_dentry->d_sb); -+ -+ finfo = au_fi(file); -+ AuDebugOn(finfo->fi_hdir); -+ AuRwDestroy(&finfo->fi_rwsem); -+ au_cache_free_finfo(finfo); -+} -+ -+void au_fi_init_once(void *_finfo) -+{ -+ struct au_finfo *finfo = _finfo; -+ static struct lock_class_key aufs_fi; -+ -+ au_rw_init(&finfo->fi_rwsem); -+ au_rw_class(&finfo->fi_rwsem, &aufs_fi); -+} -+ -+int au_finfo_init(struct file *file, struct au_fidir *fidir) -+{ -+ int err; -+ struct au_finfo *finfo; -+ struct dentry *dentry; -+ -+ err = -ENOMEM; -+ dentry = file->f_dentry; -+ finfo = au_cache_alloc_finfo(); -+ if (unlikely(!finfo)) -+ goto out; -+ -+ err = 0; -+ au_nfiles_inc(dentry->d_sb); -+ /* verbose coding for lock class name */ -+ if (!fidir) -+ au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcNonDir_FIINFO); -+ else -+ au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcDir_FIINFO); -+ au_rw_write_lock(&finfo->fi_rwsem); -+ finfo->fi_btop = -1; -+ finfo->fi_hdir = fidir; -+ atomic_set(&finfo->fi_generation, au_digen(dentry)); -+ /* smp_mb(); */ /* atomic_set */ -+ -+ file->private_data = finfo; -+ -+out: -+ return err; -+} -diff --git a/fs/aufs/fstype.h b/fs/aufs/fstype.h -new file mode 100644 -index 0000000..2842400 ---- /dev/null -+++ b/fs/aufs/fstype.h -@@ -0,0 +1,400 @@ -+/* -+ * 
Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * judging filesystem type -+ */ -+ -+#ifndef __AUFS_FSTYPE_H__ -+#define __AUFS_FSTYPE_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+#include -+#include -+ -+static inline int au_test_aufs(struct super_block *sb) -+{ -+ return sb->s_magic == AUFS_SUPER_MAGIC; -+} -+ -+static inline const char *au_sbtype(struct super_block *sb) -+{ -+ return sb->s_type->name; -+} -+ -+static inline int au_test_iso9660(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_ISO9660_FS) || defined(CONFIG_ISO9660_FS_MODULE) -+ return sb->s_magic == ISOFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_romfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_ROMFS_FS) || defined(CONFIG_ROMFS_FS_MODULE) -+ return sb->s_magic == ROMFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_cramfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_CRAMFS) || defined(CONFIG_CRAMFS_MODULE) -+ return sb->s_magic == CRAMFS_MAGIC; -+#endif -+ return 0; -+} -+ -+static inline int au_test_nfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_NFS_FS) || defined(CONFIG_NFS_FS_MODULE) -+ return sb->s_magic == NFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_fuse(struct 
super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_FUSE_FS) || defined(CONFIG_FUSE_FS_MODULE) -+ return sb->s_magic == FUSE_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_xfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_XFS_FS) || defined(CONFIG_XFS_FS_MODULE) -+ return sb->s_magic == XFS_SB_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_tmpfs(struct super_block *sb __maybe_unused) -+{ -+#ifdef CONFIG_TMPFS -+ return sb->s_magic == TMPFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_ecryptfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_ECRYPT_FS) || defined(CONFIG_ECRYPT_FS_MODULE) -+ return !strcmp(au_sbtype(sb), "ecryptfs"); -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_ramfs(struct super_block *sb) -+{ -+ return sb->s_magic == RAMFS_MAGIC; -+} -+ -+static inline int au_test_ubifs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_UBIFS_FS) || defined(CONFIG_UBIFS_FS_MODULE) -+ return sb->s_magic == UBIFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_procfs(struct super_block *sb __maybe_unused) -+{ -+#ifdef CONFIG_PROC_FS -+ return sb->s_magic == PROC_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_sysfs(struct super_block *sb __maybe_unused) -+{ -+#ifdef CONFIG_SYSFS -+ return sb->s_magic == SYSFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_configfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_CONFIGFS_FS) || defined(CONFIG_CONFIGFS_FS_MODULE) -+ return sb->s_magic == CONFIGFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_minix(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_MINIX_FS) || defined(CONFIG_MINIX_FS_MODULE) -+ return sb->s_magic == MINIX3_SUPER_MAGIC -+ || sb->s_magic == MINIX2_SUPER_MAGIC -+ || sb->s_magic == MINIX2_SUPER_MAGIC2 -+ || 
sb->s_magic == MINIX_SUPER_MAGIC -+ || sb->s_magic == MINIX_SUPER_MAGIC2; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_fat(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_FAT_FS) || defined(CONFIG_FAT_FS_MODULE) -+ return sb->s_magic == MSDOS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_msdos(struct super_block *sb) -+{ -+ return au_test_fat(sb); -+} -+ -+static inline int au_test_vfat(struct super_block *sb) -+{ -+ return au_test_fat(sb); -+} -+ -+static inline int au_test_securityfs(struct super_block *sb __maybe_unused) -+{ -+#ifdef CONFIG_SECURITYFS -+ return sb->s_magic == SECURITYFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_squashfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_SQUASHFS) || defined(CONFIG_SQUASHFS_MODULE) -+ return sb->s_magic == SQUASHFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_btrfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE) -+ return sb->s_magic == BTRFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_xenfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_XENFS) || defined(CONFIG_XENFS_MODULE) -+ return sb->s_magic == XENFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_debugfs(struct super_block *sb __maybe_unused) -+{ -+#ifdef CONFIG_DEBUG_FS -+ return sb->s_magic == DEBUGFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_nilfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_NILFS) || defined(CONFIG_NILFS_MODULE) -+ return sb->s_magic == NILFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_hfsplus(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_HFSPLUS_FS) || defined(CONFIG_HFSPLUS_FS_MODULE) -+ return sb->s_magic == HFSPLUS_SUPER_MAGIC; -+#else -+ return 0; -+#endif 
-+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * they can't be an aufs branch. -+ */ -+static inline int au_test_fs_unsuppoted(struct super_block *sb) -+{ -+ return -+#ifndef CONFIG_AUFS_BR_RAMFS -+ au_test_ramfs(sb) || -+#endif -+ au_test_procfs(sb) -+ || au_test_sysfs(sb) -+ || au_test_configfs(sb) -+ || au_test_debugfs(sb) -+ || au_test_securityfs(sb) -+ || au_test_xenfs(sb) -+ || au_test_ecryptfs(sb) -+ /* || !strcmp(au_sbtype(sb), "unionfs") */ -+ || au_test_aufs(sb); /* will be supported in next version */ -+} -+ -+static inline int au_test_fs_remote(struct super_block *sb) -+{ -+ return !au_test_tmpfs(sb) -+#ifdef CONFIG_AUFS_BR_RAMFS -+ && !au_test_ramfs(sb) -+#endif -+ && !(sb->s_type->fs_flags & FS_REQUIRES_DEV); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * Note: these functions (below) are created after reading ->getattr() in all -+ * filesystems under linux/fs. it means we have to do so in every update... -+ */ -+ -+/* -+ * some filesystems require getattr to refresh the inode attributes before -+ * referencing. -+ * in most cases, we can rely on the inode attribute in NFS (or every remote fs) -+ * and leave the work for d_revalidate() -+ */ -+static inline int au_test_fs_refresh_iattr(struct super_block *sb) -+{ -+ return au_test_nfs(sb) -+ || au_test_fuse(sb) -+ /* || au_test_btrfs(sb) */ /* untested */ -+ ; -+} -+ -+/* -+ * filesystems which don't maintain i_size or i_blocks. -+ */ -+static inline int au_test_fs_bad_iattr_size(struct super_block *sb) -+{ -+ return au_test_xfs(sb) -+ || au_test_btrfs(sb) -+ || au_test_ubifs(sb) -+ || au_test_hfsplus(sb) /* maintained, but incorrect */ -+ /* || au_test_minix(sb) */ /* untested */ -+ ; -+} -+ -+/* -+ * filesystems which don't store the correct value in some of their inode -+ * attributes. 
-+ */ -+static inline int au_test_fs_bad_iattr(struct super_block *sb) -+{ -+ return au_test_fs_bad_iattr_size(sb) -+ || au_test_fat(sb) -+ || au_test_msdos(sb) -+ || au_test_vfat(sb); -+} -+ -+/* they don't check i_nlink in link(2) */ -+static inline int au_test_fs_no_limit_nlink(struct super_block *sb) -+{ -+ return au_test_tmpfs(sb) -+#ifdef CONFIG_AUFS_BR_RAMFS -+ || au_test_ramfs(sb) -+#endif -+ || au_test_ubifs(sb) -+ || au_test_hfsplus(sb); -+} -+ -+/* -+ * filesystems which sets S_NOATIME and S_NOCMTIME. -+ */ -+static inline int au_test_fs_notime(struct super_block *sb) -+{ -+ return au_test_nfs(sb) -+ || au_test_fuse(sb) -+ || au_test_ubifs(sb) -+ ; -+} -+ -+/* temporary support for i#1 in cramfs */ -+static inline int au_test_fs_unique_ino(struct inode *inode) -+{ -+ if (au_test_cramfs(inode->i_sb)) -+ return inode->i_ino != 1; -+ return 1; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * the filesystem where the xino files placed must support i/o after unlink and -+ * maintain i_size and i_blocks. -+ */ -+static inline int au_test_fs_bad_xino(struct super_block *sb) -+{ -+ return au_test_fs_remote(sb) -+ || au_test_fs_bad_iattr_size(sb) -+ /* don't want unnecessary work for xino */ -+ || au_test_aufs(sb) -+ || au_test_ecryptfs(sb) -+ || au_test_nilfs(sb); -+} -+ -+static inline int au_test_fs_trunc_xino(struct super_block *sb) -+{ -+ return au_test_tmpfs(sb) -+ || au_test_ramfs(sb); -+} -+ -+/* -+ * test if the @sb is real-readonly. -+ */ -+static inline int au_test_fs_rr(struct super_block *sb) -+{ -+ return au_test_squashfs(sb) -+ || au_test_iso9660(sb) -+ || au_test_cramfs(sb) -+ || au_test_romfs(sb); -+} -+ -+/* -+ * test if the @inode is nfs with 'noacl' option -+ * NFS always sets MS_POSIXACL regardless its mount option 'noacl.' 
-+ */ -+static inline int au_test_nfs_noacl(struct inode *inode) -+{ -+ return au_test_nfs(inode->i_sb) -+ /* && IS_POSIXACL(inode) */ -+ && !nfs_server_capable(inode, NFS_CAP_ACLS); -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_FSTYPE_H__ */ -diff --git a/fs/aufs/hfsnotify.c b/fs/aufs/hfsnotify.c -new file mode 100644 -index 0000000..6fa79b0 ---- /dev/null -+++ b/fs/aufs/hfsnotify.c -@@ -0,0 +1,288 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * fsnotify for the lower directories -+ */ -+ -+#include "aufs.h" -+ -+/* FS_IN_IGNORED is unnecessary */ -+static const __u32 AuHfsnMask = (FS_MOVED_TO | FS_MOVED_FROM | FS_DELETE -+ | FS_CREATE | FS_EVENT_ON_CHILD); -+static DECLARE_WAIT_QUEUE_HEAD(au_hfsn_wq); -+static __cacheline_aligned_in_smp atomic64_t au_hfsn_ifree = ATOMIC64_INIT(0); -+ -+static void au_hfsn_free_mark(struct fsnotify_mark *mark) -+{ -+ struct au_hnotify *hn = container_of(mark, struct au_hnotify, -+ hn_mark); -+ AuDbg("here\n"); -+ au_cache_free_hnotify(hn); -+ smp_mb__before_atomic(); -+ if (atomic64_dec_and_test(&au_hfsn_ifree)) -+ wake_up(&au_hfsn_wq); -+} -+ -+static int au_hfsn_alloc(struct au_hinode *hinode) -+{ -+ int err; -+ struct au_hnotify *hn; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct fsnotify_mark *mark; -+ aufs_bindex_t bindex; -+ -+ hn = hinode->hi_notify; -+ sb = hn->hn_aufs_inode->i_sb; -+ bindex = au_br_index(sb, hinode->hi_id); -+ br = au_sbr(sb, bindex); -+ AuDebugOn(!br->br_hfsn); -+ -+ mark = &hn->hn_mark; -+ fsnotify_init_mark(mark, au_hfsn_free_mark); -+ mark->mask = AuHfsnMask; -+ /* -+ * by udba rename or rmdir, aufs assign a new inode to the known -+ * h_inode, so specify 1 to allow dups. 
-+ */ -+ lockdep_off(); -+ err = fsnotify_add_mark(mark, br->br_hfsn->hfsn_group, hinode->hi_inode, -+ /*mnt*/NULL, /*allow_dups*/1); -+ /* even if err */ -+ fsnotify_put_mark(mark); -+ lockdep_on(); -+ -+ return err; -+} -+ -+static int au_hfsn_free(struct au_hinode *hinode, struct au_hnotify *hn) -+{ -+ struct fsnotify_mark *mark; -+ unsigned long long ull; -+ struct fsnotify_group *group; -+ -+ ull = atomic64_inc_return(&au_hfsn_ifree); -+ BUG_ON(!ull); -+ -+ mark = &hn->hn_mark; -+ spin_lock(&mark->lock); -+ group = mark->group; -+ fsnotify_get_group(group); -+ spin_unlock(&mark->lock); -+ lockdep_off(); -+ fsnotify_destroy_mark(mark, group); -+ fsnotify_put_group(group); -+ lockdep_on(); -+ -+ /* free hn by myself */ -+ return 0; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_hfsn_ctl(struct au_hinode *hinode, int do_set) -+{ -+ struct fsnotify_mark *mark; -+ -+ mark = &hinode->hi_notify->hn_mark; -+ spin_lock(&mark->lock); -+ if (do_set) { -+ AuDebugOn(mark->mask & AuHfsnMask); -+ mark->mask |= AuHfsnMask; -+ } else { -+ AuDebugOn(!(mark->mask & AuHfsnMask)); -+ mark->mask &= ~AuHfsnMask; -+ } -+ spin_unlock(&mark->lock); -+ /* fsnotify_recalc_inode_mask(hinode->hi_inode); */ -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* #define AuDbgHnotify */ -+#ifdef AuDbgHnotify -+static char *au_hfsn_name(u32 mask) -+{ -+#ifdef CONFIG_AUFS_DEBUG -+#define test_ret(flag) \ -+ do { \ -+ if (mask & flag) \ -+ return #flag; \ -+ } while (0) -+ test_ret(FS_ACCESS); -+ test_ret(FS_MODIFY); -+ test_ret(FS_ATTRIB); -+ test_ret(FS_CLOSE_WRITE); -+ test_ret(FS_CLOSE_NOWRITE); -+ test_ret(FS_OPEN); -+ test_ret(FS_MOVED_FROM); -+ test_ret(FS_MOVED_TO); -+ test_ret(FS_CREATE); -+ test_ret(FS_DELETE); -+ test_ret(FS_DELETE_SELF); -+ test_ret(FS_MOVE_SELF); -+ test_ret(FS_UNMOUNT); -+ test_ret(FS_Q_OVERFLOW); -+ test_ret(FS_IN_IGNORED); -+ test_ret(FS_ISDIR); -+ 
test_ret(FS_IN_ONESHOT); -+ test_ret(FS_EVENT_ON_CHILD); -+ return ""; -+#undef test_ret -+#else -+ return "??"; -+#endif -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_hfsn_free_group(struct fsnotify_group *group) -+{ -+ struct au_br_hfsnotify *hfsn = group->private; -+ -+ AuDbg("here\n"); -+ kfree(hfsn); -+} -+ -+static int au_hfsn_handle_event(struct fsnotify_group *group, -+ struct inode *inode, -+ struct fsnotify_mark *inode_mark, -+ struct fsnotify_mark *vfsmount_mark, -+ u32 mask, void *data, int data_type, -+ const unsigned char *file_name, u32 cookie) -+{ -+ int err; -+ struct au_hnotify *hnotify; -+ struct inode *h_dir, *h_inode; -+ struct qstr h_child_qstr = QSTR_INIT(file_name, strlen(file_name)); -+ -+ AuDebugOn(data_type != FSNOTIFY_EVENT_INODE); -+ -+ err = 0; -+ /* if FS_UNMOUNT happens, there must be another bug */ -+ AuDebugOn(mask & FS_UNMOUNT); -+ if (mask & (FS_IN_IGNORED | FS_UNMOUNT)) -+ goto out; -+ -+ h_dir = inode; -+ h_inode = NULL; -+#ifdef AuDbgHnotify -+ au_debug_on(); -+ if (1 || h_child_qstr.len != sizeof(AUFS_XINO_FNAME) - 1 -+ || strncmp(h_child_qstr.name, AUFS_XINO_FNAME, h_child_qstr.len)) { -+ AuDbg("i%lu, mask 0x%x %s, hcname %.*s, hi%lu\n", -+ h_dir->i_ino, mask, au_hfsn_name(mask), -+ AuLNPair(&h_child_qstr), h_inode ? 
h_inode->i_ino : 0); -+ /* WARN_ON(1); */ -+ } -+ au_debug_off(); -+#endif -+ -+ AuDebugOn(!inode_mark); -+ hnotify = container_of(inode_mark, struct au_hnotify, hn_mark); -+ err = au_hnotify(h_dir, hnotify, mask, &h_child_qstr, h_inode); -+ -+out: -+ return err; -+} -+ -+static struct fsnotify_ops au_hfsn_ops = { -+ .handle_event = au_hfsn_handle_event, -+ .free_group_priv = au_hfsn_free_group -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_hfsn_fin_br(struct au_branch *br) -+{ -+ struct au_br_hfsnotify *hfsn; -+ -+ hfsn = br->br_hfsn; -+ if (hfsn) { -+ lockdep_off(); -+ fsnotify_put_group(hfsn->hfsn_group); -+ lockdep_on(); -+ } -+} -+ -+static int au_hfsn_init_br(struct au_branch *br, int perm) -+{ -+ int err; -+ struct fsnotify_group *group; -+ struct au_br_hfsnotify *hfsn; -+ -+ err = 0; -+ br->br_hfsn = NULL; -+ if (!au_br_hnotifyable(perm)) -+ goto out; -+ -+ err = -ENOMEM; -+ hfsn = kmalloc(sizeof(*hfsn), GFP_NOFS); -+ if (unlikely(!hfsn)) -+ goto out; -+ -+ err = 0; -+ group = fsnotify_alloc_group(&au_hfsn_ops); -+ if (IS_ERR(group)) { -+ err = PTR_ERR(group); -+ pr_err("fsnotify_alloc_group() failed, %d\n", err); -+ goto out_hfsn; -+ } -+ -+ group->private = hfsn; -+ hfsn->hfsn_group = group; -+ br->br_hfsn = hfsn; -+ goto out; /* success */ -+ -+out_hfsn: -+ kfree(hfsn); -+out: -+ return err; -+} -+ -+static int au_hfsn_reset_br(unsigned int udba, struct au_branch *br, int perm) -+{ -+ int err; -+ -+ err = 0; -+ if (!br->br_hfsn) -+ err = au_hfsn_init_br(br, perm); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_hfsn_fin(void) -+{ -+ AuDbg("au_hfsn_ifree %lld\n", (long long)atomic64_read(&au_hfsn_ifree)); -+ wait_event(au_hfsn_wq, !atomic64_read(&au_hfsn_ifree)); -+} -+ -+const struct au_hnotify_op au_hnotify_op = { -+ .ctl = au_hfsn_ctl, -+ .alloc = au_hfsn_alloc, -+ .free = au_hfsn_free, -+ -+ .fin = au_hfsn_fin, -+ -+ 
.reset_br = au_hfsn_reset_br, -+ .fin_br = au_hfsn_fin_br, -+ .init_br = au_hfsn_init_br -+}; -diff --git a/fs/aufs/hfsplus.c b/fs/aufs/hfsplus.c -new file mode 100644 -index 0000000..8a54c82 ---- /dev/null -+++ b/fs/aufs/hfsplus.c -@@ -0,0 +1,56 @@ -+/* -+ * Copyright (C) 2010-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * special support for filesystems which aqucires an inode mutex -+ * at final closing a file, eg, hfsplus. -+ * -+ * This trick is very simple and stupid, just to open the file before really -+ * neceeary open to tell hfsplus that this is not the final closing. -+ * The caller should call au_h_open_pre() after acquiring the inode mutex, -+ * and au_h_open_post() after releasing it. 
-+ */ -+ -+#include "aufs.h" -+ -+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex, -+ int force_wr) -+{ -+ struct file *h_file; -+ struct dentry *h_dentry; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ AuDebugOn(!h_dentry); -+ AuDebugOn(!h_dentry->d_inode); -+ -+ h_file = NULL; -+ if (au_test_hfsplus(h_dentry->d_sb) -+ && S_ISREG(h_dentry->d_inode->i_mode)) -+ h_file = au_h_open(dentry, bindex, -+ O_RDONLY | O_NOATIME | O_LARGEFILE, -+ /*file*/NULL, force_wr); -+ return h_file; -+} -+ -+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex, -+ struct file *h_file) -+{ -+ if (h_file) { -+ fput(h_file); -+ au_sbr_put(dentry->d_sb, bindex); -+ } -+} -diff --git a/fs/aufs/hnotify.c b/fs/aufs/hnotify.c -new file mode 100644 -index 0000000..1801420 ---- /dev/null -+++ b/fs/aufs/hnotify.c -@@ -0,0 +1,714 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * abstraction to notify the direct changes on lower directories -+ */ -+ -+#include "aufs.h" -+ -+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode) -+{ -+ int err; -+ struct au_hnotify *hn; -+ -+ err = -ENOMEM; -+ hn = au_cache_alloc_hnotify(); -+ if (hn) { -+ hn->hn_aufs_inode = inode; -+ hinode->hi_notify = hn; -+ err = au_hnotify_op.alloc(hinode); -+ AuTraceErr(err); -+ if (unlikely(err)) { -+ hinode->hi_notify = NULL; -+ au_cache_free_hnotify(hn); -+ /* -+ * The upper dir was removed by udba, but the same named -+ * dir left. In this case, aufs assignes a new inode -+ * number and set the monitor again. -+ * For the lower dir, the old monitnor is still left. -+ */ -+ if (err == -EEXIST) -+ err = 0; -+ } -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+void au_hn_free(struct au_hinode *hinode) -+{ -+ struct au_hnotify *hn; -+ -+ hn = hinode->hi_notify; -+ if (hn) { -+ hinode->hi_notify = NULL; -+ if (au_hnotify_op.free(hinode, hn)) -+ au_cache_free_hnotify(hn); -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_hn_ctl(struct au_hinode *hinode, int do_set) -+{ -+ if (hinode->hi_notify) -+ au_hnotify_op.ctl(hinode, do_set); -+} -+ -+void au_hn_reset(struct inode *inode, unsigned int flags) -+{ -+ aufs_bindex_t bindex, bend; -+ struct inode *hi; -+ struct dentry *iwhdentry; -+ -+ bend = au_ibend(inode); -+ for (bindex = au_ibstart(inode); bindex <= bend; bindex++) { -+ hi = au_h_iptr(inode, bindex); -+ if (!hi) -+ continue; -+ -+ /* mutex_lock_nested(&hi->i_mutex, AuLsc_I_CHILD); */ -+ iwhdentry = au_hi_wh(inode, bindex); -+ if (iwhdentry) -+ dget(iwhdentry); -+ au_igrab(hi); -+ au_set_h_iptr(inode, bindex, NULL, 0); -+ au_set_h_iptr(inode, bindex, au_igrab(hi), -+ flags & ~AuHi_XINO); -+ iput(hi); -+ dput(iwhdentry); -+ /* mutex_unlock(&hi->i_mutex); */ -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int hn_xino(struct inode 
*inode, struct inode *h_inode) -+{ -+ int err; -+ aufs_bindex_t bindex, bend, bfound, bstart; -+ struct inode *h_i; -+ -+ err = 0; -+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) { -+ pr_warn("branch root dir was changed\n"); -+ goto out; -+ } -+ -+ bfound = -1; -+ bend = au_ibend(inode); -+ bstart = au_ibstart(inode); -+#if 0 /* reserved for future use */ -+ if (bindex == bend) { -+ /* keep this ino in rename case */ -+ goto out; -+ } -+#endif -+ for (bindex = bstart; bindex <= bend; bindex++) -+ if (au_h_iptr(inode, bindex) == h_inode) { -+ bfound = bindex; -+ break; -+ } -+ if (bfound < 0) -+ goto out; -+ -+ for (bindex = bstart; bindex <= bend; bindex++) { -+ h_i = au_h_iptr(inode, bindex); -+ if (!h_i) -+ continue; -+ -+ err = au_xino_write(inode->i_sb, bindex, h_i->i_ino, /*ino*/0); -+ /* ignore this error */ -+ /* bad action? */ -+ } -+ -+ /* children inode number will be broken */ -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int hn_gen_tree(struct dentry *dentry) -+{ -+ int err, i, j, ndentry; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct dentry **dentries; -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_dcsub_pages(&dpages, dentry, NULL, NULL); -+ if (unlikely(err)) -+ goto out_dpages; -+ -+ for (i = 0; i < dpages.ndpage; i++) { -+ dpage = dpages.dpages + i; -+ dentries = dpage->dentries; -+ ndentry = dpage->ndentry; -+ for (j = 0; j < ndentry; j++) { -+ struct dentry *d; -+ -+ d = dentries[j]; -+ if (IS_ROOT(d)) -+ continue; -+ -+ au_digen_dec(d); -+ if (d->d_inode) -+ /* todo: reset children xino? -+ cached children only? */ -+ au_iigen_dec(d->d_inode); -+ } -+ } -+ -+out_dpages: -+ au_dpages_free(&dpages); -+ -+#if 0 -+ /* discard children */ -+ dentry_unhash(dentry); -+ dput(dentry); -+#endif -+out: -+ return err; -+} -+ -+/* -+ * return 0 if processed. 
-+ */ -+static int hn_gen_by_inode(char *name, unsigned int nlen, struct inode *inode, -+ const unsigned int isdir) -+{ -+ int err; -+ struct dentry *d; -+ struct qstr *dname; -+ -+ err = 1; -+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) { -+ pr_warn("branch root dir was changed\n"); -+ err = 0; -+ goto out; -+ } -+ -+ if (!isdir) { -+ AuDebugOn(!name); -+ au_iigen_dec(inode); -+ spin_lock(&inode->i_lock); -+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) { -+ spin_lock(&d->d_lock); -+ dname = &d->d_name; -+ if (dname->len != nlen -+ && memcmp(dname->name, name, nlen)) { -+ spin_unlock(&d->d_lock); -+ continue; -+ } -+ err = 0; -+ au_digen_dec(d); -+ spin_unlock(&d->d_lock); -+ break; -+ } -+ spin_unlock(&inode->i_lock); -+ } else { -+ au_fset_si(au_sbi(inode->i_sb), FAILED_REFRESH_DIR); -+ d = d_find_any_alias(inode); -+ if (!d) { -+ au_iigen_dec(inode); -+ goto out; -+ } -+ -+ spin_lock(&d->d_lock); -+ dname = &d->d_name; -+ if (dname->len == nlen && !memcmp(dname->name, name, nlen)) { -+ spin_unlock(&d->d_lock); -+ err = hn_gen_tree(d); -+ spin_lock(&d->d_lock); -+ } -+ spin_unlock(&d->d_lock); -+ dput(d); -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int hn_gen_by_name(struct dentry *dentry, const unsigned int isdir) -+{ -+ int err; -+ struct inode *inode; -+ -+ inode = dentry->d_inode; -+ if (IS_ROOT(dentry) -+ /* || (inode && inode->i_ino == AUFS_ROOT_INO) */ -+ ) { -+ pr_warn("branch root dir was changed\n"); -+ return 0; -+ } -+ -+ err = 0; -+ if (!isdir) { -+ au_digen_dec(dentry); -+ if (inode) -+ au_iigen_dec(inode); -+ } else { -+ au_fset_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR); -+ if (inode) -+ err = hn_gen_tree(dentry); -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* hnotify job flags */ -+#define AuHnJob_XINO0 1 -+#define AuHnJob_GEN (1 << 1) -+#define AuHnJob_DIRENT (1 << 2) -+#define AuHnJob_ISDIR (1 << 3) -+#define 
AuHnJob_TRYXINO0 (1 << 4) -+#define AuHnJob_MNTPNT (1 << 5) -+#define au_ftest_hnjob(flags, name) ((flags) & AuHnJob_##name) -+#define au_fset_hnjob(flags, name) \ -+ do { (flags) |= AuHnJob_##name; } while (0) -+#define au_fclr_hnjob(flags, name) \ -+ do { (flags) &= ~AuHnJob_##name; } while (0) -+ -+enum { -+ AuHn_CHILD, -+ AuHn_PARENT, -+ AuHnLast -+}; -+ -+struct au_hnotify_args { -+ struct inode *h_dir, *dir, *h_child_inode; -+ u32 mask; -+ unsigned int flags[AuHnLast]; -+ unsigned int h_child_nlen; -+ char h_child_name[]; -+}; -+ -+struct hn_job_args { -+ unsigned int flags; -+ struct inode *inode, *h_inode, *dir, *h_dir; -+ struct dentry *dentry; -+ char *h_name; -+ int h_nlen; -+}; -+ -+static int hn_job(struct hn_job_args *a) -+{ -+ const unsigned int isdir = au_ftest_hnjob(a->flags, ISDIR); -+ int e; -+ -+ /* reset xino */ -+ if (au_ftest_hnjob(a->flags, XINO0) && a->inode) -+ hn_xino(a->inode, a->h_inode); /* ignore this error */ -+ -+ if (au_ftest_hnjob(a->flags, TRYXINO0) -+ && a->inode -+ && a->h_inode) { -+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD); -+ if (!a->h_inode->i_nlink -+ && !(a->h_inode->i_state & I_LINKABLE)) -+ hn_xino(a->inode, a->h_inode); /* ignore this error */ -+ mutex_unlock(&a->h_inode->i_mutex); -+ } -+ -+ /* make the generation obsolete */ -+ if (au_ftest_hnjob(a->flags, GEN)) { -+ e = -1; -+ if (a->inode) -+ e = hn_gen_by_inode(a->h_name, a->h_nlen, a->inode, -+ isdir); -+ if (e && a->dentry) -+ hn_gen_by_name(a->dentry, isdir); -+ /* ignore this error */ -+ } -+ -+ /* make dir entries obsolete */ -+ if (au_ftest_hnjob(a->flags, DIRENT) && a->inode) { -+ struct au_vdir *vdir; -+ -+ vdir = au_ivdir(a->inode); -+ if (vdir) -+ vdir->vd_jiffy = 0; -+ /* IMustLock(a->inode); */ -+ /* a->inode->i_version++; */ -+ } -+ -+ /* can do nothing but warn */ -+ if (au_ftest_hnjob(a->flags, MNTPNT) -+ && a->dentry -+ && d_mountpoint(a->dentry)) -+ pr_warn("mount-point %pd is removed or renamed\n", a->dentry); -+ -+ return 0; -+} -+ 
-+/* ---------------------------------------------------------------------- */ -+ -+static struct dentry *lookup_wlock_by_name(char *name, unsigned int nlen, -+ struct inode *dir) -+{ -+ struct dentry *dentry, *d, *parent; -+ struct qstr *dname; -+ -+ parent = d_find_any_alias(dir); -+ if (!parent) -+ return NULL; -+ -+ dentry = NULL; -+ spin_lock(&parent->d_lock); -+ list_for_each_entry(d, &parent->d_subdirs, d_child) { -+ /* AuDbg("%pd\n", d); */ -+ spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED); -+ dname = &d->d_name; -+ if (dname->len != nlen || memcmp(dname->name, name, nlen)) -+ goto cont_unlock; -+ if (au_di(d)) -+ au_digen_dec(d); -+ else -+ goto cont_unlock; -+ if (au_dcount(d) > 0) { -+ dentry = dget_dlock(d); -+ spin_unlock(&d->d_lock); -+ break; -+ } -+ -+cont_unlock: -+ spin_unlock(&d->d_lock); -+ } -+ spin_unlock(&parent->d_lock); -+ dput(parent); -+ -+ if (dentry) -+ di_write_lock_child(dentry); -+ -+ return dentry; -+} -+ -+static struct inode *lookup_wlock_by_ino(struct super_block *sb, -+ aufs_bindex_t bindex, ino_t h_ino) -+{ -+ struct inode *inode; -+ ino_t ino; -+ int err; -+ -+ inode = NULL; -+ err = au_xino_read(sb, bindex, h_ino, &ino); -+ if (!err && ino) -+ inode = ilookup(sb, ino); -+ if (!inode) -+ goto out; -+ -+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) { -+ pr_warn("wrong root branch\n"); -+ iput(inode); -+ inode = NULL; -+ goto out; -+ } -+ -+ ii_write_lock_child(inode); -+ -+out: -+ return inode; -+} -+ -+static void au_hn_bh(void *_args) -+{ -+ struct au_hnotify_args *a = _args; -+ struct super_block *sb; -+ aufs_bindex_t bindex, bend, bfound; -+ unsigned char xino, try_iput; -+ int err; -+ struct inode *inode; -+ ino_t h_ino; -+ struct hn_job_args args; -+ struct dentry *dentry; -+ struct au_sbinfo *sbinfo; -+ -+ AuDebugOn(!_args); -+ AuDebugOn(!a->h_dir); -+ AuDebugOn(!a->dir); -+ AuDebugOn(!a->mask); -+ AuDbg("mask 0x%x, i%lu, hi%lu, hci%lu\n", -+ a->mask, a->dir->i_ino, a->h_dir->i_ino, -+ a->h_child_inode ? 
a->h_child_inode->i_ino : 0); -+ -+ inode = NULL; -+ dentry = NULL; -+ /* -+ * do not lock a->dir->i_mutex here -+ * because of d_revalidate() may cause a deadlock. -+ */ -+ sb = a->dir->i_sb; -+ AuDebugOn(!sb); -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!sbinfo); -+ si_write_lock(sb, AuLock_NOPLMW); -+ -+ ii_read_lock_parent(a->dir); -+ bfound = -1; -+ bend = au_ibend(a->dir); -+ for (bindex = au_ibstart(a->dir); bindex <= bend; bindex++) -+ if (au_h_iptr(a->dir, bindex) == a->h_dir) { -+ bfound = bindex; -+ break; -+ } -+ ii_read_unlock(a->dir); -+ if (unlikely(bfound < 0)) -+ goto out; -+ -+ xino = !!au_opt_test(au_mntflags(sb), XINO); -+ h_ino = 0; -+ if (a->h_child_inode) -+ h_ino = a->h_child_inode->i_ino; -+ -+ if (a->h_child_nlen -+ && (au_ftest_hnjob(a->flags[AuHn_CHILD], GEN) -+ || au_ftest_hnjob(a->flags[AuHn_CHILD], MNTPNT))) -+ dentry = lookup_wlock_by_name(a->h_child_name, a->h_child_nlen, -+ a->dir); -+ try_iput = 0; -+ if (dentry) -+ inode = dentry->d_inode; -+ if (xino && !inode && h_ino -+ && (au_ftest_hnjob(a->flags[AuHn_CHILD], XINO0) -+ || au_ftest_hnjob(a->flags[AuHn_CHILD], TRYXINO0) -+ || au_ftest_hnjob(a->flags[AuHn_CHILD], GEN))) { -+ inode = lookup_wlock_by_ino(sb, bfound, h_ino); -+ try_iput = 1; -+ } -+ -+ args.flags = a->flags[AuHn_CHILD]; -+ args.dentry = dentry; -+ args.inode = inode; -+ args.h_inode = a->h_child_inode; -+ args.dir = a->dir; -+ args.h_dir = a->h_dir; -+ args.h_name = a->h_child_name; -+ args.h_nlen = a->h_child_nlen; -+ err = hn_job(&args); -+ if (dentry) { -+ if (au_di(dentry)) -+ di_write_unlock(dentry); -+ dput(dentry); -+ } -+ if (inode && try_iput) { -+ ii_write_unlock(inode); -+ iput(inode); -+ } -+ -+ ii_write_lock_parent(a->dir); -+ args.flags = a->flags[AuHn_PARENT]; -+ args.dentry = NULL; -+ args.inode = a->dir; -+ args.h_inode = a->h_dir; -+ args.dir = NULL; -+ args.h_dir = NULL; -+ args.h_name = NULL; -+ args.h_nlen = 0; -+ err = hn_job(&args); -+ ii_write_unlock(a->dir); -+ -+out: -+ iput(a->h_child_inode); -+ 
iput(a->h_dir); -+ iput(a->dir); -+ si_write_unlock(sb); -+ au_nwt_done(&sbinfo->si_nowait); -+ kfree(a); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask, -+ struct qstr *h_child_qstr, struct inode *h_child_inode) -+{ -+ int err, len; -+ unsigned int flags[AuHnLast], f; -+ unsigned char isdir, isroot, wh; -+ struct inode *dir; -+ struct au_hnotify_args *args; -+ char *p, *h_child_name; -+ -+ err = 0; -+ AuDebugOn(!hnotify || !hnotify->hn_aufs_inode); -+ dir = igrab(hnotify->hn_aufs_inode); -+ if (!dir) -+ goto out; -+ -+ isroot = (dir->i_ino == AUFS_ROOT_INO); -+ wh = 0; -+ h_child_name = (void *)h_child_qstr->name; -+ len = h_child_qstr->len; -+ if (h_child_name) { -+ if (len > AUFS_WH_PFX_LEN -+ && !memcmp(h_child_name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) { -+ h_child_name += AUFS_WH_PFX_LEN; -+ len -= AUFS_WH_PFX_LEN; -+ wh = 1; -+ } -+ } -+ -+ isdir = 0; -+ if (h_child_inode) -+ isdir = !!S_ISDIR(h_child_inode->i_mode); -+ flags[AuHn_PARENT] = AuHnJob_ISDIR; -+ flags[AuHn_CHILD] = 0; -+ if (isdir) -+ flags[AuHn_CHILD] = AuHnJob_ISDIR; -+ au_fset_hnjob(flags[AuHn_PARENT], DIRENT); -+ au_fset_hnjob(flags[AuHn_CHILD], GEN); -+ switch (mask & FS_EVENTS_POSS_ON_CHILD) { -+ case FS_MOVED_FROM: -+ case FS_MOVED_TO: -+ au_fset_hnjob(flags[AuHn_CHILD], XINO0); -+ au_fset_hnjob(flags[AuHn_CHILD], MNTPNT); -+ /*FALLTHROUGH*/ -+ case FS_CREATE: -+ AuDebugOn(!h_child_name); -+ break; -+ -+ case FS_DELETE: -+ /* -+ * aufs never be able to get this child inode. -+ * revalidation should be in d_revalidate() -+ * by checking i_nlink, i_generation or d_unhashed(). 
-+ */ -+ AuDebugOn(!h_child_name); -+ au_fset_hnjob(flags[AuHn_CHILD], TRYXINO0); -+ au_fset_hnjob(flags[AuHn_CHILD], MNTPNT); -+ break; -+ -+ default: -+ AuDebugOn(1); -+ } -+ -+ if (wh) -+ h_child_inode = NULL; -+ -+ err = -ENOMEM; -+ /* iput() and kfree() will be called in au_hnotify() */ -+ args = kmalloc(sizeof(*args) + len + 1, GFP_NOFS); -+ if (unlikely(!args)) { -+ AuErr1("no memory\n"); -+ iput(dir); -+ goto out; -+ } -+ args->flags[AuHn_PARENT] = flags[AuHn_PARENT]; -+ args->flags[AuHn_CHILD] = flags[AuHn_CHILD]; -+ args->mask = mask; -+ args->dir = dir; -+ args->h_dir = igrab(h_dir); -+ if (h_child_inode) -+ h_child_inode = igrab(h_child_inode); /* can be NULL */ -+ args->h_child_inode = h_child_inode; -+ args->h_child_nlen = len; -+ if (len) { -+ p = (void *)args; -+ p += sizeof(*args); -+ memcpy(p, h_child_name, len); -+ p[len] = 0; -+ } -+ -+ /* NFS fires the event for silly-renamed one from kworker */ -+ f = 0; -+ if (!dir->i_nlink -+ || (au_test_nfs(h_dir->i_sb) && (mask & FS_DELETE))) -+ f = AuWkq_NEST; -+ err = au_wkq_nowait(au_hn_bh, args, dir->i_sb, f); -+ if (unlikely(err)) { -+ pr_err("wkq %d\n", err); -+ iput(args->h_child_inode); -+ iput(args->h_dir); -+ iput(args->dir); -+ kfree(args); -+ } -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm) -+{ -+ int err; -+ -+ AuDebugOn(!(udba & AuOptMask_UDBA)); -+ -+ err = 0; -+ if (au_hnotify_op.reset_br) -+ err = au_hnotify_op.reset_br(udba, br, perm); -+ -+ return err; -+} -+ -+int au_hnotify_init_br(struct au_branch *br, int perm) -+{ -+ int err; -+ -+ err = 0; -+ if (au_hnotify_op.init_br) -+ err = au_hnotify_op.init_br(br, perm); -+ -+ return err; -+} -+ -+void au_hnotify_fin_br(struct au_branch *br) -+{ -+ if (au_hnotify_op.fin_br) -+ au_hnotify_op.fin_br(br); -+} -+ -+static void au_hn_destroy_cache(void) -+{ -+ 
kmem_cache_destroy(au_cachep[AuCache_HNOTIFY]); -+ au_cachep[AuCache_HNOTIFY] = NULL; -+} -+ -+int __init au_hnotify_init(void) -+{ -+ int err; -+ -+ err = -ENOMEM; -+ au_cachep[AuCache_HNOTIFY] = AuCache(au_hnotify); -+ if (au_cachep[AuCache_HNOTIFY]) { -+ err = 0; -+ if (au_hnotify_op.init) -+ err = au_hnotify_op.init(); -+ if (unlikely(err)) -+ au_hn_destroy_cache(); -+ } -+ AuTraceErr(err); -+ return err; -+} -+ -+void au_hnotify_fin(void) -+{ -+ if (au_hnotify_op.fin) -+ au_hnotify_op.fin(); -+ /* cf. au_cache_fin() */ -+ if (au_cachep[AuCache_HNOTIFY]) -+ au_hn_destroy_cache(); -+} -diff --git a/fs/aufs/i_op.c b/fs/aufs/i_op.c -new file mode 100644 -index 0000000..02dc95a ---- /dev/null -+++ b/fs/aufs/i_op.c -@@ -0,0 +1,1460 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * inode operations (except add/del/rename) -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include "aufs.h" -+ -+static int h_permission(struct inode *h_inode, int mask, -+ struct vfsmount *h_mnt, int brperm) -+{ -+ int err; -+ const unsigned char write_mask = !!(mask & (MAY_WRITE | MAY_APPEND)); -+ -+ err = -EACCES; -+ if ((write_mask && IS_IMMUTABLE(h_inode)) -+ || ((mask & MAY_EXEC) -+ && S_ISREG(h_inode->i_mode) -+ && ((h_mnt->mnt_flags & MNT_NOEXEC) -+ || !(h_inode->i_mode & S_IXUGO)))) -+ goto out; -+ -+ /* -+ * - skip the lower fs test in the case of write to ro branch. -+ * - nfs dir permission write check is optimized, but a policy for -+ * link/rename requires a real check. -+ * - nfs always sets MS_POSIXACL regardless its mount option 'noacl.' -+ * in this case, generic_permission() returns -EOPNOTSUPP. -+ */ -+ if ((write_mask && !au_br_writable(brperm)) -+ || (au_test_nfs(h_inode->i_sb) && S_ISDIR(h_inode->i_mode) -+ && write_mask && !(mask & MAY_READ)) -+ || !h_inode->i_op->permission) { -+ /* AuLabel(generic_permission); */ -+ /* AuDbg("get_acl %pf\n", h_inode->i_op->get_acl); */ -+ err = generic_permission(h_inode, mask); -+ if (err == -EOPNOTSUPP && au_test_nfs_noacl(h_inode)) -+ err = h_inode->i_op->permission(h_inode, mask); -+ AuTraceErr(err); -+ } else { -+ /* AuLabel(h_inode->permission); */ -+ err = h_inode->i_op->permission(h_inode, mask); -+ AuTraceErr(err); -+ } -+ -+ if (!err) -+ err = devcgroup_inode_permission(h_inode, mask); -+ if (!err) -+ err = security_inode_permission(h_inode, mask); -+ -+#if 0 -+ if (!err) { -+ /* todo: do we need to call ima_path_check()? 
*/ -+ struct path h_path = { -+ .dentry = -+ .mnt = h_mnt -+ }; -+ err = ima_path_check(&h_path, -+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC), -+ IMA_COUNT_LEAVE); -+ } -+#endif -+ -+out: -+ return err; -+} -+ -+static int aufs_permission(struct inode *inode, int mask) -+{ -+ int err; -+ aufs_bindex_t bindex, bend; -+ const unsigned char isdir = !!S_ISDIR(inode->i_mode), -+ write_mask = !!(mask & (MAY_WRITE | MAY_APPEND)); -+ struct inode *h_inode; -+ struct super_block *sb; -+ struct au_branch *br; -+ -+ /* todo: support rcu-walk? */ -+ if (mask & MAY_NOT_BLOCK) -+ return -ECHILD; -+ -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ ii_read_lock_child(inode); -+#if 0 -+ err = au_iigen_test(inode, au_sigen(sb)); -+ if (unlikely(err)) -+ goto out; -+#endif -+ -+ if (!isdir -+ || write_mask -+ || au_opt_test(au_mntflags(sb), DIRPERM1)) { -+ err = au_busy_or_stale(); -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ if (unlikely(!h_inode -+ || (h_inode->i_mode & S_IFMT) -+ != (inode->i_mode & S_IFMT))) -+ goto out; -+ -+ err = 0; -+ bindex = au_ibstart(inode); -+ br = au_sbr(sb, bindex); -+ err = h_permission(h_inode, mask, au_br_mnt(br), br->br_perm); -+ if (write_mask -+ && !err -+ && !special_file(h_inode->i_mode)) { -+ /* test whether the upper writable branch exists */ -+ err = -EROFS; -+ for (; bindex >= 0; bindex--) -+ if (!au_br_rdonly(au_sbr(sb, bindex))) { -+ err = 0; -+ break; -+ } -+ } -+ goto out; -+ } -+ -+ /* non-write to dir */ -+ err = 0; -+ bend = au_ibend(inode); -+ for (bindex = au_ibstart(inode); !err && bindex <= bend; bindex++) { -+ h_inode = au_h_iptr(inode, bindex); -+ if (h_inode) { -+ err = au_busy_or_stale(); -+ if (unlikely(!S_ISDIR(h_inode->i_mode))) -+ break; -+ -+ br = au_sbr(sb, bindex); -+ err = h_permission(h_inode, mask, au_br_mnt(br), -+ br->br_perm); -+ } -+ } -+ -+out: -+ ii_read_unlock(inode); -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ 
-+static struct dentry *aufs_lookup(struct inode *dir, struct dentry *dentry, -+ unsigned int flags) -+{ -+ struct dentry *ret, *parent; -+ struct inode *inode; -+ struct super_block *sb; -+ int err, npositive; -+ -+ IMustLock(dir); -+ -+ /* todo: support rcu-walk? */ -+ ret = ERR_PTR(-ECHILD); -+ if (flags & LOOKUP_RCU) -+ goto out; -+ -+ ret = ERR_PTR(-ENAMETOOLONG); -+ if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN)) -+ goto out; -+ -+ sb = dir->i_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ ret = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ err = au_di_init(dentry); -+ ret = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out_si; -+ -+ inode = NULL; -+ npositive = 0; /* suppress a warning */ -+ parent = dentry->d_parent; /* dir inode is locked */ -+ di_read_lock_parent(parent, AuLock_IR); -+ err = au_alive_dir(parent); -+ if (!err) -+ err = au_digen_test(parent, au_sigen(sb)); -+ if (!err) { -+ npositive = au_lkup_dentry(dentry, au_dbstart(parent), -+ /*type*/0); -+ err = npositive; -+ } -+ di_read_unlock(parent, AuLock_IR); -+ ret = ERR_PTR(err); -+ if (unlikely(err < 0)) -+ goto out_unlock; -+ -+ if (npositive) { -+ inode = au_new_inode(dentry, /*must_new*/0); -+ if (IS_ERR(inode)) { -+ ret = (void *)inode; -+ inode = NULL; -+ goto out_unlock; -+ } -+ } -+ -+ if (inode) -+ atomic_inc(&inode->i_count); -+ ret = d_splice_alias(inode, dentry); -+ if (IS_ERR(ret) -+ && PTR_ERR(ret) == -EIO -+ && inode -+ && S_ISDIR(inode->i_mode)) { -+ atomic_inc(&inode->i_count); -+ ret = d_materialise_unique(dentry, inode); -+ if (!IS_ERR(ret)) -+ ii_write_unlock(inode); -+ } -+#if 0 -+ if (unlikely(d_need_lookup(dentry))) { -+ spin_lock(&dentry->d_lock); -+ dentry->d_flags &= ~DCACHE_NEED_LOOKUP; -+ spin_unlock(&dentry->d_lock); -+ } else -+#endif -+ if (inode) { -+ if (!IS_ERR(ret)) -+ iput(inode); -+ else { -+ ii_write_unlock(inode); -+ iput(inode); -+ inode = NULL; -+ } -+ } -+ -+out_unlock: -+ di_write_unlock(dentry); -+ if (inode) { -+ /* 
verbose coding for lock class name */ -+ if (unlikely(S_ISLNK(inode->i_mode))) -+ au_rw_class(&au_di(dentry)->di_rwsem, -+ au_lc_key + AuLcSymlink_DIINFO); -+ else if (unlikely(S_ISDIR(inode->i_mode))) -+ au_rw_class(&au_di(dentry)->di_rwsem, -+ au_lc_key + AuLcDir_DIINFO); -+ else /* likely */ -+ au_rw_class(&au_di(dentry)->di_rwsem, -+ au_lc_key + AuLcNonDir_DIINFO); -+ } -+out_si: -+ si_read_unlock(sb); -+out: -+ return ret; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct aopen_node { -+ struct hlist_node hlist; -+ struct file *file, *h_file; -+}; -+ -+static int au_do_aopen(struct inode *inode, struct file *file) -+{ -+ struct au_sphlhead *aopen; -+ struct aopen_node *node; -+ struct au_do_open_args args = { -+ .no_lock = 1, -+ .open = au_do_open_nondir -+ }; -+ -+ aopen = &au_sbi(inode->i_sb)->si_aopen; -+ spin_lock(&aopen->spin); -+ hlist_for_each_entry(node, &aopen->head, hlist) -+ if (node->file == file) { -+ args.h_file = node->h_file; -+ break; -+ } -+ spin_unlock(&aopen->spin); -+ /* AuDebugOn(!args.h_file); */ -+ -+ return au_do_open(file, &args); -+} -+ -+static int aufs_atomic_open(struct inode *dir, struct dentry *dentry, -+ struct file *file, unsigned int open_flag, -+ umode_t create_mode, int *opened) -+{ -+ int err, h_opened = *opened; -+ struct dentry *parent; -+ struct dentry *d; -+ struct au_sphlhead *aopen; -+ struct vfsub_aopen_args args = { -+ .open_flag = open_flag, -+ .create_mode = create_mode, -+ .opened = &h_opened -+ }; -+ struct aopen_node aopen_node = { -+ .file = file -+ }; -+ -+ IMustLock(dir); -+ AuDbg("open_flag 0x%x\n", open_flag); -+ AuDbgDentry(dentry); -+ -+ err = 0; -+ if (!au_di(dentry)) { -+ d = aufs_lookup(dir, dentry, /*flags*/0); -+ if (IS_ERR(d)) { -+ err = PTR_ERR(d); -+ goto out; -+ } else if (d) { -+ /* -+ * obsoleted dentry found. -+ * another error will be returned later. 
-+ */ -+ d_drop(d); -+ dput(d); -+ AuDbgDentry(d); -+ } -+ AuDbgDentry(dentry); -+ } -+ -+ if (d_is_positive(dentry) -+ || d_unhashed(dentry) -+ || d_unlinked(dentry) -+ || !(open_flag & O_CREAT)) -+ goto out_no_open; -+ -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN); -+ if (unlikely(err)) -+ goto out; -+ -+ parent = dentry->d_parent; /* dir is locked */ -+ di_write_lock_parent(parent); -+ err = au_lkup_dentry(dentry, /*bstart*/0, /*type*/0); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ AuDbgDentry(dentry); -+ if (d_is_positive(dentry)) -+ goto out_unlock; -+ -+ args.file = get_empty_filp(); -+ err = PTR_ERR(args.file); -+ if (IS_ERR(args.file)) -+ goto out_unlock; -+ -+ args.file->f_flags = file->f_flags; -+ err = au_aopen_or_create(dir, dentry, &args); -+ AuTraceErr(err); -+ AuDbgFile(args.file); -+ if (unlikely(err < 0)) { -+ if (h_opened & FILE_OPENED) -+ fput(args.file); -+ else -+ put_filp(args.file); -+ goto out_unlock; -+ } -+ -+ /* some filesystems don't set FILE_CREATED while succeeded? */ -+ *opened |= FILE_CREATED; -+ if (h_opened & FILE_OPENED) -+ aopen_node.h_file = args.file; -+ else { -+ put_filp(args.file); -+ args.file = NULL; -+ } -+ aopen = &au_sbi(dir->i_sb)->si_aopen; -+ au_sphl_add(&aopen_node.hlist, aopen); -+ err = finish_open(file, dentry, au_do_aopen, opened); -+ au_sphl_del(&aopen_node.hlist, aopen); -+ AuTraceErr(err); -+ AuDbgFile(file); -+ if (aopen_node.h_file) -+ fput(aopen_node.h_file); -+ -+out_unlock: -+ di_write_unlock(parent); -+ aufs_read_unlock(dentry, AuLock_DW); -+ AuDbgDentry(dentry); -+ if (unlikely(err)) -+ goto out; -+out_no_open: -+ if (!err && !(*opened & FILE_CREATED)) { -+ AuLabel(out_no_open); -+ dget(dentry); -+ err = finish_no_open(file, dentry); -+ } -+out: -+ AuDbg("%pd%s%s\n", dentry, -+ (*opened & FILE_CREATED) ? " created" : "", -+ (*opened & FILE_OPENED) ? 
" opened" : ""); -+ AuTraceErr(err); -+ return err; -+} -+ -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_wr_dir_cpup(struct dentry *dentry, struct dentry *parent, -+ const unsigned char add_entry, aufs_bindex_t bcpup, -+ aufs_bindex_t bstart) -+{ -+ int err; -+ struct dentry *h_parent; -+ struct inode *h_dir; -+ -+ if (add_entry) -+ IMustLock(parent->d_inode); -+ else -+ di_write_lock_parent(parent); -+ -+ err = 0; -+ if (!au_h_dptr(parent, bcpup)) { -+ if (bstart > bcpup) -+ err = au_cpup_dirs(dentry, bcpup); -+ else if (bstart < bcpup) -+ err = au_cpdown_dirs(dentry, bcpup); -+ else -+ BUG(); -+ } -+ if (!err && add_entry && !au_ftest_wrdir(add_entry, TMPFILE)) { -+ h_parent = au_h_dptr(parent, bcpup); -+ h_dir = h_parent->d_inode; -+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT); -+ err = au_lkup_neg(dentry, bcpup, /*wh*/0); -+ /* todo: no unlock here */ -+ mutex_unlock(&h_dir->i_mutex); -+ -+ AuDbg("bcpup %d\n", bcpup); -+ if (!err) { -+ if (!dentry->d_inode) -+ au_set_h_dptr(dentry, bstart, NULL); -+ au_update_dbrange(dentry, /*do_put_zero*/0); -+ } -+ } -+ -+ if (!add_entry) -+ di_write_unlock(parent); -+ if (!err) -+ err = bcpup; /* success */ -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+/* -+ * decide the branch and the parent dir where we will create a new entry. -+ * returns new bindex or an error. -+ * copyup the parent dir if needed. 
-+ */ -+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry, -+ struct au_wr_dir_args *args) -+{ -+ int err; -+ unsigned int flags; -+ aufs_bindex_t bcpup, bstart, src_bstart; -+ const unsigned char add_entry -+ = au_ftest_wrdir(args->flags, ADD_ENTRY) -+ | au_ftest_wrdir(args->flags, TMPFILE); -+ struct super_block *sb; -+ struct dentry *parent; -+ struct au_sbinfo *sbinfo; -+ -+ sb = dentry->d_sb; -+ sbinfo = au_sbi(sb); -+ parent = dget_parent(dentry); -+ bstart = au_dbstart(dentry); -+ bcpup = bstart; -+ if (args->force_btgt < 0) { -+ if (src_dentry) { -+ src_bstart = au_dbstart(src_dentry); -+ if (src_bstart < bstart) -+ bcpup = src_bstart; -+ } else if (add_entry) { -+ flags = 0; -+ if (au_ftest_wrdir(args->flags, ISDIR)) -+ au_fset_wbr(flags, DIR); -+ err = AuWbrCreate(sbinfo, dentry, flags); -+ bcpup = err; -+ } -+ -+ if (bcpup < 0 || au_test_ro(sb, bcpup, dentry->d_inode)) { -+ if (add_entry) -+ err = AuWbrCopyup(sbinfo, dentry); -+ else { -+ if (!IS_ROOT(dentry)) { -+ di_read_lock_parent(parent, !AuLock_IR); -+ err = AuWbrCopyup(sbinfo, dentry); -+ di_read_unlock(parent, !AuLock_IR); -+ } else -+ err = AuWbrCopyup(sbinfo, dentry); -+ } -+ bcpup = err; -+ if (unlikely(err < 0)) -+ goto out; -+ } -+ } else { -+ bcpup = args->force_btgt; -+ AuDebugOn(au_test_ro(sb, bcpup, dentry->d_inode)); -+ } -+ -+ AuDbg("bstart %d, bcpup %d\n", bstart, bcpup); -+ err = bcpup; -+ if (bcpup == bstart) -+ goto out; /* success */ -+ -+ /* copyup the new parent into the branch we process */ -+ err = au_wr_dir_cpup(dentry, parent, add_entry, bcpup, bstart); -+ if (err >= 0) { -+ if (!dentry->d_inode) { -+ au_set_h_dptr(dentry, bstart, NULL); -+ au_set_dbstart(dentry, bcpup); -+ au_set_dbend(dentry, bcpup); -+ } -+ AuDebugOn(add_entry -+ && !au_ftest_wrdir(args->flags, TMPFILE) -+ && !au_h_dptr(dentry, bcpup)); -+ } -+ -+out: -+ dput(parent); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void 
au_pin_hdir_unlock(struct au_pin *p) -+{ -+ if (p->hdir) -+ au_hn_imtx_unlock(p->hdir); -+} -+ -+int au_pin_hdir_lock(struct au_pin *p) -+{ -+ int err; -+ -+ err = 0; -+ if (!p->hdir) -+ goto out; -+ -+ /* even if an error happens later, keep this lock */ -+ au_hn_imtx_lock_nested(p->hdir, p->lsc_hi); -+ -+ err = -EBUSY; -+ if (unlikely(p->hdir->hi_inode != p->h_parent->d_inode)) -+ goto out; -+ -+ err = 0; -+ if (p->h_dentry) -+ err = au_h_verify(p->h_dentry, p->udba, p->hdir->hi_inode, -+ p->h_parent, p->br); -+ -+out: -+ return err; -+} -+ -+int au_pin_hdir_relock(struct au_pin *p) -+{ -+ int err, i; -+ struct inode *h_i; -+ struct dentry *h_d[] = { -+ p->h_dentry, -+ p->h_parent -+ }; -+ -+ err = au_pin_hdir_lock(p); -+ if (unlikely(err)) -+ goto out; -+ -+ for (i = 0; !err && i < sizeof(h_d)/sizeof(*h_d); i++) { -+ if (!h_d[i]) -+ continue; -+ h_i = h_d[i]->d_inode; -+ if (h_i) -+ err = !h_i->i_nlink; -+ } -+ -+out: -+ return err; -+} -+ -+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task) -+{ -+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) -+ p->hdir->hi_inode->i_mutex.owner = task; -+#endif -+} -+ -+void au_pin_hdir_acquire_nest(struct au_pin *p) -+{ -+ if (p->hdir) { -+ mutex_acquire_nest(&p->hdir->hi_inode->i_mutex.dep_map, -+ p->lsc_hi, 0, NULL, _RET_IP_); -+ au_pin_hdir_set_owner(p, current); -+ } -+} -+ -+void au_pin_hdir_release(struct au_pin *p) -+{ -+ if (p->hdir) { -+ au_pin_hdir_set_owner(p, p->task); -+ mutex_release(&p->hdir->hi_inode->i_mutex.dep_map, 1, _RET_IP_); -+ } -+} -+ -+struct dentry *au_pinned_h_parent(struct au_pin *pin) -+{ -+ if (pin && pin->parent) -+ return au_h_dptr(pin->parent, pin->bindex); -+ return NULL; -+} -+ -+void au_unpin(struct au_pin *p) -+{ -+ if (p->hdir) -+ au_pin_hdir_unlock(p); -+ if (p->h_mnt && au_ftest_pin(p->flags, MNT_WRITE)) -+ vfsub_mnt_drop_write(p->h_mnt); -+ if (!p->hdir) -+ return; -+ -+ if (!au_ftest_pin(p->flags, DI_LOCKED)) -+ di_read_unlock(p->parent, AuLock_IR); -+ 
iput(p->hdir->hi_inode); -+ dput(p->parent); -+ p->parent = NULL; -+ p->hdir = NULL; -+ p->h_mnt = NULL; -+ /* do not clear p->task */ -+} -+ -+int au_do_pin(struct au_pin *p) -+{ -+ int err; -+ struct super_block *sb; -+ struct inode *h_dir; -+ -+ err = 0; -+ sb = p->dentry->d_sb; -+ p->br = au_sbr(sb, p->bindex); -+ if (IS_ROOT(p->dentry)) { -+ if (au_ftest_pin(p->flags, MNT_WRITE)) { -+ p->h_mnt = au_br_mnt(p->br); -+ err = vfsub_mnt_want_write(p->h_mnt); -+ if (unlikely(err)) { -+ au_fclr_pin(p->flags, MNT_WRITE); -+ goto out_err; -+ } -+ } -+ goto out; -+ } -+ -+ p->h_dentry = NULL; -+ if (p->bindex <= au_dbend(p->dentry)) -+ p->h_dentry = au_h_dptr(p->dentry, p->bindex); -+ -+ p->parent = dget_parent(p->dentry); -+ if (!au_ftest_pin(p->flags, DI_LOCKED)) -+ di_read_lock(p->parent, AuLock_IR, p->lsc_di); -+ -+ h_dir = NULL; -+ p->h_parent = au_h_dptr(p->parent, p->bindex); -+ p->hdir = au_hi(p->parent->d_inode, p->bindex); -+ if (p->hdir) -+ h_dir = p->hdir->hi_inode; -+ -+ /* -+ * udba case, or -+ * if DI_LOCKED is not set, then p->parent may be different -+ * and h_parent can be NULL. 
-+ */ -+ if (unlikely(!p->hdir || !h_dir || !p->h_parent)) { -+ err = -EBUSY; -+ if (!au_ftest_pin(p->flags, DI_LOCKED)) -+ di_read_unlock(p->parent, AuLock_IR); -+ dput(p->parent); -+ p->parent = NULL; -+ goto out_err; -+ } -+ -+ if (au_ftest_pin(p->flags, MNT_WRITE)) { -+ p->h_mnt = au_br_mnt(p->br); -+ err = vfsub_mnt_want_write(p->h_mnt); -+ if (unlikely(err)) { -+ au_fclr_pin(p->flags, MNT_WRITE); -+ if (!au_ftest_pin(p->flags, DI_LOCKED)) -+ di_read_unlock(p->parent, AuLock_IR); -+ dput(p->parent); -+ p->parent = NULL; -+ goto out_err; -+ } -+ } -+ -+ au_igrab(h_dir); -+ err = au_pin_hdir_lock(p); -+ if (!err) -+ goto out; /* success */ -+ -+ au_unpin(p); -+ -+out_err: -+ pr_err("err %d\n", err); -+ err = au_busy_or_stale(); -+out: -+ return err; -+} -+ -+void au_pin_init(struct au_pin *p, struct dentry *dentry, -+ aufs_bindex_t bindex, int lsc_di, int lsc_hi, -+ unsigned int udba, unsigned char flags) -+{ -+ p->dentry = dentry; -+ p->udba = udba; -+ p->lsc_di = lsc_di; -+ p->lsc_hi = lsc_hi; -+ p->flags = flags; -+ p->bindex = bindex; -+ -+ p->parent = NULL; -+ p->hdir = NULL; -+ p->h_mnt = NULL; -+ -+ p->h_dentry = NULL; -+ p->h_parent = NULL; -+ p->br = NULL; -+ p->task = current; -+} -+ -+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex, -+ unsigned int udba, unsigned char flags) -+{ -+ au_pin_init(pin, dentry, bindex, AuLsc_DI_PARENT, AuLsc_I_PARENT2, -+ udba, flags); -+ return au_do_pin(pin); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * ->setattr() and ->getattr() are called in various cases. -+ * chmod, stat: dentry is revalidated. -+ * fchmod, fstat: file and dentry are not revalidated, additionally they may be -+ * unhashed. -+ * for ->setattr(), ia->ia_file is passed from ftruncate only. 
-+ */ -+/* todo: consolidate with do_refresh() and simple_reval_dpath() */ -+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen) -+{ -+ int err; -+ struct inode *inode; -+ struct dentry *parent; -+ -+ err = 0; -+ inode = dentry->d_inode; -+ if (au_digen_test(dentry, sigen)) { -+ parent = dget_parent(dentry); -+ di_read_lock_parent(parent, AuLock_IR); -+ err = au_refresh_dentry(dentry, parent); -+ di_read_unlock(parent, AuLock_IR); -+ dput(parent); -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia, -+ struct au_icpup_args *a) -+{ -+ int err; -+ loff_t sz; -+ aufs_bindex_t bstart, ibstart; -+ struct dentry *hi_wh, *parent; -+ struct inode *inode; -+ struct au_wr_dir_args wr_dir_args = { -+ .force_btgt = -1, -+ .flags = 0 -+ }; -+ -+ if (d_is_dir(dentry)) -+ au_fset_wrdir(wr_dir_args.flags, ISDIR); -+ /* plink or hi_wh() case */ -+ bstart = au_dbstart(dentry); -+ inode = dentry->d_inode; -+ ibstart = au_ibstart(inode); -+ if (bstart != ibstart && !au_test_ro(inode->i_sb, ibstart, inode)) -+ wr_dir_args.force_btgt = ibstart; -+ err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args); -+ if (unlikely(err < 0)) -+ goto out; -+ a->btgt = err; -+ if (err != bstart) -+ au_fset_icpup(a->flags, DID_CPUP); -+ -+ err = 0; -+ a->pin_flags = AuPin_MNT_WRITE; -+ parent = NULL; -+ if (!IS_ROOT(dentry)) { -+ au_fset_pin(a->pin_flags, DI_LOCKED); -+ parent = dget_parent(dentry); -+ di_write_lock_parent(parent); -+ } -+ -+ err = au_pin(&a->pin, dentry, a->btgt, a->udba, a->pin_flags); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ a->h_path.dentry = au_h_dptr(dentry, bstart); -+ a->h_inode = a->h_path.dentry->d_inode; -+ sz = -1; -+ if (ia && (ia->ia_valid & ATTR_SIZE)) { -+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD); -+ if (ia->ia_size < i_size_read(a->h_inode)) -+ sz = ia->ia_size; -+ mutex_unlock(&a->h_inode->i_mutex); -+ } -+ -+ hi_wh = NULL; -+ if (au_ftest_icpup(a->flags, DID_CPUP) && 
d_unlinked(dentry)) { -+ hi_wh = au_hi_wh(inode, a->btgt); -+ if (!hi_wh) { -+ struct au_cp_generic cpg = { -+ .dentry = dentry, -+ .bdst = a->btgt, -+ .bsrc = -1, -+ .len = sz, -+ .pin = &a->pin -+ }; -+ err = au_sio_cpup_wh(&cpg, /*file*/NULL); -+ if (unlikely(err)) -+ goto out_unlock; -+ hi_wh = au_hi_wh(inode, a->btgt); -+ /* todo: revalidate hi_wh? */ -+ } -+ } -+ -+ if (parent) { -+ au_pin_set_parent_lflag(&a->pin, /*lflag*/0); -+ di_downgrade_lock(parent, AuLock_IR); -+ dput(parent); -+ parent = NULL; -+ } -+ if (!au_ftest_icpup(a->flags, DID_CPUP)) -+ goto out; /* success */ -+ -+ if (!d_unhashed(dentry)) { -+ struct au_cp_generic cpg = { -+ .dentry = dentry, -+ .bdst = a->btgt, -+ .bsrc = bstart, -+ .len = sz, -+ .pin = &a->pin, -+ .flags = AuCpup_DTIME | AuCpup_HOPEN -+ }; -+ err = au_sio_cpup_simple(&cpg); -+ if (!err) -+ a->h_path.dentry = au_h_dptr(dentry, a->btgt); -+ } else if (!hi_wh) -+ a->h_path.dentry = au_h_dptr(dentry, a->btgt); -+ else -+ a->h_path.dentry = hi_wh; /* do not dget here */ -+ -+out_unlock: -+ a->h_inode = a->h_path.dentry->d_inode; -+ if (!err) -+ goto out; /* success */ -+ au_unpin(&a->pin); -+out_parent: -+ if (parent) { -+ di_write_unlock(parent); -+ dput(parent); -+ } -+out: -+ if (!err) -+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD); -+ return err; -+} -+ -+static int aufs_setattr(struct dentry *dentry, struct iattr *ia) -+{ -+ int err; -+ struct inode *inode, *delegated; -+ struct super_block *sb; -+ struct file *file; -+ struct au_icpup_args *a; -+ -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ -+ err = -ENOMEM; -+ a = kzalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) -+ ia->ia_valid &= ~ATTR_MODE; -+ -+ file = NULL; -+ sb = dentry->d_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out_kfree; -+ -+ if (ia->ia_valid & ATTR_FILE) { -+ /* currently ftruncate(2) only */ -+ 
AuDebugOn(!S_ISREG(inode->i_mode)); -+ file = ia->ia_file; -+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1); -+ if (unlikely(err)) -+ goto out_si; -+ ia->ia_file = au_hf_top(file); -+ a->udba = AuOpt_UDBA_NONE; -+ } else { -+ /* fchmod() doesn't pass ia_file */ -+ a->udba = au_opt_udba(sb); -+ di_write_lock_child(dentry); -+ /* no d_unlinked(), to set UDBA_NONE for root */ -+ if (d_unhashed(dentry)) -+ a->udba = AuOpt_UDBA_NONE; -+ if (a->udba != AuOpt_UDBA_NONE) { -+ AuDebugOn(IS_ROOT(dentry)); -+ err = au_reval_for_attr(dentry, au_sigen(sb)); -+ if (unlikely(err)) -+ goto out_dentry; -+ } -+ } -+ -+ err = au_pin_and_icpup(dentry, ia, a); -+ if (unlikely(err < 0)) -+ goto out_dentry; -+ if (au_ftest_icpup(a->flags, DID_CPUP)) { -+ ia->ia_file = NULL; -+ ia->ia_valid &= ~ATTR_FILE; -+ } -+ -+ a->h_path.mnt = au_sbr_mnt(sb, a->btgt); -+ if ((ia->ia_valid & (ATTR_MODE | ATTR_CTIME)) -+ == (ATTR_MODE | ATTR_CTIME)) { -+ err = security_path_chmod(&a->h_path, ia->ia_mode); -+ if (unlikely(err)) -+ goto out_unlock; -+ } else if ((ia->ia_valid & (ATTR_UID | ATTR_GID)) -+ && (ia->ia_valid & ATTR_CTIME)) { -+ err = security_path_chown(&a->h_path, ia->ia_uid, ia->ia_gid); -+ if (unlikely(err)) -+ goto out_unlock; -+ } -+ -+ if (ia->ia_valid & ATTR_SIZE) { -+ struct file *f; -+ -+ if (ia->ia_size < i_size_read(inode)) -+ /* unmap only */ -+ truncate_setsize(inode, ia->ia_size); -+ -+ f = NULL; -+ if (ia->ia_valid & ATTR_FILE) -+ f = ia->ia_file; -+ mutex_unlock(&a->h_inode->i_mutex); -+ err = vfsub_trunc(&a->h_path, ia->ia_size, ia->ia_valid, f); -+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD); -+ } else { -+ delegated = NULL; -+ while (1) { -+ err = vfsub_notify_change(&a->h_path, ia, &delegated); -+ if (delegated) { -+ err = break_deleg_wait(&delegated); -+ if (!err) -+ continue; -+ } -+ break; -+ } -+ } -+ /* -+ * regardless aufs 'acl' option setting. -+ * why don't all acl-aware fs call this func from their ->setattr()? 
-+ */ -+ if (!err && (ia->ia_valid & ATTR_MODE)) -+ err = vfsub_acl_chmod(a->h_inode, ia->ia_mode); -+ if (!err) -+ au_cpup_attr_changeable(inode); -+ -+out_unlock: -+ mutex_unlock(&a->h_inode->i_mutex); -+ au_unpin(&a->pin); -+ if (unlikely(err)) -+ au_update_dbstart(dentry); -+out_dentry: -+ di_write_unlock(dentry); -+ if (file) { -+ fi_write_unlock(file); -+ ia->ia_file = file; -+ ia->ia_valid |= ATTR_FILE; -+ } -+out_si: -+ si_read_unlock(sb); -+out_kfree: -+ kfree(a); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL) -+static int au_h_path_to_set_attr(struct dentry *dentry, -+ struct au_icpup_args *a, struct path *h_path) -+{ -+ int err; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ a->udba = au_opt_udba(sb); -+ /* no d_unlinked(), to set UDBA_NONE for root */ -+ if (d_unhashed(dentry)) -+ a->udba = AuOpt_UDBA_NONE; -+ if (a->udba != AuOpt_UDBA_NONE) { -+ AuDebugOn(IS_ROOT(dentry)); -+ err = au_reval_for_attr(dentry, au_sigen(sb)); -+ if (unlikely(err)) -+ goto out; -+ } -+ err = au_pin_and_icpup(dentry, /*ia*/NULL, a); -+ if (unlikely(err < 0)) -+ goto out; -+ -+ h_path->dentry = a->h_path.dentry; -+ h_path->mnt = au_sbr_mnt(sb, a->btgt); -+ -+out: -+ return err; -+} -+ -+ssize_t au_srxattr(struct dentry *dentry, struct au_srxattr *arg) -+{ -+ int err; -+ struct path h_path; -+ struct super_block *sb; -+ struct au_icpup_args *a; -+ struct inode *inode, *h_inode; -+ -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ -+ err = -ENOMEM; -+ a = kzalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out_kfree; -+ -+ h_path.dentry = NULL; /* silence gcc */ -+ di_write_lock_child(dentry); -+ err = au_h_path_to_set_attr(dentry, a, &h_path); -+ if (unlikely(err)) -+ goto out_di; -+ -+ mutex_unlock(&a->h_inode->i_mutex); -+ switch (arg->type) { -+ case AU_XATTR_SET: -+ err 
= vfsub_setxattr(h_path.dentry, -+ arg->u.set.name, arg->u.set.value, -+ arg->u.set.size, arg->u.set.flags); -+ break; -+ case AU_XATTR_REMOVE: -+ err = vfsub_removexattr(h_path.dentry, arg->u.remove.name); -+ break; -+ case AU_ACL_SET: -+ err = -EOPNOTSUPP; -+ h_inode = h_path.dentry->d_inode; -+ if (h_inode->i_op->set_acl) -+ err = h_inode->i_op->set_acl(h_inode, -+ arg->u.acl_set.acl, -+ arg->u.acl_set.type); -+ break; -+ } -+ if (!err) -+ au_cpup_attr_timesizes(inode); -+ -+ au_unpin(&a->pin); -+ if (unlikely(err)) -+ au_update_dbstart(dentry); -+ -+out_di: -+ di_write_unlock(dentry); -+ si_read_unlock(sb); -+out_kfree: -+ kfree(a); -+out: -+ AuTraceErr(err); -+ return err; -+} -+#endif -+ -+static void au_refresh_iattr(struct inode *inode, struct kstat *st, -+ unsigned int nlink) -+{ -+ unsigned int n; -+ -+ inode->i_mode = st->mode; -+ /* don't i_[ug]id_write() here */ -+ inode->i_uid = st->uid; -+ inode->i_gid = st->gid; -+ inode->i_atime = st->atime; -+ inode->i_mtime = st->mtime; -+ inode->i_ctime = st->ctime; -+ -+ au_cpup_attr_nlink(inode, /*force*/0); -+ if (S_ISDIR(inode->i_mode)) { -+ n = inode->i_nlink; -+ n -= nlink; -+ n += st->nlink; -+ smp_mb(); /* for i_nlink */ -+ /* 0 can happen */ -+ set_nlink(inode, n); -+ } -+ -+ spin_lock(&inode->i_lock); -+ inode->i_blocks = st->blocks; -+ i_size_write(inode, st->size); -+ spin_unlock(&inode->i_lock); -+} -+ -+/* -+ * common routine for aufs_getattr() and aufs_getxattr(). -+ * returns zero or negative (an error). -+ * @dentry will be read-locked in success. 
-+ */ -+int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path) -+{ -+ int err; -+ unsigned int mnt_flags, sigen; -+ unsigned char udba_none; -+ aufs_bindex_t bindex; -+ struct super_block *sb, *h_sb; -+ struct inode *inode; -+ -+ h_path->mnt = NULL; -+ h_path->dentry = NULL; -+ -+ err = 0; -+ sb = dentry->d_sb; -+ mnt_flags = au_mntflags(sb); -+ udba_none = !!au_opt_test(mnt_flags, UDBA_NONE); -+ -+ /* support fstat(2) */ -+ if (!d_unlinked(dentry) && !udba_none) { -+ sigen = au_sigen(sb); -+ err = au_digen_test(dentry, sigen); -+ if (!err) { -+ di_read_lock_child(dentry, AuLock_IR); -+ err = au_dbrange_test(dentry); -+ if (unlikely(err)) { -+ di_read_unlock(dentry, AuLock_IR); -+ goto out; -+ } -+ } else { -+ AuDebugOn(IS_ROOT(dentry)); -+ di_write_lock_child(dentry); -+ err = au_dbrange_test(dentry); -+ if (!err) -+ err = au_reval_for_attr(dentry, sigen); -+ if (!err) -+ di_downgrade_lock(dentry, AuLock_IR); -+ else { -+ di_write_unlock(dentry); -+ goto out; -+ } -+ } -+ } else -+ di_read_lock_child(dentry, AuLock_IR); -+ -+ inode = dentry->d_inode; -+ bindex = au_ibstart(inode); -+ h_path->mnt = au_sbr_mnt(sb, bindex); -+ h_sb = h_path->mnt->mnt_sb; -+ if (!force -+ && !au_test_fs_bad_iattr(h_sb) -+ && udba_none) -+ goto out; /* success */ -+ -+ if (au_dbstart(dentry) == bindex) -+ h_path->dentry = au_h_dptr(dentry, bindex); -+ else if (au_opt_test(mnt_flags, PLINK) && au_plink_test(inode)) { -+ h_path->dentry = au_plink_lkup(inode, bindex); -+ if (IS_ERR(h_path->dentry)) -+ /* pretending success */ -+ h_path->dentry = NULL; -+ else -+ dput(h_path->dentry); -+ } -+ -+out: -+ return err; -+} -+ -+static int aufs_getattr(struct vfsmount *mnt __maybe_unused, -+ struct dentry *dentry, struct kstat *st) -+{ -+ int err; -+ unsigned char positive; -+ struct path h_path; -+ struct inode *inode; -+ struct super_block *sb; -+ -+ inode = dentry->d_inode; -+ sb = dentry->d_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if 
(unlikely(err)) -+ goto out; -+ err = au_h_path_getattr(dentry, /*force*/0, &h_path); -+ if (unlikely(err)) -+ goto out_si; -+ if (unlikely(!h_path.dentry)) -+ /* illegally overlapped or something */ -+ goto out_fill; /* pretending success */ -+ -+ positive = !!h_path.dentry->d_inode; -+ if (positive) -+ err = vfs_getattr(&h_path, st); -+ if (!err) { -+ if (positive) -+ au_refresh_iattr(inode, st, -+ h_path.dentry->d_inode->i_nlink); -+ goto out_fill; /* success */ -+ } -+ AuTraceErr(err); -+ goto out_di; -+ -+out_fill: -+ generic_fillattr(inode, st); -+out_di: -+ di_read_unlock(dentry, AuLock_IR); -+out_si: -+ si_read_unlock(sb); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int h_readlink(struct dentry *dentry, int bindex, char __user *buf, -+ int bufsiz) -+{ -+ int err; -+ struct super_block *sb; -+ struct dentry *h_dentry; -+ -+ err = -EINVAL; -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (unlikely(!h_dentry->d_inode->i_op->readlink)) -+ goto out; -+ -+ err = security_inode_readlink(h_dentry); -+ if (unlikely(err)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ if (!au_test_ro(sb, bindex, dentry->d_inode)) { -+ vfsub_touch_atime(au_sbr_mnt(sb, bindex), h_dentry); -+ fsstack_copy_attr_atime(dentry->d_inode, h_dentry->d_inode); -+ } -+ err = h_dentry->d_inode->i_op->readlink(h_dentry, buf, bufsiz); -+ -+out: -+ return err; -+} -+ -+static int aufs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) -+{ -+ int err; -+ -+ err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN); -+ if (unlikely(err)) -+ goto out; -+ err = au_d_hashed_positive(dentry); -+ if (!err) -+ err = h_readlink(dentry, au_dbstart(dentry), buf, bufsiz); -+ aufs_read_unlock(dentry, AuLock_IR); -+ -+out: -+ return err; -+} -+ -+static void *aufs_follow_link(struct dentry *dentry, struct nameidata *nd) -+{ -+ int err; -+ mm_segment_t old_fs; -+ union { -+ char *k; -+ char __user *u; -+ } buf; -+ -+ err 
= -ENOMEM; -+ buf.k = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!buf.k)) -+ goto out; -+ -+ err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_name; -+ -+ err = au_d_hashed_positive(dentry); -+ if (!err) { -+ old_fs = get_fs(); -+ set_fs(KERNEL_DS); -+ err = h_readlink(dentry, au_dbstart(dentry), buf.u, PATH_MAX); -+ set_fs(old_fs); -+ } -+ aufs_read_unlock(dentry, AuLock_IR); -+ -+ if (err >= 0) { -+ buf.k[err] = 0; -+ /* will be freed by put_link */ -+ nd_set_link(nd, buf.k); -+ return NULL; /* success */ -+ } -+ -+out_name: -+ free_page((unsigned long)buf.k); -+out: -+ AuTraceErr(err); -+ return ERR_PTR(err); -+} -+ -+static void aufs_put_link(struct dentry *dentry __maybe_unused, -+ struct nameidata *nd, void *cookie __maybe_unused) -+{ -+ char *p; -+ -+ p = nd_get_link(nd); -+ if (!IS_ERR_OR_NULL(p)) -+ free_page((unsigned long)p); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_update_time(struct inode *inode, struct timespec *ts, int flags) -+{ -+ int err; -+ struct super_block *sb; -+ struct inode *h_inode; -+ -+ sb = inode->i_sb; -+ /* mmap_sem might be acquired already, cf. 
aufs_mmap() */ -+ lockdep_off(); -+ si_read_lock(sb, AuLock_FLUSH); -+ ii_write_lock_child(inode); -+ lockdep_on(); -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ err = vfsub_update_time(h_inode, ts, flags); -+ lockdep_off(); -+ if (!err) -+ au_cpup_attr_timesizes(inode); -+ ii_write_unlock(inode); -+ si_read_unlock(sb); -+ lockdep_on(); -+ -+ if (!err && (flags & S_VERSION)) -+ inode_inc_iversion(inode); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* no getattr version will be set by module.c:aufs_init() */ -+struct inode_operations aufs_iop_nogetattr[AuIop_Last], -+ aufs_iop[] = { -+ [AuIop_SYMLINK] = { -+ .permission = aufs_permission, -+#ifdef CONFIG_FS_POSIX_ACL -+ .get_acl = aufs_get_acl, -+ .set_acl = aufs_set_acl, /* unsupport for symlink? */ -+#endif -+ -+ .setattr = aufs_setattr, -+ .getattr = aufs_getattr, -+ -+#ifdef CONFIG_AUFS_XATTR -+ .setxattr = aufs_setxattr, -+ .getxattr = aufs_getxattr, -+ .listxattr = aufs_listxattr, -+ .removexattr = aufs_removexattr, -+#endif -+ -+ .readlink = aufs_readlink, -+ .follow_link = aufs_follow_link, -+ .put_link = aufs_put_link, -+ -+ /* .update_time = aufs_update_time */ -+ }, -+ [AuIop_DIR] = { -+ .create = aufs_create, -+ .lookup = aufs_lookup, -+ .link = aufs_link, -+ .unlink = aufs_unlink, -+ .symlink = aufs_symlink, -+ .mkdir = aufs_mkdir, -+ .rmdir = aufs_rmdir, -+ .mknod = aufs_mknod, -+ .rename = aufs_rename, -+ -+ .permission = aufs_permission, -+#ifdef CONFIG_FS_POSIX_ACL -+ .get_acl = aufs_get_acl, -+ .set_acl = aufs_set_acl, -+#endif -+ -+ .setattr = aufs_setattr, -+ .getattr = aufs_getattr, -+ -+#ifdef CONFIG_AUFS_XATTR -+ .setxattr = aufs_setxattr, -+ .getxattr = aufs_getxattr, -+ .listxattr = aufs_listxattr, -+ .removexattr = aufs_removexattr, -+#endif -+ -+ .update_time = aufs_update_time, -+ .atomic_open = aufs_atomic_open, -+ .tmpfile = aufs_tmpfile -+ }, -+ [AuIop_OTHER] = { -+ .permission = aufs_permission, -+#ifdef 
CONFIG_FS_POSIX_ACL -+ .get_acl = aufs_get_acl, -+ .set_acl = aufs_set_acl, -+#endif -+ -+ .setattr = aufs_setattr, -+ .getattr = aufs_getattr, -+ -+#ifdef CONFIG_AUFS_XATTR -+ .setxattr = aufs_setxattr, -+ .getxattr = aufs_getxattr, -+ .listxattr = aufs_listxattr, -+ .removexattr = aufs_removexattr, -+#endif -+ -+ .update_time = aufs_update_time -+ } -+}; -diff --git a/fs/aufs/i_op_add.c b/fs/aufs/i_op_add.c -new file mode 100644 -index 0000000..9e4f65c ---- /dev/null -+++ b/fs/aufs/i_op_add.c -@@ -0,0 +1,930 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * inode operations (add entry) -+ */ -+ -+#include "aufs.h" -+ -+/* -+ * final procedure of adding a new entry, except link(2). -+ * remove whiteout, instantiate, copyup the parent dir's times and size -+ * and update version. -+ * if it failed, re-create the removed whiteout. 
-+ */ -+static int epilog(struct inode *dir, aufs_bindex_t bindex, -+ struct dentry *wh_dentry, struct dentry *dentry) -+{ -+ int err, rerr; -+ aufs_bindex_t bwh; -+ struct path h_path; -+ struct super_block *sb; -+ struct inode *inode, *h_dir; -+ struct dentry *wh; -+ -+ bwh = -1; -+ sb = dir->i_sb; -+ if (wh_dentry) { -+ h_dir = wh_dentry->d_parent->d_inode; /* dir inode is locked */ -+ IMustLock(h_dir); -+ AuDebugOn(au_h_iptr(dir, bindex) != h_dir); -+ bwh = au_dbwh(dentry); -+ h_path.dentry = wh_dentry; -+ h_path.mnt = au_sbr_mnt(sb, bindex); -+ err = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path, -+ dentry); -+ if (unlikely(err)) -+ goto out; -+ } -+ -+ inode = au_new_inode(dentry, /*must_new*/1); -+ if (!IS_ERR(inode)) { -+ d_instantiate(dentry, inode); -+ dir = dentry->d_parent->d_inode; /* dir inode is locked */ -+ IMustLock(dir); -+ au_dir_ts(dir, bindex); -+ dir->i_version++; -+ au_fhsm_wrote(sb, bindex, /*force*/0); -+ return 0; /* success */ -+ } -+ -+ err = PTR_ERR(inode); -+ if (!wh_dentry) -+ goto out; -+ -+ /* revert */ -+ /* dir inode is locked */ -+ wh = au_wh_create(dentry, bwh, wh_dentry->d_parent); -+ rerr = PTR_ERR(wh); -+ if (IS_ERR(wh)) { -+ AuIOErr("%pd reverting whiteout failed(%d, %d)\n", -+ dentry, err, rerr); -+ err = -EIO; -+ } else -+ dput(wh); -+ -+out: -+ return err; -+} -+ -+static int au_d_may_add(struct dentry *dentry) -+{ -+ int err; -+ -+ err = 0; -+ if (unlikely(d_unhashed(dentry))) -+ err = -ENOENT; -+ if (unlikely(dentry->d_inode)) -+ err = -EEXIST; -+ return err; -+} -+ -+/* -+ * simple tests for the adding inode operations. -+ * following the checks in vfs, plus the parent-child relationship. 
-+ */ -+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent, int isdir) -+{ -+ int err; -+ umode_t h_mode; -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ -+ err = -ENAMETOOLONG; -+ if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN)) -+ goto out; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ h_inode = h_dentry->d_inode; -+ if (!dentry->d_inode) { -+ err = -EEXIST; -+ if (unlikely(h_inode)) -+ goto out; -+ } else { -+ /* rename(2) case */ -+ err = -EIO; -+ if (unlikely(!h_inode || !h_inode->i_nlink)) -+ goto out; -+ -+ h_mode = h_inode->i_mode; -+ if (!isdir) { -+ err = -EISDIR; -+ if (unlikely(S_ISDIR(h_mode))) -+ goto out; -+ } else if (unlikely(!S_ISDIR(h_mode))) { -+ err = -ENOTDIR; -+ goto out; -+ } -+ } -+ -+ err = 0; -+ /* expected parent dir is locked */ -+ if (unlikely(h_parent != h_dentry->d_parent)) -+ err = -EIO; -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* -+ * initial procedure of adding a new entry. -+ * prepare writable branch and the parent dir, lock it, -+ * and lookup whiteout for the new entry. 
-+ */ -+static struct dentry* -+lock_hdir_lkup_wh(struct dentry *dentry, struct au_dtime *dt, -+ struct dentry *src_dentry, struct au_pin *pin, -+ struct au_wr_dir_args *wr_dir_args) -+{ -+ struct dentry *wh_dentry, *h_parent; -+ struct super_block *sb; -+ struct au_branch *br; -+ int err; -+ unsigned int udba; -+ aufs_bindex_t bcpup; -+ -+ AuDbg("%pd\n", dentry); -+ -+ err = au_wr_dir(dentry, src_dentry, wr_dir_args); -+ bcpup = err; -+ wh_dentry = ERR_PTR(err); -+ if (unlikely(err < 0)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ udba = au_opt_udba(sb); -+ err = au_pin(pin, dentry, bcpup, udba, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ wh_dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ h_parent = au_pinned_h_parent(pin); -+ if (udba != AuOpt_UDBA_NONE -+ && au_dbstart(dentry) == bcpup) -+ err = au_may_add(dentry, bcpup, h_parent, -+ au_ftest_wrdir(wr_dir_args->flags, ISDIR)); -+ else if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN)) -+ err = -ENAMETOOLONG; -+ wh_dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out_unpin; -+ -+ br = au_sbr(sb, bcpup); -+ if (dt) { -+ struct path tmp = { -+ .dentry = h_parent, -+ .mnt = au_br_mnt(br) -+ }; -+ au_dtime_store(dt, au_pinned_parent(pin), &tmp); -+ } -+ -+ wh_dentry = NULL; -+ if (bcpup != au_dbwh(dentry)) -+ goto out; /* success */ -+ -+ /* -+ * ENAMETOOLONG here means that if we allowed create such name, then it -+ * would not be able to removed in the future. So we don't allow such -+ * name here and we don't handle ENAMETOOLONG differently here. 
-+ */ -+ wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, br); -+ -+out_unpin: -+ if (IS_ERR(wh_dentry)) -+ au_unpin(pin); -+out: -+ return wh_dentry; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+enum { Mknod, Symlink, Creat }; -+struct simple_arg { -+ int type; -+ union { -+ struct { -+ umode_t mode; -+ bool want_excl; -+ bool try_aopen; -+ struct vfsub_aopen_args *aopen; -+ } c; -+ struct { -+ const char *symname; -+ } s; -+ struct { -+ umode_t mode; -+ dev_t dev; -+ } m; -+ } u; -+}; -+ -+static int add_simple(struct inode *dir, struct dentry *dentry, -+ struct simple_arg *arg) -+{ -+ int err, rerr; -+ aufs_bindex_t bstart; -+ unsigned char created; -+ const unsigned char try_aopen -+ = (arg->type == Creat && arg->u.c.try_aopen); -+ struct dentry *wh_dentry, *parent; -+ struct inode *h_dir; -+ struct super_block *sb; -+ struct au_branch *br; -+ /* to reuduce stack size */ -+ struct { -+ struct au_dtime dt; -+ struct au_pin pin; -+ struct path h_path; -+ struct au_wr_dir_args wr_dir_args; -+ } *a; -+ -+ AuDbg("%pd\n", dentry); -+ IMustLock(dir); -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ a->wr_dir_args.force_btgt = -1; -+ a->wr_dir_args.flags = AuWrDir_ADD_ENTRY; -+ -+ parent = dentry->d_parent; /* dir inode is locked */ -+ if (!try_aopen) { -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_free; -+ } -+ err = au_d_may_add(dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ if (!try_aopen) -+ di_write_lock_parent(parent); -+ wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL, -+ &a->pin, &a->wr_dir_args); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_parent; -+ -+ bstart = au_dbstart(dentry); -+ sb = dentry->d_sb; -+ br = au_sbr(sb, bstart); -+ a->h_path.dentry = au_h_dptr(dentry, bstart); -+ a->h_path.mnt = au_br_mnt(br); -+ h_dir = au_pinned_h_dir(&a->pin); -+ switch (arg->type) { -+ 
case Creat: -+ err = 0; -+ if (!try_aopen || !h_dir->i_op->atomic_open) -+ err = vfsub_create(h_dir, &a->h_path, arg->u.c.mode, -+ arg->u.c.want_excl); -+ else -+ err = vfsub_atomic_open(h_dir, a->h_path.dentry, -+ arg->u.c.aopen, br); -+ break; -+ case Symlink: -+ err = vfsub_symlink(h_dir, &a->h_path, arg->u.s.symname); -+ break; -+ case Mknod: -+ err = vfsub_mknod(h_dir, &a->h_path, arg->u.m.mode, -+ arg->u.m.dev); -+ break; -+ default: -+ BUG(); -+ } -+ created = !err; -+ if (!err) -+ err = epilog(dir, bstart, wh_dentry, dentry); -+ -+ /* revert */ -+ if (unlikely(created && err && a->h_path.dentry->d_inode)) { -+ /* no delegation since it is just created */ -+ rerr = vfsub_unlink(h_dir, &a->h_path, /*delegated*/NULL, -+ /*force*/0); -+ if (rerr) { -+ AuIOErr("%pd revert failure(%d, %d)\n", -+ dentry, err, rerr); -+ err = -EIO; -+ } -+ au_dtime_revert(&a->dt); -+ } -+ -+ if (!err && try_aopen && !h_dir->i_op->atomic_open) -+ *arg->u.c.aopen->opened |= FILE_CREATED; -+ -+ au_unpin(&a->pin); -+ dput(wh_dentry); -+ -+out_parent: -+ if (!try_aopen) -+ di_write_unlock(parent); -+out_unlock: -+ if (unlikely(err)) { -+ au_update_dbstart(dentry); -+ d_drop(dentry); -+ } -+ if (!try_aopen) -+ aufs_read_unlock(dentry, AuLock_DW); -+out_free: -+ kfree(a); -+out: -+ return err; -+} -+ -+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, -+ dev_t dev) -+{ -+ struct simple_arg arg = { -+ .type = Mknod, -+ .u.m = { -+ .mode = mode, -+ .dev = dev -+ } -+ }; -+ return add_simple(dir, dentry, &arg); -+} -+ -+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) -+{ -+ struct simple_arg arg = { -+ .type = Symlink, -+ .u.s.symname = symname -+ }; -+ return add_simple(dir, dentry, &arg); -+} -+ -+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode, -+ bool want_excl) -+{ -+ struct simple_arg arg = { -+ .type = Creat, -+ .u.c = { -+ .mode = mode, -+ .want_excl = want_excl -+ } -+ }; -+ return add_simple(dir, dentry, 
&arg); -+} -+ -+int au_aopen_or_create(struct inode *dir, struct dentry *dentry, -+ struct vfsub_aopen_args *aopen_args) -+{ -+ struct simple_arg arg = { -+ .type = Creat, -+ .u.c = { -+ .mode = aopen_args->create_mode, -+ .want_excl = aopen_args->open_flag & O_EXCL, -+ .try_aopen = true, -+ .aopen = aopen_args -+ } -+ }; -+ return add_simple(dir, dentry, &arg); -+} -+ -+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct super_block *sb; -+ struct dentry *parent, *h_parent, *h_dentry; -+ struct inode *h_dir, *inode; -+ struct vfsmount *h_mnt; -+ struct au_wr_dir_args wr_dir_args = { -+ .force_btgt = -1, -+ .flags = AuWrDir_TMPFILE -+ }; -+ -+ /* copy-up may happen */ -+ mutex_lock(&dir->i_mutex); -+ -+ sb = dir->i_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out; -+ -+ err = au_di_init(dentry); -+ if (unlikely(err)) -+ goto out_si; -+ -+ err = -EBUSY; -+ parent = d_find_any_alias(dir); -+ AuDebugOn(!parent); -+ di_write_lock_parent(parent); -+ if (unlikely(parent->d_inode != dir)) -+ goto out_parent; -+ -+ err = au_digen_test(parent, au_sigen(sb)); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ bindex = au_dbstart(parent); -+ au_set_dbstart(dentry, bindex); -+ au_set_dbend(dentry, bindex); -+ err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args); -+ bindex = err; -+ if (unlikely(err < 0)) -+ goto out_parent; -+ -+ err = -EOPNOTSUPP; -+ h_dir = au_h_iptr(dir, bindex); -+ if (unlikely(!h_dir->i_op->tmpfile)) -+ goto out_parent; -+ -+ h_mnt = au_sbr_mnt(sb, bindex); -+ err = vfsub_mnt_want_write(h_mnt); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ h_parent = au_h_dptr(parent, bindex); -+ err = inode_permission(h_parent->d_inode, MAY_WRITE | MAY_EXEC); -+ if (unlikely(err)) -+ goto out_mnt; -+ -+ err = -ENOMEM; -+ h_dentry = d_alloc(h_parent, &dentry->d_name); -+ if (unlikely(!h_dentry)) -+ goto out_mnt; -+ -+ err = h_dir->i_op->tmpfile(h_dir, 
h_dentry, mode); -+ if (unlikely(err)) -+ goto out_dentry; -+ -+ au_set_dbstart(dentry, bindex); -+ au_set_dbend(dentry, bindex); -+ au_set_h_dptr(dentry, bindex, dget(h_dentry)); -+ inode = au_new_inode(dentry, /*must_new*/1); -+ if (IS_ERR(inode)) { -+ err = PTR_ERR(inode); -+ au_set_h_dptr(dentry, bindex, NULL); -+ au_set_dbstart(dentry, -1); -+ au_set_dbend(dentry, -1); -+ } else { -+ if (!inode->i_nlink) -+ set_nlink(inode, 1); -+ d_tmpfile(dentry, inode); -+ au_di(dentry)->di_tmpfile = 1; -+ -+ /* update without i_mutex */ -+ if (au_ibstart(dir) == au_dbstart(dentry)) -+ au_cpup_attr_timesizes(dir); -+ } -+ -+out_dentry: -+ dput(h_dentry); -+out_mnt: -+ vfsub_mnt_drop_write(h_mnt); -+out_parent: -+ di_write_unlock(parent); -+ dput(parent); -+ di_write_unlock(dentry); -+ if (!err) -+#if 0 -+ /* verbose coding for lock class name */ -+ au_rw_class(&au_di(dentry)->di_rwsem, -+ au_lc_key + AuLcNonDir_DIINFO); -+#else -+ ; -+#endif -+ else { -+ au_di_fin(dentry); -+ dentry->d_fsdata = NULL; -+ } -+out_si: -+ si_read_unlock(sb); -+out: -+ mutex_unlock(&dir->i_mutex); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_link_args { -+ aufs_bindex_t bdst, bsrc; -+ struct au_pin pin; -+ struct path h_path; -+ struct dentry *src_parent, *parent; -+}; -+ -+static int au_cpup_before_link(struct dentry *src_dentry, -+ struct au_link_args *a) -+{ -+ int err; -+ struct dentry *h_src_dentry; -+ struct au_cp_generic cpg = { -+ .dentry = src_dentry, -+ .bdst = a->bdst, -+ .bsrc = a->bsrc, -+ .len = -1, -+ .pin = &a->pin, -+ .flags = AuCpup_DTIME | AuCpup_HOPEN /* | AuCpup_KEEPLINO */ -+ }; -+ -+ di_read_lock_parent(a->src_parent, AuLock_IR); -+ err = au_test_and_cpup_dirs(src_dentry, a->bdst); -+ if (unlikely(err)) -+ goto out; -+ -+ h_src_dentry = au_h_dptr(src_dentry, a->bsrc); -+ err = au_pin(&a->pin, src_dentry, a->bdst, -+ au_opt_udba(src_dentry->d_sb), -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if 
(unlikely(err)) -+ goto out; -+ -+ err = au_sio_cpup_simple(&cpg); -+ au_unpin(&a->pin); -+ -+out: -+ di_read_unlock(a->src_parent, AuLock_IR); -+ return err; -+} -+ -+static int au_cpup_or_link(struct dentry *src_dentry, struct dentry *dentry, -+ struct au_link_args *a) -+{ -+ int err; -+ unsigned char plink; -+ aufs_bindex_t bend; -+ struct dentry *h_src_dentry; -+ struct inode *h_inode, *inode, *delegated; -+ struct super_block *sb; -+ struct file *h_file; -+ -+ plink = 0; -+ h_inode = NULL; -+ sb = src_dentry->d_sb; -+ inode = src_dentry->d_inode; -+ if (au_ibstart(inode) <= a->bdst) -+ h_inode = au_h_iptr(inode, a->bdst); -+ if (!h_inode || !h_inode->i_nlink) { -+ /* copyup src_dentry as the name of dentry. */ -+ bend = au_dbend(dentry); -+ if (bend < a->bsrc) -+ au_set_dbend(dentry, a->bsrc); -+ au_set_h_dptr(dentry, a->bsrc, -+ dget(au_h_dptr(src_dentry, a->bsrc))); -+ dget(a->h_path.dentry); -+ au_set_h_dptr(dentry, a->bdst, NULL); -+ AuDbg("temporary d_inode...\n"); -+ spin_lock(&dentry->d_lock); -+ dentry->d_inode = src_dentry->d_inode; /* tmp */ -+ spin_unlock(&dentry->d_lock); -+ h_file = au_h_open_pre(dentry, a->bsrc, /*force_wr*/0); -+ if (IS_ERR(h_file)) -+ err = PTR_ERR(h_file); -+ else { -+ struct au_cp_generic cpg = { -+ .dentry = dentry, -+ .bdst = a->bdst, -+ .bsrc = -1, -+ .len = -1, -+ .pin = &a->pin, -+ .flags = AuCpup_KEEPLINO -+ }; -+ err = au_sio_cpup_simple(&cpg); -+ au_h_open_post(dentry, a->bsrc, h_file); -+ if (!err) { -+ dput(a->h_path.dentry); -+ a->h_path.dentry = au_h_dptr(dentry, a->bdst); -+ } else -+ au_set_h_dptr(dentry, a->bdst, -+ a->h_path.dentry); -+ } -+ spin_lock(&dentry->d_lock); -+ dentry->d_inode = NULL; /* restore */ -+ spin_unlock(&dentry->d_lock); -+ AuDbg("temporary d_inode...done\n"); -+ au_set_h_dptr(dentry, a->bsrc, NULL); -+ au_set_dbend(dentry, bend); -+ } else { -+ /* the inode of src_dentry already exists on a.bdst branch */ -+ h_src_dentry = d_find_alias(h_inode); -+ if (!h_src_dentry && 
au_plink_test(inode)) { -+ plink = 1; -+ h_src_dentry = au_plink_lkup(inode, a->bdst); -+ err = PTR_ERR(h_src_dentry); -+ if (IS_ERR(h_src_dentry)) -+ goto out; -+ -+ if (unlikely(!h_src_dentry->d_inode)) { -+ dput(h_src_dentry); -+ h_src_dentry = NULL; -+ } -+ -+ } -+ if (h_src_dentry) { -+ delegated = NULL; -+ err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin), -+ &a->h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ dput(h_src_dentry); -+ } else { -+ AuIOErr("no dentry found for hi%lu on b%d\n", -+ h_inode->i_ino, a->bdst); -+ err = -EIO; -+ } -+ } -+ -+ if (!err && !plink) -+ au_plink_append(inode, a->bdst, a->h_path.dentry); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int aufs_link(struct dentry *src_dentry, struct inode *dir, -+ struct dentry *dentry) -+{ -+ int err, rerr; -+ struct au_dtime dt; -+ struct au_link_args *a; -+ struct dentry *wh_dentry, *h_src_dentry; -+ struct inode *inode, *delegated; -+ struct super_block *sb; -+ struct au_wr_dir_args wr_dir_args = { -+ /* .force_btgt = -1, */ -+ .flags = AuWrDir_ADD_ENTRY -+ }; -+ -+ IMustLock(dir); -+ inode = src_dentry->d_inode; -+ IMustLock(inode); -+ -+ err = -ENOMEM; -+ a = kzalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ a->parent = dentry->d_parent; /* dir inode is locked */ -+ err = aufs_read_and_write_lock2(dentry, src_dentry, -+ AuLock_NOPLM | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_kfree; -+ err = au_d_linkable(src_dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ err = au_d_may_add(dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ a->src_parent = dget_parent(src_dentry); -+ wr_dir_args.force_btgt = au_ibstart(inode); -+ -+ di_write_lock_parent(a->parent); -+ wr_dir_args.force_btgt = au_wbr(dentry, wr_dir_args.force_btgt); -+ wh_dentry = lock_hdir_lkup_wh(dentry, &dt, src_dentry, &a->pin, -+ &wr_dir_args); -+ err = 
PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_parent; -+ -+ err = 0; -+ sb = dentry->d_sb; -+ a->bdst = au_dbstart(dentry); -+ a->h_path.dentry = au_h_dptr(dentry, a->bdst); -+ a->h_path.mnt = au_sbr_mnt(sb, a->bdst); -+ a->bsrc = au_ibstart(inode); -+ h_src_dentry = au_h_d_alias(src_dentry, a->bsrc); -+ if (!h_src_dentry && au_di(src_dentry)->di_tmpfile) -+ h_src_dentry = dget(au_hi_wh(inode, a->bsrc)); -+ if (!h_src_dentry) { -+ a->bsrc = au_dbstart(src_dentry); -+ h_src_dentry = au_h_d_alias(src_dentry, a->bsrc); -+ AuDebugOn(!h_src_dentry); -+ } else if (IS_ERR(h_src_dentry)) { -+ err = PTR_ERR(h_src_dentry); -+ goto out_parent; -+ } -+ -+ if (au_opt_test(au_mntflags(sb), PLINK)) { -+ if (a->bdst < a->bsrc -+ /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */) -+ err = au_cpup_or_link(src_dentry, dentry, a); -+ else { -+ delegated = NULL; -+ err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin), -+ &a->h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ } -+ dput(h_src_dentry); -+ } else { -+ /* -+ * copyup src_dentry to the branch we process, -+ * and then link(2) to it. 
-+ */ -+ dput(h_src_dentry); -+ if (a->bdst < a->bsrc -+ /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */) { -+ au_unpin(&a->pin); -+ di_write_unlock(a->parent); -+ err = au_cpup_before_link(src_dentry, a); -+ di_write_lock_parent(a->parent); -+ if (!err) -+ err = au_pin(&a->pin, dentry, a->bdst, -+ au_opt_udba(sb), -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (unlikely(err)) -+ goto out_wh; -+ } -+ if (!err) { -+ h_src_dentry = au_h_dptr(src_dentry, a->bdst); -+ err = -ENOENT; -+ if (h_src_dentry && h_src_dentry->d_inode) { -+ delegated = NULL; -+ err = vfsub_link(h_src_dentry, -+ au_pinned_h_dir(&a->pin), -+ &a->h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry" -+ " for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ } -+ } -+ } -+ if (unlikely(err)) -+ goto out_unpin; -+ -+ if (wh_dentry) { -+ a->h_path.dentry = wh_dentry; -+ err = au_wh_unlink_dentry(au_pinned_h_dir(&a->pin), &a->h_path, -+ dentry); -+ if (unlikely(err)) -+ goto out_revert; -+ } -+ -+ au_dir_ts(dir, a->bdst); -+ dir->i_version++; -+ inc_nlink(inode); -+ inode->i_ctime = dir->i_ctime; -+ d_instantiate(dentry, au_igrab(inode)); -+ if (d_unhashed(a->h_path.dentry)) -+ /* some filesystem calls d_drop() */ -+ d_drop(dentry); -+ /* some filesystems consume an inode even hardlink */ -+ au_fhsm_wrote(sb, a->bdst, /*force*/0); -+ goto out_unpin; /* success */ -+ -+out_revert: -+ /* no delegation since it is just created */ -+ rerr = vfsub_unlink(au_pinned_h_dir(&a->pin), &a->h_path, -+ /*delegated*/NULL, /*force*/0); -+ if (unlikely(rerr)) { -+ AuIOErr("%pd reverting failed(%d, %d)\n", dentry, err, rerr); -+ err = -EIO; -+ } -+ au_dtime_revert(&dt); -+out_unpin: -+ au_unpin(&a->pin); -+out_wh: -+ dput(wh_dentry); -+out_parent: -+ di_write_unlock(a->parent); -+ dput(a->src_parent); -+out_unlock: -+ if (unlikely(err)) { -+ au_update_dbstart(dentry); -+ d_drop(dentry); -+ } -+ aufs_read_and_write_unlock2(dentry, src_dentry); 
-+out_kfree: -+ kfree(a); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) -+{ -+ int err, rerr; -+ aufs_bindex_t bindex; -+ unsigned char diropq; -+ struct path h_path; -+ struct dentry *wh_dentry, *parent, *opq_dentry; -+ struct mutex *h_mtx; -+ struct super_block *sb; -+ struct { -+ struct au_pin pin; -+ struct au_dtime dt; -+ } *a; /* reduce the stack usage */ -+ struct au_wr_dir_args wr_dir_args = { -+ .force_btgt = -1, -+ .flags = AuWrDir_ADD_ENTRY | AuWrDir_ISDIR -+ }; -+ -+ IMustLock(dir); -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_free; -+ err = au_d_may_add(dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ parent = dentry->d_parent; /* dir inode is locked */ -+ di_write_lock_parent(parent); -+ wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL, -+ &a->pin, &wr_dir_args); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_parent; -+ -+ sb = dentry->d_sb; -+ bindex = au_dbstart(dentry); -+ h_path.dentry = au_h_dptr(dentry, bindex); -+ h_path.mnt = au_sbr_mnt(sb, bindex); -+ err = vfsub_mkdir(au_pinned_h_dir(&a->pin), &h_path, mode); -+ if (unlikely(err)) -+ goto out_unpin; -+ -+ /* make the dir opaque */ -+ diropq = 0; -+ h_mtx = &h_path.dentry->d_inode->i_mutex; -+ if (wh_dentry -+ || au_opt_test(au_mntflags(sb), ALWAYS_DIROPQ)) { -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD); -+ opq_dentry = au_diropq_create(dentry, bindex); -+ mutex_unlock(h_mtx); -+ err = PTR_ERR(opq_dentry); -+ if (IS_ERR(opq_dentry)) -+ goto out_dir; -+ dput(opq_dentry); -+ diropq = 1; -+ } -+ -+ err = epilog(dir, bindex, wh_dentry, dentry); -+ if (!err) { -+ inc_nlink(dir); -+ goto out_unpin; /* success */ -+ } -+ -+ /* revert */ -+ if (diropq) { -+ AuLabel(revert opq); -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD); -+ rerr = 
au_diropq_remove(dentry, bindex); -+ mutex_unlock(h_mtx); -+ if (rerr) { -+ AuIOErr("%pd reverting diropq failed(%d, %d)\n", -+ dentry, err, rerr); -+ err = -EIO; -+ } -+ } -+ -+out_dir: -+ AuLabel(revert dir); -+ rerr = vfsub_rmdir(au_pinned_h_dir(&a->pin), &h_path); -+ if (rerr) { -+ AuIOErr("%pd reverting dir failed(%d, %d)\n", -+ dentry, err, rerr); -+ err = -EIO; -+ } -+ au_dtime_revert(&a->dt); -+out_unpin: -+ au_unpin(&a->pin); -+ dput(wh_dentry); -+out_parent: -+ di_write_unlock(parent); -+out_unlock: -+ if (unlikely(err)) { -+ au_update_dbstart(dentry); -+ d_drop(dentry); -+ } -+ aufs_read_unlock(dentry, AuLock_DW); -+out_free: -+ kfree(a); -+out: -+ return err; -+} -diff --git a/fs/aufs/i_op_del.c b/fs/aufs/i_op_del.c -new file mode 100644 -index 0000000..b4dd686 ---- /dev/null -+++ b/fs/aufs/i_op_del.c -@@ -0,0 +1,506 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * inode operations (del entry) -+ */ -+ -+#include "aufs.h" -+ -+/* -+ * decide if a new whiteout for @dentry is necessary or not. -+ * when it is necessary, prepare the parent dir for the upper branch whose -+ * branch index is @bcpup for creation. the actual creation of the whiteout will -+ * be done by caller. 
-+ * return value: -+ * 0: wh is unnecessary -+ * plus: wh is necessary -+ * minus: error -+ */ -+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup) -+{ -+ int need_wh, err; -+ aufs_bindex_t bstart; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ bstart = au_dbstart(dentry); -+ if (*bcpup < 0) { -+ *bcpup = bstart; -+ if (au_test_ro(sb, bstart, dentry->d_inode)) { -+ err = AuWbrCopyup(au_sbi(sb), dentry); -+ *bcpup = err; -+ if (unlikely(err < 0)) -+ goto out; -+ } -+ } else -+ AuDebugOn(bstart < *bcpup -+ || au_test_ro(sb, *bcpup, dentry->d_inode)); -+ AuDbg("bcpup %d, bstart %d\n", *bcpup, bstart); -+ -+ if (*bcpup != bstart) { -+ err = au_cpup_dirs(dentry, *bcpup); -+ if (unlikely(err)) -+ goto out; -+ need_wh = 1; -+ } else { -+ struct au_dinfo *dinfo, *tmp; -+ -+ need_wh = -ENOMEM; -+ dinfo = au_di(dentry); -+ tmp = au_di_alloc(sb, AuLsc_DI_TMP); -+ if (tmp) { -+ au_di_cp(tmp, dinfo); -+ au_di_swap(tmp, dinfo); -+ /* returns the number of positive dentries */ -+ need_wh = au_lkup_dentry(dentry, bstart + 1, /*type*/0); -+ au_di_swap(tmp, dinfo); -+ au_rw_write_unlock(&tmp->di_rwsem); -+ au_di_free(tmp); -+ } -+ } -+ AuDbg("need_wh %d\n", need_wh); -+ err = need_wh; -+ -+out: -+ return err; -+} -+ -+/* -+ * simple tests for the del-entry operations. -+ * following the checks in vfs, plus the parent-child relationship. 
-+ */ -+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent, int isdir) -+{ -+ int err; -+ umode_t h_mode; -+ struct dentry *h_dentry, *h_latest; -+ struct inode *h_inode; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ h_inode = h_dentry->d_inode; -+ if (dentry->d_inode) { -+ err = -ENOENT; -+ if (unlikely(!h_inode || !h_inode->i_nlink)) -+ goto out; -+ -+ h_mode = h_inode->i_mode; -+ if (!isdir) { -+ err = -EISDIR; -+ if (unlikely(S_ISDIR(h_mode))) -+ goto out; -+ } else if (unlikely(!S_ISDIR(h_mode))) { -+ err = -ENOTDIR; -+ goto out; -+ } -+ } else { -+ /* rename(2) case */ -+ err = -EIO; -+ if (unlikely(h_inode)) -+ goto out; -+ } -+ -+ err = -ENOENT; -+ /* expected parent dir is locked */ -+ if (unlikely(h_parent != h_dentry->d_parent)) -+ goto out; -+ err = 0; -+ -+ /* -+ * rmdir a dir may break the consistency on some filesystem. -+ * let's try heavy test. -+ */ -+ err = -EACCES; -+ if (unlikely(!au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1) -+ && au_test_h_perm(h_parent->d_inode, -+ MAY_EXEC | MAY_WRITE))) -+ goto out; -+ -+ h_latest = au_sio_lkup_one(&dentry->d_name, h_parent); -+ err = -EIO; -+ if (IS_ERR(h_latest)) -+ goto out; -+ if (h_latest == h_dentry) -+ err = 0; -+ dput(h_latest); -+ -+out: -+ return err; -+} -+ -+/* -+ * decide the branch where we operate for @dentry. the branch index will be set -+ * @rbcpup. after diciding it, 'pin' it and store the timestamps of the parent -+ * dir for reverting. -+ * when a new whiteout is necessary, create it. 
-+ */ -+static struct dentry* -+lock_hdir_create_wh(struct dentry *dentry, int isdir, aufs_bindex_t *rbcpup, -+ struct au_dtime *dt, struct au_pin *pin) -+{ -+ struct dentry *wh_dentry; -+ struct super_block *sb; -+ struct path h_path; -+ int err, need_wh; -+ unsigned int udba; -+ aufs_bindex_t bcpup; -+ -+ need_wh = au_wr_dir_need_wh(dentry, isdir, rbcpup); -+ wh_dentry = ERR_PTR(need_wh); -+ if (unlikely(need_wh < 0)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ udba = au_opt_udba(sb); -+ bcpup = *rbcpup; -+ err = au_pin(pin, dentry, bcpup, udba, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ wh_dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ h_path.dentry = au_pinned_h_parent(pin); -+ if (udba != AuOpt_UDBA_NONE -+ && au_dbstart(dentry) == bcpup) { -+ err = au_may_del(dentry, bcpup, h_path.dentry, isdir); -+ wh_dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out_unpin; -+ } -+ -+ h_path.mnt = au_sbr_mnt(sb, bcpup); -+ au_dtime_store(dt, au_pinned_parent(pin), &h_path); -+ wh_dentry = NULL; -+ if (!need_wh) -+ goto out; /* success, no need to create whiteout */ -+ -+ wh_dentry = au_wh_create(dentry, bcpup, h_path.dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_unpin; -+ -+ /* returns with the parent is locked and wh_dentry is dget-ed */ -+ goto out; /* success */ -+ -+out_unpin: -+ au_unpin(pin); -+out: -+ return wh_dentry; -+} -+ -+/* -+ * when removing a dir, rename it to a unique temporary whiteout-ed name first -+ * in order to be revertible and save time for removing many child whiteouts -+ * under the dir. -+ * returns 1 when there are too many child whiteout and caller should remove -+ * them asynchronously. returns 0 when the number of children is enough small to -+ * remove now or the branch fs is a remote fs. -+ * otherwise return an error. 
-+ */ -+static int renwh_and_rmdir(struct dentry *dentry, aufs_bindex_t bindex, -+ struct au_nhash *whlist, struct inode *dir) -+{ -+ int rmdir_later, err, dirwh; -+ struct dentry *h_dentry; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ SiMustAnyLock(sb); -+ h_dentry = au_h_dptr(dentry, bindex); -+ err = au_whtmp_ren(h_dentry, au_sbr(sb, bindex)); -+ if (unlikely(err)) -+ goto out; -+ -+ /* stop monitoring */ -+ au_hn_free(au_hi(dentry->d_inode, bindex)); -+ -+ if (!au_test_fs_remote(h_dentry->d_sb)) { -+ dirwh = au_sbi(sb)->si_dirwh; -+ rmdir_later = (dirwh <= 1); -+ if (!rmdir_later) -+ rmdir_later = au_nhash_test_longer_wh(whlist, bindex, -+ dirwh); -+ if (rmdir_later) -+ return rmdir_later; -+ } -+ -+ err = au_whtmp_rmdir(dir, bindex, h_dentry, whlist); -+ if (unlikely(err)) { -+ AuIOErr("rmdir %pd, b%d failed, %d. ignored\n", -+ h_dentry, bindex, err); -+ err = 0; -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* -+ * final procedure for deleting a entry. -+ * maintain dentry and iattr. -+ */ -+static void epilog(struct inode *dir, struct dentry *dentry, -+ aufs_bindex_t bindex) -+{ -+ struct inode *inode; -+ -+ inode = dentry->d_inode; -+ d_drop(dentry); -+ inode->i_ctime = dir->i_ctime; -+ -+ au_dir_ts(dir, bindex); -+ dir->i_version++; -+} -+ -+/* -+ * when an error happened, remove the created whiteout and revert everything. 
-+ */ -+static int do_revert(int err, struct inode *dir, aufs_bindex_t bindex, -+ aufs_bindex_t bwh, struct dentry *wh_dentry, -+ struct dentry *dentry, struct au_dtime *dt) -+{ -+ int rerr; -+ struct path h_path = { -+ .dentry = wh_dentry, -+ .mnt = au_sbr_mnt(dir->i_sb, bindex) -+ }; -+ -+ rerr = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path, dentry); -+ if (!rerr) { -+ au_set_dbwh(dentry, bwh); -+ au_dtime_revert(dt); -+ return 0; -+ } -+ -+ AuIOErr("%pd reverting whiteout failed(%d, %d)\n", dentry, err, rerr); -+ return -EIO; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int aufs_unlink(struct inode *dir, struct dentry *dentry) -+{ -+ int err; -+ aufs_bindex_t bwh, bindex, bstart; -+ struct inode *inode, *h_dir, *delegated; -+ struct dentry *parent, *wh_dentry; -+ /* to reuduce stack size */ -+ struct { -+ struct au_dtime dt; -+ struct au_pin pin; -+ struct path h_path; -+ } *a; -+ -+ IMustLock(dir); -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_free; -+ err = au_d_hashed_positive(dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ err = -EISDIR; -+ if (unlikely(d_is_dir(dentry))) -+ goto out_unlock; /* possible? 
*/ -+ -+ bstart = au_dbstart(dentry); -+ bwh = au_dbwh(dentry); -+ bindex = -1; -+ parent = dentry->d_parent; /* dir inode is locked */ -+ di_write_lock_parent(parent); -+ wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/0, &bindex, &a->dt, -+ &a->pin); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_parent; -+ -+ a->h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart); -+ a->h_path.dentry = au_h_dptr(dentry, bstart); -+ dget(a->h_path.dentry); -+ if (bindex == bstart) { -+ h_dir = au_pinned_h_dir(&a->pin); -+ delegated = NULL; -+ err = vfsub_unlink(h_dir, &a->h_path, &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ } else { -+ /* dir inode is locked */ -+ h_dir = wh_dentry->d_parent->d_inode; -+ IMustLock(h_dir); -+ err = 0; -+ } -+ -+ if (!err) { -+ vfsub_drop_nlink(inode); -+ epilog(dir, dentry, bindex); -+ -+ /* update target timestamps */ -+ if (bindex == bstart) { -+ vfsub_update_h_iattr(&a->h_path, /*did*/NULL); -+ /*ignore*/ -+ inode->i_ctime = a->h_path.dentry->d_inode->i_ctime; -+ } else -+ /* todo: this timestamp may be reverted later */ -+ inode->i_ctime = h_dir->i_ctime; -+ goto out_unpin; /* success */ -+ } -+ -+ /* revert */ -+ if (wh_dentry) { -+ int rerr; -+ -+ rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry, -+ &a->dt); -+ if (rerr) -+ err = rerr; -+ } -+ -+out_unpin: -+ au_unpin(&a->pin); -+ dput(wh_dentry); -+ dput(a->h_path.dentry); -+out_parent: -+ di_write_unlock(parent); -+out_unlock: -+ aufs_read_unlock(dentry, AuLock_DW); -+out_free: -+ kfree(a); -+out: -+ return err; -+} -+ -+int aufs_rmdir(struct inode *dir, struct dentry *dentry) -+{ -+ int err, rmdir_later; -+ aufs_bindex_t bwh, bindex, bstart; -+ struct inode *inode; -+ struct dentry *parent, *wh_dentry, *h_dentry; -+ struct au_whtmp_rmdir *args; -+ /* to reuduce stack size */ -+ struct { -+ struct au_dtime dt; -+ struct au_pin pin; -+ } *a; 
-+ -+ IMustLock(dir); -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_free; -+ err = au_alive_dir(dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ err = -ENOTDIR; -+ if (unlikely(!d_is_dir(dentry))) -+ goto out_unlock; /* possible? */ -+ -+ err = -ENOMEM; -+ args = au_whtmp_rmdir_alloc(dir->i_sb, GFP_NOFS); -+ if (unlikely(!args)) -+ goto out_unlock; -+ -+ parent = dentry->d_parent; /* dir inode is locked */ -+ di_write_lock_parent(parent); -+ err = au_test_empty(dentry, &args->whlist); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ bstart = au_dbstart(dentry); -+ bwh = au_dbwh(dentry); -+ bindex = -1; -+ wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/1, &bindex, &a->dt, -+ &a->pin); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_parent; -+ -+ h_dentry = au_h_dptr(dentry, bstart); -+ dget(h_dentry); -+ rmdir_later = 0; -+ if (bindex == bstart) { -+ err = renwh_and_rmdir(dentry, bstart, &args->whlist, dir); -+ if (err > 0) { -+ rmdir_later = err; -+ err = 0; -+ } -+ } else { -+ /* stop monitoring */ -+ au_hn_free(au_hi(inode, bstart)); -+ -+ /* dir inode is locked */ -+ IMustLock(wh_dentry->d_parent->d_inode); -+ err = 0; -+ } -+ -+ if (!err) { -+ vfsub_dead_dir(inode); -+ au_set_dbdiropq(dentry, -1); -+ epilog(dir, dentry, bindex); -+ -+ if (rmdir_later) { -+ au_whtmp_kick_rmdir(dir, bstart, h_dentry, args); -+ args = NULL; -+ } -+ -+ goto out_unpin; /* success */ -+ } -+ -+ /* revert */ -+ AuLabel(revert); -+ if (wh_dentry) { -+ int rerr; -+ -+ rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry, -+ &a->dt); -+ if (rerr) -+ err = rerr; -+ } -+ -+out_unpin: -+ au_unpin(&a->pin); -+ dput(wh_dentry); -+ dput(h_dentry); -+out_parent: -+ di_write_unlock(parent); -+ if (args) -+ au_whtmp_rmdir_free(args); -+out_unlock: -+ 
aufs_read_unlock(dentry, AuLock_DW); -+out_free: -+ kfree(a); -+out: -+ AuTraceErr(err); -+ return err; -+} -diff --git a/fs/aufs/i_op_ren.c b/fs/aufs/i_op_ren.c -new file mode 100644 -index 0000000..6ce2ed6 ---- /dev/null -+++ b/fs/aufs/i_op_ren.c -@@ -0,0 +1,1013 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * inode operation (rename entry) -+ * todo: this is crazy monster -+ */ -+ -+#include "aufs.h" -+ -+enum { AuSRC, AuDST, AuSrcDst }; -+enum { AuPARENT, AuCHILD, AuParentChild }; -+ -+#define AuRen_ISDIR 1 -+#define AuRen_ISSAMEDIR (1 << 1) -+#define AuRen_WHSRC (1 << 2) -+#define AuRen_WHDST (1 << 3) -+#define AuRen_MNT_WRITE (1 << 4) -+#define AuRen_DT_DSTDIR (1 << 5) -+#define AuRen_DIROPQ (1 << 6) -+#define au_ftest_ren(flags, name) ((flags) & AuRen_##name) -+#define au_fset_ren(flags, name) \ -+ do { (flags) |= AuRen_##name; } while (0) -+#define au_fclr_ren(flags, name) \ -+ do { (flags) &= ~AuRen_##name; } while (0) -+ -+struct au_ren_args { -+ struct { -+ struct dentry *dentry, *h_dentry, *parent, *h_parent, -+ *wh_dentry; -+ struct inode *dir, *inode; -+ struct au_hinode *hdir; -+ struct au_dtime dt[AuParentChild]; -+ aufs_bindex_t bstart; -+ } sd[AuSrcDst]; -+ -+#define src_dentry sd[AuSRC].dentry -+#define src_dir sd[AuSRC].dir -+#define src_inode sd[AuSRC].inode -+#define src_h_dentry 
sd[AuSRC].h_dentry -+#define src_parent sd[AuSRC].parent -+#define src_h_parent sd[AuSRC].h_parent -+#define src_wh_dentry sd[AuSRC].wh_dentry -+#define src_hdir sd[AuSRC].hdir -+#define src_h_dir sd[AuSRC].hdir->hi_inode -+#define src_dt sd[AuSRC].dt -+#define src_bstart sd[AuSRC].bstart -+ -+#define dst_dentry sd[AuDST].dentry -+#define dst_dir sd[AuDST].dir -+#define dst_inode sd[AuDST].inode -+#define dst_h_dentry sd[AuDST].h_dentry -+#define dst_parent sd[AuDST].parent -+#define dst_h_parent sd[AuDST].h_parent -+#define dst_wh_dentry sd[AuDST].wh_dentry -+#define dst_hdir sd[AuDST].hdir -+#define dst_h_dir sd[AuDST].hdir->hi_inode -+#define dst_dt sd[AuDST].dt -+#define dst_bstart sd[AuDST].bstart -+ -+ struct dentry *h_trap; -+ struct au_branch *br; -+ struct au_hinode *src_hinode; -+ struct path h_path; -+ struct au_nhash whlist; -+ aufs_bindex_t btgt, src_bwh, src_bdiropq; -+ -+ unsigned int flags; -+ -+ struct au_whtmp_rmdir *thargs; -+ struct dentry *h_dst; -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * functions for reverting. -+ * when an error happened in a single rename systemcall, we should revert -+ * everything as if nothing happened. -+ * we don't need to revert the copied-up/down the parent dir since they are -+ * harmless. -+ */ -+ -+#define RevertFailure(fmt, ...) 
do { \ -+ AuIOErr("revert failure: " fmt " (%d, %d)\n", \ -+ ##__VA_ARGS__, err, rerr); \ -+ err = -EIO; \ -+} while (0) -+ -+static void au_ren_rev_diropq(int err, struct au_ren_args *a) -+{ -+ int rerr; -+ -+ au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD); -+ rerr = au_diropq_remove(a->src_dentry, a->btgt); -+ au_hn_imtx_unlock(a->src_hinode); -+ au_set_dbdiropq(a->src_dentry, a->src_bdiropq); -+ if (rerr) -+ RevertFailure("remove diropq %pd", a->src_dentry); -+} -+ -+static void au_ren_rev_rename(int err, struct au_ren_args *a) -+{ -+ int rerr; -+ struct inode *delegated; -+ -+ a->h_path.dentry = vfsub_lkup_one(&a->src_dentry->d_name, -+ a->src_h_parent); -+ rerr = PTR_ERR(a->h_path.dentry); -+ if (IS_ERR(a->h_path.dentry)) { -+ RevertFailure("lkup one %pd", a->src_dentry); -+ return; -+ } -+ -+ delegated = NULL; -+ rerr = vfsub_rename(a->dst_h_dir, -+ au_h_dptr(a->src_dentry, a->btgt), -+ a->src_h_dir, &a->h_path, &delegated); -+ if (unlikely(rerr == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal rename\n"); -+ iput(delegated); -+ } -+ d_drop(a->h_path.dentry); -+ dput(a->h_path.dentry); -+ /* au_set_h_dptr(a->src_dentry, a->btgt, NULL); */ -+ if (rerr) -+ RevertFailure("rename %pd", a->src_dentry); -+} -+ -+static void au_ren_rev_whtmp(int err, struct au_ren_args *a) -+{ -+ int rerr; -+ struct inode *delegated; -+ -+ a->h_path.dentry = vfsub_lkup_one(&a->dst_dentry->d_name, -+ a->dst_h_parent); -+ rerr = PTR_ERR(a->h_path.dentry); -+ if (IS_ERR(a->h_path.dentry)) { -+ RevertFailure("lkup one %pd", a->dst_dentry); -+ return; -+ } -+ if (a->h_path.dentry->d_inode) { -+ d_drop(a->h_path.dentry); -+ dput(a->h_path.dentry); -+ return; -+ } -+ -+ delegated = NULL; -+ rerr = vfsub_rename(a->dst_h_dir, a->h_dst, a->dst_h_dir, &a->h_path, -+ &delegated); -+ if (unlikely(rerr == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal rename\n"); -+ iput(delegated); -+ } -+ d_drop(a->h_path.dentry); 
-+ dput(a->h_path.dentry); -+ if (!rerr) -+ au_set_h_dptr(a->dst_dentry, a->btgt, dget(a->h_dst)); -+ else -+ RevertFailure("rename %pd", a->h_dst); -+} -+ -+static void au_ren_rev_whsrc(int err, struct au_ren_args *a) -+{ -+ int rerr; -+ -+ a->h_path.dentry = a->src_wh_dentry; -+ rerr = au_wh_unlink_dentry(a->src_h_dir, &a->h_path, a->src_dentry); -+ au_set_dbwh(a->src_dentry, a->src_bwh); -+ if (rerr) -+ RevertFailure("unlink %pd", a->src_wh_dentry); -+} -+#undef RevertFailure -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * when we have to copyup the renaming entry, do it with the rename-target name -+ * in order to minimize the cost (the later actual rename is unnecessary). -+ * otherwise rename it on the target branch. -+ */ -+static int au_ren_or_cpup(struct au_ren_args *a) -+{ -+ int err; -+ struct dentry *d; -+ struct inode *delegated; -+ -+ d = a->src_dentry; -+ if (au_dbstart(d) == a->btgt) { -+ a->h_path.dentry = a->dst_h_dentry; -+ if (au_ftest_ren(a->flags, DIROPQ) -+ && au_dbdiropq(d) == a->btgt) -+ au_fclr_ren(a->flags, DIROPQ); -+ AuDebugOn(au_dbstart(d) != a->btgt); -+ delegated = NULL; -+ err = vfsub_rename(a->src_h_dir, au_h_dptr(d, a->btgt), -+ a->dst_h_dir, &a->h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal rename\n"); -+ iput(delegated); -+ } -+ } else -+ BUG(); -+ -+ if (!err && a->h_dst) -+ /* it will be set to dinfo later */ -+ dget(a->h_dst); -+ -+ return err; -+} -+ -+/* cf. 
aufs_rmdir() */ -+static int au_ren_del_whtmp(struct au_ren_args *a) -+{ -+ int err; -+ struct inode *dir; -+ -+ dir = a->dst_dir; -+ SiMustAnyLock(dir->i_sb); -+ if (!au_nhash_test_longer_wh(&a->whlist, a->btgt, -+ au_sbi(dir->i_sb)->si_dirwh) -+ || au_test_fs_remote(a->h_dst->d_sb)) { -+ err = au_whtmp_rmdir(dir, a->btgt, a->h_dst, &a->whlist); -+ if (unlikely(err)) -+ pr_warn("failed removing whtmp dir %pd (%d), " -+ "ignored.\n", a->h_dst, err); -+ } else { -+ au_nhash_wh_free(&a->thargs->whlist); -+ a->thargs->whlist = a->whlist; -+ a->whlist.nh_num = 0; -+ au_whtmp_kick_rmdir(dir, a->btgt, a->h_dst, a->thargs); -+ dput(a->h_dst); -+ a->thargs = NULL; -+ } -+ -+ return 0; -+} -+ -+/* make it 'opaque' dir. */ -+static int au_ren_diropq(struct au_ren_args *a) -+{ -+ int err; -+ struct dentry *diropq; -+ -+ err = 0; -+ a->src_bdiropq = au_dbdiropq(a->src_dentry); -+ a->src_hinode = au_hi(a->src_inode, a->btgt); -+ au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD); -+ diropq = au_diropq_create(a->src_dentry, a->btgt); -+ au_hn_imtx_unlock(a->src_hinode); -+ if (IS_ERR(diropq)) -+ err = PTR_ERR(diropq); -+ else -+ dput(diropq); -+ -+ return err; -+} -+ -+static int do_rename(struct au_ren_args *a) -+{ -+ int err; -+ struct dentry *d, *h_d; -+ -+ /* prepare workqueue args for asynchronous rmdir */ -+ h_d = a->dst_h_dentry; -+ if (au_ftest_ren(a->flags, ISDIR) && h_d->d_inode) { -+ err = -ENOMEM; -+ a->thargs = au_whtmp_rmdir_alloc(a->src_dentry->d_sb, GFP_NOFS); -+ if (unlikely(!a->thargs)) -+ goto out; -+ a->h_dst = dget(h_d); -+ } -+ -+ /* create whiteout for src_dentry */ -+ if (au_ftest_ren(a->flags, WHSRC)) { -+ a->src_bwh = au_dbwh(a->src_dentry); -+ AuDebugOn(a->src_bwh >= 0); -+ a->src_wh_dentry -+ = au_wh_create(a->src_dentry, a->btgt, a->src_h_parent); -+ err = PTR_ERR(a->src_wh_dentry); -+ if (IS_ERR(a->src_wh_dentry)) -+ goto out_thargs; -+ } -+ -+ /* lookup whiteout for dentry */ -+ if (au_ftest_ren(a->flags, WHDST)) { -+ h_d = 
au_wh_lkup(a->dst_h_parent, &a->dst_dentry->d_name, -+ a->br); -+ err = PTR_ERR(h_d); -+ if (IS_ERR(h_d)) -+ goto out_whsrc; -+ if (!h_d->d_inode) -+ dput(h_d); -+ else -+ a->dst_wh_dentry = h_d; -+ } -+ -+ /* rename dentry to tmpwh */ -+ if (a->thargs) { -+ err = au_whtmp_ren(a->dst_h_dentry, a->br); -+ if (unlikely(err)) -+ goto out_whdst; -+ -+ d = a->dst_dentry; -+ au_set_h_dptr(d, a->btgt, NULL); -+ err = au_lkup_neg(d, a->btgt, /*wh*/0); -+ if (unlikely(err)) -+ goto out_whtmp; -+ a->dst_h_dentry = au_h_dptr(d, a->btgt); -+ } -+ -+ BUG_ON(a->dst_h_dentry->d_inode && a->src_bstart != a->btgt); -+ -+ /* rename by vfs_rename or cpup */ -+ d = a->dst_dentry; -+ if (au_ftest_ren(a->flags, ISDIR) -+ && (a->dst_wh_dentry -+ || au_dbdiropq(d) == a->btgt -+ /* hide the lower to keep xino */ -+ || a->btgt < au_dbend(d) -+ || au_opt_test(au_mntflags(d->d_sb), ALWAYS_DIROPQ))) -+ au_fset_ren(a->flags, DIROPQ); -+ err = au_ren_or_cpup(a); -+ if (unlikely(err)) -+ /* leave the copied-up one */ -+ goto out_whtmp; -+ -+ /* make dir opaque */ -+ if (au_ftest_ren(a->flags, DIROPQ)) { -+ err = au_ren_diropq(a); -+ if (unlikely(err)) -+ goto out_rename; -+ } -+ -+ /* update target timestamps */ -+ AuDebugOn(au_dbstart(a->src_dentry) != a->btgt); -+ a->h_path.dentry = au_h_dptr(a->src_dentry, a->btgt); -+ vfsub_update_h_iattr(&a->h_path, /*did*/NULL); /*ignore*/ -+ a->src_inode->i_ctime = a->h_path.dentry->d_inode->i_ctime; -+ -+ /* remove whiteout for dentry */ -+ if (a->dst_wh_dentry) { -+ a->h_path.dentry = a->dst_wh_dentry; -+ err = au_wh_unlink_dentry(a->dst_h_dir, &a->h_path, -+ a->dst_dentry); -+ if (unlikely(err)) -+ goto out_diropq; -+ } -+ -+ /* remove whtmp */ -+ if (a->thargs) -+ au_ren_del_whtmp(a); /* ignore this error */ -+ -+ au_fhsm_wrote(a->src_dentry->d_sb, a->btgt, /*force*/0); -+ err = 0; -+ goto out_success; -+ -+out_diropq: -+ if (au_ftest_ren(a->flags, DIROPQ)) -+ au_ren_rev_diropq(err, a); -+out_rename: -+ au_ren_rev_rename(err, a); -+ dput(a->h_dst); 
-+out_whtmp: -+ if (a->thargs) -+ au_ren_rev_whtmp(err, a); -+out_whdst: -+ dput(a->dst_wh_dentry); -+ a->dst_wh_dentry = NULL; -+out_whsrc: -+ if (a->src_wh_dentry) -+ au_ren_rev_whsrc(err, a); -+out_success: -+ dput(a->src_wh_dentry); -+ dput(a->dst_wh_dentry); -+out_thargs: -+ if (a->thargs) { -+ dput(a->h_dst); -+ au_whtmp_rmdir_free(a->thargs); -+ a->thargs = NULL; -+ } -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * test if @dentry dir can be rename destination or not. -+ * success means, it is a logically empty dir. -+ */ -+static int may_rename_dstdir(struct dentry *dentry, struct au_nhash *whlist) -+{ -+ return au_test_empty(dentry, whlist); -+} -+ -+/* -+ * test if @dentry dir can be rename source or not. -+ * if it can, return 0 and @children is filled. -+ * success means, -+ * - it is a logically empty dir. -+ * - or, it exists on writable branch and has no children including whiteouts -+ * on the lower branch. 
-+ */ -+static int may_rename_srcdir(struct dentry *dentry, aufs_bindex_t btgt) -+{ -+ int err; -+ unsigned int rdhash; -+ aufs_bindex_t bstart; -+ -+ bstart = au_dbstart(dentry); -+ if (bstart != btgt) { -+ struct au_nhash whlist; -+ -+ SiMustAnyLock(dentry->d_sb); -+ rdhash = au_sbi(dentry->d_sb)->si_rdhash; -+ if (!rdhash) -+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, -+ dentry)); -+ err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_test_empty(dentry, &whlist); -+ au_nhash_wh_free(&whlist); -+ goto out; -+ } -+ -+ if (bstart == au_dbtaildir(dentry)) -+ return 0; /* success */ -+ -+ err = au_test_empty_lower(dentry); -+ -+out: -+ if (err == -ENOTEMPTY) { -+ AuWarn1("renaming dir who has child(ren) on multiple branches," -+ " is not supported\n"); -+ err = -EXDEV; -+ } -+ return err; -+} -+ -+/* side effect: sets whlist and h_dentry */ -+static int au_ren_may_dir(struct au_ren_args *a) -+{ -+ int err; -+ unsigned int rdhash; -+ struct dentry *d; -+ -+ d = a->dst_dentry; -+ SiMustAnyLock(d->d_sb); -+ -+ err = 0; -+ if (au_ftest_ren(a->flags, ISDIR) && a->dst_inode) { -+ rdhash = au_sbi(d->d_sb)->si_rdhash; -+ if (!rdhash) -+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, d)); -+ err = au_nhash_alloc(&a->whlist, rdhash, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ -+ au_set_dbstart(d, a->dst_bstart); -+ err = may_rename_dstdir(d, &a->whlist); -+ au_set_dbstart(d, a->btgt); -+ } -+ a->dst_h_dentry = au_h_dptr(d, au_dbstart(d)); -+ if (unlikely(err)) -+ goto out; -+ -+ d = a->src_dentry; -+ a->src_h_dentry = au_h_dptr(d, au_dbstart(d)); -+ if (au_ftest_ren(a->flags, ISDIR)) { -+ err = may_rename_srcdir(d, a->btgt); -+ if (unlikely(err)) { -+ au_nhash_wh_free(&a->whlist); -+ a->whlist.nh_num = 0; -+ } -+ } -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * simple tests for rename. 
-+ * following the checks in vfs, plus the parent-child relationship. -+ */ -+static int au_may_ren(struct au_ren_args *a) -+{ -+ int err, isdir; -+ struct inode *h_inode; -+ -+ if (a->src_bstart == a->btgt) { -+ err = au_may_del(a->src_dentry, a->btgt, a->src_h_parent, -+ au_ftest_ren(a->flags, ISDIR)); -+ if (unlikely(err)) -+ goto out; -+ err = -EINVAL; -+ if (unlikely(a->src_h_dentry == a->h_trap)) -+ goto out; -+ } -+ -+ err = 0; -+ if (a->dst_bstart != a->btgt) -+ goto out; -+ -+ err = -ENOTEMPTY; -+ if (unlikely(a->dst_h_dentry == a->h_trap)) -+ goto out; -+ -+ err = -EIO; -+ h_inode = a->dst_h_dentry->d_inode; -+ isdir = !!au_ftest_ren(a->flags, ISDIR); -+ if (!a->dst_dentry->d_inode) { -+ if (unlikely(h_inode)) -+ goto out; -+ err = au_may_add(a->dst_dentry, a->btgt, a->dst_h_parent, -+ isdir); -+ } else { -+ if (unlikely(!h_inode || !h_inode->i_nlink)) -+ goto out; -+ err = au_may_del(a->dst_dentry, a->btgt, a->dst_h_parent, -+ isdir); -+ if (unlikely(err)) -+ goto out; -+ } -+ -+out: -+ if (unlikely(err == -ENOENT || err == -EEXIST)) -+ err = -EIO; -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * locking order -+ * (VFS) -+ * - src_dir and dir by lock_rename() -+ * - inode if exitsts -+ * (aufs) -+ * - lock all -+ * + src_dentry and dentry by aufs_read_and_write_lock2() which calls, -+ * + si_read_lock -+ * + di_write_lock2_child() -+ * + di_write_lock_child() -+ * + ii_write_lock_child() -+ * + di_write_lock_child2() -+ * + ii_write_lock_child2() -+ * + src_parent and parent -+ * + di_write_lock_parent() -+ * + ii_write_lock_parent() -+ * + di_write_lock_parent2() -+ * + ii_write_lock_parent2() -+ * + lower src_dir and dir by vfsub_lock_rename() -+ * + verify the every relationships between child and parent. if any -+ * of them failed, unlock all and return -EBUSY. 
-+ */ -+static void au_ren_unlock(struct au_ren_args *a) -+{ -+ vfsub_unlock_rename(a->src_h_parent, a->src_hdir, -+ a->dst_h_parent, a->dst_hdir); -+ if (au_ftest_ren(a->flags, MNT_WRITE)) -+ vfsub_mnt_drop_write(au_br_mnt(a->br)); -+} -+ -+static int au_ren_lock(struct au_ren_args *a) -+{ -+ int err; -+ unsigned int udba; -+ -+ err = 0; -+ a->src_h_parent = au_h_dptr(a->src_parent, a->btgt); -+ a->src_hdir = au_hi(a->src_dir, a->btgt); -+ a->dst_h_parent = au_h_dptr(a->dst_parent, a->btgt); -+ a->dst_hdir = au_hi(a->dst_dir, a->btgt); -+ -+ err = vfsub_mnt_want_write(au_br_mnt(a->br)); -+ if (unlikely(err)) -+ goto out; -+ au_fset_ren(a->flags, MNT_WRITE); -+ a->h_trap = vfsub_lock_rename(a->src_h_parent, a->src_hdir, -+ a->dst_h_parent, a->dst_hdir); -+ udba = au_opt_udba(a->src_dentry->d_sb); -+ if (unlikely(a->src_hdir->hi_inode != a->src_h_parent->d_inode -+ || a->dst_hdir->hi_inode != a->dst_h_parent->d_inode)) -+ err = au_busy_or_stale(); -+ if (!err && au_dbstart(a->src_dentry) == a->btgt) -+ err = au_h_verify(a->src_h_dentry, udba, -+ a->src_h_parent->d_inode, a->src_h_parent, -+ a->br); -+ if (!err && au_dbstart(a->dst_dentry) == a->btgt) -+ err = au_h_verify(a->dst_h_dentry, udba, -+ a->dst_h_parent->d_inode, a->dst_h_parent, -+ a->br); -+ if (!err) -+ goto out; /* success */ -+ -+ err = au_busy_or_stale(); -+ au_ren_unlock(a); -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_ren_refresh_dir(struct au_ren_args *a) -+{ -+ struct inode *dir; -+ -+ dir = a->dst_dir; -+ dir->i_version++; -+ if (au_ftest_ren(a->flags, ISDIR)) { -+ /* is this updating defined in POSIX? 
*/ -+ au_cpup_attr_timesizes(a->src_inode); -+ au_cpup_attr_nlink(dir, /*force*/1); -+ } -+ -+ au_dir_ts(dir, a->btgt); -+ -+ if (au_ftest_ren(a->flags, ISSAMEDIR)) -+ return; -+ -+ dir = a->src_dir; -+ dir->i_version++; -+ if (au_ftest_ren(a->flags, ISDIR)) -+ au_cpup_attr_nlink(dir, /*force*/1); -+ au_dir_ts(dir, a->btgt); -+} -+ -+static void au_ren_refresh(struct au_ren_args *a) -+{ -+ aufs_bindex_t bend, bindex; -+ struct dentry *d, *h_d; -+ struct inode *i, *h_i; -+ struct super_block *sb; -+ -+ d = a->dst_dentry; -+ d_drop(d); -+ if (a->h_dst) -+ /* already dget-ed by au_ren_or_cpup() */ -+ au_set_h_dptr(d, a->btgt, a->h_dst); -+ -+ i = a->dst_inode; -+ if (i) { -+ if (!au_ftest_ren(a->flags, ISDIR)) -+ vfsub_drop_nlink(i); -+ else { -+ vfsub_dead_dir(i); -+ au_cpup_attr_timesizes(i); -+ } -+ au_update_dbrange(d, /*do_put_zero*/1); -+ } else { -+ bend = a->btgt; -+ for (bindex = au_dbstart(d); bindex < bend; bindex++) -+ au_set_h_dptr(d, bindex, NULL); -+ bend = au_dbend(d); -+ for (bindex = a->btgt + 1; bindex <= bend; bindex++) -+ au_set_h_dptr(d, bindex, NULL); -+ au_update_dbrange(d, /*do_put_zero*/0); -+ } -+ -+ d = a->src_dentry; -+ au_set_dbwh(d, -1); -+ bend = au_dbend(d); -+ for (bindex = a->btgt + 1; bindex <= bend; bindex++) { -+ h_d = au_h_dptr(d, bindex); -+ if (h_d) -+ au_set_h_dptr(d, bindex, NULL); -+ } -+ au_set_dbend(d, a->btgt); -+ -+ sb = d->d_sb; -+ i = a->src_inode; -+ if (au_opt_test(au_mntflags(sb), PLINK) && au_plink_test(i)) -+ return; /* success */ -+ -+ bend = au_ibend(i); -+ for (bindex = a->btgt + 1; bindex <= bend; bindex++) { -+ h_i = au_h_iptr(i, bindex); -+ if (h_i) { -+ au_xino_write(sb, bindex, h_i->i_ino, /*ino*/0); -+ /* ignore this error */ -+ au_set_h_iptr(i, bindex, NULL, 0); -+ } -+ } -+ au_set_ibend(i, a->btgt); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* mainly for link(2) and rename(2) */ -+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt) -+{ -+ aufs_bindex_t 
bdiropq, bwh; -+ struct dentry *parent; -+ struct au_branch *br; -+ -+ parent = dentry->d_parent; -+ IMustLock(parent->d_inode); /* dir is locked */ -+ -+ bdiropq = au_dbdiropq(parent); -+ bwh = au_dbwh(dentry); -+ br = au_sbr(dentry->d_sb, btgt); -+ if (au_br_rdonly(br) -+ || (0 <= bdiropq && bdiropq < btgt) -+ || (0 <= bwh && bwh < btgt)) -+ btgt = -1; -+ -+ AuDbg("btgt %d\n", btgt); -+ return btgt; -+} -+ -+/* sets src_bstart, dst_bstart and btgt */ -+static int au_ren_wbr(struct au_ren_args *a) -+{ -+ int err; -+ struct au_wr_dir_args wr_dir_args = { -+ /* .force_btgt = -1, */ -+ .flags = AuWrDir_ADD_ENTRY -+ }; -+ -+ a->src_bstart = au_dbstart(a->src_dentry); -+ a->dst_bstart = au_dbstart(a->dst_dentry); -+ if (au_ftest_ren(a->flags, ISDIR)) -+ au_fset_wrdir(wr_dir_args.flags, ISDIR); -+ wr_dir_args.force_btgt = a->src_bstart; -+ if (a->dst_inode && a->dst_bstart < a->src_bstart) -+ wr_dir_args.force_btgt = a->dst_bstart; -+ wr_dir_args.force_btgt = au_wbr(a->dst_dentry, wr_dir_args.force_btgt); -+ err = au_wr_dir(a->dst_dentry, a->src_dentry, &wr_dir_args); -+ a->btgt = err; -+ -+ return err; -+} -+ -+static void au_ren_dt(struct au_ren_args *a) -+{ -+ a->h_path.dentry = a->src_h_parent; -+ au_dtime_store(a->src_dt + AuPARENT, a->src_parent, &a->h_path); -+ if (!au_ftest_ren(a->flags, ISSAMEDIR)) { -+ a->h_path.dentry = a->dst_h_parent; -+ au_dtime_store(a->dst_dt + AuPARENT, a->dst_parent, &a->h_path); -+ } -+ -+ au_fclr_ren(a->flags, DT_DSTDIR); -+ if (!au_ftest_ren(a->flags, ISDIR)) -+ return; -+ -+ a->h_path.dentry = a->src_h_dentry; -+ au_dtime_store(a->src_dt + AuCHILD, a->src_dentry, &a->h_path); -+ if (a->dst_h_dentry->d_inode) { -+ au_fset_ren(a->flags, DT_DSTDIR); -+ a->h_path.dentry = a->dst_h_dentry; -+ au_dtime_store(a->dst_dt + AuCHILD, a->dst_dentry, &a->h_path); -+ } -+} -+ -+static void au_ren_rev_dt(int err, struct au_ren_args *a) -+{ -+ struct dentry *h_d; -+ struct mutex *h_mtx; -+ -+ au_dtime_revert(a->src_dt + AuPARENT); -+ if 
(!au_ftest_ren(a->flags, ISSAMEDIR)) -+ au_dtime_revert(a->dst_dt + AuPARENT); -+ -+ if (au_ftest_ren(a->flags, ISDIR) && err != -EIO) { -+ h_d = a->src_dt[AuCHILD].dt_h_path.dentry; -+ h_mtx = &h_d->d_inode->i_mutex; -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD); -+ au_dtime_revert(a->src_dt + AuCHILD); -+ mutex_unlock(h_mtx); -+ -+ if (au_ftest_ren(a->flags, DT_DSTDIR)) { -+ h_d = a->dst_dt[AuCHILD].dt_h_path.dentry; -+ h_mtx = &h_d->d_inode->i_mutex; -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD); -+ au_dtime_revert(a->dst_dt + AuCHILD); -+ mutex_unlock(h_mtx); -+ } -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int aufs_rename(struct inode *_src_dir, struct dentry *_src_dentry, -+ struct inode *_dst_dir, struct dentry *_dst_dentry) -+{ -+ int err, flags; -+ /* reduce stack space */ -+ struct au_ren_args *a; -+ -+ AuDbg("%pd, %pd\n", _src_dentry, _dst_dentry); -+ IMustLock(_src_dir); -+ IMustLock(_dst_dir); -+ -+ err = -ENOMEM; -+ BUILD_BUG_ON(sizeof(*a) > PAGE_SIZE); -+ a = kzalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ a->src_dir = _src_dir; -+ a->src_dentry = _src_dentry; -+ a->src_inode = a->src_dentry->d_inode; -+ a->src_parent = a->src_dentry->d_parent; /* dir inode is locked */ -+ a->dst_dir = _dst_dir; -+ a->dst_dentry = _dst_dentry; -+ a->dst_inode = a->dst_dentry->d_inode; -+ a->dst_parent = a->dst_dentry->d_parent; /* dir inode is locked */ -+ if (a->dst_inode) { -+ IMustLock(a->dst_inode); -+ au_igrab(a->dst_inode); -+ } -+ -+ err = -ENOTDIR; -+ flags = AuLock_FLUSH | AuLock_NOPLM | AuLock_GEN; -+ if (d_is_dir(a->src_dentry)) { -+ au_fset_ren(a->flags, ISDIR); -+ if (unlikely(d_is_positive(a->dst_dentry) -+ && !d_is_dir(a->dst_dentry))) -+ goto out_free; -+ flags |= AuLock_DIRS; -+ } -+ err = aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry, flags); -+ if (unlikely(err)) -+ goto out_free; -+ -+ err = au_d_hashed_positive(a->src_dentry); -+ if (unlikely(err)) -+ goto out_unlock; 
-+ err = -ENOENT; -+ if (a->dst_inode) { -+ /* -+ * If it is a dir, VFS unhash dst_dentry before this -+ * function. It means we cannot rely upon d_unhashed(). -+ */ -+ if (unlikely(!a->dst_inode->i_nlink)) -+ goto out_unlock; -+ if (!S_ISDIR(a->dst_inode->i_mode)) { -+ err = au_d_hashed_positive(a->dst_dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ } else if (unlikely(IS_DEADDIR(a->dst_inode))) -+ goto out_unlock; -+ } else if (unlikely(d_unhashed(a->dst_dentry))) -+ goto out_unlock; -+ -+ /* -+ * is it possible? -+ * yes, it happened (in linux-3.3-rcN) but I don't know why. -+ * there may exist a problem somewhere else. -+ */ -+ err = -EINVAL; -+ if (unlikely(a->dst_parent->d_inode == a->src_dentry->d_inode)) -+ goto out_unlock; -+ -+ au_fset_ren(a->flags, ISSAMEDIR); /* temporary */ -+ di_write_lock_parent(a->dst_parent); -+ -+ /* which branch we process */ -+ err = au_ren_wbr(a); -+ if (unlikely(err < 0)) -+ goto out_parent; -+ a->br = au_sbr(a->dst_dentry->d_sb, a->btgt); -+ a->h_path.mnt = au_br_mnt(a->br); -+ -+ /* are they available to be renamed */ -+ err = au_ren_may_dir(a); -+ if (unlikely(err)) -+ goto out_children; -+ -+ /* prepare the writable parent dir on the same branch */ -+ if (a->dst_bstart == a->btgt) { -+ au_fset_ren(a->flags, WHDST); -+ } else { -+ err = au_cpup_dirs(a->dst_dentry, a->btgt); -+ if (unlikely(err)) -+ goto out_children; -+ } -+ -+ if (a->src_dir != a->dst_dir) { -+ /* -+ * this temporary unlock is safe, -+ * because both dir->i_mutex are locked. 
-+ */ -+ di_write_unlock(a->dst_parent); -+ di_write_lock_parent(a->src_parent); -+ err = au_wr_dir_need_wh(a->src_dentry, -+ au_ftest_ren(a->flags, ISDIR), -+ &a->btgt); -+ di_write_unlock(a->src_parent); -+ di_write_lock2_parent(a->src_parent, a->dst_parent, /*isdir*/1); -+ au_fclr_ren(a->flags, ISSAMEDIR); -+ } else -+ err = au_wr_dir_need_wh(a->src_dentry, -+ au_ftest_ren(a->flags, ISDIR), -+ &a->btgt); -+ if (unlikely(err < 0)) -+ goto out_children; -+ if (err) -+ au_fset_ren(a->flags, WHSRC); -+ -+ /* cpup src */ -+ if (a->src_bstart != a->btgt) { -+ struct au_pin pin; -+ -+ err = au_pin(&pin, a->src_dentry, a->btgt, -+ au_opt_udba(a->src_dentry->d_sb), -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (!err) { -+ struct au_cp_generic cpg = { -+ .dentry = a->src_dentry, -+ .bdst = a->btgt, -+ .bsrc = a->src_bstart, -+ .len = -1, -+ .pin = &pin, -+ .flags = AuCpup_DTIME | AuCpup_HOPEN -+ }; -+ AuDebugOn(au_dbstart(a->src_dentry) != a->src_bstart); -+ err = au_sio_cpup_simple(&cpg); -+ au_unpin(&pin); -+ } -+ if (unlikely(err)) -+ goto out_children; -+ a->src_bstart = a->btgt; -+ a->src_h_dentry = au_h_dptr(a->src_dentry, a->btgt); -+ au_fset_ren(a->flags, WHSRC); -+ } -+ -+ /* lock them all */ -+ err = au_ren_lock(a); -+ if (unlikely(err)) -+ /* leave the copied-up one */ -+ goto out_children; -+ -+ if (!au_opt_test(au_mntflags(a->dst_dir->i_sb), UDBA_NONE)) -+ err = au_may_ren(a); -+ else if (unlikely(a->dst_dentry->d_name.len > AUFS_MAX_NAMELEN)) -+ err = -ENAMETOOLONG; -+ if (unlikely(err)) -+ goto out_hdir; -+ -+ /* store timestamps to be revertible */ -+ au_ren_dt(a); -+ -+ /* here we go */ -+ err = do_rename(a); -+ if (unlikely(err)) -+ goto out_dt; -+ -+ /* update dir attributes */ -+ au_ren_refresh_dir(a); -+ -+ /* dput/iput all lower dentries */ -+ au_ren_refresh(a); -+ -+ goto out_hdir; /* success */ -+ -+out_dt: -+ au_ren_rev_dt(err, a); -+out_hdir: -+ au_ren_unlock(a); -+out_children: -+ au_nhash_wh_free(&a->whlist); -+ if (err && a->dst_inode && 
a->dst_bstart != a->btgt) { -+ AuDbg("bstart %d, btgt %d\n", a->dst_bstart, a->btgt); -+ au_set_h_dptr(a->dst_dentry, a->btgt, NULL); -+ au_set_dbstart(a->dst_dentry, a->dst_bstart); -+ } -+out_parent: -+ if (!err) -+ d_move(a->src_dentry, a->dst_dentry); -+ else { -+ au_update_dbstart(a->dst_dentry); -+ if (!a->dst_inode) -+ d_drop(a->dst_dentry); -+ } -+ if (au_ftest_ren(a->flags, ISSAMEDIR)) -+ di_write_unlock(a->dst_parent); -+ else -+ di_write_unlock2(a->src_parent, a->dst_parent); -+out_unlock: -+ aufs_read_and_write_unlock2(a->dst_dentry, a->src_dentry); -+out_free: -+ iput(a->dst_inode); -+ if (a->thargs) -+ au_whtmp_rmdir_free(a->thargs); -+ kfree(a); -+out: -+ AuTraceErr(err); -+ return err; -+} -diff --git a/fs/aufs/iinfo.c b/fs/aufs/iinfo.c -new file mode 100644 -index 0000000..f889aba ---- /dev/null -+++ b/fs/aufs/iinfo.c -@@ -0,0 +1,277 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * inode private data -+ */ -+ -+#include "aufs.h" -+ -+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex) -+{ -+ struct inode *h_inode; -+ -+ IiMustAnyLock(inode); -+ -+ h_inode = au_ii(inode)->ii_hinode[0 + bindex].hi_inode; -+ AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0); -+ return h_inode; -+} -+ -+/* todo: hard/soft set? 
*/ -+void au_hiput(struct au_hinode *hinode) -+{ -+ au_hn_free(hinode); -+ dput(hinode->hi_whdentry); -+ iput(hinode->hi_inode); -+} -+ -+unsigned int au_hi_flags(struct inode *inode, int isdir) -+{ -+ unsigned int flags; -+ const unsigned int mnt_flags = au_mntflags(inode->i_sb); -+ -+ flags = 0; -+ if (au_opt_test(mnt_flags, XINO)) -+ au_fset_hi(flags, XINO); -+ if (isdir && au_opt_test(mnt_flags, UDBA_HNOTIFY)) -+ au_fset_hi(flags, HNOTIFY); -+ return flags; -+} -+ -+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex, -+ struct inode *h_inode, unsigned int flags) -+{ -+ struct au_hinode *hinode; -+ struct inode *hi; -+ struct au_iinfo *iinfo = au_ii(inode); -+ -+ IiMustWriteLock(inode); -+ -+ hinode = iinfo->ii_hinode + bindex; -+ hi = hinode->hi_inode; -+ AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0); -+ -+ if (hi) -+ au_hiput(hinode); -+ hinode->hi_inode = h_inode; -+ if (h_inode) { -+ int err; -+ struct super_block *sb = inode->i_sb; -+ struct au_branch *br; -+ -+ AuDebugOn(inode->i_mode -+ && (h_inode->i_mode & S_IFMT) -+ != (inode->i_mode & S_IFMT)); -+ if (bindex == iinfo->ii_bstart) -+ au_cpup_igen(inode, h_inode); -+ br = au_sbr(sb, bindex); -+ hinode->hi_id = br->br_id; -+ if (au_ftest_hi(flags, XINO)) { -+ err = au_xino_write(sb, bindex, h_inode->i_ino, -+ inode->i_ino); -+ if (unlikely(err)) -+ AuIOErr1("failed au_xino_write() %d\n", err); -+ } -+ -+ if (au_ftest_hi(flags, HNOTIFY) -+ && au_br_hnotifyable(br->br_perm)) { -+ err = au_hn_alloc(hinode, inode); -+ if (unlikely(err)) -+ AuIOErr1("au_hn_alloc() %d\n", err); -+ } -+ } -+} -+ -+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex, -+ struct dentry *h_wh) -+{ -+ struct au_hinode *hinode; -+ -+ IiMustWriteLock(inode); -+ -+ hinode = au_ii(inode)->ii_hinode + bindex; -+ AuDebugOn(hinode->hi_whdentry); -+ hinode->hi_whdentry = h_wh; -+} -+ -+void au_update_iigen(struct inode *inode, int half) -+{ -+ struct au_iinfo *iinfo; -+ struct au_iigen *iigen; -+ unsigned int 
sigen; -+ -+ sigen = au_sigen(inode->i_sb); -+ iinfo = au_ii(inode); -+ iigen = &iinfo->ii_generation; -+ spin_lock(&iigen->ig_spin); -+ iigen->ig_generation = sigen; -+ if (half) -+ au_ig_fset(iigen->ig_flags, HALF_REFRESHED); -+ else -+ au_ig_fclr(iigen->ig_flags, HALF_REFRESHED); -+ spin_unlock(&iigen->ig_spin); -+} -+ -+/* it may be called at remount time, too */ -+void au_update_ibrange(struct inode *inode, int do_put_zero) -+{ -+ struct au_iinfo *iinfo; -+ aufs_bindex_t bindex, bend; -+ -+ iinfo = au_ii(inode); -+ if (!iinfo) -+ return; -+ -+ IiMustWriteLock(inode); -+ -+ if (do_put_zero && iinfo->ii_bstart >= 0) { -+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; -+ bindex++) { -+ struct inode *h_i; -+ -+ h_i = iinfo->ii_hinode[0 + bindex].hi_inode; -+ if (h_i -+ && !h_i->i_nlink -+ && !(h_i->i_state & I_LINKABLE)) -+ au_set_h_iptr(inode, bindex, NULL, 0); -+ } -+ } -+ -+ iinfo->ii_bstart = -1; -+ iinfo->ii_bend = -1; -+ bend = au_sbend(inode->i_sb); -+ for (bindex = 0; bindex <= bend; bindex++) -+ if (iinfo->ii_hinode[0 + bindex].hi_inode) { -+ iinfo->ii_bstart = bindex; -+ break; -+ } -+ if (iinfo->ii_bstart >= 0) -+ for (bindex = bend; bindex >= iinfo->ii_bstart; bindex--) -+ if (iinfo->ii_hinode[0 + bindex].hi_inode) { -+ iinfo->ii_bend = bindex; -+ break; -+ } -+ AuDebugOn(iinfo->ii_bstart > iinfo->ii_bend); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_icntnr_init_once(void *_c) -+{ -+ struct au_icntnr *c = _c; -+ struct au_iinfo *iinfo = &c->iinfo; -+ static struct lock_class_key aufs_ii; -+ -+ spin_lock_init(&iinfo->ii_generation.ig_spin); -+ au_rw_init(&iinfo->ii_rwsem); -+ au_rw_class(&iinfo->ii_rwsem, &aufs_ii); -+ inode_init_once(&c->vfs_inode); -+} -+ -+int au_iinfo_init(struct inode *inode) -+{ -+ struct au_iinfo *iinfo; -+ struct super_block *sb; -+ int nbr, i; -+ -+ sb = inode->i_sb; -+ iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo); -+ nbr = au_sbend(sb) + 1; 
-+ if (unlikely(nbr <= 0)) -+ nbr = 1; -+ iinfo->ii_hinode = kcalloc(nbr, sizeof(*iinfo->ii_hinode), GFP_NOFS); -+ if (iinfo->ii_hinode) { -+ au_ninodes_inc(sb); -+ for (i = 0; i < nbr; i++) -+ iinfo->ii_hinode[i].hi_id = -1; -+ -+ iinfo->ii_generation.ig_generation = au_sigen(sb); -+ iinfo->ii_bstart = -1; -+ iinfo->ii_bend = -1; -+ iinfo->ii_vdir = NULL; -+ return 0; -+ } -+ return -ENOMEM; -+} -+ -+int au_ii_realloc(struct au_iinfo *iinfo, int nbr) -+{ -+ int err, sz; -+ struct au_hinode *hip; -+ -+ AuRwMustWriteLock(&iinfo->ii_rwsem); -+ -+ err = -ENOMEM; -+ sz = sizeof(*hip) * (iinfo->ii_bend + 1); -+ if (!sz) -+ sz = sizeof(*hip); -+ hip = au_kzrealloc(iinfo->ii_hinode, sz, sizeof(*hip) * nbr, GFP_NOFS); -+ if (hip) { -+ iinfo->ii_hinode = hip; -+ err = 0; -+ } -+ -+ return err; -+} -+ -+void au_iinfo_fin(struct inode *inode) -+{ -+ struct au_iinfo *iinfo; -+ struct au_hinode *hi; -+ struct super_block *sb; -+ aufs_bindex_t bindex, bend; -+ const unsigned char unlinked = !inode->i_nlink; -+ -+ iinfo = au_ii(inode); -+ /* bad_inode case */ -+ if (!iinfo) -+ return; -+ -+ sb = inode->i_sb; -+ au_ninodes_dec(sb); -+ if (si_pid_test(sb)) -+ au_xino_delete_inode(inode, unlinked); -+ else { -+ /* -+ * it is safe to hide the dependency between sbinfo and -+ * sb->s_umount. -+ */ -+ lockdep_off(); -+ si_noflush_read_lock(sb); -+ au_xino_delete_inode(inode, unlinked); -+ si_read_unlock(sb); -+ lockdep_on(); -+ } -+ -+ if (iinfo->ii_vdir) -+ au_vdir_free(iinfo->ii_vdir); -+ -+ bindex = iinfo->ii_bstart; -+ if (bindex >= 0) { -+ hi = iinfo->ii_hinode + bindex; -+ bend = iinfo->ii_bend; -+ while (bindex++ <= bend) { -+ if (hi->hi_inode) -+ au_hiput(hi); -+ hi++; -+ } -+ } -+ kfree(iinfo->ii_hinode); -+ iinfo->ii_hinode = NULL; -+ AuRwDestroy(&iinfo->ii_rwsem); -+} -diff --git a/fs/aufs/inode.c b/fs/aufs/inode.c -new file mode 100644 -index 0000000..75ec2e5 ---- /dev/null -+++ b/fs/aufs/inode.c -@@ -0,0 +1,522 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * inode functions -+ */ -+ -+#include "aufs.h" -+ -+struct inode *au_igrab(struct inode *inode) -+{ -+ if (inode) { -+ AuDebugOn(!atomic_read(&inode->i_count)); -+ ihold(inode); -+ } -+ return inode; -+} -+ -+static void au_refresh_hinode_attr(struct inode *inode, int do_version) -+{ -+ au_cpup_attr_all(inode, /*force*/0); -+ au_update_iigen(inode, /*half*/1); -+ if (do_version) -+ inode->i_version++; -+} -+ -+static int au_ii_refresh(struct inode *inode, int *update) -+{ -+ int err, e; -+ umode_t type; -+ aufs_bindex_t bindex, new_bindex; -+ struct super_block *sb; -+ struct au_iinfo *iinfo; -+ struct au_hinode *p, *q, tmp; -+ -+ IiMustWriteLock(inode); -+ -+ *update = 0; -+ sb = inode->i_sb; -+ type = inode->i_mode & S_IFMT; -+ iinfo = au_ii(inode); -+ err = au_ii_realloc(iinfo, au_sbend(sb) + 1); -+ if (unlikely(err)) -+ goto out; -+ -+ AuDebugOn(iinfo->ii_bstart < 0); -+ p = iinfo->ii_hinode + iinfo->ii_bstart; -+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; -+ bindex++, p++) { -+ if (!p->hi_inode) -+ continue; -+ -+ AuDebugOn(type != (p->hi_inode->i_mode & S_IFMT)); -+ new_bindex = au_br_index(sb, p->hi_id); -+ if (new_bindex == bindex) -+ continue; -+ -+ if (new_bindex < 0) { -+ *update = 1; -+ au_hiput(p); -+ p->hi_inode = NULL; -+ continue; -+ } -+ -+ if (new_bindex < 
iinfo->ii_bstart) -+ iinfo->ii_bstart = new_bindex; -+ if (iinfo->ii_bend < new_bindex) -+ iinfo->ii_bend = new_bindex; -+ /* swap two lower inode, and loop again */ -+ q = iinfo->ii_hinode + new_bindex; -+ tmp = *q; -+ *q = *p; -+ *p = tmp; -+ if (tmp.hi_inode) { -+ bindex--; -+ p--; -+ } -+ } -+ au_update_ibrange(inode, /*do_put_zero*/0); -+ e = au_dy_irefresh(inode); -+ if (unlikely(e && !err)) -+ err = e; -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+void au_refresh_iop(struct inode *inode, int force_getattr) -+{ -+ int type; -+ struct au_sbinfo *sbi = au_sbi(inode->i_sb); -+ const struct inode_operations *iop -+ = force_getattr ? aufs_iop : sbi->si_iop_array; -+ -+ if (inode->i_op == iop) -+ return; -+ -+ switch (inode->i_mode & S_IFMT) { -+ case S_IFDIR: -+ type = AuIop_DIR; -+ break; -+ case S_IFLNK: -+ type = AuIop_SYMLINK; -+ break; -+ default: -+ type = AuIop_OTHER; -+ break; -+ } -+ -+ inode->i_op = iop + type; -+ /* unnecessary smp_wmb() */ -+} -+ -+int au_refresh_hinode_self(struct inode *inode) -+{ -+ int err, update; -+ -+ err = au_ii_refresh(inode, &update); -+ if (!err) -+ au_refresh_hinode_attr(inode, update && S_ISDIR(inode->i_mode)); -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_refresh_hinode(struct inode *inode, struct dentry *dentry) -+{ -+ int err, e, update; -+ unsigned int flags; -+ umode_t mode; -+ aufs_bindex_t bindex, bend; -+ unsigned char isdir; -+ struct au_hinode *p; -+ struct au_iinfo *iinfo; -+ -+ err = au_ii_refresh(inode, &update); -+ if (unlikely(err)) -+ goto out; -+ -+ update = 0; -+ iinfo = au_ii(inode); -+ p = iinfo->ii_hinode + iinfo->ii_bstart; -+ mode = (inode->i_mode & S_IFMT); -+ isdir = S_ISDIR(mode); -+ flags = au_hi_flags(inode, isdir); -+ bend = au_dbend(dentry); -+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) { -+ struct inode *h_i; -+ struct dentry *h_d; -+ -+ h_d = au_h_dptr(dentry, bindex); -+ if (!h_d || !h_d->d_inode) -+ continue; -+ -+ AuDebugOn(mode != (h_d->d_inode->i_mode & 
S_IFMT)); -+ if (iinfo->ii_bstart <= bindex && bindex <= iinfo->ii_bend) { -+ h_i = au_h_iptr(inode, bindex); -+ if (h_i) { -+ if (h_i == h_d->d_inode) -+ continue; -+ err = -EIO; -+ break; -+ } -+ } -+ if (bindex < iinfo->ii_bstart) -+ iinfo->ii_bstart = bindex; -+ if (iinfo->ii_bend < bindex) -+ iinfo->ii_bend = bindex; -+ au_set_h_iptr(inode, bindex, au_igrab(h_d->d_inode), flags); -+ update = 1; -+ } -+ au_update_ibrange(inode, /*do_put_zero*/0); -+ e = au_dy_irefresh(inode); -+ if (unlikely(e && !err)) -+ err = e; -+ if (!err) -+ au_refresh_hinode_attr(inode, update && isdir); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int set_inode(struct inode *inode, struct dentry *dentry) -+{ -+ int err; -+ unsigned int flags; -+ umode_t mode; -+ aufs_bindex_t bindex, bstart, btail; -+ unsigned char isdir; -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ struct au_iinfo *iinfo; -+ struct inode_operations *iop; -+ -+ IiMustWriteLock(inode); -+ -+ err = 0; -+ isdir = 0; -+ iop = au_sbi(inode->i_sb)->si_iop_array; -+ bstart = au_dbstart(dentry); -+ h_inode = au_h_dptr(dentry, bstart)->d_inode; -+ mode = h_inode->i_mode; -+ switch (mode & S_IFMT) { -+ case S_IFREG: -+ btail = au_dbtail(dentry); -+ inode->i_op = iop + AuIop_OTHER; -+ inode->i_fop = &aufs_file_fop; -+ err = au_dy_iaop(inode, bstart, h_inode); -+ if (unlikely(err)) -+ goto out; -+ break; -+ case S_IFDIR: -+ isdir = 1; -+ btail = au_dbtaildir(dentry); -+ inode->i_op = iop + AuIop_DIR; -+ inode->i_fop = &aufs_dir_fop; -+ break; -+ case S_IFLNK: -+ btail = au_dbtail(dentry); -+ inode->i_op = iop + AuIop_SYMLINK; -+ break; -+ case S_IFBLK: -+ case S_IFCHR: -+ case S_IFIFO: -+ case S_IFSOCK: -+ btail = au_dbtail(dentry); -+ inode->i_op = iop + AuIop_OTHER; -+ init_special_inode(inode, mode, h_inode->i_rdev); -+ break; -+ default: -+ AuIOErr("Unknown file type 0%o\n", mode); -+ err = -EIO; -+ goto out; -+ } -+ -+ /* do not set hnotify for whiteouted dirs (SHWH mode) */ -+ flags = 
au_hi_flags(inode, isdir); -+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH) -+ && au_ftest_hi(flags, HNOTIFY) -+ && dentry->d_name.len > AUFS_WH_PFX_LEN -+ && !memcmp(dentry->d_name.name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) -+ au_fclr_hi(flags, HNOTIFY); -+ iinfo = au_ii(inode); -+ iinfo->ii_bstart = bstart; -+ iinfo->ii_bend = btail; -+ for (bindex = bstart; bindex <= btail; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry) -+ au_set_h_iptr(inode, bindex, -+ au_igrab(h_dentry->d_inode), flags); -+ } -+ au_cpup_attr_all(inode, /*force*/1); -+ /* -+ * to force calling aufs_get_acl() every time, -+ * do not call cache_no_acl() for aufs inode. -+ */ -+ -+out: -+ return err; -+} -+ -+/* -+ * successful returns with iinfo write_locked -+ * minus: errno -+ * zero: success, matched -+ * plus: no error, but unmatched -+ */ -+static int reval_inode(struct inode *inode, struct dentry *dentry) -+{ -+ int err; -+ unsigned int gen, igflags; -+ aufs_bindex_t bindex, bend; -+ struct inode *h_inode, *h_dinode; -+ -+ /* -+ * before this function, if aufs got any iinfo lock, it must be only -+ * one, the parent dir. -+ * it can happen by UDBA and the obsoleted inode number. 
-+ */ -+ err = -EIO; -+ if (unlikely(inode->i_ino == parent_ino(dentry))) -+ goto out; -+ -+ err = 1; -+ ii_write_lock_new_child(inode); -+ h_dinode = au_h_dptr(dentry, au_dbstart(dentry))->d_inode; -+ bend = au_ibend(inode); -+ for (bindex = au_ibstart(inode); bindex <= bend; bindex++) { -+ h_inode = au_h_iptr(inode, bindex); -+ if (!h_inode || h_inode != h_dinode) -+ continue; -+ -+ err = 0; -+ gen = au_iigen(inode, &igflags); -+ if (gen == au_digen(dentry) -+ && !au_ig_ftest(igflags, HALF_REFRESHED)) -+ break; -+ -+ /* fully refresh inode using dentry */ -+ err = au_refresh_hinode(inode, dentry); -+ if (!err) -+ au_update_iigen(inode, /*half*/0); -+ break; -+ } -+ -+ if (unlikely(err)) -+ ii_write_unlock(inode); -+out: -+ return err; -+} -+ -+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino, -+ unsigned int d_type, ino_t *ino) -+{ -+ int err; -+ struct mutex *mtx; -+ -+ /* prevent hardlinked inode number from race condition */ -+ mtx = NULL; -+ if (d_type != DT_DIR) { -+ mtx = &au_sbr(sb, bindex)->br_xino.xi_nondir_mtx; -+ mutex_lock(mtx); -+ } -+ err = au_xino_read(sb, bindex, h_ino, ino); -+ if (unlikely(err)) -+ goto out; -+ -+ if (!*ino) { -+ err = -EIO; -+ *ino = au_xino_new_ino(sb); -+ if (unlikely(!*ino)) -+ goto out; -+ err = au_xino_write(sb, bindex, h_ino, *ino); -+ if (unlikely(err)) -+ goto out; -+ } -+ -+out: -+ if (mtx) -+ mutex_unlock(mtx); -+ return err; -+} -+ -+/* successful returns with iinfo write_locked */ -+/* todo: return with unlocked? */ -+struct inode *au_new_inode(struct dentry *dentry, int must_new) -+{ -+ struct inode *inode; -+ struct dentry *h_dentry; -+ struct super_block *sb; -+ struct mutex *mtx; -+ ino_t h_ino, ino; -+ int err; -+ aufs_bindex_t bstart; -+ -+ sb = dentry->d_sb; -+ bstart = au_dbstart(dentry); -+ h_dentry = au_h_dptr(dentry, bstart); -+ h_ino = h_dentry->d_inode->i_ino; -+ -+ /* -+ * stop 'race'-ing between hardlinks under different -+ * parents. 
-+ */ -+ mtx = NULL; -+ if (!d_is_dir(h_dentry)) -+ mtx = &au_sbr(sb, bstart)->br_xino.xi_nondir_mtx; -+ -+new_ino: -+ if (mtx) -+ mutex_lock(mtx); -+ err = au_xino_read(sb, bstart, h_ino, &ino); -+ inode = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ if (!ino) { -+ ino = au_xino_new_ino(sb); -+ if (unlikely(!ino)) { -+ inode = ERR_PTR(-EIO); -+ goto out; -+ } -+ } -+ -+ AuDbg("i%lu\n", (unsigned long)ino); -+ inode = au_iget_locked(sb, ino); -+ err = PTR_ERR(inode); -+ if (IS_ERR(inode)) -+ goto out; -+ -+ AuDbg("%lx, new %d\n", inode->i_state, !!(inode->i_state & I_NEW)); -+ if (inode->i_state & I_NEW) { -+ /* verbose coding for lock class name */ -+ if (unlikely(d_is_symlink(h_dentry))) -+ au_rw_class(&au_ii(inode)->ii_rwsem, -+ au_lc_key + AuLcSymlink_IIINFO); -+ else if (unlikely(d_is_dir(h_dentry))) -+ au_rw_class(&au_ii(inode)->ii_rwsem, -+ au_lc_key + AuLcDir_IIINFO); -+ else /* likely */ -+ au_rw_class(&au_ii(inode)->ii_rwsem, -+ au_lc_key + AuLcNonDir_IIINFO); -+ -+ ii_write_lock_new_child(inode); -+ err = set_inode(inode, dentry); -+ if (!err) { -+ unlock_new_inode(inode); -+ goto out; /* success */ -+ } -+ -+ /* -+ * iget_failed() calls iput(), but we need to call -+ * ii_write_unlock() after iget_failed(). so dirty hack for -+ * i_count. -+ */ -+ atomic_inc(&inode->i_count); -+ iget_failed(inode); -+ ii_write_unlock(inode); -+ au_xino_write(sb, bstart, h_ino, /*ino*/0); -+ /* ignore this error */ -+ goto out_iput; -+ } else if (!must_new && !IS_DEADDIR(inode) && inode->i_nlink) { -+ /* -+ * horrible race condition between lookup, readdir and copyup -+ * (or something). 
-+ */ -+ if (mtx) -+ mutex_unlock(mtx); -+ err = reval_inode(inode, dentry); -+ if (unlikely(err < 0)) { -+ mtx = NULL; -+ goto out_iput; -+ } -+ -+ if (!err) { -+ mtx = NULL; -+ goto out; /* success */ -+ } else if (mtx) -+ mutex_lock(mtx); -+ } -+ -+ if (unlikely(au_test_fs_unique_ino(h_dentry->d_inode))) -+ AuWarn1("Warning: Un-notified UDBA or repeatedly renamed dir," -+ " b%d, %s, %pd, hi%lu, i%lu.\n", -+ bstart, au_sbtype(h_dentry->d_sb), dentry, -+ (unsigned long)h_ino, (unsigned long)ino); -+ ino = 0; -+ err = au_xino_write(sb, bstart, h_ino, /*ino*/0); -+ if (!err) { -+ iput(inode); -+ if (mtx) -+ mutex_unlock(mtx); -+ goto new_ino; -+ } -+ -+out_iput: -+ iput(inode); -+ inode = ERR_PTR(err); -+out: -+ if (mtx) -+ mutex_unlock(mtx); -+ return inode; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex, -+ struct inode *inode) -+{ -+ int err; -+ struct inode *hi; -+ -+ err = au_br_rdonly(au_sbr(sb, bindex)); -+ -+ /* pseudo-link after flushed may happen out of bounds */ -+ if (!err -+ && inode -+ && au_ibstart(inode) <= bindex -+ && bindex <= au_ibend(inode)) { -+ /* -+ * permission check is unnecessary since vfsub routine -+ * will be called later -+ */ -+ hi = au_h_iptr(inode, bindex); -+ if (hi) -+ err = IS_IMMUTABLE(hi) ? 
-EROFS : 0; -+ } -+ -+ return err; -+} -+ -+int au_test_h_perm(struct inode *h_inode, int mask) -+{ -+ if (uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) -+ return 0; -+ return inode_permission(h_inode, mask); -+} -+ -+int au_test_h_perm_sio(struct inode *h_inode, int mask) -+{ -+ if (au_test_nfs(h_inode->i_sb) -+ && (mask & MAY_WRITE) -+ && S_ISDIR(h_inode->i_mode)) -+ mask |= MAY_READ; /* force permission check */ -+ return au_test_h_perm(h_inode, mask); -+} -diff --git a/fs/aufs/inode.h b/fs/aufs/inode.h -new file mode 100644 -index 0000000..49d53a2 ---- /dev/null -+++ b/fs/aufs/inode.h -@@ -0,0 +1,686 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * inode operations -+ */ -+ -+#ifndef __AUFS_INODE_H__ -+#define __AUFS_INODE_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include "rwsem.h" -+ -+struct vfsmount; -+ -+struct au_hnotify { -+#ifdef CONFIG_AUFS_HNOTIFY -+#ifdef CONFIG_AUFS_HFSNOTIFY -+ /* never use fsnotify_add_vfsmount_mark() */ -+ struct fsnotify_mark hn_mark; -+#endif -+ struct inode *hn_aufs_inode; /* no get/put */ -+#endif -+} ____cacheline_aligned_in_smp; -+ -+struct au_hinode { -+ struct inode *hi_inode; -+ aufs_bindex_t hi_id; -+#ifdef CONFIG_AUFS_HNOTIFY -+ struct au_hnotify *hi_notify; -+#endif -+ -+ /* reference to the copied-up whiteout with get/put */ -+ struct dentry *hi_whdentry; -+}; -+ -+/* ig_flags */ -+#define AuIG_HALF_REFRESHED 1 -+#define au_ig_ftest(flags, name) ((flags) & AuIG_##name) -+#define au_ig_fset(flags, name) \ -+ do { (flags) |= AuIG_##name; } while (0) -+#define au_ig_fclr(flags, name) \ -+ do { (flags) &= ~AuIG_##name; } while (0) -+ -+struct au_iigen { -+ spinlock_t ig_spin; -+ __u32 ig_generation, ig_flags; -+}; -+ -+struct au_vdir; -+struct au_iinfo { -+ struct au_iigen ii_generation; -+ struct super_block *ii_hsb1; /* no get/put */ -+ -+ struct au_rwsem ii_rwsem; -+ aufs_bindex_t ii_bstart, ii_bend; -+ __u32 ii_higen; -+ struct au_hinode *ii_hinode; -+ struct au_vdir *ii_vdir; -+}; -+ -+struct au_icntnr { -+ struct au_iinfo iinfo; -+ struct inode vfs_inode; -+ struct hlist_node plink; -+} ____cacheline_aligned_in_smp; -+ -+/* au_pin flags */ -+#define AuPin_DI_LOCKED 1 -+#define AuPin_MNT_WRITE (1 << 1) -+#define au_ftest_pin(flags, name) ((flags) & AuPin_##name) -+#define au_fset_pin(flags, name) \ -+ do { (flags) |= AuPin_##name; } while (0) -+#define au_fclr_pin(flags, name) \ -+ do { (flags) &= ~AuPin_##name; } while (0) -+ -+struct au_pin { -+ /* input */ -+ struct dentry *dentry; -+ unsigned int udba; -+ unsigned char lsc_di, lsc_hi, flags; -+ aufs_bindex_t bindex; -+ -+ /* output */ -+ struct dentry *parent; -+ struct au_hinode *hdir; -+ 
struct vfsmount *h_mnt; -+ -+ /* temporary unlock/relock for copyup */ -+ struct dentry *h_dentry, *h_parent; -+ struct au_branch *br; -+ struct task_struct *task; -+}; -+ -+void au_pin_hdir_unlock(struct au_pin *p); -+int au_pin_hdir_lock(struct au_pin *p); -+int au_pin_hdir_relock(struct au_pin *p); -+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task); -+void au_pin_hdir_acquire_nest(struct au_pin *p); -+void au_pin_hdir_release(struct au_pin *p); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct au_iinfo *au_ii(struct inode *inode) -+{ -+ struct au_iinfo *iinfo; -+ -+ iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo); -+ if (iinfo->ii_hinode) -+ return iinfo; -+ return NULL; /* debugging bad_inode case */ -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* inode.c */ -+struct inode *au_igrab(struct inode *inode); -+void au_refresh_iop(struct inode *inode, int force_getattr); -+int au_refresh_hinode_self(struct inode *inode); -+int au_refresh_hinode(struct inode *inode, struct dentry *dentry); -+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino, -+ unsigned int d_type, ino_t *ino); -+struct inode *au_new_inode(struct dentry *dentry, int must_new); -+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex, -+ struct inode *inode); -+int au_test_h_perm(struct inode *h_inode, int mask); -+int au_test_h_perm_sio(struct inode *h_inode, int mask); -+ -+static inline int au_wh_ino(struct super_block *sb, aufs_bindex_t bindex, -+ ino_t h_ino, unsigned int d_type, ino_t *ino) -+{ -+#ifdef CONFIG_AUFS_SHWH -+ return au_ino(sb, bindex, h_ino, d_type, ino); -+#else -+ return 0; -+#endif -+} -+ -+/* i_op.c */ -+enum { -+ AuIop_SYMLINK, -+ AuIop_DIR, -+ AuIop_OTHER, -+ AuIop_Last -+}; -+extern struct inode_operations aufs_iop[AuIop_Last], -+ aufs_iop_nogetattr[AuIop_Last]; -+ -+/* au_wr_dir flags */ -+#define 
AuWrDir_ADD_ENTRY 1 -+#define AuWrDir_ISDIR (1 << 1) -+#define AuWrDir_TMPFILE (1 << 2) -+#define au_ftest_wrdir(flags, name) ((flags) & AuWrDir_##name) -+#define au_fset_wrdir(flags, name) \ -+ do { (flags) |= AuWrDir_##name; } while (0) -+#define au_fclr_wrdir(flags, name) \ -+ do { (flags) &= ~AuWrDir_##name; } while (0) -+ -+struct au_wr_dir_args { -+ aufs_bindex_t force_btgt; -+ unsigned char flags; -+}; -+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry, -+ struct au_wr_dir_args *args); -+ -+struct dentry *au_pinned_h_parent(struct au_pin *pin); -+void au_pin_init(struct au_pin *pin, struct dentry *dentry, -+ aufs_bindex_t bindex, int lsc_di, int lsc_hi, -+ unsigned int udba, unsigned char flags); -+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex, -+ unsigned int udba, unsigned char flags) __must_check; -+int au_do_pin(struct au_pin *pin) __must_check; -+void au_unpin(struct au_pin *pin); -+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen); -+ -+#define AuIcpup_DID_CPUP 1 -+#define au_ftest_icpup(flags, name) ((flags) & AuIcpup_##name) -+#define au_fset_icpup(flags, name) \ -+ do { (flags) |= AuIcpup_##name; } while (0) -+#define au_fclr_icpup(flags, name) \ -+ do { (flags) &= ~AuIcpup_##name; } while (0) -+ -+struct au_icpup_args { -+ unsigned char flags; -+ unsigned char pin_flags; -+ aufs_bindex_t btgt; -+ unsigned int udba; -+ struct au_pin pin; -+ struct path h_path; -+ struct inode *h_inode; -+}; -+ -+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia, -+ struct au_icpup_args *a); -+ -+int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path); -+ -+/* i_op_add.c */ -+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent, int isdir); -+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, -+ dev_t dev); -+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname); -+int aufs_create(struct inode *dir, 
struct dentry *dentry, umode_t mode, -+ bool want_excl); -+struct vfsub_aopen_args; -+int au_aopen_or_create(struct inode *dir, struct dentry *dentry, -+ struct vfsub_aopen_args *args); -+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode); -+int aufs_link(struct dentry *src_dentry, struct inode *dir, -+ struct dentry *dentry); -+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); -+ -+/* i_op_del.c */ -+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup); -+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent, int isdir); -+int aufs_unlink(struct inode *dir, struct dentry *dentry); -+int aufs_rmdir(struct inode *dir, struct dentry *dentry); -+ -+/* i_op_ren.c */ -+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt); -+int aufs_rename(struct inode *src_dir, struct dentry *src_dentry, -+ struct inode *dir, struct dentry *dentry); -+ -+/* iinfo.c */ -+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex); -+void au_hiput(struct au_hinode *hinode); -+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex, -+ struct dentry *h_wh); -+unsigned int au_hi_flags(struct inode *inode, int isdir); -+ -+/* hinode flags */ -+#define AuHi_XINO 1 -+#define AuHi_HNOTIFY (1 << 1) -+#define au_ftest_hi(flags, name) ((flags) & AuHi_##name) -+#define au_fset_hi(flags, name) \ -+ do { (flags) |= AuHi_##name; } while (0) -+#define au_fclr_hi(flags, name) \ -+ do { (flags) &= ~AuHi_##name; } while (0) -+ -+#ifndef CONFIG_AUFS_HNOTIFY -+#undef AuHi_HNOTIFY -+#define AuHi_HNOTIFY 0 -+#endif -+ -+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex, -+ struct inode *h_inode, unsigned int flags); -+ -+void au_update_iigen(struct inode *inode, int half); -+void au_update_ibrange(struct inode *inode, int do_put_zero); -+ -+void au_icntnr_init_once(void *_c); -+int au_iinfo_init(struct inode *inode); -+void au_iinfo_fin(struct inode *inode); -+int au_ii_realloc(struct 
au_iinfo *iinfo, int nbr); -+ -+#ifdef CONFIG_PROC_FS -+/* plink.c */ -+int au_plink_maint(struct super_block *sb, int flags); -+struct au_sbinfo; -+void au_plink_maint_leave(struct au_sbinfo *sbinfo); -+int au_plink_maint_enter(struct super_block *sb); -+#ifdef CONFIG_AUFS_DEBUG -+void au_plink_list(struct super_block *sb); -+#else -+AuStubVoid(au_plink_list, struct super_block *sb) -+#endif -+int au_plink_test(struct inode *inode); -+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex); -+void au_plink_append(struct inode *inode, aufs_bindex_t bindex, -+ struct dentry *h_dentry); -+void au_plink_put(struct super_block *sb, int verbose); -+void au_plink_clean(struct super_block *sb, int verbose); -+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id); -+#else -+AuStubInt0(au_plink_maint, struct super_block *sb, int flags); -+AuStubVoid(au_plink_maint_leave, struct au_sbinfo *sbinfo); -+AuStubInt0(au_plink_maint_enter, struct super_block *sb); -+AuStubVoid(au_plink_list, struct super_block *sb); -+AuStubInt0(au_plink_test, struct inode *inode); -+AuStub(struct dentry *, au_plink_lkup, return NULL, -+ struct inode *inode, aufs_bindex_t bindex); -+AuStubVoid(au_plink_append, struct inode *inode, aufs_bindex_t bindex, -+ struct dentry *h_dentry); -+AuStubVoid(au_plink_put, struct super_block *sb, int verbose); -+AuStubVoid(au_plink_clean, struct super_block *sb, int verbose); -+AuStubVoid(au_plink_half_refresh, struct super_block *sb, aufs_bindex_t br_id); -+#endif /* CONFIG_PROC_FS */ -+ -+#ifdef CONFIG_AUFS_XATTR -+/* xattr.c */ -+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags, -+ unsigned int verbose); -+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size); -+ssize_t aufs_getxattr(struct dentry *dentry, const char *name, void *value, -+ size_t size); -+int aufs_setxattr(struct dentry *dentry, const char *name, const void *value, -+ size_t size, int flags); -+int 
aufs_removexattr(struct dentry *dentry, const char *name); -+ -+/* void au_xattr_init(struct super_block *sb); */ -+#else -+AuStubInt0(au_cpup_xattr, struct dentry *h_dst, struct dentry *h_src, -+ int ignore_flags, unsigned int verbose); -+/* AuStubVoid(au_xattr_init, struct super_block *sb); */ -+#endif -+ -+#ifdef CONFIG_FS_POSIX_ACL -+struct posix_acl *aufs_get_acl(struct inode *inode, int type); -+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type); -+#endif -+ -+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL) -+enum { -+ AU_XATTR_SET, -+ AU_XATTR_REMOVE, -+ AU_ACL_SET -+}; -+ -+struct au_srxattr { -+ int type; -+ union { -+ struct { -+ const char *name; -+ const void *value; -+ size_t size; -+ int flags; -+ } set; -+ struct { -+ const char *name; -+ } remove; -+ struct { -+ struct posix_acl *acl; -+ int type; -+ } acl_set; -+ } u; -+}; -+ssize_t au_srxattr(struct dentry *dentry, struct au_srxattr *arg); -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* lock subclass for iinfo */ -+enum { -+ AuLsc_II_CHILD, /* child first */ -+ AuLsc_II_CHILD2, /* rename(2), link(2), and cpup at hnotify */ -+ AuLsc_II_CHILD3, /* copyup dirs */ -+ AuLsc_II_PARENT, /* see AuLsc_I_PARENT in vfsub.h */ -+ AuLsc_II_PARENT2, -+ AuLsc_II_PARENT3, /* copyup dirs */ -+ AuLsc_II_NEW_CHILD -+}; -+ -+/* -+ * ii_read_lock_child, ii_write_lock_child, -+ * ii_read_lock_child2, ii_write_lock_child2, -+ * ii_read_lock_child3, ii_write_lock_child3, -+ * ii_read_lock_parent, ii_write_lock_parent, -+ * ii_read_lock_parent2, ii_write_lock_parent2, -+ * ii_read_lock_parent3, ii_write_lock_parent3, -+ * ii_read_lock_new_child, ii_write_lock_new_child, -+ */ -+#define AuReadLockFunc(name, lsc) \ -+static inline void ii_read_lock_##name(struct inode *i) \ -+{ \ -+ au_rw_read_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \ -+} -+ -+#define AuWriteLockFunc(name, lsc) \ -+static inline void 
ii_write_lock_##name(struct inode *i) \ -+{ \ -+ au_rw_write_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \ -+} -+ -+#define AuRWLockFuncs(name, lsc) \ -+ AuReadLockFunc(name, lsc) \ -+ AuWriteLockFunc(name, lsc) -+ -+AuRWLockFuncs(child, CHILD); -+AuRWLockFuncs(child2, CHILD2); -+AuRWLockFuncs(child3, CHILD3); -+AuRWLockFuncs(parent, PARENT); -+AuRWLockFuncs(parent2, PARENT2); -+AuRWLockFuncs(parent3, PARENT3); -+AuRWLockFuncs(new_child, NEW_CHILD); -+ -+#undef AuReadLockFunc -+#undef AuWriteLockFunc -+#undef AuRWLockFuncs -+ -+/* -+ * ii_read_unlock, ii_write_unlock, ii_downgrade_lock -+ */ -+AuSimpleUnlockRwsemFuncs(ii, struct inode *i, &au_ii(i)->ii_rwsem); -+ -+#define IiMustNoWaiters(i) AuRwMustNoWaiters(&au_ii(i)->ii_rwsem) -+#define IiMustAnyLock(i) AuRwMustAnyLock(&au_ii(i)->ii_rwsem) -+#define IiMustWriteLock(i) AuRwMustWriteLock(&au_ii(i)->ii_rwsem) -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline void au_icntnr_init(struct au_icntnr *c) -+{ -+#ifdef CONFIG_AUFS_DEBUG -+ c->vfs_inode.i_mode = 0; -+#endif -+} -+ -+static inline unsigned int au_iigen(struct inode *inode, unsigned int *igflags) -+{ -+ unsigned int gen; -+ struct au_iinfo *iinfo; -+ struct au_iigen *iigen; -+ -+ iinfo = au_ii(inode); -+ iigen = &iinfo->ii_generation; -+ spin_lock(&iigen->ig_spin); -+ if (igflags) -+ *igflags = iigen->ig_flags; -+ gen = iigen->ig_generation; -+ spin_unlock(&iigen->ig_spin); -+ -+ return gen; -+} -+ -+/* tiny test for inode number */ -+/* tmpfs generation is too rough */ -+static inline int au_test_higen(struct inode *inode, struct inode *h_inode) -+{ -+ struct au_iinfo *iinfo; -+ -+ iinfo = au_ii(inode); -+ AuRwMustAnyLock(&iinfo->ii_rwsem); -+ return !(iinfo->ii_hsb1 == h_inode->i_sb -+ && iinfo->ii_higen == h_inode->i_generation); -+} -+ -+static inline void au_iigen_dec(struct inode *inode) -+{ -+ struct au_iinfo *iinfo; -+ struct au_iigen *iigen; -+ -+ iinfo = au_ii(inode); -+ iigen = 
&iinfo->ii_generation; -+ spin_lock(&iigen->ig_spin); -+ iigen->ig_generation--; -+ spin_unlock(&iigen->ig_spin); -+} -+ -+static inline int au_iigen_test(struct inode *inode, unsigned int sigen) -+{ -+ int err; -+ -+ err = 0; -+ if (unlikely(inode && au_iigen(inode, NULL) != sigen)) -+ err = -EIO; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline aufs_bindex_t au_ii_br_id(struct inode *inode, -+ aufs_bindex_t bindex) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_hinode[0 + bindex].hi_id; -+} -+ -+static inline aufs_bindex_t au_ibstart(struct inode *inode) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_bstart; -+} -+ -+static inline aufs_bindex_t au_ibend(struct inode *inode) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_bend; -+} -+ -+static inline struct au_vdir *au_ivdir(struct inode *inode) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_vdir; -+} -+ -+static inline struct dentry *au_hi_wh(struct inode *inode, aufs_bindex_t bindex) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_hinode[0 + bindex].hi_whdentry; -+} -+ -+static inline void au_set_ibstart(struct inode *inode, aufs_bindex_t bindex) -+{ -+ IiMustWriteLock(inode); -+ au_ii(inode)->ii_bstart = bindex; -+} -+ -+static inline void au_set_ibend(struct inode *inode, aufs_bindex_t bindex) -+{ -+ IiMustWriteLock(inode); -+ au_ii(inode)->ii_bend = bindex; -+} -+ -+static inline void au_set_ivdir(struct inode *inode, struct au_vdir *vdir) -+{ -+ IiMustWriteLock(inode); -+ au_ii(inode)->ii_vdir = vdir; -+} -+ -+static inline struct au_hinode *au_hi(struct inode *inode, aufs_bindex_t bindex) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_hinode + bindex; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct dentry *au_pinned_parent(struct au_pin *pin) -+{ -+ if (pin) -+ return pin->parent; -+ return NULL; -+} -+ -+static inline 
struct inode *au_pinned_h_dir(struct au_pin *pin) -+{ -+ if (pin && pin->hdir) -+ return pin->hdir->hi_inode; -+ return NULL; -+} -+ -+static inline struct au_hinode *au_pinned_hdir(struct au_pin *pin) -+{ -+ if (pin) -+ return pin->hdir; -+ return NULL; -+} -+ -+static inline void au_pin_set_dentry(struct au_pin *pin, struct dentry *dentry) -+{ -+ if (pin) -+ pin->dentry = dentry; -+} -+ -+static inline void au_pin_set_parent_lflag(struct au_pin *pin, -+ unsigned char lflag) -+{ -+ if (pin) { -+ if (lflag) -+ au_fset_pin(pin->flags, DI_LOCKED); -+ else -+ au_fclr_pin(pin->flags, DI_LOCKED); -+ } -+} -+ -+#if 0 /* reserved */ -+static inline void au_pin_set_parent(struct au_pin *pin, struct dentry *parent) -+{ -+ if (pin) { -+ dput(pin->parent); -+ pin->parent = dget(parent); -+ } -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_branch; -+#ifdef CONFIG_AUFS_HNOTIFY -+struct au_hnotify_op { -+ void (*ctl)(struct au_hinode *hinode, int do_set); -+ int (*alloc)(struct au_hinode *hinode); -+ -+ /* -+ * if it returns true, the the caller should free hinode->hi_notify, -+ * otherwise ->free() frees it. 
-+ */ -+ int (*free)(struct au_hinode *hinode, -+ struct au_hnotify *hn) __must_check; -+ -+ void (*fin)(void); -+ int (*init)(void); -+ -+ int (*reset_br)(unsigned int udba, struct au_branch *br, int perm); -+ void (*fin_br)(struct au_branch *br); -+ int (*init_br)(struct au_branch *br, int perm); -+}; -+ -+/* hnotify.c */ -+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode); -+void au_hn_free(struct au_hinode *hinode); -+void au_hn_ctl(struct au_hinode *hinode, int do_set); -+void au_hn_reset(struct inode *inode, unsigned int flags); -+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask, -+ struct qstr *h_child_qstr, struct inode *h_child_inode); -+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm); -+int au_hnotify_init_br(struct au_branch *br, int perm); -+void au_hnotify_fin_br(struct au_branch *br); -+int __init au_hnotify_init(void); -+void au_hnotify_fin(void); -+ -+/* hfsnotify.c */ -+extern const struct au_hnotify_op au_hnotify_op; -+ -+static inline -+void au_hn_init(struct au_hinode *hinode) -+{ -+ hinode->hi_notify = NULL; -+} -+ -+static inline struct au_hnotify *au_hn(struct au_hinode *hinode) -+{ -+ return hinode->hi_notify; -+} -+ -+#else -+AuStub(int, au_hn_alloc, return -EOPNOTSUPP, -+ struct au_hinode *hinode __maybe_unused, -+ struct inode *inode __maybe_unused) -+AuStub(struct au_hnotify *, au_hn, return NULL, struct au_hinode *hinode) -+AuStubVoid(au_hn_free, struct au_hinode *hinode __maybe_unused) -+AuStubVoid(au_hn_ctl, struct au_hinode *hinode __maybe_unused, -+ int do_set __maybe_unused) -+AuStubVoid(au_hn_reset, struct inode *inode __maybe_unused, -+ unsigned int flags __maybe_unused) -+AuStubInt0(au_hnotify_reset_br, unsigned int udba __maybe_unused, -+ struct au_branch *br __maybe_unused, -+ int perm __maybe_unused) -+AuStubInt0(au_hnotify_init_br, struct au_branch *br __maybe_unused, -+ int perm __maybe_unused) -+AuStubVoid(au_hnotify_fin_br, struct au_branch *br 
__maybe_unused) -+AuStubInt0(__init au_hnotify_init, void) -+AuStubVoid(au_hnotify_fin, void) -+AuStubVoid(au_hn_init, struct au_hinode *hinode __maybe_unused) -+#endif /* CONFIG_AUFS_HNOTIFY */ -+ -+static inline void au_hn_suspend(struct au_hinode *hdir) -+{ -+ au_hn_ctl(hdir, /*do_set*/0); -+} -+ -+static inline void au_hn_resume(struct au_hinode *hdir) -+{ -+ au_hn_ctl(hdir, /*do_set*/1); -+} -+ -+static inline void au_hn_imtx_lock(struct au_hinode *hdir) -+{ -+ mutex_lock(&hdir->hi_inode->i_mutex); -+ au_hn_suspend(hdir); -+} -+ -+static inline void au_hn_imtx_lock_nested(struct au_hinode *hdir, -+ unsigned int sc __maybe_unused) -+{ -+ mutex_lock_nested(&hdir->hi_inode->i_mutex, sc); -+ au_hn_suspend(hdir); -+} -+ -+static inline void au_hn_imtx_unlock(struct au_hinode *hdir) -+{ -+ au_hn_resume(hdir); -+ mutex_unlock(&hdir->hi_inode->i_mutex); -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_INODE_H__ */ -diff --git a/fs/aufs/ioctl.c b/fs/aufs/ioctl.c -new file mode 100644 -index 0000000..10e2315 ---- /dev/null -+++ b/fs/aufs/ioctl.c -@@ -0,0 +1,219 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * ioctl -+ * plink-management and readdir in userspace. -+ * assist the pathconf(3) wrapper library. -+ * move-down -+ * File-based Hierarchical Storage Management. 
-+ */ -+ -+#include -+#include -+#include "aufs.h" -+ -+static int au_wbr_fd(struct path *path, struct aufs_wbr_fd __user *arg) -+{ -+ int err, fd; -+ aufs_bindex_t wbi, bindex, bend; -+ struct file *h_file; -+ struct super_block *sb; -+ struct dentry *root; -+ struct au_branch *br; -+ struct aufs_wbr_fd wbrfd = { -+ .oflags = au_dir_roflags, -+ .brid = -1 -+ }; -+ const int valid = O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_DIRECTORY -+ | O_NOATIME | O_CLOEXEC; -+ -+ AuDebugOn(wbrfd.oflags & ~valid); -+ -+ if (arg) { -+ err = copy_from_user(&wbrfd, arg, sizeof(wbrfd)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ goto out; -+ } -+ -+ err = -EINVAL; -+ AuDbg("wbrfd{0%o, %d}\n", wbrfd.oflags, wbrfd.brid); -+ wbrfd.oflags |= au_dir_roflags; -+ AuDbg("0%o\n", wbrfd.oflags); -+ if (unlikely(wbrfd.oflags & ~valid)) -+ goto out; -+ } -+ -+ fd = get_unused_fd(); -+ err = fd; -+ if (unlikely(fd < 0)) -+ goto out; -+ -+ h_file = ERR_PTR(-EINVAL); -+ wbi = 0; -+ br = NULL; -+ sb = path->dentry->d_sb; -+ root = sb->s_root; -+ aufs_read_lock(root, AuLock_IR); -+ bend = au_sbend(sb); -+ if (wbrfd.brid >= 0) { -+ wbi = au_br_index(sb, wbrfd.brid); -+ if (unlikely(wbi < 0 || wbi > bend)) -+ goto out_unlock; -+ } -+ -+ h_file = ERR_PTR(-ENOENT); -+ br = au_sbr(sb, wbi); -+ if (!au_br_writable(br->br_perm)) { -+ if (arg) -+ goto out_unlock; -+ -+ bindex = wbi + 1; -+ wbi = -1; -+ for (; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (au_br_writable(br->br_perm)) { -+ wbi = bindex; -+ br = au_sbr(sb, wbi); -+ break; -+ } -+ } -+ } -+ AuDbg("wbi %d\n", wbi); -+ if (wbi >= 0) -+ h_file = au_h_open(root, wbi, wbrfd.oflags, NULL, -+ /*force_wr*/0); -+ -+out_unlock: -+ aufs_read_unlock(root, AuLock_IR); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out_fd; -+ -+ atomic_dec(&br->br_count); /* cf. 
au_h_open() */ -+ fd_install(fd, h_file); -+ err = fd; -+ goto out; /* success */ -+ -+out_fd: -+ put_unused_fd(fd); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ long err; -+ struct dentry *dentry; -+ -+ switch (cmd) { -+ case AUFS_CTL_RDU: -+ case AUFS_CTL_RDU_INO: -+ err = au_rdu_ioctl(file, cmd, arg); -+ break; -+ -+ case AUFS_CTL_WBR_FD: -+ err = au_wbr_fd(&file->f_path, (void __user *)arg); -+ break; -+ -+ case AUFS_CTL_IBUSY: -+ err = au_ibusy_ioctl(file, arg); -+ break; -+ -+ case AUFS_CTL_BRINFO: -+ err = au_brinfo_ioctl(file, arg); -+ break; -+ -+ case AUFS_CTL_FHSM_FD: -+ dentry = file->f_dentry; -+ if (IS_ROOT(dentry)) -+ err = au_fhsm_fd(dentry->d_sb, arg); -+ else -+ err = -ENOTTY; -+ break; -+ -+ default: -+ /* do not call the lower */ -+ AuDbg("0x%x\n", cmd); -+ err = -ENOTTY; -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ long err; -+ -+ switch (cmd) { -+ case AUFS_CTL_MVDOWN: -+ err = au_mvdown(file->f_dentry, (void __user *)arg); -+ break; -+ -+ case AUFS_CTL_WBR_FD: -+ err = au_wbr_fd(&file->f_path, (void __user *)arg); -+ break; -+ -+ default: -+ /* do not call the lower */ -+ AuDbg("0x%x\n", cmd); -+ err = -ENOTTY; -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+#ifdef CONFIG_COMPAT -+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ long err; -+ -+ switch (cmd) { -+ case AUFS_CTL_RDU: -+ case AUFS_CTL_RDU_INO: -+ err = au_rdu_compat_ioctl(file, cmd, arg); -+ break; -+ -+ case AUFS_CTL_IBUSY: -+ err = au_ibusy_compat_ioctl(file, arg); -+ break; -+ -+ case AUFS_CTL_BRINFO: -+ err = au_brinfo_compat_ioctl(file, arg); -+ break; -+ -+ default: -+ err = aufs_ioctl_dir(file, cmd, arg); -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+long 
aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ return aufs_ioctl_nondir(file, cmd, (unsigned long)compat_ptr(arg)); -+} -+#endif -diff --git a/fs/aufs/loop.c b/fs/aufs/loop.c -new file mode 100644 -index 0000000..1eaf59f ---- /dev/null -+++ b/fs/aufs/loop.c -@@ -0,0 +1,146 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * support for loopback block device as a branch -+ */ -+ -+#include "aufs.h" -+ -+/* added into drivers/block/loop.c */ -+static struct file *(*backing_file_func)(struct super_block *sb); -+ -+/* -+ * test if two lower dentries have overlapping branches. -+ */ -+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding) -+{ -+ struct super_block *h_sb; -+ struct file *backing_file; -+ -+ if (unlikely(!backing_file_func)) { -+ /* don't load "loop" module here */ -+ backing_file_func = symbol_get(loop_backing_file); -+ if (unlikely(!backing_file_func)) -+ /* "loop" module is not loaded */ -+ return 0; -+ } -+ -+ h_sb = h_adding->d_sb; -+ backing_file = backing_file_func(h_sb); -+ if (!backing_file) -+ return 0; -+ -+ h_adding = backing_file->f_dentry; -+ /* -+ * h_adding can be local NFS. -+ * in this case aufs cannot detect the loop. 
-+ */ -+ if (unlikely(h_adding->d_sb == sb)) -+ return 1; -+ return !!au_test_subdir(h_adding, sb->s_root); -+} -+ -+/* true if a kernel thread named 'loop[0-9].*' accesses a file */ -+int au_test_loopback_kthread(void) -+{ -+ int ret; -+ struct task_struct *tsk = current; -+ char c, comm[sizeof(tsk->comm)]; -+ -+ ret = 0; -+ if (tsk->flags & PF_KTHREAD) { -+ get_task_comm(comm, tsk); -+ c = comm[4]; -+ ret = ('0' <= c && c <= '9' -+ && !strncmp(comm, "loop", 4)); -+ } -+ -+ return ret; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define au_warn_loopback_step 16 -+static int au_warn_loopback_nelem = au_warn_loopback_step; -+static unsigned long *au_warn_loopback_array; -+ -+void au_warn_loopback(struct super_block *h_sb) -+{ -+ int i, new_nelem; -+ unsigned long *a, magic; -+ static DEFINE_SPINLOCK(spin); -+ -+ magic = h_sb->s_magic; -+ spin_lock(&spin); -+ a = au_warn_loopback_array; -+ for (i = 0; i < au_warn_loopback_nelem && *a; i++) -+ if (a[i] == magic) { -+ spin_unlock(&spin); -+ return; -+ } -+ -+ /* h_sb is new to us, print it */ -+ if (i < au_warn_loopback_nelem) { -+ a[i] = magic; -+ goto pr; -+ } -+ -+ /* expand the array */ -+ new_nelem = au_warn_loopback_nelem + au_warn_loopback_step; -+ a = au_kzrealloc(au_warn_loopback_array, -+ au_warn_loopback_nelem * sizeof(unsigned long), -+ new_nelem * sizeof(unsigned long), GFP_ATOMIC); -+ if (a) { -+ au_warn_loopback_nelem = new_nelem; -+ au_warn_loopback_array = a; -+ a[i] = magic; -+ goto pr; -+ } -+ -+ spin_unlock(&spin); -+ AuWarn1("realloc failed, ignored\n"); -+ return; -+ -+pr: -+ spin_unlock(&spin); -+ pr_warn("you may want to try another patch for loopback file " -+ "on %s(0x%lx) branch\n", au_sbtype(h_sb), magic); -+} -+ -+int au_loopback_init(void) -+{ -+ int err; -+ struct super_block *sb __maybe_unused; -+ -+ BUILD_BUG_ON(sizeof(sb->s_magic) != sizeof(unsigned long)); -+ -+ err = 0; -+ au_warn_loopback_array = kcalloc(au_warn_loopback_step, -+ 
sizeof(unsigned long), GFP_NOFS); -+ if (unlikely(!au_warn_loopback_array)) -+ err = -ENOMEM; -+ -+ return err; -+} -+ -+void au_loopback_fin(void) -+{ -+ if (backing_file_func) -+ symbol_put(loop_backing_file); -+ kfree(au_warn_loopback_array); -+} -diff --git a/fs/aufs/loop.h b/fs/aufs/loop.h -new file mode 100644 -index 0000000..35f7446 ---- /dev/null -+++ b/fs/aufs/loop.h -@@ -0,0 +1,52 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * support for loopback mount as a branch -+ */ -+ -+#ifndef __AUFS_LOOP_H__ -+#define __AUFS_LOOP_H__ -+ -+#ifdef __KERNEL__ -+ -+struct dentry; -+struct super_block; -+ -+#ifdef CONFIG_AUFS_BDEV_LOOP -+/* drivers/block/loop.c */ -+struct file *loop_backing_file(struct super_block *sb); -+ -+/* loop.c */ -+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding); -+int au_test_loopback_kthread(void); -+void au_warn_loopback(struct super_block *h_sb); -+ -+int au_loopback_init(void); -+void au_loopback_fin(void); -+#else -+AuStubInt0(au_test_loopback_overlap, struct super_block *sb, -+ struct dentry *h_adding) -+AuStubInt0(au_test_loopback_kthread, void) -+AuStubVoid(au_warn_loopback, struct super_block *h_sb) -+ -+AuStubInt0(au_loopback_init, void) -+AuStubVoid(au_loopback_fin, void) -+#endif /* BLK_DEV_LOOP */ -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_LOOP_H__ */ -diff --git a/fs/aufs/magic.mk b/fs/aufs/magic.mk -new file mode 100644 -index 0000000..4f83bdf ---- /dev/null -+++ b/fs/aufs/magic.mk -@@ -0,0 +1,30 @@ -+ -+# defined in ${srctree}/fs/fuse/inode.c -+# tristate -+ifdef CONFIG_FUSE_FS -+ccflags-y += -DFUSE_SUPER_MAGIC=0x65735546 -+endif -+ -+# defined in ${srctree}/fs/xfs/xfs_sb.h -+# tristate -+ifdef CONFIG_XFS_FS -+ccflags-y += -DXFS_SB_MAGIC=0x58465342 -+endif -+ -+# defined in ${srctree}/fs/configfs/mount.c -+# tristate -+ifdef CONFIG_CONFIGFS_FS -+ccflags-y += -DCONFIGFS_MAGIC=0x62656570 -+endif -+ -+# defined in ${srctree}/fs/ubifs/ubifs.h -+# tristate -+ifdef CONFIG_UBIFS_FS -+ccflags-y += -DUBIFS_SUPER_MAGIC=0x24051905 -+endif -+ -+# defined in ${srctree}/fs/hfsplus/hfsplus_raw.h -+# tristate -+ifdef CONFIG_HFSPLUS_FS -+ccflags-y += -DHFSPLUS_SUPER_MAGIC=0x482b -+endif -diff --git a/fs/aufs/module.c b/fs/aufs/module.c -new file mode 100644 -index 0000000..e4e04aa ---- /dev/null -+++ b/fs/aufs/module.c -@@ -0,0 +1,222 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * module global variables and operations -+ */ -+ -+#include -+#include -+#include "aufs.h" -+ -+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp) -+{ -+ if (new_sz <= nused) -+ return p; -+ -+ p = krealloc(p, new_sz, gfp); -+ if (p) -+ memset(p + nused, 0, new_sz - nused); -+ return p; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * aufs caches -+ */ -+struct kmem_cache *au_cachep[AuCache_Last]; -+static int __init au_cache_init(void) -+{ -+ au_cachep[AuCache_DINFO] = AuCacheCtor(au_dinfo, au_di_init_once); -+ if (au_cachep[AuCache_DINFO]) -+ /* SLAB_DESTROY_BY_RCU */ -+ au_cachep[AuCache_ICNTNR] = AuCacheCtor(au_icntnr, -+ au_icntnr_init_once); -+ if (au_cachep[AuCache_ICNTNR]) -+ au_cachep[AuCache_FINFO] = AuCacheCtor(au_finfo, -+ au_fi_init_once); -+ if (au_cachep[AuCache_FINFO]) -+ au_cachep[AuCache_VDIR] = AuCache(au_vdir); -+ if (au_cachep[AuCache_VDIR]) -+ au_cachep[AuCache_DEHSTR] = AuCache(au_vdir_dehstr); -+ if (au_cachep[AuCache_DEHSTR]) -+ return 0; -+ -+ return -ENOMEM; -+} -+ -+static void au_cache_fin(void) -+{ -+ int i; -+ -+ /* -+ * Make sure all delayed rcu free inodes are flushed before we -+ * destroy cache. 
-+ */ -+ rcu_barrier(); -+ -+ /* excluding AuCache_HNOTIFY */ -+ BUILD_BUG_ON(AuCache_HNOTIFY + 1 != AuCache_Last); -+ for (i = 0; i < AuCache_HNOTIFY; i++) -+ if (au_cachep[i]) { -+ kmem_cache_destroy(au_cachep[i]); -+ au_cachep[i] = NULL; -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_dir_roflags; -+ -+#ifdef CONFIG_AUFS_SBILIST -+/* -+ * iterate_supers_type() doesn't protect us from -+ * remounting (branch management) -+ */ -+struct au_sphlhead au_sbilist; -+#endif -+ -+struct lock_class_key au_lc_key[AuLcKey_Last]; -+ -+/* -+ * functions for module interface. -+ */ -+MODULE_LICENSE("GPL"); -+/* MODULE_LICENSE("GPL v2"); */ -+MODULE_AUTHOR("Junjiro R. Okajima "); -+MODULE_DESCRIPTION(AUFS_NAME -+ " -- Advanced multi layered unification filesystem"); -+MODULE_VERSION(AUFS_VERSION); -+MODULE_ALIAS_FS(AUFS_NAME); -+ -+/* this module parameter has no meaning when SYSFS is disabled */ -+int sysaufs_brs = 1; -+MODULE_PARM_DESC(brs, "use /fs/aufs/si_*/brN"); -+module_param_named(brs, sysaufs_brs, int, S_IRUGO); -+ -+/* this module parameter has no meaning when USER_NS is disabled */ -+bool au_userns; -+MODULE_PARM_DESC(allow_userns, "allow unprivileged to mount under userns"); -+module_param_named(allow_userns, au_userns, bool, S_IRUGO); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static char au_esc_chars[0x20 + 3]; /* 0x01-0x20, backslash, del, and NULL */ -+ -+int au_seq_path(struct seq_file *seq, struct path *path) -+{ -+ int err; -+ -+ err = seq_path(seq, path, au_esc_chars); -+ if (err > 0) -+ err = 0; -+ else if (err < 0) -+ err = -ENOMEM; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int __init aufs_init(void) -+{ -+ int err, i; -+ char *p; -+ -+ p = au_esc_chars; -+ for (i = 1; i <= ' '; i++) -+ *p++ = i; -+ *p++ = '\\'; -+ *p++ = '\x7f'; -+ *p = 0; -+ -+ au_dir_roflags = 
au_file_roflags(O_DIRECTORY | O_LARGEFILE); -+ -+ memcpy(aufs_iop_nogetattr, aufs_iop, sizeof(aufs_iop)); -+ for (i = 0; i < AuIop_Last; i++) -+ aufs_iop_nogetattr[i].getattr = NULL; -+ -+ au_sbilist_init(); -+ sysaufs_brs_init(); -+ au_debug_init(); -+ au_dy_init(); -+ err = sysaufs_init(); -+ if (unlikely(err)) -+ goto out; -+ err = au_procfs_init(); -+ if (unlikely(err)) -+ goto out_sysaufs; -+ err = au_wkq_init(); -+ if (unlikely(err)) -+ goto out_procfs; -+ err = au_loopback_init(); -+ if (unlikely(err)) -+ goto out_wkq; -+ err = au_hnotify_init(); -+ if (unlikely(err)) -+ goto out_loopback; -+ err = au_sysrq_init(); -+ if (unlikely(err)) -+ goto out_hin; -+ err = au_cache_init(); -+ if (unlikely(err)) -+ goto out_sysrq; -+ -+ aufs_fs_type.fs_flags |= au_userns ? FS_USERNS_MOUNT : 0; -+ err = register_filesystem(&aufs_fs_type); -+ if (unlikely(err)) -+ goto out_cache; -+ -+ /* since we define pr_fmt, call printk directly */ -+ printk(KERN_INFO AUFS_NAME " " AUFS_VERSION "\n"); -+ goto out; /* success */ -+ -+out_cache: -+ au_cache_fin(); -+out_sysrq: -+ au_sysrq_fin(); -+out_hin: -+ au_hnotify_fin(); -+out_loopback: -+ au_loopback_fin(); -+out_wkq: -+ au_wkq_fin(); -+out_procfs: -+ au_procfs_fin(); -+out_sysaufs: -+ sysaufs_fin(); -+ au_dy_fin(); -+out: -+ return err; -+} -+ -+static void __exit aufs_exit(void) -+{ -+ unregister_filesystem(&aufs_fs_type); -+ au_cache_fin(); -+ au_sysrq_fin(); -+ au_hnotify_fin(); -+ au_loopback_fin(); -+ au_wkq_fin(); -+ au_procfs_fin(); -+ sysaufs_fin(); -+ au_dy_fin(); -+} -+ -+module_init(aufs_init); -+module_exit(aufs_exit); -diff --git a/fs/aufs/module.h b/fs/aufs/module.h -new file mode 100644 -index 0000000..90c3c8f ---- /dev/null -+++ b/fs/aufs/module.h -@@ -0,0 +1,105 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * module initialization and module-global -+ */ -+ -+#ifndef __AUFS_MODULE_H__ -+#define __AUFS_MODULE_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+ -+struct path; -+struct seq_file; -+ -+/* module parameters */ -+extern int sysaufs_brs; -+extern bool au_userns; -+ -+/* ---------------------------------------------------------------------- */ -+ -+extern int au_dir_roflags; -+ -+enum { -+ AuLcNonDir_FIINFO, -+ AuLcNonDir_DIINFO, -+ AuLcNonDir_IIINFO, -+ -+ AuLcDir_FIINFO, -+ AuLcDir_DIINFO, -+ AuLcDir_IIINFO, -+ -+ AuLcSymlink_DIINFO, -+ AuLcSymlink_IIINFO, -+ -+ AuLcKey_Last -+}; -+extern struct lock_class_key au_lc_key[AuLcKey_Last]; -+ -+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp); -+int au_seq_path(struct seq_file *seq, struct path *path); -+ -+#ifdef CONFIG_PROC_FS -+/* procfs.c */ -+int __init au_procfs_init(void); -+void au_procfs_fin(void); -+#else -+AuStubInt0(au_procfs_init, void); -+AuStubVoid(au_procfs_fin, void); -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* kmem cache */ -+enum { -+ AuCache_DINFO, -+ AuCache_ICNTNR, -+ AuCache_FINFO, -+ AuCache_VDIR, -+ AuCache_DEHSTR, -+ AuCache_HNOTIFY, /* must be last */ -+ AuCache_Last -+}; -+ -+#define AuCacheFlags (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD) -+#define 
AuCache(type) KMEM_CACHE(type, AuCacheFlags) -+#define AuCacheCtor(type, ctor) \ -+ kmem_cache_create(#type, sizeof(struct type), \ -+ __alignof__(struct type), AuCacheFlags, ctor) -+ -+extern struct kmem_cache *au_cachep[]; -+ -+#define AuCacheFuncs(name, index) \ -+static inline struct au_##name *au_cache_alloc_##name(void) \ -+{ return kmem_cache_alloc(au_cachep[AuCache_##index], GFP_NOFS); } \ -+static inline void au_cache_free_##name(struct au_##name *p) \ -+{ kmem_cache_free(au_cachep[AuCache_##index], p); } -+ -+AuCacheFuncs(dinfo, DINFO); -+AuCacheFuncs(icntnr, ICNTNR); -+AuCacheFuncs(finfo, FINFO); -+AuCacheFuncs(vdir, VDIR); -+AuCacheFuncs(vdir_dehstr, DEHSTR); -+#ifdef CONFIG_AUFS_HNOTIFY -+AuCacheFuncs(hnotify, HNOTIFY); -+#endif -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_MODULE_H__ */ -diff --git a/fs/aufs/mvdown.c b/fs/aufs/mvdown.c -new file mode 100644 -index 0000000..e660c8f ---- /dev/null -+++ b/fs/aufs/mvdown.c -@@ -0,0 +1,703 @@ -+/* -+ * Copyright (C) 2011-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * move-down, opposite of copy-up -+ */ -+ -+#include "aufs.h" -+ -+struct au_mvd_args { -+ struct { -+ struct super_block *h_sb; -+ struct dentry *h_parent; -+ struct au_hinode *hdir; -+ struct inode *h_dir, *h_inode; -+ struct au_pin pin; -+ } info[AUFS_MVDOWN_NARRAY]; -+ -+ struct aufs_mvdown mvdown; -+ struct dentry *dentry, *parent; -+ struct inode *inode, *dir; -+ struct super_block *sb; -+ aufs_bindex_t bopq, bwh, bfound; -+ unsigned char rename_lock; -+}; -+ -+#define mvd_errno mvdown.au_errno -+#define mvd_bsrc mvdown.stbr[AUFS_MVDOWN_UPPER].bindex -+#define mvd_src_brid mvdown.stbr[AUFS_MVDOWN_UPPER].brid -+#define mvd_bdst mvdown.stbr[AUFS_MVDOWN_LOWER].bindex -+#define mvd_dst_brid mvdown.stbr[AUFS_MVDOWN_LOWER].brid -+ -+#define mvd_h_src_sb info[AUFS_MVDOWN_UPPER].h_sb -+#define mvd_h_src_parent info[AUFS_MVDOWN_UPPER].h_parent -+#define mvd_hdir_src info[AUFS_MVDOWN_UPPER].hdir -+#define mvd_h_src_dir info[AUFS_MVDOWN_UPPER].h_dir -+#define mvd_h_src_inode info[AUFS_MVDOWN_UPPER].h_inode -+#define mvd_pin_src info[AUFS_MVDOWN_UPPER].pin -+ -+#define mvd_h_dst_sb info[AUFS_MVDOWN_LOWER].h_sb -+#define mvd_h_dst_parent info[AUFS_MVDOWN_LOWER].h_parent -+#define mvd_hdir_dst info[AUFS_MVDOWN_LOWER].hdir -+#define mvd_h_dst_dir info[AUFS_MVDOWN_LOWER].h_dir -+#define mvd_h_dst_inode info[AUFS_MVDOWN_LOWER].h_inode -+#define mvd_pin_dst info[AUFS_MVDOWN_LOWER].pin -+ -+#define AU_MVD_PR(flag, ...) 
do { \ -+ if (flag) \ -+ pr_err(__VA_ARGS__); \ -+ } while (0) -+ -+static int find_lower_writable(struct au_mvd_args *a) -+{ -+ struct super_block *sb; -+ aufs_bindex_t bindex, bend; -+ struct au_branch *br; -+ -+ sb = a->sb; -+ bindex = a->mvd_bsrc; -+ bend = au_sbend(sb); -+ if (a->mvdown.flags & AUFS_MVDOWN_FHSM_LOWER) -+ for (bindex++; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (au_br_fhsm(br->br_perm) -+ && (!(au_br_sb(br)->s_flags & MS_RDONLY))) -+ return bindex; -+ } -+ else if (!(a->mvdown.flags & AUFS_MVDOWN_ROLOWER)) -+ for (bindex++; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (!au_br_rdonly(br)) -+ return bindex; -+ } -+ else -+ for (bindex++; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (!(au_br_sb(br)->s_flags & MS_RDONLY)) { -+ if (au_br_rdonly(br)) -+ a->mvdown.flags -+ |= AUFS_MVDOWN_ROLOWER_R; -+ return bindex; -+ } -+ } -+ -+ return -1; -+} -+ -+/* make the parent dir on bdst */ -+static int au_do_mkdir(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ -+ err = 0; -+ a->mvd_hdir_src = au_hi(a->dir, a->mvd_bsrc); -+ a->mvd_hdir_dst = au_hi(a->dir, a->mvd_bdst); -+ a->mvd_h_src_parent = au_h_dptr(a->parent, a->mvd_bsrc); -+ a->mvd_h_dst_parent = NULL; -+ if (au_dbend(a->parent) >= a->mvd_bdst) -+ a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst); -+ if (!a->mvd_h_dst_parent) { -+ err = au_cpdown_dirs(a->dentry, a->mvd_bdst); -+ if (unlikely(err)) { -+ AU_MVD_PR(dmsg, "cpdown_dirs failed\n"); -+ goto out; -+ } -+ a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst); -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* lock them all */ -+static int au_do_lock(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct dentry *h_trap; -+ -+ a->mvd_h_src_sb = au_sbr_sb(a->sb, a->mvd_bsrc); -+ a->mvd_h_dst_sb = au_sbr_sb(a->sb, a->mvd_bdst); -+ err = au_pin(&a->mvd_pin_dst, a->dentry, a->mvd_bdst, -+ au_opt_udba(a->sb), -+ AuPin_MNT_WRITE | 
AuPin_DI_LOCKED); -+ AuTraceErr(err); -+ if (unlikely(err)) { -+ AU_MVD_PR(dmsg, "pin_dst failed\n"); -+ goto out; -+ } -+ -+ if (a->mvd_h_src_sb != a->mvd_h_dst_sb) { -+ a->rename_lock = 0; -+ au_pin_init(&a->mvd_pin_src, a->dentry, a->mvd_bsrc, -+ AuLsc_DI_PARENT, AuLsc_I_PARENT3, -+ au_opt_udba(a->sb), -+ AuPin_MNT_WRITE | AuPin_DI_LOCKED); -+ err = au_do_pin(&a->mvd_pin_src); -+ AuTraceErr(err); -+ a->mvd_h_src_dir = a->mvd_h_src_parent->d_inode; -+ if (unlikely(err)) { -+ AU_MVD_PR(dmsg, "pin_src failed\n"); -+ goto out_dst; -+ } -+ goto out; /* success */ -+ } -+ -+ a->rename_lock = 1; -+ au_pin_hdir_unlock(&a->mvd_pin_dst); -+ err = au_pin(&a->mvd_pin_src, a->dentry, a->mvd_bsrc, -+ au_opt_udba(a->sb), -+ AuPin_MNT_WRITE | AuPin_DI_LOCKED); -+ AuTraceErr(err); -+ a->mvd_h_src_dir = a->mvd_h_src_parent->d_inode; -+ if (unlikely(err)) { -+ AU_MVD_PR(dmsg, "pin_src failed\n"); -+ au_pin_hdir_lock(&a->mvd_pin_dst); -+ goto out_dst; -+ } -+ au_pin_hdir_unlock(&a->mvd_pin_src); -+ h_trap = vfsub_lock_rename(a->mvd_h_src_parent, a->mvd_hdir_src, -+ a->mvd_h_dst_parent, a->mvd_hdir_dst); -+ if (h_trap) { -+ err = (h_trap != a->mvd_h_src_parent); -+ if (err) -+ err = (h_trap != a->mvd_h_dst_parent); -+ } -+ BUG_ON(err); /* it should never happen */ -+ if (unlikely(a->mvd_h_src_dir != au_pinned_h_dir(&a->mvd_pin_src))) { -+ err = -EBUSY; -+ AuTraceErr(err); -+ vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src, -+ a->mvd_h_dst_parent, a->mvd_hdir_dst); -+ au_pin_hdir_lock(&a->mvd_pin_src); -+ au_unpin(&a->mvd_pin_src); -+ au_pin_hdir_lock(&a->mvd_pin_dst); -+ goto out_dst; -+ } -+ goto out; /* success */ -+ -+out_dst: -+ au_unpin(&a->mvd_pin_dst); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static void au_do_unlock(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ if (!a->rename_lock) -+ au_unpin(&a->mvd_pin_src); -+ else { -+ vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src, -+ a->mvd_h_dst_parent, a->mvd_hdir_dst); -+ 
au_pin_hdir_lock(&a->mvd_pin_src); -+ au_unpin(&a->mvd_pin_src); -+ au_pin_hdir_lock(&a->mvd_pin_dst); -+ } -+ au_unpin(&a->mvd_pin_dst); -+} -+ -+/* copy-down the file */ -+static int au_do_cpdown(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct au_cp_generic cpg = { -+ .dentry = a->dentry, -+ .bdst = a->mvd_bdst, -+ .bsrc = a->mvd_bsrc, -+ .len = -1, -+ .pin = &a->mvd_pin_dst, -+ .flags = AuCpup_DTIME | AuCpup_HOPEN -+ }; -+ -+ AuDbg("b%d, b%d\n", cpg.bsrc, cpg.bdst); -+ if (a->mvdown.flags & AUFS_MVDOWN_OWLOWER) -+ au_fset_cpup(cpg.flags, OVERWRITE); -+ if (a->mvdown.flags & AUFS_MVDOWN_ROLOWER) -+ au_fset_cpup(cpg.flags, RWDST); -+ err = au_sio_cpdown_simple(&cpg); -+ if (unlikely(err)) -+ AU_MVD_PR(dmsg, "cpdown failed\n"); -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+/* -+ * unlink the whiteout on bdst if exist which may be created by UDBA while we -+ * were sleeping -+ */ -+static int au_do_unlink_wh(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct path h_path; -+ struct au_branch *br; -+ struct inode *delegated; -+ -+ br = au_sbr(a->sb, a->mvd_bdst); -+ h_path.dentry = au_wh_lkup(a->mvd_h_dst_parent, &a->dentry->d_name, br); -+ err = PTR_ERR(h_path.dentry); -+ if (IS_ERR(h_path.dentry)) { -+ AU_MVD_PR(dmsg, "wh_lkup failed\n"); -+ goto out; -+ } -+ -+ err = 0; -+ if (h_path.dentry->d_inode) { -+ h_path.mnt = au_br_mnt(br); -+ delegated = NULL; -+ err = vfsub_unlink(a->mvd_h_dst_parent->d_inode, &h_path, -+ &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ if (unlikely(err)) -+ AU_MVD_PR(dmsg, "wh_unlink failed\n"); -+ } -+ dput(h_path.dentry); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* -+ * unlink the topmost h_dentry -+ */ -+static int au_do_unlink(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct path h_path; -+ struct inode *delegated; -+ -+ 
h_path.mnt = au_sbr_mnt(a->sb, a->mvd_bsrc); -+ h_path.dentry = au_h_dptr(a->dentry, a->mvd_bsrc); -+ delegated = NULL; -+ err = vfsub_unlink(a->mvd_h_src_dir, &h_path, &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ if (unlikely(err)) -+ AU_MVD_PR(dmsg, "unlink failed\n"); -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+/* Since mvdown succeeded, we ignore an error of this function */ -+static void au_do_stfs(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct au_branch *br; -+ -+ a->mvdown.flags |= AUFS_MVDOWN_STFS_FAILED; -+ br = au_sbr(a->sb, a->mvd_bsrc); -+ err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_UPPER].stfs); -+ if (!err) { -+ br = au_sbr(a->sb, a->mvd_bdst); -+ a->mvdown.stbr[AUFS_MVDOWN_LOWER].brid = br->br_id; -+ err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_LOWER].stfs); -+ } -+ if (!err) -+ a->mvdown.flags &= ~AUFS_MVDOWN_STFS_FAILED; -+ else -+ AU_MVD_PR(dmsg, "statfs failed (%d), ignored\n", err); -+} -+ -+/* -+ * copy-down the file and unlink the bsrc file. -+ * - unlink the bdst whout if exist -+ * - copy-down the file (with whtmp name and rename) -+ * - unlink the bsrc file -+ */ -+static int au_do_mvdown(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ -+ err = au_do_mkdir(dmsg, a); -+ if (!err) -+ err = au_do_lock(dmsg, a); -+ if (unlikely(err)) -+ goto out; -+ -+ /* -+ * do not revert the activities we made on bdst since they should be -+ * harmless in aufs. 
-+ */ -+ -+ err = au_do_cpdown(dmsg, a); -+ if (!err) -+ err = au_do_unlink_wh(dmsg, a); -+ if (!err && !(a->mvdown.flags & AUFS_MVDOWN_KUPPER)) -+ err = au_do_unlink(dmsg, a); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ AuDbg("%pd2, 0x%x, %d --> %d\n", -+ a->dentry, a->mvdown.flags, a->mvd_bsrc, a->mvd_bdst); -+ if (find_lower_writable(a) < 0) -+ a->mvdown.flags |= AUFS_MVDOWN_BOTTOM; -+ -+ if (a->mvdown.flags & AUFS_MVDOWN_STFS) -+ au_do_stfs(dmsg, a); -+ -+ /* maintain internal array */ -+ if (!(a->mvdown.flags & AUFS_MVDOWN_KUPPER)) { -+ au_set_h_dptr(a->dentry, a->mvd_bsrc, NULL); -+ au_set_dbstart(a->dentry, a->mvd_bdst); -+ au_set_h_iptr(a->inode, a->mvd_bsrc, NULL, /*flags*/0); -+ au_set_ibstart(a->inode, a->mvd_bdst); -+ } else { -+ /* hide the lower */ -+ au_set_h_dptr(a->dentry, a->mvd_bdst, NULL); -+ au_set_dbend(a->dentry, a->mvd_bsrc); -+ au_set_h_iptr(a->inode, a->mvd_bdst, NULL, /*flags*/0); -+ au_set_ibend(a->inode, a->mvd_bsrc); -+ } -+ if (au_dbend(a->dentry) < a->mvd_bdst) -+ au_set_dbend(a->dentry, a->mvd_bdst); -+ if (au_ibend(a->inode) < a->mvd_bdst) -+ au_set_ibend(a->inode, a->mvd_bdst); -+ -+out_unlock: -+ au_do_unlock(dmsg, a); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* make sure the file is idle */ -+static int au_mvd_args_busy(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err, plinked; -+ -+ err = 0; -+ plinked = !!au_opt_test(au_mntflags(a->sb), PLINK); -+ if (au_dbstart(a->dentry) == a->mvd_bsrc -+ && au_dcount(a->dentry) == 1 -+ && atomic_read(&a->inode->i_count) == 1 -+ /* && a->mvd_h_src_inode->i_nlink == 1 */ -+ && (!plinked || !au_plink_test(a->inode)) -+ && a->inode->i_nlink == 1) -+ goto out; -+ -+ err = -EBUSY; -+ AU_MVD_PR(dmsg, -+ "b%d, d{b%d, c%d?}, i{c%d?, l%u}, hi{l%u}, p{%d, %d}\n", -+ a->mvd_bsrc, au_dbstart(a->dentry), au_dcount(a->dentry), -+ atomic_read(&a->inode->i_count), a->inode->i_nlink, -+ 
a->mvd_h_src_inode->i_nlink, -+ plinked, plinked ? au_plink_test(a->inode) : 0); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* make sure the parent dir is fine */ -+static int au_mvd_args_parent(const unsigned char dmsg, -+ struct au_mvd_args *a) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ -+ err = 0; -+ if (unlikely(au_alive_dir(a->parent))) { -+ err = -ENOENT; -+ AU_MVD_PR(dmsg, "parent dir is dead\n"); -+ goto out; -+ } -+ -+ a->bopq = au_dbdiropq(a->parent); -+ bindex = au_wbr_nonopq(a->dentry, a->mvd_bdst); -+ AuDbg("b%d\n", bindex); -+ if (unlikely((bindex >= 0 && bindex < a->mvd_bdst) -+ || (a->bopq != -1 && a->bopq < a->mvd_bdst))) { -+ err = -EINVAL; -+ a->mvd_errno = EAU_MVDOWN_OPAQUE; -+ AU_MVD_PR(dmsg, "ancestor is opaque b%d, b%d\n", -+ a->bopq, a->mvd_bdst); -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_mvd_args_intermediate(const unsigned char dmsg, -+ struct au_mvd_args *a) -+{ -+ int err; -+ struct au_dinfo *dinfo, *tmp; -+ -+ /* lookup the next lower positive entry */ -+ err = -ENOMEM; -+ tmp = au_di_alloc(a->sb, AuLsc_DI_TMP); -+ if (unlikely(!tmp)) -+ goto out; -+ -+ a->bfound = -1; -+ a->bwh = -1; -+ dinfo = au_di(a->dentry); -+ au_di_cp(tmp, dinfo); -+ au_di_swap(tmp, dinfo); -+ -+ /* returns the number of positive dentries */ -+ err = au_lkup_dentry(a->dentry, a->mvd_bsrc + 1, /*type*/0); -+ if (!err) -+ a->bwh = au_dbwh(a->dentry); -+ else if (err > 0) -+ a->bfound = au_dbstart(a->dentry); -+ -+ au_di_swap(tmp, dinfo); -+ au_rw_write_unlock(&tmp->di_rwsem); -+ au_di_free(tmp); -+ if (unlikely(err < 0)) -+ AU_MVD_PR(dmsg, "failed look-up lower\n"); -+ -+ /* -+ * here, we have these cases. -+ * bfound == -1 -+ * no positive dentry under bsrc. there are more sub-cases. -+ * bwh < 0 -+ * there no whiteout, we can safely move-down. -+ * bwh <= bsrc -+ * impossible -+ * bsrc < bwh && bwh < bdst -+ * there is a whiteout on RO branch. cannot proceed. 
-+ * bwh == bdst -+ * there is a whiteout on the RW target branch. it should -+ * be removed. -+ * bdst < bwh -+ * there is a whiteout somewhere unrelated branch. -+ * -1 < bfound && bfound <= bsrc -+ * impossible. -+ * bfound < bdst -+ * found, but it is on RO branch between bsrc and bdst. cannot -+ * proceed. -+ * bfound == bdst -+ * found, replace it if AUFS_MVDOWN_FORCE is set. otherwise return -+ * error. -+ * bdst < bfound -+ * found, after we create the file on bdst, it will be hidden. -+ */ -+ -+ AuDebugOn(a->bfound == -1 -+ && a->bwh != -1 -+ && a->bwh <= a->mvd_bsrc); -+ AuDebugOn(-1 < a->bfound -+ && a->bfound <= a->mvd_bsrc); -+ -+ err = -EINVAL; -+ if (a->bfound == -1 -+ && a->mvd_bsrc < a->bwh -+ && a->bwh != -1 -+ && a->bwh < a->mvd_bdst) { -+ a->mvd_errno = EAU_MVDOWN_WHITEOUT; -+ AU_MVD_PR(dmsg, "bsrc %d, bdst %d, bfound %d, bwh %d\n", -+ a->mvd_bsrc, a->mvd_bdst, a->bfound, a->bwh); -+ goto out; -+ } else if (a->bfound != -1 && a->bfound < a->mvd_bdst) { -+ a->mvd_errno = EAU_MVDOWN_UPPER; -+ AU_MVD_PR(dmsg, "bdst %d, bfound %d\n", -+ a->mvd_bdst, a->bfound); -+ goto out; -+ } -+ -+ err = 0; /* success */ -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_mvd_args_exist(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ -+ err = 0; -+ if (!(a->mvdown.flags & AUFS_MVDOWN_OWLOWER) -+ && a->bfound == a->mvd_bdst) -+ err = -EEXIST; -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_mvd_args(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct au_branch *br; -+ -+ err = -EISDIR; -+ if (unlikely(S_ISDIR(a->inode->i_mode))) -+ goto out; -+ -+ err = -EINVAL; -+ if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_UPPER)) -+ a->mvd_bsrc = au_ibstart(a->inode); -+ else { -+ a->mvd_bsrc = au_br_index(a->sb, a->mvd_src_brid); -+ if (unlikely(a->mvd_bsrc < 0 -+ || (a->mvd_bsrc < au_dbstart(a->dentry) -+ || au_dbend(a->dentry) < a->mvd_bsrc -+ || !au_h_dptr(a->dentry, a->mvd_bsrc)) -+ || (a->mvd_bsrc < 
au_ibstart(a->inode) -+ || au_ibend(a->inode) < a->mvd_bsrc -+ || !au_h_iptr(a->inode, a->mvd_bsrc)))) { -+ a->mvd_errno = EAU_MVDOWN_NOUPPER; -+ AU_MVD_PR(dmsg, "no upper\n"); -+ goto out; -+ } -+ } -+ if (unlikely(a->mvd_bsrc == au_sbend(a->sb))) { -+ a->mvd_errno = EAU_MVDOWN_BOTTOM; -+ AU_MVD_PR(dmsg, "on the bottom\n"); -+ goto out; -+ } -+ a->mvd_h_src_inode = au_h_iptr(a->inode, a->mvd_bsrc); -+ br = au_sbr(a->sb, a->mvd_bsrc); -+ err = au_br_rdonly(br); -+ if (!(a->mvdown.flags & AUFS_MVDOWN_ROUPPER)) { -+ if (unlikely(err)) -+ goto out; -+ } else if (!(vfsub_native_ro(a->mvd_h_src_inode) -+ || IS_APPEND(a->mvd_h_src_inode))) { -+ if (err) -+ a->mvdown.flags |= AUFS_MVDOWN_ROUPPER_R; -+ /* go on */ -+ } else -+ goto out; -+ -+ err = -EINVAL; -+ if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_LOWER)) { -+ a->mvd_bdst = find_lower_writable(a); -+ if (unlikely(a->mvd_bdst < 0)) { -+ a->mvd_errno = EAU_MVDOWN_BOTTOM; -+ AU_MVD_PR(dmsg, "no writable lower branch\n"); -+ goto out; -+ } -+ } else { -+ a->mvd_bdst = au_br_index(a->sb, a->mvd_dst_brid); -+ if (unlikely(a->mvd_bdst < 0 -+ || au_sbend(a->sb) < a->mvd_bdst)) { -+ a->mvd_errno = EAU_MVDOWN_NOLOWERBR; -+ AU_MVD_PR(dmsg, "no lower brid\n"); -+ goto out; -+ } -+ } -+ -+ err = au_mvd_args_busy(dmsg, a); -+ if (!err) -+ err = au_mvd_args_parent(dmsg, a); -+ if (!err) -+ err = au_mvd_args_intermediate(dmsg, a); -+ if (!err) -+ err = au_mvd_args_exist(dmsg, a); -+ if (!err) -+ AuDbg("b%d, b%d\n", a->mvd_bsrc, a->mvd_bdst); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *uarg) -+{ -+ int err, e; -+ unsigned char dmsg; -+ struct au_mvd_args *args; -+ struct inode *inode; -+ -+ inode = dentry->d_inode; -+ err = -EPERM; -+ if (unlikely(!capable(CAP_SYS_ADMIN))) -+ goto out; -+ -+ err = -ENOMEM; -+ args = kmalloc(sizeof(*args), GFP_NOFS); -+ if (unlikely(!args)) -+ goto out; -+ -+ err = copy_from_user(&args->mvdown, uarg, sizeof(args->mvdown)); -+ if 
(!err) -+ err = !access_ok(VERIFY_WRITE, uarg, sizeof(*uarg)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ goto out_free; -+ } -+ AuDbg("flags 0x%x\n", args->mvdown.flags); -+ args->mvdown.flags &= ~(AUFS_MVDOWN_ROLOWER_R | AUFS_MVDOWN_ROUPPER_R); -+ args->mvdown.au_errno = 0; -+ args->dentry = dentry; -+ args->inode = inode; -+ args->sb = dentry->d_sb; -+ -+ err = -ENOENT; -+ dmsg = !!(args->mvdown.flags & AUFS_MVDOWN_DMSG); -+ args->parent = dget_parent(dentry); -+ args->dir = args->parent->d_inode; -+ mutex_lock_nested(&args->dir->i_mutex, I_MUTEX_PARENT); -+ dput(args->parent); -+ if (unlikely(args->parent != dentry->d_parent)) { -+ AU_MVD_PR(dmsg, "parent dir is moved\n"); -+ goto out_dir; -+ } -+ -+ mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD); -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_NOPLMW); -+ if (unlikely(err)) -+ goto out_inode; -+ -+ di_write_lock_parent(args->parent); -+ err = au_mvd_args(dmsg, args); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ err = au_do_mvdown(dmsg, args); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ au_cpup_attr_timesizes(args->dir); -+ au_cpup_attr_timesizes(inode); -+ if (!(args->mvdown.flags & AUFS_MVDOWN_KUPPER)) -+ au_cpup_igen(inode, au_h_iptr(inode, args->mvd_bdst)); -+ /* au_digen_dec(dentry); */ -+ -+out_parent: -+ di_write_unlock(args->parent); -+ aufs_read_unlock(dentry, AuLock_DW); -+out_inode: -+ mutex_unlock(&inode->i_mutex); -+out_dir: -+ mutex_unlock(&args->dir->i_mutex); -+out_free: -+ e = copy_to_user(uarg, &args->mvdown, sizeof(args->mvdown)); -+ if (unlikely(e)) -+ err = -EFAULT; -+ kfree(args); -+out: -+ AuTraceErr(err); -+ return err; -+} -diff --git a/fs/aufs/opts.c b/fs/aufs/opts.c -new file mode 100644 -index 0000000..0363f67 ---- /dev/null -+++ b/fs/aufs/opts.c -@@ -0,0 +1,1878 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * mount options/flags -+ */ -+ -+#include -+#include /* a distribution requires */ -+#include -+#include "aufs.h" -+ -+/* ---------------------------------------------------------------------- */ -+ -+enum { -+ Opt_br, -+ Opt_add, Opt_del, Opt_mod, Opt_append, Opt_prepend, -+ Opt_idel, Opt_imod, -+ Opt_dirwh, Opt_rdcache, Opt_rdblk, Opt_rdhash, -+ Opt_rdblk_def, Opt_rdhash_def, -+ Opt_xino, Opt_noxino, -+ Opt_trunc_xino, Opt_trunc_xino_v, Opt_notrunc_xino, -+ Opt_trunc_xino_path, Opt_itrunc_xino, -+ Opt_trunc_xib, Opt_notrunc_xib, -+ Opt_shwh, Opt_noshwh, -+ Opt_plink, Opt_noplink, Opt_list_plink, -+ Opt_udba, -+ Opt_dio, Opt_nodio, -+ Opt_diropq_a, Opt_diropq_w, -+ Opt_warn_perm, Opt_nowarn_perm, -+ Opt_wbr_copyup, Opt_wbr_create, -+ Opt_fhsm_sec, -+ Opt_refrof, Opt_norefrof, -+ Opt_verbose, Opt_noverbose, -+ Opt_sum, Opt_nosum, Opt_wsum, -+ Opt_dirperm1, Opt_nodirperm1, -+ Opt_acl, Opt_noacl, -+ Opt_tail, Opt_ignore, Opt_ignore_silent, Opt_err -+}; -+ -+static match_table_t options = { -+ {Opt_br, "br=%s"}, -+ {Opt_br, "br:%s"}, -+ -+ {Opt_add, "add=%d:%s"}, -+ {Opt_add, "add:%d:%s"}, -+ {Opt_add, "ins=%d:%s"}, -+ {Opt_add, "ins:%d:%s"}, -+ {Opt_append, "append=%s"}, -+ {Opt_append, "append:%s"}, -+ {Opt_prepend, "prepend=%s"}, -+ {Opt_prepend, "prepend:%s"}, -+ -+ {Opt_del, "del=%s"}, -+ {Opt_del, 
"del:%s"}, -+ /* {Opt_idel, "idel:%d"}, */ -+ {Opt_mod, "mod=%s"}, -+ {Opt_mod, "mod:%s"}, -+ /* {Opt_imod, "imod:%d:%s"}, */ -+ -+ {Opt_dirwh, "dirwh=%d"}, -+ -+ {Opt_xino, "xino=%s"}, -+ {Opt_noxino, "noxino"}, -+ {Opt_trunc_xino, "trunc_xino"}, -+ {Opt_trunc_xino_v, "trunc_xino_v=%d:%d"}, -+ {Opt_notrunc_xino, "notrunc_xino"}, -+ {Opt_trunc_xino_path, "trunc_xino=%s"}, -+ {Opt_itrunc_xino, "itrunc_xino=%d"}, -+ /* {Opt_zxino, "zxino=%s"}, */ -+ {Opt_trunc_xib, "trunc_xib"}, -+ {Opt_notrunc_xib, "notrunc_xib"}, -+ -+#ifdef CONFIG_PROC_FS -+ {Opt_plink, "plink"}, -+#else -+ {Opt_ignore_silent, "plink"}, -+#endif -+ -+ {Opt_noplink, "noplink"}, -+ -+#ifdef CONFIG_AUFS_DEBUG -+ {Opt_list_plink, "list_plink"}, -+#endif -+ -+ {Opt_udba, "udba=%s"}, -+ -+ {Opt_dio, "dio"}, -+ {Opt_nodio, "nodio"}, -+ -+#ifdef CONFIG_AUFS_FHSM -+ {Opt_fhsm_sec, "fhsm_sec=%d"}, -+#else -+ {Opt_ignore_silent, "fhsm_sec=%d"}, -+#endif -+ -+ {Opt_diropq_a, "diropq=always"}, -+ {Opt_diropq_a, "diropq=a"}, -+ {Opt_diropq_w, "diropq=whiteouted"}, -+ {Opt_diropq_w, "diropq=w"}, -+ -+ {Opt_warn_perm, "warn_perm"}, -+ {Opt_nowarn_perm, "nowarn_perm"}, -+ -+ /* keep them temporary */ -+ {Opt_ignore_silent, "nodlgt"}, -+ {Opt_ignore_silent, "clean_plink"}, -+ -+#ifdef CONFIG_AUFS_SHWH -+ {Opt_shwh, "shwh"}, -+#endif -+ {Opt_noshwh, "noshwh"}, -+ -+ {Opt_dirperm1, "dirperm1"}, -+ {Opt_nodirperm1, "nodirperm1"}, -+ -+ {Opt_refrof, "refrof"}, -+ {Opt_norefrof, "norefrof"}, -+ -+ {Opt_verbose, "verbose"}, -+ {Opt_verbose, "v"}, -+ {Opt_noverbose, "noverbose"}, -+ {Opt_noverbose, "quiet"}, -+ {Opt_noverbose, "q"}, -+ {Opt_noverbose, "silent"}, -+ -+ {Opt_sum, "sum"}, -+ {Opt_nosum, "nosum"}, -+ {Opt_wsum, "wsum"}, -+ -+ {Opt_rdcache, "rdcache=%d"}, -+ {Opt_rdblk, "rdblk=%d"}, -+ {Opt_rdblk_def, "rdblk=def"}, -+ {Opt_rdhash, "rdhash=%d"}, -+ {Opt_rdhash_def, "rdhash=def"}, -+ -+ {Opt_wbr_create, "create=%s"}, -+ {Opt_wbr_create, "create_policy=%s"}, -+ {Opt_wbr_copyup, "cpup=%s"}, -+ {Opt_wbr_copyup, 
"copyup=%s"}, -+ {Opt_wbr_copyup, "copyup_policy=%s"}, -+ -+ /* generic VFS flag */ -+#ifdef CONFIG_FS_POSIX_ACL -+ {Opt_acl, "acl"}, -+ {Opt_noacl, "noacl"}, -+#else -+ {Opt_ignore_silent, "acl"}, -+ {Opt_ignore_silent, "noacl"}, -+#endif -+ -+ /* internal use for the scripts */ -+ {Opt_ignore_silent, "si=%s"}, -+ -+ {Opt_br, "dirs=%s"}, -+ {Opt_ignore, "debug=%d"}, -+ {Opt_ignore, "delete=whiteout"}, -+ {Opt_ignore, "delete=all"}, -+ {Opt_ignore, "imap=%s"}, -+ -+ /* temporary workaround, due to old mount(8)? */ -+ {Opt_ignore_silent, "relatime"}, -+ -+ {Opt_err, NULL} -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static const char *au_parser_pattern(int val, match_table_t tbl) -+{ -+ struct match_token *p; -+ -+ p = tbl; -+ while (p->pattern) { -+ if (p->token == val) -+ return p->pattern; -+ p++; -+ } -+ BUG(); -+ return "??"; -+} -+ -+static const char *au_optstr(int *val, match_table_t tbl) -+{ -+ struct match_token *p; -+ int v; -+ -+ v = *val; -+ if (!v) -+ goto out; -+ p = tbl; -+ while (p->pattern) { -+ if (p->token -+ && (v & p->token) == p->token) { -+ *val &= ~p->token; -+ return p->pattern; -+ } -+ p++; -+ } -+ -+out: -+ return NULL; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static match_table_t brperm = { -+ {AuBrPerm_RO, AUFS_BRPERM_RO}, -+ {AuBrPerm_RR, AUFS_BRPERM_RR}, -+ {AuBrPerm_RW, AUFS_BRPERM_RW}, -+ {0, NULL} -+}; -+ -+static match_table_t brattr = { -+ /* general */ -+ {AuBrAttr_COO_REG, AUFS_BRATTR_COO_REG}, -+ {AuBrAttr_COO_ALL, AUFS_BRATTR_COO_ALL}, -+ /* 'unpin' attrib is meaningless since linux-3.18-rc1 */ -+ {AuBrAttr_UNPIN, AUFS_BRATTR_UNPIN}, -+#ifdef CONFIG_AUFS_FHSM -+ {AuBrAttr_FHSM, AUFS_BRATTR_FHSM}, -+#endif -+#ifdef CONFIG_AUFS_XATTR -+ {AuBrAttr_ICEX, AUFS_BRATTR_ICEX}, -+ {AuBrAttr_ICEX_SEC, AUFS_BRATTR_ICEX_SEC}, -+ {AuBrAttr_ICEX_SYS, AUFS_BRATTR_ICEX_SYS}, -+ {AuBrAttr_ICEX_TR, AUFS_BRATTR_ICEX_TR}, -+ {AuBrAttr_ICEX_USR, 
AUFS_BRATTR_ICEX_USR}, -+ {AuBrAttr_ICEX_OTH, AUFS_BRATTR_ICEX_OTH}, -+#endif -+ -+ /* ro/rr branch */ -+ {AuBrRAttr_WH, AUFS_BRRATTR_WH}, -+ -+ /* rw branch */ -+ {AuBrWAttr_MOO, AUFS_BRWATTR_MOO}, -+ {AuBrWAttr_NoLinkWH, AUFS_BRWATTR_NLWH}, -+ -+ {0, NULL} -+}; -+ -+static int br_attr_val(char *str, match_table_t table, substring_t args[]) -+{ -+ int attr, v; -+ char *p; -+ -+ attr = 0; -+ do { -+ p = strchr(str, '+'); -+ if (p) -+ *p = 0; -+ v = match_token(str, table, args); -+ if (v) { -+ if (v & AuBrAttr_CMOO_Mask) -+ attr &= ~AuBrAttr_CMOO_Mask; -+ attr |= v; -+ } else { -+ if (p) -+ *p = '+'; -+ pr_warn("ignored branch attribute %s\n", str); -+ break; -+ } -+ if (p) -+ str = p + 1; -+ } while (p); -+ -+ return attr; -+} -+ -+static int au_do_optstr_br_attr(au_br_perm_str_t *str, int perm) -+{ -+ int sz; -+ const char *p; -+ char *q; -+ -+ q = str->a; -+ *q = 0; -+ p = au_optstr(&perm, brattr); -+ if (p) { -+ sz = strlen(p); -+ memcpy(q, p, sz + 1); -+ q += sz; -+ } else -+ goto out; -+ -+ do { -+ p = au_optstr(&perm, brattr); -+ if (p) { -+ *q++ = '+'; -+ sz = strlen(p); -+ memcpy(q, p, sz + 1); -+ q += sz; -+ } -+ } while (p); -+ -+out: -+ return q - str->a; -+} -+ -+static int noinline_for_stack br_perm_val(char *perm) -+{ -+ int val, bad, sz; -+ char *p; -+ substring_t args[MAX_OPT_ARGS]; -+ au_br_perm_str_t attr; -+ -+ p = strchr(perm, '+'); -+ if (p) -+ *p = 0; -+ val = match_token(perm, brperm, args); -+ if (!val) { -+ if (p) -+ *p = '+'; -+ pr_warn("ignored branch permission %s\n", perm); -+ val = AuBrPerm_RO; -+ goto out; -+ } -+ if (!p) -+ goto out; -+ -+ val |= br_attr_val(p + 1, brattr, args); -+ -+ bad = 0; -+ switch (val & AuBrPerm_Mask) { -+ case AuBrPerm_RO: -+ case AuBrPerm_RR: -+ bad = val & AuBrWAttr_Mask; -+ val &= ~AuBrWAttr_Mask; -+ break; -+ case AuBrPerm_RW: -+ bad = val & AuBrRAttr_Mask; -+ val &= ~AuBrRAttr_Mask; -+ break; -+ } -+ -+ /* -+ * 'unpin' attrib becomes meaningless since linux-3.18-rc1, but aufs -+ * does not treat it as 
an error, just warning. -+ * this is a tiny guard for the user operation. -+ */ -+ if (val & AuBrAttr_UNPIN) { -+ bad |= AuBrAttr_UNPIN; -+ val &= ~AuBrAttr_UNPIN; -+ } -+ -+ if (unlikely(bad)) { -+ sz = au_do_optstr_br_attr(&attr, bad); -+ AuDebugOn(!sz); -+ pr_warn("ignored branch attribute %s\n", attr.a); -+ } -+ -+out: -+ return val; -+} -+ -+void au_optstr_br_perm(au_br_perm_str_t *str, int perm) -+{ -+ au_br_perm_str_t attr; -+ const char *p; -+ char *q; -+ int sz; -+ -+ q = str->a; -+ p = au_optstr(&perm, brperm); -+ AuDebugOn(!p || !*p); -+ sz = strlen(p); -+ memcpy(q, p, sz + 1); -+ q += sz; -+ -+ sz = au_do_optstr_br_attr(&attr, perm); -+ if (sz) { -+ *q++ = '+'; -+ memcpy(q, attr.a, sz + 1); -+ } -+ -+ AuDebugOn(strlen(str->a) >= sizeof(str->a)); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static match_table_t udbalevel = { -+ {AuOpt_UDBA_REVAL, "reval"}, -+ {AuOpt_UDBA_NONE, "none"}, -+#ifdef CONFIG_AUFS_HNOTIFY -+ {AuOpt_UDBA_HNOTIFY, "notify"}, /* abstraction */ -+#ifdef CONFIG_AUFS_HFSNOTIFY -+ {AuOpt_UDBA_HNOTIFY, "fsnotify"}, -+#endif -+#endif -+ {-1, NULL} -+}; -+ -+static int noinline_for_stack udba_val(char *str) -+{ -+ substring_t args[MAX_OPT_ARGS]; -+ -+ return match_token(str, udbalevel, args); -+} -+ -+const char *au_optstr_udba(int udba) -+{ -+ return au_parser_pattern(udba, udbalevel); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static match_table_t au_wbr_create_policy = { -+ {AuWbrCreate_TDP, "tdp"}, -+ {AuWbrCreate_TDP, "top-down-parent"}, -+ {AuWbrCreate_RR, "rr"}, -+ {AuWbrCreate_RR, "round-robin"}, -+ {AuWbrCreate_MFS, "mfs"}, -+ {AuWbrCreate_MFS, "most-free-space"}, -+ {AuWbrCreate_MFSV, "mfs:%d"}, -+ {AuWbrCreate_MFSV, "most-free-space:%d"}, -+ -+ {AuWbrCreate_MFSRR, "mfsrr:%d"}, -+ {AuWbrCreate_MFSRRV, "mfsrr:%d:%d"}, -+ {AuWbrCreate_PMFS, "pmfs"}, -+ {AuWbrCreate_PMFSV, "pmfs:%d"}, -+ {AuWbrCreate_PMFSRR, "pmfsrr:%d"}, -+ 
{AuWbrCreate_PMFSRRV, "pmfsrr:%d:%d"}, -+ -+ {-1, NULL} -+}; -+ -+/* -+ * cf. linux/lib/parser.c and cmdline.c -+ * gave up calling memparse() since it uses simple_strtoull() instead of -+ * kstrto...(). -+ */ -+static int noinline_for_stack -+au_match_ull(substring_t *s, unsigned long long *result) -+{ -+ int err; -+ unsigned int len; -+ char a[32]; -+ -+ err = -ERANGE; -+ len = s->to - s->from; -+ if (len + 1 <= sizeof(a)) { -+ memcpy(a, s->from, len); -+ a[len] = '\0'; -+ err = kstrtoull(a, 0, result); -+ } -+ return err; -+} -+ -+static int au_wbr_mfs_wmark(substring_t *arg, char *str, -+ struct au_opt_wbr_create *create) -+{ -+ int err; -+ unsigned long long ull; -+ -+ err = 0; -+ if (!au_match_ull(arg, &ull)) -+ create->mfsrr_watermark = ull; -+ else { -+ pr_err("bad integer in %s\n", str); -+ err = -EINVAL; -+ } -+ -+ return err; -+} -+ -+static int au_wbr_mfs_sec(substring_t *arg, char *str, -+ struct au_opt_wbr_create *create) -+{ -+ int n, err; -+ -+ err = 0; -+ if (!match_int(arg, &n) && 0 <= n && n <= AUFS_MFS_MAX_SEC) -+ create->mfs_second = n; -+ else { -+ pr_err("bad integer in %s\n", str); -+ err = -EINVAL; -+ } -+ -+ return err; -+} -+ -+static int noinline_for_stack -+au_wbr_create_val(char *str, struct au_opt_wbr_create *create) -+{ -+ int err, e; -+ substring_t args[MAX_OPT_ARGS]; -+ -+ err = match_token(str, au_wbr_create_policy, args); -+ create->wbr_create = err; -+ switch (err) { -+ case AuWbrCreate_MFSRRV: -+ case AuWbrCreate_PMFSRRV: -+ e = au_wbr_mfs_wmark(&args[0], str, create); -+ if (!e) -+ e = au_wbr_mfs_sec(&args[1], str, create); -+ if (unlikely(e)) -+ err = e; -+ break; -+ case AuWbrCreate_MFSRR: -+ case AuWbrCreate_PMFSRR: -+ e = au_wbr_mfs_wmark(&args[0], str, create); -+ if (unlikely(e)) { -+ err = e; -+ break; -+ } -+ /*FALLTHROUGH*/ -+ case AuWbrCreate_MFS: -+ case AuWbrCreate_PMFS: -+ create->mfs_second = AUFS_MFS_DEF_SEC; -+ break; -+ case AuWbrCreate_MFSV: -+ case AuWbrCreate_PMFSV: -+ e = au_wbr_mfs_sec(&args[0], str, 
create); -+ if (unlikely(e)) -+ err = e; -+ break; -+ } -+ -+ return err; -+} -+ -+const char *au_optstr_wbr_create(int wbr_create) -+{ -+ return au_parser_pattern(wbr_create, au_wbr_create_policy); -+} -+ -+static match_table_t au_wbr_copyup_policy = { -+ {AuWbrCopyup_TDP, "tdp"}, -+ {AuWbrCopyup_TDP, "top-down-parent"}, -+ {AuWbrCopyup_BUP, "bup"}, -+ {AuWbrCopyup_BUP, "bottom-up-parent"}, -+ {AuWbrCopyup_BU, "bu"}, -+ {AuWbrCopyup_BU, "bottom-up"}, -+ {-1, NULL} -+}; -+ -+static int noinline_for_stack au_wbr_copyup_val(char *str) -+{ -+ substring_t args[MAX_OPT_ARGS]; -+ -+ return match_token(str, au_wbr_copyup_policy, args); -+} -+ -+const char *au_optstr_wbr_copyup(int wbr_copyup) -+{ -+ return au_parser_pattern(wbr_copyup, au_wbr_copyup_policy); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static const int lkup_dirflags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY; -+ -+static void dump_opts(struct au_opts *opts) -+{ -+#ifdef CONFIG_AUFS_DEBUG -+ /* reduce stack space */ -+ union { -+ struct au_opt_add *add; -+ struct au_opt_del *del; -+ struct au_opt_mod *mod; -+ struct au_opt_xino *xino; -+ struct au_opt_xino_itrunc *xino_itrunc; -+ struct au_opt_wbr_create *create; -+ } u; -+ struct au_opt *opt; -+ -+ opt = opts->opt; -+ while (opt->type != Opt_tail) { -+ switch (opt->type) { -+ case Opt_add: -+ u.add = &opt->add; -+ AuDbg("add {b%d, %s, 0x%x, %p}\n", -+ u.add->bindex, u.add->pathname, u.add->perm, -+ u.add->path.dentry); -+ break; -+ case Opt_del: -+ case Opt_idel: -+ u.del = &opt->del; -+ AuDbg("del {%s, %p}\n", -+ u.del->pathname, u.del->h_path.dentry); -+ break; -+ case Opt_mod: -+ case Opt_imod: -+ u.mod = &opt->mod; -+ AuDbg("mod {%s, 0x%x, %p}\n", -+ u.mod->path, u.mod->perm, u.mod->h_root); -+ break; -+ case Opt_append: -+ u.add = &opt->add; -+ AuDbg("append {b%d, %s, 0x%x, %p}\n", -+ u.add->bindex, u.add->pathname, u.add->perm, -+ u.add->path.dentry); -+ break; -+ case Opt_prepend: -+ u.add = &opt->add; -+ 
AuDbg("prepend {b%d, %s, 0x%x, %p}\n", -+ u.add->bindex, u.add->pathname, u.add->perm, -+ u.add->path.dentry); -+ break; -+ case Opt_dirwh: -+ AuDbg("dirwh %d\n", opt->dirwh); -+ break; -+ case Opt_rdcache: -+ AuDbg("rdcache %d\n", opt->rdcache); -+ break; -+ case Opt_rdblk: -+ AuDbg("rdblk %u\n", opt->rdblk); -+ break; -+ case Opt_rdblk_def: -+ AuDbg("rdblk_def\n"); -+ break; -+ case Opt_rdhash: -+ AuDbg("rdhash %u\n", opt->rdhash); -+ break; -+ case Opt_rdhash_def: -+ AuDbg("rdhash_def\n"); -+ break; -+ case Opt_xino: -+ u.xino = &opt->xino; -+ AuDbg("xino {%s %pD}\n", u.xino->path, u.xino->file); -+ break; -+ case Opt_trunc_xino: -+ AuLabel(trunc_xino); -+ break; -+ case Opt_notrunc_xino: -+ AuLabel(notrunc_xino); -+ break; -+ case Opt_trunc_xino_path: -+ case Opt_itrunc_xino: -+ u.xino_itrunc = &opt->xino_itrunc; -+ AuDbg("trunc_xino %d\n", u.xino_itrunc->bindex); -+ break; -+ case Opt_noxino: -+ AuLabel(noxino); -+ break; -+ case Opt_trunc_xib: -+ AuLabel(trunc_xib); -+ break; -+ case Opt_notrunc_xib: -+ AuLabel(notrunc_xib); -+ break; -+ case Opt_shwh: -+ AuLabel(shwh); -+ break; -+ case Opt_noshwh: -+ AuLabel(noshwh); -+ break; -+ case Opt_dirperm1: -+ AuLabel(dirperm1); -+ break; -+ case Opt_nodirperm1: -+ AuLabel(nodirperm1); -+ break; -+ case Opt_plink: -+ AuLabel(plink); -+ break; -+ case Opt_noplink: -+ AuLabel(noplink); -+ break; -+ case Opt_list_plink: -+ AuLabel(list_plink); -+ break; -+ case Opt_udba: -+ AuDbg("udba %d, %s\n", -+ opt->udba, au_optstr_udba(opt->udba)); -+ break; -+ case Opt_dio: -+ AuLabel(dio); -+ break; -+ case Opt_nodio: -+ AuLabel(nodio); -+ break; -+ case Opt_diropq_a: -+ AuLabel(diropq_a); -+ break; -+ case Opt_diropq_w: -+ AuLabel(diropq_w); -+ break; -+ case Opt_warn_perm: -+ AuLabel(warn_perm); -+ break; -+ case Opt_nowarn_perm: -+ AuLabel(nowarn_perm); -+ break; -+ case Opt_refrof: -+ AuLabel(refrof); -+ break; -+ case Opt_norefrof: -+ AuLabel(norefrof); -+ break; -+ case Opt_verbose: -+ AuLabel(verbose); -+ break; -+ case 
Opt_noverbose: -+ AuLabel(noverbose); -+ break; -+ case Opt_sum: -+ AuLabel(sum); -+ break; -+ case Opt_nosum: -+ AuLabel(nosum); -+ break; -+ case Opt_wsum: -+ AuLabel(wsum); -+ break; -+ case Opt_wbr_create: -+ u.create = &opt->wbr_create; -+ AuDbg("create %d, %s\n", u.create->wbr_create, -+ au_optstr_wbr_create(u.create->wbr_create)); -+ switch (u.create->wbr_create) { -+ case AuWbrCreate_MFSV: -+ case AuWbrCreate_PMFSV: -+ AuDbg("%d sec\n", u.create->mfs_second); -+ break; -+ case AuWbrCreate_MFSRR: -+ AuDbg("%llu watermark\n", -+ u.create->mfsrr_watermark); -+ break; -+ case AuWbrCreate_MFSRRV: -+ case AuWbrCreate_PMFSRRV: -+ AuDbg("%llu watermark, %d sec\n", -+ u.create->mfsrr_watermark, -+ u.create->mfs_second); -+ break; -+ } -+ break; -+ case Opt_wbr_copyup: -+ AuDbg("copyup %d, %s\n", opt->wbr_copyup, -+ au_optstr_wbr_copyup(opt->wbr_copyup)); -+ break; -+ case Opt_fhsm_sec: -+ AuDbg("fhsm_sec %u\n", opt->fhsm_second); -+ break; -+ case Opt_acl: -+ AuLabel(acl); -+ break; -+ case Opt_noacl: -+ AuLabel(noacl); -+ break; -+ default: -+ BUG(); -+ } -+ opt++; -+ } -+#endif -+} -+ -+void au_opts_free(struct au_opts *opts) -+{ -+ struct au_opt *opt; -+ -+ opt = opts->opt; -+ while (opt->type != Opt_tail) { -+ switch (opt->type) { -+ case Opt_add: -+ case Opt_append: -+ case Opt_prepend: -+ path_put(&opt->add.path); -+ break; -+ case Opt_del: -+ case Opt_idel: -+ path_put(&opt->del.h_path); -+ break; -+ case Opt_mod: -+ case Opt_imod: -+ dput(opt->mod.h_root); -+ break; -+ case Opt_xino: -+ fput(opt->xino.file); -+ break; -+ } -+ opt++; -+ } -+} -+ -+static int opt_add(struct au_opt *opt, char *opt_str, unsigned long sb_flags, -+ aufs_bindex_t bindex) -+{ -+ int err; -+ struct au_opt_add *add = &opt->add; -+ char *p; -+ -+ add->bindex = bindex; -+ add->perm = AuBrPerm_RO; -+ add->pathname = opt_str; -+ p = strchr(opt_str, '='); -+ if (p) { -+ *p++ = 0; -+ if (*p) -+ add->perm = br_perm_val(p); -+ } -+ -+ err = vfsub_kern_path(add->pathname, lkup_dirflags, 
&add->path); -+ if (!err) { -+ if (!p) { -+ add->perm = AuBrPerm_RO; -+ if (au_test_fs_rr(add->path.dentry->d_sb)) -+ add->perm = AuBrPerm_RR; -+ else if (!bindex && !(sb_flags & MS_RDONLY)) -+ add->perm = AuBrPerm_RW; -+ } -+ opt->type = Opt_add; -+ goto out; -+ } -+ pr_err("lookup failed %s (%d)\n", add->pathname, err); -+ err = -EINVAL; -+ -+out: -+ return err; -+} -+ -+static int au_opts_parse_del(struct au_opt_del *del, substring_t args[]) -+{ -+ int err; -+ -+ del->pathname = args[0].from; -+ AuDbg("del path %s\n", del->pathname); -+ -+ err = vfsub_kern_path(del->pathname, lkup_dirflags, &del->h_path); -+ if (unlikely(err)) -+ pr_err("lookup failed %s (%d)\n", del->pathname, err); -+ -+ return err; -+} -+ -+#if 0 /* reserved for future use */ -+static int au_opts_parse_idel(struct super_block *sb, aufs_bindex_t bindex, -+ struct au_opt_del *del, substring_t args[]) -+{ -+ int err; -+ struct dentry *root; -+ -+ err = -EINVAL; -+ root = sb->s_root; -+ aufs_read_lock(root, AuLock_FLUSH); -+ if (bindex < 0 || au_sbend(sb) < bindex) { -+ pr_err("out of bounds, %d\n", bindex); -+ goto out; -+ } -+ -+ err = 0; -+ del->h_path.dentry = dget(au_h_dptr(root, bindex)); -+ del->h_path.mnt = mntget(au_sbr_mnt(sb, bindex)); -+ -+out: -+ aufs_read_unlock(root, !AuLock_IR); -+ return err; -+} -+#endif -+ -+static int noinline_for_stack -+au_opts_parse_mod(struct au_opt_mod *mod, substring_t args[]) -+{ -+ int err; -+ struct path path; -+ char *p; -+ -+ err = -EINVAL; -+ mod->path = args[0].from; -+ p = strchr(mod->path, '='); -+ if (unlikely(!p)) { -+ pr_err("no permssion %s\n", args[0].from); -+ goto out; -+ } -+ -+ *p++ = 0; -+ err = vfsub_kern_path(mod->path, lkup_dirflags, &path); -+ if (unlikely(err)) { -+ pr_err("lookup failed %s (%d)\n", mod->path, err); -+ goto out; -+ } -+ -+ mod->perm = br_perm_val(p); -+ AuDbg("mod path %s, perm 0x%x, %s\n", mod->path, mod->perm, p); -+ mod->h_root = dget(path.dentry); -+ path_put(&path); -+ -+out: -+ return err; -+} -+ -+#if 0 /* 
reserved for future use */ -+static int au_opts_parse_imod(struct super_block *sb, aufs_bindex_t bindex, -+ struct au_opt_mod *mod, substring_t args[]) -+{ -+ int err; -+ struct dentry *root; -+ -+ err = -EINVAL; -+ root = sb->s_root; -+ aufs_read_lock(root, AuLock_FLUSH); -+ if (bindex < 0 || au_sbend(sb) < bindex) { -+ pr_err("out of bounds, %d\n", bindex); -+ goto out; -+ } -+ -+ err = 0; -+ mod->perm = br_perm_val(args[1].from); -+ AuDbg("mod path %s, perm 0x%x, %s\n", -+ mod->path, mod->perm, args[1].from); -+ mod->h_root = dget(au_h_dptr(root, bindex)); -+ -+out: -+ aufs_read_unlock(root, !AuLock_IR); -+ return err; -+} -+#endif -+ -+static int au_opts_parse_xino(struct super_block *sb, struct au_opt_xino *xino, -+ substring_t args[]) -+{ -+ int err; -+ struct file *file; -+ -+ file = au_xino_create(sb, args[0].from, /*silent*/0); -+ err = PTR_ERR(file); -+ if (IS_ERR(file)) -+ goto out; -+ -+ err = -EINVAL; -+ if (unlikely(file->f_dentry->d_sb == sb)) { -+ fput(file); -+ pr_err("%s must be outside\n", args[0].from); -+ goto out; -+ } -+ -+ err = 0; -+ xino->file = file; -+ xino->path = args[0].from; -+ -+out: -+ return err; -+} -+ -+static int noinline_for_stack -+au_opts_parse_xino_itrunc_path(struct super_block *sb, -+ struct au_opt_xino_itrunc *xino_itrunc, -+ substring_t args[]) -+{ -+ int err; -+ aufs_bindex_t bend, bindex; -+ struct path path; -+ struct dentry *root; -+ -+ err = vfsub_kern_path(args[0].from, lkup_dirflags, &path); -+ if (unlikely(err)) { -+ pr_err("lookup failed %s (%d)\n", args[0].from, err); -+ goto out; -+ } -+ -+ xino_itrunc->bindex = -1; -+ root = sb->s_root; -+ aufs_read_lock(root, AuLock_FLUSH); -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ if (au_h_dptr(root, bindex) == path.dentry) { -+ xino_itrunc->bindex = bindex; -+ break; -+ } -+ } -+ aufs_read_unlock(root, !AuLock_IR); -+ path_put(&path); -+ -+ if (unlikely(xino_itrunc->bindex < 0)) { -+ pr_err("no such branch %s\n", args[0].from); -+ err = 
-EINVAL; -+ } -+ -+out: -+ return err; -+} -+ -+/* called without aufs lock */ -+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts) -+{ -+ int err, n, token; -+ aufs_bindex_t bindex; -+ unsigned char skipped; -+ struct dentry *root; -+ struct au_opt *opt, *opt_tail; -+ char *opt_str; -+ /* reduce the stack space */ -+ union { -+ struct au_opt_xino_itrunc *xino_itrunc; -+ struct au_opt_wbr_create *create; -+ } u; -+ struct { -+ substring_t args[MAX_OPT_ARGS]; -+ } *a; -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ root = sb->s_root; -+ err = 0; -+ bindex = 0; -+ opt = opts->opt; -+ opt_tail = opt + opts->max_opt - 1; -+ opt->type = Opt_tail; -+ while (!err && (opt_str = strsep(&str, ",")) && *opt_str) { -+ err = -EINVAL; -+ skipped = 0; -+ token = match_token(opt_str, options, a->args); -+ switch (token) { -+ case Opt_br: -+ err = 0; -+ while (!err && (opt_str = strsep(&a->args[0].from, ":")) -+ && *opt_str) { -+ err = opt_add(opt, opt_str, opts->sb_flags, -+ bindex++); -+ if (unlikely(!err && ++opt > opt_tail)) { -+ err = -E2BIG; -+ break; -+ } -+ opt->type = Opt_tail; -+ skipped = 1; -+ } -+ break; -+ case Opt_add: -+ if (unlikely(match_int(&a->args[0], &n))) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ bindex = n; -+ err = opt_add(opt, a->args[1].from, opts->sb_flags, -+ bindex); -+ if (!err) -+ opt->type = token; -+ break; -+ case Opt_append: -+ err = opt_add(opt, a->args[0].from, opts->sb_flags, -+ /*dummy bindex*/1); -+ if (!err) -+ opt->type = token; -+ break; -+ case Opt_prepend: -+ err = opt_add(opt, a->args[0].from, opts->sb_flags, -+ /*bindex*/0); -+ if (!err) -+ opt->type = token; -+ break; -+ case Opt_del: -+ err = au_opts_parse_del(&opt->del, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+#if 0 /* reserved for future use */ -+ case Opt_idel: -+ del->pathname = "(indexed)"; -+ if (unlikely(match_int(&args[0], &n))) { -+ pr_err("bad integer in %s\n", 
opt_str); -+ break; -+ } -+ err = au_opts_parse_idel(sb, n, &opt->del, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+#endif -+ case Opt_mod: -+ err = au_opts_parse_mod(&opt->mod, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+#ifdef IMOD /* reserved for future use */ -+ case Opt_imod: -+ u.mod->path = "(indexed)"; -+ if (unlikely(match_int(&a->args[0], &n))) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ err = au_opts_parse_imod(sb, n, &opt->mod, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+#endif -+ case Opt_xino: -+ err = au_opts_parse_xino(sb, &opt->xino, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+ -+ case Opt_trunc_xino_path: -+ err = au_opts_parse_xino_itrunc_path -+ (sb, &opt->xino_itrunc, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+ -+ case Opt_itrunc_xino: -+ u.xino_itrunc = &opt->xino_itrunc; -+ if (unlikely(match_int(&a->args[0], &n))) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ u.xino_itrunc->bindex = n; -+ aufs_read_lock(root, AuLock_FLUSH); -+ if (n < 0 || au_sbend(sb) < n) { -+ pr_err("out of bounds, %d\n", n); -+ aufs_read_unlock(root, !AuLock_IR); -+ break; -+ } -+ aufs_read_unlock(root, !AuLock_IR); -+ err = 0; -+ opt->type = token; -+ break; -+ -+ case Opt_dirwh: -+ if (unlikely(match_int(&a->args[0], &opt->dirwh))) -+ break; -+ err = 0; -+ opt->type = token; -+ break; -+ -+ case Opt_rdcache: -+ if (unlikely(match_int(&a->args[0], &n))) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ if (unlikely(n > AUFS_RDCACHE_MAX)) { -+ pr_err("rdcache must be smaller than %d\n", -+ AUFS_RDCACHE_MAX); -+ break; -+ } -+ opt->rdcache = n; -+ err = 0; -+ opt->type = token; -+ break; -+ case Opt_rdblk: -+ if (unlikely(match_int(&a->args[0], &n) -+ || n < 0 -+ || n > KMALLOC_MAX_SIZE)) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ if (unlikely(n && n < NAME_MAX)) { -+ pr_err("rdblk must be larger than %d\n", -+ NAME_MAX); -+ break; -+ } -+ 
opt->rdblk = n; -+ err = 0; -+ opt->type = token; -+ break; -+ case Opt_rdhash: -+ if (unlikely(match_int(&a->args[0], &n) -+ || n < 0 -+ || n * sizeof(struct hlist_head) -+ > KMALLOC_MAX_SIZE)) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ opt->rdhash = n; -+ err = 0; -+ opt->type = token; -+ break; -+ -+ case Opt_trunc_xino: -+ case Opt_notrunc_xino: -+ case Opt_noxino: -+ case Opt_trunc_xib: -+ case Opt_notrunc_xib: -+ case Opt_shwh: -+ case Opt_noshwh: -+ case Opt_dirperm1: -+ case Opt_nodirperm1: -+ case Opt_plink: -+ case Opt_noplink: -+ case Opt_list_plink: -+ case Opt_dio: -+ case Opt_nodio: -+ case Opt_diropq_a: -+ case Opt_diropq_w: -+ case Opt_warn_perm: -+ case Opt_nowarn_perm: -+ case Opt_refrof: -+ case Opt_norefrof: -+ case Opt_verbose: -+ case Opt_noverbose: -+ case Opt_sum: -+ case Opt_nosum: -+ case Opt_wsum: -+ case Opt_rdblk_def: -+ case Opt_rdhash_def: -+ case Opt_acl: -+ case Opt_noacl: -+ err = 0; -+ opt->type = token; -+ break; -+ -+ case Opt_udba: -+ opt->udba = udba_val(a->args[0].from); -+ if (opt->udba >= 0) { -+ err = 0; -+ opt->type = token; -+ } else -+ pr_err("wrong value, %s\n", opt_str); -+ break; -+ -+ case Opt_wbr_create: -+ u.create = &opt->wbr_create; -+ u.create->wbr_create -+ = au_wbr_create_val(a->args[0].from, u.create); -+ if (u.create->wbr_create >= 0) { -+ err = 0; -+ opt->type = token; -+ } else -+ pr_err("wrong value, %s\n", opt_str); -+ break; -+ case Opt_wbr_copyup: -+ opt->wbr_copyup = au_wbr_copyup_val(a->args[0].from); -+ if (opt->wbr_copyup >= 0) { -+ err = 0; -+ opt->type = token; -+ } else -+ pr_err("wrong value, %s\n", opt_str); -+ break; -+ -+ case Opt_fhsm_sec: -+ if (unlikely(match_int(&a->args[0], &n) -+ || n < 0)) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ if (sysaufs_brs) { -+ opt->fhsm_second = n; -+ opt->type = token; -+ } else -+ pr_warn("ignored %s\n", opt_str); -+ err = 0; -+ break; -+ -+ case Opt_ignore: -+ pr_warn("ignored %s\n", opt_str); -+ /*FALLTHROUGH*/ -+ 
case Opt_ignore_silent: -+ skipped = 1; -+ err = 0; -+ break; -+ case Opt_err: -+ pr_err("unknown option %s\n", opt_str); -+ break; -+ } -+ -+ if (!err && !skipped) { -+ if (unlikely(++opt > opt_tail)) { -+ err = -E2BIG; -+ opt--; -+ opt->type = Opt_tail; -+ break; -+ } -+ opt->type = Opt_tail; -+ } -+ } -+ -+ kfree(a); -+ dump_opts(opts); -+ if (unlikely(err)) -+ au_opts_free(opts); -+ -+out: -+ return err; -+} -+ -+static int au_opt_wbr_create(struct super_block *sb, -+ struct au_opt_wbr_create *create) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ err = 1; /* handled */ -+ sbinfo = au_sbi(sb); -+ if (sbinfo->si_wbr_create_ops->fin) { -+ err = sbinfo->si_wbr_create_ops->fin(sb); -+ if (!err) -+ err = 1; -+ } -+ -+ sbinfo->si_wbr_create = create->wbr_create; -+ sbinfo->si_wbr_create_ops = au_wbr_create_ops + create->wbr_create; -+ switch (create->wbr_create) { -+ case AuWbrCreate_MFSRRV: -+ case AuWbrCreate_MFSRR: -+ case AuWbrCreate_PMFSRR: -+ case AuWbrCreate_PMFSRRV: -+ sbinfo->si_wbr_mfs.mfsrr_watermark = create->mfsrr_watermark; -+ /*FALLTHROUGH*/ -+ case AuWbrCreate_MFS: -+ case AuWbrCreate_MFSV: -+ case AuWbrCreate_PMFS: -+ case AuWbrCreate_PMFSV: -+ sbinfo->si_wbr_mfs.mfs_expire -+ = msecs_to_jiffies(create->mfs_second * MSEC_PER_SEC); -+ break; -+ } -+ -+ if (sbinfo->si_wbr_create_ops->init) -+ sbinfo->si_wbr_create_ops->init(sb); /* ignore */ -+ -+ return err; -+} -+ -+/* -+ * returns, -+ * plus: processed without an error -+ * zero: unprocessed -+ */ -+static int au_opt_simple(struct super_block *sb, struct au_opt *opt, -+ struct au_opts *opts) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ err = 1; /* handled */ -+ sbinfo = au_sbi(sb); -+ switch (opt->type) { -+ case Opt_udba: -+ sbinfo->si_mntflags &= ~AuOptMask_UDBA; -+ sbinfo->si_mntflags |= opt->udba; -+ opts->given_udba |= opt->udba; -+ break; -+ -+ case Opt_plink: -+ au_opt_set(sbinfo->si_mntflags, PLINK); -+ break; -+ case 
Opt_noplink: -+ if (au_opt_test(sbinfo->si_mntflags, PLINK)) -+ au_plink_put(sb, /*verbose*/1); -+ au_opt_clr(sbinfo->si_mntflags, PLINK); -+ break; -+ case Opt_list_plink: -+ if (au_opt_test(sbinfo->si_mntflags, PLINK)) -+ au_plink_list(sb); -+ break; -+ -+ case Opt_dio: -+ au_opt_set(sbinfo->si_mntflags, DIO); -+ au_fset_opts(opts->flags, REFRESH_DYAOP); -+ break; -+ case Opt_nodio: -+ au_opt_clr(sbinfo->si_mntflags, DIO); -+ au_fset_opts(opts->flags, REFRESH_DYAOP); -+ break; -+ -+ case Opt_fhsm_sec: -+ au_fhsm_set(sbinfo, opt->fhsm_second); -+ break; -+ -+ case Opt_diropq_a: -+ au_opt_set(sbinfo->si_mntflags, ALWAYS_DIROPQ); -+ break; -+ case Opt_diropq_w: -+ au_opt_clr(sbinfo->si_mntflags, ALWAYS_DIROPQ); -+ break; -+ -+ case Opt_warn_perm: -+ au_opt_set(sbinfo->si_mntflags, WARN_PERM); -+ break; -+ case Opt_nowarn_perm: -+ au_opt_clr(sbinfo->si_mntflags, WARN_PERM); -+ break; -+ -+ case Opt_refrof: -+ au_opt_set(sbinfo->si_mntflags, REFROF); -+ break; -+ case Opt_norefrof: -+ au_opt_clr(sbinfo->si_mntflags, REFROF); -+ break; -+ -+ case Opt_verbose: -+ au_opt_set(sbinfo->si_mntflags, VERBOSE); -+ break; -+ case Opt_noverbose: -+ au_opt_clr(sbinfo->si_mntflags, VERBOSE); -+ break; -+ -+ case Opt_sum: -+ au_opt_set(sbinfo->si_mntflags, SUM); -+ break; -+ case Opt_wsum: -+ au_opt_clr(sbinfo->si_mntflags, SUM); -+ au_opt_set(sbinfo->si_mntflags, SUM_W); -+ case Opt_nosum: -+ au_opt_clr(sbinfo->si_mntflags, SUM); -+ au_opt_clr(sbinfo->si_mntflags, SUM_W); -+ break; -+ -+ case Opt_wbr_create: -+ err = au_opt_wbr_create(sb, &opt->wbr_create); -+ break; -+ case Opt_wbr_copyup: -+ sbinfo->si_wbr_copyup = opt->wbr_copyup; -+ sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + opt->wbr_copyup; -+ break; -+ -+ case Opt_dirwh: -+ sbinfo->si_dirwh = opt->dirwh; -+ break; -+ -+ case Opt_rdcache: -+ sbinfo->si_rdcache -+ = msecs_to_jiffies(opt->rdcache * MSEC_PER_SEC); -+ break; -+ case Opt_rdblk: -+ sbinfo->si_rdblk = opt->rdblk; -+ break; -+ case Opt_rdblk_def: -+ 
sbinfo->si_rdblk = AUFS_RDBLK_DEF; -+ break; -+ case Opt_rdhash: -+ sbinfo->si_rdhash = opt->rdhash; -+ break; -+ case Opt_rdhash_def: -+ sbinfo->si_rdhash = AUFS_RDHASH_DEF; -+ break; -+ -+ case Opt_shwh: -+ au_opt_set(sbinfo->si_mntflags, SHWH); -+ break; -+ case Opt_noshwh: -+ au_opt_clr(sbinfo->si_mntflags, SHWH); -+ break; -+ -+ case Opt_dirperm1: -+ au_opt_set(sbinfo->si_mntflags, DIRPERM1); -+ break; -+ case Opt_nodirperm1: -+ au_opt_clr(sbinfo->si_mntflags, DIRPERM1); -+ break; -+ -+ case Opt_trunc_xino: -+ au_opt_set(sbinfo->si_mntflags, TRUNC_XINO); -+ break; -+ case Opt_notrunc_xino: -+ au_opt_clr(sbinfo->si_mntflags, TRUNC_XINO); -+ break; -+ -+ case Opt_trunc_xino_path: -+ case Opt_itrunc_xino: -+ err = au_xino_trunc(sb, opt->xino_itrunc.bindex); -+ if (!err) -+ err = 1; -+ break; -+ -+ case Opt_trunc_xib: -+ au_fset_opts(opts->flags, TRUNC_XIB); -+ break; -+ case Opt_notrunc_xib: -+ au_fclr_opts(opts->flags, TRUNC_XIB); -+ break; -+ -+ case Opt_acl: -+ sb->s_flags |= MS_POSIXACL; -+ break; -+ case Opt_noacl: -+ sb->s_flags &= ~MS_POSIXACL; -+ break; -+ -+ default: -+ err = 0; -+ break; -+ } -+ -+ return err; -+} -+ -+/* -+ * returns tri-state. 
-+ * plus: processed without an error -+ * zero: unprocessed -+ * minus: error -+ */ -+static int au_opt_br(struct super_block *sb, struct au_opt *opt, -+ struct au_opts *opts) -+{ -+ int err, do_refresh; -+ -+ err = 0; -+ switch (opt->type) { -+ case Opt_append: -+ opt->add.bindex = au_sbend(sb) + 1; -+ if (opt->add.bindex < 0) -+ opt->add.bindex = 0; -+ goto add; -+ case Opt_prepend: -+ opt->add.bindex = 0; -+ add: /* indented label */ -+ case Opt_add: -+ err = au_br_add(sb, &opt->add, -+ au_ftest_opts(opts->flags, REMOUNT)); -+ if (!err) { -+ err = 1; -+ au_fset_opts(opts->flags, REFRESH); -+ } -+ break; -+ -+ case Opt_del: -+ case Opt_idel: -+ err = au_br_del(sb, &opt->del, -+ au_ftest_opts(opts->flags, REMOUNT)); -+ if (!err) { -+ err = 1; -+ au_fset_opts(opts->flags, TRUNC_XIB); -+ au_fset_opts(opts->flags, REFRESH); -+ } -+ break; -+ -+ case Opt_mod: -+ case Opt_imod: -+ err = au_br_mod(sb, &opt->mod, -+ au_ftest_opts(opts->flags, REMOUNT), -+ &do_refresh); -+ if (!err) { -+ err = 1; -+ if (do_refresh) -+ au_fset_opts(opts->flags, REFRESH); -+ } -+ break; -+ } -+ -+ return err; -+} -+ -+static int au_opt_xino(struct super_block *sb, struct au_opt *opt, -+ struct au_opt_xino **opt_xino, -+ struct au_opts *opts) -+{ -+ int err; -+ aufs_bindex_t bend, bindex; -+ struct dentry *root, *parent, *h_root; -+ -+ err = 0; -+ switch (opt->type) { -+ case Opt_xino: -+ err = au_xino_set(sb, &opt->xino, -+ !!au_ftest_opts(opts->flags, REMOUNT)); -+ if (unlikely(err)) -+ break; -+ -+ *opt_xino = &opt->xino; -+ au_xino_brid_set(sb, -1); -+ -+ /* safe d_parent access */ -+ parent = opt->xino.file->f_dentry->d_parent; -+ root = sb->s_root; -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ h_root = au_h_dptr(root, bindex); -+ if (h_root == parent) { -+ au_xino_brid_set(sb, au_sbr_id(sb, bindex)); -+ break; -+ } -+ } -+ break; -+ -+ case Opt_noxino: -+ au_xino_clr(sb); -+ au_xino_brid_set(sb, -1); -+ *opt_xino = (void *)-1; -+ break; -+ } -+ -+ return 
err; -+} -+ -+int au_opts_verify(struct super_block *sb, unsigned long sb_flags, -+ unsigned int pending) -+{ -+ int err, fhsm; -+ aufs_bindex_t bindex, bend; -+ unsigned char do_plink, skip, do_free, can_no_dreval; -+ struct au_branch *br; -+ struct au_wbr *wbr; -+ struct dentry *root, *dentry; -+ struct inode *dir, *h_dir; -+ struct au_sbinfo *sbinfo; -+ struct au_hinode *hdir; -+ -+ SiMustAnyLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!(sbinfo->si_mntflags & AuOptMask_UDBA)); -+ -+ if (!(sb_flags & MS_RDONLY)) { -+ if (unlikely(!au_br_writable(au_sbr_perm(sb, 0)))) -+ pr_warn("first branch should be rw\n"); -+ if (unlikely(au_opt_test(sbinfo->si_mntflags, SHWH))) -+ pr_warn_once("shwh should be used with ro\n"); -+ } -+ -+ if (au_opt_test((sbinfo->si_mntflags | pending), UDBA_HNOTIFY) -+ && !au_opt_test(sbinfo->si_mntflags, XINO)) -+ pr_warn_once("udba=*notify requires xino\n"); -+ -+ if (au_opt_test(sbinfo->si_mntflags, DIRPERM1)) -+ pr_warn_once("dirperm1 breaks the protection" -+ " by the permission bits on the lower branch\n"); -+ -+ err = 0; -+ fhsm = 0; -+ root = sb->s_root; -+ dir = root->d_inode; -+ do_plink = !!au_opt_test(sbinfo->si_mntflags, PLINK); -+ can_no_dreval = !!au_opt_test((sbinfo->si_mntflags | pending), -+ UDBA_NONE); -+ bend = au_sbend(sb); -+ for (bindex = 0; !err && bindex <= bend; bindex++) { -+ skip = 0; -+ h_dir = au_h_iptr(dir, bindex); -+ br = au_sbr(sb, bindex); -+ -+ if ((br->br_perm & AuBrAttr_ICEX) -+ && !h_dir->i_op->listxattr) -+ br->br_perm &= ~AuBrAttr_ICEX; -+#if 0 -+ if ((br->br_perm & AuBrAttr_ICEX_SEC) -+ && (au_br_sb(br)->s_flags & MS_NOSEC)) -+ br->br_perm &= ~AuBrAttr_ICEX_SEC; -+#endif -+ -+ do_free = 0; -+ wbr = br->br_wbr; -+ if (wbr) -+ wbr_wh_read_lock(wbr); -+ -+ if (!au_br_writable(br->br_perm)) { -+ do_free = !!wbr; -+ skip = (!wbr -+ || (!wbr->wbr_whbase -+ && !wbr->wbr_plink -+ && !wbr->wbr_orph)); -+ } else if (!au_br_wh_linkable(br->br_perm)) { -+ /* skip = (!br->br_whbase && !br->br_orph); */ -+ 
skip = (!wbr || !wbr->wbr_whbase); -+ if (skip && wbr) { -+ if (do_plink) -+ skip = !!wbr->wbr_plink; -+ else -+ skip = !wbr->wbr_plink; -+ } -+ } else { -+ /* skip = (br->br_whbase && br->br_ohph); */ -+ skip = (wbr && wbr->wbr_whbase); -+ if (skip) { -+ if (do_plink) -+ skip = !!wbr->wbr_plink; -+ else -+ skip = !wbr->wbr_plink; -+ } -+ } -+ if (wbr) -+ wbr_wh_read_unlock(wbr); -+ -+ if (can_no_dreval) { -+ dentry = br->br_path.dentry; -+ spin_lock(&dentry->d_lock); -+ if (dentry->d_flags & -+ (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE)) -+ can_no_dreval = 0; -+ spin_unlock(&dentry->d_lock); -+ } -+ -+ if (au_br_fhsm(br->br_perm)) { -+ fhsm++; -+ AuDebugOn(!br->br_fhsm); -+ } -+ -+ if (skip) -+ continue; -+ -+ hdir = au_hi(dir, bindex); -+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT); -+ if (wbr) -+ wbr_wh_write_lock(wbr); -+ err = au_wh_init(br, sb); -+ if (wbr) -+ wbr_wh_write_unlock(wbr); -+ au_hn_imtx_unlock(hdir); -+ -+ if (!err && do_free) { -+ kfree(wbr); -+ br->br_wbr = NULL; -+ } -+ } -+ -+ if (can_no_dreval) -+ au_fset_si(sbinfo, NO_DREVAL); -+ else -+ au_fclr_si(sbinfo, NO_DREVAL); -+ -+ if (fhsm >= 2) { -+ au_fset_si(sbinfo, FHSM); -+ for (bindex = bend; bindex >= 0; bindex--) { -+ br = au_sbr(sb, bindex); -+ if (au_br_fhsm(br->br_perm)) { -+ au_fhsm_set_bottom(sb, bindex); -+ break; -+ } -+ } -+ } else { -+ au_fclr_si(sbinfo, FHSM); -+ au_fhsm_set_bottom(sb, -1); -+ } -+ -+ return err; -+} -+ -+int au_opts_mount(struct super_block *sb, struct au_opts *opts) -+{ -+ int err; -+ unsigned int tmp; -+ aufs_bindex_t bindex, bend; -+ struct au_opt *opt; -+ struct au_opt_xino *opt_xino, xino; -+ struct au_sbinfo *sbinfo; -+ struct au_branch *br; -+ struct inode *dir; -+ -+ SiMustWriteLock(sb); -+ -+ err = 0; -+ opt_xino = NULL; -+ opt = opts->opt; -+ while (err >= 0 && opt->type != Opt_tail) -+ err = au_opt_simple(sb, opt++, opts); -+ if (err > 0) -+ err = 0; -+ else if (unlikely(err < 0)) -+ goto out; -+ -+ /* disable xino and udba temporary */ -+ 
sbinfo = au_sbi(sb); -+ tmp = sbinfo->si_mntflags; -+ au_opt_clr(sbinfo->si_mntflags, XINO); -+ au_opt_set_udba(sbinfo->si_mntflags, UDBA_REVAL); -+ -+ opt = opts->opt; -+ while (err >= 0 && opt->type != Opt_tail) -+ err = au_opt_br(sb, opt++, opts); -+ if (err > 0) -+ err = 0; -+ else if (unlikely(err < 0)) -+ goto out; -+ -+ bend = au_sbend(sb); -+ if (unlikely(bend < 0)) { -+ err = -EINVAL; -+ pr_err("no branches\n"); -+ goto out; -+ } -+ -+ if (au_opt_test(tmp, XINO)) -+ au_opt_set(sbinfo->si_mntflags, XINO); -+ opt = opts->opt; -+ while (!err && opt->type != Opt_tail) -+ err = au_opt_xino(sb, opt++, &opt_xino, opts); -+ if (unlikely(err)) -+ goto out; -+ -+ err = au_opts_verify(sb, sb->s_flags, tmp); -+ if (unlikely(err)) -+ goto out; -+ -+ /* restore xino */ -+ if (au_opt_test(tmp, XINO) && !opt_xino) { -+ xino.file = au_xino_def(sb); -+ err = PTR_ERR(xino.file); -+ if (IS_ERR(xino.file)) -+ goto out; -+ -+ err = au_xino_set(sb, &xino, /*remount*/0); -+ fput(xino.file); -+ if (unlikely(err)) -+ goto out; -+ } -+ -+ /* restore udba */ -+ tmp &= AuOptMask_UDBA; -+ sbinfo->si_mntflags &= ~AuOptMask_UDBA; -+ sbinfo->si_mntflags |= tmp; -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ err = au_hnotify_reset_br(tmp, br, br->br_perm); -+ if (unlikely(err)) -+ AuIOErr("hnotify failed on br %d, %d, ignored\n", -+ bindex, err); -+ /* go on even if err */ -+ } -+ if (au_opt_test(tmp, UDBA_HNOTIFY)) { -+ dir = sb->s_root->d_inode; -+ au_hn_reset(dir, au_hi_flags(dir, /*isdir*/1) & ~AuHi_XINO); -+ } -+ -+out: -+ return err; -+} -+ -+int au_opts_remount(struct super_block *sb, struct au_opts *opts) -+{ -+ int err, rerr; -+ unsigned char no_dreval; -+ struct inode *dir; -+ struct au_opt_xino *opt_xino; -+ struct au_opt *opt; -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ err = 0; -+ dir = sb->s_root->d_inode; -+ sbinfo = au_sbi(sb); -+ opt_xino = NULL; -+ opt = opts->opt; -+ while (err >= 0 && opt->type != 
Opt_tail) { -+ err = au_opt_simple(sb, opt, opts); -+ if (!err) -+ err = au_opt_br(sb, opt, opts); -+ if (!err) -+ err = au_opt_xino(sb, opt, &opt_xino, opts); -+ opt++; -+ } -+ if (err > 0) -+ err = 0; -+ AuTraceErr(err); -+ /* go on even err */ -+ -+ no_dreval = !!au_ftest_si(sbinfo, NO_DREVAL); -+ rerr = au_opts_verify(sb, opts->sb_flags, /*pending*/0); -+ if (unlikely(rerr && !err)) -+ err = rerr; -+ -+ if (no_dreval != !!au_ftest_si(sbinfo, NO_DREVAL)) -+ au_fset_opts(opts->flags, REFRESH_IDOP); -+ -+ if (au_ftest_opts(opts->flags, TRUNC_XIB)) { -+ rerr = au_xib_trunc(sb); -+ if (unlikely(rerr && !err)) -+ err = rerr; -+ } -+ -+ /* will be handled by the caller */ -+ if (!au_ftest_opts(opts->flags, REFRESH) -+ && (opts->given_udba -+ || au_opt_test(sbinfo->si_mntflags, XINO) -+ || au_ftest_opts(opts->flags, REFRESH_IDOP) -+ )) -+ au_fset_opts(opts->flags, REFRESH); -+ -+ AuDbg("status 0x%x\n", opts->flags); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+unsigned int au_opt_udba(struct super_block *sb) -+{ -+ return au_mntflags(sb) & AuOptMask_UDBA; -+} -diff --git a/fs/aufs/opts.h b/fs/aufs/opts.h -new file mode 100644 -index 0000000..50949a0 ---- /dev/null -+++ b/fs/aufs/opts.h -@@ -0,0 +1,212 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * mount options/flags -+ */ -+ -+#ifndef __AUFS_OPTS_H__ -+#define __AUFS_OPTS_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+ -+struct file; -+struct super_block; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* mount flags */ -+#define AuOpt_XINO 1 /* external inode number bitmap -+ and translation table */ -+#define AuOpt_TRUNC_XINO (1 << 1) /* truncate xino files */ -+#define AuOpt_UDBA_NONE (1 << 2) /* users direct branch access */ -+#define AuOpt_UDBA_REVAL (1 << 3) -+#define AuOpt_UDBA_HNOTIFY (1 << 4) -+#define AuOpt_SHWH (1 << 5) /* show whiteout */ -+#define AuOpt_PLINK (1 << 6) /* pseudo-link */ -+#define AuOpt_DIRPERM1 (1 << 7) /* ignore the lower dir's perm -+ bits */ -+#define AuOpt_REFROF (1 << 8) /* unimplemented */ -+#define AuOpt_ALWAYS_DIROPQ (1 << 9) /* policy to creating diropq */ -+#define AuOpt_SUM (1 << 10) /* summation for statfs(2) */ -+#define AuOpt_SUM_W (1 << 11) /* unimplemented */ -+#define AuOpt_WARN_PERM (1 << 12) /* warn when add-branch */ -+#define AuOpt_VERBOSE (1 << 13) /* busy inode when del-branch */ -+#define AuOpt_DIO (1 << 14) /* direct io */ -+ -+#ifndef CONFIG_AUFS_HNOTIFY -+#undef AuOpt_UDBA_HNOTIFY -+#define AuOpt_UDBA_HNOTIFY 0 -+#endif -+#ifndef CONFIG_AUFS_SHWH -+#undef AuOpt_SHWH -+#define AuOpt_SHWH 0 -+#endif -+ -+#define AuOpt_Def (AuOpt_XINO \ -+ | AuOpt_UDBA_REVAL \ -+ | AuOpt_PLINK \ -+ /* | AuOpt_DIRPERM1 */ \ -+ | AuOpt_WARN_PERM) -+#define AuOptMask_UDBA (AuOpt_UDBA_NONE \ -+ | AuOpt_UDBA_REVAL \ -+ | AuOpt_UDBA_HNOTIFY) -+ -+#define au_opt_test(flags, name) (flags & AuOpt_##name) -+#define au_opt_set(flags, name) do { \ -+ BUILD_BUG_ON(AuOpt_##name & AuOptMask_UDBA); \ -+ ((flags) |= AuOpt_##name); \ -+} while (0) -+#define au_opt_set_udba(flags, name) do { \ -+ (flags) &= ~AuOptMask_UDBA; \ -+ ((flags) |= AuOpt_##name); \ -+} while (0) -+#define au_opt_clr(flags, name) do { \ -+ ((flags) &= ~AuOpt_##name); \ -+} while (0) -+ -+static inline unsigned 
int au_opts_plink(unsigned int mntflags) -+{ -+#ifdef CONFIG_PROC_FS -+ return mntflags; -+#else -+ return mntflags & ~AuOpt_PLINK; -+#endif -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* policies to select one among multiple writable branches */ -+enum { -+ AuWbrCreate_TDP, /* top down parent */ -+ AuWbrCreate_RR, /* round robin */ -+ AuWbrCreate_MFS, /* most free space */ -+ AuWbrCreate_MFSV, /* mfs with seconds */ -+ AuWbrCreate_MFSRR, /* mfs then rr */ -+ AuWbrCreate_MFSRRV, /* mfs then rr with seconds */ -+ AuWbrCreate_PMFS, /* parent and mfs */ -+ AuWbrCreate_PMFSV, /* parent and mfs with seconds */ -+ AuWbrCreate_PMFSRR, /* parent, mfs and round-robin */ -+ AuWbrCreate_PMFSRRV, /* plus seconds */ -+ -+ AuWbrCreate_Def = AuWbrCreate_TDP -+}; -+ -+enum { -+ AuWbrCopyup_TDP, /* top down parent */ -+ AuWbrCopyup_BUP, /* bottom up parent */ -+ AuWbrCopyup_BU, /* bottom up */ -+ -+ AuWbrCopyup_Def = AuWbrCopyup_TDP -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_opt_add { -+ aufs_bindex_t bindex; -+ char *pathname; -+ int perm; -+ struct path path; -+}; -+ -+struct au_opt_del { -+ char *pathname; -+ struct path h_path; -+}; -+ -+struct au_opt_mod { -+ char *path; -+ int perm; -+ struct dentry *h_root; -+}; -+ -+struct au_opt_xino { -+ char *path; -+ struct file *file; -+}; -+ -+struct au_opt_xino_itrunc { -+ aufs_bindex_t bindex; -+}; -+ -+struct au_opt_wbr_create { -+ int wbr_create; -+ int mfs_second; -+ unsigned long long mfsrr_watermark; -+}; -+ -+struct au_opt { -+ int type; -+ union { -+ struct au_opt_xino xino; -+ struct au_opt_xino_itrunc xino_itrunc; -+ struct au_opt_add add; -+ struct au_opt_del del; -+ struct au_opt_mod mod; -+ int dirwh; -+ int rdcache; -+ unsigned int rdblk; -+ unsigned int rdhash; -+ int udba; -+ struct au_opt_wbr_create wbr_create; -+ int wbr_copyup; -+ unsigned int fhsm_second; -+ }; -+}; -+ -+/* opts flags */ -+#define 
AuOpts_REMOUNT 1 -+#define AuOpts_REFRESH (1 << 1) -+#define AuOpts_TRUNC_XIB (1 << 2) -+#define AuOpts_REFRESH_DYAOP (1 << 3) -+#define AuOpts_REFRESH_IDOP (1 << 4) -+#define au_ftest_opts(flags, name) ((flags) & AuOpts_##name) -+#define au_fset_opts(flags, name) \ -+ do { (flags) |= AuOpts_##name; } while (0) -+#define au_fclr_opts(flags, name) \ -+ do { (flags) &= ~AuOpts_##name; } while (0) -+ -+struct au_opts { -+ struct au_opt *opt; -+ int max_opt; -+ -+ unsigned int given_udba; -+ unsigned int flags; -+ unsigned long sb_flags; -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* opts.c */ -+void au_optstr_br_perm(au_br_perm_str_t *str, int perm); -+const char *au_optstr_udba(int udba); -+const char *au_optstr_wbr_copyup(int wbr_copyup); -+const char *au_optstr_wbr_create(int wbr_create); -+ -+void au_opts_free(struct au_opts *opts); -+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts); -+int au_opts_verify(struct super_block *sb, unsigned long sb_flags, -+ unsigned int pending); -+int au_opts_mount(struct super_block *sb, struct au_opts *opts); -+int au_opts_remount(struct super_block *sb, struct au_opts *opts); -+ -+unsigned int au_opt_udba(struct super_block *sb); -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_OPTS_H__ */ -diff --git a/fs/aufs/plink.c b/fs/aufs/plink.c -new file mode 100644 -index 0000000..4f372ec ---- /dev/null -+++ b/fs/aufs/plink.c -@@ -0,0 +1,506 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * pseudo-link -+ */ -+ -+#include "aufs.h" -+ -+/* -+ * the pseudo-link maintenance mode. -+ * during a user process maintains the pseudo-links, -+ * prohibit adding a new plink and branch manipulation. -+ * -+ * Flags -+ * NOPLM: -+ * For entry functions which will handle plink, and i_mutex is already held -+ * in VFS. -+ * They cannot wait and should return an error at once. -+ * Callers has to check the error. -+ * NOPLMW: -+ * For entry functions which will handle plink, but i_mutex is not held -+ * in VFS. -+ * They can wait the plink maintenance mode to finish. -+ * -+ * They behave like F_SETLK and F_SETLKW. -+ * If the caller never handle plink, then both flags are unnecessary. -+ */ -+ -+int au_plink_maint(struct super_block *sb, int flags) -+{ -+ int err; -+ pid_t pid, ppid; -+ struct au_sbinfo *sbi; -+ -+ SiMustAnyLock(sb); -+ -+ err = 0; -+ if (!au_opt_test(au_mntflags(sb), PLINK)) -+ goto out; -+ -+ sbi = au_sbi(sb); -+ pid = sbi->si_plink_maint_pid; -+ if (!pid || pid == current->pid) -+ goto out; -+ -+ /* todo: it highly depends upon /sbin/mount.aufs */ -+ rcu_read_lock(); -+ ppid = task_pid_vnr(rcu_dereference(current->real_parent)); -+ rcu_read_unlock(); -+ if (pid == ppid) -+ goto out; -+ -+ if (au_ftest_lock(flags, NOPLMW)) { -+ /* if there is no i_mutex lock in VFS, we don't need to wait */ -+ /* AuDebugOn(!lockdep_depth(current)); */ -+ while (sbi->si_plink_maint_pid) { -+ si_read_unlock(sb); -+ /* gave up wake_up_bit() */ -+ wait_event(sbi->si_plink_wq, !sbi->si_plink_maint_pid); -+ -+ if (au_ftest_lock(flags, FLUSH)) -+ au_nwt_flush(&sbi->si_nowait); -+ si_noflush_read_lock(sb); -+ } -+ } else if (au_ftest_lock(flags, NOPLM)) { -+ AuDbg("ppid %d, pid %d\n", ppid, pid); -+ err = -EAGAIN; -+ } -+ -+out: -+ return err; -+} -+ -+void 
au_plink_maint_leave(struct au_sbinfo *sbinfo) -+{ -+ spin_lock(&sbinfo->si_plink_maint_lock); -+ sbinfo->si_plink_maint_pid = 0; -+ spin_unlock(&sbinfo->si_plink_maint_lock); -+ wake_up_all(&sbinfo->si_plink_wq); -+} -+ -+int au_plink_maint_enter(struct super_block *sb) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ /* make sure i am the only one in this fs */ -+ si_write_lock(sb, AuLock_FLUSH); -+ if (au_opt_test(au_mntflags(sb), PLINK)) { -+ spin_lock(&sbinfo->si_plink_maint_lock); -+ if (!sbinfo->si_plink_maint_pid) -+ sbinfo->si_plink_maint_pid = current->pid; -+ else -+ err = -EBUSY; -+ spin_unlock(&sbinfo->si_plink_maint_lock); -+ } -+ si_write_unlock(sb); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_DEBUG -+void au_plink_list(struct super_block *sb) -+{ -+ int i; -+ struct au_sbinfo *sbinfo; -+ struct hlist_head *plink_hlist; -+ struct au_icntnr *icntnr; -+ -+ SiMustAnyLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK)); -+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM)); -+ -+ for (i = 0; i < AuPlink_NHASH; i++) { -+ plink_hlist = &sbinfo->si_plink[i].head; -+ rcu_read_lock(); -+ hlist_for_each_entry_rcu(icntnr, plink_hlist, plink) -+ AuDbg("%lu\n", icntnr->vfs_inode.i_ino); -+ rcu_read_unlock(); -+ } -+} -+#endif -+ -+/* is the inode pseudo-linked? 
*/ -+int au_plink_test(struct inode *inode) -+{ -+ int found, i; -+ struct au_sbinfo *sbinfo; -+ struct hlist_head *plink_hlist; -+ struct au_icntnr *icntnr; -+ -+ sbinfo = au_sbi(inode->i_sb); -+ AuRwMustAnyLock(&sbinfo->si_rwsem); -+ AuDebugOn(!au_opt_test(au_mntflags(inode->i_sb), PLINK)); -+ AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM)); -+ -+ found = 0; -+ i = au_plink_hash(inode->i_ino); -+ plink_hlist = &sbinfo->si_plink[i].head; -+ rcu_read_lock(); -+ hlist_for_each_entry_rcu(icntnr, plink_hlist, plink) -+ if (&icntnr->vfs_inode == inode) { -+ found = 1; -+ break; -+ } -+ rcu_read_unlock(); -+ return found; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * generate a name for plink. -+ * the file will be stored under AUFS_WH_PLINKDIR. -+ */ -+/* 20 is max digits length of ulong 64 */ -+#define PLINK_NAME_LEN ((20 + 1) * 2) -+ -+static int plink_name(char *name, int len, struct inode *inode, -+ aufs_bindex_t bindex) -+{ -+ int rlen; -+ struct inode *h_inode; -+ -+ h_inode = au_h_iptr(inode, bindex); -+ rlen = snprintf(name, len, "%lu.%lu", inode->i_ino, h_inode->i_ino); -+ return rlen; -+} -+ -+struct au_do_plink_lkup_args { -+ struct dentry **errp; -+ struct qstr *tgtname; -+ struct dentry *h_parent; -+ struct au_branch *br; -+}; -+ -+static struct dentry *au_do_plink_lkup(struct qstr *tgtname, -+ struct dentry *h_parent, -+ struct au_branch *br) -+{ -+ struct dentry *h_dentry; -+ struct mutex *h_mtx; -+ -+ h_mtx = &h_parent->d_inode->i_mutex; -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD2); -+ h_dentry = vfsub_lkup_one(tgtname, h_parent); -+ mutex_unlock(h_mtx); -+ return h_dentry; -+} -+ -+static void au_call_do_plink_lkup(void *args) -+{ -+ struct au_do_plink_lkup_args *a = args; -+ *a->errp = au_do_plink_lkup(a->tgtname, a->h_parent, a->br); -+} -+ -+/* lookup the plink-ed @inode under the branch at @bindex */ -+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex) -+{ -+ struct 
dentry *h_dentry, *h_parent; -+ struct au_branch *br; -+ struct inode *h_dir; -+ int wkq_err; -+ char a[PLINK_NAME_LEN]; -+ struct qstr tgtname = QSTR_INIT(a, 0); -+ -+ AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM)); -+ -+ br = au_sbr(inode->i_sb, bindex); -+ h_parent = br->br_wbr->wbr_plink; -+ h_dir = h_parent->d_inode; -+ tgtname.len = plink_name(a, sizeof(a), inode, bindex); -+ -+ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) { -+ struct au_do_plink_lkup_args args = { -+ .errp = &h_dentry, -+ .tgtname = &tgtname, -+ .h_parent = h_parent, -+ .br = br -+ }; -+ -+ wkq_err = au_wkq_wait(au_call_do_plink_lkup, &args); -+ if (unlikely(wkq_err)) -+ h_dentry = ERR_PTR(wkq_err); -+ } else -+ h_dentry = au_do_plink_lkup(&tgtname, h_parent, br); -+ -+ return h_dentry; -+} -+ -+/* create a pseudo-link */ -+static int do_whplink(struct qstr *tgt, struct dentry *h_parent, -+ struct dentry *h_dentry, struct au_branch *br) -+{ -+ int err; -+ struct path h_path = { -+ .mnt = au_br_mnt(br) -+ }; -+ struct inode *h_dir, *delegated; -+ -+ h_dir = h_parent->d_inode; -+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_CHILD2); -+again: -+ h_path.dentry = vfsub_lkup_one(tgt, h_parent); -+ err = PTR_ERR(h_path.dentry); -+ if (IS_ERR(h_path.dentry)) -+ goto out; -+ -+ err = 0; -+ /* wh.plink dir is not monitored */ -+ /* todo: is it really safe? 
*/ -+ if (h_path.dentry->d_inode -+ && h_path.dentry->d_inode != h_dentry->d_inode) { -+ delegated = NULL; -+ err = vfsub_unlink(h_dir, &h_path, &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ dput(h_path.dentry); -+ h_path.dentry = NULL; -+ if (!err) -+ goto again; -+ } -+ if (!err && !h_path.dentry->d_inode) { -+ delegated = NULL; -+ err = vfsub_link(h_dentry, h_dir, &h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ } -+ dput(h_path.dentry); -+ -+out: -+ mutex_unlock(&h_dir->i_mutex); -+ return err; -+} -+ -+struct do_whplink_args { -+ int *errp; -+ struct qstr *tgt; -+ struct dentry *h_parent; -+ struct dentry *h_dentry; -+ struct au_branch *br; -+}; -+ -+static void call_do_whplink(void *args) -+{ -+ struct do_whplink_args *a = args; -+ *a->errp = do_whplink(a->tgt, a->h_parent, a->h_dentry, a->br); -+} -+ -+static int whplink(struct dentry *h_dentry, struct inode *inode, -+ aufs_bindex_t bindex, struct au_branch *br) -+{ -+ int err, wkq_err; -+ struct au_wbr *wbr; -+ struct dentry *h_parent; -+ struct inode *h_dir; -+ char a[PLINK_NAME_LEN]; -+ struct qstr tgtname = QSTR_INIT(a, 0); -+ -+ wbr = au_sbr(inode->i_sb, bindex)->br_wbr; -+ h_parent = wbr->wbr_plink; -+ h_dir = h_parent->d_inode; -+ tgtname.len = plink_name(a, sizeof(a), inode, bindex); -+ -+ /* always superio. */ -+ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) { -+ struct do_whplink_args args = { -+ .errp = &err, -+ .tgt = &tgtname, -+ .h_parent = h_parent, -+ .h_dentry = h_dentry, -+ .br = br -+ }; -+ wkq_err = au_wkq_wait(call_do_whplink, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } else -+ err = do_whplink(&tgtname, h_parent, h_dentry, br); -+ -+ return err; -+} -+ -+/* -+ * create a new pseudo-link for @h_dentry on @bindex. 
-+ * the linked inode is held in aufs @inode. -+ */ -+void au_plink_append(struct inode *inode, aufs_bindex_t bindex, -+ struct dentry *h_dentry) -+{ -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ struct hlist_head *plink_hlist; -+ struct au_icntnr *icntnr; -+ struct au_sphlhead *sphl; -+ int found, err, cnt, i; -+ -+ sb = inode->i_sb; -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK)); -+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM)); -+ -+ found = au_plink_test(inode); -+ if (found) -+ return; -+ -+ i = au_plink_hash(inode->i_ino); -+ sphl = sbinfo->si_plink + i; -+ plink_hlist = &sphl->head; -+ au_igrab(inode); -+ -+ spin_lock(&sphl->spin); -+ hlist_for_each_entry(icntnr, plink_hlist, plink) { -+ if (&icntnr->vfs_inode == inode) { -+ found = 1; -+ break; -+ } -+ } -+ if (!found) { -+ icntnr = container_of(inode, struct au_icntnr, vfs_inode); -+ hlist_add_head_rcu(&icntnr->plink, plink_hlist); -+ } -+ spin_unlock(&sphl->spin); -+ if (!found) { -+ cnt = au_sphl_count(sphl); -+#define msg "unexpectedly unblanced or too many pseudo-links" -+ if (cnt > AUFS_PLINK_WARN) -+ AuWarn1(msg ", %d\n", cnt); -+#undef msg -+ err = whplink(h_dentry, inode, bindex, au_sbr(sb, bindex)); -+ if (unlikely(err)) { -+ pr_warn("err %d, damaged pseudo link.\n", err); -+ au_sphl_del_rcu(&icntnr->plink, sphl); -+ iput(&icntnr->vfs_inode); -+ } -+ } else -+ iput(&icntnr->vfs_inode); -+} -+ -+/* free all plinks */ -+void au_plink_put(struct super_block *sb, int verbose) -+{ -+ int i, warned; -+ struct au_sbinfo *sbinfo; -+ struct hlist_head *plink_hlist; -+ struct hlist_node *tmp; -+ struct au_icntnr *icntnr; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK)); -+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM)); -+ -+ /* no spin_lock since sbinfo is write-locked */ -+ warned = 0; -+ for (i = 0; i < AuPlink_NHASH; i++) { -+ plink_hlist = &sbinfo->si_plink[i].head; -+ if (!warned && verbose && 
!hlist_empty(plink_hlist)) { -+ pr_warn("pseudo-link is not flushed"); -+ warned = 1; -+ } -+ hlist_for_each_entry_safe(icntnr, tmp, plink_hlist, plink) -+ iput(&icntnr->vfs_inode); -+ INIT_HLIST_HEAD(plink_hlist); -+ } -+} -+ -+void au_plink_clean(struct super_block *sb, int verbose) -+{ -+ struct dentry *root; -+ -+ root = sb->s_root; -+ aufs_write_lock(root); -+ if (au_opt_test(au_mntflags(sb), PLINK)) -+ au_plink_put(sb, verbose); -+ aufs_write_unlock(root); -+} -+ -+static int au_plink_do_half_refresh(struct inode *inode, aufs_bindex_t br_id) -+{ -+ int do_put; -+ aufs_bindex_t bstart, bend, bindex; -+ -+ do_put = 0; -+ bstart = au_ibstart(inode); -+ bend = au_ibend(inode); -+ if (bstart >= 0) { -+ for (bindex = bstart; bindex <= bend; bindex++) { -+ if (!au_h_iptr(inode, bindex) -+ || au_ii_br_id(inode, bindex) != br_id) -+ continue; -+ au_set_h_iptr(inode, bindex, NULL, 0); -+ do_put = 1; -+ break; -+ } -+ if (do_put) -+ for (bindex = bstart; bindex <= bend; bindex++) -+ if (au_h_iptr(inode, bindex)) { -+ do_put = 0; -+ break; -+ } -+ } else -+ do_put = 1; -+ -+ return do_put; -+} -+ -+/* free the plinks on a branch specified by @br_id */ -+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id) -+{ -+ struct au_sbinfo *sbinfo; -+ struct hlist_head *plink_hlist; -+ struct hlist_node *tmp; -+ struct au_icntnr *icntnr; -+ struct inode *inode; -+ int i, do_put; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK)); -+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM)); -+ -+ /* no spin_lock since sbinfo is write-locked */ -+ for (i = 0; i < AuPlink_NHASH; i++) { -+ plink_hlist = &sbinfo->si_plink[i].head; -+ hlist_for_each_entry_safe(icntnr, tmp, plink_hlist, plink) { -+ inode = au_igrab(&icntnr->vfs_inode); -+ ii_write_lock_child(inode); -+ do_put = au_plink_do_half_refresh(inode, br_id); -+ if (do_put) { -+ hlist_del(&icntnr->plink); -+ iput(inode); -+ } -+ ii_write_unlock(inode); -+ iput(inode); -+ 
} -+ } -+} -diff --git a/fs/aufs/poll.c b/fs/aufs/poll.c -new file mode 100644 -index 0000000..eea19e7 ---- /dev/null -+++ b/fs/aufs/poll.c -@@ -0,0 +1,52 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * poll operation -+ * There is only one filesystem which implements ->poll operation, currently. -+ */ -+ -+#include "aufs.h" -+ -+unsigned int aufs_poll(struct file *file, poll_table *wait) -+{ -+ unsigned int mask; -+ int err; -+ struct file *h_file; -+ struct super_block *sb; -+ -+ /* We should pretend an error happened. */ -+ mask = POLLERR /* | POLLIN | POLLOUT */; -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ /* it is not an error if h_file has no operation */ -+ mask = DEFAULT_POLLMASK; -+ if (h_file->f_op->poll) -+ mask = h_file->f_op->poll(h_file, wait); -+ fput(h_file); /* instead of au_read_post() */ -+ -+out: -+ si_read_unlock(sb); -+ AuTraceErr((int)mask); -+ return mask; -+} -diff --git a/fs/aufs/posix_acl.c b/fs/aufs/posix_acl.c -new file mode 100644 -index 0000000..89b4127 ---- /dev/null -+++ b/fs/aufs/posix_acl.c -@@ -0,0 +1,98 @@ -+/* -+ * Copyright (C) 2014-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * posix acl operations -+ */ -+ -+#include -+#include "aufs.h" -+ -+struct posix_acl *aufs_get_acl(struct inode *inode, int type) -+{ -+ struct posix_acl *acl; -+ int err; -+ aufs_bindex_t bindex; -+ struct inode *h_inode; -+ struct super_block *sb; -+ -+ acl = NULL; -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ ii_read_lock_child(inode); -+ if (!(sb->s_flags & MS_POSIXACL)) -+ goto out; -+ -+ bindex = au_ibstart(inode); -+ h_inode = au_h_iptr(inode, bindex); -+ if (unlikely(!h_inode -+ || ((h_inode->i_mode & S_IFMT) -+ != (inode->i_mode & S_IFMT)))) { -+ err = au_busy_or_stale(); -+ acl = ERR_PTR(err); -+ goto out; -+ } -+ -+ /* always topmost only */ -+ acl = get_acl(h_inode, type); -+ -+out: -+ ii_read_unlock(inode); -+ si_read_unlock(sb); -+ -+ AuTraceErrPtr(acl); -+ return acl; -+} -+ -+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type) -+{ -+ int err; -+ ssize_t ssz; -+ struct dentry *dentry; -+ struct au_srxattr arg = { -+ .type = AU_ACL_SET, -+ .u.acl_set = { -+ .acl = acl, -+ .type = type -+ }, -+ }; -+ -+ mutex_lock(&inode->i_mutex); -+ if (inode->i_ino == AUFS_ROOT_INO) -+ dentry = dget(inode->i_sb->s_root); -+ else { -+ dentry = d_find_alias(inode); -+ if (!dentry) -+ dentry = d_find_any_alias(inode); -+ if (!dentry) { -+ pr_warn("cannot handle this inode, " 
-+ "please report to aufs-users ML\n"); -+ err = -ENOENT; -+ goto out; -+ } -+ } -+ -+ ssz = au_srxattr(dentry, &arg); -+ dput(dentry); -+ err = ssz; -+ if (ssz >= 0) -+ err = 0; -+ -+out: -+ mutex_unlock(&inode->i_mutex); -+ return err; -+} -diff --git a/fs/aufs/procfs.c b/fs/aufs/procfs.c -new file mode 100644 -index 0000000..a334330 ---- /dev/null -+++ b/fs/aufs/procfs.c -@@ -0,0 +1,169 @@ -+/* -+ * Copyright (C) 2010-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * procfs interfaces -+ */ -+ -+#include -+#include "aufs.h" -+ -+static int au_procfs_plm_release(struct inode *inode, struct file *file) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ sbinfo = file->private_data; -+ if (sbinfo) { -+ au_plink_maint_leave(sbinfo); -+ kobject_put(&sbinfo->si_kobj); -+ } -+ -+ return 0; -+} -+ -+static void au_procfs_plm_write_clean(struct file *file) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ sbinfo = file->private_data; -+ if (sbinfo) -+ au_plink_clean(sbinfo->si_sb, /*verbose*/0); -+} -+ -+static int au_procfs_plm_write_si(struct file *file, unsigned long id) -+{ -+ int err; -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ -+ err = -EBUSY; -+ if (unlikely(file->private_data)) -+ goto out; -+ -+ sb = NULL; -+ /* don't use au_sbilist_lock() here */ -+ spin_lock(&au_sbilist.spin); -+ hlist_for_each_entry(sbinfo, &au_sbilist.head, si_list) -+ if (id == sysaufs_si_id(sbinfo)) { -+ kobject_get(&sbinfo->si_kobj); -+ sb = sbinfo->si_sb; -+ break; -+ } -+ spin_unlock(&au_sbilist.spin); -+ -+ err = -EINVAL; -+ if (unlikely(!sb)) -+ goto out; -+ -+ err = au_plink_maint_enter(sb); -+ if (!err) -+ /* keep kobject_get() */ -+ file->private_data = sbinfo; -+ else -+ kobject_put(&sbinfo->si_kobj); -+out: -+ return err; -+} -+ -+/* -+ * Accept a valid "si=xxxx" only. -+ * Once it is accepted successfully, accept "clean" too. 
-+ */ -+static ssize_t au_procfs_plm_write(struct file *file, const char __user *ubuf, -+ size_t count, loff_t *ppos) -+{ -+ ssize_t err; -+ unsigned long id; -+ /* last newline is allowed */ -+ char buf[3 + sizeof(unsigned long) * 2 + 1]; -+ -+ err = -EACCES; -+ if (unlikely(!capable(CAP_SYS_ADMIN))) -+ goto out; -+ -+ err = -EINVAL; -+ if (unlikely(count > sizeof(buf))) -+ goto out; -+ -+ err = copy_from_user(buf, ubuf, count); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ goto out; -+ } -+ buf[count] = 0; -+ -+ err = -EINVAL; -+ if (!strcmp("clean", buf)) { -+ au_procfs_plm_write_clean(file); -+ goto out_success; -+ } else if (unlikely(strncmp("si=", buf, 3))) -+ goto out; -+ -+ err = kstrtoul(buf + 3, 16, &id); -+ if (unlikely(err)) -+ goto out; -+ -+ err = au_procfs_plm_write_si(file, id); -+ if (unlikely(err)) -+ goto out; -+ -+out_success: -+ err = count; /* success */ -+out: -+ return err; -+} -+ -+static const struct file_operations au_procfs_plm_fop = { -+ .write = au_procfs_plm_write, -+ .release = au_procfs_plm_release, -+ .owner = THIS_MODULE -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct proc_dir_entry *au_procfs_dir; -+ -+void au_procfs_fin(void) -+{ -+ remove_proc_entry(AUFS_PLINK_MAINT_NAME, au_procfs_dir); -+ remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL); -+} -+ -+int __init au_procfs_init(void) -+{ -+ int err; -+ struct proc_dir_entry *entry; -+ -+ err = -ENOMEM; -+ au_procfs_dir = proc_mkdir(AUFS_PLINK_MAINT_DIR, NULL); -+ if (unlikely(!au_procfs_dir)) -+ goto out; -+ -+ entry = proc_create(AUFS_PLINK_MAINT_NAME, S_IFREG | S_IWUSR, -+ au_procfs_dir, &au_procfs_plm_fop); -+ if (unlikely(!entry)) -+ goto out_dir; -+ -+ err = 0; -+ goto out; /* success */ -+ -+ -+out_dir: -+ remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL); -+out: -+ return err; -+} -diff --git a/fs/aufs/rdu.c b/fs/aufs/rdu.c -new file mode 100644 -index 0000000..d22b2f8 ---- /dev/null -+++ b/fs/aufs/rdu.c -@@ -0,0 +1,388 @@ 
-+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * readdir in userspace. -+ */ -+ -+#include -+#include -+#include -+#include "aufs.h" -+ -+/* bits for struct aufs_rdu.flags */ -+#define AuRdu_CALLED 1 -+#define AuRdu_CONT (1 << 1) -+#define AuRdu_FULL (1 << 2) -+#define au_ftest_rdu(flags, name) ((flags) & AuRdu_##name) -+#define au_fset_rdu(flags, name) \ -+ do { (flags) |= AuRdu_##name; } while (0) -+#define au_fclr_rdu(flags, name) \ -+ do { (flags) &= ~AuRdu_##name; } while (0) -+ -+struct au_rdu_arg { -+ struct dir_context ctx; -+ struct aufs_rdu *rdu; -+ union au_rdu_ent_ul ent; -+ unsigned long end; -+ -+ struct super_block *sb; -+ int err; -+}; -+ -+static int au_rdu_fill(struct dir_context *ctx, const char *name, int nlen, -+ loff_t offset, u64 h_ino, unsigned int d_type) -+{ -+ int err, len; -+ struct au_rdu_arg *arg = container_of(ctx, struct au_rdu_arg, ctx); -+ struct aufs_rdu *rdu = arg->rdu; -+ struct au_rdu_ent ent; -+ -+ err = 0; -+ arg->err = 0; -+ au_fset_rdu(rdu->cookie.flags, CALLED); -+ len = au_rdu_len(nlen); -+ if (arg->ent.ul + len < arg->end) { -+ ent.ino = h_ino; -+ ent.bindex = rdu->cookie.bindex; -+ ent.type = d_type; -+ ent.nlen = nlen; -+ if (unlikely(nlen > AUFS_MAX_NAMELEN)) -+ ent.type = DT_UNKNOWN; -+ -+ /* unnecessary to support mmap_sem since this is a dir */ -+ err = 
-EFAULT; -+ if (copy_to_user(arg->ent.e, &ent, sizeof(ent))) -+ goto out; -+ if (copy_to_user(arg->ent.e->name, name, nlen)) -+ goto out; -+ /* the terminating NULL */ -+ if (__put_user(0, arg->ent.e->name + nlen)) -+ goto out; -+ err = 0; -+ /* AuDbg("%p, %.*s\n", arg->ent.p, nlen, name); */ -+ arg->ent.ul += len; -+ rdu->rent++; -+ } else { -+ err = -EFAULT; -+ au_fset_rdu(rdu->cookie.flags, FULL); -+ rdu->full = 1; -+ rdu->tail = arg->ent; -+ } -+ -+out: -+ /* AuTraceErr(err); */ -+ return err; -+} -+ -+static int au_rdu_do(struct file *h_file, struct au_rdu_arg *arg) -+{ -+ int err; -+ loff_t offset; -+ struct au_rdu_cookie *cookie = &arg->rdu->cookie; -+ -+ /* we don't have to care (FMODE_32BITHASH | FMODE_64BITHASH) for ext4 */ -+ offset = vfsub_llseek(h_file, cookie->h_pos, SEEK_SET); -+ err = offset; -+ if (unlikely(offset != cookie->h_pos)) -+ goto out; -+ -+ err = 0; -+ do { -+ arg->err = 0; -+ au_fclr_rdu(cookie->flags, CALLED); -+ /* smp_mb(); */ -+ err = vfsub_iterate_dir(h_file, &arg->ctx); -+ if (err >= 0) -+ err = arg->err; -+ } while (!err -+ && au_ftest_rdu(cookie->flags, CALLED) -+ && !au_ftest_rdu(cookie->flags, FULL)); -+ cookie->h_pos = h_file->f_pos; -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_rdu(struct file *file, struct aufs_rdu *rdu) -+{ -+ int err; -+ aufs_bindex_t bend; -+ struct au_rdu_arg arg = { -+ .ctx = { -+ .actor = au_diractor(au_rdu_fill) -+ } -+ }; -+ struct dentry *dentry; -+ struct inode *inode; -+ struct file *h_file; -+ struct au_rdu_cookie *cookie = &rdu->cookie; -+ -+ err = !access_ok(VERIFY_WRITE, rdu->ent.e, rdu->sz); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ goto out; -+ } -+ rdu->rent = 0; -+ rdu->tail = rdu->ent; -+ rdu->full = 0; -+ arg.rdu = rdu; -+ arg.ent = rdu->ent; -+ arg.end = arg.ent.ul; -+ arg.end += rdu->sz; -+ -+ err = -ENOTDIR; -+ if (unlikely(!file->f_op->iterate)) -+ goto out; -+ -+ err = security_file_permission(file, MAY_READ); -+ AuTraceErr(err); -+ if 
(unlikely(err)) -+ goto out; -+ -+ dentry = file->f_dentry; -+ inode = dentry->d_inode; -+#if 1 -+ mutex_lock(&inode->i_mutex); -+#else -+ err = mutex_lock_killable(&inode->i_mutex); -+ AuTraceErr(err); -+ if (unlikely(err)) -+ goto out; -+#endif -+ -+ arg.sb = inode->i_sb; -+ err = si_read_lock(arg.sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out_mtx; -+ err = au_alive_dir(dentry); -+ if (unlikely(err)) -+ goto out_si; -+ /* todo: reval? */ -+ fi_read_lock(file); -+ -+ err = -EAGAIN; -+ if (unlikely(au_ftest_rdu(cookie->flags, CONT) -+ && cookie->generation != au_figen(file))) -+ goto out_unlock; -+ -+ err = 0; -+ if (!rdu->blk) { -+ rdu->blk = au_sbi(arg.sb)->si_rdblk; -+ if (!rdu->blk) -+ rdu->blk = au_dir_size(file, /*dentry*/NULL); -+ } -+ bend = au_fbstart(file); -+ if (cookie->bindex < bend) -+ cookie->bindex = bend; -+ bend = au_fbend_dir(file); -+ /* AuDbg("b%d, b%d\n", cookie->bindex, bend); */ -+ for (; !err && cookie->bindex <= bend; -+ cookie->bindex++, cookie->h_pos = 0) { -+ h_file = au_hf_dir(file, cookie->bindex); -+ if (!h_file) -+ continue; -+ -+ au_fclr_rdu(cookie->flags, FULL); -+ err = au_rdu_do(h_file, &arg); -+ AuTraceErr(err); -+ if (unlikely(au_ftest_rdu(cookie->flags, FULL) || err)) -+ break; -+ } -+ AuDbg("rent %llu\n", rdu->rent); -+ -+ if (!err && !au_ftest_rdu(cookie->flags, CONT)) { -+ rdu->shwh = !!au_opt_test(au_sbi(arg.sb)->si_mntflags, SHWH); -+ au_fset_rdu(cookie->flags, CONT); -+ cookie->generation = au_figen(file); -+ } -+ -+ ii_read_lock_child(inode); -+ fsstack_copy_attr_atime(inode, au_h_iptr(inode, au_ibstart(inode))); -+ ii_read_unlock(inode); -+ -+out_unlock: -+ fi_read_unlock(file); -+out_si: -+ si_read_unlock(arg.sb); -+out_mtx: -+ mutex_unlock(&inode->i_mutex); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_rdu_ino(struct file *file, struct aufs_rdu *rdu) -+{ -+ int err; -+ ino_t ino; -+ unsigned long long nent; -+ union au_rdu_ent_ul *u; -+ struct au_rdu_ent ent; -+ struct 
super_block *sb; -+ -+ err = 0; -+ nent = rdu->nent; -+ u = &rdu->ent; -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ while (nent-- > 0) { -+ /* unnecessary to support mmap_sem since this is a dir */ -+ err = copy_from_user(&ent, u->e, sizeof(ent)); -+ if (!err) -+ err = !access_ok(VERIFY_WRITE, &u->e->ino, sizeof(ino)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ break; -+ } -+ -+ /* AuDbg("b%d, i%llu\n", ent.bindex, ent.ino); */ -+ if (!ent.wh) -+ err = au_ino(sb, ent.bindex, ent.ino, ent.type, &ino); -+ else -+ err = au_wh_ino(sb, ent.bindex, ent.ino, ent.type, -+ &ino); -+ if (unlikely(err)) { -+ AuTraceErr(err); -+ break; -+ } -+ -+ err = __put_user(ino, &u->e->ino); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ break; -+ } -+ u->ul += au_rdu_len(ent.nlen); -+ } -+ si_read_unlock(sb); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_rdu_verify(struct aufs_rdu *rdu) -+{ -+ AuDbg("rdu{%llu, %p, %u | %u | %llu, %u, %u | " -+ "%llu, b%d, 0x%x, g%u}\n", -+ rdu->sz, rdu->ent.e, rdu->verify[AufsCtlRduV_SZ], -+ rdu->blk, -+ rdu->rent, rdu->shwh, rdu->full, -+ rdu->cookie.h_pos, rdu->cookie.bindex, rdu->cookie.flags, -+ rdu->cookie.generation); -+ -+ if (rdu->verify[AufsCtlRduV_SZ] == sizeof(*rdu)) -+ return 0; -+ -+ AuDbg("%u:%u\n", -+ rdu->verify[AufsCtlRduV_SZ], (unsigned int)sizeof(*rdu)); -+ return -EINVAL; -+} -+ -+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ long err, e; -+ struct aufs_rdu rdu; -+ void __user *p = (void __user *)arg; -+ -+ err = copy_from_user(&rdu, p, sizeof(rdu)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ goto out; -+ } -+ err = au_rdu_verify(&rdu); -+ if (unlikely(err)) -+ goto out; -+ -+ switch (cmd) { -+ case AUFS_CTL_RDU: -+ err = au_rdu(file, &rdu); -+ if (unlikely(err)) -+ break; -+ -+ e = copy_to_user(p, &rdu, sizeof(rdu)); -+ if (unlikely(e)) { 
-+ err = -EFAULT; -+ AuTraceErr(err); -+ } -+ break; -+ case AUFS_CTL_RDU_INO: -+ err = au_rdu_ino(file, &rdu); -+ break; -+ -+ default: -+ /* err = -ENOTTY; */ -+ err = -EINVAL; -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+#ifdef CONFIG_COMPAT -+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ long err, e; -+ struct aufs_rdu rdu; -+ void __user *p = compat_ptr(arg); -+ -+ /* todo: get_user()? */ -+ err = copy_from_user(&rdu, p, sizeof(rdu)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ goto out; -+ } -+ rdu.ent.e = compat_ptr(rdu.ent.ul); -+ err = au_rdu_verify(&rdu); -+ if (unlikely(err)) -+ goto out; -+ -+ switch (cmd) { -+ case AUFS_CTL_RDU: -+ err = au_rdu(file, &rdu); -+ if (unlikely(err)) -+ break; -+ -+ rdu.ent.ul = ptr_to_compat(rdu.ent.e); -+ rdu.tail.ul = ptr_to_compat(rdu.tail.e); -+ e = copy_to_user(p, &rdu, sizeof(rdu)); -+ if (unlikely(e)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ } -+ break; -+ case AUFS_CTL_RDU_INO: -+ err = au_rdu_ino(file, &rdu); -+ break; -+ -+ default: -+ /* err = -ENOTTY; */ -+ err = -EINVAL; -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+#endif -diff --git a/fs/aufs/rwsem.h b/fs/aufs/rwsem.h -new file mode 100644 -index 0000000..09ed5a0 ---- /dev/null -+++ b/fs/aufs/rwsem.h -@@ -0,0 +1,191 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * simple read-write semaphore wrappers -+ */ -+ -+#ifndef __AUFS_RWSEM_H__ -+#define __AUFS_RWSEM_H__ -+ -+#ifdef __KERNEL__ -+ -+#include "debug.h" -+ -+struct au_rwsem { -+ struct rw_semaphore rwsem; -+#ifdef CONFIG_AUFS_DEBUG -+ /* just for debugging, not almighty counter */ -+ atomic_t rcnt, wcnt; -+#endif -+}; -+ -+#ifdef CONFIG_AUFS_DEBUG -+#define AuDbgCntInit(rw) do { \ -+ atomic_set(&(rw)->rcnt, 0); \ -+ atomic_set(&(rw)->wcnt, 0); \ -+ smp_mb(); /* atomic set */ \ -+} while (0) -+ -+#define AuDbgRcntInc(rw) atomic_inc(&(rw)->rcnt) -+#define AuDbgRcntDec(rw) WARN_ON(atomic_dec_return(&(rw)->rcnt) < 0) -+#define AuDbgWcntInc(rw) atomic_inc(&(rw)->wcnt) -+#define AuDbgWcntDec(rw) WARN_ON(atomic_dec_return(&(rw)->wcnt) < 0) -+#else -+#define AuDbgCntInit(rw) do {} while (0) -+#define AuDbgRcntInc(rw) do {} while (0) -+#define AuDbgRcntDec(rw) do {} while (0) -+#define AuDbgWcntInc(rw) do {} while (0) -+#define AuDbgWcntDec(rw) do {} while (0) -+#endif /* CONFIG_AUFS_DEBUG */ -+ -+/* to debug easier, do not make them inlined functions */ -+#define AuRwMustNoWaiters(rw) AuDebugOn(!list_empty(&(rw)->rwsem.wait_list)) -+/* rwsem_is_locked() is unusable */ -+#define AuRwMustReadLock(rw) AuDebugOn(atomic_read(&(rw)->rcnt) <= 0) -+#define AuRwMustWriteLock(rw) AuDebugOn(atomic_read(&(rw)->wcnt) <= 0) -+#define AuRwMustAnyLock(rw) AuDebugOn(atomic_read(&(rw)->rcnt) <= 0 \ -+ && atomic_read(&(rw)->wcnt) <= 0) -+#define AuRwDestroy(rw) AuDebugOn(atomic_read(&(rw)->rcnt) \ -+ || atomic_read(&(rw)->wcnt)) -+ -+#define au_rw_class(rw, key) lockdep_set_class(&(rw)->rwsem, key) -+ -+static inline void au_rw_init(struct au_rwsem *rw) -+{ -+ AuDbgCntInit(rw); -+ init_rwsem(&rw->rwsem); -+} -+ -+static inline void au_rw_init_wlock(struct au_rwsem *rw) -+{ -+ au_rw_init(rw); -+ down_write(&rw->rwsem); -+ AuDbgWcntInc(rw); -+} -+ -+static 
inline void au_rw_init_wlock_nested(struct au_rwsem *rw, -+ unsigned int lsc) -+{ -+ au_rw_init(rw); -+ down_write_nested(&rw->rwsem, lsc); -+ AuDbgWcntInc(rw); -+} -+ -+static inline void au_rw_read_lock(struct au_rwsem *rw) -+{ -+ down_read(&rw->rwsem); -+ AuDbgRcntInc(rw); -+} -+ -+static inline void au_rw_read_lock_nested(struct au_rwsem *rw, unsigned int lsc) -+{ -+ down_read_nested(&rw->rwsem, lsc); -+ AuDbgRcntInc(rw); -+} -+ -+static inline void au_rw_read_unlock(struct au_rwsem *rw) -+{ -+ AuRwMustReadLock(rw); -+ AuDbgRcntDec(rw); -+ up_read(&rw->rwsem); -+} -+ -+static inline void au_rw_dgrade_lock(struct au_rwsem *rw) -+{ -+ AuRwMustWriteLock(rw); -+ AuDbgRcntInc(rw); -+ AuDbgWcntDec(rw); -+ downgrade_write(&rw->rwsem); -+} -+ -+static inline void au_rw_write_lock(struct au_rwsem *rw) -+{ -+ down_write(&rw->rwsem); -+ AuDbgWcntInc(rw); -+} -+ -+static inline void au_rw_write_lock_nested(struct au_rwsem *rw, -+ unsigned int lsc) -+{ -+ down_write_nested(&rw->rwsem, lsc); -+ AuDbgWcntInc(rw); -+} -+ -+static inline void au_rw_write_unlock(struct au_rwsem *rw) -+{ -+ AuRwMustWriteLock(rw); -+ AuDbgWcntDec(rw); -+ up_write(&rw->rwsem); -+} -+ -+/* why is not _nested version defined */ -+static inline int au_rw_read_trylock(struct au_rwsem *rw) -+{ -+ int ret; -+ -+ ret = down_read_trylock(&rw->rwsem); -+ if (ret) -+ AuDbgRcntInc(rw); -+ return ret; -+} -+ -+static inline int au_rw_write_trylock(struct au_rwsem *rw) -+{ -+ int ret; -+ -+ ret = down_write_trylock(&rw->rwsem); -+ if (ret) -+ AuDbgWcntInc(rw); -+ return ret; -+} -+ -+#undef AuDbgCntInit -+#undef AuDbgRcntInc -+#undef AuDbgRcntDec -+#undef AuDbgWcntInc -+#undef AuDbgWcntDec -+ -+#define AuSimpleLockRwsemFuncs(prefix, param, rwsem) \ -+static inline void prefix##_read_lock(param) \ -+{ au_rw_read_lock(rwsem); } \ -+static inline void prefix##_write_lock(param) \ -+{ au_rw_write_lock(rwsem); } \ -+static inline int prefix##_read_trylock(param) \ -+{ return au_rw_read_trylock(rwsem); } \ -+static 
inline int prefix##_write_trylock(param) \ -+{ return au_rw_write_trylock(rwsem); } -+/* why is not _nested version defined */ -+/* static inline void prefix##_read_trylock_nested(param, lsc) -+{ au_rw_read_trylock_nested(rwsem, lsc)); } -+static inline void prefix##_write_trylock_nestd(param, lsc) -+{ au_rw_write_trylock_nested(rwsem, lsc); } */ -+ -+#define AuSimpleUnlockRwsemFuncs(prefix, param, rwsem) \ -+static inline void prefix##_read_unlock(param) \ -+{ au_rw_read_unlock(rwsem); } \ -+static inline void prefix##_write_unlock(param) \ -+{ au_rw_write_unlock(rwsem); } \ -+static inline void prefix##_downgrade_lock(param) \ -+{ au_rw_dgrade_lock(rwsem); } -+ -+#define AuSimpleRwsemFuncs(prefix, param, rwsem) \ -+ AuSimpleLockRwsemFuncs(prefix, param, rwsem) \ -+ AuSimpleUnlockRwsemFuncs(prefix, param, rwsem) -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_RWSEM_H__ */ -diff --git a/fs/aufs/sbinfo.c b/fs/aufs/sbinfo.c -new file mode 100644 -index 0000000..ff13c9f ---- /dev/null -+++ b/fs/aufs/sbinfo.c -@@ -0,0 +1,348 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * superblock private data -+ */ -+ -+#include "aufs.h" -+ -+/* -+ * they are necessary regardless sysfs is disabled. 
-+ */ -+void au_si_free(struct kobject *kobj) -+{ -+ int i; -+ struct au_sbinfo *sbinfo; -+ char *locked __maybe_unused; /* debug only */ -+ -+ sbinfo = container_of(kobj, struct au_sbinfo, si_kobj); -+ for (i = 0; i < AuPlink_NHASH; i++) -+ AuDebugOn(!hlist_empty(&sbinfo->si_plink[i].head)); -+ AuDebugOn(atomic_read(&sbinfo->si_nowait.nw_len)); -+ -+ au_rw_write_lock(&sbinfo->si_rwsem); -+ au_br_free(sbinfo); -+ au_rw_write_unlock(&sbinfo->si_rwsem); -+ -+ kfree(sbinfo->si_branch); -+ for (i = 0; i < AU_NPIDMAP; i++) -+ kfree(sbinfo->au_si_pid.pid_bitmap[i]); -+ mutex_destroy(&sbinfo->au_si_pid.pid_mtx); -+ mutex_destroy(&sbinfo->si_xib_mtx); -+ AuRwDestroy(&sbinfo->si_rwsem); -+ -+ kfree(sbinfo); -+} -+ -+int au_si_alloc(struct super_block *sb) -+{ -+ int err, i; -+ struct au_sbinfo *sbinfo; -+ static struct lock_class_key aufs_si; -+ -+ err = -ENOMEM; -+ sbinfo = kzalloc(sizeof(*sbinfo), GFP_NOFS); -+ if (unlikely(!sbinfo)) -+ goto out; -+ -+ /* will be reallocated separately */ -+ sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS); -+ if (unlikely(!sbinfo->si_branch)) -+ goto out_sbinfo; -+ -+ err = sysaufs_si_init(sbinfo); -+ if (unlikely(err)) -+ goto out_br; -+ -+ au_nwt_init(&sbinfo->si_nowait); -+ au_rw_init_wlock(&sbinfo->si_rwsem); -+ au_rw_class(&sbinfo->si_rwsem, &aufs_si); -+ mutex_init(&sbinfo->au_si_pid.pid_mtx); -+ -+ atomic_long_set(&sbinfo->si_ninodes, 0); -+ atomic_long_set(&sbinfo->si_nfiles, 0); -+ -+ sbinfo->si_bend = -1; -+ sbinfo->si_last_br_id = AUFS_BRANCH_MAX / 2; -+ -+ sbinfo->si_wbr_copyup = AuWbrCopyup_Def; -+ sbinfo->si_wbr_create = AuWbrCreate_Def; -+ sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + sbinfo->si_wbr_copyup; -+ sbinfo->si_wbr_create_ops = au_wbr_create_ops + sbinfo->si_wbr_create; -+ -+ au_fhsm_init(sbinfo); -+ -+ sbinfo->si_mntflags = au_opts_plink(AuOpt_Def); -+ -+ sbinfo->si_xino_jiffy = jiffies; -+ sbinfo->si_xino_expire -+ = msecs_to_jiffies(AUFS_XINO_DEF_SEC * MSEC_PER_SEC); -+ 
mutex_init(&sbinfo->si_xib_mtx); -+ sbinfo->si_xino_brid = -1; -+ /* leave si_xib_last_pindex and si_xib_next_bit */ -+ -+ au_sphl_init(&sbinfo->si_aopen); -+ -+ sbinfo->si_rdcache = msecs_to_jiffies(AUFS_RDCACHE_DEF * MSEC_PER_SEC); -+ sbinfo->si_rdblk = AUFS_RDBLK_DEF; -+ sbinfo->si_rdhash = AUFS_RDHASH_DEF; -+ sbinfo->si_dirwh = AUFS_DIRWH_DEF; -+ -+ for (i = 0; i < AuPlink_NHASH; i++) -+ au_sphl_init(sbinfo->si_plink + i); -+ init_waitqueue_head(&sbinfo->si_plink_wq); -+ spin_lock_init(&sbinfo->si_plink_maint_lock); -+ -+ au_sphl_init(&sbinfo->si_files); -+ -+ /* with getattr by default */ -+ sbinfo->si_iop_array = aufs_iop; -+ -+ /* leave other members for sysaufs and si_mnt. */ -+ sbinfo->si_sb = sb; -+ sb->s_fs_info = sbinfo; -+ si_pid_set(sb); -+ return 0; /* success */ -+ -+out_br: -+ kfree(sbinfo->si_branch); -+out_sbinfo: -+ kfree(sbinfo); -+out: -+ return err; -+} -+ -+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr) -+{ -+ int err, sz; -+ struct au_branch **brp; -+ -+ AuRwMustWriteLock(&sbinfo->si_rwsem); -+ -+ err = -ENOMEM; -+ sz = sizeof(*brp) * (sbinfo->si_bend + 1); -+ if (unlikely(!sz)) -+ sz = sizeof(*brp); -+ brp = au_kzrealloc(sbinfo->si_branch, sz, sizeof(*brp) * nbr, GFP_NOFS); -+ if (brp) { -+ sbinfo->si_branch = brp; -+ err = 0; -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+unsigned int au_sigen_inc(struct super_block *sb) -+{ -+ unsigned int gen; -+ -+ SiMustWriteLock(sb); -+ -+ gen = ++au_sbi(sb)->si_generation; -+ au_update_digen(sb->s_root); -+ au_update_iigen(sb->s_root->d_inode, /*half*/0); -+ sb->s_root->d_inode->i_version++; -+ return gen; -+} -+ -+aufs_bindex_t au_new_br_id(struct super_block *sb) -+{ -+ aufs_bindex_t br_id; -+ int i; -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ for (i = 0; i <= AUFS_BRANCH_MAX; i++) { -+ br_id = ++sbinfo->si_last_br_id; -+ AuDebugOn(br_id < 0); -+ if (br_id && au_br_index(sb, br_id) < 
0) -+ return br_id; -+ } -+ -+ return -1; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* it is ok that new 'nwt' tasks are appended while we are sleeping */ -+int si_read_lock(struct super_block *sb, int flags) -+{ -+ int err; -+ -+ err = 0; -+ if (au_ftest_lock(flags, FLUSH)) -+ au_nwt_flush(&au_sbi(sb)->si_nowait); -+ -+ si_noflush_read_lock(sb); -+ err = au_plink_maint(sb, flags); -+ if (unlikely(err)) -+ si_read_unlock(sb); -+ -+ return err; -+} -+ -+int si_write_lock(struct super_block *sb, int flags) -+{ -+ int err; -+ -+ if (au_ftest_lock(flags, FLUSH)) -+ au_nwt_flush(&au_sbi(sb)->si_nowait); -+ -+ si_noflush_write_lock(sb); -+ err = au_plink_maint(sb, flags); -+ if (unlikely(err)) -+ si_write_unlock(sb); -+ -+ return err; -+} -+ -+/* dentry and super_block lock. call at entry point */ -+int aufs_read_lock(struct dentry *dentry, int flags) -+{ -+ int err; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ err = si_read_lock(sb, flags); -+ if (unlikely(err)) -+ goto out; -+ -+ if (au_ftest_lock(flags, DW)) -+ di_write_lock_child(dentry); -+ else -+ di_read_lock_child(dentry, flags); -+ -+ if (au_ftest_lock(flags, GEN)) { -+ err = au_digen_test(dentry, au_sigen(sb)); -+ if (!au_opt_test(au_mntflags(sb), UDBA_NONE)) -+ AuDebugOn(!err && au_dbrange_test(dentry)); -+ else if (!err) -+ err = au_dbrange_test(dentry); -+ if (unlikely(err)) -+ aufs_read_unlock(dentry, flags); -+ } -+ -+out: -+ return err; -+} -+ -+void aufs_read_unlock(struct dentry *dentry, int flags) -+{ -+ if (au_ftest_lock(flags, DW)) -+ di_write_unlock(dentry); -+ else -+ di_read_unlock(dentry, flags); -+ si_read_unlock(dentry->d_sb); -+} -+ -+void aufs_write_lock(struct dentry *dentry) -+{ -+ si_write_lock(dentry->d_sb, AuLock_FLUSH | AuLock_NOPLMW); -+ di_write_lock_child(dentry); -+} -+ -+void aufs_write_unlock(struct dentry *dentry) -+{ -+ di_write_unlock(dentry); -+ si_write_unlock(dentry->d_sb); -+} -+ -+int 
aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags) -+{ -+ int err; -+ unsigned int sigen; -+ struct super_block *sb; -+ -+ sb = d1->d_sb; -+ err = si_read_lock(sb, flags); -+ if (unlikely(err)) -+ goto out; -+ -+ di_write_lock2_child(d1, d2, au_ftest_lock(flags, DIRS)); -+ -+ if (au_ftest_lock(flags, GEN)) { -+ sigen = au_sigen(sb); -+ err = au_digen_test(d1, sigen); -+ AuDebugOn(!err && au_dbrange_test(d1)); -+ if (!err) { -+ err = au_digen_test(d2, sigen); -+ AuDebugOn(!err && au_dbrange_test(d2)); -+ } -+ if (unlikely(err)) -+ aufs_read_and_write_unlock2(d1, d2); -+ } -+ -+out: -+ return err; -+} -+ -+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2) -+{ -+ di_write_unlock2(d1, d2); -+ si_read_unlock(d1->d_sb); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void si_pid_alloc(struct au_si_pid *au_si_pid, int idx) -+{ -+ unsigned long *p; -+ -+ BUILD_BUG_ON(sizeof(unsigned long) != -+ sizeof(*au_si_pid->pid_bitmap)); -+ -+ mutex_lock(&au_si_pid->pid_mtx); -+ p = au_si_pid->pid_bitmap[idx]; -+ while (!p) { -+ /* -+ * bad approach. -+ * but keeping 'si_pid_set()' void is more important. 
-+ */ -+ p = kcalloc(BITS_TO_LONGS(AU_PIDSTEP), -+ sizeof(*au_si_pid->pid_bitmap), -+ GFP_NOFS); -+ if (p) -+ break; -+ cond_resched(); -+ } -+ au_si_pid->pid_bitmap[idx] = p; -+ mutex_unlock(&au_si_pid->pid_mtx); -+} -+ -+void si_pid_set(struct super_block *sb) -+{ -+ pid_t bit; -+ int idx; -+ unsigned long *bitmap; -+ struct au_si_pid *au_si_pid; -+ -+ si_pid_idx_bit(&idx, &bit); -+ au_si_pid = &au_sbi(sb)->au_si_pid; -+ bitmap = au_si_pid->pid_bitmap[idx]; -+ if (!bitmap) { -+ si_pid_alloc(au_si_pid, idx); -+ bitmap = au_si_pid->pid_bitmap[idx]; -+ } -+ AuDebugOn(test_bit(bit, bitmap)); -+ set_bit(bit, bitmap); -+ /* smp_mb(); */ -+} -diff --git a/fs/aufs/spl.h b/fs/aufs/spl.h -new file mode 100644 -index 0000000..945343a ---- /dev/null -+++ b/fs/aufs/spl.h -@@ -0,0 +1,111 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * simple list protected by a spinlock -+ */ -+ -+#ifndef __AUFS_SPL_H__ -+#define __AUFS_SPL_H__ -+ -+#ifdef __KERNEL__ -+ -+struct au_splhead { -+ spinlock_t spin; -+ struct list_head head; -+}; -+ -+static inline void au_spl_init(struct au_splhead *spl) -+{ -+ spin_lock_init(&spl->spin); -+ INIT_LIST_HEAD(&spl->head); -+} -+ -+static inline void au_spl_add(struct list_head *list, struct au_splhead *spl) -+{ -+ spin_lock(&spl->spin); -+ list_add(list, &spl->head); -+ spin_unlock(&spl->spin); -+} -+ -+static inline void au_spl_del(struct list_head *list, struct au_splhead *spl) -+{ -+ spin_lock(&spl->spin); -+ list_del(list); -+ spin_unlock(&spl->spin); -+} -+ -+static inline void au_spl_del_rcu(struct list_head *list, -+ struct au_splhead *spl) -+{ -+ spin_lock(&spl->spin); -+ list_del_rcu(list); -+ spin_unlock(&spl->spin); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_sphlhead { -+ spinlock_t spin; -+ struct hlist_head head; -+}; -+ -+static inline void au_sphl_init(struct au_sphlhead *sphl) -+{ -+ spin_lock_init(&sphl->spin); -+ INIT_HLIST_HEAD(&sphl->head); -+} -+ -+static inline void au_sphl_add(struct hlist_node *hlist, -+ struct au_sphlhead *sphl) -+{ -+ spin_lock(&sphl->spin); -+ hlist_add_head(hlist, &sphl->head); -+ spin_unlock(&sphl->spin); -+} -+ -+static inline void au_sphl_del(struct hlist_node *hlist, -+ struct au_sphlhead *sphl) -+{ -+ spin_lock(&sphl->spin); -+ hlist_del(hlist); -+ spin_unlock(&sphl->spin); -+} -+ -+static inline void au_sphl_del_rcu(struct hlist_node *hlist, -+ struct au_sphlhead *sphl) -+{ -+ spin_lock(&sphl->spin); -+ hlist_del_rcu(hlist); -+ spin_unlock(&sphl->spin); -+} -+ -+static inline unsigned long au_sphl_count(struct au_sphlhead *sphl) -+{ -+ unsigned long cnt; -+ struct hlist_node *pos; -+ -+ cnt = 0; -+ spin_lock(&sphl->spin); -+ hlist_for_each(pos, &sphl->head) -+ cnt++; -+ spin_unlock(&sphl->spin); -+ return cnt; -+} -+ -+#endif /* __KERNEL__ */ 
-+#endif /* __AUFS_SPL_H__ */ -diff --git a/fs/aufs/super.c b/fs/aufs/super.c -new file mode 100644 -index 0000000..64a6bb4 ---- /dev/null -+++ b/fs/aufs/super.c -@@ -0,0 +1,1041 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * mount and super_block operations -+ */ -+ -+#include -+#include -+#include -+#include -+#include "aufs.h" -+ -+/* -+ * super_operations -+ */ -+static struct inode *aufs_alloc_inode(struct super_block *sb __maybe_unused) -+{ -+ struct au_icntnr *c; -+ -+ c = au_cache_alloc_icntnr(); -+ if (c) { -+ au_icntnr_init(c); -+ c->vfs_inode.i_version = 1; /* sigen(sb); */ -+ c->iinfo.ii_hinode = NULL; -+ return &c->vfs_inode; -+ } -+ return NULL; -+} -+ -+static void aufs_destroy_inode_cb(struct rcu_head *head) -+{ -+ struct inode *inode = container_of(head, struct inode, i_rcu); -+ -+ INIT_HLIST_HEAD(&inode->i_dentry); -+ au_cache_free_icntnr(container_of(inode, struct au_icntnr, vfs_inode)); -+} -+ -+static void aufs_destroy_inode(struct inode *inode) -+{ -+ au_iinfo_fin(inode); -+ call_rcu(&inode->i_rcu, aufs_destroy_inode_cb); -+} -+ -+struct inode *au_iget_locked(struct super_block *sb, ino_t ino) -+{ -+ struct inode *inode; -+ int err; -+ -+ inode = iget_locked(sb, ino); -+ if (unlikely(!inode)) { -+ inode = ERR_PTR(-ENOMEM); -+ goto out; -+ } -+ if (!(inode->i_state & I_NEW)) -+ 
goto out; -+ -+ err = au_xigen_new(inode); -+ if (!err) -+ err = au_iinfo_init(inode); -+ if (!err) -+ inode->i_version++; -+ else { -+ iget_failed(inode); -+ inode = ERR_PTR(err); -+ } -+ -+out: -+ /* never return NULL */ -+ AuDebugOn(!inode); -+ AuTraceErrPtr(inode); -+ return inode; -+} -+ -+/* lock free root dinfo */ -+static int au_show_brs(struct seq_file *seq, struct super_block *sb) -+{ -+ int err; -+ aufs_bindex_t bindex, bend; -+ struct path path; -+ struct au_hdentry *hdp; -+ struct au_branch *br; -+ au_br_perm_str_t perm; -+ -+ err = 0; -+ bend = au_sbend(sb); -+ hdp = au_di(sb->s_root)->di_hdentry; -+ for (bindex = 0; !err && bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ path.mnt = au_br_mnt(br); -+ path.dentry = hdp[bindex].hd_dentry; -+ err = au_seq_path(seq, &path); -+ if (!err) { -+ au_optstr_br_perm(&perm, br->br_perm); -+ err = seq_printf(seq, "=%s", perm.a); -+ if (err == -1) -+ err = -E2BIG; -+ } -+ if (!err && bindex != bend) -+ err = seq_putc(seq, ':'); -+ } -+ -+ return err; -+} -+ -+static void au_show_wbr_create(struct seq_file *m, int v, -+ struct au_sbinfo *sbinfo) -+{ -+ const char *pat; -+ -+ AuRwMustAnyLock(&sbinfo->si_rwsem); -+ -+ seq_puts(m, ",create="); -+ pat = au_optstr_wbr_create(v); -+ switch (v) { -+ case AuWbrCreate_TDP: -+ case AuWbrCreate_RR: -+ case AuWbrCreate_MFS: -+ case AuWbrCreate_PMFS: -+ seq_puts(m, pat); -+ break; -+ case AuWbrCreate_MFSV: -+ seq_printf(m, /*pat*/"mfs:%lu", -+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire) -+ / MSEC_PER_SEC); -+ break; -+ case AuWbrCreate_PMFSV: -+ seq_printf(m, /*pat*/"pmfs:%lu", -+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire) -+ / MSEC_PER_SEC); -+ break; -+ case AuWbrCreate_MFSRR: -+ seq_printf(m, /*pat*/"mfsrr:%llu", -+ sbinfo->si_wbr_mfs.mfsrr_watermark); -+ break; -+ case AuWbrCreate_MFSRRV: -+ seq_printf(m, /*pat*/"mfsrr:%llu:%lu", -+ sbinfo->si_wbr_mfs.mfsrr_watermark, -+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire) -+ / MSEC_PER_SEC); -+ break; -+ case 
AuWbrCreate_PMFSRR: -+ seq_printf(m, /*pat*/"pmfsrr:%llu", -+ sbinfo->si_wbr_mfs.mfsrr_watermark); -+ break; -+ case AuWbrCreate_PMFSRRV: -+ seq_printf(m, /*pat*/"pmfsrr:%llu:%lu", -+ sbinfo->si_wbr_mfs.mfsrr_watermark, -+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire) -+ / MSEC_PER_SEC); -+ break; -+ } -+} -+ -+static int au_show_xino(struct seq_file *seq, struct super_block *sb) -+{ -+#ifdef CONFIG_SYSFS -+ return 0; -+#else -+ int err; -+ const int len = sizeof(AUFS_XINO_FNAME) - 1; -+ aufs_bindex_t bindex, brid; -+ struct qstr *name; -+ struct file *f; -+ struct dentry *d, *h_root; -+ struct au_hdentry *hdp; -+ -+ AuRwMustAnyLock(&sbinfo->si_rwsem); -+ -+ err = 0; -+ f = au_sbi(sb)->si_xib; -+ if (!f) -+ goto out; -+ -+ /* stop printing the default xino path on the first writable branch */ -+ h_root = NULL; -+ brid = au_xino_brid(sb); -+ if (brid >= 0) { -+ bindex = au_br_index(sb, brid); -+ hdp = au_di(sb->s_root)->di_hdentry; -+ h_root = hdp[0 + bindex].hd_dentry; -+ } -+ d = f->f_dentry; -+ name = &d->d_name; -+ /* safe ->d_parent because the file is unlinked */ -+ if (d->d_parent == h_root -+ && name->len == len -+ && !memcmp(name->name, AUFS_XINO_FNAME, len)) -+ goto out; -+ -+ seq_puts(seq, ",xino="); -+ err = au_xino_path(seq, f); -+ -+out: -+ return err; -+#endif -+} -+ -+/* seq_file will re-call me in case of too long string */ -+static int aufs_show_options(struct seq_file *m, struct dentry *dentry) -+{ -+ int err; -+ unsigned int mnt_flags, v; -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ -+#define AuBool(name, str) do { \ -+ v = au_opt_test(mnt_flags, name); \ -+ if (v != au_opt_test(AuOpt_Def, name)) \ -+ seq_printf(m, ",%s" #str, v ? 
"" : "no"); \ -+} while (0) -+ -+#define AuStr(name, str) do { \ -+ v = mnt_flags & AuOptMask_##name; \ -+ if (v != (AuOpt_Def & AuOptMask_##name)) \ -+ seq_printf(m, "," #str "=%s", au_optstr_##str(v)); \ -+} while (0) -+ -+#define AuUInt(name, str, val) do { \ -+ if (val != AUFS_##name##_DEF) \ -+ seq_printf(m, "," #str "=%u", val); \ -+} while (0) -+ -+ sb = dentry->d_sb; -+ if (sb->s_flags & MS_POSIXACL) -+ seq_puts(m, ",acl"); -+ -+ /* lock free root dinfo */ -+ si_noflush_read_lock(sb); -+ sbinfo = au_sbi(sb); -+ seq_printf(m, ",si=%lx", sysaufs_si_id(sbinfo)); -+ -+ mnt_flags = au_mntflags(sb); -+ if (au_opt_test(mnt_flags, XINO)) { -+ err = au_show_xino(m, sb); -+ if (unlikely(err)) -+ goto out; -+ } else -+ seq_puts(m, ",noxino"); -+ -+ AuBool(TRUNC_XINO, trunc_xino); -+ AuStr(UDBA, udba); -+ AuBool(SHWH, shwh); -+ AuBool(PLINK, plink); -+ AuBool(DIO, dio); -+ AuBool(DIRPERM1, dirperm1); -+ /* AuBool(REFROF, refrof); */ -+ -+ v = sbinfo->si_wbr_create; -+ if (v != AuWbrCreate_Def) -+ au_show_wbr_create(m, v, sbinfo); -+ -+ v = sbinfo->si_wbr_copyup; -+ if (v != AuWbrCopyup_Def) -+ seq_printf(m, ",cpup=%s", au_optstr_wbr_copyup(v)); -+ -+ v = au_opt_test(mnt_flags, ALWAYS_DIROPQ); -+ if (v != au_opt_test(AuOpt_Def, ALWAYS_DIROPQ)) -+ seq_printf(m, ",diropq=%c", v ? 
'a' : 'w'); -+ -+ AuUInt(DIRWH, dirwh, sbinfo->si_dirwh); -+ -+ v = jiffies_to_msecs(sbinfo->si_rdcache) / MSEC_PER_SEC; -+ AuUInt(RDCACHE, rdcache, v); -+ -+ AuUInt(RDBLK, rdblk, sbinfo->si_rdblk); -+ AuUInt(RDHASH, rdhash, sbinfo->si_rdhash); -+ -+ au_fhsm_show(m, sbinfo); -+ -+ AuBool(SUM, sum); -+ /* AuBool(SUM_W, wsum); */ -+ AuBool(WARN_PERM, warn_perm); -+ AuBool(VERBOSE, verbose); -+ -+out: -+ /* be sure to print "br:" last */ -+ if (!sysaufs_brs) { -+ seq_puts(m, ",br:"); -+ au_show_brs(m, sb); -+ } -+ si_read_unlock(sb); -+ return 0; -+ -+#undef AuBool -+#undef AuStr -+#undef AuUInt -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* sum mode which returns the summation for statfs(2) */ -+ -+static u64 au_add_till_max(u64 a, u64 b) -+{ -+ u64 old; -+ -+ old = a; -+ a += b; -+ if (old <= a) -+ return a; -+ return ULLONG_MAX; -+} -+ -+static u64 au_mul_till_max(u64 a, long mul) -+{ -+ u64 old; -+ -+ old = a; -+ a *= mul; -+ if (old <= a) -+ return a; -+ return ULLONG_MAX; -+} -+ -+static int au_statfs_sum(struct super_block *sb, struct kstatfs *buf) -+{ -+ int err; -+ long bsize, factor; -+ u64 blocks, bfree, bavail, files, ffree; -+ aufs_bindex_t bend, bindex, i; -+ unsigned char shared; -+ struct path h_path; -+ struct super_block *h_sb; -+ -+ err = 0; -+ bsize = LONG_MAX; -+ files = 0; -+ ffree = 0; -+ blocks = 0; -+ bfree = 0; -+ bavail = 0; -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ h_path.mnt = au_sbr_mnt(sb, bindex); -+ h_sb = h_path.mnt->mnt_sb; -+ shared = 0; -+ for (i = 0; !shared && i < bindex; i++) -+ shared = (au_sbr_sb(sb, i) == h_sb); -+ if (shared) -+ continue; -+ -+ /* sb->s_root for NFS is unreliable */ -+ h_path.dentry = h_path.mnt->mnt_root; -+ err = vfs_statfs(&h_path, buf); -+ if (unlikely(err)) -+ goto out; -+ -+ if (bsize > buf->f_bsize) { -+ /* -+ * we will reduce bsize, so we have to expand blocks -+ * etc. 
to match them again -+ */ -+ factor = (bsize / buf->f_bsize); -+ blocks = au_mul_till_max(blocks, factor); -+ bfree = au_mul_till_max(bfree, factor); -+ bavail = au_mul_till_max(bavail, factor); -+ bsize = buf->f_bsize; -+ } -+ -+ factor = (buf->f_bsize / bsize); -+ blocks = au_add_till_max(blocks, -+ au_mul_till_max(buf->f_blocks, factor)); -+ bfree = au_add_till_max(bfree, -+ au_mul_till_max(buf->f_bfree, factor)); -+ bavail = au_add_till_max(bavail, -+ au_mul_till_max(buf->f_bavail, factor)); -+ files = au_add_till_max(files, buf->f_files); -+ ffree = au_add_till_max(ffree, buf->f_ffree); -+ } -+ -+ buf->f_bsize = bsize; -+ buf->f_blocks = blocks; -+ buf->f_bfree = bfree; -+ buf->f_bavail = bavail; -+ buf->f_files = files; -+ buf->f_ffree = ffree; -+ buf->f_frsize = 0; -+ -+out: -+ return err; -+} -+ -+static int aufs_statfs(struct dentry *dentry, struct kstatfs *buf) -+{ -+ int err; -+ struct path h_path; -+ struct super_block *sb; -+ -+ /* lock free root dinfo */ -+ sb = dentry->d_sb; -+ si_noflush_read_lock(sb); -+ if (!au_opt_test(au_mntflags(sb), SUM)) { -+ /* sb->s_root for NFS is unreliable */ -+ h_path.mnt = au_sbr_mnt(sb, 0); -+ h_path.dentry = h_path.mnt->mnt_root; -+ err = vfs_statfs(&h_path, buf); -+ } else -+ err = au_statfs_sum(sb, buf); -+ si_read_unlock(sb); -+ -+ if (!err) { -+ buf->f_type = AUFS_SUPER_MAGIC; -+ buf->f_namelen = AUFS_MAX_NAMELEN; -+ memset(&buf->f_fsid, 0, sizeof(buf->f_fsid)); -+ } -+ /* buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1; */ -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_sync_fs(struct super_block *sb, int wait) -+{ -+ int err, e; -+ aufs_bindex_t bend, bindex; -+ struct au_branch *br; -+ struct super_block *h_sb; -+ -+ err = 0; -+ si_noflush_read_lock(sb); -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (!au_br_writable(br->br_perm)) -+ continue; -+ -+ h_sb = 
au_sbr_sb(sb, bindex); -+ if (h_sb->s_op->sync_fs) { -+ e = h_sb->s_op->sync_fs(h_sb, wait); -+ if (unlikely(e && !err)) -+ err = e; -+ /* go on even if an error happens */ -+ } -+ } -+ si_read_unlock(sb); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* final actions when unmounting a file system */ -+static void aufs_put_super(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ sbinfo = au_sbi(sb); -+ if (!sbinfo) -+ return; -+ -+ dbgaufs_si_fin(sbinfo); -+ kobject_put(&sbinfo->si_kobj); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb, void *arg) -+{ -+ void *array; -+ unsigned long long n, sz; -+ -+ array = NULL; -+ n = 0; -+ if (!*hint) -+ goto out; -+ -+ if (*hint > ULLONG_MAX / sizeof(array)) { -+ array = ERR_PTR(-EMFILE); -+ pr_err("hint %llu\n", *hint); -+ goto out; -+ } -+ -+ sz = sizeof(array) * *hint; -+ array = kzalloc(sz, GFP_NOFS); -+ if (unlikely(!array)) -+ array = vzalloc(sz); -+ if (unlikely(!array)) { -+ array = ERR_PTR(-ENOMEM); -+ goto out; -+ } -+ -+ n = cb(array, *hint, arg); -+ AuDebugOn(n > *hint); -+ -+out: -+ *hint = n; -+ return array; -+} -+ -+static unsigned long long au_iarray_cb(void *a, -+ unsigned long long max __maybe_unused, -+ void *arg) -+{ -+ unsigned long long n; -+ struct inode **p, *inode; -+ struct list_head *head; -+ -+ n = 0; -+ p = a; -+ head = arg; -+ spin_lock(&inode_sb_list_lock); -+ list_for_each_entry(inode, head, i_sb_list) { -+ if (!is_bad_inode(inode) -+ && au_ii(inode)->ii_bstart >= 0) { -+ spin_lock(&inode->i_lock); -+ if (atomic_read(&inode->i_count)) { -+ au_igrab(inode); -+ *p++ = inode; -+ n++; -+ AuDebugOn(n > max); -+ } -+ spin_unlock(&inode->i_lock); -+ } -+ } -+ spin_unlock(&inode_sb_list_lock); -+ -+ return n; -+} -+ -+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max) -+{ -+ *max = 
atomic_long_read(&au_sbi(sb)->si_ninodes); -+ return au_array_alloc(max, au_iarray_cb, &sb->s_inodes); -+} -+ -+void au_iarray_free(struct inode **a, unsigned long long max) -+{ -+ unsigned long long ull; -+ -+ for (ull = 0; ull < max; ull++) -+ iput(a[ull]); -+ kvfree(a); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * refresh dentry and inode at remount time. -+ */ -+/* todo: consolidate with simple_reval_dpath() and au_reval_for_attr() */ -+static int au_do_refresh(struct dentry *dentry, unsigned int dir_flags, -+ struct dentry *parent) -+{ -+ int err; -+ -+ di_write_lock_child(dentry); -+ di_read_lock_parent(parent, AuLock_IR); -+ err = au_refresh_dentry(dentry, parent); -+ if (!err && dir_flags) -+ au_hn_reset(dentry->d_inode, dir_flags); -+ di_read_unlock(parent, AuLock_IR); -+ di_write_unlock(dentry); -+ -+ return err; -+} -+ -+static int au_do_refresh_d(struct dentry *dentry, unsigned int sigen, -+ struct au_sbinfo *sbinfo, -+ const unsigned int dir_flags, unsigned int do_idop) -+{ -+ int err; -+ struct dentry *parent; -+ struct inode *inode; -+ -+ err = 0; -+ parent = dget_parent(dentry); -+ if (!au_digen_test(parent, sigen) && au_digen_test(dentry, sigen)) { -+ inode = dentry->d_inode; -+ if (inode) { -+ if (!S_ISDIR(inode->i_mode)) -+ err = au_do_refresh(dentry, /*dir_flags*/0, -+ parent); -+ else { -+ err = au_do_refresh(dentry, dir_flags, parent); -+ if (unlikely(err)) -+ au_fset_si(sbinfo, FAILED_REFRESH_DIR); -+ } -+ } else -+ err = au_do_refresh(dentry, /*dir_flags*/0, parent); -+ AuDbgDentry(dentry); -+ } -+ dput(parent); -+ -+ if (!err) { -+ if (do_idop) -+ au_refresh_dop(dentry, /*force_reval*/0); -+ } else -+ au_refresh_dop(dentry, /*force_reval*/1); -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_refresh_d(struct super_block *sb, unsigned int do_idop) -+{ -+ int err, i, j, ndentry, e; -+ unsigned int sigen; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct 
dentry **dentries, *d; -+ struct au_sbinfo *sbinfo; -+ struct dentry *root = sb->s_root; -+ const unsigned int dir_flags = au_hi_flags(root->d_inode, /*isdir*/1); -+ -+ if (do_idop) -+ au_refresh_dop(root, /*force_reval*/0); -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_dcsub_pages(&dpages, root, NULL, NULL); -+ if (unlikely(err)) -+ goto out_dpages; -+ -+ sigen = au_sigen(sb); -+ sbinfo = au_sbi(sb); -+ for (i = 0; i < dpages.ndpage; i++) { -+ dpage = dpages.dpages + i; -+ dentries = dpage->dentries; -+ ndentry = dpage->ndentry; -+ for (j = 0; j < ndentry; j++) { -+ d = dentries[j]; -+ e = au_do_refresh_d(d, sigen, sbinfo, dir_flags, -+ do_idop); -+ if (unlikely(e && !err)) -+ err = e; -+ /* go on even err */ -+ } -+ } -+ -+out_dpages: -+ au_dpages_free(&dpages); -+out: -+ return err; -+} -+ -+static int au_refresh_i(struct super_block *sb, unsigned int do_idop) -+{ -+ int err, e; -+ unsigned int sigen; -+ unsigned long long max, ull; -+ struct inode *inode, **array; -+ -+ array = au_iarray_alloc(sb, &max); -+ err = PTR_ERR(array); -+ if (IS_ERR(array)) -+ goto out; -+ -+ err = 0; -+ sigen = au_sigen(sb); -+ for (ull = 0; ull < max; ull++) { -+ inode = array[ull]; -+ if (unlikely(!inode)) -+ break; -+ -+ e = 0; -+ ii_write_lock_child(inode); -+ if (au_iigen(inode, NULL) != sigen) { -+ e = au_refresh_hinode_self(inode); -+ if (unlikely(e)) { -+ au_refresh_iop(inode, /*force_getattr*/1); -+ pr_err("error %d, i%lu\n", e, inode->i_ino); -+ if (!err) -+ err = e; -+ /* go on even if err */ -+ } -+ } -+ if (!e && do_idop) -+ au_refresh_iop(inode, /*force_getattr*/0); -+ ii_write_unlock(inode); -+ } -+ -+ au_iarray_free(array, max); -+ -+out: -+ return err; -+} -+ -+static void au_remount_refresh(struct super_block *sb, unsigned int do_idop) -+{ -+ int err, e; -+ unsigned int udba; -+ aufs_bindex_t bindex, bend; -+ struct dentry *root; -+ struct inode *inode; -+ struct au_branch *br; -+ struct au_sbinfo *sbi; -+ -+ 
au_sigen_inc(sb); -+ sbi = au_sbi(sb); -+ au_fclr_si(sbi, FAILED_REFRESH_DIR); -+ -+ root = sb->s_root; -+ DiMustNoWaiters(root); -+ inode = root->d_inode; -+ IiMustNoWaiters(inode); -+ -+ udba = au_opt_udba(sb); -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ err = au_hnotify_reset_br(udba, br, br->br_perm); -+ if (unlikely(err)) -+ AuIOErr("hnotify failed on br %d, %d, ignored\n", -+ bindex, err); -+ /* go on even if err */ -+ } -+ au_hn_reset(inode, au_hi_flags(inode, /*isdir*/1)); -+ -+ if (do_idop) { -+ if (au_ftest_si(sbi, NO_DREVAL)) { -+ AuDebugOn(sb->s_d_op == &aufs_dop_noreval); -+ sb->s_d_op = &aufs_dop_noreval; -+ AuDebugOn(sbi->si_iop_array == aufs_iop_nogetattr); -+ sbi->si_iop_array = aufs_iop_nogetattr; -+ } else { -+ AuDebugOn(sb->s_d_op == &aufs_dop); -+ sb->s_d_op = &aufs_dop; -+ AuDebugOn(sbi->si_iop_array == aufs_iop); -+ sbi->si_iop_array = aufs_iop; -+ } -+ pr_info("reset to %pf and %pf\n", -+ sb->s_d_op, sbi->si_iop_array); -+ } -+ -+ di_write_unlock(root); -+ err = au_refresh_d(sb, do_idop); -+ e = au_refresh_i(sb, do_idop); -+ if (unlikely(e && !err)) -+ err = e; -+ /* aufs_write_lock() calls ..._child() */ -+ di_write_lock_child(root); -+ -+ au_cpup_attr_all(inode, /*force*/1); -+ -+ if (unlikely(err)) -+ AuIOErr("refresh failed, ignored, %d\n", err); -+} -+ -+/* stop extra interpretation of errno in mount(8), and strange error messages */ -+static int cvt_err(int err) -+{ -+ AuTraceErr(err); -+ -+ switch (err) { -+ case -ENOENT: -+ case -ENOTDIR: -+ case -EEXIST: -+ case -EIO: -+ err = -EINVAL; -+ } -+ return err; -+} -+ -+static int aufs_remount_fs(struct super_block *sb, int *flags, char *data) -+{ -+ int err, do_dx; -+ unsigned int mntflags; -+ struct au_opts opts = { -+ .opt = NULL -+ }; -+ struct dentry *root; -+ struct inode *inode; -+ struct au_sbinfo *sbinfo; -+ -+ err = 0; -+ root = sb->s_root; -+ if (!data || !*data) { -+ err = si_write_lock(sb, AuLock_FLUSH | 
AuLock_NOPLM); -+ if (!err) { -+ di_write_lock_child(root); -+ err = au_opts_verify(sb, *flags, /*pending*/0); -+ aufs_write_unlock(root); -+ } -+ goto out; -+ } -+ -+ err = -ENOMEM; -+ opts.opt = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!opts.opt)) -+ goto out; -+ opts.max_opt = PAGE_SIZE / sizeof(*opts.opt); -+ opts.flags = AuOpts_REMOUNT; -+ opts.sb_flags = *flags; -+ -+ /* parse it before aufs lock */ -+ err = au_opts_parse(sb, data, &opts); -+ if (unlikely(err)) -+ goto out_opts; -+ -+ sbinfo = au_sbi(sb); -+ inode = root->d_inode; -+ mutex_lock(&inode->i_mutex); -+ err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out_mtx; -+ di_write_lock_child(root); -+ -+ /* au_opts_remount() may return an error */ -+ err = au_opts_remount(sb, &opts); -+ au_opts_free(&opts); -+ -+ if (au_ftest_opts(opts.flags, REFRESH)) -+ au_remount_refresh(sb, au_ftest_opts(opts.flags, REFRESH_IDOP)); -+ -+ if (au_ftest_opts(opts.flags, REFRESH_DYAOP)) { -+ mntflags = au_mntflags(sb); -+ do_dx = !!au_opt_test(mntflags, DIO); -+ au_dy_arefresh(do_dx); -+ } -+ -+ au_fhsm_wrote_all(sb, /*force*/1); /* ?? 
*/ -+ aufs_write_unlock(root); -+ -+out_mtx: -+ mutex_unlock(&inode->i_mutex); -+out_opts: -+ free_page((unsigned long)opts.opt); -+out: -+ err = cvt_err(err); -+ AuTraceErr(err); -+ return err; -+} -+ -+static const struct super_operations aufs_sop = { -+ .alloc_inode = aufs_alloc_inode, -+ .destroy_inode = aufs_destroy_inode, -+ /* always deleting, no clearing */ -+ .drop_inode = generic_delete_inode, -+ .show_options = aufs_show_options, -+ .statfs = aufs_statfs, -+ .put_super = aufs_put_super, -+ .sync_fs = aufs_sync_fs, -+ .remount_fs = aufs_remount_fs -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int alloc_root(struct super_block *sb) -+{ -+ int err; -+ struct inode *inode; -+ struct dentry *root; -+ -+ err = -ENOMEM; -+ inode = au_iget_locked(sb, AUFS_ROOT_INO); -+ err = PTR_ERR(inode); -+ if (IS_ERR(inode)) -+ goto out; -+ -+ inode->i_op = aufs_iop + AuIop_DIR; /* with getattr by default */ -+ inode->i_fop = &aufs_dir_fop; -+ inode->i_mode = S_IFDIR; -+ set_nlink(inode, 2); -+ unlock_new_inode(inode); -+ -+ root = d_make_root(inode); -+ if (unlikely(!root)) -+ goto out; -+ err = PTR_ERR(root); -+ if (IS_ERR(root)) -+ goto out; -+ -+ err = au_di_init(root); -+ if (!err) { -+ sb->s_root = root; -+ return 0; /* success */ -+ } -+ dput(root); -+ -+out: -+ return err; -+} -+ -+static int aufs_fill_super(struct super_block *sb, void *raw_data, -+ int silent __maybe_unused) -+{ -+ int err; -+ struct au_opts opts = { -+ .opt = NULL -+ }; -+ struct au_sbinfo *sbinfo; -+ struct dentry *root; -+ struct inode *inode; -+ char *arg = raw_data; -+ -+ if (unlikely(!arg || !*arg)) { -+ err = -EINVAL; -+ pr_err("no arg\n"); -+ goto out; -+ } -+ -+ err = -ENOMEM; -+ opts.opt = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!opts.opt)) -+ goto out; -+ opts.max_opt = PAGE_SIZE / sizeof(*opts.opt); -+ opts.sb_flags = sb->s_flags; -+ -+ err = au_si_alloc(sb); -+ if (unlikely(err)) -+ goto out_opts; -+ sbinfo = au_sbi(sb); 
-+ -+ /* all timestamps always follow the ones on the branch */ -+ sb->s_flags |= MS_NOATIME | MS_NODIRATIME; -+ sb->s_op = &aufs_sop; -+ sb->s_d_op = &aufs_dop; -+ sb->s_magic = AUFS_SUPER_MAGIC; -+ sb->s_maxbytes = 0; -+ sb->s_stack_depth = 1; -+ au_export_init(sb); -+ /* au_xattr_init(sb); */ -+ -+ err = alloc_root(sb); -+ if (unlikely(err)) { -+ si_write_unlock(sb); -+ goto out_info; -+ } -+ root = sb->s_root; -+ inode = root->d_inode; -+ -+ /* -+ * actually we can parse options regardless aufs lock here. -+ * but at remount time, parsing must be done before aufs lock. -+ * so we follow the same rule. -+ */ -+ ii_write_lock_parent(inode); -+ aufs_write_unlock(root); -+ err = au_opts_parse(sb, arg, &opts); -+ if (unlikely(err)) -+ goto out_root; -+ -+ /* lock vfs_inode first, then aufs. */ -+ mutex_lock(&inode->i_mutex); -+ aufs_write_lock(root); -+ err = au_opts_mount(sb, &opts); -+ au_opts_free(&opts); -+ if (!err && au_ftest_si(sbinfo, NO_DREVAL)) { -+ sb->s_d_op = &aufs_dop_noreval; -+ pr_info("%pf\n", sb->s_d_op); -+ au_refresh_dop(root, /*force_reval*/0); -+ sbinfo->si_iop_array = aufs_iop_nogetattr; -+ au_refresh_iop(inode, /*force_getattr*/0); -+ } -+ aufs_write_unlock(root); -+ mutex_unlock(&inode->i_mutex); -+ if (!err) -+ goto out_opts; /* success */ -+ -+out_root: -+ dput(root); -+ sb->s_root = NULL; -+out_info: -+ dbgaufs_si_fin(sbinfo); -+ kobject_put(&sbinfo->si_kobj); -+ sb->s_fs_info = NULL; -+out_opts: -+ free_page((unsigned long)opts.opt); -+out: -+ AuTraceErr(err); -+ err = cvt_err(err); -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct dentry *aufs_mount(struct file_system_type *fs_type, int flags, -+ const char *dev_name __maybe_unused, -+ void *raw_data) -+{ -+ struct dentry *root; -+ struct super_block *sb; -+ -+ /* all timestamps always follow the ones on the branch */ -+ /* mnt->mnt_flags |= MNT_NOATIME | MNT_NODIRATIME; */ -+ root = 
mount_nodev(fs_type, flags, raw_data, aufs_fill_super); -+ if (IS_ERR(root)) -+ goto out; -+ -+ sb = root->d_sb; -+ si_write_lock(sb, !AuLock_FLUSH); -+ sysaufs_brs_add(sb, 0); -+ si_write_unlock(sb); -+ au_sbilist_add(sb); -+ -+out: -+ return root; -+} -+ -+static void aufs_kill_sb(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ sbinfo = au_sbi(sb); -+ if (sbinfo) { -+ au_sbilist_del(sb); -+ aufs_write_lock(sb->s_root); -+ au_fhsm_fin(sb); -+ if (sbinfo->si_wbr_create_ops->fin) -+ sbinfo->si_wbr_create_ops->fin(sb); -+ if (au_opt_test(sbinfo->si_mntflags, UDBA_HNOTIFY)) { -+ au_opt_set_udba(sbinfo->si_mntflags, UDBA_NONE); -+ au_remount_refresh(sb, /*do_idop*/0); -+ } -+ if (au_opt_test(sbinfo->si_mntflags, PLINK)) -+ au_plink_put(sb, /*verbose*/1); -+ au_xino_clr(sb); -+ sbinfo->si_sb = NULL; -+ aufs_write_unlock(sb->s_root); -+ au_nwt_flush(&sbinfo->si_nowait); -+ } -+ kill_anon_super(sb); -+} -+ -+struct file_system_type aufs_fs_type = { -+ .name = AUFS_FSTYPE, -+ /* a race between rename and others */ -+ .fs_flags = FS_RENAME_DOES_D_MOVE, -+ .mount = aufs_mount, -+ .kill_sb = aufs_kill_sb, -+ /* no need to __module_get() and module_put(). */ -+ .owner = THIS_MODULE, -+}; -diff --git a/fs/aufs/super.h b/fs/aufs/super.h -new file mode 100644 -index 0000000..ecd364b ---- /dev/null -+++ b/fs/aufs/super.h -@@ -0,0 +1,626 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * super_block operations -+ */ -+ -+#ifndef __AUFS_SUPER_H__ -+#define __AUFS_SUPER_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include "rwsem.h" -+#include "spl.h" -+#include "wkq.h" -+ -+typedef ssize_t (*au_readf_t)(struct file *, char __user *, size_t, loff_t *); -+typedef ssize_t (*au_writef_t)(struct file *, const char __user *, size_t, -+ loff_t *); -+ -+/* policies to select one among multiple writable branches */ -+struct au_wbr_copyup_operations { -+ int (*copyup)(struct dentry *dentry); -+}; -+ -+#define AuWbr_DIR 1 /* target is a dir */ -+#define AuWbr_PARENT (1 << 1) /* always require a parent */ -+ -+#define au_ftest_wbr(flags, name) ((flags) & AuWbr_##name) -+#define au_fset_wbr(flags, name) { (flags) |= AuWbr_##name; } -+#define au_fclr_wbr(flags, name) { (flags) &= ~AuWbr_##name; } -+ -+struct au_wbr_create_operations { -+ int (*create)(struct dentry *dentry, unsigned int flags); -+ int (*init)(struct super_block *sb); -+ int (*fin)(struct super_block *sb); -+}; -+ -+struct au_wbr_mfs { -+ struct mutex mfs_lock; /* protect this structure */ -+ unsigned long mfs_jiffy; -+ unsigned long mfs_expire; -+ aufs_bindex_t mfs_bindex; -+ -+ unsigned long long mfsrr_bytes; -+ unsigned long long mfsrr_watermark; -+}; -+ -+#define AuPlink_NHASH 100 -+static inline int au_plink_hash(ino_t ino) -+{ -+ return ino % AuPlink_NHASH; -+} -+ -+/* File-based Hierarchical Storage Management */ -+struct au_fhsm { -+#ifdef CONFIG_AUFS_FHSM -+ /* allow only one process who can receive the notification */ -+ spinlock_t fhsm_spin; -+ pid_t fhsm_pid; -+ wait_queue_head_t fhsm_wqh; -+ atomic_t fhsm_readable; -+ -+ /* these are protected by si_rwsem */ -+ unsigned long fhsm_expire; -+ aufs_bindex_t fhsm_bottom; -+#endif -+}; -+ -+#define AU_PIDSTEP (int)(BITS_TO_LONGS(PID_MAX_DEFAULT) * BITS_PER_LONG) -+#define AU_NPIDMAP 
(int)DIV_ROUND_UP(PID_MAX_LIMIT, AU_PIDSTEP) -+struct au_si_pid { -+ unsigned long *pid_bitmap[AU_NPIDMAP]; -+ struct mutex pid_mtx; -+}; -+ -+struct au_branch; -+struct au_sbinfo { -+ /* nowait tasks in the system-wide workqueue */ -+ struct au_nowait_tasks si_nowait; -+ -+ /* -+ * tried sb->s_umount, but failed due to the dependecy between i_mutex. -+ * rwsem for au_sbinfo is necessary. -+ */ -+ struct au_rwsem si_rwsem; -+ -+ /* prevent recursive locking in deleting inode */ -+ struct au_si_pid au_si_pid; -+ -+ /* -+ * dirty approach to protect sb->sb_inodes and ->s_files (gone) from -+ * remount. -+ */ -+ atomic_long_t si_ninodes, si_nfiles; -+ -+ /* branch management */ -+ unsigned int si_generation; -+ -+ /* see AuSi_ flags */ -+ unsigned char au_si_status; -+ -+ aufs_bindex_t si_bend; -+ -+ /* dirty trick to keep br_id plus */ -+ unsigned int si_last_br_id : -+ sizeof(aufs_bindex_t) * BITS_PER_BYTE - 1; -+ struct au_branch **si_branch; -+ -+ /* policy to select a writable branch */ -+ unsigned char si_wbr_copyup; -+ unsigned char si_wbr_create; -+ struct au_wbr_copyup_operations *si_wbr_copyup_ops; -+ struct au_wbr_create_operations *si_wbr_create_ops; -+ -+ /* round robin */ -+ atomic_t si_wbr_rr_next; -+ -+ /* most free space */ -+ struct au_wbr_mfs si_wbr_mfs; -+ -+ /* File-based Hierarchical Storage Management */ -+ struct au_fhsm si_fhsm; -+ -+ /* mount flags */ -+ /* include/asm-ia64/siginfo.h defines a macro named si_flags */ -+ unsigned int si_mntflags; -+ -+ /* external inode number (bitmap and translation table) */ -+ au_readf_t si_xread; -+ au_writef_t si_xwrite; -+ struct file *si_xib; -+ struct mutex si_xib_mtx; /* protect xib members */ -+ unsigned long *si_xib_buf; -+ unsigned long si_xib_last_pindex; -+ int si_xib_next_bit; -+ aufs_bindex_t si_xino_brid; -+ unsigned long si_xino_jiffy; -+ unsigned long si_xino_expire; -+ /* reserved for future use */ -+ /* unsigned long long si_xib_limit; */ /* Max xib file size */ -+ -+#ifdef 
CONFIG_AUFS_EXPORT -+ /* i_generation */ -+ struct file *si_xigen; -+ atomic_t si_xigen_next; -+#endif -+ -+ /* dirty trick to suppoer atomic_open */ -+ struct au_sphlhead si_aopen; -+ -+ /* vdir parameters */ -+ unsigned long si_rdcache; /* max cache time in jiffies */ -+ unsigned int si_rdblk; /* deblk size */ -+ unsigned int si_rdhash; /* hash size */ -+ -+ /* -+ * If the number of whiteouts are larger than si_dirwh, leave all of -+ * them after au_whtmp_ren to reduce the cost of rmdir(2). -+ * future fsck.aufs or kernel thread will remove them later. -+ * Otherwise, remove all whiteouts and the dir in rmdir(2). -+ */ -+ unsigned int si_dirwh; -+ -+ /* pseudo_link list */ -+ struct au_sphlhead si_plink[AuPlink_NHASH]; -+ wait_queue_head_t si_plink_wq; -+ spinlock_t si_plink_maint_lock; -+ pid_t si_plink_maint_pid; -+ -+ /* file list */ -+ struct au_sphlhead si_files; -+ -+ /* with/without getattr, brother of sb->s_d_op */ -+ struct inode_operations *si_iop_array; -+ -+ /* -+ * sysfs and lifetime management. -+ * this is not a small structure and it may be a waste of memory in case -+ * of sysfs is disabled, particulary when many aufs-es are mounted. -+ * but using sysfs is majority. -+ */ -+ struct kobject si_kobj; -+#ifdef CONFIG_DEBUG_FS -+ struct dentry *si_dbgaufs; -+ struct dentry *si_dbgaufs_plink; -+ struct dentry *si_dbgaufs_xib; -+#ifdef CONFIG_AUFS_EXPORT -+ struct dentry *si_dbgaufs_xigen; -+#endif -+#endif -+ -+#ifdef CONFIG_AUFS_SBILIST -+ struct hlist_node si_list; -+#endif -+ -+ /* dirty, necessary for unmounting, sysfs and sysrq */ -+ struct super_block *si_sb; -+}; -+ -+/* sbinfo status flags */ -+/* -+ * set true when refresh_dirs() failed at remount time. -+ * then try refreshing dirs at access time again. 
-+ * if it is false, refreshing dirs at access time is unnecesary -+ */ -+#define AuSi_FAILED_REFRESH_DIR 1 -+#define AuSi_FHSM (1 << 1) /* fhsm is active now */ -+#define AuSi_NO_DREVAL (1 << 2) /* disable all d_revalidate */ -+ -+#ifndef CONFIG_AUFS_FHSM -+#undef AuSi_FHSM -+#define AuSi_FHSM 0 -+#endif -+ -+static inline unsigned char au_do_ftest_si(struct au_sbinfo *sbi, -+ unsigned int flag) -+{ -+ AuRwMustAnyLock(&sbi->si_rwsem); -+ return sbi->au_si_status & flag; -+} -+#define au_ftest_si(sbinfo, name) au_do_ftest_si(sbinfo, AuSi_##name) -+#define au_fset_si(sbinfo, name) do { \ -+ AuRwMustWriteLock(&(sbinfo)->si_rwsem); \ -+ (sbinfo)->au_si_status |= AuSi_##name; \ -+} while (0) -+#define au_fclr_si(sbinfo, name) do { \ -+ AuRwMustWriteLock(&(sbinfo)->si_rwsem); \ -+ (sbinfo)->au_si_status &= ~AuSi_##name; \ -+} while (0) -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* policy to select one among writable branches */ -+#define AuWbrCopyup(sbinfo, ...) \ -+ ((sbinfo)->si_wbr_copyup_ops->copyup(__VA_ARGS__)) -+#define AuWbrCreate(sbinfo, ...) 
\ -+ ((sbinfo)->si_wbr_create_ops->create(__VA_ARGS__)) -+ -+/* flags for si_read_lock()/aufs_read_lock()/di_read_lock() */ -+#define AuLock_DW 1 /* write-lock dentry */ -+#define AuLock_IR (1 << 1) /* read-lock inode */ -+#define AuLock_IW (1 << 2) /* write-lock inode */ -+#define AuLock_FLUSH (1 << 3) /* wait for 'nowait' tasks */ -+#define AuLock_DIRS (1 << 4) /* target is a pair of dirs */ -+#define AuLock_NOPLM (1 << 5) /* return err in plm mode */ -+#define AuLock_NOPLMW (1 << 6) /* wait for plm mode ends */ -+#define AuLock_GEN (1 << 7) /* test digen/iigen */ -+#define au_ftest_lock(flags, name) ((flags) & AuLock_##name) -+#define au_fset_lock(flags, name) \ -+ do { (flags) |= AuLock_##name; } while (0) -+#define au_fclr_lock(flags, name) \ -+ do { (flags) &= ~AuLock_##name; } while (0) -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* super.c */ -+extern struct file_system_type aufs_fs_type; -+struct inode *au_iget_locked(struct super_block *sb, ino_t ino); -+typedef unsigned long long (*au_arraycb_t)(void *array, unsigned long long max, -+ void *arg); -+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb, void *arg); -+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max); -+void au_iarray_free(struct inode **a, unsigned long long max); -+ -+/* sbinfo.c */ -+void au_si_free(struct kobject *kobj); -+int au_si_alloc(struct super_block *sb); -+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr); -+ -+unsigned int au_sigen_inc(struct super_block *sb); -+aufs_bindex_t au_new_br_id(struct super_block *sb); -+ -+int si_read_lock(struct super_block *sb, int flags); -+int si_write_lock(struct super_block *sb, int flags); -+int aufs_read_lock(struct dentry *dentry, int flags); -+void aufs_read_unlock(struct dentry *dentry, int flags); -+void aufs_write_lock(struct dentry *dentry); -+void aufs_write_unlock(struct dentry *dentry); -+int aufs_read_and_write_lock2(struct dentry *d1, struct 
dentry *d2, int flags); -+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2); -+ -+/* wbr_policy.c */ -+extern struct au_wbr_copyup_operations au_wbr_copyup_ops[]; -+extern struct au_wbr_create_operations au_wbr_create_ops[]; -+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst); -+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex); -+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t bstart); -+ -+/* mvdown.c */ -+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *arg); -+ -+#ifdef CONFIG_AUFS_FHSM -+/* fhsm.c */ -+ -+static inline pid_t au_fhsm_pid(struct au_fhsm *fhsm) -+{ -+ pid_t pid; -+ -+ spin_lock(&fhsm->fhsm_spin); -+ pid = fhsm->fhsm_pid; -+ spin_unlock(&fhsm->fhsm_spin); -+ -+ return pid; -+} -+ -+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force); -+void au_fhsm_wrote_all(struct super_block *sb, int force); -+int au_fhsm_fd(struct super_block *sb, int oflags); -+int au_fhsm_br_alloc(struct au_branch *br); -+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex); -+void au_fhsm_fin(struct super_block *sb); -+void au_fhsm_init(struct au_sbinfo *sbinfo); -+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec); -+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo); -+#else -+AuStubVoid(au_fhsm_wrote, struct super_block *sb, aufs_bindex_t bindex, -+ int force) -+AuStubVoid(au_fhsm_wrote_all, struct super_block *sb, int force) -+AuStub(int, au_fhsm_fd, return -EOPNOTSUPP, struct super_block *sb, int oflags) -+AuStub(pid_t, au_fhsm_pid, return 0, struct au_fhsm *fhsm) -+AuStubInt0(au_fhsm_br_alloc, struct au_branch *br) -+AuStubVoid(au_fhsm_set_bottom, struct super_block *sb, aufs_bindex_t bindex) -+AuStubVoid(au_fhsm_fin, struct super_block *sb) -+AuStubVoid(au_fhsm_init, struct au_sbinfo *sbinfo) -+AuStubVoid(au_fhsm_set, struct au_sbinfo *sbinfo, unsigned int sec) -+AuStubVoid(au_fhsm_show, struct seq_file *seq, struct au_sbinfo 
*sbinfo) -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct au_sbinfo *au_sbi(struct super_block *sb) -+{ -+ return sb->s_fs_info; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_EXPORT -+int au_test_nfsd(void); -+void au_export_init(struct super_block *sb); -+void au_xigen_inc(struct inode *inode); -+int au_xigen_new(struct inode *inode); -+int au_xigen_set(struct super_block *sb, struct file *base); -+void au_xigen_clr(struct super_block *sb); -+ -+static inline int au_busy_or_stale(void) -+{ -+ if (!au_test_nfsd()) -+ return -EBUSY; -+ return -ESTALE; -+} -+#else -+AuStubInt0(au_test_nfsd, void) -+AuStubVoid(au_export_init, struct super_block *sb) -+AuStubVoid(au_xigen_inc, struct inode *inode) -+AuStubInt0(au_xigen_new, struct inode *inode) -+AuStubInt0(au_xigen_set, struct super_block *sb, struct file *base) -+AuStubVoid(au_xigen_clr, struct super_block *sb) -+AuStub(int, au_busy_or_stale, return -EBUSY, void) -+#endif /* CONFIG_AUFS_EXPORT */ -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_SBILIST -+/* module.c */ -+extern struct au_sphlhead au_sbilist; -+ -+static inline void au_sbilist_init(void) -+{ -+ au_sphl_init(&au_sbilist); -+} -+ -+static inline void au_sbilist_add(struct super_block *sb) -+{ -+ au_sphl_add(&au_sbi(sb)->si_list, &au_sbilist); -+} -+ -+static inline void au_sbilist_del(struct super_block *sb) -+{ -+ au_sphl_del(&au_sbi(sb)->si_list, &au_sbilist); -+} -+ -+#ifdef CONFIG_AUFS_MAGIC_SYSRQ -+static inline void au_sbilist_lock(void) -+{ -+ spin_lock(&au_sbilist.spin); -+} -+ -+static inline void au_sbilist_unlock(void) -+{ -+ spin_unlock(&au_sbilist.spin); -+} -+#define AuGFP_SBILIST GFP_ATOMIC -+#else -+AuStubVoid(au_sbilist_lock, void) -+AuStubVoid(au_sbilist_unlock, void) -+#define AuGFP_SBILIST GFP_NOFS -+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */ -+#else 
-+AuStubVoid(au_sbilist_init, void) -+AuStubVoid(au_sbilist_add, struct super_block *sb) -+AuStubVoid(au_sbilist_del, struct super_block *sb) -+AuStubVoid(au_sbilist_lock, void) -+AuStubVoid(au_sbilist_unlock, void) -+#define AuGFP_SBILIST GFP_NOFS -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline void dbgaufs_si_null(struct au_sbinfo *sbinfo) -+{ -+ /* -+ * This function is a dynamic '__init' function actually, -+ * so the tiny check for si_rwsem is unnecessary. -+ */ -+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */ -+#ifdef CONFIG_DEBUG_FS -+ sbinfo->si_dbgaufs = NULL; -+ sbinfo->si_dbgaufs_plink = NULL; -+ sbinfo->si_dbgaufs_xib = NULL; -+#ifdef CONFIG_AUFS_EXPORT -+ sbinfo->si_dbgaufs_xigen = NULL; -+#endif -+#endif -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline void si_pid_idx_bit(int *idx, pid_t *bit) -+{ -+ /* the origin of pid is 1, but the bitmap's is 0 */ -+ *bit = current->pid - 1; -+ *idx = *bit / AU_PIDSTEP; -+ *bit %= AU_PIDSTEP; -+} -+ -+static inline int si_pid_test(struct super_block *sb) -+{ -+ pid_t bit; -+ int idx; -+ unsigned long *bitmap; -+ -+ si_pid_idx_bit(&idx, &bit); -+ bitmap = au_sbi(sb)->au_si_pid.pid_bitmap[idx]; -+ if (bitmap) -+ return test_bit(bit, bitmap); -+ return 0; -+} -+ -+static inline void si_pid_clr(struct super_block *sb) -+{ -+ pid_t bit; -+ int idx; -+ unsigned long *bitmap; -+ -+ si_pid_idx_bit(&idx, &bit); -+ bitmap = au_sbi(sb)->au_si_pid.pid_bitmap[idx]; -+ BUG_ON(!bitmap); -+ AuDebugOn(!test_bit(bit, bitmap)); -+ clear_bit(bit, bitmap); -+ /* smp_mb(); */ -+} -+ -+void si_pid_set(struct super_block *sb); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* lock superblock. 
mainly for entry point functions */ -+/* -+ * __si_read_lock, __si_write_lock, -+ * __si_read_unlock, __si_write_unlock, __si_downgrade_lock -+ */ -+AuSimpleRwsemFuncs(__si, struct super_block *sb, &au_sbi(sb)->si_rwsem); -+ -+#define SiMustNoWaiters(sb) AuRwMustNoWaiters(&au_sbi(sb)->si_rwsem) -+#define SiMustAnyLock(sb) AuRwMustAnyLock(&au_sbi(sb)->si_rwsem) -+#define SiMustWriteLock(sb) AuRwMustWriteLock(&au_sbi(sb)->si_rwsem) -+ -+static inline void si_noflush_read_lock(struct super_block *sb) -+{ -+ __si_read_lock(sb); -+ si_pid_set(sb); -+} -+ -+static inline int si_noflush_read_trylock(struct super_block *sb) -+{ -+ int locked; -+ -+ locked = __si_read_trylock(sb); -+ if (locked) -+ si_pid_set(sb); -+ return locked; -+} -+ -+static inline void si_noflush_write_lock(struct super_block *sb) -+{ -+ __si_write_lock(sb); -+ si_pid_set(sb); -+} -+ -+static inline int si_noflush_write_trylock(struct super_block *sb) -+{ -+ int locked; -+ -+ locked = __si_write_trylock(sb); -+ if (locked) -+ si_pid_set(sb); -+ return locked; -+} -+ -+#if 0 /* reserved */ -+static inline int si_read_trylock(struct super_block *sb, int flags) -+{ -+ if (au_ftest_lock(flags, FLUSH)) -+ au_nwt_flush(&au_sbi(sb)->si_nowait); -+ return si_noflush_read_trylock(sb); -+} -+#endif -+ -+static inline void si_read_unlock(struct super_block *sb) -+{ -+ si_pid_clr(sb); -+ __si_read_unlock(sb); -+} -+ -+#if 0 /* reserved */ -+static inline int si_write_trylock(struct super_block *sb, int flags) -+{ -+ if (au_ftest_lock(flags, FLUSH)) -+ au_nwt_flush(&au_sbi(sb)->si_nowait); -+ return si_noflush_write_trylock(sb); -+} -+#endif -+ -+static inline void si_write_unlock(struct super_block *sb) -+{ -+ si_pid_clr(sb); -+ __si_write_unlock(sb); -+} -+ -+#if 0 /* reserved */ -+static inline void si_downgrade_lock(struct super_block *sb) -+{ -+ __si_downgrade_lock(sb); -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline aufs_bindex_t 
au_sbend(struct super_block *sb) -+{ -+ SiMustAnyLock(sb); -+ return au_sbi(sb)->si_bend; -+} -+ -+static inline unsigned int au_mntflags(struct super_block *sb) -+{ -+ SiMustAnyLock(sb); -+ return au_sbi(sb)->si_mntflags; -+} -+ -+static inline unsigned int au_sigen(struct super_block *sb) -+{ -+ SiMustAnyLock(sb); -+ return au_sbi(sb)->si_generation; -+} -+ -+static inline void au_ninodes_inc(struct super_block *sb) -+{ -+ atomic_long_inc(&au_sbi(sb)->si_ninodes); -+} -+ -+static inline void au_ninodes_dec(struct super_block *sb) -+{ -+ AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_ninodes)); -+ atomic_long_dec(&au_sbi(sb)->si_ninodes); -+} -+ -+static inline void au_nfiles_inc(struct super_block *sb) -+{ -+ atomic_long_inc(&au_sbi(sb)->si_nfiles); -+} -+ -+static inline void au_nfiles_dec(struct super_block *sb) -+{ -+ AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_nfiles)); -+ atomic_long_dec(&au_sbi(sb)->si_nfiles); -+} -+ -+static inline struct au_branch *au_sbr(struct super_block *sb, -+ aufs_bindex_t bindex) -+{ -+ SiMustAnyLock(sb); -+ return au_sbi(sb)->si_branch[0 + bindex]; -+} -+ -+static inline void au_xino_brid_set(struct super_block *sb, aufs_bindex_t brid) -+{ -+ SiMustWriteLock(sb); -+ au_sbi(sb)->si_xino_brid = brid; -+} -+ -+static inline aufs_bindex_t au_xino_brid(struct super_block *sb) -+{ -+ SiMustAnyLock(sb); -+ return au_sbi(sb)->si_xino_brid; -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_SUPER_H__ */ -diff --git a/fs/aufs/sysaufs.c b/fs/aufs/sysaufs.c -new file mode 100644 -index 0000000..75c9c24 ---- /dev/null -+++ b/fs/aufs/sysaufs.c -@@ -0,0 +1,104 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * sysfs interface and lifetime management -+ * they are necessary regardless sysfs is disabled. -+ */ -+ -+#include -+#include "aufs.h" -+ -+unsigned long sysaufs_si_mask; -+struct kset *sysaufs_kset; -+ -+#define AuSiAttr(_name) { \ -+ .attr = { .name = __stringify(_name), .mode = 0444 }, \ -+ .show = sysaufs_si_##_name, \ -+} -+ -+static struct sysaufs_si_attr sysaufs_si_attr_xi_path = AuSiAttr(xi_path); -+struct attribute *sysaufs_si_attrs[] = { -+ &sysaufs_si_attr_xi_path.attr, -+ NULL, -+}; -+ -+static const struct sysfs_ops au_sbi_ops = { -+ .show = sysaufs_si_show -+}; -+ -+static struct kobj_type au_sbi_ktype = { -+ .release = au_si_free, -+ .sysfs_ops = &au_sbi_ops, -+ .default_attrs = sysaufs_si_attrs -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+int sysaufs_si_init(struct au_sbinfo *sbinfo) -+{ -+ int err; -+ -+ sbinfo->si_kobj.kset = sysaufs_kset; -+ /* cf. 
sysaufs_name() */ -+ err = kobject_init_and_add -+ (&sbinfo->si_kobj, &au_sbi_ktype, /*&sysaufs_kset->kobj*/NULL, -+ SysaufsSiNamePrefix "%lx", sysaufs_si_id(sbinfo)); -+ -+ dbgaufs_si_null(sbinfo); -+ if (!err) { -+ err = dbgaufs_si_init(sbinfo); -+ if (unlikely(err)) -+ kobject_put(&sbinfo->si_kobj); -+ } -+ return err; -+} -+ -+void sysaufs_fin(void) -+{ -+ dbgaufs_fin(); -+ sysfs_remove_group(&sysaufs_kset->kobj, sysaufs_attr_group); -+ kset_unregister(sysaufs_kset); -+} -+ -+int __init sysaufs_init(void) -+{ -+ int err; -+ -+ do { -+ get_random_bytes(&sysaufs_si_mask, sizeof(sysaufs_si_mask)); -+ } while (!sysaufs_si_mask); -+ -+ err = -EINVAL; -+ sysaufs_kset = kset_create_and_add(AUFS_NAME, NULL, fs_kobj); -+ if (unlikely(!sysaufs_kset)) -+ goto out; -+ err = PTR_ERR(sysaufs_kset); -+ if (IS_ERR(sysaufs_kset)) -+ goto out; -+ err = sysfs_create_group(&sysaufs_kset->kobj, sysaufs_attr_group); -+ if (unlikely(err)) { -+ kset_unregister(sysaufs_kset); -+ goto out; -+ } -+ -+ err = dbgaufs_init(); -+ if (unlikely(err)) -+ sysaufs_fin(); -+out: -+ return err; -+} -diff --git a/fs/aufs/sysaufs.h b/fs/aufs/sysaufs.h -new file mode 100644 -index 0000000..14975c9 ---- /dev/null -+++ b/fs/aufs/sysaufs.h -@@ -0,0 +1,101 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * sysfs interface and mount lifetime management -+ */ -+ -+#ifndef __SYSAUFS_H__ -+#define __SYSAUFS_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include "module.h" -+ -+struct super_block; -+struct au_sbinfo; -+ -+struct sysaufs_si_attr { -+ struct attribute attr; -+ int (*show)(struct seq_file *seq, struct super_block *sb); -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* sysaufs.c */ -+extern unsigned long sysaufs_si_mask; -+extern struct kset *sysaufs_kset; -+extern struct attribute *sysaufs_si_attrs[]; -+int sysaufs_si_init(struct au_sbinfo *sbinfo); -+int __init sysaufs_init(void); -+void sysaufs_fin(void); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* some people doesn't like to show a pointer in kernel */ -+static inline unsigned long sysaufs_si_id(struct au_sbinfo *sbinfo) -+{ -+ return sysaufs_si_mask ^ (unsigned long)sbinfo; -+} -+ -+#define SysaufsSiNamePrefix "si_" -+#define SysaufsSiNameLen (sizeof(SysaufsSiNamePrefix) + 16) -+static inline void sysaufs_name(struct au_sbinfo *sbinfo, char *name) -+{ -+ snprintf(name, SysaufsSiNameLen, SysaufsSiNamePrefix "%lx", -+ sysaufs_si_id(sbinfo)); -+} -+ -+struct au_branch; -+#ifdef CONFIG_SYSFS -+/* sysfs.c */ -+extern struct attribute_group *sysaufs_attr_group; -+ -+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb); -+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr, -+ char *buf); -+long au_brinfo_ioctl(struct file *file, unsigned long arg); -+#ifdef CONFIG_COMPAT -+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg); -+#endif -+ -+void sysaufs_br_init(struct au_branch *br); -+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex); -+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex); -+ -+#define sysaufs_brs_init() do {} while (0) -+ -+#else -+#define sysaufs_attr_group NULL -+ -+AuStubInt0(sysaufs_si_xi_path, struct 
seq_file *seq, struct super_block *sb) -+AuStub(ssize_t, sysaufs_si_show, return 0, struct kobject *kobj, -+ struct attribute *attr, char *buf) -+AuStubVoid(sysaufs_br_init, struct au_branch *br) -+AuStubVoid(sysaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex) -+AuStubVoid(sysaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex) -+ -+static inline void sysaufs_brs_init(void) -+{ -+ sysaufs_brs = 0; -+} -+ -+#endif /* CONFIG_SYSFS */ -+ -+#endif /* __KERNEL__ */ -+#endif /* __SYSAUFS_H__ */ -diff --git a/fs/aufs/sysfs.c b/fs/aufs/sysfs.c -new file mode 100644 -index 0000000..b2d1888 ---- /dev/null -+++ b/fs/aufs/sysfs.c -@@ -0,0 +1,376 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * sysfs interface -+ */ -+ -+#include -+#include -+#include "aufs.h" -+ -+#ifdef CONFIG_AUFS_FS_MODULE -+/* this entry violates the "one line per file" policy of sysfs */ -+static ssize_t config_show(struct kobject *kobj, struct kobj_attribute *attr, -+ char *buf) -+{ -+ ssize_t err; -+ static char *conf = -+/* this file is generated at compiling */ -+#include "conf.str" -+ ; -+ -+ err = snprintf(buf, PAGE_SIZE, conf); -+ if (unlikely(err >= PAGE_SIZE)) -+ err = -EFBIG; -+ return err; -+} -+ -+static struct kobj_attribute au_config_attr = __ATTR_RO(config); -+#endif -+ -+static struct attribute *au_attr[] = { -+#ifdef CONFIG_AUFS_FS_MODULE -+ &au_config_attr.attr, -+#endif -+ NULL, /* need to NULL terminate the list of attributes */ -+}; -+ -+static struct attribute_group sysaufs_attr_group_body = { -+ .attrs = au_attr -+}; -+ -+struct attribute_group *sysaufs_attr_group = &sysaufs_attr_group_body; -+ -+/* ---------------------------------------------------------------------- */ -+ -+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb) -+{ -+ int err; -+ -+ SiMustAnyLock(sb); -+ -+ err = 0; -+ if (au_opt_test(au_mntflags(sb), XINO)) { -+ err = au_xino_path(seq, au_sbi(sb)->si_xib); -+ seq_putc(seq, '\n'); -+ } -+ return err; -+} -+ -+/* -+ * the lifetime of branch is independent from the entry under sysfs. -+ * sysfs handles the lifetime of the entry, and never call ->show() after it is -+ * unlinked. 
-+ */ -+static int sysaufs_si_br(struct seq_file *seq, struct super_block *sb, -+ aufs_bindex_t bindex, int idx) -+{ -+ int err; -+ struct path path; -+ struct dentry *root; -+ struct au_branch *br; -+ au_br_perm_str_t perm; -+ -+ AuDbg("b%d\n", bindex); -+ -+ err = 0; -+ root = sb->s_root; -+ di_read_lock_parent(root, !AuLock_IR); -+ br = au_sbr(sb, bindex); -+ -+ switch (idx) { -+ case AuBrSysfs_BR: -+ path.mnt = au_br_mnt(br); -+ path.dentry = au_h_dptr(root, bindex); -+ err = au_seq_path(seq, &path); -+ if (!err) { -+ au_optstr_br_perm(&perm, br->br_perm); -+ err = seq_printf(seq, "=%s\n", perm.a); -+ } -+ break; -+ case AuBrSysfs_BRID: -+ err = seq_printf(seq, "%d\n", br->br_id); -+ break; -+ } -+ di_read_unlock(root, !AuLock_IR); -+ if (err == -1) -+ err = -E2BIG; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct seq_file *au_seq(char *p, ssize_t len) -+{ -+ struct seq_file *seq; -+ -+ seq = kzalloc(sizeof(*seq), GFP_NOFS); -+ if (seq) { -+ /* mutex_init(&seq.lock); */ -+ seq->buf = p; -+ seq->size = len; -+ return seq; /* success */ -+ } -+ -+ seq = ERR_PTR(-ENOMEM); -+ return seq; -+} -+ -+#define SysaufsBr_PREFIX "br" -+#define SysaufsBrid_PREFIX "brid" -+ -+/* todo: file size may exceed PAGE_SIZE */ -+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr, -+ char *buf) -+{ -+ ssize_t err; -+ int idx; -+ long l; -+ aufs_bindex_t bend; -+ struct au_sbinfo *sbinfo; -+ struct super_block *sb; -+ struct seq_file *seq; -+ char *name; -+ struct attribute **cattr; -+ -+ sbinfo = container_of(kobj, struct au_sbinfo, si_kobj); -+ sb = sbinfo->si_sb; -+ -+ /* -+ * prevent a race condition between sysfs and aufs. -+ * for instance, sysfs_file_read() calls sysfs_get_active_two() which -+ * prohibits maintaining the sysfs entries. -+ * hew we acquire read lock after sysfs_get_active_two(). 
-+ * on the other hand, the remount process may maintain the sysfs/aufs -+ * entries after acquiring write lock. -+ * it can cause a deadlock. -+ * simply we gave up processing read here. -+ */ -+ err = -EBUSY; -+ if (unlikely(!si_noflush_read_trylock(sb))) -+ goto out; -+ -+ seq = au_seq(buf, PAGE_SIZE); -+ err = PTR_ERR(seq); -+ if (IS_ERR(seq)) -+ goto out_unlock; -+ -+ name = (void *)attr->name; -+ cattr = sysaufs_si_attrs; -+ while (*cattr) { -+ if (!strcmp(name, (*cattr)->name)) { -+ err = container_of(*cattr, struct sysaufs_si_attr, attr) -+ ->show(seq, sb); -+ goto out_seq; -+ } -+ cattr++; -+ } -+ -+ if (!strncmp(name, SysaufsBrid_PREFIX, -+ sizeof(SysaufsBrid_PREFIX) - 1)) { -+ idx = AuBrSysfs_BRID; -+ name += sizeof(SysaufsBrid_PREFIX) - 1; -+ } else if (!strncmp(name, SysaufsBr_PREFIX, -+ sizeof(SysaufsBr_PREFIX) - 1)) { -+ idx = AuBrSysfs_BR; -+ name += sizeof(SysaufsBr_PREFIX) - 1; -+ } else -+ BUG(); -+ -+ err = kstrtol(name, 10, &l); -+ if (!err) { -+ bend = au_sbend(sb); -+ if (l <= bend) -+ err = sysaufs_si_br(seq, sb, (aufs_bindex_t)l, idx); -+ else -+ err = -ENOENT; -+ } -+ -+out_seq: -+ if (!err) { -+ err = seq->count; -+ /* sysfs limit */ -+ if (unlikely(err == PAGE_SIZE)) -+ err = -EFBIG; -+ } -+ kfree(seq); -+out_unlock: -+ si_read_unlock(sb); -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_brinfo(struct super_block *sb, union aufs_brinfo __user *arg) -+{ -+ int err; -+ int16_t brid; -+ aufs_bindex_t bindex, bend; -+ size_t sz; -+ char *buf; -+ struct seq_file *seq; -+ struct au_branch *br; -+ -+ si_read_lock(sb, AuLock_FLUSH); -+ bend = au_sbend(sb); -+ err = bend + 1; -+ if (!arg) -+ goto out; -+ -+ err = -ENOMEM; -+ buf = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!buf)) -+ goto out; -+ -+ seq = au_seq(buf, PAGE_SIZE); -+ err = PTR_ERR(seq); -+ if (IS_ERR(seq)) -+ goto out_buf; -+ -+ sz = sizeof(*arg) - offsetof(union aufs_brinfo, path); -+ for (bindex 
= 0; bindex <= bend; bindex++, arg++) { -+ err = !access_ok(VERIFY_WRITE, arg, sizeof(*arg)); -+ if (unlikely(err)) -+ break; -+ -+ br = au_sbr(sb, bindex); -+ brid = br->br_id; -+ BUILD_BUG_ON(sizeof(brid) != sizeof(arg->id)); -+ err = __put_user(brid, &arg->id); -+ if (unlikely(err)) -+ break; -+ -+ BUILD_BUG_ON(sizeof(br->br_perm) != sizeof(arg->perm)); -+ err = __put_user(br->br_perm, &arg->perm); -+ if (unlikely(err)) -+ break; -+ -+ err = au_seq_path(seq, &br->br_path); -+ if (unlikely(err)) -+ break; -+ err = seq_putc(seq, '\0'); -+ if (!err && seq->count <= sz) { -+ err = copy_to_user(arg->path, seq->buf, seq->count); -+ seq->count = 0; -+ if (unlikely(err)) -+ break; -+ } else { -+ err = -E2BIG; -+ goto out_seq; -+ } -+ } -+ if (unlikely(err)) -+ err = -EFAULT; -+ -+out_seq: -+ kfree(seq); -+out_buf: -+ free_page((unsigned long)buf); -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+long au_brinfo_ioctl(struct file *file, unsigned long arg) -+{ -+ return au_brinfo(file->f_dentry->d_sb, (void __user *)arg); -+} -+ -+#ifdef CONFIG_COMPAT -+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg) -+{ -+ return au_brinfo(file->f_dentry->d_sb, compat_ptr(arg)); -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+void sysaufs_br_init(struct au_branch *br) -+{ -+ int i; -+ struct au_brsysfs *br_sysfs; -+ struct attribute *attr; -+ -+ br_sysfs = br->br_sysfs; -+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) { -+ attr = &br_sysfs->attr; -+ sysfs_attr_init(attr); -+ attr->name = br_sysfs->name; -+ attr->mode = S_IRUGO; -+ br_sysfs++; -+ } -+} -+ -+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ struct au_branch *br; -+ struct kobject *kobj; -+ struct au_brsysfs *br_sysfs; -+ int i; -+ aufs_bindex_t bend; -+ -+ dbgaufs_brs_del(sb, bindex); -+ -+ if (!sysaufs_brs) -+ return; -+ -+ kobj = &au_sbi(sb)->si_kobj; -+ bend = au_sbend(sb); -+ for (; bindex <= bend; bindex++) { -+ br = 
au_sbr(sb, bindex); -+ br_sysfs = br->br_sysfs; -+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) { -+ sysfs_remove_file(kobj, &br_sysfs->attr); -+ br_sysfs++; -+ } -+ } -+} -+ -+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ int err, i; -+ aufs_bindex_t bend; -+ struct kobject *kobj; -+ struct au_branch *br; -+ struct au_brsysfs *br_sysfs; -+ -+ dbgaufs_brs_add(sb, bindex); -+ -+ if (!sysaufs_brs) -+ return; -+ -+ kobj = &au_sbi(sb)->si_kobj; -+ bend = au_sbend(sb); -+ for (; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ br_sysfs = br->br_sysfs; -+ snprintf(br_sysfs[AuBrSysfs_BR].name, sizeof(br_sysfs->name), -+ SysaufsBr_PREFIX "%d", bindex); -+ snprintf(br_sysfs[AuBrSysfs_BRID].name, sizeof(br_sysfs->name), -+ SysaufsBrid_PREFIX "%d", bindex); -+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) { -+ err = sysfs_create_file(kobj, &br_sysfs->attr); -+ if (unlikely(err)) -+ pr_warn("failed %s under sysfs(%d)\n", -+ br_sysfs->name, err); -+ br_sysfs++; -+ } -+ } -+} -diff --git a/fs/aufs/sysrq.c b/fs/aufs/sysrq.c -new file mode 100644 -index 0000000..057c23e ---- /dev/null -+++ b/fs/aufs/sysrq.c -@@ -0,0 +1,157 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * magic sysrq hanlder -+ */ -+ -+/* #include */ -+#include -+#include "aufs.h" -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void sysrq_sb(struct super_block *sb) -+{ -+ char *plevel; -+ struct au_sbinfo *sbinfo; -+ struct file *file; -+ struct au_sphlhead *files; -+ struct au_finfo *finfo; -+ -+ plevel = au_plevel; -+ au_plevel = KERN_WARNING; -+ -+ /* since we define pr_fmt, call printk directly */ -+#define pr(str) printk(KERN_WARNING AUFS_NAME ": " str) -+ -+ sbinfo = au_sbi(sb); -+ printk(KERN_WARNING "si=%lx\n", sysaufs_si_id(sbinfo)); -+ pr("superblock\n"); -+ au_dpri_sb(sb); -+ -+#if 0 -+ pr("root dentry\n"); -+ au_dpri_dentry(sb->s_root); -+ pr("root inode\n"); -+ au_dpri_inode(sb->s_root->d_inode); -+#endif -+ -+#if 0 -+ do { -+ int err, i, j, ndentry; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ -+ err = au_dpages_init(&dpages, GFP_ATOMIC); -+ if (unlikely(err)) -+ break; -+ err = au_dcsub_pages(&dpages, sb->s_root, NULL, NULL); -+ if (!err) -+ for (i = 0; i < dpages.ndpage; i++) { -+ dpage = dpages.dpages + i; -+ ndentry = dpage->ndentry; -+ for (j = 0; j < ndentry; j++) -+ au_dpri_dentry(dpage->dentries[j]); -+ } -+ au_dpages_free(&dpages); -+ } while (0); -+#endif -+ -+#if 1 -+ { -+ struct inode *i; -+ -+ pr("isolated inode\n"); -+ spin_lock(&inode_sb_list_lock); -+ list_for_each_entry(i, &sb->s_inodes, i_sb_list) { -+ spin_lock(&i->i_lock); -+ if (1 || hlist_empty(&i->i_dentry)) -+ au_dpri_inode(i); -+ spin_unlock(&i->i_lock); -+ } -+ spin_unlock(&inode_sb_list_lock); -+ } -+#endif -+ pr("files\n"); -+ files = &au_sbi(sb)->si_files; -+ spin_lock(&files->spin); -+ hlist_for_each_entry(finfo, &files->head, fi_hlist) { -+ umode_t mode; -+ -+ file = finfo->fi_file; -+ mode = file_inode(file)->i_mode; -+ if (!special_file(mode)) -+ au_dpri_file(file); -+ } -+ spin_unlock(&files->spin); -+ pr("done\n"); -+ -+#undef pr -+ au_plevel = plevel; -+} -+ -+/* 
---------------------------------------------------------------------- */ -+ -+/* module parameter */ -+static char *aufs_sysrq_key = "a"; -+module_param_named(sysrq, aufs_sysrq_key, charp, S_IRUGO); -+MODULE_PARM_DESC(sysrq, "MagicSysRq key for " AUFS_NAME); -+ -+static void au_sysrq(int key __maybe_unused) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ lockdep_off(); -+ au_sbilist_lock(); -+ hlist_for_each_entry(sbinfo, &au_sbilist.head, si_list) -+ sysrq_sb(sbinfo->si_sb); -+ au_sbilist_unlock(); -+ lockdep_on(); -+} -+ -+static struct sysrq_key_op au_sysrq_op = { -+ .handler = au_sysrq, -+ .help_msg = "Aufs", -+ .action_msg = "Aufs", -+ .enable_mask = SYSRQ_ENABLE_DUMP -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+int __init au_sysrq_init(void) -+{ -+ int err; -+ char key; -+ -+ err = -1; -+ key = *aufs_sysrq_key; -+ if ('a' <= key && key <= 'z') -+ err = register_sysrq_key(key, &au_sysrq_op); -+ if (unlikely(err)) -+ pr_err("err %d, sysrq=%c\n", err, key); -+ return err; -+} -+ -+void au_sysrq_fin(void) -+{ -+ int err; -+ -+ err = unregister_sysrq_key(*aufs_sysrq_key, &au_sysrq_op); -+ if (unlikely(err)) -+ pr_err("err %d (ignored)\n", err); -+} -diff --git a/fs/aufs/vdir.c b/fs/aufs/vdir.c -new file mode 100644 -index 0000000..f942d16 ---- /dev/null -+++ b/fs/aufs/vdir.c -@@ -0,0 +1,888 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * virtual or vertical directory -+ */ -+ -+#include "aufs.h" -+ -+static unsigned int calc_size(int nlen) -+{ -+ return ALIGN(sizeof(struct au_vdir_de) + nlen, sizeof(ino_t)); -+} -+ -+static int set_deblk_end(union au_vdir_deblk_p *p, -+ union au_vdir_deblk_p *deblk_end) -+{ -+ if (calc_size(0) <= deblk_end->deblk - p->deblk) { -+ p->de->de_str.len = 0; -+ /* smp_mb(); */ -+ return 0; -+ } -+ return -1; /* error */ -+} -+ -+/* returns true or false */ -+static int is_deblk_end(union au_vdir_deblk_p *p, -+ union au_vdir_deblk_p *deblk_end) -+{ -+ if (calc_size(0) <= deblk_end->deblk - p->deblk) -+ return !p->de->de_str.len; -+ return 1; -+} -+ -+static unsigned char *last_deblk(struct au_vdir *vdir) -+{ -+ return vdir->vd_deblk[vdir->vd_nblk - 1]; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* estimate the appropriate size for name hash table */ -+unsigned int au_rdhash_est(loff_t sz) -+{ -+ unsigned int n; -+ -+ n = UINT_MAX; -+ sz >>= 10; -+ if (sz < n) -+ n = sz; -+ if (sz < AUFS_RDHASH_DEF) -+ n = AUFS_RDHASH_DEF; -+ /* pr_info("n %u\n", n); */ -+ return n; -+} -+ -+/* -+ * the allocated memory has to be freed by -+ * au_nhash_wh_free() or au_nhash_de_free(). 
-+ */ -+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp) -+{ -+ struct hlist_head *head; -+ unsigned int u; -+ size_t sz; -+ -+ sz = sizeof(*nhash->nh_head) * num_hash; -+ head = kmalloc(sz, gfp); -+ if (head) { -+ nhash->nh_num = num_hash; -+ nhash->nh_head = head; -+ for (u = 0; u < num_hash; u++) -+ INIT_HLIST_HEAD(head++); -+ return 0; /* success */ -+ } -+ -+ return -ENOMEM; -+} -+ -+static void nhash_count(struct hlist_head *head) -+{ -+#if 0 -+ unsigned long n; -+ struct hlist_node *pos; -+ -+ n = 0; -+ hlist_for_each(pos, head) -+ n++; -+ pr_info("%lu\n", n); -+#endif -+} -+ -+static void au_nhash_wh_do_free(struct hlist_head *head) -+{ -+ struct au_vdir_wh *pos; -+ struct hlist_node *node; -+ -+ hlist_for_each_entry_safe(pos, node, head, wh_hash) -+ kfree(pos); -+} -+ -+static void au_nhash_de_do_free(struct hlist_head *head) -+{ -+ struct au_vdir_dehstr *pos; -+ struct hlist_node *node; -+ -+ hlist_for_each_entry_safe(pos, node, head, hash) -+ au_cache_free_vdir_dehstr(pos); -+} -+ -+static void au_nhash_do_free(struct au_nhash *nhash, -+ void (*free)(struct hlist_head *head)) -+{ -+ unsigned int n; -+ struct hlist_head *head; -+ -+ n = nhash->nh_num; -+ if (!n) -+ return; -+ -+ head = nhash->nh_head; -+ while (n-- > 0) { -+ nhash_count(head); -+ free(head++); -+ } -+ kfree(nhash->nh_head); -+} -+ -+void au_nhash_wh_free(struct au_nhash *whlist) -+{ -+ au_nhash_do_free(whlist, au_nhash_wh_do_free); -+} -+ -+static void au_nhash_de_free(struct au_nhash *delist) -+{ -+ au_nhash_do_free(delist, au_nhash_de_do_free); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt, -+ int limit) -+{ -+ int num; -+ unsigned int u, n; -+ struct hlist_head *head; -+ struct au_vdir_wh *pos; -+ -+ num = 0; -+ n = whlist->nh_num; -+ head = whlist->nh_head; -+ for (u = 0; u < n; u++, head++) -+ hlist_for_each_entry(pos, head, wh_hash) -+ if 
(pos->wh_bindex == btgt && ++num > limit) -+ return 1; -+ return 0; -+} -+ -+static struct hlist_head *au_name_hash(struct au_nhash *nhash, -+ unsigned char *name, -+ unsigned int len) -+{ -+ unsigned int v; -+ /* const unsigned int magic_bit = 12; */ -+ -+ AuDebugOn(!nhash->nh_num || !nhash->nh_head); -+ -+ v = 0; -+ while (len--) -+ v += *name++; -+ /* v = hash_long(v, magic_bit); */ -+ v %= nhash->nh_num; -+ return nhash->nh_head + v; -+} -+ -+static int au_nhash_test_name(struct au_vdir_destr *str, const char *name, -+ int nlen) -+{ -+ return str->len == nlen && !memcmp(str->name, name, nlen); -+} -+ -+/* returns found or not */ -+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen) -+{ -+ struct hlist_head *head; -+ struct au_vdir_wh *pos; -+ struct au_vdir_destr *str; -+ -+ head = au_name_hash(whlist, name, nlen); -+ hlist_for_each_entry(pos, head, wh_hash) { -+ str = &pos->wh_str; -+ AuDbg("%.*s\n", str->len, str->name); -+ if (au_nhash_test_name(str, name, nlen)) -+ return 1; -+ } -+ return 0; -+} -+ -+/* returns found(true) or not */ -+static int test_known(struct au_nhash *delist, char *name, int nlen) -+{ -+ struct hlist_head *head; -+ struct au_vdir_dehstr *pos; -+ struct au_vdir_destr *str; -+ -+ head = au_name_hash(delist, name, nlen); -+ hlist_for_each_entry(pos, head, hash) { -+ str = pos->str; -+ AuDbg("%.*s\n", str->len, str->name); -+ if (au_nhash_test_name(str, name, nlen)) -+ return 1; -+ } -+ return 0; -+} -+ -+static void au_shwh_init_wh(struct au_vdir_wh *wh, ino_t ino, -+ unsigned char d_type) -+{ -+#ifdef CONFIG_AUFS_SHWH -+ wh->wh_ino = ino; -+ wh->wh_type = d_type; -+#endif -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino, -+ unsigned int d_type, aufs_bindex_t bindex, -+ unsigned char shwh) -+{ -+ int err; -+ struct au_vdir_destr *str; -+ struct au_vdir_wh *wh; -+ -+ AuDbg("%.*s\n", nlen, name); 
-+ AuDebugOn(!whlist->nh_num || !whlist->nh_head); -+ -+ err = -ENOMEM; -+ wh = kmalloc(sizeof(*wh) + nlen, GFP_NOFS); -+ if (unlikely(!wh)) -+ goto out; -+ -+ err = 0; -+ wh->wh_bindex = bindex; -+ if (shwh) -+ au_shwh_init_wh(wh, ino, d_type); -+ str = &wh->wh_str; -+ str->len = nlen; -+ memcpy(str->name, name, nlen); -+ hlist_add_head(&wh->wh_hash, au_name_hash(whlist, name, nlen)); -+ /* smp_mb(); */ -+ -+out: -+ return err; -+} -+ -+static int append_deblk(struct au_vdir *vdir) -+{ -+ int err; -+ unsigned long ul; -+ const unsigned int deblk_sz = vdir->vd_deblk_sz; -+ union au_vdir_deblk_p p, deblk_end; -+ unsigned char **o; -+ -+ err = -ENOMEM; -+ o = krealloc(vdir->vd_deblk, sizeof(*o) * (vdir->vd_nblk + 1), -+ GFP_NOFS); -+ if (unlikely(!o)) -+ goto out; -+ -+ vdir->vd_deblk = o; -+ p.deblk = kmalloc(deblk_sz, GFP_NOFS); -+ if (p.deblk) { -+ ul = vdir->vd_nblk++; -+ vdir->vd_deblk[ul] = p.deblk; -+ vdir->vd_last.ul = ul; -+ vdir->vd_last.p.deblk = p.deblk; -+ deblk_end.deblk = p.deblk + deblk_sz; -+ err = set_deblk_end(&p, &deblk_end); -+ } -+ -+out: -+ return err; -+} -+ -+static int append_de(struct au_vdir *vdir, char *name, int nlen, ino_t ino, -+ unsigned int d_type, struct au_nhash *delist) -+{ -+ int err; -+ unsigned int sz; -+ const unsigned int deblk_sz = vdir->vd_deblk_sz; -+ union au_vdir_deblk_p p, *room, deblk_end; -+ struct au_vdir_dehstr *dehstr; -+ -+ p.deblk = last_deblk(vdir); -+ deblk_end.deblk = p.deblk + deblk_sz; -+ room = &vdir->vd_last.p; -+ AuDebugOn(room->deblk < p.deblk || deblk_end.deblk <= room->deblk -+ || !is_deblk_end(room, &deblk_end)); -+ -+ sz = calc_size(nlen); -+ if (unlikely(sz > deblk_end.deblk - room->deblk)) { -+ err = append_deblk(vdir); -+ if (unlikely(err)) -+ goto out; -+ -+ p.deblk = last_deblk(vdir); -+ deblk_end.deblk = p.deblk + deblk_sz; -+ /* smp_mb(); */ -+ AuDebugOn(room->deblk != p.deblk); -+ } -+ -+ err = -ENOMEM; -+ dehstr = au_cache_alloc_vdir_dehstr(); -+ if (unlikely(!dehstr)) -+ goto out; -+ -+ 
dehstr->str = &room->de->de_str; -+ hlist_add_head(&dehstr->hash, au_name_hash(delist, name, nlen)); -+ room->de->de_ino = ino; -+ room->de->de_type = d_type; -+ room->de->de_str.len = nlen; -+ memcpy(room->de->de_str.name, name, nlen); -+ -+ err = 0; -+ room->deblk += sz; -+ if (unlikely(set_deblk_end(room, &deblk_end))) -+ err = append_deblk(vdir); -+ /* smp_mb(); */ -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_vdir_free(struct au_vdir *vdir) -+{ -+ unsigned char **deblk; -+ -+ deblk = vdir->vd_deblk; -+ while (vdir->vd_nblk--) -+ kfree(*deblk++); -+ kfree(vdir->vd_deblk); -+ au_cache_free_vdir(vdir); -+} -+ -+static struct au_vdir *alloc_vdir(struct file *file) -+{ -+ struct au_vdir *vdir; -+ struct super_block *sb; -+ int err; -+ -+ sb = file->f_dentry->d_sb; -+ SiMustAnyLock(sb); -+ -+ err = -ENOMEM; -+ vdir = au_cache_alloc_vdir(); -+ if (unlikely(!vdir)) -+ goto out; -+ -+ vdir->vd_deblk = kzalloc(sizeof(*vdir->vd_deblk), GFP_NOFS); -+ if (unlikely(!vdir->vd_deblk)) -+ goto out_free; -+ -+ vdir->vd_deblk_sz = au_sbi(sb)->si_rdblk; -+ if (!vdir->vd_deblk_sz) { -+ /* estimate the appropriate size for deblk */ -+ vdir->vd_deblk_sz = au_dir_size(file, /*dentry*/NULL); -+ /* pr_info("vd_deblk_sz %u\n", vdir->vd_deblk_sz); */ -+ } -+ vdir->vd_nblk = 0; -+ vdir->vd_version = 0; -+ vdir->vd_jiffy = 0; -+ err = append_deblk(vdir); -+ if (!err) -+ return vdir; /* success */ -+ -+ kfree(vdir->vd_deblk); -+ -+out_free: -+ au_cache_free_vdir(vdir); -+out: -+ vdir = ERR_PTR(err); -+ return vdir; -+} -+ -+static int reinit_vdir(struct au_vdir *vdir) -+{ -+ int err; -+ union au_vdir_deblk_p p, deblk_end; -+ -+ while (vdir->vd_nblk > 1) { -+ kfree(vdir->vd_deblk[vdir->vd_nblk - 1]); -+ /* vdir->vd_deblk[vdir->vd_nblk - 1] = NULL; */ -+ vdir->vd_nblk--; -+ } -+ p.deblk = vdir->vd_deblk[0]; -+ deblk_end.deblk = p.deblk + vdir->vd_deblk_sz; -+ err = set_deblk_end(&p, &deblk_end); -+ /* keep 
vd_dblk_sz */ -+ vdir->vd_last.ul = 0; -+ vdir->vd_last.p.deblk = vdir->vd_deblk[0]; -+ vdir->vd_version = 0; -+ vdir->vd_jiffy = 0; -+ /* smp_mb(); */ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define AuFillVdir_CALLED 1 -+#define AuFillVdir_WHABLE (1 << 1) -+#define AuFillVdir_SHWH (1 << 2) -+#define au_ftest_fillvdir(flags, name) ((flags) & AuFillVdir_##name) -+#define au_fset_fillvdir(flags, name) \ -+ do { (flags) |= AuFillVdir_##name; } while (0) -+#define au_fclr_fillvdir(flags, name) \ -+ do { (flags) &= ~AuFillVdir_##name; } while (0) -+ -+#ifndef CONFIG_AUFS_SHWH -+#undef AuFillVdir_SHWH -+#define AuFillVdir_SHWH 0 -+#endif -+ -+struct fillvdir_arg { -+ struct dir_context ctx; -+ struct file *file; -+ struct au_vdir *vdir; -+ struct au_nhash delist; -+ struct au_nhash whlist; -+ aufs_bindex_t bindex; -+ unsigned int flags; -+ int err; -+}; -+ -+static int fillvdir(struct dir_context *ctx, const char *__name, int nlen, -+ loff_t offset __maybe_unused, u64 h_ino, -+ unsigned int d_type) -+{ -+ struct fillvdir_arg *arg = container_of(ctx, struct fillvdir_arg, ctx); -+ char *name = (void *)__name; -+ struct super_block *sb; -+ ino_t ino; -+ const unsigned char shwh = !!au_ftest_fillvdir(arg->flags, SHWH); -+ -+ arg->err = 0; -+ sb = arg->file->f_dentry->d_sb; -+ au_fset_fillvdir(arg->flags, CALLED); -+ /* smp_mb(); */ -+ if (nlen <= AUFS_WH_PFX_LEN -+ || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) { -+ if (test_known(&arg->delist, name, nlen) -+ || au_nhash_test_known_wh(&arg->whlist, name, nlen)) -+ goto out; /* already exists or whiteouted */ -+ -+ arg->err = au_ino(sb, arg->bindex, h_ino, d_type, &ino); -+ if (!arg->err) { -+ if (unlikely(nlen > AUFS_MAX_NAMELEN)) -+ d_type = DT_UNKNOWN; -+ arg->err = append_de(arg->vdir, name, nlen, ino, -+ d_type, &arg->delist); -+ } -+ } else if (au_ftest_fillvdir(arg->flags, WHABLE)) { -+ name += AUFS_WH_PFX_LEN; -+ nlen -= AUFS_WH_PFX_LEN; -+ if 
(au_nhash_test_known_wh(&arg->whlist, name, nlen)) -+ goto out; /* already whiteouted */ -+ -+ if (shwh) -+ arg->err = au_wh_ino(sb, arg->bindex, h_ino, d_type, -+ &ino); -+ if (!arg->err) { -+ if (nlen <= AUFS_MAX_NAMELEN + AUFS_WH_PFX_LEN) -+ d_type = DT_UNKNOWN; -+ arg->err = au_nhash_append_wh -+ (&arg->whlist, name, nlen, ino, d_type, -+ arg->bindex, shwh); -+ } -+ } -+ -+out: -+ if (!arg->err) -+ arg->vdir->vd_jiffy = jiffies; -+ /* smp_mb(); */ -+ AuTraceErr(arg->err); -+ return arg->err; -+} -+ -+static int au_handle_shwh(struct super_block *sb, struct au_vdir *vdir, -+ struct au_nhash *whlist, struct au_nhash *delist) -+{ -+#ifdef CONFIG_AUFS_SHWH -+ int err; -+ unsigned int nh, u; -+ struct hlist_head *head; -+ struct au_vdir_wh *pos; -+ struct hlist_node *n; -+ char *p, *o; -+ struct au_vdir_destr *destr; -+ -+ AuDebugOn(!au_opt_test(au_mntflags(sb), SHWH)); -+ -+ err = -ENOMEM; -+ o = p = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!p)) -+ goto out; -+ -+ err = 0; -+ nh = whlist->nh_num; -+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN); -+ p += AUFS_WH_PFX_LEN; -+ for (u = 0; u < nh; u++) { -+ head = whlist->nh_head + u; -+ hlist_for_each_entry_safe(pos, n, head, wh_hash) { -+ destr = &pos->wh_str; -+ memcpy(p, destr->name, destr->len); -+ err = append_de(vdir, o, destr->len + AUFS_WH_PFX_LEN, -+ pos->wh_ino, pos->wh_type, delist); -+ if (unlikely(err)) -+ break; -+ } -+ } -+ -+ free_page((unsigned long)o); -+ -+out: -+ AuTraceErr(err); -+ return err; -+#else -+ return 0; -+#endif -+} -+ -+static int au_do_read_vdir(struct fillvdir_arg *arg) -+{ -+ int err; -+ unsigned int rdhash; -+ loff_t offset; -+ aufs_bindex_t bend, bindex, bstart; -+ unsigned char shwh; -+ struct file *hf, *file; -+ struct super_block *sb; -+ -+ file = arg->file; -+ sb = file->f_dentry->d_sb; -+ SiMustAnyLock(sb); -+ -+ rdhash = au_sbi(sb)->si_rdhash; -+ if (!rdhash) -+ rdhash = au_rdhash_est(au_dir_size(file, /*dentry*/NULL)); -+ err = au_nhash_alloc(&arg->delist, rdhash, 
GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_nhash_alloc(&arg->whlist, rdhash, GFP_NOFS); -+ if (unlikely(err)) -+ goto out_delist; -+ -+ err = 0; -+ arg->flags = 0; -+ shwh = 0; -+ if (au_opt_test(au_mntflags(sb), SHWH)) { -+ shwh = 1; -+ au_fset_fillvdir(arg->flags, SHWH); -+ } -+ bstart = au_fbstart(file); -+ bend = au_fbend_dir(file); -+ for (bindex = bstart; !err && bindex <= bend; bindex++) { -+ hf = au_hf_dir(file, bindex); -+ if (!hf) -+ continue; -+ -+ offset = vfsub_llseek(hf, 0, SEEK_SET); -+ err = offset; -+ if (unlikely(offset)) -+ break; -+ -+ arg->bindex = bindex; -+ au_fclr_fillvdir(arg->flags, WHABLE); -+ if (shwh -+ || (bindex != bend -+ && au_br_whable(au_sbr_perm(sb, bindex)))) -+ au_fset_fillvdir(arg->flags, WHABLE); -+ do { -+ arg->err = 0; -+ au_fclr_fillvdir(arg->flags, CALLED); -+ /* smp_mb(); */ -+ err = vfsub_iterate_dir(hf, &arg->ctx); -+ if (err >= 0) -+ err = arg->err; -+ } while (!err && au_ftest_fillvdir(arg->flags, CALLED)); -+ -+ /* -+ * dir_relax() may be good for concurrency, but aufs should not -+ * use it since it will cause a lockdep problem. 
-+ */ -+ } -+ -+ if (!err && shwh) -+ err = au_handle_shwh(sb, arg->vdir, &arg->whlist, &arg->delist); -+ -+ au_nhash_wh_free(&arg->whlist); -+ -+out_delist: -+ au_nhash_de_free(&arg->delist); -+out: -+ return err; -+} -+ -+static int read_vdir(struct file *file, int may_read) -+{ -+ int err; -+ unsigned long expire; -+ unsigned char do_read; -+ struct fillvdir_arg arg = { -+ .ctx = { -+ .actor = au_diractor(fillvdir) -+ } -+ }; -+ struct inode *inode; -+ struct au_vdir *vdir, *allocated; -+ -+ err = 0; -+ inode = file_inode(file); -+ IMustLock(inode); -+ SiMustAnyLock(inode->i_sb); -+ -+ allocated = NULL; -+ do_read = 0; -+ expire = au_sbi(inode->i_sb)->si_rdcache; -+ vdir = au_ivdir(inode); -+ if (!vdir) { -+ do_read = 1; -+ vdir = alloc_vdir(file); -+ err = PTR_ERR(vdir); -+ if (IS_ERR(vdir)) -+ goto out; -+ err = 0; -+ allocated = vdir; -+ } else if (may_read -+ && (inode->i_version != vdir->vd_version -+ || time_after(jiffies, vdir->vd_jiffy + expire))) { -+ do_read = 1; -+ err = reinit_vdir(vdir); -+ if (unlikely(err)) -+ goto out; -+ } -+ -+ if (!do_read) -+ return 0; /* success */ -+ -+ arg.file = file; -+ arg.vdir = vdir; -+ err = au_do_read_vdir(&arg); -+ if (!err) { -+ /* file->f_pos = 0; */ /* todo: ctx->pos? 
*/ -+ vdir->vd_version = inode->i_version; -+ vdir->vd_last.ul = 0; -+ vdir->vd_last.p.deblk = vdir->vd_deblk[0]; -+ if (allocated) -+ au_set_ivdir(inode, allocated); -+ } else if (allocated) -+ au_vdir_free(allocated); -+ -+out: -+ return err; -+} -+ -+static int copy_vdir(struct au_vdir *tgt, struct au_vdir *src) -+{ -+ int err, rerr; -+ unsigned long ul, n; -+ const unsigned int deblk_sz = src->vd_deblk_sz; -+ -+ AuDebugOn(tgt->vd_nblk != 1); -+ -+ err = -ENOMEM; -+ if (tgt->vd_nblk < src->vd_nblk) { -+ unsigned char **p; -+ -+ p = krealloc(tgt->vd_deblk, sizeof(*p) * src->vd_nblk, -+ GFP_NOFS); -+ if (unlikely(!p)) -+ goto out; -+ tgt->vd_deblk = p; -+ } -+ -+ if (tgt->vd_deblk_sz != deblk_sz) { -+ unsigned char *p; -+ -+ tgt->vd_deblk_sz = deblk_sz; -+ p = krealloc(tgt->vd_deblk[0], deblk_sz, GFP_NOFS); -+ if (unlikely(!p)) -+ goto out; -+ tgt->vd_deblk[0] = p; -+ } -+ memcpy(tgt->vd_deblk[0], src->vd_deblk[0], deblk_sz); -+ tgt->vd_version = src->vd_version; -+ tgt->vd_jiffy = src->vd_jiffy; -+ -+ n = src->vd_nblk; -+ for (ul = 1; ul < n; ul++) { -+ tgt->vd_deblk[ul] = kmemdup(src->vd_deblk[ul], deblk_sz, -+ GFP_NOFS); -+ if (unlikely(!tgt->vd_deblk[ul])) -+ goto out; -+ tgt->vd_nblk++; -+ } -+ tgt->vd_nblk = n; -+ tgt->vd_last.ul = tgt->vd_last.ul; -+ tgt->vd_last.p.deblk = tgt->vd_deblk[tgt->vd_last.ul]; -+ tgt->vd_last.p.deblk += src->vd_last.p.deblk -+ - src->vd_deblk[src->vd_last.ul]; -+ /* smp_mb(); */ -+ return 0; /* success */ -+ -+out: -+ rerr = reinit_vdir(tgt); -+ BUG_ON(rerr); -+ return err; -+} -+ -+int au_vdir_init(struct file *file) -+{ -+ int err; -+ struct inode *inode; -+ struct au_vdir *vdir_cache, *allocated; -+ -+ /* test file->f_pos here instead of ctx->pos */ -+ err = read_vdir(file, !file->f_pos); -+ if (unlikely(err)) -+ goto out; -+ -+ allocated = NULL; -+ vdir_cache = au_fvdir_cache(file); -+ if (!vdir_cache) { -+ vdir_cache = alloc_vdir(file); -+ err = PTR_ERR(vdir_cache); -+ if (IS_ERR(vdir_cache)) -+ goto out; -+ allocated = 
vdir_cache; -+ } else if (!file->f_pos && vdir_cache->vd_version != file->f_version) { -+ /* test file->f_pos here instead of ctx->pos */ -+ err = reinit_vdir(vdir_cache); -+ if (unlikely(err)) -+ goto out; -+ } else -+ return 0; /* success */ -+ -+ inode = file_inode(file); -+ err = copy_vdir(vdir_cache, au_ivdir(inode)); -+ if (!err) { -+ file->f_version = inode->i_version; -+ if (allocated) -+ au_set_fvdir_cache(file, allocated); -+ } else if (allocated) -+ au_vdir_free(allocated); -+ -+out: -+ return err; -+} -+ -+static loff_t calc_offset(struct au_vdir *vdir) -+{ -+ loff_t offset; -+ union au_vdir_deblk_p p; -+ -+ p.deblk = vdir->vd_deblk[vdir->vd_last.ul]; -+ offset = vdir->vd_last.p.deblk - p.deblk; -+ offset += vdir->vd_deblk_sz * vdir->vd_last.ul; -+ return offset; -+} -+ -+/* returns true or false */ -+static int seek_vdir(struct file *file, struct dir_context *ctx) -+{ -+ int valid; -+ unsigned int deblk_sz; -+ unsigned long ul, n; -+ loff_t offset; -+ union au_vdir_deblk_p p, deblk_end; -+ struct au_vdir *vdir_cache; -+ -+ valid = 1; -+ vdir_cache = au_fvdir_cache(file); -+ offset = calc_offset(vdir_cache); -+ AuDbg("offset %lld\n", offset); -+ if (ctx->pos == offset) -+ goto out; -+ -+ vdir_cache->vd_last.ul = 0; -+ vdir_cache->vd_last.p.deblk = vdir_cache->vd_deblk[0]; -+ if (!ctx->pos) -+ goto out; -+ -+ valid = 0; -+ deblk_sz = vdir_cache->vd_deblk_sz; -+ ul = div64_u64(ctx->pos, deblk_sz); -+ AuDbg("ul %lu\n", ul); -+ if (ul >= vdir_cache->vd_nblk) -+ goto out; -+ -+ n = vdir_cache->vd_nblk; -+ for (; ul < n; ul++) { -+ p.deblk = vdir_cache->vd_deblk[ul]; -+ deblk_end.deblk = p.deblk + deblk_sz; -+ offset = ul; -+ offset *= deblk_sz; -+ while (!is_deblk_end(&p, &deblk_end) && offset < ctx->pos) { -+ unsigned int l; -+ -+ l = calc_size(p.de->de_str.len); -+ offset += l; -+ p.deblk += l; -+ } -+ if (!is_deblk_end(&p, &deblk_end)) { -+ valid = 1; -+ vdir_cache->vd_last.ul = ul; -+ vdir_cache->vd_last.p = p; -+ break; -+ } -+ } -+ -+out: -+ /* 
smp_mb(); */ -+ AuTraceErr(!valid); -+ return valid; -+} -+ -+int au_vdir_fill_de(struct file *file, struct dir_context *ctx) -+{ -+ unsigned int l, deblk_sz; -+ union au_vdir_deblk_p deblk_end; -+ struct au_vdir *vdir_cache; -+ struct au_vdir_de *de; -+ -+ vdir_cache = au_fvdir_cache(file); -+ if (!seek_vdir(file, ctx)) -+ return 0; -+ -+ deblk_sz = vdir_cache->vd_deblk_sz; -+ while (1) { -+ deblk_end.deblk = vdir_cache->vd_deblk[vdir_cache->vd_last.ul]; -+ deblk_end.deblk += deblk_sz; -+ while (!is_deblk_end(&vdir_cache->vd_last.p, &deblk_end)) { -+ de = vdir_cache->vd_last.p.de; -+ AuDbg("%.*s, off%lld, i%lu, dt%d\n", -+ de->de_str.len, de->de_str.name, ctx->pos, -+ (unsigned long)de->de_ino, de->de_type); -+ if (unlikely(!dir_emit(ctx, de->de_str.name, -+ de->de_str.len, de->de_ino, -+ de->de_type))) { -+ /* todo: ignore the error caused by udba? */ -+ /* return err; */ -+ return 0; -+ } -+ -+ l = calc_size(de->de_str.len); -+ vdir_cache->vd_last.p.deblk += l; -+ ctx->pos += l; -+ } -+ if (vdir_cache->vd_last.ul < vdir_cache->vd_nblk - 1) { -+ vdir_cache->vd_last.ul++; -+ vdir_cache->vd_last.p.deblk -+ = vdir_cache->vd_deblk[vdir_cache->vd_last.ul]; -+ ctx->pos = deblk_sz * vdir_cache->vd_last.ul; -+ continue; -+ } -+ break; -+ } -+ -+ /* smp_mb(); */ -+ return 0; -+} -diff --git a/fs/aufs/vfsub.c b/fs/aufs/vfsub.c -new file mode 100644 -index 0000000..5fd008c ---- /dev/null -+++ b/fs/aufs/vfsub.c -@@ -0,0 +1,864 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * sub-routines for VFS -+ */ -+ -+#include -+#include -+#include -+#include -+#include "../fs/mount.h" -+#include "aufs.h" -+ -+#ifdef CONFIG_AUFS_BR_FUSE -+int vfsub_test_mntns(struct vfsmount *mnt, struct super_block *h_sb) -+{ -+ struct nsproxy *ns; -+ -+ if (!au_test_fuse(h_sb) || !au_userns) -+ return 0; -+ -+ ns = current->nsproxy; -+ /* no {get,put}_nsproxy(ns) */ -+ return real_mount(mnt)->mnt_ns == ns->mnt_ns ? 0 : -EACCES; -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+int vfsub_update_h_iattr(struct path *h_path, int *did) -+{ -+ int err; -+ struct kstat st; -+ struct super_block *h_sb; -+ -+ /* for remote fs, leave work for its getattr or d_revalidate */ -+ /* for bad i_attr fs, handle them in aufs_getattr() */ -+ /* still some fs may acquire i_mutex. 
we need to skip them */ -+ err = 0; -+ if (!did) -+ did = &err; -+ h_sb = h_path->dentry->d_sb; -+ *did = (!au_test_fs_remote(h_sb) && au_test_fs_refresh_iattr(h_sb)); -+ if (*did) -+ err = vfs_getattr(h_path, &st); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct file *vfsub_dentry_open(struct path *path, int flags) -+{ -+ struct file *file; -+ -+ file = dentry_open(path, flags /* | __FMODE_NONOTIFY */, -+ current_cred()); -+ if (!IS_ERR_OR_NULL(file) -+ && (file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) -+ i_readcount_inc(path->dentry->d_inode); -+ -+ return file; -+} -+ -+struct file *vfsub_filp_open(const char *path, int oflags, int mode) -+{ -+ struct file *file; -+ -+ lockdep_off(); -+ file = filp_open(path, -+ oflags /* | __FMODE_NONOTIFY */, -+ mode); -+ lockdep_on(); -+ if (IS_ERR(file)) -+ goto out; -+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/ -+ -+out: -+ return file; -+} -+ -+/* -+ * Ideally this function should call VFS:do_last() in order to keep all its -+ * checkings. But it is very hard for aufs to regenerate several VFS internal -+ * structure such as nameidata. This is a second (or third) best approach. -+ * cf. linux/fs/namei.c:do_last(), lookup_open() and atomic_open(). 
-+ */ -+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry, -+ struct vfsub_aopen_args *args, struct au_branch *br) -+{ -+ int err; -+ struct file *file = args->file; -+ /* copied from linux/fs/namei.c:atomic_open() */ -+ struct dentry *const DENTRY_NOT_SET = (void *)-1UL; -+ -+ IMustLock(dir); -+ AuDebugOn(!dir->i_op->atomic_open); -+ -+ err = au_br_test_oflag(args->open_flag, br); -+ if (unlikely(err)) -+ goto out; -+ -+ args->file->f_path.dentry = DENTRY_NOT_SET; -+ args->file->f_path.mnt = au_br_mnt(br); -+ err = dir->i_op->atomic_open(dir, dentry, file, args->open_flag, -+ args->create_mode, args->opened); -+ if (err >= 0) { -+ /* some filesystems don't set FILE_CREATED while succeeded? */ -+ if (*args->opened & FILE_CREATED) -+ fsnotify_create(dir, dentry); -+ } else -+ goto out; -+ -+ -+ if (!err) { -+ /* todo: call VFS:may_open() here */ -+ err = open_check_o_direct(file); -+ /* todo: ima_file_check() too? */ -+ if (!err && (args->open_flag & __FMODE_EXEC)) -+ err = deny_write_access(file); -+ if (unlikely(err)) -+ /* note that the file is created and still opened */ -+ goto out; -+ } -+ -+ atomic_inc(&br->br_count); -+ fsnotify_open(file); -+ -+out: -+ return err; -+} -+ -+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path) -+{ -+ int err; -+ -+ err = kern_path(name, flags, path); -+ if (!err && path->dentry->d_inode) -+ vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent, -+ int len) -+{ -+ struct path path = { -+ .mnt = NULL -+ }; -+ -+ /* VFS checks it too, but by WARN_ON_ONCE() */ -+ IMustLock(parent->d_inode); -+ -+ path.dentry = lookup_one_len(name, parent, len); -+ if (IS_ERR(path.dentry)) -+ goto out; -+ if (path.dentry->d_inode) -+ vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/ -+ -+out: -+ AuTraceErrPtr(path.dentry); -+ return path.dentry; -+} -+ -+void vfsub_call_lkup_one(void *args) -+{ -+ struct 
vfsub_lkup_one_args *a = args; -+ *a->errp = vfsub_lkup_one(a->name, a->parent); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1, -+ struct dentry *d2, struct au_hinode *hdir2) -+{ -+ struct dentry *d; -+ -+ lockdep_off(); -+ d = lock_rename(d1, d2); -+ lockdep_on(); -+ au_hn_suspend(hdir1); -+ if (hdir1 != hdir2) -+ au_hn_suspend(hdir2); -+ -+ return d; -+} -+ -+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1, -+ struct dentry *d2, struct au_hinode *hdir2) -+{ -+ au_hn_resume(hdir1); -+ if (hdir1 != hdir2) -+ au_hn_resume(hdir2); -+ lockdep_off(); -+ unlock_rename(d1, d2); -+ lockdep_on(); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int vfsub_create(struct inode *dir, struct path *path, int mode, bool want_excl) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_mknod(path, d, mode, 0); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_create(dir, path->dentry, mode, want_excl); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = *path; -+ int did; -+ -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = path->dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+int vfsub_symlink(struct inode *dir, struct path *path, const char *symname) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_symlink(path, d, symname); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_symlink(dir, path->dentry, symname); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = *path; -+ int did; -+ -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = 
path->dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_mknod(path, d, mode, new_encode_dev(dev)); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_mknod(dir, path->dentry, mode, dev); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = *path; -+ int did; -+ -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = path->dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+static int au_test_nlink(struct inode *inode) -+{ -+ const unsigned int link_max = UINT_MAX >> 1; /* rough margin */ -+ -+ if (!au_test_fs_no_limit_nlink(inode->i_sb) -+ || inode->i_nlink < link_max) -+ return 0; -+ return -EMLINK; -+} -+ -+int vfsub_link(struct dentry *src_dentry, struct inode *dir, struct path *path, -+ struct inode **delegated_inode) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ err = au_test_nlink(src_dentry->d_inode); -+ if (unlikely(err)) -+ return err; -+ -+ /* we don't call may_linkat() */ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_link(src_dentry, path, d); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_link(src_dentry, dir, path->dentry, delegated_inode); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = *path; -+ int did; -+ -+ /* fuse has different memory inode for the same inumber */ -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = path->dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ tmp.dentry = src_dentry; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+int vfsub_rename(struct inode 
*src_dir, struct dentry *src_dentry, -+ struct inode *dir, struct path *path, -+ struct inode **delegated_inode) -+{ -+ int err; -+ struct path tmp = { -+ .mnt = path->mnt -+ }; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ IMustLock(src_dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ tmp.dentry = src_dentry->d_parent; -+ err = security_path_rename(&tmp, src_dentry, path, d, /*flags*/0); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_rename(src_dir, src_dentry, dir, path->dentry, -+ delegated_inode, /*flags*/0); -+ lockdep_on(); -+ if (!err) { -+ int did; -+ -+ tmp.dentry = d->d_parent; -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = src_dentry; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ tmp.dentry = src_dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+int vfsub_mkdir(struct inode *dir, struct path *path, int mode) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_mkdir(path, d, mode); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_mkdir(dir, path->dentry, mode); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = *path; -+ int did; -+ -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = path->dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+int vfsub_rmdir(struct inode *dir, struct path *path) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_rmdir(path, d); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_rmdir(dir, path->dentry); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = { -+ .dentry = path->dentry->d_parent, -+ .mnt = path->mnt -+ }; -+ -+ 
vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* todo: support mmap_sem? */ -+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count, -+ loff_t *ppos) -+{ -+ ssize_t err; -+ -+ lockdep_off(); -+ err = vfs_read(file, ubuf, count, ppos); -+ lockdep_on(); -+ if (err >= 0) -+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+/* todo: kernel_read()? */ -+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count, -+ loff_t *ppos) -+{ -+ ssize_t err; -+ mm_segment_t oldfs; -+ union { -+ void *k; -+ char __user *u; -+ } buf; -+ -+ buf.k = kbuf; -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+ err = vfsub_read_u(file, buf.u, count, ppos); -+ set_fs(oldfs); -+ return err; -+} -+ -+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count, -+ loff_t *ppos) -+{ -+ ssize_t err; -+ -+ lockdep_off(); -+ err = vfs_write(file, ubuf, count, ppos); -+ lockdep_on(); -+ if (err >= 0) -+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count, loff_t *ppos) -+{ -+ ssize_t err; -+ mm_segment_t oldfs; -+ union { -+ void *k; -+ const char __user *u; -+ } buf; -+ -+ buf.k = kbuf; -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+ err = vfsub_write_u(file, buf.u, count, ppos); -+ set_fs(oldfs); -+ return err; -+} -+ -+int vfsub_flush(struct file *file, fl_owner_t id) -+{ -+ int err; -+ -+ err = 0; -+ if (file->f_op->flush) { -+ if (!au_test_nfs(file->f_dentry->d_sb)) -+ err = file->f_op->flush(file, id); -+ else { -+ lockdep_off(); -+ err = file->f_op->flush(file, id); -+ lockdep_on(); -+ } -+ if (!err) -+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); -+ /*ignore*/ -+ } -+ return err; -+} -+ -+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx) -+{ -+ int err; -+ -+ AuDbg("%pD, 
ctx{%pf, %llu}\n", file, ctx->actor, ctx->pos); -+ -+ lockdep_off(); -+ err = iterate_dir(file, ctx); -+ lockdep_on(); -+ if (err >= 0) -+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+long vfsub_splice_to(struct file *in, loff_t *ppos, -+ struct pipe_inode_info *pipe, size_t len, -+ unsigned int flags) -+{ -+ long err; -+ -+ lockdep_off(); -+ err = do_splice_to(in, ppos, pipe, len, flags); -+ lockdep_on(); -+ file_accessed(in); -+ if (err >= 0) -+ vfsub_update_h_iattr(&in->f_path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out, -+ loff_t *ppos, size_t len, unsigned int flags) -+{ -+ long err; -+ -+ lockdep_off(); -+ err = do_splice_from(pipe, out, ppos, len, flags); -+ lockdep_on(); -+ if (err >= 0) -+ vfsub_update_h_iattr(&out->f_path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+int vfsub_fsync(struct file *file, struct path *path, int datasync) -+{ -+ int err; -+ -+ /* file can be NULL */ -+ lockdep_off(); -+ err = vfs_fsync(file, datasync); -+ lockdep_on(); -+ if (!err) { -+ if (!path) { -+ AuDebugOn(!file); -+ path = &file->f_path; -+ } -+ vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/ -+ } -+ return err; -+} -+ -+/* cf. 
open.c:do_sys_truncate() and do_sys_ftruncate() */ -+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr, -+ struct file *h_file) -+{ -+ int err; -+ struct inode *h_inode; -+ struct super_block *h_sb; -+ -+ if (!h_file) { -+ err = vfsub_truncate(h_path, length); -+ goto out; -+ } -+ -+ h_inode = h_path->dentry->d_inode; -+ h_sb = h_inode->i_sb; -+ lockdep_off(); -+ sb_start_write(h_sb); -+ lockdep_on(); -+ err = locks_verify_truncate(h_inode, h_file, length); -+ if (!err) -+ err = security_path_truncate(h_path); -+ if (!err) { -+ lockdep_off(); -+ err = do_truncate(h_path->dentry, length, attr, h_file); -+ lockdep_on(); -+ } -+ lockdep_off(); -+ sb_end_write(h_sb); -+ lockdep_on(); -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_vfsub_mkdir_args { -+ int *errp; -+ struct inode *dir; -+ struct path *path; -+ int mode; -+}; -+ -+static void au_call_vfsub_mkdir(void *args) -+{ -+ struct au_vfsub_mkdir_args *a = args; -+ *a->errp = vfsub_mkdir(a->dir, a->path, a->mode); -+} -+ -+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode) -+{ -+ int err, do_sio, wkq_err; -+ -+ do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE); -+ if (!do_sio) { -+ lockdep_off(); -+ err = vfsub_mkdir(dir, path, mode); -+ lockdep_on(); -+ } else { -+ struct au_vfsub_mkdir_args args = { -+ .errp = &err, -+ .dir = dir, -+ .path = path, -+ .mode = mode -+ }; -+ wkq_err = au_wkq_wait(au_call_vfsub_mkdir, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ return err; -+} -+ -+struct au_vfsub_rmdir_args { -+ int *errp; -+ struct inode *dir; -+ struct path *path; -+}; -+ -+static void au_call_vfsub_rmdir(void *args) -+{ -+ struct au_vfsub_rmdir_args *a = args; -+ *a->errp = vfsub_rmdir(a->dir, a->path); -+} -+ -+int vfsub_sio_rmdir(struct inode *dir, struct path *path) -+{ -+ int err, do_sio, wkq_err; -+ -+ do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE); -+ if 
(!do_sio) { -+ lockdep_off(); -+ err = vfsub_rmdir(dir, path); -+ lockdep_on(); -+ } else { -+ struct au_vfsub_rmdir_args args = { -+ .errp = &err, -+ .dir = dir, -+ .path = path -+ }; -+ wkq_err = au_wkq_wait(au_call_vfsub_rmdir, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct notify_change_args { -+ int *errp; -+ struct path *path; -+ struct iattr *ia; -+ struct inode **delegated_inode; -+}; -+ -+static void call_notify_change(void *args) -+{ -+ struct notify_change_args *a = args; -+ struct inode *h_inode; -+ -+ h_inode = a->path->dentry->d_inode; -+ IMustLock(h_inode); -+ -+ *a->errp = -EPERM; -+ if (!IS_IMMUTABLE(h_inode) && !IS_APPEND(h_inode)) { -+ lockdep_off(); -+ *a->errp = notify_change(a->path->dentry, a->ia, -+ a->delegated_inode); -+ lockdep_on(); -+ if (!*a->errp) -+ vfsub_update_h_iattr(a->path, /*did*/NULL); /*ignore*/ -+ } -+ AuTraceErr(*a->errp); -+} -+ -+int vfsub_notify_change(struct path *path, struct iattr *ia, -+ struct inode **delegated_inode) -+{ -+ int err; -+ struct notify_change_args args = { -+ .errp = &err, -+ .path = path, -+ .ia = ia, -+ .delegated_inode = delegated_inode -+ }; -+ -+ call_notify_change(&args); -+ -+ return err; -+} -+ -+int vfsub_sio_notify_change(struct path *path, struct iattr *ia, -+ struct inode **delegated_inode) -+{ -+ int err, wkq_err; -+ struct notify_change_args args = { -+ .errp = &err, -+ .path = path, -+ .ia = ia, -+ .delegated_inode = delegated_inode -+ }; -+ -+ wkq_err = au_wkq_wait(call_notify_change, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct unlink_args { -+ int *errp; -+ struct inode *dir; -+ struct path *path; -+ struct inode **delegated_inode; -+}; -+ -+static void call_unlink(void *args) -+{ -+ struct unlink_args *a = args; -+ struct dentry *d = 
a->path->dentry; -+ struct inode *h_inode; -+ const int stop_sillyrename = (au_test_nfs(d->d_sb) -+ && au_dcount(d) == 1); -+ -+ IMustLock(a->dir); -+ -+ a->path->dentry = d->d_parent; -+ *a->errp = security_path_unlink(a->path, d); -+ a->path->dentry = d; -+ if (unlikely(*a->errp)) -+ return; -+ -+ if (!stop_sillyrename) -+ dget(d); -+ h_inode = d->d_inode; -+ if (h_inode) -+ ihold(h_inode); -+ -+ lockdep_off(); -+ *a->errp = vfs_unlink(a->dir, d, a->delegated_inode); -+ lockdep_on(); -+ if (!*a->errp) { -+ struct path tmp = { -+ .dentry = d->d_parent, -+ .mnt = a->path->mnt -+ }; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/ -+ } -+ -+ if (!stop_sillyrename) -+ dput(d); -+ if (h_inode) -+ iput(h_inode); -+ -+ AuTraceErr(*a->errp); -+} -+ -+/* -+ * @dir: must be locked. -+ * @dentry: target dentry. -+ */ -+int vfsub_unlink(struct inode *dir, struct path *path, -+ struct inode **delegated_inode, int force) -+{ -+ int err; -+ struct unlink_args args = { -+ .errp = &err, -+ .dir = dir, -+ .path = path, -+ .delegated_inode = delegated_inode -+ }; -+ -+ if (!force) -+ call_unlink(&args); -+ else { -+ int wkq_err; -+ -+ wkq_err = au_wkq_wait(call_unlink, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ return err; -+} -diff --git a/fs/aufs/vfsub.h b/fs/aufs/vfsub.h -new file mode 100644 -index 0000000..2c33298 ---- /dev/null -+++ b/fs/aufs/vfsub.h -@@ -0,0 +1,315 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * sub-routines for VFS -+ */ -+ -+#ifndef __AUFS_VFSUB_H__ -+#define __AUFS_VFSUB_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+#include -+#include -+#include "debug.h" -+ -+/* copied from linux/fs/internal.h */ -+/* todo: BAD approach!! */ -+extern void __mnt_drop_write(struct vfsmount *); -+extern spinlock_t inode_sb_list_lock; -+extern int open_check_o_direct(struct file *f); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* lock subclass for lower inode */ -+/* default MAX_LOCKDEP_SUBCLASSES(8) is not enough */ -+/* reduce? gave up. */ -+enum { -+ AuLsc_I_Begin = I_MUTEX_PARENT2, /* 5 */ -+ AuLsc_I_PARENT, /* lower inode, parent first */ -+ AuLsc_I_PARENT2, /* copyup dirs */ -+ AuLsc_I_PARENT3, /* copyup wh */ -+ AuLsc_I_CHILD, -+ AuLsc_I_CHILD2, -+ AuLsc_I_End -+}; -+ -+/* to debug easier, do not make them inlined functions */ -+#define MtxMustLock(mtx) AuDebugOn(!mutex_is_locked(mtx)) -+#define IMustLock(i) MtxMustLock(&(i)->i_mutex) -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline void vfsub_drop_nlink(struct inode *inode) -+{ -+ AuDebugOn(!inode->i_nlink); -+ drop_nlink(inode); -+} -+ -+static inline void vfsub_dead_dir(struct inode *inode) -+{ -+ AuDebugOn(!S_ISDIR(inode->i_mode)); -+ inode->i_flags |= S_DEAD; -+ clear_nlink(inode); -+} -+ -+static inline int vfsub_native_ro(struct inode *inode) -+{ -+ return (inode->i_sb->s_flags & MS_RDONLY) -+ || IS_RDONLY(inode) -+ /* || IS_APPEND(inode) */ -+ || IS_IMMUTABLE(inode); -+} -+ -+#ifdef CONFIG_AUFS_BR_FUSE -+int vfsub_test_mntns(struct vfsmount *mnt, struct super_block *h_sb); -+#else -+AuStubInt0(vfsub_test_mntns, struct vfsmount *mnt, struct super_block *h_sb); -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+int 
vfsub_update_h_iattr(struct path *h_path, int *did); -+struct file *vfsub_dentry_open(struct path *path, int flags); -+struct file *vfsub_filp_open(const char *path, int oflags, int mode); -+struct vfsub_aopen_args { -+ struct file *file; -+ unsigned int open_flag; -+ umode_t create_mode; -+ int *opened; -+}; -+struct au_branch; -+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry, -+ struct vfsub_aopen_args *args, struct au_branch *br); -+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path); -+ -+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent, -+ int len); -+ -+struct vfsub_lkup_one_args { -+ struct dentry **errp; -+ struct qstr *name; -+ struct dentry *parent; -+}; -+ -+static inline struct dentry *vfsub_lkup_one(struct qstr *name, -+ struct dentry *parent) -+{ -+ return vfsub_lookup_one_len(name->name, parent, name->len); -+} -+ -+void vfsub_call_lkup_one(void *args); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline int vfsub_mnt_want_write(struct vfsmount *mnt) -+{ -+ int err; -+ -+ lockdep_off(); -+ err = mnt_want_write(mnt); -+ lockdep_on(); -+ return err; -+} -+ -+static inline void vfsub_mnt_drop_write(struct vfsmount *mnt) -+{ -+ lockdep_off(); -+ mnt_drop_write(mnt); -+ lockdep_on(); -+} -+ -+#if 0 /* reserved */ -+static inline void vfsub_mnt_drop_write_file(struct file *file) -+{ -+ lockdep_off(); -+ mnt_drop_write_file(file); -+ lockdep_on(); -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_hinode; -+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1, -+ struct dentry *d2, struct au_hinode *hdir2); -+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1, -+ struct dentry *d2, struct au_hinode *hdir2); -+ -+int vfsub_create(struct inode *dir, struct path *path, int mode, -+ bool want_excl); -+int vfsub_symlink(struct inode *dir, struct path 
*path, -+ const char *symname); -+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev); -+int vfsub_link(struct dentry *src_dentry, struct inode *dir, -+ struct path *path, struct inode **delegated_inode); -+int vfsub_rename(struct inode *src_hdir, struct dentry *src_dentry, -+ struct inode *hdir, struct path *path, -+ struct inode **delegated_inode); -+int vfsub_mkdir(struct inode *dir, struct path *path, int mode); -+int vfsub_rmdir(struct inode *dir, struct path *path); -+ -+/* ---------------------------------------------------------------------- */ -+ -+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count, -+ loff_t *ppos); -+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count, -+ loff_t *ppos); -+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count, -+ loff_t *ppos); -+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count, -+ loff_t *ppos); -+int vfsub_flush(struct file *file, fl_owner_t id); -+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx); -+ -+/* just for type-check */ -+static inline filldir_t au_diractor(int (*func)(struct dir_context *, -+ const char *, int, loff_t, u64, -+ unsigned)) -+{ -+ return (filldir_t)func; -+} -+ -+static inline loff_t vfsub_f_size_read(struct file *file) -+{ -+ return i_size_read(file_inode(file)); -+} -+ -+static inline unsigned int vfsub_file_flags(struct file *file) -+{ -+ unsigned int flags; -+ -+ spin_lock(&file->f_lock); -+ flags = file->f_flags; -+ spin_unlock(&file->f_lock); -+ -+ return flags; -+} -+ -+#if 0 /* reserved */ -+static inline void vfsub_file_accessed(struct file *h_file) -+{ -+ file_accessed(h_file); -+ vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL); /*ignore*/ -+} -+#endif -+ -+static inline void vfsub_touch_atime(struct vfsmount *h_mnt, -+ struct dentry *h_dentry) -+{ -+ struct path h_path = { -+ .dentry = h_dentry, -+ .mnt = h_mnt -+ }; -+ touch_atime(&h_path); -+ 
vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/ -+} -+ -+static inline int vfsub_update_time(struct inode *h_inode, struct timespec *ts, -+ int flags) -+{ -+ return update_time(h_inode, ts, flags); -+ /* no vfsub_update_h_iattr() since we don't have struct path */ -+} -+ -+#ifdef CONFIG_FS_POSIX_ACL -+static inline int vfsub_acl_chmod(struct inode *h_inode, umode_t h_mode) -+{ -+ int err; -+ -+ err = posix_acl_chmod(h_inode, h_mode); -+ if (err == -EOPNOTSUPP) -+ err = 0; -+ return err; -+} -+#else -+AuStubInt0(vfsub_acl_chmod, struct inode *h_inode, umode_t h_mode); -+#endif -+ -+long vfsub_splice_to(struct file *in, loff_t *ppos, -+ struct pipe_inode_info *pipe, size_t len, -+ unsigned int flags); -+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out, -+ loff_t *ppos, size_t len, unsigned int flags); -+ -+static inline long vfsub_truncate(struct path *path, loff_t length) -+{ -+ long err; -+ -+ lockdep_off(); -+ err = vfs_truncate(path, length); -+ lockdep_on(); -+ return err; -+} -+ -+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr, -+ struct file *h_file); -+int vfsub_fsync(struct file *file, struct path *path, int datasync); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline loff_t vfsub_llseek(struct file *file, loff_t offset, int origin) -+{ -+ loff_t err; -+ -+ lockdep_off(); -+ err = vfs_llseek(file, offset, origin); -+ lockdep_on(); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode); -+int vfsub_sio_rmdir(struct inode *dir, struct path *path); -+int vfsub_sio_notify_change(struct path *path, struct iattr *ia, -+ struct inode **delegated_inode); -+int vfsub_notify_change(struct path *path, struct iattr *ia, -+ struct inode **delegated_inode); -+int vfsub_unlink(struct inode *dir, struct path *path, -+ struct inode **delegated_inode, int 
force); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline int vfsub_setxattr(struct dentry *dentry, const char *name, -+ const void *value, size_t size, int flags) -+{ -+ int err; -+ -+ lockdep_off(); -+ err = vfs_setxattr(dentry, name, value, size, flags); -+ lockdep_on(); -+ -+ return err; -+} -+ -+static inline int vfsub_removexattr(struct dentry *dentry, const char *name) -+{ -+ int err; -+ -+ lockdep_off(); -+ err = vfs_removexattr(dentry, name); -+ lockdep_on(); -+ -+ return err; -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_VFSUB_H__ */ -diff --git a/fs/aufs/wbr_policy.c b/fs/aufs/wbr_policy.c -new file mode 100644 -index 0000000..64cd9fe ---- /dev/null -+++ b/fs/aufs/wbr_policy.c -@@ -0,0 +1,765 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * policies for selecting one among multiple writable branches -+ */ -+ -+#include -+#include "aufs.h" -+ -+/* subset of cpup_attr() */ -+static noinline_for_stack -+int au_cpdown_attr(struct path *h_path, struct dentry *h_src) -+{ -+ int err, sbits; -+ struct iattr ia; -+ struct inode *h_isrc; -+ -+ h_isrc = h_src->d_inode; -+ ia.ia_valid = ATTR_FORCE | ATTR_MODE | ATTR_UID | ATTR_GID; -+ ia.ia_mode = h_isrc->i_mode; -+ ia.ia_uid = h_isrc->i_uid; -+ ia.ia_gid = h_isrc->i_gid; -+ sbits = !!(ia.ia_mode & (S_ISUID | S_ISGID)); -+ au_cpup_attr_flags(h_path->dentry->d_inode, h_isrc->i_flags); -+ /* no delegation since it is just created */ -+ err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL); -+ -+ /* is this nfs only? */ -+ if (!err && sbits && au_test_nfs(h_path->dentry->d_sb)) { -+ ia.ia_valid = ATTR_FORCE | ATTR_MODE; -+ ia.ia_mode = h_isrc->i_mode; -+ err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL); -+ } -+ -+ return err; -+} -+ -+#define AuCpdown_PARENT_OPQ 1 -+#define AuCpdown_WHED (1 << 1) -+#define AuCpdown_MADE_DIR (1 << 2) -+#define AuCpdown_DIROPQ (1 << 3) -+#define au_ftest_cpdown(flags, name) ((flags) & AuCpdown_##name) -+#define au_fset_cpdown(flags, name) \ -+ do { (flags) |= AuCpdown_##name; } while (0) -+#define au_fclr_cpdown(flags, name) \ -+ do { (flags) &= ~AuCpdown_##name; } while (0) -+ -+static int au_cpdown_dir_opq(struct dentry *dentry, aufs_bindex_t bdst, -+ unsigned int *flags) -+{ -+ int err; -+ struct dentry *opq_dentry; -+ -+ opq_dentry = au_diropq_create(dentry, bdst); -+ err = PTR_ERR(opq_dentry); -+ if (IS_ERR(opq_dentry)) -+ goto out; -+ dput(opq_dentry); -+ au_fset_cpdown(*flags, DIROPQ); -+ -+out: -+ return err; -+} -+ -+static int au_cpdown_dir_wh(struct dentry *dentry, struct dentry *h_parent, -+ struct inode *dir, aufs_bindex_t bdst) -+{ -+ int err; -+ struct path h_path; -+ struct au_branch *br; -+ -+ br = au_sbr(dentry->d_sb, bdst); -+ h_path.dentry = au_wh_lkup(h_parent, 
&dentry->d_name, br); -+ err = PTR_ERR(h_path.dentry); -+ if (IS_ERR(h_path.dentry)) -+ goto out; -+ -+ err = 0; -+ if (h_path.dentry->d_inode) { -+ h_path.mnt = au_br_mnt(br); -+ err = au_wh_unlink_dentry(au_h_iptr(dir, bdst), &h_path, -+ dentry); -+ } -+ dput(h_path.dentry); -+ -+out: -+ return err; -+} -+ -+static int au_cpdown_dir(struct dentry *dentry, aufs_bindex_t bdst, -+ struct au_pin *pin, -+ struct dentry *h_parent, void *arg) -+{ -+ int err, rerr; -+ aufs_bindex_t bopq, bstart; -+ struct path h_path; -+ struct dentry *parent; -+ struct inode *h_dir, *h_inode, *inode, *dir; -+ unsigned int *flags = arg; -+ -+ bstart = au_dbstart(dentry); -+ /* dentry is di-locked */ -+ parent = dget_parent(dentry); -+ dir = parent->d_inode; -+ h_dir = h_parent->d_inode; -+ AuDebugOn(h_dir != au_h_iptr(dir, bdst)); -+ IMustLock(h_dir); -+ -+ err = au_lkup_neg(dentry, bdst, /*wh*/0); -+ if (unlikely(err < 0)) -+ goto out; -+ h_path.dentry = au_h_dptr(dentry, bdst); -+ h_path.mnt = au_sbr_mnt(dentry->d_sb, bdst); -+ err = vfsub_sio_mkdir(au_h_iptr(dir, bdst), &h_path, -+ S_IRWXU | S_IRUGO | S_IXUGO); -+ if (unlikely(err)) -+ goto out_put; -+ au_fset_cpdown(*flags, MADE_DIR); -+ -+ bopq = au_dbdiropq(dentry); -+ au_fclr_cpdown(*flags, WHED); -+ au_fclr_cpdown(*flags, DIROPQ); -+ if (au_dbwh(dentry) == bdst) -+ au_fset_cpdown(*flags, WHED); -+ if (!au_ftest_cpdown(*flags, PARENT_OPQ) && bopq <= bdst) -+ au_fset_cpdown(*flags, PARENT_OPQ); -+ h_inode = h_path.dentry->d_inode; -+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD); -+ if (au_ftest_cpdown(*flags, WHED)) { -+ err = au_cpdown_dir_opq(dentry, bdst, flags); -+ if (unlikely(err)) { -+ mutex_unlock(&h_inode->i_mutex); -+ goto out_dir; -+ } -+ } -+ -+ err = au_cpdown_attr(&h_path, au_h_dptr(dentry, bstart)); -+ mutex_unlock(&h_inode->i_mutex); -+ if (unlikely(err)) -+ goto out_opq; -+ -+ if (au_ftest_cpdown(*flags, WHED)) { -+ err = au_cpdown_dir_wh(dentry, h_parent, dir, bdst); -+ if (unlikely(err)) -+ goto out_opq; 
-+ } -+ -+ inode = dentry->d_inode; -+ if (au_ibend(inode) < bdst) -+ au_set_ibend(inode, bdst); -+ au_set_h_iptr(inode, bdst, au_igrab(h_inode), -+ au_hi_flags(inode, /*isdir*/1)); -+ au_fhsm_wrote(dentry->d_sb, bdst, /*force*/0); -+ goto out; /* success */ -+ -+ /* revert */ -+out_opq: -+ if (au_ftest_cpdown(*flags, DIROPQ)) { -+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD); -+ rerr = au_diropq_remove(dentry, bdst); -+ mutex_unlock(&h_inode->i_mutex); -+ if (unlikely(rerr)) { -+ AuIOErr("failed removing diropq for %pd b%d (%d)\n", -+ dentry, bdst, rerr); -+ err = -EIO; -+ goto out; -+ } -+ } -+out_dir: -+ if (au_ftest_cpdown(*flags, MADE_DIR)) { -+ rerr = vfsub_sio_rmdir(au_h_iptr(dir, bdst), &h_path); -+ if (unlikely(rerr)) { -+ AuIOErr("failed removing %pd b%d (%d)\n", -+ dentry, bdst, rerr); -+ err = -EIO; -+ } -+ } -+out_put: -+ au_set_h_dptr(dentry, bdst, NULL); -+ if (au_dbend(dentry) == bdst) -+ au_update_dbend(dentry); -+out: -+ dput(parent); -+ return err; -+} -+ -+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst) -+{ -+ int err; -+ unsigned int flags; -+ -+ flags = 0; -+ err = au_cp_dirs(dentry, bdst, au_cpdown_dir, &flags); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* policies for create */ -+ -+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ int err, i, j, ndentry; -+ aufs_bindex_t bopq; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct dentry **dentries, *parent, *d; -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ parent = dget_parent(dentry); -+ err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/0); -+ if (unlikely(err)) -+ goto out_free; -+ -+ err = bindex; -+ for (i = 0; i < dpages.ndpage; i++) { -+ dpage = dpages.dpages + i; -+ dentries = dpage->dentries; -+ ndentry = dpage->ndentry; -+ for (j = 0; j < ndentry; j++) { -+ d = dentries[j]; -+ di_read_lock_parent2(d, 
!AuLock_IR); -+ bopq = au_dbdiropq(d); -+ di_read_unlock(d, !AuLock_IR); -+ if (bopq >= 0 && bopq < err) -+ err = bopq; -+ } -+ } -+ -+out_free: -+ dput(parent); -+ au_dpages_free(&dpages); -+out: -+ return err; -+} -+ -+static int au_wbr_bu(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ for (; bindex >= 0; bindex--) -+ if (!au_br_rdonly(au_sbr(sb, bindex))) -+ return bindex; -+ return -EROFS; -+} -+ -+/* top down parent */ -+static int au_wbr_create_tdp(struct dentry *dentry, -+ unsigned int flags __maybe_unused) -+{ -+ int err; -+ aufs_bindex_t bstart, bindex; -+ struct super_block *sb; -+ struct dentry *parent, *h_parent; -+ -+ sb = dentry->d_sb; -+ bstart = au_dbstart(dentry); -+ err = bstart; -+ if (!au_br_rdonly(au_sbr(sb, bstart))) -+ goto out; -+ -+ err = -EROFS; -+ parent = dget_parent(dentry); -+ for (bindex = au_dbstart(parent); bindex < bstart; bindex++) { -+ h_parent = au_h_dptr(parent, bindex); -+ if (!h_parent || !h_parent->d_inode) -+ continue; -+ -+ if (!au_br_rdonly(au_sbr(sb, bindex))) { -+ err = bindex; -+ break; -+ } -+ } -+ dput(parent); -+ -+ /* bottom up here */ -+ if (unlikely(err < 0)) { -+ err = au_wbr_bu(sb, bstart - 1); -+ if (err >= 0) -+ err = au_wbr_nonopq(dentry, err); -+ } -+ -+out: -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* an exception for the policy other than tdp */ -+static int au_wbr_create_exp(struct dentry *dentry) -+{ -+ int err; -+ aufs_bindex_t bwh, bdiropq; -+ struct dentry *parent; -+ -+ err = -1; -+ bwh = au_dbwh(dentry); -+ parent = dget_parent(dentry); -+ bdiropq = au_dbdiropq(parent); -+ if (bwh >= 0) { -+ if (bdiropq >= 0) -+ err = min(bdiropq, bwh); -+ else -+ err = bwh; -+ AuDbg("%d\n", err); -+ } else if (bdiropq >= 0) { -+ err = bdiropq; -+ AuDbg("%d\n", err); -+ } -+ dput(parent); -+ -+ if (err >= 0) -+ err = au_wbr_nonopq(dentry, err); -+ -+ if (err >= 0 && au_br_rdonly(au_sbr(dentry->d_sb, err))) -+ err = -1; -+ 
-+ AuDbg("%d\n", err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* round robin */ -+static int au_wbr_create_init_rr(struct super_block *sb) -+{ -+ int err; -+ -+ err = au_wbr_bu(sb, au_sbend(sb)); -+ atomic_set(&au_sbi(sb)->si_wbr_rr_next, -err); /* less important */ -+ /* smp_mb(); */ -+ -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+static int au_wbr_create_rr(struct dentry *dentry, unsigned int flags) -+{ -+ int err, nbr; -+ unsigned int u; -+ aufs_bindex_t bindex, bend; -+ struct super_block *sb; -+ atomic_t *next; -+ -+ err = au_wbr_create_exp(dentry); -+ if (err >= 0) -+ goto out; -+ -+ sb = dentry->d_sb; -+ next = &au_sbi(sb)->si_wbr_rr_next; -+ bend = au_sbend(sb); -+ nbr = bend + 1; -+ for (bindex = 0; bindex <= bend; bindex++) { -+ if (!au_ftest_wbr(flags, DIR)) { -+ err = atomic_dec_return(next) + 1; -+ /* modulo for 0 is meaningless */ -+ if (unlikely(!err)) -+ err = atomic_dec_return(next) + 1; -+ } else -+ err = atomic_read(next); -+ AuDbg("%d\n", err); -+ u = err; -+ err = u % nbr; -+ AuDbg("%d\n", err); -+ if (!au_br_rdonly(au_sbr(sb, err))) -+ break; -+ err = -EROFS; -+ } -+ -+ if (err >= 0) -+ err = au_wbr_nonopq(dentry, err); -+ -+out: -+ AuDbg("%d\n", err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* most free space */ -+static void au_mfs(struct dentry *dentry, struct dentry *parent) -+{ -+ struct super_block *sb; -+ struct au_branch *br; -+ struct au_wbr_mfs *mfs; -+ struct dentry *h_parent; -+ aufs_bindex_t bindex, bend; -+ int err; -+ unsigned long long b, bavail; -+ struct path h_path; -+ /* reduce the stack usage */ -+ struct kstatfs *st; -+ -+ st = kmalloc(sizeof(*st), GFP_NOFS); -+ if (unlikely(!st)) { -+ AuWarn1("failed updating mfs(%d), ignored\n", -ENOMEM); -+ return; -+ } -+ -+ bavail = 0; -+ sb = dentry->d_sb; -+ mfs = &au_sbi(sb)->si_wbr_mfs; -+ MtxMustLock(&mfs->mfs_lock); -+ mfs->mfs_bindex = -EROFS; 
-+ mfs->mfsrr_bytes = 0; -+ if (!parent) { -+ bindex = 0; -+ bend = au_sbend(sb); -+ } else { -+ bindex = au_dbstart(parent); -+ bend = au_dbtaildir(parent); -+ } -+ -+ for (; bindex <= bend; bindex++) { -+ if (parent) { -+ h_parent = au_h_dptr(parent, bindex); -+ if (!h_parent || !h_parent->d_inode) -+ continue; -+ } -+ br = au_sbr(sb, bindex); -+ if (au_br_rdonly(br)) -+ continue; -+ -+ /* sb->s_root for NFS is unreliable */ -+ h_path.mnt = au_br_mnt(br); -+ h_path.dentry = h_path.mnt->mnt_root; -+ err = vfs_statfs(&h_path, st); -+ if (unlikely(err)) { -+ AuWarn1("failed statfs, b%d, %d\n", bindex, err); -+ continue; -+ } -+ -+ /* when the available size is equal, select the lower one */ -+ BUILD_BUG_ON(sizeof(b) < sizeof(st->f_bavail) -+ || sizeof(b) < sizeof(st->f_bsize)); -+ b = st->f_bavail * st->f_bsize; -+ br->br_wbr->wbr_bytes = b; -+ if (b >= bavail) { -+ bavail = b; -+ mfs->mfs_bindex = bindex; -+ mfs->mfs_jiffy = jiffies; -+ } -+ } -+ -+ mfs->mfsrr_bytes = bavail; -+ AuDbg("b%d\n", mfs->mfs_bindex); -+ kfree(st); -+} -+ -+static int au_wbr_create_mfs(struct dentry *dentry, unsigned int flags) -+{ -+ int err; -+ struct dentry *parent; -+ struct super_block *sb; -+ struct au_wbr_mfs *mfs; -+ -+ err = au_wbr_create_exp(dentry); -+ if (err >= 0) -+ goto out; -+ -+ sb = dentry->d_sb; -+ parent = NULL; -+ if (au_ftest_wbr(flags, PARENT)) -+ parent = dget_parent(dentry); -+ mfs = &au_sbi(sb)->si_wbr_mfs; -+ mutex_lock(&mfs->mfs_lock); -+ if (time_after(jiffies, mfs->mfs_jiffy + mfs->mfs_expire) -+ || mfs->mfs_bindex < 0 -+ || au_br_rdonly(au_sbr(sb, mfs->mfs_bindex))) -+ au_mfs(dentry, parent); -+ mutex_unlock(&mfs->mfs_lock); -+ err = mfs->mfs_bindex; -+ dput(parent); -+ -+ if (err >= 0) -+ err = au_wbr_nonopq(dentry, err); -+ -+out: -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+static int au_wbr_create_init_mfs(struct super_block *sb) -+{ -+ struct au_wbr_mfs *mfs; -+ -+ mfs = &au_sbi(sb)->si_wbr_mfs; -+ mutex_init(&mfs->mfs_lock); -+ mfs->mfs_jiffy = 0; 
-+ mfs->mfs_bindex = -EROFS; -+ -+ return 0; -+} -+ -+static int au_wbr_create_fin_mfs(struct super_block *sb __maybe_unused) -+{ -+ mutex_destroy(&au_sbi(sb)->si_wbr_mfs.mfs_lock); -+ return 0; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* most free space and then round robin */ -+static int au_wbr_create_mfsrr(struct dentry *dentry, unsigned int flags) -+{ -+ int err; -+ struct au_wbr_mfs *mfs; -+ -+ err = au_wbr_create_mfs(dentry, flags); -+ if (err >= 0) { -+ mfs = &au_sbi(dentry->d_sb)->si_wbr_mfs; -+ mutex_lock(&mfs->mfs_lock); -+ if (mfs->mfsrr_bytes < mfs->mfsrr_watermark) -+ err = au_wbr_create_rr(dentry, flags); -+ mutex_unlock(&mfs->mfs_lock); -+ } -+ -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+static int au_wbr_create_init_mfsrr(struct super_block *sb) -+{ -+ int err; -+ -+ au_wbr_create_init_mfs(sb); /* ignore */ -+ err = au_wbr_create_init_rr(sb); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* top down parent and most free space */ -+static int au_wbr_create_pmfs(struct dentry *dentry, unsigned int flags) -+{ -+ int err, e2; -+ unsigned long long b; -+ aufs_bindex_t bindex, bstart, bend; -+ struct super_block *sb; -+ struct dentry *parent, *h_parent; -+ struct au_branch *br; -+ -+ err = au_wbr_create_tdp(dentry, flags); -+ if (unlikely(err < 0)) -+ goto out; -+ parent = dget_parent(dentry); -+ bstart = au_dbstart(parent); -+ bend = au_dbtaildir(parent); -+ if (bstart == bend) -+ goto out_parent; /* success */ -+ -+ e2 = au_wbr_create_mfs(dentry, flags); -+ if (e2 < 0) -+ goto out_parent; /* success */ -+ -+ /* when the available size is equal, select upper one */ -+ sb = dentry->d_sb; -+ br = au_sbr(sb, err); -+ b = br->br_wbr->wbr_bytes; -+ AuDbg("b%d, %llu\n", err, b); -+ -+ for (bindex = bstart; bindex <= bend; bindex++) { -+ h_parent = au_h_dptr(parent, bindex); -+ if (!h_parent || !h_parent->d_inode) -+ continue; -+ -+ br = 
au_sbr(sb, bindex); -+ if (!au_br_rdonly(br) && br->br_wbr->wbr_bytes > b) { -+ b = br->br_wbr->wbr_bytes; -+ err = bindex; -+ AuDbg("b%d, %llu\n", err, b); -+ } -+ } -+ -+ if (err >= 0) -+ err = au_wbr_nonopq(dentry, err); -+ -+out_parent: -+ dput(parent); -+out: -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * - top down parent -+ * - most free space with parent -+ * - most free space round-robin regardless parent -+ */ -+static int au_wbr_create_pmfsrr(struct dentry *dentry, unsigned int flags) -+{ -+ int err; -+ unsigned long long watermark; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct au_wbr_mfs *mfs; -+ -+ err = au_wbr_create_pmfs(dentry, flags | AuWbr_PARENT); -+ if (unlikely(err < 0)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ br = au_sbr(sb, err); -+ mfs = &au_sbi(sb)->si_wbr_mfs; -+ mutex_lock(&mfs->mfs_lock); -+ watermark = mfs->mfsrr_watermark; -+ mutex_unlock(&mfs->mfs_lock); -+ if (br->br_wbr->wbr_bytes < watermark) -+ /* regardless the parent dir */ -+ err = au_wbr_create_mfsrr(dentry, flags); -+ -+out: -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* policies for copyup */ -+ -+/* top down parent */ -+static int au_wbr_copyup_tdp(struct dentry *dentry) -+{ -+ return au_wbr_create_tdp(dentry, /*flags, anything is ok*/0); -+} -+ -+/* bottom up parent */ -+static int au_wbr_copyup_bup(struct dentry *dentry) -+{ -+ int err; -+ aufs_bindex_t bindex, bstart; -+ struct dentry *parent, *h_parent; -+ struct super_block *sb; -+ -+ err = -EROFS; -+ sb = dentry->d_sb; -+ parent = dget_parent(dentry); -+ bstart = au_dbstart(parent); -+ for (bindex = au_dbstart(dentry); bindex >= bstart; bindex--) { -+ h_parent = au_h_dptr(parent, bindex); -+ if (!h_parent || !h_parent->d_inode) -+ continue; -+ -+ if (!au_br_rdonly(au_sbr(sb, bindex))) { -+ err = bindex; -+ break; -+ } -+ } -+ 
dput(parent); -+ -+ /* bottom up here */ -+ if (unlikely(err < 0)) -+ err = au_wbr_bu(sb, bstart - 1); -+ -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+/* bottom up */ -+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t bstart) -+{ -+ int err; -+ -+ err = au_wbr_bu(dentry->d_sb, bstart); -+ AuDbg("b%d\n", err); -+ if (err > bstart) -+ err = au_wbr_nonopq(dentry, err); -+ -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+static int au_wbr_copyup_bu(struct dentry *dentry) -+{ -+ int err; -+ aufs_bindex_t bstart; -+ -+ bstart = au_dbstart(dentry); -+ err = au_wbr_do_copyup_bu(dentry, bstart); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_wbr_copyup_operations au_wbr_copyup_ops[] = { -+ [AuWbrCopyup_TDP] = { -+ .copyup = au_wbr_copyup_tdp -+ }, -+ [AuWbrCopyup_BUP] = { -+ .copyup = au_wbr_copyup_bup -+ }, -+ [AuWbrCopyup_BU] = { -+ .copyup = au_wbr_copyup_bu -+ } -+}; -+ -+struct au_wbr_create_operations au_wbr_create_ops[] = { -+ [AuWbrCreate_TDP] = { -+ .create = au_wbr_create_tdp -+ }, -+ [AuWbrCreate_RR] = { -+ .create = au_wbr_create_rr, -+ .init = au_wbr_create_init_rr -+ }, -+ [AuWbrCreate_MFS] = { -+ .create = au_wbr_create_mfs, -+ .init = au_wbr_create_init_mfs, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_MFSV] = { -+ .create = au_wbr_create_mfs, -+ .init = au_wbr_create_init_mfs, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_MFSRR] = { -+ .create = au_wbr_create_mfsrr, -+ .init = au_wbr_create_init_mfsrr, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_MFSRRV] = { -+ .create = au_wbr_create_mfsrr, -+ .init = au_wbr_create_init_mfsrr, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_PMFS] = { -+ .create = au_wbr_create_pmfs, -+ .init = au_wbr_create_init_mfs, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_PMFSV] = { -+ .create = au_wbr_create_pmfs, -+ .init = au_wbr_create_init_mfs, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_PMFSRR] = 
{ -+ .create = au_wbr_create_pmfsrr, -+ .init = au_wbr_create_init_mfsrr, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_PMFSRRV] = { -+ .create = au_wbr_create_pmfsrr, -+ .init = au_wbr_create_init_mfsrr, -+ .fin = au_wbr_create_fin_mfs -+ } -+}; -diff --git a/fs/aufs/whout.c b/fs/aufs/whout.c -new file mode 100644 -index 0000000..fb667ee ---- /dev/null -+++ b/fs/aufs/whout.c -@@ -0,0 +1,1061 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * whiteout for logical deletion and opaque directory -+ */ -+ -+#include "aufs.h" -+ -+#define WH_MASK S_IRUGO -+ -+/* -+ * If a directory contains this file, then it is opaque. We start with the -+ * .wh. flag so that it is blocked by lookup. -+ */ -+static struct qstr diropq_name = QSTR_INIT(AUFS_WH_DIROPQ, -+ sizeof(AUFS_WH_DIROPQ) - 1); -+ -+/* -+ * generate whiteout name, which is NOT terminated by NULL. -+ * @name: original d_name.name -+ * @len: original d_name.len -+ * @wh: whiteout qstr -+ * returns zero when succeeds, otherwise error. -+ * succeeded value as wh->name should be freed by kfree(). 
-+ */ -+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name) -+{ -+ char *p; -+ -+ if (unlikely(name->len > PATH_MAX - AUFS_WH_PFX_LEN)) -+ return -ENAMETOOLONG; -+ -+ wh->len = name->len + AUFS_WH_PFX_LEN; -+ p = kmalloc(wh->len, GFP_NOFS); -+ wh->name = p; -+ if (p) { -+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN); -+ memcpy(p + AUFS_WH_PFX_LEN, name->name, name->len); -+ /* smp_mb(); */ -+ return 0; -+ } -+ return -ENOMEM; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * test if the @wh_name exists under @h_parent. -+ * @try_sio specifies the necessary of super-io. -+ */ -+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio) -+{ -+ int err; -+ struct dentry *wh_dentry; -+ -+ if (!try_sio) -+ wh_dentry = vfsub_lkup_one(wh_name, h_parent); -+ else -+ wh_dentry = au_sio_lkup_one(wh_name, h_parent); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) { -+ if (err == -ENAMETOOLONG) -+ err = 0; -+ goto out; -+ } -+ -+ err = 0; -+ if (!wh_dentry->d_inode) -+ goto out_wh; /* success */ -+ -+ err = 1; -+ if (S_ISREG(wh_dentry->d_inode->i_mode)) -+ goto out_wh; /* success */ -+ -+ err = -EIO; -+ AuIOErr("%pd Invalid whiteout entry type 0%o.\n", -+ wh_dentry, wh_dentry->d_inode->i_mode); -+ -+out_wh: -+ dput(wh_dentry); -+out: -+ return err; -+} -+ -+/* -+ * test if the @h_dentry sets opaque or not. -+ */ -+int au_diropq_test(struct dentry *h_dentry) -+{ -+ int err; -+ struct inode *h_dir; -+ -+ h_dir = h_dentry->d_inode; -+ err = au_wh_test(h_dentry, &diropq_name, -+ au_test_h_perm_sio(h_dir, MAY_EXEC)); -+ return err; -+} -+ -+/* -+ * returns a negative dentry whose name is unique and temporary. 
-+ */ -+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br, -+ struct qstr *prefix) -+{ -+ struct dentry *dentry; -+ int i; -+ char defname[NAME_MAX - AUFS_MAX_NAMELEN + DNAME_INLINE_LEN + 1], -+ *name, *p; -+ /* strict atomic_t is unnecessary here */ -+ static unsigned short cnt; -+ struct qstr qs; -+ -+ BUILD_BUG_ON(sizeof(cnt) * 2 > AUFS_WH_TMP_LEN); -+ -+ name = defname; -+ qs.len = sizeof(defname) - DNAME_INLINE_LEN + prefix->len - 1; -+ if (unlikely(prefix->len > DNAME_INLINE_LEN)) { -+ dentry = ERR_PTR(-ENAMETOOLONG); -+ if (unlikely(qs.len > NAME_MAX)) -+ goto out; -+ dentry = ERR_PTR(-ENOMEM); -+ name = kmalloc(qs.len + 1, GFP_NOFS); -+ if (unlikely(!name)) -+ goto out; -+ } -+ -+ /* doubly whiteout-ed */ -+ memcpy(name, AUFS_WH_PFX AUFS_WH_PFX, AUFS_WH_PFX_LEN * 2); -+ p = name + AUFS_WH_PFX_LEN * 2; -+ memcpy(p, prefix->name, prefix->len); -+ p += prefix->len; -+ *p++ = '.'; -+ AuDebugOn(name + qs.len + 1 - p <= AUFS_WH_TMP_LEN); -+ -+ qs.name = name; -+ for (i = 0; i < 3; i++) { -+ sprintf(p, "%.*x", AUFS_WH_TMP_LEN, cnt++); -+ dentry = au_sio_lkup_one(&qs, h_parent); -+ if (IS_ERR(dentry) || !dentry->d_inode) -+ goto out_name; -+ dput(dentry); -+ } -+ /* pr_warn("could not get random name\n"); */ -+ dentry = ERR_PTR(-EEXIST); -+ AuDbg("%.*s\n", AuLNPair(&qs)); -+ BUG(); -+ -+out_name: -+ if (name != defname) -+ kfree(name); -+out: -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+/* -+ * rename the @h_dentry on @br to the whiteouted temporary name. 
-+ */ -+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br) -+{ -+ int err; -+ struct path h_path = { -+ .mnt = au_br_mnt(br) -+ }; -+ struct inode *h_dir, *delegated; -+ struct dentry *h_parent; -+ -+ h_parent = h_dentry->d_parent; /* dir inode is locked */ -+ h_dir = h_parent->d_inode; -+ IMustLock(h_dir); -+ -+ h_path.dentry = au_whtmp_lkup(h_parent, br, &h_dentry->d_name); -+ err = PTR_ERR(h_path.dentry); -+ if (IS_ERR(h_path.dentry)) -+ goto out; -+ -+ /* under the same dir, no need to lock_rename() */ -+ delegated = NULL; -+ err = vfsub_rename(h_dir, h_dentry, h_dir, &h_path, &delegated); -+ AuTraceErr(err); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal rename\n"); -+ iput(delegated); -+ } -+ dput(h_path.dentry); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * functions for removing a whiteout -+ */ -+ -+static int do_unlink_wh(struct inode *h_dir, struct path *h_path) -+{ -+ int err, force; -+ struct inode *delegated; -+ -+ /* -+ * forces superio when the dir has a sticky bit. -+ * this may be a violation of unix fs semantics. 
-+ */ -+ force = (h_dir->i_mode & S_ISVTX) -+ && !uid_eq(current_fsuid(), h_path->dentry->d_inode->i_uid); -+ delegated = NULL; -+ err = vfsub_unlink(h_dir, h_path, &delegated, force); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ return err; -+} -+ -+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path, -+ struct dentry *dentry) -+{ -+ int err; -+ -+ err = do_unlink_wh(h_dir, h_path); -+ if (!err && dentry) -+ au_set_dbwh(dentry, -1); -+ -+ return err; -+} -+ -+static int unlink_wh_name(struct dentry *h_parent, struct qstr *wh, -+ struct au_branch *br) -+{ -+ int err; -+ struct path h_path = { -+ .mnt = au_br_mnt(br) -+ }; -+ -+ err = 0; -+ h_path.dentry = vfsub_lkup_one(wh, h_parent); -+ if (IS_ERR(h_path.dentry)) -+ err = PTR_ERR(h_path.dentry); -+ else { -+ if (h_path.dentry->d_inode -+ && S_ISREG(h_path.dentry->d_inode->i_mode)) -+ err = do_unlink_wh(h_parent->d_inode, &h_path); -+ dput(h_path.dentry); -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * initialize/clean whiteout for a branch -+ */ -+ -+static void au_wh_clean(struct inode *h_dir, struct path *whpath, -+ const int isdir) -+{ -+ int err; -+ struct inode *delegated; -+ -+ if (!whpath->dentry->d_inode) -+ return; -+ -+ if (isdir) -+ err = vfsub_rmdir(h_dir, whpath); -+ else { -+ delegated = NULL; -+ err = vfsub_unlink(h_dir, whpath, &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ } -+ if (unlikely(err)) -+ pr_warn("failed removing %pd (%d), ignored.\n", -+ whpath->dentry, err); -+} -+ -+static int test_linkable(struct dentry *h_root) -+{ -+ struct inode *h_dir = h_root->d_inode; -+ -+ if (h_dir->i_op->link) -+ return 0; -+ -+ pr_err("%pd (%s) doesn't support link(2), use noplink and rw+nolwh\n", 
-+ h_root, au_sbtype(h_root->d_sb)); -+ return -ENOSYS; -+} -+ -+/* todo: should this mkdir be done in /sbin/mount.aufs helper? */ -+static int au_whdir(struct inode *h_dir, struct path *path) -+{ -+ int err; -+ -+ err = -EEXIST; -+ if (!path->dentry->d_inode) { -+ int mode = S_IRWXU; -+ -+ if (au_test_nfs(path->dentry->d_sb)) -+ mode |= S_IXUGO; -+ err = vfsub_mkdir(h_dir, path, mode); -+ } else if (d_is_dir(path->dentry)) -+ err = 0; -+ else -+ pr_err("unknown %pd exists\n", path->dentry); -+ -+ return err; -+} -+ -+struct au_wh_base { -+ const struct qstr *name; -+ struct dentry *dentry; -+}; -+ -+static void au_wh_init_ro(struct inode *h_dir, struct au_wh_base base[], -+ struct path *h_path) -+{ -+ h_path->dentry = base[AuBrWh_BASE].dentry; -+ au_wh_clean(h_dir, h_path, /*isdir*/0); -+ h_path->dentry = base[AuBrWh_PLINK].dentry; -+ au_wh_clean(h_dir, h_path, /*isdir*/1); -+ h_path->dentry = base[AuBrWh_ORPH].dentry; -+ au_wh_clean(h_dir, h_path, /*isdir*/1); -+} -+ -+/* -+ * returns tri-state, -+ * minus: error, caller should print the message -+ * zero: succuess -+ * plus: error, caller should NOT print the message -+ */ -+static int au_wh_init_rw_nolink(struct dentry *h_root, struct au_wbr *wbr, -+ int do_plink, struct au_wh_base base[], -+ struct path *h_path) -+{ -+ int err; -+ struct inode *h_dir; -+ -+ h_dir = h_root->d_inode; -+ h_path->dentry = base[AuBrWh_BASE].dentry; -+ au_wh_clean(h_dir, h_path, /*isdir*/0); -+ h_path->dentry = base[AuBrWh_PLINK].dentry; -+ if (do_plink) { -+ err = test_linkable(h_root); -+ if (unlikely(err)) { -+ err = 1; -+ goto out; -+ } -+ -+ err = au_whdir(h_dir, h_path); -+ if (unlikely(err)) -+ goto out; -+ wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry); -+ } else -+ au_wh_clean(h_dir, h_path, /*isdir*/1); -+ h_path->dentry = base[AuBrWh_ORPH].dentry; -+ err = au_whdir(h_dir, h_path); -+ if (unlikely(err)) -+ goto out; -+ wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry); -+ -+out: -+ return err; -+} -+ -+/* -+ * for the 
moment, aufs supports the branch filesystem which does not support -+ * link(2). testing on FAT which does not support i_op->setattr() fully either, -+ * copyup failed. finally, such filesystem will not be used as the writable -+ * branch. -+ * -+ * returns tri-state, see above. -+ */ -+static int au_wh_init_rw(struct dentry *h_root, struct au_wbr *wbr, -+ int do_plink, struct au_wh_base base[], -+ struct path *h_path) -+{ -+ int err; -+ struct inode *h_dir; -+ -+ WbrWhMustWriteLock(wbr); -+ -+ err = test_linkable(h_root); -+ if (unlikely(err)) { -+ err = 1; -+ goto out; -+ } -+ -+ /* -+ * todo: should this create be done in /sbin/mount.aufs helper? -+ */ -+ err = -EEXIST; -+ h_dir = h_root->d_inode; -+ if (!base[AuBrWh_BASE].dentry->d_inode) { -+ h_path->dentry = base[AuBrWh_BASE].dentry; -+ err = vfsub_create(h_dir, h_path, WH_MASK, /*want_excl*/true); -+ } else if (S_ISREG(base[AuBrWh_BASE].dentry->d_inode->i_mode)) -+ err = 0; -+ else -+ pr_err("unknown %pd2 exists\n", base[AuBrWh_BASE].dentry); -+ if (unlikely(err)) -+ goto out; -+ -+ h_path->dentry = base[AuBrWh_PLINK].dentry; -+ if (do_plink) { -+ err = au_whdir(h_dir, h_path); -+ if (unlikely(err)) -+ goto out; -+ wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry); -+ } else -+ au_wh_clean(h_dir, h_path, /*isdir*/1); -+ wbr->wbr_whbase = dget(base[AuBrWh_BASE].dentry); -+ -+ h_path->dentry = base[AuBrWh_ORPH].dentry; -+ err = au_whdir(h_dir, h_path); -+ if (unlikely(err)) -+ goto out; -+ wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry); -+ -+out: -+ return err; -+} -+ -+/* -+ * initialize the whiteout base file/dir for @br. 
-+ */ -+int au_wh_init(struct au_branch *br, struct super_block *sb) -+{ -+ int err, i; -+ const unsigned char do_plink -+ = !!au_opt_test(au_mntflags(sb), PLINK); -+ struct inode *h_dir; -+ struct path path = br->br_path; -+ struct dentry *h_root = path.dentry; -+ struct au_wbr *wbr = br->br_wbr; -+ static const struct qstr base_name[] = { -+ [AuBrWh_BASE] = QSTR_INIT(AUFS_BASE_NAME, -+ sizeof(AUFS_BASE_NAME) - 1), -+ [AuBrWh_PLINK] = QSTR_INIT(AUFS_PLINKDIR_NAME, -+ sizeof(AUFS_PLINKDIR_NAME) - 1), -+ [AuBrWh_ORPH] = QSTR_INIT(AUFS_ORPHDIR_NAME, -+ sizeof(AUFS_ORPHDIR_NAME) - 1) -+ }; -+ struct au_wh_base base[] = { -+ [AuBrWh_BASE] = { -+ .name = base_name + AuBrWh_BASE, -+ .dentry = NULL -+ }, -+ [AuBrWh_PLINK] = { -+ .name = base_name + AuBrWh_PLINK, -+ .dentry = NULL -+ }, -+ [AuBrWh_ORPH] = { -+ .name = base_name + AuBrWh_ORPH, -+ .dentry = NULL -+ } -+ }; -+ -+ if (wbr) -+ WbrWhMustWriteLock(wbr); -+ -+ for (i = 0; i < AuBrWh_Last; i++) { -+ /* doubly whiteouted */ -+ struct dentry *d; -+ -+ d = au_wh_lkup(h_root, (void *)base[i].name, br); -+ err = PTR_ERR(d); -+ if (IS_ERR(d)) -+ goto out; -+ -+ base[i].dentry = d; -+ AuDebugOn(wbr -+ && wbr->wbr_wh[i] -+ && wbr->wbr_wh[i] != base[i].dentry); -+ } -+ -+ if (wbr) -+ for (i = 0; i < AuBrWh_Last; i++) { -+ dput(wbr->wbr_wh[i]); -+ wbr->wbr_wh[i] = NULL; -+ } -+ -+ err = 0; -+ if (!au_br_writable(br->br_perm)) { -+ h_dir = h_root->d_inode; -+ au_wh_init_ro(h_dir, base, &path); -+ } else if (!au_br_wh_linkable(br->br_perm)) { -+ err = au_wh_init_rw_nolink(h_root, wbr, do_plink, base, &path); -+ if (err > 0) -+ goto out; -+ else if (err) -+ goto out_err; -+ } else { -+ err = au_wh_init_rw(h_root, wbr, do_plink, base, &path); -+ if (err > 0) -+ goto out; -+ else if (err) -+ goto out_err; -+ } -+ goto out; /* success */ -+ -+out_err: -+ pr_err("an error(%d) on the writable branch %pd(%s)\n", -+ err, h_root, au_sbtype(h_root->d_sb)); -+out: -+ for (i = 0; i < AuBrWh_Last; i++) -+ dput(base[i].dentry); -+ return 
err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * whiteouts are all hard-linked usually. -+ * when its link count reaches a ceiling, we create a new whiteout base -+ * asynchronously. -+ */ -+ -+struct reinit_br_wh { -+ struct super_block *sb; -+ struct au_branch *br; -+}; -+ -+static void reinit_br_wh(void *arg) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct path h_path; -+ struct reinit_br_wh *a = arg; -+ struct au_wbr *wbr; -+ struct inode *dir, *delegated; -+ struct dentry *h_root; -+ struct au_hinode *hdir; -+ -+ err = 0; -+ wbr = a->br->br_wbr; -+ /* big aufs lock */ -+ si_noflush_write_lock(a->sb); -+ if (!au_br_writable(a->br->br_perm)) -+ goto out; -+ bindex = au_br_index(a->sb, a->br->br_id); -+ if (unlikely(bindex < 0)) -+ goto out; -+ -+ di_read_lock_parent(a->sb->s_root, AuLock_IR); -+ dir = a->sb->s_root->d_inode; -+ hdir = au_hi(dir, bindex); -+ h_root = au_h_dptr(a->sb->s_root, bindex); -+ AuDebugOn(h_root != au_br_dentry(a->br)); -+ -+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT); -+ wbr_wh_write_lock(wbr); -+ err = au_h_verify(wbr->wbr_whbase, au_opt_udba(a->sb), hdir->hi_inode, -+ h_root, a->br); -+ if (!err) { -+ h_path.dentry = wbr->wbr_whbase; -+ h_path.mnt = au_br_mnt(a->br); -+ delegated = NULL; -+ err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated, -+ /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ } else { -+ pr_warn("%pd is moved, ignored\n", wbr->wbr_whbase); -+ err = 0; -+ } -+ dput(wbr->wbr_whbase); -+ wbr->wbr_whbase = NULL; -+ if (!err) -+ err = au_wh_init(a->br, a->sb); -+ wbr_wh_write_unlock(wbr); -+ au_hn_imtx_unlock(hdir); -+ di_read_unlock(a->sb->s_root, AuLock_IR); -+ if (!err) -+ au_fhsm_wrote(a->sb, bindex, /*force*/0); -+ -+out: -+ if (wbr) -+ atomic_dec(&wbr->wbr_wh_running); -+ atomic_dec(&a->br->br_count); -+ si_write_unlock(a->sb); -+ 
au_nwt_done(&au_sbi(a->sb)->si_nowait); -+ kfree(arg); -+ if (unlikely(err)) -+ AuIOErr("err %d\n", err); -+} -+ -+static void kick_reinit_br_wh(struct super_block *sb, struct au_branch *br) -+{ -+ int do_dec, wkq_err; -+ struct reinit_br_wh *arg; -+ -+ do_dec = 1; -+ if (atomic_inc_return(&br->br_wbr->wbr_wh_running) != 1) -+ goto out; -+ -+ /* ignore ENOMEM */ -+ arg = kmalloc(sizeof(*arg), GFP_NOFS); -+ if (arg) { -+ /* -+ * dec(wh_running), kfree(arg) and dec(br_count) -+ * in reinit function -+ */ -+ arg->sb = sb; -+ arg->br = br; -+ atomic_inc(&br->br_count); -+ wkq_err = au_wkq_nowait(reinit_br_wh, arg, sb, /*flags*/0); -+ if (unlikely(wkq_err)) { -+ atomic_dec(&br->br_wbr->wbr_wh_running); -+ atomic_dec(&br->br_count); -+ kfree(arg); -+ } -+ do_dec = 0; -+ } -+ -+out: -+ if (do_dec) -+ atomic_dec(&br->br_wbr->wbr_wh_running); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * create the whiteout @wh. -+ */ -+static int link_or_create_wh(struct super_block *sb, aufs_bindex_t bindex, -+ struct dentry *wh) -+{ -+ int err; -+ struct path h_path = { -+ .dentry = wh -+ }; -+ struct au_branch *br; -+ struct au_wbr *wbr; -+ struct dentry *h_parent; -+ struct inode *h_dir, *delegated; -+ -+ h_parent = wh->d_parent; /* dir inode is locked */ -+ h_dir = h_parent->d_inode; -+ IMustLock(h_dir); -+ -+ br = au_sbr(sb, bindex); -+ h_path.mnt = au_br_mnt(br); -+ wbr = br->br_wbr; -+ wbr_wh_read_lock(wbr); -+ if (wbr->wbr_whbase) { -+ delegated = NULL; -+ err = vfsub_link(wbr->wbr_whbase, h_dir, &h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ if (!err || err != -EMLINK) -+ goto out; -+ -+ /* link count full. re-initialize br_whbase. 
*/ -+ kick_reinit_br_wh(sb, br); -+ } -+ -+ /* return this error in this context */ -+ err = vfsub_create(h_dir, &h_path, WH_MASK, /*want_excl*/true); -+ if (!err) -+ au_fhsm_wrote(sb, bindex, /*force*/0); -+ -+out: -+ wbr_wh_read_unlock(wbr); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * create or remove the diropq. -+ */ -+static struct dentry *do_diropq(struct dentry *dentry, aufs_bindex_t bindex, -+ unsigned int flags) -+{ -+ struct dentry *opq_dentry, *h_dentry; -+ struct super_block *sb; -+ struct au_branch *br; -+ int err; -+ -+ sb = dentry->d_sb; -+ br = au_sbr(sb, bindex); -+ h_dentry = au_h_dptr(dentry, bindex); -+ opq_dentry = vfsub_lkup_one(&diropq_name, h_dentry); -+ if (IS_ERR(opq_dentry)) -+ goto out; -+ -+ if (au_ftest_diropq(flags, CREATE)) { -+ err = link_or_create_wh(sb, bindex, opq_dentry); -+ if (!err) { -+ au_set_dbdiropq(dentry, bindex); -+ goto out; /* success */ -+ } -+ } else { -+ struct path tmp = { -+ .dentry = opq_dentry, -+ .mnt = au_br_mnt(br) -+ }; -+ err = do_unlink_wh(au_h_iptr(dentry->d_inode, bindex), &tmp); -+ if (!err) -+ au_set_dbdiropq(dentry, -1); -+ } -+ dput(opq_dentry); -+ opq_dentry = ERR_PTR(err); -+ -+out: -+ return opq_dentry; -+} -+ -+struct do_diropq_args { -+ struct dentry **errp; -+ struct dentry *dentry; -+ aufs_bindex_t bindex; -+ unsigned int flags; -+}; -+ -+static void call_do_diropq(void *args) -+{ -+ struct do_diropq_args *a = args; -+ *a->errp = do_diropq(a->dentry, a->bindex, a->flags); -+} -+ -+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex, -+ unsigned int flags) -+{ -+ struct dentry *diropq, *h_dentry; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!au_test_h_perm_sio(h_dentry->d_inode, MAY_EXEC | MAY_WRITE)) -+ diropq = do_diropq(dentry, bindex, flags); -+ else { -+ int wkq_err; -+ struct do_diropq_args args = { -+ .errp = &diropq, -+ .dentry = dentry, -+ .bindex = bindex, -+ .flags = flags -+ }; -+ -+ 
wkq_err = au_wkq_wait(call_do_diropq, &args); -+ if (unlikely(wkq_err)) -+ diropq = ERR_PTR(wkq_err); -+ } -+ -+ return diropq; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * lookup whiteout dentry. -+ * @h_parent: lower parent dentry which must exist and be locked -+ * @base_name: name of dentry which will be whiteouted -+ * returns dentry for whiteout. -+ */ -+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name, -+ struct au_branch *br) -+{ -+ int err; -+ struct qstr wh_name; -+ struct dentry *wh_dentry; -+ -+ err = au_wh_name_alloc(&wh_name, base_name); -+ wh_dentry = ERR_PTR(err); -+ if (!err) { -+ wh_dentry = vfsub_lkup_one(&wh_name, h_parent); -+ kfree(wh_name.name); -+ } -+ return wh_dentry; -+} -+ -+/* -+ * link/create a whiteout for @dentry on @bindex. -+ */ -+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent) -+{ -+ struct dentry *wh_dentry; -+ struct super_block *sb; -+ int err; -+ -+ sb = dentry->d_sb; -+ wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, au_sbr(sb, bindex)); -+ if (!IS_ERR(wh_dentry) && !wh_dentry->d_inode) { -+ err = link_or_create_wh(sb, bindex, wh_dentry); -+ if (!err) { -+ au_set_dbwh(dentry, bindex); -+ au_fhsm_wrote(sb, bindex, /*force*/0); -+ } else { -+ dput(wh_dentry); -+ wh_dentry = ERR_PTR(err); -+ } -+ } -+ -+ return wh_dentry; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* Delete all whiteouts in this directory on branch bindex. 
*/ -+static int del_wh_children(struct dentry *h_dentry, struct au_nhash *whlist, -+ aufs_bindex_t bindex, struct au_branch *br) -+{ -+ int err; -+ unsigned long ul, n; -+ struct qstr wh_name; -+ char *p; -+ struct hlist_head *head; -+ struct au_vdir_wh *pos; -+ struct au_vdir_destr *str; -+ -+ err = -ENOMEM; -+ p = (void *)__get_free_page(GFP_NOFS); -+ wh_name.name = p; -+ if (unlikely(!wh_name.name)) -+ goto out; -+ -+ err = 0; -+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN); -+ p += AUFS_WH_PFX_LEN; -+ n = whlist->nh_num; -+ head = whlist->nh_head; -+ for (ul = 0; !err && ul < n; ul++, head++) { -+ hlist_for_each_entry(pos, head, wh_hash) { -+ if (pos->wh_bindex != bindex) -+ continue; -+ -+ str = &pos->wh_str; -+ if (str->len + AUFS_WH_PFX_LEN <= PATH_MAX) { -+ memcpy(p, str->name, str->len); -+ wh_name.len = AUFS_WH_PFX_LEN + str->len; -+ err = unlink_wh_name(h_dentry, &wh_name, br); -+ if (!err) -+ continue; -+ break; -+ } -+ AuIOErr("whiteout name too long %.*s\n", -+ str->len, str->name); -+ err = -EIO; -+ break; -+ } -+ } -+ free_page((unsigned long)wh_name.name); -+ -+out: -+ return err; -+} -+ -+struct del_wh_children_args { -+ int *errp; -+ struct dentry *h_dentry; -+ struct au_nhash *whlist; -+ aufs_bindex_t bindex; -+ struct au_branch *br; -+}; -+ -+static void call_del_wh_children(void *args) -+{ -+ struct del_wh_children_args *a = args; -+ *a->errp = del_wh_children(a->h_dentry, a->whlist, a->bindex, a->br); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp) -+{ -+ struct au_whtmp_rmdir *whtmp; -+ int err; -+ unsigned int rdhash; -+ -+ SiMustAnyLock(sb); -+ -+ whtmp = kzalloc(sizeof(*whtmp), gfp); -+ if (unlikely(!whtmp)) { -+ whtmp = ERR_PTR(-ENOMEM); -+ goto out; -+ } -+ -+ /* no estimation for dir size */ -+ rdhash = au_sbi(sb)->si_rdhash; -+ if (!rdhash) -+ rdhash = AUFS_RDHASH_DEF; -+ err = au_nhash_alloc(&whtmp->whlist, rdhash, 
gfp); -+ if (unlikely(err)) { -+ kfree(whtmp); -+ whtmp = ERR_PTR(err); -+ } -+ -+out: -+ return whtmp; -+} -+ -+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp) -+{ -+ if (whtmp->br) -+ atomic_dec(&whtmp->br->br_count); -+ dput(whtmp->wh_dentry); -+ iput(whtmp->dir); -+ au_nhash_wh_free(&whtmp->whlist); -+ kfree(whtmp); -+} -+ -+/* -+ * rmdir the whiteouted temporary named dir @h_dentry. -+ * @whlist: whiteouted children. -+ */ -+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex, -+ struct dentry *wh_dentry, struct au_nhash *whlist) -+{ -+ int err; -+ unsigned int h_nlink; -+ struct path h_tmp; -+ struct inode *wh_inode, *h_dir; -+ struct au_branch *br; -+ -+ h_dir = wh_dentry->d_parent->d_inode; /* dir inode is locked */ -+ IMustLock(h_dir); -+ -+ br = au_sbr(dir->i_sb, bindex); -+ wh_inode = wh_dentry->d_inode; -+ mutex_lock_nested(&wh_inode->i_mutex, AuLsc_I_CHILD); -+ -+ /* -+ * someone else might change some whiteouts while we were sleeping. -+ * it means this whlist may have an obsoleted entry. 
-+ */ -+ if (!au_test_h_perm_sio(wh_inode, MAY_EXEC | MAY_WRITE)) -+ err = del_wh_children(wh_dentry, whlist, bindex, br); -+ else { -+ int wkq_err; -+ struct del_wh_children_args args = { -+ .errp = &err, -+ .h_dentry = wh_dentry, -+ .whlist = whlist, -+ .bindex = bindex, -+ .br = br -+ }; -+ -+ wkq_err = au_wkq_wait(call_del_wh_children, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ mutex_unlock(&wh_inode->i_mutex); -+ -+ if (!err) { -+ h_tmp.dentry = wh_dentry; -+ h_tmp.mnt = au_br_mnt(br); -+ h_nlink = h_dir->i_nlink; -+ err = vfsub_rmdir(h_dir, &h_tmp); -+ /* some fs doesn't change the parent nlink in some cases */ -+ h_nlink -= h_dir->i_nlink; -+ } -+ -+ if (!err) { -+ if (au_ibstart(dir) == bindex) { -+ /* todo: dir->i_mutex is necessary */ -+ au_cpup_attr_timesizes(dir); -+ if (h_nlink) -+ vfsub_drop_nlink(dir); -+ } -+ return 0; /* success */ -+ } -+ -+ pr_warn("failed removing %pd(%d), ignored\n", wh_dentry, err); -+ return err; -+} -+ -+static void call_rmdir_whtmp(void *args) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct au_whtmp_rmdir *a = args; -+ struct super_block *sb; -+ struct dentry *h_parent; -+ struct inode *h_dir; -+ struct au_hinode *hdir; -+ -+ /* rmdir by nfsd may cause deadlock with this i_mutex */ -+ /* mutex_lock(&a->dir->i_mutex); */ -+ err = -EROFS; -+ sb = a->dir->i_sb; -+ si_read_lock(sb, !AuLock_FLUSH); -+ if (!au_br_writable(a->br->br_perm)) -+ goto out; -+ bindex = au_br_index(sb, a->br->br_id); -+ if (unlikely(bindex < 0)) -+ goto out; -+ -+ err = -EIO; -+ ii_write_lock_parent(a->dir); -+ h_parent = dget_parent(a->wh_dentry); -+ h_dir = h_parent->d_inode; -+ hdir = au_hi(a->dir, bindex); -+ err = vfsub_mnt_want_write(au_br_mnt(a->br)); -+ if (unlikely(err)) -+ goto out_mnt; -+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT); -+ err = au_h_verify(a->wh_dentry, au_opt_udba(sb), h_dir, h_parent, -+ a->br); -+ if (!err) -+ err = au_whtmp_rmdir(a->dir, bindex, a->wh_dentry, &a->whlist); -+ au_hn_imtx_unlock(hdir); -+ 
vfsub_mnt_drop_write(au_br_mnt(a->br)); -+ -+out_mnt: -+ dput(h_parent); -+ ii_write_unlock(a->dir); -+out: -+ /* mutex_unlock(&a->dir->i_mutex); */ -+ au_whtmp_rmdir_free(a); -+ si_read_unlock(sb); -+ au_nwt_done(&au_sbi(sb)->si_nowait); -+ if (unlikely(err)) -+ AuIOErr("err %d\n", err); -+} -+ -+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex, -+ struct dentry *wh_dentry, struct au_whtmp_rmdir *args) -+{ -+ int wkq_err; -+ struct super_block *sb; -+ -+ IMustLock(dir); -+ -+ /* all post-process will be done in do_rmdir_whtmp(). */ -+ sb = dir->i_sb; -+ args->dir = au_igrab(dir); -+ args->br = au_sbr(sb, bindex); -+ atomic_inc(&args->br->br_count); -+ args->wh_dentry = dget(wh_dentry); -+ wkq_err = au_wkq_nowait(call_rmdir_whtmp, args, sb, /*flags*/0); -+ if (unlikely(wkq_err)) { -+ pr_warn("rmdir error %pd (%d), ignored\n", wh_dentry, wkq_err); -+ au_whtmp_rmdir_free(args); -+ } -+} -diff --git a/fs/aufs/whout.h b/fs/aufs/whout.h -new file mode 100644 -index 0000000..5a5c378 ---- /dev/null -+++ b/fs/aufs/whout.h -@@ -0,0 +1,85 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * whiteout for logical deletion and opaque directory -+ */ -+ -+#ifndef __AUFS_WHOUT_H__ -+#define __AUFS_WHOUT_H__ -+ -+#ifdef __KERNEL__ -+ -+#include "dir.h" -+ -+/* whout.c */ -+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name); -+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio); -+int au_diropq_test(struct dentry *h_dentry); -+struct au_branch; -+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br, -+ struct qstr *prefix); -+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br); -+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path, -+ struct dentry *dentry); -+int au_wh_init(struct au_branch *br, struct super_block *sb); -+ -+/* diropq flags */ -+#define AuDiropq_CREATE 1 -+#define au_ftest_diropq(flags, name) ((flags) & AuDiropq_##name) -+#define au_fset_diropq(flags, name) \ -+ do { (flags) |= AuDiropq_##name; } while (0) -+#define au_fclr_diropq(flags, name) \ -+ do { (flags) &= ~AuDiropq_##name; } while (0) -+ -+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex, -+ unsigned int flags); -+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name, -+ struct au_branch *br); -+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent); -+ -+/* real rmdir for the whiteout-ed dir */ -+struct au_whtmp_rmdir { -+ struct inode *dir; -+ struct au_branch *br; -+ struct dentry *wh_dentry; -+ struct au_nhash whlist; -+}; -+ -+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp); -+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp); -+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex, -+ struct dentry *wh_dentry, struct au_nhash *whlist); -+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex, -+ struct dentry *wh_dentry, struct au_whtmp_rmdir *args); -+ -+/* ---------------------------------------------------------------------- */ -+ 
-+static inline struct dentry *au_diropq_create(struct dentry *dentry, -+ aufs_bindex_t bindex) -+{ -+ return au_diropq_sio(dentry, bindex, AuDiropq_CREATE); -+} -+ -+static inline int au_diropq_remove(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ return PTR_ERR(au_diropq_sio(dentry, bindex, !AuDiropq_CREATE)); -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_WHOUT_H__ */ -diff --git a/fs/aufs/wkq.c b/fs/aufs/wkq.c -new file mode 100644 -index 0000000..a4e1b92 ---- /dev/null -+++ b/fs/aufs/wkq.c -@@ -0,0 +1,213 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * workqueue for asynchronous/super-io operations -+ * todo: try new dredential scheme -+ */ -+ -+#include -+#include "aufs.h" -+ -+/* internal workqueue named AUFS_WKQ_NAME */ -+ -+static struct workqueue_struct *au_wkq; -+ -+struct au_wkinfo { -+ struct work_struct wk; -+ struct kobject *kobj; -+ -+ unsigned int flags; /* see wkq.h */ -+ -+ au_wkq_func_t func; -+ void *args; -+ -+ struct completion *comp; -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void wkq_func(struct work_struct *wk) -+{ -+ struct au_wkinfo *wkinfo = container_of(wk, struct au_wkinfo, wk); -+ -+ AuDebugOn(!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)); -+ AuDebugOn(rlimit(RLIMIT_FSIZE) != RLIM_INFINITY); -+ -+ wkinfo->func(wkinfo->args); -+ if (au_ftest_wkq(wkinfo->flags, WAIT)) -+ complete(wkinfo->comp); -+ else { -+ kobject_put(wkinfo->kobj); -+ module_put(THIS_MODULE); /* todo: ?? */ -+ kfree(wkinfo); -+ } -+} -+ -+/* -+ * Since struct completion is large, try allocating it dynamically. 
-+ */ -+#if 1 /* defined(CONFIG_4KSTACKS) || defined(AuTest4KSTACKS) */ -+#define AuWkqCompDeclare(name) struct completion *comp = NULL -+ -+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp) -+{ -+ *comp = kmalloc(sizeof(**comp), GFP_NOFS); -+ if (*comp) { -+ init_completion(*comp); -+ wkinfo->comp = *comp; -+ return 0; -+ } -+ return -ENOMEM; -+} -+ -+static void au_wkq_comp_free(struct completion *comp) -+{ -+ kfree(comp); -+} -+ -+#else -+ -+/* no braces */ -+#define AuWkqCompDeclare(name) \ -+ DECLARE_COMPLETION_ONSTACK(_ ## name); \ -+ struct completion *comp = &_ ## name -+ -+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp) -+{ -+ wkinfo->comp = *comp; -+ return 0; -+} -+ -+static void au_wkq_comp_free(struct completion *comp __maybe_unused) -+{ -+ /* empty */ -+} -+#endif /* 4KSTACKS */ -+ -+static void au_wkq_run(struct au_wkinfo *wkinfo) -+{ -+ if (au_ftest_wkq(wkinfo->flags, NEST)) { -+ if (au_wkq_test()) { -+ AuWarn1("wkq from wkq, unless silly-rename on NFS," -+ " due to a dead dir by UDBA?\n"); -+ AuDebugOn(au_ftest_wkq(wkinfo->flags, WAIT)); -+ } -+ } else -+ au_dbg_verify_kthread(); -+ -+ if (au_ftest_wkq(wkinfo->flags, WAIT)) { -+ INIT_WORK_ONSTACK(&wkinfo->wk, wkq_func); -+ queue_work(au_wkq, &wkinfo->wk); -+ } else { -+ INIT_WORK(&wkinfo->wk, wkq_func); -+ schedule_work(&wkinfo->wk); -+ } -+} -+ -+/* -+ * Be careful. It is easy to make deadlock happen. 
-+ * processA: lock, wkq and wait -+ * processB: wkq and wait, lock in wkq -+ * --> deadlock -+ */ -+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args) -+{ -+ int err; -+ AuWkqCompDeclare(comp); -+ struct au_wkinfo wkinfo = { -+ .flags = flags, -+ .func = func, -+ .args = args -+ }; -+ -+ err = au_wkq_comp_alloc(&wkinfo, &comp); -+ if (!err) { -+ au_wkq_run(&wkinfo); -+ /* no timeout, no interrupt */ -+ wait_for_completion(wkinfo.comp); -+ au_wkq_comp_free(comp); -+ destroy_work_on_stack(&wkinfo.wk); -+ } -+ -+ return err; -+ -+} -+ -+/* -+ * Note: dget/dput() in func for aufs dentries are not supported. It will be a -+ * problem in a concurrent umounting. -+ */ -+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb, -+ unsigned int flags) -+{ -+ int err; -+ struct au_wkinfo *wkinfo; -+ -+ atomic_inc(&au_sbi(sb)->si_nowait.nw_len); -+ -+ /* -+ * wkq_func() must free this wkinfo. -+ * it highly depends upon the implementation of workqueue. -+ */ -+ err = 0; -+ wkinfo = kmalloc(sizeof(*wkinfo), GFP_NOFS); -+ if (wkinfo) { -+ wkinfo->kobj = &au_sbi(sb)->si_kobj; -+ wkinfo->flags = flags & ~AuWkq_WAIT; -+ wkinfo->func = func; -+ wkinfo->args = args; -+ wkinfo->comp = NULL; -+ kobject_get(wkinfo->kobj); -+ __module_get(THIS_MODULE); /* todo: ?? 
*/ -+ -+ au_wkq_run(wkinfo); -+ } else { -+ err = -ENOMEM; -+ au_nwt_done(&au_sbi(sb)->si_nowait); -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_nwt_init(struct au_nowait_tasks *nwt) -+{ -+ atomic_set(&nwt->nw_len, 0); -+ /* smp_mb(); */ /* atomic_set */ -+ init_waitqueue_head(&nwt->nw_wq); -+} -+ -+void au_wkq_fin(void) -+{ -+ destroy_workqueue(au_wkq); -+} -+ -+int __init au_wkq_init(void) -+{ -+ int err; -+ -+ err = 0; -+ au_wkq = alloc_workqueue(AUFS_WKQ_NAME, 0, WQ_DFL_ACTIVE); -+ if (IS_ERR(au_wkq)) -+ err = PTR_ERR(au_wkq); -+ else if (!au_wkq) -+ err = -ENOMEM; -+ -+ return err; -+} -diff --git a/fs/aufs/wkq.h b/fs/aufs/wkq.h -new file mode 100644 -index 0000000..830123c ---- /dev/null -+++ b/fs/aufs/wkq.h -@@ -0,0 +1,91 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * workqueue for asynchronous/super-io operations -+ * todo: try new credentials management scheme -+ */ -+ -+#ifndef __AUFS_WKQ_H__ -+#define __AUFS_WKQ_H__ -+ -+#ifdef __KERNEL__ -+ -+struct super_block; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * in the next operation, wait for the 'nowait' tasks in system-wide workqueue -+ */ -+struct au_nowait_tasks { -+ atomic_t nw_len; -+ wait_queue_head_t nw_wq; -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+typedef void (*au_wkq_func_t)(void *args); -+ -+/* wkq flags */ -+#define AuWkq_WAIT 1 -+#define AuWkq_NEST (1 << 1) -+#define au_ftest_wkq(flags, name) ((flags) & AuWkq_##name) -+#define au_fset_wkq(flags, name) \ -+ do { (flags) |= AuWkq_##name; } while (0) -+#define au_fclr_wkq(flags, name) \ -+ do { (flags) &= ~AuWkq_##name; } while (0) -+ -+#ifndef CONFIG_AUFS_HNOTIFY -+#undef AuWkq_NEST -+#define AuWkq_NEST 0 -+#endif -+ -+/* wkq.c */ -+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args); -+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb, -+ unsigned int flags); -+void au_nwt_init(struct au_nowait_tasks *nwt); -+int __init au_wkq_init(void); -+void au_wkq_fin(void); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline int au_wkq_test(void) -+{ -+ return current->flags & PF_WQ_WORKER; -+} -+ -+static inline int au_wkq_wait(au_wkq_func_t func, void *args) -+{ -+ return au_wkq_do_wait(AuWkq_WAIT, func, args); -+} -+ -+static inline void au_nwt_done(struct au_nowait_tasks *nwt) -+{ -+ if (atomic_dec_and_test(&nwt->nw_len)) -+ wake_up_all(&nwt->nw_wq); -+} -+ -+static inline int au_nwt_flush(struct au_nowait_tasks *nwt) -+{ -+ wait_event(nwt->nw_wq, !atomic_read(&nwt->nw_len)); -+ return 0; -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_WKQ_H__ */ -diff --git a/fs/aufs/xattr.c b/fs/aufs/xattr.c -new file mode 
100644 -index 0000000..e16beea ---- /dev/null -+++ b/fs/aufs/xattr.c -@@ -0,0 +1,344 @@ -+/* -+ * Copyright (C) 2014-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * handling xattr functions -+ */ -+ -+#include -+#include "aufs.h" -+ -+static int au_xattr_ignore(int err, char *name, unsigned int ignore_flags) -+{ -+ if (!ignore_flags) -+ goto out; -+ switch (err) { -+ case -ENOMEM: -+ case -EDQUOT: -+ goto out; -+ } -+ -+ if ((ignore_flags & AuBrAttr_ICEX) == AuBrAttr_ICEX) { -+ err = 0; -+ goto out; -+ } -+ -+#define cmp(brattr, prefix) do { \ -+ if (!strncmp(name, XATTR_##prefix##_PREFIX, \ -+ XATTR_##prefix##_PREFIX_LEN)) { \ -+ if (ignore_flags & AuBrAttr_ICEX_##brattr) \ -+ err = 0; \ -+ goto out; \ -+ } \ -+ } while (0) -+ -+ cmp(SEC, SECURITY); -+ cmp(SYS, SYSTEM); -+ cmp(TR, TRUSTED); -+ cmp(USR, USER); -+#undef cmp -+ -+ if (ignore_flags & AuBrAttr_ICEX_OTH) -+ err = 0; -+ -+out: -+ return err; -+} -+ -+static const int au_xattr_out_of_list = AuBrAttr_ICEX_OTH << 1; -+ -+static int au_do_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, -+ char *name, char **buf, unsigned int ignore_flags, -+ unsigned int verbose) -+{ -+ int err; -+ ssize_t ssz; -+ struct inode *h_idst; -+ -+ ssz = vfs_getxattr_alloc(h_src, name, buf, 0, GFP_NOFS); -+ err = ssz; -+ if (unlikely(err <= 0)) { -+ if (err == -ENODATA -+ || (err == 
-EOPNOTSUPP -+ && ((ignore_flags & au_xattr_out_of_list) -+ || (au_test_nfs_noacl(h_src->d_inode) -+ && (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS) -+ || !strcmp(name, -+ XATTR_NAME_POSIX_ACL_DEFAULT)))) -+ )) -+ err = 0; -+ if (err && (verbose || au_debug_test())) -+ pr_err("%s, err %d\n", name, err); -+ goto out; -+ } -+ -+ /* unlock it temporary */ -+ h_idst = h_dst->d_inode; -+ mutex_unlock(&h_idst->i_mutex); -+ err = vfsub_setxattr(h_dst, name, *buf, ssz, /*flags*/0); -+ mutex_lock_nested(&h_idst->i_mutex, AuLsc_I_CHILD2); -+ if (unlikely(err)) { -+ if (verbose || au_debug_test()) -+ pr_err("%s, err %d\n", name, err); -+ err = au_xattr_ignore(err, name, ignore_flags); -+ } -+ -+out: -+ return err; -+} -+ -+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags, -+ unsigned int verbose) -+{ -+ int err, unlocked, acl_access, acl_default; -+ ssize_t ssz; -+ struct inode *h_isrc, *h_idst; -+ char *value, *p, *o, *e; -+ -+ /* try stopping to update the source inode while we are referencing */ -+ /* there should not be the parent-child relationship between them */ -+ h_isrc = h_src->d_inode; -+ h_idst = h_dst->d_inode; -+ mutex_unlock(&h_idst->i_mutex); -+ mutex_lock_nested(&h_isrc->i_mutex, AuLsc_I_CHILD); -+ mutex_lock_nested(&h_idst->i_mutex, AuLsc_I_CHILD2); -+ unlocked = 0; -+ -+ /* some filesystems don't list POSIX ACL, for example tmpfs */ -+ ssz = vfs_listxattr(h_src, NULL, 0); -+ err = ssz; -+ if (unlikely(err < 0)) { -+ AuTraceErr(err); -+ if (err == -ENODATA -+ || err == -EOPNOTSUPP) -+ err = 0; /* ignore */ -+ goto out; -+ } -+ -+ err = 0; -+ p = NULL; -+ o = NULL; -+ if (ssz) { -+ err = -ENOMEM; -+ p = kmalloc(ssz, GFP_NOFS); -+ o = p; -+ if (unlikely(!p)) -+ goto out; -+ err = vfs_listxattr(h_src, p, ssz); -+ } -+ mutex_unlock(&h_isrc->i_mutex); -+ unlocked = 1; -+ AuDbg("err %d, ssz %zd\n", err, ssz); -+ if (unlikely(err < 0)) -+ goto out_free; -+ -+ err = 0; -+ e = p + ssz; -+ value = NULL; -+ acl_access = 0; -+ acl_default = 
0; -+ while (!err && p < e) { -+ acl_access |= !strncmp(p, XATTR_NAME_POSIX_ACL_ACCESS, -+ sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1); -+ acl_default |= !strncmp(p, XATTR_NAME_POSIX_ACL_DEFAULT, -+ sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) -+ - 1); -+ err = au_do_cpup_xattr(h_dst, h_src, p, &value, ignore_flags, -+ verbose); -+ p += strlen(p) + 1; -+ } -+ AuTraceErr(err); -+ ignore_flags |= au_xattr_out_of_list; -+ if (!err && !acl_access) { -+ err = au_do_cpup_xattr(h_dst, h_src, -+ XATTR_NAME_POSIX_ACL_ACCESS, &value, -+ ignore_flags, verbose); -+ AuTraceErr(err); -+ } -+ if (!err && !acl_default) { -+ err = au_do_cpup_xattr(h_dst, h_src, -+ XATTR_NAME_POSIX_ACL_DEFAULT, &value, -+ ignore_flags, verbose); -+ AuTraceErr(err); -+ } -+ -+ kfree(value); -+ -+out_free: -+ kfree(o); -+out: -+ if (!unlocked) -+ mutex_unlock(&h_isrc->i_mutex); -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+enum { -+ AU_XATTR_LIST, -+ AU_XATTR_GET -+}; -+ -+struct au_lgxattr { -+ int type; -+ union { -+ struct { -+ char *list; -+ size_t size; -+ } list; -+ struct { -+ const char *name; -+ void *value; -+ size_t size; -+ } get; -+ } u; -+}; -+ -+static ssize_t au_lgxattr(struct dentry *dentry, struct au_lgxattr *arg) -+{ -+ ssize_t err; -+ struct path h_path; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out; -+ err = au_h_path_getattr(dentry, /*force*/1, &h_path); -+ if (unlikely(err)) -+ goto out_si; -+ if (unlikely(!h_path.dentry)) -+ /* illegally overlapped or something */ -+ goto out_di; /* pretending success */ -+ -+ /* always topmost entry only */ -+ switch (arg->type) { -+ case AU_XATTR_LIST: -+ err = vfs_listxattr(h_path.dentry, -+ arg->u.list.list, arg->u.list.size); -+ break; -+ case AU_XATTR_GET: -+ err = vfs_getxattr(h_path.dentry, -+ arg->u.get.name, arg->u.get.value, -+ arg->u.get.size); -+ break; -+ } -+ 
-+out_di: -+ di_read_unlock(dentry, AuLock_IR); -+out_si: -+ si_read_unlock(sb); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size) -+{ -+ struct au_lgxattr arg = { -+ .type = AU_XATTR_LIST, -+ .u.list = { -+ .list = list, -+ .size = size -+ }, -+ }; -+ -+ return au_lgxattr(dentry, &arg); -+} -+ -+ssize_t aufs_getxattr(struct dentry *dentry, const char *name, void *value, -+ size_t size) -+{ -+ struct au_lgxattr arg = { -+ .type = AU_XATTR_GET, -+ .u.get = { -+ .name = name, -+ .value = value, -+ .size = size -+ }, -+ }; -+ -+ return au_lgxattr(dentry, &arg); -+} -+ -+int aufs_setxattr(struct dentry *dentry, const char *name, const void *value, -+ size_t size, int flags) -+{ -+ struct au_srxattr arg = { -+ .type = AU_XATTR_SET, -+ .u.set = { -+ .name = name, -+ .value = value, -+ .size = size, -+ .flags = flags -+ }, -+ }; -+ -+ return au_srxattr(dentry, &arg); -+} -+ -+int aufs_removexattr(struct dentry *dentry, const char *name) -+{ -+ struct au_srxattr arg = { -+ .type = AU_XATTR_REMOVE, -+ .u.remove = { -+ .name = name -+ }, -+ }; -+ -+ return au_srxattr(dentry, &arg); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#if 0 -+static size_t au_xattr_list(struct dentry *dentry, char *list, size_t list_size, -+ const char *name, size_t name_len, int type) -+{ -+ return aufs_listxattr(dentry, list, list_size); -+} -+ -+static int au_xattr_get(struct dentry *dentry, const char *name, void *buffer, -+ size_t size, int type) -+{ -+ return aufs_getxattr(dentry, name, buffer, size); -+} -+ -+static int au_xattr_set(struct dentry *dentry, const char *name, -+ const void *value, size_t size, int flags, int type) -+{ -+ return aufs_setxattr(dentry, name, value, size, flags); -+} -+ -+static const struct xattr_handler au_xattr_handler = { -+ /* no prefix, no flags */ -+ .list = au_xattr_list, -+ .get = au_xattr_get, -+ .set = au_xattr_set -+ /* why no remove? 
*/ -+}; -+ -+static const struct xattr_handler *au_xattr_handlers[] = { -+ &au_xattr_handler -+}; -+ -+void au_xattr_init(struct super_block *sb) -+{ -+ /* sb->s_xattr = au_xattr_handlers; */ -+} -+#endif -diff --git a/fs/aufs/xino.c b/fs/aufs/xino.c -new file mode 100644 -index 0000000..50ab4ca ---- /dev/null -+++ b/fs/aufs/xino.c -@@ -0,0 +1,1343 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * external inode number translation table and bitmap -+ */ -+ -+#include -+#include -+#include "aufs.h" -+ -+/* todo: unnecessary to support mmap_sem since kernel-space? */ -+ssize_t xino_fread(au_readf_t func, struct file *file, void *kbuf, size_t size, -+ loff_t *pos) -+{ -+ ssize_t err; -+ mm_segment_t oldfs; -+ union { -+ void *k; -+ char __user *u; -+ } buf; -+ -+ buf.k = kbuf; -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+ do { -+ /* todo: signal_pending? 
*/ -+ err = func(file, buf.u, size, pos); -+ } while (err == -EAGAIN || err == -EINTR); -+ set_fs(oldfs); -+ -+#if 0 /* reserved for future use */ -+ if (err > 0) -+ fsnotify_access(file->f_dentry); -+#endif -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static ssize_t xino_fwrite_wkq(au_writef_t func, struct file *file, void *buf, -+ size_t size, loff_t *pos); -+ -+static ssize_t do_xino_fwrite(au_writef_t func, struct file *file, void *kbuf, -+ size_t size, loff_t *pos) -+{ -+ ssize_t err; -+ mm_segment_t oldfs; -+ union { -+ void *k; -+ const char __user *u; -+ } buf; -+ int i; -+ const int prevent_endless = 10; -+ -+ i = 0; -+ buf.k = kbuf; -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+ do { -+ err = func(file, buf.u, size, pos); -+ if (err == -EINTR -+ && !au_wkq_test() -+ && fatal_signal_pending(current)) { -+ set_fs(oldfs); -+ err = xino_fwrite_wkq(func, file, kbuf, size, pos); -+ BUG_ON(err == -EINTR); -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+ } -+ } while (i++ < prevent_endless -+ && (err == -EAGAIN || err == -EINTR)); -+ set_fs(oldfs); -+ -+#if 0 /* reserved for future use */ -+ if (err > 0) -+ fsnotify_modify(file->f_dentry); -+#endif -+ -+ return err; -+} -+ -+struct do_xino_fwrite_args { -+ ssize_t *errp; -+ au_writef_t func; -+ struct file *file; -+ void *buf; -+ size_t size; -+ loff_t *pos; -+}; -+ -+static void call_do_xino_fwrite(void *args) -+{ -+ struct do_xino_fwrite_args *a = args; -+ *a->errp = do_xino_fwrite(a->func, a->file, a->buf, a->size, a->pos); -+} -+ -+static ssize_t xino_fwrite_wkq(au_writef_t func, struct file *file, void *buf, -+ size_t size, loff_t *pos) -+{ -+ ssize_t err; -+ int wkq_err; -+ struct do_xino_fwrite_args args = { -+ .errp = &err, -+ .func = func, -+ .file = file, -+ .buf = buf, -+ .size = size, -+ .pos = pos -+ }; -+ -+ /* -+ * it breaks RLIMIT_FSIZE and normal user's limit, -+ * users should care about quota and real 'filesystem full.' 
-+ */ -+ wkq_err = au_wkq_wait(call_do_xino_fwrite, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ -+ return err; -+} -+ -+ssize_t xino_fwrite(au_writef_t func, struct file *file, void *buf, size_t size, -+ loff_t *pos) -+{ -+ ssize_t err; -+ -+ if (rlimit(RLIMIT_FSIZE) == RLIM_INFINITY) { -+ lockdep_off(); -+ err = do_xino_fwrite(func, file, buf, size, pos); -+ lockdep_on(); -+ } else -+ err = xino_fwrite_wkq(func, file, buf, size, pos); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * create a new xinofile at the same place/path as @base_file. -+ */ -+struct file *au_xino_create2(struct file *base_file, struct file *copy_src) -+{ -+ struct file *file; -+ struct dentry *base, *parent; -+ struct inode *dir, *delegated; -+ struct qstr *name; -+ struct path path; -+ int err; -+ -+ base = base_file->f_dentry; -+ parent = base->d_parent; /* dir inode is locked */ -+ dir = parent->d_inode; -+ IMustLock(dir); -+ -+ file = ERR_PTR(-EINVAL); -+ name = &base->d_name; -+ path.dentry = vfsub_lookup_one_len(name->name, parent, name->len); -+ if (IS_ERR(path.dentry)) { -+ file = (void *)path.dentry; -+ pr_err("%pd lookup err %ld\n", -+ base, PTR_ERR(path.dentry)); -+ goto out; -+ } -+ -+ /* no need to mnt_want_write() since we call dentry_open() later */ -+ err = vfs_create(dir, path.dentry, S_IRUGO | S_IWUGO, NULL); -+ if (unlikely(err)) { -+ file = ERR_PTR(err); -+ pr_err("%pd create err %d\n", base, err); -+ goto out_dput; -+ } -+ -+ path.mnt = base_file->f_path.mnt; -+ file = vfsub_dentry_open(&path, -+ O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE -+ /* | __FMODE_NONOTIFY */); -+ if (IS_ERR(file)) { -+ pr_err("%pd open err %ld\n", base, PTR_ERR(file)); -+ goto out_dput; -+ } -+ -+ delegated = NULL; -+ err = vfsub_unlink(dir, &file->f_path, &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ 
} -+ if (unlikely(err)) { -+ pr_err("%pd unlink err %d\n", base, err); -+ goto out_fput; -+ } -+ -+ if (copy_src) { -+ /* no one can touch copy_src xino */ -+ err = au_copy_file(file, copy_src, vfsub_f_size_read(copy_src)); -+ if (unlikely(err)) { -+ pr_err("%pd copy err %d\n", base, err); -+ goto out_fput; -+ } -+ } -+ goto out_dput; /* success */ -+ -+out_fput: -+ fput(file); -+ file = ERR_PTR(err); -+out_dput: -+ dput(path.dentry); -+out: -+ return file; -+} -+ -+struct au_xino_lock_dir { -+ struct au_hinode *hdir; -+ struct dentry *parent; -+ struct mutex *mtx; -+}; -+ -+static void au_xino_lock_dir(struct super_block *sb, struct file *xino, -+ struct au_xino_lock_dir *ldir) -+{ -+ aufs_bindex_t brid, bindex; -+ -+ ldir->hdir = NULL; -+ bindex = -1; -+ brid = au_xino_brid(sb); -+ if (brid >= 0) -+ bindex = au_br_index(sb, brid); -+ if (bindex >= 0) { -+ ldir->hdir = au_hi(sb->s_root->d_inode, bindex); -+ au_hn_imtx_lock_nested(ldir->hdir, AuLsc_I_PARENT); -+ } else { -+ ldir->parent = dget_parent(xino->f_dentry); -+ ldir->mtx = &ldir->parent->d_inode->i_mutex; -+ mutex_lock_nested(ldir->mtx, AuLsc_I_PARENT); -+ } -+} -+ -+static void au_xino_unlock_dir(struct au_xino_lock_dir *ldir) -+{ -+ if (ldir->hdir) -+ au_hn_imtx_unlock(ldir->hdir); -+ else { -+ mutex_unlock(ldir->mtx); -+ dput(ldir->parent); -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* trucate xino files asynchronously */ -+ -+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ int err; -+ unsigned long jiffy; -+ blkcnt_t blocks; -+ aufs_bindex_t bi, bend; -+ struct kstatfs *st; -+ struct au_branch *br; -+ struct file *new_xino, *file; -+ struct super_block *h_sb; -+ struct au_xino_lock_dir ldir; -+ -+ err = -ENOMEM; -+ st = kmalloc(sizeof(*st), GFP_NOFS); -+ if (unlikely(!st)) -+ goto out; -+ -+ err = -EINVAL; -+ bend = au_sbend(sb); -+ if (unlikely(bindex < 0 || bend < bindex)) -+ goto out_st; -+ br = au_sbr(sb, bindex); -+ file = 
br->br_xino.xi_file; -+ if (!file) -+ goto out_st; -+ -+ err = vfs_statfs(&file->f_path, st); -+ if (unlikely(err)) -+ AuErr1("statfs err %d, ignored\n", err); -+ jiffy = jiffies; -+ blocks = file_inode(file)->i_blocks; -+ pr_info("begin truncating xino(b%d), ib%llu, %llu/%llu free blks\n", -+ bindex, (u64)blocks, st->f_bfree, st->f_blocks); -+ -+ au_xino_lock_dir(sb, file, &ldir); -+ /* mnt_want_write() is unnecessary here */ -+ new_xino = au_xino_create2(file, file); -+ au_xino_unlock_dir(&ldir); -+ err = PTR_ERR(new_xino); -+ if (IS_ERR(new_xino)) { -+ pr_err("err %d, ignored\n", err); -+ goto out_st; -+ } -+ err = 0; -+ fput(file); -+ br->br_xino.xi_file = new_xino; -+ -+ h_sb = au_br_sb(br); -+ for (bi = 0; bi <= bend; bi++) { -+ if (unlikely(bi == bindex)) -+ continue; -+ br = au_sbr(sb, bi); -+ if (au_br_sb(br) != h_sb) -+ continue; -+ -+ fput(br->br_xino.xi_file); -+ br->br_xino.xi_file = new_xino; -+ get_file(new_xino); -+ } -+ -+ err = vfs_statfs(&new_xino->f_path, st); -+ if (!err) { -+ pr_info("end truncating xino(b%d), ib%llu, %llu/%llu free blks\n", -+ bindex, (u64)file_inode(new_xino)->i_blocks, -+ st->f_bfree, st->f_blocks); -+ if (file_inode(new_xino)->i_blocks < blocks) -+ au_sbi(sb)->si_xino_jiffy = jiffy; -+ } else -+ AuErr1("statfs err %d, ignored\n", err); -+ -+out_st: -+ kfree(st); -+out: -+ return err; -+} -+ -+struct xino_do_trunc_args { -+ struct super_block *sb; -+ struct au_branch *br; -+}; -+ -+static void xino_do_trunc(void *_args) -+{ -+ struct xino_do_trunc_args *args = _args; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct inode *dir; -+ int err; -+ aufs_bindex_t bindex; -+ -+ err = 0; -+ sb = args->sb; -+ dir = sb->s_root->d_inode; -+ br = args->br; -+ -+ si_noflush_write_lock(sb); -+ ii_read_lock_parent(dir); -+ bindex = au_br_index(sb, br->br_id); -+ err = au_xino_trunc(sb, bindex); -+ ii_read_unlock(dir); -+ if (unlikely(err)) -+ pr_warn("err b%d, (%d)\n", bindex, err); -+ atomic_dec(&br->br_xino_running); -+ 
atomic_dec(&br->br_count); -+ si_write_unlock(sb); -+ au_nwt_done(&au_sbi(sb)->si_nowait); -+ kfree(args); -+} -+ -+static int xino_trunc_test(struct super_block *sb, struct au_branch *br) -+{ -+ int err; -+ struct kstatfs st; -+ struct au_sbinfo *sbinfo; -+ -+ /* todo: si_xino_expire and the ratio should be customizable */ -+ sbinfo = au_sbi(sb); -+ if (time_before(jiffies, -+ sbinfo->si_xino_jiffy + sbinfo->si_xino_expire)) -+ return 0; -+ -+ /* truncation border */ -+ err = vfs_statfs(&br->br_xino.xi_file->f_path, &st); -+ if (unlikely(err)) { -+ AuErr1("statfs err %d, ignored\n", err); -+ return 0; -+ } -+ if (div64_u64(st.f_bfree * 100, st.f_blocks) >= AUFS_XINO_DEF_TRUNC) -+ return 0; -+ -+ return 1; -+} -+ -+static void xino_try_trunc(struct super_block *sb, struct au_branch *br) -+{ -+ struct xino_do_trunc_args *args; -+ int wkq_err; -+ -+ if (!xino_trunc_test(sb, br)) -+ return; -+ -+ if (atomic_inc_return(&br->br_xino_running) > 1) -+ goto out; -+ -+ /* lock and kfree() will be called in trunc_xino() */ -+ args = kmalloc(sizeof(*args), GFP_NOFS); -+ if (unlikely(!args)) { -+ AuErr1("no memory\n"); -+ goto out_args; -+ } -+ -+ atomic_inc(&br->br_count); -+ args->sb = sb; -+ args->br = br; -+ wkq_err = au_wkq_nowait(xino_do_trunc, args, sb, /*flags*/0); -+ if (!wkq_err) -+ return; /* success */ -+ -+ pr_err("wkq %d\n", wkq_err); -+ atomic_dec(&br->br_count); -+ -+out_args: -+ kfree(args); -+out: -+ atomic_dec(&br->br_xino_running); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_xino_do_write(au_writef_t write, struct file *file, -+ ino_t h_ino, ino_t ino) -+{ -+ loff_t pos; -+ ssize_t sz; -+ -+ pos = h_ino; -+ if (unlikely(au_loff_max / sizeof(ino) - 1 < pos)) { -+ AuIOErr1("too large hi%lu\n", (unsigned long)h_ino); -+ return -EFBIG; -+ } -+ pos *= sizeof(ino); -+ sz = xino_fwrite(write, file, &ino, sizeof(ino), &pos); -+ if (sz == sizeof(ino)) -+ return 0; /* success */ -+ -+ AuIOErr("write failed 
(%zd)\n", sz); -+ return -EIO; -+} -+ -+/* -+ * write @ino to the xinofile for the specified branch{@sb, @bindex} -+ * at the position of @h_ino. -+ * even if @ino is zero, it is written to the xinofile and means no entry. -+ * if the size of the xino file on a specific filesystem exceeds the watermark, -+ * try truncating it. -+ */ -+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino, -+ ino_t ino) -+{ -+ int err; -+ unsigned int mnt_flags; -+ struct au_branch *br; -+ -+ BUILD_BUG_ON(sizeof(long long) != sizeof(au_loff_max) -+ || ((loff_t)-1) > 0); -+ SiMustAnyLock(sb); -+ -+ mnt_flags = au_mntflags(sb); -+ if (!au_opt_test(mnt_flags, XINO)) -+ return 0; -+ -+ br = au_sbr(sb, bindex); -+ err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file, -+ h_ino, ino); -+ if (!err) { -+ if (au_opt_test(mnt_flags, TRUNC_XINO) -+ && au_test_fs_trunc_xino(au_br_sb(br))) -+ xino_try_trunc(sb, br); -+ return 0; /* success */ -+ } -+ -+ AuIOErr("write failed (%d)\n", err); -+ return -EIO; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* aufs inode number bitmap */ -+ -+static const int page_bits = (int)PAGE_SIZE * BITS_PER_BYTE; -+static ino_t xib_calc_ino(unsigned long pindex, int bit) -+{ -+ ino_t ino; -+ -+ AuDebugOn(bit < 0 || page_bits <= bit); -+ ino = AUFS_FIRST_INO + pindex * page_bits + bit; -+ return ino; -+} -+ -+static void xib_calc_bit(ino_t ino, unsigned long *pindex, int *bit) -+{ -+ AuDebugOn(ino < AUFS_FIRST_INO); -+ ino -= AUFS_FIRST_INO; -+ *pindex = ino / page_bits; -+ *bit = ino % page_bits; -+} -+ -+static int xib_pindex(struct super_block *sb, unsigned long pindex) -+{ -+ int err; -+ loff_t pos; -+ ssize_t sz; -+ struct au_sbinfo *sbinfo; -+ struct file *xib; -+ unsigned long *p; -+ -+ sbinfo = au_sbi(sb); -+ MtxMustLock(&sbinfo->si_xib_mtx); -+ AuDebugOn(pindex > ULONG_MAX / PAGE_SIZE -+ || !au_opt_test(sbinfo->si_mntflags, XINO)); -+ -+ if (pindex == 
sbinfo->si_xib_last_pindex) -+ return 0; -+ -+ xib = sbinfo->si_xib; -+ p = sbinfo->si_xib_buf; -+ pos = sbinfo->si_xib_last_pindex; -+ pos *= PAGE_SIZE; -+ sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos); -+ if (unlikely(sz != PAGE_SIZE)) -+ goto out; -+ -+ pos = pindex; -+ pos *= PAGE_SIZE; -+ if (vfsub_f_size_read(xib) >= pos + PAGE_SIZE) -+ sz = xino_fread(sbinfo->si_xread, xib, p, PAGE_SIZE, &pos); -+ else { -+ memset(p, 0, PAGE_SIZE); -+ sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos); -+ } -+ if (sz == PAGE_SIZE) { -+ sbinfo->si_xib_last_pindex = pindex; -+ return 0; /* success */ -+ } -+ -+out: -+ AuIOErr1("write failed (%zd)\n", sz); -+ err = sz; -+ if (sz >= 0) -+ err = -EIO; -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_xib_clear_bit(struct inode *inode) -+{ -+ int err, bit; -+ unsigned long pindex; -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ -+ AuDebugOn(inode->i_nlink); -+ -+ sb = inode->i_sb; -+ xib_calc_bit(inode->i_ino, &pindex, &bit); -+ AuDebugOn(page_bits <= bit); -+ sbinfo = au_sbi(sb); -+ mutex_lock(&sbinfo->si_xib_mtx); -+ err = xib_pindex(sb, pindex); -+ if (!err) { -+ clear_bit(bit, sbinfo->si_xib_buf); -+ sbinfo->si_xib_next_bit = bit; -+ } -+ mutex_unlock(&sbinfo->si_xib_mtx); -+} -+ -+/* for s_op->delete_inode() */ -+void au_xino_delete_inode(struct inode *inode, const int unlinked) -+{ -+ int err; -+ unsigned int mnt_flags; -+ aufs_bindex_t bindex, bend, bi; -+ unsigned char try_trunc; -+ struct au_iinfo *iinfo; -+ struct super_block *sb; -+ struct au_hinode *hi; -+ struct inode *h_inode; -+ struct au_branch *br; -+ au_writef_t xwrite; -+ -+ sb = inode->i_sb; -+ mnt_flags = au_mntflags(sb); -+ if (!au_opt_test(mnt_flags, XINO) -+ || inode->i_ino == AUFS_ROOT_INO) -+ return; -+ -+ if (unlinked) { -+ au_xigen_inc(inode); -+ au_xib_clear_bit(inode); -+ } -+ -+ iinfo = au_ii(inode); -+ if (!iinfo) -+ return; -+ -+ bindex = 
iinfo->ii_bstart; -+ if (bindex < 0) -+ return; -+ -+ xwrite = au_sbi(sb)->si_xwrite; -+ try_trunc = !!au_opt_test(mnt_flags, TRUNC_XINO); -+ hi = iinfo->ii_hinode + bindex; -+ bend = iinfo->ii_bend; -+ for (; bindex <= bend; bindex++, hi++) { -+ h_inode = hi->hi_inode; -+ if (!h_inode -+ || (!unlinked && h_inode->i_nlink)) -+ continue; -+ -+ /* inode may not be revalidated */ -+ bi = au_br_index(sb, hi->hi_id); -+ if (bi < 0) -+ continue; -+ -+ br = au_sbr(sb, bi); -+ err = au_xino_do_write(xwrite, br->br_xino.xi_file, -+ h_inode->i_ino, /*ino*/0); -+ if (!err && try_trunc -+ && au_test_fs_trunc_xino(au_br_sb(br))) -+ xino_try_trunc(sb, br); -+ } -+} -+ -+/* get an unused inode number from bitmap */ -+ino_t au_xino_new_ino(struct super_block *sb) -+{ -+ ino_t ino; -+ unsigned long *p, pindex, ul, pend; -+ struct au_sbinfo *sbinfo; -+ struct file *file; -+ int free_bit, err; -+ -+ if (!au_opt_test(au_mntflags(sb), XINO)) -+ return iunique(sb, AUFS_FIRST_INO); -+ -+ sbinfo = au_sbi(sb); -+ mutex_lock(&sbinfo->si_xib_mtx); -+ p = sbinfo->si_xib_buf; -+ free_bit = sbinfo->si_xib_next_bit; -+ if (free_bit < page_bits && !test_bit(free_bit, p)) -+ goto out; /* success */ -+ free_bit = find_first_zero_bit(p, page_bits); -+ if (free_bit < page_bits) -+ goto out; /* success */ -+ -+ pindex = sbinfo->si_xib_last_pindex; -+ for (ul = pindex - 1; ul < ULONG_MAX; ul--) { -+ err = xib_pindex(sb, ul); -+ if (unlikely(err)) -+ goto out_err; -+ free_bit = find_first_zero_bit(p, page_bits); -+ if (free_bit < page_bits) -+ goto out; /* success */ -+ } -+ -+ file = sbinfo->si_xib; -+ pend = vfsub_f_size_read(file) / PAGE_SIZE; -+ for (ul = pindex + 1; ul <= pend; ul++) { -+ err = xib_pindex(sb, ul); -+ if (unlikely(err)) -+ goto out_err; -+ free_bit = find_first_zero_bit(p, page_bits); -+ if (free_bit < page_bits) -+ goto out; /* success */ -+ } -+ BUG(); -+ -+out: -+ set_bit(free_bit, p); -+ sbinfo->si_xib_next_bit = free_bit + 1; -+ pindex = sbinfo->si_xib_last_pindex; -+ 
mutex_unlock(&sbinfo->si_xib_mtx); -+ ino = xib_calc_ino(pindex, free_bit); -+ AuDbg("i%lu\n", (unsigned long)ino); -+ return ino; -+out_err: -+ mutex_unlock(&sbinfo->si_xib_mtx); -+ AuDbg("i0\n"); -+ return 0; -+} -+ -+/* -+ * read @ino from xinofile for the specified branch{@sb, @bindex} -+ * at the position of @h_ino. -+ * if @ino does not exist and @do_new is true, get new one. -+ */ -+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino, -+ ino_t *ino) -+{ -+ int err; -+ ssize_t sz; -+ loff_t pos; -+ struct file *file; -+ struct au_sbinfo *sbinfo; -+ -+ *ino = 0; -+ if (!au_opt_test(au_mntflags(sb), XINO)) -+ return 0; /* no xino */ -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ pos = h_ino; -+ if (unlikely(au_loff_max / sizeof(*ino) - 1 < pos)) { -+ AuIOErr1("too large hi%lu\n", (unsigned long)h_ino); -+ return -EFBIG; -+ } -+ pos *= sizeof(*ino); -+ -+ file = au_sbr(sb, bindex)->br_xino.xi_file; -+ if (vfsub_f_size_read(file) < pos + sizeof(*ino)) -+ return 0; /* no ino */ -+ -+ sz = xino_fread(sbinfo->si_xread, file, ino, sizeof(*ino), &pos); -+ if (sz == sizeof(*ino)) -+ return 0; /* success */ -+ -+ err = sz; -+ if (unlikely(sz >= 0)) { -+ err = -EIO; -+ AuIOErr("xino read error (%zd)\n", sz); -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* create and set a new xino file */ -+ -+struct file *au_xino_create(struct super_block *sb, char *fname, int silent) -+{ -+ struct file *file; -+ struct dentry *h_parent, *d; -+ struct inode *h_dir, *inode; -+ int err; -+ -+ /* -+ * at mount-time, and the xino file is the default path, -+ * hnotify is disabled so we have no notify events to ignore. -+ * when a user specified the xino, we cannot get au_hdir to be ignored. 
-+ */ -+ file = vfsub_filp_open(fname, O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE -+ /* | __FMODE_NONOTIFY */, -+ S_IRUGO | S_IWUGO); -+ if (IS_ERR(file)) { -+ if (!silent) -+ pr_err("open %s(%ld)\n", fname, PTR_ERR(file)); -+ return file; -+ } -+ -+ /* keep file count */ -+ err = 0; -+ inode = file_inode(file); -+ h_parent = dget_parent(file->f_dentry); -+ h_dir = h_parent->d_inode; -+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT); -+ /* mnt_want_write() is unnecessary here */ -+ /* no delegation since it is just created */ -+ if (inode->i_nlink) -+ err = vfsub_unlink(h_dir, &file->f_path, /*delegated*/NULL, -+ /*force*/0); -+ mutex_unlock(&h_dir->i_mutex); -+ dput(h_parent); -+ if (unlikely(err)) { -+ if (!silent) -+ pr_err("unlink %s(%d)\n", fname, err); -+ goto out; -+ } -+ -+ err = -EINVAL; -+ d = file->f_dentry; -+ if (unlikely(sb == d->d_sb)) { -+ if (!silent) -+ pr_err("%s must be outside\n", fname); -+ goto out; -+ } -+ if (unlikely(au_test_fs_bad_xino(d->d_sb))) { -+ if (!silent) -+ pr_err("xino doesn't support %s(%s)\n", -+ fname, au_sbtype(d->d_sb)); -+ goto out; -+ } -+ return file; /* success */ -+ -+out: -+ fput(file); -+ file = ERR_PTR(err); -+ return file; -+} -+ -+/* -+ * find another branch who is on the same filesystem of the specified -+ * branch{@btgt}. search until @bend. -+ */ -+static int is_sb_shared(struct super_block *sb, aufs_bindex_t btgt, -+ aufs_bindex_t bend) -+{ -+ aufs_bindex_t bindex; -+ struct super_block *tgt_sb = au_sbr_sb(sb, btgt); -+ -+ for (bindex = 0; bindex < btgt; bindex++) -+ if (unlikely(tgt_sb == au_sbr_sb(sb, bindex))) -+ return bindex; -+ for (bindex++; bindex <= bend; bindex++) -+ if (unlikely(tgt_sb == au_sbr_sb(sb, bindex))) -+ return bindex; -+ return -1; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * initialize the xinofile for the specified branch @br -+ * at the place/path where @base_file indicates. 
-+ * test whether another branch is on the same filesystem or not, -+ * if @do_test is true. -+ */ -+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t h_ino, -+ struct file *base_file, int do_test) -+{ -+ int err; -+ ino_t ino; -+ aufs_bindex_t bend, bindex; -+ struct au_branch *shared_br, *b; -+ struct file *file; -+ struct super_block *tgt_sb; -+ -+ shared_br = NULL; -+ bend = au_sbend(sb); -+ if (do_test) { -+ tgt_sb = au_br_sb(br); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ b = au_sbr(sb, bindex); -+ if (tgt_sb == au_br_sb(b)) { -+ shared_br = b; -+ break; -+ } -+ } -+ } -+ -+ if (!shared_br || !shared_br->br_xino.xi_file) { -+ struct au_xino_lock_dir ldir; -+ -+ au_xino_lock_dir(sb, base_file, &ldir); -+ /* mnt_want_write() is unnecessary here */ -+ file = au_xino_create2(base_file, NULL); -+ au_xino_unlock_dir(&ldir); -+ err = PTR_ERR(file); -+ if (IS_ERR(file)) -+ goto out; -+ br->br_xino.xi_file = file; -+ } else { -+ br->br_xino.xi_file = shared_br->br_xino.xi_file; -+ get_file(br->br_xino.xi_file); -+ } -+ -+ ino = AUFS_ROOT_INO; -+ err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file, -+ h_ino, ino); -+ if (unlikely(err)) { -+ fput(br->br_xino.xi_file); -+ br->br_xino.xi_file = NULL; -+ } -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* trucate a xino bitmap file */ -+ -+/* todo: slow */ -+static int do_xib_restore(struct super_block *sb, struct file *file, void *page) -+{ -+ int err, bit; -+ ssize_t sz; -+ unsigned long pindex; -+ loff_t pos, pend; -+ struct au_sbinfo *sbinfo; -+ au_readf_t func; -+ ino_t *ino; -+ unsigned long *p; -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ MtxMustLock(&sbinfo->si_xib_mtx); -+ p = sbinfo->si_xib_buf; -+ func = sbinfo->si_xread; -+ pend = vfsub_f_size_read(file); -+ pos = 0; -+ while (pos < pend) { -+ sz = xino_fread(func, file, page, PAGE_SIZE, &pos); -+ err = sz; -+ if (unlikely(sz <= 0)) -+ goto out; -+ -+ err = 
0; -+ for (ino = page; sz > 0; ino++, sz -= sizeof(ino)) { -+ if (unlikely(*ino < AUFS_FIRST_INO)) -+ continue; -+ -+ xib_calc_bit(*ino, &pindex, &bit); -+ AuDebugOn(page_bits <= bit); -+ err = xib_pindex(sb, pindex); -+ if (!err) -+ set_bit(bit, p); -+ else -+ goto out; -+ } -+ } -+ -+out: -+ return err; -+} -+ -+static int xib_restore(struct super_block *sb) -+{ -+ int err; -+ aufs_bindex_t bindex, bend; -+ void *page; -+ -+ err = -ENOMEM; -+ page = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!page)) -+ goto out; -+ -+ err = 0; -+ bend = au_sbend(sb); -+ for (bindex = 0; !err && bindex <= bend; bindex++) -+ if (!bindex || is_sb_shared(sb, bindex, bindex - 1) < 0) -+ err = do_xib_restore -+ (sb, au_sbr(sb, bindex)->br_xino.xi_file, page); -+ else -+ AuDbg("b%d\n", bindex); -+ free_page((unsigned long)page); -+ -+out: -+ return err; -+} -+ -+int au_xib_trunc(struct super_block *sb) -+{ -+ int err; -+ ssize_t sz; -+ loff_t pos; -+ struct au_xino_lock_dir ldir; -+ struct au_sbinfo *sbinfo; -+ unsigned long *p; -+ struct file *file; -+ -+ SiMustWriteLock(sb); -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ if (!au_opt_test(sbinfo->si_mntflags, XINO)) -+ goto out; -+ -+ file = sbinfo->si_xib; -+ if (vfsub_f_size_read(file) <= PAGE_SIZE) -+ goto out; -+ -+ au_xino_lock_dir(sb, file, &ldir); -+ /* mnt_want_write() is unnecessary here */ -+ file = au_xino_create2(sbinfo->si_xib, NULL); -+ au_xino_unlock_dir(&ldir); -+ err = PTR_ERR(file); -+ if (IS_ERR(file)) -+ goto out; -+ fput(sbinfo->si_xib); -+ sbinfo->si_xib = file; -+ -+ p = sbinfo->si_xib_buf; -+ memset(p, 0, PAGE_SIZE); -+ pos = 0; -+ sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xib, p, PAGE_SIZE, &pos); -+ if (unlikely(sz != PAGE_SIZE)) { -+ err = sz; -+ AuIOErr("err %d\n", err); -+ if (sz >= 0) -+ err = -EIO; -+ goto out; -+ } -+ -+ mutex_lock(&sbinfo->si_xib_mtx); -+ /* mnt_want_write() is unnecessary here */ -+ err = xib_restore(sb); -+ mutex_unlock(&sbinfo->si_xib_mtx); -+ -+out: -+ return err; -+} -+ 
-+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * xino mount option handlers -+ */ -+static au_readf_t find_readf(struct file *h_file) -+{ -+ const struct file_operations *fop = h_file->f_op; -+ -+ if (fop->read) -+ return fop->read; -+ if (fop->aio_read) -+ return do_sync_read; -+ if (fop->read_iter) -+ return new_sync_read; -+ return ERR_PTR(-ENOSYS); -+} -+ -+static au_writef_t find_writef(struct file *h_file) -+{ -+ const struct file_operations *fop = h_file->f_op; -+ -+ if (fop->write) -+ return fop->write; -+ if (fop->aio_write) -+ return do_sync_write; -+ if (fop->write_iter) -+ return new_sync_write; -+ return ERR_PTR(-ENOSYS); -+} -+ -+/* xino bitmap */ -+static void xino_clear_xib(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ sbinfo->si_xread = NULL; -+ sbinfo->si_xwrite = NULL; -+ if (sbinfo->si_xib) -+ fput(sbinfo->si_xib); -+ sbinfo->si_xib = NULL; -+ free_page((unsigned long)sbinfo->si_xib_buf); -+ sbinfo->si_xib_buf = NULL; -+} -+ -+static int au_xino_set_xib(struct super_block *sb, struct file *base) -+{ -+ int err; -+ loff_t pos; -+ struct au_sbinfo *sbinfo; -+ struct file *file; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ file = au_xino_create2(base, sbinfo->si_xib); -+ err = PTR_ERR(file); -+ if (IS_ERR(file)) -+ goto out; -+ if (sbinfo->si_xib) -+ fput(sbinfo->si_xib); -+ sbinfo->si_xib = file; -+ sbinfo->si_xread = find_readf(file); -+ sbinfo->si_xwrite = find_writef(file); -+ -+ err = -ENOMEM; -+ if (!sbinfo->si_xib_buf) -+ sbinfo->si_xib_buf = (void *)get_zeroed_page(GFP_NOFS); -+ if (unlikely(!sbinfo->si_xib_buf)) -+ goto out_unset; -+ -+ sbinfo->si_xib_last_pindex = 0; -+ sbinfo->si_xib_next_bit = 0; -+ if (vfsub_f_size_read(file) < PAGE_SIZE) { -+ pos = 0; -+ err = xino_fwrite(sbinfo->si_xwrite, file, sbinfo->si_xib_buf, -+ PAGE_SIZE, &pos); -+ if (unlikely(err != PAGE_SIZE)) -+ goto out_free; -+ } -+ err = 0; -+ goto 
out; /* success */ -+ -+out_free: -+ free_page((unsigned long)sbinfo->si_xib_buf); -+ sbinfo->si_xib_buf = NULL; -+ if (err >= 0) -+ err = -EIO; -+out_unset: -+ fput(sbinfo->si_xib); -+ sbinfo->si_xib = NULL; -+ sbinfo->si_xread = NULL; -+ sbinfo->si_xwrite = NULL; -+out: -+ return err; -+} -+ -+/* xino for each branch */ -+static void xino_clear_br(struct super_block *sb) -+{ -+ aufs_bindex_t bindex, bend; -+ struct au_branch *br; -+ -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (!br || !br->br_xino.xi_file) -+ continue; -+ -+ fput(br->br_xino.xi_file); -+ br->br_xino.xi_file = NULL; -+ } -+} -+ -+static int au_xino_set_br(struct super_block *sb, struct file *base) -+{ -+ int err; -+ ino_t ino; -+ aufs_bindex_t bindex, bend, bshared; -+ struct { -+ struct file *old, *new; -+ } *fpair, *p; -+ struct au_branch *br; -+ struct inode *inode; -+ au_writef_t writef; -+ -+ SiMustWriteLock(sb); -+ -+ err = -ENOMEM; -+ bend = au_sbend(sb); -+ fpair = kcalloc(bend + 1, sizeof(*fpair), GFP_NOFS); -+ if (unlikely(!fpair)) -+ goto out; -+ -+ inode = sb->s_root->d_inode; -+ ino = AUFS_ROOT_INO; -+ writef = au_sbi(sb)->si_xwrite; -+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) { -+ br = au_sbr(sb, bindex); -+ bshared = is_sb_shared(sb, bindex, bindex - 1); -+ if (bshared >= 0) { -+ /* shared xino */ -+ *p = fpair[bshared]; -+ get_file(p->new); -+ } -+ -+ if (!p->new) { -+ /* new xino */ -+ p->old = br->br_xino.xi_file; -+ p->new = au_xino_create2(base, br->br_xino.xi_file); -+ err = PTR_ERR(p->new); -+ if (IS_ERR(p->new)) { -+ p->new = NULL; -+ goto out_pair; -+ } -+ } -+ -+ err = au_xino_do_write(writef, p->new, -+ au_h_iptr(inode, bindex)->i_ino, ino); -+ if (unlikely(err)) -+ goto out_pair; -+ } -+ -+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) { -+ br = au_sbr(sb, bindex); -+ if (br->br_xino.xi_file) -+ fput(br->br_xino.xi_file); -+ get_file(p->new); -+ br->br_xino.xi_file = p->new; -+ } 
-+ -+out_pair: -+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) -+ if (p->new) -+ fput(p->new); -+ else -+ break; -+ kfree(fpair); -+out: -+ return err; -+} -+ -+void au_xino_clr(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ au_xigen_clr(sb); -+ xino_clear_xib(sb); -+ xino_clear_br(sb); -+ sbinfo = au_sbi(sb); -+ /* lvalue, do not call au_mntflags() */ -+ au_opt_clr(sbinfo->si_mntflags, XINO); -+} -+ -+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount) -+{ -+ int err, skip; -+ struct dentry *parent, *cur_parent; -+ struct qstr *dname, *cur_name; -+ struct file *cur_xino; -+ struct inode *dir; -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ parent = dget_parent(xino->file->f_dentry); -+ if (remount) { -+ skip = 0; -+ dname = &xino->file->f_dentry->d_name; -+ cur_xino = sbinfo->si_xib; -+ if (cur_xino) { -+ cur_parent = dget_parent(cur_xino->f_dentry); -+ cur_name = &cur_xino->f_dentry->d_name; -+ skip = (cur_parent == parent -+ && au_qstreq(dname, cur_name)); -+ dput(cur_parent); -+ } -+ if (skip) -+ goto out; -+ } -+ -+ au_opt_set(sbinfo->si_mntflags, XINO); -+ dir = parent->d_inode; -+ mutex_lock_nested(&dir->i_mutex, AuLsc_I_PARENT); -+ /* mnt_want_write() is unnecessary here */ -+ err = au_xino_set_xib(sb, xino->file); -+ if (!err) -+ err = au_xigen_set(sb, xino->file); -+ if (!err) -+ err = au_xino_set_br(sb, xino->file); -+ mutex_unlock(&dir->i_mutex); -+ if (!err) -+ goto out; /* success */ -+ -+ /* reset all */ -+ AuIOErr("failed creating xino(%d).\n", err); -+ au_xigen_clr(sb); -+ xino_clear_xib(sb); -+ -+out: -+ dput(parent); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * create a xinofile at the default place/path. 
-+ */ -+struct file *au_xino_def(struct super_block *sb) -+{ -+ struct file *file; -+ char *page, *p; -+ struct au_branch *br; -+ struct super_block *h_sb; -+ struct path path; -+ aufs_bindex_t bend, bindex, bwr; -+ -+ br = NULL; -+ bend = au_sbend(sb); -+ bwr = -1; -+ for (bindex = 0; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (au_br_writable(br->br_perm) -+ && !au_test_fs_bad_xino(au_br_sb(br))) { -+ bwr = bindex; -+ break; -+ } -+ } -+ -+ if (bwr >= 0) { -+ file = ERR_PTR(-ENOMEM); -+ page = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!page)) -+ goto out; -+ path.mnt = au_br_mnt(br); -+ path.dentry = au_h_dptr(sb->s_root, bwr); -+ p = d_path(&path, page, PATH_MAX - sizeof(AUFS_XINO_FNAME)); -+ file = (void *)p; -+ if (!IS_ERR(p)) { -+ strcat(p, "/" AUFS_XINO_FNAME); -+ AuDbg("%s\n", p); -+ file = au_xino_create(sb, p, /*silent*/0); -+ if (!IS_ERR(file)) -+ au_xino_brid_set(sb, br->br_id); -+ } -+ free_page((unsigned long)page); -+ } else { -+ file = au_xino_create(sb, AUFS_XINO_DEFPATH, /*silent*/0); -+ if (IS_ERR(file)) -+ goto out; -+ h_sb = file->f_dentry->d_sb; -+ if (unlikely(au_test_fs_bad_xino(h_sb))) { -+ pr_err("xino doesn't support %s(%s)\n", -+ AUFS_XINO_DEFPATH, au_sbtype(h_sb)); -+ fput(file); -+ file = ERR_PTR(-EINVAL); -+ } -+ if (!IS_ERR(file)) -+ au_xino_brid_set(sb, -1); -+ } -+ -+out: -+ return file; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_xino_path(struct seq_file *seq, struct file *file) -+{ -+ int err; -+ -+ err = au_seq_path(seq, &file->f_path); -+ if (unlikely(err)) -+ goto out; -+ -+#define Deleted "\\040(deleted)" -+ seq->count -= sizeof(Deleted) - 1; -+ AuDebugOn(memcmp(seq->buf + seq->count, Deleted, -+ sizeof(Deleted) - 1)); -+#undef Deleted -+ -+out: -+ return err; -+} -diff --git a/fs/buffer.c b/fs/buffer.c -index 20805db..363569f 100644 ---- a/fs/buffer.c -+++ b/fs/buffer.c -@@ -2450,7 +2450,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, 
struct vm_fault *vmf, - * Update file times before taking page lock. We may end up failing the - * fault so this update may be superfluous but who really cares... - */ -- file_update_time(vma->vm_file); -+ vma_file_update_time(vma); - - ret = __block_page_mkwrite(vma, vmf, get_block); - sb_end_pagefault(sb); -diff --git a/fs/dcache.c b/fs/dcache.c -index d25f8fd..857990a 100644 ---- a/fs/dcache.c -+++ b/fs/dcache.c -@@ -1022,7 +1022,7 @@ enum d_walk_ret { - * - * The @enter() and @finish() callbacks are called with d_lock held. - */ --static void d_walk(struct dentry *parent, void *data, -+void d_walk(struct dentry *parent, void *data, - enum d_walk_ret (*enter)(void *, struct dentry *), - void (*finish)(void *)) - { -diff --git a/fs/fcntl.c b/fs/fcntl.c -index 99d440a..de1a407 100644 ---- a/fs/fcntl.c -+++ b/fs/fcntl.c -@@ -29,7 +29,7 @@ - - #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME) - --static int setfl(int fd, struct file * filp, unsigned long arg) -+int setfl(int fd, struct file * filp, unsigned long arg) - { - struct inode * inode = file_inode(filp); - int error = 0; -@@ -59,6 +59,8 @@ static int setfl(int fd, struct file * filp, unsigned long arg) - - if (filp->f_op->check_flags) - error = filp->f_op->check_flags(arg); -+ if (!error && filp->f_op->setfl) -+ error = filp->f_op->setfl(filp, arg); - if (error) - return error; - -diff --git a/fs/inode.c b/fs/inode.c -index 56d1d2b..2998e86 100644 ---- a/fs/inode.c -+++ b/fs/inode.c -@@ -1497,7 +1497,7 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode, - * This does the actual work of updating an inodes time or version. Must have - * had called mnt_want_write() before calling this. 
- */ --static int update_time(struct inode *inode, struct timespec *time, int flags) -+int update_time(struct inode *inode, struct timespec *time, int flags) - { - if (inode->i_op->update_time) - return inode->i_op->update_time(inode, time, flags); -diff --git a/fs/proc/base.c b/fs/proc/base.c -index 7dc3ea8..b368ad5 100644 ---- a/fs/proc/base.c -+++ b/fs/proc/base.c -@@ -1735,7 +1735,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path) - down_read(&mm->mmap_sem); - vma = find_exact_vma(mm, vm_start, vm_end); - if (vma && vma->vm_file) { -- *path = vma->vm_file->f_path; -+ *path = vma_pr_or_file(vma)->f_path; - path_get(path); - rc = 0; - } -diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c -index d4a3574..1397181 100644 ---- a/fs/proc/nommu.c -+++ b/fs/proc/nommu.c -@@ -45,7 +45,10 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region) - file = region->vm_file; - - if (file) { -- struct inode *inode = file_inode(region->vm_file); -+ struct inode *inode; -+ -+ file = vmr_pr_or_file(region); -+ inode = file_inode(file); - dev = inode->i_sb->s_dev; - ino = inode->i_ino; - } -diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c -index 69aa378..426b962 100644 ---- a/fs/proc/task_mmu.c -+++ b/fs/proc/task_mmu.c -@@ -276,7 +276,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) - const char *name = NULL; - - if (file) { -- struct inode *inode = file_inode(vma->vm_file); -+ struct inode *inode; -+ -+ file = vma_pr_or_file(vma); -+ inode = file_inode(file); - dev = inode->i_sb->s_dev; - ino = inode->i_ino; - pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; -@@ -1447,7 +1450,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) - struct proc_maps_private *proc_priv = &numa_priv->proc_maps; - struct vm_area_struct *vma = v; - struct numa_maps *md = &numa_priv->md; -- struct file *file = vma->vm_file; -+ struct file *file = vma_pr_or_file(vma); - struct mm_struct *mm = vma->vm_mm; - 
struct mm_walk walk = {}; - struct mempolicy *pol; -diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c -index 599ec2e..1740207 100644 ---- a/fs/proc/task_nommu.c -+++ b/fs/proc/task_nommu.c -@@ -160,7 +160,10 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, - file = vma->vm_file; - - if (file) { -- struct inode *inode = file_inode(vma->vm_file); -+ struct inode *inode; -+ -+ file = vma_pr_or_file(vma); -+ inode = file_inode(file); - dev = inode->i_sb->s_dev; - ino = inode->i_ino; - pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT; -diff --git a/fs/splice.c b/fs/splice.c -index 75c6058..619359a 100644 ---- a/fs/splice.c -+++ b/fs/splice.c -@@ -1114,8 +1114,8 @@ EXPORT_SYMBOL(generic_splice_sendpage); - /* - * Attempt to initiate a splice from pipe to file. - */ --static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, -- loff_t *ppos, size_t len, unsigned int flags) -+long do_splice_from(struct pipe_inode_info *pipe, struct file *out, -+ loff_t *ppos, size_t len, unsigned int flags) - { - ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, - loff_t *, size_t, unsigned int); -@@ -1131,9 +1131,9 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, - /* - * Attempt to initiate a splice from a file to a pipe. 
- */ --static long do_splice_to(struct file *in, loff_t *ppos, -- struct pipe_inode_info *pipe, size_t len, -- unsigned int flags) -+long do_splice_to(struct file *in, loff_t *ppos, -+ struct pipe_inode_info *pipe, size_t len, -+ unsigned int flags) - { - ssize_t (*splice_read)(struct file *, loff_t *, - struct pipe_inode_info *, size_t, unsigned int); -diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h -new file mode 100644 -index 0000000..61c58d8 ---- /dev/null -+++ b/include/asm-generic/msi.h -@@ -0,0 +1,32 @@ -+#ifndef __ASM_GENERIC_MSI_H -+#define __ASM_GENERIC_MSI_H -+ -+#include -+ -+#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS -+# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2 -+#endif -+ -+struct msi_desc; -+ -+/** -+ * struct msi_alloc_info - Default structure for MSI interrupt allocation. -+ * @desc: Pointer to msi descriptor -+ * @hwirq: Associated hw interrupt number in the domain -+ * @scratchpad: Storage for implementation specific scratch data -+ * -+ * Architectures can provide their own implementation by not including -+ * asm-generic/msi.h into their arch specific header file. 
-+ */ -+typedef struct msi_alloc_info { -+ struct msi_desc *desc; -+ irq_hw_number_t hwirq; -+ union { -+ unsigned long ul; -+ void *ptr; -+ } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS]; -+} msi_alloc_info_t; -+ -+#define GENERIC_MSI_DOMAIN_OPS 1 -+ -+#endif -diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h -index aa70cbd..bee5d68 100644 ---- a/include/asm-generic/vmlinux.lds.h -+++ b/include/asm-generic/vmlinux.lds.h -@@ -164,6 +164,7 @@ - #define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) - #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) - #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) -+#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) - #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) - #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) - #define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon) -@@ -497,6 +498,7 @@ - CLK_OF_TABLES() \ - RESERVEDMEM_OF_TABLES() \ - CLKSRC_OF_TABLES() \ -+ IOMMU_OF_TABLES() \ - CPU_METHOD_OF_TABLES() \ - KERNEL_DTB() \ - IRQCHIP_OF_MATCH_TABLE() \ -diff --git a/include/linux/acpi.h b/include/linux/acpi.h -index 1c7eaa7..d017dbf 100644 ---- a/include/linux/acpi.h -+++ b/include/linux/acpi.h -@@ -27,6 +27,7 @@ - - #include - #include /* for struct resource */ -+#include - #include - #include - -@@ -290,11 +291,6 @@ unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); - bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, - struct resource *res); - --struct resource_list_entry { -- struct list_head node; -- struct resource res; --}; -- - void acpi_dev_free_resource_list(struct list_head *list); - int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, - int (*preproc)(struct acpi_resource *, void *), -diff --git a/include/linux/device.h b/include/linux/device.h -index ce1f216..941d97b 100644 ---- a/include/linux/device.h -+++ 
b/include/linux/device.h -@@ -690,6 +690,8 @@ struct acpi_dev_node { - * along with subsystem-level and driver-level callbacks. - * @pins: For device pin management. - * See Documentation/pinctrl.txt for details. -+ * @msi_list: Hosts MSI descriptors -+ * @msi_domain: The generic MSI domain this device is using. - * @numa_node: NUMA node this device is close to. - * @dma_mask: Dma mask (if dma'ble device). - * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all -@@ -750,9 +752,15 @@ struct device { - struct dev_pm_info power; - struct dev_pm_domain *pm_domain; - -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+ struct irq_domain *msi_domain; -+#endif - #ifdef CONFIG_PINCTRL - struct dev_pin_info *pins; - #endif -+#ifdef CONFIG_GENERIC_MSI_IRQ -+ struct list_head msi_list; -+#endif - - #ifdef CONFIG_NUMA - int numa_node; /* NUMA node this device is close to */ -@@ -837,6 +845,22 @@ static inline void set_dev_node(struct device *dev, int node) - } - #endif - -+static inline struct irq_domain *dev_get_msi_domain(const struct device *dev) -+{ -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+ return dev->msi_domain; -+#else -+ return NULL; -+#endif -+} -+ -+static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d) -+{ -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+ dev->msi_domain = d; -+#endif -+} -+ - static inline void *dev_get_drvdata(const struct device *dev) - { - return dev->driver_data; -diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h -index d5d3881..c3007cb 100644 ---- a/include/linux/dma-mapping.h -+++ b/include/linux/dma-mapping.h -@@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask) - - extern u64 dma_get_required_mask(struct device *dev); - --#ifndef set_arch_dma_coherent_ops --static inline int set_arch_dma_coherent_ops(struct device *dev) --{ -- return 0; --} -+#ifndef arch_setup_dma_ops -+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, -+ 
u64 size, struct iommu_ops *iommu, -+ bool coherent) { } -+#endif -+ -+#ifndef arch_teardown_dma_ops -+static inline void arch_teardown_dma_ops(struct device *dev) { } - #endif - - static inline unsigned int dma_get_max_seg_size(struct device *dev) -diff --git a/include/linux/file.h b/include/linux/file.h -index 4d69123..62cffc0 100644 ---- a/include/linux/file.h -+++ b/include/linux/file.h -@@ -19,6 +19,7 @@ struct dentry; - struct path; - extern struct file *alloc_file(struct path *, fmode_t mode, - const struct file_operations *fop); -+extern struct file *get_empty_filp(void); - - static inline void fput_light(struct file *file, int fput_needed) - { -diff --git a/include/linux/fs.h b/include/linux/fs.h -index 6fd017e..c44d25d 100644 ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -1149,6 +1149,7 @@ extern void fasync_free(struct fasync_struct *); - /* can be called from interrupts */ - extern void kill_fasync(struct fasync_struct **, int, int); - -+extern int setfl(int fd, struct file * filp, unsigned long arg); - extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force); - extern void f_setown(struct file *filp, unsigned long arg, int force); - extern void f_delown(struct file *filp); -@@ -1507,6 +1508,7 @@ struct file_operations { - ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); - unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); - int (*check_flags)(int); -+ int (*setfl)(struct file *, unsigned long); - int (*flock) (struct file *, int, struct file_lock *); - ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); - ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); -@@ -2662,6 +2664,7 @@ extern int inode_change_ok(const struct inode *, struct iattr *); - extern int inode_newsize_ok(const struct inode *, loff_t offset); - extern void 
setattr_copy(struct inode *inode, const struct iattr *attr); - -+extern int update_time(struct inode *, struct timespec *, int); - extern int file_update_time(struct file *file); - - extern int generic_show_options(struct seq_file *m, struct dentry *root); -diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h -new file mode 100644 -index 0000000..f13b12e ---- /dev/null -+++ b/include/linux/fsl/guts.h -@@ -0,0 +1,195 @@ -+/** -+ * Freecale 85xx and 86xx Global Utilties register set -+ * -+ * Authors: Jeff Brown -+ * Timur Tabi -+ * -+ * Copyright 2004,2007,2012 Freescale Semiconductor, Inc -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. -+ */ -+ -+#ifndef __FSL_GUTS_H__ -+#define __FSL_GUTS_H__ -+ -+#include -+ -+/** -+ * Global Utility Registers. -+ * -+ * Not all registers defined in this structure are available on all chips, so -+ * you are expected to know whether a given register actually exists on your -+ * chip before you access it. -+ * -+ * Also, some registers are similar on different chips but have slightly -+ * different names. In these cases, one name is chosen to avoid extraneous -+ * #ifdefs. 
-+ */ -+struct ccsr_guts { -+ u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ -+ u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ -+ u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ -+ u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ -+ u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ -+ u32 pordevsr2; /* 0x.0014 - POR device status register 2 */ -+ u8 res018[0x20 - 0x18]; -+ u32 porcir; /* 0x.0020 - POR Configuration Information Register */ -+ u8 res024[0x30 - 0x24]; -+ u32 gpiocr; /* 0x.0030 - GPIO Control Register */ -+ u8 res034[0x40 - 0x34]; -+ u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ -+ u8 res044[0x50 - 0x44]; -+ u32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ -+ u8 res054[0x60 - 0x54]; -+ u32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ -+ u32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ -+ u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ -+ u8 res06c[0x70 - 0x6c]; -+ u32 devdisr; /* 0x.0070 - Device Disable Control */ -+#define CCSR_GUTS_DEVDISR_TB1 0x00001000 -+#define CCSR_GUTS_DEVDISR_TB0 0x00004000 -+ u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ -+ u8 res078[0x7c - 0x78]; -+ u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ -+ u32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ -+ u32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ -+ u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ -+ u32 pmcdr; /* 0x.008c - 4Power management clock disable register */ -+ u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ -+ u32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ -+ u32 ectrstcr; /* 0x.0098 - Exception reset control register */ -+ u32 autorstsr; /* 0x.009c - Automatic reset status register */ -+ u32 pvr; /* 0x.00a0 - Processor Version Register */ 
-+ u32 svr; /* 0x.00a4 - System Version Register */ -+ u8 res0a8[0xb0 - 0xa8]; -+ u32 rstcr; /* 0x.00b0 - Reset Control Register */ -+ u8 res0b4[0xc0 - 0xb4]; -+ u32 iovselsr; /* 0x.00c0 - I/O voltage select status register -+ Called 'elbcvselcr' on 86xx SOCs */ -+ u8 res0c4[0x100 - 0xc4]; -+ u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers -+ There are 16 registers */ -+ u8 res140[0x224 - 0x140]; -+ u32 iodelay1; /* 0x.0224 - IO delay control register 1 */ -+ u32 iodelay2; /* 0x.0228 - IO delay control register 2 */ -+ u8 res22c[0x604 - 0x22c]; -+ u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ -+ u8 res608[0x800 - 0x608]; -+ u32 clkdvdr; /* 0x.0800 - Clock Divide Register */ -+ u8 res804[0x900 - 0x804]; -+ u32 ircr; /* 0x.0900 - Infrared Control Register */ -+ u8 res904[0x908 - 0x904]; -+ u32 dmacr; /* 0x.0908 - DMA Control Register */ -+ u8 res90c[0x914 - 0x90c]; -+ u32 elbccr; /* 0x.0914 - eLBC Control Register */ -+ u8 res918[0xb20 - 0x918]; -+ u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ -+ u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ -+ u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ -+ u8 resb2c[0xe00 - 0xb2c]; -+ u32 clkocr; /* 0x.0e00 - Clock Out Select Register */ -+ u8 rese04[0xe10 - 0xe04]; -+ u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ -+ u8 rese14[0xe20 - 0xe14]; -+ u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ -+ u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ -+ u8 rese28[0xf04 - 0xe28]; -+ u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ -+ u32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ -+ u8 resf0c[0xf2c - 0xf0c]; -+ u32 itcr; /* 0x.0f2c - Internal transaction control register */ -+ u8 resf30[0xf40 - 0xf30]; -+ u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ -+ u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ -+} __attribute__ ((packed)); -+ -+#ifdef CONFIG_FSL_GUTS -+extern u32 guts_get_svr(void); -+#endif -+ 
-+/* Alternate function signal multiplex control */ -+#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) -+ -+#ifdef CONFIG_PPC_86xx -+ -+#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ -+#define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */ -+ -+/* -+ * Set the DMACR register in the GUTS -+ * -+ * The DMACR register determines the source of initiated transfers for each -+ * channel on each DMA controller. Rather than have a bunch of repetitive -+ * macros for the bit patterns, we just have a function that calculates -+ * them. -+ * -+ * guts: Pointer to GUTS structure -+ * co: The DMA controller (0 or 1) -+ * ch: The channel on the DMA controller (0, 1, 2, or 3) -+ * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx) -+ */ -+static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts, -+ unsigned int co, unsigned int ch, unsigned int device) -+{ -+ unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); -+ -+ clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift); -+} -+ -+#define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000 -+#define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */ -+#define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */ -+#define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */ -+#define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */ -+#define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */ -+#define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */ -+#define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */ -+#define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */ -+#define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */ -+#define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */ -+#define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */ -+#define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008 -+#define CCSR_GUTS_PMUXCR_DMA2_3 0x00000004 -+#define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002 -+#define 
CCSR_GUTS_PMUXCR_DMA1_3 0x00000001 -+ -+/* -+ * Set the DMA external control bits in the GUTS -+ * -+ * The DMA external control bits in the PMUXCR are only meaningful for -+ * channels 0 and 3. Any other channels are ignored. -+ * -+ * guts: Pointer to GUTS structure -+ * co: The DMA controller (0 or 1) -+ * ch: The channel on the DMA controller (0, 1, 2, or 3) -+ * value: the new value for the bit (0 or 1) -+ */ -+static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts, -+ unsigned int co, unsigned int ch, unsigned int value) -+{ -+ if ((ch == 0) || (ch == 3)) { -+ unsigned int shift = 2 * (co + 1) - (ch & 1) - 1; -+ -+ clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift); -+ } -+} -+ -+#define CCSR_GUTS_CLKDVDR_PXCKEN 0x80000000 -+#define CCSR_GUTS_CLKDVDR_SSICKEN 0x20000000 -+#define CCSR_GUTS_CLKDVDR_PXCKINV 0x10000000 -+#define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT 25 -+#define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK 0x06000000 -+#define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \ -+ (((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT) -+#define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT 16 -+#define CCSR_GUTS_CLKDVDR_PXCLK_MASK 0x001F0000 -+#define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT) -+#define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF -+#define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK) -+ -+#endif -+ -+#endif -diff --git a/include/linux/fsl/svr.h b/include/linux/fsl/svr.h -new file mode 100644 -index 0000000..8d13836 ---- /dev/null -+++ b/include/linux/fsl/svr.h -@@ -0,0 +1,95 @@ -+/* -+ * MPC85xx cpu type detection -+ * -+ * Copyright 2011-2012 Freescale Semiconductor, Inc. -+ * -+ * This is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ */ -+ -+#ifndef FSL_SVR_H -+#define FSL_SVR_H -+ -+#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design resision */ -+#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/ -+#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/ -+ -+/* Some parts define SVR[0:23] as the SOC version */ -+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */ -+ -+#define SVR_8533 0x803400 -+#define SVR_8535 0x803701 -+#define SVR_8536 0x803700 -+#define SVR_8540 0x803000 -+#define SVR_8541 0x807200 -+#define SVR_8543 0x803200 -+#define SVR_8544 0x803401 -+#define SVR_8545 0x803102 -+#define SVR_8547 0x803101 -+#define SVR_8548 0x803100 -+#define SVR_8555 0x807100 -+#define SVR_8560 0x807000 -+#define SVR_8567 0x807501 -+#define SVR_8568 0x807500 -+#define SVR_8569 0x808000 -+#define SVR_8572 0x80E000 -+#define SVR_P1010 0x80F100 -+#define SVR_P1011 0x80E500 -+#define SVR_P1012 0x80E501 -+#define SVR_P1013 0x80E700 -+#define SVR_P1014 0x80F101 -+#define SVR_P1017 0x80F700 -+#define SVR_P1020 0x80E400 -+#define SVR_P1021 0x80E401 -+#define SVR_P1022 0x80E600 -+#define SVR_P1023 0x80F600 -+#define SVR_P1024 0x80E402 -+#define SVR_P1025 0x80E403 -+#define SVR_P2010 0x80E300 -+#define SVR_P2020 0x80E200 -+#define SVR_P2040 0x821000 -+#define SVR_P2041 0x821001 -+#define SVR_P3041 0x821103 -+#define SVR_P4040 0x820100 -+#define SVR_P4080 0x820000 -+#define SVR_P5010 0x822100 -+#define SVR_P5020 0x822000 -+#define SVR_P5021 0X820500 -+#define SVR_P5040 0x820400 -+#define SVR_T4240 0x824000 -+#define SVR_T4120 0x824001 -+#define SVR_T4160 0x824100 -+#define SVR_T4080 0x824102 -+#define SVR_C291 0x850000 -+#define SVR_C292 0x850020 -+#define SVR_C293 0x850030 -+#define SVR_B4860 0X868000 -+#define SVR_G4860 0x868001 -+#define SVR_G4060 0x868003 -+#define SVR_B4440 0x868100 -+#define SVR_G4440 0x868101 -+#define SVR_B4420 0x868102 -+#define SVR_B4220 0x868103 -+#define SVR_T1040 0x852000 -+#define SVR_T1041 0x852001 -+#define SVR_T1042 0x852002 
-+#define SVR_T1020 0x852100 -+#define SVR_T1021 0x852101 -+#define SVR_T1022 0x852102 -+#define SVR_T2080 0x853000 -+#define SVR_T2081 0x853100 -+ -+#define SVR_8610 0x80A000 -+#define SVR_8641 0x809000 -+#define SVR_8641D 0x809001 -+ -+#define SVR_9130 0x860001 -+#define SVR_9131 0x860000 -+#define SVR_9132 0x861000 -+#define SVR_9232 0x861400 -+ -+#define SVR_Unknown 0xFFFFFF -+ -+#endif -diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h -index 84d60cb..3f9778c 100644 ---- a/include/linux/fsl_ifc.h -+++ b/include/linux/fsl_ifc.h -@@ -29,7 +29,20 @@ - #include - #include - --#define FSL_IFC_BANK_COUNT 4 -+/* -+ * The actual number of banks implemented depends on the IFC version -+ * - IFC version 1.0 implements 4 banks. -+ * - IFC version 1.1 onward implements 8 banks. -+ */ -+#define FSL_IFC_BANK_COUNT 8 -+ -+#define FSL_IFC_VERSION_MASK 0x0F0F0000 -+#define FSL_IFC_VERSION_1_0_0 0x01000000 -+#define FSL_IFC_VERSION_1_1_0 0x01010000 -+#define FSL_IFC_VERSION_2_0_0 0x02000000 -+ -+#define PGOFFSET_64K (64*1024) -+#define PGOFFSET_4K (4*1024) - - /* - * CSPR - Chip Select Property Register -@@ -714,20 +727,26 @@ struct fsl_ifc_nand { - __be32 nand_evter_en; - u32 res17[0x2]; - __be32 nand_evter_intr_en; -- u32 res18[0x2]; -+ __be32 nand_vol_addr_stat; -+ u32 res18; - __be32 nand_erattr0; - __be32 nand_erattr1; - u32 res19[0x10]; - __be32 nand_fsr; -- u32 res20; -- __be32 nand_eccstat[4]; -- u32 res21[0x20]; -+ u32 res20[0x3]; -+ __be32 nand_eccstat[6]; -+ u32 res21[0x1c]; - __be32 nanndcr; - u32 res22[0x2]; - __be32 nand_autoboot_trgr; - u32 res23; - __be32 nand_mdr; -- u32 res24[0x5C]; -+ u32 res24[0x1C]; -+ __be32 nand_dll_lowcfg0; -+ __be32 nand_dll_lowcfg1; -+ u32 res25; -+ __be32 nand_dll_lowstat; -+ u32 res26[0x3c]; - }; - - /* -@@ -762,13 +781,12 @@ struct fsl_ifc_gpcm { - __be32 gpcm_erattr1; - __be32 gpcm_erattr2; - __be32 gpcm_stat; -- u32 res4[0x1F3]; - }; - - /* - * IFC Controller Registers - */ --struct fsl_ifc_regs { -+struct 
fsl_ifc_global { - __be32 ifc_rev; - u32 res1[0x2]; - struct { -@@ -776,39 +794,44 @@ struct fsl_ifc_regs { - __be32 cspr; - u32 res2; - } cspr_cs[FSL_IFC_BANK_COUNT]; -- u32 res3[0x19]; -+ u32 res3[0xd]; - struct { - __be32 amask; - u32 res4[0x2]; - } amask_cs[FSL_IFC_BANK_COUNT]; -- u32 res5[0x18]; -+ u32 res5[0xc]; - struct { - __be32 csor; - __be32 csor_ext; - u32 res6; - } csor_cs[FSL_IFC_BANK_COUNT]; -- u32 res7[0x18]; -+ u32 res7[0xc]; - struct { - __be32 ftim[4]; - u32 res8[0x8]; - } ftim_cs[FSL_IFC_BANK_COUNT]; -- u32 res9[0x60]; -+ u32 res9[0x30]; - __be32 rb_stat; -- u32 res10[0x2]; -+ __be32 rb_map; -+ __be32 wb_map; - __be32 ifc_gcr; -- u32 res11[0x2]; -+ u32 res10[0x2]; - __be32 cm_evter_stat; -- u32 res12[0x2]; -+ u32 res11[0x2]; - __be32 cm_evter_en; -- u32 res13[0x2]; -+ u32 res12[0x2]; - __be32 cm_evter_intr_en; -- u32 res14[0x2]; -+ u32 res13[0x2]; - __be32 cm_erattr0; - __be32 cm_erattr1; -- u32 res15[0x2]; -+ u32 res14[0x2]; - __be32 ifc_ccr; - __be32 ifc_csr; -- u32 res16[0x2EB]; -+ __be32 ddr_ccr_low; -+}; -+ -+ -+struct fsl_ifc_runtime { - struct fsl_ifc_nand ifc_nand; - struct fsl_ifc_nor ifc_nor; - struct fsl_ifc_gpcm ifc_gpcm; -@@ -822,17 +845,70 @@ extern int fsl_ifc_find(phys_addr_t addr_base); - struct fsl_ifc_ctrl { - /* device info */ - struct device *dev; -- struct fsl_ifc_regs __iomem *regs; -+ struct fsl_ifc_global __iomem *gregs; -+ struct fsl_ifc_runtime __iomem *rregs; - int irq; - int nand_irq; - spinlock_t lock; - void *nand; -+ int version; -+ int banks; - - u32 nand_stat; - wait_queue_head_t nand_wait; -+ bool little_endian; - }; - - extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; - -+static inline u32 ifc_in32(void __iomem *addr) -+{ -+ u32 val; -+ -+ if (fsl_ifc_ctrl_dev->little_endian) -+ val = ioread32(addr); -+ else -+ val = ioread32be(addr); -+ -+ return val; -+} -+ -+static inline u16 ifc_in16(void __iomem *addr) -+{ -+ u16 val; -+ -+ if (fsl_ifc_ctrl_dev->little_endian) -+ val = ioread16(addr); -+ else -+ val = 
ioread16be(addr); -+ -+ return val; -+} -+ -+static inline u8 ifc_in8(void __iomem *addr) -+{ -+ return ioread8(addr); -+} -+ -+static inline void ifc_out32(u32 val, void __iomem *addr) -+{ -+ if (fsl_ifc_ctrl_dev->little_endian) -+ iowrite32(val, addr); -+ else -+ iowrite32be(val, addr); -+} -+ -+static inline void ifc_out16(u16 val, void __iomem *addr) -+{ -+ if (fsl_ifc_ctrl_dev->little_endian) -+ iowrite16(val, addr); -+ else -+ iowrite16be(val, addr); -+} -+ -+static inline void ifc_out8(u8 val, void __iomem *addr) -+{ -+ iowrite8(val, addr); -+} - - #endif /* __ASM_FSL_IFC_H */ -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index 69517a2..cbbe6a2 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -356,6 +356,20 @@ static inline int disable_irq_wake(unsigned int irq) - return irq_set_irq_wake(irq, 0); - } - -+/* -+ * irq_get_irqchip_state/irq_set_irqchip_state specific flags -+ */ -+enum irqchip_irq_state { -+ IRQCHIP_STATE_PENDING, /* Is interrupt pending? */ -+ IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */ -+ IRQCHIP_STATE_MASKED, /* Is interrupt masked? */ -+ IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? 
*/ -+}; -+ -+extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, -+ bool *state); -+extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, -+ bool state); - - #ifdef CONFIG_IRQ_FORCED_THREADING - extern bool force_irqthreads; -diff --git a/include/linux/iommu.h b/include/linux/iommu.h -index e6a7c9f..7421bdf 100644 ---- a/include/linux/iommu.h -+++ b/include/linux/iommu.h -@@ -21,13 +21,16 @@ - - #include - #include -+#include - #include -+#include - #include - - #define IOMMU_READ (1 << 0) - #define IOMMU_WRITE (1 << 1) - #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ --#define IOMMU_EXEC (1 << 3) -+#define IOMMU_NOEXEC (1 << 3) -+#define IOMMU_MMIO (1 << 4) /* Device memory access */ - - struct iommu_ops; - struct iommu_group; -@@ -49,9 +52,33 @@ struct iommu_domain_geometry { - bool force_aperture; /* DMA only allowed in mappable range? */ - }; - -+/* Domain feature flags */ -+#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */ -+#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API -+ implementation */ -+#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */ -+ -+/* -+ * This are the possible domain-types -+ * -+ * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate -+ * devices -+ * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses -+ * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used -+ * for VMs -+ * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations. 
-+ * This flag allows IOMMU drivers to implement -+ * certain optimizations for these domains -+ */ -+#define IOMMU_DOMAIN_BLOCKED (0U) -+#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) -+#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING) -+#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \ -+ __IOMMU_DOMAIN_DMA_API) -+ - struct iommu_domain { -+ unsigned type; - const struct iommu_ops *ops; -- void *priv; - iommu_fault_handler_t handler; - void *handler_token; - struct iommu_domain_geometry geometry; -@@ -61,6 +88,7 @@ enum iommu_cap { - IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA - transactions */ - IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ -+ IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ - }; - - /* -@@ -97,23 +125,32 @@ enum iommu_attr { - * @detach_dev: detach device from an iommu domain - * @map: map a physically contiguous memory region to an iommu domain - * @unmap: unmap a physically contiguous memory region from an iommu domain -+ * @map_sg: map a scatter-gather list of physically contiguous memory chunks -+ * to an iommu domain - * @iova_to_phys: translate iova to physical address - * @add_device: add device to iommu grouping - * @remove_device: remove device from iommu grouping - * @domain_get_attr: Query domain attributes - * @domain_set_attr: Change domain attributes -+ * @of_xlate: add OF master IDs to iommu grouping - * @pgsize_bitmap: bitmap of supported page sizes -+ * @priv: per-instance data private to the iommu driver - */ - struct iommu_ops { - bool (*capable)(enum iommu_cap); -- int (*domain_init)(struct iommu_domain *domain); -- void (*domain_destroy)(struct iommu_domain *domain); -+ -+ /* Domain allocation and freeing by the iommu driver */ -+ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); -+ void (*domain_free)(struct iommu_domain *); -+ - int (*attach_dev)(struct iommu_domain *domain, struct device *dev); - void (*detach_dev)(struct iommu_domain *domain, struct device 
*dev); - int (*map)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); - size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size); -+ size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova, -+ struct scatterlist *sg, unsigned int nents, int prot); - phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); - int (*add_device)(struct device *dev); - void (*remove_device)(struct device *dev); -@@ -131,8 +168,14 @@ struct iommu_ops { - int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count); - /* Get the numer of window per domain */ - u32 (*domain_get_windows)(struct iommu_domain *domain); -+ struct iommu_domain *(*get_dev_iommu_domain)(struct device *dev); -+ -+#ifdef CONFIG_OF_IOMMU -+ int (*of_xlate)(struct device *dev, struct of_phandle_args *args); -+#endif - - unsigned long pgsize_bitmap; -+ void *priv; - }; - - #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ -@@ -156,6 +199,9 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); - extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size); -+extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, -+ struct scatterlist *sg,unsigned int nents, -+ int prot); - extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); - extern void iommu_set_fault_handler(struct iommu_domain *domain, - iommu_fault_handler_t handler, void *token); -@@ -200,6 +246,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t offset, u64 size, - int prot); - extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); -+ -+extern struct iommu_domain *iommu_get_dev_domain(struct device *dev); -+ - /** - * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework - * @domain: the iommu domain where the 
fault has happened -@@ -241,6 +290,13 @@ static inline int report_iommu_fault(struct iommu_domain *domain, - return ret; - } - -+static inline size_t iommu_map_sg(struct iommu_domain *domain, -+ unsigned long iova, struct scatterlist *sg, -+ unsigned int nents, int prot) -+{ -+ return domain->ops->map_sg(domain, iova, sg, nents, prot); -+} -+ - #else /* CONFIG_IOMMU_API */ - - struct iommu_ops {}; -@@ -293,6 +349,13 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, - return -ENODEV; - } - -+static inline size_t iommu_map_sg(struct iommu_domain *domain, -+ unsigned long iova, struct scatterlist *sg, -+ unsigned int nents, int prot) -+{ -+ return -ENODEV; -+} -+ - static inline int iommu_domain_window_enable(struct iommu_domain *domain, - u32 wnd_nr, phys_addr_t paddr, - u64 size, int prot) -@@ -424,6 +487,11 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link) - { - } - -+static inline struct iommu_domain *iommu_get_dev_domain(struct device *dev) -+{ -+ return NULL; -+} -+ - #endif /* CONFIG_IOMMU_API */ - - #endif /* __LINUX_IOMMU_H */ -diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h -new file mode 100644 -index 0000000..1c30014 ---- /dev/null -+++ b/include/linux/iopoll.h -@@ -0,0 +1,144 @@ -+/* -+ * Copyright (c) 2012-2014 The Linux Foundation. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 and -+ * only version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ */ -+ -+#ifndef _LINUX_IOPOLL_H -+#define _LINUX_IOPOLL_H -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/** -+ * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs -+ * @op: accessor function (takes @addr as its only argument) -+ * @addr: Address to poll -+ * @val: Variable to read the value into -+ * @cond: Break condition (usually involving @val) -+ * @sleep_us: Maximum time to sleep between reads in us (0 -+ * tight-loops). Should be less than ~20ms since usleep_range -+ * is used (see Documentation/timers/timers-howto.txt). -+ * @timeout_us: Timeout in us, 0 means never timeout -+ * -+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either -+ * case, the last read value at @addr is stored in @val. Must not -+ * be called from atomic context if sleep_us or timeout_us are used. -+ * -+ * When available, you'll probably want to use one of the specialized -+ * macros defined below rather than this macro directly. -+ */ -+#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \ -+({ \ -+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ -+ might_sleep_if(sleep_us); \ -+ for (;;) { \ -+ (val) = op(addr); \ -+ if (cond) \ -+ break; \ -+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ -+ (val) = op(addr); \ -+ break; \ -+ } \ -+ if (sleep_us) \ -+ usleep_range((sleep_us >> 2) + 1, sleep_us); \ -+ } \ -+ (cond) ? 0 : -ETIMEDOUT; \ -+}) -+ -+/** -+ * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs -+ * @op: accessor function (takes @addr as its only argument) -+ * @addr: Address to poll -+ * @val: Variable to read the value into -+ * @cond: Break condition (usually involving @val) -+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Should -+ * be less than ~10us since udelay is used (see -+ * Documentation/timers/timers-howto.txt). 
-+ * @timeout_us: Timeout in us, 0 means never timeout -+ * -+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either -+ * case, the last read value at @addr is stored in @val. -+ * -+ * When available, you'll probably want to use one of the specialized -+ * macros defined below rather than this macro directly. -+ */ -+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \ -+({ \ -+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ -+ for (;;) { \ -+ (val) = op(addr); \ -+ if (cond) \ -+ break; \ -+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ -+ (val) = op(addr); \ -+ break; \ -+ } \ -+ if (delay_us) \ -+ udelay(delay_us); \ -+ } \ -+ (cond) ? 0 : -ETIMEDOUT; \ -+}) -+ -+ -+#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us) -+ -+#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us) -+ -+#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us) -+ -+#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us) -+ -+#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us) -+ -+#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us) -+ -+#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us) -+ -+#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us) -+ -+#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, 
timeout_us) \ -+ readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readl_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#endif /* _LINUX_IOPOLL_H */ -diff --git a/include/linux/irq.h b/include/linux/irq.h -index 03f48d9..4931a8b 100644 ---- a/include/linux/irq.h -+++ b/include/linux/irq.h -@@ -15,11 +15,13 @@ - #include - #include - #include -+#include - #include - #include - #include - #include - #include -+#include - - #include - #include -@@ -27,11 +29,8 @@ - - struct seq_file; - struct module; --struct irq_desc; --struct irq_data; --typedef void (*irq_flow_handler_t)(unsigned int irq, -- struct irq_desc *desc); --typedef void (*irq_preflow_handler_t)(struct irq_data *data); -+struct msi_msg; -+enum irqchip_irq_state; - - /* - * IRQ line status. 
-@@ -113,10 +112,14 @@ enum { - * - * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity - * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity -+ * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to -+ * support stacked irqchips, which indicates skipping -+ * all descendent irqchips. - */ - enum { - IRQ_SET_MASK_OK = 0, - IRQ_SET_MASK_OK_NOCOPY, -+ IRQ_SET_MASK_OK_DONE, - }; - - struct msi_desc; -@@ -133,6 +136,8 @@ struct irq_domain; - * @chip: low level interrupt hardware access - * @domain: Interrupt translation domain; responsible for mapping - * between hwirq number and linux irq number. -+ * @parent_data: pointer to parent struct irq_data to support hierarchy -+ * irq_domain - * @handler_data: per-IRQ data for the irq_chip methods - * @chip_data: platform-specific per-chip private data for the chip - * methods, to allow shared chip implementations -@@ -151,6 +156,9 @@ struct irq_data { - unsigned int state_use_accessors; - struct irq_chip *chip; - struct irq_domain *domain; -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ struct irq_data *parent_data; -+#endif - void *handler_data; - void *chip_data; - struct msi_desc *msi_desc; -@@ -315,6 +323,10 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) - * any other callback related to this irq - * @irq_release_resources: optional to release resources acquired with - * irq_request_resources -+ * @irq_compose_msi_msg: optional to compose message content for MSI -+ * @irq_write_msi_msg: optional to write message content for MSI -+ * @irq_get_irqchip_state: return the internal state of an interrupt -+ * @irq_set_irqchip_state: set the internal state of a interrupt - * @flags: chip specific flags - */ - struct irq_chip { -@@ -351,6 +363,12 @@ struct irq_chip { - int (*irq_request_resources)(struct irq_data *data); - void (*irq_release_resources)(struct irq_data *data); - -+ void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg); -+ void 
(*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); -+ -+ int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state); -+ int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state); -+ - unsigned long flags; - }; - -@@ -438,6 +456,20 @@ extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); - extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); - extern void handle_nested_irq(unsigned int irq); - -+extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+extern void irq_chip_ack_parent(struct irq_data *data); -+extern int irq_chip_retrigger_hierarchy(struct irq_data *data); -+extern void irq_chip_mask_parent(struct irq_data *data); -+extern void irq_chip_unmask_parent(struct irq_data *data); -+extern void irq_chip_eoi_parent(struct irq_data *data); -+extern int irq_chip_set_affinity_parent(struct irq_data *data, -+ const struct cpumask *dest, -+ bool force); -+extern int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); -+extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); -+#endif -+ - /* Handling of unhandled and spurious interrupts: */ - extern void note_interrupt(unsigned int irq, struct irq_desc *desc, - irqreturn_t action_ret); -@@ -582,7 +614,7 @@ static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) - return d ? 
d->msi_desc : NULL; - } - --static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) -+static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) - { - return d->msi_desc; - } -@@ -639,13 +671,6 @@ void arch_teardown_hwirq(unsigned int irq); - void irq_init_desc(unsigned int irq); - #endif - --#ifndef irq_reg_writel --# define irq_reg_writel(val, addr) writel(val, addr) --#endif --#ifndef irq_reg_readl --# define irq_reg_readl(addr) readl(addr) --#endif -- - /** - * struct irq_chip_regs - register offsets for struct irq_gci - * @enable: Enable register offset to reg_base -@@ -692,6 +717,8 @@ struct irq_chip_type { - * struct irq_chip_generic - Generic irq chip data structure - * @lock: Lock to protect register and cache data access - * @reg_base: Register base address (virtual) -+ * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) -+ * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) - * @irq_base: Interrupt base nr for this chip - * @irq_cnt: Number of interrupts handled by this chip - * @mask_cache: Cached mask register shared between all chip types -@@ -716,6 +743,8 @@ struct irq_chip_type { - struct irq_chip_generic { - raw_spinlock_t lock; - void __iomem *reg_base; -+ u32 (*reg_readl)(void __iomem *addr); -+ void (*reg_writel)(u32 val, void __iomem *addr); - unsigned int irq_base; - unsigned int irq_cnt; - u32 mask_cache; -@@ -740,12 +769,14 @@ struct irq_chip_generic { - * the parent irq. 
Usually GPIO implementations - * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private - * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask -+ * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE) - */ - enum irq_gc_flags { - IRQ_GC_INIT_MASK_CACHE = 1 << 0, - IRQ_GC_INIT_NESTED_LOCK = 1 << 1, - IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, - IRQ_GC_NO_MASK = 1 << 3, -+ IRQ_GC_BE_IO = 1 << 4, - }; - - /* -@@ -821,4 +852,22 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { } - static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } - #endif - -+static inline void irq_reg_writel(struct irq_chip_generic *gc, -+ u32 val, int reg_offset) -+{ -+ if (gc->reg_writel) -+ gc->reg_writel(val, gc->reg_base + reg_offset); -+ else -+ writel(val, gc->reg_base + reg_offset); -+} -+ -+static inline u32 irq_reg_readl(struct irq_chip_generic *gc, -+ int reg_offset) -+{ -+ if (gc->reg_readl) -+ return gc->reg_readl(gc->reg_base + reg_offset); -+ else -+ return readl(gc->reg_base + reg_offset); -+} -+ - #endif /* _LINUX_IRQ_H */ -diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h -index 03a4ea3..36caf46 100644 ---- a/include/linux/irqchip/arm-gic-v3.h -+++ b/include/linux/irqchip/arm-gic-v3.h -@@ -49,6 +49,10 @@ - #define GICD_CTLR_ENABLE_G1A (1U << 1) - #define GICD_CTLR_ENABLE_G1 (1U << 0) - -+#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) -+#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) -+#define GICD_TYPER_LPIS (1U << 17) -+ - #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) - #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) - -@@ -76,9 +80,42 @@ - #define GICR_MOVALLR 0x0110 - #define GICR_PIDR2 GICD_PIDR2 - -+#define GICR_CTLR_ENABLE_LPIS (1UL << 0) -+ -+#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) -+ - #define GICR_WAKER_ProcessorSleep (1U << 1) - #define GICR_WAKER_ChildrenAsleep (1U << 2) - -+#define GICR_PROPBASER_NonShareable (0U << 10) -+#define GICR_PROPBASER_InnerShareable 
(1U << 10) -+#define GICR_PROPBASER_OuterShareable (2U << 10) -+#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10) -+#define GICR_PROPBASER_nCnB (0U << 7) -+#define GICR_PROPBASER_nC (1U << 7) -+#define GICR_PROPBASER_RaWt (2U << 7) -+#define GICR_PROPBASER_RaWb (3U << 7) -+#define GICR_PROPBASER_WaWt (4U << 7) -+#define GICR_PROPBASER_WaWb (5U << 7) -+#define GICR_PROPBASER_RaWaWt (6U << 7) -+#define GICR_PROPBASER_RaWaWb (7U << 7) -+#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7) -+#define GICR_PROPBASER_IDBITS_MASK (0x1f) -+ -+#define GICR_PENDBASER_NonShareable (0U << 10) -+#define GICR_PENDBASER_InnerShareable (1U << 10) -+#define GICR_PENDBASER_OuterShareable (2U << 10) -+#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10) -+#define GICR_PENDBASER_nCnB (0U << 7) -+#define GICR_PENDBASER_nC (1U << 7) -+#define GICR_PENDBASER_RaWt (2U << 7) -+#define GICR_PENDBASER_RaWb (3U << 7) -+#define GICR_PENDBASER_WaWt (4U << 7) -+#define GICR_PENDBASER_WaWb (5U << 7) -+#define GICR_PENDBASER_RaWaWt (6U << 7) -+#define GICR_PENDBASER_RaWaWb (7U << 7) -+#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7) -+ - /* - * Re-Distributor registers, offsets from SGI_base - */ -@@ -91,9 +128,100 @@ - #define GICR_IPRIORITYR0 GICD_IPRIORITYR - #define GICR_ICFGR0 GICD_ICFGR - -+#define GICR_TYPER_PLPIS (1U << 0) - #define GICR_TYPER_VLPIS (1U << 1) - #define GICR_TYPER_LAST (1U << 4) - -+#define LPI_PROP_GROUP1 (1 << 1) -+#define LPI_PROP_ENABLED (1 << 0) -+ -+/* -+ * ITS registers, offsets from ITS_base -+ */ -+#define GITS_CTLR 0x0000 -+#define GITS_IIDR 0x0004 -+#define GITS_TYPER 0x0008 -+#define GITS_CBASER 0x0080 -+#define GITS_CWRITER 0x0088 -+#define GITS_CREADR 0x0090 -+#define GITS_BASER 0x0100 -+#define GITS_PIDR2 GICR_PIDR2 -+ -+#define GITS_TRANSLATER 0x10040 -+ -+#define GITS_CTLR_ENABLE (1U << 0) -+#define GITS_CTLR_QUIESCENT (1U << 31) -+ -+#define GITS_TYPER_DEVBITS_SHIFT 13 -+#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) 
-+#define GITS_TYPER_PTA (1UL << 19) -+ -+#define GITS_CBASER_VALID (1UL << 63) -+#define GITS_CBASER_nCnB (0UL << 59) -+#define GITS_CBASER_nC (1UL << 59) -+#define GITS_CBASER_RaWt (2UL << 59) -+#define GITS_CBASER_RaWb (3UL << 59) -+#define GITS_CBASER_WaWt (4UL << 59) -+#define GITS_CBASER_WaWb (5UL << 59) -+#define GITS_CBASER_RaWaWt (6UL << 59) -+#define GITS_CBASER_RaWaWb (7UL << 59) -+#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59) -+#define GITS_CBASER_NonShareable (0UL << 10) -+#define GITS_CBASER_InnerShareable (1UL << 10) -+#define GITS_CBASER_OuterShareable (2UL << 10) -+#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10) -+ -+#define GITS_BASER_NR_REGS 8 -+ -+#define GITS_BASER_VALID (1UL << 63) -+#define GITS_BASER_nCnB (0UL << 59) -+#define GITS_BASER_nC (1UL << 59) -+#define GITS_BASER_RaWt (2UL << 59) -+#define GITS_BASER_RaWb (3UL << 59) -+#define GITS_BASER_WaWt (4UL << 59) -+#define GITS_BASER_WaWb (5UL << 59) -+#define GITS_BASER_RaWaWt (6UL << 59) -+#define GITS_BASER_RaWaWb (7UL << 59) -+#define GITS_BASER_CACHEABILITY_MASK (7UL << 59) -+#define GITS_BASER_TYPE_SHIFT (56) -+#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) -+#define GITS_BASER_ENTRY_SIZE_SHIFT (48) -+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1) -+#define GITS_BASER_NonShareable (0UL << 10) -+#define GITS_BASER_InnerShareable (1UL << 10) -+#define GITS_BASER_OuterShareable (2UL << 10) -+#define GITS_BASER_SHAREABILITY_SHIFT (10) -+#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT) -+#define GITS_BASER_PAGE_SIZE_SHIFT (8) -+#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT) -+#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) -+#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) -+#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) -+ -+#define GITS_BASER_TYPE_NONE 0 -+#define GITS_BASER_TYPE_DEVICE 1 -+#define 
GITS_BASER_TYPE_VCPU 2 -+#define GITS_BASER_TYPE_CPU 3 -+#define GITS_BASER_TYPE_COLLECTION 4 -+#define GITS_BASER_TYPE_RESERVED5 5 -+#define GITS_BASER_TYPE_RESERVED6 6 -+#define GITS_BASER_TYPE_RESERVED7 7 -+ -+/* -+ * ITS commands -+ */ -+#define GITS_CMD_MAPD 0x08 -+#define GITS_CMD_MAPC 0x09 -+#define GITS_CMD_MAPVI 0x0a -+#define GITS_CMD_MOVI 0x01 -+#define GITS_CMD_DISCARD 0x0f -+#define GITS_CMD_INV 0x0c -+#define GITS_CMD_MOVALL 0x0e -+#define GITS_CMD_INVALL 0x0d -+#define GITS_CMD_INT 0x03 -+#define GITS_CMD_CLEAR 0x04 -+#define GITS_CMD_SYNC 0x05 -+ - /* - * CPU interface registers - */ -@@ -142,6 +270,18 @@ - #define ICC_SRE_EL2_SRE (1 << 0) - #define ICC_SRE_EL2_ENABLE (1 << 3) - -+#define ICC_SGI1R_TARGET_LIST_SHIFT 0 -+#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) -+#define ICC_SGI1R_AFFINITY_1_SHIFT 16 -+#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) -+#define ICC_SGI1R_SGI_ID_SHIFT 24 -+#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) -+#define ICC_SGI1R_AFFINITY_2_SHIFT 32 -+#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) -+#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 -+#define ICC_SGI1R_AFFINITY_3_SHIFT 48 -+#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) -+ - /* - * System register definitions - */ -@@ -188,6 +328,24 @@ - #ifndef __ASSEMBLY__ - - #include -+#include -+ -+/* -+ * We need a value to serve as a irq-type for LPIs. Choose one that will -+ * hopefully pique the interest of the reviewer. 
-+ */ -+#define GIC_IRQ_TYPE_LPI 0xa110c8ed -+ -+struct rdists { -+ struct { -+ void __iomem *rd_base; -+ struct page *pend_page; -+ phys_addr_t phys_base; -+ } __percpu *rdist; -+ struct page *prop_page; -+ int id_bits; -+ u64 flags; -+}; - - static inline void gic_write_eoir(u64 irq) - { -@@ -195,6 +353,13 @@ static inline void gic_write_eoir(u64 irq) - isb(); - } - -+struct irq_domain; -+int its_cpu_init(void); -+int its_init(struct device_node *node, struct rdists *rdists, -+ struct irq_domain *domain); -+int __its_msi_prepare(struct irq_domain *domain, u32 dev_id, -+ struct device *dev, int nvec, msi_alloc_info_t *info); -+ - #endif - - #endif -diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h -index 13eed92..60b09ed 100644 ---- a/include/linux/irqchip/arm-gic.h -+++ b/include/linux/irqchip/arm-gic.h -@@ -106,6 +106,8 @@ static inline void gic_init(unsigned int nr, int start, - gic_init_bases(nr, start, dist, cpu, 0, NULL); - } - -+int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); -+ - void gic_send_sgi(unsigned int cpu_id, unsigned int irq); - int gic_get_cpu_id(unsigned int cpu); - void gic_migrate_target(unsigned int new_cpu_id); -diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h -index b0f9d16..3c5ca45 100644 ---- a/include/linux/irqdomain.h -+++ b/include/linux/irqdomain.h -@@ -33,15 +33,32 @@ - #define _LINUX_IRQDOMAIN_H - - #include -+#include - #include - - struct device_node; - struct irq_domain; - struct of_device_id; -+struct irq_chip; -+struct irq_data; - - /* Number of irqs reserved for a legacy isa controller */ - #define NUM_ISA_INTERRUPTS 16 - -+/* -+ * Should several domains have the same device node, but serve -+ * different purposes (for example one domain is for PCI/MSI, and the -+ * other for wired IRQs), they can be distinguished using a -+ * bus-specific token. Most domains are expected to only carry -+ * DOMAIN_BUS_ANY. 
-+ */ -+enum irq_domain_bus_token { -+ DOMAIN_BUS_ANY = 0, -+ DOMAIN_BUS_PCI_MSI, -+ DOMAIN_BUS_PLATFORM_MSI, -+ DOMAIN_BUS_NEXUS, -+}; -+ - /** - * struct irq_domain_ops - Methods for irq_domain objects - * @match: Match an interrupt controller device node to a host, returns -@@ -58,12 +75,23 @@ struct of_device_id; - * to setup the irq_desc when returning from map(). - */ - struct irq_domain_ops { -- int (*match)(struct irq_domain *d, struct device_node *node); -+ int (*match)(struct irq_domain *d, struct device_node *node, -+ enum irq_domain_bus_token bus_token); - int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); - void (*unmap)(struct irq_domain *d, unsigned int virq); - int (*xlate)(struct irq_domain *d, struct device_node *node, - const u32 *intspec, unsigned int intsize, - unsigned long *out_hwirq, unsigned int *out_type); -+ -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ /* extended V2 interfaces to support hierarchy irq_domains */ -+ int (*alloc)(struct irq_domain *d, unsigned int virq, -+ unsigned int nr_irqs, void *arg); -+ void (*free)(struct irq_domain *d, unsigned int virq, -+ unsigned int nr_irqs); -+ void (*activate)(struct irq_domain *d, struct irq_data *irq_data); -+ void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); -+#endif - }; - - extern struct irq_domain_ops irq_generic_chip_ops; -@@ -77,6 +105,7 @@ struct irq_domain_chip_generic; - * @ops: pointer to irq_domain methods - * @host_data: private data pointer for use by owner. Not touched by irq_domain - * core code. -+ * @flags: host per irq_domain flags - * - * Optional elements - * @of_node: Pointer to device tree nodes associated with the irq_domain. Used -@@ -84,6 +113,7 @@ struct irq_domain_chip_generic; - * @gc: Pointer to a list of generic chips. There is a helper function for - * setting up one or more generic chips for interrupt controllers - * drivers using the generic chip library which uses this pointer. 
-+ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains - * - * Revmap data, used internally by irq_domain - * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that -@@ -97,10 +127,15 @@ struct irq_domain { - const char *name; - const struct irq_domain_ops *ops; - void *host_data; -+ unsigned int flags; - - /* Optional data */ - struct device_node *of_node; -+ enum irq_domain_bus_token bus_token; - struct irq_domain_chip_generic *gc; -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ struct irq_domain *parent; -+#endif - - /* reverse map data. The linear map gets appended to the irq_domain */ - irq_hw_number_t hwirq_max; -@@ -110,6 +145,22 @@ struct irq_domain { - unsigned int linear_revmap[]; - }; - -+/* Irq domain flags */ -+enum { -+ /* Irq domain is hierarchical */ -+ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), -+ -+ /* Core calls alloc/free recursive through the domain hierarchy. */ -+ IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1), -+ -+ /* -+ * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved -+ * for implementation specific purposes and ignored by the -+ * core code. -+ */ -+ IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), -+}; -+ - #ifdef CONFIG_IRQ_DOMAIN - struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, - irq_hw_number_t hwirq_max, int direct_max, -@@ -126,9 +177,15 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, - irq_hw_number_t first_hwirq, - const struct irq_domain_ops *ops, - void *host_data); --extern struct irq_domain *irq_find_host(struct device_node *node); -+extern struct irq_domain *irq_find_matching_host(struct device_node *node, -+ enum irq_domain_bus_token bus_token); - extern void irq_set_default_host(struct irq_domain *host); - -+static inline struct irq_domain *irq_find_host(struct device_node *node) -+{ -+ return irq_find_matching_host(node, DOMAIN_BUS_ANY); -+} -+ - /** - * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. 
- * @of_node: pointer to interrupt controller's device tree node. -@@ -220,8 +277,74 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type); - -+/* V2 interfaces to support hierarchy IRQ domains. */ -+extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, -+ unsigned int virq); -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, -+ unsigned int flags, unsigned int size, -+ struct device_node *node, -+ const struct irq_domain_ops *ops, void *host_data); -+extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, -+ unsigned int nr_irqs, int node, void *arg, -+ bool realloc); -+extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); -+extern void irq_domain_activate_irq(struct irq_data *irq_data); -+extern void irq_domain_deactivate_irq(struct irq_data *irq_data); -+ -+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, -+ unsigned int nr_irqs, int node, void *arg) -+{ -+ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); -+} -+ -+extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, -+ unsigned int virq, -+ irq_hw_number_t hwirq, -+ struct irq_chip *chip, -+ void *chip_data); -+extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, -+ irq_hw_number_t hwirq, struct irq_chip *chip, -+ void *chip_data, irq_flow_handler_t handler, -+ void *handler_data, const char *handler_name); -+extern void irq_domain_reset_irq_data(struct irq_data *irq_data); -+extern void irq_domain_free_irqs_common(struct irq_domain *domain, -+ unsigned int virq, -+ unsigned int nr_irqs); -+extern void irq_domain_free_irqs_top(struct irq_domain *domain, -+ unsigned int virq, unsigned int nr_irqs); -+ -+extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, -+ unsigned 
int irq_base, -+ unsigned int nr_irqs, void *arg); -+ -+extern void irq_domain_free_irqs_parent(struct irq_domain *domain, -+ unsigned int irq_base, -+ unsigned int nr_irqs); -+ -+static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) -+{ -+ return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; -+} -+#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ -+static inline void irq_domain_activate_irq(struct irq_data *data) { } -+static inline void irq_domain_deactivate_irq(struct irq_data *data) { } -+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, -+ unsigned int nr_irqs, int node, void *arg) -+{ -+ return -1; -+} -+ -+static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) -+{ -+ return false; -+} -+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ -+ - #else /* CONFIG_IRQ_DOMAIN */ - static inline void irq_dispose_mapping(unsigned int virq) { } -+static inline void irq_domain_activate_irq(struct irq_data *data) { } -+static inline void irq_domain_deactivate_irq(struct irq_data *data) { } - #endif /* !CONFIG_IRQ_DOMAIN */ - - #endif /* _LINUX_IRQDOMAIN_H */ -diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h -new file mode 100644 -index 0000000..62d5430 ---- /dev/null -+++ b/include/linux/irqhandler.h -@@ -0,0 +1,14 @@ -+#ifndef _LINUX_IRQHANDLER_H -+#define _LINUX_IRQHANDLER_H -+ -+/* -+ * Interrupt flow handler typedefs are defined here to avoid circular -+ * include dependencies. 
-+ */ -+ -+struct irq_desc; -+struct irq_data; -+typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); -+typedef void (*irq_preflow_handler_t)(struct irq_data *data); -+ -+#endif -diff --git a/include/linux/mm.h b/include/linux/mm.h -index 86a977b..a2d0dbb 100644 ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -1208,6 +1208,28 @@ static inline int fixup_user_fault(struct task_struct *tsk, - } - #endif - -+extern void vma_do_file_update_time(struct vm_area_struct *, const char[], int); -+extern struct file *vma_do_pr_or_file(struct vm_area_struct *, const char[], -+ int); -+extern void vma_do_get_file(struct vm_area_struct *, const char[], int); -+extern void vma_do_fput(struct vm_area_struct *, const char[], int); -+ -+#define vma_file_update_time(vma) vma_do_file_update_time(vma, __func__, \ -+ __LINE__) -+#define vma_pr_or_file(vma) vma_do_pr_or_file(vma, __func__, \ -+ __LINE__) -+#define vma_get_file(vma) vma_do_get_file(vma, __func__, __LINE__) -+#define vma_fput(vma) vma_do_fput(vma, __func__, __LINE__) -+ -+#ifndef CONFIG_MMU -+extern struct file *vmr_do_pr_or_file(struct vm_region *, const char[], int); -+extern void vmr_do_fput(struct vm_region *, const char[], int); -+ -+#define vmr_pr_or_file(region) vmr_do_pr_or_file(region, __func__, \ -+ __LINE__) -+#define vmr_fput(region) vmr_do_fput(region, __func__, __LINE__) -+#endif /* !CONFIG_MMU */ -+ - extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); - extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, - void *buf, int len, int write); -diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index 6e0b286..8f374ed 100644 ---- a/include/linux/mm_types.h -+++ b/include/linux/mm_types.h -@@ -232,6 +232,7 @@ struct vm_region { - unsigned long vm_top; /* region allocated to here */ - unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ - struct file *vm_file; /* the backing 
file or NULL */ -+ struct file *vm_prfile; /* the virtual backing file or NULL */ - - int vm_usage; /* region usage count (access under nommu_region_sem) */ - bool vm_icache_flushed : 1; /* true if the icache has been flushed for -@@ -300,6 +301,7 @@ struct vm_area_struct { - unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE - units, *not* PAGE_CACHE_SIZE */ - struct file * vm_file; /* File we map to (can be NULL). */ -+ struct file *vm_prfile; /* shadow of vm_file */ - void * vm_private_data; /* was vm_pte (shared mem) */ - - #ifndef CONFIG_MMU -diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h -index dba793e..62d966a 100644 ---- a/include/linux/mmc/sdhci.h -+++ b/include/linux/mmc/sdhci.h -@@ -100,6 +100,10 @@ struct sdhci_host { - #define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) - /* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ - #define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) -+/* Controller does not support 64-bit DMA */ -+#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9) -+/* Controller broken with using ACMD23 */ -+#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) - - int irq; /* Device IRQ */ - void __iomem *ioaddr; /* Mapped address */ -@@ -130,6 +134,7 @@ struct sdhci_host { - #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ - #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ - #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ -+#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ - - unsigned int version; /* SDHCI spec. 
version */ - -@@ -155,12 +160,19 @@ struct sdhci_host { - - int sg_count; /* Mapped sg entries */ - -- u8 *adma_desc; /* ADMA descriptor table */ -- u8 *align_buffer; /* Bounce buffer */ -+ void *adma_table; /* ADMA descriptor table */ -+ void *align_buffer; /* Bounce buffer */ -+ -+ size_t adma_table_sz; /* ADMA descriptor table size */ -+ size_t align_buffer_sz; /* Bounce buffer size */ - - dma_addr_t adma_addr; /* Mapped ADMA descr. table */ - dma_addr_t align_addr; /* Mapped bounce buffer */ - -+ unsigned int desc_sz; /* ADMA descriptor size */ -+ unsigned int align_sz; /* ADMA alignment */ -+ unsigned int align_mask; /* ADMA alignment mask */ -+ - struct tasklet_struct finish_tasklet; /* Tasklet structures */ - - struct timer_list timer; /* Timer for timeouts */ -diff --git a/include/linux/msi.h b/include/linux/msi.h -index 44f4746..788d65b 100644 ---- a/include/linux/msi.h -+++ b/include/linux/msi.h -@@ -10,17 +10,13 @@ struct msi_msg { - u32 data; /* 16 bits of msi message data */ - }; - -+extern int pci_msi_ignore_mask; - /* Helper functions */ - struct irq_data; - struct msi_desc; --void mask_msi_irq(struct irq_data *data); --void unmask_msi_irq(struct irq_data *data); --void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -+struct pci_dev; - void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); --void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); --void read_msi_msg(unsigned int irq, struct msi_msg *msg); - void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); --void write_msi_msg(unsigned int irq, struct msi_msg *msg); - - struct msi_desc { - struct { -@@ -42,12 +38,63 @@ struct msi_desc { - void __iomem *mask_base; - u8 mask_pos; - }; -- struct pci_dev *dev; -+ struct device *dev; - - /* Last set MSI message */ - struct msi_msg msg; - }; - -+/* Helpers to hide struct msi_desc implementation details */ -+#define msi_desc_to_dev(desc) ((desc)->dev) -+#define dev_to_msi_list(dev) (&(dev)->msi_list) 
-+#define first_msi_entry(dev) \ -+ list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) -+#define for_each_msi_entry(desc, dev) \ -+ list_for_each_entry((desc), dev_to_msi_list((dev)), list) -+ -+#ifdef CONFIG_PCI_MSI -+#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) -+#define for_each_pci_msi_entry(desc, pdev) \ -+ for_each_msi_entry((desc), &(pdev)->dev) -+ -+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); -+void *msi_desc_to_pci_sysdata(struct msi_desc *desc); -+#else /* CONFIG_PCI_MSI */ -+static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc) -+{ -+ return NULL; -+} -+#endif /* CONFIG_PCI_MSI */ -+ -+struct msi_desc *alloc_msi_entry(struct device *dev); -+void free_msi_entry(struct msi_desc *entry); -+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); -+ -+u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); -+u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); -+void pci_msi_mask_irq(struct irq_data *data); -+void pci_msi_unmask_irq(struct irq_data *data); -+ -+/* Conversion helpers. Should be removed after merging */ -+static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) -+{ -+ __pci_write_msi_msg(entry, msg); -+} -+static inline void write_msi_msg(int irq, struct msi_msg *msg) -+{ -+ pci_write_msi_msg(irq, msg); -+} -+static inline void mask_msi_irq(struct irq_data *data) -+{ -+ pci_msi_mask_irq(data); -+} -+static inline void unmask_msi_irq(struct irq_data *data) -+{ -+ pci_msi_unmask_irq(data); -+} -+ - /* - * The arch hooks to setup up msi irqs. 
Those functions are - * implemented as weak symbols so that they /can/ be overriden by -@@ -61,18 +108,146 @@ void arch_restore_msi_irqs(struct pci_dev *dev); - - void default_teardown_msi_irqs(struct pci_dev *dev); - void default_restore_msi_irqs(struct pci_dev *dev); --u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); --u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); -+#define default_msi_mask_irq __msi_mask_irq -+#define default_msix_mask_irq __msix_mask_irq - --struct msi_chip { -+struct msi_controller { - struct module *owner; - struct device *dev; - struct device_node *of_node; - struct list_head list; -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+ struct irq_domain *domain; -+#endif - -- int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, -+ int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, - struct msi_desc *desc); -- void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); -+ int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, -+ int nvec, int type); -+ void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); - }; - -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+ -+#include -+#include -+ -+struct irq_domain; -+struct irq_chip; -+struct device_node; -+struct msi_domain_info; -+ -+/** -+ * struct msi_domain_ops - MSI interrupt domain callbacks -+ * @get_hwirq: Retrieve the resulting hw irq number -+ * @msi_init: Domain specific init function for MSI interrupts -+ * @msi_free: Domain specific function to free a MSI interrupts -+ * @msi_check: Callback for verification of the domain/info/dev data -+ * @msi_prepare: Prepare the allocation of the interrupts in the domain -+ * @msi_finish: Optional callbacl to finalize the allocation -+ * @set_desc: Set the msi descriptor for an interrupt -+ * @handle_error: Optional error handler if the allocation fails -+ * -+ * @get_hwirq, @msi_init and @msi_free are callbacks used by -+ * msi_create_irq_domain() and related interfaces -+ * -+ * 
@msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error -+ * are callbacks used by msi_irq_domain_alloc_irqs() and related -+ * interfaces which are based on msi_desc. -+ */ -+struct msi_domain_ops { -+ irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info, -+ msi_alloc_info_t *arg); -+ int (*msi_init)(struct irq_domain *domain, -+ struct msi_domain_info *info, -+ unsigned int virq, irq_hw_number_t hwirq, -+ msi_alloc_info_t *arg); -+ void (*msi_free)(struct irq_domain *domain, -+ struct msi_domain_info *info, -+ unsigned int virq); -+ int (*msi_check)(struct irq_domain *domain, -+ struct msi_domain_info *info, -+ struct device *dev); -+ int (*msi_prepare)(struct irq_domain *domain, -+ struct device *dev, int nvec, -+ msi_alloc_info_t *arg); -+ void (*msi_finish)(msi_alloc_info_t *arg, int retval); -+ void (*set_desc)(msi_alloc_info_t *arg, -+ struct msi_desc *desc); -+ int (*handle_error)(struct irq_domain *domain, -+ struct msi_desc *desc, int error); -+}; -+ -+/** -+ * struct msi_domain_info - MSI interrupt domain data -+ * @flags: Flags to decribe features and capabilities -+ * @ops: The callback data structure -+ * @chip: Optional: associated interrupt chip -+ * @chip_data: Optional: associated interrupt chip data -+ * @handler: Optional: associated interrupt flow handler -+ * @handler_data: Optional: associated interrupt flow handler data -+ * @handler_name: Optional: associated interrupt flow handler name -+ * @data: Optional: domain specific data -+ */ -+struct msi_domain_info { -+ u32 flags; -+ struct msi_domain_ops *ops; -+ struct irq_chip *chip; -+ void *chip_data; -+ irq_flow_handler_t handler; -+ void *handler_data; -+ const char *handler_name; -+ void *data; -+}; -+ -+/* Flags for msi_domain_info */ -+enum { -+ /* -+ * Init non implemented ops callbacks with default MSI domain -+ * callbacks. -+ */ -+ MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0), -+ /* -+ * Init non implemented chip callbacks with default MSI chip -+ * callbacks. 
-+ */ -+ MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), -+ /* Build identity map between hwirq and irq */ -+ MSI_FLAG_IDENTITY_MAP = (1 << 2), -+ /* Support multiple PCI MSI interrupts */ -+ MSI_FLAG_MULTI_PCI_MSI = (1 << 3), -+ /* Support PCI MSIX interrupts */ -+ MSI_FLAG_PCI_MSIX = (1 << 4), -+}; -+ -+int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, -+ bool force); -+ -+struct irq_domain *msi_create_irq_domain(struct device_node *of_node, -+ struct msi_domain_info *info, -+ struct irq_domain *parent); -+int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, -+ int nvec); -+void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); -+struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); -+ -+#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ -+ -+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN -+void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); -+struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, -+ struct msi_domain_info *info, -+ struct irq_domain *parent); -+int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, -+ int nvec, int type); -+void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); -+struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, -+ struct msi_domain_info *info, struct irq_domain *parent); -+ -+irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, -+ struct msi_desc *desc); -+int pci_msi_domain_check_cap(struct irq_domain *domain, -+ struct msi_domain_info *info, struct device *dev); -+#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ -+ - #endif /* LINUX_MSI_H */ -diff --git a/include/linux/of.h b/include/linux/of.h -index 4a6a489..25111fb 100644 ---- a/include/linux/of.h -+++ b/include/linux/of.h -@@ -57,7 +57,6 @@ struct device_node { - struct device_node *child; - struct device_node *sibling; - struct device_node *next; /* next device of same type */ -- struct device_node 
*allnext; /* next in list of all nodes */ - struct kobject kobj; - unsigned long _flags; - void *data; -@@ -109,7 +108,7 @@ static inline void of_node_put(struct device_node *node) { } - #ifdef CONFIG_OF - - /* Pointer for first entry in chain of all nodes. */ --extern struct device_node *of_allnodes; -+extern struct device_node *of_root; - extern struct device_node *of_chosen; - extern struct device_node *of_aliases; - extern struct device_node *of_stdout; -@@ -117,7 +116,7 @@ extern raw_spinlock_t devtree_lock; - - static inline bool of_have_populated_dt(void) - { -- return of_allnodes != NULL; -+ return of_root != NULL; - } - - static inline bool of_node_is_root(const struct device_node *node) -@@ -161,6 +160,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag - clear_bit(flag, &p->_flags); - } - -+extern struct device_node *__of_find_all_nodes(struct device_node *prev); - extern struct device_node *of_find_all_nodes(struct device_node *prev); - - /* -@@ -216,8 +216,9 @@ static inline const char *of_node_full_name(const struct device_node *np) - return np ? 
np->full_name : ""; - } - --#define for_each_of_allnodes(dn) \ -- for (dn = of_allnodes; dn; dn = dn->allnext) -+#define for_each_of_allnodes_from(from, dn) \ -+ for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) -+#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) - extern struct device_node *of_find_node_by_name(struct device_node *from, - const char *name); - extern struct device_node *of_find_node_by_type(struct device_node *from, -diff --git a/include/linux/of_device.h b/include/linux/of_device.h -index ef37021..22801b1 100644 ---- a/include/linux/of_device.h -+++ b/include/linux/of_device.h -@@ -53,6 +53,7 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) - return of_node_get(cpu_dev->of_node); - } - -+void of_dma_configure(struct device *dev, struct device_node *np); - #else /* CONFIG_OF */ - - static inline int of_driver_match_device(struct device *dev, -@@ -90,6 +91,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) - { - return NULL; - } -+static inline void of_dma_configure(struct device *dev, struct device_node *np) -+{} - #endif /* CONFIG_OF */ - - #endif /* _LINUX_OF_DEVICE_H */ -diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h -index 51a560f..ffbe470 100644 ---- a/include/linux/of_iommu.h -+++ b/include/linux/of_iommu.h -@@ -1,12 +1,20 @@ - #ifndef __OF_IOMMU_H - #define __OF_IOMMU_H - -+#include -+#include -+#include -+ - #ifdef CONFIG_OF_IOMMU - - extern int of_get_dma_window(struct device_node *dn, const char *prefix, - int index, unsigned long *busno, dma_addr_t *addr, - size_t *size); - -+extern void of_iommu_init(void); -+extern struct iommu_ops *of_iommu_configure(struct device *dev, -+ struct device_node *master_np); -+ - #else - - static inline int of_get_dma_window(struct device_node *dn, const char *prefix, -@@ -16,6 +24,23 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix, - return -EINVAL; - } - -+static inline 
void of_iommu_init(void) { } -+static inline struct iommu_ops *of_iommu_configure(struct device *dev, -+ struct device_node *master_np) -+{ -+ return NULL; -+} -+ - #endif /* CONFIG_OF_IOMMU */ - -+void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops); -+struct iommu_ops *of_iommu_get_ops(struct device_node *np); -+ -+extern struct of_device_id __iommu_of_table; -+ -+typedef int (*of_iommu_init_fn)(struct device_node *); -+ -+#define IOMMU_OF_DECLARE(name, compat, fn) \ -+ _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn) -+ - #endif /* __OF_IOMMU_H */ -diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h -index bfec136..563ad28 100644 ---- a/include/linux/of_irq.h -+++ b/include/linux/of_irq.h -@@ -69,6 +69,7 @@ static inline int of_irq_get_byname(struct device_node *dev, const char *name) - */ - extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); - extern struct device_node *of_irq_find_parent(struct device_node *child); -+extern void of_msi_configure(struct device *dev, struct device_node *np); - - #else /* !CONFIG_OF */ - static inline unsigned int irq_of_parse_and_map(struct device_node *dev, -diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h -index 1fd207e..29fd3fe 100644 ---- a/include/linux/of_pci.h -+++ b/include/linux/of_pci.h -@@ -16,6 +16,7 @@ int of_pci_get_devfn(struct device_node *np); - int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); - int of_pci_parse_bus_range(struct device_node *node, struct resource *res); - int of_get_pci_domain_nr(struct device_node *node); -+void of_pci_dma_configure(struct pci_dev *pci_dev); - #else - static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) - { -@@ -50,6 +51,8 @@ of_get_pci_domain_nr(struct device_node *node) - { - return -1; - } -+ -+static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { } - #endif - - #if defined(CONFIG_OF_ADDRESS) -@@ -59,13 +62,13 @@ int 
of_pci_get_host_bridge_resources(struct device_node *dev, - #endif - - #if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) --int of_pci_msi_chip_add(struct msi_chip *chip); --void of_pci_msi_chip_remove(struct msi_chip *chip); --struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node); -+int of_pci_msi_chip_add(struct msi_controller *chip); -+void of_pci_msi_chip_remove(struct msi_controller *chip); -+struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node); - #else --static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; } --static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { } --static inline struct msi_chip * -+static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; } -+static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { } -+static inline struct msi_controller * - of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } - #endif - -diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h -index c65a18a..7e09244 100644 ---- a/include/linux/of_pdt.h -+++ b/include/linux/of_pdt.h -@@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size); - /* for building the device tree */ - extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); - --extern void (*of_pdt_build_more)(struct device_node *dp, -- struct device_node ***nextp); -+extern void (*of_pdt_build_more)(struct device_node *dp); - - #endif /* _LINUX_OF_PDT_H */ -diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h -index c2b0627..8a860f0 100644 ---- a/include/linux/of_platform.h -+++ b/include/linux/of_platform.h -@@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root, - static inline void of_platform_depopulate(struct device *parent) { } - #endif - -+#ifdef CONFIG_OF_DYNAMIC -+extern void of_platform_register_reconfig_notifier(void); -+#else -+static inline void 
of_platform_register_reconfig_notifier(void) { } -+#endif -+ - #endif /* _LINUX_OF_PLATFORM_H */ -diff --git a/include/linux/pci.h b/include/linux/pci.h -index 7a34844..f28c88b 100644 ---- a/include/linux/pci.h -+++ b/include/linux/pci.h -@@ -29,6 +29,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -171,8 +172,8 @@ enum pci_dev_flags { - PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2), - /* Flag for quirk use to store if quirk-specific ACS is enabled */ - PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3), -- /* Flag to indicate the device uses dma_alias_devfn */ -- PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4), -+ /* Flag to indicate the device uses dma_alias_devid */ -+ PCI_DEV_FLAGS_DMA_ALIAS_DEVID = (__force pci_dev_flags_t) (1 << 4), - /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */ - PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), - /* Do not use bus resets for device */ -@@ -278,7 +279,7 @@ struct pci_dev { - u8 rom_base_reg; /* which config register controls the ROM */ - u8 pin; /* which interrupt pin this device uses */ - u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ -- u8 dma_alias_devfn;/* devfn of DMA alias, if any */ -+ u32 dma_alias_devid;/* devid of DMA alias */ - - struct pci_driver *driver; /* which driver has allocated this device */ - u64 dma_mask; /* Mask of the bits of bus address this -@@ -365,7 +366,6 @@ struct pci_dev { - struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ - struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ - #ifdef CONFIG_PCI_MSI -- struct list_head msi_list; - const struct attribute_group **msi_irq_groups; - #endif - struct pci_vpd *vpd; -@@ -400,16 +400,10 @@ static inline int pci_channel_offline(struct pci_dev *pdev) - return (pdev->error_state != pci_channel_io_normal); - } - --struct pci_host_bridge_window { -- 
struct list_head list; -- struct resource *res; /* host bridge aperture (CPU address) */ -- resource_size_t offset; /* bus address + offset = CPU address */ --}; -- - struct pci_host_bridge { - struct device dev; - struct pci_bus *bus; /* root bus */ -- struct list_head windows; /* pci_host_bridge_windows */ -+ struct list_head windows; /* resource_entry */ - void (*release_fn)(struct pci_host_bridge *); - void *release_data; - }; -@@ -456,7 +450,7 @@ struct pci_bus { - struct resource busn_res; /* bus numbers routed to this bus */ - - struct pci_ops *ops; /* configuration access functions */ -- struct msi_chip *msi; /* MSI controller */ -+ struct msi_controller *msi; /* MSI controller */ - void *sysdata; /* hook for sys-specific extension */ - struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ - -@@ -516,6 +510,9 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) - return dev->bus->self; - } - -+struct device *pci_get_host_bridge_device(struct pci_dev *dev); -+void pci_put_host_bridge_device(struct device *dev); -+ - #ifdef CONFIG_PCI_MSI - static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) - { -@@ -565,6 +562,7 @@ static inline int pcibios_err_to_errno(int err) - /* Low-level architecture-dependent routines */ - - struct pci_ops { -+ void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where); - int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); - int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); - }; -@@ -862,6 +860,16 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, - int where, u16 val); - int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, - int where, u32 val); -+ -+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 *val); -+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 val); 
-+int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 *val); -+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 val); -+ - struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); - - static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) -diff --git a/include/linux/phy.h b/include/linux/phy.h -index d090cfc..eda18a8 100644 ---- a/include/linux/phy.h -+++ b/include/linux/phy.h -@@ -700,6 +700,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, - struct phy_c45_device_ids *c45_ids); - struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); - int phy_device_register(struct phy_device *phy); -+void phy_device_remove(struct phy_device *phydev); - int phy_init_hw(struct phy_device *phydev); - int phy_suspend(struct phy_device *phydev); - int phy_resume(struct phy_device *phydev); -diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h -index f2ca1b4..fe5732d 100644 ---- a/include/linux/phy_fixed.h -+++ b/include/linux/phy_fixed.h -@@ -11,7 +11,7 @@ struct fixed_phy_status { - - struct device_node; - --#ifdef CONFIG_FIXED_PHY -+#if IS_ENABLED(CONFIG_FIXED_PHY) - extern int fixed_phy_add(unsigned int irq, int phy_id, - struct fixed_phy_status *status); - extern struct phy_device *fixed_phy_register(unsigned int irq, -@@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr); - extern int fixed_phy_set_link_update(struct phy_device *phydev, - int (*link_update)(struct net_device *, - struct fixed_phy_status *)); -+extern int fixed_phy_update_state(struct phy_device *phydev, -+ const struct fixed_phy_status *status, -+ const struct fixed_phy_status *changed); - #else - static inline int fixed_phy_add(unsigned int irq, int phy_id, - struct fixed_phy_status *status) -@@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, - { - 
return -ENODEV; - } -+static inline int fixed_phy_update_state(struct phy_device *phydev, -+ const struct fixed_phy_status *status, -+ const struct fixed_phy_status *changed) -+{ -+ return -ENODEV; -+} - #endif /* CONFIG_FIXED_PHY */ - - #endif /* __PHY_FIXED_H */ -diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h -new file mode 100644 -index 0000000..e2bf63d ---- /dev/null -+++ b/include/linux/resource_ext.h -@@ -0,0 +1,77 @@ -+/* -+ * Copyright (C) 2015, Intel Corporation -+ * Author: Jiang Liu -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ */ -+#ifndef _LINUX_RESOURCE_EXT_H -+#define _LINUX_RESOURCE_EXT_H -+#include -+#include -+#include -+#include -+ -+/* Represent resource window for bridge devices */ -+struct resource_win { -+ struct resource res; /* In master (CPU) address space */ -+ resource_size_t offset; /* Translation offset for bridge */ -+}; -+ -+/* -+ * Common resource list management data structure and interfaces to support -+ * ACPI, PNP and PCI host bridge etc. 
-+ */ -+struct resource_entry { -+ struct list_head node; -+ struct resource *res; /* In master (CPU) address space */ -+ resource_size_t offset; /* Translation offset for bridge */ -+ struct resource __res; /* Default storage for res */ -+}; -+ -+extern struct resource_entry * -+resource_list_create_entry(struct resource *res, size_t extra_size); -+extern void resource_list_free(struct list_head *head); -+ -+static inline void resource_list_add(struct resource_entry *entry, -+ struct list_head *head) -+{ -+ list_add(&entry->node, head); -+} -+ -+static inline void resource_list_add_tail(struct resource_entry *entry, -+ struct list_head *head) -+{ -+ list_add_tail(&entry->node, head); -+} -+ -+static inline void resource_list_del(struct resource_entry *entry) -+{ -+ list_del(&entry->node); -+} -+ -+static inline void resource_list_free_entry(struct resource_entry *entry) -+{ -+ kfree(entry); -+} -+ -+static inline void -+resource_list_destroy_entry(struct resource_entry *entry) -+{ -+ resource_list_del(entry); -+ resource_list_free_entry(entry); -+} -+ -+#define resource_list_for_each_entry(entry, list) \ -+ list_for_each_entry((entry), (list), node) -+ -+#define resource_list_for_each_entry_safe(entry, tmp, list) \ -+ list_for_each_entry_safe((entry), (tmp), (list), node) -+ -+#endif /* _LINUX_RESOURCE_EXT_H */ -diff --git a/include/linux/splice.h b/include/linux/splice.h -index da2751d..2e0fca6 100644 ---- a/include/linux/splice.h -+++ b/include/linux/splice.h -@@ -83,4 +83,10 @@ extern void splice_shrink_spd(struct splice_pipe_desc *); - extern void spd_release_page(struct splice_pipe_desc *, unsigned int); - - extern const struct pipe_buf_operations page_cache_pipe_buf_ops; -+ -+extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out, -+ loff_t *ppos, size_t len, unsigned int flags); -+extern long do_splice_to(struct file *in, loff_t *ppos, -+ struct pipe_inode_info *pipe, size_t len, -+ unsigned int flags); - #endif -diff --git 
a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h -index 9948c87..1d0043d 100644 ---- a/include/linux/usb/quirks.h -+++ b/include/linux/usb/quirks.h -@@ -47,4 +47,7 @@ - /* device generates spurious wakeup, ignore remote wakeup capability */ - #define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9) - -+/* device can't handle Link Power Management */ -+#define USB_QUIRK_NO_LPM BIT(10) -+ - #endif /* __LINUX_USB_QUIRKS_H */ -diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h -index a8f5c32..2c7befb 100644 ---- a/include/trace/events/iommu.h -+++ b/include/trace/events/iommu.h -@@ -83,7 +83,7 @@ DEFINE_EVENT(iommu_device_event, detach_device_from_domain, - TP_ARGS(dev) - ); - --DECLARE_EVENT_CLASS(iommu_map_unmap, -+TRACE_EVENT(map, - - TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), - -@@ -92,7 +92,7 @@ DECLARE_EVENT_CLASS(iommu_map_unmap, - TP_STRUCT__entry( - __field(u64, iova) - __field(u64, paddr) -- __field(int, size) -+ __field(size_t, size) - ), - - TP_fast_assign( -@@ -101,26 +101,31 @@ DECLARE_EVENT_CLASS(iommu_map_unmap, - __entry->size = size; - ), - -- TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x", -+ TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu", - __entry->iova, __entry->paddr, __entry->size - ) - ); - --DEFINE_EVENT(iommu_map_unmap, map, -+TRACE_EVENT(unmap, - -- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), -- -- TP_ARGS(iova, paddr, size) --); -+ TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size), - --DEFINE_EVENT_PRINT(iommu_map_unmap, unmap, -+ TP_ARGS(iova, size, unmapped_size), - -- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), -+ TP_STRUCT__entry( -+ __field(u64, iova) -+ __field(size_t, size) -+ __field(size_t, unmapped_size) -+ ), - -- TP_ARGS(iova, paddr, size), -+ TP_fast_assign( -+ __entry->iova = iova; -+ __entry->size = size; -+ __entry->unmapped_size = unmapped_size; -+ ), - -- TP_printk("IOMMU: iova=0x%016llx size=0x%x", -- 
__entry->iova, __entry->size -+ TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu", -+ __entry->iova, __entry->size, __entry->unmapped_size - ) - ); - -diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild -index 8523f9b..11f8f74 100644 ---- a/include/uapi/linux/Kbuild -+++ b/include/uapi/linux/Kbuild -@@ -56,6 +56,7 @@ header-y += atmppp.h - header-y += atmsap.h - header-y += atmsvc.h - header-y += audit.h -+header-y += aufs_type.h - header-y += auto_fs.h - header-y += auto_fs4.h - header-y += auxvec.h -diff --git a/include/uapi/linux/aufs_type.h b/include/uapi/linux/aufs_type.h -new file mode 100644 -index 0000000..75915f8 ---- /dev/null -+++ b/include/uapi/linux/aufs_type.h -@@ -0,0 +1,419 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+#ifndef __AUFS_TYPE_H__ -+#define __AUFS_TYPE_H__ -+ -+#define AUFS_NAME "aufs" -+ -+#ifdef __KERNEL__ -+/* -+ * define it before including all other headers. -+ * sched.h may use pr_* macros before defining "current", so define the -+ * no-current version first, and re-define later. 
-+ */ -+#define pr_fmt(fmt) AUFS_NAME " %s:%d: " fmt, __func__, __LINE__ -+#include -+#undef pr_fmt -+#define pr_fmt(fmt) \ -+ AUFS_NAME " %s:%d:%.*s[%d]: " fmt, __func__, __LINE__, \ -+ (int)sizeof(current->comm), current->comm, current->pid -+#else -+#include -+#include -+#endif /* __KERNEL__ */ -+ -+#include -+ -+#define AUFS_VERSION "3.18.25+-20160509" -+ -+/* todo? move this to linux-2.6.19/include/magic.h */ -+#define AUFS_SUPER_MAGIC ('a' << 24 | 'u' << 16 | 'f' << 8 | 's') -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_BRANCH_MAX_127 -+typedef int8_t aufs_bindex_t; -+#define AUFS_BRANCH_MAX 127 -+#else -+typedef int16_t aufs_bindex_t; -+#ifdef CONFIG_AUFS_BRANCH_MAX_511 -+#define AUFS_BRANCH_MAX 511 -+#elif defined(CONFIG_AUFS_BRANCH_MAX_1023) -+#define AUFS_BRANCH_MAX 1023 -+#elif defined(CONFIG_AUFS_BRANCH_MAX_32767) -+#define AUFS_BRANCH_MAX 32767 -+#endif -+#endif -+ -+#ifdef __KERNEL__ -+#ifndef AUFS_BRANCH_MAX -+#error unknown CONFIG_AUFS_BRANCH_MAX value -+#endif -+#endif /* __KERNEL__ */ -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define AUFS_FSTYPE AUFS_NAME -+ -+#define AUFS_ROOT_INO 2 -+#define AUFS_FIRST_INO 11 -+ -+#define AUFS_WH_PFX ".wh." -+#define AUFS_WH_PFX_LEN ((int)sizeof(AUFS_WH_PFX) - 1) -+#define AUFS_WH_TMP_LEN 4 -+/* a limit for rmdir/rename a dir and copyup */ -+#define AUFS_MAX_NAMELEN (NAME_MAX \ -+ - AUFS_WH_PFX_LEN * 2 /* doubly whiteouted */\ -+ - 1 /* dot */\ -+ - AUFS_WH_TMP_LEN) /* hex */ -+#define AUFS_XINO_FNAME "." 
AUFS_NAME ".xino" -+#define AUFS_XINO_DEFPATH "/tmp/" AUFS_XINO_FNAME -+#define AUFS_XINO_DEF_SEC 30 /* seconds */ -+#define AUFS_XINO_DEF_TRUNC 45 /* percentage */ -+#define AUFS_DIRWH_DEF 3 -+#define AUFS_RDCACHE_DEF 10 /* seconds */ -+#define AUFS_RDCACHE_MAX 3600 /* seconds */ -+#define AUFS_RDBLK_DEF 512 /* bytes */ -+#define AUFS_RDHASH_DEF 32 -+#define AUFS_WKQ_NAME AUFS_NAME "d" -+#define AUFS_MFS_DEF_SEC 30 /* seconds */ -+#define AUFS_MFS_MAX_SEC 3600 /* seconds */ -+#define AUFS_FHSM_CACHE_DEF_SEC 30 /* seconds */ -+#define AUFS_PLINK_WARN 50 /* number of plinks in a single bucket */ -+ -+/* pseudo-link maintenace under /proc */ -+#define AUFS_PLINK_MAINT_NAME "plink_maint" -+#define AUFS_PLINK_MAINT_DIR "fs/" AUFS_NAME -+#define AUFS_PLINK_MAINT_PATH AUFS_PLINK_MAINT_DIR "/" AUFS_PLINK_MAINT_NAME -+ -+#define AUFS_DIROPQ_NAME AUFS_WH_PFX ".opq" /* whiteouted doubly */ -+#define AUFS_WH_DIROPQ AUFS_WH_PFX AUFS_DIROPQ_NAME -+ -+#define AUFS_BASE_NAME AUFS_WH_PFX AUFS_NAME -+#define AUFS_PLINKDIR_NAME AUFS_WH_PFX "plnk" -+#define AUFS_ORPHDIR_NAME AUFS_WH_PFX "orph" -+ -+/* doubly whiteouted */ -+#define AUFS_WH_BASE AUFS_WH_PFX AUFS_BASE_NAME -+#define AUFS_WH_PLINKDIR AUFS_WH_PFX AUFS_PLINKDIR_NAME -+#define AUFS_WH_ORPHDIR AUFS_WH_PFX AUFS_ORPHDIR_NAME -+ -+/* branch permissions and attributes */ -+#define AUFS_BRPERM_RW "rw" -+#define AUFS_BRPERM_RO "ro" -+#define AUFS_BRPERM_RR "rr" -+#define AUFS_BRATTR_COO_REG "coo_reg" -+#define AUFS_BRATTR_COO_ALL "coo_all" -+#define AUFS_BRATTR_FHSM "fhsm" -+#define AUFS_BRATTR_UNPIN "unpin" -+#define AUFS_BRATTR_ICEX "icex" -+#define AUFS_BRATTR_ICEX_SEC "icexsec" -+#define AUFS_BRATTR_ICEX_SYS "icexsys" -+#define AUFS_BRATTR_ICEX_TR "icextr" -+#define AUFS_BRATTR_ICEX_USR "icexusr" -+#define AUFS_BRATTR_ICEX_OTH "icexoth" -+#define AUFS_BRRATTR_WH "wh" -+#define AUFS_BRWATTR_NLWH "nolwh" -+#define AUFS_BRWATTR_MOO "moo" -+ -+#define AuBrPerm_RW 1 /* writable, hardlinkable wh */ -+#define AuBrPerm_RO (1 << 1) /* 
readonly */ -+#define AuBrPerm_RR (1 << 2) /* natively readonly */ -+#define AuBrPerm_Mask (AuBrPerm_RW | AuBrPerm_RO | AuBrPerm_RR) -+ -+#define AuBrAttr_COO_REG (1 << 3) /* copy-up on open */ -+#define AuBrAttr_COO_ALL (1 << 4) -+#define AuBrAttr_COO_Mask (AuBrAttr_COO_REG | AuBrAttr_COO_ALL) -+ -+#define AuBrAttr_FHSM (1 << 5) /* file-based hsm */ -+#define AuBrAttr_UNPIN (1 << 6) /* rename-able top dir of -+ branch. meaningless since -+ linux-3.18-rc1 */ -+ -+/* ignore error in copying XATTR */ -+#define AuBrAttr_ICEX_SEC (1 << 7) -+#define AuBrAttr_ICEX_SYS (1 << 8) -+#define AuBrAttr_ICEX_TR (1 << 9) -+#define AuBrAttr_ICEX_USR (1 << 10) -+#define AuBrAttr_ICEX_OTH (1 << 11) -+#define AuBrAttr_ICEX (AuBrAttr_ICEX_SEC \ -+ | AuBrAttr_ICEX_SYS \ -+ | AuBrAttr_ICEX_TR \ -+ | AuBrAttr_ICEX_USR \ -+ | AuBrAttr_ICEX_OTH) -+ -+#define AuBrRAttr_WH (1 << 12) /* whiteout-able */ -+#define AuBrRAttr_Mask AuBrRAttr_WH -+ -+#define AuBrWAttr_NoLinkWH (1 << 13) /* un-hardlinkable whiteouts */ -+#define AuBrWAttr_MOO (1 << 14) /* move-up on open */ -+#define AuBrWAttr_Mask (AuBrWAttr_NoLinkWH | AuBrWAttr_MOO) -+ -+#define AuBrAttr_CMOO_Mask (AuBrAttr_COO_Mask | AuBrWAttr_MOO) -+ -+/* #warning test userspace */ -+#ifdef __KERNEL__ -+#ifndef CONFIG_AUFS_FHSM -+#undef AuBrAttr_FHSM -+#define AuBrAttr_FHSM 0 -+#endif -+#ifndef CONFIG_AUFS_XATTR -+#undef AuBrAttr_ICEX -+#define AuBrAttr_ICEX 0 -+#undef AuBrAttr_ICEX_SEC -+#define AuBrAttr_ICEX_SEC 0 -+#undef AuBrAttr_ICEX_SYS -+#define AuBrAttr_ICEX_SYS 0 -+#undef AuBrAttr_ICEX_TR -+#define AuBrAttr_ICEX_TR 0 -+#undef AuBrAttr_ICEX_USR -+#define AuBrAttr_ICEX_USR 0 -+#undef AuBrAttr_ICEX_OTH -+#define AuBrAttr_ICEX_OTH 0 -+#endif -+#endif -+ -+/* the longest combination */ -+/* AUFS_BRATTR_ICEX and AUFS_BRATTR_ICEX_TR don't affect here */ -+#define AuBrPermStrSz sizeof(AUFS_BRPERM_RW \ -+ "+" AUFS_BRATTR_COO_REG \ -+ "+" AUFS_BRATTR_FHSM \ -+ "+" AUFS_BRATTR_UNPIN \ -+ "+" AUFS_BRATTR_ICEX_SEC \ -+ "+" AUFS_BRATTR_ICEX_SYS \ -+ 
"+" AUFS_BRATTR_ICEX_USR \ -+ "+" AUFS_BRATTR_ICEX_OTH \ -+ "+" AUFS_BRWATTR_NLWH) -+ -+typedef struct { -+ char a[AuBrPermStrSz]; -+} au_br_perm_str_t; -+ -+static inline int au_br_writable(int brperm) -+{ -+ return brperm & AuBrPerm_RW; -+} -+ -+static inline int au_br_whable(int brperm) -+{ -+ return brperm & (AuBrPerm_RW | AuBrRAttr_WH); -+} -+ -+static inline int au_br_wh_linkable(int brperm) -+{ -+ return !(brperm & AuBrWAttr_NoLinkWH); -+} -+ -+static inline int au_br_cmoo(int brperm) -+{ -+ return brperm & AuBrAttr_CMOO_Mask; -+} -+ -+static inline int au_br_fhsm(int brperm) -+{ -+ return brperm & AuBrAttr_FHSM; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* ioctl */ -+enum { -+ /* readdir in userspace */ -+ AuCtl_RDU, -+ AuCtl_RDU_INO, -+ -+ AuCtl_WBR_FD, /* pathconf wrapper */ -+ AuCtl_IBUSY, /* busy inode */ -+ AuCtl_MVDOWN, /* move-down */ -+ AuCtl_BR, /* info about branches */ -+ AuCtl_FHSM_FD /* connection for fhsm */ -+}; -+ -+/* borrowed from linux/include/linux/kernel.h */ -+#ifndef ALIGN -+#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) -+#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) -+#endif -+ -+/* borrowed from linux/include/linux/compiler-gcc3.h */ -+#ifndef __aligned -+#define __aligned(x) __attribute__((aligned(x))) -+#endif -+ -+#ifdef __KERNEL__ -+#ifndef __packed -+#define __packed __attribute__((packed)) -+#endif -+#endif -+ -+struct au_rdu_cookie { -+ uint64_t h_pos; -+ int16_t bindex; -+ uint8_t flags; -+ uint8_t pad; -+ uint32_t generation; -+} __aligned(8); -+ -+struct au_rdu_ent { -+ uint64_t ino; -+ int16_t bindex; -+ uint8_t type; -+ uint8_t nlen; -+ uint8_t wh; -+ char name[0]; -+} __aligned(8); -+ -+static inline int au_rdu_len(int nlen) -+{ -+ /* include the terminating NULL */ -+ return ALIGN(sizeof(struct au_rdu_ent) + nlen + 1, -+ sizeof(uint64_t)); -+} -+ -+union au_rdu_ent_ul { -+ struct au_rdu_ent __user *e; -+ uint64_t ul; -+}; -+ -+enum { -+ AufsCtlRduV_SZ, -+ 
AufsCtlRduV_End -+}; -+ -+struct aufs_rdu { -+ /* input */ -+ union { -+ uint64_t sz; /* AuCtl_RDU */ -+ uint64_t nent; /* AuCtl_RDU_INO */ -+ }; -+ union au_rdu_ent_ul ent; -+ uint16_t verify[AufsCtlRduV_End]; -+ -+ /* input/output */ -+ uint32_t blk; -+ -+ /* output */ -+ union au_rdu_ent_ul tail; -+ /* number of entries which were added in a single call */ -+ uint64_t rent; -+ uint8_t full; -+ uint8_t shwh; -+ -+ struct au_rdu_cookie cookie; -+} __aligned(8); -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct aufs_wbr_fd { -+ uint32_t oflags; -+ int16_t brid; -+} __aligned(8); -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct aufs_ibusy { -+ uint64_t ino, h_ino; -+ int16_t bindex; -+} __aligned(8); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* error code for move-down */ -+/* the actual message strings are implemented in aufs-util.git */ -+enum { -+ EAU_MVDOWN_OPAQUE = 1, -+ EAU_MVDOWN_WHITEOUT, -+ EAU_MVDOWN_UPPER, -+ EAU_MVDOWN_BOTTOM, -+ EAU_MVDOWN_NOUPPER, -+ EAU_MVDOWN_NOLOWERBR, -+ EAU_Last -+}; -+ -+/* flags for move-down */ -+#define AUFS_MVDOWN_DMSG 1 -+#define AUFS_MVDOWN_OWLOWER (1 << 1) /* overwrite lower */ -+#define AUFS_MVDOWN_KUPPER (1 << 2) /* keep upper */ -+#define AUFS_MVDOWN_ROLOWER (1 << 3) /* do even if lower is RO */ -+#define AUFS_MVDOWN_ROLOWER_R (1 << 4) /* did on lower RO */ -+#define AUFS_MVDOWN_ROUPPER (1 << 5) /* do even if upper is RO */ -+#define AUFS_MVDOWN_ROUPPER_R (1 << 6) /* did on upper RO */ -+#define AUFS_MVDOWN_BRID_UPPER (1 << 7) /* upper brid */ -+#define AUFS_MVDOWN_BRID_LOWER (1 << 8) /* lower brid */ -+#define AUFS_MVDOWN_FHSM_LOWER (1 << 9) /* find fhsm attr for lower */ -+#define AUFS_MVDOWN_STFS (1 << 10) /* req. 
stfs */ -+#define AUFS_MVDOWN_STFS_FAILED (1 << 11) /* output: stfs is unusable */ -+#define AUFS_MVDOWN_BOTTOM (1 << 12) /* output: no more lowers */ -+ -+/* index for move-down */ -+enum { -+ AUFS_MVDOWN_UPPER, -+ AUFS_MVDOWN_LOWER, -+ AUFS_MVDOWN_NARRAY -+}; -+ -+/* -+ * additional info of move-down -+ * number of free blocks and inodes. -+ * subset of struct kstatfs, but smaller and always 64bit. -+ */ -+struct aufs_stfs { -+ uint64_t f_blocks; -+ uint64_t f_bavail; -+ uint64_t f_files; -+ uint64_t f_ffree; -+}; -+ -+struct aufs_stbr { -+ int16_t brid; /* optional input */ -+ int16_t bindex; /* output */ -+ struct aufs_stfs stfs; /* output when AUFS_MVDOWN_STFS set */ -+} __aligned(8); -+ -+struct aufs_mvdown { -+ uint32_t flags; /* input/output */ -+ struct aufs_stbr stbr[AUFS_MVDOWN_NARRAY]; /* input/output */ -+ int8_t au_errno; /* output */ -+} __aligned(8); -+ -+/* ---------------------------------------------------------------------- */ -+ -+union aufs_brinfo { -+ /* PATH_MAX may differ between kernel-space and user-space */ -+ char _spacer[4096]; -+ struct { -+ int16_t id; -+ int perm; -+ char path[0]; -+ }; -+} __aligned(8); -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define AuCtlType 'A' -+#define AUFS_CTL_RDU _IOWR(AuCtlType, AuCtl_RDU, struct aufs_rdu) -+#define AUFS_CTL_RDU_INO _IOWR(AuCtlType, AuCtl_RDU_INO, struct aufs_rdu) -+#define AUFS_CTL_WBR_FD _IOW(AuCtlType, AuCtl_WBR_FD, \ -+ struct aufs_wbr_fd) -+#define AUFS_CTL_IBUSY _IOWR(AuCtlType, AuCtl_IBUSY, struct aufs_ibusy) -+#define AUFS_CTL_MVDOWN _IOWR(AuCtlType, AuCtl_MVDOWN, \ -+ struct aufs_mvdown) -+#define AUFS_CTL_BRINFO _IOW(AuCtlType, AuCtl_BR, union aufs_brinfo) -+#define AUFS_CTL_FHSM_FD _IOW(AuCtlType, AuCtl_FHSM_FD, int) -+ -+#endif /* __AUFS_TYPE_H__ */ -diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h -index 29715d2..6eca3ee 100644 ---- a/include/uapi/linux/vfio.h -+++ b/include/uapi/linux/vfio.h -@@ -160,6 +160,10 @@ 
struct vfio_device_info { - __u32 flags; - #define VFIO_DEVICE_FLAGS_RESET (1 << 0) /* Device supports reset */ - #define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */ -+#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */ -+#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */ -+#define VFIO_DEVICE_FLAGS_FSL_MC (1 << 4) /* vfio Freescale MC device */ -+ - __u32 num_regions; /* Max region index + 1 */ - __u32 num_irqs; /* Max IRQ index + 1 */ - }; -@@ -404,6 +408,7 @@ struct vfio_iommu_type1_dma_map { - __u32 flags; - #define VFIO_DMA_MAP_FLAG_READ (1 << 0) /* readable from device */ - #define VFIO_DMA_MAP_FLAG_WRITE (1 << 1) /* writable from device */ -+#define VFIO_DMA_MAP_FLAG_MMIO (1 << 2) /* non-cachable device region */ - __u64 vaddr; /* Process virtual address */ - __u64 iova; /* IO virtual address */ - __u64 size; /* Size of mapping (bytes) */ -diff --git a/kernel/fork.c b/kernel/fork.c -index 0a4f601..67ecb91 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -430,7 +430,7 @@ static int dup_mmap(struct mm_struct *mm, struct mm_struct *oldmm) - struct inode *inode = file_inode(file); - struct address_space *mapping = file->f_mapping; - -- get_file(file); -+ vma_get_file(tmp); - if (tmp->vm_flags & VM_DENYWRITE) - atomic_dec(&inode->i_writecount); - mutex_lock(&mapping->i_mmap_mutex); -diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig -index 225086b..9a76e3b 100644 ---- a/kernel/irq/Kconfig -+++ b/kernel/irq/Kconfig -@@ -55,6 +55,21 @@ config GENERIC_IRQ_CHIP - config IRQ_DOMAIN - bool - -+# Support for hierarchical irq domains -+config IRQ_DOMAIN_HIERARCHY -+ bool -+ select IRQ_DOMAIN -+ -+# Generic MSI interrupt support -+config GENERIC_MSI_IRQ -+ bool -+ -+# Generic MSI hierarchical interrupt domain support -+config GENERIC_MSI_IRQ_DOMAIN -+ bool -+ select IRQ_DOMAIN_HIERARCHY -+ select GENERIC_MSI_IRQ -+ - config HANDLE_DOMAIN_IRQ - bool - -diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile -index 
fff1738..d121235 100644 ---- a/kernel/irq/Makefile -+++ b/kernel/irq/Makefile -@@ -6,3 +6,4 @@ obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o - obj-$(CONFIG_PROC_FS) += proc.o - obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o - obj-$(CONFIG_PM_SLEEP) += pm.o -+obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o -diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c -index e5202f0..55dd2fb 100644 ---- a/kernel/irq/chip.c -+++ b/kernel/irq/chip.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - - #include - -@@ -178,6 +179,7 @@ int irq_startup(struct irq_desc *desc, bool resend) - irq_state_clr_disabled(desc); - desc->depth = 0; - -+ irq_domain_activate_irq(&desc->irq_data); - if (desc->irq_data.chip->irq_startup) { - ret = desc->irq_data.chip->irq_startup(&desc->irq_data); - irq_state_clr_masked(desc); -@@ -199,6 +201,7 @@ void irq_shutdown(struct irq_desc *desc) - desc->irq_data.chip->irq_disable(&desc->irq_data); - else - desc->irq_data.chip->irq_mask(&desc->irq_data); -+ irq_domain_deactivate_irq(&desc->irq_data); - irq_state_set_masked(desc); - } - -@@ -728,7 +731,30 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, - if (!handle) { - handle = handle_bad_irq; - } else { -- if (WARN_ON(desc->irq_data.chip == &no_irq_chip)) -+ struct irq_data *irq_data = &desc->irq_data; -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ /* -+ * With hierarchical domains we might run into a -+ * situation where the outermost chip is not yet set -+ * up, but the inner chips are there. Instead of -+ * bailing we install the handler, but obviously we -+ * cannot enable/startup the interrupt at this point. -+ */ -+ while (irq_data) { -+ if (irq_data->chip != &no_irq_chip) -+ break; -+ /* -+ * Bail out if the outer chip is not set up -+ * and the interrrupt supposed to be started -+ * right away. 
-+ */ -+ if (WARN_ON(is_chained)) -+ goto out; -+ /* Try the parent */ -+ irq_data = irq_data->parent_data; -+ } -+#endif -+ if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) - goto out; - } - -@@ -847,3 +873,138 @@ void irq_cpu_offline(void) - raw_spin_unlock_irqrestore(&desc->lock, flags); - } - } -+ -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+/** -+ * irq_chip_ack_parent - Acknowledge the parent interrupt -+ * @data: Pointer to interrupt specific data -+ */ -+void irq_chip_ack_parent(struct irq_data *data) -+{ -+ data = data->parent_data; -+ data->chip->irq_ack(data); -+} -+ -+/** -+ * irq_chip_mask_parent - Mask the parent interrupt -+ * @data: Pointer to interrupt specific data -+ */ -+void irq_chip_mask_parent(struct irq_data *data) -+{ -+ data = data->parent_data; -+ data->chip->irq_mask(data); -+} -+ -+/** -+ * irq_chip_unmask_parent - Unmask the parent interrupt -+ * @data: Pointer to interrupt specific data -+ */ -+void irq_chip_unmask_parent(struct irq_data *data) -+{ -+ data = data->parent_data; -+ data->chip->irq_unmask(data); -+} -+ -+/** -+ * irq_chip_eoi_parent - Invoke EOI on the parent interrupt -+ * @data: Pointer to interrupt specific data -+ */ -+void irq_chip_eoi_parent(struct irq_data *data) -+{ -+ data = data->parent_data; -+ data->chip->irq_eoi(data); -+} -+ -+/** -+ * irq_chip_set_affinity_parent - Set affinity on the parent interrupt -+ * @data: Pointer to interrupt specific data -+ * @dest: The affinity mask to set -+ * @force: Flag to enforce setting (disable online checks) -+ * -+ * Conditinal, as the underlying parent chip might not implement it. 
-+ */ -+int irq_chip_set_affinity_parent(struct irq_data *data, -+ const struct cpumask *dest, bool force) -+{ -+ data = data->parent_data; -+ if (data->chip->irq_set_affinity) -+ return data->chip->irq_set_affinity(data, dest, force); -+ -+ return -ENOSYS; -+} -+ -+/** -+ * irq_chip_set_type_parent - Set IRQ type on the parent interrupt -+ * @data: Pointer to interrupt specific data -+ * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h -+ * -+ * Conditional, as the underlying parent chip might not implement it. -+ */ -+int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) -+{ -+ data = data->parent_data; -+ -+ if (data->chip->irq_set_type) -+ return data->chip->irq_set_type(data, type); -+ -+ return -ENOSYS; -+} -+ -+/** -+ * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware -+ * @data: Pointer to interrupt specific data -+ * -+ * Iterate through the domain hierarchy of the interrupt and check -+ * whether a hw retrigger function exists. If yes, invoke it. -+ */ -+int irq_chip_retrigger_hierarchy(struct irq_data *data) -+{ -+ for (data = data->parent_data; data; data = data->parent_data) -+ if (data->chip && data->chip->irq_retrigger) -+ return data->chip->irq_retrigger(data); -+ -+ return -ENOSYS; -+} -+ -+/** -+ * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt -+ * @data: Pointer to interrupt specific data -+ * @on: Whether to set or reset the wake-up capability of this irq -+ * -+ * Conditional, as the underlying parent chip might not implement it. 
-+ */ -+int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) -+{ -+ data = data->parent_data; -+ if (data->chip->irq_set_wake) -+ return data->chip->irq_set_wake(data, on); -+ -+ return -ENOSYS; -+} -+#endif -+ -+/** -+ * irq_chip_compose_msi_msg - Componse msi message for a irq chip -+ * @data: Pointer to interrupt specific data -+ * @msg: Pointer to the MSI message -+ * -+ * For hierarchical domains we find the first chip in the hierarchy -+ * which implements the irq_compose_msi_msg callback. For non -+ * hierarchical we use the top level chip. -+ */ -+int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -+{ -+ struct irq_data *pos = NULL; -+ -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ for (; data; data = data->parent_data) -+#endif -+ if (data->chip && data->chip->irq_compose_msi_msg) -+ pos = data; -+ if (!pos) -+ return -ENOSYS; -+ -+ pos->chip->irq_compose_msi_msg(pos, msg); -+ -+ return 0; -+} -diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c -index cf80e7b..61024e8 100644 ---- a/kernel/irq/generic-chip.c -+++ b/kernel/irq/generic-chip.c -@@ -39,7 +39,7 @@ void irq_gc_mask_disable_reg(struct irq_data *d) - u32 mask = d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.disable); -+ irq_reg_writel(gc, mask, ct->regs.disable); - *ct->mask_cache &= ~mask; - irq_gc_unlock(gc); - } -@@ -59,7 +59,7 @@ void irq_gc_mask_set_bit(struct irq_data *d) - - irq_gc_lock(gc); - *ct->mask_cache |= mask; -- irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask); -+ irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); - irq_gc_unlock(gc); - } - EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit); -@@ -79,7 +79,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d) - - irq_gc_lock(gc); - *ct->mask_cache &= ~mask; -- irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask); -+ irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); - irq_gc_unlock(gc); - } - EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit); -@@ -98,7 +98,7 
@@ void irq_gc_unmask_enable_reg(struct irq_data *d) - u32 mask = d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.enable); -+ irq_reg_writel(gc, mask, ct->regs.enable); - *ct->mask_cache |= mask; - irq_gc_unlock(gc); - } -@@ -114,7 +114,7 @@ void irq_gc_ack_set_bit(struct irq_data *d) - u32 mask = d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.ack); -+ irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); - } - EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit); -@@ -130,7 +130,7 @@ void irq_gc_ack_clr_bit(struct irq_data *d) - u32 mask = ~d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.ack); -+ irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); - } - -@@ -145,8 +145,8 @@ void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) - u32 mask = d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.mask); -- irq_reg_writel(mask, gc->reg_base + ct->regs.ack); -+ irq_reg_writel(gc, mask, ct->regs.mask); -+ irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); - } - -@@ -161,7 +161,7 @@ void irq_gc_eoi(struct irq_data *d) - u32 mask = d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.eoi); -+ irq_reg_writel(gc, mask, ct->regs.eoi); - irq_gc_unlock(gc); - } - -@@ -191,6 +191,16 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on) - return 0; - } - -+static u32 irq_readl_be(void __iomem *addr) -+{ -+ return ioread32be(addr); -+} -+ -+static void irq_writel_be(u32 val, void __iomem *addr) -+{ -+ iowrite32be(val, addr); -+} -+ - static void - irq_init_generic_chip(struct irq_chip_generic *gc, const char *name, - int num_ct, unsigned int irq_base, -@@ -245,7 +255,7 @@ irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags) - } - ct[i].mask_cache = mskptr; - if (flags & IRQ_GC_INIT_MASK_CACHE) -- *mskptr = irq_reg_readl(gc->reg_base + mskreg); -+ *mskptr = irq_reg_readl(gc, mskreg); - } - } - -@@ 
-300,7 +310,13 @@ int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, - dgc->gc[i] = gc = tmp; - irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip, - NULL, handler); -+ - gc->domain = d; -+ if (gcflags & IRQ_GC_BE_IO) { -+ gc->reg_readl = &irq_readl_be; -+ gc->reg_writel = &irq_writel_be; -+ } -+ - raw_spin_lock_irqsave(&gc_lock, flags); - list_add_tail(&gc->list, &gc_list); - raw_spin_unlock_irqrestore(&gc_lock, flags); -diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c -index 6534ff6..021f823 100644 ---- a/kernel/irq/irqdomain.c -+++ b/kernel/irq/irqdomain.c -@@ -23,6 +23,10 @@ static DEFINE_MUTEX(irq_domain_mutex); - static DEFINE_MUTEX(revmap_trees_mutex); - static struct irq_domain *irq_default_domain; - -+static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, -+ irq_hw_number_t hwirq, int node); -+static void irq_domain_check_hierarchy(struct irq_domain *domain); -+ - /** - * __irq_domain_add() - Allocate a new irq_domain data structure - * @of_node: optional device-tree node of the interrupt controller -@@ -30,7 +34,7 @@ static struct irq_domain *irq_default_domain; - * @hwirq_max: Maximum number of interrupts supported by controller - * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no - * direct mapping -- * @ops: map/unmap domain callbacks -+ * @ops: domain callbacks - * @host_data: Controller private data pointer - * - * Allocates and initialize and irq_domain structure. -@@ -56,6 +60,7 @@ struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, - domain->hwirq_max = hwirq_max; - domain->revmap_size = size; - domain->revmap_direct_max_irq = direct_max; -+ irq_domain_check_hierarchy(domain); - - mutex_lock(&irq_domain_mutex); - list_add(&domain->link, &irq_domain_list); -@@ -109,7 +114,7 @@ EXPORT_SYMBOL_GPL(irq_domain_remove); - * @first_irq: first number of irq block assigned to the domain, - * pass zero to assign irqs on-the-fly. 
If first_irq is non-zero, then - * pre-map all of the irqs in the domain to virqs starting at first_irq. -- * @ops: map/unmap domain callbacks -+ * @ops: domain callbacks - * @host_data: Controller private data pointer - * - * Allocates an irq_domain, and optionally if first_irq is positive then also -@@ -174,20 +179,20 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, - - domain = __irq_domain_add(of_node, first_hwirq + size, - first_hwirq + size, 0, ops, host_data); -- if (!domain) -- return NULL; -- -- irq_domain_associate_many(domain, first_irq, first_hwirq, size); -+ if (domain) -+ irq_domain_associate_many(domain, first_irq, first_hwirq, size); - - return domain; - } - EXPORT_SYMBOL_GPL(irq_domain_add_legacy); - - /** -- * irq_find_host() - Locates a domain for a given device node -+ * irq_find_matching_host() - Locates a domain for a given device node - * @node: device-tree node of the interrupt controller -+ * @bus_token: domain-specific data - */ --struct irq_domain *irq_find_host(struct device_node *node) -+struct irq_domain *irq_find_matching_host(struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - struct irq_domain *h, *found = NULL; - int rc; -@@ -196,13 +201,19 @@ struct irq_domain *irq_find_host(struct device_node *node) - * it might potentially be set to match all interrupts in - * the absence of a device node. This isn't a problem so far - * yet though... -+ * -+ * bus_token == DOMAIN_BUS_ANY matches any domain, any other -+ * values must generate an exact match for the domain to be -+ * selected. 
- */ - mutex_lock(&irq_domain_mutex); - list_for_each_entry(h, &irq_domain_list, link) { - if (h->ops->match) -- rc = h->ops->match(h, node); -+ rc = h->ops->match(h, node, bus_token); - else -- rc = (h->of_node != NULL) && (h->of_node == node); -+ rc = ((h->of_node != NULL) && (h->of_node == node) && -+ ((bus_token == DOMAIN_BUS_ANY) || -+ (h->bus_token == bus_token))); - - if (rc) { - found = h; -@@ -212,7 +223,7 @@ struct irq_domain *irq_find_host(struct device_node *node) - mutex_unlock(&irq_domain_mutex); - return found; - } --EXPORT_SYMBOL_GPL(irq_find_host); -+EXPORT_SYMBOL_GPL(irq_find_matching_host); - - /** - * irq_set_default_host() - Set a "default" irq domain -@@ -388,7 +399,6 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping); - unsigned int irq_create_mapping(struct irq_domain *domain, - irq_hw_number_t hwirq) - { -- unsigned int hint; - int virq; - - pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); -@@ -410,12 +420,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain, - } - - /* Allocate a virtual interrupt number */ -- hint = hwirq % nr_irqs; -- if (hint == 0) -- hint++; -- virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node)); -- if (virq <= 0) -- virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node)); -+ virq = irq_domain_alloc_descs(-1, 1, hwirq, -+ of_node_to_nid(domain->of_node)); - if (virq <= 0) { - pr_debug("-> virq allocation failed\n"); - return 0; -@@ -471,7 +477,7 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) - struct irq_domain *domain; - irq_hw_number_t hwirq; - unsigned int type = IRQ_TYPE_NONE; -- unsigned int virq; -+ int virq; - - domain = irq_data->np ? 
irq_find_host(irq_data->np) : irq_default_domain; - if (!domain) { -@@ -489,10 +495,24 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) - return 0; - } - -- /* Create mapping */ -- virq = irq_create_mapping(domain, hwirq); -- if (!virq) -- return virq; -+ if (irq_domain_is_hierarchy(domain)) { -+ /* -+ * If we've already configured this interrupt, -+ * don't do it again, or hell will break loose. -+ */ -+ virq = irq_find_mapping(domain, hwirq); -+ if (virq) -+ return virq; -+ -+ virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, irq_data); -+ if (virq <= 0) -+ return 0; -+ } else { -+ /* Create mapping */ -+ virq = irq_create_mapping(domain, hwirq); -+ if (!virq) -+ return virq; -+ } - - /* Set type if specified and different than the current one */ - if (type != IRQ_TYPE_NONE && -@@ -540,8 +560,8 @@ unsigned int irq_find_mapping(struct irq_domain *domain, - return 0; - - if (hwirq < domain->revmap_direct_max_irq) { -- data = irq_get_irq_data(hwirq); -- if (data && (data->domain == domain) && (data->hwirq == hwirq)) -+ data = irq_domain_get_irq_data(domain, hwirq); -+ if (data && data->hwirq == hwirq) - return hwirq; - } - -@@ -709,3 +729,518 @@ const struct irq_domain_ops irq_domain_simple_ops = { - .xlate = irq_domain_xlate_onetwocell, - }; - EXPORT_SYMBOL_GPL(irq_domain_simple_ops); -+ -+static int irq_domain_alloc_descs(int virq, unsigned int cnt, -+ irq_hw_number_t hwirq, int node) -+{ -+ unsigned int hint; -+ -+ if (virq >= 0) { -+ virq = irq_alloc_descs(virq, virq, cnt, node); -+ } else { -+ hint = hwirq % nr_irqs; -+ if (hint == 0) -+ hint++; -+ virq = irq_alloc_descs_from(hint, cnt, node); -+ if (virq <= 0 && hint > 1) -+ virq = irq_alloc_descs_from(1, cnt, node); -+ } -+ -+ return virq; -+} -+ -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+/** -+ * irq_domain_add_hierarchy - Add a irqdomain into the hierarchy -+ * @parent: Parent irq domain to associate with the new domain -+ * @flags: Irq domain flags associated to the domain -+ * 
@size: Size of the domain. See below -+ * @node: Optional device-tree node of the interrupt controller -+ * @ops: Pointer to the interrupt domain callbacks -+ * @host_data: Controller private data pointer -+ * -+ * If @size is 0 a tree domain is created, otherwise a linear domain. -+ * -+ * If successful the parent is associated to the new domain and the -+ * domain flags are set. -+ * Returns pointer to IRQ domain, or NULL on failure. -+ */ -+struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, -+ unsigned int flags, -+ unsigned int size, -+ struct device_node *node, -+ const struct irq_domain_ops *ops, -+ void *host_data) -+{ -+ struct irq_domain *domain; -+ -+ if (size) -+ domain = irq_domain_add_linear(node, size, ops, host_data); -+ else -+ domain = irq_domain_add_tree(node, ops, host_data); -+ if (domain) { -+ domain->parent = parent; -+ domain->flags |= flags; -+ } -+ -+ return domain; -+} -+ -+static void irq_domain_insert_irq(int virq) -+{ -+ struct irq_data *data; -+ -+ for (data = irq_get_irq_data(virq); data; data = data->parent_data) { -+ struct irq_domain *domain = data->domain; -+ irq_hw_number_t hwirq = data->hwirq; -+ -+ if (hwirq < domain->revmap_size) { -+ domain->linear_revmap[hwirq] = virq; -+ } else { -+ mutex_lock(&revmap_trees_mutex); -+ radix_tree_insert(&domain->revmap_tree, hwirq, data); -+ mutex_unlock(&revmap_trees_mutex); -+ } -+ -+ /* If not already assigned, give the domain the chip's name */ -+ if (!domain->name && data->chip) -+ domain->name = data->chip->name; -+ } -+ -+ irq_clear_status_flags(virq, IRQ_NOREQUEST); -+} -+ -+static void irq_domain_remove_irq(int virq) -+{ -+ struct irq_data *data; -+ -+ irq_set_status_flags(virq, IRQ_NOREQUEST); -+ irq_set_chip_and_handler(virq, NULL, NULL); -+ synchronize_irq(virq); -+ smp_mb(); -+ -+ for (data = irq_get_irq_data(virq); data; data = data->parent_data) { -+ struct irq_domain *domain = data->domain; -+ irq_hw_number_t hwirq = data->hwirq; -+ -+ if (hwirq < 
domain->revmap_size) { -+ domain->linear_revmap[hwirq] = 0; -+ } else { -+ mutex_lock(&revmap_trees_mutex); -+ radix_tree_delete(&domain->revmap_tree, hwirq); -+ mutex_unlock(&revmap_trees_mutex); -+ } -+ } -+} -+ -+static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain, -+ struct irq_data *child) -+{ -+ struct irq_data *irq_data; -+ -+ irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL, child->node); -+ if (irq_data) { -+ child->parent_data = irq_data; -+ irq_data->irq = child->irq; -+ irq_data->node = child->node; -+ irq_data->domain = domain; -+ } -+ -+ return irq_data; -+} -+ -+static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs) -+{ -+ struct irq_data *irq_data, *tmp; -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ irq_data = irq_get_irq_data(virq + i); -+ tmp = irq_data->parent_data; -+ irq_data->parent_data = NULL; -+ irq_data->domain = NULL; -+ -+ while (tmp) { -+ irq_data = tmp; -+ tmp = tmp->parent_data; -+ kfree(irq_data); -+ } -+ } -+} -+ -+static int irq_domain_alloc_irq_data(struct irq_domain *domain, -+ unsigned int virq, unsigned int nr_irqs) -+{ -+ struct irq_data *irq_data; -+ struct irq_domain *parent; -+ int i; -+ -+ /* The outermost irq_data is embedded in struct irq_desc */ -+ for (i = 0; i < nr_irqs; i++) { -+ irq_data = irq_get_irq_data(virq + i); -+ irq_data->domain = domain; -+ -+ for (parent = domain->parent; parent; parent = parent->parent) { -+ irq_data = irq_domain_insert_irq_data(parent, irq_data); -+ if (!irq_data) { -+ irq_domain_free_irq_data(virq, i + 1); -+ return -ENOMEM; -+ } -+ } -+ } -+ -+ return 0; -+} -+ -+/** -+ * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain -+ * @domain: domain to match -+ * @virq: IRQ number to get irq_data -+ */ -+struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, -+ unsigned int virq) -+{ -+ struct irq_data *irq_data; -+ -+ for (irq_data = irq_get_irq_data(virq); irq_data; -+ irq_data = 
irq_data->parent_data) -+ if (irq_data->domain == domain) -+ return irq_data; -+ -+ return NULL; -+} -+ -+/** -+ * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain -+ * @domain: Interrupt domain to match -+ * @virq: IRQ number -+ * @hwirq: The hwirq number -+ * @chip: The associated interrupt chip -+ * @chip_data: The associated chip data -+ */ -+int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, -+ irq_hw_number_t hwirq, struct irq_chip *chip, -+ void *chip_data) -+{ -+ struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); -+ -+ if (!irq_data) -+ return -ENOENT; -+ -+ irq_data->hwirq = hwirq; -+ irq_data->chip = chip ? chip : &no_irq_chip; -+ irq_data->chip_data = chip_data; -+ -+ return 0; -+} -+ -+/** -+ * irq_domain_set_info - Set the complete data for a @virq in @domain -+ * @domain: Interrupt domain to match -+ * @virq: IRQ number -+ * @hwirq: The hardware interrupt number -+ * @chip: The associated interrupt chip -+ * @chip_data: The associated interrupt chip data -+ * @handler: The interrupt flow handler -+ * @handler_data: The interrupt flow handler data -+ * @handler_name: The interrupt handler name -+ */ -+void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, -+ irq_hw_number_t hwirq, struct irq_chip *chip, -+ void *chip_data, irq_flow_handler_t handler, -+ void *handler_data, const char *handler_name) -+{ -+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data); -+ __irq_set_handler(virq, handler, 0, handler_name); -+ irq_set_handler_data(virq, handler_data); -+} -+ -+/** -+ * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data -+ * @irq_data: The pointer to irq_data -+ */ -+void irq_domain_reset_irq_data(struct irq_data *irq_data) -+{ -+ irq_data->hwirq = 0; -+ irq_data->chip = &no_irq_chip; -+ irq_data->chip_data = NULL; -+} -+ -+/** -+ * irq_domain_free_irqs_common - Clear irq_data and free the parent -+ * @domain: Interrupt domain 
to match -+ * @virq: IRQ number to start with -+ * @nr_irqs: The number of irqs to free -+ */ -+void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs) -+{ -+ struct irq_data *irq_data; -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ irq_data = irq_domain_get_irq_data(domain, virq + i); -+ if (irq_data) -+ irq_domain_reset_irq_data(irq_data); -+ } -+ irq_domain_free_irqs_parent(domain, virq, nr_irqs); -+} -+ -+/** -+ * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent -+ * @domain: Interrupt domain to match -+ * @virq: IRQ number to start with -+ * @nr_irqs: The number of irqs to free -+ */ -+void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs) -+{ -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ irq_set_handler_data(virq + i, NULL); -+ irq_set_handler(virq + i, NULL); -+ } -+ irq_domain_free_irqs_common(domain, virq, nr_irqs); -+} -+ -+static bool irq_domain_is_auto_recursive(struct irq_domain *domain) -+{ -+ return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE; -+} -+ -+static void irq_domain_free_irqs_recursive(struct irq_domain *domain, -+ unsigned int irq_base, -+ unsigned int nr_irqs) -+{ -+ domain->ops->free(domain, irq_base, nr_irqs); -+ if (irq_domain_is_auto_recursive(domain)) { -+ BUG_ON(!domain->parent); -+ irq_domain_free_irqs_recursive(domain->parent, irq_base, -+ nr_irqs); -+ } -+} -+ -+static int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, -+ unsigned int irq_base, -+ unsigned int nr_irqs, void *arg) -+{ -+ int ret = 0; -+ struct irq_domain *parent = domain->parent; -+ bool recursive = irq_domain_is_auto_recursive(domain); -+ -+ BUG_ON(recursive && !parent); -+ if (recursive) -+ ret = irq_domain_alloc_irqs_recursive(parent, irq_base, -+ nr_irqs, arg); -+ if (ret >= 0) -+ ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg); -+ if (ret < 0 && recursive) -+ 
irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs); -+ -+ return ret; -+} -+ -+/** -+ * __irq_domain_alloc_irqs - Allocate IRQs from domain -+ * @domain: domain to allocate from -+ * @irq_base: allocate specified IRQ nubmer if irq_base >= 0 -+ * @nr_irqs: number of IRQs to allocate -+ * @node: NUMA node id for memory allocation -+ * @arg: domain specific argument -+ * @realloc: IRQ descriptors have already been allocated if true -+ * -+ * Allocate IRQ numbers and initialized all data structures to support -+ * hierarchy IRQ domains. -+ * Parameter @realloc is mainly to support legacy IRQs. -+ * Returns error code or allocated IRQ number -+ * -+ * The whole process to setup an IRQ has been split into two steps. -+ * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ -+ * descriptor and required hardware resources. The second step, -+ * irq_domain_activate_irq(), is to program hardwares with preallocated -+ * resources. In this way, it's easier to rollback when failing to -+ * allocate resources. 
-+ */ -+int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, -+ unsigned int nr_irqs, int node, void *arg, -+ bool realloc) -+{ -+ int i, ret, virq; -+ -+ if (domain == NULL) { -+ domain = irq_default_domain; -+ if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n")) -+ return -EINVAL; -+ } -+ -+ if (!domain->ops->alloc) { -+ pr_debug("domain->ops->alloc() is NULL\n"); -+ return -ENOSYS; -+ } -+ -+ if (realloc && irq_base >= 0) { -+ virq = irq_base; -+ } else { -+ virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node); -+ if (virq < 0) { -+ pr_debug("cannot allocate IRQ(base %d, count %d)\n", -+ irq_base, nr_irqs); -+ return virq; -+ } -+ } -+ -+ if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) { -+ pr_debug("cannot allocate memory for IRQ%d\n", virq); -+ ret = -ENOMEM; -+ goto out_free_desc; -+ } -+ -+ mutex_lock(&irq_domain_mutex); -+ ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg); -+ if (ret < 0) { -+ mutex_unlock(&irq_domain_mutex); -+ goto out_free_irq_data; -+ } -+ for (i = 0; i < nr_irqs; i++) -+ irq_domain_insert_irq(virq + i); -+ mutex_unlock(&irq_domain_mutex); -+ -+ return virq; -+ -+out_free_irq_data: -+ irq_domain_free_irq_data(virq, nr_irqs); -+out_free_desc: -+ irq_free_descs(virq, nr_irqs); -+ return ret; -+} -+ -+/** -+ * irq_domain_free_irqs - Free IRQ number and associated data structures -+ * @virq: base IRQ number -+ * @nr_irqs: number of IRQs to free -+ */ -+void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) -+{ -+ struct irq_data *data = irq_get_irq_data(virq); -+ int i; -+ -+ if (WARN(!data || !data->domain || !data->domain->ops->free, -+ "NULL pointer, cannot free irq\n")) -+ return; -+ -+ mutex_lock(&irq_domain_mutex); -+ for (i = 0; i < nr_irqs; i++) -+ irq_domain_remove_irq(virq + i); -+ irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs); -+ mutex_unlock(&irq_domain_mutex); -+ -+ irq_domain_free_irq_data(virq, nr_irqs); -+ irq_free_descs(virq, nr_irqs); -+} -+ -+/** 
-+ * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain -+ * @irq_base: Base IRQ number -+ * @nr_irqs: Number of IRQs to allocate -+ * @arg: Allocation data (arch/domain specific) -+ * -+ * Check whether the domain has been setup recursive. If not allocate -+ * through the parent domain. -+ */ -+int irq_domain_alloc_irqs_parent(struct irq_domain *domain, -+ unsigned int irq_base, unsigned int nr_irqs, -+ void *arg) -+{ -+ /* irq_domain_alloc_irqs_recursive() has called parent's alloc() */ -+ if (irq_domain_is_auto_recursive(domain)) -+ return 0; -+ -+ domain = domain->parent; -+ if (domain) -+ return irq_domain_alloc_irqs_recursive(domain, irq_base, -+ nr_irqs, arg); -+ return -ENOSYS; -+} -+ -+/** -+ * irq_domain_free_irqs_parent - Free interrupts from parent domain -+ * @irq_base: Base IRQ number -+ * @nr_irqs: Number of IRQs to free -+ * -+ * Check whether the domain has been setup recursive. If not free -+ * through the parent domain. -+ */ -+void irq_domain_free_irqs_parent(struct irq_domain *domain, -+ unsigned int irq_base, unsigned int nr_irqs) -+{ -+ /* irq_domain_free_irqs_recursive() will call parent's free */ -+ if (!irq_domain_is_auto_recursive(domain) && domain->parent) -+ irq_domain_free_irqs_recursive(domain->parent, irq_base, -+ nr_irqs); -+} -+ -+/** -+ * irq_domain_activate_irq - Call domain_ops->activate recursively to activate -+ * interrupt -+ * @irq_data: outermost irq_data associated with interrupt -+ * -+ * This is the second step to call domain_ops->activate to program interrupt -+ * controllers, so the interrupt could actually get delivered. 
-+ */ -+void irq_domain_activate_irq(struct irq_data *irq_data) -+{ -+ if (irq_data && irq_data->domain) { -+ struct irq_domain *domain = irq_data->domain; -+ -+ if (irq_data->parent_data) -+ irq_domain_activate_irq(irq_data->parent_data); -+ if (domain->ops->activate) -+ domain->ops->activate(domain, irq_data); -+ } -+} -+ -+/** -+ * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to -+ * deactivate interrupt -+ * @irq_data: outermost irq_data associated with interrupt -+ * -+ * It calls domain_ops->deactivate to program interrupt controllers to disable -+ * interrupt delivery. -+ */ -+void irq_domain_deactivate_irq(struct irq_data *irq_data) -+{ -+ if (irq_data && irq_data->domain) { -+ struct irq_domain *domain = irq_data->domain; -+ -+ if (domain->ops->deactivate) -+ domain->ops->deactivate(domain, irq_data); -+ if (irq_data->parent_data) -+ irq_domain_deactivate_irq(irq_data->parent_data); -+ } -+} -+ -+static void irq_domain_check_hierarchy(struct irq_domain *domain) -+{ -+ /* Hierarchy irq_domains must implement callback alloc() */ -+ if (domain->ops->alloc) -+ domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; -+} -+#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ -+/** -+ * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain -+ * @domain: domain to match -+ * @virq: IRQ number to get irq_data -+ */ -+struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, -+ unsigned int virq) -+{ -+ struct irq_data *irq_data = irq_get_irq_data(virq); -+ -+ return (irq_data && irq_data->domain == domain) ? 
irq_data : NULL; -+} -+ -+static void irq_domain_check_hierarchy(struct irq_domain *domain) -+{ -+} -+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 0a9104b..acb401f 100644 ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -183,6 +183,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, - ret = chip->irq_set_affinity(data, mask, force); - switch (ret) { - case IRQ_SET_MASK_OK: -+ case IRQ_SET_MASK_OK_DONE: - cpumask_copy(data->affinity, mask); - case IRQ_SET_MASK_OK_NOCOPY: - irq_set_thread_affinity(desc); -@@ -600,6 +601,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, - - switch (ret) { - case IRQ_SET_MASK_OK: -+ case IRQ_SET_MASK_OK_DONE: - irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); - irqd_set(&desc->irq_data, flags); - -@@ -1756,3 +1758,94 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler, - - return retval; - } -+ -+/** -+ * irq_get_irqchip_state - returns the irqchip state of a interrupt. -+ * @irq: Interrupt line that is forwarded to a VM -+ * @which: One of IRQCHIP_STATE_* the caller wants to know about -+ * @state: a pointer to a boolean where the state is to be storeed -+ * -+ * This call snapshots the internal irqchip state of an -+ * interrupt, returning into @state the bit corresponding to -+ * stage @which -+ * -+ * This function should be called with preemption disabled if the -+ * interrupt controller has per-cpu registers. 
-+ */ -+int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, -+ bool *state) -+{ -+ struct irq_desc *desc; -+ struct irq_data *data; -+ struct irq_chip *chip; -+ unsigned long flags; -+ int err = -EINVAL; -+ -+ desc = irq_get_desc_buslock(irq, &flags, 0); -+ if (!desc) -+ return err; -+ -+ data = irq_desc_get_irq_data(desc); -+ -+ do { -+ chip = irq_data_get_irq_chip(data); -+ if (chip->irq_get_irqchip_state) -+ break; -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ data = data->parent_data; -+#else -+ data = NULL; -+#endif -+ } while (data); -+ -+ if (data) -+ err = chip->irq_get_irqchip_state(data, which, state); -+ -+ irq_put_desc_busunlock(desc, flags); -+ return err; -+} -+ -+/** -+ * irq_set_irqchip_state - set the state of a forwarded interrupt. -+ * @irq: Interrupt line that is forwarded to a VM -+ * @which: State to be restored (one of IRQCHIP_STATE_*) -+ * @val: Value corresponding to @which -+ * -+ * This call sets the internal irqchip state of an interrupt, -+ * depending on the value of @which. -+ * -+ * This function should be called with preemption disabled if the -+ * interrupt controller has per-cpu registers. 
-+ */ -+int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, -+ bool val) -+{ -+ struct irq_desc *desc; -+ struct irq_data *data; -+ struct irq_chip *chip; -+ unsigned long flags; -+ int err = -EINVAL; -+ -+ desc = irq_get_desc_buslock(irq, &flags, 0); -+ if (!desc) -+ return err; -+ -+ data = irq_desc_get_irq_data(desc); -+ -+ do { -+ chip = irq_data_get_irq_chip(data); -+ if (chip->irq_set_irqchip_state) -+ break; -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ data = data->parent_data; -+#else -+ data = NULL; -+#endif -+ } while (data); -+ -+ if (data) -+ err = chip->irq_set_irqchip_state(data, which, val); -+ -+ irq_put_desc_busunlock(desc, flags); -+ return err; -+} -diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c -new file mode 100644 -index 0000000..54433c2 ---- /dev/null -+++ b/kernel/irq/msi.c -@@ -0,0 +1,356 @@ -+/* -+ * linux/kernel/irq/msi.c -+ * -+ * Copyright (C) 2014 Intel Corp. -+ * Author: Jiang Liu -+ * -+ * This file is licensed under GPLv2. -+ * -+ * This file contains common code to support Message Signalled Interrupt for -+ * PCI compatible and non PCI compatible devices. 
-+ */ -+#include -+#include -+#include -+#include -+#include -+ -+/* Temparory solution for building, will be removed later */ -+#include -+ -+struct msi_desc *alloc_msi_entry(struct device *dev) -+{ -+ struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); -+ if (!desc) -+ return NULL; -+ -+ INIT_LIST_HEAD(&desc->list); -+ desc->dev = dev; -+ -+ return desc; -+} -+ -+void free_msi_entry(struct msi_desc *entry) -+{ -+ kfree(entry); -+} -+ -+void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) -+{ -+ *msg = entry->msg; -+} -+ -+void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) -+{ -+ struct msi_desc *entry = irq_get_msi_desc(irq); -+ -+ __get_cached_msi_msg(entry, msg); -+} -+EXPORT_SYMBOL_GPL(get_cached_msi_msg); -+ -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+static inline void irq_chip_write_msi_msg(struct irq_data *data, -+ struct msi_msg *msg) -+{ -+ data->chip->irq_write_msi_msg(data, msg); -+} -+ -+/** -+ * msi_domain_set_affinity - Generic affinity setter function for MSI domains -+ * @irq_data: The irq data associated to the interrupt -+ * @mask: The affinity mask to set -+ * @force: Flag to enforce setting (disable online checks) -+ * -+ * Intended to be used by MSI interrupt controllers which are -+ * implemented with hierarchical domains. 
-+ */ -+int msi_domain_set_affinity(struct irq_data *irq_data, -+ const struct cpumask *mask, bool force) -+{ -+ struct irq_data *parent = irq_data->parent_data; -+ struct msi_msg msg; -+ int ret; -+ -+ ret = parent->chip->irq_set_affinity(parent, mask, force); -+ if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) { -+ BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); -+ irq_chip_write_msi_msg(irq_data, &msg); -+ } -+ -+ return ret; -+} -+ -+static void msi_domain_activate(struct irq_domain *domain, -+ struct irq_data *irq_data) -+{ -+ struct msi_msg msg; -+ -+ BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); -+ irq_chip_write_msi_msg(irq_data, &msg); -+} -+ -+static void msi_domain_deactivate(struct irq_domain *domain, -+ struct irq_data *irq_data) -+{ -+ struct msi_msg msg; -+ -+ memset(&msg, 0, sizeof(msg)); -+ irq_chip_write_msi_msg(irq_data, &msg); -+} -+ -+static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *arg) -+{ -+ struct msi_domain_info *info = domain->host_data; -+ struct msi_domain_ops *ops = info->ops; -+ irq_hw_number_t hwirq = ops->get_hwirq(info, arg); -+ int i, ret; -+ -+#if 0 -+ if (irq_find_mapping(domain, hwirq) > 0) -+ return -EEXIST; -+#endif -+ -+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); -+ if (ret < 0) -+ return ret; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); -+ if (ret < 0) { -+ if (ops->msi_free) { -+ for (i--; i > 0; i--) -+ ops->msi_free(domain, info, virq + i); -+ } -+ irq_domain_free_irqs_top(domain, virq, nr_irqs); -+ return ret; -+ } -+ } -+ -+ return 0; -+} -+ -+static void msi_domain_free(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs) -+{ -+ struct msi_domain_info *info = domain->host_data; -+ int i; -+ -+ if (info->ops->msi_free) { -+ for (i = 0; i < nr_irqs; i++) -+ info->ops->msi_free(domain, info, virq + i); -+ } -+ irq_domain_free_irqs_top(domain, virq, nr_irqs); -+} -+ 
-+static struct irq_domain_ops msi_domain_ops = { -+ .alloc = msi_domain_alloc, -+ .free = msi_domain_free, -+ .activate = msi_domain_activate, -+ .deactivate = msi_domain_deactivate, -+}; -+ -+#ifdef GENERIC_MSI_DOMAIN_OPS -+static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info, -+ msi_alloc_info_t *arg) -+{ -+ return arg->hwirq; -+} -+ -+static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev, -+ int nvec, msi_alloc_info_t *arg) -+{ -+ memset(arg, 0, sizeof(*arg)); -+ return 0; -+} -+ -+static void msi_domain_ops_set_desc(msi_alloc_info_t *arg, -+ struct msi_desc *desc) -+{ -+ arg->desc = desc; -+} -+#else -+#define msi_domain_ops_get_hwirq NULL -+#define msi_domain_ops_prepare NULL -+#define msi_domain_ops_set_desc NULL -+#endif /* !GENERIC_MSI_DOMAIN_OPS */ -+ -+static int msi_domain_ops_init(struct irq_domain *domain, -+ struct msi_domain_info *info, -+ unsigned int virq, irq_hw_number_t hwirq, -+ msi_alloc_info_t *arg) -+{ -+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, -+ info->chip_data); -+ if (info->handler && info->handler_name) { -+ __irq_set_handler(virq, info->handler, 0, info->handler_name); -+ if (info->handler_data) -+ irq_set_handler_data(virq, info->handler_data); -+ } -+ return 0; -+} -+ -+static int msi_domain_ops_check(struct irq_domain *domain, -+ struct msi_domain_info *info, -+ struct device *dev) -+{ -+ return 0; -+} -+ -+static struct msi_domain_ops msi_domain_ops_default = { -+ .get_hwirq = msi_domain_ops_get_hwirq, -+ .msi_init = msi_domain_ops_init, -+ .msi_check = msi_domain_ops_check, -+ .msi_prepare = msi_domain_ops_prepare, -+ .set_desc = msi_domain_ops_set_desc, -+}; -+ -+static void msi_domain_update_dom_ops(struct msi_domain_info *info) -+{ -+ struct msi_domain_ops *ops = info->ops; -+ -+ if (ops == NULL) { -+ info->ops = &msi_domain_ops_default; -+ return; -+ } -+ -+ if (ops->get_hwirq == NULL) -+ ops->get_hwirq = msi_domain_ops_default.get_hwirq; -+ if 
(ops->msi_init == NULL) -+ ops->msi_init = msi_domain_ops_default.msi_init; -+ if (ops->msi_check == NULL) -+ ops->msi_check = msi_domain_ops_default.msi_check; -+ if (ops->msi_prepare == NULL) -+ ops->msi_prepare = msi_domain_ops_default.msi_prepare; -+ if (ops->set_desc == NULL) -+ ops->set_desc = msi_domain_ops_default.set_desc; -+} -+ -+static void msi_domain_update_chip_ops(struct msi_domain_info *info) -+{ -+ struct irq_chip *chip = info->chip; -+ -+ BUG_ON(!chip); -+ if (!chip->irq_mask) -+ chip->irq_mask = pci_msi_mask_irq; -+ if (!chip->irq_unmask) -+ chip->irq_unmask = pci_msi_unmask_irq; -+ if (!chip->irq_set_affinity) -+ chip->irq_set_affinity = msi_domain_set_affinity; -+} -+ -+/** -+ * msi_create_irq_domain - Create a MSI interrupt domain -+ * @of_node: Optional device-tree node of the interrupt controller -+ * @info: MSI domain info -+ * @parent: Parent irq domain -+ */ -+struct irq_domain *msi_create_irq_domain(struct device_node *node, -+ struct msi_domain_info *info, -+ struct irq_domain *parent) -+{ -+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) -+ msi_domain_update_dom_ops(info); -+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) -+ msi_domain_update_chip_ops(info); -+ -+ return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops, -+ info); -+} -+ -+/** -+ * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain -+ * @domain: The domain to allocate from -+ * @dev: Pointer to device struct of the device for which the interrupts -+ * are allocated -+ * @nvec: The number of interrupts to allocate -+ * -+ * Returns 0 on success or an error code. 
-+ */ -+int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, -+ int nvec) -+{ -+ struct msi_domain_info *info = domain->host_data; -+ struct msi_domain_ops *ops = info->ops; -+ msi_alloc_info_t arg; -+ struct msi_desc *desc; -+ int i, ret, virq = -1; -+ -+ ret = ops->msi_check(domain, info, dev); -+ if (ret == 0) -+ ret = ops->msi_prepare(domain, dev, nvec, &arg); -+ if (ret) -+ return ret; -+ -+ for_each_msi_entry(desc, dev) { -+ ops->set_desc(&arg, desc); -+ if (info->flags & MSI_FLAG_IDENTITY_MAP) -+ virq = (int)ops->get_hwirq(info, &arg); -+ else -+ virq = -1; -+ -+ virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used, -+ dev_to_node(dev), &arg, false); -+ if (virq < 0) { -+ ret = -ENOSPC; -+ if (ops->handle_error) -+ ret = ops->handle_error(domain, desc, ret); -+ if (ops->msi_finish) -+ ops->msi_finish(&arg, ret); -+ return ret; -+ } -+ -+ for (i = 0; i < desc->nvec_used; i++) -+ irq_set_msi_desc_off(virq, i, desc); -+ } -+ -+ if (ops->msi_finish) -+ ops->msi_finish(&arg, 0); -+ -+ for_each_msi_entry(desc, dev) { -+ if (desc->nvec_used == 1) -+ dev_dbg(dev, "irq %d for MSI\n", virq); -+ else -+ dev_dbg(dev, "irq [%d-%d] for MSI\n", -+ virq, virq + desc->nvec_used - 1); -+ } -+ -+ return 0; -+} -+ -+/** -+ * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated tp @dev -+ * @domain: The domain to managing the interrupts -+ * @dev: Pointer to device struct of the device for which the interrupts -+ * are free -+ */ -+void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) -+{ -+ struct msi_desc *desc; -+ -+ for_each_msi_entry(desc, dev) { -+ /* -+ * We might have failed to allocate an MSI early -+ * enough that there is no IRQ associated to this -+ * entry. If that's the case, don't do anything. 
-+ */ -+ if (desc->irq) { -+ irq_domain_free_irqs(desc->irq, desc->nvec_used); -+ desc->irq = 0; -+ } -+ } -+} -+ -+/** -+ * msi_get_domain_info - Get the MSI interrupt domain info for @domain -+ * @domain: The interrupt domain to retrieve data from -+ * -+ * Returns the pointer to the msi_domain_info stored in -+ * @domain->host_data. -+ */ -+struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain) -+{ -+ return (struct msi_domain_info *)domain->host_data; -+} -+ -+#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ -diff --git a/kernel/resource.c b/kernel/resource.c -index 0bcebff..19f2357 100644 ---- a/kernel/resource.c -+++ b/kernel/resource.c -@@ -22,6 +22,7 @@ - #include - #include - #include -+#include - #include - - -@@ -1529,6 +1530,30 @@ int iomem_is_exclusive(u64 addr) - return err; - } - -+struct resource_entry *resource_list_create_entry(struct resource *res, -+ size_t extra_size) -+{ -+ struct resource_entry *entry; -+ -+ entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL); -+ if (entry) { -+ INIT_LIST_HEAD(&entry->node); -+ entry->res = res ? 
res : &entry->__res; -+ } -+ -+ return entry; -+} -+EXPORT_SYMBOL(resource_list_create_entry); -+ -+void resource_list_free(struct list_head *head) -+{ -+ struct resource_entry *entry, *tmp; -+ -+ list_for_each_entry_safe(entry, tmp, head, node) -+ resource_list_destroy_entry(entry); -+} -+EXPORT_SYMBOL(resource_list_free); -+ - static int __init strict_iomem(char *str) - { - if (strstr(str, "relaxed")) -diff --git a/mm/Makefile b/mm/Makefile -index 8405eb0..e0bda2d 100644 ---- a/mm/Makefile -+++ b/mm/Makefile -@@ -18,7 +18,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ - mm_init.o mmu_context.o percpu.o slab_common.o \ - compaction.o vmacache.o \ - interval_tree.o list_lru.o workingset.o \ -- iov_iter.o debug.o $(mmu-y) -+ iov_iter.o prfile.o debug.o $(mmu-y) - - obj-y += init-mm.o - -diff --git a/mm/filemap.c b/mm/filemap.c -index 7e6ab98..2fe1e57 100644 ---- a/mm/filemap.c -+++ b/mm/filemap.c -@@ -2063,7 +2063,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) - int ret = VM_FAULT_LOCKED; - - sb_start_pagefault(inode->i_sb); -- file_update_time(vma->vm_file); -+ vma_file_update_time(vma); - lock_page(page); - if (page->mapping != inode->i_mapping) { - unlock_page(page); -diff --git a/mm/fremap.c b/mm/fremap.c -index 72b8fa3..a00bbf0 100644 ---- a/mm/fremap.c -+++ b/mm/fremap.c -@@ -224,16 +224,28 @@ get_write_lock: - */ - if (mapping_cap_account_dirty(mapping)) { - unsigned long addr; -- struct file *file = get_file(vma->vm_file); -+ struct file *file = vma->vm_file, -+ *prfile = vma->vm_prfile; -+ - /* mmap_region may free vma; grab the info now */ - vm_flags = vma->vm_flags; - -+ vma_get_file(vma); - addr = mmap_region(file, start, size, vm_flags, pgoff); -- fput(file); -+ vma_fput(vma); - if (IS_ERR_VALUE(addr)) { - err = addr; - } else { - BUG_ON(addr != start); -+ if (prfile) { -+ struct vm_area_struct *new_vma; -+ -+ new_vma = find_vma(mm, addr); -+ if (!new_vma->vm_prfile) -+ new_vma->vm_prfile = prfile; -+ if (new_vma != vma) 
-+ get_file(prfile); -+ } - err = 0; - } - goto out_freed; -diff --git a/mm/memory.c b/mm/memory.c -index 90fb265..844df2e 100644 ---- a/mm/memory.c -+++ b/mm/memory.c -@@ -2156,7 +2156,7 @@ reuse: - - /* file_update_time outside page_lock */ - if (vma->vm_file) -- file_update_time(vma->vm_file); -+ vma_file_update_time(vma); - } - put_page(dirty_page); - if (page_mkwrite) { -diff --git a/mm/mmap.c b/mm/mmap.c -index f88b4f9..9994987 100644 ---- a/mm/mmap.c -+++ b/mm/mmap.c -@@ -277,7 +277,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); - if (vma->vm_file) -- fput(vma->vm_file); -+ vma_fput(vma); - mpol_put(vma_policy(vma)); - kmem_cache_free(vm_area_cachep, vma); - return next; -@@ -895,7 +895,7 @@ again: remove_next = 1 + (end > next->vm_end); - if (remove_next) { - if (file) { - uprobe_munmap(next, next->vm_start, next->vm_end); -- fput(file); -+ vma_fput(vma); - } - if (next->anon_vma) - anon_vma_merge(vma, next); -@@ -1680,8 +1680,8 @@ out: - return addr; - - unmap_and_free_vma: -+ vma_fput(vma); - vma->vm_file = NULL; -- fput(file); - - /* Undo any partial mapping done by a device driver. 
*/ - unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); -@@ -2480,7 +2480,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, - goto out_free_mpol; - - if (new->vm_file) -- get_file(new->vm_file); -+ vma_get_file(new); - - if (new->vm_ops && new->vm_ops->open) - new->vm_ops->open(new); -@@ -2499,7 +2499,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, - if (new->vm_ops && new->vm_ops->close) - new->vm_ops->close(new); - if (new->vm_file) -- fput(new->vm_file); -+ vma_fput(new); - unlink_anon_vmas(new); - out_free_mpol: - mpol_put(vma_policy(new)); -@@ -2889,7 +2889,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, - if (anon_vma_clone(new_vma, vma)) - goto out_free_mempol; - if (new_vma->vm_file) -- get_file(new_vma->vm_file); -+ vma_get_file(new_vma); - if (new_vma->vm_ops && new_vma->vm_ops->open) - new_vma->vm_ops->open(new_vma); - vma_link(mm, new_vma, prev, rb_link, rb_parent); -diff --git a/mm/nommu.c b/mm/nommu.c -index b5ba5bc..a7662fc 100644 ---- a/mm/nommu.c -+++ b/mm/nommu.c -@@ -658,7 +658,7 @@ static void __put_nommu_region(struct vm_region *region) - up_write(&nommu_region_sem); - - if (region->vm_file) -- fput(region->vm_file); -+ vmr_fput(region); - - /* IO memory and memory shared directly out of the pagecache - * from ramfs/tmpfs mustn't be released here */ -@@ -823,7 +823,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); - if (vma->vm_file) -- fput(vma->vm_file); -+ vma_fput(vma); - put_nommu_region(vma->vm_region); - kmem_cache_free(vm_area_cachep, vma); - } -@@ -1385,7 +1385,7 @@ unsigned long do_mmap_pgoff(struct file *file, - goto error_just_free; - } - } -- fput(region->vm_file); -+ vmr_fput(region); - kmem_cache_free(vm_region_jar, region); - region = pregion; - result = start; -@@ -1461,10 +1461,10 @@ error_just_free: - up_write(&nommu_region_sem); - error: - if 
(region->vm_file) -- fput(region->vm_file); -+ vmr_fput(region); - kmem_cache_free(vm_region_jar, region); - if (vma->vm_file) -- fput(vma->vm_file); -+ vma_fput(vma); - kmem_cache_free(vm_area_cachep, vma); - kleave(" = %d", ret); - return ret; -diff --git a/mm/prfile.c b/mm/prfile.c -new file mode 100644 -index 0000000..532e518 ---- /dev/null -+++ b/mm/prfile.c -@@ -0,0 +1,86 @@ -+/* -+ * Mainly for aufs which mmap(2) diffrent file and wants to print different path -+ * in /proc/PID/maps. -+ * Call these functions via macros defined in linux/mm.h. -+ * -+ * See Documentation/filesystems/aufs/design/06mmap.txt -+ * -+ * Copyright (c) 2014 Junjro R. Okajima -+ * Copyright (c) 2014 Ian Campbell -+ */ -+ -+#include -+#include -+#include -+ -+/* #define PRFILE_TRACE */ -+static inline void prfile_trace(struct file *f, struct file *pr, -+ const char func[], int line, const char func2[]) -+{ -+#ifdef PRFILE_TRACE -+ if (pr) -+ pr_info("%s:%d: %s, %s\n", func, line, func2, -+ f ? (char *)f->f_dentry->d_name.name : "(null)"); -+#endif -+} -+ -+void vma_do_file_update_time(struct vm_area_struct *vma, const char func[], -+ int line) -+{ -+ struct file *f = vma->vm_file, *pr = vma->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ file_update_time(f); -+ if (f && pr) -+ file_update_time(pr); -+} -+ -+struct file *vma_do_pr_or_file(struct vm_area_struct *vma, const char func[], -+ int line) -+{ -+ struct file *f = vma->vm_file, *pr = vma->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ return (f && pr) ? 
pr : f; -+} -+ -+void vma_do_get_file(struct vm_area_struct *vma, const char func[], int line) -+{ -+ struct file *f = vma->vm_file, *pr = vma->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ get_file(f); -+ if (f && pr) -+ get_file(pr); -+} -+ -+void vma_do_fput(struct vm_area_struct *vma, const char func[], int line) -+{ -+ struct file *f = vma->vm_file, *pr = vma->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ fput(f); -+ if (f && pr) -+ fput(pr); -+} -+ -+#ifndef CONFIG_MMU -+struct file *vmr_do_pr_or_file(struct vm_region *region, const char func[], -+ int line) -+{ -+ struct file *f = region->vm_file, *pr = region->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ return (f && pr) ? pr : f; -+} -+ -+void vmr_do_fput(struct vm_region *region, const char func[], int line) -+{ -+ struct file *f = region->vm_file, *pr = region->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ fput(f); -+ if (f && pr) -+ fput(pr); -+} -+#endif /* !CONFIG_MMU */ -diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include -index 65e7b08..5374b1b 100644 ---- a/scripts/Kbuild.include -+++ b/scripts/Kbuild.include -@@ -179,6 +179,12 @@ build := -f $(srctree)/scripts/Makefile.build obj - # $(Q)$(MAKE) $(modbuiltin)=dir - modbuiltin := -f $(srctree)/scripts/Makefile.modbuiltin obj - -+### -+# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.dtbinst obj= -+# Usage: -+# $(Q)$(MAKE) $(dtbinst)=dir -+dtbinst := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.dtbinst obj -+ - # Prefix -I with $(srctree) if it is not an absolute path. 
- # skip if -I has no parameter - addtree = $(if $(patsubst -I%,%,$(1)), \ -diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst -new file mode 100644 -index 0000000..909ed7a ---- /dev/null -+++ b/scripts/Makefile.dtbinst -@@ -0,0 +1,51 @@ -+# ========================================================================== -+# Installing dtb files -+# -+# Installs all dtb files listed in $(dtb-y) either in the -+# INSTALL_DTBS_PATH directory or the default location: -+# -+# $INSTALL_PATH/dtbs/$KERNELRELEASE -+# -+# Traverse through subdirectories listed in $(dts-dirs). -+# ========================================================================== -+ -+src := $(obj) -+ -+PHONY := __dtbs_install -+__dtbs_install: -+ -+export dtbinst-root ?= $(obj) -+ -+include include/config/auto.conf -+include scripts/Kbuild.include -+include $(srctree)/$(obj)/Makefile -+ -+PHONY += __dtbs_install_prep -+__dtbs_install_prep: -+ifeq ("$(dtbinst-root)", "$(obj)") -+ $(Q)if [ -d $(INSTALL_DTBS_PATH).old ]; then rm -rf $(INSTALL_DTBS_PATH).old; fi -+ $(Q)if [ -d $(INSTALL_DTBS_PATH) ]; then mv $(INSTALL_DTBS_PATH) $(INSTALL_DTBS_PATH).old; fi -+ $(Q)mkdir -p $(INSTALL_DTBS_PATH) -+endif -+ -+dtbinst-files := $(dtb-y) -+dtbinst-dirs := $(dts-dirs) -+ -+# Helper targets for Installing DTBs into the boot directory -+quiet_cmd_dtb_install = INSTALL $< -+ cmd_dtb_install = mkdir -p $(2); cp $< $(2) -+ -+install-dir = $(patsubst $(dtbinst-root)%,$(INSTALL_DTBS_PATH)%,$(obj)) -+ -+$(dtbinst-files) $(dtbinst-dirs): | __dtbs_install_prep -+ -+$(dtbinst-files): %.dtb: $(obj)/%.dtb -+ $(call cmd,dtb_install,$(install-dir)) -+ -+$(dtbinst-dirs): -+ $(Q)$(MAKE) $(dtbinst)=$(obj)/$@ -+ -+PHONY += $(dtbinst-files) $(dtbinst-dirs) -+__dtbs_install: $(dtbinst-files) $(dtbinst-dirs) -+ -+.PHONY: $(PHONY) -diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib -index 54be19a..5117552 100644 ---- a/scripts/Makefile.lib -+++ b/scripts/Makefile.lib -@@ -283,18 +283,6 @@ $(obj)/%.dtb: $(src)/%.dts 
FORCE - - dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp) - --# Helper targets for Installing DTBs into the boot directory --quiet_cmd_dtb_install = INSTALL $< -- cmd_dtb_install = cp $< $(2) -- --_dtbinst_pre_: -- $(Q)if [ -d $(INSTALL_DTBS_PATH).old ]; then rm -rf $(INSTALL_DTBS_PATH).old; fi -- $(Q)if [ -d $(INSTALL_DTBS_PATH) ]; then mv $(INSTALL_DTBS_PATH) $(INSTALL_DTBS_PATH).old; fi -- $(Q)mkdir -p $(INSTALL_DTBS_PATH) -- --%.dtb_dtbinst_: $(obj)/%.dtb _dtbinst_pre_ -- $(call cmd,dtb_install,$(INSTALL_DTBS_PATH)) -- - # Bzip2 - # --------------------------------------------------------------------------- - -diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c -index fa756d0..ad57f0c 100644 ---- a/sound/soc/fsl/mpc8610_hpcd.c -+++ b/sound/soc/fsl/mpc8610_hpcd.c -@@ -12,11 +12,11 @@ - - #include - #include -+#include - #include - #include - #include - #include --#include - - #include "fsl_dma.h" - #include "fsl_ssi.h" -diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c -index f75c3cf..64a0bb6 100644 ---- a/sound/soc/fsl/p1022_ds.c -+++ b/sound/soc/fsl/p1022_ds.c -@@ -11,12 +11,12 @@ - */ - - #include -+#include - #include - #include - #include - #include - #include --#include - - #include "fsl_dma.h" - #include "fsl_ssi.h" -diff --git a/sound/soc/fsl/p1022_rdk.c b/sound/soc/fsl/p1022_rdk.c -index 9d89bb0..4ce4aff 100644 ---- a/sound/soc/fsl/p1022_rdk.c -+++ b/sound/soc/fsl/p1022_rdk.c -@@ -18,12 +18,12 @@ - */ - - #include -+#include - #include - #include - #include - #include - #include --#include - - #include "fsl_dma.h" - #include "fsl_ssi.h" --- -2.7.4 - diff --git a/packages/base/any/kernels/3.18.25/patches/add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch b/packages/base/any/kernels/3.18.25/patches/add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch deleted file mode 100644 index 5d493c1d..00000000 --- a/packages/base/any/kernels/3.18.25/patches/add-fsl-dpaa2-and-fsl-mc-support-based-on-3.18.25.patch +++ 
/dev/null @@ -1,35045 +0,0 @@ -From 340daa3e4a9851ab640062065eff4501e6f7cc61 Mon Sep 17 00:00:00 2001 -From: Shengzhou Liu -Date: Fri, 23 Sep 2016 13:45:59 +0800 -Subject: [PATCH 1/2] Add fsl-dpaa2 and fsl-mc support based on 3.18.25 - -This patch integrated a ton of patches to support DPAA2.0 & MC -networking which is used on LS2080A/LS2088A RDB. ---- - MAINTAINERS | 27 + - arch/arm64/include/asm/io.h | 1 + - arch/arm64/include/asm/pgtable.h | 1 + - drivers/net/ethernet/freescale/Kconfig | 8 +- - drivers/net/ethernet/freescale/fec_mpc52xx.c | 2 +- - drivers/net/ethernet/freescale/fec_mpc52xx_phy.c | 2 +- - .../net/ethernet/freescale/fs_enet/fs_enet-main.c | 4 +- - .../net/ethernet/freescale/fs_enet/mii-bitbang.c | 2 +- - drivers/net/ethernet/freescale/fs_enet/mii-fec.c | 4 +- - drivers/net/ethernet/freescale/fsl_pq_mdio.c | 2 +- - drivers/net/ethernet/freescale/gianfar.c | 2 +- - drivers/net/ethernet/freescale/gianfar_ptp.c | 2 +- - drivers/net/ethernet/freescale/ucc_geth.c | 2 +- - drivers/net/ethernet/freescale/xgmac_mdio.c | 194 +- - drivers/net/phy/Kconfig | 5 + - drivers/net/phy/Makefile | 1 + - drivers/net/phy/aquantia.c | 201 ++ - drivers/net/phy/fsl_10gkr.c | 1467 ++++++++++ - drivers/net/phy/teranetics.c | 135 + - drivers/staging/Kconfig | 4 + - drivers/staging/Makefile | 2 + - drivers/staging/fsl-dpaa2/Kconfig | 12 + - drivers/staging/fsl-dpaa2/Makefile | 6 + - drivers/staging/fsl-dpaa2/ethernet/Kconfig | 36 + - drivers/staging/fsl-dpaa2/ethernet/Makefile | 21 + - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c | 317 +++ - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h | 61 + - .../staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h | 185 ++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 2836 ++++++++++++++++++++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 377 +++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 861 ++++++ - drivers/staging/fsl-dpaa2/ethernet/dpkg.h | 175 ++ - drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h | 1058 ++++++++ 
- drivers/staging/fsl-dpaa2/ethernet/dpni.c | 1907 +++++++++++++ - drivers/staging/fsl-dpaa2/ethernet/dpni.h | 2581 ++++++++++++++++++ - drivers/staging/fsl-dpaa2/mac/Kconfig | 24 + - drivers/staging/fsl-dpaa2/mac/Makefile | 10 + - drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h | 195 ++ - drivers/staging/fsl-dpaa2/mac/dpmac.c | 422 +++ - drivers/staging/fsl-dpaa2/mac/dpmac.h | 593 ++++ - drivers/staging/fsl-dpaa2/mac/mac.c | 694 +++++ - drivers/staging/fsl-mc/Kconfig | 1 + - drivers/staging/fsl-mc/Makefile | 2 + - drivers/staging/fsl-mc/TODO | 13 + - drivers/staging/fsl-mc/bus/Kconfig | 45 + - drivers/staging/fsl-mc/bus/Makefile | 24 + - drivers/staging/fsl-mc/bus/dpbp.c | 459 ++++ - drivers/staging/fsl-mc/bus/dpcon.c | 407 +++ - drivers/staging/fsl-mc/bus/dpio/Makefile | 9 + - drivers/staging/fsl-mc/bus/dpio/dpio-drv.c | 401 +++ - drivers/staging/fsl-mc/bus/dpio/dpio-drv.h | 33 + - drivers/staging/fsl-mc/bus/dpio/dpio.c | 468 ++++ - drivers/staging/fsl-mc/bus/dpio/dpio_service.c | 801 ++++++ - drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h | 460 ++++ - drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h | 184 ++ - drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h | 123 + - drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h | 753 ++++++ - drivers/staging/fsl-mc/bus/dpio/qbman_debug.c | 846 ++++++ - drivers/staging/fsl-mc/bus/dpio/qbman_debug.h | 136 + - drivers/staging/fsl-mc/bus/dpio/qbman_portal.c | 1212 +++++++++ - drivers/staging/fsl-mc/bus/dpio/qbman_portal.h | 261 ++ - drivers/staging/fsl-mc/bus/dpio/qbman_private.h | 173 ++ - drivers/staging/fsl-mc/bus/dpio/qbman_sys.h | 307 +++ - drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h | 86 + - drivers/staging/fsl-mc/bus/dpio/qbman_test.c | 664 +++++ - drivers/staging/fsl-mc/bus/dpmcp-cmd.h | 56 + - drivers/staging/fsl-mc/bus/dpmcp.c | 318 +++ - drivers/staging/fsl-mc/bus/dpmcp.h | 323 +++ - drivers/staging/fsl-mc/bus/dpmng-cmd.h | 47 + - drivers/staging/fsl-mc/bus/dpmng.c | 85 + - drivers/staging/fsl-mc/bus/dprc-cmd.h | 87 + - 
drivers/staging/fsl-mc/bus/dprc-driver.c | 1084 ++++++++ - drivers/staging/fsl-mc/bus/dprc.c | 1218 +++++++++ - drivers/staging/fsl-mc/bus/mc-allocator.c | 716 +++++ - drivers/staging/fsl-mc/bus/mc-bus.c | 1347 ++++++++++ - drivers/staging/fsl-mc/bus/mc-ioctl.h | 25 + - drivers/staging/fsl-mc/bus/mc-restool.c | 312 +++ - drivers/staging/fsl-mc/bus/mc-sys.c | 677 +++++ - drivers/staging/fsl-mc/include/dpbp-cmd.h | 62 + - drivers/staging/fsl-mc/include/dpbp.h | 438 +++ - drivers/staging/fsl-mc/include/dpcon-cmd.h | 162 ++ - drivers/staging/fsl-mc/include/dpcon.h | 407 +++ - drivers/staging/fsl-mc/include/dpmac-cmd.h | 192 ++ - drivers/staging/fsl-mc/include/dpmac.h | 528 ++++ - drivers/staging/fsl-mc/include/dpmng.h | 80 + - drivers/staging/fsl-mc/include/dprc.h | 990 +++++++ - drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h | 774 ++++++ - drivers/staging/fsl-mc/include/fsl_dpaa2_io.h | 619 +++++ - drivers/staging/fsl-mc/include/mc-cmd.h | 133 + - drivers/staging/fsl-mc/include/mc-private.h | 168 ++ - drivers/staging/fsl-mc/include/mc-sys.h | 128 + - drivers/staging/fsl-mc/include/mc.h | 244 ++ - drivers/staging/fsl-mc/include/net.h | 481 ++++ - scripts/Makefile.dtbinst | 51 + - 94 files changed, 33975 insertions(+), 84 deletions(-) - create mode 100644 drivers/net/phy/aquantia.c - create mode 100644 drivers/net/phy/fsl_10gkr.c - create mode 100644 drivers/net/phy/teranetics.c - create mode 100644 drivers/staging/fsl-dpaa2/Kconfig - create mode 100644 drivers/staging/fsl-dpaa2/Makefile - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Kconfig - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/Makefile - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h - 
create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpkg.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.c - create mode 100644 drivers/staging/fsl-dpaa2/ethernet/dpni.h - create mode 100644 drivers/staging/fsl-dpaa2/mac/Kconfig - create mode 100644 drivers/staging/fsl-dpaa2/mac/Makefile - create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h - create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.c - create mode 100644 drivers/staging/fsl-dpaa2/mac/dpmac.h - create mode 100644 drivers/staging/fsl-dpaa2/mac/mac.c - create mode 100644 drivers/staging/fsl-mc/Kconfig - create mode 100644 drivers/staging/fsl-mc/Makefile - create mode 100644 drivers/staging/fsl-mc/TODO - create mode 100644 drivers/staging/fsl-mc/bus/Kconfig - create mode 100644 drivers/staging/fsl-mc/bus/Makefile - create mode 100644 drivers/staging/fsl-mc/bus/dpbp.c - create mode 100644 drivers/staging/fsl-mc/bus/dpcon.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/Makefile - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio-drv.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/dpio_service.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_debug.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.c - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_portal.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_private.h - 
create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h - create mode 100644 drivers/staging/fsl-mc/bus/dpio/qbman_test.c - create mode 100644 drivers/staging/fsl-mc/bus/dpmcp-cmd.h - create mode 100644 drivers/staging/fsl-mc/bus/dpmcp.c - create mode 100644 drivers/staging/fsl-mc/bus/dpmcp.h - create mode 100644 drivers/staging/fsl-mc/bus/dpmng-cmd.h - create mode 100644 drivers/staging/fsl-mc/bus/dpmng.c - create mode 100644 drivers/staging/fsl-mc/bus/dprc-cmd.h - create mode 100644 drivers/staging/fsl-mc/bus/dprc-driver.c - create mode 100644 drivers/staging/fsl-mc/bus/dprc.c - create mode 100644 drivers/staging/fsl-mc/bus/mc-allocator.c - create mode 100644 drivers/staging/fsl-mc/bus/mc-bus.c - create mode 100644 drivers/staging/fsl-mc/bus/mc-ioctl.h - create mode 100644 drivers/staging/fsl-mc/bus/mc-restool.c - create mode 100644 drivers/staging/fsl-mc/bus/mc-sys.c - create mode 100644 drivers/staging/fsl-mc/include/dpbp-cmd.h - create mode 100644 drivers/staging/fsl-mc/include/dpbp.h - create mode 100644 drivers/staging/fsl-mc/include/dpcon-cmd.h - create mode 100644 drivers/staging/fsl-mc/include/dpcon.h - create mode 100644 drivers/staging/fsl-mc/include/dpmac-cmd.h - create mode 100644 drivers/staging/fsl-mc/include/dpmac.h - create mode 100644 drivers/staging/fsl-mc/include/dpmng.h - create mode 100644 drivers/staging/fsl-mc/include/dprc.h - create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h - create mode 100644 drivers/staging/fsl-mc/include/fsl_dpaa2_io.h - create mode 100644 drivers/staging/fsl-mc/include/mc-cmd.h - create mode 100644 drivers/staging/fsl-mc/include/mc-private.h - create mode 100644 drivers/staging/fsl-mc/include/mc-sys.h - create mode 100644 drivers/staging/fsl-mc/include/mc.h - create mode 100644 drivers/staging/fsl-mc/include/net.h - create mode 100644 scripts/Makefile.dtbinst - -diff --git a/MAINTAINERS b/MAINTAINERS -index 1ae7362..63a796c 
100644 ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -3973,6 +3973,33 @@ F: sound/soc/fsl/fsl* - F: sound/soc/fsl/imx* - F: sound/soc/fsl/mpc8610_hpcd.c - -+FREESCALE QORIQ MANAGEMENT COMPLEX DRIVER -+M: J. German Rivera -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-mc/ -+ -+FREESCALE DPAA2 ETH DRIVER -+M: Ioana Radulescu -+M: Bogdan Hamciuc -+M: Cristian Sovaiala -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-dpaa2/ethernet/ -+ -+FREESCALE QORIQ MANAGEMENT COMPLEX RESTOOL DRIVER -+M: Lijun Pan -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-mc/bus/mc-ioctl.h -+F: drivers/staging/fsl-mc/bus/mc-restool.c -+ -+FREESCALE DPAA2 MAC/PHY INTERFACE DRIVER -+M: Alex Marginean -+L: linux-kernel@vger.kernel.org -+S: Maintained -+F: drivers/staging/fsl-dpaa2/mac/ -+ - FREEVXFS FILESYSTEM - M: Christoph Hellwig - W: ftp://ftp.openlinux.org/pub/people/hch/vxfs -diff --git a/arch/arm64/include/asm/io.h b/arch/arm64/include/asm/io.h -index 75825b6..f58e31a 100644 ---- a/arch/arm64/include/asm/io.h -+++ b/arch/arm64/include/asm/io.h -@@ -249,6 +249,7 @@ extern void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size); - #define ioremap(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) - #define ioremap_nocache(addr, size) __ioremap((addr), (size), __pgprot(PROT_DEVICE_nGnRE)) - #define ioremap_wc(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NC)) -+#define ioremap_cache_ns(addr, size) __ioremap((addr), (size), __pgprot(PROT_NORMAL_NS)) - #define iounmap __iounmap - - #define ARCH_HAS_IOREMAP_WC -diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h -index 41a43bf..009f690 100644 ---- a/arch/arm64/include/asm/pgtable.h -+++ b/arch/arm64/include/asm/pgtable.h -@@ -65,6 +65,7 @@ extern void __pgd_error(const char *file, int line, unsigned long val); - #define PROT_DEVICE_nGnRE (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_DEVICE_nGnRE)) 
- #define PROT_NORMAL_NC (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL_NC)) - #define PROT_NORMAL (PROT_DEFAULT | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)) -+#define PROT_NORMAL_NS (PTE_TYPE_PAGE | PTE_AF | PTE_PXN | PTE_UXN | PTE_ATTRINDX(MT_NORMAL)) - - #define PROT_SECT_DEVICE_nGnRE (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_DEVICE_nGnRE)) - #define PROT_SECT_NORMAL (PROT_SECT_DEFAULT | PMD_SECT_PXN | PMD_SECT_UXN | PMD_ATTRINDX(MT_NORMAL)) -diff --git a/drivers/net/ethernet/freescale/Kconfig b/drivers/net/ethernet/freescale/Kconfig -index 2703083..0c1c97d 100644 ---- a/drivers/net/ethernet/freescale/Kconfig -+++ b/drivers/net/ethernet/freescale/Kconfig -@@ -7,7 +7,8 @@ config NET_VENDOR_FREESCALE - default y - depends on FSL_SOC || QUICC_ENGINE || CPM1 || CPM2 || PPC_MPC512x || \ - M523x || M527x || M5272 || M528x || M520x || M532x || \ -- ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) -+ ARCH_MXC || ARCH_MXS || (PPC_MPC52xx && PPC_BESTCOMM) || \ -+ ARCH_LAYERSCAPE - ---help--- - If you have a network (Ethernet) card belonging to this class, say Y - and read the Ethernet-HOWTO, available from -@@ -58,18 +59,17 @@ source "drivers/net/ethernet/freescale/fs_enet/Kconfig" - - config FSL_PQ_MDIO - tristate "Freescale PQ MDIO" -- depends on FSL_SOC - select PHYLIB - ---help--- - This driver supports the MDIO bus used by the gianfar and UCC drivers. - - config FSL_XGMAC_MDIO - tristate "Freescale XGMAC MDIO" -- depends on FSL_SOC - select PHYLIB - select OF_MDIO - ---help--- -- This driver supports the MDIO bus on the Fman 10G Ethernet MACs. 
-+ This driver supports the MDIO bus on the Fman 10G Ethernet MACs and -+ on mEMAC (which supports both Clauses 22 and 45) - - config UCC_GETH - tristate "Freescale QE Gigabit Ethernet" -diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx.c b/drivers/net/ethernet/freescale/fec_mpc52xx.c -index ff55fbb..76ff046 100644 ---- a/drivers/net/ethernet/freescale/fec_mpc52xx.c -+++ b/drivers/net/ethernet/freescale/fec_mpc52xx.c -@@ -1057,7 +1057,7 @@ static int mpc52xx_fec_of_resume(struct platform_device *op) - } - #endif - --static struct of_device_id mpc52xx_fec_match[] = { -+static const struct of_device_id mpc52xx_fec_match[] = { - { .compatible = "fsl,mpc5200b-fec", }, - { .compatible = "fsl,mpc5200-fec", }, - { .compatible = "mpc5200-fec", }, -diff --git a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c -index e052890..1e647be 100644 ---- a/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c -+++ b/drivers/net/ethernet/freescale/fec_mpc52xx_phy.c -@@ -134,7 +134,7 @@ static int mpc52xx_fec_mdio_remove(struct platform_device *of) - return 0; - } - --static struct of_device_id mpc52xx_fec_mdio_match[] = { -+static const struct of_device_id mpc52xx_fec_mdio_match[] = { - { .compatible = "fsl,mpc5200b-mdio", }, - { .compatible = "fsl,mpc5200-mdio", }, - { .compatible = "mpc5200b-fec-phy", }, -diff --git a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c -index c92c3b7..dc0da6c 100644 ---- a/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c -+++ b/drivers/net/ethernet/freescale/fs_enet/fs_enet-main.c -@@ -886,7 +886,7 @@ static const struct net_device_ops fs_enet_netdev_ops = { - #endif - }; - --static struct of_device_id fs_enet_match[]; -+static const struct of_device_id fs_enet_match[]; - static int fs_enet_probe(struct platform_device *ofdev) - { - const struct of_device_id *match; -@@ -1047,7 +1047,7 @@ static int fs_enet_remove(struct platform_device 
*ofdev) - return 0; - } - --static struct of_device_id fs_enet_match[] = { -+static const struct of_device_id fs_enet_match[] = { - #ifdef CONFIG_FS_ENET_HAS_SCC - { - .compatible = "fsl,cpm1-scc-enet", -diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c -index 3d3fde6..9ec396b 100644 ---- a/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c -+++ b/drivers/net/ethernet/freescale/fs_enet/mii-bitbang.c -@@ -213,7 +213,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) - return 0; - } - --static struct of_device_id fs_enet_mdio_bb_match[] = { -+static const struct of_device_id fs_enet_mdio_bb_match[] = { - { - .compatible = "fsl,cpm2-mdio-bitbang", - }, -diff --git a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c -index ebf5d64..72205b0 100644 ---- a/drivers/net/ethernet/freescale/fs_enet/mii-fec.c -+++ b/drivers/net/ethernet/freescale/fs_enet/mii-fec.c -@@ -95,7 +95,7 @@ static int fs_enet_fec_mii_write(struct mii_bus *bus, int phy_id, int location, - - } - --static struct of_device_id fs_enet_mdio_fec_match[]; -+static const struct of_device_id fs_enet_mdio_fec_match[]; - static int fs_enet_mdio_probe(struct platform_device *ofdev) - { - const struct of_device_id *match; -@@ -208,7 +208,7 @@ static int fs_enet_mdio_remove(struct platform_device *ofdev) - return 0; - } - --static struct of_device_id fs_enet_mdio_fec_match[] = { -+static const struct of_device_id fs_enet_mdio_fec_match[] = { - { - .compatible = "fsl,pq1-fec-mdio", - }, -diff --git a/drivers/net/ethernet/freescale/fsl_pq_mdio.c b/drivers/net/ethernet/freescale/fsl_pq_mdio.c -index 964c6bf..f94fa63 100644 ---- a/drivers/net/ethernet/freescale/fsl_pq_mdio.c -+++ b/drivers/net/ethernet/freescale/fsl_pq_mdio.c -@@ -294,7 +294,7 @@ static void ucc_configure(phys_addr_t start, phys_addr_t end) - - #endif - --static struct of_device_id fsl_pq_mdio_match[] = { -+static 
const struct of_device_id fsl_pq_mdio_match[] = { - #if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) - { - .compatible = "fsl,gianfar-tbi", -diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c -index 4fdf0aa..a4a7396 100644 ---- a/drivers/net/ethernet/freescale/gianfar.c -+++ b/drivers/net/ethernet/freescale/gianfar.c -@@ -3455,7 +3455,7 @@ static noinline void gfar_update_link_state(struct gfar_private *priv) - phy_print_status(phydev); - } - --static struct of_device_id gfar_match[] = -+static const struct of_device_id gfar_match[] = - { - { - .type = "network", -diff --git a/drivers/net/ethernet/freescale/gianfar_ptp.c b/drivers/net/ethernet/freescale/gianfar_ptp.c -index bb56800..c7c75de 100644 ---- a/drivers/net/ethernet/freescale/gianfar_ptp.c -+++ b/drivers/net/ethernet/freescale/gianfar_ptp.c -@@ -554,7 +554,7 @@ static int gianfar_ptp_remove(struct platform_device *dev) - return 0; - } - --static struct of_device_id match_table[] = { -+static const struct of_device_id match_table[] = { - { .compatible = "fsl,etsec-ptp" }, - {}, - }; -diff --git a/drivers/net/ethernet/freescale/ucc_geth.c b/drivers/net/ethernet/freescale/ucc_geth.c -index 3cf0478..741a7d4 100644 ---- a/drivers/net/ethernet/freescale/ucc_geth.c -+++ b/drivers/net/ethernet/freescale/ucc_geth.c -@@ -3930,7 +3930,7 @@ static int ucc_geth_remove(struct platform_device* ofdev) - return 0; - } - --static struct of_device_id ucc_geth_match[] = { -+static const struct of_device_id ucc_geth_match[] = { - { - .type = "network", - .compatible = "ucc_geth", -diff --git a/drivers/net/ethernet/freescale/xgmac_mdio.c b/drivers/net/ethernet/freescale/xgmac_mdio.c -index 6e7db66..7b8fe86 100644 ---- a/drivers/net/ethernet/freescale/xgmac_mdio.c -+++ b/drivers/net/ethernet/freescale/xgmac_mdio.c -@@ -32,31 +32,62 @@ struct tgec_mdio_controller { - __be32 mdio_addr; /* MDIO address */ - } __packed; - -+#define MDIO_STAT_ENC BIT(6) - #define 
MDIO_STAT_CLKDIV(x) (((x>>1) & 0xff) << 8) --#define MDIO_STAT_BSY (1 << 0) --#define MDIO_STAT_RD_ER (1 << 1) -+#define MDIO_STAT_BSY BIT(0) -+#define MDIO_STAT_RD_ER BIT(1) - #define MDIO_CTL_DEV_ADDR(x) (x & 0x1f) - #define MDIO_CTL_PORT_ADDR(x) ((x & 0x1f) << 5) --#define MDIO_CTL_PRE_DIS (1 << 10) --#define MDIO_CTL_SCAN_EN (1 << 11) --#define MDIO_CTL_POST_INC (1 << 14) --#define MDIO_CTL_READ (1 << 15) -+#define MDIO_CTL_PRE_DIS BIT(10) -+#define MDIO_CTL_SCAN_EN BIT(11) -+#define MDIO_CTL_POST_INC BIT(14) -+#define MDIO_CTL_READ BIT(15) - - #define MDIO_DATA(x) (x & 0xffff) --#define MDIO_DATA_BSY (1 << 31) -+#define MDIO_DATA_BSY BIT(31) -+ -+struct mdio_fsl_priv { -+ struct tgec_mdio_controller __iomem *mdio_base; -+ bool is_little_endian; -+}; -+ -+static u32 xgmac_read32(void __iomem *regs, -+ bool is_little_endian) -+{ -+ if (is_little_endian) -+ return ioread32(regs); -+ else -+ return ioread32be(regs); -+} -+ -+static void xgmac_write32(u32 value, -+ void __iomem *regs, -+ bool is_little_endian) -+{ -+ if (is_little_endian) -+ iowrite32(value, regs); -+ else -+ iowrite32be(value, regs); -+} - - /* - * Wait until the MDIO bus is free - */ - static int xgmac_wait_until_free(struct device *dev, -- struct tgec_mdio_controller __iomem *regs) -+ struct tgec_mdio_controller __iomem *regs, -+ bool is_little_endian) - { -- uint32_t status; -+ unsigned int timeout; - - /* Wait till the bus is free */ -- status = spin_event_timeout( -- !((in_be32(®s->mdio_stat)) & MDIO_STAT_BSY), TIMEOUT, 0); -- if (!status) { -+ timeout = TIMEOUT; -+ while ((xgmac_read32(®s->mdio_stat, is_little_endian) & -+ MDIO_STAT_BSY) && timeout) { -+ cpu_relax(); -+ timeout--; -+ } -+ -+ if (!timeout) { - dev_err(dev, "timeout waiting for bus to be free\n"); - return -ETIMEDOUT; - } -@@ -68,14 +99,20 @@ static int xgmac_wait_until_free(struct device *dev, - * Wait till the MDIO read or write operation is complete - */ - static int xgmac_wait_until_done(struct device *dev, -- struct 
tgec_mdio_controller __iomem *regs) -+ struct tgec_mdio_controller __iomem *regs, -+ bool is_little_endian) - { -- uint32_t status; -+ unsigned int timeout; - - /* Wait till the MDIO write is complete */ -- status = spin_event_timeout( -- !((in_be32(®s->mdio_data)) & MDIO_DATA_BSY), TIMEOUT, 0); -- if (!status) { -+ timeout = TIMEOUT; -+ while ((xgmac_read32(®s->mdio_stat, is_little_endian) & -+ MDIO_STAT_BSY) && timeout) { -+ cpu_relax(); -+ timeout--; -+ } -+ -+ if (!timeout) { - dev_err(dev, "timeout waiting for operation to complete\n"); - return -ETIMEDOUT; - } -@@ -90,32 +127,47 @@ static int xgmac_wait_until_done(struct device *dev, - */ - static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 value) - { -- struct tgec_mdio_controller __iomem *regs = bus->priv; -- uint16_t dev_addr = regnum >> 16; -+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; -+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base; -+ uint16_t dev_addr; -+ u32 mdio_ctl, mdio_stat; - int ret; -+ bool endian = priv->is_little_endian; -+ -+ mdio_stat = xgmac_read32(®s->mdio_stat, endian); -+ if (regnum & MII_ADDR_C45) { -+ /* Clause 45 (ie 10G) */ -+ dev_addr = (regnum >> 16) & 0x1f; -+ mdio_stat |= MDIO_STAT_ENC; -+ } else { -+ /* Clause 22 (ie 1G) */ -+ dev_addr = regnum & 0x1f; -+ mdio_stat &= ~MDIO_STAT_ENC; -+ } - -- /* Setup the MII Mgmt clock speed */ -- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); -+ xgmac_write32(mdio_stat, ®s->mdio_stat, endian); - -- ret = xgmac_wait_until_free(&bus->dev, regs); -+ ret = xgmac_wait_until_free(&bus->dev, regs, endian); - if (ret) - return ret; - - /* Set the port and dev addr */ -- out_be32(®s->mdio_ctl, -- MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr)); -+ mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); -+ xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); - - /* Set the register address */ -- out_be32(®s->mdio_addr, regnum & 0xffff); -+ if (regnum & MII_ADDR_C45) { -+ 
xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); - -- ret = xgmac_wait_until_free(&bus->dev, regs); -- if (ret) -- return ret; -+ ret = xgmac_wait_until_free(&bus->dev, regs, endian); -+ if (ret) -+ return ret; -+ } - - /* Write the value to the register */ -- out_be32(®s->mdio_data, MDIO_DATA(value)); -+ xgmac_write32(MDIO_DATA(value), ®s->mdio_data, endian); - -- ret = xgmac_wait_until_done(&bus->dev, regs); -+ ret = xgmac_wait_until_done(&bus->dev, regs, endian); - if (ret) - return ret; - -@@ -129,74 +181,70 @@ static int xgmac_mdio_write(struct mii_bus *bus, int phy_id, int regnum, u16 val - */ - static int xgmac_mdio_read(struct mii_bus *bus, int phy_id, int regnum) - { -- struct tgec_mdio_controller __iomem *regs = bus->priv; -- uint16_t dev_addr = regnum >> 16; -+ struct mdio_fsl_priv *priv = (struct mdio_fsl_priv *)bus->priv; -+ struct tgec_mdio_controller __iomem *regs = priv->mdio_base; -+ uint16_t dev_addr; -+ uint32_t mdio_stat; - uint32_t mdio_ctl; - uint16_t value; - int ret; -+ bool endian = priv->is_little_endian; -+ -+ mdio_stat = xgmac_read32(®s->mdio_stat, endian); -+ if (regnum & MII_ADDR_C45) { -+ dev_addr = (regnum >> 16) & 0x1f; -+ mdio_stat |= MDIO_STAT_ENC; -+ } else { -+ dev_addr = regnum & 0x1f; -+ mdio_stat &= ~MDIO_STAT_ENC; -+ } - -- /* Setup the MII Mgmt clock speed */ -- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); -+ xgmac_write32(mdio_stat, ®s->mdio_stat, endian); - -- ret = xgmac_wait_until_free(&bus->dev, regs); -+ ret = xgmac_wait_until_free(&bus->dev, regs, endian); - if (ret) - return ret; - - /* Set the Port and Device Addrs */ - mdio_ctl = MDIO_CTL_PORT_ADDR(phy_id) | MDIO_CTL_DEV_ADDR(dev_addr); -- out_be32(®s->mdio_ctl, mdio_ctl); -+ xgmac_write32(mdio_ctl, ®s->mdio_ctl, endian); - - /* Set the register address */ -- out_be32(®s->mdio_addr, regnum & 0xffff); -+ if (regnum & MII_ADDR_C45) { -+ xgmac_write32(regnum & 0xffff, ®s->mdio_addr, endian); - -- ret = xgmac_wait_until_free(&bus->dev, regs); -- if (ret) -- 
return ret; -+ ret = xgmac_wait_until_free(&bus->dev, regs, endian); -+ if (ret) -+ return ret; -+ } - - /* Initiate the read */ -- out_be32(®s->mdio_ctl, mdio_ctl | MDIO_CTL_READ); -+ xgmac_write32(mdio_ctl | MDIO_CTL_READ, ®s->mdio_ctl, endian); - -- ret = xgmac_wait_until_done(&bus->dev, regs); -+ ret = xgmac_wait_until_done(&bus->dev, regs, endian); - if (ret) - return ret; - - /* Return all Fs if nothing was there */ -- if (in_be32(®s->mdio_stat) & MDIO_STAT_RD_ER) { -+ if (xgmac_read32(®s->mdio_stat, endian) & MDIO_STAT_RD_ER) { - dev_err(&bus->dev, - "Error while reading PHY%d reg at %d.%hhu\n", - phy_id, dev_addr, regnum); - return 0xffff; - } - -- value = in_be32(®s->mdio_data) & 0xffff; -+ value = xgmac_read32(®s->mdio_data, endian) & 0xffff; - dev_dbg(&bus->dev, "read %04x\n", value); - - return value; - } - --/* Reset the MIIM registers, and wait for the bus to free */ --static int xgmac_mdio_reset(struct mii_bus *bus) --{ -- struct tgec_mdio_controller __iomem *regs = bus->priv; -- int ret; -- -- mutex_lock(&bus->mdio_lock); -- -- /* Setup the MII Mgmt clock speed */ -- out_be32(®s->mdio_stat, MDIO_STAT_CLKDIV(100)); -- -- ret = xgmac_wait_until_free(&bus->dev, regs); -- -- mutex_unlock(&bus->mdio_lock); -- -- return ret; --} -- - static int xgmac_mdio_probe(struct platform_device *pdev) - { - struct device_node *np = pdev->dev.of_node; - struct mii_bus *bus; - struct resource res; -+ struct mdio_fsl_priv *priv; - int ret; - - ret = of_address_to_resource(np, 0, &res); -@@ -205,25 +253,30 @@ static int xgmac_mdio_probe(struct platform_device *pdev) - return ret; - } - -- bus = mdiobus_alloc_size(PHY_MAX_ADDR * sizeof(int)); -+ bus = mdiobus_alloc_size(sizeof(struct mdio_fsl_priv)); - if (!bus) - return -ENOMEM; - - bus->name = "Freescale XGMAC MDIO Bus"; - bus->read = xgmac_mdio_read; - bus->write = xgmac_mdio_write; -- bus->reset = xgmac_mdio_reset; -- bus->irq = bus->priv; - bus->parent = &pdev->dev; - snprintf(bus->id, MII_BUS_ID_SIZE, "%llx", 
(unsigned long long)res.start); - - /* Set the PHY base address */ -- bus->priv = of_iomap(np, 0); -- if (!bus->priv) { -+ priv = bus->priv; -+ priv->mdio_base = of_iomap(np, 0); -+ if (!priv->mdio_base) { - ret = -ENOMEM; - goto err_ioremap; - } - -+ if (of_get_property(pdev->dev.of_node, -+ "little-endian", NULL)) -+ priv->is_little_endian = true; -+ else -+ priv->is_little_endian = false; -+ - ret = of_mdiobus_register(bus, np); - if (ret) { - dev_err(&pdev->dev, "cannot register MDIO bus\n"); -@@ -235,7 +288,7 @@ static int xgmac_mdio_probe(struct platform_device *pdev) - return 0; - - err_registration: -- iounmap(bus->priv); -+ iounmap(priv->mdio_base); - - err_ioremap: - mdiobus_free(bus); -@@ -254,10 +307,13 @@ static int xgmac_mdio_remove(struct platform_device *pdev) - return 0; - } - --static struct of_device_id xgmac_mdio_match[] = { -+static const struct of_device_id xgmac_mdio_match[] = { - { - .compatible = "fsl,fman-xmdio", - }, -+ { -+ .compatible = "fsl,fman-memac-mdio", -+ }, - {}, - }; - MODULE_DEVICE_TABLE(of, xgmac_mdio_match); -diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig -index 75472cf..2973c60 100644 ---- a/drivers/net/phy/Kconfig -+++ b/drivers/net/phy/Kconfig -@@ -14,6 +14,11 @@ if PHYLIB - - comment "MII PHY device drivers" - -+config AQUANTIA_PHY -+ tristate "Drivers for the Aquantia PHYs" -+ ---help--- -+ Currently supports the Aquantia AQ1202, AQ2104, AQR105, AQR405 -+ - config AT803X_PHY - tristate "Drivers for Atheros AT803X PHYs" - ---help--- -diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile -index eb3b18b..b5c8f9f 100644 ---- a/drivers/net/phy/Makefile -+++ b/drivers/net/phy/Makefile -@@ -3,6 +3,7 @@ - libphy-objs := phy.o phy_device.o mdio_bus.o - - obj-$(CONFIG_PHYLIB) += libphy.o -+obj-$(CONFIG_AQUANTIA_PHY) += aquantia.o - obj-$(CONFIG_MARVELL_PHY) += marvell.o - obj-$(CONFIG_DAVICOM_PHY) += davicom.o - obj-$(CONFIG_CICADA_PHY) += cicada.o -diff --git a/drivers/net/phy/aquantia.c 
b/drivers/net/phy/aquantia.c -new file mode 100644 -index 0000000..d6111af ---- /dev/null -+++ b/drivers/net/phy/aquantia.c -@@ -0,0 +1,201 @@ -+/* -+ * Driver for Aquantia PHY -+ * -+ * Author: Shaohui Xie -+ * -+ * Copyright 2015 Freescale Semiconductor, Inc. -+ * -+ * This file is licensed under the terms of the GNU General Public License -+ * version 2. This program is licensed "as is" without any warranty of any -+ * kind, whether express or implied. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define PHY_ID_AQ1202 0x03a1b445 -+#define PHY_ID_AQ2104 0x03a1b460 -+#define PHY_ID_AQR105 0x03a1b4a2 -+#define PHY_ID_AQR405 0x03a1b4b0 -+ -+#define PHY_AQUANTIA_FEATURES (SUPPORTED_10000baseT_Full | \ -+ SUPPORTED_1000baseT_Full | \ -+ SUPPORTED_100baseT_Full | \ -+ PHY_DEFAULT_FEATURES) -+ -+static int aquantia_config_aneg(struct phy_device *phydev) -+{ -+ phydev->supported = PHY_AQUANTIA_FEATURES; -+ phydev->advertising = phydev->supported; -+ -+ return 0; -+} -+ -+static int aquantia_aneg_done(struct phy_device *phydev) -+{ -+ int reg; -+ -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); -+ return (reg < 0) ? 
reg : (reg & BMSR_ANEGCOMPLETE); -+} -+ -+static int aquantia_config_intr(struct phy_device *phydev) -+{ -+ int err; -+ -+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) { -+ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 1); -+ if (err < 0) -+ return err; -+ -+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 1); -+ if (err < 0) -+ return err; -+ -+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0x1001); -+ } else { -+ err = phy_write_mmd(phydev, MDIO_MMD_AN, 0xd401, 0); -+ if (err < 0) -+ return err; -+ -+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff00, 0); -+ if (err < 0) -+ return err; -+ -+ err = phy_write_mmd(phydev, MDIO_MMD_VEND1, 0xff01, 0); -+ } -+ -+ return err; -+} -+ -+static int aquantia_ack_interrupt(struct phy_device *phydev) -+{ -+ int reg; -+ -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xcc01); -+ return (reg < 0) ? reg : 0; -+} -+ -+static int aquantia_read_status(struct phy_device *phydev) -+{ -+ int reg; -+ -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); -+ if (reg & MDIO_STAT1_LSTATUS) -+ phydev->link = 1; -+ else -+ phydev->link = 0; -+ -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); -+ mdelay(10); -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, 0xc800); -+ -+ switch (reg) { -+ case 0x9: -+ phydev->speed = SPEED_2500; -+ break; -+ case 0x5: -+ phydev->speed = SPEED_1000; -+ break; -+ case 0x3: -+ phydev->speed = SPEED_100; -+ break; -+ case 0x7: -+ default: -+ phydev->speed = SPEED_10000; -+ break; -+ } -+ phydev->duplex = DUPLEX_FULL; -+ -+ return 0; -+} -+ -+static struct phy_driver aquantia_driver[] = { -+{ -+ .phy_id = PHY_ID_AQ1202, -+ .phy_id_mask = 0xfffffff0, -+ .name = "Aquantia AQ1202", -+ .features = PHY_AQUANTIA_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .aneg_done = aquantia_aneg_done, -+ .config_aneg = aquantia_config_aneg, -+ .config_intr = aquantia_config_intr, -+ .ack_interrupt = aquantia_ack_interrupt, -+ .read_status = aquantia_read_status, -+ 
.driver = { .owner = THIS_MODULE,}, -+}, -+{ -+ .phy_id = PHY_ID_AQ2104, -+ .phy_id_mask = 0xfffffff0, -+ .name = "Aquantia AQ2104", -+ .features = PHY_AQUANTIA_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .aneg_done = aquantia_aneg_done, -+ .config_aneg = aquantia_config_aneg, -+ .config_intr = aquantia_config_intr, -+ .ack_interrupt = aquantia_ack_interrupt, -+ .read_status = aquantia_read_status, -+ .driver = { .owner = THIS_MODULE,}, -+}, -+{ -+ .phy_id = PHY_ID_AQR105, -+ .phy_id_mask = 0xfffffff0, -+ .name = "Aquantia AQR105", -+ .features = PHY_AQUANTIA_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .aneg_done = aquantia_aneg_done, -+ .config_aneg = aquantia_config_aneg, -+ .config_intr = aquantia_config_intr, -+ .ack_interrupt = aquantia_ack_interrupt, -+ .read_status = aquantia_read_status, -+ .driver = { .owner = THIS_MODULE,}, -+}, -+{ -+ .phy_id = PHY_ID_AQR405, -+ .phy_id_mask = 0xfffffff0, -+ .name = "Aquantia AQR405", -+ .features = PHY_AQUANTIA_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .aneg_done = aquantia_aneg_done, -+ .config_aneg = aquantia_config_aneg, -+ .config_intr = aquantia_config_intr, -+ .ack_interrupt = aquantia_ack_interrupt, -+ .read_status = aquantia_read_status, -+ .driver = { .owner = THIS_MODULE,}, -+}, -+}; -+ -+static int __init aquantia_init(void) -+{ -+ return phy_drivers_register(aquantia_driver, -+ ARRAY_SIZE(aquantia_driver)); -+} -+ -+static void __exit aquantia_exit(void) -+{ -+ return phy_drivers_unregister(aquantia_driver, -+ ARRAY_SIZE(aquantia_driver)); -+} -+ -+module_init(aquantia_init); -+module_exit(aquantia_exit); -+ -+static struct mdio_device_id __maybe_unused aquantia_tbl[] = { -+ { PHY_ID_AQ1202, 0xfffffff0 }, -+ { PHY_ID_AQ2104, 0xfffffff0 }, -+ { PHY_ID_AQR105, 0xfffffff0 }, -+ { PHY_ID_AQR405, 0xfffffff0 }, -+ { } -+}; -+ -+MODULE_DEVICE_TABLE(mdio, aquantia_tbl); -+ -+MODULE_DESCRIPTION("Aquantia PHY driver"); -+MODULE_AUTHOR("Shaohui Xie "); -+MODULE_LICENSE("GPL v2"); -diff --git 
a/drivers/net/phy/fsl_10gkr.c b/drivers/net/phy/fsl_10gkr.c -new file mode 100644 -index 0000000..3713726 ---- /dev/null -+++ b/drivers/net/phy/fsl_10gkr.c -@@ -0,0 +1,1467 @@ -+/* Freescale XFI 10GBASE-KR driver. -+ * Author: Shaohui Xie -+ * -+ * Copyright 2014 Freescale Semiconductor, Inc. -+ * -+ * Licensed under the GPL-2 or later. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define FSL_XFI_PCS_PHY_ID 0x7C000012 -+#define FSL_XFI_PCS_PHY_ID2 0x0083e400 -+ -+/* Freescale XFI PCS MMD */ -+#define FSL_XFI_PMD 0x1 -+#define FSL_XFI_PCS 0x3 -+#define FSL_XFI_AN 0x7 -+#define FSL_XFI_VS1 0x1e -+ -+/* Freescale XFI PMD registers */ -+#define FSL_XFI_PMD_CTRL 0x0 -+#define FSL_XFI_KR_PMD_CTRL 0x0096 -+#define FSL_XFI_KR_PMD_STATUS 0x0097 -+#define FSL_XFI_KR_LP_CU 0x0098 -+#define FSL_XFI_KR_LP_STATUS 0x0099 -+#define FSL_XFI_KR_LD_CU 0x009a -+#define FSL_XFI_KR_LD_STATUS 0x009b -+ -+/* PMD define */ -+#define PMD_RESET 0x1 -+#define PMD_STATUS_SUP_STAT 0x4 -+#define PMD_STATUS_FRAME_LOCK 0x2 -+#define TRAIN_EN 0x3 -+#define TRAIN_DISABLE 0x1 -+#define RX_STAT 0x1 -+ -+/* Freescale XFI PCS registers */ -+#define FSL_XFI_PCS_CTRL 0x0 -+#define FSL_XFI_PCS_STATUS 0x1 -+ -+/* Freescale XFI Auto-Negotiation Registers */ -+#define FSL_XFI_AN_CTRL 0x0000 -+#define FSL_XFI_LNK_STATUS 0x0001 -+#define FSL_XFI_AN_AD_1 0x0011 -+#define FSL_XFI_BP_STATUS 0x0030 -+ -+#define XFI_AN_AD1 0x85 -+#define XF_AN_RESTART 0x1200 -+#define XFI_AN_LNK_STAT_UP 0x4 -+ -+/* Freescale XFI Vendor-Specific 1 Registers */ -+#define FSL_XFI_PCS_INTR_EVENT 0x0002 -+#define FSL_XFI_PCS_INTR_MASK 0x0003 -+#define FSL_XFI_AN_INTR_EVENT 0x0004 -+#define FSL_XFI_AN_INTR_MASK 0x0005 -+#define FSL_XFI_LT_INTR_EVENT 0x0006 -+#define FSL_XFI_LT_INTR_MASK 0x0007 -+ -+/* C(-1) */ -+#define BIN_M1 0 -+/* C(1) */ -+#define BIN_LONG 1 -+#define BIN_M1_SEL 6 -+#define BIN_Long_SEL 7 -+#define CDR_SEL_MASK 0x00070000 -+#define BIN_SNAPSHOT_NUM 5 
-+#define BIN_M1_THRESHOLD 3 -+#define BIN_LONG_THRESHOLD 2 -+ -+#define PRE_COE_MASK 0x03c00000 -+#define POST_COE_MASK 0x001f0000 -+#define ZERO_COE_MASK 0x00003f00 -+#define PRE_COE_SHIFT 22 -+#define POST_COE_SHIFT 16 -+#define ZERO_COE_SHIFT 8 -+ -+#define PRE_COE_MAX 0x0 -+#define PRE_COE_MIN 0x8 -+#define POST_COE_MAX 0x0 -+#define POST_COE_MIN 0x10 -+#define ZERO_COE_MAX 0x30 -+#define ZERO_COE_MIN 0x0 -+ -+#define TECR0_INIT 0x24200000 -+#define RATIO_PREQ 0x3 -+#define RATIO_PST1Q 0xd -+#define RATIO_EQ 0x20 -+ -+#define GCR1_CTL_SNP_START_MASK 0x00002000 -+#define GCR1_SNP_START_MASK 0x00000040 -+#define RECR1_SNP_DONE_MASK 0x00000004 -+#define RECR1_CTL_SNP_DONE_MASK 0x00000002 -+#define TCSR1_SNP_DATA_MASK 0x0000ffc0 -+#define TCSR1_SNP_DATA_SHIFT 6 -+#define TCSR1_EQ_SNPBIN_SIGN_MASK 0x100 -+ -+#define RECR1_GAINK2_MASK 0x0f000000 -+#define RECR1_GAINK2_SHIFT 24 -+#define RECR1_GAINK3_MASK 0x000f0000 -+#define RECR1_GAINK3_SHIFT 16 -+#define RECR1_OFFSET_MASK 0x00003f80 -+#define RECR1_OFFSET_SHIFT 7 -+#define RECR1_BLW_MASK 0x00000f80 -+#define RECR1_BLW_SHIFT 7 -+#define EYE_CTRL_SHIFT 12 -+#define BASE_WAND_SHIFT 10 -+ -+#define XGKR_TIMEOUT 1050 -+#define AN_ABILITY_MASK 0x9 -+#define AN_10GKR_MASK 0x8 -+#define LT_10GKR_MASK 0x4 -+#define TRAIN_FAIL 0x8 -+ -+#define INCREMENT 1 -+#define DECREMENT 2 -+#define TIMEOUT_LONG 3 -+#define TIMEOUT_M1 3 -+ -+#define RX_READY_MASK 0x8000 -+#define PRESET_MASK 0x2000 -+#define INIT_MASK 0x1000 -+#define COP1_MASK 0x30 -+#define COP1_SHIFT 4 -+#define COZ_MASK 0xc -+#define COZ_SHIFT 2 -+#define COM1_MASK 0x3 -+#define COM1_SHIFT 0 -+#define REQUEST_MASK 0x3f -+#define LD_ALL_MASK (PRESET_MASK | INIT_MASK | \ -+ COP1_MASK | COZ_MASK | COM1_MASK) -+ -+#define FSL_SERDES_INSTANCE1_BASE 0xffe0ea000 -+#define FSL_SERDES_INSTANCE2_BASE 0xffe0eb000 -+#define FSL_LANE_A_BASE 0x800 -+#define FSL_LANE_B_BASE 0x840 -+#define FSL_LANE_C_BASE 0x880 -+#define FSL_LANE_D_BASE 0x8C0 -+#define FSL_LANE_E_BASE 0x900 
-+#define FSL_LANE_F_BASE 0x940 -+#define FSL_LANE_G_BASE 0x980 -+#define FSL_LANE_H_BASE 0x9C0 -+#define GCR0_RESET_MASK 0x600000 -+ -+#define NEW_ALGORITHM_TRAIN_TX -+#ifdef NEW_ALGORITHM_TRAIN_TX -+#define FORCE_INC_COP1_NUMBER 0 -+#define FORCE_INC_COM1_NUMBER 1 -+#endif -+ -+enum fsl_xgkr_driver { -+ FSL_XGKR_REV1, -+ FSL_XGKR_REV2, -+ FSL_XGKR_INV -+}; -+ -+static struct phy_driver fsl_xgkr_driver[FSL_XGKR_INV]; -+ -+enum coe_filed { -+ COE_COP1, -+ COE_COZ, -+ COE_COM -+}; -+ -+enum coe_update { -+ COE_NOTUPDATED, -+ COE_UPDATED, -+ COE_MIN, -+ COE_MAX, -+ COE_INV -+}; -+ -+enum serdes_inst { -+ SERDES_1, -+ SERDES_2, -+ SERDES_MAX -+}; -+ -+enum lane_inst { -+ LANE_A, -+ LANE_B, -+ LANE_C, -+ LANE_D, -+ LANE_E, -+ LANE_F, -+ LANE_G, -+ LANE_H, -+ LANE_MAX -+}; -+ -+struct serdes_map { -+ const char *serdes_name; -+ unsigned long serdes_base; -+}; -+ -+struct lane_map { -+ const char *lane_name; -+ unsigned long lane_base; -+}; -+ -+const struct serdes_map s_map[SERDES_MAX] = { -+ {"serdes-1", FSL_SERDES_INSTANCE1_BASE}, -+ {"serdes-2", FSL_SERDES_INSTANCE2_BASE} -+}; -+ -+const struct lane_map l_map[LANE_MAX] = { -+ {"lane-a", FSL_LANE_A_BASE}, -+ {"lane-b", FSL_LANE_B_BASE}, -+ {"lane-c", FSL_LANE_C_BASE}, -+ {"lane-d", FSL_LANE_D_BASE}, -+ {"lane-e", FSL_LANE_E_BASE}, -+ {"lane-f", FSL_LANE_F_BASE}, -+ {"lane-g", FSL_LANE_G_BASE}, -+ {"lane-h", FSL_LANE_H_BASE} -+}; -+ -+struct per_lane_ctrl_status { -+ __be32 gcr0; /* 0x.000 - General Control Register 0 */ -+ __be32 gcr1; /* 0x.004 - General Control Register 1 */ -+ __be32 gcr2; /* 0x.008 - General Control Register 2 */ -+ __be32 resv1; /* 0x.00C - Reserved */ -+ __be32 recr0; /* 0x.010 - Receive Equalization Control Register 0 */ -+ __be32 recr1; /* 0x.014 - Receive Equalization Control Register 1 */ -+ __be32 tecr0; /* 0x.018 - Transmit Equalization Control Register 0 */ -+ __be32 resv2; /* 0x.01C - Reserved */ -+ __be32 tlcr0; /* 0x.020 - TTL Control Register 0 */ -+ __be32 tlcr1; /* 0x.024 - TTL 
Control Register 1 */ -+ __be32 tlcr2; /* 0x.028 - TTL Control Register 2 */ -+ __be32 tlcr3; /* 0x.02C - TTL Control Register 3 */ -+ __be32 tcsr0; /* 0x.030 - Test Control/Status Register 0 */ -+ __be32 tcsr1; /* 0x.034 - Test Control/Status Register 1 */ -+ __be32 tcsr2; /* 0x.038 - Test Control/Status Register 2 */ -+ __be32 tcsr3; /* 0x.03C - Test Control/Status Register 3 */ -+}; -+ -+struct training_state_machine { -+ bool bin_m1_late_early; -+ bool bin_long_late_early; -+ bool bin_m1_stop; -+ bool bin_long_stop; -+ bool tx_complete; -+ bool an_ok; -+ bool link_up; -+ bool running; -+ bool sent_init; -+ int m1_min_max_cnt; -+ int long_min_max_cnt; -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ int pre_inc; -+ int post_inc; -+#endif -+}; -+ -+struct fsl_xgkr_inst { -+ void *reg_base; -+ struct mii_bus *bus; -+ struct phy_device *phydev; -+ struct training_state_machine t_s_m; -+ u32 ld_update; -+ u32 ld_status; -+ u32 ratio_preq; -+ u32 ratio_pst1q; -+ u32 adpt_eq; -+}; -+ -+struct fsl_xgkr_wk { -+ struct work_struct xgkr_wk; -+ struct list_head xgkr_list; -+ struct fsl_xgkr_inst *xgkr_inst; -+}; -+ -+LIST_HEAD(fsl_xgkr_list); -+ -+static struct timer_list xgkr_timer; -+static int fire_timer; -+static struct workqueue_struct *xgkr_wq; -+ -+static void init_state_machine(struct training_state_machine *s_m) -+{ -+ s_m->bin_m1_late_early = true; -+ s_m->bin_long_late_early = false; -+ s_m->bin_m1_stop = false; -+ s_m->bin_long_stop = false; -+ s_m->tx_complete = false; -+ s_m->an_ok = false; -+ s_m->link_up = false; -+ s_m->running = false; -+ s_m->sent_init = false; -+ s_m->m1_min_max_cnt = 0; -+ s_m->long_min_max_cnt = 0; -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ s_m->pre_inc = FORCE_INC_COM1_NUMBER; -+ s_m->post_inc = FORCE_INC_COP1_NUMBER; -+#endif -+} -+ -+void tune_tecr0(struct fsl_xgkr_inst *inst) -+{ -+ struct per_lane_ctrl_status *reg_base; -+ u32 val; -+ -+ reg_base = (struct per_lane_ctrl_status *)inst->reg_base; -+ -+ val = TECR0_INIT | -+ inst->adpt_eq << 
ZERO_COE_SHIFT | -+ inst->ratio_preq << PRE_COE_SHIFT | -+ inst->ratio_pst1q << POST_COE_SHIFT; -+ -+ /* reset the lane */ -+ iowrite32be(ioread32be(®_base->gcr0) & ~GCR0_RESET_MASK, -+ ®_base->gcr0); -+ udelay(1); -+ iowrite32be(val, ®_base->tecr0); -+ udelay(1); -+ /* unreset the lane */ -+ iowrite32be(ioread32be(®_base->gcr0) | GCR0_RESET_MASK, -+ ®_base->gcr0); -+ udelay(1); -+} -+ -+static void start_lt(struct phy_device *phydev) -+{ -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_EN); -+} -+ -+static void stop_lt(struct phy_device *phydev) -+{ -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_DISABLE); -+} -+ -+static void reset_gcr0(struct fsl_xgkr_inst *inst) -+{ -+ struct per_lane_ctrl_status *reg_base; -+ -+ reg_base = (struct per_lane_ctrl_status *)inst->reg_base; -+ -+ iowrite32be(ioread32be(®_base->gcr0) & ~GCR0_RESET_MASK, -+ ®_base->gcr0); -+ udelay(1); -+ iowrite32be(ioread32be(®_base->gcr0) | GCR0_RESET_MASK, -+ ®_base->gcr0); -+ udelay(1); -+} -+ -+static void reset_lt(struct phy_device *phydev) -+{ -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_PMD_CTRL, PMD_RESET); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_CTRL, TRAIN_DISABLE); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LD_CU, 0); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LD_STATUS, 0); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_STATUS, 0); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_CU, 0); -+ phy_write_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_STATUS, 0); -+} -+ -+static void start_an(struct phy_device *phydev) -+{ -+ reset_lt(phydev); -+ phy_write_mmd(phydev, FSL_XFI_AN, FSL_XFI_AN_AD_1, XFI_AN_AD1); -+ phy_write_mmd(phydev, FSL_XFI_AN, FSL_XFI_AN_CTRL, XF_AN_RESTART); -+} -+ -+static void ld_coe_status(struct fsl_xgkr_inst *inst) -+{ -+ phy_write_mmd(inst->phydev, FSL_XFI_PMD, -+ FSL_XFI_KR_LD_STATUS, inst->ld_status); -+} -+ -+static void ld_coe_update(struct fsl_xgkr_inst *inst) -+{ -+ phy_write_mmd(inst->phydev, 
FSL_XFI_PMD, -+ FSL_XFI_KR_LD_CU, inst->ld_update); -+} -+ -+static void init_inst(struct fsl_xgkr_inst *inst, int reset) -+{ -+ if (reset) { -+ inst->ratio_preq = RATIO_PREQ; -+ inst->ratio_pst1q = RATIO_PST1Q; -+ inst->adpt_eq = RATIO_EQ; -+ tune_tecr0(inst); -+ } -+ -+ inst->ld_status &= RX_READY_MASK; -+ ld_coe_status(inst); -+ -+ /* init state machine */ -+ init_state_machine(&inst->t_s_m); -+ -+ inst->ld_update = 0; -+ ld_coe_update(inst); -+ -+ inst->ld_status &= ~RX_READY_MASK; -+ ld_coe_status(inst); -+} -+ -+#ifdef NEW_ALGORITHM_TRAIN_TX -+static int get_median_gaink2(u32 *reg) -+{ -+ int gaink2_snap_shot[BIN_SNAPSHOT_NUM]; -+ u32 rx_eq_snp; -+ struct per_lane_ctrl_status *reg_base; -+ int timeout; -+ int i, j, tmp, pos; -+ -+ reg_base = (struct per_lane_ctrl_status *)reg; -+ -+ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) { -+ /* wait RECR1_CTL_SNP_DONE_MASK has cleared */ -+ timeout = 100; -+ while (ioread32be(®_base->recr1) & -+ RECR1_CTL_SNP_DONE_MASK) { -+ udelay(1); -+ timeout--; -+ if (timeout == 0) -+ break; -+ } -+ -+ /* start snap shot */ -+ iowrite32be((ioread32be(®_base->gcr1) | -+ GCR1_CTL_SNP_START_MASK), -+ ®_base->gcr1); -+ -+ /* wait for SNP done */ -+ timeout = 100; -+ while (!(ioread32be(®_base->recr1) & -+ RECR1_CTL_SNP_DONE_MASK)) { -+ udelay(1); -+ timeout--; -+ if (timeout == 0) -+ break; -+ } -+ -+ /* read and save the snap shot */ -+ rx_eq_snp = ioread32be(®_base->recr1); -+ gaink2_snap_shot[i] = (rx_eq_snp & RECR1_GAINK2_MASK) >> -+ RECR1_GAINK2_SHIFT; -+ -+ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */ -+ iowrite32be((ioread32be(®_base->gcr1) & -+ ~GCR1_CTL_SNP_START_MASK), -+ ®_base->gcr1); -+ } -+ -+ /* get median of the 5 snap shot */ -+ for (i = 0; i < BIN_SNAPSHOT_NUM - 1; i++) { -+ tmp = gaink2_snap_shot[i]; -+ pos = i; -+ for (j = i + 1; j < BIN_SNAPSHOT_NUM; j++) { -+ if (gaink2_snap_shot[j] < tmp) { -+ tmp = gaink2_snap_shot[j]; -+ pos = j; -+ } -+ } -+ -+ gaink2_snap_shot[pos] = gaink2_snap_shot[i]; -+ 
gaink2_snap_shot[i] = tmp; -+ } -+ -+ return gaink2_snap_shot[2]; -+} -+#endif -+ -+static bool is_bin_early(int bin_sel, void __iomem *reg) -+{ -+ bool early = false; -+ int bin_snap_shot[BIN_SNAPSHOT_NUM]; -+ int i, negative_count = 0; -+ struct per_lane_ctrl_status *reg_base; -+ int timeout; -+ -+ reg_base = (struct per_lane_ctrl_status *)reg; -+ -+ for (i = 0; i < BIN_SNAPSHOT_NUM; i++) { -+ /* wait RECR1_SNP_DONE_MASK has cleared */ -+ timeout = 100; -+ while ((ioread32be(®_base->recr1) & RECR1_SNP_DONE_MASK)) { -+ udelay(1); -+ timeout--; -+ if (timeout == 0) -+ break; -+ } -+ -+ /* set TCSR1[CDR_SEL] to BinM1/BinLong */ -+ if (bin_sel == BIN_M1) { -+ iowrite32be((ioread32be(®_base->tcsr1) & -+ ~CDR_SEL_MASK) | BIN_M1_SEL, -+ ®_base->tcsr1); -+ } else { -+ iowrite32be((ioread32be(®_base->tcsr1) & -+ ~CDR_SEL_MASK) | BIN_Long_SEL, -+ ®_base->tcsr1); -+ } -+ -+ /* start snap shot */ -+ iowrite32be(ioread32be(®_base->gcr1) | GCR1_SNP_START_MASK, -+ ®_base->gcr1); -+ -+ /* wait for SNP done */ -+ timeout = 100; -+ while (!(ioread32be(®_base->recr1) & RECR1_SNP_DONE_MASK)) { -+ udelay(1); -+ timeout--; -+ if (timeout == 0) -+ break; -+ } -+ -+ /* read and save the snap shot */ -+ bin_snap_shot[i] = (ioread32be(®_base->tcsr1) & -+ TCSR1_SNP_DATA_MASK) >> TCSR1_SNP_DATA_SHIFT; -+ if (bin_snap_shot[i] & TCSR1_EQ_SNPBIN_SIGN_MASK) -+ negative_count++; -+ -+ /* terminate the snap shot by setting GCR1[REQ_CTL_SNP] */ -+ iowrite32be(ioread32be(®_base->gcr1) & ~GCR1_SNP_START_MASK, -+ ®_base->gcr1); -+ } -+ -+ if (((bin_sel == BIN_M1) && negative_count > BIN_M1_THRESHOLD) || -+ ((bin_sel == BIN_LONG && negative_count > BIN_LONG_THRESHOLD))) { -+ early = true; -+ } -+ -+ return early; -+} -+ -+static void train_tx(struct fsl_xgkr_inst *inst) -+{ -+ struct phy_device *phydev = inst->phydev; -+ struct training_state_machine *s_m = &inst->t_s_m; -+ bool bin_m1_early, bin_long_early; -+ u32 lp_status, old_ld_update; -+ u32 status_cop1, status_coz, status_com1; -+ u32 req_cop1, 
req_coz, req_com1, req_preset, req_init; -+ u32 temp; -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ u32 median_gaink2; -+#endif -+ -+recheck: -+ if (s_m->bin_long_stop && s_m->bin_m1_stop) { -+ s_m->tx_complete = true; -+ inst->ld_status |= RX_READY_MASK; -+ ld_coe_status(inst); -+ /* tell LP we are ready */ -+ phy_write_mmd(phydev, FSL_XFI_PMD, -+ FSL_XFI_KR_PMD_STATUS, RX_STAT); -+ return; -+ } -+ -+ /* We start by checking the current LP status. If we got any responses, -+ * we can clear up the appropriate update request so that the -+ * subsequent code may easily issue new update requests if needed. -+ */ -+ lp_status = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_STATUS) & -+ REQUEST_MASK; -+ status_cop1 = (lp_status & COP1_MASK) >> COP1_SHIFT; -+ status_coz = (lp_status & COZ_MASK) >> COZ_SHIFT; -+ status_com1 = (lp_status & COM1_MASK) >> COM1_SHIFT; -+ -+ old_ld_update = inst->ld_update; -+ req_cop1 = (old_ld_update & COP1_MASK) >> COP1_SHIFT; -+ req_coz = (old_ld_update & COZ_MASK) >> COZ_SHIFT; -+ req_com1 = (old_ld_update & COM1_MASK) >> COM1_SHIFT; -+ req_preset = old_ld_update & PRESET_MASK; -+ req_init = old_ld_update & INIT_MASK; -+ -+ /* IEEE802.3-2008, 72.6.10.2.3.1 -+ * We may clear PRESET when all coefficients show UPDATED or MAX. -+ */ -+ if (req_preset) { -+ if ((status_cop1 == COE_UPDATED || status_cop1 == COE_MAX) && -+ (status_coz == COE_UPDATED || status_coz == COE_MAX) && -+ (status_com1 == COE_UPDATED || status_com1 == COE_MAX)) { -+ inst->ld_update &= ~PRESET_MASK; -+ } -+ } -+ -+ /* IEEE802.3-2008, 72.6.10.2.3.2 -+ * We may clear INITIALIZE when no coefficients show NOT UPDATED. -+ */ -+ if (req_init) { -+ if (status_cop1 != COE_NOTUPDATED && -+ status_coz != COE_NOTUPDATED && -+ status_com1 != COE_NOTUPDATED) { -+ inst->ld_update &= ~INIT_MASK; -+ } -+ } -+ -+ /* IEEE802.3-2008, 72.6.10.2.3.2 -+ * we send initialize to the other side to ensure default settings -+ * for the LP. Naturally, we should do this only once. 
-+ */ -+ if (!s_m->sent_init) { -+ if (!lp_status && !(old_ld_update & (LD_ALL_MASK))) { -+ inst->ld_update |= INIT_MASK; -+ s_m->sent_init = true; -+ } -+ } -+ -+ /* IEEE802.3-2008, 72.6.10.2.3.3 -+ * We set coefficient requests to HOLD when we get the information -+ * about any updates On clearing our prior response, we also update -+ * our internal status. -+ */ -+ if (status_cop1 != COE_NOTUPDATED) { -+ if (req_cop1) { -+ inst->ld_update &= ~COP1_MASK; -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ if (s_m->post_inc) { -+ if (req_cop1 == INCREMENT && -+ status_cop1 == COE_MAX) { -+ s_m->post_inc = 0; -+ s_m->bin_long_stop = true; -+ s_m->bin_m1_stop = true; -+ } else { -+ s_m->post_inc -= 1; -+ } -+ -+ ld_coe_update(inst); -+ goto recheck; -+ } -+#endif -+ if ((req_cop1 == DECREMENT && status_cop1 == COE_MIN) || -+ (req_cop1 == INCREMENT && status_cop1 == COE_MAX)) { -+ s_m->long_min_max_cnt++; -+ if (s_m->long_min_max_cnt >= TIMEOUT_LONG) { -+ s_m->bin_long_stop = true; -+ ld_coe_update(inst); -+ goto recheck; -+ } -+ } -+ } -+ } -+ -+ if (status_coz != COE_NOTUPDATED) { -+ if (req_coz) -+ inst->ld_update &= ~COZ_MASK; -+ } -+ -+ if (status_com1 != COE_NOTUPDATED) { -+ if (req_com1) { -+ inst->ld_update &= ~COM1_MASK; -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ if (s_m->pre_inc) { -+ if (req_com1 == INCREMENT && -+ status_com1 == COE_MAX) -+ s_m->pre_inc = 0; -+ else -+ s_m->pre_inc -= 1; -+ -+ ld_coe_update(inst); -+ goto recheck; -+ } -+#endif -+ /* Stop If we have reached the limit for a parameter. */ -+ if ((req_com1 == DECREMENT && status_com1 == COE_MIN) || -+ (req_com1 == INCREMENT && status_com1 == COE_MAX)) { -+ s_m->m1_min_max_cnt++; -+ if (s_m->m1_min_max_cnt >= TIMEOUT_M1) { -+ s_m->bin_m1_stop = true; -+ ld_coe_update(inst); -+ goto recheck; -+ } -+ } -+ } -+ } -+ -+ if (old_ld_update != inst->ld_update) { -+ ld_coe_update(inst); -+ /* Redo these status checks and updates until we have no more -+ * changes, to speed up the overall process. 
-+ */ -+ goto recheck; -+ } -+ -+ /* Do nothing if we have pending request. */ -+ if ((req_coz || req_com1 || req_cop1)) -+ return; -+ else if (lp_status) -+ /* No pending request but LP status was not reverted to -+ * not updated. -+ */ -+ return; -+ -+#ifdef NEW_ALGORITHM_TRAIN_TX -+ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) { -+ if (s_m->pre_inc) { -+ inst->ld_update = INCREMENT << COM1_SHIFT; -+ ld_coe_update(inst); -+ return; -+ } -+ -+ if (status_cop1 != COE_MAX) { -+ median_gaink2 = get_median_gaink2(inst->reg_base); -+ if (median_gaink2 == 0xf) { -+ s_m->post_inc = 1; -+ } else { -+ /* Gaink2 median lower than "F" */ -+ s_m->bin_m1_stop = true; -+ s_m->bin_long_stop = true; -+ goto recheck; -+ } -+ } else { -+ /* C1 MAX */ -+ s_m->bin_m1_stop = true; -+ s_m->bin_long_stop = true; -+ goto recheck; -+ } -+ -+ if (s_m->post_inc) { -+ inst->ld_update = INCREMENT << COP1_SHIFT; -+ ld_coe_update(inst); -+ return; -+ } -+ } -+#endif -+ -+ /* snapshot and select bin */ -+ bin_m1_early = is_bin_early(BIN_M1, inst->reg_base); -+ bin_long_early = is_bin_early(BIN_LONG, inst->reg_base); -+ -+ if (!s_m->bin_m1_stop && !s_m->bin_m1_late_early && bin_m1_early) { -+ s_m->bin_m1_stop = true; -+ goto recheck; -+ } -+ -+ if (!s_m->bin_long_stop && -+ s_m->bin_long_late_early && !bin_long_early) { -+ s_m->bin_long_stop = true; -+ goto recheck; -+ } -+ -+ /* IEEE802.3-2008, 72.6.10.2.3.3 -+ * We only request coefficient updates when no PRESET/INITIALIZE is -+ * pending! We also only request coefficient updates when the -+ * corresponding status is NOT UPDATED and nothing is pending. -+ */ -+ if (!(inst->ld_update & (PRESET_MASK | INIT_MASK))) { -+ if (!s_m->bin_long_stop) { -+ /* BinM1 correction means changing COM1 */ -+ if (!status_com1 && !(inst->ld_update & COM1_MASK)) { -+ /* Avoid BinM1Late by requesting an -+ * immediate decrement. 
-+ */ -+ if (!bin_m1_early) { -+ /* request decrement c(-1) */ -+ temp = DECREMENT << COM1_SHIFT; -+ inst->ld_update |= temp; -+ ld_coe_update(inst); -+ s_m->bin_m1_late_early = bin_m1_early; -+ return; -+ } -+ } -+ -+ /* BinLong correction means changing COP1 */ -+ if (!status_cop1 && !(inst->ld_update & COP1_MASK)) { -+ /* Locate BinLong transition point (if any) -+ * while avoiding BinM1Late. -+ */ -+ if (bin_long_early) { -+ /* request increment c(1) */ -+ temp = INCREMENT << COP1_SHIFT; -+ inst->ld_update |= temp; -+ } else { -+ /* request decrement c(1) */ -+ temp = DECREMENT << COP1_SHIFT; -+ inst->ld_update |= temp; -+ } -+ -+ ld_coe_update(inst); -+ s_m->bin_long_late_early = bin_long_early; -+ } -+ /* We try to finish BinLong before we do BinM1 */ -+ return; -+ } -+ -+ if (!s_m->bin_m1_stop) { -+ /* BinM1 correction means changing COM1 */ -+ if (!status_com1 && !(inst->ld_update & COM1_MASK)) { -+ /* Locate BinM1 transition point (if any) */ -+ if (bin_m1_early) { -+ /* request increment c(-1) */ -+ temp = INCREMENT << COM1_SHIFT; -+ inst->ld_update |= temp; -+ } else { -+ /* request decrement c(-1) */ -+ temp = DECREMENT << COM1_SHIFT; -+ inst->ld_update |= temp; -+ } -+ -+ ld_coe_update(inst); -+ s_m->bin_m1_late_early = bin_m1_early; -+ } -+ } -+ } -+} -+ -+static int check_an_link(struct phy_device *phydev) -+{ -+ int val; -+ int timeout = 100; -+ -+ while (timeout--) { -+ val = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); -+ if (val & XFI_AN_LNK_STAT_UP) -+ return 1; -+ usleep_range(100, 500); -+ } -+ -+ return 0; -+} -+ -+static int is_link_training_fail(struct phy_device *phydev) -+{ -+ int val; -+ -+ val = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_PMD_STATUS); -+ if (!(val & TRAIN_FAIL) && (val & RX_STAT)) { -+ /* check LNK_STAT for sure */ -+ if (check_an_link(phydev)) -+ return 0; -+ return 1; -+ } -+ return 1; -+} -+ -+static int check_rx(struct phy_device *phydev) -+{ -+ return phy_read_mmd(phydev, FSL_XFI_PMD, 
FSL_XFI_KR_LP_STATUS) & -+ RX_READY_MASK; -+} -+ -+/* Coefficient values have hardware restrictions */ -+static int is_ld_valid(u32 *ld_coe) -+{ -+ u32 ratio_pst1q = *ld_coe; -+ u32 adpt_eq = *(ld_coe + 1); -+ u32 ratio_preq = *(ld_coe + 2); -+ -+ if ((ratio_pst1q + adpt_eq + ratio_preq) > 48) -+ return 0; -+ -+ if (((ratio_pst1q + adpt_eq + ratio_preq) * 4) >= -+ ((adpt_eq - ratio_pst1q - ratio_preq) * 17)) -+ return 0; -+ -+ if (ratio_preq > ratio_pst1q) -+ return 0; -+ -+ if (ratio_preq > 8) -+ return 0; -+ -+ if (adpt_eq < 26) -+ return 0; -+ -+ if (ratio_pst1q > 16) -+ return 0; -+ -+ return 1; -+} -+ -+#define VAL_INVALID 0xff -+ -+static const u32 preq_table[] = {0x0, 0x1, 0x3, 0x5, -+ 0x7, 0x9, 0xb, 0xc, VAL_INVALID}; -+static const u32 pst1q_table[] = {0x0, 0x1, 0x3, 0x5, -+ 0x7, 0x9, 0xb, 0xd, 0xf, 0x10, VAL_INVALID}; -+ -+static int is_value_allowed(const u32 *val_table, u32 val) -+{ -+ int i; -+ -+ for (i = 0;; i++) { -+ if (*(val_table + i) == VAL_INVALID) -+ return 0; -+ if (*(val_table + i) == val) -+ return 1; -+ } -+} -+ -+static int inc_dec(struct fsl_xgkr_inst *inst, int field, int request) -+{ -+ u32 ld_limit[3], ld_coe[3], step[3]; -+ -+ ld_coe[0] = inst->ratio_pst1q; -+ ld_coe[1] = inst->adpt_eq; -+ ld_coe[2] = inst->ratio_preq; -+ -+ /* Information specific to the Freescale SerDes for 10GBase-KR: -+ * Incrementing C(+1) means *decrementing* RATIO_PST1Q -+ * Incrementing C(0) means incrementing ADPT_EQ -+ * Incrementing C(-1) means *decrementing* RATIO_PREQ -+ */ -+ step[0] = -1; -+ step[1] = 1; -+ step[2] = -1; -+ -+ switch (request) { -+ case INCREMENT: -+ ld_limit[0] = POST_COE_MAX; -+ ld_limit[1] = ZERO_COE_MAX; -+ ld_limit[2] = PRE_COE_MAX; -+ if (ld_coe[field] != ld_limit[field]) -+ ld_coe[field] += step[field]; -+ else -+ /* MAX */ -+ return 2; -+ break; -+ case DECREMENT: -+ ld_limit[0] = POST_COE_MIN; -+ ld_limit[1] = ZERO_COE_MIN; -+ ld_limit[2] = PRE_COE_MIN; -+ if (ld_coe[field] != ld_limit[field]) -+ ld_coe[field] -= step[field]; 
-+ else -+ /* MIN */ -+ return 1; -+ break; -+ default: -+ break; -+ } -+ -+ if (is_ld_valid(ld_coe)) { -+ /* accept new ld */ -+ inst->ratio_pst1q = ld_coe[0]; -+ inst->adpt_eq = ld_coe[1]; -+ inst->ratio_preq = ld_coe[2]; -+ /* only some values for preq and pst1q can be used. -+ * for preq: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xc. -+ * for pst1q: 0x0, 0x1, 0x3, 0x5, 0x7, 0x9, 0xb, 0xd, 0xf, 0x10. -+ */ -+ if (!is_value_allowed((const u32 *)&preq_table, ld_coe[2])) { -+ dev_dbg(&inst->phydev->dev, -+ "preq skipped value: %d.\n", ld_coe[2]); -+ return 0; -+ } -+ -+ if (!is_value_allowed((const u32 *)&pst1q_table, ld_coe[0])) { -+ dev_dbg(&inst->phydev->dev, -+ "pst1q skipped value: %d.\n", ld_coe[0]); -+ return 0; -+ } -+ -+ tune_tecr0(inst); -+ } else { -+ if (request == DECREMENT) -+ /* MIN */ -+ return 1; -+ if (request == INCREMENT) -+ /* MAX */ -+ return 2; -+ } -+ -+ return 0; -+} -+ -+static void min_max_updated(struct fsl_xgkr_inst *inst, int field, int new_ld) -+{ -+ u32 ld_coe[] = {COE_UPDATED, COE_MIN, COE_MAX}; -+ u32 mask, val; -+ -+ switch (field) { -+ case COE_COP1: -+ mask = COP1_MASK; -+ val = ld_coe[new_ld] << COP1_SHIFT; -+ break; -+ case COE_COZ: -+ mask = COZ_MASK; -+ val = ld_coe[new_ld] << COZ_SHIFT; -+ break; -+ case COE_COM: -+ mask = COM1_MASK; -+ val = ld_coe[new_ld] << COM1_SHIFT; -+ break; -+ default: -+ return; -+ break; -+ } -+ -+ inst->ld_status &= ~mask; -+ inst->ld_status |= val; -+} -+ -+static void check_request(struct fsl_xgkr_inst *inst, int request) -+{ -+ int cop1_req, coz_req, com_req; -+ int old_status, new_ld_sta; -+ -+ cop1_req = (request & COP1_MASK) >> COP1_SHIFT; -+ coz_req = (request & COZ_MASK) >> COZ_SHIFT; -+ com_req = (request & COM1_MASK) >> COM1_SHIFT; -+ -+ /* IEEE802.3-2008, 72.6.10.2.5 -+ * Ensure we only act on INCREMENT/DECREMENT when we are in NOT UPDATED! 
-+ */ -+ old_status = inst->ld_status; -+ -+ if (cop1_req && !(inst->ld_status & COP1_MASK)) { -+ new_ld_sta = inc_dec(inst, COE_COP1, cop1_req); -+ min_max_updated(inst, COE_COP1, new_ld_sta); -+ } -+ -+ if (coz_req && !(inst->ld_status & COZ_MASK)) { -+ new_ld_sta = inc_dec(inst, COE_COZ, coz_req); -+ min_max_updated(inst, COE_COZ, new_ld_sta); -+ } -+ -+ if (com_req && !(inst->ld_status & COM1_MASK)) { -+ new_ld_sta = inc_dec(inst, COE_COM, com_req); -+ min_max_updated(inst, COE_COM, new_ld_sta); -+ } -+ -+ if (old_status != inst->ld_status) -+ ld_coe_status(inst); -+ -+} -+ -+static void preset(struct fsl_xgkr_inst *inst) -+{ -+ /* These are all MAX values from the IEEE802.3 perspective! */ -+ inst->ratio_pst1q = POST_COE_MAX; -+ inst->adpt_eq = ZERO_COE_MAX; -+ inst->ratio_preq = PRE_COE_MAX; -+ -+ tune_tecr0(inst); -+ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); -+ inst->ld_status |= COE_MAX << COP1_SHIFT | -+ COE_MAX << COZ_SHIFT | -+ COE_MAX << COM1_SHIFT; -+ ld_coe_status(inst); -+} -+ -+static void initialize(struct fsl_xgkr_inst *inst) -+{ -+ inst->ratio_preq = RATIO_PREQ; -+ inst->ratio_pst1q = RATIO_PST1Q; -+ inst->adpt_eq = RATIO_EQ; -+ -+ tune_tecr0(inst); -+ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); -+ inst->ld_status |= COE_UPDATED << COP1_SHIFT | -+ COE_UPDATED << COZ_SHIFT | -+ COE_UPDATED << COM1_SHIFT; -+ ld_coe_status(inst); -+} -+ -+static void train_rx(struct fsl_xgkr_inst *inst) -+{ -+ struct phy_device *phydev = inst->phydev; -+ int request, old_ld_status; -+ -+ /* get request from LP */ -+ request = phy_read_mmd(phydev, FSL_XFI_PMD, FSL_XFI_KR_LP_CU) & -+ (LD_ALL_MASK); -+ old_ld_status = inst->ld_status; -+ -+ /* IEEE802.3-2008, 72.6.10.2.5 -+ * Ensure we always go to NOT UDPATED for status reporting in -+ * response to HOLD requests. -+ * IEEE802.3-2008, 72.6.10.2.3.1/2 -+ * ... but only if PRESET/INITIALIZE are not active to ensure -+ * we keep status until they are released! 
-+ */ -+ if (!(request & (PRESET_MASK | INIT_MASK))) { -+ if (!(request & COP1_MASK)) -+ inst->ld_status &= ~COP1_MASK; -+ -+ if (!(request & COZ_MASK)) -+ inst->ld_status &= ~COZ_MASK; -+ -+ if (!(request & COM1_MASK)) -+ inst->ld_status &= ~COM1_MASK; -+ -+ if (old_ld_status != inst->ld_status) -+ ld_coe_status(inst); -+ -+ } -+ -+ /* As soon as the LP shows ready, no need to do any more updates. */ -+ if (check_rx(phydev)) { -+ /* LP receiver is ready */ -+ if (inst->ld_status & (COP1_MASK | COZ_MASK | COM1_MASK)) { -+ inst->ld_status &= ~(COP1_MASK | COZ_MASK | COM1_MASK); -+ ld_coe_status(inst); -+ } -+ } else { -+ /* IEEE802.3-2008, 72.6.10.2.3.1/2 -+ * only act on PRESET/INITIALIZE if all status is NOT UPDATED. -+ */ -+ if (request & (PRESET_MASK | INIT_MASK)) { -+ if (!(inst->ld_status & -+ (COP1_MASK | COZ_MASK | COM1_MASK))) { -+ if (request & PRESET_MASK) -+ preset(inst); -+ -+ if (request & INIT_MASK) -+ initialize(inst); -+ } -+ } -+ -+ /* LP Coefficient are not in HOLD */ -+ if (request & REQUEST_MASK) -+ check_request(inst, request & REQUEST_MASK); -+ } -+} -+ -+static void xgkr_wq_state_machine(struct work_struct *work) -+{ -+ struct fsl_xgkr_wk *wk = container_of(work, -+ struct fsl_xgkr_wk, xgkr_wk); -+ struct fsl_xgkr_inst *inst = wk->xgkr_inst; -+ struct training_state_machine *s_m = &inst->t_s_m; -+ struct phy_device *phydev = inst->phydev; -+ int val = 0, i; -+ int an_state, lt_state; -+ unsigned long dead_line; -+ int rx_ok, tx_ok; -+ -+ if (s_m->link_up) { -+ /* check abnormal link down events when link is up, for ex. -+ * the cable is pulled out or link partner is down. 
-+ */ -+ an_state = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); -+ if (!(an_state & XFI_AN_LNK_STAT_UP)) { -+ dev_info(&phydev->dev, -+ "Detect hotplug, restart training!\n"); -+ init_inst(inst, 1); -+ start_an(phydev); -+ } -+ s_m->running = false; -+ return; -+ } -+ -+ if (!s_m->an_ok) { -+ an_state = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_BP_STATUS); -+ if (!(an_state & AN_10GKR_MASK)) { -+ s_m->running = false; -+ return; -+ } else -+ s_m->an_ok = true; -+ } -+ -+ dev_info(&phydev->dev, "is training.\n"); -+ -+ start_lt(phydev); -+ for (i = 0; i < 2;) { -+ /* i < 1 also works, but start one more try immediately when -+ * failed can adjust our training frequency to match other -+ * devices. This can help the link being established more -+ * quickly. -+ */ -+ dead_line = jiffies + msecs_to_jiffies(500); -+ while (time_before(jiffies, dead_line)) { -+ val = phy_read_mmd(phydev, FSL_XFI_PMD, -+ FSL_XFI_KR_PMD_STATUS); -+ if (val & TRAIN_FAIL) { -+ /* LT failed already, reset lane to avoid -+ * it run into hanging, then start LT again. -+ */ -+ reset_gcr0(inst); -+ start_lt(phydev); -+ } else if (val & PMD_STATUS_SUP_STAT && -+ val & PMD_STATUS_FRAME_LOCK) -+ break; -+ usleep_range(100, 500); -+ } -+ -+ if (!(val & PMD_STATUS_FRAME_LOCK && -+ val & PMD_STATUS_SUP_STAT)) { -+ i++; -+ continue; -+ } -+ -+ /* init process */ -+ rx_ok = tx_ok = false; -+ /* the LT should be finished in 500ms, failed or OK. 
*/ -+ dead_line = jiffies + msecs_to_jiffies(500); -+ -+ while (time_before(jiffies, dead_line)) { -+ /* check if the LT is already failed */ -+ lt_state = phy_read_mmd(phydev, FSL_XFI_PMD, -+ FSL_XFI_KR_PMD_STATUS); -+ if (lt_state & TRAIN_FAIL) { -+ reset_gcr0(inst); -+ break; -+ } -+ -+ rx_ok = check_rx(phydev); -+ tx_ok = s_m->tx_complete; -+ -+ if (rx_ok && tx_ok) -+ break; -+ -+ if (!rx_ok) -+ train_rx(inst); -+ -+ if (!tx_ok) -+ train_tx(inst); -+ usleep_range(100, 500); -+ } -+ -+ i++; -+ /* check LT result */ -+ if (is_link_training_fail(phydev)) { -+ /* reset state machine */ -+ init_inst(inst, 0); -+ continue; -+ } else { -+ stop_lt(phydev); -+ s_m->running = false; -+ s_m->link_up = true; -+ dev_info(&phydev->dev, "LT training is SUCCEEDED!\n"); -+ break; -+ } -+ } -+ -+ if (!s_m->link_up) { -+ /* reset state machine */ -+ init_inst(inst, 0); -+ } -+} -+ -+static void xgkr_timer_handle(unsigned long arg) -+{ -+ struct list_head *pos; -+ struct fsl_xgkr_wk *wk; -+ struct fsl_xgkr_inst *xgkr_inst; -+ struct phy_device *phydev; -+ struct training_state_machine *s_m; -+ -+ list_for_each(pos, &fsl_xgkr_list) { -+ wk = list_entry(pos, struct fsl_xgkr_wk, xgkr_list); -+ xgkr_inst = wk->xgkr_inst; -+ phydev = xgkr_inst->phydev; -+ s_m = &xgkr_inst->t_s_m; -+ -+ if (!s_m->running && (!s_m->an_ok || s_m->link_up)) { -+ s_m->running = true; -+ queue_work(xgkr_wq, (struct work_struct *)wk); -+ } -+ } -+ -+ if (!list_empty(&fsl_xgkr_list)) -+ mod_timer(&xgkr_timer, -+ jiffies + msecs_to_jiffies(XGKR_TIMEOUT)); -+} -+ -+static int fsl_xgkr_bind_serdes(const char *lane_name, -+ struct phy_device *phydev) -+{ -+ unsigned long serdes_base; -+ unsigned long lane_base; -+ int i; -+ -+ for (i = 0; i < SERDES_MAX; i++) { -+ if (strstr(lane_name, s_map[i].serdes_name)) { -+ serdes_base = s_map[i].serdes_base; -+ break; -+ } -+ } -+ -+ if (i == SERDES_MAX) -+ goto serdes_err; -+ -+ for (i = 0; i < LANE_MAX; i++) { -+ if (strstr(lane_name, l_map[i].lane_name)) { -+ lane_base = 
l_map[i].lane_base; -+ break; -+ } -+ } -+ -+ if (i == LANE_MAX) -+ goto lane_err; -+ -+ phydev->priv = ioremap(serdes_base + lane_base, -+ sizeof(struct per_lane_ctrl_status)); -+ if (!phydev->priv) -+ return -ENOMEM; -+ -+ return 0; -+ -+serdes_err: -+ dev_err(&phydev->dev, "Unknown SerDes name"); -+ return -EINVAL; -+lane_err: -+ dev_err(&phydev->dev, "Unknown Lane name"); -+ return -EINVAL; -+} -+ -+static int fsl_xgkr_probe(struct phy_device *phydev) -+{ -+ struct fsl_xgkr_inst *xgkr_inst; -+ struct fsl_xgkr_wk *xgkr_wk; -+ struct device_node *child; -+ const char *lane_name; -+ int len; -+ -+ child = phydev->dev.of_node; -+ -+ /* if there is lane-instance property, 10G-KR need to run */ -+ lane_name = of_get_property(child, "lane-instance", &len); -+ if (!lane_name || (fsl_xgkr_bind_serdes(lane_name, phydev))) -+ return 0; -+ -+ xgkr_inst = kzalloc(sizeof(struct fsl_xgkr_inst), GFP_KERNEL); -+ if (!xgkr_inst) -+ goto mem_err1; -+ -+ xgkr_inst->reg_base = phydev->priv; -+ -+ xgkr_inst->bus = phydev->bus; -+ -+ xgkr_inst->phydev = phydev; -+ -+ init_inst(xgkr_inst, 1); -+ -+ xgkr_wk = kzalloc(sizeof(struct fsl_xgkr_wk), GFP_KERNEL); -+ if (!xgkr_wk) -+ goto mem_err2; -+ -+ xgkr_wk->xgkr_inst = xgkr_inst; -+ phydev->priv = xgkr_wk; -+ -+ list_add(&xgkr_wk->xgkr_list, &fsl_xgkr_list); -+ -+ if (!fire_timer) { -+ setup_timer(&xgkr_timer, xgkr_timer_handle, -+ (unsigned long)&fsl_xgkr_list); -+ mod_timer(&xgkr_timer, -+ jiffies + msecs_to_jiffies(XGKR_TIMEOUT)); -+ fire_timer = 1; -+ xgkr_wq = create_workqueue("fsl_xgkr"); -+ } -+ INIT_WORK((struct work_struct *)xgkr_wk, xgkr_wq_state_machine); -+ -+ /* start auto-negotiation to detect link partner */ -+ start_an(phydev); -+ -+ return 0; -+mem_err2: -+ kfree(xgkr_inst); -+mem_err1: -+ dev_err(&phydev->dev, "failed to allocate memory!\n"); -+ return -ENOMEM; -+} -+ -+static int fsl_xgkr_config_init(struct phy_device *phydev) -+{ -+ return 0; -+} -+ -+static int fsl_xgkr_config_aneg(struct phy_device *phydev) -+{ -+ 
return 0; -+} -+ -+static void fsl_xgkr_remove(struct phy_device *phydev) -+{ -+ struct fsl_xgkr_wk *wk = (struct fsl_xgkr_wk *)phydev->priv; -+ struct fsl_xgkr_inst *xgkr_inst = wk->xgkr_inst; -+ struct list_head *this, *next; -+ struct fsl_xgkr_wk *tmp; -+ -+ list_for_each_safe(this, next, &fsl_xgkr_list) { -+ tmp = list_entry(this, struct fsl_xgkr_wk, xgkr_list); -+ if (tmp == wk) { -+ cancel_work_sync((struct work_struct *)wk); -+ list_del(this); -+ } -+ } -+ -+ if (list_empty(&fsl_xgkr_list)) -+ del_timer(&xgkr_timer); -+ -+ if (xgkr_inst->reg_base) -+ iounmap(xgkr_inst->reg_base); -+ -+ kfree(xgkr_inst); -+ kfree(wk); -+} -+ -+static int fsl_xgkr_read_status(struct phy_device *phydev) -+{ -+ int val = phy_read_mmd(phydev, FSL_XFI_AN, FSL_XFI_LNK_STATUS); -+ -+ phydev->speed = SPEED_10000; -+ phydev->duplex = 1; -+ -+ if (val & XFI_AN_LNK_STAT_UP) -+ phydev->link = 1; -+ else -+ phydev->link = 0; -+ -+ return 0; -+} -+ -+static int fsl_xgkr_match_phy_device(struct phy_device *phydev) -+{ -+ return phydev->c45_ids.device_ids[3] == FSL_XFI_PCS_PHY_ID; -+} -+ -+static int fsl_xgkr_match_phy_device2(struct phy_device *phydev) -+{ -+ return phydev->c45_ids.device_ids[3] == FSL_XFI_PCS_PHY_ID2; -+} -+ -+static struct phy_driver fsl_xgkr_driver[] = { -+ { -+ .phy_id = FSL_XFI_PCS_PHY_ID, -+ .name = "Freescale 10G KR Rev1", -+ .phy_id_mask = 0xffffffff, -+ .features = PHY_GBIT_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .probe = fsl_xgkr_probe, -+ .config_init = &fsl_xgkr_config_init, -+ .config_aneg = &fsl_xgkr_config_aneg, -+ .read_status = &fsl_xgkr_read_status, -+ .match_phy_device = fsl_xgkr_match_phy_device, -+ .remove = fsl_xgkr_remove, -+ .driver = { .owner = THIS_MODULE,}, -+ }, -+ { -+ .phy_id = FSL_XFI_PCS_PHY_ID2, -+ .name = "Freescale 10G KR Rev2", -+ .phy_id_mask = 0xffffffff, -+ .features = PHY_GBIT_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .probe = fsl_xgkr_probe, -+ .config_init = &fsl_xgkr_config_init, -+ .config_aneg = &fsl_xgkr_config_aneg, -+ 
.read_status = &fsl_xgkr_read_status, -+ .match_phy_device = fsl_xgkr_match_phy_device2, -+ .remove = fsl_xgkr_remove, -+ .driver = { .owner = THIS_MODULE,}, -+ }, -+}; -+ -+static int __init fsl_xgkr_init(void) -+{ -+ return phy_drivers_register(fsl_xgkr_driver, -+ ARRAY_SIZE(fsl_xgkr_driver)); -+} -+ -+static void __exit fsl_xgkr_exit(void) -+{ -+ phy_drivers_unregister(fsl_xgkr_driver, -+ ARRAY_SIZE(fsl_xgkr_driver)); -+} -+ -+module_init(fsl_xgkr_init); -+module_exit(fsl_xgkr_exit); -+ -+static struct mdio_device_id __maybe_unused freescale_tbl[] = { -+ { FSL_XFI_PCS_PHY_ID, 0xffffffff }, -+ { FSL_XFI_PCS_PHY_ID2, 0xffffffff }, -+ { } -+}; -+ -+MODULE_DEVICE_TABLE(mdio, freescale_tbl); -diff --git a/drivers/net/phy/teranetics.c b/drivers/net/phy/teranetics.c -new file mode 100644 -index 0000000..91e1bec ---- /dev/null -+++ b/drivers/net/phy/teranetics.c -@@ -0,0 +1,135 @@ -+/* -+ * Driver for Teranetics PHY -+ * -+ * Author: Shaohui Xie -+ * -+ * Copyright 2015 Freescale Semiconductor, Inc. -+ * -+ * This file is licensed under the terms of the GNU General Public License -+ * version 2. This program is licensed "as is" without any warranty of any -+ * kind, whether express or implied. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+MODULE_DESCRIPTION("Teranetics PHY driver"); -+MODULE_AUTHOR("Shaohui Xie "); -+MODULE_LICENSE("GPL v2"); -+ -+#define PHY_ID_TN2020 0x00a19410 -+#define MDIO_PHYXS_LNSTAT_SYNC0 0x0001 -+#define MDIO_PHYXS_LNSTAT_SYNC1 0x0002 -+#define MDIO_PHYXS_LNSTAT_SYNC2 0x0004 -+#define MDIO_PHYXS_LNSTAT_SYNC3 0x0008 -+#define MDIO_PHYXS_LNSTAT_ALIGN 0x1000 -+ -+#define MDIO_PHYXS_LANE_READY (MDIO_PHYXS_LNSTAT_SYNC0 | \ -+ MDIO_PHYXS_LNSTAT_SYNC1 | \ -+ MDIO_PHYXS_LNSTAT_SYNC2 | \ -+ MDIO_PHYXS_LNSTAT_SYNC3 | \ -+ MDIO_PHYXS_LNSTAT_ALIGN) -+ -+static int teranetics_config_init(struct phy_device *phydev) -+{ -+ phydev->supported = SUPPORTED_10000baseT_Full; -+ phydev->advertising = SUPPORTED_10000baseT_Full; -+ -+ return 0; -+} -+ -+static int teranetics_soft_reset(struct phy_device *phydev) -+{ -+ return 0; -+} -+ -+static int teranetics_aneg_done(struct phy_device *phydev) -+{ -+ int reg; -+ -+ /* auto negotiation state can only be checked when using copper -+ * port, if using fiber port, just lie it's done. -+ */ -+ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); -+ return (reg < 0) ? 
reg : (reg & BMSR_ANEGCOMPLETE); -+ } -+ -+ return 1; -+} -+ -+static int teranetics_config_aneg(struct phy_device *phydev) -+{ -+ return 0; -+} -+ -+static int teranetics_read_status(struct phy_device *phydev) -+{ -+ int reg; -+ -+ phydev->link = 1; -+ -+ phydev->speed = SPEED_10000; -+ phydev->duplex = DUPLEX_FULL; -+ -+ if (!phy_read_mmd(phydev, MDIO_MMD_VEND1, 93)) { -+ reg = phy_read_mmd(phydev, MDIO_MMD_PHYXS, MDIO_PHYXS_LNSTAT); -+ if (reg < 0 || -+ !((reg & MDIO_PHYXS_LANE_READY) == MDIO_PHYXS_LANE_READY)) { -+ phydev->link = 0; -+ return 0; -+ } -+ -+ reg = phy_read_mmd(phydev, MDIO_MMD_AN, MDIO_STAT1); -+ if (reg < 0 || !(reg & MDIO_STAT1_LSTATUS)) -+ phydev->link = 0; -+ } -+ -+ return 0; -+} -+ -+static int teranetics_match_phy_device(struct phy_device *phydev) -+{ -+ return phydev->c45_ids.device_ids[3] == PHY_ID_TN2020; -+} -+ -+static struct phy_driver teranetics_driver[] = { -+{ -+ .phy_id = PHY_ID_TN2020, -+ .phy_id_mask = 0xffffffff, -+ .name = "Teranetics TN2020", -+ .soft_reset = teranetics_soft_reset, -+ .aneg_done = teranetics_aneg_done, -+ .config_init = teranetics_config_init, -+ .config_aneg = teranetics_config_aneg, -+ .read_status = teranetics_read_status, -+ .match_phy_device = teranetics_match_phy_device, -+ .driver = { .owner = THIS_MODULE,}, -+}, -+}; -+ -+static int __init teranetics_init(void) -+{ -+ return phy_drivers_register(teranetics_driver, -+ ARRAY_SIZE(teranetics_driver)); -+} -+ -+static void __exit teranetics_exit(void) -+{ -+ return phy_drivers_unregister(teranetics_driver, -+ ARRAY_SIZE(teranetics_driver)); -+} -+ -+module_init(teranetics_init); -+module_exit(teranetics_exit); -+ -+static struct mdio_device_id __maybe_unused teranetics_tbl[] = { -+ { PHY_ID_TN2020, 0xffffffff }, -+ { } -+}; -+ -+MODULE_DEVICE_TABLE(mdio, teranetics_tbl); -diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig -index 4690ae9..43ff2b5 100644 ---- a/drivers/staging/Kconfig -+++ b/drivers/staging/Kconfig -@@ -108,4 +108,8 @@ source 
"drivers/staging/skein/Kconfig" - - source "drivers/staging/unisys/Kconfig" - -+source "drivers/staging/fsl-mc/Kconfig" -+ -+source "drivers/staging/fsl-dpaa2/Kconfig" -+ - endif # STAGING -diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile -index c780a0e..a9bd303 100644 ---- a/drivers/staging/Makefile -+++ b/drivers/staging/Makefile -@@ -46,3 +46,5 @@ obj-$(CONFIG_MTD_SPINAND_MT29F) += mt29f_spinand/ - obj-$(CONFIG_GS_FPGABOOT) += gs_fpgaboot/ - obj-$(CONFIG_CRYPTO_SKEIN) += skein/ - obj-$(CONFIG_UNISYSSPAR) += unisys/ -+obj-$(CONFIG_FSL_MC_BUS) += fsl-mc/ -+obj-$(CONFIG_FSL_DPAA2) += fsl-dpaa2/ -diff --git a/drivers/staging/fsl-dpaa2/Kconfig b/drivers/staging/fsl-dpaa2/Kconfig -new file mode 100644 -index 0000000..3fe47bc ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/Kconfig -@@ -0,0 +1,12 @@ -+# -+# Freescale device configuration -+# -+ -+config FSL_DPAA2 -+ bool "Freescale DPAA2 devices" -+ depends on FSL_MC_BUS -+ ---help--- -+ Build drivers for Freescale DataPath Acceleration Architecture (DPAA2) family of SoCs. -+# TODO move DPIO driver in-here? -+source "drivers/staging/fsl-dpaa2/ethernet/Kconfig" -+source "drivers/staging/fsl-dpaa2/mac/Kconfig" -diff --git a/drivers/staging/fsl-dpaa2/Makefile b/drivers/staging/fsl-dpaa2/Makefile -new file mode 100644 -index 0000000..bc687a1 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/Makefile -@@ -0,0 +1,6 @@ -+# -+# Makefile for the Freescale network device drivers. -+# -+ -+obj-$(CONFIG_FSL_DPAA2_ETH) += ethernet/ -+obj-$(CONFIG_FSL_DPAA2_MAC) += mac/ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/Kconfig b/drivers/staging/fsl-dpaa2/ethernet/Kconfig -new file mode 100644 -index 0000000..df91da2 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/Kconfig -@@ -0,0 +1,36 @@ -+# -+# Freescale DPAA Ethernet driver configuration -+# -+# Copyright (C) 2014-2015 Freescale Semiconductor, Inc. 
-+# -+# This file is released under the GPLv2 -+# -+ -+menuconfig FSL_DPAA2_ETH -+ tristate "Freescale DPAA2 Ethernet" -+ depends on FSL_DPAA2 && FSL_MC_BUS && FSL_MC_DPIO -+ select FSL_DPAA2_MAC -+ default y -+ ---help--- -+ Freescale Data Path Acceleration Architecture Ethernet -+ driver, using the Freescale MC bus driver. -+ -+if FSL_DPAA2_ETH -+ -+config FSL_DPAA2_ETH_USE_ERR_QUEUE -+ bool "Enable Rx error queue" -+ default n -+ ---help--- -+ Allow Rx error frames to be enqueued on an error queue -+ and processed by the driver (by default they are dropped -+ in hardware). -+ This may impact performance, recommended for debugging -+ purposes only. -+ -+config FSL_DPAA2_ETH_DEBUGFS -+ depends on DEBUG_FS && FSL_QBMAN_DEBUG -+ bool "Enable debugfs support" -+ default n -+ ---help--- -+ Enable advanced statistics through debugfs interface. -+endif -diff --git a/drivers/staging/fsl-dpaa2/ethernet/Makefile b/drivers/staging/fsl-dpaa2/ethernet/Makefile -new file mode 100644 -index 0000000..74bff15 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/Makefile -@@ -0,0 +1,21 @@ -+# -+# Makefile for the Freescale DPAA Ethernet controllers -+# -+# Copyright (C) 2014-2015 Freescale Semiconductor, Inc. -+# -+# This file is released under the GPLv2 -+# -+ -+ccflags-y += -DVERSION=\"\" -+ -+obj-$(CONFIG_FSL_DPAA2_ETH) += fsl-dpaa2-eth.o -+ -+fsl-dpaa2-eth-objs := dpaa2-eth.o dpaa2-ethtool.o dpni.o -+fsl-dpaa2-eth-${CONFIG_FSL_DPAA2_ETH_DEBUGFS} += dpaa2-eth-debugfs.o -+ -+#Needed by the tracing framework -+CFLAGS_dpaa2-eth.o := -I$(src) -+ -+ifeq ($(CONFIG_FSL_DPAA2_ETH_GCOV),y) -+ GCOV_PROFILE := y -+endif -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c -new file mode 100644 -index 0000000..c397983 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.c -@@ -0,0 +1,317 @@ -+ -+/* Copyright 2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include "dpaa2-eth.h" -+#include "dpaa2-eth-debugfs.h" -+ -+#define DPAA2_ETH_DBG_ROOT "dpaa2-eth" -+ -+static struct dentry *dpaa2_dbg_root; -+ -+static int dpaa2_dbg_cpu_show(struct seq_file *file, void *offset) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; -+ struct rtnl_link_stats64 *stats; -+ struct dpaa2_eth_drv_stats *extras; -+ int i; -+ -+ seq_printf(file, "Per-CPU stats for %s\n", priv->net_dev->name); -+ seq_printf(file, "%s%16s%16s%16s%16s%16s%16s%16s%16s\n", -+ "CPU", "Rx", "Rx Err", "Rx SG", "Tx", "Tx Err", "Tx conf", -+ "Tx SG", "Enq busy"); -+ -+ for_each_online_cpu(i) { -+ stats = per_cpu_ptr(priv->percpu_stats, i); -+ extras = per_cpu_ptr(priv->percpu_extras, i); -+ seq_printf(file, "%3d%16llu%16llu%16llu%16llu%16llu%16llu%16llu%16llu\n", -+ i, -+ stats->rx_packets, -+ stats->rx_errors, -+ extras->rx_sg_frames, -+ stats->tx_packets, -+ stats->tx_errors, -+ extras->tx_conf_frames, -+ extras->tx_sg_frames, -+ extras->tx_portal_busy); -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_dbg_cpu_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; -+ -+ err = single_open(file, dpaa2_dbg_cpu_show, priv); -+ if (err < 0) -+ netdev_err(priv->net_dev, "single_open() failed\n"); -+ -+ return err; -+} -+ -+static const struct file_operations dpaa2_dbg_cpu_ops = { -+ .open = dpaa2_dbg_cpu_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static char *fq_type_to_str(struct dpaa2_eth_fq *fq) -+{ -+ switch (fq->type) { -+ case DPAA2_RX_FQ: -+ return "Rx"; -+ case DPAA2_TX_CONF_FQ: -+ return "Tx conf"; -+ case DPAA2_RX_ERR_FQ: -+ return "Rx err"; -+ default: -+ return "N/A"; -+ } -+} -+ -+static int dpaa2_dbg_fqs_show(struct seq_file *file, void *offset) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; -+ struct dpaa2_eth_fq *fq; -+ u32 fcnt, bcnt; -+ 
int i, err; -+ -+ seq_printf(file, "FQ stats for %s:\n", priv->net_dev->name); -+ seq_printf(file, "%s%16s%16s%16s%16s\n", -+ "VFQID", "CPU", "Type", "Frames", "Pending frames"); -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ err = dpaa2_io_query_fq_count(NULL, fq->fqid, &fcnt, &bcnt); -+ if (err) -+ fcnt = 0; -+ -+ seq_printf(file, "%5d%16d%16s%16llu%16u\n", -+ fq->fqid, -+ fq->target_cpu, -+ fq_type_to_str(fq), -+ fq->stats.frames, -+ fcnt); -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_dbg_fqs_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; -+ -+ err = single_open(file, dpaa2_dbg_fqs_show, priv); -+ if (err < 0) -+ netdev_err(priv->net_dev, "single_open() failed\n"); -+ -+ return err; -+} -+ -+static const struct file_operations dpaa2_dbg_fq_ops = { -+ .open = dpaa2_dbg_fqs_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static int dpaa2_dbg_ch_show(struct seq_file *file, void *offset) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)file->private; -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ seq_printf(file, "Channel stats for %s:\n", priv->net_dev->name); -+ seq_printf(file, "%s%16s%16s%16s%16s%16s\n", -+ "CHID", "CPU", "Deq busy", "Frames", "CDANs", -+ "Avg frm/CDAN"); -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ seq_printf(file, "%4d%16d%16llu%16llu%16llu%16llu\n", -+ ch->ch_id, -+ ch->nctx.desired_cpu, -+ ch->stats.dequeue_portal_busy, -+ ch->stats.frames, -+ ch->stats.cdan, -+ ch->stats.frames / ch->stats.cdan); -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_dbg_ch_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)inode->i_private; -+ -+ err = single_open(file, dpaa2_dbg_ch_show, priv); -+ if (err < 0) -+ netdev_err(priv->net_dev, "single_open() failed\n"); -+ -+ return err; -+} -+ 
-+static const struct file_operations dpaa2_dbg_ch_ops = { -+ .open = dpaa2_dbg_ch_open, -+ .read = seq_read, -+ .llseek = seq_lseek, -+ .release = single_release, -+}; -+ -+static ssize_t dpaa2_dbg_reset_write(struct file *file, const char __user *buf, -+ size_t count, loff_t *offset) -+{ -+ struct dpaa2_eth_priv *priv = file->private_data; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_drv_stats *percpu_extras; -+ struct dpaa2_eth_fq *fq; -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ for_each_online_cpu(i) { -+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i); -+ memset(percpu_stats, 0, sizeof(*percpu_stats)); -+ -+ percpu_extras = per_cpu_ptr(priv->percpu_extras, i); -+ memset(percpu_extras, 0, sizeof(*percpu_extras)); -+ } -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ memset(&fq->stats, 0, sizeof(fq->stats)); -+ } -+ -+ for_each_cpu(i, &priv->dpio_cpumask) { -+ ch = priv->channel[i]; -+ memset(&ch->stats, 0, sizeof(ch->stats)); -+ } -+ -+ return count; -+} -+ -+static const struct file_operations dpaa2_dbg_reset_ops = { -+ .open = simple_open, -+ .write = dpaa2_dbg_reset_write, -+}; -+ -+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) -+{ -+ if (!dpaa2_dbg_root) -+ return; -+ -+ /* Create a directory for the interface */ -+ priv->dbg.dir = debugfs_create_dir(priv->net_dev->name, -+ dpaa2_dbg_root); -+ if (!priv->dbg.dir) { -+ netdev_err(priv->net_dev, "debugfs_create_dir() failed\n"); -+ return; -+ } -+ -+ /* per-cpu stats file */ -+ priv->dbg.cpu_stats = debugfs_create_file("cpu_stats", S_IRUGO, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_cpu_ops); -+ if (!priv->dbg.cpu_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_cpu_stats; -+ } -+ -+ /* per-fq stats file */ -+ priv->dbg.fq_stats = debugfs_create_file("fq_stats", S_IRUGO, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_fq_ops); -+ if (!priv->dbg.fq_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_fq_stats; 
-+ } -+ -+ /* per-fq stats file */ -+ priv->dbg.ch_stats = debugfs_create_file("ch_stats", S_IRUGO, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_ch_ops); -+ if (!priv->dbg.fq_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_ch_stats; -+ } -+ -+ /* reset stats */ -+ priv->dbg.reset_stats = debugfs_create_file("reset_stats", S_IWUSR, -+ priv->dbg.dir, priv, -+ &dpaa2_dbg_reset_ops); -+ if (!priv->dbg.reset_stats) { -+ netdev_err(priv->net_dev, "debugfs_create_file() failed\n"); -+ goto err_reset_stats; -+ } -+ -+ return; -+ -+err_reset_stats: -+ debugfs_remove(priv->dbg.ch_stats); -+err_ch_stats: -+ debugfs_remove(priv->dbg.fq_stats); -+err_fq_stats: -+ debugfs_remove(priv->dbg.cpu_stats); -+err_cpu_stats: -+ debugfs_remove(priv->dbg.dir); -+} -+ -+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) -+{ -+ debugfs_remove(priv->dbg.reset_stats); -+ debugfs_remove(priv->dbg.fq_stats); -+ debugfs_remove(priv->dbg.ch_stats); -+ debugfs_remove(priv->dbg.cpu_stats); -+ debugfs_remove(priv->dbg.dir); -+} -+ -+void dpaa2_eth_dbg_init(void) -+{ -+ dpaa2_dbg_root = debugfs_create_dir(DPAA2_ETH_DBG_ROOT, NULL); -+ if (!dpaa2_dbg_root) { -+ pr_err("DPAA2-ETH: debugfs create failed\n"); -+ return; -+ } -+ -+ pr_info("DPAA2-ETH: debugfs created\n"); -+} -+ -+void __exit dpaa2_eth_dbg_exit(void) -+{ -+ debugfs_remove(dpaa2_dbg_root); -+} -+ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h -new file mode 100644 -index 0000000..7ba706c ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-debugfs.h -@@ -0,0 +1,61 @@ -+/* Copyright 2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef DPAA2_ETH_DEBUGFS_H -+#define DPAA2_ETH_DEBUGFS_H -+ -+#include -+#include "dpaa2-eth.h" -+ -+extern struct dpaa2_eth_priv *priv; -+ -+struct dpaa2_debugfs { -+ struct dentry *dir; -+ struct dentry *fq_stats; -+ struct dentry *ch_stats; -+ struct dentry *cpu_stats; -+ struct dentry *reset_stats; -+}; -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS -+void dpaa2_eth_dbg_init(void); -+void dpaa2_eth_dbg_exit(void); -+void dpaa2_dbg_add(struct dpaa2_eth_priv *priv); -+void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv); -+#else -+static inline void dpaa2_eth_dbg_init(void) {} -+static inline void dpaa2_eth_dbg_exit(void) {} -+static inline void dpaa2_dbg_add(struct dpaa2_eth_priv *priv) {} -+static inline void dpaa2_dbg_remove(struct dpaa2_eth_priv *priv) {} -+#endif /* CONFIG_FSL_DPAA2_ETH_DEBUGFS */ -+ -+#endif /* DPAA2_ETH_DEBUGFS_H */ -+ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h -new file mode 100644 -index 0000000..3b040e8 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth-trace.h -@@ -0,0 +1,185 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#undef TRACE_SYSTEM -+#define TRACE_SYSTEM dpaa2_eth -+ -+#if !defined(_DPAA2_ETH_TRACE_H) || defined(TRACE_HEADER_MULTI_READ) -+#define _DPAA2_ETH_TRACE_H -+ -+#include -+#include -+#include "dpaa2-eth.h" -+#include -+ -+#define TR_FMT "[%s] fd: addr=0x%llx, len=%u, off=%u" -+/* trace_printk format for raw buffer event class */ -+#define TR_BUF_FMT "[%s] vaddr=%p size=%zu dma_addr=%pad map_size=%zu bpid=%d" -+ -+/* This is used to declare a class of events. -+ * individual events of this type will be defined below. -+ */ -+ -+/* Store details about a frame descriptor */ -+DECLARE_EVENT_CLASS(dpaa2_eth_fd, -+ /* Trace function prototype */ -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ /* Repeat argument list here */ -+ TP_ARGS(netdev, fd), -+ -+ /* A structure containing the relevant information we want -+ * to record. Declare name and type for each normal element, -+ * name, type and size for arrays. 
Use __string for variable -+ * length strings. -+ */ -+ TP_STRUCT__entry( -+ __field(u64, fd_addr) -+ __field(u32, fd_len) -+ __field(u16, fd_offset) -+ __string(name, netdev->name) -+ ), -+ -+ /* The function that assigns values to the above declared -+ * fields -+ */ -+ TP_fast_assign( -+ __entry->fd_addr = dpaa2_fd_get_addr(fd); -+ __entry->fd_len = dpaa2_fd_get_len(fd); -+ __entry->fd_offset = dpaa2_fd_get_offset(fd); -+ __assign_str(name, netdev->name); -+ ), -+ -+ /* This is what gets printed when the trace event is -+ * triggered. -+ */ -+ TP_printk(TR_FMT, -+ __get_str(name), -+ __entry->fd_addr, -+ __entry->fd_len, -+ __entry->fd_offset) -+); -+ -+/* Now declare events of the above type. Format is: -+ * DEFINE_EVENT(class, name, proto, args), with proto and args same as for class -+ */ -+ -+/* Tx (egress) fd */ -+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_fd, -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ TP_ARGS(netdev, fd) -+); -+ -+/* Rx fd */ -+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_rx_fd, -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ TP_ARGS(netdev, fd) -+); -+ -+/* Tx confirmation fd */ -+DEFINE_EVENT(dpaa2_eth_fd, dpaa2_tx_conf_fd, -+ TP_PROTO(struct net_device *netdev, -+ const struct dpaa2_fd *fd), -+ -+ TP_ARGS(netdev, fd) -+); -+ -+/* Log data about raw buffers. Useful for tracing DPBP content. */ -+TRACE_EVENT(dpaa2_eth_buf_seed, -+ /* Trace function prototype */ -+ TP_PROTO(struct net_device *netdev, -+ /* virtual address and size */ -+ void *vaddr, -+ size_t size, -+ /* dma map address and size */ -+ dma_addr_t dma_addr, -+ size_t map_size, -+ /* buffer pool id, if relevant */ -+ u16 bpid), -+ -+ /* Repeat argument list here */ -+ TP_ARGS(netdev, vaddr, size, dma_addr, map_size, bpid), -+ -+ /* A structure containing the relevant information we want -+ * to record. Declare name and type for each normal element, -+ * name, type and size for arrays. Use __string for variable -+ * length strings. 
-+ */ -+ TP_STRUCT__entry( -+ __field(void *, vaddr) -+ __field(size_t, size) -+ __field(dma_addr_t, dma_addr) -+ __field(size_t, map_size) -+ __field(u16, bpid) -+ __string(name, netdev->name) -+ ), -+ -+ /* The function that assigns values to the above declared -+ * fields -+ */ -+ TP_fast_assign( -+ __entry->vaddr = vaddr; -+ __entry->size = size; -+ __entry->dma_addr = dma_addr; -+ __entry->map_size = map_size; -+ __entry->bpid = bpid; -+ __assign_str(name, netdev->name); -+ ), -+ -+ /* This is what gets printed when the trace event is -+ * triggered. -+ */ -+ TP_printk(TR_BUF_FMT, -+ __get_str(name), -+ __entry->vaddr, -+ __entry->size, -+ &__entry->dma_addr, -+ __entry->map_size, -+ __entry->bpid) -+); -+ -+/* If only one event of a certain type needs to be declared, use TRACE_EVENT(). -+ * The syntax is the same as for DECLARE_EVENT_CLASS(). -+ */ -+ -+#endif /* _DPAA2_ETH_TRACE_H */ -+ -+/* This must be outside ifdef _DPAA2_ETH_TRACE_H */ -+#undef TRACE_INCLUDE_PATH -+#define TRACE_INCLUDE_PATH . -+#undef TRACE_INCLUDE_FILE -+#define TRACE_INCLUDE_FILE dpaa2-eth-trace -+#include -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -new file mode 100644 -index 0000000..27d1a91 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -@@ -0,0 +1,2836 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "../../fsl-mc/include/mc.h" -+#include "../../fsl-mc/include/mc-sys.h" -+#include "dpaa2-eth.h" -+ -+/* CREATE_TRACE_POINTS only needs to be defined once. 
Other dpa files -+ * using trace events only need to #include -+ */ -+#define CREATE_TRACE_POINTS -+#include "dpaa2-eth-trace.h" -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_AUTHOR("Freescale Semiconductor, Inc"); -+MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); -+ -+/* Oldest DPAA2 objects version we are compatible with */ -+#define DPAA2_SUPPORTED_DPNI_VERSION 6 -+#define DPAA2_SUPPORTED_DPBP_VERSION 2 -+#define DPAA2_SUPPORTED_DPCON_VERSION 2 -+ -+static void validate_rx_csum(struct dpaa2_eth_priv *priv, -+ u32 fd_status, -+ struct sk_buff *skb) -+{ -+ skb_checksum_none_assert(skb); -+ -+ /* HW checksum validation is disabled, nothing to do here */ -+ if (!(priv->net_dev->features & NETIF_F_RXCSUM)) -+ return; -+ -+ /* Read checksum validation bits */ -+ if (!((fd_status & DPAA2_FAS_L3CV) && -+ (fd_status & DPAA2_FAS_L4CV))) -+ return; -+ -+ /* Inform the stack there's no need to compute L3/L4 csum anymore */ -+ skb->ip_summed = CHECKSUM_UNNECESSARY; -+} -+ -+/* Free a received FD. -+ * Not to be used for Tx conf FDs or on any other paths. 
-+ */ -+static void free_rx_fd(struct dpaa2_eth_priv *priv, -+ const struct dpaa2_fd *fd, -+ void *vaddr) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ dma_addr_t addr = dpaa2_fd_get_addr(fd); -+ u8 fd_format = dpaa2_fd_get_format(fd); -+ struct dpaa2_sg_entry *sgt; -+ void *sg_vaddr; -+ int i; -+ -+ /* If single buffer frame, just free the data buffer */ -+ if (fd_format == dpaa2_fd_single) -+ goto free_buf; -+ -+ /* For S/G frames, we first need to free all SG entries */ -+ sgt = vaddr + dpaa2_fd_get_offset(fd); -+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; i++) { -+ dpaa2_sg_le_to_cpu(&sgt[i]); -+ -+ addr = dpaa2_sg_get_addr(&sgt[i]); -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, -+ DMA_FROM_DEVICE); -+ -+ sg_vaddr = phys_to_virt(addr); -+ put_page(virt_to_head_page(sg_vaddr)); -+ -+ if (dpaa2_sg_is_final(&sgt[i])) -+ break; -+ } -+ -+free_buf: -+ put_page(virt_to_head_page(vaddr)); -+} -+ -+/* Build a linear skb based on a single-buffer frame descriptor */ -+static struct sk_buff *build_linear_skb(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ void *fd_vaddr) -+{ -+ struct sk_buff *skb = NULL; -+ u16 fd_offset = dpaa2_fd_get_offset(fd); -+ u32 fd_length = dpaa2_fd_get_len(fd); -+ -+ skb = build_skb(fd_vaddr, DPAA2_ETH_RX_BUF_SIZE + -+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); -+ if (unlikely(!skb)) -+ return NULL; -+ -+ skb_reserve(skb, fd_offset); -+ skb_put(skb, fd_length); -+ -+ ch->buf_count--; -+ -+ return skb; -+} -+ -+/* Build a non linear (fragmented) skb based on a S/G table */ -+static struct sk_buff *build_frag_skb(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ struct dpaa2_sg_entry *sgt) -+{ -+ struct sk_buff *skb = NULL; -+ struct device *dev = priv->net_dev->dev.parent; -+ void *sg_vaddr; -+ dma_addr_t sg_addr; -+ u16 sg_offset; -+ u32 sg_length; -+ struct page *page, *head_page; -+ int page_offset; -+ int i; -+ -+ for (i = 0; i < DPAA2_ETH_MAX_SG_ENTRIES; 
i++) { -+ struct dpaa2_sg_entry *sge = &sgt[i]; -+ -+ dpaa2_sg_le_to_cpu(sge); -+ -+ /* NOTE: We only support SG entries in dpaa2_sg_single format, -+ * but this is the only format we may receive from HW anyway -+ */ -+ -+ /* Get the address and length from the S/G entry */ -+ sg_addr = dpaa2_sg_get_addr(sge); -+ dma_unmap_single(dev, sg_addr, DPAA2_ETH_RX_BUF_SIZE, -+ DMA_FROM_DEVICE); -+ -+ sg_vaddr = phys_to_virt(sg_addr); -+ sg_length = dpaa2_sg_get_len(sge); -+ -+ if (i == 0) { -+ /* We build the skb around the first data buffer */ -+ skb = build_skb(sg_vaddr, DPAA2_ETH_RX_BUF_SIZE + -+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))); -+ if (unlikely(!skb)) -+ return NULL; -+ -+ sg_offset = dpaa2_sg_get_offset(sge); -+ skb_reserve(skb, sg_offset); -+ skb_put(skb, sg_length); -+ } else { -+ /* Rest of the data buffers are stored as skb frags */ -+ page = virt_to_page(sg_vaddr); -+ head_page = virt_to_head_page(sg_vaddr); -+ -+ /* Offset in page (which may be compound). -+ * Data in subsequent SG entries is stored from the -+ * beginning of the buffer, so we don't need to add the -+ * sg_offset. 
-+ */ -+ page_offset = ((unsigned long)sg_vaddr & -+ (PAGE_SIZE - 1)) + -+ (page_address(page) - page_address(head_page)); -+ -+ skb_add_rx_frag(skb, i - 1, head_page, page_offset, -+ sg_length, DPAA2_ETH_RX_BUF_SIZE); -+ } -+ -+ if (dpaa2_sg_is_final(sge)) -+ break; -+ } -+ -+ /* Count all data buffers + SG table buffer */ -+ ch->buf_count -= i + 2; -+ -+ return skb; -+} -+ -+/* Main Rx frame processing routine */ -+static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ struct napi_struct *napi) -+{ -+ dma_addr_t addr = dpaa2_fd_get_addr(fd); -+ u8 fd_format = dpaa2_fd_get_format(fd); -+ void *vaddr; -+ struct sk_buff *skb; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_drv_stats *percpu_extras; -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpaa2_fas *fas; -+ u32 status = 0; -+ -+ /* Tracing point */ -+ trace_dpaa2_rx_fd(priv->net_dev, fd); -+ -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); -+ vaddr = phys_to_virt(addr); -+ -+ prefetch(vaddr + priv->buf_layout.private_data_size); -+ prefetch(vaddr + dpaa2_fd_get_offset(fd)); -+ -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); -+ percpu_extras = this_cpu_ptr(priv->percpu_extras); -+ -+ if (fd_format == dpaa2_fd_single) { -+ skb = build_linear_skb(priv, ch, fd, vaddr); -+ } else if (fd_format == dpaa2_fd_sg) { -+ struct dpaa2_sg_entry *sgt = -+ vaddr + dpaa2_fd_get_offset(fd); -+ skb = build_frag_skb(priv, ch, sgt); -+ put_page(virt_to_head_page(vaddr)); -+ percpu_extras->rx_sg_frames++; -+ percpu_extras->rx_sg_bytes += dpaa2_fd_get_len(fd); -+ } else { -+ /* We don't support any other format */ -+ goto err_frame_format; -+ } -+ -+ if (unlikely(!skb)) -+ goto err_build_skb; -+ -+ prefetch(skb->data); -+ -+ /* Get the timestamp value */ -+ if (priv->ts_rx_en) { -+ struct skb_shared_hwtstamps *shhwtstamps = skb_hwtstamps(skb); -+ u64 *ns = (u64 *)(vaddr + -+ priv->buf_layout.private_data_size + -+ 
sizeof(struct dpaa2_fas)); -+ -+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); -+ memset(shhwtstamps, 0, sizeof(*shhwtstamps)); -+ shhwtstamps->hwtstamp = ns_to_ktime(*ns); -+ } -+ -+ /* Check if we need to validate the L4 csum */ -+ if (likely(fd->simple.frc & DPAA2_FD_FRC_FASV)) { -+ fas = (struct dpaa2_fas *) -+ (vaddr + priv->buf_layout.private_data_size); -+ status = le32_to_cpu(fas->status); -+ validate_rx_csum(priv, status, skb); -+ } -+ -+ skb->protocol = eth_type_trans(skb, priv->net_dev); -+ -+ percpu_stats->rx_packets++; -+ percpu_stats->rx_bytes += skb->len; -+ -+ if (priv->net_dev->features & NETIF_F_GRO) -+ napi_gro_receive(napi, skb); -+ else -+ netif_receive_skb(skb); -+ -+ return; -+err_frame_format: -+err_build_skb: -+ free_rx_fd(priv, fd, vaddr); -+ percpu_stats->rx_dropped++; -+} -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+/* Processing of Rx frames received on the error FQ -+ * We check and print the error bits and then free the frame -+ */ -+static void dpaa2_eth_rx_err(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ struct napi_struct *napi __always_unused) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ dma_addr_t addr = dpaa2_fd_get_addr(fd); -+ void *vaddr; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_fas *fas; -+ u32 status = 0; -+ -+ dma_unmap_single(dev, addr, DPAA2_ETH_RX_BUF_SIZE, DMA_FROM_DEVICE); -+ vaddr = phys_to_virt(addr); -+ -+ if (fd->simple.frc & DPAA2_FD_FRC_FASV) { -+ fas = (struct dpaa2_fas *) -+ (vaddr + priv->buf_layout.private_data_size); -+ status = le32_to_cpu(fas->status); -+ if (net_ratelimit()) -+ netdev_warn(priv->net_dev, "Rx frame error: 0x%08x\n", -+ status & DPAA2_ETH_RX_ERR_MASK); -+ } -+ free_rx_fd(priv, fd, vaddr); -+ -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); -+ percpu_stats->rx_errors++; -+} -+#endif -+ -+/* Consume all frames pull-dequeued into the store. 
This is the simplest way to -+ * make sure we don't accidentally issue another volatile dequeue which would -+ * overwrite (leak) frames already in the store. -+ * -+ * Observance of NAPI budget is not our concern, leaving that to the caller. -+ */ -+static int consume_frames(struct dpaa2_eth_channel *ch) -+{ -+ struct dpaa2_eth_priv *priv = ch->priv; -+ struct dpaa2_eth_fq *fq; -+ struct dpaa2_dq *dq; -+ const struct dpaa2_fd *fd; -+ int cleaned = 0; -+ int is_last; -+ -+ do { -+ dq = dpaa2_io_store_next(ch->store, &is_last); -+ if (unlikely(!dq)) { -+ /* If we're here, we *must* have placed a -+ * volatile dequeue comnmand, so keep reading through -+ * the store until we get some sort of valid response -+ * token (either a valid frame or an "empty dequeue") -+ */ -+ continue; -+ } -+ -+ fd = dpaa2_dq_fd(dq); -+ fq = (struct dpaa2_eth_fq *)dpaa2_dq_fqd_ctx(dq); -+ fq->stats.frames++; -+ -+ fq->consume(priv, ch, fd, &ch->napi); -+ cleaned++; -+ } while (!is_last); -+ -+ return cleaned; -+} -+ -+/* Create a frame descriptor based on a fragmented skb */ -+static int build_sg_fd(struct dpaa2_eth_priv *priv, -+ struct sk_buff *skb, -+ struct dpaa2_fd *fd) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ void *sgt_buf = NULL; -+ dma_addr_t addr; -+ int nr_frags = skb_shinfo(skb)->nr_frags; -+ struct dpaa2_sg_entry *sgt; -+ int i, j, err; -+ int sgt_buf_size; -+ struct scatterlist *scl, *crt_scl; -+ int num_sg; -+ int num_dma_bufs; -+ struct dpaa2_eth_swa *swa; -+ -+ /* Create and map scatterlist. -+ * We don't advertise NETIF_F_FRAGLIST, so skb_to_sgvec() will not have -+ * to go beyond nr_frags+1. 
-+ * Note: We don't support chained scatterlists -+ */ -+ if (unlikely(PAGE_SIZE / sizeof(struct scatterlist) < nr_frags + 1)) -+ return -EINVAL; -+ -+ scl = kcalloc(nr_frags + 1, sizeof(struct scatterlist), GFP_ATOMIC); -+ if (unlikely(!scl)) -+ return -ENOMEM; -+ -+ sg_init_table(scl, nr_frags + 1); -+ num_sg = skb_to_sgvec(skb, scl, 0, skb->len); -+ num_dma_bufs = dma_map_sg(dev, scl, num_sg, DMA_TO_DEVICE); -+ if (unlikely(!num_dma_bufs)) { -+ err = -ENOMEM; -+ goto dma_map_sg_failed; -+ } -+ -+ /* Prepare the HW SGT structure */ -+ sgt_buf_size = priv->tx_data_offset + -+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); -+ sgt_buf = kzalloc(sgt_buf_size + DPAA2_ETH_TX_BUF_ALIGN, GFP_ATOMIC); -+ if (unlikely(!sgt_buf)) { -+ err = -ENOMEM; -+ goto sgt_buf_alloc_failed; -+ } -+ sgt_buf = PTR_ALIGN(sgt_buf, DPAA2_ETH_TX_BUF_ALIGN); -+ -+ /* PTA from egress side is passed as is to the confirmation side so -+ * we need to clear some fields here in order to find consistent values -+ * on TX confirmation. We are clearing FAS (Frame Annotation Status) -+ * field here. -+ */ -+ memset(sgt_buf + priv->buf_layout.private_data_size, 0, 8); -+ -+ sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); -+ -+ /* Fill in the HW SGT structure. -+ * -+ * sgt_buf is zeroed out, so the following fields are implicit -+ * in all sgt entries: -+ * - offset is 0 -+ * - format is 'dpaa2_sg_single' -+ */ -+ for_each_sg(scl, crt_scl, num_dma_bufs, i) { -+ dpaa2_sg_set_addr(&sgt[i], sg_dma_address(crt_scl)); -+ dpaa2_sg_set_len(&sgt[i], sg_dma_len(crt_scl)); -+ } -+ dpaa2_sg_set_final(&sgt[i - 1], true); -+ -+ /* Store the skb backpointer in the SGT buffer. -+ * Fit the scatterlist and the number of buffers alongside the -+ * skb backpointer in the SWA. We'll need all of them on Tx Conf. 
-+ */ -+ swa = (struct dpaa2_eth_swa *)sgt_buf; -+ swa->skb = skb; -+ swa->scl = scl; -+ swa->num_sg = num_sg; -+ swa->num_dma_bufs = num_dma_bufs; -+ -+ /* Hardware expects the SG table to be in little endian format */ -+ for (j = 0; j < i; j++) -+ dpaa2_sg_cpu_to_le(&sgt[j]); -+ -+ /* Separately map the SGT buffer */ -+ addr = dma_map_single(dev, sgt_buf, sgt_buf_size, DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(dev, addr))) { -+ err = -ENOMEM; -+ goto dma_map_single_failed; -+ } -+ dpaa2_fd_set_offset(fd, priv->tx_data_offset); -+ dpaa2_fd_set_format(fd, dpaa2_fd_sg); -+ dpaa2_fd_set_addr(fd, addr); -+ dpaa2_fd_set_len(fd, skb->len); -+ -+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | -+ DPAA2_FD_CTRL_PTV1; -+ -+ return 0; -+ -+dma_map_single_failed: -+ kfree(sgt_buf); -+sgt_buf_alloc_failed: -+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); -+dma_map_sg_failed: -+ kfree(scl); -+ return err; -+} -+ -+/* Create a frame descriptor based on a linear skb */ -+static int build_single_fd(struct dpaa2_eth_priv *priv, -+ struct sk_buff *skb, -+ struct dpaa2_fd *fd) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ u8 *buffer_start; -+ struct sk_buff **skbh; -+ dma_addr_t addr; -+ -+ buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset - -+ DPAA2_ETH_TX_BUF_ALIGN, -+ DPAA2_ETH_TX_BUF_ALIGN); -+ -+ /* PTA from egress side is passed as is to the confirmation side so -+ * we need to clear some fields here in order to find consistent values -+ * on TX confirmation. We are clearing FAS (Frame Annotation Status) -+ * field here. 
-+ */ -+ memset(buffer_start + priv->buf_layout.private_data_size, 0, 8); -+ -+ /* Store a backpointer to the skb at the beginning of the buffer -+ * (in the private data area) such that we can release it -+ * on Tx confirm -+ */ -+ skbh = (struct sk_buff **)buffer_start; -+ *skbh = skb; -+ -+ addr = dma_map_single(dev, buffer_start, -+ skb_tail_pointer(skb) - buffer_start, -+ DMA_TO_DEVICE); -+ if (unlikely(dma_mapping_error(dev, addr))) -+ return -ENOMEM; -+ -+ dpaa2_fd_set_addr(fd, addr); -+ dpaa2_fd_set_offset(fd, (u16)(skb->data - buffer_start)); -+ dpaa2_fd_set_len(fd, skb->len); -+ dpaa2_fd_set_format(fd, dpaa2_fd_single); -+ -+ fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | -+ DPAA2_FD_CTRL_PTV1; -+ -+ return 0; -+} -+ -+/* FD freeing routine on the Tx path -+ * -+ * DMA-unmap and free FD and possibly SGT buffer allocated on Tx. The skb -+ * back-pointed to is also freed. -+ * This can be called either from dpaa2_eth_tx_conf() or on the error path of -+ * dpaa2_eth_tx(). -+ * Optionally, return the frame annotation status word (FAS), which needs -+ * to be checked if we're on the confirmation path. -+ */ -+static void free_tx_fd(const struct dpaa2_eth_priv *priv, -+ const struct dpaa2_fd *fd, -+ u32 *status) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ dma_addr_t fd_addr; -+ struct sk_buff **skbh, *skb; -+ unsigned char *buffer_start; -+ int unmap_size; -+ struct scatterlist *scl; -+ int num_sg, num_dma_bufs; -+ struct dpaa2_eth_swa *swa; -+ bool fd_single; -+ struct dpaa2_fas *fas; -+ -+ fd_addr = dpaa2_fd_get_addr(fd); -+ skbh = phys_to_virt(fd_addr); -+ fd_single = (dpaa2_fd_get_format(fd) == dpaa2_fd_single); -+ -+ if (fd_single) { -+ skb = *skbh; -+ buffer_start = (unsigned char *)skbh; -+ /* Accessing the skb buffer is safe before dma unmap, because -+ * we didn't map the actual skb shell. 
-+ */ -+ dma_unmap_single(dev, fd_addr, -+ skb_tail_pointer(skb) - buffer_start, -+ DMA_TO_DEVICE); -+ } else { -+ swa = (struct dpaa2_eth_swa *)skbh; -+ skb = swa->skb; -+ scl = swa->scl; -+ num_sg = swa->num_sg; -+ num_dma_bufs = swa->num_dma_bufs; -+ -+ /* Unmap the scatterlist */ -+ dma_unmap_sg(dev, scl, num_sg, DMA_TO_DEVICE); -+ kfree(scl); -+ -+ /* Unmap the SGT buffer */ -+ unmap_size = priv->tx_data_offset + -+ sizeof(struct dpaa2_sg_entry) * (1 + num_dma_bufs); -+ dma_unmap_single(dev, fd_addr, unmap_size, DMA_TO_DEVICE); -+ } -+ -+ /* Get the timestamp value */ -+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) { -+ struct skb_shared_hwtstamps shhwtstamps; -+ u64 *ns; -+ -+ memset(&shhwtstamps, 0, sizeof(shhwtstamps)); -+ -+ ns = (u64 *)((void *)skbh + -+ priv->buf_layout.private_data_size + -+ sizeof(struct dpaa2_fas)); -+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); -+ shhwtstamps.hwtstamp = ns_to_ktime(*ns); -+ skb_tstamp_tx(skb, &shhwtstamps); -+ } -+ -+ /* Read the status from the Frame Annotation after we unmap the first -+ * buffer but before we free it. The caller function is responsible -+ * for checking the status value. 
-+ */ -+ if (status && (fd->simple.frc & DPAA2_FD_FRC_FASV)) { -+ fas = (struct dpaa2_fas *) -+ ((void *)skbh + priv->buf_layout.private_data_size); -+ *status = le32_to_cpu(fas->status); -+ } -+ -+ /* Free SGT buffer kmalloc'ed on tx */ -+ if (!fd_single) -+ kfree(skbh); -+ -+ /* Move on with skb release */ -+ dev_kfree_skb(skb); -+} -+ -+static int dpaa2_eth_tx(struct sk_buff *skb, struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct dpaa2_fd fd; -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_drv_stats *percpu_extras; -+ u16 queue_mapping, flow_id; -+ int err, i; -+ -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); -+ percpu_extras = this_cpu_ptr(priv->percpu_extras); -+ -+ if (unlikely(skb_headroom(skb) < DPAA2_ETH_NEEDED_HEADROOM(priv))) { -+ struct sk_buff *ns; -+ -+ ns = skb_realloc_headroom(skb, DPAA2_ETH_NEEDED_HEADROOM(priv)); -+ if (unlikely(!ns)) { -+ percpu_stats->tx_dropped++; -+ goto err_alloc_headroom; -+ } -+ dev_kfree_skb(skb); -+ skb = ns; -+ } -+ -+ /* We'll be holding a back-reference to the skb until Tx Confirmation; -+ * we don't want that overwritten by a concurrent Tx with a cloned skb. -+ */ -+ skb = skb_unshare(skb, GFP_ATOMIC); -+ if (unlikely(!skb)) { -+ /* skb_unshare() has already freed the skb */ -+ percpu_stats->tx_dropped++; -+ return NETDEV_TX_OK; -+ } -+ -+ /* Setup the FD fields */ -+ memset(&fd, 0, sizeof(fd)); -+ -+ if (skb_is_nonlinear(skb)) { -+ err = build_sg_fd(priv, skb, &fd); -+ percpu_extras->tx_sg_frames++; -+ percpu_extras->tx_sg_bytes += skb->len; -+ } else { -+ err = build_single_fd(priv, skb, &fd); -+ } -+ -+ if (unlikely(err)) { -+ percpu_stats->tx_dropped++; -+ goto err_build_fd; -+ } -+ -+ /* Tracing point */ -+ trace_dpaa2_tx_fd(net_dev, &fd); -+ -+ /* TxConf FQ selection primarily based on cpu affinity; this is -+ * non-migratable context, so it's safe to call smp_processor_id(). 
-+ */ -+ queue_mapping = smp_processor_id() % priv->dpni_attrs.max_senders; -+ flow_id = priv->fq[queue_mapping].flowid; -+ for (i = 0; i < (DPAA2_ETH_MAX_TX_QUEUES << 1); i++) { -+ err = dpaa2_io_service_enqueue_qd(NULL, priv->tx_qdid, 0, -+ flow_id, &fd); -+ if (err != -EBUSY) -+ break; -+ } -+ percpu_extras->tx_portal_busy += i; -+ if (unlikely(err < 0)) { -+ percpu_stats->tx_errors++; -+ /* Clean up everything, including freeing the skb */ -+ free_tx_fd(priv, &fd, NULL); -+ } else { -+ percpu_stats->tx_packets++; -+ percpu_stats->tx_bytes += skb->len; -+ } -+ -+ return NETDEV_TX_OK; -+ -+err_build_fd: -+err_alloc_headroom: -+ dev_kfree_skb(skb); -+ -+ return NETDEV_TX_OK; -+} -+ -+/* Tx confirmation frame processing routine */ -+static void dpaa2_eth_tx_conf(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ const struct dpaa2_fd *fd, -+ struct napi_struct *napi __always_unused) -+{ -+ struct rtnl_link_stats64 *percpu_stats; -+ struct dpaa2_eth_drv_stats *percpu_extras; -+ u32 status = 0; -+ -+ /* Tracing point */ -+ trace_dpaa2_tx_conf_fd(priv->net_dev, fd); -+ -+ percpu_extras = this_cpu_ptr(priv->percpu_extras); -+ percpu_extras->tx_conf_frames++; -+ percpu_extras->tx_conf_bytes += dpaa2_fd_get_len(fd); -+ -+ free_tx_fd(priv, fd, &status); -+ -+ if (unlikely(status & DPAA2_ETH_TXCONF_ERR_MASK)) { -+ percpu_stats = this_cpu_ptr(priv->percpu_stats); -+ /* Tx-conf logically pertains to the egress path. 
*/ -+ percpu_stats->tx_errors++; -+ } -+} -+ -+static int set_rx_csum(struct dpaa2_eth_priv *priv, bool enable) -+{ -+ int err; -+ -+ err = dpni_set_l3_chksum_validation(priv->mc_io, 0, priv->mc_token, -+ enable); -+ if (err) { -+ netdev_err(priv->net_dev, -+ "dpni_set_l3_chksum_validation() failed\n"); -+ return err; -+ } -+ -+ err = dpni_set_l4_chksum_validation(priv->mc_io, 0, priv->mc_token, -+ enable); -+ if (err) { -+ netdev_err(priv->net_dev, -+ "dpni_set_l4_chksum_validation failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int set_tx_csum(struct dpaa2_eth_priv *priv, bool enable) -+{ -+ struct dpaa2_eth_fq *fq; -+ struct dpni_tx_flow_cfg tx_flow_cfg; -+ int err; -+ int i; -+ -+ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg)); -+ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN | -+ DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN; -+ tx_flow_cfg.l3_chksum_gen = enable; -+ tx_flow_cfg.l4_chksum_gen = enable; -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ if (fq->type != DPAA2_TX_CONF_FQ) -+ continue; -+ -+ /* The Tx flowid is kept in the corresponding TxConf FQ. 
*/ -+ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token, -+ &fq->flowid, &tx_flow_cfg); -+ if (err) { -+ netdev_err(priv->net_dev, "dpni_set_tx_flow failed\n"); -+ return err; -+ } -+ } -+ -+ return 0; -+} -+ -+/* Perform a single release command to add buffers -+ * to the specified buffer pool -+ */ -+static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; -+ void *buf; -+ dma_addr_t addr; -+ int i; -+ -+ for (i = 0; i < DPAA2_ETH_BUFS_PER_CMD; i++) { -+ /* Allocate buffer visible to WRIOP + skb shared info + -+ * alignment padding -+ */ -+ buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); -+ if (unlikely(!buf)) -+ goto err_alloc; -+ -+ buf = PTR_ALIGN(buf, DPAA2_ETH_RX_BUF_ALIGN); -+ -+ addr = dma_map_single(dev, buf, DPAA2_ETH_RX_BUF_SIZE, -+ DMA_FROM_DEVICE); -+ if (unlikely(dma_mapping_error(dev, addr))) -+ goto err_map; -+ -+ buf_array[i] = addr; -+ -+ /* tracing point */ -+ trace_dpaa2_eth_buf_seed(priv->net_dev, -+ buf, DPAA2_ETH_BUF_RAW_SIZE, -+ addr, DPAA2_ETH_RX_BUF_SIZE, -+ bpid); -+ } -+ -+release_bufs: -+ /* In case the portal is busy, retry until successful. -+ * The buffer release function would only fail if the QBMan portal -+ * was busy, which implies portal contention (i.e. more CPUs than -+ * portals, i.e. GPPs w/o affine DPIOs). For all practical purposes, -+ * there is little we can realistically do, short of giving up - -+ * in which case we'd risk depleting the buffer pool and never again -+ * receiving the Rx interrupt which would kick-start the refill logic. -+ * So just keep retrying, at the risk of being moved to ksoftirqd. 
-+ */ -+ while (dpaa2_io_service_release(NULL, bpid, buf_array, i)) -+ cpu_relax(); -+ return i; -+ -+err_map: -+ put_page(virt_to_head_page(buf)); -+err_alloc: -+ if (i) -+ goto release_bufs; -+ -+ return 0; -+} -+ -+static int seed_pool(struct dpaa2_eth_priv *priv, u16 bpid) -+{ -+ int i, j; -+ int new_count; -+ -+ /* This is the lazy seeding of Rx buffer pools. -+ * dpaa2_add_bufs() is also used on the Rx hotpath and calls -+ * napi_alloc_frag(). The trouble with that is that it in turn ends up -+ * calling this_cpu_ptr(), which mandates execution in atomic context. -+ * Rather than splitting up the code, do a one-off preempt disable. -+ */ -+ preempt_disable(); -+ for (j = 0; j < priv->num_channels; j++) { -+ for (i = 0; i < DPAA2_ETH_NUM_BUFS; -+ i += DPAA2_ETH_BUFS_PER_CMD) { -+ new_count = add_bufs(priv, bpid); -+ priv->channel[j]->buf_count += new_count; -+ -+ if (new_count < DPAA2_ETH_BUFS_PER_CMD) { -+ preempt_enable(); -+ return -ENOMEM; -+ } -+ } -+ } -+ preempt_enable(); -+ -+ return 0; -+} -+ -+/** -+ * Drain the specified number of buffers from the DPNI's private buffer pool. 
-+ * @count must not exceeed DPAA2_ETH_BUFS_PER_CMD -+ */ -+static void drain_bufs(struct dpaa2_eth_priv *priv, int count) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ u64 buf_array[DPAA2_ETH_BUFS_PER_CMD]; -+ void *vaddr; -+ int ret, i; -+ -+ do { -+ ret = dpaa2_io_service_acquire(NULL, priv->dpbp_attrs.bpid, -+ buf_array, count); -+ if (ret < 0) { -+ netdev_err(priv->net_dev, "dpaa2_io_service_acquire() failed\n"); -+ return; -+ } -+ for (i = 0; i < ret; i++) { -+ /* Same logic as on regular Rx path */ -+ dma_unmap_single(dev, buf_array[i], -+ DPAA2_ETH_RX_BUF_SIZE, -+ DMA_FROM_DEVICE); -+ vaddr = phys_to_virt(buf_array[i]); -+ put_page(virt_to_head_page(vaddr)); -+ } -+ } while (ret); -+} -+ -+static void drain_pool(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ -+ drain_bufs(priv, DPAA2_ETH_BUFS_PER_CMD); -+ drain_bufs(priv, 1); -+ -+ for (i = 0; i < priv->num_channels; i++) -+ priv->channel[i]->buf_count = 0; -+} -+ -+/* Function is called from softirq context only, so we don't need to guard -+ * the access to percpu count -+ */ -+static int refill_pool(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch, -+ u16 bpid) -+{ -+ int new_count; -+ -+ if (likely(ch->buf_count >= DPAA2_ETH_REFILL_THRESH)) -+ return 0; -+ -+ do { -+ new_count = add_bufs(priv, bpid); -+ if (unlikely(!new_count)) { -+ /* Out of memory; abort for now, we'll try later on */ -+ break; -+ } -+ ch->buf_count += new_count; -+ } while (ch->buf_count < DPAA2_ETH_NUM_BUFS); -+ -+ if (unlikely(ch->buf_count < DPAA2_ETH_NUM_BUFS)) -+ return -ENOMEM; -+ -+ return 0; -+} -+ -+static int pull_channel(struct dpaa2_eth_channel *ch) -+{ -+ int err; -+ int dequeues = -1; -+ -+ /* Retry while portal is busy */ -+ do { -+ err = dpaa2_io_service_pull_channel(NULL, ch->ch_id, ch->store); -+ dequeues++; -+ cpu_relax(); -+ } while (err == -EBUSY); -+ -+ ch->stats.dequeue_portal_busy += dequeues; -+ if (unlikely(err)) -+ ch->stats.pull_err++; -+ -+ return err; -+} -+ -+/* NAPI poll routine 
-+ * -+ * Frames are dequeued from the QMan channel associated with this NAPI context. -+ * Rx, Tx confirmation and (if configured) Rx error frames all count -+ * towards the NAPI budget. -+ */ -+static int dpaa2_eth_poll(struct napi_struct *napi, int budget) -+{ -+ struct dpaa2_eth_channel *ch; -+ int cleaned = 0, store_cleaned; -+ struct dpaa2_eth_priv *priv; -+ int err; -+ -+ ch = container_of(napi, struct dpaa2_eth_channel, napi); -+ priv = ch->priv; -+ -+ while (cleaned < budget) { -+ err = pull_channel(ch); -+ if (unlikely(err)) -+ break; -+ -+ /* Refill pool if appropriate */ -+ refill_pool(priv, ch, priv->dpbp_attrs.bpid); -+ -+ store_cleaned = consume_frames(ch); -+ cleaned += store_cleaned; -+ -+ /* If we have enough budget left for a full store, -+ * try a new pull dequeue, otherwise we're done here -+ */ -+ if (store_cleaned == 0 || -+ cleaned > budget - DPAA2_ETH_STORE_SIZE) -+ break; -+ } -+ -+ if (cleaned < budget) { -+ napi_complete_done(napi, cleaned); -+ /* Re-enable data available notifications */ -+ do { -+ err = dpaa2_io_service_rearm(NULL, &ch->nctx); -+ cpu_relax(); -+ } while (err == -EBUSY); -+ } -+ -+ ch->stats.frames += cleaned; -+ -+ return cleaned; -+} -+ -+static void enable_ch_napi(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ napi_enable(&ch->napi); -+ } -+} -+ -+static void disable_ch_napi(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_eth_channel *ch; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ napi_disable(&ch->napi); -+ } -+} -+ -+static int link_state_update(struct dpaa2_eth_priv *priv) -+{ -+ struct dpni_link_state state; -+ int err; -+ -+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); -+ if (unlikely(err)) { -+ netdev_err(priv->net_dev, -+ "dpni_get_link_state() failed\n"); -+ return err; -+ } -+ -+ /* Chech link state; speed / duplex changes are not 
treated yet */ -+ if (priv->link_state.up == state.up) -+ return 0; -+ -+ priv->link_state = state; -+ if (state.up) { -+ netif_carrier_on(priv->net_dev); -+ netif_tx_start_all_queues(priv->net_dev); -+ } else { -+ netif_tx_stop_all_queues(priv->net_dev); -+ netif_carrier_off(priv->net_dev); -+ } -+ -+ netdev_info(priv->net_dev, "Link Event: state %s", -+ state.up ? "up" : "down"); -+ -+ return 0; -+} -+ -+static int dpaa2_eth_open(struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err; -+ -+ err = seed_pool(priv, priv->dpbp_attrs.bpid); -+ if (err) { -+ /* Not much to do; the buffer pool, though not filled up, -+ * may still contain some buffers which would enable us -+ * to limp on. -+ */ -+ netdev_err(net_dev, "Buffer seeding failed for DPBP %d (bpid=%d)\n", -+ priv->dpbp_dev->obj_desc.id, priv->dpbp_attrs.bpid); -+ } -+ -+ /* We'll only start the txqs when the link is actually ready; make sure -+ * we don't race against the link up notification, which may come -+ * immediately after dpni_enable(); -+ */ -+ netif_tx_stop_all_queues(net_dev); -+ enable_ch_napi(priv); -+ /* Also, explicitly set carrier off, otherwise netif_carrier_ok() will -+ * return true and cause 'ip link show' to report the LOWER_UP flag, -+ * even though the link notification wasn't even received. -+ */ -+ netif_carrier_off(net_dev); -+ -+ err = dpni_enable(priv->mc_io, 0, priv->mc_token); -+ if (err < 0) { -+ netdev_err(net_dev, "dpni_enable() failed\n"); -+ goto enable_err; -+ } -+ -+ /* If the DPMAC object has already processed the link up interrupt, -+ * we have to learn the link state ourselves. -+ */ -+ err = link_state_update(priv); -+ if (err < 0) { -+ netdev_err(net_dev, "Can't update link state\n"); -+ goto link_state_err; -+ } -+ -+ return 0; -+ -+link_state_err: -+enable_err: -+ disable_ch_napi(priv); -+ drain_pool(priv); -+ return err; -+} -+ -+/* The DPIO store must be empty when we call this, -+ * at the end of every NAPI cycle. 
-+ */ -+static u32 drain_channel(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *ch) -+{ -+ u32 drained = 0, total = 0; -+ -+ do { -+ pull_channel(ch); -+ drained = consume_frames(ch); -+ total += drained; -+ } while (drained); -+ -+ return total; -+} -+ -+static u32 drain_ingress_frames(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_eth_channel *ch; -+ int i; -+ u32 drained = 0; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ drained += drain_channel(priv, ch); -+ } -+ -+ return drained; -+} -+ -+static int dpaa2_eth_stop(struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int dpni_enabled; -+ int retries = 10; -+ u32 drained; -+ -+ netif_tx_stop_all_queues(net_dev); -+ netif_carrier_off(net_dev); -+ -+ /* Loop while dpni_disable() attempts to drain the egress FQs -+ * and confirm them back to us. -+ */ -+ do { -+ dpni_disable(priv->mc_io, 0, priv->mc_token); -+ dpni_is_enabled(priv->mc_io, 0, priv->mc_token, &dpni_enabled); -+ if (dpni_enabled) -+ /* Allow the MC some slack */ -+ msleep(100); -+ } while (dpni_enabled && --retries); -+ if (!retries) { -+ netdev_warn(net_dev, "Retry count exceeded disabling DPNI\n"); -+ /* Must go on and disable NAPI nonetheless, so we don't crash at -+ * the next "ifconfig up" -+ */ -+ } -+ -+ /* Wait for NAPI to complete on every core and disable it. -+ * In particular, this will also prevent NAPI from being rescheduled if -+ * a new CDAN is serviced, effectively discarding the CDAN. We therefore -+ * don't even need to disarm the channels, except perhaps for the case -+ * of a huge coalescing value. 
-+ */ -+ disable_ch_napi(priv); -+ -+ /* Manually drain the Rx and TxConf queues */ -+ drained = drain_ingress_frames(priv); -+ if (drained) -+ netdev_dbg(net_dev, "Drained %d frames.\n", drained); -+ -+ /* Empty the buffer pool */ -+ drain_pool(priv); -+ -+ return 0; -+} -+ -+static int dpaa2_eth_init(struct net_device *net_dev) -+{ -+ u64 supported = 0; -+ u64 not_supported = 0; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ u32 options = priv->dpni_attrs.options; -+ -+ /* Capabilities listing */ -+ supported |= IFF_LIVE_ADDR_CHANGE | IFF_PROMISC | IFF_ALLMULTI; -+ -+ if (options & DPNI_OPT_UNICAST_FILTER) -+ supported |= IFF_UNICAST_FLT; -+ else -+ not_supported |= IFF_UNICAST_FLT; -+ -+ if (options & DPNI_OPT_MULTICAST_FILTER) -+ supported |= IFF_MULTICAST; -+ else -+ not_supported |= IFF_MULTICAST; -+ -+ net_dev->priv_flags |= supported; -+ net_dev->priv_flags &= ~not_supported; -+ -+ /* Features */ -+ net_dev->features = NETIF_F_RXCSUM | -+ NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | -+ NETIF_F_SG | NETIF_F_HIGHDMA | -+ NETIF_F_LLTX; -+ net_dev->hw_features = net_dev->features; -+ -+ return 0; -+} -+ -+static int dpaa2_eth_set_addr(struct net_device *net_dev, void *addr) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct device *dev = net_dev->dev.parent; -+ int err; -+ -+ err = eth_mac_addr(net_dev, addr); -+ if (err < 0) { -+ dev_err(dev, "eth_mac_addr() failed with error %d\n", err); -+ return err; -+ } -+ -+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, -+ net_dev->dev_addr); -+ if (err) { -+ dev_err(dev, "dpni_set_primary_mac_addr() failed (%d)\n", err); -+ return err; -+ } -+ -+ return 0; -+} -+ -+/** Fill in counters maintained by the GPP driver. These may be different from -+ * the hardware counters obtained by ethtool. 
-+ */ -+static struct rtnl_link_stats64 -+*dpaa2_eth_get_stats(struct net_device *net_dev, -+ struct rtnl_link_stats64 *stats) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct rtnl_link_stats64 *percpu_stats; -+ u64 *cpustats; -+ u64 *netstats = (u64 *)stats; -+ int i, j; -+ int num = sizeof(struct rtnl_link_stats64) / sizeof(u64); -+ -+ for_each_possible_cpu(i) { -+ percpu_stats = per_cpu_ptr(priv->percpu_stats, i); -+ cpustats = (u64 *)percpu_stats; -+ for (j = 0; j < num; j++) -+ netstats[j] += cpustats[j]; -+ } -+ -+ return stats; -+} -+ -+static int dpaa2_eth_change_mtu(struct net_device *net_dev, int mtu) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err; -+ -+ if (mtu < 68 || mtu > DPAA2_ETH_MAX_MTU) { -+ netdev_err(net_dev, "Invalid MTU %d. Valid range is: 68..%d\n", -+ mtu, DPAA2_ETH_MAX_MTU); -+ return -EINVAL; -+ } -+ -+ /* Set the maximum Rx frame length to match the transmit side; -+ * account for L2 headers when computing the MFL -+ */ -+ err = dpni_set_max_frame_length(priv->mc_io, 0, priv->mc_token, -+ (u16)DPAA2_ETH_L2_MAX_FRM(mtu)); -+ if (err) { -+ netdev_err(net_dev, "dpni_set_max_frame_length() failed\n"); -+ return err; -+ } -+ -+ net_dev->mtu = mtu; -+ return 0; -+} -+ -+/* Copy mac unicast addresses from @net_dev to @priv. -+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. -+ */ -+static void add_uc_hw_addr(const struct net_device *net_dev, -+ struct dpaa2_eth_priv *priv) -+{ -+ struct netdev_hw_addr *ha; -+ int err; -+ -+ netdev_for_each_uc_addr(ha, net_dev) { -+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, -+ ha->addr); -+ if (err) -+ netdev_warn(priv->net_dev, -+ "Could not add ucast MAC %pM to the filtering table (err %d)\n", -+ ha->addr, err); -+ } -+} -+ -+/* Copy mac multicast addresses from @net_dev to @priv -+ * Its sole purpose is to make dpaa2_eth_set_rx_mode() more readable. 
-+ */ -+static void add_mc_hw_addr(const struct net_device *net_dev, -+ struct dpaa2_eth_priv *priv) -+{ -+ struct netdev_hw_addr *ha; -+ int err; -+ -+ netdev_for_each_mc_addr(ha, net_dev) { -+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, -+ ha->addr); -+ if (err) -+ netdev_warn(priv->net_dev, -+ "Could not add mcast MAC %pM to the filtering table (err %d)\n", -+ ha->addr, err); -+ } -+} -+ -+static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int uc_count = netdev_uc_count(net_dev); -+ int mc_count = netdev_mc_count(net_dev); -+ u8 max_uc = priv->dpni_attrs.max_unicast_filters; -+ u8 max_mc = priv->dpni_attrs.max_multicast_filters; -+ u32 options = priv->dpni_attrs.options; -+ u16 mc_token = priv->mc_token; -+ struct fsl_mc_io *mc_io = priv->mc_io; -+ int err; -+ -+ /* Basic sanity checks; these probably indicate a misconfiguration */ -+ if (!(options & DPNI_OPT_UNICAST_FILTER) && max_uc != 0) -+ netdev_info(net_dev, -+ "max_unicast_filters=%d, DPNI_OPT_UNICAST_FILTER option must be enabled\n", -+ max_uc); -+ if (!(options & DPNI_OPT_MULTICAST_FILTER) && max_mc != 0) -+ netdev_info(net_dev, -+ "max_multicast_filters=%d, DPNI_OPT_MULTICAST_FILTER option must be enabled\n", -+ max_mc); -+ -+ /* Force promiscuous if the uc or mc counts exceed our capabilities. */ -+ if (uc_count > max_uc) { -+ netdev_info(net_dev, -+ "Unicast addr count reached %d, max allowed is %d; forcing promisc\n", -+ uc_count, max_uc); -+ goto force_promisc; -+ } -+ if (mc_count > max_mc) { -+ netdev_info(net_dev, -+ "Multicast addr count reached %d, max allowed is %d; forcing promisc\n", -+ mc_count, max_mc); -+ goto force_mc_promisc; -+ } -+ -+ /* Adjust promisc settings due to flag combinations */ -+ if (net_dev->flags & IFF_PROMISC) -+ goto force_promisc; -+ if (net_dev->flags & IFF_ALLMULTI) { -+ /* First, rebuild unicast filtering table. 
This should be done -+ * in promisc mode, in order to avoid frame loss while we -+ * progressively add entries to the table. -+ * We don't know whether we had been in promisc already, and -+ * making an MC call to find it is expensive; so set uc promisc -+ * nonetheless. -+ */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't set uc promisc\n"); -+ -+ /* Actual uc table reconstruction. */ -+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 0); -+ if (err) -+ netdev_warn(net_dev, "Can't clear uc filters\n"); -+ add_uc_hw_addr(net_dev, priv); -+ -+ /* Finally, clear uc promisc and set mc promisc as requested. */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); -+ if (err) -+ netdev_warn(net_dev, "Can't clear uc promisc\n"); -+ goto force_mc_promisc; -+ } -+ -+ /* Neither unicast, nor multicast promisc will be on... eventually. -+ * For now, rebuild mac filtering tables while forcing both of them on. -+ */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't set uc promisc (%d)\n", err); -+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't set mc promisc (%d)\n", err); -+ -+ /* Actual mac filtering tables reconstruction */ -+ err = dpni_clear_mac_filters(mc_io, 0, mc_token, 1, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't clear mac filters\n"); -+ add_mc_hw_addr(net_dev, priv); -+ add_uc_hw_addr(net_dev, priv); -+ -+ /* Now we can clear both ucast and mcast promisc, without risking -+ * to drop legitimate frames anymore. 
-+ */ -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 0); -+ if (err) -+ netdev_warn(net_dev, "Can't clear ucast promisc\n"); -+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 0); -+ if (err) -+ netdev_warn(net_dev, "Can't clear mcast promisc\n"); -+ -+ return; -+ -+force_promisc: -+ err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't set ucast promisc\n"); -+force_mc_promisc: -+ err = dpni_set_multicast_promisc(mc_io, 0, mc_token, 1); -+ if (err) -+ netdev_warn(net_dev, "Can't set mcast promisc\n"); -+} -+ -+static int dpaa2_eth_set_features(struct net_device *net_dev, -+ netdev_features_t features) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ netdev_features_t changed = features ^ net_dev->features; -+ bool enable; -+ int err; -+ -+ if (changed & NETIF_F_RXCSUM) { -+ enable = !!(features & NETIF_F_RXCSUM); -+ err = set_rx_csum(priv, enable); -+ if (err) -+ return err; -+ } -+ -+ if (changed & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)) { -+ enable = !!(features & (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM)); -+ err = set_tx_csum(priv, enable); -+ if (err) -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int dpaa2_eth_ts_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(dev); -+ struct hwtstamp_config config; -+ -+ if (copy_from_user(&config, rq->ifr_data, sizeof(config))) -+ return -EFAULT; -+ -+ switch (config.tx_type) { -+ case HWTSTAMP_TX_OFF: -+ priv->ts_tx_en = false; -+ break; -+ case HWTSTAMP_TX_ON: -+ priv->ts_tx_en = true; -+ break; -+ default: -+ return -ERANGE; -+ } -+ -+ if (config.rx_filter == HWTSTAMP_FILTER_NONE) { -+ priv->ts_rx_en = false; -+ } else { -+ priv->ts_rx_en = true; -+ /* TS is set for all frame types, not only those requested */ -+ config.rx_filter = HWTSTAMP_FILTER_ALL; -+ } -+ -+ return copy_to_user(rq->ifr_data, &config, sizeof(config)) ? 
-+ -EFAULT : 0; -+} -+ -+static int dpaa2_eth_ioctl(struct net_device *dev, struct ifreq *rq, int cmd) -+{ -+ if (cmd == SIOCSHWTSTAMP) -+ return dpaa2_eth_ts_ioctl(dev, rq, cmd); -+ -+ return -EINVAL; -+} -+ -+static const struct net_device_ops dpaa2_eth_ops = { -+ .ndo_open = dpaa2_eth_open, -+ .ndo_start_xmit = dpaa2_eth_tx, -+ .ndo_stop = dpaa2_eth_stop, -+ .ndo_init = dpaa2_eth_init, -+ .ndo_set_mac_address = dpaa2_eth_set_addr, -+ .ndo_get_stats64 = dpaa2_eth_get_stats, -+ .ndo_change_mtu = dpaa2_eth_change_mtu, -+ .ndo_set_rx_mode = dpaa2_eth_set_rx_mode, -+ .ndo_set_features = dpaa2_eth_set_features, -+ .ndo_do_ioctl = dpaa2_eth_ioctl, -+}; -+ -+static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) -+{ -+ struct dpaa2_eth_channel *ch; -+ -+ ch = container_of(ctx, struct dpaa2_eth_channel, nctx); -+ -+ /* Update NAPI statistics */ -+ ch->stats.cdan++; -+ -+ napi_schedule_irqoff(&ch->napi); -+} -+ -+/* Verify that the FLIB API version of various MC objects is supported -+ * by our driver -+ */ -+static int check_obj_version(struct fsl_mc_device *ls_dev, u16 mc_version) -+{ -+ char *name = ls_dev->obj_desc.type; -+ struct device *dev = &ls_dev->dev; -+ u16 supported_version, flib_version; -+ -+ if (strcmp(name, "dpni") == 0) { -+ flib_version = DPNI_VER_MAJOR; -+ supported_version = DPAA2_SUPPORTED_DPNI_VERSION; -+ } else if (strcmp(name, "dpbp") == 0) { -+ flib_version = DPBP_VER_MAJOR; -+ supported_version = DPAA2_SUPPORTED_DPBP_VERSION; -+ } else if (strcmp(name, "dpcon") == 0) { -+ flib_version = DPCON_VER_MAJOR; -+ supported_version = DPAA2_SUPPORTED_DPCON_VERSION; -+ } else { -+ dev_err(dev, "invalid object type (%s)\n", name); -+ return -EINVAL; -+ } -+ -+ /* Check that the FLIB-defined version matches the one reported by MC */ -+ if (mc_version != flib_version) { -+ dev_err(dev, "%s FLIB version mismatch: MC reports %d, we have %d\n", -+ name, mc_version, flib_version); -+ return -EINVAL; -+ } -+ -+ /* ... 
and that we actually support it */ -+ if (mc_version < supported_version) { -+ dev_err(dev, "Unsupported %s FLIB version (%d)\n", -+ name, mc_version); -+ return -EINVAL; -+ } -+ dev_dbg(dev, "Using %s FLIB version %d\n", name, mc_version); -+ -+ return 0; -+} -+ -+/* Allocate and configure a DPCON object */ -+static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) -+{ -+ struct fsl_mc_device *dpcon; -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpcon_attr attrs; -+ int err; -+ -+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), -+ FSL_MC_POOL_DPCON, &dpcon); -+ if (err) { -+ dev_info(dev, "Not enough DPCONs, will go on as-is\n"); -+ return NULL; -+ } -+ -+ err = dpcon_open(priv->mc_io, 0, dpcon->obj_desc.id, &dpcon->mc_handle); -+ if (err) { -+ dev_err(dev, "dpcon_open() failed\n"); -+ goto err_open; -+ } -+ -+ err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); -+ if (err) { -+ dev_err(dev, "dpcon_get_attributes() failed\n"); -+ goto err_get_attr; -+ } -+ -+ err = check_obj_version(dpcon, attrs.version.major); -+ if (err) -+ goto err_dpcon_ver; -+ -+ err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); -+ if (err) { -+ dev_err(dev, "dpcon_enable() failed\n"); -+ goto err_enable; -+ } -+ -+ return dpcon; -+ -+err_enable: -+err_dpcon_ver: -+err_get_attr: -+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); -+err_open: -+ fsl_mc_object_free(dpcon); -+ -+ return NULL; -+} -+ -+static void free_dpcon(struct dpaa2_eth_priv *priv, -+ struct fsl_mc_device *dpcon) -+{ -+ dpcon_disable(priv->mc_io, 0, dpcon->mc_handle); -+ dpcon_close(priv->mc_io, 0, dpcon->mc_handle); -+ fsl_mc_object_free(dpcon); -+} -+ -+static struct dpaa2_eth_channel * -+alloc_channel(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_eth_channel *channel; -+ struct dpcon_attr attr; -+ struct device *dev = priv->net_dev->dev.parent; -+ int err; -+ -+ channel = kzalloc(sizeof(*channel), GFP_ATOMIC); -+ if (!channel) -+ return NULL; -+ -+ channel->dpcon = 
setup_dpcon(priv); -+ if (!channel->dpcon) -+ goto err_setup; -+ -+ err = dpcon_get_attributes(priv->mc_io, 0, channel->dpcon->mc_handle, -+ &attr); -+ if (err) { -+ dev_err(dev, "dpcon_get_attributes() failed\n"); -+ goto err_get_attr; -+ } -+ -+ channel->dpcon_id = attr.id; -+ channel->ch_id = attr.qbman_ch_id; -+ channel->priv = priv; -+ -+ return channel; -+ -+err_get_attr: -+ free_dpcon(priv, channel->dpcon); -+err_setup: -+ kfree(channel); -+ return NULL; -+} -+ -+static void free_channel(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_channel *channel) -+{ -+ free_dpcon(priv, channel->dpcon); -+ kfree(channel); -+} -+ -+/* DPIO setup: allocate and configure QBMan channels, setup core affinity -+ * and register data availability notifications -+ */ -+static int setup_dpio(struct dpaa2_eth_priv *priv) -+{ -+ struct dpaa2_io_notification_ctx *nctx; -+ struct dpaa2_eth_channel *channel; -+ struct dpcon_notification_cfg dpcon_notif_cfg; -+ struct device *dev = priv->net_dev->dev.parent; -+ int i, err; -+ -+ /* Don't allocate more channels than strictly necessary and assign -+ * them to cores starting from the first one available in -+ * cpu_online_mask. -+ * If the number of channels is lower than the number of cores, -+ * there will be no rx/tx conf processing on the last cores in the mask. 
-+ */ -+ cpumask_clear(&priv->dpio_cpumask); -+ for_each_online_cpu(i) { -+ /* Try to allocate a channel */ -+ channel = alloc_channel(priv); -+ if (!channel) -+ goto err_alloc_ch; -+ -+ priv->channel[priv->num_channels] = channel; -+ -+ nctx = &channel->nctx; -+ nctx->is_cdan = 1; -+ nctx->cb = cdan_cb; -+ nctx->id = channel->ch_id; -+ nctx->desired_cpu = i; -+ -+ /* Register the new context */ -+ err = dpaa2_io_service_register(NULL, nctx); -+ if (err) { -+ dev_info(dev, "No affine DPIO for core %d\n", i); -+ /* This core doesn't have an affine DPIO, but there's -+ * a chance another one does, so keep trying -+ */ -+ free_channel(priv, channel); -+ continue; -+ } -+ -+ /* Register DPCON notification with MC */ -+ dpcon_notif_cfg.dpio_id = nctx->dpio_id; -+ dpcon_notif_cfg.priority = 0; -+ dpcon_notif_cfg.user_ctx = nctx->qman64; -+ err = dpcon_set_notification(priv->mc_io, 0, -+ channel->dpcon->mc_handle, -+ &dpcon_notif_cfg); -+ if (err) { -+ dev_err(dev, "dpcon_set_notification failed()\n"); -+ goto err_set_cdan; -+ } -+ -+ /* If we managed to allocate a channel and also found an affine -+ * DPIO for this core, add it to the final mask -+ */ -+ cpumask_set_cpu(i, &priv->dpio_cpumask); -+ priv->num_channels++; -+ -+ if (priv->num_channels == dpaa2_eth_max_channels(priv)) -+ break; -+ } -+ -+ /* Tx confirmation queues can only be serviced by cpus -+ * with an affine DPIO/channel -+ */ -+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); -+ -+ return 0; -+ -+err_set_cdan: -+ dpaa2_io_service_deregister(NULL, nctx); -+ free_channel(priv, channel); -+err_alloc_ch: -+ if (cpumask_empty(&priv->dpio_cpumask)) { -+ dev_err(dev, "No cpu with an affine DPIO/DPCON\n"); -+ return -ENODEV; -+ } -+ cpumask_copy(&priv->txconf_cpumask, &priv->dpio_cpumask); -+ -+ return 0; -+} -+ -+static void free_dpio(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ struct dpaa2_eth_channel *ch; -+ -+ /* deregister CDAN notifications and free channels */ -+ for (i = 0; i < 
priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ dpaa2_io_service_deregister(NULL, &ch->nctx); -+ free_channel(priv, ch); -+ } -+} -+ -+static struct dpaa2_eth_channel *get_affine_channel(struct dpaa2_eth_priv *priv, -+ int cpu) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) -+ if (priv->channel[i]->nctx.desired_cpu == cpu) -+ return priv->channel[i]; -+ -+ /* We should never get here. Issue a warning and return -+ * the first channel, because it's still better than nothing -+ */ -+ dev_warn(dev, "No affine channel found for cpu %d\n", cpu); -+ -+ return priv->channel[0]; -+} -+ -+static void set_fq_affinity(struct dpaa2_eth_priv *priv) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpaa2_eth_fq *fq; -+ int rx_cpu, txc_cpu; -+ int i; -+ -+ /* For each FQ, pick one channel/CPU to deliver frames to. -+ * This may well change at runtime, either through irqbalance or -+ * through direct user intervention. 
-+ */ -+ rx_cpu = cpumask_first(&priv->dpio_cpumask); -+ txc_cpu = cpumask_first(&priv->txconf_cpumask); -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ switch (fq->type) { -+ case DPAA2_RX_FQ: -+ case DPAA2_RX_ERR_FQ: -+ fq->target_cpu = rx_cpu; -+ rx_cpu = cpumask_next(rx_cpu, &priv->dpio_cpumask); -+ if (rx_cpu >= nr_cpu_ids) -+ rx_cpu = cpumask_first(&priv->dpio_cpumask); -+ break; -+ case DPAA2_TX_CONF_FQ: -+ fq->target_cpu = txc_cpu; -+ txc_cpu = cpumask_next(txc_cpu, &priv->txconf_cpumask); -+ if (txc_cpu >= nr_cpu_ids) -+ txc_cpu = cpumask_first(&priv->txconf_cpumask); -+ break; -+ default: -+ dev_err(dev, "Unknown FQ type: %d\n", fq->type); -+ } -+ fq->channel = get_affine_channel(priv, fq->target_cpu); -+ } -+} -+ -+static void setup_fqs(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ -+ /* We have one TxConf FQ per Tx flow */ -+ for (i = 0; i < priv->dpni_attrs.max_senders; i++) { -+ priv->fq[priv->num_fqs].type = DPAA2_TX_CONF_FQ; -+ priv->fq[priv->num_fqs].consume = dpaa2_eth_tx_conf; -+ priv->fq[priv->num_fqs++].flowid = DPNI_NEW_FLOW_ID; -+ } -+ -+ /* The number of Rx queues (Rx distribution width) may be different from -+ * the number of cores. -+ * We only support one traffic class for now. 
-+ */ -+ for (i = 0; i < dpaa2_eth_queue_count(priv); i++) { -+ priv->fq[priv->num_fqs].type = DPAA2_RX_FQ; -+ priv->fq[priv->num_fqs].consume = dpaa2_eth_rx; -+ priv->fq[priv->num_fqs++].flowid = (u16)i; -+ } -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+ /* We have exactly one Rx error queue per DPNI */ -+ priv->fq[priv->num_fqs].type = DPAA2_RX_ERR_FQ; -+ priv->fq[priv->num_fqs++].consume = dpaa2_eth_rx_err; -+#endif -+ -+ /* For each FQ, decide on which core to process incoming frames */ -+ set_fq_affinity(priv); -+} -+ -+/* Allocate and configure one buffer pool for each interface */ -+static int setup_dpbp(struct dpaa2_eth_priv *priv) -+{ -+ int err; -+ struct fsl_mc_device *dpbp_dev; -+ struct device *dev = priv->net_dev->dev.parent; -+ -+ err = fsl_mc_object_allocate(to_fsl_mc_device(dev), FSL_MC_POOL_DPBP, -+ &dpbp_dev); -+ if (err) { -+ dev_err(dev, "DPBP device allocation failed\n"); -+ return err; -+ } -+ -+ priv->dpbp_dev = dpbp_dev; -+ -+ err = dpbp_open(priv->mc_io, 0, priv->dpbp_dev->obj_desc.id, -+ &dpbp_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpbp_open() failed\n"); -+ goto err_open; -+ } -+ -+ err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpbp_enable() failed\n"); -+ goto err_enable; -+ } -+ -+ err = dpbp_get_attributes(priv->mc_io, 0, dpbp_dev->mc_handle, -+ &priv->dpbp_attrs); -+ if (err) { -+ dev_err(dev, "dpbp_get_attributes() failed\n"); -+ goto err_get_attr; -+ } -+ -+ err = check_obj_version(dpbp_dev, priv->dpbp_attrs.version.major); -+ if (err) -+ goto err_dpbp_ver; -+ -+ return 0; -+ -+err_dpbp_ver: -+err_get_attr: -+ dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); -+err_enable: -+ dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); -+err_open: -+ fsl_mc_object_free(dpbp_dev); -+ -+ return err; -+} -+ -+static void free_dpbp(struct dpaa2_eth_priv *priv) -+{ -+ drain_pool(priv); -+ dpbp_disable(priv->mc_io, 0, priv->dpbp_dev->mc_handle); -+ dpbp_close(priv->mc_io, 0, 
priv->dpbp_dev->mc_handle); -+ fsl_mc_object_free(priv->dpbp_dev); -+} -+ -+/* Configure the DPNI object this interface is associated with */ -+static int setup_dpni(struct fsl_mc_device *ls_dev) -+{ -+ struct device *dev = &ls_dev->dev; -+ struct dpaa2_eth_priv *priv; -+ struct net_device *net_dev; -+ void *dma_mem; -+ int err; -+ -+ net_dev = dev_get_drvdata(dev); -+ priv = netdev_priv(net_dev); -+ -+ priv->dpni_id = ls_dev->obj_desc.id; -+ -+ /* get a handle for the DPNI object */ -+ err = dpni_open(priv->mc_io, 0, priv->dpni_id, &priv->mc_token); -+ if (err) { -+ dev_err(dev, "dpni_open() failed\n"); -+ goto err_open; -+ } -+ -+ ls_dev->mc_io = priv->mc_io; -+ ls_dev->mc_handle = priv->mc_token; -+ -+ /* Map a memory region which will be used by MC to pass us an -+ * attribute structure -+ */ -+ dma_mem = kzalloc(DPAA2_EXT_CFG_SIZE, GFP_DMA | GFP_KERNEL); -+ if (!dma_mem) -+ goto err_alloc; -+ -+ priv->dpni_attrs.ext_cfg_iova = dma_map_single(dev, dma_mem, -+ DPAA2_EXT_CFG_SIZE, -+ DMA_FROM_DEVICE); -+ if (dma_mapping_error(dev, priv->dpni_attrs.ext_cfg_iova)) { -+ dev_err(dev, "dma mapping for dpni_ext_cfg failed\n"); -+ goto err_dma_map; -+ } -+ -+ err = dpni_get_attributes(priv->mc_io, 0, priv->mc_token, -+ &priv->dpni_attrs); -+ -+ /* We'll check the return code after unmapping, as we need to -+ * do this anyway -+ */ -+ dma_unmap_single(dev, priv->dpni_attrs.ext_cfg_iova, -+ DPAA2_EXT_CFG_SIZE, DMA_FROM_DEVICE); -+ -+ if (err) { -+ dev_err(dev, "dpni_get_attributes() failed (err=%d)\n", err); -+ goto err_get_attr; -+ } -+ -+ err = check_obj_version(ls_dev, priv->dpni_attrs.version.major); -+ if (err) -+ goto err_dpni_ver; -+ -+ memset(&priv->dpni_ext_cfg, 0, sizeof(priv->dpni_ext_cfg)); -+ err = dpni_extract_extended_cfg(&priv->dpni_ext_cfg, dma_mem); -+ if (err) { -+ dev_err(dev, "dpni_extract_extended_cfg() failed\n"); -+ goto err_extract; -+ } -+ -+ /* Configure our buffers' layout */ -+ priv->buf_layout.options = DPNI_BUF_LAYOUT_OPT_PARSER_RESULT | -+ 
DPNI_BUF_LAYOUT_OPT_FRAME_STATUS | -+ DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE | -+ DPNI_BUF_LAYOUT_OPT_DATA_ALIGN; -+ priv->buf_layout.pass_parser_result = true; -+ priv->buf_layout.pass_frame_status = true; -+ priv->buf_layout.private_data_size = DPAA2_ETH_SWA_SIZE; -+ /* HW erratum mandates data alignment in multiples of 256 */ -+ priv->buf_layout.data_align = DPAA2_ETH_RX_BUF_ALIGN; -+ -+ /* rx buffer */ -+ err = dpni_set_rx_buffer_layout(priv->mc_io, 0, priv->mc_token, -+ &priv->buf_layout); -+ if (err) { -+ dev_err(dev, "dpni_set_rx_buffer_layout() failed"); -+ goto err_buf_layout; -+ } -+ /* tx buffer: remove Rx-only options */ -+ priv->buf_layout.options &= ~(DPNI_BUF_LAYOUT_OPT_DATA_ALIGN | -+ DPNI_BUF_LAYOUT_OPT_PARSER_RESULT); -+ err = dpni_set_tx_buffer_layout(priv->mc_io, 0, priv->mc_token, -+ &priv->buf_layout); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_buffer_layout() failed"); -+ goto err_buf_layout; -+ } -+ /* tx-confirm: same options as tx */ -+ priv->buf_layout.options &= ~DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE; -+ priv->buf_layout.options |= DPNI_BUF_LAYOUT_OPT_TIMESTAMP; -+ priv->buf_layout.pass_timestamp = 1; -+ err = dpni_set_tx_conf_buffer_layout(priv->mc_io, 0, priv->mc_token, -+ &priv->buf_layout); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_conf_buffer_layout() failed"); -+ goto err_buf_layout; -+ } -+ /* Now that we've set our tx buffer layout, retrieve the minimum -+ * required tx data offset. -+ */ -+ err = dpni_get_tx_data_offset(priv->mc_io, 0, priv->mc_token, -+ &priv->tx_data_offset); -+ if (err) { -+ dev_err(dev, "dpni_get_tx_data_offset() failed\n"); -+ goto err_data_offset; -+ } -+ -+ if ((priv->tx_data_offset % 64) != 0) -+ dev_warn(dev, "Tx data offset (%d) not a multiple of 64B", -+ priv->tx_data_offset); -+ -+ /* Accommodate SWA space. 
*/ -+ priv->tx_data_offset += DPAA2_ETH_SWA_SIZE; -+ -+ /* allocate classification rule space */ -+ priv->cls_rule = kzalloc(sizeof(*priv->cls_rule) * -+ DPAA2_CLASSIFIER_ENTRY_COUNT, GFP_KERNEL); -+ if (!priv->cls_rule) -+ goto err_cls_rule; -+ -+ kfree(dma_mem); -+ -+ return 0; -+ -+err_cls_rule: -+err_data_offset: -+err_buf_layout: -+err_extract: -+err_dpni_ver: -+err_get_attr: -+err_dma_map: -+ kfree(dma_mem); -+err_alloc: -+ dpni_close(priv->mc_io, 0, priv->mc_token); -+err_open: -+ return err; -+} -+ -+static void free_dpni(struct dpaa2_eth_priv *priv) -+{ -+ int err; -+ -+ err = dpni_reset(priv->mc_io, 0, priv->mc_token); -+ if (err) -+ netdev_warn(priv->net_dev, "dpni_reset() failed (err %d)\n", -+ err); -+ -+ dpni_close(priv->mc_io, 0, priv->mc_token); -+} -+ -+static int setup_rx_flow(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpni_queue_attr rx_queue_attr; -+ struct dpni_queue_cfg queue_cfg; -+ int err; -+ -+ memset(&queue_cfg, 0, sizeof(queue_cfg)); -+ queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST | -+ DPNI_QUEUE_OPT_TAILDROP_THRESHOLD; -+ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; -+ queue_cfg.dest_cfg.priority = 1; -+ queue_cfg.user_ctx = (u64)fq; -+ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; -+ queue_cfg.tail_drop_threshold = DPAA2_ETH_TAILDROP_THRESH; -+ err = dpni_set_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid, -+ &queue_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_rx_flow() failed\n"); -+ return err; -+ } -+ -+ /* Get the actual FQID that was assigned by MC */ -+ err = dpni_get_rx_flow(priv->mc_io, 0, priv->mc_token, 0, fq->flowid, -+ &rx_queue_attr); -+ if (err) { -+ dev_err(dev, "dpni_get_rx_flow() failed\n"); -+ return err; -+ } -+ fq->fqid = rx_queue_attr.fqid; -+ -+ return 0; -+} -+ -+static int setup_tx_flow(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) -+{ -+ struct device *dev = priv->net_dev->dev.parent; 
-+ struct dpni_tx_flow_cfg tx_flow_cfg; -+ struct dpni_tx_conf_cfg tx_conf_cfg; -+ struct dpni_tx_conf_attr tx_conf_attr; -+ int err; -+ -+ memset(&tx_flow_cfg, 0, sizeof(tx_flow_cfg)); -+ tx_flow_cfg.options = DPNI_TX_FLOW_OPT_TX_CONF_ERROR; -+ tx_flow_cfg.use_common_tx_conf_queue = 0; -+ err = dpni_set_tx_flow(priv->mc_io, 0, priv->mc_token, -+ &fq->flowid, &tx_flow_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_flow() failed\n"); -+ return err; -+ } -+ -+ tx_conf_cfg.errors_only = 0; -+ tx_conf_cfg.queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | -+ DPNI_QUEUE_OPT_DEST; -+ tx_conf_cfg.queue_cfg.user_ctx = (u64)fq; -+ tx_conf_cfg.queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; -+ tx_conf_cfg.queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; -+ tx_conf_cfg.queue_cfg.dest_cfg.priority = 0; -+ -+ err = dpni_set_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid, -+ &tx_conf_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_conf() failed\n"); -+ return err; -+ } -+ -+ err = dpni_get_tx_conf(priv->mc_io, 0, priv->mc_token, fq->flowid, -+ &tx_conf_attr); -+ if (err) { -+ dev_err(dev, "dpni_get_tx_conf() failed\n"); -+ return err; -+ } -+ -+ fq->fqid = tx_conf_attr.queue_attr.fqid; -+ -+ return 0; -+} -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, -+ struct dpaa2_eth_fq *fq) -+{ -+ struct dpni_queue_attr queue_attr; -+ struct dpni_queue_cfg queue_cfg; -+ int err; -+ -+ /* Configure the Rx error queue to generate CDANs, -+ * just like the Rx queues -+ */ -+ queue_cfg.options = DPNI_QUEUE_OPT_USER_CTX | DPNI_QUEUE_OPT_DEST; -+ queue_cfg.dest_cfg.dest_type = DPNI_DEST_DPCON; -+ queue_cfg.dest_cfg.priority = 1; -+ queue_cfg.user_ctx = (u64)fq; -+ queue_cfg.dest_cfg.dest_id = fq->channel->dpcon_id; -+ err = dpni_set_rx_err_queue(priv->mc_io, 0, priv->mc_token, &queue_cfg); -+ if (err) { -+ netdev_err(priv->net_dev, "dpni_set_rx_err_queue() failed\n"); -+ return err; -+ } -+ -+ /* Get the FQID */ -+ err = 
dpni_get_rx_err_queue(priv->mc_io, 0, priv->mc_token, -+ &queue_attr); -+ if (err) { -+ netdev_err(priv->net_dev, "dpni_get_rx_err_queue() failed\n"); -+ return err; -+ } -+ fq->fqid = queue_attr.fqid; -+ -+ return 0; -+} -+#endif -+ -+/* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, -+ * frame queues and channels -+ */ -+static int bind_dpni(struct dpaa2_eth_priv *priv) -+{ -+ struct net_device *net_dev = priv->net_dev; -+ struct device *dev = net_dev->dev.parent; -+ struct dpni_pools_cfg pools_params; -+ struct dpni_error_cfg err_cfg; -+ int err = 0; -+ int i; -+ -+ pools_params.num_dpbp = 1; -+ pools_params.pools[0].dpbp_id = priv->dpbp_dev->obj_desc.id; -+ pools_params.pools[0].backup_pool = 0; -+ pools_params.pools[0].buffer_size = DPAA2_ETH_RX_BUF_SIZE; -+ err = dpni_set_pools(priv->mc_io, 0, priv->mc_token, &pools_params); -+ if (err) { -+ dev_err(dev, "dpni_set_pools() failed\n"); -+ return err; -+ } -+ -+ check_fs_support(net_dev); -+ -+ /* have the interface implicitly distribute traffic based on supported -+ * header fields -+ */ -+ if (dpaa2_eth_hash_enabled(priv)) { -+ err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED); -+ if (err) -+ return err; -+ } -+ -+ /* Configure handling of error frames */ -+ err_cfg.errors = DPAA2_ETH_RX_ERR_MASK; -+ err_cfg.set_frame_annotation = 1; -+#ifdef CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+ err_cfg.error_action = DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE; -+#else -+ err_cfg.error_action = DPNI_ERROR_ACTION_DISCARD; -+#endif -+ err = dpni_set_errors_behavior(priv->mc_io, 0, priv->mc_token, -+ &err_cfg); -+ if (err) { -+ dev_err(dev, "dpni_set_errors_behavior failed\n"); -+ return err; -+ } -+ -+ /* Configure Rx and Tx conf queues to generate CDANs */ -+ for (i = 0; i < priv->num_fqs; i++) { -+ switch (priv->fq[i].type) { -+ case DPAA2_RX_FQ: -+ err = setup_rx_flow(priv, &priv->fq[i]); -+ break; -+ case DPAA2_TX_CONF_FQ: -+ err = setup_tx_flow(priv, &priv->fq[i]); -+ break; -+#ifdef 
CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE -+ case DPAA2_RX_ERR_FQ: -+ err = setup_rx_err_flow(priv, &priv->fq[i]); -+ break; -+#endif -+ default: -+ dev_err(dev, "Invalid FQ type %d\n", priv->fq[i].type); -+ return -EINVAL; -+ } -+ if (err) -+ return err; -+ } -+ -+ err = dpni_get_qdid(priv->mc_io, 0, priv->mc_token, &priv->tx_qdid); -+ if (err) { -+ dev_err(dev, "dpni_get_qdid() failed\n"); -+ return err; -+ } -+ -+ return 0; -+} -+ -+/* Allocate rings for storing incoming frame descriptors */ -+static int alloc_rings(struct dpaa2_eth_priv *priv) -+{ -+ struct net_device *net_dev = priv->net_dev; -+ struct device *dev = net_dev->dev.parent; -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ priv->channel[i]->store = -+ dpaa2_io_store_create(DPAA2_ETH_STORE_SIZE, dev); -+ if (!priv->channel[i]->store) { -+ netdev_err(net_dev, "dpaa2_io_store_create() failed\n"); -+ goto err_ring; -+ } -+ } -+ -+ return 0; -+ -+err_ring: -+ for (i = 0; i < priv->num_channels; i++) { -+ if (!priv->channel[i]->store) -+ break; -+ dpaa2_io_store_destroy(priv->channel[i]->store); -+ } -+ -+ return -ENOMEM; -+} -+ -+static void free_rings(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ -+ for (i = 0; i < priv->num_channels; i++) -+ dpaa2_io_store_destroy(priv->channel[i]->store); -+} -+ -+static int netdev_init(struct net_device *net_dev) -+{ -+ int err; -+ struct device *dev = net_dev->dev.parent; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ u8 mac_addr[ETH_ALEN]; -+ u8 bcast_addr[ETH_ALEN]; -+ -+ net_dev->netdev_ops = &dpaa2_eth_ops; -+ -+ /* If the DPNI attributes contain an all-0 mac_addr, -+ * set a random hardware address -+ */ -+ err = dpni_get_primary_mac_addr(priv->mc_io, 0, priv->mc_token, -+ mac_addr); -+ if (err) { -+ dev_err(dev, "dpni_get_primary_mac_addr() failed (%d)", err); -+ return err; -+ } -+ if (is_zero_ether_addr(mac_addr)) { -+ /* Fills in net_dev->dev_addr, as required by -+ * register_netdevice() -+ */ -+ eth_hw_addr_random(net_dev); -+ /* Make 
the user aware, without cluttering the boot log */ -+ pr_info_once(KBUILD_MODNAME " device(s) have all-zero hwaddr, replaced with random"); -+ err = dpni_set_primary_mac_addr(priv->mc_io, 0, priv->mc_token, -+ net_dev->dev_addr); -+ if (err) { -+ dev_err(dev, "dpni_set_primary_mac_addr(): %d\n", err); -+ return err; -+ } -+ /* Override NET_ADDR_RANDOM set by eth_hw_addr_random(); for all -+ * practical purposes, this will be our "permanent" mac address, -+ * at least until the next reboot. This move will also permit -+ * register_netdevice() to properly fill up net_dev->perm_addr. -+ */ -+ net_dev->addr_assign_type = NET_ADDR_PERM; -+ } else { -+ /* NET_ADDR_PERM is default, all we have to do is -+ * fill in the device addr. -+ */ -+ memcpy(net_dev->dev_addr, mac_addr, net_dev->addr_len); -+ } -+ -+ /* Explicitly add the broadcast address to the MAC filtering table; -+ * the MC won't do that for us. -+ */ -+ eth_broadcast_addr(bcast_addr); -+ err = dpni_add_mac_addr(priv->mc_io, 0, priv->mc_token, bcast_addr); -+ if (err) { -+ dev_warn(dev, "dpni_add_mac_addr() failed (%d)\n", err); -+ /* Won't return an error; at least, we'd have egress traffic */ -+ } -+ -+ /* Reserve enough space to align buffer as per hardware requirement; -+ * NOTE: priv->tx_data_offset MUST be initialized at this point. 
-+ */ -+ net_dev->needed_headroom = DPAA2_ETH_NEEDED_HEADROOM(priv); -+ -+ /* Our .ndo_init will be called herein */ -+ err = register_netdev(net_dev); -+ if (err < 0) { -+ dev_err(dev, "register_netdev() = %d\n", err); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static int poll_link_state(void *arg) -+{ -+ struct dpaa2_eth_priv *priv = (struct dpaa2_eth_priv *)arg; -+ int err; -+ -+ while (!kthread_should_stop()) { -+ err = link_state_update(priv); -+ if (unlikely(err)) -+ return err; -+ -+ msleep(DPAA2_ETH_LINK_STATE_REFRESH); -+ } -+ -+ return 0; -+} -+ -+static irqreturn_t dpni_irq0_handler(int irq_num, void *arg) -+{ -+ return IRQ_WAKE_THREAD; -+} -+ -+static irqreturn_t dpni_irq0_handler_thread(int irq_num, void *arg) -+{ -+ u8 irq_index = DPNI_IRQ_INDEX; -+ u32 status, clear = 0; -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *dpni_dev = to_fsl_mc_device(dev); -+ struct net_device *net_dev = dev_get_drvdata(dev); -+ int err; -+ -+ err = dpni_get_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, -+ irq_index, &status); -+ if (unlikely(err)) { -+ netdev_err(net_dev, "Can't get irq status (err %d)", err); -+ clear = 0xffffffff; -+ goto out; -+ } -+ -+ if (status & DPNI_IRQ_EVENT_LINK_CHANGED) { -+ clear |= DPNI_IRQ_EVENT_LINK_CHANGED; -+ link_state_update(netdev_priv(net_dev)); -+ } -+ -+out: -+ dpni_clear_irq_status(dpni_dev->mc_io, 0, dpni_dev->mc_handle, -+ irq_index, clear); -+ return IRQ_HANDLED; -+} -+ -+static int setup_irqs(struct fsl_mc_device *ls_dev) -+{ -+ int err = 0; -+ struct fsl_mc_device_irq *irq; -+ u8 irq_index = DPNI_IRQ_INDEX; -+ u32 mask = DPNI_IRQ_EVENT_LINK_CHANGED; -+ -+ err = fsl_mc_allocate_irqs(ls_dev); -+ if (err) { -+ dev_err(&ls_dev->dev, "MC irqs allocation failed\n"); -+ return err; -+ } -+ -+ irq = ls_dev->irqs[0]; -+ err = devm_request_threaded_irq(&ls_dev->dev, irq->irq_number, -+ dpni_irq0_handler, -+ dpni_irq0_handler_thread, -+ IRQF_NO_SUSPEND | IRQF_ONESHOT, -+ dev_name(&ls_dev->dev), 
&ls_dev->dev); -+ if (err < 0) { -+ dev_err(&ls_dev->dev, "devm_request_threaded_irq(): %d", err); -+ goto free_mc_irq; -+ } -+ -+ err = dpni_set_irq_mask(ls_dev->mc_io, 0, ls_dev->mc_handle, -+ irq_index, mask); -+ if (err < 0) { -+ dev_err(&ls_dev->dev, "dpni_set_irq_mask(): %d", err); -+ goto free_irq; -+ } -+ -+ err = dpni_set_irq_enable(ls_dev->mc_io, 0, ls_dev->mc_handle, -+ irq_index, 1); -+ if (err < 0) { -+ dev_err(&ls_dev->dev, "dpni_set_irq_enable(): %d", err); -+ goto free_irq; -+ } -+ -+ return 0; -+ -+free_irq: -+ devm_free_irq(&ls_dev->dev, irq->irq_number, &ls_dev->dev); -+free_mc_irq: -+ fsl_mc_free_irqs(ls_dev); -+ -+ return err; -+} -+ -+static void add_ch_napi(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ struct dpaa2_eth_channel *ch; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ /* NAPI weight *MUST* be a multiple of DPAA2_ETH_STORE_SIZE */ -+ netif_napi_add(priv->net_dev, &ch->napi, dpaa2_eth_poll, -+ NAPI_POLL_WEIGHT); -+ } -+} -+ -+static void del_ch_napi(struct dpaa2_eth_priv *priv) -+{ -+ int i; -+ struct dpaa2_eth_channel *ch; -+ -+ for (i = 0; i < priv->num_channels; i++) { -+ ch = priv->channel[i]; -+ netif_napi_del(&ch->napi); -+ } -+} -+ -+/* SysFS support */ -+static ssize_t dpaa2_eth_show_tx_shaping(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ /* No MC API for getting the shaping config. We're stateful. 
*/ -+ struct dpni_tx_shaping_cfg *scfg = &priv->shaping_cfg; -+ -+ return sprintf(buf, "%u %hu\n", scfg->rate_limit, scfg->max_burst_size); -+} -+ -+static ssize_t dpaa2_eth_write_tx_shaping(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count) -+{ -+ int err, items; -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ struct dpni_tx_shaping_cfg scfg; -+ -+ items = sscanf(buf, "%u %hu", &scfg.rate_limit, &scfg.max_burst_size); -+ if (items != 2) { -+ pr_err("Expected format: \"rate_limit(Mbps) max_burst_size(bytes)\"\n"); -+ return -EINVAL; -+ } -+ /* Size restriction as per MC API documentation */ -+ if (scfg.max_burst_size > 64000) { -+ pr_err("max_burst_size must be <= 64000, thanks.\n"); -+ return -EINVAL; -+ } -+ -+ err = dpni_set_tx_shaping(priv->mc_io, 0, priv->mc_token, &scfg); -+ if (err) { -+ dev_err(dev, "dpni_set_tx_shaping() failed\n"); -+ return -EPERM; -+ } -+ /* If successful, save the current configuration for future inquiries */ -+ priv->shaping_cfg = scfg; -+ -+ return count; -+} -+ -+static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ -+ return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask); -+} -+ -+static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, -+ size_t count) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); -+ struct dpaa2_eth_fq *fq; -+ bool running = netif_running(priv->net_dev); -+ int i, err; -+ -+ err = cpulist_parse(buf, &priv->txconf_cpumask); -+ if (err) -+ return err; -+ -+ /* Only accept CPUs that have an affine DPIO */ -+ if (!cpumask_subset(&priv->txconf_cpumask, &priv->dpio_cpumask)) { -+ netdev_info(priv->net_dev, -+ "cpumask must be a subset of 0x%lx\n", -+ *cpumask_bits(&priv->dpio_cpumask)); -+ cpumask_and(&priv->txconf_cpumask, &priv->dpio_cpumask, 
-+ &priv->txconf_cpumask); -+ } -+ -+ /* Rewiring the TxConf FQs requires interface shutdown. -+ */ -+ if (running) { -+ err = dpaa2_eth_stop(priv->net_dev); -+ if (err) -+ return -ENODEV; -+ } -+ -+ /* Set the new TxConf FQ affinities */ -+ set_fq_affinity(priv); -+ -+ /* dpaa2_eth_open() below will *stop* the Tx queues until an explicit -+ * link up notification is received. Give the polling thread enough time -+ * to detect the link state change, or else we'll end up with the -+ * transmission side forever shut down. -+ */ -+ if (priv->do_link_poll) -+ msleep(2 * DPAA2_ETH_LINK_STATE_REFRESH); -+ -+ for (i = 0; i < priv->num_fqs; i++) { -+ fq = &priv->fq[i]; -+ if (fq->type != DPAA2_TX_CONF_FQ) -+ continue; -+ setup_tx_flow(priv, fq); -+ } -+ -+ if (running) { -+ err = dpaa2_eth_open(priv->net_dev); -+ if (err) -+ return -ENODEV; -+ } -+ -+ return count; -+} -+ -+static struct device_attribute dpaa2_eth_attrs[] = { -+ __ATTR(txconf_cpumask, -+ S_IRUSR | S_IWUSR, -+ dpaa2_eth_show_txconf_cpumask, -+ dpaa2_eth_write_txconf_cpumask), -+ -+ __ATTR(tx_shaping, -+ S_IRUSR | S_IWUSR, -+ dpaa2_eth_show_tx_shaping, -+ dpaa2_eth_write_tx_shaping), -+}; -+ -+void dpaa2_eth_sysfs_init(struct device *dev) -+{ -+ int i, err; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) { -+ err = device_create_file(dev, &dpaa2_eth_attrs[i]); -+ if (err) { -+ dev_err(dev, "ERROR creating sysfs file\n"); -+ goto undo; -+ } -+ } -+ return; -+ -+undo: -+ while (i > 0) -+ device_remove_file(dev, &dpaa2_eth_attrs[--i]); -+} -+ -+void dpaa2_eth_sysfs_remove(struct device *dev) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa2_eth_attrs); i++) -+ device_remove_file(dev, &dpaa2_eth_attrs[i]); -+} -+ -+static int dpaa2_eth_probe(struct fsl_mc_device *dpni_dev) -+{ -+ struct device *dev; -+ struct net_device *net_dev = NULL; -+ struct dpaa2_eth_priv *priv = NULL; -+ int err = 0; -+ -+ dev = &dpni_dev->dev; -+ -+ /* Net device */ -+ net_dev = alloc_etherdev_mq(sizeof(*priv), 
DPAA2_ETH_MAX_TX_QUEUES); -+ if (!net_dev) { -+ dev_err(dev, "alloc_etherdev_mq() failed\n"); -+ return -ENOMEM; -+ } -+ -+ SET_NETDEV_DEV(net_dev, dev); -+ dev_set_drvdata(dev, net_dev); -+ -+ priv = netdev_priv(net_dev); -+ priv->net_dev = net_dev; -+ -+ /* Obtain a MC portal */ -+ err = fsl_mc_portal_allocate(dpni_dev, FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, -+ &priv->mc_io); -+ if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); -+ goto err_portal_alloc; -+ } -+ -+ /* MC objects initialization and configuration */ -+ err = setup_dpni(dpni_dev); -+ if (err) -+ goto err_dpni_setup; -+ -+ err = setup_dpio(priv); -+ if (err) -+ goto err_dpio_setup; -+ -+ setup_fqs(priv); -+ -+ err = setup_dpbp(priv); -+ if (err) -+ goto err_dpbp_setup; -+ -+ err = bind_dpni(priv); -+ if (err) -+ goto err_bind; -+ -+ /* Add a NAPI context for each channel */ -+ add_ch_napi(priv); -+ -+ /* Percpu statistics */ -+ priv->percpu_stats = alloc_percpu(*priv->percpu_stats); -+ if (!priv->percpu_stats) { -+ dev_err(dev, "alloc_percpu(percpu_stats) failed\n"); -+ err = -ENOMEM; -+ goto err_alloc_percpu_stats; -+ } -+ priv->percpu_extras = alloc_percpu(*priv->percpu_extras); -+ if (!priv->percpu_extras) { -+ dev_err(dev, "alloc_percpu(percpu_extras) failed\n"); -+ err = -ENOMEM; -+ goto err_alloc_percpu_extras; -+ } -+ -+ snprintf(net_dev->name, IFNAMSIZ, "ni%d", dpni_dev->obj_desc.id); -+ if (!dev_valid_name(net_dev->name)) { -+ dev_warn(&net_dev->dev, -+ "netdevice name \"%s\" cannot be used, reverting to default..\n", -+ net_dev->name); -+ dev_alloc_name(net_dev, "eth%d"); -+ dev_warn(&net_dev->dev, "using name \"%s\"\n", net_dev->name); -+ } -+ -+ err = netdev_init(net_dev); -+ if (err) -+ goto err_netdev_init; -+ -+ /* Configure checksum offload based on current interface flags */ -+ err = set_rx_csum(priv, !!(net_dev->features & NETIF_F_RXCSUM)); -+ if (err) -+ goto err_csum; -+ -+ err = set_tx_csum(priv, !!(net_dev->features & -+ (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM))); -+ if (err) 
-+ goto err_csum; -+ -+ err = alloc_rings(priv); -+ if (err) -+ goto err_alloc_rings; -+ -+ net_dev->ethtool_ops = &dpaa2_ethtool_ops; -+ -+ err = setup_irqs(dpni_dev); -+ if (err) { -+ netdev_warn(net_dev, "Failed to set link interrupt, fall back to polling\n"); -+ priv->poll_thread = kthread_run(poll_link_state, priv, -+ "%s_poll_link", net_dev->name); -+ if (IS_ERR(priv->poll_thread)) { -+ netdev_err(net_dev, "Error starting polling thread\n"); -+ goto err_poll_thread; -+ } -+ priv->do_link_poll = true; -+ } -+ -+ dpaa2_eth_sysfs_init(&net_dev->dev); -+ dpaa2_dbg_add(priv); -+ -+ dev_info(dev, "Probed interface %s\n", net_dev->name); -+ return 0; -+ -+err_poll_thread: -+ free_rings(priv); -+err_alloc_rings: -+err_csum: -+ unregister_netdev(net_dev); -+err_netdev_init: -+ free_percpu(priv->percpu_extras); -+err_alloc_percpu_extras: -+ free_percpu(priv->percpu_stats); -+err_alloc_percpu_stats: -+ del_ch_napi(priv); -+err_bind: -+ free_dpbp(priv); -+err_dpbp_setup: -+ free_dpio(priv); -+err_dpio_setup: -+ kfree(priv->cls_rule); -+ dpni_close(priv->mc_io, 0, priv->mc_token); -+err_dpni_setup: -+ fsl_mc_portal_free(priv->mc_io); -+err_portal_alloc: -+ dev_set_drvdata(dev, NULL); -+ free_netdev(net_dev); -+ -+ return err; -+} -+ -+static int dpaa2_eth_remove(struct fsl_mc_device *ls_dev) -+{ -+ struct device *dev; -+ struct net_device *net_dev; -+ struct dpaa2_eth_priv *priv; -+ -+ dev = &ls_dev->dev; -+ net_dev = dev_get_drvdata(dev); -+ priv = netdev_priv(net_dev); -+ -+ dpaa2_dbg_remove(priv); -+ dpaa2_eth_sysfs_remove(&net_dev->dev); -+ -+ unregister_netdev(net_dev); -+ dev_info(net_dev->dev.parent, "Removed interface %s\n", net_dev->name); -+ -+ free_dpio(priv); -+ free_rings(priv); -+ del_ch_napi(priv); -+ free_dpbp(priv); -+ free_dpni(priv); -+ -+ fsl_mc_portal_free(priv->mc_io); -+ -+ free_percpu(priv->percpu_stats); -+ free_percpu(priv->percpu_extras); -+ -+ if (priv->do_link_poll) -+ kthread_stop(priv->poll_thread); -+ else -+ fsl_mc_free_irqs(ls_dev); -+ -+ 
kfree(priv->cls_rule); -+ -+ dev_set_drvdata(dev, NULL); -+ free_netdev(net_dev); -+ -+ return 0; -+} -+ -+static const struct fsl_mc_device_match_id dpaa2_eth_match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpni", -+ .ver_major = DPNI_VER_MAJOR, -+ .ver_minor = DPNI_VER_MINOR -+ }, -+ { .vendor = 0x0 } -+}; -+ -+static struct fsl_mc_driver dpaa2_eth_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpaa2_eth_probe, -+ .remove = dpaa2_eth_remove, -+ .match_id_table = dpaa2_eth_match_id_table -+}; -+ -+static int __init dpaa2_eth_driver_init(void) -+{ -+ int err; -+ -+ dpaa2_eth_dbg_init(); -+ -+ err = fsl_mc_driver_register(&dpaa2_eth_driver); -+ if (err) { -+ dpaa2_eth_dbg_exit(); -+ return err; -+ } -+ -+ return 0; -+} -+ -+static void __exit dpaa2_eth_driver_exit(void) -+{ -+ fsl_mc_driver_unregister(&dpaa2_eth_driver); -+ dpaa2_eth_dbg_exit(); -+} -+ -+module_init(dpaa2_eth_driver_init); -+module_exit(dpaa2_eth_driver_exit); -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h -new file mode 100644 -index 0000000..7274fbe ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h -@@ -0,0 +1,377 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#ifndef __DPAA2_ETH_H -+#define __DPAA2_ETH_H -+ -+#include -+#include -+#include "../../fsl-mc/include/fsl_dpaa2_io.h" -+#include "../../fsl-mc/include/fsl_dpaa2_fd.h" -+#include "../../fsl-mc/include/dpbp.h" -+#include "../../fsl-mc/include/dpbp-cmd.h" -+#include "../../fsl-mc/include/dpcon.h" -+#include "../../fsl-mc/include/dpcon-cmd.h" -+#include "../../fsl-mc/include/dpmng.h" -+#include "dpni.h" -+#include "dpni-cmd.h" -+ -+#include "dpaa2-eth-trace.h" -+#include "dpaa2-eth-debugfs.h" -+ -+#define DPAA2_ETH_STORE_SIZE 16 -+ -+/* Maximum number of scatter-gather entries in an ingress frame, -+ * considering the maximum receive frame size is 64K -+ */ -+#define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) -+ -+/* Maximum acceptable MTU value. It is in direct relation with the MC-enforced -+ * Max Frame Length (currently 10k). -+ */ -+#define DPAA2_ETH_MFL (10 * 1024) -+#define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) -+/* Convert L3 MTU to L2 MFL */ -+#define DPAA2_ETH_L2_MAX_FRM(mtu) (mtu + VLAN_ETH_HLEN) -+ -+/* Set the taildrop threshold (in bytes) to allow the enqueue of several jumbo -+ * frames in the Rx queues (length of the current frame is not -+ * taken into account when making the taildrop decision) -+ */ -+#define DPAA2_ETH_TAILDROP_THRESH (64 * 1024) -+ -+/* Buffer quota per queue. Must be large enough such that for minimum sized -+ * frames taildrop kicks in before the bpool gets depleted, so we compute -+ * how many 64B frames fit inside the taildrop threshold and add a margin -+ * to accommodate the buffer refill delay. 
-+ */ -+#define DPAA2_ETH_MAX_FRAMES_PER_QUEUE (DPAA2_ETH_TAILDROP_THRESH / 64) -+#define DPAA2_ETH_NUM_BUFS (DPAA2_ETH_MAX_FRAMES_PER_QUEUE + 256) -+#define DPAA2_ETH_REFILL_THRESH DPAA2_ETH_MAX_FRAMES_PER_QUEUE -+ -+/* Maximum number of buffers that can be acquired/released through a single -+ * QBMan command -+ */ -+#define DPAA2_ETH_BUFS_PER_CMD 7 -+ -+/* Hardware requires alignment for ingress/egress buffer addresses -+ * and ingress buffer lengths. -+ */ -+#define DPAA2_ETH_RX_BUF_SIZE 2048 -+#define DPAA2_ETH_TX_BUF_ALIGN 64 -+#define DPAA2_ETH_RX_BUF_ALIGN 256 -+#define DPAA2_ETH_NEEDED_HEADROOM(p_priv) \ -+ ((p_priv)->tx_data_offset + DPAA2_ETH_TX_BUF_ALIGN) -+ -+/* Hardware only sees DPAA2_ETH_RX_BUF_SIZE, but we need to allocate ingress -+ * buffers large enough to allow building an skb around them and also account -+ * for alignment restrictions -+ */ -+#define DPAA2_ETH_BUF_RAW_SIZE \ -+ (DPAA2_ETH_RX_BUF_SIZE + \ -+ SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ -+ DPAA2_ETH_RX_BUF_ALIGN) -+ -+/* PTP nominal frequency 1MHz */ -+#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1000 -+ -+/* We are accommodating a skb backpointer and some S/G info -+ * in the frame's software annotation. The hardware -+ * options are either 0 or 64, so we choose the latter. 
-+ */ -+#define DPAA2_ETH_SWA_SIZE 64 -+ -+/* Must keep this struct smaller than DPAA2_ETH_SWA_SIZE */ -+struct dpaa2_eth_swa { -+ struct sk_buff *skb; -+ struct scatterlist *scl; -+ int num_sg; -+ int num_dma_bufs; -+}; -+ -+/* Annotation valid bits in FD FRC */ -+#define DPAA2_FD_FRC_FASV 0x8000 -+#define DPAA2_FD_FRC_FAEADV 0x4000 -+#define DPAA2_FD_FRC_FAPRV 0x2000 -+#define DPAA2_FD_FRC_FAIADV 0x1000 -+#define DPAA2_FD_FRC_FASWOV 0x0800 -+#define DPAA2_FD_FRC_FAICFDV 0x0400 -+ -+/* Annotation bits in FD CTRL */ -+#define DPAA2_FD_CTRL_ASAL 0x00020000 /* ASAL = 128 */ -+#define DPAA2_FD_CTRL_PTA 0x00800000 -+#define DPAA2_FD_CTRL_PTV1 0x00400000 -+ -+/* Frame annotation status */ -+struct dpaa2_fas { -+ u8 reserved; -+ u8 ppid; -+ __le16 ifpid; -+ __le32 status; -+} __packed; -+ -+/* Error and status bits in the frame annotation status word */ -+/* Debug frame, otherwise supposed to be discarded */ -+#define DPAA2_FAS_DISC 0x80000000 -+/* MACSEC frame */ -+#define DPAA2_FAS_MS 0x40000000 -+#define DPAA2_FAS_PTP 0x08000000 -+/* Ethernet multicast frame */ -+#define DPAA2_FAS_MC 0x04000000 -+/* Ethernet broadcast frame */ -+#define DPAA2_FAS_BC 0x02000000 -+#define DPAA2_FAS_KSE 0x00040000 -+#define DPAA2_FAS_EOFHE 0x00020000 -+#define DPAA2_FAS_MNLE 0x00010000 -+#define DPAA2_FAS_TIDE 0x00008000 -+#define DPAA2_FAS_PIEE 0x00004000 -+/* Frame length error */ -+#define DPAA2_FAS_FLE 0x00002000 -+/* Frame physical error */ -+#define DPAA2_FAS_FPE 0x00001000 -+#define DPAA2_FAS_PTE 0x00000080 -+#define DPAA2_FAS_ISP 0x00000040 -+#define DPAA2_FAS_PHE 0x00000020 -+#define DPAA2_FAS_BLE 0x00000010 -+/* L3 csum validation performed */ -+#define DPAA2_FAS_L3CV 0x00000008 -+/* L3 csum error */ -+#define DPAA2_FAS_L3CE 0x00000004 -+/* L4 csum validation performed */ -+#define DPAA2_FAS_L4CV 0x00000002 -+/* L4 csum error */ -+#define DPAA2_FAS_L4CE 0x00000001 -+/* Possible errors on the ingress path */ -+#define DPAA2_ETH_RX_ERR_MASK (DPAA2_FAS_KSE | \ -+ DPAA2_FAS_EOFHE | 
\ -+ DPAA2_FAS_MNLE | \ -+ DPAA2_FAS_TIDE | \ -+ DPAA2_FAS_PIEE | \ -+ DPAA2_FAS_FLE | \ -+ DPAA2_FAS_FPE | \ -+ DPAA2_FAS_PTE | \ -+ DPAA2_FAS_ISP | \ -+ DPAA2_FAS_PHE | \ -+ DPAA2_FAS_BLE | \ -+ DPAA2_FAS_L3CE | \ -+ DPAA2_FAS_L4CE) -+/* Tx errors */ -+#define DPAA2_ETH_TXCONF_ERR_MASK (DPAA2_FAS_KSE | \ -+ DPAA2_FAS_EOFHE | \ -+ DPAA2_FAS_MNLE | \ -+ DPAA2_FAS_TIDE) -+ -+/* Time in milliseconds between link state updates */ -+#define DPAA2_ETH_LINK_STATE_REFRESH 1000 -+ -+/* Driver statistics, other than those in struct rtnl_link_stats64. -+ * These are usually collected per-CPU and aggregated by ethtool. -+ */ -+struct dpaa2_eth_drv_stats { -+ __u64 tx_conf_frames; -+ __u64 tx_conf_bytes; -+ __u64 tx_sg_frames; -+ __u64 tx_sg_bytes; -+ __u64 rx_sg_frames; -+ __u64 rx_sg_bytes; -+ /* Enqueues retried due to portal busy */ -+ __u64 tx_portal_busy; -+}; -+ -+/* Per-FQ statistics */ -+struct dpaa2_eth_fq_stats { -+ /* Number of frames received on this queue */ -+ __u64 frames; -+}; -+ -+/* Per-channel statistics */ -+struct dpaa2_eth_ch_stats { -+ /* Volatile dequeues retried due to portal busy */ -+ __u64 dequeue_portal_busy; -+ /* Number of CDANs; useful to estimate avg NAPI len */ -+ __u64 cdan; -+ /* Number of frames received on queues from this channel */ -+ __u64 frames; -+ /* Pull errors */ -+ __u64 pull_err; -+}; -+ -+/* Maximum number of queues associated with a DPNI */ -+#define DPAA2_ETH_MAX_RX_QUEUES 16 -+#define DPAA2_ETH_MAX_TX_QUEUES NR_CPUS -+#define DPAA2_ETH_MAX_RX_ERR_QUEUES 1 -+#define DPAA2_ETH_MAX_QUEUES (DPAA2_ETH_MAX_RX_QUEUES + \ -+ DPAA2_ETH_MAX_TX_QUEUES + \ -+ DPAA2_ETH_MAX_RX_ERR_QUEUES) -+ -+#define DPAA2_ETH_MAX_DPCONS NR_CPUS -+ -+enum dpaa2_eth_fq_type { -+ DPAA2_RX_FQ = 0, -+ DPAA2_TX_CONF_FQ, -+ DPAA2_RX_ERR_FQ -+}; -+ -+struct dpaa2_eth_priv; -+ -+struct dpaa2_eth_fq { -+ u32 fqid; -+ u16 flowid; -+ int target_cpu; -+ struct dpaa2_eth_channel *channel; -+ enum dpaa2_eth_fq_type type; -+ -+ void (*consume)(struct dpaa2_eth_priv *, 
-+ struct dpaa2_eth_channel *, -+ const struct dpaa2_fd *, -+ struct napi_struct *); -+ struct dpaa2_eth_fq_stats stats; -+}; -+ -+struct dpaa2_eth_channel { -+ struct dpaa2_io_notification_ctx nctx; -+ struct fsl_mc_device *dpcon; -+ int dpcon_id; -+ int ch_id; -+ int dpio_id; -+ struct napi_struct napi; -+ struct dpaa2_io_store *store; -+ struct dpaa2_eth_priv *priv; -+ int buf_count; -+ struct dpaa2_eth_ch_stats stats; -+}; -+ -+struct dpaa2_eth_cls_rule { -+ struct ethtool_rx_flow_spec fs; -+ bool in_use; -+}; -+ -+/* Driver private data */ -+struct dpaa2_eth_priv { -+ struct net_device *net_dev; -+ -+ u8 num_fqs; -+ struct dpaa2_eth_fq fq[DPAA2_ETH_MAX_QUEUES]; -+ -+ u8 num_channels; -+ struct dpaa2_eth_channel *channel[DPAA2_ETH_MAX_DPCONS]; -+ -+ int dpni_id; -+ struct dpni_attr dpni_attrs; -+ struct dpni_extended_cfg dpni_ext_cfg; -+ /* Insofar as the MC is concerned, we're using one layout on all 3 types -+ * of buffers (Rx, Tx, Tx-Conf). -+ */ -+ struct dpni_buffer_layout buf_layout; -+ u16 tx_data_offset; -+ -+ struct fsl_mc_device *dpbp_dev; -+ struct dpbp_attr dpbp_attrs; -+ -+ u16 tx_qdid; -+ struct fsl_mc_io *mc_io; -+ /* SysFS-controlled affinity mask for TxConf FQs */ -+ struct cpumask txconf_cpumask; -+ /* Cores which have an affine DPIO/DPCON. -+ * This is the cpu set on which Rx frames are processed; -+ * Tx confirmation frames are processed on a subset of this, -+ * depending on user settings. 
-+ */ -+ struct cpumask dpio_cpumask; -+ -+ /* Standard statistics */ -+ struct rtnl_link_stats64 __percpu *percpu_stats; -+ /* Extra stats, in addition to the ones known by the kernel */ -+ struct dpaa2_eth_drv_stats __percpu *percpu_extras; -+ -+ u16 mc_token; -+ -+ struct dpni_link_state link_state; -+ bool do_link_poll; -+ struct task_struct *poll_thread; -+ -+ /* enabled ethtool hashing bits */ -+ u64 rx_hash_fields; -+ -+#ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS -+ struct dpaa2_debugfs dbg; -+#endif -+ -+ /* array of classification rules */ -+ struct dpaa2_eth_cls_rule *cls_rule; -+ -+ struct dpni_tx_shaping_cfg shaping_cfg; -+ -+ bool ts_tx_en; /* Tx timestamping enabled */ -+ bool ts_rx_en; /* Rx timestamping enabled */ -+}; -+ -+/* default Rx hash options, set during probing */ -+#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ -+ | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \ -+ | RXH_L4_B_2_3) -+ -+#define dpaa2_eth_hash_enabled(priv) \ -+ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_HASH) -+ -+#define dpaa2_eth_fs_enabled(priv) \ -+ ((priv)->dpni_attrs.options & DPNI_OPT_DIST_FS) -+ -+#define DPAA2_CLASSIFIER_ENTRY_COUNT 16 -+ -+/* Required by struct dpni_attr::ext_cfg_iova */ -+#define DPAA2_EXT_CFG_SIZE 256 -+ -+extern const struct ethtool_ops dpaa2_ethtool_ops; -+ -+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); -+ -+static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) -+{ -+ if (!dpaa2_eth_hash_enabled(priv)) -+ return 1; -+ -+ return priv->dpni_ext_cfg.tc_cfg[0].max_dist; -+} -+ -+static inline int dpaa2_eth_max_channels(struct dpaa2_eth_priv *priv) -+{ -+ /* Ideally, we want a number of channels large enough -+ * to accommodate both the Rx distribution size -+ * and the max number of Tx confirmation queues -+ */ -+ return max_t(int, dpaa2_eth_queue_count(priv), -+ priv->dpni_attrs.max_senders); -+} -+ -+void check_fs_support(struct net_device *); -+ -+#endif /* __DPAA2_H */ -diff --git 
a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -new file mode 100644 -index 0000000..fdab07f ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -@@ -0,0 +1,861 @@ -+/* Copyright 2014-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "dpni.h" /* DPNI_LINK_OPT_* */ -+#include "dpaa2-eth.h" -+ -+/* size of DMA memory used to pass configuration to classifier, in bytes */ -+#define DPAA2_CLASSIFIER_DMA_SIZE 256 -+ -+/* To be kept in sync with 'enum dpni_counter' */ -+char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { -+ "rx frames", -+ "rx bytes", -+ /* rx frames filtered/policed */ -+ "rx filtered frames", -+ /* rx frames dropped with errors */ -+ "rx discarded frames", -+ "rx mcast frames", -+ "rx mcast bytes", -+ "rx bcast frames", -+ "rx bcast bytes", -+ "tx frames", -+ "tx bytes", -+ /* tx frames dropped with errors */ -+ "tx discarded frames", -+}; -+ -+#define DPAA2_ETH_NUM_STATS ARRAY_SIZE(dpaa2_ethtool_stats) -+ -+/* To be kept in sync with 'struct dpaa2_eth_drv_stats' */ -+char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { -+ /* per-cpu stats */ -+ -+ "tx conf frames", -+ "tx conf bytes", -+ "tx sg frames", -+ "tx sg bytes", -+ "rx sg frames", -+ "rx sg bytes", -+ /* how many times we had to retry the enqueue command */ -+ "enqueue portal busy", -+ -+ /* Channel stats */ -+ /* How many times we had to retry the volatile dequeue command */ -+ "dequeue portal busy", -+ "channel pull errors", -+ /* Number of notifications received */ -+ "cdan", -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+ /* FQ stats */ -+ "rx pending frames", -+ "rx pending bytes", -+ "tx conf pending frames", -+ "tx conf pending bytes", -+ "buffer count" -+#endif -+}; -+ -+#define 
DPAA2_ETH_NUM_EXTRA_STATS ARRAY_SIZE(dpaa2_ethtool_extras) -+ -+static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, -+ struct ethtool_drvinfo *drvinfo) -+{ -+ struct mc_version mc_ver; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ char fw_version[ETHTOOL_FWVERS_LEN]; -+ char version[32]; -+ int err; -+ -+ err = mc_get_version(priv->mc_io, 0, &mc_ver); -+ if (err) { -+ strlcpy(drvinfo->fw_version, "Error retrieving MC version", -+ sizeof(drvinfo->fw_version)); -+ } else { -+ scnprintf(fw_version, sizeof(fw_version), "%d.%d.%d", -+ mc_ver.major, mc_ver.minor, mc_ver.revision); -+ strlcpy(drvinfo->fw_version, fw_version, -+ sizeof(drvinfo->fw_version)); -+ } -+ -+ scnprintf(version, sizeof(version), "%d.%d", DPNI_VER_MAJOR, -+ DPNI_VER_MINOR); -+ strlcpy(drvinfo->version, version, sizeof(drvinfo->version)); -+ -+ strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); -+ strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), -+ sizeof(drvinfo->bus_info)); -+} -+ -+static int dpaa2_eth_get_settings(struct net_device *net_dev, -+ struct ethtool_cmd *cmd) -+{ -+ struct dpni_link_state state = {0}; -+ int err = 0; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ -+ err = dpni_get_link_state(priv->mc_io, 0, priv->mc_token, &state); -+ if (err) { -+ netdev_err(net_dev, "ERROR %d getting link state", err); -+ goto out; -+ } -+ -+ /* At the moment, we have no way of interrogating the DPMAC -+ * from the DPNI side - and for that matter there may exist -+ * no DPMAC at all. So for now we just don't report anything -+ * beyond the DPNI attributes. 
-+ */ -+ if (state.options & DPNI_LINK_OPT_AUTONEG) -+ cmd->autoneg = AUTONEG_ENABLE; -+ if (!(state.options & DPNI_LINK_OPT_HALF_DUPLEX)) -+ cmd->duplex = DUPLEX_FULL; -+ ethtool_cmd_speed_set(cmd, state.rate); -+ -+out: -+ return err; -+} -+ -+static int dpaa2_eth_set_settings(struct net_device *net_dev, -+ struct ethtool_cmd *cmd) -+{ -+ struct dpni_link_cfg cfg = {0}; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err = 0; -+ -+ netdev_dbg(net_dev, "Setting link parameters..."); -+ -+ /* Due to a temporary firmware limitation, the DPNI must be down -+ * in order to be able to change link settings. Taking steps to let -+ * the user know that. -+ */ -+ if (netif_running(net_dev)) { -+ netdev_info(net_dev, "Sorry, interface must be brought down first.\n"); -+ return -EACCES; -+ } -+ -+ cfg.rate = ethtool_cmd_speed(cmd); -+ if (cmd->autoneg == AUTONEG_ENABLE) -+ cfg.options |= DPNI_LINK_OPT_AUTONEG; -+ else -+ cfg.options &= ~DPNI_LINK_OPT_AUTONEG; -+ if (cmd->duplex == DUPLEX_HALF) -+ cfg.options |= DPNI_LINK_OPT_HALF_DUPLEX; -+ else -+ cfg.options &= ~DPNI_LINK_OPT_HALF_DUPLEX; -+ -+ err = dpni_set_link_cfg(priv->mc_io, 0, priv->mc_token, &cfg); -+ if (err) -+ /* ethtool will be loud enough if we return an error; no point -+ * in putting our own error message on the console by default -+ */ -+ netdev_dbg(net_dev, "ERROR %d setting link cfg", err); -+ -+ return err; -+} -+ -+static void dpaa2_eth_get_strings(struct net_device *netdev, u32 stringset, -+ u8 *data) -+{ -+ u8 *p = data; -+ int i; -+ -+ switch (stringset) { -+ case ETH_SS_STATS: -+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { -+ strlcpy(p, dpaa2_ethtool_stats[i], ETH_GSTRING_LEN); -+ p += ETH_GSTRING_LEN; -+ } -+ for (i = 0; i < DPAA2_ETH_NUM_EXTRA_STATS; i++) { -+ strlcpy(p, dpaa2_ethtool_extras[i], ETH_GSTRING_LEN); -+ p += ETH_GSTRING_LEN; -+ } -+ break; -+ } -+} -+ -+static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) -+{ -+ switch (sset) { -+ case ETH_SS_STATS: 
/* ethtool_get_stats(), ethtool_get_drvinfo() */ -+ return DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS; -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+/** Fill in hardware counters, as returned by the MC firmware. -+ */ -+static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, -+ struct ethtool_stats *stats, -+ u64 *data) -+{ -+ int i; /* Current index in the data array */ -+ int j, k, err; -+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+ u32 fcnt, bcnt; -+ u32 fcnt_rx_total = 0, fcnt_tx_total = 0; -+ u32 bcnt_rx_total = 0, bcnt_tx_total = 0; -+ u32 buf_cnt; -+#endif -+ u64 cdan = 0; -+ u64 portal_busy = 0, pull_err = 0; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct dpaa2_eth_drv_stats *extras; -+ struct dpaa2_eth_ch_stats *ch_stats; -+ -+ memset(data, 0, -+ sizeof(u64) * (DPAA2_ETH_NUM_STATS + DPAA2_ETH_NUM_EXTRA_STATS)); -+ -+ /* Print standard counters, from DPNI statistics */ -+ for (i = 0; i < DPAA2_ETH_NUM_STATS; i++) { -+ err = dpni_get_counter(priv->mc_io, 0, priv->mc_token, i, -+ data + i); -+ if (err != 0) -+ netdev_warn(net_dev, "Err %d getting DPNI counter %d", -+ err, i); -+ } -+ -+ /* Print per-cpu extra stats */ -+ for_each_online_cpu(k) { -+ extras = per_cpu_ptr(priv->percpu_extras, k); -+ for (j = 0; j < sizeof(*extras) / sizeof(__u64); j++) -+ *((__u64 *)data + i + j) += *((__u64 *)extras + j); -+ } -+ i += j; -+ -+ /* We may be using fewer DPIOs than actual CPUs */ -+ for_each_cpu(j, &priv->dpio_cpumask) { -+ ch_stats = &priv->channel[j]->stats; -+ cdan += ch_stats->cdan; -+ portal_busy += ch_stats->dequeue_portal_busy; -+ pull_err += ch_stats->pull_err; -+ } -+ -+ *(data + i++) = portal_busy; -+ *(data + i++) = pull_err; -+ *(data + i++) = cdan; -+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+ for (j = 0; j < priv->num_fqs; j++) { -+ /* Print FQ instantaneous counts */ -+ err = dpaa2_io_query_fq_count(NULL, priv->fq[j].fqid, -+ &fcnt, &bcnt); -+ if (err) { -+ netdev_warn(net_dev, "FQ query error %d", err); -+ return; -+ } -+ -+ if 
(priv->fq[j].type == DPAA2_TX_CONF_FQ) { -+ fcnt_tx_total += fcnt; -+ bcnt_tx_total += bcnt; -+ } else { -+ fcnt_rx_total += fcnt; -+ bcnt_rx_total += bcnt; -+ } -+ } -+ *(data + i++) = fcnt_rx_total; -+ *(data + i++) = bcnt_rx_total; -+ *(data + i++) = fcnt_tx_total; -+ *(data + i++) = bcnt_tx_total; -+ -+ err = dpaa2_io_query_bp_count(NULL, priv->dpbp_attrs.bpid, &buf_cnt); -+ if (err) { -+ netdev_warn(net_dev, "Buffer count query error %d\n", err); -+ return; -+ } -+ *(data + i++) = buf_cnt; -+#endif -+} -+ -+static const struct dpaa2_eth_hash_fields { -+ u64 rxnfc_field; -+ enum net_prot cls_prot; -+ int cls_field; -+ int size; -+} hash_fields[] = { -+ { -+ /* L2 header */ -+ .rxnfc_field = RXH_L2DA, -+ .cls_prot = NET_PROT_ETH, -+ .cls_field = NH_FLD_ETH_DA, -+ .size = 6, -+ }, { -+ /* VLAN header */ -+ .rxnfc_field = RXH_VLAN, -+ .cls_prot = NET_PROT_VLAN, -+ .cls_field = NH_FLD_VLAN_TCI, -+ .size = 2, -+ }, { -+ /* IP header */ -+ .rxnfc_field = RXH_IP_SRC, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_SRC, -+ .size = 4, -+ }, { -+ .rxnfc_field = RXH_IP_DST, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_DST, -+ .size = 4, -+ }, { -+ .rxnfc_field = RXH_L3_PROTO, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_PROTO, -+ .size = 1, -+ }, { -+ /* Using UDP ports, this is functionally equivalent to raw -+ * byte pairs from L4 header. 
-+ */ -+ .rxnfc_field = RXH_L4_B_0_1, -+ .cls_prot = NET_PROT_UDP, -+ .cls_field = NH_FLD_UDP_PORT_SRC, -+ .size = 2, -+ }, { -+ .rxnfc_field = RXH_L4_B_2_3, -+ .cls_prot = NET_PROT_UDP, -+ .cls_field = NH_FLD_UDP_PORT_DST, -+ .size = 2, -+ }, -+}; -+ -+static int cls_is_enabled(struct net_device *net_dev, u64 flag) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ -+ return !!(priv->rx_hash_fields & flag); -+} -+ -+static int cls_key_off(struct net_device *net_dev, u64 flag) -+{ -+ int i, off = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { -+ if (hash_fields[i].rxnfc_field & flag) -+ return off; -+ if (cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) -+ off += hash_fields[i].size; -+ } -+ -+ return -1; -+} -+ -+static u8 cls_key_size(struct net_device *net_dev) -+{ -+ u8 i, size = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { -+ if (!cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) -+ continue; -+ size += hash_fields[i].size; -+ } -+ -+ return size; -+} -+ -+static u8 cls_max_key_size(struct net_device *net_dev) -+{ -+ u8 i, size = 0; -+ -+ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) -+ size += hash_fields[i].size; -+ -+ return size; -+} -+ -+void check_fs_support(struct net_device *net_dev) -+{ -+ u8 key_size = cls_max_key_size(net_dev); -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ -+ if (priv->dpni_attrs.options & DPNI_OPT_DIST_FS && -+ priv->dpni_attrs.max_dist_key_size < key_size) { -+ dev_err(&net_dev->dev, -+ "max_dist_key_size = %d, expected %d. 
Steering is disabled\n", -+ priv->dpni_attrs.max_dist_key_size, -+ key_size); -+ priv->dpni_attrs.options &= ~DPNI_OPT_DIST_FS; -+ } -+} -+ -+/* Set RX hash options -+ * flags is a combination of RXH_ bits -+ */ -+int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) -+{ -+ struct device *dev = net_dev->dev.parent; -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct dpkg_profile_cfg cls_cfg; -+ struct dpni_rx_tc_dist_cfg dist_cfg; -+ u8 *dma_mem; -+ u64 enabled_flags = 0; -+ int i; -+ int err = 0; -+ -+ if (!dpaa2_eth_hash_enabled(priv)) { -+ dev_err(dev, "Hashing support is not enabled\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ if (flags & ~DPAA2_RXH_SUPPORTED) { -+ /* RXH_DISCARD is not supported */ -+ dev_err(dev, "unsupported option selected, supported options are: mvtsdfn\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ memset(&cls_cfg, 0, sizeof(cls_cfg)); -+ -+ for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { -+ struct dpkg_extract *key = -+ &cls_cfg.extracts[cls_cfg.num_extracts]; -+ -+ if (!(flags & hash_fields[i].rxnfc_field)) -+ continue; -+ -+ if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { -+ dev_err(dev, "error adding key extraction rule, too many rules?\n"); -+ return -E2BIG; -+ } -+ -+ key->type = DPKG_EXTRACT_FROM_HDR; -+ key->extract.from_hdr.prot = hash_fields[i].cls_prot; -+ key->extract.from_hdr.type = DPKG_FULL_FIELD; -+ key->extract.from_hdr.field = hash_fields[i].cls_field; -+ cls_cfg.num_extracts++; -+ -+ enabled_flags |= hash_fields[i].rxnfc_field; -+ } -+ -+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); -+ if (!dma_mem) -+ return -ENOMEM; -+ -+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); -+ if (err) { -+ dev_err(dev, "dpni_prepare_key_cfg error %d", err); -+ return err; -+ } -+ -+ memset(&dist_cfg, 0, sizeof(dist_cfg)); -+ -+ /* Prepare for setting the rx dist */ -+ dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem, -+ DPAA2_CLASSIFIER_DMA_SIZE, -+ DMA_TO_DEVICE); -+ if 
(dma_mapping_error(net_dev->dev.parent, dist_cfg.key_cfg_iova)) { -+ dev_err(dev, "DMA mapping failed\n"); -+ kfree(dma_mem); -+ return -ENOMEM; -+ } -+ -+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv); -+ if (dpaa2_eth_fs_enabled(priv)) { -+ dist_cfg.dist_mode = DPNI_DIST_MODE_FS; -+ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; -+ } else { -+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; -+ } -+ -+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); -+ dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova, -+ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); -+ kfree(dma_mem); -+ if (err) { -+ dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); -+ return err; -+ } -+ -+ priv->rx_hash_fields = enabled_flags; -+ -+ return 0; -+} -+ -+static int prep_cls_rule(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs, -+ void *key) -+{ -+ struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; -+ struct ethhdr *eth_h, *eth_m; -+ struct ethtool_flow_ext *ext_h, *ext_m; -+ const u8 key_size = cls_key_size(net_dev); -+ void *msk = key + key_size; -+ -+ memset(key, 0, key_size * 2); -+ -+ /* This code is a major mess, it has to be cleaned up after the -+ * classification mask issue is fixed and key format will be made static -+ */ -+ -+ switch (fs->flow_type & 0xff) { -+ case TCP_V4_FLOW: -+ l4ip4_h = &fs->h_u.tcp_ip4_spec; -+ l4ip4_m = &fs->m_u.tcp_ip4_spec; -+ /* TODO: ethertype to match IPv4 and protocol to match TCP */ -+ goto l4ip4; -+ -+ case UDP_V4_FLOW: -+ l4ip4_h = &fs->h_u.udp_ip4_spec; -+ l4ip4_m = &fs->m_u.udp_ip4_spec; -+ goto l4ip4; -+ -+ case SCTP_V4_FLOW: -+ l4ip4_h = &fs->h_u.sctp_ip4_spec; -+ l4ip4_m = &fs->m_u.sctp_ip4_spec; -+ -+l4ip4: -+ if (l4ip4_m->tos) { -+ netdev_err(net_dev, -+ "ToS is not supported for IPv4 L4\n"); -+ return -EOPNOTSUPP; -+ } -+ if (l4ip4_m->ip4src && !cls_is_enabled(net_dev, RXH_IP_SRC)) { -+ netdev_err(net_dev, "IP SRC not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ if (l4ip4_m->ip4dst && 
!cls_is_enabled(net_dev, RXH_IP_DST)) { -+ netdev_err(net_dev, "IP DST not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ if (l4ip4_m->psrc && !cls_is_enabled(net_dev, RXH_L4_B_0_1)) { -+ netdev_err(net_dev, "PSRC not supported, ignored\n"); -+ return -EOPNOTSUPP; -+ } -+ if (l4ip4_m->pdst && !cls_is_enabled(net_dev, RXH_L4_B_2_3)) { -+ netdev_err(net_dev, "PDST not supported, ignored\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ if (cls_is_enabled(net_dev, RXH_IP_SRC)) { -+ *(u32 *)(key + cls_key_off(net_dev, RXH_IP_SRC)) -+ = l4ip4_h->ip4src; -+ *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_SRC)) -+ = l4ip4_m->ip4src; -+ } -+ if (cls_is_enabled(net_dev, RXH_IP_DST)) { -+ *(u32 *)(key + cls_key_off(net_dev, RXH_IP_DST)) -+ = l4ip4_h->ip4dst; -+ *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_DST)) -+ = l4ip4_m->ip4dst; -+ } -+ -+ if (cls_is_enabled(net_dev, RXH_L4_B_0_1)) { -+ *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_0_1)) -+ = l4ip4_h->psrc; -+ *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_0_1)) -+ = l4ip4_m->psrc; -+ } -+ -+ if (cls_is_enabled(net_dev, RXH_L4_B_2_3)) { -+ *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_2_3)) -+ = l4ip4_h->pdst; -+ *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_2_3)) -+ = l4ip4_m->pdst; -+ } -+ break; -+ -+ case ETHER_FLOW: -+ eth_h = &fs->h_u.ether_spec; -+ eth_m = &fs->m_u.ether_spec; -+ -+ if (eth_m->h_proto) { -+ netdev_err(net_dev, "Ethertype is not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ if (!is_zero_ether_addr(eth_m->h_source)) { -+ netdev_err(net_dev, "ETH SRC is not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ -+ if (cls_is_enabled(net_dev, RXH_L2DA)) { -+ ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), -+ eth_h->h_dest); -+ ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), -+ eth_m->h_dest); -+ } else { -+ if (!is_zero_ether_addr(eth_m->h_dest)) { -+ netdev_err(net_dev, -+ "ETH DST is not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ } -+ break; -+ -+ default: -+ /* TODO: IP user flow, AH, ESP */ -+ return 
-EOPNOTSUPP; -+ } -+ -+ if (fs->flow_type & FLOW_EXT) { -+ /* TODO: ETH data, VLAN ethertype, VLAN TCI .. */ -+ return -EOPNOTSUPP; -+ } -+ -+ if (fs->flow_type & FLOW_MAC_EXT) { -+ ext_h = &fs->h_ext; -+ ext_m = &fs->m_ext; -+ -+ if (cls_is_enabled(net_dev, RXH_L2DA)) { -+ ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), -+ ext_h->h_dest); -+ ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), -+ ext_m->h_dest); -+ } else { -+ if (!is_zero_ether_addr(ext_m->h_dest)) { -+ netdev_err(net_dev, -+ "ETH DST is not supported!\n"); -+ return -EOPNOTSUPP; -+ } -+ } -+ } -+ return 0; -+} -+ -+static int do_cls(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs, -+ bool add) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; -+ struct dpni_rule_cfg rule_cfg; -+ void *dma_mem; -+ int err = 0; -+ -+ if (!dpaa2_eth_fs_enabled(priv)) { -+ netdev_err(net_dev, "dev does not support steering!\n"); -+ /* dev doesn't support steering */ -+ return -EOPNOTSUPP; -+ } -+ -+ if ((fs->ring_cookie != RX_CLS_FLOW_DISC && -+ fs->ring_cookie >= dpaa2_eth_queue_count(priv)) || -+ fs->location >= rule_cnt) -+ return -EINVAL; -+ -+ memset(&rule_cfg, 0, sizeof(rule_cfg)); -+ rule_cfg.key_size = cls_key_size(net_dev); -+ -+ /* allocate twice the key size, for the actual key and for mask */ -+ dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); -+ if (!dma_mem) -+ return -ENOMEM; -+ -+ err = prep_cls_rule(net_dev, fs, dma_mem); -+ if (err) -+ goto err_free_mem; -+ -+ rule_cfg.key_iova = dma_map_single(net_dev->dev.parent, dma_mem, -+ rule_cfg.key_size * 2, -+ DMA_TO_DEVICE); -+ -+ rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size; -+ -+ if (!(priv->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT)) { -+ int i; -+ u8 *mask = dma_mem + rule_cfg.key_size; -+ -+ /* check that nothing is masked out, otherwise it won't work */ -+ for (i = 0; i < rule_cfg.key_size; i++) { -+ if (mask[i] == 0xff) -+ continue; 
-+ netdev_err(net_dev, "dev does not support masking!\n"); -+ err = -EOPNOTSUPP; -+ goto err_free_mem; -+ } -+ rule_cfg.mask_iova = 0; -+ } -+ -+ /* No way to control rule order in firmware */ -+ if (add) -+ err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, -+ &rule_cfg, (u16)fs->ring_cookie); -+ else -+ err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, -+ &rule_cfg); -+ -+ dma_unmap_single(net_dev->dev.parent, rule_cfg.key_iova, -+ rule_cfg.key_size * 2, DMA_TO_DEVICE); -+ if (err) { -+ netdev_err(net_dev, "dpaa2_add_cls() error %d\n", err); -+ goto err_free_mem; -+ } -+ -+ priv->cls_rule[fs->location].fs = *fs; -+ priv->cls_rule[fs->location].in_use = true; -+ -+err_free_mem: -+ kfree(dma_mem); -+ -+ return err; -+} -+ -+static int add_cls(struct net_device *net_dev, -+ struct ethtool_rx_flow_spec *fs) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err; -+ -+ err = do_cls(net_dev, fs, true); -+ if (err) -+ return err; -+ -+ priv->cls_rule[fs->location].in_use = true; -+ priv->cls_rule[fs->location].fs = *fs; -+ -+ return 0; -+} -+ -+static int del_cls(struct net_device *net_dev, int location) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int err; -+ -+ err = do_cls(net_dev, &priv->cls_rule[location].fs, false); -+ if (err) -+ return err; -+ -+ priv->cls_rule[location].in_use = false; -+ -+ return 0; -+} -+ -+static void clear_cls(struct net_device *net_dev) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int i, err; -+ -+ for (i = 0; i < DPAA2_CLASSIFIER_ENTRY_COUNT; i++) { -+ if (!priv->cls_rule[i].in_use) -+ continue; -+ -+ err = del_cls(net_dev, i); -+ if (err) -+ netdev_warn(net_dev, -+ "err trying to delete classification entry %d\n", -+ i); -+ } -+} -+ -+static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, -+ struct ethtool_rxnfc *rxnfc) -+{ -+ int err = 0; -+ -+ switch (rxnfc->cmd) { -+ case ETHTOOL_SRXFH: -+ /* first off clear ALL classification rules, chaging key -+ * composition 
will break them anyway -+ */ -+ clear_cls(net_dev); -+ /* we purposely ignore cmd->flow_type for now, because the -+ * classifier only supports a single set of fields for all -+ * protocols -+ */ -+ err = dpaa2_eth_set_hash(net_dev, rxnfc->data); -+ break; -+ case ETHTOOL_SRXCLSRLINS: -+ err = add_cls(net_dev, &rxnfc->fs); -+ break; -+ -+ case ETHTOOL_SRXCLSRLDEL: -+ err = del_cls(net_dev, rxnfc->fs.location); -+ break; -+ -+ default: -+ err = -EOPNOTSUPP; -+ } -+ -+ return err; -+} -+ -+static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, -+ struct ethtool_rxnfc *rxnfc, u32 *rule_locs) -+{ -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; -+ int i, j; -+ -+ switch (rxnfc->cmd) { -+ case ETHTOOL_GRXFH: -+ /* we purposely ignore cmd->flow_type for now, because the -+ * classifier only supports a single set of fields for all -+ * protocols -+ */ -+ rxnfc->data = priv->rx_hash_fields; -+ break; -+ -+ case ETHTOOL_GRXRINGS: -+ rxnfc->data = dpaa2_eth_queue_count(priv); -+ break; -+ -+ case ETHTOOL_GRXCLSRLCNT: -+ for (i = 0, rxnfc->rule_cnt = 0; i < rule_cnt; i++) -+ if (priv->cls_rule[i].in_use) -+ rxnfc->rule_cnt++; -+ rxnfc->data = rule_cnt; -+ break; -+ -+ case ETHTOOL_GRXCLSRULE: -+ if (!priv->cls_rule[rxnfc->fs.location].in_use) -+ return -EINVAL; -+ -+ rxnfc->fs = priv->cls_rule[rxnfc->fs.location].fs; -+ break; -+ -+ case ETHTOOL_GRXCLSRLALL: -+ for (i = 0, j = 0; i < rule_cnt; i++) { -+ if (!priv->cls_rule[i].in_use) -+ continue; -+ if (j == rxnfc->rule_cnt) -+ return -EMSGSIZE; -+ rule_locs[j++] = i; -+ } -+ rxnfc->rule_cnt = j; -+ rxnfc->data = rule_cnt; -+ break; -+ -+ default: -+ return -EOPNOTSUPP; -+ } -+ -+ return 0; -+} -+ -+const struct ethtool_ops dpaa2_ethtool_ops = { -+ .get_drvinfo = dpaa2_eth_get_drvinfo, -+ .get_link = ethtool_op_get_link, -+ .get_settings = dpaa2_eth_get_settings, -+ .set_settings = dpaa2_eth_set_settings, -+ .get_sset_count = dpaa2_eth_get_sset_count, -+ 
.get_ethtool_stats = dpaa2_eth_get_ethtool_stats, -+ .get_strings = dpaa2_eth_get_strings, -+ .get_rxnfc = dpaa2_eth_get_rxnfc, -+ .set_rxnfc = dpaa2_eth_set_rxnfc, -+}; -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpkg.h b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h -new file mode 100644 -index 0000000..92ec12b ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpkg.h -@@ -0,0 +1,175 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPKG_H_ -+#define __FSL_DPKG_H_ -+ -+#include -+#include "../../fsl-mc/include/net.h" -+ -+/* Data Path Key Generator API -+ * Contains initialization APIs and runtime APIs for the Key Generator -+ */ -+ -+/** Key Generator properties */ -+ -+/** -+ * Number of masks per key extraction -+ */ -+#define DPKG_NUM_OF_MASKS 4 -+/** -+ * Number of extractions per key profile -+ */ -+#define DPKG_MAX_NUM_OF_EXTRACTS 10 -+ -+/** -+ * enum dpkg_extract_from_hdr_type - Selecting extraction by header types -+ * @DPKG_FROM_HDR: Extract selected bytes from header, by offset -+ * @DPKG_FROM_FIELD: Extract selected bytes from header, by offset from field -+ * @DPKG_FULL_FIELD: Extract a full field -+ */ -+enum dpkg_extract_from_hdr_type { -+ DPKG_FROM_HDR = 0, -+ DPKG_FROM_FIELD = 1, -+ DPKG_FULL_FIELD = 2 -+}; -+ -+/** -+ * enum dpkg_extract_type - Enumeration for selecting extraction type -+ * @DPKG_EXTRACT_FROM_HDR: Extract from the header -+ * @DPKG_EXTRACT_FROM_DATA: Extract from data not in specific header -+ * @DPKG_EXTRACT_FROM_PARSE: Extract from parser-result; -+ * e.g. 
can be used to extract header existence; -+ * please refer to 'Parse Result definition' section in the parser BG -+ */ -+enum dpkg_extract_type { -+ DPKG_EXTRACT_FROM_HDR = 0, -+ DPKG_EXTRACT_FROM_DATA = 1, -+ DPKG_EXTRACT_FROM_PARSE = 3 -+}; -+ -+/** -+ * struct dpkg_mask - A structure for defining a single extraction mask -+ * @mask: Byte mask for the extracted content -+ * @offset: Offset within the extracted content -+ */ -+struct dpkg_mask { -+ uint8_t mask; -+ uint8_t offset; -+}; -+ -+/** -+ * struct dpkg_extract - A structure for defining a single extraction -+ * @type: Determines how the union below is interpreted: -+ * DPKG_EXTRACT_FROM_HDR: selects 'from_hdr'; -+ * DPKG_EXTRACT_FROM_DATA: selects 'from_data'; -+ * DPKG_EXTRACT_FROM_PARSE: selects 'from_parse' -+ * @extract: Selects extraction method -+ * @num_of_byte_masks: Defines the number of valid entries in the array below; -+ * This is also the number of bytes to be used as masks -+ * @masks: Masks parameters -+ */ -+struct dpkg_extract { -+ enum dpkg_extract_type type; -+ /** -+ * union extract - Selects extraction method -+ * @from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' -+ * @from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' -+ * @from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' -+ */ -+ union { -+ /** -+ * struct from_hdr - Used when 'type = DPKG_EXTRACT_FROM_HDR' -+ * @prot: Any of the supported headers -+ * @type: Defines the type of header extraction: -+ * DPKG_FROM_HDR: use size & offset below; -+ * DPKG_FROM_FIELD: use field, size and offset below; -+ * DPKG_FULL_FIELD: use field below -+ * @field: One of the supported fields (NH_FLD_) -+ * -+ * @size: Size in bytes -+ * @offset: Byte offset -+ * @hdr_index: Clear for cases not listed below; -+ * Used for protocols that may have more than a single -+ * header, 0 indicates an outer header; -+ * Supported protocols (possible values): -+ * NET_PROT_VLAN (0, HDR_INDEX_LAST); -+ * NET_PROT_MPLS (0, 1, HDR_INDEX_LAST); -+ * 
NET_PROT_IP(0, HDR_INDEX_LAST); -+ * NET_PROT_IPv4(0, HDR_INDEX_LAST); -+ * NET_PROT_IPv6(0, HDR_INDEX_LAST); -+ */ -+ -+ struct { -+ enum net_prot prot; -+ enum dpkg_extract_from_hdr_type type; -+ uint32_t field; -+ uint8_t size; -+ uint8_t offset; -+ uint8_t hdr_index; -+ } from_hdr; -+ /** -+ * struct from_data - Used when 'type = DPKG_EXTRACT_FROM_DATA' -+ * @size: Size in bytes -+ * @offset: Byte offset -+ */ -+ struct { -+ uint8_t size; -+ uint8_t offset; -+ } from_data; -+ -+ /** -+ * struct from_parse - Used when 'type = DPKG_EXTRACT_FROM_PARSE' -+ * @size: Size in bytes -+ * @offset: Byte offset -+ */ -+ struct { -+ uint8_t size; -+ uint8_t offset; -+ } from_parse; -+ } extract; -+ -+ uint8_t num_of_byte_masks; -+ struct dpkg_mask masks[DPKG_NUM_OF_MASKS]; -+}; -+ -+/** -+ * struct dpkg_profile_cfg - A structure for defining a full Key Generation -+ * profile (rule) -+ * @num_extracts: Defines the number of valid entries in the array below -+ * @extracts: Array of required extractions -+ */ -+struct dpkg_profile_cfg { -+ uint8_t num_extracts; -+ struct dpkg_extract extracts[DPKG_MAX_NUM_OF_EXTRACTS]; -+}; -+ -+#endif /* __FSL_DPKG_H_ */ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h -new file mode 100644 -index 0000000..c0f8af0 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni-cmd.h -@@ -0,0 +1,1058 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPNI_CMD_H -+#define _FSL_DPNI_CMD_H -+ -+/* DPNI Version */ -+#define DPNI_VER_MAJOR 6 -+#define DPNI_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPNI_CMDID_OPEN 0x801 -+#define DPNI_CMDID_CLOSE 0x800 -+#define DPNI_CMDID_CREATE 0x901 -+#define DPNI_CMDID_DESTROY 0x900 -+ -+#define DPNI_CMDID_ENABLE 0x002 -+#define DPNI_CMDID_DISABLE 0x003 -+#define DPNI_CMDID_GET_ATTR 0x004 -+#define DPNI_CMDID_RESET 0x005 -+#define DPNI_CMDID_IS_ENABLED 0x006 -+ -+#define DPNI_CMDID_SET_IRQ 0x010 -+#define DPNI_CMDID_GET_IRQ 0x011 -+#define DPNI_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPNI_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPNI_CMDID_SET_IRQ_MASK 0x014 -+#define DPNI_CMDID_GET_IRQ_MASK 0x015 -+#define DPNI_CMDID_GET_IRQ_STATUS 0x016 -+#define DPNI_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPNI_CMDID_SET_POOLS 0x200 -+#define DPNI_CMDID_GET_RX_BUFFER_LAYOUT 0x201 -+#define DPNI_CMDID_SET_RX_BUFFER_LAYOUT 0x202 -+#define DPNI_CMDID_GET_TX_BUFFER_LAYOUT 0x203 -+#define DPNI_CMDID_SET_TX_BUFFER_LAYOUT 0x204 -+#define DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT 0x205 -+#define DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT 0x206 -+#define DPNI_CMDID_SET_L3_CHKSUM_VALIDATION 0x207 -+#define DPNI_CMDID_GET_L3_CHKSUM_VALIDATION 0x208 -+#define DPNI_CMDID_SET_L4_CHKSUM_VALIDATION 0x209 -+#define DPNI_CMDID_GET_L4_CHKSUM_VALIDATION 0x20A -+#define DPNI_CMDID_SET_ERRORS_BEHAVIOR 0x20B -+#define DPNI_CMDID_SET_TX_CONF_REVOKE 0x20C -+ -+#define DPNI_CMDID_GET_QDID 0x210 -+#define DPNI_CMDID_GET_SP_INFO 0x211 -+#define DPNI_CMDID_GET_TX_DATA_OFFSET 0x212 -+#define DPNI_CMDID_GET_COUNTER 0x213 -+#define DPNI_CMDID_SET_COUNTER 0x214 -+#define DPNI_CMDID_GET_LINK_STATE 0x215 -+#define DPNI_CMDID_SET_MAX_FRAME_LENGTH 0x216 -+#define DPNI_CMDID_GET_MAX_FRAME_LENGTH 0x217 -+#define DPNI_CMDID_SET_MTU 0x218 -+#define DPNI_CMDID_GET_MTU 0x219 -+#define DPNI_CMDID_SET_LINK_CFG 0x21A -+#define DPNI_CMDID_SET_TX_SHAPING 0x21B -+ -+#define DPNI_CMDID_SET_MCAST_PROMISC 0x220 -+#define 
DPNI_CMDID_GET_MCAST_PROMISC 0x221 -+#define DPNI_CMDID_SET_UNICAST_PROMISC 0x222 -+#define DPNI_CMDID_GET_UNICAST_PROMISC 0x223 -+#define DPNI_CMDID_SET_PRIM_MAC 0x224 -+#define DPNI_CMDID_GET_PRIM_MAC 0x225 -+#define DPNI_CMDID_ADD_MAC_ADDR 0x226 -+#define DPNI_CMDID_REMOVE_MAC_ADDR 0x227 -+#define DPNI_CMDID_CLR_MAC_FILTERS 0x228 -+ -+#define DPNI_CMDID_SET_VLAN_FILTERS 0x230 -+#define DPNI_CMDID_ADD_VLAN_ID 0x231 -+#define DPNI_CMDID_REMOVE_VLAN_ID 0x232 -+#define DPNI_CMDID_CLR_VLAN_FILTERS 0x233 -+ -+#define DPNI_CMDID_SET_RX_TC_DIST 0x235 -+#define DPNI_CMDID_SET_TX_FLOW 0x236 -+#define DPNI_CMDID_GET_TX_FLOW 0x237 -+#define DPNI_CMDID_SET_RX_FLOW 0x238 -+#define DPNI_CMDID_GET_RX_FLOW 0x239 -+#define DPNI_CMDID_SET_RX_ERR_QUEUE 0x23A -+#define DPNI_CMDID_GET_RX_ERR_QUEUE 0x23B -+ -+#define DPNI_CMDID_SET_RX_TC_POLICING 0x23E -+#define DPNI_CMDID_SET_RX_TC_EARLY_DROP 0x23F -+ -+#define DPNI_CMDID_SET_QOS_TBL 0x240 -+#define DPNI_CMDID_ADD_QOS_ENT 0x241 -+#define DPNI_CMDID_REMOVE_QOS_ENT 0x242 -+#define DPNI_CMDID_CLR_QOS_TBL 0x243 -+#define DPNI_CMDID_ADD_FS_ENT 0x244 -+#define DPNI_CMDID_REMOVE_FS_ENT 0x245 -+#define DPNI_CMDID_CLR_FS_ENT 0x246 -+#define DPNI_CMDID_SET_VLAN_INSERTION 0x247 -+#define DPNI_CMDID_SET_VLAN_REMOVAL 0x248 -+#define DPNI_CMDID_SET_IPR 0x249 -+#define DPNI_CMDID_SET_IPF 0x24A -+ -+#define DPNI_CMDID_SET_TX_SELECTION 0x250 -+#define DPNI_CMDID_GET_RX_TC_POLICING 0x251 -+#define DPNI_CMDID_GET_RX_TC_EARLY_DROP 0x252 -+#define DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION 0x253 -+#define DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION 0x254 -+#define DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION 0x255 -+#define DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION 0x256 -+#define DPNI_CMDID_SET_TX_CONF 0x257 -+#define DPNI_CMDID_GET_TX_CONF 0x258 -+#define DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION 0x259 -+#define DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION 0x25A -+#define DPNI_CMDID_SET_TX_TC_EARLY_DROP 0x25B -+#define 
DPNI_CMDID_GET_TX_TC_EARLY_DROP 0x25C -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_OPEN(cmd, dpni_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpni_id) -+ -+#define DPNI_PREP_EXTENDED_CFG(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ -+ MC_PREP_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ -+ MC_PREP_OP(ext, 0, 32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ -+ MC_PREP_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ -+ MC_PREP_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ -+ MC_PREP_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ -+ MC_PREP_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ -+ MC_PREP_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ -+ MC_PREP_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ -+ MC_PREP_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ -+ MC_PREP_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ -+ MC_PREP_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ -+ MC_PREP_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ -+ MC_PREP_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ -+ MC_PREP_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ -+ MC_PREP_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ -+ MC_PREP_OP(ext, 4, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv4); \ -+ MC_PREP_OP(ext, 4, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv6); \ -+ MC_PREP_OP(ext, 4, 32, 16, uint16_t, \ -+ cfg->ipr_cfg.max_reass_frm_size); \ -+ MC_PREP_OP(ext, 5, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv4); \ -+ MC_PREP_OP(ext, 5, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv6); \ -+} while (0) -+ -+#define DPNI_EXT_EXTENDED_CFG(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 16, uint16_t, cfg->tc_cfg[0].max_dist); \ -+ MC_EXT_OP(ext, 0, 16, 16, uint16_t, cfg->tc_cfg[0].max_fs_entries); \ -+ MC_EXT_OP(ext, 0, 
32, 16, uint16_t, cfg->tc_cfg[1].max_dist); \ -+ MC_EXT_OP(ext, 0, 48, 16, uint16_t, cfg->tc_cfg[1].max_fs_entries); \ -+ MC_EXT_OP(ext, 1, 0, 16, uint16_t, cfg->tc_cfg[2].max_dist); \ -+ MC_EXT_OP(ext, 1, 16, 16, uint16_t, cfg->tc_cfg[2].max_fs_entries); \ -+ MC_EXT_OP(ext, 1, 32, 16, uint16_t, cfg->tc_cfg[3].max_dist); \ -+ MC_EXT_OP(ext, 1, 48, 16, uint16_t, cfg->tc_cfg[3].max_fs_entries); \ -+ MC_EXT_OP(ext, 2, 0, 16, uint16_t, cfg->tc_cfg[4].max_dist); \ -+ MC_EXT_OP(ext, 2, 16, 16, uint16_t, cfg->tc_cfg[4].max_fs_entries); \ -+ MC_EXT_OP(ext, 2, 32, 16, uint16_t, cfg->tc_cfg[5].max_dist); \ -+ MC_EXT_OP(ext, 2, 48, 16, uint16_t, cfg->tc_cfg[5].max_fs_entries); \ -+ MC_EXT_OP(ext, 3, 0, 16, uint16_t, cfg->tc_cfg[6].max_dist); \ -+ MC_EXT_OP(ext, 3, 16, 16, uint16_t, cfg->tc_cfg[6].max_fs_entries); \ -+ MC_EXT_OP(ext, 3, 32, 16, uint16_t, cfg->tc_cfg[7].max_dist); \ -+ MC_EXT_OP(ext, 3, 48, 16, uint16_t, cfg->tc_cfg[7].max_fs_entries); \ -+ MC_EXT_OP(ext, 4, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv4); \ -+ MC_EXT_OP(ext, 4, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.max_open_frames_ipv6); \ -+ MC_EXT_OP(ext, 4, 32, 16, uint16_t, \ -+ cfg->ipr_cfg.max_reass_frm_size); \ -+ MC_EXT_OP(ext, 5, 0, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv4); \ -+ MC_EXT_OP(ext, 5, 16, 16, uint16_t, \ -+ cfg->ipr_cfg.min_frag_size_ipv6); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->adv.max_tcs); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->adv.max_senders); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, cfg->mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, cfg->mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, cfg->mac_addr[0]); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->adv.options); \ -+ 
MC_CMD_OP(cmd, 2, 0, 8, uint8_t, cfg->adv.max_unicast_filters); \ -+ MC_CMD_OP(cmd, 2, 8, 8, uint8_t, cfg->adv.max_multicast_filters); \ -+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, cfg->adv.max_vlan_filters); \ -+ MC_CMD_OP(cmd, 2, 24, 8, uint8_t, cfg->adv.max_qos_entries); \ -+ MC_CMD_OP(cmd, 2, 32, 8, uint8_t, cfg->adv.max_qos_key_size); \ -+ MC_CMD_OP(cmd, 2, 48, 8, uint8_t, cfg->adv.max_dist_key_size); \ -+ MC_CMD_OP(cmd, 2, 56, 8, enum net_prot, cfg->adv.start_hdr); \ -+ MC_CMD_OP(cmd, 4, 48, 8, uint8_t, cfg->adv.max_policers); \ -+ MC_CMD_OP(cmd, 4, 56, 8, uint8_t, cfg->adv.max_congestion_ctrl); \ -+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, cfg->adv.ext_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_POOLS(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_dpbp); \ -+ MC_CMD_OP(cmd, 0, 8, 1, int, cfg->pools[0].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 9, 1, int, cfg->pools[1].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 10, 1, int, cfg->pools[2].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 11, 1, int, cfg->pools[3].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 12, 1, int, cfg->pools[4].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 13, 1, int, cfg->pools[5].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 14, 1, int, cfg->pools[6].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 15, 1, int, cfg->pools[7].backup_pool); \ -+ MC_CMD_OP(cmd, 0, 32, 32, int, cfg->pools[0].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 32, 16, uint16_t, cfg->pools[0].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, int, cfg->pools[1].dpbp_id); \ -+ MC_CMD_OP(cmd, 4, 48, 16, uint16_t, cfg->pools[1].buffer_size);\ -+ MC_CMD_OP(cmd, 1, 32, 32, int, cfg->pools[2].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 0, 16, uint16_t, cfg->pools[2].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, cfg->pools[3].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 16, 16, uint16_t, cfg->pools[3].buffer_size);\ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->pools[4].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 32, 16, uint16_t, cfg->pools[4].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 
0, 32, int, cfg->pools[5].dpbp_id); \ -+ MC_CMD_OP(cmd, 5, 48, 16, uint16_t, cfg->pools[5].buffer_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, int, cfg->pools[6].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 0, 16, uint16_t, cfg->pools[6].buffer_size);\ -+ MC_CMD_OP(cmd, 4, 0, 32, int, cfg->pools[7].dpbp_id); \ -+ MC_CMD_OP(cmd, 6, 16, 16, uint16_t, cfg->pools[7].buffer_size);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, 
uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_ATTR(cmd, attr) \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, attr->ext_cfg_iova) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->max_tcs); \ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, attr->max_senders); \ -+ MC_RSP_OP(cmd, 0, 48, 8, enum net_prot, attr->start_hdr); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, attr->options); \ -+ MC_RSP_OP(cmd, 2, 0, 8, uint8_t, attr->max_unicast_filters); \ -+ MC_RSP_OP(cmd, 2, 8, 8, uint8_t, attr->max_multicast_filters);\ -+ MC_RSP_OP(cmd, 2, 16, 8, uint8_t, attr->max_vlan_filters); \ -+ MC_RSP_OP(cmd, 2, 24, 8, uint8_t, attr->max_qos_entries); \ -+ MC_RSP_OP(cmd, 2, 32, 8, uint8_t, attr->max_qos_key_size); \ -+ MC_RSP_OP(cmd, 2, 40, 8, uint8_t, attr->max_dist_key_size); \ -+ MC_RSP_OP(cmd, 4, 48, 8, uint8_t, attr->max_policers); \ -+ MC_RSP_OP(cmd, 4, 
56, 8, uint8_t, attr->max_congestion_ctrl); \ -+ MC_RSP_OP(cmd, 5, 32, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 5, 48, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, cfg->errors); \ -+ MC_CMD_OP(cmd, 0, 32, 4, enum dpni_error_action, cfg->error_action); \ -+ MC_CMD_OP(cmd, 0, 36, 1, int, cfg->set_frame_annotation); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, 
layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_RSP_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_RSP_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_RSP_OP(cmd, 1, 2, 1, int, layout->pass_frame_status); \ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_RSP_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, layout->private_data_size); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, layout->data_align); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, layout->options); \ -+ MC_CMD_OP(cmd, 1, 0, 1, int, layout->pass_timestamp); \ -+ MC_CMD_OP(cmd, 1, 1, 1, int, layout->pass_parser_result); \ -+ MC_CMD_OP(cmd, 1, 2, 1, int, 
layout->pass_frame_status); \ -+ MC_CMD_OP(cmd, 1, 16, 16, uint16_t, layout->data_head_room); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, layout->data_tail_room); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_QDID(cmd, qdid) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, qdid) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_SP_INFO(cmd, sp_info) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, sp_info->spids[0]); \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, sp_info->spids[1]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_DATA_OFFSET(cmd, data_offset) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, data_offset) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_COUNTER(cmd, counter) \ -+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_COUNTER(cmd, value) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, value) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_COUNTER(cmd, counter, value) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, enum dpni_counter, counter); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, value); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate);\ -+ 
MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_LINK_STATE(cmd, state) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 1, int, state->up);\ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, state->rate);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, state->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, tx_shaper->max_burst_size);\ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, tx_shaper->rate_limit);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, max_frame_length) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, max_frame_length) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MTU(cmd, mtu) \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, mtu) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MTU(cmd, mtu) \ -+ MC_RSP_OP(cmd, 0, 0, 16, uint16_t, mtu) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_MULTICAST_PROMISC(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_UNICAST_PROMISC(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_UNICAST_PROMISC(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, 
mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_RSP_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_RSP_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_RSP_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, mac_addr[5]); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, mac_addr[4]); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, mac_addr[3]); \ -+ MC_CMD_OP(cmd, 0, 40, 8, uint8_t, mac_addr[2]); \ -+ MC_CMD_OP(cmd, 0, 48, 8, uint8_t, mac_addr[1]); \ -+ MC_CMD_OP(cmd, 0, 56, 8, uint8_t, mac_addr[0]); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, unicast); \ -+ MC_CMD_OP(cmd, 0, 1, 1, int, multicast); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_FILTERS(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name 
*/ -+#define DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id) \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, vlan_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_SELECTION(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->tc_sched[0].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 0, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[0].mode); \ -+ MC_CMD_OP(cmd, 0, 32, 16, uint16_t, cfg->tc_sched[1].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 0, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[1].mode); \ -+ MC_CMD_OP(cmd, 1, 0, 16, uint16_t, cfg->tc_sched[2].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 1, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[2].mode); \ -+ MC_CMD_OP(cmd, 1, 32, 16, uint16_t, cfg->tc_sched[3].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 1, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[3].mode); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->tc_sched[4].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[4].mode); \ -+ MC_CMD_OP(cmd, 2, 32, 16, uint16_t, cfg->tc_sched[5].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 2, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[5].mode); \ -+ MC_CMD_OP(cmd, 3, 0, 16, uint16_t, cfg->tc_sched[6].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 16, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[6].mode); \ -+ MC_CMD_OP(cmd, 3, 32, 16, uint16_t, cfg->tc_sched[7].delta_bandwidth);\ -+ MC_CMD_OP(cmd, 3, 48, 4, enum dpni_tx_schedule_mode, \ -+ cfg->tc_sched[7].mode); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 16, uint16_t, cfg->dist_size); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 4, enum dpni_dist_mode, cfg->dist_mode); \ -+ MC_CMD_OP(cmd, 0, 28, 4, enum 
dpni_fs_miss_action, \ -+ cfg->fs_cfg.miss_action); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, cfg->fs_cfg.default_flow_id); \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_FLOW(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 43, 1, int, cfg->l3_chksum_gen);\ -+ MC_CMD_OP(cmd, 0, 44, 1, int, cfg->l4_chksum_gen);\ -+ MC_CMD_OP(cmd, 0, 45, 1, int, cfg->use_common_tx_conf_queue);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id);\ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_SET_TX_FLOW(cmd, flow_id) \ -+ MC_RSP_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_TX_FLOW(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_TX_FLOW(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 43, 1, int, attr->l3_chksum_gen);\ -+ MC_RSP_OP(cmd, 0, 44, 1, int, attr->l4_chksum_gen);\ -+ MC_RSP_OP(cmd, 0, 45, 1, int, attr->use_common_tx_conf_queue);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 3, 0, 4, enum dpni_flc_type, cfg->flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.frame_data_size);\ -+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ 
-+ cfg->flc_cfg.flow_context_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ -+ MC_CMD_OP(cmd, 5, 0, 32, uint32_t, cfg->tail_drop_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_FLOW(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ -+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.frame_data_size);\ -+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.flow_context_size);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->dest_cfg.priority);\ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, cfg->dest_cfg.dest_type);\ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->order_preservation_en);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_CMD_OP(cmd, 3, 0, 4, enum 
dpni_flc_type, cfg->flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.frame_data_size);\ -+ MC_CMD_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ cfg->flc_cfg.flow_context_size);\ -+ MC_CMD_OP(cmd, 3, 32, 32, uint32_t, cfg->flc_cfg.options);\ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, attr->dest_cfg.priority);\ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, attr->dest_cfg.dest_type);\ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->order_preservation_en);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->user_ctx); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, attr->fqid); \ -+ MC_RSP_OP(cmd, 3, 0, 4, enum dpni_flc_type, attr->flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 3, 4, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.frame_data_size);\ -+ MC_RSP_OP(cmd, 3, 8, 4, enum dpni_stash_size, \ -+ attr->flc_cfg.flow_context_size);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->flc_cfg.options);\ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, attr->flc_cfg.flow_context);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, revoke) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_QOS_TABLE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->default_tc); \ -+ MC_CMD_OP(cmd, 0, 40, 1, int, cfg->discard_on_miss); \ -+ MC_CMD_OP(cmd, 6, 0, 64, uint64_t, cfg->key_cfg_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, 
uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 24, 8, uint8_t, cfg->key_size); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->key_iova); \ -+ MC_CMD_OP(cmd, 2, 0, 64, uint64_t, cfg->mask_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_INSERTION(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_VLAN_REMOVAL(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IPR(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_IPF(cmd, en) \ -+ MC_CMD_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ -+ 
MC_CMD_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ -+ MC_CMD_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ -+ MC_CMD_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, tc_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 4, enum dpni_policer_mode, cfg->mode); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_policer_color, cfg->default_color); \ -+ MC_RSP_OP(cmd, 0, 8, 4, enum dpni_policer_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 32, 32, uint32_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->cir); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->cbs); \ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, cfg->eir); \ -+ MC_RSP_OP(cmd, 2, 32, 32, uint32_t, cfg->ebs);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_PREP_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_PREP_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ -+ MC_PREP_OP(ext, 0, 2, 2, \ -+ enum dpni_congestion_unit, cfg->units); \ -+ MC_PREP_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_PREP_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_PREP_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_PREP_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_PREP_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_PREP_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_PREP_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+ MC_PREP_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ 
-+ MC_PREP_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ -+ MC_PREP_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_EXT_EARLY_DROP(ext, cfg) \ -+do { \ -+ MC_EXT_OP(ext, 0, 0, 2, enum dpni_early_drop_mode, cfg->mode); \ -+ MC_EXT_OP(ext, 0, 2, 2, \ -+ enum dpni_congestion_unit, cfg->units); \ -+ MC_EXT_OP(ext, 0, 32, 32, uint32_t, cfg->tail_drop_threshold); \ -+ MC_EXT_OP(ext, 1, 0, 8, uint8_t, cfg->green.drop_probability); \ -+ MC_EXT_OP(ext, 2, 0, 64, uint64_t, cfg->green.max_threshold); \ -+ MC_EXT_OP(ext, 3, 0, 64, uint64_t, cfg->green.min_threshold); \ -+ MC_EXT_OP(ext, 5, 0, 8, uint8_t, cfg->yellow.drop_probability);\ -+ MC_EXT_OP(ext, 6, 0, 64, uint64_t, cfg->yellow.max_threshold); \ -+ MC_EXT_OP(ext, 7, 0, 64, uint64_t, cfg->yellow.min_threshold); \ -+ MC_EXT_OP(ext, 9, 0, 8, uint8_t, cfg->red.drop_probability); \ -+ MC_EXT_OP(ext, 10, 0, 64, uint64_t, cfg->red.max_threshold); \ -+ MC_EXT_OP(ext, 11, 0, 64, uint64_t, cfg->red.min_threshold); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 8, 8, 
uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, early_drop_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) -+ -+#define DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, 
uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id) \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, tc_id) -+ -+#define DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->queue_cfg.dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 40, 2, enum dpni_dest, \ -+ cfg->queue_cfg.dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 42, 1, int, cfg->errors_only); \ -+ MC_CMD_OP(cmd, 0, 46, 1, int, cfg->queue_cfg.order_preservation_en); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->queue_cfg.user_ctx); \ -+ MC_CMD_OP(cmd, 2, 0, 32, uint32_t, cfg->queue_cfg.options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, cfg->queue_cfg.dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 32, uint32_t, \ -+ cfg->queue_cfg.tail_drop_threshold); \ -+ MC_CMD_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ -+ cfg->queue_cfg.flc_cfg.flc_type); \ -+ MC_CMD_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ -+ cfg->queue_cfg.flc_cfg.frame_data_size); \ -+ MC_CMD_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ -+ 
cfg->queue_cfg.flc_cfg.flow_context_size); \ -+ MC_CMD_OP(cmd, 4, 32, 32, uint32_t, cfg->queue_cfg.flc_cfg.options); \ -+ MC_CMD_OP(cmd, 5, 0, 64, uint64_t, \ -+ cfg->queue_cfg.flc_cfg.flow_context); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_CONF(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+#define DPNI_RSP_GET_TX_CONF(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 32, 8, uint8_t, \ -+ attr->queue_attr.dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 0, 40, 2, enum dpni_dest, \ -+ attr->queue_attr.dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 42, 1, int, attr->errors_only); \ -+ MC_RSP_OP(cmd, 0, 46, 1, int, \ -+ attr->queue_attr.order_preservation_en); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->queue_attr.user_ctx); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, attr->queue_attr.dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 32, uint32_t, \ -+ attr->queue_attr.tail_drop_threshold); \ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->queue_attr.fqid); \ -+ MC_RSP_OP(cmd, 4, 0, 4, enum dpni_flc_type, \ -+ attr->queue_attr.flc_cfg.flc_type); \ -+ MC_RSP_OP(cmd, 4, 4, 4, enum dpni_stash_size, \ -+ attr->queue_attr.flc_cfg.frame_data_size); \ -+ MC_RSP_OP(cmd, 4, 8, 4, enum dpni_stash_size, \ -+ attr->queue_attr.flc_cfg.flow_context_size); \ -+ MC_RSP_OP(cmd, 4, 32, 32, uint32_t, attr->queue_attr.flc_cfg.options); \ -+ MC_RSP_OP(cmd, 5, 0, 64, uint64_t, \ -+ attr->queue_attr.flc_cfg.flow_context); \ -+} while (0) -+ -+#define DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_CMD_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_CMD_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_CMD_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_CMD_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_CMD_OP(cmd, 2, 32, 32, int, 
cfg->dest_cfg.dest_id); \ -+ MC_CMD_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_CMD_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#define DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id) \ -+ MC_CMD_OP(cmd, 0, 48, 16, uint16_t, flow_id) -+ -+#define DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 2, enum dpni_congestion_unit, cfg->units); \ -+ MC_RSP_OP(cmd, 0, 4, 4, enum dpni_dest, cfg->dest_cfg.dest_type); \ -+ MC_RSP_OP(cmd, 0, 16, 8, uint8_t, cfg->dest_cfg.priority); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->threshold_entry); \ -+ MC_RSP_OP(cmd, 1, 32, 32, uint32_t, cfg->threshold_exit); \ -+ MC_RSP_OP(cmd, 2, 0, 16, uint16_t, cfg->options); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, cfg->dest_cfg.dest_id); \ -+ MC_RSP_OP(cmd, 3, 0, 64, uint64_t, cfg->message_ctx); \ -+ MC_RSP_OP(cmd, 4, 0, 64, uint64_t, cfg->message_iova); \ -+} while (0) -+ -+#endif /* _FSL_DPNI_CMD_H */ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.c b/drivers/staging/fsl-dpaa2/ethernet/dpni.c -new file mode 100644 -index 0000000..c228ce5 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.c -@@ -0,0 +1,1907 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include "../../fsl-mc/include/mc-sys.h" -+#include "../../fsl-mc/include/mc-cmd.h" -+#include "dpni.h" -+#include "dpni-cmd.h" -+ -+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, -+ uint8_t *key_cfg_buf) -+{ -+ int i, j; -+ int offset = 0; -+ int param = 1; -+ uint64_t *params = (uint64_t *)key_cfg_buf; -+ -+ if (!key_cfg_buf || !cfg) -+ return -EINVAL; -+ -+ params[0] |= mc_enc(0, 8, cfg->num_extracts); -+ params[0] = cpu_to_le64(params[0]); -+ -+ if (cfg->num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) -+ return -EINVAL; -+ -+ for (i = 0; i < cfg->num_extracts; i++) { -+ switch (cfg->extracts[i].type) { -+ case DPKG_EXTRACT_FROM_HDR: -+ params[param] |= mc_enc(0, 8, -+ cfg->extracts[i].extract.from_hdr.prot); -+ params[param] |= mc_enc(8, 4, -+ cfg->extracts[i].extract.from_hdr.type); -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract.from_hdr.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. -+ from_hdr.offset); -+ params[param] |= mc_enc(32, 32, -+ cfg->extracts[i].extract. -+ from_hdr.field); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ params[param] |= mc_enc(0, 8, -+ cfg->extracts[i].extract. -+ from_hdr.hdr_index); -+ break; -+ case DPKG_EXTRACT_FROM_DATA: -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract. -+ from_data.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. -+ from_data.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ case DPKG_EXTRACT_FROM_PARSE: -+ params[param] |= mc_enc(16, 8, -+ cfg->extracts[i].extract. -+ from_parse.size); -+ params[param] |= mc_enc(24, 8, -+ cfg->extracts[i].extract. 
-+ from_parse.offset); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ break; -+ default: -+ return -EINVAL; -+ } -+ params[param] |= mc_enc( -+ 24, 8, cfg->extracts[i].num_of_byte_masks); -+ params[param] |= mc_enc(32, 4, cfg->extracts[i].type); -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ for (offset = 0, j = 0; -+ j < DPKG_NUM_OF_MASKS; -+ offset += 16, j++) { -+ params[param] |= mc_enc( -+ (offset), 8, cfg->extracts[i].masks[j].mask); -+ params[param] |= mc_enc( -+ (offset + 8), 8, -+ cfg->extracts[i].masks[j].offset); -+ } -+ params[param] = cpu_to_le64(params[param]); -+ param++; -+ } -+ return 0; -+} -+ -+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, -+ uint8_t *ext_cfg_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)ext_cfg_buf; -+ -+ DPNI_PREP_EXTENDED_CFG(ext_params, cfg); -+ -+ return 0; -+} -+ -+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, -+ const uint8_t *ext_cfg_buf) -+{ -+ const uint64_t *ext_params = (const uint64_t *)ext_cfg_buf; -+ -+ DPNI_EXT_EXTENDED_CFG(ext_params, cfg); -+ -+ return 0; -+} -+ -+int dpni_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpni_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPNI_CMD_OPEN(cmd, dpni_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpni_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpni_cfg *cfg, -+ 
uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPNI_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpni_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_pools_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_POOLS, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_POOLS(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send 
command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpni_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpni_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpni_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ 
-+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpni_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpni_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t 
token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_ATTR(cmd, attr); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_error_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_ERRORS_BEHAVIOR, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_ERRORS_BEHAVIOR(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPNI_CMDID_SET_RX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_CONF_BUFFER_LAYOUT(cmd, layout); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_BUFFER_LAYOUT, -+ cmd_flags, -+ token); -+ 
DPNI_CMD_SET_TX_CONF_BUFFER_LAYOUT(cmd, layout); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L3_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_L3_CHKSUM_VALIDATION(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L3_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_L3_CHKSUM_VALIDATION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_L4_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_L4_CHKSUM_VALIDATION(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_L4_CHKSUM_VALIDATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_L4_CHKSUM_VALIDATION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_qdid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, 
-+ uint16_t token, -+ uint16_t *qdid) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_QDID, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_QDID(cmd, *qdid); -+ -+ return 0; -+} -+ -+int dpni_get_sp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_sp_info *sp_info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_SP_INFO, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_SP_INFO(cmd, sp_info); -+ -+ return 0; -+} -+ -+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *data_offset) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_DATA_OFFSET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_DATA_OFFSET(cmd, *data_offset); -+ -+ return 0; -+} -+ -+int dpni_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t *value) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_COUNTER(cmd, counter); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_COUNTER(cmd, *value); -+ -+ return 0; -+} -+ -+int dpni_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ 
uint16_t token, -+ enum dpni_counter counter, -+ uint64_t value) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_COUNTER, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_COUNTER(cmd, counter, value); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_LINK_CFG, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_LINK_CFG(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_link_state *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_LINK_STATE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_LINK_STATE(cmd, state); -+ -+ return 0; -+} -+ -+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_shaping_cfg *tx_shaper) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SHAPING, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_SHAPING(cmd, tx_shaper); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MAX_FRAME_LENGTH(cmd, max_frame_length); -+ -+ /* 
send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *max_frame_length) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MAX_FRAME_LENGTH, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MAX_FRAME_LENGTH(cmd, *max_frame_length); -+ -+ return 0; -+} -+ -+int dpni_set_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t mtu) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MTU, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MTU(cmd, mtu); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *mtu) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_MTU, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MTU(cmd, *mtu); -+ -+ return 0; -+} -+ -+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_MCAST_PROMISC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_MULTICAST_PROMISC(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPNI_CMDID_GET_MCAST_PROMISC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_MULTICAST_PROMISC(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_UNICAST_PROMISC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_UNICAST_PROMISC(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_UNICAST_PROMISC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_UNICAST_PROMISC(cmd, *en); -+ -+ return 0; -+} -+ -+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_PRIM_MAC, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_PRIMARY_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_PRIM_MAC, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ 
DPNI_RSP_GET_PRIMARY_MAC_ADDR(cmd, mac_addr); -+ -+ return 0; -+} -+ -+int dpni_add_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_MAC_ADDR, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_MAC_ADDR, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_MAC_ADDR(cmd, mac_addr); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_MAC_FILTERS, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_MAC_FILTERS(cmd, unicast, multicast); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_FILTERS, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_VLAN_FILTERS(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_VLAN_ID, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_VLAN_ID(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ 
return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_VLAN_ID, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_VLAN_ID(cmd, vlan_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_VLAN_FILTERS, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_selection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_SELECTION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_SELECTION(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_dist_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_DIST, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_DIST(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *flow_id, -+ const struct dpni_tx_flow_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_FLOW(cmd, *flow_id, cfg); -+ -+ /* send command to mc*/ -+ 
err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_SET_TX_FLOW(cmd, *flow_id); -+ -+ return 0; -+} -+ -+int dpni_get_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_flow_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_FLOW(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_TX_FLOW(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ const struct dpni_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_FLOW(cmd, tc_id, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ struct dpni_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_FLOW, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_FLOW(cmd, tc_id, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_FLOW(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_queue_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_ERR_QUEUE, -+ 
cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_ERR_QUEUE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_queue_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_ERR_QUEUE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPNI_RSP_GET_RX_ERR_QUEUE(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int revoke) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF_REVOKE, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_REVOKE(cmd, revoke); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_qos_tbl_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_QOS_TBL, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_QOS_TABLE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg, -+ uint8_t tc_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_QOS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_QOS_ENTRY(cmd, cfg, tc_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg) -+{ -+ struct 
mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_QOS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_QOS_ENTRY(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_QOS_TBL, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_add_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg, -+ uint16_t flow_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_ADD_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_ADD_FS_ENTRY(cmd, tc_id, cfg, flow_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_REMOVE_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_REMOVE_FS_ENTRY(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_CLR_FS_ENT, -+ cmd_flags, -+ token); -+ DPNI_CMD_CLEAR_FS_ENTRIES(cmd, tc_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare 
command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_INSERTION, -+ cmd_flags, token); -+ DPNI_CMD_SET_VLAN_INSERTION(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_VLAN_REMOVAL, -+ cmd_flags, token); -+ DPNI_CMD_SET_VLAN_REMOVAL(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_ipr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPR, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IPR(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_ipf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_IPF, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_IPF(cmd, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_policing_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_POLICING, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_POLICING(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_rx_tc_policing_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_POLICING, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_POLICING(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_RX_TC_POLICING(cmd, cfg); -+ -+ return 0; -+} -+ -+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf) -+{ -+ uint64_t *ext_params = (uint64_t *)early_drop_buf; -+ -+ DPNI_PREP_EARLY_DROP(ext_params, cfg); -+} -+ -+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, -+ const uint8_t *early_drop_buf) -+{ -+ const uint64_t *ext_params = (const uint64_t *)early_drop_buf; -+ -+ DPNI_EXT_EARLY_DROP(ext_params, cfg); -+} -+ -+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_RX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_RX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command 
to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_TC_EARLY_DROP, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_TC_EARLY_DROP(cmd, tc_id, early_drop_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_RX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_RX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_RX_TC_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_SET_TX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ 
DPNI_CMD_SET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_TX_TC_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, tc_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_TC_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_tx_conf_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_SET_TX_CONF, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF(cmd, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_conf_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPNI_CMDID_GET_TX_CONF, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_CONF(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_CONF(cmd, attr); -+ -+ return 0; -+} -+ -+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ 
DPNI_CMDID_SET_TX_CONF_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_SET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_congestion_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPNI_CMDID_GET_TX_CONF_CONGESTION_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPNI_CMD_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, flow_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPNI_RSP_GET_TX_CONF_CONGESTION_NOTIFICATION(cmd, cfg); -+ -+ return 0; -+} -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpni.h b/drivers/staging/fsl-dpaa2/ethernet/dpni.h -new file mode 100644 -index 0000000..fca426d ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpni.h -@@ -0,0 +1,2581 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_DPNI_H -+#define __FSL_DPNI_H -+ -+#include "dpkg.h" -+ -+struct fsl_mc_io; -+ -+/** -+ * Data Path Network Interface API -+ * Contains initialization APIs and runtime control APIs for DPNI -+ */ -+ -+/** General DPNI macros */ -+ -+/** -+ * Maximum number of traffic classes -+ */ -+#define DPNI_MAX_TC 8 -+/** -+ * Maximum number of buffer pools per DPNI -+ */ -+#define DPNI_MAX_DPBP 8 -+/** -+ * Maximum number of storage-profiles per DPNI -+ */ -+#define DPNI_MAX_SP 2 -+ -+/** -+ * All traffic classes considered; see dpni_set_rx_flow() -+ */ -+#define DPNI_ALL_TCS (uint8_t)(-1) -+/** -+ * All flows within traffic class considered; see dpni_set_rx_flow() -+ */ -+#define DPNI_ALL_TC_FLOWS (uint16_t)(-1) -+/** -+ * Generate new flow ID; see dpni_set_tx_flow() -+ */ -+#define DPNI_NEW_FLOW_ID (uint16_t)(-1) -+/* use for common tx-conf queue; see dpni_set_tx_conf_() */ -+#define DPNI_COMMON_TX_CONF (uint16_t)(-1) -+ -+/** -+ * dpni_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpni_id: DPNI unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpni_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpni_id, -+ uint16_t *token); -+ -+/** -+ * dpni_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/* DPNI configuration options */ -+ -+/** -+ * Allow different distribution key profiles for different traffic classes; -+ * if not set, a single key profile is assumed -+ */ -+#define DPNI_OPT_ALLOW_DIST_KEY_PER_TC 0x00000001 -+ -+/** -+ * Disable all non-error transmit confirmation; error frames are reported -+ * back to a common Tx error queue -+ */ -+#define DPNI_OPT_TX_CONF_DISABLED 0x00000002 -+ -+/** -+ * Disable per-sender private Tx confirmation/error queue -+ */ -+#define DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED 0x00000004 -+ -+/** -+ * Support distribution based on hashed key; -+ * allows statistical distribution over receive queues in a traffic class -+ */ -+#define DPNI_OPT_DIST_HASH 0x00000010 -+ -+/** -+ * DEPRECATED - if this flag is selected and and all new 'max_fs_entries' are -+ * '0' then backward compatibility is preserved; -+ * Support distribution based on flow steering; -+ * allows explicit control of distribution over receive queues in a traffic -+ * class -+ */ -+#define DPNI_OPT_DIST_FS 0x00000020 -+ -+/** -+ * Unicast filtering support -+ */ -+#define DPNI_OPT_UNICAST_FILTER 0x00000080 -+/** -+ * Multicast filtering support -+ */ -+#define DPNI_OPT_MULTICAST_FILTER 0x00000100 -+/** -+ * VLAN filtering support -+ */ -+#define DPNI_OPT_VLAN_FILTER 0x00000200 -+/** -+ * Support IP reassembly on received packets -+ */ -+#define DPNI_OPT_IPR 
0x00000800 -+/** -+ * Support IP fragmentation on transmitted packets -+ */ -+#define DPNI_OPT_IPF 0x00001000 -+/** -+ * VLAN manipulation support -+ */ -+#define DPNI_OPT_VLAN_MANIPULATION 0x00010000 -+/** -+ * Support masking of QoS lookup keys -+ */ -+#define DPNI_OPT_QOS_MASK_SUPPORT 0x00020000 -+/** -+ * Support masking of Flow Steering lookup keys -+ */ -+#define DPNI_OPT_FS_MASK_SUPPORT 0x00040000 -+ -+/** -+ * struct dpni_extended_cfg - Structure representing extended DPNI configuration -+ * @tc_cfg: TCs configuration -+ * @ipr_cfg: IP reassembly configuration -+ */ -+struct dpni_extended_cfg { -+ /** -+ * struct tc_cfg - TC configuration -+ * @max_dist: Maximum distribution size for Rx traffic class; -+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, -+ * 112,128,192,224,256,384,448,512,768,896,1024; -+ * value '0' will be treated as '1'. -+ * other unsupported values will be round down to the nearest -+ * supported value. -+ * @max_fs_entries: Maximum FS entries for Rx traffic class; -+ * '0' means no support for this TC; -+ */ -+ struct { -+ uint16_t max_dist; -+ uint16_t max_fs_entries; -+ } tc_cfg[DPNI_MAX_TC]; -+ /** -+ * struct ipr_cfg - Structure representing IP reassembly configuration -+ * @max_reass_frm_size: Maximum size of the reassembled frame -+ * @min_frag_size_ipv4: Minimum fragment size of IPv4 fragments -+ * @min_frag_size_ipv6: Minimum fragment size of IPv6 fragments -+ * @max_open_frames_ipv4: Maximum concurrent IPv4 packets in reassembly -+ * process -+ * @max_open_frames_ipv6: Maximum concurrent IPv6 packets in reassembly -+ * process -+ */ -+ struct { -+ uint16_t max_reass_frm_size; -+ uint16_t min_frag_size_ipv4; -+ uint16_t min_frag_size_ipv6; -+ uint16_t max_open_frames_ipv4; -+ uint16_t max_open_frames_ipv6; -+ } ipr_cfg; -+}; -+ -+/** -+ * dpni_prepare_extended_cfg() - function prepare extended parameters -+ * @cfg: extended structure -+ * @ext_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ 
* This function has to be called before dpni_create() -+ */ -+int dpni_prepare_extended_cfg(const struct dpni_extended_cfg *cfg, -+ uint8_t *ext_cfg_buf); -+ -+/** -+ * struct dpni_cfg - Structure representing DPNI configuration -+ * @mac_addr: Primary MAC address -+ * @adv: Advanced parameters; default is all zeros; -+ * use this structure to change default settings -+ */ -+struct dpni_cfg { -+ uint8_t mac_addr[6]; -+ /** -+ * struct adv - Advanced parameters -+ * @options: Mask of available options; use 'DPNI_OPT_' values -+ * @start_hdr: Selects the packet starting header for parsing; -+ * 'NET_PROT_NONE' is treated as default: 'NET_PROT_ETH' -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; Non-power-of-2 values are rounded -+ * up to the next power-of-2 value as hardware demands it; -+ * '0' will be treated as '1' -+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx); -+ * '0' will e treated as '1' -+ * @max_unicast_filters: Maximum number of unicast filters; -+ * '0' is treated as '16' -+ * @max_multicast_filters: Maximum number of multicast filters; -+ * '0' is treated as '64' -+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in -+ * the QoS table; '0' is treated as '64' -+ * @max_qos_key_size: Maximum key size for the QoS look-up; -+ * '0' is treated as '24' which is enough for IPv4 -+ * 5-tuple -+ * @max_dist_key_size: Maximum key size for the distribution; -+ * '0' is treated as '24' which is enough for IPv4 5-tuple -+ * @max_policers: Maximum number of policers; -+ * should be between '0' and max_tcs -+ * @max_congestion_ctrl: Maximum number of congestion control groups -+ * (CGs); covers early drop and congestion notification -+ * requirements; -+ * should be between '0' and ('max_tcs' + 'max_senders') -+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory -+ * filled with the extended configuration by calling -+ * dpni_prepare_extended_cfg() -+ */ -+ 
struct { -+ uint32_t options; -+ enum net_prot start_hdr; -+ uint8_t max_senders; -+ uint8_t max_tcs; -+ uint8_t max_unicast_filters; -+ uint8_t max_multicast_filters; -+ uint8_t max_vlan_filters; -+ uint8_t max_qos_entries; -+ uint8_t max_qos_key_size; -+ uint8_t max_dist_key_size; -+ uint8_t max_policers; -+ uint8_t max_congestion_ctrl; -+ uint64_t ext_cfg_iova; -+ } adv; -+}; -+ -+/** -+ * dpni_create() - Create the DPNI object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPNI object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpni_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpni_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpni_destroy() - Destroy the DPNI object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpni_pools_cfg - Structure representing buffer pools configuration -+ * @num_dpbp: Number of DPBPs -+ * @pools: Array of buffer pools parameters; The number of valid entries -+ * must match 'num_dpbp' value -+ */ -+struct dpni_pools_cfg { -+ uint8_t num_dpbp; -+ /** -+ * struct pools - Buffer pools parameters -+ * @dpbp_id: DPBP object ID -+ * @buffer_size: Buffer size -+ * @backup_pool: Backup pool -+ */ -+ struct { -+ int dpbp_id; -+ uint16_t buffer_size; -+ int backup_pool; -+ } pools[DPNI_MAX_DPBP]; -+}; -+ -+/** -+ * dpni_set_pools() - Set buffer pools configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Buffer pools configuration -+ * -+ * mandatory for DPNI operation -+ * warning:Allowed only when DPNI is disabled -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_pools(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_pools_cfg *cfg); -+ -+/** -+ * dpni_enable() - Enable the DPNI, allow sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_disable() - Disable the DPNI, stop sending and receiving frames. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_is_enabled() - Check if the DPNI is enabled. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_reset() - Reset the DPNI, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPNI IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPNI_IRQ_INDEX 0 -+/** -+ * IRQ event - indicates a change in link state -+ */ -+#define DPNI_IRQ_EVENT_LINK_CHANGED 0x00000001 -+ -+/** -+ * struct dpni_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpni_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpni_set_irq() - Set IRQ information for the DPNI to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpni_irq_cfg *irq_cfg); -+ -+/** -+ * dpni_get_irq() - Get IRQ information from the DPNI. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpni_irq_cfg *irq_cfg); -+ -+/** -+ * dpni_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state: - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpni_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpni_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpni_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpni_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpni_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpni_attr - Structure representing DPNI attributes -+ * @id: DPNI object ID -+ * @version: DPNI version -+ * @start_hdr: Indicates the packet starting header for parsing -+ * @options: Mask of available options; reflects the value as was given in -+ * object's creation -+ * @max_senders: Maximum number of different senders; used as the number -+ * of dedicated Tx flows; -+ * @max_tcs: Maximum number of traffic classes (for both Tx and Rx) -+ * @max_unicast_filters: Maximum number of unicast filters -+ * @max_multicast_filters: Maximum number of multicast filters -+ * @max_vlan_filters: Maximum number of VLAN filters -+ * @max_qos_entries: if 'max_tcs > 1', declares the maximum entries in QoS table -+ * @max_qos_key_size: Maximum key size for the QoS look-up -+ * @max_dist_key_size: Maximum key size for the distribution look-up -+ * @max_policers: Maximum number of policers; -+ * @max_congestion_ctrl: Maximum number of congestion control groups (CGs); -+ * @ext_cfg_iova: I/O virtual address of 256 bytes DMA-able memory; -+ * call dpni_extract_extended_cfg() to extract the extended configuration -+ */ -+struct dpni_attr { -+ int id; -+ /** -+ * struct version - DPNI version -+ * @major: DPNI major version -+ * @minor: DPNI minor version -+ */ -+ 
struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ enum net_prot start_hdr; -+ uint32_t options; -+ uint8_t max_senders; -+ uint8_t max_tcs; -+ uint8_t max_unicast_filters; -+ uint8_t max_multicast_filters; -+ uint8_t max_vlan_filters; -+ uint8_t max_qos_entries; -+ uint8_t max_qos_key_size; -+ uint8_t max_dist_key_size; -+ uint8_t max_policers; -+ uint8_t max_congestion_ctrl; -+ uint64_t ext_cfg_iova; -+}; -+ -+/** -+ * dpni_get_attributes() - Retrieve DPNI attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @attr: Object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_attr *attr); -+ -+/** -+ * dpni_extract_extended_cfg() - extract the extended parameters -+ * @cfg: extended structure -+ * @ext_cfg_buf: 256 bytes of DMA-able memory -+ * -+ * This function has to be called after dpni_get_attributes() -+ */ -+int dpni_extract_extended_cfg(struct dpni_extended_cfg *cfg, -+ const uint8_t *ext_cfg_buf); -+ -+/** -+ * DPNI errors -+ */ -+ -+/** -+ * Extract out of frame header error -+ */ -+#define DPNI_ERROR_EOFHE 0x00020000 -+/** -+ * Frame length error -+ */ -+#define DPNI_ERROR_FLE 0x00002000 -+/** -+ * Frame physical error -+ */ -+#define DPNI_ERROR_FPE 0x00001000 -+/** -+ * Parsing header error -+ */ -+#define DPNI_ERROR_PHE 0x00000020 -+/** -+ * Parser L3 checksum error -+ */ -+#define DPNI_ERROR_L3CE 0x00000004 -+/** -+ * Parser L3 checksum error -+ */ -+#define DPNI_ERROR_L4CE 0x00000001 -+ -+/** -+ * enum dpni_error_action - Defines DPNI behavior for errors -+ * @DPNI_ERROR_ACTION_DISCARD: Discard the frame -+ * @DPNI_ERROR_ACTION_CONTINUE: Continue with the normal flow -+ * @DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE: Send the frame to the error queue -+ */ -+enum dpni_error_action { -+ DPNI_ERROR_ACTION_DISCARD 
= 0, -+ DPNI_ERROR_ACTION_CONTINUE = 1, -+ DPNI_ERROR_ACTION_SEND_TO_ERROR_QUEUE = 2 -+}; -+ -+/** -+ * struct dpni_error_cfg - Structure representing DPNI errors treatment -+ * @errors: Errors mask; use 'DPNI_ERROR__ -+ * @error_action: The desired action for the errors mask -+ * @set_frame_annotation: Set to '1' to mark the errors in frame annotation -+ * status (FAS); relevant only for the non-discard action -+ */ -+struct dpni_error_cfg { -+ uint32_t errors; -+ enum dpni_error_action error_action; -+ int set_frame_annotation; -+}; -+ -+/** -+ * dpni_set_errors_behavior() - Set errors behavior -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Errors configuration -+ * -+ * this function may be called numerous times with different -+ * error masks -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_errors_behavior(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_error_cfg *cfg); -+ -+/** -+ * DPNI buffer layout modification options -+ */ -+ -+/** -+ * Select to modify the time-stamp setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_TIMESTAMP 0x00000001 -+/** -+ * Select to modify the parser-result setting; not applicable for Tx -+ */ -+#define DPNI_BUF_LAYOUT_OPT_PARSER_RESULT 0x00000002 -+/** -+ * Select to modify the frame-status setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_FRAME_STATUS 0x00000004 -+/** -+ * Select to modify the private-data-size setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_PRIVATE_DATA_SIZE 0x00000008 -+/** -+ * Select to modify the data-alignment setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_ALIGN 0x00000010 -+/** -+ * Select to modify the data-head-room setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_HEAD_ROOM 0x00000020 -+/** -+ * Select to modify the data-tail-room setting -+ */ -+#define DPNI_BUF_LAYOUT_OPT_DATA_TAIL_ROOM 0x00000040 -+ -+/** -+ * struct dpni_buffer_layout - Structure 
representing DPNI buffer layout -+ * @options: Flags representing the suggested modifications to the buffer -+ * layout; Use any combination of 'DPNI_BUF_LAYOUT_OPT_' flags -+ * @pass_timestamp: Pass timestamp value -+ * @pass_parser_result: Pass parser results -+ * @pass_frame_status: Pass frame status -+ * @private_data_size: Size kept for private data (in bytes) -+ * @data_align: Data alignment -+ * @data_head_room: Data head room -+ * @data_tail_room: Data tail room -+ */ -+struct dpni_buffer_layout { -+ uint32_t options; -+ int pass_timestamp; -+ int pass_parser_result; -+ int pass_frame_status; -+ uint16_t private_data_size; -+ uint16_t data_align; -+ uint16_t data_head_room; -+ uint16_t data_tail_room; -+}; -+ -+/** -+ * dpni_get_rx_buffer_layout() - Retrieve Rx buffer layout attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_rx_buffer_layout() - Set Rx buffer layout configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_rx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_get_tx_buffer_layout() - Retrieve Tx buffer layout attributes. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_tx_buffer_layout() - Set Tx buffer layout configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_tx_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_get_tx_conf_buffer_layout() - Retrieve Tx confirmation buffer layout -+ * attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Returns buffer layout attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_tx_conf_buffer_layout() - Set Tx confirmation buffer layout -+ * configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @layout: Buffer layout configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ * -+ * @warning Allowed only when DPNI is disabled -+ */ -+int dpni_set_tx_conf_buffer_layout(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_buffer_layout *layout); -+ -+/** -+ * dpni_set_l3_chksum_validation() - Enable/disable L3 checksum validation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_l3_chksum_validation() - Get L3 checksum validation mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_l3_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_l4_chksum_validation() - Enable/disable L4 checksum validation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_l4_chksum_validation() - Get L4 checksum validation mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_l4_chksum_validation(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_get_qdid() - Get the Queuing Destination ID (QDID) that should be used -+ * for enqueue operations -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @qdid: Returned virtual QDID value that should be used as an argument -+ * in all enqueue operations -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_qdid(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *qdid); -+ -+/** -+ * struct dpni_sp_info - Structure representing DPNI storage-profile information -+ * (relevant only for DPNI owned by AIOP) -+ * @spids: array of storage-profiles -+ */ -+struct dpni_sp_info { -+ uint16_t spids[DPNI_MAX_SP]; -+}; -+ -+/** -+ * dpni_get_spids() - Get the AIOP storage profile IDs associated with the DPNI -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @sp_info: Returned AIOP storage-profile information -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Only relevant for DPNI that belongs to AIOP container. -+ */ -+int dpni_get_sp_info(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_sp_info *sp_info); -+ -+/** -+ * dpni_get_tx_data_offset() - Get the Tx data offset (from start of buffer) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @data_offset: Tx data offset (from start of buffer) -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_tx_data_offset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *data_offset); -+ -+/** -+ * enum dpni_counter - DPNI counter types -+ * @DPNI_CNT_ING_FRAME: Counts ingress frames -+ * @DPNI_CNT_ING_BYTE: Counts ingress bytes -+ * @DPNI_CNT_ING_FRAME_DROP: Counts ingress frames dropped due to explicit -+ * 'drop' setting -+ * @DPNI_CNT_ING_FRAME_DISCARD: Counts ingress frames discarded due to errors -+ * @DPNI_CNT_ING_MCAST_FRAME: Counts ingress multicast frames -+ * @DPNI_CNT_ING_MCAST_BYTE: Counts ingress multicast bytes -+ * @DPNI_CNT_ING_BCAST_FRAME: Counts ingress broadcast frames -+ * @DPNI_CNT_ING_BCAST_BYTES: Counts ingress broadcast bytes -+ * @DPNI_CNT_EGR_FRAME: Counts egress frames -+ * @DPNI_CNT_EGR_BYTE: Counts egress bytes -+ * @DPNI_CNT_EGR_FRAME_DISCARD: Counts egress frames discarded due to errors -+ */ -+enum dpni_counter { -+ DPNI_CNT_ING_FRAME = 0x0, -+ DPNI_CNT_ING_BYTE = 0x1, -+ DPNI_CNT_ING_FRAME_DROP = 0x2, -+ DPNI_CNT_ING_FRAME_DISCARD = 0x3, -+ DPNI_CNT_ING_MCAST_FRAME = 0x4, -+ DPNI_CNT_ING_MCAST_BYTE = 0x5, -+ DPNI_CNT_ING_BCAST_FRAME = 0x6, -+ DPNI_CNT_ING_BCAST_BYTES = 0x7, -+ DPNI_CNT_EGR_FRAME = 0x8, -+ DPNI_CNT_EGR_BYTE = 0x9, -+ DPNI_CNT_EGR_FRAME_DISCARD = 0xa -+}; -+ -+/** -+ * dpni_get_counter() - Read a specific DPNI counter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @counter: The requested counter -+ * @value: Returned counter's current value -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t *value); -+ -+/** -+ * dpni_set_counter() - Set (or clear) a specific DPNI counter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @counter: The requested counter -+ * @value: New counter value; typically pass '0' for resetting -+ * the counter. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpni_counter counter, -+ uint64_t value); -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPNI_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPNI_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPNI_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPNI_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct - Structure representing DPNI link configuration -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values -+ */ -+struct dpni_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpni_set_link_cfg() - set the link configuration. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_link_cfg *cfg); -+ -+/** -+ * struct dpni_link_state - Structure representing DPNI link state -+ * @rate: Rate -+ * @options: Mask of available options; use 'DPNI_LINK_OPT_' values -+ * @up: Link state; '0' for down, '1' for up -+ */ -+struct dpni_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpni_get_link_state() - Return the link state (either up or down) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @state: Returned link state; -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_link_state *state); -+ -+/** -+ * struct dpni_tx_shaping - Structure representing DPNI tx shaping configuration -+ * @rate_limit: rate in Mbps -+ * @max_burst_size: burst size in bytes (up to 64KB) -+ */ -+struct dpni_tx_shaping_cfg { -+ uint32_t rate_limit; -+ uint16_t max_burst_size; -+}; -+ -+/** -+ * dpni_set_tx_shaping() - Set the transmit shaping -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tx_shaper: tx shaping configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_tx_shaping(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_shaping_cfg *tx_shaper); -+ -+/** -+ * dpni_set_max_frame_length() - Set the maximum received frame length. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @max_frame_length: Maximum received frame length (in -+ * bytes); frame is discarded if its -+ * length exceeds this value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t max_frame_length); -+ -+/** -+ * dpni_get_max_frame_length() - Get the maximum received frame length. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @max_frame_length: Maximum received frame length (in -+ * bytes); frame is discarded if its -+ * length exceeds this value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_max_frame_length(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *max_frame_length); -+ -+/** -+ * dpni_set_mtu() - Set the MTU for the interface. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mtu: MTU length (in bytes) -+ * -+ * MTU determines the maximum fragment size for performing IP -+ * fragmentation on egress packets. -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t mtu); -+ -+/** -+ * dpni_get_mtu() - Get the MTU. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mtu: Returned MTU length (in bytes) -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_mtu(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *mtu); -+ -+/** -+ * dpni_set_multicast_promisc() - Enable/disable multicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_multicast_promisc() - Get multicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_multicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_unicast_promisc() - Enable/disable unicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_get_unicast_promisc() - Get unicast promiscuous mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Returns '1' if enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_unicast_promisc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpni_set_primary_mac_addr() - Set the primary MAC address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to set as primary address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_get_primary_mac_addr() - Get the primary MAC address -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: Returned MAC address -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_primary_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t mac_addr[6]); -+ -+/** -+ * dpni_add_mac_addr() - Add MAC address filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to add -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_remove_mac_addr() - Remove MAC address filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @mac_addr: MAC address to remove -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_remove_mac_addr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const uint8_t mac_addr[6]); -+ -+/** -+ * dpni_clear_mac_filters() - Clear all unicast and/or multicast MAC filters -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @unicast: Set to '1' to clear unicast addresses -+ * @multicast: Set to '1' to clear multicast addresses -+ * -+ * The primary MAC address is not cleared by this operation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_mac_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int unicast, -+ int multicast); -+ -+/** -+ * dpni_set_vlan_filters() - Enable/disable VLAN filtering mode -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_add_vlan_id() - Add VLAN ID filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @vlan_id: VLAN ID to add -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * dpni_remove_vlan_id() - Remove VLAN ID filter -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @vlan_id: VLAN ID to remove -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_remove_vlan_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t vlan_id); -+ -+/** -+ * dpni_clear_vlan_filters() - Clear all VLAN filters -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_clear_vlan_filters(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpni_tx_schedule_mode - DPNI Tx scheduling mode -+ * @DPNI_TX_SCHED_STRICT_PRIORITY: strict priority -+ * @DPNI_TX_SCHED_WEIGHTED: weighted based scheduling -+ */ -+enum dpni_tx_schedule_mode { -+ DPNI_TX_SCHED_STRICT_PRIORITY, -+ DPNI_TX_SCHED_WEIGHTED, -+}; -+ -+/** -+ * struct dpni_tx_schedule_cfg - Structure representing Tx -+ * scheduling configuration -+ * @mode: scheduling mode -+ * @delta_bandwidth: Bandwidth represented in weights from 100 to 10000; -+ * not applicable for 'strict-priority' mode; -+ */ -+struct dpni_tx_schedule_cfg { -+ enum dpni_tx_schedule_mode mode; -+ uint16_t delta_bandwidth; -+}; -+ -+/** -+ * struct dpni_tx_selection_cfg - Structure representing transmission -+ * selection configuration -+ * @tc_sched: an array of traffic-classes -+ */ -+struct dpni_tx_selection_cfg { -+ struct dpni_tx_schedule_cfg tc_sched[DPNI_MAX_TC]; -+}; -+ -+/** -+ * dpni_set_tx_selection() - Set transmission selection configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: transmission selection configuration -+ * -+ * warning: Allowed only when DPNI is disabled -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_tx_selection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_tx_selection_cfg *cfg); -+ -+/** -+ * enum dpni_dist_mode - DPNI distribution mode -+ * @DPNI_DIST_MODE_NONE: No distribution -+ * @DPNI_DIST_MODE_HASH: Use hash distribution; only relevant if -+ * the 'DPNI_OPT_DIST_HASH' option was set at DPNI creation -+ * @DPNI_DIST_MODE_FS: Use explicit flow steering; only relevant if -+ * the 'DPNI_OPT_DIST_FS' option was set at DPNI creation -+ */ -+enum dpni_dist_mode { -+ DPNI_DIST_MODE_NONE = 0, -+ DPNI_DIST_MODE_HASH = 1, -+ DPNI_DIST_MODE_FS = 2 -+}; -+ -+/** -+ * enum dpni_fs_miss_action - DPNI Flow Steering miss action -+ * @DPNI_FS_MISS_DROP: In case of no-match, drop the frame -+ * @DPNI_FS_MISS_EXPLICIT_FLOWID: In case of no-match, use explicit flow-id -+ * @DPNI_FS_MISS_HASH: In case of no-match, distribute using hash -+ */ -+enum dpni_fs_miss_action { -+ DPNI_FS_MISS_DROP = 0, -+ DPNI_FS_MISS_EXPLICIT_FLOWID = 1, -+ DPNI_FS_MISS_HASH = 2 -+}; -+ -+/** -+ * struct dpni_fs_tbl_cfg - Flow Steering table configuration -+ * @miss_action: Miss action selection -+ * @default_flow_id: Used when 'miss_action = DPNI_FS_MISS_EXPLICIT_FLOWID' -+ */ -+struct dpni_fs_tbl_cfg { -+ enum dpni_fs_miss_action miss_action; -+ uint16_t default_flow_id; -+}; -+ -+/** -+ * dpni_prepare_key_cfg() - function prepare extract parameters -+ * @cfg: defining a full Key Generation profile (rule) -+ * @key_cfg_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before the following functions: -+ * - dpni_set_rx_tc_dist() -+ * - dpni_set_qos_table() -+ */ -+int dpni_prepare_key_cfg(const struct dpkg_profile_cfg *cfg, -+ uint8_t *key_cfg_buf); -+ -+/** -+ * struct dpni_rx_tc_dist_cfg - Rx traffic class distribution configuration -+ * @dist_size: Set the distribution size; -+ * supported values: 1,2,3,4,6,7,8,12,14,16,24,28,32,48,56,64,96, -+ * 
112,128,192,224,256,384,448,512,768,896,1024 -+ * @dist_mode: Distribution mode -+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with -+ * the extractions to be used for the distribution key by calling -+ * dpni_prepare_key_cfg() relevant only when -+ * 'dist_mode != DPNI_DIST_MODE_NONE', otherwise it can be '0' -+ * @fs_cfg: Flow Steering table configuration; only relevant if -+ * 'dist_mode = DPNI_DIST_MODE_FS' -+ */ -+struct dpni_rx_tc_dist_cfg { -+ uint16_t dist_size; -+ enum dpni_dist_mode dist_mode; -+ uint64_t key_cfg_iova; -+ struct dpni_fs_tbl_cfg fs_cfg; -+}; -+ -+/** -+ * dpni_set_rx_tc_dist() - Set Rx traffic class distribution configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class distribution configuration -+ * -+ * warning: if 'dist_mode != DPNI_DIST_MODE_NONE', call dpni_prepare_key_cfg() -+ * first to prepare the key_cfg_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_set_rx_tc_dist(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_dist_cfg *cfg); -+ -+/** -+ * Set to select color aware mode (otherwise - color blind) -+ */ -+#define DPNI_POLICER_OPT_COLOR_AWARE 0x00000001 -+/** -+ * Set to discard frame with RED color -+ */ -+#define DPNI_POLICER_OPT_DISCARD_RED 0x00000002 -+ -+/** -+ * enum dpni_policer_mode - selecting the policer mode -+ * @DPNI_POLICER_MODE_NONE: Policer is disabled -+ * @DPNI_POLICER_MODE_PASS_THROUGH: Policer pass through -+ * @DPNI_POLICER_MODE_RFC_2698: Policer algorithm RFC 2698 -+ * @DPNI_POLICER_MODE_RFC_4115: Policer algorithm RFC 4115 -+ */ -+enum dpni_policer_mode { -+ DPNI_POLICER_MODE_NONE = 0, -+ DPNI_POLICER_MODE_PASS_THROUGH, -+ DPNI_POLICER_MODE_RFC_2698, -+ DPNI_POLICER_MODE_RFC_4115 -+}; -+ -+/** -+ * enum dpni_policer_unit - DPNI policer units -+ * @DPNI_POLICER_UNIT_BYTES: bytes units -+ * @DPNI_POLICER_UNIT_FRAMES: frames units -+ */ -+enum dpni_policer_unit { -+ DPNI_POLICER_UNIT_BYTES = 0, -+ DPNI_POLICER_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpni_policer_color - selecting the policer color -+ * @DPNI_POLICER_COLOR_GREEN: Green color -+ * @DPNI_POLICER_COLOR_YELLOW: Yellow color -+ * @DPNI_POLICER_COLOR_RED: Red color -+ */ -+enum dpni_policer_color { -+ DPNI_POLICER_COLOR_GREEN = 0, -+ DPNI_POLICER_COLOR_YELLOW, -+ DPNI_POLICER_COLOR_RED -+}; -+ -+/** -+ * struct dpni_rx_tc_policing_cfg - Policer configuration -+ * @options: Mask of available options; use 'DPNI_POLICER_OPT_' values -+ * @mode: policer mode -+ * @default_color: For pass-through mode the policer re-colors with this -+ * color any incoming packets. For Color aware non-pass-through mode: -+ * policer re-colors with this color all packets with FD[DROPP]>2. 
-+ * @units: Bytes or Packets -+ * @cir: Committed information rate (CIR) in Kbps or packets/second -+ * @cbs: Committed burst size (CBS) in bytes or packets -+ * @eir: Peak information rate (PIR, rfc2698) in Kbps or packets/second -+ * Excess information rate (EIR, rfc4115) in Kbps or packets/second -+ * @ebs: Peak burst size (PBS, rfc2698) in bytes or packets -+ * Excess burst size (EBS, rfc4115) in bytes or packets -+ */ -+struct dpni_rx_tc_policing_cfg { -+ uint32_t options; -+ enum dpni_policer_mode mode; -+ enum dpni_policer_unit units; -+ enum dpni_policer_color default_color; -+ uint32_t cir; -+ uint32_t cbs; -+ uint32_t eir; -+ uint32_t ebs; -+}; -+ -+/** -+ * dpni_set_rx_tc_policing() - Set Rx traffic class policing configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class policing configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rx_tc_policing_cfg *cfg); -+ -+/** -+ * dpni_get_rx_tc_policing() - Get Rx traffic class policing configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Traffic class policing configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_get_rx_tc_policing(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_rx_tc_policing_cfg *cfg); -+ -+/** -+ * enum dpni_congestion_unit - DPNI congestion units -+ * @DPNI_CONGESTION_UNIT_BYTES: bytes units -+ * @DPNI_CONGESTION_UNIT_FRAMES: frames units -+ */ -+enum dpni_congestion_unit { -+ DPNI_CONGESTION_UNIT_BYTES = 0, -+ DPNI_CONGESTION_UNIT_FRAMES -+}; -+ -+/** -+ * enum dpni_early_drop_mode - DPNI early drop mode -+ * @DPNI_EARLY_DROP_MODE_NONE: early drop is disabled -+ * @DPNI_EARLY_DROP_MODE_TAIL: early drop in taildrop mode -+ * @DPNI_EARLY_DROP_MODE_WRED: early drop in WRED mode -+ */ -+enum dpni_early_drop_mode { -+ DPNI_EARLY_DROP_MODE_NONE = 0, -+ DPNI_EARLY_DROP_MODE_TAIL, -+ DPNI_EARLY_DROP_MODE_WRED -+}; -+ -+/** -+ * struct dpni_wred_cfg - WRED configuration -+ * @max_threshold: maximum threshold that packets may be discarded. Above this -+ * threshold all packets are discarded; must be less than 2^39; -+ * approximated to be expressed as (x+256)*2^(y-1) due to HW -+ * implementation. -+ * @min_threshold: minimum threshold that packets may be discarded at -+ * @drop_probability: probability that a packet will be discarded (1-100, -+ * associated with the max_threshold). -+ */ -+struct dpni_wred_cfg { -+ uint64_t max_threshold; -+ uint64_t min_threshold; -+ uint8_t drop_probability; -+}; -+ -+/** -+ * struct dpni_early_drop_cfg - early-drop configuration -+ * @mode: drop mode -+ * @units: units type -+ * @green: WRED - 'green' configuration -+ * @yellow: WRED - 'yellow' configuration -+ * @red: WRED - 'red' configuration -+ * @tail_drop_threshold: tail drop threshold -+ */ -+struct dpni_early_drop_cfg { -+ enum dpni_early_drop_mode mode; -+ enum dpni_congestion_unit units; -+ -+ struct dpni_wred_cfg green; -+ struct dpni_wred_cfg yellow; -+ struct dpni_wred_cfg red; -+ -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * dpni_prepare_early_drop() - prepare an early drop. 
-+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called before dpni_set_rx_tc_early_drop or -+ * dpni_set_tx_tc_early_drop -+ * -+ */ -+void dpni_prepare_early_drop(const struct dpni_early_drop_cfg *cfg, -+ uint8_t *early_drop_buf); -+ -+/** -+ * dpni_extract_early_drop() - extract the early drop configuration. -+ * @cfg: Early-drop configuration -+ * @early_drop_buf: Zeroed 256 bytes of memory before mapping it to DMA -+ * -+ * This function has to be called after dpni_get_rx_tc_early_drop or -+ * dpni_get_tx_tc_early_drop -+ * -+ */ -+void dpni_extract_early_drop(struct dpni_early_drop_cfg *cfg, -+ const uint8_t *early_drop_buf); -+ -+/** -+ * dpni_set_rx_tc_early_drop() - Set Rx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled -+ * with the early-drop configuration by calling dpni_prepare_early_drop() -+ * -+ * warning: Before calling this function, call dpni_prepare_early_drop() to -+ * prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_set_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_get_rx_tc_early_drop() - Get Rx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory -+ * -+ * warning: After calling this function, call dpni_extract_early_drop() to -+ * get the early drop configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_get_rx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_set_tx_tc_early_drop() - Set Tx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory filled -+ * with the early-drop configuration by calling dpni_prepare_early_drop() -+ * -+ * warning: Before calling this function, call dpni_prepare_early_drop() to -+ * prepare the early_drop_iova parameter -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_set_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * dpni_get_tx_tc_early_drop() - Get Tx traffic class early-drop configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @early_drop_iova: I/O virtual address of 256 bytes DMA-able memory -+ * -+ * warning: After calling this function, call dpni_extract_early_drop() to -+ * get the early drop configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_get_tx_tc_early_drop(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint64_t early_drop_iova); -+ -+/** -+ * enum dpni_dest - DPNI destination types -+ * @DPNI_DEST_NONE: Unassigned destination; The queue is set in parked mode and -+ * does not generate FQDAN notifications; user is expected to -+ * dequeue from the queue based on polling or other user-defined -+ * method -+ * @DPNI_DEST_DPIO: The queue is set in schedule mode and generates FQDAN -+ * notifications to the specified DPIO; user is expected to dequeue -+ * from the queue only after notification is received -+ * @DPNI_DEST_DPCON: The queue is set in schedule mode and does not generate -+ * FQDAN notifications, but is connected to the specified DPCON -+ * object; user is expected to dequeue from the DPCON channel -+ */ -+enum dpni_dest { -+ DPNI_DEST_NONE = 0, -+ DPNI_DEST_DPIO = 1, -+ DPNI_DEST_DPCON = 2 -+}; -+ -+/** -+ * struct dpni_dest_cfg - Structure representing DPNI destination parameters -+ * @dest_type: Destination type -+ * @dest_id: Either DPIO ID or DPCON ID, depending on the destination type -+ * @priority: Priority selection within the DPIO or DPCON channel; valid values -+ * are 0-1 or 0-7, depending on the number of priorities in that -+ * channel; not relevant for 
'DPNI_DEST_NONE' option -+ */ -+struct dpni_dest_cfg { -+ enum dpni_dest dest_type; -+ int dest_id; -+ uint8_t priority; -+}; -+ -+/* DPNI congestion options */ -+ -+/** -+ * CSCN message is written to message_iova once entering a -+ * congestion state (see 'threshold_entry') -+ */ -+#define DPNI_CONG_OPT_WRITE_MEM_ON_ENTER 0x00000001 -+/** -+ * CSCN message is written to message_iova once exiting a -+ * congestion state (see 'threshold_exit') -+ */ -+#define DPNI_CONG_OPT_WRITE_MEM_ON_EXIT 0x00000002 -+/** -+ * CSCN write will attempt to allocate into a cache (coherent write); -+ * valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is selected -+ */ -+#define DPNI_CONG_OPT_COHERENT_WRITE 0x00000004 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to -+ * DPIO/DPCON's WQ channel once entering a congestion state -+ * (see 'threshold_entry') -+ */ -+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_ENTER 0x00000008 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' CSCN message is sent to -+ * DPIO/DPCON's WQ channel once exiting a congestion state -+ * (see 'threshold_exit') -+ */ -+#define DPNI_CONG_OPT_NOTIFY_DEST_ON_EXIT 0x00000010 -+/** -+ * if 'dest_cfg.dest_type != DPNI_DEST_NONE' when the CSCN is written to the -+ * sw-portal's DQRR, the DQRI interrupt is asserted immediately (if enabled) -+ */ -+#define DPNI_CONG_OPT_INTR_COALESCING_DISABLED 0x00000020 -+ -+/** -+ * struct dpni_congestion_notification_cfg - congestion notification -+ * configuration -+ * @units: units type -+ * @threshold_entry: above this threshold we enter a congestion state. -+ * set it to '0' to disable it -+ * @threshold_exit: below this threshold we exit the congestion state. 
-+ * @message_ctx: The context that will be part of the CSCN message -+ * @message_iova: I/O virtual address (must be in DMA-able memory), -+ * must be 16B aligned; valid only if 'DPNI_CONG_OPT_WRITE_MEM_' is -+ * contained in 'options' -+ * @dest_cfg: CSCN can be send to either DPIO or DPCON WQ channel -+ * @options: Mask of available options; use 'DPNI_CONG_OPT_' values -+ */ -+ -+struct dpni_congestion_notification_cfg { -+ enum dpni_congestion_unit units; -+ uint32_t threshold_entry; -+ uint32_t threshold_exit; -+ uint64_t message_ctx; -+ uint64_t message_iova; -+ struct dpni_dest_cfg dest_cfg; -+ uint16_t options; -+}; -+ -+/** -+ * dpni_set_rx_tc_congestion_notification() - Set Rx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_rx_tc_congestion_notification() - Get Rx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_get_rx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_set_tx_tc_congestion_notification() - Set Tx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_tx_tc_congestion_notification() - Get Tx traffic class congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: congestion notification configuration -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_get_tx_tc_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * enum dpni_flc_type - DPNI FLC types -+ * @DPNI_FLC_USER_DEFINED: select the FLC to be used for user defined value -+ * @DPNI_FLC_STASH: select the FLC to be used for stash control -+ */ -+enum dpni_flc_type { -+ DPNI_FLC_USER_DEFINED = 0, -+ DPNI_FLC_STASH = 1, -+}; -+ -+/** -+ * enum dpni_stash_size - DPNI FLC stashing size -+ * @DPNI_STASH_SIZE_0B: no stash -+ * @DPNI_STASH_SIZE_64B: stashes 64 bytes -+ * @DPNI_STASH_SIZE_128B: stashes 128 bytes -+ * @DPNI_STASH_SIZE_192B: stashes 192 bytes -+ */ -+enum dpni_stash_size { -+ DPNI_STASH_SIZE_0B = 0, -+ DPNI_STASH_SIZE_64B = 1, -+ DPNI_STASH_SIZE_128B = 2, -+ DPNI_STASH_SIZE_192B = 3, -+}; -+ -+/* DPNI FLC stash options */ -+ -+/** -+ * stashes the whole annotation area (up to 192 bytes) -+ */ -+#define DPNI_FLC_STASH_FRAME_ANNOTATION 0x00000001 -+ -+/** -+ * struct dpni_flc_cfg - Structure representing DPNI FLC configuration -+ * @flc_type: FLC type -+ * @options: Mask of available options; -+ * use 'DPNI_FLC_STASH_' values -+ * @frame_data_size: Size of frame data to be stashed -+ * @flow_context_size: Size of flow context to be stashed -+ * @flow_context: 1. In case flc_type is 'DPNI_FLC_USER_DEFINED': -+ * this value will be provided in the frame descriptor -+ * (FD[FLC]) -+ * 2. 
In case flc_type is 'DPNI_FLC_STASH': -+ * this value will be I/O virtual address of the -+ * flow-context; -+ * Must be cacheline-aligned and DMA-able memory -+ */ -+struct dpni_flc_cfg { -+ enum dpni_flc_type flc_type; -+ uint32_t options; -+ enum dpni_stash_size frame_data_size; -+ enum dpni_stash_size flow_context_size; -+ uint64_t flow_context; -+}; -+ -+/** -+ * DPNI queue modification options -+ */ -+ -+/** -+ * Select to modify the user's context associated with the queue -+ */ -+#define DPNI_QUEUE_OPT_USER_CTX 0x00000001 -+/** -+ * Select to modify the queue's destination -+ */ -+#define DPNI_QUEUE_OPT_DEST 0x00000002 -+/** Select to modify the flow-context parameters; -+ * not applicable for Tx-conf/Err queues as the FD comes from the user -+ */ -+#define DPNI_QUEUE_OPT_FLC 0x00000004 -+/** -+ * Select to modify the queue's order preservation -+ */ -+#define DPNI_QUEUE_OPT_ORDER_PRESERVATION 0x00000008 -+/* Select to modify the queue's tail-drop threshold */ -+#define DPNI_QUEUE_OPT_TAILDROP_THRESHOLD 0x00000010 -+ -+/** -+ * struct dpni_queue_cfg - Structure representing queue configuration -+ * @options: Flags representing the suggested modifications to the queue; -+ * Use any combination of 'DPNI_QUEUE_OPT_' flags -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame; valid only if 'DPNI_QUEUE_OPT_USER_CTX' -+ * is contained in 'options' -+ * @dest_cfg: Queue destination parameters; -+ * valid only if 'DPNI_QUEUE_OPT_DEST' is contained in 'options' -+ * @flc_cfg: Flow context configuration; in case the TC's distribution -+ * is either NONE or HASH the FLC's settings of flow#0 are used. -+ * in the case of FS (flow-steering) the flow's FLC settings -+ * are used. 
-+ * valid only if 'DPNI_QUEUE_OPT_FLC' is contained in 'options' -+ * @order_preservation_en: enable/disable order preservation; -+ * valid only if 'DPNI_QUEUE_OPT_ORDER_PRESERVATION' is contained -+ * in 'options' -+ * @tail_drop_threshold: set the queue's tail drop threshold in bytes; -+ * '0' value disable the threshold; maximum value is 0xE000000; -+ * valid only if 'DPNI_QUEUE_OPT_TAILDROP_THRESHOLD' is contained -+ * in 'options' -+ */ -+struct dpni_queue_cfg { -+ uint32_t options; -+ uint64_t user_ctx; -+ struct dpni_dest_cfg dest_cfg; -+ struct dpni_flc_cfg flc_cfg; -+ int order_preservation_en; -+ uint32_t tail_drop_threshold; -+}; -+ -+/** -+ * struct dpni_queue_attr - Structure representing queue attributes -+ * @user_ctx: User context value provided in the frame descriptor of each -+ * dequeued frame -+ * @dest_cfg: Queue destination configuration -+ * @flc_cfg: Flow context configuration -+ * @order_preservation_en: enable/disable order preservation -+ * @tail_drop_threshold: queue's tail drop threshold in bytes; -+ * @fqid: Virtual fqid value to be used for dequeue operations -+ */ -+struct dpni_queue_attr { -+ uint64_t user_ctx; -+ struct dpni_dest_cfg dest_cfg; -+ struct dpni_flc_cfg flc_cfg; -+ int order_preservation_en; -+ uint32_t tail_drop_threshold; -+ -+ uint32_t fqid; -+}; -+ -+/** -+ * DPNI Tx flow modification options -+ */ -+ -+/** -+ * Select to modify the settings for dedicate Tx confirmation/error -+ */ -+#define DPNI_TX_FLOW_OPT_TX_CONF_ERROR 0x00000001 -+/** -+ * Select to modify the L3 checksum generation setting -+ */ -+#define DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN 0x00000010 -+/** -+ * Select to modify the L4 checksum generation setting -+ */ -+#define DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN 0x00000020 -+ -+/** -+ * struct dpni_tx_flow_cfg - Structure representing Tx flow configuration -+ * @options: Flags representing the suggested modifications to the Tx flow; -+ * Use any combination 'DPNI_TX_FLOW_OPT_' flags -+ * @use_common_tx_conf_queue: 
Set to '1' to use the common (default) Tx -+ * confirmation and error queue; Set to '0' to use the private -+ * Tx confirmation and error queue; valid only if -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' wasn't set at DPNI creation -+ * and 'DPNI_TX_FLOW_OPT_TX_CONF_ERROR' is contained in 'options' -+ * @l3_chksum_gen: Set to '1' to enable L3 checksum generation; '0' to disable; -+ * valid only if 'DPNI_TX_FLOW_OPT_L3_CHKSUM_GEN' is contained in 'options' -+ * @l4_chksum_gen: Set to '1' to enable L4 checksum generation; '0' to disable; -+ * valid only if 'DPNI_TX_FLOW_OPT_L4_CHKSUM_GEN' is contained in 'options' -+ */ -+struct dpni_tx_flow_cfg { -+ uint32_t options; -+ int use_common_tx_conf_queue; -+ int l3_chksum_gen; -+ int l4_chksum_gen; -+}; -+ -+/** -+ * dpni_set_tx_flow() - Set Tx flow configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: Provides (or returns) the sender's flow ID; -+ * for each new sender set (*flow_id) to 'DPNI_NEW_FLOW_ID' to generate -+ * a new flow_id; this ID should be used as the QDBIN argument -+ * in enqueue operations -+ * @cfg: Tx flow configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t *flow_id, -+ const struct dpni_tx_flow_cfg *cfg); -+ -+/** -+ * struct dpni_tx_flow_attr - Structure representing Tx flow attributes -+ * @use_common_tx_conf_queue: '1' if using common (default) Tx confirmation and -+ * error queue; '0' if using private Tx confirmation and error queue -+ * @l3_chksum_gen: '1' if L3 checksum generation is enabled; '0' if disabled -+ * @l4_chksum_gen: '1' if L4 checksum generation is enabled; '0' if disabled -+ */ -+struct dpni_tx_flow_attr { -+ int use_common_tx_conf_queue; -+ int l3_chksum_gen; -+ int l4_chksum_gen; -+}; -+ -+/** -+ * dpni_get_tx_flow() - Get Tx flow attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function -+ * @attr: Returned Tx flow attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_tx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_flow_attr *attr); -+ -+/** -+ * struct dpni_tx_conf_cfg - Structure representing Tx conf configuration -+ * @errors_only: Set to '1' to report back only error frames; -+ * Set to '0' to confirm transmission/error for all transmitted frames; -+ * @queue_cfg: Queue configuration -+ */ -+struct dpni_tx_conf_cfg { -+ int errors_only; -+ struct dpni_queue_cfg queue_cfg; -+}; -+ -+/** -+ * dpni_set_tx_conf() - Set Tx confirmation and error queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: Queue configuration -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * if 'DPNI_OPT_TX_CONF_DISABLED' was selected, only error frames are reported -+ * back - successfully transmitted frames are not confirmed. Otherwise, all -+ * transmitted frames are sent for confirmation. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_tx_conf_cfg *cfg); -+ -+/** -+ * struct dpni_tx_conf_attr - Structure representing Tx conf attributes -+ * @errors_only: '1' if only error frames are reported back; '0' if all -+ * transmitted frames are confirmed -+ * @queue_attr: Queue attributes -+ */ -+struct dpni_tx_conf_attr { -+ int errors_only; -+ struct dpni_queue_attr queue_attr; -+}; -+ -+/** -+ * dpni_get_tx_conf() - Get Tx confirmation and error queue attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @attr: Returned tx-conf attributes -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_tx_conf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_tx_conf_attr *attr); -+ -+/** -+ * dpni_set_tx_conf_congestion_notification() - Set Tx conf congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: congestion notification configuration -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpni_set_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ const struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_get_tx_conf_congestion_notification() - Get Tx conf congestion -+ * notification configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @flow_id: The sender's flow ID, as returned by the -+ * dpni_set_tx_flow() function; -+ * use 'DPNI_COMMON_TX_CONF' for common tx-conf -+ * @cfg: congestion notification -+ * -+ * If either 'DPNI_OPT_TX_CONF_DISABLED' or -+ * 'DPNI_OPT_PRIVATE_TX_CONF_ERROR_DISABLED' were selected at DPNI creation, -+ * this function can ONLY be used with 'flow_id == DPNI_COMMON_TX_CONF'; -+ * i.e. only serve the common tx-conf-err queue; -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpni_get_tx_conf_congestion_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint16_t flow_id, -+ struct dpni_congestion_notification_cfg *cfg); -+ -+/** -+ * dpni_set_tx_conf_revoke() - Tx confirmation revocation -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @revoke: revoke or not -+ * -+ * This function is useful only when 'DPNI_OPT_TX_CONF_DISABLED' is not -+ * selected at DPNI creation. -+ * Calling this function with 'revoke' set to '1' disables all transmit -+ * confirmation (including the private confirmation queues), regardless of -+ * previous settings; Note that in this case, Tx error frames are still -+ * enqueued to the general transmit errors queue. -+ * Calling this function with 'revoke' set to '0' restores the previous -+ * settings for both general and private transmit confirmation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_tx_conf_revoke(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int revoke); -+ -+/** -+ * dpni_set_rx_flow() - Set Rx flow configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7); -+ * use 'DPNI_ALL_TCS' to set all TCs and all flows -+ * @flow_id: Rx flow id within the traffic class; use -+ * 'DPNI_ALL_TC_FLOWS' to set all flows within -+ * this tc_id; ignored if tc_id is set to -+ * 'DPNI_ALL_TCS'; -+ * @cfg: Rx flow configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ const struct dpni_queue_cfg *cfg); -+ -+/** -+ * dpni_get_rx_flow() - Get Rx flow attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @flow_id: Rx flow id within the traffic class -+ * @attr: Returned Rx flow attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_get_rx_flow(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ uint16_t flow_id, -+ struct dpni_queue_attr *attr); -+ -+/** -+ * dpni_set_rx_err_queue() - Set Rx error queue configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: Queue configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_queue_cfg *cfg); -+ -+/** -+ * dpni_get_rx_err_queue() - Get Rx error queue attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @attr: Returned Queue attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_get_rx_err_queue(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpni_queue_attr *attr); -+ -+/** -+ * struct dpni_qos_tbl_cfg - Structure representing QOS table configuration -+ * @key_cfg_iova: I/O virtual address of 256 bytes DMA-able memory filled with -+ * key extractions to be used as the QoS criteria by calling -+ * dpni_prepare_key_cfg() -+ * @discard_on_miss: Set to '1' to discard frames in case of no match (miss); -+ * '0' to use the 'default_tc' in such cases -+ * @default_tc: Used in case of no-match and 'discard_on_miss'= 0 -+ */ -+struct dpni_qos_tbl_cfg { -+ uint64_t key_cfg_iova; -+ int discard_on_miss; -+ uint8_t default_tc; -+}; -+ -+/** -+ * dpni_set_qos_table() - Set QoS mapping table -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS table configuration -+ * -+ * This function and all QoS-related functions require that -+ *'max_tcs > 1' was set at DPNI creation. -+ * -+ * warning: Before calling this function, call dpni_prepare_key_cfg() to -+ * prepare the key_cfg_iova parameter -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_qos_tbl_cfg *cfg); -+ -+/** -+ * struct dpni_rule_cfg - Rule configuration for table lookup -+ * @key_iova: I/O virtual address of the key (must be in DMA-able memory) -+ * @mask_iova: I/O virtual address of the mask (must be in DMA-able memory) -+ * @key_size: key and mask size (in bytes) -+ */ -+struct dpni_rule_cfg { -+ uint64_t key_iova; -+ uint64_t mask_iova; -+ uint8_t key_size; -+}; -+ -+/** -+ * dpni_add_qos_entry() - Add QoS mapping entry (to select a traffic class) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS rule to add -+ * @tc_id: Traffic class selection (0-7) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg, -+ uint8_t tc_id); -+ -+/** -+ * dpni_remove_qos_entry() - Remove QoS mapping entry -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @cfg: QoS rule to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_qos_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dpni_rule_cfg *cfg); -+ -+/** -+ * dpni_clear_qos_table() - Clear all QoS mapping entries -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * -+ * Following this function call, all frames are directed to -+ * the default traffic class (0) -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_clear_qos_table(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpni_add_fs_entry() - Add Flow Steering entry for a specific traffic class -+ * (to select a flow ID) -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Flow steering rule to add -+ * @flow_id: Flow id selection (must be smaller than the -+ * distribution size of the traffic class) -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_add_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg, -+ uint16_t flow_id); -+ -+/** -+ * dpni_remove_fs_entry() - Remove Flow Steering entry from a specific -+ * traffic class -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * @cfg: Flow steering rule to remove -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_remove_fs_entry(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id, -+ const struct dpni_rule_cfg *cfg); -+ -+/** -+ * dpni_clear_fs_entries() - Clear all Flow Steering entries of a specific -+ * traffic class -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @tc_id: Traffic class selection (0-7) -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_clear_fs_entries(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t tc_id); -+ -+/** -+ * dpni_set_vlan_insertion() - Enable/disable VLAN insertion for egress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set -+ * at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_insertion(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_vlan_removal() - Enable/disable VLAN removal for ingress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_VLAN_MANIPULATION' option is set -+ * at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_vlan_removal(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_ipr() - Enable/disable IP reassembly of ingress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_IPR' option is set at DPNI creation. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpni_set_ipr(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+/** -+ * dpni_set_ipf() - Enable/disable IP fragmentation of egress frames -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPNI object -+ * @en: Set to '1' to enable; '0' to disable -+ * -+ * Requires that the 'DPNI_OPT_IPF' option is set at DPNI -+ * creation. Fragmentation is performed according to MTU value -+ * set by dpni_set_mtu() function -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpni_set_ipf(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int en); -+ -+#endif /* __FSL_DPNI_H */ -diff --git a/drivers/staging/fsl-dpaa2/mac/Kconfig b/drivers/staging/fsl-dpaa2/mac/Kconfig -new file mode 100644 -index 0000000..174a9cd ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/Kconfig -@@ -0,0 +1,24 @@ -+config FSL_DPAA2_MAC -+ tristate "DPAA2 MAC / PHY interface" -+ depends on FSL_MC_BUS && FSL_DPAA2 -+ select MDIO_BUS_MUX_MMIOREG -+ select FSL_XGMAC_MDIO -+ select FIXED_PHY -+ ---help--- -+ Prototype driver for DPAA2 MAC / PHY interface object. -+ This driver works as a proxy between phylib including phy drivers and -+ the MC firmware. It receives updates on link state changes from PHY -+ lib and forwards them to MC and receives interrupt from MC whenever -+ a request is made to change the link state. -+ -+ -+config FSL_DPAA2_MAC_NETDEVS -+ bool "Expose net interfaces for PHYs" -+ default n -+ depends on FSL_DPAA2_MAC -+ ---help--- -+ Exposes macX net interfaces which allow direct control over MACs and -+ PHYs. -+ . -+ Leave disabled if unsure. 
-+ -diff --git a/drivers/staging/fsl-dpaa2/mac/Makefile b/drivers/staging/fsl-dpaa2/mac/Makefile -new file mode 100644 -index 0000000..bda9410 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/Makefile -@@ -0,0 +1,10 @@ -+ -+obj-$(CONFIG_FSL_DPAA2_MAC) += dpaa2-mac.o -+ -+dpaa2-mac-objs := mac.o dpmac.o -+ -+all: -+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules -+ -+clean: -+ make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean -diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h -new file mode 100644 -index 0000000..dc00590 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/dpmac-cmd.h -@@ -0,0 +1,195 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPMAC_CMD_H -+#define _FSL_DPMAC_CMD_H -+ -+/* DPMAC Version */ -+#define DPMAC_VER_MAJOR 3 -+#define DPMAC_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPMAC_CMDID_CLOSE 0x800 -+#define DPMAC_CMDID_OPEN 0x80c -+#define DPMAC_CMDID_CREATE 0x90c -+#define DPMAC_CMDID_DESTROY 0x900 -+ -+#define DPMAC_CMDID_GET_ATTR 0x004 -+#define DPMAC_CMDID_RESET 0x005 -+ -+#define DPMAC_CMDID_SET_IRQ 0x010 -+#define DPMAC_CMDID_GET_IRQ 0x011 -+#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPMAC_CMDID_SET_IRQ_MASK 0x014 -+#define DPMAC_CMDID_GET_IRQ_MASK 0x015 -+#define DPMAC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPMAC_CMDID_MDIO_READ 0x0c0 -+#define DPMAC_CMDID_MDIO_WRITE 0x0c1 -+#define DPMAC_CMDID_GET_LINK_CFG 0x0c2 -+#define DPMAC_CMDID_SET_LINK_STATE 0x0c3 -+#define DPMAC_CMDID_GET_COUNTER 0x0c4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_OPEN(cmd, dpmac_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, 
irq_cfg->addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ 
-+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\ -+ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_READ(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_MDIO_READ(cmd, data) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_COUNTER(cmd, type) 
\ -+ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+#endif /* _FSL_DPMAC_CMD_H */ -diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.c b/drivers/staging/fsl-dpaa2/mac/dpmac.c -new file mode 100644 -index 0000000..fc23b40 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.c -@@ -0,0 +1,422 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "../../fsl-mc/include/mc-sys.h" -+#include "../../fsl-mc/include/mc-cmd.h" -+#include "dpmac.h" -+#include "dpmac-cmd.h" -+ -+int dpmac_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmac_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPMAC_CMD_OPEN(cmd, dpmac_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpmac_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPMAC_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 
0; -+} -+ -+int dpmac_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmac_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmac_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, 
-+ token); -+ DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPMAC_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_GET_ATTRIBUTES(cmd, attr); -+ -+ return 0; -+} -+ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_READ, -+ cmd_flags, -+ token); -+ DPMAC_CMD_MDIO_READ(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPMAC_RSP_MDIO_READ(cmd, cfg->data); -+ -+ return 0; -+} -+ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_MDIO_WRITE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_MDIO_WRITE(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_LINK_CFG, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ 
DPMAC_RSP_GET_LINK_CFG(cmd, cfg); -+ -+ return 0; -+} -+ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_state *link_state) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_SET_LINK_STATE, -+ cmd_flags, -+ token); -+ DPMAC_CMD_SET_LINK_STATE(cmd, link_state); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter) -+{ -+ struct mc_command cmd = { 0 }; -+ int err = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMAC_CMDID_GET_COUNTER, -+ cmd_flags, -+ token); -+ DPMAC_CMD_GET_COUNTER(cmd, type); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ DPMAC_RSP_GET_COUNTER(cmd, *counter); -+ -+ return 0; -+} -diff --git a/drivers/staging/fsl-dpaa2/mac/dpmac.h b/drivers/staging/fsl-dpaa2/mac/dpmac.h -new file mode 100644 -index 0000000..ad27772 ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/dpmac.h -@@ -0,0 +1,593 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMAC_H -+#define __FSL_DPMAC_H -+ -+/* Data Path MAC API -+ * Contains initialization APIs and runtime control APIs for DPMAC -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpmac_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpmac_id: DPMAC unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpmac_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmac_id, -+ uint16_t *token); -+ -+/** -+ * dpmac_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpmac_link_type - DPMAC link type -+ * @DPMAC_LINK_TYPE_NONE: No link -+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type -+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID -+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type -+ */ -+enum dpmac_link_type { -+ DPMAC_LINK_TYPE_NONE, -+ DPMAC_LINK_TYPE_FIXED, -+ DPMAC_LINK_TYPE_PHY, -+ DPMAC_LINK_TYPE_BACKPLANE -+}; -+ -+/** -+ * enum dpmac_eth_if - DPMAC Ethrnet interface -+ * @DPMAC_ETH_IF_MII: MII interface -+ * @DPMAC_ETH_IF_RMII: RMII interface -+ * @DPMAC_ETH_IF_SMII: SMII interface -+ * @DPMAC_ETH_IF_GMII: GMII interface -+ * @DPMAC_ETH_IF_RGMII: RGMII interface -+ * @DPMAC_ETH_IF_SGMII: SGMII interface -+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface -+ * @DPMAC_ETH_IF_XAUI: XAUI interface -+ * @DPMAC_ETH_IF_XFI: XFI interface -+ */ -+enum dpmac_eth_if { -+ DPMAC_ETH_IF_MII, -+ DPMAC_ETH_IF_RMII, -+ DPMAC_ETH_IF_SMII, -+ DPMAC_ETH_IF_GMII, -+ DPMAC_ETH_IF_RGMII, -+ DPMAC_ETH_IF_SGMII, -+ DPMAC_ETH_IF_QSGMII, -+ DPMAC_ETH_IF_XAUI, -+ DPMAC_ETH_IF_XFI -+}; -+ -+/** -+ * struct dpmac_cfg - Structure representing DPMAC configuration -+ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP, -+ * the MAC IDs are continuous. -+ * For example: 2 WRIOPs, 16 MACs in each: -+ * MAC IDs for the 1st WRIOP: 1-16, -+ * MAC IDs for the 2nd WRIOP: 17-32. 
-+ */ -+struct dpmac_cfg { -+ int mac_id; -+}; -+ -+/** -+ * dpmac_create() - Create the DPMAC object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPMAC object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpmac_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpmac_destroy() - Destroy the DPMAC object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpmac_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * DPMAC IRQ Index and Events -+ */ -+ -+/** -+ * IRQ index -+ */ -+#define DPMAC_IRQ_INDEX 0 -+/** -+ * IRQ event - indicates a change in link state -+ */ -+#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 -+/** -+ * IRQ event - Indicates that the link state changed -+ */ -+#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 -+ -+/** -+ * struct dpmac_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpmac_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmac_irq_cfg *irq_cfg); -+ -+/** -+ * dpmac_get_irq() - Get IRQ information from the DPMAC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmac_irq_cfg *irq_cfg); -+ -+/** -+ * dpmac_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpmac_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpmac_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpmac_get_irq_mask() - Get interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpmac_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpmac_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpmac_attr - Structure representing DPMAC attributes -+ * @id: DPMAC object ID -+ * @phy_id: PHY ID -+ * @link_type: link type -+ * @eth_if: Ethernet interface -+ * @max_rate: Maximum supported rate - in Mbps -+ * @version: DPMAC version -+ */ -+struct dpmac_attr { -+ int id; -+ int phy_id; -+ enum dpmac_link_type link_type; -+ enum dpmac_eth_if eth_if; -+ uint32_t max_rate; -+ /** -+ * struct version - Structure representing DPMAC version -+ * @major: DPMAC major version -+ * @minor: DPMAC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpmac_get_attributes - Retrieve DPMAC attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_attr *attr); -+ -+/** -+ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters -+ * @phy_addr: MDIO device address -+ * @reg: Address of the register within the Clause 45 PHY device from which data -+ * is to be read -+ * @data: Data read/write from/to MDIO -+ */ -+struct dpmac_mdio_cfg { -+ uint8_t phy_addr; -+ uint8_t reg; -+ uint16_t data; -+}; -+ -+/** -+ * dpmac_mdio_read() - Perform MDIO read transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/** -+ * dpmac_mdio_write() - Perform MDIO write transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/** -+ * DPMAC link configuration/state options -+ */ -+ -+/** -+ * Enable auto-negotiation -+ */ -+#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/** -+ * Enable half-duplex mode -+ */ -+#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/** -+ * Enable pause frames -+ */ -+#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL -+/** -+ * Enable a-symmetric pause frames -+ */ -+#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration -+ * @rate: Link's rate - in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ */ -+struct dpmac_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpmac_get_link_cfg() - Get Ethernet link configuration -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @cfg: Returned structure with the link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_cfg *cfg); -+ -+/** -+ * struct dpmac_link_state - DPMAC link configuration request -+ * @rate: Rate in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ * @up: Link state -+ */ -+struct dpmac_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpmac_set_link_state() - Set the Ethernet link status -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @link_state: Link state configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmac_link_state *link_state); -+ -+/** -+ * enum dpmac_counter - DPMAC counter types -+ * @DPMAC_CNT_ING_FRAME_64: counts 64-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-bytes frames, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-bytes frames and larger -+ * (up to max frame length specified), -+ * good or bad. -+ * @DPMAC_CNT_ING_FRAG: counts frames which are shorter than 64 bytes received -+ * with a wrong CRC -+ * @DPMAC_CNT_ING_JABBER: counts frames longer than the maximum frame length -+ * specified, with a bad frame check sequence. -+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped frames due to internal errors. -+ * Occurs when a receive FIFO overflows. -+ * Includes also frames truncated as a result of -+ * the receive FIFO overflow. 
-+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frames with an alignment error -+ * (optional used for wrong SFD). -+ * @DPMAC_CNT_EGR_UNDERSIZED: counts frames transmitted that was less than 64 -+ * bytes long with a good CRC. -+ * @DPMAC_CNT_ING_OVERSIZED: counts frames longer than the maximum frame length -+ * specified, with a good frame check sequence. -+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frames (regular and PFC) -+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frames transmitted -+ * (regular and PFC). -+ * @DPMAC_CNT_ING_BYTE: counts bytes received except preamble for all valid -+ * frames and valid pause frames. -+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frames. -+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frames. -+ * @DPMAC_CNT_ING_ALL_FRAME: counts each good or bad frames received. -+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frames. -+ * @DPMAC_CNT_ING_ERR_FRAME: counts frames received with an error -+ * (except for undersized/fragment frame). -+ * @DPMAC_CNT_EGR_BYTE: counts bytes transmitted except preamble for all valid -+ * frames and valid pause frames transmitted. -+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frames. -+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frames. -+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frames. -+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frames transmitted with an error. -+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frames received without error, including -+ * pause frames. -+ * @DPMAC_CNT_ENG_GOOD_FRAME: counts frames transmitted without error, including -+ * pause frames. 
-+ */ -+enum dpmac_counter { -+ DPMAC_CNT_ING_FRAME_64, -+ DPMAC_CNT_ING_FRAME_127, -+ DPMAC_CNT_ING_FRAME_255, -+ DPMAC_CNT_ING_FRAME_511, -+ DPMAC_CNT_ING_FRAME_1023, -+ DPMAC_CNT_ING_FRAME_1518, -+ DPMAC_CNT_ING_FRAME_1519_MAX, -+ DPMAC_CNT_ING_FRAG, -+ DPMAC_CNT_ING_JABBER, -+ DPMAC_CNT_ING_FRAME_DISCARD, -+ DPMAC_CNT_ING_ALIGN_ERR, -+ DPMAC_CNT_EGR_UNDERSIZED, -+ DPMAC_CNT_ING_OVERSIZED, -+ DPMAC_CNT_ING_VALID_PAUSE_FRAME, -+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, -+ DPMAC_CNT_ING_BYTE, -+ DPMAC_CNT_ING_MCAST_FRAME, -+ DPMAC_CNT_ING_BCAST_FRAME, -+ DPMAC_CNT_ING_ALL_FRAME, -+ DPMAC_CNT_ING_UCAST_FRAME, -+ DPMAC_CNT_ING_ERR_FRAME, -+ DPMAC_CNT_EGR_BYTE, -+ DPMAC_CNT_EGR_MCAST_FRAME, -+ DPMAC_CNT_EGR_BCAST_FRAME, -+ DPMAC_CNT_EGR_UCAST_FRAME, -+ DPMAC_CNT_EGR_ERR_FRAME, -+ DPMAC_CNT_ING_GOOD_FRAME, -+ DPMAC_CNT_ENG_GOOD_FRAME -+}; -+ -+/** -+ * dpmac_get_counter() - Read a specific DPMAC counter -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMAC object -+ * @type: The requested counter -+ * @counter: Returned counter value -+ * -+ * Return: The requested counter; '0' otherwise. -+ */ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter); -+ -+#endif /* __FSL_DPMAC_H */ -diff --git a/drivers/staging/fsl-dpaa2/mac/mac.c b/drivers/staging/fsl-dpaa2/mac/mac.c -new file mode 100644 -index 0000000..366ad4c ---- /dev/null -+++ b/drivers/staging/fsl-dpaa2/mac/mac.c -@@ -0,0 +1,694 @@ -+/* Copyright 2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+ -+#include -+#include -+#include -+#include -+ -+#include -+#include -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "../../fsl-mc/include/mc.h" -+#include "../../fsl-mc/include/mc-sys.h" -+ -+#include "dpmac.h" -+#include "dpmac-cmd.h" -+ -+#define DPAA2_SUPPORTED_DPMAC_VERSION 3 -+ -+struct dpaa2_mac_priv { -+ struct net_device *netdev; -+ struct fsl_mc_device *mc_dev; -+ struct dpmac_attr attr; -+ struct dpmac_link_state old_state; -+}; -+ -+/* TODO: fix the 10G modes, mapping can't be right: -+ * XGMII is paralel -+ * XAUI is serial, using 8b/10b encoding -+ * XFI is also serial but using 64b/66b encoding -+ * they can't all map to XGMII... -+ * -+ * This must be kept in sync with enum dpmac_eth_if. -+ */ -+static phy_interface_t dpaa2_mac_iface_mode[] = { -+ /* DPMAC_ETH_IF_MII */ -+ PHY_INTERFACE_MODE_MII, -+ /* DPMAC_ETH_IF_RMII */ -+ PHY_INTERFACE_MODE_RMII, -+ /* DPMAC_ETH_IF_SMII */ -+ PHY_INTERFACE_MODE_SMII, -+ /* DPMAC_ETH_IF_GMII */ -+ PHY_INTERFACE_MODE_GMII, -+ /* DPMAC_ETH_IF_RGMII */ -+ PHY_INTERFACE_MODE_RGMII, -+ /* DPMAC_ETH_IF_SGMII */ -+ PHY_INTERFACE_MODE_SGMII, -+ /* DPMAC_ETH_IF_QSGMII */ -+ PHY_INTERFACE_MODE_QSGMII, -+ /* DPMAC_ETH_IF_XAUI */ -+ PHY_INTERFACE_MODE_XGMII, -+ /* DPMAC_ETH_IF_XFI */ -+ PHY_INTERFACE_MODE_XGMII, -+}; -+ -+static void dpaa2_mac_link_changed(struct net_device *netdev) -+{ -+ struct phy_device *phydev; -+ struct dpmac_link_state state = { 0 }; -+ struct dpaa2_mac_priv *priv = netdev_priv(netdev); -+ int err; -+ -+ /* the PHY just notified us of link state change */ -+ phydev = netdev->phydev; -+ -+ state.up = !!phydev->link; -+ if (phydev->link) { -+ state.rate = phydev->speed; -+ -+ if (!phydev->duplex) -+ state.options |= DPMAC_LINK_OPT_HALF_DUPLEX; -+ if (phydev->autoneg) -+ state.options |= DPMAC_LINK_OPT_AUTONEG; -+ -+ netif_carrier_on(netdev); -+ } else { -+ netif_carrier_off(netdev); -+ } -+ -+ if (priv->old_state.up != state.up || -+ priv->old_state.rate != 
state.rate || -+ priv->old_state.options != state.options) { -+ priv->old_state = state; -+ phy_print_status(phydev); -+ } -+ -+ /* We must call into the MC firmware at all times, because we don't know -+ * when and whether a potential DPNI may have read the link state. -+ */ -+ err = dpmac_set_link_state(priv->mc_dev->mc_io, 0, -+ priv->mc_dev->mc_handle, &state); -+ if (unlikely(err)) -+ dev_err(&priv->mc_dev->dev, "dpmac_set_link_state: %d\n", err); -+} -+ -+/* IRQ bits that we handle */ -+static const u32 dpmac_irq_mask = DPMAC_IRQ_EVENT_LINK_CFG_REQ; -+ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+static netdev_tx_t dpaa2_mac_drop_frame(struct sk_buff *skb, -+ struct net_device *dev) -+{ -+ /* we don't support I/O for now, drop the frame */ -+ dev_kfree_skb_any(skb); -+ return NETDEV_TX_OK; -+} -+ -+static int dpaa2_mac_open(struct net_device *netdev) -+{ -+ /* start PHY state machine */ -+ phy_start(netdev->phydev); -+ -+ return 0; -+} -+ -+static int dpaa2_mac_stop(struct net_device *netdev) -+{ -+ if (!netdev->phydev) -+ goto done; -+ -+ /* stop PHY state machine */ -+ phy_stop(netdev->phydev); -+ -+ /* signal link down to firmware */ -+ netdev->phydev->link = 0; -+ dpaa2_mac_link_changed(netdev); -+ -+done: -+ return 0; -+} -+ -+static int dpaa2_mac_get_settings(struct net_device *netdev, -+ struct ethtool_cmd *cmd) -+{ -+ return phy_ethtool_gset(netdev->phydev, cmd); -+} -+ -+static int dpaa2_mac_set_settings(struct net_device *netdev, -+ struct ethtool_cmd *cmd) -+{ -+ return phy_ethtool_sset(netdev->phydev, cmd); -+} -+ -+static struct rtnl_link_stats64 -+*dpaa2_mac_get_stats(struct net_device *netdev, -+ struct rtnl_link_stats64 *storage) -+{ -+ struct dpaa2_mac_priv *priv = netdev_priv(netdev); -+ u64 tmp; -+ int err; -+ -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_MCAST_FRAME, -+ &storage->tx_packets); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ 
DPMAC_CNT_EGR_BCAST_FRAME, &tmp); -+ if (err) -+ goto error; -+ storage->tx_packets += tmp; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_UCAST_FRAME, &tmp); -+ if (err) -+ goto error; -+ storage->tx_packets += tmp; -+ -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_UNDERSIZED, &storage->tx_dropped); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_BYTE, &storage->tx_bytes); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_EGR_ERR_FRAME, &storage->tx_errors); -+ if (err) -+ goto error; -+ -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_ALL_FRAME, &storage->rx_packets); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_MCAST_FRAME, &storage->multicast); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_FRAME_DISCARD, -+ &storage->rx_dropped); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_ALIGN_ERR, &storage->rx_errors); -+ if (err) -+ goto error; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_OVERSIZED, &tmp); -+ if (err) -+ goto error; -+ storage->rx_errors += tmp; -+ err = dpmac_get_counter(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle, -+ DPMAC_CNT_ING_BYTE, &storage->rx_bytes); -+ if (err) -+ goto error; -+ -+ return storage; -+ -+error: -+ netdev_err(netdev, "dpmac_get_counter err %d\n", err); -+ return storage; -+} -+ -+static struct { -+ enum dpmac_counter id; -+ char name[ETH_GSTRING_LEN]; -+} dpaa2_mac_counters[] = { -+ {DPMAC_CNT_ING_ALL_FRAME, "rx all frames"}, -+ {DPMAC_CNT_ING_GOOD_FRAME, "rx frames ok"}, -+ 
{DPMAC_CNT_ING_ERR_FRAME, "rx frame errors"}, -+ {DPMAC_CNT_ING_FRAME_DISCARD, "rx frame discards"}, -+ {DPMAC_CNT_ING_UCAST_FRAME, "rx u-cast"}, -+ {DPMAC_CNT_ING_BCAST_FRAME, "rx b-cast"}, -+ {DPMAC_CNT_ING_MCAST_FRAME, "rx m-cast"}, -+ {DPMAC_CNT_ING_FRAME_64, "rx 64 bytes"}, -+ {DPMAC_CNT_ING_FRAME_127, "rx 65-127 bytes"}, -+ {DPMAC_CNT_ING_FRAME_255, "rx 128-255 bytes"}, -+ {DPMAC_CNT_ING_FRAME_511, "rx 256-511 bytes"}, -+ {DPMAC_CNT_ING_FRAME_1023, "rx 512-1023 bytes"}, -+ {DPMAC_CNT_ING_FRAME_1518, "rx 1024-1518 bytes"}, -+ {DPMAC_CNT_ING_FRAME_1519_MAX, "rx 1519-max bytes"}, -+ {DPMAC_CNT_ING_FRAG, "rx frags"}, -+ {DPMAC_CNT_ING_JABBER, "rx jabber"}, -+ {DPMAC_CNT_ING_ALIGN_ERR, "rx align errors"}, -+ {DPMAC_CNT_ING_OVERSIZED, "rx oversized"}, -+ {DPMAC_CNT_ING_VALID_PAUSE_FRAME, "rx pause"}, -+ {DPMAC_CNT_ING_BYTE, "rx bytes"}, -+ {DPMAC_CNT_ENG_GOOD_FRAME, "tx frames ok"}, -+ {DPMAC_CNT_EGR_UCAST_FRAME, "tx u-cast"}, -+ {DPMAC_CNT_EGR_MCAST_FRAME, "tx m-cast"}, -+ {DPMAC_CNT_EGR_BCAST_FRAME, "tx b-cast"}, -+ {DPMAC_CNT_EGR_ERR_FRAME, "tx frame errors"}, -+ {DPMAC_CNT_EGR_UNDERSIZED, "tx undersized"}, -+ {DPMAC_CNT_EGR_VALID_PAUSE_FRAME, "tx b-pause"}, -+ {DPMAC_CNT_EGR_BYTE, "tx bytes"}, -+ -+}; -+ -+static void dpaa2_mac_get_strings(struct net_device *netdev, -+ u32 stringset, u8 *data) -+{ -+ int i; -+ -+ switch (stringset) { -+ case ETH_SS_STATS: -+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) -+ memcpy(data + i * ETH_GSTRING_LEN, -+ dpaa2_mac_counters[i].name, -+ ETH_GSTRING_LEN); -+ break; -+ } -+} -+ -+static void dpaa2_mac_get_ethtool_stats(struct net_device *netdev, -+ struct ethtool_stats *stats, -+ u64 *data) -+{ -+ struct dpaa2_mac_priv *priv = netdev_priv(netdev); -+ int i; -+ int err; -+ -+ for (i = 0; i < ARRAY_SIZE(dpaa2_mac_counters); i++) { -+ err = dpmac_get_counter(priv->mc_dev->mc_io, -+ 0, -+ priv->mc_dev->mc_handle, -+ dpaa2_mac_counters[i].id, &data[i]); -+ if (err) -+ netdev_err(netdev, "dpmac_get_counter[%s] err %d\n", -+ 
dpaa2_mac_counters[i].name, err); -+ } -+} -+ -+static int dpaa2_mac_get_sset_count(struct net_device *dev, int sset) -+{ -+ switch (sset) { -+ case ETH_SS_STATS: -+ return ARRAY_SIZE(dpaa2_mac_counters); -+ default: -+ return -EOPNOTSUPP; -+ } -+} -+ -+static const struct net_device_ops dpaa2_mac_ndo_ops = { -+ .ndo_start_xmit = &dpaa2_mac_drop_frame, -+ .ndo_open = &dpaa2_mac_open, -+ .ndo_stop = &dpaa2_mac_stop, -+ .ndo_get_stats64 = &dpaa2_mac_get_stats, -+}; -+ -+static const struct ethtool_ops dpaa2_mac_ethtool_ops = { -+ .get_settings = &dpaa2_mac_get_settings, -+ .set_settings = &dpaa2_mac_set_settings, -+ .get_strings = &dpaa2_mac_get_strings, -+ .get_ethtool_stats = &dpaa2_mac_get_ethtool_stats, -+ .get_sset_count = &dpaa2_mac_get_sset_count, -+}; -+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ -+static void configure_link(struct dpaa2_mac_priv *priv, -+ struct dpmac_link_cfg *cfg) -+{ -+ struct phy_device *phydev = priv->netdev->phydev; -+ -+ if (unlikely(!phydev)) -+ return; -+ -+ phydev->speed = cfg->rate; -+ phydev->duplex = !!(cfg->options & DPMAC_LINK_OPT_HALF_DUPLEX); -+ -+ if (cfg->options & DPMAC_LINK_OPT_AUTONEG) { -+ phydev->autoneg = 1; -+ phydev->advertising |= ADVERTISED_Autoneg; -+ } else { -+ phydev->autoneg = 0; -+ phydev->advertising &= ~ADVERTISED_Autoneg; -+ } -+ -+ phy_start_aneg(phydev); -+} -+ -+static irqreturn_t dpaa2_mac_irq_handler(int irq_num, void *arg) -+{ -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); -+ struct dpmac_link_cfg link_cfg; -+ u32 status; -+ int err; -+ -+ err = dpmac_get_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, &status); -+ if (unlikely(err || !status)) -+ return IRQ_NONE; -+ -+ /* DPNI-initiated link configuration; 'ifconfig up' also calls this */ -+ if (status & DPMAC_IRQ_EVENT_LINK_CFG_REQ) { -+ err = dpmac_get_link_cfg(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ &link_cfg); -+ 
if (unlikely(err)) -+ goto out; -+ -+ configure_link(priv, &link_cfg); -+ } -+ -+out: -+ dpmac_clear_irq_status(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, status); -+ -+ return IRQ_HANDLED; -+} -+ -+static int setup_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int err; -+ -+ err = fsl_mc_allocate_irqs(mc_dev); -+ if (err) { -+ dev_err(&mc_dev->dev, "fsl_mc_allocate_irqs err %d\n", err); -+ return err; -+ } -+ -+ err = devm_request_threaded_irq(&mc_dev->dev, -+ mc_dev->irqs[0]->irq_number, -+ NULL, &dpaa2_mac_irq_handler, -+ IRQF_NO_SUSPEND | IRQF_ONESHOT, -+ dev_name(&mc_dev->dev), &mc_dev->dev); -+ if (err) { -+ dev_err(&mc_dev->dev, "devm_request_threaded_irq err %d\n", -+ err); -+ goto free_irq; -+ } -+ -+ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, dpmac_irq_mask); -+ if (err) { -+ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); -+ goto free_irq; -+ } -+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, 1); -+ if (err) { -+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); -+ goto free_irq; -+ } -+ -+ return 0; -+ -+free_irq: -+ fsl_mc_free_irqs(mc_dev); -+ -+ return err; -+} -+ -+static void teardown_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int err; -+ -+ err = dpmac_set_irq_mask(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, dpmac_irq_mask); -+ if (err) -+ dev_err(&mc_dev->dev, "dpmac_set_irq_mask err %d\n", err); -+ -+ err = dpmac_set_irq_enable(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ DPMAC_IRQ_INDEX, 0); -+ if (err) -+ dev_err(&mc_dev->dev, "dpmac_set_irq_enable err %d\n", err); -+ -+ fsl_mc_free_irqs(mc_dev); -+} -+ -+static struct device_node *lookup_node(struct device *dev, int dpmac_id) -+{ -+ struct device_node *dpmacs, *dpmac = NULL; -+ struct device_node *mc_node = dev->of_node; -+ u32 id; -+ int err; -+ -+ dpmacs = of_find_node_by_name(mc_node, "dpmacs"); -+ if (!dpmacs) { -+ dev_err(dev, "No dpmacs subnode in device-tree\n"); -+ 
return NULL; -+ } -+ -+ while ((dpmac = of_get_next_child(dpmacs, dpmac))) { -+ err = of_property_read_u32(dpmac, "reg", &id); -+ if (err) -+ continue; -+ if (id == dpmac_id) -+ return dpmac; -+ } -+ -+ return NULL; -+} -+ -+static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev) -+{ -+ struct device *dev; -+ struct dpaa2_mac_priv *priv = NULL; -+ struct device_node *phy_node, *dpmac_node; -+ struct net_device *netdev; -+ phy_interface_t if_mode; -+ int err = 0; -+ -+ dev = &mc_dev->dev; -+ -+ /* prepare a net_dev structure to make the phy lib API happy */ -+ netdev = alloc_etherdev(sizeof(*priv)); -+ if (!netdev) { -+ dev_err(dev, "alloc_etherdev error\n"); -+ err = -ENOMEM; -+ goto err_exit; -+ } -+ priv = netdev_priv(netdev); -+ priv->mc_dev = mc_dev; -+ priv->netdev = netdev; -+ -+ SET_NETDEV_DEV(netdev, dev); -+ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ snprintf(netdev->name, IFNAMSIZ, "mac%d", mc_dev->obj_desc.id); -+#endif -+ -+ dev_set_drvdata(dev, priv); -+ -+ err = fsl_mc_portal_allocate(mc_dev, 0, &mc_dev->mc_io); -+ if (err || !mc_dev->mc_io) { -+ dev_err(dev, "fsl_mc_portal_allocate error: %d\n", err); -+ err = -ENODEV; -+ goto err_free_netdev; -+ } -+ -+ err = dpmac_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, -+ &mc_dev->mc_handle); -+ if (err || !mc_dev->mc_handle) { -+ dev_err(dev, "dpmac_open error: %d\n", err); -+ err = -ENODEV; -+ goto err_free_mcp; -+ } -+ -+ err = dpmac_get_attributes(mc_dev->mc_io, 0, -+ mc_dev->mc_handle, &priv->attr); -+ if (err) { -+ dev_err(dev, "dpmac_get_attributes err %d\n", err); -+ err = -EINVAL; -+ goto err_close; -+ } -+ -+ dev_info_once(dev, "Using DPMAC API %d.%d\n", -+ priv->attr.version.major, priv->attr.version.minor); -+ -+ /* Look up the DPMAC node in the device-tree. 
*/ -+ dpmac_node = lookup_node(dev, priv->attr.id); -+ if (!dpmac_node) { -+ dev_err(dev, "No dpmac@%d subnode found.\n", priv->attr.id); -+ err = -ENODEV; -+ goto err_close; -+ } -+ -+ err = setup_irqs(mc_dev); -+ if (err) { -+ err = -EFAULT; -+ goto err_close; -+ } -+ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ /* OPTIONAL, register netdev just to make it visible to the user */ -+ netdev->netdev_ops = &dpaa2_mac_ndo_ops; -+ netdev->ethtool_ops = &dpaa2_mac_ethtool_ops; -+ -+ /* phy starts up enabled so netdev should be up too */ -+ netdev->flags |= IFF_UP; -+ -+ err = register_netdev(priv->netdev); -+ if (err < 0) { -+ dev_err(dev, "register_netdev error %d\n", err); -+ err = -ENODEV; -+ goto err_free_irq; -+ } -+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ -+ /* probe the PHY as a fixed-link if the link type declared in DPC -+ * explicitly mandates this -+ */ -+ if (priv->attr.link_type == DPMAC_LINK_TYPE_FIXED) -+ goto probe_fixed_link; -+ -+ if (priv->attr.eth_if < ARRAY_SIZE(dpaa2_mac_iface_mode)) { -+ if_mode = dpaa2_mac_iface_mode[priv->attr.eth_if]; -+ dev_dbg(dev, "\tusing if mode %s for eth_if %d\n", -+ phy_modes(if_mode), priv->attr.eth_if); -+ } else { -+ dev_warn(dev, "Unexpected interface mode %d, will probe as fixed link\n", -+ priv->attr.eth_if); -+ goto probe_fixed_link; -+ } -+ -+ /* try to connect to the PHY */ -+ phy_node = of_parse_phandle(dpmac_node, "phy-handle", 0); -+ if (!phy_node) { -+ if (!phy_node) { -+ dev_err(dev, "dpmac node has no phy-handle property\n"); -+ err = -ENODEV; -+ goto err_no_phy; -+ } -+ } -+ netdev->phydev = of_phy_connect(netdev, phy_node, -+ &dpaa2_mac_link_changed, 0, if_mode); -+ if (!netdev->phydev) { -+ /* No need for dev_err(); the kernel's loud enough as it is. */ -+ dev_dbg(dev, "Can't of_phy_connect() now.\n"); -+ /* We might be waiting for the MDIO MUX to probe, so defer -+ * our own probing. 
-+ */ -+ err = -EPROBE_DEFER; -+ goto err_defer; -+ } -+ dev_info(dev, "Connected to %s PHY.\n", phy_modes(if_mode)); -+ -+probe_fixed_link: -+ if (!netdev->phydev) { -+ struct fixed_phy_status status = { -+ .link = 1, -+ /* fixed-phys don't support 10Gbps speed for now */ -+ .speed = 1000, -+ .duplex = 1, -+ }; -+ -+ /* try to register a fixed link phy */ -+ netdev->phydev = fixed_phy_register(PHY_POLL, &status, NULL); -+ if (!netdev->phydev || IS_ERR(netdev->phydev)) { -+ dev_err(dev, "error trying to register fixed PHY\n"); -+ /* So we don't crash unregister_netdev() later on */ -+ netdev->phydev = NULL; -+ err = -EFAULT; -+ goto err_no_phy; -+ } -+ dev_info(dev, "Registered fixed PHY.\n"); -+ } -+ -+ /* start PHY state machine */ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ dpaa2_mac_open(netdev); -+#else /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ phy_start(netdev->phydev); -+#endif /* CONFIG_FSL_DPAA2_MAC_NETDEVS */ -+ return 0; -+ -+err_defer: -+err_no_phy: -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ unregister_netdev(netdev); -+err_free_irq: -+#endif -+ teardown_irqs(mc_dev); -+err_close: -+ dpmac_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -+err_free_mcp: -+ fsl_mc_portal_free(mc_dev->mc_io); -+err_free_netdev: -+ free_netdev(netdev); -+err_exit: -+ return err; -+} -+ -+static int dpaa2_mac_remove(struct fsl_mc_device *mc_dev) -+{ -+ struct device *dev = &mc_dev->dev; -+ struct dpaa2_mac_priv *priv = dev_get_drvdata(dev); -+ -+#ifdef CONFIG_FSL_DPAA2_MAC_NETDEVS -+ unregister_netdev(priv->netdev); -+#endif -+ teardown_irqs(priv->mc_dev); -+ dpmac_close(priv->mc_dev->mc_io, 0, priv->mc_dev->mc_handle); -+ fsl_mc_portal_free(priv->mc_dev->mc_io); -+ free_netdev(priv->netdev); -+ -+ dev_set_drvdata(dev, NULL); -+ kfree(priv); -+ -+ return 0; -+} -+ -+static const struct fsl_mc_device_match_id dpaa2_mac_match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpmac", -+ .ver_major = DPMAC_VER_MAJOR, -+ .ver_minor = DPMAC_VER_MINOR, -+ }, -+ {} -+}; 
-+ -+static struct fsl_mc_driver dpaa2_mac_drv = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpaa2_mac_probe, -+ .remove = dpaa2_mac_remove, -+ .match_id_table = dpaa2_mac_match_id_table, -+}; -+ -+module_fsl_mc_driver(dpaa2_mac_drv); -+ -+MODULE_LICENSE("GPL"); -+MODULE_DESCRIPTION("DPAA2 PHY proxy interface driver"); -diff --git a/drivers/staging/fsl-mc/Kconfig b/drivers/staging/fsl-mc/Kconfig -new file mode 100644 -index 0000000..32df07b ---- /dev/null -+++ b/drivers/staging/fsl-mc/Kconfig -@@ -0,0 +1 @@ -+source "drivers/staging/fsl-mc/bus/Kconfig" -diff --git a/drivers/staging/fsl-mc/Makefile b/drivers/staging/fsl-mc/Makefile -new file mode 100644 -index 0000000..9c6a001 ---- /dev/null -+++ b/drivers/staging/fsl-mc/Makefile -@@ -0,0 +1,2 @@ -+# Freescale Management Complex (MC) bus drivers -+obj-$(CONFIG_FSL_MC_BUS) += bus/ -diff --git a/drivers/staging/fsl-mc/TODO b/drivers/staging/fsl-mc/TODO -new file mode 100644 -index 0000000..d78288b ---- /dev/null -+++ b/drivers/staging/fsl-mc/TODO -@@ -0,0 +1,13 @@ -+* Add README file (with ASCII art) describing relationships between -+ DPAA2 objects and how combine them to make a NIC, an LS2 switch, etc. -+ Also, define all acronyms used. -+ -+* Decide if multiple root fsl-mc buses will be supported per Linux instance, -+ and if so add support for this. -+ -+* Add at least one device driver for a DPAA2 object (child device of the -+ fsl-mc bus). -+ -+Please send any patches to Greg Kroah-Hartman , -+german.rivera@freescale.com, devel@driverdev.osuosl.org, -+linux-kernel@vger.kernel.org -diff --git a/drivers/staging/fsl-mc/bus/Kconfig b/drivers/staging/fsl-mc/bus/Kconfig -new file mode 100644 -index 0000000..8bef5b8 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/Kconfig -@@ -0,0 +1,45 @@ -+# -+# Freescale Management Complex (MC) bus drivers -+# -+# Copyright (C) 2014 Freescale Semiconductor, Inc. 
-+# -+# This file is released under the GPLv2 -+# -+ -+config FSL_MC_BUS -+ tristate "Freescale Management Complex (MC) bus driver" -+ depends on OF && ARM64 -+ help -+ Driver to enable the bus infrastructure for the Freescale -+ QorIQ Management Complex (fsl-mc). The fsl-mc is a hardware -+ module of the QorIQ LS2 SoCs, that does resource management -+ for hardware building-blocks in the SoC that can be used -+ to dynamically create networking hardware objects such as -+ network interfaces (NICs), crypto accelerator instances, -+ or L2 switches. -+ -+ Only enable this option when building the kernel for -+ Freescale QorQIQ LS2xxxx SoCs. -+ -+config FSL_MC_RESTOOL -+ tristate "Freescale Management Complex (MC) restool driver" -+ depends on FSL_MC_BUS -+ help -+ Driver that provides kernel support for the Freescale Management -+ Complex resource manager user-space tool. -+ -+config FSL_MC_DPIO -+ tristate "Freescale Data Path I/O (DPIO) driver" -+ depends on FSL_MC_BUS -+ help -+ Driver for Freescale Data Path I/O (DPIO) devices. -+ A DPIO device provides queue and buffer management facilities -+ for software to interact with other Data Path devices. This -+ driver does not expose the DPIO device individually, but -+ groups them under a service layer API. -+ -+config FSL_QBMAN_DEBUG -+ tristate "Freescale QBMAN Debug APIs" -+ depends on FSL_MC_DPIO -+ help -+ QBMan debug assistant APIs. -diff --git a/drivers/staging/fsl-mc/bus/Makefile b/drivers/staging/fsl-mc/bus/Makefile -new file mode 100644 -index 0000000..f29399c ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/Makefile -@@ -0,0 +1,24 @@ -+# -+# Freescale Management Complex (MC) bus drivers -+# -+# Copyright (C) 2014 Freescale Semiconductor, Inc. 
-+# -+# This file is released under the GPLv2 -+# -+obj-$(CONFIG_FSL_MC_BUS) += mc-bus-driver.o -+ -+mc-bus-driver-objs := mc-bus.o \ -+ mc-sys.o \ -+ dprc.o \ -+ dpmng.o \ -+ dprc-driver.o \ -+ mc-allocator.o \ -+ dpmcp.o \ -+ dpbp.o \ -+ dpcon.o -+ -+# MC restool kernel support -+obj-$(CONFIG_FSL_MC_RESTOOL) += mc-restool.o -+ -+# MC DPIO driver -+obj-$(CONFIG_FSL_MC_DPIO) += dpio/ -diff --git a/drivers/staging/fsl-mc/bus/dpbp.c b/drivers/staging/fsl-mc/bus/dpbp.c -new file mode 100644 -index 0000000..f183121 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpbp.c -@@ -0,0 +1,459 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+* -+* Redistribution and use in source and binary forms, with or without -+* modification, are permitted provided that the following conditions are met: -+* * Redistributions of source code must retain the above copyright -+* notice, this list of conditions and the following disclaimer. -+* * Redistributions in binary form must reproduce the above copyright -+* notice, this list of conditions and the following disclaimer in the -+* documentation and/or other materials provided with the distribution. -+* * Neither the name of the above-listed copyright holders nor the -+* names of any contributors may be used to endorse or promote products -+* derived from this software without specific prior written permission. -+* -+* -+* ALTERNATIVELY, this software may be distributed under the terms of the -+* GNU General Public License ("GPL") as published by the Free Software -+* Foundation, either version 2 of that License or (at your option) any -+* later version. -+* -+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+* POSSIBILITY OF SUCH DAMAGE. -+*/ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dpbp.h" -+#include "../include/dpbp-cmd.h" -+ -+int dpbp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpbp_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ -+ cmd.params[0] |= mc_enc(0, 32, dpbp_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+EXPORT_SYMBOL(dpbp_open); -+ -+int dpbp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpbp_close); -+ -+int dpbp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpbp_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ (void)(cfg); /* unused */ -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ 
*token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpbp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_ENABLE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpbp_enable); -+ -+int dpbp_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpbp_disable); -+ -+int dpbp_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *en = (int)mc_dec(cmd.params[0], 0, 1); -+ -+ return 0; -+} -+ -+int dpbp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpbp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 
}; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 8, irq_index); -+ cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); -+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->addr); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpbp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ irq_cfg->addr = (uint64_t)mc_dec(cmd.params[1], 0, 64); -+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); -+ *type = (int)mc_dec(cmd.params[2], 32, 32); -+ -+ return 0; -+} -+ -+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 8, en); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, 
&cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *en = (uint8_t)mc_dec(cmd.params[0], 0, 8); -+ return 0; -+} -+ -+int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 32, mask); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *mask = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ return 0; -+} -+ -+int dpbp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 32, *status); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *status = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ return 0; -+} -+ -+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPBP_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 32, status); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ attr->bpid = (uint16_t)mc_dec(cmd.params[0], 16, 16); -+ attr->id = (int)mc_dec(cmd.params[0], 32, 32); -+ attr->version.major = (uint16_t)mc_dec(cmd.params[1], 0, 16); -+ attr->version.minor = (uint16_t)mc_dec(cmd.params[1], 16, 16); -+ return 0; -+} -+EXPORT_SYMBOL(dpbp_get_attributes); -+ -+int dpbp_set_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_SET_NOTIFICATIONS, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 32, cfg->depletion_entry); -+ cmd.params[0] |= mc_enc(32, 32, cfg->depletion_exit); -+ cmd.params[1] |= mc_enc(0, 32, cfg->surplus_entry); -+ cmd.params[1] |= mc_enc(32, 32, cfg->surplus_exit); -+ cmd.params[2] |= mc_enc(0, 16, cfg->options); -+ cmd.params[3] |= mc_enc(0, 64, cfg->message_ctx); -+ cmd.params[4] |= mc_enc(0, 64, cfg->message_iova); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpbp_get_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPBP_CMDID_GET_NOTIFICATIONS, -+ 
cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ cfg->depletion_entry = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ cfg->depletion_exit = (uint32_t)mc_dec(cmd.params[0], 32, 32); -+ cfg->surplus_entry = (uint32_t)mc_dec(cmd.params[1], 0, 32); -+ cfg->surplus_exit = (uint32_t)mc_dec(cmd.params[1], 32, 32); -+ cfg->options = (uint16_t)mc_dec(cmd.params[2], 0, 16); -+ cfg->message_ctx = (uint64_t)mc_dec(cmd.params[3], 0, 64); -+ cfg->message_iova = (uint64_t)mc_dec(cmd.params[4], 0, 64); -+ -+ return 0; -+} -diff --git a/drivers/staging/fsl-mc/bus/dpcon.c b/drivers/staging/fsl-mc/bus/dpcon.c -new file mode 100644 -index 0000000..7965284 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpcon.c -@@ -0,0 +1,407 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dpcon.h" -+#include "../include/dpcon-cmd.h" -+ -+int dpcon_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpcon_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPCON_CMD_OPEN(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpcon_open); -+ -+int dpcon_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_close); -+ -+int dpcon_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpcon_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ 
/* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPCON_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpcon_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_enable); -+ -+int dpcon_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_disable); -+ -+int dpcon_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_IS_ENABLED, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpcon_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = 
mc_encode_cmd_header(DPCON_CMDID_RESET, -+ cmd_flags, token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpcon_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpcon_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response 
parameters */ -+ DPCON_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpcon_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return 
mc_send_command(mc_io, &cmd); -+} -+ -+int dpcon_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPCON_RSP_GET_ATTR(cmd, attr); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpcon_get_attributes); -+ -+int dpcon_set_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_notification_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPCON_CMDID_SET_NOTIFICATION, -+ cmd_flags, -+ token); -+ DPCON_CMD_SET_NOTIFICATION(cmd, cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dpcon_set_notification); -+ -diff --git a/drivers/staging/fsl-mc/bus/dpio/Makefile b/drivers/staging/fsl-mc/bus/dpio/Makefile -new file mode 100644 -index 0000000..c20356b ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/Makefile -@@ -0,0 +1,9 @@ -+# -+# Freescale DPIO driver -+# -+ -+obj-$(CONFIG_FSL_MC_BUS) += fsl-dpio-drv.o -+ -+fsl-dpio-drv-objs := dpio-drv.o dpio_service.o dpio.o qbman_portal.o -+ -+obj-$(CONFIG_FSL_QBMAN_DEBUG) += qbman_debug.o -diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c -new file mode 100644 -index 0000000..80add27 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.c -@@ -0,0 +1,401 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "../../include/mc.h" -+#include "../../include/fsl_dpaa2_io.h" -+ -+#include "fsl_qbman_portal.h" -+#include "fsl_dpio.h" -+#include "fsl_dpio_cmd.h" -+ -+#include "dpio-drv.h" -+ -+#define DPIO_DESCRIPTION "DPIO Driver" -+ -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_AUTHOR("Freescale Semiconductor, Inc"); -+MODULE_DESCRIPTION(DPIO_DESCRIPTION); -+ -+#define MAX_DPIO_IRQ_NAME 16 /* Big enough for "FSL DPIO %d" */ -+ -+struct dpio_priv { -+ struct dpaa2_io *io; -+ char irq_name[MAX_DPIO_IRQ_NAME]; -+ struct task_struct *thread; -+}; -+ -+static int dpio_thread(void *data) -+{ -+ struct dpaa2_io *io = data; -+ -+ while (!kthread_should_stop()) { -+ int err = dpaa2_io_poll(io); -+ -+ if (err) { -+ pr_err("dpaa2_io_poll() failed\n"); -+ return err; -+ } -+ msleep(50); -+ } -+ return 0; -+} -+ -+static irqreturn_t dpio_irq_handler(int irq_num, void *arg) -+{ -+ struct device *dev = (struct device *)arg; -+ struct dpio_priv *priv = dev_get_drvdata(dev); -+ -+ return dpaa2_io_irq(priv->io); -+} -+ -+static void unregister_dpio_irq_handlers(struct fsl_mc_device *ls_dev) -+{ -+ int i; -+ struct fsl_mc_device_irq *irq; -+ int irq_count = ls_dev->obj_desc.irq_count; -+ -+ for (i = 0; i < irq_count; i++) { -+ irq = ls_dev->irqs[i]; -+ devm_free_irq(&ls_dev->dev, irq->irq_number, &ls_dev->dev); -+ } -+} -+ -+static int register_dpio_irq_handlers(struct fsl_mc_device *ls_dev, int cpu) -+{ -+ struct dpio_priv *priv; -+ unsigned int i; -+ int error; -+ struct fsl_mc_device_irq *irq; -+ unsigned int num_irq_handlers_registered = 0; -+ int irq_count = ls_dev->obj_desc.irq_count; -+ cpumask_t mask; -+ -+ priv = dev_get_drvdata(&ls_dev->dev); -+ -+ if (WARN_ON(irq_count != 1)) -+ return -EINVAL; -+ -+ for (i = 0; i < irq_count; i++) { -+ irq = ls_dev->irqs[i]; -+ error = devm_request_irq(&ls_dev->dev, -+ irq->irq_number, -+ dpio_irq_handler, -+ 0, -+ priv->irq_name, -+ 
&ls_dev->dev); -+ if (error < 0) { -+ dev_err(&ls_dev->dev, -+ "devm_request_irq() failed: %d\n", -+ error); -+ goto error_unregister_irq_handlers; -+ } -+ -+ /* Set the IRQ affinity */ -+ cpumask_clear(&mask); -+ cpumask_set_cpu(cpu, &mask); -+ if (irq_set_affinity(irq->irq_number, &mask)) -+ pr_err("irq_set_affinity failed irq %d cpu %d\n", -+ irq->irq_number, cpu); -+ -+ num_irq_handlers_registered++; -+ } -+ -+ return 0; -+ -+error_unregister_irq_handlers: -+ for (i = 0; i < num_irq_handlers_registered; i++) { -+ irq = ls_dev->irqs[i]; -+ devm_free_irq(&ls_dev->dev, irq->irq_number, -+ &ls_dev->dev); -+ } -+ -+ return error; -+} -+ -+static int __cold -+dpaa2_dpio_probe(struct fsl_mc_device *ls_dev) -+{ -+ struct dpio_attr dpio_attrs; -+ struct dpaa2_io_desc desc; -+ struct dpio_priv *priv; -+ int err = -ENOMEM; -+ struct device *dev = &ls_dev->dev; -+ struct dpaa2_io *defservice; -+ bool irq_allocated = false; -+ static int next_cpu; -+ -+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); -+ if (!priv) -+ goto err_priv_alloc; -+ -+ dev_set_drvdata(dev, priv); -+ -+ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io); -+ if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); -+ err = -EPROBE_DEFER; -+ goto err_mcportal; -+ } -+ -+ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id, -+ &ls_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpio_open() failed\n"); -+ goto err_open; -+ } -+ -+ err = dpio_get_attributes(ls_dev->mc_io, 0, ls_dev->mc_handle, -+ &dpio_attrs); -+ if (err) { -+ dev_err(dev, "dpio_get_attributes() failed %d\n", err); -+ goto err_get_attr; -+ } -+ err = dpio_enable(ls_dev->mc_io, 0, ls_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpio_enable() failed %d\n", err); -+ goto err_get_attr; -+ } -+ pr_info("ce_paddr=0x%llx, ci_paddr=0x%llx, portalid=%d, prios=%d\n", -+ ls_dev->regions[0].start, -+ ls_dev->regions[1].start, -+ dpio_attrs.qbman_portal_id, -+ dpio_attrs.num_priorities); -+ -+ pr_info("ce_size=0x%llx, 
ci_size=0x%llx\n", -+ resource_size(&ls_dev->regions[0]), -+ resource_size(&ls_dev->regions[1])); -+ -+ desc.qman_version = dpio_attrs.qbman_version; -+ /* Build DPIO driver object out of raw MC object */ -+ desc.receives_notifications = dpio_attrs.num_priorities ? 1 : 0; -+ desc.has_irq = 1; -+ desc.will_poll = 1; -+ desc.has_8prio = dpio_attrs.num_priorities == 8 ? 1 : 0; -+ desc.cpu = next_cpu; -+ desc.stash_affinity = next_cpu; -+ next_cpu = (next_cpu + 1) % num_active_cpus(); -+ desc.dpio_id = ls_dev->obj_desc.id; -+ desc.regs_cena = ioremap_cache_ns(ls_dev->regions[0].start, -+ resource_size(&ls_dev->regions[0])); -+ desc.regs_cinh = ioremap(ls_dev->regions[1].start, -+ resource_size(&ls_dev->regions[1])); -+ -+ err = fsl_mc_allocate_irqs(ls_dev); -+ if (err) { -+ dev_err(dev, "DPIO fsl_mc_allocate_irqs failed\n"); -+ desc.has_irq = 0; -+ } else { -+ irq_allocated = true; -+ -+ snprintf(priv->irq_name, MAX_DPIO_IRQ_NAME, "FSL DPIO %d", -+ desc.dpio_id); -+ -+ err = register_dpio_irq_handlers(ls_dev, desc.cpu); -+ if (err) -+ desc.has_irq = 0; -+ } -+ -+ priv->io = dpaa2_io_create(&desc); -+ if (!priv->io) { -+ dev_err(dev, "DPIO setup failed\n"); -+ goto err_dpaa2_io_create; -+ } -+ -+ /* If no irq then go to poll mode */ -+ if (desc.has_irq == 0) { -+ dev_info(dev, "Using polling mode for DPIO %d\n", -+ desc.dpio_id); -+ /* goto err_register_dpio_irq; */ -+ /* TEMP: Start polling if IRQ could not -+ be registered. 
This will go away once -+ KVM support for MSI is present */ -+ if (irq_allocated == true) -+ fsl_mc_free_irqs(ls_dev); -+ -+ if (desc.stash_affinity) -+ priv->thread = kthread_create_on_cpu(dpio_thread, -+ priv->io, -+ desc.cpu, -+ "dpio_aff%u"); -+ else -+ priv->thread = -+ kthread_create(dpio_thread, -+ priv->io, -+ "dpio_non%u", -+ dpio_attrs.qbman_portal_id); -+ if (IS_ERR(priv->thread)) { -+ dev_err(dev, "DPIO thread failure\n"); -+ err = PTR_ERR(priv->thread); -+ goto err_dpaa_thread; -+ } -+ wake_up_process(priv->thread); -+ } -+ -+ defservice = dpaa2_io_default_service(); -+ err = dpaa2_io_service_add(defservice, priv->io); -+ dpaa2_io_down(defservice); -+ if (err) { -+ dev_err(dev, "DPIO add-to-service failed\n"); -+ goto err_dpaa2_io_add; -+ } -+ -+ dev_info(dev, "dpio: probed object %d\n", ls_dev->obj_desc.id); -+ dev_info(dev, " receives_notifications = %d\n", -+ desc.receives_notifications); -+ dev_info(dev, " has_irq = %d\n", desc.has_irq); -+ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); -+ fsl_mc_portal_free(ls_dev->mc_io); -+ return 0; -+ -+err_dpaa2_io_add: -+ unregister_dpio_irq_handlers(ls_dev); -+/* TEMP: To be restored once polling is removed -+ err_register_dpio_irq: -+ fsl_mc_free_irqs(ls_dev); -+*/ -+err_dpaa_thread: -+err_dpaa2_io_create: -+ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle); -+err_get_attr: -+ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); -+err_open: -+ fsl_mc_portal_free(ls_dev->mc_io); -+err_mcportal: -+ dev_set_drvdata(dev, NULL); -+ devm_kfree(dev, priv); -+err_priv_alloc: -+ return err; -+} -+ -+/* -+ * Tear down interrupts for a given DPIO object -+ */ -+static void dpio_teardown_irqs(struct fsl_mc_device *ls_dev) -+{ -+ /* (void)disable_dpio_irqs(ls_dev); */ -+ unregister_dpio_irq_handlers(ls_dev); -+ fsl_mc_free_irqs(ls_dev); -+} -+ -+static int __cold -+dpaa2_dpio_remove(struct fsl_mc_device *ls_dev) -+{ -+ struct device *dev; -+ struct dpio_priv *priv; -+ int err; -+ -+ dev = &ls_dev->dev; -+ priv = 
dev_get_drvdata(dev); -+ -+ /* there is no implementation yet for pulling a DPIO object out of a -+ * running service (and they're currently always running). -+ */ -+ dev_crit(dev, "DPIO unplugging is broken, the service holds onto it\n"); -+ -+ if (priv->thread) -+ kthread_stop(priv->thread); -+ else -+ dpio_teardown_irqs(ls_dev); -+ -+ err = fsl_mc_portal_allocate(ls_dev, 0, &ls_dev->mc_io); -+ if (err) { -+ dev_err(dev, "MC portal allocation failed\n"); -+ goto err_mcportal; -+ } -+ -+ err = dpio_open(ls_dev->mc_io, 0, ls_dev->obj_desc.id, -+ &ls_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpio_open() failed\n"); -+ goto err_open; -+ } -+ -+ dev_set_drvdata(dev, NULL); -+ dpaa2_io_down(priv->io); -+ -+ err = 0; -+ -+ dpio_disable(ls_dev->mc_io, 0, ls_dev->mc_handle); -+ dpio_close(ls_dev->mc_io, 0, ls_dev->mc_handle); -+err_open: -+ fsl_mc_portal_free(ls_dev->mc_io); -+err_mcportal: -+ return err; -+} -+ -+static const struct fsl_mc_device_match_id dpaa2_dpio_match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpio", -+ .ver_major = DPIO_VER_MAJOR, -+ .ver_minor = DPIO_VER_MINOR -+ }, -+ { .vendor = 0x0 } -+}; -+ -+static struct fsl_mc_driver dpaa2_dpio_driver = { -+ .driver = { -+ .name = KBUILD_MODNAME, -+ .owner = THIS_MODULE, -+ }, -+ .probe = dpaa2_dpio_probe, -+ .remove = dpaa2_dpio_remove, -+ .match_id_table = dpaa2_dpio_match_id_table -+}; -+ -+static int dpio_driver_init(void) -+{ -+ int err; -+ -+ err = dpaa2_io_service_driver_init(); -+ if (!err) { -+ err = fsl_mc_driver_register(&dpaa2_dpio_driver); -+ if (err) -+ dpaa2_io_service_driver_exit(); -+ } -+ return err; -+} -+static void dpio_driver_exit(void) -+{ -+ fsl_mc_driver_unregister(&dpaa2_dpio_driver); -+ dpaa2_io_service_driver_exit(); -+} -+module_init(dpio_driver_init); -+module_exit(dpio_driver_exit); -diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h -new file mode 100644 -index 0000000..fe8d40b ---- 
/dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio-drv.h -@@ -0,0 +1,33 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+int dpaa2_io_service_driver_init(void); -+void dpaa2_io_service_driver_exit(void); -diff --git a/drivers/staging/fsl-mc/bus/dpio/dpio.c b/drivers/staging/fsl-mc/bus/dpio/dpio.c -new file mode 100644 -index 0000000..b63edd6 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio.c -@@ -0,0 +1,468 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include "../../include/mc-sys.h" -+#include "../../include/mc-cmd.h" -+#include "fsl_dpio.h" -+#include "fsl_dpio_cmd.h" -+ -+int dpio_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpio_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ DPIO_CMD_OPEN(cmd, dpio_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpio_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpio_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ DPIO_CMD_CREATE(cmd, cfg); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int 
dpio_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ENABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_DISABLE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_IS_ENABLED, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_IS_ENABLED(cmd, *en); -+ -+ return 0; -+} -+ -+int dpio_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_RESET, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpio_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ(cmd, 
irq_index, irq_cfg); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpio_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ(cmd, *type, irq_cfg); -+ -+ return 0; -+} -+ -+int dpio_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_ENABLE(cmd, *en); -+ -+ return 0; -+} -+ -+int dpio_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, 
&cmd); -+} -+ -+int dpio_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_MASK(cmd, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_MASK(cmd, *mask); -+ -+ return 0; -+} -+ -+int dpio_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, *status); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_IRQ_STATUS(cmd, *status); -+ -+ return 0; -+} -+ -+int dpio_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpio_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_ATTR(cmd, attr); -+ -+ return 
0; -+} -+ -+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t sdest) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_SET_STASHING_DEST, -+ cmd_flags, -+ token); -+ DPIO_CMD_SET_STASHING_DEST(cmd, sdest); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t *sdest) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_GET_STASHING_DEST, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_GET_STASHING_DEST(cmd, *sdest); -+ -+ return 0; -+} -+ -+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ uint8_t *channel_index) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL, -+ cmd_flags, -+ token); -+ DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, *channel_index); -+ -+ return 0; -+} -+ -+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header( -+ DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL, -+ cmd_flags, -+ token); -+ DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -diff --git 
a/drivers/staging/fsl-mc/bus/dpio/dpio_service.c b/drivers/staging/fsl-mc/bus/dpio/dpio_service.c -new file mode 100644 -index 0000000..ebcfd59 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/dpio_service.c -@@ -0,0 +1,801 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include "fsl_qbman_portal.h" -+#include "../../include/mc.h" -+#include "../../include/fsl_dpaa2_io.h" -+#include "fsl_dpio.h" -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "dpio-drv.h" -+#include "qbman_debug.h" -+ -+#define UNIMPLEMENTED() pr_err("FOO: %s unimplemented!\n", __func__) -+ -+#define MAGIC_SERVICE 0xabcd9876 -+#define MAGIC_OBJECT 0x1234fedc -+ -+struct dpaa2_io { -+ /* If MAGIC_SERVICE, this is a group of objects, use the 'service' part -+ * of the union. If MAGIC_OBJECT, use the 'object' part of the union. If -+ * it's neither, something got corrupted. This is mainly to satisfy -+ * dpaa2_io_from_registration(), which dereferences a caller- -+ * instantiated struct and so warrants a bug-checking step - hence the -+ * magic rather than a boolean. -+ */ -+ unsigned int magic; -+ atomic_t refs; -+ union { -+ struct dpaa2_io_service { -+ spinlock_t lock; -+ struct list_head list; -+ /* for targeted dpaa2_io selection */ -+ struct dpaa2_io *objects_by_cpu[NR_CPUS]; -+ cpumask_t cpus_notifications; -+ cpumask_t cpus_stashing; -+ int has_nonaffine; -+ /* slight hack. record the special case of the -+ * "default service", because that's the case where we -+ * need to avoid a kfree() ... 
*/ -+ int is_defservice; -+ } service; -+ struct dpaa2_io_object { -+ struct dpaa2_io_desc dpio_desc; -+ struct qbman_swp_desc swp_desc; -+ struct qbman_swp *swp; -+ /* If the object is part of a service, this is it (and -+ * 'node' is linked into the service's list) */ -+ struct dpaa2_io *service; -+ struct list_head node; -+ /* Interrupt mask, as used with -+ * qbman_swp_interrupt_[gs]et_vanish(). This isn't -+ * locked, because the higher layer is driving all -+ * "ingress" processing. */ -+ uint32_t irq_mask; -+ /* As part of simplifying assumptions, we provide an -+ * irq-safe lock for each type of DPIO operation that -+ * isn't innately lockless. The selection algorithms -+ * (which are simplified) require this, whereas -+ * eventually adherence to cpu-affinity will presumably -+ * relax the locking requirements. */ -+ spinlock_t lock_mgmt_cmd; -+ spinlock_t lock_notifications; -+ struct list_head notifications; -+ } object; -+ }; -+}; -+ -+struct dpaa2_io_store { -+ unsigned int max; -+ dma_addr_t paddr; -+ struct dpaa2_dq *vaddr; -+ void *alloced_addr; /* the actual return from kmalloc as it may -+ be adjusted for alignment purposes */ -+ unsigned int idx; /* position of the next-to-be-returned entry */ -+ struct qbman_swp *swp; /* portal used to issue VDQCR */ -+ struct device *dev; /* device used for DMA mapping */ -+}; -+ -+static struct dpaa2_io def_serv; -+ -+/**********************/ -+/* Internal functions */ -+/**********************/ -+ -+static void service_init(struct dpaa2_io *d, int is_defservice) -+{ -+ struct dpaa2_io_service *s = &d->service; -+ -+ d->magic = MAGIC_SERVICE; -+ atomic_set(&d->refs, 1); -+ spin_lock_init(&s->lock); -+ INIT_LIST_HEAD(&s->list); -+ cpumask_clear(&s->cpus_notifications); -+ cpumask_clear(&s->cpus_stashing); -+ s->has_nonaffine = 0; -+ s->is_defservice = is_defservice; -+} -+ -+/* Selection algorithms, stupid ones at that. 
These are to handle the case where -+ * the given dpaa2_io is a service, by choosing the non-service dpaa2_io within -+ * it to use. -+ */ -+static struct dpaa2_io *_service_select_by_cpu_slow(struct dpaa2_io_service *ss, -+ int cpu) -+{ -+ struct dpaa2_io *o; -+ unsigned long irqflags; -+ -+ spin_lock_irqsave(&ss->lock, irqflags); -+ /* TODO: this is about the dumbest and slowest selection algorithm you -+ * could imagine. (We're looking for something working first, and -+ * something efficient second...) -+ */ -+ list_for_each_entry(o, &ss->list, object.node) -+ if (o->object.dpio_desc.cpu == cpu) -+ goto found; -+ -+ /* No joy. Try the first nonaffine portal (bleurgh) */ -+ if (ss->has_nonaffine) -+ list_for_each_entry(o, &ss->list, object.node) -+ if (!o->object.dpio_desc.stash_affinity) -+ goto found; -+ -+ /* No joy. Try the first object. Told you it was horrible. */ -+ if (!list_empty(&ss->list)) -+ o = list_entry(ss->list.next, struct dpaa2_io, object.node); -+ else -+ o = NULL; -+ -+found: -+ spin_unlock_irqrestore(&ss->lock, irqflags); -+ return o; -+} -+ -+static struct dpaa2_io *service_select_by_cpu(struct dpaa2_io *d, int cpu) -+{ -+ struct dpaa2_io_service *ss; -+ unsigned long irqflags; -+ -+ if (!d) -+ d = &def_serv; -+ else if (d->magic == MAGIC_OBJECT) -+ return d; -+ BUG_ON(d->magic != MAGIC_SERVICE); -+ -+ ss = &d->service; -+ -+ /* If cpu==-1, choose the current cpu, with no guarantees about -+ * potentially being migrated away. 
-+ */ -+ if (unlikely(cpu < 0)) { -+ spin_lock_irqsave(&ss->lock, irqflags); -+ cpu = smp_processor_id(); -+ spin_unlock_irqrestore(&ss->lock, irqflags); -+ -+ return _service_select_by_cpu_slow(ss, cpu); -+ } -+ -+ /* If a specific cpu was requested, pick it up immediately */ -+ return ss->objects_by_cpu[cpu]; -+} -+ -+static inline struct dpaa2_io *service_select_any(struct dpaa2_io *d) -+{ -+ struct dpaa2_io_service *ss; -+ struct dpaa2_io *o; -+ unsigned long irqflags; -+ -+ if (!d) -+ d = &def_serv; -+ else if (d->magic == MAGIC_OBJECT) -+ return d; -+ BUG_ON(d->magic != MAGIC_SERVICE); -+ -+ /* -+ * Lock the service, looking for the first DPIO object in the list, -+ * ignore everything else about that DPIO, and choose it to do the -+ * operation! As a post-selection step, move the DPIO to the end of -+ * the list. It should improve load-balancing a little, although it -+ * might also incur a performance hit, given that the lock is *global* -+ * and this may be called on the fast-path... -+ */ -+ ss = &d->service; -+ spin_lock_irqsave(&ss->lock, irqflags); -+ if (!list_empty(&ss->list)) { -+ o = list_entry(ss->list.next, struct dpaa2_io, object.node); -+ list_del(&o->object.node); -+ list_add_tail(&o->object.node, &ss->list); -+ } else -+ o = NULL; -+ spin_unlock_irqrestore(&ss->lock, irqflags); -+ return o; -+} -+ -+/* If the context is not preemptible, select the service affine to the -+ * current cpu. Otherwise, "select any". 
-+ */ -+static inline struct dpaa2_io *_service_select(struct dpaa2_io *d) -+{ -+ struct dpaa2_io *temp = d; -+ -+ if (likely(!preemptible())) { -+ d = service_select_by_cpu(d, smp_processor_id()); -+ if (likely(d)) -+ return d; -+ } -+ return service_select_any(temp); -+} -+ -+/**********************/ -+/* Exported functions */ -+/**********************/ -+ -+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc) -+{ -+ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL); -+ struct dpaa2_io_object *o = &ret->object; -+ -+ if (!ret) -+ return NULL; -+ ret->magic = MAGIC_OBJECT; -+ atomic_set(&ret->refs, 1); -+ o->dpio_desc = *desc; -+ o->swp_desc.cena_bar = o->dpio_desc.regs_cena; -+ o->swp_desc.cinh_bar = o->dpio_desc.regs_cinh; -+ o->swp_desc.qman_version = o->dpio_desc.qman_version; -+ o->swp = qbman_swp_init(&o->swp_desc); -+ o->service = NULL; -+ if (!o->swp) { -+ kfree(ret); -+ return NULL; -+ } -+ INIT_LIST_HEAD(&o->node); -+ spin_lock_init(&o->lock_mgmt_cmd); -+ spin_lock_init(&o->lock_notifications); -+ INIT_LIST_HEAD(&o->notifications); -+ if (!o->dpio_desc.has_irq) -+ qbman_swp_interrupt_set_vanish(o->swp, 0xffffffff); -+ else { -+ /* For now only enable DQRR interrupts */ -+ qbman_swp_interrupt_set_trigger(o->swp, -+ QBMAN_SWP_INTERRUPT_DQRI); -+ } -+ qbman_swp_interrupt_clear_status(o->swp, 0xffffffff); -+ if (o->dpio_desc.receives_notifications) -+ qbman_swp_push_set(o->swp, 0, 1); -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_create); -+ -+struct dpaa2_io *dpaa2_io_create_service(void) -+{ -+ struct dpaa2_io *ret = kmalloc(sizeof(*ret), GFP_KERNEL); -+ -+ if (ret) -+ service_init(ret, 0); -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_create_service); -+ -+struct dpaa2_io *dpaa2_io_default_service(void) -+{ -+ atomic_inc(&def_serv.refs); -+ return &def_serv; -+} -+EXPORT_SYMBOL(dpaa2_io_default_service); -+ -+void dpaa2_io_down(struct dpaa2_io *d) -+{ -+ if (!atomic_dec_and_test(&d->refs)) -+ return; -+ if (d->magic == MAGIC_SERVICE) { -+ 
BUG_ON(!list_empty(&d->service.list)); -+ if (d->service.is_defservice) -+ /* avoid the kfree()! */ -+ return; -+ } else { -+ BUG_ON(d->magic != MAGIC_OBJECT); -+ BUG_ON(d->object.service); -+ BUG_ON(!list_empty(&d->object.notifications)); -+ } -+ kfree(d); -+} -+EXPORT_SYMBOL(dpaa2_io_down); -+ -+int dpaa2_io_service_add(struct dpaa2_io *s, struct dpaa2_io *o) -+{ -+ struct dpaa2_io_service *ss = &s->service; -+ struct dpaa2_io_object *oo = &o->object; -+ int res = -EINVAL; -+ -+ if ((s->magic != MAGIC_SERVICE) || (o->magic != MAGIC_OBJECT)) -+ return res; -+ atomic_inc(&o->refs); -+ atomic_inc(&s->refs); -+ spin_lock(&ss->lock); -+ /* 'obj' must not already be associated with a service */ -+ if (!oo->service) { -+ oo->service = s; -+ list_add(&oo->node, &ss->list); -+ if (oo->dpio_desc.receives_notifications) { -+ cpumask_set_cpu(oo->dpio_desc.cpu, -+ &ss->cpus_notifications); -+ /* Update the fast-access array */ -+ ss->objects_by_cpu[oo->dpio_desc.cpu] = -+ container_of(oo, struct dpaa2_io, object); -+ } -+ if (oo->dpio_desc.stash_affinity) -+ cpumask_set_cpu(oo->dpio_desc.cpu, -+ &ss->cpus_stashing); -+ if (!oo->dpio_desc.stash_affinity) -+ ss->has_nonaffine = 1; -+ /* success */ -+ res = 0; -+ } -+ spin_unlock(&ss->lock); -+ if (res) { -+ dpaa2_io_down(s); -+ dpaa2_io_down(o); -+ } -+ return res; -+} -+EXPORT_SYMBOL(dpaa2_io_service_add); -+ -+int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc) -+{ -+ if (obj->magic == MAGIC_SERVICE) -+ return -EINVAL; -+ BUG_ON(obj->magic != MAGIC_OBJECT); -+ *desc = obj->object.dpio_desc; -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_get_descriptor); -+ -+#define DPAA_POLL_MAX 32 -+ -+int dpaa2_io_poll(struct dpaa2_io *obj) -+{ -+ const struct dpaa2_dq *dq; -+ struct qbman_swp *swp; -+ int max = 0; -+ -+ if (obj->magic != MAGIC_OBJECT) -+ return -EINVAL; -+ swp = obj->object.swp; -+ dq = qbman_swp_dqrr_next(swp); -+ while (dq) { -+ if (qbman_result_is_SCN(dq)) { -+ struct dpaa2_io_notification_ctx 
*ctx; -+ uint64_t q64; -+ -+ q64 = qbman_result_SCN_ctx(dq); -+ ctx = (void *)q64; -+ ctx->cb(ctx); -+ } else -+ pr_crit("Unrecognised/ignored DQRR entry\n"); -+ qbman_swp_dqrr_consume(swp, dq); -+ ++max; -+ if (max > DPAA_POLL_MAX) -+ return 0; -+ dq = qbman_swp_dqrr_next(swp); -+ } -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_poll); -+ -+int dpaa2_io_irq(struct dpaa2_io *obj) -+{ -+ struct qbman_swp *swp; -+ uint32_t status; -+ -+ if (obj->magic != MAGIC_OBJECT) -+ return -EINVAL; -+ swp = obj->object.swp; -+ status = qbman_swp_interrupt_read_status(swp); -+ if (!status) -+ return IRQ_NONE; -+ dpaa2_io_poll(obj); -+ qbman_swp_interrupt_clear_status(swp, status); -+ qbman_swp_interrupt_set_inhibit(swp, 0); -+ return IRQ_HANDLED; -+} -+EXPORT_SYMBOL(dpaa2_io_irq); -+ -+int dpaa2_io_pause_poll(struct dpaa2_io *obj) -+{ -+ UNIMPLEMENTED(); -+ return -EINVAL; -+} -+EXPORT_SYMBOL(dpaa2_io_pause_poll); -+ -+int dpaa2_io_resume_poll(struct dpaa2_io *obj) -+{ -+ UNIMPLEMENTED(); -+ return -EINVAL; -+} -+EXPORT_SYMBOL(dpaa2_io_resume_poll); -+ -+void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask) -+{ -+ struct dpaa2_io_service *ss = &s->service; -+ -+ BUG_ON(s->magic != MAGIC_SERVICE); -+ cpumask_copy(mask, &ss->cpus_notifications); -+} -+EXPORT_SYMBOL(dpaa2_io_service_notifications); -+ -+void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask) -+{ -+ struct dpaa2_io_service *ss = &s->service; -+ -+ BUG_ON(s->magic != MAGIC_SERVICE); -+ cpumask_copy(mask, &ss->cpus_stashing); -+} -+EXPORT_SYMBOL(dpaa2_io_service_stashing); -+ -+int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s) -+{ -+ struct dpaa2_io_service *ss = &s->service; -+ -+ BUG_ON(s->magic != MAGIC_SERVICE); -+ return ss->has_nonaffine; -+} -+EXPORT_SYMBOL(dpaa2_io_service_has_nonaffine); -+ -+int dpaa2_io_service_register(struct dpaa2_io *d, -+ struct dpaa2_io_notification_ctx *ctx) -+{ -+ unsigned long irqflags; -+ -+ d = service_select_by_cpu(d, ctx->desired_cpu); -+ if (!d) -+ 
return -ENODEV; -+ ctx->dpio_id = d->object.dpio_desc.dpio_id; -+ ctx->qman64 = (uint64_t)ctx; -+ ctx->dpio_private = d; -+ spin_lock_irqsave(&d->object.lock_notifications, irqflags); -+ list_add(&ctx->node, &d->object.notifications); -+ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); -+ if (ctx->is_cdan) -+ /* Enable the generation of CDAN notifications */ -+ qbman_swp_CDAN_set_context_enable(d->object.swp, -+ (uint16_t)ctx->id, -+ ctx->qman64); -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_service_register); -+ -+int dpaa2_io_service_deregister(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx) -+{ -+ struct dpaa2_io *d = ctx->dpio_private; -+ unsigned long irqflags; -+ -+ if (!service) -+ service = &def_serv; -+ BUG_ON((service != d) && (service != d->object.service)); -+ if (ctx->is_cdan) -+ qbman_swp_CDAN_disable(d->object.swp, -+ (uint16_t)ctx->id); -+ spin_lock_irqsave(&d->object.lock_notifications, irqflags); -+ list_del(&ctx->node); -+ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_service_deregister); -+ -+int dpaa2_io_service_rearm(struct dpaa2_io *d, -+ struct dpaa2_io_notification_ctx *ctx) -+{ -+ unsigned long irqflags; -+ int err; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ if (ctx->is_cdan) -+ err = qbman_swp_CDAN_enable(d->object.swp, (uint16_t)ctx->id); -+ else -+ err = qbman_swp_fq_schedule(d->object.swp, ctx->id); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_rearm); -+ -+int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx, -+ struct dpaa2_io **io) -+{ -+ struct dpaa2_io_notification_ctx *tmp; -+ struct dpaa2_io *d = ctx->dpio_private; -+ unsigned long irqflags; -+ int ret = 0; -+ -+ BUG_ON(d->magic != MAGIC_OBJECT); -+ /* Iterate the notifications associated with 'd' looking for a match. 
If -+ * not, we've been passed an unregistered ctx! */ -+ spin_lock_irqsave(&d->object.lock_notifications, irqflags); -+ list_for_each_entry(tmp, &d->object.notifications, node) -+ if (tmp == ctx) -+ goto found; -+ ret = -EINVAL; -+found: -+ spin_unlock_irqrestore(&d->object.lock_notifications, irqflags); -+ if (!ret) { -+ atomic_inc(&d->refs); -+ *io = d; -+ } -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_from_registration); -+ -+int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu, -+ struct dpaa2_io **ret) -+{ -+ if (cpu == -1) -+ *ret = service_select_any(service); -+ else -+ *ret = service_select_by_cpu(service, cpu); -+ if (*ret) { -+ atomic_inc(&(*ret)->refs); -+ return 0; -+ } -+ return -ENODEV; -+} -+EXPORT_SYMBOL(dpaa2_io_service_get_persistent); -+ -+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid, -+ struct dpaa2_io_store *s) -+{ -+ struct qbman_pull_desc pd; -+ int err; -+ -+ qbman_pull_desc_clear(&pd); -+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); -+ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max); -+ qbman_pull_desc_set_fq(&pd, fqid); -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ s->swp = d->object.swp; -+ err = qbman_swp_pull(d->object.swp, &pd); -+ if (err) -+ s->swp = NULL; -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_pull_fq); -+ -+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid, -+ struct dpaa2_io_store *s) -+{ -+ struct qbman_pull_desc pd; -+ int err; -+ -+ qbman_pull_desc_clear(&pd); -+ qbman_pull_desc_set_storage(&pd, s->vaddr, s->paddr, 1); -+ qbman_pull_desc_set_numframes(&pd, (uint8_t)s->max); -+ qbman_pull_desc_set_channel(&pd, channelid, qbman_pull_type_prio); -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ s->swp = d->object.swp; -+ err = qbman_swp_pull(d->object.swp, &pd); -+ if (err) -+ s->swp = NULL; -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_pull_channel); -+ -+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, -+ 
uint32_t fqid, -+ const struct dpaa2_fd *fd) -+{ -+ struct qbman_eq_desc ed; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ qbman_eq_desc_clear(&ed); -+ qbman_eq_desc_set_no_orp(&ed, 0); -+ qbman_eq_desc_set_fq(&ed, fqid); -+ return qbman_swp_enqueue(d->object.swp, &ed, -+ (const struct qbman_fd *)fd); -+} -+EXPORT_SYMBOL(dpaa2_io_service_enqueue_fq); -+ -+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, -+ uint32_t qdid, uint8_t prio, uint16_t qdbin, -+ const struct dpaa2_fd *fd) -+{ -+ struct qbman_eq_desc ed; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ qbman_eq_desc_clear(&ed); -+ qbman_eq_desc_set_no_orp(&ed, 0); -+ qbman_eq_desc_set_qd(&ed, qdid, qdbin, prio); -+ return qbman_swp_enqueue(d->object.swp, &ed, -+ (const struct qbman_fd *)fd); -+} -+EXPORT_SYMBOL(dpaa2_io_service_enqueue_qd); -+ -+int dpaa2_io_service_release(struct dpaa2_io *d, -+ uint32_t bpid, -+ const uint64_t *buffers, -+ unsigned int num_buffers) -+{ -+ struct qbman_release_desc rd; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ qbman_release_desc_clear(&rd); -+ qbman_release_desc_set_bpid(&rd, bpid); -+ return qbman_swp_release(d->object.swp, &rd, buffers, num_buffers); -+} -+EXPORT_SYMBOL(dpaa2_io_service_release); -+ -+int dpaa2_io_service_acquire(struct dpaa2_io *d, -+ uint32_t bpid, -+ uint64_t *buffers, -+ unsigned int num_buffers) -+{ -+ unsigned long irqflags; -+ int err; -+ -+ d = _service_select(d); -+ if (!d) -+ return -ENODEV; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ err = qbman_swp_acquire(d->object.swp, bpid, buffers, num_buffers); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ return err; -+} -+EXPORT_SYMBOL(dpaa2_io_service_acquire); -+ -+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, -+ struct device *dev) -+{ -+ struct dpaa2_io_store *ret = kmalloc(sizeof(*ret), GFP_KERNEL); -+ size_t size; -+ -+ BUG_ON(!max_frames || (max_frames > 16)); -+ if (!ret) -+ return 
NULL; -+ ret->max = max_frames; -+ size = max_frames * sizeof(struct dpaa2_dq) + 64; -+ ret->alloced_addr = kmalloc(size, GFP_KERNEL); -+ if (!ret->alloced_addr) { -+ kfree(ret); -+ return NULL; -+ } -+ ret->vaddr = PTR_ALIGN(ret->alloced_addr, 64); -+ ret->paddr = dma_map_single(dev, ret->vaddr, -+ sizeof(struct dpaa2_dq) * max_frames, -+ DMA_FROM_DEVICE); -+ if (dma_mapping_error(dev, ret->paddr)) { -+ kfree(ret->alloced_addr); -+ kfree(ret); -+ return NULL; -+ } -+ ret->idx = 0; -+ ret->dev = dev; -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_store_create); -+ -+void dpaa2_io_store_destroy(struct dpaa2_io_store *s) -+{ -+ dma_unmap_single(s->dev, s->paddr, sizeof(struct dpaa2_dq) * s->max, -+ DMA_FROM_DEVICE); -+ kfree(s->alloced_addr); -+ kfree(s); -+} -+EXPORT_SYMBOL(dpaa2_io_store_destroy); -+ -+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last) -+{ -+ int match; -+ struct dpaa2_dq *ret = &s->vaddr[s->idx]; -+ -+ match = qbman_result_has_new_result(s->swp, ret); -+ if (!match) { -+ *is_last = 0; -+ return NULL; -+ } -+ BUG_ON(!qbman_result_is_DQ(ret)); -+ s->idx++; -+ if (dpaa2_dq_is_pull_complete(ret)) { -+ *is_last = 1; -+ s->idx = 0; -+ /* If we get an empty dequeue result to terminate a zero-results -+ * vdqcr, return NULL to the caller rather than expecting him to -+ * check non-NULL results every time. 
*/ -+ if (!(dpaa2_dq_flags(ret) & DPAA2_DQ_STAT_VALIDFRAME)) -+ ret = NULL; -+ } else -+ *is_last = 0; -+ return ret; -+} -+EXPORT_SYMBOL(dpaa2_io_store_next); -+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid, -+ uint32_t *fcnt, uint32_t *bcnt) -+{ -+ struct qbman_attr state; -+ struct qbman_swp *swp; -+ unsigned long irqflags; -+ int ret; -+ -+ d = service_select_any(d); -+ if (!d) -+ return -ENODEV; -+ -+ swp = d->object.swp; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ ret = qbman_fq_query_state(swp, fqid, &state); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ if (ret) -+ return ret; -+ *fcnt = qbman_fq_state_frame_count(&state); -+ *bcnt = qbman_fq_state_byte_count(&state); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_query_fq_count); -+ -+int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, -+ uint32_t *num) -+{ -+ struct qbman_attr state; -+ struct qbman_swp *swp; -+ unsigned long irqflags; -+ int ret; -+ -+ d = service_select_any(d); -+ if (!d) -+ return -ENODEV; -+ -+ swp = d->object.swp; -+ spin_lock_irqsave(&d->object.lock_mgmt_cmd, irqflags); -+ ret = qbman_bp_query(swp, bpid, &state); -+ spin_unlock_irqrestore(&d->object.lock_mgmt_cmd, irqflags); -+ if (ret) -+ return ret; -+ *num = qbman_bp_info_num_free_bufs(&state); -+ return 0; -+} -+EXPORT_SYMBOL(dpaa2_io_query_bp_count); -+ -+#endif -+ -+/* module init/exit hooks called from dpio-drv.c. These are declared in -+ * dpio-drv.h. 
-+ */ -+int dpaa2_io_service_driver_init(void) -+{ -+ service_init(&def_serv, 1); -+ return 0; -+} -+ -+void dpaa2_io_service_driver_exit(void) -+{ -+ if (atomic_read(&def_serv.refs) != 1) -+ pr_err("default DPIO service leaves dangling DPIO objects!\n"); -+} -diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h -new file mode 100644 -index 0000000..88a492f ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio.h -@@ -0,0 +1,460 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPIO_H -+#define __FSL_DPIO_H -+ -+/* Data Path I/O Portal API -+ * Contains initialization APIs and runtime control APIs for DPIO -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpio_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpio_id: DPIO unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpio_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpio_id, -+ uint16_t *token); -+ -+/** -+ * dpio_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpio_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * enum dpio_channel_mode - DPIO notification channel mode -+ * @DPIO_NO_CHANNEL: No support for notification channel -+ * @DPIO_LOCAL_CHANNEL: Notifications on data availability can be received by a -+ * dedicated channel in the DPIO; user should point the queue's -+ * destination in the relevant interface to this DPIO -+ */ -+enum dpio_channel_mode { -+ DPIO_NO_CHANNEL = 0, -+ DPIO_LOCAL_CHANNEL = 1, -+}; -+ -+/** -+ * struct dpio_cfg - Structure representing DPIO configuration -+ * @channel_mode: Notification channel mode -+ * @num_priorities: Number of priorities for the notification channel (1-8); -+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' -+ */ -+struct dpio_cfg { -+ enum dpio_channel_mode channel_mode; -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpio_create() - Create the DPIO object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPIO object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpio_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpio_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpio_destroy() - Destroy the DPIO object and release all its resources. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_enable() - Enable the DPIO, allow I/O portal operations. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_disable() - Disable the DPIO, stop any I/O portal operation. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_is_enabled() - Check if the DPIO is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpio_reset() - Reset the DPIO, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpio_set_stashing_destination() - Set the stashing destination. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @sdest: stashing destination value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t sdest); -+ -+/** -+ * dpio_get_stashing_destination() - Get the stashing destination.. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @sdest: Returns the stashing destination value -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_stashing_destination(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t *sdest); -+ -+/** -+ * dpio_add_static_dequeue_channel() - Add a static dequeue channel. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @dpcon_id: DPCON object ID -+ * @channel_index: Returned channel index to be used in qbman API -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_add_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id, -+ uint8_t *channel_index); -+ -+/** -+ * dpio_remove_static_dequeue_channel() - Remove a static dequeue channel. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @dpcon_id: DPCON object ID -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpio_remove_static_dequeue_channel(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int dpcon_id); -+ -+/** -+ * DPIO IRQ Index and Events -+ */ -+ -+/** -+ * Irq software-portal index -+ */ -+#define DPIO_IRQ_SWP_INDEX 0 -+ -+/** -+ * struct dpio_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpio_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpio_set_irq() - Set IRQ information for the DPIO to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpio_irq_cfg *irq_cfg); -+ -+/** -+ * dpio_get_irq() - Get IRQ information from the DPIO. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpio_irq_cfg *irq_cfg); -+ -+/** -+ * dpio_set_irq_enable() - Set overall interrupt state. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpio_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpio_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpio_get_irq_mask() - Get interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpio_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpio_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpio_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpio_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpio_attr - Structure representing DPIO attributes -+ * @id: DPIO object ID -+ * @version: DPIO version -+ * @qbman_portal_ce_offset: offset of the software portal cache-enabled area -+ * @qbman_portal_ci_offset: offset of the software portal cache-inhibited area -+ * @qbman_portal_id: Software portal ID -+ * @channel_mode: Notification channel mode -+ * @num_priorities: Number of priorities for the notification channel (1-8); -+ * relevant only if 'channel_mode = DPIO_LOCAL_CHANNEL' -+ * @qbman_version: QBMAN version -+ */ -+struct dpio_attr { -+ int id; -+ /** -+ * struct version - DPIO version -+ * @major: DPIO major version -+ * @minor: DPIO minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint64_t qbman_portal_ce_offset; -+ uint64_t qbman_portal_ci_offset; -+ uint16_t qbman_portal_id; -+ enum dpio_channel_mode channel_mode; -+ uint8_t num_priorities; -+ uint32_t qbman_version; -+}; -+ -+/** -+ * dpio_get_attributes() - Retrieve DPIO attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPIO object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpio_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpio_attr *attr); -+#endif /* __FSL_DPIO_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h -new file mode 100644 -index 0000000..f339cd6 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_dpio_cmd.h -@@ -0,0 +1,184 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef _FSL_DPIO_CMD_H -+#define _FSL_DPIO_CMD_H -+ -+/* DPIO Version */ -+#define DPIO_VER_MAJOR 3 -+#define DPIO_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPIO_CMDID_CLOSE 0x800 -+#define DPIO_CMDID_OPEN 0x803 -+#define DPIO_CMDID_CREATE 0x903 -+#define DPIO_CMDID_DESTROY 0x900 -+ -+#define DPIO_CMDID_ENABLE 0x002 -+#define DPIO_CMDID_DISABLE 0x003 -+#define DPIO_CMDID_GET_ATTR 0x004 -+#define DPIO_CMDID_RESET 0x005 -+#define DPIO_CMDID_IS_ENABLED 0x006 -+ -+#define DPIO_CMDID_SET_IRQ 0x010 -+#define DPIO_CMDID_GET_IRQ 0x011 -+#define DPIO_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPIO_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPIO_CMDID_SET_IRQ_MASK 0x014 -+#define DPIO_CMDID_GET_IRQ_MASK 0x015 -+#define DPIO_CMDID_GET_IRQ_STATUS 0x016 -+#define DPIO_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPIO_CMDID_SET_STASHING_DEST 0x120 -+#define DPIO_CMDID_GET_STASHING_DEST 0x121 -+#define DPIO_CMDID_ADD_STATIC_DEQUEUE_CHANNEL 0x122 -+#define DPIO_CMDID_REMOVE_STATIC_DEQUEUE_CHANNEL 0x123 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_OPEN(cmd, dpio_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpio_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_CREATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 16, 2, enum dpio_channel_mode, \ -+ cfg->channel_mode);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->num_priorities);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, 
irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while 
(0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_portal_id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ -+ MC_RSP_OP(cmd, 0, 56, 4, enum dpio_channel_mode, attr->channel_mode);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, attr->qbman_portal_ce_offset);\ -+ MC_RSP_OP(cmd, 2, 0, 64, uint64_t, attr->qbman_portal_ci_offset);\ -+ MC_RSP_OP(cmd, 3, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 3, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 3, 32, 32, uint32_t, attr->qbman_version);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_SET_STASHING_DEST(cmd, sdest) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, sdest) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_GET_STASHING_DEST(cmd, sdest) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, sdest) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_ADD_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_RSP_ADD_STATIC_DEQUEUE_CHANNEL(cmd, channel_index) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, channel_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPIO_CMD_REMOVE_STATIC_DEQUEUE_CHANNEL(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+#endif /* _FSL_DPIO_CMD_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h -new file mode 100644 -index 0000000..2874ff8 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_base.h -@@ -0,0 +1,123 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_QBMAN_BASE_H -+#define _FSL_QBMAN_BASE_H -+ -+/** -+ * struct qbman_block_desc - qbman block descriptor structure -+ * -+ * Descriptor for a QBMan instance on the SoC. 
On partitions/targets that do not -+ * control this QBMan instance, these values may simply be place-holders. The -+ * idea is simply that we be able to distinguish between them, eg. so that SWP -+ * descriptors can identify which QBMan instance they belong to. -+ */ -+struct qbman_block_desc { -+ void *ccsr_reg_bar; /* CCSR register map */ -+ int irq_rerr; /* Recoverable error interrupt line */ -+ int irq_nrerr; /* Non-recoverable error interrupt line */ -+}; -+ -+/** -+ * struct qbman_swp_desc - qbman software portal descriptor structure -+ * -+ * Descriptor for a QBMan software portal, expressed in terms that make sense to -+ * the user context. Ie. on MC, this information is likely to be true-physical, -+ * and instantiated statically at compile-time. On GPP, this information is -+ * likely to be obtained via "discovery" over a partition's "layerscape bus" -+ * (ie. in response to a MC portal command), and would take into account any -+ * virtualisation of the GPP user's address space and/or interrupt numbering. -+ */ -+struct qbman_swp_desc { -+ const struct qbman_block_desc *block; /* The QBMan instance */ -+ void *cena_bar; /* Cache-enabled portal register map */ -+ void *cinh_bar; /* Cache-inhibited portal register map */ -+ uint32_t qman_version; -+}; -+ -+/* Driver object for managing a QBMan portal */ -+struct qbman_swp; -+ -+/** -+ * struct qbman_fd - basci structure for qbman frame descriptor -+ * -+ * Place-holder for FDs, we represent it via the simplest form that we need for -+ * now. Different overlays may be needed to support different options, etc. (It -+ * is impractical to define One True Struct, because the resulting encoding -+ * routines (lots of read-modify-writes) would be worst-case performance whether -+ * or not circumstances required them.) 
-+ * -+ * Note, as with all data-structures exchanged between software and hardware (be -+ * they located in the portal register map or DMA'd to and from main-memory), -+ * the driver ensures that the caller of the driver API sees the data-structures -+ * in host-endianness. "struct qbman_fd" is no exception. The 32-bit words -+ * contained within this structure are represented in host-endianness, even if -+ * hardware always treats them as little-endian. As such, if any of these fields -+ * are interpreted in a binary (rather than numerical) fashion by hardware -+ * blocks (eg. accelerators), then the user should be careful. We illustrate -+ * with an example; -+ * -+ * Suppose the desired behaviour of an accelerator is controlled by the "frc" -+ * field of the FDs that are sent to it. Suppose also that the behaviour desired -+ * by the user corresponds to an "frc" value which is expressed as the literal -+ * sequence of bytes 0xfe, 0xed, 0xab, and 0xba. So "frc" should be the 32-bit -+ * value in which 0xfe is the first byte and 0xba is the last byte, and as -+ * hardware is little-endian, this amounts to a 32-bit "value" of 0xbaabedfe. If -+ * the software is little-endian also, this can simply be achieved by setting -+ * frc=0xbaabedfe. On the other hand, if software is big-endian, it should set -+ * frc=0xfeedabba! The best away of avoiding trouble with this sort of thing is -+ * to treat the 32-bit words as numerical values, in which the offset of a field -+ * from the beginning of the first byte (as required or generated by hardware) -+ * is numerically encoded by a left-shift (ie. by raising the field to a -+ * corresponding power of 2). Ie. 
in the current example, software could set -+ * "frc" in the following way, and it would work correctly on both little-endian -+ * and big-endian operation; -+ * fd.frc = (0xfe << 0) | (0xed << 8) | (0xab << 16) | (0xba << 24); -+ */ -+struct qbman_fd { -+ union { -+ uint32_t words[8]; -+ struct qbman_fd_simple { -+ uint32_t addr_lo; -+ uint32_t addr_hi; -+ uint32_t len; -+ /* offset in the MS 16 bits, BPID in the LS 16 bits */ -+ uint32_t bpid_offset; -+ uint32_t frc; /* frame context */ -+ /* "err", "va", "cbmt", "asal", [...] */ -+ uint32_t ctrl; -+ /* flow context */ -+ uint32_t flc_lo; -+ uint32_t flc_hi; -+ } simple; -+ }; -+}; -+ -+#endif /* !_FSL_QBMAN_BASE_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h -new file mode 100644 -index 0000000..c9e543e ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/fsl_qbman_portal.h -@@ -0,0 +1,753 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_QBMAN_PORTAL_H -+#define _FSL_QBMAN_PORTAL_H -+ -+#include "fsl_qbman_base.h" -+ -+/** -+ * qbman_swp_init() - Create a functional object representing the given -+ * QBMan portal descriptor. -+ * @d: the given qbman swp descriptor -+ * -+ * Return qbman_swp portal object for success, NULL if the object cannot -+ * be created. -+ */ -+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d); -+/** -+ * qbman_swp_finish() - Create and destroy a functional object representing -+ * the given QBMan portal descriptor. -+ * @p: the qbman_swp object to be destroyed. -+ * -+ */ -+void qbman_swp_finish(struct qbman_swp *p); -+ -+/** -+ * qbman_swp_get_desc() - Get the descriptor of the given portal object. -+ * @p: the given portal object. -+ * -+ * Return the descriptor for this portal. -+ */ -+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p); -+ -+ /**************/ -+ /* Interrupts */ -+ /**************/ -+ -+/* See the QBMan driver API documentation for details on the interrupt -+ * mechanisms. 
*/ -+#define QBMAN_SWP_INTERRUPT_EQRI ((uint32_t)0x00000001) -+#define QBMAN_SWP_INTERRUPT_EQDI ((uint32_t)0x00000002) -+#define QBMAN_SWP_INTERRUPT_DQRI ((uint32_t)0x00000004) -+#define QBMAN_SWP_INTERRUPT_RCRI ((uint32_t)0x00000008) -+#define QBMAN_SWP_INTERRUPT_RCDI ((uint32_t)0x00000010) -+#define QBMAN_SWP_INTERRUPT_VDCI ((uint32_t)0x00000020) -+ -+/** -+ * qbman_swp_interrupt_get_vanish() -+ * qbman_swp_interrupt_set_vanish() - Get/Set the data in software portal -+ * interrupt status disable register. -+ * @p: the given software portal object. -+ * @mask: The mask to set in SWP_IDSR register. -+ * -+ * Return the settings in SWP_ISDR register for Get function. -+ */ -+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p); -+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_read_status() -+ * qbman_swp_interrupt_clear_status() - Get/Set the data in software portal -+ * interrupt status register. -+ * @p: the given software portal object. -+ * @mask: The mask to set in SWP_ISR register. -+ * -+ * Return the settings in SWP_ISR register for Get function. -+ * -+ */ -+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p); -+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_get_trigger() -+ * qbman_swp_interrupt_set_trigger() - Get/Set the data in software portal -+ * interrupt enable register. -+ * @p: the given software portal object. -+ * @mask: The mask to set in SWP_IER register. -+ * -+ * Return the settings in SWP_IER register for Get function. -+ */ -+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p); -+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask); -+ -+/** -+ * qbman_swp_interrupt_get_inhibit() -+ * qbman_swp_interrupt_set_inhibit() - Set/Set the data in software portal -+ * interrupt inhibit register. -+ * @p: the given software portal object. 
-+ * @mask: The mask to set in SWP_IIR register. -+ * -+ * Return the settings in SWP_IIR register for Get function. -+ */ -+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p); -+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit); -+ -+ /************/ -+ /* Dequeues */ -+ /************/ -+ -+/* See the QBMan driver API documentation for details on the enqueue -+ * mechanisms. NB: the use of a 'dpaa2_' prefix for this type is because it is -+ * primarily used by the "DPIO" layer that sits above (and hides) the QBMan -+ * driver. The structure is defined in the DPIO interface, but to avoid circular -+ * dependencies we just pre/re-declare it here opaquely. */ -+struct dpaa2_dq; -+ -+/* ------------------- */ -+/* Push-mode dequeuing */ -+/* ------------------- */ -+ -+/** -+ * qbman_swp_push_get() - Get the push dequeue setup. -+ * @p: the software portal object. -+ * @channel_idx: the channel index to query. -+ * @enabled: returned boolean to show whether the push dequeue is enabled for -+ * the given channel. -+ */ -+void qbman_swp_push_get(struct qbman_swp *, uint8_t channel_idx, int *enabled); -+/** -+ * qbman_swp_push_set() - Enable or disable push dequeue. -+ * @p: the software portal object. -+ * @channel_idx: the channel index.. -+ * @enable: enable or disable push dequeue. -+ * -+ * The user of a portal can enable and disable push-mode dequeuing of up to 16 -+ * channels independently. It does not specify this toggling by channel IDs, but -+ * rather by specifying the index (from 0 to 15) that has been mapped to the -+ * desired channel. 
-+ */ -+void qbman_swp_push_set(struct qbman_swp *, uint8_t channel_idx, int enable); -+ -+/* ------------------- */ -+/* Pull-mode dequeuing */ -+/* ------------------- */ -+ -+/** -+ * struct qbman_pull_desc - the structure for pull dequeue descriptor -+ */ -+struct qbman_pull_desc { -+ uint32_t dont_manipulate_directly[6]; -+}; -+ -+enum qbman_pull_type_e { -+ /* dequeue with priority precedence, respect intra-class scheduling */ -+ qbman_pull_type_prio = 1, -+ /* dequeue with active FQ precedence, respect ICS */ -+ qbman_pull_type_active, -+ /* dequeue with active FQ precedence, no ICS */ -+ qbman_pull_type_active_noics -+}; -+ -+/** -+ * qbman_pull_desc_clear() - Clear the contents of a descriptor to -+ * default/starting state. -+ * @d: the pull dequeue descriptor to be cleared. -+ */ -+void qbman_pull_desc_clear(struct qbman_pull_desc *d); -+ -+/** -+ * qbman_pull_desc_set_storage()- Set the pull dequeue storage -+ * @d: the pull dequeue descriptor to be set. -+ * @storage: the pointer of the memory to store the dequeue result. -+ * @storage_phys: the physical address of the storage memory. -+ * @stash: to indicate whether write allocate is enabled. -+ * -+ * If not called, or if called with 'storage' as NULL, the result pull dequeues -+ * will produce results to DQRR. If 'storage' is non-NULL, then results are -+ * produced to the given memory location (using the physical/DMA address which -+ * the caller provides in 'storage_phys'), and 'stash' controls whether or not -+ * those writes to main-memory express a cache-warming attribute. -+ */ -+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, -+ struct dpaa2_dq *storage, -+ dma_addr_t storage_phys, -+ int stash); -+/** -+ * qbman_pull_desc_set_numframes() - Set the number of frames to be dequeued. -+ * @d: the pull dequeue descriptor to be set. -+ * @numframes: number of frames to be set, must be between 1 and 16, inclusive. 
-+ */ -+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *, uint8_t numframes); -+ -+/** -+ * qbman_pull_desc_set_fq() - Set fqid from which the dequeue command dequeues. -+ * @fqid: the frame queue index of the given FQ. -+ * -+ * qbman_pull_desc_set_wq() - Set wqid from which the dequeue command dequeues. -+ * @wqid: composed of channel id and wqid within the channel. -+ * @dct: the dequeue command type. -+ * -+ * qbman_pull_desc_set_channel() - Set channelid from which the dequeue command -+ * dequeues. -+ * @chid: the channel id to be dequeued. -+ * @dct: the dequeue command type. -+ * -+ * Exactly one of the following descriptor "actions" should be set. (Calling any -+ * one of these will replace the effect of any prior call to one of these.) -+ * - pull dequeue from the given frame queue (FQ) -+ * - pull dequeue from any FQ in the given work queue (WQ) -+ * - pull dequeue from any FQ in any WQ in the given channel -+ */ -+void qbman_pull_desc_set_fq(struct qbman_pull_desc *, uint32_t fqid); -+void qbman_pull_desc_set_wq(struct qbman_pull_desc *, uint32_t wqid, -+ enum qbman_pull_type_e dct); -+void qbman_pull_desc_set_channel(struct qbman_pull_desc *, uint32_t chid, -+ enum qbman_pull_type_e dct); -+ -+/** -+ * qbman_swp_pull() - Issue the pull dequeue command -+ * @s: the software portal object. -+ * @d: the software portal descriptor which has been configured with -+ * the set of qbman_pull_desc_set_*() calls. -+ * -+ * Return 0 for success, and -EBUSY if the software portal is not ready -+ * to do pull dequeue. -+ */ -+int qbman_swp_pull(struct qbman_swp *, struct qbman_pull_desc *d); -+ -+/* -------------------------------- */ -+/* Polling DQRR for dequeue results */ -+/* -------------------------------- */ -+ -+/** -+ * qbman_swp_dqrr_next() - Get an valid DQRR entry. -+ * @s: the software portal object. -+ * -+ * Return NULL if there are no unconsumed DQRR entries. 
Return a DQRR entry -+ * only once, so repeated calls can return a sequence of DQRR entries, without -+ * requiring they be consumed immediately or in any particular order. -+ */ -+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s); -+ -+/** -+ * qbman_swp_dqrr_consume() - Consume DQRR entries previously returned from -+ * qbman_swp_dqrr_next(). -+ * @s: the software portal object. -+ * @dq: the DQRR entry to be consumed. -+ */ -+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq); -+ -+/* ------------------------------------------------- */ -+/* Polling user-provided storage for dequeue results */ -+/* ------------------------------------------------- */ -+/** -+ * qbman_result_has_new_result() - Check and get the dequeue response from the -+ * dq storage memory set in pull dequeue command -+ * @s: the software portal object. -+ * @dq: the dequeue result read from the memory. -+ * -+ * Only used for user-provided storage of dequeue results, not DQRR. For -+ * efficiency purposes, the driver will perform any required endianness -+ * conversion to ensure that the user's dequeue result storage is in host-endian -+ * format (whether or not that is the same as the little-endian format that -+ * hardware DMA'd to the user's storage). As such, once the user has called -+ * qbman_result_has_new_result() and been returned a valid dequeue result, -+ * they should not call it again on the same memory location (except of course -+ * if another dequeue command has been executed to produce a new result to that -+ * location). -+ * -+ * Return 1 for getting a valid dequeue result, or 0 for not getting a valid -+ * dequeue result. 
-+ */ -+int qbman_result_has_new_result(struct qbman_swp *, -+ const struct dpaa2_dq *); -+ -+/* -------------------------------------------------------- */ -+/* Parsing dequeue entries (DQRR and user-provided storage) */ -+/* -------------------------------------------------------- */ -+ -+/** -+ * qbman_result_is_DQ() - check the dequeue result is a dequeue response or not -+ * @dq: the dequeue result to be checked. -+ * -+ * DQRR entries may contain non-dequeue results, ie. notifications -+ */ -+int qbman_result_is_DQ(const struct dpaa2_dq *); -+ -+/** -+ * qbman_result_is_SCN() - Check the dequeue result is notification or not -+ * @dq: the dequeue result to be checked. -+ * -+ * All the non-dequeue results (FQDAN/CDAN/CSCN/...) are "state change -+ * notifications" of one type or another. Some APIs apply to all of them, of the -+ * form qbman_result_SCN_***(). -+ */ -+static inline int qbman_result_is_SCN(const struct dpaa2_dq *dq) -+{ -+ return !qbman_result_is_DQ(dq); -+} -+ -+/** -+ * Recognise different notification types, only required if the user allows for -+ * these to occur, and cares about them when they do. 
-+ */ -+int qbman_result_is_FQDAN(const struct dpaa2_dq *); -+ /* FQ Data Availability */ -+int qbman_result_is_CDAN(const struct dpaa2_dq *); -+ /* Channel Data Availability */ -+int qbman_result_is_CSCN(const struct dpaa2_dq *); -+ /* Congestion State Change */ -+int qbman_result_is_BPSCN(const struct dpaa2_dq *); -+ /* Buffer Pool State Change */ -+int qbman_result_is_CGCU(const struct dpaa2_dq *); -+ /* Congestion Group Count Update */ -+/* Frame queue state change notifications; (FQDAN in theory counts too as it -+ * leaves a FQ parked, but it is primarily a data availability notification) */ -+int qbman_result_is_FQRN(const struct dpaa2_dq *); /* Retirement */ -+int qbman_result_is_FQRNI(const struct dpaa2_dq *); -+ /* Retirement Immediate */ -+int qbman_result_is_FQPN(const struct dpaa2_dq *); /* Park */ -+ -+/* NB: for parsing dequeue results (when "is_DQ" is TRUE), use the higher-layer -+ * dpaa2_dq_*() functions. */ -+ -+/* State-change notifications (FQDAN/CDAN/CSCN/...). */ -+/** -+ * qbman_result_SCN_state() - Get the state field in State-change notification -+ */ -+uint8_t qbman_result_SCN_state(const struct dpaa2_dq *); -+/** -+ * qbman_result_SCN_rid() - Get the resource id in State-change notification -+ */ -+uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *); -+/** -+ * qbman_result_SCN_ctx() - Get the context data in State-change notification -+ */ -+uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *); -+/** -+ * qbman_result_SCN_state_in_mem() - Get the state field in State-change -+ * notification which is written to memory instead of DQRR. -+ */ -+uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *); -+/** -+ * qbman_result_SCN_rid_in_mem() - Get the resource id in State-change -+ * notification which is written to memory instead of DQRR. -+ */ -+uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *); -+ -+/* Type-specific "resource IDs". 
Mainly for illustration purposes, though it -+ * also gives the appropriate type widths. */ -+#define qbman_result_FQDAN_fqid(dq) qbman_result_SCN_rid(dq) -+#define qbman_result_FQRN_fqid(dq) qbman_result_SCN_rid(dq) -+#define qbman_result_FQRNI_fqid(dq) qbman_result_SCN_rid(dq) -+#define qbman_result_FQPN_fqid(dq) qbman_result_SCN_rid(dq) -+#define qbman_result_CDAN_cid(dq) ((uint16_t)qbman_result_SCN_rid(dq)) -+#define qbman_result_CSCN_cgid(dq) ((uint16_t)qbman_result_SCN_rid(dq)) -+ -+/** -+ * qbman_result_bpscn_bpid() - Get the bpid from BPSCN -+ * -+ * Return the buffer pool id. -+ */ -+uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *); -+/** -+ * qbman_result_bpscn_has_free_bufs() - Check whether there are free -+ * buffers in the pool from BPSCN. -+ * -+ * Return the number of free buffers. -+ */ -+int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *); -+/** -+ * qbman_result_bpscn_is_depleted() - Check BPSCN to see whether the -+ * buffer pool is depleted. -+ * -+ * Return the status of buffer pool depletion. -+ */ -+int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *); -+/** -+ * qbman_result_bpscn_is_surplus() - Check BPSCN to see whether the buffer -+ * pool is surplus or not. -+ * -+ * Return the status of buffer pool surplus. -+ */ -+int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *); -+/** -+ * qbman_result_bpscn_ctx() - Get the BPSCN CTX from BPSCN message -+ * -+ * Return the BPSCN context. -+ */ -+uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *); -+ -+/* Parsing CGCU */ -+/** -+ * qbman_result_cgcu_cgid() - Check CGCU resouce id, i.e. cgid -+ * -+ * Return the CGCU resource id. -+ */ -+uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *); -+/** -+ * qbman_result_cgcu_icnt() - Get the I_CNT from CGCU -+ * -+ * Return instantaneous count in the CGCU notification. 
-+ */ -+uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *); -+ -+ /************/ -+ /* Enqueues */ -+ /************/ -+/** -+ * struct qbman_eq_desc - structure of enqueue descriptor -+ */ -+struct qbman_eq_desc { -+ uint32_t dont_manipulate_directly[8]; -+}; -+ -+/** -+ * struct qbman_eq_response - structure of enqueue response -+ */ -+struct qbman_eq_response { -+ uint32_t dont_manipulate_directly[16]; -+}; -+ -+/** -+ * qbman_eq_desc_clear() - Clear the contents of a descriptor to -+ * default/starting state. -+ */ -+void qbman_eq_desc_clear(struct qbman_eq_desc *); -+ -+/* Exactly one of the following descriptor "actions" should be set. (Calling -+ * any one of these will replace the effect of any prior call to one of these.) -+ * - enqueue without order-restoration -+ * - enqueue with order-restoration -+ * - fill a hole in the order-restoration sequence, without any enqueue -+ * - advance NESN (Next Expected Sequence Number), without any enqueue -+ * 'respond_success' indicates whether an enqueue response should be DMA'd -+ * after success (otherwise a response is DMA'd only after failure). -+ * 'incomplete' indicates that other fragments of the same 'seqnum' are yet to -+ * be enqueued. -+ */ -+/** -+ * qbman_eq_desc_set_no_orp() - Set enqueue descriptor without orp -+ * @d: the enqueue descriptor. -+ * @response_success: 1 = enqueue with response always; 0 = enqueue with -+ * rejections returned on a FQ. -+ */ -+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success); -+ -+/** -+ * qbman_eq_desc_set_orp() - Set order-resotration in the enqueue descriptor -+ * @d: the enqueue descriptor. -+ * @response_success: 1 = enqueue with response always; 0 = enqueue with -+ * rejections returned on a FQ. -+ * @opr_id: the order point record id. -+ * @seqnum: the order restoration sequence number. -+ * @incomplete: indiates whether this is the last fragments using the same -+ * sequeue number. 
-+ */ -+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success, -+ uint32_t opr_id, uint32_t seqnum, int incomplete); -+ -+/** -+ * qbman_eq_desc_set_orp_hole() - fill a hole in the order-restoration sequence -+ * without any enqueue -+ * @d: the enqueue descriptor. -+ * @opr_id: the order point record id. -+ * @seqnum: the order restoration sequence number. -+ */ -+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum); -+ -+/** -+ * qbman_eq_desc_set_orp_nesn() - advance NESN (Next Expected Sequence Number) -+ * without any enqueue -+ * @d: the enqueue descriptor. -+ * @opr_id: the order point record id. -+ * @seqnum: the order restoration sequence number. -+ */ -+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum); -+ -+/** -+ * qbman_eq_desc_set_response() - Set the enqueue response info. -+ * @d: the enqueue descriptor -+ * @storage_phys: the physical address of the enqueue response in memory. -+ * @stash: indicate that the write allocation enabled or not. -+ * -+ * In the case where an enqueue response is DMA'd, this determines where that -+ * response should go. (The physical/DMA address is given for hardware's -+ * benefit, but software should interpret it as a "struct qbman_eq_response" -+ * data structure.) 'stash' controls whether or not the write to main-memory -+ * expresses a cache-warming attribute. -+ */ -+void qbman_eq_desc_set_response(struct qbman_eq_desc *d, -+ dma_addr_t storage_phys, -+ int stash); -+/** -+ * qbman_eq_desc_set_token() - Set token for the enqueue command -+ * @d: the enqueue descriptor -+ * @token: the token to be set. -+ * -+ * token is the value that shows up in an enqueue response that can be used to -+ * detect when the results have been published. The easiest technique is to zero -+ * result "storage" before issuing an enqueue, and use any non-zero 'token' -+ * value. 
-+ */ -+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token); -+ -+/** -+ * qbman_eq_desc_set_fq() -+ * qbman_eq_desc_set_qd() - Set eithe FQ or Queuing Destination for the enqueue -+ * command. -+ * @d: the enqueue descriptor -+ * @fqid: the id of the frame queue to be enqueued. -+ * @qdid: the id of the queuing destination to be enqueued. -+ * @qd_bin: the queuing destination bin -+ * @qd_prio: the queuing destination priority. -+ * -+ * Exactly one of the following descriptor "targets" should be set. (Calling any -+ * one of these will replace the effect of any prior call to one of these.) -+ * - enqueue to a frame queue -+ * - enqueue to a queuing destination -+ * Note, that none of these will have any affect if the "action" type has been -+ * set to "orp_hole" or "orp_nesn". -+ */ -+void qbman_eq_desc_set_fq(struct qbman_eq_desc *, uint32_t fqid); -+void qbman_eq_desc_set_qd(struct qbman_eq_desc *, uint32_t qdid, -+ uint32_t qd_bin, uint32_t qd_prio); -+ -+/** -+ * qbman_eq_desc_set_eqdi() - enable/disable EQDI interrupt -+ * @d: the enqueue descriptor -+ * @enable: boolean to enable/disable EQDI -+ * -+ * Determines whether or not the portal's EQDI interrupt source should be -+ * asserted after the enqueue command is completed. -+ */ -+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *, int enable); -+ -+/** -+ * qbman_eq_desc_set_dca() - Set DCA mode in the enqueue command. -+ * @d: the enqueue descriptor. -+ * @enable: enabled/disable DCA mode. -+ * @dqrr_idx: DCAP_CI, the DCAP consumer index. -+ * @park: determine the whether park the FQ or not -+ * -+ * Determines whether or not a portal DQRR entry should be consumed once the -+ * enqueue command is completed. (And if so, and the DQRR entry corresponds -+ * to a held-active (order-preserving) FQ, whether the FQ should be parked -+ * instead of being rescheduled.) 
-+ */ -+void qbman_eq_desc_set_dca(struct qbman_eq_desc *, int enable, -+ uint32_t dqrr_idx, int park); -+ -+/** -+ * qbman_swp_enqueue() - Issue an enqueue command. -+ * @s: the software portal used for enqueue. -+ * @d: the enqueue descriptor. -+ * @fd: the frame descriptor to be enqueued. -+ * -+ * Please note that 'fd' should only be NULL if the "action" of the -+ * descriptor is "orp_hole" or "orp_nesn". -+ * -+ * Return 0 for successful enqueue, -EBUSY if the EQCR is not ready. -+ */ -+int qbman_swp_enqueue(struct qbman_swp *, const struct qbman_eq_desc *, -+ const struct qbman_fd *fd); -+ -+/** -+ * qbman_swp_enqueue_thresh() - Set the threshold for EQRI interrupt. -+ * -+ * An EQRI interrupt can be generated when the fill-level of EQCR falls below -+ * the 'thresh' value set here. Setting thresh==0 (the default) disables. -+ */ -+int qbman_swp_enqueue_thresh(struct qbman_swp *, unsigned int thresh); -+ -+ /*******************/ -+ /* Buffer releases */ -+ /*******************/ -+/** -+ * struct qbman_release_desc - The structure for buffer release descriptor -+ */ -+struct qbman_release_desc { -+ uint32_t dont_manipulate_directly[1]; -+}; -+ -+/** -+ * qbman_release_desc_clear() - Clear the contents of a descriptor to -+ * default/starting state. -+ */ -+void qbman_release_desc_clear(struct qbman_release_desc *); -+ -+/** -+ * qbman_release_desc_set_bpid() - Set the ID of the buffer pool to release to -+ */ -+void qbman_release_desc_set_bpid(struct qbman_release_desc *, uint32_t bpid); -+ -+/** -+ * qbman_release_desc_set_rcdi() - Determines whether or not the portal's RCDI -+ * interrupt source should be asserted after the release command is completed. -+ */ -+void qbman_release_desc_set_rcdi(struct qbman_release_desc *, int enable); -+ -+/** -+ * qbman_swp_release() - Issue a buffer release command. -+ * @s: the software portal object. -+ * @d: the release descriptor. -+ * @buffers: a pointer pointing to the buffer address to be released. 
-+ * @num_buffers: number of buffers to be released, must be less than 8. -+ * -+ * Return 0 for success, -EBUSY if the release command ring is not ready. -+ */ -+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, -+ const uint64_t *buffers, unsigned int num_buffers); -+ -+ /*******************/ -+ /* Buffer acquires */ -+ /*******************/ -+ -+/** -+ * qbman_swp_acquire() - Issue a buffer acquire command. -+ * @s: the software portal object. -+ * @bpid: the buffer pool index. -+ * @buffers: a pointer pointing to the acquired buffer address|es. -+ * @num_buffers: number of buffers to be acquired, must be less than 8. -+ * -+ * Return 0 for success, or negative error code if the acquire command -+ * fails. -+ */ -+int qbman_swp_acquire(struct qbman_swp *, uint32_t bpid, uint64_t *buffers, -+ unsigned int num_buffers); -+ -+ /*****************/ -+ /* FQ management */ -+ /*****************/ -+ -+/** -+ * qbman_swp_fq_schedule() - Move the fq to the scheduled state. -+ * @s: the software portal object. -+ * @fqid: the index of frame queue to be scheduled. -+ * -+ * There are a couple of different ways that a FQ can end up parked state, -+ * This schedules it. -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid); -+ -+/** -+ * qbman_swp_fq_force() - Force the FQ to fully scheduled state. -+ * @s: the software portal object. -+ * @fqid: the index of frame queue to be forced. -+ * -+ * Force eligible will force a tentatively-scheduled FQ to be fully-scheduled -+ * and thus be available for selection by any channel-dequeuing behaviour (push -+ * or pull). If the FQ is subsequently "dequeued" from the channel and is still -+ * empty at the time this happens, the resulting dq_entry will have no FD. -+ * (qbman_result_DQ_fd() will return NULL.) -+ * -+ * Return 0 for success, or negative error code for failure. 
-+ */ -+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid); -+ -+/** -+ * qbman_swp_fq_xon() -+ * qbman_swp_fq_xoff() - XON/XOFF the frame queue. -+ * @s: the software portal object. -+ * @fqid: the index of frame queue. -+ * -+ * These functions change the FQ flow-control stuff between XON/XOFF. (The -+ * default is XON.) This setting doesn't affect enqueues to the FQ, just -+ * dequeues. XOFF FQs will remain in the tenatively-scheduled state, even when -+ * non-empty, meaning they won't be selected for scheduled dequeuing. If a FQ is -+ * changed to XOFF after it had already become truly-scheduled to a channel, and -+ * a pull dequeue of that channel occurs that selects that FQ for dequeuing, -+ * then the resulting dq_entry will have no FD. (qbman_result_DQ_fd() will -+ * return NULL.) -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid); -+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid); -+ -+ /**********************/ -+ /* Channel management */ -+ /**********************/ -+ -+/* If the user has been allocated a channel object that is going to generate -+ * CDANs to another channel, then these functions will be necessary. -+ * CDAN-enabled channels only generate a single CDAN notification, after which -+ * it they need to be reenabled before they'll generate another. (The idea is -+ * that pull dequeuing will occur in reaction to the CDAN, followed by a -+ * reenable step.) Each function generates a distinct command to hardware, so a -+ * combination function is provided if the user wishes to modify the "context" -+ * (which shows up in each CDAN message) each time they reenable, as a single -+ * command to hardware. */ -+/** -+ * qbman_swp_CDAN_set_context() - Set CDAN context -+ * @s: the software portal object. -+ * @channelid: the channel index. -+ * @ctx: the context to be set in CDAN. -+ * -+ * Return 0 for success, or negative error code for failure. 
-+ */ -+int qbman_swp_CDAN_set_context(struct qbman_swp *, uint16_t channelid, -+ uint64_t ctx); -+ -+/** -+ * qbman_swp_CDAN_enable() - Enable CDAN for the channel. -+ * @s: the software portal object. -+ * @channelid: the index of the channel to generate CDAN. -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_CDAN_enable(struct qbman_swp *, uint16_t channelid); -+ -+/** -+ * qbman_swp_CDAN_disable() - disable CDAN for the channel. -+ * @s: the software portal object. -+ * @channelid: the index of the channel to generate CDAN. -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_CDAN_disable(struct qbman_swp *, uint16_t channelid); -+ -+/** -+ * qbman_swp_CDAN_set_context_enable() - Set CDAN contest and enable CDAN -+ * @s: the software portal object. -+ * @channelid: the index of the channel to generate CDAN. -+ * @ctx: the context set in CDAN. -+ * -+ * Return 0 for success, or negative error code for failure. -+ */ -+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *, uint16_t channelid, -+ uint64_t ctx); -+ -+#endif /* !_FSL_QBMAN_PORTAL_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c -new file mode 100644 -index 0000000..12e33d3 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.c -@@ -0,0 +1,846 @@ -+/* Copyright (C) 2015 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include "qbman_portal.h" -+#include "qbman_debug.h" -+#include "fsl_qbman_portal.h" -+ -+/* QBMan portal management command code */ -+#define QBMAN_BP_QUERY 0x32 -+#define QBMAN_FQ_QUERY 0x44 -+#define QBMAN_FQ_QUERY_NP 0x45 -+#define QBMAN_CGR_QUERY 0x51 -+#define QBMAN_WRED_QUERY 0x54 -+#define QBMAN_CGR_STAT_QUERY 0x55 -+#define QBMAN_CGR_STAT_QUERY_CLR 0x56 -+ -+enum qbman_attr_usage_e { -+ qbman_attr_usage_fq, -+ qbman_attr_usage_bpool, -+ qbman_attr_usage_cgr, -+}; -+ -+struct int_qbman_attr { -+ uint32_t words[32]; -+ enum qbman_attr_usage_e usage; -+}; -+ -+#define attr_type_set(a, e) \ -+{ \ -+ struct qbman_attr *__attr = a; \ -+ enum qbman_attr_usage_e __usage = e; \ -+ ((struct int_qbman_attr *)__attr)->usage = __usage; \ -+} -+ -+#define ATTR32(d) (&(d)->dont_manipulate_directly[0]) -+#define ATTR32_1(d) (&(d)->dont_manipulate_directly[16]) -+ -+static struct qb_attr_code code_bp_bpid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_bp_bdi = QB_CODE(1, 16, 1); -+static struct qb_attr_code code_bp_va = QB_CODE(1, 17, 1); -+static struct qb_attr_code code_bp_wae = QB_CODE(1, 18, 1); -+static struct qb_attr_code code_bp_swdet = QB_CODE(4, 0, 16); -+static struct qb_attr_code code_bp_swdxt = QB_CODE(4, 16, 16); -+static struct qb_attr_code code_bp_hwdet = QB_CODE(5, 0, 16); -+static struct qb_attr_code code_bp_hwdxt = QB_CODE(5, 16, 16); -+static struct qb_attr_code code_bp_swset = QB_CODE(6, 0, 16); -+static struct qb_attr_code code_bp_swsxt = QB_CODE(6, 16, 16); -+static struct qb_attr_code code_bp_vbpid = QB_CODE(7, 0, 14); -+static struct qb_attr_code code_bp_icid = QB_CODE(7, 16, 15); -+static struct qb_attr_code code_bp_pl = QB_CODE(7, 31, 1); -+static struct qb_attr_code code_bp_bpscn_addr_lo = QB_CODE(8, 0, 32); -+static struct qb_attr_code code_bp_bpscn_addr_hi = QB_CODE(9, 0, 32); -+static struct qb_attr_code code_bp_bpscn_ctx_lo = QB_CODE(10, 0, 32); -+static struct qb_attr_code code_bp_bpscn_ctx_hi = QB_CODE(11, 0, 32); 
-+static struct qb_attr_code code_bp_hw_targ = QB_CODE(12, 0, 16); -+static struct qb_attr_code code_bp_state = QB_CODE(1, 24, 3); -+static struct qb_attr_code code_bp_fill = QB_CODE(2, 0, 32); -+static struct qb_attr_code code_bp_hdptr = QB_CODE(3, 0, 32); -+static struct qb_attr_code code_bp_sdcnt = QB_CODE(13, 0, 8); -+static struct qb_attr_code code_bp_hdcnt = QB_CODE(13, 1, 8); -+static struct qb_attr_code code_bp_sscnt = QB_CODE(13, 2, 8); -+ -+void qbman_bp_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_bpool); -+} -+ -+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, -+ struct qbman_attr *a) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t *attr = ATTR32(a); -+ -+ qbman_bp_attr_clear(a); -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_bp_bpid, p, bpid); -+ -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_BP_QUERY); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != QBMAN_BP_QUERY); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query of BPID 0x%x failed, code=0x%02x\n", bpid, rslt); -+ return -EIO; -+ } -+ -+ /* For the query, word[0] of the result contains only the -+ * verb/rslt fields, so skip word[0]. 
-+ */ -+ word_copy(&attr[1], &p[1], 15); -+ return 0; -+} -+ -+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bdi = !!qb_attr_code_decode(&code_bp_bdi, p); -+ *va = !!qb_attr_code_decode(&code_bp_va, p); -+ *wae = !!qb_attr_code_decode(&code_bp_wae, p); -+} -+ -+static uint32_t qbman_bp_thresh_to_value(uint32_t val) -+{ -+ return (val & 0xff) << ((val & 0xf00) >> 8); -+} -+ -+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdet, -+ p)); -+} -+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swdxt, -+ p)); -+} -+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hwdet = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdet, -+ p)); -+} -+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hwdxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_hwdxt, -+ p)); -+} -+ -+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swset = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swset, -+ p)); -+} -+ -+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *swsxt = qbman_bp_thresh_to_value(qb_attr_code_decode(&code_bp_swsxt, -+ p)); -+} -+ -+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *vbpid = qb_attr_code_decode(&code_bp_vbpid, p); -+} -+ -+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *icid = qb_attr_code_decode(&code_bp_icid, p); -+ *pl = !!qb_attr_code_decode(&code_bp_pl, p); -+} -+ -+void 
qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bpscn_addr = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_hi, -+ p) << 32) | -+ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_addr_lo, -+ p); -+} -+ -+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *bpscn_ctx = ((uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_hi, p) -+ << 32) | -+ (uint64_t)qb_attr_code_decode(&code_bp_bpscn_ctx_lo, -+ p); -+} -+ -+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ *hw_targ = qb_attr_code_decode(&code_bp_hw_targ, p); -+} -+ -+int qbman_bp_info_has_free_bufs(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return !(int)(qb_attr_code_decode(&code_bp_state, p) & 0x1); -+} -+ -+int qbman_bp_info_is_depleted(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x2); -+} -+ -+int qbman_bp_info_is_surplus(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return (int)(qb_attr_code_decode(&code_bp_state, p) & 0x4); -+} -+ -+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_fill, p); -+} -+ -+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_hdptr, p); -+} -+ -+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_sdcnt, p); -+} -+ -+uint32_t qbman_bp_info_hdcnt(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_hdcnt, p); -+} -+ -+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a) -+{ -+ uint32_t *p = ATTR32(a); -+ -+ return qb_attr_code_decode(&code_bp_sscnt, p); -+} -+ -+static struct qb_attr_code code_fq_fqid = QB_CODE(1, 0, 24); -+static struct 
qb_attr_code code_fq_cgrid = QB_CODE(2, 16, 16); -+static struct qb_attr_code code_fq_destwq = QB_CODE(3, 0, 15); -+static struct qb_attr_code code_fq_fqctrl = QB_CODE(3, 24, 8); -+static struct qb_attr_code code_fq_icscred = QB_CODE(4, 0, 15); -+static struct qb_attr_code code_fq_tdthresh = QB_CODE(4, 16, 13); -+static struct qb_attr_code code_fq_oa_len = QB_CODE(5, 0, 12); -+static struct qb_attr_code code_fq_oa_ics = QB_CODE(5, 14, 1); -+static struct qb_attr_code code_fq_oa_cgr = QB_CODE(5, 15, 1); -+static struct qb_attr_code code_fq_mctl_bdi = QB_CODE(5, 24, 1); -+static struct qb_attr_code code_fq_mctl_ff = QB_CODE(5, 25, 1); -+static struct qb_attr_code code_fq_mctl_va = QB_CODE(5, 26, 1); -+static struct qb_attr_code code_fq_mctl_ps = QB_CODE(5, 27, 1); -+static struct qb_attr_code code_fq_ctx_lower32 = QB_CODE(6, 0, 32); -+static struct qb_attr_code code_fq_ctx_upper32 = QB_CODE(7, 0, 32); -+static struct qb_attr_code code_fq_icid = QB_CODE(8, 0, 15); -+static struct qb_attr_code code_fq_pl = QB_CODE(8, 15, 1); -+static struct qb_attr_code code_fq_vfqid = QB_CODE(9, 0, 24); -+static struct qb_attr_code code_fq_erfqid = QB_CODE(10, 0, 24); -+ -+void qbman_fq_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_fq); -+} -+ -+/* FQ query function for programmable fields */ -+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, struct qbman_attr *desc) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t *d = ATTR32(desc); -+ -+ qbman_fq_attr_clear(desc); -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ qb_attr_code_encode(&code_fq_fqid, p, fqid); -+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != QBMAN_FQ_QUERY); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query of FQID 0x%x 
failed, code=0x%02x\n", -+ fqid, rslt); -+ return -EIO; -+ } -+ /* For the configure, word[0] of the command contains only the WE-mask. -+ * For the query, word[0] of the result contains only the verb/rslt -+ * fields. Skip word[0] in the latter case. */ -+ word_copy(&d[1], &p[1], 15); -+ return 0; -+} -+ -+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *fqctrl = qb_attr_code_decode(&code_fq_fqctrl, p); -+} -+ -+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *cgrid = qb_attr_code_decode(&code_fq_cgrid, p); -+} -+ -+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *destwq = qb_attr_code_decode(&code_fq_destwq, p); -+} -+ -+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *icscred = qb_attr_code_decode(&code_fq_icscred, p); -+} -+ -+static struct qb_attr_code code_tdthresh_exp = QB_CODE(0, 0, 5); -+static struct qb_attr_code code_tdthresh_mant = QB_CODE(0, 5, 8); -+static uint32_t qbman_thresh_to_value(uint32_t val) -+{ -+ uint32_t m, e; -+ -+ m = qb_attr_code_decode(&code_tdthresh_mant, &val); -+ e = qb_attr_code_decode(&code_tdthresh_exp, &val); -+ return m << e; -+} -+ -+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *tdthresh = qbman_thresh_to_value(qb_attr_code_decode(&code_fq_tdthresh, -+ p)); -+} -+ -+void qbman_fq_attr_get_oa(struct qbman_attr *d, -+ int *oa_ics, int *oa_cgr, int32_t *oa_len) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *oa_ics = !!qb_attr_code_decode(&code_fq_oa_ics, p); -+ *oa_cgr = !!qb_attr_code_decode(&code_fq_oa_cgr, p); -+ *oa_len = qb_attr_code_makesigned(&code_fq_oa_len, -+ qb_attr_code_decode(&code_fq_oa_len, p)); -+} -+ -+void qbman_fq_attr_get_mctl(struct qbman_attr *d, -+ int *bdi, int *ff, int *va, int *ps) -+{ -+ uint32_t *p = 
ATTR32(d); -+ -+ *bdi = !!qb_attr_code_decode(&code_fq_mctl_bdi, p); -+ *ff = !!qb_attr_code_decode(&code_fq_mctl_ff, p); -+ *va = !!qb_attr_code_decode(&code_fq_mctl_va, p); -+ *ps = !!qb_attr_code_decode(&code_fq_mctl_ps, p); -+} -+ -+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *hi = qb_attr_code_decode(&code_fq_ctx_upper32, p); -+ *lo = qb_attr_code_decode(&code_fq_ctx_lower32, p); -+} -+ -+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *icid = qb_attr_code_decode(&code_fq_icid, p); -+ *pl = !!qb_attr_code_decode(&code_fq_pl, p); -+} -+ -+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *vfqid = qb_attr_code_decode(&code_fq_vfqid, p); -+} -+ -+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid) -+{ -+ uint32_t *p = ATTR32(d); -+ -+ *erfqid = qb_attr_code_decode(&code_fq_erfqid, p); -+} -+ -+/* Query FQ Non-Programmalbe Fields */ -+static struct qb_attr_code code_fq_np_state = QB_CODE(0, 16, 3); -+static struct qb_attr_code code_fq_np_fe = QB_CODE(0, 19, 1); -+static struct qb_attr_code code_fq_np_x = QB_CODE(0, 20, 1); -+static struct qb_attr_code code_fq_np_r = QB_CODE(0, 21, 1); -+static struct qb_attr_code code_fq_np_oe = QB_CODE(0, 22, 1); -+static struct qb_attr_code code_fq_np_frm_cnt = QB_CODE(6, 0, 24); -+static struct qb_attr_code code_fq_np_byte_cnt = QB_CODE(7, 0, 32); -+ -+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *state) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t *d = ATTR32(state); -+ -+ qbman_fq_attr_clear(state); -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ qb_attr_code_encode(&code_fq_fqid, p, fqid); -+ p = qbman_swp_mc_complete(s, p, QBMAN_FQ_QUERY_NP); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = 
qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != QBMAN_FQ_QUERY_NP); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query NP fields of FQID 0x%x failed, code=0x%02x\n", -+ fqid, rslt); -+ return -EIO; -+ } -+ word_copy(&d[0], &p[0], 16); -+ return 0; -+} -+ -+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_state, p); -+} -+ -+int qbman_fq_state_force_eligible(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_fe, p); -+} -+ -+int qbman_fq_state_xoff(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_x, p); -+} -+ -+int qbman_fq_state_retirement_pending(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_r, p); -+} -+ -+int qbman_fq_state_overflow_error(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return !!qb_attr_code_decode(&code_fq_np_oe, p); -+} -+ -+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_frm_cnt, p); -+} -+ -+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state) -+{ -+ const uint32_t *p = ATTR32(state); -+ -+ return qb_attr_code_decode(&code_fq_np_byte_cnt, p); -+} -+ -+/* Query CGR */ -+static struct qb_attr_code code_cgr_cgid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_cgr_cscn_wq_en_enter = QB_CODE(2, 0, 1); -+static struct qb_attr_code code_cgr_cscn_wq_en_exit = QB_CODE(2, 1, 1); -+static struct qb_attr_code code_cgr_cscn_wq_icd = QB_CODE(2, 2, 1); -+static struct qb_attr_code code_cgr_mode = QB_CODE(3, 16, 2); -+static struct qb_attr_code code_cgr_rej_cnt_mode = QB_CODE(3, 18, 1); -+static struct qb_attr_code 
code_cgr_cscn_bdi = QB_CODE(3, 19, 1); -+static struct qb_attr_code code_cgr_cscn_wr_en_enter = QB_CODE(3, 24, 1); -+static struct qb_attr_code code_cgr_cscn_wr_en_exit = QB_CODE(3, 25, 1); -+static struct qb_attr_code code_cgr_cg_wr_ae = QB_CODE(3, 26, 1); -+static struct qb_attr_code code_cgr_cscn_dcp_en = QB_CODE(3, 27, 1); -+static struct qb_attr_code code_cgr_cg_wr_va = QB_CODE(3, 28, 1); -+static struct qb_attr_code code_cgr_i_cnt_wr_en = QB_CODE(4, 0, 1); -+static struct qb_attr_code code_cgr_i_cnt_wr_bnd = QB_CODE(4, 1, 5); -+static struct qb_attr_code code_cgr_td_en = QB_CODE(4, 8, 1); -+static struct qb_attr_code code_cgr_cs_thres = QB_CODE(4, 16, 13); -+static struct qb_attr_code code_cgr_cs_thres_x = QB_CODE(5, 0, 13); -+static struct qb_attr_code code_cgr_td_thres = QB_CODE(5, 16, 13); -+static struct qb_attr_code code_cgr_cscn_tdcp = QB_CODE(6, 0, 16); -+static struct qb_attr_code code_cgr_cscn_wqid = QB_CODE(6, 16, 16); -+static struct qb_attr_code code_cgr_cscn_vcgid = QB_CODE(7, 0, 16); -+static struct qb_attr_code code_cgr_cg_icid = QB_CODE(7, 16, 15); -+static struct qb_attr_code code_cgr_cg_pl = QB_CODE(7, 31, 1); -+static struct qb_attr_code code_cgr_cg_wr_addr_lo = QB_CODE(8, 0, 32); -+static struct qb_attr_code code_cgr_cg_wr_addr_hi = QB_CODE(9, 0, 32); -+static struct qb_attr_code code_cgr_cscn_ctx_lo = QB_CODE(10, 0, 32); -+static struct qb_attr_code code_cgr_cscn_ctx_hi = QB_CODE(11, 0, 32); -+ -+void qbman_cgr_attr_clear(struct qbman_attr *a) -+{ -+ memset(a, 0, sizeof(*a)); -+ attr_type_set(a, qbman_attr_usage_cgr); -+} -+ -+int qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, struct qbman_attr *attr) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t *d[2]; -+ int i; -+ uint32_t query_verb; -+ -+ d[0] = ATTR32(attr); -+ d[1] = ATTR32_1(attr); -+ -+ qbman_cgr_attr_clear(attr); -+ -+ for (i = 0; i < 2; i++) { -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ query_verb = i ? 
QBMAN_WRED_QUERY : QBMAN_CGR_QUERY; -+ -+ qb_attr_code_encode(&code_cgr_cgid, p, cgid); -+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != query_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query CGID 0x%x failed,", cgid); -+ pr_err(" verb=0x%02x, code=0x%02x\n", verb, rslt); -+ return -EIO; -+ } -+ /* For the configure, word[0] of the command contains only the -+ * verb/cgid. For the query, word[0] of the result contains -+ * only the verb/rslt fields. Skip word[0] in the latter case. -+ */ -+ word_copy(&d[i][1], &p[1], 15); -+ } -+ return 0; -+} -+ -+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, -+ int *cscn_wq_en_exit, int *cscn_wq_icd) -+ { -+ uint32_t *p = ATTR32(d); -+ *cscn_wq_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_enter, -+ p); -+ *cscn_wq_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wq_en_exit, p); -+ *cscn_wq_icd = !!qb_attr_code_decode(&code_cgr_cscn_wq_icd, p); -+} -+ -+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, -+ int *rej_cnt_mode, int *cscn_bdi) -+{ -+ uint32_t *p = ATTR32(d); -+ *mode = qb_attr_code_decode(&code_cgr_mode, p); -+ *rej_cnt_mode = !!qb_attr_code_decode(&code_cgr_rej_cnt_mode, p); -+ *cscn_bdi = !!qb_attr_code_decode(&code_cgr_cscn_bdi, p); -+} -+ -+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, -+ int *cscn_wr_en_exit, int *cg_wr_ae, -+ int *cscn_dcp_en, int *cg_wr_va) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_wr_en_enter = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_enter, -+ p); -+ *cscn_wr_en_exit = !!qb_attr_code_decode(&code_cgr_cscn_wr_en_exit, p); -+ *cg_wr_ae = !!qb_attr_code_decode(&code_cgr_cg_wr_ae, p); -+ *cscn_dcp_en = !!qb_attr_code_decode(&code_cgr_cscn_dcp_en, p); -+ *cg_wr_va = 
!!qb_attr_code_decode(&code_cgr_cg_wr_va, p); -+} -+ -+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, -+ uint32_t *i_cnt_wr_bnd) -+{ -+ uint32_t *p = ATTR32(d); -+ *i_cnt_wr_en = !!qb_attr_code_decode(&code_cgr_i_cnt_wr_en, p); -+ *i_cnt_wr_bnd = qb_attr_code_decode(&code_cgr_i_cnt_wr_bnd, p); -+} -+ -+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en) -+{ -+ uint32_t *p = ATTR32(d); -+ *td_en = !!qb_attr_code_decode(&code_cgr_td_en, p); -+} -+ -+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres) -+{ -+ uint32_t *p = ATTR32(d); -+ *cs_thres = qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_cs_thres, p)); -+} -+ -+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, -+ uint32_t *cs_thres_x) -+{ -+ uint32_t *p = ATTR32(d); -+ *cs_thres_x = qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_cs_thres_x, p)); -+} -+ -+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres) -+{ -+ uint32_t *p = ATTR32(d); -+ *td_thres = qbman_thresh_to_value(qb_attr_code_decode( -+ &code_cgr_td_thres, p)); -+} -+ -+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_tdcp = qb_attr_code_decode(&code_cgr_cscn_tdcp, p); -+} -+ -+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_wqid = qb_attr_code_decode(&code_cgr_cscn_wqid, p); -+} -+ -+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, -+ uint32_t *cscn_vcgid) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_vcgid = qb_attr_code_decode(&code_cgr_cscn_vcgid, p); -+} -+ -+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, -+ int *pl) -+{ -+ uint32_t *p = ATTR32(d); -+ *icid = qb_attr_code_decode(&code_cgr_cg_icid, p); -+ *pl = !!qb_attr_code_decode(&code_cgr_cg_pl, p); -+} -+ -+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, -+ uint64_t *cg_wr_addr) -+{ -+ uint32_t *p = ATTR32(d); -+ 
*cg_wr_addr = ((uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_hi, -+ p) << 32) | -+ (uint64_t)qb_attr_code_decode(&code_cgr_cg_wr_addr_lo, -+ p); -+} -+ -+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx) -+{ -+ uint32_t *p = ATTR32(d); -+ *cscn_ctx = ((uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_hi, p) -+ << 32) | -+ (uint64_t)qb_attr_code_decode(&code_cgr_cscn_ctx_lo, p); -+} -+ -+#define WRED_EDP_WORD(n) (18 + n/4) -+#define WRED_EDP_OFFSET(n) (8 * (n % 4)) -+#define WRED_PARM_DP_WORD(n) (n + 20) -+#define WRED_WE_EDP(n) (16 + n * 2) -+#define WRED_WE_PARM_DP(n) (17 + n * 2) -+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, -+ int *edp) -+{ -+ uint32_t *p = ATTR32(d); -+ struct qb_attr_code code_wred_edp = QB_CODE(WRED_EDP_WORD(idx), -+ WRED_EDP_OFFSET(idx), 8); -+ *edp = (int)qb_attr_code_decode(&code_wred_edp, p); -+} -+ -+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, -+ uint64_t *maxth, uint8_t *maxp) -+{ -+ uint8_t ma, mn, step_i, step_s, pn; -+ -+ ma = (uint8_t)(dp >> 24); -+ mn = (uint8_t)(dp >> 19) & 0x1f; -+ step_i = (uint8_t)(dp >> 11); -+ step_s = (uint8_t)(dp >> 6) & 0x1f; -+ pn = (uint8_t)dp & 0x3f; -+ -+ *maxp = ((pn<<2) * 100)/256; -+ -+ if (mn == 0) -+ *maxth = ma; -+ else -+ *maxth = ((ma+256) * (1<<(mn-1))); -+ -+ if (step_s == 0) -+ *minth = *maxth - step_i; -+ else -+ *minth = *maxth - (256 + step_i) * (1<<(step_s - 1)); -+} -+ -+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, -+ uint32_t *dp) -+{ -+ uint32_t *p = ATTR32(d); -+ struct qb_attr_code code_wred_parm_dp = QB_CODE(WRED_PARM_DP_WORD(idx), -+ 0, 8); -+ *dp = qb_attr_code_decode(&code_wred_parm_dp, p); -+} -+ -+/* Query CGR/CCGR/CQ statistics */ -+static struct qb_attr_code code_cgr_stat_ct = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_cgr_stat_frame_cnt_lo = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_cgr_stat_frame_cnt_hi = QB_CODE(5, 0, 8); -+static struct 
qb_attr_code code_cgr_stat_byte_cnt_lo = QB_CODE(6, 0, 32); -+static struct qb_attr_code code_cgr_stat_byte_cnt_hi = QB_CODE(7, 0, 16); -+static int qbman_cgr_statistics_query(struct qbman_swp *s, uint32_t cgid, -+ int clear, uint32_t command_type, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ uint32_t query_verb; -+ uint32_t hi, lo; -+ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ qb_attr_code_encode(&code_cgr_cgid, p, cgid); -+ if (command_type < 2) -+ qb_attr_code_encode(&code_cgr_stat_ct, p, command_type); -+ query_verb = clear ? -+ QBMAN_CGR_STAT_QUERY_CLR : QBMAN_CGR_STAT_QUERY; -+ p = qbman_swp_mc_complete(s, p, p[0] | query_verb); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != query_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Query statistics of CGID 0x%x failed,", cgid); -+ pr_err(" verb=0x%02x code=0x%02x\n", verb, rslt); -+ return -EIO; -+ } -+ -+ if (*frame_cnt) { -+ hi = qb_attr_code_decode(&code_cgr_stat_frame_cnt_hi, p); -+ lo = qb_attr_code_decode(&code_cgr_stat_frame_cnt_lo, p); -+ *frame_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; -+ } -+ if (*byte_cnt) { -+ hi = qb_attr_code_decode(&code_cgr_stat_byte_cnt_hi, p); -+ lo = qb_attr_code_decode(&code_cgr_stat_byte_cnt_lo, p); -+ *byte_cnt = ((uint64_t)hi << 32) | (uint64_t)lo; -+ } -+ -+ return 0; -+} -+ -+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 0xff, -+ frame_cnt, byte_cnt); -+} -+ -+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 1, -+ frame_cnt, byte_cnt); -+} -+ -+int 
qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt) -+{ -+ return qbman_cgr_statistics_query(s, cgid, clear, 0, -+ frame_cnt, byte_cnt); -+} -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h -new file mode 100644 -index 0000000..1e6b002 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_debug.h -@@ -0,0 +1,136 @@ -+/* Copyright (C) 2015 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+struct qbman_attr { -+ uint32_t dont_manipulate_directly[40]; -+}; -+ -+/* Buffer pool query commands */ -+int qbman_bp_query(struct qbman_swp *s, uint32_t bpid, -+ struct qbman_attr *a); -+void qbman_bp_attr_get_bdi(struct qbman_attr *a, int *bdi, int *va, int *wae); -+void qbman_bp_attr_get_swdet(struct qbman_attr *a, uint32_t *swdet); -+void qbman_bp_attr_get_swdxt(struct qbman_attr *a, uint32_t *swdxt); -+void qbman_bp_attr_get_hwdet(struct qbman_attr *a, uint32_t *hwdet); -+void qbman_bp_attr_get_hwdxt(struct qbman_attr *a, uint32_t *hwdxt); -+void qbman_bp_attr_get_swset(struct qbman_attr *a, uint32_t *swset); -+void qbman_bp_attr_get_swsxt(struct qbman_attr *a, uint32_t *swsxt); -+void qbman_bp_attr_get_vbpid(struct qbman_attr *a, uint32_t *vbpid); -+void qbman_bp_attr_get_icid(struct qbman_attr *a, uint32_t *icid, int *pl); -+void qbman_bp_attr_get_bpscn_addr(struct qbman_attr *a, uint64_t *bpscn_addr); -+void qbman_bp_attr_get_bpscn_ctx(struct qbman_attr *a, uint64_t *bpscn_ctx); -+void qbman_bp_attr_get_hw_targ(struct qbman_attr *a, uint32_t *hw_targ); -+int qbman_bp_info_has_free_bufs(struct qbman_attr *a); -+int qbman_bp_info_is_depleted(struct qbman_attr *a); -+int qbman_bp_info_is_surplus(struct qbman_attr *a); -+uint32_t qbman_bp_info_num_free_bufs(struct qbman_attr *a); -+uint32_t qbman_bp_info_hdptr(struct qbman_attr *a); -+uint32_t qbman_bp_info_sdcnt(struct qbman_attr *a); -+uint32_t qbman_bp_info_hdcnt(struct 
qbman_attr *a); -+uint32_t qbman_bp_info_sscnt(struct qbman_attr *a); -+ -+/* FQ query function for programmable fields */ -+int qbman_fq_query(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *desc); -+void qbman_fq_attr_get_fqctrl(struct qbman_attr *d, uint32_t *fqctrl); -+void qbman_fq_attr_get_cgrid(struct qbman_attr *d, uint32_t *cgrid); -+void qbman_fq_attr_get_destwq(struct qbman_attr *d, uint32_t *destwq); -+void qbman_fq_attr_get_icscred(struct qbman_attr *d, uint32_t *icscred); -+void qbman_fq_attr_get_tdthresh(struct qbman_attr *d, uint32_t *tdthresh); -+void qbman_fq_attr_get_oa(struct qbman_attr *d, -+ int *oa_ics, int *oa_cgr, int32_t *oa_len); -+void qbman_fq_attr_get_mctl(struct qbman_attr *d, -+ int *bdi, int *ff, int *va, int *ps); -+void qbman_fq_attr_get_ctx(struct qbman_attr *d, uint32_t *hi, uint32_t *lo); -+void qbman_fq_attr_get_icid(struct qbman_attr *d, uint32_t *icid, int *pl); -+void qbman_fq_attr_get_vfqid(struct qbman_attr *d, uint32_t *vfqid); -+void qbman_fq_attr_get_erfqid(struct qbman_attr *d, uint32_t *erfqid); -+ -+/* FQ query command for non-programmable fields*/ -+enum qbman_fq_schedstate_e { -+ qbman_fq_schedstate_oos = 0, -+ qbman_fq_schedstate_retired, -+ qbman_fq_schedstate_tentatively_scheduled, -+ qbman_fq_schedstate_truly_scheduled, -+ qbman_fq_schedstate_parked, -+ qbman_fq_schedstate_held_active, -+}; -+ -+int qbman_fq_query_state(struct qbman_swp *s, uint32_t fqid, -+ struct qbman_attr *state); -+uint32_t qbman_fq_state_schedstate(const struct qbman_attr *state); -+int qbman_fq_state_force_eligible(const struct qbman_attr *state); -+int qbman_fq_state_xoff(const struct qbman_attr *state); -+int qbman_fq_state_retirement_pending(const struct qbman_attr *state); -+int qbman_fq_state_overflow_error(const struct qbman_attr *state); -+uint32_t qbman_fq_state_frame_count(const struct qbman_attr *state); -+uint32_t qbman_fq_state_byte_count(const struct qbman_attr *state); -+ -+/* CGR query */ -+int 
qbman_cgr_query(struct qbman_swp *s, uint32_t cgid, -+ struct qbman_attr *attr); -+void qbman_cgr_attr_get_ctl1(struct qbman_attr *d, int *cscn_wq_en_enter, -+ int *cscn_wq_en_exit, int *cscn_wq_icd); -+void qbman_cgr_attr_get_mode(struct qbman_attr *d, uint32_t *mode, -+ int *rej_cnt_mode, int *cscn_bdi); -+void qbman_cgr_attr_get_ctl2(struct qbman_attr *d, int *cscn_wr_en_enter, -+ int *cscn_wr_en_exit, int *cg_wr_ae, -+ int *cscn_dcp_en, int *cg_wr_va); -+void qbman_cgr_attr_get_iwc(struct qbman_attr *d, int *i_cnt_wr_en, -+ uint32_t *i_cnt_wr_bnd); -+void qbman_cgr_attr_get_tdc(struct qbman_attr *d, int *td_en); -+void qbman_cgr_attr_get_cs_thres(struct qbman_attr *d, uint32_t *cs_thres); -+void qbman_cgr_attr_get_cs_thres_x(struct qbman_attr *d, -+ uint32_t *cs_thres_x); -+void qbman_cgr_attr_get_td_thres(struct qbman_attr *d, uint32_t *td_thres); -+void qbman_cgr_attr_get_cscn_tdcp(struct qbman_attr *d, uint32_t *cscn_tdcp); -+void qbman_cgr_attr_get_cscn_wqid(struct qbman_attr *d, uint32_t *cscn_wqid); -+void qbman_cgr_attr_get_cscn_vcgid(struct qbman_attr *d, -+ uint32_t *cscn_vcgid); -+void qbman_cgr_attr_get_cg_icid(struct qbman_attr *d, uint32_t *icid, -+ int *pl); -+void qbman_cgr_attr_get_cg_wr_addr(struct qbman_attr *d, -+ uint64_t *cg_wr_addr); -+void qbman_cgr_attr_get_cscn_ctx(struct qbman_attr *d, uint64_t *cscn_ctx); -+void qbman_cgr_attr_wred_get_edp(struct qbman_attr *d, uint32_t idx, -+ int *edp); -+void qbman_cgr_attr_wred_dp_decompose(uint32_t dp, uint64_t *minth, -+ uint64_t *maxth, uint8_t *maxp); -+void qbman_cgr_attr_wred_get_parm_dp(struct qbman_attr *d, uint32_t idx, -+ uint32_t *dp); -+ -+/* CGR/CCGR/CQ statistics query */ -+int qbman_cgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -+int qbman_ccgr_reject_statistics(struct qbman_swp *s, uint32_t cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -+int qbman_cq_dequeue_statistics(struct qbman_swp *s, uint32_t 
cgid, int clear, -+ uint64_t *frame_cnt, uint64_t *byte_cnt); -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c -new file mode 100644 -index 0000000..6c5638b ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.c -@@ -0,0 +1,1212 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "qbman_portal.h" -+ -+/* QBMan portal management command codes */ -+#define QBMAN_MC_ACQUIRE 0x30 -+#define QBMAN_WQCHAN_CONFIGURE 0x46 -+ -+/* CINH register offsets */ -+#define QBMAN_CINH_SWP_EQAR 0x8c0 -+#define QBMAN_CINH_SWP_DQPI 0xa00 -+#define QBMAN_CINH_SWP_DCAP 0xac0 -+#define QBMAN_CINH_SWP_SDQCR 0xb00 -+#define QBMAN_CINH_SWP_RAR 0xcc0 -+#define QBMAN_CINH_SWP_ISR 0xe00 -+#define QBMAN_CINH_SWP_IER 0xe40 -+#define QBMAN_CINH_SWP_ISDR 0xe80 -+#define QBMAN_CINH_SWP_IIR 0xec0 -+ -+/* CENA register offsets */ -+#define QBMAN_CENA_SWP_EQCR(n) (0x000 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_DQRR(n) (0x200 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_RCR(n) (0x400 + ((uint32_t)(n) << 6)) -+#define QBMAN_CENA_SWP_CR 0x600 -+#define QBMAN_CENA_SWP_RR(vb) (0x700 + ((uint32_t)(vb) >> 1)) -+#define QBMAN_CENA_SWP_VDQCR 0x780 -+ -+/* Reverse mapping of QBMAN_CENA_SWP_DQRR() */ -+#define QBMAN_IDX_FROM_DQRR(p) (((unsigned long)p & 0x1ff) >> 6) -+ -+/* QBMan FQ management command codes */ -+#define QBMAN_FQ_SCHEDULE 0x48 -+#define QBMAN_FQ_FORCE 0x49 -+#define QBMAN_FQ_XON 0x4d -+#define QBMAN_FQ_XOFF 0x4e -+ -+/*******************************/ -+/* Pre-defined attribute codes */ -+/*******************************/ -+ -+struct qb_attr_code code_generic_verb = QB_CODE(0, 0, 7); -+struct qb_attr_code code_generic_rslt = QB_CODE(0, 8, 8); -+ -+/*************************/ -+/* SDQCR attribute codes */ 
-+/*************************/ -+ -+/* we put these here because at least some of them are required by -+ * qbman_swp_init() */ -+struct qb_attr_code code_sdqcr_dct = QB_CODE(0, 24, 2); -+struct qb_attr_code code_sdqcr_fc = QB_CODE(0, 29, 1); -+struct qb_attr_code code_sdqcr_tok = QB_CODE(0, 16, 8); -+#define CODE_SDQCR_DQSRC(n) QB_CODE(0, n, 1) -+enum qbman_sdqcr_dct { -+ qbman_sdqcr_dct_null = 0, -+ qbman_sdqcr_dct_prio_ics, -+ qbman_sdqcr_dct_active_ics, -+ qbman_sdqcr_dct_active -+}; -+enum qbman_sdqcr_fc { -+ qbman_sdqcr_fc_one = 0, -+ qbman_sdqcr_fc_up_to_3 = 1 -+}; -+struct qb_attr_code code_sdqcr_dqsrc = QB_CODE(0, 0, 16); -+ -+/*********************************/ -+/* Portal constructor/destructor */ -+/*********************************/ -+ -+/* Software portals should always be in the power-on state when we initialise, -+ * due to the CCSR-based portal reset functionality that MC has. -+ * -+ * Erk! Turns out that QMan versions prior to 4.1 do not correctly reset DQRR -+ * valid-bits, so we need to support a workaround where we don't trust -+ * valid-bits when detecting new entries until any stale ring entries have been -+ * overwritten at least once. The idea is that we read PI for the first few -+ * entries, then switch to valid-bit after that. The trick is to clear the -+ * bug-work-around boolean once the PI wraps around the ring for the first time. -+ * -+ * Note: this still carries a slight additional cost once the decrementer hits -+ * zero, so ideally the workaround should only be compiled in if the compiled -+ * image needs to support affected chips. We use WORKAROUND_DQRR_RESET_BUG for -+ * this. 
-+ */ -+struct qbman_swp *qbman_swp_init(const struct qbman_swp_desc *d) -+{ -+ int ret; -+ struct qbman_swp *p = kmalloc(sizeof(*p), GFP_KERNEL); -+ -+ if (!p) -+ return NULL; -+ p->desc = d; -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_start; -+#endif -+ p->mc.valid_bit = QB_VALID_BIT; -+ p->sdq = 0; -+ qb_attr_code_encode(&code_sdqcr_dct, &p->sdq, qbman_sdqcr_dct_prio_ics); -+ qb_attr_code_encode(&code_sdqcr_fc, &p->sdq, qbman_sdqcr_fc_up_to_3); -+ qb_attr_code_encode(&code_sdqcr_tok, &p->sdq, 0xbb); -+ atomic_set(&p->vdq.busy, 1); -+ p->vdq.valid_bit = QB_VALID_BIT; -+ p->dqrr.next_idx = 0; -+ p->dqrr.valid_bit = QB_VALID_BIT; -+ /* TODO: should also read PI/CI type registers and check that they're on -+ * PoR values. If we're asked to initialise portals that aren't in reset -+ * state, bad things will follow. */ -+#ifdef WORKAROUND_DQRR_RESET_BUG -+ p->dqrr.reset_bug = 1; -+#endif -+ if ((p->desc->qman_version & 0xFFFF0000) < QMAN_REV_4100) -+ p->dqrr.dqrr_size = 4; -+ else -+ p->dqrr.dqrr_size = 8; -+ ret = qbman_swp_sys_init(&p->sys, d, p->dqrr.dqrr_size); -+ if (ret) { -+ kfree(p); -+ pr_err("qbman_swp_sys_init() failed %d\n", ret); -+ return NULL; -+ } -+ /* SDQCR needs to be initialized to 0 when no channels are -+ being dequeued from or else the QMan HW will indicate an -+ error. 
The values that were calculated above will be -+ applied when dequeues from a specific channel are enabled */ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_SDQCR, 0); -+ return p; -+} -+ -+void qbman_swp_finish(struct qbman_swp *p) -+{ -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_start); -+#endif -+ qbman_swp_sys_finish(&p->sys); -+ kfree(p); -+} -+ -+const struct qbman_swp_desc *qbman_swp_get_desc(struct qbman_swp *p) -+{ -+ return p->desc; -+} -+ -+/**************/ -+/* Interrupts */ -+/**************/ -+ -+uint32_t qbman_swp_interrupt_get_vanish(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISDR); -+} -+ -+void qbman_swp_interrupt_set_vanish(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISDR, mask); -+} -+ -+uint32_t qbman_swp_interrupt_read_status(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_ISR); -+} -+ -+void qbman_swp_interrupt_clear_status(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_ISR, mask); -+} -+ -+uint32_t qbman_swp_interrupt_get_trigger(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IER); -+} -+ -+void qbman_swp_interrupt_set_trigger(struct qbman_swp *p, uint32_t mask) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IER, mask); -+} -+ -+int qbman_swp_interrupt_get_inhibit(struct qbman_swp *p) -+{ -+ return qbman_cinh_read(&p->sys, QBMAN_CINH_SWP_IIR); -+} -+ -+void qbman_swp_interrupt_set_inhibit(struct qbman_swp *p, int inhibit) -+{ -+ qbman_cinh_write(&p->sys, QBMAN_CINH_SWP_IIR, inhibit ? 0xffffffff : 0); -+} -+ -+/***********************/ -+/* Management commands */ -+/***********************/ -+ -+/* -+ * Internal code common to all types of management commands. 
-+ */ -+ -+void *qbman_swp_mc_start(struct qbman_swp *p) -+{ -+ void *ret; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_start); -+#endif -+ ret = qbman_cena_write_start(&p->sys, QBMAN_CENA_SWP_CR); -+#ifdef QBMAN_CHECKING -+ if (!ret) -+ p->mc.check = swp_mc_can_submit; -+#endif -+ return ret; -+} -+ -+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb) -+{ -+ uint32_t *v = cmd; -+#ifdef QBMAN_CHECKING -+ BUG_ON(!p->mc.check != swp_mc_can_submit); -+#endif -+ /* TBD: "|=" is going to hurt performance. Need to move as many fields -+ * out of word zero, and for those that remain, the "OR" needs to occur -+ * at the caller side. This debug check helps to catch cases where the -+ * caller wants to OR but has forgotten to do so. */ -+ BUG_ON((*v & cmd_verb) != *v); -+ *v = cmd_verb | p->mc.valid_bit; -+ qbman_cena_write_complete(&p->sys, QBMAN_CENA_SWP_CR, cmd); -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_poll; -+#endif -+} -+ -+void *qbman_swp_mc_result(struct qbman_swp *p) -+{ -+ uint32_t *ret, verb; -+#ifdef QBMAN_CHECKING -+ BUG_ON(p->mc.check != swp_mc_can_poll); -+#endif -+ qbman_cena_invalidate_prefetch(&p->sys, -+ QBMAN_CENA_SWP_RR(p->mc.valid_bit)); -+ ret = qbman_cena_read(&p->sys, QBMAN_CENA_SWP_RR(p->mc.valid_bit)); -+ /* Remove the valid-bit - command completed iff the rest is non-zero */ -+ verb = ret[0] & ~QB_VALID_BIT; -+ if (!verb) -+ return NULL; -+#ifdef QBMAN_CHECKING -+ p->mc.check = swp_mc_can_start; -+#endif -+ p->mc.valid_bit ^= QB_VALID_BIT; -+ return ret; -+} -+ -+/***********/ -+/* Enqueue */ -+/***********/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_eq_cmd = QB_CODE(0, 0, 2); -+static struct qb_attr_code code_eq_eqdi = QB_CODE(0, 3, 1); -+static struct qb_attr_code code_eq_dca_en = QB_CODE(0, 15, 1); -+static struct qb_attr_code code_eq_dca_pk = QB_CODE(0, 14, 1); -+static struct qb_attr_code code_eq_dca_idx = QB_CODE(0, 8, 2); -+static struct qb_attr_code 
code_eq_orp_en = QB_CODE(0, 2, 1); -+static struct qb_attr_code code_eq_orp_is_nesn = QB_CODE(0, 31, 1); -+static struct qb_attr_code code_eq_orp_nlis = QB_CODE(0, 30, 1); -+static struct qb_attr_code code_eq_orp_seqnum = QB_CODE(0, 16, 14); -+static struct qb_attr_code code_eq_opr_id = QB_CODE(1, 0, 16); -+static struct qb_attr_code code_eq_tgt_id = QB_CODE(2, 0, 24); -+/* static struct qb_attr_code code_eq_tag = QB_CODE(3, 0, 32); */ -+static struct qb_attr_code code_eq_qd_en = QB_CODE(0, 4, 1); -+static struct qb_attr_code code_eq_qd_bin = QB_CODE(4, 0, 16); -+static struct qb_attr_code code_eq_qd_pri = QB_CODE(4, 16, 4); -+static struct qb_attr_code code_eq_rsp_stash = QB_CODE(5, 16, 1); -+static struct qb_attr_code code_eq_rsp_id = QB_CODE(5, 24, 8); -+static struct qb_attr_code code_eq_rsp_lo = QB_CODE(6, 0, 32); -+ -+enum qbman_eq_cmd_e { -+ /* No enqueue, primarily for plugging ORP gaps for dropped frames */ -+ qbman_eq_cmd_empty, -+ /* DMA an enqueue response once complete */ -+ qbman_eq_cmd_respond, -+ /* DMA an enqueue response only if the enqueue fails */ -+ qbman_eq_cmd_respond_reject -+}; -+ -+void qbman_eq_desc_clear(struct qbman_eq_desc *d) -+{ -+ memset(d, 0, sizeof(*d)); -+} -+ -+void qbman_eq_desc_set_no_orp(struct qbman_eq_desc *d, int respond_success) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 0); -+ qb_attr_code_encode(&code_eq_cmd, cl, -+ respond_success ? qbman_eq_cmd_respond : -+ qbman_eq_cmd_respond_reject); -+} -+ -+void qbman_eq_desc_set_orp(struct qbman_eq_desc *d, int respond_success, -+ uint32_t opr_id, uint32_t seqnum, int incomplete) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, -+ respond_success ? 
qbman_eq_cmd_respond : -+ qbman_eq_cmd_respond_reject); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, !!incomplete); -+} -+ -+void qbman_eq_desc_set_orp_hole(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); -+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 0); -+} -+ -+void qbman_eq_desc_set_orp_nesn(struct qbman_eq_desc *d, uint32_t opr_id, -+ uint32_t seqnum) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_orp_en, cl, 1); -+ qb_attr_code_encode(&code_eq_cmd, cl, qbman_eq_cmd_empty); -+ qb_attr_code_encode(&code_eq_opr_id, cl, opr_id); -+ qb_attr_code_encode(&code_eq_orp_seqnum, cl, seqnum); -+ qb_attr_code_encode(&code_eq_orp_nlis, cl, 0); -+ qb_attr_code_encode(&code_eq_orp_is_nesn, cl, 1); -+} -+ -+void qbman_eq_desc_set_response(struct qbman_eq_desc *d, -+ dma_addr_t storage_phys, -+ int stash) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode_64(&code_eq_rsp_lo, (uint64_t *)cl, storage_phys); -+ qb_attr_code_encode(&code_eq_rsp_stash, cl, !!stash); -+} -+ -+void qbman_eq_desc_set_token(struct qbman_eq_desc *d, uint8_t token) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_rsp_id, cl, (uint32_t)token); -+} -+ -+void qbman_eq_desc_set_fq(struct qbman_eq_desc *d, uint32_t fqid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_qd_en, cl, 0); -+ qb_attr_code_encode(&code_eq_tgt_id, cl, fqid); -+} -+ -+void qbman_eq_desc_set_qd(struct qbman_eq_desc *d, uint32_t qdid, -+ uint32_t qd_bin, uint32_t qd_prio) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_qd_en, cl, 
1); -+ qb_attr_code_encode(&code_eq_tgt_id, cl, qdid); -+ qb_attr_code_encode(&code_eq_qd_bin, cl, qd_bin); -+ qb_attr_code_encode(&code_eq_qd_pri, cl, qd_prio); -+} -+ -+void qbman_eq_desc_set_eqdi(struct qbman_eq_desc *d, int enable) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_eqdi, cl, !!enable); -+} -+ -+void qbman_eq_desc_set_dca(struct qbman_eq_desc *d, int enable, -+ uint32_t dqrr_idx, int park) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_eq_dca_en, cl, !!enable); -+ if (enable) { -+ qb_attr_code_encode(&code_eq_dca_pk, cl, !!park); -+ qb_attr_code_encode(&code_eq_dca_idx, cl, dqrr_idx); -+ } -+} -+ -+#define EQAR_IDX(eqar) ((eqar) & 0x7) -+#define EQAR_VB(eqar) ((eqar) & 0x80) -+#define EQAR_SUCCESS(eqar) ((eqar) & 0x100) -+ -+int qbman_swp_enqueue(struct qbman_swp *s, const struct qbman_eq_desc *d, -+ const struct qbman_fd *fd) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t eqar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_EQAR); -+ -+ pr_debug("EQAR=%08x\n", eqar); -+ if (!EQAR_SUCCESS(eqar)) -+ return -EBUSY; -+ p = qbman_cena_write_start(&s->sys, -+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar))); -+ word_copy(&p[1], &cl[1], 7); -+ word_copy(&p[8], fd, sizeof(*fd) >> 2); -+ /* Set the verb byte, have to substitute in the valid-bit */ -+ p[0] = cl[0] | EQAR_VB(eqar); -+ qbman_cena_write_complete(&s->sys, -+ QBMAN_CENA_SWP_EQCR(EQAR_IDX(eqar)), -+ p); -+ return 0; -+} -+ -+/*************************/ -+/* Static (push) dequeue */ -+/*************************/ -+ -+void qbman_swp_push_get(struct qbman_swp *s, uint8_t channel_idx, int *enabled) -+{ -+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); -+ -+ BUG_ON(channel_idx > 15); -+ *enabled = (int)qb_attr_code_decode(&code, &s->sdq); -+} -+ -+void qbman_swp_push_set(struct qbman_swp *s, uint8_t channel_idx, int enable) -+{ -+ uint16_t dqsrc; -+ struct qb_attr_code code = CODE_SDQCR_DQSRC(channel_idx); -+ -+ BUG_ON(channel_idx > 15); -+ 
qb_attr_code_encode(&code, &s->sdq, !!enable); -+ /* Read make the complete src map. If no channels are enabled -+ the SDQCR must be 0 or else QMan will assert errors */ -+ dqsrc = (uint16_t)qb_attr_code_decode(&code_sdqcr_dqsrc, &s->sdq); -+ if (dqsrc != 0) -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, s->sdq); -+ else -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_SDQCR, 0); -+} -+ -+/***************************/ -+/* Volatile (pull) dequeue */ -+/***************************/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_pull_dct = QB_CODE(0, 0, 2); -+static struct qb_attr_code code_pull_dt = QB_CODE(0, 2, 2); -+static struct qb_attr_code code_pull_rls = QB_CODE(0, 4, 1); -+static struct qb_attr_code code_pull_stash = QB_CODE(0, 5, 1); -+static struct qb_attr_code code_pull_numframes = QB_CODE(0, 8, 4); -+static struct qb_attr_code code_pull_token = QB_CODE(0, 16, 8); -+static struct qb_attr_code code_pull_dqsource = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_pull_rsp_lo = QB_CODE(2, 0, 32); -+ -+enum qb_pull_dt_e { -+ qb_pull_dt_channel, -+ qb_pull_dt_workqueue, -+ qb_pull_dt_framequeue -+}; -+ -+void qbman_pull_desc_clear(struct qbman_pull_desc *d) -+{ -+ memset(d, 0, sizeof(*d)); -+} -+ -+void qbman_pull_desc_set_storage(struct qbman_pull_desc *d, -+ struct dpaa2_dq *storage, -+ dma_addr_t storage_phys, -+ int stash) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ /* Squiggle the pointer 'storage' into the extra 2 words of the -+ * descriptor (which aren't copied to the hw command) */ -+ *(void **)&cl[4] = storage; -+ if (!storage) { -+ qb_attr_code_encode(&code_pull_rls, cl, 0); -+ return; -+ } -+ qb_attr_code_encode(&code_pull_rls, cl, 1); -+ qb_attr_code_encode(&code_pull_stash, cl, !!stash); -+ qb_attr_code_encode_64(&code_pull_rsp_lo, (uint64_t *)cl, storage_phys); -+} -+ -+void qbman_pull_desc_set_numframes(struct qbman_pull_desc *d, uint8_t numframes) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ BUG_ON(!numframes || 
(numframes > 16)); -+ qb_attr_code_encode(&code_pull_numframes, cl, -+ (uint32_t)(numframes - 1)); -+} -+ -+void qbman_pull_desc_set_token(struct qbman_pull_desc *d, uint8_t token) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_token, cl, token); -+} -+ -+void qbman_pull_desc_set_fq(struct qbman_pull_desc *d, uint32_t fqid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, 1); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_framequeue); -+ qb_attr_code_encode(&code_pull_dqsource, cl, fqid); -+} -+ -+void qbman_pull_desc_set_wq(struct qbman_pull_desc *d, uint32_t wqid, -+ enum qbman_pull_type_e dct) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, dct); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_workqueue); -+ qb_attr_code_encode(&code_pull_dqsource, cl, wqid); -+} -+ -+void qbman_pull_desc_set_channel(struct qbman_pull_desc *d, uint32_t chid, -+ enum qbman_pull_type_e dct) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_pull_dct, cl, dct); -+ qb_attr_code_encode(&code_pull_dt, cl, qb_pull_dt_channel); -+ qb_attr_code_encode(&code_pull_dqsource, cl, chid); -+} -+ -+int qbman_swp_pull(struct qbman_swp *s, struct qbman_pull_desc *d) -+{ -+ uint32_t *p; -+ uint32_t *cl = qb_cl(d); -+ -+ if (!atomic_dec_and_test(&s->vdq.busy)) { -+ atomic_inc(&s->vdq.busy); -+ return -EBUSY; -+ } -+ s->vdq.storage = *(void **)&cl[4]; -+ qb_attr_code_encode(&code_pull_token, cl, 1); -+ p = qbman_cena_write_start(&s->sys, QBMAN_CENA_SWP_VDQCR); -+ word_copy(&p[1], &cl[1], 3); -+ /* Set the verb byte, have to substitute in the valid-bit */ -+ p[0] = cl[0] | s->vdq.valid_bit; -+ s->vdq.valid_bit ^= QB_VALID_BIT; -+ qbman_cena_write_complete(&s->sys, QBMAN_CENA_SWP_VDQCR, p); -+ return 0; -+} -+ -+/****************/ -+/* Polling DQRR */ -+/****************/ -+ -+static struct qb_attr_code code_dqrr_verb = QB_CODE(0, 0, 8); -+static struct qb_attr_code code_dqrr_response = QB_CODE(0, 
0, 7); -+static struct qb_attr_code code_dqrr_stat = QB_CODE(0, 8, 8); -+static struct qb_attr_code code_dqrr_seqnum = QB_CODE(0, 16, 14); -+static struct qb_attr_code code_dqrr_odpid = QB_CODE(1, 0, 16); -+/* static struct qb_attr_code code_dqrr_tok = QB_CODE(1, 24, 8); */ -+static struct qb_attr_code code_dqrr_fqid = QB_CODE(2, 0, 24); -+static struct qb_attr_code code_dqrr_byte_count = QB_CODE(4, 0, 32); -+static struct qb_attr_code code_dqrr_frame_count = QB_CODE(5, 0, 24); -+static struct qb_attr_code code_dqrr_ctx_lo = QB_CODE(6, 0, 32); -+ -+#define QBMAN_RESULT_DQ 0x60 -+#define QBMAN_RESULT_FQRN 0x21 -+#define QBMAN_RESULT_FQRNI 0x22 -+#define QBMAN_RESULT_FQPN 0x24 -+#define QBMAN_RESULT_FQDAN 0x25 -+#define QBMAN_RESULT_CDAN 0x26 -+#define QBMAN_RESULT_CSCN_MEM 0x27 -+#define QBMAN_RESULT_CGCU 0x28 -+#define QBMAN_RESULT_BPSCN 0x29 -+#define QBMAN_RESULT_CSCN_WQ 0x2a -+ -+static struct qb_attr_code code_dqpi_pi = QB_CODE(0, 0, 4); -+ -+/* NULL return if there are no unconsumed DQRR entries. Returns a DQRR entry -+ * only once, so repeated calls can return a sequence of DQRR entries, without -+ * requiring they be consumed immediately or in any particular order. */ -+const struct dpaa2_dq *qbman_swp_dqrr_next(struct qbman_swp *s) -+{ -+ uint32_t verb; -+ uint32_t response_verb; -+ uint32_t flags; -+ const struct dpaa2_dq *dq; -+ const uint32_t *p; -+ -+ /* Before using valid-bit to detect if something is there, we have to -+ * handle the case of the DQRR reset bug... */ -+#ifdef WORKAROUND_DQRR_RESET_BUG -+ if (unlikely(s->dqrr.reset_bug)) { -+ /* We pick up new entries by cache-inhibited producer index, -+ * which means that a non-coherent mapping would require us to -+ * invalidate and read *only* once that PI has indicated that -+ * there's an entry here. The first trip around the DQRR ring -+ * will be much less efficient than all subsequent trips around -+ * it... 
-+ */ -+ uint32_t dqpi = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_DQPI); -+ uint32_t pi = qb_attr_code_decode(&code_dqpi_pi, &dqpi); -+ /* there are new entries iff pi != next_idx */ -+ if (pi == s->dqrr.next_idx) -+ return NULL; -+ /* if next_idx is/was the last ring index, and 'pi' is -+ * different, we can disable the workaround as all the ring -+ * entries have now been DMA'd to so valid-bit checking is -+ * repaired. Note: this logic needs to be based on next_idx -+ * (which increments one at a time), rather than on pi (which -+ * can burst and wrap-around between our snapshots of it). -+ */ -+ if (s->dqrr.next_idx == (s->dqrr.dqrr_size - 1)) { -+ pr_debug("DEBUG: next_idx=%d, pi=%d, clear reset bug\n", -+ s->dqrr.next_idx, pi); -+ s->dqrr.reset_bug = 0; -+ } -+ qbman_cena_invalidate_prefetch(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ } -+#endif -+ -+ dq = qbman_cena_read(&s->sys, QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ p = qb_cl(dq); -+ verb = qb_attr_code_decode(&code_dqrr_verb, p); -+ -+ /* If the valid-bit isn't of the expected polarity, nothing there. Note, -+ * in the DQRR reset bug workaround, we shouldn't need to skip these -+ * check, because we've already determined that a new entry is available -+ * and we've invalidated the cacheline before reading it, so the -+ * valid-bit behaviour is repaired and should tell us what we already -+ * knew from reading PI. -+ */ -+ if ((verb & QB_VALID_BIT) != s->dqrr.valid_bit) { -+ qbman_cena_invalidate_prefetch(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ return NULL; -+ } -+ /* There's something there. Move "next_idx" attention to the next ring -+ * entry (and prefetch it) before returning what we found. */ -+ s->dqrr.next_idx++; -+ s->dqrr.next_idx &= s->dqrr.dqrr_size - 1; /* Wrap around */ -+ /* TODO: it's possible to do all this without conditionals, optimise it -+ * later. 
*/ -+ if (!s->dqrr.next_idx) -+ s->dqrr.valid_bit ^= QB_VALID_BIT; -+ -+ /* If this is the final response to a volatile dequeue command -+ indicate that the vdq is no longer busy */ -+ flags = dpaa2_dq_flags(dq); -+ response_verb = qb_attr_code_decode(&code_dqrr_response, &verb); -+ if ((response_verb == QBMAN_RESULT_DQ) && -+ (flags & DPAA2_DQ_STAT_VOLATILE) && -+ (flags & DPAA2_DQ_STAT_EXPIRED)) -+ atomic_inc(&s->vdq.busy); -+ -+ qbman_cena_invalidate_prefetch(&s->sys, -+ QBMAN_CENA_SWP_DQRR(s->dqrr.next_idx)); -+ return dq; -+} -+ -+/* Consume DQRR entries previously returned from qbman_swp_dqrr_next(). */ -+void qbman_swp_dqrr_consume(struct qbman_swp *s, const struct dpaa2_dq *dq) -+{ -+ qbman_cinh_write(&s->sys, QBMAN_CINH_SWP_DCAP, QBMAN_IDX_FROM_DQRR(dq)); -+} -+ -+/*********************************/ -+/* Polling user-provided storage */ -+/*********************************/ -+ -+int qbman_result_has_new_result(struct qbman_swp *s, -+ const struct dpaa2_dq *dq) -+{ -+ /* To avoid converting the little-endian DQ entry to host-endian prior -+ * to us knowing whether there is a valid entry or not (and run the -+ * risk of corrupting the incoming hardware LE write), we detect in -+ * hardware endianness rather than host. This means we need a different -+ * "code" depending on whether we are BE or LE in software, which is -+ * where DQRR_TOK_OFFSET comes in... */ -+ static struct qb_attr_code code_dqrr_tok_detect = -+ QB_CODE(0, DQRR_TOK_OFFSET, 8); -+ /* The user trying to poll for a result treats "dq" as const. It is -+ * however the same address that was provided to us non-const in the -+ * first place, for directing hardware DMA to. So we can cast away the -+ * const because it is mutable from our perspective. 
*/ -+ uint32_t *p = qb_cl((struct dpaa2_dq *)dq); -+ uint32_t token; -+ -+ token = qb_attr_code_decode(&code_dqrr_tok_detect, &p[1]); -+ if (token != 1) -+ return 0; -+ qb_attr_code_encode(&code_dqrr_tok_detect, &p[1], 0); -+ -+ /* Only now do we convert from hardware to host endianness. Also, as we -+ * are returning success, the user has promised not to call us again, so -+ * there's no risk of us converting the endianness twice... */ -+ make_le32_n(p, 16); -+ -+ /* VDQCR "no longer busy" hook - not quite the same as DQRR, because the -+ * fact "VDQCR" shows busy doesn't mean that the result we're looking at -+ * is from the same command. Eg. we may be looking at our 10th dequeue -+ * result from our first VDQCR command, yet the second dequeue command -+ * could have been kicked off already, after seeing the 1st result. Ie. -+ * the result we're looking at is not necessarily proof that we can -+ * reset "busy". We instead base the decision on whether the current -+ * result is sitting at the first 'storage' location of the busy -+ * command. 
*/ -+ if (s->vdq.storage == dq) { -+ s->vdq.storage = NULL; -+ atomic_inc(&s->vdq.busy); -+ } -+ return 1; -+} -+ -+/********************************/ -+/* Categorising qbman_result */ -+/********************************/ -+ -+static struct qb_attr_code code_result_in_mem = -+ QB_CODE(0, QBMAN_RESULT_VERB_OFFSET_IN_MEM, 7); -+ -+static inline int __qbman_result_is_x(const struct dpaa2_dq *dq, uint32_t x) -+{ -+ const uint32_t *p = qb_cl(dq); -+ uint32_t response_verb = qb_attr_code_decode(&code_dqrr_response, p); -+ -+ return response_verb == x; -+} -+ -+static inline int __qbman_result_is_x_in_mem(const struct dpaa2_dq *dq, -+ uint32_t x) -+{ -+ const uint32_t *p = qb_cl(dq); -+ uint32_t response_verb = qb_attr_code_decode(&code_result_in_mem, p); -+ -+ return (response_verb == x); -+} -+ -+int qbman_result_is_DQ(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_DQ); -+} -+ -+int qbman_result_is_FQDAN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQDAN); -+} -+ -+int qbman_result_is_CDAN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_CDAN); -+} -+ -+int qbman_result_is_CSCN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CSCN_MEM) || -+ __qbman_result_is_x(dq, QBMAN_RESULT_CSCN_WQ); -+} -+ -+int qbman_result_is_BPSCN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_BPSCN); -+} -+ -+int qbman_result_is_CGCU(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_CGCU); -+} -+ -+int qbman_result_is_FQRN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRN); -+} -+ -+int qbman_result_is_FQRNI(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x_in_mem(dq, QBMAN_RESULT_FQRNI); -+} -+ -+int qbman_result_is_FQPN(const struct dpaa2_dq *dq) -+{ -+ return __qbman_result_is_x(dq, QBMAN_RESULT_FQPN); -+} -+ 
-+/*********************************/ -+/* Parsing frame dequeue results */ -+/*********************************/ -+ -+/* These APIs assume qbman_result_is_DQ() is TRUE */ -+ -+uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_stat, p); -+} -+ -+uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (uint16_t)qb_attr_code_decode(&code_dqrr_seqnum, p); -+} -+ -+uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (uint16_t)qb_attr_code_decode(&code_dqrr_odpid, p); -+} -+ -+uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_fqid, p); -+} -+ -+uint32_t dpaa2_dq_byte_count(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_byte_count, p); -+} -+ -+uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return qb_attr_code_decode(&code_dqrr_frame_count, p); -+} -+ -+uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq) -+{ -+ const uint64_t *p = (uint64_t *)qb_cl(dq); -+ -+ return qb_attr_code_decode_64(&code_dqrr_ctx_lo, p); -+} -+EXPORT_SYMBOL(dpaa2_dq_fqd_ctx); -+ -+const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq) -+{ -+ const uint32_t *p = qb_cl(dq); -+ -+ return (const struct dpaa2_fd *)&p[8]; -+} -+EXPORT_SYMBOL(dpaa2_dq_fd); -+ -+/**************************************/ -+/* Parsing state-change notifications */ -+/**************************************/ -+ -+static struct qb_attr_code code_scn_state = QB_CODE(0, 16, 8); -+static struct qb_attr_code code_scn_rid = QB_CODE(1, 0, 24); -+static struct qb_attr_code code_scn_state_in_mem = -+ QB_CODE(0, SCN_STATE_OFFSET_IN_MEM, 8); -+static struct qb_attr_code code_scn_rid_in_mem = -+ QB_CODE(1, SCN_RID_OFFSET_IN_MEM, 24); -+static struct 
qb_attr_code code_scn_ctx_lo = QB_CODE(2, 0, 32); -+ -+uint8_t qbman_result_SCN_state(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return (uint8_t)qb_attr_code_decode(&code_scn_state, p); -+} -+ -+uint32_t qbman_result_SCN_rid(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return qb_attr_code_decode(&code_scn_rid, p); -+} -+ -+uint64_t qbman_result_SCN_ctx(const struct dpaa2_dq *scn) -+{ -+ const uint64_t *p = (uint64_t *)qb_cl(scn); -+ -+ return qb_attr_code_decode_64(&code_scn_ctx_lo, p); -+} -+ -+uint8_t qbman_result_SCN_state_in_mem(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ -+ return (uint8_t)qb_attr_code_decode(&code_scn_state_in_mem, p); -+} -+ -+uint32_t qbman_result_SCN_rid_in_mem(const struct dpaa2_dq *scn) -+{ -+ const uint32_t *p = qb_cl(scn); -+ uint32_t result_rid; -+ -+ result_rid = qb_attr_code_decode(&code_scn_rid_in_mem, p); -+ return make_le24(result_rid); -+} -+ -+/*****************/ -+/* Parsing BPSCN */ -+/*****************/ -+uint16_t qbman_result_bpscn_bpid(const struct dpaa2_dq *scn) -+{ -+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0x3FFF; -+} -+ -+int qbman_result_bpscn_has_free_bufs(const struct dpaa2_dq *scn) -+{ -+ return !(int)(qbman_result_SCN_state_in_mem(scn) & 0x1); -+} -+ -+int qbman_result_bpscn_is_depleted(const struct dpaa2_dq *scn) -+{ -+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x2); -+} -+ -+int qbman_result_bpscn_is_surplus(const struct dpaa2_dq *scn) -+{ -+ return (int)(qbman_result_SCN_state_in_mem(scn) & 0x4); -+} -+ -+uint64_t qbman_result_bpscn_ctx(const struct dpaa2_dq *scn) -+{ -+ return qbman_result_SCN_ctx(scn); -+} -+ -+/*****************/ -+/* Parsing CGCU */ -+/*****************/ -+uint16_t qbman_result_cgcu_cgid(const struct dpaa2_dq *scn) -+{ -+ return (uint16_t)qbman_result_SCN_rid_in_mem(scn) & 0xFFFF; -+} -+ -+uint64_t qbman_result_cgcu_icnt(const struct dpaa2_dq *scn) -+{ -+ return qbman_result_SCN_ctx(scn) 
& 0xFFFFFFFFFF; -+} -+ -+/******************/ -+/* Buffer release */ -+/******************/ -+ -+/* These should be const, eventually */ -+/* static struct qb_attr_code code_release_num = QB_CODE(0, 0, 3); */ -+static struct qb_attr_code code_release_set_me = QB_CODE(0, 5, 1); -+static struct qb_attr_code code_release_rcdi = QB_CODE(0, 6, 1); -+static struct qb_attr_code code_release_bpid = QB_CODE(0, 16, 16); -+ -+void qbman_release_desc_clear(struct qbman_release_desc *d) -+{ -+ uint32_t *cl; -+ -+ memset(d, 0, sizeof(*d)); -+ cl = qb_cl(d); -+ qb_attr_code_encode(&code_release_set_me, cl, 1); -+} -+ -+void qbman_release_desc_set_bpid(struct qbman_release_desc *d, uint32_t bpid) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_release_bpid, cl, bpid); -+} -+ -+void qbman_release_desc_set_rcdi(struct qbman_release_desc *d, int enable) -+{ -+ uint32_t *cl = qb_cl(d); -+ -+ qb_attr_code_encode(&code_release_rcdi, cl, !!enable); -+} -+ -+#define RAR_IDX(rar) ((rar) & 0x7) -+#define RAR_VB(rar) ((rar) & 0x80) -+#define RAR_SUCCESS(rar) ((rar) & 0x100) -+ -+int qbman_swp_release(struct qbman_swp *s, const struct qbman_release_desc *d, -+ const uint64_t *buffers, unsigned int num_buffers) -+{ -+ uint32_t *p; -+ const uint32_t *cl = qb_cl(d); -+ uint32_t rar = qbman_cinh_read(&s->sys, QBMAN_CINH_SWP_RAR); -+ -+ pr_debug("RAR=%08x\n", rar); -+ if (!RAR_SUCCESS(rar)) -+ return -EBUSY; -+ BUG_ON(!num_buffers || (num_buffers > 7)); -+ /* Start the release command */ -+ p = qbman_cena_write_start(&s->sys, -+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar))); -+ /* Copy the caller's buffer pointers to the command */ -+ u64_to_le32_copy(&p[2], buffers, num_buffers); -+ /* Set the verb byte, have to substitute in the valid-bit and the number -+ * of buffers. 
*/ -+ p[0] = cl[0] | RAR_VB(rar) | num_buffers; -+ qbman_cena_write_complete(&s->sys, -+ QBMAN_CENA_SWP_RCR(RAR_IDX(rar)), -+ p); -+ return 0; -+} -+ -+/*******************/ -+/* Buffer acquires */ -+/*******************/ -+ -+/* These should be const, eventually */ -+static struct qb_attr_code code_acquire_bpid = QB_CODE(0, 16, 16); -+static struct qb_attr_code code_acquire_num = QB_CODE(1, 0, 3); -+static struct qb_attr_code code_acquire_r_num = QB_CODE(1, 0, 3); -+ -+int qbman_swp_acquire(struct qbman_swp *s, uint32_t bpid, uint64_t *buffers, -+ unsigned int num_buffers) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt, num; -+ -+ BUG_ON(!num_buffers || (num_buffers > 7)); -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_acquire_bpid, p, bpid); -+ qb_attr_code_encode(&code_acquire_num, p, num_buffers); -+ -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_MC_ACQUIRE); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ num = qb_attr_code_decode(&code_acquire_r_num, p); -+ BUG_ON(verb != QBMAN_MC_ACQUIRE); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("Acquire buffers from BPID 0x%x failed, code=0x%02x\n", -+ bpid, rslt); -+ return -EIO; -+ } -+ BUG_ON(num > num_buffers); -+ /* Copy the acquired buffers to the caller's array */ -+ u64_from_le32_copy(buffers, &p[2], num); -+ return (int)num; -+} -+ -+/*****************/ -+/* FQ management */ -+/*****************/ -+ -+static struct qb_attr_code code_fqalt_fqid = QB_CODE(1, 0, 32); -+ -+static int qbman_swp_alt_fq_state(struct qbman_swp *s, uint32_t fqid, -+ uint8_t alt_fq_verb) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return 
-EBUSY; -+ -+ qb_attr_code_encode(&code_fqalt_fqid, p, fqid); -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | alt_fq_verb); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != alt_fq_verb); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("ALT FQID %d failed: verb = 0x%08x, code = 0x%02x\n", -+ fqid, alt_fq_verb, rslt); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int qbman_swp_fq_schedule(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_SCHEDULE); -+} -+ -+int qbman_swp_fq_force(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_FORCE); -+} -+ -+int qbman_swp_fq_xon(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XON); -+} -+ -+int qbman_swp_fq_xoff(struct qbman_swp *s, uint32_t fqid) -+{ -+ return qbman_swp_alt_fq_state(s, fqid, QBMAN_FQ_XOFF); -+} -+ -+/**********************/ -+/* Channel management */ -+/**********************/ -+ -+static struct qb_attr_code code_cdan_cid = QB_CODE(0, 16, 12); -+static struct qb_attr_code code_cdan_we = QB_CODE(1, 0, 8); -+static struct qb_attr_code code_cdan_en = QB_CODE(1, 8, 1); -+static struct qb_attr_code code_cdan_ctx_lo = QB_CODE(2, 0, 32); -+ -+/* Hide "ICD" for now as we don't use it, don't set it, and don't test it, so it -+ * would be irresponsible to expose it. 
*/ -+#define CODE_CDAN_WE_EN 0x1 -+#define CODE_CDAN_WE_CTX 0x4 -+ -+static int qbman_swp_CDAN_set(struct qbman_swp *s, uint16_t channelid, -+ uint8_t we_mask, uint8_t cdan_en, -+ uint64_t ctx) -+{ -+ uint32_t *p; -+ uint32_t verb, rslt; -+ -+ /* Start the management command */ -+ p = qbman_swp_mc_start(s); -+ if (!p) -+ return -EBUSY; -+ -+ /* Encode the caller-provided attributes */ -+ qb_attr_code_encode(&code_cdan_cid, p, channelid); -+ qb_attr_code_encode(&code_cdan_we, p, we_mask); -+ qb_attr_code_encode(&code_cdan_en, p, cdan_en); -+ qb_attr_code_encode_64(&code_cdan_ctx_lo, (uint64_t *)p, ctx); -+ /* Complete the management command */ -+ p = qbman_swp_mc_complete(s, p, p[0] | QBMAN_WQCHAN_CONFIGURE); -+ -+ /* Decode the outcome */ -+ verb = qb_attr_code_decode(&code_generic_verb, p); -+ rslt = qb_attr_code_decode(&code_generic_rslt, p); -+ BUG_ON(verb != QBMAN_WQCHAN_CONFIGURE); -+ -+ /* Determine success or failure */ -+ if (unlikely(rslt != QBMAN_MC_RSLT_OK)) { -+ pr_err("CDAN cQID %d failed: code = 0x%02x\n", -+ channelid, rslt); -+ return -EIO; -+ } -+ -+ return 0; -+} -+ -+int qbman_swp_CDAN_set_context(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_CTX, -+ 0, ctx); -+} -+ -+int qbman_swp_CDAN_enable(struct qbman_swp *s, uint16_t channelid) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN, -+ 1, 0); -+} -+int qbman_swp_CDAN_disable(struct qbman_swp *s, uint16_t channelid) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN, -+ 0, 0); -+} -+int qbman_swp_CDAN_set_context_enable(struct qbman_swp *s, uint16_t channelid, -+ uint64_t ctx) -+{ -+ return qbman_swp_CDAN_set(s, channelid, -+ CODE_CDAN_WE_EN | CODE_CDAN_WE_CTX, -+ 1, ctx); -+} -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h -new file mode 100644 -index 0000000..65ebf3f ---- /dev/null -+++ 
b/drivers/staging/fsl-mc/bus/dpio/qbman_portal.h -@@ -0,0 +1,261 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include "qbman_private.h" -+#include "fsl_qbman_portal.h" -+#include "../../include/fsl_dpaa2_fd.h" -+ -+/* All QBMan command and result structures use this "valid bit" encoding */ -+#define QB_VALID_BIT ((uint32_t)0x80) -+ -+/* Management command result codes */ -+#define QBMAN_MC_RSLT_OK 0xf0 -+ -+/* TBD: as of QBMan 4.1, DQRR will be 8 rather than 4! */ -+#define QBMAN_DQRR_SIZE 4 -+ -+/* DQRR valid-bit reset bug. See qbman_portal.c::qbman_swp_init(). */ -+#define WORKAROUND_DQRR_RESET_BUG -+ -+/* --------------------- */ -+/* portal data structure */ -+/* --------------------- */ -+ -+struct qbman_swp { -+ const struct qbman_swp_desc *desc; -+ /* The qbman_sys (ie. arch/OS-specific) support code can put anything it -+ * needs in here. */ -+ struct qbman_swp_sys sys; -+ /* Management commands */ -+ struct { -+#ifdef QBMAN_CHECKING -+ enum swp_mc_check { -+ swp_mc_can_start, /* call __qbman_swp_mc_start() */ -+ swp_mc_can_submit, /* call __qbman_swp_mc_submit() */ -+ swp_mc_can_poll, /* call __qbman_swp_mc_result() */ -+ } check; -+#endif -+ uint32_t valid_bit; /* 0x00 or 0x80 */ -+ } mc; -+ /* Push dequeues */ -+ uint32_t sdq; -+ /* Volatile dequeues */ -+ struct { -+ /* VDQCR supports a "1 deep pipeline", meaning that if you know -+ * the last-submitted command is already executing in the -+ * hardware (as evidenced by at least 1 valid dequeue result), -+ * you can write another dequeue command to the register, the -+ * hardware will start executing it as soon as the -+ * already-executing command terminates. (This minimises latency -+ * and stalls.) With that in mind, this "busy" variable refers -+ * to whether or not a command can be submitted, not whether or -+ * not a previously-submitted command is still executing. In -+ * other words, once proof is seen that the previously-submitted -+ * command is executing, "vdq" is no longer "busy". 
-+ */ -+ atomic_t busy; -+ uint32_t valid_bit; /* 0x00 or 0x80 */ -+ /* We need to determine when vdq is no longer busy. This depends -+ * on whether the "busy" (last-submitted) dequeue command is -+ * targeting DQRR or main-memory, and detected is based on the -+ * presence of the dequeue command's "token" showing up in -+ * dequeue entries in DQRR or main-memory (respectively). */ -+ struct dpaa2_dq *storage; /* NULL if DQRR */ -+ } vdq; -+ /* DQRR */ -+ struct { -+ uint32_t next_idx; -+ uint32_t valid_bit; -+ uint8_t dqrr_size; -+#ifdef WORKAROUND_DQRR_RESET_BUG -+ int reset_bug; -+#endif -+ } dqrr; -+}; -+ -+/* -------------------------- */ -+/* portal management commands */ -+/* -------------------------- */ -+ -+/* Different management commands all use this common base layer of code to issue -+ * commands and poll for results. The first function returns a pointer to where -+ * the caller should fill in their MC command (though they should ignore the -+ * verb byte), the second function commits merges in the caller-supplied command -+ * verb (which should not include the valid-bit) and submits the command to -+ * hardware, and the third function checks for a completed response (returns -+ * non-NULL if only if the response is complete). 
*/ -+void *qbman_swp_mc_start(struct qbman_swp *p); -+void qbman_swp_mc_submit(struct qbman_swp *p, void *cmd, uint32_t cmd_verb); -+void *qbman_swp_mc_result(struct qbman_swp *p); -+ -+/* Wraps up submit + poll-for-result */ -+static inline void *qbman_swp_mc_complete(struct qbman_swp *swp, void *cmd, -+ uint32_t cmd_verb) -+{ -+ int loopvar; -+ -+ qbman_swp_mc_submit(swp, cmd, cmd_verb); -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ cmd = qbman_swp_mc_result(swp); -+ } while (!cmd); -+ return cmd; -+} -+ -+/* ------------ */ -+/* qb_attr_code */ -+/* ------------ */ -+ -+/* This struct locates a sub-field within a QBMan portal (CENA) cacheline which -+ * is either serving as a configuration command or a query result. The -+ * representation is inherently little-endian, as the indexing of the words is -+ * itself little-endian in nature and layerscape is little endian for anything -+ * that crosses a word boundary too (64-bit fields are the obvious examples). -+ */ -+struct qb_attr_code { -+ unsigned int word; /* which uint32_t[] array member encodes the field */ -+ unsigned int lsoffset; /* encoding offset from ls-bit */ -+ unsigned int width; /* encoding width. (bool must be 1.) */ -+}; -+ -+/* Some pre-defined codes */ -+extern struct qb_attr_code code_generic_verb; -+extern struct qb_attr_code code_generic_rslt; -+ -+/* Macros to define codes */ -+#define QB_CODE(a, b, c) { a, b, c} -+#define QB_CODE_NULL \ -+ QB_CODE((unsigned int)-1, (unsigned int)-1, (unsigned int)-1) -+ -+/* Rotate a code "ms", meaning that it moves from less-significant bytes to -+ * more-significant, from less-significant words to more-significant, etc. The -+ * "ls" version does the inverse, from more-significant towards -+ * less-significant. 
-+ */ -+static inline void qb_attr_code_rotate_ms(struct qb_attr_code *code, -+ unsigned int bits) -+{ -+ code->lsoffset += bits; -+ while (code->lsoffset > 31) { -+ code->word++; -+ code->lsoffset -= 32; -+ } -+} -+static inline void qb_attr_code_rotate_ls(struct qb_attr_code *code, -+ unsigned int bits) -+{ -+ /* Don't be fooled, this trick should work because the types are -+ * unsigned. So the case that interests the while loop (the rotate has -+ * gone too far and the word count needs to compensate for it), is -+ * manifested when lsoffset is negative. But that equates to a really -+ * large unsigned value, starting with lots of "F"s. As such, we can -+ * continue adding 32 back to it until it wraps back round above zero, -+ * to a value of 31 or less... -+ */ -+ code->lsoffset -= bits; -+ while (code->lsoffset > 31) { -+ code->word--; -+ code->lsoffset += 32; -+ } -+} -+/* Implement a loop of code rotations until 'expr' evaluates to FALSE (0). */ -+#define qb_attr_code_for_ms(code, bits, expr) \ -+ for (; expr; qb_attr_code_rotate_ms(code, bits)) -+#define qb_attr_code_for_ls(code, bits, expr) \ -+ for (; expr; qb_attr_code_rotate_ls(code, bits)) -+ -+/* decode a field from a cacheline */ -+static inline uint32_t qb_attr_code_decode(const struct qb_attr_code *code, -+ const uint32_t *cacheline) -+{ -+ return d32_uint32_t(code->lsoffset, code->width, cacheline[code->word]); -+} -+static inline uint64_t qb_attr_code_decode_64(const struct qb_attr_code *code, -+ const uint64_t *cacheline) -+{ -+ uint64_t res; -+ u64_from_le32_copy(&res, &cacheline[code->word/2], 1); -+ return res; -+} -+ -+/* encode a field to a cacheline */ -+static inline void qb_attr_code_encode(const struct qb_attr_code *code, -+ uint32_t *cacheline, uint32_t val) -+{ -+ cacheline[code->word] = -+ r32_uint32_t(code->lsoffset, code->width, cacheline[code->word]) -+ | e32_uint32_t(code->lsoffset, code->width, val); -+} -+static inline void qb_attr_code_encode_64(const struct qb_attr_code 
*code, -+ uint64_t *cacheline, uint64_t val) -+{ -+ u64_to_le32_copy(&cacheline[code->word/2], &val, 1); -+} -+ -+/* Small-width signed values (two's-complement) will decode into medium-width -+ * positives. (Eg. for an 8-bit signed field, which stores values from -128 to -+ * +127, a setting of -7 would appear to decode to the 32-bit unsigned value -+ * 249. Likewise -120 would decode as 136.) This function allows the caller to -+ * "re-sign" such fields to 32-bit signed. (Eg. -7, which was 249 with an 8-bit -+ * encoding, will become 0xfffffff9 if you cast the return value to uint32_t). -+ */ -+static inline int32_t qb_attr_code_makesigned(const struct qb_attr_code *code, -+ uint32_t val) -+{ -+ BUG_ON(val >= (1 << code->width)); -+ /* If the high bit was set, it was encoding a negative */ -+ if (val >= (1 << (code->width - 1))) -+ return (int32_t)0 - (int32_t)(((uint32_t)1 << code->width) - -+ val); -+ /* Otherwise, it was encoding a positive */ -+ return (int32_t)val; -+} -+ -+/* ---------------------- */ -+/* Descriptors/cachelines */ -+/* ---------------------- */ -+ -+/* To avoid needless dynamic allocation, the driver API often gives the caller -+ * a "descriptor" type that the caller can instantiate however they like. -+ * Ultimately though, it is just a cacheline of binary storage (or something -+ * smaller when it is known that the descriptor doesn't need all 64 bytes) for -+ * holding pre-formatted pieces of hardware commands. The performance-critical -+ * code can then copy these descriptors directly into hardware command -+ * registers more efficiently than trying to construct/format commands -+ * on-the-fly. The API user sees the descriptor as an array of 32-bit words in -+ * order for the compiler to know its size, but the internal details are not -+ * exposed. The following macro is used within the driver for converting *any* -+ * descriptor pointer to a usable array pointer. 
The use of a macro (instead of -+ * an inline) is necessary to work with different descriptor types and to work -+ * correctly with const and non-const inputs (and similarly-qualified outputs). -+ */ -+#define qb_cl(d) (&(d)->dont_manipulate_directly[0]) -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_private.h b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h -new file mode 100644 -index 0000000..e376b80 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_private.h -@@ -0,0 +1,173 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+*/ -+ -+/* Perform extra checking */ -+#define QBMAN_CHECKING -+ -+/* To maximise the amount of logic that is common between the Linux driver and -+ * other targets (such as the embedded MC firmware), we pivot here between the -+ * inclusion of two platform-specific headers. -+ * -+ * The first, qbman_sys_decl.h, includes any and all required system headers as -+ * well as providing any definitions for the purposes of compatibility. The -+ * second, qbman_sys.h, is where platform-specific routines go. -+ * -+ * The point of the split is that the platform-independent code (including this -+ * header) may depend on platform-specific declarations, yet other -+ * platform-specific routines may depend on platform-independent definitions. -+ */ -+ -+#include "qbman_sys_decl.h" -+ -+#define QMAN_REV_4000 0x04000000 -+#define QMAN_REV_4100 0x04010000 -+#define QMAN_REV_4101 0x04010001 -+ -+/* When things go wrong, it is a convenient trick to insert a few FOO() -+ * statements in the code to trace progress. TODO: remove this once we are -+ * hacking the code less actively. -+ */ -+#define FOO() fsl_os_print("FOO: %s:%d\n", __FILE__, __LINE__) -+ -+/* Any time there is a register interface which we poll on, this provides a -+ * "break after x iterations" scheme for it. It's handy for debugging, eg. 
-+ * where you don't want millions of lines of log output from a polling loop -+ * that won't, because such things tend to drown out the earlier log output -+ * that might explain what caused the problem. (NB: put ";" after each macro!) -+ * TODO: we should probably remove this once we're done sanitising the -+ * simulator... -+ */ -+#define DBG_POLL_START(loopvar) (loopvar = 10) -+#define DBG_POLL_CHECK(loopvar) \ -+ do {if (!(loopvar--)) BUG_ON(1); } while (0) -+ -+/* For CCSR or portal-CINH registers that contain fields at arbitrary offsets -+ * and widths, these macro-generated encode/decode/isolate/remove inlines can -+ * be used. -+ * -+ * Eg. to "d"ecode a 14-bit field out of a register (into a "uint16_t" type), -+ * where the field is located 3 bits "up" from the least-significant bit of the -+ * register (ie. the field location within the 32-bit register corresponds to a -+ * mask of 0x0001fff8), you would do; -+ * uint16_t field = d32_uint16_t(3, 14, reg_value); -+ * -+ * Or to "e"ncode a 1-bit boolean value (input type is "int", zero is FALSE, -+ * non-zero is TRUE, so must convert all non-zero inputs to 1, hence the "!!" -+ * operator) into a register at bit location 0x00080000 (19 bits "in" from the -+ * LS bit), do; -+ * reg_value |= e32_int(19, 1, !!field); -+ * -+ * If you wish to read-modify-write a register, such that you leave the 14-bit -+ * field as-is but have all other fields set to zero, then "i"solate the 14-bit -+ * value using; -+ * reg_value = i32_uint16_t(3, 14, reg_value); -+ * -+ * Alternatively, you could "r"emove the 1-bit boolean field (setting it to -+ * zero) but leaving all other fields as-is; -+ * reg_val = r32_int(19, 1, reg_value); -+ * -+ */ -+#define MAKE_MASK32(width) (width == 32 ? 
0xffffffff : \ -+ (uint32_t)((1 << width) - 1)) -+#define DECLARE_CODEC32(t) \ -+static inline uint32_t e32_##t(uint32_t lsoffset, uint32_t width, t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return ((uint32_t)val & MAKE_MASK32(width)) << lsoffset; \ -+} \ -+static inline t d32_##t(uint32_t lsoffset, uint32_t width, uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return (t)((val >> lsoffset) & MAKE_MASK32(width)); \ -+} \ -+static inline uint32_t i32_##t(uint32_t lsoffset, uint32_t width, \ -+ uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return e32_##t(lsoffset, width, d32_##t(lsoffset, width, val)); \ -+} \ -+static inline uint32_t r32_##t(uint32_t lsoffset, uint32_t width, \ -+ uint32_t val) \ -+{ \ -+ BUG_ON(width > (sizeof(t) * 8)); \ -+ return ~(MAKE_MASK32(width) << lsoffset) & val; \ -+} -+DECLARE_CODEC32(uint32_t) -+DECLARE_CODEC32(uint16_t) -+DECLARE_CODEC32(uint8_t) -+DECLARE_CODEC32(int) -+ -+ /*********************/ -+ /* Debugging assists */ -+ /*********************/ -+ -+static inline void __hexdump(unsigned long start, unsigned long end, -+ unsigned long p, size_t sz, const unsigned char *c) -+{ -+ while (start < end) { -+ unsigned int pos = 0; -+ char buf[64]; -+ int nl = 0; -+ -+ pos += sprintf(buf + pos, "%08lx: ", start); -+ do { -+ if ((start < p) || (start >= (p + sz))) -+ pos += sprintf(buf + pos, ".."); -+ else -+ pos += sprintf(buf + pos, "%02x", *(c++)); -+ if (!(++start & 15)) { -+ buf[pos++] = '\n'; -+ nl = 1; -+ } else { -+ nl = 0; -+ if (!(start & 1)) -+ buf[pos++] = ' '; -+ if (!(start & 3)) -+ buf[pos++] = ' '; -+ } -+ } while (start & 15); -+ if (!nl) -+ buf[pos++] = '\n'; -+ buf[pos] = '\0'; -+ pr_info("%s", buf); -+ } -+} -+static inline void hexdump(const void *ptr, size_t sz) -+{ -+ unsigned long p = (unsigned long)ptr; -+ unsigned long start = p & ~(unsigned long)15; -+ unsigned long end = (p + sz + 15) & ~(unsigned long)15; -+ const unsigned char *c = ptr; -+ -+ __hexdump(start, end, 
p, sz, c); -+} -+ -+#include "qbman_sys.h" -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h b/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h -new file mode 100644 -index 0000000..4849212 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys.h -@@ -0,0 +1,307 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+/* qbman_sys_decl.h and qbman_sys.h are the two platform-specific files in the -+ * driver. They are only included via qbman_private.h, which is itself a -+ * platform-independent file and is included by all the other driver source. -+ * -+ * qbman_sys_decl.h is included prior to all other declarations and logic, and -+ * it exists to provide compatibility with any linux interfaces our -+ * single-source driver code is dependent on (eg. kmalloc). Ie. this file -+ * provides linux compatibility. -+ * -+ * This qbman_sys.h header, on the other hand, is included *after* any common -+ * and platform-neutral declarations and logic in qbman_private.h, and exists to -+ * implement any platform-specific logic of the qbman driver itself. Ie. it is -+ * *not* to provide linux compatibility. -+ */ -+ -+/* Trace the 3 different classes of read/write access to QBMan. #undef as -+ * required. */ -+#undef QBMAN_CCSR_TRACE -+#undef QBMAN_CINH_TRACE -+#undef QBMAN_CENA_TRACE -+ -+static inline void word_copy(void *d, const void *s, unsigned int cnt) -+{ -+ uint32_t *dd = d; -+ const uint32_t *ss = s; -+ -+ while (cnt--) -+ *(dd++) = *(ss++); -+} -+ -+/* Currently, the CENA support code expects each 32-bit word to be written in -+ * host order, and these are converted to hardware (little-endian) order on -+ * command submission. 
However, 64-bit quantities are must be written (and read) -+ * as two 32-bit words with the least-significant word first, irrespective of -+ * host endianness. */ -+static inline void u64_to_le32_copy(void *d, const uint64_t *s, -+ unsigned int cnt) -+{ -+ uint32_t *dd = d; -+ const uint32_t *ss = (const uint32_t *)s; -+ -+ while (cnt--) { -+ /* TBD: the toolchain was choking on the use of 64-bit types up -+ * until recently so this works entirely with 32-bit variables. -+ * When 64-bit types become usable again, investigate better -+ * ways of doing this. */ -+#if defined(__BIG_ENDIAN) -+ *(dd++) = ss[1]; -+ *(dd++) = ss[0]; -+ ss += 2; -+#else -+ *(dd++) = *(ss++); -+ *(dd++) = *(ss++); -+#endif -+ } -+} -+static inline void u64_from_le32_copy(uint64_t *d, const void *s, -+ unsigned int cnt) -+{ -+ const uint32_t *ss = s; -+ uint32_t *dd = (uint32_t *)d; -+ -+ while (cnt--) { -+#if defined(__BIG_ENDIAN) -+ dd[1] = *(ss++); -+ dd[0] = *(ss++); -+ dd += 2; -+#else -+ *(dd++) = *(ss++); -+ *(dd++) = *(ss++); -+#endif -+ } -+} -+ -+/* Convert a host-native 32bit value into little endian */ -+#if defined(__BIG_ENDIAN) -+static inline uint32_t make_le32(uint32_t val) -+{ -+ return ((val & 0xff) << 24) | ((val & 0xff00) << 8) | -+ ((val & 0xff0000) >> 8) | ((val & 0xff000000) >> 24); -+} -+static inline uint32_t make_le24(uint32_t val) -+{ -+ return (((val & 0xff) << 16) | (val & 0xff00) | -+ ((val & 0xff0000) >> 16)); -+} -+#else -+#define make_le32(val) (val) -+#define make_le24(val) (val) -+#endif -+static inline void make_le32_n(uint32_t *val, unsigned int num) -+{ -+ while (num--) { -+ *val = make_le32(*val); -+ val++; -+ } -+} -+ -+ /******************/ -+ /* Portal access */ -+ /******************/ -+struct qbman_swp_sys { -+ /* On GPP, the sys support for qbman_swp is here. 
The CENA region isi -+ * not an mmap() of the real portal registers, but an allocated -+ * place-holder, because the actual writes/reads to/from the portal are -+ * marshalled from these allocated areas using QBMan's "MC access -+ * registers". CINH accesses are atomic so there's no need for a -+ * place-holder. */ -+ void *cena; -+ void __iomem *addr_cena; -+ void __iomem *addr_cinh; -+}; -+ -+/* P_OFFSET is (ACCESS_CMD,0,12) - offset within the portal -+ * C is (ACCESS_CMD,12,1) - is inhibited? (0==CENA, 1==CINH) -+ * SWP_IDX is (ACCESS_CMD,16,10) - Software portal index -+ * P is (ACCESS_CMD,28,1) - (0==special portal, 1==any portal) -+ * T is (ACCESS_CMD,29,1) - Command type (0==READ, 1==WRITE) -+ * E is (ACCESS_CMD,31,1) - Command execute (1 to issue, poll for 0==complete) -+ */ -+ -+static inline void qbman_cinh_write(struct qbman_swp_sys *s, uint32_t offset, -+ uint32_t val) -+{ -+ -+ writel_relaxed(val, s->addr_cinh + offset); -+#ifdef QBMAN_CINH_TRACE -+ pr_info("qbman_cinh_write(%p:0x%03x) 0x%08x\n", -+ s->addr_cinh, offset, val); -+#endif -+} -+ -+static inline uint32_t qbman_cinh_read(struct qbman_swp_sys *s, uint32_t offset) -+{ -+ uint32_t reg = readl_relaxed(s->addr_cinh + offset); -+ -+#ifdef QBMAN_CINH_TRACE -+ pr_info("qbman_cinh_read(%p:0x%03x) 0x%08x\n", -+ s->addr_cinh, offset, reg); -+#endif -+ return reg; -+} -+ -+static inline void *qbman_cena_write_start(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ void *shadow = s->cena + offset; -+ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_start(%p:0x%03x) %p\n", -+ s->addr_cena, offset, shadow); -+#endif -+ BUG_ON(offset & 63); -+ dcbz(shadow); -+ return shadow; -+} -+ -+static inline void qbman_cena_write_complete(struct qbman_swp_sys *s, -+ uint32_t offset, void *cmd) -+{ -+ const uint32_t *shadow = cmd; -+ int loop; -+ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_write_complete(%p:0x%03x) %p\n", -+ s->addr_cena, offset, shadow); -+ hexdump(cmd, 64); -+#endif -+ for (loop = 15; 
loop >= 1; loop--) -+ writel_relaxed(shadow[loop], s->addr_cena + -+ offset + loop * 4); -+ lwsync(); -+ writel_relaxed(shadow[0], s->addr_cena + offset); -+ dcbf(s->addr_cena + offset); -+} -+ -+static inline void *qbman_cena_read(struct qbman_swp_sys *s, uint32_t offset) -+{ -+ uint32_t *shadow = s->cena + offset; -+ unsigned int loop; -+ -+#ifdef QBMAN_CENA_TRACE -+ pr_info("qbman_cena_read(%p:0x%03x) %p\n", -+ s->addr_cena, offset, shadow); -+#endif -+ -+ for (loop = 0; loop < 16; loop++) -+ shadow[loop] = readl_relaxed(s->addr_cena + offset -+ + loop * 4); -+#ifdef QBMAN_CENA_TRACE -+ hexdump(shadow, 64); -+#endif -+ return shadow; -+} -+ -+static inline void qbman_cena_invalidate_prefetch(struct qbman_swp_sys *s, -+ uint32_t offset) -+{ -+ dcivac(s->addr_cena + offset); -+ prefetch_for_load(s->addr_cena + offset); -+} -+ -+ /******************/ -+ /* Portal support */ -+ /******************/ -+ -+/* The SWP_CFG portal register is special, in that it is used by the -+ * platform-specific code rather than the platform-independent code in -+ * qbman_portal.c. So use of it is declared locally here. 
*/ -+#define QBMAN_CINH_SWP_CFG 0xd00 -+ -+/* For MC portal use, we always configure with -+ * DQRR_MF is (SWP_CFG,20,3) - DQRR max fill (<- 0x4) -+ * EST is (SWP_CFG,16,3) - EQCR_CI stashing threshold (<- 0x0) -+ * RPM is (SWP_CFG,12,2) - RCR production notification mode (<- 0x3) -+ * DCM is (SWP_CFG,10,2) - DQRR consumption notification mode (<- 0x2) -+ * EPM is (SWP_CFG,8,2) - EQCR production notification mode (<- 0x3) -+ * SD is (SWP_CFG,5,1) - memory stashing drop enable (<- FALSE) -+ * SP is (SWP_CFG,4,1) - memory stashing priority (<- TRUE) -+ * SE is (SWP_CFG,3,1) - memory stashing enable (<- 0x0) -+ * DP is (SWP_CFG,2,1) - dequeue stashing priority (<- TRUE) -+ * DE is (SWP_CFG,1,1) - dequeue stashing enable (<- 0x0) -+ * EP is (SWP_CFG,0,1) - EQCR_CI stashing priority (<- FALSE) -+ */ -+static inline uint32_t qbman_set_swp_cfg(uint8_t max_fill, uint8_t wn, -+ uint8_t est, uint8_t rpm, uint8_t dcm, -+ uint8_t epm, int sd, int sp, int se, -+ int dp, int de, int ep) -+{ -+ uint32_t reg; -+ -+ reg = e32_uint8_t(20, (uint32_t)(3 + (max_fill >> 3)), max_fill) | -+ e32_uint8_t(16, 3, est) | e32_uint8_t(12, 2, rpm) | -+ e32_uint8_t(10, 2, dcm) | e32_uint8_t(8, 2, epm) | -+ e32_int(5, 1, sd) | e32_int(4, 1, sp) | e32_int(3, 1, se) | -+ e32_int(2, 1, dp) | e32_int(1, 1, de) | e32_int(0, 1, ep) | -+ e32_uint8_t(14, 1, wn); -+ return reg; -+} -+ -+static inline int qbman_swp_sys_init(struct qbman_swp_sys *s, -+ const struct qbman_swp_desc *d, -+ uint8_t dqrr_size) -+{ -+ uint32_t reg; -+ -+ s->addr_cena = d->cena_bar; -+ s->addr_cinh = d->cinh_bar; -+ s->cena = (void *)get_zeroed_page(GFP_KERNEL); -+ if (!s->cena) { -+ pr_err("Could not allocate page for cena shadow\n"); -+ return -1; -+ } -+ -+#ifdef QBMAN_CHECKING -+ /* We should never be asked to initialise for a portal that isn't in -+ * the power-on state. (Ie. don't forget to reset portals when they are -+ * decommissioned!) 
-+ */ -+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); -+ BUG_ON(reg); -+#endif -+ reg = qbman_set_swp_cfg(dqrr_size, 0, 0, 3, 2, 3, 0, 1, 0, 1, 0, 0); -+ qbman_cinh_write(s, QBMAN_CINH_SWP_CFG, reg); -+ reg = qbman_cinh_read(s, QBMAN_CINH_SWP_CFG); -+ if (!reg) { -+ pr_err("The portal is not enabled!\n"); -+ kfree(s->cena); -+ return -1; -+ } -+ return 0; -+} -+ -+static inline void qbman_swp_sys_finish(struct qbman_swp_sys *s) -+{ -+ free_page((unsigned long)s->cena); -+} -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h b/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h -new file mode 100644 -index 0000000..5b3a224 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_sys_decl.h -@@ -0,0 +1,86 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "fsl_qbman_base.h" -+ -+/* The platform-independent code shouldn't need endianness, except for -+ * weird/fast-path cases like qbman_result_has_token(), which needs to -+ * perform a passive and endianness-specific test on a read-only data structure -+ * very quickly. It's an exception, and this symbol is used for that case. 
*/ -+#if defined(__BIG_ENDIAN) -+#define DQRR_TOK_OFFSET 0 -+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 24 -+#define SCN_STATE_OFFSET_IN_MEM 8 -+#define SCN_RID_OFFSET_IN_MEM 8 -+#else -+#define DQRR_TOK_OFFSET 24 -+#define QBMAN_RESULT_VERB_OFFSET_IN_MEM 0 -+#define SCN_STATE_OFFSET_IN_MEM 16 -+#define SCN_RID_OFFSET_IN_MEM 0 -+#endif -+ -+/* Similarly-named functions */ -+#define upper32(a) upper_32_bits(a) -+#define lower32(a) lower_32_bits(a) -+ -+ /****************/ -+ /* arch assists */ -+ /****************/ -+ -+#define dcbz(p) { asm volatile("dc zva, %0" : : "r" (p) : "memory"); } -+#define lwsync() { asm volatile("dmb st" : : : "memory"); } -+#define dcbf(p) { asm volatile("dc cvac, %0;" : : "r" (p) : "memory"); } -+#define dcivac(p) { asm volatile("dc ivac, %0" : : "r"(p) : "memory"); } -+static inline void prefetch_for_load(void *p) -+{ -+ asm volatile("prfm pldl1keep, [%0, #64]" : : "r" (p)); -+} -+static inline void prefetch_for_store(void *p) -+{ -+ asm volatile("prfm pstl1keep, [%0, #64]" : : "r" (p)); -+} -diff --git a/drivers/staging/fsl-mc/bus/dpio/qbman_test.c b/drivers/staging/fsl-mc/bus/dpio/qbman_test.c -new file mode 100644 -index 0000000..28396e7 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpio/qbman_test.c -@@ -0,0 +1,664 @@ -+/* Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. 
-+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+ -+#include "qbman_private.h" -+#include "fsl_qbman_portal.h" -+#include "qbman_debug.h" -+#include "../../include/fsl_dpaa2_fd.h" -+ -+#define QBMAN_SWP_CENA_BASE 0x818000000 -+#define QBMAN_SWP_CINH_BASE 0x81c000000 -+ -+#define QBMAN_PORTAL_IDX 2 -+#define QBMAN_TEST_FQID 19 -+#define QBMAN_TEST_BPID 23 -+#define QBMAN_USE_QD -+#ifdef QBMAN_USE_QD -+#define QBMAN_TEST_QDID 1 -+#endif -+#define QBMAN_TEST_LFQID 0xf00010 -+ -+#define NUM_EQ_FRAME 10 -+#define NUM_DQ_FRAME 10 -+#define NUM_DQ_IN_DQRR 5 -+#define NUM_DQ_IN_MEM (NUM_DQ_FRAME - NUM_DQ_IN_DQRR) -+ -+static struct qbman_swp *swp; -+static struct qbman_eq_desc eqdesc; -+static struct qbman_pull_desc pulldesc; -+static struct qbman_release_desc releasedesc; -+static struct qbman_eq_response eq_storage[1]; -+static struct dpaa2_dq dq_storage[NUM_DQ_IN_MEM] __aligned(64); -+static dma_addr_t eq_storage_phys; -+static dma_addr_t dq_storage_phys; -+ -+/* FQ ctx attribute values for the test code. 
*/ -+#define FQCTX_HI 0xabbaf00d -+#define FQCTX_LO 0x98765432 -+#define FQ_VFQID 0x123456 -+ -+/* Sample frame descriptor */ -+static struct qbman_fd_simple fd = { -+ .addr_lo = 0xbabaf33d, -+ .addr_hi = 0x01234567, -+ .len = 0x7777, -+ .frc = 0xdeadbeef, -+ .flc_lo = 0xcafecafe, -+ .flc_hi = 0xbeadabba -+}; -+ -+static void fd_inc(struct qbman_fd_simple *_fd) -+{ -+ _fd->addr_lo += _fd->len; -+ _fd->flc_lo += 0x100; -+ _fd->frc += 0x10; -+} -+ -+static int fd_cmp(struct qbman_fd *fda, struct qbman_fd *fdb) -+{ -+ int i; -+ -+ for (i = 0; i < 8; i++) -+ if (fda->words[i] - fdb->words[i]) -+ return 1; -+ return 0; -+} -+ -+struct qbman_fd fd_eq[NUM_EQ_FRAME]; -+struct qbman_fd fd_dq[NUM_DQ_FRAME]; -+ -+/* "Buffers" to be released (and storage for buffers to be acquired) */ -+static uint64_t rbufs[320]; -+static uint64_t abufs[320]; -+ -+static void do_enqueue(struct qbman_swp *swp) -+{ -+ int i, j, ret; -+ -+#ifdef QBMAN_USE_QD -+ pr_info("*****QBMan_test: Enqueue %d frames to QD %d\n", -+ NUM_EQ_FRAME, QBMAN_TEST_QDID); -+#else -+ pr_info("*****QBMan_test: Enqueue %d frames to FQ %d\n", -+ NUM_EQ_FRAME, QBMAN_TEST_FQID); -+#endif -+ for (i = 0; i < NUM_EQ_FRAME; i++) { -+ /*********************************/ -+ /* Prepare a enqueue descriptor */ -+ /*********************************/ -+ memset(eq_storage, 0, sizeof(eq_storage)); -+ eq_storage_phys = virt_to_phys(eq_storage); -+ qbman_eq_desc_clear(&eqdesc); -+ qbman_eq_desc_set_no_orp(&eqdesc, 0); -+ qbman_eq_desc_set_response(&eqdesc, eq_storage_phys, 0); -+ qbman_eq_desc_set_token(&eqdesc, 0x99); -+#ifdef QBMAN_USE_QD -+ /**********************************/ -+ /* Prepare a Queueing Destination */ -+ /**********************************/ -+ qbman_eq_desc_set_qd(&eqdesc, QBMAN_TEST_QDID, 0, 3); -+#else -+ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_FQID); -+#endif -+ -+ /******************/ -+ /* Try an enqueue */ -+ /******************/ -+ ret = qbman_swp_enqueue(swp, &eqdesc, -+ (const struct qbman_fd *)&fd); -+ 
BUG_ON(ret); -+ for (j = 0; j < 8; j++) -+ fd_eq[i].words[j] = *((uint32_t *)&fd + j); -+ fd_inc(&fd); -+ } -+} -+ -+static void do_push_dequeue(struct qbman_swp *swp) -+{ -+ int i, j; -+ const struct dpaa2_dq *dq_storage1; -+ const struct qbman_fd *__fd; -+ int loopvar; -+ -+ pr_info("*****QBMan_test: Start push dequeue\n"); -+ for (i = 0; i < NUM_DQ_FRAME; i++) { -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ dq_storage1 = qbman_swp_dqrr_next(swp); -+ } while (!dq_storage1); -+ if (dq_storage1) { -+ __fd = (const struct qbman_fd *) -+ dpaa2_dq_fd(dq_storage1); -+ for (j = 0; j < 8; j++) -+ fd_dq[i].words[j] = __fd->words[j]; -+ if (fd_cmp(&fd_eq[i], &fd_dq[i])) { -+ pr_info("enqueue FD is\n"); -+ hexdump(&fd_eq[i], 32); -+ pr_info("dequeue FD is\n"); -+ hexdump(&fd_dq[i], 32); -+ } -+ qbman_swp_dqrr_consume(swp, dq_storage1); -+ } else { -+ pr_info("The push dequeue fails\n"); -+ } -+ } -+} -+ -+static void do_pull_dequeue(struct qbman_swp *swp) -+{ -+ int i, j, ret; -+ const struct dpaa2_dq *dq_storage1; -+ const struct qbman_fd *__fd; -+ int loopvar; -+ -+ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in DQRR\n", -+ NUM_DQ_IN_DQRR); -+ for (i = 0; i < NUM_DQ_IN_DQRR; i++) { -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_storage(&pulldesc, NULL, 0, 0); -+ qbman_pull_desc_set_numframes(&pulldesc, 1); -+ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID); -+ -+ ret = qbman_swp_pull(swp, &pulldesc); -+ BUG_ON(ret); -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ dq_storage1 = qbman_swp_dqrr_next(swp); -+ } while (!dq_storage1); -+ -+ if (dq_storage1) { -+ __fd = (const struct qbman_fd *) -+ dpaa2_dq_fd(dq_storage1); -+ for (j = 0; j < 8; j++) -+ fd_dq[i].words[j] = __fd->words[j]; -+ if (fd_cmp(&fd_eq[i], &fd_dq[i])) { -+ pr_info("enqueue FD is\n"); -+ hexdump(&fd_eq[i], 32); -+ pr_info("dequeue FD is\n"); -+ hexdump(&fd_dq[i], 32); -+ } -+ qbman_swp_dqrr_consume(swp, dq_storage1); -+ } else { -+ 
pr_info("Dequeue with dq entry in DQRR fails\n"); -+ } -+ } -+ -+ pr_info("*****QBMan_test: Dequeue %d frames with dq entry in memory\n", -+ NUM_DQ_IN_MEM); -+ for (i = 0; i < NUM_DQ_IN_MEM; i++) { -+ dq_storage_phys = virt_to_phys(&dq_storage[i]); -+ qbman_pull_desc_clear(&pulldesc); -+ qbman_pull_desc_set_storage(&pulldesc, &dq_storage[i], -+ dq_storage_phys, 1); -+ qbman_pull_desc_set_numframes(&pulldesc, 1); -+ qbman_pull_desc_set_fq(&pulldesc, QBMAN_TEST_FQID); -+ ret = qbman_swp_pull(swp, &pulldesc); -+ BUG_ON(ret); -+ -+ DBG_POLL_START(loopvar); -+ do { -+ DBG_POLL_CHECK(loopvar); -+ ret = qbman_result_has_new_result(swp, -+ &dq_storage[i]); -+ } while (!ret); -+ -+ if (ret) { -+ for (j = 0; j < 8; j++) -+ fd_dq[i + NUM_DQ_IN_DQRR].words[j] = -+ dq_storage[i].dont_manipulate_directly[j + 8]; -+ j = i + NUM_DQ_IN_DQRR; -+ if (fd_cmp(&fd_eq[j], &fd_dq[j])) { -+ pr_info("enqueue FD is\n"); -+ hexdump(&fd_eq[i + NUM_DQ_IN_DQRR], 32); -+ pr_info("dequeue FD is\n"); -+ hexdump(&fd_dq[i + NUM_DQ_IN_DQRR], 32); -+ hexdump(&dq_storage[i], 64); -+ } -+ } else { -+ pr_info("Dequeue with dq entry in memory fails\n"); -+ } -+ } -+} -+ -+static void release_buffer(struct qbman_swp *swp, unsigned int num) -+{ -+ int ret; -+ unsigned int i, j; -+ -+ qbman_release_desc_clear(&releasedesc); -+ qbman_release_desc_set_bpid(&releasedesc, QBMAN_TEST_BPID); -+ pr_info("*****QBMan_test: Release %d buffers to BP %d\n", -+ num, QBMAN_TEST_BPID); -+ for (i = 0; i < (num / 7 + 1); i++) { -+ j = ((num - i * 7) > 7) ? 7 : (num - i * 7); -+ ret = qbman_swp_release(swp, &releasedesc, &rbufs[i * 7], j); -+ BUG_ON(ret); -+ } -+} -+ -+static void acquire_buffer(struct qbman_swp *swp, unsigned int num) -+{ -+ int ret; -+ unsigned int i, j; -+ -+ pr_info("*****QBMan_test: Acquire %d buffers from BP %d\n", -+ num, QBMAN_TEST_BPID); -+ -+ for (i = 0; i < (num / 7 + 1); i++) { -+ j = ((num - i * 7) > 7) ? 
7 : (num - i * 7); -+ ret = qbman_swp_acquire(swp, QBMAN_TEST_BPID, &abufs[i * 7], j); -+ BUG_ON(ret != j); -+ } -+} -+ -+static void buffer_pool_test(struct qbman_swp *swp) -+{ -+ struct qbman_attr info; -+ struct dpaa2_dq *bpscn_message; -+ dma_addr_t bpscn_phys; -+ uint64_t bpscn_ctx; -+ uint64_t ctx = 0xbbccddaadeadbeefull; -+ int i, ret; -+ uint32_t hw_targ; -+ -+ pr_info("*****QBMan_test: test buffer pool management\n"); -+ ret = qbman_bp_query(swp, QBMAN_TEST_BPID, &info); -+ qbman_bp_attr_get_bpscn_addr(&info, &bpscn_phys); -+ pr_info("The bpscn is %llx, info_phys is %llx\n", bpscn_phys, -+ virt_to_phys(&info)); -+ bpscn_message = phys_to_virt(bpscn_phys); -+ -+ for (i = 0; i < 320; i++) -+ rbufs[i] = 0xf00dabba01234567ull + i * 0x40; -+ -+ release_buffer(swp, 320); -+ -+ pr_info("QBMan_test: query the buffer pool\n"); -+ qbman_bp_query(swp, QBMAN_TEST_BPID, &info); -+ hexdump(&info, 64); -+ qbman_bp_attr_get_hw_targ(&info, &hw_targ); -+ pr_info("hw_targ is %d\n", hw_targ); -+ -+ /* Acquire buffers to trigger BPSCN */ -+ acquire_buffer(swp, 300); -+ /* BPSCN should be written to the memory */ -+ qbman_bp_query(swp, QBMAN_TEST_BPID, &info); -+ hexdump(&info, 64); -+ hexdump(bpscn_message, 64); -+ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); -+ /* There should be free buffers in the pool */ -+ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); -+ /* Buffer pool is depleted */ -+ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message)); -+ /* The ctx should match */ -+ bpscn_ctx = qbman_result_bpscn_ctx(bpscn_message); -+ pr_info("BPSCN test: ctx %llx, bpscn_ctx %llx\n", ctx, bpscn_ctx); -+ BUG_ON(ctx != bpscn_ctx); -+ memset(bpscn_message, 0, sizeof(struct dpaa2_dq)); -+ -+ /* Re-seed the buffer pool to trigger BPSCN */ -+ release_buffer(swp, 240); -+ /* BPSCN should be written to the memory */ -+ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); -+ /* There should be free buffers in the pool */ -+ 
BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); -+ /* Buffer pool is not depleted */ -+ BUG_ON(qbman_result_bpscn_is_depleted(bpscn_message)); -+ memset(bpscn_message, 0, sizeof(struct dpaa2_dq)); -+ -+ acquire_buffer(swp, 260); -+ /* BPSCN should be written to the memory */ -+ BUG_ON(!qbman_result_is_BPSCN(bpscn_message)); -+ /* There should be free buffers in the pool while BPSCN generated */ -+ BUG_ON(!(qbman_result_bpscn_has_free_bufs(bpscn_message))); -+ /* Buffer pool is depletion */ -+ BUG_ON(!qbman_result_bpscn_is_depleted(bpscn_message)); -+} -+ -+static void ceetm_test(struct qbman_swp *swp) -+{ -+ int i, j, ret; -+ -+ qbman_eq_desc_clear(&eqdesc); -+ qbman_eq_desc_set_no_orp(&eqdesc, 0); -+ qbman_eq_desc_set_fq(&eqdesc, QBMAN_TEST_LFQID); -+ pr_info("*****QBMan_test: Enqueue to LFQID %x\n", -+ QBMAN_TEST_LFQID); -+ for (i = 0; i < NUM_EQ_FRAME; i++) { -+ ret = qbman_swp_enqueue(swp, &eqdesc, -+ (const struct qbman_fd *)&fd); -+ BUG_ON(ret); -+ for (j = 0; j < 8; j++) -+ fd_eq[i].words[j] = *((uint32_t *)&fd + j); -+ fd_inc(&fd); -+ } -+} -+ -+int qbman_test(void) -+{ -+ struct qbman_swp_desc pd; -+ uint32_t reg; -+ -+ pd.cena_bar = ioremap_cache_ns(QBMAN_SWP_CENA_BASE + -+ QBMAN_PORTAL_IDX * 0x10000, 0x10000); -+ pd.cinh_bar = ioremap(QBMAN_SWP_CINH_BASE + -+ QBMAN_PORTAL_IDX * 0x10000, 0x10000); -+ -+ /* Detect whether the mc image is the test image with GPP setup */ -+ reg = readl_relaxed(pd.cena_bar + 0x4); -+ if (reg != 0xdeadbeef) { -+ pr_err("The MC image doesn't have GPP test setup, stop!\n"); -+ iounmap(pd.cena_bar); -+ iounmap(pd.cinh_bar); -+ return -1; -+ } -+ -+ pr_info("*****QBMan_test: Init QBMan SWP %d\n", QBMAN_PORTAL_IDX); -+ swp = qbman_swp_init(&pd); -+ if (!swp) { -+ iounmap(pd.cena_bar); -+ iounmap(pd.cinh_bar); -+ return -1; -+ } -+ -+ /*******************/ -+ /* Enqueue frames */ -+ /*******************/ -+ do_enqueue(swp); -+ -+ /*******************/ -+ /* Do pull dequeue */ -+ /*******************/ -+ 
do_pull_dequeue(swp); -+ -+ /*******************/ -+ /* Enqueue frames */ -+ /*******************/ -+ qbman_swp_push_set(swp, 0, 1); -+ qbman_swp_fq_schedule(swp, QBMAN_TEST_FQID); -+ do_enqueue(swp); -+ -+ /*******************/ -+ /* Do push dequeue */ -+ /*******************/ -+ do_push_dequeue(swp); -+ -+ /**************************/ -+ /* Test buffer pool funcs */ -+ /**************************/ -+ buffer_pool_test(swp); -+ -+ /******************/ -+ /* CEETM test */ -+ /******************/ -+ ceetm_test(swp); -+ -+ qbman_swp_finish(swp); -+ pr_info("*****QBMan_test: Kernel test Passed\n"); -+ return 0; -+} -+ -+/* user-space test-case, definitions: -+ * -+ * 1 portal only, using portal index 3. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define QBMAN_TEST_US_SWP 3 /* portal index for user space */ -+ -+#define QBMAN_TEST_MAGIC 'q' -+struct qbman_test_swp_ioctl { -+ unsigned long portal1_cinh; -+ unsigned long portal1_cena; -+}; -+struct qbman_test_dma_ioctl { -+ unsigned long ptr; -+ uint64_t phys_addr; -+}; -+ -+struct qbman_test_priv { -+ int has_swp_map; -+ int has_dma_map; -+ unsigned long pgoff; -+}; -+ -+#define QBMAN_TEST_SWP_MAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x01, struct qbman_test_swp_ioctl) -+#define QBMAN_TEST_SWP_UNMAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x02, struct qbman_test_swp_ioctl) -+#define QBMAN_TEST_DMA_MAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x03, struct qbman_test_dma_ioctl) -+#define QBMAN_TEST_DMA_UNMAP \ -+ _IOR(QBMAN_TEST_MAGIC, 0x04, struct qbman_test_dma_ioctl) -+ -+#define TEST_PORTAL1_CENA_PGOFF ((QBMAN_SWP_CENA_BASE + QBMAN_TEST_US_SWP * \ -+ 0x10000) >> PAGE_SHIFT) -+#define TEST_PORTAL1_CINH_PGOFF ((QBMAN_SWP_CINH_BASE + QBMAN_TEST_US_SWP * \ -+ 0x10000) >> PAGE_SHIFT) -+ -+static int qbman_test_open(struct inode *inode, struct file *filp) -+{ -+ struct qbman_test_priv *priv; -+ -+ priv = kmalloc(sizeof(struct qbman_test_priv), GFP_KERNEL); -+ if (!priv) -+ return -EIO; -+ filp->private_data = 
priv; -+ priv->has_swp_map = 0; -+ priv->has_dma_map = 0; -+ priv->pgoff = 0; -+ return 0; -+} -+ -+static int qbman_test_mmap(struct file *filp, struct vm_area_struct *vma) -+{ -+ int ret; -+ struct qbman_test_priv *priv = filp->private_data; -+ -+ BUG_ON(!priv); -+ -+ if (vma->vm_pgoff == TEST_PORTAL1_CINH_PGOFF) -+ vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot); -+ else if (vma->vm_pgoff == TEST_PORTAL1_CENA_PGOFF) -+ vma->vm_page_prot = pgprot_cached_ns(vma->vm_page_prot); -+ else if (vma->vm_pgoff == priv->pgoff) -+ vma->vm_page_prot = pgprot_cached(vma->vm_page_prot); -+ else { -+ pr_err("Damn, unrecognised pg_off!!\n"); -+ return -EINVAL; -+ } -+ ret = remap_pfn_range(vma, vma->vm_start, vma->vm_pgoff, -+ vma->vm_end - vma->vm_start, -+ vma->vm_page_prot); -+ return ret; -+} -+ -+static long qbman_test_ioctl(struct file *fp, unsigned int cmd, -+ unsigned long arg) -+{ -+ void __user *a = (void __user *)arg; -+ unsigned long longret, populate; -+ int ret = 0; -+ struct qbman_test_priv *priv = fp->private_data; -+ -+ BUG_ON(!priv); -+ -+ switch (cmd) { -+ case QBMAN_TEST_SWP_MAP: -+ { -+ struct qbman_test_swp_ioctl params; -+ -+ if (priv->has_swp_map) -+ return -EINVAL; -+ down_write(¤t->mm->mmap_sem); -+ /* Map portal1 CINH */ -+ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000, -+ PROT_READ | PROT_WRITE, MAP_SHARED, -+ TEST_PORTAL1_CINH_PGOFF, &populate); -+ if (longret & ~PAGE_MASK) { -+ ret = (int)longret; -+ goto out; -+ } -+ params.portal1_cinh = longret; -+ /* Map portal1 CENA */ -+ longret = do_mmap_pgoff(fp, PAGE_SIZE, 0x10000, -+ PROT_READ | PROT_WRITE, MAP_SHARED, -+ TEST_PORTAL1_CENA_PGOFF, &populate); -+ if (longret & ~PAGE_MASK) { -+ ret = (int)longret; -+ goto out; -+ } -+ params.portal1_cena = longret; -+ priv->has_swp_map = 1; -+out: -+ up_write(¤t->mm->mmap_sem); -+ if (!ret && copy_to_user(a, ¶ms, sizeof(params))) -+ return -EFAULT; -+ return ret; -+ } -+ case QBMAN_TEST_SWP_UNMAP: -+ { -+ struct qbman_test_swp_ioctl params; -+ -+ if 
(!priv->has_swp_map) -+ return -EINVAL; -+ -+ if (copy_from_user(¶ms, a, sizeof(params))) -+ return -EFAULT; -+ down_write(¤t->mm->mmap_sem); -+ do_munmap(current->mm, params.portal1_cena, 0x10000); -+ do_munmap(current->mm, params.portal1_cinh, 0x10000); -+ up_write(¤t->mm->mmap_sem); -+ priv->has_swp_map = 0; -+ return 0; -+ } -+ case QBMAN_TEST_DMA_MAP: -+ { -+ struct qbman_test_dma_ioctl params; -+ void *vaddr; -+ -+ if (priv->has_dma_map) -+ return -EINVAL; -+ vaddr = (void *)get_zeroed_page(GFP_KERNEL); -+ params.phys_addr = virt_to_phys(vaddr); -+ priv->pgoff = (unsigned long)params.phys_addr >> PAGE_SHIFT; -+ down_write(¤t->mm->mmap_sem); -+ longret = do_mmap_pgoff(fp, PAGE_SIZE, PAGE_SIZE, -+ PROT_READ | PROT_WRITE, MAP_SHARED, -+ priv->pgoff, &populate); -+ if (longret & ~PAGE_MASK) { -+ ret = (int)longret; -+ return ret; -+ } -+ params.ptr = longret; -+ priv->has_dma_map = 1; -+ up_write(¤t->mm->mmap_sem); -+ if (copy_to_user(a, ¶ms, sizeof(params))) -+ return -EFAULT; -+ return 0; -+ } -+ case QBMAN_TEST_DMA_UNMAP: -+ { -+ struct qbman_test_dma_ioctl params; -+ -+ if (!priv->has_dma_map) -+ return -EINVAL; -+ if (copy_from_user(¶ms, a, sizeof(params))) -+ return -EFAULT; -+ down_write(¤t->mm->mmap_sem); -+ do_munmap(current->mm, params.ptr, PAGE_SIZE); -+ up_write(¤t->mm->mmap_sem); -+ free_page((unsigned long)phys_to_virt(params.phys_addr)); -+ priv->has_dma_map = 0; -+ return 0; -+ } -+ default: -+ pr_err("Bad ioctl cmd!\n"); -+ } -+ return -EINVAL; -+} -+ -+static const struct file_operations qbman_fops = { -+ .open = qbman_test_open, -+ .mmap = qbman_test_mmap, -+ .unlocked_ioctl = qbman_test_ioctl -+}; -+ -+static struct miscdevice qbman_miscdev = { -+ .name = "qbman-test", -+ .fops = &qbman_fops, -+ .minor = MISC_DYNAMIC_MINOR, -+}; -+ -+static int qbman_miscdev_init; -+ -+static int test_init(void) -+{ -+ int ret = qbman_test(); -+ -+ if (!ret) { -+ /* MC image supports the test cases, so instantiate the -+ * character devic that the user-space 
test case will use to do -+ * its memory mappings. */ -+ ret = misc_register(&qbman_miscdev); -+ if (ret) { -+ pr_err("qbman-test: failed to register misc device\n"); -+ return ret; -+ } -+ pr_info("qbman-test: misc device registered!\n"); -+ qbman_miscdev_init = 1; -+ } -+ return 0; -+} -+ -+static void test_exit(void) -+{ -+ if (qbman_miscdev_init) { -+ misc_deregister(&qbman_miscdev); -+ qbman_miscdev_init = 0; -+ } -+} -+ -+module_init(test_init); -+module_exit(test_exit); -diff --git a/drivers/staging/fsl-mc/bus/dpmcp-cmd.h b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h -new file mode 100644 -index 0000000..c9b52dd ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpmcp-cmd.h -@@ -0,0 +1,56 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPMCP_CMD_H -+#define _FSL_DPMCP_CMD_H -+ -+/* Minimal supported DPMCP Version */ -+#define DPMCP_MIN_VER_MAJOR 3 -+#define DPMCP_MIN_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPMCP_CMDID_CLOSE 0x800 -+#define DPMCP_CMDID_OPEN 0x80b -+#define DPMCP_CMDID_CREATE 0x90b -+#define DPMCP_CMDID_DESTROY 0x900 -+ -+#define DPMCP_CMDID_GET_ATTR 0x004 -+#define DPMCP_CMDID_RESET 0x005 -+ -+#define DPMCP_CMDID_SET_IRQ 0x010 -+#define DPMCP_CMDID_GET_IRQ 0x011 -+#define DPMCP_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPMCP_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPMCP_CMDID_SET_IRQ_MASK 0x014 -+#define DPMCP_CMDID_GET_IRQ_MASK 0x015 -+#define DPMCP_CMDID_GET_IRQ_STATUS 0x016 -+ -+#endif /* _FSL_DPMCP_CMD_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpmcp.c b/drivers/staging/fsl-mc/bus/dpmcp.c -new file mode 100644 -index 0000000..e23592a ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpmcp.c -@@ -0,0 +1,318 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "dpmcp.h" -+#include "dpmcp-cmd.h" -+ -+int dpmcp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmcp_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_OPEN, -+ cmd_flags, -+ 0); -+ cmd.params[0] |= mc_enc(0, 32, dpmcp_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return err; -+} -+ -+int dpmcp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CLOSE, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmcp_cfg *cfg, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_CREATE, -+ cmd_flags, -+ 0); -+ cmd.params[0] |= mc_enc(0, 32, cfg->portal_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+ -+int dpmcp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_DESTROY, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_RESET, -+ 
cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmcp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 8, irq_index); -+ cmd.params[0] |= mc_enc(32, 32, irq_cfg->val); -+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmcp_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ irq_cfg->paddr = (uint64_t)mc_dec(cmd.params[1], 0, 64); -+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); -+ *type = (int)mc_dec(cmd.params[2], 32, 32); -+ return 0; -+} -+ -+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 8, en); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ 
struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *en = (uint8_t)mc_dec(cmd.params[0], 0, 8); -+ return 0; -+} -+ -+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, mask); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+ -+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *mask = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ return 0; -+} -+ -+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *status = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ return 0; -+} -+ -+int 
dpmcp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmcp_attr *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMCP_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ attr->id = (int)mc_dec(cmd.params[0], 32, 32); -+ attr->version.major = (uint16_t)mc_dec(cmd.params[1], 0, 16); -+ attr->version.minor = (uint16_t)mc_dec(cmd.params[1], 16, 16); -+ return 0; -+} -diff --git a/drivers/staging/fsl-mc/bus/dpmcp.h b/drivers/staging/fsl-mc/bus/dpmcp.h -new file mode 100644 -index 0000000..e434a24 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpmcp.h -@@ -0,0 +1,323 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMCP_H -+#define __FSL_DPMCP_H -+ -+/* Data Path Management Command Portal API -+ * Contains initialization APIs and runtime control APIs for DPMCP -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpmcp_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpmcp_id: DPMCP unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpmcp_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmcp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpmcp_id, -+ uint16_t *token); -+ -+/* Get portal ID from pool */ -+#define DPMCP_GET_PORTAL_ID_FROM_POOL (-1) -+ -+/** -+ * dpmcp_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpmcp_cfg - Structure representing DPMCP configuration -+ * @portal_id: Portal ID; 'DPMCP_GET_PORTAL_ID_FROM_POOL' to get the portal ID -+ * from pool -+ */ -+struct dpmcp_cfg { -+ int portal_id; -+}; -+ -+/** -+ * dpmcp_create() - Create the DPMCP object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPMCP object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpmcp_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpmcp_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpmcp_destroy() - Destroy the DPMCP object and release all its resources. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpmcp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpmcp_reset() - Reset the DPMCP, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/* IRQ */ -+/* IRQ Index */ -+#define DPMCP_IRQ_INDEX 0 -+/* irq event - Indicates that the link state changed */ -+#define DPMCP_IRQ_EVENT_CMD_DONE 0x00000001 -+ -+/** -+ * struct dpmcp_irq_cfg - IRQ configuration -+ * @paddr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpmcp_irq_cfg { -+ uint64_t paddr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpmcp_set_irq() - Set IRQ information for the DPMCP to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpmcp_irq_cfg *irq_cfg); -+ -+/** -+ * dpmcp_get_irq() - Get IRQ information from the DPMCP. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpmcp_irq_cfg *irq_cfg); -+ -+/** -+ * dpmcp_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpmcp_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpmcp_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpmcp_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpmcp_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmcp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * struct dpmcp_attr - Structure representing DPMCP attributes -+ * @id: DPMCP object ID -+ * @version: DPMCP version -+ */ -+struct dpmcp_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPMCP version -+ * @major: DPMCP major version -+ * @minor: DPMCP minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpmcp_get_attributes - Retrieve DPMCP attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPMCP object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmcp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpmcp_attr *attr); -+ -+#endif /* __FSL_DPMCP_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpmng-cmd.h b/drivers/staging/fsl-mc/bus/dpmng-cmd.h -new file mode 100644 -index 0000000..ba8cfa9 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpmng-cmd.h -@@ -0,0 +1,47 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. 
-+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+/*************************************************************************//* -+ dpmng-cmd.h -+ -+ defines portal commands -+ -+ *//**************************************************************************/ -+ -+#ifndef __FSL_DPMNG_CMD_H -+#define __FSL_DPMNG_CMD_H -+ -+/* Command IDs */ -+#define DPMNG_CMDID_GET_CONT_ID 0x830 -+#define DPMNG_CMDID_GET_VERSION 0x831 -+ -+#endif /* __FSL_DPMNG_CMD_H */ -diff --git a/drivers/staging/fsl-mc/bus/dpmng.c b/drivers/staging/fsl-mc/bus/dpmng.c -new file mode 100644 -index 0000000..387390b ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dpmng.c -@@ -0,0 +1,85 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. 
-+* -+* Redistribution and use in source and binary forms, with or without -+* modification, are permitted provided that the following conditions are met: -+* * Redistributions of source code must retain the above copyright -+* notice, this list of conditions and the following disclaimer. -+* * Redistributions in binary form must reproduce the above copyright -+* notice, this list of conditions and the following disclaimer in the -+* documentation and/or other materials provided with the distribution. -+* * Neither the name of the above-listed copyright holders nor the -+* names of any contributors may be used to endorse or promote products -+* derived from this software without specific prior written permission. -+* -+* -+* ALTERNATIVELY, this software may be distributed under the terms of the -+* GNU General Public License ("GPL") as published by the Free Software -+* Foundation, either version 2 of that License or (at your option) any -+* later version. -+* -+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+* POSSIBILITY OF SUCH DAMAGE. 
-+*/ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dpmng.h" -+#include "dpmng-cmd.h" -+ -+int mc_get_version(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ struct mc_version *mc_ver_info) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_VERSION, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ mc_ver_info->revision = mc_dec(cmd.params[0], 0, 32); -+ mc_ver_info->major = mc_dec(cmd.params[0], 32, 32); -+ mc_ver_info->minor = mc_dec(cmd.params[1], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(mc_get_version); -+ -+int dpmng_get_container_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int *container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPMNG_CMDID_GET_CONT_ID, -+ cmd_flags, -+ 0); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *container_id = mc_dec(cmd.params[0], 0, 32); -+ -+ return 0; -+} -+ -diff --git a/drivers/staging/fsl-mc/bus/dprc-cmd.h b/drivers/staging/fsl-mc/bus/dprc-cmd.h -new file mode 100644 -index 0000000..9b854fa ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dprc-cmd.h -@@ -0,0 +1,87 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+/*************************************************************************//* -+ dprc-cmd.h -+ -+ defines dprc portal commands -+ -+ *//**************************************************************************/ -+ -+#ifndef _FSL_DPRC_CMD_H -+#define _FSL_DPRC_CMD_H -+ -+/* Minimal supported DPRC Version */ -+#define DPRC_MIN_VER_MAJOR 5 -+#define DPRC_MIN_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPRC_CMDID_CLOSE 0x800 -+#define DPRC_CMDID_OPEN 0x805 -+#define DPRC_CMDID_CREATE 0x905 -+ -+#define DPRC_CMDID_GET_ATTR 0x004 -+#define DPRC_CMDID_RESET_CONT 0x005 -+ -+#define DPRC_CMDID_SET_IRQ 0x010 -+#define DPRC_CMDID_GET_IRQ 0x011 -+#define DPRC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPRC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPRC_CMDID_SET_IRQ_MASK 0x014 -+#define DPRC_CMDID_GET_IRQ_MASK 0x015 -+#define DPRC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPRC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPRC_CMDID_CREATE_CONT 0x151 -+#define DPRC_CMDID_DESTROY_CONT 0x152 -+#define DPRC_CMDID_SET_RES_QUOTA 0x155 -+#define DPRC_CMDID_GET_RES_QUOTA 0x156 -+#define DPRC_CMDID_ASSIGN 0x157 -+#define DPRC_CMDID_UNASSIGN 0x158 -+#define DPRC_CMDID_GET_OBJ_COUNT 0x159 -+#define DPRC_CMDID_GET_OBJ 0x15A -+#define DPRC_CMDID_GET_RES_COUNT 0x15B -+#define DPRC_CMDID_GET_RES_IDS 0x15C -+#define DPRC_CMDID_GET_OBJ_REG 0x15E -+#define DPRC_CMDID_SET_OBJ_IRQ 0x15F -+#define DPRC_CMDID_GET_OBJ_IRQ 0x160 -+#define DPRC_CMDID_SET_OBJ_LABEL 0x161 -+#define DPRC_CMDID_GET_OBJ_DESC 0x162 -+ -+#define DPRC_CMDID_CONNECT 0x167 -+#define DPRC_CMDID_DISCONNECT 0x168 -+#define DPRC_CMDID_GET_POOL 0x169 -+#define DPRC_CMDID_GET_POOL_COUNT 0x16A -+ -+#define DPRC_CMDID_GET_CONNECTION 0x16C -+ -+#endif /* _FSL_DPRC_CMD_H */ -diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c -new file mode 100644 -index 0000000..f8d8cbe ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -0,0 +1,1084 @@ -+/* -+ * Freescale data path resource 
container (DPRC) driver -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#include "../include/mc-private.h" -+#include "../include/mc-sys.h" -+#include -+#include -+#include -+#include "dprc-cmd.h" -+#include "dpmcp.h" -+ -+struct dprc_child_objs { -+ int child_count; -+ struct dprc_obj_desc *child_array; -+}; -+ -+static int __fsl_mc_device_remove_if_not_in_mc(struct device *dev, void *data) -+{ -+ int i; -+ struct dprc_child_objs *objs; -+ struct fsl_mc_device *mc_dev; -+ -+ WARN_ON(!dev); -+ WARN_ON(!data); -+ mc_dev = to_fsl_mc_device(dev); -+ objs = data; -+ -+ for (i = 0; i < objs->child_count; i++) { -+ struct dprc_obj_desc *obj_desc = &objs->child_array[i]; -+ -+ if (strlen(obj_desc->type) != 0 && -+ FSL_MC_DEVICE_MATCH(mc_dev, obj_desc)) -+ break; -+ } -+ -+ if (i == objs->child_count) -+ fsl_mc_device_remove(mc_dev); -+ -+ return 0; -+} -+ -+static int __fsl_mc_device_remove(struct device *dev, void *data) -+{ -+ WARN_ON(!dev); -+ WARN_ON(data); -+ fsl_mc_device_remove(to_fsl_mc_device(dev)); -+ return 0; -+} -+ -+/** -+ * dprc_remove_devices - Removes devices for objects removed from a DPRC -+ * -+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object -+ * @obj_desc_array: array of object descriptors for child objects currently -+ * present in the DPRC in the MC. -+ * @num_child_objects_in_mc: number of entries in obj_desc_array -+ * -+ * Synchronizes the state of the Linux bus driver with the actual state of -+ * the MC by removing devices that represent MC objects that have -+ * been dynamically removed in the physical DPRC. 
-+ */ -+static void dprc_remove_devices(struct fsl_mc_device *mc_bus_dev, -+ struct dprc_obj_desc *obj_desc_array, -+ int num_child_objects_in_mc) -+{ -+ if (num_child_objects_in_mc != 0) { -+ /* -+ * Remove child objects that are in the DPRC in Linux, -+ * but not in the MC: -+ */ -+ struct dprc_child_objs objs; -+ -+ objs.child_count = num_child_objects_in_mc; -+ objs.child_array = obj_desc_array; -+ device_for_each_child(&mc_bus_dev->dev, &objs, -+ __fsl_mc_device_remove_if_not_in_mc); -+ } else { -+ /* -+ * There are no child objects for this DPRC in the MC. -+ * So, remove all the child devices from Linux: -+ */ -+ device_for_each_child(&mc_bus_dev->dev, NULL, -+ __fsl_mc_device_remove); -+ } -+} -+ -+static int __fsl_mc_device_match(struct device *dev, void *data) -+{ -+ struct dprc_obj_desc *obj_desc = data; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ -+ return FSL_MC_DEVICE_MATCH(mc_dev, obj_desc); -+} -+ -+static struct fsl_mc_device *fsl_mc_device_lookup(struct dprc_obj_desc -+ *obj_desc, -+ struct fsl_mc_device -+ *mc_bus_dev) -+{ -+ struct device *dev; -+ -+ dev = device_find_child(&mc_bus_dev->dev, obj_desc, -+ __fsl_mc_device_match); -+ -+ return dev ? to_fsl_mc_device(dev) : NULL; -+} -+ -+/** -+ * check_plugged_state_change - Check change in an MC object's plugged state -+ * -+ * @mc_dev: pointer to the fsl-mc device for a given MC object -+ * @obj_desc: pointer to the MC object's descriptor in the MC -+ * -+ * If the plugged state has changed from unplugged to plugged, the fsl-mc -+ * device is bound to the corresponding device driver. -+ * If the plugged state has changed from plugged to unplugged, the fsl-mc -+ * device is unbound from the corresponding device driver. 
-+ */ -+static void check_plugged_state_change(struct fsl_mc_device *mc_dev, -+ struct dprc_obj_desc *obj_desc) -+{ -+ int error; -+ uint32_t plugged_flag_at_mc = -+ (obj_desc->state & DPRC_OBJ_STATE_PLUGGED); -+ -+ if (plugged_flag_at_mc != -+ (mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED)) { -+ if (plugged_flag_at_mc) { -+ mc_dev->obj_desc.state |= DPRC_OBJ_STATE_PLUGGED; -+ error = device_attach(&mc_dev->dev); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "device_attach() failed: %d\n", -+ error); -+ } -+ } else { -+ mc_dev->obj_desc.state &= ~DPRC_OBJ_STATE_PLUGGED; -+ device_release_driver(&mc_dev->dev); -+ } -+ } -+} -+ -+/** -+ * dprc_add_new_devices - Adds devices to the logical bus for a DPRC -+ * -+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object -+ * @driver_override: driver override to apply to new objects found in the DPRC, -+ * or NULL, if none. -+ * @obj_desc_array: array of device descriptors for child devices currently -+ * present in the physical DPRC. -+ * @num_child_objects_in_mc: number of entries in obj_desc_array -+ * -+ * Synchronizes the state of the Linux bus driver with the actual -+ * state of the MC by adding objects that have been newly discovered -+ * in the physical DPRC. 
-+ */ -+static void dprc_add_new_devices(struct fsl_mc_device *mc_bus_dev, -+ const char *driver_override, -+ struct dprc_obj_desc *obj_desc_array, -+ int num_child_objects_in_mc) -+{ -+ int error; -+ int i; -+ -+ for (i = 0; i < num_child_objects_in_mc; i++) { -+ struct fsl_mc_device *child_dev; -+ struct dprc_obj_desc *obj_desc = &obj_desc_array[i]; -+ -+ if (strlen(obj_desc->type) == 0) -+ continue; -+ -+ /* -+ * Check if device is already known to Linux: -+ */ -+ child_dev = fsl_mc_device_lookup(obj_desc, mc_bus_dev); -+ if (child_dev) { -+ check_plugged_state_change(child_dev, obj_desc); -+ continue; -+ } -+ -+ error = fsl_mc_device_add(obj_desc, NULL, &mc_bus_dev->dev, -+ driver_override, &child_dev); -+ if (error < 0) -+ continue; -+ } -+} -+ -+void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev) -+{ -+ int pool_type; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ -+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) { -+ struct fsl_mc_resource_pool *res_pool = -+ &mc_bus->resource_pools[pool_type]; -+ -+ res_pool->type = pool_type; -+ res_pool->max_count = 0; -+ res_pool->free_count = 0; -+ res_pool->mc_bus = mc_bus; -+ INIT_LIST_HEAD(&res_pool->free_list); -+ mutex_init(&res_pool->mutex); -+ } -+} -+ -+static void dprc_cleanup_resource_pool(struct fsl_mc_device *mc_bus_dev, -+ enum fsl_mc_pool_type pool_type) -+{ -+ struct fsl_mc_resource *resource; -+ struct fsl_mc_resource *next; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ struct fsl_mc_resource_pool *res_pool = -+ &mc_bus->resource_pools[pool_type]; -+ int free_count = 0; -+ -+ WARN_ON(res_pool->type != pool_type); -+ WARN_ON(res_pool->free_count != res_pool->max_count); -+ -+ list_for_each_entry_safe(resource, next, &res_pool->free_list, node) { -+ free_count++; -+ WARN_ON(resource->type != res_pool->type); -+ WARN_ON(resource->parent_pool != res_pool); -+ devm_kfree(&mc_bus_dev->dev, resource); -+ } -+ -+ WARN_ON(free_count != 
res_pool->free_count); -+} -+ -+/* -+ * Clean up all resource pools other than the IRQ pool -+ */ -+void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev) -+{ -+ int pool_type; -+ -+ for (pool_type = 0; pool_type < FSL_MC_NUM_POOL_TYPES; pool_type++) { -+ if (pool_type != FSL_MC_POOL_IRQ) -+ dprc_cleanup_resource_pool(mc_bus_dev, pool_type); -+ } -+} -+ -+/** -+ * dprc_scan_objects - Discover objects in a DPRC -+ * -+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object -+ * @driver_override: driver override to apply to new objects found in the DPRC, -+ * or NULL, if none. -+ * @total_irq_count: total number of IRQs needed by objects in the DPRC. -+ * -+ * Detects objects added and removed from a DPRC and synchronizes the -+ * state of the Linux bus driver, MC by adding and removing -+ * devices accordingly. -+ * Two types of devices can be found in a DPRC: allocatable objects (e.g., -+ * dpbp, dpmcp) and non-allocatable devices (e.g., dprc, dpni). -+ * All allocatable devices needed to be probed before all non-allocatable -+ * devices, to ensure that device drivers for non-allocatable -+ * devices can allocate any type of allocatable devices. -+ * That is, we need to ensure that the corresponding resource pools are -+ * populated before they can get allocation requests from probe callbacks -+ * of the device drivers for the non-allocatable devices. 
-+ */ -+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, -+ const char *driver_override, -+ unsigned int *total_irq_count) -+{ -+ int num_child_objects; -+ int dprc_get_obj_failures; -+ int error; -+ unsigned int irq_count = mc_bus_dev->obj_desc.irq_count; -+ struct dprc_obj_desc *child_obj_desc_array = NULL; -+ -+ error = dprc_get_obj_count(mc_bus_dev->mc_io, -+ 0, -+ mc_bus_dev->mc_handle, -+ &num_child_objects); -+ if (error < 0) { -+ dev_err(&mc_bus_dev->dev, "dprc_get_obj_count() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ if (num_child_objects != 0) { -+ int i; -+ -+ child_obj_desc_array = -+ devm_kmalloc_array(&mc_bus_dev->dev, num_child_objects, -+ sizeof(*child_obj_desc_array), -+ GFP_KERNEL); -+ if (!child_obj_desc_array) -+ return -ENOMEM; -+ -+ /* -+ * Discover objects currently present in the physical DPRC: -+ */ -+ dprc_get_obj_failures = 0; -+ for (i = 0; i < num_child_objects; i++) { -+ struct dprc_obj_desc *obj_desc = -+ &child_obj_desc_array[i]; -+ -+ error = dprc_get_obj(mc_bus_dev->mc_io, -+ 0, -+ mc_bus_dev->mc_handle, -+ i, obj_desc); -+ -+ /* -+ * -ENXIO means object index was invalid. -+ * This is caused when the DPRC was changed at -+ * the MC during the scan. In this case, -+ * abort the current scan. -+ */ -+ if (error == -ENXIO) -+ return error; -+ -+ if (error < 0) { -+ dev_err(&mc_bus_dev->dev, -+ "dprc_get_obj(i=%d) failed: %d\n", -+ i, error); -+ /* -+ * Mark the obj entry as "invalid", by using the -+ * empty string as obj type: -+ */ -+ obj_desc->type[0] = '\0'; -+ obj_desc->id = error; -+ dprc_get_obj_failures++; -+ continue; -+ } -+ -+ /* -+ * for DPRC versions that do not support the -+ * shareability attribute, make simplifying assumption -+ * that only SEC is not shareable. 
-+ */ -+ if ((strcmp(obj_desc->type, "dpseci") == 0) && -+ (obj_desc->ver_major < 4)) -+ obj_desc->flags |= -+ DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY; -+ -+ irq_count += obj_desc->irq_count; -+ dev_dbg(&mc_bus_dev->dev, -+ "Discovered object: type %s, id %d\n", -+ obj_desc->type, obj_desc->id); -+ } -+ -+ if (dprc_get_obj_failures != 0) { -+ dev_err(&mc_bus_dev->dev, -+ "%d out of %d devices could not be retrieved\n", -+ dprc_get_obj_failures, num_child_objects); -+ } -+ } -+ -+ *total_irq_count = irq_count; -+ dprc_remove_devices(mc_bus_dev, child_obj_desc_array, -+ num_child_objects); -+ -+ dprc_add_new_devices(mc_bus_dev, driver_override, child_obj_desc_array, -+ num_child_objects); -+ -+ if (child_obj_desc_array) -+ devm_kfree(&mc_bus_dev->dev, child_obj_desc_array); -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(dprc_scan_objects); -+ -+/** -+ * dprc_scan_container - Scans a physical DPRC and synchronizes Linux bus state -+ * -+ * @mc_bus_dev: pointer to the fsl-mc device that represents a DPRC object -+ * -+ * Scans the physical DPRC and synchronizes the state of the Linux -+ * bus driver with the actual state of the MC by adding and removing -+ * devices as appropriate. 
-+ */ -+static int dprc_scan_container(struct fsl_mc_device *mc_bus_dev) -+{ -+ int error; -+ unsigned int irq_count; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ -+ dprc_init_all_resource_pools(mc_bus_dev); -+ -+ /* -+ * Discover objects in the DPRC: -+ */ -+ mutex_lock(&mc_bus->scan_mutex); -+ error = dprc_scan_objects(mc_bus_dev, NULL, &irq_count); -+ mutex_unlock(&mc_bus->scan_mutex); -+ if (error < 0) -+ goto error; -+ -+ if (fsl_mc_interrupts_supported() && !mc_bus->irq_resources) { -+ irq_count += FSL_MC_IRQ_POOL_MAX_EXTRA_IRQS; -+ error = fsl_mc_populate_irq_pool(mc_bus, irq_count); -+ if (error < 0) -+ goto error; -+ } -+ -+ return 0; -+error: -+ device_for_each_child(&mc_bus_dev->dev, NULL, __fsl_mc_device_remove); -+ dprc_cleanup_all_resource_pools(mc_bus_dev); -+ return error; -+} -+ -+/** -+ * dprc_irq0_handler - Regular ISR for DPRC interrupt 0 -+ * -+ * @irq: IRQ number of the interrupt being handled -+ * @arg: Pointer to device structure -+ */ -+static irqreturn_t dprc_irq0_handler(int irq_num, void *arg) -+{ -+ return IRQ_WAKE_THREAD; -+} -+ -+/** -+ * dprc_irq0_handler_thread - Handler thread function for DPRC interrupt 0 -+ * -+ * @irq: IRQ number of the interrupt being handled -+ * @arg: Pointer to device structure -+ */ -+static irqreturn_t dprc_irq0_handler_thread(int irq_num, void *arg) -+{ -+ int error; -+ uint32_t status; -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); -+ struct fsl_mc_io *mc_io = mc_dev->mc_io; -+ int irq_index = 0; -+ -+ dev_dbg(dev, "DPRC IRQ %d triggered on CPU %u\n", -+ irq_num, smp_processor_id()); -+ if (WARN_ON(!(mc_dev->flags & FSL_MC_IS_DPRC))) -+ return IRQ_HANDLED; -+ -+ mutex_lock(&mc_bus->scan_mutex); -+ if (WARN_ON(mc_dev->irqs[irq_index]->irq_number != (uint32_t)irq_num)) -+ goto out; -+ -+ status = 0; -+ error = dprc_get_irq_status(mc_io, 0, mc_dev->mc_handle, irq_index, -+ &status); 
-+ if (error < 0) { -+ dev_err(dev, -+ "dprc_get_irq_status() failed: %d\n", error); -+ goto out; -+ } -+ -+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, irq_index, -+ status); -+ if (error < 0) { -+ dev_err(dev, -+ "dprc_clear_irq_status() failed: %d\n", error); -+ goto out; -+ } -+ -+ if (status & (DPRC_IRQ_EVENT_OBJ_ADDED | -+ DPRC_IRQ_EVENT_OBJ_REMOVED | -+ DPRC_IRQ_EVENT_CONTAINER_DESTROYED | -+ DPRC_IRQ_EVENT_OBJ_DESTROYED | -+ DPRC_IRQ_EVENT_OBJ_CREATED)) { -+ unsigned int irq_count; -+ -+ error = dprc_scan_objects(mc_dev, NULL, &irq_count); -+ if (error < 0) { -+ if (error != -ENXIO) /* don't need to report aborted scan */ -+ dev_err(dev, "dprc_scan_objects() failed: %d\n", error); -+ goto out; -+ } -+ -+ WARN_ON((int16_t)irq_count < 0); -+ -+ if ((int16_t)irq_count > -+ mc_bus->resource_pools[FSL_MC_POOL_IRQ].max_count) { -+ dev_warn(dev, -+ "IRQs needed (%u) exceed IRQs preallocated (%u)\n", -+ irq_count, -+ mc_bus->resource_pools[FSL_MC_POOL_IRQ]. -+ max_count); -+ } -+ } -+ -+out: -+ mutex_unlock(&mc_bus->scan_mutex); -+ return IRQ_HANDLED; -+} -+ -+/* -+ * Disable and clear interrupts for a given DPRC object -+ */ -+static int disable_dprc_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ int error; -+ struct fsl_mc_io *mc_io = mc_dev->mc_io; -+ int irq_count = mc_dev->obj_desc.irq_count; -+ -+ if (WARN_ON(irq_count == 0)) -+ return -EINVAL; -+ -+ for (i = 0; i < irq_count; i++) { -+ /* -+ * Disable generation of interrupt i, while we configure it: -+ */ -+ error = dprc_set_irq_enable(mc_io, 0, mc_dev->mc_handle, i, 0); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Disabling DPRC IRQ %d failed: dprc_set_irq_enable() failed: %d\n", -+ i, error); -+ -+ return error; -+ } -+ -+ /* -+ * Disable all interrupt causes for interrupt i: -+ */ -+ error = dprc_set_irq_mask(mc_io, 0, mc_dev->mc_handle, i, 0x0); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Disabling DPRC IRQ %d failed: dprc_set_irq_mask() failed: %d\n", -+ i, error); -+ -+ 
return error; -+ } -+ -+ /* -+ * Clear any leftover interrupt i: -+ */ -+ error = dprc_clear_irq_status(mc_io, 0, mc_dev->mc_handle, i, -+ ~0x0U); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Disabling DPRC IRQ %d failed: dprc_clear_irq_status() failed: %d\n", -+ i, error); -+ -+ return error; -+ } -+ } -+ -+ return 0; -+} -+ -+static void unregister_dprc_irq_handlers(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ struct fsl_mc_device_irq *irq; -+ int irq_count = mc_dev->obj_desc.irq_count; -+ -+ for (i = 0; i < irq_count; i++) { -+ irq = mc_dev->irqs[i]; -+ devm_free_irq(&mc_dev->dev, irq->irq_number, -+ &mc_dev->dev); -+ } -+} -+ -+static int register_dprc_irq_handlers(struct fsl_mc_device *mc_dev) -+{ -+ static const struct irq_handler { -+ irq_handler_t irq_handler; -+ irq_handler_t irq_handler_thread; -+ const char *irq_name; -+ } irq_handlers[] = { -+ [0] = { -+ .irq_handler = dprc_irq0_handler, -+ .irq_handler_thread = dprc_irq0_handler_thread, -+ .irq_name = "FSL MC DPRC irq0", -+ }, -+ }; -+ -+ unsigned int i; -+ int error; -+ struct fsl_mc_device_irq *irq; -+ unsigned int num_irq_handlers_registered = 0; -+ int irq_count = mc_dev->obj_desc.irq_count; -+ -+ if (WARN_ON(irq_count != ARRAY_SIZE(irq_handlers))) -+ return -EINVAL; -+ -+ for (i = 0; i < ARRAY_SIZE(irq_handlers); i++) { -+ irq = mc_dev->irqs[i]; -+ -+ /* -+ * NOTE: devm_request_threaded_irq() invokes the device-specific -+ * function that programs the MSI physically in the device -+ */ -+ error = devm_request_threaded_irq(&mc_dev->dev, -+ irq->irq_number, -+ irq_handlers[i].irq_handler, -+ irq_handlers[i]. 
-+ irq_handler_thread, -+ IRQF_NO_SUSPEND | -+ IRQF_ONESHOT, -+ irq_handlers[i].irq_name, -+ &mc_dev->dev); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "devm_request_threaded_irq() failed: %d\n", -+ error); -+ goto error_unregister_irq_handlers; -+ } -+ -+ num_irq_handlers_registered++; -+ } -+ -+ return 0; -+ -+error_unregister_irq_handlers: -+ for (i = 0; i < num_irq_handlers_registered; i++) { -+ irq = mc_dev->irqs[i]; -+ devm_free_irq(&mc_dev->dev, irq->irq_number, -+ &mc_dev->dev); -+ } -+ -+ return error; -+} -+ -+static int enable_dprc_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ int error; -+ int irq_count = mc_dev->obj_desc.irq_count; -+ -+ for (i = 0; i < irq_count; i++) { -+ /* -+ * Enable all interrupt causes for the interrupt: -+ */ -+ error = dprc_set_irq_mask(mc_dev->mc_io, -+ 0, -+ mc_dev->mc_handle, -+ i, -+ ~0x0u); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Enabling DPRC IRQ %d failed: dprc_set_irq_mask() failed: %d\n", -+ i, error); -+ -+ return error; -+ } -+ -+ /* -+ * Enable generation of the interrupt: -+ */ -+ error = dprc_set_irq_enable(mc_dev->mc_io, -+ 0, -+ mc_dev->mc_handle, -+ i, 1); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, -+ "Enabling DPRC IRQ %d failed: dprc_set_irq_enable() failed: %d\n", -+ i, error); -+ -+ return error; -+ } -+ } -+ -+ return 0; -+} -+ -+/* -+ * Setup interrupts for a given DPRC device -+ */ -+static int dprc_setup_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ -+ error = fsl_mc_allocate_irqs(mc_dev); -+ if (error < 0) -+ return error; -+ -+ error = disable_dprc_irqs(mc_dev); -+ if (error < 0) -+ goto error_free_irqs; -+ -+ error = register_dprc_irq_handlers(mc_dev); -+ if (error < 0) -+ goto error_free_irqs; -+ -+ error = enable_dprc_irqs(mc_dev); -+ if (error < 0) -+ goto error_unregister_irq_handlers; -+ -+ return 0; -+ -+error_unregister_irq_handlers: -+ unregister_dprc_irq_handlers(mc_dev); -+ -+error_free_irqs: -+ fsl_mc_free_irqs(mc_dev); -+ return error; -+} -+ -+/* -+ * 
Creates a DPMCP for a DPRC's built-in MC portal -+ */ -+static int dprc_create_dpmcp(struct fsl_mc_device *dprc_dev) -+{ -+ int error; -+ struct dpmcp_cfg dpmcp_cfg; -+ uint16_t dpmcp_handle; -+ struct dprc_res_req res_req; -+ struct dpmcp_attr dpmcp_attr; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(dprc_dev); -+ -+ dpmcp_cfg.portal_id = mc_bus->dprc_attr.portal_id; -+ error = dpmcp_create(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ &dpmcp_cfg, -+ &dpmcp_handle); -+ if (error < 0) { -+ dev_err(&dprc_dev->dev, "dpmcp_create() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ /* -+ * Set the state of the newly created DPMCP object to be "plugged": -+ */ -+ -+ error = dpmcp_get_attributes(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_handle, -+ &dpmcp_attr); -+ if (error < 0) { -+ dev_err(&dprc_dev->dev, "dpmcp_get_attributes() failed: %d\n", -+ error); -+ goto error_destroy_dpmcp; -+ } -+ -+ if (WARN_ON(dpmcp_attr.id != mc_bus->dprc_attr.portal_id)) { -+ error = -EINVAL; -+ goto error_destroy_dpmcp; -+ } -+ -+ strcpy(res_req.type, "dpmcp"); -+ res_req.num = 1; -+ res_req.options = -+ (DPRC_RES_REQ_OPT_EXPLICIT | DPRC_RES_REQ_OPT_PLUGGED); -+ res_req.id_base_align = dpmcp_attr.id; -+ -+ error = dprc_assign(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dprc_dev->mc_handle, -+ dprc_dev->obj_desc.id, -+ &res_req); -+ -+ if (error < 0) { -+ dev_err(&dprc_dev->dev, "dprc_assign() failed: %d\n", error); -+ goto error_destroy_dpmcp; -+ } -+ -+ (void)dpmcp_close(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_handle); -+ return 0; -+ -+error_destroy_dpmcp: -+ (void)dpmcp_destroy(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_handle); -+ return error; -+} -+ -+/* -+ * Destroys the DPMCP for a DPRC's built-in MC portal -+ */ -+static void dprc_destroy_dpmcp(struct fsl_mc_device *dprc_dev) -+{ -+ int error; -+ uint16_t dpmcp_handle; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(dprc_dev); -+ -+ if (WARN_ON(!dprc_dev->mc_io || dprc_dev->mc_io->dpmcp_dev)) -+ 
return; -+ -+ error = dpmcp_open(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ mc_bus->dprc_attr.portal_id, -+ &dpmcp_handle); -+ if (error < 0) { -+ dev_err(&dprc_dev->dev, "dpmcp_open() failed: %d\n", -+ error); -+ return; -+ } -+ -+ error = dpmcp_destroy(dprc_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_handle); -+ if (error < 0) { -+ dev_err(&dprc_dev->dev, "dpmcp_destroy() failed: %d\n", -+ error); -+ return; -+ } -+} -+ -+/** -+ * dprc_probe - callback invoked when a DPRC is being bound to this driver -+ * -+ * @mc_dev: Pointer to fsl-mc device representing a DPRC -+ * -+ * It opens the physical DPRC in the MC. -+ * It scans the DPRC to discover the MC objects contained in it. -+ * It creates the interrupt pool for the MC bus associated with the DPRC. -+ * It configures the interrupts for the DPRC device itself. -+ */ -+static int dprc_probe(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ size_t region_size; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); -+ bool mc_io_created = false; -+ bool dev_root_set = false; -+ -+ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) -+ return -EINVAL; -+ -+ if (mc_dev->mc_io) { -+ /* -+ * This is the root DPRC -+ */ -+ if (WARN_ON(fsl_mc_bus_type.dev_root)) -+ return -EINVAL; -+ -+ fsl_mc_bus_type.dev_root = &mc_dev->dev; -+ dev_root_set = true; -+ } else { -+ /* -+ * This is a child DPRC -+ */ -+ if (WARN_ON(!fsl_mc_bus_type.dev_root)) -+ return -EINVAL; -+ -+ if (WARN_ON(mc_dev->obj_desc.region_count == 0)) -+ return -EINVAL; -+ -+ region_size = mc_dev->regions[0].end - -+ mc_dev->regions[0].start + 1; -+ -+ error = fsl_create_mc_io(&mc_dev->dev, -+ mc_dev->regions[0].start, -+ region_size, -+ NULL, 0, &mc_dev->mc_io); -+ if (error < 0) -+ return error; -+ -+ mc_io_created = true; -+ } -+ -+ error = dprc_open(mc_dev->mc_io, 0, mc_dev->obj_desc.id, -+ &mc_dev->mc_handle); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", error); -+ goto error_cleanup_mc_io; -+ } -+ -+ error = 
dprc_get_attributes(mc_dev->mc_io, 0, mc_dev->mc_handle, -+ &mc_bus->dprc_attr); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, "dprc_get_attributes() failed: %d\n", -+ error); -+ goto error_cleanup_open; -+ } -+ -+ if (mc_bus->dprc_attr.version.major < DPRC_MIN_VER_MAJOR || -+ (mc_bus->dprc_attr.version.major == DPRC_MIN_VER_MAJOR && -+ mc_bus->dprc_attr.version.minor < DPRC_MIN_VER_MINOR)) { -+ dev_err(&mc_dev->dev, -+ "ERROR: DPRC version %d.%d not supported\n", -+ mc_bus->dprc_attr.version.major, -+ mc_bus->dprc_attr.version.minor); -+ error = -ENOTSUPP; -+ goto error_cleanup_open; -+ } -+ -+ if (fsl_mc_interrupts_supported()) { -+ /* -+ * Create DPMCP for the DPRC's built-in portal: -+ */ -+ error = dprc_create_dpmcp(mc_dev); -+ if (error < 0) -+ goto error_cleanup_open; -+ } -+ -+ mutex_init(&mc_bus->scan_mutex); -+ -+ /* -+ * Discover MC objects in the DPRC object: -+ */ -+ error = dprc_scan_container(mc_dev); -+ if (error < 0) -+ goto error_destroy_dpmcp; -+ -+ if (fsl_mc_interrupts_supported()) { -+ /* -+ * The fsl_mc_device object associated with the DPMCP object -+ * created above was created as part of the -+ * dprc_scan_container() call above: -+ */ -+ if (WARN_ON(!mc_dev->mc_io->dpmcp_dev)) { -+ error = -EINVAL; -+ goto error_cleanup_dprc_scan; -+ } -+ -+ /* -+ * Allocate MC portal to be used in atomic context -+ * (e.g., to program MSIs from program_msi_at_mc()) -+ */ -+ error = fsl_mc_portal_allocate(NULL, -+ FSL_MC_IO_ATOMIC_CONTEXT_PORTAL, -+ &mc_bus->atomic_mc_io); -+ if (error < 0) -+ goto error_cleanup_dprc_scan; -+ -+ pr_info("fsl-mc: Allocated dpmcp.%d to dprc.%d for atomic MC I/O\n", -+ mc_bus->atomic_mc_io->dpmcp_dev->obj_desc.id, -+ mc_dev->obj_desc.id); -+ -+ /* -+ * Open DPRC handle to be used with mc_bus->atomic_mc_io: -+ */ -+ error = dprc_open(mc_bus->atomic_mc_io, 0, mc_dev->obj_desc.id, -+ &mc_bus->atomic_dprc_handle); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, "dprc_open() failed: %d\n", -+ error); -+ goto 
error_cleanup_atomic_mc_io; -+ } -+ -+ /* -+ * Configure interrupt for the DPMCP object associated with the -+ * DPRC object's built-in portal: -+ * -+ * NOTE: We have to do this after calling dprc_scan_container(), -+ * since dprc_scan_container() populates the IRQ pool for -+ * this DPRC. -+ */ -+ error = fsl_mc_io_setup_dpmcp_irq(mc_dev->mc_io); -+ if (error < 0) -+ goto error_cleanup_atomic_dprc_handle; -+ -+ /* -+ * Configure interrupts for the DPRC object associated with -+ * this MC bus: -+ */ -+ error = dprc_setup_irqs(mc_dev); -+ if (error < 0) -+ goto error_cleanup_atomic_dprc_handle; -+ } -+ -+ dev_info(&mc_dev->dev, "DPRC device bound to driver"); -+ return 0; -+ -+error_cleanup_atomic_dprc_handle: -+ (void)dprc_close(mc_bus->atomic_mc_io, 0, mc_bus->atomic_dprc_handle); -+ -+error_cleanup_atomic_mc_io: -+ fsl_mc_portal_free(mc_bus->atomic_mc_io); -+ -+error_cleanup_dprc_scan: -+ fsl_mc_io_unset_dpmcp(mc_dev->mc_io); -+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); -+ dprc_cleanup_all_resource_pools(mc_dev); -+ if (fsl_mc_interrupts_supported()) -+ fsl_mc_cleanup_irq_pool(mc_bus); -+ -+error_destroy_dpmcp: -+ dprc_destroy_dpmcp(mc_dev); -+ -+error_cleanup_open: -+ (void)dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -+ -+error_cleanup_mc_io: -+ if (mc_io_created) { -+ fsl_destroy_mc_io(mc_dev->mc_io); -+ mc_dev->mc_io = NULL; -+ } -+ -+ if (dev_root_set) -+ fsl_mc_bus_type.dev_root = NULL; -+ -+ return error; -+} -+ -+/* -+ * Tear down interrupts for a given DPRC object -+ */ -+static void dprc_teardown_irqs(struct fsl_mc_device *mc_dev) -+{ -+ (void)disable_dprc_irqs(mc_dev); -+ unregister_dprc_irq_handlers(mc_dev); -+ fsl_mc_free_irqs(mc_dev); -+} -+ -+/** -+ * dprc_remove - callback invoked when a DPRC is being unbound from this driver -+ * -+ * @mc_dev: Pointer to fsl-mc device representing the DPRC -+ * -+ * It removes the DPRC's child objects from Linux (not from the MC) and -+ * closes the DPRC device in the MC. 
-+ * It tears down the interrupts that were configured for the DPRC device. -+ * It destroys the interrupt pool associated with this MC bus. -+ */ -+static int dprc_remove(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_dev); -+ -+ if (WARN_ON(strcmp(mc_dev->obj_desc.type, "dprc") != 0)) -+ return -EINVAL; -+ if (WARN_ON(!mc_dev->mc_io)) -+ return -EINVAL; -+ -+ if (WARN_ON(!mc_bus->irq_resources)) -+ return -EINVAL; -+ -+ if (fsl_mc_interrupts_supported()) { -+ dprc_teardown_irqs(mc_dev); -+ error = dprc_close(mc_bus->atomic_mc_io, 0, -+ mc_bus->atomic_dprc_handle); -+ if (error < 0) { -+ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", -+ error); -+ } -+ -+ fsl_mc_portal_free(mc_bus->atomic_mc_io); -+ } -+ -+ fsl_mc_io_unset_dpmcp(mc_dev->mc_io); -+ device_for_each_child(&mc_dev->dev, NULL, __fsl_mc_device_remove); -+ dprc_cleanup_all_resource_pools(mc_dev); -+ dprc_destroy_dpmcp(mc_dev); -+ error = dprc_close(mc_dev->mc_io, 0, mc_dev->mc_handle); -+ if (error < 0) -+ dev_err(&mc_dev->dev, "dprc_close() failed: %d\n", error); -+ -+ if (fsl_mc_interrupts_supported()) -+ fsl_mc_cleanup_irq_pool(mc_bus); -+ -+ fsl_destroy_mc_io(mc_dev->mc_io); -+ mc_dev->mc_io = NULL; -+ -+ if (&mc_dev->dev == fsl_mc_bus_type.dev_root) -+ fsl_mc_bus_type.dev_root = NULL; -+ -+ dev_info(&mc_dev->dev, "DPRC device unbound from driver"); -+ return 0; -+} -+ -+static const struct fsl_mc_device_match_id match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dprc"}, -+ {.vendor = 0x0}, -+}; -+ -+static struct fsl_mc_driver dprc_driver = { -+ .driver = { -+ .name = FSL_MC_DPRC_DRIVER_NAME, -+ .owner = THIS_MODULE, -+ .pm = NULL, -+ }, -+ .match_id_table = match_id_table, -+ .probe = dprc_probe, -+ .remove = dprc_remove, -+}; -+ -+int __init dprc_driver_init(void) -+{ -+ return fsl_mc_driver_register(&dprc_driver); -+} -+ -+void __exit dprc_driver_exit(void) -+{ -+ fsl_mc_driver_unregister(&dprc_driver); -+} -diff 
--git a/drivers/staging/fsl-mc/bus/dprc.c b/drivers/staging/fsl-mc/bus/dprc.c -new file mode 100644 -index 0000000..4d86438 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/dprc.c -@@ -0,0 +1,1218 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+* -+* Redistribution and use in source and binary forms, with or without -+* modification, are permitted provided that the following conditions are met: -+* * Redistributions of source code must retain the above copyright -+* notice, this list of conditions and the following disclaimer. -+* * Redistributions in binary form must reproduce the above copyright -+* notice, this list of conditions and the following disclaimer in the -+* documentation and/or other materials provided with the distribution. -+* * Neither the name of the above-listed copyright holders nor the -+* names of any contributors may be used to endorse or promote products -+* derived from this software without specific prior written permission. -+* -+* -+* ALTERNATIVELY, this software may be distributed under the terms of the -+* GNU General Public License ("GPL") as published by the Free Software -+* Foundation, either version 2 of that License or (at your option) any -+* later version. -+* -+* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+* ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+* POSSIBILITY OF SUCH DAMAGE. -+*/ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dprc.h" -+#include "dprc-cmd.h" -+ -+int dprc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int container_id, -+ uint16_t *token) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_OPEN, cmd_flags, -+ 0); -+ cmd.params[0] |= mc_enc(0, 32, container_id); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *token = MC_CMD_HDR_READ_TOKEN(cmd.header); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_open); -+ -+int dprc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLOSE, cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_close); -+ -+int dprc_create_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_cfg *cfg, -+ int *child_container_id, -+ uint64_t *child_portal_offset) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.params[0] |= mc_enc(32, 16, cfg->icid); -+ cmd.params[0] |= mc_enc(0, 32, cfg->options); -+ cmd.params[1] |= mc_enc(32, 32, cfg->portal_id); -+ cmd.params[2] |= mc_enc(0, 8, 
cfg->label[0]); -+ cmd.params[2] |= mc_enc(8, 8, cfg->label[1]); -+ cmd.params[2] |= mc_enc(16, 8, cfg->label[2]); -+ cmd.params[2] |= mc_enc(24, 8, cfg->label[3]); -+ cmd.params[2] |= mc_enc(32, 8, cfg->label[4]); -+ cmd.params[2] |= mc_enc(40, 8, cfg->label[5]); -+ cmd.params[2] |= mc_enc(48, 8, cfg->label[6]); -+ cmd.params[2] |= mc_enc(56, 8, cfg->label[7]); -+ cmd.params[3] |= mc_enc(0, 8, cfg->label[8]); -+ cmd.params[3] |= mc_enc(8, 8, cfg->label[9]); -+ cmd.params[3] |= mc_enc(16, 8, cfg->label[10]); -+ cmd.params[3] |= mc_enc(24, 8, cfg->label[11]); -+ cmd.params[3] |= mc_enc(32, 8, cfg->label[12]); -+ cmd.params[3] |= mc_enc(40, 8, cfg->label[13]); -+ cmd.params[3] |= mc_enc(48, 8, cfg->label[14]); -+ cmd.params[3] |= mc_enc(56, 8, cfg->label[15]); -+ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CREATE_CONT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *child_container_id = mc_dec(cmd.params[1], 0, 32); -+ *child_portal_offset = mc_dec(cmd.params[2], 0, 64); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_create_container); -+ -+int dprc_destroy_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DESTROY_CONT, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, child_container_id); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_destroy_container); -+ -+int dprc_reset_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_RESET_CONT, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, child_container_id); -+ -+ /* send command to mc*/ -+ return 
mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_reset_container); -+ -+int dprc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ irq_cfg->val = mc_dec(cmd.params[0], 0, 32); -+ irq_cfg->paddr = mc_dec(cmd.params[1], 0, 64); -+ irq_cfg->irq_num = mc_dec(cmd.params[2], 0, 32); -+ *type = mc_dec(cmd.params[2], 32, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_irq); -+ -+int dprc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); -+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_irq); -+ -+int dprc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *en = mc_dec(cmd.params[0], 0, 8); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_irq_enable); -+ 
-+int dprc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_ENABLE, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 8, en); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_irq_enable); -+ -+int dprc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_MASK, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *mask = mc_dec(cmd.params[0], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_irq_mask); -+ -+int dprc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_IRQ_MASK, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, mask); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_irq_mask); -+ -+int dprc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_IRQ_STATUS, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, *status); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ err = 
mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *status = mc_dec(cmd.params[0], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_irq_status); -+ -+int dprc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CLEAR_IRQ_STATUS, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, status); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_clear_irq_status); -+ -+int dprc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_attributes *attr) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_ATTR, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ attr->container_id = mc_dec(cmd.params[0], 0, 32); -+ attr->icid = mc_dec(cmd.params[0], 32, 16); -+ attr->options = mc_dec(cmd.params[1], 0, 32); -+ attr->portal_id = mc_dec(cmd.params[1], 32, 32); -+ attr->version.major = mc_dec(cmd.params[2], 0, 16); -+ attr->version.minor = mc_dec(cmd.params[2], 16, 16); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_attributes); -+ -+int dprc_set_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t quota) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_RES_QUOTA, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, child_container_id); -+ cmd.params[0] |= mc_enc(32, 16, quota); -+ cmd.params[1] |= mc_enc(0, 8, type[0]); -+ cmd.params[1] |= mc_enc(8, 8, type[1]); -+ 
cmd.params[1] |= mc_enc(16, 8, type[2]); -+ cmd.params[1] |= mc_enc(24, 8, type[3]); -+ cmd.params[1] |= mc_enc(32, 8, type[4]); -+ cmd.params[1] |= mc_enc(40, 8, type[5]); -+ cmd.params[1] |= mc_enc(48, 8, type[6]); -+ cmd.params[1] |= mc_enc(56, 8, type[7]); -+ cmd.params[2] |= mc_enc(0, 8, type[8]); -+ cmd.params[2] |= mc_enc(8, 8, type[9]); -+ cmd.params[2] |= mc_enc(16, 8, type[10]); -+ cmd.params[2] |= mc_enc(24, 8, type[11]); -+ cmd.params[2] |= mc_enc(32, 8, type[12]); -+ cmd.params[2] |= mc_enc(40, 8, type[13]); -+ cmd.params[2] |= mc_enc(48, 8, type[14]); -+ cmd.params[2] |= mc_enc(56, 8, '\0'); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_res_quota); -+ -+int dprc_get_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t *quota) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_QUOTA, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, child_container_id); -+ cmd.params[1] |= mc_enc(0, 8, type[0]); -+ cmd.params[1] |= mc_enc(8, 8, type[1]); -+ cmd.params[1] |= mc_enc(16, 8, type[2]); -+ cmd.params[1] |= mc_enc(24, 8, type[3]); -+ cmd.params[1] |= mc_enc(32, 8, type[4]); -+ cmd.params[1] |= mc_enc(40, 8, type[5]); -+ cmd.params[1] |= mc_enc(48, 8, type[6]); -+ cmd.params[1] |= mc_enc(56, 8, type[7]); -+ cmd.params[2] |= mc_enc(0, 8, type[8]); -+ cmd.params[2] |= mc_enc(8, 8, type[9]); -+ cmd.params[2] |= mc_enc(16, 8, type[10]); -+ cmd.params[2] |= mc_enc(24, 8, type[11]); -+ cmd.params[2] |= mc_enc(32, 8, type[12]); -+ cmd.params[2] |= mc_enc(40, 8, type[13]); -+ cmd.params[2] |= mc_enc(48, 8, type[14]); -+ cmd.params[2] |= mc_enc(56, 8, '\0'); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *quota = mc_dec(cmd.params[0], 32, 16); -+ -+ return 0; 
-+} -+EXPORT_SYMBOL(dprc_get_res_quota); -+ -+int dprc_assign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int container_id, -+ struct dprc_res_req *res_req) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_ASSIGN, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, container_id); -+ cmd.params[0] |= mc_enc(32, 32, res_req->options); -+ cmd.params[1] |= mc_enc(0, 32, res_req->num); -+ cmd.params[1] |= mc_enc(32, 32, res_req->id_base_align); -+ cmd.params[2] |= mc_enc(0, 8, res_req->type[0]); -+ cmd.params[2] |= mc_enc(8, 8, res_req->type[1]); -+ cmd.params[2] |= mc_enc(16, 8, res_req->type[2]); -+ cmd.params[2] |= mc_enc(24, 8, res_req->type[3]); -+ cmd.params[2] |= mc_enc(32, 8, res_req->type[4]); -+ cmd.params[2] |= mc_enc(40, 8, res_req->type[5]); -+ cmd.params[2] |= mc_enc(48, 8, res_req->type[6]); -+ cmd.params[2] |= mc_enc(56, 8, res_req->type[7]); -+ cmd.params[3] |= mc_enc(0, 8, res_req->type[8]); -+ cmd.params[3] |= mc_enc(8, 8, res_req->type[9]); -+ cmd.params[3] |= mc_enc(16, 8, res_req->type[10]); -+ cmd.params[3] |= mc_enc(24, 8, res_req->type[11]); -+ cmd.params[3] |= mc_enc(32, 8, res_req->type[12]); -+ cmd.params[3] |= mc_enc(40, 8, res_req->type[13]); -+ cmd.params[3] |= mc_enc(48, 8, res_req->type[14]); -+ cmd.params[3] |= mc_enc(56, 8, res_req->type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_assign); -+ -+int dprc_unassign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ struct dprc_res_req *res_req) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_UNASSIGN, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, child_container_id); -+ cmd.params[0] |= mc_enc(32, 32, res_req->options); -+ cmd.params[1] |= mc_enc(0, 32, res_req->num); -+ cmd.params[1] |= mc_enc(32, 32, 
res_req->id_base_align); -+ cmd.params[2] |= mc_enc(0, 8, res_req->type[0]); -+ cmd.params[2] |= mc_enc(8, 8, res_req->type[1]); -+ cmd.params[2] |= mc_enc(16, 8, res_req->type[2]); -+ cmd.params[2] |= mc_enc(24, 8, res_req->type[3]); -+ cmd.params[2] |= mc_enc(32, 8, res_req->type[4]); -+ cmd.params[2] |= mc_enc(40, 8, res_req->type[5]); -+ cmd.params[2] |= mc_enc(48, 8, res_req->type[6]); -+ cmd.params[2] |= mc_enc(56, 8, res_req->type[7]); -+ cmd.params[3] |= mc_enc(0, 8, res_req->type[8]); -+ cmd.params[3] |= mc_enc(8, 8, res_req->type[9]); -+ cmd.params[3] |= mc_enc(16, 8, res_req->type[10]); -+ cmd.params[3] |= mc_enc(24, 8, res_req->type[11]); -+ cmd.params[3] |= mc_enc(32, 8, res_req->type[12]); -+ cmd.params[3] |= mc_enc(40, 8, res_req->type[13]); -+ cmd.params[3] |= mc_enc(48, 8, res_req->type[14]); -+ cmd.params[3] |= mc_enc(56, 8, res_req->type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_unassign); -+ -+int dprc_get_pool_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *pool_count) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL_COUNT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *pool_count = mc_dec(cmd.params[0], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_pool_count); -+ -+int dprc_get_pool(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int pool_index, -+ char *type) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_POOL, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, pool_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ type[0] = 
mc_dec(cmd.params[1], 0, 8); -+ type[1] = mc_dec(cmd.params[1], 8, 8); -+ type[2] = mc_dec(cmd.params[1], 16, 8); -+ type[3] = mc_dec(cmd.params[1], 24, 8); -+ type[4] = mc_dec(cmd.params[1], 32, 8); -+ type[5] = mc_dec(cmd.params[1], 40, 8); -+ type[6] = mc_dec(cmd.params[1], 48, 8); -+ type[7] = mc_dec(cmd.params[1], 56, 8); -+ type[8] = mc_dec(cmd.params[2], 0, 8); -+ type[9] = mc_dec(cmd.params[2], 8, 8); -+ type[10] = mc_dec(cmd.params[2], 16, 8); -+ type[11] = mc_dec(cmd.params[2], 24, 8); -+ type[12] = mc_dec(cmd.params[2], 32, 8); -+ type[13] = mc_dec(cmd.params[2], 40, 8); -+ type[14] = mc_dec(cmd.params[2], 48, 8); -+ type[15] = '\0'; -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_pool); -+ -+int dprc_get_obj_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *obj_count) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_COUNT, -+ cmd_flags, -+ token); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *obj_count = mc_dec(cmd.params[0], 32, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_obj_count); -+ -+int dprc_get_obj(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int obj_index, -+ struct dprc_obj_desc *obj_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, obj_index); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ obj_desc->id = mc_dec(cmd.params[0], 32, 32); -+ obj_desc->vendor = mc_dec(cmd.params[1], 0, 16); -+ obj_desc->irq_count = mc_dec(cmd.params[1], 16, 8); -+ obj_desc->region_count = mc_dec(cmd.params[1], 24, 8); -+ obj_desc->state = mc_dec(cmd.params[1], 32, 32); -+ obj_desc->ver_major 
= mc_dec(cmd.params[2], 0, 16); -+ obj_desc->ver_minor = mc_dec(cmd.params[2], 16, 16); -+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16); -+ obj_desc->type[0] = mc_dec(cmd.params[3], 0, 8); -+ obj_desc->type[1] = mc_dec(cmd.params[3], 8, 8); -+ obj_desc->type[2] = mc_dec(cmd.params[3], 16, 8); -+ obj_desc->type[3] = mc_dec(cmd.params[3], 24, 8); -+ obj_desc->type[4] = mc_dec(cmd.params[3], 32, 8); -+ obj_desc->type[5] = mc_dec(cmd.params[3], 40, 8); -+ obj_desc->type[6] = mc_dec(cmd.params[3], 48, 8); -+ obj_desc->type[7] = mc_dec(cmd.params[3], 56, 8); -+ obj_desc->type[8] = mc_dec(cmd.params[4], 0, 8); -+ obj_desc->type[9] = mc_dec(cmd.params[4], 8, 8); -+ obj_desc->type[10] = mc_dec(cmd.params[4], 16, 8); -+ obj_desc->type[11] = mc_dec(cmd.params[4], 24, 8); -+ obj_desc->type[12] = mc_dec(cmd.params[4], 32, 8); -+ obj_desc->type[13] = mc_dec(cmd.params[4], 40, 8); -+ obj_desc->type[14] = mc_dec(cmd.params[4], 48, 8); -+ obj_desc->type[15] = '\0'; -+ obj_desc->label[0] = mc_dec(cmd.params[5], 0, 8); -+ obj_desc->label[1] = mc_dec(cmd.params[5], 8, 8); -+ obj_desc->label[2] = mc_dec(cmd.params[5], 16, 8); -+ obj_desc->label[3] = mc_dec(cmd.params[5], 24, 8); -+ obj_desc->label[4] = mc_dec(cmd.params[5], 32, 8); -+ obj_desc->label[5] = mc_dec(cmd.params[5], 40, 8); -+ obj_desc->label[6] = mc_dec(cmd.params[5], 48, 8); -+ obj_desc->label[7] = mc_dec(cmd.params[5], 56, 8); -+ obj_desc->label[8] = mc_dec(cmd.params[6], 0, 8); -+ obj_desc->label[9] = mc_dec(cmd.params[6], 8, 8); -+ obj_desc->label[10] = mc_dec(cmd.params[6], 16, 8); -+ obj_desc->label[11] = mc_dec(cmd.params[6], 24, 8); -+ obj_desc->label[12] = mc_dec(cmd.params[6], 32, 8); -+ obj_desc->label[13] = mc_dec(cmd.params[6], 40, 8); -+ obj_desc->label[14] = mc_dec(cmd.params[6], 48, 8); -+ obj_desc->label[15] = '\0'; -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_obj); -+ -+int dprc_get_obj_desc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ struct 
dprc_obj_desc *obj_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_DESC, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, obj_id); -+ cmd.params[1] |= mc_enc(0, 8, obj_type[0]); -+ cmd.params[1] |= mc_enc(8, 8, obj_type[1]); -+ cmd.params[1] |= mc_enc(16, 8, obj_type[2]); -+ cmd.params[1] |= mc_enc(24, 8, obj_type[3]); -+ cmd.params[1] |= mc_enc(32, 8, obj_type[4]); -+ cmd.params[1] |= mc_enc(40, 8, obj_type[5]); -+ cmd.params[1] |= mc_enc(48, 8, obj_type[6]); -+ cmd.params[1] |= mc_enc(56, 8, obj_type[7]); -+ cmd.params[2] |= mc_enc(0, 8, obj_type[8]); -+ cmd.params[2] |= mc_enc(8, 8, obj_type[9]); -+ cmd.params[2] |= mc_enc(16, 8, obj_type[10]); -+ cmd.params[2] |= mc_enc(24, 8, obj_type[11]); -+ cmd.params[2] |= mc_enc(32, 8, obj_type[12]); -+ cmd.params[2] |= mc_enc(40, 8, obj_type[13]); -+ cmd.params[2] |= mc_enc(48, 8, obj_type[14]); -+ cmd.params[2] |= mc_enc(56, 8, obj_type[15]); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ obj_desc->id = (int)mc_dec(cmd.params[0], 32, 32); -+ obj_desc->vendor = (uint16_t)mc_dec(cmd.params[1], 0, 16); -+ obj_desc->vendor = (uint8_t)mc_dec(cmd.params[1], 16, 8); -+ obj_desc->region_count = (uint8_t)mc_dec(cmd.params[1], 24, 8); -+ obj_desc->state = (uint32_t)mc_dec(cmd.params[1], 32, 32); -+ obj_desc->ver_major = (uint16_t)mc_dec(cmd.params[2], 0, 16); -+ obj_desc->ver_minor = (uint16_t)mc_dec(cmd.params[2], 16, 16); -+ obj_desc->flags = mc_dec(cmd.params[2], 32, 16); -+ obj_desc->type[0] = (char)mc_dec(cmd.params[3], 0, 8); -+ obj_desc->type[1] = (char)mc_dec(cmd.params[3], 8, 8); -+ obj_desc->type[2] = (char)mc_dec(cmd.params[3], 16, 8); -+ obj_desc->type[3] = (char)mc_dec(cmd.params[3], 24, 8); -+ obj_desc->type[4] = (char)mc_dec(cmd.params[3], 32, 8); -+ obj_desc->type[5] = (char)mc_dec(cmd.params[3], 40, 8); -+ 
obj_desc->type[6] = (char)mc_dec(cmd.params[3], 48, 8); -+ obj_desc->type[7] = (char)mc_dec(cmd.params[3], 56, 8); -+ obj_desc->type[8] = (char)mc_dec(cmd.params[4], 0, 8); -+ obj_desc->type[9] = (char)mc_dec(cmd.params[4], 8, 8); -+ obj_desc->type[10] = (char)mc_dec(cmd.params[4], 16, 8); -+ obj_desc->type[11] = (char)mc_dec(cmd.params[4], 24, 8); -+ obj_desc->type[12] = (char)mc_dec(cmd.params[4], 32, 8); -+ obj_desc->type[13] = (char)mc_dec(cmd.params[4], 40, 8); -+ obj_desc->type[14] = (char)mc_dec(cmd.params[4], 48, 8); -+ obj_desc->type[15] = (char)mc_dec(cmd.params[4], 56, 8); -+ obj_desc->label[0] = (char)mc_dec(cmd.params[5], 0, 8); -+ obj_desc->label[1] = (char)mc_dec(cmd.params[5], 8, 8); -+ obj_desc->label[2] = (char)mc_dec(cmd.params[5], 16, 8); -+ obj_desc->label[3] = (char)mc_dec(cmd.params[5], 24, 8); -+ obj_desc->label[4] = (char)mc_dec(cmd.params[5], 32, 8); -+ obj_desc->label[5] = (char)mc_dec(cmd.params[5], 40, 8); -+ obj_desc->label[6] = (char)mc_dec(cmd.params[5], 48, 8); -+ obj_desc->label[7] = (char)mc_dec(cmd.params[5], 56, 8); -+ obj_desc->label[8] = (char)mc_dec(cmd.params[6], 0, 8); -+ obj_desc->label[9] = (char)mc_dec(cmd.params[6], 8, 8); -+ obj_desc->label[10] = (char)mc_dec(cmd.params[6], 16, 8); -+ obj_desc->label[11] = (char)mc_dec(cmd.params[6], 24, 8); -+ obj_desc->label[12] = (char)mc_dec(cmd.params[6], 32, 8); -+ obj_desc->label[13] = (char)mc_dec(cmd.params[6], 40, 8); -+ obj_desc->label[14] = (char)mc_dec(cmd.params[6], 48, 8); -+ obj_desc->label[15] = (char)mc_dec(cmd.params[6], 56, 8); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_obj_desc); -+ -+int dprc_set_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ 
cmd.params[0] |= mc_enc(0, 32, irq_cfg->val); -+ cmd.params[1] |= mc_enc(0, 64, irq_cfg->paddr); -+ cmd.params[2] |= mc_enc(0, 32, irq_cfg->irq_num); -+ cmd.params[2] |= mc_enc(32, 32, obj_id); -+ cmd.params[3] |= mc_enc(0, 8, obj_type[0]); -+ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); -+ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); -+ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); -+ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); -+ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); -+ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); -+ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); -+ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); -+ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); -+ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); -+ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); -+ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); -+ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); -+ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); -+ cmd.params[4] |= mc_enc(56, 8, obj_type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_obj_irq); -+ -+int dprc_get_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_IRQ, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, obj_id); -+ cmd.params[0] |= mc_enc(32, 8, irq_index); -+ cmd.params[1] |= mc_enc(0, 8, obj_type[0]); -+ cmd.params[1] |= mc_enc(8, 8, obj_type[1]); -+ cmd.params[1] |= mc_enc(16, 8, obj_type[2]); -+ cmd.params[1] |= mc_enc(24, 8, obj_type[3]); -+ cmd.params[1] |= mc_enc(32, 8, obj_type[4]); -+ cmd.params[1] |= mc_enc(40, 8, obj_type[5]); -+ cmd.params[1] |= mc_enc(48, 8, obj_type[6]); -+ cmd.params[1] |= mc_enc(56, 8, obj_type[7]); -+ cmd.params[2] |= mc_enc(0, 8, obj_type[8]); -+ cmd.params[2] |= mc_enc(8, 8, obj_type[9]); 
-+ cmd.params[2] |= mc_enc(16, 8, obj_type[10]); -+ cmd.params[2] |= mc_enc(24, 8, obj_type[11]); -+ cmd.params[2] |= mc_enc(32, 8, obj_type[12]); -+ cmd.params[2] |= mc_enc(40, 8, obj_type[13]); -+ cmd.params[2] |= mc_enc(48, 8, obj_type[14]); -+ cmd.params[2] |= mc_enc(56, 8, obj_type[15]); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ irq_cfg->val = (uint32_t)mc_dec(cmd.params[0], 0, 32); -+ irq_cfg->paddr = (uint64_t)mc_dec(cmd.params[1], 0, 64); -+ irq_cfg->irq_num = (int)mc_dec(cmd.params[2], 0, 32); -+ *type = (int)mc_dec(cmd.params[2], 32, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_obj_irq); -+ -+int dprc_get_res_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ int *res_count) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ *res_count = 0; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_COUNT, -+ cmd_flags, -+ token); -+ cmd.params[1] |= mc_enc(0, 8, type[0]); -+ cmd.params[1] |= mc_enc(8, 8, type[1]); -+ cmd.params[1] |= mc_enc(16, 8, type[2]); -+ cmd.params[1] |= mc_enc(24, 8, type[3]); -+ cmd.params[1] |= mc_enc(32, 8, type[4]); -+ cmd.params[1] |= mc_enc(40, 8, type[5]); -+ cmd.params[1] |= mc_enc(48, 8, type[6]); -+ cmd.params[1] |= mc_enc(56, 8, type[7]); -+ cmd.params[2] |= mc_enc(0, 8, type[8]); -+ cmd.params[2] |= mc_enc(8, 8, type[9]); -+ cmd.params[2] |= mc_enc(16, 8, type[10]); -+ cmd.params[2] |= mc_enc(24, 8, type[11]); -+ cmd.params[2] |= mc_enc(32, 8, type[12]); -+ cmd.params[2] |= mc_enc(40, 8, type[13]); -+ cmd.params[2] |= mc_enc(48, 8, type[14]); -+ cmd.params[2] |= mc_enc(56, 8, '\0'); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ *res_count = mc_dec(cmd.params[0], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_res_count); -+ -+int dprc_get_res_ids(struct 
fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ struct dprc_res_ids_range_desc *range_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_RES_IDS, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(42, 7, range_desc->iter_status); -+ cmd.params[1] |= mc_enc(0, 32, range_desc->base_id); -+ cmd.params[1] |= mc_enc(32, 32, range_desc->last_id); -+ cmd.params[2] |= mc_enc(0, 8, type[0]); -+ cmd.params[2] |= mc_enc(8, 8, type[1]); -+ cmd.params[2] |= mc_enc(16, 8, type[2]); -+ cmd.params[2] |= mc_enc(24, 8, type[3]); -+ cmd.params[2] |= mc_enc(32, 8, type[4]); -+ cmd.params[2] |= mc_enc(40, 8, type[5]); -+ cmd.params[2] |= mc_enc(48, 8, type[6]); -+ cmd.params[2] |= mc_enc(56, 8, type[7]); -+ cmd.params[3] |= mc_enc(0, 8, type[8]); -+ cmd.params[3] |= mc_enc(8, 8, type[9]); -+ cmd.params[3] |= mc_enc(16, 8, type[10]); -+ cmd.params[3] |= mc_enc(24, 8, type[11]); -+ cmd.params[3] |= mc_enc(32, 8, type[12]); -+ cmd.params[3] |= mc_enc(40, 8, type[13]); -+ cmd.params[3] |= mc_enc(48, 8, type[14]); -+ cmd.params[3] |= mc_enc(56, 8, '\0'); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ range_desc->iter_status = mc_dec(cmd.params[0], 42, 7); -+ range_desc->base_id = mc_dec(cmd.params[1], 0, 32); -+ range_desc->last_id = mc_dec(cmd.params[1], 32, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_res_ids); -+ -+int dprc_get_obj_region(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t region_index, -+ struct dprc_region_desc *region_desc) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_OBJ_REG, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, obj_id); -+ cmd.params[0] |= mc_enc(48, 8, region_index); -+ cmd.params[3] |= 
mc_enc(0, 8, obj_type[0]); -+ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); -+ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); -+ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); -+ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); -+ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); -+ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); -+ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); -+ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); -+ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); -+ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); -+ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); -+ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); -+ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); -+ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); -+ cmd.params[4] |= mc_enc(56, 8, '\0'); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ region_desc->base_offset = mc_dec(cmd.params[1], 0, 64); -+ region_desc->size = mc_dec(cmd.params[2], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_obj_region); -+ -+int dprc_set_obj_label(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ char *label) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_SET_OBJ_LABEL, -+ cmd_flags, -+ token); -+ -+ cmd.params[0] |= mc_enc(0, 32, obj_id); -+ cmd.params[1] |= mc_enc(0, 8, label[0]); -+ cmd.params[1] |= mc_enc(8, 8, label[1]); -+ cmd.params[1] |= mc_enc(16, 8, label[2]); -+ cmd.params[1] |= mc_enc(24, 8, label[3]); -+ cmd.params[1] |= mc_enc(32, 8, label[4]); -+ cmd.params[1] |= mc_enc(40, 8, label[5]); -+ cmd.params[1] |= mc_enc(48, 8, label[6]); -+ cmd.params[1] |= mc_enc(56, 8, label[7]); -+ cmd.params[2] |= mc_enc(0, 8, label[8]); -+ cmd.params[2] |= mc_enc(8, 8, label[9]); -+ cmd.params[2] |= mc_enc(16, 8, label[10]); -+ cmd.params[2] |= mc_enc(24, 8, label[11]); -+ cmd.params[2] |= mc_enc(32, 8, label[12]); -+ cmd.params[2] |= 
mc_enc(40, 8, label[13]); -+ cmd.params[2] |= mc_enc(48, 8, label[14]); -+ cmd.params[2] |= mc_enc(56, 8, label[15]); -+ cmd.params[3] |= mc_enc(0, 8, obj_type[0]); -+ cmd.params[3] |= mc_enc(8, 8, obj_type[1]); -+ cmd.params[3] |= mc_enc(16, 8, obj_type[2]); -+ cmd.params[3] |= mc_enc(24, 8, obj_type[3]); -+ cmd.params[3] |= mc_enc(32, 8, obj_type[4]); -+ cmd.params[3] |= mc_enc(40, 8, obj_type[5]); -+ cmd.params[3] |= mc_enc(48, 8, obj_type[6]); -+ cmd.params[3] |= mc_enc(56, 8, obj_type[7]); -+ cmd.params[4] |= mc_enc(0, 8, obj_type[8]); -+ cmd.params[4] |= mc_enc(8, 8, obj_type[9]); -+ cmd.params[4] |= mc_enc(16, 8, obj_type[10]); -+ cmd.params[4] |= mc_enc(24, 8, obj_type[11]); -+ cmd.params[4] |= mc_enc(32, 8, obj_type[12]); -+ cmd.params[4] |= mc_enc(40, 8, obj_type[13]); -+ cmd.params[4] |= mc_enc(48, 8, obj_type[14]); -+ cmd.params[4] |= mc_enc(56, 8, obj_type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_set_obj_label); -+ -+int dprc_connect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ const struct dprc_endpoint *endpoint2, -+ const struct dprc_connection_cfg *cfg) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_CONNECT, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, endpoint1->id); -+ cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id); -+ cmd.params[1] |= mc_enc(0, 32, endpoint2->id); -+ cmd.params[1] |= mc_enc(32, 32, endpoint2->if_id); -+ cmd.params[2] |= mc_enc(0, 8, endpoint1->type[0]); -+ cmd.params[2] |= mc_enc(8, 8, endpoint1->type[1]); -+ cmd.params[2] |= mc_enc(16, 8, endpoint1->type[2]); -+ cmd.params[2] |= mc_enc(24, 8, endpoint1->type[3]); -+ cmd.params[2] |= mc_enc(32, 8, endpoint1->type[4]); -+ cmd.params[2] |= mc_enc(40, 8, endpoint1->type[5]); -+ cmd.params[2] |= mc_enc(48, 8, endpoint1->type[6]); -+ cmd.params[2] |= mc_enc(56, 8, 
endpoint1->type[7]); -+ cmd.params[3] |= mc_enc(0, 8, endpoint1->type[8]); -+ cmd.params[3] |= mc_enc(8, 8, endpoint1->type[9]); -+ cmd.params[3] |= mc_enc(16, 8, endpoint1->type[10]); -+ cmd.params[3] |= mc_enc(24, 8, endpoint1->type[11]); -+ cmd.params[3] |= mc_enc(32, 8, endpoint1->type[12]); -+ cmd.params[3] |= mc_enc(40, 8, endpoint1->type[13]); -+ cmd.params[3] |= mc_enc(48, 8, endpoint1->type[14]); -+ cmd.params[3] |= mc_enc(56, 8, endpoint1->type[15]); -+ cmd.params[4] |= mc_enc(0, 32, cfg->max_rate); -+ cmd.params[4] |= mc_enc(32, 32, cfg->committed_rate); -+ cmd.params[5] |= mc_enc(0, 8, endpoint2->type[0]); -+ cmd.params[5] |= mc_enc(8, 8, endpoint2->type[1]); -+ cmd.params[5] |= mc_enc(16, 8, endpoint2->type[2]); -+ cmd.params[5] |= mc_enc(24, 8, endpoint2->type[3]); -+ cmd.params[5] |= mc_enc(32, 8, endpoint2->type[4]); -+ cmd.params[5] |= mc_enc(40, 8, endpoint2->type[5]); -+ cmd.params[5] |= mc_enc(48, 8, endpoint2->type[6]); -+ cmd.params[5] |= mc_enc(56, 8, endpoint2->type[7]); -+ cmd.params[6] |= mc_enc(0, 8, endpoint2->type[8]); -+ cmd.params[6] |= mc_enc(8, 8, endpoint2->type[9]); -+ cmd.params[6] |= mc_enc(16, 8, endpoint2->type[10]); -+ cmd.params[6] |= mc_enc(24, 8, endpoint2->type[11]); -+ cmd.params[6] |= mc_enc(32, 8, endpoint2->type[12]); -+ cmd.params[6] |= mc_enc(40, 8, endpoint2->type[13]); -+ cmd.params[6] |= mc_enc(48, 8, endpoint2->type[14]); -+ cmd.params[6] |= mc_enc(56, 8, endpoint2->type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_connect); -+ -+int dprc_disconnect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint) -+{ -+ struct mc_command cmd = { 0 }; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_DISCONNECT, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, endpoint->id); -+ cmd.params[0] |= mc_enc(32, 32, endpoint->if_id); -+ cmd.params[1] |= mc_enc(0, 8, endpoint->type[0]); 
-+ cmd.params[1] |= mc_enc(8, 8, endpoint->type[1]); -+ cmd.params[1] |= mc_enc(16, 8, endpoint->type[2]); -+ cmd.params[1] |= mc_enc(24, 8, endpoint->type[3]); -+ cmd.params[1] |= mc_enc(32, 8, endpoint->type[4]); -+ cmd.params[1] |= mc_enc(40, 8, endpoint->type[5]); -+ cmd.params[1] |= mc_enc(48, 8, endpoint->type[6]); -+ cmd.params[1] |= mc_enc(56, 8, endpoint->type[7]); -+ cmd.params[2] |= mc_enc(0, 8, endpoint->type[8]); -+ cmd.params[2] |= mc_enc(8, 8, endpoint->type[9]); -+ cmd.params[2] |= mc_enc(16, 8, endpoint->type[10]); -+ cmd.params[2] |= mc_enc(24, 8, endpoint->type[11]); -+ cmd.params[2] |= mc_enc(32, 8, endpoint->type[12]); -+ cmd.params[2] |= mc_enc(40, 8, endpoint->type[13]); -+ cmd.params[2] |= mc_enc(48, 8, endpoint->type[14]); -+ cmd.params[2] |= mc_enc(56, 8, endpoint->type[15]); -+ -+ /* send command to mc*/ -+ return mc_send_command(mc_io, &cmd); -+} -+EXPORT_SYMBOL(dprc_disconnect); -+ -+int dprc_get_connection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ struct dprc_endpoint *endpoint2, -+ int *state) -+{ -+ struct mc_command cmd = { 0 }; -+ int err; -+ -+ /* prepare command */ -+ cmd.header = mc_encode_cmd_header(DPRC_CMDID_GET_CONNECTION, -+ cmd_flags, -+ token); -+ cmd.params[0] |= mc_enc(0, 32, endpoint1->id); -+ cmd.params[0] |= mc_enc(32, 32, endpoint1->if_id); -+ cmd.params[1] |= mc_enc(0, 8, endpoint1->type[0]); -+ cmd.params[1] |= mc_enc(8, 8, endpoint1->type[1]); -+ cmd.params[1] |= mc_enc(16, 8, endpoint1->type[2]); -+ cmd.params[1] |= mc_enc(24, 8, endpoint1->type[3]); -+ cmd.params[1] |= mc_enc(32, 8, endpoint1->type[4]); -+ cmd.params[1] |= mc_enc(40, 8, endpoint1->type[5]); -+ cmd.params[1] |= mc_enc(48, 8, endpoint1->type[6]); -+ cmd.params[1] |= mc_enc(56, 8, endpoint1->type[7]); -+ cmd.params[2] |= mc_enc(0, 8, endpoint1->type[8]); -+ cmd.params[2] |= mc_enc(8, 8, endpoint1->type[9]); -+ cmd.params[2] |= mc_enc(16, 8, endpoint1->type[10]); -+ cmd.params[2] 
|= mc_enc(24, 8, endpoint1->type[11]); -+ cmd.params[2] |= mc_enc(32, 8, endpoint1->type[12]); -+ cmd.params[2] |= mc_enc(40, 8, endpoint1->type[13]); -+ cmd.params[2] |= mc_enc(48, 8, endpoint1->type[14]); -+ cmd.params[2] |= mc_enc(56, 8, endpoint1->type[15]); -+ -+ /* send command to mc*/ -+ err = mc_send_command(mc_io, &cmd); -+ if (err) -+ return err; -+ -+ /* retrieve response parameters */ -+ endpoint2->id = mc_dec(cmd.params[3], 0, 32); -+ endpoint2->if_id = mc_dec(cmd.params[3], 32, 32); -+ endpoint2->type[0] = mc_dec(cmd.params[4], 0, 8); -+ endpoint2->type[1] = mc_dec(cmd.params[4], 8, 8); -+ endpoint2->type[2] = mc_dec(cmd.params[4], 16, 8); -+ endpoint2->type[3] = mc_dec(cmd.params[4], 24, 8); -+ endpoint2->type[4] = mc_dec(cmd.params[4], 32, 8); -+ endpoint2->type[5] = mc_dec(cmd.params[4], 40, 8); -+ endpoint2->type[6] = mc_dec(cmd.params[4], 48, 8); -+ endpoint2->type[7] = mc_dec(cmd.params[4], 56, 8); -+ endpoint2->type[8] = mc_dec(cmd.params[5], 0, 8); -+ endpoint2->type[9] = mc_dec(cmd.params[5], 8, 8); -+ endpoint2->type[10] = mc_dec(cmd.params[5], 16, 8); -+ endpoint2->type[11] = mc_dec(cmd.params[5], 24, 8); -+ endpoint2->type[12] = mc_dec(cmd.params[5], 32, 8); -+ endpoint2->type[13] = mc_dec(cmd.params[5], 40, 8); -+ endpoint2->type[14] = mc_dec(cmd.params[5], 48, 8); -+ endpoint2->type[15] = mc_dec(cmd.params[5], 56, 8); -+ *state = mc_dec(cmd.params[6], 0, 32); -+ -+ return 0; -+} -+EXPORT_SYMBOL(dprc_get_connection); -diff --git a/drivers/staging/fsl-mc/bus/mc-allocator.c b/drivers/staging/fsl-mc/bus/mc-allocator.c -new file mode 100644 -index 0000000..a3940a0 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/mc-allocator.c -@@ -0,0 +1,716 @@ -+/* -+ * Freescale MC object device allocator driver -+ * -+ * Copyright (C) 2013 Freescale Semiconductor, Inc. -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. 
This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#include "../include/mc-private.h" -+#include "../include/mc-sys.h" -+#include -+#include "../include/dpbp-cmd.h" -+#include "../include/dpcon-cmd.h" -+#include "dpmcp-cmd.h" -+#include "dpmcp.h" -+ -+/** -+ * fsl_mc_resource_pool_add_device - add allocatable device to a resource -+ * pool of a given MC bus -+ * -+ * @mc_bus: pointer to the MC bus -+ * @pool_type: MC bus pool type -+ * @mc_dev: Pointer to allocatable MC object device -+ * -+ * It adds an allocatable MC object device to a container's resource pool of -+ * the given resource type -+ */ -+static int __must_check fsl_mc_resource_pool_add_device(struct fsl_mc_bus -+ *mc_bus, -+ enum fsl_mc_pool_type -+ pool_type, -+ struct fsl_mc_device -+ *mc_dev) -+{ -+ struct fsl_mc_resource_pool *res_pool; -+ struct fsl_mc_resource *resource; -+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; -+ int error = -EINVAL; -+ bool mutex_locked = false; -+ -+ if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) -+ goto out; -+ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) -+ goto out; -+ if (WARN_ON(mc_dev->resource)) -+ goto out; -+ -+ res_pool = &mc_bus->resource_pools[pool_type]; -+ if (WARN_ON(res_pool->type != pool_type)) -+ goto out; -+ if (WARN_ON(res_pool->mc_bus != mc_bus)) -+ goto out; -+ -+ mutex_lock(&res_pool->mutex); -+ mutex_locked = true; -+ -+ if (WARN_ON(res_pool->max_count < 0)) -+ goto out; -+ if (WARN_ON(res_pool->free_count < 0 || -+ res_pool->free_count > res_pool->max_count)) -+ goto out; -+ -+ resource = devm_kzalloc(&mc_bus_dev->dev, sizeof(*resource), -+ GFP_KERNEL); -+ if (!resource) { -+ error = -ENOMEM; -+ dev_err(&mc_bus_dev->dev, -+ "Failed to allocate memory for fsl_mc_resource\n"); -+ goto out; -+ } -+ -+ resource->type = pool_type; -+ resource->id = mc_dev->obj_desc.id; -+ resource->data = mc_dev; -+ resource->parent_pool = res_pool; -+ 
INIT_LIST_HEAD(&resource->node); -+ list_add_tail(&resource->node, &res_pool->free_list); -+ mc_dev->resource = resource; -+ res_pool->free_count++; -+ res_pool->max_count++; -+ error = 0; -+out: -+ if (mutex_locked) -+ mutex_unlock(&res_pool->mutex); -+ -+ return error; -+} -+ -+/** -+ * fsl_mc_resource_pool_remove_device - remove an allocatable device from a -+ * resource pool -+ * -+ * @mc_dev: Pointer to allocatable MC object device -+ * -+ * It permanently removes an allocatable MC object device from the resource -+ * pool, the device is currently in, as long as it is in the pool's free list. -+ */ -+static int __must_check fsl_mc_resource_pool_remove_device(struct fsl_mc_device -+ *mc_dev) -+{ -+ struct fsl_mc_device *mc_bus_dev; -+ struct fsl_mc_bus *mc_bus; -+ struct fsl_mc_resource_pool *res_pool; -+ struct fsl_mc_resource *resource; -+ int error = -EINVAL; -+ bool mutex_locked = false; -+ -+ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) -+ goto out; -+ -+ resource = mc_dev->resource; -+ if (WARN_ON(!resource || resource->data != mc_dev)) -+ goto out; -+ -+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); -+ mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ res_pool = resource->parent_pool; -+ if (WARN_ON(res_pool != &mc_bus->resource_pools[resource->type])) -+ goto out; -+ -+ mutex_lock(&res_pool->mutex); -+ mutex_locked = true; -+ -+ if (WARN_ON(res_pool->max_count <= 0)) -+ goto out; -+ if (WARN_ON(res_pool->free_count <= 0 || -+ res_pool->free_count > res_pool->max_count)) -+ goto out; -+ -+ /* -+ * If the device is currently allocated, its resource is not -+ * in the free list and thus, the device cannot be removed. 
-+ */ -+ if (list_empty(&resource->node)) { -+ error = -EBUSY; -+ dev_err(&mc_bus_dev->dev, -+ "Device %s cannot be removed from resource pool\n", -+ dev_name(&mc_dev->dev)); -+ goto out; -+ } -+ -+ list_del(&resource->node); -+ INIT_LIST_HEAD(&resource->node); -+ res_pool->free_count--; -+ res_pool->max_count--; -+ -+ devm_kfree(&mc_bus_dev->dev, resource); -+ mc_dev->resource = NULL; -+ error = 0; -+out: -+ if (mutex_locked) -+ mutex_unlock(&res_pool->mutex); -+ -+ return error; -+} -+ -+static const char *const fsl_mc_pool_type_strings[] = { -+ [FSL_MC_POOL_DPMCP] = "dpmcp", -+ [FSL_MC_POOL_DPBP] = "dpbp", -+ [FSL_MC_POOL_DPCON] = "dpcon", -+ [FSL_MC_POOL_IRQ] = "irq", -+}; -+ -+static int __must_check object_type_to_pool_type(const char *object_type, -+ enum fsl_mc_pool_type -+ *pool_type) -+{ -+ unsigned int i; -+ -+ for (i = 0; i < ARRAY_SIZE(fsl_mc_pool_type_strings); i++) { -+ if (strcmp(object_type, fsl_mc_pool_type_strings[i]) == 0) { -+ *pool_type = i; -+ return 0; -+ } -+ } -+ -+ return -EINVAL; -+} -+ -+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, -+ enum fsl_mc_pool_type pool_type, -+ struct fsl_mc_resource **new_resource) -+{ -+ struct fsl_mc_resource_pool *res_pool; -+ struct fsl_mc_resource *resource; -+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; -+ int error = -EINVAL; -+ bool mutex_locked = false; -+ -+ BUILD_BUG_ON(ARRAY_SIZE(fsl_mc_pool_type_strings) != -+ FSL_MC_NUM_POOL_TYPES); -+ -+ *new_resource = NULL; -+ if (WARN_ON(pool_type < 0 || pool_type >= FSL_MC_NUM_POOL_TYPES)) -+ goto error; -+ -+ res_pool = &mc_bus->resource_pools[pool_type]; -+ if (WARN_ON(res_pool->mc_bus != mc_bus)) -+ goto error; -+ -+ mutex_lock(&res_pool->mutex); -+ mutex_locked = true; -+ resource = list_first_entry_or_null(&res_pool->free_list, -+ struct fsl_mc_resource, node); -+ -+ if (!resource) { -+ WARN_ON(res_pool->free_count != 0); -+ error = -ENXIO; -+ dev_err(&mc_bus_dev->dev, -+ "No more resources of type %s left\n", -+ 
fsl_mc_pool_type_strings[pool_type]); -+ goto error; -+ } -+ -+ if (WARN_ON(resource->type != pool_type)) -+ goto error; -+ if (WARN_ON(resource->parent_pool != res_pool)) -+ goto error; -+ if (WARN_ON(res_pool->free_count <= 0 || -+ res_pool->free_count > res_pool->max_count)) -+ goto error; -+ -+ list_del(&resource->node); -+ INIT_LIST_HEAD(&resource->node); -+ -+ res_pool->free_count--; -+ mutex_unlock(&res_pool->mutex); -+ *new_resource = resource; -+ return 0; -+error: -+ if (mutex_locked) -+ mutex_unlock(&res_pool->mutex); -+ -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_resource_allocate); -+ -+void fsl_mc_resource_free(struct fsl_mc_resource *resource) -+{ -+ struct fsl_mc_resource_pool *res_pool; -+ bool mutex_locked = false; -+ -+ res_pool = resource->parent_pool; -+ if (WARN_ON(resource->type != res_pool->type)) -+ goto out; -+ -+ mutex_lock(&res_pool->mutex); -+ mutex_locked = true; -+ if (WARN_ON(res_pool->free_count < 0 || -+ res_pool->free_count >= res_pool->max_count)) -+ goto out; -+ -+ if (WARN_ON(!list_empty(&resource->node))) -+ goto out; -+ -+ list_add_tail(&resource->node, &res_pool->free_list); -+ res_pool->free_count++; -+out: -+ if (mutex_locked) -+ mutex_unlock(&res_pool->mutex); -+} -+EXPORT_SYMBOL_GPL(fsl_mc_resource_free); -+ -+/** -+ * fsl_mc_portal_allocate - Allocates an MC portal -+ * -+ * @mc_dev: MC device for which the MC portal is to be allocated -+ * @mc_io_flags: Flags for the fsl_mc_io object that wraps the allocated -+ * MC portal. -+ * @new_mc_io: Pointer to area where the pointer to the fsl_mc_io object -+ * that wraps the allocated MC portal is to be returned -+ * -+ * This function allocates an MC portal from the device's parent DPRC, -+ * from the corresponding MC bus' pool of MC portals and wraps -+ * it in a new fsl_mc_io object. If 'mc_dev' is a DPRC itself, the -+ * portal is allocated from its own MC bus. 
-+ */ -+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, -+ uint16_t mc_io_flags, -+ struct fsl_mc_io **new_mc_io) -+{ -+ struct fsl_mc_device *mc_bus_dev; -+ struct fsl_mc_bus *mc_bus; -+ phys_addr_t mc_portal_phys_addr; -+ size_t mc_portal_size; -+ struct fsl_mc_device *dpmcp_dev; -+ int error = -EINVAL; -+ struct fsl_mc_resource *resource = NULL; -+ struct fsl_mc_io *mc_io = NULL; -+ -+ if (!mc_dev) { -+ if (WARN_ON(!fsl_mc_bus_type.dev_root)) -+ return error; -+ -+ mc_bus_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); -+ } else if (mc_dev->flags & FSL_MC_IS_DPRC) { -+ mc_bus_dev = mc_dev; -+ } else { -+ if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type)) -+ return error; -+ -+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); -+ } -+ -+ mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ *new_mc_io = NULL; -+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_DPMCP, &resource); -+ if (error < 0) -+ return error; -+ -+ error = -EINVAL; -+ dpmcp_dev = resource->data; -+ if (WARN_ON(!dpmcp_dev || -+ strcmp(dpmcp_dev->obj_desc.type, "dpmcp") != 0)) -+ goto error_cleanup_resource; -+ -+ if (dpmcp_dev->obj_desc.ver_major < DPMCP_MIN_VER_MAJOR || -+ (dpmcp_dev->obj_desc.ver_major == DPMCP_MIN_VER_MAJOR && -+ dpmcp_dev->obj_desc.ver_minor < DPMCP_MIN_VER_MINOR)) { -+ dev_err(&dpmcp_dev->dev, -+ "ERROR: Version %d.%d of DPMCP not supported.\n", -+ dpmcp_dev->obj_desc.ver_major, -+ dpmcp_dev->obj_desc.ver_minor); -+ error = -ENOTSUPP; -+ goto error_cleanup_resource; -+ } -+ -+ if (WARN_ON(dpmcp_dev->obj_desc.region_count == 0)) -+ goto error_cleanup_resource; -+ -+ mc_portal_phys_addr = dpmcp_dev->regions[0].start; -+ mc_portal_size = dpmcp_dev->regions[0].end - -+ dpmcp_dev->regions[0].start + 1; -+ -+ if (WARN_ON(mc_portal_size != mc_bus_dev->mc_io->portal_size)) -+ goto error_cleanup_resource; -+ -+ error = fsl_create_mc_io(&mc_bus_dev->dev, -+ mc_portal_phys_addr, -+ mc_portal_size, dpmcp_dev, -+ mc_io_flags, &mc_io); -+ if (error < 0) -+ goto 
error_cleanup_resource; -+ -+ *new_mc_io = mc_io; -+ return 0; -+ -+error_cleanup_resource: -+ fsl_mc_resource_free(resource); -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_portal_allocate); -+ -+/** -+ * fsl_mc_portal_free - Returns an MC portal to the pool of free MC portals -+ * of a given MC bus -+ * -+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free -+ */ -+void fsl_mc_portal_free(struct fsl_mc_io *mc_io) -+{ -+ struct fsl_mc_device *dpmcp_dev; -+ struct fsl_mc_resource *resource; -+ -+ /* -+ * Every mc_io obtained by calling fsl_mc_portal_allocate() is supposed -+ * to have a DPMCP object associated with. -+ */ -+ dpmcp_dev = mc_io->dpmcp_dev; -+ if (WARN_ON(!dpmcp_dev)) -+ return; -+ if (WARN_ON(strcmp(dpmcp_dev->obj_desc.type, "dpmcp") != 0)) -+ return; -+ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) -+ return; -+ -+ resource = dpmcp_dev->resource; -+ if (WARN_ON(!resource || resource->type != FSL_MC_POOL_DPMCP)) -+ return; -+ -+ if (WARN_ON(resource->data != dpmcp_dev)) -+ return; -+ -+ fsl_destroy_mc_io(mc_io); -+ fsl_mc_resource_free(resource); -+} -+EXPORT_SYMBOL_GPL(fsl_mc_portal_free); -+ -+/** -+ * fsl_mc_portal_reset - Resets the dpmcp object for a given fsl_mc_io object -+ * -+ * @mc_io: Pointer to the fsl_mc_io object that wraps the MC portal to free -+ */ -+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io) -+{ -+ int error; -+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; -+ -+ if (WARN_ON(!dpmcp_dev)) -+ return -EINVAL; -+ -+ error = dpmcp_reset(mc_io, 0, dpmcp_dev->mc_handle); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, "dpmcp_reset() failed: %d\n", error); -+ return error; -+ } -+ -+ return 0; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_portal_reset); -+ -+/** -+ * fsl_mc_object_allocate - Allocates a MC object device of the given -+ * pool type from a given MC bus -+ * -+ * @mc_dev: MC device for which the MC object device is to be allocated -+ * @pool_type: MC bus resource pool type -+ * @new_mc_dev: Pointer to area where 
the pointer to the allocated -+ * MC object device is to be returned -+ * -+ * This function allocates a MC object device from the device's parent DPRC, -+ * from the corresponding MC bus' pool of allocatable MC object devices of -+ * the given resource type. mc_dev cannot be a DPRC itself. -+ * -+ * NOTE: pool_type must be different from FSL_MC_POOL_MCP, since MC -+ * portals are allocated using fsl_mc_portal_allocate(), instead of -+ * this function. -+ */ -+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, -+ enum fsl_mc_pool_type pool_type, -+ struct fsl_mc_device **new_mc_adev) -+{ -+ struct fsl_mc_device *mc_bus_dev; -+ struct fsl_mc_bus *mc_bus; -+ struct fsl_mc_device *mc_adev; -+ int error = -EINVAL; -+ struct fsl_mc_resource *resource = NULL; -+ -+ *new_mc_adev = NULL; -+ if (WARN_ON(mc_dev->flags & FSL_MC_IS_DPRC)) -+ goto error; -+ -+ if (WARN_ON(mc_dev->dev.parent->bus != &fsl_mc_bus_type)) -+ goto error; -+ -+ if (WARN_ON(pool_type == FSL_MC_POOL_DPMCP)) -+ goto error; -+ -+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); -+ mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ error = fsl_mc_resource_allocate(mc_bus, pool_type, &resource); -+ if (error < 0) -+ goto error; -+ -+ mc_adev = resource->data; -+ if (WARN_ON(!mc_adev)) -+ goto error; -+ -+ *new_mc_adev = mc_adev; -+ return 0; -+error: -+ if (resource) -+ fsl_mc_resource_free(resource); -+ -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_object_allocate); -+ -+/** -+ * fsl_mc_object_free - Returns an allocatable MC object device to the -+ * corresponding resource pool of a given MC bus. 
-+ * -+ * @mc_adev: Pointer to the MC object device -+ */ -+void fsl_mc_object_free(struct fsl_mc_device *mc_adev) -+{ -+ struct fsl_mc_resource *resource; -+ -+ resource = mc_adev->resource; -+ if (WARN_ON(resource->type == FSL_MC_POOL_DPMCP)) -+ return; -+ if (WARN_ON(resource->data != mc_adev)) -+ return; -+ -+ fsl_mc_resource_free(resource); -+} -+EXPORT_SYMBOL_GPL(fsl_mc_object_free); -+ -+/** -+ * It allocates the IRQs required by a given MC object device. The -+ * IRQs are allocated from the interrupt pool associated with the -+ * MC bus that contains the device, if the device is not a DPRC device. -+ * Otherwise, the IRQs are allocated from the interrupt pool associated -+ * with the MC bus that represents the DPRC device itself. -+ */ -+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ int irq_count; -+ int res_allocated_count = 0; -+ int error = -EINVAL; -+ struct fsl_mc_device_irq **irqs = NULL; -+ struct fsl_mc_bus *mc_bus; -+ struct fsl_mc_resource_pool *res_pool; -+ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); -+ -+ if (!mc->gic_supported) -+ return -ENOTSUPP; -+ -+ if (WARN_ON(mc_dev->irqs)) -+ goto error; -+ -+ irq_count = mc_dev->obj_desc.irq_count; -+ if (WARN_ON(irq_count == 0)) -+ goto error; -+ -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) -+ mc_bus = to_fsl_mc_bus(mc_dev); -+ else -+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); -+ -+ if (WARN_ON(!mc_bus->irq_resources)) -+ goto error; -+ -+ res_pool = &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; -+ if (res_pool->free_count < irq_count) { -+ dev_err(&mc_dev->dev, -+ "Not able to allocate %u irqs for device\n", irq_count); -+ error = -ENOSPC; -+ goto error; -+ } -+ -+ irqs = devm_kzalloc(&mc_dev->dev, irq_count * sizeof(irqs[0]), -+ GFP_KERNEL); -+ if (!irqs) { -+ error = -ENOMEM; -+ dev_err(&mc_dev->dev, "No memory to allocate irqs[]\n"); -+ goto error; -+ } -+ -+ for (i = 0; i < irq_count; i++) { -+ struct 
fsl_mc_resource *resource; -+ -+ error = fsl_mc_resource_allocate(mc_bus, FSL_MC_POOL_IRQ, -+ &resource); -+ if (error < 0) -+ goto error; -+ -+ irqs[i] = to_fsl_mc_irq(resource); -+ res_allocated_count++; -+ -+ WARN_ON(irqs[i]->mc_dev); -+ irqs[i]->mc_dev = mc_dev; -+ irqs[i]->dev_irq_index = i; -+ } -+ -+ mc_dev->irqs = irqs; -+ return 0; -+error: -+ for (i = 0; i < res_allocated_count; i++) { -+ irqs[i]->mc_dev = NULL; -+ fsl_mc_resource_free(&irqs[i]->resource); -+ } -+ -+ if (irqs) -+ devm_kfree(&mc_dev->dev, irqs); -+ -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_allocate_irqs); -+ -+/* -+ * It frees the IRQs that were allocated for a MC object device, by -+ * returning them to the corresponding interrupt pool. -+ */ -+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev) -+{ -+ int i; -+ int irq_count; -+ struct fsl_mc_bus *mc_bus; -+ struct fsl_mc_device_irq **irqs = mc_dev->irqs; -+ -+ if (WARN_ON(!irqs)) -+ return; -+ -+ irq_count = mc_dev->obj_desc.irq_count; -+ -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) -+ mc_bus = to_fsl_mc_bus(mc_dev); -+ else -+ mc_bus = to_fsl_mc_bus(to_fsl_mc_device(mc_dev->dev.parent)); -+ -+ if (WARN_ON(!mc_bus->irq_resources)) -+ return; -+ -+ for (i = 0; i < irq_count; i++) { -+ WARN_ON(!irqs[i]->mc_dev); -+ irqs[i]->mc_dev = NULL; -+ fsl_mc_resource_free(&irqs[i]->resource); -+ } -+ -+ devm_kfree(&mc_dev->dev, mc_dev->irqs); -+ mc_dev->irqs = NULL; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_free_irqs); -+ -+/** -+ * fsl_mc_allocator_probe - callback invoked when an allocatable device is -+ * being added to the system -+ */ -+static int fsl_mc_allocator_probe(struct fsl_mc_device *mc_dev) -+{ -+ enum fsl_mc_pool_type pool_type; -+ struct fsl_mc_device *mc_bus_dev; -+ struct fsl_mc_bus *mc_bus; -+ int error = -EINVAL; -+ -+ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) -+ goto error; -+ -+ mc_bus_dev = to_fsl_mc_device(mc_dev->dev.parent); -+ if (WARN_ON(mc_bus_dev->dev.bus != &fsl_mc_bus_type)) -+ goto error; -+ -+ 
mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ -+ /* -+ * If mc_dev is the DPMCP object for the parent DPRC's built-in -+ * portal, we don't add this DPMCP to the DPMCP object pool, -+ * but instead allocate it directly to the parent DPRC (mc_bus_dev): -+ */ -+ if (strcmp(mc_dev->obj_desc.type, "dpmcp") == 0 && -+ mc_dev->obj_desc.id == mc_bus->dprc_attr.portal_id) { -+ error = fsl_mc_io_set_dpmcp(mc_bus_dev->mc_io, mc_dev); -+ if (error < 0) -+ goto error; -+ } else { -+ error = object_type_to_pool_type(mc_dev->obj_desc.type, -+ &pool_type); -+ if (error < 0) -+ goto error; -+ -+ error = fsl_mc_resource_pool_add_device(mc_bus, pool_type, -+ mc_dev); -+ if (error < 0) -+ goto error; -+ } -+ -+ dev_dbg(&mc_dev->dev, -+ "Allocatable MC object device bound to fsl_mc_allocator driver"); -+ return 0; -+error: -+ -+ return error; -+} -+ -+/** -+ * fsl_mc_allocator_remove - callback invoked when an allocatable device is -+ * being removed from the system -+ */ -+static int fsl_mc_allocator_remove(struct fsl_mc_device *mc_dev) -+{ -+ int error; -+ -+ if (WARN_ON(!FSL_MC_IS_ALLOCATABLE(mc_dev->obj_desc.type))) -+ return -EINVAL; -+ -+ if (mc_dev->resource) { -+ error = fsl_mc_resource_pool_remove_device(mc_dev); -+ if (error < 0) -+ return error; -+ } -+ -+ dev_dbg(&mc_dev->dev, -+ "Allocatable MC object device unbound from fsl_mc_allocator driver"); -+ return 0; -+} -+ -+static const struct fsl_mc_device_match_id match_id_table[] = { -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpbp", -+ }, -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpmcp", -+ }, -+ { -+ .vendor = FSL_MC_VENDOR_FREESCALE, -+ .obj_type = "dpcon", -+ }, -+ {.vendor = 0x0}, -+}; -+ -+static struct fsl_mc_driver fsl_mc_allocator_driver = { -+ .driver = { -+ .name = "fsl_mc_allocator", -+ .owner = THIS_MODULE, -+ .pm = NULL, -+ }, -+ .match_id_table = match_id_table, -+ .probe = fsl_mc_allocator_probe, -+ .remove = fsl_mc_allocator_remove, -+}; -+ -+int __init 
fsl_mc_allocator_driver_init(void) -+{ -+ return fsl_mc_driver_register(&fsl_mc_allocator_driver); -+} -+ -+void __exit fsl_mc_allocator_driver_exit(void) -+{ -+ fsl_mc_driver_unregister(&fsl_mc_allocator_driver); -+} -diff --git a/drivers/staging/fsl-mc/bus/mc-bus.c b/drivers/staging/fsl-mc/bus/mc-bus.c -new file mode 100644 -index 0000000..f173b35 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/mc-bus.c -@@ -0,0 +1,1347 @@ -+/* -+ * Freescale Management Complex (MC) bus driver -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+#include "../include/mc-private.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "../include/dpmng.h" -+#include "../include/mc-sys.h" -+#include "dprc-cmd.h" -+ -+/* -+ * IOMMU stream ID flags -+ */ -+#define STREAM_ID_PL_MASK BIT(9) /* privilege level */ -+#define STREAM_ID_BMT_MASK BIT(8) /* bypass memory translation */ -+#define STREAM_ID_VA_MASK BIT(7) /* virtual address translation -+ * (two-stage translation) */ -+#define STREAM_ID_ICID_MASK (BIT(7) - 1) /* isolation context ID -+ * (translation context) */ -+ -+#define MAX_STREAM_ID_ICID STREAM_ID_ICID_MASK -+ -+static struct kmem_cache *mc_dev_cache; -+ -+/** -+ * fsl_mc_bus_match - device to driver matching callback -+ * @dev: the MC object device structure to match against -+ * @drv: the device driver to search for matching MC object device id -+ * structures -+ * -+ * Returns 1 on success, 0 otherwise. 
-+ */ -+static int fsl_mc_bus_match(struct device *dev, struct device_driver *drv) -+{ -+ const struct fsl_mc_device_match_id *id; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(drv); -+ bool found = false; -+ -+ /* When driver_override is set, only bind to the matching driver */ -+ if (mc_dev->driver_override) { -+ found = !strcmp(mc_dev->driver_override, mc_drv->driver.name); -+ goto out; -+ } -+ -+ if (!mc_drv->match_id_table) -+ goto out; -+ -+ /* -+ * If the object is not 'plugged' don't match. -+ * Only exception is the root DPRC, which is a special case. -+ * -+ * NOTE: Only when this function is invoked for the root DPRC, -+ * mc_dev->mc_io is not NULL -+ */ -+ if ((mc_dev->obj_desc.state & DPRC_OBJ_STATE_PLUGGED) == 0 && -+ !mc_dev->mc_io) -+ goto out; -+ -+ /* -+ * Traverse the match_id table of the given driver, trying to find -+ * a matching for the given MC object device. -+ */ -+ for (id = mc_drv->match_id_table; id->vendor != 0x0; id++) { -+ if (id->vendor == mc_dev->obj_desc.vendor && -+ strcmp(id->obj_type, mc_dev->obj_desc.type) == 0) { -+ found = true; -+ -+ break; -+ } -+ } -+ -+out: -+ dev_dbg(dev, "%smatched\n", found ? 
"" : "not "); -+ return found; -+} -+ -+/** -+ * fsl_mc_bus_uevent - callback invoked when a device is added -+ */ -+static int fsl_mc_bus_uevent(struct device *dev, struct kobj_uevent_env *env) -+{ -+ pr_debug("%s invoked\n", __func__); -+ return 0; -+} -+ -+static ssize_t driver_override_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ const char *driver_override, *old = mc_dev->driver_override; -+ char *cp; -+ -+ if (WARN_ON(dev->bus != &fsl_mc_bus_type)) -+ return -EINVAL; -+ -+ if (count > PATH_MAX) -+ return -EINVAL; -+ -+ driver_override = kstrndup(buf, count, GFP_KERNEL); -+ if (!driver_override) -+ return -ENOMEM; -+ -+ cp = strchr(driver_override, '\n'); -+ if (cp) -+ *cp = '\0'; -+ -+ if (strlen(driver_override)) { -+ mc_dev->driver_override = driver_override; -+ } else { -+ kfree(driver_override); -+ mc_dev->driver_override = NULL; -+ } -+ -+ kfree(old); -+ -+ return count; -+} -+ -+static ssize_t driver_override_show(struct device *dev, -+ struct device_attribute *attr, char *buf) -+{ -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ -+ return sprintf(buf, "%s\n", mc_dev->driver_override); -+} -+ -+static DEVICE_ATTR_RW(driver_override); -+ -+static ssize_t rescan_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ unsigned long val; -+ unsigned int irq_count; -+ struct fsl_mc_device *root_mc_dev; -+ struct fsl_mc_bus *root_mc_bus; -+ -+ if (!is_root_dprc(dev)) -+ return -EINVAL; -+ -+ root_mc_dev = to_fsl_mc_device(dev); -+ root_mc_bus = to_fsl_mc_bus(root_mc_dev); -+ -+ if (kstrtoul(buf, 0, &val) < 0) -+ return -EINVAL; -+ -+ if (val) { -+ mutex_lock(&root_mc_bus->scan_mutex); -+ dprc_scan_objects(root_mc_dev, NULL, &irq_count); -+ mutex_unlock(&root_mc_bus->scan_mutex); -+ } -+ -+ return count; -+} -+ -+static DEVICE_ATTR_WO(rescan); -+ -+static struct attribute *fsl_mc_dev_attrs[] = 
{ -+ &dev_attr_driver_override.attr, -+ &dev_attr_rescan.attr, -+ NULL, -+}; -+ -+static const struct attribute_group fsl_mc_dev_group = { -+ .attrs = fsl_mc_dev_attrs, -+}; -+ -+static const struct attribute_group *fsl_mc_dev_groups[] = { -+ &fsl_mc_dev_group, -+ NULL, -+}; -+ -+static int scan_fsl_mc_bus(struct device *dev, void *data) -+{ -+ unsigned int irq_count; -+ struct fsl_mc_device *root_mc_dev; -+ struct fsl_mc_bus *root_mc_bus; -+ -+ if (is_root_dprc(dev)) { -+ root_mc_dev = to_fsl_mc_device(dev); -+ root_mc_bus = to_fsl_mc_bus(root_mc_dev); -+ mutex_lock(&root_mc_bus->scan_mutex); -+ dprc_scan_objects(root_mc_dev, NULL, &irq_count); -+ mutex_unlock(&root_mc_bus->scan_mutex); -+ } -+ -+ return 0; -+} -+ -+static ssize_t bus_rescan_store(struct bus_type *bus, -+ const char *buf, size_t count) -+{ -+ unsigned long val; -+ -+ if (kstrtoul(buf, 0, &val) < 0) -+ return -EINVAL; -+ -+ if (val) -+ bus_for_each_dev(bus, NULL, NULL, scan_fsl_mc_bus); -+ -+ return count; -+} -+static BUS_ATTR(rescan, (S_IWUSR|S_IWGRP), NULL, bus_rescan_store); -+ -+static struct attribute *fsl_mc_bus_attrs[] = { -+ &bus_attr_rescan.attr, -+ NULL, -+}; -+ -+static const struct attribute_group fsl_mc_bus_group = { -+ .attrs = fsl_mc_bus_attrs, -+}; -+ -+static const struct attribute_group *fsl_mc_bus_groups[] = { -+ &fsl_mc_bus_group, -+ NULL, -+}; -+ -+struct bus_type fsl_mc_bus_type = { -+ .name = "fsl-mc", -+ .match = fsl_mc_bus_match, -+ .uevent = fsl_mc_bus_uevent, -+ .dev_groups = fsl_mc_dev_groups, -+ .bus_groups = fsl_mc_bus_groups, -+}; -+EXPORT_SYMBOL_GPL(fsl_mc_bus_type); -+ -+static int fsl_mc_driver_probe(struct device *dev) -+{ -+ struct fsl_mc_driver *mc_drv; -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ int error; -+ -+ if (WARN_ON(!dev->driver)) -+ return -EINVAL; -+ -+ mc_drv = to_fsl_mc_driver(dev->driver); -+ if (WARN_ON(!mc_drv->probe)) -+ return -EINVAL; -+ -+ error = mc_drv->probe(mc_dev); -+ if (error < 0) { -+ dev_err(dev, "MC object device 
probe callback failed: %d\n", -+ error); -+ return error; -+ } -+ -+ return 0; -+} -+ -+static int fsl_mc_driver_remove(struct device *dev) -+{ -+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ int error; -+ -+ if (WARN_ON(!dev->driver)) -+ return -EINVAL; -+ -+ error = mc_drv->remove(mc_dev); -+ if (error < 0) { -+ dev_err(dev, -+ "MC object device remove callback failed: %d\n", -+ error); -+ return error; -+ } -+ -+ return 0; -+} -+ -+static void fsl_mc_driver_shutdown(struct device *dev) -+{ -+ struct fsl_mc_driver *mc_drv = to_fsl_mc_driver(dev->driver); -+ struct fsl_mc_device *mc_dev = to_fsl_mc_device(dev); -+ -+ mc_drv->shutdown(mc_dev); -+} -+ -+/** -+ * __fsl_mc_driver_register - registers a child device driver with the -+ * MC bus -+ * -+ * This function is implicitly invoked from the registration function of -+ * fsl_mc device drivers, which is generated by the -+ * module_fsl_mc_driver() macro. -+ */ -+int __fsl_mc_driver_register(struct fsl_mc_driver *mc_driver, -+ struct module *owner) -+{ -+ int error; -+ -+ mc_driver->driver.owner = owner; -+ mc_driver->driver.bus = &fsl_mc_bus_type; -+ -+ if (mc_driver->probe) -+ mc_driver->driver.probe = fsl_mc_driver_probe; -+ -+ if (mc_driver->remove) -+ mc_driver->driver.remove = fsl_mc_driver_remove; -+ -+ if (mc_driver->shutdown) -+ mc_driver->driver.shutdown = fsl_mc_driver_shutdown; -+ -+ error = driver_register(&mc_driver->driver); -+ if (error < 0) { -+ pr_err("driver_register() failed for %s: %d\n", -+ mc_driver->driver.name, error); -+ return error; -+ } -+ -+ pr_info("MC object device driver %s registered\n", -+ mc_driver->driver.name); -+ return 0; -+} -+EXPORT_SYMBOL_GPL(__fsl_mc_driver_register); -+ -+/** -+ * fsl_mc_driver_unregister - unregisters a device driver from the -+ * MC bus -+ */ -+void fsl_mc_driver_unregister(struct fsl_mc_driver *mc_driver) -+{ -+ driver_unregister(&mc_driver->driver); -+} 
-+EXPORT_SYMBOL_GPL(fsl_mc_driver_unregister); -+ -+bool fsl_mc_interrupts_supported(void) -+{ -+ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); -+ -+ return mc->gic_supported; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_interrupts_supported); -+ -+static int get_dprc_attr(struct fsl_mc_io *mc_io, -+ int container_id, struct dprc_attributes *attr) -+{ -+ uint16_t dprc_handle; -+ int error; -+ -+ error = dprc_open(mc_io, 0, container_id, &dprc_handle); -+ if (error < 0) { -+ pr_err("dprc_open() failed: %d\n", error); -+ return error; -+ } -+ -+ memset(attr, 0, sizeof(struct dprc_attributes)); -+ error = dprc_get_attributes(mc_io, 0, dprc_handle, attr); -+ if (error < 0) { -+ pr_err("dprc_get_attributes() failed: %d\n", error); -+ goto common_cleanup; -+ } -+ -+ error = 0; -+ -+common_cleanup: -+ (void)dprc_close(mc_io, 0, dprc_handle); -+ return error; -+} -+ -+static int get_dprc_icid(struct fsl_mc_io *mc_io, -+ int container_id, uint16_t *icid) -+{ -+ struct dprc_attributes attr; -+ int error; -+ -+ error = get_dprc_attr(mc_io, container_id, &attr); -+ if (error == 0) -+ *icid = attr.icid; -+ -+ return error; -+} -+ -+static int get_dprc_version(struct fsl_mc_io *mc_io, -+ int container_id, uint16_t *major, uint16_t *minor) -+{ -+ struct dprc_attributes attr; -+ int error; -+ -+ error = get_dprc_attr(mc_io, container_id, &attr); -+ if (error == 0) { -+ *major = attr.version.major; -+ *minor = attr.version.minor; -+ } -+ -+ return error; -+} -+ -+static int translate_mc_addr(enum fsl_mc_region_types mc_region_type, -+ uint64_t mc_offset, phys_addr_t *phys_addr) -+{ -+ int i; -+ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); -+ -+ if (mc->num_translation_ranges == 0) { -+ /* -+ * Do identity mapping: -+ */ -+ *phys_addr = mc_offset; -+ return 0; -+ } -+ -+ for (i = 0; i < mc->num_translation_ranges; i++) { -+ struct fsl_mc_addr_translation_range *range = -+ &mc->translation_ranges[i]; -+ -+ if (mc_region_type == 
range->mc_region_type && -+ mc_offset >= range->start_mc_offset && -+ mc_offset < range->end_mc_offset) { -+ *phys_addr = range->start_phys_addr + -+ (mc_offset - range->start_mc_offset); -+ return 0; -+ } -+ } -+ -+ return -EFAULT; -+} -+ -+static int fsl_mc_device_get_mmio_regions(struct fsl_mc_device *mc_dev, -+ struct fsl_mc_device *mc_bus_dev) -+{ -+ int i; -+ int error; -+ struct resource *regions; -+ struct dprc_obj_desc *obj_desc = &mc_dev->obj_desc; -+ struct device *parent_dev = mc_dev->dev.parent; -+ enum fsl_mc_region_types mc_region_type; -+ -+ if (strcmp(obj_desc->type, "dprc") == 0 || -+ strcmp(obj_desc->type, "dpmcp") == 0) { -+ mc_region_type = FSL_MC_PORTAL; -+ } else if (strcmp(obj_desc->type, "dpio") == 0) { -+ mc_region_type = FSL_QBMAN_PORTAL; -+ } else { -+ /* -+ * This function should not have been called for this MC object -+ * type, as this object type is not supposed to have MMIO -+ * regions -+ */ -+ WARN_ON(true); -+ return -EINVAL; -+ } -+ -+ regions = kmalloc_array(obj_desc->region_count, -+ sizeof(regions[0]), GFP_KERNEL); -+ if (!regions) -+ return -ENOMEM; -+ -+ for (i = 0; i < obj_desc->region_count; i++) { -+ struct dprc_region_desc region_desc; -+ -+ error = dprc_get_obj_region(mc_bus_dev->mc_io, -+ 0, -+ mc_bus_dev->mc_handle, -+ obj_desc->type, -+ obj_desc->id, i, ®ion_desc); -+ if (error < 0) { -+ dev_err(parent_dev, -+ "dprc_get_obj_region() failed: %d\n", error); -+ goto error_cleanup_regions; -+ } -+ -+ WARN_ON(region_desc.size == 0); -+ error = translate_mc_addr(mc_region_type, -+ region_desc.base_offset, -+ ®ions[i].start); -+ if (error < 0) { -+ dev_err(parent_dev, -+ "Invalid MC offset: %#x (for %s.%d\'s region %d)\n", -+ region_desc.base_offset, -+ obj_desc->type, obj_desc->id, i); -+ goto error_cleanup_regions; -+ } -+ -+ regions[i].end = regions[i].start + region_desc.size - 1; -+ regions[i].name = "fsl-mc object MMIO region"; -+ regions[i].flags = IORESOURCE_IO; -+ if (region_desc.flags & DPRC_REGION_CACHEABLE) -+ 
regions[i].flags |= IORESOURCE_CACHEABLE; -+ } -+ -+ mc_dev->regions = regions; -+ return 0; -+ -+error_cleanup_regions: -+ kfree(regions); -+ return error; -+} -+ -+/** -+ * Add a newly discovered MC object device to be visible in Linux -+ */ -+int fsl_mc_device_add(struct dprc_obj_desc *obj_desc, -+ struct fsl_mc_io *mc_io, -+ struct device *parent_dev, -+ const char *driver_override, -+ struct fsl_mc_device **new_mc_dev) -+{ -+ int error; -+ struct fsl_mc_device *mc_dev = NULL; -+ struct fsl_mc_bus *mc_bus = NULL; -+ struct fsl_mc_device *parent_mc_dev; -+ -+ if (parent_dev->bus == &fsl_mc_bus_type) -+ parent_mc_dev = to_fsl_mc_device(parent_dev); -+ else -+ parent_mc_dev = NULL; -+ -+ if (strcmp(obj_desc->type, "dprc") == 0) { -+ /* -+ * Allocate an MC bus device object: -+ */ -+ mc_bus = devm_kzalloc(parent_dev, sizeof(*mc_bus), GFP_KERNEL); -+ if (!mc_bus) -+ return -ENOMEM; -+ -+ mc_dev = &mc_bus->mc_dev; -+ } else { -+ /* -+ * Allocate a regular fsl_mc_device object: -+ */ -+ mc_dev = kmem_cache_zalloc(mc_dev_cache, GFP_KERNEL); -+ if (!mc_dev) -+ return -ENOMEM; -+ } -+ -+ mc_dev->obj_desc = *obj_desc; -+ mc_dev->mc_io = mc_io; -+ if (driver_override) { -+ /* -+ * We trust driver_override, so we don't need to use -+ * kstrndup() here -+ */ -+ mc_dev->driver_override = kstrdup(driver_override, GFP_KERNEL); -+ if (!mc_dev->driver_override) { -+ error = -ENOMEM; -+ goto error_cleanup_dev; -+ } -+ } -+ -+ device_initialize(&mc_dev->dev); -+ INIT_LIST_HEAD(&mc_dev->dev.msi_list); -+ mc_dev->dev.parent = parent_dev; -+ mc_dev->dev.bus = &fsl_mc_bus_type; -+ dev_set_name(&mc_dev->dev, "%s.%d", obj_desc->type, obj_desc->id); -+ -+ if (strcmp(obj_desc->type, "dprc") == 0) { -+ struct fsl_mc_io *mc_io2; -+ -+ mc_dev->flags |= FSL_MC_IS_DPRC; -+ -+ /* -+ * To get the DPRC's ICID, we need to open the DPRC -+ * in get_dprc_icid(). 
For child DPRCs, we do so using the -+ * parent DPRC's MC portal instead of the child DPRC's MC -+ * portal, in case the child DPRC is already opened with -+ * its own portal (e.g., the DPRC used by AIOP). -+ * -+ * NOTE: There cannot be more than one active open for a -+ * given MC object, using the same MC portal. -+ */ -+ if (parent_mc_dev) { -+ /* -+ * device being added is a child DPRC device -+ */ -+ mc_io2 = parent_mc_dev->mc_io; -+ } else { -+ /* -+ * device being added is the root DPRC device -+ */ -+ if (WARN_ON(!mc_io)) { -+ error = -EINVAL; -+ goto error_cleanup_dev; -+ } -+ -+ mc_io2 = mc_io; -+ } -+ -+ error = get_dprc_icid(mc_io2, obj_desc->id, &mc_dev->icid); -+ if (error < 0) -+ goto error_cleanup_dev; -+ } else { -+ /* -+ * A non-DPRC MC object device has to be a child of another -+ * MC object (specifically a DPRC object) -+ */ -+ mc_dev->icid = parent_mc_dev->icid; -+ mc_dev->dma_mask = FSL_MC_DEFAULT_DMA_MASK; -+ mc_dev->dev.dma_mask = &mc_dev->dma_mask; -+ } -+ -+ /* -+ * Get MMIO regions for the device from the MC: -+ * -+ * NOTE: the root DPRC is a special case as its MMIO region is -+ * obtained from the device tree -+ */ -+ if (parent_mc_dev && obj_desc->region_count != 0) { -+ error = fsl_mc_device_get_mmio_regions(mc_dev, -+ parent_mc_dev); -+ if (error < 0) -+ goto error_cleanup_dev; -+ } -+ -+ /* -+ * Objects are coherent, unless 'no shareability' flag set. 
-+ * FIXME: fill up @dma_base, @size, @iommu -+ */ -+ if (!(obj_desc->flags & DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY)) -+ arch_setup_dma_ops(&mc_dev->dev, 0, 0, NULL, true); -+ -+ /* -+ * The device-specific probe callback will get invoked by device_add() -+ */ -+ error = device_add(&mc_dev->dev); -+ if (error < 0) { -+ dev_err(parent_dev, -+ "device_add() failed for device %s: %d\n", -+ dev_name(&mc_dev->dev), error); -+ goto error_cleanup_dev; -+ } -+ -+ (void)get_device(&mc_dev->dev); -+ dev_dbg(parent_dev, "Added MC object device %s\n", -+ dev_name(&mc_dev->dev)); -+ -+ *new_mc_dev = mc_dev; -+ return 0; -+ -+error_cleanup_dev: -+ kfree(mc_dev->regions); -+ if (mc_bus) -+ devm_kfree(parent_dev, mc_bus); -+ else -+ kmem_cache_free(mc_dev_cache, mc_dev); -+ -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_device_add); -+ -+/** -+ * fsl_mc_device_remove - Remove a MC object device from being visible to -+ * Linux -+ * -+ * @mc_dev: Pointer to a MC object device object -+ */ -+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev) -+{ -+ struct fsl_mc_bus *mc_bus = NULL; -+ -+ kfree(mc_dev->regions); -+ -+ /* -+ * The device-specific remove callback will get invoked by device_del() -+ */ -+ device_del(&mc_dev->dev); -+ put_device(&mc_dev->dev); -+ -+ if (strcmp(mc_dev->obj_desc.type, "dprc") == 0) { -+ mc_bus = to_fsl_mc_bus(mc_dev); -+ -+ if (&mc_dev->dev == fsl_mc_bus_type.dev_root) -+ fsl_mc_bus_type.dev_root = NULL; -+ } else -+ WARN_ON(mc_dev->mc_io != NULL); -+ -+ kfree(mc_dev->driver_override); -+ mc_dev->driver_override = NULL; -+ if (mc_bus) -+ devm_kfree(mc_dev->dev.parent, mc_bus); -+ else -+ kmem_cache_free(mc_dev_cache, mc_dev); -+} -+EXPORT_SYMBOL_GPL(fsl_mc_device_remove); -+ -+static int mc_bus_msi_prepare(struct irq_domain *domain, struct device *dev, -+ int nvec, msi_alloc_info_t *info) -+{ -+ int error; -+ u32 its_dev_id; -+ struct dprc_attributes dprc_attr; -+ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(dev); -+ -+ if 
(WARN_ON(!(mc_bus_dev->flags & FSL_MC_IS_DPRC))) -+ return -EINVAL; -+ -+ error = dprc_get_attributes(mc_bus_dev->mc_io, -+ 0, -+ mc_bus_dev->mc_handle, &dprc_attr); -+ if (error < 0) { -+ dev_err(&mc_bus_dev->dev, -+ "dprc_get_attributes() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ /* -+ * Build the device Id to be passed to the GIC-ITS: -+ * -+ * NOTE: This device id corresponds to the IOMMU stream ID -+ * associated with the DPRC object. -+ */ -+ its_dev_id = mc_bus_dev->icid; -+ if (its_dev_id > STREAM_ID_ICID_MASK) { -+ dev_err(&mc_bus_dev->dev, -+ "Invalid ICID: %#x\n", its_dev_id); -+ return -ERANGE; -+ } -+ -+ if (dprc_attr.options & DPRC_CFG_OPT_AIOP) -+ its_dev_id |= STREAM_ID_PL_MASK | STREAM_ID_BMT_MASK; -+ -+ return __its_msi_prepare(domain, its_dev_id, dev, nvec, info); -+} -+ -+static void mc_bus_mask_msi_irq(struct irq_data *d) -+{ -+ /* Bus specefic Mask */ -+ irq_chip_mask_parent(d); -+} -+ -+static void mc_bus_unmask_msi_irq(struct irq_data *d) -+{ -+ /* Bus specefic unmask */ -+ irq_chip_unmask_parent(d); -+} -+ -+static void program_msi_at_mc(struct fsl_mc_device *mc_bus_dev, -+ struct fsl_mc_device_irq *irq) -+{ -+ int error; -+ struct fsl_mc_device *owner_mc_dev = irq->mc_dev; -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ struct dprc_irq_cfg irq_cfg; -+ -+ /* -+ * irq->msi_paddr is 0x0 when this function is invoked in the -+ * free_irq() code path. In this case, for the MC, we don't -+ * really need to "unprogram" the MSI, so we just return. -+ * This helps avoid subtle ordering problems in the MC -+ * bus IRQ teardown logic. 
-+ * FIXME: evaluate whether there is a better way to address -+ * the underlying issue (upstreamability concern) -+ */ -+ if (irq->msi_paddr == 0x0) -+ return; -+ -+ if (WARN_ON(!owner_mc_dev)) -+ return; -+ -+ irq_cfg.paddr = irq->msi_paddr; -+ irq_cfg.val = irq->msi_value; -+ irq_cfg.irq_num = irq->irq_number; -+ -+ if (owner_mc_dev == mc_bus_dev) { -+ /* -+ * IRQ is for the mc_bus_dev's DPRC itself -+ */ -+ error = dprc_set_irq(mc_bus->atomic_mc_io, -+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, -+ mc_bus->atomic_dprc_handle, -+ irq->dev_irq_index, -+ &irq_cfg); -+ if (error < 0) { -+ dev_err(&owner_mc_dev->dev, -+ "dprc_set_irq() failed: %d\n", error); -+ } -+ } else { -+ error = dprc_set_obj_irq(mc_bus->atomic_mc_io, -+ MC_CMD_FLAG_INTR_DIS | MC_CMD_FLAG_PRI, -+ mc_bus->atomic_dprc_handle, -+ owner_mc_dev->obj_desc.type, -+ owner_mc_dev->obj_desc.id, -+ irq->dev_irq_index, -+ &irq_cfg); -+ if (error < 0) { -+ dev_err(&owner_mc_dev->dev, -+ "dprc_obj_set_irq() failed: %d\n", error); -+ } -+ } -+} -+ -+/* -+ * This function is invoked from devm_request_irq(), -+ * devm_request_threaded_irq(), dev_free_irq() -+ */ -+static void mc_bus_msi_domain_write_msg(struct irq_data *irq_data, -+ struct msi_msg *msg) -+{ -+ struct msi_desc *msi_entry = irq_data->msi_desc; -+ struct fsl_mc_device *mc_bus_dev = to_fsl_mc_device(msi_entry->dev); -+ struct fsl_mc_bus *mc_bus = to_fsl_mc_bus(mc_bus_dev); -+ struct fsl_mc_device_irq *irq_res = -+ &mc_bus->irq_resources[msi_entry->msi_attrib.entry_nr]; -+ -+ /* -+ * NOTE: This function is invoked with interrupts disabled -+ */ -+ -+ if (irq_res->irq_number == irq_data->irq) { -+ irq_res->msi_paddr = -+ ((u64)msg->address_hi << 32) | msg->address_lo; -+ -+ irq_res->msi_value = msg->data; -+ -+ /* -+ * Program the MSI (paddr, value) pair in the device: -+ */ -+ program_msi_at_mc(mc_bus_dev, irq_res); -+ } -+} -+ -+static struct irq_chip mc_bus_msi_irq_chip = { -+ .name = "fsl-mc-bus-msi", -+ .irq_unmask = mc_bus_unmask_msi_irq, -+ 
.irq_mask = mc_bus_mask_msi_irq, -+ .irq_eoi = irq_chip_eoi_parent, -+ .irq_write_msi_msg = mc_bus_msi_domain_write_msg, -+}; -+ -+static struct msi_domain_ops mc_bus_msi_ops = { -+ .msi_prepare = mc_bus_msi_prepare, -+}; -+ -+static struct msi_domain_info mc_bus_msi_domain_info = { -+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | -+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), -+ .ops = &mc_bus_msi_ops, -+ .chip = &mc_bus_msi_irq_chip, -+}; -+ -+static int create_mc_irq_domain(struct platform_device *mc_pdev, -+ struct irq_domain **new_irq_domain) -+{ -+ int error; -+ struct device_node *its_of_node; -+ struct irq_domain *its_domain; -+ struct irq_domain *irq_domain; -+ struct device_node *mc_of_node = mc_pdev->dev.of_node; -+ -+ its_of_node = of_parse_phandle(mc_of_node, "msi-parent", 0); -+ if (!its_of_node) { -+ dev_err(&mc_pdev->dev, -+ "msi-parent phandle missing for %s\n", -+ mc_of_node->full_name); -+ return -ENOENT; -+ } -+ -+ /* -+ * Extract MSI parent node: -+ */ -+ its_domain = irq_find_host(its_of_node); -+ if (!its_domain) { -+ dev_err(&mc_pdev->dev, "Unable to find parent domain\n"); -+ error = -ENOENT; -+ goto cleanup_its_of_node; -+ } -+ -+ irq_domain = msi_create_irq_domain(mc_of_node, &mc_bus_msi_domain_info, -+ its_domain->parent); -+ if (!irq_domain) { -+ dev_err(&mc_pdev->dev, "Failed to allocate msi_domain\n"); -+ error = -ENOMEM; -+ goto cleanup_its_of_node; -+ } -+ -+ dev_dbg(&mc_pdev->dev, "Allocated MSI domain\n"); -+ *new_irq_domain = irq_domain; -+ return 0; -+ -+cleanup_its_of_node: -+ of_node_put(its_of_node); -+ return error; -+} -+ -+/* -+ * Initialize the interrupt pool associated with a MC bus. 
-+ * It allocates a block of IRQs from the GIC-ITS -+ */ -+int __must_check fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, -+ unsigned int irq_count) -+{ -+ unsigned int i; -+ struct msi_desc *msi_entry; -+ struct msi_desc *next_msi_entry; -+ struct fsl_mc_device_irq *irq_resources; -+ struct fsl_mc_device_irq *irq_res; -+ int error; -+ struct fsl_mc_device *mc_bus_dev = &mc_bus->mc_dev; -+ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); -+ struct fsl_mc_resource_pool *res_pool = -+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; -+ -+ /* -+ * Detect duplicate invocations of this function: -+ */ -+ if (WARN_ON(!list_empty(&mc_bus_dev->dev.msi_list))) -+ return -EINVAL; -+ -+ if (WARN_ON(irq_count == 0 || -+ irq_count > FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS)) -+ return -EINVAL; -+ -+ irq_resources = -+ devm_kzalloc(&mc_bus_dev->dev, -+ sizeof(*irq_resources) * irq_count, -+ GFP_KERNEL); -+ if (!irq_resources) -+ return -ENOMEM; -+ -+ for (i = 0; i < irq_count; i++) { -+ irq_res = &irq_resources[i]; -+ msi_entry = alloc_msi_entry(&mc_bus_dev->dev); -+ if (!msi_entry) { -+ dev_err(&mc_bus_dev->dev, "Failed to allocate msi entry\n"); -+ error = -ENOMEM; -+ goto cleanup_msi_entries; -+ } -+ -+ msi_entry->msi_attrib.is_msix = 1; -+ msi_entry->msi_attrib.is_64 = 1; -+ msi_entry->msi_attrib.entry_nr = i; -+ msi_entry->nvec_used = 1; -+ list_add_tail(&msi_entry->list, &mc_bus_dev->dev.msi_list); -+ -+ /* -+ * NOTE: irq_res->msi_paddr will be set by the -+ * mc_bus_msi_domain_write_msg() callback -+ */ -+ irq_res->resource.type = res_pool->type; -+ irq_res->resource.data = irq_res; -+ irq_res->resource.parent_pool = res_pool; -+ INIT_LIST_HEAD(&irq_res->resource.node); -+ list_add_tail(&irq_res->resource.node, &res_pool->free_list); -+ } -+ -+ /* -+ * NOTE: Calling this function will trigger the invocation of the -+ * mc_bus_msi_prepare() callback -+ */ -+ error = msi_domain_alloc_irqs(mc->irq_domain, -+ &mc_bus_dev->dev, irq_count); -+ -+ if (error) { -+ 
dev_err(&mc_bus_dev->dev, "Failed to allocate IRQs\n"); -+ goto cleanup_msi_entries; -+ } -+ -+ for_each_msi_entry(msi_entry, &mc_bus_dev->dev) { -+ u32 irq_num = msi_entry->irq; -+ -+ irq_res = &irq_resources[msi_entry->msi_attrib.entry_nr]; -+ irq_res->irq_number = irq_num; -+ irq_res->resource.id = irq_num; -+ } -+ -+ res_pool->max_count = irq_count; -+ res_pool->free_count = irq_count; -+ mc_bus->irq_resources = irq_resources; -+ return 0; -+ -+cleanup_msi_entries: -+ list_for_each_entry_safe(msi_entry, next_msi_entry, -+ &mc_bus_dev->dev.msi_list, list) { -+ list_del(&msi_entry->list); -+ kfree(msi_entry); -+ } -+ -+ devm_kfree(&mc_bus_dev->dev, irq_resources); -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_populate_irq_pool); -+ -+/** -+ * Teardown the interrupt pool associated with an MC bus. -+ * It frees the IRQs that were allocated to the pool, back to the GIC-ITS. -+ */ -+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus) -+{ -+ struct msi_desc *msi_entry; -+ struct msi_desc *next_msi_entry; -+ struct fsl_mc *mc = dev_get_drvdata(fsl_mc_bus_type.dev_root->parent); -+ struct fsl_mc_resource_pool *res_pool = -+ &mc_bus->resource_pools[FSL_MC_POOL_IRQ]; -+ -+ if (WARN_ON(!mc_bus->irq_resources)) -+ return; -+ -+ if (WARN_ON(res_pool->max_count == 0)) -+ return; -+ -+ if (WARN_ON(res_pool->free_count != res_pool->max_count)) -+ return; -+ -+ msi_domain_free_irqs(mc->irq_domain, &mc_bus->mc_dev.dev); -+ list_for_each_entry_safe(msi_entry, next_msi_entry, -+ &mc_bus->mc_dev.dev.msi_list, list) { -+ list_del(&msi_entry->list); -+ kfree(msi_entry); -+ } -+ -+ devm_kfree(&mc_bus->mc_dev.dev, mc_bus->irq_resources); -+ res_pool->max_count = 0; -+ res_pool->free_count = 0; -+ mc_bus->irq_resources = NULL; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_cleanup_irq_pool); -+ -+static int parse_mc_ranges(struct device *dev, -+ int *paddr_cells, -+ int *mc_addr_cells, -+ int *mc_size_cells, -+ const __be32 **ranges_start, -+ uint8_t *num_ranges) -+{ -+ const __be32 *prop; -+ int 
range_tuple_cell_count; -+ int ranges_len; -+ int tuple_len; -+ struct device_node *mc_node = dev->of_node; -+ -+ *ranges_start = of_get_property(mc_node, "ranges", &ranges_len); -+ if (!(*ranges_start) || !ranges_len) { -+ dev_warn(dev, -+ "missing or empty ranges property for device tree node '%s'\n", -+ mc_node->name); -+ -+ *num_ranges = 0; -+ return 0; -+ } -+ -+ *paddr_cells = of_n_addr_cells(mc_node); -+ -+ prop = of_get_property(mc_node, "#address-cells", NULL); -+ if (prop) -+ *mc_addr_cells = be32_to_cpup(prop); -+ else -+ *mc_addr_cells = *paddr_cells; -+ -+ prop = of_get_property(mc_node, "#size-cells", NULL); -+ if (prop) -+ *mc_size_cells = be32_to_cpup(prop); -+ else -+ *mc_size_cells = of_n_size_cells(mc_node); -+ -+ range_tuple_cell_count = *paddr_cells + *mc_addr_cells + -+ *mc_size_cells; -+ -+ tuple_len = range_tuple_cell_count * sizeof(__be32); -+ if (ranges_len % tuple_len != 0) { -+ dev_err(dev, "malformed ranges property '%s'\n", mc_node->name); -+ return -EINVAL; -+ } -+ -+ *num_ranges = ranges_len / tuple_len; -+ return 0; -+} -+ -+static int get_mc_addr_translation_ranges(struct device *dev, -+ struct fsl_mc_addr_translation_range -+ **ranges, -+ uint8_t *num_ranges) -+{ -+ int error; -+ int paddr_cells; -+ int mc_addr_cells; -+ int mc_size_cells; -+ int i; -+ const __be32 *ranges_start; -+ const __be32 *cell; -+ -+ error = parse_mc_ranges(dev, -+ &paddr_cells, -+ &mc_addr_cells, -+ &mc_size_cells, -+ &ranges_start, -+ num_ranges); -+ if (error < 0) -+ return error; -+ -+ if (!(*num_ranges)) { -+ /* -+ * Missing or empty ranges property ("ranges;") for the -+ * 'fsl,qoriq-mc' node. In this case, identity mapping -+ * will be used. 
-+ */ -+ *ranges = NULL; -+ return 0; -+ } -+ -+ *ranges = devm_kcalloc(dev, *num_ranges, -+ sizeof(struct fsl_mc_addr_translation_range), -+ GFP_KERNEL); -+ if (!(*ranges)) -+ return -ENOMEM; -+ -+ cell = ranges_start; -+ for (i = 0; i < *num_ranges; ++i) { -+ struct fsl_mc_addr_translation_range *range = &(*ranges)[i]; -+ -+ range->mc_region_type = of_read_number(cell, 1); -+ range->start_mc_offset = of_read_number(cell + 1, -+ mc_addr_cells - 1); -+ cell += mc_addr_cells; -+ range->start_phys_addr = of_read_number(cell, paddr_cells); -+ cell += paddr_cells; -+ range->end_mc_offset = range->start_mc_offset + -+ of_read_number(cell, mc_size_cells); -+ -+ cell += mc_size_cells; -+ } -+ -+ return 0; -+} -+ -+/** -+ * fsl_mc_bus_probe - callback invoked when the root MC bus is being -+ * added -+ */ -+static int fsl_mc_bus_probe(struct platform_device *pdev) -+{ -+ struct dprc_obj_desc obj_desc; -+ int error; -+ struct fsl_mc *mc; -+ struct fsl_mc_device *mc_bus_dev = NULL; -+ struct fsl_mc_io *mc_io = NULL; -+ int container_id; -+ phys_addr_t mc_portal_phys_addr; -+ uint32_t mc_portal_size; -+ struct mc_version mc_version; -+ struct resource res; -+ -+ dev_info(&pdev->dev, "Root MC bus device probed"); -+ -+ mc = devm_kzalloc(&pdev->dev, sizeof(*mc), GFP_KERNEL); -+ if (!mc) -+ return -ENOMEM; -+ -+ platform_set_drvdata(pdev, mc); -+ error = create_mc_irq_domain(pdev, &mc->irq_domain); -+ if (error < 0) { -+ dev_warn(&pdev->dev, -+ "WARNING: MC bus driver will run without interrupt support\n"); -+ } else { -+ mc->gic_supported = true; -+ } -+ -+ /* -+ * Get physical address of MC portal for the root DPRC: -+ */ -+ error = of_address_to_resource(pdev->dev.of_node, 0, &res); -+ if (error < 0) { -+ dev_err(&pdev->dev, -+ "of_address_to_resource() failed for %s\n", -+ pdev->dev.of_node->full_name); -+ goto error_cleanup_irq_domain; -+ } -+ -+ mc_portal_phys_addr = res.start; -+ mc_portal_size = resource_size(&res); -+ error = fsl_create_mc_io(&pdev->dev, 
mc_portal_phys_addr, -+ mc_portal_size, NULL, 0, &mc_io); -+ if (error < 0) -+ goto error_cleanup_irq_domain; -+ -+ error = mc_get_version(mc_io, 0, &mc_version); -+ if (error != 0) { -+ dev_err(&pdev->dev, -+ "mc_get_version() failed with error %d\n", error); -+ goto error_cleanup_mc_io; -+ } -+ -+ dev_info(&pdev->dev, -+ "Freescale Management Complex Firmware version: %u.%u.%u\n", -+ mc_version.major, mc_version.minor, mc_version.revision); -+ -+ error = get_mc_addr_translation_ranges(&pdev->dev, -+ &mc->translation_ranges, -+ &mc->num_translation_ranges); -+ if (error < 0) -+ goto error_cleanup_mc_io; -+ -+ error = dpmng_get_container_id(mc_io, 0, &container_id); -+ if (error < 0) { -+ dev_err(&pdev->dev, -+ "dpmng_get_container_id() failed: %d\n", error); -+ goto error_cleanup_mc_io; -+ } -+ -+ memset(&obj_desc, 0, sizeof(struct dprc_obj_desc)); -+ error = get_dprc_version(mc_io, container_id, -+ &obj_desc.ver_major, &obj_desc.ver_minor); -+ if (error < 0) -+ goto error_cleanup_mc_io; -+ -+ obj_desc.vendor = FSL_MC_VENDOR_FREESCALE; -+ strcpy(obj_desc.type, "dprc"); -+ obj_desc.id = container_id; -+ obj_desc.irq_count = 1; -+ obj_desc.region_count = 0; -+ -+ error = fsl_mc_device_add(&obj_desc, mc_io, &pdev->dev, NULL, -+ &mc_bus_dev); -+ if (error < 0) -+ goto error_cleanup_mc_io; -+ -+ mc->root_mc_bus_dev = mc_bus_dev; -+ return 0; -+ -+error_cleanup_mc_io: -+ fsl_destroy_mc_io(mc_io); -+ -+error_cleanup_irq_domain: -+ if (mc->gic_supported) -+ irq_domain_remove(mc->irq_domain); -+ -+ return error; -+} -+ -+/** -+ * fsl_mc_bus_remove - callback invoked when the root MC bus is being -+ * removed -+ */ -+static int fsl_mc_bus_remove(struct platform_device *pdev) -+{ -+ struct fsl_mc *mc = platform_get_drvdata(pdev); -+ -+ if (WARN_ON(&mc->root_mc_bus_dev->dev != fsl_mc_bus_type.dev_root)) -+ return -EINVAL; -+ -+ if (mc->gic_supported) -+ irq_domain_remove(mc->irq_domain); -+ -+ fsl_mc_device_remove(mc->root_mc_bus_dev); -+ 
fsl_destroy_mc_io(mc->root_mc_bus_dev->mc_io); -+ mc->root_mc_bus_dev->mc_io = NULL; -+ -+ dev_info(&pdev->dev, "Root MC bus device removed"); -+ return 0; -+} -+ -+static const struct of_device_id fsl_mc_bus_match_table[] = { -+ {.compatible = "fsl,qoriq-mc",}, -+ {}, -+}; -+ -+MODULE_DEVICE_TABLE(of, fsl_mc_bus_match_table); -+ -+static struct platform_driver fsl_mc_bus_driver = { -+ .driver = { -+ .name = "fsl_mc_bus", -+ .owner = THIS_MODULE, -+ .pm = NULL, -+ .of_match_table = fsl_mc_bus_match_table, -+ }, -+ .probe = fsl_mc_bus_probe, -+ .remove = fsl_mc_bus_remove, -+}; -+ -+static int __init fsl_mc_bus_driver_init(void) -+{ -+ int error; -+ -+ mc_dev_cache = kmem_cache_create("fsl_mc_device", -+ sizeof(struct fsl_mc_device), 0, 0, -+ NULL); -+ if (!mc_dev_cache) { -+ pr_err("Could not create fsl_mc_device cache\n"); -+ return -ENOMEM; -+ } -+ -+ error = bus_register(&fsl_mc_bus_type); -+ if (error < 0) { -+ pr_err("fsl-mc bus type registration failed: %d\n", error); -+ goto error_cleanup_cache; -+ } -+ -+ pr_info("fsl-mc bus type registered\n"); -+ -+ error = platform_driver_register(&fsl_mc_bus_driver); -+ if (error < 0) { -+ pr_err("platform_driver_register() failed: %d\n", error); -+ goto error_cleanup_bus; -+ } -+ -+ error = dprc_driver_init(); -+ if (error < 0) -+ goto error_cleanup_driver; -+ -+ error = fsl_mc_allocator_driver_init(); -+ if (error < 0) -+ goto error_cleanup_dprc_driver; -+ -+ return 0; -+ -+error_cleanup_dprc_driver: -+ dprc_driver_exit(); -+ -+error_cleanup_driver: -+ platform_driver_unregister(&fsl_mc_bus_driver); -+ -+error_cleanup_bus: -+ bus_unregister(&fsl_mc_bus_type); -+ -+error_cleanup_cache: -+ kmem_cache_destroy(mc_dev_cache); -+ return error; -+} -+ -+postcore_initcall(fsl_mc_bus_driver_init); -+ -+static void __exit fsl_mc_bus_driver_exit(void) -+{ -+ if (WARN_ON(!mc_dev_cache)) -+ return; -+ -+ fsl_mc_allocator_driver_exit(); -+ dprc_driver_exit(); -+ platform_driver_unregister(&fsl_mc_bus_driver); -+ 
bus_unregister(&fsl_mc_bus_type); -+ kmem_cache_destroy(mc_dev_cache); -+ pr_info("MC bus unregistered\n"); -+} -+ -+module_exit(fsl_mc_bus_driver_exit); -+ -+MODULE_AUTHOR("Freescale Semiconductor Inc."); -+MODULE_DESCRIPTION("Freescale Management Complex (MC) bus driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/staging/fsl-mc/bus/mc-ioctl.h b/drivers/staging/fsl-mc/bus/mc-ioctl.h -new file mode 100644 -index 0000000..d5c1bc3 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/mc-ioctl.h -@@ -0,0 +1,25 @@ -+/* -+ * Freescale Management Complex (MC) ioclt interface -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * Lijun Pan -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+#ifndef _FSL_MC_IOCTL_H_ -+#define _FSL_MC_IOCTL_H_ -+ -+#include -+ -+#define RESTOOL_IOCTL_TYPE 'R' -+ -+#define RESTOOL_GET_ROOT_DPRC_INFO \ -+ _IOR(RESTOOL_IOCTL_TYPE, 0x1, uint32_t) -+ -+#define RESTOOL_SEND_MC_COMMAND \ -+ _IOWR(RESTOOL_IOCTL_TYPE, 0x4, struct mc_command) -+ -+#endif /* _FSL_MC_IOCTL_H_ */ -diff --git a/drivers/staging/fsl-mc/bus/mc-restool.c b/drivers/staging/fsl-mc/bus/mc-restool.c -new file mode 100644 -index 0000000..d261c1a ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/mc-restool.c -@@ -0,0 +1,312 @@ -+/* -+ * Freescale Management Complex (MC) restool driver -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * Lijun Pan -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#include "../include/mc-private.h" -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include "mc-ioctl.h" -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/dpmng.h" -+ -+/** -+ * Maximum number of DPRCs that can be opened at the same time -+ */ -+#define MAX_DPRC_HANDLES 64 -+ -+/** -+ * struct fsl_mc_restool - Management Complex (MC) resource manager object -+ * @tool_mc_io: pointer to the MC I/O object used by the restool -+ */ -+struct fsl_mc_restool { -+ struct fsl_mc_io *tool_mc_io; -+}; -+ -+/** -+ * struct global_state - indicating the number of static and dynamic instance -+ * @dynamic_instance_count - number of dynamically created instances -+ * @static_instance_in_use - static instance is in use or not -+ * @mutex - mutex lock to serialze the operations -+ */ -+struct global_state { -+ uint32_t dynamic_instance_count; -+ bool static_instance_in_use; -+ struct mutex mutex; -+}; -+ -+static struct fsl_mc_restool fsl_mc_restool = { 0 }; -+static struct global_state global_state = { 0 }; -+ -+static int fsl_mc_restool_dev_open(struct inode *inode, struct file *filep) -+{ -+ struct fsl_mc_device *root_mc_dev; -+ int error = 0; -+ struct fsl_mc_restool *fsl_mc_restool_new = NULL; -+ -+ mutex_lock(&global_state.mutex); -+ -+ if (WARN_ON(fsl_mc_bus_type.dev_root == NULL)) { -+ error = -EINVAL; -+ goto error; -+ } -+ -+ if (!global_state.static_instance_in_use) { -+ global_state.static_instance_in_use = true; -+ filep->private_data = &fsl_mc_restool; -+ } else { -+ fsl_mc_restool_new = kmalloc(sizeof(struct fsl_mc_restool), -+ GFP_KERNEL); -+ if (fsl_mc_restool_new == NULL) { -+ error = -ENOMEM; -+ goto error; -+ } -+ memset(fsl_mc_restool_new, 0, sizeof(*fsl_mc_restool_new)); -+ -+ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); -+ error = fsl_mc_portal_allocate(root_mc_dev, 0, -+ &fsl_mc_restool_new->tool_mc_io); -+ if (error < 0) { -+ pr_err("Not able to allocate MC 
portal\n"); -+ goto error; -+ } -+ ++global_state.dynamic_instance_count; -+ filep->private_data = fsl_mc_restool_new; -+ } -+ -+ mutex_unlock(&global_state.mutex); -+ return 0; -+error: -+ if (fsl_mc_restool_new != NULL && -+ fsl_mc_restool_new->tool_mc_io != NULL) { -+ fsl_mc_portal_free(fsl_mc_restool_new->tool_mc_io); -+ fsl_mc_restool_new->tool_mc_io = NULL; -+ } -+ -+ kfree(fsl_mc_restool_new); -+ mutex_unlock(&global_state.mutex); -+ return error; -+} -+ -+static int fsl_mc_restool_dev_release(struct inode *inode, struct file *filep) -+{ -+ struct fsl_mc_restool *fsl_mc_restool_local = filep->private_data; -+ -+ if (WARN_ON(filep->private_data == NULL)) -+ return -EINVAL; -+ -+ mutex_lock(&global_state.mutex); -+ -+ if (WARN_ON(global_state.dynamic_instance_count == 0 && -+ !global_state.static_instance_in_use)) { -+ mutex_unlock(&global_state.mutex); -+ return -EINVAL; -+ } -+ -+ /* Globally clean up opened/untracked handles */ -+ fsl_mc_portal_reset(fsl_mc_restool_local->tool_mc_io); -+ -+ pr_debug("dynamic instance count: %d\n", -+ global_state.dynamic_instance_count); -+ pr_debug("static instance count: %d\n", -+ global_state.static_instance_in_use); -+ -+ /* -+ * must check -+ * whether fsl_mc_restool_local is dynamic or global instance -+ * Otherwise it will free up the reserved portal by accident -+ * or even not free up the dynamic allocated portal -+ * if 2 or more instances running concurrently -+ */ -+ if (fsl_mc_restool_local == &fsl_mc_restool) { -+ pr_debug("this is reserved portal"); -+ pr_debug("reserved portal not in use\n"); -+ global_state.static_instance_in_use = false; -+ } else { -+ pr_debug("this is dynamically allocated portal"); -+ pr_debug("free one dynamically allocated portal\n"); -+ fsl_mc_portal_free(fsl_mc_restool_local->tool_mc_io); -+ kfree(filep->private_data); -+ --global_state.dynamic_instance_count; -+ } -+ -+ filep->private_data = NULL; -+ mutex_unlock(&global_state.mutex); -+ return 0; -+} -+ -+static int 
restool_get_root_dprc_info(unsigned long arg) -+{ -+ int error = -EINVAL; -+ uint32_t root_dprc_id; -+ struct fsl_mc_device *root_mc_dev; -+ -+ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); -+ root_dprc_id = root_mc_dev->obj_desc.id; -+ error = copy_to_user((void __user *)arg, &root_dprc_id, -+ sizeof(root_dprc_id)); -+ if (error < 0) { -+ pr_err("copy_to_user() failed with error %d\n", error); -+ goto error; -+ } -+ -+ return 0; -+error: -+ return error; -+} -+ -+static int restool_send_mc_command(unsigned long arg, -+ struct fsl_mc_restool *fsl_mc_restool) -+{ -+ int error = -EINVAL; -+ struct mc_command mc_cmd; -+ -+ error = copy_from_user(&mc_cmd, (void __user *)arg, sizeof(mc_cmd)); -+ if (error < 0) { -+ pr_err("copy_to_user() failed with error %d\n", error); -+ goto error; -+ } -+ -+ /* -+ * Send MC command to the MC: -+ */ -+ error = mc_send_command(fsl_mc_restool->tool_mc_io, &mc_cmd); -+ if (error < 0) -+ goto error; -+ -+ error = copy_to_user((void __user *)arg, &mc_cmd, sizeof(mc_cmd)); -+ if (error < 0) { -+ pr_err("copy_to_user() failed with error %d\n", error); -+ goto error; -+ } -+ -+ return 0; -+error: -+ return error; -+} -+ -+static long -+fsl_mc_restool_dev_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ int error = -EINVAL; -+ -+ if (WARN_ON(fsl_mc_bus_type.dev_root == NULL)) -+ goto out; -+ -+ switch (cmd) { -+ case RESTOOL_GET_ROOT_DPRC_INFO: -+ error = restool_get_root_dprc_info(arg); -+ break; -+ -+ case RESTOOL_SEND_MC_COMMAND: -+ error = restool_send_mc_command(arg, file->private_data); -+ break; -+ default: -+ error = -EINVAL; -+ } -+out: -+ return error; -+} -+ -+static const struct file_operations fsl_mc_restool_dev_fops = { -+ .owner = THIS_MODULE, -+ .open = fsl_mc_restool_dev_open, -+ .release = fsl_mc_restool_dev_release, -+ .unlocked_ioctl = fsl_mc_restool_dev_ioctl, -+ .compat_ioctl = fsl_mc_restool_dev_ioctl, -+}; -+ -+static struct miscdevice fsl_mc_restool_dev = { -+ .minor = 
MISC_DYNAMIC_MINOR, -+ .name = "mc_restool", -+ .fops = &fsl_mc_restool_dev_fops -+}; -+ -+static int __init fsl_mc_restool_driver_init(void) -+{ -+ struct fsl_mc_device *root_mc_dev; -+ int error = -EINVAL; -+ bool restool_dev_registered = false; -+ -+ mutex_init(&global_state.mutex); -+ -+ if (WARN_ON(fsl_mc_restool.tool_mc_io != NULL)) -+ goto error; -+ -+ if (WARN_ON(global_state.dynamic_instance_count != 0)) -+ goto error; -+ -+ if (WARN_ON(global_state.static_instance_in_use)) -+ goto error; -+ -+ if (fsl_mc_bus_type.dev_root == NULL) { -+ pr_err("fsl-mc bus not found, restool driver registration failed\n"); -+ goto error; -+ } -+ -+ root_mc_dev = to_fsl_mc_device(fsl_mc_bus_type.dev_root); -+ error = fsl_mc_portal_allocate(root_mc_dev, 0, -+ &fsl_mc_restool.tool_mc_io); -+ if (error < 0) { -+ pr_err("Not able to allocate MC portal\n"); -+ goto error; -+ } -+ -+ error = misc_register(&fsl_mc_restool_dev); -+ if (error < 0) { -+ pr_err("misc_register() failed: %d\n", error); -+ goto error; -+ } -+ -+ restool_dev_registered = true; -+ pr_info("%s driver registered\n", fsl_mc_restool_dev.name); -+ return 0; -+error: -+ if (restool_dev_registered) -+ misc_deregister(&fsl_mc_restool_dev); -+ -+ if (fsl_mc_restool.tool_mc_io != NULL) { -+ fsl_mc_portal_free(fsl_mc_restool.tool_mc_io); -+ fsl_mc_restool.tool_mc_io = NULL; -+ } -+ -+ return error; -+} -+ -+module_init(fsl_mc_restool_driver_init); -+ -+static void __exit fsl_mc_restool_driver_exit(void) -+{ -+ if (WARN_ON(fsl_mc_restool.tool_mc_io == NULL)) -+ return; -+ -+ if (WARN_ON(global_state.dynamic_instance_count != 0)) -+ return; -+ -+ if (WARN_ON(global_state.static_instance_in_use)) -+ return; -+ -+ misc_deregister(&fsl_mc_restool_dev); -+ fsl_mc_portal_free(fsl_mc_restool.tool_mc_io); -+ fsl_mc_restool.tool_mc_io = NULL; -+ pr_info("%s driver unregistered\n", fsl_mc_restool_dev.name); -+} -+ -+module_exit(fsl_mc_restool_driver_exit); -+ -+MODULE_AUTHOR("Freescale Semiconductor Inc."); 
-+MODULE_DESCRIPTION("Freescale's MC restool driver"); -+MODULE_LICENSE("GPL"); -+ -diff --git a/drivers/staging/fsl-mc/bus/mc-sys.c b/drivers/staging/fsl-mc/bus/mc-sys.c -new file mode 100644 -index 0000000..d3b6940 ---- /dev/null -+++ b/drivers/staging/fsl-mc/bus/mc-sys.c -@@ -0,0 +1,677 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+ * -+ * I/O services to send MC commands to the MC hardware -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#include "../include/mc-sys.h" -+#include "../include/mc-cmd.h" -+#include "../include/mc.h" -+#include -+#include -+#include -+#include -+#include -+#include "dpmcp.h" -+ -+/** -+ * Timeout in milliseconds to wait for the completion of an MC command -+ * 5000 ms is barely enough for dpsw/dpdmux creation -+ * TODO: if MC firmware could response faster, we should decrease this value -+ */ -+#define MC_CMD_COMPLETION_TIMEOUT_MS 5000 -+ -+/* -+ * usleep_range() min and max values used to throttle down polling -+ * iterations while waiting for MC command completion -+ */ -+#define MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS 10 -+#define MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS 500 -+ -+#define MC_CMD_HDR_READ_CMDID(_hdr) \ -+ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S)) -+ -+/** -+ * dpmcp_irq0_handler - Regular ISR for DPMCP interrupt 0 -+ * -+ * @irq: IRQ number of the interrupt being handled -+ * @arg: Pointer to device structure -+ */ -+static irqreturn_t dpmcp_irq0_handler(int irq_num, void *arg) -+{ -+ struct device *dev = (struct device *)arg; -+ struct fsl_mc_device *dpmcp_dev = to_fsl_mc_device(dev); -+ struct fsl_mc_io *mc_io = dpmcp_dev->mc_io; -+ -+ dev_dbg(dev, "DPMCP IRQ %d triggered on CPU %u\n", irq_num, -+ smp_processor_id()); -+ -+ if (WARN_ON(dpmcp_dev->irqs[0]->irq_number != (uint32_t)irq_num)) -+ goto out; -+ -+ if (WARN_ON(!mc_io)) -+ goto out; -+ -+ 
complete(&mc_io->mc_command_done_completion); -+out: -+ return IRQ_HANDLED; -+} -+ -+/* -+ * Disable and clear interrupts for a given DPMCP object -+ */ -+static int disable_dpmcp_irq(struct fsl_mc_device *dpmcp_dev) -+{ -+ int error; -+ -+ /* -+ * Disable generation of the DPMCP interrupt: -+ */ -+ error = dpmcp_set_irq_enable(dpmcp_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_dev->mc_handle, -+ DPMCP_IRQ_INDEX, 0); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, -+ "dpmcp_set_irq_enable() failed: %d\n", error); -+ -+ return error; -+ } -+ -+ /* -+ * Disable all DPMCP interrupt causes: -+ */ -+ error = dpmcp_set_irq_mask(dpmcp_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_dev->mc_handle, -+ DPMCP_IRQ_INDEX, 0x0); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, -+ "dpmcp_set_irq_mask() failed: %d\n", error); -+ -+ return error; -+ } -+ -+ return 0; -+} -+ -+static void unregister_dpmcp_irq_handler(struct fsl_mc_device *dpmcp_dev) -+{ -+ struct fsl_mc_device_irq *irq = dpmcp_dev->irqs[DPMCP_IRQ_INDEX]; -+ -+ devm_free_irq(&dpmcp_dev->dev, irq->irq_number, &dpmcp_dev->dev); -+} -+ -+static int register_dpmcp_irq_handler(struct fsl_mc_device *dpmcp_dev) -+{ -+ int error; -+ struct fsl_mc_device_irq *irq = dpmcp_dev->irqs[DPMCP_IRQ_INDEX]; -+ -+ error = devm_request_irq(&dpmcp_dev->dev, -+ irq->irq_number, -+ dpmcp_irq0_handler, -+ IRQF_NO_SUSPEND | IRQF_ONESHOT, -+ "FSL MC DPMCP irq0", -+ &dpmcp_dev->dev); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, -+ "devm_request_irq() failed: %d\n", -+ error); -+ return error; -+ } -+ -+ return 0; -+} -+ -+static int enable_dpmcp_irq(struct fsl_mc_device *dpmcp_dev) -+{ -+ int error; -+ -+ /* -+ * Enable MC command completion event to trigger DPMCP interrupt: -+ */ -+ error = dpmcp_set_irq_mask(dpmcp_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_dev->mc_handle, -+ DPMCP_IRQ_INDEX, -+ DPMCP_IRQ_EVENT_CMD_DONE); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, -+ "dpmcp_set_irq_mask() failed: %d\n", error); -+ -+ return error; 
-+ } -+ -+ /* -+ * Enable generation of the interrupt: -+ */ -+ error = dpmcp_set_irq_enable(dpmcp_dev->mc_io, -+ MC_CMD_FLAG_INTR_DIS, -+ dpmcp_dev->mc_handle, -+ DPMCP_IRQ_INDEX, 1); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, -+ "dpmcp_set_irq_enable() failed: %d\n", error); -+ -+ return error; -+ } -+ -+ return 0; -+} -+ -+/* -+ * Setup MC command completion interrupt for the DPMCP device associated with a -+ * given fsl_mc_io object -+ */ -+int fsl_mc_io_setup_dpmcp_irq(struct fsl_mc_io *mc_io) -+{ -+ int error; -+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; -+ -+ if (WARN_ON(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) -+ return -EINVAL; -+ -+ if (WARN_ON(!dpmcp_dev)) -+ return -EINVAL; -+ -+ if (WARN_ON(!fsl_mc_interrupts_supported())) -+ return -EINVAL; -+ -+ if (WARN_ON(dpmcp_dev->obj_desc.irq_count != 1)) -+ return -EINVAL; -+ -+ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) -+ return -EINVAL; -+ -+ error = fsl_mc_allocate_irqs(dpmcp_dev); -+ if (error < 0) -+ return error; -+ -+ error = disable_dpmcp_irq(dpmcp_dev); -+ if (error < 0) -+ goto error_free_irqs; -+ -+ error = register_dpmcp_irq_handler(dpmcp_dev); -+ if (error < 0) -+ goto error_free_irqs; -+ -+ error = enable_dpmcp_irq(dpmcp_dev); -+ if (error < 0) -+ goto error_unregister_irq_handler; -+ -+ mc_io->mc_command_done_irq_armed = true; -+ return 0; -+ -+error_unregister_irq_handler: -+ unregister_dpmcp_irq_handler(dpmcp_dev); -+ -+error_free_irqs: -+ fsl_mc_free_irqs(dpmcp_dev); -+ -+ return error; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_io_setup_dpmcp_irq); -+ -+/* -+ * Tear down interrupts for the DPMCP device associated with a given fsl_mc_io -+ * object -+ */ -+static void teardown_dpmcp_irq(struct fsl_mc_io *mc_io) -+{ -+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; -+ -+ if (WARN_ON(!dpmcp_dev)) -+ return; -+ if (WARN_ON(!fsl_mc_interrupts_supported())) -+ return; -+ if (WARN_ON(!dpmcp_dev->irqs)) -+ return; -+ -+ mc_io->mc_command_done_irq_armed = false; -+ 
(void)disable_dpmcp_irq(dpmcp_dev); -+ unregister_dpmcp_irq_handler(dpmcp_dev); -+ fsl_mc_free_irqs(dpmcp_dev); -+} -+ -+/** -+ * Creates an MC I/O object -+ * -+ * @dev: device to be associated with the MC I/O object -+ * @mc_portal_phys_addr: physical address of the MC portal to use -+ * @mc_portal_size: size in bytes of the MC portal -+ * @resource: Pointer to MC bus object allocator resource associated -+ * with this MC I/O object or NULL if none. -+ * @flags: flags for the new MC I/O object -+ * @new_mc_io: Area to return pointer to newly created MC I/O object -+ * -+ * Returns '0' on Success; Error code otherwise. -+ */ -+int __must_check fsl_create_mc_io(struct device *dev, -+ phys_addr_t mc_portal_phys_addr, -+ uint32_t mc_portal_size, -+ struct fsl_mc_device *dpmcp_dev, -+ uint32_t flags, struct fsl_mc_io **new_mc_io) -+{ -+ int error; -+ struct fsl_mc_io *mc_io; -+ void __iomem *mc_portal_virt_addr; -+ struct resource *res; -+ -+ mc_io = devm_kzalloc(dev, sizeof(*mc_io), GFP_KERNEL); -+ if (!mc_io) -+ return -ENOMEM; -+ -+ mc_io->dev = dev; -+ mc_io->flags = flags; -+ mc_io->portal_phys_addr = mc_portal_phys_addr; -+ mc_io->portal_size = mc_portal_size; -+ mc_io->mc_command_done_irq_armed = false; -+ if (flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) { -+ spin_lock_init(&mc_io->spinlock); -+ } else { -+ mutex_init(&mc_io->mutex); -+ init_completion(&mc_io->mc_command_done_completion); -+ } -+ -+ res = devm_request_mem_region(dev, -+ mc_portal_phys_addr, -+ mc_portal_size, -+ "mc_portal"); -+ if (!res) { -+ dev_err(dev, -+ "devm_request_mem_region failed for MC portal %#llx\n", -+ mc_portal_phys_addr); -+ return -EBUSY; -+ } -+ -+ mc_portal_virt_addr = devm_ioremap_nocache(dev, -+ mc_portal_phys_addr, -+ mc_portal_size); -+ if (!mc_portal_virt_addr) { -+ dev_err(dev, -+ "devm_ioremap_nocache failed for MC portal %#llx\n", -+ mc_portal_phys_addr); -+ return -ENXIO; -+ } -+ -+ mc_io->portal_virt_addr = mc_portal_virt_addr; -+ if (dpmcp_dev) { -+ error = 
fsl_mc_io_set_dpmcp(mc_io, dpmcp_dev); -+ if (error < 0) -+ goto error_destroy_mc_io; -+ -+ if (!(flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) && -+ fsl_mc_interrupts_supported()) { -+ error = fsl_mc_io_setup_dpmcp_irq(mc_io); -+ if (error < 0) -+ goto error_destroy_mc_io; -+ } -+ } -+ -+ *new_mc_io = mc_io; -+ return 0; -+ -+error_destroy_mc_io: -+ fsl_destroy_mc_io(mc_io); -+ return error; -+ -+} -+EXPORT_SYMBOL_GPL(fsl_create_mc_io); -+ -+/** -+ * Destroys an MC I/O object -+ * -+ * @mc_io: MC I/O object to destroy -+ */ -+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io) -+{ -+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; -+ -+ if (dpmcp_dev) -+ fsl_mc_io_unset_dpmcp(mc_io); -+ -+ devm_iounmap(mc_io->dev, mc_io->portal_virt_addr); -+ devm_release_mem_region(mc_io->dev, -+ mc_io->portal_phys_addr, -+ mc_io->portal_size); -+ -+ mc_io->portal_virt_addr = NULL; -+ devm_kfree(mc_io->dev, mc_io); -+} -+EXPORT_SYMBOL_GPL(fsl_destroy_mc_io); -+ -+int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io, -+ struct fsl_mc_device *dpmcp_dev) -+{ -+ int error; -+ -+ if (WARN_ON(!dpmcp_dev)) -+ return -EINVAL; -+ -+ if (WARN_ON(mc_io->dpmcp_dev)) -+ return -EINVAL; -+ -+ if (WARN_ON(dpmcp_dev->mc_io)) -+ return -EINVAL; -+ -+ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) { -+ error = dpmcp_open(mc_io, -+ 0, -+ dpmcp_dev->obj_desc.id, -+ &dpmcp_dev->mc_handle); -+ if (error < 0) -+ return error; -+ } -+ -+ mc_io->dpmcp_dev = dpmcp_dev; -+ dpmcp_dev->mc_io = mc_io; -+ return 0; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_io_set_dpmcp); -+ -+void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io) -+{ -+ int error; -+ struct fsl_mc_device *dpmcp_dev = mc_io->dpmcp_dev; -+ -+ if (WARN_ON(!dpmcp_dev)) -+ return; -+ -+ if (WARN_ON(dpmcp_dev->mc_io != mc_io)) -+ return; -+ -+ if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) { -+ if (dpmcp_dev->irqs) -+ teardown_dpmcp_irq(mc_io); -+ -+ error = dpmcp_close(mc_io, -+ 0, -+ dpmcp_dev->mc_handle); -+ if (error < 0) { -+ dev_err(&dpmcp_dev->dev, 
"dpmcp_close() failed: %d\n", -+ error); -+ } -+ } -+ -+ mc_io->dpmcp_dev = NULL; -+ dpmcp_dev->mc_io = NULL; -+} -+EXPORT_SYMBOL_GPL(fsl_mc_io_unset_dpmcp); -+ -+static int mc_status_to_error(enum mc_cmd_status status) -+{ -+ static const int mc_status_to_error_map[] = { -+ [MC_CMD_STATUS_OK] = 0, -+ [MC_CMD_STATUS_AUTH_ERR] = -EACCES, -+ [MC_CMD_STATUS_NO_PRIVILEGE] = -EPERM, -+ [MC_CMD_STATUS_DMA_ERR] = -EIO, -+ [MC_CMD_STATUS_CONFIG_ERR] = -ENXIO, -+ [MC_CMD_STATUS_TIMEOUT] = -ETIMEDOUT, -+ [MC_CMD_STATUS_NO_RESOURCE] = -ENAVAIL, -+ [MC_CMD_STATUS_NO_MEMORY] = -ENOMEM, -+ [MC_CMD_STATUS_BUSY] = -EBUSY, -+ [MC_CMD_STATUS_UNSUPPORTED_OP] = -ENOTSUPP, -+ [MC_CMD_STATUS_INVALID_STATE] = -ENODEV, -+ }; -+ -+ if (WARN_ON((u32)status >= ARRAY_SIZE(mc_status_to_error_map))) -+ return -EINVAL; -+ -+ return mc_status_to_error_map[status]; -+} -+ -+static const char *mc_status_to_string(enum mc_cmd_status status) -+{ -+ static const char *const status_strings[] = { -+ [MC_CMD_STATUS_OK] = "Command completed successfully", -+ [MC_CMD_STATUS_READY] = "Command ready to be processed", -+ [MC_CMD_STATUS_AUTH_ERR] = "Authentication error", -+ [MC_CMD_STATUS_NO_PRIVILEGE] = "No privilege", -+ [MC_CMD_STATUS_DMA_ERR] = "DMA or I/O error", -+ [MC_CMD_STATUS_CONFIG_ERR] = "Configuration error", -+ [MC_CMD_STATUS_TIMEOUT] = "Operation timed out", -+ [MC_CMD_STATUS_NO_RESOURCE] = "No resources", -+ [MC_CMD_STATUS_NO_MEMORY] = "No memory available", -+ [MC_CMD_STATUS_BUSY] = "Device is busy", -+ [MC_CMD_STATUS_UNSUPPORTED_OP] = "Unsupported operation", -+ [MC_CMD_STATUS_INVALID_STATE] = "Invalid state" -+ }; -+ -+ if ((unsigned int)status >= ARRAY_SIZE(status_strings)) -+ return "Unknown MC error"; -+ -+ return status_strings[status]; -+} -+ -+/** -+ * mc_write_command - writes a command to a Management Complex (MC) portal -+ * -+ * @portal: pointer to an MC portal -+ * @cmd: pointer to a filled command -+ */ -+static inline void mc_write_command(struct mc_command __iomem *portal, -+ 
struct mc_command *cmd) -+{ -+ int i; -+ -+ /* copy command parameters into the portal */ -+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) -+ writeq(cmd->params[i], &portal->params[i]); -+ -+ /* submit the command by writing the header */ -+ writeq(cmd->header, &portal->header); -+} -+ -+/** -+ * mc_read_response - reads the response for the last MC command from a -+ * Management Complex (MC) portal -+ * -+ * @portal: pointer to an MC portal -+ * @resp: pointer to command response buffer -+ * -+ * Returns MC_CMD_STATUS_OK on Success; Error code otherwise. -+ */ -+static inline enum mc_cmd_status mc_read_response(struct mc_command __iomem * -+ portal, -+ struct mc_command *resp) -+{ -+ int i; -+ enum mc_cmd_status status; -+ -+ /* Copy command response header from MC portal: */ -+ resp->header = readq(&portal->header); -+ status = MC_CMD_HDR_READ_STATUS(resp->header); -+ if (status != MC_CMD_STATUS_OK) -+ return status; -+ -+ /* Copy command response data from MC portal: */ -+ for (i = 0; i < MC_CMD_NUM_OF_PARAMS; i++) -+ resp->params[i] = readq(&portal->params[i]); -+ -+ return status; -+} -+ -+static int mc_completion_wait(struct fsl_mc_io *mc_io, struct mc_command *cmd, -+ enum mc_cmd_status *mc_status) -+{ -+ enum mc_cmd_status status; -+ unsigned long jiffies_left; -+ unsigned long timeout_jiffies = -+ msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); -+ -+ if (WARN_ON(!mc_io->dpmcp_dev)) -+ return -EINVAL; -+ -+ if (WARN_ON(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) -+ return -EINVAL; -+ -+ for (;;) { -+ status = mc_read_response(mc_io->portal_virt_addr, cmd); -+ if (status != MC_CMD_STATUS_READY) -+ break; -+ -+ jiffies_left = wait_for_completion_timeout( -+ &mc_io->mc_command_done_completion, -+ timeout_jiffies); -+ if (jiffies_left == 0) -+ return -ETIMEDOUT; -+ } -+ -+ *mc_status = status; -+ return 0; -+} -+ -+static int mc_polling_wait_preemptible(struct fsl_mc_io *mc_io, -+ struct mc_command *cmd, -+ enum mc_cmd_status *mc_status) -+{ -+ enum 
mc_cmd_status status; -+ unsigned long jiffies_until_timeout = -+ jiffies + msecs_to_jiffies(MC_CMD_COMPLETION_TIMEOUT_MS); -+ -+ for (;;) { -+ status = mc_read_response(mc_io->portal_virt_addr, cmd); -+ if (status != MC_CMD_STATUS_READY) -+ break; -+ -+ usleep_range(MC_CMD_COMPLETION_POLLING_MIN_SLEEP_USECS, -+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); -+ -+ if (time_after_eq(jiffies, jiffies_until_timeout)) -+ return -ETIMEDOUT; -+ } -+ -+ *mc_status = status; -+ return 0; -+} -+ -+static int mc_polling_wait_atomic(struct fsl_mc_io *mc_io, -+ struct mc_command *cmd, -+ enum mc_cmd_status *mc_status) -+{ -+ enum mc_cmd_status status; -+ unsigned long timeout_usecs = MC_CMD_COMPLETION_TIMEOUT_MS * 1000; -+ -+ BUILD_BUG_ON((MC_CMD_COMPLETION_TIMEOUT_MS * 1000) % -+ MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS != 0); -+ -+ for (;;) { -+ status = mc_read_response(mc_io->portal_virt_addr, cmd); -+ if (status != MC_CMD_STATUS_READY) -+ break; -+ -+ udelay(MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS); -+ timeout_usecs -= MC_CMD_COMPLETION_POLLING_MAX_SLEEP_USECS; -+ if (timeout_usecs == 0) -+ return -ETIMEDOUT; -+ } -+ -+ *mc_status = status; -+ return 0; -+} -+ -+/** -+ * Sends a command to the MC device using the given MC I/O object -+ * -+ * @mc_io: MC I/O object to be used -+ * @cmd: command to be sent -+ * -+ * Returns '0' on Success; Error code otherwise. 
-+ */ -+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd) -+{ -+ int error; -+ enum mc_cmd_status status; -+ unsigned long irq_flags = 0; -+ bool dpmcp_completion_intr_disabled = -+ (MC_CMD_HDR_READ_FLAGS(cmd->header) & MC_CMD_FLAG_INTR_DIS); -+ -+ if (WARN_ON(in_irq() && -+ (!dpmcp_completion_intr_disabled || -+ !(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)))) -+ return -EINVAL; -+ -+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) -+ spin_lock_irqsave(&mc_io->spinlock, irq_flags); -+ else -+ mutex_lock(&mc_io->mutex); -+ -+ /* -+ * Send command to the MC hardware: -+ */ -+ mc_write_command(mc_io->portal_virt_addr, cmd); -+ -+ /* -+ * Wait for response from the MC hardware: -+ */ -+ if (mc_io->mc_command_done_irq_armed && !dpmcp_completion_intr_disabled) -+ error = mc_completion_wait(mc_io, cmd, &status); -+ else if (!(mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL)) -+ error = mc_polling_wait_preemptible(mc_io, cmd, &status); -+ else -+ error = mc_polling_wait_atomic(mc_io, cmd, &status); -+ -+ if (error < 0) { -+ if (error == -ETIMEDOUT) { -+ pr_debug("MC command timed out (portal: %#llx, obj handle: %#x, command: %#x)\n", -+ mc_io->portal_phys_addr, -+ (unsigned int) -+ MC_CMD_HDR_READ_TOKEN(cmd->header), -+ (unsigned int) -+ MC_CMD_HDR_READ_CMDID(cmd->header)); -+ } -+ goto common_exit; -+ -+ } -+ -+ if (status != MC_CMD_STATUS_OK) { -+ pr_debug("MC command failed: portal: %#llx, obj handle: %#x, command: %#x, status: %s (%#x)\n", -+ mc_io->portal_phys_addr, -+ (unsigned int)MC_CMD_HDR_READ_TOKEN(cmd->header), -+ (unsigned int)MC_CMD_HDR_READ_CMDID(cmd->header), -+ mc_status_to_string(status), -+ (unsigned int)status); -+ -+ error = mc_status_to_error(status); -+ goto common_exit; -+ } -+ -+ error = 0; -+ -+common_exit: -+ if (mc_io->flags & FSL_MC_IO_ATOMIC_CONTEXT_PORTAL) -+ spin_unlock_irqrestore(&mc_io->spinlock, irq_flags); -+ else -+ mutex_unlock(&mc_io->mutex); -+ -+ return error; -+} -+EXPORT_SYMBOL(mc_send_command); -diff 
--git a/drivers/staging/fsl-mc/include/dpbp-cmd.h b/drivers/staging/fsl-mc/include/dpbp-cmd.h -new file mode 100644 -index 0000000..1ec04e4 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpbp-cmd.h -@@ -0,0 +1,62 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPBP_CMD_H -+#define _FSL_DPBP_CMD_H -+ -+/* DPBP Version */ -+#define DPBP_VER_MAJOR 2 -+#define DPBP_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPBP_CMDID_CLOSE 0x800 -+#define DPBP_CMDID_OPEN 0x804 -+#define DPBP_CMDID_CREATE 0x904 -+#define DPBP_CMDID_DESTROY 0x900 -+ -+#define DPBP_CMDID_ENABLE 0x002 -+#define DPBP_CMDID_DISABLE 0x003 -+#define DPBP_CMDID_GET_ATTR 0x004 -+#define DPBP_CMDID_RESET 0x005 -+#define DPBP_CMDID_IS_ENABLED 0x006 -+ -+#define DPBP_CMDID_SET_IRQ 0x010 -+#define DPBP_CMDID_GET_IRQ 0x011 -+#define DPBP_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPBP_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPBP_CMDID_SET_IRQ_MASK 0x014 -+#define DPBP_CMDID_GET_IRQ_MASK 0x015 -+#define DPBP_CMDID_GET_IRQ_STATUS 0x016 -+#define DPBP_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPBP_CMDID_SET_NOTIFICATIONS 0x01b0 -+#define DPBP_CMDID_GET_NOTIFICATIONS 0x01b1 -+#endif /* _FSL_DPBP_CMD_H */ -diff --git a/drivers/staging/fsl-mc/include/dpbp.h b/drivers/staging/fsl-mc/include/dpbp.h -new file mode 100644 -index 0000000..9856bb8 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpbp.h -@@ -0,0 +1,438 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_DPBP_H -+#define __FSL_DPBP_H -+ -+/* Data Path Buffer Pool API -+ * Contains initialization APIs and runtime control APIs for DPBP -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpbp_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpbp_id: DPBP unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpbp_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpbp_id, -+ uint16_t *token); -+ -+/** -+ * dpbp_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpbp_cfg - Structure representing DPBP configuration -+ * @options: place holder -+ */ -+struct dpbp_cfg { -+ uint32_t options; -+}; -+ -+/** -+ * dpbp_create() - Create the DPBP object. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPBP object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpbp_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpbp_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpbp_destroy() - Destroy the DPBP object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpbp_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_enable() - Enable the DPBP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_disable() - Disable the DPBP. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpbp_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpbp_is_enabled() - Check if the DPBP is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpbp_reset() - Reset the DPBP, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpbp_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpbp_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpbp_set_irq() - Set IRQ information for the DPBP to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpbp_irq_cfg *irq_cfg); -+ -+/** -+ * dpbp_get_irq() - Get IRQ information from the DPBP. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpbp_irq_cfg *irq_cfg); -+ -+/** -+ * dpbp_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpbp_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpbp_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpbp_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpbp_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpbp_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpbp_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpbp_attr - Structure representing DPBP attributes -+ * @id: DPBP object ID -+ * @version: DPBP version -+ * @bpid: Hardware buffer pool ID; should be used as an argument in -+ * acquire/release operations on buffers -+ */ -+struct dpbp_attr { -+ int id; -+ /** -+ * struct version - Structure representing DPBP version -+ * @major: DPBP major version -+ * @minor: DPBP minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint16_t bpid; -+}; -+ -+/** -+ * dpbp_get_attributes - Retrieve DPBP attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpbp_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_attr *attr); -+ -+/** -+ * DPBP notifications options -+ */ -+ -+/** -+ * BPSCN write will attempt to allocate into a cache (coherent write) -+ */ -+#define DPBP_NOTIF_OPT_COHERENT_WRITE 0x00000001 -+ -+/** -+ * struct dpbp_notification_cfg - Structure representing DPBP notifications -+ * towards software -+ * @depletion_entry: below this threshold the pool is "depleted"; -+ * set it to '0' to disable it -+ * @depletion_exit: greater than or equal to this threshold the pool exit its -+ * "depleted" state -+ * @surplus_entry: above this threshold the pool is in "surplus" state; -+ * set it to '0' to disable it -+ * @surplus_exit: less than or equal to this threshold the pool exit its -+ * "surplus" state -+ * @message_iova: MUST be given if either 'depletion_entry' or 'surplus_entry' -+ * is not '0' (enable); I/O virtual address (must be in DMA-able memory), -+ * must be 16B aligned. -+ * @message_ctx: The context that will be part of the BPSCN message and will -+ * be written to 'message_iova' -+ * @options: Mask of available options; use 'DPBP_NOTIF_OPT_' values -+ */ -+struct dpbp_notification_cfg { -+ uint32_t depletion_entry; -+ uint32_t depletion_exit; -+ uint32_t surplus_entry; -+ uint32_t surplus_exit; -+ uint64_t message_iova; -+ uint64_t message_ctx; -+ uint16_t options; -+}; -+ -+/** -+ * dpbp_set_notifications() - Set notifications towards software -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @cfg: notifications configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpbp_set_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg); -+ -+/** -+ * dpbp_get_notifications() - Get the notifications configuration -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPBP object -+ * @cfg: notifications configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpbp_get_notifications(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpbp_notification_cfg *cfg); -+ -+#endif /* __FSL_DPBP_H */ -diff --git a/drivers/staging/fsl-mc/include/dpcon-cmd.h b/drivers/staging/fsl-mc/include/dpcon-cmd.h -new file mode 100644 -index 0000000..ecb40d0 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpcon-cmd.h -@@ -0,0 +1,162 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPCON_CMD_H -+#define _FSL_DPCON_CMD_H -+ -+/* DPCON Version */ -+#define DPCON_VER_MAJOR 2 -+#define DPCON_VER_MINOR 2 -+ -+/* Command IDs */ -+#define DPCON_CMDID_CLOSE 0x800 -+#define DPCON_CMDID_OPEN 0x808 -+#define DPCON_CMDID_CREATE 0x908 -+#define DPCON_CMDID_DESTROY 0x900 -+ -+#define DPCON_CMDID_ENABLE 0x002 -+#define DPCON_CMDID_DISABLE 0x003 -+#define DPCON_CMDID_GET_ATTR 0x004 -+#define DPCON_CMDID_RESET 0x005 -+#define DPCON_CMDID_IS_ENABLED 0x006 -+ -+#define DPCON_CMDID_SET_IRQ 0x010 -+#define DPCON_CMDID_GET_IRQ 0x011 -+#define DPCON_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPCON_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPCON_CMDID_SET_IRQ_MASK 0x014 -+#define DPCON_CMDID_GET_IRQ_MASK 0x015 -+#define DPCON_CMDID_GET_IRQ_STATUS 0x016 -+#define DPCON_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPCON_CMDID_SET_NOTIFICATION 0x100 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_OPEN(cmd, dpcon_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpcon_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->num_priorities) -+ -+/* cmd, param, offset, 
width, type, arg_name */ -+#define DPCON_RSP_IS_ENABLED(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 1, int, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ(cmd, irq_index, irq_cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_cfg->val);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_CMD_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ(cmd, type, irq_cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_cfg->val);\ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_cfg->addr);\ -+ MC_RSP_OP(cmd, 2, 0, 32, int, irq_cfg->irq_num); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, 
width, type, arg_name */ -+#define DPCON_CMD_GET_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_RSP_GET_ATTR(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 0, 32, 16, uint16_t, attr->qbman_ch_id);\ -+ MC_RSP_OP(cmd, 0, 48, 8, uint8_t, attr->num_priorities);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPCON_CMD_SET_NOTIFICATION(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->dpio_id);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, cfg->priority);\ -+ MC_CMD_OP(cmd, 1, 0, 64, uint64_t, cfg->user_ctx);\ -+} while (0) -+ -+#endif /* _FSL_DPCON_CMD_H */ -diff --git a/drivers/staging/fsl-mc/include/dpcon.h b/drivers/staging/fsl-mc/include/dpcon.h -new file mode 100644 -index 0000000..2555be5 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpcon.h -@@ -0,0 +1,407 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_DPCON_H -+#define __FSL_DPCON_H -+ -+/* Data Path Concentrator API -+ * Contains initialization APIs and runtime control APIs for DPCON -+ */ -+ -+struct fsl_mc_io; -+ -+/** General DPCON macros */ -+ -+/** -+ * Use it to disable notifications; see dpcon_set_notification() -+ */ -+#define DPCON_INVALID_DPIO_ID (int)(-1) -+ -+/** -+ * dpcon_open() - Open a control session for the specified object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @dpcon_id: DPCON unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpcon_create() function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int dpcon_id, -+ uint16_t *token); -+ -+/** -+ * dpcon_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpcon_cfg - Structure representing DPCON configuration -+ * @num_priorities: Number of priorities for the DPCON channel (1-8) -+ */ -+struct dpcon_cfg { -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpcon_create() - Create the DPCON object. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPCON object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. -+ * -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpcon_open() function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_create(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ const struct dpcon_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpcon_destroy() - Destroy the DPCON object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; error code otherwise. 
-+ */ -+int dpcon_destroy(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_enable() - Enable the DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_disable() - Disable the DPCON -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int dpcon_disable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * dpcon_is_enabled() - Check if the DPCON is enabled. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @en: Returns '1' if object is enabled; '0' otherwise -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_is_enabled(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *en); -+ -+/** -+ * dpcon_reset() - Reset the DPCON, returns the object to initial state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpcon_reset(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * struct dpcon_irq_cfg - IRQ configuration -+ * @addr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dpcon_irq_cfg { -+ uint64_t addr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dpcon_set_irq() - Set IRQ information for the DPCON to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dpcon_irq_cfg *irq_cfg); -+ -+/** -+ * dpcon_get_irq() - Get IRQ information from the DPCON. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dpcon_irq_cfg *irq_cfg); -+ -+/** -+ * dpcon_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. 
-+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpcon_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpcon_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpcon_get_irq_mask() - Get interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpcon_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @status: interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpcon_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpcon_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpcon_attr - Structure representing DPCON attributes -+ * @id: DPCON object ID -+ * @version: DPCON version -+ * @qbman_ch_id: Channel ID to be used by dequeue operation -+ * @num_priorities: Number of priorities for the DPCON channel (1-8) -+ */ -+struct dpcon_attr { -+ int id; -+ /** -+ * struct version - DPCON version -+ * @major: DPCON major version -+ * @minor: DPCON minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+ uint16_t qbman_ch_id; -+ uint8_t num_priorities; -+}; -+ -+/** -+ * dpcon_get_attributes() - Retrieve DPCON attributes. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @attr: Object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpcon_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_attr *attr); -+ -+/** -+ * struct dpcon_notification_cfg - Structure representing notification parameters -+ * @dpio_id: DPIO object ID; must be configured with a notification channel; -+ * to disable notifications set it to 'DPCON_INVALID_DPIO_ID'; -+ * @priority: Priority selection within the DPIO channel; valid values -+ * are 0-7, depending on the number of priorities in that channel -+ * @user_ctx: User context value provided with each CDAN message -+ */ -+struct dpcon_notification_cfg { -+ int dpio_id; -+ uint8_t priority; -+ uint64_t user_ctx; -+}; -+ -+/** -+ * dpcon_set_notification() - Set DPCON notification destination -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPCON object -+ * @cfg: Notification parameters -+ * -+ * Return: '0' on Success; Error code otherwise -+ */ -+int 
dpcon_set_notification(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dpcon_notification_cfg *cfg); -+ -+#endif /* __FSL_DPCON_H */ -diff --git a/drivers/staging/fsl-mc/include/dpmac-cmd.h b/drivers/staging/fsl-mc/include/dpmac-cmd.h -new file mode 100644 -index 0000000..c123aab ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpmac-cmd.h -@@ -0,0 +1,192 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPMAC_CMD_H -+#define _FSL_DPMAC_CMD_H -+ -+/* DPMAC Version */ -+#define DPMAC_VER_MAJOR 3 -+#define DPMAC_VER_MINOR 0 -+ -+/* Command IDs */ -+#define DPMAC_CMDID_CLOSE 0x800 -+#define DPMAC_CMDID_OPEN 0x80c -+#define DPMAC_CMDID_CREATE 0x90c -+#define DPMAC_CMDID_DESTROY 0x900 -+ -+#define DPMAC_CMDID_GET_ATTR 0x004 -+#define DPMAC_CMDID_RESET 0x005 -+ -+#define DPMAC_CMDID_SET_IRQ 0x010 -+#define DPMAC_CMDID_GET_IRQ 0x011 -+#define DPMAC_CMDID_SET_IRQ_ENABLE 0x012 -+#define DPMAC_CMDID_GET_IRQ_ENABLE 0x013 -+#define DPMAC_CMDID_SET_IRQ_MASK 0x014 -+#define DPMAC_CMDID_GET_IRQ_MASK 0x015 -+#define DPMAC_CMDID_GET_IRQ_STATUS 0x016 -+#define DPMAC_CMDID_CLEAR_IRQ_STATUS 0x017 -+ -+#define DPMAC_CMDID_MDIO_READ 0x0c0 -+#define DPMAC_CMDID_MDIO_WRITE 0x0c1 -+#define DPMAC_CMDID_GET_LINK_CFG 0x0c2 -+#define DPMAC_CMDID_SET_LINK_STATE 0x0c3 -+#define DPMAC_CMDID_GET_COUNTER 0x0c4 -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_CREATE(cmd, cfg) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, cfg->mac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_OPEN(cmd, dpmac_id) \ -+ MC_CMD_OP(cmd, 0, 0, 32, int, dpmac_id) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ(cmd, irq_index, irq_addr, irq_val, user_irq_id) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, irq_index);\ -+ MC_CMD_OP(cmd, 0, 32, 32, uint32_t, irq_val);\ -+ MC_CMD_OP(cmd, 1, 
0, 64, uint64_t, irq_addr); \ -+ MC_CMD_OP(cmd, 2, 0, 32, int, user_irq_id); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ(cmd, type, irq_addr, irq_val, user_irq_id) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, irq_val); \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, irq_addr); \ -+ MC_RSP_OP(cmd, 2, 0, 32, int, user_irq_id); \ -+ MC_RSP_OP(cmd, 2, 32, 32, int, type); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_ENABLE(cmd, irq_index, en) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, en); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_ENABLE(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_ENABLE(cmd, en) \ -+ MC_RSP_OP(cmd, 0, 0, 8, uint8_t, en) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_IRQ_MASK(cmd, irq_index, mask) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, mask);\ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_MASK(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_MASK(cmd, mask) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, mask) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_IRQ_STATUS(cmd, irq_index) \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_IRQ_STATUS(cmd, status) \ -+ MC_RSP_OP(cmd, 0, 0, 32, uint32_t, status) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define 
DPMAC_CMD_CLEAR_IRQ_STATUS(cmd, irq_index, status) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 32, uint32_t, status); \ -+ MC_CMD_OP(cmd, 0, 32, 8, uint8_t, irq_index);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_ATTRIBUTES(cmd, attr) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 32, int, attr->phy_id);\ -+ MC_RSP_OP(cmd, 0, 32, 32, int, attr->id);\ -+ MC_RSP_OP(cmd, 1, 0, 16, uint16_t, attr->version.major);\ -+ MC_RSP_OP(cmd, 1, 16, 16, uint16_t, attr->version.minor);\ -+ MC_RSP_OP(cmd, 1, 32, 8, enum dpmac_link_type, attr->link_type);\ -+ MC_RSP_OP(cmd, 1, 40, 8, enum dpmac_eth_if, attr->eth_if);\ -+ MC_RSP_OP(cmd, 2, 0, 32, uint32_t, attr->max_rate);\ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_READ(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_MDIO_READ(cmd, data) \ -+ MC_RSP_OP(cmd, 0, 16, 16, uint16_t, data) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_MDIO_WRITE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 8, uint8_t, cfg->phy_addr); \ -+ MC_CMD_OP(cmd, 0, 8, 8, uint8_t, cfg->reg); \ -+ MC_CMD_OP(cmd, 0, 16, 16, uint16_t, cfg->data); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_LINK_CFG(cmd, cfg) \ -+do { \ -+ MC_RSP_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_RSP_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_SET_LINK_STATE(cmd, cfg) \ -+do { \ -+ MC_CMD_OP(cmd, 0, 0, 64, uint64_t, cfg->options); \ -+ MC_CMD_OP(cmd, 1, 0, 32, uint32_t, cfg->rate); \ -+ MC_CMD_OP(cmd, 2, 0, 1, int, cfg->up); \ -+} while (0) -+ -+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_CMD_GET_COUNTER(cmd, type) \ -+ MC_CMD_OP(cmd, 0, 0, 8, enum dpmac_counter, type) -+ 
-+/* cmd, param, offset, width, type, arg_name */ -+#define DPMAC_RSP_GET_COUNTER(cmd, counter) \ -+ MC_RSP_OP(cmd, 1, 0, 64, uint64_t, counter) -+ -+#endif /* _FSL_DPMAC_CMD_H */ -diff --git a/drivers/staging/fsl-mc/include/dpmac.h b/drivers/staging/fsl-mc/include/dpmac.h -new file mode 100644 -index 0000000..88091b5 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpmac.h -@@ -0,0 +1,528 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMAC_H -+#define __FSL_DPMAC_H -+ -+/* Data Path MAC API -+ * Contains initialization APIs and runtime control APIs for DPMAC -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * dpmac_open() - Open a control session for the specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @dpmac_id: DPMAC unique ID -+ * @token: Returned token; use in subsequent API calls -+ * -+ * This function can be used to open a control session for an -+ * already created object; an object may have been declared in -+ * the DPL or by calling the dpmac_create function. -+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent commands for -+ * this specific object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_open(struct fsl_mc_io *mc_io, int dpmac_id, uint16_t *token); -+ -+/** -+ * dpmac_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_close(struct fsl_mc_io *mc_io, uint16_t token); -+ -+/** -+ * enum dpmac_link_type - DPMAC link type -+ * @DPMAC_LINK_TYPE_NONE: No link -+ * @DPMAC_LINK_TYPE_FIXED: Link is fixed type -+ * @DPMAC_LINK_TYPE_PHY: Link by PHY ID -+ * @DPMAC_LINK_TYPE_BACKPLANE: Backplane link type -+ */ -+enum dpmac_link_type { -+ DPMAC_LINK_TYPE_NONE, -+ DPMAC_LINK_TYPE_FIXED, -+ DPMAC_LINK_TYPE_PHY, -+ DPMAC_LINK_TYPE_BACKPLANE -+}; -+ -+/** -+ * enum dpmac_eth_if - DPMAC Ethrnet interface -+ * @DPMAC_ETH_IF_MII: MII interface -+ * @DPMAC_ETH_IF_RMII: RMII interface -+ * @DPMAC_ETH_IF_SMII: SMII interface -+ * @DPMAC_ETH_IF_GMII: GMII interface -+ * @DPMAC_ETH_IF_RGMII: RGMII interface -+ * @DPMAC_ETH_IF_SGMII: SGMII interface -+ * @DPMAC_ETH_IF_XGMII: XGMII interface -+ * @DPMAC_ETH_IF_QSGMII: QSGMII interface -+ * @DPMAC_ETH_IF_XAUI: XAUI interface -+ * @DPMAC_ETH_IF_XFI: XFI interface -+ */ -+enum dpmac_eth_if { -+ DPMAC_ETH_IF_MII, -+ DPMAC_ETH_IF_RMII, -+ DPMAC_ETH_IF_SMII, -+ DPMAC_ETH_IF_GMII, -+ DPMAC_ETH_IF_RGMII, -+ DPMAC_ETH_IF_SGMII, -+ DPMAC_ETH_IF_XGMII, -+ DPMAC_ETH_IF_QSGMII, -+ DPMAC_ETH_IF_XAUI, -+ DPMAC_ETH_IF_XFI -+}; -+ -+/** -+ * struct dpmac_cfg() - Structure representing DPMAC configuration -+ * @mac_id: Represents the Hardware MAC ID; in case of multiple WRIOP, -+ * the MAC IDs are continuous. -+ * For example: 2 WRIOPs, 16 MACs in each: -+ * MAC IDs for the 1st WRIOP: 1-16, -+ * MAC IDs for the 2nd WRIOP: 17-32. -+ */ -+struct dpmac_cfg { -+ int mac_id; -+}; -+ -+/** -+ * dpmac_create() - Create the DPMAC object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cfg: Configuration structure -+ * @token: Returned token; use in subsequent API calls -+ * -+ * Create the DPMAC object, allocate required resources and -+ * perform required initialization. -+ * -+ * The object can be created either by declaring it in the -+ * DPL file, or by calling this function. 
-+ * This function returns a unique authentication token, -+ * associated with the specific object ID and the specific MC -+ * portal; this token must be used in all subsequent calls to -+ * this specific object. For objects that are created using the -+ * DPL file, call dpmac_open function to get an authentication -+ * token first. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_create(struct fsl_mc_io *mc_io, -+ const struct dpmac_cfg *cfg, -+ uint16_t *token); -+ -+/** -+ * dpmac_destroy() - Destroy the DPMAC object and release all its resources. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * -+ * Return: '0' on Success; error code otherwise. -+ */ -+int dpmac_destroy(struct fsl_mc_io *mc_io, uint16_t token); -+ -+/* DPMAC IRQ Index and Events */ -+ -+/* IRQ index */ -+#define DPMAC_IRQ_INDEX 0 -+/* IRQ event - indicates a change in link state */ -+#define DPMAC_IRQ_EVENT_LINK_CFG_REQ 0x00000001 -+/* irq event - Indicates that the link state changed */ -+#define DPMAC_IRQ_EVENT_LINK_CHANGED 0x00000002 -+ -+/** -+ * dpmac_set_irq() - Set IRQ information for the DPMAC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_addr: Address that must be written to -+ * signal a message-based interrupt -+ * @irq_val: Value to write into irq_addr address -+ * @user_irq_id: A user defined number associated with this IRQ -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint64_t irq_addr, -+ uint32_t irq_val, -+ int user_irq_id); -+ -+/** -+ * dpmac_get_irq() - Get IRQ information from the DPMAC. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_addr: Returned address that must be written to -+ * signal the message-based interrupt -+ * @irq_val: Value to write into irq_addr address -+ * @user_irq_id: A user defined number associated with this IRQ -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ uint64_t *irq_addr, -+ uint32_t *irq_val, -+ int *user_irq_id); -+ -+/** -+ * dpmac_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dpmac_get_irq_enable() - Get overall interrupt state -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dpmac_set_irq_mask() - Set interrupt mask. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting IRQ -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dpmac_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dpmac_get_irq_status() - Get the current status of any pending interrupts. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_get_irq_status(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dpmac_clear_irq_status() - Clear a pending interrupt's status -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @irq_index: The interrupt index to configure -+ * @status: Bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dpmac_attr - Structure representing DPMAC attributes -+ * @id: DPMAC object ID -+ * @phy_id: PHY ID -+ * @link_type: link type -+ * @eth_if: Ethernet interface -+ * @max_rate: Maximum supported rate - in Mbps -+ * @version: DPMAC version -+ */ -+struct dpmac_attr { -+ int id; -+ int phy_id; -+ enum dpmac_link_type link_type; -+ enum dpmac_eth_if eth_if; -+ uint32_t max_rate; -+ /** -+ * struct version - Structure representing DPMAC version -+ * @major: DPMAC major version -+ * @minor: DPMAC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dpmac_get_attributes - Retrieve DPMAC attributes. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPMAC object -+ * @attr: Returned object's attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_get_attributes(struct fsl_mc_io *mc_io, -+ uint16_t token, -+ struct dpmac_attr *attr); -+ -+/** -+ * struct dpmac_mdio_cfg - DPMAC MDIO read/write parameters -+ * @phy_addr: MDIO device address -+ * @reg: Address of the register within the Clause 45 PHY device from which data -+ * is to be read -+ * @data: Data read/write from/to MDIO -+ */ -+struct dpmac_mdio_cfg { -+ uint8_t phy_addr; -+ uint8_t reg; -+ uint16_t data; -+}; -+ -+/** -+ * dpmac_mdio_read() - Perform MDIO read transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_mdio_read(struct fsl_mc_io *mc_io, uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+ -+/** -+ * dpmac_mdio_write() - Perform MDIO write transaction -+ * @mc_io: Pointer to opaque I/O object -+ * @token: Token of DPMAC object -+ * @cfg: Structure with MDIO transaction parameters -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmac_mdio_write(struct fsl_mc_io *mc_io, uint16_t token, -+ struct dpmac_mdio_cfg *cfg); -+ -+/* DPMAC link configuration/state options */ -+ -+/* Enable auto-negotiation */ -+#define DPMAC_LINK_OPT_AUTONEG 0x0000000000000001ULL -+/* Enable half-duplex mode */ -+#define DPMAC_LINK_OPT_HALF_DUPLEX 0x0000000000000002ULL -+/* Enable pause frames */ -+#define DPMAC_LINK_OPT_PAUSE 0x0000000000000004ULL -+/* Enable a-symmetric pause frames */ -+#define DPMAC_LINK_OPT_ASYM_PAUSE 0x0000000000000008ULL -+ -+/** -+ * struct dpmac_link_cfg - Structure representing DPMAC link configuration -+ * @rate: Link's rate - in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ */ -+struct dpmac_link_cfg { -+ uint32_t rate; -+ uint64_t options; -+}; -+ -+/** -+ * dpmac_get_link_cfg() - Get Ethernet link configuration -+ * @mc_io: Pointer to opaque I/O object -+ * @token: Token of DPMAC object -+ * @cfg: Returned structure with the link configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_get_link_cfg(struct fsl_mc_io *mc_io, uint16_t token, -+ struct dpmac_link_cfg *cfg); -+ -+/** -+ * struct dpmac_link_state - DPMAC link configuration request -+ * @rate: Rate in Mbps -+ * @options: Enable/Disable DPMAC link cfg features (bitmap) -+ * @up: Link state -+ */ -+struct dpmac_link_state { -+ uint32_t rate; -+ uint64_t options; -+ int up; -+}; -+ -+/** -+ * dpmac_set_link_state() - Set the Ethernet link status -+ * @mc_io: Pointer to opaque I/O object -+ * @token: Token of DPMAC object -+ * @link_state: Link state configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dpmac_set_link_state(struct fsl_mc_io *mc_io, uint16_t token, -+ struct dpmac_link_state *link_state); -+ -+/** -+ * enum dpni_counter - DPNI counter types -+ * @DPMAC_CNT_ING_FRAME_64: counts 64-octet frame, good or bad. -+ * @DPMAC_CNT_ING_FRAME_127: counts 65- to 127-octet frame, good or bad. 
-+ * @DPMAC_CNT_ING_FRAME_255: counts 128- to 255-octet frame, good or bad. -+ * @DPMAC_CNT_ING_FRAME_511: counts 256- to 511-octet frame, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1023: counts 512- to 1023-octet frame, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1518: counts 1024- to 1518-octet frame, good or bad. -+ * @DPMAC_CNT_ING_FRAME_1519_MAX: counts 1519-octet frame and larger -+ * (up to max frame length specified), -+ * good or bad. -+ * @DPMAC_CNT_ING_FRAG: counts packet which is shorter than 64 octets received -+ * with a wrong CRC -+ * @DPMAC_CNT_ING_JABBER: counts packet longer than the maximum frame length -+ * specified, with a bad frame check sequence. -+ * @DPMAC_CNT_ING_FRAME_DISCARD: counts dropped packet due to internal errors. -+ * Occurs when a receive FIFO overflows. -+ * Includes also packets truncated as a result of -+ * the receive FIFO overflow. -+ * @DPMAC_CNT_ING_ALIGN_ERR: counts frame with an alignment error -+ * (optional used for wrong SFD) -+ * @DPMAC_CNT_EGR_UNDERSIZED: counts packet transmitted that was less than 64 -+ * octets long with a good CRC. -+ * @DPMAC_CNT_ING_OVERSIZED: counts packet longer than the maximum frame length -+ * specified, with a good frame check sequence. -+ * @DPMAC_CNT_ING_VALID_PAUSE_FRAME: counts valid pause frame (regular and PFC). -+ * @DPMAC_CNT_EGR_VALID_PAUSE_FRAME: counts valid pause frame transmitted -+ * (regular and PFC). -+ * @DPMAC_CNT_ING_BYTE: counts octet received except preamble for all valid -+ frames and valid pause frames. -+ * @DPMAC_CNT_ING_MCAST_FRAME: counts received multicast frame -+ * @DPMAC_CNT_ING_BCAST_FRAME: counts received broadcast frame -+ * @DPMAC_CNT_ING_ALL_FRAME: counts each good or bad packet received. 
-+ * @DPMAC_CNT_ING_UCAST_FRAME: counts received unicast frame -+ * @DPMAC_CNT_ING_ERR_FRAME: counts frame received with an error -+ * (except for undersized/fragment frame) -+ * @DPMAC_CNT_EGR_BYTE: counts octet transmitted except preamble for all valid -+ * frames and valid pause frames transmitted. -+ * @DPMAC_CNT_EGR_MCAST_FRAME: counts transmitted multicast frame -+ * @DPMAC_CNT_EGR_BCAST_FRAME: counts transmitted broadcast frame -+ * @DPMAC_CNT_EGR_UCAST_FRAME: counts transmitted unicast frame -+ * @DPMAC_CNT_EGR_ERR_FRAME: counts frame transmitted with an error -+ * @DPMAC_CNT_ING_GOOD_FRAME: counts frame received without error, including -+ * pause frames. -+ */ -+enum dpmac_counter { -+ DPMAC_CNT_ING_FRAME_64, -+ DPMAC_CNT_ING_FRAME_127, -+ DPMAC_CNT_ING_FRAME_255, -+ DPMAC_CNT_ING_FRAME_511, -+ DPMAC_CNT_ING_FRAME_1023, -+ DPMAC_CNT_ING_FRAME_1518, -+ DPMAC_CNT_ING_FRAME_1519_MAX, -+ DPMAC_CNT_ING_FRAG, -+ DPMAC_CNT_ING_JABBER, -+ DPMAC_CNT_ING_FRAME_DISCARD, -+ DPMAC_CNT_ING_ALIGN_ERR, -+ DPMAC_CNT_EGR_UNDERSIZED, -+ DPMAC_CNT_ING_OVERSIZED, -+ DPMAC_CNT_ING_VALID_PAUSE_FRAME, -+ DPMAC_CNT_EGR_VALID_PAUSE_FRAME, -+ DPMAC_CNT_ING_BYTE, -+ DPMAC_CNT_ING_MCAST_FRAME, -+ DPMAC_CNT_ING_BCAST_FRAME, -+ DPMAC_CNT_ING_ALL_FRAME, -+ DPMAC_CNT_ING_UCAST_FRAME, -+ DPMAC_CNT_ING_ERR_FRAME, -+ DPMAC_CNT_EGR_BYTE, -+ DPMAC_CNT_EGR_MCAST_FRAME, -+ DPMAC_CNT_EGR_BCAST_FRAME, -+ DPMAC_CNT_EGR_UCAST_FRAME, -+ DPMAC_CNT_EGR_ERR_FRAME, -+ DPMAC_CNT_ING_GOOD_FRAME -+}; -+ -+/** -+ * dpmac_get_counter() - Read a specific DPMAC counter -+ * @mc_io: Pointer to opaque I/O object -+ * @token: Token of DPMAC object -+ * @type: The requested counter -+ * @counter: Returned counter value -+ * -+ * Return: The requested counter; '0' otherwise. 
-+ */ -+int dpmac_get_counter(struct fsl_mc_io *mc_io, uint16_t token, -+ enum dpmac_counter type, -+ uint64_t *counter); -+ -+#endif /* __FSL_DPMAC_H */ -diff --git a/drivers/staging/fsl-mc/include/dpmng.h b/drivers/staging/fsl-mc/include/dpmng.h -new file mode 100644 -index 0000000..d1c4588 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dpmng.h -@@ -0,0 +1,80 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPMNG_H -+#define __FSL_DPMNG_H -+ -+/* Management Complex General API -+ * Contains general API for the Management Complex firmware -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * struct mc_version -+ * @major: Major version number: incremented on API compatibility changes -+ * @minor: Minor version number: incremented on API additions (that are -+ * backward compatible); reset when major version is incremented -+ * @revision: Internal revision number: incremented on implementation changes -+ * and/or bug fixes that have no impact on API -+ */ -+struct mc_version { -+ uint32_t major; -+ uint32_t minor; -+ uint32_t revision; -+}; -+ -+/** -+ * mc_get_version() - Retrieves the Management Complex firmware -+ * version information -+ * @mc_io: Pointer to opaque I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @mc_ver_info: Returned version information structure -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int mc_get_version(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ struct mc_version *mc_ver_info); -+ -+/** -+ * dpmng_get_container_id() - Get container ID associated with a given portal. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @container_id: Requested container ID -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dpmng_get_container_id(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int *container_id); -+ -+#endif /* __FSL_DPMNG_H */ -diff --git a/drivers/staging/fsl-mc/include/dprc.h b/drivers/staging/fsl-mc/include/dprc.h -new file mode 100644 -index 0000000..810ded0 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/dprc.h -@@ -0,0 +1,990 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef _FSL_DPRC_H -+#define _FSL_DPRC_H -+ -+#include "mc-cmd.h" -+ -+/* Data Path Resource Container API -+ * Contains DPRC API for managing and querying DPAA resources -+ */ -+ -+struct fsl_mc_io; -+ -+/** -+ * Set this value as the icid value in dprc_cfg structure when creating a -+ * container, in case the ICID is not selected by the user and should be -+ * allocated by the DPRC from the pool of ICIDs. -+ */ -+#define DPRC_GET_ICID_FROM_POOL (uint16_t)(~(0)) -+ -+/** -+ * Set this value as the portal_id value in dprc_cfg structure when creating a -+ * container, in case the portal ID is not specifically selected by the -+ * user and should be allocated by the DPRC from the pool of portal ids. -+ */ -+#define DPRC_GET_PORTAL_ID_FROM_POOL (int)(~(0)) -+ -+/** -+ * dprc_open() - Open DPRC object for use -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @container_id: Container ID to open -+ * @token: Returned token of DPRC object -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Required before any operation on the object. 
-+ */ -+int dprc_open(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ int container_id, -+ uint16_t *token); -+ -+/** -+ * dprc_close() - Close the control session of the object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * -+ * After this function is called, no further operations are -+ * allowed on the object without opening a new control session. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_close(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token); -+ -+/** -+ * Container general options -+ * -+ * These options may be selected at container creation by the container creator -+ * and can be retrieved using dprc_get_attributes() -+ */ -+ -+/* Spawn Policy Option allowed - Indicates that the new container is allowed -+ * to spawn and have its own child containers. -+ */ -+#define DPRC_CFG_OPT_SPAWN_ALLOWED 0x00000001 -+ -+/* General Container allocation policy - Indicates that the new container is -+ * allowed to allocate requested resources from its parent container; if not -+ * set, the container is only allowed to use resources in its own pools; Note -+ * that this is a container's global policy, but the parent container may -+ * override it and set specific quota per resource type. -+ */ -+#define DPRC_CFG_OPT_ALLOC_ALLOWED 0x00000002 -+ -+/* Object initialization allowed - software context associated with this -+ * container is allowed to invoke object initialization operations. -+ */ -+#define DPRC_CFG_OPT_OBJ_CREATE_ALLOWED 0x00000004 -+ -+/* Topology change allowed - software context associated with this -+ * container is allowed to invoke topology operations, such as attach/detach -+ * of network objects. -+ */ -+#define DPRC_CFG_OPT_TOPOLOGY_CHANGES_ALLOWED 0x00000008 -+ -+/* AIOP - Indicates that container belongs to AIOP. 
*/ -+#define DPRC_CFG_OPT_AIOP 0x00000020 -+ -+/* IRQ Config - Indicates that the container allowed to configure its IRQs. */ -+#define DPRC_CFG_OPT_IRQ_CFG_ALLOWED 0x00000040 -+ -+/** -+ * struct dprc_cfg - Container configuration options -+ * @icid: Container's ICID; if set to 'DPRC_GET_ICID_FROM_POOL', a free -+ * ICID value is allocated by the DPRC -+ * @portal_id: Portal ID; if set to 'DPRC_GET_PORTAL_ID_FROM_POOL', a free -+ * portal ID is allocated by the DPRC -+ * @options: Combination of 'DPRC_CFG_OPT_' options -+ * @label: Object's label -+ */ -+struct dprc_cfg { -+ uint16_t icid; -+ int portal_id; -+ uint64_t options; -+ char label[16]; -+}; -+ -+/** -+ * dprc_create_container() - Create child container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @cfg: Child container configuration -+ * @child_container_id: Returned child container ID -+ * @child_portal_offset: Returned child portal offset from MC portal base -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_create_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_cfg *cfg, -+ int *child_container_id, -+ uint64_t *child_portal_offset); -+ -+/** -+ * dprc_destroy_container() - Destroy child container. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the container to destroy -+ * -+ * This function terminates the child container, so following this call the -+ * child container ID becomes invalid. -+ * -+ * Notes: -+ * - All resources and objects of the destroyed container are returned to the -+ * parent container or destroyed if were created be the destroyed container. -+ * - This function destroy all the child containers of the specified -+ * container prior to destroying the container itself. 
-+ * -+ * warning: Only the parent container is allowed to destroy a child policy -+ * Container 0 can't be destroyed -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ */ -+int dprc_destroy_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id); -+ -+/** -+ * dprc_reset_container - Reset child container. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the container to reset -+ * -+ * In case a software context crashes or becomes non-responsive, the parent -+ * may wish to reset its resources container before the software context is -+ * restarted. -+ * -+ * This routine informs all objects assigned to the child container that the -+ * container is being reset, so they may perform any cleanup operations that are -+ * needed. All objects handles that were owned by the child container shall be -+ * closed. -+ * -+ * Note that such request may be submitted even if the child software context -+ * has not crashed, but the resulting object cleanup operations will not be -+ * aware of that. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_reset_container(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id); -+ -+/* IRQ */ -+ -+/* IRQ index */ -+#define DPRC_IRQ_INDEX 0 -+ -+/* Number of dprc's IRQs */ -+#define DPRC_NUM_OF_IRQS 1 -+ -+/* DPRC IRQ events */ -+ -+/* IRQ event - Indicates that a new object added to the container */ -+#define DPRC_IRQ_EVENT_OBJ_ADDED 0x00000001 -+ -+/* IRQ event - Indicates that an object was removed from the container */ -+#define DPRC_IRQ_EVENT_OBJ_REMOVED 0x00000002 -+ -+/* IRQ event - Indicates that resources added to the container */ -+#define DPRC_IRQ_EVENT_RES_ADDED 0x00000004 -+ -+/* IRQ event - Indicates that resources removed from the container */ -+#define DPRC_IRQ_EVENT_RES_REMOVED 0x00000008 -+ -+/* IRQ event - Indicates that one of the descendant containers that opened by -+ * this container is destroyed -+ */ -+#define DPRC_IRQ_EVENT_CONTAINER_DESTROYED 0x00000010 -+ -+/* IRQ event - Indicates that on one of the container's opened object is -+ * destroyed -+ */ -+#define DPRC_IRQ_EVENT_OBJ_DESTROYED 0x00000020 -+ -+/* Irq event - Indicates that object is created at the container */ -+#define DPRC_IRQ_EVENT_OBJ_CREATED 0x00000040 -+ -+/** -+ * struct dprc_irq_cfg - IRQ configuration -+ * @paddr: Address that must be written to signal a message-based interrupt -+ * @val: Value to write into irq_addr address -+ * @irq_num: A user defined number associated with this IRQ -+ */ -+struct dprc_irq_cfg { -+ uint64_t paddr; -+ uint32_t val; -+ int irq_num; -+}; -+ -+/** -+ * dprc_set_irq() - Set IRQ information for the DPRC to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: Identifies the interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_set_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_irq() - Get IRQ information from the DPRC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_set_irq_enable() - Set overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @en: Interrupt state - enable = 1, disable = 0 -+ * -+ * Allows GPP software to control when interrupts are generated. -+ * Each interrupt can have up to 32 causes. The enable/disable control's the -+ * overall interrupt state. if the interrupt is disabled no causes will cause -+ * an interrupt. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t en); -+ -+/** -+ * dprc_get_irq_enable() - Get overall interrupt state. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @en: Returned interrupt state - enable = 1, disable = 0 -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_get_irq_enable(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint8_t *en); -+ -+/** -+ * dprc_set_irq_mask() - Set interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @mask: event mask to trigger interrupt; -+ * each bit: -+ * 0 = ignore event -+ * 1 = consider event for asserting irq -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t mask); -+ -+/** -+ * dprc_get_irq_mask() - Get interrupt mask. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @mask: Returned event mask to trigger interrupt -+ * -+ * Every interrupt can have up to 32 causes and the interrupt model supports -+ * masking/unmasking each cause independently -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_irq_mask(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *mask); -+ -+/** -+ * dprc_get_irq_status() - Get the current status of any pending interrupts. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @status: Returned interrupts status - one bit per cause: -+ * 0 = no interrupt pending -+ * 1 = interrupt pending -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_get_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t *status); -+ -+/** -+ * dprc_clear_irq_status() - Clear a pending interrupt's status -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @irq_index: The interrupt index to configure -+ * @status: bits to clear (W1C) - one bit per cause: -+ * 0 = don't change -+ * 1 = clear status bit -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_clear_irq_status(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ uint8_t irq_index, -+ uint32_t status); -+ -+/** -+ * struct dprc_attributes - Container attributes -+ * @container_id: Container's ID -+ * @icid: Container's ICID -+ * @portal_id: Container's portal ID -+ * @options: Container's options as set at container's creation -+ * @version: DPRC version -+ */ -+struct dprc_attributes { -+ int container_id; -+ uint16_t icid; -+ int portal_id; -+ uint64_t options; -+ /** -+ * struct version - DPRC version -+ * @major: DPRC major version -+ * @minor: DPRC minor version -+ */ -+ struct { -+ uint16_t major; -+ uint16_t minor; -+ } version; -+}; -+ -+/** -+ * dprc_get_attributes() - Obtains container attributes -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @attributes: Returned container attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_get_attributes(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ struct dprc_attributes *attributes); -+ -+/** -+ * dprc_set_res_quota() - Set allocation policy for a specific resource/object -+ * type in a child container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the child container -+ * @type: Resource/object type -+ * @quota: Sets the maximum number of resources of the selected type -+ * that the child container is allowed to allocate from its parent; -+ * when quota is set to -1, the policy is the same as container's -+ * general policy. -+ * -+ * Allocation policy determines whether or not a container may allocate -+ * resources from its parent. Each container has a 'global' allocation policy -+ * that is set when the container is created. -+ * -+ * This function sets allocation policy for a specific resource type. -+ * The default policy for all resource types matches the container's 'global' -+ * allocation policy. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ * @warning Only the parent container is allowed to change a child policy. -+ */ -+int dprc_set_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t quota); -+ -+/** -+ * dprc_get_res_quota() - Gets the allocation policy of a specific -+ * resource/object type in a child container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the child container -+ * @type: resource/object type -+ * @quota: Returnes the maximum number of resources of the selected type -+ * that the child container is allowed to allocate from the parent; -+ * when quota is set to -1, the policy is the same as container's -+ * general policy. 
-+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_res_quota(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ char *type, -+ uint16_t *quota); -+ -+/* Resource request options */ -+ -+/* Explicit resource ID request - The requested objects/resources -+ * are explicit and sequential (in case of resources). -+ * The base ID is given at res_req at base_align field -+ */ -+#define DPRC_RES_REQ_OPT_EXPLICIT 0x00000001 -+ -+/* Aligned resources request - Relevant only for resources -+ * request (and not objects). Indicates that resources base ID should be -+ * sequential and aligned to the value given at dprc_res_req base_align field -+ */ -+#define DPRC_RES_REQ_OPT_ALIGNED 0x00000002 -+ -+/* Plugged Flag - Relevant only for object assignment request. -+ * Indicates that after all objects assigned. An interrupt will be invoked at -+ * the relevant GPP. The assigned object will be marked as plugged. -+ * plugged objects can't be assigned from their container -+ */ -+#define DPRC_RES_REQ_OPT_PLUGGED 0x00000004 -+ -+/** -+ * struct dprc_res_req - Resource request descriptor, to be used in assignment -+ * or un-assignment of resources and objects. -+ * @type: Resource/object type: Represent as a NULL terminated string. 
-+ * This string may received by using dprc_get_pool() to get resource -+ * type and dprc_get_obj() to get object type; -+ * Note: it is not possible to assign/un-assign DPRC objects -+ * @num: Number of resources -+ * @options: Request options: combination of DPRC_RES_REQ_OPT_ options -+ * @id_base_align: In case of explicit assignment (DPRC_RES_REQ_OPT_EXPLICIT -+ * is set at option), this field represents the required base ID -+ * for resource allocation; In case of aligned assignment -+ * (DPRC_RES_REQ_OPT_ALIGNED is set at option), this field -+ * indicates the required alignment for the resource ID(s) - -+ * use 0 if there is no alignment or explicit ID requirements -+ */ -+struct dprc_res_req { -+ char type[16]; -+ uint32_t num; -+ uint32_t options; -+ int id_base_align; -+}; -+ -+/** -+ * dprc_assign() - Assigns objects or resource to a child container. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @container_id: ID of the child container -+ * @res_req: Describes the type and amount of resources to -+ * assign to the given container -+ * -+ * Assignment is usually done by a parent (this DPRC) to one of its child -+ * containers. -+ * -+ * According to the DPRC allocation policy, the assigned resources may be taken -+ * (allocated) from the container's ancestors, if not enough resources are -+ * available in the container itself. -+ * -+ * The type of assignment depends on the dprc_res_req options, as follows: -+ * - DPRC_RES_REQ_OPT_EXPLICIT: indicates that assigned resources should have -+ * the explicit base ID specified at the id_base_align field of res_req. -+ * - DPRC_RES_REQ_OPT_ALIGNED: indicates that the assigned resources should be -+ * aligned to the value given at id_base_align field of res_req. -+ * - DPRC_RES_REQ_OPT_PLUGGED: Relevant only for object assignment, -+ * and indicates that the object must be set to the plugged state. 
-+ * -+ * A container may use this function with its own ID in order to change a -+ * object state to plugged or unplugged. -+ * -+ * If IRQ information has been set in the child DPRC, it will signal an -+ * interrupt following every change in its object assignment. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_assign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int container_id, -+ struct dprc_res_req *res_req); -+ -+/** -+ * dprc_unassign() - Un-assigns objects or resources from a child container -+ * and moves them into this (parent) DPRC. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @child_container_id: ID of the child container -+ * @res_req: Describes the type and amount of resources to un-assign from -+ * the child container -+ * -+ * Un-assignment of objects can succeed only if the object is not in the -+ * plugged or opened state. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_unassign(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int child_container_id, -+ struct dprc_res_req *res_req); -+ -+/** -+ * dprc_get_pool_count() - Get the number of dprc's pools -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @mc_io: Pointer to MC portal's I/O object -+ * @token: Token of DPRC object -+ * @pool_count: Returned number of resource pools in the dprc -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_get_pool_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *pool_count); -+ -+/** -+ * dprc_get_pool() - Get the type (string) of a certain dprc's pool -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @pool_index: Index of the pool to be queried (< pool_count) -+ * @type: The type of the pool -+ * -+ * The pool types retrieved one by one by incrementing -+ * pool_index up to (not including) the value of pool_count returned -+ * from dprc_get_pool_count(). dprc_get_pool_count() must -+ * be called prior to dprc_get_pool(). -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_pool(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int pool_index, -+ char *type); -+ -+/** -+ * dprc_get_obj_count() - Obtains the number of objects in the DPRC -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_count: Number of objects assigned to the DPRC -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int *obj_count); -+ -+/* Objects Attributes Flags */ -+ -+/* Opened state - Indicates that an object is open by at least one owner */ -+#define DPRC_OBJ_STATE_OPEN 0x00000001 -+/* Plugged state - Indicates that the object is plugged */ -+#define DPRC_OBJ_STATE_PLUGGED 0x00000002 -+ -+/** -+ * Shareability flag - Object flag indicating no memory shareability. -+ * the object generates memory accesses that are non coherent with other -+ * masters; -+ * user is responsible for proper memory handling through IOMMU configuration. 
-+ */ -+#define DPRC_OBJ_FLAG_NO_MEM_SHAREABILITY 0x0001 -+ -+/** -+ * struct dprc_obj_desc - Object descriptor, returned from dprc_get_obj() -+ * @type: Type of object: NULL terminated string -+ * @id: ID of logical object resource -+ * @vendor: Object vendor identifier -+ * @ver_major: Major version number -+ * @ver_minor: Minor version number -+ * @irq_count: Number of interrupts supported by the object -+ * @region_count: Number of mappable regions supported by the object -+ * @state: Object state: combination of DPRC_OBJ_STATE_ states -+ * @label: Object label -+ * @flags: Object's flags -+ */ -+struct dprc_obj_desc { -+ char type[16]; -+ int id; -+ uint16_t vendor; -+ uint16_t ver_major; -+ uint16_t ver_minor; -+ uint8_t irq_count; -+ uint8_t region_count; -+ uint32_t state; -+ char label[16]; -+ uint16_t flags; -+}; -+ -+/** -+ * dprc_get_obj() - Get general information on an object -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_index: Index of the object to be queried (< obj_count) -+ * @obj_desc: Returns the requested object descriptor -+ * -+ * The object descriptors are retrieved one by one by incrementing -+ * obj_index up to (not including) the value of obj_count returned -+ * from dprc_get_obj_count(). dprc_get_obj_count() must -+ * be called prior to dprc_get_obj(). -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ int obj_index, -+ struct dprc_obj_desc *obj_desc); -+ -+/** -+ * dprc_get_obj_desc() - Get object descriptor. -+ * -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: The type of the object to get its descriptor. 
-+ * @obj_id: The id of the object to get its descriptor -+ * @obj_desc: The returned descriptor to fill and return to the user -+ * -+ * Return: '0' on Success; Error code otherwise. -+ * -+ */ -+int dprc_get_obj_desc(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ struct dprc_obj_desc *obj_desc); -+ -+/** -+ * dprc_set_obj_irq() - Set IRQ information for object to trigger an interrupt. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Type of the object to set its IRQ -+ * @obj_id: ID of the object to set its IRQ -+ * @irq_index: The interrupt index to configure -+ * @irq_cfg: IRQ configuration -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_obj_irq() - Get IRQ information from object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Type od the object to get its IRQ -+ * @obj_id: ID of the object to get its IRQ -+ * @irq_index: The interrupt index to configure -+ * @type: Interrupt type: 0 represents message interrupt -+ * type (both irq_addr and irq_val are valid) -+ * @irq_cfg: The returned IRQ attributes -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_get_obj_irq(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t irq_index, -+ int *type, -+ struct dprc_irq_cfg *irq_cfg); -+ -+/** -+ * dprc_get_res_count() - Obtains the number of free resources that are assigned -+ * to this container, by pool type -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @type: pool type -+ * @res_count: Returned number of free resources of the given -+ * resource type that are assigned to this DPRC -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_res_count(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ int *res_count); -+ -+/** -+ * enum dprc_iter_status - Iteration status -+ * @DPRC_ITER_STATUS_FIRST: Perform first iteration -+ * @DPRC_ITER_STATUS_MORE: Indicates more/next iteration is needed -+ * @DPRC_ITER_STATUS_LAST: Indicates last iteration -+ */ -+enum dprc_iter_status { -+ DPRC_ITER_STATUS_FIRST = 0, -+ DPRC_ITER_STATUS_MORE = 1, -+ DPRC_ITER_STATUS_LAST = 2 -+}; -+ -+/** -+ * struct dprc_res_ids_range_desc - Resource ID range descriptor -+ * @base_id: Base resource ID of this range -+ * @last_id: Last resource ID of this range -+ * @iter_status: Iteration status - should be set to DPRC_ITER_STATUS_FIRST at -+ * first iteration; while the returned marker is DPRC_ITER_STATUS_MORE, -+ * additional iterations are needed, until the returned marker is -+ * DPRC_ITER_STATUS_LAST -+ */ -+struct dprc_res_ids_range_desc { -+ int base_id; -+ int last_id; -+ enum dprc_iter_status iter_status; -+}; -+ -+/** -+ * dprc_get_res_ids() - Obtains IDs of free resources in the container -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @type: pool type -+ * @range_desc: range descriptor -+ * -+ * Return: '0' on 
Success; Error code otherwise. -+ */ -+int dprc_get_res_ids(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *type, -+ struct dprc_res_ids_range_desc *range_desc); -+ -+/* Region flags */ -+/* Cacheable - Indicates that region should be mapped as cacheable */ -+#define DPRC_REGION_CACHEABLE 0x00000001 -+ -+/** -+ * enum dprc_region_type - Region type -+ * @DPRC_REGION_TYPE_MC_PORTAL: MC portal region -+ * @DPRC_REGION_TYPE_QBMAN_PORTAL: Qbman portal region -+ */ -+enum dprc_region_type { -+ DPRC_REGION_TYPE_MC_PORTAL, -+ DPRC_REGION_TYPE_QBMAN_PORTAL -+}; -+ -+/** -+ * struct dprc_region_desc - Mappable region descriptor -+ * @base_offset: Region offset from region's base address. -+ * For DPMCP and DPRC objects, region base is offset from SoC MC portals -+ * base address; For DPIO, region base is offset from SoC QMan portals -+ * base address -+ * @size: Region size (in bytes) -+ * @flags: Region attributes -+ * @type: Portal region type -+ */ -+struct dprc_region_desc { -+ uint32_t base_offset; -+ uint32_t size; -+ uint32_t flags; -+ enum dprc_region_type type; -+}; -+ -+/** -+ * dprc_get_obj_region() - Get region information for a specified object. -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Object type as returned in dprc_get_obj() -+ * @obj_id: Unique object instance as returned in dprc_get_obj() -+ * @region_index: The specific region to query -+ * @region_desc: Returns the requested region descriptor -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_get_obj_region(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ uint8_t region_index, -+ struct dprc_region_desc *region_desc); -+ -+/** -+ * dprc_set_obj_label() - Set object label. 
-+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @obj_type: Object's type -+ * @obj_id: Object's ID -+ * @label: The required label. The maximum length is 16 chars. -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_set_obj_label(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ char *obj_type, -+ int obj_id, -+ char *label); -+ -+/** -+ * struct dprc_endpoint - Endpoint description for link connect/disconnect -+ * operations -+ * @type: Endpoint object type: NULL terminated string -+ * @id: Endpoint object ID -+ * @if_id: Interface ID; should be set for endpoints with multiple -+ * interfaces ("dpsw", "dpdmux"); for others, always set to 0 -+ */ -+struct dprc_endpoint { -+ char type[16]; -+ int id; -+ int if_id; -+}; -+ -+/** -+ * struct dprc_connection_cfg - Connection configuration. -+ * Used for virtual connections only -+ * @committed_rate: Committed rate (Mbits/s) -+ * @max_rate: Maximum rate (Mbits/s) -+ */ -+struct dprc_connection_cfg { -+ uint32_t committed_rate; -+ uint32_t max_rate; -+}; -+ -+/** -+ * dprc_connect() - Connect two endpoints to create a network link between them -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @endpoint1: Endpoint 1 configuration parameters -+ * @endpoint2: Endpoint 2 configuration parameters -+ * @cfg: Connection configuration. The connection configuration is ignored for -+ * connections made to DPMAC objects, where rate is retrieved from the -+ * MAC configuration. -+ * -+ * Return: '0' on Success; Error code otherwise. 
-+ */ -+int dprc_connect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ const struct dprc_endpoint *endpoint2, -+ const struct dprc_connection_cfg *cfg); -+ -+/** -+ * dprc_disconnect() - Disconnect one endpoint to remove its network connection -+ * @mc_io: Pointer to MC portal's I/O object -+ * @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+ * @token: Token of DPRC object -+ * @endpoint: Endpoint configuration parameters -+ * -+ * Return: '0' on Success; Error code otherwise. -+ */ -+int dprc_disconnect(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint); -+ -+/** -+* dprc_get_connection() - Get connected endpoint and link status if connection -+* exists. -+* @mc_io: Pointer to MC portal's I/O object -+* @cmd_flags: Command flags; one or more of 'MC_CMD_FLAG_' -+* @token: Token of DPRC object -+* @endpoint1: Endpoint 1 configuration parameters -+* @endpoint2: Returned endpoint 2 configuration parameters -+* @state: Returned link state: -+* 1 - link is up; -+* 0 - link is down; -+* -1 - no connection (endpoint2 information is irrelevant) -+* -+* Return: '0' on Success; -ENAVAIL if connection does not exist. -+*/ -+int dprc_get_connection(struct fsl_mc_io *mc_io, -+ uint32_t cmd_flags, -+ uint16_t token, -+ const struct dprc_endpoint *endpoint1, -+ struct dprc_endpoint *endpoint2, -+ int *state); -+ -+#endif /* _FSL_DPRC_H */ -+ -diff --git a/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h b/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h -new file mode 100644 -index 0000000..3e9af59 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_fd.h -@@ -0,0 +1,774 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. 
-+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPAA2_FD_H -+#define __FSL_DPAA2_FD_H -+ -+/** -+ * DOC: DPAA2 FD - Frame Descriptor APIs for DPAA2 -+ * -+ * Frame Descriptors (FDs) are used to describe frame data in the DPAA2. 
-+ * Frames can be enqueued and dequeued to Frame Queues which are consumed -+ * by the various DPAA accelerators (WRIOP, SEC, PME, DCE) -+ * -+ * There are three types of frames: Single, Scatter Gather and Frame Lists. -+ * -+ * The set of APIs in this file must be used to create, manipulate and -+ * query Frame Descriptor. -+ * -+ */ -+ -+/** -+ * struct dpaa2_fd - Place-holder for FDs. -+ * @words: for easier/faster copying the whole FD structure. -+ * @addr_lo: the lower 32 bits of the address in FD. -+ * @addr_hi: the upper 32 bits of the address in FD. -+ * @len: the length field in FD. -+ * @bpid_offset: represent the bpid and offset fields in FD -+ * @frc: frame context -+ * @ctrl: the 32bit control bits including dd, sc,... va, err. -+ * @flc_lo: the lower 32bit of flow context. -+ * @flc_hi: the upper 32bits of flow context. -+ * -+ * This structure represents the basic Frame Descriptor used in the system. -+ * We represent it via the simplest form that we need for now. Different -+ * overlays may be needed to support different options, etc. (It is impractical -+ * to define One True Struct, because the resulting encoding routines (lots of -+ * read-modify-writes) would be worst-case performance whether or not -+ * circumstances required them.) -+ */ -+struct dpaa2_fd { -+ union { -+ u32 words[8]; -+ struct dpaa2_fd_simple { -+ u32 addr_lo; -+ u32 addr_hi; -+ u32 len; -+ /* offset in the MS 16 bits, BPID in the LS 16 bits */ -+ u32 bpid_offset; -+ u32 frc; /* frame context */ -+ /* "err", "va", "cbmt", "asal", [...] */ -+ u32 ctrl; -+ /* flow context */ -+ u32 flc_lo; -+ u32 flc_hi; -+ } simple; -+ }; -+}; -+ -+enum dpaa2_fd_format { -+ dpaa2_fd_single = 0, -+ dpaa2_fd_list, -+ dpaa2_fd_sg -+}; -+ -+/* Accessors for SG entry fields -+ * -+ * These setters and getters assume little endian format. 
For converting -+ * between LE and cpu endianness, the specific conversion functions must be -+ * called before the SGE contents are accessed by the core (on Rx), -+ * respectively before the SG table is sent to hardware (on Tx) -+ */ -+ -+/** -+ * dpaa2_fd_get_addr() - get the addr field of frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the address in the frame descriptor. -+ */ -+static inline dma_addr_t dpaa2_fd_get_addr(const struct dpaa2_fd *fd) -+{ -+ return (dma_addr_t)((((uint64_t)fd->simple.addr_hi) << 32) -+ + fd->simple.addr_lo); -+} -+ -+/** -+ * dpaa2_fd_set_addr() - Set the addr field of frame descriptor -+ * @fd: the given frame descriptor. -+ * @addr: the address needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_addr(struct dpaa2_fd *fd, dma_addr_t addr) -+{ -+ fd->simple.addr_hi = upper_32_bits(addr); -+ fd->simple.addr_lo = lower_32_bits(addr); -+} -+ -+/** -+ * dpaa2_fd_get_frc() - Get the frame context in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the frame context field in the frame descriptor. -+ */ -+static inline u32 dpaa2_fd_get_frc(const struct dpaa2_fd *fd) -+{ -+ return fd->simple.frc; -+} -+ -+/** -+ * dpaa2_fd_set_frc() - Set the frame context in the frame descriptor -+ * @fd: the given frame descriptor. -+ * @frc: the frame context needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_frc(struct dpaa2_fd *fd, u32 frc) -+{ -+ fd->simple.frc = frc; -+} -+ -+/** -+ * dpaa2_fd_get_flc() - Get the flow context in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the flow context in the frame descriptor. -+ */ -+static inline dma_addr_t dpaa2_fd_get_flc(const struct dpaa2_fd *fd) -+{ -+ return (dma_addr_t)((((uint64_t)fd->simple.flc_hi) << 32) + -+ fd->simple.flc_lo); -+} -+ -+/** -+ * dpaa2_fd_set_flc() - Set the flow context field of frame descriptor -+ * @fd: the given frame descriptor. 
-+ * @flc_addr: the flow context needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_flc(struct dpaa2_fd *fd, dma_addr_t flc_addr) -+{ -+ fd->simple.flc_hi = upper_32_bits(flc_addr); -+ fd->simple.flc_lo = lower_32_bits(flc_addr); -+} -+ -+/** -+ * dpaa2_fd_get_len() - Get the length in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the length field in the frame descriptor. -+ */ -+static inline u32 dpaa2_fd_get_len(const struct dpaa2_fd *fd) -+{ -+ return fd->simple.len; -+} -+ -+/** -+ * dpaa2_fd_set_len() - Set the length field of frame descriptor -+ * @fd: the given frame descriptor. -+ * @len: the length needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_len(struct dpaa2_fd *fd, u32 len) -+{ -+ fd->simple.len = len; -+} -+ -+/** -+ * dpaa2_fd_get_offset() - Get the offset field in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the offset. -+ */ -+static inline uint16_t dpaa2_fd_get_offset(const struct dpaa2_fd *fd) -+{ -+ return (uint16_t)(fd->simple.bpid_offset >> 16) & 0x0FFF; -+} -+ -+/** -+ * dpaa2_fd_set_offset() - Set the offset field of frame descriptor -+ * -+ * @fd: the given frame descriptor. -+ * @offset: the offset needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_offset(struct dpaa2_fd *fd, uint16_t offset) -+{ -+ fd->simple.bpid_offset &= 0xF000FFFF; -+ fd->simple.bpid_offset |= (u32)offset << 16; -+} -+ -+/** -+ * dpaa2_fd_get_format() - Get the format field in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the format. -+ */ -+static inline enum dpaa2_fd_format dpaa2_fd_get_format( -+ const struct dpaa2_fd *fd) -+{ -+ return (enum dpaa2_fd_format)((fd->simple.bpid_offset >> 28) & 0x3); -+} -+ -+/** -+ * dpaa2_fd_set_format() - Set the format field of frame descriptor -+ * -+ * @fd: the given frame descriptor. -+ * @format: the format needs to be set in frame descriptor. 
-+ */ -+static inline void dpaa2_fd_set_format(struct dpaa2_fd *fd, -+ enum dpaa2_fd_format format) -+{ -+ fd->simple.bpid_offset &= 0xCFFFFFFF; -+ fd->simple.bpid_offset |= (u32)format << 28; -+} -+ -+/** -+ * dpaa2_fd_get_bpid() - Get the bpid field in the frame descriptor -+ * @fd: the given frame descriptor. -+ * -+ * Return the bpid. -+ */ -+static inline uint16_t dpaa2_fd_get_bpid(const struct dpaa2_fd *fd) -+{ -+ return (uint16_t)(fd->simple.bpid_offset & 0xFFFF); -+} -+ -+/** -+ * dpaa2_fd_set_bpid() - Set the bpid field of frame descriptor -+ * -+ * @fd: the given frame descriptor. -+ * @bpid: the bpid needs to be set in frame descriptor. -+ */ -+static inline void dpaa2_fd_set_bpid(struct dpaa2_fd *fd, uint16_t bpid) -+{ -+ fd->simple.bpid_offset &= 0xFFFF0000; -+ fd->simple.bpid_offset |= (u32)bpid; -+} -+ -+/** -+ * struct dpaa2_sg_entry - the scatter-gathering structure -+ * @addr_lo: the lower 32bit of address -+ * @addr_hi: the upper 32bit of address -+ * @len: the length in this sg entry. -+ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits. -+ */ -+struct dpaa2_sg_entry { -+ u32 addr_lo; -+ u32 addr_hi; -+ u32 len; -+ u32 bpid_offset; -+}; -+ -+enum dpaa2_sg_format { -+ dpaa2_sg_single = 0, -+ dpaa2_sg_frame_data, -+ dpaa2_sg_sgt_ext -+}; -+ -+/** -+ * dpaa2_sg_get_addr() - Get the address from SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the address. -+ */ -+static inline dma_addr_t dpaa2_sg_get_addr(const struct dpaa2_sg_entry *sg) -+{ -+ return (dma_addr_t)((((u64)sg->addr_hi) << 32) + sg->addr_lo); -+} -+ -+/** -+ * dpaa2_sg_set_addr() - Set the address in SG entry -+ * @sg: the given scatter-gathering object. -+ * @addr: the address to be set. 
-+ */ -+static inline void dpaa2_sg_set_addr(struct dpaa2_sg_entry *sg, dma_addr_t addr) -+{ -+ sg->addr_hi = upper_32_bits(addr); -+ sg->addr_lo = lower_32_bits(addr); -+} -+ -+ -+static inline bool dpaa2_sg_short_len(const struct dpaa2_sg_entry *sg) -+{ -+ return (sg->bpid_offset >> 30) & 0x1; -+} -+ -+/** -+ * dpaa2_sg_get_len() - Get the length in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the length. -+ */ -+static inline u32 dpaa2_sg_get_len(const struct dpaa2_sg_entry *sg) -+{ -+ if (dpaa2_sg_short_len(sg)) -+ return sg->len & 0x1FFFF; -+ return sg->len; -+} -+ -+/** -+ * dpaa2_sg_set_len() - Set the length in SG entry -+ * @sg: the given scatter-gathering object. -+ * @len: the length to be set. -+ */ -+static inline void dpaa2_sg_set_len(struct dpaa2_sg_entry *sg, u32 len) -+{ -+ sg->len = len; -+} -+ -+/** -+ * dpaa2_sg_get_offset() - Get the offset in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the offset. -+ */ -+static inline u16 dpaa2_sg_get_offset(const struct dpaa2_sg_entry *sg) -+{ -+ return (u16)(sg->bpid_offset >> 16) & 0x0FFF; -+} -+ -+/** -+ * dpaa2_sg_set_offset() - Set the offset in SG entry -+ * @sg: the given scatter-gathering object. -+ * @offset: the offset to be set. -+ */ -+static inline void dpaa2_sg_set_offset(struct dpaa2_sg_entry *sg, -+ u16 offset) -+{ -+ sg->bpid_offset &= 0xF000FFFF; -+ sg->bpid_offset |= (u32)offset << 16; -+} -+ -+/** -+ * dpaa2_sg_get_format() - Get the SG format in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the format. -+ */ -+static inline enum dpaa2_sg_format -+ dpaa2_sg_get_format(const struct dpaa2_sg_entry *sg) -+{ -+ return (enum dpaa2_sg_format)((sg->bpid_offset >> 28) & 0x3); -+} -+ -+/** -+ * dpaa2_sg_set_format() - Set the SG format in SG entry -+ * @sg: the given scatter-gathering object. -+ * @format: the format to be set. 
-+ */ -+static inline void dpaa2_sg_set_format(struct dpaa2_sg_entry *sg, -+ enum dpaa2_sg_format format) -+{ -+ sg->bpid_offset &= 0xCFFFFFFF; -+ sg->bpid_offset |= (u32)format << 28; -+} -+ -+/** -+ * dpaa2_sg_get_bpid() - Get the buffer pool id in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return the bpid. -+ */ -+static inline u16 dpaa2_sg_get_bpid(const struct dpaa2_sg_entry *sg) -+{ -+ return (u16)(sg->bpid_offset & 0x3FFF); -+} -+ -+/** -+ * dpaa2_sg_set_bpid() - Set the buffer pool id in SG entry -+ * @sg: the given scatter-gathering object. -+ * @bpid: the bpid to be set. -+ */ -+static inline void dpaa2_sg_set_bpid(struct dpaa2_sg_entry *sg, u16 bpid) -+{ -+ sg->bpid_offset &= 0xFFFFC000; -+ sg->bpid_offset |= (u32)bpid; -+} -+ -+/** -+ * dpaa2_sg_is_final() - Check final bit in SG entry -+ * @sg: the given scatter-gathering object. -+ * -+ * Return bool. -+ */ -+static inline bool dpaa2_sg_is_final(const struct dpaa2_sg_entry *sg) -+{ -+ return !!(sg->bpid_offset >> 31); -+} -+ -+/** -+ * dpaa2_sg_set_final() - Set the final bit in SG entry -+ * @sg: the given scatter-gathering object. -+ * @final: the final boolean to be set. -+ */ -+static inline void dpaa2_sg_set_final(struct dpaa2_sg_entry *sg, bool final) -+{ -+ sg->bpid_offset &= 0x7FFFFFFF; -+ sg->bpid_offset |= (u32)final << 31; -+} -+ -+/* Endianness conversion helper functions -+ * The accelerator drivers which construct / read scatter gather entries -+ * need to call these in order to account for endianness mismatches between -+ * hardware and cpu -+ */ -+#ifdef __BIG_ENDIAN -+/** -+ * dpaa2_sg_cpu_to_le() - convert scatter gather entry from native cpu -+ * format little endian format. -+ * @sg: the given scatter gather entry. 
-+ */ -+static inline void dpaa2_sg_cpu_to_le(struct dpaa2_sg_entry *sg) -+{ -+ uint32_t *p = (uint32_t *)sg; -+ int i; -+ -+ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++) -+ cpu_to_le32s(p++); -+} -+ -+/** -+ * dpaa2_sg_le_to_cpu() - convert scatter gather entry from little endian -+ * format to native cpu format. -+ * @sg: the given scatter gather entry. -+ */ -+static inline void dpaa2_sg_le_to_cpu(struct dpaa2_sg_entry *sg) -+{ -+ uint32_t *p = (uint32_t *)sg; -+ int i; -+ -+ for (i = 0; i < sizeof(*sg) / sizeof(u32); i++) -+ le32_to_cpus(p++); -+} -+#else -+#define dpaa2_sg_cpu_to_le(sg) -+#define dpaa2_sg_le_to_cpu(sg) -+#endif /* __BIG_ENDIAN */ -+ -+ -+/** -+ * struct dpaa2_fl_entry - structure for frame list entry. -+ * @addr_lo: the lower 32bit of address -+ * @addr_hi: the upper 32bit of address -+ * @len: the length in this sg entry. -+ * @bpid_offset: offset in the MS 16 bits, BPID in the LS 16 bits. -+ * @frc: frame context -+ * @ctrl: the 32bit control bits including dd, sc,... va, err. -+ * @flc_lo: the lower 32bit of flow context. -+ * @flc_hi: the upper 32bits of flow context. -+ * -+ * Frame List Entry (FLE) -+ * Identical to dpaa2_fd.simple layout, but some bits are different -+ */ -+struct dpaa2_fl_entry { -+ u32 addr_lo; -+ u32 addr_hi; -+ u32 len; -+ u32 bpid_offset; -+ u32 frc; -+ u32 ctrl; -+ u32 flc_lo; -+ u32 flc_hi; -+}; -+ -+enum dpaa2_fl_format { -+ dpaa2_fl_single = 0, -+ dpaa2_fl_res, -+ dpaa2_fl_sg -+}; -+ -+/** -+ * dpaa2_fl_get_addr() - Get address in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return address for the get function. -+ */ -+static inline dma_addr_t dpaa2_fl_get_addr(const struct dpaa2_fl_entry *fle) -+{ -+ return (dma_addr_t)((((uint64_t)fle->addr_hi) << 32) + fle->addr_lo); -+} -+ -+/** -+ * dpaa2_fl_set_addr() - Set the address in the frame list entry -+ * @fle: the given frame list entry. -+ * @addr: the address needs to be set. 
-+ * -+ */ -+static inline void dpaa2_fl_set_addr(struct dpaa2_fl_entry *fle, -+ dma_addr_t addr) -+{ -+ fle->addr_hi = upper_32_bits(addr); -+ fle->addr_lo = lower_32_bits(addr); -+} -+ -+/** -+ * dpaa2_fl_get_flc() - Get the flow context in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return flow context for the get function. -+ */ -+static inline dma_addr_t dpaa2_fl_get_flc(const struct dpaa2_fl_entry *fle) -+{ -+ return (dma_addr_t)((((uint64_t)fle->flc_hi) << 32) + fle->flc_lo); -+} -+ -+/** -+ * dpaa2_fl_set_flc() - Set the flow context in the frame list entry -+ * @fle: the given frame list entry. -+ * @flc_addr: the flow context address needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_flc(struct dpaa2_fl_entry *fle, -+ dma_addr_t flc_addr) -+{ -+ fle->flc_hi = upper_32_bits(flc_addr); -+ fle->flc_lo = lower_32_bits(flc_addr); -+} -+ -+/** -+ * dpaa2_fl_get_len() - Get the length in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return length for the get function. -+ */ -+static inline u32 dpaa2_fl_get_len(const struct dpaa2_fl_entry *fle) -+{ -+ return fle->len; -+} -+ -+/** -+ * dpaa2_fl_set_len() - Set the length in the frame list entry -+ * @fle: the given frame list entry. -+ * @len: the length needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_len(struct dpaa2_fl_entry *fle, u32 len) -+{ -+ fle->len = len; -+} -+ -+/** -+ * dpaa2_fl_get_offset() - Get/Set the offset in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return offset for the get function. -+ */ -+static inline uint16_t dpaa2_fl_get_offset(const struct dpaa2_fl_entry *fle) -+{ -+ return (uint16_t)(fle->bpid_offset >> 16) & 0x0FFF; -+} -+ -+/** -+ * dpaa2_fl_set_offset() - Set the offset in the frame list entry -+ * @fle: the given frame list entry. -+ * @offset: the offset needs to be set. 
-+ * -+ */ -+static inline void dpaa2_fl_set_offset(struct dpaa2_fl_entry *fle, -+ uint16_t offset) -+{ -+ fle->bpid_offset &= 0xF000FFFF; -+ fle->bpid_offset |= (u32)(offset & 0x0FFF) << 16; -+} -+ -+/** -+ * dpaa2_fl_get_format() - Get the format in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return frame list format for the get function. -+ */ -+static inline enum dpaa2_fl_format dpaa2_fl_get_format( -+ const struct dpaa2_fl_entry *fle) -+{ -+ return (enum dpaa2_fl_format)((fle->bpid_offset >> 28) & 0x3); -+} -+ -+/** -+ * dpaa2_fl_set_format() - Set the format in the frame list entry -+ * @fle: the given frame list entry. -+ * @format: the frame list format needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_format(struct dpaa2_fl_entry *fle, -+ enum dpaa2_fl_format format) -+{ -+ fle->bpid_offset &= 0xCFFFFFFF; -+ fle->bpid_offset |= (u32)(format & 0x3) << 28; -+} -+ -+/** -+ * dpaa2_fl_get_bpid() - Get the buffer pool id in the frame list entry -+ * @fle: the given frame list entry. -+ * -+ * Return bpid for the get function. -+ */ -+static inline uint16_t dpaa2_fl_get_bpid(const struct dpaa2_fl_entry *fle) -+{ -+ return (uint16_t)(fle->bpid_offset & 0x3FFF); -+} -+ -+/** -+ * dpaa2_fl_set_bpid() - Set the buffer pool id in the frame list entry -+ * @fle: the given frame list entry. -+ * @bpid: the buffer pool id needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_bpid(struct dpaa2_fl_entry *fle, uint16_t bpid) -+{ -+ fle->bpid_offset &= 0xFFFFC000; -+ fle->bpid_offset |= (u32)bpid; -+} -+ -+/** dpaa2_fl_is_final() - check the final bit is set or not in the frame list. -+ * @fle: the given frame list entry. -+ * -+ * Return final bit settting. -+ */ -+static inline bool dpaa2_fl_is_final(const struct dpaa2_fl_entry *fle) -+{ -+ return !!(fle->bpid_offset >> 31); -+} -+ -+/** -+ * dpaa2_fl_set_final() - Set the final bit in the frame list entry -+ * @fle: the given frame list entry. 
-+ * @final: the final bit needs to be set. -+ * -+ */ -+static inline void dpaa2_fl_set_final(struct dpaa2_fl_entry *fle, bool final) -+{ -+ fle->bpid_offset &= 0x7FFFFFFF; -+ fle->bpid_offset |= (u32)final << 31; -+} -+ -+/** -+ * struct dpaa2_dq - the qman result structure -+ * @dont_manipulate_directly: the 16 32bit data to represent the whole -+ * possible qman dequeue result. -+ * -+ * When frames are dequeued, the FDs show up inside "dequeue" result structures -+ * (if at all, not all dequeue results contain valid FDs). This structure type -+ * is intentionally defined without internal detail, and the only reason it -+ * isn't declared opaquely (without size) is to allow the user to provide -+ * suitably-sized (and aligned) memory for these entries. -+ */ -+struct dpaa2_dq { -+ uint32_t dont_manipulate_directly[16]; -+}; -+ -+/* Parsing frame dequeue results */ -+/* FQ empty */ -+#define DPAA2_DQ_STAT_FQEMPTY 0x80 -+/* FQ held active */ -+#define DPAA2_DQ_STAT_HELDACTIVE 0x40 -+/* FQ force eligible */ -+#define DPAA2_DQ_STAT_FORCEELIGIBLE 0x20 -+/* Valid frame */ -+#define DPAA2_DQ_STAT_VALIDFRAME 0x10 -+/* FQ ODP enable */ -+#define DPAA2_DQ_STAT_ODPVALID 0x04 -+/* Volatile dequeue */ -+#define DPAA2_DQ_STAT_VOLATILE 0x02 -+/* volatile dequeue command is expired */ -+#define DPAA2_DQ_STAT_EXPIRED 0x01 -+ -+/** -+ * dpaa2_dq_flags() - Get the stat field of dequeue response -+ * @dq: the dequeue result. -+ */ -+uint32_t dpaa2_dq_flags(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_is_pull() - Check whether the dq response is from a pull -+ * command. -+ * @dq: the dequeue result. -+ * -+ * Return 1 for volatile(pull) dequeue, 0 for static dequeue. -+ */ -+static inline int dpaa2_dq_is_pull(const struct dpaa2_dq *dq) -+{ -+ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_VOLATILE); -+} -+ -+/** -+ * dpaa2_dq_is_pull_complete() - Check whether the pull command is completed. -+ * @dq: the dequeue result. -+ * -+ * Return boolean. 
-+ */ -+static inline int dpaa2_dq_is_pull_complete( -+ const struct dpaa2_dq *dq) -+{ -+ return (int)(dpaa2_dq_flags(dq) & DPAA2_DQ_STAT_EXPIRED); -+} -+ -+/** -+ * dpaa2_dq_seqnum() - Get the seqnum field in dequeue response -+ * seqnum is valid only if VALIDFRAME flag is TRUE -+ * @dq: the dequeue result. -+ * -+ * Return seqnum. -+ */ -+uint16_t dpaa2_dq_seqnum(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_odpid() - Get the seqnum field in dequeue response -+ * odpid is valid only if ODPVAILD flag is TRUE. -+ * @dq: the dequeue result. -+ * -+ * Return odpid. -+ */ -+uint16_t dpaa2_dq_odpid(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_fqid() - Get the fqid in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return fqid. -+ */ -+uint32_t dpaa2_dq_fqid(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_byte_count() - Get the byte count in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the byte count remaining in the FQ. -+ */ -+uint32_t dpaa2_dq_byte_count(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_frame_count() - Get the frame count in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame count remaining in the FQ. -+ */ -+uint32_t dpaa2_dq_frame_count(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_fd_ctx() - Get the frame queue context in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame queue context. -+ */ -+uint64_t dpaa2_dq_fqd_ctx(const struct dpaa2_dq *dq); -+ -+/** -+ * dpaa2_dq_fd() - Get the frame descriptor in dequeue response -+ * @dq: the dequeue result. -+ * -+ * Return the frame descriptor. 
-+ */ -+const struct dpaa2_fd *dpaa2_dq_fd(const struct dpaa2_dq *dq); -+ -+#endif /* __FSL_DPAA2_FD_H */ -diff --git a/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h b/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h -new file mode 100644 -index 0000000..6ea2ff9 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/fsl_dpaa2_io.h -@@ -0,0 +1,619 @@ -+/* Copyright 2014 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of Freescale Semiconductor nor the -+ * names of its contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY Freescale Semiconductor ``AS IS'' AND ANY -+ * EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED -+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -+ * DISCLAIMED. 
IN NO EVENT SHALL Freescale Semiconductor BE LIABLE FOR ANY -+ * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES -+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -+ * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND -+ * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS -+ * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_DPAA2_IO_H -+#define __FSL_DPAA2_IO_H -+ -+#include "fsl_dpaa2_fd.h" -+ -+struct dpaa2_io; -+struct dpaa2_io_store; -+ -+/** -+ * DOC: DPIO Service Management -+ * -+ * The DPIO service provides APIs for users to interact with the datapath -+ * by enqueueing and dequeing frame descriptors. -+ * -+ * The following set of APIs can be used to enqueue and dequeue frames -+ * as well as producing notification callbacks when data is available -+ * for dequeue. -+ */ -+ -+/** -+ * struct dpaa2_io_desc - The DPIO descriptor. -+ * @receives_notifications: Use notificaton mode. -+ * @has_irq: use irq-based proessing. -+ * @will_poll: use poll processing. -+ * @has_8prio: set for channel with 8 priority WQs. -+ * @cpu: the cpu index that at least interrupt handlers will execute on. -+ * @stash_affinity: the stash affinity for this portal favour 'cpu' -+ * @regs_cena: the cache enabled regs. -+ * @regs_cinh: the cache inhibited regs. -+ * @dpio_id: The dpio index. -+ * @qman_version: the qman version -+ * -+ * Describe the attributes and features of the DPIO object. -+ */ -+struct dpaa2_io_desc { -+ /* non-zero iff the DPIO has a channel */ -+ int receives_notifications; -+ /* non-zero if the DPIO portal interrupt is handled. If so, the -+ * caller/OS handles the interrupt and calls dpaa2_io_service_irq(). 
*/ -+ int has_irq; -+ /* non-zero if the caller/OS is prepared to called the -+ * dpaa2_io_service_poll() routine as part of its run-to-completion (or -+ * scheduling) loop. If so, the DPIO service may dynamically switch some -+ * of its processing between polling-based and irq-based. It is illegal -+ * combination to have (!has_irq && !will_poll). */ -+ int will_poll; -+ /* ignored unless 'receives_notifications'. Non-zero iff the channel has -+ * 8 priority WQs, otherwise the channel has 2. */ -+ int has_8prio; -+ /* the cpu index that at least interrupt handlers will execute on. And -+ * if 'stash_affinity' is non-zero, the cache targeted by stash -+ * transactions is affine to this cpu. */ -+ int cpu; -+ /* non-zero if stash transactions for this portal favour 'cpu' over -+ * other CPUs. (Eg. zero if there's no stashing, or stashing is to -+ * shared cache.) */ -+ int stash_affinity; -+ /* Caller-provided flags, determined by bus-scanning and/or creation of -+ * DPIO objects via MC commands. */ -+ void *regs_cena; -+ void *regs_cinh; -+ int dpio_id; -+ uint32_t qman_version; -+}; -+ -+/** -+ * dpaa2_io_create() - create a dpaa2_io object. -+ * @desc: the dpaa2_io descriptor -+ * -+ * Activates a "struct dpaa2_io" corresponding to the given config of an actual -+ * DPIO object. This handle can be used on it's own (like a one-portal "DPIO -+ * service") or later be added to a service-type "struct dpaa2_io" object. Note, -+ * the information required on 'cfg' is copied so the caller is free to do as -+ * they wish with the input parameter upon return. -+ * -+ * Return a valid dpaa2_io object for success, or NULL for failure. -+ */ -+struct dpaa2_io *dpaa2_io_create(const struct dpaa2_io_desc *desc); -+ -+/** -+ * dpaa2_io_create_service() - Create an (initially empty) DPIO service. -+ * -+ * Return a valid dpaa2_io object for success, or NULL for failure. 
-+ */ -+struct dpaa2_io *dpaa2_io_create_service(void); -+ -+/** -+ * dpaa2_io_default_service() - Use the driver's own global (and initially -+ * empty) DPIO service. -+ * -+ * This increments the reference count, so don't forget to use dpaa2_io_down() -+ * for each time this function is called. -+ * -+ * Return a valid dpaa2_io object for success, or NULL for failure. -+ */ -+struct dpaa2_io *dpaa2_io_default_service(void); -+ -+/** -+ * dpaa2_io_down() - release the dpaa2_io object. -+ * @d: the dpaa2_io object to be released. -+ * -+ * The "struct dpaa2_io" type can represent an individual DPIO object (as -+ * described by "struct dpaa2_io_desc") or an instance of a "DPIO service", -+ * which can be used to group/encapsulate multiple DPIO objects. In all cases, -+ * each handle obtained should be released using this function. -+ */ -+void dpaa2_io_down(struct dpaa2_io *d); -+ -+/** -+ * dpaa2_io_service_add() - Add the given DPIO object to the given DPIO service. -+ * @service: the given DPIO service. -+ * @obj: the given DPIO object. -+ * -+ * 'service' must have been created by dpaa2_io_create_service() and 'obj' -+ * must have been created by dpaa2_io_create(). This increments the reference -+ * count on the object that 'obj' refers to, so the user could call -+ * dpaa2_io_down(obj) after this and the object will persist within the service -+ * (and will be destroyed when the service is destroyed). -+ * -+ * Return 0 for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_service_add(struct dpaa2_io *service, struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_get_descriptor() - Get the DPIO descriptor of the given DPIO object. -+ * @obj: the given DPIO object. -+ * @desc: the returned DPIO descriptor. -+ * -+ * This function will return failure if the given dpaa2_io struct represents a -+ * service rather than an individual DPIO object, otherwise it returns zero and -+ * the given 'cfg' structure is filled in. 
-+ * -+ * Return 0 for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_get_descriptor(struct dpaa2_io *obj, struct dpaa2_io_desc *desc); -+ -+/** -+ * dpaa2_io_poll() - Process any notifications and h/w-initiated events that -+ * are polling-driven. -+ * @obj: the given DPIO object. -+ * -+ * Obligatory for DPIO objects that have dpaa2_io_desc::will_poll non-zero. -+ * -+ * Return 0 for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_poll(struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_irq() - Process any notifications and h/w-initiated events that are -+ * irq-driven. -+ * @obj: the given DPIO object. -+ * -+ * Obligatory for DPIO objects that have dpaa2_io_desc::has_irq non-zero. -+ * -+ * Return IRQ_HANDLED for success, or -EINVAL for failure. -+ */ -+int dpaa2_io_irq(struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_pause_poll() - Used to stop polling. -+ * @obj: the given DPIO object. -+ * -+ * If a polling application is going to stop polling for a period of time and -+ * supports interrupt processing, it can call this function to convert all -+ * processing to IRQ. (Eg. when sleeping.) -+ * -+ * Return -EINVAL. -+ */ -+int dpaa2_io_pause_poll(struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_resume_poll() - Resume polling -+ * @obj: the given DPIO object. -+ * -+ * Return -EINVAL. -+ */ -+int dpaa2_io_resume_poll(struct dpaa2_io *obj); -+ -+/** -+ * dpaa2_io_service_notifications() - Get a mask of cpus that the DPIO service -+ * can receive notifications on. -+ * @s: the given DPIO object. -+ * @mask: the mask of cpus. -+ * -+ * Note that this is a run-time snapshot. If things like cpu-hotplug are -+ * supported in the target system, then an attempt to register notifications -+ * for a cpu that appears present in the given mask might fail if that cpu has -+ * gone offline in the mean time. 
-+ */ -+void dpaa2_io_service_notifications(struct dpaa2_io *s, cpumask_t *mask); -+ -+/** -+ * dpaa2_io_service_stashing - Get a mask of cpus that the DPIO service has stash -+ * affinity to. -+ * @s: the given DPIO object. -+ * @mask: the mask of cpus. -+ */ -+void dpaa2_io_service_stashing(struct dpaa2_io *s, cpumask_t *mask); -+ -+/** -+ * dpaa2_io_service_nonaffine() - Check the DPIO service's cpu affinity -+ * for stashing. -+ * @s: the given DPIO object. -+ * -+ * Return a boolean, whether or not the DPIO service has resources that have no -+ * particular cpu affinity for stashing. (Useful to know if you wish to operate -+ * on CPUs that the service has no affinity to, you would choose to use -+ * resources that are neutral, rather than affine to a different CPU.) Unlike -+ * other service-specific APIs, this one doesn't return an error if it is passed -+ * a non-service object. So don't do it. -+ */ -+int dpaa2_io_service_has_nonaffine(struct dpaa2_io *s); -+ -+/*************************/ -+/* Notification handling */ -+/*************************/ -+ -+/** -+ * struct dpaa2_io_notification_ctx - The DPIO notification context structure. -+ * @cb: the callback to be invoked when the notification arrives. -+ * @is_cdan: Zero/FALSE for FQDAN, non-zero/TRUE for CDAN. -+ * @id: FQID or channel ID, needed for rearm. -+ * @desired_cpu: the cpu on which the notifications will show up. -+ * @actual_cpu: the cpu the notification actually shows up. -+ * @migration_cb: callback function used for migration. -+ * @dpio_id: the dpio index. -+ * @qman64: the 64-bit context value shows up in the FQDAN/CDAN. -+ * @node: the list node. -+ * @dpio_private: the dpio object internal to dpio_service. -+ * -+ * When a FQDAN/CDAN registration is made (eg. by DPNI/DPCON/DPAI code), a -+ * context of the following type is used. 
The caller can embed it within a -+ * larger structure in order to add state that is tracked along with the -+ * notification (this may be useful when callbacks are invoked that pass this -+ * notification context as a parameter). -+ */ -+struct dpaa2_io_notification_ctx { -+ void (*cb)(struct dpaa2_io_notification_ctx *); -+ int is_cdan; -+ uint32_t id; -+ /* This specifies which cpu the user wants notifications to show up on -+ * (ie. to execute 'cb'). If notification-handling on that cpu is not -+ * available at the time of notification registration, the registration -+ * will fail. */ -+ int desired_cpu; -+ /* If the target platform supports cpu-hotplug or other features -+ * (related to power-management, one would expect) that can migrate IRQ -+ * handling of a given DPIO object, then this value will potentially be -+ * different to 'desired_cpu' at run-time. */ -+ int actual_cpu; -+ /* And if migration does occur and this callback is non-NULL, it will -+ * be invoked prior to any futher notification callbacks executing on -+ * 'newcpu'. Note that 'oldcpu' is what 'actual_cpu' was prior to the -+ * migration, and 'newcpu' is what it is now. Both could conceivably be -+ * different to 'desired_cpu'. */ -+ void (*migration_cb)(struct dpaa2_io_notification_ctx *, -+ int oldcpu, int newcpu); -+ /* These are returned from dpaa2_io_service_register(). -+ * 'dpio_id' is the dpaa2_io_desc::dpio_id value of the DPIO object that -+ * has been selected by the service for receiving the notifications. The -+ * caller can use this value in the MC command that attaches the FQ (or -+ * channel) of their DPNI (or DPCON, respectively) to this DPIO for -+ * notification-generation. 
-+ * 'qman64' is the 64-bit context value that needs to be sent in the -+ * same MC command in order to be programmed into the FQ or channel - -+ * this is the 64-bit value that shows up in the FQDAN/CDAN messages to -+ * the DPIO object, and the DPIO service specifies this value back to -+ * the caller so that the notifications that show up will be -+ * comprensible/demux-able to the DPIO service. */ -+ int dpio_id; -+ uint64_t qman64; -+ /* These fields are internal to the DPIO service once the context is -+ * registered. TBD: may require more internal state fields. */ -+ struct list_head node; -+ void *dpio_private; -+}; -+ -+/** -+ * dpaa2_io_service_register() - Prepare for servicing of FQDAN or CDAN -+ * notifications on the given DPIO service. -+ * @service: the given DPIO service. -+ * @ctx: the notification context. -+ * -+ * The MC command to attach the caller's DPNI/DPCON/DPAI device to a -+ * DPIO object is performed after this function is called. In that way, (a) the -+ * DPIO service is "ready" to handle a notification arrival (which might happen -+ * before the "attach" command to MC has returned control of execution back to -+ * the caller), and (b) the DPIO service can provide back to the caller the -+ * 'dpio_id' and 'qman64' parameters that it should pass along in the MC command -+ * in order for the DPNI/DPCON/DPAI resources to be configured to produce the -+ * right notification fields to the DPIO service. -+ * -+ * Return 0 for success, or -ENODEV for failure. -+ */ -+int dpaa2_io_service_register(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx); -+ -+/** -+ * dpaa2_io_service_deregister - The opposite of 'register'. -+ * @service: the given DPIO service. -+ * @ctx: the notification context. 
-+ * -+ * Note that 'register' should be called *before* -+ * making the MC call to attach the notification-producing device to the -+ * notification-handling DPIO service, the 'unregister' function should be -+ * called *after* making the MC call to detach the notification-producing -+ * device. -+ * -+ * Return 0 for success. -+ */ -+int dpaa2_io_service_deregister(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx); -+ -+/** -+ * dpaa2_io_service_rearm() - Rearm the notification for the given DPIO service. -+ * @service: the given DPIO service. -+ * @ctx: the notification context. -+ * -+ * Once a FQDAN/CDAN has been produced, the corresponding FQ/channel is -+ * considered "disarmed". Ie. the user can issue pull dequeue operations on that -+ * traffic source for as long as it likes. Eventually it may wish to "rearm" -+ * that source to allow it to produce another FQDAN/CDAN, that's what this -+ * function achieves. -+ * -+ * Return 0 for success, or -ENODEV if no service available, -EBUSY/-EIO for not -+ * being able to implement the rearm the notifiaton due to setting CDAN or -+ * scheduling fq. -+ */ -+int dpaa2_io_service_rearm(struct dpaa2_io *service, -+ struct dpaa2_io_notification_ctx *ctx); -+ -+/** -+ * dpaa2_io_from_registration() - Get the DPIO object from the given notification -+ * context. -+ * @ctx: the given notifiation context. -+ * @ret: the returned DPIO object. -+ * -+ * Like 'dpaa2_io_service_get_persistent()' (see below), except that the -+ * returned handle is not selected based on a 'cpu' argument, but is the same -+ * DPIO object that the given notification context is registered against. The -+ * returned handle carries a reference count, so a corresponding dpaa2_io_down() -+ * would be required when the reference is no longer needed. -+ * -+ * Return 0 for success, or -EINVAL for failure. 
-+ */ -+int dpaa2_io_from_registration(struct dpaa2_io_notification_ctx *ctx, -+ struct dpaa2_io **ret); -+ -+/**********************************/ -+/* General usage of DPIO services */ -+/**********************************/ -+ -+/** -+ * dpaa2_io_service_get_persistent() - Get the DPIO resource from the given -+ * notification context and cpu. -+ * @service: the DPIO service. -+ * @cpu: the cpu that the DPIO resource has stashing affinity to. -+ * @ret: the returned DPIO resource. -+ * -+ * The various DPIO interfaces can accept a "struct dpaa2_io" handle that refers -+ * to an individual DPIO object or to a whole service. In the latter case, an -+ * internal choice is made for each operation. This function supports the former -+ * case, by selecting an individual DPIO object *from* the service in order for -+ * it to be used multiple times to provide "persistence". The returned handle -+ * also carries a reference count, so a corresponding dpaa2_io_down() would be -+ * required when the reference is no longer needed. Note, a parameter of -1 for -+ * 'cpu' will select a DPIO resource that has no particular stashing affinity to -+ * any cpu (eg. one that stashes to platform cache). -+ * -+ * Return 0 for success, or -ENODEV for failure. -+ */ -+int dpaa2_io_service_get_persistent(struct dpaa2_io *service, int cpu, -+ struct dpaa2_io **ret); -+ -+/*****************/ -+/* Pull dequeues */ -+/*****************/ -+ -+/** -+ * dpaa2_io_service_pull_fq() - pull dequeue functions from a fq. -+ * @d: the given DPIO service. -+ * @fqid: the given frame queue id. -+ * @s: the dpaa2_io_store object for the result. -+ * -+ * To support DCA/order-preservation, it will be necessary to support an -+ * alternative form, because they must ultimately dequeue to DQRR rather than a -+ * user-supplied dpaa2_io_store. 
Furthermore, those dequeue results will -+ * "complete" using a caller-provided callback (from DQRR processing) rather -+ * than the caller explicitly looking at their dpaa2_io_store for results. Eg. -+ * the alternative form will likely take a callback parameter rather than a -+ * store parameter. Ignoring it for now to keep the picture clearer. -+ * -+ * Return 0 for success, or error code for failure. -+ */ -+int dpaa2_io_service_pull_fq(struct dpaa2_io *d, uint32_t fqid, -+ struct dpaa2_io_store *s); -+ -+/** -+ * dpaa2_io_service_pull_channel() - pull dequeue functions from a channel. -+ * @d: the given DPIO service. -+ * @channelid: the given channel id. -+ * @s: the dpaa2_io_store object for the result. -+ * -+ * To support DCA/order-preservation, it will be necessary to support an -+ * alternative form, because they must ultimately dequeue to DQRR rather than a -+ * user-supplied dpaa2_io_store. Furthermore, those dequeue results will -+ * "complete" using a caller-provided callback (from DQRR processing) rather -+ * than the caller explicitly looking at their dpaa2_io_store for results. Eg. -+ * the alternative form will likely take a callback parameter rather than a -+ * store parameter. Ignoring it for now to keep the picture clearer. -+ * -+ * Return 0 for success, or error code for failure. -+ */ -+int dpaa2_io_service_pull_channel(struct dpaa2_io *d, uint32_t channelid, -+ struct dpaa2_io_store *s); -+ -+/************/ -+/* Enqueues */ -+/************/ -+ -+/** -+ * dpaa2_io_service_enqueue_fq() - Enqueue a frame to a frame queue. -+ * @d: the given DPIO service. -+ * @fqid: the given frame queue id. -+ * @fd: the frame descriptor which is enqueued. -+ * -+ * This definition bypasses some features that are not expected to be priority-1 -+ * features, and may not be needed at all via current assumptions (QBMan's -+ * feature set is wider than the MC object model is intendeding to support, -+ * initially at least). 
Plus, keeping them out (for now) keeps the API view -+ * simpler. Missing features are; -+ * - enqueue confirmation (results DMA'd back to the user) -+ * - ORP -+ * - DCA/order-preservation (see note in "pull dequeues") -+ * - enqueue consumption interrupts -+ * -+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready, -+ * or -ENODEV if there is no dpio service. -+ */ -+int dpaa2_io_service_enqueue_fq(struct dpaa2_io *d, -+ uint32_t fqid, -+ const struct dpaa2_fd *fd); -+ -+/** -+ * dpaa2_io_service_enqueue_qd() - Enqueue a frame to a QD. -+ * @d: the given DPIO service. -+ * @qdid: the given queuing destination id. -+ * @prio: the given queuing priority. -+ * @qdbin: the given queuing destination bin. -+ * @fd: the frame descriptor which is enqueued. -+ * -+ * This definition bypasses some features that are not expected to be priority-1 -+ * features, and may not be needed at all via current assumptions (QBMan's -+ * feature set is wider than the MC object model is intendeding to support, -+ * initially at least). Plus, keeping them out (for now) keeps the API view -+ * simpler. Missing features are; -+ * - enqueue confirmation (results DMA'd back to the user) -+ * - ORP -+ * - DCA/order-preservation (see note in "pull dequeues") -+ * - enqueue consumption interrupts -+ * -+ * Return 0 for successful enqueue, or -EBUSY if the enqueue ring is not ready, -+ * or -ENODEV if there is no dpio service. -+ */ -+int dpaa2_io_service_enqueue_qd(struct dpaa2_io *d, -+ uint32_t qdid, uint8_t prio, uint16_t qdbin, -+ const struct dpaa2_fd *fd); -+ -+/*******************/ -+/* Buffer handling */ -+/*******************/ -+ -+/** -+ * dpaa2_io_service_release() - Release buffers to a buffer pool. -+ * @d: the given DPIO object. -+ * @bpid: the buffer pool id. -+ * @buffers: the buffers to be released. -+ * @num_buffers: the number of the buffers to be released. -+ * -+ * Return 0 for success, and negative error code for failure. 
-+ */ -+int dpaa2_io_service_release(struct dpaa2_io *d, -+ uint32_t bpid, -+ const uint64_t *buffers, -+ unsigned int num_buffers); -+ -+/** -+ * dpaa2_io_service_acquire() - Acquire buffers from a buffer pool. -+ * @d: the given DPIO object. -+ * @bpid: the buffer pool id. -+ * @buffers: the buffer addresses for acquired buffers. -+ * @num_buffers: the expected number of the buffers to acquire. -+ * -+ * Return a negative error code if the command failed, otherwise it returns -+ * the number of buffers acquired, which may be less than the number requested. -+ * Eg. if the buffer pool is empty, this will return zero. -+ */ -+int dpaa2_io_service_acquire(struct dpaa2_io *d, -+ uint32_t bpid, -+ uint64_t *buffers, -+ unsigned int num_buffers); -+ -+/***************/ -+/* DPIO stores */ -+/***************/ -+ -+/* These are reusable memory blocks for retrieving dequeue results into, and to -+ * assist with parsing those results once they show up. They also hide the -+ * details of how to use "tokens" to make detection of DMA results possible (ie. -+ * comparing memory before the DMA and after it) while minimising the needless -+ * clearing/rewriting of those memory locations between uses. -+ */ -+ -+/** -+ * dpaa2_io_store_create() - Create the dma memory storage for dequeue -+ * result. -+ * @max_frames: the maximum number of dequeued result for frames, must be <= 16. -+ * @dev: the device to allow mapping/unmapping the DMAable region. -+ * -+ * Constructor - max_frames must be <= 16. The user provides the -+ * device struct to allow mapping/unmapping of the DMAable region. Area for -+ * storage will be allocated during create. The size of this storage is -+ * "max_frames*sizeof(struct dpaa2_dq)". The 'dpaa2_io_store' returned is a -+ * wrapper structure allocated within the DPIO code, which owns and manages -+ * allocated store. 
-+ * -+ * Return dpaa2_io_store struct for successfuly created storage memory, or NULL -+ * if not getting the stroage for dequeue result in create API. -+ */ -+struct dpaa2_io_store *dpaa2_io_store_create(unsigned int max_frames, -+ struct device *dev); -+ -+/** -+ * dpaa2_io_store_destroy() - Destroy the dma memory storage for dequeue -+ * result. -+ * @s: the storage memory to be destroyed. -+ * -+ * Frees to specified storage memory. -+ */ -+void dpaa2_io_store_destroy(struct dpaa2_io_store *s); -+ -+/** -+ * dpaa2_io_store_next() - Determine when the next dequeue result is available. -+ * @s: the dpaa2_io_store object. -+ * @is_last: indicate whether this is the last frame in the pull command. -+ * -+ * Once dpaa2_io_store has been passed to a function that performs dequeues to -+ * it, like dpaa2_ni_rx(), this function can be used to determine when the next -+ * frame result is available. Once this function returns non-NULL, a subsequent -+ * call to it will try to find the *next* dequeue result. -+ * -+ * Note that if a pull-dequeue has a null result because the target FQ/channel -+ * was empty, then this function will return NULL rather than expect the caller -+ * to always check for this on his own side. As such, "is_last" can be used to -+ * differentiate between "end-of-empty-dequeue" and "still-waiting". -+ * -+ * Return dequeue result for a valid dequeue result, or NULL for empty dequeue. -+ */ -+struct dpaa2_dq *dpaa2_io_store_next(struct dpaa2_io_store *s, int *is_last); -+ -+#ifdef CONFIG_FSL_QBMAN_DEBUG -+/** -+ * dpaa2_io_query_fq_count() - Get the frame and byte count for a given fq. -+ * @d: the given DPIO object. -+ * @fqid: the id of frame queue to be queried. -+ * @fcnt: the queried frame count. -+ * @bcnt: the queried byte count. -+ * -+ * Knowing the FQ count at run-time can be useful in debugging situations. -+ * The instantaneous frame- and byte-count are hereby returned. 
-+ * -+ * Return 0 for a successful query, and negative error code if query fails. -+ */ -+int dpaa2_io_query_fq_count(struct dpaa2_io *d, uint32_t fqid, -+ uint32_t *fcnt, uint32_t *bcnt); -+ -+/** -+ * dpaa2_io_query_bp_count() - Query the number of buffers currenty in a -+ * buffer pool. -+ * @d: the given DPIO object. -+ * @bpid: the index of buffer pool to be queried. -+ * @num: the queried number of buffers in the buffer pool. -+ * -+ * Return 0 for a sucessful query, and negative error code if query fails. -+ */ -+int dpaa2_io_query_bp_count(struct dpaa2_io *d, uint32_t bpid, -+ uint32_t *num); -+#endif -+#endif /* __FSL_DPAA2_IO_H */ -diff --git a/drivers/staging/fsl-mc/include/mc-cmd.h b/drivers/staging/fsl-mc/include/mc-cmd.h -new file mode 100644 -index 0000000..00f0b74 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/mc-cmd.h -@@ -0,0 +1,133 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. 
-+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+#ifndef __FSL_MC_CMD_H -+#define __FSL_MC_CMD_H -+ -+#define MC_CMD_NUM_OF_PARAMS 7 -+ -+#define MAKE_UMASK64(_width) \ -+ ((uint64_t)((_width) < 64 ? ((uint64_t)1 << (_width)) - 1 : \ -+ (uint64_t)-1)) -+ -+static inline uint64_t mc_enc(int lsoffset, int width, uint64_t val) -+{ -+ return (uint64_t)(((uint64_t)val & MAKE_UMASK64(width)) << lsoffset); -+} -+ -+static inline uint64_t mc_dec(uint64_t val, int lsoffset, int width) -+{ -+ return (uint64_t)((val >> lsoffset) & MAKE_UMASK64(width)); -+} -+ -+struct mc_command { -+ uint64_t header; -+ uint64_t params[MC_CMD_NUM_OF_PARAMS]; -+}; -+ -+enum mc_cmd_status { -+ MC_CMD_STATUS_OK = 0x0, /* Completed successfully */ -+ MC_CMD_STATUS_READY = 0x1, /* Ready to be processed */ -+ MC_CMD_STATUS_AUTH_ERR = 0x3, /* Authentication error */ -+ MC_CMD_STATUS_NO_PRIVILEGE = 0x4, /* No privilege */ -+ MC_CMD_STATUS_DMA_ERR = 0x5, /* DMA or I/O error */ -+ MC_CMD_STATUS_CONFIG_ERR = 0x6, /* Configuration error */ -+ MC_CMD_STATUS_TIMEOUT = 0x7, /* Operation timed out */ -+ MC_CMD_STATUS_NO_RESOURCE = 0x8, /* No resources */ -+ MC_CMD_STATUS_NO_MEMORY = 0x9, /* No memory available */ -+ MC_CMD_STATUS_BUSY = 0xA, /* 
Device is busy */ -+ MC_CMD_STATUS_UNSUPPORTED_OP = 0xB, /* Unsupported operation */ -+ MC_CMD_STATUS_INVALID_STATE = 0xC /* Invalid state */ -+}; -+ -+/* -+ * MC command flags -+ */ -+ -+/* High priority flag */ -+#define MC_CMD_FLAG_PRI 0x00008000 -+/* Command completion flag */ -+#define MC_CMD_FLAG_INTR_DIS 0x01000000 -+ -+/* TODO Remove following two defines after completion of flib 8.0.0 -+integration */ -+#define MC_CMD_PRI_LOW 0 /*!< Low Priority command indication */ -+#define MC_CMD_PRI_HIGH 1 /*!< High Priority command indication */ -+ -+#define MC_CMD_HDR_CMDID_O 52 /* Command ID field offset */ -+#define MC_CMD_HDR_CMDID_S 12 /* Command ID field size */ -+#define MC_CMD_HDR_TOKEN_O 38 /* Token field offset */ -+#define MC_CMD_HDR_TOKEN_S 10 /* Token field size */ -+#define MC_CMD_HDR_STATUS_O 16 /* Status field offset */ -+#define MC_CMD_HDR_STATUS_S 8 /* Status field size*/ -+#define MC_CMD_HDR_FLAGS_O 0 /* Flags field offset */ -+#define MC_CMD_HDR_FLAGS_S 32 /* Flags field size*/ -+#define MC_CMD_HDR_FLAGS_MASK 0xFF00FF00 /* Command flags mask */ -+ -+#define MC_CMD_HDR_READ_STATUS(_hdr) \ -+ ((enum mc_cmd_status)mc_dec((_hdr), \ -+ MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S)) -+ -+#define MC_CMD_HDR_READ_TOKEN(_hdr) \ -+ ((uint16_t)mc_dec((_hdr), MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S)) -+ -+#define MC_CMD_HDR_READ_FLAGS(_hdr) \ -+ ((uint32_t)mc_dec((_hdr), MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S)) -+ -+#define MC_PREP_OP(_ext, _param, _offset, _width, _type, _arg) \ -+ ((_ext)[_param] |= cpu_to_le64(mc_enc((_offset), (_width), _arg))) -+ -+#define MC_EXT_OP(_ext, _param, _offset, _width, _type, _arg) \ -+ (_arg = (_type)mc_dec(cpu_to_le64(_ext[_param]), (_offset), (_width))) -+ -+#define MC_CMD_OP(_cmd, _param, _offset, _width, _type, _arg) \ -+ ((_cmd).params[_param] |= mc_enc((_offset), (_width), _arg)) -+ -+#define MC_RSP_OP(_cmd, _param, _offset, _width, _type, _arg) \ -+ (_arg = (_type)mc_dec(_cmd.params[_param], (_offset), (_width))) -+ 
-+static inline uint64_t mc_encode_cmd_header(uint16_t cmd_id, -+ uint32_t cmd_flags, -+ uint16_t token) -+{ -+ uint64_t hdr; -+ -+ hdr = mc_enc(MC_CMD_HDR_CMDID_O, MC_CMD_HDR_CMDID_S, cmd_id); -+ hdr |= mc_enc(MC_CMD_HDR_FLAGS_O, MC_CMD_HDR_FLAGS_S, -+ (cmd_flags & MC_CMD_HDR_FLAGS_MASK)); -+ hdr |= mc_enc(MC_CMD_HDR_TOKEN_O, MC_CMD_HDR_TOKEN_S, token); -+ hdr |= mc_enc(MC_CMD_HDR_STATUS_O, MC_CMD_HDR_STATUS_S, -+ MC_CMD_STATUS_READY); -+ -+ return hdr; -+} -+ -+#endif /* __FSL_MC_CMD_H */ -diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h -new file mode 100644 -index 0000000..1246ca8 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/mc-private.h -@@ -0,0 +1,168 @@ -+/* -+ * Freescale Management Complex (MC) bus private declarations -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+#ifndef _FSL_MC_PRIVATE_H_ -+#define _FSL_MC_PRIVATE_H_ -+ -+#include "../include/mc.h" -+#include -+#include -+ -+#define FSL_MC_DPRC_DRIVER_NAME "fsl_mc_dprc" -+ -+#define FSL_MC_DEVICE_MATCH(_mc_dev, _obj_desc) \ -+ (strcmp((_mc_dev)->obj_desc.type, (_obj_desc)->type) == 0 && \ -+ (_mc_dev)->obj_desc.id == (_obj_desc)->id) -+ -+#define FSL_MC_IS_ALLOCATABLE(_obj_type) \ -+ (strcmp(_obj_type, "dpbp") == 0 || \ -+ strcmp(_obj_type, "dpmcp") == 0 || \ -+ strcmp(_obj_type, "dpcon") == 0) -+ -+/** -+ * Maximum number of total IRQs that can be pre-allocated for an MC bus' -+ * IRQ pool -+ */ -+#define FSL_MC_IRQ_POOL_MAX_TOTAL_IRQS 256 -+ -+/** -+ * Maximum number of extra IRQs pre-reallocated for an MC bus' IRQ pool, -+ * to be used by dynamically created MC objects -+ */ -+#define FSL_MC_IRQ_POOL_MAX_EXTRA_IRQS 64 -+ -+/** -+ * struct fsl_mc - Private data of a "fsl,qoriq-mc" platform device -+ * @root_mc_bus_dev: MC object device representing the root DPRC -+ * @irq_domain: IRQ domain for the fsl-mc bus type -+ * @gic_supported: boolean flag that indicates if the GIC interrupt controller -+ * is supported. 
-+ * @num_translation_ranges: number of entries in addr_translation_ranges -+ * @addr_translation_ranges: array of bus to system address translation ranges -+ */ -+struct fsl_mc { -+ struct fsl_mc_device *root_mc_bus_dev; -+ struct irq_domain *irq_domain; -+ bool gic_supported; -+ uint8_t num_translation_ranges; -+ struct fsl_mc_addr_translation_range *translation_ranges; -+}; -+ -+/** -+ * enum mc_region_types - Types of MC MMIO regions -+ */ -+enum fsl_mc_region_types { -+ FSL_MC_PORTAL = 0x0, -+ FSL_QBMAN_PORTAL, -+ -+ /* -+ * New offset types must be added above this entry -+ */ -+ FSL_NUM_MC_OFFSET_TYPES -+}; -+ -+/** -+ * struct fsl_mc_addr_translation_range - bus to system address translation -+ * range -+ * @mc_region_type: Type of MC region for the range being translated -+ * @start_mc_offset: Start MC offset of the range being translated -+ * @end_mc_offset: MC offset of the first byte after the range (last MC -+ * offset of the range is end_mc_offset - 1) -+ * @start_phys_addr: system physical address corresponding to start_mc_addr -+ */ -+struct fsl_mc_addr_translation_range { -+ enum fsl_mc_region_types mc_region_type; -+ uint64_t start_mc_offset; -+ uint64_t end_mc_offset; -+ phys_addr_t start_phys_addr; -+}; -+ -+/** -+ * struct fsl_mc_resource_pool - Pool of MC resources of a given -+ * type -+ * @type: type of resources in the pool -+ * @max_count: maximum number of resources in the pool -+ * @free_count: number of free resources in the pool -+ * @mutex: mutex to serialize access to the pool's free list -+ * @free_list: anchor node of list of free resources in the pool -+ * @mc_bus: pointer to the MC bus that owns this resource pool -+ */ -+struct fsl_mc_resource_pool { -+ enum fsl_mc_pool_type type; -+ int16_t max_count; -+ int16_t free_count; -+ struct mutex mutex; /* serializes access to free_list */ -+ struct list_head free_list; -+ struct fsl_mc_bus *mc_bus; -+}; -+ -+/** -+ * struct fsl_mc_bus - logical bus that corresponds to a physical DPRC 
-+ * @mc_dev: fsl-mc device for the bus device itself. -+ * @resource_pools: array of resource pools (one pool per resource type) -+ * for this MC bus. These resources represent allocatable entities -+ * from the physical DPRC. -+ * @atomic_mc_io: mc_io object to be used to send DPRC commands to the MC -+ * in atomic context (e.g., when programming MSIs in program_msi_at_mc()). -+ * @atomic_dprc_handle: DPRC handle opened using the atomic_mc_io's portal. -+ * @irq_resources: Pointer to array of IRQ objects for the IRQ pool. -+ * @scan_mutex: Serializes bus scanning -+ * @dprc_attr: DPRC attributes -+ */ -+struct fsl_mc_bus { -+ struct fsl_mc_device mc_dev; -+ struct fsl_mc_resource_pool resource_pools[FSL_MC_NUM_POOL_TYPES]; -+ struct fsl_mc_device_irq *irq_resources; -+ struct fsl_mc_io *atomic_mc_io; -+ uint16_t atomic_dprc_handle; -+ struct mutex scan_mutex; /* serializes bus scanning */ -+ struct dprc_attributes dprc_attr; -+}; -+ -+#define to_fsl_mc_bus(_mc_dev) \ -+ container_of(_mc_dev, struct fsl_mc_bus, mc_dev) -+ -+int __must_check fsl_mc_device_add(struct dprc_obj_desc *obj_desc, -+ struct fsl_mc_io *mc_io, -+ struct device *parent_dev, -+ const char *driver_override, -+ struct fsl_mc_device **new_mc_dev); -+ -+void fsl_mc_device_remove(struct fsl_mc_device *mc_dev); -+ -+int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, -+ const char *driver_override, -+ unsigned int *total_irq_count); -+ -+int __init dprc_driver_init(void); -+ -+void __exit dprc_driver_exit(void); -+ -+int __init fsl_mc_allocator_driver_init(void); -+ -+void __exit fsl_mc_allocator_driver_exit(void); -+ -+int __must_check fsl_mc_resource_allocate(struct fsl_mc_bus *mc_bus, -+ enum fsl_mc_pool_type pool_type, -+ struct fsl_mc_resource -+ **new_resource); -+ -+void fsl_mc_resource_free(struct fsl_mc_resource *resource); -+ -+int __must_check fsl_mc_populate_irq_pool(struct fsl_mc_bus *mc_bus, -+ unsigned int irq_count); -+ -+void fsl_mc_cleanup_irq_pool(struct fsl_mc_bus *mc_bus); 
-+ -+void dprc_init_all_resource_pools(struct fsl_mc_device *mc_bus_dev); -+ -+void dprc_cleanup_all_resource_pools(struct fsl_mc_device *mc_bus_dev); -+ -+#endif /* _FSL_MC_PRIVATE_H_ */ -diff --git a/drivers/staging/fsl-mc/include/mc-sys.h b/drivers/staging/fsl-mc/include/mc-sys.h -new file mode 100644 -index 0000000..b08df85 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/mc-sys.h -@@ -0,0 +1,128 @@ -+/* Copyright 2013-2014 Freescale Semiconductor Inc. -+ * -+ * Interface of the I/O services to send MC commands to the MC hardware -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. -+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. -+ */ -+ -+#ifndef _FSL_MC_SYS_H -+#define _FSL_MC_SYS_H -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/** -+ * Bit masks for a MC I/O object (struct fsl_mc_io) flags -+ */ -+#define FSL_MC_IO_ATOMIC_CONTEXT_PORTAL 0x0001 -+ -+struct fsl_mc_resource; -+struct mc_command; -+ -+/** -+ * struct fsl_mc_io - MC I/O object to be passed-in to mc_send_command() -+ * @dev: device associated with this Mc I/O object -+ * @flags: flags for mc_send_command() -+ * @portal_size: MC command portal size in bytes -+ * @portal_phys_addr: MC command portal physical address -+ * @portal_virt_addr: MC command portal virtual address -+ * @dpmcp_dev: pointer to the DPMCP device associated with the MC portal. -+ * @mc_command_done_irq_armed: Flag indicating that the MC command done IRQ -+ * is currently armed. -+ * @mc_command_done_completion: Completion variable to be signaled when an MC -+ * command sent to the MC fw is completed. -+ * -+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not -+ * set: -+ * @mutex: Mutex to serialize mc_send_command() calls that use the same MC -+ * portal, if the fsl_mc_io object was created with the -+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag off. mc_send_command() calls for this -+ * fsl_mc_io object must be made only from non-atomic context. 
-+ * @mc_command_done_completion: Linux completion variable to be signaled -+ * when a DPMCP command completion interrupts is received. -+ * @mc_command_done_irq_armed: Boolean flag that indicates if interrupts have -+ * been successfully configured for the corresponding DPMCP object. -+ * -+ * Fields are only meaningful if the FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is -+ * set: -+ * @spinlock: Spinlock to serialize mc_send_command() calls that use the same MC -+ * portal, if the fsl_mc_io object was created with the -+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag on. mc_send_command() calls for this -+ * fsl_mc_io object can be made from atomic or non-atomic context. -+ */ -+struct fsl_mc_io { -+ struct device *dev; -+ uint16_t flags; -+ uint16_t portal_size; -+ phys_addr_t portal_phys_addr; -+ void __iomem *portal_virt_addr; -+ struct fsl_mc_device *dpmcp_dev; -+ union { -+ /* -+ * These fields are only meaningful if the -+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is not set -+ */ -+ struct { -+ struct mutex mutex; /* serializes mc_send_command() */ -+ struct completion mc_command_done_completion; -+ bool mc_command_done_irq_armed; -+ }; -+ -+ /* -+ * This field is only meaningful if the -+ * FSL_MC_IO_ATOMIC_CONTEXT_PORTAL flag is set -+ */ -+ spinlock_t spinlock; /* serializes mc_send_command() */ -+ }; -+}; -+ -+int __must_check fsl_create_mc_io(struct device *dev, -+ phys_addr_t mc_portal_phys_addr, -+ uint32_t mc_portal_size, -+ struct fsl_mc_device *dpmcp_dev, -+ uint32_t flags, struct fsl_mc_io **new_mc_io); -+ -+void fsl_destroy_mc_io(struct fsl_mc_io *mc_io); -+ -+int fsl_mc_io_set_dpmcp(struct fsl_mc_io *mc_io, -+ struct fsl_mc_device *dpmcp_dev); -+ -+void fsl_mc_io_unset_dpmcp(struct fsl_mc_io *mc_io); -+ -+int fsl_mc_io_setup_dpmcp_irq(struct fsl_mc_io *mc_io); -+ -+int mc_send_command(struct fsl_mc_io *mc_io, struct mc_command *cmd); -+ -+#endif /* _FSL_MC_SYS_H */ -diff --git a/drivers/staging/fsl-mc/include/mc.h b/drivers/staging/fsl-mc/include/mc.h -new 
file mode 100644 -index 0000000..bbeb121 ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/mc.h -@@ -0,0 +1,244 @@ -+/* -+ * Freescale Management Complex (MC) bus public interface -+ * -+ * Copyright (C) 2014 Freescale Semiconductor, Inc. -+ * Author: German Rivera -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+#ifndef _FSL_MC_H_ -+#define _FSL_MC_H_ -+ -+#include -+#include -+#include -+#include -+#include -+#include "../include/dprc.h" -+ -+#define FSL_MC_VENDOR_FREESCALE 0x1957 -+ -+struct fsl_mc_device; -+struct fsl_mc_io; -+ -+/** -+ * struct fsl_mc_driver - MC object device driver object -+ * @driver: Generic device driver -+ * @match_id_table: table of supported device matching Ids -+ * @probe: Function called when a device is added -+ * @remove: Function called when a device is removed -+ * @shutdown: Function called at shutdown time to quiesce the device -+ * @suspend: Function called when a device is stopped -+ * @resume: Function called when a device is resumed -+ * -+ * Generic DPAA device driver object for device drivers that are registered -+ * with a DPRC bus. This structure is to be embedded in each device-specific -+ * driver structure. 
-+ */ -+struct fsl_mc_driver { -+ struct device_driver driver; -+ const struct fsl_mc_device_match_id *match_id_table; -+ int (*probe)(struct fsl_mc_device *dev); -+ int (*remove)(struct fsl_mc_device *dev); -+ void (*shutdown)(struct fsl_mc_device *dev); -+ int (*suspend)(struct fsl_mc_device *dev, pm_message_t state); -+ int (*resume)(struct fsl_mc_device *dev); -+}; -+ -+#define to_fsl_mc_driver(_drv) \ -+ container_of(_drv, struct fsl_mc_driver, driver) -+ -+/** -+ * struct fsl_mc_device_match_id - MC object device Id entry for driver matching -+ * @vendor: vendor ID -+ * @obj_type: MC object type -+ * @ver_major: MC object version major number -+ * @ver_minor: MC object version minor number -+ * -+ * Type of entries in the "device Id" table for MC object devices supported by -+ * a MC object device driver. The last entry of the table has vendor set to 0x0 -+ */ -+struct fsl_mc_device_match_id { -+ uint16_t vendor; -+ const char obj_type[16]; -+ uint32_t ver_major; -+ uint32_t ver_minor; -+}; -+ -+/** -+ * enum fsl_mc_pool_type - Types of allocatable MC bus resources -+ * -+ * Entries in these enum are used as indices in the array of resource -+ * pools of an fsl_mc_bus object. -+ */ -+enum fsl_mc_pool_type { -+ FSL_MC_POOL_DPMCP = 0x0, /* corresponds to "dpmcp" in the MC */ -+ FSL_MC_POOL_DPBP, /* corresponds to "dpbp" in the MC */ -+ FSL_MC_POOL_DPCON, /* corresponds to "dpcon" in the MC */ -+ FSL_MC_POOL_IRQ, -+ -+ /* -+ * NOTE: New resource pool types must be added before this entry -+ */ -+ FSL_MC_NUM_POOL_TYPES -+}; -+ -+/** -+ * struct fsl_mc_resource - MC generic resource -+ * @type: type of resource -+ * @id: unique MC resource Id within the resources of the same type -+ * @data: pointer to resource-specific data if the resource is currently -+ * allocated, or NULL if the resource is not currently allocated. -+ * @parent_pool: pointer to the parent resource pool from which this -+ * resource is allocated from. 
-+ * @node: Node in the free list of the corresponding resource pool -+ * -+ * NOTE: This structure is to be embedded as a field of specific -+ * MC resource structures. -+ */ -+struct fsl_mc_resource { -+ enum fsl_mc_pool_type type; -+ int32_t id; -+ void *data; -+ struct fsl_mc_resource_pool *parent_pool; -+ struct list_head node; -+}; -+ -+/** -+ * struct fsl_mc_device_irq - MC object device message-based interrupt -+ * @msi_paddr: message-based interrupt physical address -+ * @msi_value: message-based interrupt data value -+ * @irq_number: Linux IRQ number assigned to the interrupt -+ * @mc_dev: MC object device that owns this interrupt -+ * @dev_irq_index: device-relative IRQ index -+ * @resource: MC generic resource associated with the interrupt -+ */ -+struct fsl_mc_device_irq { -+ phys_addr_t msi_paddr; -+ uint32_t msi_value; -+ uint32_t irq_number; -+ struct fsl_mc_device *mc_dev; -+ uint8_t dev_irq_index; -+ struct fsl_mc_resource resource; -+}; -+ -+#define to_fsl_mc_irq(_mc_resource) \ -+ container_of(_mc_resource, struct fsl_mc_device_irq, resource) -+ -+/** -+ * Bit masks for a MC object device (struct fsl_mc_device) flags -+ */ -+#define FSL_MC_IS_DPRC 0x0001 -+ -+/** -+ * root dprc's parent is a platform device -+ * that platform device's bus type is platform_bus_type. -+ */ -+#define is_root_dprc(dev) \ -+ ((to_fsl_mc_device(dev)->flags & FSL_MC_IS_DPRC) && \ -+ ((dev)->bus == &fsl_mc_bus_type) && \ -+ ((dev)->parent->bus == &platform_bus_type)) -+ -+/** -+ * Default DMA mask for devices on a fsl-mc bus -+ */ -+#define FSL_MC_DEFAULT_DMA_MASK (~0ULL) -+ -+/** -+ * struct fsl_mc_device - MC object device object -+ * @dev: Linux driver model device object -+ * @dma_mask: Default DMA mask -+ * @flags: MC object device flags -+ * @icid: Isolation context ID for the device -+ * @mc_handle: MC handle for the corresponding MC object opened -+ * @mc_io: Pointer to MC IO object assigned to this device or -+ * NULL if none. 
-+ * @obj_desc: MC description of the DPAA device -+ * @regions: pointer to array of MMIO region entries -+ * @irqs: pointer to array of pointers to interrupts allocated to this device -+ * @resource: generic resource associated with this MC object device, if any. -+ * @driver_override: Driver name to force a match -+ * -+ * Generic device object for MC object devices that are "attached" to a -+ * MC bus. -+ * -+ * NOTES: -+ * - For a non-DPRC object its icid is the same as its parent DPRC's icid. -+ * - The SMMU notifier callback gets invoked after device_add() has been -+ * called for an MC object device, but before the device-specific probe -+ * callback gets called. -+ * - DP_OBJ_DPRC objects are the only MC objects that have built-in MC -+ * portals. For all other MC objects, their device drivers are responsible for -+ * allocating MC portals for them by calling fsl_mc_portal_allocate(). -+ * - Some types of MC objects (e.g., DP_OBJ_DPBP, DP_OBJ_DPCON) are -+ * treated as resources that can be allocated/deallocated from the -+ * corresponding resource pool in the object's parent DPRC, using the -+ * fsl_mc_object_allocate()/fsl_mc_object_free() functions. These MC objects -+ * are known as "allocatable" objects. For them, the corresponding -+ * fsl_mc_device's 'resource' points to the associated resource object. -+ * For MC objects that are not allocatable (e.g., DP_OBJ_DPRC, DP_OBJ_DPNI), -+ * 'resource' is NULL. -+ */ -+struct fsl_mc_device { -+ struct device dev; -+ uint64_t dma_mask; -+ uint16_t flags; -+ uint16_t icid; -+ uint16_t mc_handle; -+ struct fsl_mc_io *mc_io; -+ struct dprc_obj_desc obj_desc; -+ struct resource *regions; -+ struct fsl_mc_device_irq **irqs; -+ struct fsl_mc_resource *resource; -+ const char *driver_override; -+}; -+ -+#define to_fsl_mc_device(_dev) \ -+ container_of(_dev, struct fsl_mc_device, dev) -+ -+/* -+ * module_fsl_mc_driver() - Helper macro for drivers that don't do -+ * anything special in module init/exit. 
This eliminates a lot of -+ * boilerplate. Each module may only use this macro once, and -+ * calling it replaces module_init() and module_exit() -+ */ -+#define module_fsl_mc_driver(__fsl_mc_driver) \ -+ module_driver(__fsl_mc_driver, fsl_mc_driver_register, \ -+ fsl_mc_driver_unregister) -+ -+/* -+ * Macro to avoid include chaining to get THIS_MODULE -+ */ -+#define fsl_mc_driver_register(drv) \ -+ __fsl_mc_driver_register(drv, THIS_MODULE) -+ -+int __must_check __fsl_mc_driver_register(struct fsl_mc_driver *fsl_mc_driver, -+ struct module *owner); -+ -+void fsl_mc_driver_unregister(struct fsl_mc_driver *driver); -+ -+bool fsl_mc_interrupts_supported(void); -+ -+int __must_check fsl_mc_portal_allocate(struct fsl_mc_device *mc_dev, -+ uint16_t mc_io_flags, -+ struct fsl_mc_io **new_mc_io); -+ -+void fsl_mc_portal_free(struct fsl_mc_io *mc_io); -+ -+int fsl_mc_portal_reset(struct fsl_mc_io *mc_io); -+ -+int __must_check fsl_mc_object_allocate(struct fsl_mc_device *mc_dev, -+ enum fsl_mc_pool_type pool_type, -+ struct fsl_mc_device **new_mc_adev); -+ -+void fsl_mc_object_free(struct fsl_mc_device *mc_adev); -+ -+int __must_check fsl_mc_allocate_irqs(struct fsl_mc_device *mc_dev); -+ -+void fsl_mc_free_irqs(struct fsl_mc_device *mc_dev); -+ -+extern struct bus_type fsl_mc_bus_type; -+ -+#endif /* _FSL_MC_H_ */ -diff --git a/drivers/staging/fsl-mc/include/net.h b/drivers/staging/fsl-mc/include/net.h -new file mode 100644 -index 0000000..7480f6a ---- /dev/null -+++ b/drivers/staging/fsl-mc/include/net.h -@@ -0,0 +1,481 @@ -+/* Copyright 2013-2015 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+#ifndef __FSL_NET_H -+#define __FSL_NET_H -+ -+#define LAST_HDR_INDEX 0xFFFFFFFF -+ -+/*****************************************************************************/ -+/* Protocol fields */ -+/*****************************************************************************/ -+ -+/************************* Ethernet fields *********************************/ -+#define NH_FLD_ETH_DA (1) -+#define NH_FLD_ETH_SA (NH_FLD_ETH_DA << 1) -+#define NH_FLD_ETH_LENGTH (NH_FLD_ETH_DA << 2) -+#define NH_FLD_ETH_TYPE (NH_FLD_ETH_DA << 3) -+#define NH_FLD_ETH_FINAL_CKSUM (NH_FLD_ETH_DA << 4) -+#define NH_FLD_ETH_PADDING (NH_FLD_ETH_DA << 5) -+#define NH_FLD_ETH_ALL_FIELDS ((NH_FLD_ETH_DA << 6) - 1) -+ -+#define NH_FLD_ETH_ADDR_SIZE 6 -+ -+/*************************** VLAN fields ***********************************/ -+#define NH_FLD_VLAN_VPRI (1) -+#define NH_FLD_VLAN_CFI (NH_FLD_VLAN_VPRI << 1) -+#define NH_FLD_VLAN_VID (NH_FLD_VLAN_VPRI << 2) -+#define NH_FLD_VLAN_LENGTH (NH_FLD_VLAN_VPRI << 3) -+#define NH_FLD_VLAN_TYPE (NH_FLD_VLAN_VPRI << 4) -+#define NH_FLD_VLAN_ALL_FIELDS ((NH_FLD_VLAN_VPRI << 5) - 1) -+ -+#define NH_FLD_VLAN_TCI (NH_FLD_VLAN_VPRI | \ -+ NH_FLD_VLAN_CFI | \ -+ NH_FLD_VLAN_VID) -+ -+/************************ IP (generic) fields ******************************/ -+#define NH_FLD_IP_VER (1) -+#define NH_FLD_IP_DSCP (NH_FLD_IP_VER << 2) -+#define NH_FLD_IP_ECN (NH_FLD_IP_VER << 3) -+#define NH_FLD_IP_PROTO (NH_FLD_IP_VER << 4) -+#define NH_FLD_IP_SRC (NH_FLD_IP_VER << 5) -+#define NH_FLD_IP_DST (NH_FLD_IP_VER << 6) -+#define NH_FLD_IP_TOS_TC (NH_FLD_IP_VER << 7) -+#define NH_FLD_IP_ID (NH_FLD_IP_VER << 8) -+#define NH_FLD_IP_ALL_FIELDS ((NH_FLD_IP_VER << 9) - 1) -+ -+#define NH_FLD_IP_PROTO_SIZE 1 -+ -+/***************************** IPV4 fields *********************************/ -+#define NH_FLD_IPV4_VER (1) -+#define NH_FLD_IPV4_HDR_LEN (NH_FLD_IPV4_VER << 1) -+#define NH_FLD_IPV4_TOS (NH_FLD_IPV4_VER << 2) -+#define NH_FLD_IPV4_TOTAL_LEN (NH_FLD_IPV4_VER << 
3) -+#define NH_FLD_IPV4_ID (NH_FLD_IPV4_VER << 4) -+#define NH_FLD_IPV4_FLAG_D (NH_FLD_IPV4_VER << 5) -+#define NH_FLD_IPV4_FLAG_M (NH_FLD_IPV4_VER << 6) -+#define NH_FLD_IPV4_OFFSET (NH_FLD_IPV4_VER << 7) -+#define NH_FLD_IPV4_TTL (NH_FLD_IPV4_VER << 8) -+#define NH_FLD_IPV4_PROTO (NH_FLD_IPV4_VER << 9) -+#define NH_FLD_IPV4_CKSUM (NH_FLD_IPV4_VER << 10) -+#define NH_FLD_IPV4_SRC_IP (NH_FLD_IPV4_VER << 11) -+#define NH_FLD_IPV4_DST_IP (NH_FLD_IPV4_VER << 12) -+#define NH_FLD_IPV4_OPTS (NH_FLD_IPV4_VER << 13) -+#define NH_FLD_IPV4_OPTS_COUNT (NH_FLD_IPV4_VER << 14) -+#define NH_FLD_IPV4_ALL_FIELDS ((NH_FLD_IPV4_VER << 15) - 1) -+ -+#define NH_FLD_IPV4_ADDR_SIZE 4 -+#define NH_FLD_IPV4_PROTO_SIZE 1 -+ -+/***************************** IPV6 fields *********************************/ -+#define NH_FLD_IPV6_VER (1) -+#define NH_FLD_IPV6_TC (NH_FLD_IPV6_VER << 1) -+#define NH_FLD_IPV6_SRC_IP (NH_FLD_IPV6_VER << 2) -+#define NH_FLD_IPV6_DST_IP (NH_FLD_IPV6_VER << 3) -+#define NH_FLD_IPV6_NEXT_HDR (NH_FLD_IPV6_VER << 4) -+#define NH_FLD_IPV6_FL (NH_FLD_IPV6_VER << 5) -+#define NH_FLD_IPV6_HOP_LIMIT (NH_FLD_IPV6_VER << 6) -+#define NH_FLD_IPV6_ID (NH_FLD_IPV6_VER << 7) -+#define NH_FLD_IPV6_ALL_FIELDS ((NH_FLD_IPV6_VER << 8) - 1) -+ -+#define NH_FLD_IPV6_ADDR_SIZE 16 -+#define NH_FLD_IPV6_NEXT_HDR_SIZE 1 -+ -+/***************************** ICMP fields *********************************/ -+#define NH_FLD_ICMP_TYPE (1) -+#define NH_FLD_ICMP_CODE (NH_FLD_ICMP_TYPE << 1) -+#define NH_FLD_ICMP_CKSUM (NH_FLD_ICMP_TYPE << 2) -+#define NH_FLD_ICMP_ID (NH_FLD_ICMP_TYPE << 3) -+#define NH_FLD_ICMP_SQ_NUM (NH_FLD_ICMP_TYPE << 4) -+#define NH_FLD_ICMP_ALL_FIELDS ((NH_FLD_ICMP_TYPE << 5) - 1) -+ -+#define NH_FLD_ICMP_CODE_SIZE 1 -+#define NH_FLD_ICMP_TYPE_SIZE 1 -+ -+/***************************** IGMP fields *********************************/ -+#define NH_FLD_IGMP_VERSION (1) -+#define NH_FLD_IGMP_TYPE (NH_FLD_IGMP_VERSION << 1) -+#define NH_FLD_IGMP_CKSUM (NH_FLD_IGMP_VERSION << 2) 
-+#define NH_FLD_IGMP_DATA (NH_FLD_IGMP_VERSION << 3) -+#define NH_FLD_IGMP_ALL_FIELDS ((NH_FLD_IGMP_VERSION << 4) - 1) -+ -+/***************************** TCP fields **********************************/ -+#define NH_FLD_TCP_PORT_SRC (1) -+#define NH_FLD_TCP_PORT_DST (NH_FLD_TCP_PORT_SRC << 1) -+#define NH_FLD_TCP_SEQ (NH_FLD_TCP_PORT_SRC << 2) -+#define NH_FLD_TCP_ACK (NH_FLD_TCP_PORT_SRC << 3) -+#define NH_FLD_TCP_OFFSET (NH_FLD_TCP_PORT_SRC << 4) -+#define NH_FLD_TCP_FLAGS (NH_FLD_TCP_PORT_SRC << 5) -+#define NH_FLD_TCP_WINDOW (NH_FLD_TCP_PORT_SRC << 6) -+#define NH_FLD_TCP_CKSUM (NH_FLD_TCP_PORT_SRC << 7) -+#define NH_FLD_TCP_URGPTR (NH_FLD_TCP_PORT_SRC << 8) -+#define NH_FLD_TCP_OPTS (NH_FLD_TCP_PORT_SRC << 9) -+#define NH_FLD_TCP_OPTS_COUNT (NH_FLD_TCP_PORT_SRC << 10) -+#define NH_FLD_TCP_ALL_FIELDS ((NH_FLD_TCP_PORT_SRC << 11) - 1) -+ -+#define NH_FLD_TCP_PORT_SIZE 2 -+ -+/***************************** UDP fields **********************************/ -+#define NH_FLD_UDP_PORT_SRC (1) -+#define NH_FLD_UDP_PORT_DST (NH_FLD_UDP_PORT_SRC << 1) -+#define NH_FLD_UDP_LEN (NH_FLD_UDP_PORT_SRC << 2) -+#define NH_FLD_UDP_CKSUM (NH_FLD_UDP_PORT_SRC << 3) -+#define NH_FLD_UDP_ALL_FIELDS ((NH_FLD_UDP_PORT_SRC << 4) - 1) -+ -+#define NH_FLD_UDP_PORT_SIZE 2 -+ -+/*************************** UDP-lite fields *******************************/ -+#define NH_FLD_UDP_LITE_PORT_SRC (1) -+#define NH_FLD_UDP_LITE_PORT_DST (NH_FLD_UDP_LITE_PORT_SRC << 1) -+#define NH_FLD_UDP_LITE_ALL_FIELDS \ -+ ((NH_FLD_UDP_LITE_PORT_SRC << 2) - 1) -+ -+#define NH_FLD_UDP_LITE_PORT_SIZE 2 -+ -+/*************************** UDP-encap-ESP fields **************************/ -+#define NH_FLD_UDP_ENC_ESP_PORT_SRC (1) -+#define NH_FLD_UDP_ENC_ESP_PORT_DST (NH_FLD_UDP_ENC_ESP_PORT_SRC << 1) -+#define NH_FLD_UDP_ENC_ESP_LEN (NH_FLD_UDP_ENC_ESP_PORT_SRC << 2) -+#define NH_FLD_UDP_ENC_ESP_CKSUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 3) -+#define NH_FLD_UDP_ENC_ESP_SPI (NH_FLD_UDP_ENC_ESP_PORT_SRC << 4) -+#define 
NH_FLD_UDP_ENC_ESP_SEQUENCE_NUM (NH_FLD_UDP_ENC_ESP_PORT_SRC << 5) -+#define NH_FLD_UDP_ENC_ESP_ALL_FIELDS \ -+ ((NH_FLD_UDP_ENC_ESP_PORT_SRC << 6) - 1) -+ -+#define NH_FLD_UDP_ENC_ESP_PORT_SIZE 2 -+#define NH_FLD_UDP_ENC_ESP_SPI_SIZE 4 -+ -+/***************************** SCTP fields *********************************/ -+#define NH_FLD_SCTP_PORT_SRC (1) -+#define NH_FLD_SCTP_PORT_DST (NH_FLD_SCTP_PORT_SRC << 1) -+#define NH_FLD_SCTP_VER_TAG (NH_FLD_SCTP_PORT_SRC << 2) -+#define NH_FLD_SCTP_CKSUM (NH_FLD_SCTP_PORT_SRC << 3) -+#define NH_FLD_SCTP_ALL_FIELDS ((NH_FLD_SCTP_PORT_SRC << 4) - 1) -+ -+#define NH_FLD_SCTP_PORT_SIZE 2 -+ -+/***************************** DCCP fields *********************************/ -+#define NH_FLD_DCCP_PORT_SRC (1) -+#define NH_FLD_DCCP_PORT_DST (NH_FLD_DCCP_PORT_SRC << 1) -+#define NH_FLD_DCCP_ALL_FIELDS ((NH_FLD_DCCP_PORT_SRC << 2) - 1) -+ -+#define NH_FLD_DCCP_PORT_SIZE 2 -+ -+/***************************** IPHC fields *********************************/ -+#define NH_FLD_IPHC_CID (1) -+#define NH_FLD_IPHC_CID_TYPE (NH_FLD_IPHC_CID << 1) -+#define NH_FLD_IPHC_HCINDEX (NH_FLD_IPHC_CID << 2) -+#define NH_FLD_IPHC_GEN (NH_FLD_IPHC_CID << 3) -+#define NH_FLD_IPHC_D_BIT (NH_FLD_IPHC_CID << 4) -+#define NH_FLD_IPHC_ALL_FIELDS ((NH_FLD_IPHC_CID << 5) - 1) -+ -+/***************************** SCTP fields *********************************/ -+#define NH_FLD_SCTP_CHUNK_DATA_TYPE (1) -+#define NH_FLD_SCTP_CHUNK_DATA_FLAGS (NH_FLD_SCTP_CHUNK_DATA_TYPE << 1) -+#define NH_FLD_SCTP_CHUNK_DATA_LENGTH (NH_FLD_SCTP_CHUNK_DATA_TYPE << 2) -+#define NH_FLD_SCTP_CHUNK_DATA_TSN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 3) -+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_ID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 4) -+#define NH_FLD_SCTP_CHUNK_DATA_STREAM_SQN (NH_FLD_SCTP_CHUNK_DATA_TYPE << 5) -+#define NH_FLD_SCTP_CHUNK_DATA_PAYLOAD_PID (NH_FLD_SCTP_CHUNK_DATA_TYPE << 6) -+#define NH_FLD_SCTP_CHUNK_DATA_UNORDERED (NH_FLD_SCTP_CHUNK_DATA_TYPE << 7) -+#define NH_FLD_SCTP_CHUNK_DATA_BEGGINING 
(NH_FLD_SCTP_CHUNK_DATA_TYPE << 8) -+#define NH_FLD_SCTP_CHUNK_DATA_END (NH_FLD_SCTP_CHUNK_DATA_TYPE << 9) -+#define NH_FLD_SCTP_CHUNK_DATA_ALL_FIELDS \ -+ ((NH_FLD_SCTP_CHUNK_DATA_TYPE << 10) - 1) -+ -+/*************************** L2TPV2 fields *********************************/ -+#define NH_FLD_L2TPV2_TYPE_BIT (1) -+#define NH_FLD_L2TPV2_LENGTH_BIT (NH_FLD_L2TPV2_TYPE_BIT << 1) -+#define NH_FLD_L2TPV2_SEQUENCE_BIT (NH_FLD_L2TPV2_TYPE_BIT << 2) -+#define NH_FLD_L2TPV2_OFFSET_BIT (NH_FLD_L2TPV2_TYPE_BIT << 3) -+#define NH_FLD_L2TPV2_PRIORITY_BIT (NH_FLD_L2TPV2_TYPE_BIT << 4) -+#define NH_FLD_L2TPV2_VERSION (NH_FLD_L2TPV2_TYPE_BIT << 5) -+#define NH_FLD_L2TPV2_LEN (NH_FLD_L2TPV2_TYPE_BIT << 6) -+#define NH_FLD_L2TPV2_TUNNEL_ID (NH_FLD_L2TPV2_TYPE_BIT << 7) -+#define NH_FLD_L2TPV2_SESSION_ID (NH_FLD_L2TPV2_TYPE_BIT << 8) -+#define NH_FLD_L2TPV2_NS (NH_FLD_L2TPV2_TYPE_BIT << 9) -+#define NH_FLD_L2TPV2_NR (NH_FLD_L2TPV2_TYPE_BIT << 10) -+#define NH_FLD_L2TPV2_OFFSET_SIZE (NH_FLD_L2TPV2_TYPE_BIT << 11) -+#define NH_FLD_L2TPV2_FIRST_BYTE (NH_FLD_L2TPV2_TYPE_BIT << 12) -+#define NH_FLD_L2TPV2_ALL_FIELDS \ -+ ((NH_FLD_L2TPV2_TYPE_BIT << 13) - 1) -+ -+/*************************** L2TPV3 fields *********************************/ -+#define NH_FLD_L2TPV3_CTRL_TYPE_BIT (1) -+#define NH_FLD_L2TPV3_CTRL_LENGTH_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 1) -+#define NH_FLD_L2TPV3_CTRL_SEQUENCE_BIT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 2) -+#define NH_FLD_L2TPV3_CTRL_VERSION (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 3) -+#define NH_FLD_L2TPV3_CTRL_LENGTH (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 4) -+#define NH_FLD_L2TPV3_CTRL_CONTROL (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 5) -+#define NH_FLD_L2TPV3_CTRL_SENT (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 6) -+#define NH_FLD_L2TPV3_CTRL_RECV (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 7) -+#define NH_FLD_L2TPV3_CTRL_FIRST_BYTE (NH_FLD_L2TPV3_CTRL_TYPE_BIT << 8) -+#define NH_FLD_L2TPV3_CTRL_ALL_FIELDS \ -+ ((NH_FLD_L2TPV3_CTRL_TYPE_BIT << 9) - 1) -+ -+#define NH_FLD_L2TPV3_SESS_TYPE_BIT (1) 
-+#define NH_FLD_L2TPV3_SESS_VERSION (NH_FLD_L2TPV3_SESS_TYPE_BIT << 1) -+#define NH_FLD_L2TPV3_SESS_ID (NH_FLD_L2TPV3_SESS_TYPE_BIT << 2) -+#define NH_FLD_L2TPV3_SESS_COOKIE (NH_FLD_L2TPV3_SESS_TYPE_BIT << 3) -+#define NH_FLD_L2TPV3_SESS_ALL_FIELDS \ -+ ((NH_FLD_L2TPV3_SESS_TYPE_BIT << 4) - 1) -+ -+/**************************** PPP fields ***********************************/ -+#define NH_FLD_PPP_PID (1) -+#define NH_FLD_PPP_COMPRESSED (NH_FLD_PPP_PID << 1) -+#define NH_FLD_PPP_ALL_FIELDS ((NH_FLD_PPP_PID << 2) - 1) -+ -+/************************** PPPoE fields ***********************************/ -+#define NH_FLD_PPPOE_VER (1) -+#define NH_FLD_PPPOE_TYPE (NH_FLD_PPPOE_VER << 1) -+#define NH_FLD_PPPOE_CODE (NH_FLD_PPPOE_VER << 2) -+#define NH_FLD_PPPOE_SID (NH_FLD_PPPOE_VER << 3) -+#define NH_FLD_PPPOE_LEN (NH_FLD_PPPOE_VER << 4) -+#define NH_FLD_PPPOE_SESSION (NH_FLD_PPPOE_VER << 5) -+#define NH_FLD_PPPOE_PID (NH_FLD_PPPOE_VER << 6) -+#define NH_FLD_PPPOE_ALL_FIELDS ((NH_FLD_PPPOE_VER << 7) - 1) -+ -+/************************* PPP-Mux fields **********************************/ -+#define NH_FLD_PPPMUX_PID (1) -+#define NH_FLD_PPPMUX_CKSUM (NH_FLD_PPPMUX_PID << 1) -+#define NH_FLD_PPPMUX_COMPRESSED (NH_FLD_PPPMUX_PID << 2) -+#define NH_FLD_PPPMUX_ALL_FIELDS ((NH_FLD_PPPMUX_PID << 3) - 1) -+ -+/*********************** PPP-Mux sub-frame fields **************************/ -+#define NH_FLD_PPPMUX_SUBFRM_PFF (1) -+#define NH_FLD_PPPMUX_SUBFRM_LXT (NH_FLD_PPPMUX_SUBFRM_PFF << 1) -+#define NH_FLD_PPPMUX_SUBFRM_LEN (NH_FLD_PPPMUX_SUBFRM_PFF << 2) -+#define NH_FLD_PPPMUX_SUBFRM_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 3) -+#define NH_FLD_PPPMUX_SUBFRM_USE_PID (NH_FLD_PPPMUX_SUBFRM_PFF << 4) -+#define NH_FLD_PPPMUX_SUBFRM_ALL_FIELDS \ -+ ((NH_FLD_PPPMUX_SUBFRM_PFF << 5) - 1) -+ -+/*************************** LLC fields ************************************/ -+#define NH_FLD_LLC_DSAP (1) -+#define NH_FLD_LLC_SSAP (NH_FLD_LLC_DSAP << 1) -+#define NH_FLD_LLC_CTRL (NH_FLD_LLC_DSAP << 2) 
-+#define NH_FLD_LLC_ALL_FIELDS ((NH_FLD_LLC_DSAP << 3) - 1) -+ -+/*************************** NLPID fields **********************************/ -+#define NH_FLD_NLPID_NLPID (1) -+#define NH_FLD_NLPID_ALL_FIELDS ((NH_FLD_NLPID_NLPID << 1) - 1) -+ -+/*************************** SNAP fields ***********************************/ -+#define NH_FLD_SNAP_OUI (1) -+#define NH_FLD_SNAP_PID (NH_FLD_SNAP_OUI << 1) -+#define NH_FLD_SNAP_ALL_FIELDS ((NH_FLD_SNAP_OUI << 2) - 1) -+ -+/*************************** LLC SNAP fields *******************************/ -+#define NH_FLD_LLC_SNAP_TYPE (1) -+#define NH_FLD_LLC_SNAP_ALL_FIELDS ((NH_FLD_LLC_SNAP_TYPE << 1) - 1) -+ -+#define NH_FLD_ARP_HTYPE (1) -+#define NH_FLD_ARP_PTYPE (NH_FLD_ARP_HTYPE << 1) -+#define NH_FLD_ARP_HLEN (NH_FLD_ARP_HTYPE << 2) -+#define NH_FLD_ARP_PLEN (NH_FLD_ARP_HTYPE << 3) -+#define NH_FLD_ARP_OPER (NH_FLD_ARP_HTYPE << 4) -+#define NH_FLD_ARP_SHA (NH_FLD_ARP_HTYPE << 5) -+#define NH_FLD_ARP_SPA (NH_FLD_ARP_HTYPE << 6) -+#define NH_FLD_ARP_THA (NH_FLD_ARP_HTYPE << 7) -+#define NH_FLD_ARP_TPA (NH_FLD_ARP_HTYPE << 8) -+#define NH_FLD_ARP_ALL_FIELDS ((NH_FLD_ARP_HTYPE << 9) - 1) -+ -+/*************************** RFC2684 fields ********************************/ -+#define NH_FLD_RFC2684_LLC (1) -+#define NH_FLD_RFC2684_NLPID (NH_FLD_RFC2684_LLC << 1) -+#define NH_FLD_RFC2684_OUI (NH_FLD_RFC2684_LLC << 2) -+#define NH_FLD_RFC2684_PID (NH_FLD_RFC2684_LLC << 3) -+#define NH_FLD_RFC2684_VPN_OUI (NH_FLD_RFC2684_LLC << 4) -+#define NH_FLD_RFC2684_VPN_IDX (NH_FLD_RFC2684_LLC << 5) -+#define NH_FLD_RFC2684_ALL_FIELDS ((NH_FLD_RFC2684_LLC << 6) - 1) -+ -+/*************************** User defined fields ***************************/ -+#define NH_FLD_USER_DEFINED_SRCPORT (1) -+#define NH_FLD_USER_DEFINED_PCDID (NH_FLD_USER_DEFINED_SRCPORT << 1) -+#define NH_FLD_USER_DEFINED_ALL_FIELDS \ -+ ((NH_FLD_USER_DEFINED_SRCPORT << 2) - 1) -+ -+/*************************** Payload fields ********************************/ -+#define 
NH_FLD_PAYLOAD_BUFFER (1) -+#define NH_FLD_PAYLOAD_SIZE (NH_FLD_PAYLOAD_BUFFER << 1) -+#define NH_FLD_MAX_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 2) -+#define NH_FLD_MIN_FRM_SIZE (NH_FLD_PAYLOAD_BUFFER << 3) -+#define NH_FLD_PAYLOAD_TYPE (NH_FLD_PAYLOAD_BUFFER << 4) -+#define NH_FLD_FRAME_SIZE (NH_FLD_PAYLOAD_BUFFER << 5) -+#define NH_FLD_PAYLOAD_ALL_FIELDS ((NH_FLD_PAYLOAD_BUFFER << 6) - 1) -+ -+/*************************** GRE fields ************************************/ -+#define NH_FLD_GRE_TYPE (1) -+#define NH_FLD_GRE_ALL_FIELDS ((NH_FLD_GRE_TYPE << 1) - 1) -+ -+/*************************** MINENCAP fields *******************************/ -+#define NH_FLD_MINENCAP_SRC_IP (1) -+#define NH_FLD_MINENCAP_DST_IP (NH_FLD_MINENCAP_SRC_IP << 1) -+#define NH_FLD_MINENCAP_TYPE (NH_FLD_MINENCAP_SRC_IP << 2) -+#define NH_FLD_MINENCAP_ALL_FIELDS \ -+ ((NH_FLD_MINENCAP_SRC_IP << 3) - 1) -+ -+/*************************** IPSEC AH fields *******************************/ -+#define NH_FLD_IPSEC_AH_SPI (1) -+#define NH_FLD_IPSEC_AH_NH (NH_FLD_IPSEC_AH_SPI << 1) -+#define NH_FLD_IPSEC_AH_ALL_FIELDS ((NH_FLD_IPSEC_AH_SPI << 2) - 1) -+ -+/*************************** IPSEC ESP fields ******************************/ -+#define NH_FLD_IPSEC_ESP_SPI (1) -+#define NH_FLD_IPSEC_ESP_SEQUENCE_NUM (NH_FLD_IPSEC_ESP_SPI << 1) -+#define NH_FLD_IPSEC_ESP_ALL_FIELDS ((NH_FLD_IPSEC_ESP_SPI << 2) - 1) -+ -+#define NH_FLD_IPSEC_ESP_SPI_SIZE 4 -+ -+/*************************** MPLS fields ***********************************/ -+#define NH_FLD_MPLS_LABEL_STACK (1) -+#define NH_FLD_MPLS_LABEL_STACK_ALL_FIELDS \ -+ ((NH_FLD_MPLS_LABEL_STACK << 1) - 1) -+ -+/*************************** MACSEC fields *********************************/ -+#define NH_FLD_MACSEC_SECTAG (1) -+#define NH_FLD_MACSEC_ALL_FIELDS ((NH_FLD_MACSEC_SECTAG << 1) - 1) -+ -+/*************************** GTP fields ************************************/ -+#define NH_FLD_GTP_TEID (1) -+ -+ -+/* Protocol options */ -+ -+/* Ethernet options */ 
-+#define NH_OPT_ETH_BROADCAST 1 -+#define NH_OPT_ETH_MULTICAST 2 -+#define NH_OPT_ETH_UNICAST 3 -+#define NH_OPT_ETH_BPDU 4 -+ -+#define NH_ETH_IS_MULTICAST_ADDR(addr) (addr[0] & 0x01) -+/* also applicable for broadcast */ -+ -+/* VLAN options */ -+#define NH_OPT_VLAN_CFI 1 -+ -+/* IPV4 options */ -+#define NH_OPT_IPV4_UNICAST 1 -+#define NH_OPT_IPV4_MULTICAST 2 -+#define NH_OPT_IPV4_BROADCAST 3 -+#define NH_OPT_IPV4_OPTION 4 -+#define NH_OPT_IPV4_FRAG 5 -+#define NH_OPT_IPV4_INITIAL_FRAG 6 -+ -+/* IPV6 options */ -+#define NH_OPT_IPV6_UNICAST 1 -+#define NH_OPT_IPV6_MULTICAST 2 -+#define NH_OPT_IPV6_OPTION 3 -+#define NH_OPT_IPV6_FRAG 4 -+#define NH_OPT_IPV6_INITIAL_FRAG 5 -+ -+/* General IP options (may be used for any version) */ -+#define NH_OPT_IP_FRAG 1 -+#define NH_OPT_IP_INITIAL_FRAG 2 -+#define NH_OPT_IP_OPTION 3 -+ -+/* Minenc. options */ -+#define NH_OPT_MINENCAP_SRC_ADDR_PRESENT 1 -+ -+/* GRE. options */ -+#define NH_OPT_GRE_ROUTING_PRESENT 1 -+ -+/* TCP options */ -+#define NH_OPT_TCP_OPTIONS 1 -+#define NH_OPT_TCP_CONTROL_HIGH_BITS 2 -+#define NH_OPT_TCP_CONTROL_LOW_BITS 3 -+ -+/* CAPWAP options */ -+#define NH_OPT_CAPWAP_DTLS 1 -+ -+enum net_prot { -+ NET_PROT_NONE = 0, -+ NET_PROT_PAYLOAD, -+ NET_PROT_ETH, -+ NET_PROT_VLAN, -+ NET_PROT_IPV4, -+ NET_PROT_IPV6, -+ NET_PROT_IP, -+ NET_PROT_TCP, -+ NET_PROT_UDP, -+ NET_PROT_UDP_LITE, -+ NET_PROT_IPHC, -+ NET_PROT_SCTP, -+ NET_PROT_SCTP_CHUNK_DATA, -+ NET_PROT_PPPOE, -+ NET_PROT_PPP, -+ NET_PROT_PPPMUX, -+ NET_PROT_PPPMUX_SUBFRM, -+ NET_PROT_L2TPV2, -+ NET_PROT_L2TPV3_CTRL, -+ NET_PROT_L2TPV3_SESS, -+ NET_PROT_LLC, -+ NET_PROT_LLC_SNAP, -+ NET_PROT_NLPID, -+ NET_PROT_SNAP, -+ NET_PROT_MPLS, -+ NET_PROT_IPSEC_AH, -+ NET_PROT_IPSEC_ESP, -+ NET_PROT_UDP_ENC_ESP, /* RFC 3948 */ -+ NET_PROT_MACSEC, -+ NET_PROT_GRE, -+ NET_PROT_MINENCAP, -+ NET_PROT_DCCP, -+ NET_PROT_ICMP, -+ NET_PROT_IGMP, -+ NET_PROT_ARP, -+ NET_PROT_CAPWAP_DATA, -+ NET_PROT_CAPWAP_CTRL, -+ NET_PROT_RFC2684, -+ NET_PROT_ICMPV6, -+ 
NET_PROT_FCOE, -+ NET_PROT_FIP, -+ NET_PROT_ISCSI, -+ NET_PROT_GTP, -+ NET_PROT_USER_DEFINED_L2, -+ NET_PROT_USER_DEFINED_L3, -+ NET_PROT_USER_DEFINED_L4, -+ NET_PROT_USER_DEFINED_L5, -+ NET_PROT_USER_DEFINED_SHIM1, -+ NET_PROT_USER_DEFINED_SHIM2, -+ -+ NET_PROT_DUMMY_LAST -+}; -+ -+/*! IEEE8021.Q */ -+#define NH_IEEE8021Q_ETYPE 0x8100 -+#define NH_IEEE8021Q_HDR(etype, pcp, dei, vlan_id) \ -+ ((((uint32_t)(etype & 0xFFFF)) << 16) | \ -+ (((uint32_t)(pcp & 0x07)) << 13) | \ -+ (((uint32_t)(dei & 0x01)) << 12) | \ -+ (((uint32_t)(vlan_id & 0xFFF)))) -+ -+#endif /* __FSL_NET_H */ -diff --git a/scripts/Makefile.dtbinst b/scripts/Makefile.dtbinst -new file mode 100644 -index 0000000..909ed7a ---- /dev/null -+++ b/scripts/Makefile.dtbinst -@@ -0,0 +1,51 @@ -+# ========================================================================== -+# Installing dtb files -+# -+# Installs all dtb files listed in $(dtb-y) either in the -+# INSTALL_DTBS_PATH directory or the default location: -+# -+# $INSTALL_PATH/dtbs/$KERNELRELEASE -+# -+# Traverse through subdirectories listed in $(dts-dirs). 
-+# ========================================================================== -+ -+src := $(obj) -+ -+PHONY := __dtbs_install -+__dtbs_install: -+ -+export dtbinst-root ?= $(obj) -+ -+include include/config/auto.conf -+include scripts/Kbuild.include -+include $(srctree)/$(obj)/Makefile -+ -+PHONY += __dtbs_install_prep -+__dtbs_install_prep: -+ifeq ("$(dtbinst-root)", "$(obj)") -+ $(Q)if [ -d $(INSTALL_DTBS_PATH).old ]; then rm -rf $(INSTALL_DTBS_PATH).old; fi -+ $(Q)if [ -d $(INSTALL_DTBS_PATH) ]; then mv $(INSTALL_DTBS_PATH) $(INSTALL_DTBS_PATH).old; fi -+ $(Q)mkdir -p $(INSTALL_DTBS_PATH) -+endif -+ -+dtbinst-files := $(dtb-y) -+dtbinst-dirs := $(dts-dirs) -+ -+# Helper targets for Installing DTBs into the boot directory -+quiet_cmd_dtb_install = INSTALL $< -+ cmd_dtb_install = mkdir -p $(2); cp $< $(2) -+ -+install-dir = $(patsubst $(dtbinst-root)%,$(INSTALL_DTBS_PATH)%,$(obj)) -+ -+$(dtbinst-files) $(dtbinst-dirs): | __dtbs_install_prep -+ -+$(dtbinst-files): %.dtb: $(obj)/%.dtb -+ $(call cmd,dtb_install,$(install-dir)) -+ -+$(dtbinst-dirs): -+ $(Q)$(MAKE) $(dtbinst)=$(obj)/$@ -+ -+PHONY += $(dtbinst-files) $(dtbinst-dirs) -+__dtbs_install: $(dtbinst-files) $(dtbinst-dirs) -+ -+.PHONY: $(PHONY) --- -2.1.0.27.g96db324 - diff --git a/packages/base/any/kernels/3.18.25/patches/add-kernel-patches-for-nxp-arm64-ls2080ardb-based-on.patch b/packages/base/any/kernels/3.18.25/patches/add-kernel-patches-for-nxp-arm64-ls2080ardb-based-on.patch deleted file mode 100644 index 7942b14d..00000000 --- a/packages/base/any/kernels/3.18.25/patches/add-kernel-patches-for-nxp-arm64-ls2080ardb-based-on.patch +++ /dev/null @@ -1,17982 +0,0 @@ -From f64b882ce6cd659cc725a4097c39e5d97441127f Mon Sep 17 00:00:00 2001 -From: Shengzhou Liu -Date: Mon, 1 Aug 2016 12:57:39 +0800 -Subject: [PATCH] Add kernel patches for nxp arm64 ls2080ardb based on 3.18.25 - -This patch integrated a ton of patches to support misc functionalities -(e.g. 
USB, PCIe, IOMMU, GIC, reboot, etc) on arm64 LS2080ARDB platform. ---- - Documentation/IRQ-domain.txt | 71 + - Documentation/devicetree/bindings/arm/fsl.txt | 15 + - .../devicetree/bindings/pci/designware-pcie.txt | 3 +- - .../devicetree/bindings/powerpc/fsl/board.txt | 14 +- - Documentation/devicetree/bindings/usb/dwc3.txt | 3 +- - MAINTAINERS | 19 + - arch/arm/Kconfig | 3 + - arch/arm/Makefile | 8 +- - arch/arm/boot/dts/Makefile | 12 +- - arch/arm/include/asm/dma-mapping.h | 10 +- - arch/arm/include/asm/mach/pci.h | 12 +- - arch/arm/include/asm/pci.h | 7 - - arch/arm/kernel/bios32.c | 39 +- - arch/arm/mach-iop13xx/msi.c | 10 +- - arch/arm64/Kconfig | 7 +- - arch/arm64/Makefile | 11 +- - arch/arm64/boot/dts/Makefile | 1 + - arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts | 249 +++ - arch/arm64/boot/dts/fsl-ls2080a.dtsi | 729 +++++++++ - arch/arm64/boot/dts/include/dt-bindings | 1 + - arch/arm64/configs/defconfig | 1 + - arch/arm64/include/asm/mmu_context.h | 43 + - arch/arm64/include/asm/page.h | 6 +- - arch/arm64/include/asm/pgtable-hwdef.h | 7 +- - arch/arm64/kernel/head.S | 37 + - arch/arm64/kernel/smp.c | 1 + - arch/arm64/mm/mmu.c | 7 +- - arch/arm64/mm/proc-macros.S | 10 + - arch/arm64/mm/proc.S | 3 + - arch/ia64/kernel/msi_ia64.c | 8 +- - arch/ia64/sn/kernel/msi_sn.c | 8 +- - arch/mips/pci/msi-octeon.c | 2 +- - arch/mips/pci/msi-xlp.c | 12 +- - arch/mips/pci/pci-xlr.c | 2 +- - arch/powerpc/platforms/512x/mpc5121_ads_cpld.c | 3 +- - arch/powerpc/platforms/cell/axon_msi.c | 8 +- - arch/powerpc/platforms/cell/interrupt.c | 3 +- - arch/powerpc/platforms/embedded6xx/flipper-pic.c | 3 +- - arch/powerpc/platforms/powermac/pic.c | 3 +- - arch/powerpc/platforms/powernv/pci.c | 2 +- - arch/powerpc/platforms/ps3/interrupt.c | 3 +- - arch/powerpc/platforms/pseries/msi.c | 2 +- - arch/powerpc/sysdev/ehv_pic.c | 3 +- - arch/powerpc/sysdev/fsl_msi.c | 6 +- - arch/powerpc/sysdev/i8259.c | 3 +- - arch/powerpc/sysdev/ipic.c | 3 +- - arch/powerpc/sysdev/mpic.c | 3 +- - 
arch/powerpc/sysdev/mpic_pasemi_msi.c | 6 +- - arch/powerpc/sysdev/mpic_u3msi.c | 6 +- - arch/powerpc/sysdev/ppc4xx_hsta_msi.c | 2 +- - arch/powerpc/sysdev/ppc4xx_msi.c | 2 +- - arch/powerpc/sysdev/qe_lib/qe_ic.c | 3 +- - arch/powerpc/sysdev/xics/ics-opal.c | 2 +- - arch/powerpc/sysdev/xics/ics-rtas.c | 2 +- - arch/powerpc/sysdev/xics/xics-common.c | 3 +- - arch/s390/pci/pci.c | 10 +- - arch/sparc/kernel/pci_msi.c | 10 +- - arch/tile/kernel/pci_gx.c | 8 +- - arch/x86/include/asm/x86_init.h | 3 - - arch/x86/kernel/apic/io_apic.c | 8 +- - arch/x86/kernel/x86_init.c | 10 - - arch/x86/pci/bus_numa.c | 4 +- - arch/x86/pci/xen.c | 19 +- - drivers/acpi/acpi_lpss.c | 8 +- - drivers/acpi/acpi_platform.c | 4 +- - drivers/acpi/resource.c | 17 +- - drivers/base/core.c | 3 + - drivers/base/platform.c | 1 + - drivers/dma/acpi-dma.c | 10 +- - drivers/iommu/Kconfig | 34 +- - drivers/iommu/Makefile | 2 + - drivers/iommu/amd_iommu.c | 6 +- - drivers/iommu/arm-smmu.c | 1382 ++++++++--------- - drivers/iommu/exynos-iommu.c | 2 +- - drivers/iommu/fsl_pamu.c | 1 - - drivers/iommu/intel-iommu.c | 1 + - drivers/iommu/io-pgtable-arm.c | 986 ++++++++++++ - drivers/iommu/io-pgtable.c | 82 + - drivers/iommu/io-pgtable.h | 143 ++ - drivers/iommu/iommu.c | 111 +- - drivers/iommu/ipmmu-vmsa.c | 2 +- - drivers/iommu/irq_remapping.c | 8 - - drivers/iommu/msm_iommu.c | 1 + - drivers/iommu/of_iommu.c | 95 ++ - drivers/iommu/omap-iommu.c | 1 + - drivers/iommu/shmobile-iommu.c | 1 + - drivers/iommu/shmobile-ipmmu.c | 1 - - drivers/iommu/tegra-gart.c | 1 - - drivers/iommu/tegra-smmu.c | 2 +- - drivers/irqchip/Kconfig | 4 + - drivers/irqchip/Makefile | 1 + - drivers/irqchip/irq-armada-370-xp.c | 16 +- - drivers/irqchip/irq-atmel-aic.c | 40 +- - drivers/irqchip/irq-atmel-aic5.c | 65 +- - drivers/irqchip/irq-gic-v3-its.c | 1628 ++++++++++++++++++++ - drivers/irqchip/irq-gic-v3.c | 114 +- - drivers/irqchip/irq-sunxi-nmi.c | 4 +- - drivers/irqchip/irq-tb10x.c | 4 +- - drivers/of/device.c | 84 + - 
drivers/of/irq.c | 21 + - drivers/of/of_pci.c | 34 +- - drivers/of/platform.c | 139 +- - drivers/pci/Kconfig | 6 + - drivers/pci/bus.c | 18 +- - drivers/pci/host-bridge.c | 22 +- - drivers/pci/host/Kconfig | 17 + - drivers/pci/host/Makefile | 3 + - drivers/pci/host/pci-dra7xx.c | 8 +- - drivers/pci/host/pci-exynos.c | 5 +- - drivers/pci/host/pci-host-generic.c | 229 +-- - drivers/pci/host/pci-keystone-dw.c | 37 +- - drivers/pci/host/pci-keystone.h | 4 +- - drivers/pci/host/pci-layerscape.c | 669 ++++++++ - drivers/pci/host/pci-layerscape.h | 13 + - drivers/pci/host/pci-mvebu.c | 17 +- - drivers/pci/host/pci-tegra.c | 22 +- - drivers/pci/host/pci-xgene-msi.c | 595 +++++++ - drivers/pci/host/pci-xgene.c | 25 +- - drivers/pci/host/pcie-designware.c | 657 +++----- - drivers/pci/host/pcie-designware.h | 23 +- - drivers/pci/host/pcie-rcar.c | 22 +- - drivers/pci/host/pcie-xilinx.c | 64 +- - drivers/pci/msi.c | 528 +++++-- - drivers/pci/pci.h | 21 + - drivers/pci/probe.c | 28 +- - drivers/pci/quirks.c | 10 +- - drivers/pci/search.c | 5 +- - drivers/pci/xen-pcifront.c | 2 +- - drivers/power/reset/Kconfig | 6 + - drivers/power/reset/Makefile | 1 + - drivers/power/reset/ls-reboot.c | 93 ++ - drivers/usb/core/config.c | 3 +- - drivers/usb/core/driver.c | 6 +- - drivers/usb/core/hcd-pci.c | 9 + - drivers/usb/core/hub.c | 66 +- - drivers/usb/core/quirks.c | 6 + - drivers/usb/dwc3/core.c | 76 +- - drivers/usb/dwc3/core.h | 8 + - drivers/usb/dwc3/host.c | 6 + - drivers/usb/host/xhci-pci.c | 114 +- - drivers/usb/host/xhci-ring.c | 6 +- - drivers/usb/host/xhci.c | 28 +- - drivers/usb/host/xhci.h | 3 + - drivers/vfio/pci/vfio_pci_intrs.c | 2 +- - include/asm-generic/msi.h | 32 + - include/asm-generic/vmlinux.lds.h | 2 + - include/linux/acpi.h | 6 +- - include/linux/device.h | 24 + - include/linux/dma-mapping.h | 13 +- - include/linux/fsl/guts.h | 192 +++ - include/linux/iommu.h | 75 +- - include/linux/iopoll.h | 144 ++ - include/linux/irq.h | 67 +- - 
include/linux/irqchip/arm-gic-v3.h | 153 ++ - include/linux/irqdomain.h | 126 +- - include/linux/irqhandler.h | 14 + - include/linux/msi.h | 199 ++- - include/linux/of_device.h | 3 + - include/linux/of_iommu.h | 25 + - include/linux/of_irq.h | 1 + - include/linux/of_pci.h | 15 +- - include/linux/of_platform.h | 6 + - include/linux/pci.h | 21 +- - include/linux/resource_ext.h | 77 + - include/linux/usb/quirks.h | 3 + - include/trace/events/iommu.h | 31 +- - kernel/irq/Kconfig | 15 + - kernel/irq/Makefile | 1 + - kernel/irq/chip.c | 105 ++ - kernel/irq/generic-chip.c | 36 +- - kernel/irq/irqdomain.c | 585 ++++++- - kernel/irq/manage.c | 2 + - kernel/irq/msi.c | 347 +++++ - kernel/resource.c | 25 + - scripts/Kbuild.include | 6 + - scripts/Makefile.lib | 12 - - 176 files changed, 10196 insertions(+), 2223 deletions(-) - create mode 100644 arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts - create mode 100644 arch/arm64/boot/dts/fsl-ls2080a.dtsi - create mode 120000 arch/arm64/boot/dts/include/dt-bindings - create mode 100644 drivers/iommu/io-pgtable-arm.c - create mode 100644 drivers/iommu/io-pgtable.c - create mode 100644 drivers/iommu/io-pgtable.h - create mode 100644 drivers/irqchip/irq-gic-v3-its.c - create mode 100644 drivers/pci/host/pci-layerscape.c - create mode 100644 drivers/pci/host/pci-layerscape.h - create mode 100644 drivers/pci/host/pci-xgene-msi.c - create mode 100644 drivers/power/reset/ls-reboot.c - create mode 100644 include/asm-generic/msi.h - create mode 100644 include/linux/fsl/guts.h - create mode 100644 include/linux/iopoll.h - create mode 100644 include/linux/irqhandler.h - create mode 100644 include/linux/resource_ext.h - create mode 100644 kernel/irq/msi.c - -diff --git a/Documentation/IRQ-domain.txt b/Documentation/IRQ-domain.txt -index 8a8b82c..39cfa72 100644 ---- a/Documentation/IRQ-domain.txt -+++ b/Documentation/IRQ-domain.txt -@@ -151,3 +151,74 @@ used and no descriptor gets allocated it is very important to make sure - that the driver 
using the simple domain call irq_create_mapping() - before any irq_find_mapping() since the latter will actually work - for the static IRQ assignment case. -+ -+==== Hierarchy IRQ domain ==== -+On some architectures, there may be multiple interrupt controllers -+involved in delivering an interrupt from the device to the target CPU. -+Let's look at a typical interrupt delivering path on x86 platforms: -+ -+Device --> IOAPIC -> Interrupt remapping Controller -> Local APIC -> CPU -+ -+There are three interrupt controllers involved: -+1) IOAPIC controller -+2) Interrupt remapping controller -+3) Local APIC controller -+ -+To support such a hardware topology and make software architecture match -+hardware architecture, an irq_domain data structure is built for each -+interrupt controller and those irq_domains are organized into hierarchy. -+When building irq_domain hierarchy, the irq_domain near to the device is -+child and the irq_domain near to CPU is parent. So a hierarchy structure -+as below will be built for the example above. -+ CPU Vector irq_domain (root irq_domain to manage CPU vectors) -+ ^ -+ | -+ Interrupt Remapping irq_domain (manage irq_remapping entries) -+ ^ -+ | -+ IOAPIC irq_domain (manage IOAPIC delivery entries/pins) -+ -+There are four major interfaces to use hierarchy irq_domain: -+1) irq_domain_alloc_irqs(): allocate IRQ descriptors and interrupt -+ controller related resources to deliver these interrupts. -+2) irq_domain_free_irqs(): free IRQ descriptors and interrupt controller -+ related resources associated with these interrupts. -+3) irq_domain_activate_irq(): activate interrupt controller hardware to -+ deliver the interrupt. -+3) irq_domain_deactivate_irq(): deactivate interrupt controller hardware -+ to stop delivering the interrupt. -+ -+Following changes are needed to support hierarchy irq_domain. -+1) a new field 'parent' is added to struct irq_domain; it's used to -+ maintain irq_domain hierarchy information. 
-+2) a new field 'parent_data' is added to struct irq_data; it's used to -+ build hierarchy irq_data to match hierarchy irq_domains. The irq_data -+ is used to store irq_domain pointer and hardware irq number. -+3) new callbacks are added to struct irq_domain_ops to support hierarchy -+ irq_domain operations. -+ -+With support of hierarchy irq_domain and hierarchy irq_data ready, an -+irq_domain structure is built for each interrupt controller, and an -+irq_data structure is allocated for each irq_domain associated with an -+IRQ. Now we could go one step further to support stacked(hierarchy) -+irq_chip. That is, an irq_chip is associated with each irq_data along -+the hierarchy. A child irq_chip may implement a required action by -+itself or by cooperating with its parent irq_chip. -+ -+With stacked irq_chip, interrupt controller driver only needs to deal -+with the hardware managed by itself and may ask for services from its -+parent irq_chip when needed. So we could achieve a much cleaner -+software architecture. -+ -+For an interrupt controller driver to support hierarchy irq_domain, it -+needs to: -+1) Implement irq_domain_ops.alloc and irq_domain_ops.free -+2) Optionally implement irq_domain_ops.activate and -+ irq_domain_ops.deactivate. -+3) Optionally implement an irq_chip to manage the interrupt controller -+ hardware. -+4) No need to implement irq_domain_ops.map and irq_domain_ops.unmap, -+ they are unused with hierarchy irq_domain. -+ -+Hierarchy irq_domain may also be used to support other architectures, -+such as ARM, ARM64 etc. 
-diff --git a/Documentation/devicetree/bindings/arm/fsl.txt b/Documentation/devicetree/bindings/arm/fsl.txt -index e935d7d..5c9f338 100644 ---- a/Documentation/devicetree/bindings/arm/fsl.txt -+++ b/Documentation/devicetree/bindings/arm/fsl.txt -@@ -74,3 +74,18 @@ Required root node properties: - i.MX6q generic board - Required root node properties: - - compatible = "fsl,imx6q"; -+ -++Freescale ARMv8 based Layerscape SoC family Device Tree Bindings -++---------------------------------------------------------------- -+ -+LS2080A ARMv8 based Simulator model -+Required root node properties: -+ - compatible = "fsl,ls2080a-simu", "fsl,ls2080a"; -+ -+LS2080A ARMv8 based QDS Board -+Required root node properties: -+ - compatible = "fsl,ls2080a-qds", "fsl,ls2080a"; -+ -+LS2080A ARMv8 based RDB Board -+Required root node properties: -+ - compatible = "fsl,ls2080a-rdb", "fsl,ls2080a"; -diff --git a/Documentation/devicetree/bindings/pci/designware-pcie.txt b/Documentation/devicetree/bindings/pci/designware-pcie.txt -index 9f4faa8..0036ab3 100644 ---- a/Documentation/devicetree/bindings/pci/designware-pcie.txt -+++ b/Documentation/devicetree/bindings/pci/designware-pcie.txt -@@ -14,7 +14,6 @@ Required properties: - - interrupt-map-mask and interrupt-map: standard PCI properties - to define the mapping of the PCIe interface to interrupt - numbers. --- num-lanes: number of lanes to use - - clocks: Must contain an entry for each entry in clock-names. - See ../clocks/clock-bindings.txt for details. 
- - clock-names: Must include the following entries: -@@ -22,6 +21,8 @@ Required properties: - - "pcie_bus" - - Optional properties: -+- num-lanes: number of lanes to use (this property should be specified unless -+ the link is brought already up in BIOS) - - reset-gpio: gpio pin number of power good signal - - bus-range: PCI bus numbers covered (it is recommended for new devicetrees to - specify this property, to keep backwards compatibility a range of 0x00-0xff -diff --git a/Documentation/devicetree/bindings/powerpc/fsl/board.txt b/Documentation/devicetree/bindings/powerpc/fsl/board.txt -index cff38bd..89c90f4 100644 ---- a/Documentation/devicetree/bindings/powerpc/fsl/board.txt -+++ b/Documentation/devicetree/bindings/powerpc/fsl/board.txt -@@ -21,11 +21,14 @@ Example: - - This is the memory-mapped registers for on board FPGA. - --Required properities: -+Required properties: - - compatible: should be a board-specific string followed by a string - indicating the type of FPGA. Example: -- "fsl,-fpga", "fsl,fpga-pixis" -+ "fsl,-fpga", "fsl,fpga-pixis" or -+ "fsl,-fpga", "fsl,fpga-qixis" - - reg: should contain the address and the length of the FPGA register set. -+ -+Optional properties: - - interrupt-parent: should specify phandle for the interrupt controller. - - interrupts: should specify event (wakeup) IRQ. - -@@ -38,6 +41,13 @@ Example (P1022DS): - interrupts = <8 8 0 0>; - }; - -+Example (LS2080A-RDB): -+ -+ cpld@3,0 { -+ compatible = "fsl,ls2080ardb-fpga", "fsl,fpga-qixis"; -+ reg = <0x3 0 0x10000>; -+ }; -+ - * Freescale BCSR GPIO banks - - Some BCSR registers act as simple GPIO controllers, each such -diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt -index 471366d..1f9900c 100644 ---- a/Documentation/devicetree/bindings/usb/dwc3.txt -+++ b/Documentation/devicetree/bindings/usb/dwc3.txt -@@ -1,6 +1,7 @@ - synopsys DWC3 CORE - --DWC3- USB3 CONTROLLER -+DWC3- USB3 CONTROLLER. 
Complies to the generic USB binding properties -+ as described in 'usb/generic.txt' - - Required properties: - - compatible: must be "snps,dwc3" -diff --git a/MAINTAINERS b/MAINTAINERS -index c721042..1ae7362 100644 ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -1562,6 +1562,7 @@ M: Will Deacon - L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) - S: Maintained - F: drivers/iommu/arm-smmu.c -+F: drivers/iommu/io-pgtable-arm.c - - ARM64 PORT (AARCH64 ARCHITECTURE) - M: Catalin Marinas -@@ -7047,6 +7048,16 @@ S: Maintained - F: Documentation/devicetree/bindings/pci/xgene-pci.txt - F: drivers/pci/host/pci-xgene.c - -+PCI DRIVER FOR FREESCALE LAYERSCAPE -+M: Minghuan Lian -+M: Mingkai Hu -+M: Roy Zang -+L: linuxppc-dev@lists.ozlabs.org -+L: linux-pci@vger.kernel.org -+L: linux-arm-kernel@lists.infradead.org -+S: Maintained -+F: drivers/pci/host/*layerscape* -+ - PCI DRIVER FOR IMX6 - M: Richard Zhu - M: Lucas Stach -@@ -7122,6 +7133,14 @@ L: linux-pci@vger.kernel.org - S: Maintained - F: drivers/pci/host/*spear* - -+PCI MSI DRIVER FOR APPLIEDMICRO XGENE -+M: Duc Dang -+L: linux-pci@vger.kernel.org -+L: linux-arm-kernel@lists.infradead.org -+S: Maintained -+F: Documentation/devicetree/bindings/pci/xgene-pci-msi.txt -+F: drivers/pci/host/pci-xgene-msi.c -+ - PCMCIA SUBSYSTEM - P: Linux PCMCIA Team - L: linux-pcmcia@lists.infradead.org -diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig -index 89c4b5c..29544f0 100644 ---- a/arch/arm/Kconfig -+++ b/arch/arm/Kconfig -@@ -1292,6 +1292,9 @@ config PCI_DOMAINS - bool - depends on PCI - -+config PCI_DOMAINS_GENERIC -+ def_bool PCI_DOMAINS -+ - config PCI_NANOENGINE - bool "BSE nanoEngine PCI support" - depends on SA1100_NANOENGINE -diff --git a/arch/arm/Makefile b/arch/arm/Makefile -index b5d7988..93a30a2 100644 ---- a/arch/arm/Makefile -+++ b/arch/arm/Makefile -@@ -320,8 +320,12 @@ $(INSTALL_TARGETS): - $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $(boot)/dts/$@ - - PHONY += dtbs dtbs_install --dtbs 
dtbs_install: prepare scripts -- $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) $@ -+ -+dtbs: prepare scripts -+ $(Q)$(MAKE) $(build)=$(boot)/dts MACHINE=$(MACHINE) -+ -+dtbs_install: -+ $(Q)$(MAKE) $(dtbinst)=$(boot)/dts MACHINE=$(MACHINE) - - # We use MRPROPER_FILES and CLEAN_FILES now - archclean: -diff --git a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile -index 38c89ca..6e784fa 100644 ---- a/arch/arm/boot/dts/Makefile -+++ b/arch/arm/boot/dts/Makefile -@@ -517,15 +517,7 @@ dtb-$(CONFIG_MACH_DOVE) += dove-cm-a510.dtb \ - dove-dove-db.dtb - dtb-$(CONFIG_ARCH_MEDIATEK) += mt6589-aquaris5.dtb - --targets += dtbs dtbs_install --targets += $(dtb-y) - endif - --# *.dtb used to be generated in the directory above. Clean out the --# old build results so people don't accidentally use them. --dtbs: $(addprefix $(obj)/, $(dtb-y)) -- $(Q)rm -f $(obj)/../*.dtb -- --clean-files := *.dtb -- --dtbs_install: $(addsuffix _dtbinst_, $(dtb-y)) -+always := $(dtb-y) -+clean-files := *.dtb -diff --git a/arch/arm/include/asm/dma-mapping.h b/arch/arm/include/asm/dma-mapping.h -index 85738b2..f3c0d95 100644 ---- a/arch/arm/include/asm/dma-mapping.h -+++ b/arch/arm/include/asm/dma-mapping.h -@@ -121,12 +121,14 @@ static inline unsigned long dma_max_pfn(struct device *dev) - } - #define dma_max_pfn(dev) dma_max_pfn(dev) - --static inline int set_arch_dma_coherent_ops(struct device *dev) -+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, -+ u64 size, struct iommu_ops *iommu, -+ bool coherent) - { -- set_dma_ops(dev, &arm_coherent_dma_ops); -- return 0; -+ if (coherent) -+ set_dma_ops(dev, &arm_coherent_dma_ops); - } --#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev) -+#define arch_setup_dma_ops arch_setup_dma_ops - - static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr) - { -diff --git a/arch/arm/include/asm/mach/pci.h b/arch/arm/include/asm/mach/pci.h -index 7fc4278..c074e7a 100644 ---- 
a/arch/arm/include/asm/mach/pci.h -+++ b/arch/arm/include/asm/mach/pci.h -@@ -19,9 +19,7 @@ struct pci_bus; - struct device; - - struct hw_pci { --#ifdef CONFIG_PCI_DOMAINS -- int domain; --#endif -+ struct msi_controller *msi_ctrl; - struct pci_ops *ops; - int nr_controllers; - void **private_data; -@@ -36,16 +34,14 @@ struct hw_pci { - resource_size_t start, - resource_size_t size, - resource_size_t align); -- void (*add_bus)(struct pci_bus *bus); -- void (*remove_bus)(struct pci_bus *bus); - }; - - /* - * Per-controller structure - */ - struct pci_sys_data { --#ifdef CONFIG_PCI_DOMAINS -- int domain; -+#ifdef CONFIG_PCI_MSI -+ struct msi_controller *msi_ctrl; - #endif - struct list_head node; - int busnr; /* primary bus number */ -@@ -65,8 +61,6 @@ struct pci_sys_data { - resource_size_t start, - resource_size_t size, - resource_size_t align); -- void (*add_bus)(struct pci_bus *bus); -- void (*remove_bus)(struct pci_bus *bus); - void *private_data; /* platform controller private data */ - }; - -diff --git a/arch/arm/include/asm/pci.h b/arch/arm/include/asm/pci.h -index 7e95d85..585dc33 100644 ---- a/arch/arm/include/asm/pci.h -+++ b/arch/arm/include/asm/pci.h -@@ -18,13 +18,6 @@ static inline int pcibios_assign_all_busses(void) - } - - #ifdef CONFIG_PCI_DOMAINS --static inline int pci_domain_nr(struct pci_bus *bus) --{ -- struct pci_sys_data *root = bus->sysdata; -- -- return root->domain; --} -- - static inline int pci_proc_domain(struct pci_bus *bus) - { - return pci_domain_nr(bus); -diff --git a/arch/arm/kernel/bios32.c b/arch/arm/kernel/bios32.c -index 17a26c1..a5cd259 100644 ---- a/arch/arm/kernel/bios32.c -+++ b/arch/arm/kernel/bios32.c -@@ -18,6 +18,15 @@ - - static int debug_pci; - -+#ifdef CONFIG_PCI_MSI -+struct msi_controller *pcibios_msi_controller(struct pci_dev *dev) -+{ -+ struct pci_sys_data *sysdata = dev->bus->sysdata; -+ -+ return sysdata->msi_ctrl; -+} -+#endif -+ - /* - * We can't use pci_get_device() here since we are - * called from 
interrupt context. -@@ -360,20 +369,6 @@ void pcibios_fixup_bus(struct pci_bus *bus) - } - EXPORT_SYMBOL(pcibios_fixup_bus); - --void pcibios_add_bus(struct pci_bus *bus) --{ -- struct pci_sys_data *sys = bus->sysdata; -- if (sys->add_bus) -- sys->add_bus(bus); --} -- --void pcibios_remove_bus(struct pci_bus *bus) --{ -- struct pci_sys_data *sys = bus->sysdata; -- if (sys->remove_bus) -- sys->remove_bus(bus); --} -- - /* - * Swizzle the device pin each time we cross a bridge. If a platform does - * not provide a swizzle function, we perform the standard PCI swizzling. -@@ -427,17 +422,16 @@ static int pcibios_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) - static int pcibios_init_resources(int busnr, struct pci_sys_data *sys) - { - int ret; -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - - if (list_empty(&sys->resources)) { - pci_add_resource_offset(&sys->resources, - &iomem_resource, sys->mem_offset); - } - -- list_for_each_entry(window, &sys->resources, list) { -+ resource_list_for_each_entry(window, &sys->resources) - if (resource_type(window->res) == IORESOURCE_IO) - return 0; -- } - - sys->io_res.start = (busnr * SZ_64K) ? 
: pcibios_min_io; - sys->io_res.end = (busnr + 1) * SZ_64K - 1; -@@ -468,15 +462,13 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, - if (!sys) - panic("PCI: unable to allocate sys data!"); - --#ifdef CONFIG_PCI_DOMAINS -- sys->domain = hw->domain; -+#ifdef CONFIG_PCI_MSI -+ sys->msi_ctrl = hw->msi_ctrl; - #endif - sys->busnr = busnr; - sys->swizzle = hw->swizzle; - sys->map_irq = hw->map_irq; - sys->align_resource = hw->align_resource; -- sys->add_bus = hw->add_bus; -- sys->remove_bus = hw->remove_bus; - INIT_LIST_HEAD(&sys->resources); - - if (hw->private_data) -@@ -494,8 +486,9 @@ static void pcibios_init_hw(struct device *parent, struct hw_pci *hw, - if (hw->scan) - sys->bus = hw->scan(nr, sys); - else -- sys->bus = pci_scan_root_bus(parent, sys->busnr, -- hw->ops, sys, &sys->resources); -+ sys->bus = pci_scan_root_bus_msi(parent, -+ sys->busnr, hw->ops, sys, -+ &sys->resources, hw->msi_ctrl); - - if (!sys->bus) - panic("PCI: unable to scan bus!"); -diff --git a/arch/arm/mach-iop13xx/msi.c b/arch/arm/mach-iop13xx/msi.c -index e7730cf..9f89e76 100644 ---- a/arch/arm/mach-iop13xx/msi.c -+++ b/arch/arm/mach-iop13xx/msi.c -@@ -126,10 +126,10 @@ static void iop13xx_msi_nop(struct irq_data *d) - static struct irq_chip iop13xx_msi_chip = { - .name = "PCI-MSI", - .irq_ack = iop13xx_msi_nop, -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) -@@ -153,7 +153,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) - id = iop13xx_cpu_id(); - msg.data = (id << IOP13XX_MU_MIMR_CORE_SELECT) | (irq & 0x7f); - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - irq_set_chip_and_handler(irq, &iop13xx_msi_chip, handle_simple_irq); - - 
return 0; -diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 00b9c48..08e1287 100644 ---- a/arch/arm64/Kconfig -+++ b/arch/arm64/Kconfig -@@ -14,6 +14,7 @@ config ARM64 - select ARM_GIC - select AUDIT_ARCH_COMPAT_GENERIC - select ARM_GIC_V3 -+ select ARM_GIC_V3_ITS if PCI_MSI - select BUILDTIME_EXTABLE_SORT - select CLONE_BACKWARDS - select COMMON_CLK -@@ -166,6 +167,11 @@ config ARCH_XGENE - help - This enables support for AppliedMicro X-Gene SOC Family - -+config ARCH_LAYERSCAPE -+ bool "ARMv8 based Freescale Layerscape SoC family" -+ help -+ This enables support for the Freescale Layerscape SoC family. -+ - endmenu - - menu "Bus support" -@@ -366,7 +372,6 @@ config ARM64_VA_BITS_42 - - config ARM64_VA_BITS_48 - bool "48-bit" -- depends on !ARM_SMMU - - endchoice - -diff --git a/arch/arm64/Makefile b/arch/arm64/Makefile -index 2d54c55..7cf8a29 100644 ---- a/arch/arm64/Makefile -+++ b/arch/arm64/Makefile -@@ -74,8 +74,13 @@ zinstall install: vmlinux - %.dtb: scripts - $(Q)$(MAKE) $(build)=$(boot)/dts $(boot)/dts/$@ - --dtbs: scripts -- $(Q)$(MAKE) $(build)=$(boot)/dts dtbs -+PHONY += dtbs dtbs_install -+ -+dtbs: prepare scripts -+ $(Q)$(MAKE) $(build)=$(boot)/dts -+ -+dtbs_install: -+ $(Q)$(MAKE) $(dtbinst)=$(boot)/dts - - PHONY += vdso_install - vdso_install: -@@ -84,11 +89,13 @@ vdso_install: - # We use MRPROPER_FILES and CLEAN_FILES now - archclean: - $(Q)$(MAKE) $(clean)=$(boot) -+ $(Q)$(MAKE) $(clean)=$(boot)/dts - - define archhelp - echo '* Image.gz - Compressed kernel image (arch/$(ARCH)/boot/Image.gz)' - echo ' Image - Uncompressed kernel image (arch/$(ARCH)/boot/Image)' - echo '* dtbs - Build device tree blobs for enabled boards' -+ echo ' dtbs_install - Install dtbs to $(INSTALL_DTBS_PATH)' - echo ' install - Install uncompressed kernel' - echo ' zinstall - Install compressed kernel' - echo ' Install using (your) ~/bin/installkernel or' -diff --git a/arch/arm64/boot/dts/Makefile b/arch/arm64/boot/dts/Makefile -index f8001a6..93f2fc3 100644 
---- a/arch/arm64/boot/dts/Makefile -+++ b/arch/arm64/boot/dts/Makefile -@@ -1,6 +1,7 @@ - dtb-$(CONFIG_ARCH_THUNDER) += thunder-88xx.dtb - dtb-$(CONFIG_ARCH_VEXPRESS) += rtsm_ve-aemv8a.dtb foundation-v8.dtb - dtb-$(CONFIG_ARCH_XGENE) += apm-mustang.dtb -+dtb-$(CONFIG_ARCH_LAYERSCAPE) += arm64-nxp-ls2080ardb-r0.dtb - - targets += dtbs - targets += $(dtb-y) -diff --git a/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts b/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts -new file mode 100644 -index 0000000..5da2834 ---- /dev/null -+++ b/arch/arm64/boot/dts/arm64-nxp-ls2080ardb-r0.dts -@@ -0,0 +1,249 @@ -+/* -+ * Device Tree file for NXP LS2080a RDB board -+ * -+ */ -+ -+/dts-v1/; -+ -+#include "fsl-ls2080a.dtsi" -+ -+/ { -+ model = "arm64-nxp-ls2080ardb-r0"; -+ compatible = "fsl,ls2080a-rdb", "fsl,ls2080a"; -+}; -+ -+&esdhc { -+ status = "okay"; -+}; -+ -+&ifc { -+ status = "okay"; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0x5 0x80000000 0x08000000 -+ 0x2 0x0 0x5 0x30000000 0x00010000 -+ 0x3 0x0 0x5 0x20000000 0x00010000>; -+ -+ nor@0,0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "cfi-flash"; -+ reg = <0x0 0x0 0x8000000>; -+ bank-width = <2>; -+ device-width = <1>; -+ -+ partition@0 { -+ /* SoC RCW, this location must not be altered */ -+ reg = <0x0 0x100000>; -+ label = "rcw (RO)"; -+ read-only; -+ }; -+ -+ partition@1 { -+ /* U-Boot image */ -+ reg = <0x100000 0x100000>; -+ label = "uboot"; -+ }; -+ -+ partition@2 { -+ /* U-Boot environment varialbes, 1MB */ -+ reg = <0x200000 0x100000>; -+ label = "uboot-env"; -+ env_size = <0x20000>; -+ }; -+ -+ partition@3 { -+ /* MC firmware, 4MB*/ -+ reg = <0x300000 0x400000>; -+ label = "mc_firmware"; -+ }; -+ -+ partition@4 { -+ /* MC DPL Blob, 1MB */ -+ reg = <0x700000 0x100000>; -+ label = "mc_dpl_blob"; -+ }; -+ -+ partition@5 { -+ /* MC DPC Blob, 1MB */ -+ reg = <0x800000 0x100000>; -+ label = "mc_dpc_blob"; -+ }; -+ -+ partition@6 { -+ /* AIOP FW, 4MB */ -+ reg = <0x900000 
0x400000>; -+ label = "aiop_fw"; -+ }; -+ -+ partition@7 { -+ /* DebugServerFW, 2MB */ -+ reg = <0xd00000 0x200000>; -+ label = "DebugServer_fw"; -+ }; -+ }; -+ -+ nand@2,0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "fsl,ifc-nand"; -+ reg = <0x2 0x0 0x10000>; -+ }; -+ -+ cpld@3,0 { -+ reg = <0x3 0x0 0x10000>; -+ compatible = "fsl,ls2080a-rdb-qixis", "fsl,fpga-qixis"; -+ }; -+ -+}; -+ -+&i2c0 { -+ status = "okay"; -+ pca9547@75 { -+ compatible = "nxp,pca9547"; -+ reg = <0x75>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ i2c-mux-never-disable; -+ i2c@1 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x01>; -+ rtc@68 { -+ compatible = "dallas,ds3232"; -+ reg = <0x68>; -+ }; -+ }; -+ -+ i2c@3 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x3>; -+ -+ adt7481@4c { -+ compatible = "adi,adt7461"; -+ reg = <0x4c>; -+ }; -+ }; -+ }; -+}; -+ -+&i2c1 { -+ status = "disabled"; -+}; -+ -+&i2c2 { -+ status = "disabled"; -+}; -+ -+&i2c3 { -+ status = "disabled"; -+}; -+ -+&dspi { -+ status = "okay"; -+ dflash0: n25q512a { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "st,m25p80"; -+ spi-max-frequency = <3000000>; -+ reg = <0>; -+ }; -+}; -+ -+&qspi { -+ status = "disabled"; -+}; -+ -+&sata0 { -+ status = "okay"; -+}; -+ -+&sata1 { -+ status = "okay"; -+}; -+ -+&usb0 { -+ status = "okay"; -+}; -+ -+&usb1 { -+ status = "okay"; -+}; -+ -+&emdio1 { -+ status = "disabled"; -+ /* CS4340 PHYs */ -+ mdio1_phy1: emdio1_phy@1 { -+ reg = <0x10>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy2: emdio1_phy@2 { -+ reg = <0x11>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy3: emdio1_phy@3 { -+ reg = <0x12>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy4: emdio1_phy@4 { -+ reg = <0x13>; -+ phy-connection-type = "xfi"; -+ }; -+}; -+ -+&emdio2 { -+ /* AQR405 PHYs */ -+ mdio2_phy1: emdio2_phy@1 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 1 0x4>; /* Level high type */ -+ reg = <0x0>; -+ 
phy-connection-type = "xfi"; -+ }; -+ mdio2_phy2: emdio2_phy@2 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 2 0x4>; /* Level high type */ -+ reg = <0x1>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy3: emdio2_phy@3 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 4 0x4>; /* Level high type */ -+ reg = <0x2>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy4: emdio2_phy@4 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 5 0x4>; /* Level high type */ -+ reg = <0x3>; -+ phy-connection-type = "xfi"; -+ }; -+}; -+ -+/* Update DPMAC connections to external PHYs, under the assumption of -+ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board. -+ */ -+&dpmac1 { -+ phy-handle = <&mdio1_phy1>; -+}; -+&dpmac2 { -+ phy-handle = <&mdio1_phy2>; -+}; -+&dpmac3 { -+ phy-handle = <&mdio1_phy3>; -+}; -+&dpmac4 { -+ phy-handle = <&mdio1_phy4>; -+}; -+&dpmac5 { -+ phy-handle = <&mdio2_phy1>; -+}; -+&dpmac6 { -+ phy-handle = <&mdio2_phy2>; -+}; -+&dpmac7 { -+ phy-handle = <&mdio2_phy3>; -+}; -+&dpmac8 { -+ phy-handle = <&mdio2_phy4>; -+}; -diff --git a/arch/arm64/boot/dts/fsl-ls2080a.dtsi b/arch/arm64/boot/dts/fsl-ls2080a.dtsi -new file mode 100644 -index 0000000..5e53b04 ---- /dev/null -+++ b/arch/arm64/boot/dts/fsl-ls2080a.dtsi -@@ -0,0 +1,729 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-2080A family SoC. -+ * -+ * Copyright (C) 2014-2015, Freescale Semiconductor -+ * -+ * Bhupesh Sharma -+ * Harninder Rai -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. 
-+ */ -+ -+#include -+ -+/memreserve/ 0x80000000 0x00010000; -+ -+/ { -+ compatible = "fsl,ls2080a"; -+ interrupt-parent = <&gic>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ cpus { -+ #address-cells = <2>; -+ #size-cells = <0>; -+ -+ /* We have 4 clusters having 2 Cortex-A57 cores each */ -+ cpu0: cpu@0 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x0>; -+ clocks = <&clockgen 1 0>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu1: cpu@1 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x1>; -+ clocks = <&clockgen 1 0>; -+ }; -+ -+ cpu2: cpu@100 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x100>; -+ clocks = <&clockgen 1 1>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu3: cpu@101 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x101>; -+ clocks = <&clockgen 1 1>; -+ }; -+ -+ cpu4: cpu@200 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x200>; -+ clocks = <&clockgen 1 2>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu5: cpu@201 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x201>; -+ clocks = <&clockgen 1 2>; -+ }; -+ -+ cpu6: cpu@300 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x300>; -+ clocks = <&clockgen 1 3>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu7: cpu@301 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a57"; -+ reg = <0x0 0x301>; -+ clocks = <&clockgen 1 3>; -+ }; -+ }; -+ -+ pmu { -+ compatible = "arm,armv8-pmuv3"; -+ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */ -+ }; -+ -+ gic: interrupt-controller@6000000 { -+ compatible = "arm,gic-v3"; -+ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */ -+ <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */ -+ <0x0 0x0c0c0000 0 0x2000>, /* GICC */ -+ <0x0 0x0c0d0000 0 0x1000>, /* GICH */ -+ <0x0 0x0c0e0000 0 0x20000>; /* GICV */ -+ #interrupt-cells = <3>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; 
-+ interrupt-controller; -+ interrupts = <1 9 0x4>; -+ -+ its: gic-its@6020000 { -+ compatible = "arm,gic-v3-its"; -+ msi-controller; -+ reg = <0x0 0x6020000 0 0x20000>; -+ }; -+ }; -+ -+ sysclk: sysclk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <100000000>; -+ clock-output-names = "sysclk"; -+ }; -+ -+ clockgen: clocking@1300000 { -+ compatible = "fsl,ls2080a-clockgen"; -+ reg = <0 0x1300000 0 0xa0000>; -+ #clock-cells = <2>; -+ clocks = <&sysclk>; -+ }; -+ -+ tmu: tmu@1f80000 { -+ compatible = "fsl,qoriq-tmu", "fsl,ls2080a-tmu"; -+ reg = <0x0 0x1f80000 0x0 0x10000>; -+ interrupts = <0 23 0x4>; -+ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; -+ fsl,tmu-calibration = <0x00000000 0x00000026 -+ 0x00000001 0x0000002d -+ 0x00000002 0x00000032 -+ 0x00000003 0x00000039 -+ 0x00000004 0x0000003f -+ 0x00000005 0x00000046 -+ 0x00000006 0x0000004d -+ 0x00000007 0x00000054 -+ 0x00000008 0x0000005a -+ 0x00000009 0x00000061 -+ 0x0000000a 0x0000006a -+ 0x0000000b 0x00000071 -+ -+ 0x00010000 0x00000025 -+ 0x00010001 0x0000002c -+ 0x00010002 0x00000035 -+ 0x00010003 0x0000003d -+ 0x00010004 0x00000045 -+ 0x00010005 0x0000004e -+ 0x00010006 0x00000057 -+ 0x00010007 0x00000061 -+ 0x00010008 0x0000006b -+ 0x00010009 0x00000076 -+ -+ 0x00020000 0x00000029 -+ 0x00020001 0x00000033 -+ 0x00020002 0x0000003d -+ 0x00020003 0x00000049 -+ 0x00020004 0x00000056 -+ 0x00020005 0x00000061 -+ 0x00020006 0x0000006d -+ -+ 0x00030000 0x00000021 -+ 0x00030001 0x0000002a -+ 0x00030002 0x0000003c -+ 0x00030003 0x0000004e>; -+ little-endian; -+ #thermal-sensor-cells = <1>; -+ }; -+ -+ thermal-zones { -+ cpu_thermal: cpu-thermal { -+ polling-delay-passive = <1000>; -+ polling-delay = <5000>; -+ -+ thermal-sensors = <&tmu 4>; -+ -+ trips { -+ cpu_alert: cpu-alert { -+ temperature = <75000>; -+ hysteresis = <2000>; -+ type = "passive"; -+ }; -+ cpu_crit: cpu-crit { -+ temperature = <85000>; -+ hysteresis = <2000>; -+ type = "critical"; -+ }; -+ }; -+ -+ cooling-maps { 
-+ map0 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu0 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map1 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu2 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map2 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu4 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map3 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu6 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ }; -+ }; -+ }; -+ -+ serial0: serial@21c0500 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0500 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ -+ serial1: serial@21c0600 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0600 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ -+ gpio0: gpio@2300000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2300000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio1: gpio@2310000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2310000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio2: gpio@2320000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2320000 0x0 0x10000>; -+ interrupts = <0 37 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio3: gpio@2330000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2330000 0x0 0x10000>; -+ interrupts = <0 37 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ /* TODO: WRIOP 
(CCSR?) */ -+ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, E-MDIO1: 0x1_6000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B96000 0x0 0x1000>; -+ device_type = "mdio"; /* TODO: is this necessary? */ -+ little-endian; /* force the driver in LE mode */ -+ -+ /* Not necessary on the QDS, but needed on the RDB */ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, E-MDIO2: 0x1_7000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B97000 0x0 0x1000>; -+ device_type = "mdio"; /* TODO: is this necessary? */ -+ little-endian; /* force the driver in LE mode */ -+ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ ifc: ifc@2240000 { -+ compatible = "fsl,ifc", "simple-bus"; -+ reg = <0x0 0x2240000 0x0 0x20000>; -+ interrupts = <0 21 0x4>; /* Level high type */ -+ little-endian; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ -+ ranges = <0 0 0x5 0x80000000 0x08000000 -+ 2 0 0x5 0x30000000 0x00010000 -+ 3 0 0x5 0x20000000 0x00010000>; -+ }; -+ -+ esdhc: esdhc@2140000 { -+ compatible = "fsl,ls2080a-esdhc", "fsl,esdhc"; -+ reg = <0x0 0x2140000 0x0 0x10000>; -+ interrupts = <0 28 0x4>; /* Level high type */ -+ clock-frequency = <0>; -+ voltage-ranges = <1800 1800 3300 3300>; -+ sdhci,auto-cmd12; -+ little-endian; -+ bus-width = <4>; -+ }; -+ -+ ftm0: ftm0@2800000 { -+ compatible = "fsl,ftm-alarm"; -+ reg = <0x0 0x2800000 0x0 0x10000>; -+ interrupts = <0 44 4>; -+ }; -+ -+ reset: reset@1E60000 { -+ compatible = "fsl,ls-reset"; -+ reg = <0x0 0x1E60000 0x0 0x10000>; -+ }; -+ -+ dspi: dspi@2100000 { -+ compatible = "fsl,ls2085a-dspi", "fsl,ls2080a-dspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2100000 0x0 0x10000>; -+ interrupts = <0 26 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ clock-names = "dspi"; -+ spi-num-chipselects = <5>; -+ bus-num = <0>; -+ }; -+ -+ i2c0: i2c@2000000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = 
<0>; -+ reg = <0x0 0x2000000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c1: i2c@2010000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2010000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c2: i2c@2020000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2020000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c3: i2c@2030000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2030000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ qspi: quadspi@20c0000 { -+ compatible = "fsl,ls2080a-qspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x20c0000 0x0 0x10000>, -+ <0x0 0x20000000 0x0 0x10000000>; -+ reg-names = "QuadSPI", "QuadSPI-memory"; -+ interrupts = <0 25 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "qspi_en", "qspi"; -+ }; -+ -+ pcie@3400000 { -+ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", -+ "snps,dw-pcie"; -+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ -+ 0x10 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 108 0x4>; /* Level high type */ -+ interrupt-names = "intr"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x10 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x10 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 
0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>, -+ <0000 0 0 2 &gic 0 0 0 110 4>, -+ <0000 0 0 3 &gic 0 0 0 111 4>, -+ <0000 0 0 4 &gic 0 0 0 112 4>; -+ }; -+ -+ pcie@3500000 { -+ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", -+ "snps,dw-pcie"; -+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ -+ 0x12 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 113 0x4>; /* Level high type */ -+ interrupt-names = "intr"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x12 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x12 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>, -+ <0000 0 0 2 &gic 0 0 0 115 4>, -+ <0000 0 0 3 &gic 0 0 0 116 4>, -+ <0000 0 0 4 &gic 0 0 0 117 4>; -+ }; -+ -+ pcie@3600000 { -+ compatible = "fsl,ls2080a-pcie", "fsl,ls2085a-pcie", -+ "snps,dw-pcie"; -+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ -+ 0x14 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 118 0x4>; /* Level high type */ -+ interrupt-names = "intr"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <8>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x14 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x14 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>, -+ <0000 0 0 2 &gic 0 0 0 120 4>, -+ <0000 0 0 3 &gic 0 0 0 121 4>, -+ <0000 0 0 4 &gic 0 0 0 122 4>; -+ }; -+ -+ pcie@3700000 { -+ compatible = "fsl,ls2080a-pcie", 
"fsl,ls2085a-pcie", -+ "snps,dw-pcie"; -+ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */ -+ 0x16 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 123 0x4>; /* Level high type */ -+ interrupt-names = "intr"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x16 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x16 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>, -+ <0000 0 0 2 &gic 0 0 0 125 4>, -+ <0000 0 0 3 &gic 0 0 0 126 4>, -+ <0000 0 0 4 &gic 0 0 0 127 4>; -+ }; -+ -+ sata0: sata@3200000 { -+ compatible = "fsl,ls2080a-ahci", "fsl,ls1021a-ahci"; -+ reg = <0x0 0x3200000 0x0 0x10000>; -+ interrupts = <0 133 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ sata1: sata@3210000 { -+ compatible = "fsl,ls2080a-ahci", "fsl,ls1021a-ahci"; -+ reg = <0x0 0x3210000 0x0 0x10000>; -+ interrupts = <0 136 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ usb0: usb3@3100000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3100000 0x0 0x10000>; -+ interrupts = <0 80 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ }; -+ -+ usb1: usb3@3110000 { -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3110000 0x0 0x10000>; -+ interrupts = <0 81 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ }; -+ -+ smmu: iommu@5000000 { -+ compatible = "arm,mmu-500"; -+ reg = <0 0x5000000 0 0x800000>; -+ #global-interrupts = <12>; -+ interrupts = <0 13 4>, /* global secure fault */ -+ <0 14 4>, /* combined secure interrupt */ -+ <0 15 4>, /* global non-secure fault */ -+ <0 16 4>, /* combined non-secure interrupt */ -+ /* performance counter interrupts 0-7 */ -+ <0 211 4>, 
-+ <0 212 4>, -+ <0 213 4>, -+ <0 214 4>, -+ <0 215 4>, -+ <0 216 4>, -+ <0 217 4>, -+ <0 218 4>, -+ /* per context interrupt, 64 interrupts */ -+ <0 146 4>, -+ <0 147 4>, -+ <0 148 4>, -+ <0 149 4>, -+ <0 150 4>, -+ <0 151 4>, -+ <0 152 4>, -+ <0 153 4>, -+ <0 154 4>, -+ <0 155 4>, -+ <0 156 4>, -+ <0 157 4>, -+ <0 158 4>, -+ <0 159 4>, -+ <0 160 4>, -+ <0 161 4>, -+ <0 162 4>, -+ <0 163 4>, -+ <0 164 4>, -+ <0 165 4>, -+ <0 166 4>, -+ <0 167 4>, -+ <0 168 4>, -+ <0 169 4>, -+ <0 170 4>, -+ <0 171 4>, -+ <0 172 4>, -+ <0 173 4>, -+ <0 174 4>, -+ <0 175 4>, -+ <0 176 4>, -+ <0 177 4>, -+ <0 178 4>, -+ <0 179 4>, -+ <0 180 4>, -+ <0 181 4>, -+ <0 182 4>, -+ <0 183 4>, -+ <0 184 4>, -+ <0 185 4>, -+ <0 186 4>, -+ <0 187 4>, -+ <0 188 4>, -+ <0 189 4>, -+ <0 190 4>, -+ <0 191 4>, -+ <0 192 4>, -+ <0 193 4>, -+ <0 194 4>, -+ <0 195 4>, -+ <0 196 4>, -+ <0 197 4>, -+ <0 198 4>, -+ <0 199 4>, -+ <0 200 4>, -+ <0 201 4>, -+ <0 202 4>, -+ <0 203 4>, -+ <0 204 4>, -+ <0 205 4>, -+ <0 206 4>, -+ <0 207 4>, -+ <0 208 4>, -+ <0 209 4>; -+ mmu-masters = <&fsl_mc 0x300 0>; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = <1 13 0x1>, /* Physical Secure PPI, edge triggered */ -+ <1 14 0x1>, /* Physical Non-Secure PPI, edge triggered */ -+ <1 11 0x1>, /* Virtual PPI, edge triggered */ -+ <1 10 0x1>; /* Hypervisor PPI, edge triggered */ -+ arm,reread-timer; -+ }; -+ -+ fsl_mc: fsl-mc@80c000000 { -+ compatible = "fsl,qoriq-mc"; -+ #stream-id-cells = <2>; -+ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ -+ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ -+ msi-parent = <&its>; -+ #address-cells = <3>; -+ #size-cells = <1>; -+ -+ /* -+ * Region type 0x0 - MC portals -+ * Region type 0x1 - QBMAN portals -+ */ -+ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000 -+ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>; -+ -+ /* -+ * Define the maximum number of MACs present on the SoC. 
-+ * They won't necessarily be all probed, since the -+ * Data Path Layout file and the MC firmware can put fewer -+ * actual DPMAC objects on the MC bus. -+ */ -+ dpmacs { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ dpmac1: dpmac@1 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <1>; -+ }; -+ dpmac2: dpmac@2 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <2>; -+ }; -+ dpmac3: dpmac@3 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <3>; -+ }; -+ dpmac4: dpmac@4 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <4>; -+ }; -+ dpmac5: dpmac@5 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <5>; -+ }; -+ dpmac6: dpmac@6 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <6>; -+ }; -+ dpmac7: dpmac@7 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <7>; -+ }; -+ dpmac8: dpmac@8 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <8>; -+ }; -+ dpmac9: dpmac@9 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <9>; -+ }; -+ dpmac10: dpmac@10 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xa>; -+ }; -+ dpmac11: dpmac@11 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xb>; -+ }; -+ dpmac12: dpmac@12 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xc>; -+ }; -+ dpmac13: dpmac@13 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xd>; -+ }; -+ dpmac14: dpmac@14 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xe>; -+ }; -+ dpmac15: dpmac@15 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xf>; -+ }; -+ dpmac16: dpmac@16 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0x10>; -+ }; -+ }; -+ }; -+ -+ ccn@4000000 { -+ compatible = "arm,ccn-504"; -+ reg = <0x0 0x04000000 0x0 0x01000000>; -+ interrupts = <0 12 4>; -+ }; -+ -+ memory@80000000 { -+ device_type = "memory"; -+ reg = <0x00000000 0x80000000 0 0x80000000>; -+ /* DRAM space 1 - 2 GB DRAM */ -+ }; -+}; -diff --git a/arch/arm64/boot/dts/include/dt-bindings b/arch/arm64/boot/dts/include/dt-bindings -new file mode 120000 -index 0000000..08c00e4 ---- /dev/null -+++ b/arch/arm64/boot/dts/include/dt-bindings 
-@@ -0,0 +1 @@ -+../../../../../include/dt-bindings -\ No newline at end of file -diff --git a/arch/arm64/configs/defconfig b/arch/arm64/configs/defconfig -index dd301be..3852a77 100644 ---- a/arch/arm64/configs/defconfig -+++ b/arch/arm64/configs/defconfig -@@ -32,6 +32,7 @@ CONFIG_MODULES=y - CONFIG_MODULE_UNLOAD=y - # CONFIG_BLK_DEV_BSG is not set - # CONFIG_IOSCHED_DEADLINE is not set -+CONFIG_ARCH_LAYERSCAPE=y - CONFIG_ARCH_THUNDER=y - CONFIG_ARCH_VEXPRESS=y - CONFIG_ARCH_XGENE=y -diff --git a/arch/arm64/include/asm/mmu_context.h b/arch/arm64/include/asm/mmu_context.h -index 101a42b..8ec41e5 100644 ---- a/arch/arm64/include/asm/mmu_context.h -+++ b/arch/arm64/include/asm/mmu_context.h -@@ -64,6 +64,49 @@ static inline void cpu_set_reserved_ttbr0(void) - : "r" (ttbr)); - } - -+/* -+ * TCR.T0SZ value to use when the ID map is active. Usually equals -+ * TCR_T0SZ(VA_BITS), unless system RAM is positioned very high in -+ * physical memory, in which case it will be smaller. -+ */ -+extern u64 idmap_t0sz; -+ -+static inline bool __cpu_uses_extended_idmap(void) -+{ -+ return (!IS_ENABLED(CONFIG_ARM64_VA_BITS_48) && -+ unlikely(idmap_t0sz != TCR_T0SZ(VA_BITS))); -+} -+ -+static inline void __cpu_set_tcr_t0sz(u64 t0sz) -+{ -+ unsigned long tcr; -+ -+ if (__cpu_uses_extended_idmap()) -+ asm volatile ( -+ " mrs %0, tcr_el1 ;" -+ " bfi %0, %1, %2, %3 ;" -+ " msr tcr_el1, %0 ;" -+ " isb" -+ : "=&r" (tcr) -+ : "r"(t0sz), "I"(TCR_T0SZ_OFFSET), "I"(TCR_TxSZ_WIDTH)); -+} -+ -+/* -+ * Set TCR.T0SZ to the value appropriate for activating the identity map. 
-+ */ -+static inline void cpu_set_idmap_tcr_t0sz(void) -+{ -+ __cpu_set_tcr_t0sz(idmap_t0sz); -+} -+ -+/* -+ * Set TCR.T0SZ to its default value (based on VA_BITS) -+ */ -+static inline void cpu_set_default_tcr_t0sz(void) -+{ -+ __cpu_set_tcr_t0sz(TCR_T0SZ(VA_BITS)); -+} -+ - static inline void switch_new_context(struct mm_struct *mm) - { - unsigned long flags; -diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h -index 22b1623..3d02b18 100644 ---- a/arch/arm64/include/asm/page.h -+++ b/arch/arm64/include/asm/page.h -@@ -33,7 +33,9 @@ - * image. Both require pgd, pud (4 levels only) and pmd tables to (section) - * map the kernel. With the 64K page configuration, swapper and idmap need to - * map to pte level. The swapper also maps the FDT (see __create_page_tables -- * for more information). -+ * for more information). Note that the number of ID map translation levels -+ * could be increased on the fly if system RAM is out of reach for the default -+ * VA range, so 3 pages are reserved in all cases. - */ - #ifdef CONFIG_ARM64_64K_PAGES - #define SWAPPER_PGTABLE_LEVELS (CONFIG_ARM64_PGTABLE_LEVELS) -@@ -42,7 +44,7 @@ - #endif - - #define SWAPPER_DIR_SIZE (SWAPPER_PGTABLE_LEVELS * PAGE_SIZE) --#define IDMAP_DIR_SIZE (SWAPPER_DIR_SIZE) -+#define IDMAP_DIR_SIZE (3 * PAGE_SIZE) - - #ifndef __ASSEMBLY__ - -diff --git a/arch/arm64/include/asm/pgtable-hwdef.h b/arch/arm64/include/asm/pgtable-hwdef.h -index 88174e0..500b74e 100644 ---- a/arch/arm64/include/asm/pgtable-hwdef.h -+++ b/arch/arm64/include/asm/pgtable-hwdef.h -@@ -142,7 +142,12 @@ - /* - * TCR flags. 
- */ --#define TCR_TxSZ(x) (((UL(64) - (x)) << 16) | ((UL(64) - (x)) << 0)) -+#define TCR_T0SZ_OFFSET 0 -+#define TCR_T1SZ_OFFSET 16 -+#define TCR_T0SZ(x) ((UL(64) - (x)) << TCR_T0SZ_OFFSET) -+#define TCR_T1SZ(x) ((UL(64) - (x)) << TCR_T1SZ_OFFSET) -+#define TCR_TxSZ(x) (TCR_T0SZ(x) | TCR_T1SZ(x)) -+#define TCR_TxSZ_WIDTH 6 - #define TCR_IRGN_NC ((UL(0) << 8) | (UL(0) << 24)) - #define TCR_IRGN_WBWA ((UL(1) << 8) | (UL(1) << 24)) - #define TCR_IRGN_WT ((UL(2) << 8) | (UL(2) << 24)) -diff --git a/arch/arm64/kernel/head.S b/arch/arm64/kernel/head.S -index 2877dd8..ca02239 100644 ---- a/arch/arm64/kernel/head.S -+++ b/arch/arm64/kernel/head.S -@@ -592,6 +592,43 @@ __create_page_tables: - mov x0, x25 // idmap_pg_dir - ldr x3, =KERNEL_START - add x3, x3, x28 // __pa(KERNEL_START) -+ -+#ifndef CONFIG_ARM64_VA_BITS_48 -+#define EXTRA_SHIFT (PGDIR_SHIFT + PAGE_SHIFT - 3) -+#define EXTRA_PTRS (1 << (48 - EXTRA_SHIFT)) -+ -+ /* -+ * If VA_BITS < 48, it may be too small to allow for an ID mapping to be -+ * created that covers system RAM if that is located sufficiently high -+ * in the physical address space. So for the ID map, use an extended -+ * virtual range in that case, by configuring an additional translation -+ * level. -+ * First, we have to verify our assumption that the current value of -+ * VA_BITS was chosen such that all translation levels are fully -+ * utilised, and that lowering T0SZ will always result in an additional -+ * translation level to be configured. -+ */ -+#if VA_BITS != EXTRA_SHIFT -+#error "Mismatch between VA_BITS and page size/number of translation levels" -+#endif -+ -+ /* -+ * Calculate the maximum allowed value for TCR_EL1.T0SZ so that the -+ * entire kernel image can be ID mapped. As T0SZ == (64 - #bits used), -+ * this number conveniently equals the number of leading zeroes in -+ * the physical address of KERNEL_END. -+ */ -+ adrp x5, KERNEL_END -+ clz x5, x5 -+ cmp x5, TCR_T0SZ(VA_BITS) // default T0SZ small enough? -+ b.ge 1f // .. 
then skip additional level -+ -+ str_l x5, idmap_t0sz, x6 -+ -+ create_table_entry x0, x3, EXTRA_SHIFT, EXTRA_PTRS, x5, x6 -+1: -+#endif -+ - create_pgd_entry x0, x3, x5, x6 - ldr x6, =KERNEL_END - mov x5, x3 // __pa(KERNEL_START) -diff --git a/arch/arm64/kernel/smp.c b/arch/arm64/kernel/smp.c -index 0ef8789..5657692 100644 ---- a/arch/arm64/kernel/smp.c -+++ b/arch/arm64/kernel/smp.c -@@ -152,6 +152,7 @@ asmlinkage void secondary_start_kernel(void) - */ - cpu_set_reserved_ttbr0(); - flush_tlb_all(); -+ cpu_set_default_tcr_t0sz(); - - preempt_disable(); - trace_hardirqs_off(); -diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c -index f4f8b50..53bbff9 100644 ---- a/arch/arm64/mm/mmu.c -+++ b/arch/arm64/mm/mmu.c -@@ -37,6 +37,8 @@ - - #include "mm.h" - -+u64 idmap_t0sz = TCR_T0SZ(VA_BITS); -+ - /* - * Empty_zero_page is a special page that is used for zero-initialized data - * and COW. -@@ -369,6 +371,7 @@ void __init paging_init(void) - */ - cpu_set_reserved_ttbr0(); - flush_tlb_all(); -+ cpu_set_default_tcr_t0sz(); - } - - /* -@@ -376,8 +379,10 @@ void __init paging_init(void) - */ - void setup_mm_for_reboot(void) - { -- cpu_switch_mm(idmap_pg_dir, &init_mm); -+ cpu_set_reserved_ttbr0(); - flush_tlb_all(); -+ cpu_set_idmap_tcr_t0sz(); -+ cpu_switch_mm(idmap_pg_dir, &init_mm); - } - - /* -diff --git a/arch/arm64/mm/proc-macros.S b/arch/arm64/mm/proc-macros.S -index 005d29e..4c4d93c 100644 ---- a/arch/arm64/mm/proc-macros.S -+++ b/arch/arm64/mm/proc-macros.S -@@ -52,3 +52,13 @@ - mov \reg, #4 // bytes per word - lsl \reg, \reg, \tmp // actual cache line size - .endm -+ -+/* -+ * tcr_set_idmap_t0sz - update TCR.T0SZ so that we can load the ID map -+ */ -+ .macro tcr_set_idmap_t0sz, valreg, tmpreg -+#ifndef CONFIG_ARM64_VA_BITS_48 -+ ldr_l \tmpreg, idmap_t0sz -+ bfi \valreg, \tmpreg, #TCR_T0SZ_OFFSET, #TCR_TxSZ_WIDTH -+#endif -+ .endm -diff --git a/arch/arm64/mm/proc.S b/arch/arm64/mm/proc.S -index 4e778b1..cbea872 100644 ---- a/arch/arm64/mm/proc.S -+++ 
b/arch/arm64/mm/proc.S -@@ -156,6 +156,7 @@ ENTRY(cpu_do_resume) - msr cpacr_el1, x6 - msr ttbr0_el1, x1 - msr ttbr1_el1, x7 -+ tcr_set_idmap_t0sz x8, x7 - msr tcr_el1, x8 - msr vbar_el1, x9 - msr mdscr_el1, x10 -@@ -233,6 +234,8 @@ ENTRY(__cpu_setup) - */ - ldr x10, =TCR_TxSZ(VA_BITS) | TCR_CACHE_FLAGS | TCR_SMP_FLAGS | \ - TCR_TG_FLAGS | TCR_ASID16 | TCR_TBI0 -+ tcr_set_idmap_t0sz x10, x9 -+ - /* - * Read the PARange bits from ID_AA64MMFR0_EL1 and set the IPS bits in - * TCR_EL1. -diff --git a/arch/ia64/kernel/msi_ia64.c b/arch/ia64/kernel/msi_ia64.c -index 8c3730c..8ae36ea 100644 ---- a/arch/ia64/kernel/msi_ia64.c -+++ b/arch/ia64/kernel/msi_ia64.c -@@ -35,7 +35,7 @@ static int ia64_set_msi_irq_affinity(struct irq_data *idata, - data |= MSI_DATA_VECTOR(irq_to_vector(irq)); - msg.data = data; - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - cpumask_copy(idata->affinity, cpumask_of(cpu)); - - return 0; -@@ -71,7 +71,7 @@ int ia64_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) - MSI_DATA_DELIVERY_FIXED | - MSI_DATA_VECTOR(vector); - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - irq_set_chip_and_handler(irq, &ia64_msi_chip, handle_edge_irq); - - return 0; -@@ -102,8 +102,8 @@ static int ia64_msi_retrigger_irq(struct irq_data *data) - */ - static struct irq_chip ia64_msi_chip = { - .name = "PCI-MSI", -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - .irq_ack = ia64_ack_msi_irq, - #ifdef CONFIG_SMP - .irq_set_affinity = ia64_set_msi_irq_affinity, -diff --git a/arch/ia64/sn/kernel/msi_sn.c b/arch/ia64/sn/kernel/msi_sn.c -index 446e779..a0eb27b 100644 ---- a/arch/ia64/sn/kernel/msi_sn.c -+++ b/arch/ia64/sn/kernel/msi_sn.c -@@ -145,7 +145,7 @@ int sn_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *entry) - msg.data = 0x100 + irq; - - irq_set_msi_desc(irq, entry); -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - 
irq_set_chip_and_handler(irq, &sn_msi_chip, handle_edge_irq); - - return 0; -@@ -205,7 +205,7 @@ static int sn_set_msi_irq_affinity(struct irq_data *data, - msg.address_hi = (u32)(bus_addr >> 32); - msg.address_lo = (u32)(bus_addr & 0x00000000ffffffff); - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - cpumask_copy(data->affinity, cpu_mask); - - return 0; -@@ -228,8 +228,8 @@ static int sn_msi_retrigger_irq(struct irq_data *data) - - static struct irq_chip sn_msi_chip = { - .name = "PCI-MSI", -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - .irq_ack = sn_ack_msi_irq, - #ifdef CONFIG_SMP - .irq_set_affinity = sn_set_msi_irq_affinity, -diff --git a/arch/mips/pci/msi-octeon.c b/arch/mips/pci/msi-octeon.c -index 63bbe07..cffaaf4 100644 ---- a/arch/mips/pci/msi-octeon.c -+++ b/arch/mips/pci/msi-octeon.c -@@ -178,7 +178,7 @@ msi_irq_allocated: - pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); - - irq_set_msi_desc(irq, desc); -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - return 0; - } - -diff --git a/arch/mips/pci/msi-xlp.c b/arch/mips/pci/msi-xlp.c -index f7ac3ed..6a40f24 100644 ---- a/arch/mips/pci/msi-xlp.c -+++ b/arch/mips/pci/msi-xlp.c -@@ -217,7 +217,7 @@ static void xlp_msix_mask_ack(struct irq_data *d) - - msixvec = nlm_irq_msixvec(d->irq); - link = nlm_irq_msixlink(msixvec); -- mask_msi_irq(d); -+ pci_msi_mask_irq(d); - md = irq_data_get_irq_handler_data(d); - - /* Ack MSI on bridge */ -@@ -239,10 +239,10 @@ static void xlp_msix_mask_ack(struct irq_data *d) - - static struct irq_chip xlp_msix_chip = { - .name = "XLP-MSIX", -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, - .irq_mask_ack = xlp_msix_mask_ack, -- .irq_unmask = unmask_msi_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - void arch_teardown_msi_irq(unsigned int irq) -@@ -345,7 
+345,7 @@ static int xlp_setup_msi(uint64_t lnkbase, int node, int link, - if (ret < 0) - return ret; - -- write_msi_msg(xirq, &msg); -+ pci_write_msi_msg(xirq, &msg); - return 0; - } - -@@ -446,7 +446,7 @@ static int xlp_setup_msix(uint64_t lnkbase, int node, int link, - if (ret < 0) - return ret; - -- write_msi_msg(xirq, &msg); -+ pci_write_msi_msg(xirq, &msg); - return 0; - } - -diff --git a/arch/mips/pci/pci-xlr.c b/arch/mips/pci/pci-xlr.c -index 0dde803..26d2dab 100644 ---- a/arch/mips/pci/pci-xlr.c -+++ b/arch/mips/pci/pci-xlr.c -@@ -260,7 +260,7 @@ int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) - if (ret < 0) - return ret; - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - return 0; - } - #endif -diff --git a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c -index ca3a062..11090ab 100644 ---- a/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c -+++ b/arch/powerpc/platforms/512x/mpc5121_ads_cpld.c -@@ -123,7 +123,8 @@ cpld_pic_cascade(unsigned int irq, struct irq_desc *desc) - } - - static int --cpld_pic_host_match(struct irq_domain *h, struct device_node *node) -+cpld_pic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - return cpld_pic_node == node; - } -diff --git a/arch/powerpc/platforms/cell/axon_msi.c b/arch/powerpc/platforms/cell/axon_msi.c -index 862b327..0883994 100644 ---- a/arch/powerpc/platforms/cell/axon_msi.c -+++ b/arch/powerpc/platforms/cell/axon_msi.c -@@ -279,7 +279,7 @@ static int axon_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - - irq_set_msi_desc(virq, entry); - msg.data = virq; -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - } - - return 0; -@@ -301,9 +301,9 @@ static void axon_msi_teardown_msi_irqs(struct pci_dev *dev) - } - - static struct irq_chip msic_irq_chip = { -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -- .irq_shutdown = mask_msi_irq, -+ 
.irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, -+ .irq_shutdown = pci_msi_mask_irq, - .name = "AXON-MSI", - }; - -diff --git a/arch/powerpc/platforms/cell/interrupt.c b/arch/powerpc/platforms/cell/interrupt.c -index 28e558d..109d236 100644 ---- a/arch/powerpc/platforms/cell/interrupt.c -+++ b/arch/powerpc/platforms/cell/interrupt.c -@@ -222,7 +222,8 @@ void iic_request_IPIs(void) - #endif /* CONFIG_SMP */ - - --static int iic_host_match(struct irq_domain *h, struct device_node *node) -+static int iic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - return of_device_is_compatible(node, - "IBM,CBEA-Internal-Interrupt-Controller"); -diff --git a/arch/powerpc/platforms/embedded6xx/flipper-pic.c b/arch/powerpc/platforms/embedded6xx/flipper-pic.c -index 4cde8e7..b7866e0 100644 ---- a/arch/powerpc/platforms/embedded6xx/flipper-pic.c -+++ b/arch/powerpc/platforms/embedded6xx/flipper-pic.c -@@ -108,7 +108,8 @@ static int flipper_pic_map(struct irq_domain *h, unsigned int virq, - return 0; - } - --static int flipper_pic_match(struct irq_domain *h, struct device_node *np) -+static int flipper_pic_match(struct irq_domain *h, struct device_node *np, -+ enum irq_domain_bus_token bus_token) - { - return 1; - } -diff --git a/arch/powerpc/platforms/powermac/pic.c b/arch/powerpc/platforms/powermac/pic.c -index 4c24bf6..246cab4 100644 ---- a/arch/powerpc/platforms/powermac/pic.c -+++ b/arch/powerpc/platforms/powermac/pic.c -@@ -268,7 +268,8 @@ static struct irqaction gatwick_cascade_action = { - .name = "cascade", - }; - --static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node) -+static int pmac_pic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - /* We match all, we don't always have a node anyway */ - return 1; -diff --git a/arch/powerpc/platforms/powernv/pci.c b/arch/powerpc/platforms/powernv/pci.c -index 9ff55d5..019991d 100644 
---- a/arch/powerpc/platforms/powernv/pci.c -+++ b/arch/powerpc/platforms/powernv/pci.c -@@ -90,7 +90,7 @@ static int pnv_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) - return rc; - } - irq_set_msi_desc(virq, entry); -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - } - return 0; - } -diff --git a/arch/powerpc/platforms/ps3/interrupt.c b/arch/powerpc/platforms/ps3/interrupt.c -index 5f3b232..df0c086 100644 ---- a/arch/powerpc/platforms/ps3/interrupt.c -+++ b/arch/powerpc/platforms/ps3/interrupt.c -@@ -678,7 +678,8 @@ static int ps3_host_map(struct irq_domain *h, unsigned int virq, - return 0; - } - --static int ps3_host_match(struct irq_domain *h, struct device_node *np) -+static int ps3_host_match(struct irq_domain *h, struct device_node *np, -+ enum irq_domain_bus_token bus_token) - { - /* Match all */ - return 1; -diff --git a/arch/powerpc/platforms/pseries/msi.c b/arch/powerpc/platforms/pseries/msi.c -index 8b909e9..691a154 100644 ---- a/arch/powerpc/platforms/pseries/msi.c -+++ b/arch/powerpc/platforms/pseries/msi.c -@@ -476,7 +476,7 @@ again: - irq_set_msi_desc(virq, entry); - - /* Read config space back so we can restore after reset */ -- __read_msi_msg(entry, &msg); -+ __pci_read_msi_msg(entry, &msg); - entry->msg = msg; - } - -diff --git a/arch/powerpc/sysdev/ehv_pic.c b/arch/powerpc/sysdev/ehv_pic.c -index 2d20f10..eca0b00 100644 ---- a/arch/powerpc/sysdev/ehv_pic.c -+++ b/arch/powerpc/sysdev/ehv_pic.c -@@ -177,7 +177,8 @@ unsigned int ehv_pic_get_irq(void) - return irq_linear_revmap(global_ehv_pic->irqhost, irq); - } - --static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node) -+static int ehv_pic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - /* Exact match, unless ehv_pic node is NULL */ - return h->of_node == NULL || h->of_node == node; -diff --git a/arch/powerpc/sysdev/fsl_msi.c b/arch/powerpc/sysdev/fsl_msi.c -index ea6b3a1..f13282c 100644 ---- 
a/arch/powerpc/sysdev/fsl_msi.c -+++ b/arch/powerpc/sysdev/fsl_msi.c -@@ -82,8 +82,8 @@ static void fsl_msi_print_chip(struct irq_data *irqd, struct seq_file *p) - - - static struct irq_chip fsl_msi_chip = { -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - .irq_ack = fsl_msi_end_irq, - .irq_print_chip = fsl_msi_print_chip, - }; -@@ -243,7 +243,7 @@ static int fsl_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) - irq_set_msi_desc(virq, entry); - - fsl_compose_msi_msg(pdev, hwirq, &msg, msi_data); -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - } - return 0; - -diff --git a/arch/powerpc/sysdev/i8259.c b/arch/powerpc/sysdev/i8259.c -index 45598da..8c3756c 100644 ---- a/arch/powerpc/sysdev/i8259.c -+++ b/arch/powerpc/sysdev/i8259.c -@@ -162,7 +162,8 @@ static struct resource pic_edgectrl_iores = { - .flags = IORESOURCE_BUSY, - }; - --static int i8259_host_match(struct irq_domain *h, struct device_node *node) -+static int i8259_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - return h->of_node == NULL || h->of_node == node; - } -diff --git a/arch/powerpc/sysdev/ipic.c b/arch/powerpc/sysdev/ipic.c -index b50f978..1b9b00f 100644 ---- a/arch/powerpc/sysdev/ipic.c -+++ b/arch/powerpc/sysdev/ipic.c -@@ -672,7 +672,8 @@ static struct irq_chip ipic_edge_irq_chip = { - .irq_set_type = ipic_set_irq_type, - }; - --static int ipic_host_match(struct irq_domain *h, struct device_node *node) -+static int ipic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - /* Exact match, unless ipic node is NULL */ - return h->of_node == NULL || h->of_node == node; -diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c -index 89cec0e..bf6f77e 100644 ---- a/arch/powerpc/sysdev/mpic.c -+++ b/arch/powerpc/sysdev/mpic.c -@@ -1009,7 +1009,8 @@ static struct irq_chip 
mpic_irq_ht_chip = { - #endif /* CONFIG_MPIC_U3_HT_IRQS */ - - --static int mpic_host_match(struct irq_domain *h, struct device_node *node) -+static int mpic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - /* Exact match, unless mpic node is NULL */ - return h->of_node == NULL || h->of_node == node; -diff --git a/arch/powerpc/sysdev/mpic_pasemi_msi.c b/arch/powerpc/sysdev/mpic_pasemi_msi.c -index a6add4a..5a4c474 100644 ---- a/arch/powerpc/sysdev/mpic_pasemi_msi.c -+++ b/arch/powerpc/sysdev/mpic_pasemi_msi.c -@@ -42,7 +42,7 @@ static struct mpic *msi_mpic; - static void mpic_pasemi_msi_mask_irq(struct irq_data *data) - { - pr_debug("mpic_pasemi_msi_mask_irq %d\n", data->irq); -- mask_msi_irq(data); -+ pci_msi_mask_irq(data); - mpic_mask_irq(data); - } - -@@ -50,7 +50,7 @@ static void mpic_pasemi_msi_unmask_irq(struct irq_data *data) - { - pr_debug("mpic_pasemi_msi_unmask_irq %d\n", data->irq); - mpic_unmask_irq(data); -- unmask_msi_irq(data); -+ pci_msi_unmask_irq(data); - } - - static struct irq_chip mpic_pasemi_msi_chip = { -@@ -138,7 +138,7 @@ static int pasemi_msi_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) - * register to generate MSI [512...1023] - */ - msg.data = hwirq-0x200; -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - } - - return 0; -diff --git a/arch/powerpc/sysdev/mpic_u3msi.c b/arch/powerpc/sysdev/mpic_u3msi.c -index db35a40..65880cc 100644 ---- a/arch/powerpc/sysdev/mpic_u3msi.c -+++ b/arch/powerpc/sysdev/mpic_u3msi.c -@@ -25,14 +25,14 @@ static struct mpic *msi_mpic; - - static void mpic_u3msi_mask_irq(struct irq_data *data) - { -- mask_msi_irq(data); -+ pci_msi_mask_irq(data); - mpic_mask_irq(data); - } - - static void mpic_u3msi_unmask_irq(struct irq_data *data) - { - mpic_unmask_irq(data); -- unmask_msi_irq(data); -+ pci_msi_unmask_irq(data); - } - - static struct irq_chip mpic_u3msi_chip = { -@@ -172,7 +172,7 @@ static int u3msi_setup_msi_irqs(struct 
pci_dev *pdev, int nvec, int type) - printk("u3msi: allocated virq 0x%x (hw 0x%x) addr 0x%lx\n", - virq, hwirq, (unsigned long)addr); - msg.data = hwirq; -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - - hwirq++; - } -diff --git a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c -index a6a4dbd..908105f 100644 ---- a/arch/powerpc/sysdev/ppc4xx_hsta_msi.c -+++ b/arch/powerpc/sysdev/ppc4xx_hsta_msi.c -@@ -85,7 +85,7 @@ static int hsta_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - msi_bitmap_free_hwirqs(&ppc4xx_hsta_msi.bmp, irq, 1); - return -EINVAL; - } -- write_msi_msg(hwirq, &msg); -+ pci_write_msi_msg(hwirq, &msg); - } - - return 0; -diff --git a/arch/powerpc/sysdev/ppc4xx_msi.c b/arch/powerpc/sysdev/ppc4xx_msi.c -index 85d9c18..c6df3e2 100644 ---- a/arch/powerpc/sysdev/ppc4xx_msi.c -+++ b/arch/powerpc/sysdev/ppc4xx_msi.c -@@ -116,7 +116,7 @@ static int ppc4xx_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - - irq_set_msi_desc(virq, entry); - msg.data = int_no; -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - } - return 0; - } -diff --git a/arch/powerpc/sysdev/qe_lib/qe_ic.c b/arch/powerpc/sysdev/qe_lib/qe_ic.c -index b2b87c3..a433b3d 100644 ---- a/arch/powerpc/sysdev/qe_lib/qe_ic.c -+++ b/arch/powerpc/sysdev/qe_lib/qe_ic.c -@@ -245,7 +245,8 @@ static struct irq_chip qe_ic_irq_chip = { - .irq_mask_ack = qe_ic_mask_irq, - }; - --static int qe_ic_host_match(struct irq_domain *h, struct device_node *node) -+static int qe_ic_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - /* Exact match, unless qe_ic node is NULL */ - return h->of_node == NULL || h->of_node == node; -diff --git a/arch/powerpc/sysdev/xics/ics-opal.c b/arch/powerpc/sysdev/xics/ics-opal.c -index 3c6ee1b..4ba554e 100644 ---- a/arch/powerpc/sysdev/xics/ics-opal.c -+++ b/arch/powerpc/sysdev/xics/ics-opal.c -@@ -73,7 +73,7 @@ static unsigned int ics_opal_startup(struct 
irq_data *d) - * at that level, so we do it here by hand. - */ - if (d->msi_desc) -- unmask_msi_irq(d); -+ pci_msi_unmask_irq(d); - #endif - - /* unmask it */ -diff --git a/arch/powerpc/sysdev/xics/ics-rtas.c b/arch/powerpc/sysdev/xics/ics-rtas.c -index 936575d..bc81335 100644 ---- a/arch/powerpc/sysdev/xics/ics-rtas.c -+++ b/arch/powerpc/sysdev/xics/ics-rtas.c -@@ -76,7 +76,7 @@ static unsigned int ics_rtas_startup(struct irq_data *d) - * at that level, so we do it here by hand. - */ - if (d->msi_desc) -- unmask_msi_irq(d); -+ pci_msi_unmask_irq(d); - #endif - /* unmask it */ - ics_rtas_unmask_irq(d); -diff --git a/arch/powerpc/sysdev/xics/xics-common.c b/arch/powerpc/sysdev/xics/xics-common.c -index fe0cca4..13ab716 100644 ---- a/arch/powerpc/sysdev/xics/xics-common.c -+++ b/arch/powerpc/sysdev/xics/xics-common.c -@@ -300,7 +300,8 @@ int xics_get_irq_server(unsigned int virq, const struct cpumask *cpumask, - } - #endif /* CONFIG_SMP */ - --static int xics_host_match(struct irq_domain *h, struct device_node *node) -+static int xics_host_match(struct irq_domain *h, struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - struct ics *ics; - -diff --git a/arch/s390/pci/pci.c b/arch/s390/pci/pci.c -index 2fa7b14..d59c825 100644 ---- a/arch/s390/pci/pci.c -+++ b/arch/s390/pci/pci.c -@@ -50,8 +50,8 @@ static DEFINE_SPINLOCK(zpci_list_lock); - - static struct irq_chip zpci_irq_chip = { - .name = "zPCI", -- .irq_unmask = unmask_msi_irq, -- .irq_mask = mask_msi_irq, -+ .irq_unmask = pci_msi_unmask_irq, -+ .irq_mask = pci_msi_mask_irq, - }; - - static DECLARE_BITMAP(zpci_domain, ZPCI_NR_DEVICES); -@@ -403,7 +403,7 @@ int arch_setup_msi_irqs(struct pci_dev *pdev, int nvec, int type) - msg.data = hwirq; - msg.address_lo = zdev->msi_addr & 0xffffffff; - msg.address_hi = zdev->msi_addr >> 32; -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - airq_iv_set_data(zdev->aibv, hwirq, irq); - hwirq++; - } -@@ -448,9 +448,9 @@ void 
arch_teardown_msi_irqs(struct pci_dev *pdev) - /* Release MSI interrupts */ - list_for_each_entry(msi, &pdev->msi_list, list) { - if (msi->msi_attrib.is_msix) -- default_msix_mask_irq(msi, 1); -+ __pci_msix_desc_mask_irq(msi, 1); - else -- default_msi_mask_irq(msi, 1, 1); -+ __pci_msi_desc_mask_irq(msi, 1, 1); - irq_set_msi_desc(msi->irq, NULL); - irq_free_desc(msi->irq); - msi->msg.address_lo = 0; -diff --git a/arch/sparc/kernel/pci_msi.c b/arch/sparc/kernel/pci_msi.c -index 580651a..84e16d8 100644 ---- a/arch/sparc/kernel/pci_msi.c -+++ b/arch/sparc/kernel/pci_msi.c -@@ -111,10 +111,10 @@ static void free_msi(struct pci_pbm_info *pbm, int msi_num) - - static struct irq_chip msi_irq = { - .name = "PCI-MSI", -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, - /* XXX affinity XXX */ - }; - -@@ -161,7 +161,7 @@ static int sparc64_setup_msi_irq(unsigned int *irq_p, - msg.data = msi; - - irq_set_msi_desc(*irq_p, entry); -- write_msi_msg(*irq_p, &msg); -+ pci_write_msi_msg(*irq_p, &msg); - - return 0; - -diff --git a/arch/tile/kernel/pci_gx.c b/arch/tile/kernel/pci_gx.c -index e39f9c5..e717af2 100644 ---- a/arch/tile/kernel/pci_gx.c -+++ b/arch/tile/kernel/pci_gx.c -@@ -1453,7 +1453,7 @@ static struct pci_ops tile_cfg_ops = { - static unsigned int tilegx_msi_startup(struct irq_data *d) - { - if (d->msi_desc) -- unmask_msi_irq(d); -+ pci_msi_unmask_irq(d); - - return 0; - } -@@ -1465,14 +1465,14 @@ static void tilegx_msi_ack(struct irq_data *d) - - static void tilegx_msi_mask(struct irq_data *d) - { -- mask_msi_irq(d); -+ pci_msi_mask_irq(d); - __insn_mtspr(SPR_IPI_MASK_SET_K, 1UL << d->irq); - } - - static void tilegx_msi_unmask(struct irq_data *d) - { - __insn_mtspr(SPR_IPI_MASK_RESET_K, 1UL << d->irq); -- unmask_msi_irq(d); -+ pci_msi_unmask_irq(d); - 
} - - static struct irq_chip tilegx_msi_chip = { -@@ -1590,7 +1590,7 @@ int arch_setup_msi_irq(struct pci_dev *pdev, struct msi_desc *desc) - msg.address_hi = msi_addr >> 32; - msg.address_lo = msi_addr & 0xffffffff; - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - irq_set_chip_and_handler(irq, &tilegx_msi_chip, handle_level_irq); - irq_set_handler_data(irq, controller); - -diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h -index e45e4da..f58a9c7 100644 ---- a/arch/x86/include/asm/x86_init.h -+++ b/arch/x86/include/asm/x86_init.h -@@ -172,7 +172,6 @@ struct x86_platform_ops { - - struct pci_dev; - struct msi_msg; --struct msi_desc; - - struct x86_msi_ops { - int (*setup_msi_irqs)(struct pci_dev *dev, int nvec, int type); -@@ -183,8 +182,6 @@ struct x86_msi_ops { - void (*teardown_msi_irqs)(struct pci_dev *dev); - void (*restore_msi_irqs)(struct pci_dev *dev); - int (*setup_hpet_msi)(unsigned int irq, unsigned int id); -- u32 (*msi_mask_irq)(struct msi_desc *desc, u32 mask, u32 flag); -- u32 (*msix_mask_irq)(struct msi_desc *desc, u32 flag); - }; - - struct IO_APIC_route_entry; -diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c -index 1183d54..7ffe0a2 100644 ---- a/arch/x86/kernel/apic/io_apic.c -+++ b/arch/x86/kernel/apic/io_apic.c -@@ -3158,7 +3158,7 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) - msg.address_lo &= ~MSI_ADDR_DEST_ID_MASK; - msg.address_lo |= MSI_ADDR_DEST_ID(dest); - -- __write_msi_msg(data->msi_desc, &msg); -+ __pci_write_msi_msg(data->msi_desc, &msg); - - return IRQ_SET_MASK_OK_NOCOPY; - } -@@ -3169,8 +3169,8 @@ msi_set_affinity(struct irq_data *data, const struct cpumask *mask, bool force) - */ - static struct irq_chip msi_chip = { - .name = "PCI-MSI", -- .irq_unmask = unmask_msi_irq, -- .irq_mask = mask_msi_irq, -+ .irq_unmask = pci_msi_unmask_irq, -+ .irq_mask = pci_msi_mask_irq, - .irq_ack = ack_apic_edge, - .irq_set_affinity = 
msi_set_affinity, - .irq_retrigger = ioapic_retrigger_irq, -@@ -3196,7 +3196,7 @@ int setup_msi_irq(struct pci_dev *dev, struct msi_desc *msidesc, - * MSI message denotes a contiguous group of IRQs, written for 0th IRQ. - */ - if (!irq_offset) -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - - setup_remapped_irq(irq, irq_cfg(irq), chip); - -diff --git a/arch/x86/kernel/x86_init.c b/arch/x86/kernel/x86_init.c -index e48b674..234b072 100644 ---- a/arch/x86/kernel/x86_init.c -+++ b/arch/x86/kernel/x86_init.c -@@ -116,8 +116,6 @@ struct x86_msi_ops x86_msi = { - .teardown_msi_irqs = default_teardown_msi_irqs, - .restore_msi_irqs = default_restore_msi_irqs, - .setup_hpet_msi = default_setup_hpet_msi, -- .msi_mask_irq = default_msi_mask_irq, -- .msix_mask_irq = default_msix_mask_irq, - }; - - /* MSI arch specific hooks */ -@@ -140,14 +138,6 @@ void arch_restore_msi_irqs(struct pci_dev *dev) - { - x86_msi.restore_msi_irqs(dev); - } --u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) --{ -- return x86_msi.msi_mask_irq(desc, mask, flag); --} --u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag) --{ -- return x86_msi.msix_mask_irq(desc, flag); --} - #endif - - struct x86_io_apic_ops x86_io_apic_ops = { -diff --git a/arch/x86/pci/bus_numa.c b/arch/x86/pci/bus_numa.c -index f3a2cfc..7bcf06a 100644 ---- a/arch/x86/pci/bus_numa.c -+++ b/arch/x86/pci/bus_numa.c -@@ -31,7 +31,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources) - { - struct pci_root_info *info = x86_find_pci_root_info(bus); - struct pci_root_res *root_res; -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - bool found = false; - - if (!info) -@@ -41,7 +41,7 @@ void x86_pci_root_bus_resources(int bus, struct list_head *resources) - bus); - - /* already added by acpi ? 
*/ -- list_for_each_entry(window, resources, list) -+ resource_list_for_each_entry(window, resources) - if (window->res->flags & IORESOURCE_BUS) { - found = true; - break; -diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c -index 6b3cf7c..4f6844b 100644 ---- a/arch/x86/pci/xen.c -+++ b/arch/x86/pci/xen.c -@@ -229,7 +229,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - return 1; - - list_for_each_entry(msidesc, &dev->msi_list, list) { -- __read_msi_msg(msidesc, &msg); -+ __pci_read_msi_msg(msidesc, &msg); - pirq = MSI_ADDR_EXT_DEST_ID(msg.address_hi) | - ((msg.address_lo >> MSI_ADDR_DEST_ID_SHIFT) & 0xff); - if (msg.data != XEN_PIRQ_MSI_DATA || -@@ -240,7 +240,7 @@ static int xen_hvm_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - goto error; - } - xen_msi_compose_msg(dev, pirq, &msg); -- __write_msi_msg(msidesc, &msg); -+ __pci_write_msi_msg(msidesc, &msg); - dev_dbg(&dev->dev, "xen: msi bound to pirq=%d\n", pirq); - } else { - dev_dbg(&dev->dev, -@@ -394,14 +394,7 @@ static void xen_teardown_msi_irq(unsigned int irq) - { - xen_destroy_irq(irq); - } --static u32 xen_nop_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) --{ -- return 0; --} --static u32 xen_nop_msix_mask_irq(struct msi_desc *desc, u32 flag) --{ -- return 0; --} -+ - #endif - - int __init pci_xen_init(void) -@@ -425,8 +418,7 @@ int __init pci_xen_init(void) - x86_msi.setup_msi_irqs = xen_setup_msi_irqs; - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; - x86_msi.teardown_msi_irqs = xen_teardown_msi_irqs; -- x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; -- x86_msi.msix_mask_irq = xen_nop_msix_mask_irq; -+ pci_msi_ignore_mask = 1; - #endif - return 0; - } -@@ -460,8 +452,7 @@ int __init pci_xen_initial_domain(void) - x86_msi.setup_msi_irqs = xen_initdom_setup_msi_irqs; - x86_msi.teardown_msi_irq = xen_teardown_msi_irq; - x86_msi.restore_msi_irqs = xen_initdom_restore_msi_irqs; -- x86_msi.msi_mask_irq = xen_nop_msi_mask_irq; -- x86_msi.msix_mask_irq = 
xen_nop_msix_mask_irq; -+ pci_msi_ignore_mask = 1; - #endif - __acpi_register_gsi = acpi_register_gsi_xen; - /* Pre-allocate legacy irqs */ -diff --git a/drivers/acpi/acpi_lpss.c b/drivers/acpi/acpi_lpss.c -index fdb5701..0ad0ce6 100644 ---- a/drivers/acpi/acpi_lpss.c -+++ b/drivers/acpi/acpi_lpss.c -@@ -325,7 +325,7 @@ static int acpi_lpss_create_device(struct acpi_device *adev, - { - struct lpss_device_desc *dev_desc; - struct lpss_private_data *pdata; -- struct resource_list_entry *rentry; -+ struct resource_entry *rentry; - struct list_head resource_list; - struct platform_device *pdev; - int ret; -@@ -345,12 +345,12 @@ static int acpi_lpss_create_device(struct acpi_device *adev, - goto err_out; - - list_for_each_entry(rentry, &resource_list, node) -- if (resource_type(&rentry->res) == IORESOURCE_MEM) { -+ if (resource_type(rentry->res) == IORESOURCE_MEM) { - if (dev_desc->prv_size_override) - pdata->mmio_size = dev_desc->prv_size_override; - else -- pdata->mmio_size = resource_size(&rentry->res); -- pdata->mmio_base = ioremap(rentry->res.start, -+ pdata->mmio_size = resource_size(rentry->res); -+ pdata->mmio_base = ioremap(rentry->res->start, - pdata->mmio_size); - break; - } -diff --git a/drivers/acpi/acpi_platform.c b/drivers/acpi/acpi_platform.c -index 6ba8beb..1284138 100644 ---- a/drivers/acpi/acpi_platform.c -+++ b/drivers/acpi/acpi_platform.c -@@ -45,7 +45,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) - struct platform_device *pdev = NULL; - struct acpi_device *acpi_parent; - struct platform_device_info pdevinfo; -- struct resource_list_entry *rentry; -+ struct resource_entry *rentry; - struct list_head resource_list; - struct resource *resources = NULL; - int count; -@@ -71,7 +71,7 @@ struct platform_device *acpi_create_platform_device(struct acpi_device *adev) - } - count = 0; - list_for_each_entry(rentry, &resource_list, node) -- resources[count++] = rentry->res; -+ resources[count++] = *rentry->res; - - 
acpi_dev_free_resource_list(&resource_list); - } -diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c -index 2ba8f02..e7f4aa0 100644 ---- a/drivers/acpi/resource.c -+++ b/drivers/acpi/resource.c -@@ -415,12 +415,7 @@ EXPORT_SYMBOL_GPL(acpi_dev_resource_interrupt); - */ - void acpi_dev_free_resource_list(struct list_head *list) - { -- struct resource_list_entry *rentry, *re; -- -- list_for_each_entry_safe(rentry, re, list, node) { -- list_del(&rentry->node); -- kfree(rentry); -- } -+ resource_list_free(list); - } - EXPORT_SYMBOL_GPL(acpi_dev_free_resource_list); - -@@ -435,15 +430,15 @@ struct res_proc_context { - static acpi_status acpi_dev_new_resource_entry(struct resource *r, - struct res_proc_context *c) - { -- struct resource_list_entry *rentry; -+ struct resource_entry *rentry; - -- rentry = kmalloc(sizeof(*rentry), GFP_KERNEL); -+ rentry = resource_list_create_entry(NULL, 0); - if (!rentry) { - c->error = -ENOMEM; - return AE_NO_MEMORY; - } -- rentry->res = *r; -- list_add_tail(&rentry->node, c->list); -+ *rentry->res = *r; -+ resource_list_add_tail(rentry, c->list); - c->count++; - return AE_OK; - } -@@ -503,7 +498,7 @@ static acpi_status acpi_dev_process_resource(struct acpi_resource *ares, - * returned as the final error code. - * - * The resultant struct resource objects are put on the list pointed to by -- * @list, that must be empty initially, as members of struct resource_list_entry -+ * @list, that must be empty initially, as members of struct resource_entry - * objects. Callers of this routine should use %acpi_dev_free_resource_list() to - * free that list. 
- * -diff --git a/drivers/base/core.c b/drivers/base/core.c -index 842d047..4c7a18f 100644 ---- a/drivers/base/core.c -+++ b/drivers/base/core.c -@@ -661,6 +661,9 @@ void device_initialize(struct device *dev) - INIT_LIST_HEAD(&dev->devres_head); - device_pm_init(dev); - set_dev_node(dev, -1); -+#ifdef CONFIG_GENERIC_MSI_IRQ -+ INIT_LIST_HEAD(&dev->msi_list); -+#endif - } - EXPORT_SYMBOL_GPL(device_initialize); - -diff --git a/drivers/base/platform.c b/drivers/base/platform.c -index 317e0e4..b387fb9 100644 ---- a/drivers/base/platform.c -+++ b/drivers/base/platform.c -@@ -1011,6 +1011,7 @@ int __init platform_bus_init(void) - error = bus_register(&platform_bus_type); - if (error) - device_unregister(&platform_bus); -+ of_platform_register_reconfig_notifier(); - return error; - } - -diff --git a/drivers/dma/acpi-dma.c b/drivers/dma/acpi-dma.c -index de361a1..5a63564 100644 ---- a/drivers/dma/acpi-dma.c -+++ b/drivers/dma/acpi-dma.c -@@ -43,7 +43,7 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, - { - const struct acpi_csrt_shared_info *si; - struct list_head resource_list; -- struct resource_list_entry *rentry; -+ struct resource_entry *rentry; - resource_size_t mem = 0, irq = 0; - int ret; - -@@ -56,10 +56,10 @@ static int acpi_dma_parse_resource_group(const struct acpi_csrt_group *grp, - return 0; - - list_for_each_entry(rentry, &resource_list, node) { -- if (resource_type(&rentry->res) == IORESOURCE_MEM) -- mem = rentry->res.start; -- else if (resource_type(&rentry->res) == IORESOURCE_IRQ) -- irq = rentry->res.start; -+ if (resource_type(rentry->res) == IORESOURCE_MEM) -+ mem = rentry->res->start; -+ else if (resource_type(rentry->res) == IORESOURCE_IRQ) -+ irq = rentry->res->start; - } - - acpi_dev_free_resource_list(&resource_list); -diff --git a/drivers/iommu/Kconfig b/drivers/iommu/Kconfig -index dd51122..2cdcc76 100644 ---- a/drivers/iommu/Kconfig -+++ b/drivers/iommu/Kconfig -@@ -13,9 +13,35 @@ menuconfig IOMMU_SUPPORT - - if 
IOMMU_SUPPORT - -+menu "Generic IOMMU Pagetable Support" -+ -+# Selected by the actual pagetable implementations -+config IOMMU_IO_PGTABLE -+ bool -+ -+config IOMMU_IO_PGTABLE_LPAE -+ bool "ARMv7/v8 Long Descriptor Format" -+ select IOMMU_IO_PGTABLE -+ help -+ Enable support for the ARM long descriptor pagetable format. -+ This allocator supports 4K/2M/1G, 16K/32M and 64K/512M page -+ sizes at both stage-1 and stage-2, as well as address spaces -+ up to 48-bits in size. -+ -+config IOMMU_IO_PGTABLE_LPAE_SELFTEST -+ bool "LPAE selftests" -+ depends on IOMMU_IO_PGTABLE_LPAE -+ help -+ Enable self-tests for LPAE page table allocator. This performs -+ a series of page-table consistency checks during boot. -+ -+ If unsure, say N here. -+ -+endmenu -+ - config OF_IOMMU - def_bool y -- depends on OF -+ depends on OF && IOMMU_API - - config FSL_PAMU - bool "Freescale IOMMU support" -@@ -291,13 +317,13 @@ config SPAPR_TCE_IOMMU - - config ARM_SMMU - bool "ARM Ltd. System MMU (SMMU) Support" -- depends on ARM64 || (ARM_LPAE && OF) -+ depends on ARM64 || ARM - select IOMMU_API -+ select IOMMU_IO_PGTABLE_LPAE - select ARM_DMA_USE_IOMMU if ARM - help - Support for implementations of the ARM System MMU architecture -- versions 1 and 2. The driver supports both v7l and v8l table -- formats with 4k and 64k page sizes. -+ versions 1 and 2. - - Say Y here if your SoC includes an IOMMU device implementing - the ARM SMMU architecture. 
-diff --git a/drivers/iommu/Makefile b/drivers/iommu/Makefile -index 16edef7..269cdd8 100644 ---- a/drivers/iommu/Makefile -+++ b/drivers/iommu/Makefile -@@ -1,6 +1,8 @@ - obj-$(CONFIG_IOMMU_API) += iommu.o - obj-$(CONFIG_IOMMU_API) += iommu-traces.o - obj-$(CONFIG_IOMMU_API) += iommu-sysfs.o -+obj-$(CONFIG_IOMMU_IO_PGTABLE) += io-pgtable.o -+obj-$(CONFIG_IOMMU_IO_PGTABLE_LPAE) += io-pgtable-arm.o - obj-$(CONFIG_OF_IOMMU) += of_iommu.o - obj-$(CONFIG_MSM_IOMMU) += msm_iommu.o msm_iommu_dev.o - obj-$(CONFIG_AMD_IOMMU) += amd_iommu.o amd_iommu_init.o -diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c -index af3daf8..f7131fa 100644 ---- a/drivers/iommu/amd_iommu.c -+++ b/drivers/iommu/amd_iommu.c -@@ -343,8 +343,9 @@ static u16 get_alias(struct device *dev) - */ - if (pci_alias == devid && - PCI_BUS_NUM(ivrs_alias) == pdev->bus->number) { -- pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; -- pdev->dma_alias_devfn = ivrs_alias & 0xff; -+ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; -+ pdev->dma_alias_devid = PCI_DEVID(pdev->bus->number, -+ ivrs_alias & 0xff); - pr_info("AMD-Vi: Added PCI DMA alias %02x.%d for %s\n", - PCI_SLOT(ivrs_alias), PCI_FUNC(ivrs_alias), - dev_name(dev)); -@@ -3432,6 +3433,7 @@ static const struct iommu_ops amd_iommu_ops = { - .detach_dev = amd_iommu_detach_device, - .map = amd_iommu_map, - .unmap = amd_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = amd_iommu_iova_to_phys, - .pgsize_bitmap = AMD_IOMMU_PGSIZES, - }; -diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c -index 60558f7..10e584b 100644 ---- a/drivers/iommu/arm-smmu.c -+++ b/drivers/iommu/arm-smmu.c -@@ -23,8 +23,6 @@ - * - Stream-matching and stream-indexing - * - v7/v8 long-descriptor format - * - Non-secure access to the SMMU -- * - 4k and 64k pages, with contiguous pte hints. 
-- * - Up to 48-bit addressing (dependent on VA_BITS) - * - Context fault reporting - */ - -@@ -36,7 +34,7 @@ - #include - #include - #include --#include -+#include - #include - #include - #include -@@ -46,6 +44,16 @@ - - #include - -+#include "io-pgtable.h" -+ -+#ifdef CONFIG_FSL_MC_BUS -+#include <../drivers/staging/fsl-mc/include/mc.h> -+#endif -+ -+#ifdef CONFIG_PCI_LAYERSCAPE -+#include <../drivers/pci/host/pci-layerscape.h> -+#endif -+ - #include - - /* Maximum number of stream IDs assigned to a single device */ -@@ -71,40 +79,6 @@ - ((smmu->options & ARM_SMMU_OPT_SECURE_CFG_ACCESS) \ - ? 0x400 : 0)) - --/* Page table bits */ --#define ARM_SMMU_PTE_XN (((pteval_t)3) << 53) --#define ARM_SMMU_PTE_CONT (((pteval_t)1) << 52) --#define ARM_SMMU_PTE_AF (((pteval_t)1) << 10) --#define ARM_SMMU_PTE_SH_NS (((pteval_t)0) << 8) --#define ARM_SMMU_PTE_SH_OS (((pteval_t)2) << 8) --#define ARM_SMMU_PTE_SH_IS (((pteval_t)3) << 8) --#define ARM_SMMU_PTE_PAGE (((pteval_t)3) << 0) -- --#if PAGE_SIZE == SZ_4K --#define ARM_SMMU_PTE_CONT_ENTRIES 16 --#elif PAGE_SIZE == SZ_64K --#define ARM_SMMU_PTE_CONT_ENTRIES 32 --#else --#define ARM_SMMU_PTE_CONT_ENTRIES 1 --#endif -- --#define ARM_SMMU_PTE_CONT_SIZE (PAGE_SIZE * ARM_SMMU_PTE_CONT_ENTRIES) --#define ARM_SMMU_PTE_CONT_MASK (~(ARM_SMMU_PTE_CONT_SIZE - 1)) -- --/* Stage-1 PTE */ --#define ARM_SMMU_PTE_AP_UNPRIV (((pteval_t)1) << 6) --#define ARM_SMMU_PTE_AP_RDONLY (((pteval_t)2) << 6) --#define ARM_SMMU_PTE_ATTRINDX_SHIFT 2 --#define ARM_SMMU_PTE_nG (((pteval_t)1) << 11) -- --/* Stage-2 PTE */ --#define ARM_SMMU_PTE_HAP_FAULT (((pteval_t)0) << 6) --#define ARM_SMMU_PTE_HAP_READ (((pteval_t)1) << 6) --#define ARM_SMMU_PTE_HAP_WRITE (((pteval_t)2) << 6) --#define ARM_SMMU_PTE_MEMATTR_OIWB (((pteval_t)0xf) << 2) --#define ARM_SMMU_PTE_MEMATTR_NC (((pteval_t)0x5) << 2) --#define ARM_SMMU_PTE_MEMATTR_DEV (((pteval_t)0x1) << 2) -- - /* Configuration registers */ - #define ARM_SMMU_GR0_sCR0 0x0 - #define sCR0_CLIENTPD (1 << 0) -@@ 
-132,17 +106,12 @@ - #define ARM_SMMU_GR0_sGFSYNR0 0x50 - #define ARM_SMMU_GR0_sGFSYNR1 0x54 - #define ARM_SMMU_GR0_sGFSYNR2 0x58 --#define ARM_SMMU_GR0_PIDR0 0xfe0 --#define ARM_SMMU_GR0_PIDR1 0xfe4 --#define ARM_SMMU_GR0_PIDR2 0xfe8 - - #define ID0_S1TS (1 << 30) - #define ID0_S2TS (1 << 29) - #define ID0_NTS (1 << 28) - #define ID0_SMS (1 << 27) --#define ID0_PTFS_SHIFT 24 --#define ID0_PTFS_MASK 0x2 --#define ID0_PTFS_V8_ONLY 0x2 -+#define ID0_ATOSNS (1 << 26) - #define ID0_CTTW (1 << 14) - #define ID0_NUMIRPT_SHIFT 16 - #define ID0_NUMIRPT_MASK 0xff -@@ -169,11 +138,7 @@ - #define ID2_PTFS_16K (1 << 13) - #define ID2_PTFS_64K (1 << 14) - --#define PIDR2_ARCH_SHIFT 4 --#define PIDR2_ARCH_MASK 0xf -- - /* Global TLB invalidation */ --#define ARM_SMMU_GR0_STLBIALL 0x60 - #define ARM_SMMU_GR0_TLBIVMID 0x64 - #define ARM_SMMU_GR0_TLBIALLNSNH 0x68 - #define ARM_SMMU_GR0_TLBIALLH 0x6c -@@ -231,13 +196,25 @@ - #define ARM_SMMU_CB_TTBCR2 0x10 - #define ARM_SMMU_CB_TTBR0_LO 0x20 - #define ARM_SMMU_CB_TTBR0_HI 0x24 -+#define ARM_SMMU_CB_TTBR1_LO 0x28 -+#define ARM_SMMU_CB_TTBR1_HI 0x2c - #define ARM_SMMU_CB_TTBCR 0x30 - #define ARM_SMMU_CB_S1_MAIR0 0x38 -+#define ARM_SMMU_CB_S1_MAIR1 0x3c -+#define ARM_SMMU_CB_PAR_LO 0x50 -+#define ARM_SMMU_CB_PAR_HI 0x54 - #define ARM_SMMU_CB_FSR 0x58 - #define ARM_SMMU_CB_FAR_LO 0x60 - #define ARM_SMMU_CB_FAR_HI 0x64 - #define ARM_SMMU_CB_FSYNR0 0x68 -+#define ARM_SMMU_CB_S1_TLBIVA 0x600 - #define ARM_SMMU_CB_S1_TLBIASID 0x610 -+#define ARM_SMMU_CB_S1_TLBIVAL 0x620 -+#define ARM_SMMU_CB_S2_TLBIIPAS2 0x630 -+#define ARM_SMMU_CB_S2_TLBIIPAS2L 0x638 -+#define ARM_SMMU_CB_ATS1PR_LO 0x800 -+#define ARM_SMMU_CB_ATS1PR_HI 0x804 -+#define ARM_SMMU_CB_ATSR 0x8f0 - - #define SCTLR_S1_ASIDPNE (1 << 12) - #define SCTLR_CFCFG (1 << 7) -@@ -249,64 +226,17 @@ - #define SCTLR_M (1 << 0) - #define SCTLR_EAE_SBOP (SCTLR_AFE | SCTLR_TRE) - --#define RESUME_RETRY (0 << 0) --#define RESUME_TERMINATE (1 << 0) -- --#define TTBCR_EAE (1 << 31) -+#define 
CB_PAR_F (1 << 0) - --#define TTBCR_PASIZE_SHIFT 16 --#define TTBCR_PASIZE_MASK 0x7 -+#define ATSR_ACTIVE (1 << 0) - --#define TTBCR_TG0_4K (0 << 14) --#define TTBCR_TG0_64K (1 << 14) -- --#define TTBCR_SH0_SHIFT 12 --#define TTBCR_SH0_MASK 0x3 --#define TTBCR_SH_NS 0 --#define TTBCR_SH_OS 2 --#define TTBCR_SH_IS 3 -- --#define TTBCR_ORGN0_SHIFT 10 --#define TTBCR_IRGN0_SHIFT 8 --#define TTBCR_RGN_MASK 0x3 --#define TTBCR_RGN_NC 0 --#define TTBCR_RGN_WBWA 1 --#define TTBCR_RGN_WT 2 --#define TTBCR_RGN_WB 3 -- --#define TTBCR_SL0_SHIFT 6 --#define TTBCR_SL0_MASK 0x3 --#define TTBCR_SL0_LVL_2 0 --#define TTBCR_SL0_LVL_1 1 -- --#define TTBCR_T1SZ_SHIFT 16 --#define TTBCR_T0SZ_SHIFT 0 --#define TTBCR_SZ_MASK 0xf -+#define RESUME_RETRY (0 << 0) -+#define RESUME_TERMINATE (1 << 0) - - #define TTBCR2_SEP_SHIFT 15 --#define TTBCR2_SEP_MASK 0x7 -- --#define TTBCR2_PASIZE_SHIFT 0 --#define TTBCR2_PASIZE_MASK 0x7 -- --/* Common definitions for PASize and SEP fields */ --#define TTBCR2_ADDR_32 0 --#define TTBCR2_ADDR_36 1 --#define TTBCR2_ADDR_40 2 --#define TTBCR2_ADDR_42 3 --#define TTBCR2_ADDR_44 4 --#define TTBCR2_ADDR_48 5 -- --#define TTBRn_HI_ASID_SHIFT 16 -- --#define MAIR_ATTR_SHIFT(n) ((n) << 3) --#define MAIR_ATTR_MASK 0xff --#define MAIR_ATTR_DEVICE 0x04 --#define MAIR_ATTR_NC 0x44 --#define MAIR_ATTR_WBRWA 0xff --#define MAIR_ATTR_IDX_NC 0 --#define MAIR_ATTR_IDX_CACHE 1 --#define MAIR_ATTR_IDX_DEV 2 -+#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT) -+ -+#define TTBRn_HI_ASID_SHIFT 16 - - #define FSR_MULTI (1 << 31) - #define FSR_SS (1 << 30) -@@ -345,6 +275,7 @@ struct arm_smmu_smr { - struct arm_smmu_master_cfg { - int num_streamids; - u16 streamids[MAX_MASTER_STREAMIDS]; -+ u16 mask; - struct arm_smmu_smr *smrs; - }; - -@@ -366,6 +297,7 @@ struct arm_smmu_device { - #define ARM_SMMU_FEAT_TRANS_S1 (1 << 2) - #define ARM_SMMU_FEAT_TRANS_S2 (1 << 3) - #define ARM_SMMU_FEAT_TRANS_NESTED (1 << 4) -+#define ARM_SMMU_FEAT_TRANS_OPS (1 << 5) - u32 features; - - 
#define ARM_SMMU_OPT_SECURE_CFG_ACCESS (1 << 0) -@@ -380,10 +312,9 @@ struct arm_smmu_device { - u32 num_mapping_groups; - DECLARE_BITMAP(smr_map, ARM_SMMU_MAX_SMRS); - -- unsigned long s1_input_size; -- unsigned long s1_output_size; -- unsigned long s2_input_size; -- unsigned long s2_output_size; -+ unsigned long va_size; -+ unsigned long ipa_size; -+ unsigned long pa_size; - - u32 num_global_irqs; - u32 num_context_irqs; -@@ -397,19 +328,33 @@ struct arm_smmu_cfg { - u8 cbndx; - u8 irptndx; - u32 cbar; -- pgd_t *pgd; - }; - #define INVALID_IRPTNDX 0xff - - #define ARM_SMMU_CB_ASID(cfg) ((cfg)->cbndx) - #define ARM_SMMU_CB_VMID(cfg) ((cfg)->cbndx + 1) - -+enum arm_smmu_domain_stage { -+ ARM_SMMU_DOMAIN_S1 = 0, -+ ARM_SMMU_DOMAIN_S2, -+ ARM_SMMU_DOMAIN_NESTED, -+}; -+ - struct arm_smmu_domain { - struct arm_smmu_device *smmu; -+ struct io_pgtable_ops *pgtbl_ops; -+ spinlock_t pgtbl_lock; - struct arm_smmu_cfg cfg; -- spinlock_t lock; -+ enum arm_smmu_domain_stage stage; -+ struct mutex init_mutex; /* Protects smmu pointer */ -+ struct iommu_domain domain; - }; - -+static struct iommu_ops arm_smmu_ops; -+#ifdef CONFIG_FSL_MC_BUS -+static struct iommu_ops arm_fsl_mc_smmu_ops; -+#endif -+ - static DEFINE_SPINLOCK(arm_smmu_devices_lock); - static LIST_HEAD(arm_smmu_devices); - -@@ -422,6 +367,43 @@ static struct arm_smmu_option_prop arm_smmu_options[] = { - { ARM_SMMU_OPT_SECURE_CFG_ACCESS, "calxeda,smmu-secure-config-access" }, - { 0, NULL}, - }; -+#define CONFIG_AIOP_ERRATA -+#ifdef CONFIG_AIOP_ERRATA -+/* -+ * PL = 1, BMT = 1, VA = 1 -+ */ -+#define AIOP_SMR_VALUE 0x380 -+/* -+ * Following should be set: -+ * SHCFG: 0x3 -+ * MTCFG: 0x1 -+ * MemAttr: 0xf -+ * Type: 0x1 -+ * RACFG: 0x2 -+ * WACFG: 0x2 -+ */ -+#define AIOP_S2CR_VALUE 0xA1FB00 -+ -+static void arm_smmu_aiop_attr_trans(struct arm_smmu_device *smmu) -+{ -+ void __iomem *gr0_base = ARM_SMMU_GR0(smmu); -+ u16 mask = 0x7c7f; -+ int index; -+ u32 reg; -+ /* reserve one smr group for AIOP */ -+ index = 
--smmu->num_mapping_groups; -+ -+ reg = SMR_VALID | AIOP_SMR_VALUE << SMR_ID_SHIFT | -+ mask << SMR_MASK_SHIFT; -+ writel(reg, gr0_base + ARM_SMMU_GR0_SMR(index)); -+ writel(AIOP_S2CR_VALUE, gr0_base + ARM_SMMU_GR0_S2CR(index)); -+} -+#endif -+ -+static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom) -+{ -+ return container_of(dom, struct arm_smmu_domain, domain); -+} - - static void parse_driver_options(struct arm_smmu_device *smmu) - { -@@ -447,6 +429,16 @@ static struct device_node *dev_get_dev_node(struct device *dev) - return bus->bridge->parent->of_node; - } - -+#ifdef CONFIG_FSL_MC_BUS -+ if (dev->bus == &fsl_mc_bus_type) { -+ /* -+ * Get to the MC device tree node. -+ */ -+ while (dev->bus == &fsl_mc_bus_type) -+ dev = dev->parent; -+ } -+#endif -+ - return dev->of_node; - } - -@@ -590,7 +582,7 @@ static void __arm_smmu_free_bitmap(unsigned long *map, int idx) - } - - /* Wait for any pending TLB invalidations to complete */ --static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) -+static void __arm_smmu_tlb_sync(struct arm_smmu_device *smmu) - { - int count = 0; - void __iomem *gr0_base = ARM_SMMU_GR0(smmu); -@@ -608,12 +600,19 @@ static void arm_smmu_tlb_sync(struct arm_smmu_device *smmu) - } - } - --static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) -+static void arm_smmu_tlb_sync(void *cookie) - { -+ struct arm_smmu_domain *smmu_domain = cookie; -+ __arm_smmu_tlb_sync(smmu_domain->smmu); -+} -+ -+static void arm_smmu_tlb_inv_context(void *cookie) -+{ -+ struct arm_smmu_domain *smmu_domain = cookie; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct arm_smmu_device *smmu = smmu_domain->smmu; -- void __iomem *base = ARM_SMMU_GR0(smmu); - bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; -+ void __iomem *base; - - if (stage1) { - base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); -@@ -625,16 +624,83 @@ static void arm_smmu_tlb_inv_context(struct arm_smmu_domain *smmu_domain) - base + 
ARM_SMMU_GR0_TLBIVMID); - } - -- arm_smmu_tlb_sync(smmu); -+ __arm_smmu_tlb_sync(smmu); -+} -+ -+static void arm_smmu_tlb_inv_range_nosync(unsigned long iova, size_t size, -+ bool leaf, void *cookie) -+{ -+ struct arm_smmu_domain *smmu_domain = cookie; -+ struct arm_smmu_cfg *cfg = &smmu_domain->cfg; -+ struct arm_smmu_device *smmu = smmu_domain->smmu; -+ bool stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; -+ void __iomem *reg; -+ -+ if (stage1) { -+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); -+ reg += leaf ? ARM_SMMU_CB_S1_TLBIVAL : ARM_SMMU_CB_S1_TLBIVA; -+ -+ if (!IS_ENABLED(CONFIG_64BIT) || smmu->version == ARM_SMMU_V1) { -+ iova &= ~12UL; -+ iova |= ARM_SMMU_CB_ASID(cfg); -+ writel_relaxed(iova, reg); -+#ifdef CONFIG_64BIT -+ } else { -+ iova >>= 12; -+ iova |= (u64)ARM_SMMU_CB_ASID(cfg) << 48; -+ writeq_relaxed(iova, reg); -+#endif -+ } -+#ifdef CONFIG_64BIT -+ } else if (smmu->version == ARM_SMMU_V2) { -+ reg = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); -+ reg += leaf ? ARM_SMMU_CB_S2_TLBIIPAS2L : -+ ARM_SMMU_CB_S2_TLBIIPAS2; -+ writeq_relaxed(iova >> 12, reg); -+#endif -+ } else { -+ reg = ARM_SMMU_GR0(smmu) + ARM_SMMU_GR0_TLBIVMID; -+ writel_relaxed(ARM_SMMU_CB_VMID(cfg), reg); -+ } -+} -+ -+static void arm_smmu_flush_pgtable(void *addr, size_t size, void *cookie) -+{ -+ struct arm_smmu_domain *smmu_domain = cookie; -+ struct arm_smmu_device *smmu = smmu_domain->smmu; -+ unsigned long offset = (unsigned long)addr & ~PAGE_MASK; -+ -+ -+ /* Ensure new page tables are visible to the hardware walker */ -+ if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { -+ dsb(ishst); -+ } else { -+ /* -+ * If the SMMU can't walk tables in the CPU caches, treat them -+ * like non-coherent DMA since we need to flush the new entries -+ * all the way out to memory. There's no possibility of -+ * recursion here as the SMMU table walker will not be wired -+ * through another SMMU. 
-+ */ -+ dma_map_page(smmu->dev, virt_to_page(addr), offset, size, -+ DMA_TO_DEVICE); -+ } - } - -+static struct iommu_gather_ops arm_smmu_gather_ops = { -+ .tlb_flush_all = arm_smmu_tlb_inv_context, -+ .tlb_add_flush = arm_smmu_tlb_inv_range_nosync, -+ .tlb_sync = arm_smmu_tlb_sync, -+ .flush_pgtable = arm_smmu_flush_pgtable, -+}; -+ - static irqreturn_t arm_smmu_context_fault(int irq, void *dev) - { - int flags, ret; - u32 fsr, far, fsynr, resume; - unsigned long iova; - struct iommu_domain *domain = dev; -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - struct arm_smmu_device *smmu = smmu_domain->smmu; - void __iomem *cb_base; -@@ -705,29 +771,8 @@ static irqreturn_t arm_smmu_global_fault(int irq, void *dev) - return IRQ_HANDLED; - } - --static void arm_smmu_flush_pgtable(struct arm_smmu_device *smmu, void *addr, -- size_t size) --{ -- unsigned long offset = (unsigned long)addr & ~PAGE_MASK; -- -- -- /* Ensure new page tables are visible to the hardware walker */ -- if (smmu->features & ARM_SMMU_FEAT_COHERENT_WALK) { -- dsb(ishst); -- } else { -- /* -- * If the SMMU can't walk tables in the CPU caches, treat them -- * like non-coherent DMA since we need to flush the new entries -- * all the way out to memory. There's no possibility of -- * recursion here as the SMMU table walker will not be wired -- * through another SMMU. 
-- */ -- dma_map_page(smmu->dev, virt_to_page(addr), offset, size, -- DMA_TO_DEVICE); -- } --} -- --static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) -+static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain, -+ struct io_pgtable_cfg *pgtbl_cfg) - { - u32 reg; - bool stage1; -@@ -740,6 +785,20 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) - stage1 = cfg->cbar != CBAR_TYPE_S2_TRANS; - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); - -+ if (smmu->version > ARM_SMMU_V1) { -+ /* -+ * CBA2R. -+ * *Must* be initialised before CBAR thanks to VMID16 -+ * architectural oversight affected some implementations. -+ */ -+#ifdef CONFIG_64BIT -+ reg = CBA2R_RW64_64BIT; -+#else -+ reg = CBA2R_RW64_32BIT; -+#endif -+ writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); -+ } -+ - /* CBAR */ - reg = cfg->cbar; - if (smmu->version == ARM_SMMU_V1) -@@ -757,135 +816,51 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain) - } - writel_relaxed(reg, gr1_base + ARM_SMMU_GR1_CBAR(cfg->cbndx)); - -- if (smmu->version > ARM_SMMU_V1) { -- /* CBA2R */ --#ifdef CONFIG_64BIT -- reg = CBA2R_RW64_64BIT; --#else -- reg = CBA2R_RW64_32BIT; --#endif -- writel_relaxed(reg, -- gr1_base + ARM_SMMU_GR1_CBA2R(cfg->cbndx)); -- -- /* TTBCR2 */ -- switch (smmu->s1_input_size) { -- case 32: -- reg = (TTBCR2_ADDR_32 << TTBCR2_SEP_SHIFT); -- break; -- case 36: -- reg = (TTBCR2_ADDR_36 << TTBCR2_SEP_SHIFT); -- break; -- case 39: -- case 40: -- reg = (TTBCR2_ADDR_40 << TTBCR2_SEP_SHIFT); -- break; -- case 42: -- reg = (TTBCR2_ADDR_42 << TTBCR2_SEP_SHIFT); -- break; -- case 44: -- reg = (TTBCR2_ADDR_44 << TTBCR2_SEP_SHIFT); -- break; -- case 48: -- reg = (TTBCR2_ADDR_48 << TTBCR2_SEP_SHIFT); -- break; -- } -- -- switch (smmu->s1_output_size) { -- case 32: -- reg |= (TTBCR2_ADDR_32 << TTBCR2_PASIZE_SHIFT); -- break; -- case 36: -- reg |= (TTBCR2_ADDR_36 << TTBCR2_PASIZE_SHIFT); -- 
break; -- case 39: -- case 40: -- reg |= (TTBCR2_ADDR_40 << TTBCR2_PASIZE_SHIFT); -- break; -- case 42: -- reg |= (TTBCR2_ADDR_42 << TTBCR2_PASIZE_SHIFT); -- break; -- case 44: -- reg |= (TTBCR2_ADDR_44 << TTBCR2_PASIZE_SHIFT); -- break; -- case 48: -- reg |= (TTBCR2_ADDR_48 << TTBCR2_PASIZE_SHIFT); -- break; -- } -- -- if (stage1) -- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); -- } -+ /* TTBRs */ -+ if (stage1) { -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0]; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[0] >> 32; -+ reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); - -- /* TTBR0 */ -- arm_smmu_flush_pgtable(smmu, cfg->pgd, -- PTRS_PER_PGD * sizeof(pgd_t)); -- reg = __pa(cfg->pgd); -- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); -- reg = (phys_addr_t)__pa(cfg->pgd) >> 32; -- if (stage1) -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1]; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_LO); -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.ttbr[1] >> 32; - reg |= ARM_SMMU_CB_ASID(cfg) << TTBRn_HI_ASID_SHIFT; -- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR1_HI); -+ } else { -+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_LO); -+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vttbr >> 32; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBR0_HI); -+ } - -- /* -- * TTBCR -- * We use long descriptor, with inner-shareable WBWA tables in TTBR0. 
-- */ -- if (smmu->version > ARM_SMMU_V1) { -- if (PAGE_SIZE == SZ_4K) -- reg = TTBCR_TG0_4K; -- else -- reg = TTBCR_TG0_64K; -- -- if (!stage1) { -- reg |= (64 - smmu->s2_input_size) << TTBCR_T0SZ_SHIFT; -- -- switch (smmu->s2_output_size) { -- case 32: -- reg |= (TTBCR2_ADDR_32 << TTBCR_PASIZE_SHIFT); -- break; -- case 36: -- reg |= (TTBCR2_ADDR_36 << TTBCR_PASIZE_SHIFT); -- break; -- case 40: -- reg |= (TTBCR2_ADDR_40 << TTBCR_PASIZE_SHIFT); -- break; -- case 42: -- reg |= (TTBCR2_ADDR_42 << TTBCR_PASIZE_SHIFT); -- break; -- case 44: -- reg |= (TTBCR2_ADDR_44 << TTBCR_PASIZE_SHIFT); -- break; -- case 48: -- reg |= (TTBCR2_ADDR_48 << TTBCR_PASIZE_SHIFT); -- break; -- } -- } else { -- reg |= (64 - smmu->s1_input_size) << TTBCR_T0SZ_SHIFT; -+ /* TTBCR */ -+ if (stage1) { -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); -+ if (smmu->version > ARM_SMMU_V1) { -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32; -+ reg |= TTBCR2_SEP_UPSTREAM; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2); - } - } else { -- reg = 0; -+ reg = pgtbl_cfg->arm_lpae_s2_cfg.vtcr; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); - } - -- reg |= TTBCR_EAE | -- (TTBCR_SH_IS << TTBCR_SH0_SHIFT) | -- (TTBCR_RGN_WBWA << TTBCR_ORGN0_SHIFT) | -- (TTBCR_RGN_WBWA << TTBCR_IRGN0_SHIFT); -- -- if (!stage1) -- reg |= (TTBCR_SL0_LVL_1 << TTBCR_SL0_SHIFT); -- -- writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR); -- -- /* MAIR0 (stage-1 only) */ -+ /* MAIRs (stage-1 only) */ - if (stage1) { -- reg = (MAIR_ATTR_NC << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_NC)) | -- (MAIR_ATTR_WBRWA << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_CACHE)) | -- (MAIR_ATTR_DEVICE << MAIR_ATTR_SHIFT(MAIR_ATTR_IDX_DEV)); -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[0]; - writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR0); -+ reg = pgtbl_cfg->arm_lpae_s1_cfg.mair[1]; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_S1_MAIR1); - } - - /* SCTLR */ -- reg = SCTLR_CFCFG | SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | 
SCTLR_EAE_SBOP; -+ /* Disable stall mode */ -+ reg = SCTLR_CFIE | SCTLR_CFRE | SCTLR_M | SCTLR_EAE_SBOP; - if (stage1) - reg |= SCTLR_S1_ASIDPNE; - #ifdef __BIG_ENDIAN -@@ -898,27 +873,69 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, - struct arm_smmu_device *smmu) - { - int irq, start, ret = 0; -- unsigned long flags; -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ unsigned long ias, oas; -+ struct io_pgtable_ops *pgtbl_ops; -+ struct io_pgtable_cfg pgtbl_cfg; -+ enum io_pgtable_fmt fmt; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - -- spin_lock_irqsave(&smmu_domain->lock, flags); -+ mutex_lock(&smmu_domain->init_mutex); - if (smmu_domain->smmu) - goto out_unlock; - -- if (smmu->features & ARM_SMMU_FEAT_TRANS_NESTED) { -+ /* -+ * Mapping the requested stage onto what we support is surprisingly -+ * complicated, mainly because the spec allows S1+S2 SMMUs without -+ * support for nested translation. That means we end up with the -+ * following table: -+ * -+ * Requested Supported Actual -+ * S1 N S1 -+ * S1 S1+S2 S1 -+ * S1 S2 S2 -+ * S1 S1 S1 -+ * N N N -+ * N S1+S2 S2 -+ * N S2 S2 -+ * N S1 S1 -+ * -+ * Note that you can't actually request stage-2 mappings. -+ */ -+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S1)) -+ smmu_domain->stage = ARM_SMMU_DOMAIN_S2; -+ if (!(smmu->features & ARM_SMMU_FEAT_TRANS_S2)) -+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1; -+ -+ switch (smmu_domain->stage) { -+ case ARM_SMMU_DOMAIN_S1: -+ cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; -+ start = smmu->num_s2_context_banks; -+ ias = smmu->va_size; -+ oas = smmu->ipa_size; -+ if (IS_ENABLED(CONFIG_64BIT)) -+ fmt = ARM_64_LPAE_S1; -+ else -+ fmt = ARM_32_LPAE_S1; -+ break; -+ case ARM_SMMU_DOMAIN_NESTED: - /* - * We will likely want to change this if/when KVM gets - * involved. 
- */ -- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; -- start = smmu->num_s2_context_banks; -- } else if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) { -- cfg->cbar = CBAR_TYPE_S1_TRANS_S2_BYPASS; -- start = smmu->num_s2_context_banks; -- } else { -+ case ARM_SMMU_DOMAIN_S2: - cfg->cbar = CBAR_TYPE_S2_TRANS; - start = 0; -+ ias = smmu->ipa_size; -+ oas = smmu->pa_size; -+ if (IS_ENABLED(CONFIG_64BIT)) -+ fmt = ARM_64_LPAE_S2; -+ else -+ fmt = ARM_32_LPAE_S2; -+ break; -+ default: -+ ret = -EINVAL; -+ goto out_unlock; - } - - ret = __arm_smmu_alloc_bitmap(smmu->context_map, start, -@@ -934,10 +951,33 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, - cfg->irptndx = cfg->cbndx; - } - -- ACCESS_ONCE(smmu_domain->smmu) = smmu; -- arm_smmu_init_context_bank(smmu_domain); -- spin_unlock_irqrestore(&smmu_domain->lock, flags); -+ pgtbl_cfg = (struct io_pgtable_cfg) { -+ .pgsize_bitmap = arm_smmu_ops.pgsize_bitmap, -+ .ias = ias, -+ .oas = oas, -+ .tlb = &arm_smmu_gather_ops, -+ }; -+ -+ smmu_domain->smmu = smmu; -+ pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain); -+ if (!pgtbl_ops) { -+ ret = -ENOMEM; -+ goto out_clear_smmu; -+ } -+ -+ /* Update our support page sizes to reflect the page table format */ -+ arm_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; -+#ifdef CONFIG_FSL_MC_BUS -+ arm_fsl_mc_smmu_ops.pgsize_bitmap = pgtbl_cfg.pgsize_bitmap; -+#endif -+ -+ /* Initialise the context bank with our page table cfg */ -+ arm_smmu_init_context_bank(smmu_domain, &pgtbl_cfg); - -+ /* -+ * Request context fault interrupt. Do this last to avoid the -+ * handler seeing a half-initialised domain state. 
-+ */ - irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; - ret = request_irq(irq, arm_smmu_context_fault, IRQF_SHARED, - "arm-smmu-context-fault", domain); -@@ -947,16 +987,22 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain, - cfg->irptndx = INVALID_IRPTNDX; - } - -+ mutex_unlock(&smmu_domain->init_mutex); -+ -+ /* Publish page table ops for map/unmap */ -+ smmu_domain->pgtbl_ops = pgtbl_ops; - return 0; - -+out_clear_smmu: -+ smmu_domain->smmu = NULL; - out_unlock: -- spin_unlock_irqrestore(&smmu_domain->lock, flags); -+ mutex_unlock(&smmu_domain->init_mutex); - return ret; - } - - static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) - { -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_device *smmu = smmu_domain->smmu; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; - void __iomem *cb_base; -@@ -965,24 +1011,30 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain) - if (!smmu) - return; - -- /* Disable the context bank and nuke the TLB before freeing it. */ -+ /* -+ * Disable the context bank and free the page tables before freeing -+ * it. -+ */ - cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); - writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR); -- arm_smmu_tlb_inv_context(smmu_domain); - - if (cfg->irptndx != INVALID_IRPTNDX) { - irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx]; - free_irq(irq, domain); - } - -+ if (smmu_domain->pgtbl_ops) -+ free_io_pgtable_ops(smmu_domain->pgtbl_ops); -+ - __arm_smmu_free_bitmap(smmu->context_map, cfg->cbndx); - } - --static int arm_smmu_domain_init(struct iommu_domain *domain) -+static struct iommu_domain *arm_smmu_domain_alloc(unsigned type) - { - struct arm_smmu_domain *smmu_domain; -- pgd_t *pgd; - -+ if (type != IOMMU_DOMAIN_UNMANAGED) -+ return NULL; - /* - * Allocate the domain and initialise some of its data structures. 
- * We can't really do anything meaningful until we've added a -@@ -990,95 +1042,23 @@ static int arm_smmu_domain_init(struct iommu_domain *domain) - */ - smmu_domain = kzalloc(sizeof(*smmu_domain), GFP_KERNEL); - if (!smmu_domain) -- return -ENOMEM; -+ return NULL; - -- pgd = kcalloc(PTRS_PER_PGD, sizeof(pgd_t), GFP_KERNEL); -- if (!pgd) -- goto out_free_domain; -- smmu_domain->cfg.pgd = pgd; -+ mutex_init(&smmu_domain->init_mutex); -+ spin_lock_init(&smmu_domain->pgtbl_lock); - -- spin_lock_init(&smmu_domain->lock); -- domain->priv = smmu_domain; -- return 0; -- --out_free_domain: -- kfree(smmu_domain); -- return -ENOMEM; -+ return &smmu_domain->domain; - } - --static void arm_smmu_free_ptes(pmd_t *pmd) -+static void arm_smmu_domain_free(struct iommu_domain *domain) - { -- pgtable_t table = pmd_pgtable(*pmd); -- -- __free_page(table); --} -- --static void arm_smmu_free_pmds(pud_t *pud) --{ -- int i; -- pmd_t *pmd, *pmd_base = pmd_offset(pud, 0); -- -- pmd = pmd_base; -- for (i = 0; i < PTRS_PER_PMD; ++i) { -- if (pmd_none(*pmd)) -- continue; -- -- arm_smmu_free_ptes(pmd); -- pmd++; -- } -- -- pmd_free(NULL, pmd_base); --} -- --static void arm_smmu_free_puds(pgd_t *pgd) --{ -- int i; -- pud_t *pud, *pud_base = pud_offset(pgd, 0); -- -- pud = pud_base; -- for (i = 0; i < PTRS_PER_PUD; ++i) { -- if (pud_none(*pud)) -- continue; -- -- arm_smmu_free_pmds(pud); -- pud++; -- } -- -- pud_free(NULL, pud_base); --} -- --static void arm_smmu_free_pgtables(struct arm_smmu_domain *smmu_domain) --{ -- int i; -- struct arm_smmu_cfg *cfg = &smmu_domain->cfg; -- pgd_t *pgd, *pgd_base = cfg->pgd; -- -- /* -- * Recursively free the page tables for this domain. We don't -- * care about speculative TLB filling because the tables should -- * not be active in any context bank at this point (SCTLR.M is 0). 
-- */ -- pgd = pgd_base; -- for (i = 0; i < PTRS_PER_PGD; ++i) { -- if (pgd_none(*pgd)) -- continue; -- arm_smmu_free_puds(pgd); -- pgd++; -- } -- -- kfree(pgd_base); --} -- --static void arm_smmu_domain_destroy(struct iommu_domain *domain) --{ -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - - /* - * Free the domain resources. We assume that all devices have - * already been detached. - */ - arm_smmu_destroy_domain_context(domain); -- arm_smmu_free_pgtables(smmu_domain); - kfree(smmu_domain); - } - -@@ -1113,7 +1093,7 @@ static int arm_smmu_master_configure_smrs(struct arm_smmu_device *smmu, - - smrs[i] = (struct arm_smmu_smr) { - .idx = idx, -- .mask = 0, /* We don't currently share SMRs */ -+ .mask = cfg->mask, - .id = cfg->streamids[i], - }; - } -@@ -1209,8 +1189,8 @@ static void arm_smmu_domain_remove_master(struct arm_smmu_domain *smmu_domain, - static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) - { - int ret; -- struct arm_smmu_domain *smmu_domain = domain->priv; -- struct arm_smmu_device *smmu, *dom_smmu; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); -+ struct arm_smmu_device *smmu; - struct arm_smmu_master_cfg *cfg; - - smmu = find_smmu_for_device(dev); -@@ -1224,21 +1204,16 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) - return -EEXIST; - } - -+ /* Ensure that the domain is finalised */ -+ ret = arm_smmu_init_domain_context(domain, smmu); -+ if (IS_ERR_VALUE(ret)) -+ return ret; -+ - /* - * Sanity check the domain. We don't support domains across - * different SMMUs. 
- */ -- dom_smmu = ACCESS_ONCE(smmu_domain->smmu); -- if (!dom_smmu) { -- /* Now that we have a master, we can finalise the domain */ -- ret = arm_smmu_init_domain_context(domain, smmu); -- if (IS_ERR_VALUE(ret)) -- return ret; -- -- dom_smmu = smmu_domain->smmu; -- } -- -- if (dom_smmu != smmu) { -+ if (smmu_domain->smmu != smmu) { - dev_err(dev, - "cannot attach to SMMU %s whilst already attached to domain on SMMU %s\n", - dev_name(smmu_domain->smmu->dev), dev_name(smmu->dev)); -@@ -1258,7 +1233,7 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev) - - static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) - { -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_master_cfg *cfg; - - cfg = find_smmu_master_cfg(dev); -@@ -1269,292 +1244,106 @@ static void arm_smmu_detach_dev(struct iommu_domain *domain, struct device *dev) - arm_smmu_domain_remove_master(smmu_domain, cfg); - } - --static bool arm_smmu_pte_is_contiguous_range(unsigned long addr, -- unsigned long end) --{ -- return !(addr & ~ARM_SMMU_PTE_CONT_MASK) && -- (addr + ARM_SMMU_PTE_CONT_SIZE <= end); --} -- --static int arm_smmu_alloc_init_pte(struct arm_smmu_device *smmu, pmd_t *pmd, -- unsigned long addr, unsigned long end, -- unsigned long pfn, int prot, int stage) --{ -- pte_t *pte, *start; -- pteval_t pteval = ARM_SMMU_PTE_PAGE | ARM_SMMU_PTE_AF | ARM_SMMU_PTE_XN; -- -- if (pmd_none(*pmd)) { -- /* Allocate a new set of tables */ -- pgtable_t table = alloc_page(GFP_ATOMIC|__GFP_ZERO); -- -- if (!table) -- return -ENOMEM; -- -- arm_smmu_flush_pgtable(smmu, page_address(table), PAGE_SIZE); -- pmd_populate(NULL, pmd, table); -- arm_smmu_flush_pgtable(smmu, pmd, sizeof(*pmd)); -- } -- -- if (stage == 1) { -- pteval |= ARM_SMMU_PTE_AP_UNPRIV | ARM_SMMU_PTE_nG; -- if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) -- pteval |= ARM_SMMU_PTE_AP_RDONLY; -- -- if (prot & 
IOMMU_CACHE) -- pteval |= (MAIR_ATTR_IDX_CACHE << -- ARM_SMMU_PTE_ATTRINDX_SHIFT); -- } else { -- pteval |= ARM_SMMU_PTE_HAP_FAULT; -- if (prot & IOMMU_READ) -- pteval |= ARM_SMMU_PTE_HAP_READ; -- if (prot & IOMMU_WRITE) -- pteval |= ARM_SMMU_PTE_HAP_WRITE; -- if (prot & IOMMU_CACHE) -- pteval |= ARM_SMMU_PTE_MEMATTR_OIWB; -- else -- pteval |= ARM_SMMU_PTE_MEMATTR_NC; -- } -- -- /* If no access, create a faulting entry to avoid TLB fills */ -- if (prot & IOMMU_EXEC) -- pteval &= ~ARM_SMMU_PTE_XN; -- else if (!(prot & (IOMMU_READ | IOMMU_WRITE))) -- pteval &= ~ARM_SMMU_PTE_PAGE; -- -- pteval |= ARM_SMMU_PTE_SH_IS; -- start = pmd_page_vaddr(*pmd) + pte_index(addr); -- pte = start; -- -- /* -- * Install the page table entries. This is fairly complicated -- * since we attempt to make use of the contiguous hint in the -- * ptes where possible. The contiguous hint indicates a series -- * of ARM_SMMU_PTE_CONT_ENTRIES ptes mapping a physically -- * contiguous region with the following constraints: -- * -- * - The region start is aligned to ARM_SMMU_PTE_CONT_SIZE -- * - Each pte in the region has the contiguous hint bit set -- * -- * This complicates unmapping (also handled by this code, when -- * neither IOMMU_READ or IOMMU_WRITE are set) because it is -- * possible, yet highly unlikely, that a client may unmap only -- * part of a contiguous range. This requires clearing of the -- * contiguous hint bits in the range before installing the new -- * faulting entries. -- * -- * Note that re-mapping an address range without first unmapping -- * it is not supported, so TLB invalidation is not required here -- * and is instead performed at unmap and domain-init time. 
-- */ -- do { -- int i = 1; -- -- pteval &= ~ARM_SMMU_PTE_CONT; -- -- if (arm_smmu_pte_is_contiguous_range(addr, end)) { -- i = ARM_SMMU_PTE_CONT_ENTRIES; -- pteval |= ARM_SMMU_PTE_CONT; -- } else if (pte_val(*pte) & -- (ARM_SMMU_PTE_CONT | ARM_SMMU_PTE_PAGE)) { -- int j; -- pte_t *cont_start; -- unsigned long idx = pte_index(addr); -- -- idx &= ~(ARM_SMMU_PTE_CONT_ENTRIES - 1); -- cont_start = pmd_page_vaddr(*pmd) + idx; -- for (j = 0; j < ARM_SMMU_PTE_CONT_ENTRIES; ++j) -- pte_val(*(cont_start + j)) &= -- ~ARM_SMMU_PTE_CONT; -- -- arm_smmu_flush_pgtable(smmu, cont_start, -- sizeof(*pte) * -- ARM_SMMU_PTE_CONT_ENTRIES); -- } -- -- do { -- *pte = pfn_pte(pfn, __pgprot(pteval)); -- } while (pte++, pfn++, addr += PAGE_SIZE, --i); -- } while (addr != end); -- -- arm_smmu_flush_pgtable(smmu, start, sizeof(*pte) * (pte - start)); -- return 0; --} -- --static int arm_smmu_alloc_init_pmd(struct arm_smmu_device *smmu, pud_t *pud, -- unsigned long addr, unsigned long end, -- phys_addr_t phys, int prot, int stage) -+static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, -+ phys_addr_t paddr, size_t size, int prot) - { - int ret; -- pmd_t *pmd; -- unsigned long next, pfn = __phys_to_pfn(phys); -- --#ifndef __PAGETABLE_PMD_FOLDED -- if (pud_none(*pud)) { -- pmd = (pmd_t *)get_zeroed_page(GFP_ATOMIC); -- if (!pmd) -- return -ENOMEM; -- -- arm_smmu_flush_pgtable(smmu, pmd, PAGE_SIZE); -- pud_populate(NULL, pud, pmd); -- arm_smmu_flush_pgtable(smmu, pud, sizeof(*pud)); -- -- pmd += pmd_index(addr); -- } else --#endif -- pmd = pmd_offset(pud, addr); -+ unsigned long flags; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); -+ struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; - -- do { -- next = pmd_addr_end(addr, end); -- ret = arm_smmu_alloc_init_pte(smmu, pmd, addr, next, pfn, -- prot, stage); -- phys += next - addr; -- pfn = __phys_to_pfn(phys); -- } while (pmd++, addr = next, addr < end); -+ if (!ops) -+ return -ENODEV; - -+ 
spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); -+ ret = ops->map(ops, iova, paddr, size, prot); -+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; - } - --static int arm_smmu_alloc_init_pud(struct arm_smmu_device *smmu, pgd_t *pgd, -- unsigned long addr, unsigned long end, -- phys_addr_t phys, int prot, int stage) -+static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, -+ size_t size) - { -- int ret = 0; -- pud_t *pud; -- unsigned long next; -- --#ifndef __PAGETABLE_PUD_FOLDED -- if (pgd_none(*pgd)) { -- pud = (pud_t *)get_zeroed_page(GFP_ATOMIC); -- if (!pud) -- return -ENOMEM; -- -- arm_smmu_flush_pgtable(smmu, pud, PAGE_SIZE); -- pgd_populate(NULL, pgd, pud); -- arm_smmu_flush_pgtable(smmu, pgd, sizeof(*pgd)); -- -- pud += pud_index(addr); -- } else --#endif -- pud = pud_offset(pgd, addr); -+ size_t ret; -+ unsigned long flags; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); -+ struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; - -- do { -- next = pud_addr_end(addr, end); -- ret = arm_smmu_alloc_init_pmd(smmu, pud, addr, next, phys, -- prot, stage); -- phys += next - addr; -- } while (pud++, addr = next, addr < end); -+ if (!ops) -+ return 0; - -+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); -+ ret = ops->unmap(ops, iova, size); -+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - return ret; - } - --static int arm_smmu_handle_mapping(struct arm_smmu_domain *smmu_domain, -- unsigned long iova, phys_addr_t paddr, -- size_t size, int prot) -+static phys_addr_t arm_smmu_iova_to_phys_hard(struct iommu_domain *domain, -+ dma_addr_t iova) - { -- int ret, stage; -- unsigned long end; -- phys_addr_t input_mask, output_mask; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - struct arm_smmu_device *smmu = smmu_domain->smmu; - struct arm_smmu_cfg *cfg = &smmu_domain->cfg; -- pgd_t *pgd = cfg->pgd; -- unsigned long flags; -+ struct io_pgtable_ops *ops= 
smmu_domain->pgtbl_ops; -+ struct device *dev = smmu->dev; -+ void __iomem *cb_base; -+ u32 tmp; -+ u64 phys; -+ -+ cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx); - -- if (cfg->cbar == CBAR_TYPE_S2_TRANS) { -- stage = 2; -- input_mask = (1ULL << smmu->s2_input_size) - 1; -- output_mask = (1ULL << smmu->s2_output_size) - 1; -+ if (smmu->version == 1) { -+ u32 reg = iova & ~0xfff; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO); - } else { -- stage = 1; -- input_mask = (1ULL << smmu->s1_input_size) - 1; -- output_mask = (1ULL << smmu->s1_output_size) - 1; -+ u32 reg = iova & ~0xfff; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_LO); -+ reg = ((u64)iova & ~0xfff) >> 32; -+ writel_relaxed(reg, cb_base + ARM_SMMU_CB_ATS1PR_HI); - } - -- if (!pgd) -- return -EINVAL; -- -- if (size & ~PAGE_MASK) -- return -EINVAL; -- -- if ((phys_addr_t)iova & ~input_mask) -- return -ERANGE; -- -- if (paddr & ~output_mask) -- return -ERANGE; -- -- spin_lock_irqsave(&smmu_domain->lock, flags); -- pgd += pgd_index(iova); -- end = iova + size; -- do { -- unsigned long next = pgd_addr_end(iova, end); -- -- ret = arm_smmu_alloc_init_pud(smmu, pgd, iova, next, paddr, -- prot, stage); -- if (ret) -- goto out_unlock; -- -- paddr += next - iova; -- iova = next; -- } while (pgd++, iova != end); -- --out_unlock: -- spin_unlock_irqrestore(&smmu_domain->lock, flags); -- -- return ret; --} -- --static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova, -- phys_addr_t paddr, size_t size, int prot) --{ -- struct arm_smmu_domain *smmu_domain = domain->priv; -- -- if (!smmu_domain) -- return -ENODEV; -+ if (readl_poll_timeout_atomic(cb_base + ARM_SMMU_CB_ATSR, tmp, -+ !(tmp & ATSR_ACTIVE), 5, 50)) { -+ dev_err(dev, -+ "iova to phys timed out on 0x%pad. 
Falling back to software table walk.\n", -+ &iova); -+ return ops->iova_to_phys(ops, iova); -+ } - -- return arm_smmu_handle_mapping(smmu_domain, iova, paddr, size, prot); --} -+ phys = readl_relaxed(cb_base + ARM_SMMU_CB_PAR_LO); -+ phys |= ((u64)readl_relaxed(cb_base + ARM_SMMU_CB_PAR_HI)) << 32; - --static size_t arm_smmu_unmap(struct iommu_domain *domain, unsigned long iova, -- size_t size) --{ -- int ret; -- struct arm_smmu_domain *smmu_domain = domain->priv; -+ if (phys & CB_PAR_F) { -+ dev_err(dev, "translation fault!\n"); -+ dev_err(dev, "PAR = 0x%llx\n", phys); -+ return 0; -+ } - -- ret = arm_smmu_handle_mapping(smmu_domain, iova, 0, size, 0); -- arm_smmu_tlb_inv_context(smmu_domain); -- return ret ? 0 : size; -+ return (phys & GENMASK_ULL(39, 12)) | (iova & 0xfff); - } - - static phys_addr_t arm_smmu_iova_to_phys(struct iommu_domain *domain, -- dma_addr_t iova) -+ dma_addr_t iova) - { -- pgd_t *pgdp, pgd; -- pud_t pud; -- pmd_t pmd; -- pte_t pte; -- struct arm_smmu_domain *smmu_domain = domain->priv; -- struct arm_smmu_cfg *cfg = &smmu_domain->cfg; -- -- pgdp = cfg->pgd; -- if (!pgdp) -- return 0; -- -- pgd = *(pgdp + pgd_index(iova)); -- if (pgd_none(pgd)) -- return 0; -+ phys_addr_t ret; -+ unsigned long flags; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); -+ struct io_pgtable_ops *ops= smmu_domain->pgtbl_ops; - -- pud = *pud_offset(&pgd, iova); -- if (pud_none(pud)) -+ if (!ops) - return 0; - -- pmd = *pmd_offset(&pud, iova); -- if (pmd_none(pmd)) -- return 0; -+ spin_lock_irqsave(&smmu_domain->pgtbl_lock, flags); -+ if (smmu_domain->smmu->features & ARM_SMMU_FEAT_TRANS_OPS && -+ smmu_domain->stage == ARM_SMMU_DOMAIN_S1) { -+ ret = arm_smmu_iova_to_phys_hard(domain, iova); -+ } else { -+ ret = ops->iova_to_phys(ops, iova); -+ } - -- pte = *(pmd_page_vaddr(pmd) + pte_index(iova)); -- if (pte_none(pte)) -- return 0; -+ spin_unlock_irqrestore(&smmu_domain->pgtbl_lock, flags); - -- return __pfn_to_phys(pte_pfn(pte)) | (iova & 
~PAGE_MASK); -+ return ret; - } - - static bool arm_smmu_capable(enum iommu_cap cap) -@@ -1568,6 +1357,8 @@ static bool arm_smmu_capable(enum iommu_cap cap) - return true; - case IOMMU_CAP_INTR_REMAP: - return true; /* MSIs are just memory writes */ -+ case IOMMU_CAP_NOEXEC: -+ return true; - default: - return false; - } -@@ -1584,81 +1375,248 @@ static void __arm_smmu_release_pci_iommudata(void *data) - kfree(data); - } - --static int arm_smmu_add_device(struct device *dev) -+static int arm_smmu_add_pci_device(struct pci_dev *pdev) - { -- struct arm_smmu_device *smmu; -+ int i, ret; -+ u16 sid; -+ struct iommu_group *group; - struct arm_smmu_master_cfg *cfg; -+#ifdef CONFIG_PCI_LAYERSCAPE -+ u32 streamid; -+#endif -+ -+ group = iommu_group_get_for_dev(&pdev->dev); -+ if (IS_ERR(group)) -+ return PTR_ERR(group); -+ -+ cfg = iommu_group_get_iommudata(group); -+ if (!cfg) { -+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); -+ if (!cfg) { -+ ret = -ENOMEM; -+ goto out_put_group; -+ } -+ -+ iommu_group_set_iommudata(group, cfg, -+ __arm_smmu_release_pci_iommudata); -+ } -+ -+ if (cfg->num_streamids >= MAX_MASTER_STREAMIDS) { -+ ret = -ENOSPC; -+ goto out_put_group; -+ } -+ -+ /* -+ * Assume Stream ID == Requester ID for now. -+ * We need a way to describe the ID mappings in FDT. 
-+ */ -+ pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, &sid); -+ for (i = 0; i < cfg->num_streamids; ++i) -+ if (cfg->streamids[i] == sid) -+ break; -+ -+ /* Avoid duplicate SIDs, as this can lead to SMR conflicts */ -+ if (i == cfg->num_streamids) -+ cfg->streamids[cfg->num_streamids++] = sid; -+ -+#ifdef CONFIG_PCI_LAYERSCAPE -+ streamid = set_pcie_streamid_translation(pdev, sid); -+ if (~streamid == 0) { -+ ret = -ENODEV; -+ goto out_put_group; -+ } -+ cfg->streamids[0] = streamid; -+ cfg->mask = 0x7c00; -+ -+ pdev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; -+ pdev->dma_alias_devid = streamid; -+#endif -+ -+ return 0; -+out_put_group: -+ iommu_group_put(group); -+ return ret; -+} -+ -+static int arm_smmu_add_platform_device(struct device *dev) -+{ - struct iommu_group *group; -- void (*releasefn)(void *) = NULL; -- int ret; -+ struct arm_smmu_master *master; -+ struct arm_smmu_device *smmu = find_smmu_for_device(dev); - -- smmu = find_smmu_for_device(dev); - if (!smmu) - return -ENODEV; - -+ master = find_smmu_master(smmu, dev->of_node); -+ if (!master) -+ return -ENODEV; -+ -+ /* No automatic group creation for platform devices */ - group = iommu_group_alloc(); -- if (IS_ERR(group)) { -- dev_err(dev, "Failed to allocate IOMMU group\n"); -+ if (IS_ERR(group)) - return PTR_ERR(group); -+ -+ iommu_group_set_iommudata(group, &master->cfg, NULL); -+ return iommu_group_add_device(group, dev); -+} -+ -+static int arm_smmu_add_device(struct device *dev) -+{ -+ if (dev_is_pci(dev)) -+ return arm_smmu_add_pci_device(to_pci_dev(dev)); -+ -+ return arm_smmu_add_platform_device(dev); -+} -+ -+static void arm_smmu_remove_device(struct device *dev) -+{ -+ iommu_group_remove_device(dev); -+} -+ -+static int arm_smmu_domain_get_attr(struct iommu_domain *domain, -+ enum iommu_attr attr, void *data) -+{ -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); -+ -+ switch (attr) { -+ case DOMAIN_ATTR_NESTING: -+ *(int *)data = (smmu_domain->stage == 
ARM_SMMU_DOMAIN_NESTED); -+ return 0; -+ default: -+ return -ENODEV; - } -+} - -- if (dev_is_pci(dev)) { -- struct pci_dev *pdev = to_pci_dev(dev); -+static int arm_smmu_domain_set_attr(struct iommu_domain *domain, -+ enum iommu_attr attr, void *data) -+{ -+ int ret = 0; -+ struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain); - -- cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); -- if (!cfg) { -- ret = -ENOMEM; -- goto out_put_group; -+ mutex_lock(&smmu_domain->init_mutex); -+ -+ switch (attr) { -+ case DOMAIN_ATTR_NESTING: -+ if (smmu_domain->smmu) { -+ ret = -EPERM; -+ goto out_unlock; - } - -- cfg->num_streamids = 1; -+ if (*(int *)data) -+ smmu_domain->stage = ARM_SMMU_DOMAIN_NESTED; -+ else -+ smmu_domain->stage = ARM_SMMU_DOMAIN_S1; -+ -+ break; -+ default: -+ ret = -ENODEV; -+ } -+ -+out_unlock: -+ mutex_unlock(&smmu_domain->init_mutex); -+ return ret; -+} -+ -+static struct iommu_ops arm_smmu_ops = { -+ .capable = arm_smmu_capable, -+ .domain_alloc = arm_smmu_domain_alloc, -+ .domain_free = arm_smmu_domain_free, -+ .attach_dev = arm_smmu_attach_dev, -+ .detach_dev = arm_smmu_detach_dev, -+ .map = arm_smmu_map, -+ .unmap = arm_smmu_unmap, -+ .iova_to_phys = arm_smmu_iova_to_phys, -+ .add_device = arm_smmu_add_device, -+ .remove_device = arm_smmu_remove_device, -+ .domain_get_attr = arm_smmu_domain_get_attr, -+ .domain_set_attr = arm_smmu_domain_set_attr, -+ .pgsize_bitmap = -1UL, /* Restricted during device attach */ -+}; -+ -+#ifdef CONFIG_FSL_MC_BUS -+ -+static void arm_smmu_release_fsl_mc_iommudata(void *data) -+{ -+ kfree(data); -+} -+ -+/* -+ * IOMMU group creation and stream ID programming for -+ * the LS devices -+ * -+ */ -+static int arm_fsl_mc_smmu_add_device(struct device *dev) -+{ -+ struct device *cont_dev; -+ struct fsl_mc_device *mc_dev; -+ struct iommu_group *group; -+ struct arm_smmu_master_cfg *cfg; -+ int ret = 0; -+ -+ mc_dev = to_fsl_mc_device(dev); -+ if (mc_dev->flags & FSL_MC_IS_DPRC) -+ cont_dev = dev; -+ else -+ cont_dev = 
mc_dev->dev.parent; -+ -+ get_device(cont_dev); -+ group = iommu_group_get(cont_dev); -+ put_device(cont_dev); -+ if (!group) { -+ void (*releasefn)(void *) = NULL; -+ -+ group = iommu_group_alloc(); -+ if (IS_ERR(group)) -+ return PTR_ERR(group); - /* -- * Assume Stream ID == Requester ID for now. -- * We need a way to describe the ID mappings in FDT. -+ * allocate the cfg for the container and associate it with -+ * the iommu group. In the find cfg function we get the cfg -+ * from the iommu group. - */ -- pci_for_each_dma_alias(pdev, __arm_smmu_get_pci_sid, -- &cfg->streamids[0]); -- releasefn = __arm_smmu_release_pci_iommudata; -- } else { -- struct arm_smmu_master *master; -- -- master = find_smmu_master(smmu, dev->of_node); -- if (!master) { -- ret = -ENODEV; -- goto out_put_group; -- } -+ cfg = kzalloc(sizeof(*cfg), GFP_KERNEL); -+ if (!cfg) -+ return -ENOMEM; - -- cfg = &master->cfg; -+ mc_dev = to_fsl_mc_device(cont_dev); -+ cfg->num_streamids = 1; -+ cfg->streamids[0] = mc_dev->icid; -+ cfg->mask = 0x7c00; -+ releasefn = arm_smmu_release_fsl_mc_iommudata; -+ iommu_group_set_iommudata(group, cfg, releasefn); -+ ret = iommu_group_add_device(group, cont_dev); - } - -- iommu_group_set_iommudata(group, cfg, releasefn); -- ret = iommu_group_add_device(group, dev); -+ if (!ret && cont_dev != dev) -+ ret = iommu_group_add_device(group, dev); - --out_put_group: - iommu_group_put(group); -+ - return ret; - } - --static void arm_smmu_remove_device(struct device *dev) -+static void arm_fsl_mc_smmu_remove_device(struct device *dev) - { - iommu_group_remove_device(dev); -+ - } - --static const struct iommu_ops arm_smmu_ops = { -- .capable = arm_smmu_capable, -- .domain_init = arm_smmu_domain_init, -- .domain_destroy = arm_smmu_domain_destroy, -- .attach_dev = arm_smmu_attach_dev, -- .detach_dev = arm_smmu_detach_dev, -- .map = arm_smmu_map, -- .unmap = arm_smmu_unmap, -- .iova_to_phys = arm_smmu_iova_to_phys, -- .add_device = arm_smmu_add_device, -- .remove_device = 
arm_smmu_remove_device, -- .pgsize_bitmap = (SECTION_SIZE | -- ARM_SMMU_PTE_CONT_SIZE | -- PAGE_SIZE), -+static struct iommu_ops arm_fsl_mc_smmu_ops = { -+ .capable = arm_smmu_capable, -+ .domain_alloc = arm_smmu_domain_alloc, -+ .domain_free = arm_smmu_domain_free, -+ .attach_dev = arm_smmu_attach_dev, -+ .detach_dev = arm_smmu_detach_dev, -+ .map = arm_smmu_map, -+ .unmap = arm_smmu_unmap, -+ .map_sg = default_iommu_map_sg, -+ .iova_to_phys = arm_smmu_iova_to_phys, -+ .add_device = arm_fsl_mc_smmu_add_device, -+ .remove_device = arm_fsl_mc_smmu_remove_device, -+ .domain_get_attr = arm_smmu_domain_get_attr, -+ .domain_set_attr = arm_smmu_domain_set_attr, -+ .pgsize_bitmap = -1UL, /* Restricted during device attach */ - }; -+#endif - - static void arm_smmu_device_reset(struct arm_smmu_device *smmu) - { -@@ -1686,7 +1644,6 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) - } - - /* Invalidate the TLB, just in case */ -- writel_relaxed(0, gr0_base + ARM_SMMU_GR0_STLBIALL); - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLH); - writel_relaxed(0, gr0_base + ARM_SMMU_GR0_TLBIALLNSNH); - -@@ -1708,7 +1665,7 @@ static void arm_smmu_device_reset(struct arm_smmu_device *smmu) - reg &= ~(sCR0_BSU_MASK << sCR0_BSU_SHIFT); - - /* Push the button */ -- arm_smmu_tlb_sync(smmu); -+ __arm_smmu_tlb_sync(smmu); - writel(reg, ARM_SMMU_GR0_NS(smmu) + ARM_SMMU_GR0_sCR0); - } - -@@ -1742,12 +1699,6 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) - - /* ID0 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID0); --#ifndef CONFIG_64BIT -- if (((id >> ID0_PTFS_SHIFT) & ID0_PTFS_MASK) == ID0_PTFS_V8_ONLY) { -- dev_err(smmu->dev, "\tno v7 descriptor support!\n"); -- return -ENODEV; -- } --#endif - - /* Restrict available stages based on module parameter */ - if (force_stage == 1) -@@ -1776,6 +1727,11 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) - return -ENODEV; - } - -+ if ((id & ID0_S1TS) && ((smmu->version == 1) || !(id & 
ID0_ATOSNS))) { -+ smmu->features |= ARM_SMMU_FEAT_TRANS_OPS; -+ dev_notice(smmu->dev, "\taddress translation ops\n"); -+ } -+ - if (id & ID0_CTTW) { - smmu->features |= ARM_SMMU_FEAT_COHERENT_WALK; - dev_notice(smmu->dev, "\tcoherent table walk\n"); -@@ -1820,16 +1776,14 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) - smmu->pgshift = (id & ID1_PAGESIZE) ? 16 : 12; - - /* Check for size mismatch of SMMU address space from mapped region */ -- size = 1 << -- (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); -+ size = 1 << (((id >> ID1_NUMPAGENDXB_SHIFT) & ID1_NUMPAGENDXB_MASK) + 1); - size *= 2 << smmu->pgshift; - if (smmu->size != size) - dev_warn(smmu->dev, - "SMMU address space size (0x%lx) differs from mapped region size (0x%lx)!\n", - size, smmu->size); - -- smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & -- ID1_NUMS2CB_MASK; -+ smmu->num_s2_context_banks = (id >> ID1_NUMS2CB_SHIFT) & ID1_NUMS2CB_MASK; - smmu->num_context_banks = (id >> ID1_NUMCB_SHIFT) & ID1_NUMCB_MASK; - if (smmu->num_s2_context_banks > smmu->num_context_banks) { - dev_err(smmu->dev, "impossible number of S2 context banks!\n"); -@@ -1841,46 +1795,49 @@ static int arm_smmu_device_cfg_probe(struct arm_smmu_device *smmu) - /* ID2 */ - id = readl_relaxed(gr0_base + ARM_SMMU_GR0_ID2); - size = arm_smmu_id_size_to_bits((id >> ID2_IAS_SHIFT) & ID2_IAS_MASK); -- smmu->s1_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); -- -- /* Stage-2 input size limited due to pgd allocation (PTRS_PER_PGD) */ --#ifdef CONFIG_64BIT -- smmu->s2_input_size = min_t(unsigned long, VA_BITS, size); --#else -- smmu->s2_input_size = min(32UL, size); --#endif -+ smmu->ipa_size = size; - -- /* The stage-2 output mask is also applied for bypass */ -+ /* The output mask is also applied for bypass */ - size = arm_smmu_id_size_to_bits((id >> ID2_OAS_SHIFT) & ID2_OAS_MASK); -- smmu->s2_output_size = min_t(unsigned long, PHYS_MASK_SHIFT, size); -+ smmu->pa_size = size; -+ -+ /* 
-+ * What the page table walker can address actually depends on which -+ * descriptor format is in use, but since a) we don't know that yet, -+ * and b) it can vary per context bank, this will have to do... -+ */ -+ if (dma_set_mask_and_coherent(smmu->dev, DMA_BIT_MASK(size))) -+ dev_warn(smmu->dev, -+ "failed to set DMA mask for table walker\n"); - - if (smmu->version == ARM_SMMU_V1) { -- smmu->s1_input_size = 32; -+ smmu->va_size = smmu->ipa_size; -+ size = SZ_4K | SZ_2M | SZ_1G; - } else { --#ifdef CONFIG_64BIT - size = (id >> ID2_UBS_SHIFT) & ID2_UBS_MASK; -- size = min(VA_BITS, arm_smmu_id_size_to_bits(size)); --#else -- size = 32; -+ smmu->va_size = arm_smmu_id_size_to_bits(size); -+#ifndef CONFIG_64BIT -+ smmu->va_size = min(32UL, smmu->va_size); - #endif -- smmu->s1_input_size = size; -- -- if ((PAGE_SIZE == SZ_4K && !(id & ID2_PTFS_4K)) || -- (PAGE_SIZE == SZ_64K && !(id & ID2_PTFS_64K)) || -- (PAGE_SIZE != SZ_4K && PAGE_SIZE != SZ_64K)) { -- dev_err(smmu->dev, "CPU page size 0x%lx unsupported\n", -- PAGE_SIZE); -- return -ENODEV; -- } -+ size = 0; -+ if (id & ID2_PTFS_4K) -+ size |= SZ_4K | SZ_2M | SZ_1G; -+ if (id & ID2_PTFS_16K) -+ size |= SZ_16K | SZ_32M; -+ if (id & ID2_PTFS_64K) -+ size |= SZ_64K | SZ_512M; - } - -+ arm_smmu_ops.pgsize_bitmap &= size; -+ dev_notice(smmu->dev, "\tSupported page sizes: 0x%08lx\n", size); -+ - if (smmu->features & ARM_SMMU_FEAT_TRANS_S1) - dev_notice(smmu->dev, "\tStage-1: %lu-bit VA -> %lu-bit IPA\n", -- smmu->s1_input_size, smmu->s1_output_size); -+ smmu->va_size, smmu->ipa_size); - - if (smmu->features & ARM_SMMU_FEAT_TRANS_S2) - dev_notice(smmu->dev, "\tStage-2: %lu-bit IPA -> %lu-bit PA\n", -- smmu->s2_input_size, smmu->s2_output_size); -+ smmu->ipa_size, smmu->pa_size); - - return 0; - } -@@ -2007,6 +1964,10 @@ static int arm_smmu_device_dt_probe(struct platform_device *pdev) - spin_unlock(&arm_smmu_devices_lock); - - arm_smmu_device_reset(smmu); -+ /* AIOP Rev1 errata work around */ -+#ifdef CONFIG_AIOP_ERRATA -+ 
arm_smmu_aiop_attr_trans(smmu); -+#endif - return 0; - - out_free_irqs: -@@ -2062,7 +2023,6 @@ static int arm_smmu_device_remove(struct platform_device *pdev) - - static struct platform_driver arm_smmu_driver = { - .driver = { -- .owner = THIS_MODULE, - .name = "arm-smmu", - .of_match_table = of_match_ptr(arm_smmu_of_match), - }, -@@ -2072,8 +2032,20 @@ static struct platform_driver arm_smmu_driver = { - - static int __init arm_smmu_init(void) - { -+ struct device_node *np; - int ret; - -+ /* -+ * Play nice with systems that don't have an ARM SMMU by checking that -+ * an ARM SMMU exists in the system before proceeding with the driver -+ * and IOMMU bus operation registration. -+ */ -+ np = of_find_matching_node(NULL, arm_smmu_of_match); -+ if (!np) -+ return 0; -+ -+ of_node_put(np); -+ - ret = platform_driver_register(&arm_smmu_driver); - if (ret) - return ret; -@@ -2092,6 +2064,10 @@ static int __init arm_smmu_init(void) - bus_set_iommu(&pci_bus_type, &arm_smmu_ops); - #endif - -+#ifdef CONFIG_FSL_MC_BUS -+ if (!iommu_present(&fsl_mc_bus_type)) -+ bus_set_iommu(&fsl_mc_bus_type, &arm_fsl_mc_smmu_ops); -+#endif - return 0; - } - -diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c -index 7423318..7ce5273 100644 ---- a/drivers/iommu/exynos-iommu.c -+++ b/drivers/iommu/exynos-iommu.c -@@ -684,7 +684,6 @@ static const struct of_device_id sysmmu_of_match[] __initconst = { - static struct platform_driver exynos_sysmmu_driver __refdata = { - .probe = exynos_sysmmu_probe, - .driver = { -- .owner = THIS_MODULE, - .name = "exynos-sysmmu", - .of_match_table = sysmmu_of_match, - } -@@ -1178,6 +1177,7 @@ static const struct iommu_ops exynos_iommu_ops = { - .detach_dev = exynos_iommu_detach_device, - .map = exynos_iommu_map, - .unmap = exynos_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = exynos_iommu_iova_to_phys, - .add_device = exynos_iommu_add_device, - .remove_device = exynos_iommu_remove_device, -diff --git 
a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c -index 2b6ce93..80ac68d 100644 ---- a/drivers/iommu/fsl_pamu.c -+++ b/drivers/iommu/fsl_pamu.c -@@ -1227,7 +1227,6 @@ static const struct of_device_id fsl_of_pamu_ids[] = { - static struct platform_driver fsl_of_pamu_driver = { - .driver = { - .name = "fsl-of-pamu", -- .owner = THIS_MODULE, - }, - .probe = fsl_pamu_probe, - }; -diff --git a/drivers/iommu/intel-iommu.c b/drivers/iommu/intel-iommu.c -index 3d1fc73..9e97328 100644 ---- a/drivers/iommu/intel-iommu.c -+++ b/drivers/iommu/intel-iommu.c -@@ -4474,6 +4474,7 @@ static const struct iommu_ops intel_iommu_ops = { - .detach_dev = intel_iommu_detach_device, - .map = intel_iommu_map, - .unmap = intel_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = intel_iommu_iova_to_phys, - .add_device = intel_iommu_add_device, - .remove_device = intel_iommu_remove_device, -diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c -new file mode 100644 -index 0000000..5a500ed ---- /dev/null -+++ b/drivers/iommu/io-pgtable-arm.c -@@ -0,0 +1,986 @@ -+/* -+ * CPU-agnostic ARM page table allocator. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ * -+ * Copyright (C) 2014 ARM Limited -+ * -+ * Author: Will Deacon -+ */ -+ -+#define pr_fmt(fmt) "arm-lpae io-pgtable: " fmt -+ -+#include -+#include -+#include -+#include -+#include -+ -+#include "io-pgtable.h" -+ -+#define ARM_LPAE_MAX_ADDR_BITS 48 -+#define ARM_LPAE_S2_MAX_CONCAT_PAGES 16 -+#define ARM_LPAE_MAX_LEVELS 4 -+ -+/* Struct accessors */ -+#define io_pgtable_to_data(x) \ -+ container_of((x), struct arm_lpae_io_pgtable, iop) -+ -+#define io_pgtable_ops_to_pgtable(x) \ -+ container_of((x), struct io_pgtable, ops) -+ -+#define io_pgtable_ops_to_data(x) \ -+ io_pgtable_to_data(io_pgtable_ops_to_pgtable(x)) -+ -+/* -+ * For consistency with the architecture, we always consider -+ * ARM_LPAE_MAX_LEVELS levels, with the walk starting at level n >=0 -+ */ -+#define ARM_LPAE_START_LVL(d) (ARM_LPAE_MAX_LEVELS - (d)->levels) -+ -+/* -+ * Calculate the right shift amount to get to the portion describing level l -+ * in a virtual address mapped by the pagetable in d. -+ */ -+#define ARM_LPAE_LVL_SHIFT(l,d) \ -+ ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ -+ * (d)->bits_per_level) + (d)->pg_shift) -+ -+#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) -+ -+/* -+ * Calculate the index at level l used to map virtual address a using the -+ * pagetable in d. -+ */ -+#define ARM_LPAE_PGD_IDX(l,d) \ -+ ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) -+ -+#define ARM_LPAE_LVL_IDX(a,l,d) \ -+ (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ -+ ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) -+ -+/* Calculate the block/page mapping size at level l for pagetable in d. 
*/ -+#define ARM_LPAE_BLOCK_SIZE(l,d) \ -+ (1 << (ilog2(sizeof(arm_lpae_iopte)) + \ -+ ((ARM_LPAE_MAX_LEVELS - (l)) * (d)->bits_per_level))) -+ -+/* Page table bits */ -+#define ARM_LPAE_PTE_TYPE_SHIFT 0 -+#define ARM_LPAE_PTE_TYPE_MASK 0x3 -+ -+#define ARM_LPAE_PTE_TYPE_BLOCK 1 -+#define ARM_LPAE_PTE_TYPE_TABLE 3 -+#define ARM_LPAE_PTE_TYPE_PAGE 3 -+ -+#define ARM_LPAE_PTE_NSTABLE (((arm_lpae_iopte)1) << 63) -+#define ARM_LPAE_PTE_XN (((arm_lpae_iopte)3) << 53) -+#define ARM_LPAE_PTE_AF (((arm_lpae_iopte)1) << 10) -+#define ARM_LPAE_PTE_SH_NS (((arm_lpae_iopte)0) << 8) -+#define ARM_LPAE_PTE_SH_OS (((arm_lpae_iopte)2) << 8) -+#define ARM_LPAE_PTE_SH_IS (((arm_lpae_iopte)3) << 8) -+#define ARM_LPAE_PTE_NS (((arm_lpae_iopte)1) << 5) -+#define ARM_LPAE_PTE_VALID (((arm_lpae_iopte)1) << 0) -+ -+#define ARM_LPAE_PTE_ATTR_LO_MASK (((arm_lpae_iopte)0x3ff) << 2) -+/* Ignore the contiguous bit for block splitting */ -+#define ARM_LPAE_PTE_ATTR_HI_MASK (((arm_lpae_iopte)6) << 52) -+#define ARM_LPAE_PTE_ATTR_MASK (ARM_LPAE_PTE_ATTR_LO_MASK | \ -+ ARM_LPAE_PTE_ATTR_HI_MASK) -+ -+/* Stage-1 PTE */ -+#define ARM_LPAE_PTE_AP_UNPRIV (((arm_lpae_iopte)1) << 6) -+#define ARM_LPAE_PTE_AP_RDONLY (((arm_lpae_iopte)2) << 6) -+#define ARM_LPAE_PTE_ATTRINDX_SHIFT 2 -+#define ARM_LPAE_PTE_nG (((arm_lpae_iopte)1) << 11) -+ -+/* Stage-2 PTE */ -+#define ARM_LPAE_PTE_HAP_FAULT (((arm_lpae_iopte)0) << 6) -+#define ARM_LPAE_PTE_HAP_READ (((arm_lpae_iopte)1) << 6) -+#define ARM_LPAE_PTE_HAP_WRITE (((arm_lpae_iopte)2) << 6) -+#define ARM_LPAE_PTE_MEMATTR_OIWB (((arm_lpae_iopte)0xf) << 2) -+#define ARM_LPAE_PTE_MEMATTR_NC (((arm_lpae_iopte)0x5) << 2) -+#define ARM_LPAE_PTE_MEMATTR_DEV (((arm_lpae_iopte)0x1) << 2) -+ -+/* Register bits */ -+#define ARM_32_LPAE_TCR_EAE (1 << 31) -+#define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) -+ -+#define ARM_LPAE_TCR_TG0_4K (0 << 14) -+#define ARM_LPAE_TCR_TG0_64K (1 << 14) -+#define ARM_LPAE_TCR_TG0_16K (2 << 14) -+ -+#define ARM_LPAE_TCR_SH0_SHIFT 12 -+#define 
ARM_LPAE_TCR_SH0_MASK 0x3 -+#define ARM_LPAE_TCR_SH_NS 0 -+#define ARM_LPAE_TCR_SH_OS 2 -+#define ARM_LPAE_TCR_SH_IS 3 -+ -+#define ARM_LPAE_TCR_ORGN0_SHIFT 10 -+#define ARM_LPAE_TCR_IRGN0_SHIFT 8 -+#define ARM_LPAE_TCR_RGN_MASK 0x3 -+#define ARM_LPAE_TCR_RGN_NC 0 -+#define ARM_LPAE_TCR_RGN_WBWA 1 -+#define ARM_LPAE_TCR_RGN_WT 2 -+#define ARM_LPAE_TCR_RGN_WB 3 -+ -+#define ARM_LPAE_TCR_SL0_SHIFT 6 -+#define ARM_LPAE_TCR_SL0_MASK 0x3 -+ -+#define ARM_LPAE_TCR_T0SZ_SHIFT 0 -+#define ARM_LPAE_TCR_SZ_MASK 0xf -+ -+#define ARM_LPAE_TCR_PS_SHIFT 16 -+#define ARM_LPAE_TCR_PS_MASK 0x7 -+ -+#define ARM_LPAE_TCR_IPS_SHIFT 32 -+#define ARM_LPAE_TCR_IPS_MASK 0x7 -+ -+#define ARM_LPAE_TCR_PS_32_BIT 0x0ULL -+#define ARM_LPAE_TCR_PS_36_BIT 0x1ULL -+#define ARM_LPAE_TCR_PS_40_BIT 0x2ULL -+#define ARM_LPAE_TCR_PS_42_BIT 0x3ULL -+#define ARM_LPAE_TCR_PS_44_BIT 0x4ULL -+#define ARM_LPAE_TCR_PS_48_BIT 0x5ULL -+ -+#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3) -+#define ARM_LPAE_MAIR_ATTR_MASK 0xff -+#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04 -+#define ARM_LPAE_MAIR_ATTR_NC 0x44 -+#define ARM_LPAE_MAIR_ATTR_WBRWA 0xff -+#define ARM_LPAE_MAIR_ATTR_IDX_NC 0 -+#define ARM_LPAE_MAIR_ATTR_IDX_CACHE 1 -+#define ARM_LPAE_MAIR_ATTR_IDX_DEV 2 -+ -+/* IOPTE accessors */ -+#define iopte_deref(pte,d) \ -+ (__va((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1) \ -+ & ~((1ULL << (d)->pg_shift) - 1))) -+ -+#define iopte_type(pte,l) \ -+ (((pte) >> ARM_LPAE_PTE_TYPE_SHIFT) & ARM_LPAE_PTE_TYPE_MASK) -+ -+#define iopte_prot(pte) ((pte) & ARM_LPAE_PTE_ATTR_MASK) -+ -+#define iopte_leaf(pte,l) \ -+ (l == (ARM_LPAE_MAX_LEVELS - 1) ? 
\ -+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_PAGE) : \ -+ (iopte_type(pte,l) == ARM_LPAE_PTE_TYPE_BLOCK)) -+ -+#define iopte_to_pfn(pte,d) \ -+ (((pte) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) >> (d)->pg_shift) -+ -+#define pfn_to_iopte(pfn,d) \ -+ (((pfn) << (d)->pg_shift) & ((1ULL << ARM_LPAE_MAX_ADDR_BITS) - 1)) -+ -+struct arm_lpae_io_pgtable { -+ struct io_pgtable iop; -+ -+ int levels; -+ size_t pgd_size; -+ unsigned long pg_shift; -+ unsigned long bits_per_level; -+ -+ void *pgd; -+}; -+ -+typedef u64 arm_lpae_iopte; -+ -+static bool selftest_running = false; -+ -+static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data, -+ unsigned long iova, phys_addr_t paddr, -+ arm_lpae_iopte prot, int lvl, -+ arm_lpae_iopte *ptep) -+{ -+ arm_lpae_iopte pte = prot; -+ -+ /* We require an unmap first */ -+ if (iopte_leaf(*ptep, lvl)) { -+ WARN_ON(!selftest_running); -+ return -EEXIST; -+ } -+ -+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) -+ pte |= ARM_LPAE_PTE_NS; -+ -+ if (lvl == ARM_LPAE_MAX_LEVELS - 1) -+ pte |= ARM_LPAE_PTE_TYPE_PAGE; -+ else -+ pte |= ARM_LPAE_PTE_TYPE_BLOCK; -+ -+ pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS; -+ pte |= pfn_to_iopte(paddr >> data->pg_shift, data); -+ -+ *ptep = pte; -+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), data->iop.cookie); -+ return 0; -+} -+ -+static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova, -+ phys_addr_t paddr, size_t size, arm_lpae_iopte prot, -+ int lvl, arm_lpae_iopte *ptep) -+{ -+ arm_lpae_iopte *cptep, pte; -+ void *cookie = data->iop.cookie; -+ size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data); -+ -+ /* Find our entry at the current level */ -+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); -+ -+ /* If we can install a leaf entry at this level, then do so */ -+ if (size == block_size && (size & data->iop.cfg.pgsize_bitmap)) -+ return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep); -+ -+ /* We can't allocate tables at the final level */ -+ if (WARN_ON(lvl >= 
ARM_LPAE_MAX_LEVELS - 1)) -+ return -EINVAL; -+ -+ /* Grab a pointer to the next level */ -+ pte = *ptep; -+ if (!pte) { -+ cptep = alloc_pages_exact(1UL << data->pg_shift, -+ GFP_ATOMIC | __GFP_ZERO); -+ if (!cptep) -+ return -ENOMEM; -+ -+ data->iop.cfg.tlb->flush_pgtable(cptep, 1UL << data->pg_shift, -+ cookie); -+ pte = __pa(cptep) | ARM_LPAE_PTE_TYPE_TABLE; -+ if (data->iop.cfg.quirks & IO_PGTABLE_QUIRK_ARM_NS) -+ pte |= ARM_LPAE_PTE_NSTABLE; -+ *ptep = pte; -+ data->iop.cfg.tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); -+ } else { -+ cptep = iopte_deref(pte, data); -+ } -+ -+ /* Rinse, repeat */ -+ return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep); -+} -+ -+static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, -+ int prot) -+{ -+ arm_lpae_iopte pte; -+ -+ if (data->iop.fmt == ARM_64_LPAE_S1 || -+ data->iop.fmt == ARM_32_LPAE_S1) { -+ pte = ARM_LPAE_PTE_AP_UNPRIV | ARM_LPAE_PTE_nG; -+ -+ if (!(prot & IOMMU_WRITE) && (prot & IOMMU_READ)) -+ pte |= ARM_LPAE_PTE_AP_RDONLY; -+ -+ if (prot & IOMMU_CACHE) -+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE -+ << ARM_LPAE_PTE_ATTRINDX_SHIFT); -+ } else { -+ pte = ARM_LPAE_PTE_HAP_FAULT; -+ if (prot & IOMMU_READ) -+ pte |= ARM_LPAE_PTE_HAP_READ; -+ if (prot & IOMMU_WRITE) -+ pte |= ARM_LPAE_PTE_HAP_WRITE; -+ if (prot & IOMMU_CACHE) -+ pte |= ARM_LPAE_PTE_MEMATTR_OIWB; -+ else -+ pte |= ARM_LPAE_PTE_MEMATTR_NC; -+ } -+ -+ if (prot & IOMMU_NOEXEC) -+ pte |= ARM_LPAE_PTE_XN; -+ -+ return pte; -+} -+ -+static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova, -+ phys_addr_t paddr, size_t size, int iommu_prot) -+{ -+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); -+ arm_lpae_iopte *ptep = data->pgd; -+ int lvl = ARM_LPAE_START_LVL(data); -+ arm_lpae_iopte prot; -+ -+ /* If no access, then nothing to do */ -+ if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE))) -+ return 0; -+ -+ prot = arm_lpae_prot_to_pte(data, iommu_prot); -+ return __arm_lpae_map(data, iova, 
paddr, size, prot, lvl, ptep); -+} -+ -+static void __arm_lpae_free_pgtable(struct arm_lpae_io_pgtable *data, int lvl, -+ arm_lpae_iopte *ptep) -+{ -+ arm_lpae_iopte *start, *end; -+ unsigned long table_size; -+ -+ /* Only leaf entries at the last level */ -+ if (lvl == ARM_LPAE_MAX_LEVELS - 1) -+ return; -+ -+ if (lvl == ARM_LPAE_START_LVL(data)) -+ table_size = data->pgd_size; -+ else -+ table_size = 1UL << data->pg_shift; -+ -+ start = ptep; -+ end = (void *)ptep + table_size; -+ -+ while (ptep != end) { -+ arm_lpae_iopte pte = *ptep++; -+ -+ if (!pte || iopte_leaf(pte, lvl)) -+ continue; -+ -+ __arm_lpae_free_pgtable(data, lvl + 1, iopte_deref(pte, data)); -+ } -+ -+ free_pages_exact(start, table_size); -+} -+ -+static void arm_lpae_free_pgtable(struct io_pgtable *iop) -+{ -+ struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop); -+ -+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd); -+ kfree(data); -+} -+ -+static int arm_lpae_split_blk_unmap(struct arm_lpae_io_pgtable *data, -+ unsigned long iova, size_t size, -+ arm_lpae_iopte prot, int lvl, -+ arm_lpae_iopte *ptep, size_t blk_size) -+{ -+ unsigned long blk_start, blk_end; -+ phys_addr_t blk_paddr; -+ arm_lpae_iopte table = 0; -+ void *cookie = data->iop.cookie; -+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb; -+ -+ blk_start = iova & ~(blk_size - 1); -+ blk_end = blk_start + blk_size; -+ blk_paddr = iopte_to_pfn(*ptep, data) << data->pg_shift; -+ -+ for (; blk_start < blk_end; blk_start += size, blk_paddr += size) { -+ arm_lpae_iopte *tablep; -+ -+ /* Unmap! 
*/ -+ if (blk_start == iova) -+ continue; -+ -+ /* __arm_lpae_map expects a pointer to the start of the table */ -+ tablep = &table - ARM_LPAE_LVL_IDX(blk_start, lvl, data); -+ if (__arm_lpae_map(data, blk_start, blk_paddr, size, prot, lvl, -+ tablep) < 0) { -+ if (table) { -+ /* Free the table we allocated */ -+ tablep = iopte_deref(table, data); -+ __arm_lpae_free_pgtable(data, lvl + 1, tablep); -+ } -+ return 0; /* Bytes unmapped */ -+ } -+ } -+ -+ *ptep = table; -+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); -+ iova &= ~(blk_size - 1); -+ tlb->tlb_add_flush(iova, blk_size, true, cookie); -+ return size; -+} -+ -+static int __arm_lpae_unmap(struct arm_lpae_io_pgtable *data, -+ unsigned long iova, size_t size, int lvl, -+ arm_lpae_iopte *ptep) -+{ -+ arm_lpae_iopte pte; -+ const struct iommu_gather_ops *tlb = data->iop.cfg.tlb; -+ void *cookie = data->iop.cookie; -+ size_t blk_size = ARM_LPAE_BLOCK_SIZE(lvl, data); -+ -+ ptep += ARM_LPAE_LVL_IDX(iova, lvl, data); -+ pte = *ptep; -+ -+ /* Something went horribly wrong and we ran out of page table */ -+ if (WARN_ON(!pte || (lvl == ARM_LPAE_MAX_LEVELS))) -+ return 0; -+ -+ /* If the size matches this level, we're in the right place */ -+ if (size == blk_size) { -+ *ptep = 0; -+ tlb->flush_pgtable(ptep, sizeof(*ptep), cookie); -+ -+ if (!iopte_leaf(pte, lvl)) { -+ /* Also flush any partial walks */ -+ tlb->tlb_add_flush(iova, size, false, cookie); -+ tlb->tlb_sync(data->iop.cookie); -+ ptep = iopte_deref(pte, data); -+ __arm_lpae_free_pgtable(data, lvl + 1, ptep); -+ } else { -+ tlb->tlb_add_flush(iova, size, true, cookie); -+ } -+ -+ return size; -+ } else if (iopte_leaf(pte, lvl)) { -+ /* -+ * Insert a table at the next level to map the old region, -+ * minus the part we want to unmap -+ */ -+ return arm_lpae_split_blk_unmap(data, iova, size, -+ iopte_prot(pte), lvl, ptep, -+ blk_size); -+ } -+ -+ /* Keep on walkin' */ -+ ptep = iopte_deref(pte, data); -+ return __arm_lpae_unmap(data, iova, size, lvl + 1, 
ptep); -+} -+ -+static int arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova, -+ size_t size) -+{ -+ size_t unmapped; -+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); -+ struct io_pgtable *iop = &data->iop; -+ arm_lpae_iopte *ptep = data->pgd; -+ int lvl = ARM_LPAE_START_LVL(data); -+ -+ unmapped = __arm_lpae_unmap(data, iova, size, lvl, ptep); -+ if (unmapped) -+ iop->cfg.tlb->tlb_sync(iop->cookie); -+ -+ return unmapped; -+} -+ -+static phys_addr_t arm_lpae_iova_to_phys(struct io_pgtable_ops *ops, -+ unsigned long iova) -+{ -+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); -+ arm_lpae_iopte pte, *ptep = data->pgd; -+ int lvl = ARM_LPAE_START_LVL(data); -+ -+ do { -+ /* Valid IOPTE pointer? */ -+ if (!ptep) -+ return 0; -+ -+ /* Grab the IOPTE we're interested in */ -+ pte = *(ptep + ARM_LPAE_LVL_IDX(iova, lvl, data)); -+ -+ /* Valid entry? */ -+ if (!pte) -+ return 0; -+ -+ /* Leaf entry? */ -+ if (iopte_leaf(pte,lvl)) -+ goto found_translation; -+ -+ /* Take it to the next level */ -+ ptep = iopte_deref(pte, data); -+ } while (++lvl < ARM_LPAE_MAX_LEVELS); -+ -+ /* Ran out of page tables to walk */ -+ return 0; -+ -+found_translation: -+ iova &= ((1 << data->pg_shift) - 1); -+ return ((phys_addr_t)iopte_to_pfn(pte,data) << data->pg_shift) | iova; -+} -+ -+static void arm_lpae_restrict_pgsizes(struct io_pgtable_cfg *cfg) -+{ -+ unsigned long granule; -+ -+ /* -+ * We need to restrict the supported page sizes to match the -+ * translation regime for a particular granule. Aim to match -+ * the CPU page size if possible, otherwise prefer smaller sizes. -+ * While we're at it, restrict the block sizes to match the -+ * chosen granule. 
-+ */ -+ if (cfg->pgsize_bitmap & PAGE_SIZE) -+ granule = PAGE_SIZE; -+ else if (cfg->pgsize_bitmap & ~PAGE_MASK) -+ granule = 1UL << __fls(cfg->pgsize_bitmap & ~PAGE_MASK); -+ else if (cfg->pgsize_bitmap & PAGE_MASK) -+ granule = 1UL << __ffs(cfg->pgsize_bitmap & PAGE_MASK); -+ else -+ granule = 0; -+ -+ switch (granule) { -+ case SZ_4K: -+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); -+ break; -+ case SZ_16K: -+ cfg->pgsize_bitmap &= (SZ_16K | SZ_32M); -+ break; -+ case SZ_64K: -+ cfg->pgsize_bitmap &= (SZ_64K | SZ_512M); -+ break; -+ default: -+ cfg->pgsize_bitmap = 0; -+ } -+} -+ -+static struct arm_lpae_io_pgtable * -+arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg) -+{ -+ unsigned long va_bits, pgd_bits; -+ struct arm_lpae_io_pgtable *data; -+ -+ arm_lpae_restrict_pgsizes(cfg); -+ -+ if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K))) -+ return NULL; -+ -+ if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS) -+ return NULL; -+ -+ if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS) -+ return NULL; -+ -+ data = kmalloc(sizeof(*data), GFP_KERNEL); -+ if (!data) -+ return NULL; -+ -+ data->pg_shift = __ffs(cfg->pgsize_bitmap); -+ data->bits_per_level = data->pg_shift - ilog2(sizeof(arm_lpae_iopte)); -+ -+ va_bits = cfg->ias - data->pg_shift; -+ data->levels = DIV_ROUND_UP(va_bits, data->bits_per_level); -+ -+ /* Calculate the actual size of our pgd (without concatenation) */ -+ pgd_bits = va_bits - (data->bits_per_level * (data->levels - 1)); -+ data->pgd_size = 1UL << (pgd_bits + ilog2(sizeof(arm_lpae_iopte))); -+ -+ data->iop.ops = (struct io_pgtable_ops) { -+ .map = arm_lpae_map, -+ .unmap = arm_lpae_unmap, -+ .iova_to_phys = arm_lpae_iova_to_phys, -+ }; -+ -+ return data; -+} -+ -+static struct io_pgtable * -+arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) -+{ -+ u64 reg; -+ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); -+ -+ if (!data) -+ return NULL; -+ -+ /* TCR */ -+ reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | -+ 
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | -+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); -+ -+ switch (1 << data->pg_shift) { -+ case SZ_4K: -+ reg |= ARM_LPAE_TCR_TG0_4K; -+ break; -+ case SZ_16K: -+ reg |= ARM_LPAE_TCR_TG0_16K; -+ break; -+ case SZ_64K: -+ reg |= ARM_LPAE_TCR_TG0_64K; -+ break; -+ } -+ -+ switch (cfg->oas) { -+ case 32: -+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ case 36: -+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ case 40: -+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ case 42: -+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ case 44: -+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ case 48: -+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_IPS_SHIFT); -+ break; -+ default: -+ goto out_free_data; -+ } -+ -+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; -+ cfg->arm_lpae_s1_cfg.tcr = reg; -+ -+ /* MAIRs */ -+ reg = (ARM_LPAE_MAIR_ATTR_NC -+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_NC)) | -+ (ARM_LPAE_MAIR_ATTR_WBRWA -+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_CACHE)) | -+ (ARM_LPAE_MAIR_ATTR_DEVICE -+ << ARM_LPAE_MAIR_ATTR_SHIFT(ARM_LPAE_MAIR_ATTR_IDX_DEV)); -+ -+ cfg->arm_lpae_s1_cfg.mair[0] = reg; -+ cfg->arm_lpae_s1_cfg.mair[1] = 0; -+ -+ /* Looking good; allocate a pgd */ -+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO); -+ if (!data->pgd) -+ goto out_free_data; -+ -+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie); -+ -+ /* TTBRs */ -+ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd); -+ cfg->arm_lpae_s1_cfg.ttbr[1] = 0; -+ return &data->iop; -+ -+out_free_data: -+ kfree(data); -+ return NULL; -+} -+ -+static struct io_pgtable * -+arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) -+{ -+ u64 reg, sl; -+ struct arm_lpae_io_pgtable *data = arm_lpae_alloc_pgtable(cfg); -+ -+ if (!data) -+ return 
NULL; -+ -+ /* -+ * Concatenate PGDs at level 1 if possible in order to reduce -+ * the depth of the stage-2 walk. -+ */ -+ if (data->levels == ARM_LPAE_MAX_LEVELS) { -+ unsigned long pgd_pages; -+ -+ pgd_pages = data->pgd_size >> ilog2(sizeof(arm_lpae_iopte)); -+ if (pgd_pages <= ARM_LPAE_S2_MAX_CONCAT_PAGES) { -+ data->pgd_size = pgd_pages << data->pg_shift; -+ data->levels--; -+ } -+ } -+ -+ /* VTCR */ -+ reg = ARM_64_LPAE_S2_TCR_RES1 | -+ (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) | -+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) | -+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT); -+ -+ sl = ARM_LPAE_START_LVL(data); -+ -+ switch (1 << data->pg_shift) { -+ case SZ_4K: -+ reg |= ARM_LPAE_TCR_TG0_4K; -+ sl++; /* SL0 format is different for 4K granule size */ -+ break; -+ case SZ_16K: -+ reg |= ARM_LPAE_TCR_TG0_16K; -+ break; -+ case SZ_64K: -+ reg |= ARM_LPAE_TCR_TG0_64K; -+ break; -+ } -+ -+ switch (cfg->oas) { -+ case 32: -+ reg |= (ARM_LPAE_TCR_PS_32_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ case 36: -+ reg |= (ARM_LPAE_TCR_PS_36_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ case 40: -+ reg |= (ARM_LPAE_TCR_PS_40_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ case 42: -+ reg |= (ARM_LPAE_TCR_PS_42_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ case 44: -+ reg |= (ARM_LPAE_TCR_PS_44_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ case 48: -+ reg |= (ARM_LPAE_TCR_PS_48_BIT << ARM_LPAE_TCR_PS_SHIFT); -+ break; -+ default: -+ goto out_free_data; -+ } -+ -+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; -+ reg |= (~sl & ARM_LPAE_TCR_SL0_MASK) << ARM_LPAE_TCR_SL0_SHIFT; -+ cfg->arm_lpae_s2_cfg.vtcr = reg; -+ -+ /* Allocate pgd pages */ -+ data->pgd = alloc_pages_exact(data->pgd_size, GFP_KERNEL | __GFP_ZERO); -+ if (!data->pgd) -+ goto out_free_data; -+ -+ cfg->tlb->flush_pgtable(data->pgd, data->pgd_size, cookie); -+ -+ /* VTTBR */ -+ cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd); -+ return &data->iop; -+ -+out_free_data: -+ kfree(data); -+ 
return NULL; -+} -+ -+static struct io_pgtable * -+arm_32_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) -+{ -+ struct io_pgtable *iop; -+ -+ if (cfg->ias > 32 || cfg->oas > 40) -+ return NULL; -+ -+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); -+ iop = arm_64_lpae_alloc_pgtable_s1(cfg, cookie); -+ if (iop) { -+ cfg->arm_lpae_s1_cfg.tcr |= ARM_32_LPAE_TCR_EAE; -+ cfg->arm_lpae_s1_cfg.tcr &= 0xffffffff; -+ } -+ -+ return iop; -+} -+ -+static struct io_pgtable * -+arm_32_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie) -+{ -+ struct io_pgtable *iop; -+ -+ if (cfg->ias > 40 || cfg->oas > 40) -+ return NULL; -+ -+ cfg->pgsize_bitmap &= (SZ_4K | SZ_2M | SZ_1G); -+ iop = arm_64_lpae_alloc_pgtable_s2(cfg, cookie); -+ if (iop) -+ cfg->arm_lpae_s2_cfg.vtcr &= 0xffffffff; -+ -+ return iop; -+} -+ -+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = { -+ .alloc = arm_64_lpae_alloc_pgtable_s1, -+ .free = arm_lpae_free_pgtable, -+}; -+ -+struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns = { -+ .alloc = arm_64_lpae_alloc_pgtable_s2, -+ .free = arm_lpae_free_pgtable, -+}; -+ -+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns = { -+ .alloc = arm_32_lpae_alloc_pgtable_s1, -+ .free = arm_lpae_free_pgtable, -+}; -+ -+struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns = { -+ .alloc = arm_32_lpae_alloc_pgtable_s2, -+ .free = arm_lpae_free_pgtable, -+}; -+ -+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST -+ -+static struct io_pgtable_cfg *cfg_cookie; -+ -+static void dummy_tlb_flush_all(void *cookie) -+{ -+ WARN_ON(cookie != cfg_cookie); -+} -+ -+static void dummy_tlb_add_flush(unsigned long iova, size_t size, bool leaf, -+ void *cookie) -+{ -+ WARN_ON(cookie != cfg_cookie); -+ WARN_ON(!(size & cfg_cookie->pgsize_bitmap)); -+} -+ -+static void dummy_tlb_sync(void *cookie) -+{ -+ WARN_ON(cookie != cfg_cookie); -+} -+ -+static void dummy_flush_pgtable(void *ptr, size_t size, void *cookie) -+{ -+ 
WARN_ON(cookie != cfg_cookie); -+} -+ -+static struct iommu_gather_ops dummy_tlb_ops __initdata = { -+ .tlb_flush_all = dummy_tlb_flush_all, -+ .tlb_add_flush = dummy_tlb_add_flush, -+ .tlb_sync = dummy_tlb_sync, -+ .flush_pgtable = dummy_flush_pgtable, -+}; -+ -+static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops) -+{ -+ struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops); -+ struct io_pgtable_cfg *cfg = &data->iop.cfg; -+ -+ pr_err("cfg: pgsize_bitmap 0x%lx, ias %u-bit\n", -+ cfg->pgsize_bitmap, cfg->ias); -+ pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n", -+ data->levels, data->pgd_size, data->pg_shift, -+ data->bits_per_level, data->pgd); -+} -+ -+#define __FAIL(ops, i) ({ \ -+ WARN(1, "selftest: test failed for fmt idx %d\n", (i)); \ -+ arm_lpae_dump_ops(ops); \ -+ selftest_running = false; \ -+ -EFAULT; \ -+}) -+ -+static int __init arm_lpae_run_tests(struct io_pgtable_cfg *cfg) -+{ -+ static const enum io_pgtable_fmt fmts[] = { -+ ARM_64_LPAE_S1, -+ ARM_64_LPAE_S2, -+ }; -+ -+ int i, j; -+ unsigned long iova; -+ size_t size; -+ struct io_pgtable_ops *ops; -+ -+ selftest_running = true; -+ -+ for (i = 0; i < ARRAY_SIZE(fmts); ++i) { -+ cfg_cookie = cfg; -+ ops = alloc_io_pgtable_ops(fmts[i], cfg, cfg); -+ if (!ops) { -+ pr_err("selftest: failed to allocate io pgtable ops\n"); -+ return -ENOMEM; -+ } -+ -+ /* -+ * Initial sanity checks. -+ * Empty page tables shouldn't provide any translations. -+ */ -+ if (ops->iova_to_phys(ops, 42)) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, SZ_1G + 42)) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, SZ_2G + 42)) -+ return __FAIL(ops, i); -+ -+ /* -+ * Distinct mappings of different granule sizes. 
-+ */ -+ iova = 0; -+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); -+ while (j != BITS_PER_LONG) { -+ size = 1UL << j; -+ -+ if (ops->map(ops, iova, iova, size, IOMMU_READ | -+ IOMMU_WRITE | -+ IOMMU_NOEXEC | -+ IOMMU_CACHE)) -+ return __FAIL(ops, i); -+ -+ /* Overlapping mappings */ -+ if (!ops->map(ops, iova, iova + size, size, -+ IOMMU_READ | IOMMU_NOEXEC)) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) -+ return __FAIL(ops, i); -+ -+ iova += SZ_1G; -+ j++; -+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); -+ } -+ -+ /* Partial unmap */ -+ size = 1UL << __ffs(cfg->pgsize_bitmap); -+ if (ops->unmap(ops, SZ_1G + size, size) != size) -+ return __FAIL(ops, i); -+ -+ /* Remap of partial unmap */ -+ if (ops->map(ops, SZ_1G + size, size, size, IOMMU_READ)) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, SZ_1G + size + 42) != (size + 42)) -+ return __FAIL(ops, i); -+ -+ /* Full unmap */ -+ iova = 0; -+ j = find_first_bit(&cfg->pgsize_bitmap, BITS_PER_LONG); -+ while (j != BITS_PER_LONG) { -+ size = 1UL << j; -+ -+ if (ops->unmap(ops, iova, size) != size) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, iova + 42)) -+ return __FAIL(ops, i); -+ -+ /* Remap full block */ -+ if (ops->map(ops, iova, iova, size, IOMMU_WRITE)) -+ return __FAIL(ops, i); -+ -+ if (ops->iova_to_phys(ops, iova + 42) != (iova + 42)) -+ return __FAIL(ops, i); -+ -+ iova += SZ_1G; -+ j++; -+ j = find_next_bit(&cfg->pgsize_bitmap, BITS_PER_LONG, j); -+ } -+ -+ free_io_pgtable_ops(ops); -+ } -+ -+ selftest_running = false; -+ return 0; -+} -+ -+static int __init arm_lpae_do_selftests(void) -+{ -+ static const unsigned long pgsize[] = { -+ SZ_4K | SZ_2M | SZ_1G, -+ SZ_16K | SZ_32M, -+ SZ_64K | SZ_512M, -+ }; -+ -+ static const unsigned int ias[] = { -+ 32, 36, 40, 42, 44, 48, -+ }; -+ -+ int i, j, pass = 0, fail = 0; -+ struct io_pgtable_cfg cfg = { -+ .tlb = &dummy_tlb_ops, -+ .oas = 48, -+ }; -+ -+ for (i = 0; i < 
ARRAY_SIZE(pgsize); ++i) { -+ for (j = 0; j < ARRAY_SIZE(ias); ++j) { -+ cfg.pgsize_bitmap = pgsize[i]; -+ cfg.ias = ias[j]; -+ pr_info("selftest: pgsize_bitmap 0x%08lx, IAS %u\n", -+ pgsize[i], ias[j]); -+ if (arm_lpae_run_tests(&cfg)) -+ fail++; -+ else -+ pass++; -+ } -+ } -+ -+ pr_info("selftest: completed with %d PASS %d FAIL\n", pass, fail); -+ return fail ? -EFAULT : 0; -+} -+subsys_initcall(arm_lpae_do_selftests); -+#endif -diff --git a/drivers/iommu/io-pgtable.c b/drivers/iommu/io-pgtable.c -new file mode 100644 -index 0000000..6436fe2 ---- /dev/null -+++ b/drivers/iommu/io-pgtable.c -@@ -0,0 +1,82 @@ -+/* -+ * Generic page table allocator for IOMMUs. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ * -+ * Copyright (C) 2014 ARM Limited -+ * -+ * Author: Will Deacon -+ */ -+ -+#include -+#include -+#include -+ -+#include "io-pgtable.h" -+ -+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s1_init_fns; -+extern struct io_pgtable_init_fns io_pgtable_arm_32_lpae_s2_init_fns; -+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns; -+extern struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s2_init_fns; -+ -+static const struct io_pgtable_init_fns * -+io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = -+{ -+#ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE -+ [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns, -+ [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns, -+ [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns, -+ [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns, -+#endif -+}; -+ -+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, -+ struct io_pgtable_cfg *cfg, -+ void *cookie) -+{ -+ struct io_pgtable *iop; -+ const struct io_pgtable_init_fns *fns; -+ -+ if (fmt >= IO_PGTABLE_NUM_FMTS) -+ return NULL; -+ -+ fns = io_pgtable_init_table[fmt]; -+ if (!fns) -+ return NULL; -+ -+ iop = fns->alloc(cfg, cookie); -+ if (!iop) -+ return NULL; -+ -+ iop->fmt = fmt; -+ iop->cookie = cookie; -+ iop->cfg = *cfg; -+ -+ return &iop->ops; -+} -+ -+/* -+ * It is the IOMMU driver's responsibility to ensure that the page table -+ * is no longer accessible to the walker by this point. 
-+ */ -+void free_io_pgtable_ops(struct io_pgtable_ops *ops) -+{ -+ struct io_pgtable *iop; -+ -+ if (!ops) -+ return; -+ -+ iop = container_of(ops, struct io_pgtable, ops); -+ iop->cfg.tlb->tlb_flush_all(iop->cookie); -+ io_pgtable_init_table[iop->fmt]->free(iop); -+} -diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h -new file mode 100644 -index 0000000..10e32f6 ---- /dev/null -+++ b/drivers/iommu/io-pgtable.h -@@ -0,0 +1,143 @@ -+#ifndef __IO_PGTABLE_H -+#define __IO_PGTABLE_H -+ -+/* -+ * Public API for use by IOMMU drivers -+ */ -+enum io_pgtable_fmt { -+ ARM_32_LPAE_S1, -+ ARM_32_LPAE_S2, -+ ARM_64_LPAE_S1, -+ ARM_64_LPAE_S2, -+ IO_PGTABLE_NUM_FMTS, -+}; -+ -+/** -+ * struct iommu_gather_ops - IOMMU callbacks for TLB and page table management. -+ * -+ * @tlb_flush_all: Synchronously invalidate the entire TLB context. -+ * @tlb_add_flush: Queue up a TLB invalidation for a virtual address range. -+ * @tlb_sync: Ensure any queue TLB invalidation has taken effect. -+ * @flush_pgtable: Ensure page table updates are visible to the IOMMU. -+ * -+ * Note that these can all be called in atomic context and must therefore -+ * not block. -+ */ -+struct iommu_gather_ops { -+ void (*tlb_flush_all)(void *cookie); -+ void (*tlb_add_flush)(unsigned long iova, size_t size, bool leaf, -+ void *cookie); -+ void (*tlb_sync)(void *cookie); -+ void (*flush_pgtable)(void *ptr, size_t size, void *cookie); -+}; -+ -+/** -+ * struct io_pgtable_cfg - Configuration data for a set of page tables. -+ * -+ * @quirks: A bitmap of hardware quirks that require some special -+ * action by the low-level page table allocator. -+ * @pgsize_bitmap: A bitmap of page sizes supported by this set of page -+ * tables. -+ * @ias: Input address (iova) size, in bits. -+ * @oas: Output address (paddr) size, in bits. -+ * @tlb: TLB management callbacks for this set of tables. 
-+ */ -+struct io_pgtable_cfg { -+ #define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */ -+ int quirks; -+ unsigned long pgsize_bitmap; -+ unsigned int ias; -+ unsigned int oas; -+ const struct iommu_gather_ops *tlb; -+ -+ /* Low-level data specific to the table format */ -+ union { -+ struct { -+ u64 ttbr[2]; -+ u64 tcr; -+ u64 mair[2]; -+ } arm_lpae_s1_cfg; -+ -+ struct { -+ u64 vttbr; -+ u64 vtcr; -+ } arm_lpae_s2_cfg; -+ }; -+}; -+ -+/** -+ * struct io_pgtable_ops - Page table manipulation API for IOMMU drivers. -+ * -+ * @map: Map a physically contiguous memory region. -+ * @unmap: Unmap a physically contiguous memory region. -+ * @iova_to_phys: Translate iova to physical address. -+ * -+ * These functions map directly onto the iommu_ops member functions with -+ * the same names. -+ */ -+struct io_pgtable_ops { -+ int (*map)(struct io_pgtable_ops *ops, unsigned long iova, -+ phys_addr_t paddr, size_t size, int prot); -+ int (*unmap)(struct io_pgtable_ops *ops, unsigned long iova, -+ size_t size); -+ phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops, -+ unsigned long iova); -+}; -+ -+/** -+ * alloc_io_pgtable_ops() - Allocate a page table allocator for use by an IOMMU. -+ * -+ * @fmt: The page table format. -+ * @cfg: The page table configuration. This will be modified to represent -+ * the configuration actually provided by the allocator (e.g. the -+ * pgsize_bitmap may be restricted). -+ * @cookie: An opaque token provided by the IOMMU driver and passed back to -+ * the callback routines in cfg->tlb. -+ */ -+struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt, -+ struct io_pgtable_cfg *cfg, -+ void *cookie); -+ -+/** -+ * free_io_pgtable_ops() - Free an io_pgtable_ops structure. The caller -+ * *must* ensure that the page table is no longer -+ * live, but the TLB can be dirty. -+ * -+ * @ops: The ops returned from alloc_io_pgtable_ops. 
-+ */ -+void free_io_pgtable_ops(struct io_pgtable_ops *ops); -+ -+ -+/* -+ * Internal structures for page table allocator implementations. -+ */ -+ -+/** -+ * struct io_pgtable - Internal structure describing a set of page tables. -+ * -+ * @fmt: The page table format. -+ * @cookie: An opaque token provided by the IOMMU driver and passed back to -+ * any callback routines. -+ * @cfg: A copy of the page table configuration. -+ * @ops: The page table operations in use for this set of page tables. -+ */ -+struct io_pgtable { -+ enum io_pgtable_fmt fmt; -+ void *cookie; -+ struct io_pgtable_cfg cfg; -+ struct io_pgtable_ops ops; -+}; -+ -+/** -+ * struct io_pgtable_init_fns - Alloc/free a set of page tables for a -+ * particular format. -+ * -+ * @alloc: Allocate a set of page tables described by cfg. -+ * @free: Free the page tables associated with iop. -+ */ -+struct io_pgtable_init_fns { -+ struct io_pgtable *(*alloc)(struct io_pgtable_cfg *cfg, void *cookie); -+ void (*free)(struct io_pgtable *iop); -+}; -+ -+#endif /* __IO_PGTABLE_H */ -diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c -index ed8b048..8d8e5a7 100644 ---- a/drivers/iommu/iommu.c -+++ b/drivers/iommu/iommu.c -@@ -591,10 +591,10 @@ static struct iommu_group *get_pci_alias_group(struct pci_dev *pdev, - continue; - - /* We alias them or they alias us */ -- if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && -- pdev->dma_alias_devfn == tmp->devfn) || -- ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN) && -- tmp->dma_alias_devfn == pdev->devfn)) { -+ if (((pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID) && -+ (pdev->dma_alias_devid & 0xff) == tmp->devfn) || -+ ((tmp->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID) && -+ (tmp->dma_alias_devid & 0xff) == pdev->devfn)) { - - group = get_pci_alias_group(tmp, devfns); - if (group) { -@@ -737,7 +737,7 @@ static int add_iommu_group(struct device *dev, void *data) - const struct iommu_ops *ops = cb->ops; - - if (!ops->add_device) -- return -ENODEV; 
-+ return 0; - - WARN_ON(dev->iommu_group); - -@@ -818,7 +818,15 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) - kfree(nb); - return err; - } -- return bus_for_each_dev(bus, NULL, &cb, add_iommu_group); -+ -+ err = bus_for_each_dev(bus, NULL, &cb, add_iommu_group); -+ if (err) { -+ bus_unregister_notifier(bus, nb); -+ kfree(nb); -+ return err; -+ } -+ -+ return 0; - } - - /** -@@ -836,13 +844,19 @@ static int iommu_bus_init(struct bus_type *bus, const struct iommu_ops *ops) - */ - int bus_set_iommu(struct bus_type *bus, const struct iommu_ops *ops) - { -+ int err; -+ - if (bus->iommu_ops != NULL) - return -EBUSY; - - bus->iommu_ops = ops; - - /* Do IOMMU specific setup for this bus-type */ -- return iommu_bus_init(bus, ops); -+ err = iommu_bus_init(bus, ops); -+ if (err) -+ bus->iommu_ops = NULL; -+ -+ return err; - } - EXPORT_SYMBOL_GPL(bus_set_iommu); - -@@ -887,36 +901,24 @@ EXPORT_SYMBOL_GPL(iommu_set_fault_handler); - struct iommu_domain *iommu_domain_alloc(struct bus_type *bus) - { - struct iommu_domain *domain; -- int ret; - - if (bus == NULL || bus->iommu_ops == NULL) - return NULL; - -- domain = kzalloc(sizeof(*domain), GFP_KERNEL); -+ domain = bus->iommu_ops->domain_alloc(IOMMU_DOMAIN_UNMANAGED); - if (!domain) - return NULL; - -- domain->ops = bus->iommu_ops; -- -- ret = domain->ops->domain_init(domain); -- if (ret) -- goto out_free; -+ domain->ops = bus->iommu_ops; -+ domain->type = IOMMU_DOMAIN_UNMANAGED; - - return domain; -- --out_free: -- kfree(domain); -- -- return NULL; - } - EXPORT_SYMBOL_GPL(iommu_domain_alloc); - - void iommu_domain_free(struct iommu_domain *domain) - { -- if (likely(domain->ops->domain_destroy != NULL)) -- domain->ops->domain_destroy(domain); -- -- kfree(domain); -+ domain->ops->domain_free(domain); - } - EXPORT_SYMBOL_GPL(iommu_domain_free); - -@@ -943,6 +945,16 @@ void iommu_detach_device(struct iommu_domain *domain, struct device *dev) - } - EXPORT_SYMBOL_GPL(iommu_detach_device); - 
-+struct iommu_domain *iommu_get_dev_domain(struct device *dev) -+{ -+ const struct iommu_ops *ops = dev->bus->iommu_ops; -+ -+ if (unlikely(ops == NULL || ops->get_dev_iommu_domain == NULL)) -+ return NULL; -+ -+ return ops->get_dev_iommu_domain(dev); -+} -+EXPORT_SYMBOL_GPL(iommu_get_dev_domain); - /* - * IOMMU groups are really the natrual working unit of the IOMMU, but - * the IOMMU API works on domains and devices. Bridge that gap by -@@ -1035,6 +1047,9 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, - domain->ops->pgsize_bitmap == 0UL)) - return -ENODEV; - -+ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) -+ return -EINVAL; -+ - /* find out the minimum page size supported */ - min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); - -@@ -1070,7 +1085,7 @@ int iommu_map(struct iommu_domain *domain, unsigned long iova, - if (ret) - iommu_unmap(domain, orig_iova, orig_size - size); - else -- trace_map(iova, paddr, size); -+ trace_map(orig_iova, paddr, orig_size); - - return ret; - } -@@ -1080,11 +1095,15 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) - { - size_t unmapped_page, unmapped = 0; - unsigned int min_pagesz; -+ unsigned long orig_iova = iova; - - if (unlikely(domain->ops->unmap == NULL || - domain->ops->pgsize_bitmap == 0UL)) - return -ENODEV; - -+ if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING))) -+ return -EINVAL; -+ - /* find out the minimum page size supported */ - min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); - -@@ -1119,11 +1138,53 @@ size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, size_t size) - unmapped += unmapped_page; - } - -- trace_unmap(iova, 0, size); -+ trace_unmap(orig_iova, size, unmapped); - return unmapped; - } - EXPORT_SYMBOL_GPL(iommu_unmap); - -+size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, -+ struct scatterlist *sg, unsigned int nents, int prot) -+{ -+ struct scatterlist *s; -+ size_t mapped = 0; -+ unsigned 
int i, min_pagesz; -+ int ret; -+ -+ if (unlikely(domain->ops->pgsize_bitmap == 0UL)) -+ return 0; -+ -+ min_pagesz = 1 << __ffs(domain->ops->pgsize_bitmap); -+ -+ for_each_sg(sg, s, nents, i) { -+ phys_addr_t phys = page_to_phys(sg_page(s)) + s->offset; -+ -+ /* -+ * We are mapping on IOMMU page boundaries, so offset within -+ * the page must be 0. However, the IOMMU may support pages -+ * smaller than PAGE_SIZE, so s->offset may still represent -+ * an offset of that boundary within the CPU page. -+ */ -+ if (!IS_ALIGNED(s->offset, min_pagesz)) -+ goto out_err; -+ -+ ret = iommu_map(domain, iova + mapped, phys, s->length, prot); -+ if (ret) -+ goto out_err; -+ -+ mapped += s->length; -+ } -+ -+ return mapped; -+ -+out_err: -+ /* undo mappings already done */ -+ iommu_unmap(domain, iova, mapped); -+ -+ return 0; -+ -+} -+EXPORT_SYMBOL_GPL(default_iommu_map_sg); - - int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t paddr, u64 size, int prot) -diff --git a/drivers/iommu/ipmmu-vmsa.c b/drivers/iommu/ipmmu-vmsa.c -index 7dab5cb..f3c5ab6 100644 ---- a/drivers/iommu/ipmmu-vmsa.c -+++ b/drivers/iommu/ipmmu-vmsa.c -@@ -1127,6 +1127,7 @@ static const struct iommu_ops ipmmu_ops = { - .detach_dev = ipmmu_detach_device, - .map = ipmmu_map, - .unmap = ipmmu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = ipmmu_iova_to_phys, - .add_device = ipmmu_add_device, - .remove_device = ipmmu_remove_device, -@@ -1221,7 +1222,6 @@ static int ipmmu_remove(struct platform_device *pdev) - - static struct platform_driver ipmmu_driver = { - .driver = { -- .owner = THIS_MODULE, - .name = "ipmmu-vmsa", - }, - .probe = ipmmu_probe, -diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c -index 74a1767..2c3f5ad 100644 ---- a/drivers/iommu/irq_remapping.c -+++ b/drivers/iommu/irq_remapping.c -@@ -56,19 +56,13 @@ static int do_setup_msi_irqs(struct pci_dev *dev, int nvec) - unsigned int irq; - struct msi_desc *msidesc; - -- 
WARN_ON(!list_is_singular(&dev->msi_list)); - msidesc = list_entry(dev->msi_list.next, struct msi_desc, list); -- WARN_ON(msidesc->irq); -- WARN_ON(msidesc->msi_attrib.multiple); -- WARN_ON(msidesc->nvec_used); - - irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev)); - if (irq == 0) - return -ENOSPC; - - nvec_pow2 = __roundup_pow_of_two(nvec); -- msidesc->nvec_used = nvec; -- msidesc->msi_attrib.multiple = ilog2(nvec_pow2); - for (sub_handle = 0; sub_handle < nvec; sub_handle++) { - if (!sub_handle) { - index = msi_alloc_remapped_irq(dev, irq, nvec_pow2); -@@ -96,8 +90,6 @@ error: - * IRQs from tearing down again in default_teardown_msi_irqs() - */ - msidesc->irq = 0; -- msidesc->nvec_used = 0; -- msidesc->msi_attrib.multiple = 0; - - return ret; - } -diff --git a/drivers/iommu/msm_iommu.c b/drivers/iommu/msm_iommu.c -index 6e3dcc2..1c7b78e 100644 ---- a/drivers/iommu/msm_iommu.c -+++ b/drivers/iommu/msm_iommu.c -@@ -681,6 +681,7 @@ static const struct iommu_ops msm_iommu_ops = { - .detach_dev = msm_iommu_detach_dev, - .map = msm_iommu_map, - .unmap = msm_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = msm_iommu_iova_to_phys, - .pgsize_bitmap = MSM_IOMMU_PGSIZES, - }; -diff --git a/drivers/iommu/of_iommu.c b/drivers/iommu/of_iommu.c -index e550ccb..43429ab 100644 ---- a/drivers/iommu/of_iommu.c -+++ b/drivers/iommu/of_iommu.c -@@ -18,9 +18,14 @@ - */ - - #include -+#include - #include - #include - #include -+#include -+ -+static const struct of_device_id __iommu_of_table_sentinel -+ __used __section(__iommu_of_table_end); - - /** - * of_get_dma_window - Parse *dma-window property and returns 0 if found. 
-@@ -89,3 +94,93 @@ int of_get_dma_window(struct device_node *dn, const char *prefix, int index, - return 0; - } - EXPORT_SYMBOL_GPL(of_get_dma_window); -+ -+struct of_iommu_node { -+ struct list_head list; -+ struct device_node *np; -+ struct iommu_ops *ops; -+}; -+static LIST_HEAD(of_iommu_list); -+static DEFINE_SPINLOCK(of_iommu_lock); -+ -+void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops) -+{ -+ struct of_iommu_node *iommu = kzalloc(sizeof(*iommu), GFP_KERNEL); -+ -+ if (WARN_ON(!iommu)) -+ return; -+ -+ INIT_LIST_HEAD(&iommu->list); -+ iommu->np = np; -+ iommu->ops = ops; -+ spin_lock(&of_iommu_lock); -+ list_add_tail(&iommu->list, &of_iommu_list); -+ spin_unlock(&of_iommu_lock); -+} -+ -+struct iommu_ops *of_iommu_get_ops(struct device_node *np) -+{ -+ struct of_iommu_node *node; -+ struct iommu_ops *ops = NULL; -+ -+ spin_lock(&of_iommu_lock); -+ list_for_each_entry(node, &of_iommu_list, list) -+ if (node->np == np) { -+ ops = node->ops; -+ break; -+ } -+ spin_unlock(&of_iommu_lock); -+ return ops; -+} -+ -+struct iommu_ops *of_iommu_configure(struct device *dev, -+ struct device_node *master_np) -+{ -+ struct of_phandle_args iommu_spec; -+ struct device_node *np; -+ struct iommu_ops *ops = NULL; -+ int idx = 0; -+ -+ if (dev_is_pci(dev)) { -+ dev_err(dev, "IOMMU is currently not supported for PCI\n"); -+ return NULL; -+ } -+ -+ /* -+ * We don't currently walk up the tree looking for a parent IOMMU. 
-+ * See the `Notes:' section of -+ * Documentation/devicetree/bindings/iommu/iommu.txt -+ */ -+ while (!of_parse_phandle_with_args(master_np, "iommus", -+ "#iommu-cells", idx, -+ &iommu_spec)) { -+ np = iommu_spec.np; -+ ops = of_iommu_get_ops(np); -+ -+ if (!ops || !ops->of_xlate || ops->of_xlate(dev, &iommu_spec)) -+ goto err_put_node; -+ -+ of_node_put(np); -+ idx++; -+ } -+ -+ return ops; -+ -+err_put_node: -+ of_node_put(np); -+ return NULL; -+} -+ -+void __init of_iommu_init(void) -+{ -+ struct device_node *np; -+ const struct of_device_id *match, *matches = &__iommu_of_table; -+ -+ for_each_matching_node_and_match(np, matches, &match) { -+ const of_iommu_init_fn init_fn = match->data; -+ -+ if (init_fn(np)) -+ pr_err("Failed to initialise IOMMU %s\n", -+ of_node_full_name(np)); -+ } -+} -diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c -index 3627887..18003c0 100644 ---- a/drivers/iommu/omap-iommu.c -+++ b/drivers/iommu/omap-iommu.c -@@ -1288,6 +1288,7 @@ static const struct iommu_ops omap_iommu_ops = { - .detach_dev = omap_iommu_detach_dev, - .map = omap_iommu_map, - .unmap = omap_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = omap_iommu_iova_to_phys, - .add_device = omap_iommu_add_device, - .remove_device = omap_iommu_remove_device, -diff --git a/drivers/iommu/shmobile-iommu.c b/drivers/iommu/shmobile-iommu.c -index 1333e6f..f1b0077 100644 ---- a/drivers/iommu/shmobile-iommu.c -+++ b/drivers/iommu/shmobile-iommu.c -@@ -361,6 +361,7 @@ static const struct iommu_ops shmobile_iommu_ops = { - .detach_dev = shmobile_iommu_detach_device, - .map = shmobile_iommu_map, - .unmap = shmobile_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = shmobile_iommu_iova_to_phys, - .add_device = shmobile_iommu_add_device, - .pgsize_bitmap = SZ_1M | SZ_64K | SZ_4K, -diff --git a/drivers/iommu/shmobile-ipmmu.c b/drivers/iommu/shmobile-ipmmu.c -index bd97ade..951651a 100644 ---- a/drivers/iommu/shmobile-ipmmu.c -+++ 
b/drivers/iommu/shmobile-ipmmu.c -@@ -118,7 +118,6 @@ static int ipmmu_probe(struct platform_device *pdev) - static struct platform_driver ipmmu_driver = { - .probe = ipmmu_probe, - .driver = { -- .owner = THIS_MODULE, - .name = "ipmmu", - }, - }; -diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c -index a6d76ab..f722a0c 100644 ---- a/drivers/iommu/tegra-gart.c -+++ b/drivers/iommu/tegra-gart.c -@@ -425,7 +425,6 @@ static struct platform_driver tegra_gart_driver = { - .probe = tegra_gart_probe, - .remove = tegra_gart_remove, - .driver = { -- .owner = THIS_MODULE, - .name = "tegra-gart", - .pm = &tegra_gart_pm_ops, - .of_match_table = tegra_gart_of_match, -diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c -index 3afdf43..cb0c9bf 100644 ---- a/drivers/iommu/tegra-smmu.c -+++ b/drivers/iommu/tegra-smmu.c -@@ -955,6 +955,7 @@ static const struct iommu_ops smmu_iommu_ops = { - .detach_dev = smmu_iommu_detach_dev, - .map = smmu_iommu_map, - .unmap = smmu_iommu_unmap, -+ .map_sg = default_iommu_map_sg, - .iova_to_phys = smmu_iommu_iova_to_phys, - .pgsize_bitmap = SMMU_IOMMU_PGSIZES, - }; -@@ -1269,7 +1270,6 @@ static struct platform_driver tegra_smmu_driver = { - .probe = tegra_smmu_probe, - .remove = tegra_smmu_remove, - .driver = { -- .owner = THIS_MODULE, - .name = "tegra-smmu", - .pm = &tegra_smmu_pm_ops, - .of_match_table = tegra_smmu_of_match, -diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig -index b21f12f..caf590c 100644 ---- a/drivers/irqchip/Kconfig -+++ b/drivers/irqchip/Kconfig -@@ -15,6 +15,10 @@ config ARM_GIC_V3 - select IRQ_DOMAIN - select MULTI_IRQ_HANDLER - -+config ARM_GIC_V3_ITS -+ bool -+ select PCI_MSI_IRQ_DOMAIN -+ - config ARM_NVIC - bool - select IRQ_DOMAIN -diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile -index 173bb5f..ec3621d 100644 ---- a/drivers/irqchip/Makefile -+++ b/drivers/irqchip/Makefile -@@ -20,6 +20,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o - 
obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o - obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o - obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o -+obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o - obj-$(CONFIG_ARM_NVIC) += irq-nvic.o - obj-$(CONFIG_ARM_VIC) += irq-vic.o - obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o -diff --git a/drivers/irqchip/irq-armada-370-xp.c b/drivers/irqchip/irq-armada-370-xp.c -index 41ac85a..615075d 100644 ---- a/drivers/irqchip/irq-armada-370-xp.c -+++ b/drivers/irqchip/irq-armada-370-xp.c -@@ -131,7 +131,7 @@ static void armada_370_xp_free_msi(int hwirq) - mutex_unlock(&msi_used_lock); - } - --static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, -+static int armada_370_xp_setup_msi_irq(struct msi_controller *chip, - struct pci_dev *pdev, - struct msi_desc *desc) - { -@@ -158,11 +158,11 @@ static int armada_370_xp_setup_msi_irq(struct msi_chip *chip, - msg.address_hi = 0; - msg.data = 0xf00 | (hwirq + 16); - -- write_msi_msg(virq, &msg); -+ pci_write_msi_msg(virq, &msg); - return 0; - } - --static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, -+static void armada_370_xp_teardown_msi_irq(struct msi_controller *chip, - unsigned int irq) - { - struct irq_data *d = irq_get_irq_data(irq); -@@ -174,10 +174,10 @@ static void armada_370_xp_teardown_msi_irq(struct msi_chip *chip, - - static struct irq_chip armada_370_xp_msi_irq_chip = { - .name = "armada_370_xp_msi_irq", -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - static int armada_370_xp_msi_map(struct irq_domain *domain, unsigned int virq, -@@ -197,7 +197,7 @@ static const struct irq_domain_ops armada_370_xp_msi_irq_ops = { - static int armada_370_xp_msi_init(struct device_node *node, - phys_addr_t 
main_int_phys_base) - { -- struct msi_chip *msi_chip; -+ struct msi_controller *msi_chip; - u32 reg; - int ret; - -diff --git a/drivers/irqchip/irq-atmel-aic.c b/drivers/irqchip/irq-atmel-aic.c -index 9a2cf3c..27fdd8c 100644 ---- a/drivers/irqchip/irq-atmel-aic.c -+++ b/drivers/irqchip/irq-atmel-aic.c -@@ -65,11 +65,11 @@ aic_handle(struct pt_regs *regs) - u32 irqnr; - u32 irqstat; - -- irqnr = irq_reg_readl(gc->reg_base + AT91_AIC_IVR); -- irqstat = irq_reg_readl(gc->reg_base + AT91_AIC_ISR); -+ irqnr = irq_reg_readl(gc, AT91_AIC_IVR); -+ irqstat = irq_reg_readl(gc, AT91_AIC_ISR); - - if (!irqstat) -- irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR); -+ irq_reg_writel(gc, 0, AT91_AIC_EOICR); - else - handle_domain_irq(aic_domain, irqnr, regs); - } -@@ -80,7 +80,7 @@ static int aic_retrigger(struct irq_data *d) - - /* Enable interrupt on AIC5 */ - irq_gc_lock(gc); -- irq_reg_writel(d->mask, gc->reg_base + AT91_AIC_ISCR); -+ irq_reg_writel(gc, d->mask, AT91_AIC_ISCR); - irq_gc_unlock(gc); - - return 0; -@@ -92,12 +92,12 @@ static int aic_set_type(struct irq_data *d, unsigned type) - unsigned int smr; - int ret; - -- smr = irq_reg_readl(gc->reg_base + AT91_AIC_SMR(d->hwirq)); -+ smr = irq_reg_readl(gc, AT91_AIC_SMR(d->hwirq)); - ret = aic_common_set_type(d, type, &smr); - if (ret) - return ret; - -- irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(d->hwirq)); -+ irq_reg_writel(gc, smr, AT91_AIC_SMR(d->hwirq)); - - return 0; - } -@@ -108,8 +108,8 @@ static void aic_suspend(struct irq_data *d) - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - - irq_gc_lock(gc); -- irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IDCR); -- irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IECR); -+ irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IDCR); -+ irq_reg_writel(gc, gc->wake_active, AT91_AIC_IECR); - irq_gc_unlock(gc); - } - -@@ -118,8 +118,8 @@ static void aic_resume(struct irq_data *d) - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - - 
irq_gc_lock(gc); -- irq_reg_writel(gc->wake_active, gc->reg_base + AT91_AIC_IDCR); -- irq_reg_writel(gc->mask_cache, gc->reg_base + AT91_AIC_IECR); -+ irq_reg_writel(gc, gc->wake_active, AT91_AIC_IDCR); -+ irq_reg_writel(gc, gc->mask_cache, AT91_AIC_IECR); - irq_gc_unlock(gc); - } - -@@ -128,8 +128,8 @@ static void aic_pm_shutdown(struct irq_data *d) - struct irq_chip_generic *gc = irq_data_get_irq_chip_data(d); - - irq_gc_lock(gc); -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR); -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); - irq_gc_unlock(gc); - } - #else -@@ -148,24 +148,24 @@ static void __init aic_hw_init(struct irq_domain *domain) - * will not Lock out nIRQ - */ - for (i = 0; i < 8; i++) -- irq_reg_writel(0, gc->reg_base + AT91_AIC_EOICR); -+ irq_reg_writel(gc, 0, AT91_AIC_EOICR); - - /* - * Spurious Interrupt ID in Spurious Vector Register. - * When there is no current interrupt, the IRQ Vector Register - * reads the value stored in AIC_SPU - */ -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_SPU); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_SPU); - - /* No debugging in AIC: Debug (Protect) Control Register */ -- irq_reg_writel(0, gc->reg_base + AT91_AIC_DCR); -+ irq_reg_writel(gc, 0, AT91_AIC_DCR); - - /* Disable and clear all interrupts initially */ -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_IDCR); -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC_ICCR); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_IDCR); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC_ICCR); - - for (i = 0; i < 32; i++) -- irq_reg_writel(i, gc->reg_base + AT91_AIC_SVR(i)); -+ irq_reg_writel(gc, i, AT91_AIC_SVR(i)); - } - - static int aic_irq_domain_xlate(struct irq_domain *d, -@@ -195,10 +195,10 @@ static int aic_irq_domain_xlate(struct irq_domain *d, - gc = dgc->gc[idx]; - - irq_gc_lock(gc); -- smr = irq_reg_readl(gc->reg_base + 
AT91_AIC_SMR(*out_hwirq)); -+ smr = irq_reg_readl(gc, AT91_AIC_SMR(*out_hwirq)); - ret = aic_common_set_priority(intspec[2], &smr); - if (!ret) -- irq_reg_writel(smr, gc->reg_base + AT91_AIC_SMR(*out_hwirq)); -+ irq_reg_writel(gc, smr, AT91_AIC_SMR(*out_hwirq)); - irq_gc_unlock(gc); - - return ret; -diff --git a/drivers/irqchip/irq-atmel-aic5.c b/drivers/irqchip/irq-atmel-aic5.c -index a11aae8..a2e8c3f 100644 ---- a/drivers/irqchip/irq-atmel-aic5.c -+++ b/drivers/irqchip/irq-atmel-aic5.c -@@ -75,11 +75,11 @@ aic5_handle(struct pt_regs *regs) - u32 irqnr; - u32 irqstat; - -- irqnr = irq_reg_readl(gc->reg_base + AT91_AIC5_IVR); -- irqstat = irq_reg_readl(gc->reg_base + AT91_AIC5_ISR); -+ irqnr = irq_reg_readl(gc, AT91_AIC5_IVR); -+ irqstat = irq_reg_readl(gc, AT91_AIC5_ISR); - - if (!irqstat) -- irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR); -+ irq_reg_writel(gc, 0, AT91_AIC5_EOICR); - else - handle_domain_irq(aic5_domain, irqnr, regs); - } -@@ -92,8 +92,8 @@ static void aic5_mask(struct irq_data *d) - - /* Disable interrupt on AIC5 */ - irq_gc_lock(gc); -- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); -- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR); -+ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); -+ irq_reg_writel(gc, 1, AT91_AIC5_IDCR); - gc->mask_cache &= ~d->mask; - irq_gc_unlock(gc); - } -@@ -106,8 +106,8 @@ static void aic5_unmask(struct irq_data *d) - - /* Enable interrupt on AIC5 */ - irq_gc_lock(gc); -- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); -- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IECR); -+ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); -+ irq_reg_writel(gc, 1, AT91_AIC5_IECR); - gc->mask_cache |= d->mask; - irq_gc_unlock(gc); - } -@@ -120,8 +120,8 @@ static int aic5_retrigger(struct irq_data *d) - - /* Enable interrupt on AIC5 */ - irq_gc_lock(gc); -- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); -- irq_reg_writel(1, gc->reg_base + AT91_AIC5_ISCR); -+ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); -+ 
irq_reg_writel(gc, 1, AT91_AIC5_ISCR); - irq_gc_unlock(gc); - - return 0; -@@ -136,11 +136,11 @@ static int aic5_set_type(struct irq_data *d, unsigned type) - int ret; - - irq_gc_lock(gc); -- irq_reg_writel(d->hwirq, gc->reg_base + AT91_AIC5_SSR); -- smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR); -+ irq_reg_writel(gc, d->hwirq, AT91_AIC5_SSR); -+ smr = irq_reg_readl(gc, AT91_AIC5_SMR); - ret = aic_common_set_type(d, type, &smr); - if (!ret) -- irq_reg_writel(smr, gc->reg_base + AT91_AIC5_SMR); -+ irq_reg_writel(gc, smr, AT91_AIC5_SMR); - irq_gc_unlock(gc); - - return ret; -@@ -162,12 +162,11 @@ static void aic5_suspend(struct irq_data *d) - if ((mask & gc->mask_cache) == (mask & gc->wake_active)) - continue; - -- irq_reg_writel(i + gc->irq_base, -- bgc->reg_base + AT91_AIC5_SSR); -+ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); - if (mask & gc->wake_active) -- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_IECR); - else -- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); - } - irq_gc_unlock(bgc); - } -@@ -187,12 +186,11 @@ static void aic5_resume(struct irq_data *d) - if ((mask & gc->mask_cache) == (mask & gc->wake_active)) - continue; - -- irq_reg_writel(i + gc->irq_base, -- bgc->reg_base + AT91_AIC5_SSR); -+ irq_reg_writel(bgc, i + gc->irq_base, AT91_AIC5_SSR); - if (mask & gc->mask_cache) -- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IECR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_IECR); - else -- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); - } - irq_gc_unlock(bgc); - } -@@ -207,10 +205,9 @@ static void aic5_pm_shutdown(struct irq_data *d) - - irq_gc_lock(bgc); - for (i = 0; i < dgc->irqs_per_chip; i++) { -- irq_reg_writel(i + gc->irq_base, -- bgc->reg_base + AT91_AIC5_SSR); -- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_IDCR); -- irq_reg_writel(1, bgc->reg_base + AT91_AIC5_ICCR); -+ irq_reg_writel(bgc, i + gc->irq_base, 
AT91_AIC5_SSR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_IDCR); -+ irq_reg_writel(bgc, 1, AT91_AIC5_ICCR); - } - irq_gc_unlock(bgc); - } -@@ -230,24 +227,24 @@ static void __init aic5_hw_init(struct irq_domain *domain) - * will not Lock out nIRQ - */ - for (i = 0; i < 8; i++) -- irq_reg_writel(0, gc->reg_base + AT91_AIC5_EOICR); -+ irq_reg_writel(gc, 0, AT91_AIC5_EOICR); - - /* - * Spurious Interrupt ID in Spurious Vector Register. - * When there is no current interrupt, the IRQ Vector Register - * reads the value stored in AIC_SPU - */ -- irq_reg_writel(0xffffffff, gc->reg_base + AT91_AIC5_SPU); -+ irq_reg_writel(gc, 0xffffffff, AT91_AIC5_SPU); - - /* No debugging in AIC: Debug (Protect) Control Register */ -- irq_reg_writel(0, gc->reg_base + AT91_AIC5_DCR); -+ irq_reg_writel(gc, 0, AT91_AIC5_DCR); - - /* Disable and clear all interrupts initially */ - for (i = 0; i < domain->revmap_size; i++) { -- irq_reg_writel(i, gc->reg_base + AT91_AIC5_SSR); -- irq_reg_writel(i, gc->reg_base + AT91_AIC5_SVR); -- irq_reg_writel(1, gc->reg_base + AT91_AIC5_IDCR); -- irq_reg_writel(1, gc->reg_base + AT91_AIC5_ICCR); -+ irq_reg_writel(gc, i, AT91_AIC5_SSR); -+ irq_reg_writel(gc, i, AT91_AIC5_SVR); -+ irq_reg_writel(gc, 1, AT91_AIC5_IDCR); -+ irq_reg_writel(gc, 1, AT91_AIC5_ICCR); - } - } - -@@ -273,11 +270,11 @@ static int aic5_irq_domain_xlate(struct irq_domain *d, - gc = dgc->gc[0]; - - irq_gc_lock(gc); -- irq_reg_writel(*out_hwirq, gc->reg_base + AT91_AIC5_SSR); -- smr = irq_reg_readl(gc->reg_base + AT91_AIC5_SMR); -+ irq_reg_writel(gc, *out_hwirq, AT91_AIC5_SSR); -+ smr = irq_reg_readl(gc, AT91_AIC5_SMR); - ret = aic_common_set_priority(intspec[2], &smr); - if (!ret) -- irq_reg_writel(intspec[2] | smr, gc->reg_base + AT91_AIC5_SMR); -+ irq_reg_writel(gc, intspec[2] | smr, AT91_AIC5_SMR); - irq_gc_unlock(gc); - - return ret; -diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c -new file mode 100644 -index 0000000..43c50ed ---- /dev/null -+++ 
b/drivers/irqchip/irq-gic-v3-its.c -@@ -0,0 +1,1628 @@ -+/* -+ * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. -+ * Author: Marc Zyngier -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include -+ -+#include -+#include -+#include -+ -+#include "irqchip.h" -+ -+#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0) -+ -+#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) -+ -+/* -+ * Collection structure - just an ID, and a redistributor address to -+ * ping. We use one per CPU as a bag of interrupts assigned to this -+ * CPU. -+ */ -+struct its_collection { -+ u64 target_address; -+ u16 col_id; -+}; -+ -+/* -+ * The ITS structure - contains most of the infrastructure, with the -+ * msi_controller, the command queue, the collections, and the list of -+ * devices writing to it. 
-+ */ -+struct its_node { -+ raw_spinlock_t lock; -+ struct list_head entry; -+ struct msi_controller msi_chip; -+ struct irq_domain *domain; -+ void __iomem *base; -+ unsigned long phys_base; -+ struct its_cmd_block *cmd_base; -+ struct its_cmd_block *cmd_write; -+ void *tables[GITS_BASER_NR_REGS]; -+ struct its_collection *collections; -+ struct list_head its_device_list; -+ u64 flags; -+ u32 ite_size; -+}; -+ -+#define ITS_ITT_ALIGN SZ_256 -+ -+struct event_lpi_map { -+ unsigned long *lpi_map; -+ u16 *col_map; -+ irq_hw_number_t lpi_base; -+ int nr_lpis; -+}; -+ -+/* -+ * The ITS view of a device - belongs to an ITS, a collection, owns an -+ * interrupt translation table, and a list of interrupts. -+ */ -+struct its_device { -+ struct list_head entry; -+ struct its_node *its; -+ struct event_lpi_map event_map; -+ void *itt; -+ u32 nr_ites; -+ u32 device_id; -+}; -+ -+static LIST_HEAD(its_nodes); -+static DEFINE_SPINLOCK(its_lock); -+static struct device_node *gic_root_node; -+static struct rdists *gic_rdists; -+ -+#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) -+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) -+ -+static struct its_collection *dev_event_to_col(struct its_device *its_dev, -+ u32 event) -+{ -+ struct its_node *its = its_dev->its; -+ -+ return its->collections + its_dev->event_map.col_map[event]; -+} -+ -+/* -+ * ITS command descriptors - parameters to be encoded in a command -+ * block. 
-+ */ -+struct its_cmd_desc { -+ union { -+ struct { -+ struct its_device *dev; -+ u32 event_id; -+ } its_inv_cmd; -+ -+ struct { -+ struct its_device *dev; -+ u32 event_id; -+ } its_int_cmd; -+ -+ struct { -+ struct its_device *dev; -+ int valid; -+ } its_mapd_cmd; -+ -+ struct { -+ struct its_collection *col; -+ int valid; -+ } its_mapc_cmd; -+ -+ struct { -+ struct its_device *dev; -+ u32 phys_id; -+ u32 event_id; -+ } its_mapvi_cmd; -+ -+ struct { -+ struct its_device *dev; -+ struct its_collection *col; -+ u32 event_id; -+ } its_movi_cmd; -+ -+ struct { -+ struct its_device *dev; -+ u32 event_id; -+ } its_discard_cmd; -+ -+ struct { -+ struct its_collection *col; -+ } its_invall_cmd; -+ }; -+}; -+ -+/* -+ * The ITS command block, which is what the ITS actually parses. -+ */ -+struct its_cmd_block { -+ u64 raw_cmd[4]; -+}; -+ -+#define ITS_CMD_QUEUE_SZ SZ_64K -+#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) -+ -+typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, -+ struct its_cmd_desc *); -+ -+static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) -+{ -+ cmd->raw_cmd[0] &= ~0xffUL; -+ cmd->raw_cmd[0] |= cmd_nr; -+} -+ -+static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) -+{ -+ cmd->raw_cmd[0] &= BIT_ULL(32) - 1; -+ cmd->raw_cmd[0] |= ((u64)devid) << 32; -+} -+ -+static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) -+{ -+ cmd->raw_cmd[1] &= ~0xffffffffUL; -+ cmd->raw_cmd[1] |= id; -+} -+ -+static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) -+{ -+ cmd->raw_cmd[1] &= 0xffffffffUL; -+ cmd->raw_cmd[1] |= ((u64)phys_id) << 32; -+} -+ -+static void its_encode_size(struct its_cmd_block *cmd, u8 size) -+{ -+ cmd->raw_cmd[1] &= ~0x1fUL; -+ cmd->raw_cmd[1] |= size & 0x1f; -+} -+ -+static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) -+{ -+ cmd->raw_cmd[2] &= ~0xffffffffffffUL; -+ cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL; -+} -+ 
-+static void its_encode_valid(struct its_cmd_block *cmd, int valid) -+{ -+ cmd->raw_cmd[2] &= ~(1UL << 63); -+ cmd->raw_cmd[2] |= ((u64)!!valid) << 63; -+} -+ -+static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) -+{ -+ cmd->raw_cmd[2] &= ~(0xffffffffUL << 16); -+ cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16)); -+} -+ -+static void its_encode_collection(struct its_cmd_block *cmd, u16 col) -+{ -+ cmd->raw_cmd[2] &= ~0xffffUL; -+ cmd->raw_cmd[2] |= col; -+} -+ -+static inline void its_fixup_cmd(struct its_cmd_block *cmd) -+{ -+ /* Let's fixup BE commands */ -+ cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); -+ cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); -+ cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); -+ cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); -+} -+ -+static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ unsigned long itt_addr; -+ u8 size = ilog2(desc->its_mapd_cmd.dev->nr_ites); -+ -+ itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); -+ itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); -+ -+ its_encode_cmd(cmd, GITS_CMD_MAPD); -+ its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); -+ its_encode_size(cmd, size - 1); -+ its_encode_itt(cmd, itt_addr); -+ its_encode_valid(cmd, desc->its_mapd_cmd.valid); -+ -+ its_fixup_cmd(cmd); -+ -+ return NULL; -+} -+ -+static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ its_encode_cmd(cmd, GITS_CMD_MAPC); -+ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); -+ its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); -+ its_encode_valid(cmd, desc->its_mapc_cmd.valid); -+ -+ its_fixup_cmd(cmd); -+ -+ return desc->its_mapc_cmd.col; -+} -+ -+static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ struct its_collection *col; -+ -+ col = dev_event_to_col(desc->its_mapvi_cmd.dev, -+ 
desc->its_mapvi_cmd.event_id); -+ -+ its_encode_cmd(cmd, GITS_CMD_MAPVI); -+ its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id); -+ its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id); -+ its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id); -+ its_encode_collection(cmd, col->col_id); -+ -+ its_fixup_cmd(cmd); -+ -+ return col; -+} -+ -+static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ struct its_collection *col; -+ -+ col = dev_event_to_col(desc->its_movi_cmd.dev, -+ desc->its_movi_cmd.event_id); -+ -+ its_encode_cmd(cmd, GITS_CMD_MOVI); -+ its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); -+ its_encode_event_id(cmd, desc->its_movi_cmd.event_id); -+ its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); -+ -+ its_fixup_cmd(cmd); -+ -+ return col; -+} -+ -+static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ struct its_collection *col; -+ -+ col = dev_event_to_col(desc->its_discard_cmd.dev, -+ desc->its_discard_cmd.event_id); -+ -+ its_encode_cmd(cmd, GITS_CMD_DISCARD); -+ its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); -+ its_encode_event_id(cmd, desc->its_discard_cmd.event_id); -+ -+ its_fixup_cmd(cmd); -+ -+ return col; -+} -+ -+static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ struct its_collection *col; -+ -+ col = dev_event_to_col(desc->its_inv_cmd.dev, -+ desc->its_inv_cmd.event_id); -+ -+ its_encode_cmd(cmd, GITS_CMD_INV); -+ its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); -+ its_encode_event_id(cmd, desc->its_inv_cmd.event_id); -+ -+ its_fixup_cmd(cmd); -+ -+ return col; -+} -+ -+static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, -+ struct its_cmd_desc *desc) -+{ -+ its_encode_cmd(cmd, GITS_CMD_INVALL); -+ its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); -+ -+ its_fixup_cmd(cmd); -+ -+ 
return NULL; -+} -+ -+static u64 its_cmd_ptr_to_offset(struct its_node *its, -+ struct its_cmd_block *ptr) -+{ -+ return (ptr - its->cmd_base) * sizeof(*ptr); -+} -+ -+static int its_queue_full(struct its_node *its) -+{ -+ int widx; -+ int ridx; -+ -+ widx = its->cmd_write - its->cmd_base; -+ ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); -+ -+ /* This is incredibly unlikely to happen, unless the ITS locks up. */ -+ if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) -+ return 1; -+ -+ return 0; -+} -+ -+static struct its_cmd_block *its_allocate_entry(struct its_node *its) -+{ -+ struct its_cmd_block *cmd; -+ u32 count = 1000000; /* 1s! */ -+ -+ while (its_queue_full(its)) { -+ count--; -+ if (!count) { -+ pr_err_ratelimited("ITS queue not draining\n"); -+ return NULL; -+ } -+ cpu_relax(); -+ udelay(1); -+ } -+ -+ cmd = its->cmd_write++; -+ -+ /* Handle queue wrapping */ -+ if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) -+ its->cmd_write = its->cmd_base; -+ -+ return cmd; -+} -+ -+static struct its_cmd_block *its_post_commands(struct its_node *its) -+{ -+ u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); -+ -+ writel_relaxed(wr, its->base + GITS_CWRITER); -+ -+ return its->cmd_write; -+} -+ -+static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) -+{ -+ /* -+ * Make sure the commands written to memory are observable by -+ * the ITS. -+ */ -+ if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) -+ __flush_dcache_area(cmd, sizeof(*cmd)); -+ else -+ dsb(ishst); -+} -+ -+static void its_wait_for_range_completion(struct its_node *its, -+ struct its_cmd_block *from, -+ struct its_cmd_block *to) -+{ -+ u64 rd_idx, from_idx, to_idx; -+ u32 count = 1000000; /* 1s! 
*/ -+ -+ from_idx = its_cmd_ptr_to_offset(its, from); -+ to_idx = its_cmd_ptr_to_offset(its, to); -+ -+ while (1) { -+ rd_idx = readl_relaxed(its->base + GITS_CREADR); -+ if (rd_idx >= to_idx || rd_idx < from_idx) -+ break; -+ -+ count--; -+ if (!count) { -+ pr_err_ratelimited("ITS queue timeout\n"); -+ return; -+ } -+ cpu_relax(); -+ udelay(1); -+ } -+} -+ -+static void its_send_single_command(struct its_node *its, -+ its_cmd_builder_t builder, -+ struct its_cmd_desc *desc) -+{ -+ struct its_cmd_block *cmd, *sync_cmd, *next_cmd; -+ struct its_collection *sync_col; -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&its->lock, flags); -+ -+ cmd = its_allocate_entry(its); -+ if (!cmd) { /* We're soooooo screewed... */ -+ pr_err_ratelimited("ITS can't allocate, dropping command\n"); -+ raw_spin_unlock_irqrestore(&its->lock, flags); -+ return; -+ } -+ sync_col = builder(cmd, desc); -+ its_flush_cmd(its, cmd); -+ -+ if (sync_col) { -+ sync_cmd = its_allocate_entry(its); -+ if (!sync_cmd) { -+ pr_err_ratelimited("ITS can't SYNC, skipping\n"); -+ goto post; -+ } -+ its_encode_cmd(sync_cmd, GITS_CMD_SYNC); -+ its_encode_target(sync_cmd, sync_col->target_address); -+ its_fixup_cmd(sync_cmd); -+ its_flush_cmd(its, sync_cmd); -+ } -+ -+post: -+ next_cmd = its_post_commands(its); -+ raw_spin_unlock_irqrestore(&its->lock, flags); -+ -+ its_wait_for_range_completion(its, cmd, next_cmd); -+} -+ -+static void its_send_inv(struct its_device *dev, u32 event_id) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_inv_cmd.dev = dev; -+ desc.its_inv_cmd.event_id = event_id; -+ -+ its_send_single_command(dev->its, its_build_inv_cmd, &desc); -+} -+ -+static void its_send_mapd(struct its_device *dev, int valid) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_mapd_cmd.dev = dev; -+ desc.its_mapd_cmd.valid = !!valid; -+ -+ its_send_single_command(dev->its, its_build_mapd_cmd, &desc); -+} -+ -+static void its_send_mapc(struct its_node *its, struct its_collection *col, -+ int valid) -+{ -+ 
struct its_cmd_desc desc; -+ -+ desc.its_mapc_cmd.col = col; -+ desc.its_mapc_cmd.valid = !!valid; -+ -+ its_send_single_command(its, its_build_mapc_cmd, &desc); -+} -+ -+static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_mapvi_cmd.dev = dev; -+ desc.its_mapvi_cmd.phys_id = irq_id; -+ desc.its_mapvi_cmd.event_id = id; -+ -+ its_send_single_command(dev->its, its_build_mapvi_cmd, &desc); -+} -+ -+static void its_send_movi(struct its_device *dev, -+ struct its_collection *col, u32 id) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_movi_cmd.dev = dev; -+ desc.its_movi_cmd.col = col; -+ desc.its_movi_cmd.event_id = id; -+ -+ its_send_single_command(dev->its, its_build_movi_cmd, &desc); -+} -+ -+static void its_send_discard(struct its_device *dev, u32 id) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_discard_cmd.dev = dev; -+ desc.its_discard_cmd.event_id = id; -+ -+ its_send_single_command(dev->its, its_build_discard_cmd, &desc); -+} -+ -+static void its_send_invall(struct its_node *its, struct its_collection *col) -+{ -+ struct its_cmd_desc desc; -+ -+ desc.its_invall_cmd.col = col; -+ -+ its_send_single_command(its, its_build_invall_cmd, &desc); -+} -+ -+/* -+ * irqchip functions - assumes MSI, mostly. -+ */ -+ -+static inline u32 its_get_event_id(struct irq_data *d) -+{ -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ return d->hwirq - its_dev->event_map.lpi_base; -+} -+ -+static void lpi_set_config(struct irq_data *d, bool enable) -+{ -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ irq_hw_number_t hwirq = d->hwirq; -+ u32 id = its_get_event_id(d); -+ u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192; -+ -+ if (enable) -+ *cfg |= LPI_PROP_ENABLED; -+ else -+ *cfg &= ~LPI_PROP_ENABLED; -+ -+ /* -+ * Make the above write visible to the redistributors. -+ * And yes, we're flushing exactly: One. Single. Byte. -+ * Humpf... 
-+ */ -+ if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) -+ __flush_dcache_area(cfg, sizeof(*cfg)); -+ else -+ dsb(ishst); -+ its_send_inv(its_dev, id); -+} -+ -+static void its_mask_irq(struct irq_data *d) -+{ -+ lpi_set_config(d, false); -+} -+ -+static void its_unmask_irq(struct irq_data *d) -+{ -+ lpi_set_config(d, true); -+} -+ -+static void its_eoi_irq(struct irq_data *d) -+{ -+ gic_write_eoir(d->hwirq); -+} -+ -+static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, -+ bool force) -+{ -+ unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ struct its_collection *target_col; -+ u32 id = its_get_event_id(d); -+ -+ if (cpu >= nr_cpu_ids) -+ return -EINVAL; -+ -+ target_col = &its_dev->its->collections[cpu]; -+ its_send_movi(its_dev, target_col, id); -+ its_dev->event_map.col_map[id] = cpu; -+ -+ return IRQ_SET_MASK_OK_DONE; -+} -+ -+static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) -+{ -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ struct its_node *its; -+ u64 addr; -+ -+ its = its_dev->its; -+ addr = its->phys_base + GITS_TRANSLATER; -+ -+ msg->address_lo = addr & ((1UL << 32) - 1); -+ msg->address_hi = addr >> 32; -+ msg->data = its_get_event_id(d); -+} -+ -+static struct irq_chip its_irq_chip = { -+ .name = "ITS", -+ .irq_mask = its_mask_irq, -+ .irq_unmask = its_unmask_irq, -+ .irq_eoi = its_eoi_irq, -+ .irq_set_affinity = its_set_affinity, -+ .irq_compose_msi_msg = its_irq_compose_msi_msg, -+}; -+ -+static void its_mask_msi_irq(struct irq_data *d) -+{ -+ pci_msi_mask_irq(d); -+ irq_chip_mask_parent(d); -+} -+ -+static void its_unmask_msi_irq(struct irq_data *d) -+{ -+ pci_msi_unmask_irq(d); -+ irq_chip_unmask_parent(d); -+} -+ -+static struct irq_chip its_msi_irq_chip = { -+ .name = "ITS-MSI", -+ .irq_unmask = its_unmask_msi_irq, -+ .irq_mask = its_mask_msi_irq, -+ .irq_eoi = irq_chip_eoi_parent, -+ 
.irq_write_msi_msg = pci_msi_domain_write_msg, -+}; -+ -+/* -+ * How we allocate LPIs: -+ * -+ * The GIC has id_bits bits for interrupt identifiers. From there, we -+ * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as -+ * we allocate LPIs by chunks of 32, we can shift the whole thing by 5 -+ * bits to the right. -+ * -+ * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. -+ */ -+#define IRQS_PER_CHUNK_SHIFT 5 -+#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) -+ -+static unsigned long *lpi_bitmap; -+static u32 lpi_chunks; -+static DEFINE_SPINLOCK(lpi_lock); -+ -+static int its_lpi_to_chunk(int lpi) -+{ -+ return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT; -+} -+ -+static int its_chunk_to_lpi(int chunk) -+{ -+ return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; -+} -+ -+static int its_lpi_init(u32 id_bits) -+{ -+ lpi_chunks = its_lpi_to_chunk(1UL << id_bits); -+ -+ lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long), -+ GFP_KERNEL); -+ if (!lpi_bitmap) { -+ lpi_chunks = 0; -+ return -ENOMEM; -+ } -+ -+ pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks); -+ return 0; -+} -+ -+static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) -+{ -+ unsigned long *bitmap = NULL; -+ int chunk_id; -+ int nr_chunks; -+ int i; -+ -+ nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK); -+ -+ spin_lock(&lpi_lock); -+ -+ do { -+ chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks, -+ 0, nr_chunks, 0); -+ if (chunk_id < lpi_chunks) -+ break; -+ -+ nr_chunks--; -+ } while (nr_chunks > 0); -+ -+ if (!nr_chunks) -+ goto out; -+ -+ bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long), -+ GFP_ATOMIC); -+ if (!bitmap) -+ goto out; -+ -+ for (i = 0; i < nr_chunks; i++) -+ set_bit(chunk_id + i, lpi_bitmap); -+ -+ *base = its_chunk_to_lpi(chunk_id); -+ *nr_ids = nr_chunks * IRQS_PER_CHUNK; -+ -+out: -+ spin_unlock(&lpi_lock); -+ -+ if (!bitmap) -+ *base = *nr_ids = 0; -+ -+ return bitmap; -+} 
-+ -+static void its_lpi_free(struct event_lpi_map *map) -+{ -+ int base = map->lpi_base; -+ int nr_ids = map->nr_lpis; -+ int lpi; -+ -+ spin_lock(&lpi_lock); -+ -+ for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { -+ int chunk = its_lpi_to_chunk(lpi); -+ BUG_ON(chunk > lpi_chunks); -+ if (test_bit(chunk, lpi_bitmap)) { -+ clear_bit(chunk, lpi_bitmap); -+ } else { -+ pr_err("Bad LPI chunk %d\n", chunk); -+ } -+ } -+ -+ spin_unlock(&lpi_lock); -+ -+ kfree(map->lpi_map); -+ kfree(map->col_map); -+} -+ -+/* -+ * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to -+ * deal with (one configuration byte per interrupt). PENDBASE has to -+ * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). -+ */ -+#define LPI_PROPBASE_SZ SZ_64K -+#define LPI_PENDBASE_SZ (LPI_PROPBASE_SZ / 8 + SZ_1K) -+ -+/* -+ * This is how many bits of ID we need, including the useless ones. -+ */ -+#define LPI_NRBITS ilog2(LPI_PROPBASE_SZ + SZ_8K) -+ -+#define LPI_PROP_DEFAULT_PRIO 0xa0 -+ -+static int __init its_alloc_lpi_tables(void) -+{ -+ phys_addr_t paddr; -+ -+ gic_rdists->prop_page = alloc_pages(GFP_NOWAIT, -+ get_order(LPI_PROPBASE_SZ)); -+ if (!gic_rdists->prop_page) { -+ pr_err("Failed to allocate PROPBASE\n"); -+ return -ENOMEM; -+ } -+ -+ paddr = page_to_phys(gic_rdists->prop_page); -+ pr_info("GIC: using LPI property table @%pa\n", &paddr); -+ -+ /* Priority 0xa0, Group-1, disabled */ -+ memset(page_address(gic_rdists->prop_page), -+ LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, -+ LPI_PROPBASE_SZ); -+ -+ /* Make sure the GIC will observe the written configuration */ -+ __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ); -+ -+ return 0; -+} -+ -+static const char *its_base_type_string[] = { -+ [GITS_BASER_TYPE_DEVICE] = "Devices", -+ [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", -+ [GITS_BASER_TYPE_CPU] = "Physical CPUs", -+ [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", -+ [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", -+ 
[GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", -+ [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", -+}; -+ -+static void its_free_tables(struct its_node *its) -+{ -+ int i; -+ -+ for (i = 0; i < GITS_BASER_NR_REGS; i++) { -+ if (its->tables[i]) { -+ free_page((unsigned long)its->tables[i]); -+ its->tables[i] = NULL; -+ } -+ } -+} -+ -+static int its_alloc_tables(struct its_node *its) -+{ -+ int err; -+ int i; -+ int psz = SZ_64K; -+ u64 shr = GITS_BASER_InnerShareable; -+ u64 cache = GITS_BASER_WaWb; -+ -+ for (i = 0; i < GITS_BASER_NR_REGS; i++) { -+ u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); -+ u64 type = GITS_BASER_TYPE(val); -+ u64 entry_size = GITS_BASER_ENTRY_SIZE(val); -+ int order = get_order(psz); -+ int alloc_size; -+ u64 tmp; -+ void *base; -+ -+ if (type == GITS_BASER_TYPE_NONE) -+ continue; -+ -+ /* -+ * Allocate as many entries as required to fit the -+ * range of device IDs that the ITS can grok... The ID -+ * space being incredibly sparse, this results in a -+ * massive waste of memory. -+ * -+ * For other tables, only allocate a single page. -+ */ -+ if (type == GITS_BASER_TYPE_DEVICE) { -+ u64 typer = readq_relaxed(its->base + GITS_TYPER); -+ u32 ids = GITS_TYPER_DEVBITS(typer); -+ -+ /* -+ * 'order' was initialized earlier to the default page -+ * granule of the the ITS. We can't have an allocation -+ * smaller than that. If the requested allocation -+ * is smaller, round up to the default page granule. 
-+ */ -+ order = max(get_order((1UL << ids) * entry_size), -+ order); -+ if (order >= MAX_ORDER) { -+ order = MAX_ORDER - 1; -+ pr_warn("%s: Device Table too large, reduce its page order to %u\n", -+ its->msi_chip.of_node->full_name, order); -+ } -+ } -+ -+ alloc_size = (1 << order) * PAGE_SIZE; -+ base = (void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, order); -+ if (!base) { -+ err = -ENOMEM; -+ goto out_free; -+ } -+ -+ its->tables[i] = base; -+ -+retry_baser: -+ val = (virt_to_phys(base) | -+ (type << GITS_BASER_TYPE_SHIFT) | -+ ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | -+ cache | -+ shr | -+ GITS_BASER_VALID); -+ -+ switch (psz) { -+ case SZ_4K: -+ val |= GITS_BASER_PAGE_SIZE_4K; -+ break; -+ case SZ_16K: -+ val |= GITS_BASER_PAGE_SIZE_16K; -+ break; -+ case SZ_64K: -+ val |= GITS_BASER_PAGE_SIZE_64K; -+ break; -+ } -+ -+ val |= (alloc_size / psz) - 1; -+ -+ writeq_relaxed(val, its->base + GITS_BASER + i * 8); -+ tmp = readq_relaxed(its->base + GITS_BASER + i * 8); -+ -+ if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { -+ /* -+ * Shareability didn't stick. Just use -+ * whatever the read reported, which is likely -+ * to be the only thing this redistributor -+ * supports. If that's zero, make it -+ * non-cacheable as well. -+ */ -+ shr = tmp & GITS_BASER_SHAREABILITY_MASK; -+ if (!shr) { -+ cache = GITS_BASER_nC; -+ __flush_dcache_area(base, alloc_size); -+ } -+ goto retry_baser; -+ } -+ -+ if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { -+ /* -+ * Page size didn't stick. Let's try a smaller -+ * size and retry. If we reach 4K, then -+ * something is horribly wrong... 
-+ */ -+ switch (psz) { -+ case SZ_16K: -+ psz = SZ_4K; -+ goto retry_baser; -+ case SZ_64K: -+ psz = SZ_16K; -+ goto retry_baser; -+ } -+ } -+ -+ if (val != tmp) { -+ pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n", -+ its->msi_chip.of_node->full_name, i, -+ (unsigned long) val, (unsigned long) tmp); -+ err = -ENXIO; -+ goto out_free; -+ } -+ -+ pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", -+ (int)(alloc_size / entry_size), -+ its_base_type_string[type], -+ (unsigned long)virt_to_phys(base), -+ psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); -+ } -+ -+ return 0; -+ -+out_free: -+ its_free_tables(its); -+ -+ return err; -+} -+ -+static int its_alloc_collections(struct its_node *its) -+{ -+ its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections), -+ GFP_KERNEL); -+ if (!its->collections) -+ return -ENOMEM; -+ -+ return 0; -+} -+ -+static void its_cpu_init_lpis(void) -+{ -+ void __iomem *rbase = gic_data_rdist_rd_base(); -+ struct page *pend_page; -+ u64 val, tmp; -+ -+ /* If we didn't allocate the pending table yet, do it now */ -+ pend_page = gic_data_rdist()->pend_page; -+ if (!pend_page) { -+ phys_addr_t paddr; -+ /* -+ * The pending pages have to be at least 64kB aligned, -+ * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. 
-+ */ -+ pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO, -+ get_order(max(LPI_PENDBASE_SZ, SZ_64K))); -+ if (!pend_page) { -+ pr_err("Failed to allocate PENDBASE for CPU%d\n", -+ smp_processor_id()); -+ return; -+ } -+ -+ /* Make sure the GIC will observe the zero-ed page */ -+ __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ); -+ -+ paddr = page_to_phys(pend_page); -+ pr_info("CPU%d: using LPI pending table @%pa\n", -+ smp_processor_id(), &paddr); -+ gic_data_rdist()->pend_page = pend_page; -+ } -+ -+ /* Disable LPIs */ -+ val = readl_relaxed(rbase + GICR_CTLR); -+ val &= ~GICR_CTLR_ENABLE_LPIS; -+ writel_relaxed(val, rbase + GICR_CTLR); -+ -+ /* -+ * Make sure any change to the table is observable by the GIC. -+ */ -+ dsb(sy); -+ -+ /* set PROPBASE */ -+ val = (page_to_phys(gic_rdists->prop_page) | -+ GICR_PROPBASER_InnerShareable | -+ GICR_PROPBASER_WaWb | -+ ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); -+ -+ writeq_relaxed(val, rbase + GICR_PROPBASER); -+ tmp = readq_relaxed(rbase + GICR_PROPBASER); -+ -+ if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { -+ if (!(tmp & GICR_PROPBASER_SHAREABILITY_MASK)) { -+ /* -+ * The HW reports non-shareable, we must -+ * remove the cacheability attributes as -+ * well. -+ */ -+ val &= ~(GICR_PROPBASER_SHAREABILITY_MASK | -+ GICR_PROPBASER_CACHEABILITY_MASK); -+ val |= GICR_PROPBASER_nC; -+ writeq_relaxed(val, rbase + GICR_PROPBASER); -+ } -+ pr_info_once("GIC: using cache flushing for LPI property table\n"); -+ gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; -+ } -+ -+ /* set PENDBASE */ -+ val = (page_to_phys(pend_page) | -+ GICR_PENDBASER_InnerShareable | -+ GICR_PENDBASER_WaWb); -+ -+ writeq_relaxed(val, rbase + GICR_PENDBASER); -+ tmp = readq_relaxed(rbase + GICR_PENDBASER); -+ -+ if (!(tmp & GICR_PENDBASER_SHAREABILITY_MASK)) { -+ /* -+ * The HW reports non-shareable, we must remove the -+ * cacheability attributes as well. 
-+ */ -+ val &= ~(GICR_PENDBASER_SHAREABILITY_MASK | -+ GICR_PENDBASER_CACHEABILITY_MASK); -+ val |= GICR_PENDBASER_nC; -+ writeq_relaxed(val, rbase + GICR_PENDBASER); -+ } -+ -+ /* Enable LPIs */ -+ val = readl_relaxed(rbase + GICR_CTLR); -+ val |= GICR_CTLR_ENABLE_LPIS; -+ writel_relaxed(val, rbase + GICR_CTLR); -+ -+ /* Make sure the GIC has seen the above */ -+ dsb(sy); -+} -+ -+static void its_cpu_init_collection(void) -+{ -+ struct its_node *its; -+ int cpu; -+ -+ spin_lock(&its_lock); -+ cpu = smp_processor_id(); -+ -+ list_for_each_entry(its, &its_nodes, entry) { -+ u64 target; -+ -+ /* -+ * We now have to bind each collection to its target -+ * redistributor. -+ */ -+ if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) { -+ /* -+ * This ITS wants the physical address of the -+ * redistributor. -+ */ -+ target = gic_data_rdist()->phys_base; -+ } else { -+ /* -+ * This ITS wants a linear CPU number. -+ */ -+ target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); -+ target = GICR_TYPER_CPU_NUMBER(target) << 16; -+ } -+ -+ /* Perform collection mapping */ -+ its->collections[cpu].target_address = target; -+ its->collections[cpu].col_id = cpu; -+ -+ its_send_mapc(its, &its->collections[cpu], 1); -+ its_send_invall(its, &its->collections[cpu]); -+ } -+ -+ spin_unlock(&its_lock); -+} -+ -+static struct its_device *its_find_device(struct its_node *its, u32 dev_id) -+{ -+ struct its_device *its_dev = NULL, *tmp; -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&its->lock, flags); -+ -+ list_for_each_entry(tmp, &its->its_device_list, entry) { -+ if (tmp->device_id == dev_id) { -+ its_dev = tmp; -+ break; -+ } -+ } -+ -+ raw_spin_unlock_irqrestore(&its->lock, flags); -+ -+ return its_dev; -+} -+ -+static struct its_device *its_create_device(struct its_node *its, u32 dev_id, -+ int nvecs) -+{ -+ struct its_device *dev; -+ unsigned long *lpi_map; -+ unsigned long flags; -+ u16 *col_map = NULL; -+ void *itt; -+ int lpi_base; -+ int nr_lpis; -+ int 
nr_ites; -+ int sz; -+ -+ dev = kzalloc(sizeof(*dev), GFP_KERNEL); -+ /* -+ * At least one bit of EventID is being used, hence a minimum -+ * of two entries. No, the architecture doesn't let you -+ * express an ITT with a single entry. -+ */ -+ nr_ites = max(2UL, roundup_pow_of_two(nvecs)); -+ sz = nr_ites * its->ite_size; -+ sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; -+ itt = kzalloc(sz, GFP_KERNEL); -+ lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); -+ if (lpi_map) -+ col_map = kzalloc(sizeof(*col_map) * nr_lpis, GFP_KERNEL); -+ -+ if (!dev || !itt || !lpi_map || !col_map) { -+ kfree(dev); -+ kfree(itt); -+ kfree(lpi_map); -+ kfree(col_map); -+ return NULL; -+ } -+ -+ __flush_dcache_area(itt, sz); -+ -+ dev->its = its; -+ dev->itt = itt; -+ dev->nr_ites = nr_ites; -+ dev->event_map.lpi_map = lpi_map; -+ dev->event_map.col_map = col_map; -+ dev->event_map.lpi_base = lpi_base; -+ dev->event_map.nr_lpis = nr_lpis; -+ dev->device_id = dev_id; -+ INIT_LIST_HEAD(&dev->entry); -+ -+ raw_spin_lock_irqsave(&its->lock, flags); -+ list_add(&dev->entry, &its->its_device_list); -+ raw_spin_unlock_irqrestore(&its->lock, flags); -+ -+ /* Map device to its ITT */ -+ its_send_mapd(dev, 1); -+ -+ return dev; -+} -+ -+static void its_free_device(struct its_device *its_dev) -+{ -+ unsigned long flags; -+ -+ raw_spin_lock_irqsave(&its_dev->its->lock, flags); -+ list_del(&its_dev->entry); -+ raw_spin_unlock_irqrestore(&its_dev->its->lock, flags); -+ kfree(its_dev->itt); -+ kfree(its_dev); -+} -+ -+static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) -+{ -+ int idx; -+ -+ idx = find_first_zero_bit(dev->event_map.lpi_map, -+ dev->event_map.nr_lpis); -+ if (idx == dev->event_map.nr_lpis) -+ return -ENOSPC; -+ -+ *hwirq = dev->event_map.lpi_base + idx; -+ set_bit(idx, dev->event_map.lpi_map); -+ -+ return 0; -+} -+ -+struct its_pci_alias { -+ struct pci_dev *pdev; -+ u32 dev_id; -+ u32 count; -+}; -+ -+static int its_pci_msi_vec_count(struct 
pci_dev *pdev) -+{ -+ int msi, msix; -+ -+ msi = max(pci_msi_vec_count(pdev), 0); -+ msix = max(pci_msix_vec_count(pdev), 0); -+ -+ return max(msi, msix); -+} -+ -+static int its_get_pci_alias(struct pci_dev *pdev, u16 alias, void *data) -+{ -+ struct its_pci_alias *dev_alias = data; -+ -+ dev_alias->dev_id = alias; -+ if (pdev != dev_alias->pdev) -+ dev_alias->count += its_pci_msi_vec_count(dev_alias->pdev); -+ -+ return 0; -+} -+ -+int __its_msi_prepare(struct irq_domain *domain, u32 dev_id, -+ struct device *dev, int nvec, msi_alloc_info_t *info) -+{ -+ struct its_node *its; -+ struct its_device *its_dev; -+ -+ its = domain->parent->host_data; -+ -+ its_dev = its_find_device(its, dev_id); -+ if (its_dev) { -+ /* -+ * We already have seen this ID, probably through -+ * another alias (PCI bridge of some sort). No need to -+ * create the device. -+ */ -+ dev_dbg(dev, "Reusing ITT for devID %x\n", dev_id); -+ goto out; -+ } -+ -+ its_dev = its_create_device(its, dev_id, nvec); -+ if (!its_dev) -+ return -ENOMEM; -+ -+ dev_dbg(dev, "ITT %d entries, %d bits\n", -+ nvec, ilog2(nvec)); -+out: -+ info->scratchpad[0].ptr = its_dev; -+ info->scratchpad[1].ptr = dev; -+ -+ return 0; -+} -+ -+static int its_msi_prepare(struct irq_domain *domain, struct device *dev, -+ int nvec, msi_alloc_info_t *info) -+{ -+ struct pci_dev *pdev; -+ struct its_pci_alias dev_alias; -+ u32 dev_id; -+ -+ if (!dev_is_pci(dev)) -+ return -EINVAL; -+ -+ pdev = to_pci_dev(dev); -+ dev_alias.pdev = pdev; -+ dev_alias.count = nvec; -+ -+ pci_for_each_dma_alias(pdev, its_get_pci_alias, &dev_alias); -+ -+ dev_dbg(dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); -+ dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); -+ return __its_msi_prepare(domain->parent, dev_alias.dev_id, dev, dev_alias.count, info); -+} -+ -+static struct msi_domain_ops its_pci_msi_ops = { -+ .msi_prepare = its_msi_prepare, -+}; -+ -+static struct msi_domain_info its_pci_msi_domain_info = { -+ .flags = 
(MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | -+ MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), -+ .ops = &its_pci_msi_ops, -+ .chip = &its_msi_irq_chip, -+}; -+ -+static int its_irq_gic_domain_alloc(struct irq_domain *domain, -+ unsigned int virq, -+ irq_hw_number_t hwirq) -+{ -+ struct of_phandle_args args; -+ -+ args.np = domain->parent->of_node; -+ args.args_count = 3; -+ args.args[0] = GIC_IRQ_TYPE_LPI; -+ args.args[1] = hwirq; -+ args.args[2] = IRQ_TYPE_EDGE_RISING; -+ -+ return irq_domain_alloc_irqs_parent(domain, virq, 1, &args); -+} -+ -+static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *args) -+{ -+ msi_alloc_info_t *info = args; -+ struct its_device *its_dev = info->scratchpad[0].ptr; -+ irq_hw_number_t hwirq; -+ int err; -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ err = its_alloc_device_irq(its_dev, &hwirq); -+ if (err) -+ return err; -+ -+ err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); -+ if (err) -+ return err; -+ -+ irq_domain_set_hwirq_and_chip(domain, virq + i, -+ hwirq, &its_irq_chip, its_dev); -+ dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n", -+ (int)(hwirq - its_dev->event_map.lpi_base), -+ (int)hwirq, virq + i); -+ } -+ -+ return 0; -+} -+ -+static void its_irq_domain_activate(struct irq_domain *domain, -+ struct irq_data *d) -+{ -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ u32 event = its_get_event_id(d); -+ -+ /* Bind the LPI to the first possible CPU */ -+ its_dev->event_map.col_map[event] = cpumask_first(cpu_online_mask); -+ -+ /* Map the GIC IRQ and event to the device */ -+ its_send_mapvi(its_dev, d->hwirq, event); -+} -+ -+static void its_irq_domain_deactivate(struct irq_domain *domain, -+ struct irq_data *d) -+{ -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ u32 event = its_get_event_id(d); -+ -+ /* Stop the delivery of interrupts */ -+ its_send_discard(its_dev, event); -+} -+ -+static void 
its_irq_domain_free(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs) -+{ -+ struct irq_data *d = irq_domain_get_irq_data(domain, virq); -+ struct its_device *its_dev = irq_data_get_irq_chip_data(d); -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ struct irq_data *data = irq_domain_get_irq_data(domain, -+ virq + i); -+ u32 event = its_get_event_id(data); -+ -+ /* Mark interrupt index as unused */ -+ clear_bit(event, its_dev->event_map.lpi_map); -+ -+ /* Nuke the entry in the domain */ -+ irq_domain_reset_irq_data(data); -+ } -+ -+ /* If all interrupts have been freed, start mopping the floor */ -+ if (bitmap_empty(its_dev->event_map.lpi_map, -+ its_dev->event_map.nr_lpis)) { -+ its_lpi_free(&its_dev->event_map); -+ -+ /* Unmap device/itt */ -+ its_send_mapd(its_dev, 0); -+ its_free_device(its_dev); -+ } -+ -+ irq_domain_free_irqs_parent(domain, virq, nr_irqs); -+} -+ -+static const struct irq_domain_ops its_domain_ops = { -+ .alloc = its_irq_domain_alloc, -+ .free = its_irq_domain_free, -+ .activate = its_irq_domain_activate, -+ .deactivate = its_irq_domain_deactivate, -+}; -+ -+static int its_force_quiescent(void __iomem *base) -+{ -+ u32 count = 1000000; /* 1s */ -+ u32 val; -+ -+ val = readl_relaxed(base + GITS_CTLR); -+ if (val & GITS_CTLR_QUIESCENT) -+ return 0; -+ -+ /* Disable the generation of all interrupts to this ITS */ -+ val &= ~GITS_CTLR_ENABLE; -+ writel_relaxed(val, base + GITS_CTLR); -+ -+ /* Poll GITS_CTLR and wait until ITS becomes quiescent */ -+ while (1) { -+ val = readl_relaxed(base + GITS_CTLR); -+ if (val & GITS_CTLR_QUIESCENT) -+ return 0; -+ -+ count--; -+ if (!count) -+ return -EBUSY; -+ -+ cpu_relax(); -+ udelay(1); -+ } -+} -+ -+static int its_probe(struct device_node *node, struct irq_domain *parent) -+{ -+ struct resource res; -+ struct its_node *its; -+ void __iomem *its_base; -+ u32 val; -+ u64 baser, tmp; -+ int err; -+ -+ err = of_address_to_resource(node, 0, &res); -+ if (err) { -+ pr_warn("%s: no 
regs?\n", node->full_name); -+ return -ENXIO; -+ } -+ -+ its_base = ioremap(res.start, resource_size(&res)); -+ if (!its_base) { -+ pr_warn("%s: unable to map registers\n", node->full_name); -+ return -ENOMEM; -+ } -+ -+ val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; -+ if (val != 0x30 && val != 0x40) { -+ pr_warn("%s: no ITS detected, giving up\n", node->full_name); -+ err = -ENODEV; -+ goto out_unmap; -+ } -+ -+ err = its_force_quiescent(its_base); -+ if (err) { -+ pr_warn("%s: failed to quiesce, giving up\n", -+ node->full_name); -+ goto out_unmap; -+ } -+ -+ pr_info("ITS: %s\n", node->full_name); -+ -+ its = kzalloc(sizeof(*its), GFP_KERNEL); -+ if (!its) { -+ err = -ENOMEM; -+ goto out_unmap; -+ } -+ -+ raw_spin_lock_init(&its->lock); -+ INIT_LIST_HEAD(&its->entry); -+ INIT_LIST_HEAD(&its->its_device_list); -+ its->base = its_base; -+ its->phys_base = res.start; -+ its->msi_chip.of_node = node; -+ its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; -+ -+ its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); -+ if (!its->cmd_base) { -+ err = -ENOMEM; -+ goto out_free_its; -+ } -+ its->cmd_write = its->cmd_base; -+ -+ err = its_alloc_tables(its); -+ if (err) -+ goto out_free_cmd; -+ -+ err = its_alloc_collections(its); -+ if (err) -+ goto out_free_tables; -+ -+ baser = (virt_to_phys(its->cmd_base) | -+ GITS_CBASER_WaWb | -+ GITS_CBASER_InnerShareable | -+ (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | -+ GITS_CBASER_VALID); -+ -+ writeq_relaxed(baser, its->base + GITS_CBASER); -+ tmp = readq_relaxed(its->base + GITS_CBASER); -+ -+ if ((tmp ^ baser) & GITS_CBASER_SHAREABILITY_MASK) { -+ if (!(tmp & GITS_CBASER_SHAREABILITY_MASK)) { -+ /* -+ * The HW reports non-shareable, we must -+ * remove the cacheability attributes as -+ * well. 
-+ */ -+ baser &= ~(GITS_CBASER_SHAREABILITY_MASK | -+ GITS_CBASER_CACHEABILITY_MASK); -+ baser |= GITS_CBASER_nC; -+ writeq_relaxed(baser, its->base + GITS_CBASER); -+ } -+ pr_info("ITS: using cache flushing for cmd queue\n"); -+ its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; -+ } -+ -+ writeq_relaxed(0, its->base + GITS_CWRITER); -+ writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); -+ -+ if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { -+ its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); -+ if (!its->domain) { -+ err = -ENOMEM; -+ goto out_free_tables; -+ } -+ -+ its->domain->parent = parent; -+ -+ its->msi_chip.domain = pci_msi_create_irq_domain(node, -+ &its_pci_msi_domain_info, -+ its->domain); -+ if (!its->msi_chip.domain) { -+ err = -ENOMEM; -+ goto out_free_domains; -+ } -+ -+ err = of_pci_msi_chip_add(&its->msi_chip); -+ if (err) -+ goto out_free_domains; -+ } -+ -+ spin_lock(&its_lock); -+ list_add(&its->entry, &its_nodes); -+ spin_unlock(&its_lock); -+ -+ return 0; -+ -+out_free_domains: -+ if (its->msi_chip.domain) -+ irq_domain_remove(its->msi_chip.domain); -+ if (its->domain) -+ irq_domain_remove(its->domain); -+out_free_tables: -+ its_free_tables(its); -+out_free_cmd: -+ kfree(its->cmd_base); -+out_free_its: -+ kfree(its); -+out_unmap: -+ iounmap(its_base); -+ pr_err("ITS: failed probing %s (%d)\n", node->full_name, err); -+ return err; -+} -+ -+static bool gic_rdists_supports_plpis(void) -+{ -+ return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); -+} -+ -+int its_cpu_init(void) -+{ -+ if (!list_empty(&its_nodes)) { -+ if (!gic_rdists_supports_plpis()) { -+ pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); -+ return -ENXIO; -+ } -+ its_cpu_init_lpis(); -+ its_cpu_init_collection(); -+ } -+ -+ return 0; -+} -+ -+static struct of_device_id its_device_id[] = { -+ { .compatible = "arm,gic-v3-its", }, -+ {}, -+}; -+ -+int its_init(struct device_node *node, struct rdists 
*rdists, -+ struct irq_domain *parent_domain) -+{ -+ struct device_node *np; -+ -+ for (np = of_find_matching_node(node, its_device_id); np; -+ np = of_find_matching_node(np, its_device_id)) { -+ its_probe(np, parent_domain); -+ } -+ -+ if (list_empty(&its_nodes)) { -+ pr_warn("ITS: No ITS available, not enabling LPIs\n"); -+ return -ENXIO; -+ } -+ -+ gic_rdists = rdists; -+ gic_root_node = node; -+ -+ its_alloc_lpi_tables(); -+ its_lpi_init(rdists->id_bits); -+ -+ return 0; -+} -diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c -index aa17ae8..34feda3 100644 ---- a/drivers/irqchip/irq-gic-v3.c -+++ b/drivers/irqchip/irq-gic-v3.c -@@ -34,20 +34,25 @@ - #include "irq-gic-common.h" - #include "irqchip.h" - -+struct redist_region { -+ void __iomem *redist_base; -+ phys_addr_t phys_base; -+}; -+ - struct gic_chip_data { - void __iomem *dist_base; -- void __iomem **redist_base; -- void __iomem * __percpu *rdist; -+ struct redist_region *redist_regions; -+ struct rdists rdists; - struct irq_domain *domain; - u64 redist_stride; -- u32 redist_regions; -+ u32 nr_redist_regions; - unsigned int irq_nr; - }; - - static struct gic_chip_data gic_data __read_mostly; - --#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist)) --#define gic_data_rdist_rd_base() (*gic_data_rdist()) -+#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) -+#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) - #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) - - /* Our default, arbitrary priority value. Linux only uses one anyway. */ -@@ -71,9 +76,6 @@ static inline void __iomem *gic_dist_base(struct irq_data *d) - if (d->hwirq <= 1023) /* SPI -> dist_base */ - return gic_data.dist_base; - -- if (d->hwirq >= 8192) -- BUG(); /* LPI Detected!!! 
*/ -- - return NULL; - } - -@@ -271,11 +273,11 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs - do { - irqnr = gic_read_iar(); - -- if (likely(irqnr > 15 && irqnr < 1020)) { -+ if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { - int err; - err = handle_domain_irq(gic_data.domain, irqnr, regs); - if (err) { -- WARN_ONCE(true, "Unexpected SPI received!\n"); -+ WARN_ONCE(true, "Unexpected interrupt received!\n"); - gic_write_eoir(irqnr); - } - continue; -@@ -333,8 +335,8 @@ static int gic_populate_rdist(void) - MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | - MPIDR_AFFINITY_LEVEL(mpidr, 0)); - -- for (i = 0; i < gic_data.redist_regions; i++) { -- void __iomem *ptr = gic_data.redist_base[i]; -+ for (i = 0; i < gic_data.nr_redist_regions; i++) { -+ void __iomem *ptr = gic_data.redist_regions[i].redist_base; - u32 reg; - - reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; -@@ -347,10 +349,13 @@ static int gic_populate_rdist(void) - do { - typer = readq_relaxed(ptr + GICR_TYPER); - if ((typer >> 32) == aff) { -+ u64 offset = ptr - gic_data.redist_regions[i].redist_base; - gic_data_rdist_rd_base() = ptr; -- pr_info("CPU%d: found redistributor %llx @%p\n", -+ gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset; -+ pr_info("CPU%d: found redistributor %llx region %d:%pa\n", - smp_processor_id(), -- (unsigned long long)mpidr, ptr); -+ (unsigned long long)mpidr, -+ i, &gic_data_rdist()->phys_base); - return 0; - } - -@@ -385,6 +390,11 @@ static void gic_cpu_sys_reg_init(void) - gic_write_grpen1(1); - } - -+static int gic_dist_supports_lpis(void) -+{ -+ return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS); -+} -+ - static void gic_cpu_init(void) - { - void __iomem *rbase; -@@ -399,6 +409,10 @@ static void gic_cpu_init(void) - - gic_cpu_config(rbase, gic_redist_wait_for_rwp); - -+ /* Give LPIs a spin */ -+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) -+ 
its_cpu_init(); -+ - /* initialise system registers */ - gic_cpu_sys_reg_init(); - } -@@ -585,12 +599,21 @@ static struct irq_chip gic_chip = { - .irq_set_affinity = gic_set_affinity, - }; - -+#define GIC_ID_NR (1U << gic_data.rdists.id_bits) -+ - static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, - irq_hw_number_t hw) - { - /* SGIs are private to the core kernel */ - if (hw < 16) - return -EPERM; -+ /* Nothing here */ -+ if (hw >= gic_data.irq_nr && hw < 8192) -+ return -EPERM; -+ /* Off limits */ -+ if (hw >= GIC_ID_NR) -+ return -EPERM; -+ - /* PPIs */ - if (hw < 32) { - irq_set_percpu_devid(irq); -@@ -604,7 +627,15 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, - handle_fasteoi_irq); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); - } -- irq_set_chip_data(irq, d->host_data); -+ /* LPIs */ -+ if (hw >= 8192 && hw < GIC_ID_NR) { -+ if (!gic_dist_supports_lpis()) -+ return -EPERM; -+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, -+ handle_fasteoi_irq, NULL, NULL); -+ set_irq_flags(irq, IRQF_VALID); -+ } -+ - return 0; - } - -@@ -625,6 +656,9 @@ static int gic_irq_domain_xlate(struct irq_domain *d, - case 1: /* PPI */ - *out_hwirq = intspec[1] + 16; - break; -+ case GIC_IRQ_TYPE_LPI: /* LPI */ -+ *out_hwirq = intspec[1]; -+ break; - default: - return -EINVAL; - } -@@ -641,9 +675,10 @@ static const struct irq_domain_ops gic_irq_domain_ops = { - static int __init gic_of_init(struct device_node *node, struct device_node *parent) - { - void __iomem *dist_base; -- void __iomem **redist_base; -+ struct redist_region *rdist_regs; - u64 redist_stride; -- u32 redist_regions; -+ u32 nr_redist_regions; -+ u32 typer; - u32 reg; - int gic_irqs; - int err; -@@ -664,54 +699,63 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare - goto out_unmap_dist; - } - -- if (of_property_read_u32(node, "#redistributor-regions", &redist_regions)) -- redist_regions = 1; -+ if (of_property_read_u32(node, 
"#redistributor-regions", &nr_redist_regions)) -+ nr_redist_regions = 1; - -- redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL); -- if (!redist_base) { -+ rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL); -+ if (!rdist_regs) { - err = -ENOMEM; - goto out_unmap_dist; - } - -- for (i = 0; i < redist_regions; i++) { -- redist_base[i] = of_iomap(node, 1 + i); -- if (!redist_base[i]) { -+ for (i = 0; i < nr_redist_regions; i++) { -+ struct resource res; -+ int ret; -+ -+ ret = of_address_to_resource(node, 1 + i, &res); -+ rdist_regs[i].redist_base = of_iomap(node, 1 + i); -+ if (ret || !rdist_regs[i].redist_base) { - pr_err("%s: couldn't map region %d\n", - node->full_name, i); - err = -ENODEV; - goto out_unmap_rdist; - } -+ rdist_regs[i].phys_base = res.start; - } - - if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) - redist_stride = 0; - - gic_data.dist_base = dist_base; -- gic_data.redist_base = redist_base; -- gic_data.redist_regions = redist_regions; -+ gic_data.redist_regions = rdist_regs; -+ gic_data.nr_redist_regions = nr_redist_regions; - gic_data.redist_stride = redist_stride; - - /* - * Find out how many interrupts are supported. 
- * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI) - */ -- gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f; -- gic_irqs = (gic_irqs + 1) * 32; -+ typer = readl_relaxed(gic_data.dist_base + GICD_TYPER); -+ gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer); -+ gic_irqs = GICD_TYPER_IRQS(typer); - if (gic_irqs > 1020) - gic_irqs = 1020; - gic_data.irq_nr = gic_irqs; - - gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops, - &gic_data); -- gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist)); -+ gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist)); - -- if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) { -+ if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) { - err = -ENOMEM; - goto out_free; - } - - set_handle_irq(gic_handle_irq); - -+ if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) -+ its_init(node, &gic_data.rdists, gic_data.domain); -+ - gic_smp_init(); - gic_dist_init(); - gic_cpu_init(); -@@ -722,12 +766,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare - out_free: - if (gic_data.domain) - irq_domain_remove(gic_data.domain); -- free_percpu(gic_data.rdist); -+ free_percpu(gic_data.rdists.rdist); - out_unmap_rdist: -- for (i = 0; i < redist_regions; i++) -- if (redist_base[i]) -- iounmap(redist_base[i]); -- kfree(redist_base); -+ for (i = 0; i < nr_redist_regions; i++) -+ if (rdist_regs[i].redist_base) -+ iounmap(rdist_regs[i].redist_base); -+ kfree(rdist_regs); - out_unmap_dist: - iounmap(dist_base); - return err; -diff --git a/drivers/irqchip/irq-sunxi-nmi.c b/drivers/irqchip/irq-sunxi-nmi.c -index eb9b59e..6b2b582 100644 ---- a/drivers/irqchip/irq-sunxi-nmi.c -+++ b/drivers/irqchip/irq-sunxi-nmi.c -@@ -50,12 +50,12 @@ static struct sunxi_sc_nmi_reg_offs sun6i_reg_offs = { - static inline void sunxi_sc_nmi_write(struct irq_chip_generic *gc, u32 off, - u32 val) - { -- irq_reg_writel(val, gc->reg_base + off); -+ 
irq_reg_writel(gc, val, off); - } - - static inline u32 sunxi_sc_nmi_read(struct irq_chip_generic *gc, u32 off) - { -- return irq_reg_readl(gc->reg_base + off); -+ return irq_reg_readl(gc, off); - } - - static void sunxi_sc_nmi_handle_irq(unsigned int irq, struct irq_desc *desc) -diff --git a/drivers/irqchip/irq-tb10x.c b/drivers/irqchip/irq-tb10x.c -index 7c44c99..accc200 100644 ---- a/drivers/irqchip/irq-tb10x.c -+++ b/drivers/irqchip/irq-tb10x.c -@@ -43,12 +43,12 @@ - static inline void ab_irqctl_writereg(struct irq_chip_generic *gc, u32 reg, - u32 val) - { -- irq_reg_writel(val, gc->reg_base + reg); -+ irq_reg_writel(gc, val, reg); - } - - static inline u32 ab_irqctl_readreg(struct irq_chip_generic *gc, u32 reg) - { -- return irq_reg_readl(gc->reg_base + reg); -+ return irq_reg_readl(gc, reg); - } - - static int tb10x_irq_set_type(struct irq_data *data, unsigned int flow_type) -diff --git a/drivers/of/device.c b/drivers/of/device.c -index 46d6c75..20c1332 100644 ---- a/drivers/of/device.c -+++ b/drivers/of/device.c -@@ -2,6 +2,9 @@ - #include - #include - #include -+#include -+#include -+#include - #include - #include - #include -@@ -66,6 +69,87 @@ int of_device_add(struct platform_device *ofdev) - return device_add(&ofdev->dev); - } - -+/** -+ * of_dma_configure - Setup DMA configuration -+ * @dev: Device to apply DMA configuration -+ * @np: Pointer to OF node having DMA configuration -+ * -+ * Try to get devices's DMA configuration from DT and update it -+ * accordingly. -+ * -+ * If platform code needs to use its own special DMA configuration, it -+ * can use a platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE events -+ * to fix up DMA configuration. -+ */ -+void of_dma_configure(struct device *dev, struct device_node *np) -+{ -+ u64 dma_addr, paddr, size; -+ int ret; -+ bool coherent; -+ unsigned long offset; -+ struct iommu_ops *iommu; -+ -+ /* -+ * Set default coherent_dma_mask to 32 bit. Drivers are expected to -+ * setup the correct supported mask. 
-+ */ -+ if (!dev->coherent_dma_mask) -+ dev->coherent_dma_mask = DMA_BIT_MASK(32); -+ -+ /* -+ * Set it to coherent_dma_mask by default if the architecture -+ * code has not set it. -+ */ -+ if (!dev->dma_mask) -+ dev->dma_mask = &dev->coherent_dma_mask; -+ -+ ret = of_dma_get_range(np, &dma_addr, &paddr, &size); -+ if (ret < 0) { -+ dma_addr = offset = 0; -+ size = dev->coherent_dma_mask + 1; -+ } else { -+ offset = PFN_DOWN(paddr - dma_addr); -+ -+ /* -+ * Add a work around to treat the size as mask + 1 in case -+ * it is defined in DT as a mask. -+ */ -+ if (size & 1) { -+ dev_warn(dev, "Invalid size 0x%llx for dma-range\n", -+ size); -+ size = size + 1; -+ } -+ -+ if (!size) { -+ dev_err(dev, "Adjusted size 0x%llx invalid\n", size); -+ return; -+ } -+ dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", offset); -+ } -+ -+ dev->dma_pfn_offset = offset; -+ -+ /* -+ * Limit coherent and dma mask based on size and default mask -+ * set by the driver. -+ */ -+ dev->coherent_dma_mask = min(dev->coherent_dma_mask, -+ DMA_BIT_MASK(ilog2(dma_addr + size))); -+ *dev->dma_mask = min((*dev->dma_mask), -+ DMA_BIT_MASK(ilog2(dma_addr + size))); -+ -+ coherent = of_dma_is_coherent(np); -+ dev_dbg(dev, "device is%sdma coherent\n", -+ coherent ? " " : " not "); -+ -+ iommu = of_iommu_configure(dev, np); -+ dev_dbg(dev, "device is%sbehind an iommu\n", -+ iommu ? " " : " not "); -+ -+ arch_setup_dma_ops(dev, dma_addr, size, iommu, coherent); -+} -+EXPORT_SYMBOL_GPL(of_dma_configure); -+ - int of_device_register(struct platform_device *pdev) - { - device_initialize(&pdev->dev); -diff --git a/drivers/of/irq.c b/drivers/of/irq.c -index b97363a..4419e62 100644 ---- a/drivers/of/irq.c -+++ b/drivers/of/irq.c -@@ -18,6 +18,7 @@ - * driver. 
- */ - -+#include - #include - #include - #include -@@ -576,3 +577,23 @@ err: - kfree(desc); - } - } -+ -+/** -+ * of_msi_configure - Set the msi_domain field of a device -+ * @dev: device structure to associate with an MSI irq domain -+ * @np: device node for that device -+ */ -+void of_msi_configure(struct device *dev, struct device_node *np) -+{ -+ struct device_node *msi_np; -+ struct irq_domain *d; -+ -+ msi_np = of_parse_phandle(np, "msi-parent", 0); -+ if (!msi_np) -+ return; -+ -+ d = irq_find_matching_host(msi_np, DOMAIN_BUS_PLATFORM_MSI); -+ if (!d) -+ d = irq_find_host(msi_np); -+ dev_set_msi_domain(dev, d); -+} -diff --git a/drivers/of/of_pci.c b/drivers/of/of_pci.c -index ecc5fa5..5751dc5 100644 ---- a/drivers/of/of_pci.c -+++ b/drivers/of/of_pci.c -@@ -2,6 +2,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -116,6 +117,26 @@ int of_get_pci_domain_nr(struct device_node *node) - } - EXPORT_SYMBOL_GPL(of_get_pci_domain_nr); - -+/** -+ * of_pci_dma_configure - Setup DMA configuration -+ * @dev: ptr to pci_dev struct of the PCI device -+ * -+ * Function to update PCI devices's DMA configuration using the same -+ * info from the OF node of host bridge's parent (if any). 
-+ */ -+void of_pci_dma_configure(struct pci_dev *pci_dev) -+{ -+ struct device *dev = &pci_dev->dev; -+ struct device *bridge = pci_get_host_bridge_device(pci_dev); -+ -+ if (!bridge->parent) -+ return; -+ -+ of_dma_configure(dev, bridge->parent->of_node); -+ pci_put_host_bridge_device(bridge); -+} -+EXPORT_SYMBOL_GPL(of_pci_dma_configure); -+ - #if defined(CONFIG_OF_ADDRESS) - /** - * of_pci_get_host_bridge_resources - Parse PCI host bridge resources from DT -@@ -140,7 +161,7 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, - unsigned char busno, unsigned char bus_max, - struct list_head *resources, resource_size_t *io_base) - { -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - struct resource *res; - struct resource *bus_range; - struct of_pci_range range; -@@ -226,10 +247,9 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, - conversion_failed: - kfree(res); - parse_failed: -- list_for_each_entry(window, resources, list) -+ resource_list_for_each_entry(window, resources) - kfree(window->res); - pci_free_resource_list(resources); -- kfree(bus_range); - return err; - } - EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); -@@ -240,7 +260,7 @@ EXPORT_SYMBOL_GPL(of_pci_get_host_bridge_resources); - static LIST_HEAD(of_pci_msi_chip_list); - static DEFINE_MUTEX(of_pci_msi_chip_mutex); - --int of_pci_msi_chip_add(struct msi_chip *chip) -+int of_pci_msi_chip_add(struct msi_controller *chip) - { - if (!of_property_read_bool(chip->of_node, "msi-controller")) - return -EINVAL; -@@ -253,7 +273,7 @@ int of_pci_msi_chip_add(struct msi_chip *chip) - } - EXPORT_SYMBOL_GPL(of_pci_msi_chip_add); - --void of_pci_msi_chip_remove(struct msi_chip *chip) -+void of_pci_msi_chip_remove(struct msi_controller *chip) - { - mutex_lock(&of_pci_msi_chip_mutex); - list_del(&chip->list); -@@ -261,9 +281,9 @@ void of_pci_msi_chip_remove(struct msi_chip *chip) - } - EXPORT_SYMBOL_GPL(of_pci_msi_chip_remove); - --struct msi_chip 
*of_pci_find_msi_chip_by_node(struct device_node *of_node) -+struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node) - { -- struct msi_chip *c; -+ struct msi_controller *c; - - mutex_lock(&of_pci_msi_chip_mutex); - list_for_each_entry(c, &of_pci_msi_chip_list, list) { -diff --git a/drivers/of/platform.c b/drivers/of/platform.c -index 3b64d0b..8a002d6 100644 ---- a/drivers/of/platform.c -+++ b/drivers/of/platform.c -@@ -25,6 +25,7 @@ - - const struct of_device_id of_default_bus_match_table[] = { - { .compatible = "simple-bus", }, -+ { .compatible = "simple-mfd", }, - #ifdef CONFIG_ARM_AMBA - { .compatible = "arm,amba-bus", }, - #endif /* CONFIG_ARM_AMBA */ -@@ -138,7 +139,7 @@ struct platform_device *of_device_alloc(struct device_node *np, - } - - dev->dev.of_node = of_node_get(np); -- dev->dev.parent = parent; -+ dev->dev.parent = parent ? : &platform_bus; - - if (bus_id) - dev_set_name(&dev->dev, "%s", bus_id); -@@ -149,57 +150,9 @@ struct platform_device *of_device_alloc(struct device_node *np, - } - EXPORT_SYMBOL(of_device_alloc); - --/** -- * of_dma_configure - Setup DMA configuration -- * @dev: Device to apply DMA configuration -- * -- * Try to get devices's DMA configuration from DT and update it -- * accordingly. -- * -- * In case if platform code need to use own special DMA configuration,it -- * can use Platform bus notifier and handle BUS_NOTIFY_ADD_DEVICE event -- * to fix up DMA configuration. -- */ --static void of_dma_configure(struct device *dev) -+static void of_dma_deconfigure(struct device *dev) - { -- u64 dma_addr, paddr, size; -- int ret; -- -- /* -- * Set default dma-mask to 32 bit. Drivers are expected to setup -- * the correct supported dma_mask. -- */ -- dev->coherent_dma_mask = DMA_BIT_MASK(32); -- -- /* -- * Set it to coherent_dma_mask by default if the architecture -- * code has not set it. 
-- */ -- if (!dev->dma_mask) -- dev->dma_mask = &dev->coherent_dma_mask; -- -- /* -- * if dma-coherent property exist, call arch hook to setup -- * dma coherent operations. -- */ -- if (of_dma_is_coherent(dev->of_node)) { -- set_arch_dma_coherent_ops(dev); -- dev_dbg(dev, "device is dma coherent\n"); -- } -- -- /* -- * if dma-ranges property doesn't exist - just return else -- * setup the dma offset -- */ -- ret = of_dma_get_range(dev->of_node, &dma_addr, &paddr, &size); -- if (ret < 0) { -- dev_dbg(dev, "no dma range information to setup\n"); -- return; -- } -- -- /* DMA ranges found. Calculate and set dma_pfn_offset */ -- dev->dma_pfn_offset = PFN_DOWN(paddr - dma_addr); -- dev_dbg(dev, "dma_pfn_offset(%#08lx)\n", dev->dma_pfn_offset); -+ arch_teardown_dma_ops(dev); - } - - /** -@@ -228,16 +181,13 @@ static struct platform_device *of_platform_device_create_pdata( - if (!dev) - goto err_clear_flag; - -- of_dma_configure(&dev->dev); - dev->dev.bus = &platform_bus_type; - dev->dev.platform_data = platform_data; -- -- /* We do not fill the DMA ops for platform devices by default. -- * This is currently the responsibility of the platform code -- * to do such, possibly using a device notifier -- */ -+ of_dma_configure(&dev->dev, dev->dev.of_node); -+ of_msi_configure(&dev->dev, dev->dev.of_node); - - if (of_device_add(dev) != 0) { -+ of_dma_deconfigure(&dev->dev); - platform_device_put(dev); - goto err_clear_flag; - } -@@ -291,13 +241,13 @@ static struct amba_device *of_amba_device_create(struct device_node *node, - - /* setup generic device info */ - dev->dev.of_node = of_node_get(node); -- dev->dev.parent = parent; -+ dev->dev.parent = parent ? 
: &platform_bus; - dev->dev.platform_data = platform_data; - if (bus_id) - dev_set_name(&dev->dev, "%s", bus_id); - else - of_device_make_bus_id(&dev->dev); -- of_dma_configure(&dev->dev); -+ of_dma_configure(&dev->dev, dev->dev.of_node); - - /* Allow the HW Peripheral ID to be overridden */ - prop = of_get_property(node, "arm,primecell-periphid", NULL); -@@ -500,6 +450,7 @@ int of_platform_populate(struct device_node *root, - if (rc) - break; - } -+ of_node_set_flag(root, OF_POPULATED_BUS); - - of_node_put(root); - return rc; -@@ -523,6 +474,7 @@ static int of_platform_device_destroy(struct device *dev, void *data) - amba_device_unregister(to_amba_device(dev)); - #endif - -+ of_dma_deconfigure(dev); - of_node_clear_flag(dev->of_node, OF_POPULATED); - of_node_clear_flag(dev->of_node, OF_POPULATED_BUS); - return 0; -@@ -542,8 +494,75 @@ static int of_platform_device_destroy(struct device *dev, void *data) - */ - void of_platform_depopulate(struct device *parent) - { -- device_for_each_child(parent, NULL, of_platform_device_destroy); -+ if (parent->of_node && of_node_check_flag(parent->of_node, OF_POPULATED_BUS)) { -+ device_for_each_child(parent, NULL, of_platform_device_destroy); -+ of_node_clear_flag(parent->of_node, OF_POPULATED_BUS); -+ } - } - EXPORT_SYMBOL_GPL(of_platform_depopulate); - -+#ifdef CONFIG_OF_DYNAMIC -+static int of_platform_notify(struct notifier_block *nb, -+ unsigned long action, void *arg) -+{ -+ struct of_reconfig_data *rd = arg; -+ struct platform_device *pdev_parent, *pdev; -+ bool children_left; -+ -+ switch (of_reconfig_get_state_change(action, rd)) { -+ case OF_RECONFIG_CHANGE_ADD: -+ /* verify that the parent is a bus */ -+ if (!of_node_check_flag(rd->dn->parent, OF_POPULATED_BUS)) -+ return NOTIFY_OK; /* not for us */ -+ -+ /* already populated? 
(driver using of_populate manually) */ -+ if (of_node_check_flag(rd->dn, OF_POPULATED)) -+ return NOTIFY_OK; -+ -+ /* pdev_parent may be NULL when no bus platform device */ -+ pdev_parent = of_find_device_by_node(rd->dn->parent); -+ pdev = of_platform_device_create(rd->dn, NULL, -+ pdev_parent ? &pdev_parent->dev : NULL); -+ of_dev_put(pdev_parent); -+ -+ if (pdev == NULL) { -+ pr_err("%s: failed to create for '%s'\n", -+ __func__, rd->dn->full_name); -+ /* of_platform_device_create tosses the error code */ -+ return notifier_from_errno(-EINVAL); -+ } -+ break; -+ -+ case OF_RECONFIG_CHANGE_REMOVE: -+ -+ /* already depopulated? */ -+ if (!of_node_check_flag(rd->dn, OF_POPULATED)) -+ return NOTIFY_OK; -+ -+ /* find our device by node */ -+ pdev = of_find_device_by_node(rd->dn); -+ if (pdev == NULL) -+ return NOTIFY_OK; /* no? not meant for us */ -+ -+ /* unregister takes one ref away */ -+ of_platform_device_destroy(&pdev->dev, &children_left); -+ -+ /* and put the reference of the find */ -+ of_dev_put(pdev); -+ break; -+ } -+ -+ return NOTIFY_OK; -+} -+ -+static struct notifier_block platform_of_notifier = { -+ .notifier_call = of_platform_notify, -+}; -+ -+void of_platform_register_reconfig_notifier(void) -+{ -+ WARN_ON(of_reconfig_notifier_register(&platform_of_notifier)); -+} -+#endif /* CONFIG_OF_DYNAMIC */ -+ - #endif /* CONFIG_OF_ADDRESS */ -diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig -index 893503f..cced842 100644 ---- a/drivers/pci/Kconfig -+++ b/drivers/pci/Kconfig -@@ -4,6 +4,7 @@ - config PCI_MSI - bool "Message Signaled Interrupts (MSI and MSI-X)" - depends on PCI -+ select GENERIC_MSI_IRQ - help - This allows device drivers to enable MSI (Message Signaled - Interrupts). Message Signaled Interrupts enable a device to -@@ -16,6 +17,11 @@ config PCI_MSI - - If you don't know what to do here, say Y. 
- -+config PCI_MSI_IRQ_DOMAIN -+ bool -+ depends on PCI_MSI -+ select GENERIC_MSI_IRQ_DOMAIN -+ - config PCI_DEBUG - bool "PCI Debugging" - depends on PCI && DEBUG_KERNEL -diff --git a/drivers/pci/bus.c b/drivers/pci/bus.c -index 8fb1618..90fa3a7 100644 ---- a/drivers/pci/bus.c -+++ b/drivers/pci/bus.c -@@ -20,17 +20,16 @@ - void pci_add_resource_offset(struct list_head *resources, struct resource *res, - resource_size_t offset) - { -- struct pci_host_bridge_window *window; -+ struct resource_entry *entry; - -- window = kzalloc(sizeof(struct pci_host_bridge_window), GFP_KERNEL); -- if (!window) { -+ entry = resource_list_create_entry(res, 0); -+ if (!entry) { - printk(KERN_ERR "PCI: can't add host bridge window %pR\n", res); - return; - } - -- window->res = res; -- window->offset = offset; -- list_add_tail(&window->list, resources); -+ entry->offset = offset; -+ resource_list_add_tail(entry, resources); - } - EXPORT_SYMBOL(pci_add_resource_offset); - -@@ -42,12 +41,7 @@ EXPORT_SYMBOL(pci_add_resource); - - void pci_free_resource_list(struct list_head *resources) - { -- struct pci_host_bridge_window *window, *tmp; -- -- list_for_each_entry_safe(window, tmp, resources, list) { -- list_del(&window->list); -- kfree(window); -- } -+ resource_list_free(resources); - } - EXPORT_SYMBOL(pci_free_resource_list); - -diff --git a/drivers/pci/host-bridge.c b/drivers/pci/host-bridge.c -index 0e5f3c9..3e5bbf9 100644 ---- a/drivers/pci/host-bridge.c -+++ b/drivers/pci/host-bridge.c -@@ -23,6 +23,20 @@ static struct pci_host_bridge *find_pci_host_bridge(struct pci_bus *bus) - return to_pci_host_bridge(root_bus->bridge); - } - -+struct device *pci_get_host_bridge_device(struct pci_dev *dev) -+{ -+ struct pci_bus *root_bus = find_pci_root_bus(dev->bus); -+ struct device *bridge = root_bus->bridge; -+ -+ kobject_get(&bridge->kobj); -+ return bridge; -+} -+ -+void pci_put_host_bridge_device(struct device *dev) -+{ -+ kobject_put(&dev->kobj); -+} -+ - void 
pci_set_host_bridge_release(struct pci_host_bridge *bridge, - void (*release_fn)(struct pci_host_bridge *), - void *release_data) -@@ -35,10 +49,10 @@ void pcibios_resource_to_bus(struct pci_bus *bus, struct pci_bus_region *region, - struct resource *res) - { - struct pci_host_bridge *bridge = find_pci_host_bridge(bus); -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - resource_size_t offset = 0; - -- list_for_each_entry(window, &bridge->windows, list) { -+ resource_list_for_each_entry(window, &bridge->windows) { - if (resource_contains(window->res, res)) { - offset = window->offset; - break; -@@ -60,10 +74,10 @@ void pcibios_bus_to_resource(struct pci_bus *bus, struct resource *res, - struct pci_bus_region *region) - { - struct pci_host_bridge *bridge = find_pci_host_bridge(bus); -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - resource_size_t offset = 0; - -- list_for_each_entry(window, &bridge->windows, list) { -+ resource_list_for_each_entry(window, &bridge->windows) { - struct pci_bus_region bus_region; - - if (resource_type(res) != resource_type(window->res)) -diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig -index 3dc25fa..96586b1 100644 ---- a/drivers/pci/host/Kconfig -+++ b/drivers/pci/host/Kconfig -@@ -86,9 +86,26 @@ config PCI_XGENE - depends on ARCH_XGENE - depends on OF - select PCIEPORTBUS -+ select PCI_MSI_IRQ_DOMAIN if PCI_MSI - help - Say Y here if you want internal PCI support on APM X-Gene SoC. - There are 5 internal PCIe ports available. Each port is GEN3 capable - and have varied lanes from x1 to x8. - -+config PCI_XGENE_MSI -+ bool "X-Gene v1 PCIe MSI feature" -+ depends on PCI_XGENE && PCI_MSI -+ default y -+ help -+ Say Y here if you want PCIe MSI support for the APM X-Gene v1 SoC. -+ This MSI driver supports 5 PCIe ports on the APM X-Gene v1 SoC. 
-+ -+config PCI_LAYERSCAPE -+ bool "Freescale Layerscape PCIe controller" -+ depends on OF && (ARM || ARCH_LAYERSCAPE) -+ select PCIE_DW -+ select MFD_SYSCON -+ help -+ Say Y here if you want PCIe controller support on Layerscape SoCs. -+ - endmenu -diff --git a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile -index 26b3461..029685e 100644 ---- a/drivers/pci/host/Makefile -+++ b/drivers/pci/host/Makefile -@@ -1,3 +1,4 @@ -+obj-$(CONFIG_PCIE_DW_BASE) += pcie-designware-base.o - obj-$(CONFIG_PCIE_DW) += pcie-designware.o - obj-$(CONFIG_PCI_DRA7XX) += pci-dra7xx.o - obj-$(CONFIG_PCI_EXYNOS) += pci-exynos.o -@@ -11,3 +12,5 @@ obj-$(CONFIG_PCIE_SPEAR13XX) += pcie-spear13xx.o - obj-$(CONFIG_PCI_KEYSTONE) += pci-keystone-dw.o pci-keystone.o - obj-$(CONFIG_PCIE_XILINX) += pcie-xilinx.o - obj-$(CONFIG_PCI_XGENE) += pci-xgene.o -+obj-$(CONFIG_PCI_XGENE_MSI) += pci-xgene-msi.o -+obj-$(CONFIG_PCI_LAYERSCAPE) += pci-layerscape.o -diff --git a/drivers/pci/host/pci-dra7xx.c b/drivers/pci/host/pci-dra7xx.c -index 52b34fe..84a45cf 100644 ---- a/drivers/pci/host/pci-dra7xx.c -+++ b/drivers/pci/host/pci-dra7xx.c -@@ -61,6 +61,7 @@ - - #define PCIECTRL_DRA7XX_CONF_PHY_CS 0x010C - #define LINK_UP BIT(16) -+#define DRA7XX_CPU_TO_BUS_ADDR 0x0FFFFFFF - - struct dra7xx_pcie { - void __iomem *base; -@@ -144,6 +145,12 @@ static void dra7xx_pcie_enable_interrupts(struct pcie_port *pp) - static void dra7xx_pcie_host_init(struct pcie_port *pp) - { - dw_pcie_setup_rc(pp); -+ -+ pp->io_base &= DRA7XX_CPU_TO_BUS_ADDR; -+ pp->mem_base &= DRA7XX_CPU_TO_BUS_ADDR; -+ pp->cfg0_base &= DRA7XX_CPU_TO_BUS_ADDR; -+ pp->cfg1_base &= DRA7XX_CPU_TO_BUS_ADDR; -+ - dra7xx_pcie_establish_link(pp); - if (IS_ENABLED(CONFIG_PCI_MSI)) - dw_pcie_msi_init(pp); -@@ -160,7 +167,6 @@ static int dra7xx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - { - irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - 
return 0; - } -diff --git a/drivers/pci/host/pci-exynos.c b/drivers/pci/host/pci-exynos.c -index c5d0ca3..2fd6b4e 100644 ---- a/drivers/pci/host/pci-exynos.c -+++ b/drivers/pci/host/pci-exynos.c -@@ -466,7 +466,7 @@ static int exynos_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, - int ret; - - exynos_pcie_sideband_dbi_r_mode(pp, true); -- ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, size, val); -+ ret = dw_pcie_cfg_read(pp->dbi_base + where, size, val); - exynos_pcie_sideband_dbi_r_mode(pp, false); - return ret; - } -@@ -477,8 +477,7 @@ static int exynos_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, - int ret; - - exynos_pcie_sideband_dbi_w_mode(pp, true); -- ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), -- where, size, val); -+ ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val); - exynos_pcie_sideband_dbi_w_mode(pp, false); - return ret; - } -diff --git a/drivers/pci/host/pci-host-generic.c b/drivers/pci/host/pci-host-generic.c -index 3d2076f..83fb705 100644 ---- a/drivers/pci/host/pci-host-generic.c -+++ b/drivers/pci/host/pci-host-generic.c -@@ -32,13 +32,22 @@ struct gen_pci_cfg_bus_ops { - - struct gen_pci_cfg_windows { - struct resource res; -- struct resource bus_range; -+ struct resource *bus_range; - void __iomem **win; - - const struct gen_pci_cfg_bus_ops *ops; - }; - -+/* -+ * ARM pcibios functions expect the ARM struct pci_sys_data as the PCI -+ * sysdata. Add pci_sys_data as the first element in struct gen_pci so -+ * that when we use a gen_pci pointer as sysdata, it is also a pointer to -+ * a struct pci_sys_data. 
-+ */ - struct gen_pci { -+#ifdef CONFIG_ARM -+ struct pci_sys_data sys; -+#endif - struct pci_host_bridge host; - struct gen_pci_cfg_windows cfg; - struct list_head resources; -@@ -48,9 +57,8 @@ static void __iomem *gen_pci_map_cfg_bus_cam(struct pci_bus *bus, - unsigned int devfn, - int where) - { -- struct pci_sys_data *sys = bus->sysdata; -- struct gen_pci *pci = sys->private_data; -- resource_size_t idx = bus->number - pci->cfg.bus_range.start; -+ struct gen_pci *pci = bus->sysdata; -+ resource_size_t idx = bus->number - pci->cfg.bus_range->start; - - return pci->cfg.win[idx] + ((devfn << 8) | where); - } -@@ -64,9 +72,8 @@ static void __iomem *gen_pci_map_cfg_bus_ecam(struct pci_bus *bus, - unsigned int devfn, - int where) - { -- struct pci_sys_data *sys = bus->sysdata; -- struct gen_pci *pci = sys->private_data; -- resource_size_t idx = bus->number - pci->cfg.bus_range.start; -+ struct gen_pci *pci = bus->sysdata; -+ resource_size_t idx = bus->number - pci->cfg.bus_range->start; - - return pci->cfg.win[idx] + ((devfn << 12) | where); - } -@@ -76,55 +83,9 @@ static struct gen_pci_cfg_bus_ops gen_pci_cfg_ecam_bus_ops = { - .map_bus = gen_pci_map_cfg_bus_ecam, - }; - --static int gen_pci_config_read(struct pci_bus *bus, unsigned int devfn, -- int where, int size, u32 *val) --{ -- void __iomem *addr; -- struct pci_sys_data *sys = bus->sysdata; -- struct gen_pci *pci = sys->private_data; -- -- addr = pci->cfg.ops->map_bus(bus, devfn, where); -- -- switch (size) { -- case 1: -- *val = readb(addr); -- break; -- case 2: -- *val = readw(addr); -- break; -- default: -- *val = readl(addr); -- } -- -- return PCIBIOS_SUCCESSFUL; --} -- --static int gen_pci_config_write(struct pci_bus *bus, unsigned int devfn, -- int where, int size, u32 val) --{ -- void __iomem *addr; -- struct pci_sys_data *sys = bus->sysdata; -- struct gen_pci *pci = sys->private_data; -- -- addr = pci->cfg.ops->map_bus(bus, devfn, where); -- -- switch (size) { -- case 1: -- writeb(val, addr); -- 
break; -- case 2: -- writew(val, addr); -- break; -- default: -- writel(val, addr); -- } -- -- return PCIBIOS_SUCCESSFUL; --} -- - static struct pci_ops gen_pci_ops = { -- .read = gen_pci_config_read, -- .write = gen_pci_config_write, -+ .read = pci_generic_config_read, -+ .write = pci_generic_config_write, - }; - - static const struct of_device_id gen_pci_of_match[] = { -@@ -138,106 +99,50 @@ static const struct of_device_id gen_pci_of_match[] = { - }; - MODULE_DEVICE_TABLE(of, gen_pci_of_match); - --static int gen_pci_calc_io_offset(struct device *dev, -- struct of_pci_range *range, -- struct resource *res, -- resource_size_t *offset) --{ -- static atomic_t wins = ATOMIC_INIT(0); -- int err, idx, max_win; -- unsigned int window; -- -- if (!PAGE_ALIGNED(range->cpu_addr)) -- return -EINVAL; -- -- max_win = (IO_SPACE_LIMIT + 1) / SZ_64K; -- idx = atomic_inc_return(&wins); -- if (idx > max_win) -- return -ENOSPC; -- -- window = (idx - 1) * SZ_64K; -- err = pci_ioremap_io(window, range->cpu_addr); -- if (err) -- return err; -- -- of_pci_range_to_resource(range, dev->of_node, res); -- res->start = window; -- res->end = res->start + range->size - 1; -- *offset = window - range->pci_addr; -- return 0; --} -- --static int gen_pci_calc_mem_offset(struct device *dev, -- struct of_pci_range *range, -- struct resource *res, -- resource_size_t *offset) --{ -- of_pci_range_to_resource(range, dev->of_node, res); -- *offset = range->cpu_addr - range->pci_addr; -- return 0; --} -- - static void gen_pci_release_of_pci_ranges(struct gen_pci *pci) - { -- struct pci_host_bridge_window *win; -- -- list_for_each_entry(win, &pci->resources, list) -- release_resource(win->res); -- - pci_free_resource_list(&pci->resources); - } - - static int gen_pci_parse_request_of_pci_ranges(struct gen_pci *pci) - { -- struct of_pci_range range; -- struct of_pci_range_parser parser; - int err, res_valid = 0; - struct device *dev = pci->host.dev.parent; - struct device_node *np = dev->of_node; -+ 
resource_size_t iobase; -+ struct resource_entry *win; - -- if (of_pci_range_parser_init(&parser, np)) { -- dev_err(dev, "missing \"ranges\" property\n"); -- return -EINVAL; -- } -- -- for_each_of_pci_range(&parser, &range) { -- struct resource *parent, *res; -- resource_size_t offset; -- u32 restype = range.flags & IORESOURCE_TYPE_BITS; -+ err = of_pci_get_host_bridge_resources(np, 0, 0xff, &pci->resources, -+ &iobase); -+ if (err) -+ return err; - -- res = devm_kmalloc(dev, sizeof(*res), GFP_KERNEL); -- if (!res) { -- err = -ENOMEM; -- goto out_release_res; -- } -+ resource_list_for_each_entry(win, &pci->resources) { -+ struct resource *parent, *res = win->res; - -- switch (restype) { -+ switch (resource_type(res)) { - case IORESOURCE_IO: - parent = &ioport_resource; -- err = gen_pci_calc_io_offset(dev, &range, res, &offset); -+ err = pci_remap_iospace(res, iobase); -+ if (err) { -+ dev_warn(dev, "error %d: failed to map resource %pR\n", -+ err, res); -+ continue; -+ } - break; - case IORESOURCE_MEM: - parent = &iomem_resource; -- err = gen_pci_calc_mem_offset(dev, &range, res, &offset); -- res_valid |= !(res->flags & IORESOURCE_PREFETCH || err); -+ res_valid |= !(res->flags & IORESOURCE_PREFETCH); - break; -+ case IORESOURCE_BUS: -+ pci->cfg.bus_range = res; - default: -- err = -EINVAL; - continue; - } - -- if (err) { -- dev_warn(dev, -- "error %d: failed to add resource [type 0x%x, %lld bytes]\n", -- err, restype, range.size); -- continue; -- } -- -- err = request_resource(parent, res); -+ err = devm_request_resource(dev, parent, res); - if (err) - goto out_release_res; -- -- pci_add_resource_offset(&pci->resources, res, offset); - } - - if (!res_valid) { -@@ -262,38 +167,30 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) - struct device *dev = pci->host.dev.parent; - struct device_node *np = dev->of_node; - -- if (of_pci_parse_bus_range(np, &pci->cfg.bus_range)) -- pci->cfg.bus_range = (struct resource) { -- .name = np->name, -- .start = 0, -- 
.end = 0xff, -- .flags = IORESOURCE_BUS, -- }; -- - err = of_address_to_resource(np, 0, &pci->cfg.res); - if (err) { - dev_err(dev, "missing \"reg\" property\n"); - return err; - } - -- pci->cfg.win = devm_kcalloc(dev, resource_size(&pci->cfg.bus_range), -+ /* Limit the bus-range to fit within reg */ -+ bus_max = pci->cfg.bus_range->start + -+ (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1; -+ pci->cfg.bus_range->end = min_t(resource_size_t, -+ pci->cfg.bus_range->end, bus_max); -+ -+ pci->cfg.win = devm_kcalloc(dev, resource_size(pci->cfg.bus_range), - sizeof(*pci->cfg.win), GFP_KERNEL); - if (!pci->cfg.win) - return -ENOMEM; - -- /* Limit the bus-range to fit within reg */ -- bus_max = pci->cfg.bus_range.start + -- (resource_size(&pci->cfg.res) >> pci->cfg.ops->bus_shift) - 1; -- pci->cfg.bus_range.end = min_t(resource_size_t, pci->cfg.bus_range.end, -- bus_max); -- - /* Map our Configuration Space windows */ - if (!devm_request_mem_region(dev, pci->cfg.res.start, - resource_size(&pci->cfg.res), - "Configuration Space")) - return -ENOMEM; - -- bus_range = &pci->cfg.bus_range; -+ bus_range = pci->cfg.bus_range; - for (busn = bus_range->start; busn <= bus_range->end; ++busn) { - u32 idx = busn - bus_range->start; - u32 sz = 1 << pci->cfg.ops->bus_shift; -@@ -305,18 +202,9 @@ static int gen_pci_parse_map_cfg_windows(struct gen_pci *pci) - return -ENOMEM; - } - -- /* Register bus resource */ -- pci_add_resource(&pci->resources, bus_range); - return 0; - } - --static int gen_pci_setup(int nr, struct pci_sys_data *sys) --{ -- struct gen_pci *pci = sys->private_data; -- list_splice_init(&pci->resources, &sys->resources); -- return 1; --} -- - static int gen_pci_probe(struct platform_device *pdev) - { - int err; -@@ -326,13 +214,7 @@ static int gen_pci_probe(struct platform_device *pdev) - struct device *dev = &pdev->dev; - struct device_node *np = dev->of_node; - struct gen_pci *pci = devm_kzalloc(dev, sizeof(*pci), GFP_KERNEL); -- struct hw_pci hw = { -- 
.nr_controllers = 1, -- .private_data = (void **)&pci, -- .setup = gen_pci_setup, -- .map_irq = of_irq_parse_and_map_pci, -- .ops = &gen_pci_ops, -- }; -+ struct pci_bus *bus, *child; - - if (!pci) - return -ENOMEM; -@@ -353,6 +235,7 @@ static int gen_pci_probe(struct platform_device *pdev) - - of_id = of_match_node(gen_pci_of_match, np); - pci->cfg.ops = of_id->data; -+ gen_pci_ops.map_bus = pci->cfg.ops->map_bus; - pci->host.dev.parent = dev; - INIT_LIST_HEAD(&pci->host.windows); - INIT_LIST_HEAD(&pci->resources); -@@ -369,7 +252,27 @@ static int gen_pci_probe(struct platform_device *pdev) - return err; - } - -- pci_common_init_dev(dev, &hw); -+ /* Do not reassign resources if probe only */ -+ if (!pci_has_flag(PCI_PROBE_ONLY)) -+ pci_add_flags(PCI_REASSIGN_ALL_RSRC | PCI_REASSIGN_ALL_BUS); -+ -+ bus = pci_scan_root_bus(dev, 0, &gen_pci_ops, pci, &pci->resources); -+ if (!bus) { -+ dev_err(dev, "Scanning rootbus failed"); -+ return -ENODEV; -+ } -+ -+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); -+ -+ if (!pci_has_flag(PCI_PROBE_ONLY)) { -+ pci_bus_size_bridges(bus); -+ pci_bus_assign_resources(bus); -+ -+ list_for_each_entry(child, &bus->children, node) -+ pcie_bus_configure_settings(child); -+ } -+ -+ pci_bus_add_devices(bus); - return 0; - } - -diff --git a/drivers/pci/host/pci-keystone-dw.c b/drivers/pci/host/pci-keystone-dw.c -index 34086ce..c1b5980 100644 ---- a/drivers/pci/host/pci-keystone-dw.c -+++ b/drivers/pci/host/pci-keystone-dw.c -@@ -70,7 +70,7 @@ static inline void update_reg_offset_bit_pos(u32 offset, u32 *reg_offset, - *bit_pos = offset >> 3; - } - --u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp) -+phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp) - { - struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); - -@@ -104,14 +104,13 @@ static void ks_dw_pcie_msi_irq_ack(struct irq_data *d) - { - u32 offset, reg_offset, bit_pos; - struct keystone_pcie *ks_pcie; -- unsigned int irq = d->irq; - struct msi_desc *msi; - struct 
pcie_port *pp; - -- msi = irq_get_msi_desc(irq); -- pp = sys_to_pcie(msi->dev->bus->sysdata); -+ msi = irq_data_get_msi_desc(d); -+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); - ks_pcie = to_keystone_pcie(pp); -- offset = irq - irq_linear_revmap(pp->irq_domain, 0); -+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); - update_reg_offset_bit_pos(offset, ®_offset, &bit_pos); - - writel(BIT(bit_pos), -@@ -142,20 +141,19 @@ void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) - static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) - { - struct keystone_pcie *ks_pcie; -- unsigned int irq = d->irq; - struct msi_desc *msi; - struct pcie_port *pp; - u32 offset; - -- msi = irq_get_msi_desc(irq); -- pp = sys_to_pcie(msi->dev->bus->sysdata); -+ msi = irq_data_get_msi_desc(d); -+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); - ks_pcie = to_keystone_pcie(pp); -- offset = irq - irq_linear_revmap(pp->irq_domain, 0); -+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); - - /* Mask the end point if PVM implemented */ - if (IS_ENABLED(CONFIG_PCI_MSI)) { - if (msi->msi_attrib.maskbit) -- mask_msi_irq(d); -+ pci_msi_mask_irq(d); - } - - ks_dw_pcie_msi_clear_irq(pp, offset); -@@ -164,20 +162,19 @@ static void ks_dw_pcie_msi_irq_mask(struct irq_data *d) - static void ks_dw_pcie_msi_irq_unmask(struct irq_data *d) - { - struct keystone_pcie *ks_pcie; -- unsigned int irq = d->irq; - struct msi_desc *msi; - struct pcie_port *pp; - u32 offset; - -- msi = irq_get_msi_desc(irq); -- pp = sys_to_pcie(msi->dev->bus->sysdata); -+ msi = irq_data_get_msi_desc(d); -+ pp = sys_to_pcie(msi_desc_to_pci_sysdata(msi)); - ks_pcie = to_keystone_pcie(pp); -- offset = irq - irq_linear_revmap(pp->irq_domain, 0); -+ offset = d->irq - irq_linear_revmap(pp->irq_domain, 0); - - /* Mask the end point if PVM implemented */ - if (IS_ENABLED(CONFIG_PCI_MSI)) { - if (msi->msi_attrib.maskbit) -- unmask_msi_irq(d); -+ pci_msi_unmask_irq(d); - } - - ks_dw_pcie_msi_set_irq(pp, offset); -@@ 
-196,7 +193,6 @@ static int ks_dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, - irq_set_chip_and_handler(irq, &ks_dw_pcie_msi_irq_chip, - handle_level_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - return 0; - } -@@ -205,7 +201,7 @@ const struct irq_domain_ops ks_dw_pcie_msi_domain_ops = { - .map = ks_dw_pcie_msi_map, - }; - --int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_chip *chip) -+int ks_dw_pcie_msi_host_init(struct pcie_port *pp, struct msi_controller *chip) - { - struct keystone_pcie *ks_pcie = to_keystone_pcie(pp); - int i; -@@ -277,7 +273,6 @@ static int ks_dw_pcie_init_legacy_irq_map(struct irq_domain *d, - irq_set_chip_and_handler(irq, &ks_dw_pcie_legacy_irq_chip, - handle_level_irq); - irq_set_chip_data(irq, d->host_data); -- set_irq_flags(irq, IRQF_VALID); - - return 0; - } -@@ -327,7 +322,7 @@ static void ks_dw_pcie_clear_dbi_mode(void __iomem *reg_virt) - void ks_dw_pcie_setup_rc_app_regs(struct keystone_pcie *ks_pcie) - { - struct pcie_port *pp = &ks_pcie->pp; -- u32 start = pp->mem.start, end = pp->mem.end; -+ u32 start = pp->mem->start, end = pp->mem->end; - int i, tr_size; - - /* Disable BARs for inbound access */ -@@ -403,7 +398,7 @@ int ks_dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, - - addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); - -- return dw_pcie_cfg_read(addr + (where & ~0x3), where, size, val); -+ return dw_pcie_cfg_read(addr + where, size, val); - } - - int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, -@@ -415,7 +410,7 @@ int ks_dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, - - addr = ks_pcie_cfg_setup(ks_pcie, bus_num, devfn); - -- return dw_pcie_cfg_write(addr + (where & ~0x3), where, size, val); -+ return dw_pcie_cfg_write(addr + where, size, val); - } - - /** -diff --git a/drivers/pci/host/pci-keystone.h b/drivers/pci/host/pci-keystone.h -index 1fc1fce..f0944e8 100644 ---- 
a/drivers/pci/host/pci-keystone.h -+++ b/drivers/pci/host/pci-keystone.h -@@ -37,7 +37,7 @@ struct keystone_pcie { - - /* Keystone DW specific MSI controller APIs/definitions */ - void ks_dw_pcie_handle_msi_irq(struct keystone_pcie *ks_pcie, int offset); --u32 ks_dw_pcie_get_msi_addr(struct pcie_port *pp); -+phys_addr_t ks_dw_pcie_get_msi_addr(struct pcie_port *pp); - - /* Keystone specific PCI controller APIs */ - void ks_dw_pcie_enable_legacy_irqs(struct keystone_pcie *ks_pcie); -@@ -55,4 +55,4 @@ void ks_dw_pcie_msi_set_irq(struct pcie_port *pp, int irq); - void ks_dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq); - void ks_dw_pcie_v3_65_scan_bus(struct pcie_port *pp); - int ks_dw_pcie_msi_host_init(struct pcie_port *pp, -- struct msi_chip *chip); -+ struct msi_controller *chip); -diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c -new file mode 100644 -index 0000000..d491b0f ---- /dev/null -+++ b/drivers/pci/host/pci-layerscape.c -@@ -0,0 +1,669 @@ -+/* -+ * PCIe host controller driver for Freescale Layerscape SoCs -+ * -+ * Copyright (C) 2014 Freescale Semiconductor. -+ * -+ * Author: Minghuan Lian -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#include "pcie-designware.h" -+ -+/* PEX1/2 Misc Ports Status Register */ -+#define SCFG_PEXMSCPORTSR(pex_idx) (0x94 + (pex_idx) * 4) -+#define SCFG_PEXPMWRCR(pex_idx) (0x5c + (pex_idx) * 0x64) -+#define LTSSM_STATE_SHIFT 20 -+#define LTSSM_STATE_MASK 0x3f -+#define LTSSM_PCIE_L0 0x11 /* L0 state */ -+#define LTSSM_PCIE_L2_IDLE 0x15 /* L2 idle state */ -+ -+/* PEX Internal Configuration Registers */ -+#define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ -+#define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ -+ -+/* PEX LUT registers */ -+#define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */ -+#define PCIE_LUT_UDR(n) (0x800 + (n) * 8) -+#define PCIE_LUT_LDR(n) (0x804 + (n) * 8) -+#define PCIE_LUT_MASK_ALL 0xffff -+#define PCIE_LUT_DR_NUM 32 -+#define PCIE_LUT_ENABLE (1 << 31) -+ -+#define PCIE_PM_SCR 0x44 -+#define PCIE_PM_SCR_PMEEN 0x10 -+#define PCIE_PM_SCR_PMEPS_D0 0xfffc -+#define PCIE_PM_SCR_PMEPS_D3 0x3 -+#define PCIE_PM_SCR_PME_STATE 0x8000 -+ -+#define PCIE_PEX_DCR 0x78 -+#define PCIE_PEX_DCR_AUXPOWEREN 0x0400 -+ -+#define PCIE_PEX_SSR 0x8a -+#define PCIE_PEX_SSR_PDS 0x40 -+ -+#define PCIE_PEX_RCR 0x8c -+#define PCIE_PEX_RCR_PMEIE 0x0008 -+ -+#define PCIE_PEX_RSR 0x90 -+#define PCIE_PEX_PMES 0x00010000 -+ -+#define QIXIS_RST_FORCE_3 0x45 -+#define QIXIS_RST_FORCE_3_PCIESLOT 0xe0 -+ -+#define CPLD_RST_PCIE_SLOT 0x14 -+#define CPLD_RST_PCIESLOT 0x3 -+ -+struct ls_pcie; -+ -+struct ls_pcie_pm_data { -+ void __iomem *fpga; -+ void __iomem *cpld; -+}; -+ -+struct ls_pcie_pm_ops { -+ u32 (*get_link_state)(struct ls_pcie *pcie); -+ int (*send_turn_off_message)(struct ls_pcie *pcie); -+ void (*clear_turn_off_message)(struct ls_pcie *pcie); -+ void (*reset_slot)(struct ls_pcie *pcie, -+ struct ls_pcie_pm_data *pm_data); -+}; -+ -+struct ls_pcie_drvdata 
{ -+ u32 lut_offset; -+ u32 ltssm_shift; -+ struct pcie_host_ops *ops; -+ struct ls_pcie_pm_ops *pm; -+}; -+ -+struct ls_pcie { -+ struct list_head list_node; -+ void __iomem *dbi; -+ void __iomem *lut; -+ struct regmap *scfg; -+ struct pcie_port pp; -+ const struct ls_pcie_drvdata *drvdata; -+ struct ls_pcie_pm_data pm_data; -+ int index; -+ const u32 *avail_streamids; -+ int streamid_index; -+ int pme_irq; -+ bool in_slot; -+}; -+ -+#define to_ls_pcie(x) container_of(x, struct ls_pcie, pp) -+ -+u32 set_pcie_streamid_translation(struct pci_dev *pdev, u32 devid) -+{ -+ u32 index, streamid; -+ struct pcie_port *pp = pdev->bus->sysdata; -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ -+ if (!pcie->avail_streamids || !pcie->streamid_index) -+ return ~(u32)0; -+ -+ index = --pcie->streamid_index; -+ /* mask is set as all zeroes, want to match all bits */ -+ iowrite32((devid << 16), pcie->lut + PCIE_LUT_UDR(index)); -+ streamid = be32_to_cpup(&pcie->avail_streamids[index]); -+ iowrite32(streamid | PCIE_LUT_ENABLE, pcie->lut + PCIE_LUT_LDR(index)); -+ -+ return streamid; -+} -+ -+LIST_HEAD(hose_list); -+ -+static bool ls_pcie_is_bridge(struct ls_pcie *pcie) -+{ -+ u32 header_type; -+ -+ header_type = ioread8(pcie->dbi + PCI_HEADER_TYPE); -+ header_type &= 0x7f; -+ -+ return header_type == PCI_HEADER_TYPE_BRIDGE; -+} -+ -+/* Clear multi-function bit */ -+static void ls_pcie_clear_multifunction(struct ls_pcie *pcie) -+{ -+ iowrite8(PCI_HEADER_TYPE_BRIDGE, pcie->dbi + PCI_HEADER_TYPE); -+} -+ -+/* Fix class value */ -+static void ls_pcie_fix_class(struct ls_pcie *pcie) -+{ -+ iowrite16(PCI_CLASS_BRIDGE_PCI, pcie->dbi + PCI_CLASS_DEVICE); -+} -+ -+/* Drop MSG TLP except for Vendor MSG */ -+static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) -+{ -+ u32 val; -+ -+ val = ioread32(pcie->dbi + PCIE_STRFMR1); -+ val &= 0xDFFFFFFF; -+ iowrite32(val, pcie->dbi + PCIE_STRFMR1); -+} -+ -+static int ls1021_pcie_link_up(struct pcie_port *pp) -+{ -+ u32 state; -+ struct ls_pcie *pcie = 
to_ls_pcie(pp); -+ -+ if (!pcie->scfg) -+ return 0; -+ -+ regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); -+ state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; -+ -+ if (state < LTSSM_PCIE_L0) -+ return 0; -+ -+ return 1; -+} -+ -+static u32 ls1021_pcie_get_link_state(struct ls_pcie *pcie) -+{ -+ u32 state; -+ -+ if (!pcie->scfg) -+ return 0; -+ -+ regmap_read(pcie->scfg, SCFG_PEXMSCPORTSR(pcie->index), &state); -+ state = (state >> LTSSM_STATE_SHIFT) & LTSSM_STATE_MASK; -+ -+ return state; -+} -+ -+static int ls1021_pcie_send_turn_off_message(struct ls_pcie *pcie) -+{ -+ u32 val; -+ -+ if (!pcie->scfg) -+ return -EINVAL; -+ -+ /* Send Turn_off message */ -+ regmap_read(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), &val); -+ val |= 0x80000000; -+ regmap_write(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), val); -+ -+ return 0; -+} -+ -+static void ls1021_pcie_clear_turn_off_message(struct ls_pcie *pcie) -+{ -+ u32 val; -+ -+ if (!pcie->scfg) -+ return; -+ -+ /* Clear Turn_off message */ -+ regmap_read(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), &val); -+ val &= 0x00000000; -+ regmap_write(pcie->scfg, SCFG_PEXPMWRCR(pcie->index), val); -+} -+ -+static void ls1021_pcie_reset_slot(struct ls_pcie *pcie, -+ struct ls_pcie_pm_data *pm_data) -+{ -+ u8 val; -+ -+ /* Try to reset PCIe slot to relink EP */ -+ if (pm_data->fpga) { -+ /* PULL DOWN PCIe RST# */ -+ val = ioread8(pm_data->fpga + QIXIS_RST_FORCE_3); -+ val |= QIXIS_RST_FORCE_3_PCIESLOT; -+ iowrite8(val, pm_data->fpga + QIXIS_RST_FORCE_3); -+ -+ /* PULL ON PCIe RST# */ -+ val = ioread8(pm_data->fpga + QIXIS_RST_FORCE_3); -+ val &= 0x0; -+ iowrite8(val, pm_data->fpga + QIXIS_RST_FORCE_3); -+ } -+ -+ if (pm_data->cpld) { -+ /* PULL DOWN PCIe RST# */ -+ val = ioread8(pm_data->cpld + CPLD_RST_PCIE_SLOT); -+ val &= 0x0; -+ iowrite8(val, pm_data->cpld + CPLD_RST_PCIE_SLOT); -+ -+ /* PULL ON PCIe RST# */ -+ val = ioread8(pm_data->cpld + CPLD_RST_PCIE_SLOT); -+ val |= CPLD_RST_PCIESLOT; -+ iowrite8(val, 
pm_data->cpld + CPLD_RST_PCIE_SLOT); -+ } -+} -+ -+static void ls1021_pcie_host_init(struct pcie_port *pp) -+{ -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ u32 index[2]; -+ -+ pcie->scfg = syscon_regmap_lookup_by_phandle(pp->dev->of_node, -+ "fsl,pcie-scfg"); -+ if (IS_ERR(pcie->scfg)) { -+ dev_err(pp->dev, "No syscfg phandle specified\n"); -+ pcie->scfg = NULL; -+ return; -+ } -+ -+ if (of_property_read_u32_array(pp->dev->of_node, -+ "fsl,pcie-scfg", index, 2)) { -+ pcie->scfg = NULL; -+ return; -+ } -+ pcie->index = index[1]; -+ -+ dw_pcie_setup_rc(pp); -+ -+ ls_pcie_drop_msg_tlp(pcie); -+} -+ -+static int ls_pcie_link_up(struct pcie_port *pp) -+{ -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ u32 state; -+ -+ state = (ioread32(pcie->lut + PCIE_LUT_DBG) >> -+ pcie->drvdata->ltssm_shift) & -+ LTSSM_STATE_MASK; -+ -+ if (state < LTSSM_PCIE_L0) -+ return 0; -+ -+ return 1; -+} -+ -+static u32 ls_pcie_get_link_state(struct ls_pcie *pcie) -+{ -+ return (ioread32(pcie->lut + PCIE_LUT_DBG) >> -+ pcie->drvdata->ltssm_shift) & -+ LTSSM_STATE_MASK; -+} -+ -+static void ls_pcie_host_init(struct pcie_port *pp) -+{ -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ -+ iowrite32(1, pcie->dbi + PCIE_DBI_RO_WR_EN); -+ ls_pcie_fix_class(pcie); -+ ls_pcie_clear_multifunction(pcie); -+ ls_pcie_drop_msg_tlp(pcie); -+ iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); -+} -+ -+static int ls_pcie_msi_host_init(struct pcie_port *pp, -+ struct msi_controller *chip) -+{ -+ struct device_node *msi_node; -+ struct device_node *np = pp->dev->of_node; -+ -+ /* -+ * The MSI domain is set by the generic of_msi_configure(). This -+ * .msi_host_init() function keeps us from doing the default MSI -+ * domain setup in dw_pcie_host_init() and also enforces the -+ * requirement that "msi-parent" exists. 
-+ */ -+ msi_node = of_parse_phandle(np, "msi-parent", 0); -+ if (!msi_node) { -+ dev_err(pp->dev, "failed to find msi-parent\n"); -+ return -EINVAL; -+ } -+ -+ return 0; -+} -+ -+static struct pcie_host_ops ls1021_pcie_host_ops = { -+ .link_up = ls1021_pcie_link_up, -+ .host_init = ls1021_pcie_host_init, -+ .msi_host_init = ls_pcie_msi_host_init, -+}; -+ -+static struct ls_pcie_pm_ops ls1021_pcie_host_pm_ops = { -+ .get_link_state = &ls1021_pcie_get_link_state, -+ .send_turn_off_message = &ls1021_pcie_send_turn_off_message, -+ .clear_turn_off_message = &ls1021_pcie_clear_turn_off_message, -+ .reset_slot = &ls1021_pcie_reset_slot, -+}; -+ -+static struct pcie_host_ops ls_pcie_host_ops = { -+ .link_up = ls_pcie_link_up, -+ .host_init = ls_pcie_host_init, -+ .msi_host_init = ls_pcie_msi_host_init, -+}; -+ -+static struct ls_pcie_pm_ops ls_pcie_host_pm_ops = { -+ .get_link_state = &ls_pcie_get_link_state, -+}; -+ -+static struct ls_pcie_drvdata ls1021_drvdata = { -+ .ops = &ls1021_pcie_host_ops, -+ .pm = &ls1021_pcie_host_pm_ops, -+}; -+ -+static struct ls_pcie_drvdata ls1043_drvdata = { -+ .lut_offset = 0x10000, -+ .ltssm_shift = 24, -+ .ops = &ls_pcie_host_ops, -+ .pm = &ls_pcie_host_pm_ops, -+}; -+ -+static struct ls_pcie_drvdata ls2080_drvdata = { -+ .lut_offset = 0x80000, -+ .ltssm_shift = 0, -+ .ops = &ls_pcie_host_ops, -+ .pm = &ls_pcie_host_pm_ops, -+}; -+ -+static const struct of_device_id ls_pcie_of_match[] = { -+ { .compatible = "fsl,ls1021a-pcie", .data = &ls1021_drvdata }, -+ { .compatible = "fsl,ls1043a-pcie", .data = &ls1043_drvdata }, -+ { .compatible = "fsl,ls2080a-pcie", .data = &ls2080_drvdata }, -+ { .compatible = "fsl,ls2085a-pcie", .data = &ls2080_drvdata }, -+ { }, -+}; -+MODULE_DEVICE_TABLE(of, ls_pcie_of_match); -+ -+static void ls_pcie_host_hack_pm_init(struct ls_pcie *pcie) -+{ -+ struct device_node *np; -+ struct ls_pcie_pm_data *pm_data = &pcie->pm_data; -+ -+ np = of_find_compatible_node(NULL, NULL, "fsl,ls1021aqds-fpga"); -+ if (np) -+ 
pm_data->fpga = of_iomap(np, 0); -+ -+ of_node_put(np); -+ -+ np = of_find_compatible_node(NULL, NULL, "fsl,ls1021atwr-cpld"); -+ if (np) -+ pm_data->cpld = of_iomap(np, 0); -+ -+ of_node_put(np); -+} -+ -+static irqreturn_t ls_pcie_pme_irq_handler(int irq, void *data) -+{ -+ struct pcie_port *pp = data; -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ u32 val; -+ -+ if (pcie->drvdata->pm->clear_turn_off_message) -+ pcie->drvdata->pm->clear_turn_off_message(pcie); -+ -+ /* Clear Host root PME_STATE bit */ -+ val = ioread32(pcie->dbi + PCIE_PEX_RSR); -+ val |= PCIE_PEX_PMES; -+ iowrite32(val, pcie->dbi + PCIE_PEX_RSR); -+ -+ return IRQ_HANDLED; -+} -+ -+static int ls_pcie_host_pme_init(struct ls_pcie *pcie, -+ struct platform_device *pdev) -+{ -+ struct pcie_port *pp; -+ int ret; -+ u16 val; -+ -+ pp = &pcie->pp; -+ -+ pcie->pme_irq = platform_get_irq_byname(pdev, "pme"); -+ if (pcie->pme_irq < 0) { -+ dev_err(&pdev->dev, -+ "failed to get PME IRQ: %d\n", pcie->pme_irq); -+ return pcie->pme_irq; -+ } -+ -+ ret = devm_request_irq(pp->dev, pcie->pme_irq, ls_pcie_pme_irq_handler, -+ IRQF_SHARED, "ls-pcie-pme", pp); -+ if (ret) { -+ dev_err(pp->dev, "Failed to request pme irq\n"); -+ return ret; -+ } -+ -+ ls_pcie_host_hack_pm_init(pcie); -+ -+ /* AUX Power PM Enable */ -+ val = ioread16(pcie->dbi + PCIE_PEX_DCR); -+ val |= PCIE_PEX_DCR_AUXPOWEREN; -+ iowrite16(val, pcie->dbi + PCIE_PEX_DCR); -+ -+ /* Enable PME message */ -+ val = ioread16(pcie->dbi + PCIE_PM_SCR); -+ val |= PCIE_PM_SCR_PMEEN; -+ iowrite16(val, pcie->dbi + PCIE_PM_SCR); -+ -+ /* Clear Host PME_STATE bit */ -+ val = ioread16(pcie->dbi + PCIE_PM_SCR); -+ val |= PCIE_PM_SCR_PME_STATE; -+ iowrite16(val, pcie->dbi + PCIE_PM_SCR); -+ -+ /* Enable Host %d interrupt */ -+ val = ioread16(pcie->dbi + PCIE_PEX_RCR); -+ val |= PCIE_PEX_RCR_PMEIE; -+ iowrite16(val, pcie->dbi + PCIE_PEX_RCR); -+ -+ if (dw_pcie_link_up(&pcie->pp)) -+ pcie->in_slot = true; -+ else -+ pcie->in_slot = false; -+ -+ return 0; -+} -+ -+static 
int __init ls_add_pcie_port(struct pcie_port *pp, -+ struct platform_device *pdev) -+{ -+ int ret; -+ struct ls_pcie *pcie = to_ls_pcie(pp); -+ -+ pp->dev = &pdev->dev; -+ pp->dbi_base = pcie->dbi; -+ pp->ops = pcie->drvdata->ops; -+ -+ ret = dw_pcie_host_init(pp); -+ if (ret) { -+ dev_err(pp->dev, "failed to initialize host\n"); -+ return ret; -+ } -+ -+ ret = ls_pcie_host_pme_init(pcie, pdev); -+ if (ret) -+ dev_warn(pp->dev, "failed to initialize PME\n"); -+ -+ return 0; -+} -+ -+static int ls_pcie_probe(struct platform_device *pdev) -+{ -+ const struct of_device_id *match; -+ struct ls_pcie *pcie; -+ struct resource *dbi_base; -+ int ret; -+ -+ match = of_match_device(ls_pcie_of_match, &pdev->dev); -+ if (!match) -+ return -ENODEV; -+ -+ pcie = devm_kzalloc(&pdev->dev, sizeof(*pcie), GFP_KERNEL); -+ if (!pcie) -+ return -ENOMEM; -+ -+ dbi_base = platform_get_resource_byname(pdev, IORESOURCE_MEM, "regs"); -+ pcie->dbi = devm_ioremap_resource(&pdev->dev, dbi_base); -+ if (IS_ERR(pcie->dbi)) { -+ dev_err(&pdev->dev, "missing *regs* space\n"); -+ return PTR_ERR(pcie->dbi); -+ } -+ -+ pcie->drvdata = match->data; -+ pcie->lut = pcie->dbi + pcie->drvdata->lut_offset; -+ /* Disable LDR zero */ -+ iowrite32(0, pcie->lut + PCIE_LUT_LDR(0)); -+ -+ if (!ls_pcie_is_bridge(pcie)) -+ return -ENODEV; -+ -+ if (of_device_is_compatible(pdev->dev.of_node, "fsl,ls2085a-pcie") || -+ of_device_is_compatible(pdev->dev.of_node, "fsl,ls2080a-pcie")) { -+ int len; -+ const u32 *prop; -+ struct device_node *np; -+ -+ np = pdev->dev.of_node; -+ prop = (u32 *)of_get_property(np, "available-stream-ids", &len); -+ if (prop) { -+ pcie->avail_streamids = prop; -+ pcie->streamid_index = len/sizeof(u32); -+ } else -+ dev_err(&pdev->dev, "PCIe endpoint partitioning not possible\n"); -+ } -+ -+ ret = ls_add_pcie_port(&pcie->pp, pdev); -+ if (ret < 0) -+ return ret; -+ -+ list_add_tail(&pcie->list_node, &hose_list); -+ -+ platform_set_drvdata(pdev, pcie); -+ -+ return 0; -+} -+ -+#ifdef 
CONFIG_PM_SLEEP -+static int ls_pcie_pm_do_suspend(struct ls_pcie *pcie) -+{ -+ u32 state; -+ int i = 0; -+ int ret; -+ u16 val; -+ -+ if (!pcie->in_slot) -+ return 0; -+ -+ if (!pcie->drvdata->pm->send_turn_off_message) -+ return 0; -+ -+ ret = pcie->drvdata->pm->send_turn_off_message(pcie); -+ if (ret) -+ return -EINVAL; -+ -+ while (i < 100) { -+ state = pcie->drvdata->pm->get_link_state(pcie); -+ if (state == LTSSM_PCIE_L2_IDLE) -+ break; -+ i++; -+ mdelay(1); -+ } -+ -+ /* Put RC in D3 */ -+ val = ioread16(pcie->dbi + PCIE_PM_SCR); -+ val |= PCIE_PM_SCR_PMEPS_D3; -+ iowrite16(val, pcie->dbi + PCIE_PM_SCR); -+ -+ mdelay(10); -+ -+ return 0; -+} -+ -+static int ls_pcie_pm_do_resume(struct ls_pcie *pcie) -+{ -+ u32 state; -+ int i = 0; -+ u16 val; -+ -+ ls_pcie_host_init(&pcie->pp); -+ -+ if (!pcie->in_slot) -+ return 0; -+ -+ /* Put RC in D0 */ -+ val = ioread16(pcie->dbi + PCIE_PM_SCR); -+ val &= PCIE_PM_SCR_PMEPS_D0; -+ iowrite16(val, pcie->dbi + PCIE_PM_SCR); -+ -+ mdelay(10); -+ -+ state = pcie->drvdata->pm->get_link_state(pcie); -+ if (state == LTSSM_PCIE_L0) -+ return 0; -+ -+ if (!pcie->drvdata->pm->reset_slot) -+ return -EINVAL; -+ -+ pcie->drvdata->pm->reset_slot(pcie, &pcie->pm_data); -+ -+ while (i < 100) { -+ state = pcie->drvdata->pm->get_link_state(pcie); -+ if (state == LTSSM_PCIE_L0) -+ return 0; -+ i++; -+ mdelay(1); -+ } -+ -+ return -EINVAL; -+} -+ -+static int ls_pcie_pm_suspend(void) -+{ -+ struct ls_pcie *hose, *tmp; -+ -+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) -+ ls_pcie_pm_do_suspend(hose); -+ -+ return 0; -+} -+ -+static void ls_pcie_pm_resume(void) -+{ -+ struct ls_pcie *hose, *tmp; -+ -+ list_for_each_entry_safe(hose, tmp, &hose_list, list_node) -+ ls_pcie_pm_do_resume(hose); -+} -+ -+static struct syscore_ops ls_pcie_syscore_pm_ops = { -+ .suspend = ls_pcie_pm_suspend, -+ .resume = ls_pcie_pm_resume, -+}; -+#endif /* CONFIG_PM_SLEEP */ -+ -+static struct platform_driver ls_pcie_driver = { -+ .probe = ls_pcie_probe, 
-+ .driver = { -+ .name = "layerscape-pcie", -+ .of_match_table = ls_pcie_of_match, -+ }, -+}; -+ -+static int __init fsl_pci_init(void) -+{ -+#ifdef CONFIG_PM_SLEEP -+ register_syscore_ops(&ls_pcie_syscore_pm_ops); -+#endif -+ return platform_driver_register(&ls_pcie_driver); -+} -+module_init(fsl_pci_init); -+ -+MODULE_AUTHOR("Minghuan Lian "); -+MODULE_DESCRIPTION("Freescale Layerscape PCIe host controller driver"); -+MODULE_LICENSE("GPL v2"); -diff --git a/drivers/pci/host/pci-layerscape.h b/drivers/pci/host/pci-layerscape.h -new file mode 100644 -index 0000000..e90e114 ---- /dev/null -+++ b/drivers/pci/host/pci-layerscape.h -@@ -0,0 +1,13 @@ -+/* -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ */ -+ -+#ifndef _PCI_LAYERSCAPE_H -+#define _PCI_LAYERSCAPE_H -+ -+/* function for setting up stream id to device id translation */ -+u32 set_pcie_streamid_translation(struct pci_dev *pdev, u32 devid); -+ -+#endif /* _PCI_LAYERSCAPE_H */ -diff --git a/drivers/pci/host/pci-mvebu.c b/drivers/pci/host/pci-mvebu.c -index b1315e1..94b42d1 100644 ---- a/drivers/pci/host/pci-mvebu.c -+++ b/drivers/pci/host/pci-mvebu.c -@@ -99,11 +99,9 @@ struct mvebu_pcie_port; - struct mvebu_pcie { - struct platform_device *pdev; - struct mvebu_pcie_port *ports; -- struct msi_chip *msi; -+ struct msi_controller *msi; - struct resource io; -- char io_name[30]; - struct resource realio; -- char mem_name[30]; - struct resource mem; - struct resource busn; - int nports; -@@ -722,18 +720,9 @@ static int mvebu_pcie_setup(int nr, struct pci_sys_data *sys) - { - struct mvebu_pcie *pcie = sys_to_pcie(sys); - int i; -- int domain = 0; - --#ifdef CONFIG_PCI_DOMAINS -- domain = sys->domain; --#endif -- -- snprintf(pcie->mem_name, sizeof(pcie->mem_name), "PCI MEM %04x", -- domain); -- pcie->mem.name = pcie->mem_name; -- -- snprintf(pcie->io_name, 
sizeof(pcie->io_name), "PCI I/O %04x", domain); -- pcie->realio.name = pcie->io_name; -+ pcie->mem.name = "PCI MEM"; -+ pcie->realio.name = "PCI I/O"; - - if (request_resource(&iomem_resource, &pcie->mem)) - return 0; -diff --git a/drivers/pci/host/pci-tegra.c b/drivers/pci/host/pci-tegra.c -index 19bb19c..971d8d7 100644 ---- a/drivers/pci/host/pci-tegra.c -+++ b/drivers/pci/host/pci-tegra.c -@@ -238,7 +238,7 @@ - ) - - struct tegra_msi { -- struct msi_chip chip; -+ struct msi_controller chip; - DECLARE_BITMAP(used, INT_PCI_MSI_NR); - struct irq_domain *domain; - unsigned long pages; -@@ -259,7 +259,7 @@ struct tegra_pcie_soc_data { - bool has_gen2; - }; - --static inline struct tegra_msi *to_tegra_msi(struct msi_chip *chip) -+static inline struct tegra_msi *to_tegra_msi(struct msi_controller *chip) - { - return container_of(chip, struct tegra_msi, chip); - } -@@ -1280,8 +1280,8 @@ static irqreturn_t tegra_pcie_msi_irq(int irq, void *data) - return processed > 0 ? IRQ_HANDLED : IRQ_NONE; - } - --static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, -- struct msi_desc *desc) -+static int tegra_msi_setup_irq(struct msi_controller *chip, -+ struct pci_dev *pdev, struct msi_desc *desc) - { - struct tegra_msi *msi = to_tegra_msi(chip); - struct msi_msg msg; -@@ -1305,12 +1305,13 @@ static int tegra_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, - msg.address_hi = 0; - msg.data = hwirq; - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - - return 0; - } - --static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) -+static void tegra_msi_teardown_irq(struct msi_controller *chip, -+ unsigned int irq) - { - struct tegra_msi *msi = to_tegra_msi(chip); - struct irq_data *d = irq_get_irq_data(irq); -@@ -1322,10 +1323,10 @@ static void tegra_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) - - static struct irq_chip tegra_msi_irq_chip = { - .name = "Tegra PCIe MSI", -- .irq_enable = unmask_msi_irq, -- 
.irq_disable = mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, -@@ -1333,7 +1334,6 @@ static int tegra_msi_map(struct irq_domain *domain, unsigned int irq, - { - irq_set_chip_and_handler(irq, &tegra_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - tegra_cpuidle_pcie_irqs_in_use(); - -diff --git a/drivers/pci/host/pci-xgene-msi.c b/drivers/pci/host/pci-xgene-msi.c -new file mode 100644 -index 0000000..8e559d1 ---- /dev/null -+++ b/drivers/pci/host/pci-xgene-msi.c -@@ -0,0 +1,595 @@ -+/* -+ * APM X-Gene MSI Driver -+ * -+ * Copyright (c) 2014, Applied Micro Circuits Corporation -+ * Author: Tanmay Inamdar -+ * Duc Dang -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ */ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MSI_IR0 0x000000 -+#define MSI_INT0 0x800000 -+#define IDX_PER_GROUP 8 -+#define IRQS_PER_IDX 16 -+#define NR_HW_IRQS 16 -+#define NR_MSI_VEC (IDX_PER_GROUP * IRQS_PER_IDX * NR_HW_IRQS) -+ -+struct xgene_msi_group { -+ struct xgene_msi *msi; -+ int gic_irq; -+ u32 msi_grp; -+}; -+ -+struct xgene_msi { -+ struct device_node *node; -+ struct msi_controller mchip; -+ struct irq_domain *domain; -+ u64 msi_addr; -+ void __iomem *msi_regs; -+ unsigned long *bitmap; -+ struct mutex bitmap_lock; -+ struct xgene_msi_group *msi_groups; -+ int num_cpus; -+}; -+ -+/* Global data */ -+static struct xgene_msi xgene_msi_ctrl; -+ -+static struct irq_chip xgene_msi_top_irq_chip = { -+ .name = "X-Gene1 MSI", -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, -+}; -+ -+static struct msi_domain_info xgene_msi_domain_info = { -+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | -+ MSI_FLAG_PCI_MSIX), -+ .chip = &xgene_msi_top_irq_chip, -+}; -+ -+/* -+ * X-Gene v1 has 16 groups of MSI termination registers MSInIRx, where -+ * n is group number (0..F), x is index of registers in each group (0..7) -+ * The register layout is as follows: -+ * MSI0IR0 base_addr -+ * MSI0IR1 base_addr + 0x10000 -+ * ... ... -+ * MSI0IR6 base_addr + 0x60000 -+ * MSI0IR7 base_addr + 0x70000 -+ * MSI1IR0 base_addr + 0x80000 -+ * MSI1IR1 base_addr + 0x90000 -+ * ... ... -+ * MSI1IR7 base_addr + 0xF0000 -+ * MSI2IR0 base_addr + 0x100000 -+ * ... ... -+ * MSIFIR0 base_addr + 0x780000 -+ * MSIFIR1 base_addr + 0x790000 -+ * ... ... -+ * MSIFIR7 base_addr + 0x7F0000 -+ * MSIINT0 base_addr + 0x800000 -+ * MSIINT1 base_addr + 0x810000 -+ * ... ... -+ * MSIINTF base_addr + 0x8F0000 -+ * -+ * Each index register supports 16 MSI vectors (0..15) to generate interrupt. 
-+ * There are total 16 GIC IRQs assigned for these 16 groups of MSI termination -+ * registers. -+ * -+ * Each MSI termination group has 1 MSIINTn register (n is 0..15) to indicate -+ * the MSI pending status caused by 1 of its 8 index registers. -+ */ -+ -+/* MSInIRx read helper */ -+static u32 xgene_msi_ir_read(struct xgene_msi *msi, -+ u32 msi_grp, u32 msir_idx) -+{ -+ return readl_relaxed(msi->msi_regs + MSI_IR0 + -+ (msi_grp << 19) + (msir_idx << 16)); -+} -+ -+/* MSIINTn read helper */ -+static u32 xgene_msi_int_read(struct xgene_msi *msi, u32 msi_grp) -+{ -+ return readl_relaxed(msi->msi_regs + MSI_INT0 + (msi_grp << 16)); -+} -+ -+/* -+ * With 2048 MSI vectors supported, the MSI message can be constructed using -+ * following scheme: -+ * - Divide into 8 256-vector groups -+ * Group 0: 0-255 -+ * Group 1: 256-511 -+ * Group 2: 512-767 -+ * ... -+ * Group 7: 1792-2047 -+ * - Each 256-vector group is divided into 16 16-vector groups -+ * As an example: 16 16-vector groups for 256-vector group 0-255 is -+ * Group 0: 0-15 -+ * Group 1: 16-32 -+ * ... 
-+ * Group 15: 240-255 -+ * - The termination address of MSI vector in 256-vector group n and 16-vector -+ * group x is the address of MSIxIRn -+ * - The data for MSI vector in 16-vector group x is x -+ */ -+static u32 hwirq_to_reg_set(unsigned long hwirq) -+{ -+ return (hwirq / (NR_HW_IRQS * IRQS_PER_IDX)); -+} -+ -+static u32 hwirq_to_group(unsigned long hwirq) -+{ -+ return (hwirq % NR_HW_IRQS); -+} -+ -+static u32 hwirq_to_msi_data(unsigned long hwirq) -+{ -+ return ((hwirq / NR_HW_IRQS) % IRQS_PER_IDX); -+} -+ -+static void xgene_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -+{ -+ struct xgene_msi *msi = irq_data_get_irq_chip_data(data); -+ u32 reg_set = hwirq_to_reg_set(data->hwirq); -+ u32 group = hwirq_to_group(data->hwirq); -+ u64 target_addr = msi->msi_addr + (((8 * group) + reg_set) << 16); -+ -+ msg->address_hi = upper_32_bits(target_addr); -+ msg->address_lo = lower_32_bits(target_addr); -+ msg->data = hwirq_to_msi_data(data->hwirq); -+} -+ -+/* -+ * X-Gene v1 only has 16 MSI GIC IRQs for 2048 MSI vectors. To maintain -+ * the expected behaviour of .set_affinity for each MSI interrupt, the 16 -+ * MSI GIC IRQs are statically allocated to 8 X-Gene v1 cores (2 GIC IRQs -+ * for each core). The MSI vector is moved fom 1 MSI GIC IRQ to another -+ * MSI GIC IRQ to steer its MSI interrupt to correct X-Gene v1 core. As a -+ * consequence, the total MSI vectors that X-Gene v1 supports will be -+ * reduced to 256 (2048/8) vectors. 
-+ */ -+static int hwirq_to_cpu(unsigned long hwirq) -+{ -+ return (hwirq % xgene_msi_ctrl.num_cpus); -+} -+ -+static unsigned long hwirq_to_canonical_hwirq(unsigned long hwirq) -+{ -+ return (hwirq - hwirq_to_cpu(hwirq)); -+} -+ -+static int xgene_msi_set_affinity(struct irq_data *irqdata, -+ const struct cpumask *mask, bool force) -+{ -+ int target_cpu = cpumask_first(mask); -+ int curr_cpu; -+ -+ curr_cpu = hwirq_to_cpu(irqdata->hwirq); -+ if (curr_cpu == target_cpu) -+ return IRQ_SET_MASK_OK_DONE; -+ -+ /* Update MSI number to target the new CPU */ -+ irqdata->hwirq = hwirq_to_canonical_hwirq(irqdata->hwirq) + target_cpu; -+ -+ return IRQ_SET_MASK_OK; -+} -+ -+static struct irq_chip xgene_msi_bottom_irq_chip = { -+ .name = "MSI", -+ .irq_set_affinity = xgene_msi_set_affinity, -+ .irq_compose_msi_msg = xgene_compose_msi_msg, -+}; -+ -+static int xgene_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *args) -+{ -+ struct xgene_msi *msi = domain->host_data; -+ int msi_irq; -+ -+ mutex_lock(&msi->bitmap_lock); -+ -+ msi_irq = bitmap_find_next_zero_area(msi->bitmap, NR_MSI_VEC, 0, -+ msi->num_cpus, 0); -+ if (msi_irq < NR_MSI_VEC) -+ bitmap_set(msi->bitmap, msi_irq, msi->num_cpus); -+ else -+ msi_irq = -ENOSPC; -+ -+ mutex_unlock(&msi->bitmap_lock); -+ -+ if (msi_irq < 0) -+ return msi_irq; -+ -+ irq_domain_set_info(domain, virq, msi_irq, -+ &xgene_msi_bottom_irq_chip, domain->host_data, -+ handle_simple_irq, NULL, NULL); -+ -+ return 0; -+} -+ -+static void xgene_irq_domain_free(struct irq_domain *domain, -+ unsigned int virq, unsigned int nr_irqs) -+{ -+ struct irq_data *d = irq_domain_get_irq_data(domain, virq); -+ struct xgene_msi *msi = irq_data_get_irq_chip_data(d); -+ u32 hwirq; -+ -+ mutex_lock(&msi->bitmap_lock); -+ -+ hwirq = hwirq_to_canonical_hwirq(d->hwirq); -+ bitmap_clear(msi->bitmap, hwirq, msi->num_cpus); -+ -+ mutex_unlock(&msi->bitmap_lock); -+ -+ irq_domain_free_irqs_parent(domain, virq, nr_irqs); -+} 
-+ -+static const struct irq_domain_ops msi_domain_ops = { -+ .alloc = xgene_irq_domain_alloc, -+ .free = xgene_irq_domain_free, -+}; -+ -+static int xgene_allocate_domains(struct xgene_msi *msi) -+{ -+ msi->domain = irq_domain_add_linear(NULL, NR_MSI_VEC, -+ &msi_domain_ops, msi); -+ if (!msi->domain) -+ return -ENOMEM; -+ -+ msi->mchip.domain = pci_msi_create_irq_domain(msi->mchip.of_node, -+ &xgene_msi_domain_info, -+ msi->domain); -+ -+ if (!msi->mchip.domain) { -+ irq_domain_remove(msi->domain); -+ return -ENOMEM; -+ } -+ -+ return 0; -+} -+ -+static void xgene_free_domains(struct xgene_msi *msi) -+{ -+ if (msi->mchip.domain) -+ irq_domain_remove(msi->mchip.domain); -+ if (msi->domain) -+ irq_domain_remove(msi->domain); -+} -+ -+static int xgene_msi_init_allocator(struct xgene_msi *xgene_msi) -+{ -+ int size = BITS_TO_LONGS(NR_MSI_VEC) * sizeof(long); -+ -+ xgene_msi->bitmap = kzalloc(size, GFP_KERNEL); -+ if (!xgene_msi->bitmap) -+ return -ENOMEM; -+ -+ mutex_init(&xgene_msi->bitmap_lock); -+ -+ xgene_msi->msi_groups = kcalloc(NR_HW_IRQS, -+ sizeof(struct xgene_msi_group), -+ GFP_KERNEL); -+ if (!xgene_msi->msi_groups) -+ return -ENOMEM; -+ -+ return 0; -+} -+ -+static void xgene_msi_isr(unsigned int irq, struct irq_desc *desc) -+{ -+ struct irq_chip *chip = irq_desc_get_chip(desc); -+ struct xgene_msi_group *msi_groups; -+ struct xgene_msi *xgene_msi; -+ unsigned int virq; -+ int msir_index, msir_val, hw_irq; -+ u32 intr_index, grp_select, msi_grp; -+ -+ chained_irq_enter(chip, desc); -+ -+ msi_groups = irq_desc_get_handler_data(desc); -+ xgene_msi = msi_groups->msi; -+ msi_grp = msi_groups->msi_grp; -+ -+ /* -+ * MSIINTn (n is 0..F) indicates if there is a pending MSI interrupt -+ * If bit x of this register is set (x is 0..7), one or more interupts -+ * corresponding to MSInIRx is set. 
-+ */ -+ grp_select = xgene_msi_int_read(xgene_msi, msi_grp); -+ while (grp_select) { -+ msir_index = ffs(grp_select) - 1; -+ /* -+ * Calculate MSInIRx address to read to check for interrupts -+ * (refer to termination address and data assignment -+ * described in xgene_compose_msi_msg() ) -+ */ -+ msir_val = xgene_msi_ir_read(xgene_msi, msi_grp, msir_index); -+ while (msir_val) { -+ intr_index = ffs(msir_val) - 1; -+ /* -+ * Calculate MSI vector number (refer to the termination -+ * address and data assignment described in -+ * xgene_compose_msi_msg function) -+ */ -+ hw_irq = (((msir_index * IRQS_PER_IDX) + intr_index) * -+ NR_HW_IRQS) + msi_grp; -+ /* -+ * As we have multiple hw_irq that maps to single MSI, -+ * always look up the virq using the hw_irq as seen from -+ * CPU0 -+ */ -+ hw_irq = hwirq_to_canonical_hwirq(hw_irq); -+ virq = irq_find_mapping(xgene_msi->domain, hw_irq); -+ WARN_ON(!virq); -+ if (virq != 0) -+ generic_handle_irq(virq); -+ msir_val &= ~(1 << intr_index); -+ } -+ grp_select &= ~(1 << msir_index); -+ -+ if (!grp_select) { -+ /* -+ * We handled all interrupts happened in this group, -+ * resample this group MSI_INTx register in case -+ * something else has been made pending in the meantime -+ */ -+ grp_select = xgene_msi_int_read(xgene_msi, msi_grp); -+ } -+ } -+ -+ chained_irq_exit(chip, desc); -+} -+ -+static int xgene_msi_remove(struct platform_device *pdev) -+{ -+ int virq, i; -+ struct xgene_msi *msi = platform_get_drvdata(pdev); -+ -+ for (i = 0; i < NR_HW_IRQS; i++) { -+ virq = msi->msi_groups[i].gic_irq; -+ if (virq != 0) { -+ irq_set_chained_handler(virq, NULL); -+ irq_set_handler_data(virq, NULL); -+ } -+ } -+ kfree(msi->msi_groups); -+ -+ kfree(msi->bitmap); -+ msi->bitmap = NULL; -+ -+ xgene_free_domains(msi); -+ -+ return 0; -+} -+ -+static int xgene_msi_hwirq_alloc(unsigned int cpu) -+{ -+ struct xgene_msi *msi = &xgene_msi_ctrl; -+ struct xgene_msi_group *msi_group; -+ cpumask_var_t mask; -+ int i; -+ int err; -+ -+ for (i = 
cpu; i < NR_HW_IRQS; i += msi->num_cpus) { -+ msi_group = &msi->msi_groups[i]; -+ if (!msi_group->gic_irq) -+ continue; -+ -+ irq_set_chained_handler(msi_group->gic_irq, -+ xgene_msi_isr); -+ err = irq_set_handler_data(msi_group->gic_irq, msi_group); -+ if (err) { -+ pr_err("failed to register GIC IRQ handler\n"); -+ return -EINVAL; -+ } -+ /* -+ * Statically allocate MSI GIC IRQs to each CPU core. -+ * With 8-core X-Gene v1, 2 MSI GIC IRQs are allocated -+ * to each core. -+ */ -+ if (alloc_cpumask_var(&mask, GFP_KERNEL)) { -+ cpumask_clear(mask); -+ cpumask_set_cpu(cpu, mask); -+ err = irq_set_affinity(msi_group->gic_irq, mask); -+ if (err) -+ pr_err("failed to set affinity for GIC IRQ"); -+ free_cpumask_var(mask); -+ } else { -+ pr_err("failed to alloc CPU mask for affinity\n"); -+ err = -EINVAL; -+ } -+ -+ if (err) { -+ irq_set_chained_handler(msi_group->gic_irq, NULL); -+ irq_set_handler_data(msi_group->gic_irq, NULL); -+ return err; -+ } -+ } -+ -+ return 0; -+} -+ -+static void xgene_msi_hwirq_free(unsigned int cpu) -+{ -+ struct xgene_msi *msi = &xgene_msi_ctrl; -+ struct xgene_msi_group *msi_group; -+ int i; -+ -+ for (i = cpu; i < NR_HW_IRQS; i += msi->num_cpus) { -+ msi_group = &msi->msi_groups[i]; -+ if (!msi_group->gic_irq) -+ continue; -+ -+ irq_set_chained_handler(msi_group->gic_irq, NULL); -+ irq_set_handler_data(msi_group->gic_irq, NULL); -+ } -+} -+ -+static int xgene_msi_cpu_callback(struct notifier_block *nfb, -+ unsigned long action, void *hcpu) -+{ -+ unsigned cpu = (unsigned long)hcpu; -+ -+ switch (action) { -+ case CPU_ONLINE: -+ case CPU_ONLINE_FROZEN: -+ xgene_msi_hwirq_alloc(cpu); -+ break; -+ case CPU_DEAD: -+ case CPU_DEAD_FROZEN: -+ xgene_msi_hwirq_free(cpu); -+ break; -+ default: -+ break; -+ } -+ -+ return NOTIFY_OK; -+} -+ -+static struct notifier_block xgene_msi_cpu_notifier = { -+ .notifier_call = xgene_msi_cpu_callback, -+}; -+ -+static const struct of_device_id xgene_msi_match_table[] = { -+ {.compatible = "apm,xgene1-msi"}, -+ 
{}, -+}; -+ -+static int xgene_msi_probe(struct platform_device *pdev) -+{ -+ struct resource *res; -+ int rc, irq_index; -+ struct xgene_msi *xgene_msi; -+ unsigned int cpu; -+ int virt_msir; -+ u32 msi_val, msi_idx; -+ -+ xgene_msi = &xgene_msi_ctrl; -+ -+ platform_set_drvdata(pdev, xgene_msi); -+ -+ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ xgene_msi->msi_regs = devm_ioremap_resource(&pdev->dev, res); -+ if (IS_ERR(xgene_msi->msi_regs)) { -+ dev_err(&pdev->dev, "no reg space\n"); -+ rc = -EINVAL; -+ goto error; -+ } -+ xgene_msi->msi_addr = res->start; -+ -+ xgene_msi->num_cpus = num_possible_cpus(); -+ -+ rc = xgene_msi_init_allocator(xgene_msi); -+ if (rc) { -+ dev_err(&pdev->dev, "Error allocating MSI bitmap\n"); -+ goto error; -+ } -+ -+ rc = xgene_allocate_domains(xgene_msi); -+ if (rc) { -+ dev_err(&pdev->dev, "Failed to allocate MSI domain\n"); -+ goto error; -+ } -+ -+ for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { -+ virt_msir = platform_get_irq(pdev, irq_index); -+ if (virt_msir < 0) { -+ dev_err(&pdev->dev, "Cannot translate IRQ index %d\n", -+ irq_index); -+ rc = -EINVAL; -+ goto error; -+ } -+ xgene_msi->msi_groups[irq_index].gic_irq = virt_msir; -+ xgene_msi->msi_groups[irq_index].msi_grp = irq_index; -+ xgene_msi->msi_groups[irq_index].msi = xgene_msi; -+ } -+ -+ /* -+ * MSInIRx registers are read-to-clear; before registering -+ * interrupt handlers, read all of them to clear spurious -+ * interrupts that may occur before the driver is probed. 
-+ */ -+ for (irq_index = 0; irq_index < NR_HW_IRQS; irq_index++) { -+ for (msi_idx = 0; msi_idx < IDX_PER_GROUP; msi_idx++) -+ msi_val = xgene_msi_ir_read(xgene_msi, irq_index, -+ msi_idx); -+ /* Read MSIINTn to confirm */ -+ msi_val = xgene_msi_int_read(xgene_msi, irq_index); -+ if (msi_val) { -+ dev_err(&pdev->dev, "Failed to clear spurious IRQ\n"); -+ rc = -EINVAL; -+ goto error; -+ } -+ } -+ -+ cpu_notifier_register_begin(); -+ -+ for_each_online_cpu(cpu) -+ if (xgene_msi_hwirq_alloc(cpu)) { -+ dev_err(&pdev->dev, "failed to register MSI handlers\n"); -+ cpu_notifier_register_done(); -+ goto error; -+ } -+ -+ rc = __register_hotcpu_notifier(&xgene_msi_cpu_notifier); -+ if (rc) { -+ dev_err(&pdev->dev, "failed to add CPU MSI notifier\n"); -+ cpu_notifier_register_done(); -+ goto error; -+ } -+ -+ cpu_notifier_register_done(); -+ -+ xgene_msi->mchip.of_node = pdev->dev.of_node; -+ rc = of_pci_msi_chip_add(&xgene_msi->mchip); -+ if (rc) { -+ dev_err(&pdev->dev, "failed to add MSI controller chip\n"); -+ goto error_notifier; -+ } -+ -+ dev_info(&pdev->dev, "APM X-Gene PCIe MSI driver loaded\n"); -+ -+ return 0; -+ -+error_notifier: -+ unregister_hotcpu_notifier(&xgene_msi_cpu_notifier); -+error: -+ xgene_msi_remove(pdev); -+ return rc; -+} -+ -+static struct platform_driver xgene_msi_driver = { -+ .driver = { -+ .name = "xgene-msi", -+ .owner = THIS_MODULE, -+ .of_match_table = xgene_msi_match_table, -+ }, -+ .probe = xgene_msi_probe, -+ .remove = xgene_msi_remove, -+}; -+ -+static int __init xgene_pcie_msi_init(void) -+{ -+ return platform_driver_register(&xgene_msi_driver); -+} -+subsys_initcall(xgene_pcie_msi_init); -diff --git a/drivers/pci/host/pci-xgene.c b/drivers/pci/host/pci-xgene.c -index 2988fe1..0dac1fb 100644 ---- a/drivers/pci/host/pci-xgene.c -+++ b/drivers/pci/host/pci-xgene.c -@@ -401,11 +401,11 @@ static int xgene_pcie_map_ranges(struct xgene_pcie_port *port, - struct list_head *res, - resource_size_t io_base) - { -- struct pci_host_bridge_window 
*window; -+ struct resource_entry *window; - struct device *dev = port->dev; - int ret; - -- list_for_each_entry(window, res, list) { -+ resource_list_for_each_entry(window, res) { - struct resource *res = window->res; - u64 restype = resource_type(res); - -@@ -600,6 +600,23 @@ static int xgene_pcie_setup(struct xgene_pcie_port *port, - return 0; - } - -+static int xgene_pcie_msi_enable(struct pci_bus *bus) -+{ -+ struct device_node *msi_node; -+ -+ msi_node = of_parse_phandle(bus->dev.of_node, -+ "msi-parent", 0); -+ if (!msi_node) -+ return -ENODEV; -+ -+ bus->msi = of_pci_find_msi_chip_by_node(msi_node); -+ if (!bus->msi) -+ return -ENODEV; -+ -+ bus->msi->dev = &bus->dev; -+ return 0; -+} -+ - static int xgene_pcie_probe_bridge(struct platform_device *pdev) - { - struct device_node *dn = pdev->dev.of_node; -@@ -636,6 +653,10 @@ static int xgene_pcie_probe_bridge(struct platform_device *pdev) - if (!bus) - return -ENOMEM; - -+ if (IS_ENABLED(CONFIG_PCI_MSI)) -+ if (xgene_pcie_msi_enable(bus)) -+ dev_info(port->dev, "failed to enable MSI\n"); -+ - pci_scan_child_bus(bus); - pci_assign_unassigned_bus_resources(bus); - pci_bus_add_devices(bus); -diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c -index f69b0d0..8a9241b 100644 ---- a/drivers/pci/host/pcie-designware.c -+++ b/drivers/pci/host/pcie-designware.c -@@ -15,7 +15,6 @@ - #include - #include - #include --#include - #include - #include - #include -@@ -31,6 +30,7 @@ - #define PORT_LINK_MODE_1_LANES (0x1 << 16) - #define PORT_LINK_MODE_2_LANES (0x3 << 16) - #define PORT_LINK_MODE_4_LANES (0x7 << 16) -+#define PORT_LINK_MODE_8_LANES (0xf << 16) - - #define PCIE_LINK_WIDTH_SPEED_CONTROL 0x80C - #define PORT_LOGIC_SPEED_CHANGE (0x1 << 17) -@@ -38,12 +38,7 @@ - #define PORT_LOGIC_LINK_WIDTH_1_LANES (0x1 << 8) - #define PORT_LOGIC_LINK_WIDTH_2_LANES (0x2 << 8) - #define PORT_LOGIC_LINK_WIDTH_4_LANES (0x4 << 8) -- --#define PCIE_MSI_ADDR_LO 0x820 --#define PCIE_MSI_ADDR_HI 0x824 
--#define PCIE_MSI_INTR0_ENABLE 0x828 --#define PCIE_MSI_INTR0_MASK 0x82C --#define PCIE_MSI_INTR0_STATUS 0x830 -+#define PORT_LOGIC_LINK_WIDTH_8_LANES (0x8 << 8) - - #define PCIE_ATU_VIEWPORT 0x900 - #define PCIE_ATU_REGION_INBOUND (0x1 << 31) -@@ -67,39 +62,40 @@ - #define PCIE_ATU_FUNC(x) (((x) & 0x7) << 16) - #define PCIE_ATU_UPPER_TARGET 0x91C - --static struct hw_pci dw_pci; -- --static unsigned long global_io_offset; -- --static inline struct pcie_port *sys_to_pcie(struct pci_sys_data *sys) --{ -- BUG_ON(!sys->private_data); -- -- return sys->private_data; --} -+static struct pci_ops dw_pcie_ops; - --int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val) -+int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val) - { -- *val = readl(addr); -+ if ((uintptr_t)addr & (size - 1)) { -+ *val = 0; -+ return PCIBIOS_BAD_REGISTER_NUMBER; -+ } - -- if (size == 1) -- *val = (*val >> (8 * (where & 3))) & 0xff; -+ if (size == 4) -+ *val = readl(addr); - else if (size == 2) -- *val = (*val >> (8 * (where & 3))) & 0xffff; -- else if (size != 4) -+ *val = readw(addr); -+ else if (size == 1) -+ *val = readb(addr); -+ else { -+ *val = 0; - return PCIBIOS_BAD_REGISTER_NUMBER; -+ } - - return PCIBIOS_SUCCESSFUL; - } - --int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val) -+int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val) - { -+ if ((uintptr_t)addr & (size - 1)) -+ return PCIBIOS_BAD_REGISTER_NUMBER; -+ - if (size == 4) - writel(val, addr); - else if (size == 2) -- writew(val, addr + (where & 2)); -+ writew(val, addr); - else if (size == 1) -- writeb(val, addr + (where & 3)); -+ writeb(val, addr); - else - return PCIBIOS_BAD_REGISTER_NUMBER; - -@@ -130,8 +126,7 @@ static int dw_pcie_rd_own_conf(struct pcie_port *pp, int where, int size, - if (pp->ops->rd_own_conf) - ret = pp->ops->rd_own_conf(pp, where, size, val); - else -- ret = dw_pcie_cfg_read(pp->dbi_base + (where & ~0x3), where, -- size, val); -+ ret = 
dw_pcie_cfg_read(pp->dbi_base + where, size, val); - - return ret; - } -@@ -144,182 +139,26 @@ static int dw_pcie_wr_own_conf(struct pcie_port *pp, int where, int size, - if (pp->ops->wr_own_conf) - ret = pp->ops->wr_own_conf(pp, where, size, val); - else -- ret = dw_pcie_cfg_write(pp->dbi_base + (where & ~0x3), where, -- size, val); -+ ret = dw_pcie_cfg_write(pp->dbi_base + where, size, val); - - return ret; - } - --static struct irq_chip dw_msi_irq_chip = { -- .name = "PCI-MSI", -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, --}; -- --/* MSI int handler */ --irqreturn_t dw_handle_msi_irq(struct pcie_port *pp) -+static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, -+ int type, u64 cpu_addr, u64 pci_addr, u32 size) - { -- unsigned long val; -- int i, pos, irq; -- irqreturn_t ret = IRQ_NONE; -- -- for (i = 0; i < MAX_MSI_CTRLS; i++) { -- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_STATUS + i * 12, 4, -- (u32 *)&val); -- if (val) { -- ret = IRQ_HANDLED; -- pos = 0; -- while ((pos = find_next_bit(&val, 32, pos)) != 32) { -- irq = irq_find_mapping(pp->irq_domain, -- i * 32 + pos); -- dw_pcie_wr_own_conf(pp, -- PCIE_MSI_INTR0_STATUS + i * 12, -- 4, 1 << pos); -- generic_handle_irq(irq); -- pos++; -- } -- } -- } -- -- return ret; --} -- --void dw_pcie_msi_init(struct pcie_port *pp) --{ -- pp->msi_data = __get_free_pages(GFP_KERNEL, 0); -- -- /* program the msi_data */ -- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_LO, 4, -- virt_to_phys((void *)pp->msi_data)); -- dw_pcie_wr_own_conf(pp, PCIE_MSI_ADDR_HI, 4, 0); --} -- --static void dw_pcie_msi_clear_irq(struct pcie_port *pp, int irq) --{ -- unsigned int res, bit, val; -- -- res = (irq / 32) * 12; -- bit = irq % 32; -- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); -- val &= ~(1 << bit); -- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); --} -- --static void clear_irq_range(struct pcie_port *pp, unsigned 
int irq_base, -- unsigned int nvec, unsigned int pos) --{ -- unsigned int i; -- -- for (i = 0; i < nvec; i++) { -- irq_set_msi_desc_off(irq_base, i, NULL); -- /* Disable corresponding interrupt on MSI controller */ -- if (pp->ops->msi_clear_irq) -- pp->ops->msi_clear_irq(pp, pos + i); -- else -- dw_pcie_msi_clear_irq(pp, pos + i); -- } -- -- bitmap_release_region(pp->msi_irq_in_use, pos, order_base_2(nvec)); --} -- --static void dw_pcie_msi_set_irq(struct pcie_port *pp, int irq) --{ -- unsigned int res, bit, val; -- -- res = (irq / 32) * 12; -- bit = irq % 32; -- dw_pcie_rd_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, &val); -- val |= 1 << bit; -- dw_pcie_wr_own_conf(pp, PCIE_MSI_INTR0_ENABLE + res, 4, val); --} -- --static int assign_irq(int no_irqs, struct msi_desc *desc, int *pos) --{ -- int irq, pos0, i; -- struct pcie_port *pp = sys_to_pcie(desc->dev->bus->sysdata); -- -- pos0 = bitmap_find_free_region(pp->msi_irq_in_use, MAX_MSI_IRQS, -- order_base_2(no_irqs)); -- if (pos0 < 0) -- goto no_valid_irq; -- -- irq = irq_find_mapping(pp->irq_domain, pos0); -- if (!irq) -- goto no_valid_irq; -- -- /* -- * irq_create_mapping (called from dw_pcie_host_init) pre-allocates -- * descs so there is no need to allocate descs here. We can therefore -- * assume that if irq_find_mapping above returns non-zero, then the -- * descs are also successfully allocated. 
-- */ -- -- for (i = 0; i < no_irqs; i++) { -- if (irq_set_msi_desc_off(irq, i, desc) != 0) { -- clear_irq_range(pp, irq, i, pos0); -- goto no_valid_irq; -- } -- /*Enable corresponding interrupt in MSI interrupt controller */ -- if (pp->ops->msi_set_irq) -- pp->ops->msi_set_irq(pp, pos0 + i); -- else -- dw_pcie_msi_set_irq(pp, pos0 + i); -- } -- -- *pos = pos0; -- return irq; -- --no_valid_irq: -- *pos = pos0; -- return -ENOSPC; --} -- --static int dw_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, -- struct msi_desc *desc) --{ -- int irq, pos; -- struct msi_msg msg; -- struct pcie_port *pp = sys_to_pcie(pdev->bus->sysdata); -- -- if (desc->msi_attrib.is_msix) -- return -EINVAL; -- -- irq = assign_irq(1, desc, &pos); -- if (irq < 0) -- return irq; -- -- if (pp->ops->get_msi_addr) -- msg.address_lo = pp->ops->get_msi_addr(pp); -- else -- msg.address_lo = virt_to_phys((void *)pp->msi_data); -- msg.address_hi = 0x0; -- -- if (pp->ops->get_msi_data) -- msg.data = pp->ops->get_msi_data(pp, pos); -- else -- msg.data = pos; -- -- write_msi_msg(irq, &msg); -- -- return 0; --} -- --static void dw_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) --{ -- struct irq_data *data = irq_get_irq_data(irq); -- struct msi_desc *msi = irq_data_get_msi(data); -- struct pcie_port *pp = sys_to_pcie(msi->dev->bus->sysdata); -- -- clear_irq_range(pp, irq, 1, data->hwirq); -+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, -+ PCIE_ATU_VIEWPORT); -+ dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr), PCIE_ATU_LOWER_BASE); -+ dw_pcie_writel_rc(pp, upper_32_bits(cpu_addr), PCIE_ATU_UPPER_BASE); -+ dw_pcie_writel_rc(pp, lower_32_bits(cpu_addr + size - 1), -+ PCIE_ATU_LIMIT); -+ dw_pcie_writel_rc(pp, lower_32_bits(pci_addr), PCIE_ATU_LOWER_TARGET); -+ dw_pcie_writel_rc(pp, upper_32_bits(pci_addr), PCIE_ATU_UPPER_TARGET); -+ dw_pcie_writel_rc(pp, type, PCIE_ATU_CR1); -+ dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); - } - --static struct msi_chip dw_pcie_msi_chip = { 
-- .setup_irq = dw_msi_setup_irq, -- .teardown_irq = dw_msi_teardown_irq, --}; -- - int dw_pcie_link_up(struct pcie_port *pp) - { - if (pp->ops->link_up) -@@ -328,36 +167,42 @@ int dw_pcie_link_up(struct pcie_port *pp) - return 0; - } - --static int dw_pcie_msi_map(struct irq_domain *domain, unsigned int irq, -- irq_hw_number_t hwirq) -+static int dw_pcie_msi_ctrl_init(struct pcie_port *pp) - { -- irq_set_chip_and_handler(irq, &dw_msi_irq_chip, handle_simple_irq); -- irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); -+ struct device_node *msi_node; -+ -+ if (!IS_ENABLED(CONFIG_PCI_MSI)) { -+ pp->msi = NULL; -+ return 0; -+ } -+ -+ if (pp->msi) -+ return 0; -+ -+ msi_node = of_parse_phandle(pp->dev->of_node, "msi-parent", 0); -+ if (msi_node) { -+ pp->msi = of_pci_find_msi_chip_by_node(msi_node); -+ if (!pp->msi) { -+ dev_err(pp->dev, "Cannot find msi chip of %s\n", -+ msi_node->full_name); -+ return -ENODEV; -+ } else -+ return 0; -+ } - - return 0; - } - --static const struct irq_domain_ops msi_domain_ops = { -- .map = dw_pcie_msi_map, --}; -- - int dw_pcie_host_init(struct pcie_port *pp) - { - struct device_node *np = pp->dev->of_node; - struct platform_device *pdev = to_platform_device(pp->dev); -- struct of_pci_range range; -- struct of_pci_range_parser parser; -+ struct pci_bus *bus, *child; - struct resource *cfg_res; -- u32 val, na, ns; -- const __be32 *addrp; -- int i, index, ret; -- -- /* Find the address cell size and the number of cells in order to get -- * the untranslated address. 
-- */ -- of_property_read_u32(np, "#address-cells", &na); -- ns = of_n_size_cells(np); -+ u32 val; -+ int ret; -+ LIST_HEAD(res); -+ struct resource_entry *win; - - cfg_res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "config"); - if (cfg_res) { -@@ -365,87 +210,61 @@ int dw_pcie_host_init(struct pcie_port *pp) - pp->cfg1_size = resource_size(cfg_res)/2; - pp->cfg0_base = cfg_res->start; - pp->cfg1_base = cfg_res->start + pp->cfg0_size; -- -- /* Find the untranslated configuration space address */ -- index = of_property_match_string(np, "reg-names", "config"); -- addrp = of_get_address(np, index, NULL, NULL); -- pp->cfg0_mod_base = of_read_number(addrp, ns); -- pp->cfg1_mod_base = pp->cfg0_mod_base + pp->cfg0_size; -- } else { -+ } else if (!pp->va_cfg0_base) { - dev_err(pp->dev, "missing *config* reg space\n"); - } - -- if (of_pci_range_parser_init(&parser, np)) { -- dev_err(pp->dev, "missing ranges property\n"); -- return -EINVAL; -- } -+ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &pp->io_base); -+ if (ret) -+ return ret; - - /* Get the I/O and memory ranges from DT */ -- for_each_of_pci_range(&parser, &range) { -- unsigned long restype = range.flags & IORESOURCE_TYPE_BITS; -- if (restype == IORESOURCE_IO) { -- of_pci_range_to_resource(&range, np, &pp->io); -- pp->io.name = "I/O"; -- pp->io.start = max_t(resource_size_t, -- PCIBIOS_MIN_IO, -- range.pci_addr + global_io_offset); -- pp->io.end = min_t(resource_size_t, -- IO_SPACE_LIMIT, -- range.pci_addr + range.size -- + global_io_offset - 1); -- pp->io_size = resource_size(&pp->io); -- pp->io_bus_addr = range.pci_addr; -- pp->io_base = range.cpu_addr; -- -- /* Find the untranslated IO space address */ -- pp->io_mod_base = of_read_number(parser.range - -- parser.np + na, ns); -- } -- if (restype == IORESOURCE_MEM) { -- of_pci_range_to_resource(&range, np, &pp->mem); -- pp->mem.name = "MEM"; -- pp->mem_size = resource_size(&pp->mem); -- pp->mem_bus_addr = range.pci_addr; -- -- /* Find the 
untranslated MEM space address */ -- pp->mem_mod_base = of_read_number(parser.range - -- parser.np + na, ns); -- } -- if (restype == 0) { -- of_pci_range_to_resource(&range, np, &pp->cfg); -- pp->cfg0_size = resource_size(&pp->cfg)/2; -- pp->cfg1_size = resource_size(&pp->cfg)/2; -- pp->cfg0_base = pp->cfg.start; -- pp->cfg1_base = pp->cfg.start + pp->cfg0_size; -- -- /* Find the untranslated configuration space address */ -- pp->cfg0_mod_base = of_read_number(parser.range - -- parser.np + na, ns); -- pp->cfg1_mod_base = pp->cfg0_mod_base + -- pp->cfg0_size; -+ resource_list_for_each_entry(win, &res) { -+ switch (resource_type(win->res)) { -+ case IORESOURCE_IO: -+ pp->io = win->res; -+ pp->io->name = "I/O"; -+ pp->io_size = resource_size(pp->io); -+ pp->io_bus_addr = pp->io->start - win->offset; -+ ret = pci_remap_iospace(pp->io, pp->io_base); -+ if (ret) { -+ dev_warn(pp->dev, "error %d: failed to map resource %pR\n", -+ ret, pp->io); -+ continue; -+ } -+ pp->io_base = pp->io->start; -+ break; -+ case IORESOURCE_MEM: -+ pp->mem = win->res; -+ pp->mem->name = "MEM"; -+ pp->mem_size = resource_size(pp->mem); -+ pp->mem_bus_addr = pp->mem->start - win->offset; -+ break; -+ case 0: -+ pp->cfg = win->res; -+ pp->cfg0_size = resource_size(pp->cfg)/2; -+ pp->cfg1_size = resource_size(pp->cfg)/2; -+ pp->cfg0_base = pp->cfg->start; -+ pp->cfg1_base = pp->cfg->start + pp->cfg0_size; -+ break; -+ case IORESOURCE_BUS: -+ pp->busn = win->res; -+ break; -+ default: -+ continue; - } - } - -- ret = of_pci_parse_bus_range(np, &pp->busn); -- if (ret < 0) { -- pp->busn.name = np->name; -- pp->busn.start = 0; -- pp->busn.end = 0xff; -- pp->busn.flags = IORESOURCE_BUS; -- dev_dbg(pp->dev, "failed to parse bus-range property: %d, using default %pR\n", -- ret, &pp->busn); -- } -- - if (!pp->dbi_base) { -- pp->dbi_base = devm_ioremap(pp->dev, pp->cfg.start, -- resource_size(&pp->cfg)); -+ pp->dbi_base = devm_ioremap(pp->dev, pp->cfg->start, -+ resource_size(pp->cfg)); - if 
(!pp->dbi_base) { - dev_err(pp->dev, "error with ioremap\n"); - return -ENOMEM; - } - } - -- pp->mem_base = pp->mem.start; -+ pp->mem_base = pp->mem->start; - - if (!pp->va_cfg0_base) { - pp->va_cfg0_base = devm_ioremap(pp->dev, pp->cfg0_base, -@@ -465,33 +284,18 @@ int dw_pcie_host_init(struct pcie_port *pp) - } - } - -- if (of_property_read_u32(np, "num-lanes", &pp->lanes)) { -- dev_err(pp->dev, "Failed to parse the number of lanes\n"); -- return -EINVAL; -- } -- -- if (IS_ENABLED(CONFIG_PCI_MSI)) { -- if (!pp->ops->msi_host_init) { -- pp->irq_domain = irq_domain_add_linear(pp->dev->of_node, -- MAX_MSI_IRQS, &msi_domain_ops, -- &dw_pcie_msi_chip); -- if (!pp->irq_domain) { -- dev_err(pp->dev, "irq domain init failed\n"); -- return -ENXIO; -- } -- -- for (i = 0; i < MAX_MSI_IRQS; i++) -- irq_create_mapping(pp->irq_domain, i); -- } else { -- ret = pp->ops->msi_host_init(pp, &dw_pcie_msi_chip); -- if (ret < 0) -- return ret; -- } -- } -+ ret = of_property_read_u32(np, "num-lanes", &pp->lanes); -+ if (ret) -+ pp->lanes = 0; - - if (pp->ops->host_init) - pp->ops->host_init(pp); - -+ if (!pp->ops->rd_other_conf) -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, -+ PCIE_ATU_TYPE_MEM, pp->mem_base, -+ pp->mem_bus_addr, pp->mem_size); -+ - dw_pcie_wr_own_conf(pp, PCI_BASE_ADDRESS_0, 4, 0); - - /* program correct class for RC */ -@@ -501,126 +305,113 @@ int dw_pcie_host_init(struct pcie_port *pp) - val |= PORT_LOGIC_SPEED_CHANGE; - dw_pcie_wr_own_conf(pp, PCIE_LINK_WIDTH_SPEED_CONTROL, 4, val); - -- dw_pci.nr_controllers = 1; -- dw_pci.private_data = (void **)&pp; -+ pp->root_bus_nr = pp->busn->start; -+#if 0 -+ bus = pci_scan_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops, -+ pp, &res); -+ if (!bus) -+ return -ENOMEM; -+#else -+ bus = pci_create_root_bus(pp->dev, pp->root_bus_nr, &dw_pcie_ops, -+ pp, &res); -+ if (!bus) -+ return -ENODEV; -+ -+ ret = dw_pcie_msi_ctrl_init(pp); -+ if (ret) -+ return ret; - -- pci_common_init_dev(pp->dev, &dw_pci); --#ifdef 
CONFIG_PCI_DOMAINS -- dw_pci.domain++; -+ bus->msi = pp->msi; -+ -+ pci_scan_child_bus(bus); - #endif - -- return 0; --} -+ if (pp->ops->scan_bus) -+ pp->ops->scan_bus(pp); - --static void dw_pcie_prog_viewport_cfg0(struct pcie_port *pp, u32 busdev) --{ -- /* Program viewport 0 : OUTBOUND : CFG0 */ -- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, -- PCIE_ATU_VIEWPORT); -- dw_pcie_writel_rc(pp, pp->cfg0_mod_base, PCIE_ATU_LOWER_BASE); -- dw_pcie_writel_rc(pp, (pp->cfg0_mod_base >> 32), PCIE_ATU_UPPER_BASE); -- dw_pcie_writel_rc(pp, pp->cfg0_mod_base + pp->cfg0_size - 1, -- PCIE_ATU_LIMIT); -- dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); -- dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); -- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG0, PCIE_ATU_CR1); -- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); --} -+#ifdef CONFIG_ARM -+ /* support old dtbs that incorrectly describe IRQs */ -+ pci_fixup_irqs(pci_common_swizzle, of_irq_parse_and_map_pci); -+#endif - --static void dw_pcie_prog_viewport_cfg1(struct pcie_port *pp, u32 busdev) --{ -- /* Program viewport 1 : OUTBOUND : CFG1 */ -- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, -- PCIE_ATU_VIEWPORT); -- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_CFG1, PCIE_ATU_CR1); -- dw_pcie_writel_rc(pp, pp->cfg1_mod_base, PCIE_ATU_LOWER_BASE); -- dw_pcie_writel_rc(pp, (pp->cfg1_mod_base >> 32), PCIE_ATU_UPPER_BASE); -- dw_pcie_writel_rc(pp, pp->cfg1_mod_base + pp->cfg1_size - 1, -- PCIE_ATU_LIMIT); -- dw_pcie_writel_rc(pp, busdev, PCIE_ATU_LOWER_TARGET); -- dw_pcie_writel_rc(pp, 0, PCIE_ATU_UPPER_TARGET); -- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); --} -+ if (!pci_has_flag(PCI_PROBE_ONLY)) { -+ pci_bus_size_bridges(bus); -+ pci_bus_assign_resources(bus); - --static void dw_pcie_prog_viewport_mem_outbound(struct pcie_port *pp) --{ -- /* Program viewport 0 : OUTBOUND : MEM */ -- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX0, -- 
PCIE_ATU_VIEWPORT); -- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_MEM, PCIE_ATU_CR1); -- dw_pcie_writel_rc(pp, pp->mem_mod_base, PCIE_ATU_LOWER_BASE); -- dw_pcie_writel_rc(pp, (pp->mem_mod_base >> 32), PCIE_ATU_UPPER_BASE); -- dw_pcie_writel_rc(pp, pp->mem_mod_base + pp->mem_size - 1, -- PCIE_ATU_LIMIT); -- dw_pcie_writel_rc(pp, pp->mem_bus_addr, PCIE_ATU_LOWER_TARGET); -- dw_pcie_writel_rc(pp, upper_32_bits(pp->mem_bus_addr), -- PCIE_ATU_UPPER_TARGET); -- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); --} -+ list_for_each_entry(child, &bus->children, node) -+ pcie_bus_configure_settings(child); -+ } - --static void dw_pcie_prog_viewport_io_outbound(struct pcie_port *pp) --{ -- /* Program viewport 1 : OUTBOUND : IO */ -- dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | PCIE_ATU_REGION_INDEX1, -- PCIE_ATU_VIEWPORT); -- dw_pcie_writel_rc(pp, PCIE_ATU_TYPE_IO, PCIE_ATU_CR1); -- dw_pcie_writel_rc(pp, pp->io_mod_base, PCIE_ATU_LOWER_BASE); -- dw_pcie_writel_rc(pp, (pp->io_mod_base >> 32), PCIE_ATU_UPPER_BASE); -- dw_pcie_writel_rc(pp, pp->io_mod_base + pp->io_size - 1, -- PCIE_ATU_LIMIT); -- dw_pcie_writel_rc(pp, pp->io_bus_addr, PCIE_ATU_LOWER_TARGET); -- dw_pcie_writel_rc(pp, upper_32_bits(pp->io_bus_addr), -- PCIE_ATU_UPPER_TARGET); -- dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); -+ pci_bus_add_devices(bus); -+ -+ return 0; - } - - static int dw_pcie_rd_other_conf(struct pcie_port *pp, struct pci_bus *bus, - u32 devfn, int where, int size, u32 *val) - { -- int ret = PCIBIOS_SUCCESSFUL; -- u32 address, busdev; -+ int ret, type; -+ u32 busdev, cfg_size; -+ u64 cpu_addr; -+ void __iomem *va_cfg_base; - - busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | - PCIE_ATU_FUNC(PCI_FUNC(devfn)); -- address = where & ~0x3; - - if (bus->parent->number == pp->root_bus_nr) { -- dw_pcie_prog_viewport_cfg0(pp, busdev); -- ret = dw_pcie_cfg_read(pp->va_cfg0_base + address, where, size, -- val); -- dw_pcie_prog_viewport_mem_outbound(pp); -+ type = 
PCIE_ATU_TYPE_CFG0; -+ cpu_addr = pp->cfg0_base; -+ cfg_size = pp->cfg0_size; -+ va_cfg_base = pp->va_cfg0_base; - } else { -- dw_pcie_prog_viewport_cfg1(pp, busdev); -- ret = dw_pcie_cfg_read(pp->va_cfg1_base + address, where, size, -- val); -- dw_pcie_prog_viewport_io_outbound(pp); -+ type = PCIE_ATU_TYPE_CFG1; -+ cpu_addr = pp->cfg1_base; -+ cfg_size = pp->cfg1_size; -+ va_cfg_base = pp->va_cfg1_base; - } - -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, -+ type, cpu_addr, -+ busdev, cfg_size); -+ ret = dw_pcie_cfg_read(va_cfg_base + where, size, val); -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, -+ PCIE_ATU_TYPE_IO, pp->io_base, -+ pp->io_bus_addr, pp->io_size); -+ - return ret; - } - - static int dw_pcie_wr_other_conf(struct pcie_port *pp, struct pci_bus *bus, - u32 devfn, int where, int size, u32 val) - { -- int ret = PCIBIOS_SUCCESSFUL; -- u32 address, busdev; -+ int ret, type; -+ u32 busdev, cfg_size; -+ u64 cpu_addr; -+ void __iomem *va_cfg_base; - - busdev = PCIE_ATU_BUS(bus->number) | PCIE_ATU_DEV(PCI_SLOT(devfn)) | - PCIE_ATU_FUNC(PCI_FUNC(devfn)); -- address = where & ~0x3; - - if (bus->parent->number == pp->root_bus_nr) { -- dw_pcie_prog_viewport_cfg0(pp, busdev); -- ret = dw_pcie_cfg_write(pp->va_cfg0_base + address, where, size, -- val); -- dw_pcie_prog_viewport_mem_outbound(pp); -+ type = PCIE_ATU_TYPE_CFG0; -+ cpu_addr = pp->cfg0_base; -+ cfg_size = pp->cfg0_size; -+ va_cfg_base = pp->va_cfg0_base; - } else { -- dw_pcie_prog_viewport_cfg1(pp, busdev); -- ret = dw_pcie_cfg_write(pp->va_cfg1_base + address, where, size, -- val); -- dw_pcie_prog_viewport_io_outbound(pp); -+ type = PCIE_ATU_TYPE_CFG1; -+ cpu_addr = pp->cfg1_base; -+ cfg_size = pp->cfg1_size; -+ va_cfg_base = pp->va_cfg1_base; - } - -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, -+ type, cpu_addr, -+ busdev, cfg_size); -+ ret = dw_pcie_cfg_write(va_cfg_base + where, size, val); -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, -+ PCIE_ATU_TYPE_IO, 
pp->io_base, -+ pp->io_bus_addr, pp->io_size); -+ - return ret; - } - -@@ -650,7 +441,7 @@ static int dw_pcie_valid_config(struct pcie_port *pp, - static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, - int size, u32 *val) - { -- struct pcie_port *pp = sys_to_pcie(bus->sysdata); -+ struct pcie_port *pp = bus->sysdata; - int ret; - - if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) { -@@ -674,7 +465,7 @@ static int dw_pcie_rd_conf(struct pci_bus *bus, u32 devfn, int where, - static int dw_pcie_wr_conf(struct pci_bus *bus, u32 devfn, - int where, int size, u32 val) - { -- struct pcie_port *pp = sys_to_pcie(bus->sysdata); -+ struct pcie_port *pp = bus->sysdata; - int ret; - - if (dw_pcie_valid_config(pp, bus, PCI_SLOT(devfn)) == 0) -@@ -698,75 +489,6 @@ static struct pci_ops dw_pcie_ops = { - .write = dw_pcie_wr_conf, - }; - --static int dw_pcie_setup(int nr, struct pci_sys_data *sys) --{ -- struct pcie_port *pp; -- -- pp = sys_to_pcie(sys); -- -- if (global_io_offset < SZ_1M && pp->io_size > 0) { -- sys->io_offset = global_io_offset - pp->io_bus_addr; -- pci_ioremap_io(global_io_offset, pp->io_base); -- global_io_offset += SZ_64K; -- pci_add_resource_offset(&sys->resources, &pp->io, -- sys->io_offset); -- } -- -- sys->mem_offset = pp->mem.start - pp->mem_bus_addr; -- pci_add_resource_offset(&sys->resources, &pp->mem, sys->mem_offset); -- pci_add_resource(&sys->resources, &pp->busn); -- -- return 1; --} -- --static struct pci_bus *dw_pcie_scan_bus(int nr, struct pci_sys_data *sys) --{ -- struct pci_bus *bus; -- struct pcie_port *pp = sys_to_pcie(sys); -- -- pp->root_bus_nr = sys->busnr; -- bus = pci_create_root_bus(pp->dev, sys->busnr, -- &dw_pcie_ops, sys, &sys->resources); -- if (!bus) -- return NULL; -- -- pci_scan_child_bus(bus); -- -- if (bus && pp->ops->scan_bus) -- pp->ops->scan_bus(pp); -- -- return bus; --} -- --static int dw_pcie_map_irq(const struct pci_dev *dev, u8 slot, u8 pin) --{ -- struct pcie_port *pp = 
sys_to_pcie(dev->bus->sysdata); -- int irq; -- -- irq = of_irq_parse_and_map_pci(dev, slot, pin); -- if (!irq) -- irq = pp->irq; -- -- return irq; --} -- --static void dw_pcie_add_bus(struct pci_bus *bus) --{ -- if (IS_ENABLED(CONFIG_PCI_MSI)) { -- struct pcie_port *pp = sys_to_pcie(bus->sysdata); -- -- dw_pcie_msi_chip.dev = pp->dev; -- bus->msi = &dw_pcie_msi_chip; -- } --} -- --static struct hw_pci dw_pci = { -- .setup = dw_pcie_setup, -- .scan = dw_pcie_scan_bus, -- .map_irq = dw_pcie_map_irq, -- .add_bus = dw_pcie_add_bus, --}; -- - void dw_pcie_setup_rc(struct pcie_port *pp) - { - u32 val; -@@ -786,6 +508,12 @@ void dw_pcie_setup_rc(struct pcie_port *pp) - case 4: - val |= PORT_LINK_MODE_4_LANES; - break; -+ case 8: -+ val |= PORT_LINK_MODE_8_LANES; -+ break; -+ default: -+ dev_err(pp->dev, "num-lanes %u: invalid value\n", pp->lanes); -+ return; - } - dw_pcie_writel_rc(pp, val, PCIE_PORT_LINK_CONTROL); - -@@ -802,6 +530,9 @@ void dw_pcie_setup_rc(struct pcie_port *pp) - case 4: - val |= PORT_LOGIC_LINK_WIDTH_4_LANES; - break; -+ case 8: -+ val |= PORT_LOGIC_LINK_WIDTH_8_LANES; -+ break; - } - dw_pcie_writel_rc(pp, val, PCIE_LINK_WIDTH_SPEED_CONTROL); - -diff --git a/drivers/pci/host/pcie-designware.h b/drivers/pci/host/pcie-designware.h -index c625675..2f01284 100644 ---- a/drivers/pci/host/pcie-designware.h -+++ b/drivers/pci/host/pcie-designware.h -@@ -27,28 +27,25 @@ struct pcie_port { - u8 root_bus_nr; - void __iomem *dbi_base; - u64 cfg0_base; -- u64 cfg0_mod_base; - void __iomem *va_cfg0_base; - u32 cfg0_size; - u64 cfg1_base; -- u64 cfg1_mod_base; - void __iomem *va_cfg1_base; - u32 cfg1_size; -- u64 io_base; -- u64 io_mod_base; -+ resource_size_t io_base; - phys_addr_t io_bus_addr; - u32 io_size; - u64 mem_base; -- u64 mem_mod_base; - phys_addr_t mem_bus_addr; - u32 mem_size; -- struct resource cfg; -- struct resource io; -- struct resource mem; -- struct resource busn; -+ struct resource *cfg; -+ struct resource *io; -+ struct resource *mem; -+ 
struct resource *busn; - int irq; - u32 lanes; - struct pcie_host_ops *ops; -+ struct msi_controller *msi; - int msi_irq; - struct irq_domain *irq_domain; - unsigned long msi_data; -@@ -70,14 +67,14 @@ struct pcie_host_ops { - void (*host_init)(struct pcie_port *pp); - void (*msi_set_irq)(struct pcie_port *pp, int irq); - void (*msi_clear_irq)(struct pcie_port *pp, int irq); -- u32 (*get_msi_addr)(struct pcie_port *pp); -+ phys_addr_t (*get_msi_addr)(struct pcie_port *pp); - u32 (*get_msi_data)(struct pcie_port *pp, int pos); - void (*scan_bus)(struct pcie_port *pp); -- int (*msi_host_init)(struct pcie_port *pp, struct msi_chip *chip); -+ int (*msi_host_init)(struct pcie_port *pp, struct msi_controller *chip); - }; - --int dw_pcie_cfg_read(void __iomem *addr, int where, int size, u32 *val); --int dw_pcie_cfg_write(void __iomem *addr, int where, int size, u32 val); -+int dw_pcie_cfg_read(void __iomem *addr, int size, u32 *val); -+int dw_pcie_cfg_write(void __iomem *addr, int size, u32 val); - irqreturn_t dw_handle_msi_irq(struct pcie_port *pp); - void dw_pcie_msi_init(struct pcie_port *pp); - int dw_pcie_link_up(struct pcie_port *pp); -diff --git a/drivers/pci/host/pcie-rcar.c b/drivers/pci/host/pcie-rcar.c -index 61158e0..f8ec96d 100644 ---- a/drivers/pci/host/pcie-rcar.c -+++ b/drivers/pci/host/pcie-rcar.c -@@ -111,14 +111,14 @@ - struct rcar_msi { - DECLARE_BITMAP(used, INT_PCI_MSI_NR); - struct irq_domain *domain; -- struct msi_chip chip; -+ struct msi_controller chip; - unsigned long pages; - struct mutex lock; - int irq1; - int irq2; - }; - --static inline struct rcar_msi *to_rcar_msi(struct msi_chip *chip) -+static inline struct rcar_msi *to_rcar_msi(struct msi_controller *chip) - { - return container_of(chip, struct rcar_msi, chip); - } -@@ -404,9 +404,6 @@ static void rcar_pcie_enable(struct rcar_pcie *pcie) - rcar_pci.private_data = (void **)&pcie; - - pci_common_init_dev(&pdev->dev, &rcar_pci); --#ifdef CONFIG_PCI_DOMAINS -- rcar_pci.domain++; --#endif - 
} - - static int phy_wait_for_ack(struct rcar_pcie *pcie) -@@ -622,7 +619,7 @@ static irqreturn_t rcar_pcie_msi_irq(int irq, void *data) - return IRQ_HANDLED; - } - --static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, -+static int rcar_msi_setup_irq(struct msi_controller *chip, struct pci_dev *pdev, - struct msi_desc *desc) - { - struct rcar_msi *msi = to_rcar_msi(chip); -@@ -647,12 +644,12 @@ static int rcar_msi_setup_irq(struct msi_chip *chip, struct pci_dev *pdev, - msg.address_hi = rcar_pci_read_reg(pcie, PCIEMSIAUR); - msg.data = hwirq; - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - - return 0; - } - --static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) -+static void rcar_msi_teardown_irq(struct msi_controller *chip, unsigned int irq) - { - struct rcar_msi *msi = to_rcar_msi(chip); - struct irq_data *d = irq_get_irq_data(irq); -@@ -662,10 +659,10 @@ static void rcar_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) - - static struct irq_chip rcar_msi_irq_chip = { - .name = "R-Car PCIe MSI", -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_enable = pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, -@@ -673,7 +670,6 @@ static int rcar_msi_map(struct irq_domain *domain, unsigned int irq, - { - irq_set_chip_and_handler(irq, &rcar_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - return 0; - } -diff --git a/drivers/pci/host/pcie-xilinx.c b/drivers/pci/host/pcie-xilinx.c -index ccc496b..eef849c 100644 ---- a/drivers/pci/host/pcie-xilinx.c -+++ b/drivers/pci/host/pcie-xilinx.c -@@ -297,18 +297,16 @@ static struct pci_ops xilinx_pcie_ops = { - */ - static void xilinx_pcie_destroy_msi(unsigned 
int irq) - { -- struct irq_desc *desc; - struct msi_desc *msi; - struct xilinx_pcie_port *port; - -- desc = irq_to_desc(irq); -- msi = irq_desc_get_msi_desc(desc); -- port = sys_to_pcie(msi->dev->bus->sysdata); -- -- if (!test_bit(irq, msi_irq_in_use)) -+ if (!test_bit(irq, msi_irq_in_use)) { -+ msi = irq_get_msi_desc(irq); -+ port = sys_to_pcie(msi_desc_to_pci_sys_data(msi)); - dev_err(port->dev, "Trying to free unused MSI#%d\n", irq); -- else -+ } else { - clear_bit(irq, msi_irq_in_use); -+ } - } - - /** -@@ -335,7 +333,8 @@ static int xilinx_pcie_assign_msi(struct xilinx_pcie_port *port) - * @chip: MSI Chip descriptor - * @irq: MSI IRQ to destroy - */ --static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) -+static void xilinx_msi_teardown_irq(struct msi_controller *chip, -+ unsigned int irq) - { - xilinx_pcie_destroy_msi(irq); - } -@@ -348,7 +347,7 @@ static void xilinx_msi_teardown_irq(struct msi_chip *chip, unsigned int irq) - * - * Return: '0' on success and error value on failure - */ --static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip, -+static int xilinx_pcie_msi_setup_irq(struct msi_controller *chip, - struct pci_dev *pdev, - struct msi_desc *desc) - { -@@ -374,13 +373,13 @@ static int xilinx_pcie_msi_setup_irq(struct msi_chip *chip, - msg.address_lo = msg_addr; - msg.data = irq; - -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - - return 0; - } - - /* MSI Chip Descriptor */ --static struct msi_chip xilinx_pcie_msi_chip = { -+static struct msi_controller xilinx_pcie_msi_chip = { - .setup_irq = xilinx_pcie_msi_setup_irq, - .teardown_irq = xilinx_msi_teardown_irq, - }; -@@ -388,10 +387,10 @@ static struct msi_chip xilinx_pcie_msi_chip = { - /* HW Interrupt Chip Descriptor */ - static struct irq_chip xilinx_msi_irq_chip = { - .name = "Xilinx PCIe MSI", -- .irq_enable = unmask_msi_irq, -- .irq_disable = mask_msi_irq, -- .irq_mask = mask_msi_irq, -- .irq_unmask = unmask_msi_irq, -+ .irq_enable = 
pci_msi_unmask_irq, -+ .irq_disable = pci_msi_mask_irq, -+ .irq_mask = pci_msi_mask_irq, -+ .irq_unmask = pci_msi_unmask_irq, - }; - - /** -@@ -407,7 +406,6 @@ static int xilinx_pcie_msi_map(struct irq_domain *domain, unsigned int irq, - { - irq_set_chip_and_handler(irq, &xilinx_msi_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - return 0; - } -@@ -431,20 +429,6 @@ static void xilinx_pcie_enable_msi(struct xilinx_pcie_port *port) - pcie_write(port, msg_addr, XILINX_PCIE_REG_MSIBASE2); - } - --/** -- * xilinx_pcie_add_bus - Add MSI chip info to PCIe bus -- * @bus: PCIe bus -- */ --static void xilinx_pcie_add_bus(struct pci_bus *bus) --{ -- if (IS_ENABLED(CONFIG_PCI_MSI)) { -- struct xilinx_pcie_port *port = sys_to_pcie(bus->sysdata); -- -- xilinx_pcie_msi_chip.dev = port->dev; -- bus->msi = &xilinx_pcie_msi_chip; -- } --} -- - /* INTx Functions */ - - /** -@@ -460,7 +444,6 @@ static int xilinx_pcie_intx_map(struct irq_domain *domain, unsigned int irq, - { - irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_simple_irq); - irq_set_chip_data(irq, domain->host_data); -- set_irq_flags(irq, IRQF_VALID); - - return 0; - } -@@ -730,9 +713,15 @@ static struct pci_bus *xilinx_pcie_scan_bus(int nr, struct pci_sys_data *sys) - struct pci_bus *bus; - - port->root_busno = sys->busnr; -- bus = pci_scan_root_bus(port->dev, sys->busnr, &xilinx_pcie_ops, -- sys, &sys->resources); - -+ if (IS_ENABLED(CONFIG_PCI_MSI)) -+ bus = pci_scan_root_bus_msi(port->dev, sys->busnr, -+ &xilinx_pcie_ops, sys, -+ &sys->resources, -+ &xilinx_pcie_msi_chip); -+ else -+ bus = pci_scan_root_bus(port->dev, sys->busnr, -+ &xilinx_pcie_ops, sys, &sys->resources); - return bus; - } - -@@ -750,7 +739,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port) - resource_size_t offset; - struct of_pci_range_parser parser; - struct of_pci_range range; -- struct pci_host_bridge_window *win; -+ struct resource_entry *win; - 
int err = 0, mem_resno = 0; - - /* Get the ranges */ -@@ -820,7 +809,7 @@ static int xilinx_pcie_parse_and_add_res(struct xilinx_pcie_port *port) - - free_resources: - release_child_resources(&iomem_resource); -- list_for_each_entry(win, &port->resources, list) -+ resource_list_for_each_entry(win, &port->resources) - devm_kfree(dev, win->res); - pci_free_resource_list(&port->resources); - -@@ -924,10 +913,13 @@ static int xilinx_pcie_probe(struct platform_device *pdev) - .private_data = (void **)&port, - .setup = xilinx_pcie_setup, - .map_irq = of_irq_parse_and_map_pci, -- .add_bus = xilinx_pcie_add_bus, - .scan = xilinx_pcie_scan_bus, - .ops = &xilinx_pcie_ops, - }; -+ -+#ifdef CONFIG_PCI_MSI -+ xilinx_pcie_msi_chip.dev = port->dev; -+#endif - pci_common_init_dev(dev, &hw); - - return 0; -diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c -index 084587d..5dd4c96 100644 ---- a/drivers/pci/msi.c -+++ b/drivers/pci/msi.c -@@ -19,19 +19,81 @@ - #include - #include - #include -+#include - - #include "pci.h" - - static int pci_msi_enable = 1; -+int pci_msi_ignore_mask; - - #define msix_table_size(flags) ((flags & PCI_MSIX_FLAGS_QSIZE) + 1) - -+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN -+static struct irq_domain *pci_msi_default_domain; -+static DEFINE_MUTEX(pci_msi_domain_lock); -+ -+struct irq_domain * __weak arch_get_pci_msi_domain(struct pci_dev *dev) -+{ -+ return pci_msi_default_domain; -+} -+ -+static struct irq_domain *pci_msi_get_domain(struct pci_dev *dev) -+{ -+ struct irq_domain *domain; -+ -+ domain = dev_get_msi_domain(&dev->dev); -+ if (domain) -+ return domain; -+ -+ return arch_get_pci_msi_domain(dev); -+} -+ -+static int pci_msi_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) -+{ -+ struct irq_domain *domain; -+ -+ domain = pci_msi_get_domain(dev); -+ if (domain) -+ return pci_msi_domain_alloc_irqs(domain, dev, nvec, type); -+ -+ return arch_setup_msi_irqs(dev, nvec, type); -+} -+ -+static void pci_msi_teardown_msi_irqs(struct pci_dev *dev) -+{ -+ struct 
irq_domain *domain; -+ -+ domain = pci_msi_get_domain(dev); -+ if (domain) -+ pci_msi_domain_free_irqs(domain, dev); -+ else -+ arch_teardown_msi_irqs(dev); -+} -+#else -+#define pci_msi_setup_msi_irqs arch_setup_msi_irqs -+#define pci_msi_teardown_msi_irqs arch_teardown_msi_irqs -+#endif - - /* Arch hooks */ - -+struct msi_controller * __weak pcibios_msi_controller(struct pci_dev *dev) -+{ -+ return NULL; -+} -+ -+static struct msi_controller *pci_msi_controller(struct pci_dev *dev) -+{ -+ struct msi_controller *msi_ctrl = dev->bus->msi; -+ -+ if (msi_ctrl) -+ return msi_ctrl; -+ -+ return pcibios_msi_controller(dev); -+} -+ - int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) - { -- struct msi_chip *chip = dev->bus->msi; -+ struct msi_controller *chip = pci_msi_controller(dev); - int err; - - if (!chip || !chip->setup_irq) -@@ -48,7 +110,7 @@ int __weak arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc) - - void __weak arch_teardown_msi_irq(unsigned int irq) - { -- struct msi_chip *chip = irq_get_chip_data(irq); -+ struct msi_controller *chip = irq_get_chip_data(irq); - - if (!chip || !chip->teardown_irq) - return; -@@ -58,9 +120,12 @@ void __weak arch_teardown_msi_irq(unsigned int irq) - - int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - { -+ struct msi_controller *chip = dev->bus->msi; - struct msi_desc *entry; - int ret; - -+ if (chip && chip->setup_irqs) -+ return chip->setup_irqs(chip, dev, nvec, type); - /* - * If an architecture wants to support multiple MSI, it needs to - * override arch_setup_msi_irqs() -@@ -68,7 +133,7 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - if (type == PCI_CAP_ID_MSI && nvec > 1) - return 1; - -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - ret = arch_setup_msi_irq(dev, entry); - if (ret < 0) - return ret; -@@ -85,19 +150,13 @@ int __weak arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int 
type) - */ - void default_teardown_msi_irqs(struct pci_dev *dev) - { -+ int i; - struct msi_desc *entry; - -- list_for_each_entry(entry, &dev->msi_list, list) { -- int i, nvec; -- if (entry->irq == 0) -- continue; -- if (entry->nvec_used) -- nvec = entry->nvec_used; -- else -- nvec = 1 << entry->msi_attrib.multiple; -- for (i = 0; i < nvec; i++) -- arch_teardown_msi_irq(entry->irq + i); -- } -+ for_each_pci_msi_entry(entry, dev) -+ if (entry->irq) -+ for (i = 0; i < entry->nvec_used; i++) -+ arch_teardown_msi_irq(entry->irq + i); - } - - void __weak arch_teardown_msi_irqs(struct pci_dev *dev) -@@ -111,7 +170,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq) - - entry = NULL; - if (dev->msix_enabled) { -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - if (irq == entry->irq) - break; - } -@@ -120,7 +179,7 @@ static void default_restore_msi_irq(struct pci_dev *dev, int irq) - } - - if (entry) -- __write_msi_msg(entry, &entry->msg); -+ __pci_write_msi_msg(entry, &entry->msg); - } - - void __weak arch_restore_msi_irqs(struct pci_dev *dev) -@@ -128,27 +187,6 @@ void __weak arch_restore_msi_irqs(struct pci_dev *dev) - return default_restore_msi_irqs(dev); - } - --static void msi_set_enable(struct pci_dev *dev, int enable) --{ -- u16 control; -- -- pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); -- control &= ~PCI_MSI_FLAGS_ENABLE; -- if (enable) -- control |= PCI_MSI_FLAGS_ENABLE; -- pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); --} -- --static void msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) --{ -- u16 ctrl; -- -- pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); -- ctrl &= ~clear; -- ctrl |= set; -- pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); --} -- - static inline __attribute_const__ u32 msi_mask(unsigned x) - { - /* Don't shift by >= width of type */ -@@ -163,28 +201,24 @@ static inline 
__attribute_const__ u32 msi_mask(unsigned x) - * reliably as devices without an INTx disable bit will then generate a - * level IRQ which will never be cleared. - */ --u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) -+u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) - { - u32 mask_bits = desc->masked; - -- if (!desc->msi_attrib.maskbit) -+ if (pci_msi_ignore_mask || !desc->msi_attrib.maskbit) - return 0; - - mask_bits &= ~mask; - mask_bits |= flag; -- pci_write_config_dword(desc->dev, desc->mask_pos, mask_bits); -+ pci_write_config_dword(msi_desc_to_pci_dev(desc), desc->mask_pos, -+ mask_bits); - - return mask_bits; - } - --__weak u32 arch_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) --{ -- return default_msi_mask_irq(desc, mask, flag); --} -- - static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) - { -- desc->masked = arch_msi_mask_irq(desc, mask, flag); -+ desc->masked = __pci_msi_desc_mask_irq(desc, mask, flag); - } - - /* -@@ -194,11 +228,15 @@ static void msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag) - * file. This saves a few milliseconds when initialising devices with lots - * of MSI-X interrupts. 
- */ --u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag) -+u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag) - { - u32 mask_bits = desc->masked; - unsigned offset = desc->msi_attrib.entry_nr * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL; -+ -+ if (pci_msi_ignore_mask) -+ return 0; -+ - mask_bits &= ~PCI_MSIX_ENTRY_CTRL_MASKBIT; - if (flag) - mask_bits |= PCI_MSIX_ENTRY_CTRL_MASKBIT; -@@ -207,19 +245,14 @@ u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag) - return mask_bits; - } - --__weak u32 arch_msix_mask_irq(struct msi_desc *desc, u32 flag) --{ -- return default_msix_mask_irq(desc, flag); --} -- - static void msix_mask_irq(struct msi_desc *desc, u32 flag) - { -- desc->masked = arch_msix_mask_irq(desc, flag); -+ desc->masked = __pci_msix_desc_mask_irq(desc, flag); - } - - static void msi_set_mask_bit(struct irq_data *data, u32 flag) - { -- struct msi_desc *desc = irq_data_get_msi(data); -+ struct msi_desc *desc = irq_data_get_msi_desc(data); - - if (desc->msi_attrib.is_msix) { - msix_mask_irq(desc, flag); -@@ -230,12 +263,20 @@ static void msi_set_mask_bit(struct irq_data *data, u32 flag) - } - } - --void mask_msi_irq(struct irq_data *data) -+/** -+ * pci_msi_mask_irq - Generic irq chip callback to mask PCI/MSI interrupts -+ * @data: pointer to irqdata associated to that interrupt -+ */ -+void pci_msi_mask_irq(struct irq_data *data) - { - msi_set_mask_bit(data, 1); - } - --void unmask_msi_irq(struct irq_data *data) -+/** -+ * pci_msi_unmask_irq - Generic irq chip callback to unmask PCI/MSI interrupts -+ * @data: pointer to irqdata associated to that interrupt -+ */ -+void pci_msi_unmask_irq(struct irq_data *data) - { - msi_set_mask_bit(data, 0); - } -@@ -244,14 +285,15 @@ void default_restore_msi_irqs(struct pci_dev *dev) - { - struct msi_desc *entry; - -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) - default_restore_msi_irq(dev, entry->irq); -- } - } - --void __read_msi_msg(struct 
msi_desc *entry, struct msi_msg *msg) -+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - { -- BUG_ON(entry->dev->current_state != PCI_D0); -+ struct pci_dev *dev = msi_desc_to_pci_dev(entry); -+ -+ BUG_ON(dev->current_state != PCI_D0); - - if (entry->msi_attrib.is_msix) { - void __iomem *base = entry->mask_base + -@@ -261,7 +303,6 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - msg->address_hi = readl(base + PCI_MSIX_ENTRY_UPPER_ADDR); - msg->data = readl(base + PCI_MSIX_ENTRY_DATA); - } else { -- struct pci_dev *dev = entry->dev; - int pos = dev->msi_cap; - u16 data; - -@@ -279,34 +320,11 @@ void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - } - } - --void read_msi_msg(unsigned int irq, struct msi_msg *msg) --{ -- struct msi_desc *entry = irq_get_msi_desc(irq); -- -- __read_msi_msg(entry, msg); --} -- --void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) --{ -- /* Assert that the cache is valid, assuming that -- * valid messages are not all-zeroes. 
*/ -- BUG_ON(!(entry->msg.address_hi | entry->msg.address_lo | -- entry->msg.data)); -- -- *msg = entry->msg; --} -- --void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) -+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - { -- struct msi_desc *entry = irq_get_msi_desc(irq); -- -- __get_cached_msi_msg(entry, msg); --} --EXPORT_SYMBOL_GPL(get_cached_msi_msg); -+ struct pci_dev *dev = msi_desc_to_pci_dev(entry); - --void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) --{ -- if (entry->dev->current_state != PCI_D0) { -+ if (dev->current_state != PCI_D0) { - /* Don't touch the hardware now */ - } else if (entry->msi_attrib.is_msix) { - void __iomem *base; -@@ -317,7 +335,6 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - writel(msg->address_hi, base + PCI_MSIX_ENTRY_UPPER_ADDR); - writel(msg->data, base + PCI_MSIX_ENTRY_DATA); - } else { -- struct pci_dev *dev = entry->dev; - int pos = dev->msi_cap; - u16 msgctl; - -@@ -341,38 +358,32 @@ void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) - entry->msg = *msg; - } - --void write_msi_msg(unsigned int irq, struct msi_msg *msg) -+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg) - { - struct msi_desc *entry = irq_get_msi_desc(irq); - -- __write_msi_msg(entry, msg); -+ __pci_write_msi_msg(entry, msg); - } --EXPORT_SYMBOL_GPL(write_msi_msg); -+EXPORT_SYMBOL_GPL(pci_write_msi_msg); - - static void free_msi_irqs(struct pci_dev *dev) - { -+ struct list_head *msi_list = dev_to_msi_list(&dev->dev); - struct msi_desc *entry, *tmp; - struct attribute **msi_attrs; - struct device_attribute *dev_attr; -- int count = 0; -+ int i, count = 0; - -- list_for_each_entry(entry, &dev->msi_list, list) { -- int i, nvec; -- if (!entry->irq) -- continue; -- if (entry->nvec_used) -- nvec = entry->nvec_used; -- else -- nvec = 1 << entry->msi_attrib.multiple; -- for (i = 0; i < nvec; i++) -- BUG_ON(irq_has_action(entry->irq + i)); -- } -+ 
for_each_pci_msi_entry(entry, dev) -+ if (entry->irq) -+ for (i = 0; i < entry->nvec_used; i++) -+ BUG_ON(irq_has_action(entry->irq + i)); - -- arch_teardown_msi_irqs(dev); -+ pci_msi_teardown_msi_irqs(dev); - -- list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) { -+ list_for_each_entry_safe(entry, tmp, msi_list, list) { - if (entry->msi_attrib.is_msix) { -- if (list_is_last(&entry->list, &dev->msi_list)) -+ if (list_is_last(&entry->list, msi_list)) - iounmap(entry->mask_base); - } - -@@ -397,18 +408,6 @@ static void free_msi_irqs(struct pci_dev *dev) - } - } - --static struct msi_desc *alloc_msi_entry(struct pci_dev *dev) --{ -- struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); -- if (!desc) -- return NULL; -- -- INIT_LIST_HEAD(&desc->list); -- desc->dev = dev; -- -- return desc; --} -- - static void pci_intx_for_msi(struct pci_dev *dev, int enable) - { - if (!(dev->dev_flags & PCI_DEV_FLAGS_MSI_INTX_DISABLE_BUG)) -@@ -426,7 +425,7 @@ static void __pci_restore_msi_state(struct pci_dev *dev) - entry = irq_get_msi_desc(dev->irq); - - pci_intx_for_msi(dev, 0); -- msi_set_enable(dev, 0); -+ pci_msi_set_enable(dev, 0); - arch_restore_msi_irqs(dev); - - pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); -@@ -443,19 +442,18 @@ static void __pci_restore_msix_state(struct pci_dev *dev) - - if (!dev->msix_enabled) - return; -- BUG_ON(list_empty(&dev->msi_list)); -+ BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); - - /* route the table */ - pci_intx_for_msi(dev, 0); -- msix_clear_and_set_ctrl(dev, 0, -+ pci_msix_clear_and_set_ctrl(dev, 0, - PCI_MSIX_FLAGS_ENABLE | PCI_MSIX_FLAGS_MASKALL); - - arch_restore_msi_irqs(dev); -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) - msix_mask_irq(entry, entry->masked); -- } - -- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); -+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); - } - - void pci_restore_msi_state(struct pci_dev *dev) -@@ 
-497,9 +495,8 @@ static int populate_msi_sysfs(struct pci_dev *pdev) - int count = 0; - - /* Determine how many msi entries we have */ -- list_for_each_entry(entry, &pdev->msi_list, list) { -+ for_each_pci_msi_entry(entry, pdev) - ++num_msi; -- } - if (!num_msi) - return 0; - -@@ -507,7 +504,7 @@ static int populate_msi_sysfs(struct pci_dev *pdev) - msi_attrs = kzalloc(sizeof(void *) * (num_msi + 1), GFP_KERNEL); - if (!msi_attrs) - return -ENOMEM; -- list_for_each_entry(entry, &pdev->msi_list, list) { -+ for_each_pci_msi_entry(entry, pdev) { - msi_dev_attr = kzalloc(sizeof(*msi_dev_attr), GFP_KERNEL); - if (!msi_dev_attr) - goto error_attrs; -@@ -559,13 +556,13 @@ error_attrs: - return ret; - } - --static struct msi_desc *msi_setup_entry(struct pci_dev *dev) -+static struct msi_desc *msi_setup_entry(struct pci_dev *dev, int nvec) - { - u16 control; - struct msi_desc *entry; - - /* MSI Entry Initialization */ -- entry = alloc_msi_entry(dev); -+ entry = alloc_msi_entry(&dev->dev); - if (!entry) - return NULL; - -@@ -577,6 +574,8 @@ static struct msi_desc *msi_setup_entry(struct pci_dev *dev) - entry->msi_attrib.maskbit = !!(control & PCI_MSI_FLAGS_MASKBIT); - entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */ - entry->msi_attrib.multi_cap = (control & PCI_MSI_FLAGS_QMASK) >> 1; -+ entry->msi_attrib.multiple = ilog2(__roundup_pow_of_two(nvec)); -+ entry->nvec_used = nvec; - - if (control & PCI_MSI_FLAGS_64BIT) - entry->mask_pos = dev->msi_cap + PCI_MSI_MASK_64; -@@ -594,7 +593,7 @@ static int msi_verify_entries(struct pci_dev *dev) - { - struct msi_desc *entry; - -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - if (!dev->no_64bit_msi || !entry->msg.address_hi) - continue; - dev_err(&dev->dev, "Device has broken 64-bit MSI but arch" -@@ -621,9 +620,9 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) - int ret; - unsigned mask; - -- msi_set_enable(dev, 0); /* Disable MSI during set up */ -+ 
pci_msi_set_enable(dev, 0); /* Disable MSI during set up */ - -- entry = msi_setup_entry(dev); -+ entry = msi_setup_entry(dev, nvec); - if (!entry) - return -ENOMEM; - -@@ -631,10 +630,10 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) - mask = msi_mask(entry->msi_attrib.multi_cap); - msi_mask_irq(entry, mask, mask); - -- list_add_tail(&entry->list, &dev->msi_list); -+ list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); - - /* Configure MSI capability structure */ -- ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); -+ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI); - if (ret) { - msi_mask_irq(entry, mask, ~mask); - free_msi_irqs(dev); -@@ -657,7 +656,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec) - - /* Set MSI enabled bits */ - pci_intx_for_msi(dev, 0); -- msi_set_enable(dev, 1); -+ pci_msi_set_enable(dev, 1); - dev->msi_enabled = 1; - - dev->irq = entry->irq; -@@ -686,7 +685,7 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, - int i; - - for (i = 0; i < nvec; i++) { -- entry = alloc_msi_entry(dev); -+ entry = alloc_msi_entry(&dev->dev); - if (!entry) { - if (!i) - iounmap(base); -@@ -701,8 +700,9 @@ static int msix_setup_entries(struct pci_dev *dev, void __iomem *base, - entry->msi_attrib.entry_nr = entries[i].entry; - entry->msi_attrib.default_irq = dev->irq; - entry->mask_base = base; -+ entry->nvec_used = 1; - -- list_add_tail(&entry->list, &dev->msi_list); -+ list_add_tail(&entry->list, dev_to_msi_list(&dev->dev)); - } - - return 0; -@@ -714,12 +714,11 @@ static void msix_program_entries(struct pci_dev *dev, - struct msi_desc *entry; - int i = 0; - -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE + - PCI_MSIX_ENTRY_VECTOR_CTRL; - - entries[i].vector = entry->irq; -- irq_set_msi_desc(entry->irq, entry); - entry->masked = readl(entry->mask_base + offset); - msix_mask_irq(entry, 1); - 
i++; -@@ -744,7 +743,7 @@ static int msix_capability_init(struct pci_dev *dev, - void __iomem *base; - - /* Ensure MSI-X is disabled while it is set up */ -- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); -+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); - - pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &control); - /* Request & Map MSI-X table region */ -@@ -756,7 +755,7 @@ static int msix_capability_init(struct pci_dev *dev, - if (ret) - return ret; - -- ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); -+ ret = pci_msi_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX); - if (ret) - goto out_avail; - -@@ -770,7 +769,7 @@ static int msix_capability_init(struct pci_dev *dev, - * MSI-X registers. We need to mask all the vectors to prevent - * interrupts coming in before they're fully set up. - */ -- msix_clear_and_set_ctrl(dev, 0, -+ pci_msix_clear_and_set_ctrl(dev, 0, - PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE); - - msix_program_entries(dev, entries); -@@ -783,7 +782,7 @@ static int msix_capability_init(struct pci_dev *dev, - pci_intx_for_msi(dev, 0); - dev->msix_enabled = 1; - -- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); -+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_MASKALL, 0); - - return 0; - -@@ -796,7 +795,7 @@ out_avail: - struct msi_desc *entry; - int avail = 0; - -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - if (entry->irq != 0) - avail++; - } -@@ -885,17 +884,17 @@ void pci_msi_shutdown(struct pci_dev *dev) - if (!pci_msi_enable || !dev || !dev->msi_enabled) - return; - -- BUG_ON(list_empty(&dev->msi_list)); -- desc = list_first_entry(&dev->msi_list, struct msi_desc, list); -+ BUG_ON(list_empty(dev_to_msi_list(&dev->dev))); -+ desc = first_pci_msi_entry(dev); - -- msi_set_enable(dev, 0); -+ pci_msi_set_enable(dev, 0); - pci_intx_for_msi(dev, 1); - dev->msi_enabled = 0; - - /* Return the device with MSI unmasked as initial states */ - mask = 
msi_mask(desc->msi_attrib.multi_cap); - /* Keep cached state to be restored */ -- arch_msi_mask_irq(desc, mask, ~mask); -+ __pci_msi_desc_mask_irq(desc, mask, ~mask); - - /* Restore dev->irq to its default pin-assertion irq */ - dev->irq = desc->msi_attrib.default_irq; -@@ -991,12 +990,12 @@ void pci_msix_shutdown(struct pci_dev *dev) - return; - - /* Return the device with MSI-X masked as initial states */ -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - /* Keep cached states to be restored */ -- arch_msix_mask_irq(entry, 1); -+ __pci_msix_desc_mask_irq(entry, 1); - } - -- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); -+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); - pci_intx_for_msi(dev, 1); - dev->msix_enabled = 0; - } -@@ -1030,19 +1029,6 @@ EXPORT_SYMBOL(pci_msi_enabled); - - void pci_msi_init_pci_dev(struct pci_dev *dev) - { -- INIT_LIST_HEAD(&dev->msi_list); -- -- /* Disable the msi hardware to avoid screaming interrupts -- * during boot. This is the power on reset default so -- * usually this should be a noop. 
-- */ -- dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); -- if (dev->msi_cap) -- msi_set_enable(dev, 0); -- -- dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); -- if (dev->msix_cap) -- msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); - } - - /** -@@ -1138,3 +1124,217 @@ int pci_enable_msix_range(struct pci_dev *dev, struct msix_entry *entries, - return nvec; - } - EXPORT_SYMBOL(pci_enable_msix_range); -+ -+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc) -+{ -+ return to_pci_dev(desc->dev); -+} -+ -+void *msi_desc_to_pci_sysdata(struct msi_desc *desc) -+{ -+ struct pci_dev *dev = msi_desc_to_pci_dev(desc); -+ -+ return dev->bus->sysdata; -+} -+EXPORT_SYMBOL_GPL(msi_desc_to_pci_sysdata); -+ -+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN -+/** -+ * pci_msi_domain_write_msg - Helper to write MSI message to PCI config space -+ * @irq_data: Pointer to interrupt data of the MSI interrupt -+ * @msg: Pointer to the message -+ */ -+void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg) -+{ -+ struct msi_desc *desc = irq_data->msi_desc; -+ -+ /* -+ * For MSI-X desc->irq is always equal to irq_data->irq. For -+ * MSI only the first interrupt of MULTI MSI passes the test. -+ */ -+ if (desc->irq == irq_data->irq) -+ __pci_write_msi_msg(desc, msg); -+} -+ -+/** -+ * pci_msi_domain_calc_hwirq - Generate a unique ID for an MSI source -+ * @dev: Pointer to the PCI device -+ * @desc: Pointer to the msi descriptor -+ * -+ * The ID number is only used within the irqdomain. 
-+ */ -+irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, -+ struct msi_desc *desc) -+{ -+ return (irq_hw_number_t)desc->msi_attrib.entry_nr | -+ PCI_DEVID(dev->bus->number, dev->devfn) << 11 | -+ (pci_domain_nr(dev->bus) & 0xFFFFFFFF) << 27; -+} -+ -+static inline bool pci_msi_desc_is_multi_msi(struct msi_desc *desc) -+{ -+ return !desc->msi_attrib.is_msix && desc->nvec_used > 1; -+} -+ -+/** -+ * pci_msi_domain_check_cap - Verify that @domain supports the capabilities for @dev -+ * @domain: The interrupt domain to check -+ * @info: The domain info for verification -+ * @dev: The device to check -+ * -+ * Returns: -+ * 0 if the functionality is supported -+ * 1 if Multi MSI is requested, but the domain does not support it -+ * -ENOTSUPP otherwise -+ */ -+int pci_msi_domain_check_cap(struct irq_domain *domain, -+ struct msi_domain_info *info, struct device *dev) -+{ -+ struct msi_desc *desc = first_pci_msi_entry(to_pci_dev(dev)); -+ -+ /* Special handling to support pci_enable_msi_range() */ -+ if (pci_msi_desc_is_multi_msi(desc) && -+ !(info->flags & MSI_FLAG_MULTI_PCI_MSI)) -+ return 1; -+ else if (desc->msi_attrib.is_msix && !(info->flags & MSI_FLAG_PCI_MSIX)) -+ return -ENOTSUPP; -+ -+ return 0; -+} -+ -+static int pci_msi_domain_handle_error(struct irq_domain *domain, -+ struct msi_desc *desc, int error) -+{ -+ /* Special handling to support pci_enable_msi_range() */ -+ if (pci_msi_desc_is_multi_msi(desc) && error == -ENOSPC) -+ return 1; -+ -+ return error; -+} -+ -+#ifdef GENERIC_MSI_DOMAIN_OPS -+static void pci_msi_domain_set_desc(msi_alloc_info_t *arg, -+ struct msi_desc *desc) -+{ -+ arg->desc = desc; -+ arg->hwirq = pci_msi_domain_calc_hwirq(msi_desc_to_pci_dev(desc), -+ desc); -+} -+#else -+#define pci_msi_domain_set_desc NULL -+#endif -+ -+static struct msi_domain_ops pci_msi_domain_ops_default = { -+ .set_desc = pci_msi_domain_set_desc, -+ .msi_check = pci_msi_domain_check_cap, -+ .handle_error = pci_msi_domain_handle_error, -+}; -+ 
-+static void pci_msi_domain_update_dom_ops(struct msi_domain_info *info) -+{ -+ struct msi_domain_ops *ops = info->ops; -+ -+ if (ops == NULL) { -+ info->ops = &pci_msi_domain_ops_default; -+ } else { -+ if (ops->set_desc == NULL) -+ ops->set_desc = pci_msi_domain_set_desc; -+ if (ops->msi_check == NULL) -+ ops->msi_check = pci_msi_domain_check_cap; -+ if (ops->handle_error == NULL) -+ ops->handle_error = pci_msi_domain_handle_error; -+ } -+} -+ -+static void pci_msi_domain_update_chip_ops(struct msi_domain_info *info) -+{ -+ struct irq_chip *chip = info->chip; -+ -+ BUG_ON(!chip); -+ if (!chip->irq_write_msi_msg) -+ chip->irq_write_msi_msg = pci_msi_domain_write_msg; -+} -+ -+/** -+ * pci_msi_create_irq_domain - Creat a MSI interrupt domain -+ * @node: Optional device-tree node of the interrupt controller -+ * @info: MSI domain info -+ * @parent: Parent irq domain -+ * -+ * Updates the domain and chip ops and creates a MSI interrupt domain. -+ * -+ * Returns: -+ * A domain pointer or NULL in case of failure. 
-+ */ -+struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, -+ struct msi_domain_info *info, -+ struct irq_domain *parent) -+{ -+ struct irq_domain *domain; -+ -+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) -+ pci_msi_domain_update_dom_ops(info); -+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) -+ pci_msi_domain_update_chip_ops(info); -+ -+ domain = msi_create_irq_domain(node, info, parent); -+ if (!domain) -+ return NULL; -+ -+ domain->bus_token = DOMAIN_BUS_PCI_MSI; -+ return domain; -+} -+ -+/** -+ * pci_msi_domain_alloc_irqs - Allocate interrupts for @dev in @domain -+ * @domain: The interrupt domain to allocate from -+ * @dev: The device for which to allocate -+ * @nvec: The number of interrupts to allocate -+ * @type: Unused to allow simpler migration from the arch_XXX interfaces -+ * -+ * Returns: -+ * A virtual interrupt number or an error code in case of failure -+ */ -+int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, -+ int nvec, int type) -+{ -+ return msi_domain_alloc_irqs(domain, &dev->dev, nvec); -+} -+ -+/** -+ * pci_msi_domain_free_irqs - Free interrupts for @dev in @domain -+ * @domain: The interrupt domain -+ * @dev: The device for which to free interrupts -+ */ -+void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev) -+{ -+ msi_domain_free_irqs(domain, &dev->dev); -+} -+ -+/** -+ * pci_msi_create_default_irq_domain - Create a default MSI interrupt domain -+ * @node: Optional device-tree node of the interrupt controller -+ * @info: MSI domain info -+ * @parent: Parent irq domain -+ * -+ * Returns: A domain pointer or NULL in case of failure. If successful -+ * the default PCI/MSI irqdomain pointer is updated. 
-+ */ -+struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, -+ struct msi_domain_info *info, struct irq_domain *parent) -+{ -+ struct irq_domain *domain; -+ -+ mutex_lock(&pci_msi_domain_lock); -+ if (pci_msi_default_domain) { -+ pr_err("PCI: default irq domain for PCI MSI has already been created.\n"); -+ domain = NULL; -+ } else { -+ domain = pci_msi_create_irq_domain(node, info, parent); -+ pci_msi_default_domain = domain; -+ } -+ mutex_unlock(&pci_msi_domain_lock); -+ -+ return domain; -+} -+#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ -diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h -index b5defca..df2169e 100644 ---- a/drivers/pci/pci.h -+++ b/drivers/pci/pci.h -@@ -140,6 +140,27 @@ static inline void pci_no_msi(void) { } - static inline void pci_msi_init_pci_dev(struct pci_dev *dev) { } - #endif - -+static inline void pci_msi_set_enable(struct pci_dev *dev, int enable) -+{ -+ u16 control; -+ -+ pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); -+ control &= ~PCI_MSI_FLAGS_ENABLE; -+ if (enable) -+ control |= PCI_MSI_FLAGS_ENABLE; -+ pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); -+} -+ -+static inline void pci_msix_clear_and_set_ctrl(struct pci_dev *dev, u16 clear, u16 set) -+{ -+ u16 ctrl; -+ -+ pci_read_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, &ctrl); -+ ctrl &= ~clear; -+ ctrl |= set; -+ pci_write_config_word(dev, dev->msix_cap + PCI_MSIX_FLAGS, ctrl); -+} -+ - void pci_realloc_get_opt(char *); - - static inline int pci_no_d1d2(struct pci_dev *dev) -diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c -index 3010ffc..6bdeb75 100644 ---- a/drivers/pci/probe.c -+++ b/drivers/pci/probe.c -@@ -1097,6 +1097,22 @@ int pci_cfg_space_size(struct pci_dev *dev) - - #define LEGACY_IO_RESOURCE (IORESOURCE_IO | IORESOURCE_PCI_FIXED) - -+static void pci_msi_setup_pci_dev(struct pci_dev *dev) -+{ -+ /* -+ * Disable the MSI hardware to avoid screaming interrupts -+ * during boot. 
This is the power on reset default so -+ * usually this should be a noop. -+ */ -+ dev->msi_cap = pci_find_capability(dev, PCI_CAP_ID_MSI); -+ if (dev->msi_cap) -+ pci_msi_set_enable(dev, 0); -+ -+ dev->msix_cap = pci_find_capability(dev, PCI_CAP_ID_MSIX); -+ if (dev->msix_cap) -+ pci_msix_clear_and_set_ctrl(dev, PCI_MSIX_FLAGS_ENABLE, 0); -+} -+ - /** - * pci_setup_device - fill in class and map information of a device - * @dev: the device structure to fill -@@ -1152,6 +1168,8 @@ int pci_setup_device(struct pci_dev *dev) - /* "Unknown power state" */ - dev->current_state = PCI_UNKNOWN; - -+ pci_msi_setup_pci_dev(dev); -+ - /* Early fixups, before probing the BARs */ - pci_fixup_device(pci_fixup_early, dev); - /* device class may be changed after fixup */ -@@ -1908,7 +1926,7 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, - int error; - struct pci_host_bridge *bridge; - struct pci_bus *b, *b2; -- struct pci_host_bridge_window *window, *n; -+ struct resource_entry *window, *n; - struct resource *res; - resource_size_t offset; - char bus_addr[64]; -@@ -1972,8 +1990,8 @@ struct pci_bus *pci_create_root_bus(struct device *parent, int bus, - printk(KERN_INFO "PCI host bridge to bus %s\n", dev_name(&b->dev)); - - /* Add initial resources to the bus */ -- list_for_each_entry_safe(window, n, resources, list) { -- list_move_tail(&window->list, &bridge->windows); -+ resource_list_for_each_entry_safe(window, n, resources) { -+ list_move_tail(&window->node, &bridge->windows); - res = window->res; - offset = window->offset; - if (res->flags & IORESOURCE_BUS) -@@ -2073,12 +2091,12 @@ void pci_bus_release_busn_res(struct pci_bus *b) - struct pci_bus *pci_scan_root_bus(struct device *parent, int bus, - struct pci_ops *ops, void *sysdata, struct list_head *resources) - { -- struct pci_host_bridge_window *window; -+ struct resource_entry *window; - bool found = false; - struct pci_bus *b; - int max; - -- list_for_each_entry(window, resources, list) -+ 
resource_list_for_each_entry(window, resources) - if (window->res->flags & IORESOURCE_BUS) { - found = true; - break; -diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c -index b6d646a..f3681e2 100644 ---- a/drivers/pci/quirks.c -+++ b/drivers/pci/quirks.c -@@ -3516,8 +3516,9 @@ int pci_dev_specific_reset(struct pci_dev *dev, int probe) - static void quirk_dma_func0_alias(struct pci_dev *dev) - { - if (PCI_FUNC(dev->devfn) != 0) { -- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 0); -- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; -+ dev->dma_alias_devid = PCI_DEVID(dev->bus->number, -+ PCI_DEVFN(PCI_SLOT(dev->devfn), 0)); -+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; - } - } - -@@ -3532,8 +3533,9 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_RICOH, 0xe476, quirk_dma_func0_alias); - static void quirk_dma_func1_alias(struct pci_dev *dev) - { - if (PCI_FUNC(dev->devfn) != 1) { -- dev->dma_alias_devfn = PCI_DEVFN(PCI_SLOT(dev->devfn), 1); -- dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVFN; -+ dev->dma_alias_devid = PCI_DEVID(dev->bus->number, -+ PCI_DEVFN(PCI_SLOT(dev->devfn), 1)); -+ dev->dev_flags |= PCI_DEV_FLAGS_DMA_ALIAS_DEVID; - } - } - -diff --git a/drivers/pci/search.c b/drivers/pci/search.c -index a81f413..a00924f 100644 ---- a/drivers/pci/search.c -+++ b/drivers/pci/search.c -@@ -40,9 +40,8 @@ int pci_for_each_dma_alias(struct pci_dev *pdev, - * If the device is broken and uses an alias requester ID for - * DMA, iterate over that too. 
- */ -- if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVFN)) { -- ret = fn(pdev, PCI_DEVID(pdev->bus->number, -- pdev->dma_alias_devfn), data); -+ if (unlikely(pdev->dev_flags & PCI_DEV_FLAGS_DMA_ALIAS_DEVID)) { -+ ret = fn(pdev, pdev->dma_alias_devid, data); - if (ret) - return ret; - } -diff --git a/drivers/pci/xen-pcifront.c b/drivers/pci/xen-pcifront.c -index 116ca37..37d4218 100644 ---- a/drivers/pci/xen-pcifront.c -+++ b/drivers/pci/xen-pcifront.c -@@ -267,7 +267,7 @@ static int pci_frontend_enable_msix(struct pci_dev *dev, - } - - i = 0; -- list_for_each_entry(entry, &dev->msi_list, list) { -+ for_each_pci_msi_entry(entry, dev) { - op.msix_entries[i].entry = entry->msi_attrib.entry_nr; - /* Vector is useless at this point. */ - op.msix_entries[i].vector = -1; -diff --git a/drivers/power/reset/Kconfig b/drivers/power/reset/Kconfig -index f65ff49..b56b084 100644 ---- a/drivers/power/reset/Kconfig -+++ b/drivers/power/reset/Kconfig -@@ -150,5 +150,11 @@ config POWER_RESET_SYSCON - help - Reboot support for generic SYSCON mapped register reset. - -+config POWER_RESET_LAYERSCAPE -+ bool "Freescale LayerScape reset driver" -+ depends on ARCH_LAYERSCAPE -+ help -+ Reboot support for the Freescale LayerScape SoCs. 
-+ - endif - -diff --git a/drivers/power/reset/Makefile b/drivers/power/reset/Makefile -index 76ce1c5..d924bdb 100644 ---- a/drivers/power/reset/Makefile -+++ b/drivers/power/reset/Makefile -@@ -17,3 +17,4 @@ obj-$(CONFIG_POWER_RESET_VEXPRESS) += vexpress-poweroff.o - obj-$(CONFIG_POWER_RESET_XGENE) += xgene-reboot.o - obj-$(CONFIG_POWER_RESET_KEYSTONE) += keystone-reset.o - obj-$(CONFIG_POWER_RESET_SYSCON) += syscon-reboot.o -+obj-$(CONFIG_POWER_RESET_LAYERSCAPE) += ls-reboot.o -diff --git a/drivers/power/reset/ls-reboot.c b/drivers/power/reset/ls-reboot.c -new file mode 100644 -index 0000000..fa1152c ---- /dev/null -+++ b/drivers/power/reset/ls-reboot.c -@@ -0,0 +1,93 @@ -+/* -+ * Freescale LayerScape reboot driver -+ * -+ * Copyright (c) 2015, Freescale Semiconductor. -+ * Author: Pankaj Chauhan -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct ls_reboot_priv { -+ struct device *dev; -+ u32 *rstcr; -+}; -+ -+static struct ls_reboot_priv *ls_reboot_priv; -+ -+static void ls_reboot(enum reboot_mode reboot_mode, const char *cmd) -+{ -+ struct ls_reboot_priv *priv = ls_reboot_priv; -+ u32 val; -+ unsigned long timeout; -+ -+ if (ls_reboot_priv) { -+ val = readl(priv->rstcr); -+ val |= 0x02; -+ writel(val, priv->rstcr); -+ } -+ -+ timeout = jiffies + HZ; -+ while (time_before(jiffies, timeout)) -+ cpu_relax(); -+ -+} -+ -+static int ls_reboot_probe(struct platform_device *pdev) -+{ -+ ls_reboot_priv = devm_kzalloc(&pdev->dev, -+ sizeof(*ls_reboot_priv), GFP_KERNEL); -+ if (!ls_reboot_priv) { -+ dev_err(&pdev->dev, "out of memory for context\n"); -+ return -ENODEV; -+ } -+ -+ ls_reboot_priv->rstcr = of_iomap(pdev->dev.of_node, 0); -+ if (!ls_reboot_priv->rstcr) { -+ devm_kfree(&pdev->dev, ls_reboot_priv); -+ dev_err(&pdev->dev, "can not map resource\n"); -+ return -ENODEV; -+ } -+ -+ ls_reboot_priv->dev = &pdev->dev; -+ -+ arm_pm_restart = ls_reboot; -+ -+ return 0; -+} -+ -+static struct of_device_id ls_reboot_of_match[] = { -+ { .compatible = "fsl,ls-reset" }, -+ {} -+}; -+ -+static struct platform_driver ls_reboot_driver = { -+ .probe = ls_reboot_probe, -+ .driver = { -+ .name = "ls-reset", -+ .of_match_table = ls_reboot_of_match, -+ }, -+}; -+ -+static int __init ls_reboot_init(void) -+{ -+ return platform_driver_register(&ls_reboot_driver); -+} -+device_initcall(ls_reboot_init); -diff --git a/drivers/usb/core/config.c b/drivers/usb/core/config.c -index b9ddf0c..894894f 100644 ---- a/drivers/usb/core/config.c -+++ b/drivers/usb/core/config.c -@@ -115,7 +115,8 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno, - USB_SS_MULT(desc->bmAttributes) > 3) { - dev_warn(ddev, "Isoc endpoint has Mult of %d in " - "config %d interface %d altsetting %d ep %d: " -- "setting to 3\n", 
desc->bmAttributes + 1, -+ "setting to 3\n", -+ USB_SS_MULT(desc->bmAttributes), - cfgno, inum, asnum, ep->desc.bEndpointAddress); - ep->ss_ep_comp.bmAttributes = 2; - } -diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c -index d7a6d8b..66be3b4 100644 ---- a/drivers/usb/core/driver.c -+++ b/drivers/usb/core/driver.c -@@ -499,11 +499,15 @@ static int usb_unbind_interface(struct device *dev) - int usb_driver_claim_interface(struct usb_driver *driver, - struct usb_interface *iface, void *priv) - { -- struct device *dev = &iface->dev; -+ struct device *dev; - struct usb_device *udev; - int retval = 0; - int lpm_disable_error; - -+ if (!iface) -+ return -ENODEV; -+ -+ dev = &iface->dev; - if (dev->driver) - return -EBUSY; - -diff --git a/drivers/usb/core/hcd-pci.c b/drivers/usb/core/hcd-pci.c -index efc9531..a4c0b85 100644 ---- a/drivers/usb/core/hcd-pci.c -+++ b/drivers/usb/core/hcd-pci.c -@@ -74,6 +74,15 @@ static void for_each_companion(struct pci_dev *pdev, struct usb_hcd *hcd, - if (companion->bus != pdev->bus || - PCI_SLOT(companion->devfn) != slot) - continue; -+ -+ /* -+ * Companion device should be either UHCI,OHCI or EHCI host -+ * controller, otherwise skip. -+ */ -+ if (companion->class != CL_UHCI && companion->class != CL_OHCI && -+ companion->class != CL_EHCI) -+ continue; -+ - companion_hcd = pci_get_drvdata(companion); - if (!companion_hcd || !companion_hcd->self.root_hub) - continue; -diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c -index 2222899..d8e1d5c 100644 ---- a/drivers/usb/core/hub.c -+++ b/drivers/usb/core/hub.c -@@ -124,6 +124,10 @@ struct usb_hub *usb_hub_to_struct_hub(struct usb_device *hdev) - - static int usb_device_supports_lpm(struct usb_device *udev) - { -+ /* Some devices have trouble with LPM */ -+ if (udev->quirks & USB_QUIRK_NO_LPM) -+ return 0; -+ - /* USB 2.1 (and greater) devices indicate LPM support through - * their USB 2.0 Extended Capabilities BOS descriptor. 
- */ -@@ -1030,10 +1034,20 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) - unsigned delay; - - /* Continue a partial initialization */ -- if (type == HUB_INIT2) -- goto init2; -- if (type == HUB_INIT3) -+ if (type == HUB_INIT2 || type == HUB_INIT3) { -+ device_lock(hub->intfdev); -+ -+ /* Was the hub disconnected while we were waiting? */ -+ if (hub->disconnected) { -+ device_unlock(hub->intfdev); -+ kref_put(&hub->kref, hub_release); -+ return; -+ } -+ if (type == HUB_INIT2) -+ goto init2; - goto init3; -+ } -+ kref_get(&hub->kref); - - /* The superspeed hub except for root hub has to use Hub Depth - * value as an offset into the route string to locate the bits -@@ -1231,6 +1245,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) - queue_delayed_work(system_power_efficient_wq, - &hub->init_work, - msecs_to_jiffies(delay)); -+ device_unlock(hub->intfdev); - return; /* Continues at init3: below */ - } else { - msleep(delay); -@@ -1252,6 +1267,11 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type) - /* Allow autosuspend if it was suppressed */ - if (type <= HUB_INIT3) - usb_autopm_put_interface_async(to_usb_interface(hub->intfdev)); -+ -+ if (type == HUB_INIT2 || type == HUB_INIT3) -+ device_unlock(hub->intfdev); -+ -+ kref_put(&hub->kref, hub_release); - } - - /* Implement the continuations for the delays above */ -@@ -4222,7 +4242,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - { - struct usb_device *hdev = hub->hdev; - struct usb_hcd *hcd = bus_to_hcd(hdev->bus); -- int i, j, retval; -+ int retries, operations, retval, i; - unsigned delay = HUB_SHORT_RESET_TIME; - enum usb_device_speed oldspeed = udev->speed; - const char *speed; -@@ -4324,7 +4344,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - * first 8 bytes of the device descriptor to get the ep0 maxpacket - * value. 
- */ -- for (i = 0; i < GET_DESCRIPTOR_TRIES; (++i, msleep(100))) { -+ for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) { - bool did_new_scheme = false; - - if (use_new_scheme(udev, retry_counter)) { -@@ -4351,7 +4371,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - * 255 is for WUSB devices, we actually need to use - * 512 (WUSB1.0[4.8.1]). - */ -- for (j = 0; j < 3; ++j) { -+ for (operations = 0; operations < 3; ++operations) { - buf->bMaxPacketSize0 = 0; - r = usb_control_msg(udev, usb_rcvaddr0pipe(), - USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, -@@ -4371,7 +4391,13 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - r = -EPROTO; - break; - } -- if (r == 0) -+ /* -+ * Some devices time out if they are powered on -+ * when already connected. They need a second -+ * reset. But only on the first attempt, -+ * lest we get into a time out/reset loop -+ */ -+ if (r == 0 || (r == -ETIMEDOUT && retries == 0)) - break; - } - udev->descriptor.bMaxPacketSize0 = -@@ -4403,7 +4429,7 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - * authorization will assign the final address. 
- */ - if (udev->wusb == 0) { -- for (j = 0; j < SET_ADDRESS_TRIES; ++j) { -+ for (operations = 0; operations < SET_ADDRESS_TRIES; ++operations) { - retval = hub_set_address(udev, devnum); - if (retval >= 0) - break; -@@ -4498,6 +4524,8 @@ hub_port_init (struct usb_hub *hub, struct usb_device *udev, int port1, - goto fail; - } - -+ usb_detect_quirks(udev); -+ - if (udev->wusb == 0 && le16_to_cpu(udev->descriptor.bcdUSB) >= 0x0201) { - retval = usb_get_bos_descriptor(udev); - if (!retval) { -@@ -4692,7 +4720,6 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, - if (status < 0) - goto loop; - -- usb_detect_quirks(udev); - if (udev->quirks & USB_QUIRK_DELAY_INIT) - msleep(1000); - -@@ -5324,9 +5351,6 @@ static int usb_reset_and_verify_device(struct usb_device *udev) - if (udev->usb2_hw_lpm_enabled == 1) - usb_set_usb2_hardware_lpm(udev, 0); - -- bos = udev->bos; -- udev->bos = NULL; -- - /* Disable LPM and LTM while we reset the device and reinstall the alt - * settings. Device-initiated LPM settings, and system exit latency - * settings are cleared when the device is reset, so we have to set -@@ -5335,15 +5359,17 @@ static int usb_reset_and_verify_device(struct usb_device *udev) - ret = usb_unlocked_disable_lpm(udev); - if (ret) { - dev_err(&udev->dev, "%s Failed to disable LPM\n.", __func__); -- goto re_enumerate; -+ goto re_enumerate_no_bos; - } - ret = usb_disable_ltm(udev); - if (ret) { - dev_err(&udev->dev, "%s Failed to disable LTM\n.", - __func__); -- goto re_enumerate; -+ goto re_enumerate_no_bos; - } - -+ bos = udev->bos; -+ - for (i = 0; i < SET_CONFIG_TRIES; ++i) { - - /* ep0 maxpacket size may change; let the HCD know about it. 
-@@ -5435,15 +5461,19 @@ done: - usb_set_usb2_hardware_lpm(udev, 1); - usb_unlocked_enable_lpm(udev); - usb_enable_ltm(udev); -- usb_release_bos_descriptor(udev); -- udev->bos = bos; -+ /* release the new BOS descriptor allocated by hub_port_init() */ -+ if (udev->bos != bos) { -+ usb_release_bos_descriptor(udev); -+ udev->bos = bos; -+ } - return 0; - - re_enumerate: -- /* LPM state doesn't matter when we're about to destroy the device. */ -- hub_port_logical_disconnect(parent_hub, port1); - usb_release_bos_descriptor(udev); - udev->bos = bos; -+re_enumerate_no_bos: -+ /* LPM state doesn't matter when we're about to destroy the device. */ -+ hub_port_logical_disconnect(parent_hub, port1); - return -ENODEV; - } - -diff --git a/drivers/usb/core/quirks.c b/drivers/usb/core/quirks.c -index 8a77a41..6b53fc3 100644 ---- a/drivers/usb/core/quirks.c -+++ b/drivers/usb/core/quirks.c -@@ -196,6 +196,12 @@ static const struct usb_device_id usb_quirk_list[] = { - { USB_DEVICE(0x1a0a, 0x0200), .driver_info = - USB_QUIRK_LINEAR_UFRAME_INTR_BINTERVAL }, - -+ /* Blackmagic Design Intensity Shuttle */ -+ { USB_DEVICE(0x1edb, 0xbd3b), .driver_info = USB_QUIRK_NO_LPM }, -+ -+ /* Blackmagic Design UltraStudio SDI */ -+ { USB_DEVICE(0x1edb, 0xbd4f), .driver_info = USB_QUIRK_NO_LPM }, -+ - { } /* terminating entry must be last */ - }; - -diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c -index b0f4d52..17eeab8 100644 ---- a/drivers/usb/dwc3/core.c -+++ b/drivers/usb/dwc3/core.c -@@ -673,22 +673,20 @@ static int dwc3_probe(struct platform_device *pdev) - * since it will be requested by the xhci-plat driver. 
- */ - regs = devm_ioremap_resource(dev, res); -- if (IS_ERR(regs)) -- return PTR_ERR(regs); -+ if (IS_ERR(regs)) { -+ ret = PTR_ERR(regs); -+ goto err0; -+ } - - dwc->regs = regs; - dwc->regs_size = resource_size(res); -- /* -- * restore res->start back to its original value so that, -- * in case the probe is deferred, we don't end up getting error in -- * request the memory region the next time probe is called. -- */ -- res->start -= DWC3_GLOBALS_REGS_START; - - if (node) { - dwc->maximum_speed = of_usb_get_maximum_speed(node); - - dwc->needs_fifo_resize = of_property_read_bool(node, "tx-fifo-resize"); -+ dwc->configure_gfladj = -+ of_property_read_bool(node, "configure-gfladj"); - dwc->dr_mode = of_usb_get_dr_mode(node); - } else if (pdata) { - dwc->maximum_speed = pdata->maximum_speed; -@@ -703,7 +701,7 @@ static int dwc3_probe(struct platform_device *pdev) - - ret = dwc3_core_get_phy(dwc); - if (ret) -- return ret; -+ goto err0; - - spin_lock_init(&dwc->lock); - platform_set_drvdata(pdev, dwc); -@@ -722,7 +720,25 @@ static int dwc3_probe(struct platform_device *pdev) - if (ret) { - dev_err(dwc->dev, "failed to allocate event buffers\n"); - ret = -ENOMEM; -- goto err0; -+ goto err1; -+ } -+ -+ /* Adjust Frame Length */ -+ if (dwc->configure_gfladj) -+ dwc3_writel(dwc->regs, DWC3_GFLADJ, GFLADJ_30MHZ_REG_SEL | -+ GFLADJ_30MHZ(GFLADJ_30MHZ_DEFAULT)); -+ -+ /* Change burst beat and outstanding pipelined transfers requests */ -+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, -+ (dwc3_readl(dwc->regs, DWC3_GSBUSCFG0) & ~0xff) | 0xf); -+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG1, -+ dwc3_readl(dwc->regs, DWC3_GSBUSCFG1) | 0xf00); -+ -+ /* Enable Snooping */ -+ if (node && of_dma_is_coherent(node)) { -+ dwc3_writel(dwc->regs, DWC3_GSBUSCFG0, -+ dwc3_readl(dwc->regs, DWC3_GSBUSCFG0) | 0x22220000); -+ dev_dbg(dev, "enabled snooping for usb\n"); - } - - if (IS_ENABLED(CONFIG_USB_DWC3_HOST)) -@@ -736,65 +752,81 @@ static int dwc3_probe(struct platform_device *pdev) - ret = 
dwc3_core_init(dwc); - if (ret) { - dev_err(dev, "failed to initialize core\n"); -- goto err0; -+ goto err1; - } - - usb_phy_set_suspend(dwc->usb2_phy, 0); - usb_phy_set_suspend(dwc->usb3_phy, 0); - ret = phy_power_on(dwc->usb2_generic_phy); - if (ret < 0) -- goto err1; -+ goto err2; - - ret = phy_power_on(dwc->usb3_generic_phy); - if (ret < 0) -- goto err_usb2phy_power; -+ goto err3; - - ret = dwc3_event_buffers_setup(dwc); - if (ret) { - dev_err(dwc->dev, "failed to setup event buffers\n"); -- goto err_usb3phy_power; -+ goto err4; - } - - ret = dwc3_core_init_mode(dwc); - if (ret) -- goto err2; -+ goto err5; - - ret = dwc3_debugfs_init(dwc); - if (ret) { - dev_err(dev, "failed to initialize debugfs\n"); -- goto err3; -+ goto err6; - } - - pm_runtime_allow(dev); - - return 0; - --err3: -+err6: - dwc3_core_exit_mode(dwc); - --err2: -+err5: - dwc3_event_buffers_cleanup(dwc); - --err_usb3phy_power: -+err4: - phy_power_off(dwc->usb3_generic_phy); - --err_usb2phy_power: -+err3: - phy_power_off(dwc->usb2_generic_phy); - --err1: -+err2: - usb_phy_set_suspend(dwc->usb2_phy, 1); - usb_phy_set_suspend(dwc->usb3_phy, 1); - dwc3_core_exit(dwc); - --err0: -+err1: - dwc3_free_event_buffers(dwc); - -+err0: -+ /* -+ * restore res->start back to its original value so that, in case the -+ * probe is deferred, we don't end up getting error in request the -+ * memory region the next time probe is called. -+ */ -+ res->start -= DWC3_GLOBALS_REGS_START; -+ - return ret; - } - - static int dwc3_remove(struct platform_device *pdev) - { - struct dwc3 *dwc = platform_get_drvdata(pdev); -+ struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ -+ /* -+ * restore res->start back to its original value so that, in case the -+ * probe is deferred, we don't end up getting error in request the -+ * memory region the next time probe is called. 
-+ */ -+ res->start -= DWC3_GLOBALS_REGS_START; - - dwc3_debugfs_exit(dwc); - dwc3_core_exit_mode(dwc); -diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h -index 66f6256..aec8953 100644 ---- a/drivers/usb/dwc3/core.h -+++ b/drivers/usb/dwc3/core.h -@@ -26,6 +26,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -123,6 +124,7 @@ - #define DWC3_GEVNTCOUNT(n) (0xc40c + (n * 0x10)) - - #define DWC3_GHWPARAMS8 0xc600 -+#define DWC3_GFLADJ 0xc630 - - /* Device Registers */ - #define DWC3_DCFG 0xc700 -@@ -210,6 +212,11 @@ - #define DWC3_GHWPARAMS4_HIBER_SCRATCHBUFS(n) (((n) & (0x0f << 13)) >> 13) - #define DWC3_MAX_HIBER_SCRATCHBUFS 15 - -+/* Global Frame Length Adjustment Register */ -+#define GFLADJ_30MHZ_REG_SEL (1 << 7) -+#define GFLADJ_30MHZ(n) ((n) & 0x3f) -+#define GFLADJ_30MHZ_DEFAULT 0x20 -+ - /* Device Configuration Register */ - #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3) - #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f) -@@ -766,6 +773,7 @@ struct dwc3 { - unsigned has_hibernation:1; - unsigned is_selfpowered:1; - unsigned needs_fifo_resize:1; -+ unsigned configure_gfladj:1; - unsigned pullups_connected:1; - unsigned resize_fifos:1; - unsigned setup_packet_pending:1; -diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c -index dcb8ca0..c41d46c 100644 ---- a/drivers/usb/dwc3/host.c -+++ b/drivers/usb/dwc3/host.c -@@ -39,6 +39,12 @@ int dwc3_host_init(struct dwc3 *dwc) - xhci->dev.dma_mask = dwc->dev->dma_mask; - xhci->dev.dma_parms = dwc->dev->dma_parms; - -+ /* set DMA operations */ -+ if (dwc->dev->of_node && of_dma_is_coherent(dwc->dev->of_node)) { -+ xhci->dev.archdata.dma_ops = dwc->dev->archdata.dma_ops; -+ dev_dbg(dwc->dev, "set dma_ops for usb\n"); -+ } -+ - dwc->xhci = xhci; - - ret = platform_device_add_resources(xhci, dwc->xhci_resources, -diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c -index 7e5c90e..c6027ac 100644 ---- a/drivers/usb/host/xhci-pci.c -+++ 
b/drivers/usb/host/xhci-pci.c -@@ -23,10 +23,17 @@ - #include - #include - #include -+#include - - #include "xhci.h" - #include "xhci-trace.h" - -+#define SSIC_PORT_NUM 2 -+#define SSIC_PORT_CFG2 0x880c -+#define SSIC_PORT_CFG2_OFFSET 0x30 -+#define PROG_DONE (1 << 30) -+#define SSIC_PORT_UNUSED (1 << 31) -+ - /* Device for a quirk */ - #define PCI_VENDOR_ID_FRESCO_LOGIC 0x1b73 - #define PCI_DEVICE_ID_FRESCO_LOGIC_PDK 0x1000 -@@ -40,6 +47,8 @@ - #define PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI 0x22b5 - #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI 0xa12f - #define PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI 0x9d2f -+#define PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI 0x0aa8 -+#define PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI 0x1aa8 - - static const char hcd_name[] = "xhci_hcd"; - -@@ -140,9 +149,15 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) - if (pdev->vendor == PCI_VENDOR_ID_INTEL && - (pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_LP_XHCI || - pdev->device == PCI_DEVICE_ID_INTEL_SUNRISEPOINT_H_XHCI || -- pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI)) { -+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI || -+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_M_XHCI || -+ pdev->device == PCI_DEVICE_ID_INTEL_BROXTON_B_XHCI)) { - xhci->quirks |= XHCI_PME_STUCK_QUIRK; - } -+ if (pdev->vendor == PCI_VENDOR_ID_INTEL && -+ pdev->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) { -+ xhci->quirks |= XHCI_SSIC_PORT_UNUSED; -+ } - if (pdev->vendor == PCI_VENDOR_ID_ETRON && - pdev->device == PCI_DEVICE_ID_EJ168) { - xhci->quirks |= XHCI_RESET_ON_RESUME; -@@ -169,20 +184,18 @@ static void xhci_pci_quirks(struct device *dev, struct xhci_hcd *xhci) - "QUIRK: Resetting on resume"); - } - --/* -- * Make sure PME works on some Intel xHCI controllers by writing 1 to clear -- * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 -- */ --static void xhci_pme_quirk(struct xhci_hcd *xhci) -+#ifdef CONFIG_ACPI -+static void 
xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) - { -- u32 val; -- void __iomem *reg; -- -- reg = (void __iomem *) xhci->cap_regs + 0x80a4; -- val = readl(reg); -- writel(val | BIT(28), reg); -- readl(reg); -+ static const u8 intel_dsm_uuid[] = { -+ 0xb7, 0x0c, 0x34, 0xac, 0x01, 0xe9, 0xbf, 0x45, -+ 0xb7, 0xe6, 0x2b, 0x34, 0xec, 0x93, 0x1e, 0x23, -+ }; -+ acpi_evaluate_dsm(ACPI_HANDLE(&dev->dev), intel_dsm_uuid, 3, 1, NULL); - } -+#else -+ static void xhci_pme_acpi_rtd3_enable(struct pci_dev *dev) { } -+#endif /* CONFIG_ACPI */ - - /* called during probe() after chip reset completes */ - static int xhci_pci_setup(struct usb_hcd *hcd) -@@ -263,6 +276,9 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) - HCC_MAX_PSA(xhci->hcc_params) >= 4) - xhci->shared_hcd->can_do_streams = 1; - -+ if (xhci->quirks & XHCI_PME_STUCK_QUIRK) -+ xhci_pme_acpi_rtd3_enable(dev); -+ - /* USB-2 and USB-3 roothubs initialized, allow runtime pm suspend */ - pm_runtime_put_noidle(&dev->dev); - -@@ -282,6 +298,7 @@ static void xhci_pci_remove(struct pci_dev *dev) - struct xhci_hcd *xhci; - - xhci = hcd_to_xhci(pci_get_drvdata(dev)); -+ xhci->xhc_state |= XHCI_STATE_REMOVING; - if (xhci->shared_hcd) { - usb_remove_hcd(xhci->shared_hcd); - usb_put_hcd(xhci->shared_hcd); -@@ -296,10 +313,65 @@ static void xhci_pci_remove(struct pci_dev *dev) - } - - #ifdef CONFIG_PM -+/* -+ * In some Intel xHCI controllers, in order to get D3 working, -+ * through a vendor specific SSIC CONFIG register at offset 0x883c, -+ * SSIC PORT need to be marked as "unused" before putting xHCI -+ * into D3. After D3 exit, the SSIC port need to be marked as "used". -+ * Without this change, xHCI might not enter D3 state. 
-+ */ -+static void xhci_ssic_port_unused_quirk(struct usb_hcd *hcd, bool suspend) -+{ -+ struct xhci_hcd *xhci = hcd_to_xhci(hcd); -+ u32 val; -+ void __iomem *reg; -+ int i; -+ -+ for (i = 0; i < SSIC_PORT_NUM; i++) { -+ reg = (void __iomem *) xhci->cap_regs + -+ SSIC_PORT_CFG2 + -+ i * SSIC_PORT_CFG2_OFFSET; -+ -+ /* Notify SSIC that SSIC profile programming is not done. */ -+ val = readl(reg) & ~PROG_DONE; -+ writel(val, reg); -+ -+ /* Mark SSIC port as unused(suspend) or used(resume) */ -+ val = readl(reg); -+ if (suspend) -+ val |= SSIC_PORT_UNUSED; -+ else -+ val &= ~SSIC_PORT_UNUSED; -+ writel(val, reg); -+ -+ /* Notify SSIC that SSIC profile programming is done */ -+ val = readl(reg) | PROG_DONE; -+ writel(val, reg); -+ readl(reg); -+ } -+} -+ -+/* -+ * Make sure PME works on some Intel xHCI controllers by writing 1 to clear -+ * the Internal PME flag bit in vendor specific PMCTRL register at offset 0x80a4 -+ */ -+static void xhci_pme_quirk(struct usb_hcd *hcd) -+{ -+ struct xhci_hcd *xhci = hcd_to_xhci(hcd); -+ void __iomem *reg; -+ u32 val; -+ -+ reg = (void __iomem *) xhci->cap_regs + 0x80a4; -+ val = readl(reg); -+ writel(val | BIT(28), reg); -+ readl(reg); -+} -+ - static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) - { - struct xhci_hcd *xhci = hcd_to_xhci(hcd); - struct pci_dev *pdev = to_pci_dev(hcd->self.controller); -+ int ret; - - /* - * Systems with the TI redriver that loses port status change events -@@ -309,9 +381,16 @@ static int xhci_pci_suspend(struct usb_hcd *hcd, bool do_wakeup) - pdev->no_d3cold = true; - - if (xhci->quirks & XHCI_PME_STUCK_QUIRK) -- xhci_pme_quirk(xhci); -+ xhci_pme_quirk(hcd); -+ -+ if (xhci->quirks & XHCI_SSIC_PORT_UNUSED) -+ xhci_ssic_port_unused_quirk(hcd, true); - -- return xhci_suspend(xhci, do_wakeup); -+ ret = xhci_suspend(xhci, do_wakeup); -+ if (ret && (xhci->quirks & XHCI_SSIC_PORT_UNUSED)) -+ xhci_ssic_port_unused_quirk(hcd, false); -+ -+ return ret; - } - - static int xhci_pci_resume(struct 
usb_hcd *hcd, bool hibernated) -@@ -341,8 +420,11 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) - if (pdev->vendor == PCI_VENDOR_ID_INTEL) - usb_enable_intel_xhci_ports(pdev); - -+ if (xhci->quirks & XHCI_SSIC_PORT_UNUSED) -+ xhci_ssic_port_unused_quirk(hcd, false); -+ - if (xhci->quirks & XHCI_PME_STUCK_QUIRK) -- xhci_pme_quirk(xhci); -+ xhci_pme_quirk(hcd); - - retval = xhci_resume(xhci, hibernated); - return retval; -diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c -index 1e5fb8c..04e7525 100644 ---- a/drivers/usb/host/xhci-ring.c -+++ b/drivers/usb/host/xhci-ring.c -@@ -3840,8 +3840,12 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd, - { - int reserved_trbs = xhci->cmd_ring_reserved_trbs; - int ret; -- if (xhci->xhc_state & XHCI_STATE_DYING) -+ -+ if ((xhci->xhc_state & XHCI_STATE_DYING) || -+ (xhci->xhc_state & XHCI_STATE_HALTED)) { -+ xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n"); - return -ESHUTDOWN; -+ } - - if (!command_must_succeed) - reserved_trbs++; -diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c -index 98380fa..f951b75 100644 ---- a/drivers/usb/host/xhci.c -+++ b/drivers/usb/host/xhci.c -@@ -147,7 +147,8 @@ static int xhci_start(struct xhci_hcd *xhci) - "waited %u microseconds.\n", - XHCI_MAX_HALT_USEC); - if (!ret) -- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING); -+ /* clear state flags. Including dying, halted or removing */ -+ xhci->xhc_state = 0; - - return ret; - } -@@ -1102,8 +1103,8 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) - /* Resume root hubs only when have pending events. */ - status = readl(&xhci->op_regs->status); - if (status & STS_EINT) { -- usb_hcd_resume_root_hub(hcd); - usb_hcd_resume_root_hub(xhci->shared_hcd); -+ usb_hcd_resume_root_hub(hcd); - } - } - -@@ -1118,10 +1119,10 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated) - - /* Re-enable port polling. 
*/ - xhci_dbg(xhci, "%s: starting port polling.\n", __func__); -- set_bit(HCD_FLAG_POLL_RH, &hcd->flags); -- usb_hcd_poll_rh_status(hcd); - set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags); - usb_hcd_poll_rh_status(xhci->shared_hcd); -+ set_bit(HCD_FLAG_POLL_RH, &hcd->flags); -+ usb_hcd_poll_rh_status(hcd); - - return retval; - } -@@ -1548,7 +1549,9 @@ int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status) - xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb, - "HW died, freeing TD."); - urb_priv = urb->hcpriv; -- for (i = urb_priv->td_cnt; i < urb_priv->length; i++) { -+ for (i = urb_priv->td_cnt; -+ i < urb_priv->length && xhci->devs[urb->dev->slot_id]; -+ i++) { - td = urb_priv->td[i]; - if (!list_empty(&td->td_list)) - list_del_init(&td->td_list); -@@ -2751,7 +2754,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) - if (ret <= 0) - return ret; - xhci = hcd_to_xhci(hcd); -- if (xhci->xhc_state & XHCI_STATE_DYING) -+ if ((xhci->xhc_state & XHCI_STATE_DYING) || -+ (xhci->xhc_state & XHCI_STATE_REMOVING)) - return -ENODEV; - - xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev); -@@ -3793,7 +3797,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev, - u64 temp_64; - struct xhci_command *command; - -- if (xhci->xhc_state) /* dying or halted */ -+ if (xhci->xhc_state) /* dying, removing or halted */ - return -EINVAL; - - if (!udev->slot_id) { -@@ -4912,6 +4916,16 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks) - goto error; - xhci_dbg(xhci, "Reset complete\n"); - -+ /* -+ * On some xHCI controllers (e.g. R-Car SoCs), the AC64 bit (bit 0) -+ * of HCCPARAMS1 is set to 1. However, the xHCs don't support 64-bit -+ * address memory pointers actually. So, this driver clears the AC64 -+ * bit of xhci->hcc_params to call dma_set_coherent_mask(dev, -+ * DMA_BIT_MASK(32)) in this xhci_gen_setup(). 
-+ */ -+ if (xhci->quirks & XHCI_NO_64BIT_SUPPORT) -+ xhci->hcc_params &= ~BIT(0); -+ - /* Set dma_mask and coherent_dma_mask to 64-bits, - * if xHC supports 64-bit addressing */ - if (HCC_64BIT_ADDR(xhci->hcc_params) && -diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h -index 54f386f..3850cb2 100644 ---- a/drivers/usb/host/xhci.h -+++ b/drivers/usb/host/xhci.h -@@ -1531,6 +1531,7 @@ struct xhci_hcd { - */ - #define XHCI_STATE_DYING (1 << 0) - #define XHCI_STATE_HALTED (1 << 1) -+#define XHCI_STATE_REMOVING (1 << 2) - /* Statistics */ - int error_bitmask; - unsigned int quirks; -@@ -1565,6 +1566,8 @@ struct xhci_hcd { - /* For controllers with a broken beyond repair streams implementation */ - #define XHCI_BROKEN_STREAMS (1 << 19) - #define XHCI_PME_STUCK_QUIRK (1 << 20) -+#define XHCI_SSIC_PORT_UNUSED (1 << 22) -+#define XHCI_NO_64BIT_SUPPORT (1 << 23) - unsigned int num_active_eps; - unsigned int limit_active_eps; - /* There are two roothubs to keep track of bus suspend info for */ -diff --git a/drivers/vfio/pci/vfio_pci_intrs.c b/drivers/vfio/pci/vfio_pci_intrs.c -index 553212f..e8d695b 100644 ---- a/drivers/vfio/pci/vfio_pci_intrs.c -+++ b/drivers/vfio/pci/vfio_pci_intrs.c -@@ -560,7 +560,7 @@ static int vfio_msi_set_vector_signal(struct vfio_pci_device *vdev, - struct msi_msg msg; - - get_cached_msi_msg(irq, &msg); -- write_msi_msg(irq, &msg); -+ pci_write_msi_msg(irq, &msg); - } - - ret = request_irq(irq, vfio_msihandler, 0, -diff --git a/include/asm-generic/msi.h b/include/asm-generic/msi.h -new file mode 100644 -index 0000000..61c58d8 ---- /dev/null -+++ b/include/asm-generic/msi.h -@@ -0,0 +1,32 @@ -+#ifndef __ASM_GENERIC_MSI_H -+#define __ASM_GENERIC_MSI_H -+ -+#include -+ -+#ifndef NUM_MSI_ALLOC_SCRATCHPAD_REGS -+# define NUM_MSI_ALLOC_SCRATCHPAD_REGS 2 -+#endif -+ -+struct msi_desc; -+ -+/** -+ * struct msi_alloc_info - Default structure for MSI interrupt allocation. 
-+ * @desc: Pointer to msi descriptor -+ * @hwirq: Associated hw interrupt number in the domain -+ * @scratchpad: Storage for implementation specific scratch data -+ * -+ * Architectures can provide their own implementation by not including -+ * asm-generic/msi.h into their arch specific header file. -+ */ -+typedef struct msi_alloc_info { -+ struct msi_desc *desc; -+ irq_hw_number_t hwirq; -+ union { -+ unsigned long ul; -+ void *ptr; -+ } scratchpad[NUM_MSI_ALLOC_SCRATCHPAD_REGS]; -+} msi_alloc_info_t; -+ -+#define GENERIC_MSI_DOMAIN_OPS 1 -+ -+#endif -diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h -index aa70cbd..bee5d68 100644 ---- a/include/asm-generic/vmlinux.lds.h -+++ b/include/asm-generic/vmlinux.lds.h -@@ -164,6 +164,7 @@ - #define CLKSRC_OF_TABLES() OF_TABLE(CONFIG_CLKSRC_OF, clksrc) - #define IRQCHIP_OF_MATCH_TABLE() OF_TABLE(CONFIG_IRQCHIP, irqchip) - #define CLK_OF_TABLES() OF_TABLE(CONFIG_COMMON_CLK, clk) -+#define IOMMU_OF_TABLES() OF_TABLE(CONFIG_OF_IOMMU, iommu) - #define RESERVEDMEM_OF_TABLES() OF_TABLE(CONFIG_OF_RESERVED_MEM, reservedmem) - #define CPU_METHOD_OF_TABLES() OF_TABLE(CONFIG_SMP, cpu_method) - #define EARLYCON_OF_TABLES() OF_TABLE(CONFIG_SERIAL_EARLYCON, earlycon) -@@ -497,6 +498,7 @@ - CLK_OF_TABLES() \ - RESERVEDMEM_OF_TABLES() \ - CLKSRC_OF_TABLES() \ -+ IOMMU_OF_TABLES() \ - CPU_METHOD_OF_TABLES() \ - KERNEL_DTB() \ - IRQCHIP_OF_MATCH_TABLE() \ -diff --git a/include/linux/acpi.h b/include/linux/acpi.h -index 1c7eaa7..d017dbf 100644 ---- a/include/linux/acpi.h -+++ b/include/linux/acpi.h -@@ -27,6 +27,7 @@ - - #include - #include /* for struct resource */ -+#include - #include - #include - -@@ -290,11 +291,6 @@ unsigned long acpi_dev_irq_flags(u8 triggering, u8 polarity, u8 shareable); - bool acpi_dev_resource_interrupt(struct acpi_resource *ares, int index, - struct resource *res); - --struct resource_list_entry { -- struct list_head node; -- struct resource res; --}; -- - void 
acpi_dev_free_resource_list(struct list_head *list); - int acpi_dev_get_resources(struct acpi_device *adev, struct list_head *list, - int (*preproc)(struct acpi_resource *, void *), -diff --git a/include/linux/device.h b/include/linux/device.h -index ce1f216..941d97b 100644 ---- a/include/linux/device.h -+++ b/include/linux/device.h -@@ -690,6 +690,8 @@ struct acpi_dev_node { - * along with subsystem-level and driver-level callbacks. - * @pins: For device pin management. - * See Documentation/pinctrl.txt for details. -+ * @msi_list: Hosts MSI descriptors -+ * @msi_domain: The generic MSI domain this device is using. - * @numa_node: NUMA node this device is close to. - * @dma_mask: Dma mask (if dma'ble device). - * @coherent_dma_mask: Like dma_mask, but for alloc_coherent mapping as not all -@@ -750,9 +752,15 @@ struct device { - struct dev_pm_info power; - struct dev_pm_domain *pm_domain; - -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+ struct irq_domain *msi_domain; -+#endif - #ifdef CONFIG_PINCTRL - struct dev_pin_info *pins; - #endif -+#ifdef CONFIG_GENERIC_MSI_IRQ -+ struct list_head msi_list; -+#endif - - #ifdef CONFIG_NUMA - int numa_node; /* NUMA node this device is close to */ -@@ -837,6 +845,22 @@ static inline void set_dev_node(struct device *dev, int node) - } - #endif - -+static inline struct irq_domain *dev_get_msi_domain(const struct device *dev) -+{ -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+ return dev->msi_domain; -+#else -+ return NULL; -+#endif -+} -+ -+static inline void dev_set_msi_domain(struct device *dev, struct irq_domain *d) -+{ -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+ dev->msi_domain = d; -+#endif -+} -+ - static inline void *dev_get_drvdata(const struct device *dev) - { - return dev->driver_data; -diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h -index d5d3881..c3007cb 100644 ---- a/include/linux/dma-mapping.h -+++ b/include/linux/dma-mapping.h -@@ -129,11 +129,14 @@ static inline int dma_coerce_mask_and_coherent(struct 
device *dev, u64 mask) - - extern u64 dma_get_required_mask(struct device *dev); - --#ifndef set_arch_dma_coherent_ops --static inline int set_arch_dma_coherent_ops(struct device *dev) --{ -- return 0; --} -+#ifndef arch_setup_dma_ops -+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, -+ u64 size, struct iommu_ops *iommu, -+ bool coherent) { } -+#endif -+ -+#ifndef arch_teardown_dma_ops -+static inline void arch_teardown_dma_ops(struct device *dev) { } - #endif - - static inline unsigned int dma_get_max_seg_size(struct device *dev) -diff --git a/include/linux/fsl/guts.h b/include/linux/fsl/guts.h -new file mode 100644 -index 0000000..84d971f ---- /dev/null -+++ b/include/linux/fsl/guts.h -@@ -0,0 +1,192 @@ -+/** -+ * Freecale 85xx and 86xx Global Utilties register set -+ * -+ * Authors: Jeff Brown -+ * Timur Tabi -+ * -+ * Copyright 2004,2007,2012 Freescale Semiconductor, Inc -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. -+ */ -+ -+#ifndef __FSL_GUTS_H__ -+#define __FSL_GUTS_H__ -+ -+#include -+ -+/** -+ * Global Utility Registers. -+ * -+ * Not all registers defined in this structure are available on all chips, so -+ * you are expected to know whether a given register actually exists on your -+ * chip before you access it. -+ * -+ * Also, some registers are similar on different chips but have slightly -+ * different names. In these cases, one name is chosen to avoid extraneous -+ * #ifdefs. 
-+ */ -+struct ccsr_guts { -+ __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ -+ __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ -+ __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ -+ __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ -+ __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ -+ __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ -+ u8 res018[0x20 - 0x18]; -+ __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ -+ u8 res024[0x30 - 0x24]; -+ __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ -+ u8 res034[0x40 - 0x34]; -+ __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ -+ u8 res044[0x50 - 0x44]; -+ __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ -+ u8 res054[0x60 - 0x54]; -+ __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ -+ __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ -+ __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ -+ u8 res06c[0x70 - 0x6c]; -+ __be32 devdisr; /* 0x.0070 - Device Disable Control */ -+#define CCSR_GUTS_DEVDISR_TB1 0x00001000 -+#define CCSR_GUTS_DEVDISR_TB0 0x00004000 -+ __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ -+ u8 res078[0x7c - 0x78]; -+ __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ -+ __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ -+ __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ -+ __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ -+ __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ -+ __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ -+ __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ -+ __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ -+ __be32 autorstsr; /* 0x.009c - Automatic reset 
status register */ -+ __be32 pvr; /* 0x.00a0 - Processor Version Register */ -+ __be32 svr; /* 0x.00a4 - System Version Register */ -+ u8 res0a8[0xb0 - 0xa8]; -+ __be32 rstcr; /* 0x.00b0 - Reset Control Register */ -+ u8 res0b4[0xc0 - 0xb4]; -+ __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register -+ Called 'elbcvselcr' on 86xx SOCs */ -+ u8 res0c4[0x100 - 0xc4]; -+ __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers -+ There are 16 registers */ -+ u8 res140[0x224 - 0x140]; -+ __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ -+ __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ -+ u8 res22c[0x604 - 0x22c]; -+ __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ -+ u8 res608[0x800 - 0x608]; -+ __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ -+ u8 res804[0x900 - 0x804]; -+ __be32 ircr; /* 0x.0900 - Infrared Control Register */ -+ u8 res904[0x908 - 0x904]; -+ __be32 dmacr; /* 0x.0908 - DMA Control Register */ -+ u8 res90c[0x914 - 0x90c]; -+ __be32 elbccr; /* 0x.0914 - eLBC Control Register */ -+ u8 res918[0xb20 - 0x918]; -+ __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ -+ __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ -+ __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ -+ u8 resb2c[0xe00 - 0xb2c]; -+ __be32 clkocr; /* 0x.0e00 - Clock Out Select Register */ -+ u8 rese04[0xe10 - 0xe04]; -+ __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ -+ u8 rese14[0xe20 - 0xe14]; -+ __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ -+ __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ -+ u8 rese28[0xf04 - 0xe28]; -+ __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ -+ __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ -+ u8 resf0c[0xf2c - 0xf0c]; -+ __be32 itcr; /* 0x.0f2c - Internal transaction control register */ -+ u8 resf30[0xf40 - 0xf30]; -+ __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ -+ __be32 srds2cr1; /* 
0x.0f44 - SerDes2 Control Register 0 */ -+} __attribute__ ((packed)); -+ -+ -+/* Alternate function signal multiplex control */ -+#define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) -+ -+#ifdef CONFIG_PPC_86xx -+ -+#define CCSR_GUTS_DMACR_DEV_SSI 0 /* DMA controller/channel set to SSI */ -+#define CCSR_GUTS_DMACR_DEV_IR 1 /* DMA controller/channel set to IR */ -+ -+/* -+ * Set the DMACR register in the GUTS -+ * -+ * The DMACR register determines the source of initiated transfers for each -+ * channel on each DMA controller. Rather than have a bunch of repetitive -+ * macros for the bit patterns, we just have a function that calculates -+ * them. -+ * -+ * guts: Pointer to GUTS structure -+ * co: The DMA controller (0 or 1) -+ * ch: The channel on the DMA controller (0, 1, 2, or 3) -+ * device: The device to set as the source (CCSR_GUTS_DMACR_DEV_xx) -+ */ -+static inline void guts_set_dmacr(struct ccsr_guts __iomem *guts, -+ unsigned int co, unsigned int ch, unsigned int device) -+{ -+ unsigned int shift = 16 + (8 * (1 - co) + 2 * (3 - ch)); -+ -+ clrsetbits_be32(&guts->dmacr, 3 << shift, device << shift); -+} -+ -+#define CCSR_GUTS_PMUXCR_LDPSEL 0x00010000 -+#define CCSR_GUTS_PMUXCR_SSI1_MASK 0x0000C000 /* Bitmask for SSI1 */ -+#define CCSR_GUTS_PMUXCR_SSI1_LA 0x00000000 /* Latched address */ -+#define CCSR_GUTS_PMUXCR_SSI1_HI 0x00004000 /* High impedance */ -+#define CCSR_GUTS_PMUXCR_SSI1_SSI 0x00008000 /* Used for SSI1 */ -+#define CCSR_GUTS_PMUXCR_SSI2_MASK 0x00003000 /* Bitmask for SSI2 */ -+#define CCSR_GUTS_PMUXCR_SSI2_LA 0x00000000 /* Latched address */ -+#define CCSR_GUTS_PMUXCR_SSI2_HI 0x00001000 /* High impedance */ -+#define CCSR_GUTS_PMUXCR_SSI2_SSI 0x00002000 /* Used for SSI2 */ -+#define CCSR_GUTS_PMUXCR_LA_22_25_LA 0x00000000 /* Latched Address */ -+#define CCSR_GUTS_PMUXCR_LA_22_25_HI 0x00000400 /* High impedance */ -+#define CCSR_GUTS_PMUXCR_DBGDRV 0x00000200 /* Signals not driven */ -+#define CCSR_GUTS_PMUXCR_DMA2_0 0x00000008 -+#define 
CCSR_GUTS_PMUXCR_DMA2_3 0x00000004 -+#define CCSR_GUTS_PMUXCR_DMA1_0 0x00000002 -+#define CCSR_GUTS_PMUXCR_DMA1_3 0x00000001 -+ -+/* -+ * Set the DMA external control bits in the GUTS -+ * -+ * The DMA external control bits in the PMUXCR are only meaningful for -+ * channels 0 and 3. Any other channels are ignored. -+ * -+ * guts: Pointer to GUTS structure -+ * co: The DMA controller (0 or 1) -+ * ch: The channel on the DMA controller (0, 1, 2, or 3) -+ * value: the new value for the bit (0 or 1) -+ */ -+static inline void guts_set_pmuxcr_dma(struct ccsr_guts __iomem *guts, -+ unsigned int co, unsigned int ch, unsigned int value) -+{ -+ if ((ch == 0) || (ch == 3)) { -+ unsigned int shift = 2 * (co + 1) - (ch & 1) - 1; -+ -+ clrsetbits_be32(&guts->pmuxcr, 1 << shift, value << shift); -+ } -+} -+ -+#define CCSR_GUTS_CLKDVDR_PXCKEN 0x80000000 -+#define CCSR_GUTS_CLKDVDR_SSICKEN 0x20000000 -+#define CCSR_GUTS_CLKDVDR_PXCKINV 0x10000000 -+#define CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT 25 -+#define CCSR_GUTS_CLKDVDR_PXCKDLY_MASK 0x06000000 -+#define CCSR_GUTS_CLKDVDR_PXCKDLY(x) \ -+ (((x) & 3) << CCSR_GUTS_CLKDVDR_PXCKDLY_SHIFT) -+#define CCSR_GUTS_CLKDVDR_PXCLK_SHIFT 16 -+#define CCSR_GUTS_CLKDVDR_PXCLK_MASK 0x001F0000 -+#define CCSR_GUTS_CLKDVDR_PXCLK(x) (((x) & 31) << CCSR_GUTS_CLKDVDR_PXCLK_SHIFT) -+#define CCSR_GUTS_CLKDVDR_SSICLK_MASK 0x000000FF -+#define CCSR_GUTS_CLKDVDR_SSICLK(x) ((x) & CCSR_GUTS_CLKDVDR_SSICLK_MASK) -+ -+#endif -+ -+#endif -diff --git a/include/linux/iommu.h b/include/linux/iommu.h -index e6a7c9f..04229cb 100644 ---- a/include/linux/iommu.h -+++ b/include/linux/iommu.h -@@ -21,13 +21,15 @@ - - #include - #include -+#include - #include -+#include - #include - - #define IOMMU_READ (1 << 0) - #define IOMMU_WRITE (1 << 1) - #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ --#define IOMMU_EXEC (1 << 3) -+#define IOMMU_NOEXEC (1 << 3) - - struct iommu_ops; - struct iommu_group; -@@ -49,9 +51,33 @@ struct iommu_domain_geometry { - bool force_aperture; 
/* DMA only allowed in mappable range? */ - }; - -+/* Domain feature flags */ -+#define __IOMMU_DOMAIN_PAGING (1U << 0) /* Support for iommu_map/unmap */ -+#define __IOMMU_DOMAIN_DMA_API (1U << 1) /* Domain for use in DMA-API -+ implementation */ -+#define __IOMMU_DOMAIN_PT (1U << 2) /* Domain is identity mapped */ -+ -+/* -+ * This are the possible domain-types -+ * -+ * IOMMU_DOMAIN_BLOCKED - All DMA is blocked, can be used to isolate -+ * devices -+ * IOMMU_DOMAIN_IDENTITY - DMA addresses are system physical addresses -+ * IOMMU_DOMAIN_UNMANAGED - DMA mappings managed by IOMMU-API user, used -+ * for VMs -+ * IOMMU_DOMAIN_DMA - Internally used for DMA-API implementations. -+ * This flag allows IOMMU drivers to implement -+ * certain optimizations for these domains -+ */ -+#define IOMMU_DOMAIN_BLOCKED (0U) -+#define IOMMU_DOMAIN_IDENTITY (__IOMMU_DOMAIN_PT) -+#define IOMMU_DOMAIN_UNMANAGED (__IOMMU_DOMAIN_PAGING) -+#define IOMMU_DOMAIN_DMA (__IOMMU_DOMAIN_PAGING | \ -+ __IOMMU_DOMAIN_DMA_API) -+ - struct iommu_domain { -+ unsigned type; - const struct iommu_ops *ops; -- void *priv; - iommu_fault_handler_t handler; - void *handler_token; - struct iommu_domain_geometry geometry; -@@ -61,6 +87,7 @@ enum iommu_cap { - IOMMU_CAP_CACHE_COHERENCY, /* IOMMU can enforce cache coherent DMA - transactions */ - IOMMU_CAP_INTR_REMAP, /* IOMMU supports interrupt isolation */ -+ IOMMU_CAP_NOEXEC, /* IOMMU_NOEXEC flag */ - }; - - /* -@@ -97,23 +124,32 @@ enum iommu_attr { - * @detach_dev: detach device from an iommu domain - * @map: map a physically contiguous memory region to an iommu domain - * @unmap: unmap a physically contiguous memory region from an iommu domain -+ * @map_sg: map a scatter-gather list of physically contiguous memory chunks -+ * to an iommu domain - * @iova_to_phys: translate iova to physical address - * @add_device: add device to iommu grouping - * @remove_device: remove device from iommu grouping - * @domain_get_attr: Query domain attributes - * 
@domain_set_attr: Change domain attributes -+ * @of_xlate: add OF master IDs to iommu grouping - * @pgsize_bitmap: bitmap of supported page sizes -+ * @priv: per-instance data private to the iommu driver - */ - struct iommu_ops { - bool (*capable)(enum iommu_cap); -- int (*domain_init)(struct iommu_domain *domain); -- void (*domain_destroy)(struct iommu_domain *domain); -+ -+ /* Domain allocation and freeing by the iommu driver */ -+ struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type); -+ void (*domain_free)(struct iommu_domain *); -+ - int (*attach_dev)(struct iommu_domain *domain, struct device *dev); - void (*detach_dev)(struct iommu_domain *domain, struct device *dev); - int (*map)(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); - size_t (*unmap)(struct iommu_domain *domain, unsigned long iova, - size_t size); -+ size_t (*map_sg)(struct iommu_domain *domain, unsigned long iova, -+ struct scatterlist *sg, unsigned int nents, int prot); - phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova); - int (*add_device)(struct device *dev); - void (*remove_device)(struct device *dev); -@@ -131,8 +167,14 @@ struct iommu_ops { - int (*domain_set_windows)(struct iommu_domain *domain, u32 w_count); - /* Get the numer of window per domain */ - u32 (*domain_get_windows)(struct iommu_domain *domain); -+ struct iommu_domain *(*get_dev_iommu_domain)(struct device *dev); -+ -+#ifdef CONFIG_OF_IOMMU -+ int (*of_xlate)(struct device *dev, struct of_phandle_args *args); -+#endif - - unsigned long pgsize_bitmap; -+ void *priv; - }; - - #define IOMMU_GROUP_NOTIFY_ADD_DEVICE 1 /* Device added */ -@@ -156,6 +198,9 @@ extern int iommu_map(struct iommu_domain *domain, unsigned long iova, - phys_addr_t paddr, size_t size, int prot); - extern size_t iommu_unmap(struct iommu_domain *domain, unsigned long iova, - size_t size); -+extern size_t default_iommu_map_sg(struct iommu_domain *domain, unsigned long iova, -+ 
struct scatterlist *sg,unsigned int nents, -+ int prot); - extern phys_addr_t iommu_iova_to_phys(struct iommu_domain *domain, dma_addr_t iova); - extern void iommu_set_fault_handler(struct iommu_domain *domain, - iommu_fault_handler_t handler, void *token); -@@ -200,6 +245,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr, - phys_addr_t offset, u64 size, - int prot); - extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr); -+ -+extern struct iommu_domain *iommu_get_dev_domain(struct device *dev); -+ - /** - * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework - * @domain: the iommu domain where the fault has happened -@@ -241,6 +289,13 @@ static inline int report_iommu_fault(struct iommu_domain *domain, - return ret; - } - -+static inline size_t iommu_map_sg(struct iommu_domain *domain, -+ unsigned long iova, struct scatterlist *sg, -+ unsigned int nents, int prot) -+{ -+ return domain->ops->map_sg(domain, iova, sg, nents, prot); -+} -+ - #else /* CONFIG_IOMMU_API */ - - struct iommu_ops {}; -@@ -293,6 +348,13 @@ static inline int iommu_unmap(struct iommu_domain *domain, unsigned long iova, - return -ENODEV; - } - -+static inline size_t iommu_map_sg(struct iommu_domain *domain, -+ unsigned long iova, struct scatterlist *sg, -+ unsigned int nents, int prot) -+{ -+ return -ENODEV; -+} -+ - static inline int iommu_domain_window_enable(struct iommu_domain *domain, - u32 wnd_nr, phys_addr_t paddr, - u64 size, int prot) -@@ -424,6 +486,11 @@ static inline void iommu_device_unlink(struct device *dev, struct device *link) - { - } - -+static inline struct iommu_domain *iommu_get_dev_domain(struct device *dev) -+{ -+ return NULL; -+} -+ - #endif /* CONFIG_IOMMU_API */ - - #endif /* __LINUX_IOMMU_H */ -diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h -new file mode 100644 -index 0000000..1c30014 ---- /dev/null -+++ b/include/linux/iopoll.h -@@ -0,0 +1,144 @@ -+/* -+ * Copyright (c) 
2012-2014 The Linux Foundation. All rights reserved. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 and -+ * only version 2 as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ */ -+ -+#ifndef _LINUX_IOPOLL_H -+#define _LINUX_IOPOLL_H -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/** -+ * readx_poll_timeout - Periodically poll an address until a condition is met or a timeout occurs -+ * @op: accessor function (takes @addr as its only argument) -+ * @addr: Address to poll -+ * @val: Variable to read the value into -+ * @cond: Break condition (usually involving @val) -+ * @sleep_us: Maximum time to sleep between reads in us (0 -+ * tight-loops). Should be less than ~20ms since usleep_range -+ * is used (see Documentation/timers/timers-howto.txt). -+ * @timeout_us: Timeout in us, 0 means never timeout -+ * -+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either -+ * case, the last read value at @addr is stored in @val. Must not -+ * be called from atomic context if sleep_us or timeout_us are used. -+ * -+ * When available, you'll probably want to use one of the specialized -+ * macros defined below rather than this macro directly. -+ */ -+#define readx_poll_timeout(op, addr, val, cond, sleep_us, timeout_us) \ -+({ \ -+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ -+ might_sleep_if(sleep_us); \ -+ for (;;) { \ -+ (val) = op(addr); \ -+ if (cond) \ -+ break; \ -+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ -+ (val) = op(addr); \ -+ break; \ -+ } \ -+ if (sleep_us) \ -+ usleep_range((sleep_us >> 2) + 1, sleep_us); \ -+ } \ -+ (cond) ? 
0 : -ETIMEDOUT; \ -+}) -+ -+/** -+ * readx_poll_timeout_atomic - Periodically poll an address until a condition is met or a timeout occurs -+ * @op: accessor function (takes @addr as its only argument) -+ * @addr: Address to poll -+ * @val: Variable to read the value into -+ * @cond: Break condition (usually involving @val) -+ * @delay_us: Time to udelay between reads in us (0 tight-loops). Should -+ * be less than ~10us since udelay is used (see -+ * Documentation/timers/timers-howto.txt). -+ * @timeout_us: Timeout in us, 0 means never timeout -+ * -+ * Returns 0 on success and -ETIMEDOUT upon a timeout. In either -+ * case, the last read value at @addr is stored in @val. -+ * -+ * When available, you'll probably want to use one of the specialized -+ * macros defined below rather than this macro directly. -+ */ -+#define readx_poll_timeout_atomic(op, addr, val, cond, delay_us, timeout_us) \ -+({ \ -+ ktime_t timeout = ktime_add_us(ktime_get(), timeout_us); \ -+ for (;;) { \ -+ (val) = op(addr); \ -+ if (cond) \ -+ break; \ -+ if (timeout_us && ktime_compare(ktime_get(), timeout) > 0) { \ -+ (val) = op(addr); \ -+ break; \ -+ } \ -+ if (delay_us) \ -+ udelay(delay_us); \ -+ } \ -+ (cond) ? 
0 : -ETIMEDOUT; \ -+}) -+ -+ -+#define readb_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readb, addr, val, cond, delay_us, timeout_us) -+ -+#define readb_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readb, addr, val, cond, delay_us, timeout_us) -+ -+#define readw_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readw, addr, val, cond, delay_us, timeout_us) -+ -+#define readw_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readw, addr, val, cond, delay_us, timeout_us) -+ -+#define readl_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readl, addr, val, cond, delay_us, timeout_us) -+ -+#define readl_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readl, addr, val, cond, delay_us, timeout_us) -+ -+#define readq_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readq, addr, val, cond, delay_us, timeout_us) -+ -+#define readq_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readq, addr, val, cond, delay_us, timeout_us) -+ -+#define readb_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readb_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readb_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readb_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readw_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readw_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readw_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readw_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readl_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readl_relaxed, addr, val, 
cond, delay_us, timeout_us) -+ -+#define readl_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readl_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readq_relaxed_poll_timeout(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout(readq_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#define readq_relaxed_poll_timeout_atomic(addr, val, cond, delay_us, timeout_us) \ -+ readx_poll_timeout_atomic(readq_relaxed, addr, val, cond, delay_us, timeout_us) -+ -+#endif /* _LINUX_IOPOLL_H */ -diff --git a/include/linux/irq.h b/include/linux/irq.h -index 03f48d9..9ba173b 100644 ---- a/include/linux/irq.h -+++ b/include/linux/irq.h -@@ -15,11 +15,13 @@ - #include - #include - #include -+#include - #include - #include - #include - #include - #include -+#include - - #include - #include -@@ -27,11 +29,7 @@ - - struct seq_file; - struct module; --struct irq_desc; --struct irq_data; --typedef void (*irq_flow_handler_t)(unsigned int irq, -- struct irq_desc *desc); --typedef void (*irq_preflow_handler_t)(struct irq_data *data); -+struct msi_msg; - - /* - * IRQ line status. -@@ -113,10 +111,14 @@ enum { - * - * IRQ_SET_MASK_OK - OK, core updates irq_data.affinity - * IRQ_SET_MASK_NOCPY - OK, chip did update irq_data.affinity -+ * IRQ_SET_MASK_OK_DONE - Same as IRQ_SET_MASK_OK for core. Special code to -+ * support stacked irqchips, which indicates skipping -+ * all descendent irqchips. - */ - enum { - IRQ_SET_MASK_OK = 0, - IRQ_SET_MASK_OK_NOCOPY, -+ IRQ_SET_MASK_OK_DONE, - }; - - struct msi_desc; -@@ -133,6 +135,8 @@ struct irq_domain; - * @chip: low level interrupt hardware access - * @domain: Interrupt translation domain; responsible for mapping - * between hwirq number and linux irq number. 
-+ * @parent_data: pointer to parent struct irq_data to support hierarchy -+ * irq_domain - * @handler_data: per-IRQ data for the irq_chip methods - * @chip_data: platform-specific per-chip private data for the chip - * methods, to allow shared chip implementations -@@ -151,6 +155,9 @@ struct irq_data { - unsigned int state_use_accessors; - struct irq_chip *chip; - struct irq_domain *domain; -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ struct irq_data *parent_data; -+#endif - void *handler_data; - void *chip_data; - struct msi_desc *msi_desc; -@@ -315,6 +322,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) - * any other callback related to this irq - * @irq_release_resources: optional to release resources acquired with - * irq_request_resources -+ * @irq_compose_msi_msg: optional to compose message content for MSI -+ * @irq_write_msi_msg: optional to write message content for MSI - * @flags: chip specific flags - */ - struct irq_chip { -@@ -351,6 +360,9 @@ struct irq_chip { - int (*irq_request_resources)(struct irq_data *data); - void (*irq_release_resources)(struct irq_data *data); - -+ void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg); -+ void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); -+ - unsigned long flags; - }; - -@@ -438,6 +450,18 @@ extern void handle_percpu_devid_irq(unsigned int irq, struct irq_desc *desc); - extern void handle_bad_irq(unsigned int irq, struct irq_desc *desc); - extern void handle_nested_irq(unsigned int irq); - -+extern int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg); -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+extern void irq_chip_ack_parent(struct irq_data *data); -+extern int irq_chip_retrigger_hierarchy(struct irq_data *data); -+extern void irq_chip_mask_parent(struct irq_data *data); -+extern void irq_chip_unmask_parent(struct irq_data *data); -+extern void irq_chip_eoi_parent(struct irq_data *data); -+extern int irq_chip_set_affinity_parent(struct 
irq_data *data, -+ const struct cpumask *dest, -+ bool force); -+#endif -+ - /* Handling of unhandled and spurious interrupts: */ - extern void note_interrupt(unsigned int irq, struct irq_desc *desc, - irqreturn_t action_ret); -@@ -582,7 +606,7 @@ static inline struct msi_desc *irq_get_msi_desc(unsigned int irq) - return d ? d->msi_desc : NULL; - } - --static inline struct msi_desc *irq_data_get_msi(struct irq_data *d) -+static inline struct msi_desc *irq_data_get_msi_desc(struct irq_data *d) - { - return d->msi_desc; - } -@@ -639,13 +663,6 @@ void arch_teardown_hwirq(unsigned int irq); - void irq_init_desc(unsigned int irq); - #endif - --#ifndef irq_reg_writel --# define irq_reg_writel(val, addr) writel(val, addr) --#endif --#ifndef irq_reg_readl --# define irq_reg_readl(addr) readl(addr) --#endif -- - /** - * struct irq_chip_regs - register offsets for struct irq_gci - * @enable: Enable register offset to reg_base -@@ -692,6 +709,8 @@ struct irq_chip_type { - * struct irq_chip_generic - Generic irq chip data structure - * @lock: Lock to protect register and cache data access - * @reg_base: Register base address (virtual) -+ * @reg_readl: Alternate I/O accessor (defaults to readl if NULL) -+ * @reg_writel: Alternate I/O accessor (defaults to writel if NULL) - * @irq_base: Interrupt base nr for this chip - * @irq_cnt: Number of interrupts handled by this chip - * @mask_cache: Cached mask register shared between all chip types -@@ -716,6 +735,8 @@ struct irq_chip_type { - struct irq_chip_generic { - raw_spinlock_t lock; - void __iomem *reg_base; -+ u32 (*reg_readl)(void __iomem *addr); -+ void (*reg_writel)(u32 val, void __iomem *addr); - unsigned int irq_base; - unsigned int irq_cnt; - u32 mask_cache; -@@ -740,12 +761,14 @@ struct irq_chip_generic { - * the parent irq. 
Usually GPIO implementations - * @IRQ_GC_MASK_CACHE_PER_TYPE: Mask cache is chip type private - * @IRQ_GC_NO_MASK: Do not calculate irq_data->mask -+ * @IRQ_GC_BE_IO: Use big-endian register accesses (default: LE) - */ - enum irq_gc_flags { - IRQ_GC_INIT_MASK_CACHE = 1 << 0, - IRQ_GC_INIT_NESTED_LOCK = 1 << 1, - IRQ_GC_MASK_CACHE_PER_TYPE = 1 << 2, - IRQ_GC_NO_MASK = 1 << 3, -+ IRQ_GC_BE_IO = 1 << 4, - }; - - /* -@@ -821,4 +844,22 @@ static inline void irq_gc_lock(struct irq_chip_generic *gc) { } - static inline void irq_gc_unlock(struct irq_chip_generic *gc) { } - #endif - -+static inline void irq_reg_writel(struct irq_chip_generic *gc, -+ u32 val, int reg_offset) -+{ -+ if (gc->reg_writel) -+ gc->reg_writel(val, gc->reg_base + reg_offset); -+ else -+ writel(val, gc->reg_base + reg_offset); -+} -+ -+static inline u32 irq_reg_readl(struct irq_chip_generic *gc, -+ int reg_offset) -+{ -+ if (gc->reg_readl) -+ return gc->reg_readl(gc->reg_base + reg_offset); -+ else -+ return readl(gc->reg_base + reg_offset); -+} -+ - #endif /* _LINUX_IRQ_H */ -diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h -index 03a4ea3..da1aa15 100644 ---- a/include/linux/irqchip/arm-gic-v3.h -+++ b/include/linux/irqchip/arm-gic-v3.h -@@ -49,6 +49,10 @@ - #define GICD_CTLR_ENABLE_G1A (1U << 1) - #define GICD_CTLR_ENABLE_G1 (1U << 0) - -+#define GICD_TYPER_ID_BITS(typer) ((((typer) >> 19) & 0x1f) + 1) -+#define GICD_TYPER_IRQS(typer) ((((typer) & 0x1f) + 1) * 32) -+#define GICD_TYPER_LPIS (1U << 17) -+ - #define GICD_IROUTER_SPI_MODE_ONE (0U << 31) - #define GICD_IROUTER_SPI_MODE_ANY (1U << 31) - -@@ -76,9 +80,42 @@ - #define GICR_MOVALLR 0x0110 - #define GICR_PIDR2 GICD_PIDR2 - -+#define GICR_CTLR_ENABLE_LPIS (1UL << 0) -+ -+#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff) -+ - #define GICR_WAKER_ProcessorSleep (1U << 1) - #define GICR_WAKER_ChildrenAsleep (1U << 2) - -+#define GICR_PROPBASER_NonShareable (0U << 10) -+#define GICR_PROPBASER_InnerShareable 
(1U << 10) -+#define GICR_PROPBASER_OuterShareable (2U << 10) -+#define GICR_PROPBASER_SHAREABILITY_MASK (3UL << 10) -+#define GICR_PROPBASER_nCnB (0U << 7) -+#define GICR_PROPBASER_nC (1U << 7) -+#define GICR_PROPBASER_RaWt (2U << 7) -+#define GICR_PROPBASER_RaWb (3U << 7) -+#define GICR_PROPBASER_WaWt (4U << 7) -+#define GICR_PROPBASER_WaWb (5U << 7) -+#define GICR_PROPBASER_RaWaWt (6U << 7) -+#define GICR_PROPBASER_RaWaWb (7U << 7) -+#define GICR_PROPBASER_CACHEABILITY_MASK (7U << 7) -+#define GICR_PROPBASER_IDBITS_MASK (0x1f) -+ -+#define GICR_PENDBASER_NonShareable (0U << 10) -+#define GICR_PENDBASER_InnerShareable (1U << 10) -+#define GICR_PENDBASER_OuterShareable (2U << 10) -+#define GICR_PENDBASER_SHAREABILITY_MASK (3UL << 10) -+#define GICR_PENDBASER_nCnB (0U << 7) -+#define GICR_PENDBASER_nC (1U << 7) -+#define GICR_PENDBASER_RaWt (2U << 7) -+#define GICR_PENDBASER_RaWb (3U << 7) -+#define GICR_PENDBASER_WaWt (4U << 7) -+#define GICR_PENDBASER_WaWb (5U << 7) -+#define GICR_PENDBASER_RaWaWt (6U << 7) -+#define GICR_PENDBASER_RaWaWb (7U << 7) -+#define GICR_PENDBASER_CACHEABILITY_MASK (7U << 7) -+ - /* - * Re-Distributor registers, offsets from SGI_base - */ -@@ -91,9 +128,100 @@ - #define GICR_IPRIORITYR0 GICD_IPRIORITYR - #define GICR_ICFGR0 GICD_ICFGR - -+#define GICR_TYPER_PLPIS (1U << 0) - #define GICR_TYPER_VLPIS (1U << 1) - #define GICR_TYPER_LAST (1U << 4) - -+#define LPI_PROP_GROUP1 (1 << 1) -+#define LPI_PROP_ENABLED (1 << 0) -+ -+/* -+ * ITS registers, offsets from ITS_base -+ */ -+#define GITS_CTLR 0x0000 -+#define GITS_IIDR 0x0004 -+#define GITS_TYPER 0x0008 -+#define GITS_CBASER 0x0080 -+#define GITS_CWRITER 0x0088 -+#define GITS_CREADR 0x0090 -+#define GITS_BASER 0x0100 -+#define GITS_PIDR2 GICR_PIDR2 -+ -+#define GITS_TRANSLATER 0x10040 -+ -+#define GITS_CTLR_ENABLE (1U << 0) -+#define GITS_CTLR_QUIESCENT (1U << 31) -+ -+#define GITS_TYPER_DEVBITS_SHIFT 13 -+#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1) 
-+#define GITS_TYPER_PTA (1UL << 19) -+ -+#define GITS_CBASER_VALID (1UL << 63) -+#define GITS_CBASER_nCnB (0UL << 59) -+#define GITS_CBASER_nC (1UL << 59) -+#define GITS_CBASER_RaWt (2UL << 59) -+#define GITS_CBASER_RaWb (3UL << 59) -+#define GITS_CBASER_WaWt (4UL << 59) -+#define GITS_CBASER_WaWb (5UL << 59) -+#define GITS_CBASER_RaWaWt (6UL << 59) -+#define GITS_CBASER_RaWaWb (7UL << 59) -+#define GITS_CBASER_CACHEABILITY_MASK (7UL << 59) -+#define GITS_CBASER_NonShareable (0UL << 10) -+#define GITS_CBASER_InnerShareable (1UL << 10) -+#define GITS_CBASER_OuterShareable (2UL << 10) -+#define GITS_CBASER_SHAREABILITY_MASK (3UL << 10) -+ -+#define GITS_BASER_NR_REGS 8 -+ -+#define GITS_BASER_VALID (1UL << 63) -+#define GITS_BASER_nCnB (0UL << 59) -+#define GITS_BASER_nC (1UL << 59) -+#define GITS_BASER_RaWt (2UL << 59) -+#define GITS_BASER_RaWb (3UL << 59) -+#define GITS_BASER_WaWt (4UL << 59) -+#define GITS_BASER_WaWb (5UL << 59) -+#define GITS_BASER_RaWaWt (6UL << 59) -+#define GITS_BASER_RaWaWb (7UL << 59) -+#define GITS_BASER_CACHEABILITY_MASK (7UL << 59) -+#define GITS_BASER_TYPE_SHIFT (56) -+#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7) -+#define GITS_BASER_ENTRY_SIZE_SHIFT (48) -+#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0xff) + 1) -+#define GITS_BASER_NonShareable (0UL << 10) -+#define GITS_BASER_InnerShareable (1UL << 10) -+#define GITS_BASER_OuterShareable (2UL << 10) -+#define GITS_BASER_SHAREABILITY_SHIFT (10) -+#define GITS_BASER_SHAREABILITY_MASK (3UL << GITS_BASER_SHAREABILITY_SHIFT) -+#define GITS_BASER_PAGE_SIZE_SHIFT (8) -+#define GITS_BASER_PAGE_SIZE_4K (0UL << GITS_BASER_PAGE_SIZE_SHIFT) -+#define GITS_BASER_PAGE_SIZE_16K (1UL << GITS_BASER_PAGE_SIZE_SHIFT) -+#define GITS_BASER_PAGE_SIZE_64K (2UL << GITS_BASER_PAGE_SIZE_SHIFT) -+#define GITS_BASER_PAGE_SIZE_MASK (3UL << GITS_BASER_PAGE_SIZE_SHIFT) -+ -+#define GITS_BASER_TYPE_NONE 0 -+#define GITS_BASER_TYPE_DEVICE 1 -+#define 
GITS_BASER_TYPE_VCPU 2 -+#define GITS_BASER_TYPE_CPU 3 -+#define GITS_BASER_TYPE_COLLECTION 4 -+#define GITS_BASER_TYPE_RESERVED5 5 -+#define GITS_BASER_TYPE_RESERVED6 6 -+#define GITS_BASER_TYPE_RESERVED7 7 -+ -+/* -+ * ITS commands -+ */ -+#define GITS_CMD_MAPD 0x08 -+#define GITS_CMD_MAPC 0x09 -+#define GITS_CMD_MAPVI 0x0a -+#define GITS_CMD_MOVI 0x01 -+#define GITS_CMD_DISCARD 0x0f -+#define GITS_CMD_INV 0x0c -+#define GITS_CMD_MOVALL 0x0e -+#define GITS_CMD_INVALL 0x0d -+#define GITS_CMD_INT 0x03 -+#define GITS_CMD_CLEAR 0x04 -+#define GITS_CMD_SYNC 0x05 -+ - /* - * CPU interface registers - */ -@@ -188,6 +316,24 @@ - #ifndef __ASSEMBLY__ - - #include -+#include -+ -+/* -+ * We need a value to serve as a irq-type for LPIs. Choose one that will -+ * hopefully pique the interest of the reviewer. -+ */ -+#define GIC_IRQ_TYPE_LPI 0xa110c8ed -+ -+struct rdists { -+ struct { -+ void __iomem *rd_base; -+ struct page *pend_page; -+ phys_addr_t phys_base; -+ } __percpu *rdist; -+ struct page *prop_page; -+ int id_bits; -+ u64 flags; -+}; - - static inline void gic_write_eoir(u64 irq) - { -@@ -195,6 +341,13 @@ static inline void gic_write_eoir(u64 irq) - isb(); - } - -+struct irq_domain; -+int its_cpu_init(void); -+int its_init(struct device_node *node, struct rdists *rdists, -+ struct irq_domain *domain); -+int __its_msi_prepare(struct irq_domain *domain, u32 dev_id, -+ struct device *dev, int nvec, msi_alloc_info_t *info); -+ - #endif - - #endif -diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h -index b0f9d16..ebace05 100644 ---- a/include/linux/irqdomain.h -+++ b/include/linux/irqdomain.h -@@ -33,15 +33,31 @@ - #define _LINUX_IRQDOMAIN_H - - #include -+#include - #include - - struct device_node; - struct irq_domain; - struct of_device_id; -+struct irq_chip; -+struct irq_data; - - /* Number of irqs reserved for a legacy isa controller */ - #define NUM_ISA_INTERRUPTS 16 - -+/* -+ * Should several domains have the same device node, but serve -+ * 
different purposes (for example one domain is for PCI/MSI, and the -+ * other for wired IRQs), they can be distinguished using a -+ * bus-specific token. Most domains are expected to only carry -+ * DOMAIN_BUS_ANY. -+ */ -+enum irq_domain_bus_token { -+ DOMAIN_BUS_ANY = 0, -+ DOMAIN_BUS_PCI_MSI, -+ DOMAIN_BUS_PLATFORM_MSI, -+}; -+ - /** - * struct irq_domain_ops - Methods for irq_domain objects - * @match: Match an interrupt controller device node to a host, returns -@@ -58,12 +74,23 @@ struct of_device_id; - * to setup the irq_desc when returning from map(). - */ - struct irq_domain_ops { -- int (*match)(struct irq_domain *d, struct device_node *node); -+ int (*match)(struct irq_domain *d, struct device_node *node, -+ enum irq_domain_bus_token bus_token); - int (*map)(struct irq_domain *d, unsigned int virq, irq_hw_number_t hw); - void (*unmap)(struct irq_domain *d, unsigned int virq); - int (*xlate)(struct irq_domain *d, struct device_node *node, - const u32 *intspec, unsigned int intsize, - unsigned long *out_hwirq, unsigned int *out_type); -+ -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ /* extended V2 interfaces to support hierarchy irq_domains */ -+ int (*alloc)(struct irq_domain *d, unsigned int virq, -+ unsigned int nr_irqs, void *arg); -+ void (*free)(struct irq_domain *d, unsigned int virq, -+ unsigned int nr_irqs); -+ void (*activate)(struct irq_domain *d, struct irq_data *irq_data); -+ void (*deactivate)(struct irq_domain *d, struct irq_data *irq_data); -+#endif - }; - - extern struct irq_domain_ops irq_generic_chip_ops; -@@ -77,6 +104,7 @@ struct irq_domain_chip_generic; - * @ops: pointer to irq_domain methods - * @host_data: private data pointer for use by owner. Not touched by irq_domain - * core code. -+ * @flags: host per irq_domain flags - * - * Optional elements - * @of_node: Pointer to device tree nodes associated with the irq_domain. Used -@@ -84,6 +112,7 @@ struct irq_domain_chip_generic; - * @gc: Pointer to a list of generic chips. 
There is a helper function for - * setting up one or more generic chips for interrupt controllers - * drivers using the generic chip library which uses this pointer. -+ * @parent: Pointer to parent irq_domain to support hierarchy irq_domains - * - * Revmap data, used internally by irq_domain - * @revmap_direct_max_irq: The largest hwirq that can be set for controllers that -@@ -97,10 +126,15 @@ struct irq_domain { - const char *name; - const struct irq_domain_ops *ops; - void *host_data; -+ unsigned int flags; - - /* Optional data */ - struct device_node *of_node; -+ enum irq_domain_bus_token bus_token; - struct irq_domain_chip_generic *gc; -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ struct irq_domain *parent; -+#endif - - /* reverse map data. The linear map gets appended to the irq_domain */ - irq_hw_number_t hwirq_max; -@@ -110,6 +144,22 @@ struct irq_domain { - unsigned int linear_revmap[]; - }; - -+/* Irq domain flags */ -+enum { -+ /* Irq domain is hierarchical */ -+ IRQ_DOMAIN_FLAG_HIERARCHY = (1 << 0), -+ -+ /* Core calls alloc/free recursive through the domain hierarchy. */ -+ IRQ_DOMAIN_FLAG_AUTO_RECURSIVE = (1 << 1), -+ -+ /* -+ * Flags starting from IRQ_DOMAIN_FLAG_NONCORE are reserved -+ * for implementation specific purposes and ignored by the -+ * core code. 
-+ */ -+ IRQ_DOMAIN_FLAG_NONCORE = (1 << 16), -+}; -+ - #ifdef CONFIG_IRQ_DOMAIN - struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, - irq_hw_number_t hwirq_max, int direct_max, -@@ -126,9 +176,15 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, - irq_hw_number_t first_hwirq, - const struct irq_domain_ops *ops, - void *host_data); --extern struct irq_domain *irq_find_host(struct device_node *node); -+extern struct irq_domain *irq_find_matching_host(struct device_node *node, -+ enum irq_domain_bus_token bus_token); - extern void irq_set_default_host(struct irq_domain *host); - -+static inline struct irq_domain *irq_find_host(struct device_node *node) -+{ -+ return irq_find_matching_host(node, DOMAIN_BUS_ANY); -+} -+ - /** - * irq_domain_add_linear() - Allocate and register a linear revmap irq_domain. - * @of_node: pointer to interrupt controller's device tree node. -@@ -220,8 +276,74 @@ int irq_domain_xlate_onetwocell(struct irq_domain *d, struct device_node *ctrlr, - const u32 *intspec, unsigned int intsize, - irq_hw_number_t *out_hwirq, unsigned int *out_type); - -+/* V2 interfaces to support hierarchy IRQ domains. 
*/ -+extern struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, -+ unsigned int virq); -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+extern struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, -+ unsigned int flags, unsigned int size, -+ struct device_node *node, -+ const struct irq_domain_ops *ops, void *host_data); -+extern int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, -+ unsigned int nr_irqs, int node, void *arg, -+ bool realloc); -+extern void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs); -+extern void irq_domain_activate_irq(struct irq_data *irq_data); -+extern void irq_domain_deactivate_irq(struct irq_data *irq_data); -+ -+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, -+ unsigned int nr_irqs, int node, void *arg) -+{ -+ return __irq_domain_alloc_irqs(domain, -1, nr_irqs, node, arg, false); -+} -+ -+extern int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, -+ unsigned int virq, -+ irq_hw_number_t hwirq, -+ struct irq_chip *chip, -+ void *chip_data); -+extern void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, -+ irq_hw_number_t hwirq, struct irq_chip *chip, -+ void *chip_data, irq_flow_handler_t handler, -+ void *handler_data, const char *handler_name); -+extern void irq_domain_reset_irq_data(struct irq_data *irq_data); -+extern void irq_domain_free_irqs_common(struct irq_domain *domain, -+ unsigned int virq, -+ unsigned int nr_irqs); -+extern void irq_domain_free_irqs_top(struct irq_domain *domain, -+ unsigned int virq, unsigned int nr_irqs); -+ -+extern int irq_domain_alloc_irqs_parent(struct irq_domain *domain, -+ unsigned int irq_base, -+ unsigned int nr_irqs, void *arg); -+ -+extern void irq_domain_free_irqs_parent(struct irq_domain *domain, -+ unsigned int irq_base, -+ unsigned int nr_irqs); -+ -+static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) -+{ -+ return domain->flags & IRQ_DOMAIN_FLAG_HIERARCHY; -+} -+#else /* 
CONFIG_IRQ_DOMAIN_HIERARCHY */ -+static inline void irq_domain_activate_irq(struct irq_data *data) { } -+static inline void irq_domain_deactivate_irq(struct irq_data *data) { } -+static inline int irq_domain_alloc_irqs(struct irq_domain *domain, -+ unsigned int nr_irqs, int node, void *arg) -+{ -+ return -1; -+} -+ -+static inline bool irq_domain_is_hierarchy(struct irq_domain *domain) -+{ -+ return false; -+} -+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ -+ - #else /* CONFIG_IRQ_DOMAIN */ - static inline void irq_dispose_mapping(unsigned int virq) { } -+static inline void irq_domain_activate_irq(struct irq_data *data) { } -+static inline void irq_domain_deactivate_irq(struct irq_data *data) { } - #endif /* !CONFIG_IRQ_DOMAIN */ - - #endif /* _LINUX_IRQDOMAIN_H */ -diff --git a/include/linux/irqhandler.h b/include/linux/irqhandler.h -new file mode 100644 -index 0000000..62d5430 ---- /dev/null -+++ b/include/linux/irqhandler.h -@@ -0,0 +1,14 @@ -+#ifndef _LINUX_IRQHANDLER_H -+#define _LINUX_IRQHANDLER_H -+ -+/* -+ * Interrupt flow handler typedefs are defined here to avoid circular -+ * include dependencies. 
-+ */ -+ -+struct irq_desc; -+struct irq_data; -+typedef void (*irq_flow_handler_t)(unsigned int irq, struct irq_desc *desc); -+typedef void (*irq_preflow_handler_t)(struct irq_data *data); -+ -+#endif -diff --git a/include/linux/msi.h b/include/linux/msi.h -index 44f4746..788d65b 100644 ---- a/include/linux/msi.h -+++ b/include/linux/msi.h -@@ -10,17 +10,13 @@ struct msi_msg { - u32 data; /* 16 bits of msi message data */ - }; - -+extern int pci_msi_ignore_mask; - /* Helper functions */ - struct irq_data; - struct msi_desc; --void mask_msi_irq(struct irq_data *data); --void unmask_msi_irq(struct irq_data *data); --void __read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -+struct pci_dev; - void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg); --void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); --void read_msi_msg(unsigned int irq, struct msi_msg *msg); - void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg); --void write_msi_msg(unsigned int irq, struct msi_msg *msg); - - struct msi_desc { - struct { -@@ -42,12 +38,63 @@ struct msi_desc { - void __iomem *mask_base; - u8 mask_pos; - }; -- struct pci_dev *dev; -+ struct device *dev; - - /* Last set MSI message */ - struct msi_msg msg; - }; - -+/* Helpers to hide struct msi_desc implementation details */ -+#define msi_desc_to_dev(desc) ((desc)->dev) -+#define dev_to_msi_list(dev) (&(dev)->msi_list) -+#define first_msi_entry(dev) \ -+ list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list) -+#define for_each_msi_entry(desc, dev) \ -+ list_for_each_entry((desc), dev_to_msi_list((dev)), list) -+ -+#ifdef CONFIG_PCI_MSI -+#define first_pci_msi_entry(pdev) first_msi_entry(&(pdev)->dev) -+#define for_each_pci_msi_entry(desc, pdev) \ -+ for_each_msi_entry((desc), &(pdev)->dev) -+ -+struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc); -+void *msi_desc_to_pci_sysdata(struct msi_desc *desc); -+#else /* CONFIG_PCI_MSI */ -+static inline void 
*msi_desc_to_pci_sysdata(struct msi_desc *desc) -+{ -+ return NULL; -+} -+#endif /* CONFIG_PCI_MSI */ -+ -+struct msi_desc *alloc_msi_entry(struct device *dev); -+void free_msi_entry(struct msi_desc *entry); -+void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -+void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg); -+void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg); -+ -+u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag); -+u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); -+void pci_msi_mask_irq(struct irq_data *data); -+void pci_msi_unmask_irq(struct irq_data *data); -+ -+/* Conversion helpers. Should be removed after merging */ -+static inline void __write_msi_msg(struct msi_desc *entry, struct msi_msg *msg) -+{ -+ __pci_write_msi_msg(entry, msg); -+} -+static inline void write_msi_msg(int irq, struct msi_msg *msg) -+{ -+ pci_write_msi_msg(irq, msg); -+} -+static inline void mask_msi_irq(struct irq_data *data) -+{ -+ pci_msi_mask_irq(data); -+} -+static inline void unmask_msi_irq(struct irq_data *data) -+{ -+ pci_msi_unmask_irq(data); -+} -+ - /* - * The arch hooks to setup up msi irqs. 
Those functions are - * implemented as weak symbols so that they /can/ be overriden by -@@ -61,18 +108,146 @@ void arch_restore_msi_irqs(struct pci_dev *dev); - - void default_teardown_msi_irqs(struct pci_dev *dev); - void default_restore_msi_irqs(struct pci_dev *dev); --u32 default_msi_mask_irq(struct msi_desc *desc, u32 mask, u32 flag); --u32 default_msix_mask_irq(struct msi_desc *desc, u32 flag); -+#define default_msi_mask_irq __msi_mask_irq -+#define default_msix_mask_irq __msix_mask_irq - --struct msi_chip { -+struct msi_controller { - struct module *owner; - struct device *dev; - struct device_node *of_node; - struct list_head list; -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+ struct irq_domain *domain; -+#endif - -- int (*setup_irq)(struct msi_chip *chip, struct pci_dev *dev, -+ int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev, - struct msi_desc *desc); -- void (*teardown_irq)(struct msi_chip *chip, unsigned int irq); -+ int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev, -+ int nvec, int type); -+ void (*teardown_irq)(struct msi_controller *chip, unsigned int irq); - }; - -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+ -+#include -+#include -+ -+struct irq_domain; -+struct irq_chip; -+struct device_node; -+struct msi_domain_info; -+ -+/** -+ * struct msi_domain_ops - MSI interrupt domain callbacks -+ * @get_hwirq: Retrieve the resulting hw irq number -+ * @msi_init: Domain specific init function for MSI interrupts -+ * @msi_free: Domain specific function to free a MSI interrupts -+ * @msi_check: Callback for verification of the domain/info/dev data -+ * @msi_prepare: Prepare the allocation of the interrupts in the domain -+ * @msi_finish: Optional callbacl to finalize the allocation -+ * @set_desc: Set the msi descriptor for an interrupt -+ * @handle_error: Optional error handler if the allocation fails -+ * -+ * @get_hwirq, @msi_init and @msi_free are callbacks used by -+ * msi_create_irq_domain() and related interfaces -+ * -+ * 
@msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error -+ * are callbacks used by msi_irq_domain_alloc_irqs() and related -+ * interfaces which are based on msi_desc. -+ */ -+struct msi_domain_ops { -+ irq_hw_number_t (*get_hwirq)(struct msi_domain_info *info, -+ msi_alloc_info_t *arg); -+ int (*msi_init)(struct irq_domain *domain, -+ struct msi_domain_info *info, -+ unsigned int virq, irq_hw_number_t hwirq, -+ msi_alloc_info_t *arg); -+ void (*msi_free)(struct irq_domain *domain, -+ struct msi_domain_info *info, -+ unsigned int virq); -+ int (*msi_check)(struct irq_domain *domain, -+ struct msi_domain_info *info, -+ struct device *dev); -+ int (*msi_prepare)(struct irq_domain *domain, -+ struct device *dev, int nvec, -+ msi_alloc_info_t *arg); -+ void (*msi_finish)(msi_alloc_info_t *arg, int retval); -+ void (*set_desc)(msi_alloc_info_t *arg, -+ struct msi_desc *desc); -+ int (*handle_error)(struct irq_domain *domain, -+ struct msi_desc *desc, int error); -+}; -+ -+/** -+ * struct msi_domain_info - MSI interrupt domain data -+ * @flags: Flags to decribe features and capabilities -+ * @ops: The callback data structure -+ * @chip: Optional: associated interrupt chip -+ * @chip_data: Optional: associated interrupt chip data -+ * @handler: Optional: associated interrupt flow handler -+ * @handler_data: Optional: associated interrupt flow handler data -+ * @handler_name: Optional: associated interrupt flow handler name -+ * @data: Optional: domain specific data -+ */ -+struct msi_domain_info { -+ u32 flags; -+ struct msi_domain_ops *ops; -+ struct irq_chip *chip; -+ void *chip_data; -+ irq_flow_handler_t handler; -+ void *handler_data; -+ const char *handler_name; -+ void *data; -+}; -+ -+/* Flags for msi_domain_info */ -+enum { -+ /* -+ * Init non implemented ops callbacks with default MSI domain -+ * callbacks. -+ */ -+ MSI_FLAG_USE_DEF_DOM_OPS = (1 << 0), -+ /* -+ * Init non implemented chip callbacks with default MSI chip -+ * callbacks. 
-+ */ -+ MSI_FLAG_USE_DEF_CHIP_OPS = (1 << 1), -+ /* Build identity map between hwirq and irq */ -+ MSI_FLAG_IDENTITY_MAP = (1 << 2), -+ /* Support multiple PCI MSI interrupts */ -+ MSI_FLAG_MULTI_PCI_MSI = (1 << 3), -+ /* Support PCI MSIX interrupts */ -+ MSI_FLAG_PCI_MSIX = (1 << 4), -+}; -+ -+int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask, -+ bool force); -+ -+struct irq_domain *msi_create_irq_domain(struct device_node *of_node, -+ struct msi_domain_info *info, -+ struct irq_domain *parent); -+int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, -+ int nvec); -+void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev); -+struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain); -+ -+#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ -+ -+#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN -+void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg); -+struct irq_domain *pci_msi_create_irq_domain(struct device_node *node, -+ struct msi_domain_info *info, -+ struct irq_domain *parent); -+int pci_msi_domain_alloc_irqs(struct irq_domain *domain, struct pci_dev *dev, -+ int nvec, int type); -+void pci_msi_domain_free_irqs(struct irq_domain *domain, struct pci_dev *dev); -+struct irq_domain *pci_msi_create_default_irq_domain(struct device_node *node, -+ struct msi_domain_info *info, struct irq_domain *parent); -+ -+irq_hw_number_t pci_msi_domain_calc_hwirq(struct pci_dev *dev, -+ struct msi_desc *desc); -+int pci_msi_domain_check_cap(struct irq_domain *domain, -+ struct msi_domain_info *info, struct device *dev); -+#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */ -+ - #endif /* LINUX_MSI_H */ -diff --git a/include/linux/of_device.h b/include/linux/of_device.h -index ef37021..22801b1 100644 ---- a/include/linux/of_device.h -+++ b/include/linux/of_device.h -@@ -53,6 +53,7 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) - return of_node_get(cpu_dev->of_node); - } - -+void 
of_dma_configure(struct device *dev, struct device_node *np); - #else /* CONFIG_OF */ - - static inline int of_driver_match_device(struct device *dev, -@@ -90,6 +91,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu) - { - return NULL; - } -+static inline void of_dma_configure(struct device *dev, struct device_node *np) -+{} - #endif /* CONFIG_OF */ - - #endif /* _LINUX_OF_DEVICE_H */ -diff --git a/include/linux/of_iommu.h b/include/linux/of_iommu.h -index 51a560f..ffbe470 100644 ---- a/include/linux/of_iommu.h -+++ b/include/linux/of_iommu.h -@@ -1,12 +1,20 @@ - #ifndef __OF_IOMMU_H - #define __OF_IOMMU_H - -+#include -+#include -+#include -+ - #ifdef CONFIG_OF_IOMMU - - extern int of_get_dma_window(struct device_node *dn, const char *prefix, - int index, unsigned long *busno, dma_addr_t *addr, - size_t *size); - -+extern void of_iommu_init(void); -+extern struct iommu_ops *of_iommu_configure(struct device *dev, -+ struct device_node *master_np); -+ - #else - - static inline int of_get_dma_window(struct device_node *dn, const char *prefix, -@@ -16,6 +24,23 @@ static inline int of_get_dma_window(struct device_node *dn, const char *prefix, - return -EINVAL; - } - -+static inline void of_iommu_init(void) { } -+static inline struct iommu_ops *of_iommu_configure(struct device *dev, -+ struct device_node *master_np) -+{ -+ return NULL; -+} -+ - #endif /* CONFIG_OF_IOMMU */ - -+void of_iommu_set_ops(struct device_node *np, struct iommu_ops *ops); -+struct iommu_ops *of_iommu_get_ops(struct device_node *np); -+ -+extern struct of_device_id __iommu_of_table; -+ -+typedef int (*of_iommu_init_fn)(struct device_node *); -+ -+#define IOMMU_OF_DECLARE(name, compat, fn) \ -+ _OF_DECLARE(iommu, name, compat, fn, of_iommu_init_fn) -+ - #endif /* __OF_IOMMU_H */ -diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h -index bfec136..563ad28 100644 ---- a/include/linux/of_irq.h -+++ b/include/linux/of_irq.h -@@ -69,6 +69,7 @@ static inline int 
of_irq_get_byname(struct device_node *dev, const char *name) - */ - extern unsigned int irq_of_parse_and_map(struct device_node *node, int index); - extern struct device_node *of_irq_find_parent(struct device_node *child); -+extern void of_msi_configure(struct device *dev, struct device_node *np); - - #else /* !CONFIG_OF */ - static inline unsigned int irq_of_parse_and_map(struct device_node *dev, -diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h -index 1fd207e..29fd3fe 100644 ---- a/include/linux/of_pci.h -+++ b/include/linux/of_pci.h -@@ -16,6 +16,7 @@ int of_pci_get_devfn(struct device_node *np); - int of_irq_parse_and_map_pci(const struct pci_dev *dev, u8 slot, u8 pin); - int of_pci_parse_bus_range(struct device_node *node, struct resource *res); - int of_get_pci_domain_nr(struct device_node *node); -+void of_pci_dma_configure(struct pci_dev *pci_dev); - #else - static inline int of_irq_parse_pci(const struct pci_dev *pdev, struct of_phandle_args *out_irq) - { -@@ -50,6 +51,8 @@ of_get_pci_domain_nr(struct device_node *node) - { - return -1; - } -+ -+static inline void of_pci_dma_configure(struct pci_dev *pci_dev) { } - #endif - - #if defined(CONFIG_OF_ADDRESS) -@@ -59,13 +62,13 @@ int of_pci_get_host_bridge_resources(struct device_node *dev, - #endif - - #if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI) --int of_pci_msi_chip_add(struct msi_chip *chip); --void of_pci_msi_chip_remove(struct msi_chip *chip); --struct msi_chip *of_pci_find_msi_chip_by_node(struct device_node *of_node); -+int of_pci_msi_chip_add(struct msi_controller *chip); -+void of_pci_msi_chip_remove(struct msi_controller *chip); -+struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node); - #else --static inline int of_pci_msi_chip_add(struct msi_chip *chip) { return -EINVAL; } --static inline void of_pci_msi_chip_remove(struct msi_chip *chip) { } --static inline struct msi_chip * -+static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return 
-EINVAL; } -+static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { } -+static inline struct msi_controller * - of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; } - #endif - -diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h -index c2b0627..8a860f0 100644 ---- a/include/linux/of_platform.h -+++ b/include/linux/of_platform.h -@@ -84,4 +84,10 @@ static inline int of_platform_populate(struct device_node *root, - static inline void of_platform_depopulate(struct device *parent) { } - #endif - -+#ifdef CONFIG_OF_DYNAMIC -+extern void of_platform_register_reconfig_notifier(void); -+#else -+static inline void of_platform_register_reconfig_notifier(void) { } -+#endif -+ - #endif /* _LINUX_OF_PLATFORM_H */ -diff --git a/include/linux/pci.h b/include/linux/pci.h -index 7a34844..a99f301 100644 ---- a/include/linux/pci.h -+++ b/include/linux/pci.h -@@ -29,6 +29,7 @@ - #include - #include - #include -+#include - #include - - #include -@@ -171,8 +172,8 @@ enum pci_dev_flags { - PCI_DEV_FLAGS_ASSIGNED = (__force pci_dev_flags_t) (1 << 2), - /* Flag for quirk use to store if quirk-specific ACS is enabled */ - PCI_DEV_FLAGS_ACS_ENABLED_QUIRK = (__force pci_dev_flags_t) (1 << 3), -- /* Flag to indicate the device uses dma_alias_devfn */ -- PCI_DEV_FLAGS_DMA_ALIAS_DEVFN = (__force pci_dev_flags_t) (1 << 4), -+ /* Flag to indicate the device uses dma_alias_devid */ -+ PCI_DEV_FLAGS_DMA_ALIAS_DEVID = (__force pci_dev_flags_t) (1 << 4), - /* Use a PCIe-to-PCI bridge alias even if !pci_is_pcie */ - PCI_DEV_FLAG_PCIE_BRIDGE_ALIAS = (__force pci_dev_flags_t) (1 << 5), - /* Do not use bus resets for device */ -@@ -278,7 +279,7 @@ struct pci_dev { - u8 rom_base_reg; /* which config register controls the ROM */ - u8 pin; /* which interrupt pin this device uses */ - u16 pcie_flags_reg; /* cached PCIe Capabilities Register */ -- u8 dma_alias_devfn;/* devfn of DMA alias, if any */ -+ u32 dma_alias_devid;/* devid of DMA alias */ - - 
struct pci_driver *driver; /* which driver has allocated this device */ - u64 dma_mask; /* Mask of the bits of bus address this -@@ -365,7 +366,6 @@ struct pci_dev { - struct bin_attribute *res_attr[DEVICE_COUNT_RESOURCE]; /* sysfs file for resources */ - struct bin_attribute *res_attr_wc[DEVICE_COUNT_RESOURCE]; /* sysfs file for WC mapping of resources */ - #ifdef CONFIG_PCI_MSI -- struct list_head msi_list; - const struct attribute_group **msi_irq_groups; - #endif - struct pci_vpd *vpd; -@@ -400,16 +400,10 @@ static inline int pci_channel_offline(struct pci_dev *pdev) - return (pdev->error_state != pci_channel_io_normal); - } - --struct pci_host_bridge_window { -- struct list_head list; -- struct resource *res; /* host bridge aperture (CPU address) */ -- resource_size_t offset; /* bus address + offset = CPU address */ --}; -- - struct pci_host_bridge { - struct device dev; - struct pci_bus *bus; /* root bus */ -- struct list_head windows; /* pci_host_bridge_windows */ -+ struct list_head windows; /* resource_entry */ - void (*release_fn)(struct pci_host_bridge *); - void *release_data; - }; -@@ -456,7 +450,7 @@ struct pci_bus { - struct resource busn_res; /* bus numbers routed to this bus */ - - struct pci_ops *ops; /* configuration access functions */ -- struct msi_chip *msi; /* MSI controller */ -+ struct msi_controller *msi; /* MSI controller */ - void *sysdata; /* hook for sys-specific extension */ - struct proc_dir_entry *procdir; /* directory entry in /proc/bus/pci */ - -@@ -516,6 +510,9 @@ static inline struct pci_dev *pci_upstream_bridge(struct pci_dev *dev) - return dev->bus->self; - } - -+struct device *pci_get_host_bridge_device(struct pci_dev *dev); -+void pci_put_host_bridge_device(struct device *dev); -+ - #ifdef CONFIG_PCI_MSI - static inline bool pci_dev_msi_enabled(struct pci_dev *pci_dev) - { -diff --git a/include/linux/resource_ext.h b/include/linux/resource_ext.h -new file mode 100644 -index 0000000..e2bf63d ---- /dev/null -+++ 
b/include/linux/resource_ext.h -@@ -0,0 +1,77 @@ -+/* -+ * Copyright (C) 2015, Intel Corporation -+ * Author: Jiang Liu -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms and conditions of the GNU General Public License, -+ * version 2, as published by the Free Software Foundation. -+ * -+ * This program is distributed in the hope it will be useful, but WITHOUT -+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or -+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for -+ * more details. -+ */ -+#ifndef _LINUX_RESOURCE_EXT_H -+#define _LINUX_RESOURCE_EXT_H -+#include -+#include -+#include -+#include -+ -+/* Represent resource window for bridge devices */ -+struct resource_win { -+ struct resource res; /* In master (CPU) address space */ -+ resource_size_t offset; /* Translation offset for bridge */ -+}; -+ -+/* -+ * Common resource list management data structure and interfaces to support -+ * ACPI, PNP and PCI host bridge etc. 
-+ */ -+struct resource_entry { -+ struct list_head node; -+ struct resource *res; /* In master (CPU) address space */ -+ resource_size_t offset; /* Translation offset for bridge */ -+ struct resource __res; /* Default storage for res */ -+}; -+ -+extern struct resource_entry * -+resource_list_create_entry(struct resource *res, size_t extra_size); -+extern void resource_list_free(struct list_head *head); -+ -+static inline void resource_list_add(struct resource_entry *entry, -+ struct list_head *head) -+{ -+ list_add(&entry->node, head); -+} -+ -+static inline void resource_list_add_tail(struct resource_entry *entry, -+ struct list_head *head) -+{ -+ list_add_tail(&entry->node, head); -+} -+ -+static inline void resource_list_del(struct resource_entry *entry) -+{ -+ list_del(&entry->node); -+} -+ -+static inline void resource_list_free_entry(struct resource_entry *entry) -+{ -+ kfree(entry); -+} -+ -+static inline void -+resource_list_destroy_entry(struct resource_entry *entry) -+{ -+ resource_list_del(entry); -+ resource_list_free_entry(entry); -+} -+ -+#define resource_list_for_each_entry(entry, list) \ -+ list_for_each_entry((entry), (list), node) -+ -+#define resource_list_for_each_entry_safe(entry, tmp, list) \ -+ list_for_each_entry_safe((entry), (tmp), (list), node) -+ -+#endif /* _LINUX_RESOURCE_EXT_H */ -diff --git a/include/linux/usb/quirks.h b/include/linux/usb/quirks.h -index 9948c87..1d0043d 100644 ---- a/include/linux/usb/quirks.h -+++ b/include/linux/usb/quirks.h -@@ -47,4 +47,7 @@ - /* device generates spurious wakeup, ignore remote wakeup capability */ - #define USB_QUIRK_IGNORE_REMOTE_WAKEUP BIT(9) - -+/* device can't handle Link Power Management */ -+#define USB_QUIRK_NO_LPM BIT(10) -+ - #endif /* __LINUX_USB_QUIRKS_H */ -diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h -index a8f5c32..2c7befb 100644 ---- a/include/trace/events/iommu.h -+++ b/include/trace/events/iommu.h -@@ -83,7 +83,7 @@ DEFINE_EVENT(iommu_device_event, 
detach_device_from_domain, - TP_ARGS(dev) - ); - --DECLARE_EVENT_CLASS(iommu_map_unmap, -+TRACE_EVENT(map, - - TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), - -@@ -92,7 +92,7 @@ DECLARE_EVENT_CLASS(iommu_map_unmap, - TP_STRUCT__entry( - __field(u64, iova) - __field(u64, paddr) -- __field(int, size) -+ __field(size_t, size) - ), - - TP_fast_assign( -@@ -101,26 +101,31 @@ DECLARE_EVENT_CLASS(iommu_map_unmap, - __entry->size = size; - ), - -- TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=0x%x", -+ TP_printk("IOMMU: iova=0x%016llx paddr=0x%016llx size=%zu", - __entry->iova, __entry->paddr, __entry->size - ) - ); - --DEFINE_EVENT(iommu_map_unmap, map, -+TRACE_EVENT(unmap, - -- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), -- -- TP_ARGS(iova, paddr, size) --); -+ TP_PROTO(unsigned long iova, size_t size, size_t unmapped_size), - --DEFINE_EVENT_PRINT(iommu_map_unmap, unmap, -+ TP_ARGS(iova, size, unmapped_size), - -- TP_PROTO(unsigned long iova, phys_addr_t paddr, size_t size), -+ TP_STRUCT__entry( -+ __field(u64, iova) -+ __field(size_t, size) -+ __field(size_t, unmapped_size) -+ ), - -- TP_ARGS(iova, paddr, size), -+ TP_fast_assign( -+ __entry->iova = iova; -+ __entry->size = size; -+ __entry->unmapped_size = unmapped_size; -+ ), - -- TP_printk("IOMMU: iova=0x%016llx size=0x%x", -- __entry->iova, __entry->size -+ TP_printk("IOMMU: iova=0x%016llx size=%zu unmapped_size=%zu", -+ __entry->iova, __entry->size, __entry->unmapped_size - ) - ); - -diff --git a/kernel/irq/Kconfig b/kernel/irq/Kconfig -index 225086b..9a76e3b 100644 ---- a/kernel/irq/Kconfig -+++ b/kernel/irq/Kconfig -@@ -55,6 +55,21 @@ config GENERIC_IRQ_CHIP - config IRQ_DOMAIN - bool - -+# Support for hierarchical irq domains -+config IRQ_DOMAIN_HIERARCHY -+ bool -+ select IRQ_DOMAIN -+ -+# Generic MSI interrupt support -+config GENERIC_MSI_IRQ -+ bool -+ -+# Generic MSI hierarchical interrupt domain support -+config GENERIC_MSI_IRQ_DOMAIN -+ bool -+ select 
IRQ_DOMAIN_HIERARCHY -+ select GENERIC_MSI_IRQ -+ - config HANDLE_DOMAIN_IRQ - bool - -diff --git a/kernel/irq/Makefile b/kernel/irq/Makefile -index fff1738..d121235 100644 ---- a/kernel/irq/Makefile -+++ b/kernel/irq/Makefile -@@ -6,3 +6,4 @@ obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o - obj-$(CONFIG_PROC_FS) += proc.o - obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o - obj-$(CONFIG_PM_SLEEP) += pm.o -+obj-$(CONFIG_GENERIC_MSI_IRQ) += msi.o -diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c -index e5202f0..63c16d1 100644 ---- a/kernel/irq/chip.c -+++ b/kernel/irq/chip.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - - #include - -@@ -178,6 +179,7 @@ int irq_startup(struct irq_desc *desc, bool resend) - irq_state_clr_disabled(desc); - desc->depth = 0; - -+ irq_domain_activate_irq(&desc->irq_data); - if (desc->irq_data.chip->irq_startup) { - ret = desc->irq_data.chip->irq_startup(&desc->irq_data); - irq_state_clr_masked(desc); -@@ -199,6 +201,7 @@ void irq_shutdown(struct irq_desc *desc) - desc->irq_data.chip->irq_disable(&desc->irq_data); - else - desc->irq_data.chip->irq_mask(&desc->irq_data); -+ irq_domain_deactivate_irq(&desc->irq_data); - irq_state_set_masked(desc); - } - -@@ -847,3 +850,105 @@ void irq_cpu_offline(void) - raw_spin_unlock_irqrestore(&desc->lock, flags); - } - } -+ -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+/** -+ * irq_chip_ack_parent - Acknowledge the parent interrupt -+ * @data: Pointer to interrupt specific data -+ */ -+void irq_chip_ack_parent(struct irq_data *data) -+{ -+ data = data->parent_data; -+ data->chip->irq_ack(data); -+} -+ -+/** -+ * irq_chip_mask_parent - Mask the parent interrupt -+ * @data: Pointer to interrupt specific data -+ */ -+void irq_chip_mask_parent(struct irq_data *data) -+{ -+ data = data->parent_data; -+ data->chip->irq_mask(data); -+} -+ -+/** -+ * irq_chip_unmask_parent - Unmask the parent interrupt -+ * @data: Pointer to interrupt specific data -+ */ -+void irq_chip_unmask_parent(struct irq_data 
*data) -+{ -+ data = data->parent_data; -+ data->chip->irq_unmask(data); -+} -+ -+/** -+ * irq_chip_eoi_parent - Invoke EOI on the parent interrupt -+ * @data: Pointer to interrupt specific data -+ */ -+void irq_chip_eoi_parent(struct irq_data *data) -+{ -+ data = data->parent_data; -+ data->chip->irq_eoi(data); -+} -+ -+/** -+ * irq_chip_set_affinity_parent - Set affinity on the parent interrupt -+ * @data: Pointer to interrupt specific data -+ * @dest: The affinity mask to set -+ * @force: Flag to enforce setting (disable online checks) -+ * -+ * Conditinal, as the underlying parent chip might not implement it. -+ */ -+int irq_chip_set_affinity_parent(struct irq_data *data, -+ const struct cpumask *dest, bool force) -+{ -+ data = data->parent_data; -+ if (data->chip->irq_set_affinity) -+ return data->chip->irq_set_affinity(data, dest, force); -+ -+ return -ENOSYS; -+} -+ -+/** -+ * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware -+ * @data: Pointer to interrupt specific data -+ * -+ * Iterate through the domain hierarchy of the interrupt and check -+ * whether a hw retrigger function exists. If yes, invoke it. -+ */ -+int irq_chip_retrigger_hierarchy(struct irq_data *data) -+{ -+ for (data = data->parent_data; data; data = data->parent_data) -+ if (data->chip && data->chip->irq_retrigger) -+ return data->chip->irq_retrigger(data); -+ -+ return -ENOSYS; -+} -+#endif -+ -+/** -+ * irq_chip_compose_msi_msg - Componse msi message for a irq chip -+ * @data: Pointer to interrupt specific data -+ * @msg: Pointer to the MSI message -+ * -+ * For hierarchical domains we find the first chip in the hierarchy -+ * which implements the irq_compose_msi_msg callback. For non -+ * hierarchical we use the top level chip. 
-+ */ -+int irq_chip_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -+{ -+ struct irq_data *pos = NULL; -+ -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ for (; data; data = data->parent_data) -+#endif -+ if (data->chip && data->chip->irq_compose_msi_msg) -+ pos = data; -+ if (!pos) -+ return -ENOSYS; -+ -+ pos->chip->irq_compose_msi_msg(pos, msg); -+ -+ return 0; -+} -diff --git a/kernel/irq/generic-chip.c b/kernel/irq/generic-chip.c -index cf80e7b..61024e8 100644 ---- a/kernel/irq/generic-chip.c -+++ b/kernel/irq/generic-chip.c -@@ -39,7 +39,7 @@ void irq_gc_mask_disable_reg(struct irq_data *d) - u32 mask = d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.disable); -+ irq_reg_writel(gc, mask, ct->regs.disable); - *ct->mask_cache &= ~mask; - irq_gc_unlock(gc); - } -@@ -59,7 +59,7 @@ void irq_gc_mask_set_bit(struct irq_data *d) - - irq_gc_lock(gc); - *ct->mask_cache |= mask; -- irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask); -+ irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); - irq_gc_unlock(gc); - } - EXPORT_SYMBOL_GPL(irq_gc_mask_set_bit); -@@ -79,7 +79,7 @@ void irq_gc_mask_clr_bit(struct irq_data *d) - - irq_gc_lock(gc); - *ct->mask_cache &= ~mask; -- irq_reg_writel(*ct->mask_cache, gc->reg_base + ct->regs.mask); -+ irq_reg_writel(gc, *ct->mask_cache, ct->regs.mask); - irq_gc_unlock(gc); - } - EXPORT_SYMBOL_GPL(irq_gc_mask_clr_bit); -@@ -98,7 +98,7 @@ void irq_gc_unmask_enable_reg(struct irq_data *d) - u32 mask = d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.enable); -+ irq_reg_writel(gc, mask, ct->regs.enable); - *ct->mask_cache |= mask; - irq_gc_unlock(gc); - } -@@ -114,7 +114,7 @@ void irq_gc_ack_set_bit(struct irq_data *d) - u32 mask = d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.ack); -+ irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); - } - EXPORT_SYMBOL_GPL(irq_gc_ack_set_bit); -@@ -130,7 +130,7 @@ void 
irq_gc_ack_clr_bit(struct irq_data *d) - u32 mask = ~d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.ack); -+ irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); - } - -@@ -145,8 +145,8 @@ void irq_gc_mask_disable_reg_and_ack(struct irq_data *d) - u32 mask = d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.mask); -- irq_reg_writel(mask, gc->reg_base + ct->regs.ack); -+ irq_reg_writel(gc, mask, ct->regs.mask); -+ irq_reg_writel(gc, mask, ct->regs.ack); - irq_gc_unlock(gc); - } - -@@ -161,7 +161,7 @@ void irq_gc_eoi(struct irq_data *d) - u32 mask = d->mask; - - irq_gc_lock(gc); -- irq_reg_writel(mask, gc->reg_base + ct->regs.eoi); -+ irq_reg_writel(gc, mask, ct->regs.eoi); - irq_gc_unlock(gc); - } - -@@ -191,6 +191,16 @@ int irq_gc_set_wake(struct irq_data *d, unsigned int on) - return 0; - } - -+static u32 irq_readl_be(void __iomem *addr) -+{ -+ return ioread32be(addr); -+} -+ -+static void irq_writel_be(u32 val, void __iomem *addr) -+{ -+ iowrite32be(val, addr); -+} -+ - static void - irq_init_generic_chip(struct irq_chip_generic *gc, const char *name, - int num_ct, unsigned int irq_base, -@@ -245,7 +255,7 @@ irq_gc_init_mask_cache(struct irq_chip_generic *gc, enum irq_gc_flags flags) - } - ct[i].mask_cache = mskptr; - if (flags & IRQ_GC_INIT_MASK_CACHE) -- *mskptr = irq_reg_readl(gc->reg_base + mskreg); -+ *mskptr = irq_reg_readl(gc, mskreg); - } - } - -@@ -300,7 +310,13 @@ int irq_alloc_domain_generic_chips(struct irq_domain *d, int irqs_per_chip, - dgc->gc[i] = gc = tmp; - irq_init_generic_chip(gc, name, num_ct, i * irqs_per_chip, - NULL, handler); -+ - gc->domain = d; -+ if (gcflags & IRQ_GC_BE_IO) { -+ gc->reg_readl = &irq_readl_be; -+ gc->reg_writel = &irq_writel_be; -+ } -+ - raw_spin_lock_irqsave(&gc_lock, flags); - list_add_tail(&gc->list, &gc_list); - raw_spin_unlock_irqrestore(&gc_lock, flags); -diff --git a/kernel/irq/irqdomain.c b/kernel/irq/irqdomain.c -index 6534ff6..021f823 100644 
---- a/kernel/irq/irqdomain.c -+++ b/kernel/irq/irqdomain.c -@@ -23,6 +23,10 @@ static DEFINE_MUTEX(irq_domain_mutex); - static DEFINE_MUTEX(revmap_trees_mutex); - static struct irq_domain *irq_default_domain; - -+static int irq_domain_alloc_descs(int virq, unsigned int nr_irqs, -+ irq_hw_number_t hwirq, int node); -+static void irq_domain_check_hierarchy(struct irq_domain *domain); -+ - /** - * __irq_domain_add() - Allocate a new irq_domain data structure - * @of_node: optional device-tree node of the interrupt controller -@@ -30,7 +34,7 @@ static struct irq_domain *irq_default_domain; - * @hwirq_max: Maximum number of interrupts supported by controller - * @direct_max: Maximum value of direct maps; Use ~0 for no limit; 0 for no - * direct mapping -- * @ops: map/unmap domain callbacks -+ * @ops: domain callbacks - * @host_data: Controller private data pointer - * - * Allocates and initialize and irq_domain structure. -@@ -56,6 +60,7 @@ struct irq_domain *__irq_domain_add(struct device_node *of_node, int size, - domain->hwirq_max = hwirq_max; - domain->revmap_size = size; - domain->revmap_direct_max_irq = direct_max; -+ irq_domain_check_hierarchy(domain); - - mutex_lock(&irq_domain_mutex); - list_add(&domain->link, &irq_domain_list); -@@ -109,7 +114,7 @@ EXPORT_SYMBOL_GPL(irq_domain_remove); - * @first_irq: first number of irq block assigned to the domain, - * pass zero to assign irqs on-the-fly. If first_irq is non-zero, then - * pre-map all of the irqs in the domain to virqs starting at first_irq. 
-- * @ops: map/unmap domain callbacks -+ * @ops: domain callbacks - * @host_data: Controller private data pointer - * - * Allocates an irq_domain, and optionally if first_irq is positive then also -@@ -174,20 +179,20 @@ struct irq_domain *irq_domain_add_legacy(struct device_node *of_node, - - domain = __irq_domain_add(of_node, first_hwirq + size, - first_hwirq + size, 0, ops, host_data); -- if (!domain) -- return NULL; -- -- irq_domain_associate_many(domain, first_irq, first_hwirq, size); -+ if (domain) -+ irq_domain_associate_many(domain, first_irq, first_hwirq, size); - - return domain; - } - EXPORT_SYMBOL_GPL(irq_domain_add_legacy); - - /** -- * irq_find_host() - Locates a domain for a given device node -+ * irq_find_matching_host() - Locates a domain for a given device node - * @node: device-tree node of the interrupt controller -+ * @bus_token: domain-specific data - */ --struct irq_domain *irq_find_host(struct device_node *node) -+struct irq_domain *irq_find_matching_host(struct device_node *node, -+ enum irq_domain_bus_token bus_token) - { - struct irq_domain *h, *found = NULL; - int rc; -@@ -196,13 +201,19 @@ struct irq_domain *irq_find_host(struct device_node *node) - * it might potentially be set to match all interrupts in - * the absence of a device node. This isn't a problem so far - * yet though... -+ * -+ * bus_token == DOMAIN_BUS_ANY matches any domain, any other -+ * values must generate an exact match for the domain to be -+ * selected. 
- */ - mutex_lock(&irq_domain_mutex); - list_for_each_entry(h, &irq_domain_list, link) { - if (h->ops->match) -- rc = h->ops->match(h, node); -+ rc = h->ops->match(h, node, bus_token); - else -- rc = (h->of_node != NULL) && (h->of_node == node); -+ rc = ((h->of_node != NULL) && (h->of_node == node) && -+ ((bus_token == DOMAIN_BUS_ANY) || -+ (h->bus_token == bus_token))); - - if (rc) { - found = h; -@@ -212,7 +223,7 @@ struct irq_domain *irq_find_host(struct device_node *node) - mutex_unlock(&irq_domain_mutex); - return found; - } --EXPORT_SYMBOL_GPL(irq_find_host); -+EXPORT_SYMBOL_GPL(irq_find_matching_host); - - /** - * irq_set_default_host() - Set a "default" irq domain -@@ -388,7 +399,6 @@ EXPORT_SYMBOL_GPL(irq_create_direct_mapping); - unsigned int irq_create_mapping(struct irq_domain *domain, - irq_hw_number_t hwirq) - { -- unsigned int hint; - int virq; - - pr_debug("irq_create_mapping(0x%p, 0x%lx)\n", domain, hwirq); -@@ -410,12 +420,8 @@ unsigned int irq_create_mapping(struct irq_domain *domain, - } - - /* Allocate a virtual interrupt number */ -- hint = hwirq % nr_irqs; -- if (hint == 0) -- hint++; -- virq = irq_alloc_desc_from(hint, of_node_to_nid(domain->of_node)); -- if (virq <= 0) -- virq = irq_alloc_desc_from(1, of_node_to_nid(domain->of_node)); -+ virq = irq_domain_alloc_descs(-1, 1, hwirq, -+ of_node_to_nid(domain->of_node)); - if (virq <= 0) { - pr_debug("-> virq allocation failed\n"); - return 0; -@@ -471,7 +477,7 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) - struct irq_domain *domain; - irq_hw_number_t hwirq; - unsigned int type = IRQ_TYPE_NONE; -- unsigned int virq; -+ int virq; - - domain = irq_data->np ? 
irq_find_host(irq_data->np) : irq_default_domain; - if (!domain) { -@@ -489,10 +495,24 @@ unsigned int irq_create_of_mapping(struct of_phandle_args *irq_data) - return 0; - } - -- /* Create mapping */ -- virq = irq_create_mapping(domain, hwirq); -- if (!virq) -- return virq; -+ if (irq_domain_is_hierarchy(domain)) { -+ /* -+ * If we've already configured this interrupt, -+ * don't do it again, or hell will break loose. -+ */ -+ virq = irq_find_mapping(domain, hwirq); -+ if (virq) -+ return virq; -+ -+ virq = irq_domain_alloc_irqs(domain, 1, NUMA_NO_NODE, irq_data); -+ if (virq <= 0) -+ return 0; -+ } else { -+ /* Create mapping */ -+ virq = irq_create_mapping(domain, hwirq); -+ if (!virq) -+ return virq; -+ } - - /* Set type if specified and different than the current one */ - if (type != IRQ_TYPE_NONE && -@@ -540,8 +560,8 @@ unsigned int irq_find_mapping(struct irq_domain *domain, - return 0; - - if (hwirq < domain->revmap_direct_max_irq) { -- data = irq_get_irq_data(hwirq); -- if (data && (data->domain == domain) && (data->hwirq == hwirq)) -+ data = irq_domain_get_irq_data(domain, hwirq); -+ if (data && data->hwirq == hwirq) - return hwirq; - } - -@@ -709,3 +729,518 @@ const struct irq_domain_ops irq_domain_simple_ops = { - .xlate = irq_domain_xlate_onetwocell, - }; - EXPORT_SYMBOL_GPL(irq_domain_simple_ops); -+ -+static int irq_domain_alloc_descs(int virq, unsigned int cnt, -+ irq_hw_number_t hwirq, int node) -+{ -+ unsigned int hint; -+ -+ if (virq >= 0) { -+ virq = irq_alloc_descs(virq, virq, cnt, node); -+ } else { -+ hint = hwirq % nr_irqs; -+ if (hint == 0) -+ hint++; -+ virq = irq_alloc_descs_from(hint, cnt, node); -+ if (virq <= 0 && hint > 1) -+ virq = irq_alloc_descs_from(1, cnt, node); -+ } -+ -+ return virq; -+} -+ -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+/** -+ * irq_domain_add_hierarchy - Add a irqdomain into the hierarchy -+ * @parent: Parent irq domain to associate with the new domain -+ * @flags: Irq domain flags associated to the domain -+ * 
@size: Size of the domain. See below -+ * @node: Optional device-tree node of the interrupt controller -+ * @ops: Pointer to the interrupt domain callbacks -+ * @host_data: Controller private data pointer -+ * -+ * If @size is 0 a tree domain is created, otherwise a linear domain. -+ * -+ * If successful the parent is associated to the new domain and the -+ * domain flags are set. -+ * Returns pointer to IRQ domain, or NULL on failure. -+ */ -+struct irq_domain *irq_domain_add_hierarchy(struct irq_domain *parent, -+ unsigned int flags, -+ unsigned int size, -+ struct device_node *node, -+ const struct irq_domain_ops *ops, -+ void *host_data) -+{ -+ struct irq_domain *domain; -+ -+ if (size) -+ domain = irq_domain_add_linear(node, size, ops, host_data); -+ else -+ domain = irq_domain_add_tree(node, ops, host_data); -+ if (domain) { -+ domain->parent = parent; -+ domain->flags |= flags; -+ } -+ -+ return domain; -+} -+ -+static void irq_domain_insert_irq(int virq) -+{ -+ struct irq_data *data; -+ -+ for (data = irq_get_irq_data(virq); data; data = data->parent_data) { -+ struct irq_domain *domain = data->domain; -+ irq_hw_number_t hwirq = data->hwirq; -+ -+ if (hwirq < domain->revmap_size) { -+ domain->linear_revmap[hwirq] = virq; -+ } else { -+ mutex_lock(&revmap_trees_mutex); -+ radix_tree_insert(&domain->revmap_tree, hwirq, data); -+ mutex_unlock(&revmap_trees_mutex); -+ } -+ -+ /* If not already assigned, give the domain the chip's name */ -+ if (!domain->name && data->chip) -+ domain->name = data->chip->name; -+ } -+ -+ irq_clear_status_flags(virq, IRQ_NOREQUEST); -+} -+ -+static void irq_domain_remove_irq(int virq) -+{ -+ struct irq_data *data; -+ -+ irq_set_status_flags(virq, IRQ_NOREQUEST); -+ irq_set_chip_and_handler(virq, NULL, NULL); -+ synchronize_irq(virq); -+ smp_mb(); -+ -+ for (data = irq_get_irq_data(virq); data; data = data->parent_data) { -+ struct irq_domain *domain = data->domain; -+ irq_hw_number_t hwirq = data->hwirq; -+ -+ if (hwirq < 
domain->revmap_size) { -+ domain->linear_revmap[hwirq] = 0; -+ } else { -+ mutex_lock(&revmap_trees_mutex); -+ radix_tree_delete(&domain->revmap_tree, hwirq); -+ mutex_unlock(&revmap_trees_mutex); -+ } -+ } -+} -+ -+static struct irq_data *irq_domain_insert_irq_data(struct irq_domain *domain, -+ struct irq_data *child) -+{ -+ struct irq_data *irq_data; -+ -+ irq_data = kzalloc_node(sizeof(*irq_data), GFP_KERNEL, child->node); -+ if (irq_data) { -+ child->parent_data = irq_data; -+ irq_data->irq = child->irq; -+ irq_data->node = child->node; -+ irq_data->domain = domain; -+ } -+ -+ return irq_data; -+} -+ -+static void irq_domain_free_irq_data(unsigned int virq, unsigned int nr_irqs) -+{ -+ struct irq_data *irq_data, *tmp; -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ irq_data = irq_get_irq_data(virq + i); -+ tmp = irq_data->parent_data; -+ irq_data->parent_data = NULL; -+ irq_data->domain = NULL; -+ -+ while (tmp) { -+ irq_data = tmp; -+ tmp = tmp->parent_data; -+ kfree(irq_data); -+ } -+ } -+} -+ -+static int irq_domain_alloc_irq_data(struct irq_domain *domain, -+ unsigned int virq, unsigned int nr_irqs) -+{ -+ struct irq_data *irq_data; -+ struct irq_domain *parent; -+ int i; -+ -+ /* The outermost irq_data is embedded in struct irq_desc */ -+ for (i = 0; i < nr_irqs; i++) { -+ irq_data = irq_get_irq_data(virq + i); -+ irq_data->domain = domain; -+ -+ for (parent = domain->parent; parent; parent = parent->parent) { -+ irq_data = irq_domain_insert_irq_data(parent, irq_data); -+ if (!irq_data) { -+ irq_domain_free_irq_data(virq, i + 1); -+ return -ENOMEM; -+ } -+ } -+ } -+ -+ return 0; -+} -+ -+/** -+ * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain -+ * @domain: domain to match -+ * @virq: IRQ number to get irq_data -+ */ -+struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, -+ unsigned int virq) -+{ -+ struct irq_data *irq_data; -+ -+ for (irq_data = irq_get_irq_data(virq); irq_data; -+ irq_data = 
irq_data->parent_data) -+ if (irq_data->domain == domain) -+ return irq_data; -+ -+ return NULL; -+} -+ -+/** -+ * irq_domain_set_hwirq_and_chip - Set hwirq and irqchip of @virq at @domain -+ * @domain: Interrupt domain to match -+ * @virq: IRQ number -+ * @hwirq: The hwirq number -+ * @chip: The associated interrupt chip -+ * @chip_data: The associated chip data -+ */ -+int irq_domain_set_hwirq_and_chip(struct irq_domain *domain, unsigned int virq, -+ irq_hw_number_t hwirq, struct irq_chip *chip, -+ void *chip_data) -+{ -+ struct irq_data *irq_data = irq_domain_get_irq_data(domain, virq); -+ -+ if (!irq_data) -+ return -ENOENT; -+ -+ irq_data->hwirq = hwirq; -+ irq_data->chip = chip ? chip : &no_irq_chip; -+ irq_data->chip_data = chip_data; -+ -+ return 0; -+} -+ -+/** -+ * irq_domain_set_info - Set the complete data for a @virq in @domain -+ * @domain: Interrupt domain to match -+ * @virq: IRQ number -+ * @hwirq: The hardware interrupt number -+ * @chip: The associated interrupt chip -+ * @chip_data: The associated interrupt chip data -+ * @handler: The interrupt flow handler -+ * @handler_data: The interrupt flow handler data -+ * @handler_name: The interrupt handler name -+ */ -+void irq_domain_set_info(struct irq_domain *domain, unsigned int virq, -+ irq_hw_number_t hwirq, struct irq_chip *chip, -+ void *chip_data, irq_flow_handler_t handler, -+ void *handler_data, const char *handler_name) -+{ -+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, chip, chip_data); -+ __irq_set_handler(virq, handler, 0, handler_name); -+ irq_set_handler_data(virq, handler_data); -+} -+ -+/** -+ * irq_domain_reset_irq_data - Clear hwirq, chip and chip_data in @irq_data -+ * @irq_data: The pointer to irq_data -+ */ -+void irq_domain_reset_irq_data(struct irq_data *irq_data) -+{ -+ irq_data->hwirq = 0; -+ irq_data->chip = &no_irq_chip; -+ irq_data->chip_data = NULL; -+} -+ -+/** -+ * irq_domain_free_irqs_common - Clear irq_data and free the parent -+ * @domain: Interrupt domain 
to match -+ * @virq: IRQ number to start with -+ * @nr_irqs: The number of irqs to free -+ */ -+void irq_domain_free_irqs_common(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs) -+{ -+ struct irq_data *irq_data; -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ irq_data = irq_domain_get_irq_data(domain, virq + i); -+ if (irq_data) -+ irq_domain_reset_irq_data(irq_data); -+ } -+ irq_domain_free_irqs_parent(domain, virq, nr_irqs); -+} -+ -+/** -+ * irq_domain_free_irqs_top - Clear handler and handler data, clear irqdata and free parent -+ * @domain: Interrupt domain to match -+ * @virq: IRQ number to start with -+ * @nr_irqs: The number of irqs to free -+ */ -+void irq_domain_free_irqs_top(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs) -+{ -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ irq_set_handler_data(virq + i, NULL); -+ irq_set_handler(virq + i, NULL); -+ } -+ irq_domain_free_irqs_common(domain, virq, nr_irqs); -+} -+ -+static bool irq_domain_is_auto_recursive(struct irq_domain *domain) -+{ -+ return domain->flags & IRQ_DOMAIN_FLAG_AUTO_RECURSIVE; -+} -+ -+static void irq_domain_free_irqs_recursive(struct irq_domain *domain, -+ unsigned int irq_base, -+ unsigned int nr_irqs) -+{ -+ domain->ops->free(domain, irq_base, nr_irqs); -+ if (irq_domain_is_auto_recursive(domain)) { -+ BUG_ON(!domain->parent); -+ irq_domain_free_irqs_recursive(domain->parent, irq_base, -+ nr_irqs); -+ } -+} -+ -+static int irq_domain_alloc_irqs_recursive(struct irq_domain *domain, -+ unsigned int irq_base, -+ unsigned int nr_irqs, void *arg) -+{ -+ int ret = 0; -+ struct irq_domain *parent = domain->parent; -+ bool recursive = irq_domain_is_auto_recursive(domain); -+ -+ BUG_ON(recursive && !parent); -+ if (recursive) -+ ret = irq_domain_alloc_irqs_recursive(parent, irq_base, -+ nr_irqs, arg); -+ if (ret >= 0) -+ ret = domain->ops->alloc(domain, irq_base, nr_irqs, arg); -+ if (ret < 0 && recursive) -+ 
irq_domain_free_irqs_recursive(parent, irq_base, nr_irqs); -+ -+ return ret; -+} -+ -+/** -+ * __irq_domain_alloc_irqs - Allocate IRQs from domain -+ * @domain: domain to allocate from -+ * @irq_base: allocate specified IRQ nubmer if irq_base >= 0 -+ * @nr_irqs: number of IRQs to allocate -+ * @node: NUMA node id for memory allocation -+ * @arg: domain specific argument -+ * @realloc: IRQ descriptors have already been allocated if true -+ * -+ * Allocate IRQ numbers and initialized all data structures to support -+ * hierarchy IRQ domains. -+ * Parameter @realloc is mainly to support legacy IRQs. -+ * Returns error code or allocated IRQ number -+ * -+ * The whole process to setup an IRQ has been split into two steps. -+ * The first step, __irq_domain_alloc_irqs(), is to allocate IRQ -+ * descriptor and required hardware resources. The second step, -+ * irq_domain_activate_irq(), is to program hardwares with preallocated -+ * resources. In this way, it's easier to rollback when failing to -+ * allocate resources. 
-+ */ -+int __irq_domain_alloc_irqs(struct irq_domain *domain, int irq_base, -+ unsigned int nr_irqs, int node, void *arg, -+ bool realloc) -+{ -+ int i, ret, virq; -+ -+ if (domain == NULL) { -+ domain = irq_default_domain; -+ if (WARN(!domain, "domain is NULL; cannot allocate IRQ\n")) -+ return -EINVAL; -+ } -+ -+ if (!domain->ops->alloc) { -+ pr_debug("domain->ops->alloc() is NULL\n"); -+ return -ENOSYS; -+ } -+ -+ if (realloc && irq_base >= 0) { -+ virq = irq_base; -+ } else { -+ virq = irq_domain_alloc_descs(irq_base, nr_irqs, 0, node); -+ if (virq < 0) { -+ pr_debug("cannot allocate IRQ(base %d, count %d)\n", -+ irq_base, nr_irqs); -+ return virq; -+ } -+ } -+ -+ if (irq_domain_alloc_irq_data(domain, virq, nr_irqs)) { -+ pr_debug("cannot allocate memory for IRQ%d\n", virq); -+ ret = -ENOMEM; -+ goto out_free_desc; -+ } -+ -+ mutex_lock(&irq_domain_mutex); -+ ret = irq_domain_alloc_irqs_recursive(domain, virq, nr_irqs, arg); -+ if (ret < 0) { -+ mutex_unlock(&irq_domain_mutex); -+ goto out_free_irq_data; -+ } -+ for (i = 0; i < nr_irqs; i++) -+ irq_domain_insert_irq(virq + i); -+ mutex_unlock(&irq_domain_mutex); -+ -+ return virq; -+ -+out_free_irq_data: -+ irq_domain_free_irq_data(virq, nr_irqs); -+out_free_desc: -+ irq_free_descs(virq, nr_irqs); -+ return ret; -+} -+ -+/** -+ * irq_domain_free_irqs - Free IRQ number and associated data structures -+ * @virq: base IRQ number -+ * @nr_irqs: number of IRQs to free -+ */ -+void irq_domain_free_irqs(unsigned int virq, unsigned int nr_irqs) -+{ -+ struct irq_data *data = irq_get_irq_data(virq); -+ int i; -+ -+ if (WARN(!data || !data->domain || !data->domain->ops->free, -+ "NULL pointer, cannot free irq\n")) -+ return; -+ -+ mutex_lock(&irq_domain_mutex); -+ for (i = 0; i < nr_irqs; i++) -+ irq_domain_remove_irq(virq + i); -+ irq_domain_free_irqs_recursive(data->domain, virq, nr_irqs); -+ mutex_unlock(&irq_domain_mutex); -+ -+ irq_domain_free_irq_data(virq, nr_irqs); -+ irq_free_descs(virq, nr_irqs); -+} -+ -+/** 
-+ * irq_domain_alloc_irqs_parent - Allocate interrupts from parent domain -+ * @irq_base: Base IRQ number -+ * @nr_irqs: Number of IRQs to allocate -+ * @arg: Allocation data (arch/domain specific) -+ * -+ * Check whether the domain has been setup recursive. If not allocate -+ * through the parent domain. -+ */ -+int irq_domain_alloc_irqs_parent(struct irq_domain *domain, -+ unsigned int irq_base, unsigned int nr_irqs, -+ void *arg) -+{ -+ /* irq_domain_alloc_irqs_recursive() has called parent's alloc() */ -+ if (irq_domain_is_auto_recursive(domain)) -+ return 0; -+ -+ domain = domain->parent; -+ if (domain) -+ return irq_domain_alloc_irqs_recursive(domain, irq_base, -+ nr_irqs, arg); -+ return -ENOSYS; -+} -+ -+/** -+ * irq_domain_free_irqs_parent - Free interrupts from parent domain -+ * @irq_base: Base IRQ number -+ * @nr_irqs: Number of IRQs to free -+ * -+ * Check whether the domain has been setup recursive. If not free -+ * through the parent domain. -+ */ -+void irq_domain_free_irqs_parent(struct irq_domain *domain, -+ unsigned int irq_base, unsigned int nr_irqs) -+{ -+ /* irq_domain_free_irqs_recursive() will call parent's free */ -+ if (!irq_domain_is_auto_recursive(domain) && domain->parent) -+ irq_domain_free_irqs_recursive(domain->parent, irq_base, -+ nr_irqs); -+} -+ -+/** -+ * irq_domain_activate_irq - Call domain_ops->activate recursively to activate -+ * interrupt -+ * @irq_data: outermost irq_data associated with interrupt -+ * -+ * This is the second step to call domain_ops->activate to program interrupt -+ * controllers, so the interrupt could actually get delivered. 
-+ */ -+void irq_domain_activate_irq(struct irq_data *irq_data) -+{ -+ if (irq_data && irq_data->domain) { -+ struct irq_domain *domain = irq_data->domain; -+ -+ if (irq_data->parent_data) -+ irq_domain_activate_irq(irq_data->parent_data); -+ if (domain->ops->activate) -+ domain->ops->activate(domain, irq_data); -+ } -+} -+ -+/** -+ * irq_domain_deactivate_irq - Call domain_ops->deactivate recursively to -+ * deactivate interrupt -+ * @irq_data: outermost irq_data associated with interrupt -+ * -+ * It calls domain_ops->deactivate to program interrupt controllers to disable -+ * interrupt delivery. -+ */ -+void irq_domain_deactivate_irq(struct irq_data *irq_data) -+{ -+ if (irq_data && irq_data->domain) { -+ struct irq_domain *domain = irq_data->domain; -+ -+ if (domain->ops->deactivate) -+ domain->ops->deactivate(domain, irq_data); -+ if (irq_data->parent_data) -+ irq_domain_deactivate_irq(irq_data->parent_data); -+ } -+} -+ -+static void irq_domain_check_hierarchy(struct irq_domain *domain) -+{ -+ /* Hierarchy irq_domains must implement callback alloc() */ -+ if (domain->ops->alloc) -+ domain->flags |= IRQ_DOMAIN_FLAG_HIERARCHY; -+} -+#else /* CONFIG_IRQ_DOMAIN_HIERARCHY */ -+/** -+ * irq_domain_get_irq_data - Get irq_data associated with @virq and @domain -+ * @domain: domain to match -+ * @virq: IRQ number to get irq_data -+ */ -+struct irq_data *irq_domain_get_irq_data(struct irq_domain *domain, -+ unsigned int virq) -+{ -+ struct irq_data *irq_data = irq_get_irq_data(virq); -+ -+ return (irq_data && irq_data->domain == domain) ? 
irq_data : NULL; -+} -+ -+static void irq_domain_check_hierarchy(struct irq_domain *domain) -+{ -+} -+#endif /* CONFIG_IRQ_DOMAIN_HIERARCHY */ -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 0a9104b..8069237 100644 ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -183,6 +183,7 @@ int irq_do_set_affinity(struct irq_data *data, const struct cpumask *mask, - ret = chip->irq_set_affinity(data, mask, force); - switch (ret) { - case IRQ_SET_MASK_OK: -+ case IRQ_SET_MASK_OK_DONE: - cpumask_copy(data->affinity, mask); - case IRQ_SET_MASK_OK_NOCOPY: - irq_set_thread_affinity(desc); -@@ -600,6 +601,7 @@ int __irq_set_trigger(struct irq_desc *desc, unsigned int irq, - - switch (ret) { - case IRQ_SET_MASK_OK: -+ case IRQ_SET_MASK_OK_DONE: - irqd_clear(&desc->irq_data, IRQD_TRIGGER_MASK); - irqd_set(&desc->irq_data, flags); - -diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c -new file mode 100644 -index 0000000..2495ed0 ---- /dev/null -+++ b/kernel/irq/msi.c -@@ -0,0 +1,347 @@ -+/* -+ * linux/kernel/irq/msi.c -+ * -+ * Copyright (C) 2014 Intel Corp. -+ * Author: Jiang Liu -+ * -+ * This file is licensed under GPLv2. -+ * -+ * This file contains common code to support Message Signalled Interrupt for -+ * PCI compatible and non PCI compatible devices. 
-+ */ -+#include -+#include -+#include -+#include -+#include -+ -+/* Temparory solution for building, will be removed later */ -+#include -+ -+struct msi_desc *alloc_msi_entry(struct device *dev) -+{ -+ struct msi_desc *desc = kzalloc(sizeof(*desc), GFP_KERNEL); -+ if (!desc) -+ return NULL; -+ -+ INIT_LIST_HEAD(&desc->list); -+ desc->dev = dev; -+ -+ return desc; -+} -+ -+void free_msi_entry(struct msi_desc *entry) -+{ -+ kfree(entry); -+} -+ -+void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg) -+{ -+ *msg = entry->msg; -+} -+ -+void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg) -+{ -+ struct msi_desc *entry = irq_get_msi_desc(irq); -+ -+ __get_cached_msi_msg(entry, msg); -+} -+EXPORT_SYMBOL_GPL(get_cached_msi_msg); -+ -+#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN -+static inline void irq_chip_write_msi_msg(struct irq_data *data, -+ struct msi_msg *msg) -+{ -+ data->chip->irq_write_msi_msg(data, msg); -+} -+ -+/** -+ * msi_domain_set_affinity - Generic affinity setter function for MSI domains -+ * @irq_data: The irq data associated to the interrupt -+ * @mask: The affinity mask to set -+ * @force: Flag to enforce setting (disable online checks) -+ * -+ * Intended to be used by MSI interrupt controllers which are -+ * implemented with hierarchical domains. 
-+ */ -+int msi_domain_set_affinity(struct irq_data *irq_data, -+ const struct cpumask *mask, bool force) -+{ -+ struct irq_data *parent = irq_data->parent_data; -+ struct msi_msg msg; -+ int ret; -+ -+ ret = parent->chip->irq_set_affinity(parent, mask, force); -+ if (ret >= 0 && ret != IRQ_SET_MASK_OK_DONE) { -+ BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); -+ irq_chip_write_msi_msg(irq_data, &msg); -+ } -+ -+ return ret; -+} -+ -+static void msi_domain_activate(struct irq_domain *domain, -+ struct irq_data *irq_data) -+{ -+ struct msi_msg msg; -+ -+ BUG_ON(irq_chip_compose_msi_msg(irq_data, &msg)); -+ irq_chip_write_msi_msg(irq_data, &msg); -+} -+ -+static void msi_domain_deactivate(struct irq_domain *domain, -+ struct irq_data *irq_data) -+{ -+ struct msi_msg msg; -+ -+ memset(&msg, 0, sizeof(msg)); -+ irq_chip_write_msi_msg(irq_data, &msg); -+} -+ -+static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *arg) -+{ -+ struct msi_domain_info *info = domain->host_data; -+ struct msi_domain_ops *ops = info->ops; -+ irq_hw_number_t hwirq = ops->get_hwirq(info, arg); -+ int i, ret; -+ -+ if (irq_find_mapping(domain, hwirq) > 0) -+ return -EEXIST; -+ -+ ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); -+ if (ret < 0) -+ return ret; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ ret = ops->msi_init(domain, info, virq + i, hwirq + i, arg); -+ if (ret < 0) { -+ if (ops->msi_free) { -+ for (i--; i > 0; i--) -+ ops->msi_free(domain, info, virq + i); -+ } -+ irq_domain_free_irqs_top(domain, virq, nr_irqs); -+ return ret; -+ } -+ } -+ -+ return 0; -+} -+ -+static void msi_domain_free(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs) -+{ -+ struct msi_domain_info *info = domain->host_data; -+ int i; -+ -+ if (info->ops->msi_free) { -+ for (i = 0; i < nr_irqs; i++) -+ info->ops->msi_free(domain, info, virq + i); -+ } -+ irq_domain_free_irqs_top(domain, virq, nr_irqs); -+} -+ -+static struct 
irq_domain_ops msi_domain_ops = { -+ .alloc = msi_domain_alloc, -+ .free = msi_domain_free, -+ .activate = msi_domain_activate, -+ .deactivate = msi_domain_deactivate, -+}; -+ -+#ifdef GENERIC_MSI_DOMAIN_OPS -+static irq_hw_number_t msi_domain_ops_get_hwirq(struct msi_domain_info *info, -+ msi_alloc_info_t *arg) -+{ -+ return arg->hwirq; -+} -+ -+static int msi_domain_ops_prepare(struct irq_domain *domain, struct device *dev, -+ int nvec, msi_alloc_info_t *arg) -+{ -+ memset(arg, 0, sizeof(*arg)); -+ return 0; -+} -+ -+static void msi_domain_ops_set_desc(msi_alloc_info_t *arg, -+ struct msi_desc *desc) -+{ -+ arg->desc = desc; -+} -+#else -+#define msi_domain_ops_get_hwirq NULL -+#define msi_domain_ops_prepare NULL -+#define msi_domain_ops_set_desc NULL -+#endif /* !GENERIC_MSI_DOMAIN_OPS */ -+ -+static int msi_domain_ops_init(struct irq_domain *domain, -+ struct msi_domain_info *info, -+ unsigned int virq, irq_hw_number_t hwirq, -+ msi_alloc_info_t *arg) -+{ -+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, info->chip, -+ info->chip_data); -+ if (info->handler && info->handler_name) { -+ __irq_set_handler(virq, info->handler, 0, info->handler_name); -+ if (info->handler_data) -+ irq_set_handler_data(virq, info->handler_data); -+ } -+ return 0; -+} -+ -+static int msi_domain_ops_check(struct irq_domain *domain, -+ struct msi_domain_info *info, -+ struct device *dev) -+{ -+ return 0; -+} -+ -+static struct msi_domain_ops msi_domain_ops_default = { -+ .get_hwirq = msi_domain_ops_get_hwirq, -+ .msi_init = msi_domain_ops_init, -+ .msi_check = msi_domain_ops_check, -+ .msi_prepare = msi_domain_ops_prepare, -+ .set_desc = msi_domain_ops_set_desc, -+}; -+ -+static void msi_domain_update_dom_ops(struct msi_domain_info *info) -+{ -+ struct msi_domain_ops *ops = info->ops; -+ -+ if (ops == NULL) { -+ info->ops = &msi_domain_ops_default; -+ return; -+ } -+ -+ if (ops->get_hwirq == NULL) -+ ops->get_hwirq = msi_domain_ops_default.get_hwirq; -+ if (ops->msi_init == NULL) -+ 
ops->msi_init = msi_domain_ops_default.msi_init; -+ if (ops->msi_check == NULL) -+ ops->msi_check = msi_domain_ops_default.msi_check; -+ if (ops->msi_prepare == NULL) -+ ops->msi_prepare = msi_domain_ops_default.msi_prepare; -+ if (ops->set_desc == NULL) -+ ops->set_desc = msi_domain_ops_default.set_desc; -+} -+ -+static void msi_domain_update_chip_ops(struct msi_domain_info *info) -+{ -+ struct irq_chip *chip = info->chip; -+ -+ BUG_ON(!chip); -+ if (!chip->irq_mask) -+ chip->irq_mask = pci_msi_mask_irq; -+ if (!chip->irq_unmask) -+ chip->irq_unmask = pci_msi_unmask_irq; -+ if (!chip->irq_set_affinity) -+ chip->irq_set_affinity = msi_domain_set_affinity; -+} -+ -+/** -+ * msi_create_irq_domain - Create a MSI interrupt domain -+ * @of_node: Optional device-tree node of the interrupt controller -+ * @info: MSI domain info -+ * @parent: Parent irq domain -+ */ -+struct irq_domain *msi_create_irq_domain(struct device_node *node, -+ struct msi_domain_info *info, -+ struct irq_domain *parent) -+{ -+ if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS) -+ msi_domain_update_dom_ops(info); -+ if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS) -+ msi_domain_update_chip_ops(info); -+ -+ return irq_domain_add_hierarchy(parent, 0, 0, node, &msi_domain_ops, -+ info); -+} -+ -+/** -+ * msi_domain_alloc_irqs - Allocate interrupts from a MSI interrupt domain -+ * @domain: The domain to allocate from -+ * @dev: Pointer to device struct of the device for which the interrupts -+ * are allocated -+ * @nvec: The number of interrupts to allocate -+ * -+ * Returns 0 on success or an error code. 
-+ */ -+int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev, -+ int nvec) -+{ -+ struct msi_domain_info *info = domain->host_data; -+ struct msi_domain_ops *ops = info->ops; -+ msi_alloc_info_t arg; -+ struct msi_desc *desc; -+ int i, ret, virq = -1; -+ -+ ret = ops->msi_check(domain, info, dev); -+ if (ret == 0) -+ ret = ops->msi_prepare(domain, dev, nvec, &arg); -+ if (ret) -+ return ret; -+ -+ for_each_msi_entry(desc, dev) { -+ ops->set_desc(&arg, desc); -+ if (info->flags & MSI_FLAG_IDENTITY_MAP) -+ virq = (int)ops->get_hwirq(info, &arg); -+ else -+ virq = -1; -+ -+ virq = __irq_domain_alloc_irqs(domain, virq, desc->nvec_used, -+ dev_to_node(dev), &arg, false); -+ if (virq < 0) { -+ ret = -ENOSPC; -+ if (ops->handle_error) -+ ret = ops->handle_error(domain, desc, ret); -+ if (ops->msi_finish) -+ ops->msi_finish(&arg, ret); -+ return ret; -+ } -+ -+ for (i = 0; i < desc->nvec_used; i++) -+ irq_set_msi_desc_off(virq, i, desc); -+ } -+ -+ if (ops->msi_finish) -+ ops->msi_finish(&arg, 0); -+ -+ for_each_msi_entry(desc, dev) { -+ if (desc->nvec_used == 1) -+ dev_dbg(dev, "irq %d for MSI\n", virq); -+ else -+ dev_dbg(dev, "irq [%d-%d] for MSI\n", -+ virq, virq + desc->nvec_used - 1); -+ } -+ -+ return 0; -+} -+ -+/** -+ * msi_domain_free_irqs - Free interrupts from a MSI interrupt @domain associated tp @dev -+ * @domain: The domain to managing the interrupts -+ * @dev: Pointer to device struct of the device for which the interrupts -+ * are free -+ */ -+void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) -+{ -+ struct msi_desc *desc; -+ -+ for_each_msi_entry(desc, dev) { -+ irq_domain_free_irqs(desc->irq, desc->nvec_used); -+ desc->irq = 0; -+ } -+} -+ -+/** -+ * msi_get_domain_info - Get the MSI interrupt domain info for @domain -+ * @domain: The interrupt domain to retrieve data from -+ * -+ * Returns the pointer to the msi_domain_info stored in -+ * @domain->host_data. 
-+ */ -+struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain) -+{ -+ return (struct msi_domain_info *)domain->host_data; -+} -+ -+#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */ -diff --git a/kernel/resource.c b/kernel/resource.c -index 0bcebff..19f2357 100644 ---- a/kernel/resource.c -+++ b/kernel/resource.c -@@ -22,6 +22,7 @@ - #include - #include - #include -+#include - #include - - -@@ -1529,6 +1530,30 @@ int iomem_is_exclusive(u64 addr) - return err; - } - -+struct resource_entry *resource_list_create_entry(struct resource *res, -+ size_t extra_size) -+{ -+ struct resource_entry *entry; -+ -+ entry = kzalloc(sizeof(*entry) + extra_size, GFP_KERNEL); -+ if (entry) { -+ INIT_LIST_HEAD(&entry->node); -+ entry->res = res ? res : &entry->__res; -+ } -+ -+ return entry; -+} -+EXPORT_SYMBOL(resource_list_create_entry); -+ -+void resource_list_free(struct list_head *head) -+{ -+ struct resource_entry *entry, *tmp; -+ -+ list_for_each_entry_safe(entry, tmp, head, node) -+ resource_list_destroy_entry(entry); -+} -+EXPORT_SYMBOL(resource_list_free); -+ - static int __init strict_iomem(char *str) - { - if (strstr(str, "relaxed")) -diff --git a/scripts/Kbuild.include b/scripts/Kbuild.include -index 65e7b08..5374b1b 100644 ---- a/scripts/Kbuild.include -+++ b/scripts/Kbuild.include -@@ -179,6 +179,12 @@ build := -f $(srctree)/scripts/Makefile.build obj - # $(Q)$(MAKE) $(modbuiltin)=dir - modbuiltin := -f $(srctree)/scripts/Makefile.modbuiltin obj - -+### -+# Shorthand for $(Q)$(MAKE) -f scripts/Makefile.dtbinst obj= -+# Usage: -+# $(Q)$(MAKE) $(dtbinst)=dir -+dtbinst := -f $(if $(KBUILD_SRC),$(srctree)/)scripts/Makefile.dtbinst obj -+ - # Prefix -I with $(srctree) if it is not an absolute path. 
- # skip if -I has no parameter - addtree = $(if $(patsubst -I%,%,$(1)), \ -diff --git a/scripts/Makefile.lib b/scripts/Makefile.lib -index 54be19a..5117552 100644 ---- a/scripts/Makefile.lib -+++ b/scripts/Makefile.lib -@@ -283,18 +283,6 @@ $(obj)/%.dtb: $(src)/%.dts FORCE - - dtc-tmp = $(subst $(comma),_,$(dot-target).dts.tmp) - --# Helper targets for Installing DTBs into the boot directory --quiet_cmd_dtb_install = INSTALL $< -- cmd_dtb_install = cp $< $(2) -- --_dtbinst_pre_: -- $(Q)if [ -d $(INSTALL_DTBS_PATH).old ]; then rm -rf $(INSTALL_DTBS_PATH).old; fi -- $(Q)if [ -d $(INSTALL_DTBS_PATH) ]; then mv $(INSTALL_DTBS_PATH) $(INSTALL_DTBS_PATH).old; fi -- $(Q)mkdir -p $(INSTALL_DTBS_PATH) -- --%.dtb_dtbinst_: $(obj)/%.dtb _dtbinst_pre_ -- $(call cmd,dtb_install,$(INSTALL_DTBS_PATH)) -- - # Bzip2 - # --------------------------------------------------------------------------- - --- -2.1.0.27.g96db324 - diff --git a/packages/base/any/kernels/3.18.25/patches/add-nxp-arm64-ls2088ardb-device-tree.patch b/packages/base/any/kernels/3.18.25/patches/add-nxp-arm64-ls2088ardb-device-tree.patch deleted file mode 100644 index 28aff876..00000000 --- a/packages/base/any/kernels/3.18.25/patches/add-nxp-arm64-ls2088ardb-device-tree.patch +++ /dev/null @@ -1,1116 +0,0 @@ -From 0b8911d6263d5b70d41fd741bcead8b68a48ed2b Mon Sep 17 00:00:00 2001 -From: Shengzhou Liu -Date: Wed, 24 Aug 2016 16:16:16 +0800 -Subject: [PATCH] add nxp arm64 ls2088ardb device tree - ---- - arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts | 256 ++++++++ - arch/arm64/boot/dts/fsl-ls2088a.dtsi | 833 ++++++++++++++++++++++++ - 2 files changed, 1089 insertions(+) - create mode 100644 arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts - create mode 100644 arch/arm64/boot/dts/fsl-ls2088a.dtsi - -diff --git a/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts b/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts -new file mode 100644 -index 0000000..3e72718 ---- /dev/null -+++ 
b/arch/arm64/boot/dts/arm64-nxp-ls2088ardb-r1.dts -@@ -0,0 +1,256 @@ -+/* -+ * Device Tree file for NXP LS2088a RDB board -+ * -+ * Copyright (C) 2016, Freescale Semiconductor -+ * -+ * This file is licensed under the terms of the GNU General Public -+ * License version 2. This program is licensed "as is" without any -+ * warranty of any kind, whether express or implied. -+ */ -+ -+/dts-v1/; -+ -+#include "fsl-ls2088a.dtsi" -+ -+/ { -+ model = "arm64-nxp-ls2088ardb-r1"; -+ compatible = "fsl,ls2088a-rdb", "fsl,ls2088a"; -+}; -+ -+&esdhc { -+ status = "okay"; -+}; -+ -+&ifc { -+ status = "okay"; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ ranges = <0x0 0x0 0x5 0x80000000 0x08000000 -+ 0x2 0x0 0x5 0x30000000 0x00010000 -+ 0x3 0x0 0x5 0x20000000 0x00010000>; -+ -+ nor@0,0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "cfi-flash"; -+ reg = <0x0 0x0 0x8000000>; -+ bank-width = <2>; -+ device-width = <1>; -+ -+ partition@0 { -+ /* SoC RCW, this location must not be altered */ -+ reg = <0x0 0x100000>; -+ label = "rcw (RO)"; -+ read-only; -+ }; -+ -+ partition@1 { -+ /* U-Boot image */ -+ reg = <0x100000 0x100000>; -+ label = "uboot"; -+ }; -+ -+ partition@2 { -+ /* U-Boot environment varialbes, 1MB */ -+ reg = <0x200000 0x100000>; -+ label = "uboot-env"; -+ env_size = <0x20000>; -+ }; -+ -+ partition@3 { -+ /* MC firmware, 4MB*/ -+ reg = <0x300000 0x400000>; -+ label = "mc_firmware"; -+ }; -+ -+ partition@4 { -+ /* MC DPL Blob, 1MB */ -+ reg = <0x700000 0x100000>; -+ label = "mc_dpl_blob"; -+ }; -+ -+ partition@5 { -+ /* MC DPC Blob, 1MB */ -+ reg = <0x800000 0x100000>; -+ label = "mc_dpc_blob"; -+ }; -+ -+ partition@6 { -+ /* AIOP FW, 4MB */ -+ reg = <0x900000 0x400000>; -+ label = "aiop_fw"; -+ }; -+ -+ partition@7 { -+ /* DebugServerFW, 2MB */ -+ reg = <0xd00000 0x200000>; -+ label = "DebugServer_fw"; -+ }; -+ }; -+ -+ nand@2,0 { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "fsl,ifc-nand"; -+ reg = <0x2 0x0 0x10000>; -+ }; -+ 
-+ cpld@3,0 { -+ reg = <0x3 0x0 0x10000>; -+ compatible = "fsl,ls2088a-rdb-qixis", "fsl,fpga-qixis"; -+ }; -+}; -+ -+&ftm0 { -+ status = "okay"; -+}; -+ -+&i2c0 { -+ status = "okay"; -+ pca9547@75 { -+ compatible = "nxp,pca9547"; -+ reg = <0x75>; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ i2c-mux-never-disable; -+ i2c@1 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x01>; -+ rtc@68 { -+ compatible = "dallas,ds3232"; -+ reg = <0x68>; -+ }; -+ }; -+ -+ i2c@3 { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x3>; -+ -+ adt7481@4c { -+ compatible = "adi,adt7461"; -+ reg = <0x4c>; -+ }; -+ }; -+ }; -+}; -+ -+&i2c1 { -+ status = "disabled"; -+}; -+ -+&i2c2 { -+ status = "disabled"; -+}; -+ -+&i2c3 { -+ status = "disabled"; -+}; -+ -+&dspi { -+ status = "okay"; -+ dflash0: n25q512a { -+ #address-cells = <1>; -+ #size-cells = <1>; -+ compatible = "st,m25p80"; -+ spi-max-frequency = <3000000>; -+ reg = <0>; -+ }; -+}; -+ -+&qspi { -+ status = "disabled"; -+}; -+ -+&sata0 { -+ status = "okay"; -+}; -+ -+&sata1 { -+ status = "okay"; -+}; -+ -+&usb0 { -+ status = "okay"; -+}; -+ -+&usb1 { -+ status = "okay"; -+}; -+ -+&emdio1 { -+ /* CS4340 PHYs */ -+ mdio1_phy1: emdio1_phy@1 { -+ reg = <0x10>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy2: emdio1_phy@2 { -+ reg = <0x11>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy3: emdio1_phy@3 { -+ reg = <0x12>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio1_phy4: emdio1_phy@4 { -+ reg = <0x13>; -+ phy-connection-type = "xfi"; -+ }; -+}; -+ -+&emdio2 { -+ /* AQR405 PHYs */ -+ mdio2_phy1: emdio2_phy@1 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 1 0x4>; /* Level high type */ -+ reg = <0x0>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy2: emdio2_phy@2 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 2 0x4>; /* Level high type */ -+ reg = <0x1>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy3: emdio2_phy@3 { -+ compatible = "ethernet-phy-ieee802.3-c45"; 
-+ interrupts = <0 4 0x4>; /* Level high type */ -+ reg = <0x2>; -+ phy-connection-type = "xfi"; -+ }; -+ mdio2_phy4: emdio2_phy@4 { -+ compatible = "ethernet-phy-ieee802.3-c45"; -+ interrupts = <0 5 0x4>; /* Level high type */ -+ reg = <0x3>; -+ phy-connection-type = "xfi"; -+ }; -+}; -+ -+/* Update DPMAC connections to external PHYs, under the assumption of -+ * SerDes 0x2a_0x41. This is currently the only SerDes supported on the board. -+ */ -+&dpmac1 { -+ phy-handle = <&mdio1_phy1>; -+}; -+&dpmac2 { -+ phy-handle = <&mdio1_phy2>; -+}; -+&dpmac3 { -+ phy-handle = <&mdio1_phy3>; -+}; -+&dpmac4 { -+ phy-handle = <&mdio1_phy4>; -+}; -+&dpmac5 { -+ phy-handle = <&mdio2_phy1>; -+}; -+&dpmac6 { -+ phy-handle = <&mdio2_phy2>; -+}; -+&dpmac7 { -+ phy-handle = <&mdio2_phy3>; -+}; -+&dpmac8 { -+ phy-handle = <&mdio2_phy4>; -+}; -diff --git a/arch/arm64/boot/dts/fsl-ls2088a.dtsi b/arch/arm64/boot/dts/fsl-ls2088a.dtsi -new file mode 100644 -index 0000000..892d426 ---- /dev/null -+++ b/arch/arm64/boot/dts/fsl-ls2088a.dtsi -@@ -0,0 +1,833 @@ -+/* -+ * Device Tree Include file for Freescale Layerscape-2088A family SoC. -+ * -+ * Copyright (C) 2016, Freescale Semiconductor -+ * -+ * Abhimanyu Saini -+ * -+ * This file is dual-licensed: you can use it either under the terms -+ * of the GPLv2 or the X11 license, at your option. Note that this dual -+ * licensing only applies to this file, and not this project as a -+ * whole. -+ * -+ * a) This library is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License as -+ * published by the Free Software Foundation; either version 2 of the -+ * License, or (at your option) any later version. -+ * -+ * This library is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * Or, alternatively, -+ * -+ * b) Permission is hereby granted, free of charge, to any person -+ * obtaining a copy of this software and associated documentation -+ * files (the "Software"), to deal in the Software without -+ * restriction, including without limitation the rights to use, -+ * copy, modify, merge, publish, distribute, sublicense, and/or -+ * sell copies of the Software, and to permit persons to whom the -+ * Software is furnished to do so, subject to the following -+ * conditions: -+ * -+ * The above copyright notice and this permission notice shall be -+ * included in all copies or substantial portions of the Software. -+ * -+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, -+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES -+ * OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND -+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT -+ * HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, -+ * WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING -+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR -+ * OTHER DEALINGS IN THE SOFTWARE. 
-+ */ -+ -+#include -+ -+/memreserve/ 0x80000000 0x00010000; -+ -+/ { -+ compatible = "fsl,ls2088a"; -+ interrupt-parent = <&gic>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ -+ cpus { -+ #address-cells = <2>; -+ #size-cells = <0>; -+ -+ cpu0: cpu@0 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x0>; -+ clocks = <&clockgen 1 0>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu1: cpu@1 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x1>; -+ clocks = <&clockgen 1 0>; -+ }; -+ -+ cpu2: cpu@100 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x100>; -+ clocks = <&clockgen 1 1>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu3: cpu@101 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x101>; -+ clocks = <&clockgen 1 1>; -+ }; -+ -+ cpu4: cpu@200 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x200>; -+ clocks = <&clockgen 1 2>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu5: cpu@201 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x201>; -+ clocks = <&clockgen 1 2>; -+ }; -+ -+ cpu6: cpu@300 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x300>; -+ clocks = <&clockgen 1 3>; -+ #cooling-cells = <2>; -+ }; -+ -+ cpu7: cpu@301 { -+ device_type = "cpu"; -+ compatible = "arm,cortex-a72"; -+ reg = <0x0 0x301>; -+ clocks = <&clockgen 1 3>; -+ }; -+ }; -+ -+ pmu { -+ compatible = "arm,armv8-pmuv3"; -+ interrupts = <1 7 0x8>; /* PMU PPI, Level low type */ -+ }; -+ -+ gic: interrupt-controller@6000000 { -+ compatible = "arm,gic-v3"; -+ reg = <0x0 0x06000000 0 0x10000>, /* GIC Dist */ -+ <0x0 0x06100000 0 0x100000>, /* GICR (RD_base + SGI_base) */ -+ <0x0 0x0c0c0000 0 0x2000>, /* GICC */ -+ <0x0 0x0c0d0000 0 0x1000>, /* GICH */ -+ <0x0 0x0c0e0000 0 0x20000>; /* GICV */ -+ #interrupt-cells = <3>; -+ #address-cells = <2>; -+ #size-cells = <2>; -+ ranges; -+ interrupt-controller; -+ interrupts = <1 9 0x4>; -+ -+ 
its: gic-its@6020000 { -+ compatible = "arm,gic-v3-its"; -+ msi-controller; -+ reg = <0x0 0x6020000 0 0x20000>; -+ }; -+ }; -+ -+ sysclk: sysclk { -+ compatible = "fixed-clock"; -+ #clock-cells = <0>; -+ clock-frequency = <100000000>; -+ clock-output-names = "sysclk"; -+ }; -+ -+ clockgen: clocking@1300000 { -+ compatible = "fsl,ls2088a-clockgen"; -+ reg = <0 0x1300000 0 0xa0000>; -+ #clock-cells = <2>; -+ clocks = <&sysclk>; -+ }; -+ -+ tmu: tmu@1f80000 { -+ compatible = "fsl,qoriq-tmu", "fsl,ls2080a-tmu", "fsl,ls2088a-tmu"; -+ reg = <0x0 0x1f80000 0x0 0x10000>; -+ interrupts = <0 23 0x4>; -+ fsl,tmu-range = <0xb0000 0x9002a 0x6004c 0x30062>; -+ fsl,tmu-calibration = <0x00000000 0x00000026 -+ 0x00000001 0x0000002d -+ 0x00000002 0x00000032 -+ 0x00000003 0x00000039 -+ 0x00000004 0x0000003f -+ 0x00000005 0x00000046 -+ 0x00000006 0x0000004d -+ 0x00000007 0x00000054 -+ 0x00000008 0x0000005a -+ 0x00000009 0x00000061 -+ 0x0000000a 0x0000006a -+ 0x0000000b 0x00000071 -+ -+ 0x00010000 0x00000025 -+ 0x00010001 0x0000002c -+ 0x00010002 0x00000035 -+ 0x00010003 0x0000003d -+ 0x00010004 0x00000045 -+ 0x00010005 0x0000004e -+ 0x00010006 0x00000057 -+ 0x00010007 0x00000061 -+ 0x00010008 0x0000006b -+ 0x00010009 0x00000076 -+ -+ 0x00020000 0x00000029 -+ 0x00020001 0x00000033 -+ 0x00020002 0x0000003d -+ 0x00020003 0x00000049 -+ 0x00020004 0x00000056 -+ 0x00020005 0x00000061 -+ 0x00020006 0x0000006d -+ -+ 0x00030000 0x00000021 -+ 0x00030001 0x0000002a -+ 0x00030002 0x0000003c -+ 0x00030003 0x0000004e>; -+ little-endian; -+ #thermal-sensor-cells = <1>; -+ }; -+ -+ thermal-zones { -+ cpu_thermal: cpu-thermal { -+ polling-delay-passive = <1000>; -+ polling-delay = <5000>; -+ -+ thermal-sensors = <&tmu 4>; -+ -+ trips { -+ cpu_alert: cpu-alert { -+ temperature = <75000>; -+ hysteresis = <2000>; -+ type = "passive"; -+ }; -+ cpu_crit: cpu-crit { -+ temperature = <85000>; -+ hysteresis = <2000>; -+ type = "critical"; -+ }; -+ }; -+ -+ cooling-maps { -+ map0 { -+ trip = <&cpu_alert>; -+ 
cooling-device = -+ <&cpu0 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map1 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu2 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map2 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu4 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ map3 { -+ trip = <&cpu_alert>; -+ cooling-device = -+ <&cpu6 THERMAL_NO_LIMIT -+ THERMAL_NO_LIMIT>; -+ }; -+ }; -+ }; -+ }; -+ -+ serial0: serial@21c0500 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0500 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ -+ serial1: serial@21c0600 { -+ device_type = "serial"; -+ compatible = "fsl,ns16550", "ns16550a"; -+ reg = <0x0 0x21c0600 0x0 0x100>; -+ clocks = <&clockgen 4 3>; -+ interrupts = <0 32 0x4>; /* Level high type */ -+ }; -+ cluster1_core0_watchdog: wdt@c000000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc000000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster1_core1_watchdog: wdt@c010000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc010000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster2_core0_watchdog: wdt@c100000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc100000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster2_core1_watchdog: wdt@c110000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc110000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster3_core0_watchdog: wdt@c200000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc200000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ 
cluster3_core1_watchdog: wdt@c210000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc210000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster4_core0_watchdog: wdt@c300000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc300000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ cluster4_core1_watchdog: wdt@c310000 { -+ compatible = "arm,sp805-wdt", "arm,primecell"; -+ reg = <0x0 0xc310000 0x0 0x1000>; -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "apb_pclk", "wdog_clk"; -+ }; -+ -+ gpio0: gpio@2300000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2300000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio1: gpio@2310000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2310000 0x0 0x10000>; -+ interrupts = <0 36 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio2: gpio@2320000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2320000 0x0 0x10000>; -+ interrupts = <0 37 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ gpio3: gpio@2330000 { -+ compatible = "fsl,qoriq-gpio"; -+ reg = <0x0 0x2330000 0x0 0x10000>; -+ interrupts = <0 37 0x4>; /* Level high type */ -+ gpio-controller; -+ little-endian; -+ #gpio-cells = <2>; -+ interrupt-controller; -+ #interrupt-cells = <2>; -+ }; -+ -+ /* TODO: WRIOP (CCSR?) */ -+ emdio1: mdio@0x8B96000 { /* WRIOP0: 0x8B8_0000, E-MDIO1: 0x1_6000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B96000 0x0 0x1000>; -+ device_type = "mdio"; /* TODO: is this necessary? 
*/ -+ little-endian; /* force the driver in LE mode */ -+ -+ /* Not necessary on the QDS, but needed on the RDB */ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ emdio2: mdio@0x8B97000 { /* WRIOP0: 0x8B8_0000, E-MDIO2: 0x1_7000 */ -+ compatible = "fsl,fman-memac-mdio"; -+ reg = <0x0 0x8B97000 0x0 0x1000>; -+ device_type = "mdio"; /* TODO: is this necessary? */ -+ little-endian; /* force the driver in LE mode */ -+ -+ #address-cells = <1>; -+ #size-cells = <0>; -+ }; -+ -+ ifc: ifc@2240000 { -+ compatible = "fsl,ifc", "simple-bus"; -+ reg = <0x0 0x2240000 0x0 0x20000>; -+ interrupts = <0 21 0x4>; /* Level high type */ -+ little-endian; -+ #address-cells = <2>; -+ #size-cells = <1>; -+ -+ ranges = <0 0 0x5 0x80000000 0x08000000 -+ 2 0 0x5 0x30000000 0x00010000 -+ 3 0 0x5 0x20000000 0x00010000>; -+ }; -+ -+ esdhc: esdhc@2140000 { -+ compatible = "fsl,ls2088a-esdhc", "fsl,ls2080a-esdhc", -+ "fsl,esdhc"; -+ reg = <0x0 0x2140000 0x0 0x10000>; -+ interrupts = <0 28 0x4>; /* Level high type */ -+ clock-frequency = <0>; -+ voltage-ranges = <1800 1800 3300 3300>; -+ sdhci,auto-cmd12; -+ little-endian; -+ bus-width = <4>; -+ }; -+ -+ ftm0: ftm0@2800000 { -+ compatible = "fsl,ftm-alarm"; -+ reg = <0x0 0x2800000 0x0 0x10000>; -+ interrupts = <0 44 4>; -+ }; -+ -+ reset: reset@1E60000 { -+ compatible = "fsl,ls-reset"; -+ reg = <0x0 0x1E60000 0x0 0x10000>; -+ }; -+ -+ dspi: dspi@2100000 { -+ compatible = "fsl,ls2088a-dspi", "fsl,ls2085a-dspi", -+ "fsl,ls2080a-dspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2100000 0x0 0x10000>; -+ interrupts = <0 26 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ clock-names = "dspi"; -+ spi-num-chipselects = <5>; -+ bus-num = <0>; -+ }; -+ -+ i2c0: i2c@2000000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2000000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c1: 
i2c@2010000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2010000 0x0 0x10000>; -+ interrupts = <0 34 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c2: i2c@2020000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2020000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ i2c3: i2c@2030000 { -+ compatible = "fsl,vf610-i2c"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x2030000 0x0 0x10000>; -+ interrupts = <0 35 0x4>; /* Level high type */ -+ clock-names = "i2c"; -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ qspi: quadspi@20c0000 { -+ compatible = "fsl,ls2088a-qspi", "fsl,ls2080a-qspi"; -+ #address-cells = <1>; -+ #size-cells = <0>; -+ reg = <0x0 0x20c0000 0x0 0x10000>, -+ <0x0 0x20000000 0x0 0x10000000>; -+ reg-names = "QuadSPI", "QuadSPI-memory"; -+ interrupts = <0 25 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>, <&clockgen 4 3>; -+ clock-names = "qspi_en", "qspi"; -+ }; -+ -+ pcie1: pcie@3400000 { -+ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03400000 0x0 0x00100000 /* controller registers */ -+ 0x20 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 108 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x20 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x20 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 109 4>, -+ <0000 0 0 2 &gic 0 0 0 
110 4>, -+ <0000 0 0 3 &gic 0 0 0 111 4>, -+ <0000 0 0 4 &gic 0 0 0 112 4>; -+ }; -+ -+ pcie2: pcie@3500000 { -+ compatible = "fsl,ls2080a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03500000 0x0 0x00100000 /* controller registers */ -+ 0x28 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 113 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x28 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x28 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 114 4>, -+ <0000 0 0 2 &gic 0 0 0 115 4>, -+ <0000 0 0 3 &gic 0 0 0 116 4>, -+ <0000 0 0 4 &gic 0 0 0 117 4>; -+ }; -+ -+ pcie3: pcie@3600000 { -+ compatible = "fsl,ls2088a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03600000 0x0 0x00100000 /* controller registers */ -+ 0x30 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 118 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <8>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x30 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x30 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 119 4>, -+ <0000 0 0 2 &gic 0 0 0 120 4>, -+ <0000 0 0 3 &gic 0 0 0 121 4>, -+ <0000 0 0 4 &gic 0 0 0 122 4>; -+ }; -+ -+ pcie4: pcie@3700000 { 
-+ compatible = "fsl,ls2080a-pcie", "fsl,ls2080a-pcie", -+ "fsl,ls2085a-pcie", "snps,dw-pcie"; -+ reg = <0x00 0x03700000 0x0 0x00100000 /* controller registers */ -+ 0x38 0x00000000 0x0 0x00001000>; /* configuration space */ -+ reg-names = "regs", "config"; -+ interrupts = <0 123 0x4>; /* Level high type */ -+ interrupt-names = "aer"; -+ #address-cells = <3>; -+ #size-cells = <2>; -+ device_type = "pci"; -+ dma-coherent; -+ fsl,lut_diff; -+ num-lanes = <4>; -+ bus-range = <0x0 0xff>; -+ ranges = <0x81000000 0x0 0x00000000 0x38 0x00010000 0x0 0x00010000 /* downstream I/O */ -+ 0x82000000 0x0 0x40000000 0x38 0x40000000 0x0 0x40000000>; /* non-prefetchable memory */ -+ msi-parent = <&its>; -+ #interrupt-cells = <1>; -+ interrupt-map-mask = <0 0 0 7>; -+ interrupt-map = <0000 0 0 1 &gic 0 0 0 124 4>, -+ <0000 0 0 2 &gic 0 0 0 125 4>, -+ <0000 0 0 3 &gic 0 0 0 126 4>, -+ <0000 0 0 4 &gic 0 0 0 127 4>; -+ }; -+ -+ sata0: sata@3200000 { -+ status = "disabled"; -+ compatible = "fsl,ls2088a-ahci", "fsl,ls2080a-ahci"; -+ reg = <0x0 0x3200000 0x0 0x10000>; -+ interrupts = <0 133 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ sata1: sata@3210000 { -+ status = "disabled"; -+ compatible = "fsl,ls2088a-ahci", "fsl,ls2080a-ahci"; -+ reg = <0x0 0x3210000 0x0 0x10000>; -+ interrupts = <0 136 0x4>; /* Level high type */ -+ clocks = <&clockgen 4 3>; -+ }; -+ -+ usb0: usb3@3100000 { -+ status = "disabled"; -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3100000 0x0 0x10000>; -+ interrupts = <0 80 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ usb1: usb3@3110000 { -+ status = "disabled"; -+ compatible = "snps,dwc3"; -+ reg = <0x0 0x3110000 0x0 0x10000>; -+ interrupts = <0 81 0x4>; /* Level high type */ -+ dr_mode = "host"; -+ configure-gfladj; -+ snps,dis_rxdet_inp3_quirk; -+ }; -+ -+ smmu: iommu@5000000 { -+ compatible = "arm,mmu-500"; -+ reg = <0 0x5000000 0 0x800000>; -+ #global-interrupts = <12>; -+ 
interrupts = <0 13 4>, /* global secure fault */ -+ <0 14 4>, /* combined secure interrupt */ -+ <0 15 4>, /* global non-secure fault */ -+ <0 16 4>, /* combined non-secure interrupt */ -+ /* performance counter interrupts 0-7 */ -+ <0 211 4>, -+ <0 212 4>, -+ <0 213 4>, -+ <0 214 4>, -+ <0 215 4>, -+ <0 216 4>, -+ <0 217 4>, -+ <0 218 4>, -+ /* per context interrupt, 64 interrupts */ -+ <0 146 4>, -+ <0 147 4>, -+ <0 148 4>, -+ <0 149 4>, -+ <0 150 4>, -+ <0 151 4>, -+ <0 152 4>, -+ <0 153 4>, -+ <0 154 4>, -+ <0 155 4>, -+ <0 156 4>, -+ <0 157 4>, -+ <0 158 4>, -+ <0 159 4>, -+ <0 160 4>, -+ <0 161 4>, -+ <0 162 4>, -+ <0 163 4>, -+ <0 164 4>, -+ <0 165 4>, -+ <0 166 4>, -+ <0 167 4>, -+ <0 168 4>, -+ <0 169 4>, -+ <0 170 4>, -+ <0 171 4>, -+ <0 172 4>, -+ <0 173 4>, -+ <0 174 4>, -+ <0 175 4>, -+ <0 176 4>, -+ <0 177 4>, -+ <0 178 4>, -+ <0 179 4>, -+ <0 180 4>, -+ <0 181 4>, -+ <0 182 4>, -+ <0 183 4>, -+ <0 184 4>, -+ <0 185 4>, -+ <0 186 4>, -+ <0 187 4>, -+ <0 188 4>, -+ <0 189 4>, -+ <0 190 4>, -+ <0 191 4>, -+ <0 192 4>, -+ <0 193 4>, -+ <0 194 4>, -+ <0 195 4>, -+ <0 196 4>, -+ <0 197 4>, -+ <0 198 4>, -+ <0 199 4>, -+ <0 200 4>, -+ <0 201 4>, -+ <0 202 4>, -+ <0 203 4>, -+ <0 204 4>, -+ <0 205 4>, -+ <0 206 4>, -+ <0 207 4>, -+ <0 208 4>, -+ <0 209 4>; -+ mmu-masters = <&fsl_mc 0x300 0>; -+ }; -+ -+ timer { -+ compatible = "arm,armv8-timer"; -+ interrupts = <1 13 0x1>, /* Physical Secure PPI, edge triggered */ -+ <1 14 0x1>, /* Physical Non-Secure PPI, edge triggered */ -+ <1 11 0x1>, /* Virtual PPI, edge triggered */ -+ <1 10 0x1>; /* Hypervisor PPI, edge triggered */ -+ arm,reread-timer; -+ fsl,erratum-a008585; -+ }; -+ -+ fsl_mc: fsl-mc@80c000000 { -+ compatible = "fsl,qoriq-mc"; -+ #stream-id-cells = <2>; -+ reg = <0x00000008 0x0c000000 0 0x40>, /* MC portal base */ -+ <0x00000000 0x08340000 0 0x40000>; /* MC control reg */ -+ msi-parent = <&its>; -+ #address-cells = <3>; -+ #size-cells = <1>; -+ -+ /* -+ * Region type 0x0 - MC portals -+ * Region 
type 0x1 - QBMAN portals -+ */ -+ ranges = <0x0 0x0 0x0 0x8 0x0c000000 0x4000000 -+ 0x1 0x0 0x0 0x8 0x18000000 0x8000000>; -+ -+ /* -+ * Define the maximum number of MACs present on the SoC. -+ * They won't necessarily be all probed, since the -+ * Data Path Layout file and the MC firmware can put fewer -+ * actual DPMAC objects on the MC bus. -+ */ -+ dpmacs { -+ #address-cells = <1>; -+ #size-cells = <0>; -+ -+ dpmac1: dpmac@1 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <1>; -+ }; -+ dpmac2: dpmac@2 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <2>; -+ }; -+ dpmac3: dpmac@3 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <3>; -+ }; -+ dpmac4: dpmac@4 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <4>; -+ }; -+ dpmac5: dpmac@5 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <5>; -+ }; -+ dpmac6: dpmac@6 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <6>; -+ }; -+ dpmac7: dpmac@7 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <7>; -+ }; -+ dpmac8: dpmac@8 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <8>; -+ }; -+ dpmac9: dpmac@9 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <9>; -+ }; -+ dpmac10: dpmac@10 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xa>; -+ }; -+ dpmac11: dpmac@11 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xb>; -+ }; -+ dpmac12: dpmac@12 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xc>; -+ }; -+ dpmac13: dpmac@13 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xd>; -+ }; -+ dpmac14: dpmac@14 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xe>; -+ }; -+ dpmac15: dpmac@15 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0xf>; -+ }; -+ dpmac16: dpmac@16 { -+ compatible = "fsl,qoriq-mc-dpmac"; -+ reg = <0x10>; -+ }; -+ }; -+ }; -+ -+ ccn@4000000 { -+ compatible = "arm,ccn-504"; -+ reg = <0x0 0x04000000 0x0 0x01000000>; -+ interrupts = <0 12 4>; -+ }; -+ -+ memory@80000000 { -+ device_type = "memory"; -+ reg = <0x00000000 0x80000000 0 0x80000000>; -+ /* DRAM space 1 - 2 GB DRAM */ -+ }; -+}; --- 
-2.1.0.27.g96db324 - diff --git a/packages/base/any/kernels/3.18.25/patches/aufs.patch b/packages/base/any/kernels/3.18.25/patches/aufs.patch deleted file mode 100644 index b0dd85d1..00000000 --- a/packages/base/any/kernels/3.18.25/patches/aufs.patch +++ /dev/null @@ -1,33877 +0,0 @@ -From 9da2de12351d9d38412a0074d49efaf3c5bf6f3f Mon Sep 17 00:00:00 2001 -From: Steven Noble -Date: Thu, 12 May 2016 21:42:28 +0000 -Subject: [PATCH] AUFS patches applied - ---- - MAINTAINERS | 14 + - drivers/block/loop.c | 18 + - fs/Kconfig | 1 + - fs/Makefile | 1 + - fs/aufs/Kconfig | 185 ++++ - fs/aufs/Makefile | 44 + - fs/aufs/aufs.h | 59 ++ - fs/aufs/branch.c | 1402 ++++++++++++++++++++++++++++++ - fs/aufs/branch.h | 279 ++++++ - fs/aufs/conf.mk | 38 + - fs/aufs/cpup.c | 1368 +++++++++++++++++++++++++++++ - fs/aufs/cpup.h | 94 ++ - fs/aufs/dbgaufs.c | 432 +++++++++ - fs/aufs/dbgaufs.h | 48 + - fs/aufs/dcsub.c | 224 +++++ - fs/aufs/dcsub.h | 123 +++ - fs/aufs/debug.c | 436 ++++++++++ - fs/aufs/debug.h | 228 +++++ - fs/aufs/dentry.c | 1129 ++++++++++++++++++++++++ - fs/aufs/dentry.h | 234 +++++ - fs/aufs/dinfo.c | 544 ++++++++++++ - fs/aufs/dir.c | 756 ++++++++++++++++ - fs/aufs/dir.h | 131 +++ - fs/aufs/dynop.c | 379 ++++++++ - fs/aufs/dynop.h | 76 ++ - fs/aufs/export.c | 831 ++++++++++++++++++ - fs/aufs/f_op.c | 781 +++++++++++++++++ - fs/aufs/fhsm.c | 426 +++++++++ - fs/aufs/file.c | 857 ++++++++++++++++++ - fs/aufs/file.h | 291 +++++++ - fs/aufs/finfo.c | 156 ++++ - fs/aufs/fstype.h | 400 +++++++++ - fs/aufs/hfsnotify.c | 288 ++++++ - fs/aufs/hfsplus.c | 56 ++ - fs/aufs/hnotify.c | 714 +++++++++++++++ - fs/aufs/i_op.c | 1460 +++++++++++++++++++++++++++++++ - fs/aufs/i_op_add.c | 930 ++++++++++++++++++++ - fs/aufs/i_op_del.c | 506 +++++++++++ - fs/aufs/i_op_ren.c | 1013 ++++++++++++++++++++++ - fs/aufs/iinfo.c | 277 ++++++ - fs/aufs/inode.c | 522 +++++++++++ - fs/aufs/inode.h | 686 +++++++++++++++ - fs/aufs/ioctl.c | 219 +++++ - fs/aufs/loop.c | 146 ++++ - fs/aufs/loop.h | 52 ++ - 
fs/aufs/magic.mk | 30 + - fs/aufs/module.c | 222 +++++ - fs/aufs/module.h | 105 +++ - fs/aufs/mvdown.c | 703 +++++++++++++++ - fs/aufs/opts.c | 1878 ++++++++++++++++++++++++++++++++++++++++ - fs/aufs/opts.h | 212 +++++ - fs/aufs/plink.c | 506 +++++++++++ - fs/aufs/poll.c | 52 ++ - fs/aufs/posix_acl.c | 98 +++ - fs/aufs/procfs.c | 169 ++++ - fs/aufs/rdu.c | 388 +++++++++ - fs/aufs/rwsem.h | 191 ++++ - fs/aufs/sbinfo.c | 348 ++++++++ - fs/aufs/spl.h | 111 +++ - fs/aufs/super.c | 1041 ++++++++++++++++++++++ - fs/aufs/super.h | 626 ++++++++++++++ - fs/aufs/sysaufs.c | 104 +++ - fs/aufs/sysaufs.h | 101 +++ - fs/aufs/sysfs.c | 376 ++++++++ - fs/aufs/sysrq.c | 157 ++++ - fs/aufs/vdir.c | 888 +++++++++++++++++++ - fs/aufs/vfsub.c | 864 ++++++++++++++++++ - fs/aufs/vfsub.h | 315 +++++++ - fs/aufs/wbr_policy.c | 765 ++++++++++++++++ - fs/aufs/whout.c | 1061 +++++++++++++++++++++++ - fs/aufs/whout.h | 85 ++ - fs/aufs/wkq.c | 213 +++++ - fs/aufs/wkq.h | 91 ++ - fs/aufs/xattr.c | 344 ++++++++ - fs/aufs/xino.c | 1343 ++++++++++++++++++++++++++++ - fs/buffer.c | 2 +- - fs/dcache.c | 2 +- - fs/fcntl.c | 4 +- - fs/inode.c | 2 +- - fs/proc/base.c | 2 +- - fs/proc/nommu.c | 5 +- - fs/proc/task_mmu.c | 7 +- - fs/proc/task_nommu.c | 5 +- - fs/splice.c | 10 +- - include/linux/file.h | 1 + - include/linux/fs.h | 3 + - include/linux/mm.h | 22 + - include/linux/mm_types.h | 2 + - include/linux/splice.h | 6 + - include/uapi/linux/Kbuild | 1 + - include/uapi/linux/aufs_type.h | 419 +++++++++ - kernel/fork.c | 2 +- - mm/Makefile | 2 +- - mm/filemap.c | 2 +- - mm/fremap.c | 16 +- - mm/memory.c | 2 +- - mm/mmap.c | 12 +- - mm/nommu.c | 10 +- - mm/prfile.c | 86 ++ - 99 files changed, 32835 insertions(+), 31 deletions(-) - create mode 100644 fs/aufs/Kconfig - create mode 100644 fs/aufs/Makefile - create mode 100644 fs/aufs/aufs.h - create mode 100644 fs/aufs/branch.c - create mode 100644 fs/aufs/branch.h - create mode 100644 fs/aufs/conf.mk - create mode 100644 fs/aufs/cpup.c - create mode 100644 
fs/aufs/cpup.h - create mode 100644 fs/aufs/dbgaufs.c - create mode 100644 fs/aufs/dbgaufs.h - create mode 100644 fs/aufs/dcsub.c - create mode 100644 fs/aufs/dcsub.h - create mode 100644 fs/aufs/debug.c - create mode 100644 fs/aufs/debug.h - create mode 100644 fs/aufs/dentry.c - create mode 100644 fs/aufs/dentry.h - create mode 100644 fs/aufs/dinfo.c - create mode 100644 fs/aufs/dir.c - create mode 100644 fs/aufs/dir.h - create mode 100644 fs/aufs/dynop.c - create mode 100644 fs/aufs/dynop.h - create mode 100644 fs/aufs/export.c - create mode 100644 fs/aufs/f_op.c - create mode 100644 fs/aufs/fhsm.c - create mode 100644 fs/aufs/file.c - create mode 100644 fs/aufs/file.h - create mode 100644 fs/aufs/finfo.c - create mode 100644 fs/aufs/fstype.h - create mode 100644 fs/aufs/hfsnotify.c - create mode 100644 fs/aufs/hfsplus.c - create mode 100644 fs/aufs/hnotify.c - create mode 100644 fs/aufs/i_op.c - create mode 100644 fs/aufs/i_op_add.c - create mode 100644 fs/aufs/i_op_del.c - create mode 100644 fs/aufs/i_op_ren.c - create mode 100644 fs/aufs/iinfo.c - create mode 100644 fs/aufs/inode.c - create mode 100644 fs/aufs/inode.h - create mode 100644 fs/aufs/ioctl.c - create mode 100644 fs/aufs/loop.c - create mode 100644 fs/aufs/loop.h - create mode 100644 fs/aufs/magic.mk - create mode 100644 fs/aufs/module.c - create mode 100644 fs/aufs/module.h - create mode 100644 fs/aufs/mvdown.c - create mode 100644 fs/aufs/opts.c - create mode 100644 fs/aufs/opts.h - create mode 100644 fs/aufs/plink.c - create mode 100644 fs/aufs/poll.c - create mode 100644 fs/aufs/posix_acl.c - create mode 100644 fs/aufs/procfs.c - create mode 100644 fs/aufs/rdu.c - create mode 100644 fs/aufs/rwsem.h - create mode 100644 fs/aufs/sbinfo.c - create mode 100644 fs/aufs/spl.h - create mode 100644 fs/aufs/super.c - create mode 100644 fs/aufs/super.h - create mode 100644 fs/aufs/sysaufs.c - create mode 100644 fs/aufs/sysaufs.h - create mode 100644 fs/aufs/sysfs.c - create mode 100644 fs/aufs/sysrq.c - 
create mode 100644 fs/aufs/vdir.c - create mode 100644 fs/aufs/vfsub.c - create mode 100644 fs/aufs/vfsub.h - create mode 100644 fs/aufs/wbr_policy.c - create mode 100644 fs/aufs/whout.c - create mode 100644 fs/aufs/whout.h - create mode 100644 fs/aufs/wkq.c - create mode 100644 fs/aufs/wkq.h - create mode 100644 fs/aufs/xattr.c - create mode 100644 fs/aufs/xino.c - create mode 100644 include/uapi/linux/aufs_type.h - create mode 100644 mm/prfile.c - -diff --git a/MAINTAINERS b/MAINTAINERS -index c721042..83801d0 100644 ---- a/MAINTAINERS -+++ b/MAINTAINERS -@@ -1795,6 +1795,20 @@ F: include/linux/audit.h - F: include/uapi/linux/audit.h - F: kernel/audit* - -+AUFS (advanced multi layered unification filesystem) FILESYSTEM -+M: "J. R. Okajima" -+L: linux-unionfs@vger.kernel.org -+L: aufs-users@lists.sourceforge.net (members only) -+W: http://aufs.sourceforge.net -+T: git://git.code.sf.net/p/aufs/aufs3-linux -+T: git://github.com/sfjro/aufs3-linux.git -+S: Supported -+F: Documentation/filesystems/aufs/ -+F: Documentation/ABI/testing/debugfs-aufs -+F: Documentation/ABI/testing/sysfs-aufs -+F: fs/aufs/ -+F: include/uapi/linux/aufs_type.h -+ - AUXILIARY DISPLAY DRIVERS - M: Miguel Ojeda Sandonis - W: http://miguelojeda.es/auxdisplay.htm -diff --git a/drivers/block/loop.c b/drivers/block/loop.c -index 6cb1beb..12678be 100644 ---- a/drivers/block/loop.c -+++ b/drivers/block/loop.c -@@ -692,6 +692,24 @@ static inline int is_loop_device(struct file *file) - return i && S_ISBLK(i->i_mode) && MAJOR(i->i_rdev) == LOOP_MAJOR; - } - -+/* -+ * for AUFS -+ * no get/put for file. 
-+ */ -+struct file *loop_backing_file(struct super_block *sb) -+{ -+ struct file *ret; -+ struct loop_device *l; -+ -+ ret = NULL; -+ if (MAJOR(sb->s_dev) == LOOP_MAJOR) { -+ l = sb->s_bdev->bd_disk->private_data; -+ ret = l->lo_backing_file; -+ } -+ return ret; -+} -+EXPORT_SYMBOL_GPL(loop_backing_file); -+ - /* loop sysfs attributes */ - - static ssize_t loop_attr_show(struct device *dev, char *page, -diff --git a/fs/Kconfig b/fs/Kconfig -index 664991a..1481093 100644 ---- a/fs/Kconfig -+++ b/fs/Kconfig -@@ -210,6 +210,7 @@ source "fs/ufs/Kconfig" - source "fs/exofs/Kconfig" - source "fs/f2fs/Kconfig" - source "fs/efivarfs/Kconfig" -+source "fs/aufs/Kconfig" - - endif # MISC_FILESYSTEMS - -diff --git a/fs/Makefile b/fs/Makefile -index da0bbb4..c8bc724 100644 ---- a/fs/Makefile -+++ b/fs/Makefile -@@ -126,3 +126,4 @@ obj-y += exofs/ # Multiple modules - obj-$(CONFIG_CEPH_FS) += ceph/ - obj-$(CONFIG_PSTORE) += pstore/ - obj-$(CONFIG_EFIVAR_FS) += efivarfs/ -+obj-$(CONFIG_AUFS_FS) += aufs/ -diff --git a/fs/aufs/Kconfig b/fs/aufs/Kconfig -new file mode 100644 -index 0000000..63560ce ---- /dev/null -+++ b/fs/aufs/Kconfig -@@ -0,0 +1,185 @@ -+config AUFS_FS -+ tristate "Aufs (Advanced multi layered unification filesystem) support" -+ help -+ Aufs is a stackable unification filesystem such as Unionfs, -+ which unifies several directories and provides a merged single -+ directory. -+ In the early days, aufs was entirely re-designed and -+ re-implemented Unionfs Version 1.x series. Introducing many -+ original ideas, approaches and improvements, it becomes totally -+ different from Unionfs while keeping the basic features. -+ -+if AUFS_FS -+choice -+ prompt "Maximum number of branches" -+ default AUFS_BRANCH_MAX_127 -+ help -+ Specifies the maximum number of branches (or member directories) -+ in a single aufs. The larger value consumes more system -+ resources and has a minor impact to performance. 
-+config AUFS_BRANCH_MAX_127 -+ bool "127" -+ help -+ Specifies the maximum number of branches (or member directories) -+ in a single aufs. The larger value consumes more system -+ resources and has a minor impact to performance. -+config AUFS_BRANCH_MAX_511 -+ bool "511" -+ help -+ Specifies the maximum number of branches (or member directories) -+ in a single aufs. The larger value consumes more system -+ resources and has a minor impact to performance. -+config AUFS_BRANCH_MAX_1023 -+ bool "1023" -+ help -+ Specifies the maximum number of branches (or member directories) -+ in a single aufs. The larger value consumes more system -+ resources and has a minor impact to performance. -+config AUFS_BRANCH_MAX_32767 -+ bool "32767" -+ help -+ Specifies the maximum number of branches (or member directories) -+ in a single aufs. The larger value consumes more system -+ resources and has a minor impact to performance. -+endchoice -+ -+config AUFS_SBILIST -+ bool -+ depends on AUFS_MAGIC_SYSRQ || PROC_FS -+ default y -+ help -+ Automatic configuration for internal use. -+ When aufs supports Magic SysRq or /proc, enabled automatically. -+ -+config AUFS_HNOTIFY -+ bool "Detect direct branch access (bypassing aufs)" -+ help -+ If you want to modify files on branches directly, eg. bypassing aufs, -+ and want aufs to detect the changes of them fully, then enable this -+ option and use 'udba=notify' mount option. -+ Currently there is only one available configuration, "fsnotify". -+ It will have a negative impact to the performance. -+ See detail in aufs.5. -+ -+choice -+ prompt "method" if AUFS_HNOTIFY -+ default AUFS_HFSNOTIFY -+config AUFS_HFSNOTIFY -+ bool "fsnotify" -+ select FSNOTIFY -+endchoice -+ -+config AUFS_EXPORT -+ bool "NFS-exportable aufs" -+ depends on EXPORTFS -+ help -+ If you want to export your mounted aufs via NFS, then enable this -+ option. There are several requirements for this configuration. -+ See detail in aufs.5. 
-+ -+config AUFS_INO_T_64 -+ bool -+ depends on AUFS_EXPORT -+ depends on 64BIT && !(ALPHA || S390) -+ default y -+ help -+ Automatic configuration for internal use. -+ /* typedef unsigned long/int __kernel_ino_t */ -+ /* alpha and s390x are int */ -+ -+config AUFS_XATTR -+ bool "support for XATTR/EA (including Security Labels)" -+ help -+ If your branch fs supports XATTR/EA and you want to make them -+ available in aufs too, then enable this opsion and specify the -+ branch attributes for EA. -+ See detail in aufs.5. -+ -+config AUFS_FHSM -+ bool "File-based Hierarchical Storage Management" -+ help -+ Hierarchical Storage Management (or HSM) is a well-known feature -+ in the storage world. Aufs provides this feature as file-based. -+ with multiple branches. -+ These multiple branches are prioritized, ie. the topmost one -+ should be the fastest drive and be used heavily. -+ -+config AUFS_RDU -+ bool "Readdir in userspace" -+ help -+ Aufs has two methods to provide a merged view for a directory, -+ by a user-space library and by kernel-space natively. The latter -+ is always enabled but sometimes large and slow. -+ If you enable this option, install the library in aufs2-util -+ package, and set some environment variables for your readdir(3), -+ then the work will be handled in user-space which generally -+ shows better performance in most cases. -+ See detail in aufs.5. -+ -+config AUFS_SHWH -+ bool "Show whiteouts" -+ help -+ If you want to make the whiteouts in aufs visible, then enable -+ this option and specify 'shwh' mount option. Although it may -+ sounds like philosophy or something, but in technically it -+ simply shows the name of whiteout with keeping its behaviour. -+ -+config AUFS_BR_RAMFS -+ bool "Ramfs (initramfs/rootfs) as an aufs branch" -+ help -+ If you want to use ramfs as an aufs branch fs, then enable this -+ option. Generally tmpfs is recommended. 
-+ Aufs prohibited them to be a branch fs by default, because -+ initramfs becomes unusable after switch_root or something -+ generally. If you sets initramfs as an aufs branch and boot your -+ system by switch_root, you will meet a problem easily since the -+ files in initramfs may be inaccessible. -+ Unless you are going to use ramfs as an aufs branch fs without -+ switch_root or something, leave it N. -+ -+config AUFS_BR_FUSE -+ bool "Fuse fs as an aufs branch" -+ depends on FUSE_FS -+ select AUFS_POLL -+ help -+ If you want to use fuse-based userspace filesystem as an aufs -+ branch fs, then enable this option. -+ It implements the internal poll(2) operation which is -+ implemented by fuse only (curretnly). -+ -+config AUFS_POLL -+ bool -+ help -+ Automatic configuration for internal use. -+ -+config AUFS_BR_HFSPLUS -+ bool "Hfsplus as an aufs branch" -+ depends on HFSPLUS_FS -+ default y -+ help -+ If you want to use hfsplus fs as an aufs branch fs, then enable -+ this option. This option introduces a small overhead at -+ copying-up a file on hfsplus. -+ -+config AUFS_BDEV_LOOP -+ bool -+ depends on BLK_DEV_LOOP -+ default y -+ help -+ Automatic configuration for internal use. -+ Convert =[ym] into =y. -+ -+config AUFS_DEBUG -+ bool "Debug aufs" -+ help -+ Enable this to compile aufs internal debug code. -+ It will have a negative impact to the performance. -+ -+config AUFS_MAGIC_SYSRQ -+ bool -+ depends on AUFS_DEBUG && MAGIC_SYSRQ -+ default y -+ help -+ Automatic configuration for internal use. -+ When aufs supports Magic SysRq, enabled automatically. -+endif -diff --git a/fs/aufs/Makefile b/fs/aufs/Makefile -new file mode 100644 -index 0000000..c7a501e ---- /dev/null -+++ b/fs/aufs/Makefile -@@ -0,0 +1,44 @@ -+ -+include ${src}/magic.mk -+ifeq (${CONFIG_AUFS_FS},m) -+include ${src}/conf.mk -+endif -+-include ${src}/priv_def.mk -+ -+# cf. 
include/linux/kernel.h -+# enable pr_debug -+ccflags-y += -DDEBUG -+# sparse requires the full pathname -+ifdef M -+ccflags-y += -include ${M}/../../include/uapi/linux/aufs_type.h -+else -+ccflags-y += -include ${srctree}/include/uapi/linux/aufs_type.h -+endif -+ -+obj-$(CONFIG_AUFS_FS) += aufs.o -+aufs-y := module.o sbinfo.o super.o branch.o xino.o sysaufs.o opts.o \ -+ wkq.o vfsub.o dcsub.o \ -+ cpup.o whout.o wbr_policy.o \ -+ dinfo.o dentry.o \ -+ dynop.o \ -+ finfo.o file.o f_op.o \ -+ dir.o vdir.o \ -+ iinfo.o inode.o i_op.o i_op_add.o i_op_del.o i_op_ren.o \ -+ mvdown.o ioctl.o -+ -+# all are boolean -+aufs-$(CONFIG_PROC_FS) += procfs.o plink.o -+aufs-$(CONFIG_SYSFS) += sysfs.o -+aufs-$(CONFIG_DEBUG_FS) += dbgaufs.o -+aufs-$(CONFIG_AUFS_BDEV_LOOP) += loop.o -+aufs-$(CONFIG_AUFS_HNOTIFY) += hnotify.o -+aufs-$(CONFIG_AUFS_HFSNOTIFY) += hfsnotify.o -+aufs-$(CONFIG_AUFS_EXPORT) += export.o -+aufs-$(CONFIG_AUFS_XATTR) += xattr.o -+aufs-$(CONFIG_FS_POSIX_ACL) += posix_acl.o -+aufs-$(CONFIG_AUFS_FHSM) += fhsm.o -+aufs-$(CONFIG_AUFS_POLL) += poll.o -+aufs-$(CONFIG_AUFS_RDU) += rdu.o -+aufs-$(CONFIG_AUFS_BR_HFSPLUS) += hfsplus.o -+aufs-$(CONFIG_AUFS_DEBUG) += debug.o -+aufs-$(CONFIG_AUFS_MAGIC_SYSRQ) += sysrq.o -diff --git a/fs/aufs/aufs.h b/fs/aufs/aufs.h -new file mode 100644 -index 0000000..e48d268 ---- /dev/null -+++ b/fs/aufs/aufs.h -@@ -0,0 +1,59 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * all header files -+ */ -+ -+#ifndef __AUFS_H__ -+#define __AUFS_H__ -+ -+#ifdef __KERNEL__ -+ -+#define AuStub(type, name, body, ...) \ -+ static inline type name(__VA_ARGS__) { body; } -+ -+#define AuStubVoid(name, ...) \ -+ AuStub(void, name, , __VA_ARGS__) -+#define AuStubInt0(name, ...) \ -+ AuStub(int, name, return 0, __VA_ARGS__) -+ -+#include "debug.h" -+ -+#include "branch.h" -+#include "cpup.h" -+#include "dcsub.h" -+#include "dbgaufs.h" -+#include "dentry.h" -+#include "dir.h" -+#include "dynop.h" -+#include "file.h" -+#include "fstype.h" -+#include "inode.h" -+#include "loop.h" -+#include "module.h" -+#include "opts.h" -+#include "rwsem.h" -+#include "spl.h" -+#include "super.h" -+#include "sysaufs.h" -+#include "vfsub.h" -+#include "whout.h" -+#include "wkq.h" -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_H__ */ -diff --git a/fs/aufs/branch.c b/fs/aufs/branch.c -new file mode 100644 -index 0000000..17210b2 ---- /dev/null -+++ b/fs/aufs/branch.c -@@ -0,0 +1,1402 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * branch management -+ */ -+ -+#include -+#include -+#include "aufs.h" -+ -+/* -+ * free a single branch -+ */ -+static void au_br_do_free(struct au_branch *br) -+{ -+ int i; -+ struct au_wbr *wbr; -+ struct au_dykey **key; -+ -+ au_hnotify_fin_br(br); -+ -+ if (br->br_xino.xi_file) -+ fput(br->br_xino.xi_file); -+ mutex_destroy(&br->br_xino.xi_nondir_mtx); -+ -+ AuDebugOn(atomic_read(&br->br_count)); -+ -+ wbr = br->br_wbr; -+ if (wbr) { -+ for (i = 0; i < AuBrWh_Last; i++) -+ dput(wbr->wbr_wh[i]); -+ AuDebugOn(atomic_read(&wbr->wbr_wh_running)); -+ AuRwDestroy(&wbr->wbr_wh_rwsem); -+ } -+ -+ if (br->br_fhsm) { -+ au_br_fhsm_fin(br->br_fhsm); -+ kfree(br->br_fhsm); -+ } -+ -+ key = br->br_dykey; -+ for (i = 0; i < AuBrDynOp; i++, key++) -+ if (*key) -+ au_dy_put(*key); -+ else -+ break; -+ -+ /* recursive lock, s_umount of branch's */ -+ lockdep_off(); -+ path_put(&br->br_path); -+ lockdep_on(); -+ kfree(wbr); -+ kfree(br); -+} -+ -+/* -+ * frees all branches -+ */ -+void au_br_free(struct au_sbinfo *sbinfo) -+{ -+ aufs_bindex_t bmax; -+ struct au_branch **br; -+ -+ AuRwMustWriteLock(&sbinfo->si_rwsem); -+ -+ bmax = sbinfo->si_bend + 1; -+ br = sbinfo->si_branch; -+ while (bmax--) -+ au_br_do_free(*br++); -+} -+ -+/* -+ * find the index of a branch which is specified by @br_id. 
-+ */ -+int au_br_index(struct super_block *sb, aufs_bindex_t br_id) -+{ -+ aufs_bindex_t bindex, bend; -+ -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) -+ if (au_sbr_id(sb, bindex) == br_id) -+ return bindex; -+ return -1; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * add a branch -+ */ -+ -+static int test_overlap(struct super_block *sb, struct dentry *h_adding, -+ struct dentry *h_root) -+{ -+ if (unlikely(h_adding == h_root -+ || au_test_loopback_overlap(sb, h_adding))) -+ return 1; -+ if (h_adding->d_sb != h_root->d_sb) -+ return 0; -+ return au_test_subdir(h_adding, h_root) -+ || au_test_subdir(h_root, h_adding); -+} -+ -+/* -+ * returns a newly allocated branch. @new_nbranch is a number of branches -+ * after adding a branch. -+ */ -+static struct au_branch *au_br_alloc(struct super_block *sb, int new_nbranch, -+ int perm) -+{ -+ struct au_branch *add_branch; -+ struct dentry *root; -+ int err; -+ -+ err = -ENOMEM; -+ root = sb->s_root; -+ add_branch = kzalloc(sizeof(*add_branch), GFP_NOFS); -+ if (unlikely(!add_branch)) -+ goto out; -+ -+ err = au_hnotify_init_br(add_branch, perm); -+ if (unlikely(err)) -+ goto out_br; -+ -+ if (au_br_writable(perm)) { -+ /* may be freed separately at changing the branch permission */ -+ add_branch->br_wbr = kzalloc(sizeof(*add_branch->br_wbr), -+ GFP_NOFS); -+ if (unlikely(!add_branch->br_wbr)) -+ goto out_hnotify; -+ } -+ -+ if (au_br_fhsm(perm)) { -+ err = au_fhsm_br_alloc(add_branch); -+ if (unlikely(err)) -+ goto out_wbr; -+ } -+ -+ err = au_sbr_realloc(au_sbi(sb), new_nbranch); -+ if (!err) -+ err = au_di_realloc(au_di(root), new_nbranch); -+ if (!err) -+ err = au_ii_realloc(au_ii(root->d_inode), new_nbranch); -+ if (!err) -+ return add_branch; /* success */ -+ -+out_wbr: -+ kfree(add_branch->br_wbr); -+out_hnotify: -+ au_hnotify_fin_br(add_branch); -+out_br: -+ kfree(add_branch); -+out: -+ return ERR_PTR(err); -+} -+ -+/* -+ * test if the 
branch permission is legal or not. -+ */ -+static int test_br(struct inode *inode, int brperm, char *path) -+{ -+ int err; -+ -+ err = (au_br_writable(brperm) && IS_RDONLY(inode)); -+ if (!err) -+ goto out; -+ -+ err = -EINVAL; -+ pr_err("write permission for readonly mount or inode, %s\n", path); -+ -+out: -+ return err; -+} -+ -+/* -+ * returns: -+ * 0: success, the caller will add it -+ * plus: success, it is already unified, the caller should ignore it -+ * minus: error -+ */ -+static int test_add(struct super_block *sb, struct au_opt_add *add, int remount) -+{ -+ int err; -+ aufs_bindex_t bend, bindex; -+ struct dentry *root; -+ struct inode *inode, *h_inode; -+ -+ root = sb->s_root; -+ bend = au_sbend(sb); -+ if (unlikely(bend >= 0 -+ && au_find_dbindex(root, add->path.dentry) >= 0)) { -+ err = 1; -+ if (!remount) { -+ err = -EINVAL; -+ pr_err("%s duplicated\n", add->pathname); -+ } -+ goto out; -+ } -+ -+ err = -ENOSPC; /* -E2BIG; */ -+ if (unlikely(AUFS_BRANCH_MAX <= add->bindex -+ || AUFS_BRANCH_MAX - 1 <= bend)) { -+ pr_err("number of branches exceeded %s\n", add->pathname); -+ goto out; -+ } -+ -+ err = -EDOM; -+ if (unlikely(add->bindex < 0 || bend + 1 < add->bindex)) { -+ pr_err("bad index %d\n", add->bindex); -+ goto out; -+ } -+ -+ inode = add->path.dentry->d_inode; -+ err = -ENOENT; -+ if (unlikely(!inode->i_nlink)) { -+ pr_err("no existence %s\n", add->pathname); -+ goto out; -+ } -+ -+ err = -EINVAL; -+ if (unlikely(inode->i_sb == sb)) { -+ pr_err("%s must be outside\n", add->pathname); -+ goto out; -+ } -+ -+ if (unlikely(au_test_fs_unsuppoted(inode->i_sb))) { -+ pr_err("unsupported filesystem, %s (%s)\n", -+ add->pathname, au_sbtype(inode->i_sb)); -+ goto out; -+ } -+ -+ if (unlikely(inode->i_sb->s_stack_depth)) { -+ pr_err("already stacked, %s (%s)\n", -+ add->pathname, au_sbtype(inode->i_sb)); -+ goto out; -+ } -+ -+ err = test_br(add->path.dentry->d_inode, add->perm, add->pathname); -+ if (unlikely(err)) -+ goto out; -+ -+ if (bend < 0) -+ 
return 0; /* success */ -+ -+ err = -EINVAL; -+ for (bindex = 0; bindex <= bend; bindex++) -+ if (unlikely(test_overlap(sb, add->path.dentry, -+ au_h_dptr(root, bindex)))) { -+ pr_err("%s is overlapped\n", add->pathname); -+ goto out; -+ } -+ -+ err = 0; -+ if (au_opt_test(au_mntflags(sb), WARN_PERM)) { -+ h_inode = au_h_dptr(root, 0)->d_inode; -+ if ((h_inode->i_mode & S_IALLUGO) != (inode->i_mode & S_IALLUGO) -+ || !uid_eq(h_inode->i_uid, inode->i_uid) -+ || !gid_eq(h_inode->i_gid, inode->i_gid)) -+ pr_warn("uid/gid/perm %s %u/%u/0%o, %u/%u/0%o\n", -+ add->pathname, -+ i_uid_read(inode), i_gid_read(inode), -+ (inode->i_mode & S_IALLUGO), -+ i_uid_read(h_inode), i_gid_read(h_inode), -+ (h_inode->i_mode & S_IALLUGO)); -+ } -+ -+out: -+ return err; -+} -+ -+/* -+ * initialize or clean the whiteouts for an adding branch -+ */ -+static int au_br_init_wh(struct super_block *sb, struct au_branch *br, -+ int new_perm) -+{ -+ int err, old_perm; -+ aufs_bindex_t bindex; -+ struct mutex *h_mtx; -+ struct au_wbr *wbr; -+ struct au_hinode *hdir; -+ -+ err = vfsub_mnt_want_write(au_br_mnt(br)); -+ if (unlikely(err)) -+ goto out; -+ -+ wbr = br->br_wbr; -+ old_perm = br->br_perm; -+ br->br_perm = new_perm; -+ hdir = NULL; -+ h_mtx = NULL; -+ bindex = au_br_index(sb, br->br_id); -+ if (0 <= bindex) { -+ hdir = au_hi(sb->s_root->d_inode, bindex); -+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT); -+ } else { -+ h_mtx = &au_br_dentry(br)->d_inode->i_mutex; -+ mutex_lock_nested(h_mtx, AuLsc_I_PARENT); -+ } -+ if (!wbr) -+ err = au_wh_init(br, sb); -+ else { -+ wbr_wh_write_lock(wbr); -+ err = au_wh_init(br, sb); -+ wbr_wh_write_unlock(wbr); -+ } -+ if (hdir) -+ au_hn_imtx_unlock(hdir); -+ else -+ mutex_unlock(h_mtx); -+ vfsub_mnt_drop_write(au_br_mnt(br)); -+ br->br_perm = old_perm; -+ -+ if (!err && wbr && !au_br_writable(new_perm)) { -+ kfree(wbr); -+ br->br_wbr = NULL; -+ } -+ -+out: -+ return err; -+} -+ -+static int au_wbr_init(struct au_branch *br, struct super_block *sb, -+ 
int perm) -+{ -+ int err; -+ struct kstatfs kst; -+ struct au_wbr *wbr; -+ -+ wbr = br->br_wbr; -+ au_rw_init(&wbr->wbr_wh_rwsem); -+ atomic_set(&wbr->wbr_wh_running, 0); -+ -+ /* -+ * a limit for rmdir/rename a dir -+ * cf. AUFS_MAX_NAMELEN in include/uapi/linux/aufs_type.h -+ */ -+ err = vfs_statfs(&br->br_path, &kst); -+ if (unlikely(err)) -+ goto out; -+ err = -EINVAL; -+ if (kst.f_namelen >= NAME_MAX) -+ err = au_br_init_wh(sb, br, perm); -+ else -+ pr_err("%pd(%s), unsupported namelen %ld\n", -+ au_br_dentry(br), -+ au_sbtype(au_br_dentry(br)->d_sb), kst.f_namelen); -+ -+out: -+ return err; -+} -+ -+/* initialize a new branch */ -+static int au_br_init(struct au_branch *br, struct super_block *sb, -+ struct au_opt_add *add) -+{ -+ int err; -+ -+ err = 0; -+ mutex_init(&br->br_xino.xi_nondir_mtx); -+ br->br_perm = add->perm; -+ br->br_path = add->path; /* set first, path_get() later */ -+ spin_lock_init(&br->br_dykey_lock); -+ atomic_set(&br->br_count, 0); -+ atomic_set(&br->br_xino_running, 0); -+ br->br_id = au_new_br_id(sb); -+ AuDebugOn(br->br_id < 0); -+ -+ if (au_br_writable(add->perm)) { -+ err = au_wbr_init(br, sb, add->perm); -+ if (unlikely(err)) -+ goto out_err; -+ } -+ -+ if (au_opt_test(au_mntflags(sb), XINO)) { -+ err = au_xino_br(sb, br, add->path.dentry->d_inode->i_ino, -+ au_sbr(sb, 0)->br_xino.xi_file, /*do_test*/1); -+ if (unlikely(err)) { -+ AuDebugOn(br->br_xino.xi_file); -+ goto out_err; -+ } -+ } -+ -+ sysaufs_br_init(br); -+ path_get(&br->br_path); -+ goto out; /* success */ -+ -+out_err: -+ memset(&br->br_path, 0, sizeof(br->br_path)); -+out: -+ return err; -+} -+ -+static void au_br_do_add_brp(struct au_sbinfo *sbinfo, aufs_bindex_t bindex, -+ struct au_branch *br, aufs_bindex_t bend, -+ aufs_bindex_t amount) -+{ -+ struct au_branch **brp; -+ -+ AuRwMustWriteLock(&sbinfo->si_rwsem); -+ -+ brp = sbinfo->si_branch + bindex; -+ memmove(brp + 1, brp, sizeof(*brp) * amount); -+ *brp = br; -+ sbinfo->si_bend++; -+ if (unlikely(bend < 0)) -+ 
sbinfo->si_bend = 0; -+} -+ -+static void au_br_do_add_hdp(struct au_dinfo *dinfo, aufs_bindex_t bindex, -+ aufs_bindex_t bend, aufs_bindex_t amount) -+{ -+ struct au_hdentry *hdp; -+ -+ AuRwMustWriteLock(&dinfo->di_rwsem); -+ -+ hdp = dinfo->di_hdentry + bindex; -+ memmove(hdp + 1, hdp, sizeof(*hdp) * amount); -+ au_h_dentry_init(hdp); -+ dinfo->di_bend++; -+ if (unlikely(bend < 0)) -+ dinfo->di_bstart = 0; -+} -+ -+static void au_br_do_add_hip(struct au_iinfo *iinfo, aufs_bindex_t bindex, -+ aufs_bindex_t bend, aufs_bindex_t amount) -+{ -+ struct au_hinode *hip; -+ -+ AuRwMustWriteLock(&iinfo->ii_rwsem); -+ -+ hip = iinfo->ii_hinode + bindex; -+ memmove(hip + 1, hip, sizeof(*hip) * amount); -+ hip->hi_inode = NULL; -+ au_hn_init(hip); -+ iinfo->ii_bend++; -+ if (unlikely(bend < 0)) -+ iinfo->ii_bstart = 0; -+} -+ -+static void au_br_do_add(struct super_block *sb, struct au_branch *br, -+ aufs_bindex_t bindex) -+{ -+ struct dentry *root, *h_dentry; -+ struct inode *root_inode; -+ aufs_bindex_t bend, amount; -+ -+ root = sb->s_root; -+ root_inode = root->d_inode; -+ bend = au_sbend(sb); -+ amount = bend + 1 - bindex; -+ h_dentry = au_br_dentry(br); -+ au_sbilist_lock(); -+ au_br_do_add_brp(au_sbi(sb), bindex, br, bend, amount); -+ au_br_do_add_hdp(au_di(root), bindex, bend, amount); -+ au_br_do_add_hip(au_ii(root_inode), bindex, bend, amount); -+ au_set_h_dptr(root, bindex, dget(h_dentry)); -+ au_set_h_iptr(root_inode, bindex, au_igrab(h_dentry->d_inode), -+ /*flags*/0); -+ au_sbilist_unlock(); -+} -+ -+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount) -+{ -+ int err; -+ aufs_bindex_t bend, add_bindex; -+ struct dentry *root, *h_dentry; -+ struct inode *root_inode; -+ struct au_branch *add_branch; -+ -+ root = sb->s_root; -+ root_inode = root->d_inode; -+ IMustLock(root_inode); -+ err = test_add(sb, add, remount); -+ if (unlikely(err < 0)) -+ goto out; -+ if (err) { -+ err = 0; -+ goto out; /* success */ -+ } -+ -+ bend = au_sbend(sb); -+ 
add_branch = au_br_alloc(sb, bend + 2, add->perm); -+ err = PTR_ERR(add_branch); -+ if (IS_ERR(add_branch)) -+ goto out; -+ -+ err = au_br_init(add_branch, sb, add); -+ if (unlikely(err)) { -+ au_br_do_free(add_branch); -+ goto out; -+ } -+ -+ add_bindex = add->bindex; -+ if (!remount) -+ au_br_do_add(sb, add_branch, add_bindex); -+ else { -+ sysaufs_brs_del(sb, add_bindex); -+ au_br_do_add(sb, add_branch, add_bindex); -+ sysaufs_brs_add(sb, add_bindex); -+ } -+ -+ h_dentry = add->path.dentry; -+ if (!add_bindex) { -+ au_cpup_attr_all(root_inode, /*force*/1); -+ sb->s_maxbytes = h_dentry->d_sb->s_maxbytes; -+ } else -+ au_add_nlink(root_inode, h_dentry->d_inode); -+ -+ /* -+ * this test/set prevents aufs from handling unnecesary notify events -+ * of xino files, in case of re-adding a writable branch which was -+ * once detached from aufs. -+ */ -+ if (au_xino_brid(sb) < 0 -+ && au_br_writable(add_branch->br_perm) -+ && !au_test_fs_bad_xino(h_dentry->d_sb) -+ && add_branch->br_xino.xi_file -+ && add_branch->br_xino.xi_file->f_dentry->d_parent == h_dentry) -+ au_xino_brid_set(sb, add_branch->br_id); -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static unsigned long long au_farray_cb(void *a, -+ unsigned long long max __maybe_unused, -+ void *arg) -+{ -+ unsigned long long n; -+ struct file **p, *f; -+ struct au_sphlhead *files; -+ struct au_finfo *finfo; -+ struct super_block *sb = arg; -+ -+ n = 0; -+ p = a; -+ files = &au_sbi(sb)->si_files; -+ spin_lock(&files->spin); -+ hlist_for_each_entry(finfo, &files->head, fi_hlist) { -+ f = finfo->fi_file; -+ if (file_count(f) -+ && !special_file(file_inode(f)->i_mode)) { -+ get_file(f); -+ *p++ = f; -+ n++; -+ AuDebugOn(n > max); -+ } -+ } -+ spin_unlock(&files->spin); -+ -+ return n; -+} -+ -+static struct file **au_farray_alloc(struct super_block *sb, -+ unsigned long long *max) -+{ -+ *max = atomic_long_read(&au_sbi(sb)->si_nfiles); -+ return 
au_array_alloc(max, au_farray_cb, sb); -+} -+ -+static void au_farray_free(struct file **a, unsigned long long max) -+{ -+ unsigned long long ull; -+ -+ for (ull = 0; ull < max; ull++) -+ if (a[ull]) -+ fput(a[ull]); -+ kvfree(a); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * delete a branch -+ */ -+ -+/* to show the line number, do not make it inlined function */ -+#define AuVerbose(do_info, fmt, ...) do { \ -+ if (do_info) \ -+ pr_info(fmt, ##__VA_ARGS__); \ -+} while (0) -+ -+static int au_test_ibusy(struct inode *inode, aufs_bindex_t bstart, -+ aufs_bindex_t bend) -+{ -+ return (inode && !S_ISDIR(inode->i_mode)) || bstart == bend; -+} -+ -+static int au_test_dbusy(struct dentry *dentry, aufs_bindex_t bstart, -+ aufs_bindex_t bend) -+{ -+ return au_test_ibusy(dentry->d_inode, bstart, bend); -+} -+ -+/* -+ * test if the branch is deletable or not. -+ */ -+static int test_dentry_busy(struct dentry *root, aufs_bindex_t bindex, -+ unsigned int sigen, const unsigned int verbose) -+{ -+ int err, i, j, ndentry; -+ aufs_bindex_t bstart, bend; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct dentry *d; -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_dcsub_pages(&dpages, root, NULL, NULL); -+ if (unlikely(err)) -+ goto out_dpages; -+ -+ for (i = 0; !err && i < dpages.ndpage; i++) { -+ dpage = dpages.dpages + i; -+ ndentry = dpage->ndentry; -+ for (j = 0; !err && j < ndentry; j++) { -+ d = dpage->dentries[j]; -+ AuDebugOn(au_dcount(d) <= 0); -+ if (!au_digen_test(d, sigen)) { -+ di_read_lock_child(d, AuLock_IR); -+ if (unlikely(au_dbrange_test(d))) { -+ di_read_unlock(d, AuLock_IR); -+ continue; -+ } -+ } else { -+ di_write_lock_child(d); -+ if (unlikely(au_dbrange_test(d))) { -+ di_write_unlock(d); -+ continue; -+ } -+ err = au_reval_dpath(d, sigen); -+ if (!err) -+ di_downgrade_lock(d, AuLock_IR); -+ else { -+ di_write_unlock(d); -+ break; -+ } -+ } -+ 
-+ /* AuDbgDentry(d); */ -+ bstart = au_dbstart(d); -+ bend = au_dbend(d); -+ if (bstart <= bindex -+ && bindex <= bend -+ && au_h_dptr(d, bindex) -+ && au_test_dbusy(d, bstart, bend)) { -+ err = -EBUSY; -+ AuVerbose(verbose, "busy %pd\n", d); -+ AuDbgDentry(d); -+ } -+ di_read_unlock(d, AuLock_IR); -+ } -+ } -+ -+out_dpages: -+ au_dpages_free(&dpages); -+out: -+ return err; -+} -+ -+static int test_inode_busy(struct super_block *sb, aufs_bindex_t bindex, -+ unsigned int sigen, const unsigned int verbose) -+{ -+ int err; -+ unsigned long long max, ull; -+ struct inode *i, **array; -+ aufs_bindex_t bstart, bend; -+ -+ array = au_iarray_alloc(sb, &max); -+ err = PTR_ERR(array); -+ if (IS_ERR(array)) -+ goto out; -+ -+ err = 0; -+ AuDbg("b%d\n", bindex); -+ for (ull = 0; !err && ull < max; ull++) { -+ i = array[ull]; -+ if (unlikely(!i)) -+ break; -+ if (i->i_ino == AUFS_ROOT_INO) -+ continue; -+ -+ /* AuDbgInode(i); */ -+ if (au_iigen(i, NULL) == sigen) -+ ii_read_lock_child(i); -+ else { -+ ii_write_lock_child(i); -+ err = au_refresh_hinode_self(i); -+ au_iigen_dec(i); -+ if (!err) -+ ii_downgrade_lock(i); -+ else { -+ ii_write_unlock(i); -+ break; -+ } -+ } -+ -+ bstart = au_ibstart(i); -+ bend = au_ibend(i); -+ if (bstart <= bindex -+ && bindex <= bend -+ && au_h_iptr(i, bindex) -+ && au_test_ibusy(i, bstart, bend)) { -+ err = -EBUSY; -+ AuVerbose(verbose, "busy i%lu\n", i->i_ino); -+ AuDbgInode(i); -+ } -+ ii_read_unlock(i); -+ } -+ au_iarray_free(array, max); -+ -+out: -+ return err; -+} -+ -+static int test_children_busy(struct dentry *root, aufs_bindex_t bindex, -+ const unsigned int verbose) -+{ -+ int err; -+ unsigned int sigen; -+ -+ sigen = au_sigen(root->d_sb); -+ DiMustNoWaiters(root); -+ IiMustNoWaiters(root->d_inode); -+ di_write_unlock(root); -+ err = test_dentry_busy(root, bindex, sigen, verbose); -+ if (!err) -+ err = test_inode_busy(root->d_sb, bindex, sigen, verbose); -+ di_write_lock_child(root); /* aufs_write_lock() calls ..._child() */ -+ -+ 
return err; -+} -+ -+static int test_dir_busy(struct file *file, aufs_bindex_t br_id, -+ struct file **to_free, int *idx) -+{ -+ int err; -+ unsigned char matched, root; -+ aufs_bindex_t bindex, bend; -+ struct au_fidir *fidir; -+ struct au_hfile *hfile; -+ -+ err = 0; -+ root = IS_ROOT(file->f_dentry); -+ if (root) { -+ get_file(file); -+ to_free[*idx] = file; -+ (*idx)++; -+ goto out; -+ } -+ -+ matched = 0; -+ fidir = au_fi(file)->fi_hdir; -+ AuDebugOn(!fidir); -+ bend = au_fbend_dir(file); -+ for (bindex = au_fbstart(file); bindex <= bend; bindex++) { -+ hfile = fidir->fd_hfile + bindex; -+ if (!hfile->hf_file) -+ continue; -+ -+ if (hfile->hf_br->br_id == br_id) { -+ matched = 1; -+ break; -+ } -+ } -+ if (matched) -+ err = -EBUSY; -+ -+out: -+ return err; -+} -+ -+static int test_file_busy(struct super_block *sb, aufs_bindex_t br_id, -+ struct file **to_free, int opened) -+{ -+ int err, idx; -+ unsigned long long ull, max; -+ aufs_bindex_t bstart; -+ struct file *file, **array; -+ struct dentry *root; -+ struct au_hfile *hfile; -+ -+ array = au_farray_alloc(sb, &max); -+ err = PTR_ERR(array); -+ if (IS_ERR(array)) -+ goto out; -+ -+ err = 0; -+ idx = 0; -+ root = sb->s_root; -+ di_write_unlock(root); -+ for (ull = 0; ull < max; ull++) { -+ file = array[ull]; -+ if (unlikely(!file)) -+ break; -+ -+ /* AuDbg("%pD\n", file); */ -+ fi_read_lock(file); -+ bstart = au_fbstart(file); -+ if (!d_is_dir(file->f_path.dentry)) { -+ hfile = &au_fi(file)->fi_htop; -+ if (hfile->hf_br->br_id == br_id) -+ err = -EBUSY; -+ } else -+ err = test_dir_busy(file, br_id, to_free, &idx); -+ fi_read_unlock(file); -+ if (unlikely(err)) -+ break; -+ } -+ di_write_lock_child(root); -+ au_farray_free(array, max); -+ AuDebugOn(idx > opened); -+ -+out: -+ return err; -+} -+ -+static void br_del_file(struct file **to_free, unsigned long long opened, -+ aufs_bindex_t br_id) -+{ -+ unsigned long long ull; -+ aufs_bindex_t bindex, bstart, bend, bfound; -+ struct file *file; -+ struct au_fidir 
*fidir; -+ struct au_hfile *hfile; -+ -+ for (ull = 0; ull < opened; ull++) { -+ file = to_free[ull]; -+ if (unlikely(!file)) -+ break; -+ -+ /* AuDbg("%pD\n", file); */ -+ AuDebugOn(!d_is_dir(file->f_path.dentry)); -+ bfound = -1; -+ fidir = au_fi(file)->fi_hdir; -+ AuDebugOn(!fidir); -+ fi_write_lock(file); -+ bstart = au_fbstart(file); -+ bend = au_fbend_dir(file); -+ for (bindex = bstart; bindex <= bend; bindex++) { -+ hfile = fidir->fd_hfile + bindex; -+ if (!hfile->hf_file) -+ continue; -+ -+ if (hfile->hf_br->br_id == br_id) { -+ bfound = bindex; -+ break; -+ } -+ } -+ AuDebugOn(bfound < 0); -+ au_set_h_fptr(file, bfound, NULL); -+ if (bfound == bstart) { -+ for (bstart++; bstart <= bend; bstart++) -+ if (au_hf_dir(file, bstart)) { -+ au_set_fbstart(file, bstart); -+ break; -+ } -+ } -+ fi_write_unlock(file); -+ } -+} -+ -+static void au_br_do_del_brp(struct au_sbinfo *sbinfo, -+ const aufs_bindex_t bindex, -+ const aufs_bindex_t bend) -+{ -+ struct au_branch **brp, **p; -+ -+ AuRwMustWriteLock(&sbinfo->si_rwsem); -+ -+ brp = sbinfo->si_branch + bindex; -+ if (bindex < bend) -+ memmove(brp, brp + 1, sizeof(*brp) * (bend - bindex)); -+ sbinfo->si_branch[0 + bend] = NULL; -+ sbinfo->si_bend--; -+ -+ p = krealloc(sbinfo->si_branch, sizeof(*p) * bend, AuGFP_SBILIST); -+ if (p) -+ sbinfo->si_branch = p; -+ /* harmless error */ -+} -+ -+static void au_br_do_del_hdp(struct au_dinfo *dinfo, const aufs_bindex_t bindex, -+ const aufs_bindex_t bend) -+{ -+ struct au_hdentry *hdp, *p; -+ -+ AuRwMustWriteLock(&dinfo->di_rwsem); -+ -+ hdp = dinfo->di_hdentry; -+ if (bindex < bend) -+ memmove(hdp + bindex, hdp + bindex + 1, -+ sizeof(*hdp) * (bend - bindex)); -+ hdp[0 + bend].hd_dentry = NULL; -+ dinfo->di_bend--; -+ -+ p = krealloc(hdp, sizeof(*p) * bend, AuGFP_SBILIST); -+ if (p) -+ dinfo->di_hdentry = p; -+ /* harmless error */ -+} -+ -+static void au_br_do_del_hip(struct au_iinfo *iinfo, const aufs_bindex_t bindex, -+ const aufs_bindex_t bend) -+{ -+ struct au_hinode 
*hip, *p; -+ -+ AuRwMustWriteLock(&iinfo->ii_rwsem); -+ -+ hip = iinfo->ii_hinode + bindex; -+ if (bindex < bend) -+ memmove(hip, hip + 1, sizeof(*hip) * (bend - bindex)); -+ iinfo->ii_hinode[0 + bend].hi_inode = NULL; -+ au_hn_init(iinfo->ii_hinode + bend); -+ iinfo->ii_bend--; -+ -+ p = krealloc(iinfo->ii_hinode, sizeof(*p) * bend, AuGFP_SBILIST); -+ if (p) -+ iinfo->ii_hinode = p; -+ /* harmless error */ -+} -+ -+static void au_br_do_del(struct super_block *sb, aufs_bindex_t bindex, -+ struct au_branch *br) -+{ -+ aufs_bindex_t bend; -+ struct au_sbinfo *sbinfo; -+ struct dentry *root, *h_root; -+ struct inode *inode, *h_inode; -+ struct au_hinode *hinode; -+ -+ SiMustWriteLock(sb); -+ -+ root = sb->s_root; -+ inode = root->d_inode; -+ sbinfo = au_sbi(sb); -+ bend = sbinfo->si_bend; -+ -+ h_root = au_h_dptr(root, bindex); -+ hinode = au_hi(inode, bindex); -+ h_inode = au_igrab(hinode->hi_inode); -+ au_hiput(hinode); -+ -+ au_sbilist_lock(); -+ au_br_do_del_brp(sbinfo, bindex, bend); -+ au_br_do_del_hdp(au_di(root), bindex, bend); -+ au_br_do_del_hip(au_ii(inode), bindex, bend); -+ au_sbilist_unlock(); -+ -+ dput(h_root); -+ iput(h_inode); -+ au_br_do_free(br); -+} -+ -+static unsigned long long empty_cb(void *array, unsigned long long max, -+ void *arg) -+{ -+ return max; -+} -+ -+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount) -+{ -+ int err, rerr, i; -+ unsigned long long opened; -+ unsigned int mnt_flags; -+ aufs_bindex_t bindex, bend, br_id; -+ unsigned char do_wh, verbose; -+ struct au_branch *br; -+ struct au_wbr *wbr; -+ struct dentry *root; -+ struct file **to_free; -+ -+ err = 0; -+ opened = 0; -+ to_free = NULL; -+ root = sb->s_root; -+ bindex = au_find_dbindex(root, del->h_path.dentry); -+ if (bindex < 0) { -+ if (remount) -+ goto out; /* success */ -+ err = -ENOENT; -+ pr_err("%s no such branch\n", del->pathname); -+ goto out; -+ } -+ AuDbg("bindex b%d\n", bindex); -+ -+ err = -EBUSY; -+ mnt_flags = au_mntflags(sb); -+ 
verbose = !!au_opt_test(mnt_flags, VERBOSE); -+ bend = au_sbend(sb); -+ if (unlikely(!bend)) { -+ AuVerbose(verbose, "no more branches left\n"); -+ goto out; -+ } -+ br = au_sbr(sb, bindex); -+ AuDebugOn(!path_equal(&br->br_path, &del->h_path)); -+ -+ br_id = br->br_id; -+ opened = atomic_read(&br->br_count); -+ if (unlikely(opened)) { -+ to_free = au_array_alloc(&opened, empty_cb, NULL); -+ err = PTR_ERR(to_free); -+ if (IS_ERR(to_free)) -+ goto out; -+ -+ err = test_file_busy(sb, br_id, to_free, opened); -+ if (unlikely(err)) { -+ AuVerbose(verbose, "%llu file(s) opened\n", opened); -+ goto out; -+ } -+ } -+ -+ wbr = br->br_wbr; -+ do_wh = wbr && (wbr->wbr_whbase || wbr->wbr_plink || wbr->wbr_orph); -+ if (do_wh) { -+ /* instead of WbrWhMustWriteLock(wbr) */ -+ SiMustWriteLock(sb); -+ for (i = 0; i < AuBrWh_Last; i++) { -+ dput(wbr->wbr_wh[i]); -+ wbr->wbr_wh[i] = NULL; -+ } -+ } -+ -+ err = test_children_busy(root, bindex, verbose); -+ if (unlikely(err)) { -+ if (do_wh) -+ goto out_wh; -+ goto out; -+ } -+ -+ err = 0; -+ if (to_free) { -+ /* -+ * now we confirmed the branch is deletable. -+ * let's free the remaining opened dirs on the branch. -+ */ -+ di_write_unlock(root); -+ br_del_file(to_free, opened, br_id); -+ di_write_lock_child(root); -+ } -+ -+ if (!remount) -+ au_br_do_del(sb, bindex, br); -+ else { -+ sysaufs_brs_del(sb, bindex); -+ au_br_do_del(sb, bindex, br); -+ sysaufs_brs_add(sb, bindex); -+ } -+ -+ if (!bindex) { -+ au_cpup_attr_all(root->d_inode, /*force*/1); -+ sb->s_maxbytes = au_sbr_sb(sb, 0)->s_maxbytes; -+ } else -+ au_sub_nlink(root->d_inode, del->h_path.dentry->d_inode); -+ if (au_opt_test(mnt_flags, PLINK)) -+ au_plink_half_refresh(sb, br_id); -+ -+ if (au_xino_brid(sb) == br_id) -+ au_xino_brid_set(sb, -1); -+ goto out; /* success */ -+ -+out_wh: -+ /* revert */ -+ rerr = au_br_init_wh(sb, br, br->br_perm); -+ if (rerr) -+ pr_warn("failed re-creating base whiteout, %s. 
(%d)\n", -+ del->pathname, rerr); -+out: -+ if (to_free) -+ au_farray_free(to_free, opened); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_ibusy(struct super_block *sb, struct aufs_ibusy __user *arg) -+{ -+ int err; -+ aufs_bindex_t bstart, bend; -+ struct aufs_ibusy ibusy; -+ struct inode *inode, *h_inode; -+ -+ err = -EPERM; -+ if (unlikely(!capable(CAP_SYS_ADMIN))) -+ goto out; -+ -+ err = copy_from_user(&ibusy, arg, sizeof(ibusy)); -+ if (!err) -+ err = !access_ok(VERIFY_WRITE, &arg->h_ino, sizeof(arg->h_ino)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ goto out; -+ } -+ -+ err = -EINVAL; -+ si_read_lock(sb, AuLock_FLUSH); -+ if (unlikely(ibusy.bindex < 0 || ibusy.bindex > au_sbend(sb))) -+ goto out_unlock; -+ -+ err = 0; -+ ibusy.h_ino = 0; /* invalid */ -+ inode = ilookup(sb, ibusy.ino); -+ if (!inode -+ || inode->i_ino == AUFS_ROOT_INO -+ || is_bad_inode(inode)) -+ goto out_unlock; -+ -+ ii_read_lock_child(inode); -+ bstart = au_ibstart(inode); -+ bend = au_ibend(inode); -+ if (bstart <= ibusy.bindex && ibusy.bindex <= bend) { -+ h_inode = au_h_iptr(inode, ibusy.bindex); -+ if (h_inode && au_test_ibusy(inode, bstart, bend)) -+ ibusy.h_ino = h_inode->i_ino; -+ } -+ ii_read_unlock(inode); -+ iput(inode); -+ -+out_unlock: -+ si_read_unlock(sb); -+ if (!err) { -+ err = __put_user(ibusy.h_ino, &arg->h_ino); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ } -+ } -+out: -+ return err; -+} -+ -+long au_ibusy_ioctl(struct file *file, unsigned long arg) -+{ -+ return au_ibusy(file->f_dentry->d_sb, (void __user *)arg); -+} -+ -+#ifdef CONFIG_COMPAT -+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg) -+{ -+ return au_ibusy(file->f_dentry->d_sb, compat_ptr(arg)); -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * change a branch permission -+ */ -+ -+static void au_warn_ima(void) -+{ 
-+#ifdef CONFIG_IMA -+ /* since it doesn't support mark_files_ro() */ -+ AuWarn1("RW -> RO makes IMA to produce wrong message\n"); -+#endif -+} -+ -+static int do_need_sigen_inc(int a, int b) -+{ -+ return au_br_whable(a) && !au_br_whable(b); -+} -+ -+static int need_sigen_inc(int old, int new) -+{ -+ return do_need_sigen_inc(old, new) -+ || do_need_sigen_inc(new, old); -+} -+ -+static int au_br_mod_files_ro(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ int err, do_warn; -+ unsigned int mnt_flags; -+ unsigned long long ull, max; -+ aufs_bindex_t br_id; -+ unsigned char verbose, writer; -+ struct file *file, *hf, **array; -+ struct inode *inode; -+ struct au_hfile *hfile; -+ -+ mnt_flags = au_mntflags(sb); -+ verbose = !!au_opt_test(mnt_flags, VERBOSE); -+ -+ array = au_farray_alloc(sb, &max); -+ err = PTR_ERR(array); -+ if (IS_ERR(array)) -+ goto out; -+ -+ do_warn = 0; -+ br_id = au_sbr_id(sb, bindex); -+ for (ull = 0; ull < max; ull++) { -+ file = array[ull]; -+ if (unlikely(!file)) -+ break; -+ -+ /* AuDbg("%pD\n", file); */ -+ fi_read_lock(file); -+ if (unlikely(au_test_mmapped(file))) { -+ err = -EBUSY; -+ AuVerbose(verbose, "mmapped %pD\n", file); -+ AuDbgFile(file); -+ FiMustNoWaiters(file); -+ fi_read_unlock(file); -+ goto out_array; -+ } -+ -+ inode = file_inode(file); -+ hfile = &au_fi(file)->fi_htop; -+ hf = hfile->hf_file; -+ if (!S_ISREG(inode->i_mode) -+ || !(file->f_mode & FMODE_WRITE) -+ || hfile->hf_br->br_id != br_id -+ || !(hf->f_mode & FMODE_WRITE)) -+ array[ull] = NULL; -+ else { -+ do_warn = 1; -+ get_file(file); -+ } -+ -+ FiMustNoWaiters(file); -+ fi_read_unlock(file); -+ fput(file); -+ } -+ -+ err = 0; -+ if (do_warn) -+ au_warn_ima(); -+ -+ for (ull = 0; ull < max; ull++) { -+ file = array[ull]; -+ if (!file) -+ continue; -+ -+ /* todo: already flushed? 
*/ -+ /* -+ * fs/super.c:mark_files_ro() is gone, but aufs keeps its -+ * approach which resets f_mode and calls mnt_drop_write() and -+ * file_release_write() for each file, because the branch -+ * attribute in aufs world is totally different from the native -+ * fs rw/ro mode. -+ */ -+ /* fi_read_lock(file); */ -+ hfile = &au_fi(file)->fi_htop; -+ hf = hfile->hf_file; -+ /* fi_read_unlock(file); */ -+ spin_lock(&hf->f_lock); -+ writer = !!(hf->f_mode & FMODE_WRITER); -+ hf->f_mode &= ~(FMODE_WRITE | FMODE_WRITER); -+ spin_unlock(&hf->f_lock); -+ if (writer) { -+ put_write_access(file_inode(hf)); -+ __mnt_drop_write(hf->f_path.mnt); -+ } -+ } -+ -+out_array: -+ au_farray_free(array, max); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount, -+ int *do_refresh) -+{ -+ int err, rerr; -+ aufs_bindex_t bindex; -+ struct dentry *root; -+ struct au_branch *br; -+ struct au_br_fhsm *bf; -+ -+ root = sb->s_root; -+ bindex = au_find_dbindex(root, mod->h_root); -+ if (bindex < 0) { -+ if (remount) -+ return 0; /* success */ -+ err = -ENOENT; -+ pr_err("%s no such branch\n", mod->path); -+ goto out; -+ } -+ AuDbg("bindex b%d\n", bindex); -+ -+ err = test_br(mod->h_root->d_inode, mod->perm, mod->path); -+ if (unlikely(err)) -+ goto out; -+ -+ br = au_sbr(sb, bindex); -+ AuDebugOn(mod->h_root != au_br_dentry(br)); -+ if (br->br_perm == mod->perm) -+ return 0; /* success */ -+ -+ /* pre-allocate for non-fhsm --> fhsm */ -+ bf = NULL; -+ if (!au_br_fhsm(br->br_perm) && au_br_fhsm(mod->perm)) { -+ err = au_fhsm_br_alloc(br); -+ if (unlikely(err)) -+ goto out; -+ bf = br->br_fhsm; -+ br->br_fhsm = NULL; -+ } -+ -+ if (au_br_writable(br->br_perm)) { -+ /* remove whiteout base */ -+ err = au_br_init_wh(sb, br, mod->perm); -+ if (unlikely(err)) -+ goto out_bf; -+ -+ if (!au_br_writable(mod->perm)) { -+ /* rw --> ro, file might be mmapped */ -+ DiMustNoWaiters(root); -+ IiMustNoWaiters(root->d_inode); -+ 
di_write_unlock(root); -+ err = au_br_mod_files_ro(sb, bindex); -+ /* aufs_write_lock() calls ..._child() */ -+ di_write_lock_child(root); -+ -+ if (unlikely(err)) { -+ rerr = -ENOMEM; -+ br->br_wbr = kzalloc(sizeof(*br->br_wbr), -+ GFP_NOFS); -+ if (br->br_wbr) -+ rerr = au_wbr_init(br, sb, br->br_perm); -+ if (unlikely(rerr)) { -+ AuIOErr("nested error %d (%d)\n", -+ rerr, err); -+ br->br_perm = mod->perm; -+ } -+ } -+ } -+ } else if (au_br_writable(mod->perm)) { -+ /* ro --> rw */ -+ err = -ENOMEM; -+ br->br_wbr = kzalloc(sizeof(*br->br_wbr), GFP_NOFS); -+ if (br->br_wbr) { -+ err = au_wbr_init(br, sb, mod->perm); -+ if (unlikely(err)) { -+ kfree(br->br_wbr); -+ br->br_wbr = NULL; -+ } -+ } -+ } -+ if (unlikely(err)) -+ goto out_bf; -+ -+ if (au_br_fhsm(br->br_perm)) { -+ if (!au_br_fhsm(mod->perm)) { -+ /* fhsm --> non-fhsm */ -+ au_br_fhsm_fin(br->br_fhsm); -+ kfree(br->br_fhsm); -+ br->br_fhsm = NULL; -+ } -+ } else if (au_br_fhsm(mod->perm)) -+ /* non-fhsm --> fhsm */ -+ br->br_fhsm = bf; -+ -+ *do_refresh |= need_sigen_inc(br->br_perm, mod->perm); -+ br->br_perm = mod->perm; -+ goto out; /* success */ -+ -+out_bf: -+ kfree(bf); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs) -+{ -+ int err; -+ struct kstatfs kstfs; -+ -+ err = vfs_statfs(&br->br_path, &kstfs); -+ if (!err) { -+ stfs->f_blocks = kstfs.f_blocks; -+ stfs->f_bavail = kstfs.f_bavail; -+ stfs->f_files = kstfs.f_files; -+ stfs->f_ffree = kstfs.f_ffree; -+ } -+ -+ return err; -+} -diff --git a/fs/aufs/branch.h b/fs/aufs/branch.h -new file mode 100644 -index 0000000..6ae006e ---- /dev/null -+++ b/fs/aufs/branch.h -@@ -0,0 +1,279 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * branch filesystems and xino for them -+ */ -+ -+#ifndef __AUFS_BRANCH_H__ -+#define __AUFS_BRANCH_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include "dynop.h" -+#include "rwsem.h" -+#include "super.h" -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* a xino file */ -+struct au_xino_file { -+ struct file *xi_file; -+ struct mutex xi_nondir_mtx; -+ -+ /* todo: make xino files an array to support huge inode number */ -+ -+#ifdef CONFIG_DEBUG_FS -+ struct dentry *xi_dbgaufs; -+#endif -+}; -+ -+/* File-based Hierarchical Storage Management */ -+struct au_br_fhsm { -+#ifdef CONFIG_AUFS_FHSM -+ struct mutex bf_lock; -+ unsigned long bf_jiffy; -+ struct aufs_stfs bf_stfs; -+ int bf_readable; -+#endif -+}; -+ -+/* members for writable branch only */ -+enum {AuBrWh_BASE, AuBrWh_PLINK, AuBrWh_ORPH, AuBrWh_Last}; -+struct au_wbr { -+ struct au_rwsem wbr_wh_rwsem; -+ struct dentry *wbr_wh[AuBrWh_Last]; -+ atomic_t wbr_wh_running; -+#define wbr_whbase wbr_wh[AuBrWh_BASE] /* whiteout base */ -+#define wbr_plink wbr_wh[AuBrWh_PLINK] /* pseudo-link dir */ -+#define wbr_orph wbr_wh[AuBrWh_ORPH] /* dir for orphans */ -+ -+ /* mfs mode */ -+ unsigned long long wbr_bytes; -+}; -+ -+/* ext2 has 3 types of operations at least, ext3 has 4 */ -+#define AuBrDynOp (AuDyLast * 4) -+ 
-+#ifdef CONFIG_AUFS_HFSNOTIFY -+/* support for asynchronous destruction */ -+struct au_br_hfsnotify { -+ struct fsnotify_group *hfsn_group; -+}; -+#endif -+ -+/* sysfs entries */ -+struct au_brsysfs { -+ char name[16]; -+ struct attribute attr; -+}; -+ -+enum { -+ AuBrSysfs_BR, -+ AuBrSysfs_BRID, -+ AuBrSysfs_Last -+}; -+ -+/* protected by superblock rwsem */ -+struct au_branch { -+ struct au_xino_file br_xino; -+ -+ aufs_bindex_t br_id; -+ -+ int br_perm; -+ struct path br_path; -+ spinlock_t br_dykey_lock; -+ struct au_dykey *br_dykey[AuBrDynOp]; -+ atomic_t br_count; -+ -+ struct au_wbr *br_wbr; -+ struct au_br_fhsm *br_fhsm; -+ -+ /* xino truncation */ -+ atomic_t br_xino_running; -+ -+#ifdef CONFIG_AUFS_HFSNOTIFY -+ struct au_br_hfsnotify *br_hfsn; -+#endif -+ -+#ifdef CONFIG_SYSFS -+ /* entries under sysfs per mount-point */ -+ struct au_brsysfs br_sysfs[AuBrSysfs_Last]; -+#endif -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct vfsmount *au_br_mnt(struct au_branch *br) -+{ -+ return br->br_path.mnt; -+} -+ -+static inline struct dentry *au_br_dentry(struct au_branch *br) -+{ -+ return br->br_path.dentry; -+} -+ -+static inline struct super_block *au_br_sb(struct au_branch *br) -+{ -+ return au_br_mnt(br)->mnt_sb; -+} -+ -+static inline int au_br_rdonly(struct au_branch *br) -+{ -+ return ((au_br_sb(br)->s_flags & MS_RDONLY) -+ || !au_br_writable(br->br_perm)) -+ ? 
-EROFS : 0; -+} -+ -+static inline int au_br_hnotifyable(int brperm __maybe_unused) -+{ -+#ifdef CONFIG_AUFS_HNOTIFY -+ return !(brperm & AuBrPerm_RR); -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_br_test_oflag(int oflag, struct au_branch *br) -+{ -+ int err, exec_flag; -+ -+ err = 0; -+ exec_flag = oflag & __FMODE_EXEC; -+ if (unlikely(exec_flag && (au_br_mnt(br)->mnt_flags & MNT_NOEXEC))) -+ err = -EACCES; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* branch.c */ -+struct au_sbinfo; -+void au_br_free(struct au_sbinfo *sinfo); -+int au_br_index(struct super_block *sb, aufs_bindex_t br_id); -+struct au_opt_add; -+int au_br_add(struct super_block *sb, struct au_opt_add *add, int remount); -+struct au_opt_del; -+int au_br_del(struct super_block *sb, struct au_opt_del *del, int remount); -+long au_ibusy_ioctl(struct file *file, unsigned long arg); -+#ifdef CONFIG_COMPAT -+long au_ibusy_compat_ioctl(struct file *file, unsigned long arg); -+#endif -+struct au_opt_mod; -+int au_br_mod(struct super_block *sb, struct au_opt_mod *mod, int remount, -+ int *do_refresh); -+struct aufs_stfs; -+int au_br_stfs(struct au_branch *br, struct aufs_stfs *stfs); -+ -+/* xino.c */ -+static const loff_t au_loff_max = LLONG_MAX; -+ -+int au_xib_trunc(struct super_block *sb); -+ssize_t xino_fread(au_readf_t func, struct file *file, void *buf, size_t size, -+ loff_t *pos); -+ssize_t xino_fwrite(au_writef_t func, struct file *file, void *buf, size_t size, -+ loff_t *pos); -+struct file *au_xino_create2(struct file *base_file, struct file *copy_src); -+struct file *au_xino_create(struct super_block *sb, char *fname, int silent); -+ino_t au_xino_new_ino(struct super_block *sb); -+void au_xino_delete_inode(struct inode *inode, const int unlinked); -+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino, -+ ino_t ino); -+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t 
h_ino, -+ ino_t *ino); -+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t hino, -+ struct file *base_file, int do_test); -+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex); -+ -+struct au_opt_xino; -+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount); -+void au_xino_clr(struct super_block *sb); -+struct file *au_xino_def(struct super_block *sb); -+int au_xino_path(struct seq_file *seq, struct file *file); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* Superblock to branch */ -+static inline -+aufs_bindex_t au_sbr_id(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ return au_sbr(sb, bindex)->br_id; -+} -+ -+static inline -+struct vfsmount *au_sbr_mnt(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ return au_br_mnt(au_sbr(sb, bindex)); -+} -+ -+static inline -+struct super_block *au_sbr_sb(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ return au_br_sb(au_sbr(sb, bindex)); -+} -+ -+static inline void au_sbr_put(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ atomic_dec(&au_sbr(sb, bindex)->br_count); -+} -+ -+static inline int au_sbr_perm(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ return au_sbr(sb, bindex)->br_perm; -+} -+ -+static inline int au_sbr_whable(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ return au_br_whable(au_sbr_perm(sb, bindex)); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * wbr_wh_read_lock, wbr_wh_write_lock -+ * wbr_wh_read_unlock, wbr_wh_write_unlock, wbr_wh_downgrade_lock -+ */ -+AuSimpleRwsemFuncs(wbr_wh, struct au_wbr *wbr, &wbr->wbr_wh_rwsem); -+ -+#define WbrWhMustNoWaiters(wbr) AuRwMustNoWaiters(&wbr->wbr_wh_rwsem) -+#define WbrWhMustAnyLock(wbr) AuRwMustAnyLock(&wbr->wbr_wh_rwsem) -+#define WbrWhMustWriteLock(wbr) AuRwMustWriteLock(&wbr->wbr_wh_rwsem) -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef 
CONFIG_AUFS_FHSM -+static inline void au_br_fhsm_init(struct au_br_fhsm *brfhsm) -+{ -+ mutex_init(&brfhsm->bf_lock); -+ brfhsm->bf_jiffy = 0; -+ brfhsm->bf_readable = 0; -+} -+ -+static inline void au_br_fhsm_fin(struct au_br_fhsm *brfhsm) -+{ -+ mutex_destroy(&brfhsm->bf_lock); -+} -+#else -+AuStubVoid(au_br_fhsm_init, struct au_br_fhsm *brfhsm) -+AuStubVoid(au_br_fhsm_fin, struct au_br_fhsm *brfhsm) -+#endif -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_BRANCH_H__ */ -diff --git a/fs/aufs/conf.mk b/fs/aufs/conf.mk -new file mode 100644 -index 0000000..0bbb2d3 ---- /dev/null -+++ b/fs/aufs/conf.mk -@@ -0,0 +1,38 @@ -+ -+AuConfStr = CONFIG_AUFS_FS=${CONFIG_AUFS_FS} -+ -+define AuConf -+ifdef ${1} -+AuConfStr += ${1}=${${1}} -+endif -+endef -+ -+AuConfAll = BRANCH_MAX_127 BRANCH_MAX_511 BRANCH_MAX_1023 BRANCH_MAX_32767 \ -+ SBILIST \ -+ HNOTIFY HFSNOTIFY \ -+ EXPORT INO_T_64 \ -+ XATTR \ -+ FHSM \ -+ RDU \ -+ SHWH \ -+ BR_RAMFS \ -+ BR_FUSE POLL \ -+ BR_HFSPLUS \ -+ BDEV_LOOP \ -+ DEBUG MAGIC_SYSRQ -+$(foreach i, ${AuConfAll}, \ -+ $(eval $(call AuConf,CONFIG_AUFS_${i}))) -+ -+AuConfName = ${obj}/conf.str -+${AuConfName}.tmp: FORCE -+ @echo ${AuConfStr} | tr ' ' '\n' | sed -e 's/^/"/' -e 's/$$/\\n"/' > $@ -+${AuConfName}: ${AuConfName}.tmp -+ @diff -q $< $@ > /dev/null 2>&1 || { \ -+ echo ' GEN ' $@; \ -+ cp -p $< $@; \ -+ } -+FORCE: -+clean-files += ${AuConfName} ${AuConfName}.tmp -+${obj}/sysfs.o: ${AuConfName} -+ -+-include ${srctree}/${src}/conf_priv.mk -diff --git a/fs/aufs/cpup.c b/fs/aufs/cpup.c -new file mode 100644 -index 0000000..9d8b767 ---- /dev/null -+++ b/fs/aufs/cpup.c -@@ -0,0 +1,1368 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * copy-up functions, see wbr_policy.c for copy-down -+ */ -+ -+#include -+#include -+#include -+#include "aufs.h" -+ -+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags) -+{ -+ const unsigned int mask = S_DEAD | S_SWAPFILE | S_PRIVATE -+ | S_NOATIME | S_NOCMTIME | S_AUTOMOUNT; -+ -+ BUILD_BUG_ON(sizeof(iflags) != sizeof(dst->i_flags)); -+ -+ dst->i_flags |= iflags & ~mask; -+ if (au_test_fs_notime(dst->i_sb)) -+ dst->i_flags |= S_NOATIME | S_NOCMTIME; -+} -+ -+void au_cpup_attr_timesizes(struct inode *inode) -+{ -+ struct inode *h_inode; -+ -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ fsstack_copy_attr_times(inode, h_inode); -+ fsstack_copy_inode_size(inode, h_inode); -+} -+ -+void au_cpup_attr_nlink(struct inode *inode, int force) -+{ -+ struct inode *h_inode; -+ struct super_block *sb; -+ aufs_bindex_t bindex, bend; -+ -+ sb = inode->i_sb; -+ bindex = au_ibstart(inode); -+ h_inode = au_h_iptr(inode, bindex); -+ if (!force -+ && !S_ISDIR(h_inode->i_mode) -+ && au_opt_test(au_mntflags(sb), PLINK) -+ && au_plink_test(inode)) -+ return; -+ -+ /* -+ * 0 can happen in revalidating. -+ * h_inode->i_mutex may not be held here, but it is harmless since once -+ * i_nlink reaches 0, it will never become positive except O_TMPFILE -+ * case. -+ * todo: O_TMPFILE+linkat(AT_SYMLINK_FOLLOW) bypassing aufs may cause -+ * the incorrect link count. -+ */ -+ set_nlink(inode, h_inode->i_nlink); -+ -+ /* -+ * fewer nlink makes find(1) noisy, but larger nlink doesn't. -+ * it may includes whplink directory. 
-+ */ -+ if (S_ISDIR(h_inode->i_mode)) { -+ bend = au_ibend(inode); -+ for (bindex++; bindex <= bend; bindex++) { -+ h_inode = au_h_iptr(inode, bindex); -+ if (h_inode) -+ au_add_nlink(inode, h_inode); -+ } -+ } -+} -+ -+void au_cpup_attr_changeable(struct inode *inode) -+{ -+ struct inode *h_inode; -+ -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ inode->i_mode = h_inode->i_mode; -+ inode->i_uid = h_inode->i_uid; -+ inode->i_gid = h_inode->i_gid; -+ au_cpup_attr_timesizes(inode); -+ au_cpup_attr_flags(inode, h_inode->i_flags); -+} -+ -+void au_cpup_igen(struct inode *inode, struct inode *h_inode) -+{ -+ struct au_iinfo *iinfo = au_ii(inode); -+ -+ IiMustWriteLock(inode); -+ -+ iinfo->ii_higen = h_inode->i_generation; -+ iinfo->ii_hsb1 = h_inode->i_sb; -+} -+ -+void au_cpup_attr_all(struct inode *inode, int force) -+{ -+ struct inode *h_inode; -+ -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ au_cpup_attr_changeable(inode); -+ if (inode->i_nlink > 0) -+ au_cpup_attr_nlink(inode, force); -+ inode->i_rdev = h_inode->i_rdev; -+ inode->i_blkbits = h_inode->i_blkbits; -+ au_cpup_igen(inode, h_inode); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* Note: dt_dentry and dt_h_dentry are not dget/dput-ed */ -+ -+/* keep the timestamps of the parent dir when cpup */ -+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry, -+ struct path *h_path) -+{ -+ struct inode *h_inode; -+ -+ dt->dt_dentry = dentry; -+ dt->dt_h_path = *h_path; -+ h_inode = h_path->dentry->d_inode; -+ dt->dt_atime = h_inode->i_atime; -+ dt->dt_mtime = h_inode->i_mtime; -+ /* smp_mb(); */ -+} -+ -+void au_dtime_revert(struct au_dtime *dt) -+{ -+ struct iattr attr; -+ int err; -+ -+ attr.ia_atime = dt->dt_atime; -+ attr.ia_mtime = dt->dt_mtime; -+ attr.ia_valid = ATTR_FORCE | ATTR_MTIME | ATTR_MTIME_SET -+ | ATTR_ATIME | ATTR_ATIME_SET; -+ -+ /* no delegation since this is a directory */ -+ err = vfsub_notify_change(&dt->dt_h_path, &attr, 
/*delegated*/NULL); -+ if (unlikely(err)) -+ pr_warn("restoring timestamps failed(%d). ignored\n", err); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* internal use only */ -+struct au_cpup_reg_attr { -+ int valid; -+ struct kstat st; -+ unsigned int iflags; /* inode->i_flags */ -+}; -+ -+static noinline_for_stack -+int cpup_iattr(struct dentry *dst, aufs_bindex_t bindex, struct dentry *h_src, -+ struct au_cpup_reg_attr *h_src_attr) -+{ -+ int err, sbits, icex; -+ unsigned int mnt_flags; -+ unsigned char verbose; -+ struct iattr ia; -+ struct path h_path; -+ struct inode *h_isrc, *h_idst; -+ struct kstat *h_st; -+ struct au_branch *br; -+ -+ h_path.dentry = au_h_dptr(dst, bindex); -+ h_idst = h_path.dentry->d_inode; -+ br = au_sbr(dst->d_sb, bindex); -+ h_path.mnt = au_br_mnt(br); -+ h_isrc = h_src->d_inode; -+ ia.ia_valid = ATTR_FORCE | ATTR_UID | ATTR_GID -+ | ATTR_ATIME | ATTR_MTIME -+ | ATTR_ATIME_SET | ATTR_MTIME_SET; -+ if (h_src_attr && h_src_attr->valid) { -+ h_st = &h_src_attr->st; -+ ia.ia_uid = h_st->uid; -+ ia.ia_gid = h_st->gid; -+ ia.ia_atime = h_st->atime; -+ ia.ia_mtime = h_st->mtime; -+ if (h_idst->i_mode != h_st->mode -+ && !S_ISLNK(h_idst->i_mode)) { -+ ia.ia_valid |= ATTR_MODE; -+ ia.ia_mode = h_st->mode; -+ } -+ sbits = !!(h_st->mode & (S_ISUID | S_ISGID)); -+ au_cpup_attr_flags(h_idst, h_src_attr->iflags); -+ } else { -+ ia.ia_uid = h_isrc->i_uid; -+ ia.ia_gid = h_isrc->i_gid; -+ ia.ia_atime = h_isrc->i_atime; -+ ia.ia_mtime = h_isrc->i_mtime; -+ if (h_idst->i_mode != h_isrc->i_mode -+ && !S_ISLNK(h_idst->i_mode)) { -+ ia.ia_valid |= ATTR_MODE; -+ ia.ia_mode = h_isrc->i_mode; -+ } -+ sbits = !!(h_isrc->i_mode & (S_ISUID | S_ISGID)); -+ au_cpup_attr_flags(h_idst, h_isrc->i_flags); -+ } -+ /* no delegation since it is just created */ -+ err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL); -+ -+ /* is this nfs only? 
*/ -+ if (!err && sbits && au_test_nfs(h_path.dentry->d_sb)) { -+ ia.ia_valid = ATTR_FORCE | ATTR_MODE; -+ ia.ia_mode = h_isrc->i_mode; -+ err = vfsub_notify_change(&h_path, &ia, /*delegated*/NULL); -+ } -+ -+ icex = br->br_perm & AuBrAttr_ICEX; -+ if (!err) { -+ mnt_flags = au_mntflags(dst->d_sb); -+ verbose = !!au_opt_test(mnt_flags, VERBOSE); -+ err = au_cpup_xattr(h_path.dentry, h_src, icex, verbose); -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_do_copy_file(struct file *dst, struct file *src, loff_t len, -+ char *buf, unsigned long blksize) -+{ -+ int err; -+ size_t sz, rbytes, wbytes; -+ unsigned char all_zero; -+ char *p, *zp; -+ struct mutex *h_mtx; -+ /* reduce stack usage */ -+ struct iattr *ia; -+ -+ zp = page_address(ZERO_PAGE(0)); -+ if (unlikely(!zp)) -+ return -ENOMEM; /* possible? */ -+ -+ err = 0; -+ all_zero = 0; -+ while (len) { -+ AuDbg("len %lld\n", len); -+ sz = blksize; -+ if (len < blksize) -+ sz = len; -+ -+ rbytes = 0; -+ /* todo: signal_pending? */ -+ while (!rbytes || err == -EAGAIN || err == -EINTR) { -+ rbytes = vfsub_read_k(src, buf, sz, &src->f_pos); -+ err = rbytes; -+ } -+ if (unlikely(err < 0)) -+ break; -+ -+ all_zero = 0; -+ if (len >= rbytes && rbytes == blksize) -+ all_zero = !memcmp(buf, zp, rbytes); -+ if (!all_zero) { -+ wbytes = rbytes; -+ p = buf; -+ while (wbytes) { -+ size_t b; -+ -+ b = vfsub_write_k(dst, p, wbytes, &dst->f_pos); -+ err = b; -+ /* todo: signal_pending? 
*/ -+ if (unlikely(err == -EAGAIN || err == -EINTR)) -+ continue; -+ if (unlikely(err < 0)) -+ break; -+ wbytes -= b; -+ p += b; -+ } -+ if (unlikely(err < 0)) -+ break; -+ } else { -+ loff_t res; -+ -+ AuLabel(hole); -+ res = vfsub_llseek(dst, rbytes, SEEK_CUR); -+ err = res; -+ if (unlikely(res < 0)) -+ break; -+ } -+ len -= rbytes; -+ err = 0; -+ } -+ -+ /* the last block may be a hole */ -+ if (!err && all_zero) { -+ AuLabel(last hole); -+ -+ err = 1; -+ if (au_test_nfs(dst->f_dentry->d_sb)) { -+ /* nfs requires this step to make last hole */ -+ /* is this only nfs? */ -+ do { -+ /* todo: signal_pending? */ -+ err = vfsub_write_k(dst, "\0", 1, &dst->f_pos); -+ } while (err == -EAGAIN || err == -EINTR); -+ if (err == 1) -+ dst->f_pos--; -+ } -+ -+ if (err == 1) { -+ ia = (void *)buf; -+ ia->ia_size = dst->f_pos; -+ ia->ia_valid = ATTR_SIZE | ATTR_FILE; -+ ia->ia_file = dst; -+ h_mtx = &file_inode(dst)->i_mutex; -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD2); -+ /* no delegation since it is just created */ -+ err = vfsub_notify_change(&dst->f_path, ia, -+ /*delegated*/NULL); -+ mutex_unlock(h_mtx); -+ } -+ } -+ -+ return err; -+} -+ -+int au_copy_file(struct file *dst, struct file *src, loff_t len) -+{ -+ int err; -+ unsigned long blksize; -+ unsigned char do_kfree; -+ char *buf; -+ -+ err = -ENOMEM; -+ blksize = dst->f_dentry->d_sb->s_blocksize; -+ if (!blksize || PAGE_SIZE < blksize) -+ blksize = PAGE_SIZE; -+ AuDbg("blksize %lu\n", blksize); -+ do_kfree = (blksize != PAGE_SIZE && blksize >= sizeof(struct iattr *)); -+ if (do_kfree) -+ buf = kmalloc(blksize, GFP_NOFS); -+ else -+ buf = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!buf)) -+ goto out; -+ -+ if (len > (1 << 22)) -+ AuDbg("copying a large file %lld\n", (long long)len); -+ -+ src->f_pos = 0; -+ dst->f_pos = 0; -+ err = au_do_copy_file(dst, src, len, buf, blksize); -+ if (do_kfree) -+ kfree(buf); -+ else -+ free_page((unsigned long)buf); -+ -+out: -+ return err; -+} -+ -+/* -+ * to support a 
sparse file which is opened with O_APPEND, -+ * we need to close the file. -+ */ -+static int au_cp_regular(struct au_cp_generic *cpg) -+{ -+ int err, i; -+ enum { SRC, DST }; -+ struct { -+ aufs_bindex_t bindex; -+ unsigned int flags; -+ struct dentry *dentry; -+ int force_wr; -+ struct file *file; -+ void *label; -+ } *f, file[] = { -+ { -+ .bindex = cpg->bsrc, -+ .flags = O_RDONLY | O_NOATIME | O_LARGEFILE, -+ .label = &&out -+ }, -+ { -+ .bindex = cpg->bdst, -+ .flags = O_WRONLY | O_NOATIME | O_LARGEFILE, -+ .force_wr = !!au_ftest_cpup(cpg->flags, RWDST), -+ .label = &&out_src -+ } -+ }; -+ struct super_block *sb; -+ struct task_struct *tsk = current; -+ -+ /* bsrc branch can be ro/rw. */ -+ sb = cpg->dentry->d_sb; -+ f = file; -+ for (i = 0; i < 2; i++, f++) { -+ f->dentry = au_h_dptr(cpg->dentry, f->bindex); -+ f->file = au_h_open(cpg->dentry, f->bindex, f->flags, -+ /*file*/NULL, f->force_wr); -+ err = PTR_ERR(f->file); -+ if (IS_ERR(f->file)) -+ goto *f->label; -+ } -+ -+ /* try stopping to update while we copyup */ -+ IMustLock(file[SRC].dentry->d_inode); -+ err = au_copy_file(file[DST].file, file[SRC].file, cpg->len); -+ -+ /* i wonder if we had O_NO_DELAY_FPUT flag */ -+ if (tsk->flags & PF_KTHREAD) -+ __fput_sync(file[DST].file); -+ else { -+ WARN(1, "%pD\nPlease report this warning to aufs-users ML", -+ file[DST].file); -+ fput(file[DST].file); -+ /* -+ * too bad. -+ * we have to call both since we don't know which place the file -+ * was added to. 
-+ */ -+ task_work_run(); -+ flush_delayed_fput(); -+ } -+ au_sbr_put(sb, file[DST].bindex); -+ -+out_src: -+ fput(file[SRC].file); -+ au_sbr_put(sb, file[SRC].bindex); -+out: -+ return err; -+} -+ -+static int au_do_cpup_regular(struct au_cp_generic *cpg, -+ struct au_cpup_reg_attr *h_src_attr) -+{ -+ int err, rerr; -+ loff_t l; -+ struct path h_path; -+ struct inode *h_src_inode, *h_dst_inode; -+ -+ err = 0; -+ h_src_inode = au_h_iptr(cpg->dentry->d_inode, cpg->bsrc); -+ l = i_size_read(h_src_inode); -+ if (cpg->len == -1 || l < cpg->len) -+ cpg->len = l; -+ if (cpg->len) { -+ /* try stopping to update while we are referencing */ -+ mutex_lock_nested(&h_src_inode->i_mutex, AuLsc_I_CHILD); -+ au_pin_hdir_unlock(cpg->pin); -+ -+ h_path.dentry = au_h_dptr(cpg->dentry, cpg->bsrc); -+ h_path.mnt = au_sbr_mnt(cpg->dentry->d_sb, cpg->bsrc); -+ h_src_attr->iflags = h_src_inode->i_flags; -+ err = vfs_getattr(&h_path, &h_src_attr->st); -+ if (unlikely(err)) { -+ mutex_unlock(&h_src_inode->i_mutex); -+ goto out; -+ } -+ h_src_attr->valid = 1; -+ err = au_cp_regular(cpg); -+ mutex_unlock(&h_src_inode->i_mutex); -+ rerr = au_pin_hdir_relock(cpg->pin); -+ if (!err && rerr) -+ err = rerr; -+ } -+ if (!err && (h_src_inode->i_state & I_LINKABLE)) { -+ h_path.dentry = au_h_dptr(cpg->dentry, cpg->bdst); -+ h_dst_inode = h_path.dentry->d_inode; -+ spin_lock(&h_dst_inode->i_lock); -+ h_dst_inode->i_state |= I_LINKABLE; -+ spin_unlock(&h_dst_inode->i_lock); -+ } -+ -+out: -+ return err; -+} -+ -+static int au_do_cpup_symlink(struct path *h_path, struct dentry *h_src, -+ struct inode *h_dir) -+{ -+ int err, symlen; -+ mm_segment_t old_fs; -+ union { -+ char *k; -+ char __user *u; -+ } sym; -+ -+ err = -ENOSYS; -+ if (unlikely(!h_src->d_inode->i_op->readlink)) -+ goto out; -+ -+ err = -ENOMEM; -+ sym.k = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!sym.k)) -+ goto out; -+ -+ /* unnecessary to support mmap_sem since symlink is not mmap-able */ -+ old_fs = get_fs(); -+ 
set_fs(KERNEL_DS); -+ symlen = h_src->d_inode->i_op->readlink(h_src, sym.u, PATH_MAX); -+ err = symlen; -+ set_fs(old_fs); -+ -+ if (symlen > 0) { -+ sym.k[symlen] = 0; -+ err = vfsub_symlink(h_dir, h_path, sym.k); -+ } -+ free_page((unsigned long)sym.k); -+ -+out: -+ return err; -+} -+ -+/* -+ * regardless 'acl' option, reset all ACL. -+ * All ACL will be copied up later from the original entry on the lower branch. -+ */ -+static int au_reset_acl(struct inode *h_dir, struct path *h_path, umode_t mode) -+{ -+ int err; -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ -+ h_dentry = h_path->dentry; -+ h_inode = h_dentry->d_inode; -+ /* forget_all_cached_acls(h_inode)); */ -+ err = vfsub_removexattr(h_dentry, XATTR_NAME_POSIX_ACL_ACCESS); -+ AuTraceErr(err); -+ if (err == -EOPNOTSUPP) -+ err = 0; -+ if (!err) -+ err = vfsub_acl_chmod(h_inode, mode); -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_do_cpup_dir(struct au_cp_generic *cpg, struct dentry *dst_parent, -+ struct inode *h_dir, struct path *h_path) -+{ -+ int err; -+ struct inode *dir; -+ -+ err = vfsub_removexattr(h_path->dentry, XATTR_NAME_POSIX_ACL_DEFAULT); -+ AuTraceErr(err); -+ if (err == -EOPNOTSUPP) -+ err = 0; -+ if (unlikely(err)) -+ goto out; -+ -+ /* -+ * strange behaviour from the users view, -+ * particularry setattr case -+ */ -+ dir = dst_parent->d_inode; -+ if (au_ibstart(dir) == cpg->bdst) -+ au_cpup_attr_nlink(dir, /*force*/1); -+ au_cpup_attr_nlink(cpg->dentry->d_inode, /*force*/1); -+ -+out: -+ return err; -+} -+ -+static noinline_for_stack -+int cpup_entry(struct au_cp_generic *cpg, struct dentry *dst_parent, -+ struct au_cpup_reg_attr *h_src_attr) -+{ -+ int err; -+ umode_t mode; -+ unsigned int mnt_flags; -+ unsigned char isdir, isreg, force; -+ const unsigned char do_dt = !!au_ftest_cpup(cpg->flags, DTIME); -+ struct au_dtime dt; -+ struct path h_path; -+ struct dentry *h_src, *h_dst, *h_parent; -+ struct inode *h_inode, *h_dir; -+ struct super_block *sb; -+ -+ /* 
bsrc branch can be ro/rw. */ -+ h_src = au_h_dptr(cpg->dentry, cpg->bsrc); -+ h_inode = h_src->d_inode; -+ AuDebugOn(h_inode != au_h_iptr(cpg->dentry->d_inode, cpg->bsrc)); -+ -+ /* try stopping to be referenced while we are creating */ -+ h_dst = au_h_dptr(cpg->dentry, cpg->bdst); -+ if (au_ftest_cpup(cpg->flags, RENAME)) -+ AuDebugOn(strncmp(h_dst->d_name.name, AUFS_WH_PFX, -+ AUFS_WH_PFX_LEN)); -+ h_parent = h_dst->d_parent; /* dir inode is locked */ -+ h_dir = h_parent->d_inode; -+ IMustLock(h_dir); -+ AuDebugOn(h_parent != h_dst->d_parent); -+ -+ sb = cpg->dentry->d_sb; -+ h_path.mnt = au_sbr_mnt(sb, cpg->bdst); -+ if (do_dt) { -+ h_path.dentry = h_parent; -+ au_dtime_store(&dt, dst_parent, &h_path); -+ } -+ h_path.dentry = h_dst; -+ -+ isreg = 0; -+ isdir = 0; -+ mode = h_inode->i_mode; -+ switch (mode & S_IFMT) { -+ case S_IFREG: -+ isreg = 1; -+ err = vfsub_create(h_dir, &h_path, S_IRUSR | S_IWUSR, -+ /*want_excl*/true); -+ if (!err) -+ err = au_do_cpup_regular(cpg, h_src_attr); -+ break; -+ case S_IFDIR: -+ isdir = 1; -+ err = vfsub_mkdir(h_dir, &h_path, mode); -+ if (!err) -+ err = au_do_cpup_dir(cpg, dst_parent, h_dir, &h_path); -+ break; -+ case S_IFLNK: -+ err = au_do_cpup_symlink(&h_path, h_src, h_dir); -+ break; -+ case S_IFCHR: -+ case S_IFBLK: -+ AuDebugOn(!capable(CAP_MKNOD)); -+ /*FALLTHROUGH*/ -+ case S_IFIFO: -+ case S_IFSOCK: -+ err = vfsub_mknod(h_dir, &h_path, mode, h_inode->i_rdev); -+ break; -+ default: -+ AuIOErr("Unknown inode type 0%o\n", mode); -+ err = -EIO; -+ } -+ if (!err) -+ err = au_reset_acl(h_dir, &h_path, mode); -+ -+ mnt_flags = au_mntflags(sb); -+ if (!au_opt_test(mnt_flags, UDBA_NONE) -+ && !isdir -+ && au_opt_test(mnt_flags, XINO) -+ && (h_inode->i_nlink == 1 -+ || (h_inode->i_state & I_LINKABLE)) -+ /* todo: unnecessary? 
*/ -+ /* && cpg->dentry->d_inode->i_nlink == 1 */ -+ && cpg->bdst < cpg->bsrc -+ && !au_ftest_cpup(cpg->flags, KEEPLINO)) -+ au_xino_write(sb, cpg->bsrc, h_inode->i_ino, /*ino*/0); -+ /* ignore this error */ -+ -+ if (!err) { -+ force = 0; -+ if (isreg) { -+ force = !!cpg->len; -+ if (cpg->len == -1) -+ force = !!i_size_read(h_inode); -+ } -+ au_fhsm_wrote(sb, cpg->bdst, force); -+ } -+ -+ if (do_dt) -+ au_dtime_revert(&dt); -+ return err; -+} -+ -+static int au_do_ren_after_cpup(struct au_cp_generic *cpg, struct path *h_path) -+{ -+ int err; -+ struct dentry *dentry, *h_dentry, *h_parent, *parent; -+ struct inode *h_dir; -+ aufs_bindex_t bdst; -+ -+ dentry = cpg->dentry; -+ bdst = cpg->bdst; -+ h_dentry = au_h_dptr(dentry, bdst); -+ if (!au_ftest_cpup(cpg->flags, OVERWRITE)) { -+ dget(h_dentry); -+ au_set_h_dptr(dentry, bdst, NULL); -+ err = au_lkup_neg(dentry, bdst, /*wh*/0); -+ if (!err) -+ h_path->dentry = dget(au_h_dptr(dentry, bdst)); -+ au_set_h_dptr(dentry, bdst, h_dentry); -+ } else { -+ err = 0; -+ parent = dget_parent(dentry); -+ h_parent = au_h_dptr(parent, bdst); -+ dput(parent); -+ h_path->dentry = vfsub_lkup_one(&dentry->d_name, h_parent); -+ if (IS_ERR(h_path->dentry)) -+ err = PTR_ERR(h_path->dentry); -+ } -+ if (unlikely(err)) -+ goto out; -+ -+ h_parent = h_dentry->d_parent; /* dir inode is locked */ -+ h_dir = h_parent->d_inode; -+ IMustLock(h_dir); -+ AuDbg("%pd %pd\n", h_dentry, h_path->dentry); -+ /* no delegation since it is just created */ -+ err = vfsub_rename(h_dir, h_dentry, h_dir, h_path, /*delegated*/NULL); -+ dput(h_path->dentry); -+ -+out: -+ return err; -+} -+ -+/* -+ * copyup the @dentry from @bsrc to @bdst. -+ * the caller must set the both of lower dentries. -+ * @len is for truncating when it is -1 copyup the entire file. -+ * in link/rename cases, @dst_parent may be different from the real one. -+ * basic->bsrc can be larger than basic->bdst. 
-+ */ -+static int au_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent) -+{ -+ int err, rerr; -+ aufs_bindex_t old_ibstart; -+ unsigned char isdir, plink; -+ struct dentry *h_src, *h_dst, *h_parent; -+ struct inode *dst_inode, *h_dir, *inode, *delegated; -+ struct super_block *sb; -+ struct au_branch *br; -+ /* to reuduce stack size */ -+ struct { -+ struct au_dtime dt; -+ struct path h_path; -+ struct au_cpup_reg_attr h_src_attr; -+ } *a; -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ a->h_src_attr.valid = 0; -+ -+ sb = cpg->dentry->d_sb; -+ br = au_sbr(sb, cpg->bdst); -+ a->h_path.mnt = au_br_mnt(br); -+ h_dst = au_h_dptr(cpg->dentry, cpg->bdst); -+ h_parent = h_dst->d_parent; /* dir inode is locked */ -+ h_dir = h_parent->d_inode; -+ IMustLock(h_dir); -+ -+ h_src = au_h_dptr(cpg->dentry, cpg->bsrc); -+ inode = cpg->dentry->d_inode; -+ -+ if (!dst_parent) -+ dst_parent = dget_parent(cpg->dentry); -+ else -+ dget(dst_parent); -+ -+ plink = !!au_opt_test(au_mntflags(sb), PLINK); -+ dst_inode = au_h_iptr(inode, cpg->bdst); -+ if (dst_inode) { -+ if (unlikely(!plink)) { -+ err = -EIO; -+ AuIOErr("hi%lu(i%lu) exists on b%d " -+ "but plink is disabled\n", -+ dst_inode->i_ino, inode->i_ino, cpg->bdst); -+ goto out_parent; -+ } -+ -+ if (dst_inode->i_nlink) { -+ const int do_dt = au_ftest_cpup(cpg->flags, DTIME); -+ -+ h_src = au_plink_lkup(inode, cpg->bdst); -+ err = PTR_ERR(h_src); -+ if (IS_ERR(h_src)) -+ goto out_parent; -+ if (unlikely(!h_src->d_inode)) { -+ err = -EIO; -+ AuIOErr("i%lu exists on b%d " -+ "but not pseudo-linked\n", -+ inode->i_ino, cpg->bdst); -+ dput(h_src); -+ goto out_parent; -+ } -+ -+ if (do_dt) { -+ a->h_path.dentry = h_parent; -+ au_dtime_store(&a->dt, dst_parent, &a->h_path); -+ } -+ -+ a->h_path.dentry = h_dst; -+ delegated = NULL; -+ err = vfsub_link(h_src, h_dir, &a->h_path, &delegated); -+ if (!err && au_ftest_cpup(cpg->flags, RENAME)) -+ err = au_do_ren_after_cpup(cpg, 
&a->h_path); -+ if (do_dt) -+ au_dtime_revert(&a->dt); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ dput(h_src); -+ goto out_parent; -+ } else -+ /* todo: cpup_wh_file? */ -+ /* udba work */ -+ au_update_ibrange(inode, /*do_put_zero*/1); -+ } -+ -+ isdir = S_ISDIR(inode->i_mode); -+ old_ibstart = au_ibstart(inode); -+ err = cpup_entry(cpg, dst_parent, &a->h_src_attr); -+ if (unlikely(err)) -+ goto out_rev; -+ dst_inode = h_dst->d_inode; -+ mutex_lock_nested(&dst_inode->i_mutex, AuLsc_I_CHILD2); -+ /* todo: necessary? */ -+ /* au_pin_hdir_unlock(cpg->pin); */ -+ -+ err = cpup_iattr(cpg->dentry, cpg->bdst, h_src, &a->h_src_attr); -+ if (unlikely(err)) { -+ /* todo: necessary? */ -+ /* au_pin_hdir_relock(cpg->pin); */ /* ignore an error */ -+ mutex_unlock(&dst_inode->i_mutex); -+ goto out_rev; -+ } -+ -+ if (cpg->bdst < old_ibstart) { -+ if (S_ISREG(inode->i_mode)) { -+ err = au_dy_iaop(inode, cpg->bdst, dst_inode); -+ if (unlikely(err)) { -+ /* ignore an error */ -+ /* au_pin_hdir_relock(cpg->pin); */ -+ mutex_unlock(&dst_inode->i_mutex); -+ goto out_rev; -+ } -+ } -+ au_set_ibstart(inode, cpg->bdst); -+ } else -+ au_set_ibend(inode, cpg->bdst); -+ au_set_h_iptr(inode, cpg->bdst, au_igrab(dst_inode), -+ au_hi_flags(inode, isdir)); -+ -+ /* todo: necessary? 
*/ -+ /* err = au_pin_hdir_relock(cpg->pin); */ -+ mutex_unlock(&dst_inode->i_mutex); -+ if (unlikely(err)) -+ goto out_rev; -+ -+ if (!isdir -+ && (h_src->d_inode->i_nlink > 1 -+ || h_src->d_inode->i_state & I_LINKABLE) -+ && plink) -+ au_plink_append(inode, cpg->bdst, h_dst); -+ -+ if (au_ftest_cpup(cpg->flags, RENAME)) { -+ a->h_path.dentry = h_dst; -+ err = au_do_ren_after_cpup(cpg, &a->h_path); -+ } -+ if (!err) -+ goto out_parent; /* success */ -+ -+ /* revert */ -+out_rev: -+ a->h_path.dentry = h_parent; -+ au_dtime_store(&a->dt, dst_parent, &a->h_path); -+ a->h_path.dentry = h_dst; -+ rerr = 0; -+ if (h_dst->d_inode) { -+ if (!isdir) { -+ /* no delegation since it is just created */ -+ rerr = vfsub_unlink(h_dir, &a->h_path, -+ /*delegated*/NULL, /*force*/0); -+ } else -+ rerr = vfsub_rmdir(h_dir, &a->h_path); -+ } -+ au_dtime_revert(&a->dt); -+ if (rerr) { -+ AuIOErr("failed removing broken entry(%d, %d)\n", err, rerr); -+ err = -EIO; -+ } -+out_parent: -+ dput(dst_parent); -+ kfree(a); -+out: -+ return err; -+} -+ -+#if 0 /* reserved */ -+struct au_cpup_single_args { -+ int *errp; -+ struct au_cp_generic *cpg; -+ struct dentry *dst_parent; -+}; -+ -+static void au_call_cpup_single(void *args) -+{ -+ struct au_cpup_single_args *a = args; -+ -+ au_pin_hdir_acquire_nest(a->cpg->pin); -+ *a->errp = au_cpup_single(a->cpg, a->dst_parent); -+ au_pin_hdir_release(a->cpg->pin); -+} -+#endif -+ -+/* -+ * prevent SIGXFSZ in copy-up. -+ * testing CAP_MKNOD is for generic fs, -+ * but CAP_FSETID is for xfs only, currently. 
-+ */ -+static int au_cpup_sio_test(struct au_pin *pin, umode_t mode) -+{ -+ int do_sio; -+ struct super_block *sb; -+ struct inode *h_dir; -+ -+ do_sio = 0; -+ sb = au_pinned_parent(pin)->d_sb; -+ if (!au_wkq_test() -+ && (!au_sbi(sb)->si_plink_maint_pid -+ || au_plink_maint(sb, AuLock_NOPLM))) { -+ switch (mode & S_IFMT) { -+ case S_IFREG: -+ /* no condition about RLIMIT_FSIZE and the file size */ -+ do_sio = 1; -+ break; -+ case S_IFCHR: -+ case S_IFBLK: -+ do_sio = !capable(CAP_MKNOD); -+ break; -+ } -+ if (!do_sio) -+ do_sio = ((mode & (S_ISUID | S_ISGID)) -+ && !capable(CAP_FSETID)); -+ /* this workaround may be removed in the future */ -+ if (!do_sio) { -+ h_dir = au_pinned_h_dir(pin); -+ do_sio = h_dir->i_mode & S_ISVTX; -+ } -+ } -+ -+ return do_sio; -+} -+ -+#if 0 /* reserved */ -+int au_sio_cpup_single(struct au_cp_generic *cpg, struct dentry *dst_parent) -+{ -+ int err, wkq_err; -+ struct dentry *h_dentry; -+ -+ h_dentry = au_h_dptr(cpg->dentry, cpg->bsrc); -+ if (!au_cpup_sio_test(pin, h_dentry->d_inode->i_mode)) -+ err = au_cpup_single(cpg, dst_parent); -+ else { -+ struct au_cpup_single_args args = { -+ .errp = &err, -+ .cpg = cpg, -+ .dst_parent = dst_parent -+ }; -+ wkq_err = au_wkq_wait(au_call_cpup_single, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ return err; -+} -+#endif -+ -+/* -+ * copyup the @dentry from the first active lower branch to @bdst, -+ * using au_cpup_single(). 
-+ */ -+static int au_cpup_simple(struct au_cp_generic *cpg) -+{ -+ int err; -+ unsigned int flags_orig; -+ struct dentry *dentry; -+ -+ AuDebugOn(cpg->bsrc < 0); -+ -+ dentry = cpg->dentry; -+ DiMustWriteLock(dentry); -+ -+ err = au_lkup_neg(dentry, cpg->bdst, /*wh*/1); -+ if (!err) { -+ flags_orig = cpg->flags; -+ au_fset_cpup(cpg->flags, RENAME); -+ err = au_cpup_single(cpg, NULL); -+ cpg->flags = flags_orig; -+ if (!err) -+ return 0; /* success */ -+ -+ /* revert */ -+ au_set_h_dptr(dentry, cpg->bdst, NULL); -+ au_set_dbstart(dentry, cpg->bsrc); -+ } -+ -+ return err; -+} -+ -+struct au_cpup_simple_args { -+ int *errp; -+ struct au_cp_generic *cpg; -+}; -+ -+static void au_call_cpup_simple(void *args) -+{ -+ struct au_cpup_simple_args *a = args; -+ -+ au_pin_hdir_acquire_nest(a->cpg->pin); -+ *a->errp = au_cpup_simple(a->cpg); -+ au_pin_hdir_release(a->cpg->pin); -+} -+ -+static int au_do_sio_cpup_simple(struct au_cp_generic *cpg) -+{ -+ int err, wkq_err; -+ struct dentry *dentry, *parent; -+ struct file *h_file; -+ struct inode *h_dir; -+ -+ dentry = cpg->dentry; -+ h_file = NULL; -+ if (au_ftest_cpup(cpg->flags, HOPEN)) { -+ AuDebugOn(cpg->bsrc < 0); -+ h_file = au_h_open_pre(dentry, cpg->bsrc, /*force_wr*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ } -+ -+ parent = dget_parent(dentry); -+ h_dir = au_h_iptr(parent->d_inode, cpg->bdst); -+ if (!au_test_h_perm_sio(h_dir, MAY_EXEC | MAY_WRITE) -+ && !au_cpup_sio_test(cpg->pin, dentry->d_inode->i_mode)) -+ err = au_cpup_simple(cpg); -+ else { -+ struct au_cpup_simple_args args = { -+ .errp = &err, -+ .cpg = cpg -+ }; -+ wkq_err = au_wkq_wait(au_call_cpup_simple, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ dput(parent); -+ if (h_file) -+ au_h_open_post(dentry, cpg->bsrc, h_file); -+ -+out: -+ return err; -+} -+ -+int au_sio_cpup_simple(struct au_cp_generic *cpg) -+{ -+ aufs_bindex_t bsrc, bend; -+ struct dentry *dentry, *h_dentry; -+ -+ if (cpg->bsrc < 0) { -+ dentry = 
cpg->dentry; -+ bend = au_dbend(dentry); -+ for (bsrc = cpg->bdst + 1; bsrc <= bend; bsrc++) { -+ h_dentry = au_h_dptr(dentry, bsrc); -+ if (h_dentry) { -+ AuDebugOn(!h_dentry->d_inode); -+ break; -+ } -+ } -+ AuDebugOn(bsrc > bend); -+ cpg->bsrc = bsrc; -+ } -+ AuDebugOn(cpg->bsrc <= cpg->bdst); -+ return au_do_sio_cpup_simple(cpg); -+} -+ -+int au_sio_cpdown_simple(struct au_cp_generic *cpg) -+{ -+ AuDebugOn(cpg->bdst <= cpg->bsrc); -+ return au_do_sio_cpup_simple(cpg); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * copyup the deleted file for writing. -+ */ -+static int au_do_cpup_wh(struct au_cp_generic *cpg, struct dentry *wh_dentry, -+ struct file *file) -+{ -+ int err; -+ unsigned int flags_orig; -+ aufs_bindex_t bsrc_orig; -+ struct dentry *h_d_dst, *h_d_start; -+ struct au_dinfo *dinfo; -+ struct au_hdentry *hdp; -+ -+ dinfo = au_di(cpg->dentry); -+ AuRwMustWriteLock(&dinfo->di_rwsem); -+ -+ bsrc_orig = cpg->bsrc; -+ cpg->bsrc = dinfo->di_bstart; -+ hdp = dinfo->di_hdentry; -+ h_d_dst = hdp[0 + cpg->bdst].hd_dentry; -+ dinfo->di_bstart = cpg->bdst; -+ hdp[0 + cpg->bdst].hd_dentry = wh_dentry; -+ h_d_start = NULL; -+ if (file) { -+ h_d_start = hdp[0 + cpg->bsrc].hd_dentry; -+ hdp[0 + cpg->bsrc].hd_dentry = au_hf_top(file)->f_dentry; -+ } -+ flags_orig = cpg->flags; -+ cpg->flags = !AuCpup_DTIME; -+ err = au_cpup_single(cpg, /*h_parent*/NULL); -+ cpg->flags = flags_orig; -+ if (file) { -+ if (!err) -+ err = au_reopen_nondir(file); -+ hdp[0 + cpg->bsrc].hd_dentry = h_d_start; -+ } -+ hdp[0 + cpg->bdst].hd_dentry = h_d_dst; -+ dinfo->di_bstart = cpg->bsrc; -+ cpg->bsrc = bsrc_orig; -+ -+ return err; -+} -+ -+static int au_cpup_wh(struct au_cp_generic *cpg, struct file *file) -+{ -+ int err; -+ aufs_bindex_t bdst; -+ struct au_dtime dt; -+ struct dentry *dentry, *parent, *h_parent, *wh_dentry; -+ struct au_branch *br; -+ struct path h_path; -+ -+ dentry = cpg->dentry; -+ bdst = cpg->bdst; -+ br = 
au_sbr(dentry->d_sb, bdst); -+ parent = dget_parent(dentry); -+ h_parent = au_h_dptr(parent, bdst); -+ wh_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out; -+ -+ h_path.dentry = h_parent; -+ h_path.mnt = au_br_mnt(br); -+ au_dtime_store(&dt, parent, &h_path); -+ err = au_do_cpup_wh(cpg, wh_dentry, file); -+ if (unlikely(err)) -+ goto out_wh; -+ -+ dget(wh_dentry); -+ h_path.dentry = wh_dentry; -+ if (!d_is_dir(wh_dentry)) { -+ /* no delegation since it is just created */ -+ err = vfsub_unlink(h_parent->d_inode, &h_path, -+ /*delegated*/NULL, /*force*/0); -+ } else -+ err = vfsub_rmdir(h_parent->d_inode, &h_path); -+ if (unlikely(err)) { -+ AuIOErr("failed remove copied-up tmp file %pd(%d)\n", -+ wh_dentry, err); -+ err = -EIO; -+ } -+ au_dtime_revert(&dt); -+ au_set_hi_wh(dentry->d_inode, bdst, wh_dentry); -+ -+out_wh: -+ dput(wh_dentry); -+out: -+ dput(parent); -+ return err; -+} -+ -+struct au_cpup_wh_args { -+ int *errp; -+ struct au_cp_generic *cpg; -+ struct file *file; -+}; -+ -+static void au_call_cpup_wh(void *args) -+{ -+ struct au_cpup_wh_args *a = args; -+ -+ au_pin_hdir_acquire_nest(a->cpg->pin); -+ *a->errp = au_cpup_wh(a->cpg, a->file); -+ au_pin_hdir_release(a->cpg->pin); -+} -+ -+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file) -+{ -+ int err, wkq_err; -+ aufs_bindex_t bdst; -+ struct dentry *dentry, *parent, *h_orph, *h_parent; -+ struct inode *dir, *h_dir, *h_tmpdir; -+ struct au_wbr *wbr; -+ struct au_pin wh_pin, *pin_orig; -+ -+ dentry = cpg->dentry; -+ bdst = cpg->bdst; -+ parent = dget_parent(dentry); -+ dir = parent->d_inode; -+ h_orph = NULL; -+ h_parent = NULL; -+ h_dir = au_igrab(au_h_iptr(dir, bdst)); -+ h_tmpdir = h_dir; -+ pin_orig = NULL; -+ if (!h_dir->i_nlink) { -+ wbr = au_sbr(dentry->d_sb, bdst)->br_wbr; -+ h_orph = wbr->wbr_orph; -+ -+ h_parent = dget(au_h_dptr(parent, bdst)); -+ au_set_h_dptr(parent, bdst, dget(h_orph)); -+ h_tmpdir = 
h_orph->d_inode; -+ au_set_h_iptr(dir, bdst, au_igrab(h_tmpdir), /*flags*/0); -+ -+ mutex_lock_nested(&h_tmpdir->i_mutex, AuLsc_I_PARENT3); -+ /* todo: au_h_open_pre()? */ -+ -+ pin_orig = cpg->pin; -+ au_pin_init(&wh_pin, dentry, bdst, AuLsc_DI_PARENT, -+ AuLsc_I_PARENT3, cpg->pin->udba, AuPin_DI_LOCKED); -+ cpg->pin = &wh_pin; -+ } -+ -+ if (!au_test_h_perm_sio(h_tmpdir, MAY_EXEC | MAY_WRITE) -+ && !au_cpup_sio_test(cpg->pin, dentry->d_inode->i_mode)) -+ err = au_cpup_wh(cpg, file); -+ else { -+ struct au_cpup_wh_args args = { -+ .errp = &err, -+ .cpg = cpg, -+ .file = file -+ }; -+ wkq_err = au_wkq_wait(au_call_cpup_wh, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ if (h_orph) { -+ mutex_unlock(&h_tmpdir->i_mutex); -+ /* todo: au_h_open_post()? */ -+ au_set_h_iptr(dir, bdst, au_igrab(h_dir), /*flags*/0); -+ au_set_h_dptr(parent, bdst, h_parent); -+ AuDebugOn(!pin_orig); -+ cpg->pin = pin_orig; -+ } -+ iput(h_dir); -+ dput(parent); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * generic routine for both of copy-up and copy-down. -+ */ -+/* cf. 
revalidate function in file.c */ -+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst, -+ int (*cp)(struct dentry *dentry, aufs_bindex_t bdst, -+ struct au_pin *pin, -+ struct dentry *h_parent, void *arg), -+ void *arg) -+{ -+ int err; -+ struct au_pin pin; -+ struct dentry *d, *parent, *h_parent, *real_parent; -+ -+ err = 0; -+ parent = dget_parent(dentry); -+ if (IS_ROOT(parent)) -+ goto out; -+ -+ au_pin_init(&pin, dentry, bdst, AuLsc_DI_PARENT2, AuLsc_I_PARENT2, -+ au_opt_udba(dentry->d_sb), AuPin_MNT_WRITE); -+ -+ /* do not use au_dpage */ -+ real_parent = parent; -+ while (1) { -+ dput(parent); -+ parent = dget_parent(dentry); -+ h_parent = au_h_dptr(parent, bdst); -+ if (h_parent) -+ goto out; /* success */ -+ -+ /* find top dir which is necessary to cpup */ -+ do { -+ d = parent; -+ dput(parent); -+ parent = dget_parent(d); -+ di_read_lock_parent3(parent, !AuLock_IR); -+ h_parent = au_h_dptr(parent, bdst); -+ di_read_unlock(parent, !AuLock_IR); -+ } while (!h_parent); -+ -+ if (d != real_parent) -+ di_write_lock_child3(d); -+ -+ /* somebody else might create while we were sleeping */ -+ if (!au_h_dptr(d, bdst) || !au_h_dptr(d, bdst)->d_inode) { -+ if (au_h_dptr(d, bdst)) -+ au_update_dbstart(d); -+ -+ au_pin_set_dentry(&pin, d); -+ err = au_do_pin(&pin); -+ if (!err) { -+ err = cp(d, bdst, &pin, h_parent, arg); -+ au_unpin(&pin); -+ } -+ } -+ -+ if (d != real_parent) -+ di_write_unlock(d); -+ if (unlikely(err)) -+ break; -+ } -+ -+out: -+ dput(parent); -+ return err; -+} -+ -+static int au_cpup_dir(struct dentry *dentry, aufs_bindex_t bdst, -+ struct au_pin *pin, -+ struct dentry *h_parent __maybe_unused, -+ void *arg __maybe_unused) -+{ -+ struct au_cp_generic cpg = { -+ .dentry = dentry, -+ .bdst = bdst, -+ .bsrc = -1, -+ .len = 0, -+ .pin = pin, -+ .flags = AuCpup_DTIME -+ }; -+ return au_sio_cpup_simple(&cpg); -+} -+ -+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst) -+{ -+ return au_cp_dirs(dentry, bdst, au_cpup_dir, NULL); -+} -+ 
-+int au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst) -+{ -+ int err; -+ struct dentry *parent; -+ struct inode *dir; -+ -+ parent = dget_parent(dentry); -+ dir = parent->d_inode; -+ err = 0; -+ if (au_h_iptr(dir, bdst)) -+ goto out; -+ -+ di_read_unlock(parent, AuLock_IR); -+ di_write_lock_parent(parent); -+ /* someone else might change our inode while we were sleeping */ -+ if (!au_h_iptr(dir, bdst)) -+ err = au_cpup_dirs(dentry, bdst); -+ di_downgrade_lock(parent, AuLock_IR); -+ -+out: -+ dput(parent); -+ return err; -+} -diff --git a/fs/aufs/cpup.h b/fs/aufs/cpup.h -new file mode 100644 -index 0000000..7721429 ---- /dev/null -+++ b/fs/aufs/cpup.h -@@ -0,0 +1,94 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * copy-up/down functions -+ */ -+ -+#ifndef __AUFS_CPUP_H__ -+#define __AUFS_CPUP_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+ -+struct inode; -+struct file; -+struct au_pin; -+ -+void au_cpup_attr_flags(struct inode *dst, unsigned int iflags); -+void au_cpup_attr_timesizes(struct inode *inode); -+void au_cpup_attr_nlink(struct inode *inode, int force); -+void au_cpup_attr_changeable(struct inode *inode); -+void au_cpup_igen(struct inode *inode, struct inode *h_inode); -+void au_cpup_attr_all(struct inode *inode, int force); -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_cp_generic { -+ struct dentry *dentry; -+ aufs_bindex_t bdst, bsrc; -+ loff_t len; -+ struct au_pin *pin; -+ unsigned int flags; -+}; -+ -+/* cpup flags */ -+#define AuCpup_DTIME 1 /* do dtime_store/revert */ -+#define AuCpup_KEEPLINO (1 << 1) /* do not clear the lower xino, -+ for link(2) */ -+#define AuCpup_RENAME (1 << 2) /* rename after cpup */ -+#define AuCpup_HOPEN (1 << 3) /* call h_open_pre/post() in -+ cpup */ -+#define AuCpup_OVERWRITE (1 << 4) /* allow overwriting the -+ existing entry */ -+#define AuCpup_RWDST (1 << 5) /* force write target even if -+ the branch is marked as RO */ -+ -+#define au_ftest_cpup(flags, name) ((flags) & AuCpup_##name) -+#define au_fset_cpup(flags, name) \ -+ do { (flags) |= AuCpup_##name; } while (0) -+#define au_fclr_cpup(flags, name) \ -+ do { (flags) &= ~AuCpup_##name; } while (0) -+ -+int au_copy_file(struct file *dst, struct file *src, loff_t len); -+int au_sio_cpup_simple(struct au_cp_generic *cpg); -+int au_sio_cpdown_simple(struct au_cp_generic *cpg); -+int au_sio_cpup_wh(struct au_cp_generic *cpg, struct file *file); -+ -+int au_cp_dirs(struct dentry *dentry, aufs_bindex_t bdst, -+ int (*cp)(struct dentry *dentry, aufs_bindex_t bdst, -+ struct au_pin *pin, -+ struct dentry *h_parent, void *arg), -+ void *arg); -+int au_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst); -+int 
au_test_and_cpup_dirs(struct dentry *dentry, aufs_bindex_t bdst); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* keep timestamps when copyup */ -+struct au_dtime { -+ struct dentry *dt_dentry; -+ struct path dt_h_path; -+ struct timespec dt_atime, dt_mtime; -+}; -+void au_dtime_store(struct au_dtime *dt, struct dentry *dentry, -+ struct path *h_path); -+void au_dtime_revert(struct au_dtime *dt); -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_CPUP_H__ */ -diff --git a/fs/aufs/dbgaufs.c b/fs/aufs/dbgaufs.c -new file mode 100644 -index 0000000..b4fdc25 ---- /dev/null -+++ b/fs/aufs/dbgaufs.c -@@ -0,0 +1,432 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * debugfs interface -+ */ -+ -+#include -+#include "aufs.h" -+ -+#ifndef CONFIG_SYSFS -+#error DEBUG_FS depends upon SYSFS -+#endif -+ -+static struct dentry *dbgaufs; -+static const mode_t dbgaufs_mode = S_IRUSR | S_IRGRP | S_IROTH; -+ -+/* 20 is max digits length of ulong 64 */ -+struct dbgaufs_arg { -+ int n; -+ char a[20 * 4]; -+}; -+ -+/* -+ * common function for all XINO files -+ */ -+static int dbgaufs_xi_release(struct inode *inode __maybe_unused, -+ struct file *file) -+{ -+ kfree(file->private_data); -+ return 0; -+} -+ -+static int dbgaufs_xi_open(struct file *xf, struct file *file, int do_fcnt) -+{ -+ int err; -+ struct kstat st; -+ struct dbgaufs_arg *p; -+ -+ err = -ENOMEM; -+ p = kmalloc(sizeof(*p), GFP_NOFS); -+ if (unlikely(!p)) -+ goto out; -+ -+ err = 0; -+ p->n = 0; -+ file->private_data = p; -+ if (!xf) -+ goto out; -+ -+ err = vfs_getattr(&xf->f_path, &st); -+ if (!err) { -+ if (do_fcnt) -+ p->n = snprintf -+ (p->a, sizeof(p->a), "%ld, %llux%lu %lld\n", -+ (long)file_count(xf), st.blocks, st.blksize, -+ (long long)st.size); -+ else -+ p->n = snprintf(p->a, sizeof(p->a), "%llux%lu %lld\n", -+ st.blocks, st.blksize, -+ (long long)st.size); -+ AuDebugOn(p->n >= sizeof(p->a)); -+ } else { -+ p->n = snprintf(p->a, sizeof(p->a), "err %d\n", err); -+ err = 0; -+ } -+ -+out: -+ return err; -+ -+} -+ -+static ssize_t dbgaufs_xi_read(struct file *file, char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ struct dbgaufs_arg *p; -+ -+ p = file->private_data; -+ return simple_read_from_buffer(buf, count, ppos, p->a, p->n); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct dbgaufs_plink_arg { -+ int n; -+ char a[]; -+}; -+ -+static int dbgaufs_plink_release(struct inode *inode __maybe_unused, -+ struct file *file) -+{ -+ free_page((unsigned long)file->private_data); -+ return 0; -+} -+ -+static int dbgaufs_plink_open(struct inode *inode, struct file *file) -+{ -+ int err, i, limit; -+ 
unsigned long n, sum; -+ struct dbgaufs_plink_arg *p; -+ struct au_sbinfo *sbinfo; -+ struct super_block *sb; -+ struct au_sphlhead *sphl; -+ -+ err = -ENOMEM; -+ p = (void *)get_zeroed_page(GFP_NOFS); -+ if (unlikely(!p)) -+ goto out; -+ -+ err = -EFBIG; -+ sbinfo = inode->i_private; -+ sb = sbinfo->si_sb; -+ si_noflush_read_lock(sb); -+ if (au_opt_test(au_mntflags(sb), PLINK)) { -+ limit = PAGE_SIZE - sizeof(p->n); -+ -+ /* the number of buckets */ -+ n = snprintf(p->a + p->n, limit, "%d\n", AuPlink_NHASH); -+ p->n += n; -+ limit -= n; -+ -+ sum = 0; -+ for (i = 0, sphl = sbinfo->si_plink; -+ i < AuPlink_NHASH; -+ i++, sphl++) { -+ n = au_sphl_count(sphl); -+ sum += n; -+ -+ n = snprintf(p->a + p->n, limit, "%lu ", n); -+ p->n += n; -+ limit -= n; -+ if (unlikely(limit <= 0)) -+ goto out_free; -+ } -+ p->a[p->n - 1] = '\n'; -+ -+ /* the sum of plinks */ -+ n = snprintf(p->a + p->n, limit, "%lu\n", sum); -+ p->n += n; -+ limit -= n; -+ if (unlikely(limit <= 0)) -+ goto out_free; -+ } else { -+#define str "1\n0\n0\n" -+ p->n = sizeof(str) - 1; -+ strcpy(p->a, str); -+#undef str -+ } -+ si_read_unlock(sb); -+ -+ err = 0; -+ file->private_data = p; -+ goto out; /* success */ -+ -+out_free: -+ free_page((unsigned long)p); -+out: -+ return err; -+} -+ -+static ssize_t dbgaufs_plink_read(struct file *file, char __user *buf, -+ size_t count, loff_t *ppos) -+{ -+ struct dbgaufs_plink_arg *p; -+ -+ p = file->private_data; -+ return simple_read_from_buffer(buf, count, ppos, p->a, p->n); -+} -+ -+static const struct file_operations dbgaufs_plink_fop = { -+ .owner = THIS_MODULE, -+ .open = dbgaufs_plink_open, -+ .release = dbgaufs_plink_release, -+ .read = dbgaufs_plink_read -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int dbgaufs_xib_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ struct super_block *sb; -+ -+ sbinfo = inode->i_private; -+ sb = sbinfo->si_sb; -+ 
si_noflush_read_lock(sb); -+ err = dbgaufs_xi_open(sbinfo->si_xib, file, /*do_fcnt*/0); -+ si_read_unlock(sb); -+ return err; -+} -+ -+static const struct file_operations dbgaufs_xib_fop = { -+ .owner = THIS_MODULE, -+ .open = dbgaufs_xib_open, -+ .release = dbgaufs_xi_release, -+ .read = dbgaufs_xi_read -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define DbgaufsXi_PREFIX "xi" -+ -+static int dbgaufs_xino_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ long l; -+ struct au_sbinfo *sbinfo; -+ struct super_block *sb; -+ struct file *xf; -+ struct qstr *name; -+ -+ err = -ENOENT; -+ xf = NULL; -+ name = &file->f_dentry->d_name; -+ if (unlikely(name->len < sizeof(DbgaufsXi_PREFIX) -+ || memcmp(name->name, DbgaufsXi_PREFIX, -+ sizeof(DbgaufsXi_PREFIX) - 1))) -+ goto out; -+ err = kstrtol(name->name + sizeof(DbgaufsXi_PREFIX) - 1, 10, &l); -+ if (unlikely(err)) -+ goto out; -+ -+ sbinfo = inode->i_private; -+ sb = sbinfo->si_sb; -+ si_noflush_read_lock(sb); -+ if (l <= au_sbend(sb)) { -+ xf = au_sbr(sb, (aufs_bindex_t)l)->br_xino.xi_file; -+ err = dbgaufs_xi_open(xf, file, /*do_fcnt*/1); -+ } else -+ err = -ENOENT; -+ si_read_unlock(sb); -+ -+out: -+ return err; -+} -+ -+static const struct file_operations dbgaufs_xino_fop = { -+ .owner = THIS_MODULE, -+ .open = dbgaufs_xino_open, -+ .release = dbgaufs_xi_release, -+ .read = dbgaufs_xi_read -+}; -+ -+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ aufs_bindex_t bend; -+ struct au_branch *br; -+ struct au_xino_file *xi; -+ -+ if (!au_sbi(sb)->si_dbgaufs) -+ return; -+ -+ bend = au_sbend(sb); -+ for (; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ xi = &br->br_xino; -+ debugfs_remove(xi->xi_dbgaufs); -+ xi->xi_dbgaufs = NULL; -+ } -+} -+ -+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ struct au_sbinfo *sbinfo; -+ struct dentry *parent; -+ struct au_branch *br; -+ struct au_xino_file *xi; -+ 
aufs_bindex_t bend; -+ char name[sizeof(DbgaufsXi_PREFIX) + 5]; /* "xi" bindex NULL */ -+ -+ sbinfo = au_sbi(sb); -+ parent = sbinfo->si_dbgaufs; -+ if (!parent) -+ return; -+ -+ bend = au_sbend(sb); -+ for (; bindex <= bend; bindex++) { -+ snprintf(name, sizeof(name), DbgaufsXi_PREFIX "%d", bindex); -+ br = au_sbr(sb, bindex); -+ xi = &br->br_xino; -+ AuDebugOn(xi->xi_dbgaufs); -+ xi->xi_dbgaufs = debugfs_create_file(name, dbgaufs_mode, parent, -+ sbinfo, &dbgaufs_xino_fop); -+ /* ignore an error */ -+ if (unlikely(!xi->xi_dbgaufs)) -+ AuWarn1("failed %s under debugfs\n", name); -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_EXPORT -+static int dbgaufs_xigen_open(struct inode *inode, struct file *file) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ struct super_block *sb; -+ -+ sbinfo = inode->i_private; -+ sb = sbinfo->si_sb; -+ si_noflush_read_lock(sb); -+ err = dbgaufs_xi_open(sbinfo->si_xigen, file, /*do_fcnt*/0); -+ si_read_unlock(sb); -+ return err; -+} -+ -+static const struct file_operations dbgaufs_xigen_fop = { -+ .owner = THIS_MODULE, -+ .open = dbgaufs_xigen_open, -+ .release = dbgaufs_xi_release, -+ .read = dbgaufs_xi_read -+}; -+ -+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo) -+{ -+ int err; -+ -+ /* -+ * This function is a dynamic '__init' function actually, -+ * so the tiny check for si_rwsem is unnecessary. 
-+ */ -+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */ -+ -+ err = -EIO; -+ sbinfo->si_dbgaufs_xigen = debugfs_create_file -+ ("xigen", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo, -+ &dbgaufs_xigen_fop); -+ if (sbinfo->si_dbgaufs_xigen) -+ err = 0; -+ -+ return err; -+} -+#else -+static int dbgaufs_xigen_init(struct au_sbinfo *sbinfo) -+{ -+ return 0; -+} -+#endif /* CONFIG_AUFS_EXPORT */ -+ -+/* ---------------------------------------------------------------------- */ -+ -+void dbgaufs_si_fin(struct au_sbinfo *sbinfo) -+{ -+ /* -+ * This function is a dynamic '__fin' function actually, -+ * so the tiny check for si_rwsem is unnecessary. -+ */ -+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */ -+ -+ debugfs_remove_recursive(sbinfo->si_dbgaufs); -+ sbinfo->si_dbgaufs = NULL; -+ kobject_put(&sbinfo->si_kobj); -+} -+ -+int dbgaufs_si_init(struct au_sbinfo *sbinfo) -+{ -+ int err; -+ char name[SysaufsSiNameLen]; -+ -+ /* -+ * This function is a dynamic '__init' function actually, -+ * so the tiny check for si_rwsem is unnecessary. 
-+ */ -+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */ -+ -+ err = -ENOENT; -+ if (!dbgaufs) { -+ AuErr1("/debug/aufs is uninitialized\n"); -+ goto out; -+ } -+ -+ err = -EIO; -+ sysaufs_name(sbinfo, name); -+ sbinfo->si_dbgaufs = debugfs_create_dir(name, dbgaufs); -+ if (unlikely(!sbinfo->si_dbgaufs)) -+ goto out; -+ kobject_get(&sbinfo->si_kobj); -+ -+ sbinfo->si_dbgaufs_xib = debugfs_create_file -+ ("xib", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo, -+ &dbgaufs_xib_fop); -+ if (unlikely(!sbinfo->si_dbgaufs_xib)) -+ goto out_dir; -+ -+ sbinfo->si_dbgaufs_plink = debugfs_create_file -+ ("plink", dbgaufs_mode, sbinfo->si_dbgaufs, sbinfo, -+ &dbgaufs_plink_fop); -+ if (unlikely(!sbinfo->si_dbgaufs_plink)) -+ goto out_dir; -+ -+ err = dbgaufs_xigen_init(sbinfo); -+ if (!err) -+ goto out; /* success */ -+ -+out_dir: -+ dbgaufs_si_fin(sbinfo); -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void dbgaufs_fin(void) -+{ -+ debugfs_remove(dbgaufs); -+} -+ -+int __init dbgaufs_init(void) -+{ -+ int err; -+ -+ err = -EIO; -+ dbgaufs = debugfs_create_dir(AUFS_NAME, NULL); -+ if (dbgaufs) -+ err = 0; -+ return err; -+} -diff --git a/fs/aufs/dbgaufs.h b/fs/aufs/dbgaufs.h -new file mode 100644 -index 0000000..d1e09bd ---- /dev/null -+++ b/fs/aufs/dbgaufs.h -@@ -0,0 +1,48 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * debugfs interface -+ */ -+ -+#ifndef __DBGAUFS_H__ -+#define __DBGAUFS_H__ -+ -+#ifdef __KERNEL__ -+ -+struct super_block; -+struct au_sbinfo; -+ -+#ifdef CONFIG_DEBUG_FS -+/* dbgaufs.c */ -+void dbgaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex); -+void dbgaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex); -+void dbgaufs_si_fin(struct au_sbinfo *sbinfo); -+int dbgaufs_si_init(struct au_sbinfo *sbinfo); -+void dbgaufs_fin(void); -+int __init dbgaufs_init(void); -+#else -+AuStubVoid(dbgaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex) -+AuStubVoid(dbgaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex) -+AuStubVoid(dbgaufs_si_fin, struct au_sbinfo *sbinfo) -+AuStubInt0(dbgaufs_si_init, struct au_sbinfo *sbinfo) -+AuStubVoid(dbgaufs_fin, void) -+AuStubInt0(__init dbgaufs_init, void) -+#endif /* CONFIG_DEBUG_FS */ -+ -+#endif /* __KERNEL__ */ -+#endif /* __DBGAUFS_H__ */ -diff --git a/fs/aufs/dcsub.c b/fs/aufs/dcsub.c -new file mode 100644 -index 0000000..832baa4 ---- /dev/null -+++ b/fs/aufs/dcsub.c -@@ -0,0 +1,224 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * sub-routines for dentry cache -+ */ -+ -+#include "aufs.h" -+ -+static void au_dpage_free(struct au_dpage *dpage) -+{ -+ int i; -+ struct dentry **p; -+ -+ p = dpage->dentries; -+ for (i = 0; i < dpage->ndentry; i++) -+ dput(*p++); -+ free_page((unsigned long)dpage->dentries); -+} -+ -+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp) -+{ -+ int err; -+ void *p; -+ -+ err = -ENOMEM; -+ dpages->dpages = kmalloc(sizeof(*dpages->dpages), gfp); -+ if (unlikely(!dpages->dpages)) -+ goto out; -+ -+ p = (void *)__get_free_page(gfp); -+ if (unlikely(!p)) -+ goto out_dpages; -+ -+ dpages->dpages[0].ndentry = 0; -+ dpages->dpages[0].dentries = p; -+ dpages->ndpage = 1; -+ return 0; /* success */ -+ -+out_dpages: -+ kfree(dpages->dpages); -+out: -+ return err; -+} -+ -+void au_dpages_free(struct au_dcsub_pages *dpages) -+{ -+ int i; -+ struct au_dpage *p; -+ -+ p = dpages->dpages; -+ for (i = 0; i < dpages->ndpage; i++) -+ au_dpage_free(p++); -+ kfree(dpages->dpages); -+} -+ -+static int au_dpages_append(struct au_dcsub_pages *dpages, -+ struct dentry *dentry, gfp_t gfp) -+{ -+ int err, sz; -+ struct au_dpage *dpage; -+ void *p; -+ -+ dpage = dpages->dpages + dpages->ndpage - 1; -+ sz = PAGE_SIZE / sizeof(dentry); -+ if (unlikely(dpage->ndentry >= sz)) { -+ AuLabel(new dpage); -+ err = -ENOMEM; -+ sz = dpages->ndpage * sizeof(*dpages->dpages); -+ p = au_kzrealloc(dpages->dpages, sz, -+ sz + sizeof(*dpages->dpages), gfp); -+ if (unlikely(!p)) -+ goto out; -+ -+ dpages->dpages = p; -+ dpage = dpages->dpages + dpages->ndpage; -+ p = (void *)__get_free_page(gfp); -+ if (unlikely(!p)) -+ goto out; -+ -+ dpage->ndentry = 0; -+ dpage->dentries = p; -+ dpages->ndpage++; -+ } -+ -+ AuDebugOn(au_dcount(dentry) <= 0); -+ dpage->dentries[dpage->ndentry++] = dget_dlock(dentry); -+ return 0; /* success */ -+ -+out: -+ return err; -+} -+ -+/* todo: BAD approach */ -+/* copied from linux/fs/dcache.c */ -+enum d_walk_ret { -+ D_WALK_CONTINUE, -+ D_WALK_QUIT, -+ 
D_WALK_NORETRY, -+ D_WALK_SKIP, -+}; -+ -+extern void d_walk(struct dentry *parent, void *data, -+ enum d_walk_ret (*enter)(void *, struct dentry *), -+ void (*finish)(void *)); -+ -+struct ac_dpages_arg { -+ int err; -+ struct au_dcsub_pages *dpages; -+ struct super_block *sb; -+ au_dpages_test test; -+ void *arg; -+}; -+ -+static enum d_walk_ret au_call_dpages_append(void *_arg, struct dentry *dentry) -+{ -+ enum d_walk_ret ret; -+ struct ac_dpages_arg *arg = _arg; -+ -+ ret = D_WALK_CONTINUE; -+ if (dentry->d_sb == arg->sb -+ && !IS_ROOT(dentry) -+ && au_dcount(dentry) > 0 -+ && au_di(dentry) -+ && (!arg->test || arg->test(dentry, arg->arg))) { -+ arg->err = au_dpages_append(arg->dpages, dentry, GFP_ATOMIC); -+ if (unlikely(arg->err)) -+ ret = D_WALK_QUIT; -+ } -+ -+ return ret; -+} -+ -+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root, -+ au_dpages_test test, void *arg) -+{ -+ struct ac_dpages_arg args = { -+ .err = 0, -+ .dpages = dpages, -+ .sb = root->d_sb, -+ .test = test, -+ .arg = arg -+ }; -+ -+ d_walk(root, &args, au_call_dpages_append, NULL); -+ -+ return args.err; -+} -+ -+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry, -+ int do_include, au_dpages_test test, void *arg) -+{ -+ int err; -+ -+ err = 0; -+ write_seqlock(&rename_lock); -+ spin_lock(&dentry->d_lock); -+ if (do_include -+ && au_dcount(dentry) > 0 -+ && (!test || test(dentry, arg))) -+ err = au_dpages_append(dpages, dentry, GFP_ATOMIC); -+ spin_unlock(&dentry->d_lock); -+ if (unlikely(err)) -+ goto out; -+ -+ /* -+ * RCU for vfsmount is unnecessary since this is a traverse in a single -+ * mount -+ */ -+ while (!IS_ROOT(dentry)) { -+ dentry = dentry->d_parent; /* rename_lock is locked */ -+ spin_lock(&dentry->d_lock); -+ if (au_dcount(dentry) > 0 -+ && (!test || test(dentry, arg))) -+ err = au_dpages_append(dpages, dentry, GFP_ATOMIC); -+ spin_unlock(&dentry->d_lock); -+ if (unlikely(err)) -+ break; -+ } -+ -+out: -+ 
write_sequnlock(&rename_lock); -+ return err; -+} -+ -+static inline int au_dcsub_dpages_aufs(struct dentry *dentry, void *arg) -+{ -+ return au_di(dentry) && dentry->d_sb == arg; -+} -+ -+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages, -+ struct dentry *dentry, int do_include) -+{ -+ return au_dcsub_pages_rev(dpages, dentry, do_include, -+ au_dcsub_dpages_aufs, dentry->d_sb); -+} -+ -+int au_test_subdir(struct dentry *d1, struct dentry *d2) -+{ -+ struct path path[2] = { -+ { -+ .dentry = d1 -+ }, -+ { -+ .dentry = d2 -+ } -+ }; -+ -+ return path_is_under(path + 0, path + 1); -+} -diff --git a/fs/aufs/dcsub.h b/fs/aufs/dcsub.h -new file mode 100644 -index 0000000..7997944 ---- /dev/null -+++ b/fs/aufs/dcsub.h -@@ -0,0 +1,123 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * sub-routines for dentry cache -+ */ -+ -+#ifndef __AUFS_DCSUB_H__ -+#define __AUFS_DCSUB_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+ -+struct au_dpage { -+ int ndentry; -+ struct dentry **dentries; -+}; -+ -+struct au_dcsub_pages { -+ int ndpage; -+ struct au_dpage *dpages; -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* dcsub.c */ -+int au_dpages_init(struct au_dcsub_pages *dpages, gfp_t gfp); -+void au_dpages_free(struct au_dcsub_pages *dpages); -+typedef int (*au_dpages_test)(struct dentry *dentry, void *arg); -+int au_dcsub_pages(struct au_dcsub_pages *dpages, struct dentry *root, -+ au_dpages_test test, void *arg); -+int au_dcsub_pages_rev(struct au_dcsub_pages *dpages, struct dentry *dentry, -+ int do_include, au_dpages_test test, void *arg); -+int au_dcsub_pages_rev_aufs(struct au_dcsub_pages *dpages, -+ struct dentry *dentry, int do_include); -+int au_test_subdir(struct dentry *d1, struct dentry *d2); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * todo: in linux-3.13, several similar (but faster) helpers are added to -+ * include/linux/dcache.h. Try them (in the future). 
-+ */ -+ -+static inline int au_d_hashed_positive(struct dentry *d) -+{ -+ int err; -+ struct inode *inode = d->d_inode; -+ -+ err = 0; -+ if (unlikely(d_unhashed(d) || !inode || !inode->i_nlink)) -+ err = -ENOENT; -+ return err; -+} -+ -+static inline int au_d_linkable(struct dentry *d) -+{ -+ int err; -+ struct inode *inode = d->d_inode; -+ -+ err = au_d_hashed_positive(d); -+ if (err -+ && inode -+ && (inode->i_state & I_LINKABLE)) -+ err = 0; -+ return err; -+} -+ -+static inline int au_d_alive(struct dentry *d) -+{ -+ int err; -+ struct inode *inode; -+ -+ err = 0; -+ if (!IS_ROOT(d)) -+ err = au_d_hashed_positive(d); -+ else { -+ inode = d->d_inode; -+ if (unlikely(d_unlinked(d) || !inode || !inode->i_nlink)) -+ err = -ENOENT; -+ } -+ return err; -+} -+ -+static inline int au_alive_dir(struct dentry *d) -+{ -+ int err; -+ -+ err = au_d_alive(d); -+ if (unlikely(err || IS_DEADDIR(d->d_inode))) -+ err = -ENOENT; -+ return err; -+} -+ -+static inline int au_qstreq(struct qstr *a, struct qstr *b) -+{ -+ return a->len == b->len -+ && !memcmp(a->name, b->name, a->len); -+} -+ -+static inline int au_dcount(struct dentry *d) -+{ -+ return (int)d_count(d); -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_DCSUB_H__ */ -diff --git a/fs/aufs/debug.c b/fs/aufs/debug.c -new file mode 100644 -index 0000000..2747d13 ---- /dev/null -+++ b/fs/aufs/debug.c -@@ -0,0 +1,436 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * debug print functions -+ */ -+ -+#include "aufs.h" -+ -+/* Returns 0, or -errno. arg is in kp->arg. */ -+static int param_atomic_t_set(const char *val, const struct kernel_param *kp) -+{ -+ int err, n; -+ -+ err = kstrtoint(val, 0, &n); -+ if (!err) { -+ if (n > 0) -+ au_debug_on(); -+ else -+ au_debug_off(); -+ } -+ return err; -+} -+ -+/* Returns length written or -errno. Buffer is 4k (ie. be short!) */ -+static int param_atomic_t_get(char *buffer, const struct kernel_param *kp) -+{ -+ atomic_t *a; -+ -+ a = kp->arg; -+ return sprintf(buffer, "%d", atomic_read(a)); -+} -+ -+static struct kernel_param_ops param_ops_atomic_t = { -+ .set = param_atomic_t_set, -+ .get = param_atomic_t_get -+ /* void (*free)(void *arg) */ -+}; -+ -+atomic_t aufs_debug = ATOMIC_INIT(0); -+MODULE_PARM_DESC(debug, "debug print"); -+module_param_named(debug, aufs_debug, atomic_t, S_IRUGO | S_IWUSR | S_IWGRP); -+ -+DEFINE_MUTEX(au_dbg_mtx); /* just to serialize the dbg msgs */ -+char *au_plevel = KERN_DEBUG; -+#define dpri(fmt, ...) 
do { \ -+ if ((au_plevel \ -+ && strcmp(au_plevel, KERN_DEBUG)) \ -+ || au_debug_test()) \ -+ printk("%s" fmt, au_plevel, ##__VA_ARGS__); \ -+} while (0) -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_dpri_whlist(struct au_nhash *whlist) -+{ -+ unsigned long ul, n; -+ struct hlist_head *head; -+ struct au_vdir_wh *pos; -+ -+ n = whlist->nh_num; -+ head = whlist->nh_head; -+ for (ul = 0; ul < n; ul++) { -+ hlist_for_each_entry(pos, head, wh_hash) -+ dpri("b%d, %.*s, %d\n", -+ pos->wh_bindex, -+ pos->wh_str.len, pos->wh_str.name, -+ pos->wh_str.len); -+ head++; -+ } -+} -+ -+void au_dpri_vdir(struct au_vdir *vdir) -+{ -+ unsigned long ul; -+ union au_vdir_deblk_p p; -+ unsigned char *o; -+ -+ if (!vdir || IS_ERR(vdir)) { -+ dpri("err %ld\n", PTR_ERR(vdir)); -+ return; -+ } -+ -+ dpri("deblk %u, nblk %lu, deblk %p, last{%lu, %p}, ver %lu\n", -+ vdir->vd_deblk_sz, vdir->vd_nblk, vdir->vd_deblk, -+ vdir->vd_last.ul, vdir->vd_last.p.deblk, vdir->vd_version); -+ for (ul = 0; ul < vdir->vd_nblk; ul++) { -+ p.deblk = vdir->vd_deblk[ul]; -+ o = p.deblk; -+ dpri("[%lu]: %p\n", ul, o); -+ } -+} -+ -+static int do_pri_inode(aufs_bindex_t bindex, struct inode *inode, int hn, -+ struct dentry *wh) -+{ -+ char *n = NULL; -+ int l = 0; -+ -+ if (!inode || IS_ERR(inode)) { -+ dpri("i%d: err %ld\n", bindex, PTR_ERR(inode)); -+ return -1; -+ } -+ -+ /* the type of i_blocks depends upon CONFIG_LBDAF */ -+ BUILD_BUG_ON(sizeof(inode->i_blocks) != sizeof(unsigned long) -+ && sizeof(inode->i_blocks) != sizeof(u64)); -+ if (wh) { -+ n = (void *)wh->d_name.name; -+ l = wh->d_name.len; -+ } -+ -+ dpri("i%d: %p, i%lu, %s, cnt %d, nl %u, 0%o, sz %llu, blk %llu," -+ " hn %d, ct %lld, np %lu, st 0x%lx, f 0x%x, v %llu, g %x%s%.*s\n", -+ bindex, inode, -+ inode->i_ino, inode->i_sb ? 
au_sbtype(inode->i_sb) : "??", -+ atomic_read(&inode->i_count), inode->i_nlink, inode->i_mode, -+ i_size_read(inode), (unsigned long long)inode->i_blocks, -+ hn, (long long)timespec_to_ns(&inode->i_ctime) & 0x0ffff, -+ inode->i_mapping ? inode->i_mapping->nrpages : 0, -+ inode->i_state, inode->i_flags, inode->i_version, -+ inode->i_generation, -+ l ? ", wh " : "", l, n); -+ return 0; -+} -+ -+void au_dpri_inode(struct inode *inode) -+{ -+ struct au_iinfo *iinfo; -+ aufs_bindex_t bindex; -+ int err, hn; -+ -+ err = do_pri_inode(-1, inode, -1, NULL); -+ if (err || !au_test_aufs(inode->i_sb)) -+ return; -+ -+ iinfo = au_ii(inode); -+ if (!iinfo) -+ return; -+ dpri("i-1: bstart %d, bend %d, gen %d\n", -+ iinfo->ii_bstart, iinfo->ii_bend, au_iigen(inode, NULL)); -+ if (iinfo->ii_bstart < 0) -+ return; -+ hn = 0; -+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; bindex++) { -+ hn = !!au_hn(iinfo->ii_hinode + bindex); -+ do_pri_inode(bindex, iinfo->ii_hinode[0 + bindex].hi_inode, hn, -+ iinfo->ii_hinode[0 + bindex].hi_whdentry); -+ } -+} -+ -+void au_dpri_dalias(struct inode *inode) -+{ -+ struct dentry *d; -+ -+ spin_lock(&inode->i_lock); -+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) -+ au_dpri_dentry(d); -+ spin_unlock(&inode->i_lock); -+} -+ -+static int do_pri_dentry(aufs_bindex_t bindex, struct dentry *dentry) -+{ -+ struct dentry *wh = NULL; -+ int hn; -+ struct au_iinfo *iinfo; -+ -+ if (!dentry || IS_ERR(dentry)) { -+ dpri("d%d: err %ld\n", bindex, PTR_ERR(dentry)); -+ return -1; -+ } -+ /* do not call dget_parent() here */ -+ /* note: access d_xxx without d_lock */ -+ dpri("d%d: %p, %pd2?, %s, cnt %d, flags 0x%x, %shashed\n", -+ bindex, dentry, dentry, -+ dentry->d_sb ? au_sbtype(dentry->d_sb) : "??", -+ au_dcount(dentry), dentry->d_flags, -+ d_unhashed(dentry) ? 
"un" : ""); -+ hn = -1; -+ if (bindex >= 0 && dentry->d_inode && au_test_aufs(dentry->d_sb)) { -+ iinfo = au_ii(dentry->d_inode); -+ if (iinfo) { -+ hn = !!au_hn(iinfo->ii_hinode + bindex); -+ wh = iinfo->ii_hinode[0 + bindex].hi_whdentry; -+ } -+ } -+ do_pri_inode(bindex, dentry->d_inode, hn, wh); -+ return 0; -+} -+ -+void au_dpri_dentry(struct dentry *dentry) -+{ -+ struct au_dinfo *dinfo; -+ aufs_bindex_t bindex; -+ int err; -+ struct au_hdentry *hdp; -+ -+ err = do_pri_dentry(-1, dentry); -+ if (err || !au_test_aufs(dentry->d_sb)) -+ return; -+ -+ dinfo = au_di(dentry); -+ if (!dinfo) -+ return; -+ dpri("d-1: bstart %d, bend %d, bwh %d, bdiropq %d, gen %d, tmp %d\n", -+ dinfo->di_bstart, dinfo->di_bend, -+ dinfo->di_bwh, dinfo->di_bdiropq, au_digen(dentry), -+ dinfo->di_tmpfile); -+ if (dinfo->di_bstart < 0) -+ return; -+ hdp = dinfo->di_hdentry; -+ for (bindex = dinfo->di_bstart; bindex <= dinfo->di_bend; bindex++) -+ do_pri_dentry(bindex, hdp[0 + bindex].hd_dentry); -+} -+ -+static int do_pri_file(aufs_bindex_t bindex, struct file *file) -+{ -+ char a[32]; -+ -+ if (!file || IS_ERR(file)) { -+ dpri("f%d: err %ld\n", bindex, PTR_ERR(file)); -+ return -1; -+ } -+ a[0] = 0; -+ if (bindex < 0 -+ && !IS_ERR_OR_NULL(file->f_dentry) -+ && au_test_aufs(file->f_dentry->d_sb) -+ && au_fi(file)) -+ snprintf(a, sizeof(a), ", gen %d, mmapped %d", -+ au_figen(file), atomic_read(&au_fi(file)->fi_mmapped)); -+ dpri("f%d: mode 0x%x, flags 0%o, cnt %ld, v %llu, pos %llu%s\n", -+ bindex, file->f_mode, file->f_flags, (long)file_count(file), -+ file->f_version, file->f_pos, a); -+ if (!IS_ERR_OR_NULL(file->f_dentry)) -+ do_pri_dentry(bindex, file->f_dentry); -+ return 0; -+} -+ -+void au_dpri_file(struct file *file) -+{ -+ struct au_finfo *finfo; -+ struct au_fidir *fidir; -+ struct au_hfile *hfile; -+ aufs_bindex_t bindex; -+ int err; -+ -+ err = do_pri_file(-1, file); -+ if (err -+ || IS_ERR_OR_NULL(file->f_dentry) -+ || !au_test_aufs(file->f_dentry->d_sb)) -+ return; -+ -+ 
finfo = au_fi(file); -+ if (!finfo) -+ return; -+ if (finfo->fi_btop < 0) -+ return; -+ fidir = finfo->fi_hdir; -+ if (!fidir) -+ do_pri_file(finfo->fi_btop, finfo->fi_htop.hf_file); -+ else -+ for (bindex = finfo->fi_btop; -+ bindex >= 0 && bindex <= fidir->fd_bbot; -+ bindex++) { -+ hfile = fidir->fd_hfile + bindex; -+ do_pri_file(bindex, hfile ? hfile->hf_file : NULL); -+ } -+} -+ -+static int do_pri_br(aufs_bindex_t bindex, struct au_branch *br) -+{ -+ struct vfsmount *mnt; -+ struct super_block *sb; -+ -+ if (!br || IS_ERR(br)) -+ goto out; -+ mnt = au_br_mnt(br); -+ if (!mnt || IS_ERR(mnt)) -+ goto out; -+ sb = mnt->mnt_sb; -+ if (!sb || IS_ERR(sb)) -+ goto out; -+ -+ dpri("s%d: {perm 0x%x, id %d, cnt %d, wbr %p}, " -+ "%s, dev 0x%02x%02x, flags 0x%lx, cnt %d, active %d, " -+ "xino %d\n", -+ bindex, br->br_perm, br->br_id, atomic_read(&br->br_count), -+ br->br_wbr, au_sbtype(sb), MAJOR(sb->s_dev), MINOR(sb->s_dev), -+ sb->s_flags, sb->s_count, -+ atomic_read(&sb->s_active), !!br->br_xino.xi_file); -+ return 0; -+ -+out: -+ dpri("s%d: err %ld\n", bindex, PTR_ERR(br)); -+ return -1; -+} -+ -+void au_dpri_sb(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ aufs_bindex_t bindex; -+ int err; -+ /* to reuduce stack size */ -+ struct { -+ struct vfsmount mnt; -+ struct au_branch fake; -+ } *a; -+ -+ /* this function can be called from magic sysrq */ -+ a = kzalloc(sizeof(*a), GFP_ATOMIC); -+ if (unlikely(!a)) { -+ dpri("no memory\n"); -+ return; -+ } -+ -+ a->mnt.mnt_sb = sb; -+ a->fake.br_path.mnt = &a->mnt; -+ atomic_set(&a->fake.br_count, 0); -+ smp_mb(); /* atomic_set */ -+ err = do_pri_br(-1, &a->fake); -+ kfree(a); -+ dpri("dev 0x%x\n", sb->s_dev); -+ if (err || !au_test_aufs(sb)) -+ return; -+ -+ sbinfo = au_sbi(sb); -+ if (!sbinfo) -+ return; -+ dpri("nw %d, gen %u, kobj %d\n", -+ atomic_read(&sbinfo->si_nowait.nw_len), sbinfo->si_generation, -+ atomic_read(&sbinfo->si_kobj.kref.refcount)); -+ for (bindex = 0; bindex <= sbinfo->si_bend; bindex++) 
-+ do_pri_br(bindex, sbinfo->si_branch[0 + bindex]); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line) -+{ -+ struct inode *h_inode, *inode = dentry->d_inode; -+ struct dentry *h_dentry; -+ aufs_bindex_t bindex, bend, bi; -+ -+ if (!inode /* || au_di(dentry)->di_lsc == AuLsc_DI_TMP */) -+ return; -+ -+ bend = au_dbend(dentry); -+ bi = au_ibend(inode); -+ if (bi < bend) -+ bend = bi; -+ bindex = au_dbstart(dentry); -+ bi = au_ibstart(inode); -+ if (bi > bindex) -+ bindex = bi; -+ -+ for (; bindex <= bend; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ h_inode = au_h_iptr(inode, bindex); -+ if (unlikely(h_inode != h_dentry->d_inode)) { -+ au_debug_on(); -+ AuDbg("b%d, %s:%d\n", bindex, func, line); -+ AuDbgDentry(dentry); -+ AuDbgInode(inode); -+ au_debug_off(); -+ BUG(); -+ } -+ } -+} -+ -+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen) -+{ -+ int err, i, j; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct dentry **dentries; -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ AuDebugOn(err); -+ err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/1); -+ AuDebugOn(err); -+ for (i = dpages.ndpage - 1; !err && i >= 0; i--) { -+ dpage = dpages.dpages + i; -+ dentries = dpage->dentries; -+ for (j = dpage->ndentry - 1; !err && j >= 0; j--) -+ AuDebugOn(au_digen_test(dentries[j], sigen)); -+ } -+ au_dpages_free(&dpages); -+} -+ -+void au_dbg_verify_kthread(void) -+{ -+ if (au_wkq_test()) { -+ au_dbg_blocked(); -+ /* -+ * It may be recursive, but udba=notify between two aufs mounts, -+ * where a single ro branch is shared, is not a problem. 
-+ */ -+ /* WARN_ON(1); */ -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int __init au_debug_init(void) -+{ -+ aufs_bindex_t bindex; -+ struct au_vdir_destr destr; -+ -+ bindex = -1; -+ AuDebugOn(bindex >= 0); -+ -+ destr.len = -1; -+ AuDebugOn(destr.len < NAME_MAX); -+ -+#ifdef CONFIG_4KSTACKS -+ pr_warn("CONFIG_4KSTACKS is defined.\n"); -+#endif -+ -+ return 0; -+} -diff --git a/fs/aufs/debug.h b/fs/aufs/debug.h -new file mode 100644 -index 0000000..039e6f8 ---- /dev/null -+++ b/fs/aufs/debug.h -@@ -0,0 +1,228 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * debug print functions -+ */ -+ -+#ifndef __AUFS_DEBUG_H__ -+#define __AUFS_DEBUG_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+#include -+#include -+ -+#ifdef CONFIG_AUFS_DEBUG -+#define AuDebugOn(a) BUG_ON(a) -+ -+/* module parameter */ -+extern atomic_t aufs_debug; -+static inline void au_debug_on(void) -+{ -+ atomic_inc(&aufs_debug); -+} -+static inline void au_debug_off(void) -+{ -+ atomic_dec_if_positive(&aufs_debug); -+} -+ -+static inline int au_debug_test(void) -+{ -+ return atomic_read(&aufs_debug) > 0; -+} -+#else -+#define AuDebugOn(a) do {} while (0) -+AuStubVoid(au_debug_on, void) -+AuStubVoid(au_debug_off, void) -+AuStubInt0(au_debug_test, void) -+#endif /* CONFIG_AUFS_DEBUG */ -+ -+#define param_check_atomic_t(name, p) __param_check(name, p, atomic_t) -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* debug print */ -+ -+#define AuDbg(fmt, ...) do { \ -+ if (au_debug_test()) \ -+ pr_debug("DEBUG: " fmt, ##__VA_ARGS__); \ -+} while (0) -+#define AuLabel(l) AuDbg(#l "\n") -+#define AuIOErr(fmt, ...) pr_err("I/O Error, " fmt, ##__VA_ARGS__) -+#define AuWarn1(fmt, ...) do { \ -+ static unsigned char _c; \ -+ if (!_c++) \ -+ pr_warn(fmt, ##__VA_ARGS__); \ -+} while (0) -+ -+#define AuErr1(fmt, ...) do { \ -+ static unsigned char _c; \ -+ if (!_c++) \ -+ pr_err(fmt, ##__VA_ARGS__); \ -+} while (0) -+ -+#define AuIOErr1(fmt, ...) do { \ -+ static unsigned char _c; \ -+ if (!_c++) \ -+ AuIOErr(fmt, ##__VA_ARGS__); \ -+} while (0) -+ -+#define AuUnsupportMsg "This operation is not supported." \ -+ " Please report this application to aufs-users ML." -+#define AuUnsupport(fmt, ...) 
do { \ -+ pr_err(AuUnsupportMsg "\n" fmt, ##__VA_ARGS__); \ -+ dump_stack(); \ -+} while (0) -+ -+#define AuTraceErr(e) do { \ -+ if (unlikely((e) < 0)) \ -+ AuDbg("err %d\n", (int)(e)); \ -+} while (0) -+ -+#define AuTraceErrPtr(p) do { \ -+ if (IS_ERR(p)) \ -+ AuDbg("err %ld\n", PTR_ERR(p)); \ -+} while (0) -+ -+/* dirty macros for debug print, use with "%.*s" and caution */ -+#define AuLNPair(qstr) (qstr)->len, (qstr)->name -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct dentry; -+#ifdef CONFIG_AUFS_DEBUG -+extern struct mutex au_dbg_mtx; -+extern char *au_plevel; -+struct au_nhash; -+void au_dpri_whlist(struct au_nhash *whlist); -+struct au_vdir; -+void au_dpri_vdir(struct au_vdir *vdir); -+struct inode; -+void au_dpri_inode(struct inode *inode); -+void au_dpri_dalias(struct inode *inode); -+void au_dpri_dentry(struct dentry *dentry); -+struct file; -+void au_dpri_file(struct file *filp); -+struct super_block; -+void au_dpri_sb(struct super_block *sb); -+ -+#define au_dbg_verify_dinode(d) __au_dbg_verify_dinode(d, __func__, __LINE__) -+void __au_dbg_verify_dinode(struct dentry *dentry, const char *func, int line); -+void au_dbg_verify_gen(struct dentry *parent, unsigned int sigen); -+void au_dbg_verify_kthread(void); -+ -+int __init au_debug_init(void); -+ -+#define AuDbgWhlist(w) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#w "\n"); \ -+ au_dpri_whlist(w); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgVdir(v) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#v "\n"); \ -+ au_dpri_vdir(v); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgInode(i) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#i "\n"); \ -+ au_dpri_inode(i); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgDAlias(i) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#i "\n"); \ -+ au_dpri_dalias(i); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgDentry(d) do { \ -+ 
mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#d "\n"); \ -+ au_dpri_dentry(d); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgFile(f) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#f "\n"); \ -+ au_dpri_file(f); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgSb(sb) do { \ -+ mutex_lock(&au_dbg_mtx); \ -+ AuDbg(#sb "\n"); \ -+ au_dpri_sb(sb); \ -+ mutex_unlock(&au_dbg_mtx); \ -+} while (0) -+ -+#define AuDbgSym(addr) do { \ -+ char sym[KSYM_SYMBOL_LEN]; \ -+ sprint_symbol(sym, (unsigned long)addr); \ -+ AuDbg("%s\n", sym); \ -+} while (0) -+#else -+AuStubVoid(au_dbg_verify_dinode, struct dentry *dentry) -+AuStubVoid(au_dbg_verify_dir_parent, struct dentry *dentry, unsigned int sigen) -+AuStubVoid(au_dbg_verify_nondir_parent, struct dentry *dentry, -+ unsigned int sigen) -+AuStubVoid(au_dbg_verify_gen, struct dentry *parent, unsigned int sigen) -+AuStubVoid(au_dbg_verify_kthread, void) -+AuStubInt0(__init au_debug_init, void) -+ -+#define AuDbgWhlist(w) do {} while (0) -+#define AuDbgVdir(v) do {} while (0) -+#define AuDbgInode(i) do {} while (0) -+#define AuDbgDAlias(i) do {} while (0) -+#define AuDbgDentry(d) do {} while (0) -+#define AuDbgFile(f) do {} while (0) -+#define AuDbgSb(sb) do {} while (0) -+#define AuDbgSym(addr) do {} while (0) -+#endif /* CONFIG_AUFS_DEBUG */ -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_MAGIC_SYSRQ -+int __init au_sysrq_init(void); -+void au_sysrq_fin(void); -+ -+#ifdef CONFIG_HW_CONSOLE -+#define au_dbg_blocked() do { \ -+ WARN_ON(1); \ -+ handle_sysrq('w'); \ -+} while (0) -+#else -+AuStubVoid(au_dbg_blocked, void) -+#endif -+ -+#else -+AuStubInt0(__init au_sysrq_init, void) -+AuStubVoid(au_sysrq_fin, void) -+AuStubVoid(au_dbg_blocked, void) -+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */ -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_DEBUG_H__ */ -diff --git a/fs/aufs/dentry.c b/fs/aufs/dentry.c -new file mode 100644 -index 0000000..ed56947 ---- 
/dev/null -+++ b/fs/aufs/dentry.c -@@ -0,0 +1,1129 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * lookup and dentry operations -+ */ -+ -+#include -+#include "aufs.h" -+ -+#define AuLkup_ALLOW_NEG 1 -+#define AuLkup_IGNORE_PERM (1 << 1) -+#define au_ftest_lkup(flags, name) ((flags) & AuLkup_##name) -+#define au_fset_lkup(flags, name) \ -+ do { (flags) |= AuLkup_##name; } while (0) -+#define au_fclr_lkup(flags, name) \ -+ do { (flags) &= ~AuLkup_##name; } while (0) -+ -+struct au_do_lookup_args { -+ unsigned int flags; -+ mode_t type; -+}; -+ -+/* -+ * returns positive/negative dentry, NULL or an error. -+ * NULL means whiteout-ed or not-found. 
-+ */ -+static struct dentry* -+au_do_lookup(struct dentry *h_parent, struct dentry *dentry, -+ aufs_bindex_t bindex, struct qstr *wh_name, -+ struct au_do_lookup_args *args) -+{ -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ struct au_branch *br; -+ int wh_found, opq; -+ unsigned char wh_able; -+ const unsigned char allow_neg = !!au_ftest_lkup(args->flags, ALLOW_NEG); -+ const unsigned char ignore_perm = !!au_ftest_lkup(args->flags, -+ IGNORE_PERM); -+ -+ wh_found = 0; -+ br = au_sbr(dentry->d_sb, bindex); -+ wh_able = !!au_br_whable(br->br_perm); -+ if (wh_able) -+ wh_found = au_wh_test(h_parent, wh_name, /*try_sio*/0); -+ h_dentry = ERR_PTR(wh_found); -+ if (!wh_found) -+ goto real_lookup; -+ if (unlikely(wh_found < 0)) -+ goto out; -+ -+ /* We found a whiteout */ -+ /* au_set_dbend(dentry, bindex); */ -+ au_set_dbwh(dentry, bindex); -+ if (!allow_neg) -+ return NULL; /* success */ -+ -+real_lookup: -+ if (!ignore_perm) -+ h_dentry = vfsub_lkup_one(&dentry->d_name, h_parent); -+ else -+ h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent); -+ if (IS_ERR(h_dentry)) { -+ if (PTR_ERR(h_dentry) == -ENAMETOOLONG -+ && !allow_neg) -+ h_dentry = NULL; -+ goto out; -+ } -+ -+ h_inode = h_dentry->d_inode; -+ if (!h_inode) { -+ if (!allow_neg) -+ goto out_neg; -+ } else if (wh_found -+ || (args->type && args->type != (h_inode->i_mode & S_IFMT))) -+ goto out_neg; -+ -+ if (au_dbend(dentry) <= bindex) -+ au_set_dbend(dentry, bindex); -+ if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry)) -+ au_set_dbstart(dentry, bindex); -+ au_set_h_dptr(dentry, bindex, h_dentry); -+ -+ if (!d_is_dir(h_dentry) -+ || !wh_able -+ || (d_is_positive(dentry) && !d_is_dir(dentry))) -+ goto out; /* success */ -+ -+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD); -+ opq = au_diropq_test(h_dentry); -+ mutex_unlock(&h_inode->i_mutex); -+ if (opq > 0) -+ au_set_dbdiropq(dentry, bindex); -+ else if (unlikely(opq < 0)) { -+ au_set_h_dptr(dentry, bindex, NULL); -+ h_dentry = 
ERR_PTR(opq); -+ } -+ goto out; -+ -+out_neg: -+ dput(h_dentry); -+ h_dentry = NULL; -+out: -+ return h_dentry; -+} -+ -+static int au_test_shwh(struct super_block *sb, const struct qstr *name) -+{ -+ if (unlikely(!au_opt_test(au_mntflags(sb), SHWH) -+ && !strncmp(name->name, AUFS_WH_PFX, AUFS_WH_PFX_LEN))) -+ return -EPERM; -+ return 0; -+} -+ -+/* -+ * returns the number of lower positive dentries, -+ * otherwise an error. -+ * can be called at unlinking with @type is zero. -+ */ -+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type) -+{ -+ int npositive, err; -+ aufs_bindex_t bindex, btail, bdiropq; -+ unsigned char isdir, dirperm1; -+ struct qstr whname; -+ struct au_do_lookup_args args = { -+ .flags = 0, -+ .type = type -+ }; -+ const struct qstr *name = &dentry->d_name; -+ struct dentry *parent; -+ struct inode *inode; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ err = au_test_shwh(sb, name); -+ if (unlikely(err)) -+ goto out; -+ -+ err = au_wh_name_alloc(&whname, name); -+ if (unlikely(err)) -+ goto out; -+ -+ inode = dentry->d_inode; -+ isdir = !!d_is_dir(dentry); -+ if (!type) -+ au_fset_lkup(args.flags, ALLOW_NEG); -+ dirperm1 = !!au_opt_test(au_mntflags(sb), DIRPERM1); -+ -+ npositive = 0; -+ parent = dget_parent(dentry); -+ btail = au_dbtaildir(parent); -+ for (bindex = bstart; bindex <= btail; bindex++) { -+ struct dentry *h_parent, *h_dentry; -+ struct inode *h_inode, *h_dir; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry) { -+ if (h_dentry->d_inode) -+ npositive++; -+ if (type != S_IFDIR) -+ break; -+ continue; -+ } -+ h_parent = au_h_dptr(parent, bindex); -+ if (!h_parent || !d_is_dir(h_parent)) -+ continue; -+ -+ h_dir = h_parent->d_inode; -+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT); -+ h_dentry = au_do_lookup(h_parent, dentry, bindex, &whname, -+ &args); -+ mutex_unlock(&h_dir->i_mutex); -+ err = PTR_ERR(h_dentry); -+ if (IS_ERR(h_dentry)) -+ goto out_parent; -+ if (h_dentry) -+ 
au_fclr_lkup(args.flags, ALLOW_NEG); -+ if (dirperm1) -+ au_fset_lkup(args.flags, IGNORE_PERM); -+ -+ if (au_dbwh(dentry) == bindex) -+ break; -+ if (!h_dentry) -+ continue; -+ h_inode = h_dentry->d_inode; -+ if (!h_inode) -+ continue; -+ npositive++; -+ if (!args.type) -+ args.type = h_inode->i_mode & S_IFMT; -+ if (args.type != S_IFDIR) -+ break; -+ else if (isdir) { -+ /* the type of lower may be different */ -+ bdiropq = au_dbdiropq(dentry); -+ if (bdiropq >= 0 && bdiropq <= bindex) -+ break; -+ } -+ } -+ -+ if (npositive) { -+ AuLabel(positive); -+ au_update_dbstart(dentry); -+ } -+ err = npositive; -+ if (unlikely(!au_opt_test(au_mntflags(sb), UDBA_NONE) -+ && au_dbstart(dentry) < 0)) { -+ err = -EIO; -+ AuIOErr("both of real entry and whiteout found, %pd, err %d\n", -+ dentry, err); -+ } -+ -+out_parent: -+ dput(parent); -+ kfree(whname.name); -+out: -+ return err; -+} -+ -+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent) -+{ -+ struct dentry *dentry; -+ int wkq_err; -+ -+ if (!au_test_h_perm_sio(parent->d_inode, MAY_EXEC)) -+ dentry = vfsub_lkup_one(name, parent); -+ else { -+ struct vfsub_lkup_one_args args = { -+ .errp = &dentry, -+ .name = name, -+ .parent = parent -+ }; -+ -+ wkq_err = au_wkq_wait(vfsub_call_lkup_one, &args); -+ if (unlikely(wkq_err)) -+ dentry = ERR_PTR(wkq_err); -+ } -+ -+ return dentry; -+} -+ -+/* -+ * lookup @dentry on @bindex which should be negative. 
-+ */ -+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh) -+{ -+ int err; -+ struct dentry *parent, *h_parent, *h_dentry; -+ struct au_branch *br; -+ -+ parent = dget_parent(dentry); -+ h_parent = au_h_dptr(parent, bindex); -+ br = au_sbr(dentry->d_sb, bindex); -+ if (wh) -+ h_dentry = au_whtmp_lkup(h_parent, br, &dentry->d_name); -+ else -+ h_dentry = au_sio_lkup_one(&dentry->d_name, h_parent); -+ err = PTR_ERR(h_dentry); -+ if (IS_ERR(h_dentry)) -+ goto out; -+ if (unlikely(h_dentry->d_inode)) { -+ err = -EIO; -+ AuIOErr("%pd should be negative on b%d.\n", h_dentry, bindex); -+ dput(h_dentry); -+ goto out; -+ } -+ -+ err = 0; -+ if (bindex < au_dbstart(dentry)) -+ au_set_dbstart(dentry, bindex); -+ if (au_dbend(dentry) < bindex) -+ au_set_dbend(dentry, bindex); -+ au_set_h_dptr(dentry, bindex, h_dentry); -+ -+out: -+ dput(parent); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* subset of struct inode */ -+struct au_iattr { -+ unsigned long i_ino; -+ /* unsigned int i_nlink; */ -+ kuid_t i_uid; -+ kgid_t i_gid; -+ u64 i_version; -+/* -+ loff_t i_size; -+ blkcnt_t i_blocks; -+*/ -+ umode_t i_mode; -+}; -+ -+static void au_iattr_save(struct au_iattr *ia, struct inode *h_inode) -+{ -+ ia->i_ino = h_inode->i_ino; -+ /* ia->i_nlink = h_inode->i_nlink; */ -+ ia->i_uid = h_inode->i_uid; -+ ia->i_gid = h_inode->i_gid; -+ ia->i_version = h_inode->i_version; -+/* -+ ia->i_size = h_inode->i_size; -+ ia->i_blocks = h_inode->i_blocks; -+*/ -+ ia->i_mode = (h_inode->i_mode & S_IFMT); -+} -+ -+static int au_iattr_test(struct au_iattr *ia, struct inode *h_inode) -+{ -+ return ia->i_ino != h_inode->i_ino -+ /* || ia->i_nlink != h_inode->i_nlink */ -+ || !uid_eq(ia->i_uid, h_inode->i_uid) -+ || !gid_eq(ia->i_gid, h_inode->i_gid) -+ || ia->i_version != h_inode->i_version -+/* -+ || ia->i_size != h_inode->i_size -+ || ia->i_blocks != h_inode->i_blocks -+*/ -+ || ia->i_mode != (h_inode->i_mode & S_IFMT); 
-+} -+ -+static int au_h_verify_dentry(struct dentry *h_dentry, struct dentry *h_parent, -+ struct au_branch *br) -+{ -+ int err; -+ struct au_iattr ia; -+ struct inode *h_inode; -+ struct dentry *h_d; -+ struct super_block *h_sb; -+ -+ err = 0; -+ memset(&ia, -1, sizeof(ia)); -+ h_sb = h_dentry->d_sb; -+ h_inode = h_dentry->d_inode; -+ if (h_inode) -+ au_iattr_save(&ia, h_inode); -+ else if (au_test_nfs(h_sb) || au_test_fuse(h_sb)) -+ /* nfs d_revalidate may return 0 for negative dentry */ -+ /* fuse d_revalidate always return 0 for negative dentry */ -+ goto out; -+ -+ /* main purpose is namei.c:cached_lookup() and d_revalidate */ -+ h_d = vfsub_lkup_one(&h_dentry->d_name, h_parent); -+ err = PTR_ERR(h_d); -+ if (IS_ERR(h_d)) -+ goto out; -+ -+ err = 0; -+ if (unlikely(h_d != h_dentry -+ || h_d->d_inode != h_inode -+ || (h_inode && au_iattr_test(&ia, h_inode)))) -+ err = au_busy_or_stale(); -+ dput(h_d); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir, -+ struct dentry *h_parent, struct au_branch *br) -+{ -+ int err; -+ -+ err = 0; -+ if (udba == AuOpt_UDBA_REVAL -+ && !au_test_fs_remote(h_dentry->d_sb)) { -+ IMustLock(h_dir); -+ err = (h_dentry->d_parent->d_inode != h_dir); -+ } else if (udba != AuOpt_UDBA_NONE) -+ err = au_h_verify_dentry(h_dentry, h_parent, br); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_do_refresh_hdentry(struct dentry *dentry, struct dentry *parent) -+{ -+ int err; -+ aufs_bindex_t new_bindex, bindex, bend, bwh, bdiropq; -+ struct au_hdentry tmp, *p, *q; -+ struct au_dinfo *dinfo; -+ struct super_block *sb; -+ -+ DiMustWriteLock(dentry); -+ -+ sb = dentry->d_sb; -+ dinfo = au_di(dentry); -+ bend = dinfo->di_bend; -+ bwh = dinfo->di_bwh; -+ bdiropq = dinfo->di_bdiropq; -+ p = dinfo->di_hdentry + dinfo->di_bstart; -+ for (bindex = dinfo->di_bstart; bindex <= bend; bindex++, p++) { 
-+ if (!p->hd_dentry) -+ continue; -+ -+ new_bindex = au_br_index(sb, p->hd_id); -+ if (new_bindex == bindex) -+ continue; -+ -+ if (dinfo->di_bwh == bindex) -+ bwh = new_bindex; -+ if (dinfo->di_bdiropq == bindex) -+ bdiropq = new_bindex; -+ if (new_bindex < 0) { -+ au_hdput(p); -+ p->hd_dentry = NULL; -+ continue; -+ } -+ -+ /* swap two lower dentries, and loop again */ -+ q = dinfo->di_hdentry + new_bindex; -+ tmp = *q; -+ *q = *p; -+ *p = tmp; -+ if (tmp.hd_dentry) { -+ bindex--; -+ p--; -+ } -+ } -+ -+ dinfo->di_bwh = -1; -+ if (bwh >= 0 && bwh <= au_sbend(sb) && au_sbr_whable(sb, bwh)) -+ dinfo->di_bwh = bwh; -+ -+ dinfo->di_bdiropq = -1; -+ if (bdiropq >= 0 -+ && bdiropq <= au_sbend(sb) -+ && au_sbr_whable(sb, bdiropq)) -+ dinfo->di_bdiropq = bdiropq; -+ -+ err = -EIO; -+ dinfo->di_bstart = -1; -+ dinfo->di_bend = -1; -+ bend = au_dbend(parent); -+ p = dinfo->di_hdentry; -+ for (bindex = 0; bindex <= bend; bindex++, p++) -+ if (p->hd_dentry) { -+ dinfo->di_bstart = bindex; -+ break; -+ } -+ -+ if (dinfo->di_bstart >= 0) { -+ p = dinfo->di_hdentry + bend; -+ for (bindex = bend; bindex >= 0; bindex--, p--) -+ if (p->hd_dentry) { -+ dinfo->di_bend = bindex; -+ err = 0; -+ break; -+ } -+ } -+ -+ return err; -+} -+ -+static void au_do_hide(struct dentry *dentry) -+{ -+ struct inode *inode; -+ -+ inode = dentry->d_inode; -+ if (inode) { -+ if (!S_ISDIR(inode->i_mode)) { -+ if (inode->i_nlink && !d_unhashed(dentry)) -+ drop_nlink(inode); -+ } else { -+ clear_nlink(inode); -+ /* stop next lookup */ -+ inode->i_flags |= S_DEAD; -+ } -+ smp_mb(); /* necessary? 
*/ -+ } -+ d_drop(dentry); -+} -+ -+static int au_hide_children(struct dentry *parent) -+{ -+ int err, i, j, ndentry; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct dentry *dentry; -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_dcsub_pages(&dpages, parent, NULL, NULL); -+ if (unlikely(err)) -+ goto out_dpages; -+ -+ /* in reverse order */ -+ for (i = dpages.ndpage - 1; i >= 0; i--) { -+ dpage = dpages.dpages + i; -+ ndentry = dpage->ndentry; -+ for (j = ndentry - 1; j >= 0; j--) { -+ dentry = dpage->dentries[j]; -+ if (dentry != parent) -+ au_do_hide(dentry); -+ } -+ } -+ -+out_dpages: -+ au_dpages_free(&dpages); -+out: -+ return err; -+} -+ -+static void au_hide(struct dentry *dentry) -+{ -+ int err; -+ -+ AuDbgDentry(dentry); -+ if (d_is_dir(dentry)) { -+ /* shrink_dcache_parent(dentry); */ -+ err = au_hide_children(dentry); -+ if (unlikely(err)) -+ AuIOErr("%pd, failed hiding children, ignored %d\n", -+ dentry, err); -+ } -+ au_do_hide(dentry); -+} -+ -+/* -+ * By adding a dirty branch, a cached dentry may be affected in various ways. 
-+ * -+ * a dirty branch is added -+ * - on the top of layers -+ * - in the middle of layers -+ * - to the bottom of layers -+ * -+ * on the added branch there exists -+ * - a whiteout -+ * - a diropq -+ * - a same named entry -+ * + exist -+ * * negative --> positive -+ * * positive --> positive -+ * - type is unchanged -+ * - type is changed -+ * + doesn't exist -+ * * negative --> negative -+ * * positive --> negative (rejected by au_br_del() for non-dir case) -+ * - none -+ */ -+static int au_refresh_by_dinfo(struct dentry *dentry, struct au_dinfo *dinfo, -+ struct au_dinfo *tmp) -+{ -+ int err; -+ aufs_bindex_t bindex, bend; -+ struct { -+ struct dentry *dentry; -+ struct inode *inode; -+ mode_t mode; -+ } orig_h, tmp_h = { -+ .dentry = NULL -+ }; -+ struct au_hdentry *hd; -+ struct inode *inode, *h_inode; -+ struct dentry *h_dentry; -+ -+ err = 0; -+ AuDebugOn(dinfo->di_bstart < 0); -+ orig_h.dentry = dinfo->di_hdentry[dinfo->di_bstart].hd_dentry; -+ orig_h.inode = orig_h.dentry->d_inode; -+ orig_h.mode = 0; -+ if (orig_h.inode) -+ orig_h.mode = orig_h.inode->i_mode & S_IFMT; -+ if (tmp->di_bstart >= 0) { -+ tmp_h.dentry = tmp->di_hdentry[tmp->di_bstart].hd_dentry; -+ tmp_h.inode = tmp_h.dentry->d_inode; -+ if (tmp_h.inode) -+ tmp_h.mode = tmp_h.inode->i_mode & S_IFMT; -+ } -+ -+ inode = dentry->d_inode; -+ if (!orig_h.inode) { -+ AuDbg("nagative originally\n"); -+ if (inode) { -+ au_hide(dentry); -+ goto out; -+ } -+ AuDebugOn(inode); -+ AuDebugOn(dinfo->di_bstart != dinfo->di_bend); -+ AuDebugOn(dinfo->di_bdiropq != -1); -+ -+ if (!tmp_h.inode) { -+ AuDbg("negative --> negative\n"); -+ /* should have only one negative lower */ -+ if (tmp->di_bstart >= 0 -+ && tmp->di_bstart < dinfo->di_bstart) { -+ AuDebugOn(tmp->di_bstart != tmp->di_bend); -+ AuDebugOn(dinfo->di_bstart != dinfo->di_bend); -+ au_set_h_dptr(dentry, dinfo->di_bstart, NULL); -+ au_di_cp(dinfo, tmp); -+ hd = tmp->di_hdentry + tmp->di_bstart; -+ au_set_h_dptr(dentry, tmp->di_bstart, -+ 
dget(hd->hd_dentry)); -+ } -+ au_dbg_verify_dinode(dentry); -+ } else { -+ AuDbg("negative --> positive\n"); -+ /* -+ * similar to the behaviour of creating with bypassing -+ * aufs. -+ * unhash it in order to force an error in the -+ * succeeding create operation. -+ * we should not set S_DEAD here. -+ */ -+ d_drop(dentry); -+ /* au_di_swap(tmp, dinfo); */ -+ au_dbg_verify_dinode(dentry); -+ } -+ } else { -+ AuDbg("positive originally\n"); -+ /* inode may be NULL */ -+ AuDebugOn(inode && (inode->i_mode & S_IFMT) != orig_h.mode); -+ if (!tmp_h.inode) { -+ AuDbg("positive --> negative\n"); -+ /* or bypassing aufs */ -+ au_hide(dentry); -+ if (tmp->di_bwh >= 0 && tmp->di_bwh <= dinfo->di_bstart) -+ dinfo->di_bwh = tmp->di_bwh; -+ if (inode) -+ err = au_refresh_hinode_self(inode); -+ au_dbg_verify_dinode(dentry); -+ } else if (orig_h.mode == tmp_h.mode) { -+ AuDbg("positive --> positive, same type\n"); -+ if (!S_ISDIR(orig_h.mode) -+ && dinfo->di_bstart > tmp->di_bstart) { -+ /* -+ * similar to the behaviour of removing and -+ * creating. 
-+ */ -+ au_hide(dentry); -+ if (inode) -+ err = au_refresh_hinode_self(inode); -+ au_dbg_verify_dinode(dentry); -+ } else { -+ /* fill empty slots */ -+ if (dinfo->di_bstart > tmp->di_bstart) -+ dinfo->di_bstart = tmp->di_bstart; -+ if (dinfo->di_bend < tmp->di_bend) -+ dinfo->di_bend = tmp->di_bend; -+ dinfo->di_bwh = tmp->di_bwh; -+ dinfo->di_bdiropq = tmp->di_bdiropq; -+ hd = tmp->di_hdentry; -+ bend = dinfo->di_bend; -+ for (bindex = tmp->di_bstart; bindex <= bend; -+ bindex++) { -+ if (au_h_dptr(dentry, bindex)) -+ continue; -+ h_dentry = hd[bindex].hd_dentry; -+ if (!h_dentry) -+ continue; -+ h_inode = h_dentry->d_inode; -+ AuDebugOn(!h_inode); -+ AuDebugOn(orig_h.mode -+ != (h_inode->i_mode -+ & S_IFMT)); -+ au_set_h_dptr(dentry, bindex, -+ dget(h_dentry)); -+ } -+ err = au_refresh_hinode(inode, dentry); -+ au_dbg_verify_dinode(dentry); -+ } -+ } else { -+ AuDbg("positive --> positive, different type\n"); -+ /* similar to the behaviour of removing and creating */ -+ au_hide(dentry); -+ if (inode) -+ err = au_refresh_hinode_self(inode); -+ au_dbg_verify_dinode(dentry); -+ } -+ } -+ -+out: -+ return err; -+} -+ -+void au_refresh_dop(struct dentry *dentry, int force_reval) -+{ -+ const struct dentry_operations *dop -+ = force_reval ? 
&aufs_dop : dentry->d_sb->s_d_op; -+ static const unsigned int mask -+ = DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE; -+ -+ BUILD_BUG_ON(sizeof(mask) != sizeof(dentry->d_flags)); -+ -+ if (dentry->d_op == dop) -+ return; -+ -+ AuDbg("%pd\n", dentry); -+ spin_lock(&dentry->d_lock); -+ if (dop == &aufs_dop) -+ dentry->d_flags |= mask; -+ else -+ dentry->d_flags &= ~mask; -+ dentry->d_op = dop; -+ spin_unlock(&dentry->d_lock); -+} -+ -+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent) -+{ -+ int err, ebrange; -+ unsigned int sigen; -+ struct au_dinfo *dinfo, *tmp; -+ struct super_block *sb; -+ struct inode *inode; -+ -+ DiMustWriteLock(dentry); -+ AuDebugOn(IS_ROOT(dentry)); -+ AuDebugOn(!parent->d_inode); -+ -+ sb = dentry->d_sb; -+ inode = dentry->d_inode; -+ sigen = au_sigen(sb); -+ err = au_digen_test(parent, sigen); -+ if (unlikely(err)) -+ goto out; -+ -+ dinfo = au_di(dentry); -+ err = au_di_realloc(dinfo, au_sbend(sb) + 1); -+ if (unlikely(err)) -+ goto out; -+ ebrange = au_dbrange_test(dentry); -+ if (!ebrange) -+ ebrange = au_do_refresh_hdentry(dentry, parent); -+ -+ if (d_unhashed(dentry) || ebrange /* || dinfo->di_tmpfile */) { -+ AuDebugOn(au_dbstart(dentry) < 0 && au_dbend(dentry) >= 0); -+ if (inode) -+ err = au_refresh_hinode_self(inode); -+ au_dbg_verify_dinode(dentry); -+ if (!err) -+ goto out_dgen; /* success */ -+ goto out; -+ } -+ -+ /* temporary dinfo */ -+ AuDbgDentry(dentry); -+ err = -ENOMEM; -+ tmp = au_di_alloc(sb, AuLsc_DI_TMP); -+ if (unlikely(!tmp)) -+ goto out; -+ au_di_swap(tmp, dinfo); -+ /* returns the number of positive dentries */ -+ /* -+ * if current working dir is removed, it returns an error. -+ * but the dentry is legal. 
-+ */ -+ err = au_lkup_dentry(dentry, /*bstart*/0, /*type*/0); -+ AuDbgDentry(dentry); -+ au_di_swap(tmp, dinfo); -+ if (err == -ENOENT) -+ err = 0; -+ if (err >= 0) { -+ /* compare/refresh by dinfo */ -+ AuDbgDentry(dentry); -+ err = au_refresh_by_dinfo(dentry, dinfo, tmp); -+ au_dbg_verify_dinode(dentry); -+ AuTraceErr(err); -+ } -+ au_rw_write_unlock(&tmp->di_rwsem); -+ au_di_free(tmp); -+ if (unlikely(err)) -+ goto out; -+ -+out_dgen: -+ au_update_digen(dentry); -+out: -+ if (unlikely(err && !(dentry->d_flags & DCACHE_NFSFS_RENAMED))) { -+ AuIOErr("failed refreshing %pd, %d\n", dentry, err); -+ AuDbgDentry(dentry); -+ } -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_do_h_d_reval(struct dentry *h_dentry, unsigned int flags, -+ struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ int err, valid; -+ -+ err = 0; -+ if (!(h_dentry->d_flags & DCACHE_OP_REVALIDATE)) -+ goto out; -+ -+ AuDbg("b%d\n", bindex); -+ /* -+ * gave up supporting LOOKUP_CREATE/OPEN for lower fs, -+ * due to whiteout and branch permission. 
-+ */ -+ flags &= ~(/*LOOKUP_PARENT |*/ LOOKUP_OPEN | LOOKUP_CREATE -+ | LOOKUP_FOLLOW | LOOKUP_EXCL); -+ /* it may return tri-state */ -+ valid = h_dentry->d_op->d_revalidate(h_dentry, flags); -+ -+ if (unlikely(valid < 0)) -+ err = valid; -+ else if (!valid) -+ err = -EINVAL; -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* todo: remove this */ -+static int h_d_revalidate(struct dentry *dentry, struct inode *inode, -+ unsigned int flags, int do_udba) -+{ -+ int err; -+ umode_t mode, h_mode; -+ aufs_bindex_t bindex, btail, bstart, ibs, ibe; -+ unsigned char plus, unhashed, is_root, h_plus, h_nfs, tmpfile; -+ struct inode *h_inode, *h_cached_inode; -+ struct dentry *h_dentry; -+ struct qstr *name, *h_name; -+ -+ err = 0; -+ plus = 0; -+ mode = 0; -+ ibs = -1; -+ ibe = -1; -+ unhashed = !!d_unhashed(dentry); -+ is_root = !!IS_ROOT(dentry); -+ name = &dentry->d_name; -+ tmpfile = au_di(dentry)->di_tmpfile; -+ -+ /* -+ * Theoretically, REVAL test should be unnecessary in case of -+ * {FS,I}NOTIFY. -+ * But {fs,i}notify doesn't fire some necessary events, -+ * IN_ATTRIB for atime/nlink/pageio -+ * Let's do REVAL test too. 
-+ */ -+ if (do_udba && inode) { -+ mode = (inode->i_mode & S_IFMT); -+ plus = (inode->i_nlink > 0); -+ ibs = au_ibstart(inode); -+ ibe = au_ibend(inode); -+ } -+ -+ bstart = au_dbstart(dentry); -+ btail = bstart; -+ if (inode && S_ISDIR(inode->i_mode)) -+ btail = au_dbtaildir(dentry); -+ for (bindex = bstart; bindex <= btail; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ -+ AuDbg("b%d, %pd\n", bindex, h_dentry); -+ h_nfs = !!au_test_nfs(h_dentry->d_sb); -+ spin_lock(&h_dentry->d_lock); -+ h_name = &h_dentry->d_name; -+ if (unlikely(do_udba -+ && !is_root -+ && ((!h_nfs -+ && (unhashed != !!d_unhashed(h_dentry) -+ || (!tmpfile -+ && !au_qstreq(name, h_name)) -+ )) -+ || (h_nfs -+ && !(flags & LOOKUP_OPEN) -+ && (h_dentry->d_flags -+ & DCACHE_NFSFS_RENAMED))) -+ )) { -+ int h_unhashed; -+ -+ h_unhashed = d_unhashed(h_dentry); -+ spin_unlock(&h_dentry->d_lock); -+ AuDbg("unhash 0x%x 0x%x, %pd %pd\n", -+ unhashed, h_unhashed, dentry, h_dentry); -+ goto err; -+ } -+ spin_unlock(&h_dentry->d_lock); -+ -+ err = au_do_h_d_reval(h_dentry, flags, dentry, bindex); -+ if (unlikely(err)) -+ /* do not goto err, to keep the errno */ -+ break; -+ -+ /* todo: plink too? 
*/ -+ if (!do_udba) -+ continue; -+ -+ /* UDBA tests */ -+ h_inode = h_dentry->d_inode; -+ if (unlikely(!!inode != !!h_inode)) -+ goto err; -+ -+ h_plus = plus; -+ h_mode = mode; -+ h_cached_inode = h_inode; -+ if (h_inode) { -+ h_mode = (h_inode->i_mode & S_IFMT); -+ h_plus = (h_inode->i_nlink > 0); -+ } -+ if (inode && ibs <= bindex && bindex <= ibe) -+ h_cached_inode = au_h_iptr(inode, bindex); -+ -+ if (!h_nfs) { -+ if (unlikely(plus != h_plus && !tmpfile)) -+ goto err; -+ } else { -+ if (unlikely(!(h_dentry->d_flags & DCACHE_NFSFS_RENAMED) -+ && !is_root -+ && !IS_ROOT(h_dentry) -+ && unhashed != d_unhashed(h_dentry))) -+ goto err; -+ } -+ if (unlikely(mode != h_mode -+ || h_cached_inode != h_inode)) -+ goto err; -+ continue; -+ -+err: -+ err = -EINVAL; -+ break; -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+/* todo: consolidate with do_refresh() and au_reval_for_attr() */ -+static int simple_reval_dpath(struct dentry *dentry, unsigned int sigen) -+{ -+ int err; -+ struct dentry *parent; -+ -+ if (!au_digen_test(dentry, sigen)) -+ return 0; -+ -+ parent = dget_parent(dentry); -+ di_read_lock_parent(parent, AuLock_IR); -+ AuDebugOn(au_digen_test(parent, sigen)); -+ au_dbg_verify_gen(parent, sigen); -+ err = au_refresh_dentry(dentry, parent); -+ di_read_unlock(parent, AuLock_IR); -+ dput(parent); -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_reval_dpath(struct dentry *dentry, unsigned int sigen) -+{ -+ int err; -+ struct dentry *d, *parent; -+ struct inode *inode; -+ -+ if (!au_ftest_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR)) -+ return simple_reval_dpath(dentry, sigen); -+ -+ /* slow loop, keep it simple and stupid */ -+ /* cf: au_cpup_dirs() */ -+ err = 0; -+ parent = NULL; -+ while (au_digen_test(dentry, sigen)) { -+ d = dentry; -+ while (1) { -+ dput(parent); -+ parent = dget_parent(d); -+ if (!au_digen_test(parent, sigen)) -+ break; -+ d = parent; -+ } -+ -+ inode = d->d_inode; -+ if (d != dentry) -+ di_write_lock_child2(d); -+ -+ /* someone 
might update our dentry while we were sleeping */ -+ if (au_digen_test(d, sigen)) { -+ /* -+ * todo: consolidate with simple_reval_dpath(), -+ * do_refresh() and au_reval_for_attr(). -+ */ -+ di_read_lock_parent(parent, AuLock_IR); -+ err = au_refresh_dentry(d, parent); -+ di_read_unlock(parent, AuLock_IR); -+ } -+ -+ if (d != dentry) -+ di_write_unlock(d); -+ dput(parent); -+ if (unlikely(err)) -+ break; -+ } -+ -+ return err; -+} -+ -+/* -+ * if valid returns 1, otherwise 0. -+ */ -+static int aufs_d_revalidate(struct dentry *dentry, unsigned int flags) -+{ -+ int valid, err; -+ unsigned int sigen; -+ unsigned char do_udba; -+ struct super_block *sb; -+ struct inode *inode; -+ -+ /* todo: support rcu-walk? */ -+ if (flags & LOOKUP_RCU) -+ return -ECHILD; -+ -+ valid = 0; -+ if (unlikely(!au_di(dentry))) -+ goto out; -+ -+ valid = 1; -+ sb = dentry->d_sb; -+ /* -+ * todo: very ugly -+ * i_mutex of parent dir may be held, -+ * but we should not return 'invalid' due to busy. -+ */ -+ err = aufs_read_lock(dentry, AuLock_FLUSH | AuLock_DW | AuLock_NOPLM); -+ if (unlikely(err)) { -+ valid = err; -+ AuTraceErr(err); -+ goto out; -+ } -+ inode = dentry->d_inode; -+ if (unlikely(inode && is_bad_inode(inode))) { -+ err = -EINVAL; -+ AuTraceErr(err); -+ goto out_dgrade; -+ } -+ if (unlikely(au_dbrange_test(dentry))) { -+ err = -EINVAL; -+ AuTraceErr(err); -+ goto out_dgrade; -+ } -+ -+ sigen = au_sigen(sb); -+ if (au_digen_test(dentry, sigen)) { -+ AuDebugOn(IS_ROOT(dentry)); -+ err = au_reval_dpath(dentry, sigen); -+ if (unlikely(err)) { -+ AuTraceErr(err); -+ goto out_dgrade; -+ } -+ } -+ di_downgrade_lock(dentry, AuLock_IR); -+ -+ err = -EINVAL; -+ if (!(flags & (LOOKUP_OPEN | LOOKUP_EMPTY)) -+ && inode -+ && !(inode->i_state && I_LINKABLE) -+ && (IS_DEADDIR(inode) || !inode->i_nlink)) { -+ AuTraceErr(err); -+ goto out_inval; -+ } -+ -+ do_udba = !au_opt_test(au_mntflags(sb), UDBA_NONE); -+ if (do_udba && inode) { -+ aufs_bindex_t bstart = au_ibstart(inode); -+ struct 
inode *h_inode; -+ -+ if (bstart >= 0) { -+ h_inode = au_h_iptr(inode, bstart); -+ if (h_inode && au_test_higen(inode, h_inode)) { -+ AuTraceErr(err); -+ goto out_inval; -+ } -+ } -+ } -+ -+ err = h_d_revalidate(dentry, inode, flags, do_udba); -+ if (unlikely(!err && do_udba && au_dbstart(dentry) < 0)) { -+ err = -EIO; -+ AuDbg("both of real entry and whiteout found, %p, err %d\n", -+ dentry, err); -+ } -+ goto out_inval; -+ -+out_dgrade: -+ di_downgrade_lock(dentry, AuLock_IR); -+out_inval: -+ aufs_read_unlock(dentry, AuLock_IR); -+ AuTraceErr(err); -+ valid = !err; -+out: -+ if (!valid) { -+ AuDbg("%pd invalid, %d\n", dentry, valid); -+ d_drop(dentry); -+ } -+ return valid; -+} -+ -+static void aufs_d_release(struct dentry *dentry) -+{ -+ if (au_di(dentry)) { -+ au_di_fin(dentry); -+ au_hn_di_reinit(dentry); -+ } -+} -+ -+const struct dentry_operations aufs_dop = { -+ .d_revalidate = aufs_d_revalidate, -+ .d_weak_revalidate = aufs_d_revalidate, -+ .d_release = aufs_d_release -+}; -+ -+/* aufs_dop without d_revalidate */ -+const struct dentry_operations aufs_dop_noreval = { -+ .d_release = aufs_d_release -+}; -diff --git a/fs/aufs/dentry.h b/fs/aufs/dentry.h -new file mode 100644 -index 0000000..4006484 ---- /dev/null -+++ b/fs/aufs/dentry.h -@@ -0,0 +1,234 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * lookup and dentry operations -+ */ -+ -+#ifndef __AUFS_DENTRY_H__ -+#define __AUFS_DENTRY_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include "rwsem.h" -+ -+struct au_hdentry { -+ struct dentry *hd_dentry; -+ aufs_bindex_t hd_id; -+}; -+ -+struct au_dinfo { -+ atomic_t di_generation; -+ -+ struct au_rwsem di_rwsem; -+ aufs_bindex_t di_bstart, di_bend, di_bwh, di_bdiropq; -+ unsigned char di_tmpfile; /* to allow the different name */ -+ struct au_hdentry *di_hdentry; -+} ____cacheline_aligned_in_smp; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* dentry.c */ -+extern const struct dentry_operations aufs_dop, aufs_dop_noreval; -+struct au_branch; -+struct dentry *au_sio_lkup_one(struct qstr *name, struct dentry *parent); -+int au_h_verify(struct dentry *h_dentry, unsigned int udba, struct inode *h_dir, -+ struct dentry *h_parent, struct au_branch *br); -+ -+int au_lkup_dentry(struct dentry *dentry, aufs_bindex_t bstart, mode_t type); -+int au_lkup_neg(struct dentry *dentry, aufs_bindex_t bindex, int wh); -+int au_refresh_dentry(struct dentry *dentry, struct dentry *parent); -+int au_reval_dpath(struct dentry *dentry, unsigned int sigen); -+void au_refresh_dop(struct dentry *dentry, int force_reval); -+ -+/* dinfo.c */ -+void au_di_init_once(void *_di); -+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc); -+void au_di_free(struct au_dinfo *dinfo); -+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b); -+void au_di_cp(struct au_dinfo *dst, struct au_dinfo *src); -+int au_di_init(struct dentry *dentry); -+void au_di_fin(struct dentry *dentry); -+int au_di_realloc(struct au_dinfo *dinfo, int nbr); -+ -+void di_read_lock(struct dentry *d, int flags, unsigned int lsc); -+void di_read_unlock(struct dentry *d, int flags); -+void di_downgrade_lock(struct dentry *d, int flags); -+void di_write_lock(struct dentry *d, unsigned int lsc); -+void di_write_unlock(struct dentry *d); -+void 
di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir); -+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir); -+void di_write_unlock2(struct dentry *d1, struct dentry *d2); -+ -+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex); -+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex); -+aufs_bindex_t au_dbtail(struct dentry *dentry); -+aufs_bindex_t au_dbtaildir(struct dentry *dentry); -+ -+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_dentry); -+int au_digen_test(struct dentry *dentry, unsigned int sigen); -+int au_dbrange_test(struct dentry *dentry); -+void au_update_digen(struct dentry *dentry); -+void au_update_dbrange(struct dentry *dentry, int do_put_zero); -+void au_update_dbstart(struct dentry *dentry); -+void au_update_dbend(struct dentry *dentry); -+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct au_dinfo *au_di(struct dentry *dentry) -+{ -+ return dentry->d_fsdata; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* lock subclass for dinfo */ -+enum { -+ AuLsc_DI_CHILD, /* child first */ -+ AuLsc_DI_CHILD2, /* rename(2), link(2), and cpup at hnotify */ -+ AuLsc_DI_CHILD3, /* copyup dirs */ -+ AuLsc_DI_PARENT, -+ AuLsc_DI_PARENT2, -+ AuLsc_DI_PARENT3, -+ AuLsc_DI_TMP /* temp for replacing dinfo */ -+}; -+ -+/* -+ * di_read_lock_child, di_write_lock_child, -+ * di_read_lock_child2, di_write_lock_child2, -+ * di_read_lock_child3, di_write_lock_child3, -+ * di_read_lock_parent, di_write_lock_parent, -+ * di_read_lock_parent2, di_write_lock_parent2, -+ * di_read_lock_parent3, di_write_lock_parent3, -+ */ -+#define AuReadLockFunc(name, lsc) \ -+static inline void di_read_lock_##name(struct dentry *d, int flags) \ -+{ di_read_lock(d, flags, AuLsc_DI_##lsc); } -+ -+#define 
AuWriteLockFunc(name, lsc) \ -+static inline void di_write_lock_##name(struct dentry *d) \ -+{ di_write_lock(d, AuLsc_DI_##lsc); } -+ -+#define AuRWLockFuncs(name, lsc) \ -+ AuReadLockFunc(name, lsc) \ -+ AuWriteLockFunc(name, lsc) -+ -+AuRWLockFuncs(child, CHILD); -+AuRWLockFuncs(child2, CHILD2); -+AuRWLockFuncs(child3, CHILD3); -+AuRWLockFuncs(parent, PARENT); -+AuRWLockFuncs(parent2, PARENT2); -+AuRWLockFuncs(parent3, PARENT3); -+ -+#undef AuReadLockFunc -+#undef AuWriteLockFunc -+#undef AuRWLockFuncs -+ -+#define DiMustNoWaiters(d) AuRwMustNoWaiters(&au_di(d)->di_rwsem) -+#define DiMustAnyLock(d) AuRwMustAnyLock(&au_di(d)->di_rwsem) -+#define DiMustWriteLock(d) AuRwMustWriteLock(&au_di(d)->di_rwsem) -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* todo: memory barrier? */ -+static inline unsigned int au_digen(struct dentry *d) -+{ -+ return atomic_read(&au_di(d)->di_generation); -+} -+ -+static inline void au_h_dentry_init(struct au_hdentry *hdentry) -+{ -+ hdentry->hd_dentry = NULL; -+} -+ -+static inline void au_hdput(struct au_hdentry *hd) -+{ -+ if (hd) -+ dput(hd->hd_dentry); -+} -+ -+static inline aufs_bindex_t au_dbstart(struct dentry *dentry) -+{ -+ DiMustAnyLock(dentry); -+ return au_di(dentry)->di_bstart; -+} -+ -+static inline aufs_bindex_t au_dbend(struct dentry *dentry) -+{ -+ DiMustAnyLock(dentry); -+ return au_di(dentry)->di_bend; -+} -+ -+static inline aufs_bindex_t au_dbwh(struct dentry *dentry) -+{ -+ DiMustAnyLock(dentry); -+ return au_di(dentry)->di_bwh; -+} -+ -+static inline aufs_bindex_t au_dbdiropq(struct dentry *dentry) -+{ -+ DiMustAnyLock(dentry); -+ return au_di(dentry)->di_bdiropq; -+} -+ -+/* todo: hard/soft set? 
*/ -+static inline void au_set_dbstart(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ DiMustWriteLock(dentry); -+ au_di(dentry)->di_bstart = bindex; -+} -+ -+static inline void au_set_dbend(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ DiMustWriteLock(dentry); -+ au_di(dentry)->di_bend = bindex; -+} -+ -+static inline void au_set_dbwh(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ DiMustWriteLock(dentry); -+ /* dbwh can be outside of bstart - bend range */ -+ au_di(dentry)->di_bwh = bindex; -+} -+ -+static inline void au_set_dbdiropq(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ DiMustWriteLock(dentry); -+ au_di(dentry)->di_bdiropq = bindex; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_HNOTIFY -+static inline void au_digen_dec(struct dentry *d) -+{ -+ atomic_dec(&au_di(d)->di_generation); -+} -+ -+static inline void au_hn_di_reinit(struct dentry *dentry) -+{ -+ dentry->d_fsdata = NULL; -+} -+#else -+AuStubVoid(au_hn_di_reinit, struct dentry *dentry __maybe_unused) -+#endif /* CONFIG_AUFS_HNOTIFY */ -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_DENTRY_H__ */ -diff --git a/fs/aufs/dinfo.c b/fs/aufs/dinfo.c -new file mode 100644 -index 0000000..28c02b3 ---- /dev/null -+++ b/fs/aufs/dinfo.c -@@ -0,0 +1,544 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * dentry private data -+ */ -+ -+#include "aufs.h" -+ -+void au_di_init_once(void *_dinfo) -+{ -+ struct au_dinfo *dinfo = _dinfo; -+ static struct lock_class_key aufs_di; -+ -+ au_rw_init(&dinfo->di_rwsem); -+ au_rw_class(&dinfo->di_rwsem, &aufs_di); -+} -+ -+struct au_dinfo *au_di_alloc(struct super_block *sb, unsigned int lsc) -+{ -+ struct au_dinfo *dinfo; -+ int nbr, i; -+ -+ dinfo = au_cache_alloc_dinfo(); -+ if (unlikely(!dinfo)) -+ goto out; -+ -+ nbr = au_sbend(sb) + 1; -+ if (nbr <= 0) -+ nbr = 1; -+ dinfo->di_hdentry = kcalloc(nbr, sizeof(*dinfo->di_hdentry), GFP_NOFS); -+ if (dinfo->di_hdentry) { -+ au_rw_write_lock_nested(&dinfo->di_rwsem, lsc); -+ dinfo->di_bstart = -1; -+ dinfo->di_bend = -1; -+ dinfo->di_bwh = -1; -+ dinfo->di_bdiropq = -1; -+ dinfo->di_tmpfile = 0; -+ for (i = 0; i < nbr; i++) -+ dinfo->di_hdentry[i].hd_id = -1; -+ goto out; -+ } -+ -+ au_cache_free_dinfo(dinfo); -+ dinfo = NULL; -+ -+out: -+ return dinfo; -+} -+ -+void au_di_free(struct au_dinfo *dinfo) -+{ -+ struct au_hdentry *p; -+ aufs_bindex_t bend, bindex; -+ -+ /* dentry may not be revalidated */ -+ bindex = dinfo->di_bstart; -+ if (bindex >= 0) { -+ bend = dinfo->di_bend; -+ p = dinfo->di_hdentry + bindex; -+ while (bindex++ <= bend) -+ au_hdput(p++); -+ } -+ kfree(dinfo->di_hdentry); -+ au_cache_free_dinfo(dinfo); -+} -+ -+void au_di_swap(struct au_dinfo *a, struct au_dinfo *b) -+{ -+ struct au_hdentry *p; -+ aufs_bindex_t bi; -+ -+ AuRwMustWriteLock(&a->di_rwsem); -+ AuRwMustWriteLock(&b->di_rwsem); -+ -+#define DiSwap(v, name) \ -+ do { \ -+ v = a->di_##name; \ -+ a->di_##name = b->di_##name; \ -+ b->di_##name = v; \ -+ } while (0) -+ -+ DiSwap(p, hdentry); -+ DiSwap(bi, bstart); -+ DiSwap(bi, bend); -+ DiSwap(bi, bwh); -+ DiSwap(bi, bdiropq); -+ /* smp_mb(); */ -+ -+#undef DiSwap -+} -+ -+void au_di_cp(struct au_dinfo *dst, struct 
au_dinfo *src) -+{ -+ AuRwMustWriteLock(&dst->di_rwsem); -+ AuRwMustWriteLock(&src->di_rwsem); -+ -+ dst->di_bstart = src->di_bstart; -+ dst->di_bend = src->di_bend; -+ dst->di_bwh = src->di_bwh; -+ dst->di_bdiropq = src->di_bdiropq; -+ /* smp_mb(); */ -+} -+ -+int au_di_init(struct dentry *dentry) -+{ -+ int err; -+ struct super_block *sb; -+ struct au_dinfo *dinfo; -+ -+ err = 0; -+ sb = dentry->d_sb; -+ dinfo = au_di_alloc(sb, AuLsc_DI_CHILD); -+ if (dinfo) { -+ atomic_set(&dinfo->di_generation, au_sigen(sb)); -+ /* smp_mb(); */ /* atomic_set */ -+ dentry->d_fsdata = dinfo; -+ } else -+ err = -ENOMEM; -+ -+ return err; -+} -+ -+void au_di_fin(struct dentry *dentry) -+{ -+ struct au_dinfo *dinfo; -+ -+ dinfo = au_di(dentry); -+ AuRwDestroy(&dinfo->di_rwsem); -+ au_di_free(dinfo); -+} -+ -+int au_di_realloc(struct au_dinfo *dinfo, int nbr) -+{ -+ int err, sz; -+ struct au_hdentry *hdp; -+ -+ AuRwMustWriteLock(&dinfo->di_rwsem); -+ -+ err = -ENOMEM; -+ sz = sizeof(*hdp) * (dinfo->di_bend + 1); -+ if (!sz) -+ sz = sizeof(*hdp); -+ hdp = au_kzrealloc(dinfo->di_hdentry, sz, sizeof(*hdp) * nbr, GFP_NOFS); -+ if (hdp) { -+ dinfo->di_hdentry = hdp; -+ err = 0; -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void do_ii_write_lock(struct inode *inode, unsigned int lsc) -+{ -+ switch (lsc) { -+ case AuLsc_DI_CHILD: -+ ii_write_lock_child(inode); -+ break; -+ case AuLsc_DI_CHILD2: -+ ii_write_lock_child2(inode); -+ break; -+ case AuLsc_DI_CHILD3: -+ ii_write_lock_child3(inode); -+ break; -+ case AuLsc_DI_PARENT: -+ ii_write_lock_parent(inode); -+ break; -+ case AuLsc_DI_PARENT2: -+ ii_write_lock_parent2(inode); -+ break; -+ case AuLsc_DI_PARENT3: -+ ii_write_lock_parent3(inode); -+ break; -+ default: -+ BUG(); -+ } -+} -+ -+static void do_ii_read_lock(struct inode *inode, unsigned int lsc) -+{ -+ switch (lsc) { -+ case AuLsc_DI_CHILD: -+ ii_read_lock_child(inode); -+ break; -+ case AuLsc_DI_CHILD2: -+ 
ii_read_lock_child2(inode); -+ break; -+ case AuLsc_DI_CHILD3: -+ ii_read_lock_child3(inode); -+ break; -+ case AuLsc_DI_PARENT: -+ ii_read_lock_parent(inode); -+ break; -+ case AuLsc_DI_PARENT2: -+ ii_read_lock_parent2(inode); -+ break; -+ case AuLsc_DI_PARENT3: -+ ii_read_lock_parent3(inode); -+ break; -+ default: -+ BUG(); -+ } -+} -+ -+void di_read_lock(struct dentry *d, int flags, unsigned int lsc) -+{ -+ au_rw_read_lock_nested(&au_di(d)->di_rwsem, lsc); -+ if (d->d_inode) { -+ if (au_ftest_lock(flags, IW)) -+ do_ii_write_lock(d->d_inode, lsc); -+ else if (au_ftest_lock(flags, IR)) -+ do_ii_read_lock(d->d_inode, lsc); -+ } -+} -+ -+void di_read_unlock(struct dentry *d, int flags) -+{ -+ if (d->d_inode) { -+ if (au_ftest_lock(flags, IW)) { -+ au_dbg_verify_dinode(d); -+ ii_write_unlock(d->d_inode); -+ } else if (au_ftest_lock(flags, IR)) { -+ au_dbg_verify_dinode(d); -+ ii_read_unlock(d->d_inode); -+ } -+ } -+ au_rw_read_unlock(&au_di(d)->di_rwsem); -+} -+ -+void di_downgrade_lock(struct dentry *d, int flags) -+{ -+ if (d->d_inode && au_ftest_lock(flags, IR)) -+ ii_downgrade_lock(d->d_inode); -+ au_rw_dgrade_lock(&au_di(d)->di_rwsem); -+} -+ -+void di_write_lock(struct dentry *d, unsigned int lsc) -+{ -+ au_rw_write_lock_nested(&au_di(d)->di_rwsem, lsc); -+ if (d->d_inode) -+ do_ii_write_lock(d->d_inode, lsc); -+} -+ -+void di_write_unlock(struct dentry *d) -+{ -+ au_dbg_verify_dinode(d); -+ if (d->d_inode) -+ ii_write_unlock(d->d_inode); -+ au_rw_write_unlock(&au_di(d)->di_rwsem); -+} -+ -+void di_write_lock2_child(struct dentry *d1, struct dentry *d2, int isdir) -+{ -+ AuDebugOn(d1 == d2 -+ || d1->d_inode == d2->d_inode -+ || d1->d_sb != d2->d_sb); -+ -+ if (isdir && au_test_subdir(d1, d2)) { -+ di_write_lock_child(d1); -+ di_write_lock_child2(d2); -+ } else { -+ /* there should be no races */ -+ di_write_lock_child(d2); -+ di_write_lock_child2(d1); -+ } -+} -+ -+void di_write_lock2_parent(struct dentry *d1, struct dentry *d2, int isdir) -+{ -+ AuDebugOn(d1 
== d2 -+ || d1->d_inode == d2->d_inode -+ || d1->d_sb != d2->d_sb); -+ -+ if (isdir && au_test_subdir(d1, d2)) { -+ di_write_lock_parent(d1); -+ di_write_lock_parent2(d2); -+ } else { -+ /* there should be no races */ -+ di_write_lock_parent(d2); -+ di_write_lock_parent2(d1); -+ } -+} -+ -+void di_write_unlock2(struct dentry *d1, struct dentry *d2) -+{ -+ di_write_unlock(d1); -+ if (d1->d_inode == d2->d_inode) -+ au_rw_write_unlock(&au_di(d2)->di_rwsem); -+ else -+ di_write_unlock(d2); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct dentry *au_h_dptr(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ struct dentry *d; -+ -+ DiMustAnyLock(dentry); -+ -+ if (au_dbstart(dentry) < 0 || bindex < au_dbstart(dentry)) -+ return NULL; -+ AuDebugOn(bindex < 0); -+ d = au_di(dentry)->di_hdentry[0 + bindex].hd_dentry; -+ AuDebugOn(d && au_dcount(d) <= 0); -+ return d; -+} -+ -+/* -+ * extended version of au_h_dptr(). -+ * returns a hashed and positive (or linkable) h_dentry in bindex, NULL, or -+ * error. 
-+ */ -+struct dentry *au_h_d_alias(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ struct dentry *h_dentry; -+ struct inode *inode, *h_inode; -+ -+ inode = dentry->d_inode; -+ AuDebugOn(!inode); -+ -+ h_dentry = NULL; -+ if (au_dbstart(dentry) <= bindex -+ && bindex <= au_dbend(dentry)) -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry && !au_d_linkable(h_dentry)) { -+ dget(h_dentry); -+ goto out; /* success */ -+ } -+ -+ AuDebugOn(bindex < au_ibstart(inode)); -+ AuDebugOn(au_ibend(inode) < bindex); -+ h_inode = au_h_iptr(inode, bindex); -+ h_dentry = d_find_alias(h_inode); -+ if (h_dentry) { -+ if (!IS_ERR(h_dentry)) { -+ if (!au_d_linkable(h_dentry)) -+ goto out; /* success */ -+ dput(h_dentry); -+ } else -+ goto out; -+ } -+ -+ if (au_opt_test(au_mntflags(dentry->d_sb), PLINK)) { -+ h_dentry = au_plink_lkup(inode, bindex); -+ AuDebugOn(!h_dentry); -+ if (!IS_ERR(h_dentry)) { -+ if (!au_d_hashed_positive(h_dentry)) -+ goto out; /* success */ -+ dput(h_dentry); -+ h_dentry = NULL; -+ } -+ } -+ -+out: -+ AuDbgDentry(h_dentry); -+ return h_dentry; -+} -+ -+aufs_bindex_t au_dbtail(struct dentry *dentry) -+{ -+ aufs_bindex_t bend, bwh; -+ -+ bend = au_dbend(dentry); -+ if (0 <= bend) { -+ bwh = au_dbwh(dentry); -+ if (!bwh) -+ return bwh; -+ if (0 < bwh && bwh < bend) -+ return bwh - 1; -+ } -+ return bend; -+} -+ -+aufs_bindex_t au_dbtaildir(struct dentry *dentry) -+{ -+ aufs_bindex_t bend, bopq; -+ -+ bend = au_dbtail(dentry); -+ if (0 <= bend) { -+ bopq = au_dbdiropq(dentry); -+ if (0 <= bopq && bopq < bend) -+ bend = bopq; -+ } -+ return bend; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_set_h_dptr(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_dentry) -+{ -+ struct au_hdentry *hd = au_di(dentry)->di_hdentry + bindex; -+ struct au_branch *br; -+ -+ DiMustWriteLock(dentry); -+ -+ au_hdput(hd); -+ hd->hd_dentry = h_dentry; -+ if (h_dentry) { -+ br = au_sbr(dentry->d_sb, bindex); -+ 
hd->hd_id = br->br_id; -+ } -+} -+ -+int au_dbrange_test(struct dentry *dentry) -+{ -+ int err; -+ aufs_bindex_t bstart, bend; -+ -+ err = 0; -+ bstart = au_dbstart(dentry); -+ bend = au_dbend(dentry); -+ if (bstart >= 0) -+ AuDebugOn(bend < 0 && bstart > bend); -+ else { -+ err = -EIO; -+ AuDebugOn(bend >= 0); -+ } -+ -+ return err; -+} -+ -+int au_digen_test(struct dentry *dentry, unsigned int sigen) -+{ -+ int err; -+ -+ err = 0; -+ if (unlikely(au_digen(dentry) != sigen -+ || au_iigen_test(dentry->d_inode, sigen))) -+ err = -EIO; -+ -+ return err; -+} -+ -+void au_update_digen(struct dentry *dentry) -+{ -+ atomic_set(&au_di(dentry)->di_generation, au_sigen(dentry->d_sb)); -+ /* smp_mb(); */ /* atomic_set */ -+} -+ -+void au_update_dbrange(struct dentry *dentry, int do_put_zero) -+{ -+ struct au_dinfo *dinfo; -+ struct dentry *h_d; -+ struct au_hdentry *hdp; -+ -+ DiMustWriteLock(dentry); -+ -+ dinfo = au_di(dentry); -+ if (!dinfo || dinfo->di_bstart < 0) -+ return; -+ -+ hdp = dinfo->di_hdentry; -+ if (do_put_zero) { -+ aufs_bindex_t bindex, bend; -+ -+ bend = dinfo->di_bend; -+ for (bindex = dinfo->di_bstart; bindex <= bend; bindex++) { -+ h_d = hdp[0 + bindex].hd_dentry; -+ if (h_d && !h_d->d_inode) -+ au_set_h_dptr(dentry, bindex, NULL); -+ } -+ } -+ -+ dinfo->di_bstart = -1; -+ while (++dinfo->di_bstart <= dinfo->di_bend) -+ if (hdp[0 + dinfo->di_bstart].hd_dentry) -+ break; -+ if (dinfo->di_bstart > dinfo->di_bend) { -+ dinfo->di_bstart = -1; -+ dinfo->di_bend = -1; -+ return; -+ } -+ -+ dinfo->di_bend++; -+ while (0 <= --dinfo->di_bend) -+ if (hdp[0 + dinfo->di_bend].hd_dentry) -+ break; -+ AuDebugOn(dinfo->di_bstart > dinfo->di_bend || dinfo->di_bend < 0); -+} -+ -+void au_update_dbstart(struct dentry *dentry) -+{ -+ aufs_bindex_t bindex, bend; -+ struct dentry *h_dentry; -+ -+ bend = au_dbend(dentry); -+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ if 
(h_dentry->d_inode) { -+ au_set_dbstart(dentry, bindex); -+ return; -+ } -+ au_set_h_dptr(dentry, bindex, NULL); -+ } -+} -+ -+void au_update_dbend(struct dentry *dentry) -+{ -+ aufs_bindex_t bindex, bstart; -+ struct dentry *h_dentry; -+ -+ bstart = au_dbstart(dentry); -+ for (bindex = au_dbend(dentry); bindex >= bstart; bindex--) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ if (h_dentry->d_inode) { -+ au_set_dbend(dentry, bindex); -+ return; -+ } -+ au_set_h_dptr(dentry, bindex, NULL); -+ } -+} -+ -+int au_find_dbindex(struct dentry *dentry, struct dentry *h_dentry) -+{ -+ aufs_bindex_t bindex, bend; -+ -+ bend = au_dbend(dentry); -+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) -+ if (au_h_dptr(dentry, bindex) == h_dentry) -+ return bindex; -+ return -1; -+} -diff --git a/fs/aufs/dir.c b/fs/aufs/dir.c -new file mode 100644 -index 0000000..3d61b05 ---- /dev/null -+++ b/fs/aufs/dir.c -@@ -0,0 +1,756 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * directory operations -+ */ -+ -+#include -+#include "aufs.h" -+ -+void au_add_nlink(struct inode *dir, struct inode *h_dir) -+{ -+ unsigned int nlink; -+ -+ AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode)); -+ -+ nlink = dir->i_nlink; -+ nlink += h_dir->i_nlink - 2; -+ if (h_dir->i_nlink < 2) -+ nlink += 2; -+ smp_mb(); /* for i_nlink */ -+ /* 0 can happen in revaliding */ -+ set_nlink(dir, nlink); -+} -+ -+void au_sub_nlink(struct inode *dir, struct inode *h_dir) -+{ -+ unsigned int nlink; -+ -+ AuDebugOn(!S_ISDIR(dir->i_mode) || !S_ISDIR(h_dir->i_mode)); -+ -+ nlink = dir->i_nlink; -+ nlink -= h_dir->i_nlink - 2; -+ if (h_dir->i_nlink < 2) -+ nlink -= 2; -+ smp_mb(); /* for i_nlink */ -+ /* nlink == 0 means the branch-fs is broken */ -+ set_nlink(dir, nlink); -+} -+ -+loff_t au_dir_size(struct file *file, struct dentry *dentry) -+{ -+ loff_t sz; -+ aufs_bindex_t bindex, bend; -+ struct file *h_file; -+ struct dentry *h_dentry; -+ -+ sz = 0; -+ if (file) { -+ AuDebugOn(!d_is_dir(file->f_path.dentry)); -+ -+ bend = au_fbend_dir(file); -+ for (bindex = au_fbstart(file); -+ bindex <= bend && sz < KMALLOC_MAX_SIZE; -+ bindex++) { -+ h_file = au_hf_dir(file, bindex); -+ if (h_file && file_inode(h_file)) -+ sz += vfsub_f_size_read(h_file); -+ } -+ } else { -+ AuDebugOn(!dentry); -+ AuDebugOn(!d_is_dir(dentry)); -+ -+ bend = au_dbtaildir(dentry); -+ for (bindex = au_dbstart(dentry); -+ bindex <= bend && sz < KMALLOC_MAX_SIZE; -+ bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry && h_dentry->d_inode) -+ sz += i_size_read(h_dentry->d_inode); -+ } -+ } -+ if (sz < KMALLOC_MAX_SIZE) -+ sz = roundup_pow_of_two(sz); -+ if (sz > KMALLOC_MAX_SIZE) -+ sz = KMALLOC_MAX_SIZE; -+ else if (sz < NAME_MAX) { -+ BUILD_BUG_ON(AUFS_RDBLK_DEF < NAME_MAX); -+ sz = AUFS_RDBLK_DEF; -+ } -+ return sz; -+} -+ -+struct au_dir_ts_arg { -+ struct dentry *dentry; -+ aufs_bindex_t brid; -+}; -+ -+static void au_do_dir_ts(void *arg) -+{ -+ struct 
au_dir_ts_arg *a = arg; -+ struct au_dtime dt; -+ struct path h_path; -+ struct inode *dir, *h_dir; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct au_hinode *hdir; -+ int err; -+ aufs_bindex_t bstart, bindex; -+ -+ sb = a->dentry->d_sb; -+ dir = a->dentry->d_inode; -+ if (!dir) -+ goto out; -+ /* no dir->i_mutex lock */ -+ aufs_read_lock(a->dentry, AuLock_DW); /* noflush */ -+ -+ bstart = au_ibstart(dir); -+ bindex = au_br_index(sb, a->brid); -+ if (bindex < bstart) -+ goto out_unlock; -+ -+ br = au_sbr(sb, bindex); -+ h_path.dentry = au_h_dptr(a->dentry, bindex); -+ if (!h_path.dentry) -+ goto out_unlock; -+ h_path.mnt = au_br_mnt(br); -+ au_dtime_store(&dt, a->dentry, &h_path); -+ -+ br = au_sbr(sb, bstart); -+ if (!au_br_writable(br->br_perm)) -+ goto out_unlock; -+ h_path.dentry = au_h_dptr(a->dentry, bstart); -+ h_path.mnt = au_br_mnt(br); -+ err = vfsub_mnt_want_write(h_path.mnt); -+ if (err) -+ goto out_unlock; -+ hdir = au_hi(dir, bstart); -+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT); -+ h_dir = au_h_iptr(dir, bstart); -+ if (h_dir->i_nlink -+ && timespec_compare(&h_dir->i_mtime, &dt.dt_mtime) < 0) { -+ dt.dt_h_path = h_path; -+ au_dtime_revert(&dt); -+ } -+ au_hn_imtx_unlock(hdir); -+ vfsub_mnt_drop_write(h_path.mnt); -+ au_cpup_attr_timesizes(dir); -+ -+out_unlock: -+ aufs_read_unlock(a->dentry, AuLock_DW); -+out: -+ dput(a->dentry); -+ au_nwt_done(&au_sbi(sb)->si_nowait); -+ kfree(arg); -+} -+ -+void au_dir_ts(struct inode *dir, aufs_bindex_t bindex) -+{ -+ int perm, wkq_err; -+ aufs_bindex_t bstart; -+ struct au_dir_ts_arg *arg; -+ struct dentry *dentry; -+ struct super_block *sb; -+ -+ IMustLock(dir); -+ -+ dentry = d_find_any_alias(dir); -+ AuDebugOn(!dentry); -+ sb = dentry->d_sb; -+ bstart = au_ibstart(dir); -+ if (bstart == bindex) { -+ au_cpup_attr_timesizes(dir); -+ goto out; -+ } -+ -+ perm = au_sbr_perm(sb, bstart); -+ if (!au_br_writable(perm)) -+ goto out; -+ -+ arg = kmalloc(sizeof(*arg), GFP_NOFS); -+ if (!arg) -+ goto 
out; -+ -+ arg->dentry = dget(dentry); /* will be dput-ted by au_do_dir_ts() */ -+ arg->brid = au_sbr_id(sb, bindex); -+ wkq_err = au_wkq_nowait(au_do_dir_ts, arg, sb, /*flags*/0); -+ if (unlikely(wkq_err)) { -+ pr_err("wkq %d\n", wkq_err); -+ dput(dentry); -+ kfree(arg); -+ } -+ -+out: -+ dput(dentry); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int reopen_dir(struct file *file) -+{ -+ int err; -+ unsigned int flags; -+ aufs_bindex_t bindex, btail, bstart; -+ struct dentry *dentry, *h_dentry; -+ struct file *h_file; -+ -+ /* open all lower dirs */ -+ dentry = file->f_dentry; -+ bstart = au_dbstart(dentry); -+ for (bindex = au_fbstart(file); bindex < bstart; bindex++) -+ au_set_h_fptr(file, bindex, NULL); -+ au_set_fbstart(file, bstart); -+ -+ btail = au_dbtaildir(dentry); -+ for (bindex = au_fbend_dir(file); btail < bindex; bindex--) -+ au_set_h_fptr(file, bindex, NULL); -+ au_set_fbend_dir(file, btail); -+ -+ flags = vfsub_file_flags(file); -+ for (bindex = bstart; bindex <= btail; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ h_file = au_hf_dir(file, bindex); -+ if (h_file) -+ continue; -+ -+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; /* close all? */ -+ au_set_h_fptr(file, bindex, h_file); -+ } -+ au_update_figen(file); -+ /* todo: necessary? 
*/ -+ /* file->f_ra = h_file->f_ra; */ -+ err = 0; -+ -+out: -+ return err; -+} -+ -+static int do_open_dir(struct file *file, int flags, struct file *h_file) -+{ -+ int err; -+ aufs_bindex_t bindex, btail; -+ struct dentry *dentry, *h_dentry; -+ struct vfsmount *mnt; -+ -+ FiMustWriteLock(file); -+ AuDebugOn(h_file); -+ -+ err = 0; -+ mnt = file->f_path.mnt; -+ dentry = file->f_dentry; -+ file->f_version = dentry->d_inode->i_version; -+ bindex = au_dbstart(dentry); -+ au_set_fbstart(file, bindex); -+ btail = au_dbtaildir(dentry); -+ au_set_fbend_dir(file, btail); -+ for (; !err && bindex <= btail; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!h_dentry) -+ continue; -+ -+ err = vfsub_test_mntns(mnt, h_dentry->d_sb); -+ if (unlikely(err)) -+ break; -+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0); -+ if (IS_ERR(h_file)) { -+ err = PTR_ERR(h_file); -+ break; -+ } -+ au_set_h_fptr(file, bindex, h_file); -+ } -+ au_update_figen(file); -+ /* todo: necessary? */ -+ /* file->f_ra = h_file->f_ra; */ -+ if (!err) -+ return 0; /* success */ -+ -+ /* close all */ -+ for (bindex = au_fbstart(file); bindex <= btail; bindex++) -+ au_set_h_fptr(file, bindex, NULL); -+ au_set_fbstart(file, -1); -+ au_set_fbend_dir(file, -1); -+ -+ return err; -+} -+ -+static int aufs_open_dir(struct inode *inode __maybe_unused, -+ struct file *file) -+{ -+ int err; -+ struct super_block *sb; -+ struct au_fidir *fidir; -+ -+ err = -ENOMEM; -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ fidir = au_fidir_alloc(sb); -+ if (fidir) { -+ struct au_do_open_args args = { -+ .open = do_open_dir, -+ .fidir = fidir -+ }; -+ err = au_do_open(file, &args); -+ if (unlikely(err)) -+ kfree(fidir); -+ } -+ si_read_unlock(sb); -+ return err; -+} -+ -+static int aufs_release_dir(struct inode *inode __maybe_unused, -+ struct file *file) -+{ -+ struct au_vdir *vdir_cache; -+ struct au_finfo *finfo; -+ struct au_fidir *fidir; -+ aufs_bindex_t bindex, bend; -+ -+ finfo 
= au_fi(file); -+ fidir = finfo->fi_hdir; -+ if (fidir) { -+ au_sphl_del(&finfo->fi_hlist, -+ &au_sbi(file->f_dentry->d_sb)->si_files); -+ vdir_cache = fidir->fd_vdir_cache; /* lock-free */ -+ if (vdir_cache) -+ au_vdir_free(vdir_cache); -+ -+ bindex = finfo->fi_btop; -+ if (bindex >= 0) { -+ /* -+ * calls fput() instead of filp_close(), -+ * since no dnotify or lock for the lower file. -+ */ -+ bend = fidir->fd_bbot; -+ for (; bindex <= bend; bindex++) -+ au_set_h_fptr(file, bindex, NULL); -+ } -+ kfree(fidir); -+ finfo->fi_hdir = NULL; -+ } -+ au_finfo_fin(file); -+ return 0; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_do_flush_dir(struct file *file, fl_owner_t id) -+{ -+ int err; -+ aufs_bindex_t bindex, bend; -+ struct file *h_file; -+ -+ err = 0; -+ bend = au_fbend_dir(file); -+ for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) { -+ h_file = au_hf_dir(file, bindex); -+ if (h_file) -+ err = vfsub_flush(h_file, id); -+ } -+ return err; -+} -+ -+static int aufs_flush_dir(struct file *file, fl_owner_t id) -+{ -+ return au_do_flush(file, id, au_do_flush_dir); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_do_fsync_dir_no_file(struct dentry *dentry, int datasync) -+{ -+ int err; -+ aufs_bindex_t bend, bindex; -+ struct inode *inode; -+ struct super_block *sb; -+ -+ err = 0; -+ sb = dentry->d_sb; -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ bend = au_dbend(dentry); -+ for (bindex = au_dbstart(dentry); !err && bindex <= bend; bindex++) { -+ struct path h_path; -+ -+ if (au_test_ro(sb, bindex, inode)) -+ continue; -+ h_path.dentry = au_h_dptr(dentry, bindex); -+ if (!h_path.dentry) -+ continue; -+ -+ h_path.mnt = au_sbr_mnt(sb, bindex); -+ err = vfsub_fsync(NULL, &h_path, datasync); -+ } -+ -+ return err; -+} -+ -+static int au_do_fsync_dir(struct file *file, int datasync) -+{ -+ int err; -+ aufs_bindex_t bend, bindex; -+ struct 
file *h_file; -+ struct super_block *sb; -+ struct inode *inode; -+ -+ err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1); -+ if (unlikely(err)) -+ goto out; -+ -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ bend = au_fbend_dir(file); -+ for (bindex = au_fbstart(file); !err && bindex <= bend; bindex++) { -+ h_file = au_hf_dir(file, bindex); -+ if (!h_file || au_test_ro(sb, bindex, inode)) -+ continue; -+ -+ err = vfsub_fsync(h_file, &h_file->f_path, datasync); -+ } -+ -+out: -+ return err; -+} -+ -+/* -+ * @file may be NULL -+ */ -+static int aufs_fsync_dir(struct file *file, loff_t start, loff_t end, -+ int datasync) -+{ -+ int err; -+ struct dentry *dentry; -+ struct super_block *sb; -+ struct mutex *mtx; -+ -+ err = 0; -+ dentry = file->f_dentry; -+ mtx = &dentry->d_inode->i_mutex; -+ mutex_lock(mtx); -+ sb = dentry->d_sb; -+ si_noflush_read_lock(sb); -+ if (file) -+ err = au_do_fsync_dir(file, datasync); -+ else { -+ di_write_lock_child(dentry); -+ err = au_do_fsync_dir_no_file(dentry, datasync); -+ } -+ au_cpup_attr_timesizes(dentry->d_inode); -+ di_write_unlock(dentry); -+ if (file) -+ fi_write_unlock(file); -+ -+ si_read_unlock(sb); -+ mutex_unlock(mtx); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_iterate(struct file *file, struct dir_context *ctx) -+{ -+ int err; -+ struct dentry *dentry; -+ struct inode *inode, *h_inode; -+ struct super_block *sb; -+ -+ AuDbg("%pD, ctx{%pf, %llu}\n", file, ctx->actor, ctx->pos); -+ -+ dentry = file->f_dentry; -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ -+ sb = dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ err = au_reval_and_lock_fdi(file, reopen_dir, /*wlock*/1); -+ if (unlikely(err)) -+ goto out; -+ err = au_alive_dir(dentry); -+ if (!err) -+ err = au_vdir_init(file); -+ di_downgrade_lock(dentry, AuLock_IR); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ if 
(!au_test_nfsd()) { -+ err = au_vdir_fill_de(file, ctx); -+ fsstack_copy_attr_atime(inode, h_inode); -+ } else { -+ /* -+ * nfsd filldir may call lookup_one_len(), vfs_getattr(), -+ * encode_fh() and others. -+ */ -+ atomic_inc(&h_inode->i_count); -+ di_read_unlock(dentry, AuLock_IR); -+ si_read_unlock(sb); -+ err = au_vdir_fill_de(file, ctx); -+ fsstack_copy_attr_atime(inode, h_inode); -+ fi_write_unlock(file); -+ iput(h_inode); -+ -+ AuTraceErr(err); -+ return err; -+ } -+ -+out_unlock: -+ di_read_unlock(dentry, AuLock_IR); -+ fi_write_unlock(file); -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define AuTestEmpty_WHONLY 1 -+#define AuTestEmpty_CALLED (1 << 1) -+#define AuTestEmpty_SHWH (1 << 2) -+#define au_ftest_testempty(flags, name) ((flags) & AuTestEmpty_##name) -+#define au_fset_testempty(flags, name) \ -+ do { (flags) |= AuTestEmpty_##name; } while (0) -+#define au_fclr_testempty(flags, name) \ -+ do { (flags) &= ~AuTestEmpty_##name; } while (0) -+ -+#ifndef CONFIG_AUFS_SHWH -+#undef AuTestEmpty_SHWH -+#define AuTestEmpty_SHWH 0 -+#endif -+ -+struct test_empty_arg { -+ struct dir_context ctx; -+ struct au_nhash *whlist; -+ unsigned int flags; -+ int err; -+ aufs_bindex_t bindex; -+}; -+ -+static int test_empty_cb(struct dir_context *ctx, const char *__name, -+ int namelen, loff_t offset __maybe_unused, u64 ino, -+ unsigned int d_type) -+{ -+ struct test_empty_arg *arg = container_of(ctx, struct test_empty_arg, -+ ctx); -+ char *name = (void *)__name; -+ -+ arg->err = 0; -+ au_fset_testempty(arg->flags, CALLED); -+ /* smp_mb(); */ -+ if (name[0] == '.' -+ && (namelen == 1 || (name[1] == '.' 
&& namelen == 2))) -+ goto out; /* success */ -+ -+ if (namelen <= AUFS_WH_PFX_LEN -+ || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) { -+ if (au_ftest_testempty(arg->flags, WHONLY) -+ && !au_nhash_test_known_wh(arg->whlist, name, namelen)) -+ arg->err = -ENOTEMPTY; -+ goto out; -+ } -+ -+ name += AUFS_WH_PFX_LEN; -+ namelen -= AUFS_WH_PFX_LEN; -+ if (!au_nhash_test_known_wh(arg->whlist, name, namelen)) -+ arg->err = au_nhash_append_wh -+ (arg->whlist, name, namelen, ino, d_type, arg->bindex, -+ au_ftest_testempty(arg->flags, SHWH)); -+ -+out: -+ /* smp_mb(); */ -+ AuTraceErr(arg->err); -+ return arg->err; -+} -+ -+static int do_test_empty(struct dentry *dentry, struct test_empty_arg *arg) -+{ -+ int err; -+ struct file *h_file; -+ -+ h_file = au_h_open(dentry, arg->bindex, -+ O_RDONLY | O_NONBLOCK | O_DIRECTORY | O_LARGEFILE, -+ /*file*/NULL, /*force_wr*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = 0; -+ if (!au_opt_test(au_mntflags(dentry->d_sb), UDBA_NONE) -+ && !file_inode(h_file)->i_nlink) -+ goto out_put; -+ -+ do { -+ arg->err = 0; -+ au_fclr_testempty(arg->flags, CALLED); -+ /* smp_mb(); */ -+ err = vfsub_iterate_dir(h_file, &arg->ctx); -+ if (err >= 0) -+ err = arg->err; -+ } while (!err && au_ftest_testempty(arg->flags, CALLED)); -+ -+out_put: -+ fput(h_file); -+ au_sbr_put(dentry->d_sb, arg->bindex); -+out: -+ return err; -+} -+ -+struct do_test_empty_args { -+ int *errp; -+ struct dentry *dentry; -+ struct test_empty_arg *arg; -+}; -+ -+static void call_do_test_empty(void *args) -+{ -+ struct do_test_empty_args *a = args; -+ *a->errp = do_test_empty(a->dentry, a->arg); -+} -+ -+static int sio_test_empty(struct dentry *dentry, struct test_empty_arg *arg) -+{ -+ int err, wkq_err; -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ -+ h_dentry = au_h_dptr(dentry, arg->bindex); -+ h_inode = h_dentry->d_inode; -+ /* todo: i_mode changes anytime? 
*/ -+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD); -+ err = au_test_h_perm_sio(h_inode, MAY_EXEC | MAY_READ); -+ mutex_unlock(&h_inode->i_mutex); -+ if (!err) -+ err = do_test_empty(dentry, arg); -+ else { -+ struct do_test_empty_args args = { -+ .errp = &err, -+ .dentry = dentry, -+ .arg = arg -+ }; -+ unsigned int flags = arg->flags; -+ -+ wkq_err = au_wkq_wait(call_do_test_empty, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ arg->flags = flags; -+ } -+ -+ return err; -+} -+ -+int au_test_empty_lower(struct dentry *dentry) -+{ -+ int err; -+ unsigned int rdhash; -+ aufs_bindex_t bindex, bstart, btail; -+ struct au_nhash whlist; -+ struct test_empty_arg arg = { -+ .ctx = { -+ .actor = au_diractor(test_empty_cb) -+ } -+ }; -+ int (*test_empty)(struct dentry *dentry, struct test_empty_arg *arg); -+ -+ SiMustAnyLock(dentry->d_sb); -+ -+ rdhash = au_sbi(dentry->d_sb)->si_rdhash; -+ if (!rdhash) -+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, dentry)); -+ err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ -+ arg.flags = 0; -+ arg.whlist = &whlist; -+ bstart = au_dbstart(dentry); -+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)) -+ au_fset_testempty(arg.flags, SHWH); -+ test_empty = do_test_empty; -+ if (au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1)) -+ test_empty = sio_test_empty; -+ arg.bindex = bstart; -+ err = test_empty(dentry, &arg); -+ if (unlikely(err)) -+ goto out_whlist; -+ -+ au_fset_testempty(arg.flags, WHONLY); -+ btail = au_dbtaildir(dentry); -+ for (bindex = bstart + 1; !err && bindex <= btail; bindex++) { -+ struct dentry *h_dentry; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry && h_dentry->d_inode) { -+ arg.bindex = bindex; -+ err = test_empty(dentry, &arg); -+ } -+ } -+ -+out_whlist: -+ au_nhash_wh_free(&whlist); -+out: -+ return err; -+} -+ -+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist) -+{ -+ int err; -+ struct test_empty_arg arg = { -+ .ctx = { -+ 
.actor = au_diractor(test_empty_cb) -+ } -+ }; -+ aufs_bindex_t bindex, btail; -+ -+ err = 0; -+ arg.whlist = whlist; -+ arg.flags = AuTestEmpty_WHONLY; -+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH)) -+ au_fset_testempty(arg.flags, SHWH); -+ btail = au_dbtaildir(dentry); -+ for (bindex = au_dbstart(dentry); !err && bindex <= btail; bindex++) { -+ struct dentry *h_dentry; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry && h_dentry->d_inode) { -+ arg.bindex = bindex; -+ err = sio_test_empty(dentry, &arg); -+ } -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+const struct file_operations aufs_dir_fop = { -+ .owner = THIS_MODULE, -+ .llseek = default_llseek, -+ .read = generic_read_dir, -+ .iterate = aufs_iterate, -+ .unlocked_ioctl = aufs_ioctl_dir, -+#ifdef CONFIG_COMPAT -+ .compat_ioctl = aufs_compat_ioctl_dir, -+#endif -+ .open = aufs_open_dir, -+ .release = aufs_release_dir, -+ .flush = aufs_flush_dir, -+ .fsync = aufs_fsync_dir -+}; -diff --git a/fs/aufs/dir.h b/fs/aufs/dir.h -new file mode 100644 -index 0000000..16821f9 ---- /dev/null -+++ b/fs/aufs/dir.h -@@ -0,0 +1,131 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * directory operations -+ */ -+ -+#ifndef __AUFS_DIR_H__ -+#define __AUFS_DIR_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* need to be faster and smaller */ -+ -+struct au_nhash { -+ unsigned int nh_num; -+ struct hlist_head *nh_head; -+}; -+ -+struct au_vdir_destr { -+ unsigned char len; -+ unsigned char name[0]; -+} __packed; -+ -+struct au_vdir_dehstr { -+ struct hlist_node hash; -+ struct au_vdir_destr *str; -+} ____cacheline_aligned_in_smp; -+ -+struct au_vdir_de { -+ ino_t de_ino; -+ unsigned char de_type; -+ /* caution: packed */ -+ struct au_vdir_destr de_str; -+} __packed; -+ -+struct au_vdir_wh { -+ struct hlist_node wh_hash; -+#ifdef CONFIG_AUFS_SHWH -+ ino_t wh_ino; -+ aufs_bindex_t wh_bindex; -+ unsigned char wh_type; -+#else -+ aufs_bindex_t wh_bindex; -+#endif -+ /* caution: packed */ -+ struct au_vdir_destr wh_str; -+} __packed; -+ -+union au_vdir_deblk_p { -+ unsigned char *deblk; -+ struct au_vdir_de *de; -+}; -+ -+struct au_vdir { -+ unsigned char **vd_deblk; -+ unsigned long vd_nblk; -+ struct { -+ unsigned long ul; -+ union au_vdir_deblk_p p; -+ } vd_last; -+ -+ unsigned long vd_version; -+ unsigned int vd_deblk_sz; -+ unsigned long vd_jiffy; -+} ____cacheline_aligned_in_smp; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* dir.c */ -+extern const struct file_operations aufs_dir_fop; -+void au_add_nlink(struct inode *dir, struct inode *h_dir); -+void au_sub_nlink(struct inode *dir, struct inode *h_dir); -+loff_t au_dir_size(struct file *file, struct dentry *dentry); -+void au_dir_ts(struct inode *dir, aufs_bindex_t bsrc); -+int au_test_empty_lower(struct dentry *dentry); -+int au_test_empty(struct dentry *dentry, struct au_nhash *whlist); -+ -+/* vdir.c */ -+unsigned int au_rdhash_est(loff_t sz); -+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp); -+void 
au_nhash_wh_free(struct au_nhash *whlist); -+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt, -+ int limit); -+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen); -+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino, -+ unsigned int d_type, aufs_bindex_t bindex, -+ unsigned char shwh); -+void au_vdir_free(struct au_vdir *vdir); -+int au_vdir_init(struct file *file); -+int au_vdir_fill_de(struct file *file, struct dir_context *ctx); -+ -+/* ioctl.c */ -+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg); -+ -+#ifdef CONFIG_AUFS_RDU -+/* rdu.c */ -+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg); -+#ifdef CONFIG_COMPAT -+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd, -+ unsigned long arg); -+#endif -+#else -+AuStub(long, au_rdu_ioctl, return -EINVAL, struct file *file, -+ unsigned int cmd, unsigned long arg) -+#ifdef CONFIG_COMPAT -+AuStub(long, au_rdu_compat_ioctl, return -EINVAL, struct file *file, -+ unsigned int cmd, unsigned long arg) -+#endif -+#endif -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_DIR_H__ */ -diff --git a/fs/aufs/dynop.c b/fs/aufs/dynop.c -new file mode 100644 -index 0000000..d758805 ---- /dev/null -+++ b/fs/aufs/dynop.c -@@ -0,0 +1,379 @@ -+/* -+ * Copyright (C) 2010-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * dynamically customizable operations for regular files -+ */ -+ -+#include "aufs.h" -+ -+#define DyPrSym(key) AuDbgSym(key->dk_op.dy_hop) -+ -+/* -+ * How large will these lists be? -+ * Usually just a few elements, 20-30 at most for each, I guess. -+ */ -+static struct au_splhead dynop[AuDyLast]; -+ -+static struct au_dykey *dy_gfind_get(struct au_splhead *spl, const void *h_op) -+{ -+ struct au_dykey *key, *tmp; -+ struct list_head *head; -+ -+ key = NULL; -+ head = &spl->head; -+ rcu_read_lock(); -+ list_for_each_entry_rcu(tmp, head, dk_list) -+ if (tmp->dk_op.dy_hop == h_op) { -+ key = tmp; -+ kref_get(&key->dk_kref); -+ break; -+ } -+ rcu_read_unlock(); -+ -+ return key; -+} -+ -+static struct au_dykey *dy_bradd(struct au_branch *br, struct au_dykey *key) -+{ -+ struct au_dykey **k, *found; -+ const void *h_op = key->dk_op.dy_hop; -+ int i; -+ -+ found = NULL; -+ k = br->br_dykey; -+ for (i = 0; i < AuBrDynOp; i++) -+ if (k[i]) { -+ if (k[i]->dk_op.dy_hop == h_op) { -+ found = k[i]; -+ break; -+ } -+ } else -+ break; -+ if (!found) { -+ spin_lock(&br->br_dykey_lock); -+ for (; i < AuBrDynOp; i++) -+ if (k[i]) { -+ if (k[i]->dk_op.dy_hop == h_op) { -+ found = k[i]; -+ break; -+ } -+ } else { -+ k[i] = key; -+ break; -+ } -+ spin_unlock(&br->br_dykey_lock); -+ BUG_ON(i == AuBrDynOp); /* expand the array */ -+ } -+ -+ return found; -+} -+ -+/* kref_get() if @key is already added */ -+static struct au_dykey *dy_gadd(struct au_splhead *spl, struct au_dykey *key) -+{ -+ struct au_dykey *tmp, *found; -+ struct list_head *head; -+ const void *h_op = key->dk_op.dy_hop; -+ -+ found = NULL; -+ head = &spl->head; -+ spin_lock(&spl->spin); -+ list_for_each_entry(tmp, head, dk_list) -+ if (tmp->dk_op.dy_hop == h_op) { -+ kref_get(&tmp->dk_kref); -+ found = tmp; -+ break; -+ } -+ if (!found) -+ list_add_rcu(&key->dk_list, head); -+ 
spin_unlock(&spl->spin); -+ -+ if (!found) -+ DyPrSym(key); -+ return found; -+} -+ -+static void dy_free_rcu(struct rcu_head *rcu) -+{ -+ struct au_dykey *key; -+ -+ key = container_of(rcu, struct au_dykey, dk_rcu); -+ DyPrSym(key); -+ kfree(key); -+} -+ -+static void dy_free(struct kref *kref) -+{ -+ struct au_dykey *key; -+ struct au_splhead *spl; -+ -+ key = container_of(kref, struct au_dykey, dk_kref); -+ spl = dynop + key->dk_op.dy_type; -+ au_spl_del_rcu(&key->dk_list, spl); -+ call_rcu(&key->dk_rcu, dy_free_rcu); -+} -+ -+void au_dy_put(struct au_dykey *key) -+{ -+ kref_put(&key->dk_kref, dy_free); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define DyDbgSize(cnt, op) AuDebugOn(cnt != sizeof(op)/sizeof(void *)) -+ -+#ifdef CONFIG_AUFS_DEBUG -+#define DyDbgDeclare(cnt) unsigned int cnt = 0 -+#define DyDbgInc(cnt) do { cnt++; } while (0) -+#else -+#define DyDbgDeclare(cnt) do {} while (0) -+#define DyDbgInc(cnt) do {} while (0) -+#endif -+ -+#define DySet(func, dst, src, h_op, h_sb) do { \ -+ DyDbgInc(cnt); \ -+ if (h_op->func) { \ -+ if (src.func) \ -+ dst.func = src.func; \ -+ else \ -+ AuDbg("%s %s\n", au_sbtype(h_sb), #func); \ -+ } \ -+} while (0) -+ -+#define DySetForce(func, dst, src) do { \ -+ AuDebugOn(!src.func); \ -+ DyDbgInc(cnt); \ -+ dst.func = src.func; \ -+} while (0) -+ -+#define DySetAop(func) \ -+ DySet(func, dyaop->da_op, aufs_aop, h_aop, h_sb) -+#define DySetAopForce(func) \ -+ DySetForce(func, dyaop->da_op, aufs_aop) -+ -+static void dy_aop(struct au_dykey *key, const void *h_op, -+ struct super_block *h_sb __maybe_unused) -+{ -+ struct au_dyaop *dyaop = (void *)key; -+ const struct address_space_operations *h_aop = h_op; -+ DyDbgDeclare(cnt); -+ -+ AuDbg("%s\n", au_sbtype(h_sb)); -+ -+ DySetAop(writepage); -+ DySetAopForce(readpage); /* force */ -+ DySetAop(writepages); -+ DySetAop(set_page_dirty); -+ DySetAop(readpages); -+ DySetAop(write_begin); -+ DySetAop(write_end); -+ DySetAop(bmap); 
-+ DySetAop(invalidatepage); -+ DySetAop(releasepage); -+ DySetAop(freepage); -+ /* these two will be changed according to an aufs mount option */ -+ DySetAop(direct_IO); -+ DySetAop(get_xip_mem); -+ DySetAop(migratepage); -+ DySetAop(launder_page); -+ DySetAop(is_partially_uptodate); -+ DySetAop(is_dirty_writeback); -+ DySetAop(error_remove_page); -+ DySetAop(swap_activate); -+ DySetAop(swap_deactivate); -+ -+ DyDbgSize(cnt, *h_aop); -+ dyaop->da_get_xip_mem = h_aop->get_xip_mem; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void dy_bug(struct kref *kref) -+{ -+ BUG(); -+} -+ -+static struct au_dykey *dy_get(struct au_dynop *op, struct au_branch *br) -+{ -+ struct au_dykey *key, *old; -+ struct au_splhead *spl; -+ struct op { -+ unsigned int sz; -+ void (*set)(struct au_dykey *key, const void *h_op, -+ struct super_block *h_sb __maybe_unused); -+ }; -+ static const struct op a[] = { -+ [AuDy_AOP] = { -+ .sz = sizeof(struct au_dyaop), -+ .set = dy_aop -+ } -+ }; -+ const struct op *p; -+ -+ spl = dynop + op->dy_type; -+ key = dy_gfind_get(spl, op->dy_hop); -+ if (key) -+ goto out_add; /* success */ -+ -+ p = a + op->dy_type; -+ key = kzalloc(p->sz, GFP_NOFS); -+ if (unlikely(!key)) { -+ key = ERR_PTR(-ENOMEM); -+ goto out; -+ } -+ -+ key->dk_op.dy_hop = op->dy_hop; -+ kref_init(&key->dk_kref); -+ p->set(key, op->dy_hop, au_br_sb(br)); -+ old = dy_gadd(spl, key); -+ if (old) { -+ kfree(key); -+ key = old; -+ } -+ -+out_add: -+ old = dy_bradd(br, key); -+ if (old) -+ /* its ref-count should never be zero here */ -+ kref_put(&key->dk_kref, dy_bug); -+out: -+ return key; -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * Aufs prohibits O_DIRECT by defaut even if the branch supports it. -+ * This behaviour is necessary to return an error from open(O_DIRECT) instead -+ * of the succeeding I/O. 
The dio mount option enables O_DIRECT and makes -+ * open(O_DIRECT) always succeed, but the succeeding I/O may return an error. -+ * See the aufs manual in detail. -+ * -+ * To keep this behaviour, aufs has to set NULL to ->get_xip_mem too, and the -+ * performance of fadvise() and madvise() may be affected. -+ */ -+static void dy_adx(struct au_dyaop *dyaop, int do_dx) -+{ -+ if (!do_dx) { -+ dyaop->da_op.direct_IO = NULL; -+ dyaop->da_op.get_xip_mem = NULL; -+ } else { -+ dyaop->da_op.direct_IO = aufs_aop.direct_IO; -+ dyaop->da_op.get_xip_mem = aufs_aop.get_xip_mem; -+ if (!dyaop->da_get_xip_mem) -+ dyaop->da_op.get_xip_mem = NULL; -+ } -+} -+ -+static struct au_dyaop *dy_aget(struct au_branch *br, -+ const struct address_space_operations *h_aop, -+ int do_dx) -+{ -+ struct au_dyaop *dyaop; -+ struct au_dynop op; -+ -+ op.dy_type = AuDy_AOP; -+ op.dy_haop = h_aop; -+ dyaop = (void *)dy_get(&op, br); -+ if (IS_ERR(dyaop)) -+ goto out; -+ dy_adx(dyaop, do_dx); -+ -+out: -+ return dyaop; -+} -+ -+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex, -+ struct inode *h_inode) -+{ -+ int err, do_dx; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct au_dyaop *dyaop; -+ -+ AuDebugOn(!S_ISREG(h_inode->i_mode)); -+ IiMustWriteLock(inode); -+ -+ sb = inode->i_sb; -+ br = au_sbr(sb, bindex); -+ do_dx = !!au_opt_test(au_mntflags(sb), DIO); -+ dyaop = dy_aget(br, h_inode->i_mapping->a_ops, do_dx); -+ err = PTR_ERR(dyaop); -+ if (IS_ERR(dyaop)) -+ /* unnecessary to call dy_fput() */ -+ goto out; -+ -+ err = 0; -+ inode->i_mapping->a_ops = &dyaop->da_op; -+ -+out: -+ return err; -+} -+ -+/* -+ * Is it safe to replace a_ops during the inode/file is in operation? -+ * Yes, I hope so. 
-+ */ -+int au_dy_irefresh(struct inode *inode) -+{ -+ int err; -+ aufs_bindex_t bstart; -+ struct inode *h_inode; -+ -+ err = 0; -+ if (S_ISREG(inode->i_mode)) { -+ bstart = au_ibstart(inode); -+ h_inode = au_h_iptr(inode, bstart); -+ err = au_dy_iaop(inode, bstart, h_inode); -+ } -+ return err; -+} -+ -+void au_dy_arefresh(int do_dx) -+{ -+ struct au_splhead *spl; -+ struct list_head *head; -+ struct au_dykey *key; -+ -+ spl = dynop + AuDy_AOP; -+ head = &spl->head; -+ spin_lock(&spl->spin); -+ list_for_each_entry(key, head, dk_list) -+ dy_adx((void *)key, do_dx); -+ spin_unlock(&spl->spin); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void __init au_dy_init(void) -+{ -+ int i; -+ -+ /* make sure that 'struct au_dykey *' can be any type */ -+ BUILD_BUG_ON(offsetof(struct au_dyaop, da_key)); -+ -+ for (i = 0; i < AuDyLast; i++) -+ au_spl_init(dynop + i); -+} -+ -+void au_dy_fin(void) -+{ -+ int i; -+ -+ for (i = 0; i < AuDyLast; i++) -+ WARN_ON(!list_empty(&dynop[i].head)); -+} -diff --git a/fs/aufs/dynop.h b/fs/aufs/dynop.h -new file mode 100644 -index 0000000..cdf1499 ---- /dev/null -+++ b/fs/aufs/dynop.h -@@ -0,0 +1,76 @@ -+/* -+ * Copyright (C) 2010-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * dynamically customizable operations (for regular files only) -+ */ -+ -+#ifndef __AUFS_DYNOP_H__ -+#define __AUFS_DYNOP_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+ -+enum {AuDy_AOP, AuDyLast}; -+ -+struct au_dynop { -+ int dy_type; -+ union { -+ const void *dy_hop; -+ const struct address_space_operations *dy_haop; -+ }; -+}; -+ -+struct au_dykey { -+ union { -+ struct list_head dk_list; -+ struct rcu_head dk_rcu; -+ }; -+ struct au_dynop dk_op; -+ -+ /* -+ * during I am in the branch local array, kref is gotten. when the -+ * branch is removed, kref is put. -+ */ -+ struct kref dk_kref; -+}; -+ -+/* stop unioning since their sizes are very different from each other */ -+struct au_dyaop { -+ struct au_dykey da_key; -+ struct address_space_operations da_op; /* not const */ -+ int (*da_get_xip_mem)(struct address_space *, pgoff_t, int, -+ void **, unsigned long *); -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* dynop.c */ -+struct au_branch; -+void au_dy_put(struct au_dykey *key); -+int au_dy_iaop(struct inode *inode, aufs_bindex_t bindex, -+ struct inode *h_inode); -+int au_dy_irefresh(struct inode *inode); -+void au_dy_arefresh(int do_dio); -+ -+void __init au_dy_init(void); -+void au_dy_fin(void); -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_DYNOP_H__ */ -diff --git a/fs/aufs/export.c b/fs/aufs/export.c -new file mode 100644 -index 0000000..c5bfa76 ---- /dev/null -+++ b/fs/aufs/export.c -@@ -0,0 +1,831 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * export via nfs -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include "../fs/mount.h" -+#include "aufs.h" -+ -+union conv { -+#ifdef CONFIG_AUFS_INO_T_64 -+ __u32 a[2]; -+#else -+ __u32 a[1]; -+#endif -+ ino_t ino; -+}; -+ -+static ino_t decode_ino(__u32 *a) -+{ -+ union conv u; -+ -+ BUILD_BUG_ON(sizeof(u.ino) != sizeof(u.a)); -+ u.a[0] = a[0]; -+#ifdef CONFIG_AUFS_INO_T_64 -+ u.a[1] = a[1]; -+#endif -+ return u.ino; -+} -+ -+static void encode_ino(__u32 *a, ino_t ino) -+{ -+ union conv u; -+ -+ u.ino = ino; -+ a[0] = u.a[0]; -+#ifdef CONFIG_AUFS_INO_T_64 -+ a[1] = u.a[1]; -+#endif -+} -+ -+/* NFS file handle */ -+enum { -+ Fh_br_id, -+ Fh_sigen, -+#ifdef CONFIG_AUFS_INO_T_64 -+ /* support 64bit inode number */ -+ Fh_ino1, -+ Fh_ino2, -+ Fh_dir_ino1, -+ Fh_dir_ino2, -+#else -+ Fh_ino1, -+ Fh_dir_ino1, -+#endif -+ Fh_igen, -+ Fh_h_type, -+ Fh_tail, -+ -+ Fh_ino = Fh_ino1, -+ Fh_dir_ino = Fh_dir_ino1 -+}; -+ -+static int au_test_anon(struct dentry *dentry) -+{ -+ /* note: read d_flags without d_lock */ -+ return !!(dentry->d_flags & DCACHE_DISCONNECTED); -+} -+ -+int au_test_nfsd(void) -+{ -+ int ret; -+ struct task_struct *tsk = current; -+ char comm[sizeof(tsk->comm)]; -+ -+ ret = 0; -+ if (tsk->flags & PF_KTHREAD) { -+ get_task_comm(comm, tsk); -+ ret = !strcmp(comm, "nfsd"); -+ } -+ -+ return ret; -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* inode generation external table */ -+ -+void au_xigen_inc(struct inode *inode) -+{ -+ loff_t pos; -+ ssize_t sz; -+ __u32 igen; -+ struct super_block *sb; -+ 
struct au_sbinfo *sbinfo; -+ -+ sb = inode->i_sb; -+ AuDebugOn(!au_opt_test(au_mntflags(sb), XINO)); -+ -+ sbinfo = au_sbi(sb); -+ pos = inode->i_ino; -+ pos *= sizeof(igen); -+ igen = inode->i_generation + 1; -+ sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xigen, &igen, -+ sizeof(igen), &pos); -+ if (sz == sizeof(igen)) -+ return; /* success */ -+ -+ if (unlikely(sz >= 0)) -+ AuIOErr("xigen error (%zd)\n", sz); -+} -+ -+int au_xigen_new(struct inode *inode) -+{ -+ int err; -+ loff_t pos; -+ ssize_t sz; -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ struct file *file; -+ -+ err = 0; -+ /* todo: dirty, at mount time */ -+ if (inode->i_ino == AUFS_ROOT_INO) -+ goto out; -+ sb = inode->i_sb; -+ SiMustAnyLock(sb); -+ if (unlikely(!au_opt_test(au_mntflags(sb), XINO))) -+ goto out; -+ -+ err = -EFBIG; -+ pos = inode->i_ino; -+ if (unlikely(au_loff_max / sizeof(inode->i_generation) - 1 < pos)) { -+ AuIOErr1("too large i%lld\n", pos); -+ goto out; -+ } -+ pos *= sizeof(inode->i_generation); -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ file = sbinfo->si_xigen; -+ BUG_ON(!file); -+ -+ if (vfsub_f_size_read(file) -+ < pos + sizeof(inode->i_generation)) { -+ inode->i_generation = atomic_inc_return(&sbinfo->si_xigen_next); -+ sz = xino_fwrite(sbinfo->si_xwrite, file, &inode->i_generation, -+ sizeof(inode->i_generation), &pos); -+ } else -+ sz = xino_fread(sbinfo->si_xread, file, &inode->i_generation, -+ sizeof(inode->i_generation), &pos); -+ if (sz == sizeof(inode->i_generation)) -+ goto out; /* success */ -+ -+ err = sz; -+ if (unlikely(sz >= 0)) { -+ err = -EIO; -+ AuIOErr("xigen error (%zd)\n", sz); -+ } -+ -+out: -+ return err; -+} -+ -+int au_xigen_set(struct super_block *sb, struct file *base) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ struct file *file; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ file = au_xino_create2(base, sbinfo->si_xigen); -+ err = PTR_ERR(file); -+ if (IS_ERR(file)) -+ goto out; -+ err = 0; -+ if (sbinfo->si_xigen) -+ 
fput(sbinfo->si_xigen); -+ sbinfo->si_xigen = file; -+ -+out: -+ return err; -+} -+ -+void au_xigen_clr(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ if (sbinfo->si_xigen) { -+ fput(sbinfo->si_xigen); -+ sbinfo->si_xigen = NULL; -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct dentry *decode_by_ino(struct super_block *sb, ino_t ino, -+ ino_t dir_ino) -+{ -+ struct dentry *dentry, *d; -+ struct inode *inode; -+ unsigned int sigen; -+ -+ dentry = NULL; -+ inode = ilookup(sb, ino); -+ if (!inode) -+ goto out; -+ -+ dentry = ERR_PTR(-ESTALE); -+ sigen = au_sigen(sb); -+ if (unlikely(is_bad_inode(inode) -+ || IS_DEADDIR(inode) -+ || sigen != au_iigen(inode, NULL))) -+ goto out_iput; -+ -+ dentry = NULL; -+ if (!dir_ino || S_ISDIR(inode->i_mode)) -+ dentry = d_find_alias(inode); -+ else { -+ spin_lock(&inode->i_lock); -+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) { -+ spin_lock(&d->d_lock); -+ if (!au_test_anon(d) -+ && d->d_parent->d_inode->i_ino == dir_ino) { -+ dentry = dget_dlock(d); -+ spin_unlock(&d->d_lock); -+ break; -+ } -+ spin_unlock(&d->d_lock); -+ } -+ spin_unlock(&inode->i_lock); -+ } -+ if (unlikely(dentry && au_digen_test(dentry, sigen))) { -+ /* need to refresh */ -+ dput(dentry); -+ dentry = NULL; -+ } -+ -+out_iput: -+ iput(inode); -+out: -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* todo: dirty? 
*/ -+/* if exportfs_decode_fh() passed vfsmount*, we could be happy */ -+ -+struct au_compare_mnt_args { -+ /* input */ -+ struct super_block *sb; -+ -+ /* output */ -+ struct vfsmount *mnt; -+}; -+ -+static int au_compare_mnt(struct vfsmount *mnt, void *arg) -+{ -+ struct au_compare_mnt_args *a = arg; -+ -+ if (mnt->mnt_sb != a->sb) -+ return 0; -+ a->mnt = mntget(mnt); -+ return 1; -+} -+ -+static struct vfsmount *au_mnt_get(struct super_block *sb) -+{ -+ int err; -+ struct path root; -+ struct au_compare_mnt_args args = { -+ .sb = sb -+ }; -+ -+ get_fs_root(current->fs, &root); -+ rcu_read_lock(); -+ err = iterate_mounts(au_compare_mnt, &args, root.mnt); -+ rcu_read_unlock(); -+ path_put(&root); -+ AuDebugOn(!err); -+ AuDebugOn(!args.mnt); -+ return args.mnt; -+} -+ -+struct au_nfsd_si_lock { -+ unsigned int sigen; -+ aufs_bindex_t bindex, br_id; -+ unsigned char force_lock; -+}; -+ -+static int si_nfsd_read_lock(struct super_block *sb, -+ struct au_nfsd_si_lock *nsi_lock) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ -+ si_read_lock(sb, AuLock_FLUSH); -+ -+ /* branch id may be wrapped around */ -+ err = 0; -+ bindex = au_br_index(sb, nsi_lock->br_id); -+ if (bindex >= 0 && nsi_lock->sigen + AUFS_BRANCH_MAX > au_sigen(sb)) -+ goto out; /* success */ -+ -+ err = -ESTALE; -+ bindex = -1; -+ if (!nsi_lock->force_lock) -+ si_read_unlock(sb); -+ -+out: -+ nsi_lock->bindex = bindex; -+ return err; -+} -+ -+struct find_name_by_ino { -+ struct dir_context ctx; -+ int called, found; -+ ino_t ino; -+ char *name; -+ int namelen; -+}; -+ -+static int -+find_name_by_ino(struct dir_context *ctx, const char *name, int namelen, -+ loff_t offset, u64 ino, unsigned int d_type) -+{ -+ struct find_name_by_ino *a = container_of(ctx, struct find_name_by_ino, -+ ctx); -+ -+ a->called++; -+ if (a->ino != ino) -+ return 0; -+ -+ memcpy(a->name, name, namelen); -+ a->namelen = namelen; -+ a->found = 1; -+ return 1; -+} -+ -+static struct dentry *au_lkup_by_ino(struct path *path, ino_t ino, 
-+ struct au_nfsd_si_lock *nsi_lock) -+{ -+ struct dentry *dentry, *parent; -+ struct file *file; -+ struct inode *dir; -+ struct find_name_by_ino arg = { -+ .ctx = { -+ .actor = au_diractor(find_name_by_ino) -+ } -+ }; -+ int err; -+ -+ parent = path->dentry; -+ if (nsi_lock) -+ si_read_unlock(parent->d_sb); -+ file = vfsub_dentry_open(path, au_dir_roflags); -+ dentry = (void *)file; -+ if (IS_ERR(file)) -+ goto out; -+ -+ dentry = ERR_PTR(-ENOMEM); -+ arg.name = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!arg.name)) -+ goto out_file; -+ arg.ino = ino; -+ arg.found = 0; -+ do { -+ arg.called = 0; -+ /* smp_mb(); */ -+ err = vfsub_iterate_dir(file, &arg.ctx); -+ } while (!err && !arg.found && arg.called); -+ dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out_name; -+ /* instead of ENOENT */ -+ dentry = ERR_PTR(-ESTALE); -+ if (!arg.found) -+ goto out_name; -+ -+ /* do not call vfsub_lkup_one() */ -+ dir = parent->d_inode; -+ mutex_lock(&dir->i_mutex); -+ dentry = vfsub_lookup_one_len(arg.name, parent, arg.namelen); -+ mutex_unlock(&dir->i_mutex); -+ AuTraceErrPtr(dentry); -+ if (IS_ERR(dentry)) -+ goto out_name; -+ AuDebugOn(au_test_anon(dentry)); -+ if (unlikely(!dentry->d_inode)) { -+ dput(dentry); -+ dentry = ERR_PTR(-ENOENT); -+ } -+ -+out_name: -+ free_page((unsigned long)arg.name); -+out_file: -+ fput(file); -+out: -+ if (unlikely(nsi_lock -+ && si_nfsd_read_lock(parent->d_sb, nsi_lock) < 0)) -+ if (!IS_ERR(dentry)) { -+ dput(dentry); -+ dentry = ERR_PTR(-ESTALE); -+ } -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+static struct dentry *decode_by_dir_ino(struct super_block *sb, ino_t ino, -+ ino_t dir_ino, -+ struct au_nfsd_si_lock *nsi_lock) -+{ -+ struct dentry *dentry; -+ struct path path; -+ -+ if (dir_ino != AUFS_ROOT_INO) { -+ path.dentry = decode_by_ino(sb, dir_ino, 0); -+ dentry = path.dentry; -+ if (!path.dentry || IS_ERR(path.dentry)) -+ goto out; -+ AuDebugOn(au_test_anon(path.dentry)); -+ } else -+ path.dentry = 
dget(sb->s_root); -+ -+ path.mnt = au_mnt_get(sb); -+ dentry = au_lkup_by_ino(&path, ino, nsi_lock); -+ path_put(&path); -+ -+out: -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int h_acceptable(void *expv, struct dentry *dentry) -+{ -+ return 1; -+} -+ -+static char *au_build_path(struct dentry *h_parent, struct path *h_rootpath, -+ char *buf, int len, struct super_block *sb) -+{ -+ char *p; -+ int n; -+ struct path path; -+ -+ p = d_path(h_rootpath, buf, len); -+ if (IS_ERR(p)) -+ goto out; -+ n = strlen(p); -+ -+ path.mnt = h_rootpath->mnt; -+ path.dentry = h_parent; -+ p = d_path(&path, buf, len); -+ if (IS_ERR(p)) -+ goto out; -+ if (n != 1) -+ p += n; -+ -+ path.mnt = au_mnt_get(sb); -+ path.dentry = sb->s_root; -+ p = d_path(&path, buf, len - strlen(p)); -+ mntput(path.mnt); -+ if (IS_ERR(p)) -+ goto out; -+ if (n != 1) -+ p[strlen(p)] = '/'; -+ -+out: -+ AuTraceErrPtr(p); -+ return p; -+} -+ -+static -+struct dentry *decode_by_path(struct super_block *sb, ino_t ino, __u32 *fh, -+ int fh_len, struct au_nfsd_si_lock *nsi_lock) -+{ -+ struct dentry *dentry, *h_parent, *root; -+ struct super_block *h_sb; -+ char *pathname, *p; -+ struct vfsmount *h_mnt; -+ struct au_branch *br; -+ int err; -+ struct path path; -+ -+ br = au_sbr(sb, nsi_lock->bindex); -+ h_mnt = au_br_mnt(br); -+ h_sb = h_mnt->mnt_sb; -+ /* todo: call lower fh_to_dentry()? fh_to_parent()? 
*/ -+ h_parent = exportfs_decode_fh(h_mnt, (void *)(fh + Fh_tail), -+ fh_len - Fh_tail, fh[Fh_h_type], -+ h_acceptable, /*context*/NULL); -+ dentry = h_parent; -+ if (unlikely(!h_parent || IS_ERR(h_parent))) { -+ AuWarn1("%s decode_fh failed, %ld\n", -+ au_sbtype(h_sb), PTR_ERR(h_parent)); -+ goto out; -+ } -+ dentry = NULL; -+ if (unlikely(au_test_anon(h_parent))) { -+ AuWarn1("%s decode_fh returned a disconnected dentry\n", -+ au_sbtype(h_sb)); -+ goto out_h_parent; -+ } -+ -+ dentry = ERR_PTR(-ENOMEM); -+ pathname = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!pathname)) -+ goto out_h_parent; -+ -+ root = sb->s_root; -+ path.mnt = h_mnt; -+ di_read_lock_parent(root, !AuLock_IR); -+ path.dentry = au_h_dptr(root, nsi_lock->bindex); -+ di_read_unlock(root, !AuLock_IR); -+ p = au_build_path(h_parent, &path, pathname, PAGE_SIZE, sb); -+ dentry = (void *)p; -+ if (IS_ERR(p)) -+ goto out_pathname; -+ -+ si_read_unlock(sb); -+ err = vfsub_kern_path(p, LOOKUP_FOLLOW | LOOKUP_DIRECTORY, &path); -+ dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out_relock; -+ -+ dentry = ERR_PTR(-ENOENT); -+ AuDebugOn(au_test_anon(path.dentry)); -+ if (unlikely(!path.dentry->d_inode)) -+ goto out_path; -+ -+ if (ino != path.dentry->d_inode->i_ino) -+ dentry = au_lkup_by_ino(&path, ino, /*nsi_lock*/NULL); -+ else -+ dentry = dget(path.dentry); -+ -+out_path: -+ path_put(&path); -+out_relock: -+ if (unlikely(si_nfsd_read_lock(sb, nsi_lock) < 0)) -+ if (!IS_ERR(dentry)) { -+ dput(dentry); -+ dentry = ERR_PTR(-ESTALE); -+ } -+out_pathname: -+ free_page((unsigned long)pathname); -+out_h_parent: -+ dput(h_parent); -+out: -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct dentry * -+aufs_fh_to_dentry(struct super_block *sb, struct fid *fid, int fh_len, -+ int fh_type) -+{ -+ struct dentry *dentry; -+ __u32 *fh = fid->raw; -+ struct au_branch *br; -+ ino_t ino, dir_ino; -+ struct 
au_nfsd_si_lock nsi_lock = { -+ .force_lock = 0 -+ }; -+ -+ dentry = ERR_PTR(-ESTALE); -+ /* it should never happen, but the file handle is unreliable */ -+ if (unlikely(fh_len < Fh_tail)) -+ goto out; -+ nsi_lock.sigen = fh[Fh_sigen]; -+ nsi_lock.br_id = fh[Fh_br_id]; -+ -+ /* branch id may be wrapped around */ -+ br = NULL; -+ if (unlikely(si_nfsd_read_lock(sb, &nsi_lock))) -+ goto out; -+ nsi_lock.force_lock = 1; -+ -+ /* is this inode still cached? */ -+ ino = decode_ino(fh + Fh_ino); -+ /* it should never happen */ -+ if (unlikely(ino == AUFS_ROOT_INO)) -+ goto out_unlock; -+ -+ dir_ino = decode_ino(fh + Fh_dir_ino); -+ dentry = decode_by_ino(sb, ino, dir_ino); -+ if (IS_ERR(dentry)) -+ goto out_unlock; -+ if (dentry) -+ goto accept; -+ -+ /* is the parent dir cached? */ -+ br = au_sbr(sb, nsi_lock.bindex); -+ atomic_inc(&br->br_count); -+ dentry = decode_by_dir_ino(sb, ino, dir_ino, &nsi_lock); -+ if (IS_ERR(dentry)) -+ goto out_unlock; -+ if (dentry) -+ goto accept; -+ -+ /* lookup path */ -+ dentry = decode_by_path(sb, ino, fh, fh_len, &nsi_lock); -+ if (IS_ERR(dentry)) -+ goto out_unlock; -+ if (unlikely(!dentry)) -+ /* todo?: make it ESTALE */ -+ goto out_unlock; -+ -+accept: -+ if (!au_digen_test(dentry, au_sigen(sb)) -+ && dentry->d_inode->i_generation == fh[Fh_igen]) -+ goto out_unlock; /* success */ -+ -+ dput(dentry); -+ dentry = ERR_PTR(-ESTALE); -+out_unlock: -+ if (br) -+ atomic_dec(&br->br_count); -+ si_read_unlock(sb); -+out: -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+#if 0 /* reserved for future use */ -+/* support subtreecheck option */ -+static struct dentry *aufs_fh_to_parent(struct super_block *sb, struct fid *fid, -+ int fh_len, int fh_type) -+{ -+ struct dentry *parent; -+ __u32 *fh = fid->raw; -+ ino_t dir_ino; -+ -+ dir_ino = decode_ino(fh + Fh_dir_ino); -+ parent = decode_by_ino(sb, dir_ino, 0); -+ if (IS_ERR(parent)) -+ goto out; -+ if (!parent) -+ parent = decode_by_path(sb, au_br_index(sb, fh[Fh_br_id]), -+ dir_ino, fh, 
fh_len); -+ -+out: -+ AuTraceErrPtr(parent); -+ return parent; -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_encode_fh(struct inode *inode, __u32 *fh, int *max_len, -+ struct inode *dir) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct super_block *sb, *h_sb; -+ struct dentry *dentry, *parent, *h_parent; -+ struct inode *h_dir; -+ struct au_branch *br; -+ -+ err = -ENOSPC; -+ if (unlikely(*max_len <= Fh_tail)) { -+ AuWarn1("NFSv2 client (max_len %d)?\n", *max_len); -+ goto out; -+ } -+ -+ err = FILEID_ROOT; -+ if (inode->i_ino == AUFS_ROOT_INO) { -+ AuDebugOn(inode->i_ino != AUFS_ROOT_INO); -+ goto out; -+ } -+ -+ h_parent = NULL; -+ sb = inode->i_sb; -+ err = si_read_lock(sb, AuLock_FLUSH); -+ if (unlikely(err)) -+ goto out; -+ -+#ifdef CONFIG_AUFS_DEBUG -+ if (unlikely(!au_opt_test(au_mntflags(sb), XINO))) -+ AuWarn1("NFS-exporting requires xino\n"); -+#endif -+ err = -EIO; -+ parent = NULL; -+ ii_read_lock_child(inode); -+ bindex = au_ibstart(inode); -+ if (!dir) { -+ dentry = d_find_any_alias(inode); -+ if (unlikely(!dentry)) -+ goto out_unlock; -+ AuDebugOn(au_test_anon(dentry)); -+ parent = dget_parent(dentry); -+ dput(dentry); -+ if (unlikely(!parent)) -+ goto out_unlock; -+ dir = parent->d_inode; -+ } -+ -+ ii_read_lock_parent(dir); -+ h_dir = au_h_iptr(dir, bindex); -+ ii_read_unlock(dir); -+ if (unlikely(!h_dir)) -+ goto out_parent; -+ h_parent = d_find_any_alias(h_dir); -+ if (unlikely(!h_parent)) -+ goto out_hparent; -+ -+ err = -EPERM; -+ br = au_sbr(sb, bindex); -+ h_sb = au_br_sb(br); -+ if (unlikely(!h_sb->s_export_op)) { -+ AuErr1("%s branch is not exportable\n", au_sbtype(h_sb)); -+ goto out_hparent; -+ } -+ -+ fh[Fh_br_id] = br->br_id; -+ fh[Fh_sigen] = au_sigen(sb); -+ encode_ino(fh + Fh_ino, inode->i_ino); -+ encode_ino(fh + Fh_dir_ino, dir->i_ino); -+ fh[Fh_igen] = inode->i_generation; -+ -+ *max_len -= Fh_tail; -+ fh[Fh_h_type] = exportfs_encode_fh(h_parent, (void *)(fh + 
Fh_tail), -+ max_len, -+ /*connectable or subtreecheck*/0); -+ err = fh[Fh_h_type]; -+ *max_len += Fh_tail; -+ /* todo: macros? */ -+ if (err != FILEID_INVALID) -+ err = 99; -+ else -+ AuWarn1("%s encode_fh failed\n", au_sbtype(h_sb)); -+ -+out_hparent: -+ dput(h_parent); -+out_parent: -+ dput(parent); -+out_unlock: -+ ii_read_unlock(inode); -+ si_read_unlock(sb); -+out: -+ if (unlikely(err < 0)) -+ err = FILEID_INVALID; -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_commit_metadata(struct inode *inode) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct super_block *sb; -+ struct inode *h_inode; -+ int (*f)(struct inode *inode); -+ -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ ii_write_lock_child(inode); -+ bindex = au_ibstart(inode); -+ AuDebugOn(bindex < 0); -+ h_inode = au_h_iptr(inode, bindex); -+ -+ f = h_inode->i_sb->s_export_op->commit_metadata; -+ if (f) -+ err = f(h_inode); -+ else { -+ struct writeback_control wbc = { -+ .sync_mode = WB_SYNC_ALL, -+ .nr_to_write = 0 /* metadata only */ -+ }; -+ -+ err = sync_inode(h_inode, &wbc); -+ } -+ -+ au_cpup_attr_timesizes(inode); -+ ii_write_unlock(inode); -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct export_operations aufs_export_op = { -+ .fh_to_dentry = aufs_fh_to_dentry, -+ /* .fh_to_parent = aufs_fh_to_parent, */ -+ .encode_fh = aufs_encode_fh, -+ .commit_metadata = aufs_commit_metadata -+}; -+ -+void au_export_init(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ __u32 u; -+ -+ sb->s_export_op = &aufs_export_op; -+ sbinfo = au_sbi(sb); -+ sbinfo->si_xigen = NULL; -+ get_random_bytes(&u, sizeof(u)); -+ BUILD_BUG_ON(sizeof(u) != sizeof(int)); -+ atomic_set(&sbinfo->si_xigen_next, u); -+} -diff --git a/fs/aufs/f_op.c b/fs/aufs/f_op.c -new file mode 100644 -index 0000000..b08981a ---- /dev/null -+++ 
b/fs/aufs/f_op.c -@@ -0,0 +1,781 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * file and vm operations -+ */ -+ -+#include -+#include -+#include -+#include -+#include "aufs.h" -+ -+int au_do_open_nondir(struct file *file, int flags, struct file *h_file) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct dentry *dentry, *h_dentry; -+ struct au_finfo *finfo; -+ struct inode *h_inode; -+ -+ FiMustWriteLock(file); -+ -+ err = 0; -+ dentry = file->f_dentry; -+ AuDebugOn(IS_ERR_OR_NULL(dentry)); -+ finfo = au_fi(file); -+ memset(&finfo->fi_htop, 0, sizeof(finfo->fi_htop)); -+ atomic_set(&finfo->fi_mmapped, 0); -+ bindex = au_dbstart(dentry); -+ if (!h_file) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ err = vfsub_test_mntns(file->f_path.mnt, h_dentry->d_sb); -+ if (unlikely(err)) -+ goto out; -+ h_file = au_h_open(dentry, bindex, flags, file, /*force_wr*/0); -+ } else { -+ h_dentry = h_file->f_dentry; -+ err = vfsub_test_mntns(file->f_path.mnt, h_dentry->d_sb); -+ if (unlikely(err)) -+ goto out; -+ get_file(h_file); -+ } -+ if (IS_ERR(h_file)) -+ err = PTR_ERR(h_file); -+ else { -+ if ((flags & __O_TMPFILE) -+ && !(flags & O_EXCL)) { -+ h_inode = file_inode(h_file); -+ spin_lock(&h_inode->i_lock); -+ h_inode->i_state |= I_LINKABLE; -+ spin_unlock(&h_inode->i_lock); -+ } -+ au_set_fbstart(file, bindex); -+ 
au_set_h_fptr(file, bindex, h_file); -+ au_update_figen(file); -+ /* todo: necessary? */ -+ /* file->f_ra = h_file->f_ra; */ -+ } -+ -+out: -+ return err; -+} -+ -+static int aufs_open_nondir(struct inode *inode __maybe_unused, -+ struct file *file) -+{ -+ int err; -+ struct super_block *sb; -+ struct au_do_open_args args = { -+ .open = au_do_open_nondir -+ }; -+ -+ AuDbg("%pD, f_flags 0x%x, f_mode 0x%x\n", -+ file, vfsub_file_flags(file), file->f_mode); -+ -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ err = au_do_open(file, &args); -+ si_read_unlock(sb); -+ return err; -+} -+ -+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file) -+{ -+ struct au_finfo *finfo; -+ aufs_bindex_t bindex; -+ -+ finfo = au_fi(file); -+ au_sphl_del(&finfo->fi_hlist, &au_sbi(file->f_dentry->d_sb)->si_files); -+ bindex = finfo->fi_btop; -+ if (bindex >= 0) -+ au_set_h_fptr(file, bindex, NULL); -+ -+ au_finfo_fin(file); -+ return 0; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_do_flush_nondir(struct file *file, fl_owner_t id) -+{ -+ int err; -+ struct file *h_file; -+ -+ err = 0; -+ h_file = au_hf_top(file); -+ if (h_file) -+ err = vfsub_flush(h_file, id); -+ return err; -+} -+ -+static int aufs_flush_nondir(struct file *file, fl_owner_t id) -+{ -+ return au_do_flush(file, id, au_do_flush_nondir); -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * read and write functions acquire [fdi]_rwsem once, but release before -+ * mmap_sem. This is because to stop a race condition between mmap(2). -+ * Releasing these aufs-rwsem should be safe, no branch-mamagement (by keeping -+ * si_rwsem), no harmful copy-up should happen. Actually copy-up may happen in -+ * read functions after [fdi]_rwsem are released, but it should be harmless. 
-+ */ -+ -+/* Callers should call au_read_post() or fput() in the end */ -+struct file *au_read_pre(struct file *file, int keep_fi) -+{ -+ struct file *h_file; -+ int err; -+ -+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/0); -+ if (!err) { -+ di_read_unlock(file->f_dentry, AuLock_IR); -+ h_file = au_hf_top(file); -+ get_file(h_file); -+ if (!keep_fi) -+ fi_read_unlock(file); -+ } else -+ h_file = ERR_PTR(err); -+ -+ return h_file; -+} -+ -+static void au_read_post(struct inode *inode, struct file *h_file) -+{ -+ /* update without lock, I don't think it a problem */ -+ fsstack_copy_attr_atime(inode, file_inode(h_file)); -+ fput(h_file); -+} -+ -+struct au_write_pre { -+ blkcnt_t blks; -+ aufs_bindex_t bstart; -+}; -+ -+/* -+ * return with iinfo is write-locked -+ * callers should call au_write_post() or iinfo_write_unlock() + fput() in the -+ * end -+ */ -+static struct file *au_write_pre(struct file *file, int do_ready, -+ struct au_write_pre *wpre) -+{ -+ struct file *h_file; -+ struct dentry *dentry; -+ int err; -+ struct au_pin pin; -+ -+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1); -+ h_file = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ dentry = file->f_dentry; -+ if (do_ready) { -+ err = au_ready_to_write(file, -1, &pin); -+ if (unlikely(err)) { -+ h_file = ERR_PTR(err); -+ di_write_unlock(dentry); -+ goto out_fi; -+ } -+ } -+ -+ di_downgrade_lock(dentry, /*flags*/0); -+ if (wpre) -+ wpre->bstart = au_fbstart(file); -+ h_file = au_hf_top(file); -+ get_file(h_file); -+ if (wpre) -+ wpre->blks = file_inode(h_file)->i_blocks; -+ if (do_ready) -+ au_unpin(&pin); -+ di_read_unlock(dentry, /*flags*/0); -+ -+out_fi: -+ fi_write_unlock(file); -+out: -+ return h_file; -+} -+ -+static void au_write_post(struct inode *inode, struct file *h_file, -+ struct au_write_pre *wpre, ssize_t written) -+{ -+ struct inode *h_inode; -+ -+ au_cpup_attr_timesizes(inode); -+ AuDebugOn(au_ibstart(inode) != wpre->bstart); -+ h_inode = 
file_inode(h_file); -+ inode->i_mode = h_inode->i_mode; -+ ii_write_unlock(inode); -+ fput(h_file); -+ -+ /* AuDbg("blks %llu, %llu\n", (u64)blks, (u64)h_inode->i_blocks); */ -+ if (written > 0) -+ au_fhsm_wrote(inode->i_sb, wpre->bstart, -+ /*force*/h_inode->i_blocks > wpre->blks); -+} -+ -+static ssize_t aufs_read(struct file *file, char __user *buf, size_t count, -+ loff_t *ppos) -+{ -+ ssize_t err; -+ struct inode *inode; -+ struct file *h_file; -+ struct super_block *sb; -+ -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ /* filedata may be obsoleted by concurrent copyup, but no problem */ -+ err = vfsub_read_u(h_file, buf, count, ppos); -+ /* todo: necessary? */ -+ /* file->f_ra = h_file->f_ra; */ -+ au_read_post(inode, h_file); -+ -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* -+ * todo: very ugly -+ * it locks both of i_mutex and si_rwsem for read in safe. -+ * if the plink maintenance mode continues forever (that is the problem), -+ * may loop forever. 
-+ */ -+static void au_mtx_and_read_lock(struct inode *inode) -+{ -+ int err; -+ struct super_block *sb = inode->i_sb; -+ -+ while (1) { -+ mutex_lock(&inode->i_mutex); -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (!err) -+ break; -+ mutex_unlock(&inode->i_mutex); -+ si_read_lock(sb, AuLock_NOPLMW); -+ si_read_unlock(sb); -+ } -+} -+ -+static ssize_t aufs_write(struct file *file, const char __user *ubuf, -+ size_t count, loff_t *ppos) -+{ -+ ssize_t err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *h_file; -+ char __user *buf = (char __user *)ubuf; -+ -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = vfsub_write_u(h_file, buf, count, ppos); -+ au_write_post(inode, h_file, &wpre, err); -+ -+out: -+ si_read_unlock(inode->i_sb); -+ mutex_unlock(&inode->i_mutex); -+ return err; -+} -+ -+static ssize_t au_do_iter(struct file *h_file, int rw, struct kiocb *kio, -+ struct iov_iter *iov_iter) -+{ -+ ssize_t err; -+ struct file *file; -+ ssize_t (*iter)(struct kiocb *, struct iov_iter *); -+ ssize_t (*aio)(struct kiocb *, const struct iovec *, unsigned long, -+ loff_t); -+ -+ err = security_file_permission(h_file, rw); -+ if (unlikely(err)) -+ goto out; -+ -+ err = -ENOSYS; -+ iter = NULL; -+ aio = NULL; -+ if (rw == MAY_READ) { -+ iter = h_file->f_op->read_iter; -+ aio = h_file->f_op->aio_read; -+ } else if (rw == MAY_WRITE) { -+ iter = h_file->f_op->write_iter; -+ aio = h_file->f_op->aio_write; -+ } -+ -+ file = kio->ki_filp; -+ kio->ki_filp = h_file; -+ if (iter) { -+ lockdep_off(); -+ err = iter(kio, iov_iter); -+ lockdep_on(); -+ } else if (aio) { -+ lockdep_off(); -+ err = aio(kio, iov_iter->iov, iov_iter->nr_segs, kio->ki_pos); -+ lockdep_on(); -+ } else -+ /* currently there is no such fs */ -+ WARN_ON_ONCE(1); -+ kio->ki_filp = file; -+ -+out: -+ return err; -+} -+ -+static ssize_t 
aufs_read_iter(struct kiocb *kio, struct iov_iter *iov_iter) -+{ -+ ssize_t err; -+ struct file *file, *h_file; -+ struct inode *inode; -+ struct super_block *sb; -+ -+ file = kio->ki_filp; -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = au_do_iter(h_file, MAY_READ, kio, iov_iter); -+ /* todo: necessary? */ -+ /* file->f_ra = h_file->f_ra; */ -+ au_read_post(inode, h_file); -+ -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+static ssize_t aufs_write_iter(struct kiocb *kio, struct iov_iter *iov_iter) -+{ -+ ssize_t err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *file, *h_file; -+ -+ file = kio->ki_filp; -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = au_do_iter(h_file, MAY_WRITE, kio, iov_iter); -+ au_write_post(inode, h_file, &wpre, err); -+ -+out: -+ si_read_unlock(inode->i_sb); -+ mutex_unlock(&inode->i_mutex); -+ return err; -+} -+ -+static ssize_t aufs_splice_read(struct file *file, loff_t *ppos, -+ struct pipe_inode_info *pipe, size_t len, -+ unsigned int flags) -+{ -+ ssize_t err; -+ struct file *h_file; -+ struct inode *inode; -+ struct super_block *sb; -+ -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/1); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ if (au_test_loopback_kthread()) { -+ au_warn_loopback(h_file->f_dentry->d_sb); -+ if (file->f_mapping != h_file->f_mapping) { -+ file->f_mapping = h_file->f_mapping; -+ smp_mb(); /* unnecessary? */ -+ } -+ } -+ fi_read_unlock(file); -+ -+ err = vfsub_splice_to(h_file, ppos, pipe, len, flags); -+ /* todo: necessasry? 
*/ -+ /* file->f_ra = h_file->f_ra; */ -+ au_read_post(inode, h_file); -+ -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+static ssize_t -+aufs_splice_write(struct pipe_inode_info *pipe, struct file *file, loff_t *ppos, -+ size_t len, unsigned int flags) -+{ -+ ssize_t err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *h_file; -+ -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = vfsub_splice_from(pipe, h_file, ppos, len, flags); -+ au_write_post(inode, h_file, &wpre, err); -+ -+out: -+ si_read_unlock(inode->i_sb); -+ mutex_unlock(&inode->i_mutex); -+ return err; -+} -+ -+static long aufs_fallocate(struct file *file, int mode, loff_t offset, -+ loff_t len) -+{ -+ long err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *h_file; -+ -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ lockdep_off(); -+ err = do_fallocate(h_file, mode, offset, len); -+ lockdep_on(); -+ au_write_post(inode, h_file, &wpre, /*written*/1); -+ -+out: -+ si_read_unlock(inode->i_sb); -+ mutex_unlock(&inode->i_mutex); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * The locking order around current->mmap_sem. -+ * - in most and regular cases -+ * file I/O syscall -- aufs_read() or something -+ * -- si_rwsem for read -- mmap_sem -+ * (Note that [fdi]i_rwsem are released before mmap_sem). -+ * - in mmap case -+ * mmap(2) -- mmap_sem -- aufs_mmap() -- si_rwsem for read -- [fdi]i_rwsem -+ * This AB-BA order is definitly bad, but is not a problem since "si_rwsem for -+ * read" allows muliple processes to acquire it and [fdi]i_rwsem are not held in -+ * file I/O. Aufs needs to stop lockdep in aufs_mmap() though. 
-+ * It means that when aufs acquires si_rwsem for write, the process should never -+ * acquire mmap_sem. -+ * -+ * Actually aufs_iterate() holds [fdi]i_rwsem before mmap_sem, but this is not a -+ * problem either since any directory is not able to be mmap-ed. -+ * The similar scenario is applied to aufs_readlink() too. -+ */ -+ -+#if 0 /* stop calling security_file_mmap() */ -+/* cf. linux/include/linux/mman.h: calc_vm_prot_bits() */ -+#define AuConv_VM_PROT(f, b) _calc_vm_trans(f, VM_##b, PROT_##b) -+ -+static unsigned long au_arch_prot_conv(unsigned long flags) -+{ -+ /* currently ppc64 only */ -+#ifdef CONFIG_PPC64 -+ /* cf. linux/arch/powerpc/include/asm/mman.h */ -+ AuDebugOn(arch_calc_vm_prot_bits(-1) != VM_SAO); -+ return AuConv_VM_PROT(flags, SAO); -+#else -+ AuDebugOn(arch_calc_vm_prot_bits(-1)); -+ return 0; -+#endif -+} -+ -+static unsigned long au_prot_conv(unsigned long flags) -+{ -+ return AuConv_VM_PROT(flags, READ) -+ | AuConv_VM_PROT(flags, WRITE) -+ | AuConv_VM_PROT(flags, EXEC) -+ | au_arch_prot_conv(flags); -+} -+ -+/* cf. 
linux/include/linux/mman.h: calc_vm_flag_bits() */ -+#define AuConv_VM_MAP(f, b) _calc_vm_trans(f, VM_##b, MAP_##b) -+ -+static unsigned long au_flag_conv(unsigned long flags) -+{ -+ return AuConv_VM_MAP(flags, GROWSDOWN) -+ | AuConv_VM_MAP(flags, DENYWRITE) -+ | AuConv_VM_MAP(flags, LOCKED); -+} -+#endif -+ -+static int aufs_mmap(struct file *file, struct vm_area_struct *vma) -+{ -+ int err; -+ const unsigned char wlock -+ = (file->f_mode & FMODE_WRITE) && (vma->vm_flags & VM_SHARED); -+ struct super_block *sb; -+ struct file *h_file; -+ struct inode *inode; -+ -+ AuDbgVmRegion(file, vma); -+ -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ lockdep_off(); -+ si_read_lock(sb, AuLock_NOPLMW); -+ -+ h_file = au_write_pre(file, wlock, /*wpre*/NULL); -+ lockdep_on(); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ err = 0; -+ au_set_mmapped(file); -+ au_vm_file_reset(vma, h_file); -+ /* -+ * we cannot call security_mmap_file() here since it may acquire -+ * mmap_sem or i_mutex. -+ * -+ * err = security_mmap_file(h_file, au_prot_conv(vma->vm_flags), -+ * au_flag_conv(vma->vm_flags)); -+ */ -+ if (!err) -+ err = h_file->f_op->mmap(h_file, vma); -+ if (!err) { -+ au_vm_prfile_set(vma, file); -+ fsstack_copy_attr_atime(inode, file_inode(h_file)); -+ goto out_fput; /* success */ -+ } -+ au_unset_mmapped(file); -+ au_vm_file_reset(vma, file); -+ -+out_fput: -+ lockdep_off(); -+ ii_write_unlock(inode); -+ lockdep_on(); -+ fput(h_file); -+out: -+ lockdep_off(); -+ si_read_unlock(sb); -+ lockdep_on(); -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_fsync_nondir(struct file *file, loff_t start, loff_t end, -+ int datasync) -+{ -+ int err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *h_file; -+ -+ err = 0; /* -EBADF; */ /* posix? 
*/ -+ if (unlikely(!(file->f_mode & FMODE_WRITE))) -+ goto out; -+ -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out_unlock; -+ -+ err = vfsub_fsync(h_file, &h_file->f_path, datasync); -+ au_write_post(inode, h_file, &wpre, /*written*/0); -+ -+out_unlock: -+ si_read_unlock(inode->i_sb); -+ mutex_unlock(&inode->i_mutex); -+out: -+ return err; -+} -+ -+/* no one supports this operation, currently */ -+#if 0 -+static int aufs_aio_fsync_nondir(struct kiocb *kio, int datasync) -+{ -+ int err; -+ struct au_write_pre wpre; -+ struct inode *inode; -+ struct file *file, *h_file; -+ -+ err = 0; /* -EBADF; */ /* posix? */ -+ if (unlikely(!(file->f_mode & FMODE_WRITE))) -+ goto out; -+ -+ file = kio->ki_filp; -+ inode = file_inode(file); -+ au_mtx_and_read_lock(inode); -+ -+ h_file = au_write_pre(file, /*do_ready*/1, &wpre); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out_unlock; -+ -+ err = -ENOSYS; -+ h_file = au_hf_top(file); -+ if (h_file->f_op->aio_fsync) { -+ struct mutex *h_mtx; -+ -+ h_mtx = &file_inode(h_file)->i_mutex; -+ if (!is_sync_kiocb(kio)) { -+ get_file(h_file); -+ fput(file); -+ } -+ kio->ki_filp = h_file; -+ err = h_file->f_op->aio_fsync(kio, datasync); -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD); -+ if (!err) -+ vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL); -+ /*ignore*/ -+ mutex_unlock(h_mtx); -+ } -+ au_write_post(inode, h_file, &wpre, /*written*/0); -+ -+out_unlock: -+ si_read_unlock(inode->sb); -+ mutex_unlock(&inode->i_mutex); -+out: -+ return err; -+} -+#endif -+ -+static int aufs_fasync(int fd, struct file *file, int flag) -+{ -+ int err; -+ struct file *h_file; -+ struct super_block *sb; -+ -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ if 
(h_file->f_op->fasync) -+ err = h_file->f_op->fasync(fd, h_file, flag); -+ fput(h_file); /* instead of au_read_post() */ -+ -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+static int aufs_setfl(struct file *file, unsigned long arg) -+{ -+ int err; -+ struct file *h_file; -+ struct super_block *sb; -+ -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ arg |= vfsub_file_flags(file) & FASYNC; /* stop calling h_file->fasync */ -+ err = setfl(/*unused fd*/-1, h_file, arg); -+ fput(h_file); /* instead of au_read_post() */ -+ -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* no one supports this operation, currently */ -+#if 0 -+static ssize_t aufs_sendpage(struct file *file, struct page *page, int offset, -+ size_t len, loff_t *pos, int more) -+{ -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+const struct file_operations aufs_file_fop = { -+ .owner = THIS_MODULE, -+ -+ .llseek = default_llseek, -+ -+ .read = aufs_read, -+ .write = aufs_write, -+ .read_iter = aufs_read_iter, -+ .write_iter = aufs_write_iter, -+ -+#ifdef CONFIG_AUFS_POLL -+ .poll = aufs_poll, -+#endif -+ .unlocked_ioctl = aufs_ioctl_nondir, -+#ifdef CONFIG_COMPAT -+ .compat_ioctl = aufs_compat_ioctl_nondir, -+#endif -+ .mmap = aufs_mmap, -+ .open = aufs_open_nondir, -+ .flush = aufs_flush_nondir, -+ .release = aufs_release_nondir, -+ .fsync = aufs_fsync_nondir, -+ /* .aio_fsync = aufs_aio_fsync_nondir, */ -+ .fasync = aufs_fasync, -+ /* .sendpage = aufs_sendpage, */ -+ .setfl = aufs_setfl, -+ .splice_write = aufs_splice_write, -+ .splice_read = aufs_splice_read, -+#if 0 -+ .aio_splice_write = aufs_aio_splice_write, -+ .aio_splice_read = aufs_aio_splice_read, -+#endif -+ .fallocate = aufs_fallocate -+}; -diff --git 
a/fs/aufs/fhsm.c b/fs/aufs/fhsm.c -new file mode 100644 -index 0000000..5b3ad74 ---- /dev/null -+++ b/fs/aufs/fhsm.c -@@ -0,0 +1,426 @@ -+/* -+ * Copyright (C) 2011-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program; if not, write to the Free Software -+ * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA -+ */ -+ -+/* -+ * File-based Hierarchy Storage Management -+ */ -+ -+#include -+#include -+#include -+#include -+#include "aufs.h" -+ -+static aufs_bindex_t au_fhsm_bottom(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ SiMustAnyLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ AuDebugOn(!fhsm); -+ return fhsm->fhsm_bottom; -+} -+ -+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ AuDebugOn(!fhsm); -+ fhsm->fhsm_bottom = bindex; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_fhsm_test_jiffy(struct au_sbinfo *sbinfo, struct au_branch *br) -+{ -+ struct au_br_fhsm *bf; -+ -+ bf = br->br_fhsm; -+ MtxMustLock(&bf->bf_lock); -+ -+ return !bf->bf_readable -+ || time_after(jiffies, -+ bf->bf_jiffy + sbinfo->si_fhsm.fhsm_expire); -+} -+ -+/* 
---------------------------------------------------------------------- */ -+ -+static void au_fhsm_notify(struct super_block *sb, int val) -+{ -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ SiMustAnyLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ if (au_fhsm_pid(fhsm) -+ && atomic_read(&fhsm->fhsm_readable) != -1) { -+ atomic_set(&fhsm->fhsm_readable, val); -+ if (val) -+ wake_up(&fhsm->fhsm_wqh); -+ } -+} -+ -+static int au_fhsm_stfs(struct super_block *sb, aufs_bindex_t bindex, -+ struct aufs_stfs *rstfs, int do_lock, int do_notify) -+{ -+ int err; -+ struct au_branch *br; -+ struct au_br_fhsm *bf; -+ -+ br = au_sbr(sb, bindex); -+ AuDebugOn(au_br_rdonly(br)); -+ bf = br->br_fhsm; -+ AuDebugOn(!bf); -+ -+ if (do_lock) -+ mutex_lock(&bf->bf_lock); -+ else -+ MtxMustLock(&bf->bf_lock); -+ -+ /* sb->s_root for NFS is unreliable */ -+ err = au_br_stfs(br, &bf->bf_stfs); -+ if (unlikely(err)) { -+ AuErr1("FHSM failed (%d), b%d, ignored.\n", bindex, err); -+ goto out; -+ } -+ -+ bf->bf_jiffy = jiffies; -+ bf->bf_readable = 1; -+ if (do_notify) -+ au_fhsm_notify(sb, /*val*/1); -+ if (rstfs) -+ *rstfs = bf->bf_stfs; -+ -+out: -+ if (do_lock) -+ mutex_unlock(&bf->bf_lock); -+ au_fhsm_notify(sb, /*val*/1); -+ -+ return err; -+} -+ -+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ struct au_branch *br; -+ struct au_br_fhsm *bf; -+ -+ AuDbg("b%d, force %d\n", bindex, force); -+ SiMustAnyLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ if (!au_ftest_si(sbinfo, FHSM) -+ || fhsm->fhsm_bottom == bindex) -+ return; -+ -+ br = au_sbr(sb, bindex); -+ bf = br->br_fhsm; -+ AuDebugOn(!bf); -+ mutex_lock(&bf->bf_lock); -+ if (force -+ || au_fhsm_pid(fhsm) -+ || au_fhsm_test_jiffy(sbinfo, br)) -+ err = au_fhsm_stfs(sb, bindex, /*rstfs*/NULL, /*do_lock*/0, -+ /*do_notify*/1); -+ mutex_unlock(&bf->bf_lock); -+} -+ -+void 
au_fhsm_wrote_all(struct super_block *sb, int force) -+{ -+ aufs_bindex_t bindex, bend; -+ struct au_branch *br; -+ -+ /* exclude the bottom */ -+ bend = au_fhsm_bottom(sb); -+ for (bindex = 0; bindex < bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (au_br_fhsm(br->br_perm)) -+ au_fhsm_wrote(sb, bindex, force); -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static unsigned int au_fhsm_poll(struct file *file, -+ struct poll_table_struct *wait) -+{ -+ unsigned int mask; -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ mask = 0; -+ sbinfo = file->private_data; -+ fhsm = &sbinfo->si_fhsm; -+ poll_wait(file, &fhsm->fhsm_wqh, wait); -+ if (atomic_read(&fhsm->fhsm_readable)) -+ mask = POLLIN /* | POLLRDNORM */; -+ -+ AuTraceErr((int)mask); -+ return mask; -+} -+ -+static int au_fhsm_do_read_one(struct aufs_stbr __user *stbr, -+ struct aufs_stfs *stfs, __s16 brid) -+{ -+ int err; -+ -+ err = copy_to_user(&stbr->stfs, stfs, sizeof(*stfs)); -+ if (!err) -+ err = __put_user(brid, &stbr->brid); -+ if (unlikely(err)) -+ err = -EFAULT; -+ -+ return err; -+} -+ -+static ssize_t au_fhsm_do_read(struct super_block *sb, -+ struct aufs_stbr __user *stbr, size_t count) -+{ -+ ssize_t err; -+ int nstbr; -+ aufs_bindex_t bindex, bend; -+ struct au_branch *br; -+ struct au_br_fhsm *bf; -+ -+ /* except the bottom branch */ -+ err = 0; -+ nstbr = 0; -+ bend = au_fhsm_bottom(sb); -+ for (bindex = 0; !err && bindex < bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (!au_br_fhsm(br->br_perm)) -+ continue; -+ -+ bf = br->br_fhsm; -+ mutex_lock(&bf->bf_lock); -+ if (bf->bf_readable) { -+ err = -EFAULT; -+ if (count >= sizeof(*stbr)) -+ err = au_fhsm_do_read_one(stbr++, &bf->bf_stfs, -+ br->br_id); -+ if (!err) { -+ bf->bf_readable = 0; -+ count -= sizeof(*stbr); -+ nstbr++; -+ } -+ } -+ mutex_unlock(&bf->bf_lock); -+ } -+ if (!err) -+ err = sizeof(*stbr) * nstbr; -+ -+ return err; -+} -+ -+static ssize_t au_fhsm_read(struct 
file *file, char __user *buf, size_t count, -+ loff_t *pos) -+{ -+ ssize_t err; -+ int readable; -+ aufs_bindex_t nfhsm, bindex, bend; -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ struct au_branch *br; -+ struct super_block *sb; -+ -+ err = 0; -+ sbinfo = file->private_data; -+ fhsm = &sbinfo->si_fhsm; -+need_data: -+ spin_lock_irq(&fhsm->fhsm_wqh.lock); -+ if (!atomic_read(&fhsm->fhsm_readable)) { -+ if (vfsub_file_flags(file) & O_NONBLOCK) -+ err = -EAGAIN; -+ else -+ err = wait_event_interruptible_locked_irq -+ (fhsm->fhsm_wqh, -+ atomic_read(&fhsm->fhsm_readable)); -+ } -+ spin_unlock_irq(&fhsm->fhsm_wqh.lock); -+ if (unlikely(err)) -+ goto out; -+ -+ /* sb may already be dead */ -+ au_rw_read_lock(&sbinfo->si_rwsem); -+ readable = atomic_read(&fhsm->fhsm_readable); -+ if (readable > 0) { -+ sb = sbinfo->si_sb; -+ AuDebugOn(!sb); -+ /* exclude the bottom branch */ -+ nfhsm = 0; -+ bend = au_fhsm_bottom(sb); -+ for (bindex = 0; bindex < bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (au_br_fhsm(br->br_perm)) -+ nfhsm++; -+ } -+ err = -EMSGSIZE; -+ if (nfhsm * sizeof(struct aufs_stbr) <= count) { -+ atomic_set(&fhsm->fhsm_readable, 0); -+ err = au_fhsm_do_read(sbinfo->si_sb, (void __user *)buf, -+ count); -+ } -+ } -+ au_rw_read_unlock(&sbinfo->si_rwsem); -+ if (!readable) -+ goto need_data; -+ -+out: -+ return err; -+} -+ -+static int au_fhsm_release(struct inode *inode, struct file *file) -+{ -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ /* sb may already be dead */ -+ sbinfo = file->private_data; -+ fhsm = &sbinfo->si_fhsm; -+ spin_lock(&fhsm->fhsm_spin); -+ fhsm->fhsm_pid = 0; -+ spin_unlock(&fhsm->fhsm_spin); -+ kobject_put(&sbinfo->si_kobj); -+ -+ return 0; -+} -+ -+static const struct file_operations au_fhsm_fops = { -+ .owner = THIS_MODULE, -+ .llseek = noop_llseek, -+ .read = au_fhsm_read, -+ .poll = au_fhsm_poll, -+ .release = au_fhsm_release -+}; -+ -+int au_fhsm_fd(struct super_block *sb, int oflags) -+{ -+ int err, fd; 
-+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ -+ err = -EPERM; -+ if (unlikely(!capable(CAP_SYS_ADMIN))) -+ goto out; -+ -+ err = -EINVAL; -+ if (unlikely(oflags & ~(O_CLOEXEC | O_NONBLOCK))) -+ goto out; -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ spin_lock(&fhsm->fhsm_spin); -+ if (!fhsm->fhsm_pid) -+ fhsm->fhsm_pid = current->pid; -+ else -+ err = -EBUSY; -+ spin_unlock(&fhsm->fhsm_spin); -+ if (unlikely(err)) -+ goto out; -+ -+ oflags |= O_RDONLY; -+ /* oflags |= FMODE_NONOTIFY; */ -+ fd = anon_inode_getfd("[aufs_fhsm]", &au_fhsm_fops, sbinfo, oflags); -+ err = fd; -+ if (unlikely(fd < 0)) -+ goto out_pid; -+ -+ /* succeed reglardless 'fhsm' status */ -+ kobject_get(&sbinfo->si_kobj); -+ si_noflush_read_lock(sb); -+ if (au_ftest_si(sbinfo, FHSM)) -+ au_fhsm_wrote_all(sb, /*force*/0); -+ si_read_unlock(sb); -+ goto out; /* success */ -+ -+out_pid: -+ spin_lock(&fhsm->fhsm_spin); -+ fhsm->fhsm_pid = 0; -+ spin_unlock(&fhsm->fhsm_spin); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_fhsm_br_alloc(struct au_branch *br) -+{ -+ int err; -+ -+ err = 0; -+ br->br_fhsm = kmalloc(sizeof(*br->br_fhsm), GFP_NOFS); -+ if (br->br_fhsm) -+ au_br_fhsm_init(br->br_fhsm); -+ else -+ err = -ENOMEM; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_fhsm_fin(struct super_block *sb) -+{ -+ au_fhsm_notify(sb, /*val*/-1); -+} -+ -+void au_fhsm_init(struct au_sbinfo *sbinfo) -+{ -+ struct au_fhsm *fhsm; -+ -+ fhsm = &sbinfo->si_fhsm; -+ spin_lock_init(&fhsm->fhsm_spin); -+ init_waitqueue_head(&fhsm->fhsm_wqh); -+ atomic_set(&fhsm->fhsm_readable, 0); -+ fhsm->fhsm_expire -+ = msecs_to_jiffies(AUFS_FHSM_CACHE_DEF_SEC * MSEC_PER_SEC); -+ fhsm->fhsm_bottom = -1; -+} -+ -+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec) -+{ -+ sbinfo->si_fhsm.fhsm_expire -+ = msecs_to_jiffies(sec * 
MSEC_PER_SEC); -+} -+ -+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo) -+{ -+ unsigned int u; -+ -+ if (!au_ftest_si(sbinfo, FHSM)) -+ return; -+ -+ u = jiffies_to_msecs(sbinfo->si_fhsm.fhsm_expire) / MSEC_PER_SEC; -+ if (u != AUFS_FHSM_CACHE_DEF_SEC) -+ seq_printf(seq, ",fhsm_sec=%u", u); -+} -diff --git a/fs/aufs/file.c b/fs/aufs/file.c -new file mode 100644 -index 0000000..12c7620 ---- /dev/null -+++ b/fs/aufs/file.c -@@ -0,0 +1,857 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * handling file/dir, and address_space operation -+ */ -+ -+#ifdef CONFIG_AUFS_DEBUG -+#include -+#endif -+#include -+#include "aufs.h" -+ -+/* drop flags for writing */ -+unsigned int au_file_roflags(unsigned int flags) -+{ -+ flags &= ~(O_WRONLY | O_RDWR | O_APPEND | O_CREAT | O_TRUNC); -+ flags |= O_RDONLY | O_NOATIME; -+ return flags; -+} -+ -+/* common functions to regular file and dir */ -+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags, -+ struct file *file, int force_wr) -+{ -+ struct file *h_file; -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct path h_path; -+ int err; -+ -+ /* a race condition can happen between open and unlink/rmdir */ -+ h_file = ERR_PTR(-ENOENT); -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (au_test_nfsd() && !h_dentry) -+ goto out; -+ h_inode = h_dentry->d_inode; -+ if (au_test_nfsd() && !h_inode) -+ goto out; -+ spin_lock(&h_dentry->d_lock); -+ err = (!d_unhashed(dentry) && d_unlinked(h_dentry)) -+ || !h_inode -+ /* || !dentry->d_inode->i_nlink */ -+ ; -+ spin_unlock(&h_dentry->d_lock); -+ if (unlikely(err)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ br = au_sbr(sb, bindex); -+ err = au_br_test_oflag(flags, br); -+ h_file = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ /* drop flags for writing */ -+ if (au_test_ro(sb, bindex, dentry->d_inode)) { -+ if (force_wr && !(flags & O_WRONLY)) -+ force_wr = 0; -+ flags = au_file_roflags(flags); -+ if (force_wr) { -+ h_file = ERR_PTR(-EROFS); -+ flags = au_file_roflags(flags); -+ if (unlikely(vfsub_native_ro(h_inode) -+ || IS_APPEND(h_inode))) -+ goto out; -+ flags &= ~O_ACCMODE; -+ flags |= O_WRONLY; -+ } -+ } -+ flags &= ~O_CREAT; -+ atomic_inc(&br->br_count); -+ h_path.dentry = h_dentry; -+ h_path.mnt = au_br_mnt(br); -+ h_file = vfsub_dentry_open(&h_path, flags); -+ if (IS_ERR(h_file)) -+ goto out_br; -+ -+ if (flags & __FMODE_EXEC) { -+ err = 
deny_write_access(h_file); -+ if (unlikely(err)) { -+ fput(h_file); -+ h_file = ERR_PTR(err); -+ goto out_br; -+ } -+ } -+ fsnotify_open(h_file); -+ goto out; /* success */ -+ -+out_br: -+ atomic_dec(&br->br_count); -+out: -+ return h_file; -+} -+ -+static int au_cmoo(struct dentry *dentry) -+{ -+ int err, cmoo; -+ unsigned int udba; -+ struct path h_path; -+ struct au_pin pin; -+ struct au_cp_generic cpg = { -+ .dentry = dentry, -+ .bdst = -1, -+ .bsrc = -1, -+ .len = -1, -+ .pin = &pin, -+ .flags = AuCpup_DTIME | AuCpup_HOPEN -+ }; -+ struct inode *inode, *delegated; -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ struct au_fhsm *fhsm; -+ pid_t pid; -+ struct au_branch *br; -+ struct dentry *parent; -+ struct au_hinode *hdir; -+ -+ DiMustWriteLock(dentry); -+ inode = dentry->d_inode; -+ IiMustWriteLock(inode); -+ -+ err = 0; -+ if (IS_ROOT(dentry)) -+ goto out; -+ cpg.bsrc = au_dbstart(dentry); -+ if (!cpg.bsrc) -+ goto out; -+ -+ sb = dentry->d_sb; -+ sbinfo = au_sbi(sb); -+ fhsm = &sbinfo->si_fhsm; -+ pid = au_fhsm_pid(fhsm); -+ if (pid -+ && (current->pid == pid -+ || current->real_parent->pid == pid)) -+ goto out; -+ -+ br = au_sbr(sb, cpg.bsrc); -+ cmoo = au_br_cmoo(br->br_perm); -+ if (!cmoo) -+ goto out; -+ if (!S_ISREG(inode->i_mode)) -+ cmoo &= AuBrAttr_COO_ALL; -+ if (!cmoo) -+ goto out; -+ -+ parent = dget_parent(dentry); -+ di_write_lock_parent(parent); -+ err = au_wbr_do_copyup_bu(dentry, cpg.bsrc - 1); -+ cpg.bdst = err; -+ if (unlikely(err < 0)) { -+ err = 0; /* there is no upper writable branch */ -+ goto out_dgrade; -+ } -+ AuDbg("bsrc %d, bdst %d\n", cpg.bsrc, cpg.bdst); -+ -+ /* do not respect the coo attrib for the target branch */ -+ err = au_cpup_dirs(dentry, cpg.bdst); -+ if (unlikely(err)) -+ goto out_dgrade; -+ -+ di_downgrade_lock(parent, AuLock_IR); -+ udba = au_opt_udba(sb); -+ err = au_pin(&pin, dentry, cpg.bdst, udba, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ err = 
au_sio_cpup_simple(&cpg); -+ au_unpin(&pin); -+ if (unlikely(err)) -+ goto out_parent; -+ if (!(cmoo & AuBrWAttr_MOO)) -+ goto out_parent; /* success */ -+ -+ err = au_pin(&pin, dentry, cpg.bsrc, udba, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ h_path.mnt = au_br_mnt(br); -+ h_path.dentry = au_h_dptr(dentry, cpg.bsrc); -+ hdir = au_hi(parent->d_inode, cpg.bsrc); -+ delegated = NULL; -+ err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated, /*force*/1); -+ au_unpin(&pin); -+ /* todo: keep h_dentry or not? */ -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ if (unlikely(err)) { -+ pr_err("unlink %pd after coo failed (%d), ignored\n", -+ dentry, err); -+ err = 0; -+ } -+ goto out_parent; /* success */ -+ -+out_dgrade: -+ di_downgrade_lock(parent, AuLock_IR); -+out_parent: -+ di_read_unlock(parent, AuLock_IR); -+ dput(parent); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_do_open(struct file *file, struct au_do_open_args *args) -+{ -+ int err, no_lock = args->no_lock; -+ struct dentry *dentry; -+ struct au_finfo *finfo; -+ -+ if (!no_lock) -+ err = au_finfo_init(file, args->fidir); -+ else { -+ lockdep_off(); -+ err = au_finfo_init(file, args->fidir); -+ lockdep_on(); -+ } -+ if (unlikely(err)) -+ goto out; -+ -+ dentry = file->f_dentry; -+ AuDebugOn(IS_ERR_OR_NULL(dentry)); -+ if (!no_lock) { -+ di_write_lock_child(dentry); -+ err = au_cmoo(dentry); -+ di_downgrade_lock(dentry, AuLock_IR); -+ if (!err) -+ err = args->open(file, vfsub_file_flags(file), NULL); -+ di_read_unlock(dentry, AuLock_IR); -+ } else { -+ err = au_cmoo(dentry); -+ if (!err) -+ err = args->open(file, vfsub_file_flags(file), -+ args->h_file); -+ if (!err && au_fbstart(file) != au_dbstart(dentry)) -+ /* -+ * cmoo happens after h_file was opened. -+ * need to refresh file later. 
-+ */ -+ atomic_dec(&au_fi(file)->fi_generation); -+ } -+ -+ finfo = au_fi(file); -+ if (!err) { -+ finfo->fi_file = file; -+ au_sphl_add(&finfo->fi_hlist, -+ &au_sbi(file->f_dentry->d_sb)->si_files); -+ } -+ if (!no_lock) -+ fi_write_unlock(file); -+ else { -+ lockdep_off(); -+ fi_write_unlock(file); -+ lockdep_on(); -+ } -+ if (unlikely(err)) { -+ finfo->fi_hdir = NULL; -+ au_finfo_fin(file); -+ } -+ -+out: -+ return err; -+} -+ -+int au_reopen_nondir(struct file *file) -+{ -+ int err; -+ aufs_bindex_t bstart; -+ struct dentry *dentry; -+ struct file *h_file, *h_file_tmp; -+ -+ dentry = file->f_dentry; -+ bstart = au_dbstart(dentry); -+ h_file_tmp = NULL; -+ if (au_fbstart(file) == bstart) { -+ h_file = au_hf_top(file); -+ if (file->f_mode == h_file->f_mode) -+ return 0; /* success */ -+ h_file_tmp = h_file; -+ get_file(h_file_tmp); -+ au_set_h_fptr(file, bstart, NULL); -+ } -+ AuDebugOn(au_fi(file)->fi_hdir); -+ /* -+ * it can happen -+ * file exists on both of rw and ro -+ * open --> dbstart and fbstart are both 0 -+ * prepend a branch as rw, "rw" become ro -+ * remove rw/file -+ * delete the top branch, "rw" becomes rw again -+ * --> dbstart is 1, fbstart is still 0 -+ * write --> fbstart is 0 but dbstart is 1 -+ */ -+ /* AuDebugOn(au_fbstart(file) < bstart); */ -+ -+ h_file = au_h_open(dentry, bstart, vfsub_file_flags(file) & ~O_TRUNC, -+ file, /*force_wr*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) { -+ if (h_file_tmp) { -+ atomic_inc(&au_sbr(dentry->d_sb, bstart)->br_count); -+ au_set_h_fptr(file, bstart, h_file_tmp); -+ h_file_tmp = NULL; -+ } -+ goto out; /* todo: close all? */ -+ } -+ -+ err = 0; -+ au_set_fbstart(file, bstart); -+ au_set_h_fptr(file, bstart, h_file); -+ au_update_figen(file); -+ /* todo: necessary? 
*/ -+ /* file->f_ra = h_file->f_ra; */ -+ -+out: -+ if (h_file_tmp) -+ fput(h_file_tmp); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_reopen_wh(struct file *file, aufs_bindex_t btgt, -+ struct dentry *hi_wh) -+{ -+ int err; -+ aufs_bindex_t bstart; -+ struct au_dinfo *dinfo; -+ struct dentry *h_dentry; -+ struct au_hdentry *hdp; -+ -+ dinfo = au_di(file->f_dentry); -+ AuRwMustWriteLock(&dinfo->di_rwsem); -+ -+ bstart = dinfo->di_bstart; -+ dinfo->di_bstart = btgt; -+ hdp = dinfo->di_hdentry; -+ h_dentry = hdp[0 + btgt].hd_dentry; -+ hdp[0 + btgt].hd_dentry = hi_wh; -+ err = au_reopen_nondir(file); -+ hdp[0 + btgt].hd_dentry = h_dentry; -+ dinfo->di_bstart = bstart; -+ -+ return err; -+} -+ -+static int au_ready_to_write_wh(struct file *file, loff_t len, -+ aufs_bindex_t bcpup, struct au_pin *pin) -+{ -+ int err; -+ struct inode *inode, *h_inode; -+ struct dentry *h_dentry, *hi_wh; -+ struct au_cp_generic cpg = { -+ .dentry = file->f_dentry, -+ .bdst = bcpup, -+ .bsrc = -1, -+ .len = len, -+ .pin = pin -+ }; -+ -+ au_update_dbstart(cpg.dentry); -+ inode = cpg.dentry->d_inode; -+ h_inode = NULL; -+ if (au_dbstart(cpg.dentry) <= bcpup -+ && au_dbend(cpg.dentry) >= bcpup) { -+ h_dentry = au_h_dptr(cpg.dentry, bcpup); -+ if (h_dentry) -+ h_inode = h_dentry->d_inode; -+ } -+ hi_wh = au_hi_wh(inode, bcpup); -+ if (!hi_wh && !h_inode) -+ err = au_sio_cpup_wh(&cpg, file); -+ else -+ /* already copied-up after unlink */ -+ err = au_reopen_wh(file, bcpup, hi_wh); -+ -+ if (!err -+ && (inode->i_nlink > 1 -+ || (inode->i_state & I_LINKABLE)) -+ && au_opt_test(au_mntflags(cpg.dentry->d_sb), PLINK)) -+ au_plink_append(inode, bcpup, au_h_dptr(cpg.dentry, bcpup)); -+ -+ return err; -+} -+ -+/* -+ * prepare the @file for writing. 
-+ */ -+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin) -+{ -+ int err; -+ aufs_bindex_t dbstart; -+ struct dentry *parent; -+ struct inode *inode; -+ struct super_block *sb; -+ struct file *h_file; -+ struct au_cp_generic cpg = { -+ .dentry = file->f_dentry, -+ .bdst = -1, -+ .bsrc = -1, -+ .len = len, -+ .pin = pin, -+ .flags = AuCpup_DTIME -+ }; -+ -+ sb = cpg.dentry->d_sb; -+ inode = cpg.dentry->d_inode; -+ cpg.bsrc = au_fbstart(file); -+ err = au_test_ro(sb, cpg.bsrc, inode); -+ if (!err && (au_hf_top(file)->f_mode & FMODE_WRITE)) { -+ err = au_pin(pin, cpg.dentry, cpg.bsrc, AuOpt_UDBA_NONE, -+ /*flags*/0); -+ goto out; -+ } -+ -+ /* need to cpup or reopen */ -+ parent = dget_parent(cpg.dentry); -+ di_write_lock_parent(parent); -+ err = AuWbrCopyup(au_sbi(sb), cpg.dentry); -+ cpg.bdst = err; -+ if (unlikely(err < 0)) -+ goto out_dgrade; -+ err = 0; -+ -+ if (!d_unhashed(cpg.dentry) && !au_h_dptr(parent, cpg.bdst)) { -+ err = au_cpup_dirs(cpg.dentry, cpg.bdst); -+ if (unlikely(err)) -+ goto out_dgrade; -+ } -+ -+ err = au_pin(pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (unlikely(err)) -+ goto out_dgrade; -+ -+ dbstart = au_dbstart(cpg.dentry); -+ if (dbstart <= cpg.bdst) -+ cpg.bsrc = cpg.bdst; -+ -+ if (dbstart <= cpg.bdst /* just reopen */ -+ || !d_unhashed(cpg.dentry) /* copyup and reopen */ -+ ) { -+ h_file = au_h_open_pre(cpg.dentry, cpg.bsrc, /*force_wr*/0); -+ if (IS_ERR(h_file)) -+ err = PTR_ERR(h_file); -+ else { -+ di_downgrade_lock(parent, AuLock_IR); -+ if (dbstart > cpg.bdst) -+ err = au_sio_cpup_simple(&cpg); -+ if (!err) -+ err = au_reopen_nondir(file); -+ au_h_open_post(cpg.dentry, cpg.bsrc, h_file); -+ } -+ } else { /* copyup as wh and reopen */ -+ /* -+ * since writable hfsplus branch is not supported, -+ * h_open_pre/post() are unnecessary. 
-+ */ -+ err = au_ready_to_write_wh(file, len, cpg.bdst, pin); -+ di_downgrade_lock(parent, AuLock_IR); -+ } -+ -+ if (!err) { -+ au_pin_set_parent_lflag(pin, /*lflag*/0); -+ goto out_dput; /* success */ -+ } -+ au_unpin(pin); -+ goto out_unlock; -+ -+out_dgrade: -+ di_downgrade_lock(parent, AuLock_IR); -+out_unlock: -+ di_read_unlock(parent, AuLock_IR); -+out_dput: -+ dput(parent); -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_do_flush(struct file *file, fl_owner_t id, -+ int (*flush)(struct file *file, fl_owner_t id)) -+{ -+ int err; -+ struct super_block *sb; -+ struct inode *inode; -+ -+ inode = file_inode(file); -+ sb = inode->i_sb; -+ si_noflush_read_lock(sb); -+ fi_read_lock(file); -+ ii_read_lock_child(inode); -+ -+ err = flush(file, id); -+ au_cpup_attr_timesizes(inode); -+ -+ ii_read_unlock(inode); -+ fi_read_unlock(file); -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_file_refresh_by_inode(struct file *file, int *need_reopen) -+{ -+ int err; -+ struct au_pin pin; -+ struct au_finfo *finfo; -+ struct dentry *parent, *hi_wh; -+ struct inode *inode; -+ struct super_block *sb; -+ struct au_cp_generic cpg = { -+ .dentry = file->f_dentry, -+ .bdst = -1, -+ .bsrc = -1, -+ .len = -1, -+ .pin = &pin, -+ .flags = AuCpup_DTIME -+ }; -+ -+ FiMustWriteLock(file); -+ -+ err = 0; -+ finfo = au_fi(file); -+ sb = cpg.dentry->d_sb; -+ inode = cpg.dentry->d_inode; -+ cpg.bdst = au_ibstart(inode); -+ if (cpg.bdst == finfo->fi_btop || IS_ROOT(cpg.dentry)) -+ goto out; -+ -+ parent = dget_parent(cpg.dentry); -+ if (au_test_ro(sb, cpg.bdst, inode)) { -+ di_read_lock_parent(parent, !AuLock_IR); -+ err = AuWbrCopyup(au_sbi(sb), cpg.dentry); -+ cpg.bdst = err; -+ di_read_unlock(parent, !AuLock_IR); -+ if (unlikely(err < 0)) -+ goto out_parent; -+ err = 0; -+ } -+ -+ di_read_lock_parent(parent, AuLock_IR); -+ 
hi_wh = au_hi_wh(inode, cpg.bdst); -+ if (!S_ISDIR(inode->i_mode) -+ && au_opt_test(au_mntflags(sb), PLINK) -+ && au_plink_test(inode) -+ && !d_unhashed(cpg.dentry) -+ && cpg.bdst < au_dbstart(cpg.dentry)) { -+ err = au_test_and_cpup_dirs(cpg.dentry, cpg.bdst); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ /* always superio. */ -+ err = au_pin(&pin, cpg.dentry, cpg.bdst, AuOpt_UDBA_NONE, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (!err) { -+ err = au_sio_cpup_simple(&cpg); -+ au_unpin(&pin); -+ } -+ } else if (hi_wh) { -+ /* already copied-up after unlink */ -+ err = au_reopen_wh(file, cpg.bdst, hi_wh); -+ *need_reopen = 0; -+ } -+ -+out_unlock: -+ di_read_unlock(parent, AuLock_IR); -+out_parent: -+ dput(parent); -+out: -+ return err; -+} -+ -+static void au_do_refresh_dir(struct file *file) -+{ -+ aufs_bindex_t bindex, bend, new_bindex, brid; -+ struct au_hfile *p, tmp, *q; -+ struct au_finfo *finfo; -+ struct super_block *sb; -+ struct au_fidir *fidir; -+ -+ FiMustWriteLock(file); -+ -+ sb = file->f_dentry->d_sb; -+ finfo = au_fi(file); -+ fidir = finfo->fi_hdir; -+ AuDebugOn(!fidir); -+ p = fidir->fd_hfile + finfo->fi_btop; -+ brid = p->hf_br->br_id; -+ bend = fidir->fd_bbot; -+ for (bindex = finfo->fi_btop; bindex <= bend; bindex++, p++) { -+ if (!p->hf_file) -+ continue; -+ -+ new_bindex = au_br_index(sb, p->hf_br->br_id); -+ if (new_bindex == bindex) -+ continue; -+ if (new_bindex < 0) { -+ au_set_h_fptr(file, bindex, NULL); -+ continue; -+ } -+ -+ /* swap two lower inode, and loop again */ -+ q = fidir->fd_hfile + new_bindex; -+ tmp = *q; -+ *q = *p; -+ *p = tmp; -+ if (tmp.hf_file) { -+ bindex--; -+ p--; -+ } -+ } -+ -+ p = fidir->fd_hfile; -+ if (!au_test_mmapped(file) && !d_unlinked(file->f_dentry)) { -+ bend = au_sbend(sb); -+ for (finfo->fi_btop = 0; finfo->fi_btop <= bend; -+ finfo->fi_btop++, p++) -+ if (p->hf_file) { -+ if (file_inode(p->hf_file)) -+ break; -+ au_hfput(p, file); -+ } -+ } else { -+ bend = au_br_index(sb, brid); -+ for 
(finfo->fi_btop = 0; finfo->fi_btop < bend; -+ finfo->fi_btop++, p++) -+ if (p->hf_file) -+ au_hfput(p, file); -+ bend = au_sbend(sb); -+ } -+ -+ p = fidir->fd_hfile + bend; -+ for (fidir->fd_bbot = bend; fidir->fd_bbot >= finfo->fi_btop; -+ fidir->fd_bbot--, p--) -+ if (p->hf_file) { -+ if (file_inode(p->hf_file)) -+ break; -+ au_hfput(p, file); -+ } -+ AuDebugOn(fidir->fd_bbot < finfo->fi_btop); -+} -+ -+/* -+ * after branch manipulating, refresh the file. -+ */ -+static int refresh_file(struct file *file, int (*reopen)(struct file *file)) -+{ -+ int err, need_reopen; -+ aufs_bindex_t bend, bindex; -+ struct dentry *dentry; -+ struct au_finfo *finfo; -+ struct au_hfile *hfile; -+ -+ dentry = file->f_dentry; -+ finfo = au_fi(file); -+ if (!finfo->fi_hdir) { -+ hfile = &finfo->fi_htop; -+ AuDebugOn(!hfile->hf_file); -+ bindex = au_br_index(dentry->d_sb, hfile->hf_br->br_id); -+ AuDebugOn(bindex < 0); -+ if (bindex != finfo->fi_btop) -+ au_set_fbstart(file, bindex); -+ } else { -+ err = au_fidir_realloc(finfo, au_sbend(dentry->d_sb) + 1); -+ if (unlikely(err)) -+ goto out; -+ au_do_refresh_dir(file); -+ } -+ -+ err = 0; -+ need_reopen = 1; -+ if (!au_test_mmapped(file)) -+ err = au_file_refresh_by_inode(file, &need_reopen); -+ if (!err && need_reopen && !d_unlinked(dentry)) -+ err = reopen(file); -+ if (!err) { -+ au_update_figen(file); -+ goto out; /* success */ -+ } -+ -+ /* error, close all lower files */ -+ if (finfo->fi_hdir) { -+ bend = au_fbend_dir(file); -+ for (bindex = au_fbstart(file); bindex <= bend; bindex++) -+ au_set_h_fptr(file, bindex, NULL); -+ } -+ -+out: -+ return err; -+} -+ -+/* common function to regular file and dir */ -+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file), -+ int wlock) -+{ -+ int err; -+ unsigned int sigen, figen; -+ aufs_bindex_t bstart; -+ unsigned char pseudo_link; -+ struct dentry *dentry; -+ struct inode *inode; -+ -+ err = 0; -+ dentry = file->f_dentry; -+ inode = dentry->d_inode; -+ sigen = 
au_sigen(dentry->d_sb); -+ fi_write_lock(file); -+ figen = au_figen(file); -+ di_write_lock_child(dentry); -+ bstart = au_dbstart(dentry); -+ pseudo_link = (bstart != au_ibstart(inode)); -+ if (sigen == figen && !pseudo_link && au_fbstart(file) == bstart) { -+ if (!wlock) { -+ di_downgrade_lock(dentry, AuLock_IR); -+ fi_downgrade_lock(file); -+ } -+ goto out; /* success */ -+ } -+ -+ AuDbg("sigen %d, figen %d\n", sigen, figen); -+ if (au_digen_test(dentry, sigen)) { -+ err = au_reval_dpath(dentry, sigen); -+ AuDebugOn(!err && au_digen_test(dentry, sigen)); -+ } -+ -+ if (!err) -+ err = refresh_file(file, reopen); -+ if (!err) { -+ if (!wlock) { -+ di_downgrade_lock(dentry, AuLock_IR); -+ fi_downgrade_lock(file); -+ } -+ } else { -+ di_write_unlock(dentry); -+ fi_write_unlock(file); -+ } -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* cf. aufs_nopage() */ -+/* for madvise(2) */ -+static int aufs_readpage(struct file *file __maybe_unused, struct page *page) -+{ -+ unlock_page(page); -+ return 0; -+} -+ -+/* it will never be called, but necessary to support O_DIRECT */ -+static ssize_t aufs_direct_IO(int rw, struct kiocb *iocb, -+ struct iov_iter *iter, loff_t offset) -+{ BUG(); return 0; } -+ -+/* -+ * it will never be called, but madvise and fadvise behaves differently -+ * when get_xip_mem is defined -+ */ -+static int aufs_get_xip_mem(struct address_space *mapping, pgoff_t pgoff, -+ int create, void **kmem, unsigned long *pfn) -+{ BUG(); return 0; } -+ -+/* they will never be called. 
*/ -+#ifdef CONFIG_AUFS_DEBUG -+static int aufs_write_begin(struct file *file, struct address_space *mapping, -+ loff_t pos, unsigned len, unsigned flags, -+ struct page **pagep, void **fsdata) -+{ AuUnsupport(); return 0; } -+static int aufs_write_end(struct file *file, struct address_space *mapping, -+ loff_t pos, unsigned len, unsigned copied, -+ struct page *page, void *fsdata) -+{ AuUnsupport(); return 0; } -+static int aufs_writepage(struct page *page, struct writeback_control *wbc) -+{ AuUnsupport(); return 0; } -+ -+static int aufs_set_page_dirty(struct page *page) -+{ AuUnsupport(); return 0; } -+static void aufs_invalidatepage(struct page *page, unsigned int offset, -+ unsigned int length) -+{ AuUnsupport(); } -+static int aufs_releasepage(struct page *page, gfp_t gfp) -+{ AuUnsupport(); return 0; } -+#if 0 /* called by memory compaction regardless file */ -+static int aufs_migratepage(struct address_space *mapping, struct page *newpage, -+ struct page *page, enum migrate_mode mode) -+{ AuUnsupport(); return 0; } -+#endif -+static int aufs_launder_page(struct page *page) -+{ AuUnsupport(); return 0; } -+static int aufs_is_partially_uptodate(struct page *page, -+ unsigned long from, -+ unsigned long count) -+{ AuUnsupport(); return 0; } -+static void aufs_is_dirty_writeback(struct page *page, bool *dirty, -+ bool *writeback) -+{ AuUnsupport(); } -+static int aufs_error_remove_page(struct address_space *mapping, -+ struct page *page) -+{ AuUnsupport(); return 0; } -+static int aufs_swap_activate(struct swap_info_struct *sis, struct file *file, -+ sector_t *span) -+{ AuUnsupport(); return 0; } -+static void aufs_swap_deactivate(struct file *file) -+{ AuUnsupport(); } -+#endif /* CONFIG_AUFS_DEBUG */ -+ -+const struct address_space_operations aufs_aop = { -+ .readpage = aufs_readpage, -+ .direct_IO = aufs_direct_IO, -+ .get_xip_mem = aufs_get_xip_mem, -+#ifdef CONFIG_AUFS_DEBUG -+ .writepage = aufs_writepage, -+ /* no writepages, because of writepage */ -+ 
.set_page_dirty = aufs_set_page_dirty, -+ /* no readpages, because of readpage */ -+ .write_begin = aufs_write_begin, -+ .write_end = aufs_write_end, -+ /* no bmap, no block device */ -+ .invalidatepage = aufs_invalidatepage, -+ .releasepage = aufs_releasepage, -+ /* is fallback_migrate_page ok? */ -+ /* .migratepage = aufs_migratepage, */ -+ .launder_page = aufs_launder_page, -+ .is_partially_uptodate = aufs_is_partially_uptodate, -+ .is_dirty_writeback = aufs_is_dirty_writeback, -+ .error_remove_page = aufs_error_remove_page, -+ .swap_activate = aufs_swap_activate, -+ .swap_deactivate = aufs_swap_deactivate -+#endif /* CONFIG_AUFS_DEBUG */ -+}; -diff --git a/fs/aufs/file.h b/fs/aufs/file.h -new file mode 100644 -index 0000000..564be91 ---- /dev/null -+++ b/fs/aufs/file.h -@@ -0,0 +1,291 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * file operations -+ */ -+ -+#ifndef __AUFS_FILE_H__ -+#define __AUFS_FILE_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+#include -+#include "rwsem.h" -+ -+struct au_branch; -+struct au_hfile { -+ struct file *hf_file; -+ struct au_branch *hf_br; -+}; -+ -+struct au_vdir; -+struct au_fidir { -+ aufs_bindex_t fd_bbot; -+ aufs_bindex_t fd_nent; -+ struct au_vdir *fd_vdir_cache; -+ struct au_hfile fd_hfile[]; -+}; -+ -+static inline int au_fidir_sz(int nent) -+{ -+ AuDebugOn(nent < 0); -+ return sizeof(struct au_fidir) + sizeof(struct au_hfile) * nent; -+} -+ -+struct au_finfo { -+ atomic_t fi_generation; -+ -+ struct au_rwsem fi_rwsem; -+ aufs_bindex_t fi_btop; -+ -+ /* do not union them */ -+ struct { /* for non-dir */ -+ struct au_hfile fi_htop; -+ atomic_t fi_mmapped; -+ }; -+ struct au_fidir *fi_hdir; /* for dir only */ -+ -+ struct hlist_node fi_hlist; -+ struct file *fi_file; /* very ugly */ -+} ____cacheline_aligned_in_smp; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* file.c */ -+extern const struct address_space_operations aufs_aop; -+unsigned int au_file_roflags(unsigned int flags); -+struct file *au_h_open(struct dentry *dentry, aufs_bindex_t bindex, int flags, -+ struct file *file, int force_wr); -+struct au_do_open_args { -+ int no_lock; -+ int (*open)(struct file *file, int flags, -+ struct file *h_file); -+ struct au_fidir *fidir; -+ struct file *h_file; -+}; -+int au_do_open(struct file *file, struct au_do_open_args *args); -+int au_reopen_nondir(struct file *file); -+struct au_pin; -+int au_ready_to_write(struct file *file, loff_t len, struct au_pin *pin); -+int au_reval_and_lock_fdi(struct file *file, int (*reopen)(struct file *file), -+ int wlock); -+int au_do_flush(struct file *file, fl_owner_t id, -+ int (*flush)(struct file *file, fl_owner_t id)); -+ -+/* poll.c */ -+#ifdef CONFIG_AUFS_POLL -+unsigned int aufs_poll(struct file *file, poll_table *wait); -+#endif -+ -+#ifdef 
CONFIG_AUFS_BR_HFSPLUS -+/* hfsplus.c */ -+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex, -+ int force_wr); -+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex, -+ struct file *h_file); -+#else -+AuStub(struct file *, au_h_open_pre, return NULL, struct dentry *dentry, -+ aufs_bindex_t bindex, int force_wr) -+AuStubVoid(au_h_open_post, struct dentry *dentry, aufs_bindex_t bindex, -+ struct file *h_file); -+#endif -+ -+/* f_op.c */ -+extern const struct file_operations aufs_file_fop; -+int au_do_open_nondir(struct file *file, int flags, struct file *h_file); -+int aufs_release_nondir(struct inode *inode __maybe_unused, struct file *file); -+struct file *au_read_pre(struct file *file, int keep_fi); -+ -+/* finfo.c */ -+void au_hfput(struct au_hfile *hf, struct file *file); -+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex, -+ struct file *h_file); -+ -+void au_update_figen(struct file *file); -+struct au_fidir *au_fidir_alloc(struct super_block *sb); -+int au_fidir_realloc(struct au_finfo *finfo, int nbr); -+ -+void au_fi_init_once(void *_fi); -+void au_finfo_fin(struct file *file); -+int au_finfo_init(struct file *file, struct au_fidir *fidir); -+ -+/* ioctl.c */ -+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg); -+#ifdef CONFIG_COMPAT -+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd, -+ unsigned long arg); -+long aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd, -+ unsigned long arg); -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct au_finfo *au_fi(struct file *file) -+{ -+ return file->private_data; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * fi_read_lock, fi_write_lock, -+ * fi_read_unlock, fi_write_unlock, fi_downgrade_lock -+ */ -+AuSimpleRwsemFuncs(fi, struct file *f, &au_fi(f)->fi_rwsem); -+ -+#define FiMustNoWaiters(f) 
AuRwMustNoWaiters(&au_fi(f)->fi_rwsem) -+#define FiMustAnyLock(f) AuRwMustAnyLock(&au_fi(f)->fi_rwsem) -+#define FiMustWriteLock(f) AuRwMustWriteLock(&au_fi(f)->fi_rwsem) -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* todo: hard/soft set? */ -+static inline aufs_bindex_t au_fbstart(struct file *file) -+{ -+ FiMustAnyLock(file); -+ return au_fi(file)->fi_btop; -+} -+ -+static inline aufs_bindex_t au_fbend_dir(struct file *file) -+{ -+ FiMustAnyLock(file); -+ AuDebugOn(!au_fi(file)->fi_hdir); -+ return au_fi(file)->fi_hdir->fd_bbot; -+} -+ -+static inline struct au_vdir *au_fvdir_cache(struct file *file) -+{ -+ FiMustAnyLock(file); -+ AuDebugOn(!au_fi(file)->fi_hdir); -+ return au_fi(file)->fi_hdir->fd_vdir_cache; -+} -+ -+static inline void au_set_fbstart(struct file *file, aufs_bindex_t bindex) -+{ -+ FiMustWriteLock(file); -+ au_fi(file)->fi_btop = bindex; -+} -+ -+static inline void au_set_fbend_dir(struct file *file, aufs_bindex_t bindex) -+{ -+ FiMustWriteLock(file); -+ AuDebugOn(!au_fi(file)->fi_hdir); -+ au_fi(file)->fi_hdir->fd_bbot = bindex; -+} -+ -+static inline void au_set_fvdir_cache(struct file *file, -+ struct au_vdir *vdir_cache) -+{ -+ FiMustWriteLock(file); -+ AuDebugOn(!au_fi(file)->fi_hdir); -+ au_fi(file)->fi_hdir->fd_vdir_cache = vdir_cache; -+} -+ -+static inline struct file *au_hf_top(struct file *file) -+{ -+ FiMustAnyLock(file); -+ AuDebugOn(au_fi(file)->fi_hdir); -+ return au_fi(file)->fi_htop.hf_file; -+} -+ -+static inline struct file *au_hf_dir(struct file *file, aufs_bindex_t bindex) -+{ -+ FiMustAnyLock(file); -+ AuDebugOn(!au_fi(file)->fi_hdir); -+ return au_fi(file)->fi_hdir->fd_hfile[0 + bindex].hf_file; -+} -+ -+/* todo: memory barrier? 
*/ -+static inline unsigned int au_figen(struct file *f) -+{ -+ return atomic_read(&au_fi(f)->fi_generation); -+} -+ -+static inline void au_set_mmapped(struct file *f) -+{ -+ if (atomic_inc_return(&au_fi(f)->fi_mmapped)) -+ return; -+ pr_warn("fi_mmapped wrapped around\n"); -+ while (!atomic_inc_return(&au_fi(f)->fi_mmapped)) -+ ; -+} -+ -+static inline void au_unset_mmapped(struct file *f) -+{ -+ atomic_dec(&au_fi(f)->fi_mmapped); -+} -+ -+static inline int au_test_mmapped(struct file *f) -+{ -+ return atomic_read(&au_fi(f)->fi_mmapped); -+} -+ -+/* customize vma->vm_file */ -+ -+static inline void au_do_vm_file_reset(struct vm_area_struct *vma, -+ struct file *file) -+{ -+ struct file *f; -+ -+ f = vma->vm_file; -+ get_file(file); -+ vma->vm_file = file; -+ fput(f); -+} -+ -+#ifdef CONFIG_MMU -+#define AuDbgVmRegion(file, vma) do {} while (0) -+ -+static inline void au_vm_file_reset(struct vm_area_struct *vma, -+ struct file *file) -+{ -+ au_do_vm_file_reset(vma, file); -+} -+#else -+#define AuDbgVmRegion(file, vma) \ -+ AuDebugOn((vma)->vm_region && (vma)->vm_region->vm_file != (file)) -+ -+static inline void au_vm_file_reset(struct vm_area_struct *vma, -+ struct file *file) -+{ -+ struct file *f; -+ -+ au_do_vm_file_reset(vma, file); -+ f = vma->vm_region->vm_file; -+ get_file(file); -+ vma->vm_region->vm_file = file; -+ fput(f); -+} -+#endif /* CONFIG_MMU */ -+ -+/* handle vma->vm_prfile */ -+static inline void au_vm_prfile_set(struct vm_area_struct *vma, -+ struct file *file) -+{ -+ get_file(file); -+ vma->vm_prfile = file; -+#ifndef CONFIG_MMU -+ get_file(file); -+ vma->vm_region->vm_prfile = file; -+#endif -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_FILE_H__ */ -diff --git a/fs/aufs/finfo.c b/fs/aufs/finfo.c -new file mode 100644 -index 0000000..7e25db3 ---- /dev/null -+++ b/fs/aufs/finfo.c -@@ -0,0 +1,156 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * file private data -+ */ -+ -+#include "aufs.h" -+ -+void au_hfput(struct au_hfile *hf, struct file *file) -+{ -+ /* todo: direct access f_flags */ -+ if (vfsub_file_flags(file) & __FMODE_EXEC) -+ allow_write_access(hf->hf_file); -+ fput(hf->hf_file); -+ hf->hf_file = NULL; -+ atomic_dec(&hf->hf_br->br_count); -+ hf->hf_br = NULL; -+} -+ -+void au_set_h_fptr(struct file *file, aufs_bindex_t bindex, struct file *val) -+{ -+ struct au_finfo *finfo = au_fi(file); -+ struct au_hfile *hf; -+ struct au_fidir *fidir; -+ -+ fidir = finfo->fi_hdir; -+ if (!fidir) { -+ AuDebugOn(finfo->fi_btop != bindex); -+ hf = &finfo->fi_htop; -+ } else -+ hf = fidir->fd_hfile + bindex; -+ -+ if (hf && hf->hf_file) -+ au_hfput(hf, file); -+ if (val) { -+ FiMustWriteLock(file); -+ AuDebugOn(IS_ERR_OR_NULL(file->f_dentry)); -+ hf->hf_file = val; -+ hf->hf_br = au_sbr(file->f_dentry->d_sb, bindex); -+ } -+} -+ -+void au_update_figen(struct file *file) -+{ -+ atomic_set(&au_fi(file)->fi_generation, au_digen(file->f_dentry)); -+ /* smp_mb(); */ /* atomic_set */ -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_fidir *au_fidir_alloc(struct super_block *sb) -+{ -+ struct au_fidir *fidir; -+ int nbr; -+ -+ nbr = au_sbend(sb) + 1; -+ if (nbr < 2) -+ nbr = 2; /* initial allocate for 2 
branches */ -+ fidir = kzalloc(au_fidir_sz(nbr), GFP_NOFS); -+ if (fidir) { -+ fidir->fd_bbot = -1; -+ fidir->fd_nent = nbr; -+ } -+ -+ return fidir; -+} -+ -+int au_fidir_realloc(struct au_finfo *finfo, int nbr) -+{ -+ int err; -+ struct au_fidir *fidir, *p; -+ -+ AuRwMustWriteLock(&finfo->fi_rwsem); -+ fidir = finfo->fi_hdir; -+ AuDebugOn(!fidir); -+ -+ err = -ENOMEM; -+ p = au_kzrealloc(fidir, au_fidir_sz(fidir->fd_nent), au_fidir_sz(nbr), -+ GFP_NOFS); -+ if (p) { -+ p->fd_nent = nbr; -+ finfo->fi_hdir = p; -+ err = 0; -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_finfo_fin(struct file *file) -+{ -+ struct au_finfo *finfo; -+ -+ au_nfiles_dec(file->f_dentry->d_sb); -+ -+ finfo = au_fi(file); -+ AuDebugOn(finfo->fi_hdir); -+ AuRwDestroy(&finfo->fi_rwsem); -+ au_cache_free_finfo(finfo); -+} -+ -+void au_fi_init_once(void *_finfo) -+{ -+ struct au_finfo *finfo = _finfo; -+ static struct lock_class_key aufs_fi; -+ -+ au_rw_init(&finfo->fi_rwsem); -+ au_rw_class(&finfo->fi_rwsem, &aufs_fi); -+} -+ -+int au_finfo_init(struct file *file, struct au_fidir *fidir) -+{ -+ int err; -+ struct au_finfo *finfo; -+ struct dentry *dentry; -+ -+ err = -ENOMEM; -+ dentry = file->f_dentry; -+ finfo = au_cache_alloc_finfo(); -+ if (unlikely(!finfo)) -+ goto out; -+ -+ err = 0; -+ au_nfiles_inc(dentry->d_sb); -+ /* verbose coding for lock class name */ -+ if (!fidir) -+ au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcNonDir_FIINFO); -+ else -+ au_rw_class(&finfo->fi_rwsem, au_lc_key + AuLcDir_FIINFO); -+ au_rw_write_lock(&finfo->fi_rwsem); -+ finfo->fi_btop = -1; -+ finfo->fi_hdir = fidir; -+ atomic_set(&finfo->fi_generation, au_digen(dentry)); -+ /* smp_mb(); */ /* atomic_set */ -+ -+ file->private_data = finfo; -+ -+out: -+ return err; -+} -diff --git a/fs/aufs/fstype.h b/fs/aufs/fstype.h -new file mode 100644 -index 0000000..2842400 ---- /dev/null -+++ b/fs/aufs/fstype.h -@@ -0,0 +1,400 @@ -+/* -+ * 
Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * judging filesystem type -+ */ -+ -+#ifndef __AUFS_FSTYPE_H__ -+#define __AUFS_FSTYPE_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+#include -+#include -+ -+static inline int au_test_aufs(struct super_block *sb) -+{ -+ return sb->s_magic == AUFS_SUPER_MAGIC; -+} -+ -+static inline const char *au_sbtype(struct super_block *sb) -+{ -+ return sb->s_type->name; -+} -+ -+static inline int au_test_iso9660(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_ISO9660_FS) || defined(CONFIG_ISO9660_FS_MODULE) -+ return sb->s_magic == ISOFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_romfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_ROMFS_FS) || defined(CONFIG_ROMFS_FS_MODULE) -+ return sb->s_magic == ROMFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_cramfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_CRAMFS) || defined(CONFIG_CRAMFS_MODULE) -+ return sb->s_magic == CRAMFS_MAGIC; -+#endif -+ return 0; -+} -+ -+static inline int au_test_nfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_NFS_FS) || defined(CONFIG_NFS_FS_MODULE) -+ return sb->s_magic == NFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_fuse(struct 
super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_FUSE_FS) || defined(CONFIG_FUSE_FS_MODULE) -+ return sb->s_magic == FUSE_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_xfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_XFS_FS) || defined(CONFIG_XFS_FS_MODULE) -+ return sb->s_magic == XFS_SB_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_tmpfs(struct super_block *sb __maybe_unused) -+{ -+#ifdef CONFIG_TMPFS -+ return sb->s_magic == TMPFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_ecryptfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_ECRYPT_FS) || defined(CONFIG_ECRYPT_FS_MODULE) -+ return !strcmp(au_sbtype(sb), "ecryptfs"); -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_ramfs(struct super_block *sb) -+{ -+ return sb->s_magic == RAMFS_MAGIC; -+} -+ -+static inline int au_test_ubifs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_UBIFS_FS) || defined(CONFIG_UBIFS_FS_MODULE) -+ return sb->s_magic == UBIFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_procfs(struct super_block *sb __maybe_unused) -+{ -+#ifdef CONFIG_PROC_FS -+ return sb->s_magic == PROC_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_sysfs(struct super_block *sb __maybe_unused) -+{ -+#ifdef CONFIG_SYSFS -+ return sb->s_magic == SYSFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_configfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_CONFIGFS_FS) || defined(CONFIG_CONFIGFS_FS_MODULE) -+ return sb->s_magic == CONFIGFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_minix(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_MINIX_FS) || defined(CONFIG_MINIX_FS_MODULE) -+ return sb->s_magic == MINIX3_SUPER_MAGIC -+ || sb->s_magic == MINIX2_SUPER_MAGIC -+ || sb->s_magic == MINIX2_SUPER_MAGIC2 -+ || 
sb->s_magic == MINIX_SUPER_MAGIC -+ || sb->s_magic == MINIX_SUPER_MAGIC2; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_fat(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_FAT_FS) || defined(CONFIG_FAT_FS_MODULE) -+ return sb->s_magic == MSDOS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_msdos(struct super_block *sb) -+{ -+ return au_test_fat(sb); -+} -+ -+static inline int au_test_vfat(struct super_block *sb) -+{ -+ return au_test_fat(sb); -+} -+ -+static inline int au_test_securityfs(struct super_block *sb __maybe_unused) -+{ -+#ifdef CONFIG_SECURITYFS -+ return sb->s_magic == SECURITYFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_squashfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_SQUASHFS) || defined(CONFIG_SQUASHFS_MODULE) -+ return sb->s_magic == SQUASHFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_btrfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_BTRFS_FS) || defined(CONFIG_BTRFS_FS_MODULE) -+ return sb->s_magic == BTRFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_xenfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_XENFS) || defined(CONFIG_XENFS_MODULE) -+ return sb->s_magic == XENFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_debugfs(struct super_block *sb __maybe_unused) -+{ -+#ifdef CONFIG_DEBUG_FS -+ return sb->s_magic == DEBUGFS_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_nilfs(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_NILFS) || defined(CONFIG_NILFS_MODULE) -+ return sb->s_magic == NILFS_SUPER_MAGIC; -+#else -+ return 0; -+#endif -+} -+ -+static inline int au_test_hfsplus(struct super_block *sb __maybe_unused) -+{ -+#if defined(CONFIG_HFSPLUS_FS) || defined(CONFIG_HFSPLUS_FS_MODULE) -+ return sb->s_magic == HFSPLUS_SUPER_MAGIC; -+#else -+ return 0; -+#endif 
-+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * they can't be an aufs branch. -+ */ -+static inline int au_test_fs_unsuppoted(struct super_block *sb) -+{ -+ return -+#ifndef CONFIG_AUFS_BR_RAMFS -+ au_test_ramfs(sb) || -+#endif -+ au_test_procfs(sb) -+ || au_test_sysfs(sb) -+ || au_test_configfs(sb) -+ || au_test_debugfs(sb) -+ || au_test_securityfs(sb) -+ || au_test_xenfs(sb) -+ || au_test_ecryptfs(sb) -+ /* || !strcmp(au_sbtype(sb), "unionfs") */ -+ || au_test_aufs(sb); /* will be supported in next version */ -+} -+ -+static inline int au_test_fs_remote(struct super_block *sb) -+{ -+ return !au_test_tmpfs(sb) -+#ifdef CONFIG_AUFS_BR_RAMFS -+ && !au_test_ramfs(sb) -+#endif -+ && !(sb->s_type->fs_flags & FS_REQUIRES_DEV); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * Note: these functions (below) are created after reading ->getattr() in all -+ * filesystems under linux/fs. it means we have to do so in every update... -+ */ -+ -+/* -+ * some filesystems require getattr to refresh the inode attributes before -+ * referencing. -+ * in most cases, we can rely on the inode attribute in NFS (or every remote fs) -+ * and leave the work for d_revalidate() -+ */ -+static inline int au_test_fs_refresh_iattr(struct super_block *sb) -+{ -+ return au_test_nfs(sb) -+ || au_test_fuse(sb) -+ /* || au_test_btrfs(sb) */ /* untested */ -+ ; -+} -+ -+/* -+ * filesystems which don't maintain i_size or i_blocks. -+ */ -+static inline int au_test_fs_bad_iattr_size(struct super_block *sb) -+{ -+ return au_test_xfs(sb) -+ || au_test_btrfs(sb) -+ || au_test_ubifs(sb) -+ || au_test_hfsplus(sb) /* maintained, but incorrect */ -+ /* || au_test_minix(sb) */ /* untested */ -+ ; -+} -+ -+/* -+ * filesystems which don't store the correct value in some of their inode -+ * attributes. 
-+ */ -+static inline int au_test_fs_bad_iattr(struct super_block *sb) -+{ -+ return au_test_fs_bad_iattr_size(sb) -+ || au_test_fat(sb) -+ || au_test_msdos(sb) -+ || au_test_vfat(sb); -+} -+ -+/* they don't check i_nlink in link(2) */ -+static inline int au_test_fs_no_limit_nlink(struct super_block *sb) -+{ -+ return au_test_tmpfs(sb) -+#ifdef CONFIG_AUFS_BR_RAMFS -+ || au_test_ramfs(sb) -+#endif -+ || au_test_ubifs(sb) -+ || au_test_hfsplus(sb); -+} -+ -+/* -+ * filesystems which sets S_NOATIME and S_NOCMTIME. -+ */ -+static inline int au_test_fs_notime(struct super_block *sb) -+{ -+ return au_test_nfs(sb) -+ || au_test_fuse(sb) -+ || au_test_ubifs(sb) -+ ; -+} -+ -+/* temporary support for i#1 in cramfs */ -+static inline int au_test_fs_unique_ino(struct inode *inode) -+{ -+ if (au_test_cramfs(inode->i_sb)) -+ return inode->i_ino != 1; -+ return 1; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * the filesystem where the xino files placed must support i/o after unlink and -+ * maintain i_size and i_blocks. -+ */ -+static inline int au_test_fs_bad_xino(struct super_block *sb) -+{ -+ return au_test_fs_remote(sb) -+ || au_test_fs_bad_iattr_size(sb) -+ /* don't want unnecessary work for xino */ -+ || au_test_aufs(sb) -+ || au_test_ecryptfs(sb) -+ || au_test_nilfs(sb); -+} -+ -+static inline int au_test_fs_trunc_xino(struct super_block *sb) -+{ -+ return au_test_tmpfs(sb) -+ || au_test_ramfs(sb); -+} -+ -+/* -+ * test if the @sb is real-readonly. -+ */ -+static inline int au_test_fs_rr(struct super_block *sb) -+{ -+ return au_test_squashfs(sb) -+ || au_test_iso9660(sb) -+ || au_test_cramfs(sb) -+ || au_test_romfs(sb); -+} -+ -+/* -+ * test if the @inode is nfs with 'noacl' option -+ * NFS always sets MS_POSIXACL regardless its mount option 'noacl.' 
-+ */ -+static inline int au_test_nfs_noacl(struct inode *inode) -+{ -+ return au_test_nfs(inode->i_sb) -+ /* && IS_POSIXACL(inode) */ -+ && !nfs_server_capable(inode, NFS_CAP_ACLS); -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_FSTYPE_H__ */ -diff --git a/fs/aufs/hfsnotify.c b/fs/aufs/hfsnotify.c -new file mode 100644 -index 0000000..6fa79b0 ---- /dev/null -+++ b/fs/aufs/hfsnotify.c -@@ -0,0 +1,288 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * fsnotify for the lower directories -+ */ -+ -+#include "aufs.h" -+ -+/* FS_IN_IGNORED is unnecessary */ -+static const __u32 AuHfsnMask = (FS_MOVED_TO | FS_MOVED_FROM | FS_DELETE -+ | FS_CREATE | FS_EVENT_ON_CHILD); -+static DECLARE_WAIT_QUEUE_HEAD(au_hfsn_wq); -+static __cacheline_aligned_in_smp atomic64_t au_hfsn_ifree = ATOMIC64_INIT(0); -+ -+static void au_hfsn_free_mark(struct fsnotify_mark *mark) -+{ -+ struct au_hnotify *hn = container_of(mark, struct au_hnotify, -+ hn_mark); -+ AuDbg("here\n"); -+ au_cache_free_hnotify(hn); -+ smp_mb__before_atomic(); -+ if (atomic64_dec_and_test(&au_hfsn_ifree)) -+ wake_up(&au_hfsn_wq); -+} -+ -+static int au_hfsn_alloc(struct au_hinode *hinode) -+{ -+ int err; -+ struct au_hnotify *hn; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct fsnotify_mark *mark; -+ aufs_bindex_t bindex; -+ -+ hn = hinode->hi_notify; -+ sb = hn->hn_aufs_inode->i_sb; -+ bindex = au_br_index(sb, hinode->hi_id); -+ br = au_sbr(sb, bindex); -+ AuDebugOn(!br->br_hfsn); -+ -+ mark = &hn->hn_mark; -+ fsnotify_init_mark(mark, au_hfsn_free_mark); -+ mark->mask = AuHfsnMask; -+ /* -+ * by udba rename or rmdir, aufs assign a new inode to the known -+ * h_inode, so specify 1 to allow dups. 
-+ */ -+ lockdep_off(); -+ err = fsnotify_add_mark(mark, br->br_hfsn->hfsn_group, hinode->hi_inode, -+ /*mnt*/NULL, /*allow_dups*/1); -+ /* even if err */ -+ fsnotify_put_mark(mark); -+ lockdep_on(); -+ -+ return err; -+} -+ -+static int au_hfsn_free(struct au_hinode *hinode, struct au_hnotify *hn) -+{ -+ struct fsnotify_mark *mark; -+ unsigned long long ull; -+ struct fsnotify_group *group; -+ -+ ull = atomic64_inc_return(&au_hfsn_ifree); -+ BUG_ON(!ull); -+ -+ mark = &hn->hn_mark; -+ spin_lock(&mark->lock); -+ group = mark->group; -+ fsnotify_get_group(group); -+ spin_unlock(&mark->lock); -+ lockdep_off(); -+ fsnotify_destroy_mark(mark, group); -+ fsnotify_put_group(group); -+ lockdep_on(); -+ -+ /* free hn by myself */ -+ return 0; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_hfsn_ctl(struct au_hinode *hinode, int do_set) -+{ -+ struct fsnotify_mark *mark; -+ -+ mark = &hinode->hi_notify->hn_mark; -+ spin_lock(&mark->lock); -+ if (do_set) { -+ AuDebugOn(mark->mask & AuHfsnMask); -+ mark->mask |= AuHfsnMask; -+ } else { -+ AuDebugOn(!(mark->mask & AuHfsnMask)); -+ mark->mask &= ~AuHfsnMask; -+ } -+ spin_unlock(&mark->lock); -+ /* fsnotify_recalc_inode_mask(hinode->hi_inode); */ -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* #define AuDbgHnotify */ -+#ifdef AuDbgHnotify -+static char *au_hfsn_name(u32 mask) -+{ -+#ifdef CONFIG_AUFS_DEBUG -+#define test_ret(flag) \ -+ do { \ -+ if (mask & flag) \ -+ return #flag; \ -+ } while (0) -+ test_ret(FS_ACCESS); -+ test_ret(FS_MODIFY); -+ test_ret(FS_ATTRIB); -+ test_ret(FS_CLOSE_WRITE); -+ test_ret(FS_CLOSE_NOWRITE); -+ test_ret(FS_OPEN); -+ test_ret(FS_MOVED_FROM); -+ test_ret(FS_MOVED_TO); -+ test_ret(FS_CREATE); -+ test_ret(FS_DELETE); -+ test_ret(FS_DELETE_SELF); -+ test_ret(FS_MOVE_SELF); -+ test_ret(FS_UNMOUNT); -+ test_ret(FS_Q_OVERFLOW); -+ test_ret(FS_IN_IGNORED); -+ test_ret(FS_ISDIR); -+ 
test_ret(FS_IN_ONESHOT); -+ test_ret(FS_EVENT_ON_CHILD); -+ return ""; -+#undef test_ret -+#else -+ return "??"; -+#endif -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_hfsn_free_group(struct fsnotify_group *group) -+{ -+ struct au_br_hfsnotify *hfsn = group->private; -+ -+ AuDbg("here\n"); -+ kfree(hfsn); -+} -+ -+static int au_hfsn_handle_event(struct fsnotify_group *group, -+ struct inode *inode, -+ struct fsnotify_mark *inode_mark, -+ struct fsnotify_mark *vfsmount_mark, -+ u32 mask, void *data, int data_type, -+ const unsigned char *file_name, u32 cookie) -+{ -+ int err; -+ struct au_hnotify *hnotify; -+ struct inode *h_dir, *h_inode; -+ struct qstr h_child_qstr = QSTR_INIT(file_name, strlen(file_name)); -+ -+ AuDebugOn(data_type != FSNOTIFY_EVENT_INODE); -+ -+ err = 0; -+ /* if FS_UNMOUNT happens, there must be another bug */ -+ AuDebugOn(mask & FS_UNMOUNT); -+ if (mask & (FS_IN_IGNORED | FS_UNMOUNT)) -+ goto out; -+ -+ h_dir = inode; -+ h_inode = NULL; -+#ifdef AuDbgHnotify -+ au_debug_on(); -+ if (1 || h_child_qstr.len != sizeof(AUFS_XINO_FNAME) - 1 -+ || strncmp(h_child_qstr.name, AUFS_XINO_FNAME, h_child_qstr.len)) { -+ AuDbg("i%lu, mask 0x%x %s, hcname %.*s, hi%lu\n", -+ h_dir->i_ino, mask, au_hfsn_name(mask), -+ AuLNPair(&h_child_qstr), h_inode ? 
h_inode->i_ino : 0); -+ /* WARN_ON(1); */ -+ } -+ au_debug_off(); -+#endif -+ -+ AuDebugOn(!inode_mark); -+ hnotify = container_of(inode_mark, struct au_hnotify, hn_mark); -+ err = au_hnotify(h_dir, hnotify, mask, &h_child_qstr, h_inode); -+ -+out: -+ return err; -+} -+ -+static struct fsnotify_ops au_hfsn_ops = { -+ .handle_event = au_hfsn_handle_event, -+ .free_group_priv = au_hfsn_free_group -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_hfsn_fin_br(struct au_branch *br) -+{ -+ struct au_br_hfsnotify *hfsn; -+ -+ hfsn = br->br_hfsn; -+ if (hfsn) { -+ lockdep_off(); -+ fsnotify_put_group(hfsn->hfsn_group); -+ lockdep_on(); -+ } -+} -+ -+static int au_hfsn_init_br(struct au_branch *br, int perm) -+{ -+ int err; -+ struct fsnotify_group *group; -+ struct au_br_hfsnotify *hfsn; -+ -+ err = 0; -+ br->br_hfsn = NULL; -+ if (!au_br_hnotifyable(perm)) -+ goto out; -+ -+ err = -ENOMEM; -+ hfsn = kmalloc(sizeof(*hfsn), GFP_NOFS); -+ if (unlikely(!hfsn)) -+ goto out; -+ -+ err = 0; -+ group = fsnotify_alloc_group(&au_hfsn_ops); -+ if (IS_ERR(group)) { -+ err = PTR_ERR(group); -+ pr_err("fsnotify_alloc_group() failed, %d\n", err); -+ goto out_hfsn; -+ } -+ -+ group->private = hfsn; -+ hfsn->hfsn_group = group; -+ br->br_hfsn = hfsn; -+ goto out; /* success */ -+ -+out_hfsn: -+ kfree(hfsn); -+out: -+ return err; -+} -+ -+static int au_hfsn_reset_br(unsigned int udba, struct au_branch *br, int perm) -+{ -+ int err; -+ -+ err = 0; -+ if (!br->br_hfsn) -+ err = au_hfsn_init_br(br, perm); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_hfsn_fin(void) -+{ -+ AuDbg("au_hfsn_ifree %lld\n", (long long)atomic64_read(&au_hfsn_ifree)); -+ wait_event(au_hfsn_wq, !atomic64_read(&au_hfsn_ifree)); -+} -+ -+const struct au_hnotify_op au_hnotify_op = { -+ .ctl = au_hfsn_ctl, -+ .alloc = au_hfsn_alloc, -+ .free = au_hfsn_free, -+ -+ .fin = au_hfsn_fin, -+ -+ 
.reset_br = au_hfsn_reset_br, -+ .fin_br = au_hfsn_fin_br, -+ .init_br = au_hfsn_init_br -+}; -diff --git a/fs/aufs/hfsplus.c b/fs/aufs/hfsplus.c -new file mode 100644 -index 0000000..8a54c82 ---- /dev/null -+++ b/fs/aufs/hfsplus.c -@@ -0,0 +1,56 @@ -+/* -+ * Copyright (C) 2010-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * special support for filesystems which aqucires an inode mutex -+ * at final closing a file, eg, hfsplus. -+ * -+ * This trick is very simple and stupid, just to open the file before really -+ * neceeary open to tell hfsplus that this is not the final closing. -+ * The caller should call au_h_open_pre() after acquiring the inode mutex, -+ * and au_h_open_post() after releasing it. 
-+ */ -+ -+#include "aufs.h" -+ -+struct file *au_h_open_pre(struct dentry *dentry, aufs_bindex_t bindex, -+ int force_wr) -+{ -+ struct file *h_file; -+ struct dentry *h_dentry; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ AuDebugOn(!h_dentry); -+ AuDebugOn(!h_dentry->d_inode); -+ -+ h_file = NULL; -+ if (au_test_hfsplus(h_dentry->d_sb) -+ && S_ISREG(h_dentry->d_inode->i_mode)) -+ h_file = au_h_open(dentry, bindex, -+ O_RDONLY | O_NOATIME | O_LARGEFILE, -+ /*file*/NULL, force_wr); -+ return h_file; -+} -+ -+void au_h_open_post(struct dentry *dentry, aufs_bindex_t bindex, -+ struct file *h_file) -+{ -+ if (h_file) { -+ fput(h_file); -+ au_sbr_put(dentry->d_sb, bindex); -+ } -+} -diff --git a/fs/aufs/hnotify.c b/fs/aufs/hnotify.c -new file mode 100644 -index 0000000..1801420 ---- /dev/null -+++ b/fs/aufs/hnotify.c -@@ -0,0 +1,714 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * abstraction to notify the direct changes on lower directories -+ */ -+ -+#include "aufs.h" -+ -+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode) -+{ -+ int err; -+ struct au_hnotify *hn; -+ -+ err = -ENOMEM; -+ hn = au_cache_alloc_hnotify(); -+ if (hn) { -+ hn->hn_aufs_inode = inode; -+ hinode->hi_notify = hn; -+ err = au_hnotify_op.alloc(hinode); -+ AuTraceErr(err); -+ if (unlikely(err)) { -+ hinode->hi_notify = NULL; -+ au_cache_free_hnotify(hn); -+ /* -+ * The upper dir was removed by udba, but the same named -+ * dir left. In this case, aufs assignes a new inode -+ * number and set the monitor again. -+ * For the lower dir, the old monitnor is still left. -+ */ -+ if (err == -EEXIST) -+ err = 0; -+ } -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+void au_hn_free(struct au_hinode *hinode) -+{ -+ struct au_hnotify *hn; -+ -+ hn = hinode->hi_notify; -+ if (hn) { -+ hinode->hi_notify = NULL; -+ if (au_hnotify_op.free(hinode, hn)) -+ au_cache_free_hnotify(hn); -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_hn_ctl(struct au_hinode *hinode, int do_set) -+{ -+ if (hinode->hi_notify) -+ au_hnotify_op.ctl(hinode, do_set); -+} -+ -+void au_hn_reset(struct inode *inode, unsigned int flags) -+{ -+ aufs_bindex_t bindex, bend; -+ struct inode *hi; -+ struct dentry *iwhdentry; -+ -+ bend = au_ibend(inode); -+ for (bindex = au_ibstart(inode); bindex <= bend; bindex++) { -+ hi = au_h_iptr(inode, bindex); -+ if (!hi) -+ continue; -+ -+ /* mutex_lock_nested(&hi->i_mutex, AuLsc_I_CHILD); */ -+ iwhdentry = au_hi_wh(inode, bindex); -+ if (iwhdentry) -+ dget(iwhdentry); -+ au_igrab(hi); -+ au_set_h_iptr(inode, bindex, NULL, 0); -+ au_set_h_iptr(inode, bindex, au_igrab(hi), -+ flags & ~AuHi_XINO); -+ iput(hi); -+ dput(iwhdentry); -+ /* mutex_unlock(&hi->i_mutex); */ -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int hn_xino(struct inode 
*inode, struct inode *h_inode) -+{ -+ int err; -+ aufs_bindex_t bindex, bend, bfound, bstart; -+ struct inode *h_i; -+ -+ err = 0; -+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) { -+ pr_warn("branch root dir was changed\n"); -+ goto out; -+ } -+ -+ bfound = -1; -+ bend = au_ibend(inode); -+ bstart = au_ibstart(inode); -+#if 0 /* reserved for future use */ -+ if (bindex == bend) { -+ /* keep this ino in rename case */ -+ goto out; -+ } -+#endif -+ for (bindex = bstart; bindex <= bend; bindex++) -+ if (au_h_iptr(inode, bindex) == h_inode) { -+ bfound = bindex; -+ break; -+ } -+ if (bfound < 0) -+ goto out; -+ -+ for (bindex = bstart; bindex <= bend; bindex++) { -+ h_i = au_h_iptr(inode, bindex); -+ if (!h_i) -+ continue; -+ -+ err = au_xino_write(inode->i_sb, bindex, h_i->i_ino, /*ino*/0); -+ /* ignore this error */ -+ /* bad action? */ -+ } -+ -+ /* children inode number will be broken */ -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int hn_gen_tree(struct dentry *dentry) -+{ -+ int err, i, j, ndentry; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct dentry **dentries; -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_dcsub_pages(&dpages, dentry, NULL, NULL); -+ if (unlikely(err)) -+ goto out_dpages; -+ -+ for (i = 0; i < dpages.ndpage; i++) { -+ dpage = dpages.dpages + i; -+ dentries = dpage->dentries; -+ ndentry = dpage->ndentry; -+ for (j = 0; j < ndentry; j++) { -+ struct dentry *d; -+ -+ d = dentries[j]; -+ if (IS_ROOT(d)) -+ continue; -+ -+ au_digen_dec(d); -+ if (d->d_inode) -+ /* todo: reset children xino? -+ cached children only? */ -+ au_iigen_dec(d->d_inode); -+ } -+ } -+ -+out_dpages: -+ au_dpages_free(&dpages); -+ -+#if 0 -+ /* discard children */ -+ dentry_unhash(dentry); -+ dput(dentry); -+#endif -+out: -+ return err; -+} -+ -+/* -+ * return 0 if processed. 
-+ */ -+static int hn_gen_by_inode(char *name, unsigned int nlen, struct inode *inode, -+ const unsigned int isdir) -+{ -+ int err; -+ struct dentry *d; -+ struct qstr *dname; -+ -+ err = 1; -+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) { -+ pr_warn("branch root dir was changed\n"); -+ err = 0; -+ goto out; -+ } -+ -+ if (!isdir) { -+ AuDebugOn(!name); -+ au_iigen_dec(inode); -+ spin_lock(&inode->i_lock); -+ hlist_for_each_entry(d, &inode->i_dentry, d_u.d_alias) { -+ spin_lock(&d->d_lock); -+ dname = &d->d_name; -+ if (dname->len != nlen -+ && memcmp(dname->name, name, nlen)) { -+ spin_unlock(&d->d_lock); -+ continue; -+ } -+ err = 0; -+ au_digen_dec(d); -+ spin_unlock(&d->d_lock); -+ break; -+ } -+ spin_unlock(&inode->i_lock); -+ } else { -+ au_fset_si(au_sbi(inode->i_sb), FAILED_REFRESH_DIR); -+ d = d_find_any_alias(inode); -+ if (!d) { -+ au_iigen_dec(inode); -+ goto out; -+ } -+ -+ spin_lock(&d->d_lock); -+ dname = &d->d_name; -+ if (dname->len == nlen && !memcmp(dname->name, name, nlen)) { -+ spin_unlock(&d->d_lock); -+ err = hn_gen_tree(d); -+ spin_lock(&d->d_lock); -+ } -+ spin_unlock(&d->d_lock); -+ dput(d); -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int hn_gen_by_name(struct dentry *dentry, const unsigned int isdir) -+{ -+ int err; -+ struct inode *inode; -+ -+ inode = dentry->d_inode; -+ if (IS_ROOT(dentry) -+ /* || (inode && inode->i_ino == AUFS_ROOT_INO) */ -+ ) { -+ pr_warn("branch root dir was changed\n"); -+ return 0; -+ } -+ -+ err = 0; -+ if (!isdir) { -+ au_digen_dec(dentry); -+ if (inode) -+ au_iigen_dec(inode); -+ } else { -+ au_fset_si(au_sbi(dentry->d_sb), FAILED_REFRESH_DIR); -+ if (inode) -+ err = hn_gen_tree(dentry); -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* hnotify job flags */ -+#define AuHnJob_XINO0 1 -+#define AuHnJob_GEN (1 << 1) -+#define AuHnJob_DIRENT (1 << 2) -+#define AuHnJob_ISDIR (1 << 3) -+#define 
AuHnJob_TRYXINO0 (1 << 4) -+#define AuHnJob_MNTPNT (1 << 5) -+#define au_ftest_hnjob(flags, name) ((flags) & AuHnJob_##name) -+#define au_fset_hnjob(flags, name) \ -+ do { (flags) |= AuHnJob_##name; } while (0) -+#define au_fclr_hnjob(flags, name) \ -+ do { (flags) &= ~AuHnJob_##name; } while (0) -+ -+enum { -+ AuHn_CHILD, -+ AuHn_PARENT, -+ AuHnLast -+}; -+ -+struct au_hnotify_args { -+ struct inode *h_dir, *dir, *h_child_inode; -+ u32 mask; -+ unsigned int flags[AuHnLast]; -+ unsigned int h_child_nlen; -+ char h_child_name[]; -+}; -+ -+struct hn_job_args { -+ unsigned int flags; -+ struct inode *inode, *h_inode, *dir, *h_dir; -+ struct dentry *dentry; -+ char *h_name; -+ int h_nlen; -+}; -+ -+static int hn_job(struct hn_job_args *a) -+{ -+ const unsigned int isdir = au_ftest_hnjob(a->flags, ISDIR); -+ int e; -+ -+ /* reset xino */ -+ if (au_ftest_hnjob(a->flags, XINO0) && a->inode) -+ hn_xino(a->inode, a->h_inode); /* ignore this error */ -+ -+ if (au_ftest_hnjob(a->flags, TRYXINO0) -+ && a->inode -+ && a->h_inode) { -+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD); -+ if (!a->h_inode->i_nlink -+ && !(a->h_inode->i_state & I_LINKABLE)) -+ hn_xino(a->inode, a->h_inode); /* ignore this error */ -+ mutex_unlock(&a->h_inode->i_mutex); -+ } -+ -+ /* make the generation obsolete */ -+ if (au_ftest_hnjob(a->flags, GEN)) { -+ e = -1; -+ if (a->inode) -+ e = hn_gen_by_inode(a->h_name, a->h_nlen, a->inode, -+ isdir); -+ if (e && a->dentry) -+ hn_gen_by_name(a->dentry, isdir); -+ /* ignore this error */ -+ } -+ -+ /* make dir entries obsolete */ -+ if (au_ftest_hnjob(a->flags, DIRENT) && a->inode) { -+ struct au_vdir *vdir; -+ -+ vdir = au_ivdir(a->inode); -+ if (vdir) -+ vdir->vd_jiffy = 0; -+ /* IMustLock(a->inode); */ -+ /* a->inode->i_version++; */ -+ } -+ -+ /* can do nothing but warn */ -+ if (au_ftest_hnjob(a->flags, MNTPNT) -+ && a->dentry -+ && d_mountpoint(a->dentry)) -+ pr_warn("mount-point %pd is removed or renamed\n", a->dentry); -+ -+ return 0; -+} -+ 
-+/* ---------------------------------------------------------------------- */ -+ -+static struct dentry *lookup_wlock_by_name(char *name, unsigned int nlen, -+ struct inode *dir) -+{ -+ struct dentry *dentry, *d, *parent; -+ struct qstr *dname; -+ -+ parent = d_find_any_alias(dir); -+ if (!parent) -+ return NULL; -+ -+ dentry = NULL; -+ spin_lock(&parent->d_lock); -+ list_for_each_entry(d, &parent->d_subdirs, d_child) { -+ /* AuDbg("%pd\n", d); */ -+ spin_lock_nested(&d->d_lock, DENTRY_D_LOCK_NESTED); -+ dname = &d->d_name; -+ if (dname->len != nlen || memcmp(dname->name, name, nlen)) -+ goto cont_unlock; -+ if (au_di(d)) -+ au_digen_dec(d); -+ else -+ goto cont_unlock; -+ if (au_dcount(d) > 0) { -+ dentry = dget_dlock(d); -+ spin_unlock(&d->d_lock); -+ break; -+ } -+ -+cont_unlock: -+ spin_unlock(&d->d_lock); -+ } -+ spin_unlock(&parent->d_lock); -+ dput(parent); -+ -+ if (dentry) -+ di_write_lock_child(dentry); -+ -+ return dentry; -+} -+ -+static struct inode *lookup_wlock_by_ino(struct super_block *sb, -+ aufs_bindex_t bindex, ino_t h_ino) -+{ -+ struct inode *inode; -+ ino_t ino; -+ int err; -+ -+ inode = NULL; -+ err = au_xino_read(sb, bindex, h_ino, &ino); -+ if (!err && ino) -+ inode = ilookup(sb, ino); -+ if (!inode) -+ goto out; -+ -+ if (unlikely(inode->i_ino == AUFS_ROOT_INO)) { -+ pr_warn("wrong root branch\n"); -+ iput(inode); -+ inode = NULL; -+ goto out; -+ } -+ -+ ii_write_lock_child(inode); -+ -+out: -+ return inode; -+} -+ -+static void au_hn_bh(void *_args) -+{ -+ struct au_hnotify_args *a = _args; -+ struct super_block *sb; -+ aufs_bindex_t bindex, bend, bfound; -+ unsigned char xino, try_iput; -+ int err; -+ struct inode *inode; -+ ino_t h_ino; -+ struct hn_job_args args; -+ struct dentry *dentry; -+ struct au_sbinfo *sbinfo; -+ -+ AuDebugOn(!_args); -+ AuDebugOn(!a->h_dir); -+ AuDebugOn(!a->dir); -+ AuDebugOn(!a->mask); -+ AuDbg("mask 0x%x, i%lu, hi%lu, hci%lu\n", -+ a->mask, a->dir->i_ino, a->h_dir->i_ino, -+ a->h_child_inode ? 
a->h_child_inode->i_ino : 0); -+ -+ inode = NULL; -+ dentry = NULL; -+ /* -+ * do not lock a->dir->i_mutex here -+ * because of d_revalidate() may cause a deadlock. -+ */ -+ sb = a->dir->i_sb; -+ AuDebugOn(!sb); -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!sbinfo); -+ si_write_lock(sb, AuLock_NOPLMW); -+ -+ ii_read_lock_parent(a->dir); -+ bfound = -1; -+ bend = au_ibend(a->dir); -+ for (bindex = au_ibstart(a->dir); bindex <= bend; bindex++) -+ if (au_h_iptr(a->dir, bindex) == a->h_dir) { -+ bfound = bindex; -+ break; -+ } -+ ii_read_unlock(a->dir); -+ if (unlikely(bfound < 0)) -+ goto out; -+ -+ xino = !!au_opt_test(au_mntflags(sb), XINO); -+ h_ino = 0; -+ if (a->h_child_inode) -+ h_ino = a->h_child_inode->i_ino; -+ -+ if (a->h_child_nlen -+ && (au_ftest_hnjob(a->flags[AuHn_CHILD], GEN) -+ || au_ftest_hnjob(a->flags[AuHn_CHILD], MNTPNT))) -+ dentry = lookup_wlock_by_name(a->h_child_name, a->h_child_nlen, -+ a->dir); -+ try_iput = 0; -+ if (dentry) -+ inode = dentry->d_inode; -+ if (xino && !inode && h_ino -+ && (au_ftest_hnjob(a->flags[AuHn_CHILD], XINO0) -+ || au_ftest_hnjob(a->flags[AuHn_CHILD], TRYXINO0) -+ || au_ftest_hnjob(a->flags[AuHn_CHILD], GEN))) { -+ inode = lookup_wlock_by_ino(sb, bfound, h_ino); -+ try_iput = 1; -+ } -+ -+ args.flags = a->flags[AuHn_CHILD]; -+ args.dentry = dentry; -+ args.inode = inode; -+ args.h_inode = a->h_child_inode; -+ args.dir = a->dir; -+ args.h_dir = a->h_dir; -+ args.h_name = a->h_child_name; -+ args.h_nlen = a->h_child_nlen; -+ err = hn_job(&args); -+ if (dentry) { -+ if (au_di(dentry)) -+ di_write_unlock(dentry); -+ dput(dentry); -+ } -+ if (inode && try_iput) { -+ ii_write_unlock(inode); -+ iput(inode); -+ } -+ -+ ii_write_lock_parent(a->dir); -+ args.flags = a->flags[AuHn_PARENT]; -+ args.dentry = NULL; -+ args.inode = a->dir; -+ args.h_inode = a->h_dir; -+ args.dir = NULL; -+ args.h_dir = NULL; -+ args.h_name = NULL; -+ args.h_nlen = 0; -+ err = hn_job(&args); -+ ii_write_unlock(a->dir); -+ -+out: -+ iput(a->h_child_inode); -+ 
iput(a->h_dir); -+ iput(a->dir); -+ si_write_unlock(sb); -+ au_nwt_done(&sbinfo->si_nowait); -+ kfree(a); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask, -+ struct qstr *h_child_qstr, struct inode *h_child_inode) -+{ -+ int err, len; -+ unsigned int flags[AuHnLast], f; -+ unsigned char isdir, isroot, wh; -+ struct inode *dir; -+ struct au_hnotify_args *args; -+ char *p, *h_child_name; -+ -+ err = 0; -+ AuDebugOn(!hnotify || !hnotify->hn_aufs_inode); -+ dir = igrab(hnotify->hn_aufs_inode); -+ if (!dir) -+ goto out; -+ -+ isroot = (dir->i_ino == AUFS_ROOT_INO); -+ wh = 0; -+ h_child_name = (void *)h_child_qstr->name; -+ len = h_child_qstr->len; -+ if (h_child_name) { -+ if (len > AUFS_WH_PFX_LEN -+ && !memcmp(h_child_name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) { -+ h_child_name += AUFS_WH_PFX_LEN; -+ len -= AUFS_WH_PFX_LEN; -+ wh = 1; -+ } -+ } -+ -+ isdir = 0; -+ if (h_child_inode) -+ isdir = !!S_ISDIR(h_child_inode->i_mode); -+ flags[AuHn_PARENT] = AuHnJob_ISDIR; -+ flags[AuHn_CHILD] = 0; -+ if (isdir) -+ flags[AuHn_CHILD] = AuHnJob_ISDIR; -+ au_fset_hnjob(flags[AuHn_PARENT], DIRENT); -+ au_fset_hnjob(flags[AuHn_CHILD], GEN); -+ switch (mask & FS_EVENTS_POSS_ON_CHILD) { -+ case FS_MOVED_FROM: -+ case FS_MOVED_TO: -+ au_fset_hnjob(flags[AuHn_CHILD], XINO0); -+ au_fset_hnjob(flags[AuHn_CHILD], MNTPNT); -+ /*FALLTHROUGH*/ -+ case FS_CREATE: -+ AuDebugOn(!h_child_name); -+ break; -+ -+ case FS_DELETE: -+ /* -+ * aufs never be able to get this child inode. -+ * revalidation should be in d_revalidate() -+ * by checking i_nlink, i_generation or d_unhashed(). 
-+ */ -+ AuDebugOn(!h_child_name); -+ au_fset_hnjob(flags[AuHn_CHILD], TRYXINO0); -+ au_fset_hnjob(flags[AuHn_CHILD], MNTPNT); -+ break; -+ -+ default: -+ AuDebugOn(1); -+ } -+ -+ if (wh) -+ h_child_inode = NULL; -+ -+ err = -ENOMEM; -+ /* iput() and kfree() will be called in au_hnotify() */ -+ args = kmalloc(sizeof(*args) + len + 1, GFP_NOFS); -+ if (unlikely(!args)) { -+ AuErr1("no memory\n"); -+ iput(dir); -+ goto out; -+ } -+ args->flags[AuHn_PARENT] = flags[AuHn_PARENT]; -+ args->flags[AuHn_CHILD] = flags[AuHn_CHILD]; -+ args->mask = mask; -+ args->dir = dir; -+ args->h_dir = igrab(h_dir); -+ if (h_child_inode) -+ h_child_inode = igrab(h_child_inode); /* can be NULL */ -+ args->h_child_inode = h_child_inode; -+ args->h_child_nlen = len; -+ if (len) { -+ p = (void *)args; -+ p += sizeof(*args); -+ memcpy(p, h_child_name, len); -+ p[len] = 0; -+ } -+ -+ /* NFS fires the event for silly-renamed one from kworker */ -+ f = 0; -+ if (!dir->i_nlink -+ || (au_test_nfs(h_dir->i_sb) && (mask & FS_DELETE))) -+ f = AuWkq_NEST; -+ err = au_wkq_nowait(au_hn_bh, args, dir->i_sb, f); -+ if (unlikely(err)) { -+ pr_err("wkq %d\n", err); -+ iput(args->h_child_inode); -+ iput(args->h_dir); -+ iput(args->dir); -+ kfree(args); -+ } -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm) -+{ -+ int err; -+ -+ AuDebugOn(!(udba & AuOptMask_UDBA)); -+ -+ err = 0; -+ if (au_hnotify_op.reset_br) -+ err = au_hnotify_op.reset_br(udba, br, perm); -+ -+ return err; -+} -+ -+int au_hnotify_init_br(struct au_branch *br, int perm) -+{ -+ int err; -+ -+ err = 0; -+ if (au_hnotify_op.init_br) -+ err = au_hnotify_op.init_br(br, perm); -+ -+ return err; -+} -+ -+void au_hnotify_fin_br(struct au_branch *br) -+{ -+ if (au_hnotify_op.fin_br) -+ au_hnotify_op.fin_br(br); -+} -+ -+static void au_hn_destroy_cache(void) -+{ -+ 
kmem_cache_destroy(au_cachep[AuCache_HNOTIFY]); -+ au_cachep[AuCache_HNOTIFY] = NULL; -+} -+ -+int __init au_hnotify_init(void) -+{ -+ int err; -+ -+ err = -ENOMEM; -+ au_cachep[AuCache_HNOTIFY] = AuCache(au_hnotify); -+ if (au_cachep[AuCache_HNOTIFY]) { -+ err = 0; -+ if (au_hnotify_op.init) -+ err = au_hnotify_op.init(); -+ if (unlikely(err)) -+ au_hn_destroy_cache(); -+ } -+ AuTraceErr(err); -+ return err; -+} -+ -+void au_hnotify_fin(void) -+{ -+ if (au_hnotify_op.fin) -+ au_hnotify_op.fin(); -+ /* cf. au_cache_fin() */ -+ if (au_cachep[AuCache_HNOTIFY]) -+ au_hn_destroy_cache(); -+} -diff --git a/fs/aufs/i_op.c b/fs/aufs/i_op.c -new file mode 100644 -index 0000000..02dc95a ---- /dev/null -+++ b/fs/aufs/i_op.c -@@ -0,0 +1,1460 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * inode operations (except add/del/rename) -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include "aufs.h" -+ -+static int h_permission(struct inode *h_inode, int mask, -+ struct vfsmount *h_mnt, int brperm) -+{ -+ int err; -+ const unsigned char write_mask = !!(mask & (MAY_WRITE | MAY_APPEND)); -+ -+ err = -EACCES; -+ if ((write_mask && IS_IMMUTABLE(h_inode)) -+ || ((mask & MAY_EXEC) -+ && S_ISREG(h_inode->i_mode) -+ && ((h_mnt->mnt_flags & MNT_NOEXEC) -+ || !(h_inode->i_mode & S_IXUGO)))) -+ goto out; -+ -+ /* -+ * - skip the lower fs test in the case of write to ro branch. -+ * - nfs dir permission write check is optimized, but a policy for -+ * link/rename requires a real check. -+ * - nfs always sets MS_POSIXACL regardless its mount option 'noacl.' -+ * in this case, generic_permission() returns -EOPNOTSUPP. -+ */ -+ if ((write_mask && !au_br_writable(brperm)) -+ || (au_test_nfs(h_inode->i_sb) && S_ISDIR(h_inode->i_mode) -+ && write_mask && !(mask & MAY_READ)) -+ || !h_inode->i_op->permission) { -+ /* AuLabel(generic_permission); */ -+ /* AuDbg("get_acl %pf\n", h_inode->i_op->get_acl); */ -+ err = generic_permission(h_inode, mask); -+ if (err == -EOPNOTSUPP && au_test_nfs_noacl(h_inode)) -+ err = h_inode->i_op->permission(h_inode, mask); -+ AuTraceErr(err); -+ } else { -+ /* AuLabel(h_inode->permission); */ -+ err = h_inode->i_op->permission(h_inode, mask); -+ AuTraceErr(err); -+ } -+ -+ if (!err) -+ err = devcgroup_inode_permission(h_inode, mask); -+ if (!err) -+ err = security_inode_permission(h_inode, mask); -+ -+#if 0 -+ if (!err) { -+ /* todo: do we need to call ima_path_check()? 
*/ -+ struct path h_path = { -+ .dentry = -+ .mnt = h_mnt -+ }; -+ err = ima_path_check(&h_path, -+ mask & (MAY_READ | MAY_WRITE | MAY_EXEC), -+ IMA_COUNT_LEAVE); -+ } -+#endif -+ -+out: -+ return err; -+} -+ -+static int aufs_permission(struct inode *inode, int mask) -+{ -+ int err; -+ aufs_bindex_t bindex, bend; -+ const unsigned char isdir = !!S_ISDIR(inode->i_mode), -+ write_mask = !!(mask & (MAY_WRITE | MAY_APPEND)); -+ struct inode *h_inode; -+ struct super_block *sb; -+ struct au_branch *br; -+ -+ /* todo: support rcu-walk? */ -+ if (mask & MAY_NOT_BLOCK) -+ return -ECHILD; -+ -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ ii_read_lock_child(inode); -+#if 0 -+ err = au_iigen_test(inode, au_sigen(sb)); -+ if (unlikely(err)) -+ goto out; -+#endif -+ -+ if (!isdir -+ || write_mask -+ || au_opt_test(au_mntflags(sb), DIRPERM1)) { -+ err = au_busy_or_stale(); -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ if (unlikely(!h_inode -+ || (h_inode->i_mode & S_IFMT) -+ != (inode->i_mode & S_IFMT))) -+ goto out; -+ -+ err = 0; -+ bindex = au_ibstart(inode); -+ br = au_sbr(sb, bindex); -+ err = h_permission(h_inode, mask, au_br_mnt(br), br->br_perm); -+ if (write_mask -+ && !err -+ && !special_file(h_inode->i_mode)) { -+ /* test whether the upper writable branch exists */ -+ err = -EROFS; -+ for (; bindex >= 0; bindex--) -+ if (!au_br_rdonly(au_sbr(sb, bindex))) { -+ err = 0; -+ break; -+ } -+ } -+ goto out; -+ } -+ -+ /* non-write to dir */ -+ err = 0; -+ bend = au_ibend(inode); -+ for (bindex = au_ibstart(inode); !err && bindex <= bend; bindex++) { -+ h_inode = au_h_iptr(inode, bindex); -+ if (h_inode) { -+ err = au_busy_or_stale(); -+ if (unlikely(!S_ISDIR(h_inode->i_mode))) -+ break; -+ -+ br = au_sbr(sb, bindex); -+ err = h_permission(h_inode, mask, au_br_mnt(br), -+ br->br_perm); -+ } -+ } -+ -+out: -+ ii_read_unlock(inode); -+ si_read_unlock(sb); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ 
-+static struct dentry *aufs_lookup(struct inode *dir, struct dentry *dentry, -+ unsigned int flags) -+{ -+ struct dentry *ret, *parent; -+ struct inode *inode; -+ struct super_block *sb; -+ int err, npositive; -+ -+ IMustLock(dir); -+ -+ /* todo: support rcu-walk? */ -+ ret = ERR_PTR(-ECHILD); -+ if (flags & LOOKUP_RCU) -+ goto out; -+ -+ ret = ERR_PTR(-ENAMETOOLONG); -+ if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN)) -+ goto out; -+ -+ sb = dir->i_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ ret = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ err = au_di_init(dentry); -+ ret = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out_si; -+ -+ inode = NULL; -+ npositive = 0; /* suppress a warning */ -+ parent = dentry->d_parent; /* dir inode is locked */ -+ di_read_lock_parent(parent, AuLock_IR); -+ err = au_alive_dir(parent); -+ if (!err) -+ err = au_digen_test(parent, au_sigen(sb)); -+ if (!err) { -+ npositive = au_lkup_dentry(dentry, au_dbstart(parent), -+ /*type*/0); -+ err = npositive; -+ } -+ di_read_unlock(parent, AuLock_IR); -+ ret = ERR_PTR(err); -+ if (unlikely(err < 0)) -+ goto out_unlock; -+ -+ if (npositive) { -+ inode = au_new_inode(dentry, /*must_new*/0); -+ if (IS_ERR(inode)) { -+ ret = (void *)inode; -+ inode = NULL; -+ goto out_unlock; -+ } -+ } -+ -+ if (inode) -+ atomic_inc(&inode->i_count); -+ ret = d_splice_alias(inode, dentry); -+ if (IS_ERR(ret) -+ && PTR_ERR(ret) == -EIO -+ && inode -+ && S_ISDIR(inode->i_mode)) { -+ atomic_inc(&inode->i_count); -+ ret = d_materialise_unique(dentry, inode); -+ if (!IS_ERR(ret)) -+ ii_write_unlock(inode); -+ } -+#if 0 -+ if (unlikely(d_need_lookup(dentry))) { -+ spin_lock(&dentry->d_lock); -+ dentry->d_flags &= ~DCACHE_NEED_LOOKUP; -+ spin_unlock(&dentry->d_lock); -+ } else -+#endif -+ if (inode) { -+ if (!IS_ERR(ret)) -+ iput(inode); -+ else { -+ ii_write_unlock(inode); -+ iput(inode); -+ inode = NULL; -+ } -+ } -+ -+out_unlock: -+ di_write_unlock(dentry); -+ if (inode) { -+ /* 
verbose coding for lock class name */ -+ if (unlikely(S_ISLNK(inode->i_mode))) -+ au_rw_class(&au_di(dentry)->di_rwsem, -+ au_lc_key + AuLcSymlink_DIINFO); -+ else if (unlikely(S_ISDIR(inode->i_mode))) -+ au_rw_class(&au_di(dentry)->di_rwsem, -+ au_lc_key + AuLcDir_DIINFO); -+ else /* likely */ -+ au_rw_class(&au_di(dentry)->di_rwsem, -+ au_lc_key + AuLcNonDir_DIINFO); -+ } -+out_si: -+ si_read_unlock(sb); -+out: -+ return ret; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct aopen_node { -+ struct hlist_node hlist; -+ struct file *file, *h_file; -+}; -+ -+static int au_do_aopen(struct inode *inode, struct file *file) -+{ -+ struct au_sphlhead *aopen; -+ struct aopen_node *node; -+ struct au_do_open_args args = { -+ .no_lock = 1, -+ .open = au_do_open_nondir -+ }; -+ -+ aopen = &au_sbi(inode->i_sb)->si_aopen; -+ spin_lock(&aopen->spin); -+ hlist_for_each_entry(node, &aopen->head, hlist) -+ if (node->file == file) { -+ args.h_file = node->h_file; -+ break; -+ } -+ spin_unlock(&aopen->spin); -+ /* AuDebugOn(!args.h_file); */ -+ -+ return au_do_open(file, &args); -+} -+ -+static int aufs_atomic_open(struct inode *dir, struct dentry *dentry, -+ struct file *file, unsigned int open_flag, -+ umode_t create_mode, int *opened) -+{ -+ int err, h_opened = *opened; -+ struct dentry *parent; -+ struct dentry *d; -+ struct au_sphlhead *aopen; -+ struct vfsub_aopen_args args = { -+ .open_flag = open_flag, -+ .create_mode = create_mode, -+ .opened = &h_opened -+ }; -+ struct aopen_node aopen_node = { -+ .file = file -+ }; -+ -+ IMustLock(dir); -+ AuDbg("open_flag 0x%x\n", open_flag); -+ AuDbgDentry(dentry); -+ -+ err = 0; -+ if (!au_di(dentry)) { -+ d = aufs_lookup(dir, dentry, /*flags*/0); -+ if (IS_ERR(d)) { -+ err = PTR_ERR(d); -+ goto out; -+ } else if (d) { -+ /* -+ * obsoleted dentry found. -+ * another error will be returned later. 
-+ */ -+ d_drop(d); -+ dput(d); -+ AuDbgDentry(d); -+ } -+ AuDbgDentry(dentry); -+ } -+ -+ if (d_is_positive(dentry) -+ || d_unhashed(dentry) -+ || d_unlinked(dentry) -+ || !(open_flag & O_CREAT)) -+ goto out_no_open; -+ -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN); -+ if (unlikely(err)) -+ goto out; -+ -+ parent = dentry->d_parent; /* dir is locked */ -+ di_write_lock_parent(parent); -+ err = au_lkup_dentry(dentry, /*bstart*/0, /*type*/0); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ AuDbgDentry(dentry); -+ if (d_is_positive(dentry)) -+ goto out_unlock; -+ -+ args.file = get_empty_filp(); -+ err = PTR_ERR(args.file); -+ if (IS_ERR(args.file)) -+ goto out_unlock; -+ -+ args.file->f_flags = file->f_flags; -+ err = au_aopen_or_create(dir, dentry, &args); -+ AuTraceErr(err); -+ AuDbgFile(args.file); -+ if (unlikely(err < 0)) { -+ if (h_opened & FILE_OPENED) -+ fput(args.file); -+ else -+ put_filp(args.file); -+ goto out_unlock; -+ } -+ -+ /* some filesystems don't set FILE_CREATED while succeeded? */ -+ *opened |= FILE_CREATED; -+ if (h_opened & FILE_OPENED) -+ aopen_node.h_file = args.file; -+ else { -+ put_filp(args.file); -+ args.file = NULL; -+ } -+ aopen = &au_sbi(dir->i_sb)->si_aopen; -+ au_sphl_add(&aopen_node.hlist, aopen); -+ err = finish_open(file, dentry, au_do_aopen, opened); -+ au_sphl_del(&aopen_node.hlist, aopen); -+ AuTraceErr(err); -+ AuDbgFile(file); -+ if (aopen_node.h_file) -+ fput(aopen_node.h_file); -+ -+out_unlock: -+ di_write_unlock(parent); -+ aufs_read_unlock(dentry, AuLock_DW); -+ AuDbgDentry(dentry); -+ if (unlikely(err)) -+ goto out; -+out_no_open: -+ if (!err && !(*opened & FILE_CREATED)) { -+ AuLabel(out_no_open); -+ dget(dentry); -+ err = finish_no_open(file, dentry); -+ } -+out: -+ AuDbg("%pd%s%s\n", dentry, -+ (*opened & FILE_CREATED) ? " created" : "", -+ (*opened & FILE_OPENED) ? 
" opened" : ""); -+ AuTraceErr(err); -+ return err; -+} -+ -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_wr_dir_cpup(struct dentry *dentry, struct dentry *parent, -+ const unsigned char add_entry, aufs_bindex_t bcpup, -+ aufs_bindex_t bstart) -+{ -+ int err; -+ struct dentry *h_parent; -+ struct inode *h_dir; -+ -+ if (add_entry) -+ IMustLock(parent->d_inode); -+ else -+ di_write_lock_parent(parent); -+ -+ err = 0; -+ if (!au_h_dptr(parent, bcpup)) { -+ if (bstart > bcpup) -+ err = au_cpup_dirs(dentry, bcpup); -+ else if (bstart < bcpup) -+ err = au_cpdown_dirs(dentry, bcpup); -+ else -+ BUG(); -+ } -+ if (!err && add_entry && !au_ftest_wrdir(add_entry, TMPFILE)) { -+ h_parent = au_h_dptr(parent, bcpup); -+ h_dir = h_parent->d_inode; -+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT); -+ err = au_lkup_neg(dentry, bcpup, /*wh*/0); -+ /* todo: no unlock here */ -+ mutex_unlock(&h_dir->i_mutex); -+ -+ AuDbg("bcpup %d\n", bcpup); -+ if (!err) { -+ if (!dentry->d_inode) -+ au_set_h_dptr(dentry, bstart, NULL); -+ au_update_dbrange(dentry, /*do_put_zero*/0); -+ } -+ } -+ -+ if (!add_entry) -+ di_write_unlock(parent); -+ if (!err) -+ err = bcpup; /* success */ -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+/* -+ * decide the branch and the parent dir where we will create a new entry. -+ * returns new bindex or an error. -+ * copyup the parent dir if needed. 
-+ */ -+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry, -+ struct au_wr_dir_args *args) -+{ -+ int err; -+ unsigned int flags; -+ aufs_bindex_t bcpup, bstart, src_bstart; -+ const unsigned char add_entry -+ = au_ftest_wrdir(args->flags, ADD_ENTRY) -+ | au_ftest_wrdir(args->flags, TMPFILE); -+ struct super_block *sb; -+ struct dentry *parent; -+ struct au_sbinfo *sbinfo; -+ -+ sb = dentry->d_sb; -+ sbinfo = au_sbi(sb); -+ parent = dget_parent(dentry); -+ bstart = au_dbstart(dentry); -+ bcpup = bstart; -+ if (args->force_btgt < 0) { -+ if (src_dentry) { -+ src_bstart = au_dbstart(src_dentry); -+ if (src_bstart < bstart) -+ bcpup = src_bstart; -+ } else if (add_entry) { -+ flags = 0; -+ if (au_ftest_wrdir(args->flags, ISDIR)) -+ au_fset_wbr(flags, DIR); -+ err = AuWbrCreate(sbinfo, dentry, flags); -+ bcpup = err; -+ } -+ -+ if (bcpup < 0 || au_test_ro(sb, bcpup, dentry->d_inode)) { -+ if (add_entry) -+ err = AuWbrCopyup(sbinfo, dentry); -+ else { -+ if (!IS_ROOT(dentry)) { -+ di_read_lock_parent(parent, !AuLock_IR); -+ err = AuWbrCopyup(sbinfo, dentry); -+ di_read_unlock(parent, !AuLock_IR); -+ } else -+ err = AuWbrCopyup(sbinfo, dentry); -+ } -+ bcpup = err; -+ if (unlikely(err < 0)) -+ goto out; -+ } -+ } else { -+ bcpup = args->force_btgt; -+ AuDebugOn(au_test_ro(sb, bcpup, dentry->d_inode)); -+ } -+ -+ AuDbg("bstart %d, bcpup %d\n", bstart, bcpup); -+ err = bcpup; -+ if (bcpup == bstart) -+ goto out; /* success */ -+ -+ /* copyup the new parent into the branch we process */ -+ err = au_wr_dir_cpup(dentry, parent, add_entry, bcpup, bstart); -+ if (err >= 0) { -+ if (!dentry->d_inode) { -+ au_set_h_dptr(dentry, bstart, NULL); -+ au_set_dbstart(dentry, bcpup); -+ au_set_dbend(dentry, bcpup); -+ } -+ AuDebugOn(add_entry -+ && !au_ftest_wrdir(args->flags, TMPFILE) -+ && !au_h_dptr(dentry, bcpup)); -+ } -+ -+out: -+ dput(parent); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void 
au_pin_hdir_unlock(struct au_pin *p) -+{ -+ if (p->hdir) -+ au_hn_imtx_unlock(p->hdir); -+} -+ -+int au_pin_hdir_lock(struct au_pin *p) -+{ -+ int err; -+ -+ err = 0; -+ if (!p->hdir) -+ goto out; -+ -+ /* even if an error happens later, keep this lock */ -+ au_hn_imtx_lock_nested(p->hdir, p->lsc_hi); -+ -+ err = -EBUSY; -+ if (unlikely(p->hdir->hi_inode != p->h_parent->d_inode)) -+ goto out; -+ -+ err = 0; -+ if (p->h_dentry) -+ err = au_h_verify(p->h_dentry, p->udba, p->hdir->hi_inode, -+ p->h_parent, p->br); -+ -+out: -+ return err; -+} -+ -+int au_pin_hdir_relock(struct au_pin *p) -+{ -+ int err, i; -+ struct inode *h_i; -+ struct dentry *h_d[] = { -+ p->h_dentry, -+ p->h_parent -+ }; -+ -+ err = au_pin_hdir_lock(p); -+ if (unlikely(err)) -+ goto out; -+ -+ for (i = 0; !err && i < sizeof(h_d)/sizeof(*h_d); i++) { -+ if (!h_d[i]) -+ continue; -+ h_i = h_d[i]->d_inode; -+ if (h_i) -+ err = !h_i->i_nlink; -+ } -+ -+out: -+ return err; -+} -+ -+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task) -+{ -+#if defined(CONFIG_DEBUG_MUTEXES) || defined(CONFIG_SMP) -+ p->hdir->hi_inode->i_mutex.owner = task; -+#endif -+} -+ -+void au_pin_hdir_acquire_nest(struct au_pin *p) -+{ -+ if (p->hdir) { -+ mutex_acquire_nest(&p->hdir->hi_inode->i_mutex.dep_map, -+ p->lsc_hi, 0, NULL, _RET_IP_); -+ au_pin_hdir_set_owner(p, current); -+ } -+} -+ -+void au_pin_hdir_release(struct au_pin *p) -+{ -+ if (p->hdir) { -+ au_pin_hdir_set_owner(p, p->task); -+ mutex_release(&p->hdir->hi_inode->i_mutex.dep_map, 1, _RET_IP_); -+ } -+} -+ -+struct dentry *au_pinned_h_parent(struct au_pin *pin) -+{ -+ if (pin && pin->parent) -+ return au_h_dptr(pin->parent, pin->bindex); -+ return NULL; -+} -+ -+void au_unpin(struct au_pin *p) -+{ -+ if (p->hdir) -+ au_pin_hdir_unlock(p); -+ if (p->h_mnt && au_ftest_pin(p->flags, MNT_WRITE)) -+ vfsub_mnt_drop_write(p->h_mnt); -+ if (!p->hdir) -+ return; -+ -+ if (!au_ftest_pin(p->flags, DI_LOCKED)) -+ di_read_unlock(p->parent, AuLock_IR); -+ 
iput(p->hdir->hi_inode); -+ dput(p->parent); -+ p->parent = NULL; -+ p->hdir = NULL; -+ p->h_mnt = NULL; -+ /* do not clear p->task */ -+} -+ -+int au_do_pin(struct au_pin *p) -+{ -+ int err; -+ struct super_block *sb; -+ struct inode *h_dir; -+ -+ err = 0; -+ sb = p->dentry->d_sb; -+ p->br = au_sbr(sb, p->bindex); -+ if (IS_ROOT(p->dentry)) { -+ if (au_ftest_pin(p->flags, MNT_WRITE)) { -+ p->h_mnt = au_br_mnt(p->br); -+ err = vfsub_mnt_want_write(p->h_mnt); -+ if (unlikely(err)) { -+ au_fclr_pin(p->flags, MNT_WRITE); -+ goto out_err; -+ } -+ } -+ goto out; -+ } -+ -+ p->h_dentry = NULL; -+ if (p->bindex <= au_dbend(p->dentry)) -+ p->h_dentry = au_h_dptr(p->dentry, p->bindex); -+ -+ p->parent = dget_parent(p->dentry); -+ if (!au_ftest_pin(p->flags, DI_LOCKED)) -+ di_read_lock(p->parent, AuLock_IR, p->lsc_di); -+ -+ h_dir = NULL; -+ p->h_parent = au_h_dptr(p->parent, p->bindex); -+ p->hdir = au_hi(p->parent->d_inode, p->bindex); -+ if (p->hdir) -+ h_dir = p->hdir->hi_inode; -+ -+ /* -+ * udba case, or -+ * if DI_LOCKED is not set, then p->parent may be different -+ * and h_parent can be NULL. 
-+ */ -+ if (unlikely(!p->hdir || !h_dir || !p->h_parent)) { -+ err = -EBUSY; -+ if (!au_ftest_pin(p->flags, DI_LOCKED)) -+ di_read_unlock(p->parent, AuLock_IR); -+ dput(p->parent); -+ p->parent = NULL; -+ goto out_err; -+ } -+ -+ if (au_ftest_pin(p->flags, MNT_WRITE)) { -+ p->h_mnt = au_br_mnt(p->br); -+ err = vfsub_mnt_want_write(p->h_mnt); -+ if (unlikely(err)) { -+ au_fclr_pin(p->flags, MNT_WRITE); -+ if (!au_ftest_pin(p->flags, DI_LOCKED)) -+ di_read_unlock(p->parent, AuLock_IR); -+ dput(p->parent); -+ p->parent = NULL; -+ goto out_err; -+ } -+ } -+ -+ au_igrab(h_dir); -+ err = au_pin_hdir_lock(p); -+ if (!err) -+ goto out; /* success */ -+ -+ au_unpin(p); -+ -+out_err: -+ pr_err("err %d\n", err); -+ err = au_busy_or_stale(); -+out: -+ return err; -+} -+ -+void au_pin_init(struct au_pin *p, struct dentry *dentry, -+ aufs_bindex_t bindex, int lsc_di, int lsc_hi, -+ unsigned int udba, unsigned char flags) -+{ -+ p->dentry = dentry; -+ p->udba = udba; -+ p->lsc_di = lsc_di; -+ p->lsc_hi = lsc_hi; -+ p->flags = flags; -+ p->bindex = bindex; -+ -+ p->parent = NULL; -+ p->hdir = NULL; -+ p->h_mnt = NULL; -+ -+ p->h_dentry = NULL; -+ p->h_parent = NULL; -+ p->br = NULL; -+ p->task = current; -+} -+ -+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex, -+ unsigned int udba, unsigned char flags) -+{ -+ au_pin_init(pin, dentry, bindex, AuLsc_DI_PARENT, AuLsc_I_PARENT2, -+ udba, flags); -+ return au_do_pin(pin); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * ->setattr() and ->getattr() are called in various cases. -+ * chmod, stat: dentry is revalidated. -+ * fchmod, fstat: file and dentry are not revalidated, additionally they may be -+ * unhashed. -+ * for ->setattr(), ia->ia_file is passed from ftruncate only. 
-+ */ -+/* todo: consolidate with do_refresh() and simple_reval_dpath() */ -+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen) -+{ -+ int err; -+ struct inode *inode; -+ struct dentry *parent; -+ -+ err = 0; -+ inode = dentry->d_inode; -+ if (au_digen_test(dentry, sigen)) { -+ parent = dget_parent(dentry); -+ di_read_lock_parent(parent, AuLock_IR); -+ err = au_refresh_dentry(dentry, parent); -+ di_read_unlock(parent, AuLock_IR); -+ dput(parent); -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia, -+ struct au_icpup_args *a) -+{ -+ int err; -+ loff_t sz; -+ aufs_bindex_t bstart, ibstart; -+ struct dentry *hi_wh, *parent; -+ struct inode *inode; -+ struct au_wr_dir_args wr_dir_args = { -+ .force_btgt = -1, -+ .flags = 0 -+ }; -+ -+ if (d_is_dir(dentry)) -+ au_fset_wrdir(wr_dir_args.flags, ISDIR); -+ /* plink or hi_wh() case */ -+ bstart = au_dbstart(dentry); -+ inode = dentry->d_inode; -+ ibstart = au_ibstart(inode); -+ if (bstart != ibstart && !au_test_ro(inode->i_sb, ibstart, inode)) -+ wr_dir_args.force_btgt = ibstart; -+ err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args); -+ if (unlikely(err < 0)) -+ goto out; -+ a->btgt = err; -+ if (err != bstart) -+ au_fset_icpup(a->flags, DID_CPUP); -+ -+ err = 0; -+ a->pin_flags = AuPin_MNT_WRITE; -+ parent = NULL; -+ if (!IS_ROOT(dentry)) { -+ au_fset_pin(a->pin_flags, DI_LOCKED); -+ parent = dget_parent(dentry); -+ di_write_lock_parent(parent); -+ } -+ -+ err = au_pin(&a->pin, dentry, a->btgt, a->udba, a->pin_flags); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ a->h_path.dentry = au_h_dptr(dentry, bstart); -+ a->h_inode = a->h_path.dentry->d_inode; -+ sz = -1; -+ if (ia && (ia->ia_valid & ATTR_SIZE)) { -+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD); -+ if (ia->ia_size < i_size_read(a->h_inode)) -+ sz = ia->ia_size; -+ mutex_unlock(&a->h_inode->i_mutex); -+ } -+ -+ hi_wh = NULL; -+ if (au_ftest_icpup(a->flags, DID_CPUP) && 
d_unlinked(dentry)) { -+ hi_wh = au_hi_wh(inode, a->btgt); -+ if (!hi_wh) { -+ struct au_cp_generic cpg = { -+ .dentry = dentry, -+ .bdst = a->btgt, -+ .bsrc = -1, -+ .len = sz, -+ .pin = &a->pin -+ }; -+ err = au_sio_cpup_wh(&cpg, /*file*/NULL); -+ if (unlikely(err)) -+ goto out_unlock; -+ hi_wh = au_hi_wh(inode, a->btgt); -+ /* todo: revalidate hi_wh? */ -+ } -+ } -+ -+ if (parent) { -+ au_pin_set_parent_lflag(&a->pin, /*lflag*/0); -+ di_downgrade_lock(parent, AuLock_IR); -+ dput(parent); -+ parent = NULL; -+ } -+ if (!au_ftest_icpup(a->flags, DID_CPUP)) -+ goto out; /* success */ -+ -+ if (!d_unhashed(dentry)) { -+ struct au_cp_generic cpg = { -+ .dentry = dentry, -+ .bdst = a->btgt, -+ .bsrc = bstart, -+ .len = sz, -+ .pin = &a->pin, -+ .flags = AuCpup_DTIME | AuCpup_HOPEN -+ }; -+ err = au_sio_cpup_simple(&cpg); -+ if (!err) -+ a->h_path.dentry = au_h_dptr(dentry, a->btgt); -+ } else if (!hi_wh) -+ a->h_path.dentry = au_h_dptr(dentry, a->btgt); -+ else -+ a->h_path.dentry = hi_wh; /* do not dget here */ -+ -+out_unlock: -+ a->h_inode = a->h_path.dentry->d_inode; -+ if (!err) -+ goto out; /* success */ -+ au_unpin(&a->pin); -+out_parent: -+ if (parent) { -+ di_write_unlock(parent); -+ dput(parent); -+ } -+out: -+ if (!err) -+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD); -+ return err; -+} -+ -+static int aufs_setattr(struct dentry *dentry, struct iattr *ia) -+{ -+ int err; -+ struct inode *inode, *delegated; -+ struct super_block *sb; -+ struct file *file; -+ struct au_icpup_args *a; -+ -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ -+ err = -ENOMEM; -+ a = kzalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ if (ia->ia_valid & (ATTR_KILL_SUID | ATTR_KILL_SGID)) -+ ia->ia_valid &= ~ATTR_MODE; -+ -+ file = NULL; -+ sb = dentry->d_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out_kfree; -+ -+ if (ia->ia_valid & ATTR_FILE) { -+ /* currently ftruncate(2) only */ -+ 
AuDebugOn(!S_ISREG(inode->i_mode)); -+ file = ia->ia_file; -+ err = au_reval_and_lock_fdi(file, au_reopen_nondir, /*wlock*/1); -+ if (unlikely(err)) -+ goto out_si; -+ ia->ia_file = au_hf_top(file); -+ a->udba = AuOpt_UDBA_NONE; -+ } else { -+ /* fchmod() doesn't pass ia_file */ -+ a->udba = au_opt_udba(sb); -+ di_write_lock_child(dentry); -+ /* no d_unlinked(), to set UDBA_NONE for root */ -+ if (d_unhashed(dentry)) -+ a->udba = AuOpt_UDBA_NONE; -+ if (a->udba != AuOpt_UDBA_NONE) { -+ AuDebugOn(IS_ROOT(dentry)); -+ err = au_reval_for_attr(dentry, au_sigen(sb)); -+ if (unlikely(err)) -+ goto out_dentry; -+ } -+ } -+ -+ err = au_pin_and_icpup(dentry, ia, a); -+ if (unlikely(err < 0)) -+ goto out_dentry; -+ if (au_ftest_icpup(a->flags, DID_CPUP)) { -+ ia->ia_file = NULL; -+ ia->ia_valid &= ~ATTR_FILE; -+ } -+ -+ a->h_path.mnt = au_sbr_mnt(sb, a->btgt); -+ if ((ia->ia_valid & (ATTR_MODE | ATTR_CTIME)) -+ == (ATTR_MODE | ATTR_CTIME)) { -+ err = security_path_chmod(&a->h_path, ia->ia_mode); -+ if (unlikely(err)) -+ goto out_unlock; -+ } else if ((ia->ia_valid & (ATTR_UID | ATTR_GID)) -+ && (ia->ia_valid & ATTR_CTIME)) { -+ err = security_path_chown(&a->h_path, ia->ia_uid, ia->ia_gid); -+ if (unlikely(err)) -+ goto out_unlock; -+ } -+ -+ if (ia->ia_valid & ATTR_SIZE) { -+ struct file *f; -+ -+ if (ia->ia_size < i_size_read(inode)) -+ /* unmap only */ -+ truncate_setsize(inode, ia->ia_size); -+ -+ f = NULL; -+ if (ia->ia_valid & ATTR_FILE) -+ f = ia->ia_file; -+ mutex_unlock(&a->h_inode->i_mutex); -+ err = vfsub_trunc(&a->h_path, ia->ia_size, ia->ia_valid, f); -+ mutex_lock_nested(&a->h_inode->i_mutex, AuLsc_I_CHILD); -+ } else { -+ delegated = NULL; -+ while (1) { -+ err = vfsub_notify_change(&a->h_path, ia, &delegated); -+ if (delegated) { -+ err = break_deleg_wait(&delegated); -+ if (!err) -+ continue; -+ } -+ break; -+ } -+ } -+ /* -+ * regardless aufs 'acl' option setting. -+ * why don't all acl-aware fs call this func from their ->setattr()? 
-+ */ -+ if (!err && (ia->ia_valid & ATTR_MODE)) -+ err = vfsub_acl_chmod(a->h_inode, ia->ia_mode); -+ if (!err) -+ au_cpup_attr_changeable(inode); -+ -+out_unlock: -+ mutex_unlock(&a->h_inode->i_mutex); -+ au_unpin(&a->pin); -+ if (unlikely(err)) -+ au_update_dbstart(dentry); -+out_dentry: -+ di_write_unlock(dentry); -+ if (file) { -+ fi_write_unlock(file); -+ ia->ia_file = file; -+ ia->ia_valid |= ATTR_FILE; -+ } -+out_si: -+ si_read_unlock(sb); -+out_kfree: -+ kfree(a); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL) -+static int au_h_path_to_set_attr(struct dentry *dentry, -+ struct au_icpup_args *a, struct path *h_path) -+{ -+ int err; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ a->udba = au_opt_udba(sb); -+ /* no d_unlinked(), to set UDBA_NONE for root */ -+ if (d_unhashed(dentry)) -+ a->udba = AuOpt_UDBA_NONE; -+ if (a->udba != AuOpt_UDBA_NONE) { -+ AuDebugOn(IS_ROOT(dentry)); -+ err = au_reval_for_attr(dentry, au_sigen(sb)); -+ if (unlikely(err)) -+ goto out; -+ } -+ err = au_pin_and_icpup(dentry, /*ia*/NULL, a); -+ if (unlikely(err < 0)) -+ goto out; -+ -+ h_path->dentry = a->h_path.dentry; -+ h_path->mnt = au_sbr_mnt(sb, a->btgt); -+ -+out: -+ return err; -+} -+ -+ssize_t au_srxattr(struct dentry *dentry, struct au_srxattr *arg) -+{ -+ int err; -+ struct path h_path; -+ struct super_block *sb; -+ struct au_icpup_args *a; -+ struct inode *inode, *h_inode; -+ -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ -+ err = -ENOMEM; -+ a = kzalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out_kfree; -+ -+ h_path.dentry = NULL; /* silence gcc */ -+ di_write_lock_child(dentry); -+ err = au_h_path_to_set_attr(dentry, a, &h_path); -+ if (unlikely(err)) -+ goto out_di; -+ -+ mutex_unlock(&a->h_inode->i_mutex); -+ switch (arg->type) { -+ case AU_XATTR_SET: -+ err 
= vfsub_setxattr(h_path.dentry, -+ arg->u.set.name, arg->u.set.value, -+ arg->u.set.size, arg->u.set.flags); -+ break; -+ case AU_XATTR_REMOVE: -+ err = vfsub_removexattr(h_path.dentry, arg->u.remove.name); -+ break; -+ case AU_ACL_SET: -+ err = -EOPNOTSUPP; -+ h_inode = h_path.dentry->d_inode; -+ if (h_inode->i_op->set_acl) -+ err = h_inode->i_op->set_acl(h_inode, -+ arg->u.acl_set.acl, -+ arg->u.acl_set.type); -+ break; -+ } -+ if (!err) -+ au_cpup_attr_timesizes(inode); -+ -+ au_unpin(&a->pin); -+ if (unlikely(err)) -+ au_update_dbstart(dentry); -+ -+out_di: -+ di_write_unlock(dentry); -+ si_read_unlock(sb); -+out_kfree: -+ kfree(a); -+out: -+ AuTraceErr(err); -+ return err; -+} -+#endif -+ -+static void au_refresh_iattr(struct inode *inode, struct kstat *st, -+ unsigned int nlink) -+{ -+ unsigned int n; -+ -+ inode->i_mode = st->mode; -+ /* don't i_[ug]id_write() here */ -+ inode->i_uid = st->uid; -+ inode->i_gid = st->gid; -+ inode->i_atime = st->atime; -+ inode->i_mtime = st->mtime; -+ inode->i_ctime = st->ctime; -+ -+ au_cpup_attr_nlink(inode, /*force*/0); -+ if (S_ISDIR(inode->i_mode)) { -+ n = inode->i_nlink; -+ n -= nlink; -+ n += st->nlink; -+ smp_mb(); /* for i_nlink */ -+ /* 0 can happen */ -+ set_nlink(inode, n); -+ } -+ -+ spin_lock(&inode->i_lock); -+ inode->i_blocks = st->blocks; -+ i_size_write(inode, st->size); -+ spin_unlock(&inode->i_lock); -+} -+ -+/* -+ * common routine for aufs_getattr() and aufs_getxattr(). -+ * returns zero or negative (an error). -+ * @dentry will be read-locked in success. 
-+ */ -+int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path) -+{ -+ int err; -+ unsigned int mnt_flags, sigen; -+ unsigned char udba_none; -+ aufs_bindex_t bindex; -+ struct super_block *sb, *h_sb; -+ struct inode *inode; -+ -+ h_path->mnt = NULL; -+ h_path->dentry = NULL; -+ -+ err = 0; -+ sb = dentry->d_sb; -+ mnt_flags = au_mntflags(sb); -+ udba_none = !!au_opt_test(mnt_flags, UDBA_NONE); -+ -+ /* support fstat(2) */ -+ if (!d_unlinked(dentry) && !udba_none) { -+ sigen = au_sigen(sb); -+ err = au_digen_test(dentry, sigen); -+ if (!err) { -+ di_read_lock_child(dentry, AuLock_IR); -+ err = au_dbrange_test(dentry); -+ if (unlikely(err)) { -+ di_read_unlock(dentry, AuLock_IR); -+ goto out; -+ } -+ } else { -+ AuDebugOn(IS_ROOT(dentry)); -+ di_write_lock_child(dentry); -+ err = au_dbrange_test(dentry); -+ if (!err) -+ err = au_reval_for_attr(dentry, sigen); -+ if (!err) -+ di_downgrade_lock(dentry, AuLock_IR); -+ else { -+ di_write_unlock(dentry); -+ goto out; -+ } -+ } -+ } else -+ di_read_lock_child(dentry, AuLock_IR); -+ -+ inode = dentry->d_inode; -+ bindex = au_ibstart(inode); -+ h_path->mnt = au_sbr_mnt(sb, bindex); -+ h_sb = h_path->mnt->mnt_sb; -+ if (!force -+ && !au_test_fs_bad_iattr(h_sb) -+ && udba_none) -+ goto out; /* success */ -+ -+ if (au_dbstart(dentry) == bindex) -+ h_path->dentry = au_h_dptr(dentry, bindex); -+ else if (au_opt_test(mnt_flags, PLINK) && au_plink_test(inode)) { -+ h_path->dentry = au_plink_lkup(inode, bindex); -+ if (IS_ERR(h_path->dentry)) -+ /* pretending success */ -+ h_path->dentry = NULL; -+ else -+ dput(h_path->dentry); -+ } -+ -+out: -+ return err; -+} -+ -+static int aufs_getattr(struct vfsmount *mnt __maybe_unused, -+ struct dentry *dentry, struct kstat *st) -+{ -+ int err; -+ unsigned char positive; -+ struct path h_path; -+ struct inode *inode; -+ struct super_block *sb; -+ -+ inode = dentry->d_inode; -+ sb = dentry->d_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if 
(unlikely(err)) -+ goto out; -+ err = au_h_path_getattr(dentry, /*force*/0, &h_path); -+ if (unlikely(err)) -+ goto out_si; -+ if (unlikely(!h_path.dentry)) -+ /* illegally overlapped or something */ -+ goto out_fill; /* pretending success */ -+ -+ positive = !!h_path.dentry->d_inode; -+ if (positive) -+ err = vfs_getattr(&h_path, st); -+ if (!err) { -+ if (positive) -+ au_refresh_iattr(inode, st, -+ h_path.dentry->d_inode->i_nlink); -+ goto out_fill; /* success */ -+ } -+ AuTraceErr(err); -+ goto out_di; -+ -+out_fill: -+ generic_fillattr(inode, st); -+out_di: -+ di_read_unlock(dentry, AuLock_IR); -+out_si: -+ si_read_unlock(sb); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int h_readlink(struct dentry *dentry, int bindex, char __user *buf, -+ int bufsiz) -+{ -+ int err; -+ struct super_block *sb; -+ struct dentry *h_dentry; -+ -+ err = -EINVAL; -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (unlikely(!h_dentry->d_inode->i_op->readlink)) -+ goto out; -+ -+ err = security_inode_readlink(h_dentry); -+ if (unlikely(err)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ if (!au_test_ro(sb, bindex, dentry->d_inode)) { -+ vfsub_touch_atime(au_sbr_mnt(sb, bindex), h_dentry); -+ fsstack_copy_attr_atime(dentry->d_inode, h_dentry->d_inode); -+ } -+ err = h_dentry->d_inode->i_op->readlink(h_dentry, buf, bufsiz); -+ -+out: -+ return err; -+} -+ -+static int aufs_readlink(struct dentry *dentry, char __user *buf, int bufsiz) -+{ -+ int err; -+ -+ err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN); -+ if (unlikely(err)) -+ goto out; -+ err = au_d_hashed_positive(dentry); -+ if (!err) -+ err = h_readlink(dentry, au_dbstart(dentry), buf, bufsiz); -+ aufs_read_unlock(dentry, AuLock_IR); -+ -+out: -+ return err; -+} -+ -+static void *aufs_follow_link(struct dentry *dentry, struct nameidata *nd) -+{ -+ int err; -+ mm_segment_t old_fs; -+ union { -+ char *k; -+ char __user *u; -+ } buf; -+ -+ err 
= -ENOMEM; -+ buf.k = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!buf.k)) -+ goto out; -+ -+ err = aufs_read_lock(dentry, AuLock_IR | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_name; -+ -+ err = au_d_hashed_positive(dentry); -+ if (!err) { -+ old_fs = get_fs(); -+ set_fs(KERNEL_DS); -+ err = h_readlink(dentry, au_dbstart(dentry), buf.u, PATH_MAX); -+ set_fs(old_fs); -+ } -+ aufs_read_unlock(dentry, AuLock_IR); -+ -+ if (err >= 0) { -+ buf.k[err] = 0; -+ /* will be freed by put_link */ -+ nd_set_link(nd, buf.k); -+ return NULL; /* success */ -+ } -+ -+out_name: -+ free_page((unsigned long)buf.k); -+out: -+ AuTraceErr(err); -+ return ERR_PTR(err); -+} -+ -+static void aufs_put_link(struct dentry *dentry __maybe_unused, -+ struct nameidata *nd, void *cookie __maybe_unused) -+{ -+ char *p; -+ -+ p = nd_get_link(nd); -+ if (!IS_ERR_OR_NULL(p)) -+ free_page((unsigned long)p); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_update_time(struct inode *inode, struct timespec *ts, int flags) -+{ -+ int err; -+ struct super_block *sb; -+ struct inode *h_inode; -+ -+ sb = inode->i_sb; -+ /* mmap_sem might be acquired already, cf. 
aufs_mmap() */ -+ lockdep_off(); -+ si_read_lock(sb, AuLock_FLUSH); -+ ii_write_lock_child(inode); -+ lockdep_on(); -+ h_inode = au_h_iptr(inode, au_ibstart(inode)); -+ err = vfsub_update_time(h_inode, ts, flags); -+ lockdep_off(); -+ if (!err) -+ au_cpup_attr_timesizes(inode); -+ ii_write_unlock(inode); -+ si_read_unlock(sb); -+ lockdep_on(); -+ -+ if (!err && (flags & S_VERSION)) -+ inode_inc_iversion(inode); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* no getattr version will be set by module.c:aufs_init() */ -+struct inode_operations aufs_iop_nogetattr[AuIop_Last], -+ aufs_iop[] = { -+ [AuIop_SYMLINK] = { -+ .permission = aufs_permission, -+#ifdef CONFIG_FS_POSIX_ACL -+ .get_acl = aufs_get_acl, -+ .set_acl = aufs_set_acl, /* unsupport for symlink? */ -+#endif -+ -+ .setattr = aufs_setattr, -+ .getattr = aufs_getattr, -+ -+#ifdef CONFIG_AUFS_XATTR -+ .setxattr = aufs_setxattr, -+ .getxattr = aufs_getxattr, -+ .listxattr = aufs_listxattr, -+ .removexattr = aufs_removexattr, -+#endif -+ -+ .readlink = aufs_readlink, -+ .follow_link = aufs_follow_link, -+ .put_link = aufs_put_link, -+ -+ /* .update_time = aufs_update_time */ -+ }, -+ [AuIop_DIR] = { -+ .create = aufs_create, -+ .lookup = aufs_lookup, -+ .link = aufs_link, -+ .unlink = aufs_unlink, -+ .symlink = aufs_symlink, -+ .mkdir = aufs_mkdir, -+ .rmdir = aufs_rmdir, -+ .mknod = aufs_mknod, -+ .rename = aufs_rename, -+ -+ .permission = aufs_permission, -+#ifdef CONFIG_FS_POSIX_ACL -+ .get_acl = aufs_get_acl, -+ .set_acl = aufs_set_acl, -+#endif -+ -+ .setattr = aufs_setattr, -+ .getattr = aufs_getattr, -+ -+#ifdef CONFIG_AUFS_XATTR -+ .setxattr = aufs_setxattr, -+ .getxattr = aufs_getxattr, -+ .listxattr = aufs_listxattr, -+ .removexattr = aufs_removexattr, -+#endif -+ -+ .update_time = aufs_update_time, -+ .atomic_open = aufs_atomic_open, -+ .tmpfile = aufs_tmpfile -+ }, -+ [AuIop_OTHER] = { -+ .permission = aufs_permission, -+#ifdef 
CONFIG_FS_POSIX_ACL -+ .get_acl = aufs_get_acl, -+ .set_acl = aufs_set_acl, -+#endif -+ -+ .setattr = aufs_setattr, -+ .getattr = aufs_getattr, -+ -+#ifdef CONFIG_AUFS_XATTR -+ .setxattr = aufs_setxattr, -+ .getxattr = aufs_getxattr, -+ .listxattr = aufs_listxattr, -+ .removexattr = aufs_removexattr, -+#endif -+ -+ .update_time = aufs_update_time -+ } -+}; -diff --git a/fs/aufs/i_op_add.c b/fs/aufs/i_op_add.c -new file mode 100644 -index 0000000..9e4f65c ---- /dev/null -+++ b/fs/aufs/i_op_add.c -@@ -0,0 +1,930 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * inode operations (add entry) -+ */ -+ -+#include "aufs.h" -+ -+/* -+ * final procedure of adding a new entry, except link(2). -+ * remove whiteout, instantiate, copyup the parent dir's times and size -+ * and update version. -+ * if it failed, re-create the removed whiteout. 
-+ */ -+static int epilog(struct inode *dir, aufs_bindex_t bindex, -+ struct dentry *wh_dentry, struct dentry *dentry) -+{ -+ int err, rerr; -+ aufs_bindex_t bwh; -+ struct path h_path; -+ struct super_block *sb; -+ struct inode *inode, *h_dir; -+ struct dentry *wh; -+ -+ bwh = -1; -+ sb = dir->i_sb; -+ if (wh_dentry) { -+ h_dir = wh_dentry->d_parent->d_inode; /* dir inode is locked */ -+ IMustLock(h_dir); -+ AuDebugOn(au_h_iptr(dir, bindex) != h_dir); -+ bwh = au_dbwh(dentry); -+ h_path.dentry = wh_dentry; -+ h_path.mnt = au_sbr_mnt(sb, bindex); -+ err = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path, -+ dentry); -+ if (unlikely(err)) -+ goto out; -+ } -+ -+ inode = au_new_inode(dentry, /*must_new*/1); -+ if (!IS_ERR(inode)) { -+ d_instantiate(dentry, inode); -+ dir = dentry->d_parent->d_inode; /* dir inode is locked */ -+ IMustLock(dir); -+ au_dir_ts(dir, bindex); -+ dir->i_version++; -+ au_fhsm_wrote(sb, bindex, /*force*/0); -+ return 0; /* success */ -+ } -+ -+ err = PTR_ERR(inode); -+ if (!wh_dentry) -+ goto out; -+ -+ /* revert */ -+ /* dir inode is locked */ -+ wh = au_wh_create(dentry, bwh, wh_dentry->d_parent); -+ rerr = PTR_ERR(wh); -+ if (IS_ERR(wh)) { -+ AuIOErr("%pd reverting whiteout failed(%d, %d)\n", -+ dentry, err, rerr); -+ err = -EIO; -+ } else -+ dput(wh); -+ -+out: -+ return err; -+} -+ -+static int au_d_may_add(struct dentry *dentry) -+{ -+ int err; -+ -+ err = 0; -+ if (unlikely(d_unhashed(dentry))) -+ err = -ENOENT; -+ if (unlikely(dentry->d_inode)) -+ err = -EEXIST; -+ return err; -+} -+ -+/* -+ * simple tests for the adding inode operations. -+ * following the checks in vfs, plus the parent-child relationship. 
-+ */ -+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent, int isdir) -+{ -+ int err; -+ umode_t h_mode; -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ -+ err = -ENAMETOOLONG; -+ if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN)) -+ goto out; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ h_inode = h_dentry->d_inode; -+ if (!dentry->d_inode) { -+ err = -EEXIST; -+ if (unlikely(h_inode)) -+ goto out; -+ } else { -+ /* rename(2) case */ -+ err = -EIO; -+ if (unlikely(!h_inode || !h_inode->i_nlink)) -+ goto out; -+ -+ h_mode = h_inode->i_mode; -+ if (!isdir) { -+ err = -EISDIR; -+ if (unlikely(S_ISDIR(h_mode))) -+ goto out; -+ } else if (unlikely(!S_ISDIR(h_mode))) { -+ err = -ENOTDIR; -+ goto out; -+ } -+ } -+ -+ err = 0; -+ /* expected parent dir is locked */ -+ if (unlikely(h_parent != h_dentry->d_parent)) -+ err = -EIO; -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* -+ * initial procedure of adding a new entry. -+ * prepare writable branch and the parent dir, lock it, -+ * and lookup whiteout for the new entry. 
-+ */ -+static struct dentry* -+lock_hdir_lkup_wh(struct dentry *dentry, struct au_dtime *dt, -+ struct dentry *src_dentry, struct au_pin *pin, -+ struct au_wr_dir_args *wr_dir_args) -+{ -+ struct dentry *wh_dentry, *h_parent; -+ struct super_block *sb; -+ struct au_branch *br; -+ int err; -+ unsigned int udba; -+ aufs_bindex_t bcpup; -+ -+ AuDbg("%pd\n", dentry); -+ -+ err = au_wr_dir(dentry, src_dentry, wr_dir_args); -+ bcpup = err; -+ wh_dentry = ERR_PTR(err); -+ if (unlikely(err < 0)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ udba = au_opt_udba(sb); -+ err = au_pin(pin, dentry, bcpup, udba, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ wh_dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ h_parent = au_pinned_h_parent(pin); -+ if (udba != AuOpt_UDBA_NONE -+ && au_dbstart(dentry) == bcpup) -+ err = au_may_add(dentry, bcpup, h_parent, -+ au_ftest_wrdir(wr_dir_args->flags, ISDIR)); -+ else if (unlikely(dentry->d_name.len > AUFS_MAX_NAMELEN)) -+ err = -ENAMETOOLONG; -+ wh_dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out_unpin; -+ -+ br = au_sbr(sb, bcpup); -+ if (dt) { -+ struct path tmp = { -+ .dentry = h_parent, -+ .mnt = au_br_mnt(br) -+ }; -+ au_dtime_store(dt, au_pinned_parent(pin), &tmp); -+ } -+ -+ wh_dentry = NULL; -+ if (bcpup != au_dbwh(dentry)) -+ goto out; /* success */ -+ -+ /* -+ * ENAMETOOLONG here means that if we allowed create such name, then it -+ * would not be able to removed in the future. So we don't allow such -+ * name here and we don't handle ENAMETOOLONG differently here. 
-+ */ -+ wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, br); -+ -+out_unpin: -+ if (IS_ERR(wh_dentry)) -+ au_unpin(pin); -+out: -+ return wh_dentry; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+enum { Mknod, Symlink, Creat }; -+struct simple_arg { -+ int type; -+ union { -+ struct { -+ umode_t mode; -+ bool want_excl; -+ bool try_aopen; -+ struct vfsub_aopen_args *aopen; -+ } c; -+ struct { -+ const char *symname; -+ } s; -+ struct { -+ umode_t mode; -+ dev_t dev; -+ } m; -+ } u; -+}; -+ -+static int add_simple(struct inode *dir, struct dentry *dentry, -+ struct simple_arg *arg) -+{ -+ int err, rerr; -+ aufs_bindex_t bstart; -+ unsigned char created; -+ const unsigned char try_aopen -+ = (arg->type == Creat && arg->u.c.try_aopen); -+ struct dentry *wh_dentry, *parent; -+ struct inode *h_dir; -+ struct super_block *sb; -+ struct au_branch *br; -+ /* to reuduce stack size */ -+ struct { -+ struct au_dtime dt; -+ struct au_pin pin; -+ struct path h_path; -+ struct au_wr_dir_args wr_dir_args; -+ } *a; -+ -+ AuDbg("%pd\n", dentry); -+ IMustLock(dir); -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ a->wr_dir_args.force_btgt = -1; -+ a->wr_dir_args.flags = AuWrDir_ADD_ENTRY; -+ -+ parent = dentry->d_parent; /* dir inode is locked */ -+ if (!try_aopen) { -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_free; -+ } -+ err = au_d_may_add(dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ if (!try_aopen) -+ di_write_lock_parent(parent); -+ wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL, -+ &a->pin, &a->wr_dir_args); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_parent; -+ -+ bstart = au_dbstart(dentry); -+ sb = dentry->d_sb; -+ br = au_sbr(sb, bstart); -+ a->h_path.dentry = au_h_dptr(dentry, bstart); -+ a->h_path.mnt = au_br_mnt(br); -+ h_dir = au_pinned_h_dir(&a->pin); -+ switch (arg->type) { -+ 
case Creat: -+ err = 0; -+ if (!try_aopen || !h_dir->i_op->atomic_open) -+ err = vfsub_create(h_dir, &a->h_path, arg->u.c.mode, -+ arg->u.c.want_excl); -+ else -+ err = vfsub_atomic_open(h_dir, a->h_path.dentry, -+ arg->u.c.aopen, br); -+ break; -+ case Symlink: -+ err = vfsub_symlink(h_dir, &a->h_path, arg->u.s.symname); -+ break; -+ case Mknod: -+ err = vfsub_mknod(h_dir, &a->h_path, arg->u.m.mode, -+ arg->u.m.dev); -+ break; -+ default: -+ BUG(); -+ } -+ created = !err; -+ if (!err) -+ err = epilog(dir, bstart, wh_dentry, dentry); -+ -+ /* revert */ -+ if (unlikely(created && err && a->h_path.dentry->d_inode)) { -+ /* no delegation since it is just created */ -+ rerr = vfsub_unlink(h_dir, &a->h_path, /*delegated*/NULL, -+ /*force*/0); -+ if (rerr) { -+ AuIOErr("%pd revert failure(%d, %d)\n", -+ dentry, err, rerr); -+ err = -EIO; -+ } -+ au_dtime_revert(&a->dt); -+ } -+ -+ if (!err && try_aopen && !h_dir->i_op->atomic_open) -+ *arg->u.c.aopen->opened |= FILE_CREATED; -+ -+ au_unpin(&a->pin); -+ dput(wh_dentry); -+ -+out_parent: -+ if (!try_aopen) -+ di_write_unlock(parent); -+out_unlock: -+ if (unlikely(err)) { -+ au_update_dbstart(dentry); -+ d_drop(dentry); -+ } -+ if (!try_aopen) -+ aufs_read_unlock(dentry, AuLock_DW); -+out_free: -+ kfree(a); -+out: -+ return err; -+} -+ -+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, -+ dev_t dev) -+{ -+ struct simple_arg arg = { -+ .type = Mknod, -+ .u.m = { -+ .mode = mode, -+ .dev = dev -+ } -+ }; -+ return add_simple(dir, dentry, &arg); -+} -+ -+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname) -+{ -+ struct simple_arg arg = { -+ .type = Symlink, -+ .u.s.symname = symname -+ }; -+ return add_simple(dir, dentry, &arg); -+} -+ -+int aufs_create(struct inode *dir, struct dentry *dentry, umode_t mode, -+ bool want_excl) -+{ -+ struct simple_arg arg = { -+ .type = Creat, -+ .u.c = { -+ .mode = mode, -+ .want_excl = want_excl -+ } -+ }; -+ return add_simple(dir, dentry, 
&arg); -+} -+ -+int au_aopen_or_create(struct inode *dir, struct dentry *dentry, -+ struct vfsub_aopen_args *aopen_args) -+{ -+ struct simple_arg arg = { -+ .type = Creat, -+ .u.c = { -+ .mode = aopen_args->create_mode, -+ .want_excl = aopen_args->open_flag & O_EXCL, -+ .try_aopen = true, -+ .aopen = aopen_args -+ } -+ }; -+ return add_simple(dir, dentry, &arg); -+} -+ -+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct super_block *sb; -+ struct dentry *parent, *h_parent, *h_dentry; -+ struct inode *h_dir, *inode; -+ struct vfsmount *h_mnt; -+ struct au_wr_dir_args wr_dir_args = { -+ .force_btgt = -1, -+ .flags = AuWrDir_TMPFILE -+ }; -+ -+ /* copy-up may happen */ -+ mutex_lock(&dir->i_mutex); -+ -+ sb = dir->i_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out; -+ -+ err = au_di_init(dentry); -+ if (unlikely(err)) -+ goto out_si; -+ -+ err = -EBUSY; -+ parent = d_find_any_alias(dir); -+ AuDebugOn(!parent); -+ di_write_lock_parent(parent); -+ if (unlikely(parent->d_inode != dir)) -+ goto out_parent; -+ -+ err = au_digen_test(parent, au_sigen(sb)); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ bindex = au_dbstart(parent); -+ au_set_dbstart(dentry, bindex); -+ au_set_dbend(dentry, bindex); -+ err = au_wr_dir(dentry, /*src_dentry*/NULL, &wr_dir_args); -+ bindex = err; -+ if (unlikely(err < 0)) -+ goto out_parent; -+ -+ err = -EOPNOTSUPP; -+ h_dir = au_h_iptr(dir, bindex); -+ if (unlikely(!h_dir->i_op->tmpfile)) -+ goto out_parent; -+ -+ h_mnt = au_sbr_mnt(sb, bindex); -+ err = vfsub_mnt_want_write(h_mnt); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ h_parent = au_h_dptr(parent, bindex); -+ err = inode_permission(h_parent->d_inode, MAY_WRITE | MAY_EXEC); -+ if (unlikely(err)) -+ goto out_mnt; -+ -+ err = -ENOMEM; -+ h_dentry = d_alloc(h_parent, &dentry->d_name); -+ if (unlikely(!h_dentry)) -+ goto out_mnt; -+ -+ err = h_dir->i_op->tmpfile(h_dir, 
h_dentry, mode); -+ if (unlikely(err)) -+ goto out_dentry; -+ -+ au_set_dbstart(dentry, bindex); -+ au_set_dbend(dentry, bindex); -+ au_set_h_dptr(dentry, bindex, dget(h_dentry)); -+ inode = au_new_inode(dentry, /*must_new*/1); -+ if (IS_ERR(inode)) { -+ err = PTR_ERR(inode); -+ au_set_h_dptr(dentry, bindex, NULL); -+ au_set_dbstart(dentry, -1); -+ au_set_dbend(dentry, -1); -+ } else { -+ if (!inode->i_nlink) -+ set_nlink(inode, 1); -+ d_tmpfile(dentry, inode); -+ au_di(dentry)->di_tmpfile = 1; -+ -+ /* update without i_mutex */ -+ if (au_ibstart(dir) == au_dbstart(dentry)) -+ au_cpup_attr_timesizes(dir); -+ } -+ -+out_dentry: -+ dput(h_dentry); -+out_mnt: -+ vfsub_mnt_drop_write(h_mnt); -+out_parent: -+ di_write_unlock(parent); -+ dput(parent); -+ di_write_unlock(dentry); -+ if (!err) -+#if 0 -+ /* verbose coding for lock class name */ -+ au_rw_class(&au_di(dentry)->di_rwsem, -+ au_lc_key + AuLcNonDir_DIINFO); -+#else -+ ; -+#endif -+ else { -+ au_di_fin(dentry); -+ dentry->d_fsdata = NULL; -+ } -+out_si: -+ si_read_unlock(sb); -+out: -+ mutex_unlock(&dir->i_mutex); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_link_args { -+ aufs_bindex_t bdst, bsrc; -+ struct au_pin pin; -+ struct path h_path; -+ struct dentry *src_parent, *parent; -+}; -+ -+static int au_cpup_before_link(struct dentry *src_dentry, -+ struct au_link_args *a) -+{ -+ int err; -+ struct dentry *h_src_dentry; -+ struct au_cp_generic cpg = { -+ .dentry = src_dentry, -+ .bdst = a->bdst, -+ .bsrc = a->bsrc, -+ .len = -1, -+ .pin = &a->pin, -+ .flags = AuCpup_DTIME | AuCpup_HOPEN /* | AuCpup_KEEPLINO */ -+ }; -+ -+ di_read_lock_parent(a->src_parent, AuLock_IR); -+ err = au_test_and_cpup_dirs(src_dentry, a->bdst); -+ if (unlikely(err)) -+ goto out; -+ -+ h_src_dentry = au_h_dptr(src_dentry, a->bsrc); -+ err = au_pin(&a->pin, src_dentry, a->bdst, -+ au_opt_udba(src_dentry->d_sb), -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if 
(unlikely(err)) -+ goto out; -+ -+ err = au_sio_cpup_simple(&cpg); -+ au_unpin(&a->pin); -+ -+out: -+ di_read_unlock(a->src_parent, AuLock_IR); -+ return err; -+} -+ -+static int au_cpup_or_link(struct dentry *src_dentry, struct dentry *dentry, -+ struct au_link_args *a) -+{ -+ int err; -+ unsigned char plink; -+ aufs_bindex_t bend; -+ struct dentry *h_src_dentry; -+ struct inode *h_inode, *inode, *delegated; -+ struct super_block *sb; -+ struct file *h_file; -+ -+ plink = 0; -+ h_inode = NULL; -+ sb = src_dentry->d_sb; -+ inode = src_dentry->d_inode; -+ if (au_ibstart(inode) <= a->bdst) -+ h_inode = au_h_iptr(inode, a->bdst); -+ if (!h_inode || !h_inode->i_nlink) { -+ /* copyup src_dentry as the name of dentry. */ -+ bend = au_dbend(dentry); -+ if (bend < a->bsrc) -+ au_set_dbend(dentry, a->bsrc); -+ au_set_h_dptr(dentry, a->bsrc, -+ dget(au_h_dptr(src_dentry, a->bsrc))); -+ dget(a->h_path.dentry); -+ au_set_h_dptr(dentry, a->bdst, NULL); -+ AuDbg("temporary d_inode...\n"); -+ spin_lock(&dentry->d_lock); -+ dentry->d_inode = src_dentry->d_inode; /* tmp */ -+ spin_unlock(&dentry->d_lock); -+ h_file = au_h_open_pre(dentry, a->bsrc, /*force_wr*/0); -+ if (IS_ERR(h_file)) -+ err = PTR_ERR(h_file); -+ else { -+ struct au_cp_generic cpg = { -+ .dentry = dentry, -+ .bdst = a->bdst, -+ .bsrc = -1, -+ .len = -1, -+ .pin = &a->pin, -+ .flags = AuCpup_KEEPLINO -+ }; -+ err = au_sio_cpup_simple(&cpg); -+ au_h_open_post(dentry, a->bsrc, h_file); -+ if (!err) { -+ dput(a->h_path.dentry); -+ a->h_path.dentry = au_h_dptr(dentry, a->bdst); -+ } else -+ au_set_h_dptr(dentry, a->bdst, -+ a->h_path.dentry); -+ } -+ spin_lock(&dentry->d_lock); -+ dentry->d_inode = NULL; /* restore */ -+ spin_unlock(&dentry->d_lock); -+ AuDbg("temporary d_inode...done\n"); -+ au_set_h_dptr(dentry, a->bsrc, NULL); -+ au_set_dbend(dentry, bend); -+ } else { -+ /* the inode of src_dentry already exists on a.bdst branch */ -+ h_src_dentry = d_find_alias(h_inode); -+ if (!h_src_dentry && 
au_plink_test(inode)) { -+ plink = 1; -+ h_src_dentry = au_plink_lkup(inode, a->bdst); -+ err = PTR_ERR(h_src_dentry); -+ if (IS_ERR(h_src_dentry)) -+ goto out; -+ -+ if (unlikely(!h_src_dentry->d_inode)) { -+ dput(h_src_dentry); -+ h_src_dentry = NULL; -+ } -+ -+ } -+ if (h_src_dentry) { -+ delegated = NULL; -+ err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin), -+ &a->h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ dput(h_src_dentry); -+ } else { -+ AuIOErr("no dentry found for hi%lu on b%d\n", -+ h_inode->i_ino, a->bdst); -+ err = -EIO; -+ } -+ } -+ -+ if (!err && !plink) -+ au_plink_append(inode, a->bdst, a->h_path.dentry); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int aufs_link(struct dentry *src_dentry, struct inode *dir, -+ struct dentry *dentry) -+{ -+ int err, rerr; -+ struct au_dtime dt; -+ struct au_link_args *a; -+ struct dentry *wh_dentry, *h_src_dentry; -+ struct inode *inode, *delegated; -+ struct super_block *sb; -+ struct au_wr_dir_args wr_dir_args = { -+ /* .force_btgt = -1, */ -+ .flags = AuWrDir_ADD_ENTRY -+ }; -+ -+ IMustLock(dir); -+ inode = src_dentry->d_inode; -+ IMustLock(inode); -+ -+ err = -ENOMEM; -+ a = kzalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ a->parent = dentry->d_parent; /* dir inode is locked */ -+ err = aufs_read_and_write_lock2(dentry, src_dentry, -+ AuLock_NOPLM | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_kfree; -+ err = au_d_linkable(src_dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ err = au_d_may_add(dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ a->src_parent = dget_parent(src_dentry); -+ wr_dir_args.force_btgt = au_ibstart(inode); -+ -+ di_write_lock_parent(a->parent); -+ wr_dir_args.force_btgt = au_wbr(dentry, wr_dir_args.force_btgt); -+ wh_dentry = lock_hdir_lkup_wh(dentry, &dt, src_dentry, &a->pin, -+ &wr_dir_args); -+ err = 
PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_parent; -+ -+ err = 0; -+ sb = dentry->d_sb; -+ a->bdst = au_dbstart(dentry); -+ a->h_path.dentry = au_h_dptr(dentry, a->bdst); -+ a->h_path.mnt = au_sbr_mnt(sb, a->bdst); -+ a->bsrc = au_ibstart(inode); -+ h_src_dentry = au_h_d_alias(src_dentry, a->bsrc); -+ if (!h_src_dentry && au_di(src_dentry)->di_tmpfile) -+ h_src_dentry = dget(au_hi_wh(inode, a->bsrc)); -+ if (!h_src_dentry) { -+ a->bsrc = au_dbstart(src_dentry); -+ h_src_dentry = au_h_d_alias(src_dentry, a->bsrc); -+ AuDebugOn(!h_src_dentry); -+ } else if (IS_ERR(h_src_dentry)) { -+ err = PTR_ERR(h_src_dentry); -+ goto out_parent; -+ } -+ -+ if (au_opt_test(au_mntflags(sb), PLINK)) { -+ if (a->bdst < a->bsrc -+ /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */) -+ err = au_cpup_or_link(src_dentry, dentry, a); -+ else { -+ delegated = NULL; -+ err = vfsub_link(h_src_dentry, au_pinned_h_dir(&a->pin), -+ &a->h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ } -+ dput(h_src_dentry); -+ } else { -+ /* -+ * copyup src_dentry to the branch we process, -+ * and then link(2) to it. 
-+ */ -+ dput(h_src_dentry); -+ if (a->bdst < a->bsrc -+ /* && h_src_dentry->d_sb != a->h_path.dentry->d_sb */) { -+ au_unpin(&a->pin); -+ di_write_unlock(a->parent); -+ err = au_cpup_before_link(src_dentry, a); -+ di_write_lock_parent(a->parent); -+ if (!err) -+ err = au_pin(&a->pin, dentry, a->bdst, -+ au_opt_udba(sb), -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (unlikely(err)) -+ goto out_wh; -+ } -+ if (!err) { -+ h_src_dentry = au_h_dptr(src_dentry, a->bdst); -+ err = -ENOENT; -+ if (h_src_dentry && h_src_dentry->d_inode) { -+ delegated = NULL; -+ err = vfsub_link(h_src_dentry, -+ au_pinned_h_dir(&a->pin), -+ &a->h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry" -+ " for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ } -+ } -+ } -+ if (unlikely(err)) -+ goto out_unpin; -+ -+ if (wh_dentry) { -+ a->h_path.dentry = wh_dentry; -+ err = au_wh_unlink_dentry(au_pinned_h_dir(&a->pin), &a->h_path, -+ dentry); -+ if (unlikely(err)) -+ goto out_revert; -+ } -+ -+ au_dir_ts(dir, a->bdst); -+ dir->i_version++; -+ inc_nlink(inode); -+ inode->i_ctime = dir->i_ctime; -+ d_instantiate(dentry, au_igrab(inode)); -+ if (d_unhashed(a->h_path.dentry)) -+ /* some filesystem calls d_drop() */ -+ d_drop(dentry); -+ /* some filesystems consume an inode even hardlink */ -+ au_fhsm_wrote(sb, a->bdst, /*force*/0); -+ goto out_unpin; /* success */ -+ -+out_revert: -+ /* no delegation since it is just created */ -+ rerr = vfsub_unlink(au_pinned_h_dir(&a->pin), &a->h_path, -+ /*delegated*/NULL, /*force*/0); -+ if (unlikely(rerr)) { -+ AuIOErr("%pd reverting failed(%d, %d)\n", dentry, err, rerr); -+ err = -EIO; -+ } -+ au_dtime_revert(&dt); -+out_unpin: -+ au_unpin(&a->pin); -+out_wh: -+ dput(wh_dentry); -+out_parent: -+ di_write_unlock(a->parent); -+ dput(a->src_parent); -+out_unlock: -+ if (unlikely(err)) { -+ au_update_dbstart(dentry); -+ d_drop(dentry); -+ } -+ aufs_read_and_write_unlock2(dentry, src_dentry); 
-+out_kfree: -+ kfree(a); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode) -+{ -+ int err, rerr; -+ aufs_bindex_t bindex; -+ unsigned char diropq; -+ struct path h_path; -+ struct dentry *wh_dentry, *parent, *opq_dentry; -+ struct mutex *h_mtx; -+ struct super_block *sb; -+ struct { -+ struct au_pin pin; -+ struct au_dtime dt; -+ } *a; /* reduce the stack usage */ -+ struct au_wr_dir_args wr_dir_args = { -+ .force_btgt = -1, -+ .flags = AuWrDir_ADD_ENTRY | AuWrDir_ISDIR -+ }; -+ -+ IMustLock(dir); -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_free; -+ err = au_d_may_add(dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ parent = dentry->d_parent; /* dir inode is locked */ -+ di_write_lock_parent(parent); -+ wh_dentry = lock_hdir_lkup_wh(dentry, &a->dt, /*src_dentry*/NULL, -+ &a->pin, &wr_dir_args); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_parent; -+ -+ sb = dentry->d_sb; -+ bindex = au_dbstart(dentry); -+ h_path.dentry = au_h_dptr(dentry, bindex); -+ h_path.mnt = au_sbr_mnt(sb, bindex); -+ err = vfsub_mkdir(au_pinned_h_dir(&a->pin), &h_path, mode); -+ if (unlikely(err)) -+ goto out_unpin; -+ -+ /* make the dir opaque */ -+ diropq = 0; -+ h_mtx = &h_path.dentry->d_inode->i_mutex; -+ if (wh_dentry -+ || au_opt_test(au_mntflags(sb), ALWAYS_DIROPQ)) { -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD); -+ opq_dentry = au_diropq_create(dentry, bindex); -+ mutex_unlock(h_mtx); -+ err = PTR_ERR(opq_dentry); -+ if (IS_ERR(opq_dentry)) -+ goto out_dir; -+ dput(opq_dentry); -+ diropq = 1; -+ } -+ -+ err = epilog(dir, bindex, wh_dentry, dentry); -+ if (!err) { -+ inc_nlink(dir); -+ goto out_unpin; /* success */ -+ } -+ -+ /* revert */ -+ if (diropq) { -+ AuLabel(revert opq); -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD); -+ rerr = 
au_diropq_remove(dentry, bindex); -+ mutex_unlock(h_mtx); -+ if (rerr) { -+ AuIOErr("%pd reverting diropq failed(%d, %d)\n", -+ dentry, err, rerr); -+ err = -EIO; -+ } -+ } -+ -+out_dir: -+ AuLabel(revert dir); -+ rerr = vfsub_rmdir(au_pinned_h_dir(&a->pin), &h_path); -+ if (rerr) { -+ AuIOErr("%pd reverting dir failed(%d, %d)\n", -+ dentry, err, rerr); -+ err = -EIO; -+ } -+ au_dtime_revert(&a->dt); -+out_unpin: -+ au_unpin(&a->pin); -+ dput(wh_dentry); -+out_parent: -+ di_write_unlock(parent); -+out_unlock: -+ if (unlikely(err)) { -+ au_update_dbstart(dentry); -+ d_drop(dentry); -+ } -+ aufs_read_unlock(dentry, AuLock_DW); -+out_free: -+ kfree(a); -+out: -+ return err; -+} -diff --git a/fs/aufs/i_op_del.c b/fs/aufs/i_op_del.c -new file mode 100644 -index 0000000..b4dd686 ---- /dev/null -+++ b/fs/aufs/i_op_del.c -@@ -0,0 +1,506 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * inode operations (del entry) -+ */ -+ -+#include "aufs.h" -+ -+/* -+ * decide if a new whiteout for @dentry is necessary or not. -+ * when it is necessary, prepare the parent dir for the upper branch whose -+ * branch index is @bcpup for creation. the actual creation of the whiteout will -+ * be done by caller. 
-+ * return value: -+ * 0: wh is unnecessary -+ * plus: wh is necessary -+ * minus: error -+ */ -+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup) -+{ -+ int need_wh, err; -+ aufs_bindex_t bstart; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ bstart = au_dbstart(dentry); -+ if (*bcpup < 0) { -+ *bcpup = bstart; -+ if (au_test_ro(sb, bstart, dentry->d_inode)) { -+ err = AuWbrCopyup(au_sbi(sb), dentry); -+ *bcpup = err; -+ if (unlikely(err < 0)) -+ goto out; -+ } -+ } else -+ AuDebugOn(bstart < *bcpup -+ || au_test_ro(sb, *bcpup, dentry->d_inode)); -+ AuDbg("bcpup %d, bstart %d\n", *bcpup, bstart); -+ -+ if (*bcpup != bstart) { -+ err = au_cpup_dirs(dentry, *bcpup); -+ if (unlikely(err)) -+ goto out; -+ need_wh = 1; -+ } else { -+ struct au_dinfo *dinfo, *tmp; -+ -+ need_wh = -ENOMEM; -+ dinfo = au_di(dentry); -+ tmp = au_di_alloc(sb, AuLsc_DI_TMP); -+ if (tmp) { -+ au_di_cp(tmp, dinfo); -+ au_di_swap(tmp, dinfo); -+ /* returns the number of positive dentries */ -+ need_wh = au_lkup_dentry(dentry, bstart + 1, /*type*/0); -+ au_di_swap(tmp, dinfo); -+ au_rw_write_unlock(&tmp->di_rwsem); -+ au_di_free(tmp); -+ } -+ } -+ AuDbg("need_wh %d\n", need_wh); -+ err = need_wh; -+ -+out: -+ return err; -+} -+ -+/* -+ * simple tests for the del-entry operations. -+ * following the checks in vfs, plus the parent-child relationship. 
-+ */ -+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent, int isdir) -+{ -+ int err; -+ umode_t h_mode; -+ struct dentry *h_dentry, *h_latest; -+ struct inode *h_inode; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ h_inode = h_dentry->d_inode; -+ if (dentry->d_inode) { -+ err = -ENOENT; -+ if (unlikely(!h_inode || !h_inode->i_nlink)) -+ goto out; -+ -+ h_mode = h_inode->i_mode; -+ if (!isdir) { -+ err = -EISDIR; -+ if (unlikely(S_ISDIR(h_mode))) -+ goto out; -+ } else if (unlikely(!S_ISDIR(h_mode))) { -+ err = -ENOTDIR; -+ goto out; -+ } -+ } else { -+ /* rename(2) case */ -+ err = -EIO; -+ if (unlikely(h_inode)) -+ goto out; -+ } -+ -+ err = -ENOENT; -+ /* expected parent dir is locked */ -+ if (unlikely(h_parent != h_dentry->d_parent)) -+ goto out; -+ err = 0; -+ -+ /* -+ * rmdir a dir may break the consistency on some filesystem. -+ * let's try heavy test. -+ */ -+ err = -EACCES; -+ if (unlikely(!au_opt_test(au_mntflags(dentry->d_sb), DIRPERM1) -+ && au_test_h_perm(h_parent->d_inode, -+ MAY_EXEC | MAY_WRITE))) -+ goto out; -+ -+ h_latest = au_sio_lkup_one(&dentry->d_name, h_parent); -+ err = -EIO; -+ if (IS_ERR(h_latest)) -+ goto out; -+ if (h_latest == h_dentry) -+ err = 0; -+ dput(h_latest); -+ -+out: -+ return err; -+} -+ -+/* -+ * decide the branch where we operate for @dentry. the branch index will be set -+ * @rbcpup. after diciding it, 'pin' it and store the timestamps of the parent -+ * dir for reverting. -+ * when a new whiteout is necessary, create it. 
-+ */ -+static struct dentry* -+lock_hdir_create_wh(struct dentry *dentry, int isdir, aufs_bindex_t *rbcpup, -+ struct au_dtime *dt, struct au_pin *pin) -+{ -+ struct dentry *wh_dentry; -+ struct super_block *sb; -+ struct path h_path; -+ int err, need_wh; -+ unsigned int udba; -+ aufs_bindex_t bcpup; -+ -+ need_wh = au_wr_dir_need_wh(dentry, isdir, rbcpup); -+ wh_dentry = ERR_PTR(need_wh); -+ if (unlikely(need_wh < 0)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ udba = au_opt_udba(sb); -+ bcpup = *rbcpup; -+ err = au_pin(pin, dentry, bcpup, udba, -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ wh_dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ h_path.dentry = au_pinned_h_parent(pin); -+ if (udba != AuOpt_UDBA_NONE -+ && au_dbstart(dentry) == bcpup) { -+ err = au_may_del(dentry, bcpup, h_path.dentry, isdir); -+ wh_dentry = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out_unpin; -+ } -+ -+ h_path.mnt = au_sbr_mnt(sb, bcpup); -+ au_dtime_store(dt, au_pinned_parent(pin), &h_path); -+ wh_dentry = NULL; -+ if (!need_wh) -+ goto out; /* success, no need to create whiteout */ -+ -+ wh_dentry = au_wh_create(dentry, bcpup, h_path.dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_unpin; -+ -+ /* returns with the parent is locked and wh_dentry is dget-ed */ -+ goto out; /* success */ -+ -+out_unpin: -+ au_unpin(pin); -+out: -+ return wh_dentry; -+} -+ -+/* -+ * when removing a dir, rename it to a unique temporary whiteout-ed name first -+ * in order to be revertible and save time for removing many child whiteouts -+ * under the dir. -+ * returns 1 when there are too many child whiteout and caller should remove -+ * them asynchronously. returns 0 when the number of children is enough small to -+ * remove now or the branch fs is a remote fs. -+ * otherwise return an error. 
-+ */ -+static int renwh_and_rmdir(struct dentry *dentry, aufs_bindex_t bindex, -+ struct au_nhash *whlist, struct inode *dir) -+{ -+ int rmdir_later, err, dirwh; -+ struct dentry *h_dentry; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ SiMustAnyLock(sb); -+ h_dentry = au_h_dptr(dentry, bindex); -+ err = au_whtmp_ren(h_dentry, au_sbr(sb, bindex)); -+ if (unlikely(err)) -+ goto out; -+ -+ /* stop monitoring */ -+ au_hn_free(au_hi(dentry->d_inode, bindex)); -+ -+ if (!au_test_fs_remote(h_dentry->d_sb)) { -+ dirwh = au_sbi(sb)->si_dirwh; -+ rmdir_later = (dirwh <= 1); -+ if (!rmdir_later) -+ rmdir_later = au_nhash_test_longer_wh(whlist, bindex, -+ dirwh); -+ if (rmdir_later) -+ return rmdir_later; -+ } -+ -+ err = au_whtmp_rmdir(dir, bindex, h_dentry, whlist); -+ if (unlikely(err)) { -+ AuIOErr("rmdir %pd, b%d failed, %d. ignored\n", -+ h_dentry, bindex, err); -+ err = 0; -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* -+ * final procedure for deleting a entry. -+ * maintain dentry and iattr. -+ */ -+static void epilog(struct inode *dir, struct dentry *dentry, -+ aufs_bindex_t bindex) -+{ -+ struct inode *inode; -+ -+ inode = dentry->d_inode; -+ d_drop(dentry); -+ inode->i_ctime = dir->i_ctime; -+ -+ au_dir_ts(dir, bindex); -+ dir->i_version++; -+} -+ -+/* -+ * when an error happened, remove the created whiteout and revert everything. 
-+ */ -+static int do_revert(int err, struct inode *dir, aufs_bindex_t bindex, -+ aufs_bindex_t bwh, struct dentry *wh_dentry, -+ struct dentry *dentry, struct au_dtime *dt) -+{ -+ int rerr; -+ struct path h_path = { -+ .dentry = wh_dentry, -+ .mnt = au_sbr_mnt(dir->i_sb, bindex) -+ }; -+ -+ rerr = au_wh_unlink_dentry(au_h_iptr(dir, bindex), &h_path, dentry); -+ if (!rerr) { -+ au_set_dbwh(dentry, bwh); -+ au_dtime_revert(dt); -+ return 0; -+ } -+ -+ AuIOErr("%pd reverting whiteout failed(%d, %d)\n", dentry, err, rerr); -+ return -EIO; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int aufs_unlink(struct inode *dir, struct dentry *dentry) -+{ -+ int err; -+ aufs_bindex_t bwh, bindex, bstart; -+ struct inode *inode, *h_dir, *delegated; -+ struct dentry *parent, *wh_dentry; -+ /* to reuduce stack size */ -+ struct { -+ struct au_dtime dt; -+ struct au_pin pin; -+ struct path h_path; -+ } *a; -+ -+ IMustLock(dir); -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_free; -+ err = au_d_hashed_positive(dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ err = -EISDIR; -+ if (unlikely(d_is_dir(dentry))) -+ goto out_unlock; /* possible? 
*/ -+ -+ bstart = au_dbstart(dentry); -+ bwh = au_dbwh(dentry); -+ bindex = -1; -+ parent = dentry->d_parent; /* dir inode is locked */ -+ di_write_lock_parent(parent); -+ wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/0, &bindex, &a->dt, -+ &a->pin); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_parent; -+ -+ a->h_path.mnt = au_sbr_mnt(dentry->d_sb, bstart); -+ a->h_path.dentry = au_h_dptr(dentry, bstart); -+ dget(a->h_path.dentry); -+ if (bindex == bstart) { -+ h_dir = au_pinned_h_dir(&a->pin); -+ delegated = NULL; -+ err = vfsub_unlink(h_dir, &a->h_path, &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ } else { -+ /* dir inode is locked */ -+ h_dir = wh_dentry->d_parent->d_inode; -+ IMustLock(h_dir); -+ err = 0; -+ } -+ -+ if (!err) { -+ vfsub_drop_nlink(inode); -+ epilog(dir, dentry, bindex); -+ -+ /* update target timestamps */ -+ if (bindex == bstart) { -+ vfsub_update_h_iattr(&a->h_path, /*did*/NULL); -+ /*ignore*/ -+ inode->i_ctime = a->h_path.dentry->d_inode->i_ctime; -+ } else -+ /* todo: this timestamp may be reverted later */ -+ inode->i_ctime = h_dir->i_ctime; -+ goto out_unpin; /* success */ -+ } -+ -+ /* revert */ -+ if (wh_dentry) { -+ int rerr; -+ -+ rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry, -+ &a->dt); -+ if (rerr) -+ err = rerr; -+ } -+ -+out_unpin: -+ au_unpin(&a->pin); -+ dput(wh_dentry); -+ dput(a->h_path.dentry); -+out_parent: -+ di_write_unlock(parent); -+out_unlock: -+ aufs_read_unlock(dentry, AuLock_DW); -+out_free: -+ kfree(a); -+out: -+ return err; -+} -+ -+int aufs_rmdir(struct inode *dir, struct dentry *dentry) -+{ -+ int err, rmdir_later; -+ aufs_bindex_t bwh, bindex, bstart; -+ struct inode *inode; -+ struct dentry *parent, *wh_dentry, *h_dentry; -+ struct au_whtmp_rmdir *args; -+ /* to reuduce stack size */ -+ struct { -+ struct au_dtime dt; -+ struct au_pin pin; -+ } *a; 
-+ -+ IMustLock(dir); -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_GEN); -+ if (unlikely(err)) -+ goto out_free; -+ err = au_alive_dir(dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ inode = dentry->d_inode; -+ IMustLock(inode); -+ err = -ENOTDIR; -+ if (unlikely(!d_is_dir(dentry))) -+ goto out_unlock; /* possible? */ -+ -+ err = -ENOMEM; -+ args = au_whtmp_rmdir_alloc(dir->i_sb, GFP_NOFS); -+ if (unlikely(!args)) -+ goto out_unlock; -+ -+ parent = dentry->d_parent; /* dir inode is locked */ -+ di_write_lock_parent(parent); -+ err = au_test_empty(dentry, &args->whlist); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ bstart = au_dbstart(dentry); -+ bwh = au_dbwh(dentry); -+ bindex = -1; -+ wh_dentry = lock_hdir_create_wh(dentry, /*isdir*/1, &bindex, &a->dt, -+ &a->pin); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) -+ goto out_parent; -+ -+ h_dentry = au_h_dptr(dentry, bstart); -+ dget(h_dentry); -+ rmdir_later = 0; -+ if (bindex == bstart) { -+ err = renwh_and_rmdir(dentry, bstart, &args->whlist, dir); -+ if (err > 0) { -+ rmdir_later = err; -+ err = 0; -+ } -+ } else { -+ /* stop monitoring */ -+ au_hn_free(au_hi(inode, bstart)); -+ -+ /* dir inode is locked */ -+ IMustLock(wh_dentry->d_parent->d_inode); -+ err = 0; -+ } -+ -+ if (!err) { -+ vfsub_dead_dir(inode); -+ au_set_dbdiropq(dentry, -1); -+ epilog(dir, dentry, bindex); -+ -+ if (rmdir_later) { -+ au_whtmp_kick_rmdir(dir, bstart, h_dentry, args); -+ args = NULL; -+ } -+ -+ goto out_unpin; /* success */ -+ } -+ -+ /* revert */ -+ AuLabel(revert); -+ if (wh_dentry) { -+ int rerr; -+ -+ rerr = do_revert(err, dir, bindex, bwh, wh_dentry, dentry, -+ &a->dt); -+ if (rerr) -+ err = rerr; -+ } -+ -+out_unpin: -+ au_unpin(&a->pin); -+ dput(wh_dentry); -+ dput(h_dentry); -+out_parent: -+ di_write_unlock(parent); -+ if (args) -+ au_whtmp_rmdir_free(args); -+out_unlock: -+ 
aufs_read_unlock(dentry, AuLock_DW); -+out_free: -+ kfree(a); -+out: -+ AuTraceErr(err); -+ return err; -+} -diff --git a/fs/aufs/i_op_ren.c b/fs/aufs/i_op_ren.c -new file mode 100644 -index 0000000..6ce2ed6 ---- /dev/null -+++ b/fs/aufs/i_op_ren.c -@@ -0,0 +1,1013 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * inode operation (rename entry) -+ * todo: this is crazy monster -+ */ -+ -+#include "aufs.h" -+ -+enum { AuSRC, AuDST, AuSrcDst }; -+enum { AuPARENT, AuCHILD, AuParentChild }; -+ -+#define AuRen_ISDIR 1 -+#define AuRen_ISSAMEDIR (1 << 1) -+#define AuRen_WHSRC (1 << 2) -+#define AuRen_WHDST (1 << 3) -+#define AuRen_MNT_WRITE (1 << 4) -+#define AuRen_DT_DSTDIR (1 << 5) -+#define AuRen_DIROPQ (1 << 6) -+#define au_ftest_ren(flags, name) ((flags) & AuRen_##name) -+#define au_fset_ren(flags, name) \ -+ do { (flags) |= AuRen_##name; } while (0) -+#define au_fclr_ren(flags, name) \ -+ do { (flags) &= ~AuRen_##name; } while (0) -+ -+struct au_ren_args { -+ struct { -+ struct dentry *dentry, *h_dentry, *parent, *h_parent, -+ *wh_dentry; -+ struct inode *dir, *inode; -+ struct au_hinode *hdir; -+ struct au_dtime dt[AuParentChild]; -+ aufs_bindex_t bstart; -+ } sd[AuSrcDst]; -+ -+#define src_dentry sd[AuSRC].dentry -+#define src_dir sd[AuSRC].dir -+#define src_inode sd[AuSRC].inode -+#define src_h_dentry 
sd[AuSRC].h_dentry -+#define src_parent sd[AuSRC].parent -+#define src_h_parent sd[AuSRC].h_parent -+#define src_wh_dentry sd[AuSRC].wh_dentry -+#define src_hdir sd[AuSRC].hdir -+#define src_h_dir sd[AuSRC].hdir->hi_inode -+#define src_dt sd[AuSRC].dt -+#define src_bstart sd[AuSRC].bstart -+ -+#define dst_dentry sd[AuDST].dentry -+#define dst_dir sd[AuDST].dir -+#define dst_inode sd[AuDST].inode -+#define dst_h_dentry sd[AuDST].h_dentry -+#define dst_parent sd[AuDST].parent -+#define dst_h_parent sd[AuDST].h_parent -+#define dst_wh_dentry sd[AuDST].wh_dentry -+#define dst_hdir sd[AuDST].hdir -+#define dst_h_dir sd[AuDST].hdir->hi_inode -+#define dst_dt sd[AuDST].dt -+#define dst_bstart sd[AuDST].bstart -+ -+ struct dentry *h_trap; -+ struct au_branch *br; -+ struct au_hinode *src_hinode; -+ struct path h_path; -+ struct au_nhash whlist; -+ aufs_bindex_t btgt, src_bwh, src_bdiropq; -+ -+ unsigned int flags; -+ -+ struct au_whtmp_rmdir *thargs; -+ struct dentry *h_dst; -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * functions for reverting. -+ * when an error happened in a single rename systemcall, we should revert -+ * everything as if nothing happened. -+ * we don't need to revert the copied-up/down the parent dir since they are -+ * harmless. -+ */ -+ -+#define RevertFailure(fmt, ...) 
do { \ -+ AuIOErr("revert failure: " fmt " (%d, %d)\n", \ -+ ##__VA_ARGS__, err, rerr); \ -+ err = -EIO; \ -+} while (0) -+ -+static void au_ren_rev_diropq(int err, struct au_ren_args *a) -+{ -+ int rerr; -+ -+ au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD); -+ rerr = au_diropq_remove(a->src_dentry, a->btgt); -+ au_hn_imtx_unlock(a->src_hinode); -+ au_set_dbdiropq(a->src_dentry, a->src_bdiropq); -+ if (rerr) -+ RevertFailure("remove diropq %pd", a->src_dentry); -+} -+ -+static void au_ren_rev_rename(int err, struct au_ren_args *a) -+{ -+ int rerr; -+ struct inode *delegated; -+ -+ a->h_path.dentry = vfsub_lkup_one(&a->src_dentry->d_name, -+ a->src_h_parent); -+ rerr = PTR_ERR(a->h_path.dentry); -+ if (IS_ERR(a->h_path.dentry)) { -+ RevertFailure("lkup one %pd", a->src_dentry); -+ return; -+ } -+ -+ delegated = NULL; -+ rerr = vfsub_rename(a->dst_h_dir, -+ au_h_dptr(a->src_dentry, a->btgt), -+ a->src_h_dir, &a->h_path, &delegated); -+ if (unlikely(rerr == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal rename\n"); -+ iput(delegated); -+ } -+ d_drop(a->h_path.dentry); -+ dput(a->h_path.dentry); -+ /* au_set_h_dptr(a->src_dentry, a->btgt, NULL); */ -+ if (rerr) -+ RevertFailure("rename %pd", a->src_dentry); -+} -+ -+static void au_ren_rev_whtmp(int err, struct au_ren_args *a) -+{ -+ int rerr; -+ struct inode *delegated; -+ -+ a->h_path.dentry = vfsub_lkup_one(&a->dst_dentry->d_name, -+ a->dst_h_parent); -+ rerr = PTR_ERR(a->h_path.dentry); -+ if (IS_ERR(a->h_path.dentry)) { -+ RevertFailure("lkup one %pd", a->dst_dentry); -+ return; -+ } -+ if (a->h_path.dentry->d_inode) { -+ d_drop(a->h_path.dentry); -+ dput(a->h_path.dentry); -+ return; -+ } -+ -+ delegated = NULL; -+ rerr = vfsub_rename(a->dst_h_dir, a->h_dst, a->dst_h_dir, &a->h_path, -+ &delegated); -+ if (unlikely(rerr == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal rename\n"); -+ iput(delegated); -+ } -+ d_drop(a->h_path.dentry); 
-+ dput(a->h_path.dentry); -+ if (!rerr) -+ au_set_h_dptr(a->dst_dentry, a->btgt, dget(a->h_dst)); -+ else -+ RevertFailure("rename %pd", a->h_dst); -+} -+ -+static void au_ren_rev_whsrc(int err, struct au_ren_args *a) -+{ -+ int rerr; -+ -+ a->h_path.dentry = a->src_wh_dentry; -+ rerr = au_wh_unlink_dentry(a->src_h_dir, &a->h_path, a->src_dentry); -+ au_set_dbwh(a->src_dentry, a->src_bwh); -+ if (rerr) -+ RevertFailure("unlink %pd", a->src_wh_dentry); -+} -+#undef RevertFailure -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * when we have to copyup the renaming entry, do it with the rename-target name -+ * in order to minimize the cost (the later actual rename is unnecessary). -+ * otherwise rename it on the target branch. -+ */ -+static int au_ren_or_cpup(struct au_ren_args *a) -+{ -+ int err; -+ struct dentry *d; -+ struct inode *delegated; -+ -+ d = a->src_dentry; -+ if (au_dbstart(d) == a->btgt) { -+ a->h_path.dentry = a->dst_h_dentry; -+ if (au_ftest_ren(a->flags, DIROPQ) -+ && au_dbdiropq(d) == a->btgt) -+ au_fclr_ren(a->flags, DIROPQ); -+ AuDebugOn(au_dbstart(d) != a->btgt); -+ delegated = NULL; -+ err = vfsub_rename(a->src_h_dir, au_h_dptr(d, a->btgt), -+ a->dst_h_dir, &a->h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal rename\n"); -+ iput(delegated); -+ } -+ } else -+ BUG(); -+ -+ if (!err && a->h_dst) -+ /* it will be set to dinfo later */ -+ dget(a->h_dst); -+ -+ return err; -+} -+ -+/* cf. 
aufs_rmdir() */ -+static int au_ren_del_whtmp(struct au_ren_args *a) -+{ -+ int err; -+ struct inode *dir; -+ -+ dir = a->dst_dir; -+ SiMustAnyLock(dir->i_sb); -+ if (!au_nhash_test_longer_wh(&a->whlist, a->btgt, -+ au_sbi(dir->i_sb)->si_dirwh) -+ || au_test_fs_remote(a->h_dst->d_sb)) { -+ err = au_whtmp_rmdir(dir, a->btgt, a->h_dst, &a->whlist); -+ if (unlikely(err)) -+ pr_warn("failed removing whtmp dir %pd (%d), " -+ "ignored.\n", a->h_dst, err); -+ } else { -+ au_nhash_wh_free(&a->thargs->whlist); -+ a->thargs->whlist = a->whlist; -+ a->whlist.nh_num = 0; -+ au_whtmp_kick_rmdir(dir, a->btgt, a->h_dst, a->thargs); -+ dput(a->h_dst); -+ a->thargs = NULL; -+ } -+ -+ return 0; -+} -+ -+/* make it 'opaque' dir. */ -+static int au_ren_diropq(struct au_ren_args *a) -+{ -+ int err; -+ struct dentry *diropq; -+ -+ err = 0; -+ a->src_bdiropq = au_dbdiropq(a->src_dentry); -+ a->src_hinode = au_hi(a->src_inode, a->btgt); -+ au_hn_imtx_lock_nested(a->src_hinode, AuLsc_I_CHILD); -+ diropq = au_diropq_create(a->src_dentry, a->btgt); -+ au_hn_imtx_unlock(a->src_hinode); -+ if (IS_ERR(diropq)) -+ err = PTR_ERR(diropq); -+ else -+ dput(diropq); -+ -+ return err; -+} -+ -+static int do_rename(struct au_ren_args *a) -+{ -+ int err; -+ struct dentry *d, *h_d; -+ -+ /* prepare workqueue args for asynchronous rmdir */ -+ h_d = a->dst_h_dentry; -+ if (au_ftest_ren(a->flags, ISDIR) && h_d->d_inode) { -+ err = -ENOMEM; -+ a->thargs = au_whtmp_rmdir_alloc(a->src_dentry->d_sb, GFP_NOFS); -+ if (unlikely(!a->thargs)) -+ goto out; -+ a->h_dst = dget(h_d); -+ } -+ -+ /* create whiteout for src_dentry */ -+ if (au_ftest_ren(a->flags, WHSRC)) { -+ a->src_bwh = au_dbwh(a->src_dentry); -+ AuDebugOn(a->src_bwh >= 0); -+ a->src_wh_dentry -+ = au_wh_create(a->src_dentry, a->btgt, a->src_h_parent); -+ err = PTR_ERR(a->src_wh_dentry); -+ if (IS_ERR(a->src_wh_dentry)) -+ goto out_thargs; -+ } -+ -+ /* lookup whiteout for dentry */ -+ if (au_ftest_ren(a->flags, WHDST)) { -+ h_d = 
au_wh_lkup(a->dst_h_parent, &a->dst_dentry->d_name, -+ a->br); -+ err = PTR_ERR(h_d); -+ if (IS_ERR(h_d)) -+ goto out_whsrc; -+ if (!h_d->d_inode) -+ dput(h_d); -+ else -+ a->dst_wh_dentry = h_d; -+ } -+ -+ /* rename dentry to tmpwh */ -+ if (a->thargs) { -+ err = au_whtmp_ren(a->dst_h_dentry, a->br); -+ if (unlikely(err)) -+ goto out_whdst; -+ -+ d = a->dst_dentry; -+ au_set_h_dptr(d, a->btgt, NULL); -+ err = au_lkup_neg(d, a->btgt, /*wh*/0); -+ if (unlikely(err)) -+ goto out_whtmp; -+ a->dst_h_dentry = au_h_dptr(d, a->btgt); -+ } -+ -+ BUG_ON(a->dst_h_dentry->d_inode && a->src_bstart != a->btgt); -+ -+ /* rename by vfs_rename or cpup */ -+ d = a->dst_dentry; -+ if (au_ftest_ren(a->flags, ISDIR) -+ && (a->dst_wh_dentry -+ || au_dbdiropq(d) == a->btgt -+ /* hide the lower to keep xino */ -+ || a->btgt < au_dbend(d) -+ || au_opt_test(au_mntflags(d->d_sb), ALWAYS_DIROPQ))) -+ au_fset_ren(a->flags, DIROPQ); -+ err = au_ren_or_cpup(a); -+ if (unlikely(err)) -+ /* leave the copied-up one */ -+ goto out_whtmp; -+ -+ /* make dir opaque */ -+ if (au_ftest_ren(a->flags, DIROPQ)) { -+ err = au_ren_diropq(a); -+ if (unlikely(err)) -+ goto out_rename; -+ } -+ -+ /* update target timestamps */ -+ AuDebugOn(au_dbstart(a->src_dentry) != a->btgt); -+ a->h_path.dentry = au_h_dptr(a->src_dentry, a->btgt); -+ vfsub_update_h_iattr(&a->h_path, /*did*/NULL); /*ignore*/ -+ a->src_inode->i_ctime = a->h_path.dentry->d_inode->i_ctime; -+ -+ /* remove whiteout for dentry */ -+ if (a->dst_wh_dentry) { -+ a->h_path.dentry = a->dst_wh_dentry; -+ err = au_wh_unlink_dentry(a->dst_h_dir, &a->h_path, -+ a->dst_dentry); -+ if (unlikely(err)) -+ goto out_diropq; -+ } -+ -+ /* remove whtmp */ -+ if (a->thargs) -+ au_ren_del_whtmp(a); /* ignore this error */ -+ -+ au_fhsm_wrote(a->src_dentry->d_sb, a->btgt, /*force*/0); -+ err = 0; -+ goto out_success; -+ -+out_diropq: -+ if (au_ftest_ren(a->flags, DIROPQ)) -+ au_ren_rev_diropq(err, a); -+out_rename: -+ au_ren_rev_rename(err, a); -+ dput(a->h_dst); 
-+out_whtmp: -+ if (a->thargs) -+ au_ren_rev_whtmp(err, a); -+out_whdst: -+ dput(a->dst_wh_dentry); -+ a->dst_wh_dentry = NULL; -+out_whsrc: -+ if (a->src_wh_dentry) -+ au_ren_rev_whsrc(err, a); -+out_success: -+ dput(a->src_wh_dentry); -+ dput(a->dst_wh_dentry); -+out_thargs: -+ if (a->thargs) { -+ dput(a->h_dst); -+ au_whtmp_rmdir_free(a->thargs); -+ a->thargs = NULL; -+ } -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * test if @dentry dir can be rename destination or not. -+ * success means, it is a logically empty dir. -+ */ -+static int may_rename_dstdir(struct dentry *dentry, struct au_nhash *whlist) -+{ -+ return au_test_empty(dentry, whlist); -+} -+ -+/* -+ * test if @dentry dir can be rename source or not. -+ * if it can, return 0 and @children is filled. -+ * success means, -+ * - it is a logically empty dir. -+ * - or, it exists on writable branch and has no children including whiteouts -+ * on the lower branch. 
-+ */ -+static int may_rename_srcdir(struct dentry *dentry, aufs_bindex_t btgt) -+{ -+ int err; -+ unsigned int rdhash; -+ aufs_bindex_t bstart; -+ -+ bstart = au_dbstart(dentry); -+ if (bstart != btgt) { -+ struct au_nhash whlist; -+ -+ SiMustAnyLock(dentry->d_sb); -+ rdhash = au_sbi(dentry->d_sb)->si_rdhash; -+ if (!rdhash) -+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, -+ dentry)); -+ err = au_nhash_alloc(&whlist, rdhash, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_test_empty(dentry, &whlist); -+ au_nhash_wh_free(&whlist); -+ goto out; -+ } -+ -+ if (bstart == au_dbtaildir(dentry)) -+ return 0; /* success */ -+ -+ err = au_test_empty_lower(dentry); -+ -+out: -+ if (err == -ENOTEMPTY) { -+ AuWarn1("renaming dir who has child(ren) on multiple branches," -+ " is not supported\n"); -+ err = -EXDEV; -+ } -+ return err; -+} -+ -+/* side effect: sets whlist and h_dentry */ -+static int au_ren_may_dir(struct au_ren_args *a) -+{ -+ int err; -+ unsigned int rdhash; -+ struct dentry *d; -+ -+ d = a->dst_dentry; -+ SiMustAnyLock(d->d_sb); -+ -+ err = 0; -+ if (au_ftest_ren(a->flags, ISDIR) && a->dst_inode) { -+ rdhash = au_sbi(d->d_sb)->si_rdhash; -+ if (!rdhash) -+ rdhash = au_rdhash_est(au_dir_size(/*file*/NULL, d)); -+ err = au_nhash_alloc(&a->whlist, rdhash, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ -+ au_set_dbstart(d, a->dst_bstart); -+ err = may_rename_dstdir(d, &a->whlist); -+ au_set_dbstart(d, a->btgt); -+ } -+ a->dst_h_dentry = au_h_dptr(d, au_dbstart(d)); -+ if (unlikely(err)) -+ goto out; -+ -+ d = a->src_dentry; -+ a->src_h_dentry = au_h_dptr(d, au_dbstart(d)); -+ if (au_ftest_ren(a->flags, ISDIR)) { -+ err = may_rename_srcdir(d, a->btgt); -+ if (unlikely(err)) { -+ au_nhash_wh_free(&a->whlist); -+ a->whlist.nh_num = 0; -+ } -+ } -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * simple tests for rename. 
-+ * following the checks in vfs, plus the parent-child relationship. -+ */ -+static int au_may_ren(struct au_ren_args *a) -+{ -+ int err, isdir; -+ struct inode *h_inode; -+ -+ if (a->src_bstart == a->btgt) { -+ err = au_may_del(a->src_dentry, a->btgt, a->src_h_parent, -+ au_ftest_ren(a->flags, ISDIR)); -+ if (unlikely(err)) -+ goto out; -+ err = -EINVAL; -+ if (unlikely(a->src_h_dentry == a->h_trap)) -+ goto out; -+ } -+ -+ err = 0; -+ if (a->dst_bstart != a->btgt) -+ goto out; -+ -+ err = -ENOTEMPTY; -+ if (unlikely(a->dst_h_dentry == a->h_trap)) -+ goto out; -+ -+ err = -EIO; -+ h_inode = a->dst_h_dentry->d_inode; -+ isdir = !!au_ftest_ren(a->flags, ISDIR); -+ if (!a->dst_dentry->d_inode) { -+ if (unlikely(h_inode)) -+ goto out; -+ err = au_may_add(a->dst_dentry, a->btgt, a->dst_h_parent, -+ isdir); -+ } else { -+ if (unlikely(!h_inode || !h_inode->i_nlink)) -+ goto out; -+ err = au_may_del(a->dst_dentry, a->btgt, a->dst_h_parent, -+ isdir); -+ if (unlikely(err)) -+ goto out; -+ } -+ -+out: -+ if (unlikely(err == -ENOENT || err == -EEXIST)) -+ err = -EIO; -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * locking order -+ * (VFS) -+ * - src_dir and dir by lock_rename() -+ * - inode if exitsts -+ * (aufs) -+ * - lock all -+ * + src_dentry and dentry by aufs_read_and_write_lock2() which calls, -+ * + si_read_lock -+ * + di_write_lock2_child() -+ * + di_write_lock_child() -+ * + ii_write_lock_child() -+ * + di_write_lock_child2() -+ * + ii_write_lock_child2() -+ * + src_parent and parent -+ * + di_write_lock_parent() -+ * + ii_write_lock_parent() -+ * + di_write_lock_parent2() -+ * + ii_write_lock_parent2() -+ * + lower src_dir and dir by vfsub_lock_rename() -+ * + verify the every relationships between child and parent. if any -+ * of them failed, unlock all and return -EBUSY. 
-+ */ -+static void au_ren_unlock(struct au_ren_args *a) -+{ -+ vfsub_unlock_rename(a->src_h_parent, a->src_hdir, -+ a->dst_h_parent, a->dst_hdir); -+ if (au_ftest_ren(a->flags, MNT_WRITE)) -+ vfsub_mnt_drop_write(au_br_mnt(a->br)); -+} -+ -+static int au_ren_lock(struct au_ren_args *a) -+{ -+ int err; -+ unsigned int udba; -+ -+ err = 0; -+ a->src_h_parent = au_h_dptr(a->src_parent, a->btgt); -+ a->src_hdir = au_hi(a->src_dir, a->btgt); -+ a->dst_h_parent = au_h_dptr(a->dst_parent, a->btgt); -+ a->dst_hdir = au_hi(a->dst_dir, a->btgt); -+ -+ err = vfsub_mnt_want_write(au_br_mnt(a->br)); -+ if (unlikely(err)) -+ goto out; -+ au_fset_ren(a->flags, MNT_WRITE); -+ a->h_trap = vfsub_lock_rename(a->src_h_parent, a->src_hdir, -+ a->dst_h_parent, a->dst_hdir); -+ udba = au_opt_udba(a->src_dentry->d_sb); -+ if (unlikely(a->src_hdir->hi_inode != a->src_h_parent->d_inode -+ || a->dst_hdir->hi_inode != a->dst_h_parent->d_inode)) -+ err = au_busy_or_stale(); -+ if (!err && au_dbstart(a->src_dentry) == a->btgt) -+ err = au_h_verify(a->src_h_dentry, udba, -+ a->src_h_parent->d_inode, a->src_h_parent, -+ a->br); -+ if (!err && au_dbstart(a->dst_dentry) == a->btgt) -+ err = au_h_verify(a->dst_h_dentry, udba, -+ a->dst_h_parent->d_inode, a->dst_h_parent, -+ a->br); -+ if (!err) -+ goto out; /* success */ -+ -+ err = au_busy_or_stale(); -+ au_ren_unlock(a); -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_ren_refresh_dir(struct au_ren_args *a) -+{ -+ struct inode *dir; -+ -+ dir = a->dst_dir; -+ dir->i_version++; -+ if (au_ftest_ren(a->flags, ISDIR)) { -+ /* is this updating defined in POSIX? 
*/ -+ au_cpup_attr_timesizes(a->src_inode); -+ au_cpup_attr_nlink(dir, /*force*/1); -+ } -+ -+ au_dir_ts(dir, a->btgt); -+ -+ if (au_ftest_ren(a->flags, ISSAMEDIR)) -+ return; -+ -+ dir = a->src_dir; -+ dir->i_version++; -+ if (au_ftest_ren(a->flags, ISDIR)) -+ au_cpup_attr_nlink(dir, /*force*/1); -+ au_dir_ts(dir, a->btgt); -+} -+ -+static void au_ren_refresh(struct au_ren_args *a) -+{ -+ aufs_bindex_t bend, bindex; -+ struct dentry *d, *h_d; -+ struct inode *i, *h_i; -+ struct super_block *sb; -+ -+ d = a->dst_dentry; -+ d_drop(d); -+ if (a->h_dst) -+ /* already dget-ed by au_ren_or_cpup() */ -+ au_set_h_dptr(d, a->btgt, a->h_dst); -+ -+ i = a->dst_inode; -+ if (i) { -+ if (!au_ftest_ren(a->flags, ISDIR)) -+ vfsub_drop_nlink(i); -+ else { -+ vfsub_dead_dir(i); -+ au_cpup_attr_timesizes(i); -+ } -+ au_update_dbrange(d, /*do_put_zero*/1); -+ } else { -+ bend = a->btgt; -+ for (bindex = au_dbstart(d); bindex < bend; bindex++) -+ au_set_h_dptr(d, bindex, NULL); -+ bend = au_dbend(d); -+ for (bindex = a->btgt + 1; bindex <= bend; bindex++) -+ au_set_h_dptr(d, bindex, NULL); -+ au_update_dbrange(d, /*do_put_zero*/0); -+ } -+ -+ d = a->src_dentry; -+ au_set_dbwh(d, -1); -+ bend = au_dbend(d); -+ for (bindex = a->btgt + 1; bindex <= bend; bindex++) { -+ h_d = au_h_dptr(d, bindex); -+ if (h_d) -+ au_set_h_dptr(d, bindex, NULL); -+ } -+ au_set_dbend(d, a->btgt); -+ -+ sb = d->d_sb; -+ i = a->src_inode; -+ if (au_opt_test(au_mntflags(sb), PLINK) && au_plink_test(i)) -+ return; /* success */ -+ -+ bend = au_ibend(i); -+ for (bindex = a->btgt + 1; bindex <= bend; bindex++) { -+ h_i = au_h_iptr(i, bindex); -+ if (h_i) { -+ au_xino_write(sb, bindex, h_i->i_ino, /*ino*/0); -+ /* ignore this error */ -+ au_set_h_iptr(i, bindex, NULL, 0); -+ } -+ } -+ au_set_ibend(i, a->btgt); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* mainly for link(2) and rename(2) */ -+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt) -+{ -+ aufs_bindex_t 
bdiropq, bwh; -+ struct dentry *parent; -+ struct au_branch *br; -+ -+ parent = dentry->d_parent; -+ IMustLock(parent->d_inode); /* dir is locked */ -+ -+ bdiropq = au_dbdiropq(parent); -+ bwh = au_dbwh(dentry); -+ br = au_sbr(dentry->d_sb, btgt); -+ if (au_br_rdonly(br) -+ || (0 <= bdiropq && bdiropq < btgt) -+ || (0 <= bwh && bwh < btgt)) -+ btgt = -1; -+ -+ AuDbg("btgt %d\n", btgt); -+ return btgt; -+} -+ -+/* sets src_bstart, dst_bstart and btgt */ -+static int au_ren_wbr(struct au_ren_args *a) -+{ -+ int err; -+ struct au_wr_dir_args wr_dir_args = { -+ /* .force_btgt = -1, */ -+ .flags = AuWrDir_ADD_ENTRY -+ }; -+ -+ a->src_bstart = au_dbstart(a->src_dentry); -+ a->dst_bstart = au_dbstart(a->dst_dentry); -+ if (au_ftest_ren(a->flags, ISDIR)) -+ au_fset_wrdir(wr_dir_args.flags, ISDIR); -+ wr_dir_args.force_btgt = a->src_bstart; -+ if (a->dst_inode && a->dst_bstart < a->src_bstart) -+ wr_dir_args.force_btgt = a->dst_bstart; -+ wr_dir_args.force_btgt = au_wbr(a->dst_dentry, wr_dir_args.force_btgt); -+ err = au_wr_dir(a->dst_dentry, a->src_dentry, &wr_dir_args); -+ a->btgt = err; -+ -+ return err; -+} -+ -+static void au_ren_dt(struct au_ren_args *a) -+{ -+ a->h_path.dentry = a->src_h_parent; -+ au_dtime_store(a->src_dt + AuPARENT, a->src_parent, &a->h_path); -+ if (!au_ftest_ren(a->flags, ISSAMEDIR)) { -+ a->h_path.dentry = a->dst_h_parent; -+ au_dtime_store(a->dst_dt + AuPARENT, a->dst_parent, &a->h_path); -+ } -+ -+ au_fclr_ren(a->flags, DT_DSTDIR); -+ if (!au_ftest_ren(a->flags, ISDIR)) -+ return; -+ -+ a->h_path.dentry = a->src_h_dentry; -+ au_dtime_store(a->src_dt + AuCHILD, a->src_dentry, &a->h_path); -+ if (a->dst_h_dentry->d_inode) { -+ au_fset_ren(a->flags, DT_DSTDIR); -+ a->h_path.dentry = a->dst_h_dentry; -+ au_dtime_store(a->dst_dt + AuCHILD, a->dst_dentry, &a->h_path); -+ } -+} -+ -+static void au_ren_rev_dt(int err, struct au_ren_args *a) -+{ -+ struct dentry *h_d; -+ struct mutex *h_mtx; -+ -+ au_dtime_revert(a->src_dt + AuPARENT); -+ if 
(!au_ftest_ren(a->flags, ISSAMEDIR)) -+ au_dtime_revert(a->dst_dt + AuPARENT); -+ -+ if (au_ftest_ren(a->flags, ISDIR) && err != -EIO) { -+ h_d = a->src_dt[AuCHILD].dt_h_path.dentry; -+ h_mtx = &h_d->d_inode->i_mutex; -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD); -+ au_dtime_revert(a->src_dt + AuCHILD); -+ mutex_unlock(h_mtx); -+ -+ if (au_ftest_ren(a->flags, DT_DSTDIR)) { -+ h_d = a->dst_dt[AuCHILD].dt_h_path.dentry; -+ h_mtx = &h_d->d_inode->i_mutex; -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD); -+ au_dtime_revert(a->dst_dt + AuCHILD); -+ mutex_unlock(h_mtx); -+ } -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int aufs_rename(struct inode *_src_dir, struct dentry *_src_dentry, -+ struct inode *_dst_dir, struct dentry *_dst_dentry) -+{ -+ int err, flags; -+ /* reduce stack space */ -+ struct au_ren_args *a; -+ -+ AuDbg("%pd, %pd\n", _src_dentry, _dst_dentry); -+ IMustLock(_src_dir); -+ IMustLock(_dst_dir); -+ -+ err = -ENOMEM; -+ BUILD_BUG_ON(sizeof(*a) > PAGE_SIZE); -+ a = kzalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ a->src_dir = _src_dir; -+ a->src_dentry = _src_dentry; -+ a->src_inode = a->src_dentry->d_inode; -+ a->src_parent = a->src_dentry->d_parent; /* dir inode is locked */ -+ a->dst_dir = _dst_dir; -+ a->dst_dentry = _dst_dentry; -+ a->dst_inode = a->dst_dentry->d_inode; -+ a->dst_parent = a->dst_dentry->d_parent; /* dir inode is locked */ -+ if (a->dst_inode) { -+ IMustLock(a->dst_inode); -+ au_igrab(a->dst_inode); -+ } -+ -+ err = -ENOTDIR; -+ flags = AuLock_FLUSH | AuLock_NOPLM | AuLock_GEN; -+ if (d_is_dir(a->src_dentry)) { -+ au_fset_ren(a->flags, ISDIR); -+ if (unlikely(d_is_positive(a->dst_dentry) -+ && !d_is_dir(a->dst_dentry))) -+ goto out_free; -+ flags |= AuLock_DIRS; -+ } -+ err = aufs_read_and_write_lock2(a->dst_dentry, a->src_dentry, flags); -+ if (unlikely(err)) -+ goto out_free; -+ -+ err = au_d_hashed_positive(a->src_dentry); -+ if (unlikely(err)) -+ goto out_unlock; 
-+ err = -ENOENT; -+ if (a->dst_inode) { -+ /* -+ * If it is a dir, VFS unhash dst_dentry before this -+ * function. It means we cannot rely upon d_unhashed(). -+ */ -+ if (unlikely(!a->dst_inode->i_nlink)) -+ goto out_unlock; -+ if (!S_ISDIR(a->dst_inode->i_mode)) { -+ err = au_d_hashed_positive(a->dst_dentry); -+ if (unlikely(err)) -+ goto out_unlock; -+ } else if (unlikely(IS_DEADDIR(a->dst_inode))) -+ goto out_unlock; -+ } else if (unlikely(d_unhashed(a->dst_dentry))) -+ goto out_unlock; -+ -+ /* -+ * is it possible? -+ * yes, it happened (in linux-3.3-rcN) but I don't know why. -+ * there may exist a problem somewhere else. -+ */ -+ err = -EINVAL; -+ if (unlikely(a->dst_parent->d_inode == a->src_dentry->d_inode)) -+ goto out_unlock; -+ -+ au_fset_ren(a->flags, ISSAMEDIR); /* temporary */ -+ di_write_lock_parent(a->dst_parent); -+ -+ /* which branch we process */ -+ err = au_ren_wbr(a); -+ if (unlikely(err < 0)) -+ goto out_parent; -+ a->br = au_sbr(a->dst_dentry->d_sb, a->btgt); -+ a->h_path.mnt = au_br_mnt(a->br); -+ -+ /* are they available to be renamed */ -+ err = au_ren_may_dir(a); -+ if (unlikely(err)) -+ goto out_children; -+ -+ /* prepare the writable parent dir on the same branch */ -+ if (a->dst_bstart == a->btgt) { -+ au_fset_ren(a->flags, WHDST); -+ } else { -+ err = au_cpup_dirs(a->dst_dentry, a->btgt); -+ if (unlikely(err)) -+ goto out_children; -+ } -+ -+ if (a->src_dir != a->dst_dir) { -+ /* -+ * this temporary unlock is safe, -+ * because both dir->i_mutex are locked. 
-+ */ -+ di_write_unlock(a->dst_parent); -+ di_write_lock_parent(a->src_parent); -+ err = au_wr_dir_need_wh(a->src_dentry, -+ au_ftest_ren(a->flags, ISDIR), -+ &a->btgt); -+ di_write_unlock(a->src_parent); -+ di_write_lock2_parent(a->src_parent, a->dst_parent, /*isdir*/1); -+ au_fclr_ren(a->flags, ISSAMEDIR); -+ } else -+ err = au_wr_dir_need_wh(a->src_dentry, -+ au_ftest_ren(a->flags, ISDIR), -+ &a->btgt); -+ if (unlikely(err < 0)) -+ goto out_children; -+ if (err) -+ au_fset_ren(a->flags, WHSRC); -+ -+ /* cpup src */ -+ if (a->src_bstart != a->btgt) { -+ struct au_pin pin; -+ -+ err = au_pin(&pin, a->src_dentry, a->btgt, -+ au_opt_udba(a->src_dentry->d_sb), -+ AuPin_DI_LOCKED | AuPin_MNT_WRITE); -+ if (!err) { -+ struct au_cp_generic cpg = { -+ .dentry = a->src_dentry, -+ .bdst = a->btgt, -+ .bsrc = a->src_bstart, -+ .len = -1, -+ .pin = &pin, -+ .flags = AuCpup_DTIME | AuCpup_HOPEN -+ }; -+ AuDebugOn(au_dbstart(a->src_dentry) != a->src_bstart); -+ err = au_sio_cpup_simple(&cpg); -+ au_unpin(&pin); -+ } -+ if (unlikely(err)) -+ goto out_children; -+ a->src_bstart = a->btgt; -+ a->src_h_dentry = au_h_dptr(a->src_dentry, a->btgt); -+ au_fset_ren(a->flags, WHSRC); -+ } -+ -+ /* lock them all */ -+ err = au_ren_lock(a); -+ if (unlikely(err)) -+ /* leave the copied-up one */ -+ goto out_children; -+ -+ if (!au_opt_test(au_mntflags(a->dst_dir->i_sb), UDBA_NONE)) -+ err = au_may_ren(a); -+ else if (unlikely(a->dst_dentry->d_name.len > AUFS_MAX_NAMELEN)) -+ err = -ENAMETOOLONG; -+ if (unlikely(err)) -+ goto out_hdir; -+ -+ /* store timestamps to be revertible */ -+ au_ren_dt(a); -+ -+ /* here we go */ -+ err = do_rename(a); -+ if (unlikely(err)) -+ goto out_dt; -+ -+ /* update dir attributes */ -+ au_ren_refresh_dir(a); -+ -+ /* dput/iput all lower dentries */ -+ au_ren_refresh(a); -+ -+ goto out_hdir; /* success */ -+ -+out_dt: -+ au_ren_rev_dt(err, a); -+out_hdir: -+ au_ren_unlock(a); -+out_children: -+ au_nhash_wh_free(&a->whlist); -+ if (err && a->dst_inode && 
a->dst_bstart != a->btgt) { -+ AuDbg("bstart %d, btgt %d\n", a->dst_bstart, a->btgt); -+ au_set_h_dptr(a->dst_dentry, a->btgt, NULL); -+ au_set_dbstart(a->dst_dentry, a->dst_bstart); -+ } -+out_parent: -+ if (!err) -+ d_move(a->src_dentry, a->dst_dentry); -+ else { -+ au_update_dbstart(a->dst_dentry); -+ if (!a->dst_inode) -+ d_drop(a->dst_dentry); -+ } -+ if (au_ftest_ren(a->flags, ISSAMEDIR)) -+ di_write_unlock(a->dst_parent); -+ else -+ di_write_unlock2(a->src_parent, a->dst_parent); -+out_unlock: -+ aufs_read_and_write_unlock2(a->dst_dentry, a->src_dentry); -+out_free: -+ iput(a->dst_inode); -+ if (a->thargs) -+ au_whtmp_rmdir_free(a->thargs); -+ kfree(a); -+out: -+ AuTraceErr(err); -+ return err; -+} -diff --git a/fs/aufs/iinfo.c b/fs/aufs/iinfo.c -new file mode 100644 -index 0000000..f889aba ---- /dev/null -+++ b/fs/aufs/iinfo.c -@@ -0,0 +1,277 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * inode private data -+ */ -+ -+#include "aufs.h" -+ -+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex) -+{ -+ struct inode *h_inode; -+ -+ IiMustAnyLock(inode); -+ -+ h_inode = au_ii(inode)->ii_hinode[0 + bindex].hi_inode; -+ AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0); -+ return h_inode; -+} -+ -+/* todo: hard/soft set? 
*/ -+void au_hiput(struct au_hinode *hinode) -+{ -+ au_hn_free(hinode); -+ dput(hinode->hi_whdentry); -+ iput(hinode->hi_inode); -+} -+ -+unsigned int au_hi_flags(struct inode *inode, int isdir) -+{ -+ unsigned int flags; -+ const unsigned int mnt_flags = au_mntflags(inode->i_sb); -+ -+ flags = 0; -+ if (au_opt_test(mnt_flags, XINO)) -+ au_fset_hi(flags, XINO); -+ if (isdir && au_opt_test(mnt_flags, UDBA_HNOTIFY)) -+ au_fset_hi(flags, HNOTIFY); -+ return flags; -+} -+ -+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex, -+ struct inode *h_inode, unsigned int flags) -+{ -+ struct au_hinode *hinode; -+ struct inode *hi; -+ struct au_iinfo *iinfo = au_ii(inode); -+ -+ IiMustWriteLock(inode); -+ -+ hinode = iinfo->ii_hinode + bindex; -+ hi = hinode->hi_inode; -+ AuDebugOn(h_inode && atomic_read(&h_inode->i_count) <= 0); -+ -+ if (hi) -+ au_hiput(hinode); -+ hinode->hi_inode = h_inode; -+ if (h_inode) { -+ int err; -+ struct super_block *sb = inode->i_sb; -+ struct au_branch *br; -+ -+ AuDebugOn(inode->i_mode -+ && (h_inode->i_mode & S_IFMT) -+ != (inode->i_mode & S_IFMT)); -+ if (bindex == iinfo->ii_bstart) -+ au_cpup_igen(inode, h_inode); -+ br = au_sbr(sb, bindex); -+ hinode->hi_id = br->br_id; -+ if (au_ftest_hi(flags, XINO)) { -+ err = au_xino_write(sb, bindex, h_inode->i_ino, -+ inode->i_ino); -+ if (unlikely(err)) -+ AuIOErr1("failed au_xino_write() %d\n", err); -+ } -+ -+ if (au_ftest_hi(flags, HNOTIFY) -+ && au_br_hnotifyable(br->br_perm)) { -+ err = au_hn_alloc(hinode, inode); -+ if (unlikely(err)) -+ AuIOErr1("au_hn_alloc() %d\n", err); -+ } -+ } -+} -+ -+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex, -+ struct dentry *h_wh) -+{ -+ struct au_hinode *hinode; -+ -+ IiMustWriteLock(inode); -+ -+ hinode = au_ii(inode)->ii_hinode + bindex; -+ AuDebugOn(hinode->hi_whdentry); -+ hinode->hi_whdentry = h_wh; -+} -+ -+void au_update_iigen(struct inode *inode, int half) -+{ -+ struct au_iinfo *iinfo; -+ struct au_iigen *iigen; -+ unsigned int 
sigen; -+ -+ sigen = au_sigen(inode->i_sb); -+ iinfo = au_ii(inode); -+ iigen = &iinfo->ii_generation; -+ spin_lock(&iigen->ig_spin); -+ iigen->ig_generation = sigen; -+ if (half) -+ au_ig_fset(iigen->ig_flags, HALF_REFRESHED); -+ else -+ au_ig_fclr(iigen->ig_flags, HALF_REFRESHED); -+ spin_unlock(&iigen->ig_spin); -+} -+ -+/* it may be called at remount time, too */ -+void au_update_ibrange(struct inode *inode, int do_put_zero) -+{ -+ struct au_iinfo *iinfo; -+ aufs_bindex_t bindex, bend; -+ -+ iinfo = au_ii(inode); -+ if (!iinfo) -+ return; -+ -+ IiMustWriteLock(inode); -+ -+ if (do_put_zero && iinfo->ii_bstart >= 0) { -+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; -+ bindex++) { -+ struct inode *h_i; -+ -+ h_i = iinfo->ii_hinode[0 + bindex].hi_inode; -+ if (h_i -+ && !h_i->i_nlink -+ && !(h_i->i_state & I_LINKABLE)) -+ au_set_h_iptr(inode, bindex, NULL, 0); -+ } -+ } -+ -+ iinfo->ii_bstart = -1; -+ iinfo->ii_bend = -1; -+ bend = au_sbend(inode->i_sb); -+ for (bindex = 0; bindex <= bend; bindex++) -+ if (iinfo->ii_hinode[0 + bindex].hi_inode) { -+ iinfo->ii_bstart = bindex; -+ break; -+ } -+ if (iinfo->ii_bstart >= 0) -+ for (bindex = bend; bindex >= iinfo->ii_bstart; bindex--) -+ if (iinfo->ii_hinode[0 + bindex].hi_inode) { -+ iinfo->ii_bend = bindex; -+ break; -+ } -+ AuDebugOn(iinfo->ii_bstart > iinfo->ii_bend); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_icntnr_init_once(void *_c) -+{ -+ struct au_icntnr *c = _c; -+ struct au_iinfo *iinfo = &c->iinfo; -+ static struct lock_class_key aufs_ii; -+ -+ spin_lock_init(&iinfo->ii_generation.ig_spin); -+ au_rw_init(&iinfo->ii_rwsem); -+ au_rw_class(&iinfo->ii_rwsem, &aufs_ii); -+ inode_init_once(&c->vfs_inode); -+} -+ -+int au_iinfo_init(struct inode *inode) -+{ -+ struct au_iinfo *iinfo; -+ struct super_block *sb; -+ int nbr, i; -+ -+ sb = inode->i_sb; -+ iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo); -+ nbr = au_sbend(sb) + 1; 
-+ if (unlikely(nbr <= 0)) -+ nbr = 1; -+ iinfo->ii_hinode = kcalloc(nbr, sizeof(*iinfo->ii_hinode), GFP_NOFS); -+ if (iinfo->ii_hinode) { -+ au_ninodes_inc(sb); -+ for (i = 0; i < nbr; i++) -+ iinfo->ii_hinode[i].hi_id = -1; -+ -+ iinfo->ii_generation.ig_generation = au_sigen(sb); -+ iinfo->ii_bstart = -1; -+ iinfo->ii_bend = -1; -+ iinfo->ii_vdir = NULL; -+ return 0; -+ } -+ return -ENOMEM; -+} -+ -+int au_ii_realloc(struct au_iinfo *iinfo, int nbr) -+{ -+ int err, sz; -+ struct au_hinode *hip; -+ -+ AuRwMustWriteLock(&iinfo->ii_rwsem); -+ -+ err = -ENOMEM; -+ sz = sizeof(*hip) * (iinfo->ii_bend + 1); -+ if (!sz) -+ sz = sizeof(*hip); -+ hip = au_kzrealloc(iinfo->ii_hinode, sz, sizeof(*hip) * nbr, GFP_NOFS); -+ if (hip) { -+ iinfo->ii_hinode = hip; -+ err = 0; -+ } -+ -+ return err; -+} -+ -+void au_iinfo_fin(struct inode *inode) -+{ -+ struct au_iinfo *iinfo; -+ struct au_hinode *hi; -+ struct super_block *sb; -+ aufs_bindex_t bindex, bend; -+ const unsigned char unlinked = !inode->i_nlink; -+ -+ iinfo = au_ii(inode); -+ /* bad_inode case */ -+ if (!iinfo) -+ return; -+ -+ sb = inode->i_sb; -+ au_ninodes_dec(sb); -+ if (si_pid_test(sb)) -+ au_xino_delete_inode(inode, unlinked); -+ else { -+ /* -+ * it is safe to hide the dependency between sbinfo and -+ * sb->s_umount. -+ */ -+ lockdep_off(); -+ si_noflush_read_lock(sb); -+ au_xino_delete_inode(inode, unlinked); -+ si_read_unlock(sb); -+ lockdep_on(); -+ } -+ -+ if (iinfo->ii_vdir) -+ au_vdir_free(iinfo->ii_vdir); -+ -+ bindex = iinfo->ii_bstart; -+ if (bindex >= 0) { -+ hi = iinfo->ii_hinode + bindex; -+ bend = iinfo->ii_bend; -+ while (bindex++ <= bend) { -+ if (hi->hi_inode) -+ au_hiput(hi); -+ hi++; -+ } -+ } -+ kfree(iinfo->ii_hinode); -+ iinfo->ii_hinode = NULL; -+ AuRwDestroy(&iinfo->ii_rwsem); -+} -diff --git a/fs/aufs/inode.c b/fs/aufs/inode.c -new file mode 100644 -index 0000000..75ec2e5 ---- /dev/null -+++ b/fs/aufs/inode.c -@@ -0,0 +1,522 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * inode functions -+ */ -+ -+#include "aufs.h" -+ -+struct inode *au_igrab(struct inode *inode) -+{ -+ if (inode) { -+ AuDebugOn(!atomic_read(&inode->i_count)); -+ ihold(inode); -+ } -+ return inode; -+} -+ -+static void au_refresh_hinode_attr(struct inode *inode, int do_version) -+{ -+ au_cpup_attr_all(inode, /*force*/0); -+ au_update_iigen(inode, /*half*/1); -+ if (do_version) -+ inode->i_version++; -+} -+ -+static int au_ii_refresh(struct inode *inode, int *update) -+{ -+ int err, e; -+ umode_t type; -+ aufs_bindex_t bindex, new_bindex; -+ struct super_block *sb; -+ struct au_iinfo *iinfo; -+ struct au_hinode *p, *q, tmp; -+ -+ IiMustWriteLock(inode); -+ -+ *update = 0; -+ sb = inode->i_sb; -+ type = inode->i_mode & S_IFMT; -+ iinfo = au_ii(inode); -+ err = au_ii_realloc(iinfo, au_sbend(sb) + 1); -+ if (unlikely(err)) -+ goto out; -+ -+ AuDebugOn(iinfo->ii_bstart < 0); -+ p = iinfo->ii_hinode + iinfo->ii_bstart; -+ for (bindex = iinfo->ii_bstart; bindex <= iinfo->ii_bend; -+ bindex++, p++) { -+ if (!p->hi_inode) -+ continue; -+ -+ AuDebugOn(type != (p->hi_inode->i_mode & S_IFMT)); -+ new_bindex = au_br_index(sb, p->hi_id); -+ if (new_bindex == bindex) -+ continue; -+ -+ if (new_bindex < 0) { -+ *update = 1; -+ au_hiput(p); -+ p->hi_inode = NULL; -+ continue; -+ } -+ -+ if (new_bindex < 
iinfo->ii_bstart) -+ iinfo->ii_bstart = new_bindex; -+ if (iinfo->ii_bend < new_bindex) -+ iinfo->ii_bend = new_bindex; -+ /* swap two lower inode, and loop again */ -+ q = iinfo->ii_hinode + new_bindex; -+ tmp = *q; -+ *q = *p; -+ *p = tmp; -+ if (tmp.hi_inode) { -+ bindex--; -+ p--; -+ } -+ } -+ au_update_ibrange(inode, /*do_put_zero*/0); -+ e = au_dy_irefresh(inode); -+ if (unlikely(e && !err)) -+ err = e; -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+void au_refresh_iop(struct inode *inode, int force_getattr) -+{ -+ int type; -+ struct au_sbinfo *sbi = au_sbi(inode->i_sb); -+ const struct inode_operations *iop -+ = force_getattr ? aufs_iop : sbi->si_iop_array; -+ -+ if (inode->i_op == iop) -+ return; -+ -+ switch (inode->i_mode & S_IFMT) { -+ case S_IFDIR: -+ type = AuIop_DIR; -+ break; -+ case S_IFLNK: -+ type = AuIop_SYMLINK; -+ break; -+ default: -+ type = AuIop_OTHER; -+ break; -+ } -+ -+ inode->i_op = iop + type; -+ /* unnecessary smp_wmb() */ -+} -+ -+int au_refresh_hinode_self(struct inode *inode) -+{ -+ int err, update; -+ -+ err = au_ii_refresh(inode, &update); -+ if (!err) -+ au_refresh_hinode_attr(inode, update && S_ISDIR(inode->i_mode)); -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_refresh_hinode(struct inode *inode, struct dentry *dentry) -+{ -+ int err, e, update; -+ unsigned int flags; -+ umode_t mode; -+ aufs_bindex_t bindex, bend; -+ unsigned char isdir; -+ struct au_hinode *p; -+ struct au_iinfo *iinfo; -+ -+ err = au_ii_refresh(inode, &update); -+ if (unlikely(err)) -+ goto out; -+ -+ update = 0; -+ iinfo = au_ii(inode); -+ p = iinfo->ii_hinode + iinfo->ii_bstart; -+ mode = (inode->i_mode & S_IFMT); -+ isdir = S_ISDIR(mode); -+ flags = au_hi_flags(inode, isdir); -+ bend = au_dbend(dentry); -+ for (bindex = au_dbstart(dentry); bindex <= bend; bindex++) { -+ struct inode *h_i; -+ struct dentry *h_d; -+ -+ h_d = au_h_dptr(dentry, bindex); -+ if (!h_d || !h_d->d_inode) -+ continue; -+ -+ AuDebugOn(mode != (h_d->d_inode->i_mode & 
S_IFMT)); -+ if (iinfo->ii_bstart <= bindex && bindex <= iinfo->ii_bend) { -+ h_i = au_h_iptr(inode, bindex); -+ if (h_i) { -+ if (h_i == h_d->d_inode) -+ continue; -+ err = -EIO; -+ break; -+ } -+ } -+ if (bindex < iinfo->ii_bstart) -+ iinfo->ii_bstart = bindex; -+ if (iinfo->ii_bend < bindex) -+ iinfo->ii_bend = bindex; -+ au_set_h_iptr(inode, bindex, au_igrab(h_d->d_inode), flags); -+ update = 1; -+ } -+ au_update_ibrange(inode, /*do_put_zero*/0); -+ e = au_dy_irefresh(inode); -+ if (unlikely(e && !err)) -+ err = e; -+ if (!err) -+ au_refresh_hinode_attr(inode, update && isdir); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int set_inode(struct inode *inode, struct dentry *dentry) -+{ -+ int err; -+ unsigned int flags; -+ umode_t mode; -+ aufs_bindex_t bindex, bstart, btail; -+ unsigned char isdir; -+ struct dentry *h_dentry; -+ struct inode *h_inode; -+ struct au_iinfo *iinfo; -+ struct inode_operations *iop; -+ -+ IiMustWriteLock(inode); -+ -+ err = 0; -+ isdir = 0; -+ iop = au_sbi(inode->i_sb)->si_iop_array; -+ bstart = au_dbstart(dentry); -+ h_inode = au_h_dptr(dentry, bstart)->d_inode; -+ mode = h_inode->i_mode; -+ switch (mode & S_IFMT) { -+ case S_IFREG: -+ btail = au_dbtail(dentry); -+ inode->i_op = iop + AuIop_OTHER; -+ inode->i_fop = &aufs_file_fop; -+ err = au_dy_iaop(inode, bstart, h_inode); -+ if (unlikely(err)) -+ goto out; -+ break; -+ case S_IFDIR: -+ isdir = 1; -+ btail = au_dbtaildir(dentry); -+ inode->i_op = iop + AuIop_DIR; -+ inode->i_fop = &aufs_dir_fop; -+ break; -+ case S_IFLNK: -+ btail = au_dbtail(dentry); -+ inode->i_op = iop + AuIop_SYMLINK; -+ break; -+ case S_IFBLK: -+ case S_IFCHR: -+ case S_IFIFO: -+ case S_IFSOCK: -+ btail = au_dbtail(dentry); -+ inode->i_op = iop + AuIop_OTHER; -+ init_special_inode(inode, mode, h_inode->i_rdev); -+ break; -+ default: -+ AuIOErr("Unknown file type 0%o\n", mode); -+ err = -EIO; -+ goto out; -+ } -+ -+ /* do not set hnotify for whiteouted dirs (SHWH mode) */ -+ flags = 
au_hi_flags(inode, isdir); -+ if (au_opt_test(au_mntflags(dentry->d_sb), SHWH) -+ && au_ftest_hi(flags, HNOTIFY) -+ && dentry->d_name.len > AUFS_WH_PFX_LEN -+ && !memcmp(dentry->d_name.name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) -+ au_fclr_hi(flags, HNOTIFY); -+ iinfo = au_ii(inode); -+ iinfo->ii_bstart = bstart; -+ iinfo->ii_bend = btail; -+ for (bindex = bstart; bindex <= btail; bindex++) { -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (h_dentry) -+ au_set_h_iptr(inode, bindex, -+ au_igrab(h_dentry->d_inode), flags); -+ } -+ au_cpup_attr_all(inode, /*force*/1); -+ /* -+ * to force calling aufs_get_acl() every time, -+ * do not call cache_no_acl() for aufs inode. -+ */ -+ -+out: -+ return err; -+} -+ -+/* -+ * successful returns with iinfo write_locked -+ * minus: errno -+ * zero: success, matched -+ * plus: no error, but unmatched -+ */ -+static int reval_inode(struct inode *inode, struct dentry *dentry) -+{ -+ int err; -+ unsigned int gen, igflags; -+ aufs_bindex_t bindex, bend; -+ struct inode *h_inode, *h_dinode; -+ -+ /* -+ * before this function, if aufs got any iinfo lock, it must be only -+ * one, the parent dir. -+ * it can happen by UDBA and the obsoleted inode number. 
-+ */ -+ err = -EIO; -+ if (unlikely(inode->i_ino == parent_ino(dentry))) -+ goto out; -+ -+ err = 1; -+ ii_write_lock_new_child(inode); -+ h_dinode = au_h_dptr(dentry, au_dbstart(dentry))->d_inode; -+ bend = au_ibend(inode); -+ for (bindex = au_ibstart(inode); bindex <= bend; bindex++) { -+ h_inode = au_h_iptr(inode, bindex); -+ if (!h_inode || h_inode != h_dinode) -+ continue; -+ -+ err = 0; -+ gen = au_iigen(inode, &igflags); -+ if (gen == au_digen(dentry) -+ && !au_ig_ftest(igflags, HALF_REFRESHED)) -+ break; -+ -+ /* fully refresh inode using dentry */ -+ err = au_refresh_hinode(inode, dentry); -+ if (!err) -+ au_update_iigen(inode, /*half*/0); -+ break; -+ } -+ -+ if (unlikely(err)) -+ ii_write_unlock(inode); -+out: -+ return err; -+} -+ -+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino, -+ unsigned int d_type, ino_t *ino) -+{ -+ int err; -+ struct mutex *mtx; -+ -+ /* prevent hardlinked inode number from race condition */ -+ mtx = NULL; -+ if (d_type != DT_DIR) { -+ mtx = &au_sbr(sb, bindex)->br_xino.xi_nondir_mtx; -+ mutex_lock(mtx); -+ } -+ err = au_xino_read(sb, bindex, h_ino, ino); -+ if (unlikely(err)) -+ goto out; -+ -+ if (!*ino) { -+ err = -EIO; -+ *ino = au_xino_new_ino(sb); -+ if (unlikely(!*ino)) -+ goto out; -+ err = au_xino_write(sb, bindex, h_ino, *ino); -+ if (unlikely(err)) -+ goto out; -+ } -+ -+out: -+ if (mtx) -+ mutex_unlock(mtx); -+ return err; -+} -+ -+/* successful returns with iinfo write_locked */ -+/* todo: return with unlocked? */ -+struct inode *au_new_inode(struct dentry *dentry, int must_new) -+{ -+ struct inode *inode; -+ struct dentry *h_dentry; -+ struct super_block *sb; -+ struct mutex *mtx; -+ ino_t h_ino, ino; -+ int err; -+ aufs_bindex_t bstart; -+ -+ sb = dentry->d_sb; -+ bstart = au_dbstart(dentry); -+ h_dentry = au_h_dptr(dentry, bstart); -+ h_ino = h_dentry->d_inode->i_ino; -+ -+ /* -+ * stop 'race'-ing between hardlinks under different -+ * parents. 
-+ */ -+ mtx = NULL; -+ if (!d_is_dir(h_dentry)) -+ mtx = &au_sbr(sb, bstart)->br_xino.xi_nondir_mtx; -+ -+new_ino: -+ if (mtx) -+ mutex_lock(mtx); -+ err = au_xino_read(sb, bstart, h_ino, &ino); -+ inode = ERR_PTR(err); -+ if (unlikely(err)) -+ goto out; -+ -+ if (!ino) { -+ ino = au_xino_new_ino(sb); -+ if (unlikely(!ino)) { -+ inode = ERR_PTR(-EIO); -+ goto out; -+ } -+ } -+ -+ AuDbg("i%lu\n", (unsigned long)ino); -+ inode = au_iget_locked(sb, ino); -+ err = PTR_ERR(inode); -+ if (IS_ERR(inode)) -+ goto out; -+ -+ AuDbg("%lx, new %d\n", inode->i_state, !!(inode->i_state & I_NEW)); -+ if (inode->i_state & I_NEW) { -+ /* verbose coding for lock class name */ -+ if (unlikely(d_is_symlink(h_dentry))) -+ au_rw_class(&au_ii(inode)->ii_rwsem, -+ au_lc_key + AuLcSymlink_IIINFO); -+ else if (unlikely(d_is_dir(h_dentry))) -+ au_rw_class(&au_ii(inode)->ii_rwsem, -+ au_lc_key + AuLcDir_IIINFO); -+ else /* likely */ -+ au_rw_class(&au_ii(inode)->ii_rwsem, -+ au_lc_key + AuLcNonDir_IIINFO); -+ -+ ii_write_lock_new_child(inode); -+ err = set_inode(inode, dentry); -+ if (!err) { -+ unlock_new_inode(inode); -+ goto out; /* success */ -+ } -+ -+ /* -+ * iget_failed() calls iput(), but we need to call -+ * ii_write_unlock() after iget_failed(). so dirty hack for -+ * i_count. -+ */ -+ atomic_inc(&inode->i_count); -+ iget_failed(inode); -+ ii_write_unlock(inode); -+ au_xino_write(sb, bstart, h_ino, /*ino*/0); -+ /* ignore this error */ -+ goto out_iput; -+ } else if (!must_new && !IS_DEADDIR(inode) && inode->i_nlink) { -+ /* -+ * horrible race condition between lookup, readdir and copyup -+ * (or something). 
-+ */ -+ if (mtx) -+ mutex_unlock(mtx); -+ err = reval_inode(inode, dentry); -+ if (unlikely(err < 0)) { -+ mtx = NULL; -+ goto out_iput; -+ } -+ -+ if (!err) { -+ mtx = NULL; -+ goto out; /* success */ -+ } else if (mtx) -+ mutex_lock(mtx); -+ } -+ -+ if (unlikely(au_test_fs_unique_ino(h_dentry->d_inode))) -+ AuWarn1("Warning: Un-notified UDBA or repeatedly renamed dir," -+ " b%d, %s, %pd, hi%lu, i%lu.\n", -+ bstart, au_sbtype(h_dentry->d_sb), dentry, -+ (unsigned long)h_ino, (unsigned long)ino); -+ ino = 0; -+ err = au_xino_write(sb, bstart, h_ino, /*ino*/0); -+ if (!err) { -+ iput(inode); -+ if (mtx) -+ mutex_unlock(mtx); -+ goto new_ino; -+ } -+ -+out_iput: -+ iput(inode); -+ inode = ERR_PTR(err); -+out: -+ if (mtx) -+ mutex_unlock(mtx); -+ return inode; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex, -+ struct inode *inode) -+{ -+ int err; -+ struct inode *hi; -+ -+ err = au_br_rdonly(au_sbr(sb, bindex)); -+ -+ /* pseudo-link after flushed may happen out of bounds */ -+ if (!err -+ && inode -+ && au_ibstart(inode) <= bindex -+ && bindex <= au_ibend(inode)) { -+ /* -+ * permission check is unnecessary since vfsub routine -+ * will be called later -+ */ -+ hi = au_h_iptr(inode, bindex); -+ if (hi) -+ err = IS_IMMUTABLE(hi) ? 
-EROFS : 0; -+ } -+ -+ return err; -+} -+ -+int au_test_h_perm(struct inode *h_inode, int mask) -+{ -+ if (uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) -+ return 0; -+ return inode_permission(h_inode, mask); -+} -+ -+int au_test_h_perm_sio(struct inode *h_inode, int mask) -+{ -+ if (au_test_nfs(h_inode->i_sb) -+ && (mask & MAY_WRITE) -+ && S_ISDIR(h_inode->i_mode)) -+ mask |= MAY_READ; /* force permission check */ -+ return au_test_h_perm(h_inode, mask); -+} -diff --git a/fs/aufs/inode.h b/fs/aufs/inode.h -new file mode 100644 -index 0000000..49d53a2 ---- /dev/null -+++ b/fs/aufs/inode.h -@@ -0,0 +1,686 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * inode operations -+ */ -+ -+#ifndef __AUFS_INODE_H__ -+#define __AUFS_INODE_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include "rwsem.h" -+ -+struct vfsmount; -+ -+struct au_hnotify { -+#ifdef CONFIG_AUFS_HNOTIFY -+#ifdef CONFIG_AUFS_HFSNOTIFY -+ /* never use fsnotify_add_vfsmount_mark() */ -+ struct fsnotify_mark hn_mark; -+#endif -+ struct inode *hn_aufs_inode; /* no get/put */ -+#endif -+} ____cacheline_aligned_in_smp; -+ -+struct au_hinode { -+ struct inode *hi_inode; -+ aufs_bindex_t hi_id; -+#ifdef CONFIG_AUFS_HNOTIFY -+ struct au_hnotify *hi_notify; -+#endif -+ -+ /* reference to the copied-up whiteout with get/put */ -+ struct dentry *hi_whdentry; -+}; -+ -+/* ig_flags */ -+#define AuIG_HALF_REFRESHED 1 -+#define au_ig_ftest(flags, name) ((flags) & AuIG_##name) -+#define au_ig_fset(flags, name) \ -+ do { (flags) |= AuIG_##name; } while (0) -+#define au_ig_fclr(flags, name) \ -+ do { (flags) &= ~AuIG_##name; } while (0) -+ -+struct au_iigen { -+ spinlock_t ig_spin; -+ __u32 ig_generation, ig_flags; -+}; -+ -+struct au_vdir; -+struct au_iinfo { -+ struct au_iigen ii_generation; -+ struct super_block *ii_hsb1; /* no get/put */ -+ -+ struct au_rwsem ii_rwsem; -+ aufs_bindex_t ii_bstart, ii_bend; -+ __u32 ii_higen; -+ struct au_hinode *ii_hinode; -+ struct au_vdir *ii_vdir; -+}; -+ -+struct au_icntnr { -+ struct au_iinfo iinfo; -+ struct inode vfs_inode; -+ struct hlist_node plink; -+} ____cacheline_aligned_in_smp; -+ -+/* au_pin flags */ -+#define AuPin_DI_LOCKED 1 -+#define AuPin_MNT_WRITE (1 << 1) -+#define au_ftest_pin(flags, name) ((flags) & AuPin_##name) -+#define au_fset_pin(flags, name) \ -+ do { (flags) |= AuPin_##name; } while (0) -+#define au_fclr_pin(flags, name) \ -+ do { (flags) &= ~AuPin_##name; } while (0) -+ -+struct au_pin { -+ /* input */ -+ struct dentry *dentry; -+ unsigned int udba; -+ unsigned char lsc_di, lsc_hi, flags; -+ aufs_bindex_t bindex; -+ -+ /* output */ -+ struct dentry *parent; -+ struct au_hinode *hdir; -+ 
struct vfsmount *h_mnt; -+ -+ /* temporary unlock/relock for copyup */ -+ struct dentry *h_dentry, *h_parent; -+ struct au_branch *br; -+ struct task_struct *task; -+}; -+ -+void au_pin_hdir_unlock(struct au_pin *p); -+int au_pin_hdir_lock(struct au_pin *p); -+int au_pin_hdir_relock(struct au_pin *p); -+void au_pin_hdir_set_owner(struct au_pin *p, struct task_struct *task); -+void au_pin_hdir_acquire_nest(struct au_pin *p); -+void au_pin_hdir_release(struct au_pin *p); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct au_iinfo *au_ii(struct inode *inode) -+{ -+ struct au_iinfo *iinfo; -+ -+ iinfo = &(container_of(inode, struct au_icntnr, vfs_inode)->iinfo); -+ if (iinfo->ii_hinode) -+ return iinfo; -+ return NULL; /* debugging bad_inode case */ -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* inode.c */ -+struct inode *au_igrab(struct inode *inode); -+void au_refresh_iop(struct inode *inode, int force_getattr); -+int au_refresh_hinode_self(struct inode *inode); -+int au_refresh_hinode(struct inode *inode, struct dentry *dentry); -+int au_ino(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino, -+ unsigned int d_type, ino_t *ino); -+struct inode *au_new_inode(struct dentry *dentry, int must_new); -+int au_test_ro(struct super_block *sb, aufs_bindex_t bindex, -+ struct inode *inode); -+int au_test_h_perm(struct inode *h_inode, int mask); -+int au_test_h_perm_sio(struct inode *h_inode, int mask); -+ -+static inline int au_wh_ino(struct super_block *sb, aufs_bindex_t bindex, -+ ino_t h_ino, unsigned int d_type, ino_t *ino) -+{ -+#ifdef CONFIG_AUFS_SHWH -+ return au_ino(sb, bindex, h_ino, d_type, ino); -+#else -+ return 0; -+#endif -+} -+ -+/* i_op.c */ -+enum { -+ AuIop_SYMLINK, -+ AuIop_DIR, -+ AuIop_OTHER, -+ AuIop_Last -+}; -+extern struct inode_operations aufs_iop[AuIop_Last], -+ aufs_iop_nogetattr[AuIop_Last]; -+ -+/* au_wr_dir flags */ -+#define 
AuWrDir_ADD_ENTRY 1 -+#define AuWrDir_ISDIR (1 << 1) -+#define AuWrDir_TMPFILE (1 << 2) -+#define au_ftest_wrdir(flags, name) ((flags) & AuWrDir_##name) -+#define au_fset_wrdir(flags, name) \ -+ do { (flags) |= AuWrDir_##name; } while (0) -+#define au_fclr_wrdir(flags, name) \ -+ do { (flags) &= ~AuWrDir_##name; } while (0) -+ -+struct au_wr_dir_args { -+ aufs_bindex_t force_btgt; -+ unsigned char flags; -+}; -+int au_wr_dir(struct dentry *dentry, struct dentry *src_dentry, -+ struct au_wr_dir_args *args); -+ -+struct dentry *au_pinned_h_parent(struct au_pin *pin); -+void au_pin_init(struct au_pin *pin, struct dentry *dentry, -+ aufs_bindex_t bindex, int lsc_di, int lsc_hi, -+ unsigned int udba, unsigned char flags); -+int au_pin(struct au_pin *pin, struct dentry *dentry, aufs_bindex_t bindex, -+ unsigned int udba, unsigned char flags) __must_check; -+int au_do_pin(struct au_pin *pin) __must_check; -+void au_unpin(struct au_pin *pin); -+int au_reval_for_attr(struct dentry *dentry, unsigned int sigen); -+ -+#define AuIcpup_DID_CPUP 1 -+#define au_ftest_icpup(flags, name) ((flags) & AuIcpup_##name) -+#define au_fset_icpup(flags, name) \ -+ do { (flags) |= AuIcpup_##name; } while (0) -+#define au_fclr_icpup(flags, name) \ -+ do { (flags) &= ~AuIcpup_##name; } while (0) -+ -+struct au_icpup_args { -+ unsigned char flags; -+ unsigned char pin_flags; -+ aufs_bindex_t btgt; -+ unsigned int udba; -+ struct au_pin pin; -+ struct path h_path; -+ struct inode *h_inode; -+}; -+ -+int au_pin_and_icpup(struct dentry *dentry, struct iattr *ia, -+ struct au_icpup_args *a); -+ -+int au_h_path_getattr(struct dentry *dentry, int force, struct path *h_path); -+ -+/* i_op_add.c */ -+int au_may_add(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent, int isdir); -+int aufs_mknod(struct inode *dir, struct dentry *dentry, umode_t mode, -+ dev_t dev); -+int aufs_symlink(struct inode *dir, struct dentry *dentry, const char *symname); -+int aufs_create(struct inode *dir, 
struct dentry *dentry, umode_t mode, -+ bool want_excl); -+struct vfsub_aopen_args; -+int au_aopen_or_create(struct inode *dir, struct dentry *dentry, -+ struct vfsub_aopen_args *args); -+int aufs_tmpfile(struct inode *dir, struct dentry *dentry, umode_t mode); -+int aufs_link(struct dentry *src_dentry, struct inode *dir, -+ struct dentry *dentry); -+int aufs_mkdir(struct inode *dir, struct dentry *dentry, umode_t mode); -+ -+/* i_op_del.c */ -+int au_wr_dir_need_wh(struct dentry *dentry, int isdir, aufs_bindex_t *bcpup); -+int au_may_del(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent, int isdir); -+int aufs_unlink(struct inode *dir, struct dentry *dentry); -+int aufs_rmdir(struct inode *dir, struct dentry *dentry); -+ -+/* i_op_ren.c */ -+int au_wbr(struct dentry *dentry, aufs_bindex_t btgt); -+int aufs_rename(struct inode *src_dir, struct dentry *src_dentry, -+ struct inode *dir, struct dentry *dentry); -+ -+/* iinfo.c */ -+struct inode *au_h_iptr(struct inode *inode, aufs_bindex_t bindex); -+void au_hiput(struct au_hinode *hinode); -+void au_set_hi_wh(struct inode *inode, aufs_bindex_t bindex, -+ struct dentry *h_wh); -+unsigned int au_hi_flags(struct inode *inode, int isdir); -+ -+/* hinode flags */ -+#define AuHi_XINO 1 -+#define AuHi_HNOTIFY (1 << 1) -+#define au_ftest_hi(flags, name) ((flags) & AuHi_##name) -+#define au_fset_hi(flags, name) \ -+ do { (flags) |= AuHi_##name; } while (0) -+#define au_fclr_hi(flags, name) \ -+ do { (flags) &= ~AuHi_##name; } while (0) -+ -+#ifndef CONFIG_AUFS_HNOTIFY -+#undef AuHi_HNOTIFY -+#define AuHi_HNOTIFY 0 -+#endif -+ -+void au_set_h_iptr(struct inode *inode, aufs_bindex_t bindex, -+ struct inode *h_inode, unsigned int flags); -+ -+void au_update_iigen(struct inode *inode, int half); -+void au_update_ibrange(struct inode *inode, int do_put_zero); -+ -+void au_icntnr_init_once(void *_c); -+int au_iinfo_init(struct inode *inode); -+void au_iinfo_fin(struct inode *inode); -+int au_ii_realloc(struct 
au_iinfo *iinfo, int nbr); -+ -+#ifdef CONFIG_PROC_FS -+/* plink.c */ -+int au_plink_maint(struct super_block *sb, int flags); -+struct au_sbinfo; -+void au_plink_maint_leave(struct au_sbinfo *sbinfo); -+int au_plink_maint_enter(struct super_block *sb); -+#ifdef CONFIG_AUFS_DEBUG -+void au_plink_list(struct super_block *sb); -+#else -+AuStubVoid(au_plink_list, struct super_block *sb) -+#endif -+int au_plink_test(struct inode *inode); -+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex); -+void au_plink_append(struct inode *inode, aufs_bindex_t bindex, -+ struct dentry *h_dentry); -+void au_plink_put(struct super_block *sb, int verbose); -+void au_plink_clean(struct super_block *sb, int verbose); -+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id); -+#else -+AuStubInt0(au_plink_maint, struct super_block *sb, int flags); -+AuStubVoid(au_plink_maint_leave, struct au_sbinfo *sbinfo); -+AuStubInt0(au_plink_maint_enter, struct super_block *sb); -+AuStubVoid(au_plink_list, struct super_block *sb); -+AuStubInt0(au_plink_test, struct inode *inode); -+AuStub(struct dentry *, au_plink_lkup, return NULL, -+ struct inode *inode, aufs_bindex_t bindex); -+AuStubVoid(au_plink_append, struct inode *inode, aufs_bindex_t bindex, -+ struct dentry *h_dentry); -+AuStubVoid(au_plink_put, struct super_block *sb, int verbose); -+AuStubVoid(au_plink_clean, struct super_block *sb, int verbose); -+AuStubVoid(au_plink_half_refresh, struct super_block *sb, aufs_bindex_t br_id); -+#endif /* CONFIG_PROC_FS */ -+ -+#ifdef CONFIG_AUFS_XATTR -+/* xattr.c */ -+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags, -+ unsigned int verbose); -+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size); -+ssize_t aufs_getxattr(struct dentry *dentry, const char *name, void *value, -+ size_t size); -+int aufs_setxattr(struct dentry *dentry, const char *name, const void *value, -+ size_t size, int flags); -+int 
aufs_removexattr(struct dentry *dentry, const char *name); -+ -+/* void au_xattr_init(struct super_block *sb); */ -+#else -+AuStubInt0(au_cpup_xattr, struct dentry *h_dst, struct dentry *h_src, -+ int ignore_flags, unsigned int verbose); -+/* AuStubVoid(au_xattr_init, struct super_block *sb); */ -+#endif -+ -+#ifdef CONFIG_FS_POSIX_ACL -+struct posix_acl *aufs_get_acl(struct inode *inode, int type); -+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type); -+#endif -+ -+#if IS_ENABLED(CONFIG_AUFS_XATTR) || IS_ENABLED(CONFIG_FS_POSIX_ACL) -+enum { -+ AU_XATTR_SET, -+ AU_XATTR_REMOVE, -+ AU_ACL_SET -+}; -+ -+struct au_srxattr { -+ int type; -+ union { -+ struct { -+ const char *name; -+ const void *value; -+ size_t size; -+ int flags; -+ } set; -+ struct { -+ const char *name; -+ } remove; -+ struct { -+ struct posix_acl *acl; -+ int type; -+ } acl_set; -+ } u; -+}; -+ssize_t au_srxattr(struct dentry *dentry, struct au_srxattr *arg); -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* lock subclass for iinfo */ -+enum { -+ AuLsc_II_CHILD, /* child first */ -+ AuLsc_II_CHILD2, /* rename(2), link(2), and cpup at hnotify */ -+ AuLsc_II_CHILD3, /* copyup dirs */ -+ AuLsc_II_PARENT, /* see AuLsc_I_PARENT in vfsub.h */ -+ AuLsc_II_PARENT2, -+ AuLsc_II_PARENT3, /* copyup dirs */ -+ AuLsc_II_NEW_CHILD -+}; -+ -+/* -+ * ii_read_lock_child, ii_write_lock_child, -+ * ii_read_lock_child2, ii_write_lock_child2, -+ * ii_read_lock_child3, ii_write_lock_child3, -+ * ii_read_lock_parent, ii_write_lock_parent, -+ * ii_read_lock_parent2, ii_write_lock_parent2, -+ * ii_read_lock_parent3, ii_write_lock_parent3, -+ * ii_read_lock_new_child, ii_write_lock_new_child, -+ */ -+#define AuReadLockFunc(name, lsc) \ -+static inline void ii_read_lock_##name(struct inode *i) \ -+{ \ -+ au_rw_read_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \ -+} -+ -+#define AuWriteLockFunc(name, lsc) \ -+static inline void 
ii_write_lock_##name(struct inode *i) \ -+{ \ -+ au_rw_write_lock_nested(&au_ii(i)->ii_rwsem, AuLsc_II_##lsc); \ -+} -+ -+#define AuRWLockFuncs(name, lsc) \ -+ AuReadLockFunc(name, lsc) \ -+ AuWriteLockFunc(name, lsc) -+ -+AuRWLockFuncs(child, CHILD); -+AuRWLockFuncs(child2, CHILD2); -+AuRWLockFuncs(child3, CHILD3); -+AuRWLockFuncs(parent, PARENT); -+AuRWLockFuncs(parent2, PARENT2); -+AuRWLockFuncs(parent3, PARENT3); -+AuRWLockFuncs(new_child, NEW_CHILD); -+ -+#undef AuReadLockFunc -+#undef AuWriteLockFunc -+#undef AuRWLockFuncs -+ -+/* -+ * ii_read_unlock, ii_write_unlock, ii_downgrade_lock -+ */ -+AuSimpleUnlockRwsemFuncs(ii, struct inode *i, &au_ii(i)->ii_rwsem); -+ -+#define IiMustNoWaiters(i) AuRwMustNoWaiters(&au_ii(i)->ii_rwsem) -+#define IiMustAnyLock(i) AuRwMustAnyLock(&au_ii(i)->ii_rwsem) -+#define IiMustWriteLock(i) AuRwMustWriteLock(&au_ii(i)->ii_rwsem) -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline void au_icntnr_init(struct au_icntnr *c) -+{ -+#ifdef CONFIG_AUFS_DEBUG -+ c->vfs_inode.i_mode = 0; -+#endif -+} -+ -+static inline unsigned int au_iigen(struct inode *inode, unsigned int *igflags) -+{ -+ unsigned int gen; -+ struct au_iinfo *iinfo; -+ struct au_iigen *iigen; -+ -+ iinfo = au_ii(inode); -+ iigen = &iinfo->ii_generation; -+ spin_lock(&iigen->ig_spin); -+ if (igflags) -+ *igflags = iigen->ig_flags; -+ gen = iigen->ig_generation; -+ spin_unlock(&iigen->ig_spin); -+ -+ return gen; -+} -+ -+/* tiny test for inode number */ -+/* tmpfs generation is too rough */ -+static inline int au_test_higen(struct inode *inode, struct inode *h_inode) -+{ -+ struct au_iinfo *iinfo; -+ -+ iinfo = au_ii(inode); -+ AuRwMustAnyLock(&iinfo->ii_rwsem); -+ return !(iinfo->ii_hsb1 == h_inode->i_sb -+ && iinfo->ii_higen == h_inode->i_generation); -+} -+ -+static inline void au_iigen_dec(struct inode *inode) -+{ -+ struct au_iinfo *iinfo; -+ struct au_iigen *iigen; -+ -+ iinfo = au_ii(inode); -+ iigen = 
&iinfo->ii_generation; -+ spin_lock(&iigen->ig_spin); -+ iigen->ig_generation--; -+ spin_unlock(&iigen->ig_spin); -+} -+ -+static inline int au_iigen_test(struct inode *inode, unsigned int sigen) -+{ -+ int err; -+ -+ err = 0; -+ if (unlikely(inode && au_iigen(inode, NULL) != sigen)) -+ err = -EIO; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline aufs_bindex_t au_ii_br_id(struct inode *inode, -+ aufs_bindex_t bindex) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_hinode[0 + bindex].hi_id; -+} -+ -+static inline aufs_bindex_t au_ibstart(struct inode *inode) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_bstart; -+} -+ -+static inline aufs_bindex_t au_ibend(struct inode *inode) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_bend; -+} -+ -+static inline struct au_vdir *au_ivdir(struct inode *inode) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_vdir; -+} -+ -+static inline struct dentry *au_hi_wh(struct inode *inode, aufs_bindex_t bindex) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_hinode[0 + bindex].hi_whdentry; -+} -+ -+static inline void au_set_ibstart(struct inode *inode, aufs_bindex_t bindex) -+{ -+ IiMustWriteLock(inode); -+ au_ii(inode)->ii_bstart = bindex; -+} -+ -+static inline void au_set_ibend(struct inode *inode, aufs_bindex_t bindex) -+{ -+ IiMustWriteLock(inode); -+ au_ii(inode)->ii_bend = bindex; -+} -+ -+static inline void au_set_ivdir(struct inode *inode, struct au_vdir *vdir) -+{ -+ IiMustWriteLock(inode); -+ au_ii(inode)->ii_vdir = vdir; -+} -+ -+static inline struct au_hinode *au_hi(struct inode *inode, aufs_bindex_t bindex) -+{ -+ IiMustAnyLock(inode); -+ return au_ii(inode)->ii_hinode + bindex; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct dentry *au_pinned_parent(struct au_pin *pin) -+{ -+ if (pin) -+ return pin->parent; -+ return NULL; -+} -+ -+static inline 
struct inode *au_pinned_h_dir(struct au_pin *pin) -+{ -+ if (pin && pin->hdir) -+ return pin->hdir->hi_inode; -+ return NULL; -+} -+ -+static inline struct au_hinode *au_pinned_hdir(struct au_pin *pin) -+{ -+ if (pin) -+ return pin->hdir; -+ return NULL; -+} -+ -+static inline void au_pin_set_dentry(struct au_pin *pin, struct dentry *dentry) -+{ -+ if (pin) -+ pin->dentry = dentry; -+} -+ -+static inline void au_pin_set_parent_lflag(struct au_pin *pin, -+ unsigned char lflag) -+{ -+ if (pin) { -+ if (lflag) -+ au_fset_pin(pin->flags, DI_LOCKED); -+ else -+ au_fclr_pin(pin->flags, DI_LOCKED); -+ } -+} -+ -+#if 0 /* reserved */ -+static inline void au_pin_set_parent(struct au_pin *pin, struct dentry *parent) -+{ -+ if (pin) { -+ dput(pin->parent); -+ pin->parent = dget(parent); -+ } -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_branch; -+#ifdef CONFIG_AUFS_HNOTIFY -+struct au_hnotify_op { -+ void (*ctl)(struct au_hinode *hinode, int do_set); -+ int (*alloc)(struct au_hinode *hinode); -+ -+ /* -+ * if it returns true, the the caller should free hinode->hi_notify, -+ * otherwise ->free() frees it. 
-+ */ -+ int (*free)(struct au_hinode *hinode, -+ struct au_hnotify *hn) __must_check; -+ -+ void (*fin)(void); -+ int (*init)(void); -+ -+ int (*reset_br)(unsigned int udba, struct au_branch *br, int perm); -+ void (*fin_br)(struct au_branch *br); -+ int (*init_br)(struct au_branch *br, int perm); -+}; -+ -+/* hnotify.c */ -+int au_hn_alloc(struct au_hinode *hinode, struct inode *inode); -+void au_hn_free(struct au_hinode *hinode); -+void au_hn_ctl(struct au_hinode *hinode, int do_set); -+void au_hn_reset(struct inode *inode, unsigned int flags); -+int au_hnotify(struct inode *h_dir, struct au_hnotify *hnotify, u32 mask, -+ struct qstr *h_child_qstr, struct inode *h_child_inode); -+int au_hnotify_reset_br(unsigned int udba, struct au_branch *br, int perm); -+int au_hnotify_init_br(struct au_branch *br, int perm); -+void au_hnotify_fin_br(struct au_branch *br); -+int __init au_hnotify_init(void); -+void au_hnotify_fin(void); -+ -+/* hfsnotify.c */ -+extern const struct au_hnotify_op au_hnotify_op; -+ -+static inline -+void au_hn_init(struct au_hinode *hinode) -+{ -+ hinode->hi_notify = NULL; -+} -+ -+static inline struct au_hnotify *au_hn(struct au_hinode *hinode) -+{ -+ return hinode->hi_notify; -+} -+ -+#else -+AuStub(int, au_hn_alloc, return -EOPNOTSUPP, -+ struct au_hinode *hinode __maybe_unused, -+ struct inode *inode __maybe_unused) -+AuStub(struct au_hnotify *, au_hn, return NULL, struct au_hinode *hinode) -+AuStubVoid(au_hn_free, struct au_hinode *hinode __maybe_unused) -+AuStubVoid(au_hn_ctl, struct au_hinode *hinode __maybe_unused, -+ int do_set __maybe_unused) -+AuStubVoid(au_hn_reset, struct inode *inode __maybe_unused, -+ unsigned int flags __maybe_unused) -+AuStubInt0(au_hnotify_reset_br, unsigned int udba __maybe_unused, -+ struct au_branch *br __maybe_unused, -+ int perm __maybe_unused) -+AuStubInt0(au_hnotify_init_br, struct au_branch *br __maybe_unused, -+ int perm __maybe_unused) -+AuStubVoid(au_hnotify_fin_br, struct au_branch *br 
__maybe_unused) -+AuStubInt0(__init au_hnotify_init, void) -+AuStubVoid(au_hnotify_fin, void) -+AuStubVoid(au_hn_init, struct au_hinode *hinode __maybe_unused) -+#endif /* CONFIG_AUFS_HNOTIFY */ -+ -+static inline void au_hn_suspend(struct au_hinode *hdir) -+{ -+ au_hn_ctl(hdir, /*do_set*/0); -+} -+ -+static inline void au_hn_resume(struct au_hinode *hdir) -+{ -+ au_hn_ctl(hdir, /*do_set*/1); -+} -+ -+static inline void au_hn_imtx_lock(struct au_hinode *hdir) -+{ -+ mutex_lock(&hdir->hi_inode->i_mutex); -+ au_hn_suspend(hdir); -+} -+ -+static inline void au_hn_imtx_lock_nested(struct au_hinode *hdir, -+ unsigned int sc __maybe_unused) -+{ -+ mutex_lock_nested(&hdir->hi_inode->i_mutex, sc); -+ au_hn_suspend(hdir); -+} -+ -+static inline void au_hn_imtx_unlock(struct au_hinode *hdir) -+{ -+ au_hn_resume(hdir); -+ mutex_unlock(&hdir->hi_inode->i_mutex); -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_INODE_H__ */ -diff --git a/fs/aufs/ioctl.c b/fs/aufs/ioctl.c -new file mode 100644 -index 0000000..10e2315 ---- /dev/null -+++ b/fs/aufs/ioctl.c -@@ -0,0 +1,219 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * ioctl -+ * plink-management and readdir in userspace. -+ * assist the pathconf(3) wrapper library. -+ * move-down -+ * File-based Hierarchical Storage Management. 
-+ */ -+ -+#include -+#include -+#include "aufs.h" -+ -+static int au_wbr_fd(struct path *path, struct aufs_wbr_fd __user *arg) -+{ -+ int err, fd; -+ aufs_bindex_t wbi, bindex, bend; -+ struct file *h_file; -+ struct super_block *sb; -+ struct dentry *root; -+ struct au_branch *br; -+ struct aufs_wbr_fd wbrfd = { -+ .oflags = au_dir_roflags, -+ .brid = -1 -+ }; -+ const int valid = O_RDONLY | O_NONBLOCK | O_LARGEFILE | O_DIRECTORY -+ | O_NOATIME | O_CLOEXEC; -+ -+ AuDebugOn(wbrfd.oflags & ~valid); -+ -+ if (arg) { -+ err = copy_from_user(&wbrfd, arg, sizeof(wbrfd)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ goto out; -+ } -+ -+ err = -EINVAL; -+ AuDbg("wbrfd{0%o, %d}\n", wbrfd.oflags, wbrfd.brid); -+ wbrfd.oflags |= au_dir_roflags; -+ AuDbg("0%o\n", wbrfd.oflags); -+ if (unlikely(wbrfd.oflags & ~valid)) -+ goto out; -+ } -+ -+ fd = get_unused_fd(); -+ err = fd; -+ if (unlikely(fd < 0)) -+ goto out; -+ -+ h_file = ERR_PTR(-EINVAL); -+ wbi = 0; -+ br = NULL; -+ sb = path->dentry->d_sb; -+ root = sb->s_root; -+ aufs_read_lock(root, AuLock_IR); -+ bend = au_sbend(sb); -+ if (wbrfd.brid >= 0) { -+ wbi = au_br_index(sb, wbrfd.brid); -+ if (unlikely(wbi < 0 || wbi > bend)) -+ goto out_unlock; -+ } -+ -+ h_file = ERR_PTR(-ENOENT); -+ br = au_sbr(sb, wbi); -+ if (!au_br_writable(br->br_perm)) { -+ if (arg) -+ goto out_unlock; -+ -+ bindex = wbi + 1; -+ wbi = -1; -+ for (; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (au_br_writable(br->br_perm)) { -+ wbi = bindex; -+ br = au_sbr(sb, wbi); -+ break; -+ } -+ } -+ } -+ AuDbg("wbi %d\n", wbi); -+ if (wbi >= 0) -+ h_file = au_h_open(root, wbi, wbrfd.oflags, NULL, -+ /*force_wr*/0); -+ -+out_unlock: -+ aufs_read_unlock(root, AuLock_IR); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out_fd; -+ -+ atomic_dec(&br->br_count); /* cf. 
au_h_open() */ -+ fd_install(fd, h_file); -+ err = fd; -+ goto out; /* success */ -+ -+out_fd: -+ put_unused_fd(fd); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+long aufs_ioctl_dir(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ long err; -+ struct dentry *dentry; -+ -+ switch (cmd) { -+ case AUFS_CTL_RDU: -+ case AUFS_CTL_RDU_INO: -+ err = au_rdu_ioctl(file, cmd, arg); -+ break; -+ -+ case AUFS_CTL_WBR_FD: -+ err = au_wbr_fd(&file->f_path, (void __user *)arg); -+ break; -+ -+ case AUFS_CTL_IBUSY: -+ err = au_ibusy_ioctl(file, arg); -+ break; -+ -+ case AUFS_CTL_BRINFO: -+ err = au_brinfo_ioctl(file, arg); -+ break; -+ -+ case AUFS_CTL_FHSM_FD: -+ dentry = file->f_dentry; -+ if (IS_ROOT(dentry)) -+ err = au_fhsm_fd(dentry->d_sb, arg); -+ else -+ err = -ENOTTY; -+ break; -+ -+ default: -+ /* do not call the lower */ -+ AuDbg("0x%x\n", cmd); -+ err = -ENOTTY; -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+long aufs_ioctl_nondir(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ long err; -+ -+ switch (cmd) { -+ case AUFS_CTL_MVDOWN: -+ err = au_mvdown(file->f_dentry, (void __user *)arg); -+ break; -+ -+ case AUFS_CTL_WBR_FD: -+ err = au_wbr_fd(&file->f_path, (void __user *)arg); -+ break; -+ -+ default: -+ /* do not call the lower */ -+ AuDbg("0x%x\n", cmd); -+ err = -ENOTTY; -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+#ifdef CONFIG_COMPAT -+long aufs_compat_ioctl_dir(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ long err; -+ -+ switch (cmd) { -+ case AUFS_CTL_RDU: -+ case AUFS_CTL_RDU_INO: -+ err = au_rdu_compat_ioctl(file, cmd, arg); -+ break; -+ -+ case AUFS_CTL_IBUSY: -+ err = au_ibusy_compat_ioctl(file, arg); -+ break; -+ -+ case AUFS_CTL_BRINFO: -+ err = au_brinfo_compat_ioctl(file, arg); -+ break; -+ -+ default: -+ err = aufs_ioctl_dir(file, cmd, arg); -+ } -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+long 
aufs_compat_ioctl_nondir(struct file *file, unsigned int cmd, -+ unsigned long arg) -+{ -+ return aufs_ioctl_nondir(file, cmd, (unsigned long)compat_ptr(arg)); -+} -+#endif -diff --git a/fs/aufs/loop.c b/fs/aufs/loop.c -new file mode 100644 -index 0000000..1eaf59f ---- /dev/null -+++ b/fs/aufs/loop.c -@@ -0,0 +1,146 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * support for loopback block device as a branch -+ */ -+ -+#include "aufs.h" -+ -+/* added into drivers/block/loop.c */ -+static struct file *(*backing_file_func)(struct super_block *sb); -+ -+/* -+ * test if two lower dentries have overlapping branches. -+ */ -+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding) -+{ -+ struct super_block *h_sb; -+ struct file *backing_file; -+ -+ if (unlikely(!backing_file_func)) { -+ /* don't load "loop" module here */ -+ backing_file_func = symbol_get(loop_backing_file); -+ if (unlikely(!backing_file_func)) -+ /* "loop" module is not loaded */ -+ return 0; -+ } -+ -+ h_sb = h_adding->d_sb; -+ backing_file = backing_file_func(h_sb); -+ if (!backing_file) -+ return 0; -+ -+ h_adding = backing_file->f_dentry; -+ /* -+ * h_adding can be local NFS. -+ * in this case aufs cannot detect the loop. 
-+ */ -+ if (unlikely(h_adding->d_sb == sb)) -+ return 1; -+ return !!au_test_subdir(h_adding, sb->s_root); -+} -+ -+/* true if a kernel thread named 'loop[0-9].*' accesses a file */ -+int au_test_loopback_kthread(void) -+{ -+ int ret; -+ struct task_struct *tsk = current; -+ char c, comm[sizeof(tsk->comm)]; -+ -+ ret = 0; -+ if (tsk->flags & PF_KTHREAD) { -+ get_task_comm(comm, tsk); -+ c = comm[4]; -+ ret = ('0' <= c && c <= '9' -+ && !strncmp(comm, "loop", 4)); -+ } -+ -+ return ret; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define au_warn_loopback_step 16 -+static int au_warn_loopback_nelem = au_warn_loopback_step; -+static unsigned long *au_warn_loopback_array; -+ -+void au_warn_loopback(struct super_block *h_sb) -+{ -+ int i, new_nelem; -+ unsigned long *a, magic; -+ static DEFINE_SPINLOCK(spin); -+ -+ magic = h_sb->s_magic; -+ spin_lock(&spin); -+ a = au_warn_loopback_array; -+ for (i = 0; i < au_warn_loopback_nelem && *a; i++) -+ if (a[i] == magic) { -+ spin_unlock(&spin); -+ return; -+ } -+ -+ /* h_sb is new to us, print it */ -+ if (i < au_warn_loopback_nelem) { -+ a[i] = magic; -+ goto pr; -+ } -+ -+ /* expand the array */ -+ new_nelem = au_warn_loopback_nelem + au_warn_loopback_step; -+ a = au_kzrealloc(au_warn_loopback_array, -+ au_warn_loopback_nelem * sizeof(unsigned long), -+ new_nelem * sizeof(unsigned long), GFP_ATOMIC); -+ if (a) { -+ au_warn_loopback_nelem = new_nelem; -+ au_warn_loopback_array = a; -+ a[i] = magic; -+ goto pr; -+ } -+ -+ spin_unlock(&spin); -+ AuWarn1("realloc failed, ignored\n"); -+ return; -+ -+pr: -+ spin_unlock(&spin); -+ pr_warn("you may want to try another patch for loopback file " -+ "on %s(0x%lx) branch\n", au_sbtype(h_sb), magic); -+} -+ -+int au_loopback_init(void) -+{ -+ int err; -+ struct super_block *sb __maybe_unused; -+ -+ BUILD_BUG_ON(sizeof(sb->s_magic) != sizeof(unsigned long)); -+ -+ err = 0; -+ au_warn_loopback_array = kcalloc(au_warn_loopback_step, -+ 
sizeof(unsigned long), GFP_NOFS); -+ if (unlikely(!au_warn_loopback_array)) -+ err = -ENOMEM; -+ -+ return err; -+} -+ -+void au_loopback_fin(void) -+{ -+ if (backing_file_func) -+ symbol_put(loop_backing_file); -+ kfree(au_warn_loopback_array); -+} -diff --git a/fs/aufs/loop.h b/fs/aufs/loop.h -new file mode 100644 -index 0000000..35f7446 ---- /dev/null -+++ b/fs/aufs/loop.h -@@ -0,0 +1,52 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * support for loopback mount as a branch -+ */ -+ -+#ifndef __AUFS_LOOP_H__ -+#define __AUFS_LOOP_H__ -+ -+#ifdef __KERNEL__ -+ -+struct dentry; -+struct super_block; -+ -+#ifdef CONFIG_AUFS_BDEV_LOOP -+/* drivers/block/loop.c */ -+struct file *loop_backing_file(struct super_block *sb); -+ -+/* loop.c */ -+int au_test_loopback_overlap(struct super_block *sb, struct dentry *h_adding); -+int au_test_loopback_kthread(void); -+void au_warn_loopback(struct super_block *h_sb); -+ -+int au_loopback_init(void); -+void au_loopback_fin(void); -+#else -+AuStubInt0(au_test_loopback_overlap, struct super_block *sb, -+ struct dentry *h_adding) -+AuStubInt0(au_test_loopback_kthread, void) -+AuStubVoid(au_warn_loopback, struct super_block *h_sb) -+ -+AuStubInt0(au_loopback_init, void) -+AuStubVoid(au_loopback_fin, void) -+#endif /* BLK_DEV_LOOP */ -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_LOOP_H__ */ -diff --git a/fs/aufs/magic.mk b/fs/aufs/magic.mk -new file mode 100644 -index 0000000..4f83bdf ---- /dev/null -+++ b/fs/aufs/magic.mk -@@ -0,0 +1,30 @@ -+ -+# defined in ${srctree}/fs/fuse/inode.c -+# tristate -+ifdef CONFIG_FUSE_FS -+ccflags-y += -DFUSE_SUPER_MAGIC=0x65735546 -+endif -+ -+# defined in ${srctree}/fs/xfs/xfs_sb.h -+# tristate -+ifdef CONFIG_XFS_FS -+ccflags-y += -DXFS_SB_MAGIC=0x58465342 -+endif -+ -+# defined in ${srctree}/fs/configfs/mount.c -+# tristate -+ifdef CONFIG_CONFIGFS_FS -+ccflags-y += -DCONFIGFS_MAGIC=0x62656570 -+endif -+ -+# defined in ${srctree}/fs/ubifs/ubifs.h -+# tristate -+ifdef CONFIG_UBIFS_FS -+ccflags-y += -DUBIFS_SUPER_MAGIC=0x24051905 -+endif -+ -+# defined in ${srctree}/fs/hfsplus/hfsplus_raw.h -+# tristate -+ifdef CONFIG_HFSPLUS_FS -+ccflags-y += -DHFSPLUS_SUPER_MAGIC=0x482b -+endif -diff --git a/fs/aufs/module.c b/fs/aufs/module.c -new file mode 100644 -index 0000000..e4e04aa ---- /dev/null -+++ b/fs/aufs/module.c -@@ -0,0 +1,222 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * module global variables and operations -+ */ -+ -+#include -+#include -+#include "aufs.h" -+ -+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp) -+{ -+ if (new_sz <= nused) -+ return p; -+ -+ p = krealloc(p, new_sz, gfp); -+ if (p) -+ memset(p + nused, 0, new_sz - nused); -+ return p; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * aufs caches -+ */ -+struct kmem_cache *au_cachep[AuCache_Last]; -+static int __init au_cache_init(void) -+{ -+ au_cachep[AuCache_DINFO] = AuCacheCtor(au_dinfo, au_di_init_once); -+ if (au_cachep[AuCache_DINFO]) -+ /* SLAB_DESTROY_BY_RCU */ -+ au_cachep[AuCache_ICNTNR] = AuCacheCtor(au_icntnr, -+ au_icntnr_init_once); -+ if (au_cachep[AuCache_ICNTNR]) -+ au_cachep[AuCache_FINFO] = AuCacheCtor(au_finfo, -+ au_fi_init_once); -+ if (au_cachep[AuCache_FINFO]) -+ au_cachep[AuCache_VDIR] = AuCache(au_vdir); -+ if (au_cachep[AuCache_VDIR]) -+ au_cachep[AuCache_DEHSTR] = AuCache(au_vdir_dehstr); -+ if (au_cachep[AuCache_DEHSTR]) -+ return 0; -+ -+ return -ENOMEM; -+} -+ -+static void au_cache_fin(void) -+{ -+ int i; -+ -+ /* -+ * Make sure all delayed rcu free inodes are flushed before we -+ * destroy cache. 
-+ */ -+ rcu_barrier(); -+ -+ /* excluding AuCache_HNOTIFY */ -+ BUILD_BUG_ON(AuCache_HNOTIFY + 1 != AuCache_Last); -+ for (i = 0; i < AuCache_HNOTIFY; i++) -+ if (au_cachep[i]) { -+ kmem_cache_destroy(au_cachep[i]); -+ au_cachep[i] = NULL; -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_dir_roflags; -+ -+#ifdef CONFIG_AUFS_SBILIST -+/* -+ * iterate_supers_type() doesn't protect us from -+ * remounting (branch management) -+ */ -+struct au_sphlhead au_sbilist; -+#endif -+ -+struct lock_class_key au_lc_key[AuLcKey_Last]; -+ -+/* -+ * functions for module interface. -+ */ -+MODULE_LICENSE("GPL"); -+/* MODULE_LICENSE("GPL v2"); */ -+MODULE_AUTHOR("Junjiro R. Okajima "); -+MODULE_DESCRIPTION(AUFS_NAME -+ " -- Advanced multi layered unification filesystem"); -+MODULE_VERSION(AUFS_VERSION); -+MODULE_ALIAS_FS(AUFS_NAME); -+ -+/* this module parameter has no meaning when SYSFS is disabled */ -+int sysaufs_brs = 1; -+MODULE_PARM_DESC(brs, "use /fs/aufs/si_*/brN"); -+module_param_named(brs, sysaufs_brs, int, S_IRUGO); -+ -+/* this module parameter has no meaning when USER_NS is disabled */ -+bool au_userns; -+MODULE_PARM_DESC(allow_userns, "allow unprivileged to mount under userns"); -+module_param_named(allow_userns, au_userns, bool, S_IRUGO); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static char au_esc_chars[0x20 + 3]; /* 0x01-0x20, backslash, del, and NULL */ -+ -+int au_seq_path(struct seq_file *seq, struct path *path) -+{ -+ int err; -+ -+ err = seq_path(seq, path, au_esc_chars); -+ if (err > 0) -+ err = 0; -+ else if (err < 0) -+ err = -ENOMEM; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int __init aufs_init(void) -+{ -+ int err, i; -+ char *p; -+ -+ p = au_esc_chars; -+ for (i = 1; i <= ' '; i++) -+ *p++ = i; -+ *p++ = '\\'; -+ *p++ = '\x7f'; -+ *p = 0; -+ -+ au_dir_roflags = 
au_file_roflags(O_DIRECTORY | O_LARGEFILE); -+ -+ memcpy(aufs_iop_nogetattr, aufs_iop, sizeof(aufs_iop)); -+ for (i = 0; i < AuIop_Last; i++) -+ aufs_iop_nogetattr[i].getattr = NULL; -+ -+ au_sbilist_init(); -+ sysaufs_brs_init(); -+ au_debug_init(); -+ au_dy_init(); -+ err = sysaufs_init(); -+ if (unlikely(err)) -+ goto out; -+ err = au_procfs_init(); -+ if (unlikely(err)) -+ goto out_sysaufs; -+ err = au_wkq_init(); -+ if (unlikely(err)) -+ goto out_procfs; -+ err = au_loopback_init(); -+ if (unlikely(err)) -+ goto out_wkq; -+ err = au_hnotify_init(); -+ if (unlikely(err)) -+ goto out_loopback; -+ err = au_sysrq_init(); -+ if (unlikely(err)) -+ goto out_hin; -+ err = au_cache_init(); -+ if (unlikely(err)) -+ goto out_sysrq; -+ -+ aufs_fs_type.fs_flags |= au_userns ? FS_USERNS_MOUNT : 0; -+ err = register_filesystem(&aufs_fs_type); -+ if (unlikely(err)) -+ goto out_cache; -+ -+ /* since we define pr_fmt, call printk directly */ -+ printk(KERN_INFO AUFS_NAME " " AUFS_VERSION "\n"); -+ goto out; /* success */ -+ -+out_cache: -+ au_cache_fin(); -+out_sysrq: -+ au_sysrq_fin(); -+out_hin: -+ au_hnotify_fin(); -+out_loopback: -+ au_loopback_fin(); -+out_wkq: -+ au_wkq_fin(); -+out_procfs: -+ au_procfs_fin(); -+out_sysaufs: -+ sysaufs_fin(); -+ au_dy_fin(); -+out: -+ return err; -+} -+ -+static void __exit aufs_exit(void) -+{ -+ unregister_filesystem(&aufs_fs_type); -+ au_cache_fin(); -+ au_sysrq_fin(); -+ au_hnotify_fin(); -+ au_loopback_fin(); -+ au_wkq_fin(); -+ au_procfs_fin(); -+ sysaufs_fin(); -+ au_dy_fin(); -+} -+ -+module_init(aufs_init); -+module_exit(aufs_exit); -diff --git a/fs/aufs/module.h b/fs/aufs/module.h -new file mode 100644 -index 0000000..90c3c8f ---- /dev/null -+++ b/fs/aufs/module.h -@@ -0,0 +1,105 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * module initialization and module-global -+ */ -+ -+#ifndef __AUFS_MODULE_H__ -+#define __AUFS_MODULE_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+ -+struct path; -+struct seq_file; -+ -+/* module parameters */ -+extern int sysaufs_brs; -+extern bool au_userns; -+ -+/* ---------------------------------------------------------------------- */ -+ -+extern int au_dir_roflags; -+ -+enum { -+ AuLcNonDir_FIINFO, -+ AuLcNonDir_DIINFO, -+ AuLcNonDir_IIINFO, -+ -+ AuLcDir_FIINFO, -+ AuLcDir_DIINFO, -+ AuLcDir_IIINFO, -+ -+ AuLcSymlink_DIINFO, -+ AuLcSymlink_IIINFO, -+ -+ AuLcKey_Last -+}; -+extern struct lock_class_key au_lc_key[AuLcKey_Last]; -+ -+void *au_kzrealloc(void *p, unsigned int nused, unsigned int new_sz, gfp_t gfp); -+int au_seq_path(struct seq_file *seq, struct path *path); -+ -+#ifdef CONFIG_PROC_FS -+/* procfs.c */ -+int __init au_procfs_init(void); -+void au_procfs_fin(void); -+#else -+AuStubInt0(au_procfs_init, void); -+AuStubVoid(au_procfs_fin, void); -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* kmem cache */ -+enum { -+ AuCache_DINFO, -+ AuCache_ICNTNR, -+ AuCache_FINFO, -+ AuCache_VDIR, -+ AuCache_DEHSTR, -+ AuCache_HNOTIFY, /* must be last */ -+ AuCache_Last -+}; -+ -+#define AuCacheFlags (SLAB_RECLAIM_ACCOUNT | SLAB_MEM_SPREAD) -+#define 
AuCache(type) KMEM_CACHE(type, AuCacheFlags) -+#define AuCacheCtor(type, ctor) \ -+ kmem_cache_create(#type, sizeof(struct type), \ -+ __alignof__(struct type), AuCacheFlags, ctor) -+ -+extern struct kmem_cache *au_cachep[]; -+ -+#define AuCacheFuncs(name, index) \ -+static inline struct au_##name *au_cache_alloc_##name(void) \ -+{ return kmem_cache_alloc(au_cachep[AuCache_##index], GFP_NOFS); } \ -+static inline void au_cache_free_##name(struct au_##name *p) \ -+{ kmem_cache_free(au_cachep[AuCache_##index], p); } -+ -+AuCacheFuncs(dinfo, DINFO); -+AuCacheFuncs(icntnr, ICNTNR); -+AuCacheFuncs(finfo, FINFO); -+AuCacheFuncs(vdir, VDIR); -+AuCacheFuncs(vdir_dehstr, DEHSTR); -+#ifdef CONFIG_AUFS_HNOTIFY -+AuCacheFuncs(hnotify, HNOTIFY); -+#endif -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_MODULE_H__ */ -diff --git a/fs/aufs/mvdown.c b/fs/aufs/mvdown.c -new file mode 100644 -index 0000000..e660c8f ---- /dev/null -+++ b/fs/aufs/mvdown.c -@@ -0,0 +1,703 @@ -+/* -+ * Copyright (C) 2011-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * move-down, opposite of copy-up -+ */ -+ -+#include "aufs.h" -+ -+struct au_mvd_args { -+ struct { -+ struct super_block *h_sb; -+ struct dentry *h_parent; -+ struct au_hinode *hdir; -+ struct inode *h_dir, *h_inode; -+ struct au_pin pin; -+ } info[AUFS_MVDOWN_NARRAY]; -+ -+ struct aufs_mvdown mvdown; -+ struct dentry *dentry, *parent; -+ struct inode *inode, *dir; -+ struct super_block *sb; -+ aufs_bindex_t bopq, bwh, bfound; -+ unsigned char rename_lock; -+}; -+ -+#define mvd_errno mvdown.au_errno -+#define mvd_bsrc mvdown.stbr[AUFS_MVDOWN_UPPER].bindex -+#define mvd_src_brid mvdown.stbr[AUFS_MVDOWN_UPPER].brid -+#define mvd_bdst mvdown.stbr[AUFS_MVDOWN_LOWER].bindex -+#define mvd_dst_brid mvdown.stbr[AUFS_MVDOWN_LOWER].brid -+ -+#define mvd_h_src_sb info[AUFS_MVDOWN_UPPER].h_sb -+#define mvd_h_src_parent info[AUFS_MVDOWN_UPPER].h_parent -+#define mvd_hdir_src info[AUFS_MVDOWN_UPPER].hdir -+#define mvd_h_src_dir info[AUFS_MVDOWN_UPPER].h_dir -+#define mvd_h_src_inode info[AUFS_MVDOWN_UPPER].h_inode -+#define mvd_pin_src info[AUFS_MVDOWN_UPPER].pin -+ -+#define mvd_h_dst_sb info[AUFS_MVDOWN_LOWER].h_sb -+#define mvd_h_dst_parent info[AUFS_MVDOWN_LOWER].h_parent -+#define mvd_hdir_dst info[AUFS_MVDOWN_LOWER].hdir -+#define mvd_h_dst_dir info[AUFS_MVDOWN_LOWER].h_dir -+#define mvd_h_dst_inode info[AUFS_MVDOWN_LOWER].h_inode -+#define mvd_pin_dst info[AUFS_MVDOWN_LOWER].pin -+ -+#define AU_MVD_PR(flag, ...) 
do { \ -+ if (flag) \ -+ pr_err(__VA_ARGS__); \ -+ } while (0) -+ -+static int find_lower_writable(struct au_mvd_args *a) -+{ -+ struct super_block *sb; -+ aufs_bindex_t bindex, bend; -+ struct au_branch *br; -+ -+ sb = a->sb; -+ bindex = a->mvd_bsrc; -+ bend = au_sbend(sb); -+ if (a->mvdown.flags & AUFS_MVDOWN_FHSM_LOWER) -+ for (bindex++; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (au_br_fhsm(br->br_perm) -+ && (!(au_br_sb(br)->s_flags & MS_RDONLY))) -+ return bindex; -+ } -+ else if (!(a->mvdown.flags & AUFS_MVDOWN_ROLOWER)) -+ for (bindex++; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (!au_br_rdonly(br)) -+ return bindex; -+ } -+ else -+ for (bindex++; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (!(au_br_sb(br)->s_flags & MS_RDONLY)) { -+ if (au_br_rdonly(br)) -+ a->mvdown.flags -+ |= AUFS_MVDOWN_ROLOWER_R; -+ return bindex; -+ } -+ } -+ -+ return -1; -+} -+ -+/* make the parent dir on bdst */ -+static int au_do_mkdir(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ -+ err = 0; -+ a->mvd_hdir_src = au_hi(a->dir, a->mvd_bsrc); -+ a->mvd_hdir_dst = au_hi(a->dir, a->mvd_bdst); -+ a->mvd_h_src_parent = au_h_dptr(a->parent, a->mvd_bsrc); -+ a->mvd_h_dst_parent = NULL; -+ if (au_dbend(a->parent) >= a->mvd_bdst) -+ a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst); -+ if (!a->mvd_h_dst_parent) { -+ err = au_cpdown_dirs(a->dentry, a->mvd_bdst); -+ if (unlikely(err)) { -+ AU_MVD_PR(dmsg, "cpdown_dirs failed\n"); -+ goto out; -+ } -+ a->mvd_h_dst_parent = au_h_dptr(a->parent, a->mvd_bdst); -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* lock them all */ -+static int au_do_lock(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct dentry *h_trap; -+ -+ a->mvd_h_src_sb = au_sbr_sb(a->sb, a->mvd_bsrc); -+ a->mvd_h_dst_sb = au_sbr_sb(a->sb, a->mvd_bdst); -+ err = au_pin(&a->mvd_pin_dst, a->dentry, a->mvd_bdst, -+ au_opt_udba(a->sb), -+ AuPin_MNT_WRITE | 
AuPin_DI_LOCKED); -+ AuTraceErr(err); -+ if (unlikely(err)) { -+ AU_MVD_PR(dmsg, "pin_dst failed\n"); -+ goto out; -+ } -+ -+ if (a->mvd_h_src_sb != a->mvd_h_dst_sb) { -+ a->rename_lock = 0; -+ au_pin_init(&a->mvd_pin_src, a->dentry, a->mvd_bsrc, -+ AuLsc_DI_PARENT, AuLsc_I_PARENT3, -+ au_opt_udba(a->sb), -+ AuPin_MNT_WRITE | AuPin_DI_LOCKED); -+ err = au_do_pin(&a->mvd_pin_src); -+ AuTraceErr(err); -+ a->mvd_h_src_dir = a->mvd_h_src_parent->d_inode; -+ if (unlikely(err)) { -+ AU_MVD_PR(dmsg, "pin_src failed\n"); -+ goto out_dst; -+ } -+ goto out; /* success */ -+ } -+ -+ a->rename_lock = 1; -+ au_pin_hdir_unlock(&a->mvd_pin_dst); -+ err = au_pin(&a->mvd_pin_src, a->dentry, a->mvd_bsrc, -+ au_opt_udba(a->sb), -+ AuPin_MNT_WRITE | AuPin_DI_LOCKED); -+ AuTraceErr(err); -+ a->mvd_h_src_dir = a->mvd_h_src_parent->d_inode; -+ if (unlikely(err)) { -+ AU_MVD_PR(dmsg, "pin_src failed\n"); -+ au_pin_hdir_lock(&a->mvd_pin_dst); -+ goto out_dst; -+ } -+ au_pin_hdir_unlock(&a->mvd_pin_src); -+ h_trap = vfsub_lock_rename(a->mvd_h_src_parent, a->mvd_hdir_src, -+ a->mvd_h_dst_parent, a->mvd_hdir_dst); -+ if (h_trap) { -+ err = (h_trap != a->mvd_h_src_parent); -+ if (err) -+ err = (h_trap != a->mvd_h_dst_parent); -+ } -+ BUG_ON(err); /* it should never happen */ -+ if (unlikely(a->mvd_h_src_dir != au_pinned_h_dir(&a->mvd_pin_src))) { -+ err = -EBUSY; -+ AuTraceErr(err); -+ vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src, -+ a->mvd_h_dst_parent, a->mvd_hdir_dst); -+ au_pin_hdir_lock(&a->mvd_pin_src); -+ au_unpin(&a->mvd_pin_src); -+ au_pin_hdir_lock(&a->mvd_pin_dst); -+ goto out_dst; -+ } -+ goto out; /* success */ -+ -+out_dst: -+ au_unpin(&a->mvd_pin_dst); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static void au_do_unlock(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ if (!a->rename_lock) -+ au_unpin(&a->mvd_pin_src); -+ else { -+ vfsub_unlock_rename(a->mvd_h_src_parent, a->mvd_hdir_src, -+ a->mvd_h_dst_parent, a->mvd_hdir_dst); -+ 
au_pin_hdir_lock(&a->mvd_pin_src); -+ au_unpin(&a->mvd_pin_src); -+ au_pin_hdir_lock(&a->mvd_pin_dst); -+ } -+ au_unpin(&a->mvd_pin_dst); -+} -+ -+/* copy-down the file */ -+static int au_do_cpdown(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct au_cp_generic cpg = { -+ .dentry = a->dentry, -+ .bdst = a->mvd_bdst, -+ .bsrc = a->mvd_bsrc, -+ .len = -1, -+ .pin = &a->mvd_pin_dst, -+ .flags = AuCpup_DTIME | AuCpup_HOPEN -+ }; -+ -+ AuDbg("b%d, b%d\n", cpg.bsrc, cpg.bdst); -+ if (a->mvdown.flags & AUFS_MVDOWN_OWLOWER) -+ au_fset_cpup(cpg.flags, OVERWRITE); -+ if (a->mvdown.flags & AUFS_MVDOWN_ROLOWER) -+ au_fset_cpup(cpg.flags, RWDST); -+ err = au_sio_cpdown_simple(&cpg); -+ if (unlikely(err)) -+ AU_MVD_PR(dmsg, "cpdown failed\n"); -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+/* -+ * unlink the whiteout on bdst if exist which may be created by UDBA while we -+ * were sleeping -+ */ -+static int au_do_unlink_wh(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct path h_path; -+ struct au_branch *br; -+ struct inode *delegated; -+ -+ br = au_sbr(a->sb, a->mvd_bdst); -+ h_path.dentry = au_wh_lkup(a->mvd_h_dst_parent, &a->dentry->d_name, br); -+ err = PTR_ERR(h_path.dentry); -+ if (IS_ERR(h_path.dentry)) { -+ AU_MVD_PR(dmsg, "wh_lkup failed\n"); -+ goto out; -+ } -+ -+ err = 0; -+ if (h_path.dentry->d_inode) { -+ h_path.mnt = au_br_mnt(br); -+ delegated = NULL; -+ err = vfsub_unlink(a->mvd_h_dst_parent->d_inode, &h_path, -+ &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ if (unlikely(err)) -+ AU_MVD_PR(dmsg, "wh_unlink failed\n"); -+ } -+ dput(h_path.dentry); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* -+ * unlink the topmost h_dentry -+ */ -+static int au_do_unlink(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct path h_path; -+ struct inode *delegated; -+ -+ 
h_path.mnt = au_sbr_mnt(a->sb, a->mvd_bsrc); -+ h_path.dentry = au_h_dptr(a->dentry, a->mvd_bsrc); -+ delegated = NULL; -+ err = vfsub_unlink(a->mvd_h_src_dir, &h_path, &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ if (unlikely(err)) -+ AU_MVD_PR(dmsg, "unlink failed\n"); -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+/* Since mvdown succeeded, we ignore an error of this function */ -+static void au_do_stfs(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct au_branch *br; -+ -+ a->mvdown.flags |= AUFS_MVDOWN_STFS_FAILED; -+ br = au_sbr(a->sb, a->mvd_bsrc); -+ err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_UPPER].stfs); -+ if (!err) { -+ br = au_sbr(a->sb, a->mvd_bdst); -+ a->mvdown.stbr[AUFS_MVDOWN_LOWER].brid = br->br_id; -+ err = au_br_stfs(br, &a->mvdown.stbr[AUFS_MVDOWN_LOWER].stfs); -+ } -+ if (!err) -+ a->mvdown.flags &= ~AUFS_MVDOWN_STFS_FAILED; -+ else -+ AU_MVD_PR(dmsg, "statfs failed (%d), ignored\n", err); -+} -+ -+/* -+ * copy-down the file and unlink the bsrc file. -+ * - unlink the bdst whout if exist -+ * - copy-down the file (with whtmp name and rename) -+ * - unlink the bsrc file -+ */ -+static int au_do_mvdown(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ -+ err = au_do_mkdir(dmsg, a); -+ if (!err) -+ err = au_do_lock(dmsg, a); -+ if (unlikely(err)) -+ goto out; -+ -+ /* -+ * do not revert the activities we made on bdst since they should be -+ * harmless in aufs. 
-+ */ -+ -+ err = au_do_cpdown(dmsg, a); -+ if (!err) -+ err = au_do_unlink_wh(dmsg, a); -+ if (!err && !(a->mvdown.flags & AUFS_MVDOWN_KUPPER)) -+ err = au_do_unlink(dmsg, a); -+ if (unlikely(err)) -+ goto out_unlock; -+ -+ AuDbg("%pd2, 0x%x, %d --> %d\n", -+ a->dentry, a->mvdown.flags, a->mvd_bsrc, a->mvd_bdst); -+ if (find_lower_writable(a) < 0) -+ a->mvdown.flags |= AUFS_MVDOWN_BOTTOM; -+ -+ if (a->mvdown.flags & AUFS_MVDOWN_STFS) -+ au_do_stfs(dmsg, a); -+ -+ /* maintain internal array */ -+ if (!(a->mvdown.flags & AUFS_MVDOWN_KUPPER)) { -+ au_set_h_dptr(a->dentry, a->mvd_bsrc, NULL); -+ au_set_dbstart(a->dentry, a->mvd_bdst); -+ au_set_h_iptr(a->inode, a->mvd_bsrc, NULL, /*flags*/0); -+ au_set_ibstart(a->inode, a->mvd_bdst); -+ } else { -+ /* hide the lower */ -+ au_set_h_dptr(a->dentry, a->mvd_bdst, NULL); -+ au_set_dbend(a->dentry, a->mvd_bsrc); -+ au_set_h_iptr(a->inode, a->mvd_bdst, NULL, /*flags*/0); -+ au_set_ibend(a->inode, a->mvd_bsrc); -+ } -+ if (au_dbend(a->dentry) < a->mvd_bdst) -+ au_set_dbend(a->dentry, a->mvd_bdst); -+ if (au_ibend(a->inode) < a->mvd_bdst) -+ au_set_ibend(a->inode, a->mvd_bdst); -+ -+out_unlock: -+ au_do_unlock(dmsg, a); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* make sure the file is idle */ -+static int au_mvd_args_busy(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err, plinked; -+ -+ err = 0; -+ plinked = !!au_opt_test(au_mntflags(a->sb), PLINK); -+ if (au_dbstart(a->dentry) == a->mvd_bsrc -+ && au_dcount(a->dentry) == 1 -+ && atomic_read(&a->inode->i_count) == 1 -+ /* && a->mvd_h_src_inode->i_nlink == 1 */ -+ && (!plinked || !au_plink_test(a->inode)) -+ && a->inode->i_nlink == 1) -+ goto out; -+ -+ err = -EBUSY; -+ AU_MVD_PR(dmsg, -+ "b%d, d{b%d, c%d?}, i{c%d?, l%u}, hi{l%u}, p{%d, %d}\n", -+ a->mvd_bsrc, au_dbstart(a->dentry), au_dcount(a->dentry), -+ atomic_read(&a->inode->i_count), a->inode->i_nlink, -+ 
a->mvd_h_src_inode->i_nlink, -+ plinked, plinked ? au_plink_test(a->inode) : 0); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* make sure the parent dir is fine */ -+static int au_mvd_args_parent(const unsigned char dmsg, -+ struct au_mvd_args *a) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ -+ err = 0; -+ if (unlikely(au_alive_dir(a->parent))) { -+ err = -ENOENT; -+ AU_MVD_PR(dmsg, "parent dir is dead\n"); -+ goto out; -+ } -+ -+ a->bopq = au_dbdiropq(a->parent); -+ bindex = au_wbr_nonopq(a->dentry, a->mvd_bdst); -+ AuDbg("b%d\n", bindex); -+ if (unlikely((bindex >= 0 && bindex < a->mvd_bdst) -+ || (a->bopq != -1 && a->bopq < a->mvd_bdst))) { -+ err = -EINVAL; -+ a->mvd_errno = EAU_MVDOWN_OPAQUE; -+ AU_MVD_PR(dmsg, "ancestor is opaque b%d, b%d\n", -+ a->bopq, a->mvd_bdst); -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_mvd_args_intermediate(const unsigned char dmsg, -+ struct au_mvd_args *a) -+{ -+ int err; -+ struct au_dinfo *dinfo, *tmp; -+ -+ /* lookup the next lower positive entry */ -+ err = -ENOMEM; -+ tmp = au_di_alloc(a->sb, AuLsc_DI_TMP); -+ if (unlikely(!tmp)) -+ goto out; -+ -+ a->bfound = -1; -+ a->bwh = -1; -+ dinfo = au_di(a->dentry); -+ au_di_cp(tmp, dinfo); -+ au_di_swap(tmp, dinfo); -+ -+ /* returns the number of positive dentries */ -+ err = au_lkup_dentry(a->dentry, a->mvd_bsrc + 1, /*type*/0); -+ if (!err) -+ a->bwh = au_dbwh(a->dentry); -+ else if (err > 0) -+ a->bfound = au_dbstart(a->dentry); -+ -+ au_di_swap(tmp, dinfo); -+ au_rw_write_unlock(&tmp->di_rwsem); -+ au_di_free(tmp); -+ if (unlikely(err < 0)) -+ AU_MVD_PR(dmsg, "failed look-up lower\n"); -+ -+ /* -+ * here, we have these cases. -+ * bfound == -1 -+ * no positive dentry under bsrc. there are more sub-cases. -+ * bwh < 0 -+ * there no whiteout, we can safely move-down. -+ * bwh <= bsrc -+ * impossible -+ * bsrc < bwh && bwh < bdst -+ * there is a whiteout on RO branch. cannot proceed. 
-+ * bwh == bdst -+ * there is a whiteout on the RW target branch. it should -+ * be removed. -+ * bdst < bwh -+ * there is a whiteout somewhere unrelated branch. -+ * -1 < bfound && bfound <= bsrc -+ * impossible. -+ * bfound < bdst -+ * found, but it is on RO branch between bsrc and bdst. cannot -+ * proceed. -+ * bfound == bdst -+ * found, replace it if AUFS_MVDOWN_FORCE is set. otherwise return -+ * error. -+ * bdst < bfound -+ * found, after we create the file on bdst, it will be hidden. -+ */ -+ -+ AuDebugOn(a->bfound == -1 -+ && a->bwh != -1 -+ && a->bwh <= a->mvd_bsrc); -+ AuDebugOn(-1 < a->bfound -+ && a->bfound <= a->mvd_bsrc); -+ -+ err = -EINVAL; -+ if (a->bfound == -1 -+ && a->mvd_bsrc < a->bwh -+ && a->bwh != -1 -+ && a->bwh < a->mvd_bdst) { -+ a->mvd_errno = EAU_MVDOWN_WHITEOUT; -+ AU_MVD_PR(dmsg, "bsrc %d, bdst %d, bfound %d, bwh %d\n", -+ a->mvd_bsrc, a->mvd_bdst, a->bfound, a->bwh); -+ goto out; -+ } else if (a->bfound != -1 && a->bfound < a->mvd_bdst) { -+ a->mvd_errno = EAU_MVDOWN_UPPER; -+ AU_MVD_PR(dmsg, "bdst %d, bfound %d\n", -+ a->mvd_bdst, a->bfound); -+ goto out; -+ } -+ -+ err = 0; /* success */ -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_mvd_args_exist(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ -+ err = 0; -+ if (!(a->mvdown.flags & AUFS_MVDOWN_OWLOWER) -+ && a->bfound == a->mvd_bdst) -+ err = -EEXIST; -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_mvd_args(const unsigned char dmsg, struct au_mvd_args *a) -+{ -+ int err; -+ struct au_branch *br; -+ -+ err = -EISDIR; -+ if (unlikely(S_ISDIR(a->inode->i_mode))) -+ goto out; -+ -+ err = -EINVAL; -+ if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_UPPER)) -+ a->mvd_bsrc = au_ibstart(a->inode); -+ else { -+ a->mvd_bsrc = au_br_index(a->sb, a->mvd_src_brid); -+ if (unlikely(a->mvd_bsrc < 0 -+ || (a->mvd_bsrc < au_dbstart(a->dentry) -+ || au_dbend(a->dentry) < a->mvd_bsrc -+ || !au_h_dptr(a->dentry, a->mvd_bsrc)) -+ || (a->mvd_bsrc < 
au_ibstart(a->inode) -+ || au_ibend(a->inode) < a->mvd_bsrc -+ || !au_h_iptr(a->inode, a->mvd_bsrc)))) { -+ a->mvd_errno = EAU_MVDOWN_NOUPPER; -+ AU_MVD_PR(dmsg, "no upper\n"); -+ goto out; -+ } -+ } -+ if (unlikely(a->mvd_bsrc == au_sbend(a->sb))) { -+ a->mvd_errno = EAU_MVDOWN_BOTTOM; -+ AU_MVD_PR(dmsg, "on the bottom\n"); -+ goto out; -+ } -+ a->mvd_h_src_inode = au_h_iptr(a->inode, a->mvd_bsrc); -+ br = au_sbr(a->sb, a->mvd_bsrc); -+ err = au_br_rdonly(br); -+ if (!(a->mvdown.flags & AUFS_MVDOWN_ROUPPER)) { -+ if (unlikely(err)) -+ goto out; -+ } else if (!(vfsub_native_ro(a->mvd_h_src_inode) -+ || IS_APPEND(a->mvd_h_src_inode))) { -+ if (err) -+ a->mvdown.flags |= AUFS_MVDOWN_ROUPPER_R; -+ /* go on */ -+ } else -+ goto out; -+ -+ err = -EINVAL; -+ if (!(a->mvdown.flags & AUFS_MVDOWN_BRID_LOWER)) { -+ a->mvd_bdst = find_lower_writable(a); -+ if (unlikely(a->mvd_bdst < 0)) { -+ a->mvd_errno = EAU_MVDOWN_BOTTOM; -+ AU_MVD_PR(dmsg, "no writable lower branch\n"); -+ goto out; -+ } -+ } else { -+ a->mvd_bdst = au_br_index(a->sb, a->mvd_dst_brid); -+ if (unlikely(a->mvd_bdst < 0 -+ || au_sbend(a->sb) < a->mvd_bdst)) { -+ a->mvd_errno = EAU_MVDOWN_NOLOWERBR; -+ AU_MVD_PR(dmsg, "no lower brid\n"); -+ goto out; -+ } -+ } -+ -+ err = au_mvd_args_busy(dmsg, a); -+ if (!err) -+ err = au_mvd_args_parent(dmsg, a); -+ if (!err) -+ err = au_mvd_args_intermediate(dmsg, a); -+ if (!err) -+ err = au_mvd_args_exist(dmsg, a); -+ if (!err) -+ AuDbg("b%d, b%d\n", a->mvd_bsrc, a->mvd_bdst); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *uarg) -+{ -+ int err, e; -+ unsigned char dmsg; -+ struct au_mvd_args *args; -+ struct inode *inode; -+ -+ inode = dentry->d_inode; -+ err = -EPERM; -+ if (unlikely(!capable(CAP_SYS_ADMIN))) -+ goto out; -+ -+ err = -ENOMEM; -+ args = kmalloc(sizeof(*args), GFP_NOFS); -+ if (unlikely(!args)) -+ goto out; -+ -+ err = copy_from_user(&args->mvdown, uarg, sizeof(args->mvdown)); -+ if 
(!err) -+ err = !access_ok(VERIFY_WRITE, uarg, sizeof(*uarg)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ goto out_free; -+ } -+ AuDbg("flags 0x%x\n", args->mvdown.flags); -+ args->mvdown.flags &= ~(AUFS_MVDOWN_ROLOWER_R | AUFS_MVDOWN_ROUPPER_R); -+ args->mvdown.au_errno = 0; -+ args->dentry = dentry; -+ args->inode = inode; -+ args->sb = dentry->d_sb; -+ -+ err = -ENOENT; -+ dmsg = !!(args->mvdown.flags & AUFS_MVDOWN_DMSG); -+ args->parent = dget_parent(dentry); -+ args->dir = args->parent->d_inode; -+ mutex_lock_nested(&args->dir->i_mutex, I_MUTEX_PARENT); -+ dput(args->parent); -+ if (unlikely(args->parent != dentry->d_parent)) { -+ AU_MVD_PR(dmsg, "parent dir is moved\n"); -+ goto out_dir; -+ } -+ -+ mutex_lock_nested(&inode->i_mutex, I_MUTEX_CHILD); -+ err = aufs_read_lock(dentry, AuLock_DW | AuLock_FLUSH | AuLock_NOPLMW); -+ if (unlikely(err)) -+ goto out_inode; -+ -+ di_write_lock_parent(args->parent); -+ err = au_mvd_args(dmsg, args); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ err = au_do_mvdown(dmsg, args); -+ if (unlikely(err)) -+ goto out_parent; -+ -+ au_cpup_attr_timesizes(args->dir); -+ au_cpup_attr_timesizes(inode); -+ if (!(args->mvdown.flags & AUFS_MVDOWN_KUPPER)) -+ au_cpup_igen(inode, au_h_iptr(inode, args->mvd_bdst)); -+ /* au_digen_dec(dentry); */ -+ -+out_parent: -+ di_write_unlock(args->parent); -+ aufs_read_unlock(dentry, AuLock_DW); -+out_inode: -+ mutex_unlock(&inode->i_mutex); -+out_dir: -+ mutex_unlock(&args->dir->i_mutex); -+out_free: -+ e = copy_to_user(uarg, &args->mvdown, sizeof(args->mvdown)); -+ if (unlikely(e)) -+ err = -EFAULT; -+ kfree(args); -+out: -+ AuTraceErr(err); -+ return err; -+} -diff --git a/fs/aufs/opts.c b/fs/aufs/opts.c -new file mode 100644 -index 0000000..0363f67 ---- /dev/null -+++ b/fs/aufs/opts.c -@@ -0,0 +1,1878 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * mount options/flags -+ */ -+ -+#include -+#include /* a distribution requires */ -+#include -+#include "aufs.h" -+ -+/* ---------------------------------------------------------------------- */ -+ -+enum { -+ Opt_br, -+ Opt_add, Opt_del, Opt_mod, Opt_append, Opt_prepend, -+ Opt_idel, Opt_imod, -+ Opt_dirwh, Opt_rdcache, Opt_rdblk, Opt_rdhash, -+ Opt_rdblk_def, Opt_rdhash_def, -+ Opt_xino, Opt_noxino, -+ Opt_trunc_xino, Opt_trunc_xino_v, Opt_notrunc_xino, -+ Opt_trunc_xino_path, Opt_itrunc_xino, -+ Opt_trunc_xib, Opt_notrunc_xib, -+ Opt_shwh, Opt_noshwh, -+ Opt_plink, Opt_noplink, Opt_list_plink, -+ Opt_udba, -+ Opt_dio, Opt_nodio, -+ Opt_diropq_a, Opt_diropq_w, -+ Opt_warn_perm, Opt_nowarn_perm, -+ Opt_wbr_copyup, Opt_wbr_create, -+ Opt_fhsm_sec, -+ Opt_refrof, Opt_norefrof, -+ Opt_verbose, Opt_noverbose, -+ Opt_sum, Opt_nosum, Opt_wsum, -+ Opt_dirperm1, Opt_nodirperm1, -+ Opt_acl, Opt_noacl, -+ Opt_tail, Opt_ignore, Opt_ignore_silent, Opt_err -+}; -+ -+static match_table_t options = { -+ {Opt_br, "br=%s"}, -+ {Opt_br, "br:%s"}, -+ -+ {Opt_add, "add=%d:%s"}, -+ {Opt_add, "add:%d:%s"}, -+ {Opt_add, "ins=%d:%s"}, -+ {Opt_add, "ins:%d:%s"}, -+ {Opt_append, "append=%s"}, -+ {Opt_append, "append:%s"}, -+ {Opt_prepend, "prepend=%s"}, -+ {Opt_prepend, "prepend:%s"}, -+ -+ {Opt_del, "del=%s"}, -+ {Opt_del, 
"del:%s"}, -+ /* {Opt_idel, "idel:%d"}, */ -+ {Opt_mod, "mod=%s"}, -+ {Opt_mod, "mod:%s"}, -+ /* {Opt_imod, "imod:%d:%s"}, */ -+ -+ {Opt_dirwh, "dirwh=%d"}, -+ -+ {Opt_xino, "xino=%s"}, -+ {Opt_noxino, "noxino"}, -+ {Opt_trunc_xino, "trunc_xino"}, -+ {Opt_trunc_xino_v, "trunc_xino_v=%d:%d"}, -+ {Opt_notrunc_xino, "notrunc_xino"}, -+ {Opt_trunc_xino_path, "trunc_xino=%s"}, -+ {Opt_itrunc_xino, "itrunc_xino=%d"}, -+ /* {Opt_zxino, "zxino=%s"}, */ -+ {Opt_trunc_xib, "trunc_xib"}, -+ {Opt_notrunc_xib, "notrunc_xib"}, -+ -+#ifdef CONFIG_PROC_FS -+ {Opt_plink, "plink"}, -+#else -+ {Opt_ignore_silent, "plink"}, -+#endif -+ -+ {Opt_noplink, "noplink"}, -+ -+#ifdef CONFIG_AUFS_DEBUG -+ {Opt_list_plink, "list_plink"}, -+#endif -+ -+ {Opt_udba, "udba=%s"}, -+ -+ {Opt_dio, "dio"}, -+ {Opt_nodio, "nodio"}, -+ -+#ifdef CONFIG_AUFS_FHSM -+ {Opt_fhsm_sec, "fhsm_sec=%d"}, -+#else -+ {Opt_ignore_silent, "fhsm_sec=%d"}, -+#endif -+ -+ {Opt_diropq_a, "diropq=always"}, -+ {Opt_diropq_a, "diropq=a"}, -+ {Opt_diropq_w, "diropq=whiteouted"}, -+ {Opt_diropq_w, "diropq=w"}, -+ -+ {Opt_warn_perm, "warn_perm"}, -+ {Opt_nowarn_perm, "nowarn_perm"}, -+ -+ /* keep them temporary */ -+ {Opt_ignore_silent, "nodlgt"}, -+ {Opt_ignore_silent, "clean_plink"}, -+ -+#ifdef CONFIG_AUFS_SHWH -+ {Opt_shwh, "shwh"}, -+#endif -+ {Opt_noshwh, "noshwh"}, -+ -+ {Opt_dirperm1, "dirperm1"}, -+ {Opt_nodirperm1, "nodirperm1"}, -+ -+ {Opt_refrof, "refrof"}, -+ {Opt_norefrof, "norefrof"}, -+ -+ {Opt_verbose, "verbose"}, -+ {Opt_verbose, "v"}, -+ {Opt_noverbose, "noverbose"}, -+ {Opt_noverbose, "quiet"}, -+ {Opt_noverbose, "q"}, -+ {Opt_noverbose, "silent"}, -+ -+ {Opt_sum, "sum"}, -+ {Opt_nosum, "nosum"}, -+ {Opt_wsum, "wsum"}, -+ -+ {Opt_rdcache, "rdcache=%d"}, -+ {Opt_rdblk, "rdblk=%d"}, -+ {Opt_rdblk_def, "rdblk=def"}, -+ {Opt_rdhash, "rdhash=%d"}, -+ {Opt_rdhash_def, "rdhash=def"}, -+ -+ {Opt_wbr_create, "create=%s"}, -+ {Opt_wbr_create, "create_policy=%s"}, -+ {Opt_wbr_copyup, "cpup=%s"}, -+ {Opt_wbr_copyup, 
"copyup=%s"}, -+ {Opt_wbr_copyup, "copyup_policy=%s"}, -+ -+ /* generic VFS flag */ -+#ifdef CONFIG_FS_POSIX_ACL -+ {Opt_acl, "acl"}, -+ {Opt_noacl, "noacl"}, -+#else -+ {Opt_ignore_silent, "acl"}, -+ {Opt_ignore_silent, "noacl"}, -+#endif -+ -+ /* internal use for the scripts */ -+ {Opt_ignore_silent, "si=%s"}, -+ -+ {Opt_br, "dirs=%s"}, -+ {Opt_ignore, "debug=%d"}, -+ {Opt_ignore, "delete=whiteout"}, -+ {Opt_ignore, "delete=all"}, -+ {Opt_ignore, "imap=%s"}, -+ -+ /* temporary workaround, due to old mount(8)? */ -+ {Opt_ignore_silent, "relatime"}, -+ -+ {Opt_err, NULL} -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static const char *au_parser_pattern(int val, match_table_t tbl) -+{ -+ struct match_token *p; -+ -+ p = tbl; -+ while (p->pattern) { -+ if (p->token == val) -+ return p->pattern; -+ p++; -+ } -+ BUG(); -+ return "??"; -+} -+ -+static const char *au_optstr(int *val, match_table_t tbl) -+{ -+ struct match_token *p; -+ int v; -+ -+ v = *val; -+ if (!v) -+ goto out; -+ p = tbl; -+ while (p->pattern) { -+ if (p->token -+ && (v & p->token) == p->token) { -+ *val &= ~p->token; -+ return p->pattern; -+ } -+ p++; -+ } -+ -+out: -+ return NULL; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static match_table_t brperm = { -+ {AuBrPerm_RO, AUFS_BRPERM_RO}, -+ {AuBrPerm_RR, AUFS_BRPERM_RR}, -+ {AuBrPerm_RW, AUFS_BRPERM_RW}, -+ {0, NULL} -+}; -+ -+static match_table_t brattr = { -+ /* general */ -+ {AuBrAttr_COO_REG, AUFS_BRATTR_COO_REG}, -+ {AuBrAttr_COO_ALL, AUFS_BRATTR_COO_ALL}, -+ /* 'unpin' attrib is meaningless since linux-3.18-rc1 */ -+ {AuBrAttr_UNPIN, AUFS_BRATTR_UNPIN}, -+#ifdef CONFIG_AUFS_FHSM -+ {AuBrAttr_FHSM, AUFS_BRATTR_FHSM}, -+#endif -+#ifdef CONFIG_AUFS_XATTR -+ {AuBrAttr_ICEX, AUFS_BRATTR_ICEX}, -+ {AuBrAttr_ICEX_SEC, AUFS_BRATTR_ICEX_SEC}, -+ {AuBrAttr_ICEX_SYS, AUFS_BRATTR_ICEX_SYS}, -+ {AuBrAttr_ICEX_TR, AUFS_BRATTR_ICEX_TR}, -+ {AuBrAttr_ICEX_USR, 
AUFS_BRATTR_ICEX_USR}, -+ {AuBrAttr_ICEX_OTH, AUFS_BRATTR_ICEX_OTH}, -+#endif -+ -+ /* ro/rr branch */ -+ {AuBrRAttr_WH, AUFS_BRRATTR_WH}, -+ -+ /* rw branch */ -+ {AuBrWAttr_MOO, AUFS_BRWATTR_MOO}, -+ {AuBrWAttr_NoLinkWH, AUFS_BRWATTR_NLWH}, -+ -+ {0, NULL} -+}; -+ -+static int br_attr_val(char *str, match_table_t table, substring_t args[]) -+{ -+ int attr, v; -+ char *p; -+ -+ attr = 0; -+ do { -+ p = strchr(str, '+'); -+ if (p) -+ *p = 0; -+ v = match_token(str, table, args); -+ if (v) { -+ if (v & AuBrAttr_CMOO_Mask) -+ attr &= ~AuBrAttr_CMOO_Mask; -+ attr |= v; -+ } else { -+ if (p) -+ *p = '+'; -+ pr_warn("ignored branch attribute %s\n", str); -+ break; -+ } -+ if (p) -+ str = p + 1; -+ } while (p); -+ -+ return attr; -+} -+ -+static int au_do_optstr_br_attr(au_br_perm_str_t *str, int perm) -+{ -+ int sz; -+ const char *p; -+ char *q; -+ -+ q = str->a; -+ *q = 0; -+ p = au_optstr(&perm, brattr); -+ if (p) { -+ sz = strlen(p); -+ memcpy(q, p, sz + 1); -+ q += sz; -+ } else -+ goto out; -+ -+ do { -+ p = au_optstr(&perm, brattr); -+ if (p) { -+ *q++ = '+'; -+ sz = strlen(p); -+ memcpy(q, p, sz + 1); -+ q += sz; -+ } -+ } while (p); -+ -+out: -+ return q - str->a; -+} -+ -+static int noinline_for_stack br_perm_val(char *perm) -+{ -+ int val, bad, sz; -+ char *p; -+ substring_t args[MAX_OPT_ARGS]; -+ au_br_perm_str_t attr; -+ -+ p = strchr(perm, '+'); -+ if (p) -+ *p = 0; -+ val = match_token(perm, brperm, args); -+ if (!val) { -+ if (p) -+ *p = '+'; -+ pr_warn("ignored branch permission %s\n", perm); -+ val = AuBrPerm_RO; -+ goto out; -+ } -+ if (!p) -+ goto out; -+ -+ val |= br_attr_val(p + 1, brattr, args); -+ -+ bad = 0; -+ switch (val & AuBrPerm_Mask) { -+ case AuBrPerm_RO: -+ case AuBrPerm_RR: -+ bad = val & AuBrWAttr_Mask; -+ val &= ~AuBrWAttr_Mask; -+ break; -+ case AuBrPerm_RW: -+ bad = val & AuBrRAttr_Mask; -+ val &= ~AuBrRAttr_Mask; -+ break; -+ } -+ -+ /* -+ * 'unpin' attrib becomes meaningless since linux-3.18-rc1, but aufs -+ * does not treat it as 
an error, just warning. -+ * this is a tiny guard for the user operation. -+ */ -+ if (val & AuBrAttr_UNPIN) { -+ bad |= AuBrAttr_UNPIN; -+ val &= ~AuBrAttr_UNPIN; -+ } -+ -+ if (unlikely(bad)) { -+ sz = au_do_optstr_br_attr(&attr, bad); -+ AuDebugOn(!sz); -+ pr_warn("ignored branch attribute %s\n", attr.a); -+ } -+ -+out: -+ return val; -+} -+ -+void au_optstr_br_perm(au_br_perm_str_t *str, int perm) -+{ -+ au_br_perm_str_t attr; -+ const char *p; -+ char *q; -+ int sz; -+ -+ q = str->a; -+ p = au_optstr(&perm, brperm); -+ AuDebugOn(!p || !*p); -+ sz = strlen(p); -+ memcpy(q, p, sz + 1); -+ q += sz; -+ -+ sz = au_do_optstr_br_attr(&attr, perm); -+ if (sz) { -+ *q++ = '+'; -+ memcpy(q, attr.a, sz + 1); -+ } -+ -+ AuDebugOn(strlen(str->a) >= sizeof(str->a)); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static match_table_t udbalevel = { -+ {AuOpt_UDBA_REVAL, "reval"}, -+ {AuOpt_UDBA_NONE, "none"}, -+#ifdef CONFIG_AUFS_HNOTIFY -+ {AuOpt_UDBA_HNOTIFY, "notify"}, /* abstraction */ -+#ifdef CONFIG_AUFS_HFSNOTIFY -+ {AuOpt_UDBA_HNOTIFY, "fsnotify"}, -+#endif -+#endif -+ {-1, NULL} -+}; -+ -+static int noinline_for_stack udba_val(char *str) -+{ -+ substring_t args[MAX_OPT_ARGS]; -+ -+ return match_token(str, udbalevel, args); -+} -+ -+const char *au_optstr_udba(int udba) -+{ -+ return au_parser_pattern(udba, udbalevel); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static match_table_t au_wbr_create_policy = { -+ {AuWbrCreate_TDP, "tdp"}, -+ {AuWbrCreate_TDP, "top-down-parent"}, -+ {AuWbrCreate_RR, "rr"}, -+ {AuWbrCreate_RR, "round-robin"}, -+ {AuWbrCreate_MFS, "mfs"}, -+ {AuWbrCreate_MFS, "most-free-space"}, -+ {AuWbrCreate_MFSV, "mfs:%d"}, -+ {AuWbrCreate_MFSV, "most-free-space:%d"}, -+ -+ {AuWbrCreate_MFSRR, "mfsrr:%d"}, -+ {AuWbrCreate_MFSRRV, "mfsrr:%d:%d"}, -+ {AuWbrCreate_PMFS, "pmfs"}, -+ {AuWbrCreate_PMFSV, "pmfs:%d"}, -+ {AuWbrCreate_PMFSRR, "pmfsrr:%d"}, -+ 
{AuWbrCreate_PMFSRRV, "pmfsrr:%d:%d"}, -+ -+ {-1, NULL} -+}; -+ -+/* -+ * cf. linux/lib/parser.c and cmdline.c -+ * gave up calling memparse() since it uses simple_strtoull() instead of -+ * kstrto...(). -+ */ -+static int noinline_for_stack -+au_match_ull(substring_t *s, unsigned long long *result) -+{ -+ int err; -+ unsigned int len; -+ char a[32]; -+ -+ err = -ERANGE; -+ len = s->to - s->from; -+ if (len + 1 <= sizeof(a)) { -+ memcpy(a, s->from, len); -+ a[len] = '\0'; -+ err = kstrtoull(a, 0, result); -+ } -+ return err; -+} -+ -+static int au_wbr_mfs_wmark(substring_t *arg, char *str, -+ struct au_opt_wbr_create *create) -+{ -+ int err; -+ unsigned long long ull; -+ -+ err = 0; -+ if (!au_match_ull(arg, &ull)) -+ create->mfsrr_watermark = ull; -+ else { -+ pr_err("bad integer in %s\n", str); -+ err = -EINVAL; -+ } -+ -+ return err; -+} -+ -+static int au_wbr_mfs_sec(substring_t *arg, char *str, -+ struct au_opt_wbr_create *create) -+{ -+ int n, err; -+ -+ err = 0; -+ if (!match_int(arg, &n) && 0 <= n && n <= AUFS_MFS_MAX_SEC) -+ create->mfs_second = n; -+ else { -+ pr_err("bad integer in %s\n", str); -+ err = -EINVAL; -+ } -+ -+ return err; -+} -+ -+static int noinline_for_stack -+au_wbr_create_val(char *str, struct au_opt_wbr_create *create) -+{ -+ int err, e; -+ substring_t args[MAX_OPT_ARGS]; -+ -+ err = match_token(str, au_wbr_create_policy, args); -+ create->wbr_create = err; -+ switch (err) { -+ case AuWbrCreate_MFSRRV: -+ case AuWbrCreate_PMFSRRV: -+ e = au_wbr_mfs_wmark(&args[0], str, create); -+ if (!e) -+ e = au_wbr_mfs_sec(&args[1], str, create); -+ if (unlikely(e)) -+ err = e; -+ break; -+ case AuWbrCreate_MFSRR: -+ case AuWbrCreate_PMFSRR: -+ e = au_wbr_mfs_wmark(&args[0], str, create); -+ if (unlikely(e)) { -+ err = e; -+ break; -+ } -+ /*FALLTHROUGH*/ -+ case AuWbrCreate_MFS: -+ case AuWbrCreate_PMFS: -+ create->mfs_second = AUFS_MFS_DEF_SEC; -+ break; -+ case AuWbrCreate_MFSV: -+ case AuWbrCreate_PMFSV: -+ e = au_wbr_mfs_sec(&args[0], str, 
create); -+ if (unlikely(e)) -+ err = e; -+ break; -+ } -+ -+ return err; -+} -+ -+const char *au_optstr_wbr_create(int wbr_create) -+{ -+ return au_parser_pattern(wbr_create, au_wbr_create_policy); -+} -+ -+static match_table_t au_wbr_copyup_policy = { -+ {AuWbrCopyup_TDP, "tdp"}, -+ {AuWbrCopyup_TDP, "top-down-parent"}, -+ {AuWbrCopyup_BUP, "bup"}, -+ {AuWbrCopyup_BUP, "bottom-up-parent"}, -+ {AuWbrCopyup_BU, "bu"}, -+ {AuWbrCopyup_BU, "bottom-up"}, -+ {-1, NULL} -+}; -+ -+static int noinline_for_stack au_wbr_copyup_val(char *str) -+{ -+ substring_t args[MAX_OPT_ARGS]; -+ -+ return match_token(str, au_wbr_copyup_policy, args); -+} -+ -+const char *au_optstr_wbr_copyup(int wbr_copyup) -+{ -+ return au_parser_pattern(wbr_copyup, au_wbr_copyup_policy); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static const int lkup_dirflags = LOOKUP_FOLLOW | LOOKUP_DIRECTORY; -+ -+static void dump_opts(struct au_opts *opts) -+{ -+#ifdef CONFIG_AUFS_DEBUG -+ /* reduce stack space */ -+ union { -+ struct au_opt_add *add; -+ struct au_opt_del *del; -+ struct au_opt_mod *mod; -+ struct au_opt_xino *xino; -+ struct au_opt_xino_itrunc *xino_itrunc; -+ struct au_opt_wbr_create *create; -+ } u; -+ struct au_opt *opt; -+ -+ opt = opts->opt; -+ while (opt->type != Opt_tail) { -+ switch (opt->type) { -+ case Opt_add: -+ u.add = &opt->add; -+ AuDbg("add {b%d, %s, 0x%x, %p}\n", -+ u.add->bindex, u.add->pathname, u.add->perm, -+ u.add->path.dentry); -+ break; -+ case Opt_del: -+ case Opt_idel: -+ u.del = &opt->del; -+ AuDbg("del {%s, %p}\n", -+ u.del->pathname, u.del->h_path.dentry); -+ break; -+ case Opt_mod: -+ case Opt_imod: -+ u.mod = &opt->mod; -+ AuDbg("mod {%s, 0x%x, %p}\n", -+ u.mod->path, u.mod->perm, u.mod->h_root); -+ break; -+ case Opt_append: -+ u.add = &opt->add; -+ AuDbg("append {b%d, %s, 0x%x, %p}\n", -+ u.add->bindex, u.add->pathname, u.add->perm, -+ u.add->path.dentry); -+ break; -+ case Opt_prepend: -+ u.add = &opt->add; -+ 
AuDbg("prepend {b%d, %s, 0x%x, %p}\n", -+ u.add->bindex, u.add->pathname, u.add->perm, -+ u.add->path.dentry); -+ break; -+ case Opt_dirwh: -+ AuDbg("dirwh %d\n", opt->dirwh); -+ break; -+ case Opt_rdcache: -+ AuDbg("rdcache %d\n", opt->rdcache); -+ break; -+ case Opt_rdblk: -+ AuDbg("rdblk %u\n", opt->rdblk); -+ break; -+ case Opt_rdblk_def: -+ AuDbg("rdblk_def\n"); -+ break; -+ case Opt_rdhash: -+ AuDbg("rdhash %u\n", opt->rdhash); -+ break; -+ case Opt_rdhash_def: -+ AuDbg("rdhash_def\n"); -+ break; -+ case Opt_xino: -+ u.xino = &opt->xino; -+ AuDbg("xino {%s %pD}\n", u.xino->path, u.xino->file); -+ break; -+ case Opt_trunc_xino: -+ AuLabel(trunc_xino); -+ break; -+ case Opt_notrunc_xino: -+ AuLabel(notrunc_xino); -+ break; -+ case Opt_trunc_xino_path: -+ case Opt_itrunc_xino: -+ u.xino_itrunc = &opt->xino_itrunc; -+ AuDbg("trunc_xino %d\n", u.xino_itrunc->bindex); -+ break; -+ case Opt_noxino: -+ AuLabel(noxino); -+ break; -+ case Opt_trunc_xib: -+ AuLabel(trunc_xib); -+ break; -+ case Opt_notrunc_xib: -+ AuLabel(notrunc_xib); -+ break; -+ case Opt_shwh: -+ AuLabel(shwh); -+ break; -+ case Opt_noshwh: -+ AuLabel(noshwh); -+ break; -+ case Opt_dirperm1: -+ AuLabel(dirperm1); -+ break; -+ case Opt_nodirperm1: -+ AuLabel(nodirperm1); -+ break; -+ case Opt_plink: -+ AuLabel(plink); -+ break; -+ case Opt_noplink: -+ AuLabel(noplink); -+ break; -+ case Opt_list_plink: -+ AuLabel(list_plink); -+ break; -+ case Opt_udba: -+ AuDbg("udba %d, %s\n", -+ opt->udba, au_optstr_udba(opt->udba)); -+ break; -+ case Opt_dio: -+ AuLabel(dio); -+ break; -+ case Opt_nodio: -+ AuLabel(nodio); -+ break; -+ case Opt_diropq_a: -+ AuLabel(diropq_a); -+ break; -+ case Opt_diropq_w: -+ AuLabel(diropq_w); -+ break; -+ case Opt_warn_perm: -+ AuLabel(warn_perm); -+ break; -+ case Opt_nowarn_perm: -+ AuLabel(nowarn_perm); -+ break; -+ case Opt_refrof: -+ AuLabel(refrof); -+ break; -+ case Opt_norefrof: -+ AuLabel(norefrof); -+ break; -+ case Opt_verbose: -+ AuLabel(verbose); -+ break; -+ case 
Opt_noverbose: -+ AuLabel(noverbose); -+ break; -+ case Opt_sum: -+ AuLabel(sum); -+ break; -+ case Opt_nosum: -+ AuLabel(nosum); -+ break; -+ case Opt_wsum: -+ AuLabel(wsum); -+ break; -+ case Opt_wbr_create: -+ u.create = &opt->wbr_create; -+ AuDbg("create %d, %s\n", u.create->wbr_create, -+ au_optstr_wbr_create(u.create->wbr_create)); -+ switch (u.create->wbr_create) { -+ case AuWbrCreate_MFSV: -+ case AuWbrCreate_PMFSV: -+ AuDbg("%d sec\n", u.create->mfs_second); -+ break; -+ case AuWbrCreate_MFSRR: -+ AuDbg("%llu watermark\n", -+ u.create->mfsrr_watermark); -+ break; -+ case AuWbrCreate_MFSRRV: -+ case AuWbrCreate_PMFSRRV: -+ AuDbg("%llu watermark, %d sec\n", -+ u.create->mfsrr_watermark, -+ u.create->mfs_second); -+ break; -+ } -+ break; -+ case Opt_wbr_copyup: -+ AuDbg("copyup %d, %s\n", opt->wbr_copyup, -+ au_optstr_wbr_copyup(opt->wbr_copyup)); -+ break; -+ case Opt_fhsm_sec: -+ AuDbg("fhsm_sec %u\n", opt->fhsm_second); -+ break; -+ case Opt_acl: -+ AuLabel(acl); -+ break; -+ case Opt_noacl: -+ AuLabel(noacl); -+ break; -+ default: -+ BUG(); -+ } -+ opt++; -+ } -+#endif -+} -+ -+void au_opts_free(struct au_opts *opts) -+{ -+ struct au_opt *opt; -+ -+ opt = opts->opt; -+ while (opt->type != Opt_tail) { -+ switch (opt->type) { -+ case Opt_add: -+ case Opt_append: -+ case Opt_prepend: -+ path_put(&opt->add.path); -+ break; -+ case Opt_del: -+ case Opt_idel: -+ path_put(&opt->del.h_path); -+ break; -+ case Opt_mod: -+ case Opt_imod: -+ dput(opt->mod.h_root); -+ break; -+ case Opt_xino: -+ fput(opt->xino.file); -+ break; -+ } -+ opt++; -+ } -+} -+ -+static int opt_add(struct au_opt *opt, char *opt_str, unsigned long sb_flags, -+ aufs_bindex_t bindex) -+{ -+ int err; -+ struct au_opt_add *add = &opt->add; -+ char *p; -+ -+ add->bindex = bindex; -+ add->perm = AuBrPerm_RO; -+ add->pathname = opt_str; -+ p = strchr(opt_str, '='); -+ if (p) { -+ *p++ = 0; -+ if (*p) -+ add->perm = br_perm_val(p); -+ } -+ -+ err = vfsub_kern_path(add->pathname, lkup_dirflags, 
&add->path); -+ if (!err) { -+ if (!p) { -+ add->perm = AuBrPerm_RO; -+ if (au_test_fs_rr(add->path.dentry->d_sb)) -+ add->perm = AuBrPerm_RR; -+ else if (!bindex && !(sb_flags & MS_RDONLY)) -+ add->perm = AuBrPerm_RW; -+ } -+ opt->type = Opt_add; -+ goto out; -+ } -+ pr_err("lookup failed %s (%d)\n", add->pathname, err); -+ err = -EINVAL; -+ -+out: -+ return err; -+} -+ -+static int au_opts_parse_del(struct au_opt_del *del, substring_t args[]) -+{ -+ int err; -+ -+ del->pathname = args[0].from; -+ AuDbg("del path %s\n", del->pathname); -+ -+ err = vfsub_kern_path(del->pathname, lkup_dirflags, &del->h_path); -+ if (unlikely(err)) -+ pr_err("lookup failed %s (%d)\n", del->pathname, err); -+ -+ return err; -+} -+ -+#if 0 /* reserved for future use */ -+static int au_opts_parse_idel(struct super_block *sb, aufs_bindex_t bindex, -+ struct au_opt_del *del, substring_t args[]) -+{ -+ int err; -+ struct dentry *root; -+ -+ err = -EINVAL; -+ root = sb->s_root; -+ aufs_read_lock(root, AuLock_FLUSH); -+ if (bindex < 0 || au_sbend(sb) < bindex) { -+ pr_err("out of bounds, %d\n", bindex); -+ goto out; -+ } -+ -+ err = 0; -+ del->h_path.dentry = dget(au_h_dptr(root, bindex)); -+ del->h_path.mnt = mntget(au_sbr_mnt(sb, bindex)); -+ -+out: -+ aufs_read_unlock(root, !AuLock_IR); -+ return err; -+} -+#endif -+ -+static int noinline_for_stack -+au_opts_parse_mod(struct au_opt_mod *mod, substring_t args[]) -+{ -+ int err; -+ struct path path; -+ char *p; -+ -+ err = -EINVAL; -+ mod->path = args[0].from; -+ p = strchr(mod->path, '='); -+ if (unlikely(!p)) { -+ pr_err("no permssion %s\n", args[0].from); -+ goto out; -+ } -+ -+ *p++ = 0; -+ err = vfsub_kern_path(mod->path, lkup_dirflags, &path); -+ if (unlikely(err)) { -+ pr_err("lookup failed %s (%d)\n", mod->path, err); -+ goto out; -+ } -+ -+ mod->perm = br_perm_val(p); -+ AuDbg("mod path %s, perm 0x%x, %s\n", mod->path, mod->perm, p); -+ mod->h_root = dget(path.dentry); -+ path_put(&path); -+ -+out: -+ return err; -+} -+ -+#if 0 /* 
reserved for future use */ -+static int au_opts_parse_imod(struct super_block *sb, aufs_bindex_t bindex, -+ struct au_opt_mod *mod, substring_t args[]) -+{ -+ int err; -+ struct dentry *root; -+ -+ err = -EINVAL; -+ root = sb->s_root; -+ aufs_read_lock(root, AuLock_FLUSH); -+ if (bindex < 0 || au_sbend(sb) < bindex) { -+ pr_err("out of bounds, %d\n", bindex); -+ goto out; -+ } -+ -+ err = 0; -+ mod->perm = br_perm_val(args[1].from); -+ AuDbg("mod path %s, perm 0x%x, %s\n", -+ mod->path, mod->perm, args[1].from); -+ mod->h_root = dget(au_h_dptr(root, bindex)); -+ -+out: -+ aufs_read_unlock(root, !AuLock_IR); -+ return err; -+} -+#endif -+ -+static int au_opts_parse_xino(struct super_block *sb, struct au_opt_xino *xino, -+ substring_t args[]) -+{ -+ int err; -+ struct file *file; -+ -+ file = au_xino_create(sb, args[0].from, /*silent*/0); -+ err = PTR_ERR(file); -+ if (IS_ERR(file)) -+ goto out; -+ -+ err = -EINVAL; -+ if (unlikely(file->f_dentry->d_sb == sb)) { -+ fput(file); -+ pr_err("%s must be outside\n", args[0].from); -+ goto out; -+ } -+ -+ err = 0; -+ xino->file = file; -+ xino->path = args[0].from; -+ -+out: -+ return err; -+} -+ -+static int noinline_for_stack -+au_opts_parse_xino_itrunc_path(struct super_block *sb, -+ struct au_opt_xino_itrunc *xino_itrunc, -+ substring_t args[]) -+{ -+ int err; -+ aufs_bindex_t bend, bindex; -+ struct path path; -+ struct dentry *root; -+ -+ err = vfsub_kern_path(args[0].from, lkup_dirflags, &path); -+ if (unlikely(err)) { -+ pr_err("lookup failed %s (%d)\n", args[0].from, err); -+ goto out; -+ } -+ -+ xino_itrunc->bindex = -1; -+ root = sb->s_root; -+ aufs_read_lock(root, AuLock_FLUSH); -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ if (au_h_dptr(root, bindex) == path.dentry) { -+ xino_itrunc->bindex = bindex; -+ break; -+ } -+ } -+ aufs_read_unlock(root, !AuLock_IR); -+ path_put(&path); -+ -+ if (unlikely(xino_itrunc->bindex < 0)) { -+ pr_err("no such branch %s\n", args[0].from); -+ err = 
-EINVAL; -+ } -+ -+out: -+ return err; -+} -+ -+/* called without aufs lock */ -+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts) -+{ -+ int err, n, token; -+ aufs_bindex_t bindex; -+ unsigned char skipped; -+ struct dentry *root; -+ struct au_opt *opt, *opt_tail; -+ char *opt_str; -+ /* reduce the stack space */ -+ union { -+ struct au_opt_xino_itrunc *xino_itrunc; -+ struct au_opt_wbr_create *create; -+ } u; -+ struct { -+ substring_t args[MAX_OPT_ARGS]; -+ } *a; -+ -+ err = -ENOMEM; -+ a = kmalloc(sizeof(*a), GFP_NOFS); -+ if (unlikely(!a)) -+ goto out; -+ -+ root = sb->s_root; -+ err = 0; -+ bindex = 0; -+ opt = opts->opt; -+ opt_tail = opt + opts->max_opt - 1; -+ opt->type = Opt_tail; -+ while (!err && (opt_str = strsep(&str, ",")) && *opt_str) { -+ err = -EINVAL; -+ skipped = 0; -+ token = match_token(opt_str, options, a->args); -+ switch (token) { -+ case Opt_br: -+ err = 0; -+ while (!err && (opt_str = strsep(&a->args[0].from, ":")) -+ && *opt_str) { -+ err = opt_add(opt, opt_str, opts->sb_flags, -+ bindex++); -+ if (unlikely(!err && ++opt > opt_tail)) { -+ err = -E2BIG; -+ break; -+ } -+ opt->type = Opt_tail; -+ skipped = 1; -+ } -+ break; -+ case Opt_add: -+ if (unlikely(match_int(&a->args[0], &n))) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ bindex = n; -+ err = opt_add(opt, a->args[1].from, opts->sb_flags, -+ bindex); -+ if (!err) -+ opt->type = token; -+ break; -+ case Opt_append: -+ err = opt_add(opt, a->args[0].from, opts->sb_flags, -+ /*dummy bindex*/1); -+ if (!err) -+ opt->type = token; -+ break; -+ case Opt_prepend: -+ err = opt_add(opt, a->args[0].from, opts->sb_flags, -+ /*bindex*/0); -+ if (!err) -+ opt->type = token; -+ break; -+ case Opt_del: -+ err = au_opts_parse_del(&opt->del, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+#if 0 /* reserved for future use */ -+ case Opt_idel: -+ del->pathname = "(indexed)"; -+ if (unlikely(match_int(&args[0], &n))) { -+ pr_err("bad integer in %s\n", 
opt_str); -+ break; -+ } -+ err = au_opts_parse_idel(sb, n, &opt->del, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+#endif -+ case Opt_mod: -+ err = au_opts_parse_mod(&opt->mod, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+#ifdef IMOD /* reserved for future use */ -+ case Opt_imod: -+ u.mod->path = "(indexed)"; -+ if (unlikely(match_int(&a->args[0], &n))) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ err = au_opts_parse_imod(sb, n, &opt->mod, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+#endif -+ case Opt_xino: -+ err = au_opts_parse_xino(sb, &opt->xino, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+ -+ case Opt_trunc_xino_path: -+ err = au_opts_parse_xino_itrunc_path -+ (sb, &opt->xino_itrunc, a->args); -+ if (!err) -+ opt->type = token; -+ break; -+ -+ case Opt_itrunc_xino: -+ u.xino_itrunc = &opt->xino_itrunc; -+ if (unlikely(match_int(&a->args[0], &n))) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ u.xino_itrunc->bindex = n; -+ aufs_read_lock(root, AuLock_FLUSH); -+ if (n < 0 || au_sbend(sb) < n) { -+ pr_err("out of bounds, %d\n", n); -+ aufs_read_unlock(root, !AuLock_IR); -+ break; -+ } -+ aufs_read_unlock(root, !AuLock_IR); -+ err = 0; -+ opt->type = token; -+ break; -+ -+ case Opt_dirwh: -+ if (unlikely(match_int(&a->args[0], &opt->dirwh))) -+ break; -+ err = 0; -+ opt->type = token; -+ break; -+ -+ case Opt_rdcache: -+ if (unlikely(match_int(&a->args[0], &n))) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ if (unlikely(n > AUFS_RDCACHE_MAX)) { -+ pr_err("rdcache must be smaller than %d\n", -+ AUFS_RDCACHE_MAX); -+ break; -+ } -+ opt->rdcache = n; -+ err = 0; -+ opt->type = token; -+ break; -+ case Opt_rdblk: -+ if (unlikely(match_int(&a->args[0], &n) -+ || n < 0 -+ || n > KMALLOC_MAX_SIZE)) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ if (unlikely(n && n < NAME_MAX)) { -+ pr_err("rdblk must be larger than %d\n", -+ NAME_MAX); -+ break; -+ } -+ 
opt->rdblk = n; -+ err = 0; -+ opt->type = token; -+ break; -+ case Opt_rdhash: -+ if (unlikely(match_int(&a->args[0], &n) -+ || n < 0 -+ || n * sizeof(struct hlist_head) -+ > KMALLOC_MAX_SIZE)) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ opt->rdhash = n; -+ err = 0; -+ opt->type = token; -+ break; -+ -+ case Opt_trunc_xino: -+ case Opt_notrunc_xino: -+ case Opt_noxino: -+ case Opt_trunc_xib: -+ case Opt_notrunc_xib: -+ case Opt_shwh: -+ case Opt_noshwh: -+ case Opt_dirperm1: -+ case Opt_nodirperm1: -+ case Opt_plink: -+ case Opt_noplink: -+ case Opt_list_plink: -+ case Opt_dio: -+ case Opt_nodio: -+ case Opt_diropq_a: -+ case Opt_diropq_w: -+ case Opt_warn_perm: -+ case Opt_nowarn_perm: -+ case Opt_refrof: -+ case Opt_norefrof: -+ case Opt_verbose: -+ case Opt_noverbose: -+ case Opt_sum: -+ case Opt_nosum: -+ case Opt_wsum: -+ case Opt_rdblk_def: -+ case Opt_rdhash_def: -+ case Opt_acl: -+ case Opt_noacl: -+ err = 0; -+ opt->type = token; -+ break; -+ -+ case Opt_udba: -+ opt->udba = udba_val(a->args[0].from); -+ if (opt->udba >= 0) { -+ err = 0; -+ opt->type = token; -+ } else -+ pr_err("wrong value, %s\n", opt_str); -+ break; -+ -+ case Opt_wbr_create: -+ u.create = &opt->wbr_create; -+ u.create->wbr_create -+ = au_wbr_create_val(a->args[0].from, u.create); -+ if (u.create->wbr_create >= 0) { -+ err = 0; -+ opt->type = token; -+ } else -+ pr_err("wrong value, %s\n", opt_str); -+ break; -+ case Opt_wbr_copyup: -+ opt->wbr_copyup = au_wbr_copyup_val(a->args[0].from); -+ if (opt->wbr_copyup >= 0) { -+ err = 0; -+ opt->type = token; -+ } else -+ pr_err("wrong value, %s\n", opt_str); -+ break; -+ -+ case Opt_fhsm_sec: -+ if (unlikely(match_int(&a->args[0], &n) -+ || n < 0)) { -+ pr_err("bad integer in %s\n", opt_str); -+ break; -+ } -+ if (sysaufs_brs) { -+ opt->fhsm_second = n; -+ opt->type = token; -+ } else -+ pr_warn("ignored %s\n", opt_str); -+ err = 0; -+ break; -+ -+ case Opt_ignore: -+ pr_warn("ignored %s\n", opt_str); -+ /*FALLTHROUGH*/ -+ 
case Opt_ignore_silent: -+ skipped = 1; -+ err = 0; -+ break; -+ case Opt_err: -+ pr_err("unknown option %s\n", opt_str); -+ break; -+ } -+ -+ if (!err && !skipped) { -+ if (unlikely(++opt > opt_tail)) { -+ err = -E2BIG; -+ opt--; -+ opt->type = Opt_tail; -+ break; -+ } -+ opt->type = Opt_tail; -+ } -+ } -+ -+ kfree(a); -+ dump_opts(opts); -+ if (unlikely(err)) -+ au_opts_free(opts); -+ -+out: -+ return err; -+} -+ -+static int au_opt_wbr_create(struct super_block *sb, -+ struct au_opt_wbr_create *create) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ err = 1; /* handled */ -+ sbinfo = au_sbi(sb); -+ if (sbinfo->si_wbr_create_ops->fin) { -+ err = sbinfo->si_wbr_create_ops->fin(sb); -+ if (!err) -+ err = 1; -+ } -+ -+ sbinfo->si_wbr_create = create->wbr_create; -+ sbinfo->si_wbr_create_ops = au_wbr_create_ops + create->wbr_create; -+ switch (create->wbr_create) { -+ case AuWbrCreate_MFSRRV: -+ case AuWbrCreate_MFSRR: -+ case AuWbrCreate_PMFSRR: -+ case AuWbrCreate_PMFSRRV: -+ sbinfo->si_wbr_mfs.mfsrr_watermark = create->mfsrr_watermark; -+ /*FALLTHROUGH*/ -+ case AuWbrCreate_MFS: -+ case AuWbrCreate_MFSV: -+ case AuWbrCreate_PMFS: -+ case AuWbrCreate_PMFSV: -+ sbinfo->si_wbr_mfs.mfs_expire -+ = msecs_to_jiffies(create->mfs_second * MSEC_PER_SEC); -+ break; -+ } -+ -+ if (sbinfo->si_wbr_create_ops->init) -+ sbinfo->si_wbr_create_ops->init(sb); /* ignore */ -+ -+ return err; -+} -+ -+/* -+ * returns, -+ * plus: processed without an error -+ * zero: unprocessed -+ */ -+static int au_opt_simple(struct super_block *sb, struct au_opt *opt, -+ struct au_opts *opts) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ err = 1; /* handled */ -+ sbinfo = au_sbi(sb); -+ switch (opt->type) { -+ case Opt_udba: -+ sbinfo->si_mntflags &= ~AuOptMask_UDBA; -+ sbinfo->si_mntflags |= opt->udba; -+ opts->given_udba |= opt->udba; -+ break; -+ -+ case Opt_plink: -+ au_opt_set(sbinfo->si_mntflags, PLINK); -+ break; -+ case 
Opt_noplink: -+ if (au_opt_test(sbinfo->si_mntflags, PLINK)) -+ au_plink_put(sb, /*verbose*/1); -+ au_opt_clr(sbinfo->si_mntflags, PLINK); -+ break; -+ case Opt_list_plink: -+ if (au_opt_test(sbinfo->si_mntflags, PLINK)) -+ au_plink_list(sb); -+ break; -+ -+ case Opt_dio: -+ au_opt_set(sbinfo->si_mntflags, DIO); -+ au_fset_opts(opts->flags, REFRESH_DYAOP); -+ break; -+ case Opt_nodio: -+ au_opt_clr(sbinfo->si_mntflags, DIO); -+ au_fset_opts(opts->flags, REFRESH_DYAOP); -+ break; -+ -+ case Opt_fhsm_sec: -+ au_fhsm_set(sbinfo, opt->fhsm_second); -+ break; -+ -+ case Opt_diropq_a: -+ au_opt_set(sbinfo->si_mntflags, ALWAYS_DIROPQ); -+ break; -+ case Opt_diropq_w: -+ au_opt_clr(sbinfo->si_mntflags, ALWAYS_DIROPQ); -+ break; -+ -+ case Opt_warn_perm: -+ au_opt_set(sbinfo->si_mntflags, WARN_PERM); -+ break; -+ case Opt_nowarn_perm: -+ au_opt_clr(sbinfo->si_mntflags, WARN_PERM); -+ break; -+ -+ case Opt_refrof: -+ au_opt_set(sbinfo->si_mntflags, REFROF); -+ break; -+ case Opt_norefrof: -+ au_opt_clr(sbinfo->si_mntflags, REFROF); -+ break; -+ -+ case Opt_verbose: -+ au_opt_set(sbinfo->si_mntflags, VERBOSE); -+ break; -+ case Opt_noverbose: -+ au_opt_clr(sbinfo->si_mntflags, VERBOSE); -+ break; -+ -+ case Opt_sum: -+ au_opt_set(sbinfo->si_mntflags, SUM); -+ break; -+ case Opt_wsum: -+ au_opt_clr(sbinfo->si_mntflags, SUM); -+ au_opt_set(sbinfo->si_mntflags, SUM_W); -+ case Opt_nosum: -+ au_opt_clr(sbinfo->si_mntflags, SUM); -+ au_opt_clr(sbinfo->si_mntflags, SUM_W); -+ break; -+ -+ case Opt_wbr_create: -+ err = au_opt_wbr_create(sb, &opt->wbr_create); -+ break; -+ case Opt_wbr_copyup: -+ sbinfo->si_wbr_copyup = opt->wbr_copyup; -+ sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + opt->wbr_copyup; -+ break; -+ -+ case Opt_dirwh: -+ sbinfo->si_dirwh = opt->dirwh; -+ break; -+ -+ case Opt_rdcache: -+ sbinfo->si_rdcache -+ = msecs_to_jiffies(opt->rdcache * MSEC_PER_SEC); -+ break; -+ case Opt_rdblk: -+ sbinfo->si_rdblk = opt->rdblk; -+ break; -+ case Opt_rdblk_def: -+ 
sbinfo->si_rdblk = AUFS_RDBLK_DEF; -+ break; -+ case Opt_rdhash: -+ sbinfo->si_rdhash = opt->rdhash; -+ break; -+ case Opt_rdhash_def: -+ sbinfo->si_rdhash = AUFS_RDHASH_DEF; -+ break; -+ -+ case Opt_shwh: -+ au_opt_set(sbinfo->si_mntflags, SHWH); -+ break; -+ case Opt_noshwh: -+ au_opt_clr(sbinfo->si_mntflags, SHWH); -+ break; -+ -+ case Opt_dirperm1: -+ au_opt_set(sbinfo->si_mntflags, DIRPERM1); -+ break; -+ case Opt_nodirperm1: -+ au_opt_clr(sbinfo->si_mntflags, DIRPERM1); -+ break; -+ -+ case Opt_trunc_xino: -+ au_opt_set(sbinfo->si_mntflags, TRUNC_XINO); -+ break; -+ case Opt_notrunc_xino: -+ au_opt_clr(sbinfo->si_mntflags, TRUNC_XINO); -+ break; -+ -+ case Opt_trunc_xino_path: -+ case Opt_itrunc_xino: -+ err = au_xino_trunc(sb, opt->xino_itrunc.bindex); -+ if (!err) -+ err = 1; -+ break; -+ -+ case Opt_trunc_xib: -+ au_fset_opts(opts->flags, TRUNC_XIB); -+ break; -+ case Opt_notrunc_xib: -+ au_fclr_opts(opts->flags, TRUNC_XIB); -+ break; -+ -+ case Opt_acl: -+ sb->s_flags |= MS_POSIXACL; -+ break; -+ case Opt_noacl: -+ sb->s_flags &= ~MS_POSIXACL; -+ break; -+ -+ default: -+ err = 0; -+ break; -+ } -+ -+ return err; -+} -+ -+/* -+ * returns tri-state. 
-+ * plus: processed without an error -+ * zero: unprocessed -+ * minus: error -+ */ -+static int au_opt_br(struct super_block *sb, struct au_opt *opt, -+ struct au_opts *opts) -+{ -+ int err, do_refresh; -+ -+ err = 0; -+ switch (opt->type) { -+ case Opt_append: -+ opt->add.bindex = au_sbend(sb) + 1; -+ if (opt->add.bindex < 0) -+ opt->add.bindex = 0; -+ goto add; -+ case Opt_prepend: -+ opt->add.bindex = 0; -+ add: /* indented label */ -+ case Opt_add: -+ err = au_br_add(sb, &opt->add, -+ au_ftest_opts(opts->flags, REMOUNT)); -+ if (!err) { -+ err = 1; -+ au_fset_opts(opts->flags, REFRESH); -+ } -+ break; -+ -+ case Opt_del: -+ case Opt_idel: -+ err = au_br_del(sb, &opt->del, -+ au_ftest_opts(opts->flags, REMOUNT)); -+ if (!err) { -+ err = 1; -+ au_fset_opts(opts->flags, TRUNC_XIB); -+ au_fset_opts(opts->flags, REFRESH); -+ } -+ break; -+ -+ case Opt_mod: -+ case Opt_imod: -+ err = au_br_mod(sb, &opt->mod, -+ au_ftest_opts(opts->flags, REMOUNT), -+ &do_refresh); -+ if (!err) { -+ err = 1; -+ if (do_refresh) -+ au_fset_opts(opts->flags, REFRESH); -+ } -+ break; -+ } -+ -+ return err; -+} -+ -+static int au_opt_xino(struct super_block *sb, struct au_opt *opt, -+ struct au_opt_xino **opt_xino, -+ struct au_opts *opts) -+{ -+ int err; -+ aufs_bindex_t bend, bindex; -+ struct dentry *root, *parent, *h_root; -+ -+ err = 0; -+ switch (opt->type) { -+ case Opt_xino: -+ err = au_xino_set(sb, &opt->xino, -+ !!au_ftest_opts(opts->flags, REMOUNT)); -+ if (unlikely(err)) -+ break; -+ -+ *opt_xino = &opt->xino; -+ au_xino_brid_set(sb, -1); -+ -+ /* safe d_parent access */ -+ parent = opt->xino.file->f_dentry->d_parent; -+ root = sb->s_root; -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ h_root = au_h_dptr(root, bindex); -+ if (h_root == parent) { -+ au_xino_brid_set(sb, au_sbr_id(sb, bindex)); -+ break; -+ } -+ } -+ break; -+ -+ case Opt_noxino: -+ au_xino_clr(sb); -+ au_xino_brid_set(sb, -1); -+ *opt_xino = (void *)-1; -+ break; -+ } -+ -+ return 
err; -+} -+ -+int au_opts_verify(struct super_block *sb, unsigned long sb_flags, -+ unsigned int pending) -+{ -+ int err, fhsm; -+ aufs_bindex_t bindex, bend; -+ unsigned char do_plink, skip, do_free, can_no_dreval; -+ struct au_branch *br; -+ struct au_wbr *wbr; -+ struct dentry *root, *dentry; -+ struct inode *dir, *h_dir; -+ struct au_sbinfo *sbinfo; -+ struct au_hinode *hdir; -+ -+ SiMustAnyLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!(sbinfo->si_mntflags & AuOptMask_UDBA)); -+ -+ if (!(sb_flags & MS_RDONLY)) { -+ if (unlikely(!au_br_writable(au_sbr_perm(sb, 0)))) -+ pr_warn("first branch should be rw\n"); -+ if (unlikely(au_opt_test(sbinfo->si_mntflags, SHWH))) -+ pr_warn_once("shwh should be used with ro\n"); -+ } -+ -+ if (au_opt_test((sbinfo->si_mntflags | pending), UDBA_HNOTIFY) -+ && !au_opt_test(sbinfo->si_mntflags, XINO)) -+ pr_warn_once("udba=*notify requires xino\n"); -+ -+ if (au_opt_test(sbinfo->si_mntflags, DIRPERM1)) -+ pr_warn_once("dirperm1 breaks the protection" -+ " by the permission bits on the lower branch\n"); -+ -+ err = 0; -+ fhsm = 0; -+ root = sb->s_root; -+ dir = root->d_inode; -+ do_plink = !!au_opt_test(sbinfo->si_mntflags, PLINK); -+ can_no_dreval = !!au_opt_test((sbinfo->si_mntflags | pending), -+ UDBA_NONE); -+ bend = au_sbend(sb); -+ for (bindex = 0; !err && bindex <= bend; bindex++) { -+ skip = 0; -+ h_dir = au_h_iptr(dir, bindex); -+ br = au_sbr(sb, bindex); -+ -+ if ((br->br_perm & AuBrAttr_ICEX) -+ && !h_dir->i_op->listxattr) -+ br->br_perm &= ~AuBrAttr_ICEX; -+#if 0 -+ if ((br->br_perm & AuBrAttr_ICEX_SEC) -+ && (au_br_sb(br)->s_flags & MS_NOSEC)) -+ br->br_perm &= ~AuBrAttr_ICEX_SEC; -+#endif -+ -+ do_free = 0; -+ wbr = br->br_wbr; -+ if (wbr) -+ wbr_wh_read_lock(wbr); -+ -+ if (!au_br_writable(br->br_perm)) { -+ do_free = !!wbr; -+ skip = (!wbr -+ || (!wbr->wbr_whbase -+ && !wbr->wbr_plink -+ && !wbr->wbr_orph)); -+ } else if (!au_br_wh_linkable(br->br_perm)) { -+ /* skip = (!br->br_whbase && !br->br_orph); */ -+ 
skip = (!wbr || !wbr->wbr_whbase); -+ if (skip && wbr) { -+ if (do_plink) -+ skip = !!wbr->wbr_plink; -+ else -+ skip = !wbr->wbr_plink; -+ } -+ } else { -+ /* skip = (br->br_whbase && br->br_ohph); */ -+ skip = (wbr && wbr->wbr_whbase); -+ if (skip) { -+ if (do_plink) -+ skip = !!wbr->wbr_plink; -+ else -+ skip = !wbr->wbr_plink; -+ } -+ } -+ if (wbr) -+ wbr_wh_read_unlock(wbr); -+ -+ if (can_no_dreval) { -+ dentry = br->br_path.dentry; -+ spin_lock(&dentry->d_lock); -+ if (dentry->d_flags & -+ (DCACHE_OP_REVALIDATE | DCACHE_OP_WEAK_REVALIDATE)) -+ can_no_dreval = 0; -+ spin_unlock(&dentry->d_lock); -+ } -+ -+ if (au_br_fhsm(br->br_perm)) { -+ fhsm++; -+ AuDebugOn(!br->br_fhsm); -+ } -+ -+ if (skip) -+ continue; -+ -+ hdir = au_hi(dir, bindex); -+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT); -+ if (wbr) -+ wbr_wh_write_lock(wbr); -+ err = au_wh_init(br, sb); -+ if (wbr) -+ wbr_wh_write_unlock(wbr); -+ au_hn_imtx_unlock(hdir); -+ -+ if (!err && do_free) { -+ kfree(wbr); -+ br->br_wbr = NULL; -+ } -+ } -+ -+ if (can_no_dreval) -+ au_fset_si(sbinfo, NO_DREVAL); -+ else -+ au_fclr_si(sbinfo, NO_DREVAL); -+ -+ if (fhsm >= 2) { -+ au_fset_si(sbinfo, FHSM); -+ for (bindex = bend; bindex >= 0; bindex--) { -+ br = au_sbr(sb, bindex); -+ if (au_br_fhsm(br->br_perm)) { -+ au_fhsm_set_bottom(sb, bindex); -+ break; -+ } -+ } -+ } else { -+ au_fclr_si(sbinfo, FHSM); -+ au_fhsm_set_bottom(sb, -1); -+ } -+ -+ return err; -+} -+ -+int au_opts_mount(struct super_block *sb, struct au_opts *opts) -+{ -+ int err; -+ unsigned int tmp; -+ aufs_bindex_t bindex, bend; -+ struct au_opt *opt; -+ struct au_opt_xino *opt_xino, xino; -+ struct au_sbinfo *sbinfo; -+ struct au_branch *br; -+ struct inode *dir; -+ -+ SiMustWriteLock(sb); -+ -+ err = 0; -+ opt_xino = NULL; -+ opt = opts->opt; -+ while (err >= 0 && opt->type != Opt_tail) -+ err = au_opt_simple(sb, opt++, opts); -+ if (err > 0) -+ err = 0; -+ else if (unlikely(err < 0)) -+ goto out; -+ -+ /* disable xino and udba temporary */ -+ 
sbinfo = au_sbi(sb); -+ tmp = sbinfo->si_mntflags; -+ au_opt_clr(sbinfo->si_mntflags, XINO); -+ au_opt_set_udba(sbinfo->si_mntflags, UDBA_REVAL); -+ -+ opt = opts->opt; -+ while (err >= 0 && opt->type != Opt_tail) -+ err = au_opt_br(sb, opt++, opts); -+ if (err > 0) -+ err = 0; -+ else if (unlikely(err < 0)) -+ goto out; -+ -+ bend = au_sbend(sb); -+ if (unlikely(bend < 0)) { -+ err = -EINVAL; -+ pr_err("no branches\n"); -+ goto out; -+ } -+ -+ if (au_opt_test(tmp, XINO)) -+ au_opt_set(sbinfo->si_mntflags, XINO); -+ opt = opts->opt; -+ while (!err && opt->type != Opt_tail) -+ err = au_opt_xino(sb, opt++, &opt_xino, opts); -+ if (unlikely(err)) -+ goto out; -+ -+ err = au_opts_verify(sb, sb->s_flags, tmp); -+ if (unlikely(err)) -+ goto out; -+ -+ /* restore xino */ -+ if (au_opt_test(tmp, XINO) && !opt_xino) { -+ xino.file = au_xino_def(sb); -+ err = PTR_ERR(xino.file); -+ if (IS_ERR(xino.file)) -+ goto out; -+ -+ err = au_xino_set(sb, &xino, /*remount*/0); -+ fput(xino.file); -+ if (unlikely(err)) -+ goto out; -+ } -+ -+ /* restore udba */ -+ tmp &= AuOptMask_UDBA; -+ sbinfo->si_mntflags &= ~AuOptMask_UDBA; -+ sbinfo->si_mntflags |= tmp; -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ err = au_hnotify_reset_br(tmp, br, br->br_perm); -+ if (unlikely(err)) -+ AuIOErr("hnotify failed on br %d, %d, ignored\n", -+ bindex, err); -+ /* go on even if err */ -+ } -+ if (au_opt_test(tmp, UDBA_HNOTIFY)) { -+ dir = sb->s_root->d_inode; -+ au_hn_reset(dir, au_hi_flags(dir, /*isdir*/1) & ~AuHi_XINO); -+ } -+ -+out: -+ return err; -+} -+ -+int au_opts_remount(struct super_block *sb, struct au_opts *opts) -+{ -+ int err, rerr; -+ unsigned char no_dreval; -+ struct inode *dir; -+ struct au_opt_xino *opt_xino; -+ struct au_opt *opt; -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ err = 0; -+ dir = sb->s_root->d_inode; -+ sbinfo = au_sbi(sb); -+ opt_xino = NULL; -+ opt = opts->opt; -+ while (err >= 0 && opt->type != 
Opt_tail) { -+ err = au_opt_simple(sb, opt, opts); -+ if (!err) -+ err = au_opt_br(sb, opt, opts); -+ if (!err) -+ err = au_opt_xino(sb, opt, &opt_xino, opts); -+ opt++; -+ } -+ if (err > 0) -+ err = 0; -+ AuTraceErr(err); -+ /* go on even err */ -+ -+ no_dreval = !!au_ftest_si(sbinfo, NO_DREVAL); -+ rerr = au_opts_verify(sb, opts->sb_flags, /*pending*/0); -+ if (unlikely(rerr && !err)) -+ err = rerr; -+ -+ if (no_dreval != !!au_ftest_si(sbinfo, NO_DREVAL)) -+ au_fset_opts(opts->flags, REFRESH_IDOP); -+ -+ if (au_ftest_opts(opts->flags, TRUNC_XIB)) { -+ rerr = au_xib_trunc(sb); -+ if (unlikely(rerr && !err)) -+ err = rerr; -+ } -+ -+ /* will be handled by the caller */ -+ if (!au_ftest_opts(opts->flags, REFRESH) -+ && (opts->given_udba -+ || au_opt_test(sbinfo->si_mntflags, XINO) -+ || au_ftest_opts(opts->flags, REFRESH_IDOP) -+ )) -+ au_fset_opts(opts->flags, REFRESH); -+ -+ AuDbg("status 0x%x\n", opts->flags); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+unsigned int au_opt_udba(struct super_block *sb) -+{ -+ return au_mntflags(sb) & AuOptMask_UDBA; -+} -diff --git a/fs/aufs/opts.h b/fs/aufs/opts.h -new file mode 100644 -index 0000000..50949a0 ---- /dev/null -+++ b/fs/aufs/opts.h -@@ -0,0 +1,212 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * mount options/flags -+ */ -+ -+#ifndef __AUFS_OPTS_H__ -+#define __AUFS_OPTS_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+ -+struct file; -+struct super_block; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* mount flags */ -+#define AuOpt_XINO 1 /* external inode number bitmap -+ and translation table */ -+#define AuOpt_TRUNC_XINO (1 << 1) /* truncate xino files */ -+#define AuOpt_UDBA_NONE (1 << 2) /* users direct branch access */ -+#define AuOpt_UDBA_REVAL (1 << 3) -+#define AuOpt_UDBA_HNOTIFY (1 << 4) -+#define AuOpt_SHWH (1 << 5) /* show whiteout */ -+#define AuOpt_PLINK (1 << 6) /* pseudo-link */ -+#define AuOpt_DIRPERM1 (1 << 7) /* ignore the lower dir's perm -+ bits */ -+#define AuOpt_REFROF (1 << 8) /* unimplemented */ -+#define AuOpt_ALWAYS_DIROPQ (1 << 9) /* policy to creating diropq */ -+#define AuOpt_SUM (1 << 10) /* summation for statfs(2) */ -+#define AuOpt_SUM_W (1 << 11) /* unimplemented */ -+#define AuOpt_WARN_PERM (1 << 12) /* warn when add-branch */ -+#define AuOpt_VERBOSE (1 << 13) /* busy inode when del-branch */ -+#define AuOpt_DIO (1 << 14) /* direct io */ -+ -+#ifndef CONFIG_AUFS_HNOTIFY -+#undef AuOpt_UDBA_HNOTIFY -+#define AuOpt_UDBA_HNOTIFY 0 -+#endif -+#ifndef CONFIG_AUFS_SHWH -+#undef AuOpt_SHWH -+#define AuOpt_SHWH 0 -+#endif -+ -+#define AuOpt_Def (AuOpt_XINO \ -+ | AuOpt_UDBA_REVAL \ -+ | AuOpt_PLINK \ -+ /* | AuOpt_DIRPERM1 */ \ -+ | AuOpt_WARN_PERM) -+#define AuOptMask_UDBA (AuOpt_UDBA_NONE \ -+ | AuOpt_UDBA_REVAL \ -+ | AuOpt_UDBA_HNOTIFY) -+ -+#define au_opt_test(flags, name) (flags & AuOpt_##name) -+#define au_opt_set(flags, name) do { \ -+ BUILD_BUG_ON(AuOpt_##name & AuOptMask_UDBA); \ -+ ((flags) |= AuOpt_##name); \ -+} while (0) -+#define au_opt_set_udba(flags, name) do { \ -+ (flags) &= ~AuOptMask_UDBA; \ -+ ((flags) |= AuOpt_##name); \ -+} while (0) -+#define au_opt_clr(flags, name) do { \ -+ ((flags) &= ~AuOpt_##name); \ -+} while (0) -+ -+static inline unsigned 
int au_opts_plink(unsigned int mntflags) -+{ -+#ifdef CONFIG_PROC_FS -+ return mntflags; -+#else -+ return mntflags & ~AuOpt_PLINK; -+#endif -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* policies to select one among multiple writable branches */ -+enum { -+ AuWbrCreate_TDP, /* top down parent */ -+ AuWbrCreate_RR, /* round robin */ -+ AuWbrCreate_MFS, /* most free space */ -+ AuWbrCreate_MFSV, /* mfs with seconds */ -+ AuWbrCreate_MFSRR, /* mfs then rr */ -+ AuWbrCreate_MFSRRV, /* mfs then rr with seconds */ -+ AuWbrCreate_PMFS, /* parent and mfs */ -+ AuWbrCreate_PMFSV, /* parent and mfs with seconds */ -+ AuWbrCreate_PMFSRR, /* parent, mfs and round-robin */ -+ AuWbrCreate_PMFSRRV, /* plus seconds */ -+ -+ AuWbrCreate_Def = AuWbrCreate_TDP -+}; -+ -+enum { -+ AuWbrCopyup_TDP, /* top down parent */ -+ AuWbrCopyup_BUP, /* bottom up parent */ -+ AuWbrCopyup_BU, /* bottom up */ -+ -+ AuWbrCopyup_Def = AuWbrCopyup_TDP -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_opt_add { -+ aufs_bindex_t bindex; -+ char *pathname; -+ int perm; -+ struct path path; -+}; -+ -+struct au_opt_del { -+ char *pathname; -+ struct path h_path; -+}; -+ -+struct au_opt_mod { -+ char *path; -+ int perm; -+ struct dentry *h_root; -+}; -+ -+struct au_opt_xino { -+ char *path; -+ struct file *file; -+}; -+ -+struct au_opt_xino_itrunc { -+ aufs_bindex_t bindex; -+}; -+ -+struct au_opt_wbr_create { -+ int wbr_create; -+ int mfs_second; -+ unsigned long long mfsrr_watermark; -+}; -+ -+struct au_opt { -+ int type; -+ union { -+ struct au_opt_xino xino; -+ struct au_opt_xino_itrunc xino_itrunc; -+ struct au_opt_add add; -+ struct au_opt_del del; -+ struct au_opt_mod mod; -+ int dirwh; -+ int rdcache; -+ unsigned int rdblk; -+ unsigned int rdhash; -+ int udba; -+ struct au_opt_wbr_create wbr_create; -+ int wbr_copyup; -+ unsigned int fhsm_second; -+ }; -+}; -+ -+/* opts flags */ -+#define 
AuOpts_REMOUNT 1 -+#define AuOpts_REFRESH (1 << 1) -+#define AuOpts_TRUNC_XIB (1 << 2) -+#define AuOpts_REFRESH_DYAOP (1 << 3) -+#define AuOpts_REFRESH_IDOP (1 << 4) -+#define au_ftest_opts(flags, name) ((flags) & AuOpts_##name) -+#define au_fset_opts(flags, name) \ -+ do { (flags) |= AuOpts_##name; } while (0) -+#define au_fclr_opts(flags, name) \ -+ do { (flags) &= ~AuOpts_##name; } while (0) -+ -+struct au_opts { -+ struct au_opt *opt; -+ int max_opt; -+ -+ unsigned int given_udba; -+ unsigned int flags; -+ unsigned long sb_flags; -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* opts.c */ -+void au_optstr_br_perm(au_br_perm_str_t *str, int perm); -+const char *au_optstr_udba(int udba); -+const char *au_optstr_wbr_copyup(int wbr_copyup); -+const char *au_optstr_wbr_create(int wbr_create); -+ -+void au_opts_free(struct au_opts *opts); -+int au_opts_parse(struct super_block *sb, char *str, struct au_opts *opts); -+int au_opts_verify(struct super_block *sb, unsigned long sb_flags, -+ unsigned int pending); -+int au_opts_mount(struct super_block *sb, struct au_opts *opts); -+int au_opts_remount(struct super_block *sb, struct au_opts *opts); -+ -+unsigned int au_opt_udba(struct super_block *sb); -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_OPTS_H__ */ -diff --git a/fs/aufs/plink.c b/fs/aufs/plink.c -new file mode 100644 -index 0000000..4f372ec ---- /dev/null -+++ b/fs/aufs/plink.c -@@ -0,0 +1,506 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * pseudo-link -+ */ -+ -+#include "aufs.h" -+ -+/* -+ * the pseudo-link maintenance mode. -+ * during a user process maintains the pseudo-links, -+ * prohibit adding a new plink and branch manipulation. -+ * -+ * Flags -+ * NOPLM: -+ * For entry functions which will handle plink, and i_mutex is already held -+ * in VFS. -+ * They cannot wait and should return an error at once. -+ * Callers has to check the error. -+ * NOPLMW: -+ * For entry functions which will handle plink, but i_mutex is not held -+ * in VFS. -+ * They can wait the plink maintenance mode to finish. -+ * -+ * They behave like F_SETLK and F_SETLKW. -+ * If the caller never handle plink, then both flags are unnecessary. -+ */ -+ -+int au_plink_maint(struct super_block *sb, int flags) -+{ -+ int err; -+ pid_t pid, ppid; -+ struct au_sbinfo *sbi; -+ -+ SiMustAnyLock(sb); -+ -+ err = 0; -+ if (!au_opt_test(au_mntflags(sb), PLINK)) -+ goto out; -+ -+ sbi = au_sbi(sb); -+ pid = sbi->si_plink_maint_pid; -+ if (!pid || pid == current->pid) -+ goto out; -+ -+ /* todo: it highly depends upon /sbin/mount.aufs */ -+ rcu_read_lock(); -+ ppid = task_pid_vnr(rcu_dereference(current->real_parent)); -+ rcu_read_unlock(); -+ if (pid == ppid) -+ goto out; -+ -+ if (au_ftest_lock(flags, NOPLMW)) { -+ /* if there is no i_mutex lock in VFS, we don't need to wait */ -+ /* AuDebugOn(!lockdep_depth(current)); */ -+ while (sbi->si_plink_maint_pid) { -+ si_read_unlock(sb); -+ /* gave up wake_up_bit() */ -+ wait_event(sbi->si_plink_wq, !sbi->si_plink_maint_pid); -+ -+ if (au_ftest_lock(flags, FLUSH)) -+ au_nwt_flush(&sbi->si_nowait); -+ si_noflush_read_lock(sb); -+ } -+ } else if (au_ftest_lock(flags, NOPLM)) { -+ AuDbg("ppid %d, pid %d\n", ppid, pid); -+ err = -EAGAIN; -+ } -+ -+out: -+ return err; -+} -+ -+void 
au_plink_maint_leave(struct au_sbinfo *sbinfo) -+{ -+ spin_lock(&sbinfo->si_plink_maint_lock); -+ sbinfo->si_plink_maint_pid = 0; -+ spin_unlock(&sbinfo->si_plink_maint_lock); -+ wake_up_all(&sbinfo->si_plink_wq); -+} -+ -+int au_plink_maint_enter(struct super_block *sb) -+{ -+ int err; -+ struct au_sbinfo *sbinfo; -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ /* make sure i am the only one in this fs */ -+ si_write_lock(sb, AuLock_FLUSH); -+ if (au_opt_test(au_mntflags(sb), PLINK)) { -+ spin_lock(&sbinfo->si_plink_maint_lock); -+ if (!sbinfo->si_plink_maint_pid) -+ sbinfo->si_plink_maint_pid = current->pid; -+ else -+ err = -EBUSY; -+ spin_unlock(&sbinfo->si_plink_maint_lock); -+ } -+ si_write_unlock(sb); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_DEBUG -+void au_plink_list(struct super_block *sb) -+{ -+ int i; -+ struct au_sbinfo *sbinfo; -+ struct hlist_head *plink_hlist; -+ struct au_icntnr *icntnr; -+ -+ SiMustAnyLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK)); -+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM)); -+ -+ for (i = 0; i < AuPlink_NHASH; i++) { -+ plink_hlist = &sbinfo->si_plink[i].head; -+ rcu_read_lock(); -+ hlist_for_each_entry_rcu(icntnr, plink_hlist, plink) -+ AuDbg("%lu\n", icntnr->vfs_inode.i_ino); -+ rcu_read_unlock(); -+ } -+} -+#endif -+ -+/* is the inode pseudo-linked? 
*/ -+int au_plink_test(struct inode *inode) -+{ -+ int found, i; -+ struct au_sbinfo *sbinfo; -+ struct hlist_head *plink_hlist; -+ struct au_icntnr *icntnr; -+ -+ sbinfo = au_sbi(inode->i_sb); -+ AuRwMustAnyLock(&sbinfo->si_rwsem); -+ AuDebugOn(!au_opt_test(au_mntflags(inode->i_sb), PLINK)); -+ AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM)); -+ -+ found = 0; -+ i = au_plink_hash(inode->i_ino); -+ plink_hlist = &sbinfo->si_plink[i].head; -+ rcu_read_lock(); -+ hlist_for_each_entry_rcu(icntnr, plink_hlist, plink) -+ if (&icntnr->vfs_inode == inode) { -+ found = 1; -+ break; -+ } -+ rcu_read_unlock(); -+ return found; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * generate a name for plink. -+ * the file will be stored under AUFS_WH_PLINKDIR. -+ */ -+/* 20 is max digits length of ulong 64 */ -+#define PLINK_NAME_LEN ((20 + 1) * 2) -+ -+static int plink_name(char *name, int len, struct inode *inode, -+ aufs_bindex_t bindex) -+{ -+ int rlen; -+ struct inode *h_inode; -+ -+ h_inode = au_h_iptr(inode, bindex); -+ rlen = snprintf(name, len, "%lu.%lu", inode->i_ino, h_inode->i_ino); -+ return rlen; -+} -+ -+struct au_do_plink_lkup_args { -+ struct dentry **errp; -+ struct qstr *tgtname; -+ struct dentry *h_parent; -+ struct au_branch *br; -+}; -+ -+static struct dentry *au_do_plink_lkup(struct qstr *tgtname, -+ struct dentry *h_parent, -+ struct au_branch *br) -+{ -+ struct dentry *h_dentry; -+ struct mutex *h_mtx; -+ -+ h_mtx = &h_parent->d_inode->i_mutex; -+ mutex_lock_nested(h_mtx, AuLsc_I_CHILD2); -+ h_dentry = vfsub_lkup_one(tgtname, h_parent); -+ mutex_unlock(h_mtx); -+ return h_dentry; -+} -+ -+static void au_call_do_plink_lkup(void *args) -+{ -+ struct au_do_plink_lkup_args *a = args; -+ *a->errp = au_do_plink_lkup(a->tgtname, a->h_parent, a->br); -+} -+ -+/* lookup the plink-ed @inode under the branch at @bindex */ -+struct dentry *au_plink_lkup(struct inode *inode, aufs_bindex_t bindex) -+{ -+ struct 
dentry *h_dentry, *h_parent; -+ struct au_branch *br; -+ struct inode *h_dir; -+ int wkq_err; -+ char a[PLINK_NAME_LEN]; -+ struct qstr tgtname = QSTR_INIT(a, 0); -+ -+ AuDebugOn(au_plink_maint(inode->i_sb, AuLock_NOPLM)); -+ -+ br = au_sbr(inode->i_sb, bindex); -+ h_parent = br->br_wbr->wbr_plink; -+ h_dir = h_parent->d_inode; -+ tgtname.len = plink_name(a, sizeof(a), inode, bindex); -+ -+ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) { -+ struct au_do_plink_lkup_args args = { -+ .errp = &h_dentry, -+ .tgtname = &tgtname, -+ .h_parent = h_parent, -+ .br = br -+ }; -+ -+ wkq_err = au_wkq_wait(au_call_do_plink_lkup, &args); -+ if (unlikely(wkq_err)) -+ h_dentry = ERR_PTR(wkq_err); -+ } else -+ h_dentry = au_do_plink_lkup(&tgtname, h_parent, br); -+ -+ return h_dentry; -+} -+ -+/* create a pseudo-link */ -+static int do_whplink(struct qstr *tgt, struct dentry *h_parent, -+ struct dentry *h_dentry, struct au_branch *br) -+{ -+ int err; -+ struct path h_path = { -+ .mnt = au_br_mnt(br) -+ }; -+ struct inode *h_dir, *delegated; -+ -+ h_dir = h_parent->d_inode; -+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_CHILD2); -+again: -+ h_path.dentry = vfsub_lkup_one(tgt, h_parent); -+ err = PTR_ERR(h_path.dentry); -+ if (IS_ERR(h_path.dentry)) -+ goto out; -+ -+ err = 0; -+ /* wh.plink dir is not monitored */ -+ /* todo: is it really safe? 
*/ -+ if (h_path.dentry->d_inode -+ && h_path.dentry->d_inode != h_dentry->d_inode) { -+ delegated = NULL; -+ err = vfsub_unlink(h_dir, &h_path, &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ dput(h_path.dentry); -+ h_path.dentry = NULL; -+ if (!err) -+ goto again; -+ } -+ if (!err && !h_path.dentry->d_inode) { -+ delegated = NULL; -+ err = vfsub_link(h_dentry, h_dir, &h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ } -+ dput(h_path.dentry); -+ -+out: -+ mutex_unlock(&h_dir->i_mutex); -+ return err; -+} -+ -+struct do_whplink_args { -+ int *errp; -+ struct qstr *tgt; -+ struct dentry *h_parent; -+ struct dentry *h_dentry; -+ struct au_branch *br; -+}; -+ -+static void call_do_whplink(void *args) -+{ -+ struct do_whplink_args *a = args; -+ *a->errp = do_whplink(a->tgt, a->h_parent, a->h_dentry, a->br); -+} -+ -+static int whplink(struct dentry *h_dentry, struct inode *inode, -+ aufs_bindex_t bindex, struct au_branch *br) -+{ -+ int err, wkq_err; -+ struct au_wbr *wbr; -+ struct dentry *h_parent; -+ struct inode *h_dir; -+ char a[PLINK_NAME_LEN]; -+ struct qstr tgtname = QSTR_INIT(a, 0); -+ -+ wbr = au_sbr(inode->i_sb, bindex)->br_wbr; -+ h_parent = wbr->wbr_plink; -+ h_dir = h_parent->d_inode; -+ tgtname.len = plink_name(a, sizeof(a), inode, bindex); -+ -+ /* always superio. */ -+ if (!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)) { -+ struct do_whplink_args args = { -+ .errp = &err, -+ .tgt = &tgtname, -+ .h_parent = h_parent, -+ .h_dentry = h_dentry, -+ .br = br -+ }; -+ wkq_err = au_wkq_wait(call_do_whplink, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } else -+ err = do_whplink(&tgtname, h_parent, h_dentry, br); -+ -+ return err; -+} -+ -+/* -+ * create a new pseudo-link for @h_dentry on @bindex. 
-+ * the linked inode is held in aufs @inode. -+ */ -+void au_plink_append(struct inode *inode, aufs_bindex_t bindex, -+ struct dentry *h_dentry) -+{ -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ struct hlist_head *plink_hlist; -+ struct au_icntnr *icntnr; -+ struct au_sphlhead *sphl; -+ int found, err, cnt, i; -+ -+ sb = inode->i_sb; -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK)); -+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM)); -+ -+ found = au_plink_test(inode); -+ if (found) -+ return; -+ -+ i = au_plink_hash(inode->i_ino); -+ sphl = sbinfo->si_plink + i; -+ plink_hlist = &sphl->head; -+ au_igrab(inode); -+ -+ spin_lock(&sphl->spin); -+ hlist_for_each_entry(icntnr, plink_hlist, plink) { -+ if (&icntnr->vfs_inode == inode) { -+ found = 1; -+ break; -+ } -+ } -+ if (!found) { -+ icntnr = container_of(inode, struct au_icntnr, vfs_inode); -+ hlist_add_head_rcu(&icntnr->plink, plink_hlist); -+ } -+ spin_unlock(&sphl->spin); -+ if (!found) { -+ cnt = au_sphl_count(sphl); -+#define msg "unexpectedly unblanced or too many pseudo-links" -+ if (cnt > AUFS_PLINK_WARN) -+ AuWarn1(msg ", %d\n", cnt); -+#undef msg -+ err = whplink(h_dentry, inode, bindex, au_sbr(sb, bindex)); -+ if (unlikely(err)) { -+ pr_warn("err %d, damaged pseudo link.\n", err); -+ au_sphl_del_rcu(&icntnr->plink, sphl); -+ iput(&icntnr->vfs_inode); -+ } -+ } else -+ iput(&icntnr->vfs_inode); -+} -+ -+/* free all plinks */ -+void au_plink_put(struct super_block *sb, int verbose) -+{ -+ int i, warned; -+ struct au_sbinfo *sbinfo; -+ struct hlist_head *plink_hlist; -+ struct hlist_node *tmp; -+ struct au_icntnr *icntnr; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK)); -+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM)); -+ -+ /* no spin_lock since sbinfo is write-locked */ -+ warned = 0; -+ for (i = 0; i < AuPlink_NHASH; i++) { -+ plink_hlist = &sbinfo->si_plink[i].head; -+ if (!warned && verbose && 
!hlist_empty(plink_hlist)) { -+ pr_warn("pseudo-link is not flushed"); -+ warned = 1; -+ } -+ hlist_for_each_entry_safe(icntnr, tmp, plink_hlist, plink) -+ iput(&icntnr->vfs_inode); -+ INIT_HLIST_HEAD(plink_hlist); -+ } -+} -+ -+void au_plink_clean(struct super_block *sb, int verbose) -+{ -+ struct dentry *root; -+ -+ root = sb->s_root; -+ aufs_write_lock(root); -+ if (au_opt_test(au_mntflags(sb), PLINK)) -+ au_plink_put(sb, verbose); -+ aufs_write_unlock(root); -+} -+ -+static int au_plink_do_half_refresh(struct inode *inode, aufs_bindex_t br_id) -+{ -+ int do_put; -+ aufs_bindex_t bstart, bend, bindex; -+ -+ do_put = 0; -+ bstart = au_ibstart(inode); -+ bend = au_ibend(inode); -+ if (bstart >= 0) { -+ for (bindex = bstart; bindex <= bend; bindex++) { -+ if (!au_h_iptr(inode, bindex) -+ || au_ii_br_id(inode, bindex) != br_id) -+ continue; -+ au_set_h_iptr(inode, bindex, NULL, 0); -+ do_put = 1; -+ break; -+ } -+ if (do_put) -+ for (bindex = bstart; bindex <= bend; bindex++) -+ if (au_h_iptr(inode, bindex)) { -+ do_put = 0; -+ break; -+ } -+ } else -+ do_put = 1; -+ -+ return do_put; -+} -+ -+/* free the plinks on a branch specified by @br_id */ -+void au_plink_half_refresh(struct super_block *sb, aufs_bindex_t br_id) -+{ -+ struct au_sbinfo *sbinfo; -+ struct hlist_head *plink_hlist; -+ struct hlist_node *tmp; -+ struct au_icntnr *icntnr; -+ struct inode *inode; -+ int i, do_put; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ AuDebugOn(!au_opt_test(au_mntflags(sb), PLINK)); -+ AuDebugOn(au_plink_maint(sb, AuLock_NOPLM)); -+ -+ /* no spin_lock since sbinfo is write-locked */ -+ for (i = 0; i < AuPlink_NHASH; i++) { -+ plink_hlist = &sbinfo->si_plink[i].head; -+ hlist_for_each_entry_safe(icntnr, tmp, plink_hlist, plink) { -+ inode = au_igrab(&icntnr->vfs_inode); -+ ii_write_lock_child(inode); -+ do_put = au_plink_do_half_refresh(inode, br_id); -+ if (do_put) { -+ hlist_del(&icntnr->plink); -+ iput(inode); -+ } -+ ii_write_unlock(inode); -+ iput(inode); -+ 
} -+ } -+} -diff --git a/fs/aufs/poll.c b/fs/aufs/poll.c -new file mode 100644 -index 0000000..eea19e7 ---- /dev/null -+++ b/fs/aufs/poll.c -@@ -0,0 +1,52 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * poll operation -+ * There is only one filesystem which implements ->poll operation, currently. -+ */ -+ -+#include "aufs.h" -+ -+unsigned int aufs_poll(struct file *file, poll_table *wait) -+{ -+ unsigned int mask; -+ int err; -+ struct file *h_file; -+ struct super_block *sb; -+ -+ /* We should pretend an error happened. */ -+ mask = POLLERR /* | POLLIN | POLLOUT */; -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLMW); -+ -+ h_file = au_read_pre(file, /*keep_fi*/0); -+ err = PTR_ERR(h_file); -+ if (IS_ERR(h_file)) -+ goto out; -+ -+ /* it is not an error if h_file has no operation */ -+ mask = DEFAULT_POLLMASK; -+ if (h_file->f_op->poll) -+ mask = h_file->f_op->poll(h_file, wait); -+ fput(h_file); /* instead of au_read_post() */ -+ -+out: -+ si_read_unlock(sb); -+ AuTraceErr((int)mask); -+ return mask; -+} -diff --git a/fs/aufs/posix_acl.c b/fs/aufs/posix_acl.c -new file mode 100644 -index 0000000..89b4127 ---- /dev/null -+++ b/fs/aufs/posix_acl.c -@@ -0,0 +1,98 @@ -+/* -+ * Copyright (C) 2014-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * posix acl operations -+ */ -+ -+#include -+#include "aufs.h" -+ -+struct posix_acl *aufs_get_acl(struct inode *inode, int type) -+{ -+ struct posix_acl *acl; -+ int err; -+ aufs_bindex_t bindex; -+ struct inode *h_inode; -+ struct super_block *sb; -+ -+ acl = NULL; -+ sb = inode->i_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ ii_read_lock_child(inode); -+ if (!(sb->s_flags & MS_POSIXACL)) -+ goto out; -+ -+ bindex = au_ibstart(inode); -+ h_inode = au_h_iptr(inode, bindex); -+ if (unlikely(!h_inode -+ || ((h_inode->i_mode & S_IFMT) -+ != (inode->i_mode & S_IFMT)))) { -+ err = au_busy_or_stale(); -+ acl = ERR_PTR(err); -+ goto out; -+ } -+ -+ /* always topmost only */ -+ acl = get_acl(h_inode, type); -+ -+out: -+ ii_read_unlock(inode); -+ si_read_unlock(sb); -+ -+ AuTraceErrPtr(acl); -+ return acl; -+} -+ -+int aufs_set_acl(struct inode *inode, struct posix_acl *acl, int type) -+{ -+ int err; -+ ssize_t ssz; -+ struct dentry *dentry; -+ struct au_srxattr arg = { -+ .type = AU_ACL_SET, -+ .u.acl_set = { -+ .acl = acl, -+ .type = type -+ }, -+ }; -+ -+ mutex_lock(&inode->i_mutex); -+ if (inode->i_ino == AUFS_ROOT_INO) -+ dentry = dget(inode->i_sb->s_root); -+ else { -+ dentry = d_find_alias(inode); -+ if (!dentry) -+ dentry = d_find_any_alias(inode); -+ if (!dentry) { -+ pr_warn("cannot handle this inode, " 
-+ "please report to aufs-users ML\n"); -+ err = -ENOENT; -+ goto out; -+ } -+ } -+ -+ ssz = au_srxattr(dentry, &arg); -+ dput(dentry); -+ err = ssz; -+ if (ssz >= 0) -+ err = 0; -+ -+out: -+ mutex_unlock(&inode->i_mutex); -+ return err; -+} -diff --git a/fs/aufs/procfs.c b/fs/aufs/procfs.c -new file mode 100644 -index 0000000..a334330 ---- /dev/null -+++ b/fs/aufs/procfs.c -@@ -0,0 +1,169 @@ -+/* -+ * Copyright (C) 2010-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * procfs interfaces -+ */ -+ -+#include -+#include "aufs.h" -+ -+static int au_procfs_plm_release(struct inode *inode, struct file *file) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ sbinfo = file->private_data; -+ if (sbinfo) { -+ au_plink_maint_leave(sbinfo); -+ kobject_put(&sbinfo->si_kobj); -+ } -+ -+ return 0; -+} -+ -+static void au_procfs_plm_write_clean(struct file *file) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ sbinfo = file->private_data; -+ if (sbinfo) -+ au_plink_clean(sbinfo->si_sb, /*verbose*/0); -+} -+ -+static int au_procfs_plm_write_si(struct file *file, unsigned long id) -+{ -+ int err; -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ -+ err = -EBUSY; -+ if (unlikely(file->private_data)) -+ goto out; -+ -+ sb = NULL; -+ /* don't use au_sbilist_lock() here */ -+ spin_lock(&au_sbilist.spin); -+ hlist_for_each_entry(sbinfo, &au_sbilist.head, si_list) -+ if (id == sysaufs_si_id(sbinfo)) { -+ kobject_get(&sbinfo->si_kobj); -+ sb = sbinfo->si_sb; -+ break; -+ } -+ spin_unlock(&au_sbilist.spin); -+ -+ err = -EINVAL; -+ if (unlikely(!sb)) -+ goto out; -+ -+ err = au_plink_maint_enter(sb); -+ if (!err) -+ /* keep kobject_get() */ -+ file->private_data = sbinfo; -+ else -+ kobject_put(&sbinfo->si_kobj); -+out: -+ return err; -+} -+ -+/* -+ * Accept a valid "si=xxxx" only. -+ * Once it is accepted successfully, accept "clean" too. 
-+ */ -+static ssize_t au_procfs_plm_write(struct file *file, const char __user *ubuf, -+ size_t count, loff_t *ppos) -+{ -+ ssize_t err; -+ unsigned long id; -+ /* last newline is allowed */ -+ char buf[3 + sizeof(unsigned long) * 2 + 1]; -+ -+ err = -EACCES; -+ if (unlikely(!capable(CAP_SYS_ADMIN))) -+ goto out; -+ -+ err = -EINVAL; -+ if (unlikely(count > sizeof(buf))) -+ goto out; -+ -+ err = copy_from_user(buf, ubuf, count); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ goto out; -+ } -+ buf[count] = 0; -+ -+ err = -EINVAL; -+ if (!strcmp("clean", buf)) { -+ au_procfs_plm_write_clean(file); -+ goto out_success; -+ } else if (unlikely(strncmp("si=", buf, 3))) -+ goto out; -+ -+ err = kstrtoul(buf + 3, 16, &id); -+ if (unlikely(err)) -+ goto out; -+ -+ err = au_procfs_plm_write_si(file, id); -+ if (unlikely(err)) -+ goto out; -+ -+out_success: -+ err = count; /* success */ -+out: -+ return err; -+} -+ -+static const struct file_operations au_procfs_plm_fop = { -+ .write = au_procfs_plm_write, -+ .release = au_procfs_plm_release, -+ .owner = THIS_MODULE -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct proc_dir_entry *au_procfs_dir; -+ -+void au_procfs_fin(void) -+{ -+ remove_proc_entry(AUFS_PLINK_MAINT_NAME, au_procfs_dir); -+ remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL); -+} -+ -+int __init au_procfs_init(void) -+{ -+ int err; -+ struct proc_dir_entry *entry; -+ -+ err = -ENOMEM; -+ au_procfs_dir = proc_mkdir(AUFS_PLINK_MAINT_DIR, NULL); -+ if (unlikely(!au_procfs_dir)) -+ goto out; -+ -+ entry = proc_create(AUFS_PLINK_MAINT_NAME, S_IFREG | S_IWUSR, -+ au_procfs_dir, &au_procfs_plm_fop); -+ if (unlikely(!entry)) -+ goto out_dir; -+ -+ err = 0; -+ goto out; /* success */ -+ -+ -+out_dir: -+ remove_proc_entry(AUFS_PLINK_MAINT_DIR, NULL); -+out: -+ return err; -+} -diff --git a/fs/aufs/rdu.c b/fs/aufs/rdu.c -new file mode 100644 -index 0000000..d22b2f8 ---- /dev/null -+++ b/fs/aufs/rdu.c -@@ -0,0 +1,388 @@ 
-+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * readdir in userspace. -+ */ -+ -+#include -+#include -+#include -+#include "aufs.h" -+ -+/* bits for struct aufs_rdu.flags */ -+#define AuRdu_CALLED 1 -+#define AuRdu_CONT (1 << 1) -+#define AuRdu_FULL (1 << 2) -+#define au_ftest_rdu(flags, name) ((flags) & AuRdu_##name) -+#define au_fset_rdu(flags, name) \ -+ do { (flags) |= AuRdu_##name; } while (0) -+#define au_fclr_rdu(flags, name) \ -+ do { (flags) &= ~AuRdu_##name; } while (0) -+ -+struct au_rdu_arg { -+ struct dir_context ctx; -+ struct aufs_rdu *rdu; -+ union au_rdu_ent_ul ent; -+ unsigned long end; -+ -+ struct super_block *sb; -+ int err; -+}; -+ -+static int au_rdu_fill(struct dir_context *ctx, const char *name, int nlen, -+ loff_t offset, u64 h_ino, unsigned int d_type) -+{ -+ int err, len; -+ struct au_rdu_arg *arg = container_of(ctx, struct au_rdu_arg, ctx); -+ struct aufs_rdu *rdu = arg->rdu; -+ struct au_rdu_ent ent; -+ -+ err = 0; -+ arg->err = 0; -+ au_fset_rdu(rdu->cookie.flags, CALLED); -+ len = au_rdu_len(nlen); -+ if (arg->ent.ul + len < arg->end) { -+ ent.ino = h_ino; -+ ent.bindex = rdu->cookie.bindex; -+ ent.type = d_type; -+ ent.nlen = nlen; -+ if (unlikely(nlen > AUFS_MAX_NAMELEN)) -+ ent.type = DT_UNKNOWN; -+ -+ /* unnecessary to support mmap_sem since this is a dir */ -+ err = 
-EFAULT; -+ if (copy_to_user(arg->ent.e, &ent, sizeof(ent))) -+ goto out; -+ if (copy_to_user(arg->ent.e->name, name, nlen)) -+ goto out; -+ /* the terminating NULL */ -+ if (__put_user(0, arg->ent.e->name + nlen)) -+ goto out; -+ err = 0; -+ /* AuDbg("%p, %.*s\n", arg->ent.p, nlen, name); */ -+ arg->ent.ul += len; -+ rdu->rent++; -+ } else { -+ err = -EFAULT; -+ au_fset_rdu(rdu->cookie.flags, FULL); -+ rdu->full = 1; -+ rdu->tail = arg->ent; -+ } -+ -+out: -+ /* AuTraceErr(err); */ -+ return err; -+} -+ -+static int au_rdu_do(struct file *h_file, struct au_rdu_arg *arg) -+{ -+ int err; -+ loff_t offset; -+ struct au_rdu_cookie *cookie = &arg->rdu->cookie; -+ -+ /* we don't have to care (FMODE_32BITHASH | FMODE_64BITHASH) for ext4 */ -+ offset = vfsub_llseek(h_file, cookie->h_pos, SEEK_SET); -+ err = offset; -+ if (unlikely(offset != cookie->h_pos)) -+ goto out; -+ -+ err = 0; -+ do { -+ arg->err = 0; -+ au_fclr_rdu(cookie->flags, CALLED); -+ /* smp_mb(); */ -+ err = vfsub_iterate_dir(h_file, &arg->ctx); -+ if (err >= 0) -+ err = arg->err; -+ } while (!err -+ && au_ftest_rdu(cookie->flags, CALLED) -+ && !au_ftest_rdu(cookie->flags, FULL)); -+ cookie->h_pos = h_file->f_pos; -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_rdu(struct file *file, struct aufs_rdu *rdu) -+{ -+ int err; -+ aufs_bindex_t bend; -+ struct au_rdu_arg arg = { -+ .ctx = { -+ .actor = au_diractor(au_rdu_fill) -+ } -+ }; -+ struct dentry *dentry; -+ struct inode *inode; -+ struct file *h_file; -+ struct au_rdu_cookie *cookie = &rdu->cookie; -+ -+ err = !access_ok(VERIFY_WRITE, rdu->ent.e, rdu->sz); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ goto out; -+ } -+ rdu->rent = 0; -+ rdu->tail = rdu->ent; -+ rdu->full = 0; -+ arg.rdu = rdu; -+ arg.ent = rdu->ent; -+ arg.end = arg.ent.ul; -+ arg.end += rdu->sz; -+ -+ err = -ENOTDIR; -+ if (unlikely(!file->f_op->iterate)) -+ goto out; -+ -+ err = security_file_permission(file, MAY_READ); -+ AuTraceErr(err); -+ if 
(unlikely(err)) -+ goto out; -+ -+ dentry = file->f_dentry; -+ inode = dentry->d_inode; -+#if 1 -+ mutex_lock(&inode->i_mutex); -+#else -+ err = mutex_lock_killable(&inode->i_mutex); -+ AuTraceErr(err); -+ if (unlikely(err)) -+ goto out; -+#endif -+ -+ arg.sb = inode->i_sb; -+ err = si_read_lock(arg.sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out_mtx; -+ err = au_alive_dir(dentry); -+ if (unlikely(err)) -+ goto out_si; -+ /* todo: reval? */ -+ fi_read_lock(file); -+ -+ err = -EAGAIN; -+ if (unlikely(au_ftest_rdu(cookie->flags, CONT) -+ && cookie->generation != au_figen(file))) -+ goto out_unlock; -+ -+ err = 0; -+ if (!rdu->blk) { -+ rdu->blk = au_sbi(arg.sb)->si_rdblk; -+ if (!rdu->blk) -+ rdu->blk = au_dir_size(file, /*dentry*/NULL); -+ } -+ bend = au_fbstart(file); -+ if (cookie->bindex < bend) -+ cookie->bindex = bend; -+ bend = au_fbend_dir(file); -+ /* AuDbg("b%d, b%d\n", cookie->bindex, bend); */ -+ for (; !err && cookie->bindex <= bend; -+ cookie->bindex++, cookie->h_pos = 0) { -+ h_file = au_hf_dir(file, cookie->bindex); -+ if (!h_file) -+ continue; -+ -+ au_fclr_rdu(cookie->flags, FULL); -+ err = au_rdu_do(h_file, &arg); -+ AuTraceErr(err); -+ if (unlikely(au_ftest_rdu(cookie->flags, FULL) || err)) -+ break; -+ } -+ AuDbg("rent %llu\n", rdu->rent); -+ -+ if (!err && !au_ftest_rdu(cookie->flags, CONT)) { -+ rdu->shwh = !!au_opt_test(au_sbi(arg.sb)->si_mntflags, SHWH); -+ au_fset_rdu(cookie->flags, CONT); -+ cookie->generation = au_figen(file); -+ } -+ -+ ii_read_lock_child(inode); -+ fsstack_copy_attr_atime(inode, au_h_iptr(inode, au_ibstart(inode))); -+ ii_read_unlock(inode); -+ -+out_unlock: -+ fi_read_unlock(file); -+out_si: -+ si_read_unlock(arg.sb); -+out_mtx: -+ mutex_unlock(&inode->i_mutex); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_rdu_ino(struct file *file, struct aufs_rdu *rdu) -+{ -+ int err; -+ ino_t ino; -+ unsigned long long nent; -+ union au_rdu_ent_ul *u; -+ struct au_rdu_ent ent; -+ struct 
super_block *sb; -+ -+ err = 0; -+ nent = rdu->nent; -+ u = &rdu->ent; -+ sb = file->f_dentry->d_sb; -+ si_read_lock(sb, AuLock_FLUSH); -+ while (nent-- > 0) { -+ /* unnecessary to support mmap_sem since this is a dir */ -+ err = copy_from_user(&ent, u->e, sizeof(ent)); -+ if (!err) -+ err = !access_ok(VERIFY_WRITE, &u->e->ino, sizeof(ino)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ break; -+ } -+ -+ /* AuDbg("b%d, i%llu\n", ent.bindex, ent.ino); */ -+ if (!ent.wh) -+ err = au_ino(sb, ent.bindex, ent.ino, ent.type, &ino); -+ else -+ err = au_wh_ino(sb, ent.bindex, ent.ino, ent.type, -+ &ino); -+ if (unlikely(err)) { -+ AuTraceErr(err); -+ break; -+ } -+ -+ err = __put_user(ino, &u->e->ino); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ break; -+ } -+ u->ul += au_rdu_len(ent.nlen); -+ } -+ si_read_unlock(sb); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_rdu_verify(struct aufs_rdu *rdu) -+{ -+ AuDbg("rdu{%llu, %p, %u | %u | %llu, %u, %u | " -+ "%llu, b%d, 0x%x, g%u}\n", -+ rdu->sz, rdu->ent.e, rdu->verify[AufsCtlRduV_SZ], -+ rdu->blk, -+ rdu->rent, rdu->shwh, rdu->full, -+ rdu->cookie.h_pos, rdu->cookie.bindex, rdu->cookie.flags, -+ rdu->cookie.generation); -+ -+ if (rdu->verify[AufsCtlRduV_SZ] == sizeof(*rdu)) -+ return 0; -+ -+ AuDbg("%u:%u\n", -+ rdu->verify[AufsCtlRduV_SZ], (unsigned int)sizeof(*rdu)); -+ return -EINVAL; -+} -+ -+long au_rdu_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ long err, e; -+ struct aufs_rdu rdu; -+ void __user *p = (void __user *)arg; -+ -+ err = copy_from_user(&rdu, p, sizeof(rdu)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ goto out; -+ } -+ err = au_rdu_verify(&rdu); -+ if (unlikely(err)) -+ goto out; -+ -+ switch (cmd) { -+ case AUFS_CTL_RDU: -+ err = au_rdu(file, &rdu); -+ if (unlikely(err)) -+ break; -+ -+ e = copy_to_user(p, &rdu, sizeof(rdu)); -+ if (unlikely(e)) { 
-+ err = -EFAULT; -+ AuTraceErr(err); -+ } -+ break; -+ case AUFS_CTL_RDU_INO: -+ err = au_rdu_ino(file, &rdu); -+ break; -+ -+ default: -+ /* err = -ENOTTY; */ -+ err = -EINVAL; -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+#ifdef CONFIG_COMPAT -+long au_rdu_compat_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -+{ -+ long err, e; -+ struct aufs_rdu rdu; -+ void __user *p = compat_ptr(arg); -+ -+ /* todo: get_user()? */ -+ err = copy_from_user(&rdu, p, sizeof(rdu)); -+ if (unlikely(err)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ goto out; -+ } -+ rdu.ent.e = compat_ptr(rdu.ent.ul); -+ err = au_rdu_verify(&rdu); -+ if (unlikely(err)) -+ goto out; -+ -+ switch (cmd) { -+ case AUFS_CTL_RDU: -+ err = au_rdu(file, &rdu); -+ if (unlikely(err)) -+ break; -+ -+ rdu.ent.ul = ptr_to_compat(rdu.ent.e); -+ rdu.tail.ul = ptr_to_compat(rdu.tail.e); -+ e = copy_to_user(p, &rdu, sizeof(rdu)); -+ if (unlikely(e)) { -+ err = -EFAULT; -+ AuTraceErr(err); -+ } -+ break; -+ case AUFS_CTL_RDU_INO: -+ err = au_rdu_ino(file, &rdu); -+ break; -+ -+ default: -+ /* err = -ENOTTY; */ -+ err = -EINVAL; -+ } -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+#endif -diff --git a/fs/aufs/rwsem.h b/fs/aufs/rwsem.h -new file mode 100644 -index 0000000..09ed5a0 ---- /dev/null -+++ b/fs/aufs/rwsem.h -@@ -0,0 +1,191 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * simple read-write semaphore wrappers -+ */ -+ -+#ifndef __AUFS_RWSEM_H__ -+#define __AUFS_RWSEM_H__ -+ -+#ifdef __KERNEL__ -+ -+#include "debug.h" -+ -+struct au_rwsem { -+ struct rw_semaphore rwsem; -+#ifdef CONFIG_AUFS_DEBUG -+ /* just for debugging, not almighty counter */ -+ atomic_t rcnt, wcnt; -+#endif -+}; -+ -+#ifdef CONFIG_AUFS_DEBUG -+#define AuDbgCntInit(rw) do { \ -+ atomic_set(&(rw)->rcnt, 0); \ -+ atomic_set(&(rw)->wcnt, 0); \ -+ smp_mb(); /* atomic set */ \ -+} while (0) -+ -+#define AuDbgRcntInc(rw) atomic_inc(&(rw)->rcnt) -+#define AuDbgRcntDec(rw) WARN_ON(atomic_dec_return(&(rw)->rcnt) < 0) -+#define AuDbgWcntInc(rw) atomic_inc(&(rw)->wcnt) -+#define AuDbgWcntDec(rw) WARN_ON(atomic_dec_return(&(rw)->wcnt) < 0) -+#else -+#define AuDbgCntInit(rw) do {} while (0) -+#define AuDbgRcntInc(rw) do {} while (0) -+#define AuDbgRcntDec(rw) do {} while (0) -+#define AuDbgWcntInc(rw) do {} while (0) -+#define AuDbgWcntDec(rw) do {} while (0) -+#endif /* CONFIG_AUFS_DEBUG */ -+ -+/* to debug easier, do not make them inlined functions */ -+#define AuRwMustNoWaiters(rw) AuDebugOn(!list_empty(&(rw)->rwsem.wait_list)) -+/* rwsem_is_locked() is unusable */ -+#define AuRwMustReadLock(rw) AuDebugOn(atomic_read(&(rw)->rcnt) <= 0) -+#define AuRwMustWriteLock(rw) AuDebugOn(atomic_read(&(rw)->wcnt) <= 0) -+#define AuRwMustAnyLock(rw) AuDebugOn(atomic_read(&(rw)->rcnt) <= 0 \ -+ && atomic_read(&(rw)->wcnt) <= 0) -+#define AuRwDestroy(rw) AuDebugOn(atomic_read(&(rw)->rcnt) \ -+ || atomic_read(&(rw)->wcnt)) -+ -+#define au_rw_class(rw, key) lockdep_set_class(&(rw)->rwsem, key) -+ -+static inline void au_rw_init(struct au_rwsem *rw) -+{ -+ AuDbgCntInit(rw); -+ init_rwsem(&rw->rwsem); -+} -+ -+static inline void au_rw_init_wlock(struct au_rwsem *rw) -+{ -+ au_rw_init(rw); -+ down_write(&rw->rwsem); -+ AuDbgWcntInc(rw); -+} -+ -+static 
inline void au_rw_init_wlock_nested(struct au_rwsem *rw, -+ unsigned int lsc) -+{ -+ au_rw_init(rw); -+ down_write_nested(&rw->rwsem, lsc); -+ AuDbgWcntInc(rw); -+} -+ -+static inline void au_rw_read_lock(struct au_rwsem *rw) -+{ -+ down_read(&rw->rwsem); -+ AuDbgRcntInc(rw); -+} -+ -+static inline void au_rw_read_lock_nested(struct au_rwsem *rw, unsigned int lsc) -+{ -+ down_read_nested(&rw->rwsem, lsc); -+ AuDbgRcntInc(rw); -+} -+ -+static inline void au_rw_read_unlock(struct au_rwsem *rw) -+{ -+ AuRwMustReadLock(rw); -+ AuDbgRcntDec(rw); -+ up_read(&rw->rwsem); -+} -+ -+static inline void au_rw_dgrade_lock(struct au_rwsem *rw) -+{ -+ AuRwMustWriteLock(rw); -+ AuDbgRcntInc(rw); -+ AuDbgWcntDec(rw); -+ downgrade_write(&rw->rwsem); -+} -+ -+static inline void au_rw_write_lock(struct au_rwsem *rw) -+{ -+ down_write(&rw->rwsem); -+ AuDbgWcntInc(rw); -+} -+ -+static inline void au_rw_write_lock_nested(struct au_rwsem *rw, -+ unsigned int lsc) -+{ -+ down_write_nested(&rw->rwsem, lsc); -+ AuDbgWcntInc(rw); -+} -+ -+static inline void au_rw_write_unlock(struct au_rwsem *rw) -+{ -+ AuRwMustWriteLock(rw); -+ AuDbgWcntDec(rw); -+ up_write(&rw->rwsem); -+} -+ -+/* why is not _nested version defined */ -+static inline int au_rw_read_trylock(struct au_rwsem *rw) -+{ -+ int ret; -+ -+ ret = down_read_trylock(&rw->rwsem); -+ if (ret) -+ AuDbgRcntInc(rw); -+ return ret; -+} -+ -+static inline int au_rw_write_trylock(struct au_rwsem *rw) -+{ -+ int ret; -+ -+ ret = down_write_trylock(&rw->rwsem); -+ if (ret) -+ AuDbgWcntInc(rw); -+ return ret; -+} -+ -+#undef AuDbgCntInit -+#undef AuDbgRcntInc -+#undef AuDbgRcntDec -+#undef AuDbgWcntInc -+#undef AuDbgWcntDec -+ -+#define AuSimpleLockRwsemFuncs(prefix, param, rwsem) \ -+static inline void prefix##_read_lock(param) \ -+{ au_rw_read_lock(rwsem); } \ -+static inline void prefix##_write_lock(param) \ -+{ au_rw_write_lock(rwsem); } \ -+static inline int prefix##_read_trylock(param) \ -+{ return au_rw_read_trylock(rwsem); } \ -+static 
inline int prefix##_write_trylock(param) \ -+{ return au_rw_write_trylock(rwsem); } -+/* why is not _nested version defined */ -+/* static inline void prefix##_read_trylock_nested(param, lsc) -+{ au_rw_read_trylock_nested(rwsem, lsc)); } -+static inline void prefix##_write_trylock_nestd(param, lsc) -+{ au_rw_write_trylock_nested(rwsem, lsc); } */ -+ -+#define AuSimpleUnlockRwsemFuncs(prefix, param, rwsem) \ -+static inline void prefix##_read_unlock(param) \ -+{ au_rw_read_unlock(rwsem); } \ -+static inline void prefix##_write_unlock(param) \ -+{ au_rw_write_unlock(rwsem); } \ -+static inline void prefix##_downgrade_lock(param) \ -+{ au_rw_dgrade_lock(rwsem); } -+ -+#define AuSimpleRwsemFuncs(prefix, param, rwsem) \ -+ AuSimpleLockRwsemFuncs(prefix, param, rwsem) \ -+ AuSimpleUnlockRwsemFuncs(prefix, param, rwsem) -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_RWSEM_H__ */ -diff --git a/fs/aufs/sbinfo.c b/fs/aufs/sbinfo.c -new file mode 100644 -index 0000000..ff13c9f ---- /dev/null -+++ b/fs/aufs/sbinfo.c -@@ -0,0 +1,348 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * superblock private data -+ */ -+ -+#include "aufs.h" -+ -+/* -+ * they are necessary regardless sysfs is disabled. 
-+ */ -+void au_si_free(struct kobject *kobj) -+{ -+ int i; -+ struct au_sbinfo *sbinfo; -+ char *locked __maybe_unused; /* debug only */ -+ -+ sbinfo = container_of(kobj, struct au_sbinfo, si_kobj); -+ for (i = 0; i < AuPlink_NHASH; i++) -+ AuDebugOn(!hlist_empty(&sbinfo->si_plink[i].head)); -+ AuDebugOn(atomic_read(&sbinfo->si_nowait.nw_len)); -+ -+ au_rw_write_lock(&sbinfo->si_rwsem); -+ au_br_free(sbinfo); -+ au_rw_write_unlock(&sbinfo->si_rwsem); -+ -+ kfree(sbinfo->si_branch); -+ for (i = 0; i < AU_NPIDMAP; i++) -+ kfree(sbinfo->au_si_pid.pid_bitmap[i]); -+ mutex_destroy(&sbinfo->au_si_pid.pid_mtx); -+ mutex_destroy(&sbinfo->si_xib_mtx); -+ AuRwDestroy(&sbinfo->si_rwsem); -+ -+ kfree(sbinfo); -+} -+ -+int au_si_alloc(struct super_block *sb) -+{ -+ int err, i; -+ struct au_sbinfo *sbinfo; -+ static struct lock_class_key aufs_si; -+ -+ err = -ENOMEM; -+ sbinfo = kzalloc(sizeof(*sbinfo), GFP_NOFS); -+ if (unlikely(!sbinfo)) -+ goto out; -+ -+ /* will be reallocated separately */ -+ sbinfo->si_branch = kzalloc(sizeof(*sbinfo->si_branch), GFP_NOFS); -+ if (unlikely(!sbinfo->si_branch)) -+ goto out_sbinfo; -+ -+ err = sysaufs_si_init(sbinfo); -+ if (unlikely(err)) -+ goto out_br; -+ -+ au_nwt_init(&sbinfo->si_nowait); -+ au_rw_init_wlock(&sbinfo->si_rwsem); -+ au_rw_class(&sbinfo->si_rwsem, &aufs_si); -+ mutex_init(&sbinfo->au_si_pid.pid_mtx); -+ -+ atomic_long_set(&sbinfo->si_ninodes, 0); -+ atomic_long_set(&sbinfo->si_nfiles, 0); -+ -+ sbinfo->si_bend = -1; -+ sbinfo->si_last_br_id = AUFS_BRANCH_MAX / 2; -+ -+ sbinfo->si_wbr_copyup = AuWbrCopyup_Def; -+ sbinfo->si_wbr_create = AuWbrCreate_Def; -+ sbinfo->si_wbr_copyup_ops = au_wbr_copyup_ops + sbinfo->si_wbr_copyup; -+ sbinfo->si_wbr_create_ops = au_wbr_create_ops + sbinfo->si_wbr_create; -+ -+ au_fhsm_init(sbinfo); -+ -+ sbinfo->si_mntflags = au_opts_plink(AuOpt_Def); -+ -+ sbinfo->si_xino_jiffy = jiffies; -+ sbinfo->si_xino_expire -+ = msecs_to_jiffies(AUFS_XINO_DEF_SEC * MSEC_PER_SEC); -+ 
mutex_init(&sbinfo->si_xib_mtx); -+ sbinfo->si_xino_brid = -1; -+ /* leave si_xib_last_pindex and si_xib_next_bit */ -+ -+ au_sphl_init(&sbinfo->si_aopen); -+ -+ sbinfo->si_rdcache = msecs_to_jiffies(AUFS_RDCACHE_DEF * MSEC_PER_SEC); -+ sbinfo->si_rdblk = AUFS_RDBLK_DEF; -+ sbinfo->si_rdhash = AUFS_RDHASH_DEF; -+ sbinfo->si_dirwh = AUFS_DIRWH_DEF; -+ -+ for (i = 0; i < AuPlink_NHASH; i++) -+ au_sphl_init(sbinfo->si_plink + i); -+ init_waitqueue_head(&sbinfo->si_plink_wq); -+ spin_lock_init(&sbinfo->si_plink_maint_lock); -+ -+ au_sphl_init(&sbinfo->si_files); -+ -+ /* with getattr by default */ -+ sbinfo->si_iop_array = aufs_iop; -+ -+ /* leave other members for sysaufs and si_mnt. */ -+ sbinfo->si_sb = sb; -+ sb->s_fs_info = sbinfo; -+ si_pid_set(sb); -+ return 0; /* success */ -+ -+out_br: -+ kfree(sbinfo->si_branch); -+out_sbinfo: -+ kfree(sbinfo); -+out: -+ return err; -+} -+ -+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr) -+{ -+ int err, sz; -+ struct au_branch **brp; -+ -+ AuRwMustWriteLock(&sbinfo->si_rwsem); -+ -+ err = -ENOMEM; -+ sz = sizeof(*brp) * (sbinfo->si_bend + 1); -+ if (unlikely(!sz)) -+ sz = sizeof(*brp); -+ brp = au_kzrealloc(sbinfo->si_branch, sz, sizeof(*brp) * nbr, GFP_NOFS); -+ if (brp) { -+ sbinfo->si_branch = brp; -+ err = 0; -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+unsigned int au_sigen_inc(struct super_block *sb) -+{ -+ unsigned int gen; -+ -+ SiMustWriteLock(sb); -+ -+ gen = ++au_sbi(sb)->si_generation; -+ au_update_digen(sb->s_root); -+ au_update_iigen(sb->s_root->d_inode, /*half*/0); -+ sb->s_root->d_inode->i_version++; -+ return gen; -+} -+ -+aufs_bindex_t au_new_br_id(struct super_block *sb) -+{ -+ aufs_bindex_t br_id; -+ int i; -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ for (i = 0; i <= AUFS_BRANCH_MAX; i++) { -+ br_id = ++sbinfo->si_last_br_id; -+ AuDebugOn(br_id < 0); -+ if (br_id && au_br_index(sb, br_id) < 
0) -+ return br_id; -+ } -+ -+ return -1; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* it is ok that new 'nwt' tasks are appended while we are sleeping */ -+int si_read_lock(struct super_block *sb, int flags) -+{ -+ int err; -+ -+ err = 0; -+ if (au_ftest_lock(flags, FLUSH)) -+ au_nwt_flush(&au_sbi(sb)->si_nowait); -+ -+ si_noflush_read_lock(sb); -+ err = au_plink_maint(sb, flags); -+ if (unlikely(err)) -+ si_read_unlock(sb); -+ -+ return err; -+} -+ -+int si_write_lock(struct super_block *sb, int flags) -+{ -+ int err; -+ -+ if (au_ftest_lock(flags, FLUSH)) -+ au_nwt_flush(&au_sbi(sb)->si_nowait); -+ -+ si_noflush_write_lock(sb); -+ err = au_plink_maint(sb, flags); -+ if (unlikely(err)) -+ si_write_unlock(sb); -+ -+ return err; -+} -+ -+/* dentry and super_block lock. call at entry point */ -+int aufs_read_lock(struct dentry *dentry, int flags) -+{ -+ int err; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ err = si_read_lock(sb, flags); -+ if (unlikely(err)) -+ goto out; -+ -+ if (au_ftest_lock(flags, DW)) -+ di_write_lock_child(dentry); -+ else -+ di_read_lock_child(dentry, flags); -+ -+ if (au_ftest_lock(flags, GEN)) { -+ err = au_digen_test(dentry, au_sigen(sb)); -+ if (!au_opt_test(au_mntflags(sb), UDBA_NONE)) -+ AuDebugOn(!err && au_dbrange_test(dentry)); -+ else if (!err) -+ err = au_dbrange_test(dentry); -+ if (unlikely(err)) -+ aufs_read_unlock(dentry, flags); -+ } -+ -+out: -+ return err; -+} -+ -+void aufs_read_unlock(struct dentry *dentry, int flags) -+{ -+ if (au_ftest_lock(flags, DW)) -+ di_write_unlock(dentry); -+ else -+ di_read_unlock(dentry, flags); -+ si_read_unlock(dentry->d_sb); -+} -+ -+void aufs_write_lock(struct dentry *dentry) -+{ -+ si_write_lock(dentry->d_sb, AuLock_FLUSH | AuLock_NOPLMW); -+ di_write_lock_child(dentry); -+} -+ -+void aufs_write_unlock(struct dentry *dentry) -+{ -+ di_write_unlock(dentry); -+ si_write_unlock(dentry->d_sb); -+} -+ -+int 
aufs_read_and_write_lock2(struct dentry *d1, struct dentry *d2, int flags) -+{ -+ int err; -+ unsigned int sigen; -+ struct super_block *sb; -+ -+ sb = d1->d_sb; -+ err = si_read_lock(sb, flags); -+ if (unlikely(err)) -+ goto out; -+ -+ di_write_lock2_child(d1, d2, au_ftest_lock(flags, DIRS)); -+ -+ if (au_ftest_lock(flags, GEN)) { -+ sigen = au_sigen(sb); -+ err = au_digen_test(d1, sigen); -+ AuDebugOn(!err && au_dbrange_test(d1)); -+ if (!err) { -+ err = au_digen_test(d2, sigen); -+ AuDebugOn(!err && au_dbrange_test(d2)); -+ } -+ if (unlikely(err)) -+ aufs_read_and_write_unlock2(d1, d2); -+ } -+ -+out: -+ return err; -+} -+ -+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2) -+{ -+ di_write_unlock2(d1, d2); -+ si_read_unlock(d1->d_sb); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void si_pid_alloc(struct au_si_pid *au_si_pid, int idx) -+{ -+ unsigned long *p; -+ -+ BUILD_BUG_ON(sizeof(unsigned long) != -+ sizeof(*au_si_pid->pid_bitmap)); -+ -+ mutex_lock(&au_si_pid->pid_mtx); -+ p = au_si_pid->pid_bitmap[idx]; -+ while (!p) { -+ /* -+ * bad approach. -+ * but keeping 'si_pid_set()' void is more important. 
-+ */ -+ p = kcalloc(BITS_TO_LONGS(AU_PIDSTEP), -+ sizeof(*au_si_pid->pid_bitmap), -+ GFP_NOFS); -+ if (p) -+ break; -+ cond_resched(); -+ } -+ au_si_pid->pid_bitmap[idx] = p; -+ mutex_unlock(&au_si_pid->pid_mtx); -+} -+ -+void si_pid_set(struct super_block *sb) -+{ -+ pid_t bit; -+ int idx; -+ unsigned long *bitmap; -+ struct au_si_pid *au_si_pid; -+ -+ si_pid_idx_bit(&idx, &bit); -+ au_si_pid = &au_sbi(sb)->au_si_pid; -+ bitmap = au_si_pid->pid_bitmap[idx]; -+ if (!bitmap) { -+ si_pid_alloc(au_si_pid, idx); -+ bitmap = au_si_pid->pid_bitmap[idx]; -+ } -+ AuDebugOn(test_bit(bit, bitmap)); -+ set_bit(bit, bitmap); -+ /* smp_mb(); */ -+} -diff --git a/fs/aufs/spl.h b/fs/aufs/spl.h -new file mode 100644 -index 0000000..945343a ---- /dev/null -+++ b/fs/aufs/spl.h -@@ -0,0 +1,111 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * simple list protected by a spinlock -+ */ -+ -+#ifndef __AUFS_SPL_H__ -+#define __AUFS_SPL_H__ -+ -+#ifdef __KERNEL__ -+ -+struct au_splhead { -+ spinlock_t spin; -+ struct list_head head; -+}; -+ -+static inline void au_spl_init(struct au_splhead *spl) -+{ -+ spin_lock_init(&spl->spin); -+ INIT_LIST_HEAD(&spl->head); -+} -+ -+static inline void au_spl_add(struct list_head *list, struct au_splhead *spl) -+{ -+ spin_lock(&spl->spin); -+ list_add(list, &spl->head); -+ spin_unlock(&spl->spin); -+} -+ -+static inline void au_spl_del(struct list_head *list, struct au_splhead *spl) -+{ -+ spin_lock(&spl->spin); -+ list_del(list); -+ spin_unlock(&spl->spin); -+} -+ -+static inline void au_spl_del_rcu(struct list_head *list, -+ struct au_splhead *spl) -+{ -+ spin_lock(&spl->spin); -+ list_del_rcu(list); -+ spin_unlock(&spl->spin); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_sphlhead { -+ spinlock_t spin; -+ struct hlist_head head; -+}; -+ -+static inline void au_sphl_init(struct au_sphlhead *sphl) -+{ -+ spin_lock_init(&sphl->spin); -+ INIT_HLIST_HEAD(&sphl->head); -+} -+ -+static inline void au_sphl_add(struct hlist_node *hlist, -+ struct au_sphlhead *sphl) -+{ -+ spin_lock(&sphl->spin); -+ hlist_add_head(hlist, &sphl->head); -+ spin_unlock(&sphl->spin); -+} -+ -+static inline void au_sphl_del(struct hlist_node *hlist, -+ struct au_sphlhead *sphl) -+{ -+ spin_lock(&sphl->spin); -+ hlist_del(hlist); -+ spin_unlock(&sphl->spin); -+} -+ -+static inline void au_sphl_del_rcu(struct hlist_node *hlist, -+ struct au_sphlhead *sphl) -+{ -+ spin_lock(&sphl->spin); -+ hlist_del_rcu(hlist); -+ spin_unlock(&sphl->spin); -+} -+ -+static inline unsigned long au_sphl_count(struct au_sphlhead *sphl) -+{ -+ unsigned long cnt; -+ struct hlist_node *pos; -+ -+ cnt = 0; -+ spin_lock(&sphl->spin); -+ hlist_for_each(pos, &sphl->head) -+ cnt++; -+ spin_unlock(&sphl->spin); -+ return cnt; -+} -+ -+#endif /* __KERNEL__ */ 
-+#endif /* __AUFS_SPL_H__ */ -diff --git a/fs/aufs/super.c b/fs/aufs/super.c -new file mode 100644 -index 0000000..64a6bb4 ---- /dev/null -+++ b/fs/aufs/super.c -@@ -0,0 +1,1041 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * mount and super_block operations -+ */ -+ -+#include -+#include -+#include -+#include -+#include "aufs.h" -+ -+/* -+ * super_operations -+ */ -+static struct inode *aufs_alloc_inode(struct super_block *sb __maybe_unused) -+{ -+ struct au_icntnr *c; -+ -+ c = au_cache_alloc_icntnr(); -+ if (c) { -+ au_icntnr_init(c); -+ c->vfs_inode.i_version = 1; /* sigen(sb); */ -+ c->iinfo.ii_hinode = NULL; -+ return &c->vfs_inode; -+ } -+ return NULL; -+} -+ -+static void aufs_destroy_inode_cb(struct rcu_head *head) -+{ -+ struct inode *inode = container_of(head, struct inode, i_rcu); -+ -+ INIT_HLIST_HEAD(&inode->i_dentry); -+ au_cache_free_icntnr(container_of(inode, struct au_icntnr, vfs_inode)); -+} -+ -+static void aufs_destroy_inode(struct inode *inode) -+{ -+ au_iinfo_fin(inode); -+ call_rcu(&inode->i_rcu, aufs_destroy_inode_cb); -+} -+ -+struct inode *au_iget_locked(struct super_block *sb, ino_t ino) -+{ -+ struct inode *inode; -+ int err; -+ -+ inode = iget_locked(sb, ino); -+ if (unlikely(!inode)) { -+ inode = ERR_PTR(-ENOMEM); -+ goto out; -+ } -+ if (!(inode->i_state & I_NEW)) -+ 
goto out; -+ -+ err = au_xigen_new(inode); -+ if (!err) -+ err = au_iinfo_init(inode); -+ if (!err) -+ inode->i_version++; -+ else { -+ iget_failed(inode); -+ inode = ERR_PTR(err); -+ } -+ -+out: -+ /* never return NULL */ -+ AuDebugOn(!inode); -+ AuTraceErrPtr(inode); -+ return inode; -+} -+ -+/* lock free root dinfo */ -+static int au_show_brs(struct seq_file *seq, struct super_block *sb) -+{ -+ int err; -+ aufs_bindex_t bindex, bend; -+ struct path path; -+ struct au_hdentry *hdp; -+ struct au_branch *br; -+ au_br_perm_str_t perm; -+ -+ err = 0; -+ bend = au_sbend(sb); -+ hdp = au_di(sb->s_root)->di_hdentry; -+ for (bindex = 0; !err && bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ path.mnt = au_br_mnt(br); -+ path.dentry = hdp[bindex].hd_dentry; -+ err = au_seq_path(seq, &path); -+ if (!err) { -+ au_optstr_br_perm(&perm, br->br_perm); -+ err = seq_printf(seq, "=%s", perm.a); -+ if (err == -1) -+ err = -E2BIG; -+ } -+ if (!err && bindex != bend) -+ err = seq_putc(seq, ':'); -+ } -+ -+ return err; -+} -+ -+static void au_show_wbr_create(struct seq_file *m, int v, -+ struct au_sbinfo *sbinfo) -+{ -+ const char *pat; -+ -+ AuRwMustAnyLock(&sbinfo->si_rwsem); -+ -+ seq_puts(m, ",create="); -+ pat = au_optstr_wbr_create(v); -+ switch (v) { -+ case AuWbrCreate_TDP: -+ case AuWbrCreate_RR: -+ case AuWbrCreate_MFS: -+ case AuWbrCreate_PMFS: -+ seq_puts(m, pat); -+ break; -+ case AuWbrCreate_MFSV: -+ seq_printf(m, /*pat*/"mfs:%lu", -+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire) -+ / MSEC_PER_SEC); -+ break; -+ case AuWbrCreate_PMFSV: -+ seq_printf(m, /*pat*/"pmfs:%lu", -+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire) -+ / MSEC_PER_SEC); -+ break; -+ case AuWbrCreate_MFSRR: -+ seq_printf(m, /*pat*/"mfsrr:%llu", -+ sbinfo->si_wbr_mfs.mfsrr_watermark); -+ break; -+ case AuWbrCreate_MFSRRV: -+ seq_printf(m, /*pat*/"mfsrr:%llu:%lu", -+ sbinfo->si_wbr_mfs.mfsrr_watermark, -+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire) -+ / MSEC_PER_SEC); -+ break; -+ case 
AuWbrCreate_PMFSRR: -+ seq_printf(m, /*pat*/"pmfsrr:%llu", -+ sbinfo->si_wbr_mfs.mfsrr_watermark); -+ break; -+ case AuWbrCreate_PMFSRRV: -+ seq_printf(m, /*pat*/"pmfsrr:%llu:%lu", -+ sbinfo->si_wbr_mfs.mfsrr_watermark, -+ jiffies_to_msecs(sbinfo->si_wbr_mfs.mfs_expire) -+ / MSEC_PER_SEC); -+ break; -+ } -+} -+ -+static int au_show_xino(struct seq_file *seq, struct super_block *sb) -+{ -+#ifdef CONFIG_SYSFS -+ return 0; -+#else -+ int err; -+ const int len = sizeof(AUFS_XINO_FNAME) - 1; -+ aufs_bindex_t bindex, brid; -+ struct qstr *name; -+ struct file *f; -+ struct dentry *d, *h_root; -+ struct au_hdentry *hdp; -+ -+ AuRwMustAnyLock(&sbinfo->si_rwsem); -+ -+ err = 0; -+ f = au_sbi(sb)->si_xib; -+ if (!f) -+ goto out; -+ -+ /* stop printing the default xino path on the first writable branch */ -+ h_root = NULL; -+ brid = au_xino_brid(sb); -+ if (brid >= 0) { -+ bindex = au_br_index(sb, brid); -+ hdp = au_di(sb->s_root)->di_hdentry; -+ h_root = hdp[0 + bindex].hd_dentry; -+ } -+ d = f->f_dentry; -+ name = &d->d_name; -+ /* safe ->d_parent because the file is unlinked */ -+ if (d->d_parent == h_root -+ && name->len == len -+ && !memcmp(name->name, AUFS_XINO_FNAME, len)) -+ goto out; -+ -+ seq_puts(seq, ",xino="); -+ err = au_xino_path(seq, f); -+ -+out: -+ return err; -+#endif -+} -+ -+/* seq_file will re-call me in case of too long string */ -+static int aufs_show_options(struct seq_file *m, struct dentry *dentry) -+{ -+ int err; -+ unsigned int mnt_flags, v; -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ -+#define AuBool(name, str) do { \ -+ v = au_opt_test(mnt_flags, name); \ -+ if (v != au_opt_test(AuOpt_Def, name)) \ -+ seq_printf(m, ",%s" #str, v ? 
"" : "no"); \ -+} while (0) -+ -+#define AuStr(name, str) do { \ -+ v = mnt_flags & AuOptMask_##name; \ -+ if (v != (AuOpt_Def & AuOptMask_##name)) \ -+ seq_printf(m, "," #str "=%s", au_optstr_##str(v)); \ -+} while (0) -+ -+#define AuUInt(name, str, val) do { \ -+ if (val != AUFS_##name##_DEF) \ -+ seq_printf(m, "," #str "=%u", val); \ -+} while (0) -+ -+ sb = dentry->d_sb; -+ if (sb->s_flags & MS_POSIXACL) -+ seq_puts(m, ",acl"); -+ -+ /* lock free root dinfo */ -+ si_noflush_read_lock(sb); -+ sbinfo = au_sbi(sb); -+ seq_printf(m, ",si=%lx", sysaufs_si_id(sbinfo)); -+ -+ mnt_flags = au_mntflags(sb); -+ if (au_opt_test(mnt_flags, XINO)) { -+ err = au_show_xino(m, sb); -+ if (unlikely(err)) -+ goto out; -+ } else -+ seq_puts(m, ",noxino"); -+ -+ AuBool(TRUNC_XINO, trunc_xino); -+ AuStr(UDBA, udba); -+ AuBool(SHWH, shwh); -+ AuBool(PLINK, plink); -+ AuBool(DIO, dio); -+ AuBool(DIRPERM1, dirperm1); -+ /* AuBool(REFROF, refrof); */ -+ -+ v = sbinfo->si_wbr_create; -+ if (v != AuWbrCreate_Def) -+ au_show_wbr_create(m, v, sbinfo); -+ -+ v = sbinfo->si_wbr_copyup; -+ if (v != AuWbrCopyup_Def) -+ seq_printf(m, ",cpup=%s", au_optstr_wbr_copyup(v)); -+ -+ v = au_opt_test(mnt_flags, ALWAYS_DIROPQ); -+ if (v != au_opt_test(AuOpt_Def, ALWAYS_DIROPQ)) -+ seq_printf(m, ",diropq=%c", v ? 
'a' : 'w'); -+ -+ AuUInt(DIRWH, dirwh, sbinfo->si_dirwh); -+ -+ v = jiffies_to_msecs(sbinfo->si_rdcache) / MSEC_PER_SEC; -+ AuUInt(RDCACHE, rdcache, v); -+ -+ AuUInt(RDBLK, rdblk, sbinfo->si_rdblk); -+ AuUInt(RDHASH, rdhash, sbinfo->si_rdhash); -+ -+ au_fhsm_show(m, sbinfo); -+ -+ AuBool(SUM, sum); -+ /* AuBool(SUM_W, wsum); */ -+ AuBool(WARN_PERM, warn_perm); -+ AuBool(VERBOSE, verbose); -+ -+out: -+ /* be sure to print "br:" last */ -+ if (!sysaufs_brs) { -+ seq_puts(m, ",br:"); -+ au_show_brs(m, sb); -+ } -+ si_read_unlock(sb); -+ return 0; -+ -+#undef AuBool -+#undef AuStr -+#undef AuUInt -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* sum mode which returns the summation for statfs(2) */ -+ -+static u64 au_add_till_max(u64 a, u64 b) -+{ -+ u64 old; -+ -+ old = a; -+ a += b; -+ if (old <= a) -+ return a; -+ return ULLONG_MAX; -+} -+ -+static u64 au_mul_till_max(u64 a, long mul) -+{ -+ u64 old; -+ -+ old = a; -+ a *= mul; -+ if (old <= a) -+ return a; -+ return ULLONG_MAX; -+} -+ -+static int au_statfs_sum(struct super_block *sb, struct kstatfs *buf) -+{ -+ int err; -+ long bsize, factor; -+ u64 blocks, bfree, bavail, files, ffree; -+ aufs_bindex_t bend, bindex, i; -+ unsigned char shared; -+ struct path h_path; -+ struct super_block *h_sb; -+ -+ err = 0; -+ bsize = LONG_MAX; -+ files = 0; -+ ffree = 0; -+ blocks = 0; -+ bfree = 0; -+ bavail = 0; -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ h_path.mnt = au_sbr_mnt(sb, bindex); -+ h_sb = h_path.mnt->mnt_sb; -+ shared = 0; -+ for (i = 0; !shared && i < bindex; i++) -+ shared = (au_sbr_sb(sb, i) == h_sb); -+ if (shared) -+ continue; -+ -+ /* sb->s_root for NFS is unreliable */ -+ h_path.dentry = h_path.mnt->mnt_root; -+ err = vfs_statfs(&h_path, buf); -+ if (unlikely(err)) -+ goto out; -+ -+ if (bsize > buf->f_bsize) { -+ /* -+ * we will reduce bsize, so we have to expand blocks -+ * etc. 
to match them again -+ */ -+ factor = (bsize / buf->f_bsize); -+ blocks = au_mul_till_max(blocks, factor); -+ bfree = au_mul_till_max(bfree, factor); -+ bavail = au_mul_till_max(bavail, factor); -+ bsize = buf->f_bsize; -+ } -+ -+ factor = (buf->f_bsize / bsize); -+ blocks = au_add_till_max(blocks, -+ au_mul_till_max(buf->f_blocks, factor)); -+ bfree = au_add_till_max(bfree, -+ au_mul_till_max(buf->f_bfree, factor)); -+ bavail = au_add_till_max(bavail, -+ au_mul_till_max(buf->f_bavail, factor)); -+ files = au_add_till_max(files, buf->f_files); -+ ffree = au_add_till_max(ffree, buf->f_ffree); -+ } -+ -+ buf->f_bsize = bsize; -+ buf->f_blocks = blocks; -+ buf->f_bfree = bfree; -+ buf->f_bavail = bavail; -+ buf->f_files = files; -+ buf->f_ffree = ffree; -+ buf->f_frsize = 0; -+ -+out: -+ return err; -+} -+ -+static int aufs_statfs(struct dentry *dentry, struct kstatfs *buf) -+{ -+ int err; -+ struct path h_path; -+ struct super_block *sb; -+ -+ /* lock free root dinfo */ -+ sb = dentry->d_sb; -+ si_noflush_read_lock(sb); -+ if (!au_opt_test(au_mntflags(sb), SUM)) { -+ /* sb->s_root for NFS is unreliable */ -+ h_path.mnt = au_sbr_mnt(sb, 0); -+ h_path.dentry = h_path.mnt->mnt_root; -+ err = vfs_statfs(&h_path, buf); -+ } else -+ err = au_statfs_sum(sb, buf); -+ si_read_unlock(sb); -+ -+ if (!err) { -+ buf->f_type = AUFS_SUPER_MAGIC; -+ buf->f_namelen = AUFS_MAX_NAMELEN; -+ memset(&buf->f_fsid, 0, sizeof(buf->f_fsid)); -+ } -+ /* buf->f_bsize = buf->f_blocks = buf->f_bfree = buf->f_bavail = -1; */ -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int aufs_sync_fs(struct super_block *sb, int wait) -+{ -+ int err, e; -+ aufs_bindex_t bend, bindex; -+ struct au_branch *br; -+ struct super_block *h_sb; -+ -+ err = 0; -+ si_noflush_read_lock(sb); -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (!au_br_writable(br->br_perm)) -+ continue; -+ -+ h_sb = 
au_sbr_sb(sb, bindex); -+ if (h_sb->s_op->sync_fs) { -+ e = h_sb->s_op->sync_fs(h_sb, wait); -+ if (unlikely(e && !err)) -+ err = e; -+ /* go on even if an error happens */ -+ } -+ } -+ si_read_unlock(sb); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* final actions when unmounting a file system */ -+static void aufs_put_super(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ sbinfo = au_sbi(sb); -+ if (!sbinfo) -+ return; -+ -+ dbgaufs_si_fin(sbinfo); -+ kobject_put(&sbinfo->si_kobj); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb, void *arg) -+{ -+ void *array; -+ unsigned long long n, sz; -+ -+ array = NULL; -+ n = 0; -+ if (!*hint) -+ goto out; -+ -+ if (*hint > ULLONG_MAX / sizeof(array)) { -+ array = ERR_PTR(-EMFILE); -+ pr_err("hint %llu\n", *hint); -+ goto out; -+ } -+ -+ sz = sizeof(array) * *hint; -+ array = kzalloc(sz, GFP_NOFS); -+ if (unlikely(!array)) -+ array = vzalloc(sz); -+ if (unlikely(!array)) { -+ array = ERR_PTR(-ENOMEM); -+ goto out; -+ } -+ -+ n = cb(array, *hint, arg); -+ AuDebugOn(n > *hint); -+ -+out: -+ *hint = n; -+ return array; -+} -+ -+static unsigned long long au_iarray_cb(void *a, -+ unsigned long long max __maybe_unused, -+ void *arg) -+{ -+ unsigned long long n; -+ struct inode **p, *inode; -+ struct list_head *head; -+ -+ n = 0; -+ p = a; -+ head = arg; -+ spin_lock(&inode_sb_list_lock); -+ list_for_each_entry(inode, head, i_sb_list) { -+ if (!is_bad_inode(inode) -+ && au_ii(inode)->ii_bstart >= 0) { -+ spin_lock(&inode->i_lock); -+ if (atomic_read(&inode->i_count)) { -+ au_igrab(inode); -+ *p++ = inode; -+ n++; -+ AuDebugOn(n > max); -+ } -+ spin_unlock(&inode->i_lock); -+ } -+ } -+ spin_unlock(&inode_sb_list_lock); -+ -+ return n; -+} -+ -+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max) -+{ -+ *max = 
atomic_long_read(&au_sbi(sb)->si_ninodes); -+ return au_array_alloc(max, au_iarray_cb, &sb->s_inodes); -+} -+ -+void au_iarray_free(struct inode **a, unsigned long long max) -+{ -+ unsigned long long ull; -+ -+ for (ull = 0; ull < max; ull++) -+ iput(a[ull]); -+ kvfree(a); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * refresh dentry and inode at remount time. -+ */ -+/* todo: consolidate with simple_reval_dpath() and au_reval_for_attr() */ -+static int au_do_refresh(struct dentry *dentry, unsigned int dir_flags, -+ struct dentry *parent) -+{ -+ int err; -+ -+ di_write_lock_child(dentry); -+ di_read_lock_parent(parent, AuLock_IR); -+ err = au_refresh_dentry(dentry, parent); -+ if (!err && dir_flags) -+ au_hn_reset(dentry->d_inode, dir_flags); -+ di_read_unlock(parent, AuLock_IR); -+ di_write_unlock(dentry); -+ -+ return err; -+} -+ -+static int au_do_refresh_d(struct dentry *dentry, unsigned int sigen, -+ struct au_sbinfo *sbinfo, -+ const unsigned int dir_flags, unsigned int do_idop) -+{ -+ int err; -+ struct dentry *parent; -+ struct inode *inode; -+ -+ err = 0; -+ parent = dget_parent(dentry); -+ if (!au_digen_test(parent, sigen) && au_digen_test(dentry, sigen)) { -+ inode = dentry->d_inode; -+ if (inode) { -+ if (!S_ISDIR(inode->i_mode)) -+ err = au_do_refresh(dentry, /*dir_flags*/0, -+ parent); -+ else { -+ err = au_do_refresh(dentry, dir_flags, parent); -+ if (unlikely(err)) -+ au_fset_si(sbinfo, FAILED_REFRESH_DIR); -+ } -+ } else -+ err = au_do_refresh(dentry, /*dir_flags*/0, parent); -+ AuDbgDentry(dentry); -+ } -+ dput(parent); -+ -+ if (!err) { -+ if (do_idop) -+ au_refresh_dop(dentry, /*force_reval*/0); -+ } else -+ au_refresh_dop(dentry, /*force_reval*/1); -+ -+ AuTraceErr(err); -+ return err; -+} -+ -+static int au_refresh_d(struct super_block *sb, unsigned int do_idop) -+{ -+ int err, i, j, ndentry, e; -+ unsigned int sigen; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct 
dentry **dentries, *d; -+ struct au_sbinfo *sbinfo; -+ struct dentry *root = sb->s_root; -+ const unsigned int dir_flags = au_hi_flags(root->d_inode, /*isdir*/1); -+ -+ if (do_idop) -+ au_refresh_dop(root, /*force_reval*/0); -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_dcsub_pages(&dpages, root, NULL, NULL); -+ if (unlikely(err)) -+ goto out_dpages; -+ -+ sigen = au_sigen(sb); -+ sbinfo = au_sbi(sb); -+ for (i = 0; i < dpages.ndpage; i++) { -+ dpage = dpages.dpages + i; -+ dentries = dpage->dentries; -+ ndentry = dpage->ndentry; -+ for (j = 0; j < ndentry; j++) { -+ d = dentries[j]; -+ e = au_do_refresh_d(d, sigen, sbinfo, dir_flags, -+ do_idop); -+ if (unlikely(e && !err)) -+ err = e; -+ /* go on even err */ -+ } -+ } -+ -+out_dpages: -+ au_dpages_free(&dpages); -+out: -+ return err; -+} -+ -+static int au_refresh_i(struct super_block *sb, unsigned int do_idop) -+{ -+ int err, e; -+ unsigned int sigen; -+ unsigned long long max, ull; -+ struct inode *inode, **array; -+ -+ array = au_iarray_alloc(sb, &max); -+ err = PTR_ERR(array); -+ if (IS_ERR(array)) -+ goto out; -+ -+ err = 0; -+ sigen = au_sigen(sb); -+ for (ull = 0; ull < max; ull++) { -+ inode = array[ull]; -+ if (unlikely(!inode)) -+ break; -+ -+ e = 0; -+ ii_write_lock_child(inode); -+ if (au_iigen(inode, NULL) != sigen) { -+ e = au_refresh_hinode_self(inode); -+ if (unlikely(e)) { -+ au_refresh_iop(inode, /*force_getattr*/1); -+ pr_err("error %d, i%lu\n", e, inode->i_ino); -+ if (!err) -+ err = e; -+ /* go on even if err */ -+ } -+ } -+ if (!e && do_idop) -+ au_refresh_iop(inode, /*force_getattr*/0); -+ ii_write_unlock(inode); -+ } -+ -+ au_iarray_free(array, max); -+ -+out: -+ return err; -+} -+ -+static void au_remount_refresh(struct super_block *sb, unsigned int do_idop) -+{ -+ int err, e; -+ unsigned int udba; -+ aufs_bindex_t bindex, bend; -+ struct dentry *root; -+ struct inode *inode; -+ struct au_branch *br; -+ struct au_sbinfo *sbi; -+ -+ 
au_sigen_inc(sb); -+ sbi = au_sbi(sb); -+ au_fclr_si(sbi, FAILED_REFRESH_DIR); -+ -+ root = sb->s_root; -+ DiMustNoWaiters(root); -+ inode = root->d_inode; -+ IiMustNoWaiters(inode); -+ -+ udba = au_opt_udba(sb); -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ err = au_hnotify_reset_br(udba, br, br->br_perm); -+ if (unlikely(err)) -+ AuIOErr("hnotify failed on br %d, %d, ignored\n", -+ bindex, err); -+ /* go on even if err */ -+ } -+ au_hn_reset(inode, au_hi_flags(inode, /*isdir*/1)); -+ -+ if (do_idop) { -+ if (au_ftest_si(sbi, NO_DREVAL)) { -+ AuDebugOn(sb->s_d_op == &aufs_dop_noreval); -+ sb->s_d_op = &aufs_dop_noreval; -+ AuDebugOn(sbi->si_iop_array == aufs_iop_nogetattr); -+ sbi->si_iop_array = aufs_iop_nogetattr; -+ } else { -+ AuDebugOn(sb->s_d_op == &aufs_dop); -+ sb->s_d_op = &aufs_dop; -+ AuDebugOn(sbi->si_iop_array == aufs_iop); -+ sbi->si_iop_array = aufs_iop; -+ } -+ pr_info("reset to %pf and %pf\n", -+ sb->s_d_op, sbi->si_iop_array); -+ } -+ -+ di_write_unlock(root); -+ err = au_refresh_d(sb, do_idop); -+ e = au_refresh_i(sb, do_idop); -+ if (unlikely(e && !err)) -+ err = e; -+ /* aufs_write_lock() calls ..._child() */ -+ di_write_lock_child(root); -+ -+ au_cpup_attr_all(inode, /*force*/1); -+ -+ if (unlikely(err)) -+ AuIOErr("refresh failed, ignored, %d\n", err); -+} -+ -+/* stop extra interpretation of errno in mount(8), and strange error messages */ -+static int cvt_err(int err) -+{ -+ AuTraceErr(err); -+ -+ switch (err) { -+ case -ENOENT: -+ case -ENOTDIR: -+ case -EEXIST: -+ case -EIO: -+ err = -EINVAL; -+ } -+ return err; -+} -+ -+static int aufs_remount_fs(struct super_block *sb, int *flags, char *data) -+{ -+ int err, do_dx; -+ unsigned int mntflags; -+ struct au_opts opts = { -+ .opt = NULL -+ }; -+ struct dentry *root; -+ struct inode *inode; -+ struct au_sbinfo *sbinfo; -+ -+ err = 0; -+ root = sb->s_root; -+ if (!data || !*data) { -+ err = si_write_lock(sb, AuLock_FLUSH | 
AuLock_NOPLM); -+ if (!err) { -+ di_write_lock_child(root); -+ err = au_opts_verify(sb, *flags, /*pending*/0); -+ aufs_write_unlock(root); -+ } -+ goto out; -+ } -+ -+ err = -ENOMEM; -+ opts.opt = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!opts.opt)) -+ goto out; -+ opts.max_opt = PAGE_SIZE / sizeof(*opts.opt); -+ opts.flags = AuOpts_REMOUNT; -+ opts.sb_flags = *flags; -+ -+ /* parse it before aufs lock */ -+ err = au_opts_parse(sb, data, &opts); -+ if (unlikely(err)) -+ goto out_opts; -+ -+ sbinfo = au_sbi(sb); -+ inode = root->d_inode; -+ mutex_lock(&inode->i_mutex); -+ err = si_write_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out_mtx; -+ di_write_lock_child(root); -+ -+ /* au_opts_remount() may return an error */ -+ err = au_opts_remount(sb, &opts); -+ au_opts_free(&opts); -+ -+ if (au_ftest_opts(opts.flags, REFRESH)) -+ au_remount_refresh(sb, au_ftest_opts(opts.flags, REFRESH_IDOP)); -+ -+ if (au_ftest_opts(opts.flags, REFRESH_DYAOP)) { -+ mntflags = au_mntflags(sb); -+ do_dx = !!au_opt_test(mntflags, DIO); -+ au_dy_arefresh(do_dx); -+ } -+ -+ au_fhsm_wrote_all(sb, /*force*/1); /* ?? 
*/ -+ aufs_write_unlock(root); -+ -+out_mtx: -+ mutex_unlock(&inode->i_mutex); -+out_opts: -+ free_page((unsigned long)opts.opt); -+out: -+ err = cvt_err(err); -+ AuTraceErr(err); -+ return err; -+} -+ -+static const struct super_operations aufs_sop = { -+ .alloc_inode = aufs_alloc_inode, -+ .destroy_inode = aufs_destroy_inode, -+ /* always deleting, no clearing */ -+ .drop_inode = generic_delete_inode, -+ .show_options = aufs_show_options, -+ .statfs = aufs_statfs, -+ .put_super = aufs_put_super, -+ .sync_fs = aufs_sync_fs, -+ .remount_fs = aufs_remount_fs -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int alloc_root(struct super_block *sb) -+{ -+ int err; -+ struct inode *inode; -+ struct dentry *root; -+ -+ err = -ENOMEM; -+ inode = au_iget_locked(sb, AUFS_ROOT_INO); -+ err = PTR_ERR(inode); -+ if (IS_ERR(inode)) -+ goto out; -+ -+ inode->i_op = aufs_iop + AuIop_DIR; /* with getattr by default */ -+ inode->i_fop = &aufs_dir_fop; -+ inode->i_mode = S_IFDIR; -+ set_nlink(inode, 2); -+ unlock_new_inode(inode); -+ -+ root = d_make_root(inode); -+ if (unlikely(!root)) -+ goto out; -+ err = PTR_ERR(root); -+ if (IS_ERR(root)) -+ goto out; -+ -+ err = au_di_init(root); -+ if (!err) { -+ sb->s_root = root; -+ return 0; /* success */ -+ } -+ dput(root); -+ -+out: -+ return err; -+} -+ -+static int aufs_fill_super(struct super_block *sb, void *raw_data, -+ int silent __maybe_unused) -+{ -+ int err; -+ struct au_opts opts = { -+ .opt = NULL -+ }; -+ struct au_sbinfo *sbinfo; -+ struct dentry *root; -+ struct inode *inode; -+ char *arg = raw_data; -+ -+ if (unlikely(!arg || !*arg)) { -+ err = -EINVAL; -+ pr_err("no arg\n"); -+ goto out; -+ } -+ -+ err = -ENOMEM; -+ opts.opt = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!opts.opt)) -+ goto out; -+ opts.max_opt = PAGE_SIZE / sizeof(*opts.opt); -+ opts.sb_flags = sb->s_flags; -+ -+ err = au_si_alloc(sb); -+ if (unlikely(err)) -+ goto out_opts; -+ sbinfo = au_sbi(sb); 
-+ -+ /* all timestamps always follow the ones on the branch */ -+ sb->s_flags |= MS_NOATIME | MS_NODIRATIME; -+ sb->s_op = &aufs_sop; -+ sb->s_d_op = &aufs_dop; -+ sb->s_magic = AUFS_SUPER_MAGIC; -+ sb->s_maxbytes = 0; -+ sb->s_stack_depth = 1; -+ au_export_init(sb); -+ /* au_xattr_init(sb); */ -+ -+ err = alloc_root(sb); -+ if (unlikely(err)) { -+ si_write_unlock(sb); -+ goto out_info; -+ } -+ root = sb->s_root; -+ inode = root->d_inode; -+ -+ /* -+ * actually we can parse options regardless aufs lock here. -+ * but at remount time, parsing must be done before aufs lock. -+ * so we follow the same rule. -+ */ -+ ii_write_lock_parent(inode); -+ aufs_write_unlock(root); -+ err = au_opts_parse(sb, arg, &opts); -+ if (unlikely(err)) -+ goto out_root; -+ -+ /* lock vfs_inode first, then aufs. */ -+ mutex_lock(&inode->i_mutex); -+ aufs_write_lock(root); -+ err = au_opts_mount(sb, &opts); -+ au_opts_free(&opts); -+ if (!err && au_ftest_si(sbinfo, NO_DREVAL)) { -+ sb->s_d_op = &aufs_dop_noreval; -+ pr_info("%pf\n", sb->s_d_op); -+ au_refresh_dop(root, /*force_reval*/0); -+ sbinfo->si_iop_array = aufs_iop_nogetattr; -+ au_refresh_iop(inode, /*force_getattr*/0); -+ } -+ aufs_write_unlock(root); -+ mutex_unlock(&inode->i_mutex); -+ if (!err) -+ goto out_opts; /* success */ -+ -+out_root: -+ dput(root); -+ sb->s_root = NULL; -+out_info: -+ dbgaufs_si_fin(sbinfo); -+ kobject_put(&sbinfo->si_kobj); -+ sb->s_fs_info = NULL; -+out_opts: -+ free_page((unsigned long)opts.opt); -+out: -+ AuTraceErr(err); -+ err = cvt_err(err); -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct dentry *aufs_mount(struct file_system_type *fs_type, int flags, -+ const char *dev_name __maybe_unused, -+ void *raw_data) -+{ -+ struct dentry *root; -+ struct super_block *sb; -+ -+ /* all timestamps always follow the ones on the branch */ -+ /* mnt->mnt_flags |= MNT_NOATIME | MNT_NODIRATIME; */ -+ root = 
mount_nodev(fs_type, flags, raw_data, aufs_fill_super); -+ if (IS_ERR(root)) -+ goto out; -+ -+ sb = root->d_sb; -+ si_write_lock(sb, !AuLock_FLUSH); -+ sysaufs_brs_add(sb, 0); -+ si_write_unlock(sb); -+ au_sbilist_add(sb); -+ -+out: -+ return root; -+} -+ -+static void aufs_kill_sb(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ sbinfo = au_sbi(sb); -+ if (sbinfo) { -+ au_sbilist_del(sb); -+ aufs_write_lock(sb->s_root); -+ au_fhsm_fin(sb); -+ if (sbinfo->si_wbr_create_ops->fin) -+ sbinfo->si_wbr_create_ops->fin(sb); -+ if (au_opt_test(sbinfo->si_mntflags, UDBA_HNOTIFY)) { -+ au_opt_set_udba(sbinfo->si_mntflags, UDBA_NONE); -+ au_remount_refresh(sb, /*do_idop*/0); -+ } -+ if (au_opt_test(sbinfo->si_mntflags, PLINK)) -+ au_plink_put(sb, /*verbose*/1); -+ au_xino_clr(sb); -+ sbinfo->si_sb = NULL; -+ aufs_write_unlock(sb->s_root); -+ au_nwt_flush(&sbinfo->si_nowait); -+ } -+ kill_anon_super(sb); -+} -+ -+struct file_system_type aufs_fs_type = { -+ .name = AUFS_FSTYPE, -+ /* a race between rename and others */ -+ .fs_flags = FS_RENAME_DOES_D_MOVE, -+ .mount = aufs_mount, -+ .kill_sb = aufs_kill_sb, -+ /* no need to __module_get() and module_put(). */ -+ .owner = THIS_MODULE, -+}; -diff --git a/fs/aufs/super.h b/fs/aufs/super.h -new file mode 100644 -index 0000000..ecd364b ---- /dev/null -+++ b/fs/aufs/super.h -@@ -0,0 +1,626 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * super_block operations -+ */ -+ -+#ifndef __AUFS_SUPER_H__ -+#define __AUFS_SUPER_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include "rwsem.h" -+#include "spl.h" -+#include "wkq.h" -+ -+typedef ssize_t (*au_readf_t)(struct file *, char __user *, size_t, loff_t *); -+typedef ssize_t (*au_writef_t)(struct file *, const char __user *, size_t, -+ loff_t *); -+ -+/* policies to select one among multiple writable branches */ -+struct au_wbr_copyup_operations { -+ int (*copyup)(struct dentry *dentry); -+}; -+ -+#define AuWbr_DIR 1 /* target is a dir */ -+#define AuWbr_PARENT (1 << 1) /* always require a parent */ -+ -+#define au_ftest_wbr(flags, name) ((flags) & AuWbr_##name) -+#define au_fset_wbr(flags, name) { (flags) |= AuWbr_##name; } -+#define au_fclr_wbr(flags, name) { (flags) &= ~AuWbr_##name; } -+ -+struct au_wbr_create_operations { -+ int (*create)(struct dentry *dentry, unsigned int flags); -+ int (*init)(struct super_block *sb); -+ int (*fin)(struct super_block *sb); -+}; -+ -+struct au_wbr_mfs { -+ struct mutex mfs_lock; /* protect this structure */ -+ unsigned long mfs_jiffy; -+ unsigned long mfs_expire; -+ aufs_bindex_t mfs_bindex; -+ -+ unsigned long long mfsrr_bytes; -+ unsigned long long mfsrr_watermark; -+}; -+ -+#define AuPlink_NHASH 100 -+static inline int au_plink_hash(ino_t ino) -+{ -+ return ino % AuPlink_NHASH; -+} -+ -+/* File-based Hierarchical Storage Management */ -+struct au_fhsm { -+#ifdef CONFIG_AUFS_FHSM -+ /* allow only one process who can receive the notification */ -+ spinlock_t fhsm_spin; -+ pid_t fhsm_pid; -+ wait_queue_head_t fhsm_wqh; -+ atomic_t fhsm_readable; -+ -+ /* these are protected by si_rwsem */ -+ unsigned long fhsm_expire; -+ aufs_bindex_t fhsm_bottom; -+#endif -+}; -+ -+#define AU_PIDSTEP (int)(BITS_TO_LONGS(PID_MAX_DEFAULT) * BITS_PER_LONG) -+#define AU_NPIDMAP 
(int)DIV_ROUND_UP(PID_MAX_LIMIT, AU_PIDSTEP) -+struct au_si_pid { -+ unsigned long *pid_bitmap[AU_NPIDMAP]; -+ struct mutex pid_mtx; -+}; -+ -+struct au_branch; -+struct au_sbinfo { -+ /* nowait tasks in the system-wide workqueue */ -+ struct au_nowait_tasks si_nowait; -+ -+ /* -+ * tried sb->s_umount, but failed due to the dependecy between i_mutex. -+ * rwsem for au_sbinfo is necessary. -+ */ -+ struct au_rwsem si_rwsem; -+ -+ /* prevent recursive locking in deleting inode */ -+ struct au_si_pid au_si_pid; -+ -+ /* -+ * dirty approach to protect sb->sb_inodes and ->s_files (gone) from -+ * remount. -+ */ -+ atomic_long_t si_ninodes, si_nfiles; -+ -+ /* branch management */ -+ unsigned int si_generation; -+ -+ /* see AuSi_ flags */ -+ unsigned char au_si_status; -+ -+ aufs_bindex_t si_bend; -+ -+ /* dirty trick to keep br_id plus */ -+ unsigned int si_last_br_id : -+ sizeof(aufs_bindex_t) * BITS_PER_BYTE - 1; -+ struct au_branch **si_branch; -+ -+ /* policy to select a writable branch */ -+ unsigned char si_wbr_copyup; -+ unsigned char si_wbr_create; -+ struct au_wbr_copyup_operations *si_wbr_copyup_ops; -+ struct au_wbr_create_operations *si_wbr_create_ops; -+ -+ /* round robin */ -+ atomic_t si_wbr_rr_next; -+ -+ /* most free space */ -+ struct au_wbr_mfs si_wbr_mfs; -+ -+ /* File-based Hierarchical Storage Management */ -+ struct au_fhsm si_fhsm; -+ -+ /* mount flags */ -+ /* include/asm-ia64/siginfo.h defines a macro named si_flags */ -+ unsigned int si_mntflags; -+ -+ /* external inode number (bitmap and translation table) */ -+ au_readf_t si_xread; -+ au_writef_t si_xwrite; -+ struct file *si_xib; -+ struct mutex si_xib_mtx; /* protect xib members */ -+ unsigned long *si_xib_buf; -+ unsigned long si_xib_last_pindex; -+ int si_xib_next_bit; -+ aufs_bindex_t si_xino_brid; -+ unsigned long si_xino_jiffy; -+ unsigned long si_xino_expire; -+ /* reserved for future use */ -+ /* unsigned long long si_xib_limit; */ /* Max xib file size */ -+ -+#ifdef 
CONFIG_AUFS_EXPORT -+ /* i_generation */ -+ struct file *si_xigen; -+ atomic_t si_xigen_next; -+#endif -+ -+ /* dirty trick to suppoer atomic_open */ -+ struct au_sphlhead si_aopen; -+ -+ /* vdir parameters */ -+ unsigned long si_rdcache; /* max cache time in jiffies */ -+ unsigned int si_rdblk; /* deblk size */ -+ unsigned int si_rdhash; /* hash size */ -+ -+ /* -+ * If the number of whiteouts are larger than si_dirwh, leave all of -+ * them after au_whtmp_ren to reduce the cost of rmdir(2). -+ * future fsck.aufs or kernel thread will remove them later. -+ * Otherwise, remove all whiteouts and the dir in rmdir(2). -+ */ -+ unsigned int si_dirwh; -+ -+ /* pseudo_link list */ -+ struct au_sphlhead si_plink[AuPlink_NHASH]; -+ wait_queue_head_t si_plink_wq; -+ spinlock_t si_plink_maint_lock; -+ pid_t si_plink_maint_pid; -+ -+ /* file list */ -+ struct au_sphlhead si_files; -+ -+ /* with/without getattr, brother of sb->s_d_op */ -+ struct inode_operations *si_iop_array; -+ -+ /* -+ * sysfs and lifetime management. -+ * this is not a small structure and it may be a waste of memory in case -+ * of sysfs is disabled, particulary when many aufs-es are mounted. -+ * but using sysfs is majority. -+ */ -+ struct kobject si_kobj; -+#ifdef CONFIG_DEBUG_FS -+ struct dentry *si_dbgaufs; -+ struct dentry *si_dbgaufs_plink; -+ struct dentry *si_dbgaufs_xib; -+#ifdef CONFIG_AUFS_EXPORT -+ struct dentry *si_dbgaufs_xigen; -+#endif -+#endif -+ -+#ifdef CONFIG_AUFS_SBILIST -+ struct hlist_node si_list; -+#endif -+ -+ /* dirty, necessary for unmounting, sysfs and sysrq */ -+ struct super_block *si_sb; -+}; -+ -+/* sbinfo status flags */ -+/* -+ * set true when refresh_dirs() failed at remount time. -+ * then try refreshing dirs at access time again. 
-+ * if it is false, refreshing dirs at access time is unnecesary -+ */ -+#define AuSi_FAILED_REFRESH_DIR 1 -+#define AuSi_FHSM (1 << 1) /* fhsm is active now */ -+#define AuSi_NO_DREVAL (1 << 2) /* disable all d_revalidate */ -+ -+#ifndef CONFIG_AUFS_FHSM -+#undef AuSi_FHSM -+#define AuSi_FHSM 0 -+#endif -+ -+static inline unsigned char au_do_ftest_si(struct au_sbinfo *sbi, -+ unsigned int flag) -+{ -+ AuRwMustAnyLock(&sbi->si_rwsem); -+ return sbi->au_si_status & flag; -+} -+#define au_ftest_si(sbinfo, name) au_do_ftest_si(sbinfo, AuSi_##name) -+#define au_fset_si(sbinfo, name) do { \ -+ AuRwMustWriteLock(&(sbinfo)->si_rwsem); \ -+ (sbinfo)->au_si_status |= AuSi_##name; \ -+} while (0) -+#define au_fclr_si(sbinfo, name) do { \ -+ AuRwMustWriteLock(&(sbinfo)->si_rwsem); \ -+ (sbinfo)->au_si_status &= ~AuSi_##name; \ -+} while (0) -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* policy to select one among writable branches */ -+#define AuWbrCopyup(sbinfo, ...) \ -+ ((sbinfo)->si_wbr_copyup_ops->copyup(__VA_ARGS__)) -+#define AuWbrCreate(sbinfo, ...) 
\ -+ ((sbinfo)->si_wbr_create_ops->create(__VA_ARGS__)) -+ -+/* flags for si_read_lock()/aufs_read_lock()/di_read_lock() */ -+#define AuLock_DW 1 /* write-lock dentry */ -+#define AuLock_IR (1 << 1) /* read-lock inode */ -+#define AuLock_IW (1 << 2) /* write-lock inode */ -+#define AuLock_FLUSH (1 << 3) /* wait for 'nowait' tasks */ -+#define AuLock_DIRS (1 << 4) /* target is a pair of dirs */ -+#define AuLock_NOPLM (1 << 5) /* return err in plm mode */ -+#define AuLock_NOPLMW (1 << 6) /* wait for plm mode ends */ -+#define AuLock_GEN (1 << 7) /* test digen/iigen */ -+#define au_ftest_lock(flags, name) ((flags) & AuLock_##name) -+#define au_fset_lock(flags, name) \ -+ do { (flags) |= AuLock_##name; } while (0) -+#define au_fclr_lock(flags, name) \ -+ do { (flags) &= ~AuLock_##name; } while (0) -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* super.c */ -+extern struct file_system_type aufs_fs_type; -+struct inode *au_iget_locked(struct super_block *sb, ino_t ino); -+typedef unsigned long long (*au_arraycb_t)(void *array, unsigned long long max, -+ void *arg); -+void *au_array_alloc(unsigned long long *hint, au_arraycb_t cb, void *arg); -+struct inode **au_iarray_alloc(struct super_block *sb, unsigned long long *max); -+void au_iarray_free(struct inode **a, unsigned long long max); -+ -+/* sbinfo.c */ -+void au_si_free(struct kobject *kobj); -+int au_si_alloc(struct super_block *sb); -+int au_sbr_realloc(struct au_sbinfo *sbinfo, int nbr); -+ -+unsigned int au_sigen_inc(struct super_block *sb); -+aufs_bindex_t au_new_br_id(struct super_block *sb); -+ -+int si_read_lock(struct super_block *sb, int flags); -+int si_write_lock(struct super_block *sb, int flags); -+int aufs_read_lock(struct dentry *dentry, int flags); -+void aufs_read_unlock(struct dentry *dentry, int flags); -+void aufs_write_lock(struct dentry *dentry); -+void aufs_write_unlock(struct dentry *dentry); -+int aufs_read_and_write_lock2(struct dentry *d1, struct 
dentry *d2, int flags); -+void aufs_read_and_write_unlock2(struct dentry *d1, struct dentry *d2); -+ -+/* wbr_policy.c */ -+extern struct au_wbr_copyup_operations au_wbr_copyup_ops[]; -+extern struct au_wbr_create_operations au_wbr_create_ops[]; -+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst); -+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex); -+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t bstart); -+ -+/* mvdown.c */ -+int au_mvdown(struct dentry *dentry, struct aufs_mvdown __user *arg); -+ -+#ifdef CONFIG_AUFS_FHSM -+/* fhsm.c */ -+ -+static inline pid_t au_fhsm_pid(struct au_fhsm *fhsm) -+{ -+ pid_t pid; -+ -+ spin_lock(&fhsm->fhsm_spin); -+ pid = fhsm->fhsm_pid; -+ spin_unlock(&fhsm->fhsm_spin); -+ -+ return pid; -+} -+ -+void au_fhsm_wrote(struct super_block *sb, aufs_bindex_t bindex, int force); -+void au_fhsm_wrote_all(struct super_block *sb, int force); -+int au_fhsm_fd(struct super_block *sb, int oflags); -+int au_fhsm_br_alloc(struct au_branch *br); -+void au_fhsm_set_bottom(struct super_block *sb, aufs_bindex_t bindex); -+void au_fhsm_fin(struct super_block *sb); -+void au_fhsm_init(struct au_sbinfo *sbinfo); -+void au_fhsm_set(struct au_sbinfo *sbinfo, unsigned int sec); -+void au_fhsm_show(struct seq_file *seq, struct au_sbinfo *sbinfo); -+#else -+AuStubVoid(au_fhsm_wrote, struct super_block *sb, aufs_bindex_t bindex, -+ int force) -+AuStubVoid(au_fhsm_wrote_all, struct super_block *sb, int force) -+AuStub(int, au_fhsm_fd, return -EOPNOTSUPP, struct super_block *sb, int oflags) -+AuStub(pid_t, au_fhsm_pid, return 0, struct au_fhsm *fhsm) -+AuStubInt0(au_fhsm_br_alloc, struct au_branch *br) -+AuStubVoid(au_fhsm_set_bottom, struct super_block *sb, aufs_bindex_t bindex) -+AuStubVoid(au_fhsm_fin, struct super_block *sb) -+AuStubVoid(au_fhsm_init, struct au_sbinfo *sbinfo) -+AuStubVoid(au_fhsm_set, struct au_sbinfo *sbinfo, unsigned int sec) -+AuStubVoid(au_fhsm_show, struct seq_file *seq, struct au_sbinfo 
*sbinfo) -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline struct au_sbinfo *au_sbi(struct super_block *sb) -+{ -+ return sb->s_fs_info; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_EXPORT -+int au_test_nfsd(void); -+void au_export_init(struct super_block *sb); -+void au_xigen_inc(struct inode *inode); -+int au_xigen_new(struct inode *inode); -+int au_xigen_set(struct super_block *sb, struct file *base); -+void au_xigen_clr(struct super_block *sb); -+ -+static inline int au_busy_or_stale(void) -+{ -+ if (!au_test_nfsd()) -+ return -EBUSY; -+ return -ESTALE; -+} -+#else -+AuStubInt0(au_test_nfsd, void) -+AuStubVoid(au_export_init, struct super_block *sb) -+AuStubVoid(au_xigen_inc, struct inode *inode) -+AuStubInt0(au_xigen_new, struct inode *inode) -+AuStubInt0(au_xigen_set, struct super_block *sb, struct file *base) -+AuStubVoid(au_xigen_clr, struct super_block *sb) -+AuStub(int, au_busy_or_stale, return -EBUSY, void) -+#endif /* CONFIG_AUFS_EXPORT */ -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_SBILIST -+/* module.c */ -+extern struct au_sphlhead au_sbilist; -+ -+static inline void au_sbilist_init(void) -+{ -+ au_sphl_init(&au_sbilist); -+} -+ -+static inline void au_sbilist_add(struct super_block *sb) -+{ -+ au_sphl_add(&au_sbi(sb)->si_list, &au_sbilist); -+} -+ -+static inline void au_sbilist_del(struct super_block *sb) -+{ -+ au_sphl_del(&au_sbi(sb)->si_list, &au_sbilist); -+} -+ -+#ifdef CONFIG_AUFS_MAGIC_SYSRQ -+static inline void au_sbilist_lock(void) -+{ -+ spin_lock(&au_sbilist.spin); -+} -+ -+static inline void au_sbilist_unlock(void) -+{ -+ spin_unlock(&au_sbilist.spin); -+} -+#define AuGFP_SBILIST GFP_ATOMIC -+#else -+AuStubVoid(au_sbilist_lock, void) -+AuStubVoid(au_sbilist_unlock, void) -+#define AuGFP_SBILIST GFP_NOFS -+#endif /* CONFIG_AUFS_MAGIC_SYSRQ */ -+#else 
-+AuStubVoid(au_sbilist_init, void) -+AuStubVoid(au_sbilist_add, struct super_block *sb) -+AuStubVoid(au_sbilist_del, struct super_block *sb) -+AuStubVoid(au_sbilist_lock, void) -+AuStubVoid(au_sbilist_unlock, void) -+#define AuGFP_SBILIST GFP_NOFS -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline void dbgaufs_si_null(struct au_sbinfo *sbinfo) -+{ -+ /* -+ * This function is a dynamic '__init' function actually, -+ * so the tiny check for si_rwsem is unnecessary. -+ */ -+ /* AuRwMustWriteLock(&sbinfo->si_rwsem); */ -+#ifdef CONFIG_DEBUG_FS -+ sbinfo->si_dbgaufs = NULL; -+ sbinfo->si_dbgaufs_plink = NULL; -+ sbinfo->si_dbgaufs_xib = NULL; -+#ifdef CONFIG_AUFS_EXPORT -+ sbinfo->si_dbgaufs_xigen = NULL; -+#endif -+#endif -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline void si_pid_idx_bit(int *idx, pid_t *bit) -+{ -+ /* the origin of pid is 1, but the bitmap's is 0 */ -+ *bit = current->pid - 1; -+ *idx = *bit / AU_PIDSTEP; -+ *bit %= AU_PIDSTEP; -+} -+ -+static inline int si_pid_test(struct super_block *sb) -+{ -+ pid_t bit; -+ int idx; -+ unsigned long *bitmap; -+ -+ si_pid_idx_bit(&idx, &bit); -+ bitmap = au_sbi(sb)->au_si_pid.pid_bitmap[idx]; -+ if (bitmap) -+ return test_bit(bit, bitmap); -+ return 0; -+} -+ -+static inline void si_pid_clr(struct super_block *sb) -+{ -+ pid_t bit; -+ int idx; -+ unsigned long *bitmap; -+ -+ si_pid_idx_bit(&idx, &bit); -+ bitmap = au_sbi(sb)->au_si_pid.pid_bitmap[idx]; -+ BUG_ON(!bitmap); -+ AuDebugOn(!test_bit(bit, bitmap)); -+ clear_bit(bit, bitmap); -+ /* smp_mb(); */ -+} -+ -+void si_pid_set(struct super_block *sb); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* lock superblock. 
mainly for entry point functions */ -+/* -+ * __si_read_lock, __si_write_lock, -+ * __si_read_unlock, __si_write_unlock, __si_downgrade_lock -+ */ -+AuSimpleRwsemFuncs(__si, struct super_block *sb, &au_sbi(sb)->si_rwsem); -+ -+#define SiMustNoWaiters(sb) AuRwMustNoWaiters(&au_sbi(sb)->si_rwsem) -+#define SiMustAnyLock(sb) AuRwMustAnyLock(&au_sbi(sb)->si_rwsem) -+#define SiMustWriteLock(sb) AuRwMustWriteLock(&au_sbi(sb)->si_rwsem) -+ -+static inline void si_noflush_read_lock(struct super_block *sb) -+{ -+ __si_read_lock(sb); -+ si_pid_set(sb); -+} -+ -+static inline int si_noflush_read_trylock(struct super_block *sb) -+{ -+ int locked; -+ -+ locked = __si_read_trylock(sb); -+ if (locked) -+ si_pid_set(sb); -+ return locked; -+} -+ -+static inline void si_noflush_write_lock(struct super_block *sb) -+{ -+ __si_write_lock(sb); -+ si_pid_set(sb); -+} -+ -+static inline int si_noflush_write_trylock(struct super_block *sb) -+{ -+ int locked; -+ -+ locked = __si_write_trylock(sb); -+ if (locked) -+ si_pid_set(sb); -+ return locked; -+} -+ -+#if 0 /* reserved */ -+static inline int si_read_trylock(struct super_block *sb, int flags) -+{ -+ if (au_ftest_lock(flags, FLUSH)) -+ au_nwt_flush(&au_sbi(sb)->si_nowait); -+ return si_noflush_read_trylock(sb); -+} -+#endif -+ -+static inline void si_read_unlock(struct super_block *sb) -+{ -+ si_pid_clr(sb); -+ __si_read_unlock(sb); -+} -+ -+#if 0 /* reserved */ -+static inline int si_write_trylock(struct super_block *sb, int flags) -+{ -+ if (au_ftest_lock(flags, FLUSH)) -+ au_nwt_flush(&au_sbi(sb)->si_nowait); -+ return si_noflush_write_trylock(sb); -+} -+#endif -+ -+static inline void si_write_unlock(struct super_block *sb) -+{ -+ si_pid_clr(sb); -+ __si_write_unlock(sb); -+} -+ -+#if 0 /* reserved */ -+static inline void si_downgrade_lock(struct super_block *sb) -+{ -+ __si_downgrade_lock(sb); -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline aufs_bindex_t 
au_sbend(struct super_block *sb) -+{ -+ SiMustAnyLock(sb); -+ return au_sbi(sb)->si_bend; -+} -+ -+static inline unsigned int au_mntflags(struct super_block *sb) -+{ -+ SiMustAnyLock(sb); -+ return au_sbi(sb)->si_mntflags; -+} -+ -+static inline unsigned int au_sigen(struct super_block *sb) -+{ -+ SiMustAnyLock(sb); -+ return au_sbi(sb)->si_generation; -+} -+ -+static inline void au_ninodes_inc(struct super_block *sb) -+{ -+ atomic_long_inc(&au_sbi(sb)->si_ninodes); -+} -+ -+static inline void au_ninodes_dec(struct super_block *sb) -+{ -+ AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_ninodes)); -+ atomic_long_dec(&au_sbi(sb)->si_ninodes); -+} -+ -+static inline void au_nfiles_inc(struct super_block *sb) -+{ -+ atomic_long_inc(&au_sbi(sb)->si_nfiles); -+} -+ -+static inline void au_nfiles_dec(struct super_block *sb) -+{ -+ AuDebugOn(!atomic_long_read(&au_sbi(sb)->si_nfiles)); -+ atomic_long_dec(&au_sbi(sb)->si_nfiles); -+} -+ -+static inline struct au_branch *au_sbr(struct super_block *sb, -+ aufs_bindex_t bindex) -+{ -+ SiMustAnyLock(sb); -+ return au_sbi(sb)->si_branch[0 + bindex]; -+} -+ -+static inline void au_xino_brid_set(struct super_block *sb, aufs_bindex_t brid) -+{ -+ SiMustWriteLock(sb); -+ au_sbi(sb)->si_xino_brid = brid; -+} -+ -+static inline aufs_bindex_t au_xino_brid(struct super_block *sb) -+{ -+ SiMustAnyLock(sb); -+ return au_sbi(sb)->si_xino_brid; -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_SUPER_H__ */ -diff --git a/fs/aufs/sysaufs.c b/fs/aufs/sysaufs.c -new file mode 100644 -index 0000000..75c9c24 ---- /dev/null -+++ b/fs/aufs/sysaufs.c -@@ -0,0 +1,104 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * sysfs interface and lifetime management -+ * they are necessary regardless sysfs is disabled. -+ */ -+ -+#include -+#include "aufs.h" -+ -+unsigned long sysaufs_si_mask; -+struct kset *sysaufs_kset; -+ -+#define AuSiAttr(_name) { \ -+ .attr = { .name = __stringify(_name), .mode = 0444 }, \ -+ .show = sysaufs_si_##_name, \ -+} -+ -+static struct sysaufs_si_attr sysaufs_si_attr_xi_path = AuSiAttr(xi_path); -+struct attribute *sysaufs_si_attrs[] = { -+ &sysaufs_si_attr_xi_path.attr, -+ NULL, -+}; -+ -+static const struct sysfs_ops au_sbi_ops = { -+ .show = sysaufs_si_show -+}; -+ -+static struct kobj_type au_sbi_ktype = { -+ .release = au_si_free, -+ .sysfs_ops = &au_sbi_ops, -+ .default_attrs = sysaufs_si_attrs -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+int sysaufs_si_init(struct au_sbinfo *sbinfo) -+{ -+ int err; -+ -+ sbinfo->si_kobj.kset = sysaufs_kset; -+ /* cf. 
sysaufs_name() */ -+ err = kobject_init_and_add -+ (&sbinfo->si_kobj, &au_sbi_ktype, /*&sysaufs_kset->kobj*/NULL, -+ SysaufsSiNamePrefix "%lx", sysaufs_si_id(sbinfo)); -+ -+ dbgaufs_si_null(sbinfo); -+ if (!err) { -+ err = dbgaufs_si_init(sbinfo); -+ if (unlikely(err)) -+ kobject_put(&sbinfo->si_kobj); -+ } -+ return err; -+} -+ -+void sysaufs_fin(void) -+{ -+ dbgaufs_fin(); -+ sysfs_remove_group(&sysaufs_kset->kobj, sysaufs_attr_group); -+ kset_unregister(sysaufs_kset); -+} -+ -+int __init sysaufs_init(void) -+{ -+ int err; -+ -+ do { -+ get_random_bytes(&sysaufs_si_mask, sizeof(sysaufs_si_mask)); -+ } while (!sysaufs_si_mask); -+ -+ err = -EINVAL; -+ sysaufs_kset = kset_create_and_add(AUFS_NAME, NULL, fs_kobj); -+ if (unlikely(!sysaufs_kset)) -+ goto out; -+ err = PTR_ERR(sysaufs_kset); -+ if (IS_ERR(sysaufs_kset)) -+ goto out; -+ err = sysfs_create_group(&sysaufs_kset->kobj, sysaufs_attr_group); -+ if (unlikely(err)) { -+ kset_unregister(sysaufs_kset); -+ goto out; -+ } -+ -+ err = dbgaufs_init(); -+ if (unlikely(err)) -+ sysaufs_fin(); -+out: -+ return err; -+} -diff --git a/fs/aufs/sysaufs.h b/fs/aufs/sysaufs.h -new file mode 100644 -index 0000000..14975c9 ---- /dev/null -+++ b/fs/aufs/sysaufs.h -@@ -0,0 +1,101 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * sysfs interface and mount lifetime management -+ */ -+ -+#ifndef __SYSAUFS_H__ -+#define __SYSAUFS_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include "module.h" -+ -+struct super_block; -+struct au_sbinfo; -+ -+struct sysaufs_si_attr { -+ struct attribute attr; -+ int (*show)(struct seq_file *seq, struct super_block *sb); -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* sysaufs.c */ -+extern unsigned long sysaufs_si_mask; -+extern struct kset *sysaufs_kset; -+extern struct attribute *sysaufs_si_attrs[]; -+int sysaufs_si_init(struct au_sbinfo *sbinfo); -+int __init sysaufs_init(void); -+void sysaufs_fin(void); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* some people doesn't like to show a pointer in kernel */ -+static inline unsigned long sysaufs_si_id(struct au_sbinfo *sbinfo) -+{ -+ return sysaufs_si_mask ^ (unsigned long)sbinfo; -+} -+ -+#define SysaufsSiNamePrefix "si_" -+#define SysaufsSiNameLen (sizeof(SysaufsSiNamePrefix) + 16) -+static inline void sysaufs_name(struct au_sbinfo *sbinfo, char *name) -+{ -+ snprintf(name, SysaufsSiNameLen, SysaufsSiNamePrefix "%lx", -+ sysaufs_si_id(sbinfo)); -+} -+ -+struct au_branch; -+#ifdef CONFIG_SYSFS -+/* sysfs.c */ -+extern struct attribute_group *sysaufs_attr_group; -+ -+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb); -+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr, -+ char *buf); -+long au_brinfo_ioctl(struct file *file, unsigned long arg); -+#ifdef CONFIG_COMPAT -+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg); -+#endif -+ -+void sysaufs_br_init(struct au_branch *br); -+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex); -+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex); -+ -+#define sysaufs_brs_init() do {} while (0) -+ -+#else -+#define sysaufs_attr_group NULL -+ -+AuStubInt0(sysaufs_si_xi_path, struct 
seq_file *seq, struct super_block *sb) -+AuStub(ssize_t, sysaufs_si_show, return 0, struct kobject *kobj, -+ struct attribute *attr, char *buf) -+AuStubVoid(sysaufs_br_init, struct au_branch *br) -+AuStubVoid(sysaufs_brs_add, struct super_block *sb, aufs_bindex_t bindex) -+AuStubVoid(sysaufs_brs_del, struct super_block *sb, aufs_bindex_t bindex) -+ -+static inline void sysaufs_brs_init(void) -+{ -+ sysaufs_brs = 0; -+} -+ -+#endif /* CONFIG_SYSFS */ -+ -+#endif /* __KERNEL__ */ -+#endif /* __SYSAUFS_H__ */ -diff --git a/fs/aufs/sysfs.c b/fs/aufs/sysfs.c -new file mode 100644 -index 0000000..b2d1888 ---- /dev/null -+++ b/fs/aufs/sysfs.c -@@ -0,0 +1,376 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * sysfs interface -+ */ -+ -+#include -+#include -+#include "aufs.h" -+ -+#ifdef CONFIG_AUFS_FS_MODULE -+/* this entry violates the "one line per file" policy of sysfs */ -+static ssize_t config_show(struct kobject *kobj, struct kobj_attribute *attr, -+ char *buf) -+{ -+ ssize_t err; -+ static char *conf = -+/* this file is generated at compiling */ -+#include "conf.str" -+ ; -+ -+ err = snprintf(buf, PAGE_SIZE, conf); -+ if (unlikely(err >= PAGE_SIZE)) -+ err = -EFBIG; -+ return err; -+} -+ -+static struct kobj_attribute au_config_attr = __ATTR_RO(config); -+#endif -+ -+static struct attribute *au_attr[] = { -+#ifdef CONFIG_AUFS_FS_MODULE -+ &au_config_attr.attr, -+#endif -+ NULL, /* need to NULL terminate the list of attributes */ -+}; -+ -+static struct attribute_group sysaufs_attr_group_body = { -+ .attrs = au_attr -+}; -+ -+struct attribute_group *sysaufs_attr_group = &sysaufs_attr_group_body; -+ -+/* ---------------------------------------------------------------------- */ -+ -+int sysaufs_si_xi_path(struct seq_file *seq, struct super_block *sb) -+{ -+ int err; -+ -+ SiMustAnyLock(sb); -+ -+ err = 0; -+ if (au_opt_test(au_mntflags(sb), XINO)) { -+ err = au_xino_path(seq, au_sbi(sb)->si_xib); -+ seq_putc(seq, '\n'); -+ } -+ return err; -+} -+ -+/* -+ * the lifetime of branch is independent from the entry under sysfs. -+ * sysfs handles the lifetime of the entry, and never call ->show() after it is -+ * unlinked. 
-+ */ -+static int sysaufs_si_br(struct seq_file *seq, struct super_block *sb, -+ aufs_bindex_t bindex, int idx) -+{ -+ int err; -+ struct path path; -+ struct dentry *root; -+ struct au_branch *br; -+ au_br_perm_str_t perm; -+ -+ AuDbg("b%d\n", bindex); -+ -+ err = 0; -+ root = sb->s_root; -+ di_read_lock_parent(root, !AuLock_IR); -+ br = au_sbr(sb, bindex); -+ -+ switch (idx) { -+ case AuBrSysfs_BR: -+ path.mnt = au_br_mnt(br); -+ path.dentry = au_h_dptr(root, bindex); -+ err = au_seq_path(seq, &path); -+ if (!err) { -+ au_optstr_br_perm(&perm, br->br_perm); -+ err = seq_printf(seq, "=%s\n", perm.a); -+ } -+ break; -+ case AuBrSysfs_BRID: -+ err = seq_printf(seq, "%d\n", br->br_id); -+ break; -+ } -+ di_read_unlock(root, !AuLock_IR); -+ if (err == -1) -+ err = -E2BIG; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static struct seq_file *au_seq(char *p, ssize_t len) -+{ -+ struct seq_file *seq; -+ -+ seq = kzalloc(sizeof(*seq), GFP_NOFS); -+ if (seq) { -+ /* mutex_init(&seq.lock); */ -+ seq->buf = p; -+ seq->size = len; -+ return seq; /* success */ -+ } -+ -+ seq = ERR_PTR(-ENOMEM); -+ return seq; -+} -+ -+#define SysaufsBr_PREFIX "br" -+#define SysaufsBrid_PREFIX "brid" -+ -+/* todo: file size may exceed PAGE_SIZE */ -+ssize_t sysaufs_si_show(struct kobject *kobj, struct attribute *attr, -+ char *buf) -+{ -+ ssize_t err; -+ int idx; -+ long l; -+ aufs_bindex_t bend; -+ struct au_sbinfo *sbinfo; -+ struct super_block *sb; -+ struct seq_file *seq; -+ char *name; -+ struct attribute **cattr; -+ -+ sbinfo = container_of(kobj, struct au_sbinfo, si_kobj); -+ sb = sbinfo->si_sb; -+ -+ /* -+ * prevent a race condition between sysfs and aufs. -+ * for instance, sysfs_file_read() calls sysfs_get_active_two() which -+ * prohibits maintaining the sysfs entries. -+ * hew we acquire read lock after sysfs_get_active_two(). 
-+ * on the other hand, the remount process may maintain the sysfs/aufs -+ * entries after acquiring write lock. -+ * it can cause a deadlock. -+ * simply we gave up processing read here. -+ */ -+ err = -EBUSY; -+ if (unlikely(!si_noflush_read_trylock(sb))) -+ goto out; -+ -+ seq = au_seq(buf, PAGE_SIZE); -+ err = PTR_ERR(seq); -+ if (IS_ERR(seq)) -+ goto out_unlock; -+ -+ name = (void *)attr->name; -+ cattr = sysaufs_si_attrs; -+ while (*cattr) { -+ if (!strcmp(name, (*cattr)->name)) { -+ err = container_of(*cattr, struct sysaufs_si_attr, attr) -+ ->show(seq, sb); -+ goto out_seq; -+ } -+ cattr++; -+ } -+ -+ if (!strncmp(name, SysaufsBrid_PREFIX, -+ sizeof(SysaufsBrid_PREFIX) - 1)) { -+ idx = AuBrSysfs_BRID; -+ name += sizeof(SysaufsBrid_PREFIX) - 1; -+ } else if (!strncmp(name, SysaufsBr_PREFIX, -+ sizeof(SysaufsBr_PREFIX) - 1)) { -+ idx = AuBrSysfs_BR; -+ name += sizeof(SysaufsBr_PREFIX) - 1; -+ } else -+ BUG(); -+ -+ err = kstrtol(name, 10, &l); -+ if (!err) { -+ bend = au_sbend(sb); -+ if (l <= bend) -+ err = sysaufs_si_br(seq, sb, (aufs_bindex_t)l, idx); -+ else -+ err = -ENOENT; -+ } -+ -+out_seq: -+ if (!err) { -+ err = seq->count; -+ /* sysfs limit */ -+ if (unlikely(err == PAGE_SIZE)) -+ err = -EFBIG; -+ } -+ kfree(seq); -+out_unlock: -+ si_read_unlock(sb); -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_brinfo(struct super_block *sb, union aufs_brinfo __user *arg) -+{ -+ int err; -+ int16_t brid; -+ aufs_bindex_t bindex, bend; -+ size_t sz; -+ char *buf; -+ struct seq_file *seq; -+ struct au_branch *br; -+ -+ si_read_lock(sb, AuLock_FLUSH); -+ bend = au_sbend(sb); -+ err = bend + 1; -+ if (!arg) -+ goto out; -+ -+ err = -ENOMEM; -+ buf = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!buf)) -+ goto out; -+ -+ seq = au_seq(buf, PAGE_SIZE); -+ err = PTR_ERR(seq); -+ if (IS_ERR(seq)) -+ goto out_buf; -+ -+ sz = sizeof(*arg) - offsetof(union aufs_brinfo, path); -+ for (bindex 
= 0; bindex <= bend; bindex++, arg++) { -+ err = !access_ok(VERIFY_WRITE, arg, sizeof(*arg)); -+ if (unlikely(err)) -+ break; -+ -+ br = au_sbr(sb, bindex); -+ brid = br->br_id; -+ BUILD_BUG_ON(sizeof(brid) != sizeof(arg->id)); -+ err = __put_user(brid, &arg->id); -+ if (unlikely(err)) -+ break; -+ -+ BUILD_BUG_ON(sizeof(br->br_perm) != sizeof(arg->perm)); -+ err = __put_user(br->br_perm, &arg->perm); -+ if (unlikely(err)) -+ break; -+ -+ err = au_seq_path(seq, &br->br_path); -+ if (unlikely(err)) -+ break; -+ err = seq_putc(seq, '\0'); -+ if (!err && seq->count <= sz) { -+ err = copy_to_user(arg->path, seq->buf, seq->count); -+ seq->count = 0; -+ if (unlikely(err)) -+ break; -+ } else { -+ err = -E2BIG; -+ goto out_seq; -+ } -+ } -+ if (unlikely(err)) -+ err = -EFAULT; -+ -+out_seq: -+ kfree(seq); -+out_buf: -+ free_page((unsigned long)buf); -+out: -+ si_read_unlock(sb); -+ return err; -+} -+ -+long au_brinfo_ioctl(struct file *file, unsigned long arg) -+{ -+ return au_brinfo(file->f_dentry->d_sb, (void __user *)arg); -+} -+ -+#ifdef CONFIG_COMPAT -+long au_brinfo_compat_ioctl(struct file *file, unsigned long arg) -+{ -+ return au_brinfo(file->f_dentry->d_sb, compat_ptr(arg)); -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+void sysaufs_br_init(struct au_branch *br) -+{ -+ int i; -+ struct au_brsysfs *br_sysfs; -+ struct attribute *attr; -+ -+ br_sysfs = br->br_sysfs; -+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) { -+ attr = &br_sysfs->attr; -+ sysfs_attr_init(attr); -+ attr->name = br_sysfs->name; -+ attr->mode = S_IRUGO; -+ br_sysfs++; -+ } -+} -+ -+void sysaufs_brs_del(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ struct au_branch *br; -+ struct kobject *kobj; -+ struct au_brsysfs *br_sysfs; -+ int i; -+ aufs_bindex_t bend; -+ -+ dbgaufs_brs_del(sb, bindex); -+ -+ if (!sysaufs_brs) -+ return; -+ -+ kobj = &au_sbi(sb)->si_kobj; -+ bend = au_sbend(sb); -+ for (; bindex <= bend; bindex++) { -+ br = 
au_sbr(sb, bindex); -+ br_sysfs = br->br_sysfs; -+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) { -+ sysfs_remove_file(kobj, &br_sysfs->attr); -+ br_sysfs++; -+ } -+ } -+} -+ -+void sysaufs_brs_add(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ int err, i; -+ aufs_bindex_t bend; -+ struct kobject *kobj; -+ struct au_branch *br; -+ struct au_brsysfs *br_sysfs; -+ -+ dbgaufs_brs_add(sb, bindex); -+ -+ if (!sysaufs_brs) -+ return; -+ -+ kobj = &au_sbi(sb)->si_kobj; -+ bend = au_sbend(sb); -+ for (; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ br_sysfs = br->br_sysfs; -+ snprintf(br_sysfs[AuBrSysfs_BR].name, sizeof(br_sysfs->name), -+ SysaufsBr_PREFIX "%d", bindex); -+ snprintf(br_sysfs[AuBrSysfs_BRID].name, sizeof(br_sysfs->name), -+ SysaufsBrid_PREFIX "%d", bindex); -+ for (i = 0; i < ARRAY_SIZE(br->br_sysfs); i++) { -+ err = sysfs_create_file(kobj, &br_sysfs->attr); -+ if (unlikely(err)) -+ pr_warn("failed %s under sysfs(%d)\n", -+ br_sysfs->name, err); -+ br_sysfs++; -+ } -+ } -+} -diff --git a/fs/aufs/sysrq.c b/fs/aufs/sysrq.c -new file mode 100644 -index 0000000..057c23e ---- /dev/null -+++ b/fs/aufs/sysrq.c -@@ -0,0 +1,157 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * magic sysrq hanlder -+ */ -+ -+/* #include */ -+#include -+#include "aufs.h" -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void sysrq_sb(struct super_block *sb) -+{ -+ char *plevel; -+ struct au_sbinfo *sbinfo; -+ struct file *file; -+ struct au_sphlhead *files; -+ struct au_finfo *finfo; -+ -+ plevel = au_plevel; -+ au_plevel = KERN_WARNING; -+ -+ /* since we define pr_fmt, call printk directly */ -+#define pr(str) printk(KERN_WARNING AUFS_NAME ": " str) -+ -+ sbinfo = au_sbi(sb); -+ printk(KERN_WARNING "si=%lx\n", sysaufs_si_id(sbinfo)); -+ pr("superblock\n"); -+ au_dpri_sb(sb); -+ -+#if 0 -+ pr("root dentry\n"); -+ au_dpri_dentry(sb->s_root); -+ pr("root inode\n"); -+ au_dpri_inode(sb->s_root->d_inode); -+#endif -+ -+#if 0 -+ do { -+ int err, i, j, ndentry; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ -+ err = au_dpages_init(&dpages, GFP_ATOMIC); -+ if (unlikely(err)) -+ break; -+ err = au_dcsub_pages(&dpages, sb->s_root, NULL, NULL); -+ if (!err) -+ for (i = 0; i < dpages.ndpage; i++) { -+ dpage = dpages.dpages + i; -+ ndentry = dpage->ndentry; -+ for (j = 0; j < ndentry; j++) -+ au_dpri_dentry(dpage->dentries[j]); -+ } -+ au_dpages_free(&dpages); -+ } while (0); -+#endif -+ -+#if 1 -+ { -+ struct inode *i; -+ -+ pr("isolated inode\n"); -+ spin_lock(&inode_sb_list_lock); -+ list_for_each_entry(i, &sb->s_inodes, i_sb_list) { -+ spin_lock(&i->i_lock); -+ if (1 || hlist_empty(&i->i_dentry)) -+ au_dpri_inode(i); -+ spin_unlock(&i->i_lock); -+ } -+ spin_unlock(&inode_sb_list_lock); -+ } -+#endif -+ pr("files\n"); -+ files = &au_sbi(sb)->si_files; -+ spin_lock(&files->spin); -+ hlist_for_each_entry(finfo, &files->head, fi_hlist) { -+ umode_t mode; -+ -+ file = finfo->fi_file; -+ mode = file_inode(file)->i_mode; -+ if (!special_file(mode)) -+ au_dpri_file(file); -+ } -+ spin_unlock(&files->spin); -+ pr("done\n"); -+ -+#undef pr -+ au_plevel = plevel; -+} -+ -+/* 
---------------------------------------------------------------------- */ -+ -+/* module parameter */ -+static char *aufs_sysrq_key = "a"; -+module_param_named(sysrq, aufs_sysrq_key, charp, S_IRUGO); -+MODULE_PARM_DESC(sysrq, "MagicSysRq key for " AUFS_NAME); -+ -+static void au_sysrq(int key __maybe_unused) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ lockdep_off(); -+ au_sbilist_lock(); -+ hlist_for_each_entry(sbinfo, &au_sbilist.head, si_list) -+ sysrq_sb(sbinfo->si_sb); -+ au_sbilist_unlock(); -+ lockdep_on(); -+} -+ -+static struct sysrq_key_op au_sysrq_op = { -+ .handler = au_sysrq, -+ .help_msg = "Aufs", -+ .action_msg = "Aufs", -+ .enable_mask = SYSRQ_ENABLE_DUMP -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+int __init au_sysrq_init(void) -+{ -+ int err; -+ char key; -+ -+ err = -1; -+ key = *aufs_sysrq_key; -+ if ('a' <= key && key <= 'z') -+ err = register_sysrq_key(key, &au_sysrq_op); -+ if (unlikely(err)) -+ pr_err("err %d, sysrq=%c\n", err, key); -+ return err; -+} -+ -+void au_sysrq_fin(void) -+{ -+ int err; -+ -+ err = unregister_sysrq_key(*aufs_sysrq_key, &au_sysrq_op); -+ if (unlikely(err)) -+ pr_err("err %d (ignored)\n", err); -+} -diff --git a/fs/aufs/vdir.c b/fs/aufs/vdir.c -new file mode 100644 -index 0000000..f942d16 ---- /dev/null -+++ b/fs/aufs/vdir.c -@@ -0,0 +1,888 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * virtual or vertical directory -+ */ -+ -+#include "aufs.h" -+ -+static unsigned int calc_size(int nlen) -+{ -+ return ALIGN(sizeof(struct au_vdir_de) + nlen, sizeof(ino_t)); -+} -+ -+static int set_deblk_end(union au_vdir_deblk_p *p, -+ union au_vdir_deblk_p *deblk_end) -+{ -+ if (calc_size(0) <= deblk_end->deblk - p->deblk) { -+ p->de->de_str.len = 0; -+ /* smp_mb(); */ -+ return 0; -+ } -+ return -1; /* error */ -+} -+ -+/* returns true or false */ -+static int is_deblk_end(union au_vdir_deblk_p *p, -+ union au_vdir_deblk_p *deblk_end) -+{ -+ if (calc_size(0) <= deblk_end->deblk - p->deblk) -+ return !p->de->de_str.len; -+ return 1; -+} -+ -+static unsigned char *last_deblk(struct au_vdir *vdir) -+{ -+ return vdir->vd_deblk[vdir->vd_nblk - 1]; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* estimate the appropriate size for name hash table */ -+unsigned int au_rdhash_est(loff_t sz) -+{ -+ unsigned int n; -+ -+ n = UINT_MAX; -+ sz >>= 10; -+ if (sz < n) -+ n = sz; -+ if (sz < AUFS_RDHASH_DEF) -+ n = AUFS_RDHASH_DEF; -+ /* pr_info("n %u\n", n); */ -+ return n; -+} -+ -+/* -+ * the allocated memory has to be freed by -+ * au_nhash_wh_free() or au_nhash_de_free(). 
-+ */ -+int au_nhash_alloc(struct au_nhash *nhash, unsigned int num_hash, gfp_t gfp) -+{ -+ struct hlist_head *head; -+ unsigned int u; -+ size_t sz; -+ -+ sz = sizeof(*nhash->nh_head) * num_hash; -+ head = kmalloc(sz, gfp); -+ if (head) { -+ nhash->nh_num = num_hash; -+ nhash->nh_head = head; -+ for (u = 0; u < num_hash; u++) -+ INIT_HLIST_HEAD(head++); -+ return 0; /* success */ -+ } -+ -+ return -ENOMEM; -+} -+ -+static void nhash_count(struct hlist_head *head) -+{ -+#if 0 -+ unsigned long n; -+ struct hlist_node *pos; -+ -+ n = 0; -+ hlist_for_each(pos, head) -+ n++; -+ pr_info("%lu\n", n); -+#endif -+} -+ -+static void au_nhash_wh_do_free(struct hlist_head *head) -+{ -+ struct au_vdir_wh *pos; -+ struct hlist_node *node; -+ -+ hlist_for_each_entry_safe(pos, node, head, wh_hash) -+ kfree(pos); -+} -+ -+static void au_nhash_de_do_free(struct hlist_head *head) -+{ -+ struct au_vdir_dehstr *pos; -+ struct hlist_node *node; -+ -+ hlist_for_each_entry_safe(pos, node, head, hash) -+ au_cache_free_vdir_dehstr(pos); -+} -+ -+static void au_nhash_do_free(struct au_nhash *nhash, -+ void (*free)(struct hlist_head *head)) -+{ -+ unsigned int n; -+ struct hlist_head *head; -+ -+ n = nhash->nh_num; -+ if (!n) -+ return; -+ -+ head = nhash->nh_head; -+ while (n-- > 0) { -+ nhash_count(head); -+ free(head++); -+ } -+ kfree(nhash->nh_head); -+} -+ -+void au_nhash_wh_free(struct au_nhash *whlist) -+{ -+ au_nhash_do_free(whlist, au_nhash_wh_do_free); -+} -+ -+static void au_nhash_de_free(struct au_nhash *delist) -+{ -+ au_nhash_do_free(delist, au_nhash_de_do_free); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_nhash_test_longer_wh(struct au_nhash *whlist, aufs_bindex_t btgt, -+ int limit) -+{ -+ int num; -+ unsigned int u, n; -+ struct hlist_head *head; -+ struct au_vdir_wh *pos; -+ -+ num = 0; -+ n = whlist->nh_num; -+ head = whlist->nh_head; -+ for (u = 0; u < n; u++, head++) -+ hlist_for_each_entry(pos, head, wh_hash) -+ if 
(pos->wh_bindex == btgt && ++num > limit) -+ return 1; -+ return 0; -+} -+ -+static struct hlist_head *au_name_hash(struct au_nhash *nhash, -+ unsigned char *name, -+ unsigned int len) -+{ -+ unsigned int v; -+ /* const unsigned int magic_bit = 12; */ -+ -+ AuDebugOn(!nhash->nh_num || !nhash->nh_head); -+ -+ v = 0; -+ while (len--) -+ v += *name++; -+ /* v = hash_long(v, magic_bit); */ -+ v %= nhash->nh_num; -+ return nhash->nh_head + v; -+} -+ -+static int au_nhash_test_name(struct au_vdir_destr *str, const char *name, -+ int nlen) -+{ -+ return str->len == nlen && !memcmp(str->name, name, nlen); -+} -+ -+/* returns found or not */ -+int au_nhash_test_known_wh(struct au_nhash *whlist, char *name, int nlen) -+{ -+ struct hlist_head *head; -+ struct au_vdir_wh *pos; -+ struct au_vdir_destr *str; -+ -+ head = au_name_hash(whlist, name, nlen); -+ hlist_for_each_entry(pos, head, wh_hash) { -+ str = &pos->wh_str; -+ AuDbg("%.*s\n", str->len, str->name); -+ if (au_nhash_test_name(str, name, nlen)) -+ return 1; -+ } -+ return 0; -+} -+ -+/* returns found(true) or not */ -+static int test_known(struct au_nhash *delist, char *name, int nlen) -+{ -+ struct hlist_head *head; -+ struct au_vdir_dehstr *pos; -+ struct au_vdir_destr *str; -+ -+ head = au_name_hash(delist, name, nlen); -+ hlist_for_each_entry(pos, head, hash) { -+ str = pos->str; -+ AuDbg("%.*s\n", str->len, str->name); -+ if (au_nhash_test_name(str, name, nlen)) -+ return 1; -+ } -+ return 0; -+} -+ -+static void au_shwh_init_wh(struct au_vdir_wh *wh, ino_t ino, -+ unsigned char d_type) -+{ -+#ifdef CONFIG_AUFS_SHWH -+ wh->wh_ino = ino; -+ wh->wh_type = d_type; -+#endif -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_nhash_append_wh(struct au_nhash *whlist, char *name, int nlen, ino_t ino, -+ unsigned int d_type, aufs_bindex_t bindex, -+ unsigned char shwh) -+{ -+ int err; -+ struct au_vdir_destr *str; -+ struct au_vdir_wh *wh; -+ -+ AuDbg("%.*s\n", nlen, name); 
-+ AuDebugOn(!whlist->nh_num || !whlist->nh_head); -+ -+ err = -ENOMEM; -+ wh = kmalloc(sizeof(*wh) + nlen, GFP_NOFS); -+ if (unlikely(!wh)) -+ goto out; -+ -+ err = 0; -+ wh->wh_bindex = bindex; -+ if (shwh) -+ au_shwh_init_wh(wh, ino, d_type); -+ str = &wh->wh_str; -+ str->len = nlen; -+ memcpy(str->name, name, nlen); -+ hlist_add_head(&wh->wh_hash, au_name_hash(whlist, name, nlen)); -+ /* smp_mb(); */ -+ -+out: -+ return err; -+} -+ -+static int append_deblk(struct au_vdir *vdir) -+{ -+ int err; -+ unsigned long ul; -+ const unsigned int deblk_sz = vdir->vd_deblk_sz; -+ union au_vdir_deblk_p p, deblk_end; -+ unsigned char **o; -+ -+ err = -ENOMEM; -+ o = krealloc(vdir->vd_deblk, sizeof(*o) * (vdir->vd_nblk + 1), -+ GFP_NOFS); -+ if (unlikely(!o)) -+ goto out; -+ -+ vdir->vd_deblk = o; -+ p.deblk = kmalloc(deblk_sz, GFP_NOFS); -+ if (p.deblk) { -+ ul = vdir->vd_nblk++; -+ vdir->vd_deblk[ul] = p.deblk; -+ vdir->vd_last.ul = ul; -+ vdir->vd_last.p.deblk = p.deblk; -+ deblk_end.deblk = p.deblk + deblk_sz; -+ err = set_deblk_end(&p, &deblk_end); -+ } -+ -+out: -+ return err; -+} -+ -+static int append_de(struct au_vdir *vdir, char *name, int nlen, ino_t ino, -+ unsigned int d_type, struct au_nhash *delist) -+{ -+ int err; -+ unsigned int sz; -+ const unsigned int deblk_sz = vdir->vd_deblk_sz; -+ union au_vdir_deblk_p p, *room, deblk_end; -+ struct au_vdir_dehstr *dehstr; -+ -+ p.deblk = last_deblk(vdir); -+ deblk_end.deblk = p.deblk + deblk_sz; -+ room = &vdir->vd_last.p; -+ AuDebugOn(room->deblk < p.deblk || deblk_end.deblk <= room->deblk -+ || !is_deblk_end(room, &deblk_end)); -+ -+ sz = calc_size(nlen); -+ if (unlikely(sz > deblk_end.deblk - room->deblk)) { -+ err = append_deblk(vdir); -+ if (unlikely(err)) -+ goto out; -+ -+ p.deblk = last_deblk(vdir); -+ deblk_end.deblk = p.deblk + deblk_sz; -+ /* smp_mb(); */ -+ AuDebugOn(room->deblk != p.deblk); -+ } -+ -+ err = -ENOMEM; -+ dehstr = au_cache_alloc_vdir_dehstr(); -+ if (unlikely(!dehstr)) -+ goto out; -+ -+ 
dehstr->str = &room->de->de_str; -+ hlist_add_head(&dehstr->hash, au_name_hash(delist, name, nlen)); -+ room->de->de_ino = ino; -+ room->de->de_type = d_type; -+ room->de->de_str.len = nlen; -+ memcpy(room->de->de_str.name, name, nlen); -+ -+ err = 0; -+ room->deblk += sz; -+ if (unlikely(set_deblk_end(room, &deblk_end))) -+ err = append_deblk(vdir); -+ /* smp_mb(); */ -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_vdir_free(struct au_vdir *vdir) -+{ -+ unsigned char **deblk; -+ -+ deblk = vdir->vd_deblk; -+ while (vdir->vd_nblk--) -+ kfree(*deblk++); -+ kfree(vdir->vd_deblk); -+ au_cache_free_vdir(vdir); -+} -+ -+static struct au_vdir *alloc_vdir(struct file *file) -+{ -+ struct au_vdir *vdir; -+ struct super_block *sb; -+ int err; -+ -+ sb = file->f_dentry->d_sb; -+ SiMustAnyLock(sb); -+ -+ err = -ENOMEM; -+ vdir = au_cache_alloc_vdir(); -+ if (unlikely(!vdir)) -+ goto out; -+ -+ vdir->vd_deblk = kzalloc(sizeof(*vdir->vd_deblk), GFP_NOFS); -+ if (unlikely(!vdir->vd_deblk)) -+ goto out_free; -+ -+ vdir->vd_deblk_sz = au_sbi(sb)->si_rdblk; -+ if (!vdir->vd_deblk_sz) { -+ /* estimate the appropriate size for deblk */ -+ vdir->vd_deblk_sz = au_dir_size(file, /*dentry*/NULL); -+ /* pr_info("vd_deblk_sz %u\n", vdir->vd_deblk_sz); */ -+ } -+ vdir->vd_nblk = 0; -+ vdir->vd_version = 0; -+ vdir->vd_jiffy = 0; -+ err = append_deblk(vdir); -+ if (!err) -+ return vdir; /* success */ -+ -+ kfree(vdir->vd_deblk); -+ -+out_free: -+ au_cache_free_vdir(vdir); -+out: -+ vdir = ERR_PTR(err); -+ return vdir; -+} -+ -+static int reinit_vdir(struct au_vdir *vdir) -+{ -+ int err; -+ union au_vdir_deblk_p p, deblk_end; -+ -+ while (vdir->vd_nblk > 1) { -+ kfree(vdir->vd_deblk[vdir->vd_nblk - 1]); -+ /* vdir->vd_deblk[vdir->vd_nblk - 1] = NULL; */ -+ vdir->vd_nblk--; -+ } -+ p.deblk = vdir->vd_deblk[0]; -+ deblk_end.deblk = p.deblk + vdir->vd_deblk_sz; -+ err = set_deblk_end(&p, &deblk_end); -+ /* keep 
vd_dblk_sz */ -+ vdir->vd_last.ul = 0; -+ vdir->vd_last.p.deblk = vdir->vd_deblk[0]; -+ vdir->vd_version = 0; -+ vdir->vd_jiffy = 0; -+ /* smp_mb(); */ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define AuFillVdir_CALLED 1 -+#define AuFillVdir_WHABLE (1 << 1) -+#define AuFillVdir_SHWH (1 << 2) -+#define au_ftest_fillvdir(flags, name) ((flags) & AuFillVdir_##name) -+#define au_fset_fillvdir(flags, name) \ -+ do { (flags) |= AuFillVdir_##name; } while (0) -+#define au_fclr_fillvdir(flags, name) \ -+ do { (flags) &= ~AuFillVdir_##name; } while (0) -+ -+#ifndef CONFIG_AUFS_SHWH -+#undef AuFillVdir_SHWH -+#define AuFillVdir_SHWH 0 -+#endif -+ -+struct fillvdir_arg { -+ struct dir_context ctx; -+ struct file *file; -+ struct au_vdir *vdir; -+ struct au_nhash delist; -+ struct au_nhash whlist; -+ aufs_bindex_t bindex; -+ unsigned int flags; -+ int err; -+}; -+ -+static int fillvdir(struct dir_context *ctx, const char *__name, int nlen, -+ loff_t offset __maybe_unused, u64 h_ino, -+ unsigned int d_type) -+{ -+ struct fillvdir_arg *arg = container_of(ctx, struct fillvdir_arg, ctx); -+ char *name = (void *)__name; -+ struct super_block *sb; -+ ino_t ino; -+ const unsigned char shwh = !!au_ftest_fillvdir(arg->flags, SHWH); -+ -+ arg->err = 0; -+ sb = arg->file->f_dentry->d_sb; -+ au_fset_fillvdir(arg->flags, CALLED); -+ /* smp_mb(); */ -+ if (nlen <= AUFS_WH_PFX_LEN -+ || memcmp(name, AUFS_WH_PFX, AUFS_WH_PFX_LEN)) { -+ if (test_known(&arg->delist, name, nlen) -+ || au_nhash_test_known_wh(&arg->whlist, name, nlen)) -+ goto out; /* already exists or whiteouted */ -+ -+ arg->err = au_ino(sb, arg->bindex, h_ino, d_type, &ino); -+ if (!arg->err) { -+ if (unlikely(nlen > AUFS_MAX_NAMELEN)) -+ d_type = DT_UNKNOWN; -+ arg->err = append_de(arg->vdir, name, nlen, ino, -+ d_type, &arg->delist); -+ } -+ } else if (au_ftest_fillvdir(arg->flags, WHABLE)) { -+ name += AUFS_WH_PFX_LEN; -+ nlen -= AUFS_WH_PFX_LEN; -+ if 
(au_nhash_test_known_wh(&arg->whlist, name, nlen)) -+ goto out; /* already whiteouted */ -+ -+ if (shwh) -+ arg->err = au_wh_ino(sb, arg->bindex, h_ino, d_type, -+ &ino); -+ if (!arg->err) { -+ if (nlen <= AUFS_MAX_NAMELEN + AUFS_WH_PFX_LEN) -+ d_type = DT_UNKNOWN; -+ arg->err = au_nhash_append_wh -+ (&arg->whlist, name, nlen, ino, d_type, -+ arg->bindex, shwh); -+ } -+ } -+ -+out: -+ if (!arg->err) -+ arg->vdir->vd_jiffy = jiffies; -+ /* smp_mb(); */ -+ AuTraceErr(arg->err); -+ return arg->err; -+} -+ -+static int au_handle_shwh(struct super_block *sb, struct au_vdir *vdir, -+ struct au_nhash *whlist, struct au_nhash *delist) -+{ -+#ifdef CONFIG_AUFS_SHWH -+ int err; -+ unsigned int nh, u; -+ struct hlist_head *head; -+ struct au_vdir_wh *pos; -+ struct hlist_node *n; -+ char *p, *o; -+ struct au_vdir_destr *destr; -+ -+ AuDebugOn(!au_opt_test(au_mntflags(sb), SHWH)); -+ -+ err = -ENOMEM; -+ o = p = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!p)) -+ goto out; -+ -+ err = 0; -+ nh = whlist->nh_num; -+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN); -+ p += AUFS_WH_PFX_LEN; -+ for (u = 0; u < nh; u++) { -+ head = whlist->nh_head + u; -+ hlist_for_each_entry_safe(pos, n, head, wh_hash) { -+ destr = &pos->wh_str; -+ memcpy(p, destr->name, destr->len); -+ err = append_de(vdir, o, destr->len + AUFS_WH_PFX_LEN, -+ pos->wh_ino, pos->wh_type, delist); -+ if (unlikely(err)) -+ break; -+ } -+ } -+ -+ free_page((unsigned long)o); -+ -+out: -+ AuTraceErr(err); -+ return err; -+#else -+ return 0; -+#endif -+} -+ -+static int au_do_read_vdir(struct fillvdir_arg *arg) -+{ -+ int err; -+ unsigned int rdhash; -+ loff_t offset; -+ aufs_bindex_t bend, bindex, bstart; -+ unsigned char shwh; -+ struct file *hf, *file; -+ struct super_block *sb; -+ -+ file = arg->file; -+ sb = file->f_dentry->d_sb; -+ SiMustAnyLock(sb); -+ -+ rdhash = au_sbi(sb)->si_rdhash; -+ if (!rdhash) -+ rdhash = au_rdhash_est(au_dir_size(file, /*dentry*/NULL)); -+ err = au_nhash_alloc(&arg->delist, rdhash, 
GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ err = au_nhash_alloc(&arg->whlist, rdhash, GFP_NOFS); -+ if (unlikely(err)) -+ goto out_delist; -+ -+ err = 0; -+ arg->flags = 0; -+ shwh = 0; -+ if (au_opt_test(au_mntflags(sb), SHWH)) { -+ shwh = 1; -+ au_fset_fillvdir(arg->flags, SHWH); -+ } -+ bstart = au_fbstart(file); -+ bend = au_fbend_dir(file); -+ for (bindex = bstart; !err && bindex <= bend; bindex++) { -+ hf = au_hf_dir(file, bindex); -+ if (!hf) -+ continue; -+ -+ offset = vfsub_llseek(hf, 0, SEEK_SET); -+ err = offset; -+ if (unlikely(offset)) -+ break; -+ -+ arg->bindex = bindex; -+ au_fclr_fillvdir(arg->flags, WHABLE); -+ if (shwh -+ || (bindex != bend -+ && au_br_whable(au_sbr_perm(sb, bindex)))) -+ au_fset_fillvdir(arg->flags, WHABLE); -+ do { -+ arg->err = 0; -+ au_fclr_fillvdir(arg->flags, CALLED); -+ /* smp_mb(); */ -+ err = vfsub_iterate_dir(hf, &arg->ctx); -+ if (err >= 0) -+ err = arg->err; -+ } while (!err && au_ftest_fillvdir(arg->flags, CALLED)); -+ -+ /* -+ * dir_relax() may be good for concurrency, but aufs should not -+ * use it since it will cause a lockdep problem. 
-+ */ -+ } -+ -+ if (!err && shwh) -+ err = au_handle_shwh(sb, arg->vdir, &arg->whlist, &arg->delist); -+ -+ au_nhash_wh_free(&arg->whlist); -+ -+out_delist: -+ au_nhash_de_free(&arg->delist); -+out: -+ return err; -+} -+ -+static int read_vdir(struct file *file, int may_read) -+{ -+ int err; -+ unsigned long expire; -+ unsigned char do_read; -+ struct fillvdir_arg arg = { -+ .ctx = { -+ .actor = au_diractor(fillvdir) -+ } -+ }; -+ struct inode *inode; -+ struct au_vdir *vdir, *allocated; -+ -+ err = 0; -+ inode = file_inode(file); -+ IMustLock(inode); -+ SiMustAnyLock(inode->i_sb); -+ -+ allocated = NULL; -+ do_read = 0; -+ expire = au_sbi(inode->i_sb)->si_rdcache; -+ vdir = au_ivdir(inode); -+ if (!vdir) { -+ do_read = 1; -+ vdir = alloc_vdir(file); -+ err = PTR_ERR(vdir); -+ if (IS_ERR(vdir)) -+ goto out; -+ err = 0; -+ allocated = vdir; -+ } else if (may_read -+ && (inode->i_version != vdir->vd_version -+ || time_after(jiffies, vdir->vd_jiffy + expire))) { -+ do_read = 1; -+ err = reinit_vdir(vdir); -+ if (unlikely(err)) -+ goto out; -+ } -+ -+ if (!do_read) -+ return 0; /* success */ -+ -+ arg.file = file; -+ arg.vdir = vdir; -+ err = au_do_read_vdir(&arg); -+ if (!err) { -+ /* file->f_pos = 0; */ /* todo: ctx->pos? 
*/ -+ vdir->vd_version = inode->i_version; -+ vdir->vd_last.ul = 0; -+ vdir->vd_last.p.deblk = vdir->vd_deblk[0]; -+ if (allocated) -+ au_set_ivdir(inode, allocated); -+ } else if (allocated) -+ au_vdir_free(allocated); -+ -+out: -+ return err; -+} -+ -+static int copy_vdir(struct au_vdir *tgt, struct au_vdir *src) -+{ -+ int err, rerr; -+ unsigned long ul, n; -+ const unsigned int deblk_sz = src->vd_deblk_sz; -+ -+ AuDebugOn(tgt->vd_nblk != 1); -+ -+ err = -ENOMEM; -+ if (tgt->vd_nblk < src->vd_nblk) { -+ unsigned char **p; -+ -+ p = krealloc(tgt->vd_deblk, sizeof(*p) * src->vd_nblk, -+ GFP_NOFS); -+ if (unlikely(!p)) -+ goto out; -+ tgt->vd_deblk = p; -+ } -+ -+ if (tgt->vd_deblk_sz != deblk_sz) { -+ unsigned char *p; -+ -+ tgt->vd_deblk_sz = deblk_sz; -+ p = krealloc(tgt->vd_deblk[0], deblk_sz, GFP_NOFS); -+ if (unlikely(!p)) -+ goto out; -+ tgt->vd_deblk[0] = p; -+ } -+ memcpy(tgt->vd_deblk[0], src->vd_deblk[0], deblk_sz); -+ tgt->vd_version = src->vd_version; -+ tgt->vd_jiffy = src->vd_jiffy; -+ -+ n = src->vd_nblk; -+ for (ul = 1; ul < n; ul++) { -+ tgt->vd_deblk[ul] = kmemdup(src->vd_deblk[ul], deblk_sz, -+ GFP_NOFS); -+ if (unlikely(!tgt->vd_deblk[ul])) -+ goto out; -+ tgt->vd_nblk++; -+ } -+ tgt->vd_nblk = n; -+ tgt->vd_last.ul = tgt->vd_last.ul; -+ tgt->vd_last.p.deblk = tgt->vd_deblk[tgt->vd_last.ul]; -+ tgt->vd_last.p.deblk += src->vd_last.p.deblk -+ - src->vd_deblk[src->vd_last.ul]; -+ /* smp_mb(); */ -+ return 0; /* success */ -+ -+out: -+ rerr = reinit_vdir(tgt); -+ BUG_ON(rerr); -+ return err; -+} -+ -+int au_vdir_init(struct file *file) -+{ -+ int err; -+ struct inode *inode; -+ struct au_vdir *vdir_cache, *allocated; -+ -+ /* test file->f_pos here instead of ctx->pos */ -+ err = read_vdir(file, !file->f_pos); -+ if (unlikely(err)) -+ goto out; -+ -+ allocated = NULL; -+ vdir_cache = au_fvdir_cache(file); -+ if (!vdir_cache) { -+ vdir_cache = alloc_vdir(file); -+ err = PTR_ERR(vdir_cache); -+ if (IS_ERR(vdir_cache)) -+ goto out; -+ allocated = 
vdir_cache; -+ } else if (!file->f_pos && vdir_cache->vd_version != file->f_version) { -+ /* test file->f_pos here instead of ctx->pos */ -+ err = reinit_vdir(vdir_cache); -+ if (unlikely(err)) -+ goto out; -+ } else -+ return 0; /* success */ -+ -+ inode = file_inode(file); -+ err = copy_vdir(vdir_cache, au_ivdir(inode)); -+ if (!err) { -+ file->f_version = inode->i_version; -+ if (allocated) -+ au_set_fvdir_cache(file, allocated); -+ } else if (allocated) -+ au_vdir_free(allocated); -+ -+out: -+ return err; -+} -+ -+static loff_t calc_offset(struct au_vdir *vdir) -+{ -+ loff_t offset; -+ union au_vdir_deblk_p p; -+ -+ p.deblk = vdir->vd_deblk[vdir->vd_last.ul]; -+ offset = vdir->vd_last.p.deblk - p.deblk; -+ offset += vdir->vd_deblk_sz * vdir->vd_last.ul; -+ return offset; -+} -+ -+/* returns true or false */ -+static int seek_vdir(struct file *file, struct dir_context *ctx) -+{ -+ int valid; -+ unsigned int deblk_sz; -+ unsigned long ul, n; -+ loff_t offset; -+ union au_vdir_deblk_p p, deblk_end; -+ struct au_vdir *vdir_cache; -+ -+ valid = 1; -+ vdir_cache = au_fvdir_cache(file); -+ offset = calc_offset(vdir_cache); -+ AuDbg("offset %lld\n", offset); -+ if (ctx->pos == offset) -+ goto out; -+ -+ vdir_cache->vd_last.ul = 0; -+ vdir_cache->vd_last.p.deblk = vdir_cache->vd_deblk[0]; -+ if (!ctx->pos) -+ goto out; -+ -+ valid = 0; -+ deblk_sz = vdir_cache->vd_deblk_sz; -+ ul = div64_u64(ctx->pos, deblk_sz); -+ AuDbg("ul %lu\n", ul); -+ if (ul >= vdir_cache->vd_nblk) -+ goto out; -+ -+ n = vdir_cache->vd_nblk; -+ for (; ul < n; ul++) { -+ p.deblk = vdir_cache->vd_deblk[ul]; -+ deblk_end.deblk = p.deblk + deblk_sz; -+ offset = ul; -+ offset *= deblk_sz; -+ while (!is_deblk_end(&p, &deblk_end) && offset < ctx->pos) { -+ unsigned int l; -+ -+ l = calc_size(p.de->de_str.len); -+ offset += l; -+ p.deblk += l; -+ } -+ if (!is_deblk_end(&p, &deblk_end)) { -+ valid = 1; -+ vdir_cache->vd_last.ul = ul; -+ vdir_cache->vd_last.p = p; -+ break; -+ } -+ } -+ -+out: -+ /* 
smp_mb(); */ -+ AuTraceErr(!valid); -+ return valid; -+} -+ -+int au_vdir_fill_de(struct file *file, struct dir_context *ctx) -+{ -+ unsigned int l, deblk_sz; -+ union au_vdir_deblk_p deblk_end; -+ struct au_vdir *vdir_cache; -+ struct au_vdir_de *de; -+ -+ vdir_cache = au_fvdir_cache(file); -+ if (!seek_vdir(file, ctx)) -+ return 0; -+ -+ deblk_sz = vdir_cache->vd_deblk_sz; -+ while (1) { -+ deblk_end.deblk = vdir_cache->vd_deblk[vdir_cache->vd_last.ul]; -+ deblk_end.deblk += deblk_sz; -+ while (!is_deblk_end(&vdir_cache->vd_last.p, &deblk_end)) { -+ de = vdir_cache->vd_last.p.de; -+ AuDbg("%.*s, off%lld, i%lu, dt%d\n", -+ de->de_str.len, de->de_str.name, ctx->pos, -+ (unsigned long)de->de_ino, de->de_type); -+ if (unlikely(!dir_emit(ctx, de->de_str.name, -+ de->de_str.len, de->de_ino, -+ de->de_type))) { -+ /* todo: ignore the error caused by udba? */ -+ /* return err; */ -+ return 0; -+ } -+ -+ l = calc_size(de->de_str.len); -+ vdir_cache->vd_last.p.deblk += l; -+ ctx->pos += l; -+ } -+ if (vdir_cache->vd_last.ul < vdir_cache->vd_nblk - 1) { -+ vdir_cache->vd_last.ul++; -+ vdir_cache->vd_last.p.deblk -+ = vdir_cache->vd_deblk[vdir_cache->vd_last.ul]; -+ ctx->pos = deblk_sz * vdir_cache->vd_last.ul; -+ continue; -+ } -+ break; -+ } -+ -+ /* smp_mb(); */ -+ return 0; -+} -diff --git a/fs/aufs/vfsub.c b/fs/aufs/vfsub.c -new file mode 100644 -index 0000000..5fd008c ---- /dev/null -+++ b/fs/aufs/vfsub.c -@@ -0,0 +1,864 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * sub-routines for VFS -+ */ -+ -+#include -+#include -+#include -+#include -+#include "../fs/mount.h" -+#include "aufs.h" -+ -+#ifdef CONFIG_AUFS_BR_FUSE -+int vfsub_test_mntns(struct vfsmount *mnt, struct super_block *h_sb) -+{ -+ struct nsproxy *ns; -+ -+ if (!au_test_fuse(h_sb) || !au_userns) -+ return 0; -+ -+ ns = current->nsproxy; -+ /* no {get,put}_nsproxy(ns) */ -+ return real_mount(mnt)->mnt_ns == ns->mnt_ns ? 0 : -EACCES; -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+int vfsub_update_h_iattr(struct path *h_path, int *did) -+{ -+ int err; -+ struct kstat st; -+ struct super_block *h_sb; -+ -+ /* for remote fs, leave work for its getattr or d_revalidate */ -+ /* for bad i_attr fs, handle them in aufs_getattr() */ -+ /* still some fs may acquire i_mutex. 
we need to skip them */ -+ err = 0; -+ if (!did) -+ did = &err; -+ h_sb = h_path->dentry->d_sb; -+ *did = (!au_test_fs_remote(h_sb) && au_test_fs_refresh_iattr(h_sb)); -+ if (*did) -+ err = vfs_getattr(h_path, &st); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct file *vfsub_dentry_open(struct path *path, int flags) -+{ -+ struct file *file; -+ -+ file = dentry_open(path, flags /* | __FMODE_NONOTIFY */, -+ current_cred()); -+ if (!IS_ERR_OR_NULL(file) -+ && (file->f_mode & (FMODE_READ | FMODE_WRITE)) == FMODE_READ) -+ i_readcount_inc(path->dentry->d_inode); -+ -+ return file; -+} -+ -+struct file *vfsub_filp_open(const char *path, int oflags, int mode) -+{ -+ struct file *file; -+ -+ lockdep_off(); -+ file = filp_open(path, -+ oflags /* | __FMODE_NONOTIFY */, -+ mode); -+ lockdep_on(); -+ if (IS_ERR(file)) -+ goto out; -+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/ -+ -+out: -+ return file; -+} -+ -+/* -+ * Ideally this function should call VFS:do_last() in order to keep all its -+ * checkings. But it is very hard for aufs to regenerate several VFS internal -+ * structure such as nameidata. This is a second (or third) best approach. -+ * cf. linux/fs/namei.c:do_last(), lookup_open() and atomic_open(). 
-+ */ -+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry, -+ struct vfsub_aopen_args *args, struct au_branch *br) -+{ -+ int err; -+ struct file *file = args->file; -+ /* copied from linux/fs/namei.c:atomic_open() */ -+ struct dentry *const DENTRY_NOT_SET = (void *)-1UL; -+ -+ IMustLock(dir); -+ AuDebugOn(!dir->i_op->atomic_open); -+ -+ err = au_br_test_oflag(args->open_flag, br); -+ if (unlikely(err)) -+ goto out; -+ -+ args->file->f_path.dentry = DENTRY_NOT_SET; -+ args->file->f_path.mnt = au_br_mnt(br); -+ err = dir->i_op->atomic_open(dir, dentry, file, args->open_flag, -+ args->create_mode, args->opened); -+ if (err >= 0) { -+ /* some filesystems don't set FILE_CREATED while succeeded? */ -+ if (*args->opened & FILE_CREATED) -+ fsnotify_create(dir, dentry); -+ } else -+ goto out; -+ -+ -+ if (!err) { -+ /* todo: call VFS:may_open() here */ -+ err = open_check_o_direct(file); -+ /* todo: ima_file_check() too? */ -+ if (!err && (args->open_flag & __FMODE_EXEC)) -+ err = deny_write_access(file); -+ if (unlikely(err)) -+ /* note that the file is created and still opened */ -+ goto out; -+ } -+ -+ atomic_inc(&br->br_count); -+ fsnotify_open(file); -+ -+out: -+ return err; -+} -+ -+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path) -+{ -+ int err; -+ -+ err = kern_path(name, flags, path); -+ if (!err && path->dentry->d_inode) -+ vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent, -+ int len) -+{ -+ struct path path = { -+ .mnt = NULL -+ }; -+ -+ /* VFS checks it too, but by WARN_ON_ONCE() */ -+ IMustLock(parent->d_inode); -+ -+ path.dentry = lookup_one_len(name, parent, len); -+ if (IS_ERR(path.dentry)) -+ goto out; -+ if (path.dentry->d_inode) -+ vfsub_update_h_iattr(&path, /*did*/NULL); /*ignore*/ -+ -+out: -+ AuTraceErrPtr(path.dentry); -+ return path.dentry; -+} -+ -+void vfsub_call_lkup_one(void *args) -+{ -+ struct 
vfsub_lkup_one_args *a = args; -+ *a->errp = vfsub_lkup_one(a->name, a->parent); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1, -+ struct dentry *d2, struct au_hinode *hdir2) -+{ -+ struct dentry *d; -+ -+ lockdep_off(); -+ d = lock_rename(d1, d2); -+ lockdep_on(); -+ au_hn_suspend(hdir1); -+ if (hdir1 != hdir2) -+ au_hn_suspend(hdir2); -+ -+ return d; -+} -+ -+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1, -+ struct dentry *d2, struct au_hinode *hdir2) -+{ -+ au_hn_resume(hdir1); -+ if (hdir1 != hdir2) -+ au_hn_resume(hdir2); -+ lockdep_off(); -+ unlock_rename(d1, d2); -+ lockdep_on(); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int vfsub_create(struct inode *dir, struct path *path, int mode, bool want_excl) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_mknod(path, d, mode, 0); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_create(dir, path->dentry, mode, want_excl); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = *path; -+ int did; -+ -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = path->dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+int vfsub_symlink(struct inode *dir, struct path *path, const char *symname) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_symlink(path, d, symname); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_symlink(dir, path->dentry, symname); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = *path; -+ int did; -+ -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = 
path->dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_mknod(path, d, mode, new_encode_dev(dev)); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_mknod(dir, path->dentry, mode, dev); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = *path; -+ int did; -+ -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = path->dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+static int au_test_nlink(struct inode *inode) -+{ -+ const unsigned int link_max = UINT_MAX >> 1; /* rough margin */ -+ -+ if (!au_test_fs_no_limit_nlink(inode->i_sb) -+ || inode->i_nlink < link_max) -+ return 0; -+ return -EMLINK; -+} -+ -+int vfsub_link(struct dentry *src_dentry, struct inode *dir, struct path *path, -+ struct inode **delegated_inode) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ err = au_test_nlink(src_dentry->d_inode); -+ if (unlikely(err)) -+ return err; -+ -+ /* we don't call may_linkat() */ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_link(src_dentry, path, d); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_link(src_dentry, dir, path->dentry, delegated_inode); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = *path; -+ int did; -+ -+ /* fuse has different memory inode for the same inumber */ -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = path->dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ tmp.dentry = src_dentry; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+int vfsub_rename(struct inode 
*src_dir, struct dentry *src_dentry, -+ struct inode *dir, struct path *path, -+ struct inode **delegated_inode) -+{ -+ int err; -+ struct path tmp = { -+ .mnt = path->mnt -+ }; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ IMustLock(src_dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ tmp.dentry = src_dentry->d_parent; -+ err = security_path_rename(&tmp, src_dentry, path, d, /*flags*/0); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_rename(src_dir, src_dentry, dir, path->dentry, -+ delegated_inode, /*flags*/0); -+ lockdep_on(); -+ if (!err) { -+ int did; -+ -+ tmp.dentry = d->d_parent; -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = src_dentry; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ tmp.dentry = src_dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+int vfsub_mkdir(struct inode *dir, struct path *path, int mode) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_mkdir(path, d, mode); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_mkdir(dir, path->dentry, mode); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = *path; -+ int did; -+ -+ vfsub_update_h_iattr(&tmp, &did); -+ if (did) { -+ tmp.dentry = path->dentry->d_parent; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); -+ } -+ /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+int vfsub_rmdir(struct inode *dir, struct path *path) -+{ -+ int err; -+ struct dentry *d; -+ -+ IMustLock(dir); -+ -+ d = path->dentry; -+ path->dentry = d->d_parent; -+ err = security_path_rmdir(path, d); -+ path->dentry = d; -+ if (unlikely(err)) -+ goto out; -+ -+ lockdep_off(); -+ err = vfs_rmdir(dir, path->dentry); -+ lockdep_on(); -+ if (!err) { -+ struct path tmp = { -+ .dentry = path->dentry->d_parent, -+ .mnt = path->mnt -+ }; -+ -+ 
vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/ -+ } -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* todo: support mmap_sem? */ -+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count, -+ loff_t *ppos) -+{ -+ ssize_t err; -+ -+ lockdep_off(); -+ err = vfs_read(file, ubuf, count, ppos); -+ lockdep_on(); -+ if (err >= 0) -+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+/* todo: kernel_read()? */ -+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count, -+ loff_t *ppos) -+{ -+ ssize_t err; -+ mm_segment_t oldfs; -+ union { -+ void *k; -+ char __user *u; -+ } buf; -+ -+ buf.k = kbuf; -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+ err = vfsub_read_u(file, buf.u, count, ppos); -+ set_fs(oldfs); -+ return err; -+} -+ -+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count, -+ loff_t *ppos) -+{ -+ ssize_t err; -+ -+ lockdep_off(); -+ err = vfs_write(file, ubuf, count, ppos); -+ lockdep_on(); -+ if (err >= 0) -+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count, loff_t *ppos) -+{ -+ ssize_t err; -+ mm_segment_t oldfs; -+ union { -+ void *k; -+ const char __user *u; -+ } buf; -+ -+ buf.k = kbuf; -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+ err = vfsub_write_u(file, buf.u, count, ppos); -+ set_fs(oldfs); -+ return err; -+} -+ -+int vfsub_flush(struct file *file, fl_owner_t id) -+{ -+ int err; -+ -+ err = 0; -+ if (file->f_op->flush) { -+ if (!au_test_nfs(file->f_dentry->d_sb)) -+ err = file->f_op->flush(file, id); -+ else { -+ lockdep_off(); -+ err = file->f_op->flush(file, id); -+ lockdep_on(); -+ } -+ if (!err) -+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); -+ /*ignore*/ -+ } -+ return err; -+} -+ -+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx) -+{ -+ int err; -+ -+ AuDbg("%pD, 
ctx{%pf, %llu}\n", file, ctx->actor, ctx->pos); -+ -+ lockdep_off(); -+ err = iterate_dir(file, ctx); -+ lockdep_on(); -+ if (err >= 0) -+ vfsub_update_h_iattr(&file->f_path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+long vfsub_splice_to(struct file *in, loff_t *ppos, -+ struct pipe_inode_info *pipe, size_t len, -+ unsigned int flags) -+{ -+ long err; -+ -+ lockdep_off(); -+ err = do_splice_to(in, ppos, pipe, len, flags); -+ lockdep_on(); -+ file_accessed(in); -+ if (err >= 0) -+ vfsub_update_h_iattr(&in->f_path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out, -+ loff_t *ppos, size_t len, unsigned int flags) -+{ -+ long err; -+ -+ lockdep_off(); -+ err = do_splice_from(pipe, out, ppos, len, flags); -+ lockdep_on(); -+ if (err >= 0) -+ vfsub_update_h_iattr(&out->f_path, /*did*/NULL); /*ignore*/ -+ return err; -+} -+ -+int vfsub_fsync(struct file *file, struct path *path, int datasync) -+{ -+ int err; -+ -+ /* file can be NULL */ -+ lockdep_off(); -+ err = vfs_fsync(file, datasync); -+ lockdep_on(); -+ if (!err) { -+ if (!path) { -+ AuDebugOn(!file); -+ path = &file->f_path; -+ } -+ vfsub_update_h_iattr(path, /*did*/NULL); /*ignore*/ -+ } -+ return err; -+} -+ -+/* cf. 
open.c:do_sys_truncate() and do_sys_ftruncate() */ -+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr, -+ struct file *h_file) -+{ -+ int err; -+ struct inode *h_inode; -+ struct super_block *h_sb; -+ -+ if (!h_file) { -+ err = vfsub_truncate(h_path, length); -+ goto out; -+ } -+ -+ h_inode = h_path->dentry->d_inode; -+ h_sb = h_inode->i_sb; -+ lockdep_off(); -+ sb_start_write(h_sb); -+ lockdep_on(); -+ err = locks_verify_truncate(h_inode, h_file, length); -+ if (!err) -+ err = security_path_truncate(h_path); -+ if (!err) { -+ lockdep_off(); -+ err = do_truncate(h_path->dentry, length, attr, h_file); -+ lockdep_on(); -+ } -+ lockdep_off(); -+ sb_end_write(h_sb); -+ lockdep_on(); -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_vfsub_mkdir_args { -+ int *errp; -+ struct inode *dir; -+ struct path *path; -+ int mode; -+}; -+ -+static void au_call_vfsub_mkdir(void *args) -+{ -+ struct au_vfsub_mkdir_args *a = args; -+ *a->errp = vfsub_mkdir(a->dir, a->path, a->mode); -+} -+ -+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode) -+{ -+ int err, do_sio, wkq_err; -+ -+ do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE); -+ if (!do_sio) { -+ lockdep_off(); -+ err = vfsub_mkdir(dir, path, mode); -+ lockdep_on(); -+ } else { -+ struct au_vfsub_mkdir_args args = { -+ .errp = &err, -+ .dir = dir, -+ .path = path, -+ .mode = mode -+ }; -+ wkq_err = au_wkq_wait(au_call_vfsub_mkdir, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ return err; -+} -+ -+struct au_vfsub_rmdir_args { -+ int *errp; -+ struct inode *dir; -+ struct path *path; -+}; -+ -+static void au_call_vfsub_rmdir(void *args) -+{ -+ struct au_vfsub_rmdir_args *a = args; -+ *a->errp = vfsub_rmdir(a->dir, a->path); -+} -+ -+int vfsub_sio_rmdir(struct inode *dir, struct path *path) -+{ -+ int err, do_sio, wkq_err; -+ -+ do_sio = au_test_h_perm_sio(dir, MAY_EXEC | MAY_WRITE); -+ if 
(!do_sio) { -+ lockdep_off(); -+ err = vfsub_rmdir(dir, path); -+ lockdep_on(); -+ } else { -+ struct au_vfsub_rmdir_args args = { -+ .errp = &err, -+ .dir = dir, -+ .path = path -+ }; -+ wkq_err = au_wkq_wait(au_call_vfsub_rmdir, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct notify_change_args { -+ int *errp; -+ struct path *path; -+ struct iattr *ia; -+ struct inode **delegated_inode; -+}; -+ -+static void call_notify_change(void *args) -+{ -+ struct notify_change_args *a = args; -+ struct inode *h_inode; -+ -+ h_inode = a->path->dentry->d_inode; -+ IMustLock(h_inode); -+ -+ *a->errp = -EPERM; -+ if (!IS_IMMUTABLE(h_inode) && !IS_APPEND(h_inode)) { -+ lockdep_off(); -+ *a->errp = notify_change(a->path->dentry, a->ia, -+ a->delegated_inode); -+ lockdep_on(); -+ if (!*a->errp) -+ vfsub_update_h_iattr(a->path, /*did*/NULL); /*ignore*/ -+ } -+ AuTraceErr(*a->errp); -+} -+ -+int vfsub_notify_change(struct path *path, struct iattr *ia, -+ struct inode **delegated_inode) -+{ -+ int err; -+ struct notify_change_args args = { -+ .errp = &err, -+ .path = path, -+ .ia = ia, -+ .delegated_inode = delegated_inode -+ }; -+ -+ call_notify_change(&args); -+ -+ return err; -+} -+ -+int vfsub_sio_notify_change(struct path *path, struct iattr *ia, -+ struct inode **delegated_inode) -+{ -+ int err, wkq_err; -+ struct notify_change_args args = { -+ .errp = &err, -+ .path = path, -+ .ia = ia, -+ .delegated_inode = delegated_inode -+ }; -+ -+ wkq_err = au_wkq_wait(call_notify_change, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct unlink_args { -+ int *errp; -+ struct inode *dir; -+ struct path *path; -+ struct inode **delegated_inode; -+}; -+ -+static void call_unlink(void *args) -+{ -+ struct unlink_args *a = args; -+ struct dentry *d = 
a->path->dentry; -+ struct inode *h_inode; -+ const int stop_sillyrename = (au_test_nfs(d->d_sb) -+ && au_dcount(d) == 1); -+ -+ IMustLock(a->dir); -+ -+ a->path->dentry = d->d_parent; -+ *a->errp = security_path_unlink(a->path, d); -+ a->path->dentry = d; -+ if (unlikely(*a->errp)) -+ return; -+ -+ if (!stop_sillyrename) -+ dget(d); -+ h_inode = d->d_inode; -+ if (h_inode) -+ ihold(h_inode); -+ -+ lockdep_off(); -+ *a->errp = vfs_unlink(a->dir, d, a->delegated_inode); -+ lockdep_on(); -+ if (!*a->errp) { -+ struct path tmp = { -+ .dentry = d->d_parent, -+ .mnt = a->path->mnt -+ }; -+ vfsub_update_h_iattr(&tmp, /*did*/NULL); /*ignore*/ -+ } -+ -+ if (!stop_sillyrename) -+ dput(d); -+ if (h_inode) -+ iput(h_inode); -+ -+ AuTraceErr(*a->errp); -+} -+ -+/* -+ * @dir: must be locked. -+ * @dentry: target dentry. -+ */ -+int vfsub_unlink(struct inode *dir, struct path *path, -+ struct inode **delegated_inode, int force) -+{ -+ int err; -+ struct unlink_args args = { -+ .errp = &err, -+ .dir = dir, -+ .path = path, -+ .delegated_inode = delegated_inode -+ }; -+ -+ if (!force) -+ call_unlink(&args); -+ else { -+ int wkq_err; -+ -+ wkq_err = au_wkq_wait(call_unlink, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ -+ return err; -+} -diff --git a/fs/aufs/vfsub.h b/fs/aufs/vfsub.h -new file mode 100644 -index 0000000..2c33298 ---- /dev/null -+++ b/fs/aufs/vfsub.h -@@ -0,0 +1,315 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. 
-+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * sub-routines for VFS -+ */ -+ -+#ifndef __AUFS_VFSUB_H__ -+#define __AUFS_VFSUB_H__ -+ -+#ifdef __KERNEL__ -+ -+#include -+#include -+#include -+#include -+#include "debug.h" -+ -+/* copied from linux/fs/internal.h */ -+/* todo: BAD approach!! */ -+extern void __mnt_drop_write(struct vfsmount *); -+extern spinlock_t inode_sb_list_lock; -+extern int open_check_o_direct(struct file *f); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* lock subclass for lower inode */ -+/* default MAX_LOCKDEP_SUBCLASSES(8) is not enough */ -+/* reduce? gave up. */ -+enum { -+ AuLsc_I_Begin = I_MUTEX_PARENT2, /* 5 */ -+ AuLsc_I_PARENT, /* lower inode, parent first */ -+ AuLsc_I_PARENT2, /* copyup dirs */ -+ AuLsc_I_PARENT3, /* copyup wh */ -+ AuLsc_I_CHILD, -+ AuLsc_I_CHILD2, -+ AuLsc_I_End -+}; -+ -+/* to debug easier, do not make them inlined functions */ -+#define MtxMustLock(mtx) AuDebugOn(!mutex_is_locked(mtx)) -+#define IMustLock(i) MtxMustLock(&(i)->i_mutex) -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline void vfsub_drop_nlink(struct inode *inode) -+{ -+ AuDebugOn(!inode->i_nlink); -+ drop_nlink(inode); -+} -+ -+static inline void vfsub_dead_dir(struct inode *inode) -+{ -+ AuDebugOn(!S_ISDIR(inode->i_mode)); -+ inode->i_flags |= S_DEAD; -+ clear_nlink(inode); -+} -+ -+static inline int vfsub_native_ro(struct inode *inode) -+{ -+ return (inode->i_sb->s_flags & MS_RDONLY) -+ || IS_RDONLY(inode) -+ /* || IS_APPEND(inode) */ -+ || IS_IMMUTABLE(inode); -+} -+ -+#ifdef CONFIG_AUFS_BR_FUSE -+int vfsub_test_mntns(struct vfsmount *mnt, struct super_block *h_sb); -+#else -+AuStubInt0(vfsub_test_mntns, struct vfsmount *mnt, struct super_block *h_sb); -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+int 
vfsub_update_h_iattr(struct path *h_path, int *did); -+struct file *vfsub_dentry_open(struct path *path, int flags); -+struct file *vfsub_filp_open(const char *path, int oflags, int mode); -+struct vfsub_aopen_args { -+ struct file *file; -+ unsigned int open_flag; -+ umode_t create_mode; -+ int *opened; -+}; -+struct au_branch; -+int vfsub_atomic_open(struct inode *dir, struct dentry *dentry, -+ struct vfsub_aopen_args *args, struct au_branch *br); -+int vfsub_kern_path(const char *name, unsigned int flags, struct path *path); -+ -+struct dentry *vfsub_lookup_one_len(const char *name, struct dentry *parent, -+ int len); -+ -+struct vfsub_lkup_one_args { -+ struct dentry **errp; -+ struct qstr *name; -+ struct dentry *parent; -+}; -+ -+static inline struct dentry *vfsub_lkup_one(struct qstr *name, -+ struct dentry *parent) -+{ -+ return vfsub_lookup_one_len(name->name, parent, name->len); -+} -+ -+void vfsub_call_lkup_one(void *args); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline int vfsub_mnt_want_write(struct vfsmount *mnt) -+{ -+ int err; -+ -+ lockdep_off(); -+ err = mnt_want_write(mnt); -+ lockdep_on(); -+ return err; -+} -+ -+static inline void vfsub_mnt_drop_write(struct vfsmount *mnt) -+{ -+ lockdep_off(); -+ mnt_drop_write(mnt); -+ lockdep_on(); -+} -+ -+#if 0 /* reserved */ -+static inline void vfsub_mnt_drop_write_file(struct file *file) -+{ -+ lockdep_off(); -+ mnt_drop_write_file(file); -+ lockdep_on(); -+} -+#endif -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_hinode; -+struct dentry *vfsub_lock_rename(struct dentry *d1, struct au_hinode *hdir1, -+ struct dentry *d2, struct au_hinode *hdir2); -+void vfsub_unlock_rename(struct dentry *d1, struct au_hinode *hdir1, -+ struct dentry *d2, struct au_hinode *hdir2); -+ -+int vfsub_create(struct inode *dir, struct path *path, int mode, -+ bool want_excl); -+int vfsub_symlink(struct inode *dir, struct path 
*path, -+ const char *symname); -+int vfsub_mknod(struct inode *dir, struct path *path, int mode, dev_t dev); -+int vfsub_link(struct dentry *src_dentry, struct inode *dir, -+ struct path *path, struct inode **delegated_inode); -+int vfsub_rename(struct inode *src_hdir, struct dentry *src_dentry, -+ struct inode *hdir, struct path *path, -+ struct inode **delegated_inode); -+int vfsub_mkdir(struct inode *dir, struct path *path, int mode); -+int vfsub_rmdir(struct inode *dir, struct path *path); -+ -+/* ---------------------------------------------------------------------- */ -+ -+ssize_t vfsub_read_u(struct file *file, char __user *ubuf, size_t count, -+ loff_t *ppos); -+ssize_t vfsub_read_k(struct file *file, void *kbuf, size_t count, -+ loff_t *ppos); -+ssize_t vfsub_write_u(struct file *file, const char __user *ubuf, size_t count, -+ loff_t *ppos); -+ssize_t vfsub_write_k(struct file *file, void *kbuf, size_t count, -+ loff_t *ppos); -+int vfsub_flush(struct file *file, fl_owner_t id); -+int vfsub_iterate_dir(struct file *file, struct dir_context *ctx); -+ -+/* just for type-check */ -+static inline filldir_t au_diractor(int (*func)(struct dir_context *, -+ const char *, int, loff_t, u64, -+ unsigned)) -+{ -+ return (filldir_t)func; -+} -+ -+static inline loff_t vfsub_f_size_read(struct file *file) -+{ -+ return i_size_read(file_inode(file)); -+} -+ -+static inline unsigned int vfsub_file_flags(struct file *file) -+{ -+ unsigned int flags; -+ -+ spin_lock(&file->f_lock); -+ flags = file->f_flags; -+ spin_unlock(&file->f_lock); -+ -+ return flags; -+} -+ -+#if 0 /* reserved */ -+static inline void vfsub_file_accessed(struct file *h_file) -+{ -+ file_accessed(h_file); -+ vfsub_update_h_iattr(&h_file->f_path, /*did*/NULL); /*ignore*/ -+} -+#endif -+ -+static inline void vfsub_touch_atime(struct vfsmount *h_mnt, -+ struct dentry *h_dentry) -+{ -+ struct path h_path = { -+ .dentry = h_dentry, -+ .mnt = h_mnt -+ }; -+ touch_atime(&h_path); -+ 
vfsub_update_h_iattr(&h_path, /*did*/NULL); /*ignore*/ -+} -+ -+static inline int vfsub_update_time(struct inode *h_inode, struct timespec *ts, -+ int flags) -+{ -+ return update_time(h_inode, ts, flags); -+ /* no vfsub_update_h_iattr() since we don't have struct path */ -+} -+ -+#ifdef CONFIG_FS_POSIX_ACL -+static inline int vfsub_acl_chmod(struct inode *h_inode, umode_t h_mode) -+{ -+ int err; -+ -+ err = posix_acl_chmod(h_inode, h_mode); -+ if (err == -EOPNOTSUPP) -+ err = 0; -+ return err; -+} -+#else -+AuStubInt0(vfsub_acl_chmod, struct inode *h_inode, umode_t h_mode); -+#endif -+ -+long vfsub_splice_to(struct file *in, loff_t *ppos, -+ struct pipe_inode_info *pipe, size_t len, -+ unsigned int flags); -+long vfsub_splice_from(struct pipe_inode_info *pipe, struct file *out, -+ loff_t *ppos, size_t len, unsigned int flags); -+ -+static inline long vfsub_truncate(struct path *path, loff_t length) -+{ -+ long err; -+ -+ lockdep_off(); -+ err = vfs_truncate(path, length); -+ lockdep_on(); -+ return err; -+} -+ -+int vfsub_trunc(struct path *h_path, loff_t length, unsigned int attr, -+ struct file *h_file); -+int vfsub_fsync(struct file *file, struct path *path, int datasync); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline loff_t vfsub_llseek(struct file *file, loff_t offset, int origin) -+{ -+ loff_t err; -+ -+ lockdep_off(); -+ err = vfs_llseek(file, offset, origin); -+ lockdep_on(); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int vfsub_sio_mkdir(struct inode *dir, struct path *path, int mode); -+int vfsub_sio_rmdir(struct inode *dir, struct path *path); -+int vfsub_sio_notify_change(struct path *path, struct iattr *ia, -+ struct inode **delegated_inode); -+int vfsub_notify_change(struct path *path, struct iattr *ia, -+ struct inode **delegated_inode); -+int vfsub_unlink(struct inode *dir, struct path *path, -+ struct inode **delegated_inode, int 
force); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline int vfsub_setxattr(struct dentry *dentry, const char *name, -+ const void *value, size_t size, int flags) -+{ -+ int err; -+ -+ lockdep_off(); -+ err = vfs_setxattr(dentry, name, value, size, flags); -+ lockdep_on(); -+ -+ return err; -+} -+ -+static inline int vfsub_removexattr(struct dentry *dentry, const char *name) -+{ -+ int err; -+ -+ lockdep_off(); -+ err = vfs_removexattr(dentry, name); -+ lockdep_on(); -+ -+ return err; -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_VFSUB_H__ */ -diff --git a/fs/aufs/wbr_policy.c b/fs/aufs/wbr_policy.c -new file mode 100644 -index 0000000..64cd9fe ---- /dev/null -+++ b/fs/aufs/wbr_policy.c -@@ -0,0 +1,765 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * policies for selecting one among multiple writable branches -+ */ -+ -+#include -+#include "aufs.h" -+ -+/* subset of cpup_attr() */ -+static noinline_for_stack -+int au_cpdown_attr(struct path *h_path, struct dentry *h_src) -+{ -+ int err, sbits; -+ struct iattr ia; -+ struct inode *h_isrc; -+ -+ h_isrc = h_src->d_inode; -+ ia.ia_valid = ATTR_FORCE | ATTR_MODE | ATTR_UID | ATTR_GID; -+ ia.ia_mode = h_isrc->i_mode; -+ ia.ia_uid = h_isrc->i_uid; -+ ia.ia_gid = h_isrc->i_gid; -+ sbits = !!(ia.ia_mode & (S_ISUID | S_ISGID)); -+ au_cpup_attr_flags(h_path->dentry->d_inode, h_isrc->i_flags); -+ /* no delegation since it is just created */ -+ err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL); -+ -+ /* is this nfs only? */ -+ if (!err && sbits && au_test_nfs(h_path->dentry->d_sb)) { -+ ia.ia_valid = ATTR_FORCE | ATTR_MODE; -+ ia.ia_mode = h_isrc->i_mode; -+ err = vfsub_sio_notify_change(h_path, &ia, /*delegated*/NULL); -+ } -+ -+ return err; -+} -+ -+#define AuCpdown_PARENT_OPQ 1 -+#define AuCpdown_WHED (1 << 1) -+#define AuCpdown_MADE_DIR (1 << 2) -+#define AuCpdown_DIROPQ (1 << 3) -+#define au_ftest_cpdown(flags, name) ((flags) & AuCpdown_##name) -+#define au_fset_cpdown(flags, name) \ -+ do { (flags) |= AuCpdown_##name; } while (0) -+#define au_fclr_cpdown(flags, name) \ -+ do { (flags) &= ~AuCpdown_##name; } while (0) -+ -+static int au_cpdown_dir_opq(struct dentry *dentry, aufs_bindex_t bdst, -+ unsigned int *flags) -+{ -+ int err; -+ struct dentry *opq_dentry; -+ -+ opq_dentry = au_diropq_create(dentry, bdst); -+ err = PTR_ERR(opq_dentry); -+ if (IS_ERR(opq_dentry)) -+ goto out; -+ dput(opq_dentry); -+ au_fset_cpdown(*flags, DIROPQ); -+ -+out: -+ return err; -+} -+ -+static int au_cpdown_dir_wh(struct dentry *dentry, struct dentry *h_parent, -+ struct inode *dir, aufs_bindex_t bdst) -+{ -+ int err; -+ struct path h_path; -+ struct au_branch *br; -+ -+ br = au_sbr(dentry->d_sb, bdst); -+ h_path.dentry = au_wh_lkup(h_parent, 
&dentry->d_name, br); -+ err = PTR_ERR(h_path.dentry); -+ if (IS_ERR(h_path.dentry)) -+ goto out; -+ -+ err = 0; -+ if (h_path.dentry->d_inode) { -+ h_path.mnt = au_br_mnt(br); -+ err = au_wh_unlink_dentry(au_h_iptr(dir, bdst), &h_path, -+ dentry); -+ } -+ dput(h_path.dentry); -+ -+out: -+ return err; -+} -+ -+static int au_cpdown_dir(struct dentry *dentry, aufs_bindex_t bdst, -+ struct au_pin *pin, -+ struct dentry *h_parent, void *arg) -+{ -+ int err, rerr; -+ aufs_bindex_t bopq, bstart; -+ struct path h_path; -+ struct dentry *parent; -+ struct inode *h_dir, *h_inode, *inode, *dir; -+ unsigned int *flags = arg; -+ -+ bstart = au_dbstart(dentry); -+ /* dentry is di-locked */ -+ parent = dget_parent(dentry); -+ dir = parent->d_inode; -+ h_dir = h_parent->d_inode; -+ AuDebugOn(h_dir != au_h_iptr(dir, bdst)); -+ IMustLock(h_dir); -+ -+ err = au_lkup_neg(dentry, bdst, /*wh*/0); -+ if (unlikely(err < 0)) -+ goto out; -+ h_path.dentry = au_h_dptr(dentry, bdst); -+ h_path.mnt = au_sbr_mnt(dentry->d_sb, bdst); -+ err = vfsub_sio_mkdir(au_h_iptr(dir, bdst), &h_path, -+ S_IRWXU | S_IRUGO | S_IXUGO); -+ if (unlikely(err)) -+ goto out_put; -+ au_fset_cpdown(*flags, MADE_DIR); -+ -+ bopq = au_dbdiropq(dentry); -+ au_fclr_cpdown(*flags, WHED); -+ au_fclr_cpdown(*flags, DIROPQ); -+ if (au_dbwh(dentry) == bdst) -+ au_fset_cpdown(*flags, WHED); -+ if (!au_ftest_cpdown(*flags, PARENT_OPQ) && bopq <= bdst) -+ au_fset_cpdown(*flags, PARENT_OPQ); -+ h_inode = h_path.dentry->d_inode; -+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD); -+ if (au_ftest_cpdown(*flags, WHED)) { -+ err = au_cpdown_dir_opq(dentry, bdst, flags); -+ if (unlikely(err)) { -+ mutex_unlock(&h_inode->i_mutex); -+ goto out_dir; -+ } -+ } -+ -+ err = au_cpdown_attr(&h_path, au_h_dptr(dentry, bstart)); -+ mutex_unlock(&h_inode->i_mutex); -+ if (unlikely(err)) -+ goto out_opq; -+ -+ if (au_ftest_cpdown(*flags, WHED)) { -+ err = au_cpdown_dir_wh(dentry, h_parent, dir, bdst); -+ if (unlikely(err)) -+ goto out_opq; 
-+ } -+ -+ inode = dentry->d_inode; -+ if (au_ibend(inode) < bdst) -+ au_set_ibend(inode, bdst); -+ au_set_h_iptr(inode, bdst, au_igrab(h_inode), -+ au_hi_flags(inode, /*isdir*/1)); -+ au_fhsm_wrote(dentry->d_sb, bdst, /*force*/0); -+ goto out; /* success */ -+ -+ /* revert */ -+out_opq: -+ if (au_ftest_cpdown(*flags, DIROPQ)) { -+ mutex_lock_nested(&h_inode->i_mutex, AuLsc_I_CHILD); -+ rerr = au_diropq_remove(dentry, bdst); -+ mutex_unlock(&h_inode->i_mutex); -+ if (unlikely(rerr)) { -+ AuIOErr("failed removing diropq for %pd b%d (%d)\n", -+ dentry, bdst, rerr); -+ err = -EIO; -+ goto out; -+ } -+ } -+out_dir: -+ if (au_ftest_cpdown(*flags, MADE_DIR)) { -+ rerr = vfsub_sio_rmdir(au_h_iptr(dir, bdst), &h_path); -+ if (unlikely(rerr)) { -+ AuIOErr("failed removing %pd b%d (%d)\n", -+ dentry, bdst, rerr); -+ err = -EIO; -+ } -+ } -+out_put: -+ au_set_h_dptr(dentry, bdst, NULL); -+ if (au_dbend(dentry) == bdst) -+ au_update_dbend(dentry); -+out: -+ dput(parent); -+ return err; -+} -+ -+int au_cpdown_dirs(struct dentry *dentry, aufs_bindex_t bdst) -+{ -+ int err; -+ unsigned int flags; -+ -+ flags = 0; -+ err = au_cp_dirs(dentry, bdst, au_cpdown_dir, &flags); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* policies for create */ -+ -+int au_wbr_nonopq(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ int err, i, j, ndentry; -+ aufs_bindex_t bopq; -+ struct au_dcsub_pages dpages; -+ struct au_dpage *dpage; -+ struct dentry **dentries, *parent, *d; -+ -+ err = au_dpages_init(&dpages, GFP_NOFS); -+ if (unlikely(err)) -+ goto out; -+ parent = dget_parent(dentry); -+ err = au_dcsub_pages_rev_aufs(&dpages, parent, /*do_include*/0); -+ if (unlikely(err)) -+ goto out_free; -+ -+ err = bindex; -+ for (i = 0; i < dpages.ndpage; i++) { -+ dpage = dpages.dpages + i; -+ dentries = dpage->dentries; -+ ndentry = dpage->ndentry; -+ for (j = 0; j < ndentry; j++) { -+ d = dentries[j]; -+ di_read_lock_parent2(d, 
!AuLock_IR); -+ bopq = au_dbdiropq(d); -+ di_read_unlock(d, !AuLock_IR); -+ if (bopq >= 0 && bopq < err) -+ err = bopq; -+ } -+ } -+ -+out_free: -+ dput(parent); -+ au_dpages_free(&dpages); -+out: -+ return err; -+} -+ -+static int au_wbr_bu(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ for (; bindex >= 0; bindex--) -+ if (!au_br_rdonly(au_sbr(sb, bindex))) -+ return bindex; -+ return -EROFS; -+} -+ -+/* top down parent */ -+static int au_wbr_create_tdp(struct dentry *dentry, -+ unsigned int flags __maybe_unused) -+{ -+ int err; -+ aufs_bindex_t bstart, bindex; -+ struct super_block *sb; -+ struct dentry *parent, *h_parent; -+ -+ sb = dentry->d_sb; -+ bstart = au_dbstart(dentry); -+ err = bstart; -+ if (!au_br_rdonly(au_sbr(sb, bstart))) -+ goto out; -+ -+ err = -EROFS; -+ parent = dget_parent(dentry); -+ for (bindex = au_dbstart(parent); bindex < bstart; bindex++) { -+ h_parent = au_h_dptr(parent, bindex); -+ if (!h_parent || !h_parent->d_inode) -+ continue; -+ -+ if (!au_br_rdonly(au_sbr(sb, bindex))) { -+ err = bindex; -+ break; -+ } -+ } -+ dput(parent); -+ -+ /* bottom up here */ -+ if (unlikely(err < 0)) { -+ err = au_wbr_bu(sb, bstart - 1); -+ if (err >= 0) -+ err = au_wbr_nonopq(dentry, err); -+ } -+ -+out: -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* an exception for the policy other than tdp */ -+static int au_wbr_create_exp(struct dentry *dentry) -+{ -+ int err; -+ aufs_bindex_t bwh, bdiropq; -+ struct dentry *parent; -+ -+ err = -1; -+ bwh = au_dbwh(dentry); -+ parent = dget_parent(dentry); -+ bdiropq = au_dbdiropq(parent); -+ if (bwh >= 0) { -+ if (bdiropq >= 0) -+ err = min(bdiropq, bwh); -+ else -+ err = bwh; -+ AuDbg("%d\n", err); -+ } else if (bdiropq >= 0) { -+ err = bdiropq; -+ AuDbg("%d\n", err); -+ } -+ dput(parent); -+ -+ if (err >= 0) -+ err = au_wbr_nonopq(dentry, err); -+ -+ if (err >= 0 && au_br_rdonly(au_sbr(dentry->d_sb, err))) -+ err = -1; -+ 
-+ AuDbg("%d\n", err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* round robin */ -+static int au_wbr_create_init_rr(struct super_block *sb) -+{ -+ int err; -+ -+ err = au_wbr_bu(sb, au_sbend(sb)); -+ atomic_set(&au_sbi(sb)->si_wbr_rr_next, -err); /* less important */ -+ /* smp_mb(); */ -+ -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+static int au_wbr_create_rr(struct dentry *dentry, unsigned int flags) -+{ -+ int err, nbr; -+ unsigned int u; -+ aufs_bindex_t bindex, bend; -+ struct super_block *sb; -+ atomic_t *next; -+ -+ err = au_wbr_create_exp(dentry); -+ if (err >= 0) -+ goto out; -+ -+ sb = dentry->d_sb; -+ next = &au_sbi(sb)->si_wbr_rr_next; -+ bend = au_sbend(sb); -+ nbr = bend + 1; -+ for (bindex = 0; bindex <= bend; bindex++) { -+ if (!au_ftest_wbr(flags, DIR)) { -+ err = atomic_dec_return(next) + 1; -+ /* modulo for 0 is meaningless */ -+ if (unlikely(!err)) -+ err = atomic_dec_return(next) + 1; -+ } else -+ err = atomic_read(next); -+ AuDbg("%d\n", err); -+ u = err; -+ err = u % nbr; -+ AuDbg("%d\n", err); -+ if (!au_br_rdonly(au_sbr(sb, err))) -+ break; -+ err = -EROFS; -+ } -+ -+ if (err >= 0) -+ err = au_wbr_nonopq(dentry, err); -+ -+out: -+ AuDbg("%d\n", err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* most free space */ -+static void au_mfs(struct dentry *dentry, struct dentry *parent) -+{ -+ struct super_block *sb; -+ struct au_branch *br; -+ struct au_wbr_mfs *mfs; -+ struct dentry *h_parent; -+ aufs_bindex_t bindex, bend; -+ int err; -+ unsigned long long b, bavail; -+ struct path h_path; -+ /* reduce the stack usage */ -+ struct kstatfs *st; -+ -+ st = kmalloc(sizeof(*st), GFP_NOFS); -+ if (unlikely(!st)) { -+ AuWarn1("failed updating mfs(%d), ignored\n", -ENOMEM); -+ return; -+ } -+ -+ bavail = 0; -+ sb = dentry->d_sb; -+ mfs = &au_sbi(sb)->si_wbr_mfs; -+ MtxMustLock(&mfs->mfs_lock); -+ mfs->mfs_bindex = -EROFS; 
-+ mfs->mfsrr_bytes = 0; -+ if (!parent) { -+ bindex = 0; -+ bend = au_sbend(sb); -+ } else { -+ bindex = au_dbstart(parent); -+ bend = au_dbtaildir(parent); -+ } -+ -+ for (; bindex <= bend; bindex++) { -+ if (parent) { -+ h_parent = au_h_dptr(parent, bindex); -+ if (!h_parent || !h_parent->d_inode) -+ continue; -+ } -+ br = au_sbr(sb, bindex); -+ if (au_br_rdonly(br)) -+ continue; -+ -+ /* sb->s_root for NFS is unreliable */ -+ h_path.mnt = au_br_mnt(br); -+ h_path.dentry = h_path.mnt->mnt_root; -+ err = vfs_statfs(&h_path, st); -+ if (unlikely(err)) { -+ AuWarn1("failed statfs, b%d, %d\n", bindex, err); -+ continue; -+ } -+ -+ /* when the available size is equal, select the lower one */ -+ BUILD_BUG_ON(sizeof(b) < sizeof(st->f_bavail) -+ || sizeof(b) < sizeof(st->f_bsize)); -+ b = st->f_bavail * st->f_bsize; -+ br->br_wbr->wbr_bytes = b; -+ if (b >= bavail) { -+ bavail = b; -+ mfs->mfs_bindex = bindex; -+ mfs->mfs_jiffy = jiffies; -+ } -+ } -+ -+ mfs->mfsrr_bytes = bavail; -+ AuDbg("b%d\n", mfs->mfs_bindex); -+ kfree(st); -+} -+ -+static int au_wbr_create_mfs(struct dentry *dentry, unsigned int flags) -+{ -+ int err; -+ struct dentry *parent; -+ struct super_block *sb; -+ struct au_wbr_mfs *mfs; -+ -+ err = au_wbr_create_exp(dentry); -+ if (err >= 0) -+ goto out; -+ -+ sb = dentry->d_sb; -+ parent = NULL; -+ if (au_ftest_wbr(flags, PARENT)) -+ parent = dget_parent(dentry); -+ mfs = &au_sbi(sb)->si_wbr_mfs; -+ mutex_lock(&mfs->mfs_lock); -+ if (time_after(jiffies, mfs->mfs_jiffy + mfs->mfs_expire) -+ || mfs->mfs_bindex < 0 -+ || au_br_rdonly(au_sbr(sb, mfs->mfs_bindex))) -+ au_mfs(dentry, parent); -+ mutex_unlock(&mfs->mfs_lock); -+ err = mfs->mfs_bindex; -+ dput(parent); -+ -+ if (err >= 0) -+ err = au_wbr_nonopq(dentry, err); -+ -+out: -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+static int au_wbr_create_init_mfs(struct super_block *sb) -+{ -+ struct au_wbr_mfs *mfs; -+ -+ mfs = &au_sbi(sb)->si_wbr_mfs; -+ mutex_init(&mfs->mfs_lock); -+ mfs->mfs_jiffy = 0; 
-+ mfs->mfs_bindex = -EROFS; -+ -+ return 0; -+} -+ -+static int au_wbr_create_fin_mfs(struct super_block *sb __maybe_unused) -+{ -+ mutex_destroy(&au_sbi(sb)->si_wbr_mfs.mfs_lock); -+ return 0; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* most free space and then round robin */ -+static int au_wbr_create_mfsrr(struct dentry *dentry, unsigned int flags) -+{ -+ int err; -+ struct au_wbr_mfs *mfs; -+ -+ err = au_wbr_create_mfs(dentry, flags); -+ if (err >= 0) { -+ mfs = &au_sbi(dentry->d_sb)->si_wbr_mfs; -+ mutex_lock(&mfs->mfs_lock); -+ if (mfs->mfsrr_bytes < mfs->mfsrr_watermark) -+ err = au_wbr_create_rr(dentry, flags); -+ mutex_unlock(&mfs->mfs_lock); -+ } -+ -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+static int au_wbr_create_init_mfsrr(struct super_block *sb) -+{ -+ int err; -+ -+ au_wbr_create_init_mfs(sb); /* ignore */ -+ err = au_wbr_create_init_rr(sb); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* top down parent and most free space */ -+static int au_wbr_create_pmfs(struct dentry *dentry, unsigned int flags) -+{ -+ int err, e2; -+ unsigned long long b; -+ aufs_bindex_t bindex, bstart, bend; -+ struct super_block *sb; -+ struct dentry *parent, *h_parent; -+ struct au_branch *br; -+ -+ err = au_wbr_create_tdp(dentry, flags); -+ if (unlikely(err < 0)) -+ goto out; -+ parent = dget_parent(dentry); -+ bstart = au_dbstart(parent); -+ bend = au_dbtaildir(parent); -+ if (bstart == bend) -+ goto out_parent; /* success */ -+ -+ e2 = au_wbr_create_mfs(dentry, flags); -+ if (e2 < 0) -+ goto out_parent; /* success */ -+ -+ /* when the available size is equal, select upper one */ -+ sb = dentry->d_sb; -+ br = au_sbr(sb, err); -+ b = br->br_wbr->wbr_bytes; -+ AuDbg("b%d, %llu\n", err, b); -+ -+ for (bindex = bstart; bindex <= bend; bindex++) { -+ h_parent = au_h_dptr(parent, bindex); -+ if (!h_parent || !h_parent->d_inode) -+ continue; -+ -+ br = 
au_sbr(sb, bindex); -+ if (!au_br_rdonly(br) && br->br_wbr->wbr_bytes > b) { -+ b = br->br_wbr->wbr_bytes; -+ err = bindex; -+ AuDbg("b%d, %llu\n", err, b); -+ } -+ } -+ -+ if (err >= 0) -+ err = au_wbr_nonopq(dentry, err); -+ -+out_parent: -+ dput(parent); -+out: -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * - top down parent -+ * - most free space with parent -+ * - most free space round-robin regardless parent -+ */ -+static int au_wbr_create_pmfsrr(struct dentry *dentry, unsigned int flags) -+{ -+ int err; -+ unsigned long long watermark; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct au_wbr_mfs *mfs; -+ -+ err = au_wbr_create_pmfs(dentry, flags | AuWbr_PARENT); -+ if (unlikely(err < 0)) -+ goto out; -+ -+ sb = dentry->d_sb; -+ br = au_sbr(sb, err); -+ mfs = &au_sbi(sb)->si_wbr_mfs; -+ mutex_lock(&mfs->mfs_lock); -+ watermark = mfs->mfsrr_watermark; -+ mutex_unlock(&mfs->mfs_lock); -+ if (br->br_wbr->wbr_bytes < watermark) -+ /* regardless the parent dir */ -+ err = au_wbr_create_mfsrr(dentry, flags); -+ -+out: -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* policies for copyup */ -+ -+/* top down parent */ -+static int au_wbr_copyup_tdp(struct dentry *dentry) -+{ -+ return au_wbr_create_tdp(dentry, /*flags, anything is ok*/0); -+} -+ -+/* bottom up parent */ -+static int au_wbr_copyup_bup(struct dentry *dentry) -+{ -+ int err; -+ aufs_bindex_t bindex, bstart; -+ struct dentry *parent, *h_parent; -+ struct super_block *sb; -+ -+ err = -EROFS; -+ sb = dentry->d_sb; -+ parent = dget_parent(dentry); -+ bstart = au_dbstart(parent); -+ for (bindex = au_dbstart(dentry); bindex >= bstart; bindex--) { -+ h_parent = au_h_dptr(parent, bindex); -+ if (!h_parent || !h_parent->d_inode) -+ continue; -+ -+ if (!au_br_rdonly(au_sbr(sb, bindex))) { -+ err = bindex; -+ break; -+ } -+ } -+ 
dput(parent); -+ -+ /* bottom up here */ -+ if (unlikely(err < 0)) -+ err = au_wbr_bu(sb, bstart - 1); -+ -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+/* bottom up */ -+int au_wbr_do_copyup_bu(struct dentry *dentry, aufs_bindex_t bstart) -+{ -+ int err; -+ -+ err = au_wbr_bu(dentry->d_sb, bstart); -+ AuDbg("b%d\n", err); -+ if (err > bstart) -+ err = au_wbr_nonopq(dentry, err); -+ -+ AuDbg("b%d\n", err); -+ return err; -+} -+ -+static int au_wbr_copyup_bu(struct dentry *dentry) -+{ -+ int err; -+ aufs_bindex_t bstart; -+ -+ bstart = au_dbstart(dentry); -+ err = au_wbr_do_copyup_bu(dentry, bstart); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_wbr_copyup_operations au_wbr_copyup_ops[] = { -+ [AuWbrCopyup_TDP] = { -+ .copyup = au_wbr_copyup_tdp -+ }, -+ [AuWbrCopyup_BUP] = { -+ .copyup = au_wbr_copyup_bup -+ }, -+ [AuWbrCopyup_BU] = { -+ .copyup = au_wbr_copyup_bu -+ } -+}; -+ -+struct au_wbr_create_operations au_wbr_create_ops[] = { -+ [AuWbrCreate_TDP] = { -+ .create = au_wbr_create_tdp -+ }, -+ [AuWbrCreate_RR] = { -+ .create = au_wbr_create_rr, -+ .init = au_wbr_create_init_rr -+ }, -+ [AuWbrCreate_MFS] = { -+ .create = au_wbr_create_mfs, -+ .init = au_wbr_create_init_mfs, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_MFSV] = { -+ .create = au_wbr_create_mfs, -+ .init = au_wbr_create_init_mfs, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_MFSRR] = { -+ .create = au_wbr_create_mfsrr, -+ .init = au_wbr_create_init_mfsrr, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_MFSRRV] = { -+ .create = au_wbr_create_mfsrr, -+ .init = au_wbr_create_init_mfsrr, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_PMFS] = { -+ .create = au_wbr_create_pmfs, -+ .init = au_wbr_create_init_mfs, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_PMFSV] = { -+ .create = au_wbr_create_pmfs, -+ .init = au_wbr_create_init_mfs, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_PMFSRR] = 
{ -+ .create = au_wbr_create_pmfsrr, -+ .init = au_wbr_create_init_mfsrr, -+ .fin = au_wbr_create_fin_mfs -+ }, -+ [AuWbrCreate_PMFSRRV] = { -+ .create = au_wbr_create_pmfsrr, -+ .init = au_wbr_create_init_mfsrr, -+ .fin = au_wbr_create_fin_mfs -+ } -+}; -diff --git a/fs/aufs/whout.c b/fs/aufs/whout.c -new file mode 100644 -index 0000000..fb667ee ---- /dev/null -+++ b/fs/aufs/whout.c -@@ -0,0 +1,1061 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * whiteout for logical deletion and opaque directory -+ */ -+ -+#include "aufs.h" -+ -+#define WH_MASK S_IRUGO -+ -+/* -+ * If a directory contains this file, then it is opaque. We start with the -+ * .wh. flag so that it is blocked by lookup. -+ */ -+static struct qstr diropq_name = QSTR_INIT(AUFS_WH_DIROPQ, -+ sizeof(AUFS_WH_DIROPQ) - 1); -+ -+/* -+ * generate whiteout name, which is NOT terminated by NULL. -+ * @name: original d_name.name -+ * @len: original d_name.len -+ * @wh: whiteout qstr -+ * returns zero when succeeds, otherwise error. -+ * succeeded value as wh->name should be freed by kfree(). 
-+ */ -+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name) -+{ -+ char *p; -+ -+ if (unlikely(name->len > PATH_MAX - AUFS_WH_PFX_LEN)) -+ return -ENAMETOOLONG; -+ -+ wh->len = name->len + AUFS_WH_PFX_LEN; -+ p = kmalloc(wh->len, GFP_NOFS); -+ wh->name = p; -+ if (p) { -+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN); -+ memcpy(p + AUFS_WH_PFX_LEN, name->name, name->len); -+ /* smp_mb(); */ -+ return 0; -+ } -+ return -ENOMEM; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * test if the @wh_name exists under @h_parent. -+ * @try_sio specifies the necessary of super-io. -+ */ -+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio) -+{ -+ int err; -+ struct dentry *wh_dentry; -+ -+ if (!try_sio) -+ wh_dentry = vfsub_lkup_one(wh_name, h_parent); -+ else -+ wh_dentry = au_sio_lkup_one(wh_name, h_parent); -+ err = PTR_ERR(wh_dentry); -+ if (IS_ERR(wh_dentry)) { -+ if (err == -ENAMETOOLONG) -+ err = 0; -+ goto out; -+ } -+ -+ err = 0; -+ if (!wh_dentry->d_inode) -+ goto out_wh; /* success */ -+ -+ err = 1; -+ if (S_ISREG(wh_dentry->d_inode->i_mode)) -+ goto out_wh; /* success */ -+ -+ err = -EIO; -+ AuIOErr("%pd Invalid whiteout entry type 0%o.\n", -+ wh_dentry, wh_dentry->d_inode->i_mode); -+ -+out_wh: -+ dput(wh_dentry); -+out: -+ return err; -+} -+ -+/* -+ * test if the @h_dentry sets opaque or not. -+ */ -+int au_diropq_test(struct dentry *h_dentry) -+{ -+ int err; -+ struct inode *h_dir; -+ -+ h_dir = h_dentry->d_inode; -+ err = au_wh_test(h_dentry, &diropq_name, -+ au_test_h_perm_sio(h_dir, MAY_EXEC)); -+ return err; -+} -+ -+/* -+ * returns a negative dentry whose name is unique and temporary. 
-+ */ -+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br, -+ struct qstr *prefix) -+{ -+ struct dentry *dentry; -+ int i; -+ char defname[NAME_MAX - AUFS_MAX_NAMELEN + DNAME_INLINE_LEN + 1], -+ *name, *p; -+ /* strict atomic_t is unnecessary here */ -+ static unsigned short cnt; -+ struct qstr qs; -+ -+ BUILD_BUG_ON(sizeof(cnt) * 2 > AUFS_WH_TMP_LEN); -+ -+ name = defname; -+ qs.len = sizeof(defname) - DNAME_INLINE_LEN + prefix->len - 1; -+ if (unlikely(prefix->len > DNAME_INLINE_LEN)) { -+ dentry = ERR_PTR(-ENAMETOOLONG); -+ if (unlikely(qs.len > NAME_MAX)) -+ goto out; -+ dentry = ERR_PTR(-ENOMEM); -+ name = kmalloc(qs.len + 1, GFP_NOFS); -+ if (unlikely(!name)) -+ goto out; -+ } -+ -+ /* doubly whiteout-ed */ -+ memcpy(name, AUFS_WH_PFX AUFS_WH_PFX, AUFS_WH_PFX_LEN * 2); -+ p = name + AUFS_WH_PFX_LEN * 2; -+ memcpy(p, prefix->name, prefix->len); -+ p += prefix->len; -+ *p++ = '.'; -+ AuDebugOn(name + qs.len + 1 - p <= AUFS_WH_TMP_LEN); -+ -+ qs.name = name; -+ for (i = 0; i < 3; i++) { -+ sprintf(p, "%.*x", AUFS_WH_TMP_LEN, cnt++); -+ dentry = au_sio_lkup_one(&qs, h_parent); -+ if (IS_ERR(dentry) || !dentry->d_inode) -+ goto out_name; -+ dput(dentry); -+ } -+ /* pr_warn("could not get random name\n"); */ -+ dentry = ERR_PTR(-EEXIST); -+ AuDbg("%.*s\n", AuLNPair(&qs)); -+ BUG(); -+ -+out_name: -+ if (name != defname) -+ kfree(name); -+out: -+ AuTraceErrPtr(dentry); -+ return dentry; -+} -+ -+/* -+ * rename the @h_dentry on @br to the whiteouted temporary name. 
-+ */ -+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br) -+{ -+ int err; -+ struct path h_path = { -+ .mnt = au_br_mnt(br) -+ }; -+ struct inode *h_dir, *delegated; -+ struct dentry *h_parent; -+ -+ h_parent = h_dentry->d_parent; /* dir inode is locked */ -+ h_dir = h_parent->d_inode; -+ IMustLock(h_dir); -+ -+ h_path.dentry = au_whtmp_lkup(h_parent, br, &h_dentry->d_name); -+ err = PTR_ERR(h_path.dentry); -+ if (IS_ERR(h_path.dentry)) -+ goto out; -+ -+ /* under the same dir, no need to lock_rename() */ -+ delegated = NULL; -+ err = vfsub_rename(h_dir, h_dentry, h_dir, &h_path, &delegated); -+ AuTraceErr(err); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal rename\n"); -+ iput(delegated); -+ } -+ dput(h_path.dentry); -+ -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * functions for removing a whiteout -+ */ -+ -+static int do_unlink_wh(struct inode *h_dir, struct path *h_path) -+{ -+ int err, force; -+ struct inode *delegated; -+ -+ /* -+ * forces superio when the dir has a sticky bit. -+ * this may be a violation of unix fs semantics. 
-+ */ -+ force = (h_dir->i_mode & S_ISVTX) -+ && !uid_eq(current_fsuid(), h_path->dentry->d_inode->i_uid); -+ delegated = NULL; -+ err = vfsub_unlink(h_dir, h_path, &delegated, force); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ return err; -+} -+ -+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path, -+ struct dentry *dentry) -+{ -+ int err; -+ -+ err = do_unlink_wh(h_dir, h_path); -+ if (!err && dentry) -+ au_set_dbwh(dentry, -1); -+ -+ return err; -+} -+ -+static int unlink_wh_name(struct dentry *h_parent, struct qstr *wh, -+ struct au_branch *br) -+{ -+ int err; -+ struct path h_path = { -+ .mnt = au_br_mnt(br) -+ }; -+ -+ err = 0; -+ h_path.dentry = vfsub_lkup_one(wh, h_parent); -+ if (IS_ERR(h_path.dentry)) -+ err = PTR_ERR(h_path.dentry); -+ else { -+ if (h_path.dentry->d_inode -+ && S_ISREG(h_path.dentry->d_inode->i_mode)) -+ err = do_unlink_wh(h_parent->d_inode, &h_path); -+ dput(h_path.dentry); -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * initialize/clean whiteout for a branch -+ */ -+ -+static void au_wh_clean(struct inode *h_dir, struct path *whpath, -+ const int isdir) -+{ -+ int err; -+ struct inode *delegated; -+ -+ if (!whpath->dentry->d_inode) -+ return; -+ -+ if (isdir) -+ err = vfsub_rmdir(h_dir, whpath); -+ else { -+ delegated = NULL; -+ err = vfsub_unlink(h_dir, whpath, &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ } -+ if (unlikely(err)) -+ pr_warn("failed removing %pd (%d), ignored.\n", -+ whpath->dentry, err); -+} -+ -+static int test_linkable(struct dentry *h_root) -+{ -+ struct inode *h_dir = h_root->d_inode; -+ -+ if (h_dir->i_op->link) -+ return 0; -+ -+ pr_err("%pd (%s) doesn't support link(2), use noplink and rw+nolwh\n", 
-+ h_root, au_sbtype(h_root->d_sb)); -+ return -ENOSYS; -+} -+ -+/* todo: should this mkdir be done in /sbin/mount.aufs helper? */ -+static int au_whdir(struct inode *h_dir, struct path *path) -+{ -+ int err; -+ -+ err = -EEXIST; -+ if (!path->dentry->d_inode) { -+ int mode = S_IRWXU; -+ -+ if (au_test_nfs(path->dentry->d_sb)) -+ mode |= S_IXUGO; -+ err = vfsub_mkdir(h_dir, path, mode); -+ } else if (d_is_dir(path->dentry)) -+ err = 0; -+ else -+ pr_err("unknown %pd exists\n", path->dentry); -+ -+ return err; -+} -+ -+struct au_wh_base { -+ const struct qstr *name; -+ struct dentry *dentry; -+}; -+ -+static void au_wh_init_ro(struct inode *h_dir, struct au_wh_base base[], -+ struct path *h_path) -+{ -+ h_path->dentry = base[AuBrWh_BASE].dentry; -+ au_wh_clean(h_dir, h_path, /*isdir*/0); -+ h_path->dentry = base[AuBrWh_PLINK].dentry; -+ au_wh_clean(h_dir, h_path, /*isdir*/1); -+ h_path->dentry = base[AuBrWh_ORPH].dentry; -+ au_wh_clean(h_dir, h_path, /*isdir*/1); -+} -+ -+/* -+ * returns tri-state, -+ * minus: error, caller should print the message -+ * zero: succuess -+ * plus: error, caller should NOT print the message -+ */ -+static int au_wh_init_rw_nolink(struct dentry *h_root, struct au_wbr *wbr, -+ int do_plink, struct au_wh_base base[], -+ struct path *h_path) -+{ -+ int err; -+ struct inode *h_dir; -+ -+ h_dir = h_root->d_inode; -+ h_path->dentry = base[AuBrWh_BASE].dentry; -+ au_wh_clean(h_dir, h_path, /*isdir*/0); -+ h_path->dentry = base[AuBrWh_PLINK].dentry; -+ if (do_plink) { -+ err = test_linkable(h_root); -+ if (unlikely(err)) { -+ err = 1; -+ goto out; -+ } -+ -+ err = au_whdir(h_dir, h_path); -+ if (unlikely(err)) -+ goto out; -+ wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry); -+ } else -+ au_wh_clean(h_dir, h_path, /*isdir*/1); -+ h_path->dentry = base[AuBrWh_ORPH].dentry; -+ err = au_whdir(h_dir, h_path); -+ if (unlikely(err)) -+ goto out; -+ wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry); -+ -+out: -+ return err; -+} -+ -+/* -+ * for the 
moment, aufs supports the branch filesystem which does not support -+ * link(2). testing on FAT which does not support i_op->setattr() fully either, -+ * copyup failed. finally, such filesystem will not be used as the writable -+ * branch. -+ * -+ * returns tri-state, see above. -+ */ -+static int au_wh_init_rw(struct dentry *h_root, struct au_wbr *wbr, -+ int do_plink, struct au_wh_base base[], -+ struct path *h_path) -+{ -+ int err; -+ struct inode *h_dir; -+ -+ WbrWhMustWriteLock(wbr); -+ -+ err = test_linkable(h_root); -+ if (unlikely(err)) { -+ err = 1; -+ goto out; -+ } -+ -+ /* -+ * todo: should this create be done in /sbin/mount.aufs helper? -+ */ -+ err = -EEXIST; -+ h_dir = h_root->d_inode; -+ if (!base[AuBrWh_BASE].dentry->d_inode) { -+ h_path->dentry = base[AuBrWh_BASE].dentry; -+ err = vfsub_create(h_dir, h_path, WH_MASK, /*want_excl*/true); -+ } else if (S_ISREG(base[AuBrWh_BASE].dentry->d_inode->i_mode)) -+ err = 0; -+ else -+ pr_err("unknown %pd2 exists\n", base[AuBrWh_BASE].dentry); -+ if (unlikely(err)) -+ goto out; -+ -+ h_path->dentry = base[AuBrWh_PLINK].dentry; -+ if (do_plink) { -+ err = au_whdir(h_dir, h_path); -+ if (unlikely(err)) -+ goto out; -+ wbr->wbr_plink = dget(base[AuBrWh_PLINK].dentry); -+ } else -+ au_wh_clean(h_dir, h_path, /*isdir*/1); -+ wbr->wbr_whbase = dget(base[AuBrWh_BASE].dentry); -+ -+ h_path->dentry = base[AuBrWh_ORPH].dentry; -+ err = au_whdir(h_dir, h_path); -+ if (unlikely(err)) -+ goto out; -+ wbr->wbr_orph = dget(base[AuBrWh_ORPH].dentry); -+ -+out: -+ return err; -+} -+ -+/* -+ * initialize the whiteout base file/dir for @br. 
-+ */ -+int au_wh_init(struct au_branch *br, struct super_block *sb) -+{ -+ int err, i; -+ const unsigned char do_plink -+ = !!au_opt_test(au_mntflags(sb), PLINK); -+ struct inode *h_dir; -+ struct path path = br->br_path; -+ struct dentry *h_root = path.dentry; -+ struct au_wbr *wbr = br->br_wbr; -+ static const struct qstr base_name[] = { -+ [AuBrWh_BASE] = QSTR_INIT(AUFS_BASE_NAME, -+ sizeof(AUFS_BASE_NAME) - 1), -+ [AuBrWh_PLINK] = QSTR_INIT(AUFS_PLINKDIR_NAME, -+ sizeof(AUFS_PLINKDIR_NAME) - 1), -+ [AuBrWh_ORPH] = QSTR_INIT(AUFS_ORPHDIR_NAME, -+ sizeof(AUFS_ORPHDIR_NAME) - 1) -+ }; -+ struct au_wh_base base[] = { -+ [AuBrWh_BASE] = { -+ .name = base_name + AuBrWh_BASE, -+ .dentry = NULL -+ }, -+ [AuBrWh_PLINK] = { -+ .name = base_name + AuBrWh_PLINK, -+ .dentry = NULL -+ }, -+ [AuBrWh_ORPH] = { -+ .name = base_name + AuBrWh_ORPH, -+ .dentry = NULL -+ } -+ }; -+ -+ if (wbr) -+ WbrWhMustWriteLock(wbr); -+ -+ for (i = 0; i < AuBrWh_Last; i++) { -+ /* doubly whiteouted */ -+ struct dentry *d; -+ -+ d = au_wh_lkup(h_root, (void *)base[i].name, br); -+ err = PTR_ERR(d); -+ if (IS_ERR(d)) -+ goto out; -+ -+ base[i].dentry = d; -+ AuDebugOn(wbr -+ && wbr->wbr_wh[i] -+ && wbr->wbr_wh[i] != base[i].dentry); -+ } -+ -+ if (wbr) -+ for (i = 0; i < AuBrWh_Last; i++) { -+ dput(wbr->wbr_wh[i]); -+ wbr->wbr_wh[i] = NULL; -+ } -+ -+ err = 0; -+ if (!au_br_writable(br->br_perm)) { -+ h_dir = h_root->d_inode; -+ au_wh_init_ro(h_dir, base, &path); -+ } else if (!au_br_wh_linkable(br->br_perm)) { -+ err = au_wh_init_rw_nolink(h_root, wbr, do_plink, base, &path); -+ if (err > 0) -+ goto out; -+ else if (err) -+ goto out_err; -+ } else { -+ err = au_wh_init_rw(h_root, wbr, do_plink, base, &path); -+ if (err > 0) -+ goto out; -+ else if (err) -+ goto out_err; -+ } -+ goto out; /* success */ -+ -+out_err: -+ pr_err("an error(%d) on the writable branch %pd(%s)\n", -+ err, h_root, au_sbtype(h_root->d_sb)); -+out: -+ for (i = 0; i < AuBrWh_Last; i++) -+ dput(base[i].dentry); -+ return 
err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+/* -+ * whiteouts are all hard-linked usually. -+ * when its link count reaches a ceiling, we create a new whiteout base -+ * asynchronously. -+ */ -+ -+struct reinit_br_wh { -+ struct super_block *sb; -+ struct au_branch *br; -+}; -+ -+static void reinit_br_wh(void *arg) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct path h_path; -+ struct reinit_br_wh *a = arg; -+ struct au_wbr *wbr; -+ struct inode *dir, *delegated; -+ struct dentry *h_root; -+ struct au_hinode *hdir; -+ -+ err = 0; -+ wbr = a->br->br_wbr; -+ /* big aufs lock */ -+ si_noflush_write_lock(a->sb); -+ if (!au_br_writable(a->br->br_perm)) -+ goto out; -+ bindex = au_br_index(a->sb, a->br->br_id); -+ if (unlikely(bindex < 0)) -+ goto out; -+ -+ di_read_lock_parent(a->sb->s_root, AuLock_IR); -+ dir = a->sb->s_root->d_inode; -+ hdir = au_hi(dir, bindex); -+ h_root = au_h_dptr(a->sb->s_root, bindex); -+ AuDebugOn(h_root != au_br_dentry(a->br)); -+ -+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT); -+ wbr_wh_write_lock(wbr); -+ err = au_h_verify(wbr->wbr_whbase, au_opt_udba(a->sb), hdir->hi_inode, -+ h_root, a->br); -+ if (!err) { -+ h_path.dentry = wbr->wbr_whbase; -+ h_path.mnt = au_br_mnt(a->br); -+ delegated = NULL; -+ err = vfsub_unlink(hdir->hi_inode, &h_path, &delegated, -+ /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ } -+ } else { -+ pr_warn("%pd is moved, ignored\n", wbr->wbr_whbase); -+ err = 0; -+ } -+ dput(wbr->wbr_whbase); -+ wbr->wbr_whbase = NULL; -+ if (!err) -+ err = au_wh_init(a->br, a->sb); -+ wbr_wh_write_unlock(wbr); -+ au_hn_imtx_unlock(hdir); -+ di_read_unlock(a->sb->s_root, AuLock_IR); -+ if (!err) -+ au_fhsm_wrote(a->sb, bindex, /*force*/0); -+ -+out: -+ if (wbr) -+ atomic_dec(&wbr->wbr_wh_running); -+ atomic_dec(&a->br->br_count); -+ si_write_unlock(a->sb); -+ 
au_nwt_done(&au_sbi(a->sb)->si_nowait); -+ kfree(arg); -+ if (unlikely(err)) -+ AuIOErr("err %d\n", err); -+} -+ -+static void kick_reinit_br_wh(struct super_block *sb, struct au_branch *br) -+{ -+ int do_dec, wkq_err; -+ struct reinit_br_wh *arg; -+ -+ do_dec = 1; -+ if (atomic_inc_return(&br->br_wbr->wbr_wh_running) != 1) -+ goto out; -+ -+ /* ignore ENOMEM */ -+ arg = kmalloc(sizeof(*arg), GFP_NOFS); -+ if (arg) { -+ /* -+ * dec(wh_running), kfree(arg) and dec(br_count) -+ * in reinit function -+ */ -+ arg->sb = sb; -+ arg->br = br; -+ atomic_inc(&br->br_count); -+ wkq_err = au_wkq_nowait(reinit_br_wh, arg, sb, /*flags*/0); -+ if (unlikely(wkq_err)) { -+ atomic_dec(&br->br_wbr->wbr_wh_running); -+ atomic_dec(&br->br_count); -+ kfree(arg); -+ } -+ do_dec = 0; -+ } -+ -+out: -+ if (do_dec) -+ atomic_dec(&br->br_wbr->wbr_wh_running); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * create the whiteout @wh. -+ */ -+static int link_or_create_wh(struct super_block *sb, aufs_bindex_t bindex, -+ struct dentry *wh) -+{ -+ int err; -+ struct path h_path = { -+ .dentry = wh -+ }; -+ struct au_branch *br; -+ struct au_wbr *wbr; -+ struct dentry *h_parent; -+ struct inode *h_dir, *delegated; -+ -+ h_parent = wh->d_parent; /* dir inode is locked */ -+ h_dir = h_parent->d_inode; -+ IMustLock(h_dir); -+ -+ br = au_sbr(sb, bindex); -+ h_path.mnt = au_br_mnt(br); -+ wbr = br->br_wbr; -+ wbr_wh_read_lock(wbr); -+ if (wbr->wbr_whbase) { -+ delegated = NULL; -+ err = vfsub_link(wbr->wbr_whbase, h_dir, &h_path, &delegated); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal link\n"); -+ iput(delegated); -+ } -+ if (!err || err != -EMLINK) -+ goto out; -+ -+ /* link count full. re-initialize br_whbase. 
*/ -+ kick_reinit_br_wh(sb, br); -+ } -+ -+ /* return this error in this context */ -+ err = vfsub_create(h_dir, &h_path, WH_MASK, /*want_excl*/true); -+ if (!err) -+ au_fhsm_wrote(sb, bindex, /*force*/0); -+ -+out: -+ wbr_wh_read_unlock(wbr); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * create or remove the diropq. -+ */ -+static struct dentry *do_diropq(struct dentry *dentry, aufs_bindex_t bindex, -+ unsigned int flags) -+{ -+ struct dentry *opq_dentry, *h_dentry; -+ struct super_block *sb; -+ struct au_branch *br; -+ int err; -+ -+ sb = dentry->d_sb; -+ br = au_sbr(sb, bindex); -+ h_dentry = au_h_dptr(dentry, bindex); -+ opq_dentry = vfsub_lkup_one(&diropq_name, h_dentry); -+ if (IS_ERR(opq_dentry)) -+ goto out; -+ -+ if (au_ftest_diropq(flags, CREATE)) { -+ err = link_or_create_wh(sb, bindex, opq_dentry); -+ if (!err) { -+ au_set_dbdiropq(dentry, bindex); -+ goto out; /* success */ -+ } -+ } else { -+ struct path tmp = { -+ .dentry = opq_dentry, -+ .mnt = au_br_mnt(br) -+ }; -+ err = do_unlink_wh(au_h_iptr(dentry->d_inode, bindex), &tmp); -+ if (!err) -+ au_set_dbdiropq(dentry, -1); -+ } -+ dput(opq_dentry); -+ opq_dentry = ERR_PTR(err); -+ -+out: -+ return opq_dentry; -+} -+ -+struct do_diropq_args { -+ struct dentry **errp; -+ struct dentry *dentry; -+ aufs_bindex_t bindex; -+ unsigned int flags; -+}; -+ -+static void call_do_diropq(void *args) -+{ -+ struct do_diropq_args *a = args; -+ *a->errp = do_diropq(a->dentry, a->bindex, a->flags); -+} -+ -+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex, -+ unsigned int flags) -+{ -+ struct dentry *diropq, *h_dentry; -+ -+ h_dentry = au_h_dptr(dentry, bindex); -+ if (!au_test_h_perm_sio(h_dentry->d_inode, MAY_EXEC | MAY_WRITE)) -+ diropq = do_diropq(dentry, bindex, flags); -+ else { -+ int wkq_err; -+ struct do_diropq_args args = { -+ .errp = &diropq, -+ .dentry = dentry, -+ .bindex = bindex, -+ .flags = flags -+ }; -+ -+ 
wkq_err = au_wkq_wait(call_do_diropq, &args); -+ if (unlikely(wkq_err)) -+ diropq = ERR_PTR(wkq_err); -+ } -+ -+ return diropq; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * lookup whiteout dentry. -+ * @h_parent: lower parent dentry which must exist and be locked -+ * @base_name: name of dentry which will be whiteouted -+ * returns dentry for whiteout. -+ */ -+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name, -+ struct au_branch *br) -+{ -+ int err; -+ struct qstr wh_name; -+ struct dentry *wh_dentry; -+ -+ err = au_wh_name_alloc(&wh_name, base_name); -+ wh_dentry = ERR_PTR(err); -+ if (!err) { -+ wh_dentry = vfsub_lkup_one(&wh_name, h_parent); -+ kfree(wh_name.name); -+ } -+ return wh_dentry; -+} -+ -+/* -+ * link/create a whiteout for @dentry on @bindex. -+ */ -+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent) -+{ -+ struct dentry *wh_dentry; -+ struct super_block *sb; -+ int err; -+ -+ sb = dentry->d_sb; -+ wh_dentry = au_wh_lkup(h_parent, &dentry->d_name, au_sbr(sb, bindex)); -+ if (!IS_ERR(wh_dentry) && !wh_dentry->d_inode) { -+ err = link_or_create_wh(sb, bindex, wh_dentry); -+ if (!err) { -+ au_set_dbwh(dentry, bindex); -+ au_fhsm_wrote(sb, bindex, /*force*/0); -+ } else { -+ dput(wh_dentry); -+ wh_dentry = ERR_PTR(err); -+ } -+ } -+ -+ return wh_dentry; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* Delete all whiteouts in this directory on branch bindex. 
*/ -+static int del_wh_children(struct dentry *h_dentry, struct au_nhash *whlist, -+ aufs_bindex_t bindex, struct au_branch *br) -+{ -+ int err; -+ unsigned long ul, n; -+ struct qstr wh_name; -+ char *p; -+ struct hlist_head *head; -+ struct au_vdir_wh *pos; -+ struct au_vdir_destr *str; -+ -+ err = -ENOMEM; -+ p = (void *)__get_free_page(GFP_NOFS); -+ wh_name.name = p; -+ if (unlikely(!wh_name.name)) -+ goto out; -+ -+ err = 0; -+ memcpy(p, AUFS_WH_PFX, AUFS_WH_PFX_LEN); -+ p += AUFS_WH_PFX_LEN; -+ n = whlist->nh_num; -+ head = whlist->nh_head; -+ for (ul = 0; !err && ul < n; ul++, head++) { -+ hlist_for_each_entry(pos, head, wh_hash) { -+ if (pos->wh_bindex != bindex) -+ continue; -+ -+ str = &pos->wh_str; -+ if (str->len + AUFS_WH_PFX_LEN <= PATH_MAX) { -+ memcpy(p, str->name, str->len); -+ wh_name.len = AUFS_WH_PFX_LEN + str->len; -+ err = unlink_wh_name(h_dentry, &wh_name, br); -+ if (!err) -+ continue; -+ break; -+ } -+ AuIOErr("whiteout name too long %.*s\n", -+ str->len, str->name); -+ err = -EIO; -+ break; -+ } -+ } -+ free_page((unsigned long)wh_name.name); -+ -+out: -+ return err; -+} -+ -+struct del_wh_children_args { -+ int *errp; -+ struct dentry *h_dentry; -+ struct au_nhash *whlist; -+ aufs_bindex_t bindex; -+ struct au_branch *br; -+}; -+ -+static void call_del_wh_children(void *args) -+{ -+ struct del_wh_children_args *a = args; -+ *a->errp = del_wh_children(a->h_dentry, a->whlist, a->bindex, a->br); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp) -+{ -+ struct au_whtmp_rmdir *whtmp; -+ int err; -+ unsigned int rdhash; -+ -+ SiMustAnyLock(sb); -+ -+ whtmp = kzalloc(sizeof(*whtmp), gfp); -+ if (unlikely(!whtmp)) { -+ whtmp = ERR_PTR(-ENOMEM); -+ goto out; -+ } -+ -+ /* no estimation for dir size */ -+ rdhash = au_sbi(sb)->si_rdhash; -+ if (!rdhash) -+ rdhash = AUFS_RDHASH_DEF; -+ err = au_nhash_alloc(&whtmp->whlist, rdhash, 
gfp); -+ if (unlikely(err)) { -+ kfree(whtmp); -+ whtmp = ERR_PTR(err); -+ } -+ -+out: -+ return whtmp; -+} -+ -+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp) -+{ -+ if (whtmp->br) -+ atomic_dec(&whtmp->br->br_count); -+ dput(whtmp->wh_dentry); -+ iput(whtmp->dir); -+ au_nhash_wh_free(&whtmp->whlist); -+ kfree(whtmp); -+} -+ -+/* -+ * rmdir the whiteouted temporary named dir @h_dentry. -+ * @whlist: whiteouted children. -+ */ -+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex, -+ struct dentry *wh_dentry, struct au_nhash *whlist) -+{ -+ int err; -+ unsigned int h_nlink; -+ struct path h_tmp; -+ struct inode *wh_inode, *h_dir; -+ struct au_branch *br; -+ -+ h_dir = wh_dentry->d_parent->d_inode; /* dir inode is locked */ -+ IMustLock(h_dir); -+ -+ br = au_sbr(dir->i_sb, bindex); -+ wh_inode = wh_dentry->d_inode; -+ mutex_lock_nested(&wh_inode->i_mutex, AuLsc_I_CHILD); -+ -+ /* -+ * someone else might change some whiteouts while we were sleeping. -+ * it means this whlist may have an obsoleted entry. 
-+ */ -+ if (!au_test_h_perm_sio(wh_inode, MAY_EXEC | MAY_WRITE)) -+ err = del_wh_children(wh_dentry, whlist, bindex, br); -+ else { -+ int wkq_err; -+ struct del_wh_children_args args = { -+ .errp = &err, -+ .h_dentry = wh_dentry, -+ .whlist = whlist, -+ .bindex = bindex, -+ .br = br -+ }; -+ -+ wkq_err = au_wkq_wait(call_del_wh_children, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ } -+ mutex_unlock(&wh_inode->i_mutex); -+ -+ if (!err) { -+ h_tmp.dentry = wh_dentry; -+ h_tmp.mnt = au_br_mnt(br); -+ h_nlink = h_dir->i_nlink; -+ err = vfsub_rmdir(h_dir, &h_tmp); -+ /* some fs doesn't change the parent nlink in some cases */ -+ h_nlink -= h_dir->i_nlink; -+ } -+ -+ if (!err) { -+ if (au_ibstart(dir) == bindex) { -+ /* todo: dir->i_mutex is necessary */ -+ au_cpup_attr_timesizes(dir); -+ if (h_nlink) -+ vfsub_drop_nlink(dir); -+ } -+ return 0; /* success */ -+ } -+ -+ pr_warn("failed removing %pd(%d), ignored\n", wh_dentry, err); -+ return err; -+} -+ -+static void call_rmdir_whtmp(void *args) -+{ -+ int err; -+ aufs_bindex_t bindex; -+ struct au_whtmp_rmdir *a = args; -+ struct super_block *sb; -+ struct dentry *h_parent; -+ struct inode *h_dir; -+ struct au_hinode *hdir; -+ -+ /* rmdir by nfsd may cause deadlock with this i_mutex */ -+ /* mutex_lock(&a->dir->i_mutex); */ -+ err = -EROFS; -+ sb = a->dir->i_sb; -+ si_read_lock(sb, !AuLock_FLUSH); -+ if (!au_br_writable(a->br->br_perm)) -+ goto out; -+ bindex = au_br_index(sb, a->br->br_id); -+ if (unlikely(bindex < 0)) -+ goto out; -+ -+ err = -EIO; -+ ii_write_lock_parent(a->dir); -+ h_parent = dget_parent(a->wh_dentry); -+ h_dir = h_parent->d_inode; -+ hdir = au_hi(a->dir, bindex); -+ err = vfsub_mnt_want_write(au_br_mnt(a->br)); -+ if (unlikely(err)) -+ goto out_mnt; -+ au_hn_imtx_lock_nested(hdir, AuLsc_I_PARENT); -+ err = au_h_verify(a->wh_dentry, au_opt_udba(sb), h_dir, h_parent, -+ a->br); -+ if (!err) -+ err = au_whtmp_rmdir(a->dir, bindex, a->wh_dentry, &a->whlist); -+ au_hn_imtx_unlock(hdir); -+ 
vfsub_mnt_drop_write(au_br_mnt(a->br)); -+ -+out_mnt: -+ dput(h_parent); -+ ii_write_unlock(a->dir); -+out: -+ /* mutex_unlock(&a->dir->i_mutex); */ -+ au_whtmp_rmdir_free(a); -+ si_read_unlock(sb); -+ au_nwt_done(&au_sbi(sb)->si_nowait); -+ if (unlikely(err)) -+ AuIOErr("err %d\n", err); -+} -+ -+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex, -+ struct dentry *wh_dentry, struct au_whtmp_rmdir *args) -+{ -+ int wkq_err; -+ struct super_block *sb; -+ -+ IMustLock(dir); -+ -+ /* all post-process will be done in do_rmdir_whtmp(). */ -+ sb = dir->i_sb; -+ args->dir = au_igrab(dir); -+ args->br = au_sbr(sb, bindex); -+ atomic_inc(&args->br->br_count); -+ args->wh_dentry = dget(wh_dentry); -+ wkq_err = au_wkq_nowait(call_rmdir_whtmp, args, sb, /*flags*/0); -+ if (unlikely(wkq_err)) { -+ pr_warn("rmdir error %pd (%d), ignored\n", wh_dentry, wkq_err); -+ au_whtmp_rmdir_free(args); -+ } -+} -diff --git a/fs/aufs/whout.h b/fs/aufs/whout.h -new file mode 100644 -index 0000000..5a5c378 ---- /dev/null -+++ b/fs/aufs/whout.h -@@ -0,0 +1,85 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * whiteout for logical deletion and opaque directory -+ */ -+ -+#ifndef __AUFS_WHOUT_H__ -+#define __AUFS_WHOUT_H__ -+ -+#ifdef __KERNEL__ -+ -+#include "dir.h" -+ -+/* whout.c */ -+int au_wh_name_alloc(struct qstr *wh, const struct qstr *name); -+int au_wh_test(struct dentry *h_parent, struct qstr *wh_name, int try_sio); -+int au_diropq_test(struct dentry *h_dentry); -+struct au_branch; -+struct dentry *au_whtmp_lkup(struct dentry *h_parent, struct au_branch *br, -+ struct qstr *prefix); -+int au_whtmp_ren(struct dentry *h_dentry, struct au_branch *br); -+int au_wh_unlink_dentry(struct inode *h_dir, struct path *h_path, -+ struct dentry *dentry); -+int au_wh_init(struct au_branch *br, struct super_block *sb); -+ -+/* diropq flags */ -+#define AuDiropq_CREATE 1 -+#define au_ftest_diropq(flags, name) ((flags) & AuDiropq_##name) -+#define au_fset_diropq(flags, name) \ -+ do { (flags) |= AuDiropq_##name; } while (0) -+#define au_fclr_diropq(flags, name) \ -+ do { (flags) &= ~AuDiropq_##name; } while (0) -+ -+struct dentry *au_diropq_sio(struct dentry *dentry, aufs_bindex_t bindex, -+ unsigned int flags); -+struct dentry *au_wh_lkup(struct dentry *h_parent, struct qstr *base_name, -+ struct au_branch *br); -+struct dentry *au_wh_create(struct dentry *dentry, aufs_bindex_t bindex, -+ struct dentry *h_parent); -+ -+/* real rmdir for the whiteout-ed dir */ -+struct au_whtmp_rmdir { -+ struct inode *dir; -+ struct au_branch *br; -+ struct dentry *wh_dentry; -+ struct au_nhash whlist; -+}; -+ -+struct au_whtmp_rmdir *au_whtmp_rmdir_alloc(struct super_block *sb, gfp_t gfp); -+void au_whtmp_rmdir_free(struct au_whtmp_rmdir *whtmp); -+int au_whtmp_rmdir(struct inode *dir, aufs_bindex_t bindex, -+ struct dentry *wh_dentry, struct au_nhash *whlist); -+void au_whtmp_kick_rmdir(struct inode *dir, aufs_bindex_t bindex, -+ struct dentry *wh_dentry, struct au_whtmp_rmdir *args); -+ -+/* ---------------------------------------------------------------------- */ -+ 
-+static inline struct dentry *au_diropq_create(struct dentry *dentry, -+ aufs_bindex_t bindex) -+{ -+ return au_diropq_sio(dentry, bindex, AuDiropq_CREATE); -+} -+ -+static inline int au_diropq_remove(struct dentry *dentry, aufs_bindex_t bindex) -+{ -+ return PTR_ERR(au_diropq_sio(dentry, bindex, !AuDiropq_CREATE)); -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_WHOUT_H__ */ -diff --git a/fs/aufs/wkq.c b/fs/aufs/wkq.c -new file mode 100644 -index 0000000..a4e1b92 ---- /dev/null -+++ b/fs/aufs/wkq.c -@@ -0,0 +1,213 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * workqueue for asynchronous/super-io operations -+ * todo: try new dredential scheme -+ */ -+ -+#include -+#include "aufs.h" -+ -+/* internal workqueue named AUFS_WKQ_NAME */ -+ -+static struct workqueue_struct *au_wkq; -+ -+struct au_wkinfo { -+ struct work_struct wk; -+ struct kobject *kobj; -+ -+ unsigned int flags; /* see wkq.h */ -+ -+ au_wkq_func_t func; -+ void *args; -+ -+ struct completion *comp; -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void wkq_func(struct work_struct *wk) -+{ -+ struct au_wkinfo *wkinfo = container_of(wk, struct au_wkinfo, wk); -+ -+ AuDebugOn(!uid_eq(current_fsuid(), GLOBAL_ROOT_UID)); -+ AuDebugOn(rlimit(RLIMIT_FSIZE) != RLIM_INFINITY); -+ -+ wkinfo->func(wkinfo->args); -+ if (au_ftest_wkq(wkinfo->flags, WAIT)) -+ complete(wkinfo->comp); -+ else { -+ kobject_put(wkinfo->kobj); -+ module_put(THIS_MODULE); /* todo: ?? */ -+ kfree(wkinfo); -+ } -+} -+ -+/* -+ * Since struct completion is large, try allocating it dynamically. 
-+ */ -+#if 1 /* defined(CONFIG_4KSTACKS) || defined(AuTest4KSTACKS) */ -+#define AuWkqCompDeclare(name) struct completion *comp = NULL -+ -+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp) -+{ -+ *comp = kmalloc(sizeof(**comp), GFP_NOFS); -+ if (*comp) { -+ init_completion(*comp); -+ wkinfo->comp = *comp; -+ return 0; -+ } -+ return -ENOMEM; -+} -+ -+static void au_wkq_comp_free(struct completion *comp) -+{ -+ kfree(comp); -+} -+ -+#else -+ -+/* no braces */ -+#define AuWkqCompDeclare(name) \ -+ DECLARE_COMPLETION_ONSTACK(_ ## name); \ -+ struct completion *comp = &_ ## name -+ -+static int au_wkq_comp_alloc(struct au_wkinfo *wkinfo, struct completion **comp) -+{ -+ wkinfo->comp = *comp; -+ return 0; -+} -+ -+static void au_wkq_comp_free(struct completion *comp __maybe_unused) -+{ -+ /* empty */ -+} -+#endif /* 4KSTACKS */ -+ -+static void au_wkq_run(struct au_wkinfo *wkinfo) -+{ -+ if (au_ftest_wkq(wkinfo->flags, NEST)) { -+ if (au_wkq_test()) { -+ AuWarn1("wkq from wkq, unless silly-rename on NFS," -+ " due to a dead dir by UDBA?\n"); -+ AuDebugOn(au_ftest_wkq(wkinfo->flags, WAIT)); -+ } -+ } else -+ au_dbg_verify_kthread(); -+ -+ if (au_ftest_wkq(wkinfo->flags, WAIT)) { -+ INIT_WORK_ONSTACK(&wkinfo->wk, wkq_func); -+ queue_work(au_wkq, &wkinfo->wk); -+ } else { -+ INIT_WORK(&wkinfo->wk, wkq_func); -+ schedule_work(&wkinfo->wk); -+ } -+} -+ -+/* -+ * Be careful. It is easy to make deadlock happen. 
-+ * processA: lock, wkq and wait -+ * processB: wkq and wait, lock in wkq -+ * --> deadlock -+ */ -+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args) -+{ -+ int err; -+ AuWkqCompDeclare(comp); -+ struct au_wkinfo wkinfo = { -+ .flags = flags, -+ .func = func, -+ .args = args -+ }; -+ -+ err = au_wkq_comp_alloc(&wkinfo, &comp); -+ if (!err) { -+ au_wkq_run(&wkinfo); -+ /* no timeout, no interrupt */ -+ wait_for_completion(wkinfo.comp); -+ au_wkq_comp_free(comp); -+ destroy_work_on_stack(&wkinfo.wk); -+ } -+ -+ return err; -+ -+} -+ -+/* -+ * Note: dget/dput() in func for aufs dentries are not supported. It will be a -+ * problem in a concurrent umounting. -+ */ -+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb, -+ unsigned int flags) -+{ -+ int err; -+ struct au_wkinfo *wkinfo; -+ -+ atomic_inc(&au_sbi(sb)->si_nowait.nw_len); -+ -+ /* -+ * wkq_func() must free this wkinfo. -+ * it highly depends upon the implementation of workqueue. -+ */ -+ err = 0; -+ wkinfo = kmalloc(sizeof(*wkinfo), GFP_NOFS); -+ if (wkinfo) { -+ wkinfo->kobj = &au_sbi(sb)->si_kobj; -+ wkinfo->flags = flags & ~AuWkq_WAIT; -+ wkinfo->func = func; -+ wkinfo->args = args; -+ wkinfo->comp = NULL; -+ kobject_get(wkinfo->kobj); -+ __module_get(THIS_MODULE); /* todo: ?? 
*/ -+ -+ au_wkq_run(wkinfo); -+ } else { -+ err = -ENOMEM; -+ au_nwt_done(&au_sbi(sb)->si_nowait); -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+void au_nwt_init(struct au_nowait_tasks *nwt) -+{ -+ atomic_set(&nwt->nw_len, 0); -+ /* smp_mb(); */ /* atomic_set */ -+ init_waitqueue_head(&nwt->nw_wq); -+} -+ -+void au_wkq_fin(void) -+{ -+ destroy_workqueue(au_wkq); -+} -+ -+int __init au_wkq_init(void) -+{ -+ int err; -+ -+ err = 0; -+ au_wkq = alloc_workqueue(AUFS_WKQ_NAME, 0, WQ_DFL_ACTIVE); -+ if (IS_ERR(au_wkq)) -+ err = PTR_ERR(au_wkq); -+ else if (!au_wkq) -+ err = -ENOMEM; -+ -+ return err; -+} -diff --git a/fs/aufs/wkq.h b/fs/aufs/wkq.h -new file mode 100644 -index 0000000..830123c ---- /dev/null -+++ b/fs/aufs/wkq.h -@@ -0,0 +1,91 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . 
-+ */ -+ -+/* -+ * workqueue for asynchronous/super-io operations -+ * todo: try new credentials management scheme -+ */ -+ -+#ifndef __AUFS_WKQ_H__ -+#define __AUFS_WKQ_H__ -+ -+#ifdef __KERNEL__ -+ -+struct super_block; -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * in the next operation, wait for the 'nowait' tasks in system-wide workqueue -+ */ -+struct au_nowait_tasks { -+ atomic_t nw_len; -+ wait_queue_head_t nw_wq; -+}; -+ -+/* ---------------------------------------------------------------------- */ -+ -+typedef void (*au_wkq_func_t)(void *args); -+ -+/* wkq flags */ -+#define AuWkq_WAIT 1 -+#define AuWkq_NEST (1 << 1) -+#define au_ftest_wkq(flags, name) ((flags) & AuWkq_##name) -+#define au_fset_wkq(flags, name) \ -+ do { (flags) |= AuWkq_##name; } while (0) -+#define au_fclr_wkq(flags, name) \ -+ do { (flags) &= ~AuWkq_##name; } while (0) -+ -+#ifndef CONFIG_AUFS_HNOTIFY -+#undef AuWkq_NEST -+#define AuWkq_NEST 0 -+#endif -+ -+/* wkq.c */ -+int au_wkq_do_wait(unsigned int flags, au_wkq_func_t func, void *args); -+int au_wkq_nowait(au_wkq_func_t func, void *args, struct super_block *sb, -+ unsigned int flags); -+void au_nwt_init(struct au_nowait_tasks *nwt); -+int __init au_wkq_init(void); -+void au_wkq_fin(void); -+ -+/* ---------------------------------------------------------------------- */ -+ -+static inline int au_wkq_test(void) -+{ -+ return current->flags & PF_WQ_WORKER; -+} -+ -+static inline int au_wkq_wait(au_wkq_func_t func, void *args) -+{ -+ return au_wkq_do_wait(AuWkq_WAIT, func, args); -+} -+ -+static inline void au_nwt_done(struct au_nowait_tasks *nwt) -+{ -+ if (atomic_dec_and_test(&nwt->nw_len)) -+ wake_up_all(&nwt->nw_wq); -+} -+ -+static inline int au_nwt_flush(struct au_nowait_tasks *nwt) -+{ -+ wait_event(nwt->nw_wq, !atomic_read(&nwt->nw_len)); -+ return 0; -+} -+ -+#endif /* __KERNEL__ */ -+#endif /* __AUFS_WKQ_H__ */ -diff --git a/fs/aufs/xattr.c b/fs/aufs/xattr.c -new file mode 
100644 -index 0000000..e16beea ---- /dev/null -+++ b/fs/aufs/xattr.c -@@ -0,0 +1,344 @@ -+/* -+ * Copyright (C) 2014-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * handling xattr functions -+ */ -+ -+#include -+#include "aufs.h" -+ -+static int au_xattr_ignore(int err, char *name, unsigned int ignore_flags) -+{ -+ if (!ignore_flags) -+ goto out; -+ switch (err) { -+ case -ENOMEM: -+ case -EDQUOT: -+ goto out; -+ } -+ -+ if ((ignore_flags & AuBrAttr_ICEX) == AuBrAttr_ICEX) { -+ err = 0; -+ goto out; -+ } -+ -+#define cmp(brattr, prefix) do { \ -+ if (!strncmp(name, XATTR_##prefix##_PREFIX, \ -+ XATTR_##prefix##_PREFIX_LEN)) { \ -+ if (ignore_flags & AuBrAttr_ICEX_##brattr) \ -+ err = 0; \ -+ goto out; \ -+ } \ -+ } while (0) -+ -+ cmp(SEC, SECURITY); -+ cmp(SYS, SYSTEM); -+ cmp(TR, TRUSTED); -+ cmp(USR, USER); -+#undef cmp -+ -+ if (ignore_flags & AuBrAttr_ICEX_OTH) -+ err = 0; -+ -+out: -+ return err; -+} -+ -+static const int au_xattr_out_of_list = AuBrAttr_ICEX_OTH << 1; -+ -+static int au_do_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, -+ char *name, char **buf, unsigned int ignore_flags, -+ unsigned int verbose) -+{ -+ int err; -+ ssize_t ssz; -+ struct inode *h_idst; -+ -+ ssz = vfs_getxattr_alloc(h_src, name, buf, 0, GFP_NOFS); -+ err = ssz; -+ if (unlikely(err <= 0)) { -+ if (err == -ENODATA -+ || (err == 
-EOPNOTSUPP -+ && ((ignore_flags & au_xattr_out_of_list) -+ || (au_test_nfs_noacl(h_src->d_inode) -+ && (!strcmp(name, XATTR_NAME_POSIX_ACL_ACCESS) -+ || !strcmp(name, -+ XATTR_NAME_POSIX_ACL_DEFAULT)))) -+ )) -+ err = 0; -+ if (err && (verbose || au_debug_test())) -+ pr_err("%s, err %d\n", name, err); -+ goto out; -+ } -+ -+ /* unlock it temporary */ -+ h_idst = h_dst->d_inode; -+ mutex_unlock(&h_idst->i_mutex); -+ err = vfsub_setxattr(h_dst, name, *buf, ssz, /*flags*/0); -+ mutex_lock_nested(&h_idst->i_mutex, AuLsc_I_CHILD2); -+ if (unlikely(err)) { -+ if (verbose || au_debug_test()) -+ pr_err("%s, err %d\n", name, err); -+ err = au_xattr_ignore(err, name, ignore_flags); -+ } -+ -+out: -+ return err; -+} -+ -+int au_cpup_xattr(struct dentry *h_dst, struct dentry *h_src, int ignore_flags, -+ unsigned int verbose) -+{ -+ int err, unlocked, acl_access, acl_default; -+ ssize_t ssz; -+ struct inode *h_isrc, *h_idst; -+ char *value, *p, *o, *e; -+ -+ /* try stopping to update the source inode while we are referencing */ -+ /* there should not be the parent-child relationship between them */ -+ h_isrc = h_src->d_inode; -+ h_idst = h_dst->d_inode; -+ mutex_unlock(&h_idst->i_mutex); -+ mutex_lock_nested(&h_isrc->i_mutex, AuLsc_I_CHILD); -+ mutex_lock_nested(&h_idst->i_mutex, AuLsc_I_CHILD2); -+ unlocked = 0; -+ -+ /* some filesystems don't list POSIX ACL, for example tmpfs */ -+ ssz = vfs_listxattr(h_src, NULL, 0); -+ err = ssz; -+ if (unlikely(err < 0)) { -+ AuTraceErr(err); -+ if (err == -ENODATA -+ || err == -EOPNOTSUPP) -+ err = 0; /* ignore */ -+ goto out; -+ } -+ -+ err = 0; -+ p = NULL; -+ o = NULL; -+ if (ssz) { -+ err = -ENOMEM; -+ p = kmalloc(ssz, GFP_NOFS); -+ o = p; -+ if (unlikely(!p)) -+ goto out; -+ err = vfs_listxattr(h_src, p, ssz); -+ } -+ mutex_unlock(&h_isrc->i_mutex); -+ unlocked = 1; -+ AuDbg("err %d, ssz %zd\n", err, ssz); -+ if (unlikely(err < 0)) -+ goto out_free; -+ -+ err = 0; -+ e = p + ssz; -+ value = NULL; -+ acl_access = 0; -+ acl_default = 
0; -+ while (!err && p < e) { -+ acl_access |= !strncmp(p, XATTR_NAME_POSIX_ACL_ACCESS, -+ sizeof(XATTR_NAME_POSIX_ACL_ACCESS) - 1); -+ acl_default |= !strncmp(p, XATTR_NAME_POSIX_ACL_DEFAULT, -+ sizeof(XATTR_NAME_POSIX_ACL_DEFAULT) -+ - 1); -+ err = au_do_cpup_xattr(h_dst, h_src, p, &value, ignore_flags, -+ verbose); -+ p += strlen(p) + 1; -+ } -+ AuTraceErr(err); -+ ignore_flags |= au_xattr_out_of_list; -+ if (!err && !acl_access) { -+ err = au_do_cpup_xattr(h_dst, h_src, -+ XATTR_NAME_POSIX_ACL_ACCESS, &value, -+ ignore_flags, verbose); -+ AuTraceErr(err); -+ } -+ if (!err && !acl_default) { -+ err = au_do_cpup_xattr(h_dst, h_src, -+ XATTR_NAME_POSIX_ACL_DEFAULT, &value, -+ ignore_flags, verbose); -+ AuTraceErr(err); -+ } -+ -+ kfree(value); -+ -+out_free: -+ kfree(o); -+out: -+ if (!unlocked) -+ mutex_unlock(&h_isrc->i_mutex); -+ AuTraceErr(err); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+enum { -+ AU_XATTR_LIST, -+ AU_XATTR_GET -+}; -+ -+struct au_lgxattr { -+ int type; -+ union { -+ struct { -+ char *list; -+ size_t size; -+ } list; -+ struct { -+ const char *name; -+ void *value; -+ size_t size; -+ } get; -+ } u; -+}; -+ -+static ssize_t au_lgxattr(struct dentry *dentry, struct au_lgxattr *arg) -+{ -+ ssize_t err; -+ struct path h_path; -+ struct super_block *sb; -+ -+ sb = dentry->d_sb; -+ err = si_read_lock(sb, AuLock_FLUSH | AuLock_NOPLM); -+ if (unlikely(err)) -+ goto out; -+ err = au_h_path_getattr(dentry, /*force*/1, &h_path); -+ if (unlikely(err)) -+ goto out_si; -+ if (unlikely(!h_path.dentry)) -+ /* illegally overlapped or something */ -+ goto out_di; /* pretending success */ -+ -+ /* always topmost entry only */ -+ switch (arg->type) { -+ case AU_XATTR_LIST: -+ err = vfs_listxattr(h_path.dentry, -+ arg->u.list.list, arg->u.list.size); -+ break; -+ case AU_XATTR_GET: -+ err = vfs_getxattr(h_path.dentry, -+ arg->u.get.name, arg->u.get.value, -+ arg->u.get.size); -+ break; -+ } -+ 
-+out_di: -+ di_read_unlock(dentry, AuLock_IR); -+out_si: -+ si_read_unlock(sb); -+out: -+ AuTraceErr(err); -+ return err; -+} -+ -+ssize_t aufs_listxattr(struct dentry *dentry, char *list, size_t size) -+{ -+ struct au_lgxattr arg = { -+ .type = AU_XATTR_LIST, -+ .u.list = { -+ .list = list, -+ .size = size -+ }, -+ }; -+ -+ return au_lgxattr(dentry, &arg); -+} -+ -+ssize_t aufs_getxattr(struct dentry *dentry, const char *name, void *value, -+ size_t size) -+{ -+ struct au_lgxattr arg = { -+ .type = AU_XATTR_GET, -+ .u.get = { -+ .name = name, -+ .value = value, -+ .size = size -+ }, -+ }; -+ -+ return au_lgxattr(dentry, &arg); -+} -+ -+int aufs_setxattr(struct dentry *dentry, const char *name, const void *value, -+ size_t size, int flags) -+{ -+ struct au_srxattr arg = { -+ .type = AU_XATTR_SET, -+ .u.set = { -+ .name = name, -+ .value = value, -+ .size = size, -+ .flags = flags -+ }, -+ }; -+ -+ return au_srxattr(dentry, &arg); -+} -+ -+int aufs_removexattr(struct dentry *dentry, const char *name) -+{ -+ struct au_srxattr arg = { -+ .type = AU_XATTR_REMOVE, -+ .u.remove = { -+ .name = name -+ }, -+ }; -+ -+ return au_srxattr(dentry, &arg); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+#if 0 -+static size_t au_xattr_list(struct dentry *dentry, char *list, size_t list_size, -+ const char *name, size_t name_len, int type) -+{ -+ return aufs_listxattr(dentry, list, list_size); -+} -+ -+static int au_xattr_get(struct dentry *dentry, const char *name, void *buffer, -+ size_t size, int type) -+{ -+ return aufs_getxattr(dentry, name, buffer, size); -+} -+ -+static int au_xattr_set(struct dentry *dentry, const char *name, -+ const void *value, size_t size, int flags, int type) -+{ -+ return aufs_setxattr(dentry, name, value, size, flags); -+} -+ -+static const struct xattr_handler au_xattr_handler = { -+ /* no prefix, no flags */ -+ .list = au_xattr_list, -+ .get = au_xattr_get, -+ .set = au_xattr_set -+ /* why no remove? 
*/ -+}; -+ -+static const struct xattr_handler *au_xattr_handlers[] = { -+ &au_xattr_handler -+}; -+ -+void au_xattr_init(struct super_block *sb) -+{ -+ /* sb->s_xattr = au_xattr_handlers; */ -+} -+#endif -diff --git a/fs/aufs/xino.c b/fs/aufs/xino.c -new file mode 100644 -index 0000000..50ab4ca ---- /dev/null -+++ b/fs/aufs/xino.c -@@ -0,0 +1,1343 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+/* -+ * external inode number translation table and bitmap -+ */ -+ -+#include -+#include -+#include "aufs.h" -+ -+/* todo: unnecessary to support mmap_sem since kernel-space? */ -+ssize_t xino_fread(au_readf_t func, struct file *file, void *kbuf, size_t size, -+ loff_t *pos) -+{ -+ ssize_t err; -+ mm_segment_t oldfs; -+ union { -+ void *k; -+ char __user *u; -+ } buf; -+ -+ buf.k = kbuf; -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+ do { -+ /* todo: signal_pending? 
*/ -+ err = func(file, buf.u, size, pos); -+ } while (err == -EAGAIN || err == -EINTR); -+ set_fs(oldfs); -+ -+#if 0 /* reserved for future use */ -+ if (err > 0) -+ fsnotify_access(file->f_dentry); -+#endif -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static ssize_t xino_fwrite_wkq(au_writef_t func, struct file *file, void *buf, -+ size_t size, loff_t *pos); -+ -+static ssize_t do_xino_fwrite(au_writef_t func, struct file *file, void *kbuf, -+ size_t size, loff_t *pos) -+{ -+ ssize_t err; -+ mm_segment_t oldfs; -+ union { -+ void *k; -+ const char __user *u; -+ } buf; -+ int i; -+ const int prevent_endless = 10; -+ -+ i = 0; -+ buf.k = kbuf; -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+ do { -+ err = func(file, buf.u, size, pos); -+ if (err == -EINTR -+ && !au_wkq_test() -+ && fatal_signal_pending(current)) { -+ set_fs(oldfs); -+ err = xino_fwrite_wkq(func, file, kbuf, size, pos); -+ BUG_ON(err == -EINTR); -+ oldfs = get_fs(); -+ set_fs(KERNEL_DS); -+ } -+ } while (i++ < prevent_endless -+ && (err == -EAGAIN || err == -EINTR)); -+ set_fs(oldfs); -+ -+#if 0 /* reserved for future use */ -+ if (err > 0) -+ fsnotify_modify(file->f_dentry); -+#endif -+ -+ return err; -+} -+ -+struct do_xino_fwrite_args { -+ ssize_t *errp; -+ au_writef_t func; -+ struct file *file; -+ void *buf; -+ size_t size; -+ loff_t *pos; -+}; -+ -+static void call_do_xino_fwrite(void *args) -+{ -+ struct do_xino_fwrite_args *a = args; -+ *a->errp = do_xino_fwrite(a->func, a->file, a->buf, a->size, a->pos); -+} -+ -+static ssize_t xino_fwrite_wkq(au_writef_t func, struct file *file, void *buf, -+ size_t size, loff_t *pos) -+{ -+ ssize_t err; -+ int wkq_err; -+ struct do_xino_fwrite_args args = { -+ .errp = &err, -+ .func = func, -+ .file = file, -+ .buf = buf, -+ .size = size, -+ .pos = pos -+ }; -+ -+ /* -+ * it breaks RLIMIT_FSIZE and normal user's limit, -+ * users should care about quota and real 'filesystem full.' 
-+ */ -+ wkq_err = au_wkq_wait(call_do_xino_fwrite, &args); -+ if (unlikely(wkq_err)) -+ err = wkq_err; -+ -+ return err; -+} -+ -+ssize_t xino_fwrite(au_writef_t func, struct file *file, void *buf, size_t size, -+ loff_t *pos) -+{ -+ ssize_t err; -+ -+ if (rlimit(RLIMIT_FSIZE) == RLIM_INFINITY) { -+ lockdep_off(); -+ err = do_xino_fwrite(func, file, buf, size, pos); -+ lockdep_on(); -+ } else -+ err = xino_fwrite_wkq(func, file, buf, size, pos); -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * create a new xinofile at the same place/path as @base_file. -+ */ -+struct file *au_xino_create2(struct file *base_file, struct file *copy_src) -+{ -+ struct file *file; -+ struct dentry *base, *parent; -+ struct inode *dir, *delegated; -+ struct qstr *name; -+ struct path path; -+ int err; -+ -+ base = base_file->f_dentry; -+ parent = base->d_parent; /* dir inode is locked */ -+ dir = parent->d_inode; -+ IMustLock(dir); -+ -+ file = ERR_PTR(-EINVAL); -+ name = &base->d_name; -+ path.dentry = vfsub_lookup_one_len(name->name, parent, name->len); -+ if (IS_ERR(path.dentry)) { -+ file = (void *)path.dentry; -+ pr_err("%pd lookup err %ld\n", -+ base, PTR_ERR(path.dentry)); -+ goto out; -+ } -+ -+ /* no need to mnt_want_write() since we call dentry_open() later */ -+ err = vfs_create(dir, path.dentry, S_IRUGO | S_IWUGO, NULL); -+ if (unlikely(err)) { -+ file = ERR_PTR(err); -+ pr_err("%pd create err %d\n", base, err); -+ goto out_dput; -+ } -+ -+ path.mnt = base_file->f_path.mnt; -+ file = vfsub_dentry_open(&path, -+ O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE -+ /* | __FMODE_NONOTIFY */); -+ if (IS_ERR(file)) { -+ pr_err("%pd open err %ld\n", base, PTR_ERR(file)); -+ goto out_dput; -+ } -+ -+ delegated = NULL; -+ err = vfsub_unlink(dir, &file->f_path, &delegated, /*force*/0); -+ if (unlikely(err == -EWOULDBLOCK)) { -+ pr_warn("cannot retry for NFSv4 delegation" -+ " for an internal unlink\n"); -+ iput(delegated); -+ 
} -+ if (unlikely(err)) { -+ pr_err("%pd unlink err %d\n", base, err); -+ goto out_fput; -+ } -+ -+ if (copy_src) { -+ /* no one can touch copy_src xino */ -+ err = au_copy_file(file, copy_src, vfsub_f_size_read(copy_src)); -+ if (unlikely(err)) { -+ pr_err("%pd copy err %d\n", base, err); -+ goto out_fput; -+ } -+ } -+ goto out_dput; /* success */ -+ -+out_fput: -+ fput(file); -+ file = ERR_PTR(err); -+out_dput: -+ dput(path.dentry); -+out: -+ return file; -+} -+ -+struct au_xino_lock_dir { -+ struct au_hinode *hdir; -+ struct dentry *parent; -+ struct mutex *mtx; -+}; -+ -+static void au_xino_lock_dir(struct super_block *sb, struct file *xino, -+ struct au_xino_lock_dir *ldir) -+{ -+ aufs_bindex_t brid, bindex; -+ -+ ldir->hdir = NULL; -+ bindex = -1; -+ brid = au_xino_brid(sb); -+ if (brid >= 0) -+ bindex = au_br_index(sb, brid); -+ if (bindex >= 0) { -+ ldir->hdir = au_hi(sb->s_root->d_inode, bindex); -+ au_hn_imtx_lock_nested(ldir->hdir, AuLsc_I_PARENT); -+ } else { -+ ldir->parent = dget_parent(xino->f_dentry); -+ ldir->mtx = &ldir->parent->d_inode->i_mutex; -+ mutex_lock_nested(ldir->mtx, AuLsc_I_PARENT); -+ } -+} -+ -+static void au_xino_unlock_dir(struct au_xino_lock_dir *ldir) -+{ -+ if (ldir->hdir) -+ au_hn_imtx_unlock(ldir->hdir); -+ else { -+ mutex_unlock(ldir->mtx); -+ dput(ldir->parent); -+ } -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* trucate xino files asynchronously */ -+ -+int au_xino_trunc(struct super_block *sb, aufs_bindex_t bindex) -+{ -+ int err; -+ unsigned long jiffy; -+ blkcnt_t blocks; -+ aufs_bindex_t bi, bend; -+ struct kstatfs *st; -+ struct au_branch *br; -+ struct file *new_xino, *file; -+ struct super_block *h_sb; -+ struct au_xino_lock_dir ldir; -+ -+ err = -ENOMEM; -+ st = kmalloc(sizeof(*st), GFP_NOFS); -+ if (unlikely(!st)) -+ goto out; -+ -+ err = -EINVAL; -+ bend = au_sbend(sb); -+ if (unlikely(bindex < 0 || bend < bindex)) -+ goto out_st; -+ br = au_sbr(sb, bindex); -+ file = 
br->br_xino.xi_file; -+ if (!file) -+ goto out_st; -+ -+ err = vfs_statfs(&file->f_path, st); -+ if (unlikely(err)) -+ AuErr1("statfs err %d, ignored\n", err); -+ jiffy = jiffies; -+ blocks = file_inode(file)->i_blocks; -+ pr_info("begin truncating xino(b%d), ib%llu, %llu/%llu free blks\n", -+ bindex, (u64)blocks, st->f_bfree, st->f_blocks); -+ -+ au_xino_lock_dir(sb, file, &ldir); -+ /* mnt_want_write() is unnecessary here */ -+ new_xino = au_xino_create2(file, file); -+ au_xino_unlock_dir(&ldir); -+ err = PTR_ERR(new_xino); -+ if (IS_ERR(new_xino)) { -+ pr_err("err %d, ignored\n", err); -+ goto out_st; -+ } -+ err = 0; -+ fput(file); -+ br->br_xino.xi_file = new_xino; -+ -+ h_sb = au_br_sb(br); -+ for (bi = 0; bi <= bend; bi++) { -+ if (unlikely(bi == bindex)) -+ continue; -+ br = au_sbr(sb, bi); -+ if (au_br_sb(br) != h_sb) -+ continue; -+ -+ fput(br->br_xino.xi_file); -+ br->br_xino.xi_file = new_xino; -+ get_file(new_xino); -+ } -+ -+ err = vfs_statfs(&new_xino->f_path, st); -+ if (!err) { -+ pr_info("end truncating xino(b%d), ib%llu, %llu/%llu free blks\n", -+ bindex, (u64)file_inode(new_xino)->i_blocks, -+ st->f_bfree, st->f_blocks); -+ if (file_inode(new_xino)->i_blocks < blocks) -+ au_sbi(sb)->si_xino_jiffy = jiffy; -+ } else -+ AuErr1("statfs err %d, ignored\n", err); -+ -+out_st: -+ kfree(st); -+out: -+ return err; -+} -+ -+struct xino_do_trunc_args { -+ struct super_block *sb; -+ struct au_branch *br; -+}; -+ -+static void xino_do_trunc(void *_args) -+{ -+ struct xino_do_trunc_args *args = _args; -+ struct super_block *sb; -+ struct au_branch *br; -+ struct inode *dir; -+ int err; -+ aufs_bindex_t bindex; -+ -+ err = 0; -+ sb = args->sb; -+ dir = sb->s_root->d_inode; -+ br = args->br; -+ -+ si_noflush_write_lock(sb); -+ ii_read_lock_parent(dir); -+ bindex = au_br_index(sb, br->br_id); -+ err = au_xino_trunc(sb, bindex); -+ ii_read_unlock(dir); -+ if (unlikely(err)) -+ pr_warn("err b%d, (%d)\n", bindex, err); -+ atomic_dec(&br->br_xino_running); -+ 
atomic_dec(&br->br_count); -+ si_write_unlock(sb); -+ au_nwt_done(&au_sbi(sb)->si_nowait); -+ kfree(args); -+} -+ -+static int xino_trunc_test(struct super_block *sb, struct au_branch *br) -+{ -+ int err; -+ struct kstatfs st; -+ struct au_sbinfo *sbinfo; -+ -+ /* todo: si_xino_expire and the ratio should be customizable */ -+ sbinfo = au_sbi(sb); -+ if (time_before(jiffies, -+ sbinfo->si_xino_jiffy + sbinfo->si_xino_expire)) -+ return 0; -+ -+ /* truncation border */ -+ err = vfs_statfs(&br->br_xino.xi_file->f_path, &st); -+ if (unlikely(err)) { -+ AuErr1("statfs err %d, ignored\n", err); -+ return 0; -+ } -+ if (div64_u64(st.f_bfree * 100, st.f_blocks) >= AUFS_XINO_DEF_TRUNC) -+ return 0; -+ -+ return 1; -+} -+ -+static void xino_try_trunc(struct super_block *sb, struct au_branch *br) -+{ -+ struct xino_do_trunc_args *args; -+ int wkq_err; -+ -+ if (!xino_trunc_test(sb, br)) -+ return; -+ -+ if (atomic_inc_return(&br->br_xino_running) > 1) -+ goto out; -+ -+ /* lock and kfree() will be called in trunc_xino() */ -+ args = kmalloc(sizeof(*args), GFP_NOFS); -+ if (unlikely(!args)) { -+ AuErr1("no memory\n"); -+ goto out_args; -+ } -+ -+ atomic_inc(&br->br_count); -+ args->sb = sb; -+ args->br = br; -+ wkq_err = au_wkq_nowait(xino_do_trunc, args, sb, /*flags*/0); -+ if (!wkq_err) -+ return; /* success */ -+ -+ pr_err("wkq %d\n", wkq_err); -+ atomic_dec(&br->br_count); -+ -+out_args: -+ kfree(args); -+out: -+ atomic_dec(&br->br_xino_running); -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static int au_xino_do_write(au_writef_t write, struct file *file, -+ ino_t h_ino, ino_t ino) -+{ -+ loff_t pos; -+ ssize_t sz; -+ -+ pos = h_ino; -+ if (unlikely(au_loff_max / sizeof(ino) - 1 < pos)) { -+ AuIOErr1("too large hi%lu\n", (unsigned long)h_ino); -+ return -EFBIG; -+ } -+ pos *= sizeof(ino); -+ sz = xino_fwrite(write, file, &ino, sizeof(ino), &pos); -+ if (sz == sizeof(ino)) -+ return 0; /* success */ -+ -+ AuIOErr("write failed 
(%zd)\n", sz); -+ return -EIO; -+} -+ -+/* -+ * write @ino to the xinofile for the specified branch{@sb, @bindex} -+ * at the position of @h_ino. -+ * even if @ino is zero, it is written to the xinofile and means no entry. -+ * if the size of the xino file on a specific filesystem exceeds the watermark, -+ * try truncating it. -+ */ -+int au_xino_write(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino, -+ ino_t ino) -+{ -+ int err; -+ unsigned int mnt_flags; -+ struct au_branch *br; -+ -+ BUILD_BUG_ON(sizeof(long long) != sizeof(au_loff_max) -+ || ((loff_t)-1) > 0); -+ SiMustAnyLock(sb); -+ -+ mnt_flags = au_mntflags(sb); -+ if (!au_opt_test(mnt_flags, XINO)) -+ return 0; -+ -+ br = au_sbr(sb, bindex); -+ err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file, -+ h_ino, ino); -+ if (!err) { -+ if (au_opt_test(mnt_flags, TRUNC_XINO) -+ && au_test_fs_trunc_xino(au_br_sb(br))) -+ xino_try_trunc(sb, br); -+ return 0; /* success */ -+ } -+ -+ AuIOErr("write failed (%d)\n", err); -+ return -EIO; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* aufs inode number bitmap */ -+ -+static const int page_bits = (int)PAGE_SIZE * BITS_PER_BYTE; -+static ino_t xib_calc_ino(unsigned long pindex, int bit) -+{ -+ ino_t ino; -+ -+ AuDebugOn(bit < 0 || page_bits <= bit); -+ ino = AUFS_FIRST_INO + pindex * page_bits + bit; -+ return ino; -+} -+ -+static void xib_calc_bit(ino_t ino, unsigned long *pindex, int *bit) -+{ -+ AuDebugOn(ino < AUFS_FIRST_INO); -+ ino -= AUFS_FIRST_INO; -+ *pindex = ino / page_bits; -+ *bit = ino % page_bits; -+} -+ -+static int xib_pindex(struct super_block *sb, unsigned long pindex) -+{ -+ int err; -+ loff_t pos; -+ ssize_t sz; -+ struct au_sbinfo *sbinfo; -+ struct file *xib; -+ unsigned long *p; -+ -+ sbinfo = au_sbi(sb); -+ MtxMustLock(&sbinfo->si_xib_mtx); -+ AuDebugOn(pindex > ULONG_MAX / PAGE_SIZE -+ || !au_opt_test(sbinfo->si_mntflags, XINO)); -+ -+ if (pindex == 
sbinfo->si_xib_last_pindex) -+ return 0; -+ -+ xib = sbinfo->si_xib; -+ p = sbinfo->si_xib_buf; -+ pos = sbinfo->si_xib_last_pindex; -+ pos *= PAGE_SIZE; -+ sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos); -+ if (unlikely(sz != PAGE_SIZE)) -+ goto out; -+ -+ pos = pindex; -+ pos *= PAGE_SIZE; -+ if (vfsub_f_size_read(xib) >= pos + PAGE_SIZE) -+ sz = xino_fread(sbinfo->si_xread, xib, p, PAGE_SIZE, &pos); -+ else { -+ memset(p, 0, PAGE_SIZE); -+ sz = xino_fwrite(sbinfo->si_xwrite, xib, p, PAGE_SIZE, &pos); -+ } -+ if (sz == PAGE_SIZE) { -+ sbinfo->si_xib_last_pindex = pindex; -+ return 0; /* success */ -+ } -+ -+out: -+ AuIOErr1("write failed (%zd)\n", sz); -+ err = sz; -+ if (sz >= 0) -+ err = -EIO; -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+static void au_xib_clear_bit(struct inode *inode) -+{ -+ int err, bit; -+ unsigned long pindex; -+ struct super_block *sb; -+ struct au_sbinfo *sbinfo; -+ -+ AuDebugOn(inode->i_nlink); -+ -+ sb = inode->i_sb; -+ xib_calc_bit(inode->i_ino, &pindex, &bit); -+ AuDebugOn(page_bits <= bit); -+ sbinfo = au_sbi(sb); -+ mutex_lock(&sbinfo->si_xib_mtx); -+ err = xib_pindex(sb, pindex); -+ if (!err) { -+ clear_bit(bit, sbinfo->si_xib_buf); -+ sbinfo->si_xib_next_bit = bit; -+ } -+ mutex_unlock(&sbinfo->si_xib_mtx); -+} -+ -+/* for s_op->delete_inode() */ -+void au_xino_delete_inode(struct inode *inode, const int unlinked) -+{ -+ int err; -+ unsigned int mnt_flags; -+ aufs_bindex_t bindex, bend, bi; -+ unsigned char try_trunc; -+ struct au_iinfo *iinfo; -+ struct super_block *sb; -+ struct au_hinode *hi; -+ struct inode *h_inode; -+ struct au_branch *br; -+ au_writef_t xwrite; -+ -+ sb = inode->i_sb; -+ mnt_flags = au_mntflags(sb); -+ if (!au_opt_test(mnt_flags, XINO) -+ || inode->i_ino == AUFS_ROOT_INO) -+ return; -+ -+ if (unlinked) { -+ au_xigen_inc(inode); -+ au_xib_clear_bit(inode); -+ } -+ -+ iinfo = au_ii(inode); -+ if (!iinfo) -+ return; -+ -+ bindex = 
iinfo->ii_bstart; -+ if (bindex < 0) -+ return; -+ -+ xwrite = au_sbi(sb)->si_xwrite; -+ try_trunc = !!au_opt_test(mnt_flags, TRUNC_XINO); -+ hi = iinfo->ii_hinode + bindex; -+ bend = iinfo->ii_bend; -+ for (; bindex <= bend; bindex++, hi++) { -+ h_inode = hi->hi_inode; -+ if (!h_inode -+ || (!unlinked && h_inode->i_nlink)) -+ continue; -+ -+ /* inode may not be revalidated */ -+ bi = au_br_index(sb, hi->hi_id); -+ if (bi < 0) -+ continue; -+ -+ br = au_sbr(sb, bi); -+ err = au_xino_do_write(xwrite, br->br_xino.xi_file, -+ h_inode->i_ino, /*ino*/0); -+ if (!err && try_trunc -+ && au_test_fs_trunc_xino(au_br_sb(br))) -+ xino_try_trunc(sb, br); -+ } -+} -+ -+/* get an unused inode number from bitmap */ -+ino_t au_xino_new_ino(struct super_block *sb) -+{ -+ ino_t ino; -+ unsigned long *p, pindex, ul, pend; -+ struct au_sbinfo *sbinfo; -+ struct file *file; -+ int free_bit, err; -+ -+ if (!au_opt_test(au_mntflags(sb), XINO)) -+ return iunique(sb, AUFS_FIRST_INO); -+ -+ sbinfo = au_sbi(sb); -+ mutex_lock(&sbinfo->si_xib_mtx); -+ p = sbinfo->si_xib_buf; -+ free_bit = sbinfo->si_xib_next_bit; -+ if (free_bit < page_bits && !test_bit(free_bit, p)) -+ goto out; /* success */ -+ free_bit = find_first_zero_bit(p, page_bits); -+ if (free_bit < page_bits) -+ goto out; /* success */ -+ -+ pindex = sbinfo->si_xib_last_pindex; -+ for (ul = pindex - 1; ul < ULONG_MAX; ul--) { -+ err = xib_pindex(sb, ul); -+ if (unlikely(err)) -+ goto out_err; -+ free_bit = find_first_zero_bit(p, page_bits); -+ if (free_bit < page_bits) -+ goto out; /* success */ -+ } -+ -+ file = sbinfo->si_xib; -+ pend = vfsub_f_size_read(file) / PAGE_SIZE; -+ for (ul = pindex + 1; ul <= pend; ul++) { -+ err = xib_pindex(sb, ul); -+ if (unlikely(err)) -+ goto out_err; -+ free_bit = find_first_zero_bit(p, page_bits); -+ if (free_bit < page_bits) -+ goto out; /* success */ -+ } -+ BUG(); -+ -+out: -+ set_bit(free_bit, p); -+ sbinfo->si_xib_next_bit = free_bit + 1; -+ pindex = sbinfo->si_xib_last_pindex; -+ 
mutex_unlock(&sbinfo->si_xib_mtx); -+ ino = xib_calc_ino(pindex, free_bit); -+ AuDbg("i%lu\n", (unsigned long)ino); -+ return ino; -+out_err: -+ mutex_unlock(&sbinfo->si_xib_mtx); -+ AuDbg("i0\n"); -+ return 0; -+} -+ -+/* -+ * read @ino from xinofile for the specified branch{@sb, @bindex} -+ * at the position of @h_ino. -+ * if @ino does not exist and @do_new is true, get new one. -+ */ -+int au_xino_read(struct super_block *sb, aufs_bindex_t bindex, ino_t h_ino, -+ ino_t *ino) -+{ -+ int err; -+ ssize_t sz; -+ loff_t pos; -+ struct file *file; -+ struct au_sbinfo *sbinfo; -+ -+ *ino = 0; -+ if (!au_opt_test(au_mntflags(sb), XINO)) -+ return 0; /* no xino */ -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ pos = h_ino; -+ if (unlikely(au_loff_max / sizeof(*ino) - 1 < pos)) { -+ AuIOErr1("too large hi%lu\n", (unsigned long)h_ino); -+ return -EFBIG; -+ } -+ pos *= sizeof(*ino); -+ -+ file = au_sbr(sb, bindex)->br_xino.xi_file; -+ if (vfsub_f_size_read(file) < pos + sizeof(*ino)) -+ return 0; /* no ino */ -+ -+ sz = xino_fread(sbinfo->si_xread, file, ino, sizeof(*ino), &pos); -+ if (sz == sizeof(*ino)) -+ return 0; /* success */ -+ -+ err = sz; -+ if (unlikely(sz >= 0)) { -+ err = -EIO; -+ AuIOErr("xino read error (%zd)\n", sz); -+ } -+ -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* create and set a new xino file */ -+ -+struct file *au_xino_create(struct super_block *sb, char *fname, int silent) -+{ -+ struct file *file; -+ struct dentry *h_parent, *d; -+ struct inode *h_dir, *inode; -+ int err; -+ -+ /* -+ * at mount-time, and the xino file is the default path, -+ * hnotify is disabled so we have no notify events to ignore. -+ * when a user specified the xino, we cannot get au_hdir to be ignored. 
-+ */ -+ file = vfsub_filp_open(fname, O_RDWR | O_CREAT | O_EXCL | O_LARGEFILE -+ /* | __FMODE_NONOTIFY */, -+ S_IRUGO | S_IWUGO); -+ if (IS_ERR(file)) { -+ if (!silent) -+ pr_err("open %s(%ld)\n", fname, PTR_ERR(file)); -+ return file; -+ } -+ -+ /* keep file count */ -+ err = 0; -+ inode = file_inode(file); -+ h_parent = dget_parent(file->f_dentry); -+ h_dir = h_parent->d_inode; -+ mutex_lock_nested(&h_dir->i_mutex, AuLsc_I_PARENT); -+ /* mnt_want_write() is unnecessary here */ -+ /* no delegation since it is just created */ -+ if (inode->i_nlink) -+ err = vfsub_unlink(h_dir, &file->f_path, /*delegated*/NULL, -+ /*force*/0); -+ mutex_unlock(&h_dir->i_mutex); -+ dput(h_parent); -+ if (unlikely(err)) { -+ if (!silent) -+ pr_err("unlink %s(%d)\n", fname, err); -+ goto out; -+ } -+ -+ err = -EINVAL; -+ d = file->f_dentry; -+ if (unlikely(sb == d->d_sb)) { -+ if (!silent) -+ pr_err("%s must be outside\n", fname); -+ goto out; -+ } -+ if (unlikely(au_test_fs_bad_xino(d->d_sb))) { -+ if (!silent) -+ pr_err("xino doesn't support %s(%s)\n", -+ fname, au_sbtype(d->d_sb)); -+ goto out; -+ } -+ return file; /* success */ -+ -+out: -+ fput(file); -+ file = ERR_PTR(err); -+ return file; -+} -+ -+/* -+ * find another branch who is on the same filesystem of the specified -+ * branch{@btgt}. search until @bend. -+ */ -+static int is_sb_shared(struct super_block *sb, aufs_bindex_t btgt, -+ aufs_bindex_t bend) -+{ -+ aufs_bindex_t bindex; -+ struct super_block *tgt_sb = au_sbr_sb(sb, btgt); -+ -+ for (bindex = 0; bindex < btgt; bindex++) -+ if (unlikely(tgt_sb == au_sbr_sb(sb, bindex))) -+ return bindex; -+ for (bindex++; bindex <= bend; bindex++) -+ if (unlikely(tgt_sb == au_sbr_sb(sb, bindex))) -+ return bindex; -+ return -1; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * initialize the xinofile for the specified branch @br -+ * at the place/path where @base_file indicates. 
-+ * test whether another branch is on the same filesystem or not, -+ * if @do_test is true. -+ */ -+int au_xino_br(struct super_block *sb, struct au_branch *br, ino_t h_ino, -+ struct file *base_file, int do_test) -+{ -+ int err; -+ ino_t ino; -+ aufs_bindex_t bend, bindex; -+ struct au_branch *shared_br, *b; -+ struct file *file; -+ struct super_block *tgt_sb; -+ -+ shared_br = NULL; -+ bend = au_sbend(sb); -+ if (do_test) { -+ tgt_sb = au_br_sb(br); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ b = au_sbr(sb, bindex); -+ if (tgt_sb == au_br_sb(b)) { -+ shared_br = b; -+ break; -+ } -+ } -+ } -+ -+ if (!shared_br || !shared_br->br_xino.xi_file) { -+ struct au_xino_lock_dir ldir; -+ -+ au_xino_lock_dir(sb, base_file, &ldir); -+ /* mnt_want_write() is unnecessary here */ -+ file = au_xino_create2(base_file, NULL); -+ au_xino_unlock_dir(&ldir); -+ err = PTR_ERR(file); -+ if (IS_ERR(file)) -+ goto out; -+ br->br_xino.xi_file = file; -+ } else { -+ br->br_xino.xi_file = shared_br->br_xino.xi_file; -+ get_file(br->br_xino.xi_file); -+ } -+ -+ ino = AUFS_ROOT_INO; -+ err = au_xino_do_write(au_sbi(sb)->si_xwrite, br->br_xino.xi_file, -+ h_ino, ino); -+ if (unlikely(err)) { -+ fput(br->br_xino.xi_file); -+ br->br_xino.xi_file = NULL; -+ } -+ -+out: -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* trucate a xino bitmap file */ -+ -+/* todo: slow */ -+static int do_xib_restore(struct super_block *sb, struct file *file, void *page) -+{ -+ int err, bit; -+ ssize_t sz; -+ unsigned long pindex; -+ loff_t pos, pend; -+ struct au_sbinfo *sbinfo; -+ au_readf_t func; -+ ino_t *ino; -+ unsigned long *p; -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ MtxMustLock(&sbinfo->si_xib_mtx); -+ p = sbinfo->si_xib_buf; -+ func = sbinfo->si_xread; -+ pend = vfsub_f_size_read(file); -+ pos = 0; -+ while (pos < pend) { -+ sz = xino_fread(func, file, page, PAGE_SIZE, &pos); -+ err = sz; -+ if (unlikely(sz <= 0)) -+ goto out; -+ -+ err = 
0; -+ for (ino = page; sz > 0; ino++, sz -= sizeof(ino)) { -+ if (unlikely(*ino < AUFS_FIRST_INO)) -+ continue; -+ -+ xib_calc_bit(*ino, &pindex, &bit); -+ AuDebugOn(page_bits <= bit); -+ err = xib_pindex(sb, pindex); -+ if (!err) -+ set_bit(bit, p); -+ else -+ goto out; -+ } -+ } -+ -+out: -+ return err; -+} -+ -+static int xib_restore(struct super_block *sb) -+{ -+ int err; -+ aufs_bindex_t bindex, bend; -+ void *page; -+ -+ err = -ENOMEM; -+ page = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!page)) -+ goto out; -+ -+ err = 0; -+ bend = au_sbend(sb); -+ for (bindex = 0; !err && bindex <= bend; bindex++) -+ if (!bindex || is_sb_shared(sb, bindex, bindex - 1) < 0) -+ err = do_xib_restore -+ (sb, au_sbr(sb, bindex)->br_xino.xi_file, page); -+ else -+ AuDbg("b%d\n", bindex); -+ free_page((unsigned long)page); -+ -+out: -+ return err; -+} -+ -+int au_xib_trunc(struct super_block *sb) -+{ -+ int err; -+ ssize_t sz; -+ loff_t pos; -+ struct au_xino_lock_dir ldir; -+ struct au_sbinfo *sbinfo; -+ unsigned long *p; -+ struct file *file; -+ -+ SiMustWriteLock(sb); -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ if (!au_opt_test(sbinfo->si_mntflags, XINO)) -+ goto out; -+ -+ file = sbinfo->si_xib; -+ if (vfsub_f_size_read(file) <= PAGE_SIZE) -+ goto out; -+ -+ au_xino_lock_dir(sb, file, &ldir); -+ /* mnt_want_write() is unnecessary here */ -+ file = au_xino_create2(sbinfo->si_xib, NULL); -+ au_xino_unlock_dir(&ldir); -+ err = PTR_ERR(file); -+ if (IS_ERR(file)) -+ goto out; -+ fput(sbinfo->si_xib); -+ sbinfo->si_xib = file; -+ -+ p = sbinfo->si_xib_buf; -+ memset(p, 0, PAGE_SIZE); -+ pos = 0; -+ sz = xino_fwrite(sbinfo->si_xwrite, sbinfo->si_xib, p, PAGE_SIZE, &pos); -+ if (unlikely(sz != PAGE_SIZE)) { -+ err = sz; -+ AuIOErr("err %d\n", err); -+ if (sz >= 0) -+ err = -EIO; -+ goto out; -+ } -+ -+ mutex_lock(&sbinfo->si_xib_mtx); -+ /* mnt_want_write() is unnecessary here */ -+ err = xib_restore(sb); -+ mutex_unlock(&sbinfo->si_xib_mtx); -+ -+out: -+ return err; -+} -+ 
-+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * xino mount option handlers -+ */ -+static au_readf_t find_readf(struct file *h_file) -+{ -+ const struct file_operations *fop = h_file->f_op; -+ -+ if (fop->read) -+ return fop->read; -+ if (fop->aio_read) -+ return do_sync_read; -+ if (fop->read_iter) -+ return new_sync_read; -+ return ERR_PTR(-ENOSYS); -+} -+ -+static au_writef_t find_writef(struct file *h_file) -+{ -+ const struct file_operations *fop = h_file->f_op; -+ -+ if (fop->write) -+ return fop->write; -+ if (fop->aio_write) -+ return do_sync_write; -+ if (fop->write_iter) -+ return new_sync_write; -+ return ERR_PTR(-ENOSYS); -+} -+ -+/* xino bitmap */ -+static void xino_clear_xib(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ sbinfo->si_xread = NULL; -+ sbinfo->si_xwrite = NULL; -+ if (sbinfo->si_xib) -+ fput(sbinfo->si_xib); -+ sbinfo->si_xib = NULL; -+ free_page((unsigned long)sbinfo->si_xib_buf); -+ sbinfo->si_xib_buf = NULL; -+} -+ -+static int au_xino_set_xib(struct super_block *sb, struct file *base) -+{ -+ int err; -+ loff_t pos; -+ struct au_sbinfo *sbinfo; -+ struct file *file; -+ -+ SiMustWriteLock(sb); -+ -+ sbinfo = au_sbi(sb); -+ file = au_xino_create2(base, sbinfo->si_xib); -+ err = PTR_ERR(file); -+ if (IS_ERR(file)) -+ goto out; -+ if (sbinfo->si_xib) -+ fput(sbinfo->si_xib); -+ sbinfo->si_xib = file; -+ sbinfo->si_xread = find_readf(file); -+ sbinfo->si_xwrite = find_writef(file); -+ -+ err = -ENOMEM; -+ if (!sbinfo->si_xib_buf) -+ sbinfo->si_xib_buf = (void *)get_zeroed_page(GFP_NOFS); -+ if (unlikely(!sbinfo->si_xib_buf)) -+ goto out_unset; -+ -+ sbinfo->si_xib_last_pindex = 0; -+ sbinfo->si_xib_next_bit = 0; -+ if (vfsub_f_size_read(file) < PAGE_SIZE) { -+ pos = 0; -+ err = xino_fwrite(sbinfo->si_xwrite, file, sbinfo->si_xib_buf, -+ PAGE_SIZE, &pos); -+ if (unlikely(err != PAGE_SIZE)) -+ goto out_free; -+ } -+ err = 0; -+ goto 
out; /* success */ -+ -+out_free: -+ free_page((unsigned long)sbinfo->si_xib_buf); -+ sbinfo->si_xib_buf = NULL; -+ if (err >= 0) -+ err = -EIO; -+out_unset: -+ fput(sbinfo->si_xib); -+ sbinfo->si_xib = NULL; -+ sbinfo->si_xread = NULL; -+ sbinfo->si_xwrite = NULL; -+out: -+ return err; -+} -+ -+/* xino for each branch */ -+static void xino_clear_br(struct super_block *sb) -+{ -+ aufs_bindex_t bindex, bend; -+ struct au_branch *br; -+ -+ bend = au_sbend(sb); -+ for (bindex = 0; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (!br || !br->br_xino.xi_file) -+ continue; -+ -+ fput(br->br_xino.xi_file); -+ br->br_xino.xi_file = NULL; -+ } -+} -+ -+static int au_xino_set_br(struct super_block *sb, struct file *base) -+{ -+ int err; -+ ino_t ino; -+ aufs_bindex_t bindex, bend, bshared; -+ struct { -+ struct file *old, *new; -+ } *fpair, *p; -+ struct au_branch *br; -+ struct inode *inode; -+ au_writef_t writef; -+ -+ SiMustWriteLock(sb); -+ -+ err = -ENOMEM; -+ bend = au_sbend(sb); -+ fpair = kcalloc(bend + 1, sizeof(*fpair), GFP_NOFS); -+ if (unlikely(!fpair)) -+ goto out; -+ -+ inode = sb->s_root->d_inode; -+ ino = AUFS_ROOT_INO; -+ writef = au_sbi(sb)->si_xwrite; -+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) { -+ br = au_sbr(sb, bindex); -+ bshared = is_sb_shared(sb, bindex, bindex - 1); -+ if (bshared >= 0) { -+ /* shared xino */ -+ *p = fpair[bshared]; -+ get_file(p->new); -+ } -+ -+ if (!p->new) { -+ /* new xino */ -+ p->old = br->br_xino.xi_file; -+ p->new = au_xino_create2(base, br->br_xino.xi_file); -+ err = PTR_ERR(p->new); -+ if (IS_ERR(p->new)) { -+ p->new = NULL; -+ goto out_pair; -+ } -+ } -+ -+ err = au_xino_do_write(writef, p->new, -+ au_h_iptr(inode, bindex)->i_ino, ino); -+ if (unlikely(err)) -+ goto out_pair; -+ } -+ -+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) { -+ br = au_sbr(sb, bindex); -+ if (br->br_xino.xi_file) -+ fput(br->br_xino.xi_file); -+ get_file(p->new); -+ br->br_xino.xi_file = p->new; -+ } 
-+ -+out_pair: -+ for (bindex = 0, p = fpair; bindex <= bend; bindex++, p++) -+ if (p->new) -+ fput(p->new); -+ else -+ break; -+ kfree(fpair); -+out: -+ return err; -+} -+ -+void au_xino_clr(struct super_block *sb) -+{ -+ struct au_sbinfo *sbinfo; -+ -+ au_xigen_clr(sb); -+ xino_clear_xib(sb); -+ xino_clear_br(sb); -+ sbinfo = au_sbi(sb); -+ /* lvalue, do not call au_mntflags() */ -+ au_opt_clr(sbinfo->si_mntflags, XINO); -+} -+ -+int au_xino_set(struct super_block *sb, struct au_opt_xino *xino, int remount) -+{ -+ int err, skip; -+ struct dentry *parent, *cur_parent; -+ struct qstr *dname, *cur_name; -+ struct file *cur_xino; -+ struct inode *dir; -+ struct au_sbinfo *sbinfo; -+ -+ SiMustWriteLock(sb); -+ -+ err = 0; -+ sbinfo = au_sbi(sb); -+ parent = dget_parent(xino->file->f_dentry); -+ if (remount) { -+ skip = 0; -+ dname = &xino->file->f_dentry->d_name; -+ cur_xino = sbinfo->si_xib; -+ if (cur_xino) { -+ cur_parent = dget_parent(cur_xino->f_dentry); -+ cur_name = &cur_xino->f_dentry->d_name; -+ skip = (cur_parent == parent -+ && au_qstreq(dname, cur_name)); -+ dput(cur_parent); -+ } -+ if (skip) -+ goto out; -+ } -+ -+ au_opt_set(sbinfo->si_mntflags, XINO); -+ dir = parent->d_inode; -+ mutex_lock_nested(&dir->i_mutex, AuLsc_I_PARENT); -+ /* mnt_want_write() is unnecessary here */ -+ err = au_xino_set_xib(sb, xino->file); -+ if (!err) -+ err = au_xigen_set(sb, xino->file); -+ if (!err) -+ err = au_xino_set_br(sb, xino->file); -+ mutex_unlock(&dir->i_mutex); -+ if (!err) -+ goto out; /* success */ -+ -+ /* reset all */ -+ AuIOErr("failed creating xino(%d).\n", err); -+ au_xigen_clr(sb); -+ xino_clear_xib(sb); -+ -+out: -+ dput(parent); -+ return err; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* -+ * create a xinofile at the default place/path. 
-+ */ -+struct file *au_xino_def(struct super_block *sb) -+{ -+ struct file *file; -+ char *page, *p; -+ struct au_branch *br; -+ struct super_block *h_sb; -+ struct path path; -+ aufs_bindex_t bend, bindex, bwr; -+ -+ br = NULL; -+ bend = au_sbend(sb); -+ bwr = -1; -+ for (bindex = 0; bindex <= bend; bindex++) { -+ br = au_sbr(sb, bindex); -+ if (au_br_writable(br->br_perm) -+ && !au_test_fs_bad_xino(au_br_sb(br))) { -+ bwr = bindex; -+ break; -+ } -+ } -+ -+ if (bwr >= 0) { -+ file = ERR_PTR(-ENOMEM); -+ page = (void *)__get_free_page(GFP_NOFS); -+ if (unlikely(!page)) -+ goto out; -+ path.mnt = au_br_mnt(br); -+ path.dentry = au_h_dptr(sb->s_root, bwr); -+ p = d_path(&path, page, PATH_MAX - sizeof(AUFS_XINO_FNAME)); -+ file = (void *)p; -+ if (!IS_ERR(p)) { -+ strcat(p, "/" AUFS_XINO_FNAME); -+ AuDbg("%s\n", p); -+ file = au_xino_create(sb, p, /*silent*/0); -+ if (!IS_ERR(file)) -+ au_xino_brid_set(sb, br->br_id); -+ } -+ free_page((unsigned long)page); -+ } else { -+ file = au_xino_create(sb, AUFS_XINO_DEFPATH, /*silent*/0); -+ if (IS_ERR(file)) -+ goto out; -+ h_sb = file->f_dentry->d_sb; -+ if (unlikely(au_test_fs_bad_xino(h_sb))) { -+ pr_err("xino doesn't support %s(%s)\n", -+ AUFS_XINO_DEFPATH, au_sbtype(h_sb)); -+ fput(file); -+ file = ERR_PTR(-EINVAL); -+ } -+ if (!IS_ERR(file)) -+ au_xino_brid_set(sb, -1); -+ } -+ -+out: -+ return file; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+int au_xino_path(struct seq_file *seq, struct file *file) -+{ -+ int err; -+ -+ err = au_seq_path(seq, &file->f_path); -+ if (unlikely(err)) -+ goto out; -+ -+#define Deleted "\\040(deleted)" -+ seq->count -= sizeof(Deleted) - 1; -+ AuDebugOn(memcmp(seq->buf + seq->count, Deleted, -+ sizeof(Deleted) - 1)); -+#undef Deleted -+ -+out: -+ return err; -+} -diff --git a/fs/buffer.c b/fs/buffer.c -index 20805db..363569f 100644 ---- a/fs/buffer.c -+++ b/fs/buffer.c -@@ -2450,7 +2450,7 @@ int block_page_mkwrite(struct vm_area_struct *vma, 
struct vm_fault *vmf, - * Update file times before taking page lock. We may end up failing the - * fault so this update may be superfluous but who really cares... - */ -- file_update_time(vma->vm_file); -+ vma_file_update_time(vma); - - ret = __block_page_mkwrite(vma, vmf, get_block); - sb_end_pagefault(sb); -diff --git a/fs/dcache.c b/fs/dcache.c -index d25f8fd..857990a 100644 ---- a/fs/dcache.c -+++ b/fs/dcache.c -@@ -1022,7 +1022,7 @@ enum d_walk_ret { - * - * The @enter() and @finish() callbacks are called with d_lock held. - */ --static void d_walk(struct dentry *parent, void *data, -+void d_walk(struct dentry *parent, void *data, - enum d_walk_ret (*enter)(void *, struct dentry *), - void (*finish)(void *)) - { -diff --git a/fs/fcntl.c b/fs/fcntl.c -index 99d440a..de1a407 100644 ---- a/fs/fcntl.c -+++ b/fs/fcntl.c -@@ -29,7 +29,7 @@ - - #define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME) - --static int setfl(int fd, struct file * filp, unsigned long arg) -+int setfl(int fd, struct file * filp, unsigned long arg) - { - struct inode * inode = file_inode(filp); - int error = 0; -@@ -59,6 +59,8 @@ static int setfl(int fd, struct file * filp, unsigned long arg) - - if (filp->f_op->check_flags) - error = filp->f_op->check_flags(arg); -+ if (!error && filp->f_op->setfl) -+ error = filp->f_op->setfl(filp, arg); - if (error) - return error; - -diff --git a/fs/inode.c b/fs/inode.c -index 56d1d2b..2998e86 100644 ---- a/fs/inode.c -+++ b/fs/inode.c -@@ -1497,7 +1497,7 @@ static int relatime_need_update(struct vfsmount *mnt, struct inode *inode, - * This does the actual work of updating an inodes time or version. Must have - * had called mnt_want_write() before calling this. 
- */ --static int update_time(struct inode *inode, struct timespec *time, int flags) -+int update_time(struct inode *inode, struct timespec *time, int flags) - { - if (inode->i_op->update_time) - return inode->i_op->update_time(inode, time, flags); -diff --git a/fs/proc/base.c b/fs/proc/base.c -index 7dc3ea8..b368ad5 100644 ---- a/fs/proc/base.c -+++ b/fs/proc/base.c -@@ -1735,7 +1735,7 @@ static int proc_map_files_get_link(struct dentry *dentry, struct path *path) - down_read(&mm->mmap_sem); - vma = find_exact_vma(mm, vm_start, vm_end); - if (vma && vma->vm_file) { -- *path = vma->vm_file->f_path; -+ *path = vma_pr_or_file(vma)->f_path; - path_get(path); - rc = 0; - } -diff --git a/fs/proc/nommu.c b/fs/proc/nommu.c -index d4a3574..1397181 100644 ---- a/fs/proc/nommu.c -+++ b/fs/proc/nommu.c -@@ -45,7 +45,10 @@ static int nommu_region_show(struct seq_file *m, struct vm_region *region) - file = region->vm_file; - - if (file) { -- struct inode *inode = file_inode(region->vm_file); -+ struct inode *inode; -+ -+ file = vmr_pr_or_file(region); -+ inode = file_inode(file); - dev = inode->i_sb->s_dev; - ino = inode->i_ino; - } -diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c -index 69aa378..426b962 100644 ---- a/fs/proc/task_mmu.c -+++ b/fs/proc/task_mmu.c -@@ -276,7 +276,10 @@ show_map_vma(struct seq_file *m, struct vm_area_struct *vma, int is_pid) - const char *name = NULL; - - if (file) { -- struct inode *inode = file_inode(vma->vm_file); -+ struct inode *inode; -+ -+ file = vma_pr_or_file(vma); -+ inode = file_inode(file); - dev = inode->i_sb->s_dev; - ino = inode->i_ino; - pgoff = ((loff_t)vma->vm_pgoff) << PAGE_SHIFT; -@@ -1447,7 +1450,7 @@ static int show_numa_map(struct seq_file *m, void *v, int is_pid) - struct proc_maps_private *proc_priv = &numa_priv->proc_maps; - struct vm_area_struct *vma = v; - struct numa_maps *md = &numa_priv->md; -- struct file *file = vma->vm_file; -+ struct file *file = vma_pr_or_file(vma); - struct mm_struct *mm = vma->vm_mm; - 
struct mm_walk walk = {}; - struct mempolicy *pol; -diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c -index 599ec2e..1740207 100644 ---- a/fs/proc/task_nommu.c -+++ b/fs/proc/task_nommu.c -@@ -160,7 +160,10 @@ static int nommu_vma_show(struct seq_file *m, struct vm_area_struct *vma, - file = vma->vm_file; - - if (file) { -- struct inode *inode = file_inode(vma->vm_file); -+ struct inode *inode; -+ -+ file = vma_pr_or_file(vma); -+ inode = file_inode(file); - dev = inode->i_sb->s_dev; - ino = inode->i_ino; - pgoff = (loff_t)vma->vm_pgoff << PAGE_SHIFT; -diff --git a/fs/splice.c b/fs/splice.c -index 75c6058..619359a 100644 ---- a/fs/splice.c -+++ b/fs/splice.c -@@ -1114,8 +1114,8 @@ EXPORT_SYMBOL(generic_splice_sendpage); - /* - * Attempt to initiate a splice from pipe to file. - */ --static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, -- loff_t *ppos, size_t len, unsigned int flags) -+long do_splice_from(struct pipe_inode_info *pipe, struct file *out, -+ loff_t *ppos, size_t len, unsigned int flags) - { - ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, - loff_t *, size_t, unsigned int); -@@ -1131,9 +1131,9 @@ static long do_splice_from(struct pipe_inode_info *pipe, struct file *out, - /* - * Attempt to initiate a splice from a file to a pipe. 
- */ --static long do_splice_to(struct file *in, loff_t *ppos, -- struct pipe_inode_info *pipe, size_t len, -- unsigned int flags) -+long do_splice_to(struct file *in, loff_t *ppos, -+ struct pipe_inode_info *pipe, size_t len, -+ unsigned int flags) - { - ssize_t (*splice_read)(struct file *, loff_t *, - struct pipe_inode_info *, size_t, unsigned int); -diff --git a/include/linux/file.h b/include/linux/file.h -index 4d69123..62cffc0 100644 ---- a/include/linux/file.h -+++ b/include/linux/file.h -@@ -19,6 +19,7 @@ struct dentry; - struct path; - extern struct file *alloc_file(struct path *, fmode_t mode, - const struct file_operations *fop); -+extern struct file *get_empty_filp(void); - - static inline void fput_light(struct file *file, int fput_needed) - { -diff --git a/include/linux/fs.h b/include/linux/fs.h -index 6fd017e..c44d25d 100644 ---- a/include/linux/fs.h -+++ b/include/linux/fs.h -@@ -1149,6 +1149,7 @@ extern void fasync_free(struct fasync_struct *); - /* can be called from interrupts */ - extern void kill_fasync(struct fasync_struct **, int, int); - -+extern int setfl(int fd, struct file * filp, unsigned long arg); - extern void __f_setown(struct file *filp, struct pid *, enum pid_type, int force); - extern void f_setown(struct file *filp, unsigned long arg, int force); - extern void f_delown(struct file *filp); -@@ -1507,6 +1508,7 @@ struct file_operations { - ssize_t (*sendpage) (struct file *, struct page *, int, size_t, loff_t *, int); - unsigned long (*get_unmapped_area)(struct file *, unsigned long, unsigned long, unsigned long, unsigned long); - int (*check_flags)(int); -+ int (*setfl)(struct file *, unsigned long); - int (*flock) (struct file *, int, struct file_lock *); - ssize_t (*splice_write)(struct pipe_inode_info *, struct file *, loff_t *, size_t, unsigned int); - ssize_t (*splice_read)(struct file *, loff_t *, struct pipe_inode_info *, size_t, unsigned int); -@@ -2662,6 +2664,7 @@ extern int inode_change_ok(const struct inode *, struct 
iattr *); - extern int inode_newsize_ok(const struct inode *, loff_t offset); - extern void setattr_copy(struct inode *inode, const struct iattr *attr); - -+extern int update_time(struct inode *, struct timespec *, int); - extern int file_update_time(struct file *file); - - extern int generic_show_options(struct seq_file *m, struct dentry *root); -diff --git a/include/linux/mm.h b/include/linux/mm.h -index 86a977b..a2d0dbb 100644 ---- a/include/linux/mm.h -+++ b/include/linux/mm.h -@@ -1208,6 +1208,28 @@ static inline int fixup_user_fault(struct task_struct *tsk, - } - #endif - -+extern void vma_do_file_update_time(struct vm_area_struct *, const char[], int); -+extern struct file *vma_do_pr_or_file(struct vm_area_struct *, const char[], -+ int); -+extern void vma_do_get_file(struct vm_area_struct *, const char[], int); -+extern void vma_do_fput(struct vm_area_struct *, const char[], int); -+ -+#define vma_file_update_time(vma) vma_do_file_update_time(vma, __func__, \ -+ __LINE__) -+#define vma_pr_or_file(vma) vma_do_pr_or_file(vma, __func__, \ -+ __LINE__) -+#define vma_get_file(vma) vma_do_get_file(vma, __func__, __LINE__) -+#define vma_fput(vma) vma_do_fput(vma, __func__, __LINE__) -+ -+#ifndef CONFIG_MMU -+extern struct file *vmr_do_pr_or_file(struct vm_region *, const char[], int); -+extern void vmr_do_fput(struct vm_region *, const char[], int); -+ -+#define vmr_pr_or_file(region) vmr_do_pr_or_file(region, __func__, \ -+ __LINE__) -+#define vmr_fput(region) vmr_do_fput(region, __func__, __LINE__) -+#endif /* !CONFIG_MMU */ -+ - extern int access_process_vm(struct task_struct *tsk, unsigned long addr, void *buf, int len, int write); - extern int access_remote_vm(struct mm_struct *mm, unsigned long addr, - void *buf, int len, int write); -diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h -index 6e0b286..8f374ed 100644 ---- a/include/linux/mm_types.h -+++ b/include/linux/mm_types.h -@@ -232,6 +232,7 @@ struct vm_region { - unsigned long vm_top; /* 
region allocated to here */ - unsigned long vm_pgoff; /* the offset in vm_file corresponding to vm_start */ - struct file *vm_file; /* the backing file or NULL */ -+ struct file *vm_prfile; /* the virtual backing file or NULL */ - - int vm_usage; /* region usage count (access under nommu_region_sem) */ - bool vm_icache_flushed : 1; /* true if the icache has been flushed for -@@ -300,6 +301,7 @@ struct vm_area_struct { - unsigned long vm_pgoff; /* Offset (within vm_file) in PAGE_SIZE - units, *not* PAGE_CACHE_SIZE */ - struct file * vm_file; /* File we map to (can be NULL). */ -+ struct file *vm_prfile; /* shadow of vm_file */ - void * vm_private_data; /* was vm_pte (shared mem) */ - - #ifndef CONFIG_MMU -diff --git a/include/linux/splice.h b/include/linux/splice.h -index da2751d..2e0fca6 100644 ---- a/include/linux/splice.h -+++ b/include/linux/splice.h -@@ -83,4 +83,10 @@ extern void splice_shrink_spd(struct splice_pipe_desc *); - extern void spd_release_page(struct splice_pipe_desc *, unsigned int); - - extern const struct pipe_buf_operations page_cache_pipe_buf_ops; -+ -+extern long do_splice_from(struct pipe_inode_info *pipe, struct file *out, -+ loff_t *ppos, size_t len, unsigned int flags); -+extern long do_splice_to(struct file *in, loff_t *ppos, -+ struct pipe_inode_info *pipe, size_t len, -+ unsigned int flags); - #endif -diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild -index 8523f9b..11f8f74 100644 ---- a/include/uapi/linux/Kbuild -+++ b/include/uapi/linux/Kbuild -@@ -56,6 +56,7 @@ header-y += atmppp.h - header-y += atmsap.h - header-y += atmsvc.h - header-y += audit.h -+header-y += aufs_type.h - header-y += auto_fs.h - header-y += auto_fs4.h - header-y += auxvec.h -diff --git a/include/uapi/linux/aufs_type.h b/include/uapi/linux/aufs_type.h -new file mode 100644 -index 0000000..75915f8 ---- /dev/null -+++ b/include/uapi/linux/aufs_type.h -@@ -0,0 +1,419 @@ -+/* -+ * Copyright (C) 2005-2016 Junjiro R. 
Okajima -+ * -+ * This program, aufs is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ * -+ * This program is distributed in the hope that it will be useful, -+ * but WITHOUT ANY WARRANTY; without even the implied warranty of -+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -+ * GNU General Public License for more details. -+ * -+ * You should have received a copy of the GNU General Public License -+ * along with this program. If not, see . -+ */ -+ -+#ifndef __AUFS_TYPE_H__ -+#define __AUFS_TYPE_H__ -+ -+#define AUFS_NAME "aufs" -+ -+#ifdef __KERNEL__ -+/* -+ * define it before including all other headers. -+ * sched.h may use pr_* macros before defining "current", so define the -+ * no-current version first, and re-define later. -+ */ -+#define pr_fmt(fmt) AUFS_NAME " %s:%d: " fmt, __func__, __LINE__ -+#include -+#undef pr_fmt -+#define pr_fmt(fmt) \ -+ AUFS_NAME " %s:%d:%.*s[%d]: " fmt, __func__, __LINE__, \ -+ (int)sizeof(current->comm), current->comm, current->pid -+#else -+#include -+#include -+#endif /* __KERNEL__ */ -+ -+#include -+ -+#define AUFS_VERSION "3.18.25+-20160509" -+ -+/* todo? 
move this to linux-2.6.19/include/magic.h */ -+#define AUFS_SUPER_MAGIC ('a' << 24 | 'u' << 16 | 'f' << 8 | 's') -+ -+/* ---------------------------------------------------------------------- */ -+ -+#ifdef CONFIG_AUFS_BRANCH_MAX_127 -+typedef int8_t aufs_bindex_t; -+#define AUFS_BRANCH_MAX 127 -+#else -+typedef int16_t aufs_bindex_t; -+#ifdef CONFIG_AUFS_BRANCH_MAX_511 -+#define AUFS_BRANCH_MAX 511 -+#elif defined(CONFIG_AUFS_BRANCH_MAX_1023) -+#define AUFS_BRANCH_MAX 1023 -+#elif defined(CONFIG_AUFS_BRANCH_MAX_32767) -+#define AUFS_BRANCH_MAX 32767 -+#endif -+#endif -+ -+#ifdef __KERNEL__ -+#ifndef AUFS_BRANCH_MAX -+#error unknown CONFIG_AUFS_BRANCH_MAX value -+#endif -+#endif /* __KERNEL__ */ -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define AUFS_FSTYPE AUFS_NAME -+ -+#define AUFS_ROOT_INO 2 -+#define AUFS_FIRST_INO 11 -+ -+#define AUFS_WH_PFX ".wh." -+#define AUFS_WH_PFX_LEN ((int)sizeof(AUFS_WH_PFX) - 1) -+#define AUFS_WH_TMP_LEN 4 -+/* a limit for rmdir/rename a dir and copyup */ -+#define AUFS_MAX_NAMELEN (NAME_MAX \ -+ - AUFS_WH_PFX_LEN * 2 /* doubly whiteouted */\ -+ - 1 /* dot */\ -+ - AUFS_WH_TMP_LEN) /* hex */ -+#define AUFS_XINO_FNAME "." 
AUFS_NAME ".xino" -+#define AUFS_XINO_DEFPATH "/tmp/" AUFS_XINO_FNAME -+#define AUFS_XINO_DEF_SEC 30 /* seconds */ -+#define AUFS_XINO_DEF_TRUNC 45 /* percentage */ -+#define AUFS_DIRWH_DEF 3 -+#define AUFS_RDCACHE_DEF 10 /* seconds */ -+#define AUFS_RDCACHE_MAX 3600 /* seconds */ -+#define AUFS_RDBLK_DEF 512 /* bytes */ -+#define AUFS_RDHASH_DEF 32 -+#define AUFS_WKQ_NAME AUFS_NAME "d" -+#define AUFS_MFS_DEF_SEC 30 /* seconds */ -+#define AUFS_MFS_MAX_SEC 3600 /* seconds */ -+#define AUFS_FHSM_CACHE_DEF_SEC 30 /* seconds */ -+#define AUFS_PLINK_WARN 50 /* number of plinks in a single bucket */ -+ -+/* pseudo-link maintenace under /proc */ -+#define AUFS_PLINK_MAINT_NAME "plink_maint" -+#define AUFS_PLINK_MAINT_DIR "fs/" AUFS_NAME -+#define AUFS_PLINK_MAINT_PATH AUFS_PLINK_MAINT_DIR "/" AUFS_PLINK_MAINT_NAME -+ -+#define AUFS_DIROPQ_NAME AUFS_WH_PFX ".opq" /* whiteouted doubly */ -+#define AUFS_WH_DIROPQ AUFS_WH_PFX AUFS_DIROPQ_NAME -+ -+#define AUFS_BASE_NAME AUFS_WH_PFX AUFS_NAME -+#define AUFS_PLINKDIR_NAME AUFS_WH_PFX "plnk" -+#define AUFS_ORPHDIR_NAME AUFS_WH_PFX "orph" -+ -+/* doubly whiteouted */ -+#define AUFS_WH_BASE AUFS_WH_PFX AUFS_BASE_NAME -+#define AUFS_WH_PLINKDIR AUFS_WH_PFX AUFS_PLINKDIR_NAME -+#define AUFS_WH_ORPHDIR AUFS_WH_PFX AUFS_ORPHDIR_NAME -+ -+/* branch permissions and attributes */ -+#define AUFS_BRPERM_RW "rw" -+#define AUFS_BRPERM_RO "ro" -+#define AUFS_BRPERM_RR "rr" -+#define AUFS_BRATTR_COO_REG "coo_reg" -+#define AUFS_BRATTR_COO_ALL "coo_all" -+#define AUFS_BRATTR_FHSM "fhsm" -+#define AUFS_BRATTR_UNPIN "unpin" -+#define AUFS_BRATTR_ICEX "icex" -+#define AUFS_BRATTR_ICEX_SEC "icexsec" -+#define AUFS_BRATTR_ICEX_SYS "icexsys" -+#define AUFS_BRATTR_ICEX_TR "icextr" -+#define AUFS_BRATTR_ICEX_USR "icexusr" -+#define AUFS_BRATTR_ICEX_OTH "icexoth" -+#define AUFS_BRRATTR_WH "wh" -+#define AUFS_BRWATTR_NLWH "nolwh" -+#define AUFS_BRWATTR_MOO "moo" -+ -+#define AuBrPerm_RW 1 /* writable, hardlinkable wh */ -+#define AuBrPerm_RO (1 << 1) /* 
readonly */ -+#define AuBrPerm_RR (1 << 2) /* natively readonly */ -+#define AuBrPerm_Mask (AuBrPerm_RW | AuBrPerm_RO | AuBrPerm_RR) -+ -+#define AuBrAttr_COO_REG (1 << 3) /* copy-up on open */ -+#define AuBrAttr_COO_ALL (1 << 4) -+#define AuBrAttr_COO_Mask (AuBrAttr_COO_REG | AuBrAttr_COO_ALL) -+ -+#define AuBrAttr_FHSM (1 << 5) /* file-based hsm */ -+#define AuBrAttr_UNPIN (1 << 6) /* rename-able top dir of -+ branch. meaningless since -+ linux-3.18-rc1 */ -+ -+/* ignore error in copying XATTR */ -+#define AuBrAttr_ICEX_SEC (1 << 7) -+#define AuBrAttr_ICEX_SYS (1 << 8) -+#define AuBrAttr_ICEX_TR (1 << 9) -+#define AuBrAttr_ICEX_USR (1 << 10) -+#define AuBrAttr_ICEX_OTH (1 << 11) -+#define AuBrAttr_ICEX (AuBrAttr_ICEX_SEC \ -+ | AuBrAttr_ICEX_SYS \ -+ | AuBrAttr_ICEX_TR \ -+ | AuBrAttr_ICEX_USR \ -+ | AuBrAttr_ICEX_OTH) -+ -+#define AuBrRAttr_WH (1 << 12) /* whiteout-able */ -+#define AuBrRAttr_Mask AuBrRAttr_WH -+ -+#define AuBrWAttr_NoLinkWH (1 << 13) /* un-hardlinkable whiteouts */ -+#define AuBrWAttr_MOO (1 << 14) /* move-up on open */ -+#define AuBrWAttr_Mask (AuBrWAttr_NoLinkWH | AuBrWAttr_MOO) -+ -+#define AuBrAttr_CMOO_Mask (AuBrAttr_COO_Mask | AuBrWAttr_MOO) -+ -+/* #warning test userspace */ -+#ifdef __KERNEL__ -+#ifndef CONFIG_AUFS_FHSM -+#undef AuBrAttr_FHSM -+#define AuBrAttr_FHSM 0 -+#endif -+#ifndef CONFIG_AUFS_XATTR -+#undef AuBrAttr_ICEX -+#define AuBrAttr_ICEX 0 -+#undef AuBrAttr_ICEX_SEC -+#define AuBrAttr_ICEX_SEC 0 -+#undef AuBrAttr_ICEX_SYS -+#define AuBrAttr_ICEX_SYS 0 -+#undef AuBrAttr_ICEX_TR -+#define AuBrAttr_ICEX_TR 0 -+#undef AuBrAttr_ICEX_USR -+#define AuBrAttr_ICEX_USR 0 -+#undef AuBrAttr_ICEX_OTH -+#define AuBrAttr_ICEX_OTH 0 -+#endif -+#endif -+ -+/* the longest combination */ -+/* AUFS_BRATTR_ICEX and AUFS_BRATTR_ICEX_TR don't affect here */ -+#define AuBrPermStrSz sizeof(AUFS_BRPERM_RW \ -+ "+" AUFS_BRATTR_COO_REG \ -+ "+" AUFS_BRATTR_FHSM \ -+ "+" AUFS_BRATTR_UNPIN \ -+ "+" AUFS_BRATTR_ICEX_SEC \ -+ "+" AUFS_BRATTR_ICEX_SYS \ -+ 
"+" AUFS_BRATTR_ICEX_USR \ -+ "+" AUFS_BRATTR_ICEX_OTH \ -+ "+" AUFS_BRWATTR_NLWH) -+ -+typedef struct { -+ char a[AuBrPermStrSz]; -+} au_br_perm_str_t; -+ -+static inline int au_br_writable(int brperm) -+{ -+ return brperm & AuBrPerm_RW; -+} -+ -+static inline int au_br_whable(int brperm) -+{ -+ return brperm & (AuBrPerm_RW | AuBrRAttr_WH); -+} -+ -+static inline int au_br_wh_linkable(int brperm) -+{ -+ return !(brperm & AuBrWAttr_NoLinkWH); -+} -+ -+static inline int au_br_cmoo(int brperm) -+{ -+ return brperm & AuBrAttr_CMOO_Mask; -+} -+ -+static inline int au_br_fhsm(int brperm) -+{ -+ return brperm & AuBrAttr_FHSM; -+} -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* ioctl */ -+enum { -+ /* readdir in userspace */ -+ AuCtl_RDU, -+ AuCtl_RDU_INO, -+ -+ AuCtl_WBR_FD, /* pathconf wrapper */ -+ AuCtl_IBUSY, /* busy inode */ -+ AuCtl_MVDOWN, /* move-down */ -+ AuCtl_BR, /* info about branches */ -+ AuCtl_FHSM_FD /* connection for fhsm */ -+}; -+ -+/* borrowed from linux/include/linux/kernel.h */ -+#ifndef ALIGN -+#define ALIGN(x, a) __ALIGN_MASK(x, (typeof(x))(a)-1) -+#define __ALIGN_MASK(x, mask) (((x)+(mask))&~(mask)) -+#endif -+ -+/* borrowed from linux/include/linux/compiler-gcc3.h */ -+#ifndef __aligned -+#define __aligned(x) __attribute__((aligned(x))) -+#endif -+ -+#ifdef __KERNEL__ -+#ifndef __packed -+#define __packed __attribute__((packed)) -+#endif -+#endif -+ -+struct au_rdu_cookie { -+ uint64_t h_pos; -+ int16_t bindex; -+ uint8_t flags; -+ uint8_t pad; -+ uint32_t generation; -+} __aligned(8); -+ -+struct au_rdu_ent { -+ uint64_t ino; -+ int16_t bindex; -+ uint8_t type; -+ uint8_t nlen; -+ uint8_t wh; -+ char name[0]; -+} __aligned(8); -+ -+static inline int au_rdu_len(int nlen) -+{ -+ /* include the terminating NULL */ -+ return ALIGN(sizeof(struct au_rdu_ent) + nlen + 1, -+ sizeof(uint64_t)); -+} -+ -+union au_rdu_ent_ul { -+ struct au_rdu_ent __user *e; -+ uint64_t ul; -+}; -+ -+enum { -+ AufsCtlRduV_SZ, -+ 
AufsCtlRduV_End -+}; -+ -+struct aufs_rdu { -+ /* input */ -+ union { -+ uint64_t sz; /* AuCtl_RDU */ -+ uint64_t nent; /* AuCtl_RDU_INO */ -+ }; -+ union au_rdu_ent_ul ent; -+ uint16_t verify[AufsCtlRduV_End]; -+ -+ /* input/output */ -+ uint32_t blk; -+ -+ /* output */ -+ union au_rdu_ent_ul tail; -+ /* number of entries which were added in a single call */ -+ uint64_t rent; -+ uint8_t full; -+ uint8_t shwh; -+ -+ struct au_rdu_cookie cookie; -+} __aligned(8); -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct aufs_wbr_fd { -+ uint32_t oflags; -+ int16_t brid; -+} __aligned(8); -+ -+/* ---------------------------------------------------------------------- */ -+ -+struct aufs_ibusy { -+ uint64_t ino, h_ino; -+ int16_t bindex; -+} __aligned(8); -+ -+/* ---------------------------------------------------------------------- */ -+ -+/* error code for move-down */ -+/* the actual message strings are implemented in aufs-util.git */ -+enum { -+ EAU_MVDOWN_OPAQUE = 1, -+ EAU_MVDOWN_WHITEOUT, -+ EAU_MVDOWN_UPPER, -+ EAU_MVDOWN_BOTTOM, -+ EAU_MVDOWN_NOUPPER, -+ EAU_MVDOWN_NOLOWERBR, -+ EAU_Last -+}; -+ -+/* flags for move-down */ -+#define AUFS_MVDOWN_DMSG 1 -+#define AUFS_MVDOWN_OWLOWER (1 << 1) /* overwrite lower */ -+#define AUFS_MVDOWN_KUPPER (1 << 2) /* keep upper */ -+#define AUFS_MVDOWN_ROLOWER (1 << 3) /* do even if lower is RO */ -+#define AUFS_MVDOWN_ROLOWER_R (1 << 4) /* did on lower RO */ -+#define AUFS_MVDOWN_ROUPPER (1 << 5) /* do even if upper is RO */ -+#define AUFS_MVDOWN_ROUPPER_R (1 << 6) /* did on upper RO */ -+#define AUFS_MVDOWN_BRID_UPPER (1 << 7) /* upper brid */ -+#define AUFS_MVDOWN_BRID_LOWER (1 << 8) /* lower brid */ -+#define AUFS_MVDOWN_FHSM_LOWER (1 << 9) /* find fhsm attr for lower */ -+#define AUFS_MVDOWN_STFS (1 << 10) /* req. 
stfs */ -+#define AUFS_MVDOWN_STFS_FAILED (1 << 11) /* output: stfs is unusable */ -+#define AUFS_MVDOWN_BOTTOM (1 << 12) /* output: no more lowers */ -+ -+/* index for move-down */ -+enum { -+ AUFS_MVDOWN_UPPER, -+ AUFS_MVDOWN_LOWER, -+ AUFS_MVDOWN_NARRAY -+}; -+ -+/* -+ * additional info of move-down -+ * number of free blocks and inodes. -+ * subset of struct kstatfs, but smaller and always 64bit. -+ */ -+struct aufs_stfs { -+ uint64_t f_blocks; -+ uint64_t f_bavail; -+ uint64_t f_files; -+ uint64_t f_ffree; -+}; -+ -+struct aufs_stbr { -+ int16_t brid; /* optional input */ -+ int16_t bindex; /* output */ -+ struct aufs_stfs stfs; /* output when AUFS_MVDOWN_STFS set */ -+} __aligned(8); -+ -+struct aufs_mvdown { -+ uint32_t flags; /* input/output */ -+ struct aufs_stbr stbr[AUFS_MVDOWN_NARRAY]; /* input/output */ -+ int8_t au_errno; /* output */ -+} __aligned(8); -+ -+/* ---------------------------------------------------------------------- */ -+ -+union aufs_brinfo { -+ /* PATH_MAX may differ between kernel-space and user-space */ -+ char _spacer[4096]; -+ struct { -+ int16_t id; -+ int perm; -+ char path[0]; -+ }; -+} __aligned(8); -+ -+/* ---------------------------------------------------------------------- */ -+ -+#define AuCtlType 'A' -+#define AUFS_CTL_RDU _IOWR(AuCtlType, AuCtl_RDU, struct aufs_rdu) -+#define AUFS_CTL_RDU_INO _IOWR(AuCtlType, AuCtl_RDU_INO, struct aufs_rdu) -+#define AUFS_CTL_WBR_FD _IOW(AuCtlType, AuCtl_WBR_FD, \ -+ struct aufs_wbr_fd) -+#define AUFS_CTL_IBUSY _IOWR(AuCtlType, AuCtl_IBUSY, struct aufs_ibusy) -+#define AUFS_CTL_MVDOWN _IOWR(AuCtlType, AuCtl_MVDOWN, \ -+ struct aufs_mvdown) -+#define AUFS_CTL_BRINFO _IOW(AuCtlType, AuCtl_BR, union aufs_brinfo) -+#define AUFS_CTL_FHSM_FD _IOW(AuCtlType, AuCtl_FHSM_FD, int) -+ -+#endif /* __AUFS_TYPE_H__ */ -diff --git a/kernel/fork.c b/kernel/fork.c -index 0a4f601..67ecb91 100644 ---- a/kernel/fork.c -+++ b/kernel/fork.c -@@ -430,7 +430,7 @@ static int dup_mmap(struct mm_struct *mm, struct 
mm_struct *oldmm) - struct inode *inode = file_inode(file); - struct address_space *mapping = file->f_mapping; - -- get_file(file); -+ vma_get_file(tmp); - if (tmp->vm_flags & VM_DENYWRITE) - atomic_dec(&inode->i_writecount); - mutex_lock(&mapping->i_mmap_mutex); -diff --git a/mm/Makefile b/mm/Makefile -index 8405eb0..e0bda2d 100644 ---- a/mm/Makefile -+++ b/mm/Makefile -@@ -18,7 +18,7 @@ obj-y := filemap.o mempool.o oom_kill.o \ - mm_init.o mmu_context.o percpu.o slab_common.o \ - compaction.o vmacache.o \ - interval_tree.o list_lru.o workingset.o \ -- iov_iter.o debug.o $(mmu-y) -+ iov_iter.o prfile.o debug.o $(mmu-y) - - obj-y += init-mm.o - -diff --git a/mm/filemap.c b/mm/filemap.c -index 7e6ab98..2fe1e57 100644 ---- a/mm/filemap.c -+++ b/mm/filemap.c -@@ -2063,7 +2063,7 @@ int filemap_page_mkwrite(struct vm_area_struct *vma, struct vm_fault *vmf) - int ret = VM_FAULT_LOCKED; - - sb_start_pagefault(inode->i_sb); -- file_update_time(vma->vm_file); -+ vma_file_update_time(vma); - lock_page(page); - if (page->mapping != inode->i_mapping) { - unlock_page(page); -diff --git a/mm/fremap.c b/mm/fremap.c -index 72b8fa3..a00bbf0 100644 ---- a/mm/fremap.c -+++ b/mm/fremap.c -@@ -224,16 +224,28 @@ get_write_lock: - */ - if (mapping_cap_account_dirty(mapping)) { - unsigned long addr; -- struct file *file = get_file(vma->vm_file); -+ struct file *file = vma->vm_file, -+ *prfile = vma->vm_prfile; -+ - /* mmap_region may free vma; grab the info now */ - vm_flags = vma->vm_flags; - -+ vma_get_file(vma); - addr = mmap_region(file, start, size, vm_flags, pgoff); -- fput(file); -+ vma_fput(vma); - if (IS_ERR_VALUE(addr)) { - err = addr; - } else { - BUG_ON(addr != start); -+ if (prfile) { -+ struct vm_area_struct *new_vma; -+ -+ new_vma = find_vma(mm, addr); -+ if (!new_vma->vm_prfile) -+ new_vma->vm_prfile = prfile; -+ if (new_vma != vma) -+ get_file(prfile); -+ } - err = 0; - } - goto out_freed; -diff --git a/mm/memory.c b/mm/memory.c -index 90fb265..844df2e 100644 ---- 
a/mm/memory.c -+++ b/mm/memory.c -@@ -2156,7 +2156,7 @@ reuse: - - /* file_update_time outside page_lock */ - if (vma->vm_file) -- file_update_time(vma->vm_file); -+ vma_file_update_time(vma); - } - put_page(dirty_page); - if (page_mkwrite) { -diff --git a/mm/mmap.c b/mm/mmap.c -index f88b4f9..9994987 100644 ---- a/mm/mmap.c -+++ b/mm/mmap.c -@@ -277,7 +277,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma) - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); - if (vma->vm_file) -- fput(vma->vm_file); -+ vma_fput(vma); - mpol_put(vma_policy(vma)); - kmem_cache_free(vm_area_cachep, vma); - return next; -@@ -895,7 +895,7 @@ again: remove_next = 1 + (end > next->vm_end); - if (remove_next) { - if (file) { - uprobe_munmap(next, next->vm_start, next->vm_end); -- fput(file); -+ vma_fput(vma); - } - if (next->anon_vma) - anon_vma_merge(vma, next); -@@ -1680,8 +1680,8 @@ out: - return addr; - - unmap_and_free_vma: -+ vma_fput(vma); - vma->vm_file = NULL; -- fput(file); - - /* Undo any partial mapping done by a device driver. 
*/ - unmap_region(mm, vma, prev, vma->vm_start, vma->vm_end); -@@ -2480,7 +2480,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, - goto out_free_mpol; - - if (new->vm_file) -- get_file(new->vm_file); -+ vma_get_file(new); - - if (new->vm_ops && new->vm_ops->open) - new->vm_ops->open(new); -@@ -2499,7 +2499,7 @@ static int __split_vma(struct mm_struct *mm, struct vm_area_struct *vma, - if (new->vm_ops && new->vm_ops->close) - new->vm_ops->close(new); - if (new->vm_file) -- fput(new->vm_file); -+ vma_fput(new); - unlink_anon_vmas(new); - out_free_mpol: - mpol_put(vma_policy(new)); -@@ -2889,7 +2889,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap, - if (anon_vma_clone(new_vma, vma)) - goto out_free_mempol; - if (new_vma->vm_file) -- get_file(new_vma->vm_file); -+ vma_get_file(new_vma); - if (new_vma->vm_ops && new_vma->vm_ops->open) - new_vma->vm_ops->open(new_vma); - vma_link(mm, new_vma, prev, rb_link, rb_parent); -diff --git a/mm/nommu.c b/mm/nommu.c -index b5ba5bc..a7662fc 100644 ---- a/mm/nommu.c -+++ b/mm/nommu.c -@@ -658,7 +658,7 @@ static void __put_nommu_region(struct vm_region *region) - up_write(&nommu_region_sem); - - if (region->vm_file) -- fput(region->vm_file); -+ vmr_fput(region); - - /* IO memory and memory shared directly out of the pagecache - * from ramfs/tmpfs mustn't be released here */ -@@ -823,7 +823,7 @@ static void delete_vma(struct mm_struct *mm, struct vm_area_struct *vma) - if (vma->vm_ops && vma->vm_ops->close) - vma->vm_ops->close(vma); - if (vma->vm_file) -- fput(vma->vm_file); -+ vma_fput(vma); - put_nommu_region(vma->vm_region); - kmem_cache_free(vm_area_cachep, vma); - } -@@ -1385,7 +1385,7 @@ unsigned long do_mmap_pgoff(struct file *file, - goto error_just_free; - } - } -- fput(region->vm_file); -+ vmr_fput(region); - kmem_cache_free(vm_region_jar, region); - region = pregion; - result = start; -@@ -1461,10 +1461,10 @@ error_just_free: - up_write(&nommu_region_sem); - error: - if 
(region->vm_file) -- fput(region->vm_file); -+ vmr_fput(region); - kmem_cache_free(vm_region_jar, region); - if (vma->vm_file) -- fput(vma->vm_file); -+ vma_fput(vma); - kmem_cache_free(vm_area_cachep, vma); - kleave(" = %d", ret); - return ret; -diff --git a/mm/prfile.c b/mm/prfile.c -new file mode 100644 -index 0000000..532e518 ---- /dev/null -+++ b/mm/prfile.c -@@ -0,0 +1,86 @@ -+/* -+ * Mainly for aufs which mmap(2) diffrent file and wants to print different path -+ * in /proc/PID/maps. -+ * Call these functions via macros defined in linux/mm.h. -+ * -+ * See Documentation/filesystems/aufs/design/06mmap.txt -+ * -+ * Copyright (c) 2014 Junjro R. Okajima -+ * Copyright (c) 2014 Ian Campbell -+ */ -+ -+#include -+#include -+#include -+ -+/* #define PRFILE_TRACE */ -+static inline void prfile_trace(struct file *f, struct file *pr, -+ const char func[], int line, const char func2[]) -+{ -+#ifdef PRFILE_TRACE -+ if (pr) -+ pr_info("%s:%d: %s, %s\n", func, line, func2, -+ f ? (char *)f->f_dentry->d_name.name : "(null)"); -+#endif -+} -+ -+void vma_do_file_update_time(struct vm_area_struct *vma, const char func[], -+ int line) -+{ -+ struct file *f = vma->vm_file, *pr = vma->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ file_update_time(f); -+ if (f && pr) -+ file_update_time(pr); -+} -+ -+struct file *vma_do_pr_or_file(struct vm_area_struct *vma, const char func[], -+ int line) -+{ -+ struct file *f = vma->vm_file, *pr = vma->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ return (f && pr) ? 
pr : f; -+} -+ -+void vma_do_get_file(struct vm_area_struct *vma, const char func[], int line) -+{ -+ struct file *f = vma->vm_file, *pr = vma->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ get_file(f); -+ if (f && pr) -+ get_file(pr); -+} -+ -+void vma_do_fput(struct vm_area_struct *vma, const char func[], int line) -+{ -+ struct file *f = vma->vm_file, *pr = vma->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ fput(f); -+ if (f && pr) -+ fput(pr); -+} -+ -+#ifndef CONFIG_MMU -+struct file *vmr_do_pr_or_file(struct vm_region *region, const char func[], -+ int line) -+{ -+ struct file *f = region->vm_file, *pr = region->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ return (f && pr) ? pr : f; -+} -+ -+void vmr_do_fput(struct vm_region *region, const char func[], int line) -+{ -+ struct file *f = region->vm_file, *pr = region->vm_prfile; -+ -+ prfile_trace(f, pr, func, line, __func__); -+ fput(f); -+ if (f && pr) -+ fput(pr); -+} -+#endif /* !CONFIG_MMU */ --- -2.1.4 - diff --git a/packages/base/any/kernels/3.18.25/patches/backport-some-kernel-patches-based-on-3.18.25.patch b/packages/base/any/kernels/3.18.25/patches/backport-some-kernel-patches-based-on-3.18.25.patch deleted file mode 100644 index 6a6e36f5..00000000 --- a/packages/base/any/kernels/3.18.25/patches/backport-some-kernel-patches-based-on-3.18.25.patch +++ /dev/null @@ -1,11095 +0,0 @@ -From fdf22b15468bed6aac4e52e83903d8e010fbe60b Mon Sep 17 00:00:00 2001 -From: Shengzhou Liu -Date: Fri, 23 Sep 2016 14:58:06 +0800 -Subject: [PATCH 2/2] Backport some kernel patches based on 3.18.25 - -Fixup dpaa2-eth, phy, pcie, gicv3, sdhc, i2c. -Verified on LS2080A/LS2088A RDB. 
---- - Documentation/devicetree/bindings/arm/gic.txt | 8 +- - .../devicetree/bindings/clock/qoriq-clock.txt | 64 +- - Documentation/devicetree/bindings/i2c/i2c-imx.txt | 11 + - .../devicetree/bindings/i2c/i2c-mux-pca954x.txt | 3 + - .../bindings/memory-controllers/fsl/ifc.txt | 3 + - Documentation/devicetree/of_selftest.txt | 20 +- - Documentation/devicetree/todo.txt | 1 - - arch/arm64/Kconfig | 1 + - arch/arm64/include/asm/device.h | 1 + - arch/arm64/include/asm/dma-mapping.h | 16 +- - arch/powerpc/include/asm/mpc85xx.h | 94 -- - arch/powerpc/platforms/85xx/mpc85xx_mds.c | 2 +- - arch/powerpc/platforms/85xx/mpc85xx_rdb.c | 2 +- - arch/powerpc/platforms/85xx/p1022_ds.c | 2 +- - arch/powerpc/platforms/85xx/p1022_rdk.c | 2 +- - arch/powerpc/platforms/85xx/smp.c | 2 +- - arch/powerpc/platforms/85xx/twr_p102x.c | 2 +- - arch/powerpc/platforms/86xx/mpc8610_hpcd.c | 2 +- - arch/x86/pci/xen.c | 4 + - drivers/clk/Kconfig | 10 +- - drivers/clk/Makefile | 2 +- - drivers/clk/clk-qoriq.c | 1256 ++++++++++++++++++++ - drivers/cpufreq/Kconfig.powerpc | 2 +- - drivers/i2c/busses/Kconfig | 4 +- - drivers/i2c/busses/i2c-imx.c | 373 +++++- - drivers/i2c/muxes/i2c-mux-pca9541.c | 4 +- - drivers/i2c/muxes/i2c-mux-pca954x.c | 57 +- - drivers/iommu/fsl_pamu.c | 2 +- - drivers/iommu/io-pgtable-arm.c | 15 +- - drivers/irqchip/Kconfig | 8 + - drivers/irqchip/Makefile | 1 + - drivers/irqchip/irq-gic-common.c | 18 +- - drivers/irqchip/irq-gic-common.h | 2 +- - drivers/irqchip/irq-gic-v2m.c | 333 ++++++ - drivers/irqchip/irq-gic-v3-its.c | 6 +- - drivers/irqchip/irq-gic-v3.c | 66 +- - drivers/irqchip/irq-gic.c | 90 +- - drivers/irqchip/irq-hip04.c | 9 +- - drivers/memory/Kconfig | 2 +- - drivers/memory/fsl_ifc.c | 77 +- - drivers/mfd/vexpress-sysreg.c | 2 +- - drivers/mmc/card/block.c | 4 + - drivers/mmc/host/Kconfig | 10 +- - drivers/mmc/host/sdhci-esdhc.h | 9 +- - drivers/mmc/host/sdhci-of-esdhc.c | 680 +++++++++-- - drivers/mmc/host/sdhci.c | 250 ++-- - drivers/mmc/host/sdhci.h | 42 + - 
drivers/mtd/nand/Kconfig | 2 +- - drivers/mtd/nand/fsl_ifc_nand.c | 301 ++--- - drivers/net/ethernet/freescale/gianfar.c | 6 +- - drivers/net/phy/Kconfig | 14 +- - drivers/net/phy/Makefile | 4 +- - drivers/net/phy/at803x.c | 4 + - drivers/net/phy/fixed.c | 336 ------ - drivers/net/phy/fixed_phy.c | 370 ++++++ - drivers/net/phy/marvell.c | 11 + - drivers/net/phy/mdio_bus.c | 34 +- - drivers/net/phy/phy.c | 19 +- - drivers/net/phy/phy_device.c | 90 +- - drivers/net/phy/realtek.c | 82 +- - drivers/of/base.c | 53 +- - drivers/of/dynamic.c | 13 - - drivers/of/fdt.c | 30 +- - drivers/of/pdt.c | 27 +- - drivers/of/selftest.c | 71 +- - drivers/pci/Makefile | 1 + - drivers/pci/access.c | 87 ++ - drivers/pci/host/Kconfig | 2 +- - drivers/pci/host/pci-layerscape.c | 86 +- - drivers/pci/host/pcie-designware.c | 14 + - drivers/pci/host/pcie-designware.h | 1 + - drivers/pci/msi.c | 5 + - drivers/pci/pci.c | 1 + - drivers/pci/pcie/portdrv_core.c | 31 +- - drivers/pci/probe.c | 1 + - drivers/pci/remove.c | 2 + - drivers/pci/setup-bus.c | 1 + - drivers/pci/setup-irq.c | 1 + - drivers/soc/Kconfig | 13 + - drivers/soc/Makefile | 1 + - drivers/soc/fsl/Kconfig | 6 + - drivers/soc/fsl/Kconfig.arm | 25 + - drivers/soc/fsl/Makefile | 6 + - drivers/soc/fsl/guts.c | 123 ++ - drivers/soc/fsl/ls1/Kconfig | 11 + - drivers/soc/fsl/ls1/Makefile | 1 + - drivers/soc/fsl/ls1/ftm_alarm.c | 274 +++++ - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c | 273 +++-- - drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h | 48 +- - drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c | 553 ++++----- - drivers/staging/fsl-dpaa2/mac/mac.c | 4 +- - drivers/staging/fsl-mc/bus/dprc-driver.c | 2 +- - drivers/staging/fsl-mc/include/mc-private.h | 2 +- - drivers/usb/host/xhci.c | 6 +- - include/linux/fsl/guts.h | 99 +- - include/linux/fsl/svr.h | 95 ++ - include/linux/fsl_ifc.h | 116 +- - include/linux/interrupt.h | 14 + - include/linux/iommu.h | 1 + - include/linux/irq.h | 8 + - include/linux/irqchip/arm-gic-v3.h | 12 + - 
include/linux/irqchip/arm-gic.h | 2 + - include/linux/irqdomain.h | 1 + - include/linux/mmc/sdhci.h | 16 +- - include/linux/of.h | 11 +- - include/linux/of_pdt.h | 3 +- - include/linux/pci.h | 11 + - include/linux/phy.h | 1 + - include/linux/phy_fixed.h | 11 +- - kernel/irq/chip.c | 58 +- - kernel/irq/manage.c | 91 ++ - kernel/irq/msi.c | 13 +- - sound/soc/fsl/mpc8610_hpcd.c | 2 +- - sound/soc/fsl/p1022_ds.c | 2 +- - sound/soc/fsl/p1022_rdk.c | 2 +- - 115 files changed, 5570 insertions(+), 1621 deletions(-) - delete mode 100644 arch/powerpc/include/asm/mpc85xx.h - create mode 100644 drivers/clk/clk-qoriq.c - create mode 100644 drivers/irqchip/irq-gic-v2m.c - delete mode 100644 drivers/net/phy/fixed.c - create mode 100644 drivers/net/phy/fixed_phy.c - create mode 100644 drivers/soc/fsl/Kconfig - create mode 100644 drivers/soc/fsl/Kconfig.arm - create mode 100644 drivers/soc/fsl/Makefile - create mode 100644 drivers/soc/fsl/guts.c - create mode 100644 drivers/soc/fsl/ls1/Kconfig - create mode 100644 drivers/soc/fsl/ls1/Makefile - create mode 100644 drivers/soc/fsl/ls1/ftm_alarm.c - create mode 100644 include/linux/fsl/svr.h - -diff --git a/Documentation/devicetree/bindings/arm/gic.txt b/Documentation/devicetree/bindings/arm/gic.txt -index c7d2fa1..e87d3d7 100644 ---- a/Documentation/devicetree/bindings/arm/gic.txt -+++ b/Documentation/devicetree/bindings/arm/gic.txt -@@ -31,12 +31,16 @@ Main node required properties: - The 3rd cell is the flags, encoded as follows: - bits[3:0] trigger type and level flags. - 1 = low-to-high edge triggered -- 2 = high-to-low edge triggered -+ 2 = high-to-low edge triggered (invalid for SPIs) - 4 = active high level-sensitive -- 8 = active low level-sensitive -+ 8 = active low level-sensitive (invalid for SPIs). - bits[15:8] PPI interrupt cpu mask. Each bit corresponds to each of - the 8 possible cpus attached to the GIC. A bit set to '1' indicated - the interrupt is wired to that CPU. Only valid for PPI interrupts. 
-+ Also note that the configurability of PPI interrupts is IMPLEMENTATION -+ DEFINED and as such not guaranteed to be present (most SoC available -+ in 2014 seem to ignore the setting of this flag and use the hardware -+ default value). - - - reg : Specifies base physical address(s) and size of the GIC registers. The - first region is the GIC distributor register base and size. The 2nd region is -diff --git a/Documentation/devicetree/bindings/clock/qoriq-clock.txt b/Documentation/devicetree/bindings/clock/qoriq-clock.txt -index 5666812..128fc72 100644 ---- a/Documentation/devicetree/bindings/clock/qoriq-clock.txt -+++ b/Documentation/devicetree/bindings/clock/qoriq-clock.txt -@@ -1,6 +1,6 @@ --* Clock Block on Freescale CoreNet Platforms -+* Clock Block on Freescale QorIQ Platforms - --Freescale CoreNet chips take primary clocking input from the external -+Freescale QorIQ chips take primary clocking input from the external - SYSCLK signal. The SYSCLK input (frequency) is multiplied using - multiple phase locked loops (PLL) to create a variety of frequencies - which can then be passed to a variety of internal logic, including -@@ -13,14 +13,16 @@ which the chip complies. - Chassis Version Example Chips - --------------- ------------- - 1.0 p4080, p5020, p5040 --2.0 t4240, b4860, t1040 -+2.0 t4240, b4860 - - 1. Clock Block Binding - - Required properties: --- compatible: Should contain a specific clock block compatible string -- and a single chassis clock compatible string. -- Clock block strings include, but not limited to, one of the: -+- compatible: Should contain a chip-specific clock block compatible -+ string and (if applicable) may contain a chassis-version clock -+ compatible string. 
-+ -+ Chip-specific strings are of the form "fsl,-clockgen", such as: - * "fsl,p2041-clockgen" - * "fsl,p3041-clockgen" - * "fsl,p4080-clockgen" -@@ -29,15 +31,15 @@ Required properties: - * "fsl,t4240-clockgen" - * "fsl,b4420-clockgen" - * "fsl,b4860-clockgen" -- Chassis clock strings include: -+ * "fsl,ls1021a-clockgen" -+ Chassis-version clock strings include: - * "fsl,qoriq-clockgen-1.0": for chassis 1.0 clocks - * "fsl,qoriq-clockgen-2.0": for chassis 2.0 clocks - - reg: Describes the address of the device's resources within the - address space defined by its parent bus, and resource zero - represents the clock register set --- clock-frequency: Input system clock frequency - --Recommended properties: -+Optional properties: - - ranges: Allows valid translation between child's address space and - parent's. Must be present if the device has sub-nodes. - - #address-cells: Specifies the number of cells used to represent -@@ -46,8 +48,46 @@ Recommended properties: - - #size-cells: Specifies the number of cells used to represent - the size of an address. Must be present if the device has - sub-nodes and set to 1 if present -+- clock-frequency: Input system clock frequency (SYSCLK) -+- clocks: If clock-frequency is not specified, sysclk may be provided -+ as an input clock. Either clock-frequency or clocks must be -+ provided. -+ -+2. Clock Provider -+ -+The clockgen node should act as a clock provider, though in older device -+trees the children of the clockgen node are the clock providers. -+ -+When the clockgen node is a clock provider, #clock-cells = <2>. -+The first cell of the clock specifier is the clock type, and the -+second cell is the clock index for the specified type. -+ -+ Type# Name Index Cell -+ 0 sysclk must be 0 -+ 1 cmux index (n in CLKCnCSR) -+ 2 hwaccel index (n in CLKCGnHWACSR) -+ 3 fman 0 for fm1, 1 for fm2 -+ 4 platform pll 0=pll, 1=pll/2, 2=pll/3, 3=pll/4 -+ -+3. 
Example -+ -+ clockgen: global-utilities@e1000 { -+ compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; -+ clock-frequency = <133333333>; -+ reg = <0xe1000 0x1000>; -+ #clock-cells = <2>; -+ }; -+ -+ fman@400000 { -+ ... -+ clocks = <&clockgen 3 0>; -+ ... -+ }; -+} -+4. Legacy Child Nodes - --2. Clock Provider/Consumer Binding -+NOTE: These nodes are deprecated. Kernels should continue to support -+device trees with these nodes, but new device trees should not use them. - - Most of the bindings are from the common clock binding[1]. - [1] Documentation/devicetree/bindings/clock/clock-bindings.txt -@@ -79,7 +119,7 @@ Recommended properties: - - reg: Should be the offset and length of clock block base address. - The length should be 4. - --Example for clock block and clock provider: -+Legacy Example: - / { - clockgen: global-utilities@e1000 { - compatible = "fsl,p5020-clockgen", "fsl,qoriq-clockgen-1.0"; -@@ -131,7 +171,7 @@ Example for clock block and clock provider: - }; - } - --Example for clock consumer: -+Example for legacy clock consumer: - - / { - cpu0: PowerPC,e5500@0 { -diff --git a/Documentation/devicetree/bindings/i2c/i2c-imx.txt b/Documentation/devicetree/bindings/i2c/i2c-imx.txt -index 4a8513e..52d37fd 100644 ---- a/Documentation/devicetree/bindings/i2c/i2c-imx.txt -+++ b/Documentation/devicetree/bindings/i2c/i2c-imx.txt -@@ -11,6 +11,8 @@ Required properties: - Optional properties: - - clock-frequency : Constains desired I2C/HS-I2C bus clock frequency in Hz. - The absence of the propoerty indicates the default frequency 100 kHz. -+- dmas: A list of two dma specifiers, one for each entry in dma-names. -+- dma-names: should contain "tx" and "rx". 
- - Examples: - -@@ -26,3 +28,12 @@ i2c@70038000 { /* HS-I2C on i.MX51 */ - interrupts = <64>; - clock-frequency = <400000>; - }; -+ -+i2c0: i2c@40066000 { /* i2c0 on vf610 */ -+ compatible = "fsl,vf610-i2c"; -+ reg = <0x40066000 0x1000>; -+ interrupts =<0 71 0x04>; -+ dmas = <&edma0 0 50>, -+ <&edma0 0 51>; -+ dma-names = "rx","tx"; -+}; -diff --git a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt -index 34a3fb6..cf53d5f 100644 ---- a/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt -+++ b/Documentation/devicetree/bindings/i2c/i2c-mux-pca954x.txt -@@ -16,6 +16,9 @@ Required Properties: - Optional Properties: - - - reset-gpios: Reference to the GPIO connected to the reset input. -+ - i2c-mux-idle-disconnect: Boolean; if defined, forces mux to disconnect all -+ children in idle state. This is necessary for example, if there are several -+ multiplexers on the bus and the devices behind them use same I2C addresses. - - - Example: -diff --git a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt -index d5e3704..89427b0 100644 ---- a/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt -+++ b/Documentation/devicetree/bindings/memory-controllers/fsl/ifc.txt -@@ -18,6 +18,8 @@ Properties: - interrupt (NAND_EVTER_STAT). If there is only one, - that interrupt reports both types of event. - -+- little-endian : If this property is absent, the big-endian mode will -+ be in use as default for registers. - - - ranges : Each range corresponds to a single chipselect, and covers - the entire access window as configured. 
-@@ -34,6 +36,7 @@ Example: - #size-cells = <1>; - reg = <0x0 0xffe1e000 0 0x2000>; - interrupts = <16 2 19 2>; -+ little-endian; - - /* NOR, NAND Flashes and CPLD on board */ - ranges = <0x0 0x0 0x0 0xee000000 0x02000000 -diff --git a/Documentation/devicetree/of_selftest.txt b/Documentation/devicetree/of_selftest.txt -index 1e3d5c9..57a808b 100644 ---- a/Documentation/devicetree/of_selftest.txt -+++ b/Documentation/devicetree/of_selftest.txt -@@ -63,7 +63,6 @@ struct device_node { - struct device_node *parent; - struct device_node *child; - struct device_node *sibling; -- struct device_node *allnext; /* next in list of all nodes */ - ... - }; - -@@ -99,12 +98,6 @@ child11 -> sibling12 -> sibling13 -> sibling14 -> null - Figure 1: Generic structure of un-flattened device tree - - --*allnext: it is used to link all the nodes of DT into a list. So, for the -- above tree the list would be as follows: -- --root->child1->child11->sibling12->sibling13->child131->sibling14->sibling2-> --child21->sibling22->sibling23->sibling3->child31->sibling32->sibling4->null -- - Before executing OF selftest, it is required to attach the test data to - machine's device tree (if present). So, when selftest_data_add() is called, - at first it reads the flattened device tree data linked into the kernel image -@@ -131,11 +124,6 @@ root ('/') - test-child01 null null null - - --allnext list: -- --root->testcase-data->test-child0->test-child01->test-sibling1->test-sibling2 --->test-sibling3->null -- - Figure 2: Example test data tree to be attached to live tree. - - According to the scenario above, the live tree is already present so it isn't -@@ -204,8 +192,6 @@ detached and then moving up the parent nodes are removed, and eventually the - whole tree). selftest_data_remove() calls detach_node_and_children() that uses - of_detach_node() to detach the nodes from the live device tree. 
- --To detach a node, of_detach_node() first updates all_next linked list, by --attaching the previous node's allnext to current node's allnext pointer. And --then, it either updates the child pointer of given node's parent to its --sibling or attaches the previous sibling to the given node's sibling, as --appropriate. That is it :) -+To detach a node, of_detach_node() either updates the child pointer of given -+node's parent to its sibling or attaches the previous sibling to the given -+node's sibling, as appropriate. That is it :) -diff --git a/Documentation/devicetree/todo.txt b/Documentation/devicetree/todo.txt -index c3cf065..b5139d1 100644 ---- a/Documentation/devicetree/todo.txt -+++ b/Documentation/devicetree/todo.txt -@@ -2,7 +2,6 @@ Todo list for devicetree: - - === General structure === - - Switch from custom lists to (h)list_head for nodes and properties structure --- Remove of_allnodes list and iterate using list of child nodes alone - - === CONFIG_OF_DYNAMIC === - - Switch to RCU for tree updates and get rid of global spinlock -diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig -index 08e1287..329f5f4 100644 ---- a/arch/arm64/Kconfig -+++ b/arch/arm64/Kconfig -@@ -13,6 +13,7 @@ config ARM64 - select ARM_ARCH_TIMER - select ARM_GIC - select AUDIT_ARCH_COMPAT_GENERIC -+ select ARM_GIC_V2M if PCI_MSI - select ARM_GIC_V3 - select ARM_GIC_V3_ITS if PCI_MSI - select BUILDTIME_EXTABLE_SORT -diff --git a/arch/arm64/include/asm/device.h b/arch/arm64/include/asm/device.h -index cf98b36..243ef25 100644 ---- a/arch/arm64/include/asm/device.h -+++ b/arch/arm64/include/asm/device.h -@@ -21,6 +21,7 @@ struct dev_archdata { - #ifdef CONFIG_IOMMU_API - void *iommu; /* private IOMMU data */ - #endif -+ bool dma_coherent; - }; - - struct pdev_archdata { -diff --git a/arch/arm64/include/asm/dma-mapping.h b/arch/arm64/include/asm/dma-mapping.h -index adeae3f..9ce3e68 100644 ---- a/arch/arm64/include/asm/dma-mapping.h -+++ b/arch/arm64/include/asm/dma-mapping.h -@@ 
-52,12 +52,20 @@ static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops) - dev->archdata.dma_ops = ops; - } - --static inline int set_arch_dma_coherent_ops(struct device *dev) -+static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size, -+ struct iommu_ops *iommu, bool coherent) - { -- set_dma_ops(dev, &coherent_swiotlb_dma_ops); -- return 0; -+ dev->archdata.dma_coherent = coherent; -+ if (coherent) -+ set_dma_ops(dev, &coherent_swiotlb_dma_ops); -+} -+#define arch_setup_dma_ops arch_setup_dma_ops -+ -+/* do not use this function in a driver */ -+static inline bool is_device_dma_coherent(struct device *dev) -+{ -+ return dev->archdata.dma_coherent; - } --#define set_arch_dma_coherent_ops set_arch_dma_coherent_ops - - #include - -diff --git a/arch/powerpc/include/asm/mpc85xx.h b/arch/powerpc/include/asm/mpc85xx.h -deleted file mode 100644 -index 3bef74a..0000000 ---- a/arch/powerpc/include/asm/mpc85xx.h -+++ /dev/null -@@ -1,94 +0,0 @@ --/* -- * MPC85xx cpu type detection -- * -- * Copyright 2011-2012 Freescale Semiconductor, Inc. -- * -- * This is free software; you can redistribute it and/or modify -- * it under the terms of the GNU General Public License as published by -- * the Free Software Foundation; either version 2 of the License, or -- * (at your option) any later version. 
-- */ -- --#ifndef __ASM_PPC_MPC85XX_H --#define __ASM_PPC_MPC85XX_H -- --#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design resision */ --#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/ --#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/ -- --/* Some parts define SVR[0:23] as the SOC version */ --#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */ -- --#define SVR_8533 0x803400 --#define SVR_8535 0x803701 --#define SVR_8536 0x803700 --#define SVR_8540 0x803000 --#define SVR_8541 0x807200 --#define SVR_8543 0x803200 --#define SVR_8544 0x803401 --#define SVR_8545 0x803102 --#define SVR_8547 0x803101 --#define SVR_8548 0x803100 --#define SVR_8555 0x807100 --#define SVR_8560 0x807000 --#define SVR_8567 0x807501 --#define SVR_8568 0x807500 --#define SVR_8569 0x808000 --#define SVR_8572 0x80E000 --#define SVR_P1010 0x80F100 --#define SVR_P1011 0x80E500 --#define SVR_P1012 0x80E501 --#define SVR_P1013 0x80E700 --#define SVR_P1014 0x80F101 --#define SVR_P1017 0x80F700 --#define SVR_P1020 0x80E400 --#define SVR_P1021 0x80E401 --#define SVR_P1022 0x80E600 --#define SVR_P1023 0x80F600 --#define SVR_P1024 0x80E402 --#define SVR_P1025 0x80E403 --#define SVR_P2010 0x80E300 --#define SVR_P2020 0x80E200 --#define SVR_P2040 0x821000 --#define SVR_P2041 0x821001 --#define SVR_P3041 0x821103 --#define SVR_P4040 0x820100 --#define SVR_P4080 0x820000 --#define SVR_P5010 0x822100 --#define SVR_P5020 0x822000 --#define SVR_P5021 0X820500 --#define SVR_P5040 0x820400 --#define SVR_T4240 0x824000 --#define SVR_T4120 0x824001 --#define SVR_T4160 0x824100 --#define SVR_C291 0x850000 --#define SVR_C292 0x850020 --#define SVR_C293 0x850030 --#define SVR_B4860 0X868000 --#define SVR_G4860 0x868001 --#define SVR_G4060 0x868003 --#define SVR_B4440 0x868100 --#define SVR_G4440 0x868101 --#define SVR_B4420 0x868102 --#define SVR_B4220 0x868103 --#define SVR_T1040 0x852000 --#define SVR_T1041 0x852001 --#define SVR_T1042 0x852002 
--#define SVR_T1020 0x852100 --#define SVR_T1021 0x852101 --#define SVR_T1022 0x852102 --#define SVR_T2080 0x853000 --#define SVR_T2081 0x853100 -- --#define SVR_8610 0x80A000 --#define SVR_8641 0x809000 --#define SVR_8641D 0x809001 -- --#define SVR_9130 0x860001 --#define SVR_9131 0x860000 --#define SVR_9132 0x861000 --#define SVR_9232 0x861400 -- --#define SVR_Unknown 0xFFFFFF -- --#endif -diff --git a/arch/powerpc/platforms/85xx/mpc85xx_mds.c b/arch/powerpc/platforms/85xx/mpc85xx_mds.c -index a392e94..f0be439 100644 ---- a/arch/powerpc/platforms/85xx/mpc85xx_mds.c -+++ b/arch/powerpc/platforms/85xx/mpc85xx_mds.c -@@ -34,6 +34,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -51,7 +52,6 @@ - #include - #include - #include --#include - #include "smp.h" - - #include "mpc85xx.h" -diff --git a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c -index e358bed..50dcc00 100644 ---- a/arch/powerpc/platforms/85xx/mpc85xx_rdb.c -+++ b/arch/powerpc/platforms/85xx/mpc85xx_rdb.c -@@ -17,6 +17,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -27,7 +28,6 @@ - #include - #include - #include --#include - - #include - #include -diff --git a/arch/powerpc/platforms/85xx/p1022_ds.c b/arch/powerpc/platforms/85xx/p1022_ds.c -index 6ac986d..371df82 100644 ---- a/arch/powerpc/platforms/85xx/p1022_ds.c -+++ b/arch/powerpc/platforms/85xx/p1022_ds.c -@@ -16,6 +16,7 @@ - * kind, whether express or implied. - */ - -+#include - #include - #include - #include -@@ -25,7 +26,6 @@ - #include - #include - #include --#include - #include - #include "smp.h" - -diff --git a/arch/powerpc/platforms/85xx/p1022_rdk.c b/arch/powerpc/platforms/85xx/p1022_rdk.c -index 7a180f0..4f8fc5f 100644 ---- a/arch/powerpc/platforms/85xx/p1022_rdk.c -+++ b/arch/powerpc/platforms/85xx/p1022_rdk.c -@@ -12,6 +12,7 @@ - * kind, whether express or implied. 
- */ - -+#include - #include - #include - #include -@@ -21,7 +22,6 @@ - #include - #include - #include --#include - #include "smp.h" - - #include "mpc85xx.h" -diff --git a/arch/powerpc/platforms/85xx/smp.c b/arch/powerpc/platforms/85xx/smp.c -index d7c1e69..3956455 100644 ---- a/arch/powerpc/platforms/85xx/smp.c -+++ b/arch/powerpc/platforms/85xx/smp.c -@@ -19,6 +19,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -26,7 +27,6 @@ - #include - #include - #include --#include - #include - #include - -diff --git a/arch/powerpc/platforms/85xx/twr_p102x.c b/arch/powerpc/platforms/85xx/twr_p102x.c -index 1eadb6d..2799120 100644 ---- a/arch/powerpc/platforms/85xx/twr_p102x.c -+++ b/arch/powerpc/platforms/85xx/twr_p102x.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - #include - #include - -@@ -23,7 +24,6 @@ - #include - #include - #include --#include - - #include - #include -diff --git a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c -index 55413a5..437a9c3 100644 ---- a/arch/powerpc/platforms/86xx/mpc8610_hpcd.c -+++ b/arch/powerpc/platforms/86xx/mpc8610_hpcd.c -@@ -24,6 +24,7 @@ - #include - #include - #include -+#include - - #include - #include -@@ -38,7 +39,6 @@ - #include - #include - #include --#include - - #include "mpc86xx.h" - -diff --git a/arch/x86/pci/xen.c b/arch/x86/pci/xen.c -index 4f6844b..878fb8e 100644 ---- a/arch/x86/pci/xen.c -+++ b/arch/x86/pci/xen.c -@@ -296,12 +296,16 @@ static int xen_initdom_setup_msi_irqs(struct pci_dev *dev, int nvec, int type) - map_irq.entry_nr = nvec; - } else if (type == PCI_CAP_ID_MSIX) { - int pos; -+ unsigned long flags; - u32 table_offset, bir; - - pos = dev->msix_cap; - pci_read_config_dword(dev, pos + PCI_MSIX_TABLE, - &table_offset); - bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); -+ flags = pci_resource_flags(dev, bir); -+ if (!flags || (flags & IORESOURCE_UNSET)) -+ return -EINVAL; - - map_irq.table_base = pci_resource_start(dev, 
bir); - map_irq.entry_nr = msidesc->msi_attrib.entry_nr; -diff --git a/drivers/clk/Kconfig b/drivers/clk/Kconfig -index 455fd17..38c8814 100644 ---- a/drivers/clk/Kconfig -+++ b/drivers/clk/Kconfig -@@ -101,12 +101,12 @@ config COMMON_CLK_AXI_CLKGEN - Support for the Analog Devices axi-clkgen pcore clock generator for Xilinx - FPGAs. It is commonly used in Analog Devices' reference designs. - --config CLK_PPC_CORENET -- bool "Clock driver for PowerPC corenet platforms" -- depends on PPC_E500MC && OF -+config CLK_QORIQ -+ bool "Clock driver for Freescale QorIQ platforms" -+ depends on (PPC_E500MC || ARM || ARM64) && OF - ---help--- -- This adds the clock driver support for Freescale PowerPC corenet -- platforms using common clock framework. -+ This adds the clock driver support for Freescale QorIQ platforms -+ using common clock framework. - - config COMMON_CLK_XGENE - bool "Clock driver for APM XGene SoC" -diff --git a/drivers/clk/Makefile b/drivers/clk/Makefile -index d5fba5b..4ff94cd 100644 ---- a/drivers/clk/Makefile -+++ b/drivers/clk/Makefile -@@ -30,7 +30,7 @@ obj-$(CONFIG_ARCH_MOXART) += clk-moxart.o - obj-$(CONFIG_ARCH_NOMADIK) += clk-nomadik.o - obj-$(CONFIG_ARCH_NSPIRE) += clk-nspire.o - obj-$(CONFIG_COMMON_CLK_PALMAS) += clk-palmas.o --obj-$(CONFIG_CLK_PPC_CORENET) += clk-ppc-corenet.o -+obj-$(CONFIG_CLK_QORIQ) += clk-qoriq.o - obj-$(CONFIG_COMMON_CLK_RK808) += clk-rk808.o - obj-$(CONFIG_COMMON_CLK_S2MPS11) += clk-s2mps11.o - obj-$(CONFIG_COMMON_CLK_SI5351) += clk-si5351.o -diff --git a/drivers/clk/clk-qoriq.c b/drivers/clk/clk-qoriq.c -new file mode 100644 -index 0000000..74051c9 ---- /dev/null -+++ b/drivers/clk/clk-qoriq.c -@@ -0,0 +1,1256 @@ -+/* -+ * Copyright 2013 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License version 2 as -+ * published by the Free Software Foundation. -+ * -+ * clock driver for Freescale QorIQ SoCs. 
-+ */ -+ -+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define PLL_DIV1 0 -+#define PLL_DIV2 1 -+#define PLL_DIV3 2 -+#define PLL_DIV4 3 -+ -+#define PLATFORM_PLL 0 -+#define CGA_PLL1 1 -+#define CGA_PLL2 2 -+#define CGA_PLL3 3 -+#define CGA_PLL4 4 /* only on clockgen-1.0, which lacks CGB */ -+#define CGB_PLL1 4 -+#define CGB_PLL2 5 -+ -+struct clockgen_pll_div { -+ struct clk *clk; -+ char name[32]; -+}; -+ -+struct clockgen_pll { -+ struct clockgen_pll_div div[4]; -+}; -+ -+#define CLKSEL_VALID 1 -+#define CLKSEL_80PCT 2 /* Only allowed if PLL <= 80% of max cpu freq */ -+ -+struct clockgen_sourceinfo { -+ u32 flags; /* CLKSEL_xxx */ -+ int pll; /* CGx_PLLn */ -+ int div; /* PLL_DIVn */ -+}; -+ -+#define NUM_MUX_PARENTS 16 -+ -+struct clockgen_muxinfo { -+ struct clockgen_sourceinfo clksel[NUM_MUX_PARENTS]; -+}; -+ -+#define NUM_HWACCEL 5 -+#define NUM_CMUX 8 -+ -+struct clockgen; -+ -+/* -+ * cmux freq must be >= platform pll. 
-+ * If not set, cmux freq must be >= platform pll/2 -+ */ -+#define CG_CMUX_GE_PLAT 1 -+ -+#define CG_PLL_8BIT 2 /* PLLCnGSR[CFG] is 8 bits, not 6 */ -+#define CG_VER3 4 /* version 3 cg: reg layout different */ -+#define CG_LITTLE_ENDIAN 8 -+ -+struct clockgen_chipinfo { -+ const char *compat, *guts_compat; -+ const struct clockgen_muxinfo *cmux_groups[2]; -+ const struct clockgen_muxinfo *hwaccel[NUM_HWACCEL]; -+ void (*init_periph)(struct clockgen *cg); -+ int cmux_to_group[NUM_CMUX]; /* -1 terminates if fewer than NUM_CMUX */ -+ u32 pll_mask; /* 1 << n bit set if PLL n is valid */ -+ u32 flags; /* CG_xxx */ -+}; -+ -+struct clockgen { -+ struct device_node *node; -+ void __iomem *regs; -+ struct clockgen_chipinfo info; /* mutable copy */ -+ struct clk *sysclk; -+ struct clockgen_pll pll[6]; -+ struct clk *cmux[NUM_CMUX]; -+ struct clk *hwaccel[NUM_HWACCEL]; -+ struct clk *fman[2]; -+ struct ccsr_guts __iomem *guts; -+}; -+ -+static struct clockgen clockgen; -+ -+static void cg_out(struct clockgen *cg, u32 val, u32 __iomem *reg) -+{ -+ if (cg->info.flags & CG_LITTLE_ENDIAN) -+ iowrite32(val, reg); -+ else -+ iowrite32be(val, reg); -+} -+ -+static u32 cg_in(struct clockgen *cg, u32 __iomem *reg) -+{ -+ u32 val; -+ -+ if (cg->info.flags & CG_LITTLE_ENDIAN) -+ val = ioread32(reg); -+ else -+ val = ioread32be(reg); -+ -+ return val; -+} -+ -+static const struct clockgen_muxinfo p2041_cmux_grp1 = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p2041_cmux_grp2 = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [4] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p5020_cmux_grp1 = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID | CLKSEL_80PCT, 
CGA_PLL2, PLL_DIV1 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p5020_cmux_grp2 = { -+ { -+ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, -+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p5040_cmux_grp1 = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV1 }, -+ [5] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL2, PLL_DIV2 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p5040_cmux_grp2 = { -+ { -+ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p4080_cmux_grp1 = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ [8] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL3, PLL_DIV1 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo p4080_cmux_grp2 = { -+ { -+ [0] = { CLKSEL_VALID | CLKSEL_80PCT, CGA_PLL1, PLL_DIV1 }, -+ [8] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 }, -+ [9] = { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 }, -+ [12] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV1 }, -+ [13] = { CLKSEL_VALID, CGA_PLL4, PLL_DIV2 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo t1023_cmux = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ } -+}; -+ -+static const struct clockgen_muxinfo t1040_cmux = { -+ { -+ [0] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ [1] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ [4] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ [5] = { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ } -+}; -+ -+ -+static const struct clockgen_muxinfo 
clockgen2_cmux_cga = { -+ { -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, -+ {}, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, -+ {}, -+ { CLKSEL_VALID, CGA_PLL3, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL3, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL3, PLL_DIV4 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo clockgen2_cmux_cga12 = { -+ { -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, -+ {}, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV4 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo clockgen2_cmux_cgb = { -+ { -+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 }, -+ {}, -+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV1 }, -+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t1023_hwa1 = { -+ { -+ {}, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t1023_hwa2 = { -+ { -+ [6] = { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t2080_hwa1 = { -+ { -+ {}, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, -+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t2080_hwa2 = { -+ { -+ {}, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, -+ { CLKSEL_VALID, 
CGA_PLL2, PLL_DIV4 }, -+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t4240_hwa1 = { -+ { -+ { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV1 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV3 }, -+ { CLKSEL_VALID, CGA_PLL1, PLL_DIV4 }, -+ {}, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV2 }, -+ { CLKSEL_VALID, CGA_PLL2, PLL_DIV3 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t4240_hwa4 = { -+ { -+ [2] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, -+ [3] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 }, -+ [4] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV4 }, -+ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, -+ [6] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, -+ }, -+}; -+ -+static const struct clockgen_muxinfo t4240_hwa5 = { -+ { -+ [2] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV2 }, -+ [3] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV3 }, -+ [4] = { CLKSEL_VALID, CGB_PLL2, PLL_DIV4 }, -+ [5] = { CLKSEL_VALID, PLATFORM_PLL, PLL_DIV1 }, -+ [6] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV2 }, -+ [7] = { CLKSEL_VALID, CGB_PLL1, PLL_DIV3 }, -+ }, -+}; -+ -+#define RCWSR7_FM1_CLK_SEL 0x40000000 -+#define RCWSR7_FM2_CLK_SEL 0x20000000 -+#define RCWSR7_HWA_ASYNC_DIV 0x04000000 -+ -+static void __init p2041_init_periph(struct clockgen *cg) -+{ -+ u32 reg; -+ -+ reg = ioread32be(&cg->guts->rcwsr[7]); -+ -+ if (reg & RCWSR7_FM1_CLK_SEL) -+ cg->fman[0] = cg->pll[CGA_PLL2].div[PLL_DIV2].clk; -+ else -+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+} -+ -+static void __init p4080_init_periph(struct clockgen *cg) -+{ -+ u32 reg; -+ -+ reg = ioread32be(&cg->guts->rcwsr[7]); -+ -+ if (reg & RCWSR7_FM1_CLK_SEL) -+ cg->fman[0] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk; -+ else -+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+ -+ if (reg & RCWSR7_FM2_CLK_SEL) -+ cg->fman[1] = cg->pll[CGA_PLL3].div[PLL_DIV2].clk; -+ else -+ 
cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+} -+ -+static void __init p5020_init_periph(struct clockgen *cg) -+{ -+ u32 reg; -+ int div = PLL_DIV2; -+ -+ reg = ioread32be(&cg->guts->rcwsr[7]); -+ if (reg & RCWSR7_HWA_ASYNC_DIV) -+ div = PLL_DIV4; -+ -+ if (reg & RCWSR7_FM1_CLK_SEL) -+ cg->fman[0] = cg->pll[CGA_PLL2].div[div].clk; -+ else -+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+} -+ -+static void __init p5040_init_periph(struct clockgen *cg) -+{ -+ u32 reg; -+ int div = PLL_DIV2; -+ -+ reg = ioread32be(&cg->guts->rcwsr[7]); -+ if (reg & RCWSR7_HWA_ASYNC_DIV) -+ div = PLL_DIV4; -+ -+ if (reg & RCWSR7_FM1_CLK_SEL) -+ cg->fman[0] = cg->pll[CGA_PLL3].div[div].clk; -+ else -+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+ -+ if (reg & RCWSR7_FM2_CLK_SEL) -+ cg->fman[1] = cg->pll[CGA_PLL3].div[div].clk; -+ else -+ cg->fman[1] = cg->pll[PLATFORM_PLL].div[PLL_DIV2].clk; -+} -+ -+static void __init t1023_init_periph(struct clockgen *cg) -+{ -+ cg->fman[0] = cg->hwaccel[1]; -+} -+ -+static void __init t1040_init_periph(struct clockgen *cg) -+{ -+ cg->fman[0] = cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk; -+} -+ -+static void __init t2080_init_periph(struct clockgen *cg) -+{ -+ cg->fman[0] = cg->hwaccel[0]; -+} -+ -+static void __init t4240_init_periph(struct clockgen *cg) -+{ -+ cg->fman[0] = cg->hwaccel[3]; -+ cg->fman[1] = cg->hwaccel[4]; -+} -+ -+static const struct clockgen_chipinfo chipinfo[] = { -+ { -+ .compat = "fsl,b4420-clockgen", -+ .guts_compat = "fsl,b4860-device-config", -+ .init_periph = t2080_init_periph, -+ .cmux_groups = { -+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb -+ }, -+ .hwaccel = { -+ &t2080_hwa1 -+ }, -+ .cmux_to_group = { -+ 0, 1, 1, 1, -1 -+ }, -+ .pll_mask = 0x3f, -+ .flags = CG_PLL_8BIT, -+ }, -+ { -+ .compat = "fsl,b4860-clockgen", -+ .guts_compat = "fsl,b4860-device-config", -+ .init_periph = t2080_init_periph, -+ .cmux_groups = { -+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb -+ }, -+ .hwaccel = { -+ 
&t2080_hwa1 -+ }, -+ .cmux_to_group = { -+ 0, 1, 1, 1, -1 -+ }, -+ .pll_mask = 0x3f, -+ .flags = CG_PLL_8BIT, -+ }, -+ { -+ .compat = "fsl,ls1021a-clockgen", -+ .cmux_groups = { -+ &t1023_cmux -+ }, -+ .cmux_to_group = { -+ 0, -1 -+ }, -+ .pll_mask = 0x03, -+ }, -+ { -+ .compat = "fsl,ls2080a-clockgen", -+ .cmux_groups = { -+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, 1, -1 -+ }, -+ .pll_mask = 0x37, -+ .flags = CG_VER3 | CG_LITTLE_ENDIAN, -+ }, -+ { -+ .compat = "fsl,ls2088a-clockgen", -+ .cmux_groups = { -+ &clockgen2_cmux_cga12, &clockgen2_cmux_cgb -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, 1, -1 -+ }, -+ .pll_mask = 0x37, -+ .flags = CG_VER3 | CG_LITTLE_ENDIAN, -+ }, -+ { -+ .compat = "fsl,p2041-clockgen", -+ .guts_compat = "fsl,qoriq-device-config-1.0", -+ .init_periph = p2041_init_periph, -+ .cmux_groups = { -+ &p2041_cmux_grp1, &p2041_cmux_grp2 -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, 1, -1 -+ }, -+ .pll_mask = 0x07, -+ }, -+ { -+ .compat = "fsl,p3041-clockgen", -+ .guts_compat = "fsl,qoriq-device-config-1.0", -+ .init_periph = p2041_init_periph, -+ .cmux_groups = { -+ &p2041_cmux_grp1, &p2041_cmux_grp2 -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, 1, -1 -+ }, -+ .pll_mask = 0x07, -+ }, -+ { -+ .compat = "fsl,p4080-clockgen", -+ .guts_compat = "fsl,qoriq-device-config-1.0", -+ .init_periph = p4080_init_periph, -+ .cmux_groups = { -+ &p4080_cmux_grp1, &p4080_cmux_grp2 -+ }, -+ .cmux_to_group = { -+ 0, 0, 0, 0, 1, 1, 1, 1 -+ }, -+ .pll_mask = 0x1f, -+ }, -+ { -+ .compat = "fsl,p5020-clockgen", -+ .guts_compat = "fsl,qoriq-device-config-1.0", -+ .init_periph = p5020_init_periph, -+ .cmux_groups = { -+ &p2041_cmux_grp1, &p2041_cmux_grp2 -+ }, -+ .cmux_to_group = { -+ 0, 1, -1 -+ }, -+ .pll_mask = 0x07, -+ }, -+ { -+ .compat = "fsl,p5040-clockgen", -+ .guts_compat = "fsl,p5040-device-config", -+ .init_periph = p5040_init_periph, -+ .cmux_groups = { -+ &p5040_cmux_grp1, &p5040_cmux_grp2 -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, 1, -1 -+ 
}, -+ .pll_mask = 0x0f, -+ }, -+ { -+ .compat = "fsl,t1023-clockgen", -+ .guts_compat = "fsl,t1023-device-config", -+ .init_periph = t1023_init_periph, -+ .cmux_groups = { -+ &t1023_cmux -+ }, -+ .hwaccel = { -+ &t1023_hwa1, &t1023_hwa2 -+ }, -+ .cmux_to_group = { -+ 0, 0, -1 -+ }, -+ .pll_mask = 0x03, -+ .flags = CG_PLL_8BIT, -+ }, -+ { -+ .compat = "fsl,t1040-clockgen", -+ .guts_compat = "fsl,t1040-device-config", -+ .init_periph = t1040_init_periph, -+ .cmux_groups = { -+ &t1040_cmux -+ }, -+ .cmux_to_group = { -+ 0, 0, 0, 0, -1 -+ }, -+ .pll_mask = 0x07, -+ .flags = CG_PLL_8BIT, -+ }, -+ { -+ .compat = "fsl,t2080-clockgen", -+ .guts_compat = "fsl,t2080-device-config", -+ .init_periph = t2080_init_periph, -+ .cmux_groups = { -+ &clockgen2_cmux_cga12 -+ }, -+ .hwaccel = { -+ &t2080_hwa1, &t2080_hwa2 -+ }, -+ .cmux_to_group = { -+ 0, -1 -+ }, -+ .pll_mask = 0x07, -+ .flags = CG_PLL_8BIT, -+ }, -+ { -+ .compat = "fsl,t4240-clockgen", -+ .guts_compat = "fsl,t4240-device-config", -+ .init_periph = t4240_init_periph, -+ .cmux_groups = { -+ &clockgen2_cmux_cga, &clockgen2_cmux_cgb -+ }, -+ .hwaccel = { -+ &t4240_hwa1, NULL, NULL, &t4240_hwa4, &t4240_hwa5 -+ }, -+ .cmux_to_group = { -+ 0, 0, 1, -1 -+ }, -+ .pll_mask = 0x3f, -+ .flags = CG_PLL_8BIT, -+ }, -+ {}, -+}; -+ -+struct mux_hwclock { -+ struct clk_hw hw; -+ struct clockgen *cg; -+ const struct clockgen_muxinfo *info; -+ u32 __iomem *reg; -+ u8 parent_to_clksel[NUM_MUX_PARENTS]; -+ s8 clksel_to_parent[NUM_MUX_PARENTS]; -+ int num_parents; -+}; -+ -+#define to_mux_hwclock(p) container_of(p, struct mux_hwclock, hw) -+#define CLKSEL_MASK 0x78000000 -+#define CLKSEL_SHIFT 27 -+ -+static int mux_set_parent(struct clk_hw *hw, u8 idx) -+{ -+ struct mux_hwclock *hwc = to_mux_hwclock(hw); -+ u32 clksel; -+ -+ if (idx >= hwc->num_parents) -+ return -EINVAL; -+ -+ clksel = hwc->parent_to_clksel[idx]; -+ cg_out(hwc->cg, (clksel << CLKSEL_SHIFT) & CLKSEL_MASK, hwc->reg); -+ -+ return 0; -+} -+ -+static u8 
mux_get_parent(struct clk_hw *hw) -+{ -+ struct mux_hwclock *hwc = to_mux_hwclock(hw); -+ u32 clksel; -+ s8 ret; -+ -+ clksel = (cg_in(hwc->cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; -+ -+ ret = hwc->clksel_to_parent[clksel]; -+ if (ret < 0) { -+ pr_err("%s: mux at %p has bad clksel\n", __func__, hwc->reg); -+ return 0; -+ } -+ -+ return ret; -+} -+ -+static const struct clk_ops cmux_ops = { -+ .get_parent = mux_get_parent, -+ .set_parent = mux_set_parent, -+}; -+ -+/* -+ * Don't allow setting for now, as the clock options haven't been -+ * sanitized for additional restrictions. -+ */ -+static const struct clk_ops hwaccel_ops = { -+ .get_parent = mux_get_parent, -+}; -+ -+static const struct clockgen_pll_div *get_pll_div(struct clockgen *cg, -+ struct mux_hwclock *hwc, -+ int idx) -+{ -+ int pll, div; -+ -+ if (!(hwc->info->clksel[idx].flags & CLKSEL_VALID)) -+ return NULL; -+ -+ pll = hwc->info->clksel[idx].pll; -+ div = hwc->info->clksel[idx].div; -+ -+ return &cg->pll[pll].div[div]; -+} -+ -+static struct clk * __init create_mux_common(struct clockgen *cg, -+ struct mux_hwclock *hwc, -+ const struct clk_ops *ops, -+ unsigned long min_rate, -+ unsigned long pct80_rate, -+ const char *fmt, int idx) -+{ -+ struct clk_init_data init = {}; -+ struct clk *clk; -+ const struct clockgen_pll_div *div; -+ const char *parent_names[NUM_MUX_PARENTS]; -+ char name[32]; -+ int i, j; -+ -+ snprintf(name, sizeof(name), fmt, idx); -+ -+ for (i = 0, j = 0; i < NUM_MUX_PARENTS; i++) { -+ unsigned long rate; -+ -+ hwc->clksel_to_parent[i] = -1; -+ -+ div = get_pll_div(cg, hwc, i); -+ if (!div) -+ continue; -+ -+ rate = clk_get_rate(div->clk); -+ -+ if (hwc->info->clksel[i].flags & CLKSEL_80PCT && -+ rate > pct80_rate) -+ continue; -+ if (rate < min_rate) -+ continue; -+ -+ parent_names[j] = div->name; -+ hwc->parent_to_clksel[j] = i; -+ hwc->clksel_to_parent[i] = j; -+ j++; -+ } -+ -+ init.name = name; -+ init.ops = ops; -+ init.parent_names = parent_names; -+ init.num_parents = 
hwc->num_parents = j; -+ init.flags = 0; -+ hwc->hw.init = &init; -+ hwc->cg = cg; -+ -+ clk = clk_register(NULL, &hwc->hw); -+ if (IS_ERR(clk)) { -+ pr_err("%s: Couldn't register %s: %ld\n", __func__, name, -+ PTR_ERR(clk)); -+ kfree(hwc); -+ return NULL; -+ } -+ -+ return clk; -+} -+ -+static struct clk * __init create_one_cmux(struct clockgen *cg, int idx) -+{ -+ struct mux_hwclock *hwc; -+ const struct clockgen_pll_div *div; -+ unsigned long plat_rate, min_rate; -+ u64 pct80_rate; -+ u32 clksel; -+ -+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); -+ if (!hwc) -+ return NULL; -+ -+ if (cg->info.flags & CG_VER3) -+ hwc->reg = cg->regs + 0x70000 + 0x20 * idx; -+ else -+ hwc->reg = cg->regs + 0x20 * idx; -+ -+ hwc->info = cg->info.cmux_groups[cg->info.cmux_to_group[idx]]; -+ -+ /* -+ * Find the rate for the default clksel, and treat it as the -+ * maximum rated core frequency. If this is an incorrect -+ * assumption, certain clock options (possibly including the -+ * default clksel) may be inappropriately excluded on certain -+ * chips. 
-+ */ -+ clksel = (cg_in(cg, hwc->reg) & CLKSEL_MASK) >> CLKSEL_SHIFT; -+ div = get_pll_div(cg, hwc, clksel); -+ if (!div) -+ return NULL; -+ -+ pct80_rate = clk_get_rate(div->clk); -+ pct80_rate *= 8; -+ do_div(pct80_rate, 10); -+ -+ plat_rate = clk_get_rate(cg->pll[PLATFORM_PLL].div[PLL_DIV1].clk); -+ -+ if (cg->info.flags & CG_CMUX_GE_PLAT) -+ min_rate = plat_rate; -+ else -+ min_rate = plat_rate / 2; -+ -+ return create_mux_common(cg, hwc, &cmux_ops, min_rate, -+ pct80_rate, "cg-cmux%d", idx); -+} -+ -+static struct clk * __init create_one_hwaccel(struct clockgen *cg, int idx) -+{ -+ struct mux_hwclock *hwc; -+ -+ hwc = kzalloc(sizeof(*hwc), GFP_KERNEL); -+ if (!hwc) -+ return NULL; -+ -+ hwc->reg = cg->regs + 0x20 * idx + 0x10; -+ hwc->info = cg->info.hwaccel[idx]; -+ -+ return create_mux_common(cg, hwc, &hwaccel_ops, 0, 0, -+ "cg-hwaccel%d", idx); -+} -+ -+static void __init create_muxes(struct clockgen *cg) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(cg->cmux); i++) { -+ if (cg->info.cmux_to_group[i] < 0) -+ break; -+ if (cg->info.cmux_to_group[i] >= -+ ARRAY_SIZE(cg->info.cmux_groups)) { -+ WARN_ON_ONCE(1); -+ continue; -+ } -+ -+ cg->cmux[i] = create_one_cmux(cg, i); -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(cg->hwaccel); i++) { -+ if (!cg->info.hwaccel[i]) -+ continue; -+ -+ cg->hwaccel[i] = create_one_hwaccel(cg, i); -+ } -+} -+ -+static void __init clockgen_init(struct device_node *np); -+ -+/* Legacy nodes may get probed before the parent clockgen node */ -+static void __init legacy_init_clockgen(struct device_node *np) -+{ -+ if (!clockgen.node) -+ clockgen_init(of_get_parent(np)); -+} -+ -+/* Legacy node */ -+static void __init core_mux_init(struct device_node *np) -+{ -+ struct clk *clk; -+ struct resource res; -+ int idx, rc; -+ -+ legacy_init_clockgen(np); -+ -+ if (of_address_to_resource(np, 0, &res)) -+ return; -+ -+ idx = (res.start & 0xf0) >> 5; -+ clk = clockgen.cmux[idx]; -+ -+ rc = of_clk_add_provider(np, of_clk_src_simple_get, clk); -+ if 
(rc) { -+ pr_err("%s: Couldn't register clk provider for node %s: %d\n", -+ __func__, np->name, rc); -+ return; -+ } -+} -+ -+static struct clk *sysclk_from_fixed(struct device_node *node, const char *name) -+{ -+ u32 rate; -+ -+ if (of_property_read_u32(node, "clock-frequency", &rate)) -+ return ERR_PTR(-ENODEV); -+ -+ return clk_register_fixed_rate(NULL, name, NULL, CLK_IS_ROOT, rate); -+} -+ -+static struct clk *sysclk_from_parent(const char *name) -+{ -+ struct clk *clk; -+ const char *parent_name; -+ -+ clk = of_clk_get(clockgen.node, 0); -+ if (IS_ERR(clk)) -+ return clk; -+ -+ /* Register the input clock under the desired name. */ -+ parent_name = __clk_get_name(clk); -+ clk = clk_register_fixed_factor(NULL, name, parent_name, -+ 0, 1, 1); -+ if (IS_ERR(clk)) -+ pr_err("%s: Couldn't register %s: %ld\n", __func__, name, -+ PTR_ERR(clk)); -+ -+ return clk; -+} -+ -+static struct clk * __init create_sysclk(const char *name) -+{ -+ struct device_node *sysclk; -+ struct clk *clk; -+ -+ clk = sysclk_from_fixed(clockgen.node, name); -+ if (!IS_ERR(clk)) -+ return clk; -+ -+ clk = sysclk_from_parent(name); -+ if (!IS_ERR(clk)) -+ return clk; -+ -+ sysclk = of_get_child_by_name(clockgen.node, "sysclk"); -+ if (sysclk) { -+ clk = sysclk_from_fixed(sysclk, name); -+ if (!IS_ERR(clk)) -+ return clk; -+ } -+ -+ pr_err("%s: No input clock\n", __func__); -+ return NULL; -+} -+ -+/* Legacy node */ -+static void __init sysclk_init(struct device_node *node) -+{ -+ struct clk *clk; -+ -+ legacy_init_clockgen(node); -+ -+ clk = clockgen.sysclk; -+ if (clk) -+ of_clk_add_provider(node, of_clk_src_simple_get, clk); -+} -+ -+#define PLL_KILL BIT(31) -+ -+static void __init create_one_pll(struct clockgen *cg, int idx) -+{ -+ u32 __iomem *reg; -+ u32 mult; -+ struct clockgen_pll *pll = &cg->pll[idx]; -+ int i; -+ -+ if (!(cg->info.pll_mask & (1 << idx))) -+ return; -+ -+ if (cg->info.flags & CG_VER3) { -+ switch (idx) { -+ case PLATFORM_PLL: -+ reg = cg->regs + 0x60080; -+ break; -+ 
case CGA_PLL1: -+ reg = cg->regs + 0x80; -+ break; -+ case CGA_PLL2: -+ reg = cg->regs + 0xa0; -+ break; -+ case CGB_PLL1: -+ reg = cg->regs + 0x10080; -+ break; -+ case CGB_PLL2: -+ reg = cg->regs + 0x100a0; -+ break; -+ default: -+ WARN_ONCE(1, "index %d\n", idx); -+ return; -+ } -+ } else { -+ if (idx == PLATFORM_PLL) -+ reg = cg->regs + 0xc00; -+ else -+ reg = cg->regs + 0x800 + 0x20 * (idx - 1); -+ } -+ -+ /* Get the multiple of PLL */ -+ mult = cg_in(cg, reg); -+ -+ /* Check if this PLL is disabled */ -+ if (mult & PLL_KILL) { -+ pr_debug("%s(): pll %p disabled\n", __func__, reg); -+ return; -+ } -+ -+ if ((cg->info.flags & CG_VER3) || -+ ((cg->info.flags & CG_PLL_8BIT) && idx != PLATFORM_PLL)) -+ mult = (mult & GENMASK(8, 1)) >> 1; -+ else -+ mult = (mult & GENMASK(6, 1)) >> 1; -+ -+ for (i = 0; i < ARRAY_SIZE(pll->div); i++) { -+ struct clk *clk; -+ -+ snprintf(pll->div[i].name, sizeof(pll->div[i].name), -+ "cg-pll%d-div%d", idx, i + 1); -+ -+ clk = clk_register_fixed_factor(NULL, -+ pll->div[i].name, "cg-sysclk", 0, mult, i + 1); -+ if (IS_ERR(clk)) { -+ pr_err("%s: %s: register failed %ld\n", -+ __func__, pll->div[i].name, PTR_ERR(clk)); -+ continue; -+ } -+ -+ pll->div[i].clk = clk; -+ } -+} -+ -+static void __init create_plls(struct clockgen *cg) -+{ -+ int i; -+ -+ for (i = 0; i < ARRAY_SIZE(cg->pll); i++) -+ create_one_pll(cg, i); -+} -+ -+static void __init legacy_pll_init(struct device_node *np, int idx) -+{ -+ struct clockgen_pll *pll; -+ struct clk_onecell_data *onecell_data; -+ struct clk **subclks; -+ int count, rc; -+ -+ legacy_init_clockgen(np); -+ -+ pll = &clockgen.pll[idx]; -+ count = of_property_count_strings(np, "clock-output-names"); -+ -+ BUILD_BUG_ON(ARRAY_SIZE(pll->div) < 4); -+ subclks = kcalloc(4, sizeof(struct clk *), GFP_KERNEL); -+ if (!subclks) -+ return; -+ -+ onecell_data = kmalloc(sizeof(*onecell_data), GFP_KERNEL); -+ if (!onecell_data) -+ goto err_clks; -+ -+ if (count <= 3) { -+ subclks[0] = pll->div[0].clk; -+ subclks[1] 
= pll->div[1].clk; -+ subclks[2] = pll->div[3].clk; -+ } else { -+ subclks[0] = pll->div[0].clk; -+ subclks[1] = pll->div[1].clk; -+ subclks[2] = pll->div[2].clk; -+ subclks[3] = pll->div[3].clk; -+ } -+ -+ onecell_data->clks = subclks; -+ onecell_data->clk_num = count; -+ -+ rc = of_clk_add_provider(np, of_clk_src_onecell_get, onecell_data); -+ if (rc) { -+ pr_err("%s: Couldn't register clk provider for node %s: %d\n", -+ __func__, np->name, rc); -+ goto err_cell; -+ } -+ -+ return; -+err_cell: -+ kfree(onecell_data); -+err_clks: -+ kfree(subclks); -+} -+ -+/* Legacy node */ -+static void __init pltfrm_pll_init(struct device_node *np) -+{ -+ legacy_pll_init(np, PLATFORM_PLL); -+} -+ -+/* Legacy node */ -+static void __init core_pll_init(struct device_node *np) -+{ -+ struct resource res; -+ int idx; -+ -+ if (of_address_to_resource(np, 0, &res)) -+ return; -+ -+ if ((res.start & 0xfff) == 0xc00) { -+ /* -+ * ls1021a devtree labels the platform PLL -+ * with the core PLL compatible -+ */ -+ pltfrm_pll_init(np); -+ } else { -+ idx = (res.start & 0xf0) >> 5; -+ legacy_pll_init(np, CGA_PLL1 + idx); -+ } -+} -+ -+static struct clk *clockgen_clk_get(struct of_phandle_args *clkspec, void *data) -+{ -+ struct clockgen *cg = data; -+ struct clk *clk; -+ struct clockgen_pll *pll; -+ u32 type, idx; -+ -+ if (clkspec->args_count < 2) { -+ pr_err("%s: insufficient phandle args\n", __func__); -+ return ERR_PTR(-EINVAL); -+ } -+ -+ type = clkspec->args[0]; -+ idx = clkspec->args[1]; -+ -+ switch (type) { -+ case 0: -+ if (idx != 0) -+ goto bad_args; -+ clk = cg->sysclk; -+ break; -+ case 1: -+ if (idx >= ARRAY_SIZE(cg->cmux)) -+ goto bad_args; -+ clk = cg->cmux[idx]; -+ break; -+ case 2: -+ if (idx >= ARRAY_SIZE(cg->hwaccel)) -+ goto bad_args; -+ clk = cg->hwaccel[idx]; -+ break; -+ case 3: -+ if (idx >= ARRAY_SIZE(cg->fman)) -+ goto bad_args; -+ clk = cg->fman[idx]; -+ break; -+ case 4: -+ pll = &cg->pll[PLATFORM_PLL]; -+ if (idx >= ARRAY_SIZE(pll->div)) -+ goto bad_args; -+ 
clk = pll->div[idx].clk; -+ break; -+ default: -+ goto bad_args; -+ } -+ -+ if (!clk) -+ return ERR_PTR(-ENOENT); -+ return clk; -+ -+bad_args: -+ pr_err("%s: Bad phandle args %u %u\n", __func__, type, idx); -+ return ERR_PTR(-EINVAL); -+} -+ -+#ifdef CONFIG_PPC -+ -+static const u32 a4510_svrs[] __initconst = { -+ (SVR_P2040 << 8) | 0x10, /* P2040 1.0 */ -+ (SVR_P2040 << 8) | 0x11, /* P2040 1.1 */ -+ (SVR_P2041 << 8) | 0x10, /* P2041 1.0 */ -+ (SVR_P2041 << 8) | 0x11, /* P2041 1.1 */ -+ (SVR_P3041 << 8) | 0x10, /* P3041 1.0 */ -+ (SVR_P3041 << 8) | 0x11, /* P3041 1.1 */ -+ (SVR_P4040 << 8) | 0x20, /* P4040 2.0 */ -+ (SVR_P4080 << 8) | 0x20, /* P4080 2.0 */ -+ (SVR_P5010 << 8) | 0x10, /* P5010 1.0 */ -+ (SVR_P5010 << 8) | 0x20, /* P5010 2.0 */ -+ (SVR_P5020 << 8) | 0x10, /* P5020 1.0 */ -+ (SVR_P5021 << 8) | 0x10, /* P5021 1.0 */ -+ (SVR_P5040 << 8) | 0x10, /* P5040 1.0 */ -+}; -+ -+#define SVR_SECURITY 0x80000 /* The Security (E) bit */ -+ -+static bool __init has_erratum_a4510(void) -+{ -+ u32 svr = mfspr(SPRN_SVR); -+ int i; -+ -+ svr &= ~SVR_SECURITY; -+ -+ for (i = 0; i < ARRAY_SIZE(a4510_svrs); i++) { -+ if (svr == a4510_svrs[i]) -+ return true; -+ } -+ -+ return false; -+} -+#else -+static bool __init has_erratum_a4510(void) -+{ -+ return false; -+} -+#endif -+ -+static void __init clockgen_init(struct device_node *np) -+{ -+ int i, ret; -+ bool is_old_ls1021a = false; -+ -+ /* May have already been called by a legacy probe */ -+ if (clockgen.node) -+ return; -+ -+ clockgen.node = np; -+ clockgen.regs = of_iomap(np, 0); -+ if (!clockgen.regs && -+ of_device_is_compatible(of_root, "fsl,ls1021a")) { -+ /* Compatibility hack for old, broken device trees */ -+ clockgen.regs = ioremap(0x1ee1000, 0x1000); -+ is_old_ls1021a = true; -+ } -+ if (!clockgen.regs) { -+ pr_err("%s(): %s: of_iomap() failed\n", __func__, np->name); -+ return; -+ } -+ -+ for (i = 0; i < ARRAY_SIZE(chipinfo); i++) { -+ if (of_device_is_compatible(np, chipinfo[i].compat)) -+ break; -+ if 
(is_old_ls1021a && -+ !strcmp(chipinfo[i].compat, "fsl,ls1021a-clockgen")) -+ break; -+ } -+ -+ if (i == ARRAY_SIZE(chipinfo)) { -+ pr_err("%s: unknown clockgen node %s\n", __func__, -+ np->full_name); -+ goto err; -+ } -+ clockgen.info = chipinfo[i]; -+ -+ if (clockgen.info.guts_compat) { -+ struct device_node *guts; -+ -+ guts = of_find_compatible_node(NULL, NULL, -+ clockgen.info.guts_compat); -+ if (guts) { -+ clockgen.guts = of_iomap(guts, 0); -+ if (!clockgen.guts) { -+ pr_err("%s: Couldn't map %s regs\n", __func__, -+ guts->full_name); -+ } -+ } -+ -+ } -+ -+ if (has_erratum_a4510()) -+ clockgen.info.flags |= CG_CMUX_GE_PLAT; -+ -+ clockgen.sysclk = create_sysclk("cg-sysclk"); -+ create_plls(&clockgen); -+ create_muxes(&clockgen); -+ -+ if (clockgen.info.init_periph) -+ clockgen.info.init_periph(&clockgen); -+ -+ ret = of_clk_add_provider(np, clockgen_clk_get, &clockgen); -+ if (ret) { -+ pr_err("%s: Couldn't register clk provider for node %s: %d\n", -+ __func__, np->name, ret); -+ } -+ -+ return; -+err: -+ iounmap(clockgen.regs); -+ clockgen.regs = NULL; -+} -+ -+CLK_OF_DECLARE(qoriq_clockgen_1, "fsl,qoriq-clockgen-1.0", clockgen_init); -+CLK_OF_DECLARE(qoriq_clockgen_2, "fsl,qoriq-clockgen-2.0", clockgen_init); -+CLK_OF_DECLARE(qoriq_clockgen_ls1021a, "fsl,ls1021a-clockgen", clockgen_init); -+CLK_OF_DECLARE(qoriq_clockgen_ls2080a, "fsl,ls2080a-clockgen", clockgen_init); -+CLK_OF_DECLARE(qoriq_clockgen_ls2088a, "fsl,ls2088a-clockgen", clockgen_init); -+ -+/* Legacy nodes */ -+CLK_OF_DECLARE(qoriq_sysclk_1, "fsl,qoriq-sysclk-1.0", sysclk_init); -+CLK_OF_DECLARE(qoriq_sysclk_2, "fsl,qoriq-sysclk-2.0", sysclk_init); -+CLK_OF_DECLARE(qoriq_core_pll_1, "fsl,qoriq-core-pll-1.0", core_pll_init); -+CLK_OF_DECLARE(qoriq_core_pll_2, "fsl,qoriq-core-pll-2.0", core_pll_init); -+CLK_OF_DECLARE(qoriq_core_mux_1, "fsl,qoriq-core-mux-1.0", core_mux_init); -+CLK_OF_DECLARE(qoriq_core_mux_2, "fsl,qoriq-core-mux-2.0", core_mux_init); -+CLK_OF_DECLARE(qoriq_pltfrm_pll_1, 
"fsl,qoriq-platform-pll-1.0", pltfrm_pll_init); -+CLK_OF_DECLARE(qoriq_pltfrm_pll_2, "fsl,qoriq-platform-pll-2.0", pltfrm_pll_init); -diff --git a/drivers/cpufreq/Kconfig.powerpc b/drivers/cpufreq/Kconfig.powerpc -index 72564b7..7ea2441 100644 ---- a/drivers/cpufreq/Kconfig.powerpc -+++ b/drivers/cpufreq/Kconfig.powerpc -@@ -26,7 +26,7 @@ config CPU_FREQ_MAPLE - config PPC_CORENET_CPUFREQ - tristate "CPU frequency scaling driver for Freescale E500MC SoCs" - depends on PPC_E500MC && OF && COMMON_CLK -- select CLK_PPC_CORENET -+ select CLK_QORIQ - help - This adds the CPUFreq driver support for Freescale e500mc, - e5500 and e6500 series SoCs which are capable of changing -diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig -index 06e99eb..bbf8ae4 100644 ---- a/drivers/i2c/busses/Kconfig -+++ b/drivers/i2c/busses/Kconfig -@@ -526,10 +526,10 @@ config I2C_IBM_IIC - - config I2C_IMX - tristate "IMX I2C interface" -- depends on ARCH_MXC -+ depends on ARCH_MXC || ARCH_LAYERSCAPE - help - Say Y here if you want to use the IIC bus controller on -- the Freescale i.MX/MXC processors. -+ the Freescale i.MX/MXC and layerscape processors. - - This driver can also be built as a module. If so, the module - will be called i2c-imx. -diff --git a/drivers/i2c/busses/i2c-imx.c b/drivers/i2c/busses/i2c-imx.c -index e9fb7cf..13f88f9 100644 ---- a/drivers/i2c/busses/i2c-imx.c -+++ b/drivers/i2c/busses/i2c-imx.c -@@ -33,6 +33,10 @@ - *******************************************************************************/ - - #include -+#include -+#include -+#include -+#include - #include - #include - #include -@@ -47,6 +51,7 @@ - #include - #include - #include -+#include - #include - - /** Defines ******************************************************************** -@@ -58,6 +63,15 @@ - /* Default value */ - #define IMX_I2C_BIT_RATE 100000 /* 100kHz */ - -+/* -+ * Enable DMA if transfer byte size is bigger than this threshold. 
-+ * As the hardware request, it must bigger than 4 bytes.\ -+ * I have set '16' here, maybe it's not the best but I think it's -+ * the appropriate. -+ */ -+#define DMA_THRESHOLD 16 -+#define DMA_TIMEOUT 1000 -+ - /* IMX I2C registers: - * the I2C register offset is different between SoCs, - * to provid support for all these chips, split the -@@ -83,6 +97,7 @@ - #define I2SR_IBB 0x20 - #define I2SR_IAAS 0x40 - #define I2SR_ICF 0x80 -+#define I2CR_DMAEN 0x02 - #define I2CR_RSTA 0x04 - #define I2CR_TXAK 0x08 - #define I2CR_MTX 0x10 -@@ -169,6 +184,17 @@ struct imx_i2c_hwdata { - unsigned i2cr_ien_opcode; - }; - -+struct imx_i2c_dma { -+ struct dma_chan *chan_tx; -+ struct dma_chan *chan_rx; -+ struct dma_chan *chan_using; -+ struct completion cmd_complete; -+ dma_addr_t dma_buf; -+ unsigned int dma_len; -+ enum dma_transfer_direction dma_transfer_dir; -+ enum dma_data_direction dma_data_dir; -+}; -+ - struct imx_i2c_struct { - struct i2c_adapter adapter; - struct clk *clk; -@@ -181,6 +207,8 @@ struct imx_i2c_struct { - unsigned int cur_clk; - unsigned int bitrate; - const struct imx_i2c_hwdata *hwdata; -+ -+ struct imx_i2c_dma *dma; - }; - - static const struct imx_i2c_hwdata imx1_i2c_hwdata = { -@@ -251,6 +279,162 @@ static inline unsigned char imx_i2c_read_reg(struct imx_i2c_struct *i2c_imx, - return readb(i2c_imx->base + (reg << i2c_imx->hwdata->regshift)); - } - -+/* Functions for DMA support */ -+static void i2c_imx_dma_request(struct imx_i2c_struct *i2c_imx, -+ dma_addr_t phy_addr) -+{ -+ struct imx_i2c_dma *dma; -+ struct dma_slave_config dma_sconfig; -+ struct device *dev = &i2c_imx->adapter.dev; -+ int ret; -+ -+ dma = devm_kzalloc(dev, sizeof(*dma), GFP_KERNEL); -+ if (!dma) -+ return; -+ -+ dma->chan_tx = dma_request_slave_channel(dev, "tx"); -+ if (!dma->chan_tx) { -+ dev_dbg(dev, "can't request DMA tx channel\n"); -+ goto fail_al; -+ } -+ -+ dma_sconfig.dst_addr = phy_addr + -+ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift); -+ dma_sconfig.dst_addr_width = 
DMA_SLAVE_BUSWIDTH_1_BYTE; -+ dma_sconfig.dst_maxburst = 1; -+ dma_sconfig.direction = DMA_MEM_TO_DEV; -+ ret = dmaengine_slave_config(dma->chan_tx, &dma_sconfig); -+ if (ret < 0) { -+ dev_dbg(dev, "can't configure tx channel\n"); -+ goto fail_tx; -+ } -+ -+ dma->chan_rx = dma_request_slave_channel(dev, "rx"); -+ if (!dma->chan_rx) { -+ dev_dbg(dev, "can't request DMA rx channel\n"); -+ goto fail_tx; -+ } -+ -+ dma_sconfig.src_addr = phy_addr + -+ (IMX_I2C_I2DR << i2c_imx->hwdata->regshift); -+ dma_sconfig.src_addr_width = DMA_SLAVE_BUSWIDTH_1_BYTE; -+ dma_sconfig.src_maxburst = 1; -+ dma_sconfig.direction = DMA_DEV_TO_MEM; -+ ret = dmaengine_slave_config(dma->chan_rx, &dma_sconfig); -+ if (ret < 0) { -+ dev_dbg(dev, "can't configure rx channel\n"); -+ goto fail_rx; -+ } -+ -+ i2c_imx->dma = dma; -+ init_completion(&dma->cmd_complete); -+ dev_info(dev, "using %s (tx) and %s (rx) for DMA transfers\n", -+ dma_chan_name(dma->chan_tx), dma_chan_name(dma->chan_rx)); -+ -+ return; -+ -+fail_rx: -+ dma_release_channel(dma->chan_rx); -+fail_tx: -+ dma_release_channel(dma->chan_tx); -+fail_al: -+ devm_kfree(dev, dma); -+ dev_info(dev, "can't use DMA\n"); -+} -+ -+static void i2c_imx_dma_callback(void *arg) -+{ -+ struct imx_i2c_struct *i2c_imx = (struct imx_i2c_struct *)arg; -+ struct imx_i2c_dma *dma = i2c_imx->dma; -+ -+ dma_unmap_single(dma->chan_using->device->dev, dma->dma_buf, -+ dma->dma_len, dma->dma_data_dir); -+ complete(&dma->cmd_complete); -+} -+ -+static int i2c_imx_dma_xfer(struct imx_i2c_struct *i2c_imx, -+ struct i2c_msg *msgs) -+{ -+ struct imx_i2c_dma *dma = i2c_imx->dma; -+ struct dma_async_tx_descriptor *txdesc; -+ struct device *dev = &i2c_imx->adapter.dev; -+ struct device *chan_dev = dma->chan_using->device->dev; -+ -+ dma->dma_buf = dma_map_single(chan_dev, msgs->buf, -+ dma->dma_len, dma->dma_data_dir); -+ if (dma_mapping_error(chan_dev, dma->dma_buf)) { -+ dev_err(dev, "DMA mapping failed\n"); -+ goto err_map; -+ } -+ -+ txdesc = 
dmaengine_prep_slave_single(dma->chan_using, dma->dma_buf, -+ dma->dma_len, dma->dma_transfer_dir, -+ DMA_PREP_INTERRUPT | DMA_CTRL_ACK); -+ if (!txdesc) { -+ dev_err(dev, "Not able to get desc for DMA xfer\n"); -+ goto err_desc; -+ } -+ -+ txdesc->callback = i2c_imx_dma_callback; -+ txdesc->callback_param = i2c_imx; -+ if (dma_submit_error(dmaengine_submit(txdesc))) { -+ dev_err(dev, "DMA submit failed\n"); -+ goto err_submit; -+ } -+ -+ dma_async_issue_pending(dma->chan_using); -+ return 0; -+ -+err_submit: -+err_desc: -+ dma_unmap_single(chan_dev, dma->dma_buf, -+ dma->dma_len, dma->dma_data_dir); -+err_map: -+ return -EINVAL; -+} -+ -+static void i2c_imx_dma_free(struct imx_i2c_struct *i2c_imx) -+{ -+ struct imx_i2c_dma *dma = i2c_imx->dma; -+ -+ dma->dma_buf = 0; -+ dma->dma_len = 0; -+ -+ dma_release_channel(dma->chan_tx); -+ dma->chan_tx = NULL; -+ -+ dma_release_channel(dma->chan_rx); -+ dma->chan_rx = NULL; -+ -+ dma->chan_using = NULL; -+} -+ -+/* -+ * When a system reset does not cause all I2C devices to be reset, it is -+ * sometimes necessary to force the I2C module to become the I2C bus master -+ * out of reset and drive SCL A slave can hold bus low to cause bus hang. -+ * Thus, SDA can be driven low by another I2C device while this I2C module -+ * is coming out of reset and will stay low indefinitely. -+ * The I2C master has to generate 9 clock pulses to get the bus free or idle. 
-+ */ -+static void imx_i2c_fixup(struct imx_i2c_struct *i2c_imx) -+{ -+ int k; -+ u32 delay_val = 1000000 / i2c_imx->cur_clk + 1; -+ -+ if (delay_val < 2) -+ delay_val = 2; -+ -+ for (k = 9; k; k--) { -+ imx_i2c_write_reg(I2CR_IEN, i2c_imx, IMX_I2C_I2CR); -+ imx_i2c_write_reg((I2CR_MSTA | I2CR_MTX) & (~I2CR_IEN), -+ i2c_imx, IMX_I2C_I2CR); -+ imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); -+ imx_i2c_write_reg(0, i2c_imx, IMX_I2C_I2CR); -+ udelay(delay_val << 1); -+ } -+} -+ - /** Functions for IMX I2C adapter driver *************************************** - *******************************************************************************/ - -@@ -276,8 +460,15 @@ static int i2c_imx_bus_busy(struct imx_i2c_struct *i2c_imx, int for_busy) - if (!for_busy && !(temp & I2SR_IBB)) - break; - if (time_after(jiffies, orig_jiffies + msecs_to_jiffies(500))) { -+ u8 status = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); -+ - dev_dbg(&i2c_imx->adapter.dev, - "<%s> I2C bus is busy\n", __func__); -+ if ((status & (I2SR_ICF | I2SR_IBB | I2CR_TXAK)) != 0) { -+ imx_i2c_write_reg(status & ~I2SR_IAL, i2c_imx, -+ IMX_I2C_I2CR); -+ imx_i2c_fixup(i2c_imx); -+ } - return -ETIMEDOUT; - } - schedule(); -@@ -382,6 +573,7 @@ static int i2c_imx_start(struct imx_i2c_struct *i2c_imx) - i2c_imx->stopped = 0; - - temp |= I2CR_IIEN | I2CR_MTX | I2CR_TXAK; -+ temp &= ~I2CR_DMAEN; - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); - return result; - } -@@ -395,6 +587,8 @@ static void i2c_imx_stop(struct imx_i2c_struct *i2c_imx) - dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); - temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); - temp &= ~(I2CR_MSTA | I2CR_MTX); -+ if (i2c_imx->dma) -+ temp &= ~I2CR_DMAEN; - imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); - } - if (is_imx1_i2c(i2c_imx)) { -@@ -435,6 +629,157 @@ static irqreturn_t i2c_imx_isr(int irq, void *dev_id) - return IRQ_NONE; - } - -+static int i2c_imx_dma_write(struct imx_i2c_struct *i2c_imx, -+ struct i2c_msg *msgs) -+{ -+ int result; -+ unsigned long 
time_left; -+ unsigned int temp = 0; -+ unsigned long orig_jiffies = jiffies; -+ struct imx_i2c_dma *dma = i2c_imx->dma; -+ struct device *dev = &i2c_imx->adapter.dev; -+ -+ dma->chan_using = dma->chan_tx; -+ dma->dma_transfer_dir = DMA_MEM_TO_DEV; -+ dma->dma_data_dir = DMA_TO_DEVICE; -+ dma->dma_len = msgs->len - 1; -+ result = i2c_imx_dma_xfer(i2c_imx, msgs); -+ if (result) -+ return result; -+ -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp |= I2CR_DMAEN; -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ -+ /* -+ * Write slave address. -+ * The first byte must be transmitted by the CPU. -+ */ -+ imx_i2c_write_reg(msgs->addr << 1, i2c_imx, IMX_I2C_I2DR); -+ reinit_completion(&i2c_imx->dma->cmd_complete); -+ time_left = wait_for_completion_timeout( -+ &i2c_imx->dma->cmd_complete, -+ msecs_to_jiffies(DMA_TIMEOUT)); -+ if (time_left == 0) { -+ dmaengine_terminate_all(dma->chan_using); -+ return -ETIMEDOUT; -+ } -+ -+ /* Waiting for transfer complete. */ -+ while (1) { -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); -+ if (temp & I2SR_ICF) -+ break; -+ if (time_after(jiffies, orig_jiffies + -+ msecs_to_jiffies(DMA_TIMEOUT))) { -+ dev_dbg(dev, "<%s> Timeout\n", __func__); -+ return -ETIMEDOUT; -+ } -+ schedule(); -+ } -+ -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp &= ~I2CR_DMAEN; -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ -+ /* The last data byte must be transferred by the CPU. 
*/ -+ imx_i2c_write_reg(msgs->buf[msgs->len-1], -+ i2c_imx, IMX_I2C_I2DR); -+ result = i2c_imx_trx_complete(i2c_imx); -+ if (result) -+ return result; -+ -+ return i2c_imx_acked(i2c_imx); -+} -+ -+static int i2c_imx_dma_read(struct imx_i2c_struct *i2c_imx, -+ struct i2c_msg *msgs, bool is_lastmsg) -+{ -+ int result; -+ unsigned long time_left; -+ unsigned int temp; -+ unsigned long orig_jiffies = jiffies; -+ struct imx_i2c_dma *dma = i2c_imx->dma; -+ struct device *dev = &i2c_imx->adapter.dev; -+ -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp |= I2CR_DMAEN; -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ -+ dma->chan_using = dma->chan_rx; -+ dma->dma_transfer_dir = DMA_DEV_TO_MEM; -+ dma->dma_data_dir = DMA_FROM_DEVICE; -+ /* The last two data bytes must be transferred by the CPU. */ -+ dma->dma_len = msgs->len - 2; -+ result = i2c_imx_dma_xfer(i2c_imx, msgs); -+ if (result) -+ return result; -+ -+ reinit_completion(&i2c_imx->dma->cmd_complete); -+ time_left = wait_for_completion_timeout( -+ &i2c_imx->dma->cmd_complete, -+ msecs_to_jiffies(DMA_TIMEOUT)); -+ if (time_left == 0) { -+ dmaengine_terminate_all(dma->chan_using); -+ return -ETIMEDOUT; -+ } -+ -+ /* waiting for transfer complete. 
*/ -+ while (1) { -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2SR); -+ if (temp & I2SR_ICF) -+ break; -+ if (time_after(jiffies, orig_jiffies + -+ msecs_to_jiffies(DMA_TIMEOUT))) { -+ dev_dbg(dev, "<%s> Timeout\n", __func__); -+ return -ETIMEDOUT; -+ } -+ schedule(); -+ } -+ -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp &= ~I2CR_DMAEN; -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ -+ /* read n-1 byte data */ -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp |= I2CR_TXAK; -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ -+ msgs->buf[msgs->len-2] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); -+ /* read n byte data */ -+ result = i2c_imx_trx_complete(i2c_imx); -+ if (result) -+ return result; -+ -+ if (is_lastmsg) { -+ /* -+ * It must generate STOP before read I2DR to prevent -+ * controller from generating another clock cycle -+ */ -+ dev_dbg(dev, "<%s> clear MSTA\n", __func__); -+ temp = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2CR); -+ temp &= ~(I2CR_MSTA | I2CR_MTX); -+ imx_i2c_write_reg(temp, i2c_imx, IMX_I2C_I2CR); -+ i2c_imx_bus_busy(i2c_imx, 0); -+ i2c_imx->stopped = 1; -+ } else { -+ /* -+ * For i2c master receiver repeat restart operation like: -+ * read -> repeat MSTA -> read/write -+ * The controller must set MTX before read the last byte in -+ * the first read operation, otherwise the first read cost -+ * one extra clock cycle. 
-+ */ -+ temp = readb(i2c_imx->base + IMX_I2C_I2CR); -+ temp |= I2CR_MTX; -+ writeb(temp, i2c_imx->base + IMX_I2C_I2CR); -+ } -+ msgs->buf[msgs->len-1] = imx_i2c_read_reg(i2c_imx, IMX_I2C_I2DR); -+ -+ return 0; -+} -+ - static int i2c_imx_write(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs) - { - int i, result; -@@ -504,6 +849,9 @@ static int i2c_imx_read(struct imx_i2c_struct *i2c_imx, struct i2c_msg *msgs, bo - - dev_dbg(&i2c_imx->adapter.dev, "<%s> read data\n", __func__); - -+ if (i2c_imx->dma && msgs->len >= DMA_THRESHOLD && !block_data) -+ return i2c_imx_dma_read(i2c_imx, msgs, is_lastmsg); -+ - /* read data */ - for (i = 0; i < msgs->len; i++) { - u8 len = 0; -@@ -577,6 +925,13 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, - - dev_dbg(&i2c_imx->adapter.dev, "<%s>\n", __func__); - -+ /* workround for ERR010027: ensure that the I2C BUS is idle -+ before switching to master mode and attempting a Start cycle -+ */ -+ result = i2c_imx_bus_busy(i2c_imx, 0); -+ if (result) -+ goto fail0; -+ - /* Start I2C transfer */ - result = i2c_imx_start(i2c_imx); - if (result) -@@ -618,8 +973,12 @@ static int i2c_imx_xfer(struct i2c_adapter *adapter, - #endif - if (msgs[i].flags & I2C_M_RD) - result = i2c_imx_read(i2c_imx, &msgs[i], is_lastmsg); -- else -- result = i2c_imx_write(i2c_imx, &msgs[i]); -+ else { -+ if (i2c_imx->dma && msgs[i].len >= DMA_THRESHOLD) -+ result = i2c_imx_dma_write(i2c_imx, &msgs[i]); -+ else -+ result = i2c_imx_write(i2c_imx, &msgs[i]); -+ } - if (result) - goto fail0; - } -@@ -654,6 +1013,7 @@ static int i2c_imx_probe(struct platform_device *pdev) - struct imxi2c_platform_data *pdata = dev_get_platdata(&pdev->dev); - void __iomem *base; - int irq, ret; -+ dma_addr_t phy_addr; - - dev_dbg(&pdev->dev, "<%s>\n", __func__); - -@@ -668,6 +1028,7 @@ static int i2c_imx_probe(struct platform_device *pdev) - if (IS_ERR(base)) - return PTR_ERR(base); - -+ phy_addr = (dma_addr_t)res->start; - i2c_imx = devm_kzalloc(&pdev->dev, sizeof(struct 
imx_i2c_struct), - GFP_KERNEL); - if (!i2c_imx) -@@ -701,7 +1062,7 @@ static int i2c_imx_probe(struct platform_device *pdev) - return ret; - } - /* Request IRQ */ -- ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, 0, -+ ret = devm_request_irq(&pdev->dev, irq, i2c_imx_isr, IRQF_SHARED, - pdev->name, i2c_imx); - if (ret) { - dev_err(&pdev->dev, "can't claim irq %d\n", irq); -@@ -743,6 +1104,9 @@ static int i2c_imx_probe(struct platform_device *pdev) - i2c_imx->adapter.name); - dev_info(&i2c_imx->adapter.dev, "IMX I2C adapter registered\n"); - -+ /* Init DMA config if support*/ -+ i2c_imx_dma_request(i2c_imx, phy_addr); -+ - return 0; /* Return OK */ - - clk_disable: -@@ -758,6 +1122,9 @@ static int i2c_imx_remove(struct platform_device *pdev) - dev_dbg(&i2c_imx->adapter.dev, "adapter removed\n"); - i2c_del_adapter(&i2c_imx->adapter); - -+ if (i2c_imx->dma) -+ i2c_imx_dma_free(i2c_imx); -+ - /* setup chip registers to defaults */ - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IADR); - imx_i2c_write_reg(0, i2c_imx, IMX_I2C_IFDR); -diff --git a/drivers/i2c/muxes/i2c-mux-pca9541.c b/drivers/i2c/muxes/i2c-mux-pca9541.c -index cb77277..0c8d4d2 100644 ---- a/drivers/i2c/muxes/i2c-mux-pca9541.c -+++ b/drivers/i2c/muxes/i2c-mux-pca9541.c -@@ -104,7 +104,7 @@ static int pca9541_reg_write(struct i2c_client *client, u8 command, u8 val) - buf[0] = command; - buf[1] = val; - msg.buf = buf; -- ret = adap->algo->master_xfer(adap, &msg, 1); -+ ret = __i2c_transfer(adap, &msg, 1); - } else { - union i2c_smbus_data data; - -@@ -144,7 +144,7 @@ static int pca9541_reg_read(struct i2c_client *client, u8 command) - .buf = &val - } - }; -- ret = adap->algo->master_xfer(adap, msg, 2); -+ ret = __i2c_transfer(adap, msg, 2); - if (ret == 2) - ret = val; - else if (ret >= 0) -diff --git a/drivers/i2c/muxes/i2c-mux-pca954x.c b/drivers/i2c/muxes/i2c-mux-pca954x.c -index ec11b40..28540a4 100644 ---- a/drivers/i2c/muxes/i2c-mux-pca954x.c -+++ b/drivers/i2c/muxes/i2c-mux-pca954x.c -@@ -41,6 +41,7 @@ - 
#include - #include - #include -+#include - #include - #include - -@@ -62,6 +63,7 @@ struct pca954x { - struct i2c_adapter *virt_adaps[PCA954X_MAX_NCHANS]; - - u8 last_chan; /* last register value */ -+ u8 disable_mux; /* do not disable mux if val not 0 */ - }; - - struct chip_desc { -@@ -133,7 +135,7 @@ static int pca954x_reg_write(struct i2c_adapter *adap, - msg.len = 1; - buf[0] = val; - msg.buf = buf; -- ret = adap->algo->master_xfer(adap, &msg, 1); -+ ret = __i2c_transfer(adap, &msg, 1); - } else { - union i2c_smbus_data data; - ret = adap->algo->smbus_xfer(adap, client->addr, -@@ -173,6 +175,13 @@ static int pca954x_deselect_mux(struct i2c_adapter *adap, - { - struct pca954x *data = i2c_get_clientdata(client); - -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ if (data->disable_mux != 0) -+ data->last_chan = chips[data->type].nchans; -+ else -+ data->last_chan = 0; -+ return pca954x_reg_write(adap, client, data->disable_mux); -+#endif - /* Deselect active channel */ - data->last_chan = 0; - return pca954x_reg_write(adap, client, data->last_chan); -@@ -186,6 +195,8 @@ static int pca954x_probe(struct i2c_client *client, - { - struct i2c_adapter *adap = to_i2c_adapter(client->dev.parent); - struct pca954x_platform_data *pdata = dev_get_platdata(&client->dev); -+ struct device_node *of_node = client->dev.of_node; -+ bool idle_disconnect_dt; - struct gpio_desc *gpio; - int num, force, class; - struct pca954x *data; -@@ -198,27 +209,55 @@ static int pca954x_probe(struct i2c_client *client, - if (!data) - return -ENOMEM; - -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ /* The point here is that you must not disable a mux if there -+ * are no pullups on the input or you mess up the I2C. This -+ * needs to be put into the DTS really as the kernel cannot -+ * know this otherwise. -+ */ -+ data->type = id->driver_data; -+ data->disable_mux = of_node && -+ of_property_read_bool(of_node, "i2c-mux-never-disable") && -+ chips[data->type].muxtype == pca954x_ismux ? 
-+ chips[data->type].enable : 0; -+ /* force the first selection */ -+ if (data->disable_mux != 0) -+ data->last_chan = chips[data->type].nchans; -+ else -+ data->last_chan = 0; -+#endif - i2c_set_clientdata(client, data); - - /* Get the mux out of reset if a reset GPIO is specified. */ -- gpio = devm_gpiod_get(&client->dev, "reset"); -- if (!IS_ERR(gpio)) -- gpiod_direction_output(gpio, 0); -+ gpio = devm_gpiod_get_optional(&client->dev, "reset", GPIOD_OUT_LOW); -+ if (IS_ERR(gpio)) -+ return PTR_ERR(gpio); - - /* Write the mux register at addr to verify - * that the mux is in fact present. This also - * initializes the mux to disconnected state. - */ -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ if (i2c_smbus_write_byte(client, data->disable_mux) < 0) { -+#else - if (i2c_smbus_write_byte(client, 0) < 0) { -+#endif - dev_warn(&client->dev, "probe failed\n"); - return -ENODEV; - } - -+#ifndef CONFIG_ARCH_LAYERSCAPE - data->type = id->driver_data; - data->last_chan = 0; /* force the first selection */ -+#endif -+ -+ idle_disconnect_dt = of_node && -+ of_property_read_bool(of_node, "i2c-mux-idle-disconnect"); - - /* Now create an adapter for each channel */ - for (num = 0; num < chips[data->type].nchans; num++) { -+ bool idle_disconnect_pd = false; -+ - force = 0; /* dynamic adap number */ - class = 0; /* no class by default */ - if (pdata) { -@@ -229,12 +268,13 @@ static int pca954x_probe(struct i2c_client *client, - } else - /* discard unconfigured channels */ - break; -+ idle_disconnect_pd = pdata->modes[num].deselect_on_exit; - } - - data->virt_adaps[num] = - i2c_add_mux_adapter(adap, &client->dev, client, - force, num, class, pca954x_select_chan, -- (pdata && pdata->modes[num].deselect_on_exit) -+ (idle_disconnect_pd || idle_disconnect_dt) - ? 
pca954x_deselect_mux : NULL); - - if (data->virt_adaps[num] == NULL) { -@@ -280,6 +320,13 @@ static int pca954x_resume(struct device *dev) - struct i2c_client *client = to_i2c_client(dev); - struct pca954x *data = i2c_get_clientdata(client); - -+#ifdef CONFIG_ARCH_LAYERSCAPE -+ if (data->disable_mux != 0) -+ data->last_chan = chips[data->type].nchans; -+ else -+ data->last_chan = 0; -+ return i2c_smbus_write_byte(client, data->disable_mux); -+#endif - data->last_chan = 0; - return i2c_smbus_write_byte(client, 0); - } -diff --git a/drivers/iommu/fsl_pamu.c b/drivers/iommu/fsl_pamu.c -index 80ac68d..9396c85 100644 ---- a/drivers/iommu/fsl_pamu.c -+++ b/drivers/iommu/fsl_pamu.c -@@ -31,7 +31,7 @@ - #include - #include - #include --#include -+#include - - #include "fsl_pamu.h" - -diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c -index 5a500ed..fd6dd22 100644 ---- a/drivers/iommu/io-pgtable-arm.c -+++ b/drivers/iommu/io-pgtable-arm.c -@@ -56,7 +56,8 @@ - ((((d)->levels - ((l) - ARM_LPAE_START_LVL(d) + 1)) \ - * (d)->bits_per_level) + (d)->pg_shift) - --#define ARM_LPAE_PAGES_PER_PGD(d) ((d)->pgd_size >> (d)->pg_shift) -+#define ARM_LPAE_PAGES_PER_PGD(d) \ -+ DIV_ROUND_UP((d)->pgd_size, 1UL << (d)->pg_shift) - - /* - * Calculate the index at level l used to map virtual address a using the -@@ -66,7 +67,7 @@ - ((l) == ARM_LPAE_START_LVL(d) ? ilog2(ARM_LPAE_PAGES_PER_PGD(d)) : 0) - - #define ARM_LPAE_LVL_IDX(a,l,d) \ -- (((a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ -+ (((u64)(a) >> ARM_LPAE_LVL_SHIFT(l,d)) & \ - ((1 << ((d)->bits_per_level + ARM_LPAE_PGD_IDX(l,d))) - 1)) - - /* Calculate the block/page mapping size at level l for pagetable in d. 
*/ -@@ -115,6 +116,8 @@ - #define ARM_32_LPAE_TCR_EAE (1 << 31) - #define ARM_64_LPAE_S2_TCR_RES1 (1 << 31) - -+#define ARM_LPAE_TCR_EPD1 (1 << 23) -+ - #define ARM_LPAE_TCR_TG0_4K (0 << 14) - #define ARM_LPAE_TCR_TG0_64K (1 << 14) - #define ARM_LPAE_TCR_TG0_16K (2 << 14) -@@ -283,6 +286,9 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, - if (prot & IOMMU_CACHE) - pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE - << ARM_LPAE_PTE_ATTRINDX_SHIFT); -+ else if (prot & IOMMU_MMIO) -+ pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV -+ << ARM_LPAE_PTE_ATTRINDX_SHIFT); - } else { - pte = ARM_LPAE_PTE_HAP_FAULT; - if (prot & IOMMU_READ) -@@ -291,6 +297,8 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data, - pte |= ARM_LPAE_PTE_HAP_WRITE; - if (prot & IOMMU_CACHE) - pte |= ARM_LPAE_PTE_MEMATTR_OIWB; -+ else if (prot & IOMMU_MMIO) -+ pte |= ARM_LPAE_PTE_MEMATTR_DEV; - else - pte |= ARM_LPAE_PTE_MEMATTR_NC; - } -@@ -620,6 +628,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie) - } - - reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT; -+ -+ /* Disable speculative walks through TTBR1 */ -+ reg |= ARM_LPAE_TCR_EPD1; - cfg->arm_lpae_s1_cfg.tcr = reg; - - /* MAIRs */ -diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig -index caf590c..e72e239 100644 ---- a/drivers/irqchip/Kconfig -+++ b/drivers/irqchip/Kconfig -@@ -5,8 +5,15 @@ config IRQCHIP - config ARM_GIC - bool - select IRQ_DOMAIN -+ select IRQ_DOMAIN_HIERARCHY - select MULTI_IRQ_HANDLER - -+config ARM_GIC_V2M -+ bool -+ depends on ARM_GIC -+ depends on PCI && PCI_MSI -+ select PCI_MSI_IRQ_DOMAIN -+ - config GIC_NON_BANKED - bool - -@@ -14,6 +21,7 @@ config ARM_GIC_V3 - bool - select IRQ_DOMAIN - select MULTI_IRQ_HANDLER -+ select IRQ_DOMAIN_HIERARCHY - - config ARM_GIC_V3_ITS - bool -diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile -index ec3621d..1c4f9a4 100644 ---- a/drivers/irqchip/Makefile -+++ b/drivers/irqchip/Makefile -@@ -19,6 
+19,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o - obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o - obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o - obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o -+obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o - obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o - obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o - obj-$(CONFIG_ARM_NVIC) += irq-nvic.o -diff --git a/drivers/irqchip/irq-gic-common.c b/drivers/irqchip/irq-gic-common.c -index 61541ff..ad96ebb 100644 ---- a/drivers/irqchip/irq-gic-common.c -+++ b/drivers/irqchip/irq-gic-common.c -@@ -21,7 +21,7 @@ - - #include "irq-gic-common.h" - --void gic_configure_irq(unsigned int irq, unsigned int type, -+int gic_configure_irq(unsigned int irq, unsigned int type, - void __iomem *base, void (*sync_access)(void)) - { - u32 enablemask = 1 << (irq % 32); -@@ -29,16 +29,17 @@ void gic_configure_irq(unsigned int irq, unsigned int type, - u32 confmask = 0x2 << ((irq % 16) * 2); - u32 confoff = (irq / 16) * 4; - bool enabled = false; -- u32 val; -+ u32 val, oldval; -+ int ret = 0; - - /* - * Read current configuration register, and insert the config - * for "irq", depending on "type". - */ -- val = readl_relaxed(base + GIC_DIST_CONFIG + confoff); -- if (type == IRQ_TYPE_LEVEL_HIGH) -+ val = oldval = readl_relaxed(base + GIC_DIST_CONFIG + confoff); -+ if (type & IRQ_TYPE_LEVEL_MASK) - val &= ~confmask; -- else if (type == IRQ_TYPE_EDGE_RISING) -+ else if (type & IRQ_TYPE_EDGE_BOTH) - val |= confmask; - - /* -@@ -54,15 +55,20 @@ void gic_configure_irq(unsigned int irq, unsigned int type, - - /* - * Write back the new configuration, and possibly re-enable -- * the interrupt. -+ * the interrupt. If we tried to write a new configuration and failed, -+ * return an error. 
- */ - writel_relaxed(val, base + GIC_DIST_CONFIG + confoff); -+ if (readl_relaxed(base + GIC_DIST_CONFIG + confoff) != val && val != oldval) -+ ret = -EINVAL; - - if (enabled) - writel_relaxed(enablemask, base + GIC_DIST_ENABLE_SET + enableoff); - - if (sync_access) - sync_access(); -+ -+ return ret; - } - - void __init gic_dist_config(void __iomem *base, int gic_irqs, -diff --git a/drivers/irqchip/irq-gic-common.h b/drivers/irqchip/irq-gic-common.h -index b41f024..35a9884 100644 ---- a/drivers/irqchip/irq-gic-common.h -+++ b/drivers/irqchip/irq-gic-common.h -@@ -20,7 +20,7 @@ - #include - #include - --void gic_configure_irq(unsigned int irq, unsigned int type, -+int gic_configure_irq(unsigned int irq, unsigned int type, - void __iomem *base, void (*sync_access)(void)); - void gic_dist_config(void __iomem *base, int gic_irqs, - void (*sync_access)(void)); -diff --git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c -new file mode 100644 -index 0000000..fdf7065 ---- /dev/null -+++ b/drivers/irqchip/irq-gic-v2m.c -@@ -0,0 +1,333 @@ -+/* -+ * ARM GIC v2m MSI(-X) support -+ * Support for Message Signaled Interrupts for systems that -+ * implement ARM Generic Interrupt Controller: GICv2m. -+ * -+ * Copyright (C) 2014 Advanced Micro Devices, Inc. -+ * Authors: Suravee Suthikulpanit -+ * Harish Kasiviswanathan -+ * Brandon Anderson -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License version 2 as published -+ * by the Free Software Foundation. 
-+ */ -+ -+#define pr_fmt(fmt) "GICv2m: " fmt -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* -+* MSI_TYPER: -+* [31:26] Reserved -+* [25:16] lowest SPI assigned to MSI -+* [15:10] Reserved -+* [9:0] Numer of SPIs assigned to MSI -+*/ -+#define V2M_MSI_TYPER 0x008 -+#define V2M_MSI_TYPER_BASE_SHIFT 16 -+#define V2M_MSI_TYPER_BASE_MASK 0x3FF -+#define V2M_MSI_TYPER_NUM_MASK 0x3FF -+#define V2M_MSI_SETSPI_NS 0x040 -+#define V2M_MIN_SPI 32 -+#define V2M_MAX_SPI 1019 -+ -+#define V2M_MSI_TYPER_BASE_SPI(x) \ -+ (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) -+ -+#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK) -+ -+struct v2m_data { -+ spinlock_t msi_cnt_lock; -+ struct msi_controller mchip; -+ struct resource res; /* GICv2m resource */ -+ void __iomem *base; /* GICv2m virt address */ -+ u32 spi_start; /* The SPI number that MSIs start */ -+ u32 nr_spis; /* The number of SPIs for MSIs */ -+ unsigned long *bm; /* MSI vector bitmap */ -+ struct irq_domain *domain; -+}; -+ -+static void gicv2m_mask_msi_irq(struct irq_data *d) -+{ -+ pci_msi_mask_irq(d); -+ irq_chip_mask_parent(d); -+} -+ -+static void gicv2m_unmask_msi_irq(struct irq_data *d) -+{ -+ pci_msi_unmask_irq(d); -+ irq_chip_unmask_parent(d); -+} -+ -+static struct irq_chip gicv2m_msi_irq_chip = { -+ .name = "MSI", -+ .irq_mask = gicv2m_mask_msi_irq, -+ .irq_unmask = gicv2m_unmask_msi_irq, -+ .irq_eoi = irq_chip_eoi_parent, -+ .irq_write_msi_msg = pci_msi_domain_write_msg, -+}; -+ -+static struct msi_domain_info gicv2m_msi_domain_info = { -+ .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | -+ MSI_FLAG_PCI_MSIX), -+ .chip = &gicv2m_msi_irq_chip, -+}; -+ -+static int gicv2m_set_affinity(struct irq_data *irq_data, -+ const struct cpumask *mask, bool force) -+{ -+ int ret; -+ -+ ret = irq_chip_set_affinity_parent(irq_data, mask, force); -+ if (ret == IRQ_SET_MASK_OK) -+ ret = IRQ_SET_MASK_OK_DONE; -+ -+ return ret; -+} -+ -+static 
void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) -+{ -+ struct v2m_data *v2m = irq_data_get_irq_chip_data(data); -+ phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS; -+ -+ msg->address_hi = (u32) (addr >> 32); -+ msg->address_lo = (u32) (addr); -+ msg->data = data->hwirq; -+} -+ -+static struct irq_chip gicv2m_irq_chip = { -+ .name = "GICv2m", -+ .irq_mask = irq_chip_mask_parent, -+ .irq_unmask = irq_chip_unmask_parent, -+ .irq_eoi = irq_chip_eoi_parent, -+ .irq_set_affinity = gicv2m_set_affinity, -+ .irq_compose_msi_msg = gicv2m_compose_msi_msg, -+}; -+ -+static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, -+ unsigned int virq, -+ irq_hw_number_t hwirq) -+{ -+ struct of_phandle_args args; -+ struct irq_data *d; -+ int err; -+ -+ args.np = domain->parent->of_node; -+ args.args_count = 3; -+ args.args[0] = 0; -+ args.args[1] = hwirq - 32; -+ args.args[2] = IRQ_TYPE_EDGE_RISING; -+ -+ err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); -+ if (err) -+ return err; -+ -+ /* Configure the interrupt line to be edge */ -+ d = irq_domain_get_irq_data(domain->parent, virq); -+ d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); -+ return 0; -+} -+ -+static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq) -+{ -+ int pos; -+ -+ pos = hwirq - v2m->spi_start; -+ if (pos < 0 || pos >= v2m->nr_spis) { -+ pr_err("Failed to teardown msi. 
Invalid hwirq %d\n", hwirq); -+ return; -+ } -+ -+ spin_lock(&v2m->msi_cnt_lock); -+ __clear_bit(pos, v2m->bm); -+ spin_unlock(&v2m->msi_cnt_lock); -+} -+ -+static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *args) -+{ -+ struct v2m_data *v2m = domain->host_data; -+ int hwirq, offset, err = 0; -+ -+ spin_lock(&v2m->msi_cnt_lock); -+ offset = find_first_zero_bit(v2m->bm, v2m->nr_spis); -+ if (offset < v2m->nr_spis) -+ __set_bit(offset, v2m->bm); -+ else -+ err = -ENOSPC; -+ spin_unlock(&v2m->msi_cnt_lock); -+ -+ if (err) -+ return err; -+ -+ hwirq = v2m->spi_start + offset; -+ -+ err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); -+ if (err) { -+ gicv2m_unalloc_msi(v2m, hwirq); -+ return err; -+ } -+ -+ irq_domain_set_hwirq_and_chip(domain, virq, hwirq, -+ &gicv2m_irq_chip, v2m); -+ -+ return 0; -+} -+ -+static void gicv2m_irq_domain_free(struct irq_domain *domain, -+ unsigned int virq, unsigned int nr_irqs) -+{ -+ struct irq_data *d = irq_domain_get_irq_data(domain, virq); -+ struct v2m_data *v2m = irq_data_get_irq_chip_data(d); -+ -+ BUG_ON(nr_irqs != 1); -+ gicv2m_unalloc_msi(v2m, d->hwirq); -+ irq_domain_free_irqs_parent(domain, virq, nr_irqs); -+} -+ -+static const struct irq_domain_ops gicv2m_domain_ops = { -+ .alloc = gicv2m_irq_domain_alloc, -+ .free = gicv2m_irq_domain_free, -+}; -+ -+static bool is_msi_spi_valid(u32 base, u32 num) -+{ -+ if (base < V2M_MIN_SPI) { -+ pr_err("Invalid MSI base SPI (base:%u)\n", base); -+ return false; -+ } -+ -+ if ((num == 0) || (base + num > V2M_MAX_SPI)) { -+ pr_err("Number of SPIs (%u) exceed maximum (%u)\n", -+ num, V2M_MAX_SPI - V2M_MIN_SPI + 1); -+ return false; -+ } -+ -+ return true; -+} -+ -+static int __init gicv2m_init_one(struct device_node *node, -+ struct irq_domain *parent) -+{ -+ int ret; -+ struct v2m_data *v2m; -+ -+ v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL); -+ if (!v2m) { -+ pr_err("Failed to allocate struct v2m_data.\n"); -+ return 
-ENOMEM; -+ } -+ -+ ret = of_address_to_resource(node, 0, &v2m->res); -+ if (ret) { -+ pr_err("Failed to allocate v2m resource.\n"); -+ goto err_free_v2m; -+ } -+ -+ v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res)); -+ if (!v2m->base) { -+ pr_err("Failed to map GICv2m resource\n"); -+ ret = -ENOMEM; -+ goto err_free_v2m; -+ } -+ -+ if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) && -+ !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) { -+ pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n", -+ v2m->spi_start, v2m->nr_spis); -+ } else { -+ u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER); -+ -+ v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer); -+ v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer); -+ } -+ -+ if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) { -+ ret = -EINVAL; -+ goto err_iounmap; -+ } -+ -+ v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis), -+ GFP_KERNEL); -+ if (!v2m->bm) { -+ ret = -ENOMEM; -+ goto err_iounmap; -+ } -+ -+ v2m->domain = irq_domain_add_tree(NULL, &gicv2m_domain_ops, v2m); -+ if (!v2m->domain) { -+ pr_err("Failed to create GICv2m domain\n"); -+ ret = -ENOMEM; -+ goto err_free_bm; -+ } -+ -+ v2m->domain->parent = parent; -+ v2m->mchip.of_node = node; -+ v2m->mchip.domain = pci_msi_create_irq_domain(node, -+ &gicv2m_msi_domain_info, -+ v2m->domain); -+ if (!v2m->mchip.domain) { -+ pr_err("Failed to create MSI domain\n"); -+ ret = -ENOMEM; -+ goto err_free_domains; -+ } -+ -+ spin_lock_init(&v2m->msi_cnt_lock); -+ -+ ret = of_pci_msi_chip_add(&v2m->mchip); -+ if (ret) { -+ pr_err("Failed to add msi_chip.\n"); -+ goto err_free_domains; -+ } -+ -+ pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name, -+ (unsigned long)v2m->res.start, (unsigned long)v2m->res.end, -+ v2m->spi_start, (v2m->spi_start + v2m->nr_spis)); -+ -+ return 0; -+ -+err_free_domains: -+ if (v2m->mchip.domain) -+ irq_domain_remove(v2m->mchip.domain); -+ if (v2m->domain) -+ 
irq_domain_remove(v2m->domain); -+err_free_bm: -+ kfree(v2m->bm); -+err_iounmap: -+ iounmap(v2m->base); -+err_free_v2m: -+ kfree(v2m); -+ return ret; -+} -+ -+static struct of_device_id gicv2m_device_id[] = { -+ { .compatible = "arm,gic-v2m-frame", }, -+ {}, -+}; -+ -+int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent) -+{ -+ int ret = 0; -+ struct device_node *child; -+ -+ for (child = of_find_matching_node(node, gicv2m_device_id); child; -+ child = of_find_matching_node(child, gicv2m_device_id)) { -+ if (!of_find_property(child, "msi-controller", NULL)) -+ continue; -+ -+ ret = gicv2m_init_one(child, parent); -+ if (ret) { -+ of_node_put(node); -+ break; -+ } -+ } -+ -+ return ret; -+} -diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c -index 43c50ed..d689158 100644 ---- a/drivers/irqchip/irq-gic-v3-its.c -+++ b/drivers/irqchip/irq-gic-v3-its.c -@@ -1293,7 +1293,8 @@ static int its_msi_prepare(struct irq_domain *domain, struct device *dev, - - dev_dbg(dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); - dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); -- return __its_msi_prepare(domain->parent, dev_alias.dev_id, dev, dev_alias.count, info); -+ return __its_msi_prepare(domain, dev_alias.dev_id, -+ dev, dev_alias.count, info); - } - - static struct msi_domain_ops its_pci_msi_ops = { -@@ -1535,13 +1536,14 @@ static int its_probe(struct device_node *node, struct irq_domain *parent) - writel_relaxed(GITS_CTLR_ENABLE, its->base + GITS_CTLR); - - if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { -- its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); -+ its->domain = irq_domain_add_tree(node, &its_domain_ops, its); - if (!its->domain) { - err = -ENOMEM; - goto out_free_tables; - } - - its->domain->parent = parent; -+ its->domain->bus_token = DOMAIN_BUS_NEXUS; - - its->msi_chip.domain = pci_msi_create_irq_domain(node, - &its_pci_msi_domain_info, -diff --git 
a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c -index 34feda3..fd8850d 100644 ---- a/drivers/irqchip/irq-gic-v3.c -+++ b/drivers/irqchip/irq-gic-v3.c -@@ -238,7 +238,9 @@ static int gic_set_type(struct irq_data *d, unsigned int type) - if (irq < 16) - return -EINVAL; - -- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) -+ /* SPIs have restrictions on the supported types */ -+ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && -+ type != IRQ_TYPE_EDGE_RISING) - return -EINVAL; - - if (gic_irq_in_rdist(d)) { -@@ -249,9 +251,7 @@ static int gic_set_type(struct irq_data *d, unsigned int type) - rwp_wait = gic_dist_wait_for_rwp; - } - -- gic_configure_irq(irq, type, base, rwp_wait); -- -- return 0; -+ return gic_configure_irq(irq, type, base, rwp_wait); - } - - static u64 gic_mpidr_to_affinity(u64 mpidr) -@@ -466,7 +466,7 @@ static u16 gic_compute_target_list(int *base_cpu, const struct cpumask *mask, - tlist |= 1 << (mpidr & 0xf); - - cpu = cpumask_next(cpu, mask); -- if (cpu == nr_cpu_ids) -+ if (cpu >= nr_cpu_ids) - goto out; - - mpidr = cpu_logical_map(cpu); -@@ -481,15 +481,19 @@ out: - return tlist; - } - -+#define MPIDR_TO_SGI_AFFINITY(cluster_id, level) \ -+ (MPIDR_AFFINITY_LEVEL(cluster_id, level) \ -+ << ICC_SGI1R_AFFINITY_## level ##_SHIFT) -+ - static void gic_send_sgi(u64 cluster_id, u16 tlist, unsigned int irq) - { - u64 val; - -- val = (MPIDR_AFFINITY_LEVEL(cluster_id, 3) << 48 | -- MPIDR_AFFINITY_LEVEL(cluster_id, 2) << 32 | -- irq << 24 | -- MPIDR_AFFINITY_LEVEL(cluster_id, 1) << 16 | -- tlist); -+ val = (MPIDR_TO_SGI_AFFINITY(cluster_id, 3) | -+ MPIDR_TO_SGI_AFFINITY(cluster_id, 2) | -+ irq << ICC_SGI1R_SGI_ID_SHIFT | -+ MPIDR_TO_SGI_AFFINITY(cluster_id, 1) | -+ tlist << ICC_SGI1R_TARGET_LIST_SHIFT); - - pr_debug("CPU%d: ICC_SGI1R_EL1 %llx\n", smp_processor_id(), val); - gic_write_sgi1r(val); -@@ -617,14 +621,14 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, - /* PPIs */ - if (hw < 32) { - 
irq_set_percpu_devid(irq); -- irq_set_chip_and_handler(irq, &gic_chip, -- handle_percpu_devid_irq); -+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, -+ handle_percpu_devid_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); - } - /* SPIs */ - if (hw >= 32 && hw < gic_data.irq_nr) { -- irq_set_chip_and_handler(irq, &gic_chip, -- handle_fasteoi_irq); -+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, -+ handle_fasteoi_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); - } - /* LPIs */ -@@ -667,9 +671,41 @@ static int gic_irq_domain_xlate(struct irq_domain *d, - return 0; - } - -+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *arg) -+{ -+ int i, ret; -+ irq_hw_number_t hwirq; -+ unsigned int type = IRQ_TYPE_NONE; -+ struct of_phandle_args *irq_data = arg; -+ -+ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, -+ irq_data->args_count, &hwirq, &type); -+ if (ret) -+ return ret; -+ -+ for (i = 0; i < nr_irqs; i++) -+ gic_irq_domain_map(domain, virq + i, hwirq + i); -+ -+ return 0; -+} -+ -+static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs) -+{ -+ int i; -+ -+ for (i = 0; i < nr_irqs; i++) { -+ struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); -+ irq_set_handler(virq + i, NULL); -+ irq_domain_reset_irq_data(d); -+ } -+} -+ - static const struct irq_domain_ops gic_irq_domain_ops = { -- .map = gic_irq_domain_map, - .xlate = gic_irq_domain_xlate, -+ .alloc = gic_irq_domain_alloc, -+ .free = gic_irq_domain_free, - }; - - static int __init gic_of_init(struct device_node *node, struct device_node *parent) -diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c -index 38493ff..ab0b1cb 100644 ---- a/drivers/irqchip/irq-gic.c -+++ b/drivers/irqchip/irq-gic.c -@@ -188,12 +188,15 @@ static int gic_set_type(struct irq_data *d, unsigned int type) - { - void __iomem *base = 
gic_dist_base(d); - unsigned int gicirq = gic_irq(d); -+ int ret; - - /* Interrupt configuration for SGIs can't be changed */ - if (gicirq < 16) - return -EINVAL; - -- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) -+ /* SPIs have restrictions on the supported types */ -+ if (gicirq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && -+ type != IRQ_TYPE_EDGE_RISING) - return -EINVAL; - - raw_spin_lock(&irq_controller_lock); -@@ -201,11 +204,11 @@ static int gic_set_type(struct irq_data *d, unsigned int type) - if (gic_arch_extn.irq_set_type) - gic_arch_extn.irq_set_type(d, type); - -- gic_configure_irq(gicirq, type, base, NULL); -+ ret = gic_configure_irq(gicirq, type, base, NULL); - - raw_spin_unlock(&irq_controller_lock); - -- return 0; -+ return ret; - } - - static int gic_retrigger(struct irq_data *d) -@@ -788,17 +791,16 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, - { - if (hw < 32) { - irq_set_percpu_devid(irq); -- irq_set_chip_and_handler(irq, &gic_chip, -- handle_percpu_devid_irq); -+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, -+ handle_percpu_devid_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); - } else { -- irq_set_chip_and_handler(irq, &gic_chip, -- handle_fasteoi_irq); -+ irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, -+ handle_fasteoi_irq, NULL, NULL); - set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); - - gic_routable_irq_domain_ops->map(d, irq, hw); - } -- irq_set_chip_data(irq, d->host_data); - return 0; - } - -@@ -858,6 +860,31 @@ static struct notifier_block gic_cpu_notifier = { - }; - #endif - -+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, -+ unsigned int nr_irqs, void *arg) -+{ -+ int i, ret; -+ irq_hw_number_t hwirq; -+ unsigned int type = IRQ_TYPE_NONE; -+ struct of_phandle_args *irq_data = arg; -+ -+ ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, -+ irq_data->args_count, &hwirq, &type); -+ if (ret) -+ return ret; -+ -+ 
for (i = 0; i < nr_irqs; i++) -+ gic_irq_domain_map(domain, virq + i, hwirq + i); -+ -+ return 0; -+} -+ -+static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = { -+ .xlate = gic_irq_domain_xlate, -+ .alloc = gic_irq_domain_alloc, -+ .free = irq_domain_free_irqs_top, -+}; -+ - static const struct irq_domain_ops gic_irq_domain_ops = { - .map = gic_irq_domain_map, - .unmap = gic_irq_domain_unmap, -@@ -948,18 +975,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, - gic_cpu_map[i] = 0xff; - - /* -- * For primary GICs, skip over SGIs. -- * For secondary GICs, skip over PPIs, too. -- */ -- if (gic_nr == 0 && (irq_start & 31) > 0) { -- hwirq_base = 16; -- if (irq_start != -1) -- irq_start = (irq_start & ~31) + 16; -- } else { -- hwirq_base = 32; -- } -- -- /* - * Find out how many interrupts are supported. - * The GIC only supports up to 1020 interrupt sources. - */ -@@ -969,10 +984,31 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, - gic_irqs = 1020; - gic->gic_irqs = gic_irqs; - -- gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ -+ if (node) { /* DT case */ -+ const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops; -+ -+ if (!of_property_read_u32(node, "arm,routable-irqs", -+ &nr_routable_irqs)) { -+ ops = &gic_irq_domain_ops; -+ gic_irqs = nr_routable_irqs; -+ } -+ -+ gic->domain = irq_domain_add_linear(node, gic_irqs, ops, gic); -+ } else { /* Non-DT case */ -+ /* -+ * For primary GICs, skip over SGIs. -+ * For secondary GICs, skip over PPIs, too. 
-+ */ -+ if (gic_nr == 0 && (irq_start & 31) > 0) { -+ hwirq_base = 16; -+ if (irq_start != -1) -+ irq_start = (irq_start & ~31) + 16; -+ } else { -+ hwirq_base = 32; -+ } -+ -+ gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */ - -- if (of_property_read_u32(node, "arm,routable-irqs", -- &nr_routable_irqs)) { - irq_base = irq_alloc_descs(irq_start, 16, gic_irqs, - numa_node_id()); - if (IS_ERR_VALUE(irq_base)) { -@@ -983,10 +1019,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start, - - gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base, - hwirq_base, &gic_irq_domain_ops, gic); -- } else { -- gic->domain = irq_domain_add_linear(node, nr_routable_irqs, -- &gic_irq_domain_ops, -- gic); - } - - if (WARN_ON(!gic->domain)) -@@ -1037,6 +1069,10 @@ gic_of_init(struct device_node *node, struct device_node *parent) - irq = irq_of_parse_and_map(node, 0); - gic_cascade_irq(gic_cnt, irq); - } -+ -+ if (IS_ENABLED(CONFIG_ARM_GIC_V2M)) -+ gicv2m_of_init(node, gic_data[gic_cnt].domain); -+ - gic_cnt++; - return 0; - } -diff --git a/drivers/irqchip/irq-hip04.c b/drivers/irqchip/irq-hip04.c -index 9c8f833..5507a0c 100644 ---- a/drivers/irqchip/irq-hip04.c -+++ b/drivers/irqchip/irq-hip04.c -@@ -120,21 +120,24 @@ static int hip04_irq_set_type(struct irq_data *d, unsigned int type) - { - void __iomem *base = hip04_dist_base(d); - unsigned int irq = hip04_irq(d); -+ int ret; - - /* Interrupt configuration for SGIs can't be changed */ - if (irq < 16) - return -EINVAL; - -- if (type != IRQ_TYPE_LEVEL_HIGH && type != IRQ_TYPE_EDGE_RISING) -+ /* SPIs have restrictions on the supported types */ -+ if (irq >= 32 && type != IRQ_TYPE_LEVEL_HIGH && -+ type != IRQ_TYPE_EDGE_RISING) - return -EINVAL; - - raw_spin_lock(&irq_controller_lock); - -- gic_configure_irq(irq, type, base, NULL); -+ ret = gic_configure_irq(irq, type, base, NULL); - - raw_spin_unlock(&irq_controller_lock); - -- return 0; -+ return ret; - } - - #ifdef CONFIG_SMP -diff --git 
a/drivers/memory/Kconfig b/drivers/memory/Kconfig -index 6d91c27..d6af99f 100644 ---- a/drivers/memory/Kconfig -+++ b/drivers/memory/Kconfig -@@ -83,6 +83,6 @@ config FSL_CORENET_CF - - config FSL_IFC - bool -- depends on FSL_SOC -+ depends on FSL_SOC || ARCH_LAYERSCAPE - - endif -diff --git a/drivers/memory/fsl_ifc.c b/drivers/memory/fsl_ifc.c -index 3d5d792..1b182b1 100644 ---- a/drivers/memory/fsl_ifc.c -+++ b/drivers/memory/fsl_ifc.c -@@ -22,6 +22,7 @@ - #include - #include - #include -+#include - #include - #include - #include -@@ -30,7 +31,9 @@ - #include - #include - #include --#include -+#include -+#include -+#include - - struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; - EXPORT_SYMBOL(fsl_ifc_ctrl_dev); -@@ -58,11 +61,11 @@ int fsl_ifc_find(phys_addr_t addr_base) - { - int i = 0; - -- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) -+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->gregs) - return -ENODEV; - -- for (i = 0; i < ARRAY_SIZE(fsl_ifc_ctrl_dev->regs->cspr_cs); i++) { -- u32 cspr = in_be32(&fsl_ifc_ctrl_dev->regs->cspr_cs[i].cspr); -+ for (i = 0; i < fsl_ifc_ctrl_dev->banks; i++) { -+ u32 cspr = ifc_in32(&fsl_ifc_ctrl_dev->gregs->cspr_cs[i].cspr); - if (cspr & CSPR_V && (cspr & CSPR_BA) == - convert_ifc_address(addr_base)) - return i; -@@ -74,21 +77,21 @@ EXPORT_SYMBOL(fsl_ifc_find); - - static int fsl_ifc_ctrl_init(struct fsl_ifc_ctrl *ctrl) - { -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_global __iomem *ifc = ctrl->gregs; - - /* - * Clear all the common status and event registers - */ -- if (in_be32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER) -- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); -+ if (ifc_in32(&ifc->cm_evter_stat) & IFC_CM_EVTER_STAT_CSER) -+ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat); - - /* enable all error and events */ -- out_be32(&ifc->cm_evter_en, IFC_CM_EVTER_EN_CSEREN); -+ ifc_out32(IFC_CM_EVTER_EN_CSEREN, &ifc->cm_evter_en); - - /* enable all error and event interrupts */ -- 
out_be32(&ifc->cm_evter_intr_en, IFC_CM_EVTER_INTR_EN_CSERIREN); -- out_be32(&ifc->cm_erattr0, 0x0); -- out_be32(&ifc->cm_erattr1, 0x0); -+ ifc_out32(IFC_CM_EVTER_INTR_EN_CSERIREN, &ifc->cm_evter_intr_en); -+ ifc_out32(0x0, &ifc->cm_erattr0); -+ ifc_out32(0x0, &ifc->cm_erattr1); - - return 0; - } -@@ -103,7 +106,7 @@ static int fsl_ifc_ctrl_remove(struct platform_device *dev) - irq_dispose_mapping(ctrl->nand_irq); - irq_dispose_mapping(ctrl->irq); - -- iounmap(ctrl->regs); -+ iounmap(ctrl->gregs); - - dev_set_drvdata(&dev->dev, NULL); - kfree(ctrl); -@@ -121,15 +124,15 @@ static DEFINE_SPINLOCK(nand_irq_lock); - - static u32 check_nand_stat(struct fsl_ifc_ctrl *ctrl) - { -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - unsigned long flags; - u32 stat; - - spin_lock_irqsave(&nand_irq_lock, flags); - -- stat = in_be32(&ifc->ifc_nand.nand_evter_stat); -+ stat = ifc_in32(&ifc->ifc_nand.nand_evter_stat); - if (stat) { -- out_be32(&ifc->ifc_nand.nand_evter_stat, stat); -+ ifc_out32(stat, &ifc->ifc_nand.nand_evter_stat); - ctrl->nand_stat = stat; - wake_up(&ctrl->nand_wait); - } -@@ -156,21 +159,21 @@ static irqreturn_t fsl_ifc_nand_irq(int irqno, void *data) - static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) - { - struct fsl_ifc_ctrl *ctrl = data; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_global __iomem *ifc = ctrl->gregs; - u32 err_axiid, err_srcid, status, cs_err, err_addr; - irqreturn_t ret = IRQ_NONE; - - /* read for chip select error */ -- cs_err = in_be32(&ifc->cm_evter_stat); -+ cs_err = ifc_in32(&ifc->cm_evter_stat); - if (cs_err) { - dev_err(ctrl->dev, "transaction sent to IFC is not mapped to" - "any memory bank 0x%08X\n", cs_err); - /* clear the chip select error */ -- out_be32(&ifc->cm_evter_stat, IFC_CM_EVTER_STAT_CSER); -+ ifc_out32(IFC_CM_EVTER_STAT_CSER, &ifc->cm_evter_stat); - - /* read error attribute registers print the error information */ -- status = 
in_be32(&ifc->cm_erattr0); -- err_addr = in_be32(&ifc->cm_erattr1); -+ status = ifc_in32(&ifc->cm_erattr0); -+ err_addr = ifc_in32(&ifc->cm_erattr1); - - if (status & IFC_CM_ERATTR0_ERTYP_READ) - dev_err(ctrl->dev, "Read transaction error" -@@ -213,7 +216,8 @@ static irqreturn_t fsl_ifc_ctrl_irq(int irqno, void *data) - static int fsl_ifc_ctrl_probe(struct platform_device *dev) - { - int ret = 0; -- -+ int version, banks; -+ void __iomem *addr; - - dev_info(&dev->dev, "Freescale Integrated Flash Controller\n"); - -@@ -224,16 +228,41 @@ static int fsl_ifc_ctrl_probe(struct platform_device *dev) - dev_set_drvdata(&dev->dev, fsl_ifc_ctrl_dev); - - /* IOMAP the entire IFC region */ -- fsl_ifc_ctrl_dev->regs = of_iomap(dev->dev.of_node, 0); -- if (!fsl_ifc_ctrl_dev->regs) { -+ fsl_ifc_ctrl_dev->gregs = of_iomap(dev->dev.of_node, 0); -+ if (!fsl_ifc_ctrl_dev->gregs) { - dev_err(&dev->dev, "failed to get memory region\n"); - ret = -ENODEV; - goto err; - } - -+ if (of_property_read_bool(dev->dev.of_node, "little-endian")) { -+ fsl_ifc_ctrl_dev->little_endian = true; -+ dev_dbg(&dev->dev, "IFC REGISTERS are LITTLE endian\n"); -+ } else { -+ fsl_ifc_ctrl_dev->little_endian = false; -+ dev_dbg(&dev->dev, "IFC REGISTERS are BIG endian\n"); -+ } -+ -+ version = ifc_in32(&fsl_ifc_ctrl_dev->gregs->ifc_rev) & -+ FSL_IFC_VERSION_MASK; -+ -+ banks = (version == FSL_IFC_VERSION_1_0_0) ? 
4 : 8; -+ dev_info(&dev->dev, "IFC version %d.%d, %d banks\n", -+ version >> 24, (version >> 16) & 0xf, banks); -+ -+ fsl_ifc_ctrl_dev->version = version; -+ fsl_ifc_ctrl_dev->banks = banks; -+ -+ addr = fsl_ifc_ctrl_dev->gregs; -+ if (version >= FSL_IFC_VERSION_2_0_0) -+ addr += PGOFFSET_64K; -+ else -+ addr += PGOFFSET_4K; -+ fsl_ifc_ctrl_dev->rregs = addr; -+ - /* get the Controller level irq */ - fsl_ifc_ctrl_dev->irq = irq_of_parse_and_map(dev->dev.of_node, 0); -- if (fsl_ifc_ctrl_dev->irq == NO_IRQ) { -+ if (fsl_ifc_ctrl_dev->irq == 0) { - dev_err(&dev->dev, "failed to get irq resource " - "for IFC\n"); - ret = -ENODEV; -diff --git a/drivers/mfd/vexpress-sysreg.c b/drivers/mfd/vexpress-sysreg.c -index 9e21e4f..8f43ab8 100644 ---- a/drivers/mfd/vexpress-sysreg.c -+++ b/drivers/mfd/vexpress-sysreg.c -@@ -223,7 +223,7 @@ static int vexpress_sysreg_probe(struct platform_device *pdev) - vexpress_config_set_master(vexpress_sysreg_get_master()); - - /* Confirm board type against DT property, if available */ -- if (of_property_read_u32(of_allnodes, "arm,hbi", &dt_hbi) == 0) { -+ if (of_property_read_u32(of_root, "arm,hbi", &dt_hbi) == 0) { - u32 id = vexpress_get_procid(VEXPRESS_SITE_MASTER); - u32 hbi = (id >> SYS_PROCIDx_HBI_SHIFT) & SYS_HBI_MASK; - -diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c -index 10ecc0a..d356dbc 100644 ---- a/drivers/mmc/card/block.c -+++ b/drivers/mmc/card/block.c -@@ -2402,6 +2402,10 @@ static const struct mmc_fixup blk_fixups[] = - * - * N.B. This doesn't affect SD cards. 
- */ -+ MMC_FIXUP("SDMB-32", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, -+ MMC_QUIRK_BLK_NO_CMD23), -+ MMC_FIXUP("SDM032", CID_MANFID_SANDISK, CID_OEMID_ANY, add_quirk_mmc, -+ MMC_QUIRK_BLK_NO_CMD23), - MMC_FIXUP("MMC08G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, - MMC_QUIRK_BLK_NO_CMD23), - MMC_FIXUP("MMC16G", CID_MANFID_TOSHIBA, CID_OEMID_ANY, add_quirk_mmc, -diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig -index 1386065..b8c9b73 100644 ---- a/drivers/mmc/host/Kconfig -+++ b/drivers/mmc/host/Kconfig -@@ -66,7 +66,7 @@ config MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER - has the effect of scrambling the addresses and formats of data - accessed in sizes other than the datum size. - -- This is the case for the Freescale eSDHC and Nintendo Wii SDHCI. -+ This is the case for the Nintendo Wii SDHCI. - - config MMC_SDHCI_PCI - tristate "SDHCI support on PCI bus" -@@ -130,8 +130,10 @@ config MMC_SDHCI_OF_ARASAN - config MMC_SDHCI_OF_ESDHC - tristate "SDHCI OF support for the Freescale eSDHC controller" - depends on MMC_SDHCI_PLTFM -- depends on PPC_OF -- select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER -+ depends on PPC || ARCH_MXC || ARCH_LAYERSCAPE -+ select MMC_SDHCI_IO_ACCESSORS -+ select FSL_SOC_DRIVERS -+ select FSL_GUTS - help - This selects the Freescale eSDHC controller support. 
- -@@ -142,7 +144,7 @@ config MMC_SDHCI_OF_ESDHC - config MMC_SDHCI_OF_HLWD - tristate "SDHCI OF support for the Nintendo Wii SDHCI controllers" - depends on MMC_SDHCI_PLTFM -- depends on PPC_OF -+ depends on PPC - select MMC_SDHCI_BIG_ENDIAN_32BIT_BYTE_SWAPPER - help - This selects the Secure Digital Host Controller Interface (SDHCI) -diff --git a/drivers/mmc/host/sdhci-esdhc.h b/drivers/mmc/host/sdhci-esdhc.h -index a870c42..f2baede 100644 ---- a/drivers/mmc/host/sdhci-esdhc.h -+++ b/drivers/mmc/host/sdhci-esdhc.h -@@ -21,16 +21,23 @@ - #define ESDHC_DEFAULT_QUIRKS (SDHCI_QUIRK_FORCE_BLK_SZ_2048 | \ - SDHCI_QUIRK_NO_BUSY_IRQ | \ - SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | \ -- SDHCI_QUIRK_PIO_NEEDS_DELAY) -+ SDHCI_QUIRK_PIO_NEEDS_DELAY | \ -+ SDHCI_QUIRK_NO_HISPD_BIT) -+ -+#define ESDHC_PROCTL 0x28 - - #define ESDHC_SYSTEM_CONTROL 0x2c - #define ESDHC_CLOCK_MASK 0x0000fff0 - #define ESDHC_PREDIV_SHIFT 8 - #define ESDHC_DIVIDER_SHIFT 4 -+#define ESDHC_CLOCK_CRDEN 0x00000008 - #define ESDHC_CLOCK_PEREN 0x00000004 - #define ESDHC_CLOCK_HCKEN 0x00000002 - #define ESDHC_CLOCK_IPGEN 0x00000001 - -+#define ESDHC_PRESENT_STATE 0x24 -+#define ESDHC_CLOCK_STABLE 0x00000008 -+ - /* pltfm-specific */ - #define ESDHC_HOST_CONTROL_LE 0x20 - -diff --git a/drivers/mmc/host/sdhci-of-esdhc.c b/drivers/mmc/host/sdhci-of-esdhc.c -index 8872c85..4a4a693 100644 ---- a/drivers/mmc/host/sdhci-of-esdhc.c -+++ b/drivers/mmc/host/sdhci-of-esdhc.c -@@ -18,128 +18,334 @@ - #include - #include - #include -+#include -+#include - #include - #include "sdhci-pltfm.h" - #include "sdhci-esdhc.h" - - #define VENDOR_V_22 0x12 - #define VENDOR_V_23 0x13 --static u32 esdhc_readl(struct sdhci_host *host, int reg) -+ -+struct sdhci_esdhc { -+ u8 vendor_ver; -+ u8 spec_ver; -+ u32 soc_ver; -+ u8 soc_rev; -+}; -+ -+/** -+ * esdhc_read*_fixup - Fixup the value read from incompatible eSDHC register -+ * to make it compatible with SD spec. 
-+ * -+ * @host: pointer to sdhci_host -+ * @spec_reg: SD spec register address -+ * @value: 32bit eSDHC register value on spec_reg address -+ * -+ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC -+ * registers are 32 bits. There are differences in register size, register -+ * address, register function, bit position and function between eSDHC spec -+ * and SD spec. -+ * -+ * Return a fixed up register value -+ */ -+static u32 esdhc_readl_fixup(struct sdhci_host *host, -+ int spec_reg, u32 value) - { -+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -+ struct sdhci_esdhc *esdhc = pltfm_host->priv; - u32 ret; - -- ret = in_be32(host->ioaddr + reg); - /* - * The bit of ADMA flag in eSDHC is not compatible with standard - * SDHC register, so set fake flag SDHCI_CAN_DO_ADMA2 when ADMA is - * supported by eSDHC. - * And for many FSL eSDHC controller, the reset value of field -- * SDHCI_CAN_DO_ADMA1 is one, but some of them can't support ADMA, -+ * SDHCI_CAN_DO_ADMA1 is 1, but some of them can't support ADMA, - * only these vendor version is greater than 2.2/0x12 support ADMA. -- * For FSL eSDHC, must aligned 4-byte, so use 0xFC to read the -- * the verdor version number, oxFE is SDHCI_HOST_VERSION. 
- */ -- if ((reg == SDHCI_CAPABILITIES) && (ret & SDHCI_CAN_DO_ADMA1)) { -- u32 tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); -- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; -- if (tmp > VENDOR_V_22) -- ret |= SDHCI_CAN_DO_ADMA2; -+ if ((spec_reg == SDHCI_CAPABILITIES) && (value & SDHCI_CAN_DO_ADMA1)) { -+ if (esdhc->vendor_ver > VENDOR_V_22) { -+ ret = value | SDHCI_CAN_DO_ADMA2; -+ return ret; -+ } - } -- -+ ret = value; - return ret; - } - --static u16 esdhc_readw(struct sdhci_host *host, int reg) -+static u16 esdhc_readw_fixup(struct sdhci_host *host, -+ int spec_reg, u32 value) - { - u16 ret; -- int base = reg & ~0x3; -- int shift = (reg & 0x2) * 8; -+ int shift = (spec_reg & 0x2) * 8; - -- if (unlikely(reg == SDHCI_HOST_VERSION)) -- ret = in_be32(host->ioaddr + base) & 0xffff; -+ if (spec_reg == SDHCI_HOST_VERSION) -+ ret = value & 0xffff; - else -- ret = (in_be32(host->ioaddr + base) >> shift) & 0xffff; -+ ret = (value >> shift) & 0xffff; - return ret; - } - --static u8 esdhc_readb(struct sdhci_host *host, int reg) -+static u8 esdhc_readb_fixup(struct sdhci_host *host, -+ int spec_reg, u32 value) - { -- int base = reg & ~0x3; -- int shift = (reg & 0x3) * 8; -- u8 ret = (in_be32(host->ioaddr + base) >> shift) & 0xff; -+ u8 ret; -+ u8 dma_bits; -+ int shift = (spec_reg & 0x3) * 8; -+ -+ ret = (value >> shift) & 0xff; - - /* - * "DMA select" locates at offset 0x28 in SD specification, but on - * P5020 or P3041, it locates at 0x29. 
- */ -- if (reg == SDHCI_HOST_CONTROL) { -- u32 dma_bits; -- -- dma_bits = in_be32(host->ioaddr + reg); -+ if (spec_reg == SDHCI_HOST_CONTROL) { - /* DMA select is 22,23 bits in Protocol Control Register */ -- dma_bits = (dma_bits >> 5) & SDHCI_CTRL_DMA_MASK; -- -+ dma_bits = (value >> 5) & SDHCI_CTRL_DMA_MASK; - /* fixup the result */ - ret &= ~SDHCI_CTRL_DMA_MASK; - ret |= dma_bits; - } -- - return ret; - } - --static void esdhc_writel(struct sdhci_host *host, u32 val, int reg) -+/** -+ * esdhc_write*_fixup - Fixup the SD spec register value so that it could be -+ * written into eSDHC register. -+ * -+ * @host: pointer to sdhci_host -+ * @spec_reg: SD spec register address -+ * @value: 8/16/32bit SD spec register value that would be written -+ * @old_value: 32bit eSDHC register value on spec_reg address -+ * -+ * In SD spec, there are 8/16/32/64 bits registers, while all of eSDHC -+ * registers are 32 bits. There are differences in register size, register -+ * address, register function, bit position and function between eSDHC spec -+ * and SD spec. -+ * -+ * Return a fixed up register value -+ */ -+static u32 esdhc_writel_fixup(struct sdhci_host *host, -+ int spec_reg, u32 value, u32 old_value) - { -+ u32 ret; -+ - /* -- * Enable IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] -- * when SYSCTL[RSTD]) is set for some special operations. -- * No any impact other operation. -+ * Enabling IRQSTATEN[BGESEN] is just to set IRQSTAT[BGE] -+ * when SYSCTL[RSTD] is set for some special operations. -+ * No any impact on other operation. 
- */ -- if (reg == SDHCI_INT_ENABLE) -- val |= SDHCI_INT_BLK_GAP; -- sdhci_be32bs_writel(host, val, reg); -+ if (spec_reg == SDHCI_INT_ENABLE) -+ ret = value | SDHCI_INT_BLK_GAP; -+ else -+ ret = value; -+ -+ return ret; - } - --static void esdhc_writew(struct sdhci_host *host, u16 val, int reg) -+static u32 esdhc_writew_fixup(struct sdhci_host *host, -+ int spec_reg, u16 value, u32 old_value) - { -- if (reg == SDHCI_BLOCK_SIZE) { -+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -+ int shift = (spec_reg & 0x2) * 8; -+ u32 ret; -+ -+ switch (spec_reg) { -+ case SDHCI_TRANSFER_MODE: -+ /* -+ * Postpone this write, we must do it together with a -+ * command write that is down below. Return old value. -+ */ -+ pltfm_host->xfer_mode_shadow = value; -+ return old_value; -+ case SDHCI_COMMAND: -+ ret = (value << 16) | pltfm_host->xfer_mode_shadow; -+ return ret; -+ } -+ -+ ret = old_value & (~(0xffff << shift)); -+ ret |= (value << shift); -+ -+ if (spec_reg == SDHCI_BLOCK_SIZE) { - /* - * Two last DMA bits are reserved, and first one is used for - * non-standard blksz of 4096 bytes that we don't support - * yet. So clear the DMA boundary bits. - */ -- val &= ~SDHCI_MAKE_BLKSZ(0x7, 0); -+ ret &= (~SDHCI_MAKE_BLKSZ(0x7, 0)); - } -- sdhci_be32bs_writew(host, val, reg); -+ return ret; - } - --static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) -+static u32 esdhc_writeb_fixup(struct sdhci_host *host, -+ int spec_reg, u8 value, u32 old_value) - { -+ u32 ret; -+ u32 dma_bits; -+ u8 tmp; -+ int shift = (spec_reg & 0x3) * 8; -+ -+ /* -+ * eSDHC doesn't have a standard power control register, so we do -+ * nothing here to avoid incorrect operation. -+ */ -+ if (spec_reg == SDHCI_POWER_CONTROL) -+ return old_value; - /* - * "DMA select" location is offset 0x28 in SD specification, but on - * P5020 or P3041, it's located at 0x29. 
- */ -- if (reg == SDHCI_HOST_CONTROL) { -- u32 dma_bits; -- -+ if (spec_reg == SDHCI_HOST_CONTROL) { - /* - * If host control register is not standard, exit - * this function - */ - if (host->quirks2 & SDHCI_QUIRK2_BROKEN_HOST_CONTROL) -- return; -+ return old_value; - - /* DMA select is 22,23 bits in Protocol Control Register */ -- dma_bits = (val & SDHCI_CTRL_DMA_MASK) << 5; -- clrsetbits_be32(host->ioaddr + reg , SDHCI_CTRL_DMA_MASK << 5, -- dma_bits); -- val &= ~SDHCI_CTRL_DMA_MASK; -- val |= in_be32(host->ioaddr + reg) & SDHCI_CTRL_DMA_MASK; -+ dma_bits = (value & SDHCI_CTRL_DMA_MASK) << 5; -+ ret = (old_value & (~(SDHCI_CTRL_DMA_MASK << 5))) | dma_bits; -+ tmp = (value & (~SDHCI_CTRL_DMA_MASK)) | -+ (old_value & SDHCI_CTRL_DMA_MASK); -+ ret = (ret & (~0xff)) | tmp; -+ -+ /* Prevent SDHCI core from writing reserved bits (e.g. HISPD) */ -+ ret &= ~ESDHC_HOST_CONTROL_RES; -+ return ret; - } - -- /* Prevent SDHCI core from writing reserved bits (e.g. HISPD). */ -- if (reg == SDHCI_HOST_CONTROL) -- val &= ~ESDHC_HOST_CONTROL_RES; -- sdhci_be32bs_writeb(host, val, reg); -+ ret = (old_value & (~(0xff << shift))) | (value << shift); -+ return ret; -+} -+ -+static u32 esdhc_be_readl(struct sdhci_host *host, int reg) -+{ -+ u32 ret; -+ u32 value; -+ -+ value = ioread32be(host->ioaddr + reg); -+ ret = esdhc_readl_fixup(host, reg, value); -+ -+ return ret; -+} -+ -+static u32 esdhc_le_readl(struct sdhci_host *host, int reg) -+{ -+ u32 ret; -+ u32 value; -+ -+ value = ioread32(host->ioaddr + reg); -+ ret = esdhc_readl_fixup(host, reg, value); -+ -+ return ret; -+} -+ -+static u16 esdhc_be_readw(struct sdhci_host *host, int reg) -+{ -+ u16 ret; -+ u32 value; -+ int base = reg & ~0x3; -+ -+ value = ioread32be(host->ioaddr + base); -+ ret = esdhc_readw_fixup(host, reg, value); -+ return ret; -+} -+ -+static u16 esdhc_le_readw(struct sdhci_host *host, int reg) -+{ -+ u16 ret; -+ u32 value; -+ int base = reg & ~0x3; -+ -+ value = ioread32(host->ioaddr + base); -+ ret = 
esdhc_readw_fixup(host, reg, value); -+ return ret; -+} -+ -+static u8 esdhc_be_readb(struct sdhci_host *host, int reg) -+{ -+ u8 ret; -+ u32 value; -+ int base = reg & ~0x3; -+ -+ value = ioread32be(host->ioaddr + base); -+ ret = esdhc_readb_fixup(host, reg, value); -+ return ret; -+} -+ -+static u8 esdhc_le_readb(struct sdhci_host *host, int reg) -+{ -+ u8 ret; -+ u32 value; -+ int base = reg & ~0x3; -+ -+ value = ioread32(host->ioaddr + base); -+ ret = esdhc_readb_fixup(host, reg, value); -+ return ret; -+} -+ -+static void esdhc_be_writel(struct sdhci_host *host, u32 val, int reg) -+{ -+ u32 value; -+ -+ value = esdhc_writel_fixup(host, reg, val, 0); -+ iowrite32be(value, host->ioaddr + reg); -+} -+ -+static void esdhc_le_writel(struct sdhci_host *host, u32 val, int reg) -+{ -+ u32 value; -+ -+ value = esdhc_writel_fixup(host, reg, val, 0); -+ iowrite32(value, host->ioaddr + reg); -+} -+ -+static void esdhc_be_writew(struct sdhci_host *host, u16 val, int reg) -+{ -+ int base = reg & ~0x3; -+ u32 value; -+ u32 ret; -+ -+ value = ioread32be(host->ioaddr + base); -+ ret = esdhc_writew_fixup(host, reg, val, value); -+ if (reg != SDHCI_TRANSFER_MODE) -+ iowrite32be(ret, host->ioaddr + base); -+} -+ -+static void esdhc_le_writew(struct sdhci_host *host, u16 val, int reg) -+{ -+ int base = reg & ~0x3; -+ u32 value; -+ u32 ret; -+ -+ value = ioread32(host->ioaddr + base); -+ ret = esdhc_writew_fixup(host, reg, val, value); -+ if (reg != SDHCI_TRANSFER_MODE) -+ iowrite32(ret, host->ioaddr + base); -+} -+ -+static void esdhc_be_writeb(struct sdhci_host *host, u8 val, int reg) -+{ -+ int base = reg & ~0x3; -+ u32 value; -+ u32 ret; -+ -+ value = ioread32be(host->ioaddr + base); -+ ret = esdhc_writeb_fixup(host, reg, val, value); -+ iowrite32be(ret, host->ioaddr + base); -+} -+ -+static void esdhc_le_writeb(struct sdhci_host *host, u8 val, int reg) -+{ -+ int base = reg & ~0x3; -+ u32 value; -+ u32 ret; -+ -+ value = ioread32(host->ioaddr + base); -+ ret = 
esdhc_writeb_fixup(host, reg, val, value); -+ iowrite32(ret, host->ioaddr + base); - } - - /* -@@ -149,37 +355,116 @@ static void esdhc_writeb(struct sdhci_host *host, u8 val, int reg) - * For Continue, apply soft reset for data(SYSCTL[RSTD]); - * and re-issue the entire read transaction from beginning. - */ --static void esdhci_of_adma_workaround(struct sdhci_host *host, u32 intmask) -+static void esdhc_of_adma_workaround(struct sdhci_host *host, u32 intmask) - { -- u32 tmp; -+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -+ struct sdhci_esdhc *esdhc = pltfm_host->priv; - bool applicable; - dma_addr_t dmastart; - dma_addr_t dmanow; - -- tmp = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); -- tmp = (tmp & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; -- - applicable = (intmask & SDHCI_INT_DATA_END) && -- (intmask & SDHCI_INT_BLK_GAP) && -- (tmp == VENDOR_V_23); -- if (!applicable) -+ (intmask & SDHCI_INT_BLK_GAP) && -+ (esdhc->vendor_ver == VENDOR_V_23); -+ if (applicable) { -+ -+ sdhci_reset(host, SDHCI_RESET_DATA); -+ host->data->error = 0; -+ dmastart = sg_dma_address(host->data->sg); -+ dmanow = dmastart + host->data->bytes_xfered; -+ /* -+ * Force update to the next DMA block boundary. -+ */ -+ dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + -+ SDHCI_DEFAULT_BOUNDARY_SIZE; -+ host->data->bytes_xfered = dmanow - dmastart; -+ sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); -+ - return; -+ } - -- host->data->error = 0; -- dmastart = sg_dma_address(host->data->sg); -- dmanow = dmastart + host->data->bytes_xfered; - /* -- * Force update to the next DMA block boundary. 
-+ * Check for A-004388: eSDHC DMA might not stop if error -+ * occurs on system transaction -+ * Impact list: -+ * T4240-4160-R1.0 B4860-4420-R1.0-R2.0 P1010-1014-R1.0 -+ * P3041-R1.0-R2.0-R1.1 P2041-2040-R1.0-R1.1-R2.0 -+ * P5020-5010-R2.0-R1.0 P5040-5021-R2.0-R2.1 - */ -- dmanow = (dmanow & ~(SDHCI_DEFAULT_BOUNDARY_SIZE - 1)) + -- SDHCI_DEFAULT_BOUNDARY_SIZE; -- host->data->bytes_xfered = dmanow - dmastart; -- sdhci_writel(host, dmanow, SDHCI_DMA_ADDRESS); -+ if (!(((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_P1010) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_P1014) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_P3041) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P2041) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P2040) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P5020) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P5010) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P5040) && (esdhc->soc_rev <= 0x21)) || -+ ((esdhc->soc_ver == SVR_P5021) && (esdhc->soc_rev <= 0x21)))) -+ return; -+ -+ sdhci_reset(host, SDHCI_RESET_DATA); -+ -+ if (host->flags & SDHCI_USE_ADMA) { -+ u32 mod, i, offset; -+ u8 *desc; -+ dma_addr_t addr; -+ struct scatterlist *sg; -+ __le32 *dataddr; -+ __le32 *cmdlen; -+ -+ /* -+ * If block count was enabled, in case read transfer there -+ * is no data was corrupted -+ */ -+ mod = sdhci_readl(host, SDHCI_TRANSFER_MODE); -+ if ((mod & SDHCI_TRNS_BLK_CNT_EN) && -+ (host->data->flags & MMC_DATA_READ)) -+ host->data->error = 0; -+ -+ BUG_ON(!host->data); -+ desc = host->adma_table; -+ 
for_each_sg(host->data->sg, sg, host->sg_count, i) { -+ addr = sg_dma_address(sg); -+ offset = (4 - (addr & 0x3)) & 0x3; -+ if (offset) -+ desc += 8; -+ desc += 8; -+ } -+ -+ /* -+ * Add an extra zero descriptor next to the -+ * terminating descriptor. -+ */ -+ desc += 8; -+ WARN_ON((desc - (u8 *)(host->adma_table)) > (128 * 2 + 1) * 4); -+ -+ dataddr = (__le32 __force *)(desc + 4); -+ cmdlen = (__le32 __force *)desc; -+ -+ cmdlen[0] = cpu_to_le32(0); -+ dataddr[0] = cpu_to_le32(0); -+ } -+ -+ if ((host->flags & SDHCI_USE_SDMA) && -+ (host->data->flags & MMC_DATA_READ)) -+ host->data->error = 0; -+ -+ return; - } - - static int esdhc_of_enable_dma(struct sdhci_host *host) - { -- setbits32(host->ioaddr + ESDHC_DMA_SYSCTL, ESDHC_DMA_SNOOP); -+ u32 value; -+ -+ value = sdhci_readl(host, ESDHC_DMA_SYSCTL); -+ value |= ESDHC_DMA_SNOOP; -+ sdhci_writel(host, value, ESDHC_DMA_SYSCTL); - return 0; - } - -@@ -199,15 +484,22 @@ static unsigned int esdhc_of_get_min_clock(struct sdhci_host *host) - - static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) - { -- int pre_div = 2; -+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -+ struct sdhci_esdhc *esdhc = pltfm_host->priv; -+ int pre_div = 1; - int div = 1; - u32 temp; -+ u32 timeout; - - host->mmc->actual_clock = 0; - - if (clock == 0) - return; - -+ /* Workaround to start pre_div at 2 for VNN < VENDOR_V_23 */ -+ if (esdhc->vendor_ver < VENDOR_V_23) -+ pre_div = 2; -+ - /* Workaround to reduce the clock frequency for p1010 esdhc */ - if (of_find_compatible_node(NULL, NULL, "fsl,p1010-esdhc")) { - if (clock > 20000000) -@@ -218,7 +510,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) - - temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); - temp &= ~(ESDHC_CLOCK_IPGEN | ESDHC_CLOCK_HCKEN | ESDHC_CLOCK_PEREN -- | ESDHC_CLOCK_MASK); -+ | ESDHC_CLOCK_CRDEN | ESDHC_CLOCK_MASK); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - - while (host->max_clk / pre_div / 16 > 
clock && pre_div < 256) -@@ -229,7 +521,7 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) - - dev_dbg(mmc_dev(host->mmc), "desired SD clock: %d, actual: %d\n", - clock, host->max_clk / pre_div / div); -- -+ host->mmc->actual_clock = host->max_clk / pre_div / div; - pre_div >>= 1; - div--; - -@@ -238,70 +530,117 @@ static void esdhc_of_set_clock(struct sdhci_host *host, unsigned int clock) - | (div << ESDHC_DIVIDER_SHIFT) - | (pre_div << ESDHC_PREDIV_SHIFT)); - sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); -- mdelay(1); --} - --static void esdhc_of_platform_init(struct sdhci_host *host) --{ -- u32 vvn; -- -- vvn = in_be32(host->ioaddr + SDHCI_SLOT_INT_STATUS); -- vvn = (vvn & SDHCI_VENDOR_VER_MASK) >> SDHCI_VENDOR_VER_SHIFT; -- if (vvn == VENDOR_V_22) -- host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; -+ /* Wait max 20 ms */ -+ timeout = 20; -+ while (!(sdhci_readl(host, ESDHC_PRESENT_STATE) & ESDHC_CLOCK_STABLE)) { -+ if (timeout == 0) { -+ pr_err("%s: Internal clock never stabilised.\n", -+ mmc_hostname(host->mmc)); -+ return; -+ } -+ timeout--; -+ mdelay(1); -+ } - -- if (vvn > VENDOR_V_22) -- host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; -+ temp |= ESDHC_CLOCK_CRDEN; -+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); - } - - static void esdhc_pltfm_set_bus_width(struct sdhci_host *host, int width) - { - u32 ctrl; - -+ ctrl = sdhci_readl(host, ESDHC_PROCTL); -+ ctrl &= (~ESDHC_CTRL_BUSWIDTH_MASK); - switch (width) { - case MMC_BUS_WIDTH_8: -- ctrl = ESDHC_CTRL_8BITBUS; -+ ctrl |= ESDHC_CTRL_8BITBUS; - break; - - case MMC_BUS_WIDTH_4: -- ctrl = ESDHC_CTRL_4BITBUS; -+ ctrl |= ESDHC_CTRL_4BITBUS; - break; - - default: -- ctrl = 0; - break; - } - -- clrsetbits_be32(host->ioaddr + SDHCI_HOST_CONTROL, -- ESDHC_CTRL_BUSWIDTH_MASK, ctrl); -+ sdhci_writel(host, ctrl, ESDHC_PROCTL); - } - --static const struct sdhci_ops sdhci_esdhc_ops = { -- .read_l = esdhc_readl, -- .read_w = esdhc_readw, -- .read_b = esdhc_readb, -- .write_l = esdhc_writel, -- 
.write_w = esdhc_writew, -- .write_b = esdhc_writeb, -- .set_clock = esdhc_of_set_clock, -- .enable_dma = esdhc_of_enable_dma, -- .get_max_clock = esdhc_of_get_max_clock, -- .get_min_clock = esdhc_of_get_min_clock, -- .platform_init = esdhc_of_platform_init, -- .adma_workaround = esdhci_of_adma_workaround, -- .set_bus_width = esdhc_pltfm_set_bus_width, -- .reset = sdhci_reset, -- .set_uhs_signaling = sdhci_set_uhs_signaling, --}; -+/* -+ * A-003980: SDHC: Glitch is generated on the card clock with software reset -+ * or clock divider change -+ * Workaround: -+ * A simple workaround is to disable the SD card clock before the software -+ * reset, and enable it when the module resumes normal operation. The Host -+ * and the SD card are in a master-slave relationship. The Host provides -+ * clock and control transfer across the interface. Therefore, any existing -+ * operation is discarded when the Host controller is reset. -+ */ -+static int esdhc_of_reset_workaround(struct sdhci_host *host, u8 mask) -+{ -+ struct sdhci_pltfm_host *pltfm_host = sdhci_priv(host); -+ struct sdhci_esdhc *esdhc = pltfm_host->priv; -+ bool disable_clk_before_reset = false; -+ u32 temp; - --#ifdef CONFIG_PM -+ /* -+ * Check for A-003980 -+ * Impact list: -+ * T4240-4160-R1.0-R2.0 B4860-4420-R1.0-R2.0 P5040-5021-R1.0-R2.0-R2.1 -+ * P5020-5010-R1.0-R2.0 P3041-R1.0-R1.1-R2.0 P2041-2040-R1.0-R1.1-R2.0 -+ * P1010-1014-R1.0 -+ */ -+ if (((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_T4240) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_T4160) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_B4860) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_B4420) && (esdhc->soc_rev == 0x20)) || -+ ((esdhc->soc_ver == SVR_P5040) && 
(esdhc->soc_rev <= 0x21)) || -+ ((esdhc->soc_ver == SVR_P5021) && (esdhc->soc_rev <= 0x21)) || -+ ((esdhc->soc_ver == SVR_P5020) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P5010) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P3041) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P2041) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P2040) && (esdhc->soc_rev <= 0x20)) || -+ ((esdhc->soc_ver == SVR_P1014) && (esdhc->soc_rev == 0x10)) || -+ ((esdhc->soc_ver == SVR_P1010) && (esdhc->soc_rev == 0x10))) -+ disable_clk_before_reset = true; -+ -+ if (disable_clk_before_reset && (mask & SDHCI_RESET_ALL)) { -+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); -+ temp &= ~ESDHC_CLOCK_CRDEN; -+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); -+ sdhci_reset(host, mask); -+ temp = sdhci_readl(host, ESDHC_SYSTEM_CONTROL); -+ temp |= ESDHC_CLOCK_CRDEN; -+ sdhci_writel(host, temp, ESDHC_SYSTEM_CONTROL); -+ return 1; -+ } -+ return 0; -+} -+ -+static void esdhc_reset(struct sdhci_host *host, u8 mask) -+{ -+ if (!esdhc_of_reset_workaround(host, mask)) -+ sdhci_reset(host, mask); - -+ sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); -+ sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); -+} -+ -+#ifdef CONFIG_PM - static u32 esdhc_proctl; - static int esdhc_of_suspend(struct device *dev) - { - struct sdhci_host *host = dev_get_drvdata(dev); - -- esdhc_proctl = sdhci_be32bs_readl(host, SDHCI_HOST_CONTROL); -+ esdhc_proctl = sdhci_readl(host, SDHCI_HOST_CONTROL); - - return sdhci_suspend_host(host); - } -@@ -311,11 +650,8 @@ static int esdhc_of_resume(struct device *dev) - struct sdhci_host *host = dev_get_drvdata(dev); - int ret = sdhci_resume_host(host); - -- if (ret == 0) { -- /* Isn't this already done by sdhci_resume_host() ? 
--rmk */ -- esdhc_of_enable_dma(host); -- sdhci_be32bs_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); -- } -+ if (ret == 0) -+ sdhci_writel(host, esdhc_proctl, SDHCI_HOST_CONTROL); - - return ret; - } -@@ -329,30 +665,120 @@ static const struct dev_pm_ops esdhc_pmops = { - #define ESDHC_PMOPS NULL - #endif - --static const struct sdhci_pltfm_data sdhci_esdhc_pdata = { -- /* -- * card detection could be handled via GPIO -- * eSDHC cannot support End Attribute in NOP ADMA descriptor -- */ -+static const struct sdhci_ops sdhci_esdhc_be_ops = { -+ .read_l = esdhc_be_readl, -+ .read_w = esdhc_be_readw, -+ .read_b = esdhc_be_readb, -+ .write_l = esdhc_be_writel, -+ .write_w = esdhc_be_writew, -+ .write_b = esdhc_be_writeb, -+ .set_clock = esdhc_of_set_clock, -+ .enable_dma = esdhc_of_enable_dma, -+ .get_max_clock = esdhc_of_get_max_clock, -+ .get_min_clock = esdhc_of_get_min_clock, -+ .adma_workaround = esdhc_of_adma_workaround, -+ .set_bus_width = esdhc_pltfm_set_bus_width, -+ .reset = esdhc_reset, -+ .set_uhs_signaling = sdhci_set_uhs_signaling, -+}; -+ -+static const struct sdhci_ops sdhci_esdhc_le_ops = { -+ .read_l = esdhc_le_readl, -+ .read_w = esdhc_le_readw, -+ .read_b = esdhc_le_readb, -+ .write_l = esdhc_le_writel, -+ .write_w = esdhc_le_writew, -+ .write_b = esdhc_le_writeb, -+ .set_clock = esdhc_of_set_clock, -+ .enable_dma = esdhc_of_enable_dma, -+ .get_max_clock = esdhc_of_get_max_clock, -+ .get_min_clock = esdhc_of_get_min_clock, -+ .adma_workaround = esdhc_of_adma_workaround, -+ .set_bus_width = esdhc_pltfm_set_bus_width, -+ .reset = esdhc_reset, -+ .set_uhs_signaling = sdhci_set_uhs_signaling, -+}; -+ -+static const struct sdhci_pltfm_data sdhci_esdhc_be_pdata = { - .quirks = ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION - | SDHCI_QUIRK_NO_CARD_NO_RESET - | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, -- .ops = &sdhci_esdhc_ops, -+ .ops = &sdhci_esdhc_be_ops, - }; - -+static const struct sdhci_pltfm_data sdhci_esdhc_le_pdata = { -+ .quirks = 
ESDHC_DEFAULT_QUIRKS | SDHCI_QUIRK_BROKEN_CARD_DETECTION -+ | SDHCI_QUIRK_NO_CARD_NO_RESET -+ | SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC, -+ .ops = &sdhci_esdhc_le_ops, -+}; -+ -+static void esdhc_init(struct platform_device *pdev, struct sdhci_host *host) -+{ -+ struct sdhci_pltfm_host *pltfm_host; -+ struct sdhci_esdhc *esdhc; -+ u16 host_ver; -+ u32 svr; -+ -+ pltfm_host = sdhci_priv(host); -+ esdhc = devm_kzalloc(&pdev->dev, sizeof(struct sdhci_esdhc), -+ GFP_KERNEL); -+ pltfm_host->priv = esdhc; -+ -+ svr = guts_get_svr(); -+ esdhc->soc_ver = SVR_SOC_VER(svr); -+ esdhc->soc_rev = SVR_REV(svr); -+ -+ host_ver = sdhci_readw(host, SDHCI_HOST_VERSION); -+ esdhc->vendor_ver = (host_ver & SDHCI_VENDOR_VER_MASK) >> -+ SDHCI_VENDOR_VER_SHIFT; -+ esdhc->spec_ver = host_ver & SDHCI_SPEC_VER_MASK; -+} -+ - static int sdhci_esdhc_probe(struct platform_device *pdev) - { - struct sdhci_host *host; - struct device_node *np; -+ struct sdhci_pltfm_host *pltfm_host; -+ struct sdhci_esdhc *esdhc; - int ret; - -- host = sdhci_pltfm_init(pdev, &sdhci_esdhc_pdata, 0); -+ np = pdev->dev.of_node; -+ -+ if (of_get_property(np, "little-endian", NULL)) -+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_le_pdata, 0); -+ else -+ host = sdhci_pltfm_init(pdev, &sdhci_esdhc_be_pdata, 0); -+ - if (IS_ERR(host)) - return PTR_ERR(host); - -+ esdhc_init(pdev, host); -+ - sdhci_get_of_property(pdev); - -- np = pdev->dev.of_node; -+ pltfm_host = sdhci_priv(host); -+ esdhc = pltfm_host->priv; -+ if (esdhc->vendor_ver == VENDOR_V_22) -+ host->quirks2 |= SDHCI_QUIRK2_HOST_NO_CMD23; -+ -+ if (esdhc->vendor_ver > VENDOR_V_22) -+ host->quirks &= ~SDHCI_QUIRK_NO_BUSY_IRQ; -+ -+ if (of_device_is_compatible(np, "fsl,p5040-esdhc") || -+ of_device_is_compatible(np, "fsl,p5020-esdhc") || -+ of_device_is_compatible(np, "fsl,p4080-esdhc") || -+ of_device_is_compatible(np, "fsl,p1020-esdhc") || -+ of_device_is_compatible(np, "fsl,t1040-esdhc") || -+ of_device_is_compatible(np, "fsl,ls1021a-esdhc") || -+ 
of_device_is_compatible(np, "fsl,ls2080a-esdhc") || -+ of_device_is_compatible(np, "fsl,ls2085a-esdhc") || -+ of_device_is_compatible(np, "fsl,ls1043a-esdhc")) -+ host->quirks &= ~SDHCI_QUIRK_BROKEN_CARD_DETECTION; -+ -+ if (of_device_is_compatible(np, "fsl,ls1021a-esdhc")) -+ host->quirks |= SDHCI_QUIRK_BROKEN_TIMEOUT_VAL; -+ - if (of_device_is_compatible(np, "fsl,p2020-esdhc")) { - /* - * Freescale messed up with P2020 as it has a non-standard -@@ -362,13 +788,19 @@ static int sdhci_esdhc_probe(struct platform_device *pdev) - } - - /* call to generic mmc_of_parse to support additional capabilities */ -- mmc_of_parse(host->mmc); -+ ret = mmc_of_parse(host->mmc); -+ if (ret) -+ goto err; -+ - mmc_of_parse_voltage(np, &host->ocr_mask); - - ret = sdhci_add_host(host); - if (ret) -- sdhci_pltfm_free(pdev); -+ goto err; - -+ return 0; -+ err: -+ sdhci_pltfm_free(pdev); - return ret; - } - -diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c -index 023c201..8af38a6 100644 ---- a/drivers/mmc/host/sdhci.c -+++ b/drivers/mmc/host/sdhci.c -@@ -44,8 +44,6 @@ - - #define MAX_TUNING_LOOP 40 - --#define ADMA_SIZE ((128 * 2 + 1) * 4) -- - static unsigned int debug_quirks = 0; - static unsigned int debug_quirks2; - -@@ -119,10 +117,17 @@ static void sdhci_dumpregs(struct sdhci_host *host) - pr_debug(DRIVER_NAME ": Host ctl2: 0x%08x\n", - sdhci_readw(host, SDHCI_HOST_CONTROL2)); - -- if (host->flags & SDHCI_USE_ADMA) -- pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", -- readl(host->ioaddr + SDHCI_ADMA_ERROR), -- readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); -+ if (host->flags & SDHCI_USE_ADMA) { -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x%08x\n", -+ readl(host->ioaddr + SDHCI_ADMA_ERROR), -+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS_HI), -+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); -+ else -+ pr_debug(DRIVER_NAME ": ADMA Err: 0x%08x | ADMA Ptr: 0x%08x\n", -+ readl(host->ioaddr + 
SDHCI_ADMA_ERROR), -+ readl(host->ioaddr + SDHCI_ADMA_ADDRESS)); -+ } - - pr_debug(DRIVER_NAME ": ===========================================\n"); - } -@@ -231,6 +236,9 @@ static void sdhci_init(struct sdhci_host *host, int soft) - SDHCI_INT_TIMEOUT | SDHCI_INT_DATA_END | - SDHCI_INT_RESPONSE; - -+ if (host->flags & SDHCI_AUTO_CMD12) -+ host->ier |= SDHCI_INT_ACMD12ERR; -+ - sdhci_writel(host, host->ier, SDHCI_INT_ENABLE); - sdhci_writel(host, host->ier, SDHCI_SIGNAL_ENABLE); - -@@ -448,18 +456,26 @@ static void sdhci_kunmap_atomic(void *buffer, unsigned long *flags) - local_irq_restore(*flags); - } - --static void sdhci_set_adma_desc(u8 *desc, u32 addr, int len, unsigned cmd) -+static void sdhci_adma_write_desc(struct sdhci_host *host, void *desc, -+ dma_addr_t addr, int len, unsigned cmd) - { -- __le32 *dataddr = (__le32 __force *)(desc + 4); -- __le16 *cmdlen = (__le16 __force *)desc; -+ struct sdhci_adma2_64_desc *dma_desc = desc; -+ -+ /* 32-bit and 64-bit descriptors have these members in same position */ -+ dma_desc->cmd = cpu_to_le16(cmd); -+ dma_desc->len = cpu_to_le16(len); -+ dma_desc->addr_lo = cpu_to_le32((u32)addr); - -- /* SDHCI specification says ADMA descriptors should be 4 byte -- * aligned, so using 16 or 32bit operations should be safe. 
*/ -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ dma_desc->addr_hi = cpu_to_le32((u64)addr >> 32); -+} - -- cmdlen[0] = cpu_to_le16(cmd); -- cmdlen[1] = cpu_to_le16(len); -+static void sdhci_adma_mark_end(void *desc) -+{ -+ struct sdhci_adma2_64_desc *dma_desc = desc; - -- dataddr[0] = cpu_to_le32(addr); -+ /* 32-bit and 64-bit descriptors have 'cmd' in same position */ -+ dma_desc->cmd |= cpu_to_le16(ADMA2_END); - } - - static int sdhci_adma_table_pre(struct sdhci_host *host, -@@ -467,8 +483,8 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - { - int direction; - -- u8 *desc; -- u8 *align; -+ void *desc; -+ void *align; - dma_addr_t addr; - dma_addr_t align_addr; - int len, offset; -@@ -489,17 +505,17 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - direction = DMA_TO_DEVICE; - - host->align_addr = dma_map_single(mmc_dev(host->mmc), -- host->align_buffer, 128 * 4, direction); -+ host->align_buffer, host->align_buffer_sz, direction); - if (dma_mapping_error(mmc_dev(host->mmc), host->align_addr)) - goto fail; -- BUG_ON(host->align_addr & 0x3); -+ BUG_ON(host->align_addr & host->align_mask); - - host->sg_count = dma_map_sg(mmc_dev(host->mmc), - data->sg, data->sg_len, direction); - if (host->sg_count == 0) - goto unmap_align; - -- desc = host->adma_desc; -+ desc = host->adma_table; - align = host->align_buffer; - - align_addr = host->align_addr; -@@ -515,24 +531,27 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - * the (up to three) bytes that screw up the - * alignment. 
- */ -- offset = (4 - (addr & 0x3)) & 0x3; -+ offset = (host->align_sz - (addr & host->align_mask)) & -+ host->align_mask; - if (offset) { - if (data->flags & MMC_DATA_WRITE) { - buffer = sdhci_kmap_atomic(sg, &flags); -- WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); -+ WARN_ON(((long)buffer & (PAGE_SIZE - 1)) > -+ (PAGE_SIZE - offset)); - memcpy(align, buffer, offset); - sdhci_kunmap_atomic(buffer, &flags); - } - - /* tran, valid */ -- sdhci_set_adma_desc(desc, align_addr, offset, 0x21); -+ sdhci_adma_write_desc(host, desc, align_addr, offset, -+ ADMA2_TRAN_VALID); - - BUG_ON(offset > 65536); - -- align += 4; -- align_addr += 4; -+ align += host->align_sz; -+ align_addr += host->align_sz; - -- desc += 8; -+ desc += host->desc_sz; - - addr += offset; - len -= offset; -@@ -541,23 +560,23 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - BUG_ON(len > 65536); - - /* tran, valid */ -- sdhci_set_adma_desc(desc, addr, len, 0x21); -- desc += 8; -+ sdhci_adma_write_desc(host, desc, addr, len, ADMA2_TRAN_VALID); -+ desc += host->desc_sz; - - /* - * If this triggers then we have a calculation bug - * somewhere. 
:/ - */ -- WARN_ON((desc - host->adma_desc) > ADMA_SIZE); -+ WARN_ON((desc - host->adma_table) >= host->adma_table_sz); - } - - if (host->quirks & SDHCI_QUIRK_NO_ENDATTR_IN_NOPDESC) { - /* - * Mark the last descriptor as the terminating descriptor - */ -- if (desc != host->adma_desc) { -- desc -= 8; -- desc[0] |= 0x2; /* end */ -+ if (desc != host->adma_table) { -+ desc -= host->desc_sz; -+ sdhci_adma_mark_end(desc); - } - } else { - /* -@@ -565,7 +584,7 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - */ - - /* nop, end, valid */ -- sdhci_set_adma_desc(desc, 0, 0, 0x3); -+ sdhci_adma_write_desc(host, desc, 0, 0, ADMA2_NOP_END_VALID); - } - - /* -@@ -573,14 +592,14 @@ static int sdhci_adma_table_pre(struct sdhci_host *host, - */ - if (data->flags & MMC_DATA_WRITE) { - dma_sync_single_for_device(mmc_dev(host->mmc), -- host->align_addr, 128 * 4, direction); -+ host->align_addr, host->align_buffer_sz, direction); - } - - return 0; - - unmap_align: - dma_unmap_single(mmc_dev(host->mmc), host->align_addr, -- 128 * 4, direction); -+ host->align_buffer_sz, direction); - fail: - return -EINVAL; - } -@@ -592,7 +611,7 @@ static void sdhci_adma_table_post(struct sdhci_host *host, - - struct scatterlist *sg; - int i, size; -- u8 *align; -+ void *align; - char *buffer; - unsigned long flags; - bool has_unaligned; -@@ -603,12 +622,12 @@ static void sdhci_adma_table_post(struct sdhci_host *host, - direction = DMA_TO_DEVICE; - - dma_unmap_single(mmc_dev(host->mmc), host->align_addr, -- 128 * 4, direction); -+ host->align_buffer_sz, direction); - - /* Do a quick scan of the SG list for any unaligned mappings */ - has_unaligned = false; - for_each_sg(data->sg, sg, host->sg_count, i) -- if (sg_dma_address(sg) & 3) { -+ if (sg_dma_address(sg) & host->align_mask) { - has_unaligned = true; - break; - } -@@ -620,15 +639,17 @@ static void sdhci_adma_table_post(struct sdhci_host *host, - align = host->align_buffer; - - for_each_sg(data->sg, sg, host->sg_count, i) { -- if 
(sg_dma_address(sg) & 0x3) { -- size = 4 - (sg_dma_address(sg) & 0x3); -+ if (sg_dma_address(sg) & host->align_mask) { -+ size = host->align_sz - -+ (sg_dma_address(sg) & host->align_mask); - - buffer = sdhci_kmap_atomic(sg, &flags); -- WARN_ON(((long)buffer & PAGE_MASK) > (PAGE_SIZE - 3)); -+ WARN_ON(((long)buffer & (PAGE_SIZE - 1)) > -+ (PAGE_SIZE - size)); - memcpy(buffer, align, size); - sdhci_kunmap_atomic(buffer, &flags); - -- align += 4; -+ align += host->align_sz; - } - } - } -@@ -822,6 +843,10 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) - } else { - sdhci_writel(host, host->adma_addr, - SDHCI_ADMA_ADDRESS); -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ sdhci_writel(host, -+ (u64)host->adma_addr >> 32, -+ SDHCI_ADMA_ADDRESS_HI); - } - } else { - int sg_cnt; -@@ -855,10 +880,14 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd) - ctrl = sdhci_readb(host, SDHCI_HOST_CONTROL); - ctrl &= ~SDHCI_CTRL_DMA_MASK; - if ((host->flags & SDHCI_REQ_USE_DMA) && -- (host->flags & SDHCI_USE_ADMA)) -- ctrl |= SDHCI_CTRL_ADMA32; -- else -+ (host->flags & SDHCI_USE_ADMA)) { -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ ctrl |= SDHCI_CTRL_ADMA64; -+ else -+ ctrl |= SDHCI_CTRL_ADMA32; -+ } else { - ctrl |= SDHCI_CTRL_SDMA; -+ } - sdhci_writeb(host, ctrl, SDHCI_HOST_CONTROL); - } - -@@ -1797,6 +1826,10 @@ static int sdhci_do_start_signal_voltage_switch(struct sdhci_host *host, - ctrl |= SDHCI_CTRL_VDD_180; - sdhci_writew(host, ctrl, SDHCI_HOST_CONTROL2); - -+ /* Some controller need to do more when switching */ -+ if (host->ops->voltage_switch) -+ host->ops->voltage_switch(host); -+ - /* 1.8V regulator output should be stable within 5 ms */ - ctrl = sdhci_readw(host, SDHCI_HOST_CONTROL2); - if (ctrl & SDHCI_CTRL_VDD_180) -@@ -2250,7 +2283,7 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) - if (intmask & SDHCI_INT_TIMEOUT) - host->cmd->error = -ETIMEDOUT; - else if (intmask & 
(SDHCI_INT_CRC | SDHCI_INT_END_BIT | -- SDHCI_INT_INDEX)) -+ SDHCI_INT_INDEX | SDHCI_INT_ACMD12ERR)) - host->cmd->error = -EILSEQ; - - if (host->cmd->error) { -@@ -2292,32 +2325,36 @@ static void sdhci_cmd_irq(struct sdhci_host *host, u32 intmask, u32 *mask) - } - - #ifdef CONFIG_MMC_DEBUG --static void sdhci_show_adma_error(struct sdhci_host *host) -+static void sdhci_adma_show_error(struct sdhci_host *host) - { - const char *name = mmc_hostname(host->mmc); -- u8 *desc = host->adma_desc; -- __le32 *dma; -- __le16 *len; -- u8 attr; -+ void *desc = host->adma_table; - - sdhci_dumpregs(host); - - while (true) { -- dma = (__le32 *)(desc + 4); -- len = (__le16 *)(desc + 2); -- attr = *desc; -- -- DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", -- name, desc, le32_to_cpu(*dma), le16_to_cpu(*len), attr); -+ struct sdhci_adma2_64_desc *dma_desc = desc; -+ -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ DBG("%s: %p: DMA 0x%08x%08x, LEN 0x%04x, Attr=0x%02x\n", -+ name, desc, le32_to_cpu(dma_desc->addr_hi), -+ le32_to_cpu(dma_desc->addr_lo), -+ le16_to_cpu(dma_desc->len), -+ le16_to_cpu(dma_desc->cmd)); -+ else -+ DBG("%s: %p: DMA 0x%08x, LEN 0x%04x, Attr=0x%02x\n", -+ name, desc, le32_to_cpu(dma_desc->addr_lo), -+ le16_to_cpu(dma_desc->len), -+ le16_to_cpu(dma_desc->cmd)); - -- desc += 8; -+ desc += host->desc_sz; - -- if (attr & 2) -+ if (dma_desc->cmd & cpu_to_le16(ADMA2_END)) - break; - } - } - #else --static void sdhci_show_adma_error(struct sdhci_host *host) { } -+static void sdhci_adma_show_error(struct sdhci_host *host) { } - #endif - - static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) -@@ -2380,7 +2417,7 @@ static void sdhci_data_irq(struct sdhci_host *host, u32 intmask) - host->data->error = -EILSEQ; - else if (intmask & SDHCI_INT_ADMA_ERROR) { - pr_err("%s: ADMA error\n", mmc_hostname(host->mmc)); -- sdhci_show_adma_error(host); -+ sdhci_adma_show_error(host); - host->data->error = -EIO; - if (host->ops->adma_workaround) - 
host->ops->adma_workaround(host, intmask); -@@ -2859,6 +2896,16 @@ int sdhci_add_host(struct sdhci_host *host) - host->flags &= ~SDHCI_USE_ADMA; - } - -+ /* -+ * It is assumed that a 64-bit capable device has set a 64-bit DMA mask -+ * and *must* do 64-bit DMA. A driver has the opportunity to change -+ * that during the first call to ->enable_dma(). Similarly -+ * SDHCI_QUIRK2_BROKEN_64_BIT_DMA must be left to the drivers to -+ * implement. -+ */ -+ if (sdhci_readl(host, SDHCI_CAPABILITIES) & SDHCI_CAN_64BIT) -+ host->flags |= SDHCI_USE_64_BIT_DMA; -+ - if (host->flags & (SDHCI_USE_SDMA | SDHCI_USE_ADMA)) { - if (host->ops->enable_dma) { - if (host->ops->enable_dma(host)) { -@@ -2870,33 +2917,59 @@ int sdhci_add_host(struct sdhci_host *host) - } - } - -+ /* SDMA does not support 64-bit DMA */ -+ if (host->flags & SDHCI_USE_64_BIT_DMA) -+ host->flags &= ~SDHCI_USE_SDMA; -+ - if (host->flags & SDHCI_USE_ADMA) { - /* -- * We need to allocate descriptors for all sg entries -- * (128) and potentially one alignment transfer for -- * each of those entries. -+ * The DMA descriptor table size is calculated as the maximum -+ * number of segments times 2, to allow for an alignment -+ * descriptor for each segment, plus 1 for a nop end descriptor, -+ * all multipled by the descriptor size. 
- */ -- host->adma_desc = dma_alloc_coherent(mmc_dev(mmc), -- ADMA_SIZE, &host->adma_addr, -- GFP_KERNEL); -- host->align_buffer = kmalloc(128 * 4, GFP_KERNEL); -- if (!host->adma_desc || !host->align_buffer) { -- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, -- host->adma_desc, host->adma_addr); -+ if (host->flags & SDHCI_USE_64_BIT_DMA) { -+ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * -+ SDHCI_ADMA2_64_DESC_SZ; -+ host->align_buffer_sz = SDHCI_MAX_SEGS * -+ SDHCI_ADMA2_64_ALIGN; -+ host->desc_sz = SDHCI_ADMA2_64_DESC_SZ; -+ host->align_sz = SDHCI_ADMA2_64_ALIGN; -+ host->align_mask = SDHCI_ADMA2_64_ALIGN - 1; -+ } else { -+ host->adma_table_sz = (SDHCI_MAX_SEGS * 2 + 1) * -+ SDHCI_ADMA2_32_DESC_SZ; -+ host->align_buffer_sz = SDHCI_MAX_SEGS * -+ SDHCI_ADMA2_32_ALIGN; -+ host->desc_sz = SDHCI_ADMA2_32_DESC_SZ; -+ host->align_sz = SDHCI_ADMA2_32_ALIGN; -+ host->align_mask = SDHCI_ADMA2_32_ALIGN - 1; -+ } -+ host->adma_table = dma_alloc_coherent(mmc_dev(mmc), -+ host->adma_table_sz, -+ &host->adma_addr, -+ GFP_KERNEL); -+ host->align_buffer = kmalloc(host->align_buffer_sz, GFP_KERNEL); -+ if (!host->adma_table || !host->align_buffer) { -+ if (host->adma_table) -+ dma_free_coherent(mmc_dev(mmc), -+ host->adma_table_sz, -+ host->adma_table, -+ host->adma_addr); - kfree(host->align_buffer); - pr_warn("%s: Unable to allocate ADMA buffers - falling back to standard DMA\n", - mmc_hostname(mmc)); - host->flags &= ~SDHCI_USE_ADMA; -- host->adma_desc = NULL; -+ host->adma_table = NULL; - host->align_buffer = NULL; -- } else if (host->adma_addr & 3) { -+ } else if (host->adma_addr & host->align_mask) { - pr_warn("%s: unable to allocate aligned ADMA descriptor\n", - mmc_hostname(mmc)); - host->flags &= ~SDHCI_USE_ADMA; -- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, -- host->adma_desc, host->adma_addr); -+ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, -+ host->adma_table, host->adma_addr); - kfree(host->align_buffer); -- host->adma_desc = NULL; -+ host->adma_table = 
NULL; - host->align_buffer = NULL; - } - } -@@ -2995,7 +3068,8 @@ int sdhci_add_host(struct sdhci_host *host) - /* Auto-CMD23 stuff only works in ADMA or PIO. */ - if ((host->version >= SDHCI_SPEC_300) && - ((host->flags & SDHCI_USE_ADMA) || -- !(host->flags & SDHCI_USE_SDMA))) { -+ !(host->flags & SDHCI_USE_SDMA)) && -+ !(host->quirks2 & SDHCI_QUIRK2_ACMD23_BROKEN)) { - host->flags |= SDHCI_AUTO_CMD23; - DBG("%s: Auto-CMD23 available\n", mmc_hostname(mmc)); - } else { -@@ -3152,13 +3226,14 @@ int sdhci_add_host(struct sdhci_host *host) - SDHCI_MAX_CURRENT_MULTIPLIER; - } - -- /* If OCR set by external regulators, use it instead */ -+ /* If OCR set by host, use it instead. */ -+ if (host->ocr_mask) -+ ocr_avail = host->ocr_mask; -+ -+ /* If OCR set by external regulators, give it highest prio. */ - if (mmc->ocr_avail) - ocr_avail = mmc->ocr_avail; - -- if (host->ocr_mask) -- ocr_avail &= host->ocr_mask; -- - mmc->ocr_avail = ocr_avail; - mmc->ocr_avail_sdio = ocr_avail; - if (host->ocr_avail_sdio) -@@ -3185,11 +3260,11 @@ int sdhci_add_host(struct sdhci_host *host) - * can do scatter/gather or not. - */ - if (host->flags & SDHCI_USE_ADMA) -- mmc->max_segs = 128; -+ mmc->max_segs = SDHCI_MAX_SEGS; - else if (host->flags & SDHCI_USE_SDMA) - mmc->max_segs = 1; - else /* PIO */ -- mmc->max_segs = 128; -+ mmc->max_segs = SDHCI_MAX_SEGS; - - /* - * Maximum number of sectors in one transfer. Limited by DMA boundary -@@ -3287,7 +3362,8 @@ int sdhci_add_host(struct sdhci_host *host) - - pr_info("%s: SDHCI controller on %s [%s] using %s\n", - mmc_hostname(mmc), host->hw_name, dev_name(mmc_dev(mmc)), -- (host->flags & SDHCI_USE_ADMA) ? "ADMA" : -+ (host->flags & SDHCI_USE_ADMA) ? -+ (host->flags & SDHCI_USE_64_BIT_DMA) ? "ADMA 64-bit" : "ADMA" : - (host->flags & SDHCI_USE_SDMA) ? 
"DMA" : "PIO"); - - sdhci_enable_card_detection(host); -@@ -3355,12 +3431,12 @@ void sdhci_remove_host(struct sdhci_host *host, int dead) - if (!IS_ERR(mmc->supply.vqmmc)) - regulator_disable(mmc->supply.vqmmc); - -- if (host->adma_desc) -- dma_free_coherent(mmc_dev(mmc), ADMA_SIZE, -- host->adma_desc, host->adma_addr); -+ if (host->adma_table) -+ dma_free_coherent(mmc_dev(mmc), host->adma_table_sz, -+ host->adma_table, host->adma_addr); - kfree(host->align_buffer); - -- host->adma_desc = NULL; -+ host->adma_table = NULL; - host->align_buffer = NULL; - } - -diff --git a/drivers/mmc/host/sdhci.h b/drivers/mmc/host/sdhci.h -index 31896a7..5220f36 100644 ---- a/drivers/mmc/host/sdhci.h -+++ b/drivers/mmc/host/sdhci.h -@@ -227,6 +227,7 @@ - /* 55-57 reserved */ - - #define SDHCI_ADMA_ADDRESS 0x58 -+#define SDHCI_ADMA_ADDRESS_HI 0x5C - - /* 60-FB reserved */ - -@@ -266,6 +267,46 @@ - #define SDHCI_DEFAULT_BOUNDARY_SIZE (512 * 1024) - #define SDHCI_DEFAULT_BOUNDARY_ARG (ilog2(SDHCI_DEFAULT_BOUNDARY_SIZE) - 12) - -+/* ADMA2 32-bit DMA descriptor size */ -+#define SDHCI_ADMA2_32_DESC_SZ 8 -+ -+/* ADMA2 32-bit DMA alignment */ -+#define SDHCI_ADMA2_32_ALIGN 4 -+ -+/* ADMA2 32-bit descriptor */ -+struct sdhci_adma2_32_desc { -+ __le16 cmd; -+ __le16 len; -+ __le32 addr; -+} __packed __aligned(SDHCI_ADMA2_32_ALIGN); -+ -+/* ADMA2 64-bit DMA descriptor size */ -+#define SDHCI_ADMA2_64_DESC_SZ 12 -+ -+/* ADMA2 64-bit DMA alignment */ -+#define SDHCI_ADMA2_64_ALIGN 8 -+ -+/* -+ * ADMA2 64-bit descriptor. Note 12-byte descriptor can't always be 8-byte -+ * aligned. -+ */ -+struct sdhci_adma2_64_desc { -+ __le16 cmd; -+ __le16 len; -+ __le32 addr_lo; -+ __le32 addr_hi; -+} __packed __aligned(4); -+ -+#define ADMA2_TRAN_VALID 0x21 -+#define ADMA2_NOP_END_VALID 0x3 -+#define ADMA2_END 0x2 -+ -+/* -+ * Maximum segments assuming a 512KiB maximum requisition size and a minimum -+ * 4KiB page size. 
-+ */ -+#define SDHCI_MAX_SEGS 128 -+ - struct sdhci_ops { - #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS - u32 (*read_l)(struct sdhci_host *host, int reg); -@@ -296,6 +337,7 @@ struct sdhci_ops { - void (*adma_workaround)(struct sdhci_host *host, u32 intmask); - void (*platform_init)(struct sdhci_host *host); - void (*card_event)(struct sdhci_host *host); -+ void (*voltage_switch)(struct sdhci_host *host); - }; - - #ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS -diff --git a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig -index dd10646..34ce759 100644 ---- a/drivers/mtd/nand/Kconfig -+++ b/drivers/mtd/nand/Kconfig -@@ -429,7 +429,7 @@ config MTD_NAND_FSL_ELBC - - config MTD_NAND_FSL_IFC - tristate "NAND support for Freescale IFC controller" -- depends on MTD_NAND && FSL_SOC -+ depends on MTD_NAND && (FSL_SOC || ARCH_LAYERSCAPE) - select FSL_IFC - select MEMORY - help -diff --git a/drivers/mtd/nand/fsl_ifc_nand.c b/drivers/mtd/nand/fsl_ifc_nand.c -index 2338124..c8be272 100644 ---- a/drivers/mtd/nand/fsl_ifc_nand.c -+++ b/drivers/mtd/nand/fsl_ifc_nand.c -@@ -31,7 +31,6 @@ - #include - #include - --#define FSL_IFC_V1_1_0 0x01010000 - #define ERR_BYTE 0xFF /* Value returned for read - bytes when read failed */ - #define IFC_TIMEOUT_MSECS 500 /* Maximum number of mSecs to wait -@@ -234,13 +233,13 @@ static void set_addr(struct mtd_info *mtd, int column, int page_addr, int oob) - struct nand_chip *chip = mtd->priv; - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - int buf_num; - - ifc_nand_ctrl->page = page_addr; - /* Program ROW0/COL0 */ -- iowrite32be(page_addr, &ifc->ifc_nand.row0); -- iowrite32be((oob ? IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0); -+ ifc_out32(page_addr, &ifc->ifc_nand.row0); -+ ifc_out32((oob ? 
IFC_NAND_COL_MS : 0) | column, &ifc->ifc_nand.col0); - - buf_num = page_addr & priv->bufnum_mask; - -@@ -297,28 +296,28 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; - struct fsl_ifc_nand_ctrl *nctrl = ifc_nand_ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - u32 eccstat[4]; - int i; - - /* set the chip select for NAND Transaction */ -- iowrite32be(priv->bank << IFC_NAND_CSEL_SHIFT, -- &ifc->ifc_nand.nand_csel); -+ ifc_out32(priv->bank << IFC_NAND_CSEL_SHIFT, -+ &ifc->ifc_nand.nand_csel); - - dev_vdbg(priv->dev, - "%s: fir0=%08x fcr0=%08x\n", - __func__, -- ioread32be(&ifc->ifc_nand.nand_fir0), -- ioread32be(&ifc->ifc_nand.nand_fcr0)); -+ ifc_in32(&ifc->ifc_nand.nand_fir0), -+ ifc_in32(&ifc->ifc_nand.nand_fcr0)); - - ctrl->nand_stat = 0; - - /* start read/write seq */ -- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); -+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); - - /* wait for command complete flag or timeout */ - wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, -- IFC_TIMEOUT_MSECS * HZ/1000); -+ msecs_to_jiffies(IFC_TIMEOUT_MSECS)); - - /* ctrl->nand_stat will be updated from IRQ context */ - if (!ctrl->nand_stat) -@@ -337,7 +336,7 @@ static void fsl_ifc_run_command(struct mtd_info *mtd) - int sector_end = sector + chip->ecc.steps - 1; - - for (i = sector / 4; i <= sector_end / 4; i++) -- eccstat[i] = ioread32be(&ifc->ifc_nand.nand_eccstat[i]); -+ eccstat[i] = ifc_in32(&ifc->ifc_nand.nand_eccstat[i]); - - for (i = sector; i <= sector_end; i++) { - errors = check_read_ecc(mtd, ctrl, eccstat, i); -@@ -373,37 +372,37 @@ static void fsl_ifc_do_read(struct nand_chip *chip, - { - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - 
- /* Program FIR/IFC_NAND_FCR0 for Small/Large page */ - if (mtd->writesize > 512) { -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | -- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1); -- -- iowrite32be((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | -- (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT), -- &ifc->ifc_nand.nand_fcr0); -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP3_SHIFT) | -+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP4_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1); -+ -+ ifc_out32((NAND_CMD_READ0 << IFC_NAND_FCR0_CMD0_SHIFT) | -+ (NAND_CMD_READSTART << IFC_NAND_FCR0_CMD1_SHIFT), -+ &ifc->ifc_nand.nand_fcr0); - } else { -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -- (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(0x0, &ifc->ifc_nand.nand_fir1); -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -+ (IFC_FIR_OP_RBCD << IFC_NAND_FIR0_OP3_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(0x0, &ifc->ifc_nand.nand_fir1); - - if (oob) -- iowrite32be(NAND_CMD_READOOB << -- IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(NAND_CMD_READOOB << -+ IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc->ifc_nand.nand_fcr0); - else -- iowrite32be(NAND_CMD_READ0 << -- IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(NAND_CMD_READ0 << -+ IFC_NAND_FCR0_CMD0_SHIFT, -+ 
&ifc->ifc_nand.nand_fcr0); - } - } - -@@ -413,7 +412,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - struct nand_chip *chip = mtd->priv; - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - - /* clear the read buffer */ - ifc_nand_ctrl->read_bytes = 0; -@@ -423,7 +422,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - switch (command) { - /* READ0 read the entire buffer to use hardware ECC. */ - case NAND_CMD_READ0: -- iowrite32be(0, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); - set_addr(mtd, 0, page_addr, 0); - - ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; -@@ -438,7 +437,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - - /* READOOB reads only the OOB because no ECC is performed. */ - case NAND_CMD_READOOB: -- iowrite32be(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(mtd->oobsize - column, &ifc->ifc_nand.nand_fbcr); - set_addr(mtd, column, page_addr, 1); - - ifc_nand_ctrl->read_bytes = mtd->writesize + mtd->oobsize; -@@ -454,19 +453,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - if (command == NAND_CMD_PARAM) - timing = IFC_FIR_OP_RBCD; - -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | -- (timing << IFC_NAND_FIR0_OP2_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(command << IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -- iowrite32be(column, &ifc->ifc_nand.row3); -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | -+ (timing << IFC_NAND_FIR0_OP2_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(command << IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(column, &ifc->ifc_nand.row3); - - /* - * although 
currently it's 8 bytes for READID, we always read - * the maximum 256 bytes(for PARAM) - */ -- iowrite32be(256, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(256, &ifc->ifc_nand.nand_fbcr); - ifc_nand_ctrl->read_bytes = 256; - - set_addr(mtd, 0, 0, 0); -@@ -481,16 +480,16 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - - /* ERASE2 uses the block and page address from ERASE1 */ - case NAND_CMD_ERASE2: -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | -- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT), -- &ifc->ifc_nand.nand_fir0); -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP1_SHIFT) | -+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR0_OP2_SHIFT), -+ &ifc->ifc_nand.nand_fir0); - -- iowrite32be((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | -- (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT), -- &ifc->ifc_nand.nand_fcr0); -+ ifc_out32((NAND_CMD_ERASE1 << IFC_NAND_FCR0_CMD0_SHIFT) | -+ (NAND_CMD_ERASE2 << IFC_NAND_FCR0_CMD1_SHIFT), -+ &ifc->ifc_nand.nand_fcr0); - -- iowrite32be(0, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); - ifc_nand_ctrl->read_bytes = 0; - fsl_ifc_run_command(mtd); - return; -@@ -507,19 +506,18 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - (NAND_CMD_STATUS << IFC_NAND_FCR0_CMD1_SHIFT) | - (NAND_CMD_PAGEPROG << IFC_NAND_FCR0_CMD2_SHIFT); - -- iowrite32be( -- (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -- (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -- (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | -- (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be( -- (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) | -- (IFC_FIR_OP_RDSTAT << -- IFC_NAND_FIR1_OP6_SHIFT) | -- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT), -- &ifc->ifc_nand.nand_fir1); -+ ifc_out32( -+ (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) 
| -+ (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP1_SHIFT) | -+ (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP2_SHIFT) | -+ (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP3_SHIFT) | -+ (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP4_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32( -+ (IFC_FIR_OP_CW1 << IFC_NAND_FIR1_OP5_SHIFT) | -+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP6_SHIFT) | -+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP7_SHIFT), -+ &ifc->ifc_nand.nand_fir1); - } else { - nand_fcr0 = ((NAND_CMD_PAGEPROG << - IFC_NAND_FCR0_CMD1_SHIFT) | -@@ -528,20 +526,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - (NAND_CMD_STATUS << - IFC_NAND_FCR0_CMD3_SHIFT)); - -- iowrite32be( -+ ifc_out32( - (IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | - (IFC_FIR_OP_CMD2 << IFC_NAND_FIR0_OP1_SHIFT) | - (IFC_FIR_OP_CA0 << IFC_NAND_FIR0_OP2_SHIFT) | - (IFC_FIR_OP_RA0 << IFC_NAND_FIR0_OP3_SHIFT) | - (IFC_FIR_OP_WBCD << IFC_NAND_FIR0_OP4_SHIFT), - &ifc->ifc_nand.nand_fir0); -- iowrite32be( -- (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) | -- (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) | -- (IFC_FIR_OP_RDSTAT << -- IFC_NAND_FIR1_OP7_SHIFT) | -- (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT), -- &ifc->ifc_nand.nand_fir1); -+ ifc_out32( -+ (IFC_FIR_OP_CMD1 << IFC_NAND_FIR1_OP5_SHIFT) | -+ (IFC_FIR_OP_CW3 << IFC_NAND_FIR1_OP6_SHIFT) | -+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR1_OP7_SHIFT) | -+ (IFC_FIR_OP_NOP << IFC_NAND_FIR1_OP8_SHIFT), -+ &ifc->ifc_nand.nand_fir1); - - if (column >= mtd->writesize) - nand_fcr0 |= -@@ -556,7 +553,7 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - column -= mtd->writesize; - ifc_nand_ctrl->oob = 1; - } -- iowrite32be(nand_fcr0, &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(nand_fcr0, &ifc->ifc_nand.nand_fcr0); - set_addr(mtd, column, page_addr, ifc_nand_ctrl->oob); - return; - } -@@ -564,24 +561,26 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - /* PAGEPROG reuses all of the setup from SEQIN and adds the length */ - case 
NAND_CMD_PAGEPROG: { - if (ifc_nand_ctrl->oob) { -- iowrite32be(ifc_nand_ctrl->index - -- ifc_nand_ctrl->column, -- &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(ifc_nand_ctrl->index - -+ ifc_nand_ctrl->column, -+ &ifc->ifc_nand.nand_fbcr); - } else { -- iowrite32be(0, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(0, &ifc->ifc_nand.nand_fbcr); - } - - fsl_ifc_run_command(mtd); - return; - } - -- case NAND_CMD_STATUS: -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -- iowrite32be(1, &ifc->ifc_nand.nand_fbcr); -+ case NAND_CMD_STATUS: { -+ void __iomem *addr; -+ -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP1_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr); - set_addr(mtd, 0, 0, 0); - ifc_nand_ctrl->read_bytes = 1; - -@@ -591,17 +590,19 @@ static void fsl_ifc_cmdfunc(struct mtd_info *mtd, unsigned int command, - * The chip always seems to report that it is - * write-protected, even when it is not. 
- */ -+ addr = ifc_nand_ctrl->addr; - if (chip->options & NAND_BUSWIDTH_16) -- setbits16(ifc_nand_ctrl->addr, NAND_STATUS_WP); -+ ifc_out16(ifc_in16(addr) | (NAND_STATUS_WP), addr); - else -- setbits8(ifc_nand_ctrl->addr, NAND_STATUS_WP); -+ ifc_out8(ifc_in8(addr) | (NAND_STATUS_WP), addr); - return; -+ } - - case NAND_CMD_RESET: -- iowrite32be(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT, -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT, -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(NAND_CMD_RESET << IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc->ifc_nand.nand_fcr0); - fsl_ifc_run_command(mtd); - return; - -@@ -659,7 +660,7 @@ static uint8_t fsl_ifc_read_byte(struct mtd_info *mtd) - */ - if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { - offset = ifc_nand_ctrl->index++; -- return in_8(ifc_nand_ctrl->addr + offset); -+ return ifc_in8(ifc_nand_ctrl->addr + offset); - } - - dev_err(priv->dev, "%s: beyond end of buffer\n", __func__); -@@ -681,7 +682,7 @@ static uint8_t fsl_ifc_read_byte16(struct mtd_info *mtd) - * next byte. 
- */ - if (ifc_nand_ctrl->index < ifc_nand_ctrl->read_bytes) { -- data = in_be16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index); -+ data = ifc_in16(ifc_nand_ctrl->addr + ifc_nand_ctrl->index); - ifc_nand_ctrl->index += 2; - return (uint8_t) data; - } -@@ -723,22 +724,22 @@ static int fsl_ifc_wait(struct mtd_info *mtd, struct nand_chip *chip) - { - struct fsl_ifc_mtd *priv = chip->priv; - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc = ctrl->rregs; - u32 nand_fsr; - - /* Use READ_STATUS command, but wait for the device to be ready */ -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -- (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -- iowrite32be(1, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ (IFC_FIR_OP_RDSTAT << IFC_NAND_FIR0_OP1_SHIFT), -+ &ifc->ifc_nand.nand_fir0); -+ ifc_out32(NAND_CMD_STATUS << IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc->ifc_nand.nand_fcr0); -+ ifc_out32(1, &ifc->ifc_nand.nand_fbcr); - set_addr(mtd, 0, 0, 0); - ifc_nand_ctrl->read_bytes = 1; - - fsl_ifc_run_command(mtd); - -- nand_fsr = ioread32be(&ifc->ifc_nand.nand_fsr); -+ nand_fsr = ifc_in32(&ifc->ifc_nand.nand_fsr); - - /* - * The chip always seems to report that it is -@@ -825,67 +826,72 @@ static int fsl_ifc_chip_init_tail(struct mtd_info *mtd) - static void fsl_ifc_sram_init(struct fsl_ifc_mtd *priv) - { - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; -+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; - uint32_t csor = 0, csor_8k = 0, csor_ext = 0; - uint32_t cs = priv->bank; - - /* Save CSOR and CSOR_ext */ -- csor = ioread32be(&ifc->csor_cs[cs].csor); -- csor_ext = ioread32be(&ifc->csor_cs[cs].csor_ext); -+ csor = 
ifc_in32(&ifc_global->csor_cs[cs].csor); -+ csor_ext = ifc_in32(&ifc_global->csor_cs[cs].csor_ext); - - /* chage PageSize 8K and SpareSize 1K*/ - csor_8k = (csor & ~(CSOR_NAND_PGS_MASK)) | 0x0018C000; -- iowrite32be(csor_8k, &ifc->csor_cs[cs].csor); -- iowrite32be(0x0000400, &ifc->csor_cs[cs].csor_ext); -+ ifc_out32(csor_8k, &ifc_global->csor_cs[cs].csor); -+ ifc_out32(0x0000400, &ifc_global->csor_cs[cs].csor_ext); - - /* READID */ -- iowrite32be((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | -+ ifc_out32((IFC_FIR_OP_CW0 << IFC_NAND_FIR0_OP0_SHIFT) | - (IFC_FIR_OP_UA << IFC_NAND_FIR0_OP1_SHIFT) | - (IFC_FIR_OP_RB << IFC_NAND_FIR0_OP2_SHIFT), -- &ifc->ifc_nand.nand_fir0); -- iowrite32be(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, -- &ifc->ifc_nand.nand_fcr0); -- iowrite32be(0x0, &ifc->ifc_nand.row3); -+ &ifc_runtime->ifc_nand.nand_fir0); -+ ifc_out32(NAND_CMD_READID << IFC_NAND_FCR0_CMD0_SHIFT, -+ &ifc_runtime->ifc_nand.nand_fcr0); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row3); - -- iowrite32be(0x0, &ifc->ifc_nand.nand_fbcr); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.nand_fbcr); - - /* Program ROW0/COL0 */ -- iowrite32be(0x0, &ifc->ifc_nand.row0); -- iowrite32be(0x0, &ifc->ifc_nand.col0); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.row0); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.col0); - - /* set the chip select for NAND Transaction */ -- iowrite32be(cs << IFC_NAND_CSEL_SHIFT, &ifc->ifc_nand.nand_csel); -+ ifc_out32(cs << IFC_NAND_CSEL_SHIFT, -+ &ifc_runtime->ifc_nand.nand_csel); - - /* start read seq */ -- iowrite32be(IFC_NAND_SEQ_STRT_FIR_STRT, &ifc->ifc_nand.nandseq_strt); -+ ifc_out32(IFC_NAND_SEQ_STRT_FIR_STRT, -+ &ifc_runtime->ifc_nand.nandseq_strt); - - /* wait for command complete flag or timeout */ - wait_event_timeout(ctrl->nand_wait, ctrl->nand_stat, -- IFC_TIMEOUT_MSECS * HZ/1000); -+ msecs_to_jiffies(IFC_TIMEOUT_MSECS)); - - if (ctrl->nand_stat != IFC_NAND_EVTER_STAT_OPC) - printk(KERN_ERR "fsl-ifc: Failed to Initialise SRAM\n"); - - /* Restore CSOR and 
CSOR_ext */ -- iowrite32be(csor, &ifc->csor_cs[cs].csor); -- iowrite32be(csor_ext, &ifc->csor_cs[cs].csor_ext); -+ ifc_out32(csor, &ifc_global->csor_cs[cs].csor); -+ ifc_out32(csor_ext, &ifc_global->csor_cs[cs].csor_ext); - } - - static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) - { - struct fsl_ifc_ctrl *ctrl = priv->ctrl; -- struct fsl_ifc_regs __iomem *ifc = ctrl->regs; -+ struct fsl_ifc_global __iomem *ifc_global = ctrl->gregs; -+ struct fsl_ifc_runtime __iomem *ifc_runtime = ctrl->rregs; - struct nand_chip *chip = &priv->chip; - struct nand_ecclayout *layout; -- u32 csor, ver; -+ u32 csor; - - /* Fill in fsl_ifc_mtd structure */ - priv->mtd.priv = chip; -- priv->mtd.owner = THIS_MODULE; -+ priv->mtd.dev.parent = priv->dev; - - /* fill in nand_chip structure */ - /* set up function call table */ -- if ((ioread32be(&ifc->cspr_cs[priv->bank].cspr)) & CSPR_PORT_SIZE_16) -+ if ((ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr)) -+ & CSPR_PORT_SIZE_16) - chip->read_byte = fsl_ifc_read_byte16; - else - chip->read_byte = fsl_ifc_read_byte; -@@ -899,13 +905,14 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) - chip->bbt_td = &bbt_main_descr; - chip->bbt_md = &bbt_mirror_descr; - -- iowrite32be(0x0, &ifc->ifc_nand.ncfgr); -+ ifc_out32(0x0, &ifc_runtime->ifc_nand.ncfgr); - - /* set up nand options */ - chip->bbt_options = NAND_BBT_USE_FLASH; - chip->options = NAND_NO_SUBPAGE_WRITE; - -- if (ioread32be(&ifc->cspr_cs[priv->bank].cspr) & CSPR_PORT_SIZE_16) { -+ if (ifc_in32(&ifc_global->cspr_cs[priv->bank].cspr) -+ & CSPR_PORT_SIZE_16) { - chip->read_byte = fsl_ifc_read_byte16; - chip->options |= NAND_BUSWIDTH_16; - } else { -@@ -918,7 +925,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) - chip->ecc.read_page = fsl_ifc_read_page; - chip->ecc.write_page = fsl_ifc_write_page; - -- csor = ioread32be(&ifc->csor_cs[priv->bank].csor); -+ csor = ifc_in32(&ifc_global->csor_cs[priv->bank].csor); - - /* Hardware generates ECC per 512 Bytes */ - chip->ecc.size 
= 512; -@@ -984,8 +991,7 @@ static int fsl_ifc_chip_init(struct fsl_ifc_mtd *priv) - chip->ecc.mode = NAND_ECC_SOFT; - } - -- ver = ioread32be(&ifc->ifc_rev); -- if (ver == FSL_IFC_V1_1_0) -+ if (ctrl->version == FSL_IFC_VERSION_1_1_0) - fsl_ifc_sram_init(priv); - - return 0; -@@ -1005,10 +1011,10 @@ static int fsl_ifc_chip_remove(struct fsl_ifc_mtd *priv) - return 0; - } - --static int match_bank(struct fsl_ifc_regs __iomem *ifc, int bank, -+static int match_bank(struct fsl_ifc_global __iomem *ifc_global, int bank, - phys_addr_t addr) - { -- u32 cspr = ioread32be(&ifc->cspr_cs[bank].cspr); -+ u32 cspr = ifc_in32(&ifc_global->cspr_cs[bank].cspr); - - if (!(cspr & CSPR_V)) - return 0; -@@ -1022,7 +1028,7 @@ static DEFINE_MUTEX(fsl_ifc_nand_mutex); - - static int fsl_ifc_nand_probe(struct platform_device *dev) - { -- struct fsl_ifc_regs __iomem *ifc; -+ struct fsl_ifc_runtime __iomem *ifc; - struct fsl_ifc_mtd *priv; - struct resource res; - static const char *part_probe_types[] -@@ -1033,9 +1039,9 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) - struct mtd_part_parser_data ppdata; - - ppdata.of_node = dev->dev.of_node; -- if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->regs) -+ if (!fsl_ifc_ctrl_dev || !fsl_ifc_ctrl_dev->rregs) - return -ENODEV; -- ifc = fsl_ifc_ctrl_dev->regs; -+ ifc = fsl_ifc_ctrl_dev->rregs; - - /* get, allocate and map the memory resource */ - ret = of_address_to_resource(node, 0, &res); -@@ -1045,12 +1051,12 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) - } - - /* find which chip select it is connected to */ -- for (bank = 0; bank < FSL_IFC_BANK_COUNT; bank++) { -- if (match_bank(ifc, bank, res.start)) -+ for (bank = 0; bank < fsl_ifc_ctrl_dev->banks; bank++) { -+ if (match_bank(fsl_ifc_ctrl_dev->gregs, bank, res.start)) - break; - } - -- if (bank >= FSL_IFC_BANK_COUNT) { -+ if (bank >= fsl_ifc_ctrl_dev->banks) { - dev_err(&dev->dev, "%s: address did not match any chip selects\n", - __func__); - return -ENODEV; -@@ 
-1094,16 +1100,16 @@ static int fsl_ifc_nand_probe(struct platform_device *dev) - - dev_set_drvdata(priv->dev, priv); - -- iowrite32be(IFC_NAND_EVTER_EN_OPC_EN | -- IFC_NAND_EVTER_EN_FTOER_EN | -- IFC_NAND_EVTER_EN_WPER_EN, -- &ifc->ifc_nand.nand_evter_en); -+ ifc_out32(IFC_NAND_EVTER_EN_OPC_EN | -+ IFC_NAND_EVTER_EN_FTOER_EN | -+ IFC_NAND_EVTER_EN_WPER_EN, -+ &ifc->ifc_nand.nand_evter_en); - - /* enable NAND Machine Interrupts */ -- iowrite32be(IFC_NAND_EVTER_INTR_OPCIR_EN | -- IFC_NAND_EVTER_INTR_FTOERIR_EN | -- IFC_NAND_EVTER_INTR_WPERIR_EN, -- &ifc->ifc_nand.nand_evter_intr_en); -+ ifc_out32(IFC_NAND_EVTER_INTR_OPCIR_EN | -+ IFC_NAND_EVTER_INTR_FTOERIR_EN | -+ IFC_NAND_EVTER_INTR_WPERIR_EN, -+ &ifc->ifc_nand.nand_evter_intr_en); - priv->mtd.name = kasprintf(GFP_KERNEL, "%llx.flash", (u64)res.start); - if (!priv->mtd.name) { - ret = -ENOMEM; -@@ -1163,6 +1169,7 @@ static const struct of_device_id fsl_ifc_nand_match[] = { - }, - {} - }; -+MODULE_DEVICE_TABLE(of, fsl_ifc_nand_match); - - static struct platform_driver fsl_ifc_nand_driver = { - .driver = { -diff --git a/drivers/net/ethernet/freescale/gianfar.c b/drivers/net/ethernet/freescale/gianfar.c -index a4a7396..0359cfd 100644 ---- a/drivers/net/ethernet/freescale/gianfar.c -+++ b/drivers/net/ethernet/freescale/gianfar.c -@@ -86,11 +86,11 @@ - #include - #include - #include -+#include - - #include - #ifdef CONFIG_PPC - #include --#include - #endif - #include - #include -@@ -1720,8 +1720,10 @@ static void gfar_configure_serdes(struct net_device *dev) - * everything for us? Resetting it takes the link down and requires - * several seconds for it to come back. 
- */ -- if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) -+ if (phy_read(tbiphy, MII_BMSR) & BMSR_LSTATUS) { -+ put_device(&tbiphy->dev); - return; -+ } - - /* Single clk mode, mii mode off(for serdes communication) */ - phy_write(tbiphy, MII_TBICON, TBICON_CLK_SELECT); -diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig -index 2973c60..cdc9f8a 100644 ---- a/drivers/net/phy/Kconfig -+++ b/drivers/net/phy/Kconfig -@@ -65,6 +65,11 @@ config VITESSE_PHY - ---help--- - Currently supports the vsc8244 - -+config TERANETICS_PHY -+ tristate "Drivers for the Teranetics PHYs" -+ ---help--- -+ Currently supports the Teranetics TN2020 -+ - config SMSC_PHY - tristate "Drivers for SMSC PHYs" - ---help--- -@@ -124,8 +129,8 @@ config MICREL_PHY - Supports the KSZ9021, VSC8201, KS8001 PHYs. - - config FIXED_PHY -- bool "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" -- depends on PHYLIB=y -+ tristate "Driver for MDIO Bus/PHY emulation with fixed speed/link PHYs" -+ depends on PHYLIB - ---help--- - Adds the platform "fixed" MDIO Bus to cover the boards that use - PHYs that are not connected to the real MDIO bus. -@@ -207,6 +212,11 @@ config MDIO_BUS_MUX_MMIOREG - the FPGA's registers. - - Currently, only 8-bit registers are supported. -+config FSL_10GBASE_KR -+ tristate "Support for 10GBASE-KR on Freescale XFI interface" -+ depends on OF_MDIO -+ help -+ This module provides a driver for Freescale XFI's 10GBASE-KR. 
- - config MDIO_BCM_UNIMAC - tristate "Broadcom UniMAC MDIO bus controller" -diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile -index b5c8f9f..8ad4ac6 100644 ---- a/drivers/net/phy/Makefile -+++ b/drivers/net/phy/Makefile -@@ -10,6 +10,7 @@ obj-$(CONFIG_CICADA_PHY) += cicada.o - obj-$(CONFIG_LXT_PHY) += lxt.o - obj-$(CONFIG_QSEMI_PHY) += qsemi.o - obj-$(CONFIG_SMSC_PHY) += smsc.o -+obj-$(CONFIG_TERANETICS_PHY) += teranetics.o - obj-$(CONFIG_VITESSE_PHY) += vitesse.o - obj-$(CONFIG_BROADCOM_PHY) += broadcom.o - obj-$(CONFIG_BCM63XX_PHY) += bcm63xx.o -@@ -18,7 +19,7 @@ obj-$(CONFIG_BCM87XX_PHY) += bcm87xx.o - obj-$(CONFIG_ICPLUS_PHY) += icplus.o - obj-$(CONFIG_REALTEK_PHY) += realtek.o - obj-$(CONFIG_LSI_ET1011C_PHY) += et1011c.o --obj-$(CONFIG_FIXED_PHY) += fixed.o -+obj-$(CONFIG_FIXED_PHY) += fixed_phy.o - obj-$(CONFIG_MDIO_BITBANG) += mdio-bitbang.o - obj-$(CONFIG_MDIO_GPIO) += mdio-gpio.o - obj-$(CONFIG_NATIONAL_PHY) += national.o -@@ -32,6 +33,7 @@ obj-$(CONFIG_AMD_PHY) += amd.o - obj-$(CONFIG_MDIO_BUS_MUX) += mdio-mux.o - obj-$(CONFIG_MDIO_BUS_MUX_GPIO) += mdio-mux-gpio.o - obj-$(CONFIG_MDIO_BUS_MUX_MMIOREG) += mdio-mux-mmioreg.o -+obj-$(CONFIG_FSL_10GBASE_KR) += fsl_10gkr.o - obj-$(CONFIG_MDIO_SUN4I) += mdio-sun4i.o - obj-$(CONFIG_MDIO_MOXART) += mdio-moxart.o - obj-$(CONFIG_AMD_XGBE_PHY) += amd-xgbe-phy.o -diff --git a/drivers/net/phy/at803x.c b/drivers/net/phy/at803x.c -index fdc1b41..a4f0886 100644 ---- a/drivers/net/phy/at803x.c -+++ b/drivers/net/phy/at803x.c -@@ -307,6 +307,8 @@ static struct phy_driver at803x_driver[] = { - .flags = PHY_HAS_INTERRUPT, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, -+ .ack_interrupt = at803x_ack_interrupt, -+ .config_intr = at803x_config_intr, - .driver = { - .owner = THIS_MODULE, - }, -@@ -326,6 +328,8 @@ static struct phy_driver at803x_driver[] = { - .flags = PHY_HAS_INTERRUPT, - .config_aneg = genphy_config_aneg, - .read_status = genphy_read_status, -+ .ack_interrupt = 
at803x_ack_interrupt, -+ .config_intr = at803x_config_intr, - .driver = { - .owner = THIS_MODULE, - }, -diff --git a/drivers/net/phy/fixed.c b/drivers/net/phy/fixed.c -deleted file mode 100644 -index 47872ca..0000000 ---- a/drivers/net/phy/fixed.c -+++ /dev/null -@@ -1,336 +0,0 @@ --/* -- * Fixed MDIO bus (MDIO bus emulation with fixed PHYs) -- * -- * Author: Vitaly Bordug -- * Anton Vorontsov -- * -- * Copyright (c) 2006-2007 MontaVista Software, Inc. -- * -- * This program is free software; you can redistribute it and/or modify it -- * under the terms of the GNU General Public License as published by the -- * Free Software Foundation; either version 2 of the License, or (at your -- * option) any later version. -- */ -- --#include --#include --#include --#include --#include --#include --#include --#include --#include --#include -- --#define MII_REGS_NUM 29 -- --struct fixed_mdio_bus { -- int irqs[PHY_MAX_ADDR]; -- struct mii_bus *mii_bus; -- struct list_head phys; --}; -- --struct fixed_phy { -- int addr; -- u16 regs[MII_REGS_NUM]; -- struct phy_device *phydev; -- struct fixed_phy_status status; -- int (*link_update)(struct net_device *, struct fixed_phy_status *); -- struct list_head node; --}; -- --static struct platform_device *pdev; --static struct fixed_mdio_bus platform_fmb = { -- .phys = LIST_HEAD_INIT(platform_fmb.phys), --}; -- --static int fixed_phy_update_regs(struct fixed_phy *fp) --{ -- u16 bmsr = BMSR_ANEGCAPABLE; -- u16 bmcr = 0; -- u16 lpagb = 0; -- u16 lpa = 0; -- -- if (fp->status.duplex) { -- bmcr |= BMCR_FULLDPLX; -- -- switch (fp->status.speed) { -- case 1000: -- bmsr |= BMSR_ESTATEN; -- bmcr |= BMCR_SPEED1000; -- lpagb |= LPA_1000FULL; -- break; -- case 100: -- bmsr |= BMSR_100FULL; -- bmcr |= BMCR_SPEED100; -- lpa |= LPA_100FULL; -- break; -- case 10: -- bmsr |= BMSR_10FULL; -- lpa |= LPA_10FULL; -- break; -- default: -- pr_warn("fixed phy: unknown speed\n"); -- return -EINVAL; -- } -- } else { -- switch (fp->status.speed) { -- case 1000: -- 
bmsr |= BMSR_ESTATEN; -- bmcr |= BMCR_SPEED1000; -- lpagb |= LPA_1000HALF; -- break; -- case 100: -- bmsr |= BMSR_100HALF; -- bmcr |= BMCR_SPEED100; -- lpa |= LPA_100HALF; -- break; -- case 10: -- bmsr |= BMSR_10HALF; -- lpa |= LPA_10HALF; -- break; -- default: -- pr_warn("fixed phy: unknown speed\n"); -- return -EINVAL; -- } -- } -- -- if (fp->status.link) -- bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; -- -- if (fp->status.pause) -- lpa |= LPA_PAUSE_CAP; -- -- if (fp->status.asym_pause) -- lpa |= LPA_PAUSE_ASYM; -- -- fp->regs[MII_PHYSID1] = 0; -- fp->regs[MII_PHYSID2] = 0; -- -- fp->regs[MII_BMSR] = bmsr; -- fp->regs[MII_BMCR] = bmcr; -- fp->regs[MII_LPA] = lpa; -- fp->regs[MII_STAT1000] = lpagb; -- -- return 0; --} -- --static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) --{ -- struct fixed_mdio_bus *fmb = bus->priv; -- struct fixed_phy *fp; -- -- if (reg_num >= MII_REGS_NUM) -- return -1; -- -- /* We do not support emulating Clause 45 over Clause 22 register reads -- * return an error instead of bogus data. -- */ -- switch (reg_num) { -- case MII_MMD_CTRL: -- case MII_MMD_DATA: -- return -1; -- default: -- break; -- } -- -- list_for_each_entry(fp, &fmb->phys, node) { -- if (fp->addr == phy_addr) { -- /* Issue callback if user registered it. */ -- if (fp->link_update) { -- fp->link_update(fp->phydev->attached_dev, -- &fp->status); -- fixed_phy_update_regs(fp); -- } -- return fp->regs[reg_num]; -- } -- } -- -- return 0xFFFF; --} -- --static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num, -- u16 val) --{ -- return 0; --} -- --/* -- * If something weird is required to be done with link/speed, -- * network driver is able to assign a function to implement this. -- * May be useful for PHY's that need to be software-driven. 
-- */ --int fixed_phy_set_link_update(struct phy_device *phydev, -- int (*link_update)(struct net_device *, -- struct fixed_phy_status *)) --{ -- struct fixed_mdio_bus *fmb = &platform_fmb; -- struct fixed_phy *fp; -- -- if (!link_update || !phydev || !phydev->bus) -- return -EINVAL; -- -- list_for_each_entry(fp, &fmb->phys, node) { -- if (fp->addr == phydev->addr) { -- fp->link_update = link_update; -- fp->phydev = phydev; -- return 0; -- } -- } -- -- return -ENOENT; --} --EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); -- --int fixed_phy_add(unsigned int irq, int phy_addr, -- struct fixed_phy_status *status) --{ -- int ret; -- struct fixed_mdio_bus *fmb = &platform_fmb; -- struct fixed_phy *fp; -- -- fp = kzalloc(sizeof(*fp), GFP_KERNEL); -- if (!fp) -- return -ENOMEM; -- -- memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); -- -- fmb->irqs[phy_addr] = irq; -- -- fp->addr = phy_addr; -- fp->status = *status; -- -- ret = fixed_phy_update_regs(fp); -- if (ret) -- goto err_regs; -- -- list_add_tail(&fp->node, &fmb->phys); -- -- return 0; -- --err_regs: -- kfree(fp); -- return ret; --} --EXPORT_SYMBOL_GPL(fixed_phy_add); -- --void fixed_phy_del(int phy_addr) --{ -- struct fixed_mdio_bus *fmb = &platform_fmb; -- struct fixed_phy *fp, *tmp; -- -- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { -- if (fp->addr == phy_addr) { -- list_del(&fp->node); -- kfree(fp); -- return; -- } -- } --} --EXPORT_SYMBOL_GPL(fixed_phy_del); -- --static int phy_fixed_addr; --static DEFINE_SPINLOCK(phy_fixed_addr_lock); -- --struct phy_device *fixed_phy_register(unsigned int irq, -- struct fixed_phy_status *status, -- struct device_node *np) --{ -- struct fixed_mdio_bus *fmb = &platform_fmb; -- struct phy_device *phy; -- int phy_addr; -- int ret; -- -- /* Get the next available PHY address, up to PHY_MAX_ADDR */ -- spin_lock(&phy_fixed_addr_lock); -- if (phy_fixed_addr == PHY_MAX_ADDR) { -- spin_unlock(&phy_fixed_addr_lock); -- return ERR_PTR(-ENOSPC); -- } -- phy_addr = 
phy_fixed_addr++; -- spin_unlock(&phy_fixed_addr_lock); -- -- ret = fixed_phy_add(PHY_POLL, phy_addr, status); -- if (ret < 0) -- return ERR_PTR(ret); -- -- phy = get_phy_device(fmb->mii_bus, phy_addr, false); -- if (!phy || IS_ERR(phy)) { -- fixed_phy_del(phy_addr); -- return ERR_PTR(-EINVAL); -- } -- -- of_node_get(np); -- phy->dev.of_node = np; -- -- ret = phy_device_register(phy); -- if (ret) { -- phy_device_free(phy); -- of_node_put(np); -- fixed_phy_del(phy_addr); -- return ERR_PTR(ret); -- } -- -- return phy; --} -- --static int __init fixed_mdio_bus_init(void) --{ -- struct fixed_mdio_bus *fmb = &platform_fmb; -- int ret; -- -- pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); -- if (IS_ERR(pdev)) { -- ret = PTR_ERR(pdev); -- goto err_pdev; -- } -- -- fmb->mii_bus = mdiobus_alloc(); -- if (fmb->mii_bus == NULL) { -- ret = -ENOMEM; -- goto err_mdiobus_reg; -- } -- -- snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0"); -- fmb->mii_bus->name = "Fixed MDIO Bus"; -- fmb->mii_bus->priv = fmb; -- fmb->mii_bus->parent = &pdev->dev; -- fmb->mii_bus->read = &fixed_mdio_read; -- fmb->mii_bus->write = &fixed_mdio_write; -- fmb->mii_bus->irq = fmb->irqs; -- -- ret = mdiobus_register(fmb->mii_bus); -- if (ret) -- goto err_mdiobus_alloc; -- -- return 0; -- --err_mdiobus_alloc: -- mdiobus_free(fmb->mii_bus); --err_mdiobus_reg: -- platform_device_unregister(pdev); --err_pdev: -- return ret; --} --module_init(fixed_mdio_bus_init); -- --static void __exit fixed_mdio_bus_exit(void) --{ -- struct fixed_mdio_bus *fmb = &platform_fmb; -- struct fixed_phy *fp, *tmp; -- -- mdiobus_unregister(fmb->mii_bus); -- mdiobus_free(fmb->mii_bus); -- platform_device_unregister(pdev); -- -- list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { -- list_del(&fp->node); -- kfree(fp); -- } --} --module_exit(fixed_mdio_bus_exit); -- --MODULE_DESCRIPTION("Fixed MDIO bus (MDIO bus emulation with fixed PHYs)"); --MODULE_AUTHOR("Vitaly Bordug"); --MODULE_LICENSE("GPL"); -diff 
--git a/drivers/net/phy/fixed_phy.c b/drivers/net/phy/fixed_phy.c -new file mode 100644 -index 0000000..88b8194 ---- /dev/null -+++ b/drivers/net/phy/fixed_phy.c -@@ -0,0 +1,370 @@ -+/* -+ * Fixed MDIO bus (MDIO bus emulation with fixed PHYs) -+ * -+ * Author: Vitaly Bordug -+ * Anton Vorontsov -+ * -+ * Copyright (c) 2006-2007 MontaVista Software, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify it -+ * under the terms of the GNU General Public License as published by the -+ * Free Software Foundation; either version 2 of the License, or (at your -+ * option) any later version. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define MII_REGS_NUM 29 -+ -+struct fixed_mdio_bus { -+ int irqs[PHY_MAX_ADDR]; -+ struct mii_bus *mii_bus; -+ struct list_head phys; -+}; -+ -+struct fixed_phy { -+ int addr; -+ u16 regs[MII_REGS_NUM]; -+ struct phy_device *phydev; -+ struct fixed_phy_status status; -+ int (*link_update)(struct net_device *, struct fixed_phy_status *); -+ struct list_head node; -+}; -+ -+static struct platform_device *pdev; -+static struct fixed_mdio_bus platform_fmb = { -+ .phys = LIST_HEAD_INIT(platform_fmb.phys), -+}; -+ -+static int fixed_phy_update_regs(struct fixed_phy *fp) -+{ -+ u16 bmsr = BMSR_ANEGCAPABLE; -+ u16 bmcr = 0; -+ u16 lpagb = 0; -+ u16 lpa = 0; -+ -+ if (fp->status.duplex) { -+ bmcr |= BMCR_FULLDPLX; -+ -+ switch (fp->status.speed) { -+ case 10000: -+ break; -+ case 1000: -+ bmsr |= BMSR_ESTATEN; -+ bmcr |= BMCR_SPEED1000; -+ lpagb |= LPA_1000FULL; -+ break; -+ case 100: -+ bmsr |= BMSR_100FULL; -+ bmcr |= BMCR_SPEED100; -+ lpa |= LPA_100FULL; -+ break; -+ case 10: -+ bmsr |= BMSR_10FULL; -+ lpa |= LPA_10FULL; -+ break; -+ default: -+ pr_warn("fixed phy: unknown speed\n"); -+ return -EINVAL; -+ } -+ } else { -+ switch (fp->status.speed) { -+ case 10000: -+ break; -+ case 1000: -+ bmsr |= BMSR_ESTATEN; -+ bmcr |= BMCR_SPEED1000; 
-+ lpagb |= LPA_1000HALF; -+ break; -+ case 100: -+ bmsr |= BMSR_100HALF; -+ bmcr |= BMCR_SPEED100; -+ lpa |= LPA_100HALF; -+ break; -+ case 10: -+ bmsr |= BMSR_10HALF; -+ lpa |= LPA_10HALF; -+ break; -+ default: -+ pr_warn("fixed phy: unknown speed\n"); -+ return -EINVAL; -+ } -+ } -+ -+ if (fp->status.link) -+ bmsr |= BMSR_LSTATUS | BMSR_ANEGCOMPLETE; -+ -+ if (fp->status.pause) -+ lpa |= LPA_PAUSE_CAP; -+ -+ if (fp->status.asym_pause) -+ lpa |= LPA_PAUSE_ASYM; -+ -+ fp->regs[MII_PHYSID1] = 0; -+ fp->regs[MII_PHYSID2] = 0; -+ -+ fp->regs[MII_BMSR] = bmsr; -+ fp->regs[MII_BMCR] = bmcr; -+ fp->regs[MII_LPA] = lpa; -+ fp->regs[MII_STAT1000] = lpagb; -+ -+ return 0; -+} -+ -+static int fixed_mdio_read(struct mii_bus *bus, int phy_addr, int reg_num) -+{ -+ struct fixed_mdio_bus *fmb = bus->priv; -+ struct fixed_phy *fp; -+ -+ if (reg_num >= MII_REGS_NUM) -+ return -1; -+ -+ /* We do not support emulating Clause 45 over Clause 22 register reads -+ * return an error instead of bogus data. -+ */ -+ switch (reg_num) { -+ case MII_MMD_CTRL: -+ case MII_MMD_DATA: -+ return -1; -+ default: -+ break; -+ } -+ -+ list_for_each_entry(fp, &fmb->phys, node) { -+ if (fp->addr == phy_addr) { -+ /* Issue callback if user registered it. */ -+ if (fp->link_update) { -+ fp->link_update(fp->phydev->attached_dev, -+ &fp->status); -+ fixed_phy_update_regs(fp); -+ } -+ return fp->regs[reg_num]; -+ } -+ } -+ -+ return 0xFFFF; -+} -+ -+static int fixed_mdio_write(struct mii_bus *bus, int phy_addr, int reg_num, -+ u16 val) -+{ -+ return 0; -+} -+ -+/* -+ * If something weird is required to be done with link/speed, -+ * network driver is able to assign a function to implement this. -+ * May be useful for PHY's that need to be software-driven. 
-+ */ -+int fixed_phy_set_link_update(struct phy_device *phydev, -+ int (*link_update)(struct net_device *, -+ struct fixed_phy_status *)) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct fixed_phy *fp; -+ -+ if (!phydev || !phydev->bus) -+ return -EINVAL; -+ -+ list_for_each_entry(fp, &fmb->phys, node) { -+ if (fp->addr == phydev->addr) { -+ fp->link_update = link_update; -+ fp->phydev = phydev; -+ return 0; -+ } -+ } -+ -+ return -ENOENT; -+} -+EXPORT_SYMBOL_GPL(fixed_phy_set_link_update); -+ -+int fixed_phy_update_state(struct phy_device *phydev, -+ const struct fixed_phy_status *status, -+ const struct fixed_phy_status *changed) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct fixed_phy *fp; -+ -+ if (!phydev || !phydev->bus) -+ return -EINVAL; -+ -+ list_for_each_entry(fp, &fmb->phys, node) { -+ if (fp->addr == phydev->addr) { -+#define _UPD(x) if (changed->x) \ -+ fp->status.x = status->x -+ _UPD(link); -+ _UPD(speed); -+ _UPD(duplex); -+ _UPD(pause); -+ _UPD(asym_pause); -+#undef _UPD -+ fixed_phy_update_regs(fp); -+ return 0; -+ } -+ } -+ -+ return -ENOENT; -+} -+EXPORT_SYMBOL(fixed_phy_update_state); -+ -+int fixed_phy_add(unsigned int irq, int phy_addr, -+ struct fixed_phy_status *status) -+{ -+ int ret; -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct fixed_phy *fp; -+ -+ fp = kzalloc(sizeof(*fp), GFP_KERNEL); -+ if (!fp) -+ return -ENOMEM; -+ -+ memset(fp->regs, 0xFF, sizeof(fp->regs[0]) * MII_REGS_NUM); -+ -+ fmb->irqs[phy_addr] = irq; -+ -+ fp->addr = phy_addr; -+ fp->status = *status; -+ -+ ret = fixed_phy_update_regs(fp); -+ if (ret) -+ goto err_regs; -+ -+ list_add_tail(&fp->node, &fmb->phys); -+ -+ return 0; -+ -+err_regs: -+ kfree(fp); -+ return ret; -+} -+EXPORT_SYMBOL_GPL(fixed_phy_add); -+ -+void fixed_phy_del(int phy_addr) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct fixed_phy *fp, *tmp; -+ -+ list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { -+ if (fp->addr == phy_addr) { -+ 
list_del(&fp->node); -+ kfree(fp); -+ return; -+ } -+ } -+} -+EXPORT_SYMBOL_GPL(fixed_phy_del); -+ -+static int phy_fixed_addr; -+static DEFINE_SPINLOCK(phy_fixed_addr_lock); -+ -+struct phy_device *fixed_phy_register(unsigned int irq, -+ struct fixed_phy_status *status, -+ struct device_node *np) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct phy_device *phy; -+ int phy_addr; -+ int ret; -+ -+ /* Get the next available PHY address, up to PHY_MAX_ADDR */ -+ spin_lock(&phy_fixed_addr_lock); -+ if (phy_fixed_addr == PHY_MAX_ADDR) { -+ spin_unlock(&phy_fixed_addr_lock); -+ return ERR_PTR(-ENOSPC); -+ } -+ phy_addr = phy_fixed_addr++; -+ spin_unlock(&phy_fixed_addr_lock); -+ -+ ret = fixed_phy_add(PHY_POLL, phy_addr, status); -+ if (ret < 0) -+ return ERR_PTR(ret); -+ -+ phy = get_phy_device(fmb->mii_bus, phy_addr, false); -+ if (!phy || IS_ERR(phy)) { -+ fixed_phy_del(phy_addr); -+ return ERR_PTR(-EINVAL); -+ } -+ -+ of_node_get(np); -+ phy->dev.of_node = np; -+ -+ ret = phy_device_register(phy); -+ if (ret) { -+ phy_device_free(phy); -+ of_node_put(np); -+ fixed_phy_del(phy_addr); -+ return ERR_PTR(ret); -+ } -+ -+ return phy; -+} -+EXPORT_SYMBOL_GPL(fixed_phy_register); -+ -+static int __init fixed_mdio_bus_init(void) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ int ret; -+ -+ pdev = platform_device_register_simple("Fixed MDIO bus", 0, NULL, 0); -+ if (IS_ERR(pdev)) { -+ ret = PTR_ERR(pdev); -+ goto err_pdev; -+ } -+ -+ fmb->mii_bus = mdiobus_alloc(); -+ if (fmb->mii_bus == NULL) { -+ ret = -ENOMEM; -+ goto err_mdiobus_reg; -+ } -+ -+ snprintf(fmb->mii_bus->id, MII_BUS_ID_SIZE, "fixed-0"); -+ fmb->mii_bus->name = "Fixed MDIO Bus"; -+ fmb->mii_bus->priv = fmb; -+ fmb->mii_bus->parent = &pdev->dev; -+ fmb->mii_bus->read = &fixed_mdio_read; -+ fmb->mii_bus->write = &fixed_mdio_write; -+ fmb->mii_bus->irq = fmb->irqs; -+ -+ ret = mdiobus_register(fmb->mii_bus); -+ if (ret) -+ goto err_mdiobus_alloc; -+ -+ return 0; -+ -+err_mdiobus_alloc: -+ 
mdiobus_free(fmb->mii_bus); -+err_mdiobus_reg: -+ platform_device_unregister(pdev); -+err_pdev: -+ return ret; -+} -+module_init(fixed_mdio_bus_init); -+ -+static void __exit fixed_mdio_bus_exit(void) -+{ -+ struct fixed_mdio_bus *fmb = &platform_fmb; -+ struct fixed_phy *fp, *tmp; -+ -+ mdiobus_unregister(fmb->mii_bus); -+ mdiobus_free(fmb->mii_bus); -+ platform_device_unregister(pdev); -+ -+ list_for_each_entry_safe(fp, tmp, &fmb->phys, node) { -+ list_del(&fp->node); -+ kfree(fp); -+ } -+} -+module_exit(fixed_mdio_bus_exit); -+ -+MODULE_DESCRIPTION("Fixed MDIO bus (MDIO bus emulation with fixed PHYs)"); -+MODULE_AUTHOR("Vitaly Bordug"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/net/phy/marvell.c b/drivers/net/phy/marvell.c -index 225c033..969a198 100644 ---- a/drivers/net/phy/marvell.c -+++ b/drivers/net/phy/marvell.c -@@ -50,6 +50,7 @@ - #define MII_M1011_PHY_SCR 0x10 - #define MII_M1011_PHY_SCR_AUTO_CROSS 0x0060 - -+#define MII_M1145_PHY_EXT_ADDR_PAGE 0x16 - #define MII_M1145_PHY_EXT_SR 0x1b - #define MII_M1145_PHY_EXT_CR 0x14 - #define MII_M1145_RGMII_RX_DELAY 0x0080 -@@ -495,6 +496,16 @@ static int m88e1111_config_init(struct phy_device *phydev) - err = phy_write(phydev, MII_M1111_PHY_EXT_SR, temp); - if (err < 0) - return err; -+ -+ /* make sure copper is selected */ -+ err = phy_read(phydev, MII_M1145_PHY_EXT_ADDR_PAGE); -+ if (err < 0) -+ return err; -+ -+ err = phy_write(phydev, MII_M1145_PHY_EXT_ADDR_PAGE, -+ err & (~0xff)); -+ if (err < 0) -+ return err; - } - - if (phydev->interface == PHY_INTERFACE_MODE_RTBI) { -diff --git a/drivers/net/phy/mdio_bus.c b/drivers/net/phy/mdio_bus.c -index 50051f2..accd605 100644 ---- a/drivers/net/phy/mdio_bus.c -+++ b/drivers/net/phy/mdio_bus.c -@@ -288,8 +288,11 @@ int mdiobus_register(struct mii_bus *bus) - - error: - while (--i >= 0) { -- if (bus->phy_map[i]) -- device_unregister(&bus->phy_map[i]->dev); -+ struct phy_device *phydev = bus->phy_map[i]; -+ if (phydev) { -+ phy_device_remove(phydev); -+ 
phy_device_free(phydev); -+ } - } - device_del(&bus->dev); - return err; -@@ -305,9 +308,11 @@ void mdiobus_unregister(struct mii_bus *bus) - - device_del(&bus->dev); - for (i = 0; i < PHY_MAX_ADDR; i++) { -- if (bus->phy_map[i]) -- device_unregister(&bus->phy_map[i]->dev); -- bus->phy_map[i] = NULL; -+ struct phy_device *phydev = bus->phy_map[i]; -+ if (phydev) { -+ phy_device_remove(phydev); -+ phy_device_free(phydev); -+ } - } - } - EXPORT_SYMBOL(mdiobus_unregister); -@@ -421,6 +426,8 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) - { - struct phy_device *phydev = to_phy_device(dev); - struct phy_driver *phydrv = to_phy_driver(drv); -+ const int num_ids = ARRAY_SIZE(phydev->c45_ids.device_ids); -+ int i; - - if (of_driver_match_device(dev, drv)) - return 1; -@@ -428,8 +435,21 @@ static int mdio_bus_match(struct device *dev, struct device_driver *drv) - if (phydrv->match_phy_device) - return phydrv->match_phy_device(phydev); - -- return (phydrv->phy_id & phydrv->phy_id_mask) == -- (phydev->phy_id & phydrv->phy_id_mask); -+ if (phydev->is_c45) { -+ for (i = 1; i < num_ids; i++) { -+ if (!(phydev->c45_ids.devices_in_package & (1 << i))) -+ continue; -+ -+ if ((phydrv->phy_id & phydrv->phy_id_mask) == -+ (phydev->c45_ids.device_ids[i] & -+ phydrv->phy_id_mask)) -+ return 1; -+ } -+ return 0; -+ } else { -+ return (phydrv->phy_id & phydrv->phy_id_mask) == -+ (phydev->phy_id & phydrv->phy_id_mask); -+ } - } - - #ifdef CONFIG_PM -diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c -index 91d6d03..840075e 100644 ---- a/drivers/net/phy/phy.c -+++ b/drivers/net/phy/phy.c -@@ -768,6 +768,7 @@ void phy_state_machine(struct work_struct *work) - container_of(dwork, struct phy_device, state_queue); - bool needs_aneg = false, do_suspend = false, do_resume = false; - int err = 0; -+ int old_link; - - mutex_lock(&phydev->lock); - -@@ -814,6 +815,9 @@ void phy_state_machine(struct work_struct *work) - needs_aneg = true; - break; - case 
PHY_NOLINK: -+ if (phy_interrupt_is_valid(phydev)) -+ break; -+ - err = phy_read_status(phydev); - if (err) - break; -@@ -851,11 +855,18 @@ void phy_state_machine(struct work_struct *work) - phydev->adjust_link(phydev->attached_dev); - break; - case PHY_RUNNING: -- /* Only register a CHANGE if we are -- * polling or ignoring interrupts -+ /* Only register a CHANGE if we are polling or ignoring -+ * interrupts and link changed since latest checking. - */ -- if (!phy_interrupt_is_valid(phydev)) -- phydev->state = PHY_CHANGELINK; -+ if (!phy_interrupt_is_valid(phydev)) { -+ old_link = phydev->link; -+ err = phy_read_status(phydev); -+ if (err) -+ break; -+ -+ if (old_link != phydev->link) -+ phydev->state = PHY_CHANGELINK; -+ } - break; - case PHY_CHANGELINK: - err = phy_read_status(phydev); -diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c -index 70a0d88..07b1aa9 100644 ---- a/drivers/net/phy/phy_device.c -+++ b/drivers/net/phy/phy_device.c -@@ -205,6 +205,37 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, - } - EXPORT_SYMBOL(phy_device_create); - -+/* get_phy_c45_devs_in_pkg - reads a MMD's devices in package registers. -+ * @bus: the target MII bus -+ * @addr: PHY address on the MII bus -+ * @dev_addr: MMD address in the PHY. -+ * @devices_in_package: where to store the devices in package information. -+ * -+ * Description: reads devices in package registers of a MMD at @dev_addr -+ * from PHY at @addr on @bus. -+ * -+ * Returns: 0 on success, -EIO on failure. 
-+ */ -+static int get_phy_c45_devs_in_pkg(struct mii_bus *bus, int addr, int dev_addr, -+ u32 *devices_in_package) -+{ -+ int phy_reg, reg_addr; -+ -+ reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS2; -+ phy_reg = mdiobus_read(bus, addr, reg_addr); -+ if (phy_reg < 0) -+ return -EIO; -+ *devices_in_package = (phy_reg & 0xffff) << 16; -+ -+ reg_addr = MII_ADDR_C45 | dev_addr << 16 | MDIO_DEVS1; -+ phy_reg = mdiobus_read(bus, addr, reg_addr); -+ if (phy_reg < 0) -+ return -EIO; -+ *devices_in_package |= (phy_reg & 0xffff); -+ -+ return 0; -+} -+ - /** - * get_phy_c45_ids - reads the specified addr for its 802.3-c45 IDs. - * @bus: the target MII bus -@@ -223,31 +254,32 @@ static int get_phy_c45_ids(struct mii_bus *bus, int addr, u32 *phy_id, - int phy_reg; - int i, reg_addr; - const int num_ids = ARRAY_SIZE(c45_ids->device_ids); -+ u32 *devs = &c45_ids->devices_in_package; - -- /* Find first non-zero Devices In package. Device -- * zero is reserved, so don't probe it. -+ /* Find first non-zero Devices In package. Device zero is reserved -+ * for 802.3 c45 complied PHYs, so don't probe it at first. - */ -- for (i = 1; -- i < num_ids && c45_ids->devices_in_package == 0; -- i++) { -- reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS2; -- phy_reg = mdiobus_read(bus, addr, reg_addr); -- if (phy_reg < 0) -- return -EIO; -- c45_ids->devices_in_package = (phy_reg & 0xffff) << 16; -- -- reg_addr = MII_ADDR_C45 | i << 16 | MDIO_DEVS1; -- phy_reg = mdiobus_read(bus, addr, reg_addr); -+ for (i = 1; i < num_ids && *devs == 0; i++) { -+ phy_reg = get_phy_c45_devs_in_pkg(bus, addr, i, devs); - if (phy_reg < 0) - return -EIO; -- c45_ids->devices_in_package |= (phy_reg & 0xffff); - -- /* If mostly Fs, there is no device there, -- * let's get out of here. 
-- */ -- if ((c45_ids->devices_in_package & 0x1fffffff) == 0x1fffffff) { -- *phy_id = 0xffffffff; -- return 0; -+ if ((*devs & 0x1fffffff) == 0x1fffffff) { -+ /* If mostly Fs, there is no device there, -+ * then let's continue to probe more, as some -+ * 10G PHYs have zero Devices In package, -+ * e.g. Cortina CS4315/CS4340 PHY. -+ */ -+ phy_reg = get_phy_c45_devs_in_pkg(bus, addr, 0, devs); -+ if (phy_reg < 0) -+ return -EIO; -+ /* no device there, let's get out of here */ -+ if ((*devs & 0x1fffffff) == 0x1fffffff) { -+ *phy_id = 0xffffffff; -+ return 0; -+ } else { -+ break; -+ } - } - } - -@@ -376,6 +408,24 @@ int phy_device_register(struct phy_device *phydev) - EXPORT_SYMBOL(phy_device_register); - - /** -+ * phy_device_remove - Remove a previously registered phy device from the MDIO bus -+ * @phydev: phy_device structure to remove -+ * -+ * This doesn't free the phy_device itself, it merely reverses the effects -+ * of phy_device_register(). Use phy_device_free() to free the device -+ * after calling this function. 
-+ */ -+void phy_device_remove(struct phy_device *phydev) -+{ -+ struct mii_bus *bus = phydev->bus; -+ int addr = phydev->addr; -+ -+ device_del(&phydev->dev); -+ bus->phy_map[addr] = NULL; -+} -+EXPORT_SYMBOL(phy_device_remove); -+ -+/** - * phy_find_first - finds the first PHY device on the bus - * @bus: the target MII bus - */ -diff --git a/drivers/net/phy/realtek.c b/drivers/net/phy/realtek.c -index 45483fd..badcf24 100644 ---- a/drivers/net/phy/realtek.c -+++ b/drivers/net/phy/realtek.c -@@ -22,8 +22,12 @@ - #define RTL821x_INER 0x12 - #define RTL821x_INER_INIT 0x6400 - #define RTL821x_INSR 0x13 -+#define RTL8211E_INER_LINK_STATUS 0x400 - --#define RTL8211E_INER_LINK_STATUS 0x400 -+#define RTL8211F_INER_LINK_STATUS 0x0010 -+#define RTL8211F_INSR 0x1d -+#define RTL8211F_PAGE_SELECT 0x1f -+#define RTL8211F_TX_DELAY 0x100 - - MODULE_DESCRIPTION("Realtek PHY driver"); - MODULE_AUTHOR("Johnson Leung"); -@@ -38,6 +42,18 @@ static int rtl821x_ack_interrupt(struct phy_device *phydev) - return (err < 0) ? err : 0; - } - -+static int rtl8211f_ack_interrupt(struct phy_device *phydev) -+{ -+ int err; -+ -+ phy_write(phydev, RTL8211F_PAGE_SELECT, 0xa43); -+ err = phy_read(phydev, RTL8211F_INSR); -+ /* restore to default page 0 */ -+ phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); -+ -+ return (err < 0) ? 
err : 0; -+} -+ - static int rtl8211b_config_intr(struct phy_device *phydev) - { - int err; -@@ -64,6 +80,41 @@ static int rtl8211e_config_intr(struct phy_device *phydev) - return err; - } - -+static int rtl8211f_config_intr(struct phy_device *phydev) -+{ -+ int err; -+ -+ if (phydev->interrupts == PHY_INTERRUPT_ENABLED) -+ err = phy_write(phydev, RTL821x_INER, -+ RTL8211F_INER_LINK_STATUS); -+ else -+ err = phy_write(phydev, RTL821x_INER, 0); -+ -+ return err; -+} -+ -+static int rtl8211f_config_init(struct phy_device *phydev) -+{ -+ int ret; -+ u16 reg; -+ -+ ret = genphy_config_init(phydev); -+ if (ret < 0) -+ return ret; -+ -+ if (phydev->interface == PHY_INTERFACE_MODE_RGMII) { -+ /* enable TXDLY */ -+ phy_write(phydev, RTL8211F_PAGE_SELECT, 0xd08); -+ reg = phy_read(phydev, 0x11); -+ reg |= RTL8211F_TX_DELAY; -+ phy_write(phydev, 0x11, reg); -+ /* restore to default page 0 */ -+ phy_write(phydev, RTL8211F_PAGE_SELECT, 0x0); -+ } -+ -+ return 0; -+} -+ - static struct phy_driver realtek_drvs[] = { - { - .phy_id = 0x00008201, -@@ -86,6 +137,19 @@ static struct phy_driver realtek_drvs[] = { - .config_intr = &rtl8211b_config_intr, - .driver = { .owner = THIS_MODULE,}, - }, { -+ .phy_id = 0x001cc914, -+ .name = "RTL8211DN Gigabit Ethernet", -+ .phy_id_mask = 0x001fffff, -+ .features = PHY_GBIT_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .config_aneg = genphy_config_aneg, -+ .read_status = genphy_read_status, -+ .ack_interrupt = rtl821x_ack_interrupt, -+ .config_intr = rtl8211e_config_intr, -+ .suspend = genphy_suspend, -+ .resume = genphy_resume, -+ .driver = { .owner = THIS_MODULE,}, -+ }, { - .phy_id = 0x001cc915, - .name = "RTL8211E Gigabit Ethernet", - .phy_id_mask = 0x001fffff, -@@ -98,6 +162,20 @@ static struct phy_driver realtek_drvs[] = { - .suspend = genphy_suspend, - .resume = genphy_resume, - .driver = { .owner = THIS_MODULE,}, -+ }, { -+ .phy_id = 0x001cc916, -+ .name = "RTL8211F Gigabit Ethernet", -+ .phy_id_mask = 0x001fffff, -+ .features = 
PHY_GBIT_FEATURES, -+ .flags = PHY_HAS_INTERRUPT, -+ .config_aneg = &genphy_config_aneg, -+ .config_init = &rtl8211f_config_init, -+ .read_status = &genphy_read_status, -+ .ack_interrupt = &rtl8211f_ack_interrupt, -+ .config_intr = &rtl8211f_config_intr, -+ .suspend = genphy_suspend, -+ .resume = genphy_resume, -+ .driver = { .owner = THIS_MODULE }, - }, - }; - -@@ -116,7 +194,9 @@ module_exit(realtek_exit); - - static struct mdio_device_id __maybe_unused realtek_tbl[] = { - { 0x001cc912, 0x001fffff }, -+ { 0x001cc914, 0x001fffff }, - { 0x001cc915, 0x001fffff }, -+ { 0x001cc916, 0x001fffff }, - { } - }; - -diff --git a/drivers/of/base.c b/drivers/of/base.c -index 469d2b7..210c876 100644 ---- a/drivers/of/base.c -+++ b/drivers/of/base.c -@@ -32,8 +32,8 @@ - - LIST_HEAD(aliases_lookup); - --struct device_node *of_allnodes; --EXPORT_SYMBOL(of_allnodes); -+struct device_node *of_root; -+EXPORT_SYMBOL(of_root); - struct device_node *of_chosen; - struct device_node *of_aliases; - struct device_node *of_stdout; -@@ -48,7 +48,7 @@ struct kset *of_kset; - */ - DEFINE_MUTEX(of_mutex); - --/* use when traversing tree through the allnext, child, sibling, -+/* use when traversing tree through the child, sibling, - * or parent members of struct device_node. 
- */ - DEFINE_RAW_SPINLOCK(devtree_lock); -@@ -204,7 +204,7 @@ static int __init of_init(void) - mutex_unlock(&of_mutex); - - /* Symlink in /proc as required by userspace ABI */ -- if (of_allnodes) -+ if (of_root) - proc_symlink("device-tree", NULL, "/sys/firmware/devicetree/base"); - - return 0; -@@ -245,6 +245,23 @@ struct property *of_find_property(const struct device_node *np, - } - EXPORT_SYMBOL(of_find_property); - -+struct device_node *__of_find_all_nodes(struct device_node *prev) -+{ -+ struct device_node *np; -+ if (!prev) { -+ np = of_root; -+ } else if (prev->child) { -+ np = prev->child; -+ } else { -+ /* Walk back up looking for a sibling, or the end of the structure */ -+ np = prev; -+ while (np->parent && !np->sibling) -+ np = np->parent; -+ np = np->sibling; /* Might be null at the end of the tree */ -+ } -+ return np; -+} -+ - /** - * of_find_all_nodes - Get next node in global list - * @prev: Previous node or NULL to start iteration -@@ -259,10 +276,8 @@ struct device_node *of_find_all_nodes(struct device_node *prev) - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = prev ? 
prev->allnext : of_allnodes; -- for (; np != NULL; np = np->allnext) -- if (of_node_get(np)) -- break; -+ np = __of_find_all_nodes(prev); -+ of_node_get(np); - of_node_put(prev); - raw_spin_unlock_irqrestore(&devtree_lock, flags); - return np; -@@ -736,7 +751,7 @@ struct device_node *of_find_node_by_path(const char *path) - unsigned long flags; - - if (strcmp(path, "/") == 0) -- return of_node_get(of_allnodes); -+ return of_node_get(of_root); - - /* The path could begin with an alias */ - if (*path != '/') { -@@ -761,7 +776,7 @@ struct device_node *of_find_node_by_path(const char *path) - /* Step down the tree matching path components */ - raw_spin_lock_irqsave(&devtree_lock, flags); - if (!np) -- np = of_node_get(of_allnodes); -+ np = of_node_get(of_root); - while (np && *path == '/') { - path++; /* Increment past '/' delimiter */ - np = __of_find_node_by_path(np, path); -@@ -790,8 +805,7 @@ struct device_node *of_find_node_by_name(struct device_node *from, - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = from ? from->allnext : of_allnodes; -- for (; np; np = np->allnext) -+ for_each_of_allnodes_from(from, np) - if (np->name && (of_node_cmp(np->name, name) == 0) - && of_node_get(np)) - break; -@@ -820,8 +834,7 @@ struct device_node *of_find_node_by_type(struct device_node *from, - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = from ? from->allnext : of_allnodes; -- for (; np; np = np->allnext) -+ for_each_of_allnodes_from(from, np) - if (np->type && (of_node_cmp(np->type, type) == 0) - && of_node_get(np)) - break; -@@ -852,12 +865,10 @@ struct device_node *of_find_compatible_node(struct device_node *from, - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = from ? 
from->allnext : of_allnodes; -- for (; np; np = np->allnext) { -+ for_each_of_allnodes_from(from, np) - if (__of_device_is_compatible(np, compatible, type, NULL) && - of_node_get(np)) - break; -- } - of_node_put(from); - raw_spin_unlock_irqrestore(&devtree_lock, flags); - return np; -@@ -884,8 +895,7 @@ struct device_node *of_find_node_with_property(struct device_node *from, - unsigned long flags; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = from ? from->allnext : of_allnodes; -- for (; np; np = np->allnext) { -+ for_each_of_allnodes_from(from, np) { - for (pp = np->properties; pp; pp = pp->next) { - if (of_prop_cmp(pp->name, prop_name) == 0) { - of_node_get(np); -@@ -967,8 +977,7 @@ struct device_node *of_find_matching_node_and_match(struct device_node *from, - *match = NULL; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- np = from ? from->allnext : of_allnodes; -- for (; np; np = np->allnext) { -+ for_each_of_allnodes_from(from, np) { - m = __of_match_node(matches, np); - if (m && of_node_get(np)) { - if (match) -@@ -1025,7 +1034,7 @@ struct device_node *of_find_node_by_phandle(phandle handle) - return NULL; - - raw_spin_lock_irqsave(&devtree_lock, flags); -- for (np = of_allnodes; np; np = np->allnext) -+ for_each_of_allnodes(np) - if (np->phandle == handle) - break; - of_node_get(np); -diff --git a/drivers/of/dynamic.c b/drivers/of/dynamic.c -index d499417..d43f305 100644 ---- a/drivers/of/dynamic.c -+++ b/drivers/of/dynamic.c -@@ -117,8 +117,6 @@ void __of_attach_node(struct device_node *np) - - np->child = NULL; - np->sibling = np->parent->child; -- np->allnext = np->parent->allnext; -- np->parent->allnext = np; - np->parent->child = np; - of_node_clear_flag(np, OF_DETACHED); - } -@@ -154,17 +152,6 @@ void __of_detach_node(struct device_node *np) - if (WARN_ON(!parent)) - return; - -- if (of_allnodes == np) -- of_allnodes = np->allnext; -- else { -- struct device_node *prev; -- for (prev = of_allnodes; -- prev->allnext != np; -- prev = 
prev->allnext) -- ; -- prev->allnext = np->allnext; -- } -- - if (parent->child == np) - parent->child = np->sibling; - else { -diff --git a/drivers/of/fdt.c b/drivers/of/fdt.c -index d134710..f6eda02 100644 ---- a/drivers/of/fdt.c -+++ b/drivers/of/fdt.c -@@ -145,15 +145,15 @@ static void *unflatten_dt_alloc(void **mem, unsigned long size, - * @mem: Memory chunk to use for allocating device nodes and properties - * @p: pointer to node in flat tree - * @dad: Parent struct device_node -- * @allnextpp: pointer to ->allnext from last allocated device_node - * @fpsize: Size of the node path up at the current depth. - */ - static void * unflatten_dt_node(void *blob, - void *mem, - int *poffset, - struct device_node *dad, -- struct device_node ***allnextpp, -- unsigned long fpsize) -+ struct device_node **nodepp, -+ unsigned long fpsize, -+ bool dryrun) - { - const __be32 *p; - struct device_node *np; -@@ -200,7 +200,7 @@ static void * unflatten_dt_node(void *blob, - - np = unflatten_dt_alloc(&mem, sizeof(struct device_node) + allocl, - __alignof__(struct device_node)); -- if (allnextpp) { -+ if (!dryrun) { - char *fn; - of_node_init(np); - np->full_name = fn = ((char *)np) + sizeof(*np); -@@ -222,8 +222,6 @@ static void * unflatten_dt_node(void *blob, - memcpy(fn, pathp, l); - - prev_pp = &np->properties; -- **allnextpp = np; -- *allnextpp = &np->allnext; - if (dad != NULL) { - np->parent = dad; - /* we temporarily use the next field as `last_child'*/ -@@ -254,7 +252,7 @@ static void * unflatten_dt_node(void *blob, - has_name = 1; - pp = unflatten_dt_alloc(&mem, sizeof(struct property), - __alignof__(struct property)); -- if (allnextpp) { -+ if (!dryrun) { - /* We accept flattened tree phandles either in - * ePAPR-style "phandle" properties, or the - * legacy "linux,phandle" properties. 
If both -@@ -296,7 +294,7 @@ static void * unflatten_dt_node(void *blob, - sz = (pa - ps) + 1; - pp = unflatten_dt_alloc(&mem, sizeof(struct property) + sz, - __alignof__(struct property)); -- if (allnextpp) { -+ if (!dryrun) { - pp->name = "name"; - pp->length = sz; - pp->value = pp + 1; -@@ -308,7 +306,7 @@ static void * unflatten_dt_node(void *blob, - (char *)pp->value); - } - } -- if (allnextpp) { -+ if (!dryrun) { - *prev_pp = NULL; - np->name = of_get_property(np, "name", NULL); - np->type = of_get_property(np, "device_type", NULL); -@@ -324,11 +322,13 @@ static void * unflatten_dt_node(void *blob, - if (depth < 0) - depth = 0; - while (*poffset > 0 && depth > old_depth) -- mem = unflatten_dt_node(blob, mem, poffset, np, allnextpp, -- fpsize); -+ mem = unflatten_dt_node(blob, mem, poffset, np, NULL, -+ fpsize, dryrun); - - if (*poffset < 0 && *poffset != -FDT_ERR_NOTFOUND) - pr_err("unflatten: error %d processing FDT\n", *poffset); -+ if (nodepp) -+ *nodepp = np; - - return mem; - } -@@ -352,7 +352,6 @@ static void __unflatten_device_tree(void *blob, - unsigned long size; - int start; - void *mem; -- struct device_node **allnextp = mynodes; - - pr_debug(" -> unflatten_device_tree()\n"); - -@@ -373,7 +372,7 @@ static void __unflatten_device_tree(void *blob, - - /* First pass, scan for size */ - start = 0; -- size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0); -+ size = (unsigned long)unflatten_dt_node(blob, NULL, &start, NULL, NULL, 0, true); - size = ALIGN(size, 4); - - pr_debug(" size is %lx, allocating...\n", size); -@@ -388,11 +387,10 @@ static void __unflatten_device_tree(void *blob, - - /* Second pass, do actual unflattening */ - start = 0; -- unflatten_dt_node(blob, mem, &start, NULL, &allnextp, 0); -+ unflatten_dt_node(blob, mem, &start, NULL, mynodes, 0, false); - if (be32_to_cpup(mem + size) != 0xdeadbeef) - pr_warning("End of tree marker overwritten: %08x\n", - be32_to_cpup(mem + size)); -- *allnextp = NULL; - - pr_debug(" <- 
unflatten_device_tree()\n"); - } -@@ -1039,7 +1037,7 @@ bool __init early_init_dt_scan(void *params) - */ - void __init unflatten_device_tree(void) - { -- __unflatten_device_tree(initial_boot_params, &of_allnodes, -+ __unflatten_device_tree(initial_boot_params, &of_root, - early_init_dt_alloc_memory_arch); - - /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */ -diff --git a/drivers/of/pdt.c b/drivers/of/pdt.c -index 36b4035..d2acae8 100644 ---- a/drivers/of/pdt.c -+++ b/drivers/of/pdt.c -@@ -25,8 +25,7 @@ - - static struct of_pdt_ops *of_pdt_prom_ops __initdata; - --void __initdata (*of_pdt_build_more)(struct device_node *dp, -- struct device_node ***nextp); -+void __initdata (*of_pdt_build_more)(struct device_node *dp); - - #if defined(CONFIG_SPARC) - unsigned int of_pdt_unique_id __initdata; -@@ -192,8 +191,7 @@ static struct device_node * __init of_pdt_create_node(phandle node, - } - - static struct device_node * __init of_pdt_build_tree(struct device_node *parent, -- phandle node, -- struct device_node ***nextp) -+ phandle node) - { - struct device_node *ret = NULL, *prev_sibling = NULL; - struct device_node *dp; -@@ -210,16 +208,12 @@ static struct device_node * __init of_pdt_build_tree(struct device_node *parent, - ret = dp; - prev_sibling = dp; - -- *(*nextp) = dp; -- *nextp = &dp->allnext; -- - dp->full_name = of_pdt_build_full_name(dp); - -- dp->child = of_pdt_build_tree(dp, -- of_pdt_prom_ops->getchild(node), nextp); -+ dp->child = of_pdt_build_tree(dp, of_pdt_prom_ops->getchild(node)); - - if (of_pdt_build_more) -- of_pdt_build_more(dp, nextp); -+ of_pdt_build_more(dp); - - node = of_pdt_prom_ops->getsibling(node); - } -@@ -234,20 +228,17 @@ static void * __init kernel_tree_alloc(u64 size, u64 align) - - void __init of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops) - { -- struct device_node **nextp; -- - BUG_ON(!ops); - of_pdt_prom_ops = ops; - -- of_allnodes = of_pdt_create_node(root_node, NULL); -+ of_root = 
of_pdt_create_node(root_node, NULL); - #if defined(CONFIG_SPARC) -- of_allnodes->path_component_name = ""; -+ of_root->path_component_name = ""; - #endif -- of_allnodes->full_name = "/"; -+ of_root->full_name = "/"; - -- nextp = &of_allnodes->allnext; -- of_allnodes->child = of_pdt_build_tree(of_allnodes, -- of_pdt_prom_ops->getchild(of_allnodes->phandle), &nextp); -+ of_root->child = of_pdt_build_tree(of_root, -+ of_pdt_prom_ops->getchild(of_root->phandle)); - - /* Get pointer to "/chosen" and "/aliases" nodes for use everywhere */ - of_alias_scan(kernel_tree_alloc); -diff --git a/drivers/of/selftest.c b/drivers/of/selftest.c -index e2d79af..e40089e 100644 ---- a/drivers/of/selftest.c -+++ b/drivers/of/selftest.c -@@ -148,7 +148,7 @@ static void __init of_selftest_dynamic(void) - - static int __init of_selftest_check_node_linkage(struct device_node *np) - { -- struct device_node *child, *allnext_index = np; -+ struct device_node *child; - int count = 0, rc; - - for_each_child_of_node(np, child) { -@@ -158,14 +158,6 @@ static int __init of_selftest_check_node_linkage(struct device_node *np) - return -EINVAL; - } - -- while (allnext_index && allnext_index != child) -- allnext_index = allnext_index->allnext; -- if (allnext_index != child) { -- pr_err("Node %s is ordered differently in sibling and allnode lists\n", -- child->name); -- return -EINVAL; -- } -- - rc = of_selftest_check_node_linkage(child); - if (rc < 0) - return rc; -@@ -180,12 +172,12 @@ static void __init of_selftest_check_tree_linkage(void) - struct device_node *np; - int allnode_count = 0, child_count; - -- if (!of_allnodes) -+ if (!of_root) - return; - - for_each_of_allnodes(np) - allnode_count++; -- child_count = of_selftest_check_node_linkage(of_allnodes); -+ child_count = of_selftest_check_node_linkage(of_root); - - selftest(child_count > 0, "Device node data structure is corrupted\n"); - selftest(child_count == allnode_count, "allnodes list size (%i) doesn't match" -@@ -775,33 +767,29 @@ static 
void update_node_properties(struct device_node *np, - */ - static int attach_node_and_children(struct device_node *np) - { -- struct device_node *next, *root = np, *dup; -+ struct device_node *next, *dup, *child; - -- /* skip root node */ -- np = np->child; -- /* storing a copy in temporary node */ -- dup = np; -+ dup = of_find_node_by_path(np->full_name); -+ if (dup) { -+ update_node_properties(np, dup); -+ return 0; -+ } - -- while (dup) { -+ /* Children of the root need to be remembered for removal */ -+ if (np->parent == of_root) { - if (WARN_ON(last_node_index >= NO_OF_NODES)) - return -EINVAL; -- nodes[last_node_index++] = dup; -- dup = dup->sibling; -+ nodes[last_node_index++] = np; - } -- dup = NULL; - -- while (np) { -- next = np->allnext; -- dup = of_find_node_by_path(np->full_name); -- if (dup) -- update_node_properties(np, dup); -- else { -- np->child = NULL; -- if (np->parent == root) -- np->parent = of_allnodes; -- of_attach_node(np); -- } -- np = next; -+ child = np->child; -+ np->child = NULL; -+ np->sibling = NULL; -+ of_attach_node(np); -+ while (child) { -+ next = child->sibling; -+ attach_node_and_children(child); -+ child = next; - } - - return 0; -@@ -846,10 +834,10 @@ static int __init selftest_data_add(void) - return -EINVAL; - } - -- if (!of_allnodes) { -+ if (!of_root) { - /* enabling flag for removing nodes */ - selftest_live_tree = true; -- of_allnodes = selftest_data_node; -+ of_root = selftest_data_node; - - for_each_of_allnodes(np) - __of_attach_node_sysfs(np); -@@ -859,7 +847,14 @@ static int __init selftest_data_add(void) - } - - /* attach the sub-tree to live tree */ -- return attach_node_and_children(selftest_data_node); -+ np = selftest_data_node->child; -+ while (np) { -+ struct device_node *next = np->sibling; -+ np->parent = of_root; -+ attach_node_and_children(np); -+ np = next; -+ } -+ return 0; - } - - /** -@@ -889,10 +884,10 @@ static void selftest_data_remove(void) - of_node_put(of_chosen); - of_aliases = NULL; - 
of_chosen = NULL; -- for_each_child_of_node(of_allnodes, np) -+ for_each_child_of_node(of_root, np) - detach_node_and_children(np); -- __of_detach_node_sysfs(of_allnodes); -- of_allnodes = NULL; -+ __of_detach_node_sysfs(of_root); -+ of_root = NULL; - return; - } - -diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile -index e04fe2d..e9815ac 100644 ---- a/drivers/pci/Makefile -+++ b/drivers/pci/Makefile -@@ -35,6 +35,7 @@ obj-$(CONFIG_PCI_IOV) += iov.o - # - obj-$(CONFIG_ALPHA) += setup-irq.o - obj-$(CONFIG_ARM) += setup-irq.o -+obj-$(CONFIG_ARM64) += setup-irq.o - obj-$(CONFIG_UNICORE32) += setup-irq.o - obj-$(CONFIG_SUPERH) += setup-irq.o - obj-$(CONFIG_MIPS) += setup-irq.o -diff --git a/drivers/pci/access.c b/drivers/pci/access.c -index 7f249b9..b965c12 100644 ---- a/drivers/pci/access.c -+++ b/drivers/pci/access.c -@@ -67,6 +67,93 @@ EXPORT_SYMBOL(pci_bus_write_config_byte); - EXPORT_SYMBOL(pci_bus_write_config_word); - EXPORT_SYMBOL(pci_bus_write_config_dword); - -+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 *val) -+{ -+ void __iomem *addr; -+ -+ addr = bus->ops->map_bus(bus, devfn, where); -+ if (!addr) { -+ *val = ~0; -+ return PCIBIOS_DEVICE_NOT_FOUND; -+ } -+ -+ if (size == 1) -+ *val = readb(addr); -+ else if (size == 2) -+ *val = readw(addr); -+ else -+ *val = readl(addr); -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+EXPORT_SYMBOL_GPL(pci_generic_config_read); -+ -+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 val) -+{ -+ void __iomem *addr; -+ -+ addr = bus->ops->map_bus(bus, devfn, where); -+ if (!addr) -+ return PCIBIOS_DEVICE_NOT_FOUND; -+ -+ if (size == 1) -+ writeb(val, addr); -+ else if (size == 2) -+ writew(val, addr); -+ else -+ writel(val, addr); -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+EXPORT_SYMBOL_GPL(pci_generic_config_write); -+ -+int pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 *val) -+{ -+ 
void __iomem *addr; -+ -+ addr = bus->ops->map_bus(bus, devfn, where & ~0x3); -+ if (!addr) { -+ *val = ~0; -+ return PCIBIOS_DEVICE_NOT_FOUND; -+ } -+ -+ *val = readl(addr); -+ -+ if (size <= 2) -+ *val = (*val >> (8 * (where & 3))) & ((1 << (size * 8)) - 1); -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+EXPORT_SYMBOL_GPL(pci_generic_config_read32); -+ -+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 val) -+{ -+ void __iomem *addr; -+ u32 mask, tmp; -+ -+ addr = bus->ops->map_bus(bus, devfn, where & ~0x3); -+ if (!addr) -+ return PCIBIOS_DEVICE_NOT_FOUND; -+ -+ if (size == 4) { -+ writel(val, addr); -+ return PCIBIOS_SUCCESSFUL; -+ } else { -+ mask = ~(((1 << (size * 8)) - 1) << ((where & 0x3) * 8)); -+ } -+ -+ tmp = readl(addr) & mask; -+ tmp |= val << ((where & 0x3) * 8); -+ writel(tmp, addr); -+ -+ return PCIBIOS_SUCCESSFUL; -+} -+EXPORT_SYMBOL_GPL(pci_generic_config_write32); -+ - /** - * pci_bus_set_ops - Set raw operations of pci bus - * @bus: pci bus struct -diff --git a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig -index 96586b1..dafa3dc 100644 ---- a/drivers/pci/host/Kconfig -+++ b/drivers/pci/host/Kconfig -@@ -50,7 +50,7 @@ config PCI_RCAR_GEN2_PCIE - - config PCI_HOST_GENERIC - bool "Generic PCI host controller" -- depends on ARM && OF -+ depends on (ARM || ARM64) && OF - help - Say Y here if you want to support a simple generic PCI host - controller, such as the one emulated by kvmtool. 
-diff --git a/drivers/pci/host/pci-layerscape.c b/drivers/pci/host/pci-layerscape.c -index d491b0f..baa1232 100644 ---- a/drivers/pci/host/pci-layerscape.c -+++ b/drivers/pci/host/pci-layerscape.c -@@ -36,12 +36,21 @@ - #define LTSSM_PCIE_L0 0x11 /* L0 state */ - #define LTSSM_PCIE_L2_IDLE 0x15 /* L2 idle state */ - -+#define PCIE_SRIOV_OFFSET 0x178 -+ -+/* CS2 */ -+#define PCIE_CS2_OFFSET 0x1000 /* For PCIe without SR-IOV */ -+#define PCIE_ENABLE_CS2 0x80000000 /* For PCIe with SR-IOV */ -+ - /* PEX Internal Configuration Registers */ - #define PCIE_STRFMR1 0x71c /* Symbol Timer & Filter Mask Register1 */ - #define PCIE_DBI_RO_WR_EN 0x8bc /* DBI Read-Only Write Enable Register */ -+#define PCIE_ABSERR 0x8d0 /* Bridge Slave Error Response Register */ -+#define PCIE_ABSERR_SETTING 0x9401 /* Forward error of non-posted request */ - - /* PEX LUT registers */ - #define PCIE_LUT_DBG 0x7FC /* PEX LUT Debug Register */ -+#define PCIE_LUT_CTRL0 0x7f8 - #define PCIE_LUT_UDR(n) (0x800 + (n) * 8) - #define PCIE_LUT_LDR(n) (0x804 + (n) * 8) - #define PCIE_LUT_MASK_ALL 0xffff -@@ -72,6 +81,8 @@ - #define CPLD_RST_PCIE_SLOT 0x14 - #define CPLD_RST_PCIESLOT 0x3 - -+#define PCIE_IATU_NUM 6 -+ - struct ls_pcie; - - struct ls_pcie_pm_data { -@@ -111,6 +122,8 @@ struct ls_pcie { - - #define to_ls_pcie(x) container_of(x, struct ls_pcie, pp) - -+static void ls_pcie_host_init(struct pcie_port *pp); -+ - u32 set_pcie_streamid_translation(struct pci_dev *pdev, u32 devid) - { - u32 index, streamid; -@@ -163,6 +176,42 @@ static void ls_pcie_drop_msg_tlp(struct ls_pcie *pcie) - iowrite32(val, pcie->dbi + PCIE_STRFMR1); - } - -+/* Disable all bars in RC mode */ -+static void ls_pcie_disable_bars(struct ls_pcie *pcie) -+{ -+ u32 header; -+ -+ header = ioread32(pcie->dbi + PCIE_SRIOV_OFFSET); -+ if (PCI_EXT_CAP_ID(header) == PCI_EXT_CAP_ID_SRIOV) { -+ iowrite32(PCIE_ENABLE_CS2, pcie->lut + PCIE_LUT_CTRL0); -+ iowrite32(0, pcie->dbi + PCI_BASE_ADDRESS_0); -+ iowrite32(0, pcie->dbi + 
PCI_BASE_ADDRESS_1); -+ iowrite32(0, pcie->dbi + PCI_ROM_ADDRESS1); -+ iowrite32(0, pcie->lut + PCIE_LUT_CTRL0); -+ } else { -+ iowrite32(0, -+ pcie->dbi + PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_0); -+ iowrite32(0, -+ pcie->dbi + PCIE_CS2_OFFSET + PCI_BASE_ADDRESS_1); -+ iowrite32(0, -+ pcie->dbi + PCIE_CS2_OFFSET + PCI_ROM_ADDRESS1); -+ } -+} -+ -+static void ls_pcie_disable_outbound_atus(struct ls_pcie *pcie) -+{ -+ int i; -+ -+ for (i = 0; i < PCIE_IATU_NUM; i++) -+ dw_pcie_disable_outbound_atu(&pcie->pp, i); -+} -+ -+/* Forward error response of outbound non-posted requests */ -+static void ls_pcie_fix_error_response(struct ls_pcie *pcie) -+{ -+ iowrite32(PCIE_ABSERR_SETTING, pcie->dbi + PCIE_ABSERR); -+} -+ - static int ls1021_pcie_link_up(struct pcie_port *pp) - { - u32 state; -@@ -272,19 +321,24 @@ static void ls1021_pcie_host_init(struct pcie_port *pp) - } - pcie->index = index[1]; - -- dw_pcie_setup_rc(pp); -+ ls_pcie_host_init(pp); - -- ls_pcie_drop_msg_tlp(pcie); -+ dw_pcie_setup_rc(pp); - } - - static int ls_pcie_link_up(struct pcie_port *pp) - { - struct ls_pcie *pcie = to_ls_pcie(pp); -- u32 state; -+ u32 state, offset; - -- state = (ioread32(pcie->lut + PCIE_LUT_DBG) >> -- pcie->drvdata->ltssm_shift) & -- LTSSM_STATE_MASK; -+ if (of_get_property(pp->dev->of_node, "fsl,lut_diff", NULL)) -+ offset = 0x407fc; -+ else -+ offset = PCIE_LUT_DBG; -+ -+ state = (ioread32(pcie->lut + offset) >> -+ pcie->drvdata->ltssm_shift) & -+ LTSSM_STATE_MASK; - - if (state < LTSSM_PCIE_L0) - return 0; -@@ -308,6 +362,10 @@ static void ls_pcie_host_init(struct pcie_port *pp) - ls_pcie_clear_multifunction(pcie); - ls_pcie_drop_msg_tlp(pcie); - iowrite32(0, pcie->dbi + PCIE_DBI_RO_WR_EN); -+ -+ ls_pcie_disable_bars(pcie); -+ ls_pcie_disable_outbound_atus(pcie); -+ ls_pcie_fix_error_response(pcie); - } - - static int ls_pcie_msi_host_init(struct pcie_port *pp, -@@ -426,6 +484,11 @@ static int ls_pcie_host_pme_init(struct ls_pcie *pcie, - - pp = &pcie->pp; - -+ if 
(dw_pcie_link_up(&pcie->pp)) -+ pcie->in_slot = true; -+ else -+ pcie->in_slot = false; -+ - pcie->pme_irq = platform_get_irq_byname(pdev, "pme"); - if (pcie->pme_irq < 0) { - dev_err(&pdev->dev, -@@ -462,11 +525,6 @@ static int ls_pcie_host_pme_init(struct ls_pcie *pcie, - val |= PCIE_PEX_RCR_PMEIE; - iowrite16(val, pcie->dbi + PCIE_PEX_RCR); - -- if (dw_pcie_link_up(&pcie->pp)) -- pcie->in_slot = true; -- else -- pcie->in_slot = false; -- - return 0; - } - -@@ -590,12 +648,14 @@ static int ls_pcie_pm_do_resume(struct ls_pcie *pcie) - u32 state; - int i = 0; - u16 val; -- -- ls_pcie_host_init(&pcie->pp); -+ struct pcie_port *pp = &pcie->pp; - - if (!pcie->in_slot) - return 0; - -+ dw_pcie_setup_rc(pp); -+ ls_pcie_host_init(pp); -+ - /* Put RC in D0 */ - val = ioread16(pcie->dbi + PCIE_PM_SCR); - val &= PCIE_PM_SCR_PMEPS_D0; -diff --git a/drivers/pci/host/pcie-designware.c b/drivers/pci/host/pcie-designware.c -index 8a9241b..0961ffc 100644 ---- a/drivers/pci/host/pcie-designware.c -+++ b/drivers/pci/host/pcie-designware.c -@@ -159,6 +159,13 @@ static void dw_pcie_prog_outbound_atu(struct pcie_port *pp, int index, - dw_pcie_writel_rc(pp, PCIE_ATU_ENABLE, PCIE_ATU_CR2); - } - -+void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index) -+{ -+ dw_pcie_writel_rc(pp, PCIE_ATU_REGION_OUTBOUND | index, -+ PCIE_ATU_VIEWPORT); -+ dw_pcie_writel_rc(pp, 0, PCIE_ATU_CR2); -+} -+ - int dw_pcie_link_up(struct pcie_port *pp) - { - if (pp->ops->link_up) -@@ -495,6 +502,13 @@ void dw_pcie_setup_rc(struct pcie_port *pp) - u32 membase; - u32 memlimit; - -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX0, -+ PCIE_ATU_TYPE_IO, pp->io_base, -+ pp->io_bus_addr, pp->io_size); -+ dw_pcie_prog_outbound_atu(pp, PCIE_ATU_REGION_INDEX1, -+ PCIE_ATU_TYPE_MEM, pp->mem_base, -+ pp->mem_bus_addr, pp->mem_size); -+ - /* set the number of lanes */ - dw_pcie_readl_rc(pp, PCIE_PORT_LINK_CONTROL, &val); - val &= ~PORT_LINK_MODE_MASK; -diff --git a/drivers/pci/host/pcie-designware.h 
b/drivers/pci/host/pcie-designware.h -index 2f01284..fcd6431 100644 ---- a/drivers/pci/host/pcie-designware.h -+++ b/drivers/pci/host/pcie-designware.h -@@ -80,5 +80,6 @@ void dw_pcie_msi_init(struct pcie_port *pp); - int dw_pcie_link_up(struct pcie_port *pp); - void dw_pcie_setup_rc(struct pcie_port *pp); - int dw_pcie_host_init(struct pcie_port *pp); -+void dw_pcie_disable_outbound_atu(struct pcie_port *pp, int index); - - #endif /* _PCIE_DESIGNWARE_H */ -diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c -index 5dd4c96..5e64d37 100644 ---- a/drivers/pci/msi.c -+++ b/drivers/pci/msi.c -@@ -667,11 +667,16 @@ static void __iomem *msix_map_region(struct pci_dev *dev, unsigned nr_entries) - { - resource_size_t phys_addr; - u32 table_offset; -+ unsigned long flags; - u8 bir; - - pci_read_config_dword(dev, dev->msix_cap + PCI_MSIX_TABLE, - &table_offset); - bir = (u8)(table_offset & PCI_MSIX_TABLE_BIR); -+ flags = pci_resource_flags(dev, bir); -+ if (!flags || (flags & IORESOURCE_UNSET)) -+ return NULL; -+ - table_offset &= PCI_MSIX_TABLE_OFFSET; - phys_addr = pci_resource_start(dev, bir) + table_offset; - -diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c -index ce0aa47..a6783a5 100644 ---- a/drivers/pci/pci.c -+++ b/drivers/pci/pci.c -@@ -2467,6 +2467,7 @@ u8 pci_common_swizzle(struct pci_dev *dev, u8 *pinp) - *pinp = pin; - return PCI_SLOT(dev->devfn); - } -+EXPORT_SYMBOL_GPL(pci_common_swizzle); - - /** - * pci_release_region - Release a PCI bar -diff --git a/drivers/pci/pcie/portdrv_core.c b/drivers/pci/pcie/portdrv_core.c -index 2f0ce66..95ef171 100644 ---- a/drivers/pci/pcie/portdrv_core.c -+++ b/drivers/pci/pcie/portdrv_core.c -@@ -15,6 +15,7 @@ - #include - #include - #include -+#include - - #include "../pci.h" - #include "portdrv.h" -@@ -199,6 +200,28 @@ static int pcie_port_enable_msix(struct pci_dev *dev, int *vectors, int mask) - static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) - { - int i, irq = -1; -+ int ret; -+ struct device_node 
*np = NULL; -+ -+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) -+ irqs[i] = 0; -+ -+ if (dev->bus->dev.of_node) -+ np = dev->bus->dev.of_node; -+ -+ /* If root port doesn't support MSI/MSI-X/INTx in RC mode, -+ * request irq for aer -+ */ -+ if (IS_ENABLED(CONFIG_OF_IRQ) && np && -+ (mask & PCIE_PORT_SERVICE_PME)) { -+ ret = of_irq_get_byname(np, "aer"); -+ if (ret > 0) { -+ irqs[PCIE_PORT_SERVICE_AER_SHIFT] = ret; -+ if (dev->irq) -+ irq = dev->irq; -+ goto no_msi; -+ } -+ } - - /* - * If MSI cannot be used for PCIe PME or hotplug, we have to use -@@ -224,11 +247,13 @@ static int init_service_irqs(struct pci_dev *dev, int *irqs, int mask) - irq = dev->irq; - - no_msi: -- for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) -- irqs[i] = irq; -+ for (i = 0; i < PCIE_PORT_DEVICE_MAXSERVICES; i++) { -+ if (!irqs[i]) -+ irqs[i] = irq; -+ } - irqs[PCIE_PORT_SERVICE_VC_SHIFT] = -1; - -- if (irq < 0) -+ if (irq < 0 && irqs[PCIE_PORT_SERVICE_AER_SHIFT] < 0) - return -ENODEV; - return 0; - } -diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c -index 6bdeb75..0b16384 100644 ---- a/drivers/pci/probe.c -+++ b/drivers/pci/probe.c -@@ -2024,6 +2024,7 @@ err_out: - kfree(b); - return NULL; - } -+EXPORT_SYMBOL_GPL(pci_create_root_bus); - - int pci_bus_insert_busn_res(struct pci_bus *b, int bus, int bus_max) - { -diff --git a/drivers/pci/remove.c b/drivers/pci/remove.c -index 8bd76c9..8a280e9 100644 ---- a/drivers/pci/remove.c -+++ b/drivers/pci/remove.c -@@ -139,6 +139,7 @@ void pci_stop_root_bus(struct pci_bus *bus) - /* stop the host bridge */ - device_release_driver(&host_bridge->dev); - } -+EXPORT_SYMBOL_GPL(pci_stop_root_bus); - - void pci_remove_root_bus(struct pci_bus *bus) - { -@@ -158,3 +159,4 @@ void pci_remove_root_bus(struct pci_bus *bus) - /* remove the host bridge */ - device_unregister(&host_bridge->dev); - } -+EXPORT_SYMBOL_GPL(pci_remove_root_bus); -diff --git a/drivers/pci/setup-bus.c b/drivers/pci/setup-bus.c -index e3e17f3..8169597 100644 ---- 
a/drivers/pci/setup-bus.c -+++ b/drivers/pci/setup-bus.c -@@ -1750,3 +1750,4 @@ void pci_assign_unassigned_bus_resources(struct pci_bus *bus) - __pci_bus_assign_resources(bus, &add_list, NULL); - BUG_ON(!list_empty(&add_list)); - } -+EXPORT_SYMBOL_GPL(pci_assign_unassigned_bus_resources); -diff --git a/drivers/pci/setup-irq.c b/drivers/pci/setup-irq.c -index 4e2d595..95c225b 100644 ---- a/drivers/pci/setup-irq.c -+++ b/drivers/pci/setup-irq.c -@@ -65,3 +65,4 @@ void pci_fixup_irqs(u8 (*swizzle)(struct pci_dev *, u8 *), - for_each_pci_dev(dev) - pdev_fixup_irq(dev, swizzle, map_irq); - } -+EXPORT_SYMBOL_GPL(pci_fixup_irqs); -diff --git a/drivers/soc/Kconfig b/drivers/soc/Kconfig -index 76d6bd4..d4bcacf 100644 ---- a/drivers/soc/Kconfig -+++ b/drivers/soc/Kconfig -@@ -4,4 +4,17 @@ source "drivers/soc/qcom/Kconfig" - source "drivers/soc/ti/Kconfig" - source "drivers/soc/versatile/Kconfig" - -+config FSL_SOC_DRIVERS -+ bool "Freescale Soc Drivers" -+ depends on FSL_SOC || ARCH_MXC || ARCH_LAYERSCAPE -+ default n -+ help -+ Say y here to enable Freescale Soc Device Drivers support. -+ The Soc Drivers provides the device driver that is a specific block -+ or feature on Freescale platform. 
-+ -+if FSL_SOC_DRIVERS -+ source "drivers/soc/fsl/Kconfig" -+endif -+ - endmenu -diff --git a/drivers/soc/Makefile b/drivers/soc/Makefile -index 063113d..ef82e45 100644 ---- a/drivers/soc/Makefile -+++ b/drivers/soc/Makefile -@@ -6,3 +6,4 @@ obj-$(CONFIG_ARCH_QCOM) += qcom/ - obj-$(CONFIG_ARCH_TEGRA) += tegra/ - obj-$(CONFIG_SOC_TI) += ti/ - obj-$(CONFIG_PLAT_VERSATILE) += versatile/ -+obj-$(CONFIG_FSL_SOC_DRIVERS) += fsl/ -diff --git a/drivers/soc/fsl/Kconfig b/drivers/soc/fsl/Kconfig -new file mode 100644 -index 0000000..92a085e ---- /dev/null -+++ b/drivers/soc/fsl/Kconfig -@@ -0,0 +1,6 @@ -+config FSL_GUTS -+ bool -+ -+if ARM || ARM64 -+source "drivers/soc/fsl/Kconfig.arm" -+endif -diff --git a/drivers/soc/fsl/Kconfig.arm b/drivers/soc/fsl/Kconfig.arm -new file mode 100644 -index 0000000..5f2d214 ---- /dev/null -+++ b/drivers/soc/fsl/Kconfig.arm -@@ -0,0 +1,25 @@ -+# -+# Freescale ARM SOC Drivers -+# -+ -+config LS1_SOC_DRIVERS -+ bool "LS1021A Soc Drivers" -+ depends on SOC_LS1021A -+ default n -+ help -+ Say y here to enable Freescale LS1021A Soc Device Drivers support. -+ The Soc Drivers provides the device driver that is a specific block -+ or feature on LS1021A platform. -+ -+config LS_SOC_DRIVERS -+ bool "Layerscape Soc Drivers" -+ depends on ARCH_LAYERSCAPE -+ default n -+ help -+ Say y here to enable Freescale Layerscape Soc Device Drivers support. -+ The Soc Drivers provides the device driver that is a specific block -+ or feature on Layerscape platform. -+ -+if LS1_SOC_DRIVERS -+ source "drivers/soc/fsl/ls1/Kconfig" -+endif -diff --git a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile -new file mode 100644 -index 0000000..9fc17b3 ---- /dev/null -+++ b/drivers/soc/fsl/Makefile -@@ -0,0 +1,6 @@ -+# -+# Makefile for Freescale Soc specific device drivers. 
-+# -+ -+obj-$(CONFIG_LS1_SOC_DRIVERS) += ls1/ -+obj-$(CONFIG_FSL_GUTS) += guts.o -diff --git a/drivers/soc/fsl/guts.c b/drivers/soc/fsl/guts.c -new file mode 100644 -index 0000000..11065c2 ---- /dev/null -+++ b/drivers/soc/fsl/guts.c -@@ -0,0 +1,123 @@ -+/* -+ * Freescale QorIQ Platforms GUTS Driver -+ * -+ * Copyright (C) 2016 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+ -+struct guts { -+ struct ccsr_guts __iomem *regs; -+ bool little_endian; -+}; -+ -+static struct guts *guts; -+ -+u32 guts_get_svr(void) -+{ -+ u32 svr = 0; -+ -+ if ((!guts) || (!(guts->regs))) { -+#ifdef CONFIG_PPC -+ svr = mfspr(SPRN_SVR); -+#endif -+ return svr; -+ } -+ -+ if (guts->little_endian) -+ svr = ioread32(&guts->regs->svr); -+ else -+ svr = ioread32be(&guts->regs->svr); -+ -+ return svr; -+} -+EXPORT_SYMBOL_GPL(guts_get_svr); -+ -+static int guts_probe(struct platform_device *pdev) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ -+ guts = kzalloc(sizeof(*guts), GFP_KERNEL); -+ if (!guts) -+ return -ENOMEM; -+ -+ if (of_property_read_bool(np, "little-endian")) -+ guts->little_endian = true; -+ else -+ guts->little_endian = false; -+ -+ guts->regs = of_iomap(np, 0); -+ if (!(guts->regs)) -+ return -ENOMEM; -+ -+ of_node_put(np); -+ return 0; -+} -+ -+static int guts_remove(struct platform_device *pdev) -+{ -+ iounmap(guts->regs); -+ kfree(guts); -+ return 0; -+} -+ -+/* -+ * Table for matching compatible strings, for device tree -+ * guts node, for Freescale QorIQ SOCs. 
-+ */ -+static const struct of_device_id guts_of_match[] = { -+ /* For T4 & B4 SOCs */ -+ { .compatible = "fsl,qoriq-device-config-1.0", }, -+ /* For P Series SOCs */ -+ { .compatible = "fsl,qoriq-device-config-2.0", }, -+ { .compatible = "fsl,p1010-guts", }, -+ { .compatible = "fsl,p1020-guts", }, -+ { .compatible = "fsl,p1021-guts", }, -+ { .compatible = "fsl,p1022-guts", }, -+ { .compatible = "fsl,p1023-guts", }, -+ { .compatible = "fsl,p2020-guts", }, -+ /* For BSC Series SOCs */ -+ { .compatible = "fsl,bsc9131-guts", }, -+ { .compatible = "fsl,bsc9132-guts", }, -+ /* For Layerscape Series SOCs */ -+ { .compatible = "fsl,ls1021a-dcfg", }, -+ { .compatible = "fsl,ls1043a-dcfg", }, -+ { .compatible = "fsl,ls2080a-dcfg", }, -+ {} -+}; -+MODULE_DEVICE_TABLE(of, guts_of_match); -+ -+static struct platform_driver guts_driver = { -+ .driver = { -+ .name = "fsl-guts", -+ .of_match_table = guts_of_match, -+ }, -+ .probe = guts_probe, -+ .remove = guts_remove, -+}; -+ -+static int __init guts_drv_init(void) -+{ -+ return platform_driver_register(&guts_driver); -+} -+subsys_initcall(guts_drv_init); -+ -+static void __exit guts_drv_exit(void) -+{ -+ platform_driver_unregister(&guts_driver); -+} -+module_exit(guts_drv_exit); -+ -+MODULE_AUTHOR("Freescale Semiconductor, Inc."); -+MODULE_DESCRIPTION("Freescale QorIQ Platforms GUTS Driver"); -+MODULE_LICENSE("GPL"); -diff --git a/drivers/soc/fsl/ls1/Kconfig b/drivers/soc/fsl/ls1/Kconfig -new file mode 100644 -index 0000000..c9b04c4 ---- /dev/null -+++ b/drivers/soc/fsl/ls1/Kconfig -@@ -0,0 +1,11 @@ -+# -+# LS-1 Soc drivers -+# -+config FTM_ALARM -+ bool "FTM alarm driver" -+ depends on SOC_LS1021A -+ default n -+ help -+ Say y here to enable FTM alarm support. The FTM alarm provides -+ alarm functions for wakeup system from deep sleep. There is only -+ one FTM can be used in ALARM(FTM 0). 
-diff --git a/drivers/soc/fsl/ls1/Makefile b/drivers/soc/fsl/ls1/Makefile -new file mode 100644 -index 0000000..6299aa1 ---- /dev/null -+++ b/drivers/soc/fsl/ls1/Makefile -@@ -0,0 +1 @@ -+obj-$(CONFIG_FTM_ALARM) += ftm_alarm.o -diff --git a/drivers/soc/fsl/ls1/ftm_alarm.c b/drivers/soc/fsl/ls1/ftm_alarm.c -new file mode 100644 -index 0000000..c42b26b ---- /dev/null -+++ b/drivers/soc/fsl/ls1/ftm_alarm.c -@@ -0,0 +1,274 @@ -+/* -+ * Freescale FlexTimer Module (FTM) Alarm driver. -+ * -+ * Copyright 2014 Freescale Semiconductor, Inc. -+ * -+ * This program is free software; you can redistribute it and/or -+ * modify it under the terms of the GNU General Public License -+ * as published by the Free Software Foundation; either version 2 -+ * of the License, or (at your option) any later version. -+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+#define FTM_SC 0x00 -+#define FTM_SC_CLK_SHIFT 3 -+#define FTM_SC_CLK_MASK (0x3 << FTM_SC_CLK_SHIFT) -+#define FTM_SC_CLK(c) ((c) << FTM_SC_CLK_SHIFT) -+#define FTM_SC_PS_MASK 0x7 -+#define FTM_SC_TOIE BIT(6) -+#define FTM_SC_TOF BIT(7) -+ -+#define FTM_SC_CLKS_FIXED_FREQ 0x02 -+ -+#define FTM_CNT 0x04 -+#define FTM_MOD 0x08 -+#define FTM_CNTIN 0x4C -+ -+#define FIXED_FREQ_CLK 32000 -+#define MAX_FREQ_DIV (1 << FTM_SC_PS_MASK) -+#define MAX_COUNT_VAL 0xffff -+ -+static void __iomem *ftm1_base; -+static u32 alarm_freq; -+static bool big_endian; -+ -+static inline u32 ftm_readl(void __iomem *addr) -+{ -+ if (big_endian) -+ return ioread32be(addr); -+ -+ return ioread32(addr); -+} -+ -+static inline void ftm_writel(u32 val, void __iomem *addr) -+{ -+ if (big_endian) -+ iowrite32be(val, addr); -+ else -+ iowrite32(val, addr); -+} -+ -+static inline void ftm_counter_enable(void __iomem *base) -+{ -+ u32 val; -+ -+ /* select and enable counter clock source */ -+ val = ftm_readl(base + FTM_SC); -+ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); -+ val |= (FTM_SC_PS_MASK | 
FTM_SC_CLK(FTM_SC_CLKS_FIXED_FREQ)); -+ ftm_writel(val, base + FTM_SC); -+} -+ -+static inline void ftm_counter_disable(void __iomem *base) -+{ -+ u32 val; -+ -+ /* disable counter clock source */ -+ val = ftm_readl(base + FTM_SC); -+ val &= ~(FTM_SC_PS_MASK | FTM_SC_CLK_MASK); -+ ftm_writel(val, base + FTM_SC); -+} -+ -+static inline void ftm_irq_acknowledge(void __iomem *base) -+{ -+ u32 val; -+ -+ val = ftm_readl(base + FTM_SC); -+ val &= ~FTM_SC_TOF; -+ ftm_writel(val, base + FTM_SC); -+} -+ -+static inline void ftm_irq_enable(void __iomem *base) -+{ -+ u32 val; -+ -+ val = ftm_readl(base + FTM_SC); -+ val |= FTM_SC_TOIE; -+ ftm_writel(val, base + FTM_SC); -+} -+ -+static inline void ftm_irq_disable(void __iomem *base) -+{ -+ u32 val; -+ -+ val = ftm_readl(base + FTM_SC); -+ val &= ~FTM_SC_TOIE; -+ ftm_writel(val, base + FTM_SC); -+} -+ -+static inline void ftm_reset_counter(void __iomem *base) -+{ -+ /* -+ * The CNT register contains the FTM counter value. -+ * Reset clears the CNT register. Writing any value to COUNT -+ * updates the counter with its initial value, CNTIN. -+ */ -+ ftm_writel(0x00, base + FTM_CNT); -+} -+ -+static u32 time_to_cycle(unsigned long time) -+{ -+ u32 cycle; -+ -+ cycle = time * alarm_freq; -+ if (cycle > MAX_COUNT_VAL) { -+ pr_err("Out of alarm range.\n"); -+ cycle = 0; -+ } -+ -+ return cycle; -+} -+ -+static u32 cycle_to_time(u32 cycle) -+{ -+ return cycle / alarm_freq + 1; -+} -+ -+static void ftm_clean_alarm(void) -+{ -+ ftm_counter_disable(ftm1_base); -+ -+ ftm_writel(0x00, ftm1_base + FTM_CNTIN); -+ ftm_writel(~0UL, ftm1_base + FTM_MOD); -+ -+ ftm_reset_counter(ftm1_base); -+} -+ -+static int ftm_set_alarm(u64 cycle) -+{ -+ ftm_irq_disable(ftm1_base); -+ -+ /* -+ * The counter increments until the value of MOD is reached, -+ * at which point the counter is reloaded with the value of CNTIN. -+ * The TOF (the overflow flag) bit is set when the FTM counter -+ * changes from MOD to CNTIN. So we should using the cycle - 1. 
-+ */ -+ ftm_writel(cycle - 1, ftm1_base + FTM_MOD); -+ -+ ftm_counter_enable(ftm1_base); -+ -+ ftm_irq_enable(ftm1_base); -+ -+ return 0; -+} -+ -+static irqreturn_t ftm_alarm_interrupt(int irq, void *dev_id) -+{ -+ ftm_irq_acknowledge(ftm1_base); -+ ftm_irq_disable(ftm1_base); -+ ftm_clean_alarm(); -+ -+ return IRQ_HANDLED; -+} -+ -+static ssize_t ftm_alarm_show(struct device *dev, -+ struct device_attribute *attr, -+ char *buf) -+{ -+ u32 count, val; -+ -+ count = ftm_readl(ftm1_base + FTM_MOD); -+ val = ftm_readl(ftm1_base + FTM_CNT); -+ val = (count & MAX_COUNT_VAL) - val; -+ val = cycle_to_time(val); -+ -+ return sprintf(buf, "%u\n", val); -+} -+ -+static ssize_t ftm_alarm_store(struct device *dev, -+ struct device_attribute *attr, -+ const char *buf, size_t count) -+{ -+ u32 cycle; -+ unsigned long time; -+ -+ if (kstrtoul(buf, 0, &time)) -+ return -EINVAL; -+ -+ ftm_clean_alarm(); -+ -+ cycle = time_to_cycle(time); -+ if (!cycle) -+ return -EINVAL; -+ -+ ftm_set_alarm(cycle); -+ -+ return count; -+} -+ -+static struct device_attribute ftm_alarm_attributes = __ATTR(ftm_alarm, 0644, -+ ftm_alarm_show, ftm_alarm_store); -+ -+static int ftm_alarm_probe(struct platform_device *pdev) -+{ -+ struct device_node *np = pdev->dev.of_node; -+ struct resource *r; -+ int irq; -+ int ret; -+ -+ r = platform_get_resource(pdev, IORESOURCE_MEM, 0); -+ if (!r) -+ return -ENODEV; -+ -+ ftm1_base = devm_ioremap_resource(&pdev->dev, r); -+ if (IS_ERR(ftm1_base)) -+ return PTR_ERR(ftm1_base); -+ -+ irq = irq_of_parse_and_map(np, 0); -+ if (irq <= 0) { -+ pr_err("ftm: unable to get IRQ from DT, %d\n", irq); -+ return -EINVAL; -+ } -+ -+ big_endian = of_property_read_bool(np, "big-endian"); -+ -+ ret = devm_request_irq(&pdev->dev, irq, ftm_alarm_interrupt, -+ IRQF_NO_SUSPEND, dev_name(&pdev->dev), NULL); -+ if (ret < 0) { -+ dev_err(&pdev->dev, "failed to request irq\n"); -+ return ret; -+ } -+ -+ ret = device_create_file(&pdev->dev, &ftm_alarm_attributes); -+ if (ret) { -+ 
dev_err(&pdev->dev, "create sysfs fail.\n"); -+ return ret; -+ } -+ -+ alarm_freq = (u32)FIXED_FREQ_CLK / (u32)MAX_FREQ_DIV; -+ -+ ftm_clean_alarm(); -+ -+ device_init_wakeup(&pdev->dev, true); -+ -+ return ret; -+} -+ -+static const struct of_device_id ftm_alarm_match[] = { -+ { .compatible = "fsl,ftm-alarm", }, -+ { .compatible = "fsl,ftm-timer", }, -+ { }, -+}; -+ -+static struct platform_driver ftm_alarm_driver = { -+ .probe = ftm_alarm_probe, -+ .driver = { -+ .name = "ftm-alarm", -+ .owner = THIS_MODULE, -+ .of_match_table = ftm_alarm_match, -+ }, -+}; -+ -+static int __init ftm_alarm_init(void) -+{ -+ return platform_driver_register(&ftm_alarm_driver); -+} -+device_initcall(ftm_alarm_init); -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -index 27d1a91..cb52ede 100644 ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.c -@@ -52,11 +52,6 @@ MODULE_LICENSE("Dual BSD/GPL"); - MODULE_AUTHOR("Freescale Semiconductor, Inc"); - MODULE_DESCRIPTION("Freescale DPAA2 Ethernet Driver"); - --/* Oldest DPAA2 objects version we are compatible with */ --#define DPAA2_SUPPORTED_DPNI_VERSION 6 --#define DPAA2_SUPPORTED_DPBP_VERSION 2 --#define DPAA2_SUPPORTED_DPCON_VERSION 2 -- - static void validate_rx_csum(struct dpaa2_eth_priv *priv, - u32 fd_status, - struct sk_buff *skb) -@@ -261,7 +256,7 @@ static void dpaa2_eth_rx(struct dpaa2_eth_priv *priv, - priv->buf_layout.private_data_size + - sizeof(struct dpaa2_fas)); - -- *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); -+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); - memset(shhwtstamps, 0, sizeof(*shhwtstamps)); - shhwtstamps->hwtstamp = ns_to_ktime(*ns); - } -@@ -362,6 +357,25 @@ static int consume_frames(struct dpaa2_eth_channel *ch) - return cleaned; - } - -+/* Configure the egress frame annotation for timestamp update */ -+static void enable_tx_tstamp(struct dpaa2_fd *fd, void *hwa_start) -+{ 
-+ struct dpaa2_faead *faead; -+ u32 ctrl; -+ u32 frc; -+ -+ /* Mark the egress frame annotation area as valid */ -+ frc = dpaa2_fd_get_frc(fd); -+ dpaa2_fd_set_frc(fd, frc | DPAA2_FD_FRC_FAEADV); -+ -+ /* enable UPD (update prepanded data) bit in FAEAD field of -+ * hardware frame annotation area -+ */ -+ ctrl = DPAA2_FAEAD_A2V | DPAA2_FAEAD_UPDV | DPAA2_FAEAD_UPD; -+ faead = hwa_start + DPAA2_FAEAD_OFFSET; -+ faead->ctrl = cpu_to_le32(ctrl); -+} -+ - /* Create a frame descriptor based on a fragmented skb */ - static int build_sg_fd(struct dpaa2_eth_priv *priv, - struct sk_buff *skb, -@@ -369,6 +383,7 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv, - { - struct device *dev = priv->net_dev->dev.parent; - void *sgt_buf = NULL; -+ void *hwa; - dma_addr_t addr; - int nr_frags = skb_shinfo(skb)->nr_frags; - struct dpaa2_sg_entry *sgt; -@@ -414,7 +429,8 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv, - * on TX confirmation. We are clearing FAS (Frame Annotation Status) - * field here. 
- */ -- memset(sgt_buf + priv->buf_layout.private_data_size, 0, 8); -+ hwa = sgt_buf + priv->buf_layout.private_data_size; -+ memset(hwa, 0, 8); - - sgt = (struct dpaa2_sg_entry *)(sgt_buf + priv->tx_data_offset); - -@@ -459,6 +475,9 @@ static int build_sg_fd(struct dpaa2_eth_priv *priv, - fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | - DPAA2_FD_CTRL_PTV1; - -+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) -+ enable_tx_tstamp(fd, hwa); -+ - return 0; - - dma_map_single_failed: -@@ -479,6 +498,7 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, - u8 *buffer_start; - struct sk_buff **skbh; - dma_addr_t addr; -+ void *hwa; - - buffer_start = PTR_ALIGN(skb->data - priv->tx_data_offset - - DPAA2_ETH_TX_BUF_ALIGN, -@@ -487,9 +507,10 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, - /* PTA from egress side is passed as is to the confirmation side so - * we need to clear some fields here in order to find consistent values - * on TX confirmation. We are clearing FAS (Frame Annotation Status) -- * field here. 
-+ * field here - */ -- memset(buffer_start + priv->buf_layout.private_data_size, 0, 8); -+ hwa = buffer_start + priv->buf_layout.private_data_size; -+ memset(hwa, 0, 8); - - /* Store a backpointer to the skb at the beginning of the buffer - * (in the private data area) such that we can release it -@@ -512,6 +533,9 @@ static int build_single_fd(struct dpaa2_eth_priv *priv, - fd->simple.ctrl = DPAA2_FD_CTRL_ASAL | DPAA2_FD_CTRL_PTA | - DPAA2_FD_CTRL_PTV1; - -+ if (priv->ts_tx_en && skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) -+ enable_tx_tstamp(fd, hwa); -+ - return 0; - } - -@@ -579,7 +603,7 @@ static void free_tx_fd(const struct dpaa2_eth_priv *priv, - ns = (u64 *)((void *)skbh + - priv->buf_layout.private_data_size + - sizeof(struct dpaa2_fas)); -- *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * (*ns); -+ *ns = DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS * le64_to_cpup(ns); - shhwtstamps.hwtstamp = ns_to_ktime(*ns); - skb_tstamp_tx(skb, &shhwtstamps); - } -@@ -779,7 +803,7 @@ static int add_bufs(struct dpaa2_eth_priv *priv, u16 bpid) - /* Allocate buffer visible to WRIOP + skb shared info + - * alignment padding - */ -- buf = napi_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); -+ buf = netdev_alloc_frag(DPAA2_ETH_BUF_RAW_SIZE); - if (unlikely(!buf)) - goto err_alloc; - -@@ -973,7 +997,7 @@ static int dpaa2_eth_poll(struct napi_struct *napi, int budget) - } - - if (cleaned < budget) { -- napi_complete_done(napi, cleaned); -+ napi_complete(napi); - /* Re-enable data available notifications */ - do { - err = dpaa2_io_service_rearm(NULL, &ch->nctx); -@@ -1353,7 +1377,7 @@ static void dpaa2_eth_set_rx_mode(struct net_device *net_dev) - * in promisc mode, in order to avoid frame loss while we - * progressively add entries to the table. - * We don't know whether we had been in promisc already, and -- * making an MC call to find it is expensive; so set uc promisc -+ * making an MC call to find out is expensive; so set uc promisc - * nonetheless. 
- */ - err = dpni_set_unicast_promisc(mc_io, 0, mc_token, 1); -@@ -1498,48 +1522,7 @@ static void cdan_cb(struct dpaa2_io_notification_ctx *ctx) - /* Update NAPI statistics */ - ch->stats.cdan++; - -- napi_schedule_irqoff(&ch->napi); --} -- --/* Verify that the FLIB API version of various MC objects is supported -- * by our driver -- */ --static int check_obj_version(struct fsl_mc_device *ls_dev, u16 mc_version) --{ -- char *name = ls_dev->obj_desc.type; -- struct device *dev = &ls_dev->dev; -- u16 supported_version, flib_version; -- -- if (strcmp(name, "dpni") == 0) { -- flib_version = DPNI_VER_MAJOR; -- supported_version = DPAA2_SUPPORTED_DPNI_VERSION; -- } else if (strcmp(name, "dpbp") == 0) { -- flib_version = DPBP_VER_MAJOR; -- supported_version = DPAA2_SUPPORTED_DPBP_VERSION; -- } else if (strcmp(name, "dpcon") == 0) { -- flib_version = DPCON_VER_MAJOR; -- supported_version = DPAA2_SUPPORTED_DPCON_VERSION; -- } else { -- dev_err(dev, "invalid object type (%s)\n", name); -- return -EINVAL; -- } -- -- /* Check that the FLIB-defined version matches the one reported by MC */ -- if (mc_version != flib_version) { -- dev_err(dev, "%s FLIB version mismatch: MC reports %d, we have %d\n", -- name, mc_version, flib_version); -- return -EINVAL; -- } -- -- /* ... 
and that we actually support it */ -- if (mc_version < supported_version) { -- dev_err(dev, "Unsupported %s FLIB version (%d)\n", -- name, mc_version); -- return -EINVAL; -- } -- dev_dbg(dev, "Using %s FLIB version %d\n", name, mc_version); -- -- return 0; -+ napi_schedule(&ch->napi); - } - - /* Allocate and configure a DPCON object */ -@@ -1563,16 +1546,18 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) - goto err_open; - } - -+ err = dpcon_reset(priv->mc_io, 0, dpcon->mc_handle); -+ if (err) { -+ dev_err(dev, "dpcon_reset() failed\n"); -+ goto err_reset; -+ } -+ - err = dpcon_get_attributes(priv->mc_io, 0, dpcon->mc_handle, &attrs); - if (err) { - dev_err(dev, "dpcon_get_attributes() failed\n"); - goto err_get_attr; - } - -- err = check_obj_version(dpcon, attrs.version.major); -- if (err) -- goto err_dpcon_ver; -- - err = dpcon_enable(priv->mc_io, 0, dpcon->mc_handle); - if (err) { - dev_err(dev, "dpcon_enable() failed\n"); -@@ -1582,8 +1567,8 @@ static struct fsl_mc_device *setup_dpcon(struct dpaa2_eth_priv *priv) - return dpcon; - - err_enable: --err_dpcon_ver: - err_get_attr: -+err_reset: - dpcon_close(priv->mc_io, 0, dpcon->mc_handle); - err_open: - fsl_mc_object_free(dpcon); -@@ -1849,6 +1834,12 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv) - goto err_open; - } - -+ err = dpbp_reset(priv->mc_io, 0, dpbp_dev->mc_handle); -+ if (err) { -+ dev_err(dev, "dpbp_reset() failed\n"); -+ goto err_reset; -+ } -+ - err = dpbp_enable(priv->mc_io, 0, dpbp_dev->mc_handle); - if (err) { - dev_err(dev, "dpbp_enable() failed\n"); -@@ -1862,16 +1853,12 @@ static int setup_dpbp(struct dpaa2_eth_priv *priv) - goto err_get_attr; - } - -- err = check_obj_version(dpbp_dev, priv->dpbp_attrs.version.major); -- if (err) -- goto err_dpbp_ver; -- - return 0; - --err_dpbp_ver: - err_get_attr: - dpbp_disable(priv->mc_io, 0, dpbp_dev->mc_handle); - err_enable: -+err_reset: - dpbp_close(priv->mc_io, 0, dpbp_dev->mc_handle); - err_open: - 
fsl_mc_object_free(dpbp_dev); -@@ -1911,6 +1898,12 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) - ls_dev->mc_io = priv->mc_io; - ls_dev->mc_handle = priv->mc_token; - -+ err = dpni_reset(priv->mc_io, 0, priv->mc_token); -+ if (err) { -+ dev_err(dev, "dpni_reset() failed\n"); -+ goto err_reset; -+ } -+ - /* Map a memory region which will be used by MC to pass us an - * attribute structure - */ -@@ -1940,10 +1933,6 @@ static int setup_dpni(struct fsl_mc_device *ls_dev) - goto err_get_attr; - } - -- err = check_obj_version(ls_dev, priv->dpni_attrs.version.major); -- if (err) -- goto err_dpni_ver; -- - memset(&priv->dpni_ext_cfg, 0, sizeof(priv->dpni_ext_cfg)); - err = dpni_extract_extended_cfg(&priv->dpni_ext_cfg, dma_mem); - if (err) { -@@ -2019,11 +2008,11 @@ err_cls_rule: - err_data_offset: - err_buf_layout: - err_extract: --err_dpni_ver: - err_get_attr: - err_dma_map: - kfree(dma_mem); - err_alloc: -+err_reset: - dpni_close(priv->mc_io, 0, priv->mc_token); - err_open: - return err; -@@ -2157,6 +2146,131 @@ static int setup_rx_err_flow(struct dpaa2_eth_priv *priv, - } - #endif - -+/* default hash key fields */ -+static struct dpaa2_eth_hash_fields default_hash_fields[] = { -+ { -+ /* L2 header */ -+ .rxnfc_field = RXH_L2DA, -+ .cls_prot = NET_PROT_ETH, -+ .cls_field = NH_FLD_ETH_DA, -+ .size = 6, -+ }, { -+ .cls_prot = NET_PROT_ETH, -+ .cls_field = NH_FLD_ETH_SA, -+ .size = 6, -+ }, { -+ /* This is the last ethertype field parsed: -+ * depending on frame format, it can be the MAC ethertype -+ * or the VLAN etype. 
-+ */ -+ .cls_prot = NET_PROT_ETH, -+ .cls_field = NH_FLD_ETH_TYPE, -+ .size = 2, -+ }, { -+ /* VLAN header */ -+ .rxnfc_field = RXH_VLAN, -+ .cls_prot = NET_PROT_VLAN, -+ .cls_field = NH_FLD_VLAN_TCI, -+ .size = 2, -+ }, { -+ /* IP header */ -+ .rxnfc_field = RXH_IP_SRC, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_SRC, -+ .size = 4, -+ }, { -+ .rxnfc_field = RXH_IP_DST, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_DST, -+ .size = 4, -+ }, { -+ .rxnfc_field = RXH_L3_PROTO, -+ .cls_prot = NET_PROT_IP, -+ .cls_field = NH_FLD_IP_PROTO, -+ .size = 1, -+ }, { -+ /* Using UDP ports, this is functionally equivalent to raw -+ * byte pairs from L4 header. -+ */ -+ .rxnfc_field = RXH_L4_B_0_1, -+ .cls_prot = NET_PROT_UDP, -+ .cls_field = NH_FLD_UDP_PORT_SRC, -+ .size = 2, -+ }, { -+ .rxnfc_field = RXH_L4_B_2_3, -+ .cls_prot = NET_PROT_UDP, -+ .cls_field = NH_FLD_UDP_PORT_DST, -+ .size = 2, -+ }, -+}; -+ -+/* Set RX hash options */ -+int set_hash(struct dpaa2_eth_priv *priv) -+{ -+ struct device *dev = priv->net_dev->dev.parent; -+ struct dpkg_profile_cfg cls_cfg; -+ struct dpni_rx_tc_dist_cfg dist_cfg; -+ u8 *dma_mem; -+ int i; -+ int err = 0; -+ -+ memset(&cls_cfg, 0, sizeof(cls_cfg)); -+ -+ for (i = 0; i < priv->num_hash_fields; i++) { -+ struct dpkg_extract *key = -+ &cls_cfg.extracts[cls_cfg.num_extracts]; -+ -+ key->type = DPKG_EXTRACT_FROM_HDR; -+ key->extract.from_hdr.prot = priv->hash_fields[i].cls_prot; -+ key->extract.from_hdr.type = DPKG_FULL_FIELD; -+ key->extract.from_hdr.field = priv->hash_fields[i].cls_field; -+ cls_cfg.num_extracts++; -+ -+ priv->rx_flow_hash |= priv->hash_fields[i].rxnfc_field; -+ } -+ -+ dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); -+ if (!dma_mem) -+ return -ENOMEM; -+ -+ err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); -+ if (err) { -+ dev_err(dev, "dpni_prepare_key_cfg error %d", err); -+ return err; -+ } -+ -+ memset(&dist_cfg, 0, sizeof(dist_cfg)); -+ -+ /* Prepare for setting the rx dist */ -+ 
dist_cfg.key_cfg_iova = dma_map_single(dev, dma_mem, -+ DPAA2_CLASSIFIER_DMA_SIZE, -+ DMA_TO_DEVICE); -+ if (dma_mapping_error(dev, dist_cfg.key_cfg_iova)) { -+ dev_err(dev, "DMA mapping failed\n"); -+ kfree(dma_mem); -+ return -ENOMEM; -+ } -+ -+ dist_cfg.dist_size = dpaa2_eth_queue_count(priv); -+ if (dpaa2_eth_fs_enabled(priv)) { -+ dist_cfg.dist_mode = DPNI_DIST_MODE_FS; -+ dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; -+ } else { -+ dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; -+ } -+ -+ err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); -+ dma_unmap_single(dev, dist_cfg.key_cfg_iova, -+ DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); -+ kfree(dma_mem); -+ if (err) { -+ dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); -+ return err; -+ } -+ -+ return 0; -+} -+ - /* Bind the DPNI to its needed objects and resources: buffer pool, DPIOs, - * frame queues and channels - */ -@@ -2179,15 +2293,22 @@ static int bind_dpni(struct dpaa2_eth_priv *priv) - return err; - } - -- check_fs_support(net_dev); -+ /* Verify classification options and disable hashing and/or -+ * flow steering support in case of invalid configuration values -+ */ -+ check_cls_support(priv); - -- /* have the interface implicitly distribute traffic based on supported -- * header fields -+ /* have the interface implicitly distribute traffic based on -+ * a static hash key - */ - if (dpaa2_eth_hash_enabled(priv)) { -- err = dpaa2_eth_set_hash(net_dev, DPAA2_RXH_SUPPORTED); -- if (err) -+ priv->hash_fields = default_hash_fields; -+ priv->num_hash_fields = ARRAY_SIZE(default_hash_fields); -+ err = set_hash(priv); -+ if (err) { -+ dev_err(dev, "Hashing configuration failed\n"); - return err; -+ } - } - - /* Configure handling of error frames */ -@@ -2512,7 +2633,7 @@ static ssize_t dpaa2_eth_show_txconf_cpumask(struct device *dev, - { - struct dpaa2_eth_priv *priv = netdev_priv(to_net_dev(dev)); - -- return cpumap_print_to_pagebuf(1, buf, &priv->txconf_cpumask); -+ return 
cpumask_scnprintf(buf, PAGE_SIZE, &priv->txconf_cpumask); - } - - static ssize_t dpaa2_eth_write_txconf_cpumask(struct device *dev, -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h -index 7274fbe..bdcdbd6 100644 ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-eth.h -@@ -40,7 +40,6 @@ - #include "../../fsl-mc/include/dpbp-cmd.h" - #include "../../fsl-mc/include/dpcon.h" - #include "../../fsl-mc/include/dpcon-cmd.h" --#include "../../fsl-mc/include/dpmng.h" - #include "dpni.h" - #include "dpni-cmd.h" - -@@ -54,8 +53,8 @@ - */ - #define DPAA2_ETH_MAX_SG_ENTRIES ((64 * 1024) / DPAA2_ETH_RX_BUF_SIZE) - --/* Maximum acceptable MTU value. It is in direct relation with the MC-enforced -- * Max Frame Length (currently 10k). -+/* Maximum acceptable MTU value. It is in direct relation with the hardware -+ * enforced Max Frame Length (currently 10k). - */ - #define DPAA2_ETH_MFL (10 * 1024) - #define DPAA2_ETH_MAX_MTU (DPAA2_ETH_MFL - VLAN_ETH_HLEN) -@@ -100,8 +99,8 @@ - SKB_DATA_ALIGN(sizeof(struct skb_shared_info)) + \ - DPAA2_ETH_RX_BUF_ALIGN) - --/* PTP nominal frequency 1MHz */ --#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1000 -+/* PTP nominal frequency 1GHz */ -+#define DPAA2_PTP_NOMINAL_FREQ_PERIOD_NS 1 - - /* We are accommodating a skb backpointer and some S/G info - * in the frame's software annotation. 
The hardware -@@ -138,6 +137,18 @@ struct dpaa2_fas { - __le32 status; - } __packed; - -+/* Frame annotation egress action descriptor */ -+#define DPAA2_FAEAD_OFFSET 0x58 -+ -+struct dpaa2_faead { -+ __le32 conf_fqid; -+ __le32 ctrl; -+}; -+ -+#define DPAA2_FAEAD_A2V 0x20000000 -+#define DPAA2_FAEAD_UPDV 0x00001000 -+#define DPAA2_FAEAD_UPD 0x00000010 -+ - /* Error and status bits in the frame annotation status word */ - /* Debug frame, otherwise supposed to be discarded */ - #define DPAA2_FAS_DISC 0x80000000 -@@ -274,6 +285,14 @@ struct dpaa2_eth_cls_rule { - bool in_use; - }; - -+struct dpaa2_eth_hash_fields { -+ u64 rxnfc_field; -+ enum net_prot cls_prot; -+ int cls_field; -+ int offset; -+ int size; -+}; -+ - /* Driver private data */ - struct dpaa2_eth_priv { - struct net_device *net_dev; -@@ -318,8 +337,10 @@ struct dpaa2_eth_priv { - bool do_link_poll; - struct task_struct *poll_thread; - -+ struct dpaa2_eth_hash_fields *hash_fields; -+ u8 num_hash_fields; - /* enabled ethtool hashing bits */ -- u64 rx_hash_fields; -+ u64 rx_flow_hash; - - #ifdef CONFIG_FSL_DPAA2_ETH_DEBUGFS - struct dpaa2_debugfs dbg; -@@ -334,25 +355,24 @@ struct dpaa2_eth_priv { - bool ts_rx_en; /* Rx timestamping enabled */ - }; - --/* default Rx hash options, set during probing */ --#define DPAA2_RXH_SUPPORTED (RXH_L2DA | RXH_VLAN | RXH_L3_PROTO \ -- | RXH_IP_SRC | RXH_IP_DST | RXH_L4_B_0_1 \ -- | RXH_L4_B_2_3) -- - #define dpaa2_eth_hash_enabled(priv) \ - ((priv)->dpni_attrs.options & DPNI_OPT_DIST_HASH) - - #define dpaa2_eth_fs_enabled(priv) \ - ((priv)->dpni_attrs.options & DPNI_OPT_DIST_FS) - -+#define dpaa2_eth_fs_mask_enabled(priv) \ -+ ((priv)->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT) -+ - #define DPAA2_CLASSIFIER_ENTRY_COUNT 16 - - /* Required by struct dpni_attr::ext_cfg_iova */ - #define DPAA2_EXT_CFG_SIZE 256 - --extern const struct ethtool_ops dpaa2_ethtool_ops; -+/* size of DMA memory used to pass configuration to classifier, in bytes */ -+#define 
DPAA2_CLASSIFIER_DMA_SIZE 256 - --int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags); -+extern const struct ethtool_ops dpaa2_ethtool_ops; - - static int dpaa2_eth_queue_count(struct dpaa2_eth_priv *priv) - { -@@ -372,6 +392,6 @@ static inline int dpaa2_eth_max_channels(struct dpaa2_eth_priv *priv) - priv->dpni_attrs.max_senders); - } - --void check_fs_support(struct net_device *); -+void check_cls_support(struct dpaa2_eth_priv *priv); - - #endif /* __DPAA2_H */ -diff --git a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -index fdab07f..1d792cd 100644 ---- a/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -+++ b/drivers/staging/fsl-dpaa2/ethernet/dpaa2-ethtool.c -@@ -32,9 +32,6 @@ - #include "dpni.h" /* DPNI_LINK_OPT_* */ - #include "dpaa2-eth.h" - --/* size of DMA memory used to pass configuration to classifier, in bytes */ --#define DPAA2_CLASSIFIER_DMA_SIZE 256 -- - /* To be kept in sync with 'enum dpni_counter' */ - char dpaa2_ethtool_stats[][ETH_GSTRING_LEN] = { - "rx frames", -@@ -89,28 +86,9 @@ char dpaa2_ethtool_extras[][ETH_GSTRING_LEN] = { - static void dpaa2_eth_get_drvinfo(struct net_device *net_dev, - struct ethtool_drvinfo *drvinfo) - { -- struct mc_version mc_ver; -- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -- char fw_version[ETHTOOL_FWVERS_LEN]; -- char version[32]; -- int err; -- -- err = mc_get_version(priv->mc_io, 0, &mc_ver); -- if (err) { -- strlcpy(drvinfo->fw_version, "Error retrieving MC version", -- sizeof(drvinfo->fw_version)); -- } else { -- scnprintf(fw_version, sizeof(fw_version), "%d.%d.%d", -- mc_ver.major, mc_ver.minor, mc_ver.revision); -- strlcpy(drvinfo->fw_version, fw_version, -- sizeof(drvinfo->fw_version)); -- } -- -- scnprintf(version, sizeof(version), "%d.%d", DPNI_VER_MAJOR, -- DPNI_VER_MINOR); -- strlcpy(drvinfo->version, version, sizeof(drvinfo->version)); -- - strlcpy(drvinfo->driver, KBUILD_MODNAME, sizeof(drvinfo->driver)); -+ 
strlcpy(drvinfo->version, VERSION, sizeof(drvinfo->version)); -+ strlcpy(drvinfo->fw_version, "N/A", sizeof(drvinfo->fw_version)); - strlcpy(drvinfo->bus_info, dev_name(net_dev->dev.parent->parent), - sizeof(drvinfo->bus_info)); - } -@@ -152,7 +130,7 @@ static int dpaa2_eth_set_settings(struct net_device *net_dev, - - netdev_dbg(net_dev, "Setting link parameters..."); - -- /* Due to a temporary firmware limitation, the DPNI must be down -+ /* Due to a temporary MC limitation, the DPNI must be down - * in order to be able to change link settings. Taking steps to let - * the user know that. - */ -@@ -211,7 +189,7 @@ static int dpaa2_eth_get_sset_count(struct net_device *net_dev, int sset) - } - } - --/** Fill in hardware counters, as returned by the MC firmware. -+/** Fill in hardware counters, as returned by MC. - */ - static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, - struct ethtool_stats *stats, -@@ -296,203 +274,223 @@ static void dpaa2_eth_get_ethtool_stats(struct net_device *net_dev, - #endif - } - --static const struct dpaa2_eth_hash_fields { -- u64 rxnfc_field; -- enum net_prot cls_prot; -- int cls_field; -- int size; --} hash_fields[] = { -- { -- /* L2 header */ -- .rxnfc_field = RXH_L2DA, -- .cls_prot = NET_PROT_ETH, -- .cls_field = NH_FLD_ETH_DA, -- .size = 6, -- }, { -- /* VLAN header */ -- .rxnfc_field = RXH_VLAN, -- .cls_prot = NET_PROT_VLAN, -- .cls_field = NH_FLD_VLAN_TCI, -- .size = 2, -- }, { -- /* IP header */ -- .rxnfc_field = RXH_IP_SRC, -- .cls_prot = NET_PROT_IP, -- .cls_field = NH_FLD_IP_SRC, -- .size = 4, -- }, { -- .rxnfc_field = RXH_IP_DST, -- .cls_prot = NET_PROT_IP, -- .cls_field = NH_FLD_IP_DST, -- .size = 4, -- }, { -- .rxnfc_field = RXH_L3_PROTO, -- .cls_prot = NET_PROT_IP, -- .cls_field = NH_FLD_IP_PROTO, -- .size = 1, -- }, { -- /* Using UDP ports, this is functionally equivalent to raw -- * byte pairs from L4 header. 
-- */ -- .rxnfc_field = RXH_L4_B_0_1, -- .cls_prot = NET_PROT_UDP, -- .cls_field = NH_FLD_UDP_PORT_SRC, -- .size = 2, -- }, { -- .rxnfc_field = RXH_L4_B_2_3, -- .cls_prot = NET_PROT_UDP, -- .cls_field = NH_FLD_UDP_PORT_DST, -- .size = 2, -- }, --}; -- --static int cls_is_enabled(struct net_device *net_dev, u64 flag) --{ -- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -- -- return !!(priv->rx_hash_fields & flag); --} -- --static int cls_key_off(struct net_device *net_dev, u64 flag) -+static int cls_key_off(struct dpaa2_eth_priv *priv, int prot, int field) - { - int i, off = 0; - -- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { -- if (hash_fields[i].rxnfc_field & flag) -+ for (i = 0; i < priv->num_hash_fields; i++) { -+ if (priv->hash_fields[i].cls_prot == prot && -+ priv->hash_fields[i].cls_field == field) - return off; -- if (cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) -- off += hash_fields[i].size; -+ off += priv->hash_fields[i].size; - } - - return -1; - } - --static u8 cls_key_size(struct net_device *net_dev) -+static u8 cls_key_size(struct dpaa2_eth_priv *priv) - { - u8 i, size = 0; - -- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { -- if (!cls_is_enabled(net_dev, hash_fields[i].rxnfc_field)) -- continue; -- size += hash_fields[i].size; -- } -+ for (i = 0; i < priv->num_hash_fields; i++) -+ size += priv->hash_fields[i].size; - - return size; - } - --static u8 cls_max_key_size(struct net_device *net_dev) -+void check_cls_support(struct dpaa2_eth_priv *priv) - { -- u8 i, size = 0; -+ u8 key_size = cls_key_size(priv); -+ struct device *dev = priv->net_dev->dev.parent; -+ -+ if (dpaa2_eth_hash_enabled(priv)) { -+ if (priv->dpni_attrs.max_dist_key_size < key_size) { -+ dev_dbg(dev, "max_dist_key_size = %d, expected %d. Hashing and steering are disabled\n", -+ priv->dpni_attrs.max_dist_key_size, -+ key_size); -+ goto disable_cls; -+ } -+ if (priv->num_hash_fields > DPKG_MAX_NUM_OF_EXTRACTS) { -+ dev_dbg(dev, "Too many key fields (max = %d). 
Hashing and steering are disabled\n", -+ DPKG_MAX_NUM_OF_EXTRACTS); -+ goto disable_cls; -+ } -+ } - -- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) -- size += hash_fields[i].size; -+ if (dpaa2_eth_fs_enabled(priv)) { -+ if (!dpaa2_eth_hash_enabled(priv)) { -+ dev_dbg(dev, "DPNI_OPT_DIST_HASH option missing. Steering is disabled\n"); -+ goto disable_cls; -+ } -+ if (!dpaa2_eth_fs_mask_enabled(priv)) { -+ dev_dbg(dev, "Key masks not supported. Steering is disabled\n"); -+ goto disable_fs; -+ } -+ } - -- return size; -+ return; -+ -+disable_cls: -+ priv->dpni_attrs.options &= ~DPNI_OPT_DIST_HASH; -+disable_fs: -+ priv->dpni_attrs.options &= ~(DPNI_OPT_DIST_FS | -+ DPNI_OPT_FS_MASK_SUPPORT); - } - --void check_fs_support(struct net_device *net_dev) -+static int prep_l4_rule(struct dpaa2_eth_priv *priv, -+ struct ethtool_tcpip4_spec *l4_value, -+ struct ethtool_tcpip4_spec *l4_mask, -+ void *key, void *mask, u8 l4_proto) - { -- u8 key_size = cls_max_key_size(net_dev); -- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ int offset; - -- if (priv->dpni_attrs.options & DPNI_OPT_DIST_FS && -- priv->dpni_attrs.max_dist_key_size < key_size) { -- dev_err(&net_dev->dev, -- "max_dist_key_size = %d, expected %d. 
Steering is disabled\n", -- priv->dpni_attrs.max_dist_key_size, -- key_size); -- priv->dpni_attrs.options &= ~DPNI_OPT_DIST_FS; -+ if (l4_mask->tos) { -+ netdev_err(priv->net_dev, "ToS is not supported for IPv4 L4\n"); -+ return -EOPNOTSUPP; - } --} - --/* Set RX hash options -- * flags is a combination of RXH_ bits -- */ --int dpaa2_eth_set_hash(struct net_device *net_dev, u64 flags) --{ -- struct device *dev = net_dev->dev.parent; -- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -- struct dpkg_profile_cfg cls_cfg; -- struct dpni_rx_tc_dist_cfg dist_cfg; -- u8 *dma_mem; -- u64 enabled_flags = 0; -- int i; -- int err = 0; -+ if (l4_mask->ip4src) { -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); -+ *(u32 *)(key + offset) = l4_value->ip4src; -+ *(u32 *)(mask + offset) = l4_mask->ip4src; -+ } - -- if (!dpaa2_eth_hash_enabled(priv)) { -- dev_err(dev, "Hashing support is not enabled\n"); -- return -EOPNOTSUPP; -+ if (l4_mask->ip4dst) { -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); -+ *(u32 *)(key + offset) = l4_value->ip4dst; -+ *(u32 *)(mask + offset) = l4_mask->ip4dst; - } - -- if (flags & ~DPAA2_RXH_SUPPORTED) { -- /* RXH_DISCARD is not supported */ -- dev_err(dev, "unsupported option selected, supported options are: mvtsdfn\n"); -- return -EOPNOTSUPP; -+ if (l4_mask->psrc) { -+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); -+ *(u32 *)(key + offset) = l4_value->psrc; -+ *(u32 *)(mask + offset) = l4_mask->psrc; - } - -- memset(&cls_cfg, 0, sizeof(cls_cfg)); -+ if (l4_mask->pdst) { -+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); -+ *(u32 *)(key + offset) = l4_value->pdst; -+ *(u32 *)(mask + offset) = l4_mask->pdst; -+ } - -- for (i = 0; i < ARRAY_SIZE(hash_fields); i++) { -- struct dpkg_extract *key = -- &cls_cfg.extracts[cls_cfg.num_extracts]; -+ /* Only apply the rule for the user-specified L4 protocol -+ * and if ethertype matches IPv4 -+ */ -+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); 
-+ *(u16 *)(key + offset) = htons(ETH_P_IP); -+ *(u16 *)(mask + offset) = 0xFFFF; - -- if (!(flags & hash_fields[i].rxnfc_field)) -- continue; -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); -+ *(u8 *)(key + offset) = l4_proto; -+ *(u8 *)(mask + offset) = 0xFF; - -- if (cls_cfg.num_extracts >= DPKG_MAX_NUM_OF_EXTRACTS) { -- dev_err(dev, "error adding key extraction rule, too many rules?\n"); -- return -E2BIG; -- } -+ /* TODO: check IP version */ - -- key->type = DPKG_EXTRACT_FROM_HDR; -- key->extract.from_hdr.prot = hash_fields[i].cls_prot; -- key->extract.from_hdr.type = DPKG_FULL_FIELD; -- key->extract.from_hdr.field = hash_fields[i].cls_field; -- cls_cfg.num_extracts++; -+ return 0; -+} -+ -+static int prep_eth_rule(struct dpaa2_eth_priv *priv, -+ struct ethhdr *eth_value, struct ethhdr *eth_mask, -+ void *key, void *mask) -+{ -+ int offset; - -- enabled_flags |= hash_fields[i].rxnfc_field; -+ if (eth_mask->h_proto) { -+ netdev_err(priv->net_dev, "Ethertype is not supported!\n"); -+ return -EOPNOTSUPP; - } - -- dma_mem = kzalloc(DPAA2_CLASSIFIER_DMA_SIZE, GFP_DMA | GFP_KERNEL); -- if (!dma_mem) -- return -ENOMEM; -+ if (!is_zero_ether_addr(eth_mask->h_source)) { -+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_SA); -+ ether_addr_copy(key + offset, eth_value->h_source); -+ ether_addr_copy(mask + offset, eth_mask->h_source); -+ } - -- err = dpni_prepare_key_cfg(&cls_cfg, dma_mem); -- if (err) { -- dev_err(dev, "dpni_prepare_key_cfg error %d", err); -- return err; -+ if (!is_zero_ether_addr(eth_mask->h_dest)) { -+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); -+ ether_addr_copy(key + offset, eth_value->h_dest); -+ ether_addr_copy(mask + offset, eth_mask->h_dest); - } - -- memset(&dist_cfg, 0, sizeof(dist_cfg)); -+ return 0; -+} - -- /* Prepare for setting the rx dist */ -- dist_cfg.key_cfg_iova = dma_map_single(net_dev->dev.parent, dma_mem, -- DPAA2_CLASSIFIER_DMA_SIZE, -- DMA_TO_DEVICE); -- if (dma_mapping_error(net_dev->dev.parent, 
dist_cfg.key_cfg_iova)) { -- dev_err(dev, "DMA mapping failed\n"); -- kfree(dma_mem); -- return -ENOMEM; -+static int prep_user_ip_rule(struct dpaa2_eth_priv *priv, -+ struct ethtool_usrip4_spec *uip_value, -+ struct ethtool_usrip4_spec *uip_mask, -+ void *key, void *mask) -+{ -+ int offset; -+ -+ if (uip_mask->tos) -+ return -EOPNOTSUPP; -+ -+ if (uip_mask->ip4src) { -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_SRC); -+ *(u32 *)(key + offset) = uip_value->ip4src; -+ *(u32 *)(mask + offset) = uip_mask->ip4src; - } - -- dist_cfg.dist_size = dpaa2_eth_queue_count(priv); -- if (dpaa2_eth_fs_enabled(priv)) { -- dist_cfg.dist_mode = DPNI_DIST_MODE_FS; -- dist_cfg.fs_cfg.miss_action = DPNI_FS_MISS_HASH; -- } else { -- dist_cfg.dist_mode = DPNI_DIST_MODE_HASH; -+ if (uip_mask->ip4dst) { -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_DST); -+ *(u32 *)(key + offset) = uip_value->ip4dst; -+ *(u32 *)(mask + offset) = uip_mask->ip4dst; - } - -- err = dpni_set_rx_tc_dist(priv->mc_io, 0, priv->mc_token, 0, &dist_cfg); -- dma_unmap_single(net_dev->dev.parent, dist_cfg.key_cfg_iova, -- DPAA2_CLASSIFIER_DMA_SIZE, DMA_TO_DEVICE); -- kfree(dma_mem); -- if (err) { -- dev_err(dev, "dpni_set_rx_tc_dist() error %d\n", err); -- return err; -+ if (uip_mask->proto) { -+ offset = cls_key_off(priv, NET_PROT_IP, NH_FLD_IP_PROTO); -+ *(u32 *)(key + offset) = uip_value->proto; -+ *(u32 *)(mask + offset) = uip_mask->proto; -+ } -+ if (uip_mask->l4_4_bytes) { -+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_SRC); -+ *(u16 *)(key + offset) = uip_value->l4_4_bytes << 16; -+ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes << 16; -+ -+ offset = cls_key_off(priv, NET_PROT_UDP, NH_FLD_UDP_PORT_DST); -+ *(u16 *)(key + offset) = uip_value->l4_4_bytes & 0xFFFF; -+ *(u16 *)(mask + offset) = uip_mask->l4_4_bytes & 0xFFFF; - } - -- priv->rx_hash_fields = enabled_flags; -+ /* Ethertype must be IP */ -+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_TYPE); -+ *(u16 *)(key + offset) 
= htons(ETH_P_IP); -+ *(u16 *)(mask + offset) = 0xFFFF; -+ -+ return 0; -+} -+ -+static int prep_ext_rule(struct dpaa2_eth_priv *priv, -+ struct ethtool_flow_ext *ext_value, -+ struct ethtool_flow_ext *ext_mask, -+ void *key, void *mask) -+{ -+ int offset; -+ -+ if (ext_mask->vlan_etype) -+ return -EOPNOTSUPP; -+ -+ if (ext_mask->vlan_tci) { -+ offset = cls_key_off(priv, NET_PROT_VLAN, NH_FLD_VLAN_TCI); -+ *(u16 *)(key + offset) = ext_value->vlan_tci; -+ *(u16 *)(mask + offset) = ext_mask->vlan_tci; -+ } -+ -+ return 0; -+} -+ -+static int prep_mac_ext_rule(struct dpaa2_eth_priv *priv, -+ struct ethtool_flow_ext *ext_value, -+ struct ethtool_flow_ext *ext_mask, -+ void *key, void *mask) -+{ -+ int offset; -+ -+ if (!is_zero_ether_addr(ext_mask->h_dest)) { -+ offset = cls_key_off(priv, NET_PROT_ETH, NH_FLD_ETH_DA); -+ ether_addr_copy(key + offset, ext_value->h_dest); -+ ether_addr_copy(mask + offset, ext_mask->h_dest); -+ } - - return 0; - } -@@ -501,140 +499,56 @@ static int prep_cls_rule(struct net_device *net_dev, - struct ethtool_rx_flow_spec *fs, - void *key) - { -- struct ethtool_tcpip4_spec *l4ip4_h, *l4ip4_m; -- struct ethhdr *eth_h, *eth_m; -- struct ethtool_flow_ext *ext_h, *ext_m; -- const u8 key_size = cls_key_size(net_dev); -+ struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ const u8 key_size = cls_key_size(priv); - void *msk = key + key_size; -+ int err; - - memset(key, 0, key_size * 2); - -- /* This code is a major mess, it has to be cleaned up after the -- * classification mask issue is fixed and key format will be made static -- */ -- - switch (fs->flow_type & 0xff) { - case TCP_V4_FLOW: -- l4ip4_h = &fs->h_u.tcp_ip4_spec; -- l4ip4_m = &fs->m_u.tcp_ip4_spec; -- /* TODO: ethertype to match IPv4 and protocol to match TCP */ -- goto l4ip4; -- -+ err = prep_l4_rule(priv, &fs->h_u.tcp_ip4_spec, -+ &fs->m_u.tcp_ip4_spec, key, msk, -+ IPPROTO_TCP); -+ break; - case UDP_V4_FLOW: -- l4ip4_h = &fs->h_u.udp_ip4_spec; -- l4ip4_m = &fs->m_u.udp_ip4_spec; 
-- goto l4ip4; -- -+ err = prep_l4_rule(priv, &fs->h_u.udp_ip4_spec, -+ &fs->m_u.udp_ip4_spec, key, msk, -+ IPPROTO_UDP); -+ break; - case SCTP_V4_FLOW: -- l4ip4_h = &fs->h_u.sctp_ip4_spec; -- l4ip4_m = &fs->m_u.sctp_ip4_spec; -- --l4ip4: -- if (l4ip4_m->tos) { -- netdev_err(net_dev, -- "ToS is not supported for IPv4 L4\n"); -- return -EOPNOTSUPP; -- } -- if (l4ip4_m->ip4src && !cls_is_enabled(net_dev, RXH_IP_SRC)) { -- netdev_err(net_dev, "IP SRC not supported!\n"); -- return -EOPNOTSUPP; -- } -- if (l4ip4_m->ip4dst && !cls_is_enabled(net_dev, RXH_IP_DST)) { -- netdev_err(net_dev, "IP DST not supported!\n"); -- return -EOPNOTSUPP; -- } -- if (l4ip4_m->psrc && !cls_is_enabled(net_dev, RXH_L4_B_0_1)) { -- netdev_err(net_dev, "PSRC not supported, ignored\n"); -- return -EOPNOTSUPP; -- } -- if (l4ip4_m->pdst && !cls_is_enabled(net_dev, RXH_L4_B_2_3)) { -- netdev_err(net_dev, "PDST not supported, ignored\n"); -- return -EOPNOTSUPP; -- } -- -- if (cls_is_enabled(net_dev, RXH_IP_SRC)) { -- *(u32 *)(key + cls_key_off(net_dev, RXH_IP_SRC)) -- = l4ip4_h->ip4src; -- *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_SRC)) -- = l4ip4_m->ip4src; -- } -- if (cls_is_enabled(net_dev, RXH_IP_DST)) { -- *(u32 *)(key + cls_key_off(net_dev, RXH_IP_DST)) -- = l4ip4_h->ip4dst; -- *(u32 *)(msk + cls_key_off(net_dev, RXH_IP_DST)) -- = l4ip4_m->ip4dst; -- } -- -- if (cls_is_enabled(net_dev, RXH_L4_B_0_1)) { -- *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_0_1)) -- = l4ip4_h->psrc; -- *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_0_1)) -- = l4ip4_m->psrc; -- } -- -- if (cls_is_enabled(net_dev, RXH_L4_B_2_3)) { -- *(u32 *)(key + cls_key_off(net_dev, RXH_L4_B_2_3)) -- = l4ip4_h->pdst; -- *(u32 *)(msk + cls_key_off(net_dev, RXH_L4_B_2_3)) -- = l4ip4_m->pdst; -- } -+ err = prep_l4_rule(priv, &fs->h_u.sctp_ip4_spec, -+ &fs->m_u.sctp_ip4_spec, key, msk, -+ IPPROTO_SCTP); - break; -- - case ETHER_FLOW: -- eth_h = &fs->h_u.ether_spec; -- eth_m = &fs->m_u.ether_spec; -- -- if (eth_m->h_proto) { -- 
netdev_err(net_dev, "Ethertype is not supported!\n"); -- return -EOPNOTSUPP; -- } -- -- if (!is_zero_ether_addr(eth_m->h_source)) { -- netdev_err(net_dev, "ETH SRC is not supported!\n"); -- return -EOPNOTSUPP; -- } -- -- if (cls_is_enabled(net_dev, RXH_L2DA)) { -- ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), -- eth_h->h_dest); -- ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), -- eth_m->h_dest); -- } else { -- if (!is_zero_ether_addr(eth_m->h_dest)) { -- netdev_err(net_dev, -- "ETH DST is not supported!\n"); -- return -EOPNOTSUPP; -- } -- } -+ err = prep_eth_rule(priv, &fs->h_u.ether_spec, -+ &fs->m_u.ether_spec, key, msk); -+ break; -+ case IP_USER_FLOW: -+ err = prep_user_ip_rule(priv, &fs->h_u.usr_ip4_spec, -+ &fs->m_u.usr_ip4_spec, key, msk); - break; -- - default: -- /* TODO: IP user flow, AH, ESP */ -+ /* TODO: AH, ESP */ - return -EOPNOTSUPP; - } -+ if (err) -+ return err; - - if (fs->flow_type & FLOW_EXT) { -- /* TODO: ETH data, VLAN ethertype, VLAN TCI .. */ -- return -EOPNOTSUPP; -+ err = prep_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); -+ if (err) -+ return err; - } - - if (fs->flow_type & FLOW_MAC_EXT) { -- ext_h = &fs->h_ext; -- ext_m = &fs->m_ext; -- -- if (cls_is_enabled(net_dev, RXH_L2DA)) { -- ether_addr_copy(key + cls_key_off(net_dev, RXH_L2DA), -- ext_h->h_dest); -- ether_addr_copy(msk + cls_key_off(net_dev, RXH_L2DA), -- ext_m->h_dest); -- } else { -- if (!is_zero_ether_addr(ext_m->h_dest)) { -- netdev_err(net_dev, -- "ETH DST is not supported!\n"); -- return -EOPNOTSUPP; -- } -- } -+ err = prep_mac_ext_rule(priv, &fs->h_ext, &fs->m_ext, key, msk); -+ if (err) -+ return err; - } -+ - return 0; - } - -@@ -643,6 +557,7 @@ static int do_cls(struct net_device *net_dev, - bool add) - { - struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -+ struct device *dev = net_dev->dev.parent; - const int rule_cnt = DPAA2_CLASSIFIER_ENTRY_COUNT; - struct dpni_rule_cfg rule_cfg; - void *dma_mem; -@@ -660,7 +575,7 @@ static int do_cls(struct 
net_device *net_dev, - return -EINVAL; - - memset(&rule_cfg, 0, sizeof(rule_cfg)); -- rule_cfg.key_size = cls_key_size(net_dev); -+ rule_cfg.key_size = cls_key_size(priv); - - /* allocate twice the key size, for the actual key and for mask */ - dma_mem = kzalloc(rule_cfg.key_size * 2, GFP_DMA | GFP_KERNEL); -@@ -671,27 +586,12 @@ static int do_cls(struct net_device *net_dev, - if (err) - goto err_free_mem; - -- rule_cfg.key_iova = dma_map_single(net_dev->dev.parent, dma_mem, -+ rule_cfg.key_iova = dma_map_single(dev, dma_mem, - rule_cfg.key_size * 2, - DMA_TO_DEVICE); - - rule_cfg.mask_iova = rule_cfg.key_iova + rule_cfg.key_size; - -- if (!(priv->dpni_attrs.options & DPNI_OPT_FS_MASK_SUPPORT)) { -- int i; -- u8 *mask = dma_mem + rule_cfg.key_size; -- -- /* check that nothing is masked out, otherwise it won't work */ -- for (i = 0; i < rule_cfg.key_size; i++) { -- if (mask[i] == 0xff) -- continue; -- netdev_err(net_dev, "dev does not support masking!\n"); -- err = -EOPNOTSUPP; -- goto err_free_mem; -- } -- rule_cfg.mask_iova = 0; -- } -- - /* No way to control rule order in firmware */ - if (add) - err = dpni_add_fs_entry(priv->mc_io, 0, priv->mc_token, 0, -@@ -700,10 +600,10 @@ static int do_cls(struct net_device *net_dev, - err = dpni_remove_fs_entry(priv->mc_io, 0, priv->mc_token, 0, - &rule_cfg); - -- dma_unmap_single(net_dev->dev.parent, rule_cfg.key_iova, -+ dma_unmap_single(dev, rule_cfg.key_iova, - rule_cfg.key_size * 2, DMA_TO_DEVICE); - if (err) { -- netdev_err(net_dev, "dpaa2_add_cls() error %d\n", err); -+ netdev_err(net_dev, "dpaa2_add/remove_cls() error %d\n", err); - goto err_free_mem; - } - -@@ -746,40 +646,12 @@ static int del_cls(struct net_device *net_dev, int location) - return 0; - } - --static void clear_cls(struct net_device *net_dev) --{ -- struct dpaa2_eth_priv *priv = netdev_priv(net_dev); -- int i, err; -- -- for (i = 0; i < DPAA2_CLASSIFIER_ENTRY_COUNT; i++) { -- if (!priv->cls_rule[i].in_use) -- continue; -- -- err = del_cls(net_dev, 
i); -- if (err) -- netdev_warn(net_dev, -- "err trying to delete classification entry %d\n", -- i); -- } --} -- - static int dpaa2_eth_set_rxnfc(struct net_device *net_dev, - struct ethtool_rxnfc *rxnfc) - { - int err = 0; - - switch (rxnfc->cmd) { -- case ETHTOOL_SRXFH: -- /* first off clear ALL classification rules, chaging key -- * composition will break them anyway -- */ -- clear_cls(net_dev); -- /* we purposely ignore cmd->flow_type for now, because the -- * classifier only supports a single set of fields for all -- * protocols -- */ -- err = dpaa2_eth_set_hash(net_dev, rxnfc->data); -- break; - case ETHTOOL_SRXCLSRLINS: - err = add_cls(net_dev, &rxnfc->fs); - break; -@@ -804,11 +676,10 @@ static int dpaa2_eth_get_rxnfc(struct net_device *net_dev, - - switch (rxnfc->cmd) { - case ETHTOOL_GRXFH: -- /* we purposely ignore cmd->flow_type for now, because the -- * classifier only supports a single set of fields for all -- * protocols -+ /* we purposely ignore cmd->flow_type, because the hashing key -+ * is the same (and fixed) for all protocols - */ -- rxnfc->data = priv->rx_hash_fields; -+ rxnfc->data = priv->rx_flow_hash; - break; - - case ETHTOOL_GRXRINGS: -diff --git a/drivers/staging/fsl-dpaa2/mac/mac.c b/drivers/staging/fsl-dpaa2/mac/mac.c -index 366ad4c..fe16b8b 100644 ---- a/drivers/staging/fsl-dpaa2/mac/mac.c -+++ b/drivers/staging/fsl-dpaa2/mac/mac.c -@@ -120,7 +120,7 @@ static void dpaa2_mac_link_changed(struct net_device *netdev) - phy_print_status(phydev); - } - -- /* We must call into the MC firmware at all times, because we don't know -+ /* We must interrogate MC at all times, because we don't know - * when and whether a potential DPNI may have read the link state. 
- */ - err = dpmac_set_link_state(priv->mc_dev->mc_io, 0, -@@ -532,7 +532,7 @@ static int dpaa2_mac_probe(struct fsl_mc_device *mc_dev) - goto err_close; - } - -- dev_info_once(dev, "Using DPMAC API %d.%d\n", -+ dev_warn(dev, "Using DPMAC API %d.%d\n", - priv->attr.version.major, priv->attr.version.minor); - - /* Look up the DPMAC node in the device-tree. */ -diff --git a/drivers/staging/fsl-mc/bus/dprc-driver.c b/drivers/staging/fsl-mc/bus/dprc-driver.c -index f8d8cbe..5b6fa1c 100644 ---- a/drivers/staging/fsl-mc/bus/dprc-driver.c -+++ b/drivers/staging/fsl-mc/bus/dprc-driver.c -@@ -1078,7 +1078,7 @@ int __init dprc_driver_init(void) - return fsl_mc_driver_register(&dprc_driver); - } - --void __exit dprc_driver_exit(void) -+void dprc_driver_exit(void) - { - fsl_mc_driver_unregister(&dprc_driver); - } -diff --git a/drivers/staging/fsl-mc/include/mc-private.h b/drivers/staging/fsl-mc/include/mc-private.h -index 1246ca8..58ed441 100644 ---- a/drivers/staging/fsl-mc/include/mc-private.h -+++ b/drivers/staging/fsl-mc/include/mc-private.h -@@ -143,7 +143,7 @@ int dprc_scan_objects(struct fsl_mc_device *mc_bus_dev, - - int __init dprc_driver_init(void); - --void __exit dprc_driver_exit(void); -+void dprc_driver_exit(void); - - int __init fsl_mc_allocator_driver_init(void); - -diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c -index f951b75..600a137 100644 ---- a/drivers/usb/host/xhci.c -+++ b/drivers/usb/host/xhci.c -@@ -1685,8 +1685,10 @@ int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev, - cpu_to_le32(EP_STATE_DISABLED)) || - le32_to_cpu(ctrl_ctx->drop_flags) & - xhci_get_endpoint_flag(&ep->desc)) { -- xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", -- __func__, ep); -+ /* Do not warn when called after a usb_device_reset */ -+ if (xhci->devs[udev->slot_id]->eps[ep_index].ring != NULL) -+ xhci_warn(xhci, "xHCI %s called with disabled ep %p\n", -+ __func__, ep); - return 0; - } - -diff --git a/include/linux/fsl/guts.h 
b/include/linux/fsl/guts.h -index 84d971f..f13b12e 100644 ---- a/include/linux/fsl/guts.h -+++ b/include/linux/fsl/guts.h -@@ -29,83 +29,86 @@ - * #ifdefs. - */ - struct ccsr_guts { -- __be32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ -- __be32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ -- __be32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ -- __be32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ -- __be32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ -- __be32 pordevsr2; /* 0x.0014 - POR device status register 2 */ -+ u32 porpllsr; /* 0x.0000 - POR PLL Ratio Status Register */ -+ u32 porbmsr; /* 0x.0004 - POR Boot Mode Status Register */ -+ u32 porimpscr; /* 0x.0008 - POR I/O Impedance Status and Control Register */ -+ u32 pordevsr; /* 0x.000c - POR I/O Device Status Register */ -+ u32 pordbgmsr; /* 0x.0010 - POR Debug Mode Status Register */ -+ u32 pordevsr2; /* 0x.0014 - POR device status register 2 */ - u8 res018[0x20 - 0x18]; -- __be32 porcir; /* 0x.0020 - POR Configuration Information Register */ -+ u32 porcir; /* 0x.0020 - POR Configuration Information Register */ - u8 res024[0x30 - 0x24]; -- __be32 gpiocr; /* 0x.0030 - GPIO Control Register */ -+ u32 gpiocr; /* 0x.0030 - GPIO Control Register */ - u8 res034[0x40 - 0x34]; -- __be32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ -+ u32 gpoutdr; /* 0x.0040 - General-Purpose Output Data Register */ - u8 res044[0x50 - 0x44]; -- __be32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ -+ u32 gpindr; /* 0x.0050 - General-Purpose Input Data Register */ - u8 res054[0x60 - 0x54]; -- __be32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ -- __be32 pmuxcr2; /* 0x.0064 - Alternate function signal multiplex control 2 */ -- __be32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ -+ u32 pmuxcr; /* 0x.0060 - Alternate Function Signal Multiplex Control */ -+ u32 pmuxcr2; /* 0x.0064 - Alternate function 
signal multiplex control 2 */ -+ u32 dmuxcr; /* 0x.0068 - DMA Mux Control Register */ - u8 res06c[0x70 - 0x6c]; -- __be32 devdisr; /* 0x.0070 - Device Disable Control */ -+ u32 devdisr; /* 0x.0070 - Device Disable Control */ - #define CCSR_GUTS_DEVDISR_TB1 0x00001000 - #define CCSR_GUTS_DEVDISR_TB0 0x00004000 -- __be32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ -+ u32 devdisr2; /* 0x.0074 - Device Disable Control 2 */ - u8 res078[0x7c - 0x78]; -- __be32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ -- __be32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ -- __be32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ -- __be32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ -- __be32 pmcdr; /* 0x.008c - 4Power management clock disable register */ -- __be32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ -- __be32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ -- __be32 ectrstcr; /* 0x.0098 - Exception reset control register */ -- __be32 autorstsr; /* 0x.009c - Automatic reset status register */ -- __be32 pvr; /* 0x.00a0 - Processor Version Register */ -- __be32 svr; /* 0x.00a4 - System Version Register */ -+ u32 pmjcr; /* 0x.007c - 4 Power Management Jog Control Register */ -+ u32 powmgtcsr; /* 0x.0080 - Power Management Status and Control Register */ -+ u32 pmrccr; /* 0x.0084 - Power Management Reset Counter Configuration Register */ -+ u32 pmpdccr; /* 0x.0088 - Power Management Power Down Counter Configuration Register */ -+ u32 pmcdr; /* 0x.008c - 4Power management clock disable register */ -+ u32 mcpsumr; /* 0x.0090 - Machine Check Summary Register */ -+ u32 rstrscr; /* 0x.0094 - Reset Request Status and Control Register */ -+ u32 ectrstcr; /* 0x.0098 - Exception reset control register */ -+ u32 autorstsr; /* 0x.009c - Automatic reset status register */ -+ u32 pvr; /* 0x.00a0 - Processor Version Register */ -+ u32 svr; /* 
0x.00a4 - System Version Register */ - u8 res0a8[0xb0 - 0xa8]; -- __be32 rstcr; /* 0x.00b0 - Reset Control Register */ -+ u32 rstcr; /* 0x.00b0 - Reset Control Register */ - u8 res0b4[0xc0 - 0xb4]; -- __be32 iovselsr; /* 0x.00c0 - I/O voltage select status register -+ u32 iovselsr; /* 0x.00c0 - I/O voltage select status register - Called 'elbcvselcr' on 86xx SOCs */ - u8 res0c4[0x100 - 0xc4]; -- __be32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers -+ u32 rcwsr[16]; /* 0x.0100 - Reset Control Word Status registers - There are 16 registers */ - u8 res140[0x224 - 0x140]; -- __be32 iodelay1; /* 0x.0224 - IO delay control register 1 */ -- __be32 iodelay2; /* 0x.0228 - IO delay control register 2 */ -+ u32 iodelay1; /* 0x.0224 - IO delay control register 1 */ -+ u32 iodelay2; /* 0x.0228 - IO delay control register 2 */ - u8 res22c[0x604 - 0x22c]; -- __be32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ -+ u32 pamubypenr; /* 0x.604 - PAMU bypass enable register */ - u8 res608[0x800 - 0x608]; -- __be32 clkdvdr; /* 0x.0800 - Clock Divide Register */ -+ u32 clkdvdr; /* 0x.0800 - Clock Divide Register */ - u8 res804[0x900 - 0x804]; -- __be32 ircr; /* 0x.0900 - Infrared Control Register */ -+ u32 ircr; /* 0x.0900 - Infrared Control Register */ - u8 res904[0x908 - 0x904]; -- __be32 dmacr; /* 0x.0908 - DMA Control Register */ -+ u32 dmacr; /* 0x.0908 - DMA Control Register */ - u8 res90c[0x914 - 0x90c]; -- __be32 elbccr; /* 0x.0914 - eLBC Control Register */ -+ u32 elbccr; /* 0x.0914 - eLBC Control Register */ - u8 res918[0xb20 - 0x918]; -- __be32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ -- __be32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ -- __be32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ -+ u32 ddr1clkdr; /* 0x.0b20 - DDR1 Clock Disable Register */ -+ u32 ddr2clkdr; /* 0x.0b24 - DDR2 Clock Disable Register */ -+ u32 ddrclkdr; /* 0x.0b28 - DDR Clock Disable Register */ - u8 resb2c[0xe00 - 0xb2c]; -- __be32 clkocr; /* 
0x.0e00 - Clock Out Select Register */ -+ u32 clkocr; /* 0x.0e00 - Clock Out Select Register */ - u8 rese04[0xe10 - 0xe04]; -- __be32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ -+ u32 ddrdllcr; /* 0x.0e10 - DDR DLL Control Register */ - u8 rese14[0xe20 - 0xe14]; -- __be32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ -- __be32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ -+ u32 lbcdllcr; /* 0x.0e20 - LBC DLL Control Register */ -+ u32 cpfor; /* 0x.0e24 - L2 charge pump fuse override register */ - u8 rese28[0xf04 - 0xe28]; -- __be32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ -- __be32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ -+ u32 srds1cr0; /* 0x.0f04 - SerDes1 Control Register 0 */ -+ u32 srds1cr1; /* 0x.0f08 - SerDes1 Control Register 0 */ - u8 resf0c[0xf2c - 0xf0c]; -- __be32 itcr; /* 0x.0f2c - Internal transaction control register */ -+ u32 itcr; /* 0x.0f2c - Internal transaction control register */ - u8 resf30[0xf40 - 0xf30]; -- __be32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ -- __be32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ -+ u32 srds2cr0; /* 0x.0f40 - SerDes2 Control Register 0 */ -+ u32 srds2cr1; /* 0x.0f44 - SerDes2 Control Register 0 */ - } __attribute__ ((packed)); - -+#ifdef CONFIG_FSL_GUTS -+extern u32 guts_get_svr(void); -+#endif - - /* Alternate function signal multiplex control */ - #define MPC85xx_PMUXCR_QE(x) (0x8000 >> (x)) -diff --git a/include/linux/fsl/svr.h b/include/linux/fsl/svr.h -new file mode 100644 -index 0000000..8d13836 ---- /dev/null -+++ b/include/linux/fsl/svr.h -@@ -0,0 +1,95 @@ -+/* -+ * MPC85xx cpu type detection -+ * -+ * Copyright 2011-2012 Freescale Semiconductor, Inc. -+ * -+ * This is free software; you can redistribute it and/or modify -+ * it under the terms of the GNU General Public License as published by -+ * the Free Software Foundation; either version 2 of the License, or -+ * (at your option) any later version. 
-+ */ -+ -+#ifndef FSL_SVR_H -+#define FSL_SVR_H -+ -+#define SVR_REV(svr) ((svr) & 0xFF) /* SOC design resision */ -+#define SVR_MAJ(svr) (((svr) >> 4) & 0xF) /* Major revision field*/ -+#define SVR_MIN(svr) (((svr) >> 0) & 0xF) /* Minor revision field*/ -+ -+/* Some parts define SVR[0:23] as the SOC version */ -+#define SVR_SOC_VER(svr) (((svr) >> 8) & 0xFFF7FF) /* SOC Version fields */ -+ -+#define SVR_8533 0x803400 -+#define SVR_8535 0x803701 -+#define SVR_8536 0x803700 -+#define SVR_8540 0x803000 -+#define SVR_8541 0x807200 -+#define SVR_8543 0x803200 -+#define SVR_8544 0x803401 -+#define SVR_8545 0x803102 -+#define SVR_8547 0x803101 -+#define SVR_8548 0x803100 -+#define SVR_8555 0x807100 -+#define SVR_8560 0x807000 -+#define SVR_8567 0x807501 -+#define SVR_8568 0x807500 -+#define SVR_8569 0x808000 -+#define SVR_8572 0x80E000 -+#define SVR_P1010 0x80F100 -+#define SVR_P1011 0x80E500 -+#define SVR_P1012 0x80E501 -+#define SVR_P1013 0x80E700 -+#define SVR_P1014 0x80F101 -+#define SVR_P1017 0x80F700 -+#define SVR_P1020 0x80E400 -+#define SVR_P1021 0x80E401 -+#define SVR_P1022 0x80E600 -+#define SVR_P1023 0x80F600 -+#define SVR_P1024 0x80E402 -+#define SVR_P1025 0x80E403 -+#define SVR_P2010 0x80E300 -+#define SVR_P2020 0x80E200 -+#define SVR_P2040 0x821000 -+#define SVR_P2041 0x821001 -+#define SVR_P3041 0x821103 -+#define SVR_P4040 0x820100 -+#define SVR_P4080 0x820000 -+#define SVR_P5010 0x822100 -+#define SVR_P5020 0x822000 -+#define SVR_P5021 0X820500 -+#define SVR_P5040 0x820400 -+#define SVR_T4240 0x824000 -+#define SVR_T4120 0x824001 -+#define SVR_T4160 0x824100 -+#define SVR_T4080 0x824102 -+#define SVR_C291 0x850000 -+#define SVR_C292 0x850020 -+#define SVR_C293 0x850030 -+#define SVR_B4860 0X868000 -+#define SVR_G4860 0x868001 -+#define SVR_G4060 0x868003 -+#define SVR_B4440 0x868100 -+#define SVR_G4440 0x868101 -+#define SVR_B4420 0x868102 -+#define SVR_B4220 0x868103 -+#define SVR_T1040 0x852000 -+#define SVR_T1041 0x852001 -+#define SVR_T1042 0x852002 
-+#define SVR_T1020 0x852100 -+#define SVR_T1021 0x852101 -+#define SVR_T1022 0x852102 -+#define SVR_T2080 0x853000 -+#define SVR_T2081 0x853100 -+ -+#define SVR_8610 0x80A000 -+#define SVR_8641 0x809000 -+#define SVR_8641D 0x809001 -+ -+#define SVR_9130 0x860001 -+#define SVR_9131 0x860000 -+#define SVR_9132 0x861000 -+#define SVR_9232 0x861400 -+ -+#define SVR_Unknown 0xFFFFFF -+ -+#endif -diff --git a/include/linux/fsl_ifc.h b/include/linux/fsl_ifc.h -index 84d60cb..3f9778c 100644 ---- a/include/linux/fsl_ifc.h -+++ b/include/linux/fsl_ifc.h -@@ -29,7 +29,20 @@ - #include - #include - --#define FSL_IFC_BANK_COUNT 4 -+/* -+ * The actual number of banks implemented depends on the IFC version -+ * - IFC version 1.0 implements 4 banks. -+ * - IFC version 1.1 onward implements 8 banks. -+ */ -+#define FSL_IFC_BANK_COUNT 8 -+ -+#define FSL_IFC_VERSION_MASK 0x0F0F0000 -+#define FSL_IFC_VERSION_1_0_0 0x01000000 -+#define FSL_IFC_VERSION_1_1_0 0x01010000 -+#define FSL_IFC_VERSION_2_0_0 0x02000000 -+ -+#define PGOFFSET_64K (64*1024) -+#define PGOFFSET_4K (4*1024) - - /* - * CSPR - Chip Select Property Register -@@ -714,20 +727,26 @@ struct fsl_ifc_nand { - __be32 nand_evter_en; - u32 res17[0x2]; - __be32 nand_evter_intr_en; -- u32 res18[0x2]; -+ __be32 nand_vol_addr_stat; -+ u32 res18; - __be32 nand_erattr0; - __be32 nand_erattr1; - u32 res19[0x10]; - __be32 nand_fsr; -- u32 res20; -- __be32 nand_eccstat[4]; -- u32 res21[0x20]; -+ u32 res20[0x3]; -+ __be32 nand_eccstat[6]; -+ u32 res21[0x1c]; - __be32 nanndcr; - u32 res22[0x2]; - __be32 nand_autoboot_trgr; - u32 res23; - __be32 nand_mdr; -- u32 res24[0x5C]; -+ u32 res24[0x1C]; -+ __be32 nand_dll_lowcfg0; -+ __be32 nand_dll_lowcfg1; -+ u32 res25; -+ __be32 nand_dll_lowstat; -+ u32 res26[0x3c]; - }; - - /* -@@ -762,13 +781,12 @@ struct fsl_ifc_gpcm { - __be32 gpcm_erattr1; - __be32 gpcm_erattr2; - __be32 gpcm_stat; -- u32 res4[0x1F3]; - }; - - /* - * IFC Controller Registers - */ --struct fsl_ifc_regs { -+struct 
fsl_ifc_global { - __be32 ifc_rev; - u32 res1[0x2]; - struct { -@@ -776,39 +794,44 @@ struct fsl_ifc_regs { - __be32 cspr; - u32 res2; - } cspr_cs[FSL_IFC_BANK_COUNT]; -- u32 res3[0x19]; -+ u32 res3[0xd]; - struct { - __be32 amask; - u32 res4[0x2]; - } amask_cs[FSL_IFC_BANK_COUNT]; -- u32 res5[0x18]; -+ u32 res5[0xc]; - struct { - __be32 csor; - __be32 csor_ext; - u32 res6; - } csor_cs[FSL_IFC_BANK_COUNT]; -- u32 res7[0x18]; -+ u32 res7[0xc]; - struct { - __be32 ftim[4]; - u32 res8[0x8]; - } ftim_cs[FSL_IFC_BANK_COUNT]; -- u32 res9[0x60]; -+ u32 res9[0x30]; - __be32 rb_stat; -- u32 res10[0x2]; -+ __be32 rb_map; -+ __be32 wb_map; - __be32 ifc_gcr; -- u32 res11[0x2]; -+ u32 res10[0x2]; - __be32 cm_evter_stat; -- u32 res12[0x2]; -+ u32 res11[0x2]; - __be32 cm_evter_en; -- u32 res13[0x2]; -+ u32 res12[0x2]; - __be32 cm_evter_intr_en; -- u32 res14[0x2]; -+ u32 res13[0x2]; - __be32 cm_erattr0; - __be32 cm_erattr1; -- u32 res15[0x2]; -+ u32 res14[0x2]; - __be32 ifc_ccr; - __be32 ifc_csr; -- u32 res16[0x2EB]; -+ __be32 ddr_ccr_low; -+}; -+ -+ -+struct fsl_ifc_runtime { - struct fsl_ifc_nand ifc_nand; - struct fsl_ifc_nor ifc_nor; - struct fsl_ifc_gpcm ifc_gpcm; -@@ -822,17 +845,70 @@ extern int fsl_ifc_find(phys_addr_t addr_base); - struct fsl_ifc_ctrl { - /* device info */ - struct device *dev; -- struct fsl_ifc_regs __iomem *regs; -+ struct fsl_ifc_global __iomem *gregs; -+ struct fsl_ifc_runtime __iomem *rregs; - int irq; - int nand_irq; - spinlock_t lock; - void *nand; -+ int version; -+ int banks; - - u32 nand_stat; - wait_queue_head_t nand_wait; -+ bool little_endian; - }; - - extern struct fsl_ifc_ctrl *fsl_ifc_ctrl_dev; - -+static inline u32 ifc_in32(void __iomem *addr) -+{ -+ u32 val; -+ -+ if (fsl_ifc_ctrl_dev->little_endian) -+ val = ioread32(addr); -+ else -+ val = ioread32be(addr); -+ -+ return val; -+} -+ -+static inline u16 ifc_in16(void __iomem *addr) -+{ -+ u16 val; -+ -+ if (fsl_ifc_ctrl_dev->little_endian) -+ val = ioread16(addr); -+ else -+ val = 
ioread16be(addr); -+ -+ return val; -+} -+ -+static inline u8 ifc_in8(void __iomem *addr) -+{ -+ return ioread8(addr); -+} -+ -+static inline void ifc_out32(u32 val, void __iomem *addr) -+{ -+ if (fsl_ifc_ctrl_dev->little_endian) -+ iowrite32(val, addr); -+ else -+ iowrite32be(val, addr); -+} -+ -+static inline void ifc_out16(u16 val, void __iomem *addr) -+{ -+ if (fsl_ifc_ctrl_dev->little_endian) -+ iowrite16(val, addr); -+ else -+ iowrite16be(val, addr); -+} -+ -+static inline void ifc_out8(u8 val, void __iomem *addr) -+{ -+ iowrite8(val, addr); -+} - - #endif /* __ASM_FSL_IFC_H */ -diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h -index 69517a2..cbbe6a2 100644 ---- a/include/linux/interrupt.h -+++ b/include/linux/interrupt.h -@@ -356,6 +356,20 @@ static inline int disable_irq_wake(unsigned int irq) - return irq_set_irq_wake(irq, 0); - } - -+/* -+ * irq_get_irqchip_state/irq_set_irqchip_state specific flags -+ */ -+enum irqchip_irq_state { -+ IRQCHIP_STATE_PENDING, /* Is interrupt pending? */ -+ IRQCHIP_STATE_ACTIVE, /* Is interrupt in progress? */ -+ IRQCHIP_STATE_MASKED, /* Is interrupt masked? */ -+ IRQCHIP_STATE_LINE_LEVEL, /* Is IRQ line high? 
*/ -+}; -+ -+extern int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, -+ bool *state); -+extern int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, -+ bool state); - - #ifdef CONFIG_IRQ_FORCED_THREADING - extern bool force_irqthreads; -diff --git a/include/linux/iommu.h b/include/linux/iommu.h -index 04229cb..7421bdf 100644 ---- a/include/linux/iommu.h -+++ b/include/linux/iommu.h -@@ -30,6 +30,7 @@ - #define IOMMU_WRITE (1 << 1) - #define IOMMU_CACHE (1 << 2) /* DMA cache coherency */ - #define IOMMU_NOEXEC (1 << 3) -+#define IOMMU_MMIO (1 << 4) /* Device memory access */ - - struct iommu_ops; - struct iommu_group; -diff --git a/include/linux/irq.h b/include/linux/irq.h -index 9ba173b..4931a8b 100644 ---- a/include/linux/irq.h -+++ b/include/linux/irq.h -@@ -30,6 +30,7 @@ - struct seq_file; - struct module; - struct msi_msg; -+enum irqchip_irq_state; - - /* - * IRQ line status. -@@ -324,6 +325,8 @@ static inline irq_hw_number_t irqd_to_hwirq(struct irq_data *d) - * irq_request_resources - * @irq_compose_msi_msg: optional to compose message content for MSI - * @irq_write_msi_msg: optional to write message content for MSI -+ * @irq_get_irqchip_state: return the internal state of an interrupt -+ * @irq_set_irqchip_state: set the internal state of a interrupt - * @flags: chip specific flags - */ - struct irq_chip { -@@ -363,6 +366,9 @@ struct irq_chip { - void (*irq_compose_msi_msg)(struct irq_data *data, struct msi_msg *msg); - void (*irq_write_msi_msg)(struct irq_data *data, struct msi_msg *msg); - -+ int (*irq_get_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool *state); -+ int (*irq_set_irqchip_state)(struct irq_data *data, enum irqchip_irq_state which, bool state); -+ - unsigned long flags; - }; - -@@ -460,6 +466,8 @@ extern void irq_chip_eoi_parent(struct irq_data *data); - extern int irq_chip_set_affinity_parent(struct irq_data *data, - const struct cpumask *dest, - bool force); -+extern int 
irq_chip_set_wake_parent(struct irq_data *data, unsigned int on); -+extern int irq_chip_set_type_parent(struct irq_data *data, unsigned int type); - #endif - - /* Handling of unhandled and spurious interrupts: */ -diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h -index da1aa15..36caf46 100644 ---- a/include/linux/irqchip/arm-gic-v3.h -+++ b/include/linux/irqchip/arm-gic-v3.h -@@ -270,6 +270,18 @@ - #define ICC_SRE_EL2_SRE (1 << 0) - #define ICC_SRE_EL2_ENABLE (1 << 3) - -+#define ICC_SGI1R_TARGET_LIST_SHIFT 0 -+#define ICC_SGI1R_TARGET_LIST_MASK (0xffff << ICC_SGI1R_TARGET_LIST_SHIFT) -+#define ICC_SGI1R_AFFINITY_1_SHIFT 16 -+#define ICC_SGI1R_AFFINITY_1_MASK (0xff << ICC_SGI1R_AFFINITY_1_SHIFT) -+#define ICC_SGI1R_SGI_ID_SHIFT 24 -+#define ICC_SGI1R_SGI_ID_MASK (0xff << ICC_SGI1R_SGI_ID_SHIFT) -+#define ICC_SGI1R_AFFINITY_2_SHIFT 32 -+#define ICC_SGI1R_AFFINITY_2_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) -+#define ICC_SGI1R_IRQ_ROUTING_MODE_BIT 40 -+#define ICC_SGI1R_AFFINITY_3_SHIFT 48 -+#define ICC_SGI1R_AFFINITY_3_MASK (0xffULL << ICC_SGI1R_AFFINITY_1_SHIFT) -+ - /* - * System register definitions - */ -diff --git a/include/linux/irqchip/arm-gic.h b/include/linux/irqchip/arm-gic.h -index 13eed92..60b09ed 100644 ---- a/include/linux/irqchip/arm-gic.h -+++ b/include/linux/irqchip/arm-gic.h -@@ -106,6 +106,8 @@ static inline void gic_init(unsigned int nr, int start, - gic_init_bases(nr, start, dist, cpu, 0, NULL); - } - -+int gicv2m_of_init(struct device_node *node, struct irq_domain *parent); -+ - void gic_send_sgi(unsigned int cpu_id, unsigned int irq); - int gic_get_cpu_id(unsigned int cpu); - void gic_migrate_target(unsigned int new_cpu_id); -diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h -index ebace05..3c5ca45 100644 ---- a/include/linux/irqdomain.h -+++ b/include/linux/irqdomain.h -@@ -56,6 +56,7 @@ enum irq_domain_bus_token { - DOMAIN_BUS_ANY = 0, - DOMAIN_BUS_PCI_MSI, - DOMAIN_BUS_PLATFORM_MSI, -+ 
DOMAIN_BUS_NEXUS, - }; - - /** -diff --git a/include/linux/mmc/sdhci.h b/include/linux/mmc/sdhci.h -index dba793e..62d966a 100644 ---- a/include/linux/mmc/sdhci.h -+++ b/include/linux/mmc/sdhci.h -@@ -100,6 +100,10 @@ struct sdhci_host { - #define SDHCI_QUIRK2_BROKEN_DDR50 (1<<7) - /* Stop command (CMD12) can set Transfer Complete when not using MMC_RSP_BUSY */ - #define SDHCI_QUIRK2_STOP_WITH_TC (1<<8) -+/* Controller does not support 64-bit DMA */ -+#define SDHCI_QUIRK2_BROKEN_64_BIT_DMA (1<<9) -+/* Controller broken with using ACMD23 */ -+#define SDHCI_QUIRK2_ACMD23_BROKEN (1<<14) - - int irq; /* Device IRQ */ - void __iomem *ioaddr; /* Mapped address */ -@@ -130,6 +134,7 @@ struct sdhci_host { - #define SDHCI_SDIO_IRQ_ENABLED (1<<9) /* SDIO irq enabled */ - #define SDHCI_SDR104_NEEDS_TUNING (1<<10) /* SDR104/HS200 needs tuning */ - #define SDHCI_USING_RETUNING_TIMER (1<<11) /* Host is using a retuning timer for the card */ -+#define SDHCI_USE_64_BIT_DMA (1<<12) /* Use 64-bit DMA */ - - unsigned int version; /* SDHCI spec. version */ - -@@ -155,12 +160,19 @@ struct sdhci_host { - - int sg_count; /* Mapped sg entries */ - -- u8 *adma_desc; /* ADMA descriptor table */ -- u8 *align_buffer; /* Bounce buffer */ -+ void *adma_table; /* ADMA descriptor table */ -+ void *align_buffer; /* Bounce buffer */ -+ -+ size_t adma_table_sz; /* ADMA descriptor table size */ -+ size_t align_buffer_sz; /* Bounce buffer size */ - - dma_addr_t adma_addr; /* Mapped ADMA descr. 
table */ - dma_addr_t align_addr; /* Mapped bounce buffer */ - -+ unsigned int desc_sz; /* ADMA descriptor size */ -+ unsigned int align_sz; /* ADMA alignment */ -+ unsigned int align_mask; /* ADMA alignment mask */ -+ - struct tasklet_struct finish_tasklet; /* Tasklet structures */ - - struct timer_list timer; /* Timer for timeouts */ -diff --git a/include/linux/of.h b/include/linux/of.h -index 4a6a489..25111fb 100644 ---- a/include/linux/of.h -+++ b/include/linux/of.h -@@ -57,7 +57,6 @@ struct device_node { - struct device_node *child; - struct device_node *sibling; - struct device_node *next; /* next device of same type */ -- struct device_node *allnext; /* next in list of all nodes */ - struct kobject kobj; - unsigned long _flags; - void *data; -@@ -109,7 +108,7 @@ static inline void of_node_put(struct device_node *node) { } - #ifdef CONFIG_OF - - /* Pointer for first entry in chain of all nodes. */ --extern struct device_node *of_allnodes; -+extern struct device_node *of_root; - extern struct device_node *of_chosen; - extern struct device_node *of_aliases; - extern struct device_node *of_stdout; -@@ -117,7 +116,7 @@ extern raw_spinlock_t devtree_lock; - - static inline bool of_have_populated_dt(void) - { -- return of_allnodes != NULL; -+ return of_root != NULL; - } - - static inline bool of_node_is_root(const struct device_node *node) -@@ -161,6 +160,7 @@ static inline void of_property_clear_flag(struct property *p, unsigned long flag - clear_bit(flag, &p->_flags); - } - -+extern struct device_node *__of_find_all_nodes(struct device_node *prev); - extern struct device_node *of_find_all_nodes(struct device_node *prev); - - /* -@@ -216,8 +216,9 @@ static inline const char *of_node_full_name(const struct device_node *np) - return np ? 
np->full_name : ""; - } - --#define for_each_of_allnodes(dn) \ -- for (dn = of_allnodes; dn; dn = dn->allnext) -+#define for_each_of_allnodes_from(from, dn) \ -+ for (dn = __of_find_all_nodes(from); dn; dn = __of_find_all_nodes(dn)) -+#define for_each_of_allnodes(dn) for_each_of_allnodes_from(NULL, dn) - extern struct device_node *of_find_node_by_name(struct device_node *from, - const char *name); - extern struct device_node *of_find_node_by_type(struct device_node *from, -diff --git a/include/linux/of_pdt.h b/include/linux/of_pdt.h -index c65a18a..7e09244 100644 ---- a/include/linux/of_pdt.h -+++ b/include/linux/of_pdt.h -@@ -39,7 +39,6 @@ extern void *prom_early_alloc(unsigned long size); - /* for building the device tree */ - extern void of_pdt_build_devicetree(phandle root_node, struct of_pdt_ops *ops); - --extern void (*of_pdt_build_more)(struct device_node *dp, -- struct device_node ***nextp); -+extern void (*of_pdt_build_more)(struct device_node *dp); - - #endif /* _LINUX_OF_PDT_H */ -diff --git a/include/linux/pci.h b/include/linux/pci.h -index a99f301..f28c88b 100644 ---- a/include/linux/pci.h -+++ b/include/linux/pci.h -@@ -562,6 +562,7 @@ static inline int pcibios_err_to_errno(int err) - /* Low-level architecture-dependent routines */ - - struct pci_ops { -+ void __iomem *(*map_bus)(struct pci_bus *bus, unsigned int devfn, int where); - int (*read)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 *val); - int (*write)(struct pci_bus *bus, unsigned int devfn, int where, int size, u32 val); - }; -@@ -859,6 +860,16 @@ int pci_bus_write_config_word(struct pci_bus *bus, unsigned int devfn, - int where, u16 val); - int pci_bus_write_config_dword(struct pci_bus *bus, unsigned int devfn, - int where, u32 val); -+ -+int pci_generic_config_read(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 *val); -+int pci_generic_config_write(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 val); -+int 
pci_generic_config_read32(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 *val); -+int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn, -+ int where, int size, u32 val); -+ - struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops); - - static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val) -diff --git a/include/linux/phy.h b/include/linux/phy.h -index d090cfc..eda18a8 100644 ---- a/include/linux/phy.h -+++ b/include/linux/phy.h -@@ -700,6 +700,7 @@ struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id, - struct phy_c45_device_ids *c45_ids); - struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45); - int phy_device_register(struct phy_device *phy); -+void phy_device_remove(struct phy_device *phydev); - int phy_init_hw(struct phy_device *phydev); - int phy_suspend(struct phy_device *phydev); - int phy_resume(struct phy_device *phydev); -diff --git a/include/linux/phy_fixed.h b/include/linux/phy_fixed.h -index f2ca1b4..fe5732d 100644 ---- a/include/linux/phy_fixed.h -+++ b/include/linux/phy_fixed.h -@@ -11,7 +11,7 @@ struct fixed_phy_status { - - struct device_node; - --#ifdef CONFIG_FIXED_PHY -+#if IS_ENABLED(CONFIG_FIXED_PHY) - extern int fixed_phy_add(unsigned int irq, int phy_id, - struct fixed_phy_status *status); - extern struct phy_device *fixed_phy_register(unsigned int irq, -@@ -21,6 +21,9 @@ extern void fixed_phy_del(int phy_addr); - extern int fixed_phy_set_link_update(struct phy_device *phydev, - int (*link_update)(struct net_device *, - struct fixed_phy_status *)); -+extern int fixed_phy_update_state(struct phy_device *phydev, -+ const struct fixed_phy_status *status, -+ const struct fixed_phy_status *changed); - #else - static inline int fixed_phy_add(unsigned int irq, int phy_id, - struct fixed_phy_status *status) -@@ -43,6 +46,12 @@ static inline int fixed_phy_set_link_update(struct phy_device *phydev, - { - return 
-ENODEV; - } -+static inline int fixed_phy_update_state(struct phy_device *phydev, -+ const struct fixed_phy_status *status, -+ const struct fixed_phy_status *changed) -+{ -+ return -ENODEV; -+} - #endif /* CONFIG_FIXED_PHY */ - - #endif /* __PHY_FIXED_H */ -diff --git a/kernel/irq/chip.c b/kernel/irq/chip.c -index 63c16d1..55dd2fb 100644 ---- a/kernel/irq/chip.c -+++ b/kernel/irq/chip.c -@@ -731,7 +731,30 @@ __irq_set_handler(unsigned int irq, irq_flow_handler_t handle, int is_chained, - if (!handle) { - handle = handle_bad_irq; - } else { -- if (WARN_ON(desc->irq_data.chip == &no_irq_chip)) -+ struct irq_data *irq_data = &desc->irq_data; -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ /* -+ * With hierarchical domains we might run into a -+ * situation where the outermost chip is not yet set -+ * up, but the inner chips are there. Instead of -+ * bailing we install the handler, but obviously we -+ * cannot enable/startup the interrupt at this point. -+ */ -+ while (irq_data) { -+ if (irq_data->chip != &no_irq_chip) -+ break; -+ /* -+ * Bail out if the outer chip is not set up -+ * and the interrrupt supposed to be started -+ * right away. -+ */ -+ if (WARN_ON(is_chained)) -+ goto out; -+ /* Try the parent */ -+ irq_data = irq_data->parent_data; -+ } -+#endif -+ if (WARN_ON(!irq_data || irq_data->chip == &no_irq_chip)) - goto out; - } - -@@ -911,6 +934,23 @@ int irq_chip_set_affinity_parent(struct irq_data *data, - } - - /** -+ * irq_chip_set_type_parent - Set IRQ type on the parent interrupt -+ * @data: Pointer to interrupt specific data -+ * @type: IRQ_TYPE_{LEVEL,EDGE}_* value - see include/linux/irq.h -+ * -+ * Conditional, as the underlying parent chip might not implement it. 
-+ */ -+int irq_chip_set_type_parent(struct irq_data *data, unsigned int type) -+{ -+ data = data->parent_data; -+ -+ if (data->chip->irq_set_type) -+ return data->chip->irq_set_type(data, type); -+ -+ return -ENOSYS; -+} -+ -+/** - * irq_chip_retrigger_hierarchy - Retrigger an interrupt in hardware - * @data: Pointer to interrupt specific data - * -@@ -925,6 +965,22 @@ int irq_chip_retrigger_hierarchy(struct irq_data *data) - - return -ENOSYS; - } -+ -+/** -+ * irq_chip_set_wake_parent - Set/reset wake-up on the parent interrupt -+ * @data: Pointer to interrupt specific data -+ * @on: Whether to set or reset the wake-up capability of this irq -+ * -+ * Conditional, as the underlying parent chip might not implement it. -+ */ -+int irq_chip_set_wake_parent(struct irq_data *data, unsigned int on) -+{ -+ data = data->parent_data; -+ if (data->chip->irq_set_wake) -+ return data->chip->irq_set_wake(data, on); -+ -+ return -ENOSYS; -+} - #endif - - /** -diff --git a/kernel/irq/manage.c b/kernel/irq/manage.c -index 8069237..acb401f 100644 ---- a/kernel/irq/manage.c -+++ b/kernel/irq/manage.c -@@ -1758,3 +1758,94 @@ int request_percpu_irq(unsigned int irq, irq_handler_t handler, - - return retval; - } -+ -+/** -+ * irq_get_irqchip_state - returns the irqchip state of a interrupt. -+ * @irq: Interrupt line that is forwarded to a VM -+ * @which: One of IRQCHIP_STATE_* the caller wants to know about -+ * @state: a pointer to a boolean where the state is to be storeed -+ * -+ * This call snapshots the internal irqchip state of an -+ * interrupt, returning into @state the bit corresponding to -+ * stage @which -+ * -+ * This function should be called with preemption disabled if the -+ * interrupt controller has per-cpu registers. 
-+ */ -+int irq_get_irqchip_state(unsigned int irq, enum irqchip_irq_state which, -+ bool *state) -+{ -+ struct irq_desc *desc; -+ struct irq_data *data; -+ struct irq_chip *chip; -+ unsigned long flags; -+ int err = -EINVAL; -+ -+ desc = irq_get_desc_buslock(irq, &flags, 0); -+ if (!desc) -+ return err; -+ -+ data = irq_desc_get_irq_data(desc); -+ -+ do { -+ chip = irq_data_get_irq_chip(data); -+ if (chip->irq_get_irqchip_state) -+ break; -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ data = data->parent_data; -+#else -+ data = NULL; -+#endif -+ } while (data); -+ -+ if (data) -+ err = chip->irq_get_irqchip_state(data, which, state); -+ -+ irq_put_desc_busunlock(desc, flags); -+ return err; -+} -+ -+/** -+ * irq_set_irqchip_state - set the state of a forwarded interrupt. -+ * @irq: Interrupt line that is forwarded to a VM -+ * @which: State to be restored (one of IRQCHIP_STATE_*) -+ * @val: Value corresponding to @which -+ * -+ * This call sets the internal irqchip state of an interrupt, -+ * depending on the value of @which. -+ * -+ * This function should be called with preemption disabled if the -+ * interrupt controller has per-cpu registers. 
-+ */ -+int irq_set_irqchip_state(unsigned int irq, enum irqchip_irq_state which, -+ bool val) -+{ -+ struct irq_desc *desc; -+ struct irq_data *data; -+ struct irq_chip *chip; -+ unsigned long flags; -+ int err = -EINVAL; -+ -+ desc = irq_get_desc_buslock(irq, &flags, 0); -+ if (!desc) -+ return err; -+ -+ data = irq_desc_get_irq_data(desc); -+ -+ do { -+ chip = irq_data_get_irq_chip(data); -+ if (chip->irq_set_irqchip_state) -+ break; -+#ifdef CONFIG_IRQ_DOMAIN_HIERARCHY -+ data = data->parent_data; -+#else -+ data = NULL; -+#endif -+ } while (data); -+ -+ if (data) -+ err = chip->irq_set_irqchip_state(data, which, val); -+ -+ irq_put_desc_busunlock(desc, flags); -+ return err; -+} -diff --git a/kernel/irq/msi.c b/kernel/irq/msi.c -index 2495ed0..54433c2 100644 ---- a/kernel/irq/msi.c -+++ b/kernel/irq/msi.c -@@ -106,8 +106,10 @@ static int msi_domain_alloc(struct irq_domain *domain, unsigned int virq, - irq_hw_number_t hwirq = ops->get_hwirq(info, arg); - int i, ret; - -+#if 0 - if (irq_find_mapping(domain, hwirq) > 0) - return -EEXIST; -+#endif - - ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg); - if (ret < 0) -@@ -327,8 +329,15 @@ void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev) - struct msi_desc *desc; - - for_each_msi_entry(desc, dev) { -- irq_domain_free_irqs(desc->irq, desc->nvec_used); -- desc->irq = 0; -+ /* -+ * We might have failed to allocate an MSI early -+ * enough that there is no IRQ associated to this -+ * entry. If that's the case, don't do anything. 
-+ */ -+ if (desc->irq) { -+ irq_domain_free_irqs(desc->irq, desc->nvec_used); -+ desc->irq = 0; -+ } - } - } - -diff --git a/sound/soc/fsl/mpc8610_hpcd.c b/sound/soc/fsl/mpc8610_hpcd.c -index fa756d0..ad57f0c 100644 ---- a/sound/soc/fsl/mpc8610_hpcd.c -+++ b/sound/soc/fsl/mpc8610_hpcd.c -@@ -12,11 +12,11 @@ - - #include - #include -+#include - #include - #include - #include - #include --#include - - #include "fsl_dma.h" - #include "fsl_ssi.h" -diff --git a/sound/soc/fsl/p1022_ds.c b/sound/soc/fsl/p1022_ds.c -index f75c3cf..64a0bb6 100644 ---- a/sound/soc/fsl/p1022_ds.c -+++ b/sound/soc/fsl/p1022_ds.c -@@ -11,12 +11,12 @@ - */ - - #include -+#include - #include - #include - #include - #include - #include --#include - - #include "fsl_dma.h" - #include "fsl_ssi.h" -diff --git a/sound/soc/fsl/p1022_rdk.c b/sound/soc/fsl/p1022_rdk.c -index 9d89bb0..4ce4aff 100644 ---- a/sound/soc/fsl/p1022_rdk.c -+++ b/sound/soc/fsl/p1022_rdk.c -@@ -18,12 +18,12 @@ - */ - - #include -+#include - #include - #include - #include - #include - #include --#include - - #include "fsl_dma.h" - #include "fsl_ssi.h" --- -2.1.0.27.g96db324 - diff --git a/packages/base/any/kernels/3.18.25/patches/driver-support-intel-igb-bcm54616-phy.patch b/packages/base/any/kernels/3.18.25/patches/driver-support-intel-igb-bcm54616-phy.patch deleted file mode 100644 index c83708ac..00000000 --- a/packages/base/any/kernels/3.18.25/patches/driver-support-intel-igb-bcm54616-phy.patch +++ /dev/null @@ -1,67 +0,0 @@ -diff --git a/drivers/net/ethernet/intel/igb/e1000_82575.c b/drivers/net/ethernet/intel/igb/e1000_82575.c -index 051ea94..2a04baa 100644 ---- a/drivers/net/ethernet/intel/igb/e1000_82575.c -+++ b/drivers/net/ethernet/intel/igb/e1000_82575.c -@@ -286,6 +286,9 @@ static s32 igb_init_phy_params_82575(struct e1000_hw *hw) - phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580; - phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88; - break; -+ case BCM54616_E_PHY_ID: -+ phy->type = 
e1000_phy_bcm54616; -+ break; - default: - ret_val = -E1000_ERR_PHY; - goto out; -@@ -1550,6 +1553,7 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) - case e1000_i350: - case e1000_i210: - case e1000_i211: -+ case e1000_i354: - phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT); - phpm_reg &= ~E1000_82580_PM_GO_LINKD; - wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg); -@@ -1593,6 +1597,8 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw) - case e1000_phy_82580: - ret_val = igb_copper_link_setup_82580(hw); - break; -+ case e1000_phy_bcm54616: -+ break; - default: - ret_val = -E1000_ERR_PHY; - break; -diff --git a/drivers/net/ethernet/intel/igb/e1000_defines.h b/drivers/net/ethernet/intel/igb/e1000_defines.h -index 217f813..5322fbf 100644 ---- a/drivers/net/ethernet/intel/igb/e1000_defines.h -+++ b/drivers/net/ethernet/intel/igb/e1000_defines.h -@@ -860,6 +860,7 @@ - #define M88_VENDOR 0x0141 - #define I210_I_PHY_ID 0x01410C00 - #define M88E1543_E_PHY_ID 0x01410EA0 -+#define BCM54616_E_PHY_ID 0x3625D10 - - /* M88E1000 Specific Registers */ - #define M88E1000_PHY_SPEC_CTRL 0x10 /* PHY Specific Control Register */ -diff --git a/drivers/net/ethernet/intel/igb/e1000_hw.h b/drivers/net/ethernet/intel/igb/e1000_hw.h -index 2003b37..d82c96b 100644 ---- a/drivers/net/ethernet/intel/igb/e1000_hw.h -+++ b/drivers/net/ethernet/intel/igb/e1000_hw.h -@@ -128,6 +128,7 @@ enum e1000_phy_type { - e1000_phy_ife, - e1000_phy_82580, - e1000_phy_i210, -+ e1000_phy_bcm54616, - }; - - enum e1000_bus_type { -diff --git a/drivers/net/ethernet/intel/igb/igb_main.c b/drivers/net/ethernet/intel/igb/igb_main.c -index e0f3664..013c1f1 100644 ---- a/drivers/net/ethernet/intel/igb/igb_main.c -+++ b/drivers/net/ethernet/intel/igb/igb_main.c -@@ -108,6 +108,7 @@ static const struct pci_device_id igb_pci_tbl[] = { - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 }, - { PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 }, - { PCI_VDEVICE(INTEL, 
E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 }, -+ { PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII), board_82575 }, - /* required last entry */ - {0, } - }; diff --git a/packages/base/any/kernels/3.18.25/patches/ls2_mc_console.patch b/packages/base/any/kernels/3.18.25/patches/ls2_mc_console.patch deleted file mode 100644 index f5eb969e..00000000 --- a/packages/base/any/kernels/3.18.25/patches/ls2_mc_console.patch +++ /dev/null @@ -1,327 +0,0 @@ -diff -uNr a/drivers/soc/fsl/Kconfig.arm b/drivers/soc/fsl/Kconfig.arm ---- a/drivers/soc/fsl/Kconfig.arm 2017-06-05 17:37:14.530348991 +0530 -+++ b/drivers/soc/fsl/Kconfig.arm 2017-06-05 17:32:18.630348990 +0530 -@@ -23,3 +23,7 @@ - if LS1_SOC_DRIVERS - source "drivers/soc/fsl/ls1/Kconfig" - endif -+ -+if LS_SOC_DRIVERS -+ source "drivers/soc/fsl/ls2-console/Kconfig" -+endif -diff -uNr a/drivers/soc/fsl/ls2-console/Kconfig b/drivers/soc/fsl/ls2-console/Kconfig ---- a/drivers/soc/fsl/ls2-console/Kconfig 1970-01-01 05:30:00.000000000 +0530 -+++ b/drivers/soc/fsl/ls2-console/Kconfig 2017-06-05 17:32:52.582348990 +0530 -@@ -0,0 +1,4 @@ -+config FSL_LS2_CONSOLE -+ tristate "Layerscape MC and AIOP console support" -+ depends on ARCH_LAYERSCAPE -+ default y -diff -uNr a/drivers/soc/fsl/ls2-console/ls2-console.c b/drivers/soc/fsl/ls2-console/ls2-console.c ---- a/drivers/soc/fsl/ls2-console/ls2-console.c 1970-01-01 05:30:00.000000000 +0530 -+++ b/drivers/soc/fsl/ls2-console/ls2-console.c 2017-06-05 17:50:42.494348990 +0530 -@@ -0,0 +1,291 @@ -+/* Copyright 2015-2016 Freescale Semiconductor Inc. -+ * -+ * Redistribution and use in source and binary forms, with or without -+ * modification, are permitted provided that the following conditions are met: -+ * * Redistributions of source code must retain the above copyright -+ * notice, this list of conditions and the following disclaimer. 
-+ * * Redistributions in binary form must reproduce the above copyright -+ * notice, this list of conditions and the following disclaimer in the -+ * documentation and/or other materials provided with the distribution. -+ * * Neither the name of the above-listed copyright holders nor the -+ * names of any contributors may be used to endorse or promote products -+ * derived from this software without specific prior written permission. -+ * -+ * -+ * ALTERNATIVELY, this software may be distributed under the terms of the -+ * GNU General Public License ("GPL") as published by the Free Software -+ * Foundation, either version 2 of that License or (at your option) any -+ * later version. -+ * -+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE -+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDERS OR CONTRIBUTORS BE -+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR -+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF -+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS -+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN -+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) -+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -+ * POSSIBILITY OF SUCH DAMAGE. 
-+ */ -+ -+#include -+#include -+#include -+#include -+#include -+#include -+#include -+ -+/* SoC address for the MC firmware base low/high registers */ -+#define SOC_CCSR_MC_FW_BASE_ADDR_REGS 0x8340020 -+#define SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE 2 -+/* MC firmware base low/high registers indexes */ -+#define MCFBALR_OFFSET 0 -+#define MCFBAHR_OFFSET 1 -+ -+/* Bit mask used to obtain the most significant part of the MC base address */ -+#define MC_FW_HIGH_ADDR_MASK 0x1FFFF -+/* Bit mask used to obtain the least significant part of the MC base address */ -+#define MC_FW_LOW_ADDR_MASK 0xE0000000 -+ -+#define MC_BUFFER_OFFSET 0x01000000 -+#define MC_BUFFER_SIZE (1024*1024*16) -+#define MC_OFFSET_DELTA (MC_BUFFER_OFFSET) -+ -+#define AIOP_BUFFER_OFFSET 0x06000000 -+#define AIOP_BUFFER_SIZE (1024*1024*16) -+#define AIOP_OFFSET_DELTA (0) -+ -+struct log_header { -+ char magic_word[8]; /* magic word */ -+ uint32_t buf_start; /* holds the 32-bit little-endian -+ offset of the start of the buffer */ -+ uint32_t buf_length; /* holds the 32-bit little-endian -+ length of the buffer */ -+ uint32_t last_byte; /* holds the 32-bit little-endian offset -+ of the byte after the last byte that was written */ -+ char reserved[44]; -+}; -+ -+#define LOG_HEADER_FLAG_BUFFER_WRAPAROUND 0x80000000 -+#define LOG_VERSION_MAJOR 1 -+#define LOG_VERSION_MINOR 0 -+ -+ -+#define invalidate(p) { asm volatile("dc ivac, %0" : : "r" (p) : "memory"); } -+ -+struct console_data { -+ char *map_addr; -+ struct log_header *hdr; -+ char *start_addr; /* Start of buffer */ -+ char *end_addr; /* End of buffer */ -+ char *end_of_data; /* Current end of data */ -+ char *cur_ptr; /* Last data sent to console */ -+}; -+ -+#define LAST_BYTE(a) ((a) & ~(LOG_HEADER_FLAG_BUFFER_WRAPAROUND)) -+ -+static inline void __adjust_end(struct console_data *cd) -+{ -+ cd->end_of_data = cd->start_addr -+ + LAST_BYTE(le32_to_cpu(cd->hdr->last_byte)); -+} -+ -+static inline void adjust_end(struct console_data *cd) -+{ -+ 
invalidate(cd->hdr); -+ __adjust_end(cd); -+} -+ -+static inline uint64_t get_mc_fw_base_address(void) { -+ uint32_t* mcfbaregs = (uint32_t*) ioremap(SOC_CCSR_MC_FW_BASE_ADDR_REGS, -+ SOC_CCSR_MC_FW_BASE_ADDR_REGS_SIZE); -+ uint64_t mcfwbase = 0ULL; -+ mcfwbase = readl(mcfbaregs + MCFBAHR_OFFSET) & MC_FW_HIGH_ADDR_MASK; -+ mcfwbase <<= 32; -+ mcfwbase |= readl(mcfbaregs + MCFBALR_OFFSET) & MC_FW_LOW_ADDR_MASK; -+ iounmap(mcfbaregs); -+ pr_info("fsl-ls2-console: MC base address at 0x%016llx\n", mcfwbase); -+ return mcfwbase; -+} -+ -+static int fsl_ls2_generic_console_open(struct inode *node, struct file *fp, -+ u64 offset, u64 size, -+ uint8_t *emagic, uint8_t magic_len, -+ u32 offset_delta) -+{ -+ struct console_data *cd; -+ uint8_t *magic; -+ uint32_t wrapped; -+ -+ cd = kmalloc(sizeof(*cd), GFP_KERNEL); -+ if (cd == NULL) -+ return -ENOMEM; -+ fp->private_data = cd; -+ cd->map_addr = ioremap(get_mc_fw_base_address() + offset, size); -+ -+ cd->hdr = (struct log_header *) cd->map_addr; -+ invalidate(cd->hdr); -+ -+ magic = cd->hdr->magic_word; -+ if (memcmp(magic, emagic, magic_len)) { -+ pr_info("magic didn't match!\n"); -+ pr_info("expected: %02x %02x %02x %02x %02x %02x %02x %02x\n", -+ emagic[0], emagic[1], emagic[2], emagic[3], -+ emagic[4], emagic[5], emagic[6], emagic[7]); -+ pr_info(" seen: %02x %02x %02x %02x %02x %02x %02x %02x\n", -+ magic[0], magic[1], magic[2], magic[3], -+ magic[4], magic[5], magic[6], magic[7]); -+ kfree(cd); -+ iounmap(cd->map_addr); -+ return -EIO; -+ } -+ -+ cd->start_addr = cd->map_addr -+ + le32_to_cpu(cd->hdr->buf_start) - offset_delta; -+ cd->end_addr = cd->start_addr + le32_to_cpu(cd->hdr->buf_length); -+ -+ wrapped = le32_to_cpu(cd->hdr->last_byte) -+ & LOG_HEADER_FLAG_BUFFER_WRAPAROUND; -+ -+ __adjust_end(cd); -+ if (wrapped && (cd->end_of_data != cd->end_addr)) -+ cd->cur_ptr = cd->end_of_data+1; -+ else -+ cd->cur_ptr = cd->start_addr; -+ -+ return 0; -+} -+ -+static int fsl_ls2_mc_console_open(struct inode *node, struct 
file *fp) -+{ -+ uint8_t magic_word[] = { 0, 1, 'C', 'M' }; -+ -+ return fsl_ls2_generic_console_open(node, fp, -+ MC_BUFFER_OFFSET, MC_BUFFER_SIZE, -+ magic_word, sizeof(magic_word), -+ MC_OFFSET_DELTA); -+} -+ -+static int fsl_ls2_aiop_console_open(struct inode *node, struct file *fp) -+{ -+ uint8_t magic_word[] = { 'P', 'O', 'I', 'A' }; -+ -+ return fsl_ls2_generic_console_open(node, fp, -+ AIOP_BUFFER_OFFSET, AIOP_BUFFER_SIZE, -+ magic_word, sizeof(magic_word), -+ AIOP_OFFSET_DELTA); -+} -+ -+static int fsl_ls2_console_close(struct inode *node, struct file *fp) -+{ -+ struct console_data *cd = fp->private_data; -+ -+ iounmap(cd->map_addr); -+ kfree(cd); -+ return 0; -+} -+ -+ssize_t fsl_ls2_console_read(struct file *fp, char __user *buf, size_t count, -+ loff_t *f_pos) -+{ -+ struct console_data *cd = fp->private_data; -+ size_t bytes = 0; -+ char data; -+ -+ /* Check if we need to adjust the end of data addr */ -+ adjust_end(cd); -+ -+ while ((count != bytes) && (cd->end_of_data != cd->cur_ptr)) { -+ if (((u64)cd->cur_ptr) % 64 == 0) -+ invalidate(cd->cur_ptr); -+ -+ data = *(cd->cur_ptr); -+ if (copy_to_user(&buf[bytes], &data, 1)) -+ return -EFAULT; -+ cd->cur_ptr++; -+ if (cd->cur_ptr >= cd->end_addr) -+ cd->cur_ptr = cd->start_addr; -+ ++bytes; -+ } -+ return bytes; -+} -+ -+static const struct file_operations fsl_ls2_mc_console_fops = { -+ .owner = THIS_MODULE, -+ .open = fsl_ls2_mc_console_open, -+ .release = fsl_ls2_console_close, -+ .read = fsl_ls2_console_read, -+}; -+ -+static struct miscdevice fsl_ls2_mc_console_dev = { -+ .minor = MISC_DYNAMIC_MINOR, -+ .name = "fsl_mc_console", -+ .fops = &fsl_ls2_mc_console_fops -+}; -+ -+static const struct file_operations fsl_ls2_aiop_console_fops = { -+ .owner = THIS_MODULE, -+ .open = fsl_ls2_aiop_console_open, -+ .release = fsl_ls2_console_close, -+ .read = fsl_ls2_console_read, -+}; -+ -+static struct miscdevice fsl_ls2_aiop_console_dev = { -+ .minor = MISC_DYNAMIC_MINOR, -+ .name = "fsl_aiop_console", -+ 
.fops = &fsl_ls2_aiop_console_fops -+}; -+ -+static int __init fsl_ls2_console_init(void) -+{ -+ int err = 0; -+ -+ pr_info("Freescale LS2 console driver\n"); -+ err = misc_register(&fsl_ls2_mc_console_dev); -+ if (err) { -+ pr_err("fsl_mc_console: cannot register device\n"); -+ return err; -+ } -+ pr_info("fsl-ls2-console: device %s registered\n", -+ fsl_ls2_mc_console_dev.name); -+ -+ err = misc_register(&fsl_ls2_aiop_console_dev); -+ if (err) { -+ pr_err("fsl_aiop_console: cannot register device\n"); -+ return err; -+ } -+ pr_info("fsl-ls2-console: device %s registered\n", -+ fsl_ls2_aiop_console_dev.name); -+ -+ return 0; -+} -+ -+static void __exit fsl_ls2_console_exit(void) -+{ -+ int err = misc_deregister(&fsl_ls2_mc_console_dev); -+ -+ if (err) -+ pr_err("Failed to deregister device %s code %d\n", -+ fsl_ls2_mc_console_dev.name, err); -+ else -+ pr_info("device %s deregistered\n", -+ fsl_ls2_mc_console_dev.name); -+ -+ err = misc_deregister(&fsl_ls2_aiop_console_dev); -+ if (err) -+ pr_err("Failed to deregister device %s code %d\n", -+ fsl_ls2_aiop_console_dev.name, err); -+ else -+ pr_info("device %s deregistered\n", -+ fsl_ls2_aiop_console_dev.name); -+} -+ -+module_init(fsl_ls2_console_init); -+module_exit(fsl_ls2_console_exit); -+ -+MODULE_AUTHOR("Roy Pledge "); -+MODULE_LICENSE("Dual BSD/GPL"); -+MODULE_DESCRIPTION("Freescale LS2 console driver"); -diff -uNr a/drivers/soc/fsl/ls2-console/Makefile b/drivers/soc/fsl/ls2-console/Makefile ---- a/drivers/soc/fsl/ls2-console/Makefile 1970-01-01 05:30:00.000000000 +0530 -+++ b/drivers/soc/fsl/ls2-console/Makefile 2017-06-05 17:32:52.582348990 +0530 -@@ -0,0 +1 @@ -+obj-$(CONFIG_FSL_LS2_CONSOLE) += ls2-console.o -diff -uNr a/drivers/soc/fsl/Makefile b/drivers/soc/fsl/Makefile ---- a/drivers/soc/fsl/Makefile 2017-06-05 17:37:14.530348991 +0530 -+++ b/drivers/soc/fsl/Makefile 2017-06-05 17:33:54.022348991 +0530 -@@ -4,3 +4,4 @@ - - obj-$(CONFIG_LS1_SOC_DRIVERS) += ls1/ - obj-$(CONFIG_FSL_GUTS) += guts.o 
-+obj-$(CONFIG_LS_SOC_DRIVERS) += ls2-console/ diff --git a/packages/base/any/kernels/3.18.25/patches/series b/packages/base/any/kernels/3.18.25/patches/series deleted file mode 100644 index 6fb98212..00000000 --- a/packages/base/any/kernels/3.18.25/patches/series +++ /dev/null @@ -1,3 +0,0 @@ -aufs.patch -driver-support-intel-igb-bcm54616-phy.patch - diff --git a/packages/base/any/kernels/3.18.25/patches/series.arm64 b/packages/base/any/kernels/3.18.25/patches/series.arm64 deleted file mode 100644 index f9ac0ce2..00000000 --- a/packages/base/any/kernels/3.18.25/patches/series.arm64 +++ /dev/null @@ -1,2 +0,0 @@ -0001-Patch-set-for-booting-ls2088rdb-with-vfio.patch -ls2_mc_console.patch diff --git a/packages/base/any/kernels/4.14-lts/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/4.14-lts/configs/x86_64-all/x86_64-all.config index f16bb336..e5e3b53a 100755 --- a/packages/base/any/kernels/4.14-lts/configs/x86_64-all/x86_64-all.config +++ b/packages/base/any/kernels/4.14-lts/configs/x86_64-all/x86_64-all.config @@ -1,6 +1,6 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/x86 4.14.34 Kernel Configuration +# Linux/x86_64 4.14.49 Kernel Configuration # CONFIG_64BIT=y CONFIG_X86_64=y @@ -37,7 +37,6 @@ CONFIG_ZONE_DMA32=y CONFIG_AUDIT_ARCH=y CONFIG_ARCH_SUPPORTS_OPTIMIZED_INLINING=y CONFIG_ARCH_SUPPORTS_DEBUG_PAGEALLOC=y -CONFIG_HAVE_INTEL_TXT=y CONFIG_X86_64_SMP=y CONFIG_ARCH_SUPPORTS_UPROBES=y CONFIG_FIX_EARLYCON_MEM=y @@ -556,7 +555,6 @@ CONFIG_ARCH_ENABLE_THP_MIGRATION=y CONFIG_PHYS_ADDR_T_64BIT=y CONFIG_BOUNCE=y CONFIG_VIRT_TO_BUS=y -CONFIG_MMU_NOTIFIER=y CONFIG_KSM=y CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 CONFIG_ARCH_SUPPORTS_MEMORY_FAILURE=y @@ -646,7 +644,6 @@ CONFIG_ACPI_SYSTEM_POWER_STATES_SUPPORT=y CONFIG_ACPI_AC=y CONFIG_ACPI_BATTERY=y CONFIG_ACPI_BUTTON=y -CONFIG_ACPI_VIDEO=y CONFIG_ACPI_FAN=y # CONFIG_ACPI_DOCK is not set CONFIG_ACPI_CPU_FREQ_PSS=y @@ -750,7 +747,7 @@ CONFIG_PCI_BUS_ADDR_T_64BIT=y CONFIG_PCI_MSI=y CONFIG_PCI_MSI_IRQ_DOMAIN=y # CONFIG_PCI_DEBUG is not set -# CONFIG_PCI_REALLOC_ENABLE_AUTO is not set +CONFIG_PCI_REALLOC_ENABLE_AUTO=y # CONFIG_PCI_STUB is not set CONFIG_HT_IRQ=y CONFIG_PCI_ATS=y @@ -832,7 +829,7 @@ CONFIG_PACKET=y # CONFIG_PACKET_DIAG is not set CONFIG_UNIX=y # CONFIG_UNIX_DIAG is not set -# CONFIG_TLS is not set +CONFIG_TLS=y CONFIG_XFRM=y CONFIG_XFRM_OFFLOAD=y CONFIG_XFRM_ALGO=y @@ -850,6 +847,7 @@ CONFIG_IP_FIB_TRIE_STATS=y CONFIG_IP_MULTIPLE_TABLES=y CONFIG_IP_ROUTE_MULTIPATH=y CONFIG_IP_ROUTE_VERBOSE=y +CONFIG_IP_ROUTE_CLASSID=y CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y CONFIG_IP_PNP_BOOTP=y @@ -951,133 +949,137 @@ CONFIG_BRIDGE_NETFILTER=y # CONFIG_NETFILTER_INGRESS=y CONFIG_NETFILTER_NETLINK=y -# CONFIG_NETFILTER_NETLINK_ACCT is not set -# CONFIG_NETFILTER_NETLINK_QUEUE is not set +CONFIG_NETFILTER_NETLINK_ACCT=y +CONFIG_NETFILTER_NETLINK_QUEUE=y CONFIG_NETFILTER_NETLINK_LOG=y CONFIG_NF_CONNTRACK=y -CONFIG_NF_LOG_COMMON=m -# CONFIG_NF_LOG_NETDEV is not set +CONFIG_NF_LOG_COMMON=y +CONFIG_NF_LOG_NETDEV=y CONFIG_NF_CONNTRACK_MARK=y CONFIG_NF_CONNTRACK_SECMARK=y 
+CONFIG_NF_CONNTRACK_ZONES=y CONFIG_NF_CONNTRACK_PROCFS=y -# CONFIG_NF_CONNTRACK_EVENTS is not set -# CONFIG_NF_CONNTRACK_TIMEOUT is not set -# CONFIG_NF_CONNTRACK_TIMESTAMP is not set +CONFIG_NF_CONNTRACK_EVENTS=y +CONFIG_NF_CONNTRACK_TIMEOUT=y +CONFIG_NF_CONNTRACK_TIMESTAMP=y +CONFIG_NF_CONNTRACK_LABELS=y CONFIG_NF_CT_PROTO_DCCP=y +CONFIG_NF_CT_PROTO_GRE=y CONFIG_NF_CT_PROTO_SCTP=y CONFIG_NF_CT_PROTO_UDPLITE=y -# CONFIG_NF_CONNTRACK_AMANDA is not set +CONFIG_NF_CONNTRACK_AMANDA=y CONFIG_NF_CONNTRACK_FTP=y -# CONFIG_NF_CONNTRACK_H323 is not set +CONFIG_NF_CONNTRACK_H323=y CONFIG_NF_CONNTRACK_IRC=y -# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set -# CONFIG_NF_CONNTRACK_SNMP is not set -# CONFIG_NF_CONNTRACK_PPTP is not set -# CONFIG_NF_CONNTRACK_SANE is not set +CONFIG_NF_CONNTRACK_BROADCAST=y +CONFIG_NF_CONNTRACK_NETBIOS_NS=y +CONFIG_NF_CONNTRACK_SNMP=y +CONFIG_NF_CONNTRACK_PPTP=y +CONFIG_NF_CONNTRACK_SANE=y CONFIG_NF_CONNTRACK_SIP=y -# CONFIG_NF_CONNTRACK_TFTP is not set +CONFIG_NF_CONNTRACK_TFTP=y CONFIG_NF_CT_NETLINK=y -# CONFIG_NF_CT_NETLINK_TIMEOUT is not set +CONFIG_NF_CT_NETLINK_TIMEOUT=y # CONFIG_NETFILTER_NETLINK_GLUE_CT is not set CONFIG_NF_NAT=y CONFIG_NF_NAT_NEEDED=y CONFIG_NF_NAT_PROTO_DCCP=y CONFIG_NF_NAT_PROTO_UDPLITE=y CONFIG_NF_NAT_PROTO_SCTP=y -# CONFIG_NF_NAT_AMANDA is not set +CONFIG_NF_NAT_AMANDA=y CONFIG_NF_NAT_FTP=y CONFIG_NF_NAT_IRC=y CONFIG_NF_NAT_SIP=y -# CONFIG_NF_NAT_TFTP is not set -# CONFIG_NF_NAT_REDIRECT is not set +CONFIG_NF_NAT_TFTP=y +CONFIG_NF_NAT_REDIRECT=y # CONFIG_NF_TABLES is not set CONFIG_NETFILTER_XTABLES=y # # Xtables combined modules # -CONFIG_NETFILTER_XT_MARK=m -# CONFIG_NETFILTER_XT_CONNMARK is not set -# CONFIG_NETFILTER_XT_SET is not set +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y +CONFIG_NETFILTER_XT_SET=y # # Xtables targets # -# CONFIG_NETFILTER_XT_TARGET_AUDIT is not set -# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set -# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set -# 
CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set +CONFIG_NETFILTER_XT_TARGET_AUDIT=y +CONFIG_NETFILTER_XT_TARGET_CHECKSUM=y +CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y +CONFIG_NETFILTER_XT_TARGET_CONNMARK=y CONFIG_NETFILTER_XT_TARGET_CONNSECMARK=y -# CONFIG_NETFILTER_XT_TARGET_CT is not set -# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_CT=y +CONFIG_NETFILTER_XT_TARGET_DSCP=y CONFIG_NETFILTER_XT_TARGET_HL=y -# CONFIG_NETFILTER_XT_TARGET_HMARK is not set -# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set -# CONFIG_NETFILTER_XT_TARGET_LED is not set +CONFIG_NETFILTER_XT_TARGET_HMARK=y +CONFIG_NETFILTER_XT_TARGET_IDLETIMER=y +CONFIG_NETFILTER_XT_TARGET_LED=y CONFIG_NETFILTER_XT_TARGET_LOG=m -# CONFIG_NETFILTER_XT_TARGET_MARK is not set +CONFIG_NETFILTER_XT_TARGET_MARK=y CONFIG_NETFILTER_XT_NAT=y -# CONFIG_NETFILTER_XT_TARGET_NETMAP is not set +CONFIG_NETFILTER_XT_TARGET_NETMAP=y CONFIG_NETFILTER_XT_TARGET_NFLOG=y -# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set -# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set -# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set -# CONFIG_NETFILTER_XT_TARGET_REDIRECT is not set -# CONFIG_NETFILTER_XT_TARGET_TEE is not set -# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set -# CONFIG_NETFILTER_XT_TARGET_TRACE is not set +CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y +CONFIG_NETFILTER_XT_TARGET_NOTRACK=y +CONFIG_NETFILTER_XT_TARGET_RATEEST=y +CONFIG_NETFILTER_XT_TARGET_REDIRECT=y +CONFIG_NETFILTER_XT_TARGET_TEE=y +CONFIG_NETFILTER_XT_TARGET_TPROXY=y +CONFIG_NETFILTER_XT_TARGET_TRACE=y CONFIG_NETFILTER_XT_TARGET_SECMARK=y CONFIG_NETFILTER_XT_TARGET_TCPMSS=y -# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set +CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP=y # # Xtables matches # -CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=m -# CONFIG_NETFILTER_XT_MATCH_BPF is not set -# CONFIG_NETFILTER_XT_MATCH_CGROUP is not set -# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set -# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set -# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is 
not set -# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set -# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set -# CONFIG_NETFILTER_XT_MATCH_CONNMARK is not set +CONFIG_NETFILTER_XT_MATCH_ADDRTYPE=y +CONFIG_NETFILTER_XT_MATCH_BPF=y +CONFIG_NETFILTER_XT_MATCH_CGROUP=y +CONFIG_NETFILTER_XT_MATCH_CLUSTER=y +CONFIG_NETFILTER_XT_MATCH_COMMENT=y +CONFIG_NETFILTER_XT_MATCH_CONNBYTES=y +CONFIG_NETFILTER_XT_MATCH_CONNLABEL=y +CONFIG_NETFILTER_XT_MATCH_CONNLIMIT=y +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y -# CONFIG_NETFILTER_XT_MATCH_CPU is not set -# CONFIG_NETFILTER_XT_MATCH_DCCP is not set -# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set -# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +CONFIG_NETFILTER_XT_MATCH_CPU=y +CONFIG_NETFILTER_XT_MATCH_DCCP=y +CONFIG_NETFILTER_XT_MATCH_DEVGROUP=y +CONFIG_NETFILTER_XT_MATCH_DSCP=y CONFIG_NETFILTER_XT_MATCH_ECN=y -# CONFIG_NETFILTER_XT_MATCH_ESP is not set -# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set -# CONFIG_NETFILTER_XT_MATCH_HELPER is not set +CONFIG_NETFILTER_XT_MATCH_ESP=y +CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y +CONFIG_NETFILTER_XT_MATCH_HELPER=y CONFIG_NETFILTER_XT_MATCH_HL=y -# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set -# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set -# CONFIG_NETFILTER_XT_MATCH_IPVS is not set -# CONFIG_NETFILTER_XT_MATCH_L2TP is not set -# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set -# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set -# CONFIG_NETFILTER_XT_MATCH_MAC is not set -# CONFIG_NETFILTER_XT_MATCH_MARK is not set -# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set -# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set -# CONFIG_NETFILTER_XT_MATCH_OSF is not set -# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +CONFIG_NETFILTER_XT_MATCH_IPCOMP=y +CONFIG_NETFILTER_XT_MATCH_IPRANGE=y +CONFIG_NETFILTER_XT_MATCH_IPVS=y +CONFIG_NETFILTER_XT_MATCH_L2TP=y +CONFIG_NETFILTER_XT_MATCH_LENGTH=y +CONFIG_NETFILTER_XT_MATCH_LIMIT=y +CONFIG_NETFILTER_XT_MATCH_MAC=y +CONFIG_NETFILTER_XT_MATCH_MARK=y 
+CONFIG_NETFILTER_XT_MATCH_MULTIPORT=y +CONFIG_NETFILTER_XT_MATCH_NFACCT=y +CONFIG_NETFILTER_XT_MATCH_OSF=y +CONFIG_NETFILTER_XT_MATCH_OWNER=y CONFIG_NETFILTER_XT_MATCH_POLICY=y -# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set -# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set -# CONFIG_NETFILTER_XT_MATCH_QUOTA is not set -# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set -# CONFIG_NETFILTER_XT_MATCH_REALM is not set -# CONFIG_NETFILTER_XT_MATCH_RECENT is not set -# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +CONFIG_NETFILTER_XT_MATCH_PHYSDEV=y +CONFIG_NETFILTER_XT_MATCH_PKTTYPE=y +CONFIG_NETFILTER_XT_MATCH_QUOTA=y +CONFIG_NETFILTER_XT_MATCH_RATEEST=y +CONFIG_NETFILTER_XT_MATCH_REALM=y +CONFIG_NETFILTER_XT_MATCH_RECENT=y +CONFIG_NETFILTER_XT_MATCH_SCTP=y CONFIG_NETFILTER_XT_MATCH_STATE=y # CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set -# CONFIG_NETFILTER_XT_MATCH_STRING is not set -# CONFIG_NETFILTER_XT_MATCH_TCPMSS is not set -# CONFIG_NETFILTER_XT_MATCH_TIME is not set -# CONFIG_NETFILTER_XT_MATCH_U32 is not set +CONFIG_NETFILTER_XT_MATCH_STRING=y +CONFIG_NETFILTER_XT_MATCH_TCPMSS=y +CONFIG_NETFILTER_XT_MATCH_TIME=y +CONFIG_NETFILTER_XT_MATCH_U32=y CONFIG_IP_SET=y CONFIG_IP_SET_MAX=256 CONFIG_IP_SET_BITMAP_IP=y @@ -1145,14 +1147,16 @@ CONFIG_IP_VS_PE_SIP=y CONFIG_NF_DEFRAG_IPV4=y CONFIG_NF_CONNTRACK_IPV4=y # CONFIG_NF_SOCKET_IPV4 is not set -# CONFIG_NF_DUP_IPV4 is not set +CONFIG_NF_DUP_IPV4=y CONFIG_NF_LOG_ARP=m CONFIG_NF_LOG_IPV4=m CONFIG_NF_REJECT_IPV4=y CONFIG_NF_NAT_IPV4=y CONFIG_NF_NAT_MASQUERADE_IPV4=y -# CONFIG_NF_NAT_PPTP is not set -# CONFIG_NF_NAT_H323 is not set +CONFIG_NF_NAT_SNMP_BASIC=y +CONFIG_NF_NAT_PROTO_GRE=y +CONFIG_NF_NAT_PPTP=y +CONFIG_NF_NAT_H323=y CONFIG_IP_NF_IPTABLES=y CONFIG_IP_NF_MATCH_AH=y CONFIG_IP_NF_MATCH_ECN=y @@ -1181,7 +1185,7 @@ CONFIG_IP_NF_ARP_MANGLE=y CONFIG_NF_DEFRAG_IPV6=y CONFIG_NF_CONNTRACK_IPV6=y # CONFIG_NF_SOCKET_IPV6 is not set -# CONFIG_NF_DUP_IPV6 is not set +CONFIG_NF_DUP_IPV6=y CONFIG_NF_REJECT_IPV6=y 
CONFIG_NF_LOG_IPV6=m # CONFIG_NF_NAT_IPV6 is not set @@ -1358,12 +1362,7 @@ CONFIG_NET_FLOW_LIMIT=y # CONFIG_NET_PKTGEN is not set # CONFIG_NET_TCPPROBE is not set # CONFIG_NET_DROP_MONITOR is not set -CONFIG_HAMRADIO=y - -# -# Packet Radio protocols -# -# CONFIG_AX25 is not set +# CONFIG_HAMRADIO is not set # CONFIG_CAN is not set # CONFIG_BT is not set CONFIG_AF_RXRPC=y @@ -1375,37 +1374,20 @@ CONFIG_AF_RXRPC=y # CONFIG_STREAM_PARSER is not set CONFIG_FIB_RULES=y CONFIG_WIRELESS=y -CONFIG_CFG80211=y -# CONFIG_NL80211_TESTMODE is not set -# CONFIG_CFG80211_DEVELOPER_WARNINGS is not set -# CONFIG_CFG80211_CERTIFICATION_ONUS is not set -CONFIG_CFG80211_DEFAULT_PS=y -# CONFIG_CFG80211_DEBUGFS is not set -# CONFIG_CFG80211_INTERNAL_REGDB is not set -CONFIG_CFG80211_CRDA_SUPPORT=y -# CONFIG_CFG80211_WEXT is not set +# CONFIG_CFG80211 is not set # CONFIG_LIB80211 is not set -CONFIG_MAC80211=y -CONFIG_MAC80211_HAS_RC=y -CONFIG_MAC80211_RC_MINSTREL=y -CONFIG_MAC80211_RC_MINSTREL_HT=y -# CONFIG_MAC80211_RC_MINSTREL_VHT is not set -CONFIG_MAC80211_RC_DEFAULT_MINSTREL=y -CONFIG_MAC80211_RC_DEFAULT="minstrel_ht" -# CONFIG_MAC80211_MESH is not set -CONFIG_MAC80211_LEDS=y -# CONFIG_MAC80211_DEBUGFS is not set -# CONFIG_MAC80211_MESSAGE_TRACING is not set -# CONFIG_MAC80211_DEBUG_MENU is not set + +# +# CFG80211 needs to be enabled for MAC80211 +# CONFIG_MAC80211_STA_HASH_MAX_SIZE=0 # CONFIG_WIMAX is not set -CONFIG_RFKILL=y -CONFIG_RFKILL_LEDS=y -# CONFIG_RFKILL_INPUT is not set -# CONFIG_RFKILL_GPIO is not set +# CONFIG_RFKILL is not set # CONFIG_NET_9P is not set # CONFIG_CAIF is not set -# CONFIG_CEPH_LIB is not set +CONFIG_CEPH_LIB=y +# CONFIG_CEPH_LIB_PRETTYDEBUG is not set +# CONFIG_CEPH_LIB_USE_DNS_RESOLVER is not set # CONFIG_NFC is not set # CONFIG_PSAMPLE is not set # CONFIG_NET_IFE is not set @@ -1491,6 +1473,7 @@ CONFIG_VIRTIO_BLK=y # CONFIG_BLK_DEV_RSXX is not set # CONFIG_BLK_DEV_NVME is not set # CONFIG_NVME_FC is not set +# CONFIG_NVME_TARGET is not set # # Misc 
devices @@ -1721,7 +1704,7 @@ CONFIG_PATA_SCH=y # CONFIG_PATA_LEGACY is not set CONFIG_MD=y CONFIG_BLK_DEV_MD=y -CONFIG_MD_AUTODETECT=y +# CONFIG_MD_AUTODETECT is not set # CONFIG_MD_LINEAR is not set # CONFIG_MD_RAID0 is not set # CONFIG_MD_RAID1 is not set @@ -1734,15 +1717,14 @@ CONFIG_BLK_DEV_DM_BUILTIN=y CONFIG_BLK_DEV_DM=y # CONFIG_DM_MQ_DEFAULT is not set # CONFIG_DM_DEBUG is not set -# CONFIG_DM_CRYPT is not set +CONFIG_DM_CRYPT=y # CONFIG_DM_SNAPSHOT is not set # CONFIG_DM_THIN_PROVISIONING is not set # CONFIG_DM_CACHE is not set # CONFIG_DM_ERA is not set -CONFIG_DM_MIRROR=y -# CONFIG_DM_LOG_USERSPACE is not set +# CONFIG_DM_MIRROR is not set # CONFIG_DM_RAID is not set -CONFIG_DM_ZERO=y +# CONFIG_DM_ZERO is not set # CONFIG_DM_MULTIPATH is not set # CONFIG_DM_DELAY is not set # CONFIG_DM_UEVENT is not set @@ -1759,8 +1741,7 @@ CONFIG_DM_ZERO=y # # CONFIG_FIREWIRE is not set # CONFIG_FIREWIRE_NOSY is not set -CONFIG_MACINTOSH_DRIVERS=y -CONFIG_MAC_EMUMOUSEBTN=y +# CONFIG_MACINTOSH_DRIVERS is not set CONFIG_NETDEVICES=y CONFIG_MII=y CONFIG_NET_CORE=y @@ -1770,13 +1751,15 @@ CONFIG_NET_CORE=y # CONFIG_NET_FC is not set # CONFIG_IFB is not set # CONFIG_NET_TEAM is not set -# CONFIG_MACVLAN is not set +CONFIG_MACVLAN=y +CONFIG_MACVTAP=y # CONFIG_VXLAN is not set # CONFIG_MACSEC is not set -CONFIG_NETCONSOLE=y -CONFIG_NETPOLL=y -CONFIG_NET_POLL_CONTROLLER=y +# CONFIG_NETCONSOLE is not set +# CONFIG_NETPOLL is not set +# CONFIG_NET_POLL_CONTROLLER is not set CONFIG_TUN=y +CONFIG_TAP=y # CONFIG_TUN_VNET_CROSS_LE is not set CONFIG_VETH=y CONFIG_VIRTIO_NET=y @@ -1792,90 +1775,51 @@ CONFIG_VIRTIO_NET=y # CONFIG_ETHERNET=y CONFIG_MDIO=y -CONFIG_NET_VENDOR_3COM=y -# CONFIG_PCMCIA_3C574 is not set -# CONFIG_PCMCIA_3C589 is not set -# CONFIG_VORTEX is not set -# CONFIG_TYPHOON is not set -CONFIG_NET_VENDOR_ADAPTEC=y -# CONFIG_ADAPTEC_STARFIRE is not set -CONFIG_NET_VENDOR_AGERE=y -# CONFIG_ET131X is not set -CONFIG_NET_VENDOR_ALACRITECH=y -# CONFIG_SLICOSS is not set 
-CONFIG_NET_VENDOR_ALTEON=y -# CONFIG_ACENIC is not set +# CONFIG_NET_VENDOR_3COM is not set +# CONFIG_NET_VENDOR_ADAPTEC is not set +# CONFIG_NET_VENDOR_AGERE is not set +# CONFIG_NET_VENDOR_ALACRITECH is not set +# CONFIG_NET_VENDOR_ALTEON is not set # CONFIG_ALTERA_TSE is not set -CONFIG_NET_VENDOR_AMAZON=y -# CONFIG_ENA_ETHERNET is not set -CONFIG_NET_VENDOR_AMD=y -# CONFIG_AMD8111_ETH is not set -# CONFIG_PCNET32 is not set -# CONFIG_PCMCIA_NMCLAN is not set -# CONFIG_AMD_XGBE is not set -# CONFIG_AMD_XGBE_HAVE_ECC is not set -CONFIG_NET_VENDOR_AQUANTIA=y -# CONFIG_AQTION is not set +# CONFIG_NET_VENDOR_AMAZON is not set +# CONFIG_NET_VENDOR_AMD is not set +# CONFIG_NET_VENDOR_AQUANTIA is not set CONFIG_NET_VENDOR_ARC=y -CONFIG_NET_VENDOR_ATHEROS=y -# CONFIG_ATL2 is not set -# CONFIG_ATL1 is not set -# CONFIG_ATL1E is not set -# CONFIG_ATL1C is not set -# CONFIG_ALX is not set +# CONFIG_NET_VENDOR_ATHEROS is not set # CONFIG_NET_VENDOR_AURORA is not set -CONFIG_NET_CADENCE=y -# CONFIG_MACB is not set +# CONFIG_NET_CADENCE is not set CONFIG_NET_VENDOR_BROADCOM=y -# CONFIG_B44 is not set -# CONFIG_BNX2 is not set -# CONFIG_CNIC is not set +CONFIG_B44=y +CONFIG_B44_PCI_AUTOSELECT=y +CONFIG_B44_PCICORE_AUTOSELECT=y +CONFIG_B44_PCI=y +CONFIG_BNX2=y +CONFIG_CNIC=y CONFIG_TIGON3=y CONFIG_TIGON3_HWMON=y -# CONFIG_BNX2X is not set -# CONFIG_BNXT is not set -CONFIG_NET_VENDOR_BROCADE=y -# CONFIG_BNA is not set -CONFIG_NET_VENDOR_CAVIUM=y -# CONFIG_THUNDER_NIC_PF is not set -# CONFIG_THUNDER_NIC_VF is not set -# CONFIG_THUNDER_NIC_BGX is not set -# CONFIG_THUNDER_NIC_RGX is not set -# CONFIG_LIQUIDIO is not set -# CONFIG_LIQUIDIO_VF is not set +CONFIG_BNX2X=y +CONFIG_BNX2X_SRIOV=y +CONFIG_BNXT=y +CONFIG_BNXT_SRIOV=y +CONFIG_BNXT_FLOWER_OFFLOAD=y +# CONFIG_NET_VENDOR_BROCADE is not set +# CONFIG_NET_VENDOR_CAVIUM is not set CONFIG_NET_VENDOR_CHELSIO=y # CONFIG_CHELSIO_T1 is not set -# CONFIG_CHELSIO_T3 is not set -# CONFIG_CHELSIO_T4 is not set -# CONFIG_CHELSIO_T4VF is 
not set -CONFIG_NET_VENDOR_CISCO=y -# CONFIG_ENIC is not set +CONFIG_CHELSIO_T3=y +CONFIG_CHELSIO_T4=y +CONFIG_CHELSIO_T4VF=y +# CONFIG_NET_VENDOR_CISCO is not set # CONFIG_CX_ECAT is not set # CONFIG_DNET is not set -CONFIG_NET_VENDOR_DEC=y -CONFIG_NET_TULIP=y -# CONFIG_DE2104X is not set -# CONFIG_TULIP is not set -# CONFIG_DE4X5 is not set -# CONFIG_WINBOND_840 is not set -# CONFIG_DM9102 is not set -# CONFIG_ULI526X is not set -# CONFIG_PCMCIA_XIRCOM is not set -CONFIG_NET_VENDOR_DLINK=y -# CONFIG_DL2K is not set -# CONFIG_SUNDANCE is not set -CONFIG_NET_VENDOR_EMULEX=y -# CONFIG_BE2NET is not set -CONFIG_NET_VENDOR_EZCHIP=y -CONFIG_NET_VENDOR_EXAR=y -# CONFIG_S2IO is not set -# CONFIG_VXGE is not set -CONFIG_NET_VENDOR_FUJITSU=y -# CONFIG_PCMCIA_FMVJ18X is not set -CONFIG_NET_VENDOR_HP=y -# CONFIG_HP100 is not set -CONFIG_NET_VENDOR_HUAWEI=y -# CONFIG_HINIC is not set +# CONFIG_NET_VENDOR_DEC is not set +# CONFIG_NET_VENDOR_DLINK is not set +# CONFIG_NET_VENDOR_EMULEX is not set +# CONFIG_NET_VENDOR_EZCHIP is not set +# CONFIG_NET_VENDOR_EXAR is not set +# CONFIG_NET_VENDOR_FUJITSU is not set +# CONFIG_NET_VENDOR_HP is not set +# CONFIG_NET_VENDOR_HUAWEI is not set CONFIG_NET_VENDOR_INTEL=y CONFIG_E100=y CONFIG_E1000=y @@ -1890,108 +1834,56 @@ CONFIG_IXGBE_HWMON=y CONFIG_IXGBEVF=y # CONFIG_I40E is not set # CONFIG_I40EVF is not set -# CONFIG_FM10K is not set +CONFIG_FM10K=y CONFIG_NET_VENDOR_I825XX=y # CONFIG_JME is not set -CONFIG_NET_VENDOR_MARVELL=y -# CONFIG_MVMDIO is not set -# CONFIG_SKGE is not set -CONFIG_SKY2=y -# CONFIG_SKY2_DEBUG is not set +# CONFIG_NET_VENDOR_MARVELL is not set CONFIG_NET_VENDOR_MELLANOX=y # CONFIG_MLX4_EN is not set # CONFIG_MLX4_CORE is not set # CONFIG_MLX5_CORE is not set # CONFIG_MLXSW_CORE is not set # CONFIG_MLXFW is not set -CONFIG_NET_VENDOR_MICREL=y -# CONFIG_KS8851 is not set -# CONFIG_KS8851_MLL is not set -# CONFIG_KSZ884X_PCI is not set +# CONFIG_NET_VENDOR_MICREL is not set CONFIG_NET_VENDOR_MICROCHIP=y -# 
CONFIG_ENC28J60 is not set +CONFIG_ENC28J60=y +CONFIG_ENC28J60_WRITEVERIFY=y # CONFIG_ENCX24J600 is not set -CONFIG_NET_VENDOR_MYRI=y -# CONFIG_MYRI10GE is not set +# CONFIG_NET_VENDOR_MYRI is not set # CONFIG_FEALNX is not set -CONFIG_NET_VENDOR_NATSEMI=y -# CONFIG_NATSEMI is not set -# CONFIG_NS83820 is not set -CONFIG_NET_VENDOR_NETRONOME=y -# CONFIG_NFP is not set -CONFIG_NET_VENDOR_8390=y -# CONFIG_PCMCIA_AXNET is not set -# CONFIG_NE2K_PCI is not set -# CONFIG_PCMCIA_PCNET is not set -CONFIG_NET_VENDOR_NVIDIA=y -CONFIG_FORCEDETH=y -CONFIG_NET_VENDOR_OKI=y +# CONFIG_NET_VENDOR_NATSEMI is not set +# CONFIG_NET_VENDOR_NETRONOME is not set +# CONFIG_NET_VENDOR_NVIDIA is not set +# CONFIG_NET_VENDOR_OKI is not set # CONFIG_ETHOC is not set -CONFIG_NET_PACKET_ENGINE=y -# CONFIG_HAMACHI is not set -# CONFIG_YELLOWFIN is not set -CONFIG_NET_VENDOR_QLOGIC=y -# CONFIG_QLA3XXX is not set -# CONFIG_QLCNIC is not set -# CONFIG_QLGE is not set -# CONFIG_NETXEN_NIC is not set -# CONFIG_QED is not set -CONFIG_NET_VENDOR_QUALCOMM=y -# CONFIG_QCOM_EMAC is not set -# CONFIG_RMNET is not set +# CONFIG_NET_PACKET_ENGINE is not set +# CONFIG_NET_VENDOR_QLOGIC is not set +# CONFIG_NET_VENDOR_QUALCOMM is not set CONFIG_NET_VENDOR_REALTEK=y # CONFIG_8139CP is not set -CONFIG_8139TOO=y -CONFIG_8139TOO_PIO=y -# CONFIG_8139TOO_TUNE_TWISTER is not set -# CONFIG_8139TOO_8129 is not set -# CONFIG_8139_OLD_RX_RESET is not set +# CONFIG_8139TOO is not set CONFIG_R8169=y -CONFIG_NET_VENDOR_RENESAS=y -CONFIG_NET_VENDOR_RDC=y -# CONFIG_R6040 is not set -CONFIG_NET_VENDOR_ROCKER=y +# CONFIG_NET_VENDOR_RENESAS is not set +# CONFIG_NET_VENDOR_RDC is not set +# CONFIG_NET_VENDOR_ROCKER is not set CONFIG_NET_VENDOR_SAMSUNG=y # CONFIG_SXGBE_ETH is not set -CONFIG_NET_VENDOR_SEEQ=y -CONFIG_NET_VENDOR_SILAN=y -# CONFIG_SC92031 is not set -CONFIG_NET_VENDOR_SIS=y -# CONFIG_SIS900 is not set -# CONFIG_SIS190 is not set -CONFIG_NET_VENDOR_SOLARFLARE=y -# CONFIG_SFC is not set -# CONFIG_SFC_FALCON is not 
set -CONFIG_NET_VENDOR_SMSC=y -# CONFIG_PCMCIA_SMC91C92 is not set -# CONFIG_EPIC100 is not set -# CONFIG_SMSC911X is not set -# CONFIG_SMSC9420 is not set -CONFIG_NET_VENDOR_STMICRO=y -# CONFIG_STMMAC_ETH is not set -CONFIG_NET_VENDOR_SUN=y -# CONFIG_HAPPYMEAL is not set -# CONFIG_SUNGEM is not set -# CONFIG_CASSINI is not set -# CONFIG_NIU is not set -CONFIG_NET_VENDOR_TEHUTI=y -# CONFIG_TEHUTI is not set -CONFIG_NET_VENDOR_TI=y -# CONFIG_TI_CPSW_ALE is not set -# CONFIG_TLAN is not set -CONFIG_NET_VENDOR_VIA=y -# CONFIG_VIA_RHINE is not set -# CONFIG_VIA_VELOCITY is not set +# CONFIG_NET_VENDOR_SEEQ is not set +# CONFIG_NET_VENDOR_SILAN is not set +# CONFIG_NET_VENDOR_SIS is not set +# CONFIG_NET_VENDOR_SOLARFLARE is not set +# CONFIG_NET_VENDOR_SMSC is not set +# CONFIG_NET_VENDOR_STMICRO is not set +# CONFIG_NET_VENDOR_SUN is not set +# CONFIG_NET_VENDOR_TEHUTI is not set +# CONFIG_NET_VENDOR_TI is not set +# CONFIG_NET_VENDOR_VIA is not set CONFIG_NET_VENDOR_WIZNET=y # CONFIG_WIZNET_W5100 is not set # CONFIG_WIZNET_W5300 is not set -CONFIG_NET_VENDOR_XIRCOM=y -# CONFIG_PCMCIA_XIRC2PS is not set -CONFIG_NET_VENDOR_SYNOPSYS=y -# CONFIG_DWC_XLGMAC is not set -CONFIG_FDDI=y -# CONFIG_DEFXX is not set -# CONFIG_SKFP is not set +# CONFIG_NET_VENDOR_XIRCOM is not set +# CONFIG_NET_VENDOR_SYNOPSYS is not set +# CONFIG_FDDI is not set # CONFIG_HIPPI is not set # CONFIG_NET_SB1000 is not set CONFIG_MDIO_DEVICE=y @@ -2045,87 +1937,8 @@ CONFIG_USB_NET_DRIVERS=y # CONFIG_USB_RTL8152 is not set # CONFIG_USB_LAN78XX is not set # CONFIG_USB_USBNET is not set -# CONFIG_USB_HSO is not set # CONFIG_USB_IPHETH is not set -CONFIG_WLAN=y -# CONFIG_WIRELESS_WDS is not set -CONFIG_WLAN_VENDOR_ADMTEK=y -# CONFIG_ADM8211 is not set -CONFIG_WLAN_VENDOR_ATH=y -# CONFIG_ATH_DEBUG is not set -# CONFIG_ATH5K is not set -# CONFIG_ATH5K_PCI is not set -# CONFIG_ATH9K is not set -# CONFIG_ATH9K_HTC is not set -# CONFIG_CARL9170 is not set -# CONFIG_ATH6KL is not set -# CONFIG_AR5523 is not 
set -# CONFIG_WIL6210 is not set -# CONFIG_ATH10K is not set -# CONFIG_WCN36XX is not set -CONFIG_WLAN_VENDOR_ATMEL=y -# CONFIG_ATMEL is not set -# CONFIG_AT76C50X_USB is not set -CONFIG_WLAN_VENDOR_BROADCOM=y -# CONFIG_B43 is not set -# CONFIG_B43LEGACY is not set -# CONFIG_BRCMSMAC is not set -# CONFIG_BRCMFMAC is not set -CONFIG_WLAN_VENDOR_CISCO=y -# CONFIG_AIRO is not set -# CONFIG_AIRO_CS is not set -CONFIG_WLAN_VENDOR_INTEL=y -# CONFIG_IPW2100 is not set -# CONFIG_IPW2200 is not set -# CONFIG_IWL4965 is not set -# CONFIG_IWL3945 is not set -# CONFIG_IWLWIFI is not set -CONFIG_WLAN_VENDOR_INTERSIL=y -# CONFIG_HOSTAP is not set -# CONFIG_HERMES is not set -# CONFIG_P54_COMMON is not set -# CONFIG_PRISM54 is not set -CONFIG_WLAN_VENDOR_MARVELL=y -# CONFIG_LIBERTAS is not set -# CONFIG_LIBERTAS_THINFIRM is not set -# CONFIG_MWIFIEX is not set -# CONFIG_MWL8K is not set -CONFIG_WLAN_VENDOR_MEDIATEK=y -# CONFIG_MT7601U is not set -CONFIG_WLAN_VENDOR_RALINK=y -# CONFIG_RT2X00 is not set -CONFIG_WLAN_VENDOR_REALTEK=y -# CONFIG_RTL8180 is not set -# CONFIG_RTL8187 is not set -CONFIG_RTL_CARDS=y -# CONFIG_RTL8192CE is not set -# CONFIG_RTL8192SE is not set -# CONFIG_RTL8192DE is not set -# CONFIG_RTL8723AE is not set -# CONFIG_RTL8723BE is not set -# CONFIG_RTL8188EE is not set -# CONFIG_RTL8192EE is not set -# CONFIG_RTL8821AE is not set -# CONFIG_RTL8192CU is not set -# CONFIG_RTL8XXXU is not set -CONFIG_WLAN_VENDOR_RSI=y -# CONFIG_RSI_91X is not set -CONFIG_WLAN_VENDOR_ST=y -# CONFIG_CW1200 is not set -CONFIG_WLAN_VENDOR_TI=y -# CONFIG_WL1251 is not set -# CONFIG_WL12XX is not set -# CONFIG_WL18XX is not set -# CONFIG_WLCORE is not set -CONFIG_WLAN_VENDOR_ZYDAS=y -# CONFIG_USB_ZD1201 is not set -# CONFIG_ZD1211RW is not set -CONFIG_WLAN_VENDOR_QUANTENNA=y -# CONFIG_QTNFMAC_PEARL_PCIE is not set -# CONFIG_PCMCIA_RAYCS is not set -# CONFIG_PCMCIA_WL3501 is not set -# CONFIG_MAC80211_HWSIM is not set -# CONFIG_USB_NET_RNDIS_WLAN is not set +# CONFIG_WLAN is not set # 
# Enable WiMAX (Networking options) to see the WiMAX drivers @@ -2420,13 +2233,13 @@ CONFIG_IPMI_DMI_DECODE=y CONFIG_IPMI_DEVICE_INTERFACE=y CONFIG_IPMI_SI=y CONFIG_IPMI_SI_PROBE_DEFAULTS=y -# CONFIG_IPMI_SSIF is not set +CONFIG_IPMI_SSIF=y CONFIG_IPMI_WATCHDOG=y CONFIG_IPMI_POWEROFF=y CONFIG_HW_RANDOM=y -# CONFIG_HW_RANDOM_TIMERIOMEM is not set -# CONFIG_HW_RANDOM_INTEL is not set -# CONFIG_HW_RANDOM_AMD is not set +CONFIG_HW_RANDOM_TIMERIOMEM=y +CONFIG_HW_RANDOM_INTEL=y +CONFIG_HW_RANDOM_AMD=y CONFIG_HW_RANDOM_VIA=y # CONFIG_HW_RANDOM_VIRTIO is not set CONFIG_NVRAM=y @@ -2925,7 +2738,18 @@ CONFIG_SSB_POSSIBLE=y # # Sonics Silicon Backplane # -# CONFIG_SSB is not set +CONFIG_SSB=y +CONFIG_SSB_SPROM=y +CONFIG_SSB_PCIHOST_POSSIBLE=y +CONFIG_SSB_PCIHOST=y +# CONFIG_SSB_B43_PCI_BRIDGE is not set +CONFIG_SSB_PCMCIAHOST_POSSIBLE=y +# CONFIG_SSB_PCMCIAHOST is not set +# CONFIG_SSB_SILENT is not set +# CONFIG_SSB_DEBUG is not set +CONFIG_SSB_DRIVER_PCICORE_POSSIBLE=y +CONFIG_SSB_DRIVER_PCICORE=y +# CONFIG_SSB_DRIVER_GPIO is not set CONFIG_BCMA_POSSIBLE=y # CONFIG_BCMA is not set @@ -3022,210 +2846,37 @@ CONFIG_LPC_SCH=y # CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set # CONFIG_REGULATOR is not set -CONFIG_RC_CORE=y -CONFIG_RC_MAP=y -CONFIG_RC_DECODERS=y -# CONFIG_LIRC is not set -CONFIG_IR_NEC_DECODER=y -CONFIG_IR_RC5_DECODER=y -CONFIG_IR_RC6_DECODER=y -CONFIG_IR_JVC_DECODER=y -CONFIG_IR_SONY_DECODER=y -CONFIG_IR_SANYO_DECODER=y -CONFIG_IR_SHARP_DECODER=y -CONFIG_IR_MCE_KBD_DECODER=y -CONFIG_IR_XMP_DECODER=y -# CONFIG_RC_DEVICES is not set +# CONFIG_RC_CORE is not set # CONFIG_MEDIA_SUPPORT is not set # # Graphics support # -CONFIG_AGP=y -CONFIG_AGP_AMD64=y -CONFIG_AGP_INTEL=y -# CONFIG_AGP_SIS is not set -# CONFIG_AGP_VIA is not set -CONFIG_INTEL_GTT=y -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 +# CONFIG_AGP is not set +# CONFIG_VGA_ARB is not set # CONFIG_VGA_SWITCHEROO is not set -CONFIG_DRM=y -CONFIG_DRM_MIPI_DSI=y -# CONFIG_DRM_DP_AUX_CHARDEV is 
not set -# CONFIG_DRM_DEBUG_MM is not set -# CONFIG_DRM_DEBUG_MM_SELFTEST is not set -CONFIG_DRM_KMS_HELPER=y -CONFIG_DRM_KMS_FB_HELPER=y -CONFIG_DRM_FBDEV_EMULATION=y -CONFIG_DRM_FBDEV_OVERALLOC=100 -# CONFIG_DRM_LOAD_EDID_FIRMWARE is not set - -# -# I2C encoder or helper chips -# -# CONFIG_DRM_I2C_CH7006 is not set -# CONFIG_DRM_I2C_SIL164 is not set -# CONFIG_DRM_I2C_NXP_TDA998X is not set -# CONFIG_DRM_RADEON is not set -# CONFIG_DRM_AMDGPU is not set +# CONFIG_DRM is not set # # ACP (Audio CoProcessor) Configuration # -# CONFIG_DRM_NOUVEAU is not set -CONFIG_DRM_I915=y -# CONFIG_DRM_I915_ALPHA_SUPPORT is not set -CONFIG_DRM_I915_CAPTURE_ERROR=y -CONFIG_DRM_I915_COMPRESS_ERROR=y -CONFIG_DRM_I915_USERPTR=y -# CONFIG_DRM_I915_GVT is not set - -# -# drm/i915 Debugging -# -# CONFIG_DRM_I915_WERROR is not set -# CONFIG_DRM_I915_DEBUG is not set -# CONFIG_DRM_I915_SW_FENCE_DEBUG_OBJECTS is not set -# CONFIG_DRM_I915_SW_FENCE_CHECK_DAG is not set -# CONFIG_DRM_I915_SELFTEST is not set -# CONFIG_DRM_I915_LOW_LEVEL_TRACEPOINTS is not set -# CONFIG_DRM_I915_DEBUG_VBLANK_EVADE is not set -# CONFIG_DRM_VGEM is not set -# CONFIG_DRM_VMWGFX is not set -# CONFIG_DRM_GMA500 is not set -# CONFIG_DRM_UDL is not set -# CONFIG_DRM_AST is not set -# CONFIG_DRM_MGAG200 is not set -# CONFIG_DRM_CIRRUS_QEMU is not set -# CONFIG_DRM_QXL is not set -# CONFIG_DRM_BOCHS is not set -# CONFIG_DRM_VIRTIO_GPU is not set -CONFIG_DRM_PANEL=y - -# -# Display Panels -# -CONFIG_DRM_BRIDGE=y -CONFIG_DRM_PANEL_BRIDGE=y - -# -# Display Interface Bridges -# -# CONFIG_DRM_ANALOGIX_ANX78XX is not set -# CONFIG_DRM_HISI_HIBMC is not set -# CONFIG_DRM_TINYDRM is not set -# CONFIG_DRM_LEGACY is not set # CONFIG_DRM_LIB_RANDOM is not set # # Frame buffer Devices # -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_CMDLINE=y -CONFIG_FB_NOTIFY=y -# CONFIG_FB_DDC is not set -# CONFIG_FB_BOOT_VESA_SUPPORT is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# 
CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -CONFIG_FB_SYS_FILLRECT=y -CONFIG_FB_SYS_COPYAREA=y -CONFIG_FB_SYS_IMAGEBLIT=y -# CONFIG_FB_PROVIDE_GET_FB_UNMAPPED_AREA is not set -# CONFIG_FB_FOREIGN_ENDIAN is not set -CONFIG_FB_SYS_FOPS=y -CONFIG_FB_DEFERRED_IO=y -# CONFIG_FB_SVGALIB is not set -# CONFIG_FB_MACMODES is not set -# CONFIG_FB_BACKLIGHT is not set -CONFIG_FB_MODE_HELPERS=y -CONFIG_FB_TILEBLITTING=y - -# -# Frame buffer hardware drivers -# -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ARC is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_VGA16 is not set -# CONFIG_FB_UVESA is not set -# CONFIG_FB_VESA is not set -CONFIG_FB_EFI=y -# CONFIG_FB_N411 is not set -# CONFIG_FB_HGA is not set -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_LE80578 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set -# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_VIA is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_IBM_GXT4500 is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set -# CONFIG_FB_AUO_K190X is not set -# CONFIG_FB_SIMPLE is not set -# CONFIG_FB_SM712 is not set -CONFIG_BACKLIGHT_LCD_SUPPORT=y -# CONFIG_LCD_CLASS_DEVICE is not set -CONFIG_BACKLIGHT_CLASS_DEVICE=y -CONFIG_BACKLIGHT_GENERIC=y -# CONFIG_BACKLIGHT_APPLE is not set -# 
CONFIG_BACKLIGHT_PM8941_WLED is not set -# CONFIG_BACKLIGHT_SAHARA is not set -# CONFIG_BACKLIGHT_ADP8860 is not set -# CONFIG_BACKLIGHT_ADP8870 is not set -# CONFIG_BACKLIGHT_LM3639 is not set -# CONFIG_BACKLIGHT_GPIO is not set -# CONFIG_BACKLIGHT_LV5207LP is not set -# CONFIG_BACKLIGHT_BD6107 is not set -# CONFIG_BACKLIGHT_ARCXCNN is not set +# CONFIG_FB is not set +# CONFIG_BACKLIGHT_LCD_SUPPORT is not set # CONFIG_VGASTATE is not set -CONFIG_HDMI=y # # Console display driver support # CONFIG_VGA_CONSOLE=y -CONFIG_VGACON_SOFT_SCROLLBACK=y -CONFIG_VGACON_SOFT_SCROLLBACK_SIZE=64 -# CONFIG_VGACON_SOFT_SCROLLBACK_PERSISTENT_ENABLE_BY_DEFAULT is not set +# CONFIG_VGACON_SOFT_SCROLLBACK is not set CONFIG_DUMMY_CONSOLE=y CONFIG_DUMMY_CONSOLE_COLUMNS=80 CONFIG_DUMMY_CONSOLE_ROWS=25 -CONFIG_FRAMEBUFFER_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y -# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y # CONFIG_SOUND is not set # @@ -3377,10 +3028,12 @@ CONFIG_USB_EHCI_PCI=y # CONFIG_USB_MAX3421_HCD is not set CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PCI=y +# CONFIG_USB_OHCI_HCD_SSB is not set # CONFIG_USB_OHCI_HCD_PLATFORM is not set CONFIG_USB_UHCI_HCD=y # CONFIG_USB_SL811_HCD is not set # CONFIG_USB_R8A66597_HCD is not set +# CONFIG_USB_HCD_SSB is not set # CONFIG_USB_HCD_TEST_MODE is not set # @@ -3742,7 +3395,6 @@ CONFIG_UIO=y # CONFIG_UIO_NETX is not set # CONFIG_UIO_PRUSS is not set # CONFIG_UIO_MF624 is not set -# CONFIG_VFIO is not set CONFIG_VIRT_DRIVERS=y CONFIG_VIRTIO=y @@ -3762,24 +3414,12 @@ CONFIG_VIRTIO_PCI_LEGACY=y # CONFIG_STAGING is not set CONFIG_X86_PLATFORM_DEVICES=y # CONFIG_ACERHDF is not set -# CONFIG_ASUS_LAPTOP is not set -# CONFIG_DELL_LAPTOP is not set # CONFIG_DELL_SMO8800 is not set -# CONFIG_DELL_RBTN is not set -# CONFIG_FUJITSU_LAPTOP is not set # CONFIG_FUJITSU_TABLET is not set -# CONFIG_AMILO_RFKILL is not set # 
CONFIG_HP_ACCEL is not set # CONFIG_HP_WIRELESS is not set -# CONFIG_MSI_LAPTOP is not set -# CONFIG_PANASONIC_LAPTOP is not set -# CONFIG_COMPAL_LAPTOP is not set -# CONFIG_SONY_LAPTOP is not set -# CONFIG_IDEAPAD_LAPTOP is not set -# CONFIG_THINKPAD_ACPI is not set # CONFIG_SENSORS_HDAPS is not set # CONFIG_INTEL_MENLOW is not set -# CONFIG_EEEPC_LAPTOP is not set # CONFIG_ASUS_WIRELESS is not set # CONFIG_ACPI_WMI is not set # CONFIG_TOPSTAR_LAPTOP is not set @@ -3793,10 +3433,7 @@ CONFIG_X86_PLATFORM_DEVICES=y # CONFIG_INTEL_IPS is not set # CONFIG_INTEL_PMC_CORE is not set # CONFIG_IBM_RTL is not set -# CONFIG_SAMSUNG_LAPTOP is not set -# CONFIG_INTEL_OAKTRAIL is not set # CONFIG_SAMSUNG_Q10 is not set -# CONFIG_APPLE_GMUX is not set # CONFIG_INTEL_RST is not set # CONFIG_INTEL_SMARTCONNECT is not set # CONFIG_PVPANIC is not set @@ -3837,20 +3474,13 @@ CONFIG_CLKBLD_I8253=y CONFIG_MAILBOX=y CONFIG_PCC=y # CONFIG_ALTERA_MBOX is not set -CONFIG_IOMMU_API=y CONFIG_IOMMU_SUPPORT=y # # Generic IOMMU Pagetable Support # -CONFIG_IOMMU_IOVA=y -CONFIG_AMD_IOMMU=y -# CONFIG_AMD_IOMMU_V2 is not set -CONFIG_DMAR_TABLE=y -CONFIG_INTEL_IOMMU=y -# CONFIG_INTEL_IOMMU_SVM is not set -# CONFIG_INTEL_IOMMU_DEFAULT_ON is not set -CONFIG_INTEL_IOMMU_FLOPPY_WA=y +# CONFIG_AMD_IOMMU is not set +# CONFIG_INTEL_IOMMU is not set # CONFIG_IRQ_REMAP is not set # @@ -3988,6 +3618,7 @@ CONFIG_FS_MBCACHE=y # CONFIG_JFS_FS is not set # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set # CONFIG_BTRFS_FS is not set # CONFIG_NILFS2_FS is not set # CONFIG_F2FS_FS is not set @@ -4050,7 +3681,7 @@ CONFIG_PROC_KCORE=y CONFIG_PROC_VMCORE=y CONFIG_PROC_SYSCTL=y CONFIG_PROC_PAGE_MONITOR=y -# CONFIG_PROC_CHILDREN is not set +CONFIG_PROC_CHILDREN=y CONFIG_KERNFS=y CONFIG_SYSFS=y CONFIG_TMPFS=y @@ -4059,14 +3690,13 @@ CONFIG_TMPFS_XATTR=y CONFIG_HUGETLBFS=y CONFIG_HUGETLB_PAGE=y CONFIG_ARCH_HAS_GIGANTIC_PAGE=y -# CONFIG_CONFIGFS_FS is not set +CONFIG_CONFIGFS_FS=y 
CONFIG_EFIVAR_FS=y CONFIG_MISC_FILESYSTEMS=y # CONFIG_ORANGEFS_FS is not set # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set -CONFIG_ECRYPT_FS=y -# CONFIG_ECRYPT_FS_MESSAGING is not set +# CONFIG_ECRYPT_FS is not set # CONFIG_HFS_FS is not set # CONFIG_HFSPLUS_FS is not set # CONFIG_BEFS_FS is not set @@ -4085,7 +3715,7 @@ CONFIG_SQUASHFS_LZ4=y CONFIG_SQUASHFS_LZO=y CONFIG_SQUASHFS_XZ=y CONFIG_SQUASHFS_ZSTD=y -CONFIG_SQUASHFS_4K_DEVBLK_SIZE=y +# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set # CONFIG_SQUASHFS_EMBEDDED is not set CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_VXFS_FS is not set @@ -4175,6 +3805,7 @@ CONFIG_NLS_ISO8859_1=y # CONFIG_NLS_MAC_ROMANIAN is not set # CONFIG_NLS_MAC_TURKISH is not set CONFIG_NLS_UTF8=y +# CONFIG_DLM is not set # # Kernel hacking @@ -4438,7 +4069,6 @@ CONFIG_SECURITY_NETWORK=y CONFIG_PAGE_TABLE_ISOLATION=y # CONFIG_SECURITY_NETWORK_XFRM is not set # CONFIG_SECURITY_PATH is not set -# CONFIG_INTEL_TXT is not set CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR=y # CONFIG_HARDENED_USERCOPY is not set # CONFIG_FORTIFY_SOURCE is not set @@ -4627,6 +4257,7 @@ CONFIG_CRYPTO_HW=y # CONFIG_CRYPTO_DEV_QAT_C3XXXVF is not set # CONFIG_CRYPTO_DEV_QAT_C62XVF is not set # CONFIG_CRYPTO_DEV_NITROX_CNN55XX is not set +# CONFIG_CRYPTO_DEV_CHELSIO is not set CONFIG_CRYPTO_DEV_VIRTIO=m # CONFIG_ASYMMETRIC_KEY_TYPE is not set @@ -4694,7 +4325,10 @@ CONFIG_DECOMPRESS_XZ=y CONFIG_DECOMPRESS_LZO=y CONFIG_DECOMPRESS_LZ4=y CONFIG_GENERIC_ALLOCATOR=y -CONFIG_INTERVAL_TREE=y +CONFIG_TEXTSEARCH=y +CONFIG_TEXTSEARCH_KMP=y +CONFIG_TEXTSEARCH_BM=y +CONFIG_TEXTSEARCH_FSM=y CONFIG_RADIX_TREE_MULTIORDER=y CONFIG_ASSOCIATIVE_ARRAY=y CONFIG_HAS_IOMEM=y @@ -4713,10 +4347,6 @@ CONFIG_NLATTR=y # CONFIG_IRQ_POLL is not set CONFIG_OID_REGISTRY=y CONFIG_UCS2_STRING=y -CONFIG_FONT_SUPPORT=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y # CONFIG_SG_SPLIT is not set CONFIG_SG_POOL=y CONFIG_ARCH_HAS_SG_CHAIN=y diff --git 
a/packages/base/any/kernels/4.14-lts/kconfig.mk b/packages/base/any/kernels/4.14-lts/kconfig.mk index 07ad594b..c3d7e661 100644 --- a/packages/base/any/kernels/4.14-lts/kconfig.mk +++ b/packages/base/any/kernels/4.14-lts/kconfig.mk @@ -21,7 +21,7 @@ THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) K_MAJOR_VERSION := 4 K_PATCH_LEVEL := 14 -K_SUB_LEVEL := 34 +K_SUB_LEVEL := 49 K_SUFFIX := K_PATCH_DIR := $(THIS_DIR)/patches K_MODSYNCLIST := tools/objtool diff --git a/packages/base/any/kernels/4.4-lts/configs/arm-iproc-all/.gitignore b/packages/base/any/kernels/4.4-lts/configs/arm-iproc-all/.gitignore new file mode 100644 index 00000000..5540b78d --- /dev/null +++ b/packages/base/any/kernels/4.4-lts/configs/arm-iproc-all/.gitignore @@ -0,0 +1,3 @@ +kernel-* +linux-* +lib/ diff --git a/packages/base/any/kernels/3.18.25/configs/x86_64-all/Makefile b/packages/base/any/kernels/4.4-lts/configs/arm-iproc-all/Makefile similarity index 78% rename from packages/base/any/kernels/3.18.25/configs/x86_64-all/Makefile rename to packages/base/any/kernels/4.4-lts/configs/arm-iproc-all/Makefile index 8daf53d7..b3fcbd82 100644 --- a/packages/base/any/kernels/3.18.25/configs/x86_64-all/Makefile +++ b/packages/base/any/kernels/4.4-lts/configs/arm-iproc-all/Makefile @@ -18,25 +18,24 @@ # # ############################################################ -# -# Default 3.18.25 configuration for x86_64 platforms. 
-# -############################################################ + THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) include $(ONL)/make/config.mk -export ARCH := x86_64 ifndef K_TARGET_DIR K_TARGET_DIR := $(THIS_DIR) endif +K_PATCH_DIR := $(THIS_DIR)/patches + include ../../kconfig.mk -K_CONFIG := x86_64-all.config -K_BUILD_TARGET := bzImage -K_COPY_SRC := arch/x86/boot/bzImage +K_CONFIG := arm-iproc-all.config +K_BUILD_TARGET := Image +K_COPY_SRC := arch/arm/boot/Image +K_COPY_GZIP := 1 ifndef K_COPY_DST -K_COPY_DST := kernel-3.18-x86_64-all +K_COPY_DST := kernel-4.4-lts-arm-iproc-all.bin.gz endif +export ARCH=arm include $(ONL)/make/kbuild.mk - diff --git a/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config b/packages/base/any/kernels/4.4-lts/configs/arm-iproc-all/arm-iproc-all.config similarity index 56% rename from packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config rename to packages/base/any/kernels/4.4-lts/configs/arm-iproc-all/arm-iproc-all.config index a023c63c..b1a38f03 100644 --- a/packages/base/any/kernels/3.18.25/configs/arm64-all/arm64-all.config +++ b/packages/base/any/kernels/4.4-lts/configs/arm-iproc-all/arm-iproc-all.config @@ -1,28 +1,25 @@ # # Automatically generated file; DO NOT EDIT. 
-# Linux/arm64 3.18.25 Kernel Configuration +# Linux/arm 4.4.39 Kernel Configuration # -CONFIG_ARM64=y -CONFIG_64BIT=y -CONFIG_ARCH_PHYS_ADDR_T_64BIT=y -CONFIG_MMU=y +CONFIG_ARM=y +CONFIG_ARM_HAS_SG_CHAIN=y +CONFIG_MIGHT_HAVE_PCI=y +CONFIG_SYS_SUPPORTS_APM_EMULATION=y +CONFIG_HAVE_PROC_CPU=y CONFIG_STACKTRACE_SUPPORT=y -CONFIG_ILLEGAL_POINTER_VALUE=0xdead000000000000 CONFIG_LOCKDEP_SUPPORT=y CONFIG_TRACE_IRQFLAGS_SUPPORT=y CONFIG_RWSEM_XCHGADD_ALGORITHM=y -CONFIG_GENERIC_HWEIGHT=y -CONFIG_GENERIC_CSUM=y -CONFIG_GENERIC_CALIBRATE_DELAY=y -CONFIG_ZONE_DMA=y -CONFIG_HAVE_GENERIC_RCU_GUP=y -CONFIG_ARCH_DMA_ADDR_T_64BIT=y -CONFIG_NEED_DMA_MAP_STATE=y -CONFIG_NEED_SG_DMA_LENGTH=y -CONFIG_SWIOTLB=y -CONFIG_IOMMU_HELPER=y -CONFIG_KERNEL_MODE_NEON=y CONFIG_FIX_EARLYCON_MEM=y +CONFIG_GENERIC_HWEIGHT=y +CONFIG_GENERIC_CALIBRATE_DELAY=y +CONFIG_NEED_DMA_MAP_STATE=y +CONFIG_ARCH_SUPPORTS_UPROBES=y +CONFIG_VECTORS_BASE=0xffff0000 +CONFIG_ARM_PATCH_PHYS_VIRT=y +CONFIG_GENERIC_BUG=y +CONFIG_PGTABLE_LEVELS=2 CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config" CONFIG_IRQ_WORK=y CONFIG_BUILDTIME_EXTABLE_SORT=y @@ -31,39 +28,46 @@ CONFIG_BUILDTIME_EXTABLE_SORT=y # General setup # CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_CROSS_COMPILE="aarch64-linux-gnu-" +CONFIG_CROSS_COMPILE="arm-linux-gnueabi-" # CONFIG_COMPILE_TEST is not set CONFIG_LOCALVERSION="" CONFIG_LOCALVERSION_AUTO=y +CONFIG_HAVE_KERNEL_GZIP=y +CONFIG_HAVE_KERNEL_LZMA=y +CONFIG_HAVE_KERNEL_XZ=y +CONFIG_HAVE_KERNEL_LZO=y +CONFIG_HAVE_KERNEL_LZ4=y +CONFIG_KERNEL_GZIP=y +# CONFIG_KERNEL_LZMA is not set +# CONFIG_KERNEL_XZ is not set +# CONFIG_KERNEL_LZO is not set +# CONFIG_KERNEL_LZ4 is not set CONFIG_DEFAULT_HOSTNAME="(none)" CONFIG_SWAP=y CONFIG_SYSVIPC=y CONFIG_SYSVIPC_SYSCTL=y -CONFIG_POSIX_MQUEUE=y -CONFIG_POSIX_MQUEUE_SYSCTL=y +# CONFIG_POSIX_MQUEUE is not set CONFIG_CROSS_MEMORY_ATTACH=y # CONFIG_FHANDLE is not set CONFIG_USELIB=y -CONFIG_AUDIT=y +# CONFIG_AUDIT is not set CONFIG_HAVE_ARCH_AUDITSYSCALL=y -# 
CONFIG_AUDITSYSCALL is not set # # IRQ subsystem # CONFIG_GENERIC_IRQ_PROBE=y CONFIG_GENERIC_IRQ_SHOW=y +CONFIG_GENERIC_IRQ_SHOW_LEVEL=y CONFIG_HARDIRQS_SW_RESEND=y CONFIG_IRQ_DOMAIN=y CONFIG_IRQ_DOMAIN_HIERARCHY=y CONFIG_GENERIC_MSI_IRQ=y -CONFIG_GENERIC_MSI_IRQ_DOMAIN=y CONFIG_HANDLE_DOMAIN_IRQ=y # CONFIG_IRQ_DOMAIN_DEBUG is not set +CONFIG_IRQ_FORCED_THREADING=y CONFIG_SPARSE_IRQ=y -CONFIG_GENERIC_TIME_VSYSCALL=y CONFIG_GENERIC_CLOCKEVENTS=y -CONFIG_GENERIC_CLOCKEVENTS_BUILD=y CONFIG_ARCH_HAS_TICK_BROADCAST=y CONFIG_GENERIC_CLOCKEVENTS_BROADCAST=y @@ -75,136 +79,109 @@ CONFIG_NO_HZ_COMMON=y # CONFIG_HZ_PERIODIC is not set CONFIG_NO_HZ_IDLE=y # CONFIG_NO_HZ_FULL is not set -# CONFIG_NO_HZ is not set -CONFIG_HIGH_RES_TIMERS=y +CONFIG_NO_HZ=y +# CONFIG_HIGH_RES_TIMERS is not set # # CPU/Task time and stats accounting # CONFIG_TICK_CPU_ACCOUNTING=y # CONFIG_VIRT_CPU_ACCOUNTING_GEN is not set -CONFIG_BSD_PROCESS_ACCT=y -CONFIG_BSD_PROCESS_ACCT_V3=y -CONFIG_TASKSTATS=y -CONFIG_TASK_DELAY_ACCT=y -CONFIG_TASK_XACCT=y -CONFIG_TASK_IO_ACCOUNTING=y +# CONFIG_IRQ_TIME_ACCOUNTING is not set +# CONFIG_BSD_PROCESS_ACCT is not set +# CONFIG_TASKSTATS is not set # # RCU Subsystem # -CONFIG_TREE_PREEMPT_RCU=y CONFIG_PREEMPT_RCU=y +# CONFIG_RCU_EXPERT is not set +CONFIG_SRCU=y # CONFIG_TASKS_RCU is not set CONFIG_RCU_STALL_COMMON=y -# CONFIG_RCU_USER_QS is not set -CONFIG_RCU_FANOUT=64 -CONFIG_RCU_FANOUT_LEAF=16 -# CONFIG_RCU_FANOUT_EXACT is not set -# CONFIG_RCU_FAST_NO_HZ is not set # CONFIG_TREE_RCU_TRACE is not set -# CONFIG_RCU_BOOST is not set -# CONFIG_RCU_NOCB_CPU is not set +# CONFIG_RCU_EXPEDITE_BOOT is not set CONFIG_BUILD_BIN2C=y CONFIG_IKCONFIG=y CONFIG_IKCONFIG_PROC=y CONFIG_LOG_BUF_SHIFT=14 CONFIG_LOG_CPU_MAX_BUF_SHIFT=12 CONFIG_GENERIC_SCHED_CLOCK=y -CONFIG_CGROUPS=y -# CONFIG_CGROUP_DEBUG is not set -# CONFIG_CGROUP_FREEZER is not set -# CONFIG_CGROUP_DEVICE is not set -# CONFIG_CPUSETS is not set -# CONFIG_CGROUP_CPUACCT is not set -CONFIG_RESOURCE_COUNTERS=y 
-CONFIG_MEMCG=y -CONFIG_MEMCG_SWAP=y -CONFIG_MEMCG_SWAP_ENABLED=y -CONFIG_MEMCG_KMEM=y -CONFIG_CGROUP_HUGETLB=y -# CONFIG_CGROUP_PERF is not set -CONFIG_CGROUP_SCHED=y -CONFIG_FAIR_GROUP_SCHED=y -# CONFIG_CFS_BANDWIDTH is not set -# CONFIG_RT_GROUP_SCHED is not set -# CONFIG_BLK_CGROUP is not set +# CONFIG_CGROUPS is not set # CONFIG_CHECKPOINT_RESTORE is not set -CONFIG_NAMESPACES=y -# CONFIG_UTS_NS is not set -# CONFIG_IPC_NS is not set -# CONFIG_USER_NS is not set -# CONFIG_PID_NS is not set -CONFIG_NET_NS=y -CONFIG_SCHED_AUTOGROUP=y +# CONFIG_NAMESPACES is not set +# CONFIG_SCHED_AUTOGROUP is not set # CONFIG_SYSFS_DEPRECATED is not set # CONFIG_RELAY is not set -CONFIG_BLK_DEV_INITRD=y -CONFIG_INITRAMFS_SOURCE="" -CONFIG_RD_GZIP=y -CONFIG_RD_BZIP2=y -CONFIG_RD_LZMA=y -CONFIG_RD_XZ=y -CONFIG_RD_LZO=y -CONFIG_RD_LZ4=y +# CONFIG_BLK_DEV_INITRD is not set # CONFIG_CC_OPTIMIZE_FOR_SIZE is not set CONFIG_SYSCTL=y -CONFIG_ANON_INODES=y CONFIG_HAVE_UID16=y -CONFIG_SYSCTL_EXCEPTION_TRACE=y CONFIG_BPF=y -# CONFIG_EXPERT is not set +CONFIG_EXPERT=y CONFIG_UID16=y +CONFIG_MULTIUSER=y # CONFIG_SGETMASK_SYSCALL is not set CONFIG_SYSFS_SYSCALL=y -# CONFIG_SYSCTL_SYSCALL is not set +CONFIG_SYSCTL_SYSCALL=y CONFIG_KALLSYMS=y -CONFIG_KALLSYMS_ALL=y +# CONFIG_KALLSYMS_ALL is not set CONFIG_PRINTK=y CONFIG_BUG=y -CONFIG_ELF_CORE=y -CONFIG_BASE_FULL=y +# CONFIG_ELF_CORE is not set +# CONFIG_BASE_FULL is not set CONFIG_FUTEX=y -CONFIG_EPOLL=y -CONFIG_SIGNALFD=y -CONFIG_TIMERFD=y -CONFIG_EVENTFD=y +# CONFIG_EPOLL is not set +# CONFIG_SIGNALFD is not set +# CONFIG_TIMERFD is not set +# CONFIG_EVENTFD is not set # CONFIG_BPF_SYSCALL is not set -CONFIG_SHMEM=y -CONFIG_AIO=y +# CONFIG_SHMEM is not set +# CONFIG_AIO is not set CONFIG_ADVISE_SYSCALLS=y +# CONFIG_USERFAULTFD is not set CONFIG_PCI_QUIRKS=y -# CONFIG_EMBEDDED is not set +CONFIG_MEMBARRIER=y +CONFIG_EMBEDDED=y CONFIG_HAVE_PERF_EVENTS=y CONFIG_PERF_USE_VMALLOC=y # # Kernel Performance Events And Counters # 
-CONFIG_PERF_EVENTS=y -# CONFIG_DEBUG_PERF_USE_VMALLOC is not set +# CONFIG_PERF_EVENTS is not set CONFIG_VM_EVENT_COUNTERS=y +# CONFIG_SLUB_DEBUG is not set # CONFIG_COMPAT_BRK is not set -CONFIG_SLAB=y -# CONFIG_SLUB is not set -# CONFIG_SYSTEM_TRUSTED_KEYRING is not set -CONFIG_PROFILING=y -CONFIG_JUMP_LABEL=y +# CONFIG_SLAB is not set +CONFIG_SLUB=y +# CONFIG_SLOB is not set +CONFIG_SLUB_CPU_PARTIAL=y +# CONFIG_SYSTEM_DATA_VERIFICATION is not set +# CONFIG_PROFILING is not set +CONFIG_HAVE_OPROFILE=y +# CONFIG_KPROBES is not set +# CONFIG_JUMP_LABEL is not set # CONFIG_UPROBES is not set # CONFIG_HAVE_64BIT_ALIGNED_ACCESS is not set CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS=y +CONFIG_ARCH_USE_BUILTIN_BSWAP=y +CONFIG_HAVE_KPROBES=y +CONFIG_HAVE_KRETPROBES=y +CONFIG_HAVE_OPTPROBES=y CONFIG_HAVE_ARCH_TRACEHOOK=y CONFIG_HAVE_DMA_ATTRS=y CONFIG_HAVE_DMA_CONTIGUOUS=y CONFIG_GENERIC_SMP_IDLE_THREAD=y +CONFIG_GENERIC_IDLE_POLL_SETUP=y +CONFIG_HAVE_REGS_AND_STACK_ACCESS_API=y CONFIG_HAVE_CLK=y CONFIG_HAVE_DMA_API_DEBUG=y -CONFIG_HAVE_HW_BREAKPOINT=y CONFIG_HAVE_PERF_REGS=y CONFIG_HAVE_PERF_USER_STACK_DUMP=y CONFIG_HAVE_ARCH_JUMP_LABEL=y -CONFIG_HAVE_RCU_TABLE_FREE=y -CONFIG_ARCH_WANT_COMPAT_IPC_PARSE_VERSION=y +CONFIG_ARCH_WANT_IPC_PARSE_VERSION=y +CONFIG_HAVE_ARCH_SECCOMP_FILTER=y CONFIG_HAVE_CC_STACKPROTECTOR=y # CONFIG_CC_STACKPROTECTOR is not set CONFIG_CC_STACKPROTECTOR_NONE=y @@ -212,31 +189,33 @@ CONFIG_CC_STACKPROTECTOR_NONE=y # CONFIG_CC_STACKPROTECTOR_STRONG is not set CONFIG_HAVE_CONTEXT_TRACKING=y CONFIG_HAVE_VIRT_CPU_ACCOUNTING_GEN=y -CONFIG_HAVE_ARCH_TRANSPARENT_HUGEPAGE=y -CONFIG_MODULES_USE_ELF_RELA=y +CONFIG_HAVE_IRQ_TIME_ACCOUNTING=y +CONFIG_HAVE_MOD_ARCH_SPECIFIC=y +CONFIG_MODULES_USE_ELF_REL=y +CONFIG_ARCH_HAS_ELF_RANDOMIZE=y CONFIG_CLONE_BACKWARDS=y CONFIG_OLD_SIGSUSPEND3=y -CONFIG_COMPAT_OLD_SIGACTION=y +CONFIG_OLD_SIGACTION=y # # GCOV-based kernel profiling # # CONFIG_GCOV_KERNEL is not set +CONFIG_ARCH_HAS_GCOV_PROFILE_ALL=y 
CONFIG_HAVE_GENERIC_DMA_COHERENT=y -CONFIG_SLABINFO=y CONFIG_RT_MUTEXES=y -CONFIG_BASE_SMALL=0 +CONFIG_BASE_SMALL=1 CONFIG_MODULES=y -CONFIG_MODULE_FORCE_LOAD=y +# CONFIG_MODULE_FORCE_LOAD is not set CONFIG_MODULE_UNLOAD=y # CONFIG_MODULE_FORCE_UNLOAD is not set -CONFIG_MODVERSIONS=y +# CONFIG_MODVERSIONS is not set # CONFIG_MODULE_SRCVERSION_ALL is not set # CONFIG_MODULE_SIG is not set # CONFIG_MODULE_COMPRESS is not set -CONFIG_STOP_MACHINE=y CONFIG_BLOCK=y -# CONFIG_BLK_DEV_BSG is not set +CONFIG_LBDAF=y +CONFIG_BLK_DEV_BSG=y # CONFIG_BLK_DEV_BSGLIB is not set # CONFIG_BLK_DEV_INTEGRITY is not set # CONFIG_BLK_CMDLINE_PARSER is not set @@ -264,42 +243,184 @@ CONFIG_MSDOS_PARTITION=y CONFIG_EFI_PARTITION=y # CONFIG_SYSV68_PARTITION is not set # CONFIG_CMDLINE_PARTITION is not set -CONFIG_BLOCK_COMPAT=y # # IO Schedulers # CONFIG_IOSCHED_NOOP=y -# CONFIG_IOSCHED_DEADLINE is not set +CONFIG_IOSCHED_DEADLINE=y CONFIG_IOSCHED_CFQ=y +# CONFIG_DEFAULT_DEADLINE is not set CONFIG_DEFAULT_CFQ=y # CONFIG_DEFAULT_NOOP is not set CONFIG_DEFAULT_IOSCHED="cfq" -CONFIG_PREEMPT_NOTIFIERS=y CONFIG_UNINLINE_SPIN_UNLOCK=y CONFIG_ARCH_SUPPORTS_ATOMIC_RMW=y CONFIG_MUTEX_SPIN_ON_OWNER=y CONFIG_RWSEM_SPIN_ON_OWNER=y +CONFIG_LOCK_SPIN_ON_OWNER=y CONFIG_FREEZER=y # -# Platform selection +# System Type # -CONFIG_ARCH_THUNDER=y -CONFIG_ARCH_VEXPRESS=y -CONFIG_ARCH_XGENE=y -CONFIG_ARCH_LAYERSCAPE=y +CONFIG_MMU=y +CONFIG_ARCH_MULTIPLATFORM=y +# CONFIG_ARCH_REALVIEW is not set +# CONFIG_ARCH_VERSATILE is not set +# CONFIG_ARCH_CLPS711X is not set +# CONFIG_ARCH_GEMINI is not set +# CONFIG_ARCH_EBSA110 is not set +# CONFIG_ARCH_EP93XX is not set +# CONFIG_ARCH_FOOTBRIDGE is not set +# CONFIG_ARCH_NETX is not set +# CONFIG_ARCH_IOP13XX is not set +# CONFIG_ARCH_IOP32X is not set +# CONFIG_ARCH_IOP33X is not set +# CONFIG_ARCH_IXP4XX is not set +# CONFIG_ARCH_DOVE is not set +# CONFIG_ARCH_MV78XX0 is not set +# CONFIG_ARCH_ORION5X is not set +# CONFIG_ARCH_MMP is not set +# CONFIG_ARCH_KS8695 
is not set +# CONFIG_ARCH_W90X900 is not set +# CONFIG_ARCH_LPC32XX is not set +# CONFIG_ARCH_PXA is not set +# CONFIG_ARCH_RPC is not set +# CONFIG_ARCH_SA1100 is not set +# CONFIG_ARCH_S3C24XX is not set +# CONFIG_ARCH_S3C64XX is not set +# CONFIG_ARCH_DAVINCI is not set +# CONFIG_ARCH_OMAP1 is not set + +# +# Multiple platform selection +# + +# +# CPU Core family selection +# +# CONFIG_ARCH_MULTI_V6 is not set +CONFIG_ARCH_MULTI_V7=y +CONFIG_ARCH_MULTI_V6_V7=y +# CONFIG_ARCH_MULTI_CPU_AUTO is not set +# CONFIG_ARCH_VIRT is not set +# CONFIG_ARCH_MVEBU is not set +# CONFIG_ARCH_ALPINE is not set +# CONFIG_ARCH_AT91 is not set +# CONFIG_ARCH_BCM is not set +# CONFIG_ARCH_BERLIN is not set +# CONFIG_ARCH_DIGICOLOR is not set +# CONFIG_ARCH_HIGHBANK is not set +# CONFIG_ARCH_HISI is not set +CONFIG_ARCH_XGS_IPROC=y + +# +# XGS iProc SoC based Machine types +# +CONFIG_MACH_HX4=y +# CONFIG_MACH_HR2 is not set +# CONFIG_MACH_KT2 is not set +# CONFIG_MACH_GH is not set +# CONFIG_MACH_SB2 is not set +# CONFIG_MACH_HR3 is not set +# CONFIG_MACH_GH2 is not set +# CONFIG_MACH_IPROC_EMULATION is not set +# CONFIG_ARCH_KEYSTONE is not set +# CONFIG_ARCH_MESON is not set +# CONFIG_ARCH_MXC is not set +# CONFIG_ARCH_MEDIATEK is not set + +# +# TI OMAP/AM/DM/DRA Family +# +# CONFIG_ARCH_OMAP3 is not set +# CONFIG_ARCH_OMAP4 is not set +# CONFIG_SOC_OMAP5 is not set +# CONFIG_SOC_AM33XX is not set +# CONFIG_SOC_AM43XX is not set +# CONFIG_SOC_DRA7XX is not set +# CONFIG_ARCH_QCOM is not set +# CONFIG_ARCH_ROCKCHIP is not set +# CONFIG_ARCH_SOCFPGA is not set +# CONFIG_PLAT_SPEAR is not set +# CONFIG_ARCH_STI is not set +# CONFIG_ARCH_S5PV210 is not set +# CONFIG_ARCH_EXYNOS is not set +# CONFIG_ARCH_SHMOBILE_MULTI is not set +# CONFIG_ARCH_SUNXI is not set +# CONFIG_ARCH_SIRF is not set +# CONFIG_ARCH_TEGRA is not set +# CONFIG_ARCH_UNIPHIER is not set +# CONFIG_ARCH_U8500 is not set +# CONFIG_ARCH_VEXPRESS is not set +# CONFIG_ARCH_WM8850 is not set +# CONFIG_ARCH_ZX is not set 
+# CONFIG_ARCH_ZYNQ is not set + +# +# Processor Type +# +CONFIG_CPU_V7=y +CONFIG_CPU_32v6K=y +CONFIG_CPU_32v7=y +CONFIG_CPU_ABRT_EV7=y +CONFIG_CPU_PABRT_V7=y +CONFIG_CPU_CACHE_V7=y +CONFIG_CPU_CACHE_VIPT=y +CONFIG_CPU_COPY_V6=y +CONFIG_CPU_TLB_V7=y +CONFIG_CPU_HAS_ASID=y +CONFIG_CPU_CP15=y +CONFIG_CPU_CP15_MMU=y + +# +# Processor Features +# +# CONFIG_ARM_LPAE is not set +# CONFIG_ARCH_PHYS_ADDR_T_64BIT is not set +CONFIG_ARM_THUMB=y +# CONFIG_ARM_THUMBEE is not set +CONFIG_ARM_VIRT_EXT=y +CONFIG_SWP_EMULATE=y +# CONFIG_CPU_BIG_ENDIAN is not set +# CONFIG_CPU_ICACHE_DISABLE is not set +# CONFIG_CPU_BPREDICT_DISABLE is not set +CONFIG_KUSER_HELPERS=y +# CONFIG_VDSO is not set +CONFIG_OUTER_CACHE=y +CONFIG_OUTER_CACHE_SYNC=y +CONFIG_MIGHT_HAVE_CACHE_L2X0=y +CONFIG_CACHE_L2X0=y +# CONFIG_PL310_ERRATA_588369 is not set +# CONFIG_PL310_ERRATA_727915 is not set +# CONFIG_PL310_ERRATA_753970 is not set +# CONFIG_PL310_ERRATA_769419 is not set +CONFIG_ARM_L1_CACHE_SHIFT_6=y +CONFIG_ARM_L1_CACHE_SHIFT=6 +CONFIG_ARM_DMA_MEM_BUFFERABLE=y +CONFIG_ARM_HEAVY_MB=y +CONFIG_ARCH_SUPPORTS_BIG_ENDIAN=y +# CONFIG_ARM_KERNMEM_PERMS is not set +CONFIG_MULTI_IRQ_HANDLER=y +# CONFIG_ARM_ERRATA_430973 is not set +# CONFIG_ARM_ERRATA_643719 is not set +# CONFIG_ARM_ERRATA_720789 is not set +CONFIG_ARM_ERRATA_754322=y +# CONFIG_ARM_ERRATA_754327 is not set +CONFIG_ARM_ERRATA_764369=y +CONFIG_ARM_ERRATA_775420=y +# CONFIG_ARM_ERRATA_798181 is not set +# CONFIG_ARM_ERRATA_773022 is not set # # Bus support # -CONFIG_ARM_AMBA=y CONFIG_PCI=y CONFIG_PCI_DOMAINS=y CONFIG_PCI_DOMAINS_GENERIC=y CONFIG_PCI_SYSCALL=y CONFIG_PCI_MSI=y -CONFIG_PCI_MSI_IRQ_DOMAIN=y # CONFIG_PCI_DEBUG is not set # CONFIG_PCI_REALLOC_ENABLE_AUTO is not set # CONFIG_PCI_STUB is not set @@ -310,144 +431,117 @@ CONFIG_PCI_MSI_IRQ_DOMAIN=y # # PCI host controller drivers # -CONFIG_PCIE_DW=y # CONFIG_PCI_HOST_GENERIC is not set -CONFIG_PCI_XGENE=y -CONFIG_PCI_XGENE_MSI=y -CONFIG_PCI_LAYERSCAPE=y -CONFIG_PCIEPORTBUS=y 
-CONFIG_PCIEAER=y -# CONFIG_PCIE_ECRC is not set -# CONFIG_PCIEAER_INJECT is not set -CONFIG_PCIEASPM=y -# CONFIG_PCIEASPM_DEBUG is not set -CONFIG_PCIEASPM_DEFAULT=y -# CONFIG_PCIEASPM_POWERSAVE is not set -# CONFIG_PCIEASPM_PERFORMANCE is not set -# CONFIG_HOTPLUG_PCI is not set +# CONFIG_PCI_LAYERSCAPE is not set +CONFIG_PCIE_XGS_IPROC=y +# CONFIG_PCIE_IPROC is not set +# CONFIG_PCIE_ALTERA is not set +# CONFIG_PCIEPORTBUS is not set +# CONFIG_PCCARD is not set # # Kernel Features # - -# -# ARM errata workarounds via the alternatives framework -# -CONFIG_ARM64_ERRATUM_826319=y -CONFIG_ARM64_ERRATUM_827319=y -CONFIG_ARM64_ERRATUM_824069=y -CONFIG_ARM64_ERRATUM_819472=y -CONFIG_ARM64_ERRATUM_832075=y -CONFIG_ARM64_ERRATUM_845719=y -CONFIG_ARM64_4K_PAGES=y -# CONFIG_ARM64_64K_PAGES is not set -# CONFIG_ARM64_VA_BITS_39 is not set -CONFIG_ARM64_VA_BITS_48=y -CONFIG_ARM64_VA_BITS=48 -CONFIG_ARM64_PGTABLE_LEVELS=4 -# CONFIG_CPU_BIG_ENDIAN is not set +CONFIG_HAVE_SMP=y CONFIG_SMP=y +CONFIG_SMP_ON_UP=y +CONFIG_ARM_CPU_TOPOLOGY=y # CONFIG_SCHED_MC is not set # CONFIG_SCHED_SMT is not set -CONFIG_NR_CPUS=64 +CONFIG_HAVE_ARM_SCU=y +# CONFIG_HAVE_ARM_ARCH_TIMER is not set +CONFIG_HAVE_ARM_TWD=y +# CONFIG_MCPM is not set +# CONFIG_BIG_LITTLE is not set +CONFIG_VMSPLIT_3G=y +# CONFIG_VMSPLIT_3G_OPT is not set +# CONFIG_VMSPLIT_2G is not set +# CONFIG_VMSPLIT_1G is not set +CONFIG_PAGE_OFFSET=0xC0000000 +CONFIG_NR_CPUS=4 CONFIG_HOTPLUG_CPU=y +# CONFIG_ARM_PSCI is not set +CONFIG_ARCH_NR_GPIO=0 # CONFIG_PREEMPT_NONE is not set # CONFIG_PREEMPT_VOLUNTARY is not set CONFIG_PREEMPT=y CONFIG_PREEMPT_COUNT=y +CONFIG_HZ_FIXED=0 +CONFIG_HZ_100=y +# CONFIG_HZ_200 is not set +# CONFIG_HZ_250 is not set +# CONFIG_HZ_300 is not set +# CONFIG_HZ_500 is not set +# CONFIG_HZ_1000 is not set CONFIG_HZ=100 -CONFIG_ARCH_HAS_HOLES_MEMORYMODEL=y -CONFIG_ARCH_SPARSEMEM_ENABLE=y -CONFIG_ARCH_SPARSEMEM_DEFAULT=y -CONFIG_ARCH_SELECT_MEMORY_MODEL=y +# CONFIG_SCHED_HRTICK is not set +# 
CONFIG_THUMB2_KERNEL is not set +CONFIG_AEABI=y +# CONFIG_OABI_COMPAT is not set +# CONFIG_ARCH_SPARSEMEM_DEFAULT is not set +# CONFIG_ARCH_SELECT_MEMORY_MODEL is not set CONFIG_HAVE_ARCH_PFN_VALID=y -CONFIG_HW_PERF_EVENTS=y -CONFIG_SYS_SUPPORTS_HUGETLBFS=y +# CONFIG_HIGHMEM is not set +CONFIG_CPU_SW_DOMAIN_PAN=y CONFIG_ARCH_WANT_GENERAL_HUGETLB=y -CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y -CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y -CONFIG_SELECT_MEMORY_MODEL=y -CONFIG_SPARSEMEM_MANUAL=y -CONFIG_SPARSEMEM=y -CONFIG_HAVE_MEMORY_PRESENT=y -CONFIG_SPARSEMEM_EXTREME=y -CONFIG_SPARSEMEM_VMEMMAP_ENABLE=y -CONFIG_SPARSEMEM_VMEMMAP=y +# CONFIG_ARM_MODULE_PLTS is not set +CONFIG_FLATMEM=y +CONFIG_FLAT_NODE_MEM_MAP=y CONFIG_HAVE_MEMBLOCK=y CONFIG_NO_BOOTMEM=y CONFIG_MEMORY_ISOLATION=y # CONFIG_HAVE_BOOTMEM_INFO_NODE is not set -CONFIG_PAGEFLAGS_EXTENDED=y CONFIG_SPLIT_PTLOCK_CPUS=4 -CONFIG_MEMORY_BALLOON=y -CONFIG_BALLOON_COMPACTION=y -CONFIG_COMPACTION=y +# CONFIG_COMPACTION is not set CONFIG_MIGRATION=y -CONFIG_PHYS_ADDR_T_64BIT=y -CONFIG_ZONE_DMA_FLAG=1 -CONFIG_BOUNCE=y -CONFIG_MMU_NOTIFIER=y -CONFIG_KSM=y +# CONFIG_PHYS_ADDR_T_64BIT is not set +CONFIG_ZONE_DMA_FLAG=0 +# CONFIG_KSM is not set CONFIG_DEFAULT_MMAP_MIN_ADDR=4096 -CONFIG_TRANSPARENT_HUGEPAGE=y -CONFIG_TRANSPARENT_HUGEPAGE_ALWAYS=y -# CONFIG_TRANSPARENT_HUGEPAGE_MADVISE is not set # CONFIG_CLEANCACHE is not set # CONFIG_FRONTSWAP is not set CONFIG_CMA=y -# CONFIG_CMA_DEBUG is not set +CONFIG_CMA_DEBUG=y +# CONFIG_CMA_DEBUGFS is not set CONFIG_CMA_AREAS=7 # CONFIG_ZPOOL is not set # CONFIG_ZBUD is not set # CONFIG_ZSMALLOC is not set -CONFIG_GENERIC_EARLY_IOREMAP=y -# CONFIG_XEN is not set +# CONFIG_IDLE_PAGE_TRACKING is not set CONFIG_FORCE_MAX_ZONEORDER=11 +CONFIG_ALIGNMENT_TRAP=y +# CONFIG_UACCESS_WITH_MEMCPY is not set +# CONFIG_SECCOMP is not set +CONFIG_SWIOTLB=y +CONFIG_IOMMU_HELPER=y +# CONFIG_XEN is not set # # Boot options # -CONFIG_CMDLINE="console=ttyAMA0" +CONFIG_USE_OF=y +CONFIG_ATAGS=y +# 
CONFIG_DEPRECATED_PARAM_STRUCT is not set +CONFIG_ZBOOT_ROM_TEXT=0x0 +CONFIG_ZBOOT_ROM_BSS=0x0 +# CONFIG_ARM_APPENDED_DTB is not set +CONFIG_CMDLINE="console=ttyS0,115200n8 maxcpus=2 mem=496M" +CONFIG_CMDLINE_FROM_BOOTLOADER=y +# CONFIG_CMDLINE_EXTEND is not set # CONFIG_CMDLINE_FORCE is not set -CONFIG_EFI_STUB=y -CONFIG_EFI=y - -# -# Userspace binary formats -# -CONFIG_BINFMT_ELF=y -CONFIG_COMPAT_BINFMT_ELF=y -CONFIG_ARCH_BINFMT_ELF_RANDOMIZE_PIE=y -# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set -CONFIG_BINFMT_SCRIPT=y -# CONFIG_HAVE_AOUT is not set -# CONFIG_BINFMT_MISC is not set -CONFIG_COREDUMP=y -CONFIG_COMPAT=y -CONFIG_SYSVIPC_COMPAT=y - -# -# Power management options -# -CONFIG_SUSPEND=y -CONFIG_SUSPEND_FREEZER=y -CONFIG_PM_SLEEP=y -CONFIG_PM_SLEEP_SMP=y -# CONFIG_PM_AUTOSLEEP is not set -# CONFIG_PM_WAKELOCKS is not set -# CONFIG_PM_RUNTIME is not set -CONFIG_PM=y -# CONFIG_PM_DEBUG is not set -CONFIG_PM_CLK=y -# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set -CONFIG_CPU_PM=y -CONFIG_ARCH_SUSPEND_POSSIBLE=y -CONFIG_ARM64_CPU_SUSPEND=y +# CONFIG_KEXEC is not set +# CONFIG_CRASH_DUMP is not set +CONFIG_AUTO_ZRELADDR=y # # CPU Power Management # +# +# CPU Frequency scaling +# +# CONFIG_CPU_FREQ is not set + # # CPU Idle # @@ -455,29 +549,45 @@ CONFIG_ARM64_CPU_SUSPEND=y # CONFIG_ARCH_NEEDS_CPU_IDLE_COUPLED is not set # -# CPU Frequency scaling +# Floating point emulation # -CONFIG_CPU_FREQ=y -CONFIG_CPU_FREQ_GOV_COMMON=y -CONFIG_CPU_FREQ_STAT=y -# CONFIG_CPU_FREQ_STAT_DETAILS is not set -CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y -# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set -# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set -CONFIG_CPU_FREQ_GOV_PERFORMANCE=y -CONFIG_CPU_FREQ_GOV_POWERSAVE=y -CONFIG_CPU_FREQ_GOV_USERSPACE=y -CONFIG_CPU_FREQ_GOV_ONDEMAND=y -CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y -# CONFIG_CPUFREQ_DT is not set # -# ARM CPU frequency scaling drivers +# At least one emulation must be 
selected # -# CONFIG_ARM_KIRKWOOD_CPUFREQ is not set -CONFIG_ARM64_ERRATUM_843419=y +# CONFIG_VFP is not set + +# +# Userspace binary formats +# +CONFIG_BINFMT_ELF=y +CONFIG_BINFMT_SCRIPT=y +# CONFIG_HAVE_AOUT is not set +# CONFIG_BINFMT_MISC is not set +CONFIG_COREDUMP=y + +# +# Power management options +# +CONFIG_SUSPEND=y +CONFIG_SUSPEND_FREEZER=y +# CONFIG_SUSPEND_SKIP_SYNC is not set +# CONFIG_HIBERNATION is not set +CONFIG_PM_SLEEP=y +CONFIG_PM_SLEEP_SMP=y +# CONFIG_PM_AUTOSLEEP is not set +# CONFIG_PM_WAKELOCKS is not set +CONFIG_PM=y +# CONFIG_PM_DEBUG is not set +# CONFIG_APM_EMULATION is not set +CONFIG_PM_CLK=y +# CONFIG_WQ_POWER_EFFICIENT_DEFAULT is not set +CONFIG_CPU_PM=y +CONFIG_ARCH_SUSPEND_POSSIBLE=y +CONFIG_ARM_CPU_SUSPEND=y +CONFIG_ARCH_HIBERNATION_POSSIBLE=y CONFIG_NET=y +CONFIG_NET_INGRESS=y # # Networking options @@ -487,19 +597,17 @@ CONFIG_PACKET=y CONFIG_UNIX=y # CONFIG_UNIX_DIAG is not set CONFIG_XFRM=y -CONFIG_XFRM_ALGO=y # CONFIG_XFRM_USER is not set # CONFIG_XFRM_SUB_POLICY is not set # CONFIG_XFRM_MIGRATE is not set # CONFIG_XFRM_STATISTICS is not set -CONFIG_XFRM_IPCOMP=y # CONFIG_NET_KEY is not set CONFIG_INET=y CONFIG_IP_MULTICAST=y CONFIG_IP_ADVANCED_ROUTER=y -CONFIG_IP_FIB_TRIE_STATS=y +# CONFIG_IP_FIB_TRIE_STATS is not set CONFIG_IP_MULTIPLE_TABLES=y -CONFIG_IP_ROUTE_MULTIPATH=y +# CONFIG_IP_ROUTE_MULTIPATH is not set # CONFIG_IP_ROUTE_VERBOSE is not set CONFIG_IP_PNP=y CONFIG_IP_PNP_DHCP=y @@ -508,92 +616,240 @@ CONFIG_IP_PNP_BOOTP=y # CONFIG_NET_IPIP is not set # CONFIG_NET_IPGRE_DEMUX is not set CONFIG_NET_IP_TUNNEL=y -CONFIG_IP_MROUTE=y -# CONFIG_IP_MROUTE_MULTIPLE_TABLES is not set -# CONFIG_IP_PIMSM_V1 is not set -CONFIG_IP_PIMSM_V2=y +# CONFIG_IP_MROUTE is not set # CONFIG_SYN_COOKIES is not set -# CONFIG_NET_IPVTI is not set # CONFIG_NET_UDP_TUNNEL is not set # CONFIG_NET_FOU is not set -# CONFIG_GENEVE is not set +# CONFIG_NET_FOU_IP_TUNNELS is not set # CONFIG_INET_AH is not set # CONFIG_INET_ESP is not set # 
CONFIG_INET_IPCOMP is not set # CONFIG_INET_XFRM_TUNNEL is not set CONFIG_INET_TUNNEL=y -CONFIG_INET_XFRM_MODE_TRANSPORT=y -CONFIG_INET_XFRM_MODE_TUNNEL=y -CONFIG_INET_XFRM_MODE_BEET=y +# CONFIG_INET_XFRM_MODE_TRANSPORT is not set +# CONFIG_INET_XFRM_MODE_TUNNEL is not set +# CONFIG_INET_XFRM_MODE_BEET is not set # CONFIG_INET_LRO is not set -CONFIG_INET_DIAG=y -CONFIG_INET_TCP_DIAG=y -# CONFIG_INET_UDP_DIAG is not set -CONFIG_TCP_CONG_ADVANCED=y -CONFIG_TCP_CONG_BIC=y +# CONFIG_INET_DIAG is not set +# CONFIG_TCP_CONG_ADVANCED is not set CONFIG_TCP_CONG_CUBIC=y -CONFIG_TCP_CONG_WESTWOOD=y -CONFIG_TCP_CONG_HTCP=y -# CONFIG_TCP_CONG_HSTCP is not set -# CONFIG_TCP_CONG_HYBLA is not set -# CONFIG_TCP_CONG_VEGAS is not set -# CONFIG_TCP_CONG_SCALABLE is not set -# CONFIG_TCP_CONG_LP is not set -# CONFIG_TCP_CONG_VENO is not set -# CONFIG_TCP_CONG_YEAH is not set -# CONFIG_TCP_CONG_ILLINOIS is not set -# CONFIG_TCP_CONG_DCTCP is not set -# CONFIG_DEFAULT_BIC is not set -CONFIG_DEFAULT_CUBIC=y -# CONFIG_DEFAULT_HTCP is not set -# CONFIG_DEFAULT_WESTWOOD is not set -# CONFIG_DEFAULT_RENO is not set CONFIG_DEFAULT_TCP_CONG="cubic" # CONFIG_TCP_MD5SIG is not set CONFIG_IPV6=y -CONFIG_IPV6_ROUTER_PREF=y -CONFIG_IPV6_ROUTE_INFO=y -CONFIG_IPV6_OPTIMISTIC_DAD=y -CONFIG_INET6_AH=y -CONFIG_INET6_ESP=y -CONFIG_INET6_IPCOMP=y -CONFIG_IPV6_MIP6=y -CONFIG_INET6_XFRM_TUNNEL=y +# CONFIG_IPV6_ROUTER_PREF is not set +# CONFIG_IPV6_OPTIMISTIC_DAD is not set +# CONFIG_INET6_AH is not set +# CONFIG_INET6_ESP is not set +# CONFIG_INET6_IPCOMP is not set +# CONFIG_IPV6_MIP6 is not set +# CONFIG_IPV6_ILA is not set +# CONFIG_INET6_XFRM_TUNNEL is not set CONFIG_INET6_TUNNEL=y CONFIG_INET6_XFRM_MODE_TRANSPORT=y CONFIG_INET6_XFRM_MODE_TUNNEL=y CONFIG_INET6_XFRM_MODE_BEET=y -CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION=y +# CONFIG_INET6_XFRM_MODE_ROUTEOPTIMIZATION is not set # CONFIG_IPV6_VTI is not set CONFIG_IPV6_SIT=y # CONFIG_IPV6_SIT_6RD is not set CONFIG_IPV6_NDISC_NODETYPE=y CONFIG_IPV6_TUNNEL=y 
# CONFIG_IPV6_GRE is not set -CONFIG_IPV6_MULTIPLE_TABLES=y -CONFIG_IPV6_SUBTREES=y +# CONFIG_IPV6_MULTIPLE_TABLES is not set # CONFIG_IPV6_MROUTE is not set -# CONFIG_NETLABEL is not set # CONFIG_NETWORK_SECMARK is not set CONFIG_NET_PTP_CLASSIFY=y # CONFIG_NETWORK_PHY_TIMESTAMPING is not set -# CONFIG_NETFILTER is not set +CONFIG_NETFILTER=y +CONFIG_NETFILTER_DEBUG=y +CONFIG_NETFILTER_ADVANCED=y +CONFIG_BRIDGE_NETFILTER=y + +# +# Core Netfilter Configuration +# +CONFIG_NETFILTER_INGRESS=y +CONFIG_NETFILTER_NETLINK=y +# CONFIG_NETFILTER_NETLINK_ACCT is not set +CONFIG_NETFILTER_NETLINK_QUEUE=y +CONFIG_NETFILTER_NETLINK_LOG=y +CONFIG_NF_CONNTRACK=y +CONFIG_NF_CONNTRACK_MARK=y +# CONFIG_NF_CONNTRACK_ZONES is not set +CONFIG_NF_CONNTRACK_PROCFS=y +# CONFIG_NF_CONNTRACK_EVENTS is not set +# CONFIG_NF_CONNTRACK_TIMEOUT is not set +# CONFIG_NF_CONNTRACK_TIMESTAMP is not set +# CONFIG_NF_CT_PROTO_DCCP is not set +# CONFIG_NF_CT_PROTO_SCTP is not set +# CONFIG_NF_CT_PROTO_UDPLITE is not set +# CONFIG_NF_CONNTRACK_AMANDA is not set +CONFIG_NF_CONNTRACK_FTP=y +# CONFIG_NF_CONNTRACK_H323 is not set +# CONFIG_NF_CONNTRACK_IRC is not set +# CONFIG_NF_CONNTRACK_NETBIOS_NS is not set +# CONFIG_NF_CONNTRACK_SNMP is not set +# CONFIG_NF_CONNTRACK_PPTP is not set +# CONFIG_NF_CONNTRACK_SANE is not set +# CONFIG_NF_CONNTRACK_SIP is not set +CONFIG_NF_CONNTRACK_TFTP=y +# CONFIG_NF_CT_NETLINK is not set +# CONFIG_NF_CT_NETLINK_TIMEOUT is not set +# CONFIG_NF_TABLES is not set +CONFIG_NETFILTER_XTABLES=y + +# +# Xtables combined modules +# +CONFIG_NETFILTER_XT_MARK=y +CONFIG_NETFILTER_XT_CONNMARK=y + +# +# Xtables targets +# +# CONFIG_NETFILTER_XT_TARGET_CHECKSUM is not set +# CONFIG_NETFILTER_XT_TARGET_CLASSIFY is not set +# CONFIG_NETFILTER_XT_TARGET_CONNMARK is not set +CONFIG_NETFILTER_XT_TARGET_CT=y +# CONFIG_NETFILTER_XT_TARGET_DSCP is not set +CONFIG_NETFILTER_XT_TARGET_HL=y +# CONFIG_NETFILTER_XT_TARGET_HMARK is not set +# CONFIG_NETFILTER_XT_TARGET_IDLETIMER is not set +# 
CONFIG_NETFILTER_XT_TARGET_LOG is not set +# CONFIG_NETFILTER_XT_TARGET_MARK is not set +# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set +# CONFIG_NETFILTER_XT_TARGET_NFQUEUE is not set +# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set +# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set +# CONFIG_NETFILTER_XT_TARGET_TEE is not set +# CONFIG_NETFILTER_XT_TARGET_TPROXY is not set +# CONFIG_NETFILTER_XT_TARGET_TRACE is not set +CONFIG_NETFILTER_XT_TARGET_TCPMSS=y +# CONFIG_NETFILTER_XT_TARGET_TCPOPTSTRIP is not set + +# +# Xtables matches +# +# CONFIG_NETFILTER_XT_MATCH_ADDRTYPE is not set +# CONFIG_NETFILTER_XT_MATCH_BPF is not set +# CONFIG_NETFILTER_XT_MATCH_CLUSTER is not set +# CONFIG_NETFILTER_XT_MATCH_COMMENT is not set +# CONFIG_NETFILTER_XT_MATCH_CONNBYTES is not set +# CONFIG_NETFILTER_XT_MATCH_CONNLABEL is not set +# CONFIG_NETFILTER_XT_MATCH_CONNLIMIT is not set +CONFIG_NETFILTER_XT_MATCH_CONNMARK=y +CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y +# CONFIG_NETFILTER_XT_MATCH_CPU is not set +# CONFIG_NETFILTER_XT_MATCH_DCCP is not set +# CONFIG_NETFILTER_XT_MATCH_DEVGROUP is not set +# CONFIG_NETFILTER_XT_MATCH_DSCP is not set +CONFIG_NETFILTER_XT_MATCH_ECN=y +# CONFIG_NETFILTER_XT_MATCH_ESP is not set +# CONFIG_NETFILTER_XT_MATCH_HASHLIMIT is not set +CONFIG_NETFILTER_XT_MATCH_HELPER=y +CONFIG_NETFILTER_XT_MATCH_HL=y +# CONFIG_NETFILTER_XT_MATCH_IPCOMP is not set +# CONFIG_NETFILTER_XT_MATCH_IPRANGE is not set +# CONFIG_NETFILTER_XT_MATCH_L2TP is not set +# CONFIG_NETFILTER_XT_MATCH_LENGTH is not set +# CONFIG_NETFILTER_XT_MATCH_LIMIT is not set +# CONFIG_NETFILTER_XT_MATCH_MAC is not set +# CONFIG_NETFILTER_XT_MATCH_MARK is not set +# CONFIG_NETFILTER_XT_MATCH_MULTIPORT is not set +# CONFIG_NETFILTER_XT_MATCH_NFACCT is not set +# CONFIG_NETFILTER_XT_MATCH_OSF is not set +# CONFIG_NETFILTER_XT_MATCH_OWNER is not set +# CONFIG_NETFILTER_XT_MATCH_POLICY is not set +# CONFIG_NETFILTER_XT_MATCH_PHYSDEV is not set +# CONFIG_NETFILTER_XT_MATCH_PKTTYPE is not set +# 
CONFIG_NETFILTER_XT_MATCH_QUOTA is not set +# CONFIG_NETFILTER_XT_MATCH_RATEEST is not set +# CONFIG_NETFILTER_XT_MATCH_REALM is not set +# CONFIG_NETFILTER_XT_MATCH_RECENT is not set +# CONFIG_NETFILTER_XT_MATCH_SCTP is not set +# CONFIG_NETFILTER_XT_MATCH_SOCKET is not set +CONFIG_NETFILTER_XT_MATCH_STATE=y +# CONFIG_NETFILTER_XT_MATCH_STATISTIC is not set +# CONFIG_NETFILTER_XT_MATCH_STRING is not set +CONFIG_NETFILTER_XT_MATCH_TCPMSS=y +# CONFIG_NETFILTER_XT_MATCH_TIME is not set +# CONFIG_NETFILTER_XT_MATCH_U32 is not set +# CONFIG_IP_SET is not set +# CONFIG_IP_VS is not set + +# +# IP: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV4=y +CONFIG_NF_CONNTRACK_IPV4=y +CONFIG_NF_CONNTRACK_PROC_COMPAT=y +# CONFIG_NF_DUP_IPV4 is not set +# CONFIG_NF_LOG_ARP is not set +# CONFIG_NF_LOG_IPV4 is not set +CONFIG_NF_REJECT_IPV4=y +# CONFIG_NF_NAT_IPV4 is not set +CONFIG_IP_NF_IPTABLES=y +CONFIG_IP_NF_MATCH_AH=y +CONFIG_IP_NF_MATCH_ECN=y +# CONFIG_IP_NF_MATCH_RPFILTER is not set +CONFIG_IP_NF_MATCH_TTL=y +CONFIG_IP_NF_FILTER=y +CONFIG_IP_NF_TARGET_REJECT=y +# CONFIG_IP_NF_TARGET_SYNPROXY is not set +# CONFIG_IP_NF_NAT is not set +CONFIG_IP_NF_MANGLE=y +# CONFIG_IP_NF_TARGET_CLUSTERIP is not set +CONFIG_IP_NF_TARGET_ECN=y +CONFIG_IP_NF_TARGET_TTL=y +CONFIG_IP_NF_RAW=y +CONFIG_IP_NF_ARPTABLES=y +CONFIG_IP_NF_ARPFILTER=y +CONFIG_IP_NF_ARP_MANGLE=y + +# +# IPv6: Netfilter Configuration +# +CONFIG_NF_DEFRAG_IPV6=y +CONFIG_NF_CONNTRACK_IPV6=y +# CONFIG_NF_DUP_IPV6 is not set +CONFIG_NF_REJECT_IPV6=y +# CONFIG_NF_LOG_IPV6 is not set +# CONFIG_NF_NAT_IPV6 is not set +CONFIG_IP6_NF_IPTABLES=y +# CONFIG_IP6_NF_MATCH_AH is not set +# CONFIG_IP6_NF_MATCH_EUI64 is not set +# CONFIG_IP6_NF_MATCH_FRAG is not set +# CONFIG_IP6_NF_MATCH_OPTS is not set +# CONFIG_IP6_NF_MATCH_HL is not set +# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set +# CONFIG_IP6_NF_MATCH_MH is not set +CONFIG_IP6_NF_MATCH_RPFILTER=y +CONFIG_IP6_NF_MATCH_RT=y +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_FILTER=y 
+CONFIG_IP6_NF_TARGET_REJECT=y +# CONFIG_IP6_NF_TARGET_SYNPROXY is not set +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y +# CONFIG_IP6_NF_NAT is not set +# CONFIG_BRIDGE_NF_EBTABLES is not set # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set # CONFIG_RDS is not set # CONFIG_TIPC is not set # CONFIG_ATM is not set # CONFIG_L2TP is not set -CONFIG_STP=m -CONFIG_BRIDGE=m +CONFIG_STP=y +CONFIG_GARP=y +CONFIG_BRIDGE=y CONFIG_BRIDGE_IGMP_SNOOPING=y # CONFIG_BRIDGE_VLAN_FILTERING is not set CONFIG_HAVE_NET_DSA=y CONFIG_VLAN_8021Q=y -# CONFIG_VLAN_8021Q_GVRP is not set +CONFIG_VLAN_8021Q_GVRP=y # CONFIG_VLAN_8021Q_MVRP is not set # CONFIG_DECNET is not set -CONFIG_LLC=m +CONFIG_LLC=y # CONFIG_LLC2 is not set # CONFIG_IPX is not set # CONFIG_ATALK is not set @@ -602,68 +858,24 @@ CONFIG_LLC=m # CONFIG_PHONET is not set # CONFIG_6LOWPAN is not set # CONFIG_IEEE802154 is not set -CONFIG_NET_SCHED=y - -# -# Queueing/Scheduling -# -# CONFIG_NET_SCH_CBQ is not set -# CONFIG_NET_SCH_HTB is not set -# CONFIG_NET_SCH_HFSC is not set -# CONFIG_NET_SCH_PRIO is not set -# CONFIG_NET_SCH_MULTIQ is not set -# CONFIG_NET_SCH_RED is not set -# CONFIG_NET_SCH_SFB is not set -# CONFIG_NET_SCH_SFQ is not set -# CONFIG_NET_SCH_TEQL is not set -# CONFIG_NET_SCH_TBF is not set -# CONFIG_NET_SCH_GRED is not set -# CONFIG_NET_SCH_DSMARK is not set -# CONFIG_NET_SCH_NETEM is not set -# CONFIG_NET_SCH_DRR is not set -# CONFIG_NET_SCH_MQPRIO is not set -# CONFIG_NET_SCH_CHOKE is not set -# CONFIG_NET_SCH_QFQ is not set -# CONFIG_NET_SCH_CODEL is not set -# CONFIG_NET_SCH_FQ_CODEL is not set -# CONFIG_NET_SCH_FQ is not set -# CONFIG_NET_SCH_HHF is not set -# CONFIG_NET_SCH_PIE is not set -# CONFIG_NET_SCH_PLUG is not set - -# -# Classification -# -# CONFIG_NET_CLS_BASIC is not set -# CONFIG_NET_CLS_TCINDEX is not set -# CONFIG_NET_CLS_ROUTE4 is not set -# CONFIG_NET_CLS_FW is not set -# CONFIG_NET_CLS_U32 is not set -# CONFIG_NET_CLS_RSVP is not set -# CONFIG_NET_CLS_RSVP6 is not set -# 
CONFIG_NET_CLS_FLOW is not set -# CONFIG_NET_CLS_CGROUP is not set -# CONFIG_NET_CLS_BPF is not set -# CONFIG_NET_EMATCH is not set -# CONFIG_NET_CLS_ACT is not set -CONFIG_NET_SCH_FIFO=y -CONFIG_DCB=y +# CONFIG_NET_SCHED is not set +# CONFIG_DCB is not set CONFIG_DNS_RESOLVER=y # CONFIG_BATMAN_ADV is not set # CONFIG_OPENVSWITCH is not set # CONFIG_VSOCKETS is not set # CONFIG_NETLINK_MMAP is not set # CONFIG_NETLINK_DIAG is not set -# CONFIG_NET_MPLS_GSO is not set +# CONFIG_MPLS is not set # CONFIG_HSR is not set +# CONFIG_NET_SWITCHDEV is not set +# CONFIG_NET_L3_MASTER_DEV is not set CONFIG_RPS=y CONFIG_RFS_ACCEL=y CONFIG_XPS=y -# CONFIG_CGROUP_NET_PRIO is not set -# CONFIG_CGROUP_NET_CLASSID is not set CONFIG_NET_RX_BUSY_POLL=y CONFIG_BQL=y -CONFIG_BPF_JIT=y +# CONFIG_BPF_JIT is not set CONFIG_NET_FLOW_LIMIT=y # @@ -679,24 +891,24 @@ CONFIG_FIB_RULES=y # CONFIG_WIRELESS is not set # CONFIG_WIMAX is not set # CONFIG_RFKILL is not set -# CONFIG_RFKILL_REGULATOR is not set -CONFIG_NET_9P=y -CONFIG_NET_9P_VIRTIO=y -# CONFIG_NET_9P_DEBUG is not set +# CONFIG_NET_9P is not set # CONFIG_CAIF is not set # CONFIG_CEPH_LIB is not set # CONFIG_NFC is not set +# CONFIG_LWTUNNEL is not set CONFIG_HAVE_BPF_JIT=y # # Device Drivers # +CONFIG_ARM_AMBA=y +# CONFIG_TEGRA_AHB is not set # # Generic Driver Options # CONFIG_UEVENT_HELPER=y -CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug" +CONFIG_UEVENT_HELPER_PATH="/sbin/mdev" CONFIG_DEVTMPFS=y CONFIG_DEVTMPFS_MOUNT=y CONFIG_STANDALONE=y @@ -710,16 +922,13 @@ CONFIG_ALLOW_DEV_COREDUMP=y # CONFIG_DEBUG_DEVRES is not set # CONFIG_SYS_HYPERVISOR is not set # CONFIG_GENERIC_CPU_DEVICES is not set -CONFIG_GENERIC_CPU_AUTOPROBE=y -CONFIG_REGMAP=y -CONFIG_REGMAP_MMIO=y # CONFIG_DMA_SHARED_BUFFER is not set CONFIG_DMA_CMA=y # # Default contiguous memory area size: # -CONFIG_CMA_SIZE_MBYTES=16 +CONFIG_CMA_SIZE_MBYTES=32 CONFIG_CMA_SIZE_SEL_MBYTES=y # CONFIG_CMA_SIZE_SEL_PERCENTAGE is not set # CONFIG_CMA_SIZE_SEL_MIN is not set @@ -729,13 
+938,14 @@ CONFIG_CMA_ALIGNMENT=8 # # Bus devices # -# CONFIG_ARM_CCN is not set -CONFIG_VEXPRESS_CONFIG=y +# CONFIG_BRCMSTB_GISB_ARB is not set +# CONFIG_VEXPRESS_CONFIG is not set # CONFIG_CONNECTOR is not set CONFIG_MTD=y -# CONFIG_MTD_TESTS is not set +CONFIG_MTD_TESTS=m # CONFIG_MTD_REDBOOT_PARTS is not set CONFIG_MTD_CMDLINE_PARTS=y +# CONFIG_MTD_AFS_PARTS is not set CONFIG_MTD_OF_PARTS=y # CONFIG_MTD_AR7_PARTS is not set @@ -744,7 +954,7 @@ CONFIG_MTD_OF_PARTS=y # CONFIG_MTD_BLKDEVS=y CONFIG_MTD_BLOCK=y -CONFIG_FTL=y +# CONFIG_FTL is not set # CONFIG_NFTL is not set # CONFIG_INFTL is not set # CONFIG_RFD_FTL is not set @@ -752,18 +962,15 @@ CONFIG_FTL=y # CONFIG_SM_FTL is not set # CONFIG_MTD_OOPS is not set # CONFIG_MTD_SWAP is not set +# CONFIG_MTD_PARTITIONED_MASTER is not set # # RAM/ROM/Flash chip drivers # CONFIG_MTD_CFI=y -# CONFIG_MTD_JEDECPROBE is not set +CONFIG_MTD_JEDECPROBE=y CONFIG_MTD_GEN_PROBE=y -CONFIG_MTD_CFI_ADV_OPTIONS=y -CONFIG_MTD_CFI_NOSWAP=y -# CONFIG_MTD_CFI_BE_BYTE_SWAP is not set -# CONFIG_MTD_CFI_LE_BYTE_SWAP is not set -# CONFIG_MTD_CFI_GEOMETRY is not set +# CONFIG_MTD_CFI_ADV_OPTIONS is not set CONFIG_MTD_MAP_BANK_WIDTH_1=y CONFIG_MTD_MAP_BANK_WIDTH_2=y CONFIG_MTD_MAP_BANK_WIDTH_4=y @@ -774,12 +981,11 @@ CONFIG_MTD_CFI_I1=y CONFIG_MTD_CFI_I2=y # CONFIG_MTD_CFI_I4 is not set # CONFIG_MTD_CFI_I8 is not set -# CONFIG_MTD_OTP is not set CONFIG_MTD_CFI_INTELEXT=y CONFIG_MTD_CFI_AMDSTD=y CONFIG_MTD_CFI_STAA=y CONFIG_MTD_CFI_UTIL=y -CONFIG_MTD_RAM=y +# CONFIG_MTD_RAM is not set # CONFIG_MTD_ROM is not set # CONFIG_MTD_ABSENT is not set @@ -787,18 +993,19 @@ CONFIG_MTD_RAM=y # Mapping drivers for chip access # # CONFIG_MTD_COMPLEX_MAPPINGS is not set -CONFIG_MTD_PHYSMAP=y -# CONFIG_MTD_PHYSMAP_COMPAT is not set -CONFIG_MTD_PHYSMAP_OF=y +# CONFIG_MTD_PHYSMAP is not set +# CONFIG_MTD_PHYSMAP_OF is not set +# CONFIG_MTD_NOR_XGS_IPROC is not set +# CONFIG_MTD_IMPA7 is not set # CONFIG_MTD_INTEL_VR_NOR is not set -CONFIG_MTD_PLATRAM=y +# 
CONFIG_MTD_PLATRAM is not set # # Self-contained MTD device drivers # # CONFIG_MTD_PMC551 is not set # CONFIG_MTD_DATAFLASH is not set -CONFIG_MTD_M25P80=y +CONFIG_MTD_M25P80_IPROC=y # CONFIG_MTD_SST25L is not set # CONFIG_MTD_SLRAM is not set # CONFIG_MTD_PHRAM is not set @@ -814,8 +1021,9 @@ CONFIG_MTD_NAND_ECC=y CONFIG_MTD_NAND=y # CONFIG_MTD_NAND_ECC_BCH is not set # CONFIG_MTD_SM_COMMON is not set -# CONFIG_MTD_NAND_DENALI is not set -CONFIG_MTD_NAND_GPIO=y +# CONFIG_MTD_NAND_DENALI_PCI is not set +# CONFIG_MTD_NAND_DENALI_DT is not set +# CONFIG_MTD_NAND_GPIO is not set # CONFIG_MTD_NAND_OMAP_BCH_BUILD is not set CONFIG_MTD_NAND_IDS=y # CONFIG_MTD_NAND_RICOH is not set @@ -823,24 +1031,30 @@ CONFIG_MTD_NAND_IDS=y # CONFIG_MTD_NAND_DOCG4 is not set # CONFIG_MTD_NAND_CAFE is not set # CONFIG_MTD_NAND_NANDSIM is not set +CONFIG_MTD_NAND_BRCMNAND=y +CONFIG_MTD_NAND_XGS_IPROC=y # CONFIG_MTD_NAND_PLATFORM is not set -CONFIG_MTD_NAND_FSL_IFC=y +# CONFIG_MTD_NAND_HISI504 is not set # CONFIG_MTD_ONENAND is not set # # LPDDR & LPDDR2 PCM memory drivers # # CONFIG_MTD_LPDDR is not set -CONFIG_MTD_SPI_NOR=y -CONFIG_MTD_SPI_NOR_USE_4K_SECTORS=y -# CONFIG_MTD_UBI is not set +# CONFIG_MTD_LPDDR2_NVM is not set +# CONFIG_MTD_SPI_NOR is not set +# CONFIG_MTD_SPI_NOR_USE_4K_SECTORS is not set +CONFIG_MTD_SPI_NOR_IPROC=y +CONFIG_M25PXX_STAY_IN_3BYTE_MODE=y +CONFIG_MTD_UBI=y +CONFIG_MTD_UBI_WL_THRESHOLD=4096 +CONFIG_MTD_UBI_BEB_LIMIT=20 +# CONFIG_MTD_UBI_FASTMAP is not set +# CONFIG_MTD_UBI_GLUEBI is not set +# CONFIG_MTD_UBI_BLOCK is not set CONFIG_DTC=y CONFIG_OF=y - -# -# Device Tree and Open Firmware support -# -# CONFIG_OF_SELFTEST is not set +# CONFIG_OF_UNITTEST is not set CONFIG_OF_FLATTREE=y CONFIG_OF_EARLY_FLATTREE=y CONFIG_OF_ADDRESS=y @@ -852,6 +1066,8 @@ CONFIG_OF_PCI=y CONFIG_OF_PCI_IRQ=y CONFIG_OF_MTD=y CONFIG_OF_RESERVED_MEM=y +# CONFIG_OF_OVERLAY is not set +CONFIG_ARCH_MIGHT_HAVE_PC_PARPORT=y # CONFIG_PARPORT is not set CONFIG_BLK_DEV=y # 
CONFIG_BLK_DEV_NULL_BLK is not set @@ -860,28 +1076,23 @@ CONFIG_BLK_DEV=y # CONFIG_BLK_DEV_DAC960 is not set # CONFIG_BLK_DEV_UMEM is not set # CONFIG_BLK_DEV_COW_COMMON is not set -CONFIG_BLK_DEV_LOOP=y -CONFIG_BLK_DEV_LOOP_MIN_COUNT=8 -# CONFIG_BLK_DEV_CRYPTOLOOP is not set +# CONFIG_BLK_DEV_LOOP is not set # CONFIG_BLK_DEV_DRBD is not set # CONFIG_BLK_DEV_NBD is not set -# CONFIG_BLK_DEV_NVME is not set -# CONFIG_BLK_DEV_SKD is not set # CONFIG_BLK_DEV_SX8 is not set CONFIG_BLK_DEV_RAM=y CONFIG_BLK_DEV_RAM_COUNT=16 -CONFIG_BLK_DEV_RAM_SIZE=262144 -# CONFIG_BLK_DEV_XIP is not set +CONFIG_BLK_DEV_RAM_SIZE=4096 # CONFIG_CDROM_PKTCDVD is not set # CONFIG_ATA_OVER_ETH is not set -CONFIG_VIRTIO_BLK=y +# CONFIG_MG_DISK is not set # CONFIG_BLK_DEV_RBD is not set # CONFIG_BLK_DEV_RSXX is not set +# CONFIG_BLK_DEV_NVME is not set # # Misc devices # -# CONFIG_SENSORS_LIS3LV02D is not set # CONFIG_AD525X_DPOT is not set # CONFIG_DUMMY_IRQ is not set # CONFIG_PHANTOM is not set @@ -905,14 +1116,13 @@ CONFIG_VIRTIO_BLK=y # CONFIG_USB_SWITCH_FSA9480 is not set # CONFIG_LATTICE_ECP3_CONFIG is not set # CONFIG_SRAM is not set -CONFIG_VEXPRESS_SYSCFG=y # CONFIG_C2PORT is not set # # EEPROM support # CONFIG_EEPROM_AT24=y -CONFIG_EEPROM_AT25=y +# CONFIG_EEPROM_AT25 is not set # CONFIG_EEPROM_LEGACY is not set # CONFIG_EEPROM_MAX6875 is not set # CONFIG_EEPROM_93CX6 is not set @@ -923,8 +1133,6 @@ CONFIG_EEPROM_AT25=y # Texas Instruments shared transport line discipline # # CONFIG_TI_ST is not set -# CONFIG_SENSORS_LIS3_SPI is not set -# CONFIG_SENSORS_LIS3_I2C is not set # # Altera FPGA firmware download module @@ -935,6 +1143,10 @@ CONFIG_EEPROM_AT25=y # Intel MIC Bus Driver # +# +# SCIF Bus Driver +# + # # Intel MIC Host Driver # @@ -942,9 +1154,20 @@ CONFIG_EEPROM_AT25=y # # Intel MIC Card Driver # -# CONFIG_GENWQE is not set + +# +# SCIF Driver +# + +# +# Intel MIC Coprocessor State Management (COSM) Drivers +# # CONFIG_ECHO is not set # CONFIG_CXL_BASE is not set +# 
CONFIG_CXL_KERNEL_API is not set +# CONFIG_CXL_EEH is not set +CONFIG_HAVE_IDE=y +# CONFIG_IDE is not set # # SCSI device support @@ -961,13 +1184,13 @@ CONFIG_SCSI_PROC_FS=y # SCSI support type (disk, tape, CD-ROM) # CONFIG_BLK_DEV_SD=y -# CONFIG_CHR_DEV_ST is not set +CONFIG_CHR_DEV_ST=y # CONFIG_CHR_DEV_OSST is not set # CONFIG_BLK_DEV_SR is not set -# CONFIG_CHR_DEV_SG is not set +CONFIG_CHR_DEV_SG=y # CONFIG_CHR_DEV_SCH is not set -# CONFIG_SCSI_CONSTANTS is not set -# CONFIG_SCSI_LOGGING is not set +CONFIG_SCSI_CONSTANTS=y +CONFIG_SCSI_LOGGING=y # CONFIG_SCSI_SCAN_ASYNC is not set # @@ -979,101 +1202,54 @@ CONFIG_BLK_DEV_SD=y # CONFIG_SCSI_SAS_ATTRS is not set # CONFIG_SCSI_SAS_LIBSAS is not set # CONFIG_SCSI_SRP_ATTRS is not set -# CONFIG_SCSI_LOWLEVEL is not set -# CONFIG_SCSI_LOWLEVEL_PCMCIA is not set +CONFIG_SCSI_LOWLEVEL=y +# CONFIG_ISCSI_TCP is not set +# CONFIG_ISCSI_BOOT_SYSFS is not set +# CONFIG_SCSI_CXGB3_ISCSI is not set +# CONFIG_SCSI_CXGB4_ISCSI is not set +# CONFIG_SCSI_BNX2_ISCSI is not set +# CONFIG_BE2ISCSI is not set +# CONFIG_BLK_DEV_3W_XXXX_RAID is not set +# CONFIG_SCSI_HPSA is not set +# CONFIG_SCSI_3W_9XXX is not set +# CONFIG_SCSI_3W_SAS is not set +# CONFIG_SCSI_ACARD is not set +# CONFIG_SCSI_AACRAID is not set +# CONFIG_SCSI_AIC7XXX is not set +# CONFIG_SCSI_AIC79XX is not set +# CONFIG_SCSI_AIC94XX is not set +# CONFIG_SCSI_MVSAS is not set +# CONFIG_SCSI_MVUMI is not set +# CONFIG_SCSI_ADVANSYS is not set +# CONFIG_SCSI_ARCMSR is not set +# CONFIG_SCSI_ESAS2R is not set +# CONFIG_MEGARAID_NEWGEN is not set +# CONFIG_MEGARAID_LEGACY is not set +# CONFIG_MEGARAID_SAS is not set +# CONFIG_SCSI_MPT3SAS is not set +# CONFIG_SCSI_MPT2SAS is not set +# CONFIG_SCSI_UFSHCD is not set +# CONFIG_SCSI_HPTIOP is not set +# CONFIG_SCSI_SNIC is not set +# CONFIG_SCSI_DMX3191D is not set +# CONFIG_SCSI_FUTURE_DOMAIN is not set +# CONFIG_SCSI_IPS is not set +# CONFIG_SCSI_INITIO is not set +# CONFIG_SCSI_INIA100 is not set +# CONFIG_SCSI_STEX is 
not set +# CONFIG_SCSI_SYM53C8XX_2 is not set +# CONFIG_SCSI_QLOGIC_1280 is not set +# CONFIG_SCSI_QLA_ISCSI is not set +# CONFIG_SCSI_DC395x is not set +# CONFIG_SCSI_AM53C974 is not set +# CONFIG_SCSI_NSP32 is not set +# CONFIG_SCSI_WD719X is not set +# CONFIG_SCSI_DEBUG is not set +# CONFIG_SCSI_PMCRAID is not set +# CONFIG_SCSI_PM8001 is not set # CONFIG_SCSI_DH is not set # CONFIG_SCSI_OSD_INITIATOR is not set -CONFIG_HAVE_PATA_PLATFORM=y -CONFIG_ATA=y -# CONFIG_ATA_NONSTANDARD is not set -CONFIG_ATA_VERBOSE_ERROR=y -CONFIG_SATA_PMP=y - -# -# Controllers with non-SFF native interface -# -CONFIG_SATA_AHCI=y -CONFIG_SATA_AHCI_PLATFORM=y -CONFIG_AHCI_XGENE=y -# CONFIG_SATA_INIC162X is not set -# CONFIG_SATA_ACARD_AHCI is not set -# CONFIG_SATA_SIL24 is not set -CONFIG_ATA_SFF=y - -# -# SFF controllers with custom DMA interface -# -# CONFIG_PDC_ADMA is not set -# CONFIG_SATA_QSTOR is not set -# CONFIG_SATA_SX4 is not set -CONFIG_ATA_BMDMA=y - -# -# SATA SFF controllers with BMDMA -# -# CONFIG_ATA_PIIX is not set -# CONFIG_SATA_MV is not set -# CONFIG_SATA_NV is not set -# CONFIG_SATA_PROMISE is not set -# CONFIG_SATA_SIL is not set -# CONFIG_SATA_SIS is not set -# CONFIG_SATA_SVW is not set -# CONFIG_SATA_ULI is not set -# CONFIG_SATA_VIA is not set -# CONFIG_SATA_VITESSE is not set - -# -# PATA SFF controllers with BMDMA -# -# CONFIG_PATA_ALI is not set -# CONFIG_PATA_AMD is not set -# CONFIG_PATA_ARTOP is not set -# CONFIG_PATA_ATIIXP is not set -# CONFIG_PATA_ATP867X is not set -# CONFIG_PATA_CMD64X is not set -# CONFIG_PATA_CYPRESS is not set -# CONFIG_PATA_EFAR is not set -# CONFIG_PATA_HPT366 is not set -# CONFIG_PATA_HPT37X is not set -# CONFIG_PATA_HPT3X2N is not set -# CONFIG_PATA_HPT3X3 is not set -# CONFIG_PATA_IT8213 is not set -# CONFIG_PATA_IT821X is not set -# CONFIG_PATA_JMICRON is not set -# CONFIG_PATA_MARVELL is not set -# CONFIG_PATA_NETCELL is not set -# CONFIG_PATA_NINJA32 is not set -# CONFIG_PATA_NS87415 is not set -# CONFIG_PATA_OLDPIIX is 
not set -# CONFIG_PATA_OPTIDMA is not set -# CONFIG_PATA_PDC2027X is not set -# CONFIG_PATA_PDC_OLD is not set -# CONFIG_PATA_RADISYS is not set -# CONFIG_PATA_RDC is not set -# CONFIG_PATA_SCH is not set -# CONFIG_PATA_SERVERWORKS is not set -# CONFIG_PATA_SIL680 is not set -# CONFIG_PATA_SIS is not set -# CONFIG_PATA_TOSHIBA is not set -# CONFIG_PATA_TRIFLEX is not set -# CONFIG_PATA_VIA is not set -# CONFIG_PATA_WINBOND is not set - -# -# PIO-only SFF controllers -# -# CONFIG_PATA_CMD640_PCI is not set -# CONFIG_PATA_MPIIX is not set -# CONFIG_PATA_NS87410 is not set -# CONFIG_PATA_OPTI is not set -# CONFIG_PATA_PLATFORM is not set -# CONFIG_PATA_RZ1000 is not set - -# -# Generic fallback / legacy drivers -# -# CONFIG_ATA_GENERIC is not set -# CONFIG_PATA_LEGACY is not set +# CONFIG_ATA is not set # CONFIG_MD is not set # CONFIG_TARGET_CORE is not set # CONFIG_FUSION is not set @@ -1083,24 +1259,22 @@ CONFIG_ATA_BMDMA=y # # CONFIG_FIREWIRE is not set # CONFIG_FIREWIRE_NOSY is not set -# CONFIG_I2O is not set CONFIG_NETDEVICES=y -CONFIG_MII=y CONFIG_NET_CORE=y -# CONFIG_BONDING is not set +CONFIG_BONDING=y # CONFIG_DUMMY is not set # CONFIG_EQUALIZER is not set # CONFIG_NET_FC is not set # CONFIG_NET_TEAM is not set -CONFIG_MACVLAN=y -# CONFIG_MACVTAP is not set +# CONFIG_MACVLAN is not set +# CONFIG_IPVLAN is not set # CONFIG_VXLAN is not set # CONFIG_NETCONSOLE is not set # CONFIG_NETPOLL is not set # CONFIG_NET_POLL_CONTROLLER is not set -CONFIG_TUN=y +# CONFIG_TUN is not set +# CONFIG_TUN_VNET_CROSS_LE is not set # CONFIG_VETH is not set -CONFIG_VIRTIO_NET=y # CONFIG_NLMON is not set # CONFIG_ARCNET is not set @@ -1112,12 +1286,7 @@ CONFIG_VIRTIO_NET=y # Distributed Switch Architecture drivers # # CONFIG_NET_DSA_MV88E6XXX is not set -# CONFIG_NET_DSA_MV88E6060 is not set # CONFIG_NET_DSA_MV88E6XXX_NEED_PPU is not set -# CONFIG_NET_DSA_MV88E6131 is not set -# CONFIG_NET_DSA_MV88E6123_61_65 is not set -# CONFIG_NET_DSA_MV88E6171 is not set -# 
CONFIG_NET_DSA_BCM_SF2 is not set CONFIG_ETHERNET=y CONFIG_NET_VENDOR_3COM=y # CONFIG_VORTEX is not set @@ -1132,34 +1301,57 @@ CONFIG_NET_VENDOR_ALTEON=y CONFIG_NET_VENDOR_AMD=y # CONFIG_AMD8111_ETH is not set # CONFIG_PCNET32 is not set -# CONFIG_AMD_XGBE is not set -CONFIG_NET_XGENE=y CONFIG_NET_VENDOR_ARC=y # CONFIG_ARC_EMAC is not set -# CONFIG_EMAC_ROCKCHIP is not set CONFIG_NET_VENDOR_ATHEROS=y # CONFIG_ATL2 is not set # CONFIG_ATL1 is not set # CONFIG_ATL1E is not set # CONFIG_ATL1C is not set # CONFIG_ALX is not set +# CONFIG_NET_VENDOR_AURORA is not set +CONFIG_NET_CADENCE=y +# CONFIG_MACB is not set CONFIG_NET_VENDOR_BROADCOM=y # CONFIG_B44 is not set # CONFIG_BCMGENET is not set # CONFIG_BNX2 is not set # CONFIG_CNIC is not set -# CONFIG_TIGON3 is not set +CONFIG_TIGON3=y # CONFIG_BNX2X is not set # CONFIG_SYSTEMPORT is not set +# CONFIG_BNXT is not set +CONFIG_GMAC_XGS_IPROC=y + +# +# Broadcom HND network devices +# +CONFIG_HND=y +CONFIG_ET=y +# CONFIG_ET_NAPI2_POLL is not set +# CONFIG_BCM_IPROC_GMAC_ACP is not set +# CONFIG_BCM_IPROC_GMAC_PREFETCH is not set +# CONFIG_BCM_IPROC_GMAC_LOCK_OPT is not set +# CONFIG_BCM_IPROC_GMAC_RWREG_OPT is not set +# CONFIG_BCM_IPROC_GMAC_SG is not set +CONFIG_IPROC_SDK_MGT_PORT_HANDOFF=y +# CONFIG_IPROC_2STAGE_RX is not set +# CONFIG_SERDES_ASYMMETRIC_MODE is not set +# CONFIG_JUMBO_FRAME is not set +CONFIG_MDIO_XGS_IPROC=y CONFIG_NET_VENDOR_BROCADE=y # CONFIG_BNA is not set +CONFIG_NET_VENDOR_CAVIUM=y CONFIG_NET_VENDOR_CHELSIO=y # CONFIG_CHELSIO_T1 is not set # CONFIG_CHELSIO_T3 is not set # CONFIG_CHELSIO_T4 is not set # CONFIG_CHELSIO_T4VF is not set +CONFIG_NET_VENDOR_CIRRUS=y +# CONFIG_CS89x0 is not set CONFIG_NET_VENDOR_CISCO=y # CONFIG_ENIC is not set +# CONFIG_DM9000 is not set # CONFIG_DNET is not set CONFIG_NET_VENDOR_DEC=y # CONFIG_NET_TULIP is not set @@ -1168,18 +1360,26 @@ CONFIG_NET_VENDOR_DLINK=y # CONFIG_SUNDANCE is not set CONFIG_NET_VENDOR_EMULEX=y # CONFIG_BE2NET is not set 
+CONFIG_NET_VENDOR_EZCHIP=y +# CONFIG_EZCHIP_NPS_MANAGEMENT_ENET is not set CONFIG_NET_VENDOR_EXAR=y # CONFIG_S2IO is not set # CONFIG_VXGE is not set -CONFIG_NET_VENDOR_FREESCALE=y -# CONFIG_FSL_PQ_MDIO is not set -CONFIG_FSL_XGMAC_MDIO=y +CONFIG_NET_VENDOR_FARADAY=y +# CONFIG_FTMAC100 is not set +# CONFIG_FTGMAC100 is not set +CONFIG_NET_VENDOR_HISILICON=y +# CONFIG_HIX5HD2_GMAC is not set +# CONFIG_HIP04_ETH is not set +# CONFIG_HNS is not set +# CONFIG_HNS_DSAF is not set +# CONFIG_HNS_ENET is not set CONFIG_NET_VENDOR_HP=y # CONFIG_HP100 is not set CONFIG_NET_VENDOR_INTEL=y # CONFIG_E100 is not set -CONFIG_E1000=y -CONFIG_E1000E=y +# CONFIG_E1000 is not set +# CONFIG_E1000E is not set # CONFIG_IGB is not set # CONFIG_IGBVF is not set # CONFIG_IXGB is not set @@ -1189,7 +1389,6 @@ CONFIG_E1000E=y # CONFIG_I40EVF is not set # CONFIG_FM10K is not set CONFIG_NET_VENDOR_I825XX=y -# CONFIG_IP1000 is not set # CONFIG_JME is not set CONFIG_NET_VENDOR_MARVELL=y # CONFIG_MVMDIO is not set @@ -1199,6 +1398,7 @@ CONFIG_NET_VENDOR_MELLANOX=y # CONFIG_MLX4_EN is not set # CONFIG_MLX4_CORE is not set # CONFIG_MLX5_CORE is not set +# CONFIG_MLXSW_CORE is not set CONFIG_NET_VENDOR_MICREL=y # CONFIG_KS8842 is not set # CONFIG_KS8851 is not set @@ -1206,6 +1406,7 @@ CONFIG_NET_VENDOR_MICREL=y # CONFIG_KSZ884X_PCI is not set CONFIG_NET_VENDOR_MICROCHIP=y # CONFIG_ENC28J60 is not set +# CONFIG_ENCX24J600 is not set CONFIG_NET_VENDOR_MYRI=y # CONFIG_MYRI10GE is not set # CONFIG_FEALNX is not set @@ -1213,6 +1414,7 @@ CONFIG_NET_VENDOR_NATSEMI=y # CONFIG_NATSEMI is not set # CONFIG_NS83820 is not set CONFIG_NET_VENDOR_8390=y +# CONFIG_AX88796 is not set # CONFIG_NE2K_PCI is not set CONFIG_NET_VENDOR_NVIDIA=y # CONFIG_FORCEDETH is not set @@ -1226,14 +1428,17 @@ CONFIG_NET_VENDOR_QLOGIC=y # CONFIG_QLCNIC is not set # CONFIG_QLGE is not set # CONFIG_NETXEN_NIC is not set +# CONFIG_QED is not set CONFIG_NET_VENDOR_QUALCOMM=y # CONFIG_QCA7000 is not set CONFIG_NET_VENDOR_REALTEK=y # 
CONFIG_8139CP is not set # CONFIG_8139TOO is not set # CONFIG_R8169 is not set +CONFIG_NET_VENDOR_RENESAS=y CONFIG_NET_VENDOR_RDC=y # CONFIG_R6040 is not set +CONFIG_NET_VENDOR_ROCKER=y CONFIG_NET_VENDOR_SAMSUNG=y # CONFIG_SXGBE_ETH is not set CONFIG_NET_VENDOR_SEEQ=y @@ -1244,10 +1449,10 @@ CONFIG_NET_VENDOR_SIS=y # CONFIG_SIS190 is not set # CONFIG_SFC is not set CONFIG_NET_VENDOR_SMSC=y -CONFIG_SMC91X=y +# CONFIG_SMC91X is not set # CONFIG_EPIC100 is not set -CONFIG_SMSC911X=y -# CONFIG_SMSC911X_ARCH_HOOKS is not set +# CONFIG_SMC911X is not set +# CONFIG_SMSC911X is not set # CONFIG_SMSC9420 is not set CONFIG_NET_VENDOR_STMICRO=y # CONFIG_STMMAC_ETH is not set @@ -1256,9 +1461,12 @@ CONFIG_NET_VENDOR_SUN=y # CONFIG_SUNGEM is not set # CONFIG_CASSINI is not set # CONFIG_NIU is not set +CONFIG_NET_VENDOR_SYNOPSYS=y +# CONFIG_SYNOPSYS_DWC_ETH_QOS is not set CONFIG_NET_VENDOR_TEHUTI=y # CONFIG_TEHUTI is not set CONFIG_NET_VENDOR_TI=y +# CONFIG_TI_CPSW_ALE is not set # CONFIG_TLAN is not set CONFIG_NET_VENDOR_VIA=y # CONFIG_VIA_RHINE is not set @@ -1273,33 +1481,33 @@ CONFIG_PHYLIB=y # # MII PHY device drivers # -CONFIG_AQUANTIA_PHY=y +# CONFIG_AQUANTIA_PHY is not set # CONFIG_AT803X_PHY is not set # CONFIG_AMD_PHY is not set -# CONFIG_AMD_XGBE_PHY is not set # CONFIG_MARVELL_PHY is not set # CONFIG_DAVICOM_PHY is not set # CONFIG_QSEMI_PHY is not set # CONFIG_LXT_PHY is not set # CONFIG_CICADA_PHY is not set -CONFIG_VITESSE_PHY=y +# CONFIG_VITESSE_PHY is not set # CONFIG_TERANETICS_PHY is not set -CONFIG_SMSC_PHY=y -CONFIG_BROADCOM_PHY=y +# CONFIG_SMSC_PHY is not set +# CONFIG_BROADCOM_PHY is not set # CONFIG_BCM7XXX_PHY is not set # CONFIG_BCM87XX_PHY is not set # CONFIG_ICPLUS_PHY is not set -CONFIG_REALTEK_PHY=y +# CONFIG_REALTEK_PHY is not set # CONFIG_NATIONAL_PHY is not set # CONFIG_STE10XP is not set # CONFIG_LSI_ET1011C_PHY is not set # CONFIG_MICREL_PHY is not set -CONFIG_FIXED_PHY=y +# CONFIG_DP83848_PHY is not set +# CONFIG_DP83867_PHY is not set +# 
CONFIG_MICROCHIP_PHY is not set +# CONFIG_FIXED_PHY is not set # CONFIG_MDIO_BITBANG is not set -CONFIG_MDIO_BUS_MUX=y # CONFIG_MDIO_BUS_MUX_GPIO is not set -CONFIG_MDIO_BUS_MUX_MMIOREG=y -# CONFIG_FSL_10GBASE_KR is not set +# CONFIG_MDIO_BUS_MUX_MMIOREG is not set # CONFIG_MDIO_BCM_UNIMAC is not set # CONFIG_MICREL_KS8995MA is not set # CONFIG_PPP is not set @@ -1310,6 +1518,7 @@ CONFIG_USB_NET_DRIVERS=y # CONFIG_USB_PEGASUS is not set # CONFIG_USB_RTL8150 is not set # CONFIG_USB_RTL8152 is not set +# CONFIG_USB_LAN78XX is not set # CONFIG_USB_USBNET is not set # CONFIG_USB_IPHETH is not set # CONFIG_WLAN is not set @@ -1320,83 +1529,19 @@ CONFIG_USB_NET_DRIVERS=y # CONFIG_WAN is not set # CONFIG_VMXNET3 is not set # CONFIG_ISDN is not set +# CONFIG_NVM is not set # # Input device support # -CONFIG_INPUT=y -# CONFIG_INPUT_FF_MEMLESS is not set -# CONFIG_INPUT_POLLDEV is not set -# CONFIG_INPUT_SPARSEKMAP is not set -# CONFIG_INPUT_MATRIXKMAP is not set - -# -# Userland interfaces -# -CONFIG_INPUT_MOUSEDEV=y -CONFIG_INPUT_MOUSEDEV_PSAUX=y -CONFIG_INPUT_MOUSEDEV_SCREEN_X=1024 -CONFIG_INPUT_MOUSEDEV_SCREEN_Y=768 -# CONFIG_INPUT_JOYDEV is not set -CONFIG_INPUT_EVDEV=y -# CONFIG_INPUT_EVBUG is not set - -# -# Input Device Drivers -# -CONFIG_INPUT_KEYBOARD=y -# CONFIG_KEYBOARD_ADP5588 is not set -# CONFIG_KEYBOARD_ADP5589 is not set -CONFIG_KEYBOARD_ATKBD=y -# CONFIG_KEYBOARD_QT1070 is not set -# CONFIG_KEYBOARD_QT2160 is not set -# CONFIG_KEYBOARD_LKKBD is not set -# CONFIG_KEYBOARD_GPIO is not set -# CONFIG_KEYBOARD_GPIO_POLLED is not set -# CONFIG_KEYBOARD_TCA6416 is not set -# CONFIG_KEYBOARD_TCA8418 is not set -# CONFIG_KEYBOARD_MATRIX is not set -# CONFIG_KEYBOARD_LM8333 is not set -# CONFIG_KEYBOARD_MAX7359 is not set -# CONFIG_KEYBOARD_MCS is not set -# CONFIG_KEYBOARD_MPR121 is not set -# CONFIG_KEYBOARD_NEWTON is not set -# CONFIG_KEYBOARD_OPENCORES is not set -# CONFIG_KEYBOARD_SAMSUNG is not set -# CONFIG_KEYBOARD_STOWAWAY is not set -# 
CONFIG_KEYBOARD_SUNKBD is not set -# CONFIG_KEYBOARD_OMAP4 is not set -# CONFIG_KEYBOARD_XTKBD is not set -# CONFIG_KEYBOARD_CAP1106 is not set -CONFIG_INPUT_MOUSE=y -CONFIG_MOUSE_PS2=y -CONFIG_MOUSE_PS2_ALPS=y -CONFIG_MOUSE_PS2_LOGIPS2PP=y -CONFIG_MOUSE_PS2_SYNAPTICS=y -CONFIG_MOUSE_PS2_CYPRESS=y -CONFIG_MOUSE_PS2_TRACKPOINT=y -# CONFIG_MOUSE_PS2_ELANTECH is not set -# CONFIG_MOUSE_PS2_SENTELIC is not set -# CONFIG_MOUSE_PS2_TOUCHKIT is not set -# CONFIG_MOUSE_SERIAL is not set -# CONFIG_MOUSE_APPLETOUCH is not set -# CONFIG_MOUSE_BCM5974 is not set -# CONFIG_MOUSE_CYAPA is not set -# CONFIG_MOUSE_VSXXXAA is not set -# CONFIG_MOUSE_GPIO is not set -# CONFIG_MOUSE_SYNAPTICS_I2C is not set -# CONFIG_MOUSE_SYNAPTICS_USB is not set -# CONFIG_INPUT_JOYSTICK is not set -# CONFIG_INPUT_TABLET is not set -# CONFIG_INPUT_TOUCHSCREEN is not set -# CONFIG_INPUT_MISC is not set +# CONFIG_INPUT is not set # # Hardware I/O ports # CONFIG_SERIO=y -# CONFIG_SERIO_SERPORT is not set -CONFIG_SERIO_AMBAKMI=y +CONFIG_SERIO_SERPORT=y +# CONFIG_SERIO_AMBAKMI is not set # CONFIG_SERIO_PCIPS2 is not set CONFIG_SERIO_LIBPS2=y # CONFIG_SERIO_RAW is not set @@ -1404,26 +1549,22 @@ CONFIG_SERIO_LIBPS2=y # CONFIG_SERIO_PS2MULT is not set # CONFIG_SERIO_ARC_PS2 is not set # CONFIG_SERIO_APBPS2 is not set +# CONFIG_USERIO is not set # CONFIG_GAMEPORT is not set # # Character devices # CONFIG_TTY=y -CONFIG_VT=y -CONFIG_CONSOLE_TRANSLATIONS=y -CONFIG_VT_CONSOLE=y -CONFIG_VT_CONSOLE_SLEEP=y -CONFIG_HW_CONSOLE=y -CONFIG_VT_HW_CONSOLE_BINDING=y +# CONFIG_VT is not set CONFIG_UNIX98_PTYS=y -# CONFIG_DEVPTS_MULTIPLE_INSTANCES is not set -CONFIG_LEGACY_PTYS=y -CONFIG_LEGACY_PTY_COUNT=16 +CONFIG_DEVPTS_MULTIPLE_INSTANCES=y +# CONFIG_LEGACY_PTYS is not set # CONFIG_SERIAL_NONSTANDARD is not set # CONFIG_NOZOMI is not set # CONFIG_N_GSM is not set # CONFIG_TRACE_SINK is not set +CONFIG_DEVMEM=y CONFIG_DEVKMEM=y # @@ -1435,27 +1576,36 @@ CONFIG_SERIAL_8250_DEPRECATED_OPTIONS=y CONFIG_SERIAL_8250_CONSOLE=y 
CONFIG_SERIAL_8250_DMA=y CONFIG_SERIAL_8250_PCI=y -CONFIG_SERIAL_8250_NR_UARTS=4 -CONFIG_SERIAL_8250_RUNTIME_UARTS=4 -# CONFIG_SERIAL_8250_EXTENDED is not set -# CONFIG_SERIAL_8250_DW is not set +CONFIG_SERIAL_8250_NR_UARTS=2 +CONFIG_SERIAL_8250_RUNTIME_UARTS=2 +CONFIG_SERIAL_8250_EXTENDED=y +# CONFIG_SERIAL_8250_MANY_PORTS is not set +CONFIG_SERIAL_8250_SHARE_IRQ=y +CONFIG_SERIAL_8250_DETECT_IRQ=y +CONFIG_SERIAL_8250_RSA=y +CONFIG_SERIAL_8250_FSL=y +CONFIG_SERIAL_8250_DW=y +# CONFIG_SERIAL_8250_EM is not set +# CONFIG_SERIAL_8250_RT288X is not set +# CONFIG_SERIAL_8250_INGENIC is not set +# CONFIG_SERIAL_8250_MID is not set # # Non-8250 serial port support # # CONFIG_SERIAL_AMBA_PL010 is not set -CONFIG_SERIAL_AMBA_PL011=y -CONFIG_SERIAL_AMBA_PL011_CONSOLE=y +# CONFIG_SERIAL_AMBA_PL011 is not set # CONFIG_SERIAL_EARLYCON_ARM_SEMIHOST is not set # CONFIG_SERIAL_MAX3100 is not set # CONFIG_SERIAL_MAX310X is not set -# CONFIG_SERIAL_MFD_HSU is not set +# CONFIG_SERIAL_UARTLITE is not set CONFIG_SERIAL_CORE=y CONFIG_SERIAL_CORE_CONSOLE=y # CONFIG_SERIAL_JSM is not set CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_SERIAL_SCCNXP is not set # CONFIG_SERIAL_SC16IS7XX is not set +# CONFIG_SERIAL_BCM63XX is not set # CONFIG_SERIAL_ALTERA_JTAGUART is not set # CONFIG_SERIAL_ALTERA_UART is not set # CONFIG_SERIAL_IFX6X60 is not set @@ -1463,19 +1613,17 @@ CONFIG_SERIAL_OF_PLATFORM=y # CONFIG_SERIAL_ARC is not set # CONFIG_SERIAL_RP2 is not set # CONFIG_SERIAL_FSL_LPUART is not set -CONFIG_HVC_DRIVER=y -CONFIG_VIRTIO_CONSOLE=y +# CONFIG_SERIAL_CONEXANT_DIGICOLOR is not set +# CONFIG_SERIAL_ST_ASC is not set +# CONFIG_SERIAL_STM32 is not set +# CONFIG_TTY_PRINTK is not set +# CONFIG_HVC_DCC is not set # CONFIG_IPMI_HANDLER is not set CONFIG_HW_RANDOM=y # CONFIG_HW_RANDOM_TIMERIOMEM is not set -# CONFIG_HW_RANDOM_VIRTIO is not set -CONFIG_HW_RANDOM_XGENE=y +# CONFIG_HW_RANDOM_XGS_IPROC_RNG is not set # CONFIG_R3964 is not set # CONFIG_APPLICOM is not set - -# -# PCMCIA character devices 
-# # CONFIG_RAW_DRIVER is not set # CONFIG_TCG_TPM is not set CONFIG_DEVPORT=y @@ -1486,18 +1634,18 @@ CONFIG_DEVPORT=y # CONFIG_I2C=y CONFIG_I2C_BOARDINFO=y -CONFIG_I2C_COMPAT=y +# CONFIG_I2C_COMPAT is not set CONFIG_I2C_CHARDEV=y -CONFIG_I2C_MUX=y +# CONFIG_I2C_MUX is not set +# CONFIG_I2C_HELPER_AUTO is not set +# CONFIG_I2C_SMBUS is not set # -# Multiplexer I2C Chip support +# I2C Algorithms # -# CONFIG_I2C_ARB_GPIO_CHALLENGE is not set -# CONFIG_I2C_MUX_GPIO is not set -# CONFIG_I2C_MUX_PCA9541 is not set -CONFIG_I2C_MUX_PCA954x=y -CONFIG_I2C_HELPER_AUTO=y +# CONFIG_I2C_ALGOBIT is not set +# CONFIG_I2C_ALGOPCF is not set +# CONFIG_I2C_ALGOPCA is not set # # I2C Hardware Bus support @@ -1524,18 +1672,19 @@ CONFIG_I2C_HELPER_AUTO=y # # I2C system bus drivers (mostly embedded / system-on-chip) # +CONFIG_I2C_XGS_IPROC=y +# CONFIG_SMBUS_XGS_IPROC is not set # CONFIG_I2C_CBUS_GPIO is not set # CONFIG_I2C_DESIGNWARE_PLATFORM is not set # CONFIG_I2C_DESIGNWARE_PCI is not set +# CONFIG_I2C_EMEV2 is not set # CONFIG_I2C_GPIO is not set -CONFIG_I2C_IMX=y # CONFIG_I2C_NOMADIK is not set # CONFIG_I2C_OCORES is not set # CONFIG_I2C_PCA_PLATFORM is not set # CONFIG_I2C_PXA_PCI is not set # CONFIG_I2C_RK3X is not set # CONFIG_I2C_SIMTEC is not set -# CONFIG_I2C_VERSATILE is not set # CONFIG_I2C_XILINX is not set # @@ -1551,6 +1700,7 @@ CONFIG_I2C_IMX=y # Other I2C/SMBus bus drivers # # CONFIG_I2C_STUB is not set +# CONFIG_I2C_SLAVE is not set # CONFIG_I2C_DEBUG_CORE is not set # CONFIG_I2C_DEBUG_ALGO is not set # CONFIG_I2C_DEBUG_BUS is not set @@ -1563,16 +1713,23 @@ CONFIG_SPI_MASTER=y # # CONFIG_SPI_ALTERA is not set # CONFIG_SPI_BITBANG is not set +# CONFIG_SPI_CADENCE is not set # CONFIG_SPI_GPIO is not set # CONFIG_SPI_FSL_SPI is not set # CONFIG_SPI_OC_TINY is not set -CONFIG_SPI_PL022=y +# CONFIG_SPI_PL022 is not set # CONFIG_SPI_PXA2XX is not set # CONFIG_SPI_PXA2XX_PCI is not set # CONFIG_SPI_ROCKCHIP is not set # CONFIG_SPI_SC18IS602 is not set # CONFIG_SPI_XCOMM 
is not set # CONFIG_SPI_XILINX is not set +# CONFIG_SPI_ZYNQMP_GQSPI is not set +CONFIG_SPI_XGS_IPROC=y +CONFIG_IPROC_QSPI_SINGLE_MODE=y +# CONFIG_IPROC_QSPI_DUAL_MODE is not set +# CONFIG_IPROC_QSPI_QUAD_MODE is not set +CONFIG_IPROC_QSPI_MAX_HZ=62500000 # CONFIG_SPI_DESIGNWARE is not set # @@ -1588,7 +1745,6 @@ CONFIG_SPI_PL022=y # CONFIG_PPS=y # CONFIG_PPS_DEBUG is not set -# CONFIG_NTP_PPS is not set # # PPS clients support @@ -1609,6 +1765,7 @@ CONFIG_PTP_1588_CLOCK=y # # Enable PHYLIB and NETWORK_PHY_TIMESTAMPING to see the additional clocks. # +CONFIG_ARCH_HAVE_CUSTOM_GPIO_H=y CONFIG_ARCH_WANT_OPTIONAL_GPIOLIB=y CONFIG_ARCH_REQUIRE_GPIOLIB=y CONFIG_GPIOLIB=y @@ -1616,95 +1773,222 @@ CONFIG_GPIO_DEVRES=y CONFIG_OF_GPIO=y CONFIG_GPIOLIB_IRQCHIP=y # CONFIG_DEBUG_GPIO is not set -# CONFIG_GPIO_SYSFS is not set +CONFIG_GPIO_SYSFS=y CONFIG_GPIO_GENERIC=y # -# Memory mapped GPIO drivers: +# Memory mapped GPIO drivers # -CONFIG_GPIO_GENERIC_PLATFORM=y +# CONFIG_GPIO_74XX_MMIO is not set +# CONFIG_GPIO_ALTERA is not set +CONFIG_GPIO_XGS_IPROC=y # CONFIG_GPIO_DWAPB is not set -CONFIG_GPIO_PL061=y -# CONFIG_GPIO_SCH311X is not set -# CONFIG_GPIO_SYSCON is not set -CONFIG_GPIO_XGENE=y -# CONFIG_GPIO_VX855 is not set +# CONFIG_GPIO_EM is not set +# CONFIG_GPIO_GENERIC_PLATFORM is not set # CONFIG_GPIO_GRGPIO is not set +# CONFIG_GPIO_PL061 is not set +# CONFIG_GPIO_VX855 is not set +# CONFIG_GPIO_XILINX is not set +# CONFIG_GPIO_ZEVIO is not set +# CONFIG_GPIO_ZX is not set # -# I2C GPIO expanders: +# I2C GPIO expanders # +# CONFIG_GPIO_ADP5588 is not set +# CONFIG_GPIO_ADNP is not set # CONFIG_GPIO_MAX7300 is not set # CONFIG_GPIO_MAX732X is not set # CONFIG_GPIO_PCA953X is not set # CONFIG_GPIO_PCF857X is not set # CONFIG_GPIO_SX150X is not set -# CONFIG_GPIO_ADP5588 is not set -# CONFIG_GPIO_ADNP is not set # -# PCI GPIO expanders: +# MFD GPIO expanders +# + +# +# PCI GPIO expanders # -# CONFIG_GPIO_BT8XX is not set # CONFIG_GPIO_AMD8111 is not set +# 
CONFIG_GPIO_BT8XX is not set # CONFIG_GPIO_ML_IOH is not set # CONFIG_GPIO_RDC321X is not set # -# SPI GPIO expanders: +# SPI GPIO expanders # -# CONFIG_GPIO_MAX7301 is not set -# CONFIG_GPIO_MCP23S08 is not set -# CONFIG_GPIO_MC33880 is not set # CONFIG_GPIO_74X164 is not set +# CONFIG_GPIO_MAX7301 is not set +# CONFIG_GPIO_MC33880 is not set # -# AC97 GPIO expanders: +# SPI or I2C GPIO expanders # +# CONFIG_GPIO_MCP23S08 is not set # -# LPC GPIO expanders: -# - -# -# MODULbus GPIO expanders: -# - -# -# USB GPIO expanders: +# USB GPIO expanders # # CONFIG_W1 is not set -CONFIG_POWER_SUPPLY=y -# CONFIG_POWER_SUPPLY_DEBUG is not set -# CONFIG_PDA_POWER is not set -# CONFIG_TEST_POWER is not set -# CONFIG_BATTERY_DS2780 is not set -# CONFIG_BATTERY_DS2781 is not set -# CONFIG_BATTERY_DS2782 is not set -# CONFIG_BATTERY_SBS is not set -# CONFIG_BATTERY_BQ27x00 is not set -# CONFIG_BATTERY_MAX17040 is not set -# CONFIG_BATTERY_MAX17042 is not set -# CONFIG_CHARGER_MAX8903 is not set -# CONFIG_CHARGER_LP8727 is not set -# CONFIG_CHARGER_GPIO is not set -# CONFIG_CHARGER_MANAGER is not set -# CONFIG_CHARGER_BQ2415X is not set -# CONFIG_CHARGER_BQ24190 is not set -# CONFIG_CHARGER_BQ24735 is not set -# CONFIG_CHARGER_SMB347 is not set -CONFIG_POWER_RESET=y -# CONFIG_POWER_RESET_GPIO is not set -# CONFIG_POWER_RESET_GPIO_RESTART is not set -# CONFIG_POWER_RESET_LTC2952 is not set -CONFIG_POWER_RESET_VEXPRESS=y -# CONFIG_POWER_RESET_XGENE is not set -# CONFIG_POWER_RESET_SYSCON is not set -CONFIG_POWER_RESET_LAYERSCAPE=y +# CONFIG_POWER_SUPPLY is not set # CONFIG_POWER_AVS is not set -# CONFIG_HWMON is not set +CONFIG_HWMON=y +# CONFIG_HWMON_VID is not set +# CONFIG_HWMON_DEBUG_CHIP is not set + +# +# Native drivers +# +# CONFIG_SENSORS_AD7314 is not set +# CONFIG_SENSORS_AD7414 is not set +# CONFIG_SENSORS_AD7418 is not set +# CONFIG_SENSORS_ADM1021 is not set +# CONFIG_SENSORS_ADM1025 is not set +# CONFIG_SENSORS_ADM1026 is not set +# CONFIG_SENSORS_ADM1029 is not set +# 
CONFIG_SENSORS_ADM1031 is not set +# CONFIG_SENSORS_ADM9240 is not set +# CONFIG_SENSORS_ADT7310 is not set +# CONFIG_SENSORS_ADT7410 is not set +# CONFIG_SENSORS_ADT7411 is not set +# CONFIG_SENSORS_ADT7462 is not set +# CONFIG_SENSORS_ADT7470 is not set +# CONFIG_SENSORS_ADT7475 is not set +# CONFIG_SENSORS_ASC7621 is not set +# CONFIG_SENSORS_ATXP1 is not set +# CONFIG_SENSORS_DS620 is not set +# CONFIG_SENSORS_DS1621 is not set +# CONFIG_SENSORS_I5K_AMB is not set +# CONFIG_SENSORS_F71805F is not set +# CONFIG_SENSORS_F71882FG is not set +# CONFIG_SENSORS_F75375S is not set +# CONFIG_SENSORS_GL518SM is not set +# CONFIG_SENSORS_GL520SM is not set +# CONFIG_SENSORS_G760A is not set +# CONFIG_SENSORS_G762 is not set +# CONFIG_SENSORS_GPIO_FAN is not set +# CONFIG_SENSORS_HIH6130 is not set +# CONFIG_SENSORS_IT87 is not set +# CONFIG_SENSORS_JC42 is not set +# CONFIG_SENSORS_POWR1220 is not set +# CONFIG_SENSORS_LINEAGE is not set +# CONFIG_SENSORS_LTC2945 is not set +# CONFIG_SENSORS_LTC4151 is not set +# CONFIG_SENSORS_LTC4215 is not set +# CONFIG_SENSORS_LTC4222 is not set +# CONFIG_SENSORS_LTC4245 is not set +# CONFIG_SENSORS_LTC4260 is not set +# CONFIG_SENSORS_LTC4261 is not set +# CONFIG_SENSORS_MAX1111 is not set +# CONFIG_SENSORS_MAX16065 is not set +# CONFIG_SENSORS_MAX1619 is not set +# CONFIG_SENSORS_MAX1668 is not set +# CONFIG_SENSORS_MAX197 is not set +# CONFIG_SENSORS_MAX6639 is not set +# CONFIG_SENSORS_MAX6642 is not set +# CONFIG_SENSORS_MAX6650 is not set +# CONFIG_SENSORS_MAX6697 is not set +# CONFIG_SENSORS_MAX31790 is not set +# CONFIG_SENSORS_HTU21 is not set +# CONFIG_SENSORS_MCP3021 is not set +# CONFIG_SENSORS_ADCXX is not set +# CONFIG_SENSORS_LM63 is not set +# CONFIG_SENSORS_LM70 is not set +# CONFIG_SENSORS_LM73 is not set +# CONFIG_SENSORS_LM75 is not set +# CONFIG_SENSORS_LM77 is not set +# CONFIG_SENSORS_LM78 is not set +# CONFIG_SENSORS_LM80 is not set +# CONFIG_SENSORS_LM83 is not set +# CONFIG_SENSORS_LM85 is not set +# 
CONFIG_SENSORS_LM87 is not set +# CONFIG_SENSORS_LM90 is not set +# CONFIG_SENSORS_LM92 is not set +# CONFIG_SENSORS_LM93 is not set +# CONFIG_SENSORS_LM95234 is not set +# CONFIG_SENSORS_LM95241 is not set +# CONFIG_SENSORS_LM95245 is not set +# CONFIG_SENSORS_PC87360 is not set +# CONFIG_SENSORS_PC87427 is not set +# CONFIG_SENSORS_NTC_THERMISTOR is not set +# CONFIG_SENSORS_NCT6683 is not set +# CONFIG_SENSORS_NCT6775 is not set +# CONFIG_SENSORS_NCT7802 is not set +# CONFIG_SENSORS_NCT7904 is not set +# CONFIG_SENSORS_PCF8591 is not set +# CONFIG_PMBUS is not set +# CONFIG_SENSORS_SHT15 is not set +# CONFIG_SENSORS_SHT21 is not set +# CONFIG_SENSORS_SHTC1 is not set +# CONFIG_SENSORS_SIS5595 is not set +# CONFIG_SENSORS_DME1737 is not set +# CONFIG_SENSORS_EMC1403 is not set +# CONFIG_SENSORS_EMC2103 is not set +# CONFIG_SENSORS_EMC6W201 is not set +# CONFIG_SENSORS_SMSC47M1 is not set +# CONFIG_SENSORS_SMSC47M192 is not set +# CONFIG_SENSORS_SMSC47B397 is not set +# CONFIG_SENSORS_SCH56XX_COMMON is not set +# CONFIG_SENSORS_SCH5627 is not set +# CONFIG_SENSORS_SCH5636 is not set +# CONFIG_SENSORS_SMM665 is not set +# CONFIG_SENSORS_ADC128D818 is not set +# CONFIG_SENSORS_ADS1015 is not set +# CONFIG_SENSORS_ADS7828 is not set +# CONFIG_SENSORS_ADS7871 is not set +# CONFIG_SENSORS_AMC6821 is not set +# CONFIG_SENSORS_INA209 is not set +# CONFIG_SENSORS_INA2XX is not set +# CONFIG_SENSORS_TC74 is not set +# CONFIG_SENSORS_THMC50 is not set +# CONFIG_SENSORS_TMP102 is not set +# CONFIG_SENSORS_TMP103 is not set +# CONFIG_SENSORS_TMP401 is not set +# CONFIG_SENSORS_TMP421 is not set +# CONFIG_SENSORS_VIA686A is not set +# CONFIG_SENSORS_VT1211 is not set +# CONFIG_SENSORS_VT8231 is not set +# CONFIG_SENSORS_W83781D is not set +# CONFIG_SENSORS_W83791D is not set +# CONFIG_SENSORS_W83792D is not set +# CONFIG_SENSORS_W83793 is not set +# CONFIG_SENSORS_W83795 is not set +# CONFIG_SENSORS_W83L785TS is not set +# CONFIG_SENSORS_W83L786NG is not set +# 
CONFIG_SENSORS_W83627HF is not set +# CONFIG_SENSORS_W83627EHF is not set # CONFIG_THERMAL is not set -# CONFIG_WATCHDOG is not set +CONFIG_WATCHDOG=y +CONFIG_WATCHDOG_CORE=y +# CONFIG_WATCHDOG_NOWAYOUT is not set + +# +# Watchdog Device Drivers +# +# CONFIG_SOFT_WATCHDOG is not set +# CONFIG_GPIO_WATCHDOG is not set +# CONFIG_XILINX_WATCHDOG is not set +# CONFIG_ARM_SP805_WATCHDOG is not set +# CONFIG_CADENCE_WATCHDOG is not set +# CONFIG_DW_WATCHDOG is not set +# CONFIG_MAX63XX_WATCHDOG is not set +CONFIG_XGS_IPROC_SP805_WDT=y +# CONFIG_ALIM7101_WDT is not set +# CONFIG_I6300ESB_WDT is not set +# CONFIG_BCM7038_WDT is not set +# CONFIG_MEN_A21_WDT is not set + +# +# PCI-based Watchdog Cards +# +# CONFIG_PCIPCWATCHDOG is not set +# CONFIG_WDTPCI is not set + +# +# USB-based Watchdog Cards +# +# CONFIG_USBPCWATCHDOG is not set CONFIG_SSB_POSSIBLE=y # @@ -1721,22 +2005,29 @@ CONFIG_BCMA_POSSIBLE=y # # Multifunction device drivers # -CONFIG_MFD_CORE=y +# CONFIG_MFD_CORE is not set # CONFIG_MFD_AS3711 is not set # CONFIG_MFD_AS3722 is not set # CONFIG_PMIC_ADP5520 is not set # CONFIG_MFD_AAT2870_CORE is not set +# CONFIG_MFD_ATMEL_FLEXCOM is not set +# CONFIG_MFD_ATMEL_HLCDC is not set # CONFIG_MFD_BCM590XX is not set # CONFIG_MFD_AXP20X is not set # CONFIG_MFD_CROS_EC is not set +# CONFIG_MFD_ASIC3 is not set # CONFIG_PMIC_DA903X is not set # CONFIG_MFD_DA9052_SPI is not set # CONFIG_MFD_DA9052_I2C is not set # CONFIG_MFD_DA9055 is not set +# CONFIG_MFD_DA9062 is not set # CONFIG_MFD_DA9063 is not set +# CONFIG_MFD_DA9150 is not set +# CONFIG_MFD_DLN2 is not set # CONFIG_MFD_MC13XXX_SPI is not set # CONFIG_MFD_MC13XXX_I2C is not set # CONFIG_MFD_HI6421_PMIC is not set +# CONFIG_HTC_EGPIO is not set # CONFIG_HTC_PASIC3 is not set # CONFIG_HTC_I2CPLD is not set # CONFIG_LPC_ICH is not set @@ -1750,17 +2041,21 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_MAX14577 is not set # CONFIG_MFD_MAX77686 is not set # CONFIG_MFD_MAX77693 is not set +# CONFIG_MFD_MAX77843 is not set # 
CONFIG_MFD_MAX8907 is not set # CONFIG_MFD_MAX8925 is not set # CONFIG_MFD_MAX8997 is not set # CONFIG_MFD_MAX8998 is not set +# CONFIG_MFD_MT6397 is not set # CONFIG_MFD_MENF21BMC is not set # CONFIG_EZX_PCAP is not set # CONFIG_MFD_VIPERBOARD is not set # CONFIG_MFD_RETU is not set # CONFIG_MFD_PCF50633 is not set +# CONFIG_MFD_PM8921_CORE is not set # CONFIG_MFD_RDC321X is not set # CONFIG_MFD_RTSX_PCI is not set +# CONFIG_MFD_RT5033 is not set # CONFIG_MFD_RTSX_USB is not set # CONFIG_MFD_RC5T583 is not set # CONFIG_MFD_RK808 is not set @@ -1768,10 +2063,11 @@ CONFIG_MFD_CORE=y # CONFIG_MFD_SEC_CORE is not set # CONFIG_MFD_SI476X_CORE is not set # CONFIG_MFD_SM501 is not set +# CONFIG_MFD_SKY81452 is not set # CONFIG_MFD_SMSC is not set # CONFIG_ABX500_CORE is not set # CONFIG_MFD_STMPE is not set -CONFIG_MFD_SYSCON=y +# CONFIG_MFD_SYSCON is not set # CONFIG_MFD_TI_AM335X_TSCADC is not set # CONFIG_MFD_LP3943 is not set # CONFIG_MFD_LP8788 is not set @@ -1794,6 +2090,9 @@ CONFIG_MFD_SYSCON=y # CONFIG_MFD_LM3533 is not set # CONFIG_MFD_TC3589X is not set # CONFIG_MFD_TMIO is not set +# CONFIG_MFD_T7L66XB is not set +# CONFIG_MFD_TC6387XB is not set +# CONFIG_MFD_TC6393XB is not set # CONFIG_MFD_VX855 is not set # CONFIG_MFD_ARIZONA_I2C is not set # CONFIG_MFD_ARIZONA_SPI is not set @@ -1802,229 +2101,38 @@ CONFIG_MFD_SYSCON=y # CONFIG_MFD_WM831X_SPI is not set # CONFIG_MFD_WM8350_I2C is not set # CONFIG_MFD_WM8994 is not set -CONFIG_MFD_VEXPRESS_SYSREG=y -CONFIG_REGULATOR=y -# CONFIG_REGULATOR_DEBUG is not set -CONFIG_REGULATOR_FIXED_VOLTAGE=y -# CONFIG_REGULATOR_VIRTUAL_CONSUMER is not set -# CONFIG_REGULATOR_USERSPACE_CONSUMER is not set -# CONFIG_REGULATOR_ACT8865 is not set -# CONFIG_REGULATOR_AD5398 is not set -# CONFIG_REGULATOR_ANATOP is not set -# CONFIG_REGULATOR_DA9210 is not set -# CONFIG_REGULATOR_DA9211 is not set -# CONFIG_REGULATOR_FAN53555 is not set -# CONFIG_REGULATOR_GPIO is not set -# CONFIG_REGULATOR_ISL9305 is not set -# 
CONFIG_REGULATOR_ISL6271A is not set -# CONFIG_REGULATOR_LP3971 is not set -# CONFIG_REGULATOR_LP3972 is not set -# CONFIG_REGULATOR_LP872X is not set -# CONFIG_REGULATOR_LP8755 is not set -# CONFIG_REGULATOR_LTC3589 is not set -# CONFIG_REGULATOR_MAX1586 is not set -# CONFIG_REGULATOR_MAX8649 is not set -# CONFIG_REGULATOR_MAX8660 is not set -# CONFIG_REGULATOR_MAX8952 is not set -# CONFIG_REGULATOR_MAX8973 is not set -# CONFIG_REGULATOR_PFUZE100 is not set -# CONFIG_REGULATOR_TPS51632 is not set -# CONFIG_REGULATOR_TPS62360 is not set -# CONFIG_REGULATOR_TPS65023 is not set -# CONFIG_REGULATOR_TPS6507X is not set -# CONFIG_REGULATOR_TPS6524X is not set -# CONFIG_REGULATOR_VEXPRESS is not set +# CONFIG_REGULATOR is not set # CONFIG_MEDIA_SUPPORT is not set # # Graphics support # -CONFIG_VGA_ARB=y -CONFIG_VGA_ARB_MAX_GPUS=16 - -# -# Direct Rendering Manager -# +# CONFIG_VGA_ARB is not set # CONFIG_DRM is not set # # Frame buffer Devices # -CONFIG_FB=y -# CONFIG_FIRMWARE_EDID is not set -CONFIG_FB_CMDLINE=y -# CONFIG_FB_DDC is not set -# CONFIG_FB_BOOT_VESA_SUPPORT is not set -CONFIG_FB_CFB_FILLRECT=y -CONFIG_FB_CFB_COPYAREA=y -CONFIG_FB_CFB_IMAGEBLIT=y -# CONFIG_FB_CFB_REV_PIXELS_IN_BYTE is not set -# CONFIG_FB_SYS_FILLRECT is not set -# CONFIG_FB_SYS_COPYAREA is not set -# CONFIG_FB_SYS_IMAGEBLIT is not set -# CONFIG_FB_FOREIGN_ENDIAN is not set -# CONFIG_FB_SYS_FOPS is not set -# CONFIG_FB_SVGALIB is not set -# CONFIG_FB_MACMODES is not set -# CONFIG_FB_BACKLIGHT is not set -CONFIG_FB_MODE_HELPERS=y -# CONFIG_FB_TILEBLITTING is not set - -# -# Frame buffer hardware drivers -# -# CONFIG_FB_CIRRUS is not set -# CONFIG_FB_PM2 is not set -CONFIG_FB_ARMCLCD=y -# CONFIG_FB_CYBER2000 is not set -# CONFIG_FB_ASILIANT is not set -# CONFIG_FB_IMSTT is not set -# CONFIG_FB_OPENCORES is not set -# CONFIG_FB_S1D13XXX is not set -# CONFIG_FB_NVIDIA is not set -# CONFIG_FB_RIVA is not set -# CONFIG_FB_I740 is not set -# CONFIG_FB_MATROX is not set -# CONFIG_FB_RADEON is not set 
-# CONFIG_FB_ATY128 is not set -# CONFIG_FB_ATY is not set -# CONFIG_FB_S3 is not set -# CONFIG_FB_SAVAGE is not set -# CONFIG_FB_SIS is not set -# CONFIG_FB_NEOMAGIC is not set -# CONFIG_FB_KYRO is not set -# CONFIG_FB_3DFX is not set -# CONFIG_FB_VOODOO1 is not set -# CONFIG_FB_VT8623 is not set -# CONFIG_FB_TRIDENT is not set -# CONFIG_FB_ARK is not set -# CONFIG_FB_PM3 is not set -# CONFIG_FB_CARMINE is not set -# CONFIG_FB_SMSCUFX is not set -# CONFIG_FB_UDL is not set -# CONFIG_FB_VIRTUAL is not set -# CONFIG_FB_METRONOME is not set -# CONFIG_FB_MB862XX is not set -# CONFIG_FB_BROADSHEET is not set -# CONFIG_FB_AUO_K190X is not set -# CONFIG_FB_SIMPLE is not set -# CONFIG_FB_SSD1307 is not set +# CONFIG_FB is not set # CONFIG_BACKLIGHT_LCD_SUPPORT is not set # CONFIG_VGASTATE is not set -CONFIG_VIDEOMODE_HELPERS=y - -# -# Console display driver support -# -CONFIG_DUMMY_CONSOLE=y -CONFIG_FRAMEBUFFER_CONSOLE=y -# CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY is not set -# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set -CONFIG_LOGO=y -# CONFIG_LOGO_LINUX_MONO is not set -# CONFIG_LOGO_LINUX_VGA16 is not set -CONFIG_LOGO_LINUX_CLUT224=y # CONFIG_SOUND is not set - -# -# HID support -# -CONFIG_HID=y -# CONFIG_HID_BATTERY_STRENGTH is not set -# CONFIG_HIDRAW is not set -# CONFIG_UHID is not set -CONFIG_HID_GENERIC=y - -# -# Special HID drivers -# -CONFIG_HID_A4TECH=y -# CONFIG_HID_ACRUX is not set -CONFIG_HID_APPLE=y -# CONFIG_HID_APPLEIR is not set -# CONFIG_HID_AUREAL is not set -CONFIG_HID_BELKIN=y -CONFIG_HID_CHERRY=y -CONFIG_HID_CHICONY=y -# CONFIG_HID_CP2112 is not set -CONFIG_HID_CYPRESS=y -# CONFIG_HID_DRAGONRISE is not set -# CONFIG_HID_EMS_FF is not set -# CONFIG_HID_ELECOM is not set -# CONFIG_HID_ELO is not set -CONFIG_HID_EZKEY=y -# CONFIG_HID_HOLTEK is not set -# CONFIG_HID_HUION is not set -# CONFIG_HID_KEYTOUCH is not set -# CONFIG_HID_KYE is not set -# CONFIG_HID_UCLOGIC is not set -# CONFIG_HID_WALTOP is not set -# CONFIG_HID_GYRATION is not set -# 
CONFIG_HID_ICADE is not set -# CONFIG_HID_TWINHAN is not set -CONFIG_HID_KENSINGTON=y -# CONFIG_HID_LCPOWER is not set -# CONFIG_HID_LENOVO is not set -CONFIG_HID_LOGITECH=y -# CONFIG_HID_LOGITECH_HIDPP is not set -# CONFIG_LOGITECH_FF is not set -# CONFIG_LOGIRUMBLEPAD2_FF is not set -# CONFIG_LOGIG940_FF is not set -# CONFIG_LOGIWHEELS_FF is not set -# CONFIG_HID_MAGICMOUSE is not set -CONFIG_HID_MICROSOFT=y -CONFIG_HID_MONTEREY=y -# CONFIG_HID_MULTITOUCH is not set -# CONFIG_HID_NTRIG is not set -# CONFIG_HID_ORTEK is not set -# CONFIG_HID_PANTHERLORD is not set -# CONFIG_HID_PENMOUNT is not set -# CONFIG_HID_PETALYNX is not set -# CONFIG_HID_PICOLCD is not set -# CONFIG_HID_PRIMAX is not set -# CONFIG_HID_ROCCAT is not set -# CONFIG_HID_SAITEK is not set -# CONFIG_HID_SAMSUNG is not set -# CONFIG_HID_SPEEDLINK is not set -# CONFIG_HID_STEELSERIES is not set -# CONFIG_HID_SUNPLUS is not set -# CONFIG_HID_RMI is not set -# CONFIG_HID_GREENASIA is not set -# CONFIG_HID_SMARTJOYPLUS is not set -# CONFIG_HID_TIVO is not set -# CONFIG_HID_TOPSEED is not set -# CONFIG_HID_THRUSTMASTER is not set -# CONFIG_HID_WACOM is not set -# CONFIG_HID_XINMO is not set -# CONFIG_HID_ZEROPLUS is not set -# CONFIG_HID_ZYDACRON is not set -# CONFIG_HID_SENSOR_HUB is not set - -# -# USB HID support -# -CONFIG_USB_HID=y -# CONFIG_HID_PID is not set -# CONFIG_USB_HIDDEV is not set - -# -# I2C HID support -# -# CONFIG_I2C_HID is not set CONFIG_USB_OHCI_LITTLE_ENDIAN=y CONFIG_USB_SUPPORT=y CONFIG_USB_COMMON=y CONFIG_USB_ARCH_HAS_HCD=y CONFIG_USB=y -# CONFIG_USB_ANNOUNCE_NEW_DEVICES is not set +CONFIG_USB_ANNOUNCE_NEW_DEVICES=y # # Miscellaneous USB options # CONFIG_USB_DEFAULT_PERSIST=y -# CONFIG_USB_DYNAMIC_MINORS is not set +CONFIG_USB_DYNAMIC_MINORS=y +# CONFIG_USB_OTG is not set # CONFIG_USB_OTG_WHITELIST is not set -# CONFIG_USB_OTG_FSM is not set +# CONFIG_USB_OTG_BLACKLIST_HUB is not set +# CONFIG_USB_ULPI_BUS is not set # CONFIG_USB_MON is not set # CONFIG_USB_WUSB_CBAF is not set 
@@ -2032,24 +2140,22 @@ CONFIG_USB_DEFAULT_PERSIST=y # USB Host Controller Drivers # # CONFIG_USB_C67X00_HCD is not set -CONFIG_USB_XHCI_HCD=y -CONFIG_USB_XHCI_PCI=y -CONFIG_USB_XHCI_PLATFORM=y +# CONFIG_USB_XHCI_HCD is not set CONFIG_USB_EHCI_HCD=y -# CONFIG_USB_EHCI_ROOT_HUB_TT is not set +CONFIG_USB_EHCI_ROOT_HUB_TT=y CONFIG_USB_EHCI_TT_NEWSCHED=y CONFIG_USB_EHCI_PCI=y CONFIG_USB_EHCI_HCD_PLATFORM=y +CONFIG_USB_EHCI_XGS_IPROC=y # CONFIG_USB_OXU210HP_HCD is not set # CONFIG_USB_ISP116X_HCD is not set -CONFIG_USB_ISP1760_HCD=y # CONFIG_USB_ISP1362_HCD is not set -# CONFIG_USB_FUSBH200_HCD is not set # CONFIG_USB_FOTG210_HCD is not set # CONFIG_USB_MAX3421_HCD is not set CONFIG_USB_OHCI_HCD=y CONFIG_USB_OHCI_HCD_PCI=y CONFIG_USB_OHCI_HCD_PLATFORM=y +CONFIG_USB_OHCI_XGS_IPROC=y # CONFIG_USB_UHCI_HCD is not set # CONFIG_USB_SL811_HCD is not set # CONFIG_USB_R8A66597_HCD is not set @@ -2081,7 +2187,6 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_STORAGE_SDDR55 is not set # CONFIG_USB_STORAGE_JUMPSHOT is not set # CONFIG_USB_STORAGE_ALAUDA is not set -# CONFIG_USB_STORAGE_ONETOUCH is not set # CONFIG_USB_STORAGE_KARMA is not set # CONFIG_USB_STORAGE_CYPRESS_ATACB is not set # CONFIG_USB_STORAGE_ENE_UB6250 is not set @@ -2094,21 +2199,10 @@ CONFIG_USB_STORAGE=y # CONFIG_USB_MICROTEK is not set # CONFIG_USBIP_CORE is not set # CONFIG_USB_MUSB_HDRC is not set -CONFIG_USB_DWC3=y -CONFIG_USB_DWC3_HOST=y - -# -# Platform Glue Driver Support -# -CONFIG_USB_DWC3_PCI=y - -# -# Debugging features -# -# CONFIG_USB_DWC3_DEBUG is not set -# CONFIG_DWC3_HOST_USB3_LPM_ENABLE is not set +# CONFIG_USB_DWC3 is not set # CONFIG_USB_DWC2 is not set # CONFIG_USB_CHIPIDEA is not set +# CONFIG_USB_ISP1760 is not set # # USB port drivers @@ -2142,20 +2236,67 @@ CONFIG_USB_DWC3_PCI=y # CONFIG_USB_EZUSB_FX2 is not set # CONFIG_USB_HSIC_USB3503 is not set # CONFIG_USB_LINK_LAYER_TEST is not set +# CONFIG_USB_CHAOSKEY is not set # # USB Physical Layer drivers # -# CONFIG_USB_PHY is not set 
+CONFIG_USB_PHY=y # CONFIG_NOP_USB_XCEIV is not set +# CONFIG_AM335X_PHY_USB is not set # CONFIG_USB_GPIO_VBUS is not set # CONFIG_USB_ISP1301 is not set -CONFIG_USB_ULPI=y -# CONFIG_USB_GADGET is not set +# CONFIG_USB_ULPI is not set +CONFIG_USBPHY_XGS_IPROC=y +CONFIG_USB_GADGET=y +# CONFIG_USB_GADGET_DEBUG is not set +# CONFIG_USB_GADGET_DEBUG_FILES is not set +# CONFIG_USB_GADGET_DEBUG_FS is not set +CONFIG_USB_GADGET_VBUS_DRAW=2 +CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=2 + +# +# USB Peripheral Controller +# +# CONFIG_USB_FUSB300 is not set +# CONFIG_USB_FOTG210_UDC is not set +# CONFIG_USB_GR_UDC is not set +# CONFIG_USB_R8A66597 is not set +# CONFIG_USB_PXA27X is not set +# CONFIG_USB_MV_UDC is not set +# CONFIG_USB_MV_U3D is not set +# CONFIG_USB_M66592 is not set +# CONFIG_USB_BDC_UDC is not set +# CONFIG_USB_AMD5536UDC is not set +# CONFIG_USB_NET2272 is not set +# CONFIG_USB_NET2280 is not set +# CONFIG_USB_GOKU is not set +# CONFIG_USB_EG20T is not set +# CONFIG_USB_GADGET_XILINX is not set +CONFIG_USB_XGS_IPROC_UDC=m +# CONFIG_USB_DUMMY_HCD is not set +CONFIG_USB_LIBCOMPOSITE=m +CONFIG_USB_F_ACM=m +CONFIG_USB_U_SERIAL=m +CONFIG_USB_F_SERIAL=m +CONFIG_USB_F_OBEX=m +# CONFIG_USB_CONFIGFS is not set +# CONFIG_USB_ZERO is not set +# CONFIG_USB_ETH is not set +# CONFIG_USB_G_NCM is not set +# CONFIG_USB_GADGETFS is not set +# CONFIG_USB_FUNCTIONFS is not set +# CONFIG_USB_MASS_STORAGE is not set +CONFIG_USB_G_SERIAL=m +# CONFIG_USB_G_PRINTER is not set +# CONFIG_USB_CDC_COMPOSITE is not set +# CONFIG_USB_G_ACM_MS is not set +# CONFIG_USB_G_MULTI is not set +# CONFIG_USB_G_HID is not set +# CONFIG_USB_G_DBGP is not set # CONFIG_UWB is not set CONFIG_MMC=y # CONFIG_MMC_DEBUG is not set -# CONFIG_MMC_CLKGATE is not set # # MMC/SD/SDIO Card Drivers @@ -2169,131 +2310,48 @@ CONFIG_MMC_BLOCK_BOUNCE=y # # MMC/SD/SDIO Host Controller Drivers # -CONFIG_MMC_ARMMMCI=y +# CONFIG_MMC_ARMMMCI is not set CONFIG_MMC_SDHCI=y CONFIG_MMC_SDHCI_IO_ACCESSORS=y # 
CONFIG_MMC_SDHCI_PCI is not set -CONFIG_MMC_SDHCI_PLTFM=y -# CONFIG_MMC_SDHCI_OF_ARASAN is not set -CONFIG_MMC_SDHCI_OF_ESDHC=y -# CONFIG_MMC_SDHCI_PXAV3 is not set -# CONFIG_MMC_SDHCI_PXAV2 is not set +# CONFIG_MMC_SDHCI_PLTFM is not set # CONFIG_MMC_TIFM_SD is not set -CONFIG_MMC_SPI=y +# CONFIG_MMC_SPI is not set # CONFIG_MMC_CB710 is not set # CONFIG_MMC_VIA_SDMMC is not set +# CONFIG_MMC_DW is not set # CONFIG_MMC_VUB300 is not set # CONFIG_MMC_USHC is not set # CONFIG_MMC_USDHI6ROL0 is not set +# CONFIG_MMC_TOSHIBA_PCI is not set +# CONFIG_MMC_MTK is not set +CONFIG_MMC_SDHCI_XGS_IPROC=y # CONFIG_MEMSTICK is not set # CONFIG_NEW_LEDS is not set # CONFIG_ACCESSIBILITY is not set # CONFIG_INFINIBAND is not set +CONFIG_EDAC_ATOMIC_SCRUB=y +CONFIG_EDAC_SUPPORT=y +# CONFIG_EDAC is not set CONFIG_RTC_LIB=y -CONFIG_RTC_CLASS=y -CONFIG_RTC_HCTOSYS=y -CONFIG_RTC_SYSTOHC=y -CONFIG_RTC_HCTOSYS_DEVICE="rtc0" -# CONFIG_RTC_DEBUG is not set - -# -# RTC interfaces -# -CONFIG_RTC_INTF_SYSFS=y -CONFIG_RTC_INTF_PROC=y -CONFIG_RTC_INTF_DEV=y -# CONFIG_RTC_INTF_DEV_UIE_EMUL is not set -# CONFIG_RTC_DRV_TEST is not set - -# -# I2C RTC drivers -# -# CONFIG_RTC_DRV_DS1307 is not set -# CONFIG_RTC_DRV_DS1374 is not set -# CONFIG_RTC_DRV_DS1672 is not set -CONFIG_RTC_DRV_DS3232=y -# CONFIG_RTC_DRV_HYM8563 is not set -# CONFIG_RTC_DRV_MAX6900 is not set -# CONFIG_RTC_DRV_RS5C372 is not set -# CONFIG_RTC_DRV_ISL1208 is not set -# CONFIG_RTC_DRV_ISL12022 is not set -# CONFIG_RTC_DRV_ISL12057 is not set -# CONFIG_RTC_DRV_X1205 is not set -# CONFIG_RTC_DRV_PCF2127 is not set -# CONFIG_RTC_DRV_PCF8523 is not set -# CONFIG_RTC_DRV_PCF8563 is not set -# CONFIG_RTC_DRV_PCF85063 is not set -# CONFIG_RTC_DRV_PCF8583 is not set -# CONFIG_RTC_DRV_M41T80 is not set -# CONFIG_RTC_DRV_BQ32K is not set -# CONFIG_RTC_DRV_S35390A is not set -# CONFIG_RTC_DRV_FM3130 is not set -# CONFIG_RTC_DRV_RX8581 is not set -# CONFIG_RTC_DRV_RX8025 is not set -# CONFIG_RTC_DRV_EM3027 is not set -# 
CONFIG_RTC_DRV_RV3029C2 is not set - -# -# SPI RTC drivers -# -# CONFIG_RTC_DRV_M41T93 is not set -# CONFIG_RTC_DRV_M41T94 is not set -# CONFIG_RTC_DRV_DS1305 is not set -# CONFIG_RTC_DRV_DS1343 is not set -# CONFIG_RTC_DRV_DS1347 is not set -# CONFIG_RTC_DRV_DS1390 is not set -# CONFIG_RTC_DRV_MAX6902 is not set -# CONFIG_RTC_DRV_R9701 is not set -# CONFIG_RTC_DRV_RS5C348 is not set -# CONFIG_RTC_DRV_DS3234 is not set -# CONFIG_RTC_DRV_PCF2123 is not set -# CONFIG_RTC_DRV_RX4581 is not set -# CONFIG_RTC_DRV_MCP795 is not set - -# -# Platform RTC drivers -# -# CONFIG_RTC_DRV_DS1286 is not set -# CONFIG_RTC_DRV_DS1511 is not set -# CONFIG_RTC_DRV_DS1553 is not set -# CONFIG_RTC_DRV_DS1742 is not set -# CONFIG_RTC_DRV_DS2404 is not set -CONFIG_RTC_DRV_EFI=y -# CONFIG_RTC_DRV_STK17TA8 is not set -# CONFIG_RTC_DRV_M48T86 is not set -# CONFIG_RTC_DRV_M48T35 is not set -# CONFIG_RTC_DRV_M48T59 is not set -# CONFIG_RTC_DRV_MSM6242 is not set -# CONFIG_RTC_DRV_BQ4802 is not set -# CONFIG_RTC_DRV_RP5C01 is not set -# CONFIG_RTC_DRV_V3020 is not set - -# -# on-CPU RTC drivers -# -# CONFIG_RTC_DRV_PL030 is not set -# CONFIG_RTC_DRV_PL031 is not set -# CONFIG_RTC_DRV_SNVS is not set -CONFIG_RTC_DRV_XGENE=y - -# -# HID Sensor RTC drivers -# -# CONFIG_RTC_DRV_HID_SENSOR_TIME is not set +# CONFIG_RTC_CLASS is not set CONFIG_DMADEVICES=y -# CONFIG_DMADEVICES_DEBUG is not set +CONFIG_DMADEVICES_DEBUG=y +CONFIG_DMADEVICES_VDEBUG=y # # DMA Devices # -# CONFIG_AMBA_PL08X is not set -# CONFIG_DW_DMAC_CORE is not set -# CONFIG_DW_DMAC is not set -# CONFIG_DW_DMAC_PCI is not set -# CONFIG_PL330_DMA is not set -# CONFIG_FSL_EDMA is not set CONFIG_DMA_ENGINE=y CONFIG_DMA_OF=y +# CONFIG_AMBA_PL08X is not set +# CONFIG_FSL_EDMA is not set +# CONFIG_INTEL_IDMA64 is not set +# CONFIG_NBPFAXI_DMA is not set +CONFIG_PL330_DMA=y +# CONFIG_XGS_IPROC_DMA330_DMA is not set +# CONFIG_DW_DMAC is not set +# CONFIG_DW_DMAC_PCI is not set # # DMA Clients @@ -2304,68 +2362,17 @@ CONFIG_DMA_OF=y # 
CONFIG_UIO is not set # CONFIG_VIRT_DRIVERS is not set -CONFIG_VFIO_IOMMU_TYPE1=y -CONFIG_VFIO=y -CONFIG_VFIO_PCI=y -CONFIG_VFIO_FSL_MC=y - -CONFIG_VIRTIO=y - # # Virtio drivers # -CONFIG_VIRTIO_PCI=y -CONFIG_VIRTIO_BALLOON=y -CONFIG_VIRTIO_MMIO=y -# CONFIG_VIRTIO_MMIO_CMDLINE_DEVICES is not set +# CONFIG_VIRTIO_PCI is not set +# CONFIG_VIRTIO_MMIO is not set # # Microsoft Hyper-V guest support # -CONFIG_STAGING=y -# CONFIG_COMEDI is not set -# CONFIG_RTS5208 is not set -# CONFIG_FB_XGI is not set -# CONFIG_BCM_WIMAX is not set -# CONFIG_FT1000 is not set - -# -# Speakup console speech -# -# CONFIG_SPEAKUP is not set -# CONFIG_TOUCHSCREEN_CLEARPAD_TM1217 is not set -# CONFIG_TOUCHSCREEN_SYNAPTICS_I2C_RMI4 is not set -# CONFIG_STAGING_MEDIA is not set - -# -# Android -# -# CONFIG_ANDROID is not set -# CONFIG_USB_WPAN_HCD is not set -# CONFIG_WIMAX_GDM72XX is not set -# CONFIG_LTE_GDM724X is not set -# CONFIG_MTD_SPINAND_MT29F is not set -# CONFIG_LUSTRE_FS is not set -# CONFIG_DGNC is not set -# CONFIG_DGAP is not set -# CONFIG_GS_FPGABOOT is not set -CONFIG_FSL_MC_BUS=y -CONFIG_FSL_MC_RESTOOL=y -CONFIG_FSL_MC_DPIO=y -# CONFIG_FSL_QBMAN_DEBUG is not set -CONFIG_FSL_DPAA2=y -CONFIG_FSL_DPAA2_ETH=y -# CONFIG_FSL_DPAA2_ETH_USE_ERR_QUEUE is not set -CONFIG_FSL_DPAA2_MAC=y -# CONFIG_FSL_DPAA2_MAC_NETDEVS is not set - -# -# SOC (System On Chip) specific Drivers -# -# CONFIG_SOC_TI is not set -CONFIG_FSL_SOC_DRIVERS=y -CONFIG_FSL_GUTS=y -CONFIG_LS_SOC_DRIVERS=y +# CONFIG_STAGING is not set +# CONFIG_CHROME_PLATFORMS is not set CONFIG_CLKDEV_LOOKUP=y CONFIG_HAVE_CLK_PREPARE=y CONFIG_COMMON_CLK=y @@ -2373,15 +2380,15 @@ CONFIG_COMMON_CLK=y # # Common Clock Framework # -CONFIG_COMMON_CLK_VERSATILE=y -CONFIG_CLK_SP810=y -CONFIG_CLK_VEXPRESS_OSC=y # CONFIG_COMMON_CLK_SI5351 is not set +# CONFIG_COMMON_CLK_SI514 is not set # CONFIG_COMMON_CLK_SI570 is not set -CONFIG_CLK_QORIQ=y -CONFIG_COMMON_CLK_XGENE=y +# CONFIG_COMMON_CLK_CDCE925 is not set +# CONFIG_CLK_QORIQ is not set # 
CONFIG_COMMON_CLK_PXA is not set -# CONFIG_COMMON_CLK_QCOM is not set +# CONFIG_COMMON_CLK_CDCE706 is not set +CONFIG_COMMON_CLK_IPROC=y +CONFIG_CLK_XGS_IPROC=y # # Hardware Spinlock drivers @@ -2391,27 +2398,23 @@ CONFIG_COMMON_CLK_XGENE=y # Clock Source drivers # CONFIG_CLKSRC_OF=y -CONFIG_CLKSRC_MMIO=y -CONFIG_ARM_ARCH_TIMER=y -CONFIG_ARM_ARCH_TIMER_EVTSTREAM=y +CONFIG_CLKSRC_PROBE=y +CONFIG_ARM_GLOBAL_TIMER=y +# CONFIG_ARM_TIMER_SP804 is not set +CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK=y # CONFIG_ATMEL_PIT is not set # CONFIG_SH_TIMER_CMT is not set # CONFIG_SH_TIMER_MTU2 is not set # CONFIG_SH_TIMER_TMU is not set # CONFIG_EM_TIMER_STI is not set -CONFIG_CLKSRC_VERSATILE=y # CONFIG_MAILBOX is not set -CONFIG_IOMMU_API=y CONFIG_IOMMU_SUPPORT=y # # Generic IOMMU Pagetable Support # -CONFIG_IOMMU_IO_PGTABLE=y -CONFIG_IOMMU_IO_PGTABLE_LPAE=y -# CONFIG_IOMMU_IO_PGTABLE_LPAE_SELFTEST is not set -CONFIG_OF_IOMMU=y -CONFIG_ARM_SMMU=y +# CONFIG_IOMMU_IO_PGTABLE_LPAE is not set +# CONFIG_ARM_SMMU is not set # # Remoteproc drivers @@ -2425,62 +2428,73 @@ CONFIG_ARM_SMMU=y # # SOC (System On Chip) specific Drivers # +# CONFIG_SOC_BRCMSTB is not set +CONFIG_SOC_XGS_IPROC=y +# CONFIG_SUNXI_SRAM is not set +# CONFIG_SOC_TI is not set # CONFIG_PM_DEVFREQ is not set # CONFIG_EXTCON is not set -CONFIG_MEMORY=y -CONFIG_FSL_IFC=y +# CONFIG_MEMORY is not set # CONFIG_IIO is not set +# CONFIG_NTB is not set # CONFIG_VME_BUS is not set # CONFIG_PWM is not set CONFIG_IRQCHIP=y CONFIG_ARM_GIC=y -CONFIG_ARM_GIC_V2M=y -CONFIG_ARM_GIC_V3=y -CONFIG_ARM_GIC_V3_ITS=y # CONFIG_IPACK_BUS is not set -CONFIG_RESET_CONTROLLER=y +# CONFIG_RESET_CONTROLLER is not set # CONFIG_FMC is not set # # PHY Subsystem # -CONFIG_GENERIC_PHY=y +# CONFIG_GENERIC_PHY is not set +# CONFIG_PHY_PXA_28NM_HSIC is not set +# CONFIG_PHY_PXA_28NM_USB2 is not set # CONFIG_BCM_KONA_USB2_PHY is not set -CONFIG_PHY_XGENE=y # CONFIG_POWERCAP is not set # CONFIG_MCB is not set -CONFIG_RAS=y + +# +# Performance monitor 
support +# +# CONFIG_RAS is not set # CONFIG_THUNDERBOLT is not set +# +# Android +# +# CONFIG_ANDROID is not set +# CONFIG_NVMEM is not set +# CONFIG_STM is not set +# CONFIG_STM_DUMMY is not set +# CONFIG_STM_SOURCE_CONSOLE is not set +# CONFIG_INTEL_TH is not set + +# +# FPGA Configuration Support +# +# CONFIG_FPGA is not set + # # Firmware Drivers # # CONFIG_FIRMWARE_MEMMAP is not set -# -# EFI (Extensible Firmware Interface) Support -# -# CONFIG_EFI_VARS is not set -CONFIG_EFI_PARAMS_FROM_FDT=y -CONFIG_EFI_RUNTIME_WRAPPERS=y -CONFIG_EFI_ARMSTUB=y - # # File systems # CONFIG_DCACHE_WORD_ACCESS=y CONFIG_EXT2_FS=y # CONFIG_EXT2_FS_XATTR is not set -# CONFIG_EXT2_FS_XIP is not set CONFIG_EXT3_FS=y -# CONFIG_EXT3_DEFAULTS_TO_ORDERED is not set -# CONFIG_EXT3_FS_XATTR is not set +# CONFIG_EXT3_FS_POSIX_ACL is not set +# CONFIG_EXT3_FS_SECURITY is not set CONFIG_EXT4_FS=y # CONFIG_EXT4_FS_POSIX_ACL is not set # CONFIG_EXT4_FS_SECURITY is not set +# CONFIG_EXT4_ENCRYPTION is not set # CONFIG_EXT4_DEBUG is not set -CONFIG_JBD=y -# CONFIG_JBD_DEBUG is not set CONFIG_JBD2=y # CONFIG_JBD2_DEBUG is not set CONFIG_FS_MBCACHE=y @@ -2488,20 +2502,21 @@ CONFIG_FS_MBCACHE=y # CONFIG_JFS_FS is not set # CONFIG_XFS_FS is not set # CONFIG_GFS2_FS is not set +# CONFIG_OCFS2_FS is not set # CONFIG_BTRFS_FS is not set # CONFIG_NILFS2_FS is not set -# CONFIG_FS_POSIX_ACL is not set +# CONFIG_F2FS_FS is not set +CONFIG_FS_POSIX_ACL=y +CONFIG_EXPORTFS=y CONFIG_FILE_LOCKING=y -CONFIG_FSNOTIFY=y -CONFIG_DNOTIFY=y -CONFIG_INOTIFY_USER=y -CONFIG_FANOTIFY=y -CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y +# CONFIG_FSNOTIFY is not set +# CONFIG_DNOTIFY is not set +# CONFIG_INOTIFY_USER is not set +# CONFIG_FANOTIFY is not set # CONFIG_QUOTA is not set # CONFIG_QUOTACTL is not set -# CONFIG_AUTOFS4_FS is not set -CONFIG_FUSE_FS=y -CONFIG_CUSE=y +CONFIG_AUTOFS4_FS=y +# CONFIG_FUSE_FS is not set CONFIG_OVERLAY_FS=y # @@ -2523,23 +2538,21 @@ CONFIG_MSDOS_FS=y CONFIG_VFAT_FS=y 
CONFIG_FAT_DEFAULT_CODEPAGE=437 CONFIG_FAT_DEFAULT_IOCHARSET="iso8859-1" -# CONFIG_NTFS_FS is not set +CONFIG_NTFS_FS=y +# CONFIG_NTFS_DEBUG is not set +# CONFIG_NTFS_RW is not set # # Pseudo filesystems # CONFIG_PROC_FS=y -# CONFIG_PROC_KCORE is not set CONFIG_PROC_SYSCTL=y -CONFIG_PROC_PAGE_MONITOR=y +# CONFIG_PROC_PAGE_MONITOR is not set +# CONFIG_PROC_CHILDREN is not set CONFIG_KERNFS=y CONFIG_SYSFS=y -CONFIG_TMPFS=y -# CONFIG_TMPFS_POSIX_ACL is not set -CONFIG_TMPFS_XATTR=y -CONFIG_HUGETLBFS=y -CONFIG_HUGETLB_PAGE=y -# CONFIG_CONFIGFS_FS is not set +# CONFIG_HUGETLB_PAGE is not set +CONFIG_CONFIGFS_FS=m CONFIG_MISC_FILESYSTEMS=y # CONFIG_ADFS_FS is not set # CONFIG_AFFS_FS is not set @@ -2553,28 +2566,21 @@ CONFIG_JFFS2_FS=y CONFIG_JFFS2_FS_DEBUG=0 CONFIG_JFFS2_FS_WRITEBUFFER=y # CONFIG_JFFS2_FS_WBUF_VERIFY is not set -CONFIG_JFFS2_SUMMARY=y +# CONFIG_JFFS2_SUMMARY is not set # CONFIG_JFFS2_FS_XATTR is not set # CONFIG_JFFS2_COMPRESSION_OPTIONS is not set CONFIG_JFFS2_ZLIB=y # CONFIG_JFFS2_LZO is not set CONFIG_JFFS2_RTIME=y # CONFIG_JFFS2_RUBIN is not set +CONFIG_UBIFS_FS=y +# CONFIG_UBIFS_FS_ADVANCED_COMPR is not set +CONFIG_UBIFS_FS_LZO=y +CONFIG_UBIFS_FS_ZLIB=y +# CONFIG_UBIFS_ATIME_SUPPORT is not set # CONFIG_LOGFS is not set # CONFIG_CRAMFS is not set -CONFIG_SQUASHFS=y -CONFIG_SQUASHFS_FILE_CACHE=y -# CONFIG_SQUASHFS_FILE_DIRECT is not set -CONFIG_SQUASHFS_DECOMP_SINGLE=y -# CONFIG_SQUASHFS_DECOMP_MULTI is not set -# CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU is not set -CONFIG_SQUASHFS_XATTR=y -CONFIG_SQUASHFS_ZLIB=y -CONFIG_SQUASHFS_LZO=y -CONFIG_SQUASHFS_XZ=y -# CONFIG_SQUASHFS_4K_DEVBLK_SIZE is not set -# CONFIG_SQUASHFS_EMBEDDED is not set -CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 +# CONFIG_SQUASHFS is not set # CONFIG_VXFS_FS is not set # CONFIG_MINIX_FS is not set # CONFIG_OMFS_FS is not set @@ -2585,36 +2591,34 @@ CONFIG_SQUASHFS_FRAGMENT_CACHE_SIZE=3 # CONFIG_PSTORE is not set # CONFIG_SYSV_FS is not set # CONFIG_UFS_FS is not set -# CONFIG_F2FS_FS is 
not set -# CONFIG_EFIVAR_FS is not set -# CONFIG_AUFS_FS is not set CONFIG_NETWORK_FILESYSTEMS=y CONFIG_NFS_FS=y CONFIG_NFS_V2=y CONFIG_NFS_V3=y -# CONFIG_NFS_V3_ACL is not set +CONFIG_NFS_V3_ACL=y CONFIG_NFS_V4=y -# CONFIG_NFS_SWAP is not set +CONFIG_NFS_SWAP=y # CONFIG_NFS_V4_1 is not set CONFIG_ROOT_NFS=y # CONFIG_NFS_USE_LEGACY_DNS is not set CONFIG_NFS_USE_KERNEL_DNS=y -# CONFIG_NFSD is not set +CONFIG_NFSD=y +# CONFIG_NFSD_V3 is not set +# CONFIG_NFSD_V4 is not set CONFIG_GRACE_PERIOD=y CONFIG_LOCKD=y CONFIG_LOCKD_V4=y +CONFIG_NFS_ACL_SUPPORT=y CONFIG_NFS_COMMON=y CONFIG_SUNRPC=y CONFIG_SUNRPC_GSS=y +CONFIG_SUNRPC_SWAP=y # CONFIG_SUNRPC_DEBUG is not set # CONFIG_CEPH_FS is not set # CONFIG_CIFS is not set # CONFIG_NCP_FS is not set # CONFIG_CODA_FS is not set # CONFIG_AFS_FS is not set -CONFIG_9P_FS=y -# CONFIG_9P_FS_POSIX_ACL is not set -# CONFIG_9P_FS_SECURITY is not set CONFIG_NLS=y CONFIG_NLS_DEFAULT="iso8859-1" CONFIG_NLS_CODEPAGE_437=y @@ -2665,16 +2669,8 @@ CONFIG_NLS_ISO8859_1=y # CONFIG_NLS_MAC_INUIT is not set # CONFIG_NLS_MAC_ROMANIAN is not set # CONFIG_NLS_MAC_TURKISH is not set -CONFIG_NLS_UTF8=y -CONFIG_HAVE_KVM_IRQCHIP=y -CONFIG_KVM_MMIO=y -CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT=y -CONFIG_VIRTUALIZATION=y -CONFIG_KVM=y -CONFIG_KVM_ARM_HOST=y -CONFIG_KVM_ARM_MAX_VCPUS=8 -CONFIG_KVM_ARM_VGIC=y -CONFIG_KVM_ARM_TIMER=y +# CONFIG_NLS_UTF8 is not set +# CONFIG_DLM is not set # # Kernel hacking @@ -2683,7 +2679,7 @@ CONFIG_KVM_ARM_TIMER=y # # printk and dmesg options # -CONFIG_PRINTK_TIME=y +# CONFIG_PRINTK_TIME is not set CONFIG_MESSAGE_LOGLEVEL_DEFAULT=4 # CONFIG_BOOT_PRINTK_DELAY is not set # CONFIG_DYNAMIC_DEBUG is not set @@ -2695,16 +2691,18 @@ CONFIG_DEBUG_INFO=y # CONFIG_DEBUG_INFO_REDUCED is not set # CONFIG_DEBUG_INFO_SPLIT is not set # CONFIG_DEBUG_INFO_DWARF4 is not set -CONFIG_ENABLE_WARN_DEPRECATED=y -CONFIG_ENABLE_MUST_CHECK=y -CONFIG_FRAME_WARN=2048 +# CONFIG_GDB_SCRIPTS is not set +# CONFIG_ENABLE_WARN_DEPRECATED is not set +# 
CONFIG_ENABLE_MUST_CHECK is not set +CONFIG_FRAME_WARN=1024 # CONFIG_STRIP_ASM_SYMS is not set # CONFIG_READABLE_ASM is not set # CONFIG_UNUSED_SYMBOLS is not set +# CONFIG_PAGE_OWNER is not set CONFIG_DEBUG_FS=y # CONFIG_HEADERS_CHECK is not set # CONFIG_DEBUG_SECTION_MISMATCH is not set -CONFIG_ARCH_WANT_FRAME_POINTERS=y +CONFIG_SECTION_MISMATCH_WARN_ONLY=y CONFIG_FRAME_POINTER=y # CONFIG_DEBUG_FORCE_WEAK_PER_CPU is not set CONFIG_MAGIC_SYSRQ=y @@ -2714,33 +2712,31 @@ CONFIG_DEBUG_KERNEL=y # # Memory Debugging # +# CONFIG_PAGE_EXTENSION is not set # CONFIG_DEBUG_PAGEALLOC is not set # CONFIG_DEBUG_OBJECTS is not set -# CONFIG_DEBUG_SLAB is not set +# CONFIG_SLUB_STATS is not set CONFIG_HAVE_DEBUG_KMEMLEAK=y # CONFIG_DEBUG_KMEMLEAK is not set # CONFIG_DEBUG_STACK_USAGE is not set # CONFIG_DEBUG_VM is not set -CONFIG_DEBUG_MEMORY_INIT=y +# CONFIG_DEBUG_MEMORY_INIT is not set # CONFIG_DEBUG_PER_CPU_MAPS is not set # CONFIG_DEBUG_SHIRQ is not set # # Debug Lockups and Hangs # -CONFIG_LOCKUP_DETECTOR=y -# CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC is not set -CONFIG_BOOTPARAM_SOFTLOCKUP_PANIC_VALUE=0 -CONFIG_DETECT_HUNG_TASK=y -CONFIG_DEFAULT_HUNG_TASK_TIMEOUT=120 -# CONFIG_BOOTPARAM_HUNG_TASK_PANIC is not set -CONFIG_BOOTPARAM_HUNG_TASK_PANIC_VALUE=0 +# CONFIG_LOCKUP_DETECTOR is not set +# CONFIG_DETECT_HUNG_TASK is not set # CONFIG_PANIC_ON_OOPS is not set CONFIG_PANIC_ON_OOPS_VALUE=0 CONFIG_PANIC_TIMEOUT=0 -CONFIG_SCHED_DEBUG=y +# CONFIG_SCHED_DEBUG is not set +# CONFIG_SCHED_INFO is not set # CONFIG_SCHEDSTATS is not set # CONFIG_SCHED_STACK_END_CHECK is not set +# CONFIG_DEBUG_TIMEKEEPING is not set # CONFIG_TIMER_STATS is not set CONFIG_DEBUG_PREEMPT=y @@ -2759,7 +2755,6 @@ CONFIG_DEBUG_PREEMPT=y # CONFIG_LOCK_TORTURE_TEST is not set # CONFIG_STACKTRACE is not set # CONFIG_DEBUG_KOBJECT is not set -CONFIG_HAVE_DEBUG_BUGVERBOSE=y CONFIG_DEBUG_BUGVERBOSE=y # CONFIG_DEBUG_LIST is not set # CONFIG_DEBUG_PI_LIST is not set @@ -2770,13 +2765,13 @@ CONFIG_DEBUG_BUGVERBOSE=y # # 
RCU Debugging # +# CONFIG_PROVE_RCU is not set # CONFIG_SPARSE_RCU_POINTER is not set # CONFIG_TORTURE_TEST is not set # CONFIG_RCU_TORTURE_TEST is not set -CONFIG_RCU_CPU_STALL_TIMEOUT=21 -CONFIG_RCU_CPU_STALL_VERBOSE=y -# CONFIG_RCU_CPU_STALL_INFO is not set +CONFIG_RCU_CPU_STALL_TIMEOUT=60 # CONFIG_RCU_TRACE is not set +# CONFIG_RCU_EQS_DEBUG is not set # CONFIG_DEBUG_BLOCK_EXT_DEVT is not set # CONFIG_NOTIFIER_ERROR_INJECTION is not set # CONFIG_FAULT_INJECTION is not set @@ -2799,8 +2794,10 @@ CONFIG_TRACING_SUPPORT=y # CONFIG_INTERVAL_TREE_TEST is not set # CONFIG_PERCPU_TEST is not set # CONFIG_ATOMIC64_SELFTEST is not set +# CONFIG_TEST_HEXDUMP is not set # CONFIG_TEST_STRING_HELPERS is not set # CONFIG_TEST_KSTRTOX is not set +# CONFIG_TEST_PRINTF is not set # CONFIG_TEST_RHASHTABLE is not set # CONFIG_DMA_API_DEBUG is not set # CONFIG_TEST_LKM is not set @@ -2808,36 +2805,32 @@ CONFIG_TRACING_SUPPORT=y # CONFIG_TEST_BPF is not set # CONFIG_TEST_FIRMWARE is not set # CONFIG_TEST_UDELAY is not set +# CONFIG_MEMTEST is not set +# CONFIG_TEST_STATIC_KEYS is not set # CONFIG_SAMPLES is not set CONFIG_HAVE_ARCH_KGDB=y # CONFIG_KGDB is not set +# CONFIG_ARM_PTDUMP is not set # CONFIG_STRICT_DEVMEM is not set -CONFIG_PID_IN_CONTEXTIDR=y -# CONFIG_ARM64_RANDOMIZE_TEXT_OFFSET is not set +# CONFIG_ARM_UNWIND is not set +CONFIG_DEBUG_USER=y +# CONFIG_DEBUG_LL is not set +CONFIG_DEBUG_LL_INCLUDE="mach/debug-macro.S" +# CONFIG_DEBUG_UART_8250 is not set +CONFIG_UNCOMPRESS_INCLUDE="debug/uncompress.h" +# CONFIG_PID_IN_CONTEXTIDR is not set # CONFIG_DEBUG_SET_MODULE_RONX is not set +# CONFIG_CORESIGHT is not set # # Security options # CONFIG_KEYS=y # CONFIG_PERSISTENT_KEYRINGS is not set -# CONFIG_BIG_KEYS is not set # CONFIG_ENCRYPTED_KEYS is not set -# CONFIG_KEYS_DEBUG_PROC_KEYS is not set # CONFIG_SECURITY_DMESG_RESTRICT is not set -CONFIG_SECURITY=y +# CONFIG_SECURITY is not set # CONFIG_SECURITYFS is not set -# CONFIG_SECURITY_NETWORK is not set -# 
CONFIG_SECURITY_PATH is not set -# CONFIG_SECURITY_SMACK is not set -# CONFIG_SECURITY_TOMOYO is not set -# CONFIG_SECURITY_APPARMOR is not set -# CONFIG_SECURITY_YAMA is not set -CONFIG_INTEGRITY=y -# CONFIG_INTEGRITY_SIGNATURE is not set -CONFIG_INTEGRITY_AUDIT=y -# CONFIG_IMA is not set -# CONFIG_EVM is not set CONFIG_DEFAULT_SECURITY_DAC=y CONFIG_DEFAULT_SECURITY="" CONFIG_CRYPTO=y @@ -2847,35 +2840,40 @@ CONFIG_CRYPTO=y # CONFIG_CRYPTO_ALGAPI=y CONFIG_CRYPTO_ALGAPI2=y -CONFIG_CRYPTO_AEAD=y +CONFIG_CRYPTO_AEAD=m CONFIG_CRYPTO_AEAD2=y CONFIG_CRYPTO_BLKCIPHER=y CONFIG_CRYPTO_BLKCIPHER2=y CONFIG_CRYPTO_HASH=y CONFIG_CRYPTO_HASH2=y -CONFIG_CRYPTO_RNG=y +CONFIG_CRYPTO_RNG=m CONFIG_CRYPTO_RNG2=y +CONFIG_CRYPTO_RNG_DEFAULT=m CONFIG_CRYPTO_PCOMP2=y +CONFIG_CRYPTO_AKCIPHER2=y +# CONFIG_CRYPTO_RSA is not set CONFIG_CRYPTO_MANAGER=y CONFIG_CRYPTO_MANAGER2=y # CONFIG_CRYPTO_USER is not set CONFIG_CRYPTO_MANAGER_DISABLE_TESTS=y # CONFIG_CRYPTO_GF128MUL is not set -# CONFIG_CRYPTO_NULL is not set +CONFIG_CRYPTO_NULL=m +CONFIG_CRYPTO_NULL2=y # CONFIG_CRYPTO_PCRYPT is not set CONFIG_CRYPTO_WORKQUEUE=y -CONFIG_CRYPTO_CRYPTD=y +# CONFIG_CRYPTO_CRYPTD is not set # CONFIG_CRYPTO_MCRYPTD is not set -CONFIG_CRYPTO_AUTHENC=y +# CONFIG_CRYPTO_AUTHENC is not set # CONFIG_CRYPTO_TEST is not set -CONFIG_CRYPTO_ABLK_HELPER=y # # Authenticated Encryption with Associated Data # # CONFIG_CRYPTO_CCM is not set # CONFIG_CRYPTO_GCM is not set +# CONFIG_CRYPTO_CHACHA20POLY1305 is not set # CONFIG_CRYPTO_SEQIV is not set +CONFIG_CRYPTO_ECHAINIV=m # # Block modes @@ -2887,12 +2885,13 @@ CONFIG_CRYPTO_CBC=y # CONFIG_CRYPTO_LRW is not set # CONFIG_CRYPTO_PCBC is not set # CONFIG_CRYPTO_XTS is not set +# CONFIG_CRYPTO_KEYWRAP is not set # # Hash modes # # CONFIG_CRYPTO_CMAC is not set -CONFIG_CRYPTO_HMAC=y +CONFIG_CRYPTO_HMAC=m # CONFIG_CRYPTO_XCBC is not set # CONFIG_CRYPTO_VMAC is not set @@ -2903,6 +2902,7 @@ CONFIG_CRYPTO_CRC32C=y # CONFIG_CRYPTO_CRC32 is not set # CONFIG_CRYPTO_CRCT10DIF is not 
set # CONFIG_CRYPTO_GHASH is not set +# CONFIG_CRYPTO_POLY1305 is not set # CONFIG_CRYPTO_MD4 is not set CONFIG_CRYPTO_MD5=y # CONFIG_CRYPTO_MICHAEL_MIC is not set @@ -2910,8 +2910,8 @@ CONFIG_CRYPTO_MD5=y # CONFIG_CRYPTO_RMD160 is not set # CONFIG_CRYPTO_RMD256 is not set # CONFIG_CRYPTO_RMD320 is not set -CONFIG_CRYPTO_SHA1=y -# CONFIG_CRYPTO_SHA256 is not set +# CONFIG_CRYPTO_SHA1 is not set +CONFIG_CRYPTO_SHA256=m # CONFIG_CRYPTO_SHA512 is not set # CONFIG_CRYPTO_TGR192 is not set # CONFIG_CRYPTO_WP512 is not set @@ -2930,6 +2930,7 @@ CONFIG_CRYPTO_DES=y # CONFIG_CRYPTO_FCRYPT is not set # CONFIG_CRYPTO_KHAZAD is not set # CONFIG_CRYPTO_SALSA20 is not set +# CONFIG_CRYPTO_CHACHA20 is not set # CONFIG_CRYPTO_SEED is not set # CONFIG_CRYPTO_SERPENT is not set # CONFIG_CRYPTO_TEA is not set @@ -2940,63 +2941,67 @@ CONFIG_CRYPTO_DES=y # CONFIG_CRYPTO_DEFLATE=y # CONFIG_CRYPTO_ZLIB is not set -# CONFIG_CRYPTO_LZO is not set +CONFIG_CRYPTO_LZO=y +# CONFIG_CRYPTO_842 is not set # CONFIG_CRYPTO_LZ4 is not set # CONFIG_CRYPTO_LZ4HC is not set # # Random Number Generation # -CONFIG_CRYPTO_ANSI_CPRNG=y -# CONFIG_CRYPTO_DRBG_MENU is not set +CONFIG_CRYPTO_ANSI_CPRNG=m +CONFIG_CRYPTO_DRBG_MENU=m +CONFIG_CRYPTO_DRBG_HMAC=y +# CONFIG_CRYPTO_DRBG_HASH is not set +# CONFIG_CRYPTO_DRBG_CTR is not set +CONFIG_CRYPTO_DRBG=m +CONFIG_CRYPTO_JITTERENTROPY=m # CONFIG_CRYPTO_USER_API_HASH is not set # CONFIG_CRYPTO_USER_API_SKCIPHER is not set +# CONFIG_CRYPTO_USER_API_RNG is not set +# CONFIG_CRYPTO_USER_API_AEAD is not set CONFIG_CRYPTO_HW=y -# CONFIG_CRYPTO_DEV_CCP is not set +# CONFIG_CRYPTO_DEV_HIFN_795X is not set # CONFIG_ASYMMETRIC_KEY_TYPE is not set -CONFIG_ARM64_CRYPTO=y -CONFIG_CRYPTO_SHA1_ARM64_CE=y -CONFIG_CRYPTO_SHA2_ARM64_CE=y -CONFIG_CRYPTO_GHASH_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE=y -CONFIG_CRYPTO_AES_ARM64_CE_CCM=y -CONFIG_CRYPTO_AES_ARM64_CE_BLK=y -CONFIG_CRYPTO_AES_ARM64_NEON_BLK=y + +# +# Certificates for signature checking +# +# CONFIG_SYSTEM_TRUSTED_KEYRING 
is not set +# CONFIG_ARM_CRYPTO is not set # CONFIG_BINARY_PRINTF is not set # # Library routines # CONFIG_BITREVERSE=y +CONFIG_HAVE_ARCH_BITREVERSE=y +CONFIG_RATIONAL=y CONFIG_GENERIC_STRNCPY_FROM_USER=y CONFIG_GENERIC_STRNLEN_USER=y CONFIG_GENERIC_NET_UTILS=y CONFIG_GENERIC_PCI_IOMAP=y -CONFIG_GENERIC_IOMAP=y CONFIG_GENERIC_IO=y CONFIG_ARCH_USE_CMPXCHG_LOCKREF=y # CONFIG_CRC_CCITT is not set CONFIG_CRC16=y # CONFIG_CRC_T10DIF is not set -CONFIG_CRC_ITU_T=y +# CONFIG_CRC_ITU_T is not set CONFIG_CRC32=y # CONFIG_CRC32_SELFTEST is not set CONFIG_CRC32_SLICEBY8=y # CONFIG_CRC32_SLICEBY4 is not set # CONFIG_CRC32_SARWATE is not set # CONFIG_CRC32_BIT is not set -CONFIG_CRC7=y +# CONFIG_CRC7 is not set # CONFIG_LIBCRC32C is not set # CONFIG_CRC8 is not set -CONFIG_AUDIT_GENERIC=y -CONFIG_AUDIT_ARCH_COMPAT_GENERIC=y -CONFIG_AUDIT_COMPAT_GENERIC=y +# CONFIG_AUDIT_ARCH_COMPAT_GENERIC is not set # CONFIG_RANDOM32_SELFTEST is not set CONFIG_ZLIB_INFLATE=y CONFIG_ZLIB_DEFLATE=y CONFIG_LZO_COMPRESS=y CONFIG_LZO_DECOMPRESS=y -CONFIG_LZ4_DECOMPRESS=y CONFIG_XZ_DEC=y CONFIG_XZ_DEC_X86=y CONFIG_XZ_DEC_POWERPC=y @@ -3006,12 +3011,6 @@ CONFIG_XZ_DEC_ARMTHUMB=y CONFIG_XZ_DEC_SPARC=y CONFIG_XZ_DEC_BCJ=y # CONFIG_XZ_DEC_TEST is not set -CONFIG_DECOMPRESS_GZIP=y -CONFIG_DECOMPRESS_BZIP2=y -CONFIG_DECOMPRESS_LZMA=y -CONFIG_DECOMPRESS_XZ=y -CONFIG_DECOMPRESS_LZO=y -CONFIG_DECOMPRESS_LZ4=y CONFIG_GENERIC_ALLOCATOR=y CONFIG_ASSOCIATIVE_ARRAY=y CONFIG_HAS_IOMEM=y @@ -3019,19 +3018,12 @@ CONFIG_HAS_IOPORT_MAP=y CONFIG_HAS_DMA=y CONFIG_CPU_RMAP=y CONFIG_DQL=y -CONFIG_GLOB=y -# CONFIG_GLOB_SELFTEST is not set CONFIG_NLATTR=y CONFIG_ARCH_HAS_ATOMIC64_DEC_IF_POSITIVE=y -CONFIG_AVERAGE=y # CONFIG_CORDIC is not set # CONFIG_DDR is not set CONFIG_LIBFDT=y CONFIG_OID_REGISTRY=y -CONFIG_UCS2_STRING=y -CONFIG_FONT_SUPPORT=y -# CONFIG_FONTS is not set -CONFIG_FONT_8x8=y -CONFIG_FONT_8x16=y +# CONFIG_SG_SPLIT is not set CONFIG_ARCH_HAS_SG_CHAIN=y -CONFIG_FSL_LS2_CONSOLE=y +# CONFIG_VIRTUALIZATION is not 
set diff --git a/packages/base/any/kernels/3.18.25/kconfig.mk b/packages/base/any/kernels/4.4-lts/kconfig.mk similarity index 80% rename from packages/base/any/kernels/3.18.25/kconfig.mk rename to packages/base/any/kernels/4.4-lts/kconfig.mk index 0fab8c89..4262cad1 100644 --- a/packages/base/any/kernels/3.18.25/kconfig.mk +++ b/packages/base/any/kernels/4.4-lts/kconfig.mk @@ -18,14 +18,9 @@ # # ############################################################ -# -# 3.18.25 Kernel Builds -# -############################################################ THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) -K_MAJOR_VERSION := 3 -K_PATCH_LEVEL := 18 -K_SUB_LEVEL := 25 +K_MAJOR_VERSION := 4 +K_PATCH_LEVEL := 4 +K_SUB_LEVEL := 39 K_SUFFIX := K_PATCH_DIR := $(THIS_DIR)/patches -K_ARCHIVE_URL := http://opennetlinux.org/tarballs/linux-3.18.25.tar.xz diff --git a/packages/base/any/kernels/4.4-lts/patches/kernel-4.4-brcm-iproc.patch b/packages/base/any/kernels/4.4-lts/patches/kernel-4.4-brcm-iproc.patch new file mode 100644 index 00000000..4da5c804 --- /dev/null +++ b/packages/base/any/kernels/4.4-lts/patches/kernel-4.4-brcm-iproc.patch @@ -0,0 +1,75281 @@ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/Documentation/devicetree/bindings/gpio/xgs-gpio-iproc.txt b/Documentation/devicetree/bindings/gpio/xgs-gpio-iproc.txt +--- a/Documentation/devicetree/bindings/gpio/xgs-gpio-iproc.txt 1970-01-01 08:00:00.000000000 +0800 ++++ b/Documentation/devicetree/bindings/gpio/xgs-gpio-iproc.txt 2017-11-09 17:52:48.421892000 +0800 +@@ -0,0 +1,46 @@ ++Broadcom XGS iProc GPIO Controller ++ ++Required properties: ++- compatible : Should be "brcm,iproc-gpio,cca" or "brcm,iproc-gpio,ccg". ++ "brcm,iproc-gpio,cca" is used for CCA type gpio controllers on Helix4/Katana2/HR2. ++ "brcm,iproc-gpio,ccg" is used for CCG type gpio controllers on Grehound/Saber2/HR3/Greyhound2. 
++- reg : Physical base address and length of the controller's registers. ++- #gpio-cells : should be 2 ++- ngpios : The number of GPIO's the controller provides. ++- interrupts : The interrupt id for the controller. ++- pin-base : The first chip GPIO pin provided by CCA or CCG. For Helix4, set to 4. ++- pin-offset : The offset of available CCA or CCG GPIO pins. For Helix4, set to 0. ++- gpio-controller : Marks the device node as a GPIO controller. ++ ++Optional properties: ++- interrupt-controller : Marks the device node as an interrupt controller. ++ ++ ++Examples for cca gpio: ++ ++ gpio_cca: gpio@18000060 { ++ compatible = "brcm,iproc-gpio,cca"; ++ #gpio-cells = <2>; ++ reg = gpio: <0x18000060 0x50>, ++ intr: <0x18000000 0x50>; ++ ngpios = <8>; ++ pin-offset = <0>; ++ pin-base = <4>; ++ gpio-controller; ++ interrupt-controller; ++ interrupts = ; ++ }; ++ ++ gpio_ccg: gpio@1800a000 { ++ compatible = "brcm,iproc-gpio,ccg"; ++ #gpio-cells = <2>; ++ reg = gpio: <0x1800a000 0x50>; ++ ngpios = <12>; ++ pin-offset = <4>; ++ pin-base = <4>; ++ gpio-controller; ++ interrupt-controller; ++ interrupts = ; ++ }; ++ ++ +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/Documentation/devicetree/bindings/net/xgs-iproc-mdio.txt b/Documentation/devicetree/bindings/net/xgs-iproc-mdio.txt +--- a/Documentation/devicetree/bindings/net/xgs-iproc-mdio.txt 1970-01-01 08:00:00.000000000 +0800 ++++ b/Documentation/devicetree/bindings/net/xgs-iproc-mdio.txt 2017-11-09 17:52:49.481900000 +0800 +@@ -0,0 +1,30 @@ ++* Broadcom XGS iProc MDIO bus controller ++ ++Required properties: ++- compatible: should be "brcm,iproc-ccb-mdio" or "brcm,iproc-ccg-mdio" or "brcm,iproc-cmicd-mdio" ++- reg: address and length of the register set for the MDIO interface ++- #size-cells: must be 0 ++- #address-cells: must be 1 ++- #bus-id: Physical bus ID ++- #logical-bus-id: Logical bus ID, required for cmicd 
++- bus-type: "internal" or "external" ++- clocks: clock source ++ ++Example: ++ ++ mdio_int: mdio_int@18002000 { ++ compatible = "brcm,iproc-ccg-mdio"; ++ reg = <0x18002000 0x1000>; ++ #bus-id = <0>; ++ bus-type = "internal"; ++ clocks = <&iproc_apb_clk>; ++ }; ++ ++ mdio_ext: mdio_ext@03210000 { ++ compatible = "brcm,iproc-cmicd-mdio"; ++ reg = <0x03210000 0x1000>; ++ #bus-id = <2>; ++ #logical-bus-id = <0>; ++ bus-type = "external"; ++ clocks = <&iproc_apb_clk>; ++ }; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/Documentation/devicetree/bindings/pci/xgs-iproc-pcie.txt b/Documentation/devicetree/bindings/pci/xgs-iproc-pcie.txt +--- a/Documentation/devicetree/bindings/pci/xgs-iproc-pcie.txt 1970-01-01 08:00:00.000000000 +0800 ++++ b/Documentation/devicetree/bindings/pci/xgs-iproc-pcie.txt 2017-11-09 17:52:49.540901000 +0800 +@@ -0,0 +1,35 @@ ++* Broadcom XGS iProc PCIe controller ++ ++Required properties: ++- compatible: set to "brcm,iproc-pcie" ++- reg: base address and length of the PCIe controller ++- linux,pci-domain: PCI domain ID. 
Should be unique for each host controller ++- interrupts: interrupt ID ++- #address-cells: set to <3> ++- #size-cells: set to <2> ++- device_type: set to "pci" ++- ranges: ranges for the PCI memory and I/O regions ++ ++Optional properties: ++- phy-addr: PCIe PHY addr for MDIO access ++- bus-range: PCI bus numbers covered ++ ++Example: ++ ++ pcie0: pcie@18012000 { ++ compatible = "brcm,iproc-pcie"; ++ reg = <0x18012000 0x1000>; ++ linux,pci-domain = <0>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ ; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ ranges = <0x82000000 0 0x20000000 0x20000000 0 0x20000000>; ++ phy-addr = <0>; ++ }; ++ +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/Documentation/devicetree/bindings/spi/xgs-iproc-qspi.txt b/Documentation/devicetree/bindings/spi/xgs-iproc-qspi.txt +--- a/Documentation/devicetree/bindings/spi/xgs-iproc-qspi.txt 1970-01-01 08:00:00.000000000 +0800 ++++ b/Documentation/devicetree/bindings/spi/xgs-iproc-qspi.txt 2017-11-09 17:52:50.501910000 +0800 +@@ -0,0 +1,34 @@ ++BROADCOM XGS iProc QSPI controller ++ ++Required properties: ++- compatible: "brcm,iproc-qspi"; ++- reg: Offset and length of the register set including ++ mspi_hw: Master SPI ++ bspi_hw: Boot SPI ++ bspi_hw_raf: Boot SPI read ahead fifo ++ qspi_intr: QSPI interrupt related ++ idm_qspi:QSPI IDM related ++ cru_hw: QSPI CRU related ++- interrupts: interrupt id of the QSPI controller ++- clocks: clock source ++ ++Optional properties: ++- #chip-select: Specify the used chip select for controller with multi chip selects ++ ++Example: ++ ++ qspi: spi@18047000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "brcm,iproc-qspi"; ++ reg = mspi_hw:<0x18047200 0x188>, ++ bspi_hw:<0x18047000 0x050>, ++ bspi_hw_raf:<0x18047100 0x024>, ++ qspi_intr:<0x180473a0 0x01c>, ++ idm_qspi:<0x1811f408 0x004>, ++ cru_hw:<0x1800e000 0x004>; ++ 
interrupts = ; ++ #chip-select = <0>; ++ clocks = <&iproc_apb_clk>; ++ }; ++ +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/Kconfig b/arch/arm/Kconfig +--- a/arch/arm/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/arch/arm/Kconfig 2017-11-09 17:52:54.375925000 +0800 +@@ -850,6 +850,8 @@ source "arch/arm/mach-iop33x/Kconfig" + + source "arch/arm/mach-iop13xx/Kconfig" + ++source "arch/arm/mach-iproc/Kconfig" ++ + source "arch/arm/mach-ixp4xx/Kconfig" + + source "arch/arm/mach-keystone/Kconfig" +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/Makefile b/arch/arm/Makefile +--- a/arch/arm/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/arch/arm/Makefile 2017-11-09 17:52:54.385925000 +0800 +@@ -176,6 +176,7 @@ machine-$(CONFIG_ARCH_INTEGRATOR) += int + machine-$(CONFIG_ARCH_IOP13XX) += iop13xx + machine-$(CONFIG_ARCH_IOP32X) += iop32x + machine-$(CONFIG_ARCH_IOP33X) += iop33x ++machine-$(CONFIG_ARCH_XGS_IPROC) += iproc + machine-$(CONFIG_ARCH_IXP4XX) += ixp4xx + machine-$(CONFIG_ARCH_KEYSTONE) += keystone + machine-$(CONFIG_ARCH_KS8695) += ks8695 +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/Makefile b/arch/arm/boot/dts/Makefile +--- a/arch/arm/boot/dts/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/arch/arm/boot/dts/Makefile 2017-11-09 17:52:54.462930000 +0800 +@@ -777,6 +777,17 @@ dtb-$(CONFIG_ARCH_MEDIATEK) += \ + mt8127-moose.dtb \ + mt8135-evbp1.dtb + dtb-$(CONFIG_ARCH_ZX) += zx296702-ad1.dtb ++ ++dtb-$(CONFIG_MACH_HX4) += bcm956340.dtb ++dtb-$(CONFIG_MACH_KT2) += bcm956450.dtb ++dtb-$(CONFIG_MACH_HR2) += bcm956150.dtb ++dtb-$(CONFIG_MACH_GH) += bcm95341x.dtb ++dtb-$(CONFIG_MACH_GH2) += bcm956170.dtb bcm95357x.dtb ++dtb-$(CONFIG_MACH_SB2) += bcm956260.dtb 
++dtb-$(CONFIG_MACH_WH2) += bcm953547.dtb ++dtb-$(CONFIG_MACH_HR3) += bcm956160.dtb \ ++ bcm953444.dtb ++ + endif + + dtstree := $(srctree)/$(src) +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm-greyhound.dtsi b/arch/arm/boot/dts/bcm-greyhound.dtsi +--- a/arch/arm/boot/dts/bcm-greyhound.dtsi 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm-greyhound.dtsi 2017-11-09 17:52:54.682932000 +0800 +@@ -0,0 +1,362 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include ++#include "skeleton.dtsi" ++ ++ ++/ { ++ model = "Broadcom GH iProc"; ++ compatible = "brcm,greyhound"; ++ interrupt-parent = <&gic>; ++ ++ cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a9"; ++ next-level-cache = <&L2>; ++ reg = <0x0>; ++ }; ++ }; ++ ++ core { ++ compatible = "simple-bus"; ++ ranges = <0x00000000 0x19000000 0x00023000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ a9pll: arm_clk@00000 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-armpll"; ++ clocks = <&osc>; ++ reg = <0x0 0x1000>; ++ }; ++ ++ gic: interrupt-controller@21000 { ++ compatible = "arm,cortex-a9-gic"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ reg = <0x21000 0x1000>, <0x20100 0x100>; ++ }; ++ ++ twd-timer@20600 { ++ compatible = "arm,cortex-a9-twd-timer"; ++ reg = <0x20600 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ timer@20200 { ++ compatible = "arm,cortex-a9-global-timer"; ++ reg = <0x20200 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ L2: l2-cache { ++ compatible = "arm,pl310-cache"; ++ reg = <0x22000 0x1000>; ++ cache-unified; ++ cache-level = <2>; ++ arm,filter-ranges = <0x60000000 0x80000000>; ++ }; ++ }; ++ ++ clocks { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ osc: oscillator { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ 
clock-frequency = <25000000>; ++ }; ++ ++ periph_clk: periph_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&a9pll>; ++ clock-div = <2>; ++ clock-mult = <1>; ++ }; ++ ++ iproc_axi_clk: iproc_axi_clk@0x1800fc00 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-axi-clk"; ++ clocks = <&osc>; ++ reg = <0x1800fc00 0x1c>; ++ }; ++ ++ iproc_apb_clk: iproc_apb_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&iproc_axi_clk>; ++ clock-div = <4>; ++ clock-mult = <1>; ++ }; ++ }; ++ ++ axi { ++ compatible = "simple-bus"; ++ ranges; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ uart0: serial@18020000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x18020000 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@18021000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x18021000 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ gmac0: ethernet@18042000 { ++ compatible = "brcm,iproc-gmac"; ++ reg = <0x18042000 0x1000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gpio_ccg: gpio@1800a000 { ++ compatible = "brcm,iproc-gpio,ccg"; ++ #gpio-cells = <2>; ++ reg = gpio: <0x1800a000 0x50>; ++ ngpios = <12>; ++ pin-offset = <4>; ++ pin-base = <4>; ++ gpio-controller; ++ interrupt-controller; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ usbphy0: usbphy0 { ++ #phy-cells = <0>; ++ compatible = "brcm,usb-phy,gh"; ++ reg = idm_usb2h: <0x18115000 0x1000>, ++ idm_usb2d: <0x18111000 0x1000>; ++ vbus-gpio = <&gpio_ccg 6 GPIO_ACTIVE_HIGH>; ++ status = "disabled"; ++ }; ++ ++ ehci0: usb@18048000 { ++ compatible = "generic-ehci"; ++ reg = <0x18048000 0x800>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ ohci0: usb@18048800 { ++ compatible = "generic-ohci"; ++ reg = <0x18048800 0x800>; ++ 
interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ usbd: usbd@1804c000 { ++ compatible = "brcm,usbd,gh"; ++ reg = <0x1804c000 0x2000>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ nand: nand@18046000 { ++ compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1"; ++ reg = <0x18046000 0x600>, ++ <0xf8105408 0x10>, ++ <0x18046f00 0x20>; ++ reg-names = "nand", "iproc-idm", "iproc-ext"; ++ interrupts = ; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ brcm,nand-has-wp; ++ status = "disabled"; ++ }; ++ ++ qspi: spi@18047000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "brcm,iproc-qspi"; ++ reg = mspi_hw:<0x18047200 0x188>, ++ bspi_hw:<0x18047000 0x050>, ++ bspi_hw_raf:<0x18047100 0x024>, ++ qspi_intr:<0x180473a0 0x01c>, ++ idm_qspi:<0xf8106408 0x004>, ++ cru_hw:<0x1800e000 0x004>; ++ interrupts = ; ++ #chip-select = <0>; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@18008000 { ++ compatible = "brcm,iproc-i2c"; ++ reg = <0x18008000 0x100>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "disabled"; ++ }; ++ ++ mdio_int: mdio_int@18002000 { ++ compatible = "brcm,iproc-ccg-mdio"; ++ reg = <0x18002000 0x1000>; ++ #bus-id = <0>; ++ bus-type = "internal"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ /* cmicd cmic_common mdio */ ++ mdio_ext: mdio_ext@03210000 { ++ compatible = "brcm,iproc-cmicd-mdio"; ++ reg = <0x03210000 0x1000>; ++ #bus-id = <2>; ++ #logical-bus-id = <0>; ++ bus-type = "external"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ pnor_flash: pnor_flash@18045000 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "brcm,iproc-nor"; ++ reg = nor_regs: <0x18045000 0x1000>, ++ nor_mem: <0xE8000000 0x8000000>, ++ nor_sel: <0x18000c8c 0x4>, ++ nor_strap: <0x18000a5c 0x4>; ++ status = "disabled"; ++ }; ++ ++ hwrng: hwrng@18032000 { ++ compatible = 
"brcm,iproc-rng"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x18032000 0x1000>; ++ rng-type = "rng200"; ++ status = "disabled"; ++ }; ++ ++ iproc_wdt: iproc_wdt@18009000 { ++ compatible = "arm,sp805", "arm,primecell"; ++ reg = iproc_wdt_base: <0x18009000 0x1000>, ++ iproc_reset_reg: <0x1800f014 0x4>; ++ wdt_boot_status_bit = <0x0>; ++ clocks = <&iproc_apb_clk>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ /* cmicd */ ++ iproc_cmicd: iproc_cmicd@03200000 { ++ compatible = "brcm,iproc-cmicd"; ++ reg = <0x03200000 0x100000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ }; ++ ++ pcie0: pcie@18012000 { ++ compatible = "brcm,iproc-pcie", "iproc-p7"; ++ reg = <0x18012000 0x1000>; ++ linux,pci-domain = <0>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ ; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ /*non-prefetchable mem space, pcie addr 0x0 0x20000000, ++ * cpu addr 0x20000000, size 0x0 0x20000000 ++ */ ++ ranges = <0x82000000 0 0x20000000 0x20000000 0 0x20000000>; ++ phy-addr = <0>; ++ status = "disabled"; ++ }; ++ ++ dmu_pcu: dmu_pcu@1800f000 { ++ compatible = "brcm,iproc-dmu-pcu"; ++ reg = <0x1800f000 0xc00>; ++ }; ++ ++ iproc_wrap_ctrl: iproc_wrap_ctrl@1800fc00 { ++ compatible = "brcm,iproc-wrap-ctrl"; ++ reg = <0x1800fc00 0x100>; ++ }; ++ ++ iproc_idm: iproc_idm@18100000 { ++ compatible = "brcm,iproc-idm"; ++ reg = idm0: <0x18100000 0x100000>, ++ idm1: <0xf8100000 0x100000>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ }; ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm-greyhound2.dtsi b/arch/arm/boot/dts/bcm-greyhound2.dtsi +--- a/arch/arm/boot/dts/bcm-greyhound2.dtsi 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm-greyhound2.dtsi 2017-11-09 17:52:54.683934000 +0800 +@@ -0,0 +1,415 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 
2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++#include ++#include "skeleton.dtsi" ++ ++ ++/ { ++ model = "Broadcom GH2 iProc"; ++ compatible = "brcm,greyhound2"; ++ interrupt-parent = <&gic>; ++ ++ cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a9"; ++ next-level-cache = <&L2>; ++ reg = <0x0>; ++ }; ++ }; ++ ++ core { ++ compatible = "simple-bus"; ++ ranges = <0x00000000 0x19000000 0x00023000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ a9pll: arm_clk@00000 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-armpll"; ++ clocks = <&osc>; ++ reg = <0x0 0x1000>; ++ }; ++ ++ gic: interrupt-controller@21000 { ++ compatible = "arm,cortex-a9-gic"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ reg = <0x21000 0x1000>, <0x20100 0x100>; ++ }; ++ ++ twd-timer@20600 { ++ compatible = "arm,cortex-a9-twd-timer"; ++ reg = <0x20600 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ timer@20200 { ++ compatible = "arm,cortex-a9-global-timer"; ++ reg = <0x20200 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ L2: l2-cache { ++ compatible = "arm,pl310-cache"; ++ reg = <0x22000 0x1000>; ++ cache-unified; ++ cache-level = <2>; ++ arm,filter-ranges = <0x60000000 0x80000000>; ++ }; ++ }; ++ ++ clocks { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ osc: oscillator { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <25000000>; ++ }; ++ ++ periph_clk: periph_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&a9pll>; ++ clock-div = <2>; ++ clock-mult = <1>; ++ }; ++ ++ iproc_axi_clk: iproc_axi_clk@0x1800fc00 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-axi-clk"; ++ clocks = <&osc>; ++ reg = <0x1800fc00 0x1c>; ++ }; ++ ++ iproc_apb_clk: iproc_apb_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&iproc_axi_clk>; ++ clock-div = <4>; ++ clock-mult = <1>; ++ }; ++ }; ++ ++ axi { ++ 
compatible = "simple-bus"; ++ ranges; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ uart0: serial@18020000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x18020000 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@18021000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x18021000 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ gmac0: ethernet@18042000 { ++ compatible = "brcm,iproc-gmac"; ++ reg = <0x18042000 0x1000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gmac1: ethernet@1804a000 { ++ compatible = "brcm,iproc-gmac"; ++ reg = <0x1804a000 0x1000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gpio_ccg: gpio@1800a000 { ++ compatible = "brcm,iproc-gpio,ccg"; ++ #gpio-cells = <2>; ++ reg = gpio: <0x1800a000 0x50>; ++ ngpios = <12>; ++ pin-offset = <4>; ++ pin-base = <4>; ++ gpio-controller; ++ interrupt-controller; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ usbphy0: usbphy0 { ++ #phy-cells = <0>; ++ compatible = "brcm,usb-phy,gh"; ++ reg = idm_usb2h: <0x18115000 0x1000>, ++ idm_usb2d: <0x18111000 0x1000>, ++ idm_utmih: <0x18049500 0x100>; ++ vbus-gpio = <&gpio_ccg 6 GPIO_ACTIVE_HIGH>; ++ status = "disabled"; ++ }; ++ ++ ehci0: usb@18048000 { ++ compatible = "generic-ehci"; ++ reg = <0x18048000 0x800>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ ohci0: usb@18048800 { ++ compatible = "generic-ohci"; ++ reg = <0x18048800 0x800>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ usbd: usbd@1804c000 { ++ compatible = "brcm,usbd,gh"; ++ reg = <0x1804c000 0x2000>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ nand: nand@18046000 { ++ compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1"; ++ reg = <0x18046000 0x600>, ++ <0xf8105408 0x10>, ++ <0x18046f00 
0x20>; ++ reg-names = "nand", "iproc-idm", "iproc-ext"; ++ interrupts = ; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ brcm,nand-has-wp; ++ status = "disabled"; ++ }; ++ ++ qspi: spi@18047000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "brcm,iproc-qspi"; ++ reg = mspi_hw:<0x18047200 0x188>, ++ bspi_hw:<0x18047000 0x050>, ++ bspi_hw_raf:<0x18047100 0x024>, ++ qspi_intr:<0x180473a0 0x01c>, ++ idm_qspi:<0xf8106408 0x004>, ++ cru_hw:<0x1800e000 0x004>; ++ interrupts = ; ++ #chip-select = <0>; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@18008000 { ++ compatible = "brcm,iproc-i2c"; ++ reg = <0x18008000 0x100>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "disabled"; ++ }; ++ ++ ccg_mdio_int: ccg_mdio_int@18002000 { ++ compatible = "brcm,iproc-ccg-mdio"; ++ reg = <0x18002000 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ #bus-id = <0>; ++ bus-type = "internal"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ /* cmicd cmic_common mdio */ ++ mdio_int: mdio_int@03210000 { ++ compatible = "brcm,iproc-cmicd-mdio"; ++ reg = <0x03210000 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ #bus-id = <0>; ++ #logical-bus-id = <1>; ++ bus-type = "internal"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ /* cmicd cmic_common mdio */ ++ mdio_ext: mdio_ext@03210000 { ++ compatible = "brcm,iproc-cmicd-mdio"; ++ reg = <0x03210000 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ #bus-id = <2>; ++ #logical-bus-id = <0>; ++ bus-type = "external"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ pnor_flash: pnor_flash@18045000 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "brcm,iproc-nor"; ++ reg = nor_regs: <0x18045000 0x1000>, ++ nor_mem: <0xE8000000 0x8000000>, ++ nor_sel: <0x18000c8c 0x4>, ++ nor_strap: <0x18000a5c 0x4>; ++ status = "disabled"; ++ }; ++ ++ hwrng: 
hwrng@18032000 { ++ compatible = "brcm,iproc-rng"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x18032000 0x1000>; ++ rng-type = "rng200"; ++ status = "disabled"; ++ }; ++ ++ iproc_wdt: iproc_wdt@18009000 { ++ compatible = "arm,sp805", "arm,primecell"; ++ reg = iproc_wdt_base: <0x18009000 0x1000>, ++ iproc_reset_reg: <0x1800f014 0x4>; ++ wdt_boot_status_bit = <0x0>; ++ clocks = <&iproc_apb_clk>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ dmac0: dma@18018000 { ++ compatible = "brcm,dma330", "arm,primecell"; ++ reg = dma330_base: <0x18018000 0x1000>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ clocks = <&iproc_apb_clk>; ++ clock-names = "apb_pclk"; ++ #dma-cells = <1>; ++ #dma-channels = <8>; ++ #dma-requests = <16>; ++ status = "disabled"; ++ }; ++ ++ crypto: crypto@03100000 { ++ compatible = "brcm,iproc-crypto"; ++ reg = axi: <0x03100000 0x100>, /* SPUM AXI registers */ ++ apb: <0x18037000 0x100>, /* SPUM control registers */ ++ idm: <0x1811a000 0x1000>; /* Crypto control registers */ ++ brcm,max-pkt-size = <65536>; ++ status = "disabled"; ++ }; ++ ++ /* cmicd */ ++ iproc_cmicd: iproc_cmicd@03200000 { ++ compatible = "brcm,iproc-cmicd"; ++ reg = <0x03200000 0x100000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ }; ++ ++ pcie0: pcie@18012000 { ++ compatible = "brcm,iproc-pcie", "iproc-p7"; ++ reg = <0x18012000 0x1000>; ++ linux,pci-domain = <0>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ ; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ /*non-prefetchable mem space, pcie addr 0x0 0x20000000, ++ * cpu addr 0x20000000, size 0x0 0x20000000 ++ */ ++ ranges = <0x82000000 0 0x20000000 0x20000000 0 0x20000000>; ++ phy-addr = <0>; ++ status = "disabled"; ++ }; ++ ++ dmu_pcu: dmu_pcu@1800f000 { ++ compatible = "brcm,iproc-dmu-pcu"; ++ reg = <0x1800f000 0xc00>; ++ }; ++ ++ iproc_wrap_ctrl: iproc_wrap_ctrl@1800fc00 { ++ compatible = "brcm,iproc-wrap-ctrl"; ++ reg = <0x1800fc00 0x100>; ++ }; ++ ++ 
iproc_idm: iproc_idm@18100000 { ++ compatible = "brcm,iproc-idm"; ++ reg = idm0: <0x18100000 0x100000>, ++ idm1: <0xf8100000 0x100000>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ }; ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm-helix4.dtsi b/arch/arm/boot/dts/bcm-helix4.dtsi +--- a/arch/arm/boot/dts/bcm-helix4.dtsi 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm-helix4.dtsi 2017-11-09 17:52:54.684933000 +0800 +@@ -0,0 +1,434 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include ++#include "skeleton.dtsi" ++ ++ ++/ { ++ model = "Broadcom HX4 iProc"; ++ compatible = "brcm,helix4"; ++ interrupt-parent = <&gic>; ++ ++ cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a9"; ++ next-level-cache = <&L2>; ++ reg = <0x0>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a9"; ++ next-level-cache = <&L2>; ++ enable-method = "brcm,bcm-nsp-smp"; ++ secondary-boot-reg = <0xffff042c>; ++ reg = <0x1>; ++ }; ++ }; ++ ++ mpcore { ++ compatible = "simple-bus"; ++ ranges = <0x00000000 0x19000000 0x00023000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ a9pll: arm_clk@00000 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-armpll"; ++ clocks = <&osc>; ++ reg = <0x0 0x1000>; ++ }; ++ ++ gic: interrupt-controller@21000 { ++ compatible = "arm,cortex-a9-gic"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ reg = <0x21000 0x1000>, <0x20100 0x100>; ++ }; ++ ++ twd-timer@20600 { ++ compatible = "arm,cortex-a9-twd-timer"; ++ reg = <0x20600 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ timer@20200 { ++ compatible = "arm,cortex-a9-global-timer"; ++ reg = <0x20200 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ L2: l2-cache { ++ compatible = "arm,pl310-cache"; ++ reg = <0x22000 0x1000>; ++ cache-unified; ++ cache-level = <2>; ++ 
arm,filter-ranges = <0x60000000 0x80000000>; ++ /*arm,parity-enable = <1>; ++ interrupts = ;*/ ++ }; ++ }; ++ ++ clocks { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ osc: oscillator { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <25000000>; ++ }; ++ ++ periph_clk: periph_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&a9pll>; ++ clock-div = <2>; ++ clock-mult = <1>; ++ }; ++ ++ iproc_axi_clk: iproc_axi_clk@0x1803fc00 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-axi-clk", "axi-clk-hx4"; ++ clocks = <&osc>; ++ reg = <0x1803fc00 0x1c>; ++ }; ++ ++ iproc_apb_clk: iproc_apb_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&iproc_axi_clk>; ++ clock-div = <4>; ++ clock-mult = <1>; ++ }; ++ }; ++ ++ axi { ++ compatible = "simple-bus"; ++ ranges; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ uart0: serial@18000300 { ++ compatible = "ns16550a"; ++ reg = <0x18000300 0x100>; ++ interrupts = ; ++ clock-frequency = <62500000>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@18000400 { ++ compatible = "ns16550a"; ++ reg = <0x18000400 0x100>; ++ interrupts = ; ++ clock-frequency = <62500000>; ++ status = "disabled"; ++ }; ++ ++ uart2: serial@18037000 { ++ compatible = "ns16550a"; ++ reg = <0x18037000 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ gpio_cca: gpio@18000060 { ++ compatible = "brcm,iproc-gpio,cca"; ++ #gpio-cells = <2>; ++ reg = gpio: <0x18000060 0x50>, ++ intr: <0x18000000 0x50>; ++ ngpios = <8>; ++ pin-offset = <0>; ++ pin-base = <4>; ++ gpio-controller; ++ interrupt-controller; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gmac0: ethernet@18022000 { ++ compatible = "brcm,iproc-gmac"; ++ reg = <0x18022000 0x1000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gmac1: ethernet@18023000 { ++ compatible = "brcm,iproc-gmac"; ++ reg = 
<0x18023000 0x1000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ nand: nand@18026000 { ++ compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1"; ++ reg = <0x18026000 0x600>, ++ <0x1811b408 0x10>, ++ <0x18026f00 0x20>; ++ reg-names = "nand", "iproc-idm", "iproc-ext"; ++ interrupts = ; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ brcm,nand-has-wp; ++ }; ++ ++ qspi: spi@18027000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "brcm,iproc-qspi"; ++ reg = mspi_hw:<0x18027200 0x188>, ++ bspi_hw:<0x18027000 0x050>, ++ bspi_hw_raf:<0x18027100 0x024>, ++ qspi_intr:<0x180273a0 0x01c>, ++ idm_qspi:<0x1811c408 0x004>, ++ cru_hw:<0x1803e000 0x004>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ ; ++ #chip-select = <0>; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ usbphy0: usbphy0 { ++ #phy-cells = <0>; ++ compatible = "brcm,usb-phy,hx4"; ++ reg = idm_usb2h: <0x18115000 0x1000>, ++ idm_usb2d: <0x18116000 0x1000>; ++ vbus-gpio = <&gpio_cca 1 GPIO_ACTIVE_LOW>; ++ usbdev-gpio = <&gpio_cca 0 GPIO_ACTIVE_HIGH>; ++ status = "disabled"; ++ }; ++ ++ ehci0: usb@1802a000 { ++ compatible = "generic-ehci"; ++ reg = <0x1802a000 0x1000>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ usbd: usbd@18042000 { ++ compatible = "brcm,usbd,hx4"; ++ reg = <0x18042000 0x2000>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@18038000 { ++ compatible = "brcm,iproc-i2c"; ++ reg = <0x18038000 0x100>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@1803b000 { ++ compatible = "brcm,iproc-i2c"; ++ reg = <0x1803b000 0x100>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "disabled"; ++ }; ++ ++ mdio_int: mdio_int@18032000 { ++ compatible = "brcm,iproc-ccb-mdio"; ++ reg = <0x18032000 0x1000>; ++ #bus-id = <0>; ++ bus-type = "internal"; ++ 
clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ mdio_ext: mdio_ext@18032000 { ++ compatible = "brcm,iproc-ccb-mdio"; ++ reg = <0x18032000 0x1000>; ++ #bus-id = <0>; ++ bus-type = "external"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ hwrng: hwrng@18033000 { ++ compatible = "brcm,iproc-rng"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x18033000 0x1000>; ++ rng-type = "rng"; ++ status = "disabled"; ++ }; ++ ++ iproc_wdt: iproc_wdt@0x18039000 { ++ compatible = "arm,sp805", "arm,primecell"; ++ reg = iproc_wdt_base: <0x18039000 0x1000>, ++ iproc_reset_reg: <0x1803f014 0x4>; ++ wdt_boot_status_bit = <0x0>; ++ clocks = <&iproc_apb_clk>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ dmac0: dma@18020000 { ++ compatible = "arm,pl330", "arm,primecell"; ++ reg = pl330_base: <0x18020000 0x1000>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ /*arm,primecell-periphid = <0x00041330>;*/ ++ clocks = <&iproc_apb_clk>; ++ clock-names = "apb_pclk"; ++ #dma-cells = <1>; ++ #dma-channels = <8>; ++ #dma-requests = <16>; ++ status = "disabled"; ++ }; ++ ++ /* cmicd */ ++ iproc_cmicd: iproc_cmicd@48000000 { ++ compatible = "brcm,iproc-cmicd"; ++ reg = <0x48000000 0x40000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ }; ++ ++ pcie0: pcie@18012000 { ++ compatible = "brcm,iproc-pcie", "iproc-p2"; ++ reg = <0x18012000 0x1000>; ++ linux,pci-domain = <0>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ ; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ /*non-prefetchable mem space, pcie addr 0x0 0x08000000, ++ * cpu addr 0x08000000, size 0x0 0x08000000 ++ */ ++ ranges = <0x82000000 0 0x08000000 0x08000000 0 0x08000000>; ++ phy-addr = <0>; ++ status = "disabled"; ++ }; ++ ++ pcie1: pcie@18013000 { ++ compatible = "brcm,iproc-pcie", "iproc-p2"; ++ reg = <0x18013000 0x1000>; ++ linux,pci-domain = <1>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ ; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ 
device_type = "pci"; ++ /*non-prefetchable mem space, pcie addr 0x0 0x40000000, ++ * cpu addr 0x40000000, size 0x0 0x08000000 ++ */ ++ ranges = <0x82000000 0 0x40000000 0x40000000 0 0x08000000>; ++ phy-addr = <1>; ++ status = "disabled"; ++ }; ++ ++ dmu_pcu: dmu_pcu@1803f000 { ++ compatible = "brcm,iproc-dmu-pcu"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x1803f000 0xc00>; ++ }; ++ ++ iproc_wrap_ctrl: iproc_wrap_ctrl@1803fc00 { ++ compatible = "brcm,iproc-wrap-ctrl"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x1803fc00 0x100>; ++ }; ++ ++ iproc_idm: iproc_idm@18100000 { ++ compatible = "brcm,iproc-idm"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x18100000 0x100000>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ }; ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm-hurricane2.dtsi b/arch/arm/boot/dts/bcm-hurricane2.dtsi +--- a/arch/arm/boot/dts/bcm-hurricane2.dtsi 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm-hurricane2.dtsi 2017-11-09 17:52:54.685935000 +0800 +@@ -0,0 +1,314 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. 
++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++#include "skeleton.dtsi" ++ ++ ++/ { ++ model = "Broadcom HR2 iProc"; ++ compatible = "brcm,hurricane2"; ++ interrupt-parent = <&gic>; ++ ++ cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a9"; ++ next-level-cache = <&L2>; ++ reg = <0x0>; ++ }; ++ }; ++ ++ core { ++ compatible = "simple-bus"; ++ ranges = <0x00000000 0x19000000 0x00023000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ a9pll: arm_clk@00000 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-armpll"; ++ clocks = <&osc>; ++ reg = <0x0 0x1000>; ++ }; ++ ++ gic: interrupt-controller@21000 { ++ compatible = "arm,cortex-a9-gic"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ reg = <0x21000 0x1000>, <0x20100 0x100>; ++ }; ++ ++ twd-timer@20600 { ++ compatible = "arm,cortex-a9-twd-timer"; ++ reg = <0x20600 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ timer@20200 { ++ compatible = "arm,cortex-a9-global-timer"; ++ reg = <0x20200 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ L2: l2-cache { ++ compatible = "arm,pl310-cache"; ++ reg = <0x22000 0x1000>; ++ cache-unified; ++ cache-level = <2>; ++ arm,filter-ranges = <0x60000000 0x80000000>; ++ }; ++ }; ++ ++ clocks { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ osc: oscillator { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <25000000>; ++ }; ++ ++ periph_clk: periph_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&a9pll>; ++ clock-div = <2>; ++ clock-mult = <1>; ++ }; ++ ++ iproc_axi_clk: iproc_axi_clk@1803fc00 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-axi-clk", "axi-clk-hr2"; ++ clocks = <&osc>; ++ reg = <0x1803fc00 0x1c>; ++ }; ++ ++ iproc_apb_clk: iproc_apb_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&iproc_axi_clk>; ++ clock-div = <4>; ++ clock-mult = <1>; ++ }; ++ }; ++ ++ axi { 
++ compatible = "simple-bus"; ++ ranges; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ uart0: serial@18000300 { ++ compatible = "ns16550a"; ++ reg = <0x18000300 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@18000400 { ++ compatible = "ns16550a"; ++ reg = <0x18000400 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ gpio_cca: gpio@18000060 { ++ compatible = "brcm,iproc-gpio,cca"; ++ #gpio-cells = <2>; ++ reg = gpio: <0x18000060 0x50>, ++ intr: <0x18000000 0x50>, ++ dmu: <0x18020000 0x200>; ++ ngpios = <12>; ++ pin-offset = <0>; ++ pin-base = <4>; ++ gpio-controller; ++ interrupt-controller; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gmac0: ethernet@18022000 { ++ compatible = "brcm,iproc-gmac"; ++ reg = <0x18022000 0x1000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ nand: nand@18026000 { ++ compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1"; ++ reg = <0x18026000 0x600>, ++ <0x1811b408 0x10>, ++ <0x18026f00 0x20>; ++ reg-names = "nand", "iproc-idm", "iproc-ext"; ++ interrupts = ; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ brcm,nand-has-wp; ++ status = "disabled"; ++ }; ++ ++ qspi: spi@18027000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "brcm,iproc-qspi"; ++ reg = mspi_hw:<0x18027200 0x188>, ++ bspi_hw:<0x18027000 0x050>, ++ bspi_hw_raf:<0x18027100 0x024>, ++ qspi_intr:<0x180273a0 0x01c>, ++ idm_qspi:<0x1811c408 0x004>, ++ cru_hw:<0x1803e000 0x004>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ ; ++ #chip-select = <0>; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ mdio_int: mdio_int@18032000 { ++ compatible = "brcm,iproc-ccb-mdio"; ++ reg = <0x18032000 0x1000>; ++ #bus-id = <0>; ++ bus-type = "internal"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ mdio_ext: mdio_ext@18032000 { ++ compatible = "brcm,iproc-ccb-mdio"; ++ reg = <0x18032000 0x1000>; ++ #bus-id = <0>; ++ 
bus-type = "external"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ pnor_flash: pnor_flash@18021000 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "brcm,iproc-nor"; ++ reg = nor_regs: <0x18021000 0x1000>, ++ nor_mem: <0x20000000 0x4000000>, ++ nor_sel: <0x1803fc3c 0x4>; ++ status = "disabled"; ++ }; ++ ++ iproc_wdt: iproc_wdt@18039000 { ++ compatible = "arm,sp805", "arm,primecell"; ++ reg = iproc_wdt_base: <0x18039000 0x1000>, ++ iproc_reset_reg: <0x1803f014 0x4>; ++ wdt_boot_status_bit = <0x0>; ++ clocks = <&iproc_apb_clk>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ /* cmicd */ ++ iproc_cmicd: iproc_cmicd@48000000 { ++ compatible = "brcm,iproc-cmicd"; ++ reg = <0x48000000 0x40000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ }; ++ ++ pcie0: pcie@18012000 { ++ compatible = "brcm,iproc-pcie", "iproc-p2"; ++ reg = <0x18012000 0x1000>; ++ linux,pci-domain = <0>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ ; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ /*non-prefetchable mem space, pcie addr 0x0 0x08000000, ++ * cpu addr 0x08000000, size 0x0 0x08000000 ++ */ ++ ranges = <0x82000000 0 0x08000000 0x08000000 0 0x08000000>; ++ phy-addr = <0>; ++ status = "disabled"; ++ }; ++ ++ dmu_pcu: dmu_pcu@1803f000 { ++ compatible = "brcm,iproc-dmu-pcu"; ++ reg = <0x1803f000 0xc00>; ++ }; ++ ++ iproc_wrap_ctrl: iproc_wrap_ctrl@1803fc00 { ++ compatible = "brcm,iproc-wrap-ctrl"; ++ reg = <0x1803fc00 0x100>; ++ }; ++ ++ iproc_idm: iproc_idm@18100000 { ++ compatible = "brcm,iproc-idm"; ++ reg = <0x18100000 0x100000>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ }; ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm-hurricane3.dtsi b/arch/arm/boot/dts/bcm-hurricane3.dtsi +--- a/arch/arm/boot/dts/bcm-hurricane3.dtsi 
1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm-hurricane3.dtsi 2017-11-09 17:52:54.686936000 +0800 +@@ -0,0 +1,369 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++#include ++#include "skeleton.dtsi" ++ ++ ++/ { ++ model = "Broadcom HR3 iProc"; ++ compatible = "brcm,hurricane3"; ++ interrupt-parent = <&gic>; ++ ++ cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a9"; ++ next-level-cache = <&L2>; ++ reg = <0x0>; ++ }; ++ }; ++ ++ core { ++ compatible = "simple-bus"; ++ ranges = <0x00000000 0x19000000 0x00023000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ a9pll: arm_clk@00000 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-armpll"; ++ clocks = <&osc>; ++ reg = <0x0 0x1000>; ++ }; ++ ++ gic: interrupt-controller@21000 { ++ compatible = "arm,cortex-a9-gic"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ reg = <0x21000 0x1000>, <0x20100 0x100>; ++ }; ++ ++ twd-timer@20600 { ++ compatible = "arm,cortex-a9-twd-timer"; ++ reg = <0x20600 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ timer@20200 { ++ compatible = "arm,cortex-a9-global-timer"; ++ reg = <0x20200 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ L2: l2-cache { ++ compatible = "arm,pl310-cache"; ++ reg = <0x22000 0x1000>; ++ cache-unified; ++ cache-level = <2>; ++ arm,filter-ranges = <0x60000000 0x80000000>; ++ }; ++ }; ++ ++ clocks { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ osc: oscillator { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <25000000>; ++ }; ++ ++ periph_clk: periph_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&a9pll>; ++ clock-div = <2>; ++ clock-mult = <1>; ++ }; ++ ++ iproc_axi_clk: iproc_axi_clk@1800fc00 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-axi-clk"; ++ clocks = <&osc>; ++ reg = <0x1800fc00 0x1c>; ++ }; ++ ++ iproc_apb_clk: iproc_apb_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&iproc_axi_clk>; ++ clock-div = <4>; ++ clock-mult = <1>; ++ }; ++ }; ++ ++ axi { ++ 
compatible = "simple-bus"; ++ ranges; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ uart0: serial@18020000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x18020000 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@18021000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x18021000 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ gmac0: ethernet@18042000 { ++ compatible = "brcm,iproc-gmac"; ++ reg = <0x18042000 0x1000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gpio_ccg: gpio@1800a000 { ++ compatible = "brcm,iproc-gpio,ccg"; ++ #gpio-cells = <2>; ++ reg = gpio: <0x1800a000 0x50>; ++ ngpios = <12>; ++ pin-offset = <4>; ++ pin-base = <4>; ++ gpio-controller; ++ interrupt-controller; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ usbphy0: usbphy0 { ++ #phy-cells = <0>; ++ compatible = "brcm,usb-phy,gh"; ++ reg = idm_usb2h: <0x18115000 0x1000>, ++ idm_usb2d: <0x18111000 0x1000>; ++ vbus-gpio = <&gpio_ccg 3 GPIO_ACTIVE_HIGH>; ++ status = "disabled"; ++ }; ++ ++ ehci0: usb@0x18048000 { ++ compatible = "generic-ehci"; ++ reg = <0x18048000 0x800>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ ohci0: usb@0x18048800 { ++ compatible = "generic-ohci"; ++ reg = <0x18048800 0x800>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ usbd: usbd@1804c000 { ++ compatible = "brcm,usbd,hr3"; ++ reg = <0x1804c000 0x2000>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ sdio: sdio@18041000 { ++ compatible = "brcm,iproc-hr3-sdio"; ++ reg = <0x18041000 0x1000>, ++ <0x18116408 0x1000>; ++ reg-names = "sdio", "iproc-idm"; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ nand: nand@18046000 { ++ compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1"; ++ reg = <0x18046000 0x600>, ++ 
<0x1811d408 0x10>, ++ <0x18046f00 0x20>; ++ reg-names = "nand", "iproc-idm", "iproc-ext"; ++ interrupts = ; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ brcm,nand-has-wp; ++ status = "disabled"; ++ }; ++ ++ qspi: spi@18047000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "brcm,iproc-qspi"; ++ reg = mspi_hw:<0x18047200 0x188>, ++ bspi_hw:<0x18047000 0x050>, ++ bspi_hw_raf:<0x18047100 0x024>, ++ qspi_intr:<0x180473a0 0x01c>, ++ idm_qspi:<0x1811f408 0x004>, ++ cru_hw:<0x1800e000 0x004>; ++ interrupts = ; ++ #chip-select = <0>; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@18008000 { ++ compatible = "brcm,iproc-i2c"; ++ reg = <0x18008000 0x100>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "disabled"; ++ }; ++ ++ mdio_int: mdio_int@18002000 { ++ compatible = "brcm,iproc-ccg-mdio"; ++ reg = <0x18002000 0x1000>; ++ #bus-id = <0>; ++ bus-type = "internal"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ /* cmicd cmic_common mdio */ ++ mdio_ext: mdio_ext@03210000 { ++ compatible = "brcm,iproc-cmicd-mdio"; ++ reg = <0x03210000 0x1000>; ++ #bus-id = <2>; ++ #logical-bus-id = <0>; ++ bus-type = "external"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ pnor_flash: pnor_flash@18045000 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "brcm,iproc-nor"; ++ reg = nor_regs: <0x18045000 0x1000>, ++ nor_mem: <0xE8000000 0x8000000>, ++ nor_sel: <0x18000c8c 0x4>, ++ nor_strap: <0x18000a5c 0x4>; ++ status = "disabled"; ++ }; ++ ++ hwrng: hwrng@18032000 { ++ compatible = "brcm,iproc-rng"; ++ reg = <0x18032000 0x1000>; ++ rng-type = "rng200"; ++ status = "disabled"; ++ }; ++ ++ iproc_wdt: iproc_wdt@18009000 { ++ compatible = "arm,sp805", "arm,primecell"; ++ reg = iproc_wdt_base: <0x18009000 0x1000>, ++ iproc_reset_reg: <0x1800f014 0x4>; ++ wdt_boot_status_bit = <0x0>; ++ clocks = <&iproc_apb_clk>; ++ clock-names = 
"apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ /* cmicd */ ++ iproc_cmicd: iproc_cmicd@03200000 { ++ compatible = "brcm,iproc-cmicd"; ++ reg = <0x03200000 0x100000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ }; ++ ++ pcie0: pcie@18012000 { ++ compatible = "brcm,iproc-pcie"; ++ reg = <0x18012000 0x1000>; ++ linux,pci-domain = <0>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ ; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ /*non-prefetchable mem space, pcie addr 0x0 0x20000000, ++ * cpu addr 0x20000000, size 0x0 0x20000000 ++ */ ++ ranges = <0x82000000 0 0x20000000 0x20000000 0 0x20000000>; ++ phy-addr = <0>; ++ status = "disabled"; ++ }; ++ ++ dmu_pcu: dmu_pcu@1800f000 { ++ compatible = "brcm,iproc-dmu-pcu"; ++ reg = <0x1800f000 0xc00>; ++ }; ++ ++ iproc_wrap_ctrl: iproc_wrap_ctrl@1800fc00 { ++ compatible = "brcm,iproc-wrap-ctrl"; ++ reg = <0x1800fc00 0x100>; ++ ++ }; ++ ++ iproc_idm: iproc_idm@18100000 { ++ compatible = "brcm,iproc-idm"; ++ reg = <0x18100000 0x100000>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ }; ++ ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm-katana2.dtsi b/arch/arm/boot/dts/bcm-katana2.dtsi +--- a/arch/arm/boot/dts/bcm-katana2.dtsi 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm-katana2.dtsi 2017-11-09 17:52:54.687932000 +0800 +@@ -0,0 +1,431 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++#include ++#include "skeleton.dtsi" ++ ++ ++/ { ++ model = "Broadcom KT2 iProc"; ++ compatible = "brcm,katana2"; ++ interrupt-parent = <&gic>; ++ ++ cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ cpu0: cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a9"; ++ next-level-cache = <&L2>; ++ reg = <0x0>; ++ }; ++ ++ cpu1: cpu@1 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a9"; ++ next-level-cache = <&L2>; ++ enable-method = "brcm,bcm-nsp-smp"; ++ secondary-boot-reg = <0xffff042c>; ++ reg = <0x1>; ++ }; ++ }; ++ ++ mpcore { ++ compatible = "simple-bus"; ++ ranges = <0x00000000 0x19000000 0x00023000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ a9pll: arm_clk@00000 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-armpll"; ++ clocks = <&osc>; ++ reg = <0x0 0x1000>; ++ }; ++ ++ gic: interrupt-controller@21000 { ++ compatible = "arm,cortex-a9-gic"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ reg = <0x21000 0x1000>, <0x20100 0x100>; ++ }; ++ ++ twd-timer@20600 { ++ compatible = "arm,cortex-a9-twd-timer"; ++ reg = <0x20600 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ timer@20200 { ++ compatible = "arm,cortex-a9-global-timer"; ++ reg = <0x20200 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ L2: l2-cache { ++ compatible = "arm,pl310-cache"; ++ reg = <0x22000 0x1000>; ++ cache-unified; ++ cache-level = <2>; ++ arm,filter-ranges = <0x60000000 0x80000000>; ++ }; ++ }; ++ ++ clocks { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ osc: oscillator { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <25000000>; ++ }; ++ ++ periph_clk: periph_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&a9pll>; ++ clock-div = <2>; ++ clock-mult = <1>; ++ }; ++ ++ iproc_axi_clk: axi_clk_fixed_495M { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <495000000>; ++ }; ++ ++ 
iproc_apb_clk: iproc_apb_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&iproc_axi_clk>; ++ clock-div = <4>; ++ clock-mult = <1>; ++ }; ++ }; ++ ++ axi { ++ compatible = "simple-bus"; ++ ranges; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ uart0: serial@18000300 { ++ compatible = "ns16550a"; ++ reg = <0x18000300 0x100>; ++ interrupts = ; ++ clock-frequency = <61875000>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@18000400 { ++ compatible = "ns16550a"; ++ reg = <0x18000400 0x100>; ++ interrupts = ; ++ clock-frequency = <61875000>; ++ status = "disabled"; ++ }; ++ ++ uart2: serial@18037000 { ++ compatible = "ns16550a"; ++ reg = <0x18037000 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ gpio_cca: gpio@18000060 { ++ compatible = "brcm,iproc-gpio,cca"; ++ #gpio-cells = <2>; ++ reg = gpio: <0x18000060 0x50>, ++ intr: <0x18000000 0x50>; ++ ngpios = <8>; ++ pin-offset = <0>; ++ pin-base = <4>; ++ gpio-controller; ++ interrupt-controller; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gmac0: ethernet@18022000 { ++ compatible = "brcm,iproc-gmac"; ++ reg = <0x18022000 0x1000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gmac1: ethernet@18023000 { ++ compatible = "brcm,iproc-gmac"; ++ reg = <0x18023000 0x1000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ nand: nand@18026000 { ++ compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1"; ++ reg = <0x18026000 0x600>, ++ <0x1811b408 0x10>, ++ <0x18026f00 0x20>; ++ reg-names = "nand", "iproc-idm", "iproc-ext"; ++ interrupts = ; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ brcm,nand-has-wp; ++ }; ++ ++ qspi: spi@18027000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "brcm,iproc-qspi"; ++ reg = mspi_hw:<0x18027200 0x188>, ++ bspi_hw:<0x18027000 0x050>, ++ bspi_hw_raf:<0x18027100 0x024>, ++ qspi_intr:<0x180273a0 0x01c>, ++ idm_qspi:<0x1811c408 0x004>, ++ 
cru_hw:<0x1803e000 0x004>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ ; ++ #chip-select = <0>; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ usbphy0: usbphy0 { ++ #phy-cells = <0>; ++ compatible = "brcm,usb-phy,kt2"; ++ reg = idm_usb2h: <0x18115000 0x1000>, ++ idm_usb2d: <0x18116000 0x1000>; ++ vbus-gpio = <&gpio_cca 1 GPIO_ACTIVE_LOW>; ++ usbdev-gpio = <&gpio_cca 0 GPIO_ACTIVE_HIGH>; ++ status = "disabled"; ++ }; ++ ++ ehci0: usb@1802a000 { ++ compatible = "generic-ehci"; ++ reg = <0x1802a000 0x1000>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ usbd: usbd@18042000 { ++ compatible = "brcm,usbd,kt2"; ++ reg = <0x18042000 0x2000>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@0x18038000 { ++ compatible = "brcm,iproc-i2c"; ++ reg = <0x18038000 0x100>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "disabled"; ++ }; ++ ++ i2c1: i2c@1803b000 { ++ compatible = "brcm,iproc-i2c"; ++ reg = <0x1803b000 0x100>; ++ interrupts = ; ++ #bus-id = <1>; ++ clock-frequency = <100000>; ++ status = "disabled"; ++ }; ++ ++ mdio_int: mdio_int@18032000 { ++ compatible = "brcm,iproc-ccb-mdio"; ++ reg = <0x18032000 0x1000>; ++ #bus-id = <0>; ++ bus-type = "internal"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ mdio_ext: mdio_ext@18032000 { ++ compatible = "brcm,iproc-ccb-mdio"; ++ reg = <0x18032000 0x1000>; ++ #bus-id = <0>; ++ bus-type = "external"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ hwrng: hwrng@18033000 { ++ compatible = "brcm,iproc-rng"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x18033000 0x1000>; ++ rng-type = "rng"; ++ status = "disabled"; ++ }; ++ ++ iproc_wdt: iproc_wdt@18039000 { ++ compatible = "arm,sp805", "arm,primecell"; ++ reg = iproc_wdt_base: <0x18039000 0x1000>, ++ iproc_reset_reg: <0x1803f014 0x4>; ++ wdt_boot_status_bit = <0x0>; ++ clocks = 
<&iproc_apb_clk>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ dmac0: dma@18020000 { ++ compatible = "arm,pl330", "arm,primecell"; ++ reg = pl330_base: <0x18020000 0x1000>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ /*arm,primecell-periphid = <0x00041330>;*/ ++ clocks = <&iproc_apb_clk>; ++ clock-names = "apb_pclk"; ++ #dma-cells = <1>; ++ #dma-channels = <8>; ++ #dma-requests = <16>; ++ status = "disabled"; ++ }; ++ ++ /* cmicd */ ++ iproc_cmicd: iproc_cmicd@48000000 { ++ compatible = "brcm,iproc-cmicd"; ++ reg = <0x48000000 0x40000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ }; ++ ++ pcie0: pcie@18012000 { ++ compatible = "brcm,iproc-pcie", "iproc-p2"; ++ reg = <0x18012000 0x1000>; ++ linux,pci-domain = <0>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ ; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ /*non-prefetchable mem space, pcie addr 0x0 0x08000000, ++ * cpu addr 0x08000000, size 0x0 0x08000000 ++ */ ++ ranges = <0x82000000 0 0x08000000 0x08000000 0 0x08000000>; ++ phy-addr = <0>; ++ status = "disabled"; ++ }; ++ ++ pcie1: pcie@18013000 { ++ compatible = "brcm,iproc-pcie", "iproc-p2"; ++ reg = <0x18013000 0x1000>; ++ linux,pci-domain = <1>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ ; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ /*non-prefetchable mem space, pcie addr 0x0 0x40000000, ++ * cpu addr 0x40000000, size 0x0 0x08000000 ++ */ ++ ranges = <0x82000000 0 0x40000000 0x40000000 0 0x08000000>; ++ phy-addr = <1>; ++ status = "disabled"; ++ }; ++ ++ dmu_pcu: dmu_pcu@1803f000 { ++ compatible = "brcm,iproc-dmu-pcu"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x1803f000 0xc00>; ++ }; ++ ++ iproc_wrap_ctrl: iproc_wrap_ctrl@1803fc00 { ++ compatible = "brcm,iproc-wrap-ctrl"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x1803fc00 0x100>; ++ }; ++ ++ iproc_idm: iproc_idm@18100000 { ++ compatible = "brcm,iproc-idm"; ++ #address-cells = <1>; ++ 
#size-cells = <1>; ++ reg = <0x18100000 0x100000>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ }; ++}; ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm-saber2.dtsi b/arch/arm/boot/dts/bcm-saber2.dtsi +--- a/arch/arm/boot/dts/bcm-saber2.dtsi 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm-saber2.dtsi 2017-11-09 17:52:54.689928000 +0800 +@@ -0,0 +1,356 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++#include ++#include ++#include "skeleton.dtsi" ++ ++/ { ++ model = "Broadcom SB2 iProc"; ++ compatible = "brcm,saber2"; ++ interrupt-parent = <&gic>; ++ ++ cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a9"; ++ next-level-cache = <&L2>; ++ reg = <0x0>; ++ }; ++ }; ++ ++ core { ++ compatible = "simple-bus"; ++ ranges = <0x00000000 0x19000000 0x00023000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ a9pll: arm_clk@00000 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-armpll"; ++ clocks = <&osc>; ++ reg = <0x0 0x1000>; ++ }; ++ ++ gic: interrupt-controller@21000 { ++ compatible = "arm,cortex-a9-gic"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ reg = <0x21000 0x1000>, <0x20100 0x100>; ++ }; ++ ++ twd-timer@20600 { ++ compatible = "arm,cortex-a9-twd-timer"; ++ reg = <0x20600 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ timer@20200 { ++ compatible = "arm,cortex-a9-global-timer"; ++ reg = <0x20200 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ L2: l2-cache { ++ compatible = "arm,pl310-cache"; ++ reg = <0x22000 0x1000>; ++ cache-unified; ++ cache-level = <2>; ++ arm,filter-ranges = <0x60000000 0x80000000>; ++ }; ++ }; ++ ++ clocks { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ osc: oscillator_25M { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ 
clock-frequency = <25000000>; ++ }; ++ ++ osc_1: oscillator_50M { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <50000000>; ++ }; ++ ++ periph_clk: periph_clk@19000000 { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&a9pll>; ++ clock-div = <2>; ++ clock-mult = <1>; ++ }; ++ ++ iproc_axi_clk: iproc_axi_clk@1800fc50 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-axi-clk", "axi-clk-sb2"; ++ clocks = <&osc_1>; ++ reg = <0x1800fc50 0x1c>; ++ }; ++ ++ iproc_apb_clk: iproc_apb_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&iproc_axi_clk>; ++ clock-div = <4>; ++ clock-mult = <1>; ++ }; ++ }; ++ ++ axi { ++ compatible = "simple-bus"; ++ ranges; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ uart0: serial@18020000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x18020000 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@18021000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x18021000 0x1000>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ qspi: spi@18047000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "brcm,iproc-qspi"; ++ reg = mspi_hw:<0x18047200 0x188>, ++ bspi_hw:<0x18047000 0x050>, ++ bspi_hw_raf:<0x18047100 0x024>, ++ qspi_intr:<0x180473a0 0x01c>, ++ idm_qspi:<0xf8106408 0x004>, ++ cru_hw:<0x1800e000 0x004>; ++ interrupts = ; ++ #chip-select = <0>; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ ++ gmac0: ethernet@18042000 { ++ compatible = "brcm,iproc-gmac"; ++ reg = <0x18042000 0x1000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ usbphy0: usbphy0 { ++ #phy-cells = <0>; ++ compatible = "brcm,usb-phy,sb2"; ++ reg = idm_usb2h: <0x18115000 0x1000>, ++ idm_usb2d: <0x18111000 0x1000>; ++ vbus-gpio = <&gpio_ccg 1 GPIO_ACTIVE_LOW>; ++ status = 
"disabled"; ++ }; ++ ++ ehci0: usb@18048000 { ++ compatible = "generic-ehci"; ++ reg = <0x18048000 0x800>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ ohci0: usb@18048800 { ++ compatible = "generic-ohci"; ++ reg = <0x18048800 0x800>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ usbd: usbd@1804c000 { ++ compatible = "brcm,usbd,sb2"; ++ reg = usb2d: <0x1804c000 0x2000>, ++ idm_usb: <0x18111000 0x1000>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ gpio_ccg: gpio@1800a000 { ++ compatible = "brcm,iproc-gpio,ccg"; ++ #gpio-cells = <2>; ++ reg = gpio: <0x1800a000 0x50>; ++ ngpios = <16>; ++ pin-offset = <0>; ++ pin-base = <0>; ++ gpio-controller; ++ interrupt-controller; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ nand: nand@18046000 { ++ compatible = "brcm,nand-iproc", "brcm,brcmnand-v6.1"; ++ reg = <0x18046000 0x600>, ++ <0xf8105408 0x10>, ++ <0x18046f00 0x20>; ++ reg-names = "nand", "iproc-idm", "iproc-ext"; ++ interrupts = ; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ brcm,nand-has-wp; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@18008000 { ++ compatible = "brcm,iproc-i2c"; ++ reg = <0x18008000 0x100>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "disabled"; ++ }; ++ ++ mdio_int: mdio_int@18002000 { ++ compatible = "brcm,iproc-ccg-mdio"; ++ reg = <0x18002000 0x1000>; ++ #bus-id = <0>; ++ bus-type = "internal"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ mdio_ext: mdio_ext@18002000 { ++ compatible = "brcm,iproc-ccg-mdio"; ++ reg = <0x18002000 0x1000>; ++ #bus-id = <0>; ++ bus-type = "external"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ hwrng: hwrng@18032000 { ++ compatible = "brcm,iproc-rng"; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ reg = <0x18032000 0x1000>; ++ rng-type = "rng200"; ++ status = "disabled"; ++ }; ++ ++ iproc_wdt: 
iproc_wdt@18009000 { ++ compatible = "arm,sp805", "arm,primecell"; ++ reg = iproc_wdt_base: <0x18009000 0x1000>, ++ iproc_reset_reg: <0x1800f014 0x4>; ++ wdt_boot_status_bit = <0x0>; ++ clocks = <&iproc_apb_clk>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ /* cmicd */ ++ iproc_cmicd: iproc_cmicd@03200000 { ++ compatible = "brcm,iproc-cmicd"; ++ reg = <0x03200000 0x100000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ }; ++ ++ pcie0: pcie@18012000 { ++ compatible = "brcm,iproc-pcie", "iproc-p7"; ++ reg = <0x18012000 0x1000>; ++ linux,pci-domain = <0>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ ; ++ #address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ /*non-prefetchable mem space, pcie addr 0x0 0x20000000, ++ * cpu addr 0x20000000, size 0x0 0x20000000 ++ */ ++ ranges = <0x82000000 0 0x20000000 0x20000000 0 0x20000000>; ++ phy-addr = <0>; ++ status = "disabled"; ++ }; ++ ++ dmu_pcu: dmu_pcu@1800f000 { ++ compatible = "brcm,iproc-dmu-pcu"; ++ reg = <0x1800f000 0xc00>; ++ }; ++ ++ iproc_wrap_ctrl: iproc_wrap_ctrl@1800fc00 { ++ compatible = "brcm,iproc-wrap-ctrl"; ++ reg = <0x1800fc00 0x100>; ++ }; ++ ++ iproc_idm: iproc_idm@18100000 { ++ compatible = "brcm,iproc-idm"; ++ reg = idm0: <0x18100000 0x100000>, ++ idm1: <0xf8100000 0x100000>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ }; ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm-wolfhound2.dtsi b/arch/arm/boot/dts/bcm-wolfhound2.dtsi +--- a/arch/arm/boot/dts/bcm-wolfhound2.dtsi 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm-wolfhound2.dtsi 2017-11-09 17:52:54.690928000 +0800 +@@ -0,0 +1,354 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++#include ++#include ++#include "skeleton.dtsi" ++ ++ ++/ { ++ model = "Broadcom HR3 iProc"; ++ compatible = "brcm,hurricane3"; ++ interrupt-parent = <&gic>; ++ ++ cpus { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ ++ cpu@0 { ++ device_type = "cpu"; ++ compatible = "arm,cortex-a9"; ++ next-level-cache = <&L2>; ++ reg = <0x0>; ++ }; ++ }; ++ ++ core { ++ compatible = "simple-bus"; ++ ranges = <0x00000000 0x19000000 0x00023000>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ a9pll: arm_clk@00000 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-armpll"; ++ clocks = <&osc>; ++ reg = <0x0 0x1000>; ++ }; ++ ++ gic: interrupt-controller@21000 { ++ compatible = "arm,cortex-a9-gic"; ++ #interrupt-cells = <3>; ++ interrupt-controller; ++ reg = <0x21000 0x1000>, <0x20100 0x100>; ++ }; ++ ++ twd-timer@20600 { ++ compatible = "arm,cortex-a9-twd-timer"; ++ reg = <0x20600 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ timer@20200 { ++ compatible = "arm,cortex-a9-global-timer"; ++ reg = <0x20200 0x100>; ++ interrupts = ; ++ clocks = <&periph_clk>; ++ }; ++ ++ L2: l2-cache { ++ compatible = "arm,pl310-cache"; ++ reg = <0x22000 0x1000>; ++ cache-unified; ++ cache-level = <2>; ++ arm,filter-ranges = <0x60000000 0x80000000>; ++ }; ++ }; ++ ++ clocks { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ranges; ++ ++ osc: oscillator { ++ #clock-cells = <0>; ++ compatible = "fixed-clock"; ++ clock-frequency = <25000000>; ++ }; ++ ++ periph_clk: periph_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&a9pll>; ++ clock-div = <2>; ++ clock-mult = <1>; ++ }; ++ ++ iproc_axi_clk: iproc_axi_clk@1800fc00 { ++ #clock-cells = <0>; ++ compatible = "brcm,xgs-iproc-axi-clk"; ++ clocks = <&osc>; ++ reg = <0x1800fc00 0x1c>; ++ }; ++ ++ iproc_apb_clk: iproc_apb_clk { ++ #clock-cells = <0>; ++ compatible = "fixed-factor-clock"; ++ clocks = <&iproc_axi_clk>; ++ clock-div = <4>; ++ clock-mult = <1>; ++ }; ++ }; ++ ++ axi { ++ 
compatible = "simple-bus"; ++ ranges; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ uart0: serial@18020000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x18020000 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ uart1: serial@18021000 { ++ compatible = "snps,dw-apb-uart"; ++ reg = <0x18021000 0x100>; ++ interrupts = ; ++ clocks = <&iproc_apb_clk>; ++ reg-io-width = <4>; ++ reg-shift = <2>; ++ status = "disabled"; ++ }; ++ ++ gmac0: ethernet@18042000 { ++ compatible = "brcm,iproc-gmac"; ++ reg = <0x18042000 0x1000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ gpio_ccg: gpio@1800a000 { ++ compatible = "brcm,iproc-gpio,ccg"; ++ #gpio-cells = <2>; ++ reg = gpio: <0x1800a000 0x50>; ++ ngpios = <12>; ++ pin-offset = <4>; ++ pin-base = <4>; ++ gpio-controller; ++ interrupt-controller; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ ++ usbphy0: usbphy0 { ++ #phy-cells = <0>; ++ compatible = "brcm,usb-phy,gh"; ++ reg = idm_usb2h: <0x18115000 0x1000>, ++ idm_usb2d: <0x18111000 0x1000>; ++ vbus-gpio = <&gpio_ccg 3 GPIO_ACTIVE_HIGH>; ++ status = "disabled"; ++ }; ++ ++ ehci0: usb@0x18048000 { ++ compatible = "generic-ehci"; ++ reg = <0x18048000 0x800>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ ohci0: usb@0x18048800 { ++ compatible = "generic-ohci"; ++ reg = <0x18048800 0x800>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ usbd: usbd@1804c000 { ++ compatible = "brcm,usbd,hr3"; ++ reg = <0x1804c000 0x2000>; ++ interrupts = ; ++ usb-phy = <&usbphy0>; ++ status = "disabled"; ++ }; ++ ++ qspi: spi@18047000 { ++ #address-cells = <1>; ++ #size-cells = <0>; ++ compatible = "brcm,iproc-qspi"; ++ reg = mspi_hw:<0x18047200 0x188>, ++ bspi_hw:<0x18047000 0x050>, ++ bspi_hw_raf:<0x18047100 0x024>, ++ qspi_intr:<0x180473a0 0x01c>, ++ idm_qspi:<0x1811f408 0x004>, ++ cru_hw:<0x1800e000 0x004>; ++ interrupts = ; ++ 
#chip-select = <0>; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ i2c0: i2c@18008000 { ++ compatible = "brcm,iproc-i2c"; ++ reg = <0x18008000 0x100>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ interrupts = ; ++ clock-frequency = <100000>; ++ status = "disabled"; ++ }; ++ ++ /* cmicd cmic_common mdio */ ++ mdio_int0: mdio_int0@03210000 { ++ compatible = "brcm,iproc-cmicd-mdio"; ++ reg = <0x03210000 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ #bus-id = <0>; ++ #logical-bus-id = <0>; ++ bus-type = "internal"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ /* cmicd cmic_common mdio */ ++ mdio_int1: mdio_int1@03210000 { ++ compatible = "brcm,iproc-cmicd-mdio"; ++ reg = <0x03210000 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ #bus-id = <1>; ++ #logical-bus-id = <1>; ++ bus-type = "internal"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ /* CCG mdio */ ++ mdio_int2: mdio_int2@18002000 { ++ compatible = "brcm,iproc-ccg-mdio"; ++ reg = <0x18002000 0x1000>; ++ #address-cells = <1>; ++ #size-cells = <0>; ++ #bus-id = <2>; ++ bus-type = "internal"; ++ clocks = <&iproc_apb_clk>; ++ status = "disabled"; ++ }; ++ ++ hwrng: hwrng@18032000 { ++ compatible = "brcm,iproc-rng"; ++ reg = <0x18032000 0x1000>; ++ rng-type = "rng200"; ++ status = "disabled"; ++ }; ++ ++ iproc_wdt: iproc_wdt@18009000 { ++ compatible = "arm,sp805", "arm,primecell"; ++ reg = iproc_wdt_base: <0x18009000 0x1000>, ++ iproc_reset_reg: <0x1800f014 0x4>; ++ wdt_boot_status_bit = <0x0>; ++ clocks = <&iproc_apb_clk>; ++ clock-names = "apb_pclk"; ++ status = "disabled"; ++ }; ++ ++ /* cmicd */ ++ iproc_cmicd: iproc_cmicd@03200000 { ++ compatible = "brcm,iproc-cmicd"; ++ reg = <0x03200000 0x100000>; ++ interrupts = ; ++ status = "disabled"; ++ }; ++ }; ++ ++ pcie0: pcie@18012000 { ++ compatible = "brcm,iproc-pcie"; ++ reg = <0x18012000 0x1000>; ++ linux,pci-domain = <0>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ ; ++ 
#address-cells = <3>; ++ #size-cells = <2>; ++ device_type = "pci"; ++ /*non-prefetchable mem space, pcie addr 0x0 0x20000000, ++ * cpu addr 0x20000000, size 0x0 0x20000000 ++ */ ++ ranges = <0x82000000 0 0x20000000 0x20000000 0 0x20000000>; ++ phy-addr = <0>; ++ status = "disabled"; ++ }; ++ ++ dmu_pcu: dmu_pcu@1800f000 { ++ compatible = "brcm,iproc-dmu-pcu"; ++ reg = <0x1800f000 0xc00>; ++ }; ++ ++ iproc_wrap_ctrl: iproc_wrap_ctrl@1800fc00 { ++ compatible = "brcm,iproc-wrap-ctrl"; ++ reg = <0x1800fc00 0x100>; ++ ++ }; ++ ++ iproc_idm: iproc_idm@18100000 { ++ compatible = "brcm,iproc-idm"; ++ reg = <0x18100000 0x100000>; ++ interrupts = , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ , ++ ; ++ }; ++ ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm95341x.dts b/arch/arm/boot/dts/bcm95341x.dts +--- a/arch/arm/boot/dts/bcm95341x.dts 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm95341x.dts 2017-11-09 17:52:54.730934000 +0800 +@@ -0,0 +1,207 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. 
++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++/dts-v1/; ++ ++#include "bcm-greyhound.dtsi" ++ ++/ { ++ model = "Broadcom GH SVK (BCM95341x)"; ++ compatible = "brcm,bcm95341x", "brcm,greyhound"; ++ ++ aliases { ++ serial0 = &uart0; ++ serial1 = &uart1; ++ ethernet0 = &gmac0; ++ }; ++ ++ chosen { ++ bootargs = "console=ttyS0,115200n8 maxcpus=1 mem=496M"; ++ }; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "okay"; ++}; ++ ++&gmac0 { ++ status = "okay"; ++}; ++ ++&usbphy0 { ++ status = "okay"; ++}; ++ ++&ehci0 { ++ status = "okay"; ++}; ++ ++&ohci0 { ++ status = "okay"; ++}; ++ ++&usbd { ++ status = "okay"; ++}; ++ ++&gpio_ccg { ++ status = "okay"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ eeprom@0x50 { ++ compatible = "atmel,24c01"; ++ reg = <0x50>; ++ pagesize = <8>; ++ }; ++}; ++ ++&nand { ++ status = "okay"; ++ nandcs@1 { ++ compatible = "brcm,nandcs"; ++ reg = <0>; ++ nand-on-flash-bbt; ++ /*nand-bus-width = <8>;*/ ++ nand-ecc-strength = <24>; ++ nand-ecc-step-size = <1024>; ++ brcm,nand-oob-sector-size = <27>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "nboot"; ++ reg = <0x0 0x200000>; ++ /*read-only;*/ 
++ }; ++ partition@1 { ++ label = "nenv"; ++ reg = <0x200000 0x400000>; ++ }; ++ partition@2 { ++ label = "nsystem"; ++ reg = <0x600000 0xa00000>; ++ }; ++ partition@3 { ++ label = "nrootfs"; ++ reg = <0x1000000 0xf000000>; ++ }; ++ partition@4 { ++ label = "ncustfs"; ++ reg = <0x10000000 0x30000000>; ++ }; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash: m25p80@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "m25p80"; ++ m25p,fast-read = <1>; ++ spi-max-frequency = <62500000>; ++ reg = <0x0>; ++ partition@0 { ++ label = "boot"; ++ reg = <0x00000000 0x000c0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "env"; ++ reg = <0x000c0000 0x00040000>; ++ }; ++ partition@2 { ++ label = "system"; ++ reg = <0x00100000 0x00f00000>; ++ }; ++ partition@3 { ++ label = "rootfs"; ++ reg = <0x01000000 0x03000000>; ++ }; ++ }; ++}; ++ ++&pnor_flash { ++ status = "okay"; ++ ++ partition@0 { ++ label = "pboot"; ++ reg = <0x0 0xc0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "penv"; ++ reg = <0xc0000 0x40000>; ++ }; ++ partition@2 { ++ label = "psystem"; ++ reg = <0x100000 0xf00000>; ++ }; ++ partition@3 { ++ label = "prootfs"; ++ reg = <0x1000000 0x1000000>; ++ }; ++ partition@4 { ++ label = "pcustfs"; ++ reg = <0x2000000 0x2000000>; ++ }; ++}; ++ ++&mdio_int { ++ status = "okay"; ++}; ++ ++&mdio_ext { ++ status = "okay"; ++}; ++ ++&hwrng { ++ status = "okay"; ++}; ++ ++&iproc_wdt { ++ status = "okay"; ++}; ++ ++&iproc_cmicd { ++ status = "okay"; ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm953444.dts b/arch/arm/boot/dts/bcm953444.dts +--- a/arch/arm/boot/dts/bcm953444.dts 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm953444.dts 2017-11-09 17:52:54.731931000 +0800 +@@ -0,0 +1,128 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++/dts-v1/; ++ ++#include "bcm-hurricane3.dtsi" ++ ++/ { ++ model = "Broadcom HR3 SVK (BCM953444K)"; ++ compatible = "brcm,bcm953444k", "brcm,hurricane3"; ++ ++ aliases { ++ serial0 = &uart0; ++ serial1 = &uart1; ++ ethernet0 = &gmac0; ++ }; ++ ++ chosen { ++ bootargs = "console=ttyS0,115200n8 maxcpus=1 mem=496M"; ++ }; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "okay"; ++}; ++ ++&gmac0 { ++ status = "okay"; ++}; ++ ++ ++&gpio_ccg { ++ status = "okay"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ eeprom@0x50 { ++ compatible = "atmel,24c01"; ++ reg = <0x50>; ++ pagesize = <8>; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash: m25p80@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "m25p80"; ++ m25p,fast-read = <1>; ++ spi-max-frequency = <62500000>; ++ reg = <0x0>; ++ partition@0 { ++ label = "boot"; ++ reg = <0x00000000 0x000c0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "env"; ++ reg = <0x000c0000 0x00040000>; ++ }; ++ partition@2 { ++ label = "system"; ++ reg = <0x00100000 0x00f00000>; ++ }; ++ partition@3 { ++ label = "rootfs"; ++ reg = <0x01000000 0x03000000>; ++ }; ++ }; ++}; ++ ++&mdio_int { ++ status = "okay"; ++}; ++ ++&mdio_ext { ++ #bus-id = <1>; /* cmicd mdio needs #logical-bus-id in addition to #bus-id (physical) */ ++ #logical-bus-id = <1>; ++ status = "okay"; ++}; ++ ++&iproc_wdt { ++ status = "okay"; ++}; ++ ++&iproc_cmicd { ++ status = "okay"; ++}; ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm953547.dts b/arch/arm/boot/dts/bcm953547.dts +--- a/arch/arm/boot/dts/bcm953547.dts 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm953547.dts 2017-11-09 17:52:54.740928000 +0800 +@@ -0,0 +1,154 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++/dts-v1/; ++ ++#include "bcm-wolfhound2.dtsi" ++ ++/ { ++ model = "Broadcom HR3-WH2 SVK (BCM953547K)"; ++ compatible = "brcm,bcm953547k", "brcm,hurricane3"; ++ ++ aliases { ++ serial0 = &uart0; ++ serial1 = &uart1; ++ ethernet0 = &gmac0; ++ }; ++ ++ chosen { ++ bootargs = "console=ttyS0,115200n8 maxcpus=1 mem=496M"; ++ }; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "okay"; ++}; ++ ++&gmac0 { ++ status = "okay"; ++}; ++ ++&usbphy0 { ++ status = "okay"; ++}; ++ ++&ehci0 { ++ status = "okay"; ++}; ++ ++&ohci0 { ++ status = "okay"; ++}; ++ ++&usbd { ++ status = "okay"; ++}; ++ ++&gpio_ccg { ++ status = "okay"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ eeprom@0x50 { ++ compatible = "atmel,24c01"; ++ reg = <0x50>; ++ pagesize = <8>; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash: m25p80@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "m25p80"; ++ m25p,fast-read = <1>; ++ spi-max-frequency = <62500000>; ++ reg = <0x0>; ++ partition@0 { ++ label = "boot"; ++ reg = <0x00000000 0x000c0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "env"; ++ reg = <0x000c0000 0x00040000>; ++ }; ++ partition@2 { ++ label = "system"; ++ reg = <0x00100000 0x00f00000>; ++ }; ++ partition@3 { ++ label = "rootfs"; ++ reg = <0x01000000 0x03000000>; ++ }; ++ }; ++}; ++ ++&mdio_int0 { ++ status = "okay"; ++ amac_phy0: amac_phy@0 { ++ reg = <24>; ++ }; ++}; ++ ++&mdio_int1 { ++ status = "okay"; ++ amac_serdes: amac_serdes@0 { ++ reg = <20>; ++ }; ++}; ++ ++&mdio_int2 { ++ status = "okay"; ++ pcie_phy0: pcie_phy@0 { ++ reg = <2>; ++ }; ++}; ++ ++&iproc_wdt { ++ status = "okay"; ++}; ++ ++&iproc_cmicd { ++ status = "okay"; ++}; ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm95357x.dts b/arch/arm/boot/dts/bcm95357x.dts +--- a/arch/arm/boot/dts/bcm95357x.dts 1970-01-01 08:00:00.000000000 +0800 
++++ b/arch/arm/boot/dts/bcm95357x.dts 2017-11-09 17:52:54.744928000 +0800 +@@ -0,0 +1,224 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++/dts-v1/; ++ ++#include "bcm-greyhound2.dtsi" ++ ++/ { ++ model = "Broadcom RG3 SVK (BCM95357x)"; ++ compatible = "brcm,bcm95357x", "brcm,greyhound2"; ++ ++ aliases { ++ serial0 = &uart0; ++ serial1 = &uart1; ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; ++ }; ++ ++ chosen { ++ bootargs = "console=ttyS0,115200n8 maxcpus=1 mem=496M"; ++ }; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "okay"; ++}; ++ ++&gmac0 { ++ status = "okay"; ++}; ++ ++&gmac1 { ++ status = "okay"; ++}; ++ ++&usbphy0 { ++ status = "okay"; ++}; ++ ++&ehci0 { ++ status = "okay"; ++}; ++ ++&ohci0 { ++ status = "okay"; ++}; ++ ++&usbd { ++ status = "okay"; ++}; ++ ++&gpio_ccg { ++ status = "okay"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ eeprom@0x50 { ++ compatible = "atmel,24c01"; ++ reg = <0x50>; ++ pagesize = <8>; ++ }; ++}; ++ ++&nand { ++ status = "okay"; ++ nandcs@1 { ++ compatible = "brcm,nandcs"; ++ reg = <0>; ++ nand-on-flash-bbt; ++ /*nand-bus-width = <8>;*/ ++ nand-ecc-strength = <24>; ++ nand-ecc-step-size = <1024>; ++ brcm,nand-oob-sector-size = <27>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "nboot"; ++ reg = <0x0 0x200000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "nenv"; ++ reg = <0x200000 0x400000>; ++ }; ++ partition@2 { ++ label = "nsystem"; ++ reg = <0x600000 0xa00000>; ++ }; ++ partition@3 { ++ label = "nrootfs"; ++ reg = <0x1000000 0xf000000>; ++ }; ++ partition@4 { ++ label = "ncustfs"; ++ reg = <0x10000000 0x30000000>; ++ }; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash: m25p80@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "m25p80"; ++ m25p,fast-read = <1>; ++ spi-max-frequency = <62500000>; ++ reg = <0x0>; ++ partition@0 { ++ label = "boot"; ++ reg = <0x00000000 0x000c0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "env"; ++ reg = <0x000c0000 0x00040000>; ++ }; ++ partition@2 { ++ label = "system"; ++ reg = 
<0x00100000 0x00f00000>; ++ }; ++ partition@3 { ++ label = "rootfs"; ++ reg = <0x01000000 0x03000000>; ++ }; ++ }; ++}; ++ ++&pnor_flash { ++ status = "okay"; ++ ++ partition@0 { ++ label = "pboot"; ++ reg = <0x0 0xc0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "penv"; ++ reg = <0xc0000 0x40000>; ++ }; ++ partition@2 { ++ label = "psystem"; ++ reg = <0x100000 0xf00000>; ++ }; ++ partition@3 { ++ label = "prootfs"; ++ reg = <0x1000000 0x1000000>; ++ }; ++ partition@4 { ++ label = "pcustfs"; ++ reg = <0x2000000 0x2000000>; ++ }; ++}; ++ ++&ccg_mdio_int { ++ status = "okay"; ++}; ++ ++&mdio_int { ++ status = "okay"; ++}; ++ ++&mdio_ext { ++ status = "okay"; ++}; ++ ++&hwrng { ++ status = "okay"; ++}; ++ ++&iproc_wdt { ++ status = "okay"; ++}; ++ ++&dmac0 { ++ status = "okay"; ++}; ++ ++&crypto { ++ status = "okay"; ++}; ++ ++&iproc_cmicd { ++ status = "okay"; ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm956150.dts b/arch/arm/boot/dts/bcm956150.dts +--- a/arch/arm/boot/dts/bcm956150.dts 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm956150.dts 2017-11-09 17:52:54.744938000 +0800 +@@ -0,0 +1,180 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. 
++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++/dts-v1/; ++ ++#include "bcm-hurricane2.dtsi" ++ ++/ { ++ model = "Broadcom HR2 SVK (BCM956150K)"; ++ compatible = "brcm,bcm956150k", "brcm,hurricane2"; ++ ++ aliases { ++ serial0 = &uart1; ++ serial1 = &uart0; ++ ethernet0 = &gmac0; ++ }; ++ ++ chosen { ++ bootargs = "console=ttyS0,115200n8 maxcpus=1 mem=496M"; ++ }; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "okay"; ++}; ++ ++&gmac0 { ++ status = "okay"; ++}; ++ ++&gpio_cca { ++ status = "okay"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&nand { ++ status = "okay"; ++ nandcs@1 { ++ compatible = "brcm,nandcs"; ++ reg = <0>; ++ nand-on-flash-bbt; ++ /*nand-bus-width = <8>;*/ ++ nand-ecc-strength = <24>; ++ nand-ecc-step-size = <1024>; ++ brcm,nand-oob-sector-size = <27>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "nboot"; ++ reg = <0x0 0x200000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "nenv"; ++ reg = <0x200000 0x400000>; ++ }; ++ partition@2 { ++ label = "nsystem"; ++ reg = <0x600000 0xa00000>; ++ }; ++ partition@3 { ++ label = "nrootfs"; ++ reg = <0x1000000 0xf000000>; ++ }; ++ partition@4 { ++ label = "ncustfs"; ++ reg = <0x10000000 0x70000000>; ++ }; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash: m25p80@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "m25p80"; ++ m25p,fast-read = <1>; ++ spi-max-frequency = <62500000>; ++ reg = <0x0>; ++ partition@0 { ++ label = "boot"; ++ reg = <0x00000000 0x000c0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "env"; ++ reg = <0x000c0000 0x00040000>; ++ }; ++ partition@2 { ++ label = "system"; ++ reg = <0x00100000 0x00f00000>; ++ }; ++ partition@3 { ++ label = "rootfs"; ++ reg = <0x01000000 0x01000000>; ++ }; ++ }; ++}; ++ ++&pnor_flash { ++ status = "okay"; ++ ++ partition@0 { ++ label = "pboot"; ++ reg = <0x0 0xc0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "penv"; ++ reg = <0xc0000 0x40000>; ++ }; ++ partition@2 { ++ label = "psystem"; 
++ reg = <0x100000 0xf00000>; ++ }; ++ partition@3 { ++ label = "prootfs"; ++ reg = <0x1000000 0x1000000>; ++ }; ++ partition@4 { ++ label = "pcustfs"; ++ reg = <0x2000000 0x2000000>; ++ }; ++}; ++ ++&mdio_int { ++ status = "okay"; ++}; ++ ++&mdio_ext { ++ status = "okay"; ++}; ++ ++ ++&iproc_wdt { ++ status = "okay"; ++}; ++ ++&iproc_cmicd { ++ status = "okay"; ++}; ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm956160.dts b/arch/arm/boot/dts/bcm956160.dts +--- a/arch/arm/boot/dts/bcm956160.dts 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm956160.dts 2017-11-09 17:52:54.745938000 +0800 +@@ -0,0 +1,212 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++/dts-v1/; ++ ++#include "bcm-hurricane3.dtsi" ++ ++/ { ++ model = "Broadcom HR3 SVK (BCM956160K)"; ++ compatible = "brcm,bcm956160k", "brcm,hurricane3"; ++ ++ aliases { ++ serial0 = &uart0; ++ serial1 = &uart1; ++ ethernet0 = &gmac0; ++ }; ++ ++ chosen { ++ bootargs = "console=ttyS0,115200n8 maxcpus=1 mem=496M"; ++ }; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "okay"; ++}; ++ ++&gmac0 { ++ status = "okay"; ++}; ++ ++&usbphy0 { ++ status = "okay"; ++}; ++ ++&ehci0 { ++ status = "okay"; ++}; ++ ++&ohci0 { ++ status = "okay"; ++}; ++ ++&usbd { ++ status = "okay"; ++}; ++ ++&sdio { ++ status = "okay"; ++}; ++ ++&gpio_ccg { ++ status = "okay"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ eeprom@0x50 { ++ compatible = "atmel,24c01"; ++ reg = <0x50>; ++ pagesize = <8>; ++ }; ++}; ++ ++&nand { ++ status = "okay"; ++ nandcs@1 { ++ compatible = "brcm,nandcs"; ++ reg = <0>; ++ nand-on-flash-bbt; ++ /*nand-bus-width = <8>;*/ ++ nand-ecc-strength = <24>; ++ nand-ecc-step-size = <1024>; ++ brcm,nand-oob-sector-size = <27>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "nboot"; ++ reg = <0x0 0x200000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "nenv"; ++ reg = <0x200000 0x400000>; ++ }; ++ partition@2 { ++ label = "nsystem"; ++ reg = <0x600000 0xa00000>; ++ }; ++ partition@3 { ++ label = "nrootfs"; ++ reg = <0x1000000 
0xf000000>; ++ }; ++ partition@4 { ++ label = "ncustfs"; ++ reg = <0x10000000 0x30000000>; ++ }; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash: m25p80@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "m25p80"; ++ m25p,fast-read = <1>; ++ spi-max-frequency = <62500000>; ++ reg = <0x0>; ++ partition@0 { ++ label = "boot"; ++ reg = <0x00000000 0x000c0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "env"; ++ reg = <0x000c0000 0x00040000>; ++ }; ++ partition@2 { ++ label = "system"; ++ reg = <0x00100000 0x00f00000>; ++ }; ++ partition@3 { ++ label = "rootfs"; ++ reg = <0x01000000 0x03000000>; ++ }; ++ }; ++}; ++ ++&pnor_flash { ++ status = "okay"; ++ ++ partition@0 { ++ label = "pboot"; ++ reg = <0x0 0xc0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "penv"; ++ reg = <0xc0000 0x40000>; ++ }; ++ partition@2 { ++ label = "psystem"; ++ reg = <0x100000 0xf00000>; ++ }; ++ partition@3 { ++ label = "prootfs"; ++ reg = <0x1000000 0x1000000>; ++ }; ++ partition@4 { ++ label = "pcustfs"; ++ reg = <0x2000000 0x2000000>; ++ }; ++}; ++ ++&mdio_int { ++ status = "okay"; ++}; ++ ++&mdio_ext { ++ status = "okay"; ++}; ++ ++&hwrng { ++ status = "okay"; ++}; ++ ++&iproc_wdt { ++ status = "okay"; ++}; ++ ++&iproc_cmicd { ++ status = "okay"; ++}; ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm956170.dts b/arch/arm/boot/dts/bcm956170.dts +--- a/arch/arm/boot/dts/bcm956170.dts 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm956170.dts 2017-11-09 17:52:54.746936000 +0800 +@@ -0,0 +1,239 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. 
++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++/dts-v1/; ++ ++#include "bcm-greyhound2.dtsi" ++ ++/ { ++ model = "Broadcom GH2 SVK (BCM956170)"; ++ compatible = "brcm,bcm956170", "brcm,greyhound2"; ++ ++ aliases { ++ serial0 = &uart0; ++ serial1 = &uart1; ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; ++ }; ++ ++ chosen { ++ bootargs = "console=ttyS0,115200n8 maxcpus=1 mem=496M"; ++ }; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "okay"; ++}; ++ ++&gmac0 { ++ status = "okay"; ++}; ++ ++&gmac1 { ++ status = "okay"; ++}; ++ ++&usbphy0 { ++ status = "okay"; ++}; ++ ++&ehci0 { ++ status = "okay"; ++}; ++ ++&ohci0 { ++ status = "okay"; ++}; ++ ++&usbd { ++ status = "okay"; ++}; ++ ++&gpio_ccg { ++ status = "okay"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ eeprom@0x50 { ++ compatible = "atmel,24c01"; ++ reg = <0x50>; ++ pagesize = <8>; ++ }; ++}; ++ ++&nand { ++ status = "okay"; ++ nandcs@1 { ++ compatible = "brcm,nandcs"; ++ reg = <0>; ++ nand-on-flash-bbt; ++ /*nand-bus-width = <8>;*/ ++ nand-ecc-strength = <24>; ++ nand-ecc-step-size = <1024>; ++ brcm,nand-oob-sector-size = <27>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "nboot"; ++ reg = <0x0 0x200000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "nenv"; ++ reg = <0x200000 0x400000>; ++ }; ++ partition@2 { ++ label = "nsystem"; ++ reg = <0x600000 0xa00000>; ++ }; ++ partition@3 { ++ label = "nrootfs"; ++ reg = <0x1000000 0xf000000>; ++ }; ++ partition@4 { ++ label = "ncustfs"; ++ reg = <0x10000000 0x30000000>; ++ }; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash: m25p80@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "m25p80"; ++ m25p,fast-read = <1>; ++ spi-max-frequency = <62500000>; ++ reg = <0x0>; ++ partition@0 { ++ label = "boot"; ++ reg = <0x00000000 0x000c0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "env"; ++ reg = <0x000c0000 0x00040000>; ++ }; ++ partition@2 { ++ label = "system"; ++ reg = 
<0x00100000 0x00f00000>; ++ }; ++ partition@3 { ++ label = "rootfs"; ++ reg = <0x01000000 0x03000000>; ++ }; ++ }; ++}; ++ ++&pnor_flash { ++ status = "okay"; ++ ++ partition@0 { ++ label = "pboot"; ++ reg = <0x0 0xc0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "penv"; ++ reg = <0xc0000 0x40000>; ++ }; ++ partition@2 { ++ label = "psystem"; ++ reg = <0x100000 0xf00000>; ++ }; ++ partition@3 { ++ label = "prootfs"; ++ reg = <0x1000000 0x1000000>; ++ }; ++ partition@4 { ++ label = "pcustfs"; ++ reg = <0x2000000 0x2000000>; ++ }; ++}; ++ ++&ccg_mdio_int { ++ status = "okay"; ++ pcie_phy0: pcie_phy@0 { ++ reg = <2>; ++ }; ++}; ++ ++&mdio_int { ++ status = "okay"; ++ amac_serdes0: amac_serdes@0 { ++ reg = <25>; ++ }; ++ amac_serdes1: amac_serdes@1 { ++ reg = <26>; ++ }; ++}; ++ ++&mdio_ext { ++ status = "okay"; ++ amac_phy0: amac_phy@0 { ++ reg = <16>; ++ }; ++ amac_phy1: amac_phy@1 { ++ reg = <17>; ++ }; ++}; ++ ++&hwrng { ++ status = "okay"; ++}; ++ ++&iproc_wdt { ++ status = "okay"; ++}; ++ ++&dmac0 { ++ status = "okay"; ++}; ++ ++&crypto { ++ status = "okay"; ++}; ++ ++&iproc_cmicd { ++ status = "okay"; ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm956260.dts b/arch/arm/boot/dts/bcm956260.dts +--- a/arch/arm/boot/dts/bcm956260.dts 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm956260.dts 2017-11-09 17:52:54.747931000 +0800 +@@ -0,0 +1,182 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. 
++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++/dts-v1/; ++ ++#include "bcm-saber2.dtsi" ++ ++/ { ++ model = "Broadcom SB2 SVK (BCM956260K)"; ++ compatible = "brcm,bcm956260k", "brcm,saber2"; ++ ++ aliases { ++ serial0 = &uart0; ++ serial1 = &uart1; ++ ethernet0 = &gmac0; ++ }; ++ ++ chosen { ++ bootargs = "console=ttyS0,115200n8 maxcpus=1 mem=496M"; ++ }; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "okay"; ++}; ++ ++&gmac0 { ++ status = "okay"; ++}; ++ ++&usbphy0 { ++ status = "okay"; ++}; ++ ++&ehci0 { ++ status = "okay"; ++}; ++ ++&ohci0 { ++ status = "okay"; ++}; ++ ++&usbd { ++ status = "okay"; ++}; ++ ++&gpio_ccg { ++ status = "okay"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ eeprom@0x50 { ++ compatible = "atmel,24c64"; ++ reg = <0x50>; ++ pagesize = <32>; ++ }; ++}; ++ ++&nand { ++ status = "okay"; ++ nandcs@1 { ++ compatible = "brcm,nandcs"; ++ reg = <0>; ++ nand-on-flash-bbt; ++ /*nand-bus-width = <8>;*/ ++ nand-ecc-strength = <24>; ++ nand-ecc-step-size = <1024>; ++ brcm,nand-oob-sector-size = <27>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "nboot"; ++ reg = <0x0 0x200000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "nenv"; ++ reg = <0x200000 0x400000>; ++ }; ++ partition@2 { ++ label = "nsystem"; ++ reg = <0x600000 0xa00000>; ++ }; ++ partition@3 { ++ label = "nrootfs"; ++ reg = <0x1000000 0xf000000>; ++ }; ++ partition@4 { ++ label = "ncustfs"; ++ reg = <0x10000000 0x70000000>; ++ }; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash: m25p80@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "m25p80"; ++ m25p,fast-read = <1>; ++ spi-max-frequency = <62500000>; ++ reg = <0x0>; ++ partition@0 { ++ label = "boot"; ++ reg = <0x00000000 0x000c0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "env"; ++ reg = <0x000c0000 0x00040000>; ++ }; ++ partition@2 { ++ label = "system"; ++ reg = <0x00100000 0x00f00000>; ++ }; ++ partition@3 { ++ label = "rootfs"; ++ reg = 
<0x01000000 0x01000000>; ++ }; ++ }; ++}; ++ ++ ++&mdio_int { ++ status = "okay"; ++}; ++ ++&mdio_ext { ++ status = "okay"; ++}; ++ ++&hwrng { ++ status = "okay"; ++}; ++ ++&iproc_wdt { ++ status = "okay"; ++}; ++ ++&iproc_cmicd { ++ status = "okay"; ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm956340.dts b/arch/arm/boot/dts/bcm956340.dts +--- a/arch/arm/boot/dts/bcm956340.dts 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm956340.dts 2017-11-09 17:52:54.748928000 +0800 +@@ -0,0 +1,196 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. ++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. ++ */ ++ ++/dts-v1/; ++ ++#include "bcm-helix4.dtsi" ++ ++/ { ++ model = "Broadcom HX4 SVK (BCM956340K)"; ++ compatible = "brcm,bcm956340k", "brcm,helix4"; ++ ++ aliases { ++ serial0 = &uart0; ++ serial1 = &uart1; ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; ++ }; ++ ++ chosen { ++ bootargs = "console=ttyS0,115200n8 maxcpus=2 mem=496M"; ++ }; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "okay"; ++}; ++ ++&gmac0 { ++ status = "okay"; ++}; ++ ++&gmac1 { ++ status = "okay"; ++}; ++ ++&usbphy0 { ++ status = "okay"; ++}; ++ ++&ehci0 { ++ status = "okay"; ++}; ++ ++&usbd { ++ status = "okay"; ++}; ++ ++&gpio_cca { ++ status = "okay"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&pcie1 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ eeprom@0x50 { ++ compatible = "atmel,24c01"; ++ reg = <0x50>; ++ pagesize = <8>; ++ }; ++}; ++ ++&i2c1 { ++ status = "okay"; ++}; ++ ++&nand { ++ status = "okay"; ++ nandcs@1 { ++ compatible = "brcm,nandcs"; ++ reg = <0>; ++ nand-on-flash-bbt; ++ /*nand-bus-width = <8>;*/ ++ nand-ecc-strength = <24>; ++ nand-ecc-step-size = <1024>; ++ brcm,nand-oob-sector-size = <27>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "nboot"; ++ reg = <0x0 0x200000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "nenv"; ++ reg = <0x200000 0x400000>; ++ }; ++ partition@2 { ++ label = "nsystem"; ++ reg = <0x600000 0xa00000>; ++ }; ++ partition@3 
{ ++ label = "nrootfs"; ++ reg = <0x1000000 0xf000000>; ++ }; ++ partition@4 { ++ label = "ncustfs"; ++ reg = <0x10000000 0x70000000>; ++ }; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash: m25p80@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "m25p80"; ++ m25p,fast-read = <1>; ++ spi-max-frequency = <62500000>; ++ reg = <0x0>; ++ partition@0 { ++ label = "boot"; ++ reg = <0x00000000 0x000c0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "env"; ++ reg = <0x000c0000 0x00040000>; ++ }; ++ partition@2 { ++ label = "system"; ++ reg = <0x00100000 0x00f00000>; ++ }; ++ partition@3 { ++ label = "rootfs"; ++ reg = <0x01000000 0x01000000>; ++ }; ++ }; ++}; ++ ++ ++&mdio_int { ++ status = "okay"; ++}; ++ ++&mdio_ext { ++ status = "okay"; ++}; ++ ++&hwrng { ++ status = "okay"; ++}; ++ ++&iproc_wdt { ++ status = "okay"; ++}; ++ ++&dmac0 { ++ status = "okay"; ++}; ++ ++&iproc_cmicd { ++ status = "okay"; ++}; ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/bcm956450.dts b/arch/arm/boot/dts/bcm956450.dts +--- a/arch/arm/boot/dts/bcm956450.dts 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/bcm956450.dts 2017-11-09 17:52:54.748939000 +0800 +@@ -0,0 +1,195 @@ ++/* ++ * BSD LICENSE ++ * ++ * Copyright(c) 2016 Broadcom Corporation. All rights reserved. ++ * ++ * Redistribution and use in source and binary forms, with or without ++ * modification, are permitted provided that the following conditions ++ * are met: ++ * ++ * * Redistributions of source code must retain the above copyright ++ * notice, this list of conditions and the following disclaimer. ++ * * Redistributions in binary form must reproduce the above copyright ++ * notice, this list of conditions and the following disclaimer in ++ * the documentation and/or other materials provided with the ++ * distribution. 
++ * * Neither the name of Broadcom Corporation nor the names of its ++ * contributors may be used to endorse or promote products derived ++ * from this software without specific prior written permission. ++ * ++ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS ++ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT ++ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR ++ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT ++ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, ++ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT ++ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, ++ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY ++ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT ++ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE ++ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
++ */ ++ ++/dts-v1/; ++ ++#include "bcm-katana2.dtsi" ++ ++/ { ++ model = "Broadcom KT2 SVK (BCM956450K)"; ++ compatible = "brcm,bcm956450k", "brcm,katana2"; ++ ++ aliases { ++ serial0 = &uart0; ++ serial1 = &uart1; ++ ethernet0 = &gmac0; ++ ethernet1 = &gmac1; ++ }; ++ ++ chosen { ++ bootargs = "console=ttyS0,115200n8 maxcpus=2 mem=496M"; ++ }; ++}; ++ ++&uart0 { ++ status = "okay"; ++}; ++ ++&uart1 { ++ status = "okay"; ++}; ++ ++&gmac0 { ++ status = "okay"; ++}; ++ ++&gmac1 { ++ status = "okay"; ++}; ++ ++&usbphy0 { ++ status = "okay"; ++}; ++ ++&ehci0 { ++ status = "okay"; ++}; ++ ++&usbd { ++ status = "okay"; ++}; ++ ++&gpio_cca { ++ status = "okay"; ++}; ++ ++&pcie0 { ++ status = "okay"; ++}; ++ ++&pcie1 { ++ status = "okay"; ++}; ++ ++&i2c0 { ++ status = "okay"; ++ eeprom@0x50 { ++ compatible = "atmel,24c01"; ++ reg = <0x50>; ++ pagesize = <8>; ++ }; ++}; ++ ++&i2c1 { ++ status = "okay"; ++}; ++ ++&nand { ++ status = "okay"; ++ nandcs@1 { ++ compatible = "brcm,nandcs"; ++ reg = <0>; ++ nand-on-flash-bbt; ++ /*nand-bus-width = <8>;*/ ++ nand-ecc-strength = <24>; ++ nand-ecc-step-size = <1024>; ++ brcm,nand-oob-sector-size = <27>; ++ #address-cells = <1>; ++ #size-cells = <1>; ++ ++ partition@0 { ++ label = "nboot"; ++ reg = <0x0 0x200000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "nenv"; ++ reg = <0x200000 0x400000>; ++ }; ++ partition@2 { ++ label = "nsystem"; ++ reg = <0x600000 0xa00000>; ++ }; ++ partition@3 { ++ label = "nrootfs"; ++ reg = <0x1000000 0xf000000>; ++ }; ++ partition@4 { ++ label = "ncustfs"; ++ reg = <0x10000000 0x70000000>; ++ }; ++ }; ++}; ++ ++&qspi { ++ status = "okay"; ++ flash: m25p80@0 { ++ #address-cells = <1>; ++ #size-cells = <1>; ++ compatible = "m25p80"; ++ m25p,fast-read = <1>; ++ spi-max-frequency = <62500000>; ++ reg = <0x0>; ++ partition@0 { ++ label = "boot"; ++ reg = <0x00000000 0x000c0000>; ++ /*read-only;*/ ++ }; ++ partition@1 { ++ label = "env"; ++ reg = <0x000c0000 0x00040000>; ++ }; ++ partition@2 { ++ 
label = "system"; ++ reg = <0x00100000 0x00f00000>; ++ }; ++ partition@3 { ++ label = "rootfs"; ++ reg = <0x01000000 0x01000000>; ++ }; ++ }; ++}; ++ ++ ++&mdio_int { ++ status = "okay"; ++}; ++ ++&mdio_ext { ++ status = "okay"; ++}; ++ ++&hwrng { ++ status = "okay"; ++}; ++ ++&iproc_wdt { ++ status = "okay"; ++}; ++ ++&dmac0 { ++ status = "okay"; ++}; ++ ++&iproc_cmicd { ++ status = "okay"; ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/greyhound.its b/arch/arm/boot/dts/greyhound.its +--- a/arch/arm/boot/dts/greyhound.its 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/greyhound.its 2017-11-09 17:52:54.939941000 +0800 +@@ -0,0 +1,62 @@ ++/dts-v1/; ++ ++/ { ++ description = "Linux kernel and FDT blob"; ++ #address-cells = <1>; ++ ++ images { ++ kernel@1 { ++ description = "Broadcom iProc Linux"; ++ data = /incbin/("../zImage"); ++ type = "kernel"; ++ arch = "arm"; ++ os = "linux"; ++ compression = "none"; ++ load = <0x61008000>; ++ entry = <0x61008000>; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ fdt@1 { ++ description = "Flattened Device Tree blob - bcm95341x.dtb"; ++ data = /incbin/("./bcm95341x.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++/* ++ fdt@2 { ++ description = "Flattened Device Tree blob - bcm95606x.dtb"; ++ data = /incbin/("./bcm95606x.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "md5"; ++ }; ++ }; ++*/ ++ }; ++ ++ configurations { ++ default = "conf@1"; ++ conf@1 { ++ description = "Boot Linux kernel with FDT blob "; ++ kernel = "kernel@1"; ++ fdt = "fdt@1"; ++ }; ++/* ++ conf@2 { ++ description = "Boot Linux kernel with FDT blob"; ++ kernel = "kernel@1"; ++ fdt = "fdt@2"; ++ }; ++*/ ++ }; ++}; ++ +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X 
/projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/greyhound2.its b/arch/arm/boot/dts/greyhound2.its +--- a/arch/arm/boot/dts/greyhound2.its 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/greyhound2.its 2017-11-09 17:52:54.945934000 +0800 +@@ -0,0 +1,62 @@ ++/dts-v1/; ++ ++/ { ++ description = "Linux kernel and FDT blob"; ++ #address-cells = <1>; ++ ++ images { ++ kernel@1 { ++ description = "Broadcom iProc Linux"; ++ data = /incbin/("../zImage"); ++ type = "kernel"; ++ arch = "arm"; ++ os = "linux"; ++ compression = "none"; ++ load = <0x61008000>; ++ entry = <0x61008000>; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ fdt@1 { ++ description = "Flattened Device Tree blob - bcm956170.dtb"; ++ data = /incbin/("./bcm956170.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ fdt@2 { ++ description = "Flattened Device Tree blob - bcm95357x.dtb"; ++ data = /incbin/("./bcm95357x.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ }; ++ ++ configurations { ++ default = "conf@1"; ++ conf@1 { ++ description = "Boot Linux kernel with FDT blob "; ++ kernel = "kernel@1"; ++ fdt = "fdt@1"; ++ }; ++/* ++ conf@2 { ++ description = "Boot Linux kernel with FDT blob"; ++ kernel = "kernel@1"; ++ fdt = "fdt@2"; ++ }; ++*/ ++ }; ++}; ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/helix4.its b/arch/arm/boot/dts/helix4.its +--- a/arch/arm/boot/dts/helix4.its 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/helix4.its 2017-11-09 17:52:54.946939000 +0800 +@@ -0,0 +1,43 @@ ++/dts-v1/; ++ ++/ { ++ description = "Linux kernel and FDT blob"; ++ #address-cells = <1>; ++ ++ images { ++ kernel@1 { ++ description = "Broadcom iProc Linux"; ++ data = /incbin/("../zImage"); ++ type = 
"kernel"; ++ arch = "arm"; ++ os = "linux"; ++ compression = "none"; ++ load = <0x61008000>; ++ entry = <0x61008000>; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ fdt@1 { ++ description = "Flattened Device Tree blob - bcm956340.dtb"; ++ data = /incbin/("./bcm956340.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ }; ++ ++ configurations { ++ default = "conf@1"; ++ conf@1 { ++ description = "Boot Linux kernel with FDT blob "; ++ kernel = "kernel@1"; ++ fdt = "fdt@1"; ++ }; ++ }; ++}; ++ +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/hurricane2.its b/arch/arm/boot/dts/hurricane2.its +--- a/arch/arm/boot/dts/hurricane2.its 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/hurricane2.its 2017-11-09 17:52:54.966932000 +0800 +@@ -0,0 +1,43 @@ ++/dts-v1/; ++ ++/ { ++ description = "Linux kernel and FDT blob"; ++ #address-cells = <1>; ++ ++ images { ++ kernel@1 { ++ description = "Broadcom iProc Linux"; ++ data = /incbin/("../zImage"); ++ type = "kernel"; ++ arch = "arm"; ++ os = "linux"; ++ compression = "none"; ++ load = <0x81008000>; ++ entry = <0x81008000>; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ fdt@1 { ++ description = "Flattened Device Tree blob - bcm956150.dtb"; ++ data = /incbin/("./bcm956150.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ }; ++ ++ configurations { ++ default = "conf@1"; ++ conf@1 { ++ description = "Boot Linux kernel with FDT blob "; ++ kernel = "kernel@1"; ++ fdt = "fdt@1"; ++ }; ++ }; ++}; ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/hurricane3.its b/arch/arm/boot/dts/hurricane3.its +--- a/arch/arm/boot/dts/hurricane3.its 1970-01-01 08:00:00.000000000 +0800 ++++ 
b/arch/arm/boot/dts/hurricane3.its 2017-11-09 17:52:54.966943000 +0800 +@@ -0,0 +1,60 @@ ++/dts-v1/; ++ ++/ { ++ description = "Linux kernel and FDT blob"; ++ #address-cells = <1>; ++ ++ images { ++ kernel@1 { ++ description = "Broadcom iProc Linux"; ++ data = /incbin/("../zImage"); ++ type = "kernel"; ++ arch = "arm"; ++ os = "linux"; ++ compression = "none"; ++ load = <0x61008000>; ++ entry = <0x61008000>; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ fdt@1 { ++ description = "Flattened Device Tree blob - bcm956160.dtb"; ++ data = /incbin/("./bcm956160.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ fdt@2 { ++ description = "Flattened Device Tree blob - bcm953444.dtb"; ++ data = /incbin/("./bcm953444.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ }; ++ ++ configurations { ++ default = "conf@1"; ++ conf@1 { ++ description = "Boot Linux kernel with FDT blob 1"; ++ kernel = "kernel@1"; ++ fdt = "fdt@1"; ++ }; ++ ++ conf@2 { ++ description = "Boot Linux kernel with FDT blob 2"; ++ kernel = "kernel@1"; ++ fdt = "fdt@2"; ++ }; ++ }; ++}; ++ +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/katana2.its b/arch/arm/boot/dts/katana2.its +--- a/arch/arm/boot/dts/katana2.its 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/katana2.its 2017-11-09 17:52:55.281942000 +0800 +@@ -0,0 +1,43 @@ ++/dts-v1/; ++ ++/ { ++ description = "Linux kernel and FDT blob"; ++ #address-cells = <1>; ++ ++ images { ++ kernel@1 { ++ description = "Broadcom iProc Linux"; ++ data = /incbin/("../zImage"); ++ type = "kernel"; ++ arch = "arm"; ++ os = "linux"; ++ compression = "none"; ++ load = <0x61008000>; ++ entry = <0x61008000>; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ fdt@1 { ++ description = "Flattened Device Tree 
blob - bcm956450.dtb"; ++ data = /incbin/("./bcm956450.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ }; ++ ++ configurations { ++ default = "conf@1"; ++ conf@1 { ++ description = "Boot Linux kernel with FDT blob "; ++ kernel = "kernel@1"; ++ fdt = "fdt@1"; ++ }; ++ }; ++}; ++ +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/saber2.its b/arch/arm/boot/dts/saber2.its +--- a/arch/arm/boot/dts/saber2.its 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/saber2.its 2017-11-09 17:52:55.880949000 +0800 +@@ -0,0 +1,43 @@ ++/dts-v1/; ++ ++/ { ++ description = "Linux kernel and FDT blob"; ++ #address-cells = <1>; ++ ++ images { ++ kernel@1 { ++ description = "Broadcom iProc Linux"; ++ data = /incbin/("../zImage"); ++ type = "kernel"; ++ arch = "arm"; ++ os = "linux"; ++ compression = "none"; ++ load = <0x61008000>; ++ entry = <0x61008000>; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ fdt@1 { ++ description = "Flattened Device Tree blob - bcm956260.dtb"; ++ data = /incbin/("./bcm956260.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ }; ++ ++ configurations { ++ default = "conf@1"; ++ conf@1 { ++ description = "Boot Linux kernel with FDT blob "; ++ kernel = "kernel@1"; ++ fdt = "fdt@1"; ++ }; ++ }; ++}; ++ +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/boot/dts/wolfhound2.its b/arch/arm/boot/dts/wolfhound2.its +--- a/arch/arm/boot/dts/wolfhound2.its 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/boot/dts/wolfhound2.its 2017-11-09 17:52:56.333945000 +0800 +@@ -0,0 +1,60 @@ ++/dts-v1/; ++ ++/ { ++ description = "Linux kernel and FDT blob"; ++ #address-cells = <1>; ++ ++ images { ++ 
kernel@1 { ++ description = "Broadcom iProc Linux"; ++ data = /incbin/("../zImage"); ++ type = "kernel"; ++ arch = "arm"; ++ os = "linux"; ++ compression = "none"; ++ load = <0x61008000>; ++ entry = <0x61008000>; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ fdt@1 { ++ description = "Flattened Device Tree blob - bcm953547.dtb"; ++ data = /incbin/("./bcm953547.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; ++ ++ /* fdt@2 { ++ description = "Flattened Device Tree blob - bcm953444.dtb"; ++ data = /incbin/("./bcm953444.dtb"); ++ type = "flat_dt"; ++ arch = "arm"; ++ compression = "none"; ++ hash@1 { ++ algo = "crc32"; ++ }; ++ }; */ ++ }; ++ ++ configurations { ++ default = "conf@1"; ++ conf@1 { ++ description = "Boot Linux kernel with FDT blob 1"; ++ kernel = "kernel@1"; ++ fdt = "fdt@1"; ++ }; ++ ++ /* conf@2 { ++ description = "Boot Linux kernel with FDT blob 2"; ++ kernel = "kernel@1"; ++ fdt = "fdt@2"; ++ }; */ ++ }; ++}; ++ +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/mach-iproc/Kconfig b/arch/arm/mach-iproc/Kconfig +--- a/arch/arm/mach-iproc/Kconfig 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/mach-iproc/Kconfig 2017-11-09 17:52:58.315964000 +0800 +@@ -0,0 +1,84 @@ ++menuconfig ARCH_XGS_IPROC ++ bool "Broadcom XGS iProc Support" if ARCH_MULTI_V7 ++ select HAVE_ARM_TWD if SMP ++ select HAVE_ARM_SCU if SMP ++ select ARM_GLOBAL_TIMER ++ select ARM_GIC ++ select ARCH_REQUIRE_GPIOLIB ++ select CACHE_L2X0 ++ select ARM_AMBA ++ select ARCH_SUPPORTS_BIG_ENDIAN ++ select CPU_ENDIAN_BE8 if CPU_BIG_ENDIAN ++ select ARM_ERRATA_754322 ++ select ARM_ERRATA_764369 if SMP ++ select ARM_ERRATA_775420 ++ help ++ This enables support for Broadcom XGS iProc based SoC chips ++ ++if ARCH_XGS_IPROC ++ ++comment "XGS iProc SoC based Machine types" ++ ++choice ++ prompt "XGS iProc SoC based 
Machine types" ++ default MACH_HX4 ++ ++config MACH_HX4 ++ bool "Support Broadcom Helix4 bring-up board" ++ help ++ Support for the Broadcom Helix4 bring-up board. ++ ++config MACH_HR2 ++ bool "Support Broadcom Hurricane2 bring-up board" ++ help ++ Support for the Broadcom Hurricane2 bring-up board. ++ ++config MACH_KT2 ++ bool "Support Broadcom Katana2 bring-up board" ++ help ++ Support for the Broadcom Katana2 bring-up board. ++ ++config MACH_GH ++ bool "Support Broadcom Greyhound bring-up board" ++ select MACH_IPROC_P7 ++ help ++ Support for the Broadcom Greyhound bring-up board. ++ ++config MACH_SB2 ++ bool "Support Broadcom Saber2 bring-up board" ++ select MACH_IPROC_P7 ++ help ++ Support for the Broadcom Saber2 bring-up board. ++ ++config MACH_HR3 ++ bool "Support Broadcom Hurricane3 bring-up board" ++ select MACH_IPROC_P7 ++ help ++ Support for the Broadcom Hurricane3 bring-up board. ++ ++config MACH_GH2 ++ bool "Support Broadcom Greyhound2 bring-up board" ++ select MACH_IPROC_P7 ++ help ++ Support for the Broadcom Greyhound2 bring-up board. ++endchoice ++ ++config MACH_IPROC_P7 ++ bool "Support iProc Profile 7 architecture" ++ depends on (MACH_GH || MACH_SB2 || MACH_HR3 || MACH_GH2 || MACH_WH2) ++ help ++ Support for iProc Profile 7 architecture. ++ ++config MACH_WH2 ++ bool "Support Broadcom Wolfhound2 bring-up board" ++ depends on MACH_HR3 ++ default n ++ help ++ Support for the Broadcom Wolfhound2 bring-up board. ++ ++config MACH_IPROC_EMULATION ++ bool "Support iProc emulation" ++ help ++ Support for the iProc emulation. 
++ ++endif +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/mach-iproc/Makefile b/arch/arm/mach-iproc/Makefile +--- a/arch/arm/mach-iproc/Makefile 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/mach-iproc/Makefile 2017-11-09 17:52:58.316963000 +0800 +@@ -0,0 +1,3 @@ ++obj-y := board_bu.o ++obj-y += shm.o ++obj-$(CONFIG_SMP) += platsmp.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/mach-iproc/board_bu.c b/arch/arm/mach-iproc/board_bu.c +--- a/arch/arm/mach-iproc/board_bu.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/mach-iproc/board_bu.c 2017-11-09 17:52:58.317968000 +0800 +@@ -0,0 +1,120 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define DMU_CRU_RESET_BASE 0x200 ++ ++#if defined(CONFIG_PL330_DMA) || defined(CONFIG_XGS_IPROC_DMA330_DMA) ++/* SB2/HR3 */ ++#define DMAC_IDM_RESET_OFFSET 0xf800 ++/* HX4/KT2/HR2/GH */ ++#define DMAC_IDM_RESET_OFFSET_1 0x14800 ++#endif /* CONFIG_PL330_DMA || CONFIG_XGS_IPROC_DMA330_DMA */ ++ ++enum xgs_iproc_dev_id { ++ XGS_IPROC_HX4=0, ++ XGS_IPROC_KT2, ++ XGS_IPROC_HR2, ++ XGS_IPROC_GH, ++ XGS_IPROC_SB2, ++ XGS_IPROC_HR3, ++ XGS_IPROC_GH2, ++ XGS_IPROC_GENERIC, ++}; ++ ++const char *const xgs_iproc_dt_compat[] = { ++ "brcm,helix4", ++ "brcm,katana2", ++ "brcm,hurricane2", ++ "brcm,greyhound", ++ "brcm,saber2", ++ "brcm,hurricane3", ++ "brcm,greyhound2", ++ "brcm,xgs-iproc", ++ NULL, ++}; ++ ++#if defined(CONFIG_PL330_DMA) || defined(CONFIG_XGS_IPROC_DMA330_DMA) ++void xgs_iproc_dmac_idm_reset(void) ++{ ++ void __iomem *reset_base = NULL; ++ ++ /* Need to de-assert reset of DMAC before of_platform_populate */ ++ if (of_machine_is_compatible(xgs_iproc_dt_compat[XGS_IPROC_SB2]) || ++ of_machine_is_compatible(xgs_iproc_dt_compat[XGS_IPROC_HR3]) || ++ 
of_machine_is_compatible(xgs_iproc_dt_compat[XGS_IPROC_GH2])) ++ reset_base = get_iproc_idm_base(0) + DMAC_IDM_RESET_OFFSET; ++ else ++ reset_base = get_iproc_idm_base(0) + DMAC_IDM_RESET_OFFSET_1; ++ ++ if (reset_base != NULL) ++ writel(readl(reset_base) & 0xFFFFFFFE, reset_base); ++} ++#endif /* CONFIG_PL330_DMA || CONFIG_XGS_IPROC_DMA330_DMA */ ++ ++void __init xgs_iproc_init_early(void) ++{ ++ /* ++ * SDK allocates coherent buffers from atomic context. ++ * Increase size of atomic coherent pool to make sure such ++ * the allocations won't fail. ++ */ ++#ifdef CONFIG_DMA_CMA ++ /*can be overrided by "coherent_pool" in bootargs */ ++ init_dma_coherent_pool_size(SZ_1M * 16); ++#endif ++} ++ ++static void __init xgs_iproc_init(void) ++{ ++ int ret; ++ ++ ret = xgs_iproc_wrap_idm_dmu_base_reg_setup(); ++ if (ret < 0) ++ return; ++ ++#if defined(CONFIG_PL330_DMA) || defined(CONFIG_XGS_IPROC_DMA330_DMA) ++ xgs_iproc_dmac_idm_reset(); ++#endif ++ /* Populate platform devices */ ++ of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL); ++ ++ /* Setup IDM timeout handler */ ++ xgs_iproc_idm_timeout_handler_setup(); ++} ++ ++ ++static void xgs_iproc_restart(enum reboot_mode mode, const char *cmd) ++{ ++ void * __iomem reg_addr; ++ u32 reg; ++ ++ /* CRU_RESET register */ ++ reg_addr = (void * __iomem)(get_iproc_dmu_pcu_base() + ++ DMU_CRU_RESET_BASE); ++ /* set iproc_reset_n to 0 */ ++ reg = readl(reg_addr); ++ reg &= ~((u32) 1 << 1); ++ ++ writel(reg, reg_addr); ++ ++ /* Wait for reset */ ++ while (1) ++ cpu_do_idle(); ++} ++ ++DT_MACHINE_START(XGS_iProc_DT, "BRCM XGS iProc") ++ .init_early = xgs_iproc_init_early, ++ .init_machine = xgs_iproc_init, ++ .dt_compat = xgs_iproc_dt_compat, ++ .restart = xgs_iproc_restart, ++ .l2c_aux_val = 0, ++ .l2c_aux_mask = ~0, ++MACHINE_END +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/mach-iproc/include/plat/shm.h 
b/arch/arm/mach-iproc/include/plat/shm.h +--- a/arch/arm/mach-iproc/include/plat/shm.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/mach-iproc/include/plat/shm.h 2017-11-09 17:52:58.322965000 +0800 +@@ -0,0 +1,72 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ */ ++/* ++ * Header for declaring shim layer exports. 
++ */ ++ ++#ifndef __SHM_DOT_H_INCLUDED__ ++#define __SHM_DOT_H_INCLUDED__ ++ ++#include ++#include ++#include ++ ++ ++#define iproc_class_create(owner, name) \ ++({ \ ++ static struct lock_class_key __key; \ ++ iproc__class_create(owner, name, &__key); \ ++}) ++ ++extern int iproc_platform_get_irq(struct platform_device *dev, unsigned int num); ++extern struct resource * ++iproc_platform_get_resource_byname(struct platform_device *dev, unsigned int type, const char *name); ++extern struct resource * ++iproc_platform_get_resource(struct platform_device *dev, unsigned int type, ++ unsigned int num); ++extern int iproc_platform_device_add_resources(struct platform_device *pdev, const struct resource *res, unsigned int num); ++ ++extern int iproc_platform_device_register(struct platform_device * pdev); ++extern void iproc_platform_device_unregister(struct platform_device * pdev); ++extern int iproc_platform_driver_register(struct platform_driver *drv); ++extern void iproc_platform_driver_unregister(struct platform_driver *drv); ++ ++extern struct platform_device *iproc_platform_device_alloc(const char *name, int id); ++ ++extern int iproc_platform_device_add(struct platform_device *pdev); ++extern void iproc_platform_device_put(struct platform_device *pdev); ++ ++extern void iproc_platform_device_put(struct platform_device *pdev); ++extern int iproc_platform_device_add(struct platform_device *pdev); ++extern void iproc_platform_device_del(struct platform_device *pdev); ++extern int iproc_sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp); ++extern void iproc_sysfs_remove_group(struct kobject *kobj, const struct attribute_group *grp); ++ ++ ++extern struct class *iproc__class_create(struct module *owner, const char *name, ++ struct lock_class_key *key); ++extern void iproc_class_destroy(struct class *cls); ++extern int iproc_device_create_file(struct device *dev, ++ const struct device_attribute *attr); ++extern struct device 
*iproc_device_create(struct class *class, ++ struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...); ++extern void iproc_device_destroy(struct class *class, dev_t devt); ++extern void iproc_device_remove_file(struct device *dev, ++ const struct device_attribute *attr); ++extern int iproc_platform_get_irq_byname(struct platform_device *, const char *); ++ ++extern int iproc_gpio_to_irq(unsigned gpio); ++#endif /*#ifndef __SHM_DOT_H_INCLUDED__*/ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/mach-iproc/platsmp.c b/arch/arm/mach-iproc/platsmp.c +--- a/arch/arm/mach-iproc/platsmp.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/mach-iproc/platsmp.c 2017-11-09 17:52:58.323963000 +0800 +@@ -0,0 +1,210 @@ ++/* ++ * Copyright (C) 2014-2015 Broadcom Corporation ++ * Copyright 2014 Linaro Limited ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation version 2. ++ * ++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any ++ * kind, whether express or implied; without even the implied warranty ++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++/* Size of mapped Cortex A9 SCU address space */ ++#define CORTEX_A9_SCU_SIZE 0x58 ++ ++#define SECONDARY_TIMEOUT_NS NSEC_PER_MSEC /* 1 msec (in nanoseconds) */ ++#define BOOT_ADDR_CPUID_MASK 0x3 ++ ++/* Name of device node property defining secondary boot register location */ ++#define OF_SECONDARY_BOOT "secondary-boot-reg" ++#define MPIDR_CPUID_BITMASK 0x3 ++ ++/* I/O address of register used to coordinate secondary core startup */ ++static u32 secondary_boot_addr; ++ ++/* ++ * Enable the Cortex A9 Snoop Control Unit ++ * ++ * By the time this is called we already know there are multiple ++ * cores present. We assume we're running on a Cortex A9 processor, ++ * so any trouble getting the base address register or getting the ++ * SCU base is a problem. ++ * ++ * Return 0 if successful or an error code otherwise. ++ */ ++static int __init scu_a9_enable(void) ++{ ++ unsigned long config_base; ++ void __iomem *scu_base; ++ ++ if (!scu_a9_has_base()) { ++ pr_err("no configuration base address register!\n"); ++ return -ENXIO; ++ } ++ ++ /* Config base address register value is zero for uniprocessor */ ++ config_base = scu_a9_get_base(); ++ if (!config_base) { ++ pr_err("hardware reports only one core\n"); ++ return -ENOENT; ++ } ++ ++ scu_base = ioremap((phys_addr_t)config_base, CORTEX_A9_SCU_SIZE); ++ if (!scu_base) { ++ pr_err("failed to remap config base (%lu/%u) for SCU\n", ++ config_base, CORTEX_A9_SCU_SIZE); ++ return -ENOMEM; ++ } ++ ++ scu_enable(scu_base); ++ ++ iounmap(scu_base); /* That's the last we'll need of this */ ++ ++ return 0; ++} ++ ++static int nsp_write_lut(void) ++{ ++ void __iomem *sku_rom_lut; ++ phys_addr_t secondary_startup_phy; ++ ++ if (!secondary_boot_addr) { ++ pr_warn("required secondary boot register not specified\n"); ++ return -EINVAL; ++ } ++ ++ sku_rom_lut = 
ioremap_nocache((phys_addr_t)secondary_boot_addr, ++ sizeof(secondary_boot_addr)); ++ if (!sku_rom_lut) { ++ pr_warn("unable to ioremap SKU-ROM LUT register\n"); ++ return -ENOMEM; ++ } ++ ++ secondary_startup_phy = virt_to_phys(secondary_startup); ++ BUG_ON(secondary_startup_phy > (phys_addr_t)U32_MAX); ++ ++ writel_relaxed(secondary_startup_phy, sku_rom_lut); ++ ++ /* Ensure the write is visible to the secondary core */ ++ smp_wmb(); ++ ++ iounmap(sku_rom_lut); ++ ++ return 0; ++} ++ ++static void __init bcm_smp_prepare_cpus(unsigned int max_cpus) ++{ ++ static cpumask_t only_cpu_0 = { CPU_BITS_CPU0 }; ++ struct device_node *cpus_node = NULL; ++ struct device_node *cpu_node = NULL; ++ int ret; ++ ++ /* ++ * This function is only called via smp_ops->smp_prepare_cpu(). ++ * That only happens if a "/cpus" device tree node exists ++ * and has an "enable-method" property that selects the SMP ++ * operations defined herein. ++ */ ++ cpus_node = of_find_node_by_path("/cpus"); ++ if (!cpus_node) ++ return; ++ ++ for_each_child_of_node(cpus_node, cpu_node) { ++ u32 cpuid; ++ ++ if (of_node_cmp(cpu_node->type, "cpu")) ++ continue; ++ ++ if (of_property_read_u32(cpu_node, "reg", &cpuid)) { ++ pr_debug("%s: missing reg property\n", ++ cpu_node->full_name); ++ ret = -ENOENT; ++ goto out; ++ } ++ ++ /* ++ * "secondary-boot-reg" property should be defined only ++ * for secondary cpu ++ */ ++ if ((cpuid & MPIDR_CPUID_BITMASK) == 1) { ++ /* ++ * Our secondary enable method requires a ++ * "secondary-boot-reg" property to specify a register ++ * address used to request the ROM code boot a secondary ++ * core. If we have any trouble getting this we fall ++ * back to uniprocessor mode. ++ */ ++ if (of_property_read_u32(cpu_node, ++ OF_SECONDARY_BOOT, ++ &secondary_boot_addr)) { ++ pr_warn("%s: no" OF_SECONDARY_BOOT "property\n", ++ cpu_node->name); ++ ret = -ENOENT; ++ goto out; ++ } ++ } ++ } ++ ++ /* ++ * Enable the SCU on Cortex A9 based SoCs. 
If -ENOENT is ++ * returned, the SoC reported a uniprocessor configuration. ++ * We bail on any other error. ++ */ ++ ret = scu_a9_enable(); ++out: ++ of_node_put(cpu_node); ++ of_node_put(cpus_node); ++ ++ if (ret) { ++ /* Update the CPU present map to reflect uniprocessor mode */ ++ pr_warn("disabling SMP\n"); ++ init_cpu_present(&only_cpu_0); ++ } ++} ++ ++static int nsp_boot_secondary(unsigned int cpu, struct task_struct *idle) ++{ ++ int ret; ++ ++ /* ++ * After wake up, secondary core branches to the startup ++ * address programmed at SKU ROM LUT location. ++ */ ++ ret = nsp_write_lut(); ++ if (ret) { ++ pr_err("unable to write startup addr to SKU ROM LUT\n"); ++ goto out; ++ } ++ ++ /* Send a CPU wakeup interrupt to the secondary core */ ++ arch_send_wakeup_ipi_mask(cpumask_of(cpu)); ++ ++out: ++ return ret; ++} ++ ++/* Leverage NSP SMP code for HX4/KT2 */ ++static const struct smp_operations nsp_smp_ops __initconst = { ++ .smp_prepare_cpus = bcm_smp_prepare_cpus, ++ .smp_boot_secondary = nsp_boot_secondary, ++}; ++CPU_METHOD_OF_DECLARE(bcm_smp_nsp, "brcm,bcm-nsp-smp", &nsp_smp_ops); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/arch/arm/mach-iproc/shm.c b/arch/arm/mach-iproc/shm.c +--- a/arch/arm/mach-iproc/shm.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/arch/arm/mach-iproc/shm.c 2017-11-09 17:52:58.324964000 +0800 +@@ -0,0 +1,309 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ */ ++ ++#include "include/plat/shm.h" ++/** ++ * iproc_platform_get_irq - get an IRQ for a device ++ * wrapper function for platform_get_irq ++ * @dev: platform device ++ * @num: IRQ number index ++ */ ++int iproc_platform_get_irq(struct platform_device *dev, unsigned int num) ++{ ++ return platform_get_irq(dev, num); ++} ++EXPORT_SYMBOL(iproc_platform_get_irq); ++ ++ ++/** ++ * iproc_platform_get_resource_byname - ++ * wrapper function for platform_get_resource_byname ++ * @dev: platform device ++ * @type: resource type ++ * @name: resource name ++ */ ++struct resource * ++iproc_platform_get_resource_byname(struct platform_device *dev, ++ unsigned int type, ++ const char *name) ++{ ++ return platform_get_resource_byname(dev, type, name); ++} ++EXPORT_SYMBOL(iproc_platform_get_resource_byname); ++ ++ ++/** ++ * iproc_platform_get_resource - ++ * wrapper function for platform_get_resource ++ * @dev: platform device ++ * @type: resource type ++ * @num: resource index ++ */ ++struct resource * ++iproc_platform_get_resource(struct platform_device *dev, unsigned int type, ++ unsigned int num) ++{ ++ return platform_get_resource(dev, type, num); ++} ++EXPORT_SYMBOL(iproc_platform_get_resource); ++ ++ ++/** ++ * iproc_platform_driver_register - ++ * wrapper function for platform_driver_register ++ * @drv: platform driver structure ++ */ ++int iproc_platform_driver_register(struct platform_driver *drv) ++{ ++ return platform_driver_register(drv); ++} ++EXPORT_SYMBOL(iproc_platform_driver_register); ++ ++ ++/** ++ * iproc_platform_driver_unregister ++ * wrapper function for platform_driver_unregister ++ * @drv: platform driver structure 
++ */ ++void iproc_platform_driver_unregister(struct platform_driver *drv) ++{ ++ return platform_driver_unregister(drv); ++} ++EXPORT_SYMBOL(iproc_platform_driver_unregister); ++ ++ ++/** ++ * iproc_platform_device_register - add a platform-level device ++ * wrapper function for platform_device_register ++ * @pdev: platform device we're adding ++ * ++ */ ++int iproc_platform_device_register(struct platform_device * pdev) ++{ ++ return platform_device_register(pdev); ++} ++EXPORT_SYMBOL(iproc_platform_device_register); ++ ++ ++/** ++ * iproc_platform_device_unregister - ++ * wrapper function for platform_device_unregister ++ * @pdev: platform device we're unregistering ++ */ ++void iproc_platform_device_unregister(struct platform_device * pdev) ++{ ++ return platform_device_unregister(pdev); ++} ++EXPORT_SYMBOL(iproc_platform_device_unregister); ++ ++ ++/** ++ * iproc_platform_device_alloc - ++ * wrapper function for platform_device_alloc ++ * @name: base name of the device we're adding ++ * @id: instance id ++ */ ++struct platform_device *iproc_platform_device_alloc(const char *name, int id) ++{ ++ return platform_device_alloc(name, id); ++} ++EXPORT_SYMBOL(iproc_platform_device_alloc); ++ ++/** ++ * iproc_platform_device_add - ++ * wrapper function for platform_device_add ++ * @pdev: platform device we're adding ++ */ ++int iproc_platform_device_add(struct platform_device *pdev) ++{ ++ return platform_device_add(pdev); ++} ++EXPORT_SYMBOL(iproc_platform_device_add); ++ ++/** ++ * iproc_platform_device_del - ++ * wrapper function for platform_device_del ++ * @pdev: platform device we're removing ++ */ ++void iproc_platform_device_del(struct platform_device *pdev) ++{ ++ platform_device_del(pdev); ++} ++EXPORT_SYMBOL(iproc_platform_device_del); ++ ++ ++/** ++ * iproc_platform_device_put - ++ * wrapper function for platform_device_put ++ * @pdev: platform device to free ++ */ ++void iproc_platform_device_put(struct platform_device *pdev) ++{ ++ 
platform_device_put(pdev); ++} ++EXPORT_SYMBOL(iproc_platform_device_put); ++ ++ ++/** ++ * iproc_platform_device_add_resources - ++ * wrapper function for platform_device_add_resources ++ * @pdev: platform device allocated by platform_device_alloc to add resources to ++ * @res: set of resources that needs to be allocated for the device ++ * @num: number of resources ++ */ ++int iproc_platform_device_add_resources(struct platform_device *pdev, ++ const struct resource *res, unsigned int num) ++{ ++ return platform_device_add_resources(pdev, res, num); ++} ++EXPORT_SYMBOL(iproc_platform_device_add_resources); ++ ++ ++/** ++ * iproc_platform_device_put - ++ * wrapper function for sysfs_create_group ++ * @kobj: The kobject to create the group on ++ * @grp: The attribute group to create ++ */ ++int iproc_sysfs_create_group(struct kobject *kobj, const struct attribute_group *grp) ++{ ++ return sysfs_create_group(kobj, grp); ++} ++EXPORT_SYMBOL(iproc_sysfs_create_group); ++ ++ ++/** ++ * iproc_sysfs_remove_group - ++ * wrapper function for sysfs_remove_group ++ * @kobj: The kobject which the group is on ++ * @grp: The attribute group to remove ++ */ ++void iproc_sysfs_remove_group(struct kobject * kobj, const struct attribute_group * grp) ++{ ++ sysfs_remove_group(kobj, grp); ++} ++EXPORT_SYMBOL(iproc_sysfs_remove_group); ++ ++/** ++ * iproc__class_create - ++ * wrapper function for __class_create ++ * @ower: pointer to the module that is to "own" this struct class ++ * @name: pointer to a string for the name of this class. 
++ * @key: the lock_class_key for this class; used by mutex lock debugging ++ */ ++struct class *iproc__class_create(struct module *owner, const char *name, ++ struct lock_class_key *key) ++{ ++ return __class_create(owner, name, key); ++} ++EXPORT_SYMBOL(iproc__class_create); ++ ++/** ++ * iproc_class_destroy - ++ * wrapper function for class_destroy ++ * @cls: pointer to the struct class that is to be destroyed ++ */ ++void iproc_class_destroy(struct class *cls) ++{ ++ class_destroy(cls); ++} ++EXPORT_SYMBOL(iproc_class_destroy); ++ ++/** ++ * iproc_device_create_file - ++ * wrapper function for device_create_file ++ * @dev: device. ++ * @attr: device attribute descriptor. ++ */ ++int iproc_device_create_file(struct device *dev, ++ const struct device_attribute *attr) ++{ ++ return device_create_file(dev, attr); ++} ++EXPORT_SYMBOL(iproc_device_create_file); ++ ++/** ++ * iproc_device_create - ++ * wrapper function for device_create ++ * ++ * @class: pointer to the struct class that this device should be ++ * registered to ++ * @parent: pointer to the parent struct device of this new device, if any ++ * @devt: the dev_t for the char device to be added ++ * @drvdata: the data to be added to the device for callbacks ++ * @fmt: string for the device's name ++ */ ++struct device *iproc_device_create(struct class *class, ++ struct device *parent, dev_t devt, void *drvdata, const char *fmt, ...) 
++{ ++ va_list args; ++ struct device *r; ++ ++ va_start(args, fmt); ++ r = device_create_vargs(class, parent, devt, drvdata, fmt, args); ++ va_end(args); ++ ++ return r; ++} ++EXPORT_SYMBOL(iproc_device_create); ++ ++/** ++ * iproc_device_destroy - ++ * wrapper function for device_destroy ++ * @class: pointer to the struct class that this device was registered with ++ * @devt: the dev_t of the device that was previously registered ++ */ ++void iproc_device_destroy(struct class *class, dev_t devt) ++{ ++ return device_destroy(class, devt); ++} ++EXPORT_SYMBOL(iproc_device_destroy); ++ ++/** ++ * proc_device_remove_file - ++ * wrapper function for device_remove_file ++ * @dev: device. ++ * @attr: device attribute descriptor. ++ */ ++void iproc_device_remove_file(struct device *dev, ++ const struct device_attribute *attr) ++{ ++ return device_remove_file(dev, attr); ++} ++EXPORT_SYMBOL(iproc_device_remove_file); ++ ++/** ++ * iproc_platform_get_irq_byname - ++ * wrapper function for platform_get_irq_byname ++ * @dev: platform device ++ * @name: IRQ name ++ */ ++int iproc_platform_get_irq_byname(struct platform_device *dev, const char *n) ++{ ++ return platform_get_irq_byname(dev,n); ++} ++EXPORT_SYMBOL(iproc_platform_get_irq_byname); ++ ++/** ++ * iproc_gpio_to_irq - ++ * wrapper function for gpio_to_irq ++ * @gpio: gpio whose IRQ will be returned (already requested) ++ */ ++int iproc_gpio_to_irq(unsigned gpio) ++{ ++ return gpio_to_irq(gpio); ++} ++EXPORT_SYMBOL(iproc_gpio_to_irq); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig +--- a/drivers/char/hw_random/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/char/hw_random/Kconfig 2017-11-09 17:53:25.197163000 +0800 +@@ -114,6 +114,18 @@ config HW_RANDOM_IPROC_RNG200 + + If unsure, say Y. 
+ ++config HW_RANDOM_XGS_IPROC_RNG ++ tristate "Broadcom iProc RNG support" ++ depends on (ARCH_XGS_IPROC && HW_RANDOM) ++ ---help--- ++ This driver provides kernel-side support for the RNG ++ hardware found on the Broadcom iProc SoCs. ++ ++ To compile this driver as a module, choose M here: the ++ module will be called iproc-rng ++ ++ If unsure, say Y. ++ + config HW_RANDOM_GEODE + tristate "AMD Geode HW Random Number Generator support" + depends on X86_32 && PCI +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/char/hw_random/Makefile b/drivers/char/hw_random/Makefile +--- a/drivers/char/hw_random/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/char/hw_random/Makefile 2017-11-09 17:53:25.198159000 +0800 +@@ -29,6 +29,7 @@ obj-$(CONFIG_HW_RANDOM_EXYNOS) += exynos + obj-$(CONFIG_HW_RANDOM_TPM) += tpm-rng.o + obj-$(CONFIG_HW_RANDOM_BCM2835) += bcm2835-rng.o + obj-$(CONFIG_HW_RANDOM_IPROC_RNG200) += iproc-rng200.o ++obj-$(CONFIG_HW_RANDOM_XGS_IPROC_RNG) += xgs-iproc-rng200.o + obj-$(CONFIG_HW_RANDOM_MSM) += msm-rng.o + obj-$(CONFIG_HW_RANDOM_ST) += st-rng.o + obj-$(CONFIG_HW_RANDOM_XGENE) += xgene-rng.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/char/hw_random/xgs-iproc-rng200.c b/drivers/char/hw_random/xgs-iproc-rng200.c +--- a/drivers/char/hw_random/xgs-iproc-rng200.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/char/hw_random/xgs-iproc-rng200.c 2017-11-09 17:53:25.250157000 +0800 +@@ -0,0 +1,442 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++/* ++ * DESCRIPTION: The Broadcom iProc RNG200 Driver ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* Registers for RNG */ ++#define RNG_CTRL_OFFSET 0x00000000 ++#define RNG_CTRL_RESERVED_MASK 0xF00000CC ++#define RNG_CTRL_COMBLK2_OSC_DIS_SHIFT 22 
++#define RNG_CTRL_COMBLK2_OSC_DIS_MASK 0x0FC00000 ++#define RNG_CTRL_COMBLK1_OSC_DIS_SHIFT 16 ++#define RNG_CTRL_COMBLK1_OSC_DIS_MASK 0x003F0000 ++#define RNG_CTRL_JCLK_BYP_DIV_CNT_SHIFT 8 ++#define RNG_CTRL_JCLK_BYP_DIV_CNT_MASK 0x0000FF00 ++#define RNG_CTRL_JCLK_BYP_SRC_SHIFT 5 ++#define RNG_CTRL_JCLK_BYP_SRC_MASK 0x00000020 ++#define RNG_CTRL_JCLK_BYP_SEL_SHIFT 4 ++#define RNG_CTRL_JCLK_BYP_SEL_MASK 0x00000010 ++#define RNG_CTRL_RBG2X_SHIFT 1 ++#define RNG_CTRL_RBG2X_MASK 0x00000002 ++#define RNG_CTRL_RBGEN_SHIFT 0 ++#define RNG_CTRL_RBGEN_MASK 0x00000001 ++ ++#define RNG_STATUS_OFFSET 0x00000004 ++#define RNG_STATUS_RESERVED_MASK 0x00F00000 ++#define RNG_STATUS_RND_VAL_SHIFT 24 ++#define RNG_STATUS_RND_VAL_MASK 0xFF000000 ++#define RNG_STATUS_WARM_CNT_SHIFT 0 ++#define RNG_STATUS_WARM_CNT_MASK 0x000FFFFF ++ ++#define RNG_DATA_OFFSET 0x00000008 ++#define RNG_DATA_RESERVED_MASK 0x00000000 ++#define RNG_DATA_RNG_NUM_SHIFT 0 ++#define RNG_DATA_RNG_NUM_MASK 0xFFFFFFFF ++ ++#define RNG_FF_THRES_OFFSET 0x0000000C ++#define RNG_FF_THRES_RESERVED_MASK 0xFFFFFFE0 ++#define RNG_FF_THRES_RNG_FF_THRESH_SHIFT 0 ++#define RNG_FF_THRES_RNG_FF_THRESH_MASK 0x0000001F ++ ++#define RNG_INT_MASK_OFFSET 0x00000010 ++#define RNG_INT_MASK_RESERVED_MASK 0xFFFFFFFE ++#define RNG_INT_MASK_OFF_SHIFT 0 ++#define RNG_INT_MASK_OFF_MASK 0x00000001 ++ ++/* Registers for RNG200*/ ++#define RNG200_CTRL_OFFSET 0x00 ++#define RNG200_CTRL_RBGEN_MASK 0x00001FFF ++#define RNG200_CTRL_RBGEN_ENABLE 0x00000001 ++#define RNG200_CTRL_RBGEN_DISABLE 0x00000000 ++ ++#define RNG200_SOFT_RESET_OFFSET 0x04 ++#define RNG200_SOFT_RESET_MASK 0x00000001 ++#define RNG200_SOFT_RESET_ACTIVE 0x00000001 ++#define RNG200_SOFT_RESET_CLEAR 0x00000000 ++ ++#define RBG_SOFT_RESET_OFFSET 0x08 ++#define RBG_RNG_SOFT_RESET_MASK 0x00000001 ++#define RBG_RNG_SOFT_RESET_ACTIVE 0x00000001 ++#define RBG_RNG_SOFT_RESET_CLEAR 0x00000000 ++ ++#define RNG200_INT_STATUS_OFFSET 0x18 ++#define 
RNG200_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK 0x80000000 ++#define RNG200_INT_STATUS_STARTUP_TRANSITIONS_MET_IRQ_MASK 0x00020000 ++#define RNG200_INT_STATUS_NIST_FAIL_IRQ_MASK 0x00000020 ++#define RNG200_INT_STATUS_TOTAL_BITS_COUNT_IRQ_MASK 0x00000001 ++ ++#define RNG200_FIFO_DATA_OFFSET 0x20 ++#define RNG200_FIFO_COUNT_OFFSET 0x24 ++#define RNG200_FIFO_COUNT_MASK 0x000000FF ++ ++static int rng_read(struct hwrng *rng, void *buf, size_t max, ++ bool wait) ++{ ++ u32 num_words = 0; ++ u32 num_remaining = max; ++ ++ #define MAX_IDLE_TIME (1 * HZ) ++ unsigned long idle_endtime = jiffies + MAX_IDLE_TIME; ++ ++ /* Retrieve HW RNG registers base address. */ ++ void __iomem *base_addr = (void __iomem *)rng->priv; ++ ++ while ((num_remaining > 0) && time_before(jiffies, idle_endtime)) { ++ /* Are there any random numbers available? */ ++ num_words = (ioread32(base_addr + RNG_STATUS_OFFSET) & ++ RNG_STATUS_RND_VAL_MASK) >> RNG_STATUS_RND_VAL_SHIFT; ++ if (num_words > 0) { ++ if (num_remaining >= sizeof(u32)) { ++ /* Buffer has room to store entire word */ ++ *(u32 *)buf = ioread32(base_addr + ++ RNG_DATA_OFFSET); ++ buf += sizeof(u32); ++ num_remaining -= sizeof(u32); ++ } else { ++ /* Buffer can only store partial word */ ++ u32 rnd_number = ioread32(base_addr + ++ RNG_DATA_OFFSET); ++ memcpy(buf, &rnd_number, num_remaining); ++ buf += num_remaining; ++ num_remaining = 0; ++ } ++ ++ /* Reset the IDLE timeout */ ++ idle_endtime = jiffies + MAX_IDLE_TIME; ++ } else if (!wait) { ++ /* Cannot wait, return immediately */ ++ break; ++ } else { ++ /* Can wait, give others chance to run */ ++ cpu_relax(); ++ } ++ } ++ ++ return max - num_remaining; ++} ++ ++static struct hwrng rng_ops = { ++ .name = "iproc-rng", ++ .read = rng_read, ++}; ++ ++static int rng_probe(struct platform_device *pdev) ++{ ++ int error; ++ u32 val; ++ struct device *dev = &pdev->dev; ++ void __iomem *base_addr; ++ struct device_node *node; ++ ++ pr_info("Broadcom IPROC RNG Driver\n"); ++ /* We only accept 
one device, and it must have an id of -1 */ ++ if (pdev->id != -1) ++ return -ENODEV; ++ ++ node = pdev->dev.of_node; ++ base_addr = of_iomap(node, 0); ++ if (!base_addr) { ++ dev_err(&pdev->dev, "can't iomap base_addr for rng\n"); ++ return -EIO; ++ } ++ rng_ops.priv = (unsigned long)base_addr; ++ ++ /* Start RNG block */ ++ val = ioread32(base_addr + RNG_CTRL_OFFSET); ++ val |= RNG_CTRL_RBGEN_MASK; ++ iowrite32(val, base_addr + RNG_CTRL_OFFSET); ++ ++ /* Enable RNG RBG2X */ ++ val = ioread32(base_addr + RNG_CTRL_OFFSET); ++ val |= RNG_CTRL_RBG2X_MASK; ++ iowrite32(val, base_addr + RNG_CTRL_OFFSET); ++ ++ /* Disable RNG INTERRUPT */ ++ val = ioread32(base_addr + RNG_INT_MASK_OFFSET); ++ val |= RNG_INT_MASK_OFF_MASK; ++ iowrite32(val, base_addr + RNG_INT_MASK_OFFSET); ++ ++ /* set warmup cycle 0xfff */ ++ iowrite32(RNG_STATUS_WARM_CNT_MASK - ++ (0xfff & RNG_STATUS_WARM_CNT_MASK), ++ base_addr + RNG_STATUS_OFFSET); ++ while ((ioread32(base_addr + RNG_STATUS_OFFSET) & ++ RNG_STATUS_WARM_CNT_MASK) != RNG_STATUS_WARM_CNT_MASK) ++ cpu_relax(); ++ ++ /* register to the Linux RNG framework */ ++ error = hwrng_register(&rng_ops); ++ if (error) { ++ dev_err(dev, "hwrng registration failed\n"); ++ iounmap(base_addr); ++ return error; ++ } ++ dev_dbg(dev, "hwrng registered\n"); ++ ++ return 0; ++} ++ ++static int rng_remove(struct platform_device *pdev) ++{ ++ u32 val; ++ void __iomem *base_addr = (void __iomem *)rng_ops.priv; ++ /* Unregister driver */ ++ hwrng_unregister(&rng_ops); ++ ++ if (base_addr) { ++ /* Disable RNG hardware */ ++ val = ioread32(base_addr + RNG_CTRL_OFFSET); ++ val &= ~RNG_CTRL_RBGEN_MASK; ++ iowrite32(val, base_addr + RNG_CTRL_OFFSET); ++ ++ val = ioread32(base_addr + RNG_CTRL_OFFSET); ++ val &= ~RNG_CTRL_RBG2X_MASK; ++ iowrite32(val, base_addr + RNG_CTRL_OFFSET); ++ ++ iounmap(base_addr); ++ } ++ ++ return 0; ++} ++ ++static void iproc_rng200_restart(void __iomem *rng_base) ++{ ++ u32 val; ++ ++ /* Disable RBG */ ++ val = ioread32(rng_base + 
RNG200_CTRL_OFFSET); ++ val &= ~RNG200_CTRL_RBGEN_MASK; ++ val |= RNG200_CTRL_RBGEN_DISABLE; ++ iowrite32(val, rng_base + RNG200_CTRL_OFFSET); ++ ++ /* Clear all interrupt status */ ++ iowrite32(0xFFFFFFFFUL, rng_base + RNG200_INT_STATUS_OFFSET); ++ ++ /* Reset RNG and RBG */ ++ val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET); ++ val &= ~RBG_RNG_SOFT_RESET_MASK; ++ val |= RBG_RNG_SOFT_RESET_ACTIVE; ++ iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET); ++ ++ val = ioread32(rng_base + RNG200_SOFT_RESET_OFFSET); ++ val &= ~RNG200_SOFT_RESET_MASK; ++ val |= RNG200_SOFT_RESET_ACTIVE; ++ iowrite32(val, rng_base + RNG200_SOFT_RESET_OFFSET); ++ ++ val = ioread32(rng_base + RNG200_SOFT_RESET_OFFSET); ++ val &= ~RNG200_SOFT_RESET_MASK; ++ val |= RNG200_SOFT_RESET_CLEAR; ++ iowrite32(val, rng_base + RNG200_SOFT_RESET_OFFSET); ++ ++ val = ioread32(rng_base + RBG_SOFT_RESET_OFFSET); ++ val &= ~RBG_RNG_SOFT_RESET_MASK; ++ val |= RBG_RNG_SOFT_RESET_CLEAR; ++ iowrite32(val, rng_base + RBG_SOFT_RESET_OFFSET); ++ ++ /* Enable RBG */ ++ val = ioread32(rng_base + RNG200_CTRL_OFFSET); ++ val &= ~RNG200_CTRL_RBGEN_MASK; ++ val |= RNG200_CTRL_RBGEN_ENABLE; ++ iowrite32(val, rng_base + RNG200_CTRL_OFFSET); ++} ++ ++static int iproc_rng200_read(struct hwrng *rng, void *buf, size_t max, ++ bool wait) ++{ ++ u32 status; ++ u32 rng_fifo; ++ u32 num_remaining = max; ++ ++ #define MAX_RESETS_PER_READ 1 ++ u32 num_resets = 0; ++ ++ #define MAX_IDLE_TIME (1 * HZ) ++ unsigned long idle_endtime = jiffies + MAX_IDLE_TIME; ++ ++ /* Retrieve HW RNG registers base address. */ ++ void __iomem *rng_base = (void __iomem *)rng->priv; ++ ++ while ((num_remaining > 0) && time_before(jiffies, idle_endtime)) { ++ ++ /* Is RNG sane? If not, reset it. 
*/ ++ status = ioread32(rng_base + RNG200_INT_STATUS_OFFSET); ++ if ((status & (RNG200_INT_STATUS_MASTER_FAIL_LOCKOUT_IRQ_MASK | ++ RNG200_INT_STATUS_NIST_FAIL_IRQ_MASK)) != 0) { ++ ++ if (num_resets >= MAX_RESETS_PER_READ) ++ return max - num_remaining; ++ ++ iproc_rng200_restart(rng_base); ++ num_resets++; ++ } ++ ++ /* Are there any random numbers available? */ ++ rng_fifo = ioread32(rng_base + RNG200_FIFO_COUNT_OFFSET); ++ if ((rng_fifo & RNG200_FIFO_COUNT_MASK) > 0) { ++ ++ if (num_remaining >= sizeof(u32)) { ++ /* Buffer has room to store entire word */ ++ *(u32 *)buf = ioread32(rng_base + ++ RNG200_FIFO_DATA_OFFSET); ++ buf += sizeof(u32); ++ num_remaining -= sizeof(u32); ++ } else { ++ /* Buffer can only store partial word */ ++ u32 rnd_number = ioread32(rng_base + ++ RNG200_FIFO_DATA_OFFSET); ++ memcpy(buf, &rnd_number, num_remaining); ++ buf += num_remaining; ++ num_remaining = 0; ++ } ++ ++ /* Reset the IDLE timeout */ ++ idle_endtime = jiffies + MAX_IDLE_TIME; ++ } else { ++ if (!wait) ++ /* Cannot wait, return immediately */ ++ break; ++ ++ /* Can wait, give others chance to run */ ++ cpu_relax(); ++ } ++ } ++ ++ return max - num_remaining; ++} ++ ++static struct hwrng iproc_rng200_ops = { ++ .name = "iproc-rng200", ++ .read = iproc_rng200_read, ++}; ++ ++static int iproc_rng200_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ void __iomem *rng_base; ++ struct resource *res; ++ u32 val; ++ int err; ++ ++ pr_info("Broadcom IPROC RNG200 Driver\n"); ++ /* Map peripheral */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(dev, "failed to get rng resources"); ++ return -ENODEV; ++ } ++ ++ rng_base = devm_ioremap_resource(dev, res); ++ //rng_base = ioremap(res->start, res->end - res->start); ++ if (!rng_base) { ++ dev_err(dev, "failed to remap rng regs"); ++ return -ENODEV; ++ } ++ ++ iproc_rng200_ops.priv = (unsigned long)rng_base; ++ ++ /* Setup RNG. 
*/ ++ val = ioread32(rng_base + RNG200_CTRL_OFFSET); ++ val &= ~RNG200_CTRL_RBGEN_MASK; ++ val |= RNG200_CTRL_RBGEN_ENABLE; ++ iowrite32(val, rng_base + RNG200_CTRL_OFFSET); ++ ++ /* Register driver */ ++ err = hwrng_register(&iproc_rng200_ops); ++ if (err) { ++ dev_err(dev, "hwrng registration failed\n"); ++ return err; ++ } ++ dev_dbg(dev, "hwrng registered\n"); ++ ++ return 0; ++} ++ ++static int iproc_rng200_remove(struct platform_device *pdev) ++{ ++ u32 val; ++ void __iomem *rng_base = (void __iomem *)iproc_rng200_ops.priv; ++ ++ /* Unregister driver */ ++ hwrng_unregister(&iproc_rng200_ops); ++ if (rng_base) { ++ /* Disable RNG hardware */ ++ val = ioread32(rng_base + RNG200_CTRL_OFFSET); ++ val &= ~RNG200_CTRL_RBGEN_MASK; ++ val |= RNG200_CTRL_RBGEN_DISABLE; ++ iowrite32(val, rng_base + RNG200_CTRL_OFFSET); ++ } ++ return 0; ++} ++static int rng_probe_gen(struct platform_device *pdev) ++{ ++ int ret = -ENODEV; ++ struct device_node *node; ++ const char *rng_name; ++ node = pdev->dev.of_node; ++ rng_name = node->name; ++ ++ if (!of_device_is_available(node)) ++ return -ENODEV; ++ ++ of_property_read_string(node, "rng-type", &rng_name); ++ if (strcmp(rng_name, "rng200") == 0) ++ ret = iproc_rng200_probe(pdev); ++ else if (strcmp(rng_name, "rng") == 0) ++ ret = rng_probe(pdev); ++ ++ return ret; ++} ++ ++static int rng_remove_gen(struct platform_device *pdev) ++{ ++ int ret = -ENODEV; ++ struct device_node *node; ++ const char *rng_name; ++ node = pdev->dev.of_node; ++ rng_name = node->name; ++ ++ if (!of_device_is_available(node)) ++ return -ENODEV; ++ ++ of_property_read_string(node, "rng-type", &rng_name); ++ if (strcmp(rng_name, "rng200") == 0) ++ ret = iproc_rng200_remove(pdev); ++ else if (strcmp(rng_name, "rng") == 0) ++ ret = rng_remove(pdev); ++ ++ return ret; ++} ++ ++static const struct of_device_id bcm_iproc_dt_ids[] = { ++ { .compatible = "brcm,iproc-rng"}, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, bcm_iproc_dt_ids); ++ ++static struct platform_driver 
iproc_rng_driver = { ++ .driver = { ++ .name = "iproc-rng", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(bcm_iproc_dt_ids), ++ }, ++ .probe = rng_probe_gen, ++ .remove = rng_remove_gen, ++}; ++module_platform_driver(iproc_rng_driver); ++ ++MODULE_AUTHOR("Broadcom"); ++MODULE_DESCRIPTION("iProc RNG/RNG200 Random Number Generator driver"); ++MODULE_LICENSE("GPL v2"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/clk/bcm/Kconfig b/drivers/clk/bcm/Kconfig +--- a/drivers/clk/bcm/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/clk/bcm/Kconfig 2017-11-09 17:53:25.478158000 +0800 +@@ -14,3 +14,15 @@ config COMMON_CLK_IPROC + help + Enable common clock framework support for Broadcom SoCs + based on the iProc architecture ++ ++config CLK_XGS_IPROC ++ bool "BRCM XGS iProc clock support" ++ depends on COMMON_CLK && ARCH_XGS_IPROC ++ select COMMON_CLK_IPROC ++ default ARCH_XGS_IPROC ++ help ++ Enable XGS iProc clock ++ ++ ++ ++ +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/clk/bcm/Makefile b/drivers/clk/bcm/Makefile +--- a/drivers/clk/bcm/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/clk/bcm/Makefile 2017-11-09 17:53:25.479153000 +0800 +@@ -8,3 +8,4 @@ obj-$(CONFIG_COMMON_CLK_IPROC) += clk-ns + obj-$(CONFIG_ARCH_BCM_CYGNUS) += clk-cygnus.o + obj-$(CONFIG_ARCH_BCM_NSP) += clk-nsp.o + obj-$(CONFIG_ARCH_BCM_5301X) += clk-nsp.o ++obj-$(CONFIG_CLK_XGS_IPROC) += clk-xgs-iproc.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/clk/bcm/clk-iproc-armpll.c b/drivers/clk/bcm/clk-iproc-armpll.c +--- a/drivers/clk/bcm/clk-iproc-armpll.c 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/clk/bcm/clk-iproc-armpll.c 2017-11-09 17:53:25.494157000 +0800 +@@ -224,8 
+224,10 @@ static unsigned long iproc_arm_pll_recal + pll->rate = 0; + return 0; + } ++ ++ /* To avoid pll->rate overflow, do divide before multiply */ ++ parent_rate = (parent_rate / pdiv) / mdiv; + pll->rate = (ndiv * parent_rate) >> 20; +- pll->rate = (pll->rate / pdiv) / mdiv; + + pr_debug("%s: ARM PLL rate: %lu. parent rate: %lu\n", __func__, + pll->rate, parent_rate); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/clk/bcm/clk-xgs-iproc.c b/drivers/clk/bcm/clk-xgs-iproc.c +--- a/drivers/clk/bcm/clk-xgs-iproc.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/clk/bcm/clk-xgs-iproc.c 2017-11-09 17:53:25.512166000 +0800 +@@ -0,0 +1,170 @@ ++/* ++ * Copyright (C) 2014 Broadcom Corporation ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation version 2. ++ * ++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any ++ * kind, whether express or implied; without even the implied warranty ++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "clk-iproc.h" ++ ++#define SB2_GEN_PLL_CTRL_1_OFFSET 0x04 ++#define SB2_GEN_PLL_CTRL_3_OFFSET 0x0C ++#define SB2_GEN_PLL_CTRL_5_OFFSET 0x14 ++#define SB2_GEN_PLL_CTRL_1_PDIV_R 27 ++#define SB2_GEN_PLL_CTRL_3_NDIV_INT_R 20 ++#define SB2_GEN_PLL_CTRL_5_CH1_MDIV_R 8 ++#define SB2_GEN_PLL_CTRL_1_PDIV_WIDTH 4 ++#define SB2_GEN_PLL_CTRL_3_NDIV_INT_WIDTH 10 ++#define SB2_GEN_PLL_CTRL_5_CH1_MDIV_WIDTH 8 ++ ++#define GEN_PLL_CTRL1_OFFSET 0x4 ++#define GEN_PLL_CTRL2_OFFSET 0x8 ++#define GEN_PLL_CTRL1_NDIV_INT_R 0 ++#define GEN_PLL_CTRL1_NDIV_INT_WIDTH 10 ++#define GEN_PLL_CTRL1_PDIV_R 10 ++#define GEN_PLL_CTRL2_CH3_MDIV_R 8 ++#define GEN_PLL_CTRL2_CH3_MDIV_WIDTH 8 ++#define GEN_PLL_CTRL1_PDIV_WIDTH_3 3 ++#define GEN_PLL_CTRL1_PDIV_WIDTH_4 4 ++ ++ ++struct iproc_gen_pll { ++ struct clk_hw hw; ++ void __iomem *base; ++ unsigned long rate; ++}; ++ ++#define to_iproc_gen_pll(phw) container_of(phw, struct iproc_gen_pll, hw) ++ ++static u32 genpll_pdiv_width; ++ ++static unsigned long iproc_axi_clk_recalc_rate(struct clk_hw *hw, ++ unsigned long parent_rate) ++{ ++ uint32_t ndiv, mdiv, pdiv; ++ struct iproc_gen_pll *pll = to_iproc_gen_pll(hw); ++ ++ ++ ndiv = readl(pll->base + GEN_PLL_CTRL1_OFFSET) >> ++ GEN_PLL_CTRL1_NDIV_INT_R; ++ ndiv &= (1 << GEN_PLL_CTRL1_NDIV_INT_WIDTH) - 1; ++ if (ndiv == 0) ++ ndiv = 1 << GEN_PLL_CTRL1_NDIV_INT_WIDTH; ++ ++ pdiv = readl(pll->base + GEN_PLL_CTRL1_OFFSET) >> GEN_PLL_CTRL1_PDIV_R; ++ pdiv &= (1 << genpll_pdiv_width) -1; ++ if (pdiv == 0) ++ pdiv = 1 << genpll_pdiv_width; ++ ++ mdiv = readl(pll->base + GEN_PLL_CTRL2_OFFSET) >> ++ GEN_PLL_CTRL2_CH3_MDIV_R; ++ mdiv &= (1 << GEN_PLL_CTRL2_CH3_MDIV_WIDTH) - 1; ++ if (mdiv == 0) ++ mdiv = 1 << GEN_PLL_CTRL2_CH3_MDIV_WIDTH; ++ ++ pll->rate = parent_rate * ndiv / pdiv / mdiv; ++ return pll->rate; ++} ++ ++static unsigned long iproc_sb2_axi_clk_recalc_rate(struct clk_hw *hw, ++ unsigned long 
parent_rate) ++{ ++ uint32_t ndiv, mdiv, pdiv; ++ struct iproc_gen_pll *pll = to_iproc_gen_pll(hw); ++ ++ ndiv = readl(pll->base + SB2_GEN_PLL_CTRL_3_OFFSET) >> ++ SB2_GEN_PLL_CTRL_3_NDIV_INT_R; ++ ndiv &= (1 << SB2_GEN_PLL_CTRL_3_NDIV_INT_WIDTH) - 1; ++ ++ mdiv = readl(pll->base + SB2_GEN_PLL_CTRL_5_OFFSET) >> ++ SB2_GEN_PLL_CTRL_5_CH1_MDIV_R; ++ mdiv &= (1 << SB2_GEN_PLL_CTRL_5_CH1_MDIV_WIDTH) - 1; ++ ++ pdiv = readl(pll->base + SB2_GEN_PLL_CTRL_1_OFFSET) >> ++ SB2_GEN_PLL_CTRL_1_PDIV_R; ++ pdiv &= (1 << SB2_GEN_PLL_CTRL_1_PDIV_WIDTH) - 1; ++ ++ pll->rate = parent_rate * ndiv / pdiv / mdiv; ++ return pll->rate; ++} ++ ++ ++static struct clk_ops iproc_axi_clk_ops = { ++ .recalc_rate = iproc_axi_clk_recalc_rate, ++}; ++ ++void __init xgs_iproc_axi_clk_setup(struct device_node *node) ++{ ++ int ret; ++ struct clk *clk; ++ struct iproc_gen_pll *pll; ++ struct clk_init_data init; ++ const char *parent_name; ++ ++ pll = kzalloc(sizeof(*pll), GFP_KERNEL); ++ if (WARN_ON(!pll)) ++ return; ++ ++ pll->base = of_iomap(node, 0); ++ if (WARN_ON(!pll->base)) ++ goto err_free_pll; ++ ++ init.name = node->name; ++ if ( of_device_is_compatible(node, "axi-clk-sb2") ) ++ iproc_axi_clk_ops.recalc_rate = iproc_sb2_axi_clk_recalc_rate; ++ if ( of_device_is_compatible(node, "axi-clk-hx4") || ++ of_device_is_compatible(node, "axi-clk-hr2") ) ++ genpll_pdiv_width = GEN_PLL_CTRL1_PDIV_WIDTH_3; ++ else ++ genpll_pdiv_width = GEN_PLL_CTRL1_PDIV_WIDTH_4; ++ ++ init.ops = &iproc_axi_clk_ops; ++ init.flags = 0; ++ parent_name = of_clk_get_parent_name(node, 0); ++ init.parent_names = (parent_name ? &parent_name : NULL); ++ init.num_parents = (parent_name ? 
1 : 0); ++ pll->hw.init = &init; ++ ++ clk = clk_register(NULL, &pll->hw); ++ if (WARN_ON(IS_ERR(clk))) ++ goto err_iounmap; ++ ++ ret = of_clk_add_provider(node, of_clk_src_simple_get, clk); ++ if (WARN_ON(ret)) ++ goto err_clk_unregister; ++ ++ return; ++ ++err_clk_unregister: ++ clk_unregister(clk); ++err_iounmap: ++ iounmap(pll->base); ++err_free_pll: ++ kfree(pll); ++} ++ ++CLK_OF_DECLARE(xgs_iproc_axi_clk, "brcm,xgs-iproc-axi-clk", ++ xgs_iproc_axi_clk_setup); ++ ++ ++static void __init xgs_iproc_armpll_init(struct device_node *node) ++{ ++ iproc_armpll_setup(node); ++} ++CLK_OF_DECLARE(xgs_iproc_armpll, "brcm,xgs-iproc-armpll", ++ xgs_iproc_armpll_init); +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/dma/Kconfig b/drivers/dma/Kconfig +--- a/drivers/dma/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/dma/Kconfig 2017-11-09 17:53:27.047177000 +0800 +@@ -397,6 +397,14 @@ config PL330_DMA + You need to provide platform specific settings via + platform_data for a dma-pl330 device. + ++config XGS_IPROC_DMA330_DMA ++ tristate "DMA API Driver for XGS IPROC DMA330" ++ select DMA_ENGINE ++ depends on ARM_AMBA ++ help ++ Support the DMA engine for BRCM IPROC CoreLink ++ DMA Controller (DMA-330). 
++ + config PXA_DMA + bool "PXA DMA support" + depends on (ARCH_MMP || ARCH_PXA) +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/dma/Makefile b/drivers/dma/Makefile +--- a/drivers/dma/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/dma/Makefile 2017-11-09 17:53:27.048171000 +0800 +@@ -50,6 +50,7 @@ obj-$(CONFIG_MX3_IPU) += ipu/ + obj-$(CONFIG_NBPFAXI_DMA) += nbpfaxi.o + obj-$(CONFIG_PCH_DMA) += pch_dma.o + obj-$(CONFIG_PL330_DMA) += pl330.o ++obj-$(CONFIG_XGS_IPROC_DMA330_DMA) += iproc-dma330.o + obj-$(CONFIG_PPC_BESTCOMM) += bestcomm/ + obj-$(CONFIG_PXA_DMA) += pxa_dma.o + obj-$(CONFIG_QCOM_BAM_DMA) += qcom_bam_dma.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/dma/iproc-dma330.c b/drivers/dma/iproc-dma330.c +--- a/drivers/dma/iproc-dma330.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/dma/iproc-dma330.c 2017-11-09 17:53:27.192174000 +0800 +@@ -0,0 +1,3067 @@ ++/* ++ * BRCM IPROC DMA330 support ++ * ++ * This driver, modified from pl330.c, supports CoreLink DMA Controller ++ * (DMA-330). ++ * ++ * Copyright (C) 2016 Broadcom Corporation ++ * ++ * Copyright (C) 2010 Samsung Electronics Co. Ltd. ++ * Jaswinder Singh ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "dmaengine.h" ++#define DMA330_MAX_CHAN 8 ++#define DMA330_MAX_IRQS 32 ++#define DMA330_MAX_PERI 32 ++ ++struct dma_dma330_platdata { ++ u8 nr_valid_peri; ++ u8 *peri_id; ++ dma_cap_mask_t cap_mask; ++ unsigned mcbuf_sz; ++}; ++ ++enum dma330_cachectrl { ++ CCTRL0, /* Noncacheable and nonbufferable */ ++ CCTRL1, /* Bufferable only */ ++ CCTRL2, /* Cacheable, but do not allocate */ ++ CCTRL3, /* Cacheable and bufferable, but do not allocate */ ++ INVALID1, /* AWCACHE = 0x1000 */ ++ INVALID2, ++ CCTRL6, /* Cacheable write-through, allocate on writes only */ ++ CCTRL7, /* Cacheable write-back, allocate on writes only */ ++}; ++ ++enum dma330_byteswap { ++ SWAP_NO, ++ SWAP_2, ++ SWAP_4, ++ SWAP_8, ++ SWAP_16, ++}; ++ ++/* Register and Bit field Definitions */ ++#define DS 0x0 ++#define DS_ST_STOP 0x0 ++#define DS_ST_EXEC 0x1 ++#define DS_ST_CMISS 0x2 ++#define DS_ST_UPDTPC 0x3 ++#define DS_ST_WFE 0x4 ++#define DS_ST_ATBRR 0x5 ++#define DS_ST_QBUSY 0x6 ++#define DS_ST_WFP 0x7 ++#define DS_ST_KILL 0x8 ++#define DS_ST_CMPLT 0x9 ++#define DS_ST_FLTCMP 0xe ++#define DS_ST_FAULT 0xf ++ ++#define DPC 0x4 ++#define INTEN 0x20 ++#define ES 0x24 ++#define INTSTATUS 0x28 ++#define INTCLR 0x2c ++#define FSM 0x30 ++#define FSC 0x34 ++#define FTM 0x38 ++ ++#define _FTC 0x40 ++#define FTC(n) (_FTC + (n)*0x4) ++ ++#define _CS 0x100 ++#define CS(n) (_CS + (n)*0x8) ++#define CS_CNS (1 << 21) ++ ++#define _CPC 0x104 ++#define CPC(n) (_CPC + (n)*0x8) ++ ++#define _SA 0x400 ++#define SA(n) (_SA + (n)*0x20) ++ ++#define _DA 0x404 ++#define DA(n) (_DA + (n)*0x20) ++ ++#define _CC 0x408 ++#define CC(n) (_CC + (n)*0x20) ++ ++#define CC_SRCINC (1 << 0) ++#define CC_DSTINC (1 << 14) ++#define CC_SRCPRI (1 << 8) ++#define CC_DSTPRI (1 << 22) ++#define CC_SRCNS (1 << 9) ++#define CC_DSTNS (1 << 23) 
++#define CC_SRCIA (1 << 10) ++#define CC_DSTIA (1 << 24) ++#define CC_SRCBRSTLEN_SHFT 4 ++#define CC_DSTBRSTLEN_SHFT 18 ++#define CC_SRCBRSTSIZE_SHFT 1 ++#define CC_DSTBRSTSIZE_SHFT 15 ++#define CC_SRCCCTRL_SHFT 11 ++#define CC_SRCCCTRL_MASK 0x7 ++#define CC_DSTCCTRL_SHFT 25 ++#define CC_DRCCCTRL_MASK 0x7 ++#define CC_SWAP_SHFT 28 ++ ++#define _LC0 0x40c ++#define LC0(n) (_LC0 + (n)*0x20) ++ ++#define _LC1 0x410 ++#define LC1(n) (_LC1 + (n)*0x20) ++ ++#define DBGSTATUS 0xd00 ++#define DBG_BUSY (1 << 0) ++ ++#define DBGCMD 0xd04 ++#define DBGINST0 0xd08 ++#define DBGINST1 0xd0c ++ ++#define CR0 0xe00 ++#define CR1 0xe04 ++#define CR2 0xe08 ++#define CR3 0xe0c ++#define CR4 0xe10 ++#define CRD 0xe14 ++ ++#define PERIPH_ID 0xfe0 ++#define PERIPH_REV_SHIFT 20 ++#define PERIPH_REV_MASK 0xf ++#define PERIPH_REV_R0P0 0 ++#define PERIPH_REV_R1P0 1 ++#define PERIPH_REV_R1P1 2 ++ ++#define CR0_PERIPH_REQ_SET (1 << 0) ++#define CR0_BOOT_EN_SET (1 << 1) ++#define CR0_BOOT_MAN_NS (1 << 2) ++#define CR0_NUM_CHANS_SHIFT 4 ++#define CR0_NUM_CHANS_MASK 0x7 ++#define CR0_NUM_PERIPH_SHIFT 12 ++#define CR0_NUM_PERIPH_MASK 0x1f ++#define CR0_NUM_EVENTS_SHIFT 17 ++#define CR0_NUM_EVENTS_MASK 0x1f ++ ++#define CR1_ICACHE_LEN_SHIFT 0 ++#define CR1_ICACHE_LEN_MASK 0x7 ++#define CR1_NUM_ICACHELINES_SHIFT 4 ++#define CR1_NUM_ICACHELINES_MASK 0xf ++ ++#define CRD_DATA_WIDTH_SHIFT 0 ++#define CRD_DATA_WIDTH_MASK 0x7 ++#define CRD_WR_CAP_SHIFT 4 ++#define CRD_WR_CAP_MASK 0x7 ++#define CRD_WR_Q_DEP_SHIFT 8 ++#define CRD_WR_Q_DEP_MASK 0xf ++#define CRD_RD_CAP_SHIFT 12 ++#define CRD_RD_CAP_MASK 0x7 ++#define CRD_RD_Q_DEP_SHIFT 16 ++#define CRD_RD_Q_DEP_MASK 0xf ++#define CRD_DATA_BUFF_SHIFT 20 ++#define CRD_DATA_BUFF_MASK 0x3ff ++ ++#define PART 0x330 ++#define DESIGNER 0x41 ++#define REVISION 0x2 ++#define INTEG_CFG 0x0 ++#define PERIPH_ID_VAL ((PART << 0) | (DESIGNER << 12) | (REVISION << 20)) ++ ++#define DMA330_STATE_STOPPED (1 << 0) ++#define DMA330_STATE_EXECUTING (1 << 1) ++#define 
DMA330_STATE_WFE (1 << 2) ++#define DMA330_STATE_FAULTING (1 << 3) ++#define DMA330_STATE_COMPLETING (1 << 4) ++#define DMA330_STATE_WFP (1 << 5) ++#define DMA330_STATE_KILLING (1 << 6) ++#define DMA330_STATE_FAULT_COMPLETING (1 << 7) ++#define DMA330_STATE_CACHEMISS (1 << 8) ++#define DMA330_STATE_UPDTPC (1 << 9) ++#define DMA330_STATE_ATBARRIER (1 << 10) ++#define DMA330_STATE_QUEUEBUSY (1 << 11) ++#define DMA330_STATE_INVALID (1 << 15) ++ ++#define DMA330_STABLE_STATES (DMA330_STATE_STOPPED | DMA330_STATE_EXECUTING \ ++ | DMA330_STATE_WFE | DMA330_STATE_FAULTING) ++ ++#define CMD_DMAADDH 0x54 ++#define CMD_DMAEND 0x00 ++#define CMD_DMAFLUSHP 0x35 ++#define CMD_DMAGO 0xa0 ++#define CMD_DMALD 0x04 ++#define CMD_DMALDP 0x25 ++#define CMD_DMALP 0x20 ++#define CMD_DMALPEND 0x28 ++#define CMD_DMAKILL 0x01 ++#define CMD_DMAMOV 0xbc ++#define CMD_DMANOP 0x18 ++#define CMD_DMARMB 0x12 ++#define CMD_DMASEV 0x34 ++#define CMD_DMAST 0x08 ++#define CMD_DMASTP 0x29 ++#define CMD_DMASTZ 0x0c ++#define CMD_DMAWFE 0x36 ++#define CMD_DMAWFP 0x30 ++#define CMD_DMAWMB 0x13 ++ ++#define SZ_DMAADDH 3 ++#define SZ_DMAEND 1 ++#define SZ_DMAFLUSHP 2 ++#define SZ_DMALD 1 ++#define SZ_DMALDP 2 ++#define SZ_DMALP 2 ++#define SZ_DMALPEND 2 ++#define SZ_DMAKILL 1 ++#define SZ_DMAMOV 6 ++#define SZ_DMANOP 1 ++#define SZ_DMARMB 1 ++#define SZ_DMASEV 2 ++#define SZ_DMAST 1 ++#define SZ_DMASTP 2 ++#define SZ_DMASTZ 1 ++#define SZ_DMAWFE 2 ++#define SZ_DMAWFP 2 ++#define SZ_DMAWMB 1 ++#define SZ_DMAGO 6 ++ ++#define BRST_LEN(ccr) ((((ccr) >> CC_SRCBRSTLEN_SHFT) & 0xf) + 1) ++#define BRST_SIZE(ccr) (1 << (((ccr) >> CC_SRCBRSTSIZE_SHFT) & 0x7)) ++ ++#define BYTE_TO_BURST(b, ccr) ((b) / BRST_SIZE(ccr) / BRST_LEN(ccr)) ++#define BURST_TO_BYTE(c, ccr) ((c) * BRST_SIZE(ccr) * BRST_LEN(ccr)) ++ ++/* ++ * With 256 bytes, we can do more than 2.5MB and 5MB xfers per req ++ * at 1byte/burst for P<->M and M<->M respectively. 
++ * For typical scenario, at 1word/burst, 10MB and 20MB xfers per req ++ * should be enough for P<->M and M<->M respectively. ++ */ ++#define MCODE_BUFF_PER_REQ 256 ++ ++/* Use this _only_ to wait on transient states */ ++#define UNTIL(t, s) while (!(_state(t) & (s))) cpu_relax(); ++ ++#ifdef DMA330_DEBUG_MCGEN ++static unsigned cmd_line; ++#define DMA330_DBGCMD_DUMP(off, x...) do { \ ++ printk("%x:", cmd_line); \ ++ printk(x); \ ++ cmd_line += off; \ ++ } while (0) ++#define DMA330_DBGMC_START(addr) (cmd_line = addr) ++#else ++#define DMA330_DBGCMD_DUMP(off, x...) do {} while (0) ++#define DMA330_DBGMC_START(addr) do {} while (0) ++#endif ++ ++/* The number of default descriptors */ ++ ++#define NR_DEFAULT_DESC 16 ++ ++/* Delay for runtime PM autosuspend, ms */ ++#define DMA330_AUTOSUSPEND_DELAY 20 ++ ++/* Populated by the DMA330 core driver for DMA API driver's info */ ++struct dma330_config { ++ u32 periph_id; ++#define DMAC_MODE_NS (1 << 0) ++ unsigned int mode; ++ unsigned int data_bus_width:10; /* In number of bits */ ++ unsigned int data_buf_dep:11; ++ unsigned int num_chan:4; ++ unsigned int num_peri:6; ++ u32 peri_ns; ++ unsigned int num_events:6; ++ u32 irq_ns; ++}; ++ ++/** ++ * Request Configuration. ++ * The DMA330 core does not modify this and uses the last ++ * working configuration if the request doesn't provide any. ++ * ++ * The Client may want to provide this info only for the ++ * first request and a request with new settings. ++ */ ++struct dma330_reqcfg { ++ /* Address Incrementing */ ++ unsigned dst_inc:1; ++ unsigned src_inc:1; ++ ++ /* ++ * For now, the SRC & DST protection levels ++ * and burst size/length are assumed same. ++ */ ++ bool nonsecure; ++ bool privileged; ++ bool insnaccess; ++ unsigned brst_len:5; ++ unsigned brst_size:3; /* in power of 2 */ ++ ++ enum dma330_cachectrl dcctl; ++ enum dma330_cachectrl scctl; ++ enum dma330_byteswap swap; ++ struct dma330_config *pcfg; ++}; ++ ++/* ++ * One cycle of DMAC operation. 
++ * There may be more than one xfer in a request. ++ */ ++struct dma330_xfer { ++ u32 src_addr; ++ u32 dst_addr; ++ /* Size to xfer */ ++ u32 bytes; ++}; ++ ++/* The xfer callbacks are made with one of these arguments. */ ++enum dma330_op_err { ++ /* The all xfers in the request were success. */ ++ DMA330_ERR_NONE, ++ /* If req aborted due to global error. */ ++ DMA330_ERR_ABORT, ++ /* If req failed due to problem with Channel. */ ++ DMA330_ERR_FAIL, ++}; ++ ++enum dmamov_dst { ++ SAR = 0, ++ CCR, ++ DAR, ++}; ++ ++enum dma330_dst { ++ SRC = 0, ++ DST, ++}; ++ ++enum dma330_cond { ++ SINGLE, ++ BURST, ++ ALWAYS, ++}; ++ ++struct dma_dma330_desc; ++ ++struct _dma330_req { ++ u32 mc_bus; ++ void *mc_cpu; ++ struct dma_dma330_desc *desc; ++}; ++ ++/* ToBeDone for tasklet */ ++struct _dma330_tbd { ++ bool reset_dmac; ++ bool reset_mngr; ++ u8 reset_chan; ++}; ++ ++/* A DMAC Thread */ ++struct dma330_thread { ++ u8 id; ++ int ev; ++ /* If the channel is not yet acquired by any client */ ++ bool free; ++ /* Parent DMAC */ ++ struct dma330_dmac *dmac; ++ /* Only two at a time */ ++ struct _dma330_req req[2]; ++ /* Index of the last enqueued request */ ++ unsigned lstenq; ++ /* Index of the last submitted request or -1 if the DMA is stopped */ ++ int req_running; ++}; ++ ++enum dma330_dmac_state { ++ UNINIT, ++ INIT, ++ DYING, ++}; ++ ++enum desc_status { ++ /* In the DMAC pool */ ++ FREE, ++ /* ++ * Allocated to some channel during prep_xxx ++ * Also may be sitting on the work_list. ++ */ ++ PREP, ++ /* ++ * Sitting on the work_list and already submitted ++ * to the DMA330 core. Not more than two descriptors ++ * of a channel can be BUSY at any time. 
++ */ ++ BUSY, ++ /* ++ * Sitting on the channel work_list but xfer done ++ * by DMA330 core ++ */ ++ DONE, ++}; ++ ++struct dma_dma330_chan { ++ /* Schedule desc completion */ ++ struct tasklet_struct task; ++ ++ /* DMA-Engine Channel */ ++ struct dma_chan chan; ++ ++ /* List of submitted descriptors */ ++ struct list_head submitted_list; ++ /* List of issued descriptors */ ++ struct list_head work_list; ++ /* List of completed descriptors */ ++ struct list_head completed_list; ++ ++ /* Pointer to the DMAC that manages this channel, ++ * NULL if the channel is available to be acquired. ++ * As the parent, this DMAC also provides descriptors ++ * to the channel. ++ */ ++ struct dma330_dmac *dmac; ++ ++ /* To protect channel manipulation */ ++ spinlock_t lock; ++ ++ /* ++ * Hardware channel thread of DMA330 DMAC. NULL if the channel is ++ * available. ++ */ ++ struct dma330_thread *thread; ++ ++ /* For D-to-M and M-to-D channels */ ++ int burst_sz; /* the peripheral fifo width */ ++ int burst_len; /* the number of burst */ ++ dma_addr_t fifo_addr; ++ ++ /* for cyclic capability */ ++ bool cyclic; ++}; ++ ++struct dma330_dmac { ++ /* DMA-Engine Device */ ++ struct dma_device ddma; ++ ++ /* Holds info about sg limitations */ ++ struct device_dma_parameters dma_parms; ++ ++ /* Pool of descriptors available for the DMAC's channels */ ++ struct list_head desc_pool; ++ /* To protect desc_pool manipulation */ ++ spinlock_t pool_lock; ++ ++ /* Size of MicroCode buffers for each channel. */ ++ unsigned mcbufsz; ++ /* ioremap'ed address of DMA330 registers. 
*/ ++ void __iomem *base; ++ /* Populated by the DMA330 core driver during dma330_add */ ++ struct dma330_config pcfg; ++ ++ spinlock_t lock; ++ /* Maximum possible events/irqs */ ++ int events[32]; ++ /* BUS address of MicroCode buffer */ ++ dma_addr_t mcode_bus; ++ /* CPU address of MicroCode buffer */ ++ void *mcode_cpu; ++ /* List of all Channel threads */ ++ struct dma330_thread *channels; ++ /* Pointer to the MANAGER thread */ ++ struct dma330_thread *manager; ++ /* To handle bad news in interrupt */ ++ struct tasklet_struct tasks; ++ struct _dma330_tbd dmac_tbd; ++ /* State of DMAC operation */ ++ enum dma330_dmac_state state; ++ /* Holds list of reqs with due callbacks */ ++ struct list_head req_done; ++ ++ /* Peripheral channels connected to this DMAC */ ++ unsigned int num_peripherals; ++ struct dma_dma330_chan *peripherals; /* keep at end */ ++}; ++ ++struct dma_dma330_desc { ++ /* To attach to a queue as child */ ++ struct list_head node; ++ ++ /* Descriptor for the DMA Engine API */ ++ struct dma_async_tx_descriptor txd; ++ ++ /* Xfer for DMA330 core */ ++ struct dma330_xfer px; ++ ++ struct dma330_reqcfg rqcfg; ++ ++ enum desc_status status; ++ ++ int bytes_requested; ++ bool last; ++ ++ /* The channel which currently holds this desc */ ++ struct dma_dma330_chan *pchan; ++ ++ enum dma_transfer_direction rqtype; ++ /* Index of peripheral for the xfer. 
*/ ++ unsigned peri:5; ++ /* Hook to attach to DMAC's list of reqs with due callback */ ++ struct list_head rqd; ++}; ++ ++struct _xfer_spec { ++ u32 ccr; ++ struct dma_dma330_desc *desc; ++}; ++ ++static inline bool _queue_empty(struct dma330_thread *thrd) ++{ ++ return thrd->req[0].desc == NULL && thrd->req[1].desc == NULL; ++} ++ ++static inline bool _queue_full(struct dma330_thread *thrd) ++{ ++ return thrd->req[0].desc != NULL && thrd->req[1].desc != NULL; ++} ++ ++static inline bool is_manager(struct dma330_thread *thrd) ++{ ++ return thrd->dmac->manager == thrd; ++} ++ ++/* If manager of the thread is in Non-Secure mode */ ++static inline bool _manager_ns(struct dma330_thread *thrd) ++{ ++ return (thrd->dmac->pcfg.mode & DMAC_MODE_NS) ? true : false; ++} ++ ++static inline u32 get_revision(u32 periph_id) ++{ ++ return (periph_id >> PERIPH_REV_SHIFT) & PERIPH_REV_MASK; ++} ++ ++static inline u32 _emit_ADDH(unsigned dry_run, u8 buf[], ++ enum dma330_dst da, u16 val) ++{ ++ if (dry_run) ++ return SZ_DMAADDH; ++ ++ buf[0] = CMD_DMAADDH; ++ buf[0] |= (da << 1); ++ *((__le16 *)&buf[1]) = cpu_to_le16(val); ++ ++ DMA330_DBGCMD_DUMP(SZ_DMAADDH, "\tDMAADDH %s %u\n", ++ da == 1 ? 
"DA" : "SA", val); ++ ++ return SZ_DMAADDH; ++} ++ ++static inline u32 _emit_END(unsigned dry_run, u8 buf[]) ++{ ++ if (dry_run) ++ return SZ_DMAEND; ++ ++ buf[0] = CMD_DMAEND; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMAEND, "\tDMAEND\n"); ++ ++ return SZ_DMAEND; ++} ++ ++static inline u32 _emit_FLUSHP(unsigned dry_run, u8 buf[], u8 peri) ++{ ++ if (dry_run) ++ return SZ_DMAFLUSHP; ++ ++ buf[0] = CMD_DMAFLUSHP; ++ ++ peri &= 0x1f; ++ peri <<= 3; ++ buf[1] = peri; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMAFLUSHP, "\tDMAFLUSHP %u\n", peri >> 3); ++ ++ return SZ_DMAFLUSHP; ++} ++ ++static inline u32 _emit_LD(unsigned dry_run, u8 buf[], enum dma330_cond cond) ++{ ++ if (dry_run) ++ return SZ_DMALD; ++ ++ buf[0] = CMD_DMALD; ++ ++ if (cond == SINGLE) ++ buf[0] |= (0 << 1) | (1 << 0); ++ else if (cond == BURST) ++ buf[0] |= (1 << 1) | (1 << 0); ++ ++ DMA330_DBGCMD_DUMP(SZ_DMALD, "\tDMALD%c\n", ++ cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); ++ ++ return SZ_DMALD; ++} ++ ++static inline u32 _emit_LDP(unsigned dry_run, u8 buf[], ++ enum dma330_cond cond, u8 peri) ++{ ++ if (dry_run) ++ return SZ_DMALDP; ++ ++ buf[0] = CMD_DMALDP; ++ ++ if (cond == BURST) ++ buf[0] |= (1 << 1); ++ ++ peri &= 0x1f; ++ peri <<= 3; ++ buf[1] = peri; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMALDP, "\tDMALDP%c %u\n", ++ cond == SINGLE ? 'S' : 'B', peri >> 3); ++ ++ return SZ_DMALDP; ++} ++ ++static inline u32 _emit_LP(unsigned dry_run, u8 buf[], ++ unsigned loop, u8 cnt) ++{ ++ if (dry_run) ++ return SZ_DMALP; ++ ++ buf[0] = CMD_DMALP; ++ ++ if (loop) ++ buf[0] |= (1 << 1); ++ ++ cnt--; /* DMAC increments by 1 internally */ ++ buf[1] = cnt; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMALP, "\tDMALP_%c %u\n", loop ? 
'1' : '0', cnt); ++ ++ return SZ_DMALP; ++} ++ ++struct _arg_LPEND { ++ enum dma330_cond cond; ++ bool forever; ++ unsigned loop; ++ u8 bjump; ++}; ++ ++static inline u32 _emit_LPEND(unsigned dry_run, u8 buf[], ++ const struct _arg_LPEND *arg) ++{ ++ enum dma330_cond cond = arg->cond; ++ bool forever = arg->forever; ++ unsigned loop = arg->loop; ++ u8 bjump = arg->bjump; ++ ++ if (dry_run) ++ return SZ_DMALPEND; ++ ++ buf[0] = CMD_DMALPEND; ++ ++ if (loop) ++ buf[0] |= (1 << 2); ++ ++ if (!forever) ++ buf[0] |= (1 << 4); ++ ++ if (cond == SINGLE) ++ buf[0] |= (0 << 1) | (1 << 0); ++ else if (cond == BURST) ++ buf[0] |= (1 << 1) | (1 << 0); ++ ++ buf[1] = bjump; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMALPEND, "\tDMALP%s%c_%c bjmpto_%x\n", ++ forever ? "FE" : "END", ++ cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A'), ++ loop ? '1' : '0', ++ bjump); ++ ++ return SZ_DMALPEND; ++} ++ ++static inline u32 _emit_KILL(unsigned dry_run, u8 buf[]) ++{ ++ if (dry_run) ++ return SZ_DMAKILL; ++ ++ buf[0] = CMD_DMAKILL; ++ ++ return SZ_DMAKILL; ++} ++ ++static inline u32 _emit_MOV(unsigned dry_run, u8 buf[], ++ enum dmamov_dst dst, u32 val) ++{ ++ if (dry_run) ++ return SZ_DMAMOV; ++ ++ buf[0] = CMD_DMAMOV; ++ buf[1] = dst; ++ *((__le32 *)&buf[2]) = cpu_to_le32(val); ++ ++ DMA330_DBGCMD_DUMP(SZ_DMAMOV, "\tDMAMOV %s 0x%x\n", ++ dst == SAR ? "SAR" : (dst == DAR ? 
"DAR" : "CCR"), val); ++ ++ return SZ_DMAMOV; ++} ++ ++static inline u32 _emit_NOP(unsigned dry_run, u8 buf[]) ++{ ++ if (dry_run) ++ return SZ_DMANOP; ++ ++ buf[0] = CMD_DMANOP; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMANOP, "\tDMANOP\n"); ++ ++ return SZ_DMANOP; ++} ++ ++static inline u32 _emit_RMB(unsigned dry_run, u8 buf[]) ++{ ++ if (dry_run) ++ return SZ_DMARMB; ++ ++ buf[0] = CMD_DMARMB; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMARMB, "\tDMARMB\n"); ++ ++ return SZ_DMARMB; ++} ++ ++static inline u32 _emit_SEV(unsigned dry_run, u8 buf[], u8 ev) ++{ ++ if (dry_run) ++ return SZ_DMASEV; ++ ++ buf[0] = CMD_DMASEV; ++ ++ ev &= 0x1f; ++ ev <<= 3; ++ buf[1] = ev; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMASEV, "\tDMASEV %u\n", ev >> 3); ++ ++ return SZ_DMASEV; ++} ++ ++static inline u32 _emit_ST(unsigned dry_run, u8 buf[], enum dma330_cond cond) ++{ ++ if (dry_run) ++ return SZ_DMAST; ++ ++ buf[0] = CMD_DMAST; ++ ++ if (cond == SINGLE) ++ buf[0] |= (0 << 1) | (1 << 0); ++ else if (cond == BURST) ++ buf[0] |= (1 << 1) | (1 << 0); ++ ++ DMA330_DBGCMD_DUMP(SZ_DMAST, "\tDMAST%c\n", ++ cond == SINGLE ? 'S' : (cond == BURST ? 'B' : 'A')); ++ ++ return SZ_DMAST; ++} ++ ++static inline u32 _emit_STP(unsigned dry_run, u8 buf[], ++ enum dma330_cond cond, u8 peri) ++{ ++ if (dry_run) ++ return SZ_DMASTP; ++ ++ buf[0] = CMD_DMASTP; ++ ++ if (cond == BURST) ++ buf[0] |= (1 << 1); ++ ++ peri &= 0x1f; ++ peri <<= 3; ++ buf[1] = peri; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMASTP, "\tDMASTP%c %u\n", ++ cond == SINGLE ? 
'S' : 'B', peri >> 3); ++ ++ return SZ_DMASTP; ++} ++ ++static inline u32 _emit_STZ(unsigned dry_run, u8 buf[]) ++{ ++ if (dry_run) ++ return SZ_DMASTZ; ++ ++ buf[0] = CMD_DMASTZ; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMASTZ, "\tDMASTZ\n"); ++ ++ return SZ_DMASTZ; ++} ++ ++static inline u32 _emit_WFE(unsigned dry_run, u8 buf[], u8 ev, ++ unsigned invalidate) ++{ ++ if (dry_run) ++ return SZ_DMAWFE; ++ ++ buf[0] = CMD_DMAWFE; ++ ++ ev &= 0x1f; ++ ev <<= 3; ++ buf[1] = ev; ++ ++ if (invalidate) ++ buf[1] |= (1 << 1); ++ ++ DMA330_DBGCMD_DUMP(SZ_DMAWFE, "\tDMAWFE %u%s\n", ++ ev >> 3, invalidate ? ", I" : ""); ++ ++ return SZ_DMAWFE; ++} ++ ++static inline u32 _emit_WFP(unsigned dry_run, u8 buf[], ++ enum dma330_cond cond, u8 peri) ++{ ++ if (dry_run) ++ return SZ_DMAWFP; ++ ++ buf[0] = CMD_DMAWFP; ++ ++ if (cond == SINGLE) ++ buf[0] |= (0 << 1) | (0 << 0); ++ else if (cond == BURST) ++ buf[0] |= (1 << 1) | (0 << 0); ++ else ++ buf[0] |= (0 << 1) | (1 << 0); ++ ++ peri &= 0x1f; ++ peri <<= 3; ++ buf[1] = peri; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMAWFP, "\tDMAWFP%c %u\n", ++ cond == SINGLE ? 'S' : (cond == BURST ? 
'B' : 'P'), peri >> 3); ++ ++ return SZ_DMAWFP; ++} ++ ++static inline u32 _emit_WMB(unsigned dry_run, u8 buf[]) ++{ ++ if (dry_run) ++ return SZ_DMAWMB; ++ ++ buf[0] = CMD_DMAWMB; ++ ++ DMA330_DBGCMD_DUMP(SZ_DMAWMB, "\tDMAWMB\n"); ++ ++ return SZ_DMAWMB; ++} ++ ++struct _arg_GO { ++ u8 chan; ++ u32 addr; ++ unsigned ns; ++}; ++ ++static inline u32 _emit_GO(unsigned dry_run, u8 buf[], ++ const struct _arg_GO *arg) ++{ ++ u8 chan = arg->chan; ++ u32 addr = arg->addr; ++ unsigned ns = arg->ns; ++ ++ if (dry_run) ++ return SZ_DMAGO; ++ ++ buf[0] = CMD_DMAGO; ++ buf[0] |= (ns << 1); ++ ++ buf[1] = chan & 0x7; ++ ++ *((__le32 *)&buf[2]) = cpu_to_le32(addr); ++ ++ return SZ_DMAGO; ++} ++ ++#define msecs_to_loops(t) (loops_per_jiffy / 1000 * HZ * t) ++ ++/* Returns Time-Out */ ++static bool _until_dmac_idle(struct dma330_thread *thrd) ++{ ++ void __iomem *regs = thrd->dmac->base; ++ unsigned long loops = msecs_to_loops(5); ++ ++ do { ++ /* Until Manager is Idle */ ++ if (!(readl(regs + DBGSTATUS) & DBG_BUSY)) ++ break; ++ ++ cpu_relax(); ++ } while (--loops); ++ ++ if (!loops) ++ return true; ++ ++ return false; ++} ++ ++static inline void _execute_DBGINSN(struct dma330_thread *thrd, ++ u8 insn[], bool as_manager) ++{ ++ void __iomem *regs = thrd->dmac->base; ++ u32 val; ++ ++ val = (insn[0] << 16) | (insn[1] << 24); ++ if (!as_manager) { ++ val |= (1 << 0); ++ val |= (thrd->id << 8); /* Channel Number */ ++ } ++ writel(val, regs + DBGINST0); ++ ++ val = le32_to_cpu(*((__le32 *)&insn[2])); ++ writel(val, regs + DBGINST1); ++ ++ /* If timed out due to halted state-machine */ ++ if (_until_dmac_idle(thrd)) { ++ dev_err(thrd->dmac->ddma.dev, "DMAC halted!\n"); ++ return; ++ } ++ ++ /* Get going */ ++ writel(0, regs + DBGCMD); ++} ++ ++static inline u32 _state(struct dma330_thread *thrd) ++{ ++ void __iomem *regs = thrd->dmac->base; ++ u32 val; ++ ++ if (is_manager(thrd)) ++ val = readl(regs + DS) & 0xf; ++ else ++ val = readl(regs + CS(thrd->id)) & 0xf; ++ ++ switch (val) { 
++ case DS_ST_STOP: ++ return DMA330_STATE_STOPPED; ++ case DS_ST_EXEC: ++ return DMA330_STATE_EXECUTING; ++ case DS_ST_CMISS: ++ return DMA330_STATE_CACHEMISS; ++ case DS_ST_UPDTPC: ++ return DMA330_STATE_UPDTPC; ++ case DS_ST_WFE: ++ return DMA330_STATE_WFE; ++ case DS_ST_FAULT: ++ return DMA330_STATE_FAULTING; ++ case DS_ST_ATBRR: ++ if (is_manager(thrd)) ++ return DMA330_STATE_INVALID; ++ else ++ return DMA330_STATE_ATBARRIER; ++ case DS_ST_QBUSY: ++ if (is_manager(thrd)) ++ return DMA330_STATE_INVALID; ++ else ++ return DMA330_STATE_QUEUEBUSY; ++ case DS_ST_WFP: ++ if (is_manager(thrd)) ++ return DMA330_STATE_INVALID; ++ else ++ return DMA330_STATE_WFP; ++ case DS_ST_KILL: ++ if (is_manager(thrd)) ++ return DMA330_STATE_INVALID; ++ else ++ return DMA330_STATE_KILLING; ++ case DS_ST_CMPLT: ++ if (is_manager(thrd)) ++ return DMA330_STATE_INVALID; ++ else ++ return DMA330_STATE_COMPLETING; ++ case DS_ST_FLTCMP: ++ if (is_manager(thrd)) ++ return DMA330_STATE_INVALID; ++ else ++ return DMA330_STATE_FAULT_COMPLETING; ++ default: ++ return DMA330_STATE_INVALID; ++ } ++} ++ ++static void _stop(struct dma330_thread *thrd) ++{ ++ void __iomem *regs = thrd->dmac->base; ++ u8 insn[6] = {0, 0, 0, 0, 0, 0}; ++ ++ if (_state(thrd) == DMA330_STATE_FAULT_COMPLETING) ++ UNTIL(thrd, DMA330_STATE_FAULTING | DMA330_STATE_KILLING); ++ ++ /* Return if nothing needs to be done */ ++ if (_state(thrd) == DMA330_STATE_COMPLETING ++ || _state(thrd) == DMA330_STATE_KILLING ++ || _state(thrd) == DMA330_STATE_STOPPED) ++ return; ++ ++ _emit_KILL(0, insn); ++ ++ /* Stop generating interrupts for SEV */ ++ writel(readl(regs + INTEN) & ~(1 << thrd->ev), regs + INTEN); ++ ++ _execute_DBGINSN(thrd, insn, is_manager(thrd)); ++} ++ ++/* Start doing req 'idx' of thread 'thrd' */ ++static bool _trigger(struct dma330_thread *thrd) ++{ ++ void __iomem *regs = thrd->dmac->base; ++ struct _dma330_req *req; ++ struct dma_dma330_desc *desc; ++ struct _arg_GO go; ++ unsigned ns; ++ u8 insn[6] = {0, 0, 0, 
0, 0, 0}; ++ int idx; ++ ++ /* Return if already ACTIVE */ ++ if (_state(thrd) != DMA330_STATE_STOPPED) ++ return true; ++ ++ idx = 1 - thrd->lstenq; ++ if (thrd->req[idx].desc != NULL) { ++ req = &thrd->req[idx]; ++ } else { ++ idx = thrd->lstenq; ++ if (thrd->req[idx].desc != NULL) ++ req = &thrd->req[idx]; ++ else ++ req = NULL; ++ } ++ ++ /* Return if no request */ ++ if (!req) ++ return true; ++ ++ /* Return if req is running */ ++ if (idx == thrd->req_running) ++ return true; ++ ++ desc = req->desc; ++ ++ ns = desc->rqcfg.nonsecure ? 1 : 0; ++ ++ /* See 'Abort Sources' point-4 at Page 2-25 */ ++ if (_manager_ns(thrd) && !ns) ++ dev_info(thrd->dmac->ddma.dev, "%s:%d Recipe for ABORT!\n", ++ __func__, __LINE__); ++ ++ go.chan = thrd->id; ++ go.addr = req->mc_bus; ++ go.ns = ns; ++ _emit_GO(0, insn, &go); ++ ++ /* Set to generate interrupts for SEV */ ++ writel(readl(regs + INTEN) | (1 << thrd->ev), regs + INTEN); ++ ++ /* Only manager can execute GO */ ++ _execute_DBGINSN(thrd, insn, true); ++ ++ thrd->req_running = idx; ++ ++ return true; ++} ++ ++static bool _start(struct dma330_thread *thrd) ++{ ++ switch (_state(thrd)) { ++ case DMA330_STATE_FAULT_COMPLETING: ++ UNTIL(thrd, DMA330_STATE_FAULTING | DMA330_STATE_KILLING); ++ ++ if (_state(thrd) == DMA330_STATE_KILLING) ++ UNTIL(thrd, DMA330_STATE_STOPPED) ++ ++ case DMA330_STATE_FAULTING: ++ _stop(thrd); ++ ++ case DMA330_STATE_KILLING: ++ case DMA330_STATE_COMPLETING: ++ UNTIL(thrd, DMA330_STATE_STOPPED) ++ ++ case DMA330_STATE_STOPPED: ++ return _trigger(thrd); ++ ++ case DMA330_STATE_WFP: ++ case DMA330_STATE_QUEUEBUSY: ++ case DMA330_STATE_ATBARRIER: ++ case DMA330_STATE_UPDTPC: ++ case DMA330_STATE_CACHEMISS: ++ case DMA330_STATE_EXECUTING: ++ return true; ++ ++ case DMA330_STATE_WFE: /* For RESUME, nothing yet */ ++ default: ++ return false; ++ } ++} ++ ++static inline int _ldst_memtomem(unsigned dry_run, u8 buf[], ++ const struct _xfer_spec *pxs, int cyc) ++{ ++ int off = 0; ++ ++ while (cyc--) { ++ 
off += _emit_LD(dry_run, &buf[off], ALWAYS); ++ off += _emit_ST(dry_run, &buf[off], ALWAYS); ++ } ++ ++ return off; ++} ++ ++static inline int _ldst_devtomem(unsigned dry_run, u8 buf[], ++ const struct _xfer_spec *pxs, int cyc) ++{ ++ int off = 0; ++ ++ while (cyc--) { ++ off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); ++ off += _emit_LD(dry_run, &buf[off], ALWAYS); ++ off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri); ++ off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); ++ } ++ ++ return off; ++} ++ ++static inline int _ldst_memtodev(unsigned dry_run, u8 buf[], ++ const struct _xfer_spec *pxs, int cyc) ++{ ++ int off = 0; ++ ++ while (cyc--) { ++ off += _emit_WFP(dry_run, &buf[off], SINGLE, pxs->desc->peri); ++ off += _emit_LD(dry_run, &buf[off], ALWAYS); ++ off += _emit_STP(dry_run, &buf[off], SINGLE, pxs->desc->peri); ++ off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); ++ } ++ ++ return off; ++} ++ ++static int _bursts(unsigned dry_run, u8 buf[], ++ const struct _xfer_spec *pxs, int cyc) ++{ ++ int off = 0; ++ ++ switch (pxs->desc->rqtype) { ++ case DMA_MEM_TO_DEV: ++ off += _ldst_memtodev(dry_run, &buf[off], pxs, cyc); ++ break; ++ case DMA_DEV_TO_MEM: ++ off += _ldst_devtomem(dry_run, &buf[off], pxs, cyc); ++ break; ++ case DMA_MEM_TO_MEM: ++ off += _ldst_memtomem(dry_run, &buf[off], pxs, cyc); ++ break; ++ default: ++ off += 0x40000000; /* Scare off the Client */ ++ break; ++ } ++ ++ return off; ++} ++ ++/* Returns bytes consumed and updates bursts */ ++static inline int _loop(unsigned dry_run, u8 buf[], ++ unsigned long *bursts, const struct _xfer_spec *pxs) ++{ ++ int cyc, cycmax, szlp, szlpend, szbrst, off; ++ unsigned lcnt0, lcnt1, ljmp0, ljmp1; ++ struct _arg_LPEND lpend; ++ ++ if (*bursts == 1) ++ return _bursts(dry_run, buf, pxs, 1); ++ ++ /* Max iterations possible in DMALP is 256 */ ++ if (*bursts >= 256*256) { ++ lcnt1 = 256; ++ lcnt0 = 256; ++ cyc = *bursts / lcnt1 / lcnt0; ++ } else if (*bursts > 256) { 
++ lcnt1 = 256; ++ lcnt0 = *bursts / lcnt1; ++ cyc = 1; ++ } else { ++ lcnt1 = *bursts; ++ lcnt0 = 0; ++ cyc = 1; ++ } ++ ++ szlp = _emit_LP(1, buf, 0, 0); ++ szbrst = _bursts(1, buf, pxs, 1); ++ ++ lpend.cond = ALWAYS; ++ lpend.forever = false; ++ lpend.loop = 0; ++ lpend.bjump = 0; ++ szlpend = _emit_LPEND(1, buf, &lpend); ++ ++ if (lcnt0) { ++ szlp *= 2; ++ szlpend *= 2; ++ } ++ ++ /* ++ * Max bursts that we can unroll due to limit on the ++ * size of backward jump that can be encoded in DMALPEND ++ * which is 8-bits and hence 255 ++ */ ++ cycmax = (255 - (szlp + szlpend)) / szbrst; ++ ++ cyc = (cycmax < cyc) ? cycmax : cyc; ++ ++ off = 0; ++ ++ if (lcnt0) { ++ off += _emit_LP(dry_run, &buf[off], 0, lcnt0); ++ ljmp0 = off; ++ } ++ ++ off += _emit_LP(dry_run, &buf[off], 1, lcnt1); ++ ljmp1 = off; ++ ++ off += _bursts(dry_run, &buf[off], pxs, cyc); ++ ++ lpend.cond = ALWAYS; ++ lpend.forever = false; ++ lpend.loop = 1; ++ lpend.bjump = off - ljmp1; ++ off += _emit_LPEND(dry_run, &buf[off], &lpend); ++ ++ if (lcnt0) { ++ lpend.cond = ALWAYS; ++ lpend.forever = false; ++ lpend.loop = 0; ++ lpend.bjump = off - ljmp0; ++ off += _emit_LPEND(dry_run, &buf[off], &lpend); ++ } ++ ++ *bursts = lcnt1 * cyc; ++ if (lcnt0) ++ *bursts *= lcnt0; ++ ++ return off; ++} ++ ++static inline int _req_loop(unsigned dry_run, u8 buf[], enum dma330_cond cond, ++ unsigned long *count, const struct _xfer_spec *pxs) ++{ ++ unsigned ljmp0; ++ struct _arg_LPEND lpend; ++ int off = 0; ++ ++ off += _emit_LP(dry_run, &buf[off], 0, *count); ++ ljmp0 = off; ++ ++ off += _emit_WFP(dry_run, &buf[off], cond, pxs->desc->peri); ++ off += _emit_LD(dry_run, &buf[off], cond); ++ off += _emit_STP(dry_run, &buf[off], cond, pxs->desc->peri); ++ ++ lpend.cond = cond; ++ lpend.forever = false; ++ lpend.loop = 0; ++ lpend.bjump = off - ljmp0; ++ off += _emit_LPEND(dry_run, &buf[off], &lpend); ++ ++ return off; ++} ++ ++static inline u32 _prepare_ccr(const struct dma330_reqcfg *rqc, ++ enum dma330_cond cond) ++{ 
++ u32 ccr = 0; ++ ++ if (rqc->src_inc) ++ ccr |= CC_SRCINC; ++ ++ if (rqc->dst_inc) ++ ccr |= CC_DSTINC; ++ ++ /* We set same protection levels for Src and DST for now */ ++ if (rqc->privileged) ++ ccr |= CC_SRCPRI | CC_DSTPRI; ++ if (rqc->nonsecure) ++ ccr |= CC_SRCNS | CC_DSTNS; ++ if (rqc->insnaccess) ++ ccr |= CC_SRCIA | CC_DSTIA; ++ ++ if (cond != SINGLE) { ++ ccr |= (((rqc->brst_len - 1) & 0xf) << CC_SRCBRSTLEN_SHFT); ++ ccr |= (((rqc->brst_len - 1) & 0xf) << CC_DSTBRSTLEN_SHFT); ++ } ++ ++ ccr |= (rqc->brst_size << CC_SRCBRSTSIZE_SHFT); ++ ccr |= (rqc->brst_size << CC_DSTBRSTSIZE_SHFT); ++ ++ ccr |= (rqc->scctl << CC_SRCCCTRL_SHFT); ++ ccr |= (rqc->dcctl << CC_DSTCCTRL_SHFT); ++ ++ ccr |= (rqc->swap << CC_SWAP_SHFT); ++ ++ return ccr; ++} ++ ++ ++static inline int _setup_xfer(unsigned dry_run, u8 buf[], ++ const struct _xfer_spec *pxs) ++{ ++ u32 bytes = pxs->desc->px.bytes; ++ u32 bursts = BYTE_TO_BURST(bytes, pxs->ccr); ++ u32 singles = (bytes - BURST_TO_BYTE(bursts, pxs->ccr)) >> 2; ++ u32 ccr = pxs->ccr; ++ u32 src_addr = pxs->desc->px.src_addr; ++ u32 dst_addr = pxs->desc->px.dst_addr; ++ unsigned long c; ++ int off = 0; ++ ++ if ((pxs->desc->rqtype == DMA_DEV_TO_MEM) || ++ (pxs->desc->rqtype == DMA_MEM_TO_DEV)) { ++ if(bursts) { ++ off += _emit_MOV(dry_run, &buf[off], CCR, ccr); ++ off += _emit_MOV(dry_run, &buf[off], SAR, src_addr); ++ off += _emit_MOV(dry_run, &buf[off], DAR, dst_addr); ++ ++ off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); ++ ++ while(bursts) { ++ c = (bursts > 256) ? 
256 : bursts; ++ off += _req_loop(dry_run, &buf[off], BURST, &c, pxs); ++ bursts -= c; ++ } ++ } ++ ++ if (singles) { ++ ccr = _prepare_ccr(&pxs->desc->rqcfg, SINGLE); ++ if (pxs->desc->rqtype == DMA_MEM_TO_DEV) { ++ src_addr += bytes - singles * 4; ++ } else { ++ dst_addr += bytes - singles * 4; ++ } ++ off += _emit_MOV(dry_run, &buf[off], CCR, ccr); ++ off += _emit_MOV(dry_run, &buf[off], SAR, src_addr); ++ off += _emit_MOV(dry_run, &buf[off], DAR, dst_addr); ++ ++ while(singles) { ++ c = (singles > 256) ? 256 : singles; ++ off += _req_loop(dry_run, &buf[off], SINGLE, &c, pxs); ++ singles -= c; ++ } ++ ++ off += _emit_FLUSHP(dry_run, &buf[off], pxs->desc->peri); ++ } ++ } else { ++ /* Error if xfer length is not aligned at burst size */ ++ if (bytes % (BRST_SIZE(pxs->ccr) * BRST_LEN(pxs->ccr))) ++ return -EINVAL; ++ ++ off += _emit_MOV(dry_run, &buf[off], CCR, pxs->ccr); ++ off += _emit_MOV(dry_run, &buf[off], SAR, src_addr); ++ off += _emit_MOV(dry_run, &buf[off], DAR, dst_addr); ++ ++ while (bursts) { ++ c = bursts; ++ off += _loop(dry_run, &buf[off], &c, pxs); ++ bursts -= c; ++ } ++ } ++ ++ return off; ++} ++ ++/* ++ * A req is a sequence of one or more xfer units. ++ * Returns the number of bytes taken to setup the MC for the req. ++ */ ++static int _setup_req(unsigned dry_run, struct dma330_thread *thrd, ++ unsigned index, struct _xfer_spec *pxs) ++{ ++ struct _dma330_req *req = &thrd->req[index]; ++ u8 *buf = req->mc_cpu; ++ int off = 0; ++ ++ DMA330_DBGMC_START(req->mc_bus); ++ ++ off += _setup_xfer(dry_run, &buf[off], pxs); ++ ++ /* DMASEV peripheral/event */ ++ off += _emit_SEV(dry_run, &buf[off], thrd->ev); ++ /* DMAEND */ ++ off += _emit_END(dry_run, &buf[off]); ++ ++ return off; ++} ++ ++/* ++ * Submit a list of xfers after which the client wants notification. ++ * Client is not notified after each xfer unit, just once after all ++ * xfer units are done or some error occurs. 
++ */ ++static int dma330_submit_req(struct dma330_thread *thrd, ++ struct dma_dma330_desc *desc) ++{ ++ struct dma330_dmac *dma330 = thrd->dmac; ++ struct _xfer_spec xs; ++ unsigned long flags; ++ unsigned idx; ++ u32 ccr; ++ int ret = 0; ++ ++ if (dma330->state == DYING ++ || dma330->dmac_tbd.reset_chan & (1 << thrd->id)) { ++ dev_info(thrd->dmac->ddma.dev, "%s:%d\n", ++ __func__, __LINE__); ++ return -EAGAIN; ++ } ++ ++ /* If request for non-existing peripheral */ ++ if (desc->rqtype != DMA_MEM_TO_MEM && ++ desc->peri >= dma330->pcfg.num_peri) { ++ dev_info(thrd->dmac->ddma.dev, ++ "%s:%d Invalid peripheral(%u)!\n", ++ __func__, __LINE__, desc->peri); ++ return -EINVAL; ++ } ++ ++ spin_lock_irqsave(&dma330->lock, flags); ++ ++ if (_queue_full(thrd)) { ++ ret = -EAGAIN; ++ goto xfer_exit; ++ } ++ ++ /* Prefer Secure Channel */ ++ if (!_manager_ns(thrd)) ++ desc->rqcfg.nonsecure = 0; ++ else ++ desc->rqcfg.nonsecure = 1; ++ ++ ccr = _prepare_ccr(&desc->rqcfg, BURST); ++ ++ idx = thrd->req[0].desc == NULL ? 
0 : 1; ++ ++ xs.ccr = ccr; ++ xs.desc = desc; ++ ++ /* First dry run to check if req is acceptable */ ++ ret = _setup_req(1, thrd, idx, &xs); ++ if (ret < 0) ++ goto xfer_exit; ++ ++ if (ret > dma330->mcbufsz / 2) { ++ dev_info(dma330->ddma.dev, "%s:%d Try increasing mcbufsz (%i/%i)\n", ++ __func__, __LINE__, ret, dma330->mcbufsz / 2); ++ ret = -ENOMEM; ++ goto xfer_exit; ++ } ++ ++ /* Hook the request */ ++ thrd->lstenq = idx; ++ thrd->req[idx].desc = desc; ++ _setup_req(0, thrd, idx, &xs); ++ ++ ret = 0; ++ ++xfer_exit: ++ spin_unlock_irqrestore(&dma330->lock, flags); ++ ++ return ret; ++} ++ ++static void dma_dma330_rqcb(struct dma_dma330_desc *desc, enum dma330_op_err err) ++{ ++ struct dma_dma330_chan *pch; ++ unsigned long flags; ++ ++ if (!desc) ++ return; ++ ++ pch = desc->pchan; ++ ++ /* If desc aborted */ ++ if (!pch) ++ return; ++ ++ spin_lock_irqsave(&pch->lock, flags); ++ ++ desc->status = DONE; ++ ++ spin_unlock_irqrestore(&pch->lock, flags); ++ ++ tasklet_schedule(&pch->task); ++} ++ ++static void dma330_dotask(unsigned long data) ++{ ++ struct dma330_dmac *dma330 = (struct dma330_dmac *) data; ++ unsigned long flags; ++ int i; ++ ++ spin_lock_irqsave(&dma330->lock, flags); ++ ++ /* The DMAC itself gone nuts */ ++ if (dma330->dmac_tbd.reset_dmac) { ++ dma330->state = DYING; ++ /* Reset the manager too */ ++ dma330->dmac_tbd.reset_mngr = true; ++ /* Clear the reset flag */ ++ dma330->dmac_tbd.reset_dmac = false; ++ } ++ ++ if (dma330->dmac_tbd.reset_mngr) { ++ _stop(dma330->manager); ++ /* Reset all channels */ ++ dma330->dmac_tbd.reset_chan = (1 << dma330->pcfg.num_chan) - 1; ++ /* Clear the reset flag */ ++ dma330->dmac_tbd.reset_mngr = false; ++ } ++ ++ for (i = 0; i < dma330->pcfg.num_chan; i++) { ++ ++ if (dma330->dmac_tbd.reset_chan & (1 << i)) { ++ struct dma330_thread *thrd = &dma330->channels[i]; ++ void __iomem *regs = dma330->base; ++ enum dma330_op_err err; ++ ++ _stop(thrd); ++ ++ if (readl(regs + FSC) & (1 << thrd->id)) ++ err = 
DMA330_ERR_FAIL; ++ else ++ err = DMA330_ERR_ABORT; ++ ++ spin_unlock_irqrestore(&dma330->lock, flags); ++ dma_dma330_rqcb(thrd->req[1 - thrd->lstenq].desc, err); ++ dma_dma330_rqcb(thrd->req[thrd->lstenq].desc, err); ++ spin_lock_irqsave(&dma330->lock, flags); ++ ++ thrd->req[0].desc = NULL; ++ thrd->req[1].desc = NULL; ++ thrd->req_running = -1; ++ ++ /* Clear the reset flag */ ++ dma330->dmac_tbd.reset_chan &= ~(1 << i); ++ } ++ } ++ ++ spin_unlock_irqrestore(&dma330->lock, flags); ++ ++ return; ++} ++ ++/* Returns 1 if state was updated, 0 otherwise */ ++static int dma330_update(struct dma330_dmac *dma330) ++{ ++ struct dma_dma330_desc *descdone, *tmp; ++ unsigned long flags; ++ void __iomem *regs; ++ u32 val; ++ int id, ev, ret = 0; ++ ++ regs = dma330->base; ++ ++ spin_lock_irqsave(&dma330->lock, flags); ++ ++ val = readl(regs + FSM) & 0x1; ++ if (val) ++ dma330->dmac_tbd.reset_mngr = true; ++ else ++ dma330->dmac_tbd.reset_mngr = false; ++ ++ val = readl(regs + FSC) & ((1 << dma330->pcfg.num_chan) - 1); ++ dma330->dmac_tbd.reset_chan |= val; ++ if (val) { ++ int i = 0; ++ while (i < dma330->pcfg.num_chan) { ++ if (val & (1 << i)) { ++ dev_info(dma330->ddma.dev, ++ "Reset Channel-%d\t CS-%x FTC-%x\n", ++ i, readl(regs + CS(i)), ++ readl(regs + FTC(i))); ++ _stop(&dma330->channels[i]); ++ } ++ i++; ++ } ++ } ++ ++ /* Check which event happened i.e, thread notified */ ++ val = readl(regs + ES); ++ if (dma330->pcfg.num_events < 32 ++ && val & ~((1 << dma330->pcfg.num_events) - 1)) { ++ dma330->dmac_tbd.reset_dmac = true; ++ dev_err(dma330->ddma.dev, "%s:%d Unexpected!\n", __func__, ++ __LINE__); ++ ret = 1; ++ goto updt_exit; ++ } ++ ++ for (ev = 0; ev < dma330->pcfg.num_events; ev++) { ++ if (val & (1 << ev)) { /* Event occurred */ ++ struct dma330_thread *thrd; ++ u32 inten = readl(regs + INTEN); ++ int active; ++ ++ /* Clear the event */ ++ if (inten & (1 << ev)) ++ writel(1 << ev, regs + INTCLR); ++ ++ ret = 1; ++ ++ id = dma330->events[ev]; ++ ++ thrd = 
&dma330->channels[id]; ++ ++ active = thrd->req_running; ++ if (active == -1) /* Aborted */ ++ continue; ++ ++ /* Detach the req */ ++ descdone = thrd->req[active].desc; ++ thrd->req[active].desc = NULL; ++ ++ thrd->req_running = -1; ++ ++ /* Get going again ASAP */ ++ _start(thrd); ++ ++ /* For now, just make a list of callbacks to be done */ ++ list_add_tail(&descdone->rqd, &dma330->req_done); ++ } ++ } ++ ++ /* Now that we are in no hurry, do the callbacks */ ++ list_for_each_entry_safe(descdone, tmp, &dma330->req_done, rqd) { ++ list_del(&descdone->rqd); ++ spin_unlock_irqrestore(&dma330->lock, flags); ++ dma_dma330_rqcb(descdone, DMA330_ERR_NONE); ++ spin_lock_irqsave(&dma330->lock, flags); ++ } ++ ++updt_exit: ++ spin_unlock_irqrestore(&dma330->lock, flags); ++ ++ if (dma330->dmac_tbd.reset_dmac ++ || dma330->dmac_tbd.reset_mngr ++ || dma330->dmac_tbd.reset_chan) { ++ ret = 1; ++ tasklet_schedule(&dma330->tasks); ++ } ++ ++ return ret; ++} ++ ++/* Reserve an event */ ++static inline int _alloc_event(struct dma330_thread *thrd) ++{ ++ struct dma330_dmac *dma330 = thrd->dmac; ++ int ev; ++ ++ for (ev = 0; ev < dma330->pcfg.num_events; ev++) ++ if (dma330->events[ev] == -1) { ++ dma330->events[ev] = thrd->id; ++ return ev; ++ } ++ ++ return -1; ++} ++ ++static bool _chan_ns(const struct dma330_dmac *dma330, int i) ++{ ++ return dma330->pcfg.irq_ns & (1 << i); ++} ++ ++/* Upon success, returns IdentityToken for the ++ * allocated channel, NULL otherwise. 
++ */ ++static struct dma330_thread *dma330_request_channel(struct dma330_dmac *dma330) ++{ ++ struct dma330_thread *thrd = NULL; ++ unsigned long flags; ++ int chans, i; ++ ++ if (dma330->state == DYING) ++ return NULL; ++ ++ chans = dma330->pcfg.num_chan; ++ ++ spin_lock_irqsave(&dma330->lock, flags); ++ ++ for (i = 0; i < chans; i++) { ++ thrd = &dma330->channels[i]; ++ if ((thrd->free) && (!_manager_ns(thrd) || ++ _chan_ns(dma330, i))) { ++ thrd->ev = _alloc_event(thrd); ++ if (thrd->ev >= 0) { ++ thrd->free = false; ++ thrd->lstenq = 1; ++ thrd->req[0].desc = NULL; ++ thrd->req[1].desc = NULL; ++ thrd->req_running = -1; ++ break; ++ } ++ } ++ thrd = NULL; ++ } ++ ++ spin_unlock_irqrestore(&dma330->lock, flags); ++ ++ return thrd; ++} ++ ++/* Release an event */ ++static inline void _free_event(struct dma330_thread *thrd, int ev) ++{ ++ struct dma330_dmac *dma330 = thrd->dmac; ++ ++ /* If the event is valid and was held by the thread */ ++ if (ev >= 0 && ev < dma330->pcfg.num_events ++ && dma330->events[ev] == thrd->id) ++ dma330->events[ev] = -1; ++} ++ ++static void dma330_release_channel(struct dma330_thread *thrd) ++{ ++ struct dma330_dmac *dma330; ++ unsigned long flags; ++ ++ if (!thrd || thrd->free) ++ return; ++ ++ _stop(thrd); ++ ++ dma_dma330_rqcb(thrd->req[1 - thrd->lstenq].desc, DMA330_ERR_ABORT); ++ dma_dma330_rqcb(thrd->req[thrd->lstenq].desc, DMA330_ERR_ABORT); ++ ++ dma330 = thrd->dmac; ++ ++ spin_lock_irqsave(&dma330->lock, flags); ++ _free_event(thrd, thrd->ev); ++ thrd->free = true; ++ spin_unlock_irqrestore(&dma330->lock, flags); ++} ++ ++/* Initialize the structure for DMA330 configuration, that can be used ++ * by the client driver the make best use of the DMAC ++ */ ++static void read_dmac_config(struct dma330_dmac *dma330) ++{ ++ void __iomem *regs = dma330->base; ++ u32 val; ++ ++ val = readl(regs + CRD) >> CRD_DATA_WIDTH_SHIFT; ++ val &= CRD_DATA_WIDTH_MASK; ++ dma330->pcfg.data_bus_width = 8 * (1 << val); ++ ++ val = readl(regs + CRD) 
>> CRD_DATA_BUFF_SHIFT; ++ val &= CRD_DATA_BUFF_MASK; ++ dma330->pcfg.data_buf_dep = val + 1; ++ ++ val = readl(regs + CR0) >> CR0_NUM_CHANS_SHIFT; ++ val &= CR0_NUM_CHANS_MASK; ++ val += 1; ++ dma330->pcfg.num_chan = val; ++ ++ val = readl(regs + CR0); ++ if (val & CR0_PERIPH_REQ_SET) { ++ val = (val >> CR0_NUM_PERIPH_SHIFT) & CR0_NUM_PERIPH_MASK; ++ val += 1; ++ dma330->pcfg.num_peri = val; ++ dma330->pcfg.peri_ns = readl(regs + CR4); ++ } else { ++ dma330->pcfg.num_peri = 0; ++ } ++ ++ val = readl(regs + CR0); ++ if (val & CR0_BOOT_MAN_NS) ++ dma330->pcfg.mode |= DMAC_MODE_NS; ++ else ++ dma330->pcfg.mode &= ~DMAC_MODE_NS; ++ ++ val = readl(regs + CR0) >> CR0_NUM_EVENTS_SHIFT; ++ val &= CR0_NUM_EVENTS_MASK; ++ val += 1; ++ dma330->pcfg.num_events = val; ++ ++ dma330->pcfg.irq_ns = readl(regs + CR3); ++} ++ ++static inline void _reset_thread(struct dma330_thread *thrd) ++{ ++ struct dma330_dmac *dma330 = thrd->dmac; ++ ++ thrd->req[0].mc_cpu = dma330->mcode_cpu ++ + (thrd->id * dma330->mcbufsz); ++ thrd->req[0].mc_bus = dma330->mcode_bus ++ + (thrd->id * dma330->mcbufsz); ++ thrd->req[0].desc = NULL; ++ ++ thrd->req[1].mc_cpu = thrd->req[0].mc_cpu ++ + dma330->mcbufsz / 2; ++ thrd->req[1].mc_bus = thrd->req[0].mc_bus ++ + dma330->mcbufsz / 2; ++ thrd->req[1].desc = NULL; ++ ++ thrd->req_running = -1; ++} ++ ++static int dmac_alloc_threads(struct dma330_dmac *dma330) ++{ ++ int chans = dma330->pcfg.num_chan; ++ struct dma330_thread *thrd; ++ int i; ++ ++ /* Allocate 1 Manager and 'chans' Channel threads */ ++ dma330->channels = kzalloc((1 + chans) * sizeof(*thrd), ++ GFP_KERNEL); ++ if (!dma330->channels) ++ return -ENOMEM; ++ ++ /* Init Channel threads */ ++ for (i = 0; i < chans; i++) { ++ thrd = &dma330->channels[i]; ++ thrd->id = i; ++ thrd->dmac = dma330; ++ _reset_thread(thrd); ++ thrd->free = true; ++ } ++ ++ /* MANAGER is indexed at the end */ ++ thrd = &dma330->channels[chans]; ++ thrd->id = chans; ++ thrd->dmac = dma330; ++ thrd->free = false; ++ 
dma330->manager = thrd; ++ ++ return 0; ++} ++ ++static int dmac_alloc_resources(struct dma330_dmac *dma330) ++{ ++ int chans = dma330->pcfg.num_chan; ++ int ret; ++ ++ /* ++ * Alloc MicroCode buffer for 'chans' Channel threads. ++ * A channel's buffer offset is (Channel_Id * MCODE_BUFF_PERCHAN) ++ */ ++ dma330->mcode_cpu = dma_alloc_coherent(dma330->ddma.dev, ++ chans * dma330->mcbufsz, ++ &dma330->mcode_bus, GFP_KERNEL); ++ if (!dma330->mcode_cpu) { ++ dev_err(dma330->ddma.dev, "%s:%d Can't allocate memory!\n", ++ __func__, __LINE__); ++ return -ENOMEM; ++ } ++ ++ ret = dmac_alloc_threads(dma330); ++ if (ret) { ++ dev_err(dma330->ddma.dev, "%s:%d Can't to create channels for DMAC!\n", ++ __func__, __LINE__); ++ dma_free_coherent(dma330->ddma.dev, ++ chans * dma330->mcbufsz, ++ dma330->mcode_cpu, dma330->mcode_bus); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int dma330_add(struct dma330_dmac *dma330) ++{ ++ void __iomem *regs; ++ int i, ret; ++ ++ regs = dma330->base; ++ ++ /* Check if we can handle this DMAC */ ++ if ((dma330->pcfg.periph_id & 0xffffff) != PERIPH_ID_VAL) { ++ dev_err(dma330->ddma.dev, "PERIPH_ID 0x%x !\n", ++ dma330->pcfg.periph_id); ++ return -EINVAL; ++ } ++ ++ /* Read the configuration of the DMAC */ ++ read_dmac_config(dma330); ++ ++ if (dma330->pcfg.num_events == 0) { ++ dev_err(dma330->ddma.dev, "%s:%d Can't work without events!\n", ++ __func__, __LINE__); ++ return -EINVAL; ++ } ++ ++ spin_lock_init(&dma330->lock); ++ ++ INIT_LIST_HEAD(&dma330->req_done); ++ ++ /* Use default MC buffer size if not provided */ ++ if (!dma330->mcbufsz) ++ dma330->mcbufsz = MCODE_BUFF_PER_REQ * 2; ++ ++ /* Mark all events as free */ ++ for (i = 0; i < dma330->pcfg.num_events; i++) ++ dma330->events[i] = -1; ++ ++ /* Allocate resources needed by the DMAC */ ++ ret = dmac_alloc_resources(dma330); ++ if (ret) { ++ dev_err(dma330->ddma.dev, "Unable to create channels for DMAC\n"); ++ return ret; ++ } ++ ++ tasklet_init(&dma330->tasks, dma330_dotask, 
(unsigned long) dma330); ++ ++ dma330->state = INIT; ++ ++ return 0; ++} ++ ++static int dmac_free_threads(struct dma330_dmac *dma330) ++{ ++ struct dma330_thread *thrd; ++ int i; ++ ++ /* Release Channel threads */ ++ for (i = 0; i < dma330->pcfg.num_chan; i++) { ++ thrd = &dma330->channels[i]; ++ dma330_release_channel(thrd); ++ } ++ ++ /* Free memory */ ++ kfree(dma330->channels); ++ ++ return 0; ++} ++ ++static void dma330_del(struct dma330_dmac *dma330) ++{ ++ dma330->state = UNINIT; ++ ++ tasklet_kill(&dma330->tasks); ++ ++ /* Free DMAC resources */ ++ dmac_free_threads(dma330); ++ ++ dma_free_coherent(dma330->ddma.dev, ++ dma330->pcfg.num_chan * dma330->mcbufsz, dma330->mcode_cpu, ++ dma330->mcode_bus); ++} ++ ++/* forward declaration */ ++static struct amba_driver iproc_dma330_driver; ++ ++static inline struct dma_dma330_chan * ++to_pchan(struct dma_chan *ch) ++{ ++ if (!ch) ++ return NULL; ++ ++ return container_of(ch, struct dma_dma330_chan, chan); ++} ++ ++static inline struct dma_dma330_desc * ++to_desc(struct dma_async_tx_descriptor *tx) ++{ ++ return container_of(tx, struct dma_dma330_desc, txd); ++} ++ ++static inline void fill_queue(struct dma_dma330_chan *pch) ++{ ++ struct dma_dma330_desc *desc; ++ int ret; ++ ++ list_for_each_entry(desc, &pch->work_list, node) { ++ ++ /* If already submitted */ ++ if (desc->status == BUSY) ++ continue; ++ ++ ret = dma330_submit_req(pch->thread, desc); ++ if (!ret) { ++ desc->status = BUSY; ++ } else if (ret == -EAGAIN) { ++ /* QFull or DMAC Dying */ ++ break; ++ } else { ++ /* Unacceptable request */ ++ desc->status = DONE; ++ dev_err(pch->dmac->ddma.dev, "%s:%d Bad Desc(%d)\n", ++ __func__, __LINE__, desc->txd.cookie); ++ tasklet_schedule(&pch->task); ++ } ++ } ++} ++ ++static void dma330_tasklet(unsigned long data) ++{ ++ struct dma_dma330_chan *pch = (struct dma_dma330_chan *)data; ++ struct dma_dma330_desc *desc, *_dt; ++ unsigned long flags; ++ bool power_down = false; ++ ++ spin_lock_irqsave(&pch->lock, 
flags); ++ ++ /* Pick up ripe tomatoes */ ++ list_for_each_entry_safe(desc, _dt, &pch->work_list, node) ++ if (desc->status == DONE) { ++ if (!pch->cyclic) ++ dma_cookie_complete(&desc->txd); ++ list_move_tail(&desc->node, &pch->completed_list); ++ } ++ ++ /* Try to submit a req imm. next to the last completed cookie */ ++ fill_queue(pch); ++ ++ if (list_empty(&pch->work_list)) { ++ spin_lock(&pch->thread->dmac->lock); ++ _stop(pch->thread); ++ spin_unlock(&pch->thread->dmac->lock); ++ power_down = true; ++ } else { ++ /* Make sure the DMA330 Channel thread is active */ ++ spin_lock(&pch->thread->dmac->lock); ++ _start(pch->thread); ++ spin_unlock(&pch->thread->dmac->lock); ++ } ++ ++ while (!list_empty(&pch->completed_list)) { ++ dma_async_tx_callback callback; ++ void *callback_param; ++ ++ desc = list_first_entry(&pch->completed_list, ++ struct dma_dma330_desc, node); ++ ++ callback = desc->txd.callback; ++ callback_param = desc->txd.callback_param; ++ ++ if (pch->cyclic) { ++ desc->status = PREP; ++ list_move_tail(&desc->node, &pch->work_list); ++ if (power_down) { ++ spin_lock(&pch->thread->dmac->lock); ++ _start(pch->thread); ++ spin_unlock(&pch->thread->dmac->lock); ++ power_down = false; ++ } ++ } else { ++ desc->status = FREE; ++ list_move_tail(&desc->node, &pch->dmac->desc_pool); ++ } ++ ++ dma_descriptor_unmap(&desc->txd); ++ ++ if (callback) { ++ spin_unlock_irqrestore(&pch->lock, flags); ++ callback(callback_param); ++ spin_lock_irqsave(&pch->lock, flags); ++ } ++ } ++ spin_unlock_irqrestore(&pch->lock, flags); ++ ++ /* If work list empty, power down */ ++ if (power_down) { ++ pm_runtime_mark_last_busy(pch->dmac->ddma.dev); ++ pm_runtime_put_autosuspend(pch->dmac->ddma.dev); ++ } ++} ++ ++bool dma330_filter(struct dma_chan *chan, void *param) ++{ ++ u8 *peri_id; ++ ++ if (chan->device->dev->driver != &iproc_dma330_driver.drv) ++ return false; ++ ++ peri_id = chan->private; ++ return *peri_id == (unsigned long)param; ++} ++EXPORT_SYMBOL(dma330_filter); 
++ ++static struct dma_chan *of_dma_dma330_xlate(struct of_phandle_args *dma_spec, ++ struct of_dma *ofdma) ++{ ++ int count = dma_spec->args_count; ++ struct dma330_dmac *dma330 = ofdma->of_dma_data; ++ unsigned int chan_id; ++ ++ if (!dma330) ++ return NULL; ++ ++ if (count != 1) ++ return NULL; ++ ++ chan_id = dma_spec->args[0]; ++ if (chan_id >= dma330->num_peripherals) ++ return NULL; ++ ++ return dma_get_slave_channel(&dma330->peripherals[chan_id].chan); ++} ++ ++static int dma330_alloc_chan_resources(struct dma_chan *chan) ++{ ++ struct dma_dma330_chan *pch = to_pchan(chan); ++ struct dma330_dmac *dma330 = pch->dmac; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&pch->lock, flags); ++ ++ dma_cookie_init(chan); ++ pch->cyclic = false; ++ ++ pch->thread = dma330_request_channel(dma330); ++ if (!pch->thread) { ++ spin_unlock_irqrestore(&pch->lock, flags); ++ return -ENOMEM; ++ } ++ ++ tasklet_init(&pch->task, dma330_tasklet, (unsigned long) pch); ++ ++ spin_unlock_irqrestore(&pch->lock, flags); ++ ++ return 1; ++} ++ ++static int dma330_config(struct dma_chan *chan, ++ struct dma_slave_config *slave_config) ++{ ++ struct dma_dma330_chan *pch = to_pchan(chan); ++ ++ if (slave_config->direction == DMA_MEM_TO_DEV) { ++ if (slave_config->dst_addr) ++ pch->fifo_addr = slave_config->dst_addr; ++ if (slave_config->dst_addr_width) ++ pch->burst_sz = __ffs(slave_config->dst_addr_width); ++ if (slave_config->dst_maxburst) ++ pch->burst_len = slave_config->dst_maxburst; ++ } else if (slave_config->direction == DMA_DEV_TO_MEM) { ++ if (slave_config->src_addr) ++ pch->fifo_addr = slave_config->src_addr; ++ if (slave_config->src_addr_width) ++ pch->burst_sz = __ffs(slave_config->src_addr_width); ++ if (slave_config->src_maxburst) ++ pch->burst_len = slave_config->src_maxburst; ++ } ++ ++ return 0; ++} ++ ++static int dma330_terminate_all(struct dma_chan *chan) ++{ ++ struct dma_dma330_chan *pch = to_pchan(chan); ++ struct dma_dma330_desc *desc; ++ unsigned long flags; ++ 
struct dma330_dmac *dma330 = pch->dmac; ++ LIST_HEAD(list); ++ ++ pm_runtime_get_sync(dma330->ddma.dev); ++ spin_lock_irqsave(&pch->lock, flags); ++ spin_lock(&dma330->lock); ++ _stop(pch->thread); ++ spin_unlock(&dma330->lock); ++ ++ pch->thread->req[0].desc = NULL; ++ pch->thread->req[1].desc = NULL; ++ pch->thread->req_running = -1; ++ ++ /* Mark all desc done */ ++ list_for_each_entry(desc, &pch->submitted_list, node) { ++ desc->status = FREE; ++ dma_cookie_complete(&desc->txd); ++ } ++ ++ list_for_each_entry(desc, &pch->work_list , node) { ++ desc->status = FREE; ++ dma_cookie_complete(&desc->txd); ++ } ++ ++ list_splice_tail_init(&pch->submitted_list, &dma330->desc_pool); ++ list_splice_tail_init(&pch->work_list, &dma330->desc_pool); ++ list_splice_tail_init(&pch->completed_list, &dma330->desc_pool); ++ spin_unlock_irqrestore(&pch->lock, flags); ++ pm_runtime_mark_last_busy(dma330->ddma.dev); ++ pm_runtime_put_autosuspend(dma330->ddma.dev); ++ ++ return 0; ++} ++ ++/* ++ * We don't support DMA_RESUME command because of hardware ++ * limitations, so after pausing the channel we cannot restore ++ * it to active state. We have to terminate channel and setup ++ * DMA transfer again. This pause feature was implemented to ++ * allow safely read residue before channel termination. 
++ */ ++static int dma330_pause(struct dma_chan *chan) ++{ ++ struct dma_dma330_chan *pch = to_pchan(chan); ++ struct dma330_dmac *dma330 = pch->dmac; ++ unsigned long flags; ++ ++ pm_runtime_get_sync(dma330->ddma.dev); ++ spin_lock_irqsave(&pch->lock, flags); ++ ++ spin_lock(&dma330->lock); ++ _stop(pch->thread); ++ spin_unlock(&dma330->lock); ++ ++ spin_unlock_irqrestore(&pch->lock, flags); ++ pm_runtime_mark_last_busy(dma330->ddma.dev); ++ pm_runtime_put_autosuspend(dma330->ddma.dev); ++ ++ return 0; ++} ++ ++static void dma330_free_chan_resources(struct dma_chan *chan) ++{ ++ struct dma_dma330_chan *pch = to_pchan(chan); ++ unsigned long flags; ++ ++ tasklet_kill(&pch->task); ++ ++ pm_runtime_get_sync(pch->dmac->ddma.dev); ++ spin_lock_irqsave(&pch->lock, flags); ++ ++ dma330_release_channel(pch->thread); ++ pch->thread = NULL; ++ ++ if (pch->cyclic) ++ list_splice_tail_init(&pch->work_list, &pch->dmac->desc_pool); ++ ++ spin_unlock_irqrestore(&pch->lock, flags); ++ pm_runtime_mark_last_busy(pch->dmac->ddma.dev); ++ pm_runtime_put_autosuspend(pch->dmac->ddma.dev); ++} ++ ++static int dma330_get_current_xferred_count(struct dma_dma330_chan *pch, ++ struct dma_dma330_desc *desc) ++{ ++ struct dma330_thread *thrd = pch->thread; ++ struct dma330_dmac *dma330 = pch->dmac; ++ void __iomem *regs = thrd->dmac->base; ++ u32 val, addr; ++ ++ pm_runtime_get_sync(dma330->ddma.dev); ++ val = addr = 0; ++ if (desc->rqcfg.src_inc) { ++ val = readl(regs + SA(thrd->id)); ++ addr = desc->px.src_addr; ++ } else { ++ val = readl(regs + DA(thrd->id)); ++ addr = desc->px.dst_addr; ++ } ++ pm_runtime_mark_last_busy(pch->dmac->ddma.dev); ++ pm_runtime_put_autosuspend(dma330->ddma.dev); ++ return val - addr; ++} ++ ++static enum dma_status ++dma330_tx_status(struct dma_chan *chan, dma_cookie_t cookie, ++ struct dma_tx_state *txstate) ++{ ++ enum dma_status ret; ++ unsigned long flags; ++ struct dma_dma330_desc *desc, *running = NULL; ++ struct dma_dma330_chan *pch = to_pchan(chan); ++ 
unsigned int transferred, residual = 0; ++ ++ ret = dma_cookie_status(chan, cookie, txstate); ++ ++ if (!txstate) ++ return ret; ++ ++ if (ret == DMA_COMPLETE) ++ goto out; ++ ++ spin_lock_irqsave(&pch->lock, flags); ++ ++ if (pch->thread->req_running != -1) ++ running = pch->thread->req[pch->thread->req_running].desc; ++ ++ /* Check in pending list */ ++ list_for_each_entry(desc, &pch->work_list, node) { ++ if (desc->status == DONE) ++ transferred = desc->bytes_requested; ++ else if (running && desc == running) ++ transferred = ++ dma330_get_current_xferred_count(pch, desc); ++ else ++ transferred = 0; ++ residual += desc->bytes_requested - transferred; ++ if (desc->txd.cookie == cookie) { ++ switch (desc->status) { ++ case DONE: ++ ret = DMA_COMPLETE; ++ break; ++ case PREP: ++ case BUSY: ++ ret = DMA_IN_PROGRESS; ++ break; ++ default: ++ WARN_ON(1); ++ } ++ break; ++ } ++ if (desc->last) ++ residual = 0; ++ } ++ spin_unlock_irqrestore(&pch->lock, flags); ++ ++out: ++ dma_set_residue(txstate, residual); ++ ++ return ret; ++} ++ ++static void dma330_issue_pending(struct dma_chan *chan) ++{ ++ struct dma_dma330_chan *pch = to_pchan(chan); ++ unsigned long flags; ++ ++ spin_lock_irqsave(&pch->lock, flags); ++ if (list_empty(&pch->work_list)) { ++ /* ++ * Warn on nothing pending. Empty submitted_list may ++ * break our pm_runtime usage counter as it is ++ * updated on work_list emptiness status. ++ */ ++ WARN_ON(list_empty(&pch->submitted_list)); ++ pm_runtime_get_sync(pch->dmac->ddma.dev); ++ } ++ list_splice_tail_init(&pch->submitted_list, &pch->work_list); ++ spin_unlock_irqrestore(&pch->lock, flags); ++ ++ dma330_tasklet((unsigned long)pch); ++} ++ ++/* ++ * We returned the last one of the circular list of descriptor(s) ++ * from prep_xxx, so the argument to submit corresponds to the last ++ * descriptor of the list. 
++ */ ++static dma_cookie_t dma330_tx_submit(struct dma_async_tx_descriptor *tx) ++{ ++ struct dma_dma330_desc *desc, *last = to_desc(tx); ++ struct dma_dma330_chan *pch = to_pchan(tx->chan); ++ dma_cookie_t cookie; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&pch->lock, flags); ++ ++ /* Assign cookies to all nodes */ ++ while (!list_empty(&last->node)) { ++ desc = list_entry(last->node.next, struct dma_dma330_desc, node); ++ if (pch->cyclic) { ++ desc->txd.callback = last->txd.callback; ++ desc->txd.callback_param = last->txd.callback_param; ++ } ++ desc->last = false; ++ ++ dma_cookie_assign(&desc->txd); ++ ++ list_move_tail(&desc->node, &pch->submitted_list); ++ } ++ ++ last->last = true; ++ cookie = dma_cookie_assign(&last->txd); ++ list_add_tail(&last->node, &pch->submitted_list); ++ spin_unlock_irqrestore(&pch->lock, flags); ++ ++ return cookie; ++} ++ ++static inline void _init_desc(struct dma_dma330_desc *desc) ++{ ++#ifdef __LITTLE_ENDIAN ++ desc->rqcfg.swap = SWAP_NO; ++#else ++ desc->rqcfg.swap = SWAP_4; ++#endif /* __LITTLE_ENDIAN */ ++ desc->rqcfg.scctl = CCTRL0; ++ desc->rqcfg.dcctl = CCTRL0; ++ desc->txd.tx_submit = dma330_tx_submit; ++ ++ INIT_LIST_HEAD(&desc->node); ++} ++ ++/* Returns the number of descriptors added to the DMAC pool */ ++static int add_desc(struct dma330_dmac *dma330, gfp_t flg, int count) ++{ ++ struct dma_dma330_desc *desc; ++ unsigned long flags; ++ int i; ++ ++ desc = kcalloc(count, sizeof(*desc), flg); ++ if (!desc) ++ return 0; ++ ++ spin_lock_irqsave(&dma330->pool_lock, flags); ++ ++ for (i = 0; i < count; i++) { ++ _init_desc(&desc[i]); ++ list_add_tail(&desc[i].node, &dma330->desc_pool); ++ } ++ ++ spin_unlock_irqrestore(&dma330->pool_lock, flags); ++ ++ return count; ++} ++ ++static struct dma_dma330_desc *pluck_desc(struct dma330_dmac *dma330) ++{ ++ struct dma_dma330_desc *desc = NULL; ++ unsigned long flags; ++ ++ spin_lock_irqsave(&dma330->pool_lock, flags); ++ ++ if (!list_empty(&dma330->desc_pool)) { ++ desc = 
list_entry(dma330->desc_pool.next, ++ struct dma_dma330_desc, node); ++ ++ list_del_init(&desc->node); ++ ++ desc->status = PREP; ++ desc->txd.callback = NULL; ++ } ++ ++ spin_unlock_irqrestore(&dma330->pool_lock, flags); ++ ++ return desc; ++} ++ ++static struct dma_dma330_desc *dma330_get_desc(struct dma_dma330_chan *pch) ++{ ++ struct dma330_dmac *dma330 = pch->dmac; ++ u8 *peri_id = pch->chan.private; ++ struct dma_dma330_desc *desc; ++ ++ /* Pluck one desc from the pool of DMAC */ ++ desc = pluck_desc(dma330); ++ ++ /* If the DMAC pool is empty, alloc new */ ++ if (!desc) { ++ if (!add_desc(dma330, GFP_ATOMIC, 1)) ++ return NULL; ++ ++ /* Try again */ ++ desc = pluck_desc(dma330); ++ if (!desc) { ++ dev_err(pch->dmac->ddma.dev, ++ "%s:%d ALERT!\n", __func__, __LINE__); ++ return NULL; ++ } ++ } ++ ++ /* Initialize the descriptor */ ++ desc->pchan = pch; ++ desc->txd.cookie = 0; ++ async_tx_ack(&desc->txd); ++ ++ desc->peri = peri_id ? pch->chan.chan_id : 0; ++ desc->rqcfg.pcfg = &pch->dmac->pcfg; ++ ++ dma_async_tx_descriptor_init(&desc->txd, &pch->chan); ++ ++ return desc; ++} ++ ++static inline void fill_px(struct dma330_xfer *px, ++ dma_addr_t dst, dma_addr_t src, size_t len) ++{ ++ px->bytes = len; ++ px->dst_addr = dst; ++ px->src_addr = src; ++} ++ ++static struct dma_dma330_desc * ++__dma330_prep_dma_memcpy(struct dma_dma330_chan *pch, dma_addr_t dst, ++ dma_addr_t src, size_t len) ++{ ++ struct dma_dma330_desc *desc = dma330_get_desc(pch); ++ ++ if (!desc) { ++ dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", ++ __func__, __LINE__); ++ return NULL; ++ } ++ ++ /* ++ * Ideally we should lookout for reqs bigger than ++ * those that can be programmed with 256 bytes of ++ * MC buffer, but considering a req size is seldom ++ * going to be word-unaligned and more than 200MB, ++ * we take it easy. ++ * Also, should the limit is reached we'd rather ++ * have the platform increase MC buffer size than ++ * complicating this API driver. 
++ */ ++ fill_px(&desc->px, dst, src, len); ++ ++ return desc; ++} ++ ++/* Call after fixing burst size */ ++static inline int get_burst_len(struct dma_dma330_desc *desc, size_t len) ++{ ++ struct dma_dma330_chan *pch = desc->pchan; ++ struct dma330_dmac *dma330 = pch->dmac; ++ int burst_len; ++ ++ burst_len = dma330->pcfg.data_bus_width / 8; ++ burst_len *= dma330->pcfg.data_buf_dep / dma330->pcfg.num_chan; ++ burst_len >>= desc->rqcfg.brst_size; ++ ++ /* src/dst_burst_len can't be more than 16 */ ++ if (burst_len > 16) ++ burst_len = 16; ++ ++ while (burst_len > 1) { ++ if (!(len % (burst_len << desc->rqcfg.brst_size))) ++ break; ++ burst_len--; ++ } ++ ++ return burst_len; ++} ++ ++static struct dma_async_tx_descriptor *dma330_prep_dma_cyclic( ++ struct dma_chan *chan, dma_addr_t dma_addr, size_t len, ++ size_t period_len, enum dma_transfer_direction direction, ++ unsigned long flags) ++{ ++ struct dma_dma330_desc *desc = NULL, *first = NULL; ++ struct dma_dma330_chan *pch = to_pchan(chan); ++ struct dma330_dmac *dma330 = pch->dmac; ++ unsigned int i; ++ dma_addr_t dst; ++ dma_addr_t src; ++ ++ if (len % period_len != 0) ++ return NULL; ++ ++ if (!is_slave_direction(direction)) { ++ dev_err(pch->dmac->ddma.dev, "%s:%d Invalid dma direction\n", ++ __func__, __LINE__); ++ return NULL; ++ } ++ ++ for (i = 0; i < len / period_len; i++) { ++ desc = dma330_get_desc(pch); ++ if (!desc) { ++ dev_err(pch->dmac->ddma.dev, "%s:%d Unable to fetch desc\n", ++ __func__, __LINE__); ++ ++ if (!first) ++ return NULL; ++ ++ spin_lock_irqsave(&dma330->pool_lock, flags); ++ ++ while (!list_empty(&first->node)) { ++ desc = list_entry(first->node.next, ++ struct dma_dma330_desc, node); ++ list_move_tail(&desc->node, &dma330->desc_pool); ++ } ++ ++ list_move_tail(&first->node, &dma330->desc_pool); ++ ++ spin_unlock_irqrestore(&dma330->pool_lock, flags); ++ ++ return NULL; ++ } ++ ++ switch (direction) { ++ case DMA_MEM_TO_DEV: ++ desc->rqcfg.src_inc = 1; ++ desc->rqcfg.dst_inc = 0; ++ 
src = dma_addr; ++ dst = pch->fifo_addr; ++ break; ++ case DMA_DEV_TO_MEM: ++ desc->rqcfg.src_inc = 0; ++ desc->rqcfg.dst_inc = 1; ++ src = pch->fifo_addr; ++ dst = dma_addr; ++ break; ++ default: ++ break; ++ } ++ ++ desc->rqtype = direction; ++ desc->rqcfg.brst_size = pch->burst_sz; ++ desc->rqcfg.brst_len = pch->burst_len; ++ desc->bytes_requested = period_len; ++ fill_px(&desc->px, dst, src, period_len); ++ ++ if (!first) ++ first = desc; ++ else ++ list_add_tail(&desc->node, &first->node); ++ ++ dma_addr += period_len; ++ } ++ ++ if (!desc) ++ return NULL; ++ ++ pch->cyclic = true; ++ desc->txd.flags = flags; ++ ++ return &desc->txd; ++} ++ ++static struct dma_async_tx_descriptor * ++dma330_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst, ++ dma_addr_t src, size_t len, unsigned long flags) ++{ ++ struct dma_dma330_desc *desc; ++ struct dma_dma330_chan *pch = to_pchan(chan); ++ struct dma330_dmac *dma330; ++ int burst; ++ ++ if (unlikely(!pch || !len)) ++ return NULL; ++ ++ dma330 = pch->dmac; ++ ++ desc = __dma330_prep_dma_memcpy(pch, dst, src, len); ++ if (!desc) ++ return NULL; ++ ++ desc->rqcfg.src_inc = 1; ++ desc->rqcfg.dst_inc = 1; ++ desc->rqtype = DMA_MEM_TO_MEM; ++ ++ /* Select max possible burst size */ ++ burst = dma330->pcfg.data_bus_width / 8; ++ ++ /* ++ * Make sure we use a burst size that aligns with all the memcpy ++ * parameters because our DMA programming algorithm doesn't cope with ++ * transfers which straddle an entry in the DMA device's MFIFO. ++ */ ++ while ((src | dst | len) & (burst - 1)) ++ burst /= 2; ++ ++ desc->rqcfg.brst_size = 0; ++ while (burst != (1 << desc->rqcfg.brst_size)) ++ desc->rqcfg.brst_size++; ++ ++ /* ++ * If burst size is smaller than bus width then make sure we only ++ * transfer one at a time to avoid a burst stradling an MFIFO entry. 
++ */ ++ if (desc->rqcfg.brst_size * 8 < dma330->pcfg.data_bus_width) ++ desc->rqcfg.brst_len = 1; ++ ++ desc->rqcfg.brst_len = get_burst_len(desc, len); ++ desc->bytes_requested = len; ++ ++ desc->txd.flags = flags; ++ ++ return &desc->txd; ++} ++ ++static void __dma330_giveback_desc(struct dma330_dmac *dma330, ++ struct dma_dma330_desc *first) ++{ ++ unsigned long flags; ++ struct dma_dma330_desc *desc; ++ ++ if (!first) ++ return; ++ ++ spin_lock_irqsave(&dma330->pool_lock, flags); ++ ++ while (!list_empty(&first->node)) { ++ desc = list_entry(first->node.next, ++ struct dma_dma330_desc, node); ++ list_move_tail(&desc->node, &dma330->desc_pool); ++ } ++ ++ list_move_tail(&first->node, &dma330->desc_pool); ++ ++ spin_unlock_irqrestore(&dma330->pool_lock, flags); ++} ++ ++static struct dma_async_tx_descriptor * ++dma330_prep_slave_sg(struct dma_chan *chan, struct scatterlist *sgl, ++ unsigned int sg_len, enum dma_transfer_direction direction, ++ unsigned long flg, void *context) ++{ ++ struct dma_dma330_desc *first, *desc = NULL; ++ struct dma_dma330_chan *pch = to_pchan(chan); ++ struct scatterlist *sg; ++ int i; ++ dma_addr_t addr; ++ ++ if (unlikely(!pch || !sgl || !sg_len)) ++ return NULL; ++ ++ addr = pch->fifo_addr; ++ ++ first = NULL; ++ ++ for_each_sg(sgl, sg, sg_len, i) { ++ ++ desc = dma330_get_desc(pch); ++ if (!desc) { ++ struct dma330_dmac *dma330 = pch->dmac; ++ ++ dev_err(pch->dmac->ddma.dev, ++ "%s:%d Unable to fetch desc\n", ++ __func__, __LINE__); ++ __dma330_giveback_desc(dma330, first); ++ ++ return NULL; ++ } ++ ++ if (!first) ++ first = desc; ++ else ++ list_add_tail(&desc->node, &first->node); ++ ++ if (direction == DMA_MEM_TO_DEV) { ++ desc->rqcfg.src_inc = 1; ++ desc->rqcfg.dst_inc = 0; ++ fill_px(&desc->px, ++ addr, sg_dma_address(sg), sg_dma_len(sg)); ++ } else { ++ desc->rqcfg.src_inc = 0; ++ desc->rqcfg.dst_inc = 1; ++ fill_px(&desc->px, ++ sg_dma_address(sg), addr, sg_dma_len(sg)); ++ } ++ ++ desc->rqcfg.brst_size = pch->burst_sz; ++ 
desc->rqcfg.brst_len = pch->burst_len; ++ desc->rqtype = direction; ++ desc->bytes_requested = sg_dma_len(sg); ++ } ++ ++ /* Return the last desc in the chain */ ++ desc->txd.flags = flg; ++ return &desc->txd; ++} ++ ++static irqreturn_t dma330_irq_handler(int irq, void *data) ++{ ++ if (dma330_update(data)) ++ return IRQ_HANDLED; ++ else ++ return IRQ_NONE; ++} ++ ++#define DMA330_DMA_BUSWIDTHS \ ++ BIT(DMA_SLAVE_BUSWIDTH_UNDEFINED) | \ ++ BIT(DMA_SLAVE_BUSWIDTH_1_BYTE) | \ ++ BIT(DMA_SLAVE_BUSWIDTH_2_BYTES) | \ ++ BIT(DMA_SLAVE_BUSWIDTH_4_BYTES) | \ ++ BIT(DMA_SLAVE_BUSWIDTH_8_BYTES) ++ ++/* ++ * Runtime PM callbacks are provided by amba/bus.c driver. ++ * ++ * It is assumed here that IRQ safe runtime PM is chosen in probe and amba ++ * bus driver will only disable/enable the clock in runtime PM callbacks. ++ */ ++static int __maybe_unused iproc_dma330_suspend(struct device *dev) ++{ ++ struct amba_device *pcdev = to_amba_device(dev); ++ ++ pm_runtime_disable(dev); ++ ++ if (!pm_runtime_status_suspended(dev)) { ++ /* amba did not disable the clock */ ++ amba_pclk_disable(pcdev); ++ } ++ amba_pclk_unprepare(pcdev); ++ ++ return 0; ++} ++ ++static int __maybe_unused iproc_dma330_resume(struct device *dev) ++{ ++ struct amba_device *pcdev = to_amba_device(dev); ++ int ret; ++ ++ ret = amba_pclk_prepare(pcdev); ++ if (ret) ++ return ret; ++ ++ if (!pm_runtime_status_suspended(dev)) ++ ret = amba_pclk_enable(pcdev); ++ ++ pm_runtime_enable(dev); ++ ++ return ret; ++} ++ ++static SIMPLE_DEV_PM_OPS(iproc_dma330_pm, iproc_dma330_suspend, iproc_dma330_resume); ++ ++static int ++iproc_dma330_probe(struct amba_device *adev, const struct amba_id *id) ++{ ++ struct dma_dma330_platdata *pdat; ++ struct dma330_config *pcfg; ++ struct dma330_dmac *dma330; ++ struct dma_dma330_chan *pch, *_p; ++ struct dma_device *pd; ++ struct resource *res; ++ int i, ret, irq; ++ int num_chan; ++ ++ pdat = dev_get_platdata(&adev->dev); ++ ++ ret = dma_set_mask_and_coherent(&adev->dev, 
DMA_BIT_MASK(32)); ++ if (ret) ++ return ret; ++ ++ /* Allocate a new DMAC and its Channels */ ++ dma330 = devm_kzalloc(&adev->dev, sizeof(*dma330), GFP_KERNEL); ++ if (!dma330) { ++ dev_err(&adev->dev, "unable to allocate mem\n"); ++ return -ENOMEM; ++ } ++ ++ pd = &dma330->ddma; ++ pd->dev = &adev->dev; ++ ++ dma330->mcbufsz = pdat ? pdat->mcbuf_sz : 0; ++ ++ res = &adev->res; ++ dma330->base = devm_ioremap_resource(&adev->dev, res); ++ if (IS_ERR(dma330->base)) ++ return PTR_ERR(dma330->base); ++ ++ amba_set_drvdata(adev, dma330); ++ ++ for (i = 0; i < AMBA_NR_IRQS; i++) { ++ irq = adev->irq[i]; ++ if (irq) { ++ ret = devm_request_irq(&adev->dev, irq, ++ dma330_irq_handler, 0, ++ dev_name(&adev->dev), dma330); ++ if (ret) ++ return ret; ++ } else { ++ break; ++ } ++ } ++ ++ pcfg = &dma330->pcfg; ++ ++ pcfg->periph_id = adev->periphid; ++ ret = dma330_add(dma330); ++ if (ret) ++ return ret; ++ ++ INIT_LIST_HEAD(&dma330->desc_pool); ++ spin_lock_init(&dma330->pool_lock); ++ ++ /* Create a descriptor pool of default size */ ++ if (!add_desc(dma330, GFP_KERNEL, NR_DEFAULT_DESC)) ++ dev_warn(&adev->dev, "unable to allocate desc\n"); ++ ++ INIT_LIST_HEAD(&pd->channels); ++ ++ /* Initialize channel parameters */ ++ if (pdat) ++ num_chan = max_t(int, pdat->nr_valid_peri, pcfg->num_chan); ++ else ++ num_chan = max_t(int, pcfg->num_peri, pcfg->num_chan); ++ ++ dma330->num_peripherals = num_chan; ++ ++ dma330->peripherals = kzalloc(num_chan * sizeof(*pch), GFP_KERNEL); ++ if (!dma330->peripherals) { ++ ret = -ENOMEM; ++ dev_err(&adev->dev, "unable to allocate dma330->peripherals\n"); ++ goto probe_err2; ++ } ++ ++ for (i = 0; i < num_chan; i++) { ++ pch = &dma330->peripherals[i]; ++ if (!adev->dev.of_node) ++ pch->chan.private = pdat ? 
&pdat->peri_id[i] : NULL; ++ else ++ pch->chan.private = adev->dev.of_node; ++ ++ INIT_LIST_HEAD(&pch->submitted_list); ++ INIT_LIST_HEAD(&pch->work_list); ++ INIT_LIST_HEAD(&pch->completed_list); ++ spin_lock_init(&pch->lock); ++ pch->thread = NULL; ++ pch->chan.device = pd; ++ pch->dmac = dma330; ++ ++ /* Add the channel to the DMAC list */ ++ list_add_tail(&pch->chan.device_node, &pd->channels); ++ } ++ ++ if (pdat) { ++ pd->cap_mask = pdat->cap_mask; ++ } else { ++ dma_cap_set(DMA_MEMCPY, pd->cap_mask); ++ if (pcfg->num_peri) { ++ dma_cap_set(DMA_SLAVE, pd->cap_mask); ++ dma_cap_set(DMA_CYCLIC, pd->cap_mask); ++ dma_cap_set(DMA_PRIVATE, pd->cap_mask); ++ } ++ } ++ ++ pd->device_alloc_chan_resources = dma330_alloc_chan_resources; ++ pd->device_free_chan_resources = dma330_free_chan_resources; ++ pd->device_prep_dma_memcpy = dma330_prep_dma_memcpy; ++ pd->device_prep_dma_cyclic = dma330_prep_dma_cyclic; ++ pd->device_tx_status = dma330_tx_status; ++ pd->device_prep_slave_sg = dma330_prep_slave_sg; ++ pd->device_config = dma330_config; ++ pd->device_pause = dma330_pause; ++ pd->device_terminate_all = dma330_terminate_all; ++ pd->device_issue_pending = dma330_issue_pending; ++ pd->src_addr_widths = DMA330_DMA_BUSWIDTHS; ++ pd->dst_addr_widths = DMA330_DMA_BUSWIDTHS; ++ pd->directions = BIT(DMA_DEV_TO_MEM) | BIT(DMA_MEM_TO_DEV); ++ pd->residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT; ++ ++ ret = dma_async_device_register(pd); ++ if (ret) { ++ dev_err(&adev->dev, "unable to register DMAC\n"); ++ goto probe_err3; ++ } ++ ++ if (adev->dev.of_node) { ++ ret = of_dma_controller_register(adev->dev.of_node, ++ of_dma_dma330_xlate, dma330); ++ if (ret) { ++ dev_err(&adev->dev, ++ "unable to register DMA to the generic DT DMA helpers\n"); ++ } ++ } ++ ++ adev->dev.dma_parms = &dma330->dma_parms; ++ ++ /* ++ * This is the limit for transfers with a buswidth of 1, larger ++ * buswidths will have larger limits. 
++ */ ++ ret = dma_set_max_seg_size(&adev->dev, 1900800); ++ if (ret) ++ dev_err(&adev->dev, "unable to set the seg size\n"); ++ ++ ++ dev_info(&adev->dev, ++ "Loaded driver for DMA330 DMAC-%x\n", adev->periphid); ++ dev_info(&adev->dev, ++ "\tDBUFF-%ux%ubytes Num_Chans-%u Num_Peri-%u Num_Events-%u\n", ++ pcfg->data_buf_dep, pcfg->data_bus_width / 8, pcfg->num_chan, ++ pcfg->num_peri, pcfg->num_events); ++ ++ pm_runtime_irq_safe(&adev->dev); ++ pm_runtime_use_autosuspend(&adev->dev); ++ pm_runtime_set_autosuspend_delay(&adev->dev, DMA330_AUTOSUSPEND_DELAY); ++ pm_runtime_mark_last_busy(&adev->dev); ++ pm_runtime_put_autosuspend(&adev->dev); ++ ++ return 0; ++probe_err3: ++ /* Idle the DMAC */ ++ list_for_each_entry_safe(pch, _p, &dma330->ddma.channels, ++ chan.device_node) { ++ ++ /* Remove the channel */ ++ list_del(&pch->chan.device_node); ++ ++ /* Flush the channel */ ++ if (pch->thread) { ++ dma330_terminate_all(&pch->chan); ++ dma330_free_chan_resources(&pch->chan); ++ } ++ } ++probe_err2: ++ dma330_del(dma330); ++ ++ return ret; ++} ++ ++static int iproc_dma330_remove(struct amba_device *adev) ++{ ++ struct dma330_dmac *dma330 = amba_get_drvdata(adev); ++ struct dma_dma330_chan *pch, *_p; ++ ++ pm_runtime_get_noresume(dma330->ddma.dev); ++ ++ if (adev->dev.of_node) ++ of_dma_controller_free(adev->dev.of_node); ++ ++ dma_async_device_unregister(&dma330->ddma); ++ ++ /* Idle the DMAC */ ++ list_for_each_entry_safe(pch, _p, &dma330->ddma.channels, ++ chan.device_node) { ++ ++ /* Remove the channel */ ++ list_del(&pch->chan.device_node); ++ ++ /* Flush the channel */ ++ if (pch->thread) { ++ dma330_terminate_all(&pch->chan); ++ dma330_free_chan_resources(&pch->chan); ++ } ++ } ++ ++ dma330_del(dma330); ++ ++ return 0; ++} ++ ++static struct amba_id iproc_dma330_ids[] = { ++ { ++ .id = 0x00241330, ++ .mask = 0x00ffffff, ++ }, ++ { 0, 0 }, ++}; ++ ++MODULE_DEVICE_TABLE(amba, iproc_dma330_ids); ++ ++static struct amba_driver iproc_dma330_driver = { ++ .drv = { ++ 
.owner = THIS_MODULE, ++ .name = "dma-dma330", ++ .pm = &iproc_dma330_pm, ++ }, ++ .id_table = iproc_dma330_ids, ++ .probe = iproc_dma330_probe, ++ .remove = iproc_dma330_remove, ++}; ++ ++module_amba_driver(iproc_dma330_driver); ++ ++MODULE_DESCRIPTION("API Driver for DMA330 DMAC"); ++MODULE_LICENSE("GPL"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/gpio/Kconfig b/drivers/gpio/Kconfig +--- a/drivers/gpio/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/gpio/Kconfig 2017-11-09 17:53:27.600169000 +0800 +@@ -142,6 +142,15 @@ config GPIO_BRCMSTB + help + Say yes here to enable GPIO support for Broadcom STB (BCM7XXX) SoCs. + ++config GPIO_XGS_IPROC ++ tristate "BRCM XGS iProc GPIO support" ++ default y if ARCH_XGS_IPROC ++ depends on OF_GPIO && (ARCH_XGS_IPROC || COMPILE_TEST) ++ select GPIO_GENERIC ++ select GPIOLIB_IRQCHIP ++ help ++ Say yes here to enable GPIO support for Broadcom XGS iProc SoCs. 
++ + config GPIO_CLPS711X + tristate "CLPS711X GPIO support" + depends on ARCH_CLPS711X || COMPILE_TEST +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/gpio/Makefile b/drivers/gpio/Makefile +--- a/drivers/gpio/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/gpio/Makefile 2017-11-09 17:53:27.601169000 +0800 +@@ -25,6 +25,7 @@ obj-$(CONFIG_GPIO_ARIZONA) += gpio-arizo + obj-$(CONFIG_ATH79) += gpio-ath79.o + obj-$(CONFIG_GPIO_BCM_KONA) += gpio-bcm-kona.o + obj-$(CONFIG_GPIO_BRCMSTB) += gpio-brcmstb.o ++obj-$(CONFIG_GPIO_XGS_IPROC) += gpio-xgs-iproc.o + obj-$(CONFIG_GPIO_BT8XX) += gpio-bt8xx.o + obj-$(CONFIG_GPIO_CLPS711X) += gpio-clps711x.o + obj-$(CONFIG_GPIO_CS5535) += gpio-cs5535.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/gpio/gpio-xgs-iproc.c b/drivers/gpio/gpio-xgs-iproc.c +--- a/drivers/gpio/gpio-xgs-iproc.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/gpio/gpio-xgs-iproc.c 2017-11-09 17:53:27.757170000 +0800 +@@ -0,0 +1,815 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include "gpio-xgs-iproc.h" ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 37) ++#define irq_get_chip_data get_irq_chip_data ++#define irq_set_chip_data set_irq_chip_data ++#define irq_set_chip set_irq_chip ++#define irq_set_handler set_irq_handler ++#define status_use_accessors status ++#endif ++ ++ ++#define IPROC_CCA_INT_F_GPIOINT 1 ++#define IPROC_CCA_INT_MASK 0x24 ++#define IPROC_GPIO_CCA_DIN 0x0 ++#define IPROC_GPIO_CCA_INT_LEVEL 0x10 ++#define IPROC_GPIO_CCA_INT_LEVEL_MASK 0x14 ++#define IPROC_GPIO_CCA_INT_EVENT 0x18 ++#define IPROC_GPIO_CCA_INT_EVENT_MASK 0x1C ++#define IPROC_CCA_INT_STS 0x20 ++#define IPROC_GPIO_CCA_INT_EDGE 0x24 ++ 
++#define IPROC_GPIO_CCB_INT_TYPE 0xC ++#define IPROC_GPIO_CCB_INT_DE 0x10 ++#define IPROC_GPIO_CCB_INT_EDGE 0x14 ++#define IPROC_GPIO_CCB_INT_MSTAT 0x20 ++#define IPROC_GPIO_CCB_INT_CLR 0x24 ++#define IPROC_GPIO_CCB_INT_MASK 0x18 ++ ++ ++static unsigned int _iproc_gpio_readl(struct iproc_gpio_chip *chip, int reg) ++{ ++ return readl(chip->ioaddr + reg); ++} ++ ++static void _iproc_gpio_writel(struct iproc_gpio_chip *chip, unsigned int val, int reg) ++{ ++ writel(val, chip->ioaddr + reg); ++} ++ ++ ++/* ++@ pin : the actual pin number of the gpiochip ++*/ ++static int iproc_gpio_to_irq(struct iproc_gpio_chip *chip, unsigned int pin) { ++ return irq_linear_revmap(chip->irq_domain, pin - chip->pin_offset); ++} ++ ++/* ++returns the actual pin number of the gpiochip ++*/ ++static int iproc_irq_to_gpio(struct iproc_gpio_chip *chip, unsigned int irq) { ++ struct irq_data *data = irq_domain_get_irq_data(chip->irq_domain, irq); ++ ++ return data->hwirq + chip->pin_offset; ++} ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 5) ++static void iproc_gpio_irq_ack(unsigned int irq) ++{ ++#else ++static void iproc_gpio_irq_ack(struct irq_data *d) ++{ ++ unsigned int irq = d->irq; ++#endif ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ ++ if (ourchip) { ++ struct iproc_gpio_irqcfg *irqcfg = ourchip->irqcfg; ++ if (irqcfg && irqcfg->ack) ++ irqcfg->ack(irq); ++ ++ } ++} ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 5) ++static void iproc_gpio_irq_unmask(unsigned int irq) ++{ ++#else ++static void iproc_gpio_irq_unmask(struct irq_data *d) ++{ ++ unsigned int irq = d->irq; ++#endif ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ ++ if (ourchip) { ++ struct iproc_gpio_irqcfg *irqcfg = ourchip->irqcfg; ++ if (irqcfg && irqcfg->unmask) ++ irqcfg->unmask(irq); ++ } ++} ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 5) ++static void iproc_gpio_irq_mask(unsigned int irq) ++{ ++#else ++static void iproc_gpio_irq_mask(struct irq_data *d) ++{ ++ unsigned 
int irq = d->irq; ++#endif ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ ++ if (ourchip) { ++ struct iproc_gpio_irqcfg *irqcfg = ourchip->irqcfg; ++ if (irqcfg && irqcfg->mask) ++ irqcfg->mask(irq); ++ } ++} ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 5) ++static int iproc_gpio_irq_set_type(unsigned int irq, unsigned int type) ++{ ++#else ++static int iproc_gpio_irq_set_type(struct irq_data *d, unsigned int type) ++{ ++ unsigned int irq = d->irq; ++#endif ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ ++ if (ourchip) { ++ struct iproc_gpio_irqcfg *irqcfg = ourchip->irqcfg; ++ if (irqcfg && irqcfg->set_type) ++ return irqcfg->set_type(irq, type); ++ } ++ return -EINVAL; ++} ++ ++#if defined(IPROC_GPIO_CCA) ++static irqreturn_t iproc_gpio_irq_handler_cca(int irq, void *data) ++{ ++ struct iproc_gpio_chip *iproc_gpio = (struct iproc_gpio_chip *)data; ++ struct gpio_chip gc = iproc_gpio->chip; ++ int bit; ++ unsigned long int_bits = 0; ++ u32 int_status; ++ ++ /* go through the entire GPIOs and handle all interrupts */ ++ int_status = readl(iproc_gpio->intr_ioaddr + IPROC_CCA_INT_STS); ++ if (int_status & IPROC_CCA_INT_F_GPIOINT) { ++ unsigned int event, level; ++ ++ /* Get level and edge interrupts */ ++ event = readl(iproc_gpio->ioaddr + IPROC_GPIO_CCA_INT_EVENT_MASK) & readl(iproc_gpio->ioaddr + IPROC_GPIO_CCA_INT_EVENT); ++ level = readl(iproc_gpio->ioaddr + IPROC_GPIO_CCA_DIN) ^ ++ readl(iproc_gpio->ioaddr + IPROC_GPIO_CCA_INT_LEVEL); ++ level &= readl(iproc_gpio->ioaddr + IPROC_GPIO_CCA_INT_LEVEL_MASK); ++ int_bits = level | event; ++ ++ for_each_set_bit(bit, &int_bits, gc.ngpio) ++ generic_handle_irq( ++ irq_linear_revmap(iproc_gpio->irq_domain, bit)); ++ } ++ ++ return int_bits ? 
IRQ_HANDLED : IRQ_NONE; ++} ++ ++ ++static void iproc_gpio_irq_ack_cca(unsigned int irq) ++{ ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ int pin; ++ ++ pin = iproc_irq_to_gpio(ourchip, irq); ++ ++ if (ourchip->id == IPROC_GPIO_CCA_ID) { ++ unsigned int event_status, irq_type; ++ ++ event_status = 0; ++ irq_type = irq_get_trigger_type(irq); ++ if (irq_type & IRQ_TYPE_EDGE_BOTH) ++ { ++ event_status |= (1 << pin); ++ _iproc_gpio_writel(ourchip, event_status, ++ IPROC_GPIO_CCA_INT_EVENT); ++ } ++ ++ } ++} ++ ++static void iproc_gpio_irq_unmask_cca(unsigned int irq) ++{ ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ int pin; ++ unsigned int int_mask, irq_type; ++ ++ pin = iproc_irq_to_gpio(ourchip, irq); ++ irq_type = irq_get_trigger_type(irq); ++ ++ if (ourchip->id == IPROC_GPIO_CCA_ID) { ++ unsigned int event_mask; ++ ++ event_mask = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCA_INT_EVENT_MASK); ++ int_mask = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCA_INT_LEVEL_MASK); ++ ++ if (irq_type & IRQ_TYPE_EDGE_BOTH) { ++ event_mask |= 1 << pin; ++ _iproc_gpio_writel(ourchip, event_mask, ++ IPROC_GPIO_CCA_INT_EVENT_MASK); ++ } else { ++ int_mask |= 1 << pin; ++ _iproc_gpio_writel(ourchip, int_mask, ++ IPROC_GPIO_CCA_INT_LEVEL_MASK); ++ } ++ } ++ ++} ++ ++static void iproc_gpio_irq_mask_cca(unsigned int irq) ++{ ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ int pin; ++ unsigned int irq_type, int_mask; ++ ++ pin = iproc_irq_to_gpio(ourchip, irq); ++ irq_type = irq_get_trigger_type(irq); ++ ++ if (ourchip->id == IPROC_GPIO_CCA_ID) { ++ unsigned int event_mask; ++ ++ event_mask = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCA_INT_EVENT_MASK); ++ int_mask = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCA_INT_LEVEL_MASK); ++ ++ if (irq_type & IRQ_TYPE_EDGE_BOTH) { ++ event_mask &= ~(1 << pin); ++ _iproc_gpio_writel(ourchip, event_mask, ++ IPROC_GPIO_CCA_INT_EVENT_MASK); ++ } else { ++ int_mask &= ~(1 << pin); ++ _iproc_gpio_writel(ourchip, 
int_mask, ++ IPROC_GPIO_CCA_INT_LEVEL_MASK); ++ } ++ } ++} ++ ++static int iproc_gpio_irq_set_type_cca(unsigned int irq, unsigned int type) ++{ ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ int pin; ++ ++ pin = iproc_irq_to_gpio(ourchip, irq); ++ ++ if (ourchip->id == IPROC_GPIO_CCA_ID) { ++ unsigned int event_pol, int_pol; ++ ++ switch (type & IRQ_TYPE_SENSE_MASK) { ++ case IRQ_TYPE_EDGE_RISING: ++ event_pol = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCA_INT_EDGE); ++ event_pol &= ~(1 << pin); ++ _iproc_gpio_writel(ourchip, event_pol, IPROC_GPIO_CCA_INT_EDGE); ++ break; ++ case IRQ_TYPE_EDGE_FALLING: ++ event_pol = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCA_INT_EDGE); ++ event_pol |= (1 << pin); ++ _iproc_gpio_writel(ourchip, event_pol, IPROC_GPIO_CCA_INT_EDGE); ++ break; ++ case IRQ_TYPE_LEVEL_HIGH: ++ int_pol = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCA_INT_LEVEL); ++ int_pol &= ~(1 << pin); ++ _iproc_gpio_writel(ourchip, int_pol, IPROC_GPIO_CCA_INT_LEVEL); ++ break; ++ case IRQ_TYPE_LEVEL_LOW: ++ int_pol = _iproc_gpio_readl(ourchip,IPROC_GPIO_CCA_INT_LEVEL); ++ int_pol |= (1 << pin); ++ _iproc_gpio_writel(ourchip, int_pol, IPROC_GPIO_CCA_INT_LEVEL); ++ break; ++ default: ++ printk(KERN_ERR "unsupport irq type !\n"); ++ return -EINVAL; ++ } ++ } ++ ++ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) ++ irq_set_handler_locked(irq_get_irq_data(irq), handle_level_irq); ++ else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) ++ irq_set_handler_locked(irq_get_irq_data(irq), handle_edge_irq); ++ ++ return 0; ++} ++ ++struct iproc_gpio_irqcfg cca_gpio_irqcfg = { ++ /* Remove IRQF_NO_SUSPEND to be consistent with 8250_core.c setting ++ * since CCA gpio and uart share the same IRQ. 
++ */ ++ .flags = IRQF_SHARED, ++ .handler = iproc_gpio_irq_handler_cca, ++ .ack = iproc_gpio_irq_ack_cca, ++ .mask = iproc_gpio_irq_mask_cca, ++ .unmask = iproc_gpio_irq_unmask_cca, ++ .set_type = iproc_gpio_irq_set_type_cca, ++}; ++#endif /* IPROC_GPIO_CCA */ ++ ++#if defined(IPROC_GPIO_CCB) || defined(IPROC_GPIO_CCG) ++static irqreturn_t ++iproc_gpio_irq_handler_ccb(int irq, void *dev) ++{ ++ struct iproc_gpio_chip *ourchip = dev; ++ int iter, max_pin; ++ unsigned int val; ++ ++ val = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCB_INT_MSTAT); ++ if(!val){ ++ return IRQ_NONE; ++ } ++ ++ max_pin = ourchip->pin_offset + ourchip->chip.ngpio; ++ for (iter = ourchip->pin_offset; iter < max_pin; iter ++) { ++ if (val & (1 << iter)) { ++ //writel(1 << iter, ourchip->ioaddr + IPROC_GPIO_CCB_INT_CLR); ++ generic_handle_irq(iproc_gpio_to_irq(ourchip, iter)); ++ } ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++static void iproc_gpio_irq_ack_ccb(unsigned int irq) ++{ ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ int pin; ++ ++ pin = iproc_irq_to_gpio(ourchip, irq); ++ ++ if ((ourchip->id == IPROC_GPIO_CCB_ID) || ++ (ourchip->id == IPROC_GPIO_CCG_ID)) { ++ unsigned int int_clear = 0; ++ ++ int_clear |= (1 << pin); ++ _iproc_gpio_writel(ourchip, int_clear, IPROC_GPIO_CCB_INT_CLR); ++ ++ } ++} ++ ++static void iproc_gpio_irq_unmask_ccb(unsigned int irq) ++{ ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ int pin; ++ unsigned int int_mask; ++ ++ pin = iproc_irq_to_gpio(ourchip, irq); ++ ++ if ((ourchip->id == IPROC_GPIO_CCB_ID) || ++ (ourchip->id == IPROC_GPIO_CCG_ID)) { ++ int_mask = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCB_INT_MASK); ++ int_mask |= (1 << pin); ++ _iproc_gpio_writel(ourchip, int_mask, IPROC_GPIO_CCB_INT_MASK); ++ } ++ ++} ++ ++static void iproc_gpio_irq_mask_ccb(unsigned int irq) ++{ ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ int pin; ++ unsigned int int_mask; ++ ++ pin = iproc_irq_to_gpio(ourchip, irq); ++ ++ if 
((ourchip->id == IPROC_GPIO_CCB_ID) || ++ (ourchip->id == IPROC_GPIO_CCG_ID)) { ++ int_mask = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCB_INT_MASK); ++ int_mask &= ~(1 << pin); ++ _iproc_gpio_writel(ourchip, int_mask,IPROC_GPIO_CCB_INT_MASK); ++ } ++} ++ ++static int iproc_gpio_irq_set_type_ccb(unsigned int irq, unsigned int type) ++{ ++ struct iproc_gpio_chip *ourchip = irq_get_chip_data(irq); ++ int pin; ++ ++ ++ pin = iproc_irq_to_gpio(ourchip, irq); ++ ++ if ((ourchip->id == IPROC_GPIO_CCB_ID) || ++ (ourchip->id == IPROC_GPIO_CCG_ID)) { ++ unsigned int int_type, int_de, int_edge; ++ int_type = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCB_INT_TYPE); ++ int_edge = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCB_INT_EDGE); ++ switch (type) { ++ case IRQ_TYPE_EDGE_BOTH: ++ int_type &= ~(1 << pin); ++ int_de = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCB_INT_DE); ++ int_de |= (1 << pin); ++ _iproc_gpio_writel(ourchip, int_de, IPROC_GPIO_CCB_INT_DE); ++ break; ++ case IRQ_TYPE_EDGE_RISING: ++ int_type &= ~(1 << pin); ++ int_edge |= (1 << pin); ++ ++ int_de = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCB_INT_DE); ++ int_de &= ~(1 << pin); ++ _iproc_gpio_writel(ourchip, int_de, IPROC_GPIO_CCB_INT_DE); ++ break; ++ case IRQ_TYPE_EDGE_FALLING: ++ int_type &= ~(1 << pin); ++ int_edge &= ~(1 << pin); ++ ++ int_de = _iproc_gpio_readl(ourchip, IPROC_GPIO_CCB_INT_DE); ++ int_de &= ~(1 << pin); ++ _iproc_gpio_writel(ourchip, int_de, IPROC_GPIO_CCB_INT_DE); ++ break; ++ case IRQ_TYPE_LEVEL_HIGH: ++ int_type |= (1 << pin); ++ int_edge |= (1 << pin); ++ break; ++ case IRQ_TYPE_LEVEL_LOW: ++ int_type |= (1 << pin); ++ int_edge &= ~(1 << pin); ++ break; ++ default: ++ printk(KERN_ERR "unsupport irq type !\n"); ++ return -EINVAL; ++ } ++ _iproc_gpio_writel(ourchip, int_type, IPROC_GPIO_CCB_INT_TYPE); ++ _iproc_gpio_writel(ourchip, int_edge, IPROC_GPIO_CCB_INT_EDGE); ++ } ++ ++ if (type & (IRQ_TYPE_LEVEL_LOW | IRQ_TYPE_LEVEL_HIGH)) ++ irq_set_handler_locked(irq_get_irq_data(irq), handle_level_irq); ++ 
else if (type & (IRQ_TYPE_EDGE_FALLING | IRQ_TYPE_EDGE_RISING)) ++ irq_set_handler_locked(irq_get_irq_data(irq), handle_edge_irq); ++ ++ return 0; ++} ++ ++struct iproc_gpio_irqcfg ccb_gpio_irqcfg = { ++ .flags = IRQF_NO_SUSPEND, ++ .handler = iproc_gpio_irq_handler_ccb, ++ .ack = iproc_gpio_irq_ack_ccb, ++ .mask = iproc_gpio_irq_mask_ccb, ++ .unmask = iproc_gpio_irq_unmask_ccb, ++ .set_type = iproc_gpio_irq_set_type_ccb, ++}; ++#endif /* IPROC_GPIO_CCB || IPROC_GPIO_CCG*/ ++ ++static struct irq_chip iproc_gpio_irq_chip = { ++ .name = "IPROC-GPIO", ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 6, 5) ++ .ack = (void *) iproc_gpio_irq_ack, ++ .mask = (void *) iproc_gpio_irq_mask, ++ .unmask = (void *) iproc_gpio_irq_unmask, ++ .set_type = (void *) iproc_gpio_irq_set_type, ++#else ++ .irq_ack = (void *) iproc_gpio_irq_ack, ++ .irq_mask = (void *) iproc_gpio_irq_mask, ++ .irq_unmask = (void *) iproc_gpio_irq_unmask, ++ .irq_set_type = (void *) iproc_gpio_irq_set_type, ++#endif ++}; ++ ++ ++static int iproc_gpiolib_input(struct gpio_chip *chip, unsigned gpio) ++{ ++ struct iproc_gpio_chip *ourchip = to_iproc_gpio(chip); ++ unsigned long flags; ++ unsigned int val, nBitMask; ++ int reg_offset; ++ unsigned int pin_offset = gpio + ourchip->pin_offset; ++ ++ iproc_gpio_lock(ourchip, flags); ++ ++ nBitMask = 1 << pin_offset; ++ reg_offset = REGOFFSET_GPIO_EN; ++ ++ val = _iproc_gpio_readl(ourchip, reg_offset); ++ val &= ~nBitMask; ++ _iproc_gpio_writel(ourchip, val, reg_offset); ++ ++ iproc_gpio_unlock(ourchip, flags); ++ return 0; ++} ++ ++static int iproc_gpiolib_output(struct gpio_chip *chip, ++ unsigned gpio, int value) ++{ ++ struct iproc_gpio_chip *ourchip = to_iproc_gpio(chip); ++ unsigned long flags, val; ++ unsigned int nBitMask; ++ int reg_offset; ++ unsigned int pin_offset = gpio + ourchip->pin_offset; ++ ++ iproc_gpio_lock(ourchip, flags); ++ ++ nBitMask = 1 << pin_offset; ++ reg_offset = REGOFFSET_GPIO_EN; ++ ++ val = _iproc_gpio_readl(ourchip, reg_offset); ++ 
val |= nBitMask; ++ _iproc_gpio_writel(ourchip, val, reg_offset); ++ ++ iproc_gpio_unlock(ourchip, flags); ++ return 0; ++} ++ ++static void iproc_gpiolib_set(struct gpio_chip *chip, ++ unsigned gpio, int value) ++{ ++ struct iproc_gpio_chip *ourchip = to_iproc_gpio(chip); ++ unsigned long flags, val; ++ unsigned int nBitMask; ++ int reg_offset = 0; ++ unsigned int pin_offset = gpio + ourchip->pin_offset; ++ ++ iproc_gpio_lock(ourchip, flags); ++ ++ nBitMask = 1 << pin_offset; ++ ++ val = _iproc_gpio_readl(ourchip, REGOFFSET_GPIO_EN + reg_offset); ++ val &= nBitMask; ++ ++ /* this function only applies to output pin ++ */ ++ if (!val) ++ return; ++ ++ val = _iproc_gpio_readl(ourchip, REGOFFSET_GPIO_DOUT + reg_offset); ++ ++ if ( value == 0 ){ ++ /* Set the pin to zero */ ++ val &= ~nBitMask; ++ }else{ ++ /* Set the pin to 1 */ ++ val |= nBitMask; ++ } ++ _iproc_gpio_writel(ourchip, val, REGOFFSET_GPIO_DOUT + reg_offset); ++ ++ iproc_gpio_unlock(ourchip, flags); ++ ++} ++ ++ ++static int iproc_gpiolib_get(struct gpio_chip *chip, unsigned gpio) ++{ ++ struct iproc_gpio_chip *ourchip = to_iproc_gpio(chip); ++ unsigned long flags; ++ unsigned int val, offset, nBitMask; ++ int reg_offset = 0; ++ unsigned int pin_offset = gpio + ourchip->pin_offset; ++ ++ iproc_gpio_lock(ourchip, flags); ++ ++ nBitMask = 1 << pin_offset; ++ ++ /* determine the GPIO pin direction ++ */ ++ offset = _iproc_gpio_readl(ourchip, REGOFFSET_GPIO_EN + reg_offset); ++ offset &= nBitMask; ++ ++ if (offset){ ++ val = _iproc_gpio_readl(ourchip, REGOFFSET_GPIO_DOUT + reg_offset); ++ } else { ++ val = _iproc_gpio_readl(ourchip, REGOFFSET_GPIO_DIN + reg_offset); ++ } ++ ++ val >>= pin_offset; ++ ++ val &= 1; ++ ++ iproc_gpio_unlock(ourchip, flags); ++ ++ return val; ++} ++ ++/* ++@offset : the gpio pin index number from gpiolib view (minus gpio base only) ++*/ ++static int iproc_gpiolib_to_irq(struct gpio_chip *chip, ++ unsigned offset) ++{ ++ struct iproc_gpio_chip *ourchip = to_iproc_gpio(chip); ++ 
return irq_linear_revmap(ourchip->irq_domain, offset); ++} ++ ++static struct __initconst of_device_id bcm_iproc_gpio_of_match[] = { ++ { .compatible = "brcm,iproc-gpio,cca" }, ++ { .compatible = "brcm,iproc-gpio,ccb" }, ++ { .compatible = "brcm,iproc-gpio,ccg" }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, bcm_iproc_gpio_of_match); ++ ++void iproc_gpiolib_add(struct iproc_gpio_chip *chip) ++{ ++ struct gpio_chip *gc = &chip->chip; ++ int ret; ++ ++ BUG_ON(!gc->label); ++ BUG_ON(!gc->ngpio); ++ ++ /* ++ * The register offsets for data in, out, and enable are the same for ++ * all GPIO's. ++ */ ++ if (!gc->direction_input) ++ gc->direction_input = iproc_gpiolib_input; ++ if (!gc->direction_output) ++ gc->direction_output = iproc_gpiolib_output; ++ if (!gc->set) ++ gc->set = iproc_gpiolib_set; ++ if (!gc->get) ++ gc->get = iproc_gpiolib_get; ++ if (!gc->to_irq) ++ gc->to_irq = iproc_gpiolib_to_irq; ++ ++ ret = gpiochip_add(gc); ++ if (ret >= 0) ++ printk(KERN_INFO "iproc gpiochip add %s\n", gc->label); ++ ++ return; ++} ++ ++/* ++ * Handles CCA, CCB, and CCG type GPIO's and registers the gpio ++ * controller. ++ */ ++ ++static int iproc_gpio_probe(struct platform_device *pdev) ++{ ++ const struct of_device_id *match; ++ struct device_node *dn = pdev->dev.of_node; ++ struct iproc_gpio_chip *iproc_gpio; ++ u32 num_gpios, pin_base, pin_offset, count/*, irq_base*/; ++ int ret; ++ ++ match = of_match_device(bcm_iproc_gpio_of_match, &pdev->dev); ++ if (!match) { ++ dev_err(&pdev->dev, "Failed to find gpio controller\n"); ++ return -ENODEV; ++ } ++ ++ iproc_gpio = devm_kzalloc(&pdev->dev, sizeof(*iproc_gpio), GFP_KERNEL); ++ if (!iproc_gpio) { ++ dev_err(&pdev->dev, "Error allocating memory\n"); ++ return -ENOMEM; ++ } ++ ++ platform_set_drvdata(pdev, iproc_gpio); ++ ++ /* Determine type of gpio controller to allocate. 
*/ ++#if defined(IPROC_GPIO_CCA) ++ if (strstr(match->compatible, "cca")) { ++ iproc_gpio->chip.label = "gpio_cca"; ++ iproc_gpio->id = IPROC_GPIO_CCA_ID; ++ iproc_gpio->irqcfg = &cca_gpio_irqcfg; ++ ++ iproc_gpio->intr_ioaddr = of_iomap(dn, 1); ++ if (!iproc_gpio->intr_ioaddr) { ++ dev_err(&pdev->dev, "can't iomap gpio interrupt base address\n"); ++ return -ENOMEM; ++ } ++ ++ dev_info(&pdev->dev, "%s intr_ioaddr: %p\n", ++ iproc_gpio->chip.label, iproc_gpio->intr_ioaddr); ++ } ++ else ++#endif ++#if defined(IPROC_GPIO_CCB) ++ if (strstr(match->compatible, "ccb")) { ++ iproc_gpio->chip.label = "gpio_ccb"; ++ iproc_gpio->id = IPROC_GPIO_CCB_ID; ++ iproc_gpio->irqcfg = &ccb_gpio_irqcfg; ++ } ++ else ++#endif ++#if defined(IPROC_GPIO_CCG) ++ if (strstr(match->compatible, "ccg")) { ++ iproc_gpio->chip.label = "gpio_ccg"; ++ iproc_gpio->id = IPROC_GPIO_CCG_ID; ++ iproc_gpio->irqcfg = &ccb_gpio_irqcfg; ++ } ++ else ++#endif ++ { ++ dev_err(&pdev->dev, "Error parsing device tree of GPIO\n"); ++ return -ENODEV; ++ } ++ ++ /* Map gpio base ioaddr address. */ ++ iproc_gpio->ioaddr = of_iomap(dn, 0); ++ if (!iproc_gpio->ioaddr) { ++ dev_err(&pdev->dev, "can't iomap gpio base address\n"); ++ return -ENOMEM; ++ } ++ dev_info(&pdev->dev, "%s iaddr: %p\n", iproc_gpio->chip.label, iproc_gpio->ioaddr); ++ ++ if (of_property_read_u32(dn, "pin-base", &pin_base)) { ++ dev_err(&pdev->dev, "Missing pin-base property\n"); ++ return -EINVAL; ++ } ++ iproc_gpio->chip.base = pin_base; ++ ++ /* get pin_offset */ ++ if (of_property_read_u32(dn, "pin-offset", &pin_offset)) { ++ dev_err(&pdev->dev, "Missing pin-offset property\n"); ++ return -EINVAL; ++ } ++ iproc_gpio->pin_offset = pin_offset; ++ ++ /* Get number of GPIO's from device tree for gpiolib. */ ++ if (of_property_read_u32(dn, "ngpios", &num_gpios)) { ++ dev_err(&pdev->dev, "Missing ngpios property\n"); ++ return -EINVAL; ++ } ++ iproc_gpio->chip.ngpio = num_gpios; ++ ++ /* Register controller with gpiolib. 
*/ ++ iproc_gpio->chip.dev = &pdev->dev; ++ iproc_gpiolib_add(iproc_gpio); ++ ++ /* Get interrupt number from device tree. */ ++ iproc_gpio->irq = irq_of_parse_and_map(dn, 0); ++ ++ /* Install ISR for this GPIO controller. */ ++ if (iproc_gpio->irq > 0) { ++ /* Create irq domain so that each pin can be assigned an IRQ.*/ ++ iproc_gpio->irq_domain = irq_domain_add_linear(dn, num_gpios, ++ &irq_domain_simple_ops, iproc_gpio); ++ ++ if (!iproc_gpio->irq_domain) { ++ dev_err(&pdev->dev, "Couldn't allocate IRQ domain\n"); ++ return -ENXIO; ++ } ++ ++ /* Map each gpio to an IRQ and set the handler for gpiolib. */ ++ for (count = 0; count < num_gpios; count++) { ++ int irq; ++ ++ irq = irq_create_mapping(iproc_gpio->irq_domain, count); ++ irq_set_chip_and_handler(irq, &iproc_gpio_irq_chip, ++ handle_simple_irq); ++ irq_set_chip_data(irq, iproc_gpio); ++ } ++ ++ /* Enable GPIO interrupts in CCA interrupt mask. */ ++#if defined(IPROC_GPIO_CCA) ++ if (iproc_gpio->id == IPROC_GPIO_CCA_ID) { ++ unsigned int val; ++ val = readl(iproc_gpio->intr_ioaddr + IPROC_CCA_INT_MASK); ++ val |= IPROC_CCA_INT_F_GPIOINT; ++ writel(val, iproc_gpio->intr_ioaddr + IPROC_CCA_INT_MASK); ++ } ++#endif /* IPROC_GPIO_CCA */ ++ if (iproc_gpio->irqcfg) { ++ struct iproc_gpio_irqcfg *irqcfg = iproc_gpio->irqcfg; ++ if (irqcfg->handler) { ++ ret = request_irq(iproc_gpio->irq, ++ irqcfg->handler, irqcfg->flags, ++ iproc_gpio->chip.label, iproc_gpio); ++ if (ret) { ++ printk(KERN_ERR "Unable to request IRQ%d: %d\n", iproc_gpio->irq, ret); ++ return -ENODEV; ++ } ++ } ++ else ++ printk(KERN_ERR "%s is added without isr!\n", iproc_gpio->chip.label); ++ } ++ } ++ else ++ dev_warn(&pdev->dev, "IRQ not specified. 
No ISR installed\n"); ++ ++ return 0; ++} ++ ++static int __exit iproc_gpio_remove(struct platform_device *pdev) ++{ ++ struct iproc_gpio_chip *iproc_gpio; ++ ++ iproc_gpio = platform_get_drvdata(pdev); ++ if (iproc_gpio == NULL) ++ return -ENODEV; ++ ++ if (iproc_gpio->intr_ioaddr) { ++#if defined(IPROC_GPIO_CCA) ++ if (iproc_gpio->id == IPROC_GPIO_CCA_ID) { ++ unsigned int val; ++ val = readl(iproc_gpio->intr_ioaddr + IPROC_CCA_INT_MASK); ++ val &= ~(IPROC_CCA_INT_F_GPIOINT); ++ writel(val, iproc_gpio->intr_ioaddr + IPROC_CCA_INT_MASK); ++ } ++#endif ++ } ++ ++ gpiochip_remove(&iproc_gpio->chip); ++ ++ return 0; ++} ++ ++static struct platform_driver bcm_iproc_gpio_driver = { ++ .driver = { ++ .name = "iproc-gpio", ++ .owner = THIS_MODULE, ++ .of_match_table = bcm_iproc_gpio_of_match, ++ }, ++ .probe = iproc_gpio_probe, ++ .remove = iproc_gpio_remove, ++}; ++ ++module_platform_driver(bcm_iproc_gpio_driver); ++ ++MODULE_DESCRIPTION("IPROC GPIO driver"); ++MODULE_LICENSE("GPL"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/gpio/gpio-xgs-iproc.h b/drivers/gpio/gpio-xgs-iproc.h +--- a/drivers/gpio/gpio-xgs-iproc.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/gpio/gpio-xgs-iproc.h 2017-11-09 17:53:27.757186000 +0800 +@@ -0,0 +1,61 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++#ifndef __IPROC_PLAT_GPIO_H ++#define __IPROC_PLAT_GPIO_H ++ ++#if defined(CONFIG_MACH_IPROC_P7) ++#define IPROC_GPIO_CCG ++#else ++#define IPROC_GPIO_CCA ++#define IPROC_GPIO_CCB ++#endif ++ ++#define IPROC_GPIO_REG_SIZE (0x50) ++ ++#define REGOFFSET_GPIO_DIN 0x000 /* GPIO Data in register */ ++#define REGOFFSET_GPIO_DOUT 0x004 /* GPIO Data out register */ ++#define REGOFFSET_GPIO_EN 0x008 /* GPIO output enable register */ ++ ++#define IPROC_GPIO_CCA_ID (0) ++#define IPROC_GPIO_CCB_ID (1) ++#define IPROC_GPIO_CCG_ID (2) ++ ++struct iproc_gpio_irqcfg { ++ unsigned long flags; ++ 
irqreturn_t (*handler)(int irq, void *dev); ++ void (*ack)(unsigned int irq); ++ void (*unmask)(unsigned int irq); ++ void (*mask)(unsigned int irq); ++ int (*set_type)(unsigned int irq, unsigned int type); ++}; ++ ++struct iproc_gpio_chip { ++ int id; ++ struct gpio_chip chip; ++ struct iproc_gpio_cfg *config; ++ void __iomem *ioaddr; ++ void __iomem *intr_ioaddr; ++ spinlock_t lock; ++ struct irq_domain *irq_domain; ++ struct resource * resource; ++ int irq; ++ struct iproc_gpio_irqcfg *irqcfg; ++ int pin_offset; ++}; ++ ++ ++static inline struct iproc_gpio_chip *to_iproc_gpio(struct gpio_chip *gpc) ++{ ++ return container_of(gpc, struct iproc_gpio_chip, chip); ++} ++ ++ ++/* locking wrappers to deal with multiple access to the same gpio bank */ ++#define iproc_gpio_lock(_oc, _fl) spin_lock_irqsave(&(_oc)->lock, _fl) ++#define iproc_gpio_unlock(_oc, _fl) spin_unlock_irqrestore(&(_oc)->lock, _fl) ++ ++extern void iproc_gpiolib_add(struct iproc_gpio_chip *chip); ++ ++#endif +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig +--- a/drivers/i2c/busses/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/i2c/busses/Kconfig 2017-11-09 17:53:33.506215000 +0800 +@@ -385,6 +385,26 @@ config I2C_BCM_IPROC + + If you don't know what to do here, say N. + ++config I2C_XGS_IPROC ++ tristate "Broadcom XGS iProc I2C controller" ++ depends on ARCH_XGS_IPROC ++ default ARCH_XGS_IPROC ++ help ++ If you say yes to this option, support will be included for the ++ Broadcom XGS iProc I2C controller. ++ ++ If you don't know what to do here, say N. ++ ++config SMBUS_XGS_IPROC ++ tristate "Broadcom XGS iProc SMBUS controller" ++ depends on ARCH_XGS_IPROC ++ default !I2C_XGS_IPROC ++ help ++ If you say yes to this option, support will be included for the ++ Broadcom XGS iProc SMBUS controller. ++ ++ If you don't know what to do here, say N. 
++ + config I2C_BCM_KONA + tristate "BCM Kona I2C adapter" + depends on ARCH_BCM_MOBILE +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/i2c/busses/Makefile b/drivers/i2c/busses/Makefile +--- a/drivers/i2c/busses/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/i2c/busses/Makefile 2017-11-09 17:53:33.507214000 +0800 +@@ -92,6 +92,8 @@ obj-$(CONFIG_I2C_UNIPHIER_F) += i2c-unip + obj-$(CONFIG_I2C_VERSATILE) += i2c-versatile.o + obj-$(CONFIG_I2C_WMT) += i2c-wmt.o + obj-$(CONFIG_I2C_OCTEON) += i2c-octeon.o ++obj-$(CONFIG_I2C_XGS_IPROC) += i2c-xgs-iproc.o ++obj-$(CONFIG_SMBUS_XGS_IPROC) += xgs_iproc_smbus.o + obj-$(CONFIG_I2C_XILINX) += i2c-xiic.o + obj-$(CONFIG_I2C_XLR) += i2c-xlr.o + obj-$(CONFIG_I2C_XLP9XX) += i2c-xlp9xx.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/i2c/busses/i2c-xgs-iproc.c b/drivers/i2c/busses/i2c-xgs-iproc.c +--- a/drivers/i2c/busses/i2c-xgs-iproc.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/i2c/busses/i2c-xgs-iproc.c 2017-11-09 17:53:33.700222000 +0800 +@@ -0,0 +1,598 @@ ++/* ++ * Copyright (C) 2014 Broadcom Corporation ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation version 2. ++ * ++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any ++ * kind, whether express or implied; without even the implied warranty ++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define CFG_OFFSET 0x00 ++#define CFG_RESET_SHIFT 31 ++#define CFG_EN_SHIFT 30 ++#define CFG_M_RETRY_CNT_SHIFT 16 ++#define CFG_M_RETRY_CNT_MASK 0x0f ++ ++#define TIM_CFG_OFFSET 0x04 ++#define TIM_CFG_MODE_400_SHIFT 31 ++ ++#define M_FIFO_CTRL_OFFSET 0x0c ++#define M_FIFO_RX_FLUSH_SHIFT 31 ++#define M_FIFO_TX_FLUSH_SHIFT 30 ++#define M_FIFO_RX_CNT_SHIFT 16 ++#define M_FIFO_RX_CNT_MASK 0x7f ++#define M_FIFO_RX_THLD_SHIFT 8 ++#define M_FIFO_RX_THLD_MASK 0x3f ++ ++#define M_CMD_OFFSET 0x30 ++#define M_CMD_START_BUSY_SHIFT 31 ++#define M_CMD_STATUS_SHIFT 25 ++#define M_CMD_STATUS_MASK 0x07 ++#define M_CMD_STATUS_SUCCESS 0x0 ++#define M_CMD_STATUS_LOST_ARB 0x1 ++#define M_CMD_STATUS_NACK_ADDR 0x2 ++#define M_CMD_STATUS_NACK_DATA 0x3 ++#define M_CMD_STATUS_TIMEOUT 0x4 ++#define M_CMD_PROTOCOL_SHIFT 9 ++#define M_CMD_PROTOCOL_MASK 0xf ++#define M_CMD_PROTOCOL_BLK_WR 0x7 ++#define M_CMD_PROTOCOL_BLK_RD 0x8 ++#define M_CMD_PEC_SHIFT 8 ++#define M_CMD_RD_CNT_SHIFT 0 ++#define M_CMD_RD_CNT_MASK 0xff ++ ++#define IE_OFFSET 0x38 ++#define IE_M_RX_FIFO_FULL_SHIFT 31 ++#define IE_M_RX_THLD_SHIFT 30 ++#define IE_M_START_BUSY_SHIFT 28 ++ ++#define IS_OFFSET 0x3c ++#define IS_M_RX_FIFO_FULL_SHIFT 31 ++#define IS_M_RX_THLD_SHIFT 30 ++#define IS_M_START_BUSY_SHIFT 28 ++ ++#define M_TX_OFFSET 0x40 ++#define M_TX_WR_STATUS_SHIFT 31 ++#define M_TX_DATA_SHIFT 0 ++#define M_TX_DATA_MASK 0xff ++ ++#define M_RX_OFFSET 0x44 ++#define M_RX_STATUS_SHIFT 30 ++#define M_RX_STATUS_MASK 0x03 ++#define M_RX_PEC_ERR_SHIFT 29 ++#define M_RX_DATA_SHIFT 0 ++#define M_RX_DATA_MASK 0xff ++ ++#define I2C_TIMEOUT_MESC 100 ++ ++#define M_TX_RX_FIFO_SIZE 64 ++#define I2C_MAX_DATA_READ_LEN (M_TX_RX_FIFO_SIZE - 1) ++#define I2C_MAX_DATA_WRITE_LEN (M_TX_RX_FIFO_SIZE - 1) ++ ++/* ++ * Enable support of EEPROM I2C devices with 2 byte addressing mode and page ++ * size >= 64B. 
++ */ ++#define CONFIG_ENABLE_WRITE_MSG_SPLIT 1 ++ ++ ++enum bus_speed_index { ++ I2C_SPD_100K = 0, ++ I2C_SPD_400K, ++}; ++ ++struct bcm_iproc_i2c_dev { ++ struct device *device; ++ int irq; ++ ++ void __iomem *base; ++ ++ struct i2c_adapter adapter; ++ unsigned int bus_speed; ++ ++ struct completion done; ++ int xfer_is_done; ++}; ++ ++/* ++ * Can be expanded in the future if more interrupt status bits are utilized ++ */ ++#define ISR_MASK (1 << IS_M_START_BUSY_SHIFT) ++ ++static irqreturn_t bcm_iproc_i2c_isr(int irq, void *data) ++{ ++ struct bcm_iproc_i2c_dev *iproc_i2c = data; ++ u32 status = readl(iproc_i2c->base + IS_OFFSET); ++ ++ status &= ISR_MASK; ++ ++ if (!status) ++ return IRQ_NONE; ++ ++ writel(status, iproc_i2c->base + IS_OFFSET); ++ iproc_i2c->xfer_is_done = 1; ++ complete_all(&iproc_i2c->done); ++ ++ return IRQ_HANDLED; ++} ++ ++static int bcm_iproc_i2c_check_status(struct bcm_iproc_i2c_dev *iproc_i2c, ++ struct i2c_msg *msg) ++{ ++ u32 val; ++ ++ val = readl(iproc_i2c->base + M_CMD_OFFSET); ++ val = (val >> M_CMD_STATUS_SHIFT) & M_CMD_STATUS_MASK; ++ ++ switch (val) { ++ case M_CMD_STATUS_SUCCESS: ++ return 0; ++ ++ case M_CMD_STATUS_LOST_ARB: ++ dev_dbg(iproc_i2c->device, "lost bus arbitration\n"); ++ return -EAGAIN; ++ ++ case M_CMD_STATUS_NACK_ADDR: ++ dev_dbg(iproc_i2c->device, "NAK addr:0x%02x\n", msg->addr); ++ return -ENXIO; ++ ++ case M_CMD_STATUS_NACK_DATA: ++ dev_dbg(iproc_i2c->device, "NAK data\n"); ++ return -ENXIO; ++ ++ case M_CMD_STATUS_TIMEOUT: ++ dev_dbg(iproc_i2c->device, "bus timeout\n"); ++ return -ETIMEDOUT; ++ ++ default: ++ dev_dbg(iproc_i2c->device, "unknown error code=%d\n", val); ++ return -EIO; ++ } ++} ++ ++static int bcm_iproc_i2c_xfer_single_msg(struct bcm_iproc_i2c_dev *iproc_i2c, ++ struct i2c_msg *msg) ++{ ++ int ret, i; ++ u8 addr; ++ u32 val; ++ unsigned long time_left = msecs_to_jiffies(I2C_TIMEOUT_MESC); ++ ++ /* check if bus is busy */ ++ if (!!(readl(iproc_i2c->base + M_CMD_OFFSET) & ++ 
BIT(M_CMD_START_BUSY_SHIFT))) { ++ dev_warn(iproc_i2c->device, "bus is busy\n"); ++ return -EBUSY; ++ } ++ ++ /* format and load slave address into the TX FIFO */ ++ addr = msg->addr << 1 | (msg->flags & I2C_M_RD ? 1 : 0); ++ writel(addr, iproc_i2c->base + M_TX_OFFSET); ++ ++ /* for a write transaction, load data into the TX FIFO */ ++ if (!(msg->flags & I2C_M_RD)) { ++ for (i = 0; i < msg->len; i++) { ++ val = msg->buf[i]; ++ ++ /* mark the last byte */ ++ if (i == msg->len - 1) ++ val |= 1 << M_TX_WR_STATUS_SHIFT; ++ ++ writel(val, iproc_i2c->base + M_TX_OFFSET); ++ } ++ } ++ ++ /* mark as incomplete before starting the transaction */ ++ reinit_completion(&iproc_i2c->done); ++ iproc_i2c->xfer_is_done = 0; ++ ++ /* ++ * Enable the "start busy" interrupt, which will be triggered after the ++ * transaction is done, i.e., the internal start_busy bit, transitions ++ * from 1 to 0. ++ */ ++ writel(1 << IE_M_START_BUSY_SHIFT, iproc_i2c->base + IE_OFFSET); ++ ++ /* ++ * Now we can activate the transfer. 
For a read operation, specify the ++ * number of bytes to read ++ */ ++ val = 1 << M_CMD_START_BUSY_SHIFT; ++ if (msg->flags & I2C_M_RD) { ++ val |= (M_CMD_PROTOCOL_BLK_RD << M_CMD_PROTOCOL_SHIFT) | ++ (msg->len << M_CMD_RD_CNT_SHIFT); ++ } else { ++ val |= (M_CMD_PROTOCOL_BLK_WR << M_CMD_PROTOCOL_SHIFT); ++ } ++ writel(val, iproc_i2c->base + M_CMD_OFFSET); ++ ++ time_left = wait_for_completion_timeout(&iproc_i2c->done, time_left); ++ ++ /* disable all interrupts */ ++ writel(0, iproc_i2c->base + IE_OFFSET); ++ /* read it back to flush the write */ ++ readl(iproc_i2c->base + IE_OFFSET); ++ ++ /* make sure the interrupt handler isn't running */ ++ synchronize_irq(iproc_i2c->irq); ++ ++ if (!time_left && !iproc_i2c->xfer_is_done) { ++ dev_err(iproc_i2c->device, "transaction timed out\n"); ++ ++ /* flush FIFOs */ ++ val = (1 << M_FIFO_RX_FLUSH_SHIFT) | ++ (1 << M_FIFO_TX_FLUSH_SHIFT); ++ writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET); ++ return -ETIMEDOUT; ++ } ++ ++ ret = bcm_iproc_i2c_check_status(iproc_i2c, msg); ++ if (ret) { ++ /* flush both TX/RX FIFOs */ ++ val = (1 << M_FIFO_RX_FLUSH_SHIFT) | ++ (1 << M_FIFO_TX_FLUSH_SHIFT); ++ writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET); ++ return ret; ++ } ++ ++ /* ++ * For a read operation, we now need to load the data from FIFO ++ * into the memory buffer ++ */ ++ if (msg->flags & I2C_M_RD) { ++ for (i = 0; i < msg->len; i++) { ++ msg->buf[i] = (readl(iproc_i2c->base + M_RX_OFFSET) >> ++ M_RX_DATA_SHIFT) & M_RX_DATA_MASK; ++ } ++ } ++ ++ return 0; ++} ++ ++static int bcm_iproc_i2c_xfer(struct i2c_adapter *adapter, ++ struct i2c_msg msgs[], int num) ++{ ++ struct bcm_iproc_i2c_dev *iproc_i2c = i2c_get_adapdata(adapter); ++ int ret, i; ++ int xfer_msg_len, xfer_msg_len_max; ++ u8 addr_h, addr_l; ++ ++ /* go through all messages */ ++ for (i = 0; i < num; i++) { ++ xfer_msg_len = msgs[i].len; ++ if (msgs[i].flags & I2C_M_RD) ++ xfer_msg_len_max = I2C_MAX_DATA_READ_LEN; ++ else ++ xfer_msg_len_max = 
I2C_MAX_DATA_WRITE_LEN; ++ ++ while (xfer_msg_len) { ++ if (xfer_msg_len > xfer_msg_len_max) ++ msgs[i].len = xfer_msg_len_max; ++ ret = bcm_iproc_i2c_xfer_single_msg(iproc_i2c, &msgs[i]); ++ if (ret) { ++ dev_dbg(iproc_i2c->device, "xfer failed\n"); ++ return ret; ++ } ++ ++ if (msgs[i].len == xfer_msg_len_max) { ++ xfer_msg_len -= xfer_msg_len_max; ++ if (xfer_msg_len == 0) ++ break; ++ /* Keep the addr offset for later use */ ++ addr_h = *(msgs[i].buf); ++ addr_l = *(msgs[i].buf + 1); ++ ++ msgs[i].len = xfer_msg_len; ++ msgs[i].buf += xfer_msg_len_max; ++ ++#if defined(CONFIG_ENABLE_WRITE_MSG_SPLIT) ++ if (!(msgs[i].flags & I2C_M_RD)) { ++ /* ++ * For write transfer with len >= 64B, ++ * assuming 2 byte addressing should be ++ * reasonable. ++ */ ++ xfer_msg_len += 2; ++ msgs[i].len = xfer_msg_len; ++ ++ /* ++ * Append new 2-byte address offset. ++ * The upper byte should be unchanged. ++ * The lower byte is increased by ++ * actually written bytes: ++ * (xfer_msg_len_max - 2) ++ */ ++ msgs[i].buf -= 2; ++ *(msgs[i].buf) = addr_h; ++ *(msgs[i].buf + 1) = addr_l - 2 + ++ xfer_msg_len_max; ++ ++ /* ++ * Wait some time so that EEPROM ++ * is ready to respond after previous ++ * partial page write. ++ */ ++ mdelay(10); ++ } ++#endif /* CONFIG_ENABLE_WRITE_MSG_SPLIT */ ++ } else { ++ /* ++ * msgs[i] is transfered completely, ++ * if msgs[i].len is less than xfer_msg_len_max. ++ */ ++ break; ++ } ++ } /* while */ ++ } /* for */ ++ ++ return num; ++} ++ ++ ++static uint32_t bcm_iproc_i2c_functionality(struct i2c_adapter *adap) ++{ ++ return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL; ++} ++ ++static const struct i2c_algorithm bcm_iproc_algo = { ++ .master_xfer = bcm_iproc_i2c_xfer, ++ .functionality = bcm_iproc_i2c_functionality, ++}; ++ ++/* ++ * Don't limit the max write length for Linux I2C core, if support of ++ * write msg split is enabled. ++ * Read msg split is support, so max_read_len is commented out. 
++ */ ++#if !defined(CONFIG_ENABLE_WRITE_MSG_SPLIT) ++static struct i2c_adapter_quirks bcm_iproc_i2c_quirks = { ++ /* need to reserve one byte in the FIFO for the slave address */ ++ //.max_read_len = M_TX_RX_FIFO_SIZE - 1, ++ .max_write_len = M_TX_RX_FIFO_SIZE - 1, ++}; ++#endif ++ ++static int bcm_iproc_i2c_cfg_speed(struct bcm_iproc_i2c_dev *iproc_i2c) ++{ ++ unsigned int bus_speed; ++ u32 val; ++ int ret = of_property_read_u32(iproc_i2c->device->of_node, ++ "clock-frequency", &bus_speed); ++ if (ret < 0) { ++ dev_info(iproc_i2c->device, ++ "unable to interpret clock-frequency DT property\n"); ++ bus_speed = 100000; ++ } ++ ++ if (bus_speed < 100000) { ++ dev_err(iproc_i2c->device, "%d Hz bus speed not supported\n", ++ bus_speed); ++ dev_err(iproc_i2c->device, ++ "valid speeds are 100khz and 400khz\n"); ++ return -EINVAL; ++ } else if (bus_speed < 400000) { ++ bus_speed = 100000; ++ } else { ++ bus_speed = 400000; ++ } ++ ++ iproc_i2c->bus_speed = bus_speed; ++ val = readl(iproc_i2c->base + TIM_CFG_OFFSET); ++ val &= ~(1 << TIM_CFG_MODE_400_SHIFT); ++ val |= (bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT; ++ writel(val, iproc_i2c->base + TIM_CFG_OFFSET); ++ ++ dev_info(iproc_i2c->device, "bus set to %u Hz\n", bus_speed); ++ ++ return 0; ++} ++ ++static int bcm_iproc_i2c_init(struct bcm_iproc_i2c_dev *iproc_i2c) ++{ ++ u32 val; ++ ++ /* put controller in reset */ ++ val = readl(iproc_i2c->base + CFG_OFFSET); ++ val |= 1 << CFG_RESET_SHIFT; ++ val &= ~(1 << CFG_EN_SHIFT); ++ writel(val, iproc_i2c->base + CFG_OFFSET); ++ ++ /* wait 100 usec per spec */ ++ udelay(100); ++ ++ /* bring controller out of reset */ ++ val &= ~(1 << CFG_RESET_SHIFT); ++ writel(val, iproc_i2c->base + CFG_OFFSET); ++ ++ /* flush TX/RX FIFOs and set RX FIFO threshold to zero */ ++ val = (1 << M_FIFO_RX_FLUSH_SHIFT) | (1 << M_FIFO_TX_FLUSH_SHIFT); ++ writel(val, iproc_i2c->base + M_FIFO_CTRL_OFFSET); ++ ++ /* disable all interrupts */ ++ writel(0, iproc_i2c->base + IE_OFFSET); ++ ++ /* clear 
all pending interrupts */ ++ writel(0xffffffff, iproc_i2c->base + IS_OFFSET); ++ ++ return 0; ++} ++ ++static void bcm_iproc_i2c_enable_disable(struct bcm_iproc_i2c_dev *iproc_i2c, ++ bool enable) ++{ ++ u32 val; ++ ++ val = readl(iproc_i2c->base + CFG_OFFSET); ++ if (enable) ++ val |= BIT(CFG_EN_SHIFT); ++ else ++ val &= ~BIT(CFG_EN_SHIFT); ++ writel(val, iproc_i2c->base + CFG_OFFSET); ++} ++ ++static int bcm_iproc_i2c_probe(struct platform_device *pdev) ++{ ++ int irq, ret = 0; ++ struct bcm_iproc_i2c_dev *iproc_i2c; ++ struct i2c_adapter *adap; ++ struct resource *res; ++ ++ iproc_i2c = devm_kzalloc(&pdev->dev, sizeof(*iproc_i2c), ++ GFP_KERNEL); ++ if (!iproc_i2c) ++ return -ENOMEM; ++ ++ platform_set_drvdata(pdev, iproc_i2c); ++ iproc_i2c->device = &pdev->dev; ++ init_completion(&iproc_i2c->done); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ iproc_i2c->base = devm_ioremap_resource(iproc_i2c->device, res); ++ if (IS_ERR(iproc_i2c->base)) ++ return PTR_ERR(iproc_i2c->base); ++ ++ ret = bcm_iproc_i2c_init(iproc_i2c); ++ if (ret) ++ return ret; ++ ++ ret = bcm_iproc_i2c_cfg_speed(iproc_i2c); ++ if (ret) ++ return ret; ++ ++ irq = platform_get_irq(pdev, 0); ++ if (irq <= 0) { ++ dev_err(iproc_i2c->device, "no irq resource\n"); ++ return irq; ++ } ++ iproc_i2c->irq = irq; ++ ++ ret = devm_request_irq(iproc_i2c->device, irq, bcm_iproc_i2c_isr, 0, ++ pdev->name, iproc_i2c); ++ if (ret < 0) { ++ dev_err(iproc_i2c->device, "unable to request irq %i\n", irq); ++ return ret; ++ } ++ ++ bcm_iproc_i2c_enable_disable(iproc_i2c, true); ++ ++ adap = &iproc_i2c->adapter; ++ i2c_set_adapdata(adap, iproc_i2c); ++ strlcpy(adap->name, "Broadcom iProc I2C adapter", sizeof(adap->name)); ++ adap->algo = &bcm_iproc_algo; ++#if !defined(CONFIG_ENABLE_WRITE_MSG_SPLIT) ++ adap->quirks = &bcm_iproc_i2c_quirks; ++#endif ++ adap->dev.parent = &pdev->dev; ++ adap->dev.of_node = pdev->dev.of_node; ++ ++ ret = i2c_add_adapter(adap); ++ if (ret) { ++ dev_err(iproc_i2c->device, 
"failed to add adapter\n"); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int bcm_iproc_i2c_remove(struct platform_device *pdev) ++{ ++ struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev); ++ ++ /* make sure there's no pending interrupt when we remove the adapter */ ++ writel(0, iproc_i2c->base + IE_OFFSET); ++ readl(iproc_i2c->base + IE_OFFSET); ++ synchronize_irq(iproc_i2c->irq); ++ ++ i2c_del_adapter(&iproc_i2c->adapter); ++ bcm_iproc_i2c_enable_disable(iproc_i2c, false); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM_SLEEP ++ ++static int bcm_iproc_i2c_suspend(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev); ++ ++ /* make sure there's no pending interrupt when we go into suspend */ ++ writel(0, iproc_i2c->base + IE_OFFSET); ++ readl(iproc_i2c->base + IE_OFFSET); ++ synchronize_irq(iproc_i2c->irq); ++ ++ /* now disable the controller */ ++ bcm_iproc_i2c_enable_disable(iproc_i2c, false); ++ ++ return 0; ++} ++ ++static int bcm_iproc_i2c_resume(struct device *dev) ++{ ++ struct platform_device *pdev = to_platform_device(dev); ++ struct bcm_iproc_i2c_dev *iproc_i2c = platform_get_drvdata(pdev); ++ int ret; ++ u32 val; ++ ++ /* ++ * Power domain could have been shut off completely in system deep ++ * sleep, so re-initialize the block here ++ */ ++ ret = bcm_iproc_i2c_init(iproc_i2c); ++ if (ret) ++ return ret; ++ ++ /* configure to the desired bus speed */ ++ val = readl(iproc_i2c->base + TIM_CFG_OFFSET); ++ val &= ~(1 << TIM_CFG_MODE_400_SHIFT); ++ val |= (iproc_i2c->bus_speed == 400000) << TIM_CFG_MODE_400_SHIFT; ++ writel(val, iproc_i2c->base + TIM_CFG_OFFSET); ++ ++ bcm_iproc_i2c_enable_disable(iproc_i2c, true); ++ ++ return 0; ++} ++ ++static const struct dev_pm_ops bcm_iproc_i2c_pm_ops = { ++ .suspend_late = &bcm_iproc_i2c_suspend, ++ .resume_early = &bcm_iproc_i2c_resume ++}; ++ ++#define BCM_IPROC_I2C_PM_OPS (&bcm_iproc_i2c_pm_ops) ++#else 
++#define BCM_IPROC_I2C_PM_OPS NULL ++#endif /* CONFIG_PM_SLEEP */ ++ ++static const struct of_device_id bcm_iproc_i2c_of_match[] = { ++ { .compatible = "brcm,iproc-i2c" }, ++ { /* sentinel */ } ++}; ++MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match); ++ ++static struct platform_driver bcm_iproc_i2c_driver = { ++ .driver = { ++ .name = "bcm-iproc-i2c", ++ .of_match_table = bcm_iproc_i2c_of_match, ++ .pm = BCM_IPROC_I2C_PM_OPS, ++ }, ++ .probe = bcm_iproc_i2c_probe, ++ .remove = bcm_iproc_i2c_remove, ++}; ++module_platform_driver(bcm_iproc_i2c_driver); ++ ++MODULE_AUTHOR("Ray Jui "); ++MODULE_DESCRIPTION("Broadcom iProc I2C Driver"); ++MODULE_LICENSE("GPL v2"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/i2c/busses/iproc_smbus.h b/drivers/i2c/busses/iproc_smbus.h +--- a/drivers/i2c/busses/iproc_smbus.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/i2c/busses/iproc_smbus.h 2017-11-09 17:53:33.705215000 +0800 +@@ -0,0 +1,189 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ */ ++ ++#ifndef __IPROC_SMBUS_H__ ++#define __IPROC_SMBUS_H__ ++ ++#define IPROC_I2C_INVALID_ADDR 0xFF ++ ++#define MAX_PROC_BUF_SIZE 256 ++#define MAX_PROC_NAME_SIZE 15 ++#define PROC_GLOBAL_PARENT_DIR "iproc-i2c" ++#define PROC_ENTRY_DEBUG "iproc-i2c-dbg" ++ ++#define IPROC_SMB_MAX_RETRIES 35 ++ ++#define GETREGFLDVAL(regval, mask, startbit) (((regval) & (mask)) >> (startbit)) ++ ++#define SETREGFLDVAL(regval, fldval, mask, startbit) regval = \ ++ (regval & ~(mask)) | \ ++ ((fldval) << (startbit)) ++ ++/* Enum to specify clock speed. The user will provide it during initialization. ++ * If needed, it can be changed dynamically ++ */ ++typedef enum iproc_smb_clk_freq { ++ I2C_SPEED_100KHz = 0, ++ I2C_SPEED_400KHz = 1, ++ I2C_SPEED_INVALID = 255 ++} smb_clk_freq_t; ++ ++/* This enum will be used to notify the user of status of a data transfer ++ * request ++ */ ++typedef enum iproc_smb_error_code { ++ I2C_NO_ERR = 0, ++ I2C_TIMEOUT_ERR = 1, ++ I2C_INVALID_PARAM_ERR = 2, /* Invalid parameter(s) passed to the driver */ ++ I2C_OPER_IN_PROGRESS = 3, /* The driver API was called before the present ++ transfer was completed */ ++ I2C_OPER_ABORT_ERR = 4, /* Transfer aborted unexpectedly, for example a NACK ++ received, before last byte was read/written */ ++ I2C_FUNC_NOT_SUPPORTED = 5, /* Feature or function not supported ++ (e.g., 10-bit addresses, or clock speeds ++ other than 100KHz, 400KHz) */ ++} iproc_smb_error_code_t; ++ ++/* Counters will be used mainly for testing and debugging */ ++struct iproc_smb_counters { ++ unsigned int num_read_requests; ++ unsigned int num_write_requests; ++ unsigned int num_read_errors; ++ unsigned int num_write_errors; ++ unsigned int mstr_rx_evt_cnt; /* ISR counter to check recv event */ ++ unsigned int mstr_start_busy_cnt; /* ISR counter to checking xact sts */ ++ unsigned int mstr_rx_fifo_full_cnt; /* ISR counter to detect rx fifo full */ ++ unsigned int last_int_sts; /* last value of intr status reg */ ++}; ++ ++ ++/* This enum 
may be used in a call back function to provide the user of the ++ * type of request sent by the user. It can also be used for testing and ++ * debugging purposes ++ */ ++typedef enum iproc_smb_message_type { ++ I2C_DISABLE_MSG = 0, /* To be used after hardware initialization. ++ Driver will _not_ respond to API calls */ ++ I2C_ENABLE_MSG = 1, /* Used after hardware initialization, if required. ++ Driver will start responding to API calls. ++ Will not (re-)program the hardware. */ ++ I2C_READ_MSG = 2, /* I2C read request from application */ ++ I2C_WRITE_MSG = 3 /* I2C write request from application */ ++} iproc_smb_message_type_t; ++ ++/* For debugging purposes, we will store the information about the last ++ * (latest) transfer request from the client application ++ */ ++struct iproc_smb_dbg_trans_info ++{ ++ iproc_smb_message_type_t i2c_last_mesg_type; ++ unsigned int i2c_last_dev_addr; ++ unsigned int i2c_last_num_bytes_xfer_req; ++}; ++ ++struct procfs { ++ char name[MAX_PROC_NAME_SIZE]; ++ struct proc_dir_entry *parent; ++}; ++ ++/* This structure will be used internally by the driver to maintain its ++ * configuration information as well as information programmed in to the ++ * hardware ++ */ ++struct iproc_smb_drv_int_data { ++ struct device *dev; ++ struct iproc_smb_drv_int_data *next; ++ ++ int irq; ++ ++ unsigned int drv_state_init; /* 1 = Initialized, 0 = not initialized */ ++ ++ unsigned int drv_state_open; /* 1 = Accepting transaction requests, ++ 0 = Not accepting transaction requests */ ++ smb_clk_freq_t clk_speed; ++ ++ void __iomem *block_base_addr; /* iomapped virtual base address for ++ register access */ ++ ++ struct i2c_adapter adapter; ++ ++ unsigned int i2c_slave_addr; /* Up to four 7-bit SMB slave addresses can be ++ assigned, we will assume only one for now. 
++ Valid only if SMBus will act as a slave ++ device */ ++ ++ struct semaphore xfer_lock; /* Lock for data transfer */ ++ ++ struct completion ses_done; /* To signal the command completion */ ++ ++ struct procfs proc; ++ ++ volatile int debug; ++ ++ unsigned int master_rx_fifo_thr; /* Master FIFO threshold. Interrupt will be ++ generated if the threshold is exceeded */ ++ ++ unsigned int slave_rx_fifo_thr; /* Slave FIFO threshold. Interrupt will be ++ generated if the threshold is exceeded */ ++ ++ unsigned int enable_evts; /* If true, enable interrupts. If false, ++ disable interrupts. Default is false */ ++ unsigned int evt_enable_bmap; /* Bit map of events enabled by the driver */ ++ ++ struct iproc_smb_counters smb_counters; /* Statistics maintained by driver. A caller ++ can request them through an API */ ++}; ++ ++/* Data to be supplied by the platform to initialise the IPROC SMBus (I2C). ++ * block ++ * init: Function called during driver initialization. Used by platform to ++ * configure GPIO functions and similar. ++ */ ++struct iproc_smb_platform_data { ++ int (*init)(struct iproc_smb_drv_int_data *iproc_i2c_info_ptr, int flags); ++ ++ unsigned int flags; ++}; ++ ++/* This structure will be used by the user during driver initialization to pass ++ * initial configuration information to the driver ++ */ ++struct iproc_smb_init_params { ++ unsigned int intr_mode; /* TRUE (1) for enabling interrupt mode, ++ FALSE (0) for polling mode */ ++ unsigned int clock_freq; /* 0=100KHz, 1=400KHz */ ++ void (*i2c_callback_func)(unsigned char *data); /* Application can ++ register a callback ++ function for driver to ++ notify the application ++ of any asynchronous ++ event(s), or exception. ++ Can be NULL */ ++}; ++ ++/* Structure used to pass information to read/write functions. */ ++struct iproc_xact_info { ++ bool cmd_valid; /* true if command field below is valid. 
Otherwise, false */ ++ unsigned short command; /* Passed by caller to send SMBus command code */ ++ unsigned char *data; /* actual data pased by the caller */ ++ unsigned int size; /* Size of data buffer passed */ ++ unsigned short flags; /* Sent by caller specifying PEC, 10-bit addresses */ ++ unsigned char smb_proto; /* SMBus protocol to use to perform transaction */ ++}; ++ ++#define XACT_TIMEOUT (msecs_to_jiffies(100)) /* Verify if 100 is OK */ ++ ++#endif /* __IPROC_SMBUS_H__ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/i2c/busses/iproc_smbus_defs.h b/drivers/i2c/busses/iproc_smbus_defs.h +--- a/drivers/i2c/busses/iproc_smbus_defs.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/i2c/busses/iproc_smbus_defs.h 2017-11-09 17:53:33.705243000 +0800 +@@ -0,0 +1,47 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ */ ++ ++#ifndef __IPROC_SMBUS_DEFS_H__ ++#define __IPROC_SMBUS_DEFS_H__ ++ ++/* Transaction error codes defined in Master command register (0x30) */ ++#define MSTR_STS_XACT_SUCCESS 0 ++#define MSTR_STS_LOST_ARB 1 ++#define MSTR_STS_NACK_FIRST_BYTE 2 ++#define MSTR_STS_NACK_NON_FIRST_BYTE 3 /* NACK on a byte other than ++ the first byte */ ++#define MSTR_STS_TTIMEOUT_EXCEEDED 4 ++#define MSTR_STS_TX_TLOW_MEXT_EXCEEDED 5 ++#define MSTR_STS_RX_TLOW_MEXT_EXCEEDED 6 ++ ++/* SMBUS protocol values defined in register 0x30 */ ++#define SMBUS_PROT_QUICK_CMD 0 ++#define SMBUS_PROT_SEND_BYTE 1 ++#define SMBUS_PROT_RECV_BYTE 2 ++#define SMBUS_PROT_WR_BYTE 3 ++#define SMBUS_PROT_RD_BYTE 4 ++#define SMBUS_PROT_WR_WORD 5 ++#define SMBUS_PROT_RD_WORD 6 ++#define SMBUS_PROT_BLK_WR 7 ++#define SMBUS_PROT_BLK_RD 8 ++#define SMBUS_PROT_PROC_CALL 9 ++#define SMBUS_PROT_BLK_WR_BLK_RD_PROC_CALL 10 ++ ++#define BUS_BUSY_COUNT 100000 /* Number can be changed later */ ++ ++#define DISABLE_INTR 0 ++#define ENABLE_INTR 1 ++#endif /* __IPROC_SMBUS_DEFS_H__ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/i2c/busses/iproc_smbus_regs.h b/drivers/i2c/busses/iproc_smbus_regs.h +--- a/drivers/i2c/busses/iproc_smbus_regs.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/i2c/busses/iproc_smbus_regs.h 2017-11-09 17:53:33.706235000 +0800 +@@ -0,0 +1,290 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ */ ++ ++#ifndef __IPROC_SMBUS_REGS_H__ ++#define __IPROC_SMBUS_REGS_H__ ++ ++/* --- */ ++#define CCB_SMB_CFG_REG 0x0 ++ ++#define CCB_SMB_CFG_RST_MASK 0x80000000 ++#define CCB_SMB_CFG_RST_SHIFT 31 ++ ++#define CCB_SMB_CFG_SMBEN_MASK 0x40000000 ++#define CCB_SMB_CFG_SMBEN_SHIFT 30 ++ ++#define CCB_SMB_CFG_BITBANGEN_MASK 0x20000000 ++#define CCB_SMB_CFG_BITBANGEN_SHIFT 29 ++ ++#define CCB_SMB_CFG_EN_NIC_SMBADDR0_MASK 0x10000000 ++#define CCB_SMB_CFG_EN_NIC_SMBADDR0_SHIFT 28 ++ ++#define CCB_SMB_CFG_PROMISCMODE_MASK 0x08000000 ++#define CCB_SMB_CFG_PROMISCMODE_SHIFT 27 ++ ++#define CCB_SMB_CFG_TSTMPCNTEN_MASK 0x04000000 ++#define CCB_SMB_CFG_TSTMPCNTEN_SHIFT 26 ++ ++#define CCB_SMB_CFG_MSTRRTRYCNT_MASK 0x000F0000 ++#define CCB_SMB_CFG_MSTRRTRYCNT_SHIFT 16 ++ ++ ++/* --- */ ++#define CCB_SMB_TIMGCFG_REG 0x4 ++ ++#define CCB_SMB_TIMGCFG_MODE400_MASK 0x80000000 ++#define CCB_SMB_TIMGCFG_MODE400_SHIFT 31 ++ ++#define CCB_SMB_TIMGCFG_RNDSLVSTR_MASK 0x7F000000 ++#define CCB_SMB_TIMGCFG_RNDSLVSTR_SHIFT 24 ++ ++#define CCB_SMB_TIMGCFG_PERSLVSTR_MASK 0x00FF0000 ++#define CCB_SMB_TIMGCFG_PERSLVSTR_SHIFT 16 ++ ++#define CCB_SMB_TIMGCFG_IDLTIME_MASK 0x0000FF00 ++#define CCB_SMB_TIMGCFG_IDLTIME_SHIFT 8 ++ ++/* --- */ ++#define CCB_SMB_ADDR_REG 0x8 ++ ++#define CCB_SMB_EN_NIC_SMBADDR3_MASK 0x80000000 ++#define CCB_SMB_EN_NIC_SMBADDR3_SHIFT 31 ++ ++#define CCB_SMB_NIC_SMBADDR3_MASK 0x7F000000 ++#define CCB_SMB_NIC_SMBADDR3_SHIFT 24 ++ ++#define CCB_SMB_EN_NIC_SMBADDR2_MASK 0x00800000 ++#define CCB_SMB_EN_NIC_SMBADDR2_SHIFT 23 ++ ++#define CCB_SMB_NIC_SMBADDR2_MASK 0x007F0000 ++#define CCB_SMB_NIC_SMBADDR2_SHIFT 16 ++ ++#define 
CCB_SMB_EN_NIC_SMBADDR1_MASK 0x00008000 ++#define CCB_SMB_EN_NIC_SMBADDR1_SHIFT 15 ++ ++#define CCB_SMB_NIC_SMBADDR1_MASK 0x00007F00 ++#define CCB_SMB_NIC_SMBADDR1_SHIFT 8 ++ ++#define CCB_SMB_EN_NIC_SMBADDR0_MASK 0x00000080 ++#define CCB_SMB_EN_NIC_SMBADDR0_SHIFT 7 ++ ++#define CCB_SMB_NIC_SMBADDR0_MASK 0x0000007F ++#define CCB_SMB_NIC_SMBADDR0_SHIFT 0 ++ ++/* --- */ ++#define CCB_SMB_MSTRFIFOCTL_REG 0xC ++ ++#define CCB_SMB_MSTRRXFIFOFLSH_MASK 0x80000000 ++#define CCB_SMB_MSTRRXFIFOFLSH_SHIFT 31 ++ ++#define CCB_SMB_MSTRTXFIFOFLSH_MASK 0x40000000 ++#define CCB_SMB_MSTRTXFIFOFLSH_SHIFT 30 ++ ++#define CCB_SMB_MSTRRXPKTCNT_MASK 0x007F0000 ++#define CCB_SMB_MSTRRXPKTCNT_SHIFT 16 ++ ++#define CCB_SMB_MSTRRXFIFOTHR_MASK 0x00003F00 ++#define CCB_SMB_MSTRRXFIFOTHR_SHIFT 8 ++ ++/* --- */ ++#define CCB_SMB_SLVFIFOCTL_REG 0x10 ++ ++#define CCB_SMB_SLVRXFIFOFLSH_MASK 0x80000000 ++#define CCB_SMB_SLVRXFIFOFLSH_SHIFT 31 ++ ++#define CCB_SMB_SLVTXFIFOFLSH_MASK 0x40000000 ++#define CCB_SMB_SLVTXFIFOFLSH_SHIFT 30 ++ ++#define CCB_SMB_SLVRXPKTCNT_MASK 0x007F0000 ++#define CCB_SMB_SLVRXPKTCNT_SHIFT 16 ++ ++#define CCB_SMB_SLVRXFIFOTHR_MASK 0x00003F00 ++#define CCB_SMB_SLVRXFIFOTHR_SHIFT 8 ++ ++/* --- */ ++#define CCB_SMB_BITBANGCTL_REG 0x14 ++ ++#define CCB_SMB_SMBCLKIN_MASK 0x80000000 ++#define CCB_SMB_SMBCLKIN_SHIFT 31 ++ ++#define CCB_SMB_SMBCLKOUTEN_MASK 0x40000000 ++#define CCB_SMB_SMBCLKOUTEN_SHIFT 30 ++ ++#define CCB_SMB_SMBDATAIN_MASK 0x20000000 ++#define CCB_SMB_SMBDATAIN_SHIFT 29 ++ ++#define CCB_SMB_SMBDATAOUTEN_MASK 0x10000000 ++#define CCB_SMB_SMBDATAOUTEN_SHIFT 28 ++ ++/* --- */ ++#define CCB_SMB_MSTRCMD_REG 0x30 ++ ++#define CCB_SMB_MSTRSTARTBUSYCMD_MASK 0x80000000 ++#define CCB_SMB_MSTRSTARTBUSYCMD_SHIFT 31 ++ ++#define CCB_SMB_MSTRABORT_MASK 0x40000000 ++#define CCB_SMB_MSTRABORT_SHIFT 30 ++ ++#define CCB_SMB_MSTRSTS_MASK 0x0E000000 ++#define CCB_SMB_MSTRSTS_SHIFT 25 ++ ++#define CCB_SMB_MSTRSMBUSPROTO_MASK 0x00001E00 ++#define CCB_SMB_MSTRSMBUSPROTO_SHIFT 9 ++ 
++#define CCB_SMB_MSTRPEC_MASK 0x00000100 ++#define CCB_SMB_MSTRPEC_SHIFT 8 ++ ++#define CCB_SMB_MSTRRDBYTECNT_MASK 0x000000FF ++#define CCB_SMB_MSTRRDBYTECNT_SHIFT 0 ++ ++/* --- */ ++#define CCB_SMB_SLVCMD_REG 0x34 ++ ++#define CCB_SMB_SLVSTARTBUSYCMD_MASK 0x80000000 ++#define CCB_SMB_SLVSTARTBUSYCMD_SHIFT 31 ++ ++#define CCB_SMB_SLVABORT_MASK 0x40000000 ++#define CCB_SMB_SLVABORT_SHIFT 30 ++ ++#define CCB_SMB_SLVSTS_MASK 0x03800000 ++#define CCB_SMB_SLVSTS_SHIFT 23 ++ ++#define CCB_SMB_SLVPEC_MASK 0x00000100 ++#define CCB_SMB_SLVPEC_SHIFT 8 ++ ++ ++/* --- */ ++#define CCB_SMB_EVTEN_REG 0x38 ++ ++#define CCB_SMB_MSTRRXFIFOFULLEN_MASK 0x80000000 ++#define CCB_SMB_MSTRRXFIFOFULLEN_SHIFT 31 ++ ++#define CCB_SMB_MSTRRXFIFOTHRHITEN_MASK 0x40000000 ++#define CCB_SMB_MSTRRXFIFOTHRHITEN_SHIFT 30 ++ ++#define CCB_SMB_MSTRRXEVTEN_MASK 0x20000000 ++#define CCB_SMB_MSTRRXEVTEN_SHIFT 29 ++ ++#define CCB_SMB_MSTRSTARTBUSYEN_MASK 0x10000000 ++#define CCB_SMB_MSTRSTARTBUSYEN_SHIFT 28 ++ ++#define CCB_SMB_MSTRTXUNDEN_MASK 0x08000000 ++#define CCB_SMB_MSTRTXUNDEN_SHIFT 27 ++ ++ ++#define CCB_SMB_SLVRXFIFOFULLEN_MASK 0x04000000 ++#define CCB_SMB_SLVRXFIFOFULLEN_SHIFT 26 ++ ++#define CCB_SMB_SLVRXFIFOTHRHITEN_MASK 0x02000000 ++#define CCB_SMB_SLVRXFIFOTHRHITEN_SHIFT 25 ++ ++#define CCB_SMB_SLVRXEVTEN_MASK 0x01000000 ++#define CCB_SMB_SLVRXEVTEN_SHIFT 24 ++ ++#define CCB_SMB_SLVSTARTBUSYEN_MASK 0x00800000 ++#define CCB_SMB_SLVSTARTBUSYEN_SHIFT 23 ++ ++#define CCB_SMB_SLVTXUNDEN_MASK 0x00400000 ++#define CCB_SMB_SLVTXUNDEN_SHIFT 22 ++ ++#define CCB_SMB_SLVRDEVTEN_MASK 0x00200000 ++#define CCB_SMB_SLVRDEVTEN_SHIFT 21 ++ ++ ++/* --- */ ++#define CCB_SMB_EVTSTS_REG 0x3C ++ ++#define CCB_SMB_MSTRRXFIFOFULLSTS_MASK 0x80000000 ++#define CCB_SMB_MSTRRXFIFOFULLSTS_SHIFT 31 ++ ++#define CCB_SMB_MSTRRXFIFOTHRHITSTS_MASK 0x40000000 ++#define CCB_SMB_MSTRRXFIFOTHRHITSTS_SHIFT 30 ++ ++#define CCB_SMB_MSTRRXEVTSTS_MASK 0x20000000 ++#define CCB_SMB_MSTRRXEVTSTS_SHIFT 29 ++ ++#define 
CCB_SMB_MSTRSTARTBUSYSTS_MASK 0x10000000 ++#define CCB_SMB_MSTRSTARTBUSYSTS_SHIFT 28 ++ ++#define CCB_SMB_MSTRTXUNDSTS_MASK 0x08000000 ++#define CCB_SMB_MSTRTXUNDSTS_SHIFT 27 ++ ++ ++#define CCB_SMB_SLVRXFIFOFULLSTS_MASK 0x04000000 ++#define CCB_SMB_SLVRXFIFOFULLSTS_SHIFT 26 ++ ++#define CCB_SMB_SLVRXFIFOTHRHITSTS_MASK 0x02000000 ++#define CCB_SMB_SLVRXFIFOTHRHITSTS_SHIFT 25 ++ ++#define CCB_SMB_SLVRXEVTSTS_MASK 0x01000000 ++#define CCB_SMB_SLVRXEVTSTS_SHIFT 24 ++ ++#define CCB_SMB_SLVSTARTBUSYSTS_MASK 0x00800000 ++#define CCB_SMB_SLVSTARTBUSYSTS_SHIFT 23 ++ ++#define CCB_SMB_SLVTXUNDSTS_MASK 0x00400000 ++#define CCB_SMB_SLVTXUNDSTS_SHIFT 22 ++ ++#define CCB_SMB_SLVRDEVTSTS_MASK 0x00200000 ++#define CCB_SMB_SLVRDEVTSTS_SHIFT 21 ++ ++ ++/* --- */ ++#define CCB_SMB_MSTRDATAWR_REG 0x40 ++ ++#define CCB_SMB_MSTRWRSTS_MASK 0x80000000 ++#define CCB_SMB_MSTRWRSTS_SHIFT 31 ++ ++#define CCB_SMB_MSTRWRDATA_MASK 0x000000FF ++#define CCB_SMB_MSTRWRDATA_SHIFT 0 ++ ++ ++/* --- */ ++#define CCB_SMB_MSTRDATARD_REG 0x44 ++ ++#define CCB_SMB_MSTRRDSTS_MASK 0xC0000000 ++#define CCB_SMB_MSTRRDSTS_SHIFT 30 ++ ++#define CCB_SMB_MSTRRDPECERR_MASK 0x20000000 ++#define CCB_SMB_MSTRRDPECERR_SHIFT 29 ++ ++#define CCB_SMB_MSTRRDDATA_MASK 0x000000FF ++#define CCB_SMB_MSTRRDDATA_SHIFT 0 ++ ++ ++/* --- */ ++#define CCB_SMB_SLVDATAWR_REG 0x48 ++ ++#define CCB_SMB_SLVWRSTS_MASK 0x80000000 ++#define CCB_SMB_SLVWRSTS_SHIFT 31 ++ ++#define CCB_SMB_SLVWRDATA_MASK 0x000000FF ++#define CCB_SMB_SLVWRDATA_SHIFT 0 ++ ++ ++/* --- */ ++#define CCB_SMB_SLVDATARD_REG 0x4C ++ ++#define CCB_SMB_SLVRDSTS_MASK 0xC0000000 ++#define CCB_SMB_SLVRDSTS_SHIFT 30 ++ ++#define CCB_SMB_SLVRDERRSTS_MASK 0x30000000 ++#define CCB_SMB_SLVRDERRSTS_SHIFT 28 ++ ++#define CCB_SMB_SLVRDDATA_MASK 0x000000FF ++#define CCB_SMB_SLVRDDATA_SHIFT 0 ++ ++#endif /* __IPROC_SMBUS_REGS_H__ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff 
a/drivers/i2c/busses/xgs_iproc_smbus.c b/drivers/i2c/busses/xgs_iproc_smbus.c +--- a/drivers/i2c/busses/xgs_iproc_smbus.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/i2c/busses/xgs_iproc_smbus.c 2017-11-09 17:53:33.709223000 +0800 +@@ -0,0 +1,2014 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "iproc_smbus_regs.h" ++#include "iproc_smbus_defs.h" ++#include "iproc_smbus.h" ++ ++#ifdef CONFIG_OF ++#include ++#include ++#include ++#endif /* CONFIG_OF */ ++ ++#undef IPROC_SMB_DBG ++ ++/* Support I2C devices without length field xfer*/ ++//#define SMB_BLOCK_XFER_VARIANT ++ ++#define SMB_MAX_DATA_SIZE 32 ++#define SMB_BLK_XFER_TEST ++ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 37) ++#define init_MUTEX(x) sema_init(x,1) ++#endif ++ ++static struct proc_dir_entry *gProcParent=NULL; ++//static int use_svk_version; ++#undef CONFIG_USE_SVK_VERSION ++ ++static int smb_in_intr; ++ ++static struct iproc_smb_drv_int_data *iproc_smbus_list = NULL; ++static int iproc_smbus_block_init(struct iproc_smb_drv_int_data *dev); ++ ++/* Function to read a value from specified register. */ ++static unsigned int iproc_smb_reg_read(unsigned long reg_addr) ++{ ++ unsigned int val; ++ ++ val = ioread32((void *)reg_addr); ++ ++#ifdef IPROC_SMB_DBG ++ if (!smb_in_intr) { ++ printk(KERN_DEBUG "\nRd: addr:0x%08X, val:0x%08X", (unsigned int)reg_addr, val); ++ } ++#endif ++ ++ return(val); ++} ++ ++/* Function to write a value ('val') in to a specified register. 
*/ ++static int iproc_smb_reg_write(unsigned long reg_addr, unsigned int val) ++{ ++ iowrite32(val, (void *)reg_addr); ++ ++#ifdef IPROC_SMB_DBG ++ if (!smb_in_intr) { ++ printk(KERN_DEBUG "\nWr: addr:0x%08X, val:0x%08X", (unsigned int)reg_addr, val); ++ } ++#endif ++ ++ return (0); ++} ++ ++#ifdef IPROC_SMB_DBG ++static int iproc_dump_smb_regs(struct iproc_smb_drv_int_data *dev) ++{ ++ unsigned int regval; ++ unsigned long base_addr = (unsigned long)dev->block_base_addr; ++ ++ printk(KERN_DEBUG "\n----------------------------------------------"); ++ ++ printk(KERN_DEBUG "\nBase addr=0x%08X", (unsigned int)base_addr); ++ ++ printk(KERN_DEBUG "%s: Dumping SMBus registers... ", __func__); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_CFG_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_CFG_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_TIMGCFG_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_TIMGCFG_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_ADDR_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_ADDR_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_MSTRFIFOCTL_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_MSTRFIFOCTL_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_SLVFIFOCTL_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_SLVFIFOCTL_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_BITBANGCTL_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_BITBANGCTL_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_MSTRCMD_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_MSTRCMD_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_SLVCMD_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_SLVCMD_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_EVTEN_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_EVTEN_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_EVTSTS_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_EVTSTS_REG=0x%08X", regval); ++ ++ 
regval = iproc_smb_reg_read(base_addr + CCB_SMB_MSTRDATAWR_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_MSTRDATAWR_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_MSTRDATARD_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_MSTRDATARD_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_SLVDATAWR_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_SLVDATAWR_REG=0x%08X", regval); ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_SLVDATARD_REG); ++ printk(KERN_DEBUG "\nCCB_SMB_SLVDATARD_REG=0x%08X", regval); ++ ++ printk(KERN_DEBUG "\n----------------------------------------------\n\n"); ++ ++ return(0); ++} ++#endif /* IPROC_SMB_DBG */ ++ ++static irqreturn_t iproc_smb_isr(int irq, void*devid) ++{ ++ struct iproc_smb_drv_int_data *dev = ++ (struct iproc_smb_drv_int_data *)devid; ++ unsigned int intsts; ++ unsigned int regval; ++ ++ ++ smb_in_intr = 1; ++ ++ intsts = iproc_smb_reg_read((unsigned long)dev->block_base_addr + ++ CCB_SMB_EVTSTS_REG); ++ ++ dev->smb_counters.last_int_sts = intsts; ++ ++ if (!intsts) { ++ ++ /* Likely received a spurious interrupt */ ++ ++ return IRQ_NONE; ++ ++ } ++ ++ /* Clear interrupts */ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + ++ CCB_SMB_EVTSTS_REG, intsts); ++ ++ /* Master read or write complete */ ++ if ((intsts & CCB_SMB_MSTRSTARTBUSYEN_MASK) || ++ (intsts & CCB_SMB_MSTRRXEVTSTS_MASK)) { ++ ++ if (intsts & CCB_SMB_MSTRSTARTBUSYEN_MASK) { ++ ++ dev->smb_counters.mstr_start_busy_cnt++; ++ ++ } ++ ++ if (intsts & CCB_SMB_MSTRRXEVTSTS_MASK) { ++ ++ dev->smb_counters.mstr_rx_evt_cnt++; ++ ++ } ++ ++ /* In case of a receive transaction, data will be copied in the recv ++ * function ++ */ ++ complete(&dev->ses_done); ++ ++ } ++ ++ /* If RX FIFO was full we can either read and then flush the FIFO. Or, only ++ * flush the FIFO (since the client process did not read the data on time), ++ * and then the client process can restart the transaction ++ * For now, we will flush the later action. 
++ */ ++ if (intsts & CCB_SMB_MSTRRXFIFOFULLSTS_MASK) { ++ ++ dev->smb_counters.mstr_rx_fifo_full_cnt++; ++ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + ++ CCB_SMB_MSTRFIFOCTL_REG); ++ ++ regval |= CCB_SMB_MSTRRXFIFOFLSH_MASK; ++ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + ++ CCB_SMB_MSTRFIFOCTL_REG, regval); ++ ++ complete(&dev->ses_done); ++ ++ } ++ ++ smb_in_intr = 0; ++ ++ return IRQ_HANDLED; ++} ++ ++/* ++ * Function to ensure that the previous transaction was completed before ++ * initiating a new transaction. It can also be used in polling mode to ++ * check status of completion of a command ++ */ ++static int iproc_smb_startbusy_wait(struct iproc_smb_drv_int_data *dev) ++{ ++ unsigned int regval; ++ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + ++ CCB_SMB_MSTRCMD_REG); ++ ++ /* Check if an operation is in progress. During probe it won't be. ++ * But when shutdown/remove was called we want to make sure that ++ * the transaction in progress completed ++ */ ++ if (regval & CCB_SMB_MSTRSTARTBUSYCMD_MASK) { ++ unsigned int i = 0; ++ ++ do { ++ ++ msleep(1); /* Wait for 1 msec */ ++ ++ i++; ++ ++ regval = iproc_smb_reg_read( ++ (unsigned long)dev->block_base_addr + CCB_SMB_MSTRCMD_REG); ++ ++ /* If start-busy bit cleared, exit the loop */ ++ } while ((regval & CCB_SMB_MSTRSTARTBUSYCMD_MASK) && ++ (i < IPROC_SMB_MAX_RETRIES)); ++ ++ if (i >= IPROC_SMB_MAX_RETRIES) { ++ printk(KERN_ERR "%s: %s START_BUSY bit didn't clear, exiting\n", ++ __func__, dev->adapter.name); ++ return -ETIMEDOUT; ++ ++ } ++ ++ } ++ ++ return 0; ++} ++ ++ ++static unsigned int smbus0_sdaRecoveryCnt = 0, smbus0_sdaFailedCnt = 0, smbus0_startBusyCnt = 0; ++static unsigned int smbus1_sdaRecoveryCnt = 0, smbus1_sdaFailedCnt = 0, smbus1_startBusyCnt = 0; ++ ++/* ++ * Function to recover SMB hangs caused stuck master START_BUSY. ++ * Returns 0 if recovery procedure executed successfully. ++ * Returns -1 if recovery failed. 
++ */ ++static int iproc_smb_startbusy_recovery(struct iproc_smb_drv_int_data *dev) ++{ ++ int rc = -1; ++ unsigned int recoveryCnt; ++ ++ if (dev->adapter.nr == 0) { ++ recoveryCnt = ++smbus0_startBusyCnt; ++ } ++ else { ++ recoveryCnt = ++smbus1_startBusyCnt; ++ } ++ ++ printk(KERN_INFO "%s: %s START_BUSY recovery #%d \n", __func__, dev->adapter.name, recoveryCnt); ++ ++ /* reset the SMBus block, wait a minimum of 50 uSecs and then re-initialize */ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_CFG_REG, CCB_SMB_CFG_RST_MASK); ++ udelay(60); ++ ++ if ( iproc_smbus_block_init(dev) == 0 ) { ++ rc = 0; ++ } ++ ++ return rc; ++} ++ ++ ++ ++/* ++ * Function to recover SMB hang caused by a slave device holding SDA low. ++ * Returns 0 if recovery procedure executed successfully. ++ * Returns -1 if recovery failed. ++ */ ++ ++static int iproc_smb_sda_low_recovery(struct iproc_smb_drv_int_data *dev) ++{ ++ unsigned int bbReg, cfgReg, cfgSave, recoveryCnt, failedCnt, i; ++ int rc = -1; ++ ++ ++ /* enable bit-bang */ ++ cfgSave = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_CFG_REG); ++ cfgReg = cfgSave; ++ cfgReg |= CCB_SMB_CFG_BITBANGEN_MASK; ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_CFG_REG, cfgReg); ++ udelay(50); ++ ++ /* start with clock and SDA set high */ ++ bbReg = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_BITBANGCTL_REG); ++ ++ bbReg |= (CCB_SMB_SMBCLKOUTEN_MASK | CCB_SMB_SMBDATAOUTEN_MASK); ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_BITBANGCTL_REG, bbReg); ++ udelay(5); /* should be sufficient for 100 KHz bus */ ++ ++ /* set up to toggle the clock line with SDA out held high for 9 cycles */ ++ for (i=0; i<18; i++) ++ { ++ /* toggle CLK out */ ++ if ( (bbReg & CCB_SMB_SMBCLKOUTEN_MASK) == 0 ) { ++ bbReg |= CCB_SMB_SMBCLKOUTEN_MASK; /* set clock high */ ++ } ++ else { ++ bbReg &= ~CCB_SMB_SMBCLKOUTEN_MASK; /* set clock low */ ++ } ++ ++ 
iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_BITBANGCTL_REG, bbReg); ++ udelay(5); ++ } ++ ++ /* check bit 29 -- SMBDAT_IN and make sure SDA not being held low any more */ ++ for ( i=0; i<10; i++ ) ++ { ++ bbReg = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_BITBANGCTL_REG); ++ bbReg &= CCB_SMB_SMBDATAIN_MASK; ++ ++ if (bbReg) ++ break; ++ ++ udelay(1); ++ } ++ ++ if ( bbReg == 0 ) { ++ /* SDA is still low */ ++ if (dev->adapter.nr == 0) { ++ failedCnt = ++smbus0_sdaFailedCnt; ++ } ++ else { ++ failedCnt = ++smbus1_sdaFailedCnt; ++ } ++ printk(KERN_INFO "\n%s: %s SDA release #%d FAILED.\n", __func__, dev->adapter.name, failedCnt); ++ } ++ else { ++ if (dev->adapter.nr == 0) { ++ recoveryCnt = ++smbus0_sdaRecoveryCnt; ++ } ++ else { ++ recoveryCnt = ++smbus1_sdaRecoveryCnt; ++ } ++ ++ printk(KERN_INFO "%s: %s SDA release #%d SUCCESSFUL.\n", __func__, dev->adapter.name, recoveryCnt); ++ rc = 0; ++ } ++ ++ ++ /* manually issue a stop by transitioning SDA from low to high with clock held high */ ++ bbReg = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_BITBANGCTL_REG); ++ bbReg &= ~CCB_SMB_SMBCLKOUTEN_MASK; /* set clock low */ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_BITBANGCTL_REG, bbReg); ++ udelay(2); ++ ++ bbReg &= ~CCB_SMB_SMBDATAOUTEN_MASK; /* drop SDA low */ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_BITBANGCTL_REG, bbReg); ++ udelay(2); ++ ++ bbReg |= CCB_SMB_SMBCLKOUTEN_MASK; /* set clock high */ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_BITBANGCTL_REG, bbReg); ++ udelay(5); ++ ++ bbReg |= CCB_SMB_SMBDATAOUTEN_MASK; /* pull SDA high */ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_BITBANGCTL_REG, bbReg); ++ udelay(2); ++ ++ ++ /* disable bit-bang and then re-enable the SMB with the saved configuration */ ++ cfgReg = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_CFG_REG); ++ cfgReg 
&= ~CCB_SMB_CFG_BITBANGEN_MASK; ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_CFG_REG, cfgReg); ++ udelay(10); ++ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_CFG_REG, cfgSave); ++ ++ return rc; ++} ++ ++ ++/* ++ * Function to recover SMB hang caused by a slave device holding SDA low. ++ * Returns 0 if recovery procedure executed successfully. ++ * Returns -1 if recovery failed. ++ */ ++static int iproc_smb_timeout_recovery(struct iproc_smb_drv_int_data *dev) ++{ ++ unsigned int bbReg, mCmdReg; ++ int rc = -1; ++ ++ /* read bit-bang control. If SDA low, attempt SDA release recovery */ ++ bbReg = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_BITBANGCTL_REG); ++ ++ if ( (bbReg & CCB_SMB_SMBDATAIN_MASK) == 0 ) { ++ if ( iproc_smb_sda_low_recovery( dev ) == 0 ) { ++ rc = 0; ++ } ++ } ++ ++ /* regardless of whether there was an SDA hang or not, see if START_BUSY stuck high */ ++ mCmdReg = iproc_smb_reg_read( (unsigned long)dev->block_base_addr + CCB_SMB_MSTRCMD_REG ); ++ if ( mCmdReg & CCB_SMB_MSTRSTARTBUSYCMD_MASK ) { ++ /* attempt to recover the bus */ ++ if (iproc_smb_startbusy_recovery(dev) == 0) { ++ rc = 0; ++ } ++ } ++ ++ return rc; ++ ++} ++ ++/* ++ * This function copies data to SMBus's Tx FIFO. Valid for write transactions ++ * only ++ * ++ * base_addr: Mapped address of this SMBus instance ++ * dev_addr: SMBus (I2C) device address. We are assuming 7-bit addresses ++ * initially ++ * info: Data to copy in to Tx FIFO.
For read commands, the size should be ++ * set to zero by the caller ++ * ++ */ ++static void iproc_smb_write_trans_data(unsigned long base_addr, ++ unsigned short dev_addr, ++ struct iproc_xact_info *info) ++{ ++ unsigned int regval; ++ unsigned int i; ++ unsigned int num_data_bytes = 0; ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\n%s: dev_addr=0x%X, offset=%u, cmd_valid=%u, size=%u\n", __func__, dev_addr, info->command, info->cmd_valid, info->size); ++#endif /* IPROC_SMB_DBG */ ++ ++ /* Write SMBus device address first */ ++ /* Note, we are assuming 7-bit addresses for now. For 10-bit addresses, ++ * we may have one more write to send the upper 3 bits of 10-bit addr ++ */ ++ iproc_smb_reg_write(base_addr + CCB_SMB_MSTRDATAWR_REG, dev_addr); ++ ++ /* If the protocol needs command code, copy it */ ++ if (info->cmd_valid == true) { ++ iproc_smb_reg_write(base_addr + CCB_SMB_MSTRDATAWR_REG, info->command); ++ } ++ ++ /* Depending on the SMBus protocol, we need to write additional transaction ++ * data in to Tx FIFO. Refer to section 5.5 of SMBus spec for sequence for a ++ * transaction ++ */ ++ switch (info->smb_proto) { ++ ++ case SMBUS_PROT_RECV_BYTE: ++ /* No additional data to be written */ ++ num_data_bytes = 0; ++ break; ++ ++ case SMBUS_PROT_SEND_BYTE: ++ num_data_bytes = info->size; ++ break; ++ ++ case SMBUS_PROT_RD_BYTE: ++ case SMBUS_PROT_RD_WORD: ++ case SMBUS_PROT_BLK_RD: ++ /* Write slave address with R/W~ set (bit #0) */ ++ iproc_smb_reg_write(base_addr + CCB_SMB_MSTRDATAWR_REG, dev_addr | 0x1); ++ num_data_bytes = 0; ++ break; ++ ++ case SMBUS_PROT_WR_BYTE: ++ case SMBUS_PROT_WR_WORD: ++ /* No additional bytes to be written. Data portion is written in the ++ * 'for' loop below ++ */ ++ num_data_bytes = info->size; ++ ++ /* Note for hx4 eeprom (at24c64). 
the low addr bytes can be passed ++ * in to 1st byte of info->data ++ */ ++ break; ++ ++ case SMBUS_PROT_BLK_WR: ++ /* 3rd byte is byte count */ ++#ifndef SMB_BLOCK_XFER_VARIANT ++ iproc_smb_reg_write(base_addr + CCB_SMB_MSTRDATAWR_REG, info->size); ++#endif ++ num_data_bytes = info->size; ++ break; ++ ++ case SMBUS_PROT_BLK_WR_BLK_RD_PROC_CALL: ++ /* Write byte count */ ++ iproc_smb_reg_write(base_addr + CCB_SMB_MSTRDATAWR_REG, info->size); ++ num_data_bytes = info->size; ++ break; ++ ++ default: ++ break; ++ ++ } ++ ++ /* Copy actual data from caller, next. In general, for reads, no data is ++ * copied ++ */ ++ for (i = 0; num_data_bytes; --num_data_bytes, i++) { ++ ++ /* For the last byte, set MASTER_WR_STATUS bit. For block rd/wr process ++ * call, we need to program slave addr after copying data byte(s), so ++ * master status bit is set later, after the loop ++ */ ++ if ((num_data_bytes == 1) && ++ (info->smb_proto != SMBUS_PROT_BLK_WR_BLK_RD_PROC_CALL)) { ++ regval = info->data[i] | CCB_SMB_MSTRWRSTS_MASK; ++ } ++ else { ++ regval = info->data[i]; ++ } ++ ++ iproc_smb_reg_write(base_addr + CCB_SMB_MSTRDATAWR_REG, regval); ++ ++ } ++ ++ if (info->smb_proto == SMBUS_PROT_BLK_WR_BLK_RD_PROC_CALL) { ++ /* Write device address needed during repeat start condition */ ++ iproc_smb_reg_write(base_addr + CCB_SMB_MSTRDATAWR_REG, ++ CCB_SMB_MSTRWRSTS_MASK | dev_addr | 0x1); ++ } ++ ++ return; ++} ++ ++static int iproc_smb_data_send(struct i2c_adapter *adapter, ++ unsigned short addr, ++ struct iproc_xact_info *info) ++{ ++ int rc; ++ unsigned int regval; ++ struct iproc_smb_drv_int_data *dev = i2c_get_adapdata(adapter); ++ unsigned long time_left; ++ ++ ++ /* Make sure the previous transaction completed */ ++ rc = iproc_smb_startbusy_wait(dev); ++ ++ if (rc < 0) { ++ printk(KERN_ERR "%s: Send: %s bus is busy, attempt recovery \n", __func__, dev->adapter.name); ++ /* attempt to recover the bus */ ++ if (iproc_smb_startbusy_recovery(dev) != 0) { ++ return rc; ++ } ++ } ++ 
++ if (dev->enable_evts == ENABLE_INTR) { ++ ++ /* Enable start_busy interrupt */ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + ++ CCB_SMB_EVTEN_REG); ++ ++ regval |= CCB_SMB_MSTRSTARTBUSYEN_MASK; ++ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + ++ CCB_SMB_EVTEN_REG, regval); ++ ++ /* Mark as incomplete before sending the data */ ++ reinit_completion(&dev->ses_done); ++ ++ } ++ ++ /* Write transaction bytes to Tx FIFO */ ++ iproc_smb_write_trans_data((unsigned long)dev->block_base_addr, addr, info); ++ ++ /* Program master command register (0x30) with protocol type and set ++ * start_busy_command bit to initiate the write transaction ++ */ ++ regval = (info->smb_proto << CCB_SMB_MSTRSMBUSPROTO_SHIFT) | ++ CCB_SMB_MSTRSTARTBUSYCMD_MASK; ++ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_MSTRCMD_REG, regval); ++ if (dev->enable_evts == ENABLE_INTR) { ++ /* ++ * Block waiting for the transaction to finish. When it's finished, ++ * we'll be signaled by an interrupt ++ */ ++ time_left = wait_for_completion_timeout(&dev->ses_done, XACT_TIMEOUT); ++ /* Disable start_busy interrupt */ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_EVTEN_REG); ++ regval &= ~CCB_SMB_MSTRSTARTBUSYEN_MASK; ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_EVTEN_REG, regval); ++ ++ if (time_left == 0) { ++ printk (KERN_INFO "%s: Send: %s timeout accessing device x%02x\n", ++ __func__, dev->adapter.name, addr); ++ ++ /* attempt to recover the bus */ ++ rc = iproc_smb_timeout_recovery(dev); ++ if ( rc != 0 ) { ++ return -ETIMEDOUT; ++ } ++ else { ++ return -ECOMM; ++ } ++ } ++ } ++ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_MSTRCMD_REG); ++ ++ /* If start_busy bit cleared, check if there are any errors */ ++ if (!(regval & CCB_SMB_MSTRSTARTBUSYCMD_MASK)) { ++ /* start_busy bit cleared, check master_status field now */ ++ regval &= CCB_SMB_MSTRSTS_MASK; ++ 
regval >>= CCB_SMB_MSTRSTS_SHIFT; ++ ++ if (regval != MSTR_STS_XACT_SUCCESS) { ++ /* We can flush Tx FIFO here */ ++ printk(KERN_ERR "\n\n%s:Send: %s Error in transaction %d to device x%02x, exiting\n", ++ __func__, dev->adapter.name, regval, addr); ++ ++ return -EREMOTEIO; ++ } ++ } ++ ++ return(0); ++} ++ ++static int iproc_smb_data_recv(struct i2c_adapter *adapter, ++ unsigned short addr, ++ struct iproc_xact_info *info, ++ unsigned int *num_bytes_read) ++{ ++ int rc; ++ unsigned int regval; ++ struct iproc_smb_drv_int_data *dev = i2c_get_adapdata(adapter); ++ unsigned long time_left; ++ ++ /* Make sure the previous transaction completed */ ++ rc = iproc_smb_startbusy_wait(dev); ++ ++ if (rc < 0) { ++ printk(KERN_ERR "%s: Receive: %s bus is busy, attempt recovery \n", __func__, dev->adapter.name); ++ /* attempt to recover the bus */ ++ if (iproc_smb_startbusy_recovery(dev) != 0) { ++ return rc; ++ } ++ } ++ ++ if (dev->enable_evts == ENABLE_INTR) { ++ /* Enable start_busy interrupt */ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_EVTEN_REG); ++ ++ /* Set Rx_event_en bit for notification of reception event */ ++ regval |= (CCB_SMB_MSTRSTARTBUSYEN_MASK); ++ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_EVTEN_REG, regval); ++ ++ /* Mark as incomplete before sending the data */ ++ reinit_completion(&dev->ses_done); ++ } ++ ++ /* Program all transaction bytes into master Tx FIFO */ ++ iproc_smb_write_trans_data((unsigned long)dev->block_base_addr, addr, info); ++ ++ /* Program master command register (0x30) with protocol type and set ++ * start_busy_command bit to initiate the write transaction ++ */ ++ regval = (info->smb_proto << CCB_SMB_MSTRSMBUSPROTO_SHIFT) | ++ CCB_SMB_MSTRSTARTBUSYCMD_MASK | info->size; ++ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + ++ CCB_SMB_MSTRCMD_REG, regval); ++ ++ if (dev->enable_evts == ENABLE_INTR) { ++ /* ++ * Block waiting for the transaction to finish. 
When it's finished, ++ * we'll be signaled by an interrupt ++ */ ++ time_left = wait_for_completion_timeout(&dev->ses_done, XACT_TIMEOUT); ++ ++ /* Disable start_busy and rx_event interrupts. Above call has handled ++ * the interrupt ++ */ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_EVTEN_REG); ++ regval &= ~(CCB_SMB_MSTRSTARTBUSYEN_MASK); ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_EVTEN_REG, regval); ++ ++ if (time_left == 0) { ++ printk (KERN_ERR "\n%s: Receive: %s timeout accessing device 0x%02x\n", ++ __func__, dev->adapter.name, addr); ++ /* attempt to recover the bus */ ++ rc = iproc_smb_timeout_recovery(dev); ++ if ( rc != 0 ) { ++ return -ETIMEDOUT; ++ } ++ else { ++ return -ECOMM; ++ } ++ } ++ } ++ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_MSTRCMD_REG); ++ ++ /* If start_busy bit cleared, check if there are any errors */ ++ if (!(regval & CCB_SMB_MSTRSTARTBUSYCMD_MASK)) { ++ /* start_busy bit cleared, check master_status field now */ ++ regval &= CCB_SMB_MSTRSTS_MASK; ++ regval >>= CCB_SMB_MSTRSTS_SHIFT; ++ ++ if (regval != MSTR_STS_XACT_SUCCESS) { ++ /* We can flush Tx FIFO here */ ++ printk(KERN_INFO "\n%s: %s Error in transaction %d to device x%02x, exiting\n", ++ __func__, dev->adapter.name, regval, addr); ++ return -EREMOTEIO; ++ } ++ } ++ ++ /* In the isr we will read the received byte, and also deal with ++ * rx fifo full event. The above check is for timeout error. 
If needed ++ * we may move it to rx isr ++ */ ++ ++ /* For block read, protocol (hw) returns byte count, as the first byte */ ++ if ((info->smb_proto == SMBUS_PROT_BLK_RD) || (info->smb_proto == SMBUS_PROT_BLK_WR_BLK_RD_PROC_CALL)) { ++ int i, adj; ++#ifndef SMB_BLOCK_XFER_VARIANT ++ /* Read received byte(s) */ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_MSTRDATARD_REG); ++ *num_bytes_read = regval & CCB_SMB_MSTRRDDATA_MASK; ++#else ++ *num_bytes_read = info->size; ++#endif ++ adj = 0; ++ ++ /* Limit to reading a max of 32 bytes only; just a safeguard. If ++ * # bytes read is a number > 32, check transaction set up, and contact ++ * hw engg. Assumption: PEC is disabled ++ */ ++ /* SMBUS spec ver. 3 (2015) extends max block transfer byte count from 32 to 256 */ ++ /* Use SMB_MAX_DATA_SIZE (according to HW FIFO) instead of I2C_SMBUS_BLOCK_MAX (defined in Linux)*/ ++ /* Current SMBUS HW FIFO length is 64B. For block write xfer, the first three FIFO entries are for slave address, register offset, and length count*/ ++ //for (i = 0; (i < *num_bytes_read) && (i < (I2C_SMBUS_BLOCK_MAX - adj)); i++) { ++ for (i = 0; (i < *num_bytes_read) && (i < (SMB_MAX_DATA_SIZE - adj)); i++) { ++ /* Read Rx FIFO for data bytes */ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_MSTRDATARD_REG); ++ info->data[i + adj] = regval & CCB_SMB_MSTRRDDATA_MASK; ++ } ++ /* To make sure that at most SMB_MAX_DATA_SIZE bytes are read */ ++ *num_bytes_read = i + adj; ++ } ++ else { ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_MSTRDATARD_REG); ++ *info->data = regval & CCB_SMB_MSTRRDDATA_MASK; ++ *num_bytes_read = 1; ++ if (info->smb_proto == SMBUS_PROT_RD_WORD) { ++ /* Read Rx FIFO for data bytes */ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_MSTRDATARD_REG); ++ info->data[1] = regval & CCB_SMB_MSTRRDDATA_MASK; ++ *num_bytes_read = 2; ++ } ++ } ++ ++ return(0); ++} ++ ++static int
iproc_smb_xfer(struct i2c_adapter *i2c_adap, u16 addr, ++ unsigned short flags, char read_write, ++ u8 command, int size, union i2c_smbus_data *data) ++{ ++ int rc = 0; ++ struct iproc_smb_drv_int_data *dev = i2c_get_adapdata(i2c_adap); ++ struct iproc_xact_info info; ++ unsigned int num_bytes_read = 0; ++ int smb_xfer_size; ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\n%s: dev=0x%08X\n", __func__, (unsigned int)dev); ++#endif ++ ++ down(&dev->xfer_lock); ++ ++ addr <<= 1; ++ ++ switch (size /* protocol */) { ++ ++ case I2C_SMBUS_BYTE: ++ info.cmd_valid = false; ++ info.command = command; /* not used */ ++ if (read_write == I2C_SMBUS_WRITE) { ++ info.data = &command; ++ } ++ else { ++ info.data = &data->byte; ++ } ++ info.size = 1; ++ info.flags = flags; ++ if (read_write == I2C_SMBUS_READ) { ++ addr |= 0x1; /* Read operation */ ++ info.smb_proto = SMBUS_PROT_RECV_BYTE; ++ info.data = &data->byte; ++ } ++ else { ++ info.smb_proto = SMBUS_PROT_SEND_BYTE; ++ } ++ break; ++ ++ case I2C_SMBUS_BYTE_DATA: ++ info.cmd_valid = true; ++ info.command = command; ++ info.data = &data->byte; ++ info.size = 1; ++ info.flags = flags; ++ ++ if (read_write == I2C_SMBUS_READ) { ++ info.smb_proto = SMBUS_PROT_RD_BYTE; ++ } ++ else { ++ info.smb_proto = SMBUS_PROT_WR_BYTE; ++ //info.smb_proto = SMBUS_PROT_WR_WORD; /* TEMP chg. 
remove later */ ++ } ++ break; ++ ++ case I2C_SMBUS_WORD_DATA: ++ info.cmd_valid = true; ++ info.command = command; ++ info.data = (unsigned char *)(&data->word); ++ info.size = 2; ++ info.flags = flags; ++ if (read_write == I2C_SMBUS_READ) { ++ info.smb_proto = SMBUS_PROT_RD_WORD; ++ /* Protocol(hw) returns data byte count as part of response, ++ for smbus compliant devices */ ++ // info.size = 0; ++ } ++ else { ++ info.smb_proto = SMBUS_PROT_WR_WORD; ++ info.size = 2; ++ } ++ break; ++ ++ case I2C_SMBUS_BLOCK_DATA: ++ case I2C_SMBUS_I2C_BLOCK_DATA: ++ info.cmd_valid = true; ++ info.command = command; ++ info.data = &data->block[1]; ++ info.flags = flags; ++ ++ if (read_write == I2C_SMBUS_READ) { ++ info.smb_proto = SMBUS_PROT_BLK_RD; ++ /* See desc for RD_BYTE_COUNT in reg 0x30 about 'block read'. ++ * If '0', protocol(hw) returns data byte count as part of ++ * response. ++ */ ++#ifdef SMB_BLOCK_XFER_VARIANT ++ info.size = data->block[0]; ++#else ++ info.size = 0; ++#endif ++ } ++ else { ++ info.smb_proto = SMBUS_PROT_BLK_WR; ++ info.size = data->block[0]; /* i2c-core passes the length in this field */ ++ } ++ ++ break; ++ ++ case I2C_SMBUS_BLOCK_PROC_CALL: ++ info.cmd_valid = true; ++ info.command = command; ++ info.data = &data->block[1]; ++ info.flags = flags; ++ info.size = data->block[0]; ++ info.smb_proto = SMBUS_PROT_BLK_WR_BLK_RD_PROC_CALL; ++ break; ++ ++ default: ++ printk(KERN_ERR "%s: Unsupported transaction %d\n", __func__, size); ++ up(&dev->xfer_lock); ++ return -EINVAL; ++ ++ } ++ ++ /* Handle large packets by splitting into SMB_MAX_DATA_SIZE chunks */ ++ smb_xfer_size = (int)info.size; ++ if ((info.smb_proto == SMBUS_PROT_BLK_RD) || (info.smb_proto == SMBUS_PROT_BLK_WR_BLK_RD_PROC_CALL)) ++ data->block[0] = 0; ++ while ( smb_xfer_size ) { ++ if (info.size >= SMB_MAX_DATA_SIZE) ++ info.size = SMB_MAX_DATA_SIZE; ++ ++ if (read_write == I2C_SMBUS_READ) { ++ /* Refer to i2c_smbus_read_byte for params passed.
*/ ++ rc = iproc_smb_data_recv(i2c_adap, addr, &info, &num_bytes_read); ++ /* if failed due to bus hang, but recovered, retry once */ ++ if (rc == -ECOMM) { ++ rc = iproc_smb_data_recv(i2c_adap, addr, &info, &num_bytes_read); ++ } ++ /* For block read call, we pass the actual amount of data sent by ++ * slave, as expected by std Linux API */ ++ if ((info.smb_proto == SMBUS_PROT_BLK_RD) || ++ (info.smb_proto == SMBUS_PROT_BLK_WR_BLK_RD_PROC_CALL)) { ++ if (rc == 0) { ++ data->block[0] += num_bytes_read; ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "%s: num bytes read=%u\n", ++ __func__, data->block[0]); ++#endif ++ } ++ } ++ } ++ else { ++ /* Refer to i2c_smbus_write_byte params passed. */ ++ rc = iproc_smb_data_send(i2c_adap, addr, &info); ++ /* if failed due to bus hang, but recovered, retry */ ++ if (rc == -ECOMM) { ++ rc = iproc_smb_data_send(i2c_adap, addr, &info); ++ } ++ } ++ ++ if (rc < 0) { ++ printk(KERN_INFO "%s %s: %s error accessing device 0x%X rc=%d", __func__, dev->adapter.name, ++ (read_write == I2C_SMBUS_READ) ? "Read" : "Write", addr, rc); ++ up(&dev->xfer_lock); ++ return -EREMOTEIO; ++ } ++ if (info.size == SMB_MAX_DATA_SIZE) { ++ smb_xfer_size -= SMB_MAX_DATA_SIZE; ++ info.size = smb_xfer_size; ++ info.data += SMB_MAX_DATA_SIZE; ++ info.command += SMB_MAX_DATA_SIZE; /* Adjust I2c device register offset. Not required if the access register addr pointing to FIFO */ ++ } ++ else ++ break; ++ } ++ msleep(1); ++ up(&dev->xfer_lock); ++ ++ return (rc); ++} ++ ++static int ++proc_debug_read(struct file *file, char __user *buffer, size_t count, loff_t *off) ++{ ++ unsigned int len = 0; ++ struct iproc_smb_drv_int_data *dev = (struct iproc_smb_drv_int_data *) PDE_DATA (file->f_inode); ++ ++ if (off > 0) ++ return 0; ++ ++ len += sprintf(buffer + len, "Debug print is %s\n", ++ dev->debug ? 
"enabled" : "disabled"); ++ ++ return len; ++} ++ ++/* Command interface for reading/writing to various I2C/SMBus devices */ ++#ifndef SMB_BLK_XFER_TEST ++static int ++proc_debug_write(struct file *file, const char __user *buffer, ++ unsigned long count, void *data) ++{ ++ struct iproc_smb_drv_int_data *dev = (struct iproc_smb_drv_int_data *)data; ++ int rc; ++ unsigned char kbuf[MAX_PROC_BUF_SIZE]; ++ union i2c_smbus_data i2cdata; ++ unsigned int val, i2cdev_addr, rd_wr_op; ++ int addr; ++ ++ if (count > MAX_PROC_BUF_SIZE) { ++ count = MAX_PROC_BUF_SIZE; ++ } ++ ++ rc = copy_from_user(kbuf, buffer, count); ++ if (rc) { ++ printk(KERN_ERR "%s: copy_from_user failed status=%d\n", __func__, rc); ++ return -EFAULT; ++ } ++ ++ rc = sscanf(kbuf, "%u %u %d %u", &rd_wr_op, &i2cdev_addr, &addr, &val); ++ if (rc != 4) { ++ printk(KERN_ERR "\necho args > %s", PROC_ENTRY_DEBUG); ++ printk(KERN_ERR "\nargs (all values should be in decimal)):"); ++ printk(KERN_ERR "\nrd_wr_op: 1 = read, 0 = write"); ++ printk(KERN_ERR "\ni2cdev_addr: I2C device address in decimal"); ++ printk(KERN_ERR "\noffset: offset of location within I2C device"); ++ printk(KERN_ERR "\naddr -1 if offset not applicable"); ++ printk(KERN_ERR "\nval: For write op: 8-bit value.\n" ++ " For read op: not used, may be 0\n\n"); ++ return count; ++ } ++ ++ printk("\nArg values :"); ++ printk("\nrd_wr_op = %u", rd_wr_op); ++ printk("\ni2cdev_addr = 0x%X", i2cdev_addr); ++ printk("\noffset = %d", addr); ++ printk("\nval = %u", val); ++ if (rd_wr_op > 1) { ++ printk(KERN_ERR "Error: Invalid rd_wr_op value %u\n", rd_wr_op); ++ return count; ++ } ++ ++ if (i2cdev_addr > 127) { ++ printk(KERN_ERR "Error: i2cdev_addr must be 7-bit value\n"); ++ return count; ++ } ++ ++ if (addr > 255) { ++ printk(KERN_ERR "Error: offset out of range for this device\n"); ++ return count; ++ } ++ ++ printk("Command can execute slow, please wait...\n"); ++ ++ if (rd_wr_op == 0) { /* Write operation */ ++ i2cdata.byte = val; ++ if (addr == -1) 
{ ++ /* Device does not support, or require an offset to write to the ++ * location ++ */ ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, ++ I2C_SMBUS_WRITE, (unsigned char)0, ++ I2C_SMBUS_BYTE, &i2cdata); ++ } else { ++ /* Address required for write access */ ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, ++ I2C_SMBUS_WRITE, addr, I2C_SMBUS_BYTE_DATA, ++ &i2cdata); ++ } ++ ++ if (rc) { ++ printk(KERN_ERR "%s: iproc_smb_xfer:write failed status=%d," ++ " addr=%u, val = 0x%X\n", __func__, rc, addr, val); ++ /* return -EFAULT; */ ++ } else { ++ printk("Write OK.Wrote 0x%X at addr %u\n", val, addr); ++ } ++ ++ msleep(1); /* Delay required, since smb(i2c) interface is slow */ ++ } ++ ++ if (rd_wr_op == 1) { /* Read operation */ ++ if (addr == -1) { ++ /* Device does not support, or require an offset to read from the ++ * location ++ */ ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, I2C_SMBUS_READ, ++ (unsigned char)0, I2C_SMBUS_BYTE, &i2cdata); ++ } else { ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, I2C_SMBUS_READ, ++ addr, I2C_SMBUS_BYTE_DATA, &i2cdata); ++ } ++ ++ if (rc) { ++ printk(KERN_ERR "%s: iproc_smb_xfer failed status=%d\n", __func__, rc); ++ /* return -EFAULT; */ ++ } else { ++ printk("Read OK.Value read at %u = 0x%X\n", addr, i2cdata.byte); ++ } ++ msleep(1); /* Delay required, since smb(i2c) interface is slow */ ++ } ++ ++#ifdef IPROC_SMB_DBG ++ iproc_dump_smb_regs(dev); ++#endif /* IPROC_SMB_DBG */ ++ ++ printk("Last intr sts = 0x%08X\n", dev->smb_counters.last_int_sts); ++ printk("mstr_start_busy_cnt = %u, mstr_rx_evt_cnt = %u, rx fifo full cnt = %u\n\n", ++ dev->smb_counters.mstr_start_busy_cnt, ++ dev->smb_counters.mstr_rx_evt_cnt, ++ dev->smb_counters.mstr_rx_fifo_full_cnt); ++ ++ return count; ++} ++#endif ++ ++#ifdef SMB_BLK_XFER_TEST ++static int ++proc_debug_write(struct file *file, const char __user *buffer, size_t count, loff_t *off) ++{ ++ struct iproc_smb_drv_int_data *dev = (struct iproc_smb_drv_int_data 
*) PDE_DATA (file->f_inode); ++ int rc; ++ unsigned char kbuf[MAX_PROC_BUF_SIZE]; ++ union i2c_smbus_data i2cdata; ++ unsigned int val, i2cdev_addr, rd_wr_op; ++ int addr; ++ int i, j; ++ unsigned int burst_len, repated_cnt, total_cnt; ++ ++ if (count > MAX_PROC_BUF_SIZE) { ++ count = MAX_PROC_BUF_SIZE; ++ } ++ ++ rc = copy_from_user(kbuf, buffer, count); ++ ++ if (rc) { ++ printk (KERN_ERR "%s: copy_from_user failed status=%d", __func__, rc); ++ return -EFAULT; ++ ++ } ++ ++ rc = sscanf(kbuf, "%u %u %d %u %u %u", &rd_wr_op, &i2cdev_addr, &addr, &val, &burst_len, &repated_cnt); ++ if (rc != 6) { ++ burst_len = 1; ++ repated_cnt = 1; ++ ++ if (rc < 4 ) { ++ printk(KERN_ERR "\necho args > %s", PROC_ENTRY_DEBUG); ++ printk(KERN_ERR "\nargs (all values should be in decimal)):"); ++ printk(KERN_ERR "\nrd_wr_op: 1 = read, 0 = write"); ++ printk(KERN_ERR "\ni2cdev_addr: I2C device address in decimal"); ++ printk(KERN_ERR "\noffset: offset of location within I2C device"); ++ printk(KERN_ERR "\naddr -1 if offset not applicable"); ++ printk(KERN_ERR "\nval: For write op: 8-bit value.\n" ++ " For read op: not used, may be 0\n\n"); ++ printk(KERN_ERR "\burst_length: write block transfer byte length (<=8 for many EEPROM devices)"); ++ printk(KERN_ERR "\repated_cnt: number of repated write transfer of burst_len"); ++ return -EFAULT; ++ } ++ } ++ total_cnt = burst_len * repated_cnt; ++ ++ printk(KERN_DEBUG "\nArg values :"); ++ printk(KERN_DEBUG "\nrd_wr_op = %u", rd_wr_op); ++ printk(KERN_DEBUG "\ni2cdev_addr = 0x%X", i2cdev_addr); ++ printk(KERN_DEBUG "\noffset = %d", addr); ++ printk(KERN_DEBUG "\nval = %u", val); ++ ++ if (rd_wr_op > 1) { ++ printk(KERN_ERR "\nError: Invalid rd_wr_op value %u\n", rd_wr_op); ++ return count; ++ } ++ ++ if (i2cdev_addr > 127) { ++ printk(KERN_ERR "\nError: i2cdev_addr must be 7-bit value\n"); ++ return count; ++ } ++ ++ if (addr > 255) { ++ printk(KERN_ERR "\nError: offset out of range for this device\n"); ++ return count; ++ } ++ ++ printk 
(KERN_ERR "\nCommand can execute slow, please wait...\n"); ++ ++ if (rd_wr_op == 0) { /* Write operation */ ++ if (total_cnt == 1) { ++#if defined(CONFIG_MACH_SB2) ++ /* Testing EEPROM that requires 2-byte address */ ++ unsigned char *data = &i2cdata.word; ++ data[0] = addr; ++ data[1] = val; ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, ++ I2C_SMBUS_WRITE, (unsigned char)0, I2C_SMBUS_WORD_DATA, &i2cdata); ++#else ++ i2cdata.byte = val; ++ if (addr == -1) { ++ /* Device does not support, or require an offset to write to the ++ * location ++ */ ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, ++ I2C_SMBUS_WRITE, (unsigned char)0, ++ I2C_SMBUS_BYTE, &i2cdata); ++ } ++ else { ++ /* Address required for write access */ ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, ++ I2C_SMBUS_WRITE, addr, I2C_SMBUS_BYTE_DATA, &i2cdata); ++ } ++#endif ++ if (rc) { ++ printk (KERN_ERR "\n%s: iproc_smb_xfer:write failed status=%d," ++ " addr=%u, val = 0x%X\n", __func__, rc, addr, val); ++ /* return -EFAULT; */ ++ } ++ else { ++ printk(KERN_ERR "Write OK. Wrote 0x%X at addr %u\n", val, addr); ++ } ++ msleep(1); /* Delay required, since smb(i2c) interface is slow */ ++ } ++ else { ++/* test of block xfer: echo "0 80 0 0 8 16" > /proc/iproc-i2c/iproc-i2c0/iproc-i2c-dbg */ ++/* write to EEPROM I2c device (slave addr 80=0x50), addr offset: 0, vlaue: starting from 0, block xfer size:8, repeat_cnt:16 (EEPROM doesn't accept block xfer length > 8) */ ++/* repeat_cnt times of write block transfer */ ++/* for single address cycle I2C device only */ ++ for(j=0;jadapter, i2cdev_addr, 0x0, I2C_SMBUS_WRITE, ++ addr+j*burst_len, I2C_SMBUS_BLOCK_DATA, &i2cdata); ++ if (rc) { ++ printk (KERN_ERR "\n%s: iproc_smb_xfer:write failed status=%d," ++ " addr=%u, val = 0x%X\n", __func__, rc, addr, val); ++ return -EFAULT; ++ } ++ msleep(1); ++ } ++ } /* ! 
total_cnt == 1 */ ++ } ++ ++ if (rd_wr_op == 1) { /* Read operation */ ++ if (total_cnt == 1) { ++#if defined(CONFIG_MACH_SB2) ++ /* Testing EEPROM that requires 2-byte address: to support random read, ++ * issue dummy write and then current address read ++ */ ++ i2cdata.byte = addr; ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, ++ I2C_SMBUS_WRITE, 0, I2C_SMBUS_BYTE_DATA, &i2cdata); ++ if (rc) { ++ printk (KERN_ERR "\n%s: iproc_smb_xfer dummy write failed status=%d\n", __func__, rc); ++ } ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, I2C_SMBUS_READ, ++ (unsigned char)0, I2C_SMBUS_BYTE, &i2cdata); ++#else ++ if (addr == -1) { ++ /* Device does not support, or require an offset to read from the ++ * location ++ */ ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, I2C_SMBUS_READ, ++ (unsigned char)0, I2C_SMBUS_BYTE, &i2cdata); ++ } ++ else { ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, I2C_SMBUS_READ, ++ addr, I2C_SMBUS_BYTE_DATA, &i2cdata); ++ } ++#endif ++ if (rc) { ++ printk (KERN_ERR "\n%s: iproc_smb_xfer failed status=%d\n", __func__, rc); ++ ++ /* return -EFAULT; */ ++ } ++ else { ++ printk(KERN_ERR "\nRead OK.\n--------Value read at %u = 0x%X\n", addr, i2cdata.byte); ++ } ++ ++ msleep(1); /* Delay required, since smb(i2c) interface is slow */ ++ } ++ else { ++ for (i = 1; i <= total_cnt; i++) ++ i2cdata.block[i] = 0; ++ i2cdata.block[0] = total_cnt; ++ rc = iproc_smb_xfer(&dev->adapter, i2cdev_addr, 0x0, I2C_SMBUS_READ, ++ addr, I2C_SMBUS_BLOCK_DATA, &i2cdata); ++ if (rc) { ++ printk (KERN_ERR "\n%s: iproc_smb_xfer:read failed status=%d," ++ " addr=%u, val = 0x%X\n", __func__, rc, addr, val); ++ return -EFAULT; ++ } ++ msleep(1); ++ for (i = 1; i <= total_cnt; i++) ++ printk("%d ", i2cdata.block[i]); ++ } /* ! 
total_cnt == 1 */ ++ } ++ ++#ifdef IPROC_SMB_DBG ++ iproc_dump_smb_regs(dev); ++#endif ++ ++ printk(KERN_DEBUG "Last intr sts = 0x%08X\n", dev->smb_counters.last_int_sts); ++ ++ printk(KERN_DEBUG "mstr_start_busy_cnt = %u, mstr_rx_evt_cnt = %u, rx fifo full cnt = %u\n", ++ dev->smb_counters.mstr_start_busy_cnt, ++ dev->smb_counters.mstr_rx_evt_cnt, ++ dev->smb_counters.mstr_rx_fifo_full_cnt); ++ ++ return count; ++} ++#endif ++ ++ ++#ifdef CONFIG_USE_SVK_VERSION ++/* Written for SVK boards */ ++static int ++proc_debug_write_svk(struct file *file, const char __user *buffer, size_t count, loff_t *off) ++{ ++ struct iproc_smb_drv_int_data *dev = (struct iproc_smb_drv_int_data *) PDE_DATA (file->f_inode); ++ int rc; ++ unsigned int debug; ++ unsigned char kbuf[MAX_PROC_BUF_SIZE]; ++ union i2c_smbus_data i2cdata; ++ unsigned int val, addr; ++ ++ if (count > MAX_PROC_BUF_SIZE) { ++ count = MAX_PROC_BUF_SIZE; ++ } ++ ++ rc = copy_from_user(kbuf, buffer, count); ++ if (rc) { ++ printk(KERN_ERR "%s: copy_from_user failed status=%d\n", __func__, rc); ++ return -EFAULT; ++ } ++ ++ if (sscanf(kbuf, "%u", &debug) != 1) { ++ printk(KERN_ERR "%s: echo > %s\n", __func__, PROC_ENTRY_DEBUG); ++ return count; ++ } ++ ++ if (debug) { ++ dev->debug = 1; ++ } else { ++ dev->debug = 0; ++ } ++ ++ printk ("Command can execute slow, please wait...\n"); ++ if (!dev->debug) { ++ val = 0xFF; /* Initial value to write */ ++ for(addr = 0x0; addr < 256; val--, addr++) { ++ i2cdata.byte = val; ++ rc = iproc_smb_xfer(&dev->adapter, 0xA0 >> 1, 0x0, I2C_SMBUS_WRITE, ++ addr, I2C_SMBUS_BYTE_DATA, &i2cdata); ++ if (rc) { ++ printk(KERN_ERR "%s: iproc_smb_xfer:write failed status=%d," ++ " addr=%u, val = 0x%X", __func__, rc, addr, val); ++ } else { ++ printk("Write OK. 
Wrote 0x%X at addr %u\n", val, addr); ++ } ++ msleep(1); /* Delay required, since smb(i2c) interface is slow */ ++ } ++ } else { ++ int i; ++ ++ /* Note about address expected by AT24C02: To write in correct order ++ * to AT24C02 using block write, refer bottom of page 9 (Write ++ * Operations) of the data sheet regarding internal incrementing of ++ * address. Based on that explanation, we program the addr value below. ++ * Select the 'highest' address in that page (7, 15, 23, and so on) to ++ * write to that page ++ */ ++ addr = debug - 1; ++ val = jiffies % 256; ++ printk("EEPROM page write. Page start addr = %u," ++ " write data: \n", debug - 8); ++ ++ for (i = 1; i <= 8; i++) { ++ i2cdata.block[i] = val % 256; /* Fill a sequence pattern */ ++ val++; ++ printk("byte%d = 0x%02X\n", i, i2cdata.block[i]); ++ } ++ ++ i2cdata.block[0] = 8; ++ rc = iproc_smb_xfer(&dev->adapter, 0xA0 >> 1, 0x0, I2C_SMBUS_WRITE, ++ addr, I2C_SMBUS_BLOCK_DATA, &i2cdata); ++ if (rc) { ++ printk(KERN_ERR "%s: iproc_smb_xfer:write failed status=%d," ++ " addr=%u, val = 0x%X\n", __func__, rc, addr, val); ++ } else { ++ printk("Block Write OK.\n"); ++ } ++ } ++ ++#ifdef IPROC_SMB_DBG ++ iproc_dump_smb_regs(dev); ++#endif /* IPROC_SMB_DBG */ ++ ++ printk("Last intr sts = 0x%08X\n", ++ dev->smb_counters.last_int_sts); ++ printk("mstr_start_busy_cnt = %u, mstr_rx_evt_cnt = %u, rx fifo full cnt = %u\n\n", ++ dev->smb_counters.mstr_start_busy_cnt, ++ dev->smb_counters.mstr_rx_evt_cnt, ++ dev->smb_counters.mstr_rx_fifo_full_cnt); ++ ++ return count; ++} ++ ++ ++/* Written for SVK boards */ ++static int ++proc_debug_read_svk(struct file *file, char __user *buffer, size_t count, loff_t *off) ++{ ++ unsigned int len = 0; ++ struct iproc_smb_drv_int_data *dev = (struct iproc_smb_drv_int_data *) PDE_DATA (file->f_inode); ++ int rc; ++ union i2c_smbus_data i2cdata; ++ unsigned int addr; ++ ++ if (off > 0) { ++ return 0; ++ } ++ ++ len += sprintf(buffer + len, "Read\n"); ++ ++ printk(KERN_ERR "\nCommand 
can execute slow, please wait...\n"); ++ ++ for(addr = 0x0; addr < 256; addr++) { ++ ++ /* Read operation */ ++ rc = iproc_smb_xfer(&dev->adapter, 0xA0 >> 1, 0x0, I2C_SMBUS_READ, addr, ++ I2C_SMBUS_BYTE_DATA, &i2cdata); ++ ++ if (rc) { ++ printk (KERN_ERR "%s: iproc_smb_xfer failed status=%d", __func__, rc); ++ } ++ else { ++ printk(KERN_DEBUG "Read OK.Value read at %u = 0x%X\n", addr, i2cdata.byte); ++ } ++ ++ msleep(1); ++ } ++ ++#ifdef IPROC_SMB_DBG ++ iproc_dump_smb_regs(dev); ++#endif /* IPROC_SMB_DBG */ ++ ++ printk(KERN_DEBUG "\n\nLast intr sts = 0x%08X", dev->smb_counters.last_int_sts); ++ ++ printk(KERN_DEBUG "mstr_start_busy_cnt = %u, mstr_rx_evt_cnt = %u, rx fifo full cnt = %u\n\n", ++ dev->smb_counters.mstr_start_busy_cnt, ++ dev->smb_counters.mstr_rx_evt_cnt, ++ dev->smb_counters.mstr_rx_fifo_full_cnt); ++ ++ return len; ++} ++#endif /* #ifdef CONFIG_USE_SVK_VERSION */ ++ ++static const struct file_operations proc_smb_file_fops= { ++#ifdef CONFIG_USE_SVK_VERSION ++ .read = proc_debug_read_svk, ++ .write = proc_debug_write_svk, ++#else ++ .read = proc_debug_read, ++ .write = proc_debug_write, ++#endif ++ ++}; ++ ++ ++ ++static int proc_init(struct platform_device *pdev) ++{ ++ int rc; ++ struct iproc_smb_drv_int_data *dev = platform_get_drvdata(pdev); ++ struct procfs *proc = &dev->proc; ++ struct proc_dir_entry *proc_debug; ++ ++ ++ snprintf(proc->name, sizeof(proc->name), "%s%d", PROC_GLOBAL_PARENT_DIR, pdev->id); ++ ++ /* sub directory */ ++ proc->parent = proc_mkdir(proc->name, gProcParent); ++ ++ if (proc->parent == NULL) { ++ return -ENOMEM; ++ } ++ ++ proc_debug = proc_create_data(PROC_ENTRY_DEBUG, 0644, proc->parent, &proc_smb_file_fops, dev); ++ ++ if (proc_debug == NULL) { ++ rc = -ENOMEM; ++ goto err_del_parent; ++ } ++ ++ return 0; ++ ++err_del_parent: ++ remove_proc_entry(proc->name, gProcParent); ++ ++ return rc; ++} ++ ++static int proc_term(struct platform_device *pdev) ++{ ++ struct iproc_smb_drv_int_data *dev = 
platform_get_drvdata(pdev); ++ struct procfs *proc = &dev->proc; ++ ++ remove_proc_entry(PROC_ENTRY_DEBUG, proc->parent); ++ remove_proc_entry(proc->name, gProcParent); ++ ++ return 0; ++} ++ ++/* ++ * This function set clock frequency for SMBus block. As per hardware ++ * engineering, the clock frequency can be changed dynamically. ++ */ ++static int iproc_smb_set_clk_freq(unsigned long base_addr, ++ smb_clk_freq_t freq) ++{ ++ unsigned int regval; ++ unsigned int val; ++ ++ switch (freq) { ++ ++ case I2C_SPEED_100KHz: ++ val = 0; ++ break; ++ ++ case I2C_SPEED_400KHz: ++ val = 1; ++ break; ++ ++ default: ++ return -EINVAL; ++ break; ++ ++ } ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_TIMGCFG_REG); ++ ++ SETREGFLDVAL(regval, val, CCB_SMB_TIMGCFG_MODE400_MASK, ++ CCB_SMB_TIMGCFG_MODE400_SHIFT); ++ ++ iproc_smb_reg_write(base_addr + CCB_SMB_TIMGCFG_REG, regval); ++ ++ return(0); ++} ++ ++static int iproc_smbus_block_init(struct iproc_smb_drv_int_data *dev) ++{ ++ ++ unsigned long base_addr = (unsigned long)dev->block_base_addr; ++ unsigned int regval; ++#ifdef CONFIG_OF ++ u32 i2c_clk_freq; ++ struct device_node *dn = dev->dev->of_node; ++#endif ++ ++ /* Flush Tx, Rx FIFOs. Note we are setting the Rx FIFO threshold to 0. ++ * May be OK since we are setting RX_EVENT and RX_FIFO_FULL interrupts ++ */ ++ regval = CCB_SMB_MSTRRXFIFOFLSH_MASK | CCB_SMB_MSTRTXFIFOFLSH_MASK; ++ ++ iproc_smb_reg_write(base_addr + CCB_SMB_MSTRFIFOCTL_REG, regval); ++ ++ /* Enable SMbus block. Note, we are setting MASTER_RETRY_COUNT to zero ++ * since there will be only one master ++ */ ++ regval = CCB_SMB_CFG_SMBEN_MASK; ++ ++ iproc_smb_reg_write(base_addr + CCB_SMB_CFG_REG, regval); ++ ++ /* Wait a minimum of 50 Usec, as per SMB hw doc. 
But we wait longer */ ++ udelay(100); ++ ++ ++ /* Set default clock frequency */ ++#ifndef CONFIG_OF ++ iproc_smb_set_clk_freq(base_addr, I2C_SPEED_100KHz); ++#else ++ if (of_property_read_u32(dn, "clock-frequency", &i2c_clk_freq)) { ++ i2c_clk_freq = I2C_SPEED_100KHz; /*no property available, use default: 100KHz*/ ++ } ++ iproc_smb_set_clk_freq(base_addr, i2c_clk_freq); ++#endif /* CONFIG_OF */ ++ /* Disable intrs */ ++ regval = 0x0; ++ iproc_smb_reg_write(base_addr + CCB_SMB_EVTEN_REG, regval); ++ ++ /* Clear intrs (W1TC) */ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_EVTSTS_REG); ++ ++ iproc_smb_reg_write(base_addr + CCB_SMB_EVTSTS_REG, regval); ++ ++ return(0); ++} ++ ++/* This function enables interrupts */ ++static int iproc_intr_enable(struct iproc_smb_drv_int_data *dev, unsigned int bmap) ++{ ++ unsigned long base_addr = (unsigned long)dev->block_base_addr; ++ unsigned int regval; ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_EVTEN_REG); ++ ++ regval |= bmap; ++ ++ iproc_smb_reg_write(base_addr + CCB_SMB_EVTEN_REG, regval); ++ ++ /* Store all interrupts enabled so far. 
Note bmap can have only 'incremental' ++ * set of events ++ */ ++ dev->evt_enable_bmap = regval; ++ ++ return(0); ++} ++ ++/* This function disables interrupts */ ++static int iproc_intr_disable(struct iproc_smb_drv_int_data *dev, unsigned int bmap) ++{ ++ unsigned long base_addr = (unsigned long)dev->block_base_addr; ++ unsigned int regval; ++ ++ regval = iproc_smb_reg_read(base_addr + CCB_SMB_EVTEN_REG); ++ ++ regval &= ~bmap; ++ ++ iproc_smb_reg_write(base_addr + CCB_SMB_EVTEN_REG, regval); ++ ++ dev->evt_enable_bmap = regval; ++ ++ return(0); ++} ++ ++/* Verify this sequence with hw engg */ ++static int iproc_smbus_block_deinit(struct iproc_smb_drv_int_data *dev) ++{ ++ unsigned int regval; ++ int rc; ++ ++ /* Disable all interrupts */ ++ regval = 0x0; ++ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_EVTEN_REG, regval); ++ ++ /* Check if a transaction is in progress */ ++ rc = iproc_smb_startbusy_wait(dev); ++ ++ if (rc < 0) { ++ ++ /* Do not exit the function, since we are most likely shutting down */ ++ printk(KERN_ERR "%s: A transaction is still in progress," ++ "but still continuing ", __func__); ++ ++ } ++ ++ /* Disable SMBus block */ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_CFG_REG); ++ ++ regval &= ~CCB_SMB_CFG_SMBEN_MASK; ++ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_CFG_REG, regval); ++ ++ ++ /* Wait for some time */ ++ udelay(100); ++ ++ /* Put the block under reset. 
Note the RESET bit in reg 0x0 is ++ * self clearing ++ */ ++ regval = CCB_SMB_CFG_RST_MASK; ++ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_CFG_REG, regval); ++ ++ return(0); ++} ++ ++static u32 iproc_smb_funcs(struct i2c_adapter *adapter) ++{ ++ /* I2C_FUNC_SMBUS_I2C_BLOCK */ ++ return (I2C_FUNC_SMBUS_BYTE | I2C_FUNC_SMBUS_BYTE_DATA | ++ I2C_FUNC_SMBUS_WORD_DATA | I2C_FUNC_SMBUS_BLOCK_DATA); ++} ++ ++static struct i2c_algorithm iproc_smb_algorithm = { ++ /* .name = "iproc-smb", */ ++ .smbus_xfer = iproc_smb_xfer, ++ .master_xfer = NULL, ++ .functionality = iproc_smb_funcs, ++}; ++ ++ ++static int iproc_smb_probe(struct platform_device *pdev) ++{ ++ int rc=0, irq; ++ struct iproc_smb_drv_int_data *dev; ++ struct i2c_adapter *adap; ++ struct resource *iomem; ++ struct resource *ioarea; ++#ifdef CONFIG_OF ++ struct device_node *dn = pdev->dev.of_node; ++ u32 smb_bus_id; ++#endif ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\n%s: Entering probe\n", __func__); ++#endif /* IPROC_SMB_DBG */ ++ ++#ifdef CONFIG_OF ++ /* first I2C init */ ++ if (gProcParent == NULL) { ++ gProcParent = proc_mkdir(PROC_GLOBAL_PARENT_DIR, NULL); ++ if (gProcParent == NULL) { ++ printk(KERN_ERR "%s: SMBus driver procfs failed\n", __func__); ++ return -ENOMEM; ++ } ++ iproc_smbus_list = NULL; ++ } ++#endif /* CONFIG_OF */ ++ ++ /* Get register memory resource */ ++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ ++ if (!iomem) { ++ printk(KERN_ERR "%s: No mem resource\n", __func__); ++ return -ENODEV; ++ } ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\nGot iomem 0x%p\n", iomem); ++#endif /* IPROC_SMB_DBG */ ++ ++ /* Get the interrupt number */ ++ irq = platform_get_irq(pdev, 0); ++ ++ if (irq == -ENXIO) { ++ printk(KERN_ERR "%s: No irq resource\n", __func__); ++ return -ENODEV; ++ } ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\nGot irqnum %d\n", irq); ++#endif /* IPROC_SMB_DBG */ ++ ++ /* Mark the memory region as used */ ++ ioarea = 
request_mem_region(iomem->start, resource_size(iomem), ++ pdev->name); ++ if (!ioarea) { ++ printk(KERN_ERR "%s: SMBus region already claimed\n", __func__); ++ return -EBUSY; ++ } ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\nGot ioarea 0x%p\n", ioarea); ++#endif /* IPROC_SMB_DBG */ ++ ++ /* Allocate memory for driver's internal data structure */ ++ dev = kzalloc(sizeof(*dev), GFP_KERNEL); ++ ++ if (!dev) { ++ printk(KERN_ERR "%s: Couldn't allocate memory for driver's internaldb\n", __func__); ++ rc = -ENOMEM; ++ goto err_release_mem_region; ++ } ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\nGot dev 0x%p\n", dev); ++#endif /* IPROC_SMB_DBG */ ++ ++ dev->dev = &pdev->dev; ++ init_MUTEX(&dev->xfer_lock); ++ init_completion(&dev->ses_done); ++ dev->irq = irq; ++ ++ dev->block_base_addr = ioremap(iomem->start, resource_size(iomem)); ++ ++ if (!dev->block_base_addr) { ++ printk(KERN_ERR "%s: ioremap of register space failed\n", __func__); ++ rc = -ENOMEM; ++ goto err_free_dev_mem; ++ } ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\n ==== Got block_base_addr=0x%08X\n", (unsigned int)dev->block_base_addr); ++ /* iproc_dump_smb_regs(dev); */ ++#endif /* IPROC_SMB_DBG */ ++ ++ dev->enable_evts = ENABLE_INTR; /* Default value, can be changed after ++ initial testing */ ++ ++ platform_set_drvdata(pdev, dev); ++ ++ /* Init internal regs, disable intrs (and then clear intrs), set fifo ++ * thresholds, etc. 
++ */ ++ iproc_smbus_block_init(dev); ++ ++ /* Register ISR handler */ ++ rc = request_irq(dev->irq, iproc_smb_isr, IRQF_SHARED, pdev->name, dev); ++ ++ if (rc) { ++ printk(KERN_ERR "%s: failed to request irq %d, rc=%d\n", __func__, dev->irq, rc); ++ goto err_smb_deinit; ++ } ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\nrequest_irq succeeded\n"); ++#endif /* IPROC_SMB_DBG */ ++ ++ adap = &dev->adapter; ++ i2c_set_adapdata(adap, dev); /* Verify if this place is OK */ ++ adap->owner = THIS_MODULE; ++ adap->class = UINT_MAX; /* Can be used by any I2C device */ ++ adap->algo = &iproc_smb_algorithm; ++ adap->dev.parent = &pdev->dev; /* */ ++#ifndef CONFIG_OF ++ adap->nr = pdev->id; ++#else ++ if (of_property_read_u32(dn, "#bus-id", &smb_bus_id)) { ++ dev_warn(&pdev->dev, "missing #bus-id property (default to 0)\n"); ++ smb_bus_id = 0; ++ } ++ adap->nr = smb_bus_id; ++ pdev->id = smb_bus_id; ++ adap->dev.of_node = pdev->dev.of_node; /* needed for adding I2C child devices */ ++#endif /* CONFIG_OF */ ++ snprintf(adap->name, sizeof(adap->name), "iproc-smb%d", pdev->id); ++ ++ /* ++ * I2C device drivers may be active on return from ++ * i2c_add_numbered_adapter() ++ */ ++ rc = i2c_add_numbered_adapter(adap); ++ ++ if (rc) { ++ printk(KERN_ERR "%s: Failed to add I2C adapter, rc=%d\n", __func__, rc); ++ goto err_free_irq; ++ } ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\ni2c_add_numbered_adapter succeeded\n"); ++#endif /* IPROC_SMB_DBG */ ++ ++ /* Turn on default set of interrupts */ ++ /* For Rx, enable RX fifo full, threshold hit interrupts. Other rx ++ * interrupts will be set in the read/recv transactions, as required ++ * For Tx, enable fifo under run intr. 
Other intrs will be set in send ++ * write access functions ++ */ ++ iproc_intr_enable(dev, CCB_SMB_MSTRRXFIFOFULLEN_MASK); ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\niproc_intr_enable complete, intrs enabled\n"); ++#endif /* IPROC_SMB_DBG */ ++ ++ rc = proc_init(pdev); ++ ++ if (rc) { ++ printk(KERN_ERR "%s: Failed to install procfs entry, rc=%d\n", __func__, rc); ++ goto err_proc_term; ++ } ++ ++ dev->next = iproc_smbus_list; ++ iproc_smbus_list = dev; ++ ++#ifdef IPROC_SMB_DBG ++ iproc_dump_smb_regs(dev); ++ printk(KERN_DEBUG "%s: probe successful", __func__); ++#endif /* IPROC_SMB_DBG */ ++ ++ return 0; ++ ++err_proc_term: ++ proc_term(pdev); ++ ++err_free_irq: ++ free_irq(dev->irq, dev); ++ ++err_smb_deinit: ++ iproc_smbus_block_deinit(dev); ++ ++ iounmap(dev->block_base_addr); ++ ++ platform_set_drvdata(pdev, NULL); ++ ++err_free_dev_mem: ++ kfree(dev); ++ ++err_release_mem_region: ++ release_mem_region(iomem->start, resource_size(iomem)); ++ ++ printk(KERN_ERR "%s: probe failed, error=%d", __func__, rc); ++ ++ return (rc); ++} ++ ++static int iproc_smb_remove(struct platform_device *pdev) ++{ ++ struct iproc_smb_drv_int_data *dev = platform_get_drvdata(pdev); ++ struct resource *iomem; ++ unsigned int regval; ++ ++ /* Disable interrupts. */ ++ /* Verify: Should we wait for any in-progress xact to complete? 
*/ ++ iproc_intr_disable(dev, ~0); ++ ++ /* Disable SMbus block */ ++ regval = iproc_smb_reg_read((unsigned long)dev->block_base_addr + CCB_SMB_CFG_REG); ++ ++ regval &= ~CCB_SMB_CFG_SMBEN_MASK; ++ ++ iproc_smb_reg_write((unsigned long)dev->block_base_addr + CCB_SMB_CFG_REG, regval); ++ ++ i2c_del_adapter(&dev->adapter); ++ ++ platform_set_drvdata(pdev, NULL); ++ ++ free_irq(dev->irq, dev); ++ ++ iproc_smbus_block_deinit(dev); ++ ++ iounmap(dev->block_base_addr); ++ ++ kfree(dev); ++ ++ iomem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ ++ release_mem_region(iomem->start, resource_size(iomem)); ++ ++ return 0; ++} ++ ++#ifndef CONFIG_OF ++static int iproc_smb_suspend(struct platform_device *pdev, pm_message_t state) ++{ ++/* struct iproc_smb_drv_int_data *dev = platform_get_drvdata(pdev); */ ++ ++ /* Add additional processing, if required */ ++ ++ return (0); ++} ++ ++static int iproc_smb_resume(struct platform_device *pdev) ++{ ++/* struct iproc_smb_drv_int_data *dev = platform_get_drvdata(pdev); */ ++ ++ /* Add additional processing, if required */ ++ ++ return (0); ++} ++ ++static struct platform_driver iproc_smb_driver = { ++ .driver = { ++ .name = "iproc-smb", ++ .owner = THIS_MODULE, ++ }, ++ .probe = iproc_smb_probe, ++ .remove = iproc_smb_remove, ++ .suspend = iproc_smb_suspend, ++ .resume = iproc_smb_resume, ++}; ++ ++static int __init iproc_smb_init(void) ++{ ++ int rc; ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "%s: Entering init", __func__); ++#endif /* IPROC_SMB_DBG */ ++ ++ gProcParent = proc_mkdir(PROC_GLOBAL_PARENT_DIR, NULL); ++ ++ if (gProcParent == NULL) { ++ ++ printk(KERN_ERR "%s: SMBus driver procfs failed\n", __func__); ++ ++ return -ENOMEM; ++ ++ } ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\nproc_mkdir succeeded, gProcParent=0x%08X\n", (unsigned int)gProcParent); ++#endif /* IPROC_SMB_DBG */ ++ ++ rc = platform_driver_register(&iproc_smb_driver); ++ ++ if (rc < 0) { ++ ++ printk(KERN_ERR "%s: SMBus driver init failed, error 
%d\n", __func__, rc); ++ ++ } ++ ++#ifdef IPROC_SMB_DBG ++ printk(KERN_DEBUG "\n%s: Called platform_driver_register, rc=%d\n", __func__, rc); ++#endif /* IPROC_SMB_DBG */ ++ ++ ++ iproc_smbus_list = NULL; ++ ++ /* Should we set RESET bit (reg 0x0) here?: Not necessary as per hw engg */ ++ ++ return rc; ++} ++ ++static void __exit iproc_smb_exit(void) ++{ ++ platform_driver_unregister(&iproc_smb_driver); ++ ++ remove_proc_entry(PROC_GLOBAL_PARENT_DIR, NULL); ++} ++ ++module_init(iproc_smb_init); ++module_exit(iproc_smb_exit); ++ ++#else /* CONFIG_OF */ ++ ++static const struct of_device_id bcm_iproc_i2c_of_match[] = { ++ { .compatible = "brcm,iproc-i2c" }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, bcm_iproc_i2c_of_match); ++ ++static struct platform_driver bcm_iproc_i2c_driver = { ++ .driver = { ++ .name = "bcm-iproc-i2c", ++ .of_match_table = bcm_iproc_i2c_of_match, ++ }, ++ .probe = iproc_smb_probe, ++ .remove = iproc_smb_remove, ++}; ++module_platform_driver(bcm_iproc_i2c_driver); ++#endif /* !CONFIG_OF */ ++ ++MODULE_AUTHOR("Broadcom Corporation"); ++MODULE_DESCRIPTION("IPROC I2C (SMBus) Bus Driver"); ++MODULE_LICENSE("GPL"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig +--- a/drivers/mmc/host/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/mmc/host/Kconfig 2017-11-09 17:53:42.449278000 +0800 +@@ -786,3 +786,12 @@ config MMC_MTK + If you have a machine with a integrated SD/MMC card reader, say Y or M here. + This is needed if support for any SD/SDIO/MMC devices is required. + If unsure, say N. ++ ++config MMC_SDHCI_XGS_IPROC ++ tristate "Broadcom XGS iProc SD/MMC Card Interface support" ++ select MMC_SDHCI_IO_ACCESSORS ++ default n ++ help ++ This selects the platform Secure Digital Host Controller Interface. ++ If unsure, say N. 
++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mmc/host/Makefile b/drivers/mmc/host/Makefile +--- a/drivers/mmc/host/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/mmc/host/Makefile 2017-11-09 17:53:42.457283000 +0800 +@@ -75,6 +75,7 @@ obj-$(CONFIG_MMC_SDHCI_BCM2835) += sdhc + obj-$(CONFIG_MMC_SDHCI_IPROC) += sdhci-iproc.o + obj-$(CONFIG_MMC_SDHCI_MSM) += sdhci-msm.o + obj-$(CONFIG_MMC_SDHCI_ST) += sdhci-st.o ++obj-$(CONFIG_MMC_SDHCI_XGS_IPROC) += sdhci-bcm-hr3.o sdhci-xgs-iproc.o + + ifeq ($(CONFIG_CB710_DEBUG),y) + CFLAGS-cb710-mmc += -DDEBUG +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mmc/host/sdhci-bcm-hr3.c b/drivers/mmc/host/sdhci-bcm-hr3.c +--- a/drivers/mmc/host/sdhci-bcm-hr3.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/mmc/host/sdhci-bcm-hr3.c 2017-11-09 17:53:42.528279000 +0800 +@@ -0,0 +1,611 @@ ++/* ++ * drivers/mmc/host/sdhci-bcm-hr3 - Broadcom HR3 SDHCI Platform driver ++ * ++ * Copyright (C) 2014-2016, Broadcom Corporation. All Rights Reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "sdhci.h" ++ ++struct sdhci_xgs_iproc_data { ++ struct sdhci_host *host; ++ struct clk *clk; ++ unsigned host_num; ++}; ++ ++struct xgs_iproc_sdhci_host { ++ struct sdhci_host host; ++ void __iomem *wrap_base; ++ void __iomem *idm_base; ++ void __iomem *cmicd_base; ++ u32 shadow_cmd; ++ u32 shadow_blk; ++}; ++ ++extern void __iomem *get_iproc_wrap_ctrl_base(void); ++static int iproc_top_sdio_config(void __iomem *cmicd_base); ++ ++static inline void ++iproc_sdhci_writel(struct sdhci_host *host, u32 val, int reg) ++{ ++ /* WAR for SDIO/GPIO setting might be reset by SDK for HR3. */ ++ if (reg == SDHCI_INT_STATUS) { ++ struct xgs_iproc_sdhci_host *iproc_host = (struct xgs_iproc_sdhci_host *)host; ++ iproc_top_sdio_config(iproc_host->cmicd_base); ++ } ++ ++ writel(val, host->ioaddr + reg); ++} ++ ++static inline u32 ++iproc_sdhci_readl(struct sdhci_host *host, int reg) ++{ ++ return readl(host->ioaddr + reg); ++} ++ ++static void ++iproc_sdhci_writew(struct sdhci_host *host, u16 val, int reg) ++{ ++ struct xgs_iproc_sdhci_host *iproc_host = (struct xgs_iproc_sdhci_host *)host; ++ u32 oldval, newval; ++ u32 word_num = (reg >> 1) & 1; ++ u32 word_shift = word_num * 16; ++ u32 mask = 0xffff << word_shift; ++ ++ if (reg == SDHCI_COMMAND) { ++ if (iproc_host->shadow_blk != 0) { ++ iproc_sdhci_writel(host, iproc_host->shadow_blk, SDHCI_BLOCK_SIZE); ++ iproc_host->shadow_blk = 0; ++ } ++ oldval = iproc_host->shadow_cmd; ++ } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { ++ oldval = iproc_host->shadow_blk; ++ } else { ++ oldval = iproc_sdhci_readl(host, reg & ~3); ++ } ++ newval = (oldval & ~mask) | (val << word_shift); ++ ++ if (reg == SDHCI_TRANSFER_MODE) { ++ iproc_host->shadow_cmd = newval; ++ } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { ++ iproc_host->shadow_blk = newval; ++ } else { ++ iproc_sdhci_writel(host, newval, reg & ~3); ++ } 
++} ++ ++static u16 ++iproc_sdhci_readw(struct sdhci_host *host, int reg) ++{ ++ u32 val, word; ++ u32 word_num = (reg >> 1) & 1; ++ u32 word_shift = word_num * 16; ++ ++ val = iproc_sdhci_readl(host, (reg & ~3)); ++ word = (val >> word_shift) & 0xffff; ++ return word; ++} ++ ++static void ++iproc_sdhci_writeb(struct sdhci_host *host, u8 val, int reg) ++{ ++ u32 oldval, newval; ++ u32 byte_num = reg & 3; ++ u32 byte_shift = byte_num * 8; ++ u32 mask = 0xff << byte_shift; ++ ++ oldval = iproc_sdhci_readl(host, reg & ~3); ++ newval = (oldval & ~mask) | (val << byte_shift); ++ ++ iproc_sdhci_writel(host, newval, reg & ~3); ++} ++ ++static u8 ++iproc_sdhci_readb(struct sdhci_host *host, int reg) ++{ ++ u32 val, byte; ++ u32 byte_num = reg & 3; ++ u32 byte_shift = byte_num * 8; ++ ++ val = iproc_sdhci_readl(host, (reg & ~3)); ++ byte = (val >> byte_shift) & 0xff; ++ return byte; ++} ++ ++static u32 ++iproc_sdhci_get_max_clock(struct sdhci_host *host) ++{ ++ unsigned long max_clock; ++ ++ max_clock = (host->caps & SDHCI_CLOCK_V3_BASE_MASK) ++ >> SDHCI_CLOCK_BASE_SHIFT; ++ max_clock *= 1000000; ++ ++ return max_clock; ++} ++ ++static u32 ++iproc_sdhci_get_min_clock(struct sdhci_host *host) ++{ ++ return (host->max_clk / SDHCI_MAX_DIV_SPEC_300); ++} ++ ++static int ++iproc_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) ++{ ++ /* ++ * Tuning is unnecessary for SDR50 and DDR50; moreover, the IPROC platform ++ * doesn't support SDR104, HS200 and Hs400 cards. So, we needn't do anything ++ * for tuning. ++ */ ++ return 0; ++} ++ ++static void ++iproc_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) ++{ ++ /* ++ * WAR that IPROC SD/MMC host need to set the driver strength ++ * to TYPE_A in 3.3v DS/HS mode even if the driver strength is ++ * meaningless for 3.3V signaling. 
++ */ ++ if ((host->timing == MMC_TIMING_LEGACY) || ++ (host->timing == MMC_TIMING_MMC_HS) || ++ (host->timing == MMC_TIMING_SD_HS)) { ++ host->mmc->ios.drv_type = MMC_SET_DRIVER_TYPE_A; ++ } ++ ++ sdhci_set_clock(host, clock); ++} ++ ++static struct sdhci_ops sdhci_xgs_iproc_ops = { ++#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS ++ .write_l = iproc_sdhci_writel, ++ .write_w = iproc_sdhci_writew, ++ .write_b = iproc_sdhci_writeb, ++ .read_l = iproc_sdhci_readl, ++ .read_w = iproc_sdhci_readw, ++ .read_b = iproc_sdhci_readb, ++#else ++#error The iproc SDHCI driver needs CONFIG_MMC_SDHCI_IO_ACCESSORS to be set ++#endif ++ .reset = sdhci_reset, ++ .set_bus_width = sdhci_set_bus_width, ++ .set_uhs_signaling = sdhci_set_uhs_signaling, ++ .set_clock = iproc_sdhci_set_clock, ++ .get_max_clock = iproc_sdhci_get_max_clock, ++ .get_min_clock = iproc_sdhci_get_min_clock, ++ .platform_execute_tuning = iproc_sdhci_execute_tuning, ++}; ++ ++#define IPROC_CMICD_COMPATIBLE "brcm,iproc-cmicd" ++ ++#define CMIC_SBUS_RING_MAP_0_7(base) (base + 0x10098) ++#define CMIC_SBUS_RING_MAP_8_15(base) (base + 0x1009C) ++#define CMIC_SBUS_RING_MAP_16_23(base) (base + 0x100A0) ++#define CMIC_SBUS_RING_MAP_24_31(base) (base + 0x100A4) ++#define CMIC_COMMON_SCHAN_CTRL(base) (base + 0x10000) ++#define CMIC_COMMON_SCHAN_MESSAGE0(base) (base + 0x1000C) ++#define CMIC_COMMON_SCHAN_MESSAGE1(base) (base + 0x10010) ++#define CMIC_COMMON_SCHAN_MESSAGE2(base) (base + 0x10014) ++ ++/* TOP registers */ ++#define TOP_SDIO_MISC_CONTROL 0x0207e500 ++#define TOP_SDIO_MISC_CONTROL__TOP_SDIO_8B_INF 4 ++#define TOP_SDIO_MISC_CONTROL__TOP_SDIO_GPIO_INF_SEL_R 0 ++ ++/* SDIO IDM registers */ ++#define SDIO_IDM0_IO_CONTROL_DIRECT(base) (base + 0x0) ++#define SDIO_IDM0_IO_CONTROL_DIRECT__CMD_COMFLICT_DISABLE 22 ++#define SDIO_IDM0_IO_CONTROL_DIRECT__FEEDBACK_CLK_EN 21 ++#define SDIO_IDM0_IO_CONTROL_DIRECT__clk_enable 0 ++#define SDIO_IDM0_IDM_RESET_CONTROL(base) (base + 0x3F8) ++ ++/* IPROC WRAP registers */ ++#define 
IPROC_WRAP_SDIO_CONTROL(base) (base + 0xb0) ++#define IPROC_WRAP_SDIO_CONTROL1(base) (base + 0xb4) ++#define IPROC_WRAP_SDIO_CONTROL2(base) (base + 0xb8) ++#define IPROC_WRAP_SDIO_CONTROL3(base) (base + 0xbc) ++#define IPROC_WRAP_SDIO_CONTROL4(base) (base + 0xc0) ++#define IPROC_WRAP_SDIO_CONTROL5(base) (base + 0xc4) ++#define IPROC_WRAP_SDIO_1P8_FAIL_CONTROL(base) (base + 0xc8) ++#define IPROC_WRAP_SDIO_1P8_FAIL_CONTROL__SDIO_VDDO_18V_FAIL_SOVW 1 ++#define IPROC_WRAP_SDIO_1P8_FAIL_CONTROL__SDIO_UHS1_18V_VREG_FAIL 0 ++ ++/* ++ * SDIO_CAPS_L ++ * ++ * Field Bit(s) ++ * =========================== ++ * DDR50 31 ++ * SDR104 30 ++ * SDR50 29 ++ * SLOTTYPE 28:27 ++ * ASYNCHIRQ 26 ++ * SYSBUS64 25 ++ * V18 24 ++ * V3 23 ++ * V33 22 ++ * SUPRSM 21 ++ * SDMA 20 ++ * HSPEED 19 ++ * ADMA2 18 ++ * EXTBUSMED 17 ++ * MAXBLK 16:15 ++ * BCLK 14:7 ++ * TOUT 6 ++ * TOUTFREQ 5:0 ++ */ ++#define SDIO_CAPS_L 0xA17f6470 ++ ++/* ++ * SDIO_CAPS_H ++ * ++ * Field Bit(s) ++ * =========================== ++ * reserved 31:20 ++ * SPIBLOCKMODE 19 ++ * SPIMODE_CAP 18 ++ * CLOCKMULT 17:10 ++ * RETUNE_MODE 9:8 ++ * USETUNE_SDR50 7 ++ * TMRCNT_RETUNE 6:3 ++ * DRVR_TYPED 2 ++ * DRVR_TYPEC 1 ++ * DRVR_TYPEA 0 ++ */ ++#define SDIO_CAPS_H 0x000C000f ++ ++/* ++ * Preset value ++ * ++ * Field Bit(s) ++ * =========================== ++ * Driver Strength 12:11 ++ * Clock Generator 10 ++ * SDCLK Frequeency 9:0 ++ */ ++ ++/* ++ * SDIO_PRESETVAL1 ++ * ++ * Field Bit(s) Description ++ * ============================================================ ++ * DDR50_PRESET 25:13 Preset Value for DDR50 ++ * DEFAULT_PRESET 12:0 Preset Value for Default Speed ++ */ ++#define SDIO_PRESETVAL1 0x01004004 ++ ++/* ++ * SDIO_PRESETVAL2 ++ * ++ * Field Bit(s) Description ++ * ============================================================ ++ * HIGH_SPEED_PRESET 25:13 Preset Value for High Speed ++ * INIT_PRESET 12:0 Preset Value for Initialization ++ */ ++#define SDIO_PRESETVAL2 0x01004100 ++ ++/* ++ * SDIO_PRESETVAL3 ++ * ++ * 
Field Bit(s) Description ++ * ============================================================ ++ * SDR104_PRESET 25:13 Preset Value for SDR104 ++ * SDR12_PRESET 12:0 Preset Value for SDR12 ++ */ ++#define SDIO_PRESETVAL3 0x00000004 ++ ++/* ++ * SDIO_PRESETVAL4 ++ * ++ * Field Bit(s) Description ++ * ============================================================ ++ * SDR25_PRESET 25:13 Preset Value for SDR25 ++ * SDR50_PRESET 12:0 Preset Value for SDR50 ++ */ ++#define SDIO_PRESETVAL4 0x01005001 ++ ++u32 ++cmicd_schan_read(void __iomem *base, u32 ctrl, u32 addr) { ++ u32 read = 0x0; ++ ++ writel(ctrl, CMIC_COMMON_SCHAN_MESSAGE0(base)); ++ writel(addr, CMIC_COMMON_SCHAN_MESSAGE1(base)); ++ ++ writel(0x1, CMIC_COMMON_SCHAN_CTRL(base)); ++ ++ while (read != 0x2) { ++ read = readl(CMIC_COMMON_SCHAN_CTRL(base)); ++ } ++ read = readl(CMIC_COMMON_SCHAN_MESSAGE1(base)); ++ return read; ++} ++ ++u32 ++cmicd_schan_write(void __iomem *base, u32 ctrl, u32 addr, u32 val) { ++ u32 read = 0x0; ++ ++ writel(ctrl, CMIC_COMMON_SCHAN_MESSAGE0(base)); ++ writel(addr, CMIC_COMMON_SCHAN_MESSAGE1(base)); ++ writel(val, CMIC_COMMON_SCHAN_MESSAGE2(base)); ++ ++ writel(0x1, CMIC_COMMON_SCHAN_CTRL(base)); ++ ++ while (read != 0x2) { ++ read = readl(CMIC_COMMON_SCHAN_CTRL(base)); ++ } ++ return read; ++} ++ ++static void ++cmicd_init_soc(void __iomem *base) { ++ /* Configure SBUS Ring Map for TOP, block id = 16, ring number = 4 */ ++ writel(0x11112200, CMIC_SBUS_RING_MAP_0_7(base)); ++ writel(0x00430001, CMIC_SBUS_RING_MAP_8_15(base)); ++ writel(0x00005064, CMIC_SBUS_RING_MAP_16_23(base)); ++ writel(0x00000000, CMIC_SBUS_RING_MAP_24_31(base)); ++} ++ ++static int ++iproc_top_sdio_config(void __iomem *cmicd_base) ++{ ++ u32 val; ++ ++ cmicd_init_soc(cmicd_base); ++ ++ /* Enable SDIO 8 bit mode */ ++ val = cmicd_schan_read(cmicd_base, 0x2c800200, TOP_SDIO_MISC_CONTROL); ++ if ((val & 0x1f) != 0x1f) { ++ val |= (0x1 << TOP_SDIO_MISC_CONTROL__TOP_SDIO_8B_INF); ++ val |= (0xf << 
TOP_SDIO_MISC_CONTROL__TOP_SDIO_GPIO_INF_SEL_R); ++ cmicd_schan_write(cmicd_base, 0x34800200, TOP_SDIO_MISC_CONTROL, val); ++ } ++ ++ return 0; ++} ++ ++static int ++iproc_sdio_init(struct xgs_iproc_sdhci_host *iproc_host) ++{ ++ int ret = 0; ++ u32 val; ++ ++ /* Enable SDIO for SDIO/GPIO selection */ ++ ret = iproc_top_sdio_config(iproc_host->cmicd_base); ++ if (ret < 0) { ++ return ret; ++ } ++ ++ /* Release reset */ ++ writel(0x1, SDIO_IDM0_IDM_RESET_CONTROL(iproc_host->idm_base)); ++ udelay(1000); ++ writel(0x0, SDIO_IDM0_IDM_RESET_CONTROL(iproc_host->idm_base)); ++ ++ /* Enable the SDIO clock */ ++ val = readl(SDIO_IDM0_IO_CONTROL_DIRECT(iproc_host->idm_base)); ++ val |= (1 << SDIO_IDM0_IO_CONTROL_DIRECT__CMD_COMFLICT_DISABLE); ++ val |= (1 << SDIO_IDM0_IO_CONTROL_DIRECT__FEEDBACK_CLK_EN); ++ val |= (1 << SDIO_IDM0_IO_CONTROL_DIRECT__clk_enable); ++ writel(val, SDIO_IDM0_IO_CONTROL_DIRECT(iproc_host->idm_base)); ++ ++ /* Set the 1.8v fail control for HR3. ++ * This setting will not impact the uboot SD/MMC driver, since uboot doesn't ++ * support 1.8v. The 1.8v SDIO will be supportted in Kernel. 
++ */ ++ val = readl(IPROC_WRAP_SDIO_1P8_FAIL_CONTROL(iproc_host->wrap_base)); ++ val |= (1 << IPROC_WRAP_SDIO_1P8_FAIL_CONTROL__SDIO_VDDO_18V_FAIL_SOVW); ++ val &= ~(1 << IPROC_WRAP_SDIO_1P8_FAIL_CONTROL__SDIO_UHS1_18V_VREG_FAIL); ++ writel(val, IPROC_WRAP_SDIO_1P8_FAIL_CONTROL(iproc_host->wrap_base)); ++ ++ /* ++ * Configure SDIO host controller capabilities ++ * (common setting for all SDIO controllers) ++ */ ++ writel(SDIO_CAPS_H, IPROC_WRAP_SDIO_CONTROL(iproc_host->wrap_base)); ++ writel(SDIO_CAPS_L, IPROC_WRAP_SDIO_CONTROL1(iproc_host->wrap_base)); ++ ++ /* ++ * Configure SDIO host controller preset values ++ * (common setting for all SDIO controllers) ++ */ ++ writel(SDIO_PRESETVAL1, IPROC_WRAP_SDIO_CONTROL2(iproc_host->wrap_base)); ++ writel(SDIO_PRESETVAL2, IPROC_WRAP_SDIO_CONTROL3(iproc_host->wrap_base)); ++ writel(SDIO_PRESETVAL3, IPROC_WRAP_SDIO_CONTROL4(iproc_host->wrap_base)); ++ writel(SDIO_PRESETVAL4, IPROC_WRAP_SDIO_CONTROL5(iproc_host->wrap_base)); ++ ++ return 0; ++} ++ ++static int ++sdhci_xgs_iproc_probe(struct platform_device *pdev) ++{ ++ struct xgs_iproc_sdhci_host *iproc_host; ++ struct sdhci_host *host; ++ struct sdhci_xgs_iproc_data *data; ++ struct device_node *np = pdev->dev.of_node; ++ int ret = 0; ++ ++ /* allocate SDHCI host + platform data memory */ ++ host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_xgs_iproc_data)); ++ if (IS_ERR(host)) { ++ printk(KERN_ERR "SDIO%d: Unable to allocate SDHCI host\n", pdev->id); ++ return PTR_ERR(host); ++ } ++ ++ iproc_host = (struct xgs_iproc_sdhci_host *)host; ++ ++ /* set up data structure */ ++ data = sdhci_priv(host); ++ data->host = host; ++ data->host_num = pdev->id; ++ host->hw_name = "IPROC-SDIO"; ++ host->ops = &sdhci_xgs_iproc_ops; ++ host->mmc->caps = MMC_CAP_8_BIT_DATA; ++ host->quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | ++ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; ++ host->irq = (unsigned int)irq_of_parse_and_map(np, 0); ++ host->ioaddr = (void *)of_iomap(np, 0); ++ if 
(!host->ioaddr) { ++ printk(KERN_ERR "SDIO%d: Unable to iomap SDIO registers\n", pdev->id); ++ ret = -ENXIO; ++ goto err_free_host; ++ } ++ ++ iproc_host->idm_base = of_iomap(np, 1); ++ if (!iproc_host->idm_base) { ++ printk(KERN_ERR "Unable to iomap SDIO IDM base address\n"); ++ ret = -ENXIO; ++ goto err_iounmap; ++ } ++ ++ np = of_find_compatible_node(NULL, NULL, IPROC_CMICD_COMPATIBLE); ++ if (!np) { ++ printk(KERN_ERR "Failed to find IPROC_CMICD defined in DT\n"); ++ ret = -ENODEV; ++ goto err_iounmap; ++ } ++ ++ iproc_host->cmicd_base = of_iomap(np, 0); ++ if (!iproc_host->cmicd_base) { ++ printk(KERN_ERR "Unable to iomap IPROC CMICD base address\n"); ++ ret = -ENXIO; ++ goto err_iounmap; ++ } ++ ++ iproc_host->wrap_base = get_iproc_wrap_ctrl_base(); ++ if (!iproc_host->wrap_base) { ++ printk(KERN_ERR "Unable to get IPROC WRAP base address\n"); ++ ret = -ENXIO; ++ goto err_iounmap; ++ } ++ ++ ret = iproc_sdio_init(iproc_host); ++ if (ret < 0) { ++ printk(KERN_ERR "SDIO%d: SDIO initial failed\n", pdev->id); ++ ret = -ENXIO; ++ goto err_iounmap; ++ } ++ ++ platform_set_drvdata(pdev, data); ++ ++ ret = sdhci_add_host(host); ++ if (ret) { ++ printk(KERN_ERR "SDIO%d: Failed to add SDHCI host\n", pdev->id); ++ goto err_iounmap; ++ } ++ ++ return ret; ++ ++err_iounmap: ++ if (iproc_host->idm_base) ++ iounmap(iproc_host->idm_base); ++ if (iproc_host->cmicd_base) ++ iounmap(iproc_host->cmicd_base); ++ if (host->ioaddr) ++ iounmap(host->ioaddr); ++ ++err_free_host: ++ sdhci_free_host(host); ++ ++ return ret; ++} ++ ++static int __exit ++sdhci_xgs_iproc_remove(struct platform_device *pdev) ++{ ++ struct sdhci_xgs_iproc_data *data = platform_get_drvdata(pdev); ++ struct sdhci_host *host = data->host; ++ struct xgs_iproc_sdhci_host *iproc_host = (struct xgs_iproc_sdhci_host *)host; ++ ++ sdhci_remove_host(host, 0); ++ platform_set_drvdata(pdev, NULL); ++ ++ if (iproc_host->idm_base) ++ iounmap(iproc_host->idm_base); ++ if (iproc_host->cmicd_base) ++ 
iounmap(iproc_host->cmicd_base); ++ if (host->ioaddr) ++ iounmap(host->ioaddr); ++ ++ sdhci_free_host(host); ++ release_mem_region(pdev->resource[0].start, ++ pdev->resource[0].end - pdev->resource[0].start + 1); ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++static int ++sdhci_xgs_iproc_suspend(struct platform_device *pdev, pm_message_t state) ++{ ++ int ret = 0; ++ struct sdhci_xgs_iproc_data *data = platform_get_drvdata(pdev); ++ ++ ret = sdhci_suspend_host(data->host); ++ if (ret < 0) { ++ printk("%s: %d\n", __FILE__, __LINE__); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ++sdhci_xgs_iproc_resume(struct platform_device *pdev) ++{ ++ int ret = 0; ++ struct sdhci_xgs_iproc_data *data = platform_get_drvdata(pdev); ++ ++ ret = sdhci_resume_host(data->host); ++ if (ret < 0) { ++ printk("%s: %d\n", __FILE__, __LINE__); ++ return ret; ++ } ++ return 0; ++} ++#else /* CONFIG_PM */ ++ ++#define sdhci_xgs_iproc_suspend NULL ++#define sdhci_xgs_iproc_resume NULL ++ ++#endif /* CONFIG_PM */ ++ ++ ++static const struct of_device_id brcm_iproc_hr3_dt_ids[] = { ++ { .compatible = "brcm,iproc-hr3-sdio"}, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, brcm_iproc_dt_ids); ++ ++static struct platform_driver sdhci_xgs_iproc_driver = { ++ .probe = sdhci_xgs_iproc_probe, ++ .remove = __exit_p(sdhci_xgs_iproc_remove), ++ .suspend = sdhci_xgs_iproc_suspend, ++ .resume = sdhci_xgs_iproc_resume, ++ .driver = { ++ .name = "iproc-hr3-sdio", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(brcm_iproc_hr3_dt_ids), ++ }, ++}; ++ ++module_platform_driver(sdhci_xgs_iproc_driver); ++ ++MODULE_AUTHOR("Broadcom"); ++MODULE_DESCRIPTION("SDHCI XGS HR3 driver"); ++MODULE_LICENSE("GPL"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mmc/host/sdhci-xgs-iproc.c b/drivers/mmc/host/sdhci-xgs-iproc.c +--- a/drivers/mmc/host/sdhci-xgs-iproc.c 1970-01-01 08:00:00.000000000 +0800 ++++ 
b/drivers/mmc/host/sdhci-xgs-iproc.c 2017-11-09 17:53:42.564288000 +0800 +@@ -0,0 +1,311 @@ ++/* ++ * drivers/mmc/host/sdhci-iproc.c - Broadcom IPROC SDHCI Platform driver ++ * ++ * Copyright (C) 2014-2016, Broadcom Corporation. All Rights Reserved. ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 and ++ * only version 2 as published by the Free Software Foundation. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "sdhci.h" ++ ++struct sdhci_xgs_iproc_data { ++ struct sdhci_host *host; ++ struct clk *clk; ++ unsigned host_num; ++}; ++ ++struct xgs_iproc_sdhci_host { ++ struct sdhci_host host; ++ u32 shadow_cmd; ++ u32 shadow_blk; ++}; ++ ++static inline void ++iproc_sdhci_writel(struct sdhci_host *host, u32 val, int reg) ++{ ++ writel(val, host->ioaddr + reg); ++} ++ ++static inline u32 ++iproc_sdhci_readl(struct sdhci_host *host, int reg) ++{ ++ return readl(host->ioaddr + reg); ++} ++ ++static void ++iproc_sdhci_writew(struct sdhci_host *host, u16 val, int reg) ++{ ++ struct xgs_iproc_sdhci_host *iproc_host = (struct xgs_iproc_sdhci_host *)host; ++ u32 oldval, newval; ++ u32 word_num = (reg >> 1) & 1; ++ u32 word_shift = word_num * 16; ++ u32 mask = 0xffff << word_shift; ++ ++ if (reg == SDHCI_COMMAND) { ++ if (iproc_host->shadow_blk != 0) { ++ iproc_sdhci_writel(host, iproc_host->shadow_blk, SDHCI_BLOCK_SIZE); ++ iproc_host->shadow_blk = 0; ++ } ++ oldval = iproc_host->shadow_cmd; ++ } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { ++ oldval = iproc_host->shadow_blk; ++ } else { ++ oldval = iproc_sdhci_readl(host, reg & ~3); ++ } ++ newval = 
(oldval & ~mask) | (val << word_shift); ++ ++ if (reg == SDHCI_TRANSFER_MODE) { ++ iproc_host->shadow_cmd = newval; ++ } else if (reg == SDHCI_BLOCK_SIZE || reg == SDHCI_BLOCK_COUNT) { ++ iproc_host->shadow_blk = newval; ++ } else { ++ iproc_sdhci_writel(host, newval, reg & ~3); ++ } ++} ++ ++static u16 ++iproc_sdhci_readw(struct sdhci_host *host, int reg) ++{ ++ u32 val, word; ++ u32 word_num = (reg >> 1) & 1; ++ u32 word_shift = word_num * 16; ++ ++ val = iproc_sdhci_readl(host, (reg & ~3)); ++ word = (val >> word_shift) & 0xffff; ++ return word; ++} ++ ++ ++static void ++iproc_sdhci_writeb(struct sdhci_host *host, u8 val, int reg) ++{ ++ u32 oldval, newval; ++ u32 byte_num = reg & 3; ++ u32 byte_shift = byte_num * 8; ++ u32 mask = 0xff << byte_shift; ++ ++ oldval = iproc_sdhci_readl(host, reg & ~3); ++ newval = (oldval & ~mask) | (val << byte_shift); ++ ++ iproc_sdhci_writel(host, newval, reg & ~3); ++} ++ ++static u8 ++iproc_sdhci_readb(struct sdhci_host *host, int reg) ++{ ++ u32 val, byte; ++ u32 byte_num = reg & 3; ++ u32 byte_shift = byte_num * 8; ++ ++ val = iproc_sdhci_readl(host, (reg & ~3)); ++ byte = (val >> byte_shift) & 0xff; ++ return byte; ++} ++ ++static u32 ++iproc_sdhci_get_max_clock(struct sdhci_host *host) ++{ ++ unsigned long max_clock; ++ ++ max_clock = (host->caps & SDHCI_CLOCK_V3_BASE_MASK) ++ >> SDHCI_CLOCK_BASE_SHIFT; ++ max_clock *= 1000000; ++ ++ return max_clock; ++} ++ ++static u32 ++iproc_sdhci_get_min_clock(struct sdhci_host *host) ++{ ++ return (host->max_clk / SDHCI_MAX_DIV_SPEC_300); ++} ++ ++static int ++iproc_sdhci_execute_tuning(struct sdhci_host *host, u32 opcode) ++{ ++ /* ++ * Tuning is unnecessary for SDR50 and DDR50; moreover, the IPROC platform ++ * doesn't support SDR104, HS200 and Hs400 cards. So, we needn't do anything ++ * for tuning. 
++ */ ++ return 0; ++} ++ ++static void ++iproc_sdhci_set_clock(struct sdhci_host *host, unsigned int clock) ++{ ++ /* ++ * WAR that IPROC SD/MMC host need to set the driver strength ++ * to TYPE_A in 3.3v DS/HS mode even if the driver strength is ++ * meaningless for 3.3V signaling. ++ */ ++ if ((host->timing == MMC_TIMING_LEGACY) || ++ (host->timing == MMC_TIMING_MMC_HS) || ++ (host->timing == MMC_TIMING_SD_HS)) { ++ host->mmc->ios.drv_type = MMC_SET_DRIVER_TYPE_A; ++ } ++ ++ sdhci_set_clock(host, clock); ++} ++ ++static struct sdhci_ops sdhci_xgs_iproc_ops = { ++#ifdef CONFIG_MMC_SDHCI_IO_ACCESSORS ++ .write_l = iproc_sdhci_writel, ++ .write_w = iproc_sdhci_writew, ++ .write_b = iproc_sdhci_writeb, ++ .read_l = iproc_sdhci_readl, ++ .read_w = iproc_sdhci_readw, ++ .read_b = iproc_sdhci_readb, ++#else ++#error The iproc SDHCI driver needs CONFIG_MMC_SDHCI_IO_ACCESSORS to be set ++#endif ++ .reset = sdhci_reset, ++ .set_bus_width = sdhci_set_bus_width, ++ .set_uhs_signaling = sdhci_set_uhs_signaling, ++ .set_clock = iproc_sdhci_set_clock, ++ .get_max_clock = iproc_sdhci_get_max_clock, ++ .get_min_clock = iproc_sdhci_get_min_clock, ++ .platform_execute_tuning = iproc_sdhci_execute_tuning, ++}; ++ ++static int ++sdhci_xgs_iproc_probe(struct platform_device *pdev) ++{ ++ struct sdhci_host *host; ++ struct sdhci_xgs_iproc_data *data; ++ struct device_node *np = pdev->dev.of_node; ++ int ret = 0; ++ ++ /* allocate SDHCI host + platform data memory */ ++ host = sdhci_alloc_host(&pdev->dev, sizeof(struct sdhci_xgs_iproc_data)); ++ if (IS_ERR(host)) { ++ printk(KERN_ERR "SDIO%d: Unable to allocate SDHCI host\n", pdev->id); ++ return PTR_ERR(host); ++ } ++ ++ /* set up data structure */ ++ data = sdhci_priv(host); ++ data->host = host; ++ data->host_num = pdev->id; ++ host->hw_name = "IPROC-SDIO"; ++ host->ops = &sdhci_xgs_iproc_ops; ++ host->mmc->caps = MMC_CAP_8_BIT_DATA; ++ host->quirks = SDHCI_QUIRK_DATA_TIMEOUT_USES_SDCLK | ++ SDHCI_QUIRK_MULTIBLOCK_READ_ACMD12; ++ ++ 
host->irq = (unsigned int)irq_of_parse_and_map(np, 0); ++ host->ioaddr = (void *)of_iomap(np, 0); ++ if (!host->ioaddr) { ++ printk(KERN_ERR "SDIO%d: Unable to iomap SDIO registers\n", pdev->id); ++ ret = -ENXIO; ++ goto err_free_host; ++ } ++ ++ platform_set_drvdata(pdev, data); ++ ++ ret = sdhci_add_host(host); ++ if (ret) { ++ printk(KERN_ERR "SDIO%d: Failed to add SDHCI host\n", pdev->id); ++ goto err_iounmap; ++ } ++ ++ return ret; ++ ++err_iounmap: ++ iounmap(host->ioaddr); ++ ++err_free_host: ++ sdhci_free_host(host); ++ ++ return ret; ++} ++ ++static int __exit ++sdhci_xgs_iproc_remove(struct platform_device *pdev) ++{ ++ struct sdhci_xgs_iproc_data *data = platform_get_drvdata(pdev); ++ struct sdhci_host *host = data->host; ++ ++ sdhci_remove_host(host, 0); ++ platform_set_drvdata(pdev, NULL); ++ iounmap(host->ioaddr); ++ sdhci_free_host(host); ++ release_mem_region(pdev->resource[0].start, ++ pdev->resource[0].end - pdev->resource[0].start + 1); ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++static int ++sdhci_xgs_iproc_suspend(struct platform_device *pdev, pm_message_t state) ++{ ++ int ret = 0; ++ struct sdhci_xgs_iproc_data *data = platform_get_drvdata(pdev); ++ ++ ret = sdhci_suspend_host(data->host); ++ if (ret < 0) { ++ printk("%s: %d\n", __FILE__, __LINE__); ++ return ret; ++ } ++ ++ return 0; ++} ++ ++static int ++sdhci_xgs_iproc_resume(struct platform_device *pdev) ++{ ++ int ret = 0; ++ struct sdhci_xgs_iproc_data *data = platform_get_drvdata(pdev); ++ ++ ret = sdhci_resume_host(data->host); ++ if (ret < 0) { ++ printk("%s: %d\n", __FILE__, __LINE__); ++ return ret; ++ } ++ return 0; ++} ++#else /* CONFIG_PM */ ++ ++#define sdhci_xgs_iproc_suspend NULL ++#define sdhci_xgs_iproc_resume NULL ++ ++#endif /* CONFIG_PM */ ++ ++ ++static const struct of_device_id brcm_iproc_xgs_dt_ids[] = { ++ { .compatible = "brcm,iproc-xgs-sdio"}, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, brcm_iproc_dt_ids); ++ ++static struct platform_driver sdhci_xgs_iproc_driver = { ++ 
.probe = sdhci_xgs_iproc_probe, ++ .remove = __exit_p(sdhci_xgs_iproc_remove), ++ .suspend = sdhci_xgs_iproc_suspend, ++ .resume = sdhci_xgs_iproc_resume, ++ .driver = { ++ .name = "iproc-xgs-sdio", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(brcm_iproc_xgs_dt_ids), ++ }, ++}; ++ ++module_platform_driver(sdhci_xgs_iproc_driver); ++ ++MODULE_AUTHOR("Broadcom"); ++MODULE_DESCRIPTION("SDHCI XGS IPROC driver"); ++MODULE_LICENSE("GPL"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/Makefile b/drivers/mtd/Makefile +--- a/drivers/mtd/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/mtd/Makefile 2017-11-09 17:53:42.613320000 +0800 +@@ -33,4 +33,5 @@ inftl-objs := inftlcore.o inftlmount.o + obj-y += chips/ lpddr/ maps/ devices/ nand/ onenand/ tests/ + + obj-$(CONFIG_MTD_SPI_NOR) += spi-nor/ ++obj-$(CONFIG_MTD_SPI_NOR_IPROC) += spi-nor/ + obj-$(CONFIG_MTD_UBI) += ubi/ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/devices/Kconfig b/drivers/mtd/devices/Kconfig +--- a/drivers/mtd/devices/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/mtd/devices/Kconfig 2017-11-09 17:53:42.646279000 +0800 +@@ -95,6 +95,12 @@ config MTD_M25P80 + if you want to specify device partitioning or to use a device which + doesn't support the JEDEC ID instruction. 
+ ++config MTD_M25P80_IPROC ++ tristate "M25P80 modified for BRCM iProc" ++ depends on SPI_MASTER && MTD_SPI_NOR_IPROC ++ help ++ This enables access to most modern SPI flash chips for BRCM iProc QSPI controller ++ + config MTD_SPEAR_SMI + tristate "SPEAR MTD NOR Support through SMI controller" + depends on PLAT_SPEAR +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/devices/Makefile b/drivers/mtd/devices/Makefile +--- a/drivers/mtd/devices/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/mtd/devices/Makefile 2017-11-09 17:53:42.647286000 +0800 +@@ -12,6 +12,7 @@ obj-$(CONFIG_MTD_LART) += lart.o + obj-$(CONFIG_MTD_BLOCK2MTD) += block2mtd.o + obj-$(CONFIG_MTD_DATAFLASH) += mtd_dataflash.o + obj-$(CONFIG_MTD_M25P80) += m25p80.o ++obj-$(CONFIG_MTD_M25P80_IPROC) += m25p80-iproc.o + obj-$(CONFIG_MTD_SPEAR_SMI) += spear_smi.o + obj-$(CONFIG_MTD_SST25L) += sst25l.o + obj-$(CONFIG_MTD_BCM47XXSFLASH) += bcm47xxsflash.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/devices/m25p80-iproc.c b/drivers/mtd/devices/m25p80-iproc.c +--- a/drivers/mtd/devices/m25p80-iproc.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/mtd/devices/m25p80-iproc.c 2017-11-09 17:53:42.661287000 +0800 +@@ -0,0 +1,328 @@ ++/* ++ * MTD SPI driver for ST M25Pxx (and similar) serial flash chips based ++ * on m25p80.c with BRCM iProc patch ++ * ++ * This code is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. 
++ * ++ */ ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#include ++#include ++#include ++ ++#define MAX_CMD_SIZE 6 ++struct m25p { ++ struct spi_device *spi; ++ struct spi_nor spi_nor; ++ u8 command[MAX_CMD_SIZE]; ++}; ++ ++static int m25p80_read_reg(struct spi_nor *nor, u8 code, u8 *val, int len) ++{ ++ struct m25p *flash = nor->priv; ++ struct spi_device *spi = flash->spi; ++ int ret; ++ ++ ret = spi_write_then_read(spi, &code, 1, val, len); ++ if (ret < 0) ++ dev_err(&spi->dev, "error %d reading %x\n", ret, code); ++ ++ return ret; ++} ++ ++static void m25p_addr2cmd(struct spi_nor *nor, unsigned int addr, unsigned int len, u8 *cmd) ++{ ++ u16 addr_width = nor->addr_width; ++ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* Use 4-byte mode only if (address + len) is > 16MB */ ++ if (addr + len > 0x1000000) { ++ addr_width = 4; ++ } ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++ /* opcode is in cmd[0] */ ++ cmd[1] = addr >> (addr_width * 8 - 8); ++ cmd[2] = addr >> (addr_width * 8 - 16); ++ cmd[3] = addr >> (addr_width * 8 - 24); ++ cmd[4] = addr >> (addr_width * 8 - 32); ++} ++ ++static int m25p_cmdsz(struct spi_nor *nor, unsigned int addr) ++{ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* Use 4-byte mode only if the address is >= 16MB */ ++ if (addr >= 0x1000000) { ++ return 1 + 4; ++ } ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ return 1 + nor->addr_width; ++} ++ ++static int m25p80_write_reg(struct spi_nor *nor, u8 opcode, u8 *buf, int len) ++{ ++ struct m25p *flash = nor->priv; ++ struct spi_device *spi = flash->spi; ++ ++ flash->command[0] = opcode; ++ if (buf) ++ memcpy(&flash->command[1], buf, len); ++ ++ return spi_write(spi, flash->command, len + 1); ++} ++ ++static void m25p80_write(struct spi_nor *nor, loff_t to, size_t len, ++ size_t *retlen, const u_char *buf) ++{ ++ struct m25p *flash = nor->priv; ++ struct spi_device *spi = flash->spi; ++ struct spi_transfer t[2] = {}; ++ struct spi_message m; ++ int 
cmd_sz = m25p_cmdsz(nor, to); ++ ++ spi_message_init(&m); ++ ++ if (nor->program_opcode == SPINOR_OP_AAI_WP && nor->sst_write_second) ++ cmd_sz = 1; ++ ++ flash->command[0] = nor->program_opcode; ++ m25p_addr2cmd(nor, to, 1, flash->command); ++ ++ t[0].tx_buf = flash->command; ++ t[0].len = cmd_sz; ++ spi_message_add_tail(&t[0], &m); ++ ++ t[1].tx_buf = buf; ++ t[1].len = len; ++ spi_message_add_tail(&t[1], &m); ++ ++ spi_sync(spi, &m); ++ ++ *retlen += m.actual_length - cmd_sz; ++} ++ ++static inline unsigned int m25p80_rx_nbits(struct spi_nor *nor) ++{ ++ switch (nor->flash_read) { ++ case SPI_NOR_DUAL: ++ return 2; ++ case SPI_NOR_QUAD: ++ return 4; ++ default: ++ return 0; ++ } ++} ++ ++/* ++ * Read an address range from the nor chip. The address range ++ * may be any size provided it is within the physical boundaries. ++ */ ++static int m25p80_read(struct spi_nor *nor, loff_t from, size_t len, ++ size_t *retlen, u_char *buf) ++{ ++ struct m25p *flash = nor->priv; ++ struct spi_device *spi = flash->spi; ++ struct spi_transfer t[2]; ++ struct spi_message m; ++ unsigned int dummy = nor->read_dummy; ++ ++ /* convert the dummy cycles to the number of bytes */ ++ dummy /= 8; ++ ++ spi_message_init(&m); ++ memset(t, 0, (sizeof t)); ++ ++ flash->command[0] = nor->read_opcode; ++ m25p_addr2cmd(nor, from, len, flash->command); ++ ++ t[0].tx_buf = flash->command; ++ t[0].len = m25p_cmdsz(nor, from + len - 1) + dummy; ++ spi_message_add_tail(&t[0], &m); ++ ++ t[1].rx_buf = buf; ++ t[1].rx_nbits = m25p80_rx_nbits(nor); ++ t[1].len = len; ++ spi_message_add_tail(&t[1], &m); ++ ++ spi_sync(spi, &m); ++ ++ *retlen = m.actual_length - m25p_cmdsz(nor, from + len - 1) - dummy; ++ return 0; ++} ++ ++static int m25p80_erase(struct spi_nor *nor, loff_t offset) ++{ ++ struct m25p *flash = nor->priv; ++ ++ dev_dbg(nor->dev, "%dKiB at 0x%08x\n", ++ flash->spi_nor.mtd.erasesize / 1024, (u32)offset); ++ ++ /* Set up command buffer. 
*/ ++ flash->command[0] = nor->erase_opcode; ++ m25p_addr2cmd(nor, offset, 1, flash->command); ++ ++ spi_write(flash->spi, flash->command, m25p_cmdsz(nor, offset)); ++ ++ return 0; ++} ++ ++/* ++ * board specific setup should have ensured the SPI clock used here ++ * matches what the READ command supports, at least until this driver ++ * understands FAST_READ (for clocks over 25 MHz). ++ */ ++static int m25p_probe(struct spi_device *spi) ++{ ++ struct mtd_part_parser_data ppdata; ++ struct flash_platform_data *data; ++ struct m25p *flash; ++ struct spi_nor *nor; ++ enum read_mode mode = SPI_NOR_NORMAL; ++ char *flash_name = NULL; ++ int ret; ++ ++ data = dev_get_platdata(&spi->dev); ++ ++ flash = devm_kzalloc(&spi->dev, sizeof(*flash), GFP_KERNEL); ++ if (!flash) ++ return -ENOMEM; ++ ++ nor = &flash->spi_nor; ++ ++ /* install the hooks */ ++ nor->read = m25p80_read; ++ nor->write = m25p80_write; ++ nor->erase = m25p80_erase; ++ nor->write_reg = m25p80_write_reg; ++ nor->read_reg = m25p80_read_reg; ++ ++ nor->dev = &spi->dev; ++ nor->flash_node = spi->dev.of_node; ++ nor->priv = flash; ++ ++ spi_set_drvdata(spi, flash); ++ flash->spi = spi; ++ ++ if (spi->mode & SPI_RX_QUAD) ++ mode = SPI_NOR_QUAD; ++ else if (spi->mode & SPI_RX_DUAL) ++ mode = SPI_NOR_DUAL; ++ ++ if (data && data->name) ++ nor->mtd.name = data->name; ++ ++ /* For some (historical?) reason many platforms provide two different ++ * names in flash_platform_data: "name" and "type". Quite often name is ++ * set to "m25p80" and then "type" provides a real chip name. ++ * If that's the case, respect "type" and ignore a "name". ++ */ ++ if (data && data->type) ++ flash_name = data->type; ++ else ++ flash_name = spi->modalias; ++ ++ ret = spi_nor_scan(nor, flash_name, mode); ++ if (ret) ++ return ret; ++ ++ ppdata.of_node = spi->dev.of_node; ++ ++ return mtd_device_parse_register(&nor->mtd, NULL, &ppdata, ++ data ? data->parts : NULL, ++ data ? 
data->nr_parts : 0); ++} ++ ++ ++static int m25p_remove(struct spi_device *spi) ++{ ++ struct m25p *flash = spi_get_drvdata(spi); ++ ++ /* Clean up MTD stuff. */ ++ return mtd_device_unregister(&flash->spi_nor.mtd); ++} ++ ++/* ++ * Do NOT add to this array without reading the following: ++ * ++ * Historically, many flash devices are bound to this driver by their name. But ++ * since most of these flash are compatible to some extent, and their ++ * differences can often be differentiated by the JEDEC read-ID command, we ++ * encourage new users to add support to the spi-nor library, and simply bind ++ * against a generic string here (e.g., "jedec,spi-nor"). ++ * ++ * Many flash names are kept here in this list (as well as in spi-nor.c) to ++ * keep them available as module aliases for existing platforms. ++ */ ++static const struct spi_device_id m25p_ids[] = { ++ /* ++ * Entries not used in DTs that should be safe to drop after replacing ++ * them with "nor-jedec" in platform data. ++ */ ++ {"s25sl064a"}, {"w25x16"}, {"m25p10"}, {"m25px64"}, ++ ++ /* ++ * Entries that were used in DTs without "nor-jedec" fallback and should ++ * be kept for backward compatibility. 
++ */ ++ {"at25df321a"}, {"at25df641"}, {"at26df081a"}, ++ {"mr25h256"}, ++ {"mx25l4005a"}, {"mx25l1606e"}, {"mx25l6405d"}, {"mx25l12805d"}, ++ {"mx25l25635e"},{"mx66l51235l"}, ++ {"n25q064"}, {"n25q128a11"}, {"n25q128a13"}, {"n25q512a"}, ++ {"s25fl256s1"}, {"s25fl512s"}, {"s25sl12801"}, {"s25fl008k"}, ++ {"s25fl064k"}, ++ {"sst25vf040b"},{"sst25vf016b"},{"sst25vf032b"},{"sst25wf040"}, ++ {"m25p40"}, {"m25p80"}, {"m25p16"}, {"m25p32"}, ++ {"m25p64"}, {"m25p128"}, ++ {"w25x80"}, {"w25x32"}, {"w25q32"}, {"w25q32dw"}, ++ {"w25q80bl"}, {"w25q128"}, {"w25q256"}, ++ ++ /* Flashes that can't be detected using JEDEC */ ++ {"m25p05-nonjedec"}, {"m25p10-nonjedec"}, {"m25p20-nonjedec"}, ++ {"m25p40-nonjedec"}, {"m25p80-nonjedec"}, {"m25p16-nonjedec"}, ++ {"m25p32-nonjedec"}, {"m25p64-nonjedec"}, {"m25p128-nonjedec"}, ++ ++ { }, ++}; ++MODULE_DEVICE_TABLE(spi, m25p_ids); ++ ++static const struct of_device_id m25p_of_table[] = { ++ /* ++ * Generic compatibility for SPI NOR that can be identified by the ++ * JEDEC READ ID opcode (0x9F). Use this, if possible. ++ */ ++ { .compatible = "jedec,spi-nor" }, ++ {} ++}; ++MODULE_DEVICE_TABLE(of, m25p_of_table); ++ ++static struct spi_driver m25p80_driver = { ++ .driver = { ++ .name = "m25p80", ++ .of_match_table = m25p_of_table, ++ }, ++ .id_table = m25p_ids, ++ .probe = m25p_probe, ++ .remove = m25p_remove, ++ ++ /* REVISIT: many of these chips have deep power-down modes, which ++ * should clearly be entered on suspend() to minimize power use. ++ * And also when they're otherwise idle... 
++ */ ++}; ++ ++module_spi_driver(m25p80_driver); ++ ++MODULE_LICENSE("GPL"); ++MODULE_AUTHOR("Mike Lavender"); ++MODULE_DESCRIPTION("MTD SPI driver for ST M25Pxx flash chips"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig +--- a/drivers/mtd/maps/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/mtd/maps/Kconfig 2017-11-09 17:53:42.700292000 +0800 +@@ -97,6 +97,15 @@ config MSP_FLASH_MAP_LIMIT + default "0x02000000" + depends on MSP_FLASH_MAP_LIMIT_32M + ++config MTD_NOR_XGS_IPROC ++ bool "Broadcom XGS iProc CFI NOR support" ++ depends on (ARCH_XGS_IPROC || COMPILE_TEST) && MTD_CFI ++ default n ++ help ++ This selects a driver for the iProc NOR support. ++ ++ If unsure, say N. ++ + config MTD_SUN_UFLASH + tristate "Sun Microsystems userflash support" + depends on SPARC && MTD_CFI && PCI +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/maps/Makefile b/drivers/mtd/maps/Makefile +--- a/drivers/mtd/maps/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/mtd/maps/Makefile 2017-11-09 17:53:42.701291000 +0800 +@@ -43,3 +43,4 @@ obj-$(CONFIG_MTD_VMU) += vmu-flash.o + obj-$(CONFIG_MTD_GPIO_ADDR) += gpio-addr-flash.o + obj-$(CONFIG_MTD_LATCH_ADDR) += latch-addr-flash.o + obj-$(CONFIG_MTD_LANTIQ) += lantiq-flash.o ++obj-$(CONFIG_MTD_NOR_XGS_IPROC) += xgs-iproc-flash.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/maps/xgs-iproc-flash.c b/drivers/mtd/maps/xgs-iproc-flash.c +--- a/drivers/mtd/maps/xgs-iproc-flash.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/mtd/maps/xgs-iproc-flash.c 2017-11-09 17:53:42.757297000 +0800 +@@ -0,0 +1,229 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++#include ++#include ++#include ++#include 
++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++ ++#define IPROC_NOR_COMPATIBLE "brcm,iproc-nor" ++extern void __iomem *get_iproc_dmu_pcu_base(void); ++ ++/* HR2 */ ++#define PNOR_NAND_SEL_REG_OVERRIDE_HR2 2 ++#define PNOR_NAND_SEL_REG_PNOR_SEL_HR2 3 ++ ++/* GH/HR3 */ ++#define PNOR_NAND_SEL_REG_OVERRIDE_GH 0 ++#define PNOR_NAND_SEL_REG_PNOR_SEL_GH 1 ++#define PNOR_DIRECT_CMD_OFFSET 0x10 ++#define PNOR_SET_OPMODE_OFFSET 0X18 ++ ++ ++#define IPROC_STRAP_BOOT_DEV_NAND 1 ++#define IPROC_STRAP_BOOT_DEV_PNOR 4 ++#define ICFG_PNOR_STRAPS__PNOR_SRAM_MW_R 0 ++#define PNOR_set_opmode__set_mw_R 0 ++#define PNOR_direct_cmd__cmd_type_R 21 ++#define IPROC_DMU_STRAPS_OFFSET 0x28 ++#define IPROC_BOOT_STRAP_MASK 0x7 ++#if defined(CONFIG_MACH_HR2) || defined(CONFIG_MACH_GH) ++#define IPROC_STRAP_BOOT_DEV_SHIFT 9 ++#elif defined(CONFIG_MACH_GH2) || defined(CONFIG_MACH_SB2) || \ ++ defined(CONFIG_MACH_HR3) ++#define IPROC_STRAP_BOOT_DEV_SHIFT 10 ++#endif ++ ++ ++static struct mtd_info *nor_mtd; ++ ++static struct map_info s29gl_map = { ++ .name = "S29GL", ++ .bankwidth = 4, ++}; ++ ++ ++/* ++ * Initialize FLASH support ++ */ ++static int __init s29gl_mtd_init(void) ++{ ++ struct device_node *np; ++ void __iomem *reg_addr; ++#ifdef CONFIG_MACH_IPROC_P7 ++ void __iomem *reg_strap; ++#endif ++ void __iomem *nor_enable=NULL; ++ struct platform_device *pdev; ++ struct resource *memres; ++ struct mtd_part_parser_data ppdata; ++ u32 straps, val; ++ ++ np = of_find_compatible_node(NULL, NULL, IPROC_NOR_COMPATIBLE); ++ if (!np) { ++ printk(KERN_INFO "No NOR flash controller enabled in DT\n"); ++ return -ENODEV; ++ } ++ ++ if (!of_device_is_available(np)) { ++ printk(KERN_INFO "NOR flash controller disabled in DT\n"); ++ return -ENODEV; ++ } ++ ++ reg_addr = of_iomap(np, 0); ++ if (!reg_addr) { ++ printk(KERN_ERR "NOR base addr ioremap eror\n"); ++ return -EIO; ++ } ++ ++ nor_enable = of_iomap(np, 2); ++ if (!nor_enable) { 
++ printk(KERN_ERR "PNOR sel ioremap eror\n"); ++ return -EIO; ++ } ++ ++ /* Check boot device */ ++ straps = readl(get_iproc_dmu_pcu_base() + IPROC_DMU_STRAPS_OFFSET); ++ straps = (straps >> IPROC_STRAP_BOOT_DEV_SHIFT) & IPROC_BOOT_STRAP_MASK; ++ ++ if (straps == IPROC_STRAP_BOOT_DEV_NAND) { ++ /* If booting from NAND, PNOR cannot be used */ ++ return -ENODEV; ++ } else if (straps != IPROC_STRAP_BOOT_DEV_PNOR) { ++ /* Switching to PNOR only if not booting from PNOR */ ++ val = readl_relaxed(nor_enable); ++ if (of_find_compatible_node(NULL, NULL, "brcm,hurricane2")) { ++ val |= (1UL << PNOR_NAND_SEL_REG_OVERRIDE_HR2) | ++ (1UL << PNOR_NAND_SEL_REG_PNOR_SEL_HR2); ++ } else { ++ val |= (1UL << PNOR_NAND_SEL_REG_OVERRIDE_GH) | ++ (1UL << PNOR_NAND_SEL_REG_PNOR_SEL_GH); ++ } ++ writel_relaxed(val, nor_enable); ++ ++#ifdef CONFIG_MACH_IPROC_P7 ++ /* Configure controller memory width based on strap */ ++ reg_strap = of_iomap(np, 3); ++ if (!reg_strap) { ++ printk(KERN_ERR "NOR strap addr ioremap eror\n"); ++ return -EIO; ++ } ++ straps = readl_relaxed(reg_strap) & ++ (1 << ICFG_PNOR_STRAPS__PNOR_SRAM_MW_R); ++ if (straps) { ++ /* 16-bit */ ++ val = readl_relaxed ((void * __iomem) ++ (reg_addr + PNOR_SET_OPMODE_OFFSET)); ++ val |= (1 << PNOR_set_opmode__set_mw_R); ++ writel_relaxed(val, (void * __iomem)( ++ reg_addr + PNOR_SET_OPMODE_OFFSET)); ++ } else { ++ /* 8-bit */ ++ val = readl_relaxed((void * __iomem) ++ (reg_addr + PNOR_SET_OPMODE_OFFSET)); ++ val &= ~(1 << PNOR_set_opmode__set_mw_R); ++ writel_relaxed(val, (void * __iomem) ++ (reg_addr + PNOR_SET_OPMODE_OFFSET)); ++ } ++ val = readl_relaxed((void * __iomem)(reg_addr + ++ PNOR_DIRECT_CMD_OFFSET)); ++ val |= (2 << PNOR_direct_cmd__cmd_type_R); ++ writel_relaxed(val, (void * __iomem)(reg_addr + ++ PNOR_DIRECT_CMD_OFFSET)); ++#endif ++ } ++ ++ printk(KERN_INFO "S29GL-MTD: NOR_INTERFACE Enabled\n"); ++ ++ udelay(1000); ++ ++ pdev = of_find_device_by_node(np); ++ memres = platform_get_resource(pdev, IORESOURCE_MEM, 
1); ++ s29gl_map.phys = memres->start; ++ s29gl_map.size = resource_size(memres); ++ s29gl_map.virt = ioremap(s29gl_map.phys, s29gl_map.size); ++ ++ if (!s29gl_map.virt) { ++ printk(KERN_ERR "S29GL-MTD: ioremap failed\n"); ++ if (nor_enable) { ++ /* revert to NAND mode */ ++ val = readl_relaxed(nor_enable); ++ if (of_find_compatible_node(NULL, NULL, ++ "brcm,hurricane2")) ++ val &= ~(1UL << PNOR_NAND_SEL_REG_PNOR_SEL_HR2); ++ else ++ val &= ~(1UL << PNOR_NAND_SEL_REG_PNOR_SEL_GH); ++ writel_relaxed(val, nor_enable); ++ } ++ return -EIO; ++ } ++ ++ simple_map_init(&s29gl_map); ++ ++ // Probe for flash bankwidth 4 ++ printk (KERN_INFO "S29GL-MTD probing 32bit FLASH\n"); ++ nor_mtd = do_map_probe("cfi_probe", &s29gl_map); ++ if (!nor_mtd) { ++ printk (KERN_INFO "S29GL-MTD probing 16bit FLASH\n"); ++ // Probe for bankwidth 2 ++ s29gl_map.bankwidth = 2; ++ nor_mtd = do_map_probe("cfi_probe", &s29gl_map); ++ } ++ ++ if (nor_mtd) { ++ nor_mtd->owner = THIS_MODULE; ++ ppdata.of_node = np; ++ mtd_device_parse_register(nor_mtd, NULL, &ppdata, NULL, 0); ++ printk (KERN_INFO "S29GL-MTD MTD partitions parsed!\n"); ++ return 0; ++ } ++ ++ printk (KERN_INFO "S29GL-MTD NO FLASH found!\n"); ++ if (nor_enable) { ++ /* revert to NAND mode */ ++ val = readl_relaxed(nor_enable); ++ if (of_find_compatible_node(NULL, NULL, "brcm,hurricane2")) ++ val &= ~(1UL << PNOR_NAND_SEL_REG_PNOR_SEL_HR2); ++ else ++ val &= ~(1UL << PNOR_NAND_SEL_REG_PNOR_SEL_GH); ++ writel_relaxed(val, nor_enable); ++ } ++ iounmap((void *)s29gl_map.virt); ++ return -ENXIO; ++} ++ ++/* ++ * Cleanup ++ */ ++static void __exit s29gl_mtd_cleanup(void) ++{ ++ if (nor_mtd) { ++ mtd_device_unregister(nor_mtd); ++ map_destroy(nor_mtd); ++ } ++ ++ if (s29gl_map.virt) { ++ iounmap((void *)s29gl_map.virt); ++ s29gl_map.virt = 0; ++ } ++} ++ ++ ++module_init(s29gl_mtd_init); ++module_exit(s29gl_mtd_cleanup); ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("MTD map driver for Hurricane2 Deerhound evaluation boards"); +diff 
-uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/nand/Kconfig b/drivers/mtd/nand/Kconfig +--- a/drivers/mtd/nand/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/mtd/nand/Kconfig 2017-11-09 17:53:42.777285000 +0800 +@@ -399,6 +399,17 @@ config MTD_NAND_BRCMNAND + originally designed for Set-Top Box but is used on various BCM7xxx, + BCM3xxx, BCM63xxx, iProc/Cygnus and more. + ++if MTD_NAND_BRCMNAND ++config MTD_NAND_XGS_IPROC ++ bool "XGS iProc NAND controller" ++ depends on ARCH_XGS_IPROC ++ default n ++ help ++ Enable XGS iProc NAND controller. ++ ++ If unsure, say N. ++endif # MTD_NAND_BRCMNAND ++ + config MTD_NAND_BCM47XXNFLASH + tristate "Support for NAND flash on BCM4706 BCMA bus" + depends on BCMA_NFLASH +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/nand/brcmnand/brcmnand.c b/drivers/mtd/nand/brcmnand/brcmnand.c +--- a/drivers/mtd/nand/brcmnand/brcmnand.c 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/mtd/nand/brcmnand/brcmnand.c 2017-11-09 17:53:42.804318000 +0800 +@@ -14,7 +14,7 @@ + #include + #include + #include +-#include ++/*#include */ + #include + #include + #include +@@ -26,7 +26,7 @@ + #include + #include + #include +-#include ++/*#include */ + #include + #include + #include +@@ -1053,8 +1053,16 @@ static void brcmnand_send_cmd(struct brc + ctrl->cmd_pending = cmd; + + intfc = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); ++ /* Currently one DTS file supports both NAND and PNOR flash, ++ * but not both controllers can be activated at the same time. ++ * The strap pin on board determines either NAND or PNOR flash ++ * controller is active. Need to comment out the following checking ++ * of NAND controller ready, otherwise it will fail as actually the ++ * NAND controller might be disabled. 
++ */ ++#if !defined (CONFIG_MTD_NAND_XGS_IPROC) + BUG_ON(!(intfc & INTFC_CTLR_READY)); +- ++#endif + mb(); /* flush previous writes */ + brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, + cmd << brcmnand_cmd_shift(ctrl)); +@@ -1282,9 +1290,20 @@ static uint8_t brcmnand_read_byte(struct + if (host->last_byte > 0 && offs == 0) + chip->cmdfunc(mtd, NAND_CMD_RNDOUT, addr, -1); + ++ /* For XGS iproc, no byte swap needed for little-endian mode */ ++#if defined (CONFIG_MTD_NAND_XGS_IPROC) ++ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) ++ ret = ctrl->flash_cache[offs >> 2] >> ++ (24 - ((offs & 0x03) << 3)); ++ else ++ ret = ctrl->flash_cache[offs >> 2] >> ++ ((offs & 0x03) << 3); ++#else + ret = ctrl->flash_cache[offs >> 2] >> + (24 - ((offs & 0x03) << 3)); ++#endif + break; ++ + case NAND_CMD_GET_FEATURES: + if (host->last_byte >= ONFI_SUBFEATURE_PARAM_LEN) { + ret = 0; +@@ -1483,6 +1502,56 @@ static int brcmnand_read_by_pio(struct m + return ret; + } + ++/* ++ * Check a page to see if it is erased (w/ bitflips) after an uncorrectable ECC ++ * error ++ * ++ * Because the HW ECC signals an ECC error if an erase paged has even a single ++ * bitflip, we must check each ECC error to see if it is actually an erased ++ * page with bitflips, not a truly corrupted page. ++ * ++ * On a real error, return a negative error code (-EBADMSG for ECC error), and ++ * buf will contain raw data. ++ * Otherwise, buf gets filled with 0xffs and return the maximum number of ++ * bitflips-per-ECC-sector to the caller. 
++ * ++ */ ++static int brcmstb_nand_verify_erased_page(struct mtd_info *mtd, ++ struct nand_chip *chip, void *buf, u64 addr) ++{ ++ int i, sas; ++ void *oob = chip->oob_poi; ++ int bitflips = 0; ++ int page = addr >> chip->page_shift; ++ int ret; ++ ++ if (!buf) { ++ buf = chip->buffers->databuf; ++ /* Invalidate page cache */ ++ chip->pagebuf = -1; ++ } ++ ++ sas = mtd->oobsize / chip->ecc.steps; ++ ++ /* read without ecc for verification */ ++ chip->cmdfunc(mtd, NAND_CMD_READ0, 0x00, page); ++ ret = chip->ecc.read_page_raw(mtd, chip, buf, true, page); ++ if (ret) ++ return ret; ++ ++ for (i = 0; i < chip->ecc.steps; i++, oob += sas) { ++ ret = nand_check_erased_ecc_chunk(buf, chip->ecc.size, ++ oob, sas, NULL, 0, ++ chip->ecc.strength); ++ if (ret < 0) ++ return ret; ++ ++ bitflips = max(bitflips, ret); ++ } ++ ++ return bitflips; ++} ++ + static int brcmnand_read(struct mtd_info *mtd, struct nand_chip *chip, + u64 addr, unsigned int trans, u32 *buf, u8 *oob) + { +@@ -1513,6 +1582,18 @@ static int brcmnand_read(struct mtd_info + } + + if (mtd_is_eccerr(err)) { ++ /* ++ * Controller version 7.2 has hw encoder to detect erased page ++ * bitflips, apply sw verification for older controllers only ++ */ ++ if (ctrl->nand_version < 0x0702) { ++ err = brcmstb_nand_verify_erased_page(mtd, chip, buf, ++ addr); ++ /* erased page bitflips corrected */ ++ if (err >= 0) ++ return err; ++ } ++ + dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n", + (unsigned long long)err_addr); + mtd->ecc_stats.failed++; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/spi-nor/Kconfig b/drivers/mtd/spi-nor/Kconfig +--- a/drivers/mtd/spi-nor/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/mtd/spi-nor/Kconfig 2017-11-09 17:53:42.932285000 +0800 +@@ -42,3 +42,38 @@ config SPI_NXP_SPIFI + controller and want to access the Flash as a mtd device. 
+ + endif # MTD_SPI_NOR ++ ++ ++menuconfig MTD_SPI_NOR_IPROC ++ tristate "BRCM IPROC SPI-NOR device support" ++ depends on MTD ++ help ++ This is the framework for the SPI NOR which can be used by the SPI ++ device drivers and the SPI-NOR device driver. ++ ++if MTD_SPI_NOR_IPROC ++ ++config M25PXX_STAY_IN_3BYTE_MODE ++ bool "Stay in 3-byte address mode when idle" ++ default n ++ help ++ This option forces the flash to stay in 3-byte address mode when idle ++ (even for flashes that require 4-byte address). This is work around the ++ reset problem if the controller cannot issue 4-byte OPCODE when booting. ++ ++endif # MTD_SPI_NOR_IPROC ++ ++config MTD_SPI_NOR_USE_4K_SECTORS ++ bool "Use small 4096 B erase sectors" ++ default n ++ help ++ Many flash memories support erasing small (4096 B) sectors. Depending ++ on the usage this feature may provide performance gain in comparison ++ to erasing whole blocks (32/64 KiB). ++ Changing a small part of the flash's contents is usually faster with ++ small sectors. On the other hand erasing should be faster when using ++ 64 KiB block instead of 16 × 4 KiB sectors. ++ ++ Please note that some tools/drivers/filesystems may not work with ++ 4096 B erase size (e.g. UBIFS requires 15 KiB as a minimum). 
++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/spi-nor/Makefile b/drivers/mtd/spi-nor/Makefile +--- a/drivers/mtd/spi-nor/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/mtd/spi-nor/Makefile 2017-11-09 17:53:42.933281000 +0800 +@@ -1,3 +1,5 @@ + obj-$(CONFIG_MTD_SPI_NOR) += spi-nor.o ++obj-$(CONFIG_MTD_SPI_NOR_IPROC) += spi-nor-iproc.o + obj-$(CONFIG_SPI_FSL_QUADSPI) += fsl-quadspi.o + obj-$(CONFIG_SPI_NXP_SPIFI) += nxp-spifi.o ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/mtd/spi-nor/spi-nor-iproc.c b/drivers/mtd/spi-nor/spi-nor-iproc.c +--- a/drivers/mtd/spi-nor/spi-nor-iproc.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/mtd/spi-nor/spi-nor-iproc.c 2017-11-09 17:53:42.936293000 +0800 +@@ -0,0 +1,1467 @@ ++/* ++ Based on spi-nor.c ++ ++ * This code is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License version 2 as ++ * published by the Free Software Foundation. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++ ++/* Define max times to check status register before we give up. */ ++ ++/* ++ * For everything but full-chip erase; probably could be much smaller, but kept ++ * around for safety for now ++ */ ++#define DEFAULT_READY_WAIT_JIFFIES (40UL * HZ) ++ ++/* ++ * For full-chip erase, calibrated to a 2MB flash (M25P16); should be scaled up ++ * for larger flash ++ */ ++#define CHIP_ERASE_2MB_READY_WAIT_JIFFIES (40UL * HZ) ++ ++#define SPI_NOR_MAX_ID_LEN 6 ++ ++struct flash_info { ++ char *name; ++ ++ /* ++ * This array stores the ID bytes. ++ * The first three bytes are the JEDIC ID. ++ * JEDEC ID zero means "no ID" (mostly older chips). 
++ */ ++ u8 id[SPI_NOR_MAX_ID_LEN]; ++ u8 id_len; ++ ++ /* The size listed here is what works with SPINOR_OP_SE, which isn't ++ * necessarily called a "sector" by the vendor. ++ */ ++ unsigned sector_size; ++ u16 n_sectors; ++ ++ u16 page_size; ++ u16 addr_width; ++ ++ u16 flags; ++#define SECT_4K 0x01 /* SPINOR_OP_BE_4K works uniformly */ ++#define SPI_NOR_NO_ERASE 0x02 /* No erase command needed */ ++#define SST_WRITE 0x04 /* use SST byte programming */ ++#define SPI_NOR_NO_FR 0x08 /* Can't do fastread */ ++#define SECT_4K_PMC 0x10 /* SPINOR_OP_BE_4K_PMC works uniformly */ ++#define SPI_NOR_DUAL_READ 0x20 /* Flash supports Dual Read */ ++#define SPI_NOR_QUAD_READ 0x40 /* Flash supports Quad Read */ ++#define USE_FSR 0x80 /* use flag status register */ ++}; ++ ++#define JEDEC_MFR(info) ((info)->id[0]) ++ ++static const struct flash_info *spi_nor_match_id(const char *name); ++ ++/* ++ * Read the status register, returning its value in the location ++ * Return the status register value. ++ * Returns negative if error occurred. ++ */ ++static int read_sr(struct spi_nor *nor) ++{ ++ int ret; ++ u8 val; ++ ++ ret = nor->read_reg(nor, SPINOR_OP_RDSR, &val, 1); ++ if (ret < 0) { ++ pr_err("error %d reading SR\n", (int) ret); ++ return ret; ++ } ++ ++ return val; ++} ++ ++/* ++ * Read the flag status register, returning its value in the location ++ * Return the status register value. ++ * Returns negative if error occurred. ++ */ ++static int read_fsr(struct spi_nor *nor) ++{ ++ int ret; ++ u8 val; ++ ++ ret = nor->read_reg(nor, SPINOR_OP_RDFSR, &val, 1); ++ if (ret < 0) { ++ pr_err("error %d reading FSR\n", ret); ++ return ret; ++ } ++ ++ return val; ++} ++ ++/* ++ * Read configuration register, returning its value in the ++ * location. Return the configuration register value. ++ * Returns negative if error occured. 
++ */ ++static int read_cr(struct spi_nor *nor) ++{ ++ int ret; ++ u8 val; ++ ++ ret = nor->read_reg(nor, SPINOR_OP_RDCR, &val, 1); ++ if (ret < 0) { ++ dev_err(nor->dev, "error %d reading CR\n", ret); ++ return ret; ++ } ++ ++ return val; ++} ++ ++/* ++ * Dummy Cycle calculation for different type of read. ++ * It can be used to support more commands with ++ * different dummy cycle requirements. ++ */ ++static inline int spi_nor_read_dummy_cycles(struct spi_nor *nor) ++{ ++ switch (nor->flash_read) { ++ case SPI_NOR_FAST: ++ case SPI_NOR_DUAL: ++ case SPI_NOR_QUAD: ++ return 8; ++ case SPI_NOR_NORMAL: ++ return 0; ++ } ++ return 0; ++} ++ ++/* ++ * Write status register 1 byte ++ * Returns negative if error occurred. ++ */ ++static inline int write_sr(struct spi_nor *nor, u8 val) ++{ ++ nor->cmd_buf[0] = val; ++ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 1); ++} ++ ++/* ++ * Set write enable latch with Write Enable command. ++ * Returns negative if error occurred. ++ */ ++static inline int write_enable(struct spi_nor *nor) ++{ ++ return nor->write_reg(nor, SPINOR_OP_WREN, NULL, 0); ++} ++ ++/* ++ * Send write disble instruction to the chip. ++ */ ++static inline int write_disable(struct spi_nor *nor) ++{ ++ return nor->write_reg(nor, SPINOR_OP_WRDI, NULL, 0); ++} ++ ++static inline struct spi_nor *mtd_to_spi_nor(struct mtd_info *mtd) ++{ ++ return mtd->priv; ++} ++ ++/* Enable/disable 4-byte addressing mode. */ ++static inline int set_4byte(struct spi_nor *nor, const struct flash_info *info, ++ int enable) ++{ ++ int status; ++ bool need_wren = false; ++ u8 cmd; ++ ++ switch (JEDEC_MFR(info)) { ++ case SNOR_MFR_MICRON: ++ /* Some Micron need WREN command; all will accept it */ ++ need_wren = true; ++ case SNOR_MFR_MACRONIX: ++ case SNOR_MFR_WINBOND: ++ if (need_wren) ++ write_enable(nor); ++ ++ cmd = enable ? 
SPINOR_OP_EN4B : SPINOR_OP_EX4B; ++ status = nor->write_reg(nor, cmd, NULL, 0); ++ if (need_wren) ++ write_disable(nor); ++ ++ return status; ++ default: ++ /* Spansion style */ ++ nor->cmd_buf[0] = enable << 7; ++ return nor->write_reg(nor, SPINOR_OP_BRWR, nor->cmd_buf, 1); ++ } ++} ++static inline int spi_nor_sr_ready(struct spi_nor *nor) ++{ ++ int sr = read_sr(nor); ++ if (sr < 0) ++ return sr; ++ else ++ return !(sr & SR_WIP); ++} ++ ++static inline int spi_nor_fsr_ready(struct spi_nor *nor) ++{ ++ int fsr = read_fsr(nor); ++ if (fsr < 0) ++ return fsr; ++ else ++ return fsr & FSR_READY; ++} ++ ++static int spi_nor_ready(struct spi_nor *nor) ++{ ++ int sr, fsr; ++ sr = spi_nor_sr_ready(nor); ++ if (sr < 0) ++ return sr; ++ fsr = nor->flags & SNOR_F_USE_FSR ? spi_nor_fsr_ready(nor) : 1; ++ if (fsr < 0) ++ return fsr; ++ return sr && fsr; ++} ++ ++/* ++ * Service routine to read status register until ready, or timeout occurs. ++ * Returns non-zero if error. ++ */ ++static int spi_nor_wait_till_ready_with_timeout(struct spi_nor *nor, ++ unsigned long timeout_jiffies) ++{ ++ unsigned long deadline; ++ int timeout = 0, ret; ++ ++ deadline = jiffies + timeout_jiffies; ++ ++ while (!timeout) { ++ if (time_after_eq(jiffies, deadline)) ++ timeout = 1; ++ ++ ret = spi_nor_ready(nor); ++ if (ret < 0) ++ return ret; ++ if (ret) ++ return 0; ++ ++ cond_resched(); ++ } ++ ++ dev_err(nor->dev, "flash operation timed out\n"); ++ ++ return -ETIMEDOUT; ++} ++ ++static int spi_nor_wait_till_ready(struct spi_nor *nor) ++{ ++ return spi_nor_wait_till_ready_with_timeout(nor, ++ DEFAULT_READY_WAIT_JIFFIES); ++} ++ ++/* ++ * Erase the whole flash memory ++ * ++ * Returns 0 if successful, non-zero otherwise. 
++ */ ++static int erase_chip(struct spi_nor *nor) ++{ ++ dev_dbg(nor->dev, " %lldKiB\n", (long long)(nor->mtd.size >> 10)); ++ ++ return nor->write_reg(nor, SPINOR_OP_CHIP_ERASE, NULL, 0); ++} ++ ++static int spi_nor_lock_and_prep(struct spi_nor *nor, enum spi_nor_ops ops) ++{ ++ int ret = 0; ++ ++ mutex_lock(&nor->lock); ++ ++ if (nor->prepare) { ++ ret = nor->prepare(nor, ops); ++ if (ret) { ++ dev_err(nor->dev, "failed in the preparation.\n"); ++ mutex_unlock(&nor->lock); ++ return ret; ++ } ++ } ++ return ret; ++} ++ ++static void spi_nor_unlock_and_unprep(struct spi_nor *nor, enum spi_nor_ops ops) ++{ ++ if (nor->unprepare) ++ nor->unprepare(nor, ops); ++ mutex_unlock(&nor->lock); ++} ++ ++/* ++ * Erase an address range on the nor chip. The address range may extend ++ * one or more erase sectors. Return an error is there is a problem erasing. ++ */ ++static int spi_nor_erase(struct mtd_info *mtd, struct erase_info *instr) ++{ ++ struct spi_nor *nor = mtd_to_spi_nor(mtd); ++ u32 addr, len; ++ uint32_t rem; ++ int ret; ++ ++ dev_dbg(nor->dev, "at 0x%llx, len %lld\n", (long long)instr->addr, ++ (long long)instr->len); ++ ++ div_u64_rem(instr->len, mtd->erasesize, &rem); ++ if (rem) ++ return -EINVAL; ++ ++ addr = instr->addr; ++ len = instr->len; ++ ++ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_ERASE); ++ if (ret) ++ return ret; ++ ++ /* whole-chip erase? */ ++ if (len == mtd->size) { ++ unsigned long timeout; ++ ++ write_enable(nor); ++ ++ if (erase_chip(nor)) { ++ ret = -EIO; ++ goto erase_err; ++ } ++ ++ /* ++ * Scale the timeout linearly with the size of the flash, with ++ * a minimum calibrated to an old 2MB flash. We could try to ++ * pull these from CFI/SFDP, but these values should be good ++ * enough for now. 
++ */ ++ timeout = max(CHIP_ERASE_2MB_READY_WAIT_JIFFIES, ++ CHIP_ERASE_2MB_READY_WAIT_JIFFIES * ++ (unsigned long)(mtd->size / SZ_2M)); ++ ret = spi_nor_wait_till_ready_with_timeout(nor, timeout); ++ if (ret) ++ goto erase_err; ++ ++ /* REVISIT in some cases we could speed up erasing large regions ++ * by using SPINOR_OP_SE instead of SPINOR_OP_BE_4K. We may have set up ++ * to use "small sector erase", but that's not always optimal. ++ */ ++ ++ /* "sector"-at-a-time erase */ ++ } else { ++ while (len) { ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* Set to 4-byte mode if addr >= 16M */ ++ /* Note: set_4byte will call write_disable for Micron flash, so set_4byte should be called before the following write_enable(nor) */ ++ if ( addr >= 0x1000000 ) ++ set_4byte(nor, nor->priv1, 1); ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++ write_enable(nor); ++ ++ if (nor->erase(nor, addr)) { ++ ret = -EIO; ++ goto erase_err; ++ } ++ ++ addr += mtd->erasesize; ++ len -= mtd->erasesize; ++ ++ ret = spi_nor_wait_till_ready(nor); ++ if (ret) ++ goto erase_err; ++ } ++ } ++ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* Reset to 3-byte mode if it was set to 4-byte mode */ ++ if (addr >= 0x1000000) { ++ spi_nor_wait_till_ready(nor); ++ set_4byte(nor, nor->priv1, 0); ++ } ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++ write_disable(nor); ++ ++ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE); ++ ++ instr->state = MTD_ERASE_DONE; ++ mtd_erase_callback(instr); ++ ++ return ret; ++ ++erase_err: ++ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_ERASE); ++ instr->state = MTD_ERASE_FAILED; ++ return ret; ++} ++ ++static void stm_get_locked_range(struct spi_nor *nor, u8 sr, loff_t *ofs, ++ uint64_t *len) ++{ ++ struct mtd_info *mtd = &nor->mtd; ++ u8 mask = SR_BP2 | SR_BP1 | SR_BP0; ++ int shift = ffs(mask) - 1; ++ int pow; ++ ++ if (!(sr & mask)) { ++ /* No protection */ ++ *ofs = 0; ++ *len = 0; ++ } else { ++ pow = ((sr & mask) ^ mask) >> shift; ++ *len = mtd->size >> pow; 
++ *ofs = mtd->size - *len; ++ } ++} ++ ++/* ++ * Return 1 if the entire region is locked, 0 otherwise ++ */ ++static int stm_is_locked_sr(struct spi_nor *nor, loff_t ofs, uint64_t len, ++ u8 sr) ++{ ++ loff_t lock_offs; ++ uint64_t lock_len; ++ ++ stm_get_locked_range(nor, sr, &lock_offs, &lock_len); ++ ++ return (ofs + len <= lock_offs + lock_len) && (ofs >= lock_offs); ++} ++ ++/* ++ * Lock a region of the flash. Compatible with ST Micro and similar flash. ++ * Supports only the block protection bits BP{0,1,2} in the status register ++ * (SR). Does not support these features found in newer SR bitfields: ++ * - TB: top/bottom protect - only handle TB=0 (top protect) ++ * - SEC: sector/block protect - only handle SEC=0 (block protect) ++ * - CMP: complement protect - only support CMP=0 (range is not complemented) ++ * ++ * Sample table portion for 8MB flash (Winbond w25q64fw): ++ * ++ * SEC | TB | BP2 | BP1 | BP0 | Prot Length | Protected Portion ++ * -------------------------------------------------------------------------- ++ * X | X | 0 | 0 | 0 | NONE | NONE ++ * 0 | 0 | 0 | 0 | 1 | 128 KB | Upper 1/64 ++ * 0 | 0 | 0 | 1 | 0 | 256 KB | Upper 1/32 ++ * 0 | 0 | 0 | 1 | 1 | 512 KB | Upper 1/16 ++ * 0 | 0 | 1 | 0 | 0 | 1 MB | Upper 1/8 ++ * 0 | 0 | 1 | 0 | 1 | 2 MB | Upper 1/4 ++ * 0 | 0 | 1 | 1 | 0 | 4 MB | Upper 1/2 ++ * X | X | 1 | 1 | 1 | 8 MB | ALL ++ * ++ * Returns negative on errors, 0 on success. ++ */ ++static int stm_lock(struct spi_nor *nor, loff_t ofs, uint64_t len) ++{ ++ struct mtd_info *mtd = &nor->mtd; ++ u8 status_old, status_new; ++ u8 mask = SR_BP2 | SR_BP1 | SR_BP0; ++ u8 shift = ffs(mask) - 1, pow, val; ++ ++ status_old = read_sr(nor); ++ ++ /* SPI NOR always locks to the end */ ++ if (ofs + len != mtd->size) { ++ /* Does combined region extend to end? 
*/ ++ if (!stm_is_locked_sr(nor, ofs + len, mtd->size - ofs - len, ++ status_old)) ++ return -EINVAL; ++ len = mtd->size - ofs; ++ } ++ ++ /* ++ * Need smallest pow such that: ++ * ++ * 1 / (2^pow) <= (len / size) ++ * ++ * so (assuming power-of-2 size) we do: ++ * ++ * pow = ceil(log2(size / len)) = log2(size) - floor(log2(len)) ++ */ ++ pow = ilog2(mtd->size) - ilog2(len); ++ val = mask - (pow << shift); ++ if (val & ~mask) ++ return -EINVAL; ++ /* Don't "lock" with no region! */ ++ if (!(val & mask)) ++ return -EINVAL; ++ ++ status_new = (status_old & ~mask) | val; ++ ++ /* Only modify protection if it will not unlock other areas */ ++ if ((status_new & mask) <= (status_old & mask)) ++ return -EINVAL; ++ ++ write_enable(nor); ++ return write_sr(nor, status_new); ++} ++ ++/* ++ * Unlock a region of the flash. See stm_lock() for more info ++ * ++ * Returns negative on errors, 0 on success. ++ */ ++static int stm_unlock(struct spi_nor *nor, loff_t ofs, uint64_t len) ++{ ++ struct mtd_info *mtd = &nor->mtd; ++ uint8_t status_old, status_new; ++ u8 mask = SR_BP2 | SR_BP1 | SR_BP0; ++ u8 shift = ffs(mask) - 1, pow, val; ++ ++ status_old = read_sr(nor); ++ ++ /* Cannot unlock; would unlock larger region than requested */ ++ if (stm_is_locked_sr(nor, ofs - mtd->erasesize, mtd->erasesize, ++ status_old)) ++ return -EINVAL; ++ ++ /* ++ * Need largest pow such that: ++ * ++ * 1 / (2^pow) >= (len / size) ++ * ++ * so (assuming power-of-2 size) we do: ++ * ++ * pow = floor(log2(size / len)) = log2(size) - ceil(log2(len)) ++ */ ++ pow = ilog2(mtd->size) - order_base_2(mtd->size - (ofs + len)); ++ if (ofs + len == mtd->size) { ++ val = 0; /* fully unlocked */ ++ } else { ++ val = mask - (pow << shift); ++ /* Some power-of-two sizes are not supported */ ++ if (val & ~mask) ++ return -EINVAL; ++ } ++ ++ status_new = (status_old & ~mask) | val; ++ ++ /* Only modify protection if it will not lock other areas */ ++ if ((status_new & mask) >= (status_old & mask)) ++ return -EINVAL; 
++ ++ write_enable(nor); ++ return write_sr(nor, status_new); ++} ++ ++/* ++ * Check if a region of the flash is (completely) locked. See stm_lock() for ++ * more info. ++ * ++ * Returns 1 if entire region is locked, 0 if any portion is unlocked, and ++ * negative on errors. ++ */ ++static int stm_is_locked(struct spi_nor *nor, loff_t ofs, uint64_t len) ++{ ++ int status; ++ ++ status = read_sr(nor); ++ if (status < 0) ++ return status; ++ ++ return stm_is_locked_sr(nor, ofs, len, status); ++} ++ ++static int spi_nor_lock(struct mtd_info *mtd, loff_t ofs, uint64_t len) ++{ ++ struct spi_nor *nor = mtd_to_spi_nor(mtd); ++ int ret; ++ ++ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_LOCK); ++ if (ret) ++ return ret; ++ ++ ret = nor->flash_lock(nor, ofs, len); ++ ++ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_UNLOCK); ++ return ret; ++} ++ ++static int spi_nor_unlock(struct mtd_info *mtd, loff_t ofs, uint64_t len) ++{ ++ struct spi_nor *nor = mtd_to_spi_nor(mtd); ++ int ret; ++ ++ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK); ++ if (ret) ++ return ret; ++ ++ ret = nor->flash_unlock(nor, ofs, len); ++ ++ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK); ++ return ret; ++} ++ ++static int spi_nor_is_locked(struct mtd_info *mtd, loff_t ofs, uint64_t len) ++{ ++ struct spi_nor *nor = mtd_to_spi_nor(mtd); ++ int ret; ++ ++ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_UNLOCK); ++ if (ret) ++ return ret; ++ ++ ret = nor->flash_is_locked(nor, ofs, len); ++ ++ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_LOCK); ++ return ret; ++} ++ ++/* Used when the "_ext_id" is two bytes at most */ ++#define INFO(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ ++ .id = { \ ++ ((_jedec_id) >> 16) & 0xff, \ ++ ((_jedec_id) >> 8) & 0xff, \ ++ (_jedec_id) & 0xff, \ ++ ((_ext_id) >> 8) & 0xff, \ ++ (_ext_id) & 0xff, \ ++ }, \ ++ .id_len = (!(_jedec_id) ? 0 : (3 + ((_ext_id) ? 
2 : 0))), \ ++ .sector_size = (_sector_size), \ ++ .n_sectors = (_n_sectors), \ ++ .page_size = 256, \ ++ .flags = (_flags), ++ ++#define INFO6(_jedec_id, _ext_id, _sector_size, _n_sectors, _flags) \ ++ .id = { \ ++ ((_jedec_id) >> 16) & 0xff, \ ++ ((_jedec_id) >> 8) & 0xff, \ ++ (_jedec_id) & 0xff, \ ++ ((_ext_id) >> 16) & 0xff, \ ++ ((_ext_id) >> 8) & 0xff, \ ++ (_ext_id) & 0xff, \ ++ }, \ ++ .id_len = 6, \ ++ .sector_size = (_sector_size), \ ++ .n_sectors = (_n_sectors), \ ++ .page_size = 256, \ ++ .flags = (_flags), ++ ++#define CAT25_INFO(_sector_size, _n_sectors, _page_size, _addr_width, _flags) \ ++ .sector_size = (_sector_size), \ ++ .n_sectors = (_n_sectors), \ ++ .page_size = (_page_size), \ ++ .addr_width = (_addr_width), \ ++ .flags = (_flags), ++ ++/* NOTE: double check command sets and memory organization when you add ++ * more nor chips. This current list focusses on newer chips, which ++ * have been converging on command sets which including JEDEC ID. ++ * ++ * All newly added entries should describe *hardware* and should use SECT_4K ++ * (or SECT_4K_PMC) if hardware supports erasing 4 KiB sectors. For usage ++ * scenarios excluding small sectors there is config option that can be ++ * disabled: CONFIG_MTD_SPI_NOR_USE_4K_SECTORS. ++ * For historical (and compatibility) reasons (before we got above config) some ++ * old entries may be missing 4K flag. 
++ */ ++static const struct flash_info spi_nor_ids[] = { ++ /* Atmel -- some are (confusingly) marketed as "DataFlash" */ ++ { "at25fs010", INFO(0x1f6601, 0, 32 * 1024, 4, SECT_4K) }, ++ { "at25fs040", INFO(0x1f6604, 0, 64 * 1024, 8, SECT_4K) }, ++ ++ { "at25df041a", INFO(0x1f4401, 0, 64 * 1024, 8, SECT_4K) }, ++ { "at25df321a", INFO(0x1f4701, 0, 64 * 1024, 64, SECT_4K) }, ++ { "at25df641", INFO(0x1f4800, 0, 64 * 1024, 128, SECT_4K) }, ++ ++ { "at26f004", INFO(0x1f0400, 0, 64 * 1024, 8, SECT_4K) }, ++ { "at26df081a", INFO(0x1f4501, 0, 64 * 1024, 16, SECT_4K) }, ++ { "at26df161a", INFO(0x1f4601, 0, 64 * 1024, 32, SECT_4K) }, ++ { "at26df321", INFO(0x1f4700, 0, 64 * 1024, 64, SECT_4K) }, ++ ++ { "at45db081d", INFO(0x1f2500, 0, 64 * 1024, 16, SECT_4K) }, ++ ++ /* EON -- en25xxx */ ++ { "en25f32", INFO(0x1c3116, 0, 64 * 1024, 64, SECT_4K) }, ++ { "en25p32", INFO(0x1c2016, 0, 64 * 1024, 64, 0) }, ++ { "en25q32b", INFO(0x1c3016, 0, 64 * 1024, 64, 0) }, ++ { "en25p64", INFO(0x1c2017, 0, 64 * 1024, 128, 0) }, ++ { "en25q64", INFO(0x1c3017, 0, 64 * 1024, 128, SECT_4K) }, ++ { "en25qh128", INFO(0x1c7018, 0, 64 * 1024, 256, 0) }, ++ { "en25qh256", INFO(0x1c7019, 0, 64 * 1024, 512, 0) }, ++ { "en25s64", INFO(0x1c3817, 0, 64 * 1024, 128, SECT_4K) }, ++ ++ /* ESMT */ ++ { "f25l32pa", INFO(0x8c2016, 0, 64 * 1024, 64, SECT_4K) }, ++ ++ /* Everspin */ ++ { "mr25h256", CAT25_INFO( 32 * 1024, 1, 256, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, ++ { "mr25h10", CAT25_INFO(128 * 1024, 1, 256, 3, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, ++ ++ /* Fujitsu */ ++ { "mb85rs1mt", INFO(0x047f27, 0, 128 * 1024, 1, SPI_NOR_NO_ERASE) }, ++ ++ /* GigaDevice */ ++ { "gd25q32", INFO(0xc84016, 0, 64 * 1024, 64, SECT_4K) }, ++ { "gd25q64", INFO(0xc84017, 0, 64 * 1024, 128, SECT_4K) }, ++ { "gd25q128", INFO(0xc84018, 0, 64 * 1024, 256, SECT_4K) }, ++ ++ /* Intel/Numonyx -- xxxs33b */ ++ { "160s33b", INFO(0x898911, 0, 64 * 1024, 32, 0) }, ++ { "320s33b", INFO(0x898912, 0, 64 * 1024, 64, 0) }, ++ { "640s33b", 
INFO(0x898913, 0, 64 * 1024, 128, 0) }, ++ ++ /* ISSI */ ++ { "is25cd512", INFO(0x7f9d20, 0, 32 * 1024, 2, SECT_4K) }, ++ ++ /* Macronix */ ++ { "mx25l512e", INFO(0xc22010, 0, 64 * 1024, 1, SECT_4K) }, ++ { "mx25l2005a", INFO(0xc22012, 0, 64 * 1024, 4, SECT_4K) }, ++ { "mx25l4005a", INFO(0xc22013, 0, 64 * 1024, 8, SECT_4K) }, ++ { "mx25l8005", INFO(0xc22014, 0, 64 * 1024, 16, 0) }, ++ { "mx25l1606e", INFO(0xc22015, 0, 64 * 1024, 32, SECT_4K) }, ++ { "mx25l3205d", INFO(0xc22016, 0, 64 * 1024, 64, 0) }, ++ { "mx25l3255e", INFO(0xc29e16, 0, 64 * 1024, 64, SECT_4K) }, ++ { "mx25l6405d", INFO(0xc22017, 0, 64 * 1024, 128, 0) }, ++ { "mx25u6435f", INFO(0xc22537, 0, 64 * 1024, 128, SECT_4K) }, ++ { "mx25l12805d", INFO(0xc22018, 0, 64 * 1024, 256, 0) }, ++ { "mx25l12855e", INFO(0xc22618, 0, 64 * 1024, 256, 0) }, ++ { "mx25l25635e", INFO(0xc22019, 0, 64 * 1024, 512, 0) }, ++ { "mx25l25655e", INFO(0xc22619, 0, 64 * 1024, 512, 0) }, ++ { "mx66l51235l", INFO(0xc2201a, 0, 64 * 1024, 1024, SPI_NOR_QUAD_READ) }, ++ { "mx66l1g55g", INFO(0xc2261b, 0, 64 * 1024, 2048, SPI_NOR_QUAD_READ) }, ++ ++ /* Micron */ ++ { "n25q032", INFO(0x20ba16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) }, ++ { "n25q032a", INFO(0x20bb16, 0, 64 * 1024, 64, SPI_NOR_QUAD_READ) }, ++ { "n25q064", INFO(0x20ba17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) }, ++ { "n25q064a", INFO(0x20bb17, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_QUAD_READ) }, ++ { "n25q128a11", INFO(0x20bb18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) }, ++ { "n25q128a13", INFO(0x20ba18, 0, 64 * 1024, 256, SPI_NOR_QUAD_READ) }, ++ { "n25q256a", INFO(0x20ba19, 0, 64 * 1024, 512, SECT_4K | SPI_NOR_QUAD_READ) }, ++ { "n25q512a", INFO(0x20bb20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, ++ { "n25q512ax3", INFO(0x20ba20, 0, 64 * 1024, 1024, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, ++ { "n25q00", INFO(0x20ba21, 0, 64 * 1024, 2048, SECT_4K | USE_FSR | SPI_NOR_QUAD_READ) }, ++ ++ /* PMC */ ++ { "pm25lv512", INFO(0, 0, 32 * 1024, 2, 
SECT_4K_PMC) }, ++ { "pm25lv010", INFO(0, 0, 32 * 1024, 4, SECT_4K_PMC) }, ++ { "pm25lq032", INFO(0x7f9d46, 0, 64 * 1024, 64, SECT_4K) }, ++ ++ /* Spansion -- single (large) sector size only, at least ++ * for the chips listed here (without boot sectors). ++ */ ++ { "s25sl032p", INFO(0x010215, 0x4d00, 64 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "s25sl064p", INFO(0x010216, 0x4d00, 64 * 1024, 128, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "s25fl256s0", INFO(0x010219, 0x4d00, 256 * 1024, 128, 0) }, ++ { "s25fl256s1", INFO(0x010219, 0x4d01, 64 * 1024, 512, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "s25fl512s", INFO(0x010220, 0x4d00, 256 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "s70fl01gs", INFO(0x010221, 0x4d00, 256 * 1024, 256, 0) }, ++ { "s25sl12800", INFO(0x012018, 0x0300, 256 * 1024, 64, 0) }, ++ { "s25sl12801", INFO(0x012018, 0x0301, 64 * 1024, 256, 0) }, ++ { "s25fl128s", INFO6(0x012018, 0x4d0180, 64 * 1024, 256, SECT_4K | SPI_NOR_QUAD_READ) }, ++ { "s25fl129p0", INFO(0x012018, 0x4d00, 256 * 1024, 64, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "s25fl129p1", INFO(0x012018, 0x4d01, 64 * 1024, 256, SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "s25sl004a", INFO(0x010212, 0, 64 * 1024, 8, 0) }, ++ { "s25sl008a", INFO(0x010213, 0, 64 * 1024, 16, 0) }, ++ { "s25sl016a", INFO(0x010214, 0, 64 * 1024, 32, 0) }, ++ { "s25sl032a", INFO(0x010215, 0, 64 * 1024, 64, 0) }, ++ { "s25sl064a", INFO(0x010216, 0, 64 * 1024, 128, 0) }, ++ { "s25fl004k", INFO(0xef4013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "s25fl008k", INFO(0xef4014, 0, 64 * 1024, 16, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "s25fl016k", INFO(0xef4015, 0, 64 * 1024, 32, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "s25fl064k", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, ++ { "s25fl132k", INFO(0x014016, 0, 64 * 1024, 64, SECT_4K) }, ++ { "s25fl164k", INFO(0x014017, 0, 64 * 1024, 128, SECT_4K) }, ++ { 
"s25fl204k", INFO(0x014013, 0, 64 * 1024, 8, SECT_4K | SPI_NOR_DUAL_READ) }, ++ ++ /* SST -- large erase sizes are "overlays", "sectors" are 4K */ ++ { "sst25vf040b", INFO(0xbf258d, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, ++ { "sst25vf080b", INFO(0xbf258e, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) }, ++ { "sst25vf016b", INFO(0xbf2541, 0, 64 * 1024, 32, SECT_4K | SST_WRITE) }, ++ { "sst25vf032b", INFO(0xbf254a, 0, 64 * 1024, 64, SECT_4K | SST_WRITE) }, ++ { "sst25vf064c", INFO(0xbf254b, 0, 64 * 1024, 128, SECT_4K) }, ++ { "sst25wf512", INFO(0xbf2501, 0, 64 * 1024, 1, SECT_4K | SST_WRITE) }, ++ { "sst25wf010", INFO(0xbf2502, 0, 64 * 1024, 2, SECT_4K | SST_WRITE) }, ++ { "sst25wf020", INFO(0xbf2503, 0, 64 * 1024, 4, SECT_4K | SST_WRITE) }, ++ { "sst25wf020a", INFO(0x621612, 0, 64 * 1024, 4, SECT_4K) }, ++ { "sst25wf040b", INFO(0x621613, 0, 64 * 1024, 8, SECT_4K) }, ++ { "sst25wf040", INFO(0xbf2504, 0, 64 * 1024, 8, SECT_4K | SST_WRITE) }, ++ { "sst25wf080", INFO(0xbf2505, 0, 64 * 1024, 16, SECT_4K | SST_WRITE) }, ++ { "sst26vf016", INFO(0xbf2601, 0, 64 * 1024, 32, SECT_4K) }, ++ { "sst26vf032", INFO(0xbf2602, 0, 64 * 1024, 64, SECT_4K) }, ++ ++ /* ST Microelectronics -- newer production may have feature updates */ ++ { "m25p05", INFO(0x202010, 0, 32 * 1024, 2, 0) }, ++ { "m25p10", INFO(0x202011, 0, 32 * 1024, 4, 0) }, ++ { "m25p20", INFO(0x202012, 0, 64 * 1024, 4, 0) }, ++ { "m25p40", INFO(0x202013, 0, 64 * 1024, 8, 0) }, ++ { "m25p80", INFO(0x202014, 0, 64 * 1024, 16, 0) }, ++ { "m25p16", INFO(0x202015, 0, 64 * 1024, 32, 0) }, ++ { "m25p32", INFO(0x202016, 0, 64 * 1024, 64, 0) }, ++ { "m25p64", INFO(0x202017, 0, 64 * 1024, 128, 0) }, ++ { "m25p128", INFO(0x202018, 0, 256 * 1024, 64, 0) }, ++ ++ { "m25p05-nonjedec", INFO(0, 0, 32 * 1024, 2, 0) }, ++ { "m25p10-nonjedec", INFO(0, 0, 32 * 1024, 4, 0) }, ++ { "m25p20-nonjedec", INFO(0, 0, 64 * 1024, 4, 0) }, ++ { "m25p40-nonjedec", INFO(0, 0, 64 * 1024, 8, 0) }, ++ { "m25p80-nonjedec", INFO(0, 0, 64 * 1024, 16, 0) }, ++ { 
"m25p16-nonjedec", INFO(0, 0, 64 * 1024, 32, 0) }, ++ { "m25p32-nonjedec", INFO(0, 0, 64 * 1024, 64, 0) }, ++ { "m25p64-nonjedec", INFO(0, 0, 64 * 1024, 128, 0) }, ++ { "m25p128-nonjedec", INFO(0, 0, 256 * 1024, 64, 0) }, ++ ++ { "m45pe10", INFO(0x204011, 0, 64 * 1024, 2, 0) }, ++ { "m45pe80", INFO(0x204014, 0, 64 * 1024, 16, 0) }, ++ { "m45pe16", INFO(0x204015, 0, 64 * 1024, 32, 0) }, ++ ++ { "m25pe20", INFO(0x208012, 0, 64 * 1024, 4, 0) }, ++ { "m25pe80", INFO(0x208014, 0, 64 * 1024, 16, 0) }, ++ { "m25pe16", INFO(0x208015, 0, 64 * 1024, 32, SECT_4K) }, ++ ++ { "m25px16", INFO(0x207115, 0, 64 * 1024, 32, SECT_4K) }, ++ { "m25px32", INFO(0x207116, 0, 64 * 1024, 64, SECT_4K) }, ++ { "m25px32-s0", INFO(0x207316, 0, 64 * 1024, 64, SECT_4K) }, ++ { "m25px32-s1", INFO(0x206316, 0, 64 * 1024, 64, SECT_4K) }, ++ { "m25px64", INFO(0x207117, 0, 64 * 1024, 128, 0) }, ++ { "m25px80", INFO(0x207114, 0, 64 * 1024, 16, 0) }, ++ ++ /* Winbond -- w25x "blocks" are 64K, "sectors" are 4KiB */ ++ { "w25x05", INFO(0xef3010, 0, 64 * 1024, 1, SECT_4K) }, ++ { "w25x10", INFO(0xef3011, 0, 64 * 1024, 2, SECT_4K) }, ++ { "w25x20", INFO(0xef3012, 0, 64 * 1024, 4, SECT_4K) }, ++ { "w25x40", INFO(0xef3013, 0, 64 * 1024, 8, SECT_4K) }, ++ { "w25x80", INFO(0xef3014, 0, 64 * 1024, 16, SECT_4K) }, ++ { "w25x16", INFO(0xef3015, 0, 64 * 1024, 32, SECT_4K) }, ++ { "w25x32", INFO(0xef3016, 0, 64 * 1024, 64, SECT_4K) }, ++ { "w25q32", INFO(0xef4016, 0, 64 * 1024, 64, SECT_4K) }, ++ { "w25q32dw", INFO(0xef6016, 0, 64 * 1024, 64, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "w25x64", INFO(0xef3017, 0, 64 * 1024, 128, SECT_4K) }, ++ { "w25q64", INFO(0xef4017, 0, 64 * 1024, 128, SECT_4K) }, ++ { "w25q64dw", INFO(0xef6017, 0, 64 * 1024, 128, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "w25q128fw", INFO(0xef6018, 0, 64 * 1024, 256, SECT_4K | SPI_NOR_DUAL_READ | SPI_NOR_QUAD_READ) }, ++ { "w25q80", INFO(0xef5014, 0, 64 * 1024, 16, SECT_4K) }, ++ { "w25q80bl", INFO(0xef4014, 0, 64 * 
1024, 16, SECT_4K) }, ++ { "w25q128", INFO(0xef4018, 0, 64 * 1024, 256, SECT_4K) }, ++ { "w25q256", INFO(0xef4019, 0, 64 * 1024, 512, SECT_4K) }, ++ { "w25m512", INFO(0xef7119, 0, 64 * 1024, 1024, SECT_4K) }, ++ ++ /* Catalyst / On Semiconductor -- non-JEDEC */ ++ { "cat25c11", CAT25_INFO( 16, 8, 16, 1, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, ++ { "cat25c03", CAT25_INFO( 32, 8, 16, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, ++ { "cat25c09", CAT25_INFO( 128, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, ++ { "cat25c17", CAT25_INFO( 256, 8, 32, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, ++ { "cat25128", CAT25_INFO(2048, 8, 64, 2, SPI_NOR_NO_ERASE | SPI_NOR_NO_FR) }, ++ { }, ++}; ++ ++static const struct flash_info *spi_nor_read_id(struct spi_nor *nor) ++{ ++ int tmp; ++ u8 id[SPI_NOR_MAX_ID_LEN]; ++ const struct flash_info *info; ++ ++ tmp = nor->read_reg(nor, SPINOR_OP_RDID, id, SPI_NOR_MAX_ID_LEN); ++ if (tmp < 0) { ++ dev_dbg(nor->dev, " error %d reading JEDEC ID\n", tmp); ++ return ERR_PTR(tmp); ++ } ++ ++ for (tmp = 0; tmp < ARRAY_SIZE(spi_nor_ids) - 1; tmp++) { ++ info = &spi_nor_ids[tmp]; ++ if (info->id_len) { ++ if (!memcmp(info->id, id, info->id_len)) ++ return &spi_nor_ids[tmp]; ++ } ++ } ++ dev_err(nor->dev, "unrecognized JEDEC id bytes: %02x, %2x, %2x\n", ++ id[0], id[1], id[2]); ++ return ERR_PTR(-ENODEV); ++} ++ ++static int spi_nor_read(struct mtd_info *mtd, loff_t from, size_t len, ++ size_t *retlen, u_char *buf) ++{ ++ struct spi_nor *nor = mtd_to_spi_nor(mtd); ++ int ret; ++ ++ dev_dbg(nor->dev, "from 0x%08x, len %zd\n", (u32)from, len); ++ ++ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_READ); ++ if (ret) ++ return ret; ++ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* set to 4-byte mode */ ++ if (from + len > 0x1000000) ++ set_4byte(nor, nor->priv1, 1); ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++ ret = nor->read(nor, from, len, retlen, buf); ++ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* set to 3-byte mode */ ++ if (from + len > 0x1000000) 
++ set_4byte(nor, nor->priv1, 0); ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_READ); ++ return ret; ++} ++ ++static int sst_write(struct mtd_info *mtd, loff_t to, size_t len, ++ size_t *retlen, const u_char *buf) ++{ ++ struct spi_nor *nor = mtd_to_spi_nor(mtd); ++ size_t actual; ++ int ret; ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ int addr_4byte = 0; ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); ++ ++ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE); ++ if (ret) ++ return ret; ++ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* set to 4-byte mode */ ++ if (to >= 0x1000000) { ++ set_4byte(nor, nor->priv1, 1); ++ addr_4byte = 1; ++ } ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++ write_enable(nor); ++ ++ nor->sst_write_second = false; ++ ++ actual = to % 2; ++ /* Start write from odd address. */ ++ if (actual) { ++ nor->program_opcode = SPINOR_OP_BP; ++ ++ /* write one byte. */ ++ nor->write(nor, to, 1, retlen, buf); ++ ret = spi_nor_wait_till_ready(nor); ++ if (ret) ++ goto time_out; ++ } ++ to += actual; ++ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* Use 4-byte mode only if the address is > 16MB */ ++ if (to >= 0x1000000 && !addr_4byte) { ++ set_4byte(nor, nor->priv1, 1); ++ addr_4byte = 1; ++ } ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++ /* Write out most of the data here. */ ++ for (; actual < len - 1; actual += 2) { ++ nor->program_opcode = SPINOR_OP_AAI_WP; ++ ++ /* write two bytes. */ ++ nor->write(nor, to, 2, retlen, buf + actual); ++ ret = spi_nor_wait_till_ready(nor); ++ if (ret) ++ goto time_out; ++ to += 2; ++ nor->sst_write_second = true; ++ } ++ nor->sst_write_second = false; ++ ++ write_disable(nor); ++ ret = spi_nor_wait_till_ready(nor); ++ if (ret) ++ goto time_out; ++ ++ /* Write out trailing byte if it exists. 
*/ ++ if (actual != len) { ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* Use 4-byte mode only if the address is > 16MB */ ++ if (to >= 0x1000000 && !addr_4byte) { ++ set_4byte(nor, nor->priv1, 1); ++ addr_4byte = 1; ++ } ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++ write_enable(nor); ++ ++ nor->program_opcode = SPINOR_OP_BP; ++ nor->write(nor, to, 1, retlen, buf + actual); ++ ++ ret = spi_nor_wait_till_ready(nor); ++ if (ret) ++ goto time_out; ++ write_disable(nor); ++ } ++ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* Reset to 3-byte mode if the address is > 16MB */ ++ if (addr_4byte) ++ set_4byte(nor, nor->priv1, 0); ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++time_out: ++ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); ++ return ret; ++} ++ ++/* ++ * Write an address range to the nor chip. Data must be written in ++ * FLASH_PAGESIZE chunks. The address range may be any size provided ++ * it is within the physical boundaries. ++ */ ++static int spi_nor_write(struct mtd_info *mtd, loff_t to, size_t len, ++ size_t *retlen, const u_char *buf) ++{ ++ struct spi_nor *nor = mtd_to_spi_nor(mtd); ++ u32 page_offset, page_size, i; ++ int ret; ++ ++ dev_dbg(nor->dev, "to 0x%08x, len %zd\n", (u32)to, len); ++ ++ ret = spi_nor_lock_and_prep(nor, SPI_NOR_OPS_WRITE); ++ if (ret) ++ return ret; ++ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* set to 4-byte mode */ ++ /* Note: set_4byte will call write_disable for Micron flash, so set_4byte should be called before the following write_enable(nor) */ ++ if (to >= 0x1000000) ++ set_4byte(nor, nor->priv1, 1); ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++ write_enable(nor); ++ ++ page_offset = to & (nor->page_size - 1); ++ ++ /* do all the bytes fit onto one page? 
*/ ++ if (page_offset + len <= nor->page_size) { ++ nor->write(nor, to, len, retlen, buf); ++ } else { ++ /* the size of data remaining on the first page */ ++ page_size = nor->page_size - page_offset; ++ nor->write(nor, to, page_size, retlen, buf); ++ ++ /* write everything in nor->page_size chunks */ ++ for (i = page_size; i < len; i += page_size) { ++ page_size = len - i; ++ if (page_size > nor->page_size) ++ page_size = nor->page_size; ++ ++ ret = spi_nor_wait_till_ready(nor); ++ if (ret) ++ goto write_err; ++ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* set to 4-byte mode if this is the first time > 16MB*/ ++ if ( (to + i - page_size < 0x1000000) && (to + i >= 0x1000000) ) ++ set_4byte(nor, nor->priv1, 1); ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++ write_enable(nor); ++ ++ nor->write(nor, to + i, page_size, retlen, buf + i); ++ } ++ } ++ ++ ret = spi_nor_wait_till_ready(nor); ++ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* reset to 3-byte mode*/ ++ if (to + len >= 0x1000000) ++ set_4byte(nor, nor->priv1, 0); ++#endif /* CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ ++write_err: ++ spi_nor_unlock_and_unprep(nor, SPI_NOR_OPS_WRITE); ++ return ret; ++} ++ ++static int macronix_quad_enable(struct spi_nor *nor) ++{ ++ int ret, val; ++ ++ val = read_sr(nor); ++ write_enable(nor); ++ ++ write_sr(nor, val | SR_QUAD_EN_MX); ++ ++ if (spi_nor_wait_till_ready(nor)) ++ return 1; ++ ++ ret = read_sr(nor); ++ if (!(ret > 0 && (ret & SR_QUAD_EN_MX))) { ++ dev_err(nor->dev, "Macronix Quad bit not set\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++/* ++ * Write status Register and configuration register with 2 bytes ++ * The first byte will be written to the status register, while the ++ * second byte will be written to the configuration register. ++ * Return negative if error occured. 
++ */ ++static int write_sr_cr(struct spi_nor *nor, u16 val) ++{ ++ nor->cmd_buf[0] = val & 0xff; ++ nor->cmd_buf[1] = (val >> 8); ++ ++ return nor->write_reg(nor, SPINOR_OP_WRSR, nor->cmd_buf, 2); ++} ++ ++static int spansion_quad_enable(struct spi_nor *nor) ++{ ++ int ret; ++ int quad_en = CR_QUAD_EN_SPAN << 8; ++ ++ write_enable(nor); ++ ++ ret = write_sr_cr(nor, quad_en); ++ if (ret < 0) { ++ dev_err(nor->dev, ++ "error while writing configuration register\n"); ++ return -EINVAL; ++ } ++ ++ /* read back and check it */ ++ ret = read_cr(nor); ++ if (!(ret > 0 && (ret & CR_QUAD_EN_SPAN))) { ++ dev_err(nor->dev, "Spansion Quad bit not set\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int micron_quad_enable(struct spi_nor *nor) ++{ ++ int ret; ++ u8 val; ++ ++ ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1); ++ if (ret < 0) { ++ dev_err(nor->dev, "error %d reading EVCR\n", ret); ++ return ret; ++ } ++ ++ write_enable(nor); ++ ++ /* set EVCR, enable quad I/O */ ++ nor->cmd_buf[0] = val & ~EVCR_QUAD_EN_MICRON; ++ ret = nor->write_reg(nor, SPINOR_OP_WD_EVCR, nor->cmd_buf, 1); ++ if (ret < 0) { ++ dev_err(nor->dev, "error while writing EVCR register\n"); ++ return ret; ++ } ++ ++ ret = spi_nor_wait_till_ready(nor); ++ if (ret) ++ return ret; ++ ++ /* read EVCR and check it */ ++ ret = nor->read_reg(nor, SPINOR_OP_RD_EVCR, &val, 1); ++ if (ret < 0) { ++ dev_err(nor->dev, "error %d reading EVCR\n", ret); ++ return ret; ++ } ++ if (val & EVCR_QUAD_EN_MICRON) { ++ dev_err(nor->dev, "Micron EVCR Quad bit not clear\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++static int set_quad_mode(struct spi_nor *nor, const struct flash_info *info) ++{ ++ int status; ++ ++ switch (JEDEC_MFR(info)) { ++ case SNOR_MFR_MACRONIX: ++ status = macronix_quad_enable(nor); ++ if (status) { ++ dev_err(nor->dev, "Macronix quad-read not enabled\n"); ++ return -EINVAL; ++ } ++ return status; ++ case SNOR_MFR_MICRON: ++ status = micron_quad_enable(nor); ++ if (status) { ++ 
dev_err(nor->dev, "Micron quad-read not enabled\n"); ++ return -EINVAL; ++ } ++ return status; ++ default: ++ status = spansion_quad_enable(nor); ++ if (status) { ++ dev_err(nor->dev, "Spansion quad-read not enabled\n"); ++ return -EINVAL; ++ } ++ return status; ++ } ++} ++ ++static int spi_nor_check(struct spi_nor *nor) ++{ ++ if (!nor->dev || !nor->read || !nor->write || ++ !nor->read_reg || !nor->write_reg || !nor->erase) { ++ pr_err("spi-nor: please fill all the necessary fields!\n"); ++ return -EINVAL; ++ } ++ ++ return 0; ++} ++ ++int spi_nor_scan(struct spi_nor *nor, const char *name, enum read_mode mode) ++{ ++ const struct flash_info *info = NULL; ++ struct device *dev = nor->dev; ++ struct mtd_info *mtd = &nor->mtd; ++ struct device_node *np = nor->flash_node; ++ int ret; ++ int i; ++ ++ ret = spi_nor_check(nor); ++ if (ret) ++ return ret; ++ ++ if (name) ++ info = spi_nor_match_id(name); ++ /* Try to auto-detect if chip name wasn't specified or not found */ ++ if (!info) ++ info = spi_nor_read_id(nor); ++ if (IS_ERR_OR_NULL(info)) ++ return -ENOENT; ++ ++ /* ++ * If caller has specified name of flash model that can normally be ++ * detected using JEDEC, let's verify it. ++ */ ++ if (name && info->id_len) { ++ const struct flash_info *jinfo; ++ ++ jinfo = spi_nor_read_id(nor); ++ if (IS_ERR(jinfo)) { ++ return PTR_ERR(jinfo); ++ } else if (jinfo != info) { ++ /* ++ * JEDEC knows better, so overwrite platform ID. We ++ * can't trust partitions any longer, but we'll let ++ * mtd apply them anyway, since some partitions may be ++ * marked read-only, and we don't want to lose that ++ * information, even if it's not 100% accurate. 
++ */ ++ dev_warn(dev, "found %s, expected %s\n", ++ jinfo->name, info->name); ++ info = jinfo; ++ } ++ } ++ ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ /* keep the flash_info pointer for use with call to set_4byte() */ ++ nor->priv1 = info; ++#endif ++ ++ mutex_init(&nor->lock); ++ ++ /* ++ * Atmel, SST, Intel/Numonyx, and others serial NOR tend to power up ++ * with the software protection bits set ++ */ ++ ++ if (JEDEC_MFR(info) == SNOR_MFR_ATMEL || ++ JEDEC_MFR(info) == SNOR_MFR_INTEL || ++ JEDEC_MFR(info) == SNOR_MFR_SST) { ++ write_enable(nor); ++ write_sr(nor, 0); ++ } ++ ++ if (!mtd->name) ++ mtd->name = dev_name(dev); ++ mtd->priv = nor; ++ mtd->type = MTD_NORFLASH; ++ mtd->writesize = 1; ++ mtd->flags = MTD_CAP_NORFLASH; ++ mtd->size = info->sector_size * info->n_sectors; ++ mtd->_erase = spi_nor_erase; ++ mtd->_read = spi_nor_read; ++ ++ /* NOR protection support for STmicro/Micron chips and similar */ ++ if (JEDEC_MFR(info) == SNOR_MFR_MICRON) { ++ nor->flash_lock = stm_lock; ++ nor->flash_unlock = stm_unlock; ++ nor->flash_is_locked = stm_is_locked; ++ } ++ ++ if (nor->flash_lock && nor->flash_unlock && nor->flash_is_locked) { ++ mtd->_lock = spi_nor_lock; ++ mtd->_unlock = spi_nor_unlock; ++ mtd->_is_locked = spi_nor_is_locked; ++ } ++ ++ /* sst nor chips use AAI word program */ ++ if (info->flags & SST_WRITE) ++ mtd->_write = sst_write; ++ else ++ mtd->_write = spi_nor_write; ++ ++ if (info->flags & USE_FSR) ++ nor->flags |= SNOR_F_USE_FSR; ++ ++#ifdef CONFIG_MTD_SPI_NOR_USE_4K_SECTORS ++ /* prefer "small sector" erase if possible */ ++ if (info->flags & SECT_4K) { ++ nor->erase_opcode = SPINOR_OP_BE_4K; ++ mtd->erasesize = 4096; ++ } else if (info->flags & SECT_4K_PMC) { ++ nor->erase_opcode = SPINOR_OP_BE_4K_PMC; ++ mtd->erasesize = 4096; ++ } else ++#endif ++ { ++ nor->erase_opcode = SPINOR_OP_SE; ++ mtd->erasesize = info->sector_size; ++ } ++ ++ if (info->flags & SPI_NOR_NO_ERASE) ++ mtd->flags |= MTD_NO_ERASE; ++ ++ mtd->dev.parent = dev; ++ 
nor->page_size = info->page_size; ++ mtd->writebufsize = nor->page_size; ++ ++ if (np) { ++ /* If we were instantiated by DT, use it */ ++ if (of_property_read_bool(np, "m25p,fast-read")) ++ nor->flash_read = SPI_NOR_FAST; ++ else ++ nor->flash_read = SPI_NOR_NORMAL; ++ } else { ++ /* If we weren't instantiated by DT, default to fast-read */ ++ nor->flash_read = SPI_NOR_FAST; ++ } ++ ++ /* Some devices cannot do fast-read, no matter what DT tells us */ ++ if (info->flags & SPI_NOR_NO_FR) ++ nor->flash_read = SPI_NOR_NORMAL; ++ ++ /* Quad/Dual-read mode takes precedence over fast/normal */ ++ if (mode == SPI_NOR_QUAD && info->flags & SPI_NOR_QUAD_READ) { ++ ret = set_quad_mode(nor, info); ++ if (ret) { ++ dev_err(dev, "quad mode not supported\n"); ++ return ret; ++ } ++ nor->flash_read = SPI_NOR_QUAD; ++ } else if (mode == SPI_NOR_DUAL && info->flags & SPI_NOR_DUAL_READ) { ++ nor->flash_read = SPI_NOR_DUAL; ++ } ++ ++ /* Default commands */ ++ switch (nor->flash_read) { ++ case SPI_NOR_QUAD: ++ nor->read_opcode = SPINOR_OP_READ_1_1_4; ++ break; ++ case SPI_NOR_DUAL: ++ nor->read_opcode = SPINOR_OP_READ_1_1_2; ++ break; ++ case SPI_NOR_FAST: ++ nor->read_opcode = SPINOR_OP_READ_FAST; ++ break; ++ case SPI_NOR_NORMAL: ++ nor->read_opcode = SPINOR_OP_READ; ++ break; ++ default: ++ dev_err(dev, "No Read opcode defined\n"); ++ return -EINVAL; ++ } ++ ++ nor->program_opcode = SPINOR_OP_PP; ++ ++ if (info->addr_width) ++ nor->addr_width = info->addr_width; ++ else { ++#ifndef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ if (mtd->size > 0x1000000) { ++ /* enable 4-byte addressing if the device exceeds 16MiB */ ++ nor->addr_width = 4; ++ if (JEDEC_MFR(info) == CFI_MFR_AMD) { ++ /* Dedicated 4-byte command set */ ++ switch (nor->flash_read) { ++ case SPI_NOR_QUAD: ++ nor->read_opcode = SPINOR_OP_READ4_1_1_4; ++ break; ++ case SPI_NOR_DUAL: ++ nor->read_opcode = SPINOR_OP_READ4_1_1_2; ++ break; ++ case SPI_NOR_FAST: ++ nor->read_opcode = SPINOR_OP_READ4_FAST; ++ break; ++ case 
SPI_NOR_NORMAL: ++ nor->read_opcode = SPINOR_OP_READ4; ++ break; ++ } ++ nor->program_opcode = SPINOR_OP_PP_4B; ++ /* No small sector erase for 4-byte command set */ ++ nor->erase_opcode = SPINOR_OP_SE_4B; ++ mtd->erasesize = info->sector_size; ++ } else ++ set_4byte(nor, info, 1); ++ } else ++#endif /* !CONFIG_M25PXX_STAY_IN_3BYTE_MODE */ ++ nor->addr_width = 3; ++ } ++ ++ nor->read_dummy = spi_nor_read_dummy_cycles(nor); ++ ++ dev_info(dev, "%s (%lld Kbytes)\n", info->name, ++ (long long)mtd->size >> 10); ++ ++ dev_dbg(dev, ++ "mtd .name = %s, .size = 0x%llx (%lldMiB), " ++ ".erasesize = 0x%.8x (%uKiB) .numeraseregions = %d\n", ++ mtd->name, (long long)mtd->size, (long long)(mtd->size >> 20), ++ mtd->erasesize, mtd->erasesize / 1024, mtd->numeraseregions); ++ ++ if (mtd->numeraseregions) ++ for (i = 0; i < mtd->numeraseregions; i++) ++ dev_dbg(dev, ++ "mtd.eraseregions[%d] = { .offset = 0x%llx, " ++ ".erasesize = 0x%.8x (%uKiB), " ++ ".numblocks = %d }\n", ++ i, (long long)mtd->eraseregions[i].offset, ++ mtd->eraseregions[i].erasesize, ++ mtd->eraseregions[i].erasesize / 1024, ++ mtd->eraseregions[i].numblocks); ++ return 0; ++} ++EXPORT_SYMBOL_GPL(spi_nor_scan); ++ ++static const struct flash_info *spi_nor_match_id(const char *name) ++{ ++ const struct flash_info *id = spi_nor_ids; ++ ++ while (id->name) { ++ if (!strcmp(name, id->name)) ++ return id; ++ id++; ++ } ++ return NULL; ++} ++ ++MODULE_LICENSE("GPL"); ++MODULE_DESCRIPTION("framework for SPI NOR"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/Kconfig b/drivers/net/ethernet/broadcom/Kconfig +--- a/drivers/net/ethernet/broadcom/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/Kconfig 2017-11-09 17:53:43.698295000 +0800 +@@ -190,4 +190,8 @@ config BNXT_SRIOV + Virtualization support in the NetXtreme-C/E products. 
This + allows for virtual function acceleration in virtual environments. + ++source "drivers/net/ethernet/broadcom/gmac/et/Kconfig" ++source "drivers/net/ethernet/broadcom/gmac/hnd/Kconfig" ++source "drivers/net/ethernet/broadcom/mdio/Kconfig" ++ + endif # NET_VENDOR_BROADCOM +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/Makefile b/drivers/net/ethernet/broadcom/Makefile +--- a/drivers/net/ethernet/broadcom/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/Makefile 2017-11-09 17:53:43.699295000 +0800 +@@ -11,5 +11,8 @@ obj-$(CONFIG_BNX2X) += bnx2x/ + obj-$(CONFIG_SB1250_MAC) += sb1250-mac.o + obj-$(CONFIG_TIGON3) += tg3.o + obj-$(CONFIG_BGMAC) += bgmac.o ++obj-$(CONFIG_MDIO_XGS_IPROC) += mdio/ ++obj-$(CONFIG_GMAC_XGS_IPROC) += gmac/et/ ++obj-$(CONFIG_GMAC_XGS_IPROC) += gmac/hnd/ + obj-$(CONFIG_SYSTEMPORT) += bcmsysport.o + obj-$(CONFIG_BNXT) += bnxt/ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/et/Kconfig b/drivers/net/ethernet/broadcom/gmac/et/Kconfig +--- a/drivers/net/ethernet/broadcom/gmac/et/Kconfig 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/et/Kconfig 2017-11-09 17:53:43.878296000 +0800 +@@ -0,0 +1,26 @@ ++# ++# Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++# ++# Permission to use, copy, modify, and/or distribute this software for any ++# purpose with or without fee is hereby granted, provided that the above ++# copyright notice and this permission notice appear in all copies. ++# ++# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++# ++config GMAC_XGS_IPROC ++ tristate "BRCM XGS iProc GMAC support " ++ select HND ++ select ET ++ select ET_ALL_PASSIVE_ON ++ depends on ARCH_XGS_IPROC ++ default n ++ help ++ Add GMAC support ++ ++ If unsure, say N. +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/et/Makefile b/drivers/net/ethernet/broadcom/gmac/et/Makefile +--- a/drivers/net/ethernet/broadcom/gmac/et/Makefile 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/et/Makefile 2017-11-09 17:53:43.879292000 +0800 +@@ -0,0 +1,67 @@ ++# ++# Makefile for the Broadcom et driver ++# ++# Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++# ++# Permission to use, copy, modify, and/or distribute this software for any ++# purpose with or without fee is hereby granted, provided that the above ++# copyright notice and this permission notice appear in all copies. ++# ++# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++# ++ETSRCDIR := ../src/et ++ ++et-objs := $(ETSRCDIR)/sys/et_linux.o $(ETSRCDIR)/sys/etc.o ++ ++## from linux dir ########## ++export SRCBASE_et := $(src)/$(ETSRCDIR)/sys/../../ ++KBUILD_CFLAGS += -I$(SRCBASE_et)/include -DBCMDRIVER -Dlinux ++KBUILD_AFLAGS += -I$(SRCBASE_et)/include ++################################# ++obj-$(CONFIG_ET) := et.o ++ ++et-objs += $(ETSRCDIR)/sys/etcgmac.o ++EXTRA_CFLAGS += -DDMA -Wno-error ++EXTRA_CFLAGS += -DGMAC_RATE_LIMITING -DBCMDMA32 -DBCMDBG_ERR ++ ++ifeq ($(CONFIG_BCM_IPROC_GMAC_SG),y) ++EXTRA_CFLAGS += -DBCMDMASGLISTOSL ++endif ++ ++ifeq ($(CONFIG_ET_ALL_PASSIVE_ON),y) ++EXTRA_CFLAGS += -DGMAC_ALL_PASSIVE ++else ++ifeq ($(CONFIG_ET_ALL_PASSIVE_RUNTIME),y) ++EXTRA_CFLAGS += -DGMAC_ALL_PASSIVE ++endif ++endif ++ ++ifeq ($(CONFIG_ET_NAPI_POLL),y) ++EXTRA_CFLAGS += -DGMAC_NAPI_POLL ++else ++ifeq ($(CONFIG_ET_NAPI2_POLL),y) ++EXTRA_CFLAGS += -DGMAC_NAPI2_POLL ++endif ++endif ++ ++EXTRA_CFLAGS += -I$(src)/$(ETSRCDIR)/sys ++ ++ifneq ($(KERNELRELEASE),) ++# kbuild part of makefile ++else ++# Normal makefile ++KERNELDIR := ../../kernel/linux ++all: ++ $(MAKE) -C $(KERNELDIR) M=`pwd` ++ ++clean: ++ $(MAKE) -C $(KERNELDIR) M=`pwd` clean ++endif ++ ++clean-files += $(ETSRCDIR)/sys/*.o $(ETSRCDIR)/sys/.*.o.cmd +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/hnd/Kconfig b/drivers/net/ethernet/broadcom/gmac/hnd/Kconfig +--- a/drivers/net/ethernet/broadcom/gmac/hnd/Kconfig 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/hnd/Kconfig 2017-11-09 17:53:43.880305000 +0800 +@@ -0,0 +1,100 @@ ++# ++# Broadcom Home Networking Division (HND) driver configuration ++# ++# Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. 
++# ++# Permission to use, copy, modify, and/or distribute this software for any ++# purpose with or without fee is hereby granted, provided that the above ++# copyright notice and this permission notice appear in all copies. ++# ++# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++# ++ ++menu "Broadcom HND network devices" ++# Kenlo depends on PCI ++config HND ++ bool "Broadcom HND network device support" ++ depends on GMAC_XGS_IPROC ++config ET ++ tristate "10/100 Ethernet support" ++ depends on HND ++choice ++ prompt "ET ALL PASSIVE mode" ++ depends on ET ++ optional ++config ET_ALL_PASSIVE_ON ++ bool "ET ALL PASSIVE on" ++config ET_ALL_PASSIVE_RUNTIME ++ bool "ET ALL PASSIVE with runtime setting" ++endchoice ++config ET_NAPI2_POLL ++ bool "BCM GMAC NAPI2_POLL" ++ default n ++ depends on !ET_ALL_PASSIVE_ON && !ET_ALL_PASSIVE_RUNTIME ++config BCM_IPROC_GMAC_ACP ++ tristate "BCM GMAC_ACP support" ++ depends on HND ++ default n ++ help ++ Add GMAC_ACP support to improve performance without ++ cache flushing/invalidate. The uboot's bootargs must ++ include "mem=240M" to limit whole Kernel memory inside ++ ACP region which is 256MB from 0x80000000; since kernel ++ starts from 0x81000000, total mem is 240MB only ++ If unsure, say N. ++config BCM_IPROC_GMAC_PREFETCH ++ tristate "BCM GMAC prefetching support" ++ depends on HND ++ default n ++ help ++ If unsure, say N. 
++config BCM_IPROC_GMAC_TXONCPU1 ++ tristate "BCM GMAC TX-ON-CPU1 support" ++ depends on HND && SMP && (ET_ALL_PASSIVE_ON || ET_ALL_PASSIVE_RUNTIME) ++ default n ++ help ++ Run "Passive Mode" Tx workthread on CPU1 for ++ multi-cores utilizing; ++ If unsure, say N. ++config BCM_IPROC_GMAC_LOCK_OPT ++ tristate "BCM GMAC LOCK OPTIMIZATION support" ++ depends on HND ++ default n ++ help ++ Minimize locks during Tx/Rx tasks; ++ it is tested under "Passive Mode" (workthread) only. ++ If unsure, say N. ++config BCM_IPROC_GMAC_RWREG_OPT ++ tristate "BCM GMAC R/W_REG OPTIMIZATION support" ++ depends on HND ++ default n ++ help ++ Remove unnecessary "DSB" intructions of R/W_REG Macro. ++ If unsure, say N. ++config BCM_IPROC_GMAC_SG ++ bool "BCM GMAC Scatter Gather support" ++ default n ++ depends on HND ++config IPROC_SDK_MGT_PORT_HANDOFF ++ bool "GMAC SDK Management port handoff" ++ default y ++ depends on HND ++config IPROC_2STAGE_RX ++ bool "GMAC 2 stage packet RX" ++ default n ++ depends on HND ++config SERDES_ASYMMETRIC_MODE ++ bool "GMAC SDK Serdes Asymmetric Mode" ++ default n ++ depends on HND && (MACH_KT2 || MACH_HX4) ++config JUMBO_FRAME ++ bool "GMAC Jumbo Frame Support" ++ default n ++ depends on HND ++endmenu +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/hnd/Makefile b/drivers/net/ethernet/broadcom/gmac/hnd/Makefile +--- a/drivers/net/ethernet/broadcom/gmac/hnd/Makefile 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/hnd/Makefile 2017-11-09 17:53:43.881317000 +0800 +@@ -0,0 +1,121 @@ ++# ++# Makefile for Broadcom Home Networking Division (HND) shared driver code ++# ++# $Copyright Open Broadcom Corporation$ ++# ++# $Id: Makefile,v 1.5 2008-05-02 22:49:54 pmoutarl Exp $ ++# ++ ++SHARED := ../src/shared ++ ++## from linux dir ########## ++export SRCBASE_hnd := $(src)/$(SHARED)/../ ++KBUILD_CFLAGS += 
-I$(SRCBASE_hnd)/include -DBCMDRIVER -Dlinux ++KBUILD_AFLAGS += -I$(SRCBASE_hnd)/include ++################################# ++obj-$(CONFIG_HND) := hnd.o ++ ++EXTRA_CFLAGS += -DBCMDBG_ERR -DBCMDMA32 ++ ++ifeq ($(CONFIG_BCM_IPROC_GMAC_SG),y) ++EXTRA_CFLAGS += -DBCMDMASGLISTOSL ++endif ++ ++HND_OBJS += $(src)/$(SHARED)/nvramstubs.o ++hnd-objs += $(SHARED)/nvramstubs.o ++ ++HND_OBJS += $(src)/$(SHARED)/hnddma.o ++hnd-objs += $(SHARED)/hnddma.o ++ ++HND_OBJS += $(src)/$(SHARED)/bcmutils.o ++hnd-objs += $(SHARED)/bcmutils.o ++ ++HND_OBJS += $(src)/$(SHARED)/linux_osl.o ++hnd-objs += $(SHARED)/linux_osl.o ++ ++HND_OBJS += $(src)/$(SHARED)/siutils.o ++hnd-objs += $(SHARED)/siutils.o ++ ++HND_OBJS += $(src)/$(SHARED)/aiutils.o ++hnd-objs += $(SHARED)/aiutils.o ++ ++ ++ifeq ($(CONFIG_MACH_HX4),y) ++HND_OBJS += $(src)/$(SHARED)/bcmiproc_serdes.o ++hnd-objs += $(SHARED)/bcmiproc_serdes.o ++ ++HND_OBJS += $(src)/$(SHARED)/bcmiproc_phy5461s.o ++hnd-objs += $(SHARED)/bcmiproc_phy5461s.o ++ ++HND_OBJS += $(src)/$(SHARED)/hx4_erom.o ++hnd-objs += $(SHARED)/hx4_erom.o ++endif ++ ++ifeq ($(CONFIG_MACH_SB2),y) ++HND_OBJS += $(src)/$(SHARED)/bcmiproc_serdes.o ++hnd-objs += $(SHARED)/bcmiproc_serdes.o ++ ++HND_OBJS += $(src)/$(SHARED)/bcmiproc_phy5461s.o ++hnd-objs += $(SHARED)/bcmiproc_phy5461s.o ++ ++HND_OBJS += $(src)/$(SHARED)/sb2_erom.o ++hnd-objs += $(SHARED)/sb2_erom.o ++endif ++ ++ifeq ($(CONFIG_MACH_KT2),y) ++HND_OBJS += $(src)/$(SHARED)/bcmiproc_serdes.o ++hnd-objs += $(SHARED)/bcmiproc_serdes.o ++ ++HND_OBJS += $(src)/$(SHARED)/bcmiproc_phy5461s.o ++hnd-objs += $(SHARED)/bcmiproc_phy5461s.o ++ ++HND_OBJS += $(src)/$(SHARED)/kt2_erom.o ++hnd-objs += $(SHARED)/kt2_erom.o ++endif ++ ++ifeq ($(CONFIG_MACH_HR2),y) ++HND_OBJS += $(src)/$(SHARED)/bcmiproc_phy5221.o ++hnd-objs += $(SHARED)/bcmiproc_phy5221.o ++ ++HND_OBJS += $(src)/$(SHARED)/hr2_erom.o ++hnd-objs += $(SHARED)/hr2_erom.o ++endif ++ ++ifeq ($(CONFIG_MACH_GH),y) ++HND_OBJS += $(src)/$(SHARED)/bcmiproc_phy5481.o 
++hnd-objs += $(SHARED)/bcmiproc_phy5481.o ++ ++HND_OBJS += $(src)/$(SHARED)/gh_erom.o ++hnd-objs += $(SHARED)/gh_erom.o ++endif ++ ++ifeq ($(CONFIG_MACH_HR3),y) ++ifeq ($(CONFIG_MACH_WH2),y) ++HND_OBJS += $(src)/$(SHARED)/sgmiiplus2_serdes.o ++hnd-objs += $(SHARED)/sgmiiplus2_serdes.o ++ ++HND_OBJS += $(src)/$(SHARED)/bcmiproc_egphy28.o ++hnd-objs += $(SHARED)/bcmiproc_egphy28.o ++else ++HND_OBJS += $(src)/$(SHARED)/bcmiproc_phy5481.o ++hnd-objs += $(SHARED)/bcmiproc_phy5481.o ++endif ++HND_OBJS += $(src)/$(SHARED)/hr3_erom.o ++hnd-objs += $(SHARED)/hr3_erom.o ++endif ++ ++ifeq ($(CONFIG_MACH_GH2),y) ++HND_OBJS += $(src)/$(SHARED)/sgmiiplus2_serdes.o ++hnd-objs += $(SHARED)/sgmiiplus2_serdes.o ++ ++HND_OBJS += $(src)/$(SHARED)/phy542xx.o ++hnd-objs += $(SHARED)/phy542xx.o ++ ++HND_OBJS += $(src)/$(SHARED)/gh2_erom.o ++hnd-objs += $(SHARED)/gh2_erom.o ++endif ++ ++#$(src)/shared_ksyms.c: $(src)/shared_ksyms.sh $(HND_OBJS) ++# sh -e $< $(HND_OBJS) > $@ ++ ++hnd-objs += shared_ksyms.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/hnd/shared_ksyms.c b/drivers/net/ethernet/broadcom/gmac/hnd/shared_ksyms.c +--- a/drivers/net/ethernet/broadcom/gmac/hnd/shared_ksyms.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/hnd/shared_ksyms.c 2017-11-09 17:53:43.882312000 +0800 +@@ -0,0 +1,43 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++#include ++#include ++#include ++#include ++EXPORT_SYMBOL(bcm_atoi); ++EXPORT_SYMBOL(bcm_binit); ++EXPORT_SYMBOL(bcm_bprintf); ++EXPORT_SYMBOL(bcm_ether_atoe); ++EXPORT_SYMBOL(bcm_ether_ntoa); ++EXPORT_SYMBOL(bcm_strtoul); ++EXPORT_SYMBOL(getgpiopin); ++EXPORT_SYMBOL(getintvar); ++EXPORT_SYMBOL(getvar); ++EXPORT_SYMBOL(nvram_env_gmac_name); ++EXPORT_SYMBOL(nvram_get); ++EXPORT_SYMBOL(osl_delay); ++EXPORT_SYMBOL(osl_detach); ++EXPORT_SYMBOL(osl_dma_map); ++EXPORT_SYMBOL(osl_malloc); 
++EXPORT_SYMBOL(osl_malloced); ++EXPORT_SYMBOL(osl_mfree); ++EXPORT_SYMBOL(osl_pkt_frmnative); ++EXPORT_SYMBOL(osl_pkt_tonative); ++EXPORT_SYMBOL(osl_pktfree); ++EXPORT_SYMBOL(pktsetprio); ++EXPORT_SYMBOL(si_attach); ++EXPORT_SYMBOL(si_setcore); ++EXPORT_SYMBOL(si_core_cflags); ++EXPORT_SYMBOL(si_core_disable); ++EXPORT_SYMBOL(si_core_reset); ++EXPORT_SYMBOL(si_core_sflags); ++EXPORT_SYMBOL(si_coreid); ++EXPORT_SYMBOL(si_coreidx); ++EXPORT_SYMBOL(si_corerev); ++EXPORT_SYMBOL(si_coreunit); ++EXPORT_SYMBOL(si_detach); ++EXPORT_SYMBOL(si_gpioout); ++EXPORT_SYMBOL(si_gpioouten); ++EXPORT_SYMBOL(si_iscoreup); ++EXPORT_SYMBOL(si_setcoreidx); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/hnd/shared_ksyms.sh b/drivers/net/ethernet/broadcom/gmac/hnd/shared_ksyms.sh +--- a/drivers/net/ethernet/broadcom/gmac/hnd/shared_ksyms.sh 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/hnd/shared_ksyms.sh 2017-11-09 17:53:43.883313000 +0800 +@@ -0,0 +1,30 @@ ++#!/bin/sh ++# ++# Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++# ++# Permission to use, copy, modify, and/or distribute this software for any ++# purpose with or without fee is hereby granted, provided that the above ++# copyright notice and this permission notice appear in all copies. ++# ++# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++# ++# $Id: shared_ksyms.sh,v 1.2 2008-12-05 20:10:41 $ ++# ++ ++cat < ++#include ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) ++#include ++#endif ++EOF ++ ++for file in $* ; do ++ ${NM} $file | sed -ne 's/[0-9A-Fa-f]* [BDRT] \([^ ]*\)/extern void \1; EXPORT_SYMBOL(\1);/p' ++done +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_cfg.h b/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_cfg.h +--- a/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_cfg.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_cfg.h 2017-11-09 17:53:43.886293000 +0800 +@@ -0,0 +1,12 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * BCM ET driver config options ++ * ++ * $Id: et_cfg.h,v 1.1.4.1 2010-08-05 19:17:00 jaredh Exp $ ++ */ ++ ++#if defined(__NetBSD__) || defined(__FreeBSD__) ++#include ++#include ++#endif /* defined(__NetBSD__) || defined(__FreeBSD__) */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_dbg.h b/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_dbg.h +--- a/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_dbg.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_dbg.h 2017-11-09 17:53:43.896296000 +0800 +@@ -0,0 +1,47 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * Minimal debug/trace/assert driver definitions for ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Device Driver. 
++ * ++ * $Id: et_dbg.h 286404 2011-09-27 19:29:08Z nisar $ ++ */ ++ ++#ifndef _et_dbg_ ++#define _et_dbg_ ++ ++#ifdef BCMDBG ++struct ether_header; ++extern void etc_prhdr(char *msg, struct ether_header *eh, uint len, int unit); ++extern void etc_prhex(char *msg, uchar *buf, uint nbytes, int unit); ++/* ++ * et_msg_level is a bitvector: ++ * 0 errors ++ * 1 function-level tracing ++ * 2 one-line frame tx/rx summary ++ * 3 complex frame tx/rx in hex ++ */ ++#define ET_ERROR(args) if (!(et_msg_level & 1)) ; else printf args ++#define ET_TRACE(args) if (!(et_msg_level & 2)) ; else printf args ++#define ET_PRHDR(msg, eh, len, unit) if (!(et_msg_level & 4)) ; else etc_prhdr(msg, eh, len, unit) ++#define ET_PRPKT(msg, buf, len, unit) if (!(et_msg_level & 8)) ; else etc_prhex(msg, buf, len, unit) ++#else /* BCMDBG */ ++#define ET_ERROR(args) ++#define ET_TRACE(args) ++#define ET_PRHDR(msg, eh, len, unit) ++#define ET_PRPKT(msg, buf, len, unit) ++#endif /* BCMDBG */ ++ ++extern uint32 et_msg_level; ++ ++#define ET_LOG(fmt, a1, a2) ++ ++/* include port-specific tunables */ ++#if defined(linux) ++#include ++#else ++#error ++#endif ++ ++#endif /* _et_dbg_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_export.h b/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_export.h +--- a/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_export.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_export.h 2017-11-09 17:53:43.897291000 +0800 +@@ -0,0 +1,28 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * Required functions exported by the port-specific (os-dependent) driver ++ * to common (os-independent) driver code. 
++ * ++ * $Id: et_export.h 322208 2012-03-20 01:53:23Z rnuti $ ++ */ ++ ++#ifndef _et_export_h_ ++#define _et_export_h_ ++ ++/* misc callbacks */ ++extern void et_init(void *et, uint options); ++extern void et_reset(void *et); ++extern void et_link_up(void *et); ++extern void et_link_down(void *et); ++extern bool et_is_link_up(void *et); ++extern int et_up(void *et); ++extern int et_down(void *et, int reset); ++extern void et_dump(void *et, struct bcmstrbuf *b); ++extern void et_intrson(void *et); ++ ++/* for BCM5222 dual-phy shared mdio contortion */ ++extern void *et_phyfind(void *et, uint coreunit); ++extern uint16 et_phyrd(void *et, uint phyaddr, uint reg); ++extern void et_phywr(void *et, uint reg, uint phyaddr, uint16 val); ++#endif /* _et_export_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_linux.c b/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_linux.c +--- a/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_linux.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_linux.c 2017-11-09 17:53:43.899291000 +0800 +@@ -0,0 +1,2603 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * Linux device driver for ++ * Broadcom BCM47XX 10/100/1000 Mbps Ethernet Controller ++ * ++ * $Id: et_linux.c 327582 2012-04-14 05:02:37Z kenlo $ ++ */ ++ ++#include ++#define __UNDEF_NO_VERSION__ ++ ++#include ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef SIOCETHTOOL ++#include ++#endif /* SIOCETHTOOL */ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef CONFIG_OF ++#include ++#include ++#include ++#include ++#include ++#endif ++ ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#include ++#include ++#include ++#include 
++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* to be cleaned and fixed */ ++/* to be cleaned Makefile */ ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef CONFIG_BCM_IPROC_GMAC_PREFETCH ++#include ++ ++#define SKB_PREFETCH_LEN (128) ++/* 30 rxhdr + 34 mac & ip */ ++#define SKB_DATA_PREFETCH_LEN (96) ++#endif /* CONFIG_BCM_IPROC_GMAC_PREFETCH */ ++ ++#if LINUX_VERSION_CODE <= KERNEL_VERSION(2, 4, 5) ++#error Linux version must be newer than 2.4.5 ++#endif /* LINUX_VERSION_CODE <= KERNEL_VERSION(2, 4, 5) */ ++ ++#define MIN_PACKET_SIZE 70 /* for gmac2 (&GMAC3?) */ ++/* if packet is less than 64 bytes, it will not tx */ ++/* if packet is less than 66 bytes, CRC is not generated) */ ++/* this length is after brm tag is stripped off */ ++ ++#define DATAHIWAT 1000 /* data msg txq hiwat mark */ ++ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36) ++#define HAVE_NET_DEVICE_OPS 1 ++#define HAVE_NETDEV_PRIV 1 ++#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 36) */ ++ ++#ifndef HAVE_NETDEV_PRIV ++#define HAVE_NETDEV_PRIV ++#define netdev_priv(dev) ((dev)->priv) ++#define ET_INFO(dev) (et_info_t *)((dev)->priv) ++#else ++#define ET_INFO(dev) netdev_priv(dev) ++#endif /* HAVE_NETDEV_PRIV */ ++ ++#ifdef GMAC_ALL_PASSIVE ++#define ET_LIMIT_TXQ ++#define ET_ALL_PASSIVE_ENAB(et) (!(et)->all_dispatch_mode) ++ ++/* passive mode: 1: enable, 0: disable */ ++static int passivemode = 0; ++module_param(passivemode, int, 0); ++#else /* GMAC_ALL_PASSIVE */ ++#define ET_ALL_PASSIVE_ENAB(et) 0 ++#endif /* GMAC_ALL_PASSIVE */ ++ ++#ifdef ET_LIMIT_TXQ ++#define ET_TXQ_THRESH 0 ++static int et_txq_thresh = ET_TXQ_THRESH; ++module_param(et_txq_thresh, int, 0); ++#endif /* ET_LIMIT_TXQ */ ++ ++ ++/* In 2.6.20 kernels work functions get passed a pointer to the ++ * struct work, so things will continue to work as long as the work ++ * structure is the first component of the task 
structure. ++ */ ++typedef struct et_task { ++ struct work_struct work; ++ void *context; ++} et_task_t; ++ ++typedef struct et_info { ++ etc_info_t *etc; /* pointer to common os-independent data */ ++ struct net_device *dev; /* backpoint to device */ ++ struct pci_dev *pdev; /* backpoint to pci_dev */ ++ void *osh; /* pointer to os handle */ ++ struct semaphore sem; /* use semaphore to allow sleep */ ++ spinlock_t lock; /* per-device perimeter lock */ ++ spinlock_t txq_lock; /* lock for txq protection */ ++ spinlock_t tx_lock; /* lock for tx protection */ ++ spinlock_t isr_lock; /* lock for irq reentrancy protection */ ++ struct sk_buff_head txq[NUMTXQ];/* send queue */ ++ void *regsva; /* opaque chip registers virtual address */ ++ struct timer_list timer; /* one second watchdog timer */ ++ bool set; /* indicate the timer is set or not */ ++ struct net_device_stats stats; /* stat counter reporting structure */ ++ int events; /* bit channel between isr and dpc */ ++ struct et_info *next; /* pointer to next et_info_t in chain */ ++#ifdef GMAC_NAPI2_POLL ++ struct napi_struct napi_poll; ++#endif /* GMAC_NAPI2_POLL */ ++#ifndef GMAC_NAPI_POLL ++ struct tasklet_struct tasklet;/* dpc tasklet */ ++#endif /* GMAC_NAPI_POLL */ ++#ifdef GMAC_ALL_PASSIVE ++ et_task_t dpc_task; /* work queue for rx dpc */ ++ et_task_t txq_task; /* work queue for tx frames */ ++ bool all_dispatch_mode; /* dispatch mode: tasklets or passive */ ++#endif /* GMAC_ALL_PASSIVE */ ++ bool resched; /* dpc was rescheduled */ ++#ifdef CONFIG_IPROC_2STAGE_RX ++ bool rxinisr; ++#endif /* CONFIG_IPROC_2STAGE_RX */ ++} et_info_t; ++ ++#define ET_LOCK(et) \ ++do { \ ++ if (ET_ALL_PASSIVE_ENAB(et)) \ ++ down(&(et)->sem); \ ++ else \ ++ spin_lock_bh(&(et)->lock); \ ++} while (0) ++ ++#define ET_UNLOCK(et) \ ++do { \ ++ if (ET_ALL_PASSIVE_ENAB(et)) \ ++ up(&(et)->sem); \ ++ else \ ++ spin_unlock_bh(&(et)->lock); \ ++} while (0) ++ ++#define ET_TXQ_LOCK(et) spin_lock_bh(&(et)->txq_lock) ++#define 
ET_TXQ_UNLOCK(et) spin_unlock_bh(&(et)->txq_lock) ++#define ET_TX_LOCK(et) spin_lock_bh(&(et)->tx_lock) ++#define ET_TX_UNLOCK(et) spin_unlock_bh(&(et)->tx_lock) ++#define INT_LOCK(et, flags) spin_lock_irqsave(&(et)->isr_lock, flags) ++#define INT_UNLOCK(et, flags) spin_unlock_irqrestore(&(et)->isr_lock, flags) ++ ++#ifdef GMAC_RATE_LIMITING ++static int et_rx_rate_limit = 0; ++extern void etc_check_rate_limiting(etc_info_t *etc, void *pch); ++#endif /* GMAC_RATE_LIMITING */ ++ ++#if defined(CONFIG_IPROC_SDK_MGT_PORT_HANDOFF) ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++extern int gmac_has_mdio_access(void); ++#endif /* (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) */ ++#endif /* defined(CONFIG_IPROC_SDK_MGT_PORT_HANDOFF) */ ++ ++static int et_found = 0; ++static et_info_t *et_list = NULL; ++ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 37) ++#define init_MUTEX(x) sema_init(x,1) ++#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 37) */ ++ ++/* linux 2.4 doesn't have in_atomic */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20) ++#define in_atomic() 0 ++#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 37) */ ++ ++/* Prototypes called by etc.c */ ++#ifdef CONFIG_BCM_GRO_ENABLE ++void et_flush(void *dev_id); ++#endif /* CONFIG_BCM_GRO_ENABLE */ ++void et_init(et_info_t *et, uint options); ++void et_reset(et_info_t *et); ++void et_up(et_info_t *et); ++void et_down(et_info_t *et, int reset); ++void et_intrson(et_info_t *et); ++void et_dump(et_info_t *et, struct bcmstrbuf *b); ++void et_link_up(et_info_t *et); ++void et_link_down(et_info_t *et); ++bool et_is_link_up(et_info_t *et); ++ ++/* Local prototypes */ ++static void et_free(et_info_t *et); ++static int et_open(struct net_device *dev); ++static int et_close(struct net_device *dev); ++static int et_start(struct sk_buff *skb, struct net_device *dev); ++static int et_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd); ++static struct net_device_stats *et_get_stats(struct 
net_device *dev); ++static int et_set_mac_address(struct net_device *dev, void *addr); ++static void et_set_multicast_list(struct net_device *dev); ++ ++static void et_sendnext(et_info_t *et); ++static void _et_watchdog(struct net_device *data); ++static void et_watchdog(ulong data); ++#ifdef GMAC_ALL_PASSIVE ++static void et_watchdog_task(et_task_t *task); ++static void et_dpc_work(struct et_task *task); ++static int et_schedule_task(et_info_t *et, void (*fn)(struct et_task *task), void *context); ++static void et_txq_work(struct et_task *task); ++#endif /* GMAC_ALL_PASSIVE */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) ++static irqreturn_t et_isr(int irq, void *dev_id); ++#else ++static irqreturn_t et_isr(int irq, void *dev_id, struct pt_regs *ptregs); ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) */ ++static int et_rxevent(osl_t *osh, et_info_t *et, struct chops *chops, void *ch, int quota); ++#ifdef GMAC_NAPI2_POLL ++static int et_poll(struct napi_struct *napi, int budget); ++#elif defined(GMAC_NAPI_POLL) ++static int et_poll(struct net_device *dev, int *budget); ++#else /* ! 
GMAC_NAPI_POLL */ ++static void et_dpc(ulong data); ++#endif /* GMAC_NAPI_POLL */ ++static void et_error(et_info_t *et, struct sk_buff *skb, void *rxh); ++static void et_sendup(et_info_t *et, struct sk_buff *skb); ++static void et_dumpet(et_info_t *et, struct bcmstrbuf *b); ++static int et_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd); ++static int et_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd); ++static void et_get_driver_info(struct net_device *dev, struct ethtool_drvinfo *info); ++ ++static int eth_mac_proc_create(struct net_device *dev); ++#ifndef CONFIG_OF ++static void eth_mac_proc_remove(void); ++#else ++static void eth_mac_proc_remove(struct net_device *dev); ++#endif ++static int iproc_gmac_drv_probe(struct platform_device*); ++static int __exit iproc_gmac_drv_remove(struct platform_device*); ++#ifdef CONFIG_PM ++static int iproc_gmac_drv_suspend(struct platform_device *pdev, pm_message_t state); ++static int iproc_gmac_drv_resume(struct platform_device *pdev); ++#else /* CONFIG_PM */ ++#define iproc_gmac_drv_suspend NULL ++#define iproc_gmac_drv_resume NULL ++#endif /* CONFIG_PM */ ++ ++#define DISABLE_FA_BYPASS 0 ++#define ENABLE_FA_BYPASS 1 ++ ++#if 0 ++static unsigned int gBypass = DISABLE_FA_BYPASS; ++#endif ++ ++#ifdef BCMDBG ++static uint32 msglevel = 0xdeadbeef; ++module_param(msglevel, uint, 0644); ++#endif /* BCMDBG */ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) ++static const struct ethtool_ops et_ethtool_ops = { ++ .get_settings = et_get_settings, ++ .set_settings = et_set_settings, ++ .get_drvinfo = et_get_driver_info, ++}; ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) */ ++ ++#ifdef HAVE_NET_DEVICE_OPS ++static const struct net_device_ops et_netdev_ops = { ++ .ndo_open = et_open, ++ .ndo_stop = et_close, ++ .ndo_start_xmit = et_start, ++ .ndo_get_stats = et_get_stats, ++ .ndo_set_mac_address = et_set_mac_address, ++ .ndo_set_rx_mode = et_set_multicast_list, ++ .ndo_do_ioctl = 
et_ioctl, ++}; ++#endif /*HAVE_NET_DEVICE_OPS*/ ++ ++#ifndef CONFIG_OF ++static struct platform_driver gmac_pdrv[IPROC_MAX_GMAC_CORES] = { ++ { ++ .probe = iproc_gmac_drv_probe, ++ .remove = __exit_p(iproc_gmac_drv_remove), ++ .suspend = iproc_gmac_drv_suspend, ++ .resume = iproc_gmac_drv_resume, ++ .driver = { ++ .name = "bcm-gmac0", ++ }, ++ }, ++ { ++ .probe = iproc_gmac_drv_probe, ++ .remove = __exit_p(iproc_gmac_drv_remove), ++ .suspend = iproc_gmac_drv_suspend, ++ .resume = iproc_gmac_drv_resume, ++ .driver = { ++ .name = "bcm-gmac1", ++ }, ++ }, ++ { ++ .probe = iproc_gmac_drv_probe, ++ .remove = __exit_p(iproc_gmac_drv_remove), ++ .suspend = iproc_gmac_drv_suspend, ++ .resume = iproc_gmac_drv_resume, ++ .driver = { ++ .name = "bcm-gmac2", ++ }, ++ }, ++ { ++ .probe = iproc_gmac_drv_probe, ++ .remove = __exit_p(iproc_gmac_drv_remove), ++ .suspend = iproc_gmac_drv_suspend, ++ .resume = iproc_gmac_drv_resume, ++ .driver = { ++ .name = "bcm-gmac3", ++ }, ++ } ++}; ++#endif ++ ++/*int gmac_pdev_loaded[IPROC_NUM_GMACS];*/ ++ ++/***************************************************************************** ++*****************************************************************************/ ++static bool __maybe_unused ++et_ctf_pipeline_loopback(et_info_t *et) ++{ ++ if (et->etc->unit == 3) { ++ return true; ++ } else { ++ return false; ++ } ++} ++ ++#ifdef BCMDMASGLISTOSL ++static int ++et_bcmtag_len(et_info_t *et) ++{ ++ return (et_ctf_pipeline_loopback(et)) ? 
8 : 0; ++} ++#endif /* BCMDMASGLISTOSL */ ++ ++static void ++et_free(et_info_t *et) ++{ ++ et_info_t **prev; ++ osl_t *osh; ++ ++ ET_TRACE(("et: et_free\n")); ++ ++ if (et == NULL) { ++ return; ++ } ++ ++ if (et->dev && et->dev->irq) { ++ free_irq(et->dev->irq, et); ++ } ++ ++#ifdef GMAC_NAPI2_POLL ++ napi_disable(&et->napi_poll); ++ netif_napi_del(&et->napi_poll); ++#endif /* GMAC_NAPI2_POLL */ ++ ++ if (et->dev) { ++ unregister_netdev(et->dev); ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0) ++ free_netdev(et->dev); ++#else ++ MFREE(et->osh, et->dev, sizeof(struct net_device)); ++#endif ++ et->dev = NULL; ++ } ++ ++ /* free common resources */ ++ if (et->etc) { ++ etc_detach(et->etc); ++ et->etc = NULL; ++ } ++ ++ /* unregister_netdev() calls get_stats() which may read chip registers ++ * so we cannot unmap the chip registers until after calling unregister_netdev() . ++ */ ++ if (et->regsva) { ++ iounmap((void *)et->regsva); ++ et->regsva = NULL; ++ } ++ ++ /* remove us from the global linked list */ ++ for (prev = &et_list; *prev; prev = &(*prev)->next) { ++ if (*prev == et) { ++ *prev = et->next; ++ break; ++ } ++ } ++ ++ osh = et->osh; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) ++ free_netdev(et->dev); ++ et->dev = NULL; ++#else ++ MFREE(et->osh, et, sizeof(et_info_t)); ++#endif ++ ++ if (MALLOCED(osh)) { ++ ET_ERROR(("Memory leak of bytes %d\n", MALLOCED(osh))); ++ } ++ ASSERT(MALLOCED(osh) == 0); ++ ++ osl_detach(osh); ++} ++ ++static int ++et_open(struct net_device *dev) ++{ ++ et_info_t *et = ET_INFO(dev); ++ ++ ET_TRACE(("et%d: et_open\n", et->etc->unit)); ++ ++ et->etc->promisc = (dev->flags & IFF_PROMISC)? TRUE: FALSE; ++ et->etc->allmulti = (dev->flags & IFF_ALLMULTI)? 
TRUE: et->etc->promisc; ++#ifdef GMAC_RATE_LIMITING ++ et->etc->rl_enabled = et_rx_rate_limit; ++#endif /* GMAC_RATE_LIMITING */ ++ ++ ET_LOCK(et); ++ et_up(et); ++ ET_UNLOCK(et); ++ ++ OLD_MOD_INC_USE_COUNT; ++ ++ return (0); ++} ++ ++static int ++et_close(struct net_device *dev) ++{ ++ et_info_t *et = ET_INFO(dev); ++ ++ ET_TRACE(("et%d: et_close\n", et->etc->unit)); ++ ++ et->etc->promisc = FALSE; ++ et->etc->allmulti = FALSE; ++ ++ ET_LOCK(et); ++ et_down(et, 1); ++ ET_UNLOCK(et); ++ ++ OLD_MOD_DEC_USE_COUNT; ++ ++ return (0); ++} ++ ++#if defined(BCMDMASGLISTOSL) ++/* ++ * Driver level checksum offload. This is being done so that we can advertise ++ * checksum offload support to Linux. ++ */ ++static void BCMFASTPATH_HOST ++et_cso(et_info_t *et, struct sk_buff *skb) ++{ ++ struct ethervlan_header *evh; ++ uint8 *th = skb_transport_header(skb); ++ uint16 thoff, eth_type, *check; ++ uint8 prot; ++ ++ ASSERT(!PKTISCTF(et->osh, skb)); ++ ++ evh = (struct ethervlan_header *)PKTDATA(et->osh, skb); ++ eth_type = ((evh->vlan_type == HTON16(ETHER_TYPE_8021Q)) ? ++ evh->ether_type : evh->vlan_type); ++ ++ /* tcp/udp checksum calculation */ ++ thoff = (th - skb->data); ++ if (eth_type == HTON16(ETHER_TYPE_IP)) { ++ struct iphdr *ih = ip_hdr(skb); ++ prot = ih->protocol; ++ ASSERT((prot == IP_PROT_TCP) || (prot == IP_PROT_UDP)); ++ check = (uint16 *)(th + ((prot == IP_PROT_UDP) ? ++ offsetof(struct udphdr, check) : offsetof(struct tcphdr, check))); ++ *check = 0; ++ skb->csum = skb_checksum(skb, thoff, skb->len - thoff, 0); ++ *check = csum_tcpudp_magic(ih->saddr, ih->daddr, ++ skb->len - thoff, prot, skb->csum); ++ } else if (eth_type == HTON16(ETHER_TYPE_IPV6)) { ++ struct ipv6hdr *ih = ipv6_hdr(skb); ++ prot = IPV6_PROT(ih); ++ ASSERT((prot == IP_PROT_TCP) || (prot == IP_PROT_UDP)); ++ check = (uint16 *)(th + ((prot == IP_PROT_UDP) ? 
++ offsetof(struct udphdr, check) : offsetof(struct tcphdr, check))); ++ *check = 0; ++ skb->csum = skb_checksum(skb, thoff, skb->len - thoff, 0); ++ *check = csum_ipv6_magic(&ih->saddr, &ih->daddr, ++ skb->len - thoff, prot, skb->csum); ++ } else { ++ return; ++ } ++ ++ if ((*check == 0) && (prot == IP_PROT_UDP)) { ++ *check = CSUM_MANGLED_0; ++ } ++} ++#endif /* defined(BCMDMASGLISTOSL) */ ++ ++/* ++ * Yeah, queueing the packets on a tx queue instead of throwing them ++ * directly into the descriptor ring in the case of dma is kinda lame, ++ * but this results in a unified transmit path for both dma and pio ++ * and localizes/simplifies the netif_*_queue semantics, too. ++ */ ++static int BCMFASTPATH ++et_start(struct sk_buff *skb, struct net_device *dev) ++{ ++ et_info_t *et = ET_INFO(dev); ++ uint32 q = 0; ++#ifdef BCMDMASGLISTOSL ++ bool sw_cksum = true; ++ struct iphdr *iph = NULL; ++#endif /* BCMDMASGLISTOSL */ ++#ifdef ET_LIMIT_TXQ ++ int qlen; ++#endif /* ET_LIMIT_TXQ */ ++ ++#ifdef BCMDMASGLISTOSL ++ if (!PKTSUMNEEDED(skb)) { ++ sw_cksum = false; ++ } ++ ++ /* can only update checksum once. 
*/ ++ /* if checksum is updated later, don't do it here */ ++ iph = (struct iphdr *)skb->network_header; ++ if (((skb->len + et_bcmtag_len(et)) < MIN_PACKET_SIZE) && ++ ((iph->protocol == IPPROTO_TCP) || (iph->protocol == IPPROTO_UDP))) { ++ sw_cksum = false; ++ } ++ ++ if (sw_cksum) { ++ et_cso(et, skb); ++ } ++#endif /* BCMDMASGLISTOSL */ ++ ++ if (skb_is_nonlinear(skb)) { ++ et->etc->txsgpkt++; ++ } ++ ++ if (skb->len > et->etc->txmaxlen) { ++ et->etc->txmaxlen = skb->len; ++ } ++ ++ ET_TRACE(("et%d: et_start: len %d\n", et->etc->unit, skb->len)); ++ ET_LOG("et%d: et_start: len %d", et->etc->unit, skb->len); ++ ++ et->etc->txfrm++; ++#ifdef ET_LIMIT_TXQ ++#ifndef CONFIG_BCM_IPROC_GMAC_LOCK_OPT ++ ET_TXQ_LOCK(et); ++#endif /* CONFIG_BCM_IPROC_GMAC_LOCK_OPT */ ++ qlen = skb_queue_len(&et->txq[q]); ++#ifndef CONFIG_BCM_IPROC_GMAC_LOCK_OPT ++ ET_TXQ_UNLOCK(et); ++#endif /* CONFIG_BCM_IPROC_GMAC_LOCK_OPT */ ++ if (qlen > et->etc->txqlen) { ++ et->etc->txqlen = qlen; ++ } ++ ++ if (et_txq_thresh && (qlen >= et_txq_thresh)) { ++ //PKTCFREE(et->osh, skb, TRUE); ++ //return 0; ++ et->etc->txfrmdropped++; ++ /* schedule work */ ++#ifdef GMAC_ALL_PASSIVE ++ if (ET_ALL_PASSIVE_ENAB(et)) { ++#ifdef CONFIG_BCM_IPROC_GMAC_TXONCPU1 ++ schedule_work_on(1, &et->txq_task.work); ++#else ++ schedule_work(&et->txq_task.work); ++#endif ++ } ++#endif /* GMAC_ALL_PASSIVE */ ++ return NETDEV_TX_BUSY; ++ } ++#endif /* ET_LIMIT_TXQ */ ++ ++ /* put it on the tx queue and call sendnext */ ++ ET_TXQ_LOCK(et); ++ __skb_queue_tail(&et->txq[q], skb); ++ et->etc->txq_state |= (1 << q); ++ ET_TXQ_UNLOCK(et); ++ ++ if (!ET_ALL_PASSIVE_ENAB(et)) { ++ ET_LOCK(et); ++ et_sendnext(et); ++ ET_UNLOCK(et); ++ } ++#ifdef GMAC_ALL_PASSIVE ++ else { ++#ifdef CONFIG_BCM_IPROC_GMAC_TXONCPU1 ++ schedule_work_on(1, &et->txq_task.work); ++#else ++ schedule_work(&et->txq_task.work); ++#endif /* CONFIG_BCM_IPROC_GMAC_TXONCPU1 */ ++ } ++#endif /* GMAC_ALL_PASSIVE */ ++ ++ ET_LOG("et%d: et_start ret\n", 
et->etc->unit, 0); ++ ++ return (0); ++} ++ ++static void BCMFASTPATH ++et_sendnext(et_info_t *et) ++{ ++ etc_info_t *etc; ++ struct sk_buff *skb; ++ void *p, *n; ++ uint32 priq = TX_Q0; ++#ifdef DMA ++ uint32 txavail; ++#endif ++#ifdef DBG_PRINT_PKT ++ int tagoff, idx; ++#endif /* DBG_PRINT_PKT */ ++ ++ etc = et->etc; ++ ++ ET_TRACE(("et%d: et_sendnext\n", etc->unit)); ++ ET_LOG("et%d: et_sendnext", etc->unit, 0); ++ ++ /* dequeue packets from highest priority queue and send */ ++ while (1) { ++ ET_TXQ_LOCK(et); ++ ++ if (etc->txq_state == 0) ++ break; ++ ++ priq = etc_priq(etc->txq_state); ++ ++ ET_TRACE(("et%d: txq_state %x priq %d txavail %d\n", ++ etc->unit, etc->txq_state, priq, ++ *(uint *)etc->txavail[priq])); ++ ++ if ((skb = skb_peek(&et->txq[priq])) == NULL) { ++ etc->txq_state &= ~(1 << priq); ++ ET_TXQ_UNLOCK(et); ++ continue; ++ } ++ ++#ifdef DMA ++ /* current highest priority dma queue is full */ ++ txavail = *(uint *)(etc->txavail[priq]); ++ if ((PKTISCHAINED(skb) && (txavail < PKTCCNT(skb))) || (txavail == 0)) ++#else /* DMA */ ++ if (etc->pioactive != NULL) ++#endif /* DMA */ ++ { ++ etc->txdmafull++; ++ break; ++ } ++ ++ skb = __skb_dequeue(&et->txq[priq]); ++ ++ ET_TXQ_UNLOCK(et); ++ ET_PRHDR("tx", (struct ether_header *)skb->data, skb->len, etc->unit); ++ ET_PRPKT("txpkt", skb->data, skb->len, etc->unit); ++ ++#ifdef DBG_PRINT_PKT ++ tagoff = 16; ++ printf("et%d: txpkt len(0x%x) tag:0x%02x%02x%02x%02x\n", etc->unit, skb->len, ++ skb->data[tagoff], skb->data[tagoff+1], skb->data[tagoff+2], skb->data[tagoff+3]); ++ ++ printk("et%d: %s len(0x%x) txpkt:", etc->unit, __FUNCTION__, skb->len); ++ for (idx = 0; idx < skb->len; idx++) { ++ if ((idx % 16) == 0) { ++ printk("\n"); ++ } ++ printk("%02x ", skb->data[idx]); ++ } ++ printk("\n"); ++#endif /* DBG_PRINT_PKT */ ++ ++ /* convert the packet. 
*/ ++ p = PKTFRMNATIVE(etc->osh, skb); ++ ASSERT(p != NULL); ++ ++ ET_TRACE(("%s: sdu %p chained %d chain sz %d next %p\n", ++ __FUNCTION__, p, PKTISCHAINED(p), PKTCCNT(p), PKTCLINK(p))); ++ ++ ET_TX_LOCK(et); ++ FOREACH_CHAINED_PKT(p, n) { ++ /* replicate vlan header contents from curr frame */ ++ if (n != NULL) { ++ uint8 *n_evh; ++ n_evh = PKTPUSH(et->osh, n, VLAN_TAG_LEN); ++ *(struct ethervlan_header *)n_evh = ++ *(struct ethervlan_header *)PKTDATA(et->osh, p); ++ } ++ (*etc->chops->tx)(etc->ch, p); ++#ifdef CONFIG_BCM_IPROC_GMAC_LOCK_OPT ++ ET_LOCK(et); ++#endif /* CONFIG_BCM_IPROC_GMAC_LOCK_OPT */ ++ etc->txframe++; ++ etc->txbyte += PKTLEN(et->osh, p); ++#ifdef CONFIG_BCM_IPROC_GMAC_LOCK_OPT ++ ET_UNLOCK(et); ++#endif /* CONFIG_BCM_IPROC_GMAC_LOCK_OPT */ ++ } ++ ET_TX_UNLOCK(et); ++ } ++ ++ /* no flow control when qos is enabled */ ++ if (!et->etc->qos) { ++ /* stop the queue whenever txq fills */ ++ if ((skb_queue_len(&et->txq[TX_Q0]) > DATAHIWAT) && !netif_queue_stopped(et->dev)) { ++ et->etc->txqstop++; ++ netif_stop_queue(et->dev); ++ } else if (netif_queue_stopped(et->dev) && ++ (skb_queue_len(&et->txq[TX_Q0]) < (DATAHIWAT/2))) { ++ netif_wake_queue(et->dev); ++ } ++ } else { ++ /* drop the frame if corresponding prec txq len exceeds hiwat ++ * when qos is enabled. 
++ */ ++ if ((priq != TC_NONE) && (skb_queue_len(&et->txq[priq]) > DATAHIWAT)) { ++ skb = __skb_dequeue(&et->txq[priq]); ++ PKTCFREE(et->osh, skb, TRUE); ++ ET_ERROR(("et%d: %s: txqlen %d\n", et->etc->unit, ++ __FUNCTION__, skb_queue_len(&et->txq[priq]))); ++ } ++ } ++ ++ ET_TXQ_UNLOCK(et); ++} ++ ++#ifdef CONFIG_BCM_GRO_ENABLE ++#ifdef CONFIG_ET_MODULE ++extern int et_flushptr_ready; ++extern void (*et_flushptr)(void *dev_id); ++#endif /* CONFIG_ET_MODULE */ ++ ++void ++et_flush(void *dev_id) ++{ ++ et_info_t *et; ++ struct chops *chops; ++ void *ch; ++ osl_t *osh; ++ ++ et = (et_info_t *)dev_id; ++ chops = et->etc->chops; ++ ch = et->etc->ch; ++ osh = et->etc->osh; ++ ++ /* guard against shared interrupts */ ++ if (!et->etc->up) { ++ ET_TRACE(("et%d: et_isr: not up\n", et->etc->unit)); ++ return; ++ } ++ if (!et->napi_poll.gro_list) { ++ return; ++ } ++ ++ /* disable interrupts */ ++ (*chops->intrsoff)(ch); ++ ++ et->resched = TRUE; ++ ++ napi_gro_flush(&et->napi_poll); ++ ++ /* enable interrupts now */ ++ (*chops->intrson)(ch); ++} ++#endif /* CONFIG_BCM_GRO_ENABLE */ ++ ++void ++et_init(et_info_t *et, uint options) ++{ ++ ET_TRACE(("et%d: et_init\n", et->etc->unit)); ++ ET_LOG("et%d: et_init", et->etc->unit, 0); ++ ++ etc_init(et->etc, options); ++ ++#ifdef CONFIG_BCM_GRO_ENABLE ++#ifdef CONFIG_ET_MODULE ++ et_flushptr = &et_flush; ++ et_flushptr_ready = 1; ++#endif /* CONFIG_ET_MODULE */ ++#endif /* CONFIG_BCM_GRO_ENABLE */ ++ ++#if defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_HR2) || defined(CONFIG_MACH_KT2) ++ netif_carrier_off(et->dev); ++#endif /* defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_HR2) || defined(CONFIG_MACH_KT2) */ ++} ++ ++ ++void ++et_reset(et_info_t *et) ++{ ++ ET_TRACE(("et%d: et_reset\n", et->etc->unit)); ++ ++ etc_reset(et->etc); ++ ++ /* zap any pending dpc interrupt bits */ ++ et->events = 0; ++ ++ /* dpc will not be rescheduled */ ++ et->resched = 0; ++} ++ ++void ++et_up(et_info_t *et) ++{ ++ etc_info_t *etc; ++ ++ etc = 
et->etc; ++ ++ if (etc->up) { ++ return; ++ } ++ ++ ET_TRACE(("et%d: et_up\n", etc->unit)); ++ ++ etc_up(etc); ++ ++#if defined(CONFIG_IPROC_SDK_MGT_PORT_HANDOFF) ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++ if (et->set) { ++ /* This will happen if running watchdog to monitor mdio bus */ ++ /* and port not up */ ++ del_timer(&et->timer); ++ et->set = FALSE; ++ } ++#endif /* (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) */ ++#endif /* defined(CONFIG_IPROC_SDK_MGT_PORT_HANDOFF) */ ++ ++ /* schedule one second watchdog timer */ ++ et->timer.expires = jiffies + HZ; ++ add_timer(&et->timer); ++ et->set=TRUE; ++ ++ netif_start_queue(et->dev); ++} ++ ++void ++et_down(et_info_t *et, int reset) ++{ ++ etc_info_t *etc; ++ struct sk_buff *skb; ++ int32 i; ++ bool stoptmr = TRUE; ++ ++ etc = et->etc; ++ ++ ET_TRACE(("et%d: et_down\n", etc->unit)); ++ ++ netif_down(et->dev); ++ netif_stop_queue(et->dev); ++ ++#ifdef CONFIG_IPROC_SDK_MGT_PORT_HANDOFF ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++ if (gmac_has_mdio_access()) { ++ /* we have mdio bus don't stop timer so we can continue to monitor */ ++ stoptmr = FALSE; ++ } ++#endif /* (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) */ ++#endif /* CONFIG_IPROC_SDK_MGT_PORT_HANDOFF */ ++ ++ if ( stoptmr ) { ++ /* stop watchdog timer */ ++ del_timer(&et->timer); ++ et->set = FALSE; ++ } ++ ++#ifdef GMAC_RATE_LIMITING ++ /* stop ratelimiting timer */ ++ del_timer(&et->etc->rl_timer); ++ et->etc->rl_set = FALSE; ++#endif /* GMAC_RATE_LIMITING */ ++ ++ etc_down(etc, reset); ++ ++ /* flush the txq(s) */ ++ for (i = 0; i < NUMTXQ; i++) { ++ while ((skb = skb_dequeue(&et->txq[i]))) { ++ PKTFREE(etc->osh, skb, TRUE); ++ } ++ } ++ ++#if !defined(GMAC_NAPI_POLL) && !defined(GMAC_NAPI2_POLL) ++ /* kill dpc */ ++ ET_UNLOCK(et); ++ tasklet_kill(&et->tasklet); ++ ET_LOCK(et); ++#endif /* GMAC_NAPI_POLL */ ++} ++ ++/* ++ * These are interrupt on/off entry points. 
Disable interrupts ++ * during interrupt state transition. ++ */ ++void ++et_intrson(et_info_t *et) ++{ ++ unsigned long flags; ++ INT_LOCK(et, flags); ++ (*et->etc->chops->intrson)(et->etc->ch); ++ INT_UNLOCK(et, flags); ++} ++ ++static void ++_et_watchdog(struct net_device *dev) ++{ ++ et_info_t *et; ++ ++ et = ET_INFO(dev); ++ ++ ET_LOCK(et); ++ ++ etc_watchdog(et->etc); ++ ++ if (et->set) { ++ /* reschedule one second watchdog timer */ ++ et->timer.expires = jiffies + HZ; ++ add_timer(&et->timer); ++ } ++#if defined(CONFIG_IPROC_SDK_MGT_PORT_HANDOFF) ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++ /* this in case port when up then down before we released mdio */ ++ else if (gmac_has_mdio_access()) { ++ /* interface not up but we have mdio bus */ ++ /* reschedule one second watchdog timer */ ++ et->timer.expires = jiffies + HZ; ++ add_timer(&et->timer); ++ et->set = TRUE; ++ } ++#endif /* (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) */ ++#endif /* defined(CONFIG_IPROC_SDK_MGT_PORT_HANDOFF) */ ++ ++ ET_UNLOCK(et); ++} ++ ++#ifdef GMAC_ALL_PASSIVE ++/* Schedule a completion handler to run at safe time */ ++static int ++et_schedule_task(et_info_t *et, void (*fn)(struct et_task *task), void *context) ++{ ++ et_task_t *task; ++ ++ ET_TRACE(("et%d: et_schedule_task\n", et->etc->unit)); ++ ++ if (!(task = MALLOC(et->osh, sizeof(et_task_t)))) { ++ ET_ERROR(("et%d: et_schedule_task: out of memory, malloced %d bytes\n", ++ et->etc->unit, MALLOCED(et->osh))); ++ return -ENOMEM; ++ } ++ ++ MY_INIT_WORK(&task->work, (work_func_t)fn); ++ task->context = context; ++ ++ if (!schedule_work(&task->work)) { ++ ET_ERROR(("et%d: schedule_work() failed\n", et->etc->unit)); ++ MFREE(et->osh, task, sizeof(et_task_t)); ++ return -ENOMEM; ++ } ++ ++ return 0; ++} ++ ++static void BCMFASTPATH ++et_txq_work(struct et_task *task) ++{ ++ et_info_t *et = (et_info_t *)task->context; ++ ++#ifndef CONFIG_BCM_IPROC_GMAC_LOCK_OPT ++ ET_LOCK(et); ++#endif /* 
!CONFIG_BCM_IPROC_GMAC_LOCK_OPT */ ++ ++ et_sendnext(et); ++ ++#ifndef CONFIG_BCM_IPROC_GMAC_LOCK_OPT ++ ET_UNLOCK(et); ++#endif /* !CONFIG_BCM_IPROC_GMAC_LOCK_OPT */ ++ return; ++} ++ ++static void ++et_watchdog_task(et_task_t *task) ++{ ++ et_info_t *et = ET_INFO((struct net_device *)task->context); ++ ++ _et_watchdog((struct net_device *)task->context); ++ MFREE(et->osh, task, sizeof(et_task_t)); ++} ++#endif /* GMAC_ALL_PASSIVE */ ++ ++static void ++et_watchdog(ulong data) ++{ ++ struct net_device *dev = (struct net_device *)data; ++ ++#ifdef GMAC_ALL_PASSIVE ++ et_info_t *et = ET_INFO(dev); ++#endif /* GMAC_ALL_PASSIVE */ ++ ++ if (!ET_ALL_PASSIVE_ENAB(et)) { ++ _et_watchdog(dev); ++ } ++#ifdef GMAC_ALL_PASSIVE ++ else { ++ et_schedule_task(et, et_watchdog_task, dev); ++ } ++#endif /* GMAC_ALL_PASSIVE */ ++} ++ ++/* Rate limiting */ ++#ifdef GMAC_RATE_LIMITING ++static void et_release_congestion(ulong data) ++{ ++ struct net_device *dev = (struct net_device *)data; ++ et_info_t *et = ET_INFO(dev); ++ ++ if (!et) { ++ return; ++ } ++ ++ if (et->etc->rl_stopping_broadcasts) { ++ et->etc->rl_stopping_broadcasts = 0; ++ /* Clear the number of dropped broadcast packets */ ++ et->etc->rl_dropped_bc_packets = 0; ++ } ++ if (et->etc->rl_stopping_all_packets) { ++ et->etc->rl_stopping_all_packets = 0; ++ et->etc->rl_dropped_all_packets = 0; ++ } ++} ++#endif /* GMAC_RATE_LIMITING */ ++ ++ ++ ++static int ++et_get_settings(struct net_device *dev, struct ethtool_cmd *ecmd) ++{ ++ et_info_t *et = ET_INFO(dev); ++ ++ ecmd->supported = (SUPPORTED_10baseT_Half | SUPPORTED_10baseT_Full | ++ SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full | ++ SUPPORTED_Autoneg | SUPPORTED_TP); ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++ ecmd->supported |= SUPPORTED_1000baseT_Full | SUPPORTED_Pause; ++#endif ++#if defined(CONFIG_MACH_HR2) ++ ecmd->supported |= SUPPORTED_Pause; ++#endif ++ ++ ecmd->advertising = ADVERTISED_TP; ++ ecmd->advertising |= (et->etc->advertise & 
ADV_10HALF) ? ++ ADVERTISED_10baseT_Half : 0; ++ ecmd->advertising |= (et->etc->advertise & ADV_10FULL) ? ++ ADVERTISED_10baseT_Full : 0; ++ ecmd->advertising |= (et->etc->advertise & ADV_100HALF) ? ++ ADVERTISED_100baseT_Half : 0; ++ ecmd->advertising |= (et->etc->advertise & ADV_100FULL) ? ++ ADVERTISED_100baseT_Full : 0; ++ ecmd->advertising |= (et->etc->advertise2 & ADV_1000FULL) ? ++ ADVERTISED_1000baseT_Full : 0; ++ ecmd->advertising |= (et->etc->advertise2 & ADV_1000HALF) ? ++ ADVERTISED_1000baseT_Half : 0; ++ ecmd->advertising |= (et->etc->forcespeed == ET_AUTO) ? ++ ADVERTISED_Autoneg : 0; ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_HR2)) ++ ecmd->advertising |= ADVERTISED_Pause; ++#endif ++ if (et->etc->linkstate) { ++ ecmd->speed = (et->etc->speed == 1000) ? SPEED_1000 : ++ ((et->etc->speed == 100) ? SPEED_100 : SPEED_10); ++ ecmd->duplex = (et->etc->duplex == 1) ? DUPLEX_FULL : DUPLEX_HALF; ++ } else { ++ ecmd->speed = 0; ++ ecmd->duplex = 0; ++ } ++ ecmd->port = PORT_TP; ++ ecmd->phy_address = et->etc->phyaddr; ++ ecmd->transceiver = XCVR_INTERNAL; ++ ecmd->autoneg = (et->etc->forcespeed == ET_AUTO) ? 
AUTONEG_ENABLE : AUTONEG_DISABLE; ++ ecmd->maxtxpkt = 0; ++ ecmd->maxrxpkt = 0; ++ ++ return 0; ++} ++ ++static int ++et_set_settings(struct net_device *dev, struct ethtool_cmd *ecmd) ++{ ++ int speed[2]; ++ ++ et_info_t *et = ET_INFO(dev); ++ ++ if (!capable(CAP_NET_ADMIN)) ++ return (-EPERM); ++ ++ if (ecmd->autoneg == AUTONEG_ENABLE) { ++ speed[0] = ET_AUTO; ++ speed[1] = ecmd->advertising; ++ } else if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_HALF) { ++ speed[0] = ET_10HALF; ++ } else if (ecmd->speed == SPEED_10 && ecmd->duplex == DUPLEX_FULL) { ++ speed[0] = ET_10FULL; ++ } else if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_HALF) { ++ speed[0] = ET_100HALF; ++ } else if (ecmd->speed == SPEED_100 && ecmd->duplex == DUPLEX_FULL) { ++ speed[0] = ET_100FULL; ++ } else if (ecmd->speed == SPEED_1000 && ecmd->duplex == DUPLEX_FULL) { ++ speed[0] = ET_1000FULL; ++ } else { ++ return (-EINVAL); ++ } ++ ++ return etc_ioctl(et->etc, ETCSPEED, speed); ++} ++ ++static void ++et_get_driver_info(struct net_device *dev, struct ethtool_drvinfo *info) ++{ ++ et_info_t *et = ET_INFO(dev); ++ bzero(info, sizeof(struct ethtool_drvinfo)); ++ info->cmd = ETHTOOL_GDRVINFO; ++ sprintf(info->driver, "et%d", et->etc->unit); ++ strncpy(info->version, EPI_VERSION_STR, sizeof(info->version)); ++ info->version[(sizeof(info->version))-1] = '\0'; ++} ++ ++#ifdef SIOCETHTOOL ++static int ++et_ethtool(et_info_t *et, struct ethtool_cmd *ecmd) ++{ ++ int ret = 0; ++ ++ ET_LOCK(et); ++ ++ switch (ecmd->cmd) { ++ case ETHTOOL_GSET: ++ ret = et_get_settings(et->dev, ecmd); ++ break; ++ case ETHTOOL_SSET: ++ ret = et_set_settings(et->dev, ecmd); ++ break; ++ case ETHTOOL_GDRVINFO: ++ et_get_driver_info(et->dev, (struct ethtool_drvinfo *)ecmd); ++ break; ++ default: ++ ret = -EINVAL; ++ break; ++ } ++ ++ ET_UNLOCK(et); ++ ++ return (ret); ++} ++#endif /* SIOCETHTOOL */ ++ ++static int ++et_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) ++{ ++ et_info_t *et; ++ int error; 
++ char *buf; ++ int size, ethtoolcmd; ++ bool get = 0, set; ++ et_var_t *var = NULL; ++ void *buffer = NULL; ++ ++ et = ET_INFO(dev); ++ ++ ET_TRACE(("et%d: et_ioctl: cmd 0x%x\n", et->etc->unit, cmd)); ++ ++ switch (cmd) { ++#ifdef SIOCETHTOOL ++ case SIOCETHTOOL: ++ if (copy_from_user(ðtoolcmd, ifr->ifr_data, sizeof(uint32))) ++ return (-EFAULT); ++ ++ if (ethtoolcmd == ETHTOOL_GDRVINFO) ++ size = sizeof(struct ethtool_drvinfo); ++ else ++ size = sizeof(struct ethtool_cmd); ++ get = TRUE; set = TRUE; ++ break; ++#endif /* SIOCETHTOOL */ ++ case SIOCGETCDUMP: ++ size = IOCBUFSZ; ++ get = TRUE; set = FALSE; ++ break; ++ case SIOCGETCPHYRD: ++ case SIOCGETCPHYRD2: ++ case SIOCGETCROBORD: ++ size = sizeof(int) * 2; ++ get = TRUE; set = TRUE; ++ break; ++ case SIOCSETCSPEED: ++ case SIOCSETCPHYWR: ++ case SIOCSETCPHYWR2: ++ case SIOCSETCROBOWR: ++ size = sizeof(int) * 2; ++ get = FALSE; set = TRUE; ++ break; ++ case SIOCSETGETVAR: ++ size = sizeof(et_var_t); ++ set = TRUE; ++ break; ++ default: ++ size = sizeof(int); ++ get = FALSE; set = TRUE; ++ break; ++ } ++ ++ if ((buf = MALLOC(et->osh, size)) == NULL) { ++ ET_ERROR(("et: et_ioctl: out of memory, malloced %d bytes\n", MALLOCED(et->osh))); ++ return (-ENOMEM); ++ } ++ ++ if (set && copy_from_user(buf, ifr->ifr_data, size)) { ++ MFREE(et->osh, buf, size); ++ return (-EFAULT); ++ } ++ ++ if (cmd == SIOCSETGETVAR) { ++ var = (et_var_t *)buf; ++ if (var->buf) { ++ if (!var->set) ++ get = TRUE; ++ ++ if (!(buffer = (void *) MALLOC(et->osh, var->len))) { ++ ET_ERROR(("et: et_ioctl: out of memory, malloced %d bytes\n", ++ MALLOCED(et->osh))); ++ MFREE(et->osh, buf, size); ++ return (-ENOMEM); ++ } ++ ++ if (copy_from_user(buffer, var->buf, var->len)) { ++ MFREE(et->osh, buffer, var->len); ++ MFREE(et->osh, buf, size); ++ return (-EFAULT); ++ } ++ } ++ } ++ ++ switch (cmd) { ++#ifdef SIOCETHTOOL ++ case SIOCETHTOOL: ++ error = et_ethtool(et, (struct ethtool_cmd *)buf); ++ break; ++#endif /* SIOCETHTOOL */ ++ case 
SIOCSETGETVAR: ++ ET_LOCK(et); ++ error = etc_iovar(et->etc, var->cmd, var->set, buffer); ++ ET_UNLOCK(et); ++ if (!error && get) { ++ error = copy_to_user(var->buf, buffer, var->len); ++ } ++ ++ if (buffer) { ++ MFREE(et->osh, buffer, var->len); ++ } ++ break; ++ default: ++ ET_LOCK(et); ++ error = etc_ioctl(et->etc, cmd - SIOCSETCUP, buf) ? -EINVAL : 0; ++ ET_UNLOCK(et); ++ break; ++ } ++ ++ if (!error && get) { ++ error = copy_to_user(ifr->ifr_data, buf, size); ++ } ++ ++ MFREE(et->osh, buf, size); ++ ++ return (error); ++} ++ ++static struct net_device_stats * ++et_get_stats(struct net_device *dev) ++{ ++ et_info_t *et; ++ etc_info_t *etc; ++ struct net_device_stats *stats; ++ int locked = 0; ++ ++ et = ET_INFO(dev); ++ ++ ET_TRACE(("et%d: et_get_stats\n", et->etc->unit)); ++ ++ if (!in_atomic()) { ++ locked = 1; ++ ET_LOCK(et); ++ } ++ ++ etc = et->etc; ++ stats = &et->stats; ++ bzero(stats, sizeof(struct net_device_stats)); ++ ++ /* refresh stats */ ++ if (et->etc->up) { ++ (*etc->chops->statsupd)(etc->ch); ++ } ++ ++ /* SWAG */ ++ stats->rx_packets = etc->rxframe; ++ stats->tx_packets = etc->txframe; ++ stats->rx_bytes = etc->rxbyte; ++ stats->tx_bytes = etc->txbyte; ++ stats->rx_errors = etc->rxerror; ++ stats->tx_errors = etc->txerror; ++ ++ if (ET_GMAC(etc)) { ++ gmacmib_t *mib; ++ ++ mib = etc->mib; ++ stats->collisions = mib->tx_total_cols; ++ stats->rx_length_errors = (mib->rx_oversize_pkts + mib->rx_undersize); ++ stats->rx_crc_errors = mib->rx_crc_errs; ++ stats->rx_frame_errors = mib->rx_align_errs; ++ stats->rx_missed_errors = mib->rx_missed_pkts; ++ } else { ++ bcmenetmib_t *mib; ++ ++ mib = etc->mib; ++ stats->collisions = mib->tx_total_cols; ++ stats->rx_length_errors = (mib->rx_oversize_pkts + mib->rx_undersize); ++ stats->rx_crc_errors = mib->rx_crc_errs; ++ stats->rx_frame_errors = mib->rx_align_errs; ++ stats->rx_missed_errors = mib->rx_missed_pkts; ++ ++ } ++ ++ stats->rx_fifo_errors = etc->rxoflo; ++ stats->rx_over_errors = etc->rxoflo; ++ 
stats->tx_fifo_errors = etc->txuflo; ++ ++ if (locked) { ++ ET_UNLOCK(et); ++ } ++ ++ return (stats); ++} ++ ++static int ++et_set_mac_address(struct net_device *dev, void *addr) ++{ ++ et_info_t *et; ++ struct sockaddr *sa = (struct sockaddr *) addr; ++ ++ et = ET_INFO(dev); ++ ET_TRACE(("et%d: et_set_mac_address\n", et->etc->unit)); ++ ++ if (et->etc->up) { ++ return -EBUSY; ++ } ++ ++ bcopy(sa->sa_data, dev->dev_addr, ETHER_ADDR_LEN); ++ bcopy(dev->dev_addr, &et->etc->cur_etheraddr, ETHER_ADDR_LEN); ++ ++ return 0; ++} ++ ++static void ++et_set_multicast_list(struct net_device *dev) ++{ ++ et_info_t *et; ++ etc_info_t *etc; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) ++ struct dev_mc_list *mclist; ++#else ++ struct netdev_hw_addr *ha ; ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) */ ++ int i; ++ int locked = 0; ++ ++ et = ET_INFO(dev); ++ etc = et->etc; ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) ++ mclist = NULL ; /* fend off warnings */ ++#else ++ ha = NULL ; ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) */ ++ ++ ET_TRACE(("et%d: et_set_multicast_list\n", etc->unit)); ++ ++ if (!in_atomic()) { ++ locked = 1; ++ ET_LOCK(et); ++ } ++ ++ if (etc->up) { ++ etc->promisc = (dev->flags & IFF_PROMISC)? TRUE: FALSE; ++ etc->allmulti = (dev->flags & IFF_ALLMULTI)? 
TRUE: etc->promisc; ++ ++ /* copy the list of multicasts into our private table */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) ++ for (i = 0, mclist = dev->mc_list; mclist && (i < dev->mc_count); ++ i++, mclist = mclist->next) { ++ if (i >= MAXMULTILIST) { ++ etc->allmulti = TRUE; ++ i = 0; ++ break; ++ } ++ etc->multicast[i] = *((struct ether_addr *)mclist->dmi_addr); ++ } ++#else /* >= 2.6.36 */ ++ i = 0; ++ netdev_for_each_mc_addr(ha, dev) { ++ i ++; ++ if (i >= MAXMULTILIST) { ++ etc->allmulti = TRUE; ++ i = 0; ++ break; ++ } ++ etc->multicast[i] = *((struct ether_addr *)ha->addr); ++ } /* for each ha */ ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) */ ++ etc->nmulticast = i; ++ ++ /* LR: partial re-init, DMA is already initialized */ ++ et_init(et, ET_INIT_INTRON); ++ } ++ ++ if (locked) { ++ ET_UNLOCK(et); ++ } ++} ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) ++static irqreturn_t BCMFASTPATH ++et_isr(int irq, void *dev_id) ++#else ++static irqreturn_t BCMFASTPATH ++et_isr(int irq, void *dev_id, struct pt_regs *ptregs) ++#endif ++{ ++ et_info_t *et; ++ struct chops *chops; ++ void *ch; ++ uint events = 0; ++ osl_t *osh; ++ ++ et = (et_info_t *)dev_id; ++ chops = et->etc->chops; ++ ch = et->etc->ch; ++ osh = et->etc->osh; ++ ++ /* guard against shared interrupts */ ++ if (!et->etc->up) { ++ ET_TRACE(("et%d: et_isr: not up\n", et->etc->unit)); ++ goto done; ++ } ++ ++ /* get interrupt condition bits */ ++ events = (*chops->getintrevents)(ch, TRUE); ++ ++ /* not for us */ ++ if (!(events & INTR_NEW)) { ++ goto done; ++ } ++ ++ ET_TRACE(("et%d: et_isr: events 0x%x\n", et->etc->unit, events)); ++ ET_LOG("et%d: et_isr: events 0x%x", et->etc->unit, events); ++ ++#ifdef CONFIG_IPROC_2STAGE_RX ++ if (events & INTR_RX) { ++ et->rxinisr = true; ++ /* process a few RX interrupts */ ++ et_rxevent(osh, et, chops, ch, 1); ++ ++ et->rxinisr = false; ++ /* get interrupt condition bits */ ++ events = (*chops->getintrevents)(ch, TRUE); ++ 
et->resched = FALSE; ++ ++ /* not for us */ ++ if (!(events & INTR_NEW)) { ++ goto done; ++ } ++ } ++#endif /* CONFIG_IPROC_2STAGE_RX */ ++ ++ /* disable interrupts */ ++ (*chops->intrsoff)(ch); ++ ++ /* save intstatus bits */ ++ ASSERT(et->events == 0); ++ et->events = events; ++ ++ ASSERT(et->resched == FALSE); ++ ++#ifdef GMAC_NAPI2_POLL ++ napi_schedule(&et->napi_poll); ++#elif defined(GMAC_NAPI_POLL) ++ /* allow the device to be added to the cpu polling list if we are up */ ++ if (netif_rx_schedule_prep(et->dev)) { ++ /* tell the network core that we have packets to send up */ ++ __netif_rx_schedule(et->dev); ++ } else { ++ ET_ERROR(("et%d: et_isr: intr while in poll!\n", ++ et->etc->unit)); ++ (*chops->intrson)(ch); ++ } ++#else /* ! GMAC_NAPI_POLL && ! GMAC_NAPI2_POLL */ ++ /* schedule dpc */ ++#ifdef GMAC_ALL_PASSIVE ++ if (ET_ALL_PASSIVE_ENAB(et)) { ++ schedule_work(&et->dpc_task.work); ++ } else ++#endif /* GMAC_ALL_PASSIVE */ ++ { ++ tasklet_schedule(&et->tasklet); ++ } ++#endif /* GMAC_NAPI_POLL */ ++ ++done: ++ ET_LOG("et%d: et_isr ret", et->etc->unit, 0); ++ ++ return IRQ_RETVAL(events & INTR_NEW); ++} ++ ++static inline int ++et_rxevent(osl_t *osh, et_info_t *et, struct chops *chops, void *ch, int quota) ++{ ++ uint processed = 0; ++ void *p, *h = NULL, *t = NULL; ++ struct sk_buff *skb; ++ ++#ifdef GMAC_RATE_LIMITING ++ /* rate limiting */ ++ if (et->etc->rl_enabled) { ++ etc_check_rate_limiting(et->etc, ch); ++ } ++#endif /* GMAC_RATE_LIMITING */ ++ ++ /* read the buffers first */ ++ while ((p = (*chops->rx)(ch))) { ++ PKTSETLINK(p, NULL); ++ if (t == NULL) { ++ h = t = p; ++ } else { ++ PKTSETLINK(t, p); ++ t = p; ++ } ++ ++ /* we reached quota already */ ++ if (++processed >= quota) { ++ /* reschedule et_dpc()/et_poll() */ ++ et->resched = TRUE; ++ et->etc->rxquota++; ++ break; ++ } ++ } ++ ++ /* prefetch the headers */ ++ if (h != NULL) { ++#ifdef CONFIG_BCM_IPROC_GMAC_PREFETCH ++ prefetch_range(PKTDATA(osh, h), SKB_DATA_PREFETCH_LEN); ++#else 
++ ETPREFHDRS(PKTDATA(osh, h), PREFSZ); ++#endif ++ } ++ ++ /* post more rx bufs */ ++ (*chops->rxfill)(ch); ++ ++ while ((p = h) != NULL) { ++ h = PKTLINK(h); ++ PKTSETLINK(p, NULL); ++ ++ /* prefetch the headers */ ++ if (h != NULL) { ++#ifdef CONFIG_BCM_IPROC_GMAC_PREFETCH ++ prefetch_range(PKTDATA(osh, h), SKB_DATA_PREFETCH_LEN); ++#else ++ ETPREFHDRS(PKTDATA(osh, h), PREFSZ); ++#endif ++ } ++ ++ skb = PKTTONATIVE(osh, p); ++ et->etc->unchained++; ++ et_sendup(et, skb); ++ } ++ ++ return (processed); ++} ++ ++#if defined(GMAC_NAPI2_POLL) ++static int BCMFASTPATH ++et_poll(struct napi_struct *napi, int budget) ++{ ++ int quota = budget; ++ struct net_device *dev = napi->dev; ++ et_info_t *et = ET_INFO(dev); ++ ++#elif defined(GMAC_NAPI_POLL) ++static int BCMFASTPATH ++et_poll(struct net_device *dev, int *budget) ++{ ++ int quota = min(RXBND, *budget); ++ et_info_t *et = ET_INFO(dev); ++#else /* GMAC_NAPI_POLL */ ++static void BCMFASTPATH ++et_dpc(ulong data) ++{ ++ et_info_t *et = (et_info_t *)data; ++ int quota = RXBND; ++#endif /* GMAC_NAPI_POLL */ ++ struct chops *chops; ++ void *ch; ++ osl_t *osh; ++ uint nrx = 0; ++ ++ chops = et->etc->chops; ++ ch = et->etc->ch; ++ osh = et->etc->osh; ++ ++ ET_TRACE(("et%d: et_dpc: events 0x%x\n", et->etc->unit, et->events)); ++ ET_LOG("et%d: et_dpc: events 0x%x", et->etc->unit, et->events); ++ ++#if !defined(GMAC_NAPI_POLL) && !defined(GMAC_NAPI2_POLL) ++ ET_LOCK(et); ++#endif /* ! NAPIx_POLL */ ++ ++ if (!et->etc->up) { ++ goto done; ++ } ++ ++ /* get interrupt condition bits again when dpc was rescheduled */ ++ if (et->resched) { ++ et->events = (*chops->getintrevents)(ch, FALSE); ++ et->resched = FALSE; ++ } ++ ++ if (et->events & INTR_RX) { ++ nrx = et_rxevent(osh, et, chops, ch, quota); ++ } ++ ++ if (et->events & INTR_TX) { ++ (*chops->txreclaim)(ch, FALSE); ++ } ++ ++ (*chops->rxfill)(ch); ++ ++ /* handle error conditions, if reset required leave interrupts off! 
*/ ++ if (et->events & INTR_ERROR) { ++ if ((*chops->errors)(ch)) { ++ printk("%s error, calling et_init() for et%d\n", __FUNCTION__, et->etc->unit); ++ et_init(et, ET_INIT_INTROFF); ++ } else { ++ if (nrx < quota) { ++ nrx += et_rxevent(osh, et, chops, ch, quota); ++ } ++ } ++ } ++ ++ /* run the tx queue */ ++ if (et->etc->txq_state != 0) { ++ if (!ET_ALL_PASSIVE_ENAB(et)) { ++ et_sendnext(et); ++ } ++#ifdef GMAC_ALL_PASSIVE ++ else { ++#ifdef CONFIG_BCM_IPROC_GMAC_TXONCPU1 ++ schedule_work_on(1, &et->txq_task.work); ++#else ++ schedule_work(&et->txq_task.work); ++#endif ++ } ++#endif /* GMAC_ALL_PASSIVE */ ++ } ++ ++ /* clear this before re-enabling interrupts */ ++ et->events = 0; ++ ++ /* something may bring the driver down */ ++ if (!et->etc->up) { ++ et->resched = FALSE; ++ goto done; ++ } ++ ++#if !defined(GMAC_NAPI_POLL) && !defined(GMAC_NAPI2_POLL) ++#ifdef GMAC_ALL_PASSIVE ++ if (et->resched) { ++ if (!ET_ALL_PASSIVE_ENAB(et)) { ++ tasklet_schedule(&et->tasklet); ++ } else { ++ schedule_work(&et->dpc_task.work); ++ } ++ } else { ++ (*chops->intrson)(ch); ++ } ++#else /* GMAC_ALL_PASSIVE */ ++ if (et->resched) { /* there may be frames left, reschedule et_dpc() */ ++ tasklet_schedule(&et->tasklet); ++ } else { /* re-enable interrupts */ ++ (*chops->intrson)(ch); ++ } ++#endif /* GMAC_ALL_PASSIVE */ ++#endif /* ! 
NAPIx_POLL */ ++ ++done: ++#if defined(GMAC_NAPI_POLL) ++ /* update number of frames processed */ ++ *budget -= nrx; ++ dev->quota -= nrx; ++ ++ ET_TRACE(("et%d: et_poll: quota %d budget %d\n", ++ et->etc->unit, dev->quota, *budget)); ++ ++ /* we got packets but no quota */ ++ if (et->resched) { ++ return (1); ++ } ++ ++ netif_rx_complete(dev); ++ ++ /* enable interrupts now */ ++ (*chops->intrson)(ch); ++ ++ /* indicate that we are done */ ++ return (0); ++#elif defined(GMAC_NAPI2_POLL) ++ ET_TRACE(("et%d: et_poll: budget %d\n", ++ et->etc->unit, budget)); ++ ++ /* we got packets but no quota */ ++ if (et->resched) { ++ return (1); ++ } ++ ++ napi_complete(napi); ++ ++ /* enable interrupts now */ ++ (*chops->intrson)(ch); ++ ++ /* indicate that we are done */ ++ return (0); ++#else /* !defined(GMAC_NAPI_POLL) && !defined(GMAC_NAPI2_POLL) */ ++ ET_UNLOCK(et); ++ return; ++#endif ++} ++ ++#ifdef GMAC_ALL_PASSIVE ++static void BCMFASTPATH ++et_dpc_work(struct et_task *task) ++{ ++#if !defined(GMAC_NAPI_POLL) && !defined(GMAC_NAPI2_POLL) ++ et_info_t *et = (et_info_t *)task->context; ++ et_dpc((unsigned long)et); ++#else ++ BUG_ON(1); ++#endif ++ return; ++} ++#endif /* GMAC_ALL_PASSIVE */ ++ ++static void ++et_error(et_info_t *et, struct sk_buff *skb, void *rxh) ++{ ++ uchar eabuf[32]; ++ struct ether_header *eh; ++ ++ eh = (struct ether_header *)skb->data; ++ bcm_ether_ntoa((struct ether_addr *)eh->ether_shost, eabuf); ++ ++ if (RXH_OVERSIZE(et->etc, rxh)) { ++ ET_ERROR(("et%d: rx: over size packet from %s\n", et->etc->unit, eabuf)); ++ } ++ if (RXH_CRC(et->etc, rxh)) { ++ ET_ERROR(("et%d: rx: crc error from %s\n", et->etc->unit, eabuf)); ++ } ++ if (RXH_OVF(et->etc, rxh)) { ++ ET_ERROR(("et%d: rx: fifo overflow\n", et->etc->unit)); ++ } ++ if (RXH_NO(et->etc, rxh)) { ++ ET_ERROR(("et%d: rx: crc error (odd nibbles) from %s\n", ++ et->etc->unit, eabuf)); ++ } ++ if (RXH_RXER(et->etc, rxh)) { ++ ET_ERROR(("et%d: rx: symbol error from %s\n", et->etc->unit, eabuf)); ++ 
} ++} ++ ++static void BCMFASTPATH ++et_sendup(et_info_t *et, struct sk_buff *skb) ++{ ++ etc_info_t *etc; ++ void *rxh; ++ uint16 flags; ++#ifdef DBG_PRINT_PKT ++ int idx; ++#endif /* DBG_PRINT_PKT */ ++#ifdef CONFIG_BCM_IPROC_GMAC_PREFETCH ++ struct sk_buff *next; ++#endif /* CONFIG_BCM_IPROC_GMAC_PREFETCH */ ++ ++ etc = et->etc; ++ ++ /* packet buffer starts with rxhdr */ ++ rxh = skb->data; ++ ++ /* strip off rxhdr */ ++ __skb_pull(skb, HWRXOFF); ++ ++ ET_TRACE(("et%d: et_sendup: %d bytes\n", et->etc->unit, skb->len)); ++ ET_LOG("et%d: et_sendup: len %d", et->etc->unit, skb->len); ++ ++ etc->rxframe++; ++ etc->rxbyte += skb->len; ++ ++ /* eh should now be aligned 2-mod-4 */ ++ ASSERT(((ulong)skb->data & 3) == 2); ++ ++ /* strip off crc32 */ ++ __skb_trim(skb, skb->len - ETHER_CRC_LEN); ++ ++ ET_PRHDR("rx", (struct ether_header *)skb->data, skb->len, etc->unit); ++ ET_PRPKT("rxpkt", skb->data, skb->len, etc->unit); ++ ++#ifdef DBG_PRINT_PKT ++ printk("et%d: rxpkt len(0x%x) tag:0x%02x%02x%02x%02x\n", etc->unit, skb->len, ++ skb->data[12], skb->data[13], skb->data[14], skb->data[15]); ++ ++ printk("et%d: %s len(0x%x) rxpkt:", etc->unit, __FUNCTION__, skb->len); ++ for (idx = 0; idx < skb->len; idx++) { ++ if ((idx % 16) == 0) { ++ printk("\n"); ++ } ++ printk("%02x ", skb->data[idx]); ++ } ++ printk("\n"); ++#endif /* DBG_PRINT_PKT */ ++ ++ /* get the error flags */ ++ flags = RXH_FLAGS(etc, rxh); ++ ++ /* check for reported frame errors */ ++ if (flags) { ++ goto err; ++ } ++ ++ skb->dev = et->dev; ++ ++ ASSERT(!PKTISCHAINED(skb)); ++ ++ /* extract priority from payload and store it out-of-band ++ * in skb->priority ++ */ ++ if (et->etc->qos) { ++ pktsetprio(skb, TRUE); ++ } ++ ++ skb->protocol = eth_type_trans(skb, et->dev); ++ ++#ifdef CONFIG_BCM_IPROC_GMAC_PREFETCH ++ next = skb->next; ++ while (1) { ++ if (next != NULL) { ++ ++ prefetch_range(next, SKB_PREFETCH_LEN); ++ next = next->next; ++ } else { ++ break; ++ } ++ } ++#endif /* 
CONFIG_BCM_IPROC_GMAC_PREFETCH */ ++ ++ /* send it up */ ++#if defined(GMAC_NAPI_POLL) || defined(GMAC_NAPI2_POLL) ++#ifdef CONFIG_IPROC_2STAGE_RX ++ if (!et->rxinisr) { ++ netif_receive_skb(skb); ++ } else { ++ netif_rx(skb); ++ } ++#else /* CONFIG_IPROC_2STAGE_RX */ ++ if (et->dev->features & NETIF_F_GRO) { ++ skb->ip_summed = CHECKSUM_UNNECESSARY; ++ if (skb->protocol == cpu_to_be16(ETH_P_8021Q)) { ++ skb = vlan_untag(skb); ++ if (unlikely(!skb)) { ++ goto err; ++ } ++ } ++ napi_gro_receive(&et->napi_poll, skb); ++ } else { ++ netif_receive_skb(skb); ++ } ++#endif /* CONFIG_IPROC_2STAGE_RX */ ++#else ++ netif_rx(skb); ++#endif /* defined(GMAC_NAPI_POLL) || defined(GMAC_NAPI2_POLL) */ ++ ++ ET_LOG("et%d: et_sendup ret", et->etc->unit, 0); ++ ++ return; ++ ++err: ++ et_error(et, skb, rxh); ++ ++ PKTFRMNATIVE(etc->osh, skb); ++ PKTFREE(etc->osh, skb, FALSE); ++ ++ return; ++} ++ ++static void ++et_dumpet(et_info_t *et, struct bcmstrbuf *b) ++{ ++ bcm_bprintf(b, "et %p dev %p name %s tbusy %d txq[0].qlen %d malloced %d\n", ++ et, et->dev, et->dev->name, (uint)netif_queue_stopped(et->dev), et->txq[0].qlen, ++ MALLOCED(et->osh)); ++} ++ ++void ++et_dump(et_info_t *et, struct bcmstrbuf *b) ++{ ++/* bcm_bprintf(b, "et%d: %s %s version %s\n", et->etc->unit, ++ __DATE__, __TIME__, EPI_VERSION_STR); ++*/ ++ bcm_bprintf(b, "et%d: version %s\n", et->etc->unit, EPI_VERSION_STR); ++ ++ et_dumpet(et, b); ++ etc_dump(et->etc, b); ++ ++ bcm_bprintf(b, "txdfrm(%d); txdfrmropped(%d); txqlen(%d); txqstop(%d); txdmafull(%d) txmaxlen(%d) txsgpkt(%d)\n", ++ et->etc->txfrm, et->etc->txfrmdropped, et->etc->txqlen, et->etc->txqstop, et->etc->txdmafull, ++ et->etc->txmaxlen, et->etc->txsgpkt); ++ et->etc->txfrm=0; ++ et->etc->txfrmdropped=0; ++ et->etc->txqlen=0; ++ et->etc->txqstop=0; ++ et->etc->txdmafull=0; ++ et->etc->txmaxlen=0; ++ et->etc->txsgpkt=0; ++ ++ bcm_bprintf(b, "rxquota(%d); rxdmastopped(%d)\n", ++ et->etc->rxquota, et->etc->rxdmastopped); ++ et->etc->rxquota=0; ++ 
et->etc->rxdmastopped=0; ++#ifdef GMAC_RATE_LIMITING ++ bcm_bprintf(b, "rxd_dropped_packets(%d)\n", ++ et->etc->rl_dropped_packets); ++ et->etc->rl_dropped_packets=0; ++#endif /* GMAC_RATE_LIMITING */ ++ ++} ++ ++void ++et_link_up(et_info_t *et) ++{ ++ ET_ERROR(("et%d: link up (%d%s)\n", ++ et->etc->unit, et->etc->speed, (et->etc->duplex? "FD" : "HD"))); ++ printk(KERN_DEBUG "et%d Link Up: %d%s\n", et->etc->unit, et->etc->speed, et->etc->duplex?"FD":"HD"); ++ netif_carrier_on(et->dev); ++} ++ ++void ++et_link_down(et_info_t *et) ++{ ++ ET_ERROR(("et%d: link down\n", et->etc->unit)); ++ printk(KERN_DEBUG "et%d Link Down\n", et->etc->unit); ++ netif_carrier_off(et->dev); ++} ++ ++bool ++et_is_link_up(et_info_t *et) ++{ ++ return netif_carrier_ok(et->dev); ++} ++ ++/********************************************************************** ++ * iproc_gmac_drv_probe(device) ++ * ++ * The Platform Driver Probe function. ++ * ++ * Input parameters: ++ * device: The Device Context ++ * ++ * Return value: ++ * 0: Driver Probe is Succesful ++ * not 0: ERROR ++ **********************************************************************/ ++static int iproc_gmac_drv_probe(struct platform_device* pldev) ++{ ++ struct net_device *dev = NULL; ++ osl_t *osh = NULL; ++ et_info_t *et = NULL; ++ int unit = et_found; ++ int err = 0; ++ unsigned char devname[8] = {0}; ++ char name[128]; ++ int idx; ++ struct device_node *np = pldev->dev.of_node; ++ const void *macaddr; ++ ++ printk("%s enter :%s; id:0x%x; unit:%d\n", __FUNCTION__, pldev->name, pldev->id, unit); ++ ++ /* Validation of platform device structure */ ++ if (!pldev) { ++ ET_ERROR(("WRONG INPUT\nplatfrom_device pointer should not be NULL.\n")); ++ return -EINVAL; ++ } ++ ++ et_found++; ++ ++ macaddr = of_get_mac_address(np); ++ if (!macaddr) { ++ dev_err(&pldev->dev, "can't find MAC address\n"); ++ return -ENODEV; ++ } ++ ++ osh = osl_attach(pldev, PCI_BUS, FALSE); ++ ASSERT(osh); ++ ++ ET_TRACE(("%s call alloc_etherdev\n", 
__FUNCTION__)); ++ if ((dev = alloc_etherdev(sizeof( et_info_t ))) == NULL) { ++ ET_ERROR(("%s: alloc_etherdev() failed\n", __FUNCTION__)); ++ err = -ENOMEM; ++ goto exit; ++ } ++ ++ et = ET_INFO(dev); ++ bzero(et, sizeof(et_info_t)); /* Is this needed in 2.6.36 ? -LR */ ++ et->dev = dev; ++ et->osh = osh; ++ ++ dev->base_addr = (unsigned long)of_iomap(np, 0); ++ dev->irq = (unsigned int)irq_of_parse_and_map(np, 0); ++ ++ printk("et%d: base_addr (0x%x) irq (%d)\n", unit, (uint32)dev->base_addr, dev->irq); ++ ++ if ((et->regsva = ioremap_nocache(dev->base_addr, 0xc00)) == NULL) { ++ ET_ERROR(("et%d: ioremap() failed\n", unit)); ++ err = -ENOMEM; ++ goto exit; ++ } ++ ET_TRACE(("%s base_addr: 0x%x; regsva:0x%x\n", __FUNCTION__, (uint32)dev->base_addr, (uint32)et->regsva)); ++ ++ pldev->id = dev->base_addr; ++ dev_set_drvdata(&(pldev->dev), dev); ++ SET_NETDEV_DEV(dev, (&pldev->dev)); ++ ++ init_MUTEX(&et->sem); ++ spin_lock_init(&et->lock); ++ spin_lock_init(&et->txq_lock); ++ spin_lock_init(&et->tx_lock); ++ spin_lock_init(&et->isr_lock); ++ ++ for (idx = 0; idx < NUMTXQ; idx++) { ++ skb_queue_head_init(&et->txq[idx]); ++ } ++ ++ /* Common load-time initialization */ ++ et->etc = etc_attach((void *)et, VENDOR_BROADCOM, BCMIPROC_CHIP_ID, unit, osh, et->regsva); ++ if (et->etc == NULL) { ++ ET_ERROR(("et%d: etc_attach() failed\n", unit)); ++ err = -ENOMEM; ++ goto exit; ++ } ++ ++ ether_addr_copy(dev->dev_addr, macaddr); ++ bcopy(macaddr, (char *)&et->etc->cur_etheraddr, ETHER_ADDR_LEN); ++ ++ /* init 1 second watchdog timer */ ++ init_timer(&et->timer); ++ et->timer.data = (ulong)dev; ++ et->timer.function = et_watchdog; ++ ++#if defined(CONFIG_IPROC_SDK_MGT_PORT_HANDOFF) ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++ /* schedule one second watchdog timer */ ++ et->timer.expires = jiffies + HZ; ++ add_timer(&et->timer); ++ et->set = TRUE; ++#endif /* (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) */ ++#endif /* 
defined(CONFIG_IPROC_SDK_MGT_PORT_HANDOFF) */ ++ ++#ifdef GMAC_RATE_LIMITING ++ /* Init 1 second watchdog timer */ ++ init_timer(&et->etc->rl_timer); ++ et->etc->rl_timer.data = (ulong)dev; ++ et->etc->rl_timer.function = et_release_congestion; ++#endif /* GMAC_RATE_LIMITING */ ++ ++#ifdef GMAC_ALL_PASSIVE ++ if (ET_ALL_PASSIVE_ENAB(et)) { ++ MY_INIT_WORK(&et->dpc_task.work, (work_func_t)et_dpc_work); ++ et->dpc_task.context = et; ++ MY_INIT_WORK(&et->txq_task.work, (work_func_t)et_txq_work); ++ et->txq_task.context = et; ++ } ++ if (et_ctf_pipeline_loopback(et)) { ++ et->all_dispatch_mode = FALSE; ++ } else { ++ et->all_dispatch_mode = (passivemode == 0) ? TRUE : FALSE; ++ } ++#endif /* GMAC_ALL_PASSIVE */ ++ ++ ET_TRACE(("%s request irq\n", __FUNCTION__)); ++ /* register our interrupt handler */ ++ if (request_irq(dev->irq, et_isr, IRQF_SHARED, dev->name, et)) { ++ ET_ERROR(("%s: request_irq(%d) failed\n", __FUNCTION__, dev->irq)); ++ err = -ENOMEM; ++ goto exit; ++ } ++ ++ /* add us to the global linked list */ ++ et->next = et_list; ++ et_list = et; ++ ++#ifdef HAVE_NET_DEVICE_OPS ++ dev->netdev_ops = &et_netdev_ops ; ++#else /* HAVE_NET_DEVICE_OPS */ ++ dev->open = et_open; ++ dev->stop = et_close; ++ dev->hard_start_xmit = et_start; ++ dev->get_stats = et_get_stats; ++ dev->set_mac_address = et_set_mac_address; ++ dev->set_multicast_list = et_set_multicast_list; ++ dev->do_ioctl = et_ioctl; ++#endif /* HAVE_NET_DEVICE_OPS */ ++ ++#if defined(GMAC_NAPI_POLL) ++ dev->poll = et_poll; ++ dev->weight = (ET_GMAC(et->etc) ? 
64 : 32); ++#elif defined(GMAC_NAPI2_POLL) ++ netif_napi_add(dev, & et->napi_poll, et_poll, 64); ++ napi_enable(&et->napi_poll); ++#else /* !GMAC_NAPI_POLL && !GMAC_NAPI2_POLL */ ++ /* Setup the bottom half handler */ ++ tasklet_init(&et->tasklet, et_dpc, (ulong)et); ++#endif ++ ++#if defined(BCMDMASGLISTOSL) ++ dev->features = (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_CSUM); ++ dev->vlan_features = (NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_ALL_CSUM); ++ ++#ifdef CONFIG_BCM_GRO_ENABLE ++ dev->features |= NETIF_F_GRO; ++ dev->vlan_features |= NETIF_F_GRO; ++ printk("et%d: Enable Checksum-SG-GRO\n", unit); ++#endif /* CONFIG_BCM_GRO_ENABLE */ ++#endif /* BCMDMASGLISTOSL */ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) ++ dev->ethtool_ops = &et_ethtool_ops; ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 36) */ ++ ++ /* Assign netdev name consistently, even if GMAC0 or 1 is disabled */ ++ snprintf(devname, 8, "eth%d", unit); ++ dev_alloc_name(dev, devname); ++ ++ ET_TRACE(("%s register netdev\n", __FUNCTION__)); ++ if (register_netdev(dev)) { ++ ET_ERROR(("%s register_netdev() failed\n", __FUNCTION__)); ++ err = -ENOMEM; ++ goto exit; ++ } ++ ++ /* Print hello string */ ++ (*et->etc->chops->longname)(et->etc->ch, name, sizeof(name)); ++ printk("%s: %s %s\n", dev->name, name, EPI_VERSION_STR); ++ ++ eth_mac_proc_create(dev); ++ ++ ET_TRACE(("%s: exit\n", __FUNCTION__)); ++ ++ return 0; ++ ++exit: ++#ifndef CONFIG_OF ++ if (macbase) { ++ iounmap(macbase); ++ macbase=NULL; ++ } ++ if (memres) { ++ release_mem_region(memres->start, (memres->end - memres->start + 1)); ++ memres=NULL; ++ } ++#endif ++ if (dev) { ++ free_netdev(dev); ++ dev = NULL; ++ } ++ if (osh) { ++ osl_detach(osh); ++ osh=NULL; ++ } ++ if (et) { ++ etc_detach(et->etc); ++ et->dev = NULL; ++ et->osh = NULL; ++ et_free(et); ++ et=NULL; ++ } ++ return err; ++} ++ ++ ++/********************************************************************** ++ * iproc_gmac_drv_remove(device) ++ * ++ * The 
Removal of Platform Device, and un-initialize the previously ++ * added MAC, and it's MEM Regions and Resources. ++ * ++ * Input parameters: ++ * device: The Device Context ++ * ++ * Return value: ++ * 0: Driver Entry is Succesfull ++ **********************************************************************/ ++static int __exit iproc_gmac_drv_remove(struct platform_device *pldev) ++{ ++ struct net_device *dev = platform_get_drvdata(pldev); ++ int retVal = 0; ++ et_info_t *et = NULL; ++ struct resource *memres = NULL; ++ ++ ET_TRACE(("%s: enter\n", __FUNCTION__)); ++ printk("%s: enter\n", __FUNCTION__); ++ ++#ifdef CONFIG_PM ++ iproc_gmac_drv_suspend(pldev, PMSG_SUSPEND); ++#endif ++ ++ et = ET_INFO(dev); ++ ++ iounmap(et->regsva); ++ unregister_netdev(dev); ++ ++ memres = platform_get_resource(pldev, IORESOURCE_MEM, 0); ++ if (memres) { ++ release_mem_region(memres->start, (memres->end - memres->start + 1)); ++ } else { ++ ET_ERROR(("ERROR: Could not get Platform Resource GMAC Register Memory Resource\n")); ++ retVal = -ENOMEM; ++ } ++ ++ free_netdev(dev); ++ ++#ifdef CONFIG_OF ++ eth_mac_proc_remove (dev); ++#endif ++ ++ et->dev = NULL; ++ et_free(et); ++ ++ ET_TRACE(("%s: exit\n", __FUNCTION__)); ++ ++ return retVal; ++} ++ ++#ifdef CONFIG_PM ++static int iproc_gmac_drv_suspend(struct platform_device *pdev, pm_message_t state) ++{ ++ int ret; ++ char *filename = "/usr/sbin/ifdown"; ++ char *argv[] = {filename, "eth0", NULL}; ++ char *envp[] = {"HOME=/", ++ "TERM=linux", ++ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", ++ NULL}; ++ ++ ET_TRACE(("%s: enter\n", __FUNCTION__)); ++ printk("%s: enter\n", __FUNCTION__); ++ ++/* ret = kernel_execve(filename, (const char * const*) argv, (const char * const*) envp);*/ ++ ret = do_execve(getname_kernel(filename), (const char *const *)argv, (const char *const *)envp); ++ ET_TRACE(("%s: exit\n", __FUNCTION__)); ++ ++ return 0; ++} ++ ++static int iproc_gmac_drv_resume(struct platform_device *pdev) ++{ ++ int ret; ++ char *filename = 
"/usr/sbin/ifup"; ++ char *argv[] = {filename, "eth0", NULL}; ++ char *envp[] = {"HOME=/", ++ "TERM=linux", ++ "PATH=/sbin:/usr/sbin:/bin:/usr/bin", ++ NULL}; ++ ++ ET_TRACE(("%s: enter\n", __FUNCTION__)); ++ printk("%s: enter\n", __FUNCTION__); ++ /*ret = kernel_execve(filename, (const char * const*) argv, (const char * const*) envp);*/ ++ ret = do_execve(getname_kernel(filename), (const char *const *)argv, (const char *const *)envp); ++ ++ ET_TRACE(("%s: exit\n", __FUNCTION__)); ++ ++ return 0; ++} ++#endif /* CONFIG_PM */ ++ ++ ++/********************************************************************** ++ * iproc_gmac_init_module(VOID) ++ * ++ * The Driver Entry Function ++ * ++ * Input parameters: ++ * None ++ * ++ * Return value: ++ * 0: Driver Entry is Succesful ++ * not 0: ERROR ++ **********************************************************************/ ++static int __init ++iproc_gmac_init_module(void) ++{ ++ ET_TRACE(("%s: enter\n", __FUNCTION__)); ++ ++#ifdef BCMDBG ++ if (msglevel != 0xdeadbeef) { ++ et_msg_level = msglevel; ++ } else { ++ char *var = getvar(NULL, "et_msglevel"); ++ if (var) { ++ et_msg_level = bcm_strtoul(var, NULL, 0); ++ } ++ } ++ ++ printk("%s: msglevel set to 0x%x\n", __FUNCTION__, et_msg_level); ++#endif /* BCMDBG */ ++ ++#ifdef GMAC_ALL_PASSIVE ++ { ++ char *var = getvar(NULL, "et_dispatch_mode"); ++ if (var) ++ passivemode = bcm_strtoul(var, NULL, 0); ++ printk("%s: passivemode set to 0x%x\n", __FUNCTION__, passivemode); ++ } ++#endif /* GMAC_ALL_PASSIVE */ ++ ++#ifdef GMAC_NAPI_POLL ++ printk("%s: GMAC_NAPI_POLL mode\n", __FUNCTION__); ++#endif /* GMAC_NAPI_POLL */ ++ ++#ifdef GMAC_NAPI2_POLL ++ printk("%s: GMAC_NAPI2_POLL mode\n", __FUNCTION__); ++#endif /* GMAC_NAPI2_POLL */ ++ ++#ifdef ET_LIMIT_TXQ ++ { ++ char *var = getvar(NULL, "et_txq_thresh"); ++ if (var) { ++ et_txq_thresh = bcm_strtoul(var, NULL, 0); ++ } ++ printk("%s: et_txq_thresh set to 0x%x\n", __FUNCTION__, et_txq_thresh); ++ } ++#endif /* ET_LIMIT_TXQ */ ++ 
++#ifdef GMAC_RATE_LIMITING ++ { ++ char *var = getvar(NULL, "et_rx_rate_limit"); ++ if (var) { ++ et_rx_rate_limit = bcm_strtoul(var, NULL, 0); ++ } ++ printk("%s: et_rx_rate_limit set to 0x%x\n", __FUNCTION__, et_rx_rate_limit); ++ } ++#endif /* GMAC_RATE_LIMITING */ ++ ++ ET_TRACE(("%s: exit\n", __FUNCTION__)); ++ return 0; ++} ++ ++ ++#ifndef CONFIG_OF ++/********************************************************************** ++ * iproc_gmac_cleanup_module(VOID) ++ * ++ * The Driver Exit Function ++ * ++ * Input parameters: ++ * None ++ * ++ * Return value: ++ * Nothing ++ **********************************************************************/ ++static void __exit ++iproc_gmac_cleanup_module(void) ++{ ++ int idx; ++ ++ ET_TRACE(("%s: enter\n", __FUNCTION__)); ++ ++ for (idx = 0; idx < IPROC_NUM_GMACS; idx++) { ++ if (gmac_pdev_loaded[idx]) { ++ /* Unregister GMAC driver */ ++ iproc_platform_driver_unregister(&gmac_pdrv[idx]); ++ } ++ } ++ ++ /* Clean up the proc directory */ ++ eth_mac_proc_remove(); ++ ++ ET_TRACE(("%s: exit\n", __FUNCTION__)); ++ return; ++} ++#endif /*CONFIG_OF*/ ++ ++#if 0 ++static int get_fa_bypass(char *page, char **start, off_t off, int count, int *eof, void *data) ++{ ++ unsigned int len=0; ++ len += sprintf(page+len, "\n\n## Current FA Bypass setting = 0x%x, %s ##\n\n",gBypass, gBypass?"enabled":"disabled"); ++ *eof = 1; ++ return len; ++} ++ ++static int set_fa_bypass(struct file *file, const char *buffer, unsigned long count, void *data) ++{ ++ unsigned int len=1; ++ unsigned char debug_buffer[2]; ++ int bypass =0; ++ ++ if (count != 2) { ++ ET_ERROR(("Please pass (one:1) digit FA bypass value only, 0=disable FA bypass, 1 = enable FA bypass\n")); ++ return -EINVAL; ++ } ++ ++ /* Last buffer byte will be LF or CR only */ ++ if(copy_from_user(&debug_buffer[0], buffer, len)) { ++ ET_ERROR(("Problem in copying invalid user buffer\n")); ++ return -EFAULT; ++ } ++ ++ debug_buffer[len]='\0'; /* Only one byte value is available now */ ++ if ( 
sscanf(debug_buffer,"%d",&bypass) != 1) { ++ ET_ERROR(("\n##Invalid value :%s: is passed ##\n",debug_buffer)); ++ return -EINVAL; ++ } ++ if (!((bypass >=DISABLE_FA_BYPASS) && (bypass <= ENABLE_FA_BYPASS))) { ++ ET_ERROR(("\n##Passed value :%d: is not in valid range %d-%d \n",bypass,DISABLE_FA_BYPASS,ENABLE_FA_BYPASS)); ++ return -EINVAL; ++ } ++ ET_TRACE(("\n##set_fa_bypass(): Previous: 0x%x %s ##\n", gBypass, gBypass?"enabled":"disabled")); ++ gBypass = bypass; ++ ET_TRACE(("\n##set_fa_bypass(): New: 0x%x %s ##\n", gBypass, gBypass?"enabled":"disabled")); ++ return count; ++} ++#endif ++ ++static char* iproc_eth_proc_root="iproc_eth"; ++static struct proc_dir_entry *iproc_eth_root_dir=NULL; // BCM5892 eth proc root directory ++static int eth_mac_proc_create(struct net_device *dev ) ++{ ++/* struct proc_dir_entry *dent, *ent;*/ ++ struct proc_dir_entry *dent; ++ et_info_t *et = NULL; ++ etc_info_t *etc = NULL; ++ char fname[32]; ++ ++ et = ET_INFO(dev); ++ if (et != NULL) { ++ etc = et->etc; ++ } ++ ++ if ((et == NULL) || (etc == NULL)) { ++ printk("%s: error: Unit probably not initialized by probe function." 
++ " et=0x%pm etc=0x%p\n", __FUNCTION__, et, etc); ++ return -1; ++ } ++ ++ ET_TRACE(("%s: enter\n", __FUNCTION__)); ++ ++ snprintf(fname, 32, "%s%u", iproc_eth_proc_root, etc->unit); ++ ++ dent = proc_mkdir(fname,iproc_eth_root_dir); ++#if 0 ++ if (dent) { ++ /* unit 2 has FA connectivity, create bypass path only for unit 2 */ ++ if (etc->unit == 2) { ++ printk("\nCreating fa bypass proc entry\n"); ++ ++ ent = create_proc_entry("fa_bypass", S_IFREG|S_IRUGO, dent); ++ if (ent) { ++ ent->read_proc = get_fa_bypass; ++ ent->write_proc = set_fa_bypass; ++ } else { ++ printk("Error creating proc_entry, returning\n"); ++ return -1; ++ } ++ } ++ } ++#endif ++ ET_TRACE(("%s: exit\n", __FUNCTION__)); ++ return 0; ++} ++ ++#ifndef CONFIG_OF ++static void eth_mac_proc_remove(void) ++{ ++ ET_TRACE(("%s: enter\n", __FUNCTION__)); ++ printk("%s: enter\n", __FUNCTION__); ++ remove_proc_entry(iproc_eth_proc_root,NULL); ++ ET_TRACE(("%s: exit\n", __FUNCTION__)); ++} ++#else ++static void eth_mac_proc_remove(struct net_device *dev) ++{ ++ et_info_t *et; ++ etc_info_t *etc; ++ char fname[32]; ++ ++ ET_TRACE(("%s: enter\n", __FUNCTION__)); ++ printk("%s: enter\n", __FUNCTION__); ++ ++ et = ET_INFO(dev); ++ if (et == NULL) { ++ printk("%s: error: Unit probably not initialized by probe function.\n", __FUNCTION__); ++ return; ++ } ++ ++ etc = et->etc; ++ if (etc == NULL) { ++ printk("%s: error: Unit probably not initialized by probe \ ++ function.\n", __FUNCTION__); ++ return; ++ } ++ ++ snprintf(fname, 32, "%s%u", iproc_eth_proc_root, etc->unit); ++ remove_proc_entry(fname,NULL); ++ ++ ET_TRACE(("%s: exit\n", __FUNCTION__)); ++} ++#endif /* CONFIG_OF */ ++ ++#ifdef CONFIG_OF ++static const struct of_device_id brcm_iproc_dt_ids[] = { ++ { .compatible = "brcm,iproc-gmac"}, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, brcm_iproc_dt_ids); ++ ++static struct platform_driver iproc_gmac_driver = ++{ ++ .probe = iproc_gmac_drv_probe, ++ .remove = __exit_p(iproc_gmac_drv_remove), ++ .suspend = 
iproc_gmac_drv_suspend, ++ .resume = iproc_gmac_drv_resume, ++ .driver = ++ { ++ .name = "bcmiproc-gmac", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(brcm_iproc_dt_ids), ++ }, ++}; ++module_init(iproc_gmac_init_module); ++module_platform_driver(iproc_gmac_driver); ++#else ++module_init(iproc_gmac_init_module); ++module_exit(iproc_gmac_cleanup_module); ++#endif /*CONFIG_OF*/ ++ ++MODULE_DESCRIPTION("Broadcom Northstar Ethernet Driver"); ++MODULE_LICENSE("GPL"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_linux.h b/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_linux.h +--- a/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_linux.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/et/sys/et_linux.h 2017-11-09 17:53:43.900292000 +0800 +@@ -0,0 +1,50 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * Linux device driver tunables for ++ * Broadcom BCM47XX 10/100Mbps Ethernet Device Driver ++ * ++ * $Id: et_linux.h 320789 2012-03-13 04:01:27Z rnuti $ ++ */ ++ ++#ifndef _et_linux_h_ ++#define _et_linux_h_ ++ ++/* tunables */ ++#define NTXD 512 /* # tx dma ring descriptors (must be ^2) */ ++#define NRXD 512 /* # rx dma ring descriptors (must be ^2) */ ++#if defined(CONFIG_RAM_SIZE) && (CONFIG_RAM_SIZE <= 16) ++#define NRXBUFPOST 256 /* try to keep this # rbufs posted to the chip */ ++#else ++#define NRXBUFPOST 420 /* try to keep this # rbufs posted to the chip */ ++#endif ++#ifdef CONFIG_JUMBO_FRAME ++#define BCM_ETHER_MAX_LEN 2500 ++#define RXBUFSZ (BCM_ETHER_MAX_LEN + HWRXOFF + BCMEXTRAHDROOM) /* receive buffer size */ ++#else ++#define BCM_ETHER_MAX_LEN 1518 //ETHER_MAX_LEN (1518) ++#define RXBUFSZ 1792 ++#endif /* CONFIG_JUMBO_FRAME */ ++ ++ ++#ifndef RXBND ++#define RXBND 64 //32 /* max # rx frames to process in dpc */ ++#endif ++ ++#if defined(ILSIM) || defined(__arch_um__) 
++#undef NTXD ++#define NTXD 16 ++#undef NRXD ++#define NRXD 16 ++#undef NRXBUFPOST ++#define NRXBUFPOST 2 ++#endif ++ ++#define PKTCBND 48 ++ ++#define CTFPOOLSZ 768 ++ ++#define PREFSZ 96 ++#define ETPREFHDRS(h, sz) OSL_PREF_RANGE_ST((h), (sz)) ++ ++#endif /* _et_linux_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/et/sys/etc.c b/drivers/net/ethernet/broadcom/gmac/src/et/sys/etc.c +--- a/drivers/net/ethernet/broadcom/gmac/src/et/sys/etc.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/et/sys/etc.c 2017-11-09 17:53:43.901298000 +0800 +@@ -0,0 +1,746 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * Common [OS-independent] portion of ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Device Driver. ++ * ++ * $Id: etc.c 323634 2012-03-26 10:26:11Z groques $ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "etcgmac.h" ++ ++#ifdef BCMDBG ++uint32 et_msg_level = 1; ++#else ++uint32 et_msg_level = 0; ++#endif /* BCMDBG */ ++uint8 ethup = 0; ++uint8 ethupmask = 0; ++etc_info_t *ethupetcptr[IPROC_NUM_GMACS]; ++ ++/* local prototypes */ ++static void etc_loopback(etc_info_t *etc, int on); ++static void etc_dumpetc(etc_info_t *etc, struct bcmstrbuf *b); ++int etc_gmac_speed(int gmac); ++ ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++extern void gmac_set_amac_mdio(int en); ++extern int gmac_has_mdio_access(void); ++#elif defined(CONFIG_MACH_WH2) ++extern void __iomem *get_iproc_wrap_ctrl_base(void); ++extern int egphy28_reg_read(uint32 phy_addr, int reg_addr, uint16 *data); ++#endif /* (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) */ ++ ++#ifdef CONFIG_SERDES_ASYMMETRIC_MODE ++void gmac_serdes_asym_mode(etc_info_t 
*etcptrs[]); ++#endif /* CONFIG_SERDES_ASYMMETRIC_MODE */ ++ ++/* 802.1d priority to traffic class mapping. queues correspond one-to-one ++ * with traffic classes. ++ */ ++uint32 up2tc[NUMPRIO] = { ++ TC_BE, /* 0 BE TC_BE Best Effort */ ++ TC_BK, /* 1 BK TC_BK Background */ ++ TC_BK, /* 2 -- TC_BK Background */ ++ TC_BE, /* 3 EE TC_BE Best Effort */ ++ TC_CL, /* 4 CL TC_CL Controlled Load */ ++ TC_CL, /* 5 VI TC_CL Controlled Load */ ++ TC_VO, /* 6 VO TC_VO Voice */ ++ TC_VO /* 7 NC TC_VO Voice */ ++}; ++ ++uint32 priq_selector[] = { ++ [0x0] = TC_NONE, [0x1] = TC_BK, [0x2] = TC_BE, [0x3] = TC_BE, ++ [0x4] = TC_CL, [0x5] = TC_CL, [0x6] = TC_CL, [0x7] = TC_CL, ++ [0x8] = TC_VO, [0x9] = TC_VO, [0xa] = TC_VO, [0xb] = TC_VO, ++ [0xc] = TC_VO, [0xd] = TC_VO, [0xe] = TC_VO, [0xf] = TC_VO ++}; ++ ++/* find the chip opsvec for this chip */ ++struct chops* ++etc_chipmatch(uint vendor, uint device) ++{ ++ extern struct chops bcmgmac_et_chops; ++ ++ if (bcmgmac_et_chops.id(vendor, device)) { ++ return (&bcmgmac_et_chops); ++ } ++ ++ return (NULL); ++} ++ ++void* ++etc_attach(void *et, uint vendor, uint device, uint unit, void *osh, void *regsva) ++{ ++ etc_info_t *etc; ++ char *var; ++ ++ ET_TRACE(("et%d: etc_attach: vendor 0x%x device 0x%x\n", unit, vendor, device)); ++ ++ /* some code depends on packed structures */ ++ ASSERT(sizeof(struct ether_addr) == ETHER_ADDR_LEN); ++ ASSERT(sizeof(struct ether_header) == ETHER_HDR_LEN); ++ ++ /* allocate etc_info_t state structure */ ++ if ((etc = (etc_info_t*) MALLOC(osh, sizeof(etc_info_t))) == NULL) { ++ ET_ERROR(("et%d: etc_attach: out of memory, malloced %d bytes\n", unit, ++ MALLOCED(osh))); ++ return (NULL); ++ } ++ bzero((char*)etc, sizeof(etc_info_t)); ++ ++ etc->et = et; ++ etc->unit = unit; ++ etc->osh = osh; ++ etc->vendorid = (uint16) vendor; ++ etc->deviceid = (uint16) device; ++ etc->forcespeed = etc_gmac_speed(unit); ++ etc->linkstate = FALSE; ++ etc->mdio_init_time = 5; /* number of seconds to wait before release 
mdio bus */ ++ var = getvar(NULL, "eth_init_time"); ++ if (var) { ++ etc->mdio_init_time = bcm_strtoul(var, NULL, 0); ++ } ++ printk("%s() mdio_init_time = %d\n", __FUNCTION__, etc->mdio_init_time); ++ ethupmask |= 1<unit; ++ ethupetcptr[unit] = etc; ++ ++ /* set chip opsvec */ ++ etc->chops = etc_chipmatch(vendor, device); ++ ASSERT(etc->chops); ++ ++ /* chip attach */ ++ if ((etc->ch = (*etc->chops->attach)(etc, osh, regsva)) == NULL) { ++ ET_ERROR(("et%d: chipattach error\n", unit)); ++ goto fail; ++ } ++ ++ return ((void*)etc); ++ ++fail: ++ etc_detach(etc); ++ return (NULL); ++} ++ ++void ++etc_detach(etc_info_t *etc) ++{ ++ if (etc == NULL) ++ return; ++ ++ /* free chip private state */ ++ if (etc->ch) { ++ (*etc->chops->detach)(etc->ch); ++ etc->chops = etc->ch = NULL; ++ } ++ ++ MFREE(etc->osh, etc, sizeof(etc_info_t)); ++} ++ ++void ++etc_reset(etc_info_t *etc) ++{ ++ ET_TRACE(("et%d: etc_reset\n", etc->unit)); ++ ++ etc->reset++; ++ ++ /* reset the chip */ ++ (*etc->chops->reset)(etc->ch); ++ ++ /* free any posted tx packets */ ++ (*etc->chops->txreclaim)(etc->ch, TRUE); ++ ++#ifdef DMA ++ /* free any posted rx packets */ ++ (*etc->chops->rxreclaim)(etc->ch); ++#endif /* DMA */ ++} ++ ++void ++etc_init(etc_info_t *etc, uint options) ++{ ++ ET_TRACE(("et%d: etc_init\n", etc->unit)); ++ ++ ASSERT(etc->pioactive == NULL); ++ ASSERT(!ETHER_ISNULLADDR(&etc->cur_etheraddr)); ++ ASSERT(!ETHER_ISMULTI(&etc->cur_etheraddr)); ++ ++ /* init the chip */ ++ (*etc->chops->init)(etc->ch, options); ++ /* init the PM change mode and linkstate */ ++ etc->pm_modechange = FALSE; ++ etc->linkstate = FALSE; ++} ++ ++/* mark interface up */ ++void ++etc_up(etc_info_t *etc) ++{ ++ etc->up = TRUE; ++ ++ /* enable the port phy */ ++ (*etc->chops->phyenable)(etc->ch, etc->unit, etc->phyaddr, 1); ++ ++ et_init(etc->et, ET_INIT_FULL | ET_INIT_INTRON); ++} ++ ++/* mark interface down */ ++uint ++etc_down(etc_info_t *etc, int reset) ++{ ++ uint callback; ++ ++ callback = 0; ++ ++ 
ET_FLAG_DOWN(etc); ++ ++ /* disable the port phy */ ++ (*etc->chops->phyenable)(etc->ch, etc->unit, etc->phyaddr, 0); ++ ++ if (reset) { ++ et_reset(etc->et); ++ } ++ ++ /* suppress link state changes during power management mode changes */ ++ if (etc->linkstate) { ++ etc->linkstate = FALSE; ++ if (!etc->pm_modechange) { ++ et_link_down(etc->et); ++ } ++ } ++ ++ return (callback); ++} ++ ++/* common iovar handler. return 0=ok, -1=error */ ++int ++etc_iovar(etc_info_t *etc, uint cmd, uint set, void *arg) ++{ ++ int error; ++ uint *vecarg; ++ ++ error = 0; ++ vecarg = (uint *)arg; ++ ET_TRACE(("et%d: etc_iovar: cmd 0x%x\n", etc->unit, cmd)); ++ ++ switch (cmd) { ++#ifdef BCMDBG ++ case IOV_ET_CLEAR_DUMP: ++ if (set) { ++ uint size = ((char *)(&etc->rxbadlen) - (char *)(&etc->txframe)); ++ ++ bzero((char *)&etc->txframe, size + sizeof(etc->rxbadlen)); ++ (*etc->chops->dumpmib)(etc->ch, NULL, TRUE); ++ error = 0; ++ } ++ break; ++#endif /* BCMDBG */ ++ case IOV_PKTC: ++ if (set) { ++ etc->pktc = *vecarg; ++ } else { ++ *vecarg = (uint)etc->pktc; ++ } ++ break; ++ ++ case IOV_PKTCBND: ++ if (set) { ++ etc->pktcbnd = MAX(*vecarg, 32); ++ } else { ++ *vecarg = etc->pktcbnd; ++ } ++ break; ++ ++ case IOV_COUNTERS: ++ { ++ struct bcmstrbuf b; ++ bcm_binit(&b, (char*)arg, IOCBUFSZ); ++ etc_dumpetc(etc, &b); ++ } ++ break; ++ ++ default: ++ error = -1; ++ } ++ ++ return (error); ++} ++ ++/* common ioctl handler. return: 0=ok, -1=error */ ++int ++etc_ioctl(etc_info_t *etc, int cmd, void *arg) ++{ ++ int error; ++ int val; ++ int *vec = (int*)arg; ++ ++ error = 0; ++ ++ val = arg ? 
*(int*)arg : 0; ++ ++ ET_TRACE(("et%d: etc_ioctl: cmd 0x%x\n", etc->unit, cmd)); ++ ++ switch (cmd) { ++ case ETCUP: ++ et_up(etc->et); ++ break; ++ ++ case ETCDOWN: ++ et_down(etc->et, TRUE); ++ break; ++ ++ case ETCLOOP: ++ etc_loopback(etc, val); ++ break; ++ ++ case ETCDUMP: ++ if (et_msg_level & 0x10000) { ++ bcmdumplog((char *)arg, IOCBUFSZ); ++ } else { ++ struct bcmstrbuf b; ++ bcm_binit(&b, (char*)arg, IOCBUFSZ); ++ et_dump(etc->et, &b); ++ } ++ break; ++ ++ case ETCSETMSGLEVEL: ++ et_msg_level = val; ++ break; ++ ++ case ETCPROMISC: ++ etc_promisc(etc, val); ++ break; ++ ++ case ETCQOS: ++ etc_qos(etc, val); ++ break; ++ ++ case ETCSPEED: ++ if (vec) { ++ if (vec[0] < ET_AUTO || vec[0] > ET_1000FULL) { ++ goto err; ++ } ++ ++ etc->forcespeed = vec[0]; ++ ++ /* explicitly reset the phy */ ++ (*etc->chops->phyreset)(etc->ch); ++ ++ /* request restart autonegotiation if we're reverting to adv mode */ ++ etc->advertise = etc->advertise2 = 0; ++ if (etc->forcespeed == ET_AUTO) { ++ if (vec[1] & ADVERTISED_10baseT_Half) { ++ etc->advertise |= ADV_10HALF; ++ } ++ if (vec[1] & ADVERTISED_10baseT_Full) { ++ etc->advertise |= ADV_10FULL; ++ } ++ if (vec[1] & ADVERTISED_100baseT_Half) { ++ etc->advertise |= ADV_100HALF; ++ } ++ if (vec[1] & ADVERTISED_100baseT_Full) { ++ etc->advertise |= ADV_100FULL; ++ } ++ if (vec[1] & ADVERTISED_1000baseT_Full) { ++ etc->advertise2 |= ADV_1000FULL; ++ } ++ etc->needautoneg = TRUE; ++ } else { ++ etc->needautoneg = FALSE; ++ } ++ et_init(etc->et, ET_INIT_INTRON); ++ } ++ break; ++ ++ case ETCPHYRD: ++ if (vec) { ++ vec[1] = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, vec[0]); ++ ET_TRACE(("etc_ioctl: ETCPHYRD of reg 0x%x => 0x%x\n", vec[0], vec[1])); ++ } ++ break; ++ ++ case ETCPHYRD2: ++ if (vec) { ++ uint phyaddr, reg; ++ phyaddr = vec[0] >> 16; ++ reg = vec[0] & 0xffff; ++ vec[1] = (*etc->chops->phyrd)(etc->ch, phyaddr, reg); ++ ET_TRACE(("etc_ioctl: ETCPHYRD2 of phy 0x%x, reg 0x%x => 0x%x\n", ++ phyaddr, reg, vec[1])); ++ } 
++ break; ++ ++ case ETCPHYWR: ++ if (vec) { ++ ET_TRACE(("etc_ioctl: ETCPHYWR to reg 0x%x <= 0x%x\n", vec[0], vec[1])); ++ (*etc->chops->phywr)(etc->ch, etc->phyaddr, vec[0], (uint16)vec[1]); ++ } ++ break; ++ ++ case ETCPHYWR2: ++ if (vec) { ++ uint phyaddr, reg; ++ phyaddr = vec[0] >> 16; ++ reg = vec[0] & 0xffff; ++ (*etc->chops->phywr)(etc->ch, phyaddr, reg, (uint16)vec[1]); ++ ET_TRACE(("etc_ioctl: ETCPHYWR2 to phy 0x%x, reg 0x%x <= 0x%x\n", ++ phyaddr, reg, vec[1])); ++ } ++ break; ++ ++ default: ++err: ++ error = -1; ++ } ++ ++ return (error); ++} ++ ++/* called once per second */ ++void ++etc_watchdog(etc_info_t *etc) ++{ ++ uint16 status; ++ uint16 lpa; ++ ++#if defined(CONFIG_MACH_WH2) ++ uint32 select = (ioread32(get_iproc_wrap_ctrl_base() + 0xa8)); /* IPROC_WRAP_TOP_STRAP_STATUS_1 */ ++ if (select & 0x04) /* select SGMII path */ ++ { ++ etc->speed = 1000; ++ etc->duplex = 1; ++ etc->linkstate = true; ++ (*etc->chops->duplexupd)(etc->ch); ++ return; ++ } ++#endif ++ etc->now++; ++ ++ /* no local phy registers */ ++ if (etc->phyaddr == EPHY_NOREG) ++ { ++ etc->linkstate = TRUE; ++ etc->duplex = 1; ++ /* keep emac txcontrol duplex bit consistent with current phy duplex */ ++ (*etc->chops->duplexupd)(etc->ch); ++ return; ++ } ++ ++ if (etc->up && etc->linkstate) { ++ if (!(ethup & 1<unit)) { ++ printk(KERN_DEBUG "et%d Interface up\n", etc->unit); ++ } ++ ethup |= 1<unit; ++ } ++ ++#if defined(CONFIG_IPROC_SDK_MGT_PORT_HANDOFF) ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++ if ( !gmac_has_mdio_access()) { ++ /* we can't monitor link so force link up */ ++ /* if GMAC does not have access to MDIO then exit */ ++ if (!etc->linkstate) { ++ etc->linkstate = TRUE; ++ etc->duplex = 1; ++ etc->speed = 1000; ++ } ++ /* keep emac txcontrol duplex bit consistent with current phy duplex */ ++ (*etc->chops->duplexupd)(etc->ch); ++ if (!et_is_link_up(etc->et)) { ++ printk(KERN_DEBUG "%s rcan't access PHY, forcing link up\n", __FUNCTION__); ++ 
et_link_up(etc->et); ++ } ++ return; ++ } ++ ++ /* check if need to release mdio access */ ++ if ((ethup==ethupmask) || (etc->now > etc->mdio_init_time)) { ++ /* either both links up or (5) "eth_init_time" seconds elapsed */ ++ /* keep mdio access if ethtool is set */ ++ char *s = getvar(NULL, "ethtool"); ++ if (!s) { ++#ifdef CONFIG_SERDES_ASYMMETRIC_MODE ++ gmac_serdes_asym_mode(ethupetcptr); ++#endif /* CONFIG_SERDES_ASYMMETRIC_MODE */ ++ printk(KERN_DEBUG "%s releasing MDIO access; ethup(0x%x)\n", __FUNCTION__, ethup); ++ gmac_set_amac_mdio(0); ++ return; ++ } ++ } ++#endif /* (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) */ ++#endif /* defined(CONFIG_IPROC_SDK_MGT_PORT_HANDOFF) */ ++ ++ status = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 1); ++ /* check for bad mdio read */ ++ if (status == 0xffff) { ++ ET_ERROR(("et%d: etc_watchdog: bad mdio read: phyaddr %d mdcport %d\n", ++ etc->unit, etc->phyaddr, etc->mdcport)); ++ return; ++ } ++ ++ if (etc->forcespeed == ET_AUTO) { ++ uint16 adv, adv2 = 0, status2 = 0, estatus; ++ ++ adv = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 4); ++ lpa = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 5); ++ ++ /* read extended status register. if we are 1000BASE-T ++ * capable then get our advertised capabilities and the ++ * link partner capabilities from 1000BASE-T control and ++ * status registers. 
++ */ ++ estatus = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 15); ++ if ((estatus != 0xffff) && (estatus & EST_1000TFULL)) { ++ /* read 1000BASE-T control and status registers */ ++ adv2 = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 9); ++ status2 = (*etc->chops->phyrd)(etc->ch, etc->phyaddr, 10); ++ } ++ ++ /* update current speed and duplex */ ++ if ((adv2 & ADV_1000FULL) && (status2 & LPA_1000FULL)) { ++ etc->speed = 1000; ++ etc->duplex = 1; ++ } else if ((adv2 & ADV_1000HALF) && (status2 & LPA_1000HALF)) { ++ etc->speed = 1000; ++ etc->duplex = 0; ++ } else if ((adv & ADV_100FULL) && (lpa & LPA_100FULL)) { ++ etc->speed = 100; ++ etc->duplex = 1; ++ } else if ((adv & ADV_100HALF) && (lpa & LPA_100HALF)) { ++ etc->speed = 100; ++ etc->duplex = 0; ++ } else if ((adv & ADV_10FULL) && (lpa & LPA_10FULL)) { ++ etc->speed = 10; ++ etc->duplex = 1; ++ } else { ++ etc->speed = 10; ++ etc->duplex = 0; ++ } ++ } ++ ++ /* monitor link state */ ++ if (!etc->linkstate && (status & STAT_LINK)) { ++ etc->linkstate = TRUE; ++ if (etc->pm_modechange) { ++ etc->pm_modechange = FALSE; ++ } else { ++ et_link_up(etc->et); ++#ifdef CONFIG_SERDES_ASYMMETRIC_MODE ++ (*etc->chops->forcespddpx)(etc->ch); ++#endif /* CONFIG_SERDES_ASYMMETRIC_MODE */ ++ } ++ } else if (etc->linkstate && !(status & STAT_LINK)) { ++ etc->linkstate = FALSE; ++ if (!etc->pm_modechange) { ++ et_link_down(etc->et); ++ } ++ } ++ ++ /* keep emac txcontrol duplex bit consistent with current phy duplex */ ++ (*etc->chops->duplexupd)(etc->ch); ++ ++ /* check for remote fault error */ ++ if (status & STAT_REMFAULT) { ++ ET_ERROR(("et%d: remote fault\n", etc->unit)); ++ } ++ ++ /* check for jabber error */ ++ if (status & STAT_JAB) { ++ ET_ERROR(("et%d: jabber\n", etc->unit)); ++ } ++ ++ /* ++ * Read chip mib counters occationally before the 16bit ones can wrap. ++ * We don't use the high-rate mib counters. 
++ */ ++ if ((etc->now % 30) == 0) { ++ (*etc->chops->statsupd)(etc->ch); ++ } ++} ++ ++static void ++etc_loopback(etc_info_t *etc, int on) ++{ ++ ET_TRACE(("et%d: etc_loopback: %d\n", etc->unit, on)); ++ ++ etc->loopbk = (bool) on; ++ et_init(etc->et, ET_INIT_INTRON); ++} ++ ++void ++etc_promisc(etc_info_t *etc, uint on) ++{ ++ ET_TRACE(("et%d: etc_promisc: %d\n", etc->unit, on)); ++ ++ etc->promisc = (bool) on; ++ et_init(etc->et, ET_INIT_INTRON); ++} ++ ++void ++etc_qos(etc_info_t *etc, uint on) ++{ ++ ET_TRACE(("et%d: etc_qos: %d\n", etc->unit, on)); ++ ++ etc->qos = (bool) on; ++ et_init(etc->et, ET_INIT_INTRON); ++} ++ ++void ++etc_dump(etc_info_t *etc, struct bcmstrbuf *b) ++{ ++ etc_dumpetc(etc, b); ++ (*etc->chops->dump)(etc->ch, b); ++} ++ ++static void ++etc_dumpetc(etc_info_t *etc, struct bcmstrbuf *b) ++{ ++ char perm[32], cur[32]; ++ uint i; ++ ++ bcm_bprintf(b, "etc 0x%x et 0x%x unit %d msglevel %d speed/duplex %d%s\n", ++ (ulong)etc, (ulong)etc->et, etc->unit, et_msg_level, ++ etc->speed, (etc->duplex ? 
"full": "half")); ++ bcm_bprintf(b, "up %d promisc %d loopbk %d forcespeed %d advertise 0x%x " ++ "advertise2 0x%x needautoneg %d\n", ++ etc->up, etc->promisc, etc->loopbk, etc->forcespeed, ++ etc->advertise, etc->advertise2, etc->needautoneg); ++ bcm_bprintf(b, "piomode %d pioactive 0x%x nmulticast %d allmulti %d qos %d\n", ++ etc->piomode, (ulong)etc->pioactive, etc->nmulticast, etc->allmulti, etc->qos); ++ bcm_bprintf(b, "vendor 0x%x device 0x%x rev %d coreunit %d phyaddr %d mdcport %d\n", ++ etc->vendorid, etc->deviceid, etc->chiprev, ++ etc->coreunit, etc->phyaddr, etc->mdcport); ++ ++ bcm_bprintf(b, "perm_etheraddr %s cur_etheraddr %s\n", ++ bcm_ether_ntoa(&etc->perm_etheraddr, perm), ++ bcm_ether_ntoa(&etc->cur_etheraddr, cur)); ++ ++ if (etc->nmulticast) { ++ bcm_bprintf(b, "multicast: "); ++ for (i = 0; i < etc->nmulticast; i++) { ++ bcm_bprintf(b, "%s ", bcm_ether_ntoa(&etc->multicast[i], cur)); ++ } ++ bcm_bprintf(b, "\n"); ++ } ++ ++ bcm_bprintf(b, "linkstate %d\n", etc->linkstate); ++ bcm_bprintf(b, "\n"); ++ ++ /* refresh stat counters */ ++ (*etc->chops->statsupd)(etc->ch); ++ ++ /* summary stat counter line */ ++ /* use sw frame and byte counters -- hw mib counters wrap too quickly */ ++ bcm_bprintf(b, "txframe %d txbyte %d txerror %d rxframe %d rxbyte %d rxerror %d\n", ++ etc->txframe, etc->txbyte, etc->txerror, ++ etc->rxframe, etc->rxbyte, etc->rxerror); ++ ++ /* transmit & receive stat counters */ ++ /* hardware mib pkt and octet counters wrap too quickly to be useful */ ++ (*etc->chops->dumpmib)(etc->ch, b, FALSE); ++ ++ bcm_bprintf(b, "txnobuf %d reset %d dmade %d dmada %d dmape %d\n", ++ etc->txnobuf, etc->reset, etc->dmade, etc->dmada, etc->dmape); ++ ++ /* hardware mib pkt and octet counters wrap too quickly to be useful */ ++ bcm_bprintf(b, "rxnobuf %d rxdmauflo %d rxoflo %d rxbadlen %d " ++ "rxgiants %d rxoflodiscards %d\n", ++ etc->rxnobuf, etc->rxdmauflo, etc->rxoflo, etc->rxbadlen, ++ etc->rxgiants, etc->rxoflodiscards); ++ ++ 
bcm_bprintf(b, "chained %d chainedsz1 %d unchained %d maxchainsz %d currchainsz %d\n", ++ etc->chained, etc->chainedsz1, etc->unchained, etc->maxchainsz, ++ etc->currchainsz); ++ ++ bcm_bprintf(b, "\n"); ++} ++ ++uint ++etc_totlen(etc_info_t *etc, void *p) ++{ ++ uint total; ++ ++ total = 0; ++ for (; p; p = PKTNEXT(etc->osh, p)) { ++ total += PKTLEN(etc->osh, p); ++ } ++ return (total); ++} ++ ++#ifdef BCMDBG ++void ++etc_prhdr(char *msg, struct ether_header *eh, uint len, int unit) ++{ ++ char da[32], sa[32]; ++ ++ if (msg && (msg[0] != '\0')) { ++ printf("et%d: %s: ", unit, msg); ++ } else { ++ printf("et%d: ", unit); ++ } ++ ++ printf("dst %s src %s type 0x%04X len %d\n", ++ bcm_ether_ntoa((struct ether_addr *)eh->ether_dhost, da), ++ bcm_ether_ntoa((struct ether_addr *)eh->ether_shost, sa), ++ ntoh16(eh->ether_type), ++ len); ++} ++void ++etc_prhex(char *msg, uchar *buf, uint nbytes, int unit) ++{ ++ if (msg && (msg[0] != '\0')) { ++ printf("et%d: %s:\n", unit, msg); ++ } else { ++ printf("et%d:\n", unit); ++ } ++ ++ prhex(NULL, buf, nbytes); ++} ++#endif /* BCMDBG */ ++ ++int ++etc_gmac_speed(int gmac) ++{ ++ char name[16], *speed; ++ sprintf(name, "et%dspeed", gmac); ++ ++ speed = nvram_get(name); ++ if (speed == NULL) { ++ printf("%s default GMAC%d speed: auto\n", __FUNCTION__, gmac); ++ return ET_AUTO; ++ } ++ ++ if (!strcmp(speed, "2500")) { ++ printf("%s specifing GMAC%d speed: 2500\n", __FUNCTION__, gmac); ++ return ET_2500FULL; ++ } else if (!strcmp(speed, "1000")) { ++ printf("%s specifing GMAC%d speed: 1000\n", __FUNCTION__, gmac); ++ return ET_1000FULL; ++ } else if (!strcmp(speed, "100")) { ++ printf("%s specifing GMAC%d speed: 100\n", __FUNCTION__, gmac); ++ return ET_100FULL; ++ } else if (!strcmp(speed, "10")) { ++ printf("%s specifing GMAC%d speed: 10\n", __FUNCTION__, gmac); ++ return ET_10FULL; ++ } ++ ++ printf("%s default GMAC%d speed: auto\n", __FUNCTION__, gmac); ++ return ET_AUTO; ++} +diff -uprN -EbwB --no-dereference -X 
/projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/et/sys/etc.h b/drivers/net/ethernet/broadcom/gmac/src/et/sys/etc.h +--- a/drivers/net/ethernet/broadcom/gmac/src/et/sys/etc.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/et/sys/etc.h 2017-11-09 17:53:43.902294000 +0800 +@@ -0,0 +1,295 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * Common [OS-independent] header file for ++ * Broadcom BCM47XX 10/100Mbps Ethernet Device Driver ++ * ++ * $Id: etc.h 327582 2012-04-14 05:02:37Z kenlo $ ++ */ ++ ++#ifndef _ETC_H_ ++#define _ETC_H_ ++ ++#include ++ ++#define MAXMULTILIST 32 ++ ++#ifndef ch_t ++#define ch_t void ++#endif ++ ++#define NUMTXQ 1 ++ ++ ++#define TXREC_THR 8 ++ ++#if defined(__ECOS) ++#define IOCBUFSZ 4096 ++#elif defined(__linux__) ++#define IOCBUFSZ 16384 ++#else ++#define IOCBUFSZ 4096 ++#endif ++ ++struct etc_info; /* forward declaration */ ++struct bcmstrbuf; /* forward declaration */ ++ ++/* each chip type supports a set of chip-type-specific ops */ ++struct chops { ++ bool (*id)(uint vendor, uint device); /* return true if match */ ++ void *(*attach)(struct etc_info *etc, void *dev, void *regs); ++ void (*detach)(ch_t *ch); /* free chip private state */ ++ void (*reset)(ch_t *ch); /* chip reset */ ++ void (*init)(ch_t *ch, uint options); /* chip init */ ++ bool (*tx)(ch_t *ch, void *p); /* transmit frame */ ++ void *(*rx)(ch_t *ch); /* receive frame */ ++ void (*rxfill)(ch_t *ch); /* post dma rx buffers */ ++ int (*getintrevents)(ch_t *ch, bool in_isr); /* return intr events */ ++ bool (*errors)(ch_t *ch); /* handle chip errors */ ++ void (*intrson)(ch_t *ch); /* enable chip interrupts */ ++ void (*intrsoff)(ch_t *ch); /* disable chip interrupts */ ++ void (*txreclaim)(ch_t *ch, bool all); /* reclaim transmit resources */ ++ void (*rxreclaim)(ch_t *ch); /* reclaim receive resources */ ++ void (*statsupd)(ch_t *ch); /* update sw 
stat counters */ ++ void (*dumpmib)(ch_t *ch, struct bcmstrbuf *, bool clear); /* get sw mib counters */ ++ void (*enablepme)(ch_t *ch); /* enable PME */ ++ void (*disablepme)(ch_t *ch); /* disable PME */ ++ void (*phyreset)(ch_t *ch); /* reset phy */ ++ uint16 (*phyrd)(ch_t *ch, uint phyaddr, uint reg); /* read phy register */ ++ void (*phywr)(ch_t *ch, uint phyaddr, uint reg, uint16 val); /* write phy register */ ++ void (*dump)(ch_t *ch, struct bcmstrbuf *b); /* debugging output */ ++ void (*longname)(ch_t *ch, char *buf, uint bufsize); /* return descriptive name */ ++ void (*duplexupd)(ch_t *ch); /* keep mac duplex consistent */ ++#if defined(CONFIG_SERDES_ASYMMETRIC_MODE) ++ void (*forcespddpx)(ch_t *ch); /* force the speed and duplex */ ++#endif /* (defined(CONFIG_SERDES_ASYMMETRIC_MODE)) */ ++ void (*phyenable)(ch_t *ch, uint eth_num, uint phyaddr, int enable); /* enable phy */ ++}; ++ ++/* ++ * "Common" os-independent software state structure. ++ */ ++typedef struct etc_info { ++ void *et; /* pointer to os-specific private state */ ++ uint unit; /* device instance number */ ++ void *osh; /* pointer to os handler */ ++ bool pktc; /* packet chaining enabled or not */ ++ int pktcbnd; /* max # of packets to chain */ ++ void *mib; /* pointer to s/w maintained mib counters */ ++ bool up; /* interface up and running */ ++ bool promisc; /* promiscuous destination address */ ++ bool qos; /* QoS priority determination on rx */ ++ bool loopbk; /* loopback override mode */ ++ ++ int forcespeed; /* disable autonegotiation and force speed/duplex */ ++ uint advertise; /* control speed/duplex advertised caps */ ++ uint advertise2; /* control gige speed/duplex advertised caps */ ++ bool needautoneg; /* request restart autonegotiation */ ++ int speed; /* current speed: 10, 100 */ ++ int duplex; /* current duplex: 0=half, 1=full */ ++ ++ bool piomode; /* enable programmed io (!dma) */ ++ void *pioactive; /* points to pio packet being transmitted */ ++ volatile uint 
*txavail[NUMTXQ]; /* dma: # tx descriptors available */ ++ ++ uint16 vendorid; /* pci function vendor id */ ++ uint16 deviceid; /* pci function device id */ ++ uint chip; /* chip number */ ++ uint chiprev; /* chip revision */ ++ uint coreid; /* core id */ ++ uint corerev; /* core revision */ ++ ++ bool nicmode; /* is this core using its own pci i/f */ ++ ++ struct chops *chops; /* pointer to chip-specific opsvec */ ++ void *ch; /* pointer to chip-specific state */ ++ void *robo; /* optional robo private data */ ++ ++ uint txq_state; /* tx queues state bits */ ++ uint coreunit; /* sb chips: chip enet instance # */ ++ uint phyaddr; /* mdio 5-bit phy address for external phy */ ++ uint int_phyaddr; /* mdio 5-bit phy address for internal serdes*/ ++ uint mdcport; /* sb chips: which mii to use (enet core #) to access phy */ ++ ++ struct ether_addr cur_etheraddr; /* our local ethernet address */ ++ struct ether_addr perm_etheraddr; /* original sprom local ethernet address */ ++ ++ struct ether_addr multicast[MAXMULTILIST]; ++ uint nmulticast; ++ bool allmulti; /* enable all multicasts */ ++ ++ bool linkstate; /* link integrity state */ ++ bool pm_modechange; /* true if mode change is to due pm */ ++ ++ uint32 now; /* elapsed seconds */ ++ ++ uint32 boardflags; /* board flags */ ++ uint32 txrec_thresh; /* # of tx frames after which reclaim is done */ ++ uint32 switch_mode; /* switch mode */ ++ ++ uint32 mdio_init_time; /* # of seconds to wait before release mdio bus */ ++ ++#ifdef GMAC_RATE_LIMITING ++ /* rate limiting */ ++ bool rl_enabled; /* enable rate limiting logic */ ++ struct timer_list rl_timer; /* one second ratelimiting timer */ ++ bool rl_set; /* indicate the timer is set or not */ ++ uint32 rl_stopping_all_packets; ++ uint32 rl_stopping_broadcasts; ++ uint32 rl_dropped_all_packets; ++ uint32 rl_dropped_bc_packets; ++ uint32 rl_dropped_packets; ++ uint32 rl_prior_jiffies; ++ uint32 rx_bc_frame_cnt; ++#endif /* GMAC_RATE_LIMITING */ ++ ++ /* sw-maintained stat 
counters */ ++ uint32 txframes[NUMTXQ]; /* transmitted frames on each tx fifo */ ++ uint32 txframe; /* transmitted frames */ ++ uint32 txbyte; /* transmitted bytes */ ++ uint32 rxframe; /* received frames */ ++ uint32 rxbyte; /* received bytes */ ++ uint32 txerror; /* total tx errors */ ++ uint32 txnobuf; /* tx out-of-buffer errors */ ++ uint32 rxerror; /* total rx errors */ ++ uint32 rxgiants; /* total rx giant frames */ ++ uint32 rxnobuf; /* rx out-of-buffer errors */ ++ uint32 reset; /* reset count */ ++ uint32 dmade; /* pci descriptor errors */ ++ uint32 dmada; /* pci data errors */ ++ uint32 dmape; /* descriptor protocol error */ ++ uint32 rxdmauflo; /* receive descriptor underflow */ ++ uint32 rxoflo; /* receive fifo overflow */ ++ uint32 txuflo; /* transmit fifo underflow */ ++ uint32 rxoflodiscards; /* frames discarded during rx fifo overflow */ ++ uint32 rxbadlen; /* 802.3 len field != read length */ ++ uint32 chained; /* number of frames chained */ ++ uint32 chainedsz1; /* number of chain size 1 frames */ ++ uint32 unchained; /* number of frames not chained */ ++ uint32 maxchainsz; /* max chain size so far */ ++ uint32 currchainsz; /* current chain size */ ++ /* my counters */ ++ uint32 txfrm; /* tx frames */ ++ uint32 txfrmdropped; /* tx dropped frames */ ++ uint32 txqlen; ++ uint32 txqstop; ++ uint32 txdmafull; ++ uint32 txmaxlen; ++ uint32 txsgpkt; ++ uint32 rxquota; ++ uint32 rxdmastopped; ++} etc_info_t; ++ ++/* interrupt event bitvec */ ++#define INTR_TX 0x1 ++#define INTR_RX 0x2 ++#define INTR_ERROR 0x4 ++#define INTR_TO 0x8 ++#define INTR_NEW 0x10 ++ ++/* forcespeed values */ ++#define ET_AUTO -1 ++#define ET_10HALF 0 ++#define ET_10FULL 1 ++#define ET_100HALF 2 ++#define ET_100FULL 3 ++#define ET_1000HALF 4 ++#define ET_1000FULL 5 ++#define ET_2500FULL 6 /* 2.5Gigabit */ ++ ++/* init options */ ++#define ET_INIT_FULL 0x1 ++#define ET_INIT_INTRON 0x2 ++ ++/* Specific init options for et_init */ ++#define ET_INIT_DEF_OPTIONS (ET_INIT_FULL | 
ET_INIT_INTRON) ++#define ET_INIT_INTROFF (ET_INIT_FULL) ++#define ET_INIT_PARTIAL (0) ++ ++/* macro to safely clear the UP flag */ ++#define ET_FLAG_DOWN(x) (*(x)->chops->intrsoff)((x)->ch); \ ++ (x)->up = FALSE; ++ ++/* ++ * Least-common denominator rxbuf start-of-data offset: ++ * Must be >= size of largest rxhdr ++ * Must be 2-mod-4 aligned so IP is 0-mod-4 ++ */ ++#define HWRXOFF 30 ++ ++#define TC_BK 0 /* background traffic class */ ++#define TC_BE 1 /* best effort traffic class */ ++#define TC_CL 2 /* controlled load traffic class */ ++#define TC_VO 3 /* voice traffic class */ ++#define TC_NONE -1 /* traffic class none */ ++ ++#define RX_Q0 0 /* receive DMA queue */ ++#define NUMRXQ 1 /* gmac has one rx queue */ ++ ++#define TX_Q0 TC_BK /* DMA txq 0 */ ++#define TX_Q1 TC_BE /* DMA txq 1 */ ++#define TX_Q2 TC_CL /* DMA txq 2 */ ++#define TX_Q3 TC_VO /* DMA txq 3 */ ++ ++static inline uint32 ++etc_up2tc(uint32 up) ++{ ++ extern uint32 up2tc[]; ++ return (up2tc[up]); ++} ++ ++static inline uint32 ++etc_priq(uint32 txq_state) ++{ ++ extern uint32 priq_selector[]; ++ return (priq_selector[txq_state]); ++} ++ ++/* rx header flags bits */ ++#define RXH_FLAGS(etc, rxh) (((etc)->coreid == GMAC_CORE_ID) ? \ ++ ((((bcmgmacrxh_t *)(rxh))->flags) & htol16(GRXF_CRC | GRXF_OVF | GRXF_OVERSIZE)) : \ ++ ((((bcmenetrxh_t *)(rxh))->flags) & htol16(RXF_NO | RXF_RXER | RXF_CRC | RXF_OV))) ++ ++#define RXH_OVERSIZE(etc, rxh) (((etc)->coreid == GMAC_CORE_ID) ? \ ++ (ltoh16(((bcmgmacrxh_t *)(rxh))->flags) & GRXF_OVERSIZE) : FALSE) ++ ++#define RXH_PT(etc, rxh) (ltoh16(((bcmgmacrxh_t *)(rxh))->flags) & GRXF_PT_MASK) ++ ++#define RXH_CRC(etc, rxh) (((etc)->coreid == GMAC_CORE_ID) ? \ ++ (ltoh16(((bcmgmacrxh_t *)(rxh))->flags) & GRXF_CRC) : \ ++ (ltoh16(((bcmenetrxh_t *)(rxh))->flags) & RXF_CRC)) ++ ++#define RXH_OVF(etc, rxh) (((etc)->coreid == GMAC_CORE_ID) ? 
\ ++ (ltoh16(((bcmgmacrxh_t *)(rxh))->flags) & GRXF_OVF) : \ ++ (ltoh16(((bcmenetrxh_t *)(rxh))->flags) & RXF_OV)) ++ ++#define RXH_RXER(etc, rxh) (((etc)->coreid == GMAC_CORE_ID) ? \ ++ FALSE : (ltoh16(((bcmenetrxh_t *)(rxh))->flags) & RXF_RXER)) ++ ++#define RXH_NO(etc, rxh) (((etc)->coreid == GMAC_CORE_ID) ? \ ++ FALSE : (ltoh16(((bcmenetrxh_t *)(rxh))->flags) & RXF_NO)) ++ ++/* Used for fa+ error determination */ ++#define RXH_CTFERROR(etc, rxh) (((etc)->coreid == GMAC_CORE_ID) ? \ ++ (ltoh16(((bcmenetrxh_t *)(rxh))->flags) & (GRXF_CTFERR | GRXF_CRC | GRXF_OVF)) : FALSE) ++ ++#define ET_GMAC(etc) ((etc)->coreid == GMAC_CORE_ID) ++ ++/* exported prototypes */ ++extern struct chops *etc_chipmatch(uint vendor, uint device); ++extern void *etc_attach(void *et, uint vendor, uint device, uint unit, void *dev, void *regsva); ++extern void etc_detach(etc_info_t *etc); ++extern void etc_reset(etc_info_t *etc); ++extern void etc_init(etc_info_t *etc, uint options); ++extern void etc_up(etc_info_t *etc); ++extern uint etc_down(etc_info_t *etc, int reset); ++extern int etc_ioctl(etc_info_t *etc, int cmd, void *arg); ++extern int etc_iovar(etc_info_t *etc, uint cmd, uint set, void *arg); ++extern void etc_promisc(etc_info_t *etc, uint on); ++extern void etc_qos(etc_info_t *etc, uint on); ++extern void etc_dump(etc_info_t *etc, struct bcmstrbuf *b); ++extern void etc_watchdog(etc_info_t *etc); ++extern uint etc_totlen(etc_info_t *etc, void *p); ++extern void etc_robomib(etc_info_t *etc); ++ ++#endif /* _ETC_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/et/sys/etcgmac.c b/drivers/net/ethernet/broadcom/gmac/src/et/sys/etcgmac.c +--- a/drivers/net/ethernet/broadcom/gmac/src/et/sys/etcgmac.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/et/sys/etcgmac.c 2017-11-09 17:53:43.904292000 +0800 +@@ -0,0 +1,2593 @@ ++/* ++ * 
$Copyright Open Broadcom Corporation$ ++ * ++ * Broadcom Gigabit Ethernet MAC (Unimac) core. ++ * This file implements the chip-specific routines for the GMAC core. ++ * ++ * $Id: etcgmac.c 327582 2012-04-14 05:02:37Z kenlo $ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include /* for et_phyxx() routines */ ++#include ++#include ++#include ++#include "bcmiproc_phy.h" ++ ++#define ENABLE_MIB_REG_DUMP ++ ++/* MDIO address definitation */ ++#if defined(CONFIG_MACH_HR3) ++#if defined(CONFIG_MACH_WH2) ++#define GMAC_EXT_PHY_ADDR 0x18 ++#define GMAC_INT_PHY_ADDR 0x14 ++#else ++#define GMAC_EXT_PHY_ADDR 0x18 ++#define GMAC_INT_PHY_ADDR 0xFF ++#endif ++#elif defined(CONFIG_MACH_GH) ++#define GMAC_EXT_PHY_ADDR 0x18 ++#define GMAC_INT_PHY_ADDR 0xFF ++#elif defined(CONFIG_MACH_GH2) ++#define GMAC_EXT_PHY_ADDR 0x10 ++#define GMAC_INT_PHY_ADDR 0x19 ++#else ++#define GMAC_EXT_PHY_ADDR 0x01 ++#define GMAC_INT_PHY_ADDR 0x01 ++#endif ++ ++ ++extern void __iomem *get_iproc_wrap_ctrl_base(void); ++extern void __iomem *get_iproc_idm_base(int index); ++#if defined (CONFIG_MACH_KT2) ++#define IPROC_WRAP_MISC_CONTROL_OFFSET 0x24 ++#elif (defined (CONFIG_MACH_HX4) || defined (CONFIG_MACH_HR2)) ++#define IPROC_WRAP_MISC_CONTROL_OFFSET 0x3C ++#else ++#define IPROC_WRAP_MISC_CONTROL_OFFSET 0x40 ++#endif ++ ++static const u32 idm_ioctl_offset[] = { ++ 0x10408, ++#if defined (CONFIG_MACH_HX4) || defined (CONFIG_MACH_KT2) ++ 0x11408, ++#elif defined (CONFIG_MACH_SB2) || defined(CONFIG_MACH_GH2) ++ 0x1f408, ++#endif ++}; ++#define AMAC_IDM_IO_CONTROL_DIRECT__CLK_250_SEL 6 ++#define AMAC_IDM_IO_CONTROL_DIRECT__DIRECT_GMII_MODE 5 ++#define AMAC_IDM_IO_CONTROL_DIRECT__DEST_SYNC_MODE_EN 3 ++ ++#if defined(CONFIG_MACH_HX4) ++#define IPROC_WRAP_MISC_CONTROL__QUAD_SERDES_MDIO_SEL 3 ++#define 
IPROC_WRAP_MISC_CONTROL__QUAD_SERDES_CTRL_SEL 2 ++#define IPROC_WRAP_MISC_CONTROL__IPROC_MDIO_SEL 4 ++#endif /* defined(CONFIG_MACH_HX4) */ ++ ++#if defined(CONFIG_MACH_KT2) ++#define IPROC_WRAP_MISC_CONTROL__UNICORE_SERDES_CTRL_SEL 1 ++#define IPROC_WRAP_MISC_CONTROL__UNICORE_SERDES_MDIO_SEL 2 ++#define IPROC_WRAP_MISC_CONTROL__IPROC_MDIO_SEL 3 ++#endif /* defined(CONFIG_MACH_KT2) */ ++ ++#if defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_WH2) ++#define IPROC_WRAP_MISC_CONTROL__IPROC_MDIO_SEL 1 ++#endif /* defined(CONFIG_MACH_SB2) */ ++ ++ ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2)) ++#include "bcmiproc_serdes.h" ++#include "bcmiproc_phy5461s.h" ++#elif defined(CONFIG_MACH_GH2) ++#include "sgmiiplus2_serdes.h" ++#include "phy542xx.h" ++#elif defined(CONFIG_MACH_HR3) ++#if defined(CONFIG_MACH_WH2) ++#include "../../../mdio/iproc_mdio.h" ++#include "sgmiiplus2_serdes.h" ++#include "bcmiproc_egphy28.h" ++#include ++#define IPROC_CMICD_COMPATIBLE "brcm,iproc-cmicd" ++#define CMIC_SBUS_RING_MAP_0_7(base) (base + 0x10098) ++#define CMIC_SBUS_RING_MAP_8_15(base) (base + 0x1009C) ++#define CMIC_SBUS_RING_MAP_16_23(base) (base + 0x100A0) ++#define CMIC_SBUS_RING_MAP_24_31(base) (base + 0x100A4) ++extern u32 cmicd_schan_read(void __iomem *base, u32 ctrl, u32 addr); ++extern u32 cmicd_schan_write(void __iomem *base, u32 ctrl, u32 addr, u32 val); ++extern int egphy28_init(void __iomem *base, u32 phy_addr); ++#else ++#include "bcmiproc_phy5481.h" ++#endif ++#elif defined(CONFIG_MACH_HR2) ++#include "bcmiproc_phy5221.h" ++#elif defined(CONFIG_MACH_GH) ++#include "bcmiproc_phy5481.h" ++#endif ++ ++ ++#if !defined(CONFIG_MACH_HR2) ++/* BCM5221 on HR2 board does not support this feature */ ++#define CONFIG_FORCED_MODE_AUTO_MDIX 1 ++#endif ++ ++struct bcmgmac; /* forward declaration */ ++#define ch_t struct bcmgmac ++#include ++ ++extern int nvram_env_gmac_name(int gmac, char *name); ++ ++/* private chip state */ ++struct bcmgmac { ++ 
void *et; /* pointer to et private state */ ++ etc_info_t *etc; /* pointer to etc public state */ ++ ++ gmac_commonregs_t *regscomm; /* pointer to GMAC COMMON registers */ ++ gmacregs_t *regs; /* pointer to chip registers */ ++ osl_t *osh; /* os handle */ ++ ++ void *etphy; /* pointer to et for shared mdc/mdio contortion */ ++ ++ uint32 intstatus; /* saved interrupt condition bits */ ++ uint32 intmask; /* current software interrupt mask */ ++ uint32 def_intmask;/* default interrupt mask */ ++ ++ hnddma_t *di[NUMTXQ];/* dma engine software state */ ++ ++ bool mibgood; /* true once mib registers have been cleared */ ++ gmacmib_t mib; /* mib statistic counters */ ++ si_t *sih; /* si utils handle */ ++ ++ char *vars; /* sprom name=value */ ++ uint vars_size; ++ ++ void *adm; /* optional admtek private data */ ++ mcfilter_t mf; /* multicast filter */ ++}; ++ ++/* local prototypes */ ++static bool chipid(uint vendor, uint device); ++static void *chipattach(etc_info_t *etc, void *osh, void *regsva); ++static void chipdetach(ch_t *ch); ++static void chipreset(ch_t *ch); ++static void chipinit(ch_t *ch, uint options); ++static bool chiptx(ch_t *ch, void *p); ++static void *chiprx(ch_t *ch); ++static void chiprxfill(ch_t *ch); ++static int chipgetintrevents(ch_t *ch, bool in_isr); ++static bool chiperrors(ch_t *ch); ++static void chipintrson(ch_t *ch); ++static void chipintrsoff(ch_t *ch); ++static void chiptxreclaim(ch_t *ch, bool all); ++static void chiprxreclaim(ch_t *ch); ++static void chipstatsupd(ch_t *ch); ++static void chipdumpmib(ch_t *ch, struct bcmstrbuf *b, bool clear); ++static void chipenablepme(ch_t *ch); ++static void chipdisablepme(ch_t *ch); ++static void chipphyreset(ch_t *ch); ++static uint16 chipphyrd(ch_t *ch, uint phyaddr, uint reg); ++static void chipphywr(ch_t *ch, uint phyaddr, uint reg, uint16 val); ++static void chipdump(ch_t *ch, struct bcmstrbuf *b); ++static void chiplongname(ch_t *ch, char *buf, uint bufsize); ++static void chipduplexupd(ch_t 
*ch); ++#ifdef CONFIG_SERDES_ASYMMETRIC_MODE ++static void chipforcespddpx(ch_t *ch); ++#endif /* CONFIG_SERDES_ASYMMETRIC_MODE */ ++ ++static void chipphyinit(ch_t *ch); ++static void chipphyor(ch_t *ch, uint phyaddr, uint reg, uint16 v); ++static void chipphyforce(ch_t *ch, uint phyaddr); ++static void chipphyadvertise(ch_t *ch, uint phyaddr); ++static void chipphyenable(ch_t *ch, uint eth_num, uint phyaddr, int enable); ++static void chipdumpregs(ch_t *ch, gmacregs_t *regs, struct bcmstrbuf *b); ++static void gmac_mf_cleanup(ch_t *ch); ++static int gmac_speed(ch_t *ch, uint32 speed); ++#if defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || \ ++ defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_GH2) || defined(CONFIG_MACH_WH2) ++static void gmac_serdes_init(ch_t *ch); ++#endif ++static void gmac_miiconfig(ch_t *ch); ++ ++struct chops bcmgmac_et_chops = { ++ chipid, ++ chipattach, ++ chipdetach, ++ chipreset, ++ chipinit, ++ chiptx, ++ chiprx, ++ chiprxfill, ++ chipgetintrevents, ++ chiperrors, ++ chipintrson, ++ chipintrsoff, ++ chiptxreclaim, ++ chiprxreclaim, ++ chipstatsupd, ++ chipdumpmib, ++ chipenablepme, ++ chipdisablepme, ++ chipphyreset, ++ chipphyrd, ++ chipphywr, ++ chipdump, ++ chiplongname, ++ chipduplexupd, ++#ifdef CONFIG_SERDES_ASYMMETRIC_MODE ++ chipforcespddpx, ++#endif /* CONFIG_SERDES_ASYMMETRIC_MODE */ ++ chipphyenable ++}; ++ ++static uint devices[] = { ++ BCM56150_CHIP_ID, ++ BCM56340_CHIP_ID, ++ BCM56450_CHIP_ID, ++ BCM53400_CHIP_ID, ++ BCM56260_CHIP_ID, ++ BCM56160_CHIP_ID, ++ BCM56170_CHIP_ID, ++ BCM53540_CHIP_ID, ++ 0x0000 ++}; ++ ++ ++#if defined(CONFIG_MACH_WH2) ++static uint32 select = 0x06; ++ ++static void ++cmid_schan_modify(void __iomem *base, u32 ctrl, u32 addr, u32 val, u32 mask) ++{ ++ u32 ori_val; ++ ++ ori_val = cmicd_schan_read(base, ctrl, addr); ++ ori_val &= ~mask; ++ ori_val |= (val & mask); ++ cmicd_schan_write(base, ctrl + 0x08000000, addr, ori_val); ++} ++ ++static void power_on_serdesphy(ch_t *ch) ++{ ++ void 
__iomem *base; ++ uint32 val; ++ int i; ++ ++ select = (ioread32(get_iproc_wrap_ctrl_base() + 0xa8)); /* IPROC_WRAP_TOP_STRAP_STATUS_1 */ ++ base = of_iomap(of_find_compatible_node(NULL, NULL, IPROC_CMICD_COMPATIBLE), 0); ++ //* Configure SBUS Ring Map for TOP, block id = 16, ring number = 4 */ ++ writel(0x00000000, CMIC_SBUS_RING_MAP_0_7(base)); ++ writel(0x00430000, CMIC_SBUS_RING_MAP_8_15(base)); ++ writel(0x00005064, CMIC_SBUS_RING_MAP_16_23(base)); ++ writel(0x00000000, CMIC_SBUS_RING_MAP_24_31(base)); ++ ++ if (select & 0x04) /* select Serdes (SGMII Plus2) path */ ++ { ++ printf("AMAC selects SGMII path... \n"); ++ ++ /* Reset TOP_SGMII_CTRL_REG through S-Channel */ ++ /* bit 2 (RSTB_HW) & 3 (IDDQ) & 4 (PWRDWN) = 0 */ ++ cmid_schan_modify(base, 0x2c800200, 0x0207e800, 0x0, 0x0000001c); ++ /* bit 2 (RSTB_HW) & 1 (RSTB_MDIOREGS) & 0 (RSTB_PLL) = 1 */ ++ cmid_schan_modify(base, 0x2c800200, 0x0207e800, 0x00000007, 0x00000007); ++ ++ /* Hardware reset 4th lane. PHY addr = 0x17, Reg addr = 0x0000, bit 15 = 1 */ ++ val = chipphyrd(ch, GMAC_INT_PHY_ADDR, 0x0000); ++ val |= (1 << 15); ++ chipphywr(ch, GMAC_INT_PHY_ADDR, 0x0000, val); ++ ++ /* Power down other 3 lanes. PHY addr = (0x14, 0x15, 0x16) */ ++ for (i = 0; i < 3; ++i) ++ { ++ val = chipphyrd(ch, GMAC_INT_PHY_ADDR + i, 0x0); ++ val |= (1 << 11); ++ chipphywr(ch, GMAC_INT_PHY_ADDR + i, 0x0, val); ++ } ++ } else /* select EGPHY28 path */ ++ { ++ printf("AMAC selects EGPHY path... 
\n"); ++ ++ /* TOP_QGPHY_CTRL_0.EXT_PWRDOWN[23:20] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033800, 0x00F00000, 0x00F00000); ++ /* TOP_QGPHY_CTRL_2.GPHY_IDDQ_GLOBAL_PWR[18] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x0, 0x040000); ++ /* TOP_QGPHY_CTRL_2.IDDQ_BIAS[5] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x20, 0x20); ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x200000, 0x200000); ++ ++ /* TOP_QGPHY_CTRL_2.GPHY_IDDQ_GLOBAL_PWR[18] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x040000, 0x040000); ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x0, 0x200000); ++ /* Reset TOP_SOFT_RESET_REG bit 4 (GXP2_RST) & 5 (GXP0_RST) & 6 (GXP1_RST) = 0 */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x0, 0x00000070); ++ ++ mdelay(100); ++ ++ /* Give initial value */ ++ /* TOP_QGPHY_CTRL_0.EXT_PWRDOWN[23:20] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033800, 0x0, 0x00F00000); ++ /* TOP_QGPHY_CTRL_2.GPHY_IDDQ_GLOBAL_PWR[18] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x40000, 0x040000); ++ /* TOP_QGPHY_CTRL_2.IDDQ_BIAS[5] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x0, 0x20); ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x0, 0x200000); ++ ++ /* TOP_QGPHY_CTRL_2.GPHY_IDDQ_GLOBAL_PWR[18] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x0, 0x040000); ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x200000, 0x200000); ++ /* Reset TOP_SOFT_RESET_REG bit 4 (GXP2_RST) & 5 (GXP0_RST) & 6 (GXP1_RST) = 1 */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x00000070, 0x00000070); ++ } ++} ++#endif ++ ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2)) ++static void *wrapaddr = 0; ++void 
gmac_set_amac_mdio(int en) ++{ ++ u32 tmp; ++#if defined(CONFIG_MACH_HX4) ++ u32 mdio_sel= IPROC_WRAP_MISC_CONTROL__QUAD_SERDES_MDIO_SEL; ++ u32 ctrl_sel= IPROC_WRAP_MISC_CONTROL__QUAD_SERDES_CTRL_SEL; ++#elif defined(CONFIG_MACH_KT2) ++ u32 mdio_sel= IPROC_WRAP_MISC_CONTROL__UNICORE_SERDES_MDIO_SEL; ++ u32 ctrl_sel= IPROC_WRAP_MISC_CONTROL__UNICORE_SERDES_CTRL_SEL; ++#endif ++ u32 iproc_mdio_sel= IPROC_WRAP_MISC_CONTROL__IPROC_MDIO_SEL; ++ ++ if (en) { ++ /* Get register base address */ ++ wrapaddr = get_iproc_wrap_ctrl_base() + IPROC_WRAP_MISC_CONTROL_OFFSET; ++ } ++ ++ tmp = ioread32(wrapaddr); ++ if (en) { ++#if defined(CONFIG_MACH_SB2) ++ /* set bits IPROC_WRAP_MISC_CONTROL__IPROC_MDIO_SEL ++ so AMAC can access the Serdes and Phy */ ++ tmp |= (1 << iproc_mdio_sel); ++#else ++ /* set bits IPROC_WRAP_MISC_CONTROL__IPROC_MDIO_SEL, ++ IPROC_WRAP_MISC_CONTROL__QUAD_SERDES_MDIO_SEL & ++ IPROC_WRAP_MISC_CONTROL__QUAD_SERDES_CTRL_SEL ++ so AMAC can access the Serdes and Phy */ ++ tmp |= ((1 << mdio_sel) | (1 << ctrl_sel) | (1 << iproc_mdio_sel)); ++#endif ++ } else { ++#if defined(CONFIG_MACH_SB2) ++ /* clear bits IPROC_WRAP_MISC_CONTROL__IPROC_MDIO_SEL ++ so CMIC can access the Serdes and Phy */ ++ tmp &= ~(1 << iproc_mdio_sel); ++#else ++ /* clear bits IPROC_WRAP_MISC_CONTROL__IPROC_MDIO_SEL & ++ IPROC_WRAP_MISC_CONTROL__QUAD_SERDES_MDIO_SEL ++ so CMIC can access the Serdes and Phy */ ++ tmp &= ~((1 << mdio_sel) | (1 << iproc_mdio_sel)); ++#endif ++ } ++ iowrite32(tmp, wrapaddr); ++ ++ if (!en) { ++ wrapaddr=0; ++ } ++} ++ ++int gmac_has_mdio_access(void) ++{ ++ u32 tmp; ++ u32 regmsk = (1 << IPROC_WRAP_MISC_CONTROL__IPROC_MDIO_SEL); ++ ++#if defined(CONFIG_MACH_HX4) ++ regmsk |= ((1 << IPROC_WRAP_MISC_CONTROL__QUAD_SERDES_MDIO_SEL) | ++ (1 << IPROC_WRAP_MISC_CONTROL__QUAD_SERDES_CTRL_SEL)); ++#elif defined(CONFIG_MACH_KT2) ++ regmsk |= ((1 << IPROC_WRAP_MISC_CONTROL__UNICORE_SERDES_MDIO_SEL) | ++ (1 << IPROC_WRAP_MISC_CONTROL__UNICORE_SERDES_CTRL_SEL)); ++#endif ++ 
++ if (wrapaddr==0) { ++ /* if no wrapaddr then no access */ ++ return 0; ++ } ++ ++ tmp = ioread32(wrapaddr); ++ tmp &= ~regmsk; ++ if (tmp == regmsk) { ++ return 0; ++ } ++ ++ return 1; ++} ++#endif /* (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2)) */ ++ ++/* This api will determine if this unit specified is the last interface. */ ++bool gmac_last_interface(int unit) ++{ ++ char name[128]; ++ int idx; ++ ++ /* if interface 2 or greater then must be last */ ++ if (unit >= 2) { ++ return true; ++ } ++ ++ /* Look to see if there is a next interface specified */ ++ for (idx = unit + 1; idx <= 2; idx++) { ++ nvram_env_gmac_name(idx, name); ++ if (getvar(NULL, name) != NULL) { ++ /* there is a next interface */ ++ return false; ++ } ++ } ++ /* no other interfaces */ ++ return true; ++} ++ ++ ++static bool ++chipid(uint vendor, uint device) ++{ ++ int idx; ++ ++ if (vendor != VENDOR_BROADCOM) { ++ ET_ERROR(("%s ERROR: NOT a BROADCOM Vendor ID (0x%x)\n", __FUNCTION__, vendor)); ++ return (FALSE); ++ } ++ ++ for (idx = 0; devices[idx]; idx++) { ++ if (device == devices[idx]) { ++ return (TRUE); ++ } ++ } ++ ++ ET_ERROR(("%s ERROR: UNKNOWN Device ID (0x%x)\n", __FUNCTION__, device)); ++ printk("%s ERROR: UNKNOWN Device ID (0x%x)\n", __FUNCTION__, device); ++ return (FALSE); ++} ++ ++static void * ++chipattach(etc_info_t *etc, void *osh, void *regsva) ++{ ++ ch_t *ch; ++ gmacregs_t *regs; ++ void *amacidmaddr; ++ uint32 idx, tmp; ++ char name[16], *var; ++ uint boardflags, boardtype; ++ uint coreidx; ++ ulong flags; ++ ++ ET_TRACE(("et%d: chipattach: regsva 0x%lx\n", etc->unit, (ulong)regsva)); ++ ++ if ((ch = (ch_t *)MALLOC(osh, sizeof(ch_t))) == NULL) { ++ ET_ERROR(("et%d: chipattach: out of memory, malloced %d bytes\n", etc->unit, MALLOCED(osh))); ++ return (NULL); ++ } ++ bzero((char *)ch, sizeof(ch_t)); ++ ++ ch->etc = etc; ++ ch->et = etc->et; ++ ch->osh = osh; ++ ++ /* store the pointer to the sw mib */ ++ etc->mib = (void 
*)&ch->mib; ++ ++ /* get si handle */ ++ if ((ch->sih = si_attach(etc->deviceid, ch->osh, regsva, SI_BUS, NULL, &ch->vars, ++ &ch->vars_size)) == NULL) { ++ ET_ERROR(("et%d: chipattach: si_attach error\n", etc->unit)); ++ goto fail; ++ } ++ ++ if ((regs = (gmacregs_t *)si_setcore(ch->sih, GMAC_CORE_ID, etc->unit)) == NULL) { ++ ET_ERROR(("et%d: chipattach: Could not setcore to GMAC\n", etc->unit)); ++ goto fail; ++ } ++ ++ /* 2G_ENABLED: Enable IDM 250MHz for 2G mode */ ++ spin_lock_irqsave((spinlock_t *)&ch->sih->sih_lock, flags); ++ ++ coreidx = si_coreidx(ch->sih); ++ si_core_reset(ch->sih, 0, 0); ++ si_setcoreidx(ch->sih, coreidx); ++ ++ spin_unlock_irqrestore((spinlock_t *)&ch->sih->sih_lock, flags); ++ ++ ch->regs = regs; ++ etc->chip = ch->sih->chip; ++ etc->chiprev = ch->sih->chiprev; ++ etc->coreid = si_coreid(ch->sih); ++ etc->nicmode = !(ch->sih->bustype == SI_BUS); ++ etc->coreunit = si_coreunit(ch->sih); ++ etc->boardflags = getintvar(ch->vars, "boardflags"); ++ ++ boardflags = etc->boardflags; ++ boardtype = ch->sih->boardtype; ++ ++ etc->switch_mode = 0; ++ ++ /* ++ * Too much can go wrong in scanning MDC/MDIO playing "whos my phy?" . ++ * Instead, explicitly require the environment var "etphyaddr=". 
++ */ ++ ++ /* get our phyaddr value */ ++ sprintf(name, "et%dphyaddr", etc->coreunit); ++ var = getvar(NULL, name); ++ if (var == NULL) { ++ etc->phyaddr = etc->unit + GMAC_EXT_PHY_ADDR; ++ etc->int_phyaddr = etc->unit + GMAC_INT_PHY_ADDR; ++ } else { ++ etc->phyaddr = bcm_atoi(var) & EPHY_MASK; ++ etc->int_phyaddr = etc->unit + GMAC_INT_PHY_ADDR; ++ } ++ printf("et%d: chipattach: phyaddr(0x%x)\n", etc->unit, etc->phyaddr); ++ ++ /* nvram says no phy is present */ ++ if (etc->phyaddr == EPHY_NONE) { ++ ET_ERROR(("et%d: chipattach: phy not present\n", etc->unit)); ++ goto fail; ++ } ++ ++ /* reset the gmac core */ ++ chipreset(ch); ++ ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2)) ++ /* flip switch so AMAC can access serdes */ ++ if (wrapaddr == 0) { ++ gmac_set_amac_mdio(1); ++ } ++#elif defined(CONFIG_MACH_WH2) ++ /* power on SGMII/EGPHY most before chipphyreset() to read PHY ID */ ++ power_on_serdesphy(ch); ++#endif /* (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2)) */ ++ ++ /* Get register base address */ ++ amacidmaddr = get_iproc_idm_base(0) + idm_ioctl_offset[etc->unit]; ++ tmp = ioread32(amacidmaddr); ++ tmp &= ~(1 << AMAC_IDM_IO_CONTROL_DIRECT__CLK_250_SEL); ++ tmp |= (1 << AMAC_IDM_IO_CONTROL_DIRECT__DIRECT_GMII_MODE); ++ tmp |= (1 << AMAC_IDM_IO_CONTROL_DIRECT__DEST_SYNC_MODE_EN); ++ iowrite32(tmp, amacidmaddr); ++ ++#if defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || \ ++ defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_GH2) ++ /* enable serdes */ ++ gmac_serdes_init(ch); ++#elif defined(CONFIG_MACH_WH2) ++ if (select & 0x04) /* select Serdes (SGMII Plus2) path */ ++ gmac_serdes_init(ch); ++#endif ++ ++ /* dma attach */ ++ sprintf(name, "et%d", etc->coreunit); ++ ++ /* allocate dma resources for txqs */ ++ /* TX: TC_BK, RX: RX_Q0 */ ++ ch->di[0] = dma_attach(osh, name, ch->sih, ++ DMAREG(ch, DMA_TX, TX_Q0), ++ DMAREG(ch, DMA_RX, RX_Q0), ++ NTXD, NRXD, RXBUFSZ, -1, 
NRXBUFPOST, HWRXOFF, ++ &et_msg_level); ++ ++ for (idx = 0; idx < NUMTXQ; idx++) { ++ if (ch->di[idx] == NULL) { ++ ET_ERROR(("et%d: chipattach: dma_attach failed\n", etc->unit)); ++ goto fail; ++ } ++ } ++ ++ for (idx = 0; idx < NUMTXQ; idx++) { ++ if (ch->di[idx] != NULL) { ++ etc->txavail[idx] = (uint *)&ch->di[idx]->txavail; ++ } ++ } ++ ++ /* set default sofware intmask */ ++ sprintf(name, "et%d_no_txint", etc->coreunit); ++ if (getintvar(ch->vars, name)) { ++ /* if no_txint variable is non-zero we disable tx interrupts. ++ * we do the tx buffer reclaim once every few frames. ++ */ ++ ch->def_intmask = (DEF_INTMASK & ~(I_XI0 | I_XI1 | I_XI2 | I_XI3)); ++ etc->txrec_thresh = (((NTXD >> 2) > TXREC_THR) ? TXREC_THR - 1 : 1); ++ } else { ++ ch->def_intmask = DEF_INTMASK; ++ } ++ ++ ch->intmask = ch->def_intmask; ++ ++ /* reset phy: reset it once now */ ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++ if (ch->etc->unit == 0) { ++ serdes_reset_core(ch->etc->unit, etc->int_phyaddr); ++ } ++#endif ++ ++ chipphyreset(ch); ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++ if (gmac_last_interface(etc->unit)) { ++ /* ++ * The 4-lane serdes is shared between XLDK and SDK. XLDK has to ++ * initialize the 3rd lane (phy address 3) that is used by SDK. 
++ */ ++ serdes_init(etc->unit, 3); ++ serdes_start_pll(etc->unit, 1); ++ } ++#endif ++ ++ if (etc->forcespeed == ET_AUTO) { ++ etc->needautoneg = TRUE; ++ etc->advertise = (ADV_100FULL | ADV_100HALF | ADV_10FULL | ADV_10HALF); ++#if defined(CONFIG_MACH_HR2) ++ etc->advertise2 = 0; ++#else ++ etc->advertise2 = ADV_1000FULL; ++#endif ++ } ++ ++ return ((void *) ch); ++ ++fail: ++ chipdetach(ch); ++ return (NULL); ++} ++ ++static void ++chipdetach(ch_t *ch) ++{ ++ int32 idx; ++ ++ ET_TRACE(("et%d: chipdetach\n", ch->etc->unit)); ++ ++ if (ch == NULL) { ++ return; ++ } ++ ++ /* free dma state */ ++ for (idx = 0; idx < NUMTXQ; idx++) { ++ if (ch->di[idx] != NULL) { ++ dma_detach(ch->di[idx]); ++ ch->di[idx] = NULL; ++ } ++ } ++ ++ /* put the core back into reset */ ++ if (ch->sih) { ++ si_core_disable(ch->sih, 0); ++ } ++ ++ if (ch->etc) { ++ if (ch->etc->mib) { ++ ch->etc->mib = NULL; ++ } ++ } ++ ++ /* free si handle */ ++ if (ch->sih) { ++ si_detach(ch->sih); ++ ch->sih = NULL; ++ } ++ ++ /* free vars */ ++ if (ch->vars) { ++ MFREE(ch->osh, ch->vars, ch->vars_size); ++ } ++ ++ /* free chip private state */ ++ MFREE(ch->osh, ch, sizeof(ch_t)); ++} ++ ++static void ++chiplongname(ch_t *ch, char *buf, uint bufsize) ++{ ++ char *s; ++ ++ switch (ch->etc->deviceid) { ++ case BCM56150_CHIP_ID: ++ s = "Broadcom BCM5615x 10/100 Mbps Ethernet Controller"; ++ break; ++ case BCM56340_CHIP_ID: ++ s = "Broadcom BCM5634x 10/100/1000 Mbps Ethernet Controller"; ++ break; ++ case BCM56450_CHIP_ID: ++ s = "Broadcom BCM5645x 10/100/1000 Mbps Ethernet Controller"; ++ break; ++ case BCM53400_CHIP_ID: ++ s = "Broadcom BCM5340x 10/100/1000 Mbps Ethernet Controller"; ++ break; ++ case BCM56260_CHIP_ID: ++ s = "Broadcom BCM5626x 10/100/1000 Mbps Ethernet Controller"; ++ break; ++ case BCM56160_CHIP_ID: ++ s = "Broadcom BCM5616x 10/100/1000 Mbps Ethernet Controller"; ++ break; ++ case BCM53540_CHIP_ID: ++ s = "Broadcom BCM5354x 10/100/1000 Mbps Ethernet Controller"; ++ break; ++ case 
BCM56170_CHIP_ID: ++ s = "Broadcom BCM5617x 10/100/1000 Mbps Ethernet Controller"; ++ break; ++ default: ++ s = "Broadcom BCM5301x 10/100/1000 Mbps Ethernet Controller"; ++ break; ++ } ++ ++ strncpy(buf, s, bufsize); ++ buf[bufsize - 1] = '\0'; ++} ++ ++static void ++chipdump(ch_t *ch, struct bcmstrbuf *b) ++{ ++ bcm_bprintf(b, "regs 0x%lx etphy 0x%lx ch->intstatus 0x%x intmask 0x%x\n", ++ (ulong)ch->regs, (ulong)ch->etphy, ch->intstatus, ch->intmask); ++ bcm_bprintf(b, "\n"); ++ ++ /* registers */ ++ chipdumpregs(ch, ch->regs, b); ++ bcm_bprintf(b, "\n"); ++} ++ ++ ++#define PRREG(name) bcm_bprintf(b, #name " 0x%x ", R_REG(ch->osh, ®s->name)) ++#define PRMIBREG(name) bcm_bprintf(b, #name " 0x%x ", R_REG(ch->osh, ®s->mib.name)) ++ ++static void ++chipdumpregs(ch_t *ch, gmacregs_t *regs, struct bcmstrbuf *b) ++{ ++ uint phyaddr; ++ ++ phyaddr = ch->etc->phyaddr; ++ ++ PRREG(devcontrol); PRREG(devstatus); ++ bcm_bprintf(b, "\n"); ++ PRREG(biststatus); ++ bcm_bprintf(b, "\n"); ++ PRREG(intstatus); PRREG(intmask); PRREG(gptimer); ++ bcm_bprintf(b, "\n"); ++ PRREG(intrecvlazy); ++ bcm_bprintf(b, "\n"); ++ PRREG(flowctlthresh); PRREG(wrrthresh); PRREG(gmac_idle_cnt_thresh); ++ bcm_bprintf(b, "\n"); ++ PRREG(phyaccess); PRREG(phycontrol); ++ bcm_bprintf(b, "\n"); ++ ++ PRREG(txqctl); PRREG(rxqctl); ++ bcm_bprintf(b, "\n"); ++ PRREG(gpioselect); PRREG(gpio_output_en); ++ bcm_bprintf(b, "\n"); ++ PRREG(clk_ctl_st); PRREG(pwrctl); ++ bcm_bprintf(b, "\n"); ++ ++ /* unimac registers */ ++ PRREG(hdbkpctl); ++ bcm_bprintf(b, "\n"); ++ PRREG(cmdcfg); ++ bcm_bprintf(b, "\n"); ++ PRREG(macaddrhigh); PRREG(macaddrlow); ++ bcm_bprintf(b, "\n"); ++ PRREG(rxmaxlength); PRREG(pausequanta); PRREG(macmode); ++ bcm_bprintf(b, "\n"); ++ PRREG(outertag); PRREG(innertag); PRREG(txipg); PRREG(pausectl); ++ bcm_bprintf(b, "\n"); ++ PRREG(txflush); PRREG(rxstatus); PRREG(txstatus); ++ bcm_bprintf(b, "\n"); ++#ifdef ENABLE_MIB_REG_DUMP ++ /* mib registers */ ++ PRMIBREG(tx_good_octets); 
PRMIBREG(tx_good_pkts); PRMIBREG(tx_octets); PRMIBREG(tx_pkts); ++ bcm_bprintf(b, "\n"); ++ PRMIBREG(tx_broadcast_pkts); PRMIBREG(tx_multicast_pkts); ++ bcm_bprintf(b, "\n"); ++ PRMIBREG(tx_jabber_pkts); PRMIBREG(tx_oversize_pkts); PRMIBREG(tx_fragment_pkts); ++ bcm_bprintf(b, "\n"); ++ PRMIBREG(tx_underruns); PRMIBREG(tx_total_cols); PRMIBREG(tx_single_cols); ++ bcm_bprintf(b, "\n"); ++ PRMIBREG(tx_multiple_cols); PRMIBREG(tx_excessive_cols); PRMIBREG(tx_late_cols); ++ bcm_bprintf(b, "\n"); ++ PRMIBREG(tx_defered); PRMIBREG(tx_carrier_lost); PRMIBREG(tx_pause_pkts); ++ bcm_bprintf(b, "\n"); ++ ++ PRMIBREG(rx_good_octets); PRMIBREG(rx_good_pkts); PRMIBREG(rx_octets); PRMIBREG(rx_pkts); ++ bcm_bprintf(b, "\n"); ++ PRMIBREG(rx_broadcast_pkts); PRMIBREG(rx_multicast_pkts); ++ bcm_bprintf(b, "\n"); ++ PRMIBREG(rx_jabber_pkts); ++ PRMIBREG(rx_oversize_pkts); PRMIBREG(rx_fragment_pkts); ++ bcm_bprintf(b, "\n"); ++ PRMIBREG(rx_missed_pkts); PRMIBREG(rx_crc_align_errs); PRMIBREG(rx_undersize); ++ bcm_bprintf(b, "\n"); ++ PRMIBREG(rx_crc_errs); PRMIBREG(rx_align_errs); PRMIBREG(rx_symbol_errs); ++ bcm_bprintf(b, "\n"); ++ PRMIBREG(rx_pause_pkts); PRMIBREG(rx_nonpause_pkts); ++#endif /* ENABLE_MIB_REG_DUMP */ ++ bcm_bprintf(b, "\n"); ++ if (phyaddr != EPHY_NOREG) { ++ /* print a few interesting phy registers */ ++ bcm_bprintf(b, "phy0 0x%x phy1 0x%x phy2 0x%x phy3 0x%x\n", ++ chipphyrd(ch, phyaddr, 0), ++ chipphyrd(ch, phyaddr, 1), ++ chipphyrd(ch, phyaddr, 2), ++ chipphyrd(ch, phyaddr, 3)); ++ bcm_bprintf(b, "phy4 0x%x phy5 0x%x phy24 0x%x phy25 0x%x\n", ++ chipphyrd(ch, phyaddr, 4), ++ chipphyrd(ch, phyaddr, 5), ++ chipphyrd(ch, phyaddr, 24), ++ chipphyrd(ch, phyaddr, 25)); ++ } ++ ++} ++ ++static void ++gmac_clearmib(ch_t *ch) ++{ ++ volatile uint32 *ptr; ++ ++ /* enable clear on read */ ++ OR_REG(ch->osh, &ch->regs->devcontrol, DC_MROR); ++ ++ for (ptr = &ch->regs->mib.tx_good_octets; ptr <= &ch->regs->mib.rx_uni_pkts; ptr++) { ++ (void)R_REG(ch->osh, ptr); ++ if (ptr == 
&ch->regs->mib.tx_q3_octets_high) { ++ ptr++; ++ } ++ } ++ ++ return; ++} ++ ++static void ++gmac_init_reset(ch_t *ch) ++{ ++ OR_REG(ch->osh, &ch->regs->cmdcfg, CC_SR); ++ OSL_DELAY(GMAC_RESET_DELAY); ++} ++ ++static void ++gmac_clear_reset(ch_t *ch) ++{ ++ AND_REG(ch->osh, &ch->regs->cmdcfg, ~CC_SR); ++ OSL_DELAY(GMAC_RESET_DELAY); ++} ++ ++static void ++gmac_reset(ch_t *ch) ++{ ++ uint32 ocmdcfg, cmdcfg; ++ ++ /* put the mac in reset */ ++ gmac_init_reset(ch); ++ ++ /* initialize default config */ ++ ocmdcfg = cmdcfg = R_REG(ch->osh, &ch->regs->cmdcfg); ++ ++ cmdcfg &= ~(CC_TE | CC_RE | CC_RPI | CC_TAI | CC_HD | CC_ML | ++ CC_CFE | CC_RL | CC_RED | CC_PE | CC_TPI | CC_PAD_EN | CC_PF); ++ cmdcfg |= (CC_PROM | CC_NLC | CC_CFE); ++ ++#if defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || \ ++ defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_GH2) ++ cmdcfg |= (CC_AE | CC_OT | CC_OR); ++#endif ++ ++ if (cmdcfg != ocmdcfg) { ++ W_REG(ch->osh, &ch->regs->cmdcfg, cmdcfg); ++ } ++ ++ /* bring mac out of reset */ ++ gmac_clear_reset(ch); ++} ++ ++static void ++gmac_promisc(ch_t *ch, bool mode) ++{ ++ uint32 cmdcfg; ++ ++ cmdcfg = R_REG(ch->osh, &ch->regs->cmdcfg); ++ ++ /* put the mac in reset */ ++ gmac_init_reset(ch); ++ ++ /* enable or disable promiscuous mode */ ++ if (mode) { ++ cmdcfg |= CC_PROM; ++ } else { ++ cmdcfg &= ~CC_PROM; ++ } ++ ++ W_REG(ch->osh, &ch->regs->cmdcfg, cmdcfg); ++ ++ /* bring mac out of reset */ ++ gmac_clear_reset(ch); ++} ++ ++static int ++gmac_speed(ch_t *ch, uint32 speed) ++{ ++ uint32 cmdcfg; ++ uint32 hd_ena = 0; ++#if (defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3)) ++ uint32_t sdctl; ++ uint32 set_speed = speed; ++#endif /* CONFIG_MACH_GH || CONFIG_MACH_HR3 */ ++ ++ switch (speed) { ++ case ET_10HALF: ++ hd_ena = CC_HD; ++ /* FALLTHRU */ ++ ++ case ET_10FULL: ++ speed = 0; ++ break; ++ ++ case ET_100HALF: ++ hd_ena = CC_HD; ++ /* FALLTHRU */ ++ ++ case ET_100FULL: ++ speed = 1; ++ break; ++ ++ case ET_1000FULL: ++ speed = 2; ++ 
break; ++ ++ case ET_1000HALF: ++ ET_ERROR(("et%d: gmac_speed: supports 1000 mbps full duplex only\n", ++ ch->etc->unit)); ++ return (FAILURE); ++ ++ case ET_2500FULL: ++ speed = 3; ++ break; ++ ++ default: ++ ET_ERROR(("et%d: gmac_speed: speed %d not supported\n", ++ ch->etc->unit, speed)); ++ return (FAILURE); ++ } ++ ++ cmdcfg = R_REG(ch->osh, &ch->regs->cmdcfg); ++ ++ /* put mac in reset */ ++ gmac_init_reset(ch); ++ ++ /* set the speed */ ++ cmdcfg &= ~(CC_ES_MASK | CC_HD); ++ cmdcfg |= ((speed << CC_ES_SHIFT) | hd_ena); ++ ++#if defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || \ ++ defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_GH2) ++ cmdcfg |= (CC_AE | CC_OT | CC_OR); ++#endif ++ ++ W_REG(ch->osh, &ch->regs->cmdcfg, cmdcfg); ++ ++ /* bring mac out of reset */ ++ gmac_clear_reset(ch); ++ ++#if (defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3)) ++ sdctl = R_REG(ch->osh, &ch->regs->serdes_ctl); ++ sdctl &= ~(SC_FORCE_SPD_STRAP_MASK); ++ switch (set_speed) { ++ case ET_1000FULL: ++ sdctl |= SC_FORCE_SPD_1G_VAL; ++ break; ++ case ET_100FULL: ++ case ET_100HALF: ++ sdctl |= SC_FORCE_SPD_100M_VAL; ++ break; ++ default: ++ break; ++ } ++ W_REG(ch->osh, &ch->regs->serdes_ctl, sdctl); ++ ++ udelay(1000); ++#endif /* CONFIG_MACH_GH || CONFIG_MACH_HR3 */ ++ ++ return (SUCCESS); ++} ++ ++static void ++gmac_macloopback(ch_t *ch, bool on) ++{ ++ uint32 ocmdcfg, cmdcfg; ++ ++ ocmdcfg = cmdcfg = R_REG(ch->osh, &ch->regs->cmdcfg); ++ ++ /* put mac in reset */ ++ gmac_init_reset(ch); ++ ++ /* set/clear the mac loopback mode */ ++ if (on) { ++ cmdcfg |= CC_ML; ++ } else { ++ cmdcfg &= ~CC_ML; ++ } ++ ++ if (cmdcfg != ocmdcfg) { ++ W_REG(ch->osh, &ch->regs->cmdcfg, cmdcfg); ++ } ++ ++ /* bring mac out of reset */ ++ gmac_clear_reset(ch); ++} ++ ++static int ++gmac_loopback(ch_t *ch, uint32 mode) ++{ ++ switch (mode) { ++ case LOOPBACK_MODE_DMA: ++ /* to enable loopback for any channel set the loopback ++ * enable bit in xmt0control register. 
++ */ ++ dma_fifoloopbackenable(ch->di[TX_Q0]); ++ break; ++ ++ case LOOPBACK_MODE_MAC: ++ gmac_macloopback(ch, TRUE); ++ break; ++ ++ case LOOPBACK_MODE_NONE: ++ gmac_macloopback(ch, FALSE); ++ break; ++ ++ default: ++ ET_ERROR(("et%d: gmac_loopaback: Unknown loopback mode %d\n", ++ ch->etc->unit, mode)); ++ return (FAILURE); ++ } ++ ++ return (SUCCESS); ++} ++ ++static void ++gmac_enable(ch_t *ch) ++{ ++ uint32 cmdcfg; ++ gmacregs_t *regs; ++ ++ regs = ch->regs; ++ ++ cmdcfg = R_REG(ch->osh, &ch->regs->cmdcfg); ++ ++ /* put mac in reset */ ++ gmac_init_reset(ch); ++ ++ cmdcfg |= CC_SR; ++ ++ /* first deassert rx_ena and tx_ena while in reset */ ++ cmdcfg &= ~(CC_RE | CC_TE); ++ W_REG(ch->osh, ®s->cmdcfg, cmdcfg); ++ ++ /* bring mac out of reset */ ++ gmac_clear_reset(ch); ++ ++ /* enable the mac transmit and receive paths now */ ++ OSL_DELAY(2); ++ cmdcfg &= ~CC_SR; ++ cmdcfg |= (CC_RE | CC_TE); ++ ++ /* assert rx_ena and tx_ena when out of reset to enable the mac */ ++ W_REG(ch->osh, ®s->cmdcfg, cmdcfg); ++ ++ /* request ht clock */ ++ OR_REG(ch->osh, ®s->clk_ctl_st, CS_FH); ++ ++ return; ++} ++ ++static void ++gmac_txflowcontrol(ch_t *ch, bool on) ++{ ++ uint32 cmdcfg; ++ ++ cmdcfg = R_REG(ch->osh, &ch->regs->cmdcfg); ++ ++ /* put the mac in reset */ ++ gmac_init_reset(ch); ++ ++ /* to enable tx flow control clear the rx pause ignore bit */ ++ if (on) { ++ cmdcfg &= ~CC_RPI; ++ } else { ++ cmdcfg |= CC_RPI; ++ } ++ ++ W_REG(ch->osh, &ch->regs->cmdcfg, cmdcfg); ++ ++ /* bring mac out of reset */ ++ gmac_clear_reset(ch); ++} ++ ++ ++#if defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || \ ++ defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_GH2) || defined(CONFIG_MACH_WH2) ++static void ++gmac_serdes_init(ch_t *ch) ++{ ++ uint32_t sdctl, sdstat0, sdstat1; ++ gmacregs_t *regs; ++ ++ regs = ch->regs; ++ ++ ET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ sdctl = R_REG(ch->osh, &ch->regs->serdes_ctl); ++ sdstat0 = R_REG(ch->osh, &ch->regs->serdes_status0); ++ 
sdstat1 = R_REG(ch->osh, &ch->regs->serdes_status1); ++ ++ /* ++ * Bring up both digital and analog clocks ++ * ++ * NOTE: Many MAC registers are not accessible until the PLL is locked. ++ * An S-Channel timeout will occur before that. ++ */ ++ ++ sdctl = (SC_TX1G_FIFO_RST_VAL|SC_FORCE_SPD_STRAP_VAL); ++#if defined(CONFIG_MACH_HX4) ++ sdctl |= (SC_REFSEL_VAL|SC_REF_TERM_SEL_MASK); ++#elif defined(CONFIG_MACH_KT2) ++ sdctl |= SC_REF_TERM_SEL_MASK; ++#endif /* (defined(CONFIG_MACH_HX4) */ ++ W_REG(ch->osh, &ch->regs->serdes_ctl, sdctl); ++ ++ udelay(1000); ++ ++ sdctl = R_REG(ch->osh, &ch->regs->serdes_ctl); ++ sdctl |= (SC_IDDQ_MASK|SC_PWR_DOWN_MASK); ++ W_REG(ch->osh, &ch->regs->serdes_ctl, sdctl); ++ ++ sdctl = R_REG(ch->osh, &ch->regs->serdes_ctl); ++ sdctl &= ~(SC_IDDQ_MASK|SC_PWR_DOWN_MASK); ++ W_REG(ch->osh, &ch->regs->serdes_ctl, sdctl); ++ ++ /* Bring hardware out of reset */ ++ sdctl = R_REG(ch->osh, &ch->regs->serdes_ctl); ++ sdctl |= (SC_RSTB_HW_MASK); ++ W_REG(ch->osh, &ch->regs->serdes_ctl, sdctl); ++ ++ /* Bring MDIOREGS out of reset */ ++ sdctl = R_REG(ch->osh, &ch->regs->serdes_ctl); ++ sdctl |= (SC_RSTB_MDIOREGS_MASK); ++ W_REG(ch->osh, &ch->regs->serdes_ctl, sdctl); ++ ++ udelay(1000); ++ ++ /* Bring PLL out of reset */ ++ sdctl = R_REG(ch->osh, &ch->regs->serdes_ctl); ++ sdctl |= (SC_RSTB_PLL_MASK); ++ W_REG(ch->osh, &ch->regs->serdes_ctl, sdctl); ++ ++ udelay(1000); ++ ++ sdctl = R_REG(ch->osh, &ch->regs->serdes_ctl); ++ sdstat0 = R_REG(ch->osh, &ch->regs->serdes_status0); ++ sdstat1 = R_REG(ch->osh, &ch->regs->serdes_status1); ++ ++ return; ++} ++#endif /* defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || \ ++ defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_GH2) || defined(CONFIG_MACH_WH2) */ ++ ++static void ++gmac_miiconfig(ch_t *ch) ++{ ++ /* BCM53010 GMAC DevStatus register has different definition of "Interface Mode" ++ * Bit 12:8 "interface_mode" This field is programmed through IDM control bits [6:2] ++ * ++ * Bit 0 : 
SOURCE_SYNC_MODE_EN - If set, Rx line clock input will be used by Unimac for ++ * sampling data.If this is reset, PLL reference clock (Clock 250 or Clk 125 based ++ * on CLK_250_SEL) will be used as receive side line clock. ++ * Bit 1 : DEST_SYNC_MODE_EN - If this is reset, PLL reference clock input (Clock 250 or ++ * Clk 125 based on CLK_250_SEL) will be used as transmit line clock. ++ * If this is set, TX line clock input (from external switch/PHY) is used as ++ * transmit line clock. ++ * Bit 2 : TX_CLK_OUT_INVERT_EN - If set, this will invert the TX clock out of AMAC. ++ * Bit 3 : DIRECT_GMII_MODE - If direct gmii is set to 0, then only 25 MHz clock needs to ++ * be fed at 25MHz reference clock input, for both 10/100 Mbps speeds. ++ * Unimac will internally divide the clock to 2.5 MHz for 10 Mbps speed ++ * Bit 4 : CLK_250_SEL - When set, this selects 250Mhz reference clock input and hence ++ * Unimac line rate will be 2G. ++ * If reset, this selects 125MHz reference clock input. ++ */ ++ ++ if (ch->etc->forcespeed == ET_AUTO) { ++ if (ch->etc->deviceid == BCM56150_CHIP_ID) { ++ gmac_speed(ch, ET_100FULL); ++ } else { ++ gmac_speed(ch, ET_1000FULL); ++ } ++ } else { ++ gmac_speed(ch, ch->etc->forcespeed); ++ } ++} ++ ++#ifdef CONFIG_SERDES_ASYMMETRIC_MODE ++void ++gmac_serdes_asym_mode(etc_info_t *etcptrs[]) ++{ ++ etc_info_t *etc; ++ ++ etc = etcptrs[0]; ++ ++ /* initialize serdes */ ++ gmac_serdes_init(etc->ch); ++ serdes_reset_core(etc->unit, etc->int_phyaddr); ++ ++ /* initialize lane 0 */ ++ serdes_set_asym_mode(etc->unit, etc->int_phyaddr); ++ serdes_init(etc->unit, etc->int_phyaddr); ++ serdes_speeddpx_set(etc->unit, etc->int_phyaddr, etc->speed, etc->duplex); ++ /* initialize lane 1 */ ++ etc = etcptrs[1]; ++ if (etc->linkstate) { ++ serdes_set_asym_mode(etc->unit, etc->int_phyaddr); ++ serdes_init(etc->unit, etc->int_phyaddr); ++ serdes_speeddpx_set(etc->unit, etc->int_phyaddr, etc->speed, etc->duplex); ++ } ++ ++ /* ++ * The 4-lane serdes is shared 
between XLDK and SDK. XLDK has to ++ * initialize the 3rd lane (phy address 3) that is used by SDK. ++ */ ++ serdes_init(etc->unit, 3); ++ ++ /* start PLL */ ++ serdes_start_pll(etc->unit, 1); ++} ++#endif /* CONFIG_SERDES_ASYMMETRIC_MODE */ ++ ++ ++static void ++chipreset(ch_t *ch) ++{ ++ gmacregs_t *regs; ++ uint32 idx, sflags, flagbits = 0; ++ ++ ET_TRACE(("et%d: chipreset\n", ch->etc->unit)); ++ ++ regs = ch->regs; ++ ++ if (!si_iscoreup(ch->sih)) { ++ /* power on reset: reset the enet core */ ++ goto chipinreset; ++ } ++ ++ /* Reset other three GMAC cores if needed */ ++ for (idx = 0; idx < IPROC_NUM_GMACS; idx++) { ++ /* As northstar requirement, we have to reset all GAMCs before accessing them. ++ * et_probe() call pci_enable_device() for etx and do si_core_reset for GAMCx only. ++ * then the other three GAMC didn't reset. ++ * We do it here. ++ */ ++ si_setcore(ch->sih, GMAC_CORE_ID, idx); ++ if (!si_iscoreup(ch->sih)) { ++ ET_TRACE(("et%d: reset NorthStar GMAC[%d] core\n", ch->etc->unit, idx)); ++ si_core_reset(ch->sih, flagbits, 0); ++ } ++ } ++ si_setcore(ch->sih, GMAC_CORE_ID, 0); ++ ++ /* update software counters before resetting the chip */ ++ if (ch->mibgood) { ++ chipstatsupd(ch); ++ } ++ ++ /* reset the tx dma engines */ ++ for (idx = 0; idx < NUMTXQ; idx++) { ++ if (ch->di[idx]) { ++ ET_TRACE(("et%d: resetting tx dma%d\n", ch->etc->unit, idx)); ++ dma_txreset(ch->di[idx]); ++ } ++ } ++ ++ /* set gmac into loopback mode to ensure no rx traffic */ ++ gmac_loopback(ch, LOOPBACK_MODE_MAC); ++ OSL_DELAY(1); ++ ++ /* reset the rx dma engine */ ++ if (ch->di[RX_Q0]) { ++ ET_TRACE(("et%d: resetting rx dma\n", ch->etc->unit)); ++ dma_rxreset(ch->di[RX_Q0]); ++ } ++ ++ /* clear the multicast filter table */ ++ gmac_mf_cleanup(ch); ++ ++chipinreset: ++ sflags = si_core_sflags(ch->sih, 0, 0); ++ if (sflags & SISF_SW_ATTACHED) { ++ ET_TRACE(("et%d: internal switch attached\n", ch->etc->unit)); ++ flagbits = SICF_SWCLKE; ++ if (!ch->etc->robo) { ++ 
ET_TRACE(("et%d: reseting switch\n", ch->etc->unit)); ++ flagbits |= SICF_SWRST; ++ } ++ } ++ ++ /* Reset all GMAC cores */ ++ for (idx = 0; idx < IPROC_NUM_GMACS; idx++) { ++ /* As northstar requirement, we have to reset all GAMCs before accessing them. ++ * et_probe() call pci_enable_device() for etx and do si_core_reset for GAMCx only. ++ * then the other three GAMC didn't reset. ++ * We do it here. ++ */ ++ si_setcore(ch->sih, GMAC_CORE_ID, idx); ++ if (!si_iscoreup(ch->sih)) { ++ ET_TRACE(("et%d: reset NorthStar GMAC[%d] core\n", ch->etc->unit, idx)); ++ si_core_reset(ch->sih, flagbits, 0); ++ } ++ } ++ si_setcore(ch->sih, GMAC_CORE_ID, 0); ++ ++ if ((sflags & SISF_SW_ATTACHED) && (!ch->etc->robo)) { ++ ET_TRACE(("et%d: taking switch out of reset\n", ch->etc->unit)); ++ si_core_cflags(ch->sih, SICF_SWRST, 0); ++ } ++ ++ /* reset gmac */ ++ gmac_reset(ch); ++ ++ /* clear mib */ ++ gmac_clearmib(ch); ++ ch->mibgood = TRUE; ++ ++ /* set mdc_transition_en */ ++ OR_REG(ch->osh, ®s->phycontrol, PC_MTE); ++ ++ /* Read the devstatus to figure out the configuration mode of ++ * the interface. Set the speed to 100 if the switch interface ++ * is mii/rmii. ++ */ ++ gmac_miiconfig(ch); ++ ++ /* clear persistent sw intstatus */ ++ ch->intstatus = 0; ++} ++ ++/* ++ * Lookup a multicast address in the filter hash table. ++ */ ++static int ++gmac_mf_lkup(ch_t *ch, struct ether_addr *mcaddr) ++{ ++ mflist_t *ptr; ++ ++ /* find the multicast address */ ++ for (ptr = ch->mf.bucket[GMAC_MCADDR_HASH(mcaddr)]; ptr != NULL; ptr = ptr->next) { ++ if (!ETHER_MCADDR_CMP(&ptr->mc_addr, mcaddr)) { ++ return (SUCCESS); ++ } ++ } ++ ++ return (FAILURE); ++} ++ ++/* ++ * Add a multicast address to the filter hash table. 
++ */ ++static int ++gmac_mf_add(ch_t *ch, struct ether_addr *mcaddr) ++{ ++ uint32 hash; ++ mflist_t *entry; ++#ifdef BCMDBG ++ char mac[ETHER_ADDR_STR_LEN]; ++#endif /* BCMDBG */ ++ ++ /* add multicast addresses only */ ++ if (!ETHER_ISMULTI(mcaddr)) { ++ ET_ERROR(("et%d: adding invalid multicast address %s\n", ++ ch->etc->unit, bcm_ether_ntoa(mcaddr, mac))); ++ return (FAILURE); ++ } ++ ++ /* discard duplicate add requests */ ++ if (gmac_mf_lkup(ch, mcaddr) == SUCCESS) { ++ ET_ERROR(("et%d: adding duplicate mcast filter entry\n", ch->etc->unit)); ++ return (FAILURE); ++ } ++ ++ /* allocate memory for list entry */ ++ entry = MALLOC(ch->osh, sizeof(mflist_t)); ++ if (entry == NULL) { ++ ET_ERROR(("et%d: out of memory allocating mcast filter entry\n", ch->etc->unit)); ++ return (FAILURE); ++ } ++ ++ /* add the entry to the hash bucket */ ++ ether_copy(mcaddr, &entry->mc_addr); ++ hash = GMAC_MCADDR_HASH(mcaddr); ++ entry->next = ch->mf.bucket[hash]; ++ ch->mf.bucket[hash] = entry; ++ ++ return (SUCCESS); ++} ++ ++/* ++ * Cleanup the multicast filter hash table. ++ */ ++static void ++gmac_mf_cleanup(ch_t *ch) ++{ ++ mflist_t *ptr, *tmp; ++ int32 idx; ++ ++ for (idx = 0; idx < GMAC_HASHT_SIZE; idx++) { ++ ptr = ch->mf.bucket[idx]; ++ while (ptr) { ++ tmp = ptr; ++ ptr = ptr->next; ++ MFREE(ch->osh, tmp, sizeof(mflist_t)); ++ } ++ ch->mf.bucket[idx] = NULL; ++ } ++} ++ ++/* ++ * Initialize all the chip registers. If dma mode, init tx and rx dma engines ++ * but leave the devcontrol tx and rx (fifos) disabled. 
++ */ ++static void ++chipinit(ch_t *ch, uint options) ++{ ++ etc_info_t *etc; ++ gmacregs_t *regs; ++ uint idx; ++ ++ regs = ch->regs; ++ etc = ch->etc; ++ ++ ET_TRACE(("et%d: chipinit\n", etc->unit)); ++ ++ /* enable one rx interrupt per received frame */ ++ W_REG(ch->osh, ®s->intrecvlazy, (1 << IRL_FC_SHIFT)); ++ ++ /* enable 802.3x tx flow control (honor received PAUSE frames) */ ++ gmac_txflowcontrol(ch, TRUE); ++ ++ /* enable/disable promiscuous mode */ ++ gmac_promisc(ch, etc->promisc); ++ ++ /* set our local address */ ++ W_REG(ch->osh, ®s->macaddrhigh, ++ hton32(*(uint32 *)&etc->cur_etheraddr.octet[0])); ++ W_REG(ch->osh, ®s->macaddrlow, ++ hton16(*(uint16 *)&etc->cur_etheraddr.octet[4])); ++ ++ if (!etc->promisc) { ++ /* gmac doesn't have a cam, hence do the multicast address filtering ++ * in the software ++ */ ++ /* allmulti or a list of discrete multicast addresses */ ++ if (!etc->allmulti && etc->nmulticast) { ++ for (idx = 0; idx < etc->nmulticast; idx++) { ++ (void)gmac_mf_add(ch, &etc->multicast[idx]); ++ } ++ } ++ } ++ ++ /* optionally enable mac-level loopback */ ++ if (etc->loopbk) { ++ gmac_loopback(ch, LOOPBACK_MODE_MAC); ++ } else { ++ gmac_loopback(ch, LOOPBACK_MODE_NONE); ++ } ++ ++ /* set max frame lengths - account for possible vlan tag */ ++ W_REG(ch->osh, ®s->rxmaxlength, BCM_ETHER_MAX_LEN + 32); ++ ++ /* ++ * Optionally, disable phy autonegotiation and force our speed/duplex ++ * or constrain our advertised capabilities. 
++ */ ++ if (etc->forcespeed != ET_AUTO) { ++ gmac_speed(ch, etc->forcespeed); ++ chipphyforce(ch, etc->phyaddr); ++ switch (etc->forcespeed) { ++ case ET_1000FULL: ++ etc->speed = 1000; ++ etc->duplex = 1; ++ break; ++ case ET_1000HALF: ++ etc->speed = 1000; ++ etc->duplex = 0; ++ break; ++ case ET_100FULL: ++ etc->speed = 100; ++ etc->duplex = 1; ++ break; ++ case ET_100HALF: ++ etc->speed = 100; ++ etc->duplex = 0; ++ break; ++ case ET_10FULL: ++ etc->speed = 10; ++ etc->duplex = 1; ++ break; ++ case ET_10HALF: ++ etc->speed = 10; ++ etc->duplex = 0; ++ break; ++ default: ++ break; ++ } ++ } else if (etc->advertise && etc->needautoneg) { ++ chipphyadvertise(ch, etc->phyaddr); ++ } ++ /* enable the overflow continue feature and disable parity */ ++ dma_ctrlflags(ch->di[0], DMA_CTRL_ROC | DMA_CTRL_PEN /* mask */, ++ DMA_CTRL_ROC /* value */); ++ ++ if (options & ET_INIT_FULL) { ++ /* initialize the tx and rx dma channels */ ++ for (idx = 0; idx < NUMTXQ; idx++) { ++ dma_txinit(ch->di[idx]); ++ } ++ dma_rxinit(ch->di[RX_Q0]); ++ ++ /* post dma receive buffers */ ++ dma_rxfill(ch->di[RX_Q0]); ++ ++ /* lastly, enable interrupts */ ++ if (options & ET_INIT_INTRON) { ++ et_intrson(etc->et); ++ } ++ } else { ++ dma_rxenable(ch->di[RX_Q0]); ++ } ++ ++ /* turn on the emac */ ++ gmac_enable(ch); ++} ++ ++/* dma transmit */ ++static bool BCMFASTPATH ++chiptx(ch_t *ch, void *p0) ++{ ++ int error, len; ++ uint32 q = TX_Q0; ++ ++ ET_TRACE(("et%d: chiptx\n", ch->etc->unit)); ++ ET_LOG("et%d: chiptx", ch->etc->unit, 0); ++ ++ len = PKTLEN(ch->osh, p0); ++ ++ /* check tx max length */ ++ if (len > (BCM_ETHER_MAX_LEN + 32)) { ++ ET_ERROR(("et%d: chiptx: max frame length exceeded\n", ++ ch->etc->unit)); ++ PKTFREE(ch->osh, p0, TRUE); ++ return FALSE; ++ } ++ ++ /* gmac rev 0 workaround: unimac can only transmit frames of ++ * length 17 bytes or greater. so pad the frame and send a ++ * 17 byte frame. to do the padding just modify the packet ++ * length that we provide to the dma. 
unimac does the extra ++ * padding * required to send 64 byte frames. ++ */ ++ if ((len < GMAC_MIN_FRAMESIZE) && (ch->etc->corerev == 0)) { ++ PKTSETLEN(ch->osh, p0, GMAC_MIN_FRAMESIZE); ++ } ++ ++ ASSERT(q < NUMTXQ); ++ ++ /* if tx completion intr is disabled then do the reclaim ++ * once every few frames transmitted. ++ */ ++ if ((ch->etc->txframes[q] & ch->etc->txrec_thresh) == 1) { ++ dma_txreclaim(ch->di[q], HNDDMA_RANGE_TRANSMITTED); ++ } ++ ++ error = dma_txfast(ch->di[q], p0, TRUE); ++ ++ if (error) { ++ ET_ERROR(("et%d: chiptx: out of txds\n", ch->etc->unit)); ++ ch->etc->txnobuf++; ++ return FALSE; ++ } ++ ++ ch->etc->txframes[q]++; ++ ++ if ((len < GMAC_MIN_FRAMESIZE) && (ch->etc->corerev == 0)) { ++ if (skb_is_nonlinear((struct sk_buff*)p0)) { ++ printk("Modified nonlinear skb (et_ctf_pipeline_loopback) - not calling skb_trim\n"); ++ } else { ++ /* set back the orig length */ ++ PKTSETLEN(ch->osh, p0, len); ++ } ++ } ++ ++ return TRUE; ++} ++ ++/* reclaim completed transmit descriptors and packets */ ++static void BCMFASTPATH ++chiptxreclaim(ch_t *ch, bool forceall) ++{ ++ int32 idx; ++ ++ ET_TRACE(("et%d: chiptxreclaim\n", ch->etc->unit)); ++ ++ for (idx = 0; idx < NUMTXQ; idx++) { ++ dma_txreclaim(ch->di[idx], forceall ? HNDDMA_RANGE_ALL : HNDDMA_RANGE_TRANSMITTED); ++ ch->intstatus &= ~(I_XI0 << idx); ++ } ++} ++ ++/* dma receive: returns a pointer to the next frame received, or NULL if there are no more */ ++static void * BCMFASTPATH ++chiprx(ch_t *ch) ++{ ++ void *p; ++ struct ether_addr *da; ++ ++ ET_TRACE(("et%d: chiprx\n", ch->etc->unit)); ++ ET_LOG("et%d: chiprx", ch->etc->unit, 0); ++ ++ if (dma_rxstopped(ch->di[RX_Q0])) { ++ ch->etc->rxdmastopped++; ++ } ++ ++ /* gmac doesn't have a cam to do address filtering. so we implement ++ * the multicast address filtering here. 
++ */ ++ while ((p = dma_rx(ch->di[RX_Q0])) != NULL) { ++ /* check for overflow error packet */ ++ if (RXH_FLAGS(ch->etc, PKTDATA(ch->osh, p)) & GRXF_OVF) { ++ PKTFREE(ch->osh, p, FALSE); ++ ch->etc->rxoflodiscards++; ++ continue; ++ } ++ ++#ifdef GMAC_RATE_LIMITING ++ /* rate limiting */ ++ /* printf("et%d: chiprx RXH_PT(0x%x)\n", ch->etc->unit, RXH_PT(ch->etc, PKTDATA(ch->osh, p))); */ ++ if (RXH_PT(ch->etc, PKTDATA(ch->osh, p)) == 2) ++ ch->etc->rx_bc_frame_cnt++; ++ if (ch->etc->rl_stopping_broadcasts) { ++ /* check if broadcast packet */ ++ if (RXH_PT(ch->etc, PKTDATA(ch->osh, p)) == 2) { ++ /* broadcast packet */ ++ PKTFREE(ch->osh, p, FALSE); ++ ch->etc->rl_dropped_bc_packets++; ++ ch->etc->rl_dropped_packets++; ++ continue; ++ } ++ } else if (ch->etc->rl_stopping_all_packets) { ++ PKTFREE(ch->osh, p, FALSE); ++ ch->etc->rl_dropped_all_packets++; ++ ch->etc->rl_dropped_packets++; ++ continue; ++ } ++#endif /* GMAC_RATE_LIMITING */ ++ ++ if (ch->etc->allmulti) { ++ return (p); ++ } else { ++ /* skip the rx header */ ++ PKTPULL(ch->osh, p, HWRXOFF); ++ ++ /* do filtering only for multicast packets when allmulti is false */ ++ da = (struct ether_addr *)PKTDATA(ch->osh, p); ++ if (!ETHER_ISMULTI(da) || ++ (gmac_mf_lkup(ch, da) == SUCCESS) || ETHER_ISBCAST(da)) { ++ PKTPUSH(ch->osh, p, HWRXOFF); ++ return (p); ++ } ++ PKTFREE(ch->osh, p, FALSE); ++ } ++ } ++ ++ ch->intstatus &= ~I_RI; ++ ++ /* post more rx buffers since we consumed a few */ ++ dma_rxfill(ch->di[RX_Q0]); ++ ++ return (NULL); ++} ++ ++/* reclaim completed dma receive descriptors and packets */ ++static void ++chiprxreclaim(ch_t *ch) ++{ ++ ET_TRACE(("et%d: chiprxreclaim\n", ch->etc->unit)); ++ dma_rxreclaim(ch->di[RX_Q0]); ++ ch->intstatus &= ~I_RI; ++} ++ ++/* allocate and post dma receive buffers */ ++static void BCMFASTPATH ++chiprxfill(ch_t *ch) ++{ ++ ET_TRACE(("et%d: chiprxfill\n", ch->etc->unit)); ++ ET_LOG("et%d: chiprxfill", ch->etc->unit, 0); ++ dma_rxfill(ch->di[RX_Q0]); ++} ++ ++#ifdef 
DBG_CHECK_ERR ++/* get current and pending interrupt events */ ++static void ++check_errs(ch_t *ch) ++{ ++ static uint32 crserrs = 0; ++ uint32 err; ++ ++ /* read the interrupt status register */ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_jabber_pkts); ++ if (err) { ++ printk("%s tx_jabber_pkts (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_oversize_pkts); ++ if (err) { ++ printk("%s tx_oversize_pkts (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_fragment_pkts); ++ if (err) { ++ printk("%s tx_fragment_pkts (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_underruns); ++ if (err) { ++ printk("%s tx_underruns (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_total_cols); ++ if (err) { ++ printk("%s tx_total_cols (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_single_cols); ++ if (err) { ++ printk("%s tx_single_cols (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_multiple_cols); ++ if (err) { ++ printk("%s tx_multiple_cols (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_excessive_cols); ++ if (err) { ++ printk("%s tx_excessive_cols (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_late_cols); ++ if (err) { ++ printk("%s tx_late_cols (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_defered); ++ if (err) { ++ printk("%s tx_defered (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_carrier_lost); ++ crserrs += err; ++ if (crserrs > 100) { ++ printk("%s tx_carrier_lost crserrs(0x%x)\n", __FUNCTION__, crserrs); ++ crserrs = 0; ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.tx_pause_pkts); ++ if (err) { ++ printk("%s tx_pause_pkts (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_jabber_pkts); ++ if (err) { ++ printk("%s 
rx_jabber_pkts (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_oversize_pkts); ++ if (err) { ++ printk("%s rx_oversize_pkts (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_fragment_pkts); ++ if (err) { ++ printk("%s rx_fragment_pkts (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_missed_pkts); ++ if (err) { ++ printk("%s rx_missed_pkts (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_crc_align_errs); ++ if (err) { ++ printk("%s rx_crc_align_errs (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_undersize); ++ if (err) { ++ printk("%s rx_undersize (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_crc_errs); ++ if (err) { ++ printk("%s rx_crc_errs (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_align_errs); ++ if (err) { ++ printk("%s rx_align_errs (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_symbol_errs); ++ if (err) { ++ printk("%s rx_symbol_errs (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_pause_pkts); ++ if (err) { ++ printk("%s rx_pause_pkts (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_nonpause_pkts); ++ if (err) { ++ printk("%s rx_nonpause_pkts (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->mib.rx_sachanges); ++ if (err) { ++ printk("%s rx_sachanges (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->dmaregs[0].dmaxmt.status1); ++ if (err & 0xf0000000) { ++ printk("%s dma0 xmit status (0x%x)\n", __FUNCTION__, err); ++ } ++ ++ err = R_REG(ch->osh, &ch->regs->dmaregs[0].dmarcv.status1); ++ if (err & 0xf0000000) { ++ printk("%s dma0 rcv status (0x%x)\n", __FUNCTION__, err); ++ } ++ ++#if defined(CONFIG_MACH_HR2) ++ phy5221_chk_err(ch->etc->unit, ch->etc->phyaddr); ++#endif /* 
defined(CONFIG_MACH_HR2) */ ++} ++#endif /* DBG_CHECK_ERR */ ++ ++/* get current and pending interrupt events */ ++static int BCMFASTPATH ++chipgetintrevents(ch_t *ch, bool in_isr) ++{ ++ uint32 intstatus; ++ int events; ++ ++ events = 0; ++ ++ /* read the interrupt status register */ ++ intstatus = R_REG(ch->osh, &ch->regs->intstatus); ++ ++ /* defer unsolicited interrupts */ ++ intstatus &= (in_isr ? ch->intmask : ch->def_intmask); ++ ++ if (intstatus != 0) { ++ events = INTR_NEW; ++ } ++ ++ /* or new bits into persistent intstatus */ ++ intstatus = (ch->intstatus |= intstatus); ++ ++ /* return if no events */ ++ if (intstatus == 0) { ++ return (0); ++ } ++ ++#ifdef DBG_CHECK_ERR ++ check_errs(ch); ++#endif /* DBG_CHECK_ERR */ ++ ++ /* convert chip-specific intstatus bits into generic intr event bits */ ++ if (intstatus & I_RI) { ++ events |= INTR_RX; ++ } ++ if (intstatus & (I_XI0 | I_XI1 | I_XI2 | I_XI3)) { ++ events |= INTR_TX; ++ } ++ if (intstatus & I_ERRORS) { ++ events |= INTR_ERROR; ++ } ++ ++ return (events); ++} ++ ++/* enable chip interrupts */ ++static void BCMFASTPATH ++chipintrson(ch_t *ch) ++{ ++ ch->intmask = ch->def_intmask; ++ W_REG(ch->osh, &ch->regs->intmask, ch->intmask); ++} ++ ++/* disable chip interrupts */ ++static void BCMFASTPATH ++chipintrsoff(ch_t *ch) ++{ ++ /* disable further interrupts from gmac */ ++ W_REG(ch->osh, &ch->regs->intmask, 0); ++ (void) R_REG(ch->osh, &ch->regs->intmask); /* sync readback */ ++ ch->intmask = 0; ++ ++ /* clear the interrupt conditions */ ++ W_REG(ch->osh, &ch->regs->intstatus, ch->intstatus); ++} ++ ++/* return true of caller should re-initialize, otherwise false */ ++static bool BCMFASTPATH ++chiperrors(ch_t *ch) ++{ ++ uint32 intstatus; ++ etc_info_t *etc; ++ ++ etc = ch->etc; ++ ++ intstatus = ch->intstatus; ++ ch->intstatus &= ~(I_ERRORS); ++ ++ ET_TRACE(("et%d: chiperrors: intstatus 0x%x\n", etc->unit, intstatus)); ++ ++ if (intstatus & I_PDEE) { ++ ET_ERROR(("et%d: descriptor error\n", 
etc->unit)); ++ etc->dmade++; ++ } ++ ++ if (intstatus & I_PDE) { ++ ET_ERROR(("et%d: data error\n", etc->unit)); ++ etc->dmada++; ++ } ++ ++ if (intstatus & I_DE) { ++ ET_ERROR(("et%d: descriptor protocol error\n", etc->unit)); ++ etc->dmape++; ++ } ++ ++ if (intstatus & I_RDU) { ++ ET_ERROR(("et%d: receive descriptor underflow\n", etc->unit)); ++ etc->rxdmauflo++; ++ } ++ ++ if (intstatus & I_RFO) { ++ ET_TRACE(("et%d: receive fifo overflow\n", etc->unit)); ++ etc->rxoflo++; ++ } ++ ++ if (intstatus & I_XFU) { ++ ET_ERROR(("et%d: transmit fifo underflow\n", etc->unit)); ++ etc->txuflo++; ++ } ++ ++ /* if overflows or decriptors underflow, don't report it ++ * as an error and provoque a reset ++ */ ++ if (intstatus & ~(I_RDU | I_RFO) & I_ERRORS) { ++ return (TRUE); ++ } ++ ++ return (FALSE); ++} ++ ++static void ++chipstatsupd(ch_t *ch) ++{ ++ etc_info_t *etc; ++ gmacregs_t *regs; ++ volatile uint32 *s; ++ uint32 *d; ++ ++ etc = ch->etc; ++ regs = ch->regs; ++ ++ /* read the mib counters and update the driver maintained software ++ * counters. ++ */ ++ OR_REG(ch->osh, ®s->devcontrol, DC_MROR); ++ for (s = ®s->mib.tx_good_octets, d = &ch->mib.tx_good_octets; ++ s <= ®s->mib.rx_uni_pkts; s++, d++) { ++ *d += R_REG(ch->osh, s); ++ if (s == &ch->regs->mib.tx_q3_octets_high) { ++ s++; ++ d++; ++ } ++ } ++ ++ /* ++ * Aggregate transmit and receive errors that probably resulted ++ * in the loss of a frame are computed on the fly. ++ * ++ * We seem to get lots of tx_carrier_lost errors when flipping ++ * speed modes so don't count these as tx errors. ++ * ++ * Arbitrarily lump the non-specific dma errors as tx errors. 
++ */ ++ etc->txerror = ch->mib.tx_jabber_pkts + ch->mib.tx_oversize_pkts ++ + ch->mib.tx_underruns + ch->mib.tx_excessive_cols ++ + ch->mib.tx_late_cols + etc->txnobuf + etc->dmade ++ + etc->dmada + etc->dmape + etc->txuflo; ++ etc->rxerror = ch->mib.rx_jabber_pkts + ch->mib.rx_oversize_pkts ++ + ch->mib.rx_missed_pkts + ch->mib.rx_crc_align_errs ++ + ch->mib.rx_undersize + ch->mib.rx_crc_errs ++ + ch->mib.rx_align_errs ++ + etc->rxnobuf + etc->rxdmauflo + etc->rxoflo + etc->rxbadlen; ++ etc->rxgiants = (ch->di[RX_Q0])->rxgiants; ++} ++ ++static void ++chipdumpmib(ch_t *ch, struct bcmstrbuf *b, bool clear) ++{ ++ gmacmib_t *m; ++ ++ m = &ch->mib; ++ ++ if (clear) { ++ bzero((char *)m, sizeof(gmacmib_t)); ++ return; ++ } ++ ++ bcm_bprintf(b, "tx_broadcast_pkts %d tx_multicast_pkts %d tx_jabber_pkts %d " ++ "tx_oversize_pkts %d\n", ++ m->tx_broadcast_pkts, m->tx_multicast_pkts, ++ m->tx_jabber_pkts, ++ m->tx_oversize_pkts); ++ bcm_bprintf(b, "tx_fragment_pkts %d tx_underruns %d\n", ++ m->tx_fragment_pkts, m->tx_underruns); ++ bcm_bprintf(b, "tx_total_cols %d tx_single_cols %d tx_multiple_cols %d " ++ "tx_excessive_cols %d\n", ++ m->tx_total_cols, m->tx_single_cols, m->tx_multiple_cols, ++ m->tx_excessive_cols); ++ bcm_bprintf(b, "tx_late_cols %d tx_defered %d tx_carrier_lost %d tx_pause_pkts %d\n", ++ m->tx_late_cols, m->tx_defered, m->tx_carrier_lost, ++ m->tx_pause_pkts); ++ ++ /* receive stat counters */ ++ /* hardware mib pkt and octet counters wrap too quickly to be useful */ ++ bcm_bprintf(b, "rx_broadcast_pkts %d rx_multicast_pkts %d rx_jabber_pkts %d " ++ "rx_oversize_pkts %d\n", ++ m->rx_broadcast_pkts, m->rx_multicast_pkts, ++ m->rx_jabber_pkts, m->rx_oversize_pkts); ++ bcm_bprintf(b, "rx_fragment_pkts %d rx_missed_pkts %d rx_crc_align_errs %d " ++ "rx_undersize %d\n", ++ m->rx_fragment_pkts, m->rx_missed_pkts, ++ m->rx_crc_align_errs, m->rx_undersize); ++ bcm_bprintf(b, "rx_crc_errs %d rx_align_errs %d rx_symbol_errs %d\n", ++ m->rx_crc_errs, 
m->rx_align_errs, m->rx_symbol_errs); ++ bcm_bprintf(b, "rx_pause_pkts %d rx_nonpause_pkts %d\n", ++ m->rx_pause_pkts, m->rx_nonpause_pkts); ++} ++ ++static void ++chipenablepme(ch_t *ch) ++{ ++ return; ++} ++ ++static void ++chipdisablepme(ch_t *ch) ++{ ++ return; ++} ++ ++static void ++chipduplexupd(ch_t *ch) ++{ ++ uint32 cmdcfg; ++ int32 duplex, speed; ++#if (defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3)) ++ uint32_t sdctl; ++#endif /* defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3) */ ++ ++ cmdcfg = R_REG(ch->osh, &ch->regs->cmdcfg); ++ ++ /* check if duplex mode changed */ ++ if (ch->etc->duplex && (cmdcfg & CC_HD)) { ++ duplex = 0; ++ } else if (!ch->etc->duplex && ((cmdcfg & CC_HD) == 0)) { ++ duplex = CC_HD; ++ } else { ++ duplex = -1; ++ } ++ ++ /* check if the speed changed */ ++ speed = ((cmdcfg & CC_ES_MASK) >> CC_ES_SHIFT); ++ if ((ch->etc->speed == 1000) && (speed != 2)) { ++ speed = 2; ++ } else if ((ch->etc->speed == 100) && (speed != 1)) { ++ speed = 1; ++ } else if ((ch->etc->speed == 10) && (speed != 0)) { ++ speed = 0; ++ } else { ++ speed = -1; ++ } ++ ++ /* no duplex or speed change required */ ++ if ((speed == -1) && (duplex == -1)) { ++ return; ++ } ++ ++ /* update the speed */ ++ if (speed != -1) { ++ cmdcfg &= ~CC_ES_MASK; ++ cmdcfg |= (speed << CC_ES_SHIFT); ++ } ++ ++ /* update the duplex mode */ ++ if (duplex != -1) { ++ cmdcfg &= ~CC_HD; ++ cmdcfg |= duplex; ++ } ++ ++#if defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || \ ++ defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_GH2) ++ cmdcfg |= (CC_AE | CC_OT | CC_OR); ++#endif /* defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || \ ++ defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_GH2) */ ++ ++ ET_TRACE(("chipduplexupd: updating speed & duplex %x\n", cmdcfg)); ++ ++ /* put mac in reset */ ++ gmac_init_reset(ch); ++ ++ W_REG(ch->osh, &ch->regs->cmdcfg, cmdcfg); ++ ++ /* bring mac out of reset */ ++ gmac_clear_reset(ch); ++ ++#if (defined(CONFIG_MACH_GH) || 
defined(CONFIG_MACH_HR3)) ++ sdctl = R_REG(ch->osh, &ch->regs->serdes_ctl); ++ sdctl &= ~(SC_FORCE_SPD_STRAP_MASK); ++ switch (ch->etc->speed) { ++ case 1000: ++ sdctl |= SC_FORCE_SPD_1G_VAL; ++ break; ++ case 100: ++ sdctl |= SC_FORCE_SPD_100M_VAL; ++ break; ++ default: ++ break; ++ } ++ W_REG(ch->osh, &ch->regs->serdes_ctl, sdctl); ++#endif /* (defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3)) */ ++} ++ ++#ifdef CONFIG_SERDES_ASYMMETRIC_MODE ++static void ++chipforcespddpx(ch_t *ch) ++{ ++ uint32 cmdcfg; ++ int32 duplex=0, speed; ++ ++ cmdcfg = R_REG(ch->osh, &ch->regs->cmdcfg); ++ ++ /* set duplex */ ++ if (!ch->etc->duplex) ++ duplex = CC_HD; ++ ++ /* set speed */ ++ if (ch->etc->speed == 10) { ++ speed = 0; ++ } else if (ch->etc->speed == 100) { ++ speed = 1; ++ } else { ++ speed = 2; ++ } ++ ++ /* update the speed */ ++ cmdcfg &= ~CC_ES_MASK; ++ cmdcfg |= (speed << CC_ES_SHIFT); ++ ++ /* update the duplex mode */ ++ cmdcfg &= ~CC_HD; ++ cmdcfg |= duplex; ++ ++ ET_TRACE(("chipforcespddpx: forcing speed & duplex %x\n", cmdcfg)); ++ ++ /* put mac in reset */ ++ gmac_init_reset(ch); ++ ++ W_REG(ch->osh, &ch->regs->cmdcfg, cmdcfg); ++ ++ /* bring mac out of reset */ ++ gmac_clear_reset(ch); ++ ++ if (ch->etc->up) { ++ serdes_speeddpx_set(ch->etc->unit, ch->etc->int_phyaddr, ch->etc->speed, ch->etc->duplex); ++ } ++} ++#endif /* CONFIG_SERDES_ASYMMETRIC_MODE */ ++ ++ ++static uint16 ++chipphyrd(ch_t *ch, uint phyaddr, uint reg) ++{ ++ uint16 val = 0; ++ uint32 addr = PHY_REG_ADDR(phyaddr); ++ ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2)) ++ if (PHY_REG_BUS(phyaddr)) { /* Internal serdes */ ++ val = serdes_rd_reg(ch->etc->unit, addr, reg); ++ } else { ++ phy5461_rd_reg(ch->etc->unit, addr, ++ PHY_REG_FLAGS(phyaddr) ? 
SOC_PHY_REG_1000X : 0, ++ PHY_REG_BANK(phyaddr), reg, &val); ++ } ++#elif defined(CONFIG_MACH_HR2) ++ phy5221_rd_reg(ch->etc->unit, addr, PHY_REG_BANK(phyaddr), reg, &val); ++#elif defined(CONFIG_MACH_HR3) ++#if defined(CONFIG_MACH_WH2) ++ iproc_mii_read(MII_DEV_LOCAL, addr, reg, &val); ++#else ++ phy5481_rd_reg(ch->etc->unit, addr, PHY_REG_BANK(phyaddr), reg, &val); ++#endif ++#elif defined(CONFIG_MACH_GH) ++ phy5481_rd_reg(ch->etc->unit, addr, PHY_REG_BANK(phyaddr), reg, &val); ++#elif defined(CONFIG_MACH_GH2) ++ phy542xx_rd_reg(addr, PHY_REG_FLAGS(phyaddr), reg, &val); ++#endif /* defined(CONFIG_MACH_GH2) */ ++ ++ return val; ++} ++ ++static void ++chipphywr(ch_t *ch, uint phyaddr, uint reg, uint16 val) ++{ ++ uint32 addr = PHY_REG_ADDR(phyaddr); ++ ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2)) ++ if (PHY_REG_BUS(phyaddr)) { /* Internal serdes */ ++ serdes_wr_reg(ch->etc->unit, addr, reg, val); ++ } else { ++ phy5461_wr_reg(ch->etc->unit, addr, ++ PHY_REG_FLAGS(phyaddr) ? 
SOC_PHY_REG_1000X : 0, ++ PHY_REG_BANK(phyaddr), reg, &val); ++ } ++#elif defined(CONFIG_MACH_HR2) ++ phy5221_wr_reg(ch->etc->unit, addr, PHY_REG_BANK(phyaddr), reg, &val); ++#elif defined(CONFIG_MACH_HR3) ++#if defined(CONFIG_MACH_WH2) ++ iproc_mii_write(MII_DEV_LOCAL, addr, reg, val); ++#else ++ phy5481_wr_reg(ch->etc->unit, addr, PHY_REG_BANK(phyaddr), reg, &val); ++#endif ++#elif defined(CONFIG_MACH_GH) ++ phy5481_wr_reg(ch->etc->unit, addr, PHY_REG_BANK(phyaddr), reg, &val); ++#elif defined(CONFIG_MACH_GH2) ++ phy542xx_wr_reg(addr, PHY_REG_FLAGS(phyaddr), reg, val); ++#endif /* defined(CONFIG_MACH_GH2) */ ++} ++ ++static void ++chipphyor(ch_t *ch, uint phyaddr, uint reg, uint16 val) ++{ ++ uint16 tmp; ++ ++ tmp = chipphyrd(ch, phyaddr, reg); ++ tmp |= val; ++ chipphywr(ch, phyaddr, reg, tmp); ++} ++ ++static void ++chipphyreset(ch_t *ch) ++{ ++ uint ext_phyaddr = ch->etc->phyaddr; ++ uint int_phyaddr = ch->etc->int_phyaddr; ++ ++ ASSERT(ext_phyaddr < MAXEPHY); ++ if (ext_phyaddr == EPHY_NOREG) { ++ return; ++ } ++ ++ ET_TRACE(("et%d: chipphyreset, ext_phyaddr: 0x%x, int_phyaddr: 0x%x\n", ++ ch->etc->unit, ext_phyaddr, int_phyaddr)); ++ ++ chipphywr(ch, ext_phyaddr, 0, CTL_RESET); ++ OSL_DELAY(100); ++ if (chipphyrd(ch, ext_phyaddr, 0) & CTL_RESET) { ++ ET_ERROR(("et%d: chipphyreset: reset not complete\n", ch->etc->unit)); ++ } ++ ++ /* Internal serdes reset */ ++ if (int_phyaddr < MAXEPHY) { ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2)) ++ serdes_reset(ch->etc->unit, int_phyaddr); ++#elif defined(CONFIG_MACH_GH2) ++ sgmiiplus2_serdes_reset(ch->etc->unit, int_phyaddr); ++#elif defined(CONFIG_MACH_WH2) ++ if (select & 0x04) /* select Serdes (SGMII Plus2) path */ ++ sgmiiplus2_serdes_reset(ch->etc->unit, int_phyaddr); ++#endif ++ } ++ ++ chipphyinit(ch); ++} ++ ++static void ++chipphyinit(ch_t *ch) ++{ ++ uint ext_phyaddr = ch->etc->phyaddr; ++ uint int_phyaddr = ch->etc->int_phyaddr; ++ ++ ASSERT(ext_phyaddr < MAXEPHY); 
++ if (ext_phyaddr == EPHY_NOREG) { ++ return; ++ } ++ ++ ET_TRACE(("et%d: chipphyinit, ext_phyaddr: 0x%x, int_phyaddr: 0x%x\n", ++ ch->etc->unit, ext_phyaddr, int_phyaddr)); ++ ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2)) ++ phy5461_init(ch->etc->unit, ext_phyaddr); ++#elif defined(CONFIG_MACH_HR2) ++ phy5221_init(ch->etc->unit, ext_phyaddr); ++#elif defined(CONFIG_MACH_GH) ++ phy5481_init(ch->etc->unit, ext_phyaddr); ++#elif defined(CONFIG_MACH_HR3) ++ ++#if defined(CONFIG_MACH_WH2) ++ if ((select & 0x04) == 0x0) /* select egphy28 path */ ++ { ++ void __iomem *base; ++ base = of_iomap(of_find_compatible_node(NULL, NULL, IPROC_CMICD_COMPATIBLE), 0); ++ egphy28_init(base, ext_phyaddr); ++ } ++#else ++ phy5481_init(ch->etc->unit, ext_phyaddr); ++#endif ++ ++#elif defined(CONFIG_MACH_GH2) ++ phy542xx_init(ext_phyaddr); ++#endif /* defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3) */ ++ ++ /* Internal serdes reset */ ++ if (int_phyaddr < MAXEPHY) { ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2)) ++ serdes_init(ch->etc->unit, int_phyaddr); ++#elif defined(CONFIG_MACH_GH2) ++ sgmiiplus2_serdes_init(ch->etc->unit, int_phyaddr); ++#elif defined(CONFIG_MACH_WH2) ++ if (select & 0x04) /* select Serdes (SGMII Plus2) path */ ++ { ++ sgmiiplus2_serdes_init(ch->etc->unit, int_phyaddr); ++ } ++#endif ++ } ++} ++ ++static void ++chipphyforce(ch_t *ch, uint phyaddr) ++{ ++ etc_info_t *etc; ++ uint16 ctl; ++#ifdef CONFIG_FORCED_MODE_AUTO_MDIX ++ uint16 adv; ++#endif ++ ASSERT(phyaddr < MAXEPHY); ++ ++ if (phyaddr == EPHY_NOREG) { ++ return; ++ } ++ ++ etc = ch->etc; ++ ++ if (etc->forcespeed == ET_AUTO) { ++ return; ++ } ++ ++ ET_TRACE(("et%d: chipphyforce: phyaddr %d speed %d\n", ++ ch->etc->unit, phyaddr, etc->forcespeed)); ++ ++ ctl = chipphyrd(ch, phyaddr, PHY_MII_CTRLr_ADDR); ++ ctl &= ~(CTL_SPEED | CTL_SPEED_MSB | CTL_ANENAB | CTL_DUPLEX); ++#ifdef CONFIG_FORCED_MODE_AUTO_MDIX ++ adv = 
chipphyrd(ch, phyaddr, PHY_MII_GB_CTRLr_ADDR); ++ adv &= ~(ADV_1000FULL | ADV_1000HALF); ++ chipphywr(ch, phyaddr, PHY_MII_GB_CTRLr_ADDR, adv); ++ ++ adv = chipphyrd(ch, phyaddr, PHY_MII_ANAr_ADDR); ++ adv &= ~(ADV_100FULL | ADV_100HALF | ADV_10FULL | ADV_10HALF); ++#endif ++ switch (etc->forcespeed) { ++ case ET_10HALF: ++#ifdef CONFIG_FORCED_MODE_AUTO_MDIX ++ adv |= ADV_10HALF; ++#endif ++ break; ++ ++ case ET_10FULL: ++ ctl |= CTL_DUPLEX; ++#ifdef CONFIG_FORCED_MODE_AUTO_MDIX ++ adv |= ADV_10FULL; ++#endif ++ break; ++ ++ case ET_100HALF: ++ ctl |= CTL_SPEED_100; ++#ifdef CONFIG_FORCED_MODE_AUTO_MDIX ++ adv |= ADV_100HALF; ++#endif ++ break; ++ ++ case ET_100FULL: ++ ctl |= (CTL_SPEED_100 | CTL_DUPLEX); ++#ifdef CONFIG_FORCED_MODE_AUTO_MDIX ++ adv |= ADV_100FULL; ++#endif ++ break; ++ ++ case ET_1000FULL: ++ ctl |= (CTL_SPEED_1000 | CTL_DUPLEX); ++ break; ++ } ++ ++ chipphywr(ch, phyaddr, PHY_MII_CTRLr_ADDR, ctl); ++#ifdef CONFIG_FORCED_MODE_AUTO_MDIX ++ chipphywr(ch, phyaddr, PHY_MII_ANAr_ADDR, adv); ++ if (etc->forcespeed != ET_1000FULL) { ++#if defined(CONFIG_MACH_GH2) ++ phy542xx_force_auto_mdix(phyaddr, 0); ++#else ++ adv = chipphyrd(ch, phyaddr | (PHY_MII_MISC_CTRLr_BANK << 8), PHY_MII_MISC_CTRLr_ADDR); ++ adv |= MII_FORCED_AUTO_MDIX; ++ chipphywr(ch, phyaddr | (PHY_MII_MISC_CTRLr_BANK << 8), PHY_MII_MISC_CTRLr_ADDR, adv); ++#endif /* defined(CONFIG_MACH_GH2) */ ++ } ++#endif ++} ++ ++/* set selected capability bits in autonegotiation advertisement */ ++static void ++chipphyadvertise(ch_t *ch, uint phyaddr) ++{ ++ etc_info_t *etc; ++ uint16 adv, adv2; ++ ++ ASSERT(phyaddr < MAXEPHY); ++ ++ if (phyaddr == EPHY_NOREG) { ++ return; ++ } ++ ++ etc = ch->etc; ++ ++ if ((etc->forcespeed != ET_AUTO) || !etc->needautoneg) { ++ return; ++ } ++ ++ ASSERT(etc->advertise); ++ ++ ET_TRACE(("et%d: chipphyadvertise: phyaddr %d advertise %x\n", ++ ch->etc->unit, phyaddr, etc->advertise)); ++ ++ /* reset our advertised capabilitity bits */ ++ adv = chipphyrd(ch, phyaddr, 
PHY_MII_ANAr_ADDR); ++ adv &= ~(ADV_100FULL | ADV_100HALF | ADV_10FULL | ADV_10HALF); ++ adv |= (etc->advertise | ADV_PAUSE); ++ chipphywr(ch, phyaddr, PHY_MII_ANAr_ADDR, adv); ++ ++ adv2 = chipphyrd(ch, phyaddr, PHY_MII_GB_CTRLr_ADDR); ++ adv2 &= ~(ADV_1000FULL | ADV_1000HALF); ++ adv2 |= etc->advertise2; ++ chipphywr(ch, phyaddr, PHY_MII_GB_CTRLr_ADDR, adv2); ++ ++ ET_TRACE(("et%d: chipphyadvertise: phyaddr %d adv %x adv2 %x phyad0 %x\n", ++ ch->etc->unit, phyaddr, adv, adv2, chipphyrd(ch, phyaddr, 0))); ++#ifdef CONFIG_FORCED_MODE_AUTO_MDIX ++#if defined(CONFIG_MACH_GH2) ++ phy542xx_force_auto_mdix(phyaddr, 0); ++#else ++ adv = chipphyrd(ch, phyaddr | (PHY_MII_MISC_CTRLr_BANK << 8), PHY_MII_MISC_CTRLr_ADDR); ++ if (adv & MII_FORCED_AUTO_MDIX) { ++ adv &= ~MII_FORCED_AUTO_MDIX; ++ chipphywr(ch, phyaddr | (PHY_MII_MISC_CTRLr_BANK << 8), PHY_MII_MISC_CTRLr_ADDR, adv); ++ } ++#endif /* defined(CONFIG_MACH_GH2) */ ++#endif ++ /* restart autonegotiation */ ++ chipphyor(ch, phyaddr, PHY_MII_CTRLr_ADDR, CTL_RESTART); ++ etc->needautoneg = FALSE; ++} ++ ++static void ++chipphyenable(ch_t *ch, uint eth_num, uint phyaddr, int enable) ++{ ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2)) ++ phy5461_enable_set(eth_num, phyaddr, enable); ++#elif defined(CONFIG_MACH_HR2) ++ phy5221_enable_set(eth_num, phyaddr, enable); ++#elif defined(CONFIG_MACH_GH) ++ phy5481_enable_set(eth_num, phyaddr, enable); ++#elif defined(CONFIG_MACH_HR3) ++#if defined(CONFIG_MACH_WH2) ++ if ((select & 0x04) == 0x0) /* Select EGPHY28 path */ ++ egphy28_enable_set(phyaddr, enable); ++#else ++ phy5481_enable_set(eth_num, phyaddr, enable); ++#endif ++#elif defined(CONFIG_MACH_GH2) ++ phy542xx_enable_set(phyaddr, enable); ++#endif /* defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2) */ ++} ++ ++#ifdef GMAC_RATE_LIMITING ++void ++etc_check_rate_limiting(etc_info_t *etc, void *pch) ++{ ++ /*ch_t *ch = (ch_t*)pch;*/ ++ uint32 timediff, 
unit; ++ int bc_cnt_diff; ++ static uint32 first_run[2]={1,1}; ++ static uint32 prev_bc_frame_cnt[2]={0,0}, bc_frame_cnt[2]={0,0}; ++ ++ unit = etc->unit; ++ if (first_run[unit]) { ++ bc_frame_cnt[unit] = etc->rx_bc_frame_cnt; ++ prev_bc_frame_cnt[unit] = bc_frame_cnt[unit]; ++ first_run[unit] = 0; ++ } ++ else { ++ bc_cnt_diff = etc->rx_bc_frame_cnt - prev_bc_frame_cnt[unit]; ++ if (bc_cnt_diff >= 0) ++ bc_frame_cnt[unit] += bc_cnt_diff; ++ else ++ bc_frame_cnt[unit] += (bc_cnt_diff + 0xffffffff + 0x1); ++ prev_bc_frame_cnt[unit] = etc->rx_bc_frame_cnt; ++ } ++ ++ timediff = ((long)jiffies - (long)(etc->rl_prior_jiffies)); ++ if ((timediff >> 5) != 0) { ++ /* ++ * 32 or more jiffies have gone by, See if ++ * we're seeing too many broadcast packets. ++ */ ++ if ((timediff >> 5) == 1) { ++ /* 32-63 jiffies elapsed */ ++ if (((bc_frame_cnt[unit] >> 10) != 0) && ++ !(etc->rl_stopping_broadcasts)) { ++ /* 1K or more broadcast packets have arrived in ++ * 32-63 jiffies; try to throttle back the ++ * incoming packets ++ */ ++ etc->rl_stopping_broadcasts = 1; ++ printk("et%d: %s: stopping broadcasts bc_frame_cnts(0x%x)\n", ++ etc->unit, __FUNCTION__, bc_frame_cnt[unit]); ++ if (!timer_pending(&etc->rl_timer)) { ++ etc->rl_timer.expires = jiffies + HZ; ++ add_timer(&etc->rl_timer); ++ etc->rl_set=TRUE; ++ } ++ } ++ } ++ etc->rl_prior_jiffies = jiffies; ++ bc_frame_cnt[unit] = 0; ++ } ++} ++#endif /* GMAC_RATE_LIMITING */ ++ ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/et/sys/etcgmac.h b/drivers/net/ethernet/broadcom/gmac/src/et/sys/etcgmac.h +--- a/drivers/net/ethernet/broadcom/gmac/src/et/sys/etcgmac.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/et/sys/etcgmac.h 2017-11-09 17:53:43.905290000 +0800 +@@ -0,0 +1,65 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * Broadcom Gigabit Ethernet MAC defines. 
++ * ++ * $Id: etcgmac.h 267700 2011-06-19 15:41:07Z sudhirbs $ ++ */ ++ ++#ifndef _ETCGMAC_H_ ++#define _ETCGMAC_H_ ++ ++#if (defined(CONFIG_MACH_HR2) || defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3)) ++#define IPROC_NUM_GMACS 1 ++#else ++#define IPROC_NUM_GMACS 2 ++#endif ++ ++/* chip interrupt bit error summary */ ++#define I_ERRORS (I_PDEE | I_PDE | I_DE | I_RDU | I_RFO | I_XFU) ++#define DEF_INTMASK (I_XI0 | I_XI1 | I_XI2 | I_XI3 | I_RI | I_ERRORS) ++ ++#define GMAC_RESET_DELAY 2 ++ ++#define GMAC_MIN_FRAMESIZE 17 /* gmac can only send frames of ++ * size above 17 octetes. ++ */ ++ ++#define LOOPBACK_MODE_DMA 0 /* loopback the packet at the DMA engine */ ++#define LOOPBACK_MODE_MAC 1 /* loopback the packet at MAC */ ++#define LOOPBACK_MODE_NONE 2 /* no Loopback */ ++ ++#define FA2_GMAC_MAX_LEN 2048 ++ ++#define DMAREG(ch, dir, qnum) ((dir == DMA_TX) ? \ ++ (void *)(uintptr)&(ch->regs->dmaregs[qnum].dmaxmt) : \ ++ (void *)(uintptr)&(ch->regs->dmaregs[qnum].dmarcv)) ++ ++/* ++ * Add multicast address to the list. Multicast address are maintained as ++ * hash table with chaining. 
++ */ ++typedef struct mclist { ++ struct ether_addr mc_addr; /* multicast address to allow */ ++ struct mclist *next; /* next entry */ ++} mflist_t; ++ ++#define GMAC_HASHT_SIZE 16 /* hash table size */ ++#define GMAC_MCADDR_HASH(m) ((((uint8 *)(m))[3] + ((uint8 *)(m))[4] + \ ++ ((uint8 *)(m))[5]) & (GMAC_HASHT_SIZE - 1)) ++ ++#define ETHER_MCADDR_CMP(x, y) ((((uint16 *)(x))[0] ^ ((uint16 *)(y))[0]) | \ ++ (((uint16 *)(x))[1] ^ ((uint16 *)(y))[1]) | \ ++ (((uint16 *)(x))[2] ^ ((uint16 *)(y))[2])) ++ ++#define SUCCESS 0 ++#define FAILURE -1 ++ ++typedef struct mcfilter { ++ /* hash table for multicast filtering */ ++ mflist_t *bucket[GMAC_HASHT_SIZE]; ++} mcfilter_t; ++ ++extern uint32 find_priq(uint32 pri_map); ++ ++#endif /* _ETCGMAC_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/Makefile b/drivers/net/ethernet/broadcom/gmac/src/include/Makefile +--- a/drivers/net/ethernet/broadcom/gmac/src/include/Makefile 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/Makefile 2017-11-09 17:53:43.912294000 +0800 +@@ -0,0 +1,62 @@ ++# This script serves following purpose: ++# ++# 1. It generates native version information by querying ++# automerger maintained database to see where src/include ++# came from ++# 2. For select components, as listed in compvers.sh ++# it generates component version files ++# ++# Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++# ++# Permission to use, copy, modify, and/or distribute this software for any ++# purpose with or without fee is hereby granted, provided that the above ++# copyright notice and this permission notice appear in all copies. ++# ++# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++# MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++# ++# ++ ++SRCBASE := .. ++ ++TARGETS := epivers.h ++ ++ifdef VERBOSE ++export VERBOSE ++endif ++ ++all release: epivers compvers ++ ++# Generate epivers.h for native branch version ++epivers: ++ bash epivers.sh ++ ++# Generate epivers.h for native branch version ++compvers: ++ @if [ -s "compvers.sh" ]; then \ ++ echo "Generating component versions, if any"; \ ++ bash compvers.sh; \ ++ else \ ++ echo "Skipping component version generation"; \ ++ fi ++ ++# Generate epivers.h for native branch version ++clean_compvers: ++ @if [ -s "compvers.sh" ]; then \ ++ echo "bash compvers.sh clean"; \ ++ bash compvers.sh clean; \ ++ else \ ++ echo "Skipping component version clean"; \ ++ fi ++ ++clean: ++ rm -f $(TARGETS) *.prev ++ ++clean_all: clean clean_compvers ++ ++.PHONY: all release clean epivers compvers clean_compvers +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/aidmp.h b/drivers/net/ethernet/broadcom/gmac/src/include/aidmp.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/aidmp.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/aidmp.h 2017-11-09 17:53:43.913291000 +0800 +@@ -0,0 +1,383 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom AMBA Interconnect definitions. ++ * ++ * $Id: aidmp.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _AIDMP_H ++#define _AIDMP_H ++ ++/* Manufacturer Ids */ ++#define MFGID_ARM 0x43b ++#define MFGID_BRCM 0x4bf ++#define MFGID_MIPS 0x4a7 ++ ++/* Component Classes */ ++#define CC_SIM 0 ++#define CC_EROM 1 ++#define CC_CORESIGHT 9 ++#define CC_VERIF 0xb ++#define CC_OPTIMO 0xd ++#define CC_GEN 0xe ++#define CC_PRIMECELL 0xf ++ ++/* Enumeration ROM registers */ ++#define ER_EROMENTRY 0x000 ++#define ER_REMAPCONTROL 0xe00 ++#define ER_REMAPSELECT 0xe04 ++#define ER_MASTERSELECT 0xe10 ++#define ER_ITCR 0xf00 ++#define ER_ITIP 0xf04 ++ ++/* Erom entries */ ++#define ER_TAG 0xe ++#define ER_TAG1 0x6 ++#define ER_VALID 1 ++#define ER_CI 0 ++#define ER_MP 2 ++#define ER_ADD 4 ++#define ER_END 0xe ++#define ER_BAD 0xffffffff ++ ++/* EROM CompIdentA */ ++#define CIA_MFG_MASK 0xfff00000 ++#define CIA_MFG_SHIFT 20 ++#define CIA_CID_MASK 0x000fff00 ++#define CIA_CID_SHIFT 8 ++#define CIA_CCL_MASK 0x000000f0 ++#define CIA_CCL_SHIFT 4 ++ ++/* EROM CompIdentB */ ++#define CIB_REV_MASK 0xff000000 ++#define CIB_REV_SHIFT 24 ++#define CIB_NSW_MASK 0x00f80000 ++#define CIB_NSW_SHIFT 19 ++#define CIB_NMW_MASK 0x0007c000 ++#define CIB_NMW_SHIFT 14 ++#define CIB_NSP_MASK 0x00003e00 ++#define CIB_NSP_SHIFT 9 ++#define CIB_NMP_MASK 0x000001f0 ++#define CIB_NMP_SHIFT 4 ++ ++/* EROM MasterPortDesc */ ++#define MPD_MUI_MASK 0x0000ff00 ++#define MPD_MUI_SHIFT 
8 ++#define MPD_MP_MASK 0x000000f0 ++#define MPD_MP_SHIFT 4 ++ ++/* EROM AddrDesc */ ++#define AD_ADDR_MASK 0xfffff000 ++#define AD_SP_MASK 0x00000f00 ++#define AD_SP_SHIFT 8 ++#define AD_ST_MASK 0x000000c0 ++#define AD_ST_SHIFT 6 ++#define AD_ST_SLAVE 0x00000000 ++#define AD_ST_BRIDGE 0x00000040 ++#define AD_ST_SWRAP 0x00000080 ++#define AD_ST_MWRAP 0x000000c0 ++#define AD_SZ_MASK 0x00000030 ++#define AD_SZ_SHIFT 4 ++#define AD_SZ_4K 0x00000000 ++#define AD_SZ_8K 0x00000010 ++#define AD_SZ_16K 0x00000020 ++#define AD_SZ_SZD 0x00000030 ++#define AD_AG32 0x00000008 ++#define AD_ADDR_ALIGN 0x00000fff ++#define AD_SZ_BASE 0x00001000 /* 4KB */ ++ ++/* EROM SizeDesc */ ++#define SD_SZ_MASK 0xfffff000 ++#define SD_SG32 0x00000008 ++#define SD_SZ_ALIGN 0x00000fff ++ ++ ++#ifndef _LANGUAGE_ASSEMBLY ++ ++typedef volatile struct _aidmp { ++ uint32 oobselina30; /* 0x000 */ ++ uint32 oobselina74; /* 0x004 */ ++ uint32 PAD[6]; ++ uint32 oobselinb30; /* 0x020 */ ++ uint32 oobselinb74; /* 0x024 */ ++ uint32 PAD[6]; ++ uint32 oobselinc30; /* 0x040 */ ++ uint32 oobselinc74; /* 0x044 */ ++ uint32 PAD[6]; ++ uint32 oobselind30; /* 0x060 */ ++ uint32 oobselind74; /* 0x064 */ ++ uint32 PAD[38]; ++ uint32 oobselouta30; /* 0x100 */ ++ uint32 oobselouta74; /* 0x104 */ ++ uint32 PAD[6]; ++ uint32 oobseloutb30; /* 0x120 */ ++ uint32 oobseloutb74; /* 0x124 */ ++ uint32 PAD[6]; ++ uint32 oobseloutc30; /* 0x140 */ ++ uint32 oobseloutc74; /* 0x144 */ ++ uint32 PAD[6]; ++ uint32 oobseloutd30; /* 0x160 */ ++ uint32 oobseloutd74; /* 0x164 */ ++ uint32 PAD[38]; ++ uint32 oobsynca; /* 0x200 */ ++ uint32 oobseloutaen; /* 0x204 */ ++ uint32 PAD[6]; ++ uint32 oobsyncb; /* 0x220 */ ++ uint32 oobseloutben; /* 0x224 */ ++ uint32 PAD[6]; ++ uint32 oobsyncc; /* 0x240 */ ++ uint32 oobseloutcen; /* 0x244 */ ++ uint32 PAD[6]; ++ uint32 oobsyncd; /* 0x260 */ ++ uint32 oobseloutden; /* 0x264 */ ++ uint32 PAD[38]; ++ uint32 oobaextwidth; /* 0x300 */ ++ uint32 oobainwidth; /* 0x304 */ ++ uint32 oobaoutwidth; /* 
0x308 */ ++ uint32 PAD[5]; ++ uint32 oobbextwidth; /* 0x320 */ ++ uint32 oobbinwidth; /* 0x324 */ ++ uint32 oobboutwidth; /* 0x328 */ ++ uint32 PAD[5]; ++ uint32 oobcextwidth; /* 0x340 */ ++ uint32 oobcinwidth; /* 0x344 */ ++ uint32 oobcoutwidth; /* 0x348 */ ++ uint32 PAD[5]; ++ uint32 oobdextwidth; /* 0x360 */ ++ uint32 oobdinwidth; /* 0x364 */ ++ uint32 oobdoutwidth; /* 0x368 */ ++ uint32 PAD[37]; ++ uint32 ioctrlset; /* 0x400 */ ++ uint32 ioctrlclear; /* 0x404 */ ++ uint32 ioctrl; /* 0x408 */ ++ uint32 PAD[61]; ++ uint32 iostatus; /* 0x500 */ ++ uint32 PAD[127]; ++ uint32 ioctrlwidth; /* 0x700 */ ++ uint32 iostatuswidth; /* 0x704 */ ++ uint32 PAD[62]; ++ uint32 resetctrl; /* 0x800 */ ++ uint32 resetstatus; /* 0x804 */ ++ uint32 resetreadid; /* 0x808 */ ++ uint32 resetwriteid; /* 0x80c */ ++ uint32 PAD[60]; ++ uint32 errlogctrl; /* 0x900 */ ++ uint32 errlogdone; /* 0x904 */ ++ uint32 errlogstatus; /* 0x908 */ ++ uint32 errlogaddrlo; /* 0x90c */ ++ uint32 errlogaddrhi; /* 0x910 */ ++ uint32 errlogid; /* 0x914 */ ++ uint32 errloguser; /* 0x918 */ ++ uint32 errlogflags; /* 0x91c */ ++ uint32 PAD[56]; ++ uint32 intstatus; /* 0xa00 */ ++ uint32 PAD[255]; ++ uint32 config; /* 0xe00 */ ++ uint32 PAD[63]; ++ uint32 itcr; /* 0xf00 */ ++ uint32 PAD[3]; ++ uint32 itipooba; /* 0xf10 */ ++ uint32 itipoobb; /* 0xf14 */ ++ uint32 itipoobc; /* 0xf18 */ ++ uint32 itipoobd; /* 0xf1c */ ++ uint32 PAD[4]; ++ uint32 itipoobaout; /* 0xf30 */ ++ uint32 itipoobbout; /* 0xf34 */ ++ uint32 itipoobcout; /* 0xf38 */ ++ uint32 itipoobdout; /* 0xf3c */ ++ uint32 PAD[4]; ++ uint32 itopooba; /* 0xf50 */ ++ uint32 itopoobb; /* 0xf54 */ ++ uint32 itopoobc; /* 0xf58 */ ++ uint32 itopoobd; /* 0xf5c */ ++ uint32 PAD[4]; ++ uint32 itopoobain; /* 0xf70 */ ++ uint32 itopoobbin; /* 0xf74 */ ++ uint32 itopoobcin; /* 0xf78 */ ++ uint32 itopoobdin; /* 0xf7c */ ++ uint32 PAD[4]; ++ uint32 itopreset; /* 0xf90 */ ++ uint32 PAD[15]; ++ uint32 peripherialid4; /* 0xfd0 */ ++ uint32 peripherialid5; /* 0xfd4 */ ++ 
uint32 peripherialid6; /* 0xfd8 */ ++ uint32 peripherialid7; /* 0xfdc */ ++ uint32 peripherialid0; /* 0xfe0 */ ++ uint32 peripherialid1; /* 0xfe4 */ ++ uint32 peripherialid2; /* 0xfe8 */ ++ uint32 peripherialid3; /* 0xfec */ ++ uint32 componentid0; /* 0xff0 */ ++ uint32 componentid1; /* 0xff4 */ ++ uint32 componentid2; /* 0xff8 */ ++ uint32 componentid3; /* 0xffc */ ++} aidmp_t; ++ ++#endif /* _LANGUAGE_ASSEMBLY */ ++ ++/* Out-of-band Router registers */ ++#define OOB_BUSCONFIG 0x020 ++#define OOB_STATUSA 0x100 ++#define OOB_STATUSB 0x104 ++#define OOB_STATUSC 0x108 ++#define OOB_STATUSD 0x10c ++#define OOB_ENABLEA0 0x200 ++#define OOB_ENABLEA1 0x204 ++#define OOB_ENABLEA2 0x208 ++#define OOB_ENABLEA3 0x20c ++#define OOB_ENABLEB0 0x280 ++#define OOB_ENABLEB1 0x284 ++#define OOB_ENABLEB2 0x288 ++#define OOB_ENABLEB3 0x28c ++#define OOB_ENABLEC0 0x300 ++#define OOB_ENABLEC1 0x304 ++#define OOB_ENABLEC2 0x308 ++#define OOB_ENABLEC3 0x30c ++#define OOB_ENABLED0 0x380 ++#define OOB_ENABLED1 0x384 ++#define OOB_ENABLED2 0x388 ++#define OOB_ENABLED3 0x38c ++#define OOB_ITCR 0xf00 ++#define OOB_ITIPOOBA 0xf10 ++#define OOB_ITIPOOBB 0xf14 ++#define OOB_ITIPOOBC 0xf18 ++#define OOB_ITIPOOBD 0xf1c ++#define OOB_ITOPOOBA 0xf30 ++#define OOB_ITOPOOBB 0xf34 ++#define OOB_ITOPOOBC 0xf38 ++#define OOB_ITOPOOBD 0xf3c ++ ++/* DMP wrapper registers */ ++#define AI_OOBSELINA30 0x000 ++#define AI_OOBSELINA74 0x004 ++#define AI_OOBSELINB30 0x020 ++#define AI_OOBSELINB74 0x024 ++#define AI_OOBSELINC30 0x040 ++#define AI_OOBSELINC74 0x044 ++#define AI_OOBSELIND30 0x060 ++#define AI_OOBSELIND74 0x064 ++#define AI_OOBSELOUTA30 0x100 ++#define AI_OOBSELOUTA74 0x104 ++#define AI_OOBSELOUTB30 0x120 ++#define AI_OOBSELOUTB74 0x124 ++#define AI_OOBSELOUTC30 0x140 ++#define AI_OOBSELOUTC74 0x144 ++#define AI_OOBSELOUTD30 0x160 ++#define AI_OOBSELOUTD74 0x164 ++#define AI_OOBSYNCA 0x200 ++#define AI_OOBSELOUTAEN 0x204 ++#define AI_OOBSYNCB 0x220 ++#define AI_OOBSELOUTBEN 0x224 ++#define 
AI_OOBSYNCC 0x240 ++#define AI_OOBSELOUTCEN 0x244 ++#define AI_OOBSYNCD 0x260 ++#define AI_OOBSELOUTDEN 0x264 ++#define AI_OOBAEXTWIDTH 0x300 ++#define AI_OOBAINWIDTH 0x304 ++#define AI_OOBAOUTWIDTH 0x308 ++#define AI_OOBBEXTWIDTH 0x320 ++#define AI_OOBBINWIDTH 0x324 ++#define AI_OOBBOUTWIDTH 0x328 ++#define AI_OOBCEXTWIDTH 0x340 ++#define AI_OOBCINWIDTH 0x344 ++#define AI_OOBCOUTWIDTH 0x348 ++#define AI_OOBDEXTWIDTH 0x360 ++#define AI_OOBDINWIDTH 0x364 ++#define AI_OOBDOUTWIDTH 0x368 ++ ++#if defined(IL_BIGENDIAN) && defined(BCMHND74K) ++/* Selective swapped defines for those registers we need in ++ * big-endian code. ++ */ ++#define AI_IOCTRLSET 0x404 ++#define AI_IOCTRLCLEAR 0x400 ++#define AI_IOCTRL 0x40c ++#define AI_IOSTATUS 0x504 ++#define AI_RESETCTRL 0x804 ++#define AI_RESETSTATUS 0x800 ++ ++#else /* !IL_BIGENDIAN || !BCMHND74K */ ++ ++#define AI_IOCTRLSET 0x400 ++#define AI_IOCTRLCLEAR 0x404 ++#define AI_IOCTRL 0x408 ++#define AI_IOSTATUS 0x500 ++#define AI_RESETCTRL 0x800 ++#define AI_RESETSTATUS 0x804 ++ ++#endif /* IL_BIGENDIAN && BCMHND74K */ ++ ++#define AI_IOCTRLWIDTH 0x700 ++#define AI_IOSTATUSWIDTH 0x704 ++ ++#define AI_RESETREADID 0x808 ++#define AI_RESETWRITEID 0x80c ++#define AI_ERRLOGCTRL 0xa00 ++#define AI_ERRLOGDONE 0xa04 ++#define AI_ERRLOGSTATUS 0xa08 ++#define AI_ERRLOGADDRLO 0xa0c ++#define AI_ERRLOGADDRHI 0xa10 ++#define AI_ERRLOGID 0xa14 ++#define AI_ERRLOGUSER 0xa18 ++#define AI_ERRLOGFLAGS 0xa1c ++#define AI_INTSTATUS 0xa00 ++#define AI_CONFIG 0xe00 ++#define AI_ITCR 0xf00 ++#define AI_ITIPOOBA 0xf10 ++#define AI_ITIPOOBB 0xf14 ++#define AI_ITIPOOBC 0xf18 ++#define AI_ITIPOOBD 0xf1c ++#define AI_ITIPOOBAOUT 0xf30 ++#define AI_ITIPOOBBOUT 0xf34 ++#define AI_ITIPOOBCOUT 0xf38 ++#define AI_ITIPOOBDOUT 0xf3c ++#define AI_ITOPOOBA 0xf50 ++#define AI_ITOPOOBB 0xf54 ++#define AI_ITOPOOBC 0xf58 ++#define AI_ITOPOOBD 0xf5c ++#define AI_ITOPOOBAIN 0xf70 ++#define AI_ITOPOOBBIN 0xf74 ++#define AI_ITOPOOBCIN 0xf78 ++#define AI_ITOPOOBDIN 0xf7c 
++#define AI_ITOPRESET 0xf90 ++#define AI_PERIPHERIALID4 0xfd0 ++#define AI_PERIPHERIALID5 0xfd4 ++#define AI_PERIPHERIALID6 0xfd8 ++#define AI_PERIPHERIALID7 0xfdc ++#define AI_PERIPHERIALID0 0xfe0 ++#define AI_PERIPHERIALID1 0xfe4 ++#define AI_PERIPHERIALID2 0xfe8 ++#define AI_PERIPHERIALID3 0xfec ++#define AI_COMPONENTID0 0xff0 ++#define AI_COMPONENTID1 0xff4 ++#define AI_COMPONENTID2 0xff8 ++#define AI_COMPONENTID3 0xffc ++ ++/* resetctrl */ ++#define AIRC_RESET 1 ++ ++/* config */ ++#define AICFG_OOB 0x00000020 ++#define AICFG_IOS 0x00000010 ++#define AICFG_IOC 0x00000008 ++#define AICFG_TO 0x00000004 ++#define AICFG_ERRL 0x00000002 ++#define AICFG_RST 0x00000001 ++ ++/* bit defines for AI_OOBSELOUTB74 reg */ ++#define OOB_SEL_OUTEN_B_5 15 ++#define OOB_SEL_OUTEN_B_6 23 ++ ++#endif /* _AIDMP_H */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/arminc.h b/drivers/net/ethernet/broadcom/gmac/src/include/arminc.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/arminc.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/arminc.h 2017-11-09 17:53:43.914306000 +0800 +@@ -0,0 +1,317 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * HND Run Time Environment for standalone ARM programs. ++ * ++ * $Id: arminc.h 325951 2012-04-05 06:03:27Z $ ++ */ ++ ++#ifndef _ARMINC_H ++#define _ARMINC_H ++ ++ ++/* ARM defines */ ++ ++#ifdef _LANGUAGE_ASSEMBLY ++ ++/* ++ * LEAF - declare leaf routine ++ */ ++#define LEAF(function) \ ++ .section .text.function, "ax"; \ ++ .global function; \ ++ .func function; \ ++function: ++ ++#define THUMBLEAF(function) \ ++ .section .text.function, "ax"; \ ++ .global function; \ ++ .func function; \ ++ .thumb; \ ++ .thumb_func; \ ++function: ++ ++/* ++ * END - mark end of function ++ */ ++#define END(function) \ ++ .ltorg; \ ++ .endfunc; \ ++ .size function, . - function ++ ++#define DW(var, val) \ ++ .global var; \ ++ .type var, %object; \ ++ .size var, 4; \ ++ .align 2; \ ++var: .word val ++ ++ ++#define _ULCAST_ ++ ++#else ++ ++/* ++ * The following macros are especially useful for __asm__ ++ * inline assembler. 
++ */ ++#ifndef __STR ++#define __STR(x) #x ++#endif ++#ifndef STR ++#define STR(x) __STR(x) ++#endif ++ ++#define _ULCAST_ (unsigned long) ++ ++#endif /* _LANGUAGE_ASSEMBLY */ ++ ++ ++#if defined(__ARM_ARCH_7M__) /* Cortex-M3 */ ++ ++/* Data Watchpoint and Trigger */ ++#define CM3_DWT_CTRL 0xe0001000 ++#define CM3_DWT_CYCCNT 0xe0001004 ++#define CM3_DWT_CPICNT 0xe0001008 ++#define CM3_DWT_EXCCNT 0xe000100c ++#define CM3_DWT_SLEEPCNT 0xe0001010 ++#define CM3_DWT_LSUCNT 0xe0001014 ++#define CM3_DWT_FOLDCNT 0xe0001018 ++#define CM3_DWT_COMP0 0xe0001020 ++#define CM3_DWT_MASK0 0xe0001024 ++#define CM3_DWT_FUNCTION0 0xe0001028 ++#define CM3_DWT_COMP1 0xe0001030 ++#define CM3_DWT_MASK1 0xe0001034 ++#define CM3_DWT_FUNCTION1 0xe0001038 ++#define CM3_DWT_COMP2 0xe0001040 ++#define CM3_DWT_MASK2 0xe0001044 ++#define CM3_DWT_FUNCTION2 0xe0001048 ++#define CM3_DWT_COMP3 0xe0001050 ++#define CM3_DWT_MASK3 0xe0001054 ++#define CM3_DWT_FUNCTION3 0xe0001058 ++ ++#define CM3_DWT_FUNCTION_DISAB 0 ++#define CM3_DWT_FUNCTION_WP_PCMATCH 4 ++#define CM3_DWT_FUNCTION_WP_READ 5 ++#define CM3_DWT_FUNCTION_WP_WRITE 6 ++#define CM3_DWT_FUNCTION_WP_RDWR 7 ++ ++#define CM3_NVIC_IC_TYPE 0xe000e004 /* Interrupt Control Type Reg */ ++#define CM3_NVIC_TICK_CSR 0xe000e010 /* SysTick Control and Status Reg */ ++#define CM3_NVIC_TICK_CSR_COUNTFLAG 0x10000 ++#define CM3_NVIC_TICK_CSR_CLKSOURCE 0x4 /* Set for core clock, 0 for ext ref */ ++#define CM3_NVIC_TICK_CSR_TICKINT 0x2 /* Set for intr on count going 1 => 0 */ ++#define CM3_NVIC_TICK_CSR_ENABLE 0x1 ++#define CM3_NVIC_TICK_RLDVAL 0xe000e014 /* SysTick Reload Value Reg */ ++#define CM3_NVIC_TICK_CURVAL 0xe000e018 /* SysTick Current Value Reg */ ++#define CM3_NVIC_TICK_CALVAL 0xe000e01c /* SysTick Calibration Value Reg */ ++ ++/* Interrupt enable/disable register */ ++#define CM3_NVIC_IRQ_SET_EN0 0xe000e100 /* Irq 0 to 31 Set Enable Reg */ ++#define CM3_NVIC_IRQ_SET_EN(n) (0xe000e100 + (n) * 4) /* Irq 0-31, 32-63, ..., 224-239 */ ++ ++#define 
CM3_NVIC_IRQ_CLR_EN0 0xe000e180 /* Irq 0 to 31 Clear Enable Reg [...] */ ++#define CM3_NVIC_IRQ_CLR_EN(n) (0xe000e180 + (n) * 4) /* Irq 0-31, 32-63, ..., 224-239 */ ++ ++#define CM3_NVIC_IRQ_SET_PND0 0xe000e200 /* Irq 0 to 31 Set Pending Reg [...] */ ++#define CM3_NVIC_IRQ_SET_PND(n) (0xe000e200 + (n) * 4) /* Irq 0-31, 32-63, ..., 224-239 */ ++ ++#define CM3_NVIC_IRQ_CLR_PND0 0xe000e280 /* Irq 0 to 31 Clear Pending Reg [...] */ ++#define CM3_NVIC_IRQ_CLR_PND(n) (0xe000e280 + (n) * 4) /* Irq 0-31, 32-63, ..., 224-239 */ ++ ++#define CM3_NVIC_IRQ_ACT_BIT0 0xe000e300 /* Irq 0 to 31 Active Bit Reg [...] */ ++#define CM3_NVIC_IRQ_ACT_BIT(n) (0xe000e300 + (n) * 4) /* Irq 0-31, 32-63, ..., 224-239 */ ++ ++#define CM3_NVIC_IRQ_PRIO0 0xe000e400 /* Irq 0 to 31 Priority Reg [...] */ ++#define CM3_NVIC_IRQ_PRIO(n) (0xe000e400 + (n) * 4) /* Irq 0-31, 32-63, ..., 224-239 */ ++ ++/* CPU control */ ++#define CM3_CPUID 0xe000ed00 ++#define CM3_INTCTLSTATE 0xe000ed04 ++#define CM3_VTOFF 0xe000ed08 /* Vector Table Offset */ ++#define CM3_SYSCTRL 0xe000ed10 ++#define CM3_CFGCTRL 0xe000ed14 ++#define CM3_CFGCTRL_UNALIGN_TRP 0x8 ++#define CM3_CFGCTRL_DIV_0_TRP 0x10 ++#define CM3_CFGCTRL_STKALIGN 0x200 ++ ++#define CM3_PFR0 0xe000ed40 ++#define CM3_PFR1 0xe000ed44 ++#define CM3_DFR0 0xe000ed48 ++#define CM3_AFR0 0xe000ed4c ++#define CM3_MMFR0 0xe000ed50 ++#define CM3_MMFR1 0xe000ed54 ++#define CM3_MMFR2 0xe000ed58 ++#define CM3_MMFR3 0xe000ed5c ++#define CM3_ISAR0 0xe000ed60 ++#define CM3_ISAR1 0xe000ed64 ++#define CM3_ISAR2 0xe000ed68 ++#define CM3_ISAR3 0xe000ed6c ++#define CM3_ISAR4 0xe000ed70 ++#define CM3_ISAR5 0xe000ed74 ++ ++#define CM3_MPUTYPE 0xe000ed90 ++#define CM3_MPUCTRL 0xe000ed94 ++#define CM3_REGNUM 0xe000ed98 ++#define CM3_REGBAR 0xe000ed9c ++#define CM3_REGASZ 0xe000eda0 ++#define CM3_AL1BAR 0xe000eda4 ++#define CM3_AL1ASZ 0xe000eda8 ++#define CM3_AL2BAR 0xe000edac ++#define CM3_AL2ASZ 0xe000edb0 ++#define CM3_AL3BAR 0xe000edb4 ++#define CM3_AL3ASZ 0xe000edb8 ++ 
++#define CM3_DBG_HCSR 0xe000edf0 /* Debug Halting Control and Status Reg */ ++#define CM3_DBG_CRSR 0xe000edf4 /* Debug Core Register Selector Reg */ ++#define CM3_DBG_CRDR 0xe000edf8 /* Debug Core Register Data Reg */ ++#define CM3_DBG_EMCR 0xe000edfc /* Debug Exception and Monitor Control Reg */ ++#define CM3_DBG_EMCR_TRCENA (1U << 24) ++#define CM3_DBG_EMCR_MON_EN (1U << 16) ++ ++/* Trap types */ ++#define TR_RST 1 /* Reset */ ++#define TR_NMI 2 /* NMI */ ++#define TR_FAULT 3 /* Hard Fault */ ++#define TR_MM 4 /* Memory Management */ ++#define TR_BUS 5 /* Bus Fault */ ++#define TR_USAGE 6 /* Usage Fault */ ++#define TR_SVC 11 /* SVCall */ ++#define TR_DMON 12 /* Debug Monitor */ ++#define TR_PENDSV 14 /* PendSV */ ++#define TR_SYSTICK 15 /* SysTick */ ++#define TR_ISR 16 /* External Interrupts start here */ ++ ++#define TR_BAD 256 /* Bad trap: Not used by CM3 */ ++ ++/* Offsets of automatically saved registers from sp upon trap */ ++#define CM3_TROFF_R0 0 ++#define CM3_TROFF_R1 4 ++#define CM3_TROFF_R2 8 ++#define CM3_TROFF_R3 12 ++#define CM3_TROFF_R12 16 ++#define CM3_TROFF_LR 20 ++#define CM3_TROFF_PC 24 ++#define CM3_TROFF_xPSR 28 ++ ++#elif defined(__ARM_ARCH_7A__) /* Cortex-A9 */ ++/* Fields in cpsr */ ++#define PS_USR 0x00000010 /* Mode: User */ ++#define PS_FIQ 0x00000011 /* Mode: FIQ */ ++#define PS_IRQ 0x00000012 /* Mode: IRQ */ ++#define PS_SVC 0x00000013 /* Mode: Supervisor */ ++#define PS_ABT 0x00000017 /* Mode: Abort */ ++#define PS_UND 0x0000001b /* Mode: Undefined */ ++#define PS_SYS 0x0000001f /* Mode: System */ ++#define PS_MM 0x0000001f /* Mode bits mask */ ++#define PS_T 0x00000020 /* Thumb mode */ ++#define PS_F 0x00000040 /* FIQ disable */ ++#define PS_I 0x00000080 /* IRQ disable */ ++#define PS_A 0x00000100 /* Imprecise abort */ ++#define PS_E 0x00000200 /* Endianess */ ++#define PS_IT72 0x0000fc00 /* IT[7:2] */ ++#define PS_GE 0x000f0000 /* IT[7:2] */ ++#define PS_J 0x01000000 /* Java state */ ++#define PS_IT10 0x06000000 /* IT[1:0] */ 
++#define PS_Q 0x08000000 /* Sticky overflow */ ++#define PS_V 0x10000000 /* Overflow cc */ ++#define PS_C 0x20000000 /* Carry cc */ ++#define PS_Z 0x40000000 /* Zero cc */ ++#define PS_N 0x80000000 /* Negative cc */ ++ ++/* Trap types */ ++#define TR_RST 0 /* Reset trap */ ++#define TR_UND 1 /* Indefined instruction trap */ ++#define TR_SWI 2 /* Software intrrupt */ ++#define TR_IAB 3 /* Instruction fetch abort */ ++#define TR_DAB 4 /* Data access abort */ ++#define TR_BAD 5 /* Bad trap: Not used by ARM */ ++#define TR_IRQ 6 /* Interrupt */ ++#define TR_FIQ 7 /* Fast interrupt */ ++ ++/* ++ * Memory segments (32bit kernel mode addresses) ++ */ ++#define PHYSADDR_MASK 0xffffffff ++ ++/* ++ * Map an address to a certain kernel segment ++ */ ++#undef PHYSADDR ++#define PHYSADDR(a) (_ULCAST_(a) & PHYSADDR_MASK) ++#else /* !__ARM_ARCH_7M__ */ ++ ++/* Fields in cpsr */ ++#define PS_USR 0x00000010 /* Mode: User */ ++#define PS_FIQ 0x00000011 /* Mode: FIQ */ ++#define PS_IRQ 0x00000012 /* Mode: IRQ */ ++#define PS_SVC 0x00000013 /* Mode: Supervisor */ ++#define PS_ABT 0x00000017 /* Mode: Abort */ ++#define PS_UND 0x0000001b /* Mode: Undefined */ ++#define PS_SYS 0x0000001f /* Mode: System */ ++#define PS_MM 0x0000001f /* Mode bits mask */ ++#define PS_T 0x00000020 /* Thumb mode */ ++#define PS_F 0x00000040 /* FIQ disable */ ++#define PS_I 0x00000080 /* IRQ disable */ ++#define PS_A 0x00000100 /* Imprecise abort */ ++#define PS_E 0x00000200 /* Endianess */ ++#define PS_IT72 0x0000fc00 /* IT[7:2] */ ++#define PS_GE 0x000f0000 /* IT[7:2] */ ++#define PS_J 0x01000000 /* Java state */ ++#define PS_IT10 0x06000000 /* IT[1:0] */ ++#define PS_Q 0x08000000 /* Sticky overflow */ ++#define PS_V 0x10000000 /* Overflow cc */ ++#define PS_C 0x20000000 /* Carry cc */ ++#define PS_Z 0x40000000 /* Zero cc */ ++#define PS_N 0x80000000 /* Negative cc */ ++ ++/* Trap types */ ++#define TR_RST 0 /* Reset trap */ ++#define TR_UND 1 /* Indefined instruction trap */ ++#define TR_SWI 2 /* 
Software intrrupt */ ++#define TR_IAB 3 /* Instruction fetch abort */ ++#define TR_DAB 4 /* Data access abort */ ++#define TR_BAD 5 /* Bad trap: Not used by ARM */ ++#define TR_IRQ 6 /* Interrupt */ ++#define TR_FIQ 7 /* Fast interrupt */ ++ ++#ifdef BCMDBG_ARMRST ++#define TR_ARMRST 0xF /* Debug facility to trap Arm reset */ ++#endif ++ ++/* used to fill an overlay region with nop's */ ++#define NOP_UINT32 0x46c046c0 ++ ++ ++#define mrc(cp, a, b, n) \ ++({ \ ++ int __res; \ ++ __asm__ __volatile__("\tmrc\tp"STR(cp)", 0, %0, c"STR(a)", c"STR(b)", "STR(n) \ ++ :"=r" (__res)); \ ++ __res; \ ++}) ++ ++ ++#endif /* !__ARM_ARCH_7M__ */ ++ ++/* Pieces of a CPU Id */ ++#define CID_IMPL 0xff000000 /* Implementor: 0x41 for ARM Ltd. */ ++#define CID_VARIANT 0x00f00000 ++#define CID_ARCH 0x000f0000 ++#define CID_PART 0x0000fff0 ++#define CID_REV 0x0000000f ++#define CID_MASK (CID_IMPL | CID_ARCH | CID_PART) ++ ++#endif /* _ARMINC_H */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcm_cfg.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcm_cfg.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcm_cfg.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcm_cfg.h 2017-11-09 17:53:43.915303000 +0800 +@@ -0,0 +1,28 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * BCM common config options ++ * ++ * $Id: bcm_cfg.h 294399 2011-11-07 03:31:22Z $ ++ */ ++ ++#ifndef _bcm_cfg_h_ ++#define _bcm_cfg_h_ ++#if defined(__NetBSD__) || defined(__FreeBSD__) ++#if defined(_KERNEL) ++#include ++#endif /* defined(_KERNEL) */ ++#endif /* defined(__NetBSD__) || defined(__FreeBSD__) */ ++#endif /* _bcm_cfg_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcm_mpool_pub.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcm_mpool_pub.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcm_mpool_pub.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcm_mpool_pub.h 2017-11-09 17:53:43.916305000 +0800 +@@ -0,0 +1,355 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * Memory pools library, Public interface ++ * ++ * API Overview ++ * ++ * This package provides a memory allocation subsystem based on pools of ++ * homogenous objects. ++ * ++ * Instrumentation is available for reporting memory utilization both ++ * on a per-data-structure basis and system wide. ++ * ++ * There are two main types defined in this API. ++ * ++ * pool manager: A singleton object that acts as a factory for ++ * pool allocators. It also is used for global ++ * instrumentation, such as reporting all blocks ++ * in use across all data structures. The pool manager ++ * creates and provides individual memory pools ++ * upon request to application code. ++ * ++ * memory pool: An object for allocating homogenous memory blocks. ++ * ++ * Global identifiers in this module use the following prefixes: ++ * bcm_mpm_* Memory pool manager ++ * bcm_mp_* Memory pool ++ * ++ * There are two main types of memory pools: ++ * ++ * prealloc: The contiguous memory block of objects can either be supplied ++ * by the client or malloc'ed by the memory manager. The objects are ++ * allocated out of a block of memory and freed back to the block. ++ * ++ * heap: The memory pool allocator uses the heap (malloc/free) for memory. ++ * In this case, the pool allocator is just providing statistics ++ * and instrumentation on top of the heap, without modifying the heap ++ * allocation implementation. ++ * ++ * $Id$ ++ */ ++ ++#ifndef _BCM_MPOOL_PUB_H ++#define _BCM_MPOOL_PUB_H 1 ++ ++#include /* needed for uint16 */ ++ ++ ++/* ++************************************************************************** ++* ++* Type definitions, handles ++* ++************************************************************************** ++*/ ++ ++/* Forward declaration of OSL handle. */ ++struct osl_info; ++ ++/* Forward declaration of string buffer. */ ++struct bcmstrbuf; ++ ++/* ++ * Opaque type definition for the pool manager handle. 
This object is used for global ++ * memory pool operations such as obtaining a new pool, deleting a pool, iterating and ++ * instrumentation/debugging. ++ */ ++struct bcm_mpm_mgr; ++typedef struct bcm_mpm_mgr *bcm_mpm_mgr_h; ++ ++/* ++ * Opaque type definition for an instance of a pool. This handle is used for allocating ++ * and freeing memory through the pool, as well as management/instrumentation on this ++ * specific pool. ++ */ ++struct bcm_mp_pool; ++typedef struct bcm_mp_pool *bcm_mp_pool_h; ++ ++ ++/* ++ * To make instrumentation more readable, every memory ++ * pool must have a readable name. Pool names are up to ++ * 8 bytes including '\0' termination. (7 printable characters.) ++ */ ++#define BCM_MP_NAMELEN 8 ++ ++ ++/* ++ * Type definition for pool statistics. ++ */ ++typedef struct bcm_mp_stats { ++ char name[BCM_MP_NAMELEN]; /* Name of this pool. */ ++ unsigned int objsz; /* Object size allocated in this pool */ ++ uint16 nobj; /* Total number of objects in this pool */ ++ uint16 num_alloc; /* Number of objects currently allocated */ ++ uint16 high_water; /* Max number of allocated objects. */ ++ uint16 failed_alloc; /* Failed allocations. */ ++} bcm_mp_stats_t; ++ ++ ++/* ++************************************************************************** ++* ++* API Routines on the pool manager. ++* ++************************************************************************** ++*/ ++ ++/* ++ * bcm_mpm_init() - initialize the whole memory pool system. ++ * ++ * Parameters: ++ * osh: INPUT Operating system handle. Needed for heap memory allocation. ++ * max_pools: INPUT Maximum number of mempools supported. ++ * mgr: OUTPUT The handle is written with the new pools manager object/handle. ++ * ++ * Returns: ++ * BCME_OK Object initialized successfully. May be used. ++ * BCME_NOMEM Initialization failed due to no memory. Object must not be used. 
++ */ ++int bcm_mpm_init(struct osl_info *osh, int max_pools, bcm_mpm_mgr_h *mgrp); ++ ++ ++/* ++ * bcm_mpm_deinit() - de-initialize the whole memory pool system. ++ * ++ * Parameters: ++ * mgr: INPUT Pointer to pool manager handle. ++ * ++ * Returns: ++ * BCME_OK Memory pool manager successfully de-initialized. ++ * other Indicated error occured during de-initialization. ++ */ ++int bcm_mpm_deinit(bcm_mpm_mgr_h *mgrp); ++ ++/* ++ * bcm_mpm_create_prealloc_pool() - Create a new pool for fixed size objects. The ++ * pool uses a contiguous block of pre-alloced ++ * memory. The memory block may either be provided ++ * by the client or dynamically allocated by the ++ * pool manager. ++ * ++ * Parameters: ++ * mgr: INPUT The handle to the pool manager ++ * obj_sz: INPUT Size of objects that will be allocated by the new pool ++ * Must be >= sizeof(void *). ++ * nobj: INPUT Maximum number of concurrently existing objects to support ++ * memstart INPUT Pointer to the memory to use, or NULL to malloc() ++ * memsize INPUT Number of bytes referenced from memstart (for error checking). ++ * Must be 0 if 'memstart' is NULL. ++ * poolname INPUT For instrumentation, the name of the pool ++ * newp: OUTPUT The handle for the new pool, if creation is successful ++ * ++ * Returns: ++ * BCME_OK Pool created ok. ++ * other Pool not created due to indicated error. newpoolp set to NULL. ++ * ++ * ++ */ ++int bcm_mpm_create_prealloc_pool(bcm_mpm_mgr_h mgr, ++ unsigned int obj_sz, ++ int nobj, ++ void *memstart, ++ unsigned int memsize, ++ char poolname[BCM_MP_NAMELEN], ++ bcm_mp_pool_h *newp); ++ ++ ++/* ++ * bcm_mpm_delete_prealloc_pool() - Delete a memory pool. This should only be called after ++ * all memory objects have been freed back to the pool. ++ * ++ * Parameters: ++ * mgr: INPUT The handle to the pools manager ++ * pool: INPUT The handle of the pool to delete ++ * ++ * Returns: ++ * BCME_OK Pool deleted ok. ++ * other Pool not deleted due to indicated error. 
++ * ++ */ ++int bcm_mpm_delete_prealloc_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp); ++ ++/* ++ * bcm_mpm_create_heap_pool() - Create a new pool for fixed size objects. The memory ++ * pool allocator uses the heap (malloc/free) for memory. ++ * In this case, the pool allocator is just providing ++ * statistics and instrumentation on top of the heap, ++ * without modifying the heap allocation implementation. ++ * ++ * Parameters: ++ * mgr: INPUT The handle to the pool manager ++ * obj_sz: INPUT Size of objects that will be allocated by the new pool ++ * poolname INPUT For instrumentation, the name of the pool ++ * newp: OUTPUT The handle for the new pool, if creation is successful ++ * ++ * Returns: ++ * BCME_OK Pool created ok. ++ * other Pool not created due to indicated error. newpoolp set to NULL. ++ * ++ * ++ */ ++int bcm_mpm_create_heap_pool(bcm_mpm_mgr_h mgr, unsigned int obj_sz, ++ char poolname[BCM_MP_NAMELEN], ++ bcm_mp_pool_h *newp); ++ ++ ++/* ++ * bcm_mpm_delete_heap_pool() - Delete a memory pool. This should only be called after ++ * all memory objects have been freed back to the pool. ++ * ++ * Parameters: ++ * mgr: INPUT The handle to the pools manager ++ * pool: INPUT The handle of the pool to delete ++ * ++ * Returns: ++ * BCME_OK Pool deleted ok. ++ * other Pool not deleted due to indicated error. ++ * ++ */ ++int bcm_mpm_delete_heap_pool(bcm_mpm_mgr_h mgr, bcm_mp_pool_h *poolp); ++ ++ ++/* ++ * bcm_mpm_stats() - Return stats for all pools ++ * ++ * Parameters: ++ * mgr: INPUT The handle to the pools manager ++ * stats: OUTPUT Array of pool statistics. ++ * nentries: MOD Max elements in 'stats' array on INPUT. Actual number ++ * of array elements copied to 'stats' on OUTPUT. ++ * ++ * Returns: ++ * BCME_OK Ok ++ * other Error getting stats. 
++ * ++ */ ++int bcm_mpm_stats(bcm_mpm_mgr_h mgr, bcm_mp_stats_t *stats, int *nentries); ++ ++ ++/* ++ * bcm_mpm_dump() - Display statistics on all pools ++ * ++ * Parameters: ++ * mgr: INPUT The handle to the pools manager ++ * b: OUTPUT Output buffer. ++ * ++ * Returns: ++ * BCME_OK Ok ++ * other Error during dump. ++ * ++ */ ++int bcm_mpm_dump(bcm_mpm_mgr_h mgr, struct bcmstrbuf *b); ++ ++ ++/* ++ * bcm_mpm_get_obj_size() - The size of memory objects may need to be padded to ++ * compensate for alignment requirements of the objects. ++ * This function provides the padded object size. If clients ++ * pre-allocate a memory slab for a memory pool, the ++ * padded object size should be used by the client to allocate ++ * the memory slab (in order to provide sufficent space for ++ * the maximum number of objects). ++ * ++ * Parameters: ++ * mgr: INPUT The handle to the pools manager. ++ * obj_sz: INPUT Input object size. ++ * padded_obj_sz: OUTPUT Padded object size. ++ * ++ * Returns: ++ * BCME_OK Ok ++ * BCME_BADARG Bad arguments. ++ * ++ */ ++int bcm_mpm_get_obj_size(bcm_mpm_mgr_h mgr, unsigned int obj_sz, unsigned int *padded_obj_sz); ++ ++ ++/* ++*************************************************************************** ++* ++* API Routines on a specific pool. ++* ++*************************************************************************** ++*/ ++ ++ ++/* ++ * bcm_mp_alloc() - Allocate a memory pool object. ++ * ++ * Parameters: ++ * pool: INPUT The handle to the pool. ++ * ++ * Returns: ++ * A pointer to the new object. NULL on error. ++ * ++ */ ++void* bcm_mp_alloc(bcm_mp_pool_h pool); ++ ++/* ++ * bcm_mp_free() - Free a memory pool object. ++ * ++ * Parameters: ++ * pool: INPUT The handle to the pool. ++ * objp: INPUT A pointer to the object to free. ++ * ++ * Returns: ++ * BCME_OK Ok ++ * other Error during free. 
++ * ++ */ ++int bcm_mp_free(bcm_mp_pool_h pool, void *objp); ++ ++/* ++ * bcm_mp_stats() - Return stats for this pool ++ * ++ * Parameters: ++ * pool: INPUT The handle to the pool ++ * stats: OUTPUT Pool statistics ++ * ++ * Returns: ++ * BCME_OK Ok ++ * other Error getting statistics. ++ * ++ */ ++int bcm_mp_stats(bcm_mp_pool_h pool, bcm_mp_stats_t *stats); ++ ++ ++/* ++ * bcm_mp_dump() - Dump a pool ++ * ++ * Parameters: ++ * pool: INPUT The handle to the pool ++ * b OUTPUT Output buffer ++ * ++ * Returns: ++ * BCME_OK Ok ++ * other Error during dump. ++ * ++ */ ++int bcm_mp_dump(bcm_mp_pool_h pool, struct bcmstrbuf *b); ++ ++ ++#endif /* _BCM_MPOOL_PUB_H */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmcdc.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmcdc.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmcdc.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmcdc.h 2017-11-09 17:53:43.917296000 +0800 +@@ -0,0 +1,122 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * CDC network driver ioctl/indication encoding ++ * Broadcom 802.11abg Networking Device Driver ++ * ++ * Definitions subject to change without notice. ++ * ++ * $Id: bcmcdc.h 291086 2011-10-21 01:17:24Z $ ++ */ ++#ifndef _bcmcdc_h_ ++#define _bcmcdc_h_ ++#include ++ ++typedef struct cdc_ioctl { ++ uint32 cmd; /* ioctl command value */ ++ uint32 len; /* lower 16: output buflen; upper 16: input buflen (excludes header) */ ++ uint32 flags; /* flag defns given below */ ++ uint32 status; /* status code returned from the device */ ++} cdc_ioctl_t; ++ ++/* Max valid buffer size that can be sent to the dongle */ ++#define CDC_MAX_MSG_SIZE ETHER_MAX_LEN ++ ++/* len field is divided into input and output buffer lengths */ ++#define CDCL_IOC_OUTLEN_MASK 0x0000FFFF /* maximum or expected response length, */ ++ /* excluding IOCTL header */ ++#define CDCL_IOC_OUTLEN_SHIFT 0 ++#define CDCL_IOC_INLEN_MASK 0xFFFF0000 /* input buffer length, excluding IOCTL header */ ++#define CDCL_IOC_INLEN_SHIFT 16 ++ ++/* CDC flag definitions */ ++#define CDCF_IOC_ERROR 0x01 /* 0=success, 1=ioctl cmd failed */ ++#define CDCF_IOC_SET 0x02 /* 0=get, 1=set cmd */ ++#define CDCF_IOC_OVL_IDX_MASK 0x3c /* overlay region index mask */ ++#define CDCF_IOC_OVL_RSV 0x40 /* 1=reserve this overlay region */ ++#define CDCF_IOC_OVL 0x80 /* 1=this ioctl corresponds to an overlay */ ++#define CDCF_IOC_ACTION_MASK 0xfe /* SET/GET, OVL_IDX, OVL_RSV, OVL mask */ ++#define CDCF_IOC_ACTION_SHIFT 1 /* SET/GET, OVL_IDX, OVL_RSV, OVL shift */ ++#define CDCF_IOC_IF_MASK 0xF000 /* I/F index */ ++#define CDCF_IOC_IF_SHIFT 12 ++#define CDCF_IOC_ID_MASK 0xFFFF0000 /* used to uniquely id an ioctl req/resp pairing */ ++#define CDCF_IOC_ID_SHIFT 16 /* # of bits of shift for ID Mask */ ++ ++#define CDC_IOC_IF_IDX(flags) (((flags) & CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT) ++#define CDC_IOC_ID(flags) (((flags) & CDCF_IOC_ID_MASK) >> CDCF_IOC_ID_SHIFT) ++ ++#define CDC_GET_IF_IDX(hdr) \ ++ ((int)((((hdr)->flags) & 
CDCF_IOC_IF_MASK) >> CDCF_IOC_IF_SHIFT)) ++#define CDC_SET_IF_IDX(hdr, idx) \ ++ ((hdr)->flags = (((hdr)->flags & ~CDCF_IOC_IF_MASK) | ((idx) << CDCF_IOC_IF_SHIFT))) ++ ++/* ++ * BDC header ++ * ++ * The BDC header is used on data packets to convey priority across USB. ++ */ ++ ++#define BDC_HEADER_LEN 4 ++ ++#define BDC_PROTO_VER_1 1 /* Old Protocol version */ ++#define BDC_PROTO_VER 2 /* Protocol version */ ++ ++#define BDC_FLAG_VER_MASK 0xf0 /* Protocol version mask */ ++#define BDC_FLAG_VER_SHIFT 4 /* Protocol version shift */ ++ ++#define BDC_FLAG__UNUSED 0x03 /* Unassigned */ ++#define BDC_FLAG_SUM_GOOD 0x04 /* Dongle has verified good RX checksums */ ++#define BDC_FLAG_SUM_NEEDED 0x08 /* Dongle needs to do TX checksums */ ++ ++#define BDC_PRIORITY_MASK 0x7 ++ ++#define BDC_FLAG2_FC_FLAG 0x10 /* flag to indicate if pkt contains */ ++ /* FLOW CONTROL info only */ ++#define BDC_PRIORITY_FC_SHIFT 4 /* flow control info shift */ ++ ++#define BDC_FLAG2_IF_MASK 0x0f /* APSTA: interface on which the packet was received */ ++#define BDC_FLAG2_IF_SHIFT 0 ++#define BDC_FLAG2_PAD_MASK 0xf0 ++#define BDC_FLAG_PAD_MASK 0x03 ++#define BDC_FLAG2_PAD_SHIFT 2 ++#define BDC_FLAG_PAD_SHIFT 0 ++#define BDC_FLAG2_PAD_IDX 0x3c ++#define BDC_FLAG_PAD_IDX 0x03 ++#define BDC_GET_PAD_LEN(hdr) \ ++ ((int)(((((hdr)->flags2) & BDC_FLAG2_PAD_MASK) >> BDC_FLAG2_PAD_SHIFT) | \ ++ ((((hdr)->flags) & BDC_FLAG_PAD_MASK) >> BDC_FLAG_PAD_SHIFT))) ++#define BDC_SET_PAD_LEN(hdr, idx) \ ++ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_PAD_MASK) | \ ++ (((idx) & BDC_FLAG2_PAD_IDX) << BDC_FLAG2_PAD_SHIFT))); \ ++ ((hdr)->flags = (((hdr)->flags & ~BDC_FLAG_PAD_MASK) | \ ++ (((idx) & BDC_FLAG_PAD_IDX) << BDC_FLAG_PAD_SHIFT))) ++ ++#define BDC_GET_IF_IDX(hdr) \ ++ ((int)((((hdr)->flags2) & BDC_FLAG2_IF_MASK) >> BDC_FLAG2_IF_SHIFT)) ++#define BDC_SET_IF_IDX(hdr, idx) \ ++ ((hdr)->flags2 = (((hdr)->flags2 & ~BDC_FLAG2_IF_MASK) | ((idx) << BDC_FLAG2_IF_SHIFT))) ++ ++struct bdc_header { ++ uint8 flags; /* 
Flags */ ++ uint8 priority; /* 802.1d Priority 0:2 bits, 4:7 USB flow control info */ ++ uint8 flags2; ++ uint8 dataOffset; /* Offset from end of BDC header to packet data, in ++ * 4-byte words. Leaves room for optional headers. ++ */ ++}; ++ ++#define BDC_PROTO_VER_1 1 /* Old Protocol version */ ++ ++#endif /* _bcmcdc_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmdefs.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmdefs.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmdefs.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmdefs.h 2017-11-09 17:53:43.918300000 +0800 +@@ -0,0 +1,332 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Misc system wide definitions ++ * ++ * $Id: bcmdefs.h 316696 2012-02-23 03:29:35Z $ ++ */ ++ ++#ifndef _bcmdefs_h_ ++#define _bcmdefs_h_ ++ ++/* ++ * One doesn't need to include this file explicitly, gets included automatically if ++ * typedefs.h is included. 
++ */ ++ ++/* Use BCM_REFERENCE to suppress warnings about intentionally-unused function ++ * arguments or local variables. ++ */ ++#define BCM_REFERENCE(data) ((void)(data)) ++ ++/* Compile-time assert can be used in place of ASSERT if the expression evaluates ++ * to a constant at compile time. ++ */ ++#define STATIC_ASSERT(expr) { \ ++ /* Make sure the expression is constant. */ \ ++ typedef enum { _STATIC_ASSERT_NOT_CONSTANT = (expr) } _static_assert_e; \ ++ /* Make sure the expression is true. */ \ ++ typedef char STATIC_ASSERT_FAIL[(expr) ? 1 : -1]; \ ++} ++ ++/* Reclaiming text and data : ++ * The following macros specify special linker sections that can be reclaimed ++ * after a system is considered 'up'. ++ * BCMATTACHFN is also used for detach functions (it's not worth having a BCMDETACHFN, ++ * as in most cases, the attach function calls the detach function to clean up on error). ++ */ ++#ifdef DONGLEBUILD ++ ++extern bool bcmreclaimed; ++extern bool attach_part_reclaimed; ++ ++#define BCMATTACHDATA(_data) __attribute__ ((__section__ (".dataini2." #_data))) _data ++#define BCMATTACHFN(_fn) __attribute__ ((__section__ (".textini2." #_fn), noinline)) _fn ++ ++#ifndef PREATTACH_NORECLAIM ++#define BCMPREATTACHDATA(_data) __attribute__ ((__section__ (".dataini3." #_data))) _data ++#define BCMPREATTACHFN(_fn) __attribute__ ((__section__ (".textini3." #_fn), noinline)) _fn ++#else ++#define BCMPREATTACHDATA(_data) __attribute__ ((__section__ (".dataini2." #_data))) _data ++#define BCMPREATTACHFN(_fn) __attribute__ ((__section__ (".textini2." #_fn), noinline)) _fn ++#endif ++ ++#if defined(BCMRECLAIM) ++#define BCMINITDATA(_data) __attribute__ ((__section__ (".dataini1." #_data))) _data ++#define BCMINITFN(_fn) __attribute__ ((__section__ (".textini1." 
#_fn), noinline)) _fn ++#define CONST ++#else ++#define BCMINITDATA(_data) _data ++#define BCMINITFN(_fn) _fn ++#define CONST const ++#endif ++ ++/* Non-manufacture or internal attach function/dat */ ++#if !defined(WLTEST) ++#define BCMNMIATTACHFN(_fn) BCMATTACHFN(_fn) ++#define BCMNMIATTACHDATA(_data) BCMATTACHDATA(_data) ++#else ++#define BCMNMIATTACHFN(_fn) _fn ++#define BCMNMIATTACHDATA(_data) _data ++#endif ++ ++#define BCMUNINITFN(_fn) _fn ++ ++#define BCMFASTPATH ++#else /* DONGLEBUILD */ ++ ++#define bcmreclaimed 0 ++#define BCMATTACHDATA(_data) _data ++#define BCMATTACHFN(_fn) _fn ++#define BCMPREATTACHDATA(_data) _data ++#define BCMPREATTACHFN(_fn) _fn ++#define BCMINITDATA(_data) _data ++#define BCMINITFN(_fn) _fn ++#define BCMUNINITFN(_fn) _fn ++#define BCMNMIATTACHFN(_fn) _fn ++#define BCMNMIATTACHDATA(_data) _data ++#define CONST const ++#if defined(__ARM_ARCH_7A__) ++#define BCM47XX_CA9 ++#else ++#undef BCM47XX_CA9 ++#endif ++#ifndef BCMFASTPATH ++#if defined(mips) || defined(BCM47XX_CA9) ++#define BCMFASTPATH __attribute__ ((__section__ (".text.fastpath"))) ++#define BCMFASTPATH_HOST __attribute__ ((__section__ (".text.fastpath_host"))) ++#else ++#define BCMFASTPATH ++#define BCMFASTPATH_HOST ++#endif ++#endif /* BCMFASTPATH */ ++ ++#endif /* DONGLEBUILD */ ++ ++#if defined(BCMROMBUILD) ++typedef struct { ++ uint16 esiz; ++ uint16 cnt; ++ void *addr; ++} bcmromdat_patch_t; ++#endif ++ ++/* Put some library data/code into ROM to reduce RAM requirements */ ++#if defined(BCMROMBUILD) && !defined(BCMROMSYMGEN_BUILD) && !defined(BCMJMPTBL_TCAM) ++#include ++#define STATIC static ++#else /* !BCMROMBUILD */ ++#define BCMROMDATA(_data) _data ++#define BCMROMDAT_NAME(_data) _data ++#define BCMROMFN(_fn) _fn ++#define BCMROMFN_NAME(_fn) _fn ++#define STATIC static ++#define BCMROMDAT_ARYSIZ(data) ARRAYSIZE(data) ++#define BCMROMDAT_SIZEOF(data) sizeof(data) ++#define BCMROMDAT_APATCH(data) ++#define BCMROMDAT_SPATCH(data) ++#endif /* !BCMROMBUILD */ ++ ++/* 
Bus types */ ++#define SI_BUS 0 /* SOC Interconnect */ ++#define PCI_BUS 1 /* PCI target */ ++#define PCMCIA_BUS 2 /* PCMCIA target */ ++#define SDIO_BUS 3 /* SDIO target */ ++#define JTAG_BUS 4 /* JTAG */ ++#define USB_BUS 5 /* USB (does not support R/W REG) */ ++#define SPI_BUS 6 /* gSPI target */ ++#define RPC_BUS 7 /* RPC target */ ++ ++/* Allows size optimization for single-bus image */ ++#ifdef BCMBUSTYPE ++#define BUSTYPE(bus) (BCMBUSTYPE) ++#else ++#define BUSTYPE(bus) (bus) ++#endif ++ ++/* Allows size optimization for single-backplane image */ ++#ifdef BCMCHIPTYPE ++#define CHIPTYPE(bus) (BCMCHIPTYPE) ++#else ++#define CHIPTYPE(bus) (bus) ++#endif ++ ++ ++/* Allows size optimization for SPROM support */ ++#if defined(BCMSPROMBUS) ++#define SPROMBUS (BCMSPROMBUS) ++#elif defined(SI_PCMCIA_SROM) ++#define SPROMBUS (PCMCIA_BUS) ++#else ++#define SPROMBUS (PCI_BUS) ++#endif ++ ++/* Allows size optimization for single-chip image */ ++#ifdef BCMCHIPID ++#define CHIPID(chip) (BCMCHIPID) ++#else ++#define CHIPID(chip) (chip) ++#endif ++ ++#ifdef BCMCHIPREV ++#define CHIPREV(rev) (BCMCHIPREV) ++#else ++#define CHIPREV(rev) (rev) ++#endif ++ ++/* Defines for DMA Address Width - Shared between OSL and HNDDMA */ ++#define DMADDR_MASK_32 0x0 /* Address mask for 32-bits */ ++#define DMADDR_MASK_30 0xc0000000 /* Address mask for 30-bits */ ++#define DMADDR_MASK_0 0xffffffff /* Address mask for 0-bits (hi-part) */ ++ ++#define DMADDRWIDTH_30 30 /* 30-bit addressing capability */ ++#define DMADDRWIDTH_32 32 /* 32-bit addressing capability */ ++#define DMADDRWIDTH_63 63 /* 64-bit addressing capability */ ++#define DMADDRWIDTH_64 64 /* 64-bit addressing capability */ ++ ++#ifdef BCMDMA64OSL ++typedef struct { ++ uint32 loaddr; ++ uint32 hiaddr; ++} dma64addr_t; ++ ++typedef dma64addr_t dmaaddr_t; ++#define PHYSADDRHI(_pa) ((_pa).hiaddr) ++#define PHYSADDRHISET(_pa, _val) \ ++ do { \ ++ (_pa).hiaddr = (_val); \ ++ } while (0) ++#define PHYSADDRLO(_pa) ((_pa).loaddr) 
++#define PHYSADDRLOSET(_pa, _val) \ ++ do { \ ++ (_pa).loaddr = (_val); \ ++ } while (0) ++ ++#else ++typedef unsigned long dmaaddr_t; ++#define PHYSADDRHI(_pa) (0) ++#define PHYSADDRHISET(_pa, _val) ++#define PHYSADDRLO(_pa) ((_pa)) ++#define PHYSADDRLOSET(_pa, _val) \ ++ do { \ ++ (_pa) = (_val); \ ++ } while (0) ++#endif /* BCMDMA64OSL */ ++ ++/* One physical DMA segment */ ++typedef struct { ++ dmaaddr_t addr; ++ uint32 length; ++} hnddma_seg_t; ++ ++#if defined(MACOSX) ++/* In MacOS, the OS API may return large number of segments. Setting this number lower ++ * will result in failure of dma map ++ */ ++#define MAX_DMA_SEGS 8 ++#elif defined(__NetBSD__) ++/* In NetBSD we also want more segments because the lower level mbuf mapping api might ++ * allocate a large number of segments ++ */ ++#define MAX_DMA_SEGS 16 ++#else ++#define MAX_DMA_SEGS 4 ++#endif ++ ++ ++typedef struct { ++ void *oshdmah; /* Opaque handle for OSL to store its information */ ++ uint origsize; /* Size of the virtual packet */ ++ uint nsegs; ++ hnddma_seg_t segs[MAX_DMA_SEGS]; ++} hnddma_seg_map_t; ++ ++ ++/* packet headroom necessary to accommodate the largest header in the system, (i.e TXOFF). ++ * By doing, we avoid the need to allocate an extra buffer for the header when bridging to WL. ++ * There is a compile time check in wlc.c which ensure that this value is at least as big ++ * as TXOFF. This value is used in dma_rxfill (hnddma.c). ++ */ ++ ++#if defined(BCM_RPC_NOCOPY) || defined(BCM_RCP_TXNOCOPY) ++/* add 40 bytes to allow for extra RPC header and info */ ++#define BCMEXTRAHDROOM 260 ++#else /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */ ++#define BCMEXTRAHDROOM 204 ++#endif /* BCM_RPC_NOCOPY || BCM_RPC_TXNOCOPY */ ++ ++/* Packet alignment for most efficient SDIO (can change based on platform) */ ++#ifndef SDALIGN ++#define SDALIGN 32 ++#endif ++ ++/* Headroom required for dongle-to-host communication. Packets allocated ++ * locally in the dongle (e.g. 
for CDC ioctls or RNDIS messages) should ++ * leave this much room in front for low-level message headers which may ++ * be needed to get across the dongle bus to the host. (These messages ++ * don't go over the network, so room for the full WL header above would ++ * be a waste.). ++*/ ++#define BCMDONGLEHDRSZ 12 ++#define BCMDONGLEPADSZ 16 ++ ++#define BCMDONGLEOVERHEAD (BCMDONGLEHDRSZ + BCMDONGLEPADSZ) ++ ++#ifdef BCMDBG ++ ++#ifndef BCMDBG_ERR ++#define BCMDBG_ERR ++#endif /* BCMDBG_ERR */ ++ ++#define BCMDBG_ASSERT ++ ++#endif /* BCMDBG */ ++ ++ ++/* Macros for doing definition and get/set of bitfields ++ * Usage example, e.g. a three-bit field (bits 4-6): ++ * #define _M BITFIELD_MASK(3) ++ * #define _S 4 ++ * ... ++ * regval = R_REG(osh, ®s->regfoo); ++ * field = GFIELD(regval, ); ++ * regval = SFIELD(regval, , 1); ++ * W_REG(osh, ®s->regfoo, regval); ++ */ ++#define BITFIELD_MASK(width) \ ++ (((unsigned)1 << (width)) - 1) ++#define GFIELD(val, field) \ ++ (((val) >> field ## _S) & field ## _M) ++#define SFIELD(val, field, bits) \ ++ (((val) & (~(field ## _M << field ## _S))) | \ ++ ((unsigned)(bits) << field ## _S)) ++ ++/* define BCMSMALL to remove misc features for memory-constrained environments */ ++#ifdef BCMSMALL ++#undef BCMSPACE ++#define bcmspace FALSE /* if (bcmspace) code is discarded */ ++#else ++#define BCMSPACE ++#define bcmspace TRUE /* if (bcmspace) code is retained */ ++#endif ++ ++/* Max. 
nvram variable table size */ ++#define MAXSZ_NVRAM_VARS 4096 ++ ++#ifdef EFI ++#define __attribute__(x) /* CSTYLED */ ++#endif ++ ++#endif /* _bcmdefs_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmdevs.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmdevs.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmdevs.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmdevs.h 2017-11-09 17:53:43.920291000 +0800 +@@ -0,0 +1,870 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom device-specific manifest constants. 
++ * ++ * $Id: bcmdevs.h 328955 2012-04-23 09:06:12Z $ ++ */ ++ ++#ifndef _BCMDEVS_H ++#define _BCMDEVS_H ++ ++/* PCI vendor IDs */ ++#define VENDOR_EPIGRAM 0xfeda ++#define VENDOR_BROADCOM 0x14e4 ++#define VENDOR_3COM 0x10b7 ++#define VENDOR_NETGEAR 0x1385 ++#define VENDOR_DIAMOND 0x1092 ++#define VENDOR_INTEL 0x8086 ++#define VENDOR_DELL 0x1028 ++#define VENDOR_HP 0x103c ++#define VENDOR_HP_COMPAQ 0x0e11 ++#define VENDOR_APPLE 0x106b ++#define VENDOR_SI_IMAGE 0x1095 /* Silicon Image, used by Arasan SDIO Host */ ++#define VENDOR_BUFFALO 0x1154 /* Buffalo vendor id */ ++#define VENDOR_TI 0x104c /* Texas Instruments */ ++#define VENDOR_RICOH 0x1180 /* Ricoh */ ++#define VENDOR_JMICRON 0x197b ++ ++ ++/* PCMCIA vendor IDs */ ++#define VENDOR_BROADCOM_PCMCIA 0x02d0 ++ ++/* SDIO vendor IDs */ ++#define VENDOR_BROADCOM_SDIO 0x00BF ++ ++/* DONGLE VID/PIDs */ ++#define BCM_DNGL_VID 0x0a5c ++#define BCM_DNGL_BL_PID_4328 0xbd12 ++#define BCM_DNGL_BL_PID_4322 0xbd13 ++#define BCM_DNGL_BL_PID_4319 0xbd16 ++#define BCM_DNGL_BL_PID_43236 0xbd17 ++#define BCM_DNGL_BL_PID_4332 0xbd18 ++#define BCM_DNGL_BL_PID_4330 0xbd19 ++#define BCM_DNGL_BL_PID_4334 0xbd1a ++#define BCM_DNGL_BL_PID_43239 0xbd1b ++#define BCM_DNGL_BL_PID_4324 0xbd1c ++#define BCM_DNGL_BL_PID_4360 0xbd1d ++#define BCM_DNGL_BL_PID_4335 0xbd20 ++ ++#define BCM_DNGL_BDC_PID 0x0bdc ++#define BCM_DNGL_JTAG_PID 0x4a44 ++ ++/* HW USB BLOCK [CPULESS USB] PIDs */ ++#define BCM_HWUSB_PID_43239 43239 ++ ++/* PCI Device IDs */ ++#define BCM4210_DEVICE_ID 0x1072 /* never used */ ++#define BCM4230_DEVICE_ID 0x1086 /* never used */ ++#define BCM4401_ENET_ID 0x170c /* 4401b0 production enet cards */ ++#define BCM3352_DEVICE_ID 0x3352 /* bcm3352 device id */ ++#define BCM3360_DEVICE_ID 0x3360 /* bcm3360 device id */ ++#define BCM4211_DEVICE_ID 0x4211 ++#define BCM4231_DEVICE_ID 0x4231 ++#define BCM4303_D11B_ID 0x4303 /* 4303 802.11b */ ++#define BCM4311_D11G_ID 0x4311 /* 4311 802.11b/g id */ ++#define BCM4311_D11DUAL_ID 0x4312 /* 
4311 802.11a/b/g id */ ++#define BCM4311_D11A_ID 0x4313 /* 4311 802.11a id */ ++#define BCM4328_D11DUAL_ID 0x4314 /* 4328/4312 802.11a/g id */ ++#define BCM4328_D11G_ID 0x4315 /* 4328/4312 802.11g id */ ++#define BCM4328_D11A_ID 0x4316 /* 4328/4312 802.11a id */ ++#define BCM4318_D11G_ID 0x4318 /* 4318 802.11b/g id */ ++#define BCM4318_D11DUAL_ID 0x4319 /* 4318 802.11a/b/g id */ ++#define BCM4318_D11A_ID 0x431a /* 4318 802.11a id */ ++#define BCM4325_D11DUAL_ID 0x431b /* 4325 802.11a/g id */ ++#define BCM4325_D11G_ID 0x431c /* 4325 802.11g id */ ++#define BCM4325_D11A_ID 0x431d /* 4325 802.11a id */ ++#define BCM4306_D11G_ID 0x4320 /* 4306 802.11g */ ++#define BCM4306_D11A_ID 0x4321 /* 4306 802.11a */ ++#define BCM4306_UART_ID 0x4322 /* 4306 uart */ ++#define BCM4306_V90_ID 0x4323 /* 4306 v90 codec */ ++#define BCM4306_D11DUAL_ID 0x4324 /* 4306 dual A+B */ ++#define BCM4306_D11G_ID2 0x4325 /* BCM4306_D11G_ID; INF w/loose binding war */ ++#define BCM4321_D11N_ID 0x4328 /* 4321 802.11n dualband id */ ++#define BCM4321_D11N2G_ID 0x4329 /* 4321 802.11n 2.4Ghz band id */ ++#define BCM4321_D11N5G_ID 0x432a /* 4321 802.11n 5Ghz band id */ ++#define BCM4322_D11N_ID 0x432b /* 4322 802.11n dualband device */ ++#define BCM4322_D11N2G_ID 0x432c /* 4322 802.11n 2.4GHz device */ ++#define BCM4322_D11N5G_ID 0x432d /* 4322 802.11n 5GHz device */ ++#define BCM4329_D11N_ID 0x432e /* 4329 802.11n dualband device */ ++#define BCM4329_D11N2G_ID 0x432f /* 4329 802.11n 2.4G device */ ++#define BCM4329_D11N5G_ID 0x4330 /* 4329 802.11n 5G device */ ++#define BCM4315_D11DUAL_ID 0x4334 /* 4315 802.11a/g id */ ++#define BCM4315_D11G_ID 0x4335 /* 4315 802.11g id */ ++#define BCM4315_D11A_ID 0x4336 /* 4315 802.11a id */ ++#define BCM4319_D11N_ID 0x4337 /* 4319 802.11n dualband device */ ++#define BCM4319_D11N2G_ID 0x4338 /* 4319 802.11n 2.4G device */ ++#define BCM4319_D11N5G_ID 0x4339 /* 4319 802.11n 5G device */ ++#define BCM43231_D11N2G_ID 0x4340 /* 43231 802.11n 2.4GHz device */ ++#define 
BCM43221_D11N2G_ID 0x4341 /* 43221 802.11n 2.4GHz device */ ++#define BCM43222_D11N_ID 0x4350 /* 43222 802.11n dualband device */ ++#define BCM43222_D11N2G_ID 0x4351 /* 43222 802.11n 2.4GHz device */ ++#define BCM43222_D11N5G_ID 0x4352 /* 43222 802.11n 5GHz device */ ++#define BCM43224_D11N_ID 0x4353 /* 43224 802.11n dualband device */ ++#define BCM43224_D11N_ID_VEN1 0x0576 /* Vendor specific 43224 802.11n db device */ ++#define BCM43226_D11N_ID 0x4354 /* 43226 802.11n dualband device */ ++#define BCM43236_D11N_ID 0x4346 /* 43236 802.11n dualband device */ ++#define BCM43236_D11N2G_ID 0x4347 /* 43236 802.11n 2.4GHz device */ ++#define BCM43236_D11N5G_ID 0x4348 /* 43236 802.11n 5GHz device */ ++#define BCM43225_D11N2G_ID 0x4357 /* 43225 802.11n 2.4GHz device */ ++#define BCM43421_D11N_ID 0xA99D /* 43421 802.11n dualband device */ ++#define BCM4313_D11N2G_ID 0x4727 /* 4313 802.11n 2.4G device */ ++#define BCM4330_D11N_ID 0x4360 /* 4330 802.11n dualband device */ ++#define BCM4330_D11N2G_ID 0x4361 /* 4330 802.11n 2.4G device */ ++#define BCM4330_D11N5G_ID 0x4362 /* 4330 802.11n 5G device */ ++#define BCM4336_D11N_ID 0x4343 /* 4336 802.11n 2.4GHz device */ ++#define BCM6362_D11N_ID 0x435f /* 6362 802.11n dualband device */ ++#define BCM4331_D11N_ID 0x4331 /* 4331 802.11n dualband id */ ++#define BCM4331_D11N2G_ID 0x4332 /* 4331 802.11n 2.4Ghz band id */ ++#define BCM4331_D11N5G_ID 0x4333 /* 4331 802.11n 5Ghz band id */ ++#define BCM43237_D11N_ID 0x4355 /* 43237 802.11n dualband device */ ++#define BCM43237_D11N5G_ID 0x4356 /* 43237 802.11n 5GHz device */ ++#define BCM43227_D11N2G_ID 0x4358 /* 43228 802.11n 2.4GHz device */ ++#define BCM43228_D11N_ID 0x4359 /* 43228 802.11n DualBand device */ ++#define BCM43228_D11N5G_ID 0x435a /* 43228 802.11n 5GHz device */ ++#define BCM43362_D11N_ID 0x4363 /* 43362 802.11n 2.4GHz device */ ++#define BCM43239_D11N_ID 0x4370 /* 43239 802.11n dualband device */ ++#define BCM4324_D11N_ID 0x4374 /* 4324 802.11n dualband device */ 
++#define BCM43217_D11N2G_ID 0x43a9 /* 43217 802.11n 2.4GHz device */ ++#define BCM43131_D11N2G_ID 0x43aa /* 43131 802.11n 2.4GHz device */ ++#define BCM4314_D11N2G_ID 0x4364 /* 4314 802.11n 2.4G device */ ++#define BCM43142_D11N2G_ID 0x4365 /* 43142 802.11n 2.4G device */ ++#define BCM4334_D11N_ID 0x4380 /* 4334 802.11n dualband device */ ++#define BCM4334_D11N2G_ID 0x4381 /* 4334 802.11n 2.4G device */ ++#define BCM4334_D11N5G_ID 0x4382 /* 4334 802.11n 5G device */ ++#define BCM4360_D11AC_ID 0x43a0 ++#define BCM4360_D11AC2G_ID 0x43a1 ++#define BCM4360_D11AC5G_ID 0x43a2 ++#define BCM4335_D11AC_ID 0x43ae ++#define BCM4335_D11AC2G_ID 0x43af ++#define BCM4335_D11AC5G_ID 0x43b0 ++#define BCM4352_D11AC_ID 0x43b1 /* 4352 802.11ac dualband device */ ++#define BCM4352_D11AC2G_ID 0x43b2 /* 4352 802.11ac 2.4G device */ ++#define BCM4352_D11AC5G_ID 0x43b3 /* 4352 802.11ac 5G device */ ++ ++/* PCI Subsystem ID */ ++#define BCM943228HMB_SSID_VEN1 0x0607 ++#define BCM94313HMGBL_SSID_VEN1 0x0608 ++#define BCM94313HMG_SSID_VEN1 0x0609 ++ ++ ++#define BCMGPRS_UART_ID 0x4333 /* Uart id used by 4306/gprs card */ ++#define BCMGPRS2_UART_ID 0x4344 /* Uart id used by 4306/gprs card */ ++#define FPGA_JTAGM_ID 0x43f0 /* FPGA jtagm device id */ ++#define BCM_JTAGM_ID 0x43f1 /* BCM jtagm device id */ ++#define SDIOH_FPGA_ID 0x43f2 /* sdio host fpga */ ++#define BCM_SDIOH_ID 0x43f3 /* BCM sdio host id */ ++#define SDIOD_FPGA_ID 0x43f4 /* sdio device fpga */ ++#define SPIH_FPGA_ID 0x43f5 /* PCI SPI Host Controller FPGA */ ++#define BCM_SPIH_ID 0x43f6 /* Synopsis SPI Host Controller */ ++#define MIMO_FPGA_ID 0x43f8 /* FPGA mimo minimacphy device id */ ++#define BCM_JTAGM2_ID 0x43f9 /* BCM alternate jtagm device id */ ++#define SDHCI_FPGA_ID 0x43fa /* Standard SDIO Host Controller FPGA */ ++#define BCM4402_ENET_ID 0x4402 /* 4402 enet */ ++#define BCM4402_V90_ID 0x4403 /* 4402 v90 codec */ ++#define BCM4410_DEVICE_ID 0x4410 /* bcm44xx family pci iline */ ++#define BCM4412_DEVICE_ID 0x4412 /* 
bcm44xx family pci enet */ ++#define BCM4430_DEVICE_ID 0x4430 /* bcm44xx family cardbus iline */ ++#define BCM4432_DEVICE_ID 0x4432 /* bcm44xx family cardbus enet */ ++#define BCM4704_ENET_ID 0x4706 /* 4704 enet (Use 47XX_ENET_ID instead!) */ ++#define BCM4710_DEVICE_ID 0x4710 /* 4710 primary function 0 */ ++#define BCM47XX_AUDIO_ID 0x4711 /* 47xx audio codec */ ++#define BCM47XX_V90_ID 0x4712 /* 47xx v90 codec */ ++#define BCM47XX_ENET_ID 0x4713 /* 47xx enet */ ++#define BCM47XX_EXT_ID 0x4714 /* 47xx external i/f */ ++#define BCM47XX_GMAC_ID 0x4715 /* 47xx Unimac based GbE */ ++#define BCM47XX_USBH_ID 0x4716 /* 47xx usb host */ ++#define BCM47XX_USBD_ID 0x4717 /* 47xx usb device */ ++#define BCM47XX_IPSEC_ID 0x4718 /* 47xx ipsec */ ++#define BCM47XX_ROBO_ID 0x4719 /* 47xx/53xx roboswitch core */ ++#define BCM47XX_USB20H_ID 0x471a /* 47xx usb 2.0 host */ ++#define BCM47XX_USB20D_ID 0x471b /* 47xx usb 2.0 device */ ++#define BCM47XX_ATA100_ID 0x471d /* 47xx parallel ATA */ ++#define BCM47XX_SATAXOR_ID 0x471e /* 47xx serial ATA & XOR DMA */ ++#define BCM47XX_GIGETH_ID 0x471f /* 47xx GbE (5700) */ ++#define BCM4712_MIPS_ID 0x4720 /* 4712 base devid */ ++#define BCM4716_DEVICE_ID 0x4722 /* 4716 base devid */ ++#define BCM47XX_SMBUS_EMU_ID 0x47fe /* 47xx emulated SMBus device */ ++#define BCM47XX_XOR_EMU_ID 0x47ff /* 47xx emulated XOR engine */ ++#define EPI41210_DEVICE_ID 0xa0fa /* bcm4210 */ ++#define EPI41230_DEVICE_ID 0xa10e /* bcm4230 */ ++#define JINVANI_SDIOH_ID 0x4743 /* Jinvani SDIO Gold Host */ ++#define BCM27XX_SDIOH_ID 0x2702 /* BCM27xx Standard SDIO Host */ ++#define PCIXX21_FLASHMEDIA_ID 0x803b /* TI PCI xx21 Standard Host Controller */ ++#define PCIXX21_SDIOH_ID 0x803c /* TI PCI xx21 Standard Host Controller */ ++#define R5C822_SDIOH_ID 0x0822 /* Ricoh Co Ltd R5C822 SD/SDIO/MMC/MS/MSPro Host */ ++#define JMICRON_SDIOH_ID 0x2381 /* JMicron Standard SDIO Host Controller */ ++ ++/* Chip IDs */ ++#define BCM4306_CHIP_ID 0x4306 /* 4306 chipcommon chipid */ 
++#define BCM4311_CHIP_ID 0x4311 /* 4311 PCIe 802.11a/b/g */ ++#define BCM43111_CHIP_ID 43111 /* 43111 chipcommon chipid (OTP chipid) */ ++#define BCM43112_CHIP_ID 43112 /* 43112 chipcommon chipid (OTP chipid) */ ++#define BCM4312_CHIP_ID 0x4312 /* 4312 chipcommon chipid */ ++#define BCM4313_CHIP_ID 0x4313 /* 4313 chip id */ ++#define BCM43131_CHIP_ID 43131 /* 43131 chip id (OTP chipid) */ ++#define BCM4315_CHIP_ID 0x4315 /* 4315 chip id */ ++#define BCM4318_CHIP_ID 0x4318 /* 4318 chipcommon chipid */ ++#define BCM4319_CHIP_ID 0x4319 /* 4319 chip id */ ++#define BCM4320_CHIP_ID 0x4320 /* 4320 chipcommon chipid */ ++#define BCM4321_CHIP_ID 0x4321 /* 4321 chipcommon chipid */ ++#define BCM43217_CHIP_ID 43217 /* 43217 chip id (OTP chipid) */ ++#define BCM4322_CHIP_ID 0x4322 /* 4322 chipcommon chipid */ ++#define BCM43221_CHIP_ID 43221 /* 43221 chipcommon chipid (OTP chipid) */ ++#define BCM43222_CHIP_ID 43222 /* 43222 chipcommon chipid */ ++#define BCM43224_CHIP_ID 43224 /* 43224 chipcommon chipid */ ++#define BCM43225_CHIP_ID 43225 /* 43225 chipcommon chipid */ ++#define BCM43227_CHIP_ID 43227 /* 43227 chipcommon chipid */ ++#define BCM43228_CHIP_ID 43228 /* 43228 chipcommon chipid */ ++#define BCM43226_CHIP_ID 43226 /* 43226 chipcommon chipid */ ++#define BCM43231_CHIP_ID 43231 /* 43231 chipcommon chipid (OTP chipid) */ ++#define BCM43234_CHIP_ID 43234 /* 43234 chipcommon chipid */ ++#define BCM43235_CHIP_ID 43235 /* 43235 chipcommon chipid */ ++#define BCM43236_CHIP_ID 43236 /* 43236 chipcommon chipid */ ++#define BCM43237_CHIP_ID 43237 /* 43237 chipcommon chipid */ ++#define BCM43238_CHIP_ID 43238 /* 43238 chipcommon chipid */ ++#define BCM43239_CHIP_ID 43239 /* 43239 chipcommon chipid */ ++#define BCM43420_CHIP_ID 43420 /* 43222 chipcommon chipid (OTP, RBBU) */ ++#define BCM43421_CHIP_ID 43421 /* 43224 chipcommon chipid (OTP, RBBU) */ ++#define BCM43428_CHIP_ID 43428 /* 43228 chipcommon chipid (OTP, RBBU) */ ++#define BCM43431_CHIP_ID 43431 /* 4331 chipcommon 
chipid (OTP, RBBU) */ ++#define BCM43460_CHIP_ID 43460 /* 4360 chipcommon chipid (OTP, RBBU) */ ++#define BCM4325_CHIP_ID 0x4325 /* 4325 chip id */ ++#define BCM4328_CHIP_ID 0x4328 /* 4328 chip id */ ++#define BCM4329_CHIP_ID 0x4329 /* 4329 chipcommon chipid */ ++#define BCM4331_CHIP_ID 0x4331 /* 4331 chipcommon chipid */ ++#define BCM4336_CHIP_ID 0x4336 /* 4336 chipcommon chipid */ ++#define BCM43362_CHIP_ID 43362 /* 43362 chipcommon chipid */ ++#define BCM4330_CHIP_ID 0x4330 /* 4330 chipcommon chipid */ ++#define BCM6362_CHIP_ID 0x6362 /* 6362 chipcommon chipid */ ++#define BCM4314_CHIP_ID 0x4314 /* 4314 chipcommon chipid */ ++#define BCM43142_CHIP_ID 43142 /* 43142 chipcommon chipid */ ++#define BCM4324_CHIP_ID 0x4324 /* 4324 chipcommon chipid */ ++#define BCM43242_CHIP_ID 43242 /* 43242 chipcommon chipid */ ++#define BCM4334_CHIP_ID 0x4334 /* 4334 chipcommon chipid */ ++#define BCM4335_CHIP_ID 0x4335 ++#define BCM4360_CHIP_ID 0x4360 ++#define BCM43526_CHIP_ID 0xAA06 ++#define BCM4352_CHIP_ID 0x4352 ++ ++#define BCM4342_CHIP_ID 4342 /* 4342 chipcommon chipid (OTP, RBBU) */ ++#define BCM4402_CHIP_ID 0x4402 /* 4402 chipid */ ++#define BCM4704_CHIP_ID 0x4704 /* 4704 chipcommon chipid */ ++#define BCM4706_CHIP_ID 0x5300 /* 4706 chipcommon chipid */ ++#define BCM4710_CHIP_ID 0x4710 /* 4710 chipid */ ++#define BCM4712_CHIP_ID 0x4712 /* 4712 chipcommon chipid */ ++#define BCM4716_CHIP_ID 0x4716 /* 4716 chipcommon chipid */ ++#define BCM47162_CHIP_ID 47162 /* 47162 chipcommon chipid */ ++#define BCM4748_CHIP_ID 0x4748 /* 4716 chipcommon chipid (OTP, RBBU) */ ++#define BCM4749_CHIP_ID 0x4749 /* 5357 chipcommon chipid (OTP, RBBU) */ ++#define BCM4785_CHIP_ID 0x4785 /* 4785 chipcommon chipid */ ++#define BCM5350_CHIP_ID 0x5350 /* 5350 chipcommon chipid */ ++#define BCM5352_CHIP_ID 0x5352 /* 5352 chipcommon chipid */ ++#define BCM5354_CHIP_ID 0x5354 /* 5354 chipcommon chipid */ ++#define BCM5365_CHIP_ID 0x5365 /* 5365 chipcommon chipid */ ++#define BCM5356_CHIP_ID 0x5356 /* 
5356 chipcommon chipid */ ++#define BCM5357_CHIP_ID 0x5357 /* 5357 chipcommon chipid */ ++#define BCM53572_CHIP_ID 53572 /* 53572 chipcommon chipid */ ++#define BCM53010_CHIP_ID 53010 /* NS chipcommon chipid */ ++#define BCM56150_CHIP_ID 56150 /* HR2 chipcommon chipid */ ++#define BCM56340_CHIP_ID 56340 /* HX4 chipcommon chipid */ ++#define BCM53020_CHIP_ID 53020 /* NSP chipcommon chipid */ ++#define BCM56450_CHIP_ID 56450 /* KT2 chipcommon chipid */ ++#define BCM54016_CHIP_ID 54016 /* CYGNUS chipcommon chipid */ ++#define BCM53400_CHIP_ID 0x8416 /* GH chipcommon chipid */ ++#define BCM56260_CHIP_ID 0xb260 /* SB2 chipcommon chipid */ ++#define BCM56160_CHIP_ID 0xb160 /* HR3 chipcommon chipid */ ++#define BCM56170_CHIP_ID 0xb170 /* GH2 chipcommon chipid */ ++#define BCM53540_CHIP_ID 0x8540 /* WF2 chipcommon chipid */ ++ ++#if defined(CONFIG_MACH_HX4) ++#define BCMIPROC_CHIP_ID BCM56340_CHIP_ID ++#elif defined(CONFIG_MACH_HR2) ++#define BCMIPROC_CHIP_ID BCM56150_CHIP_ID ++#elif defined(CONFIG_MACH_KT2) ++#define BCMIPROC_CHIP_ID BCM56450_CHIP_ID ++#elif defined(CONFIG_MACH_GH) ++#define BCMIPROC_CHIP_ID BCM53400_CHIP_ID ++#elif defined(CONFIG_MACH_SB2) ++#define BCMIPROC_CHIP_ID BCM56260_CHIP_ID ++#elif defined(CONFIG_MACH_HR3) ++ ++#if defined(CONFIG_MACH_WH2) ++#define BCMIPROC_CHIP_ID BCM53540_CHIP_ID ++#else ++#define BCMIPROC_CHIP_ID BCM56160_CHIP_ID ++#endif ++ ++#elif defined(CONFIG_MACH_GH2) ++#define BCMIPROC_CHIP_ID BCM56170_CHIP_ID ++#endif ++ ++/* Package IDs */ ++#define BCM4303_PKG_ID 2 /* 4303 package id */ ++#define BCM4309_PKG_ID 1 /* 4309 package id */ ++#define BCM4712LARGE_PKG_ID 0 /* 340pin 4712 package id */ ++#define BCM4712SMALL_PKG_ID 1 /* 200pin 4712 package id */ ++#define BCM4712MID_PKG_ID 2 /* 225pin 4712 package id */ ++#define BCM4328USBD11G_PKG_ID 2 /* 4328 802.11g USB package id */ ++#define BCM4328USBDUAL_PKG_ID 3 /* 4328 802.11a/g USB package id */ ++#define BCM4328SDIOD11G_PKG_ID 4 /* 4328 802.11g SDIO package id */ ++#define 
BCM4328SDIODUAL_PKG_ID 5 /* 4328 802.11a/g SDIO package id */ ++#define BCM4329_289PIN_PKG_ID 0 /* 4329 289-pin package id */ ++#define BCM4329_182PIN_PKG_ID 1 /* 4329N 182-pin package id */ ++#define BCM5354E_PKG_ID 1 /* 5354E package id */ ++#define BCM4716_PKG_ID 8 /* 4716 package id */ ++#define BCM4717_PKG_ID 9 /* 4717 package id */ ++#define BCM4718_PKG_ID 10 /* 4718 package id */ ++#define BCM5356_PKG_NONMODE 1 /* 5356 package without nmode suppport */ ++#define BCM5358U_PKG_ID 8 /* 5358U package id */ ++#define BCM5358_PKG_ID 9 /* 5358 package id */ ++#define BCM47186_PKG_ID 10 /* 47186 package id */ ++#define BCM5357_PKG_ID 11 /* 5357 package id */ ++#define BCM5356U_PKG_ID 12 /* 5356U package id */ ++#define BCM53572_PKG_ID 8 /* 53572 package id */ ++#define BCM5357C0_PKG_ID 8 /* 5357c0 package id (the same as 53572) */ ++#define BCM47188_PKG_ID 9 /* 47188 package id */ ++#define BCM5358C0_PKG_ID 0xa /* 5358c0 package id */ ++#define BCM5356C0_PKG_ID 0xb /* 5356c0 package id */ ++#define BCM4331TT_PKG_ID 8 /* 4331 12x12 package id */ ++#define BCM4331TN_PKG_ID 9 /* 4331 12x9 package id */ ++#define BCM4331TNA0_PKG_ID 0xb /* 4331 12x9 package id */ ++#define BCM4706L_PKG_ID 1 /* 4706L package id */ ++ ++#define HDLSIM5350_PKG_ID 1 /* HDL simulator package id for a 5350 */ ++#define HDLSIM_PKG_ID 14 /* HDL simulator package id */ ++#define HWSIM_PKG_ID 15 /* Hardware simulator package id */ ++#define BCM43224_FAB_CSM 0x8 /* the chip is manufactured by CSM */ ++#define BCM43224_FAB_SMIC 0xa /* the chip is manufactured by SMIC */ ++#define BCM4336_WLBGA_PKG_ID 0x8 ++#define BCM4330_WLBGA_PKG_ID 0x0 ++#define BCM4314PCIE_ARM_PKG_ID (8 | 0) /* 4314 QFN PCI package id, bit 3 tie high */ ++#define BCM4314SDIO_PKG_ID (8 | 1) /* 4314 QFN SDIO package id */ ++#define BCM4314PCIE_PKG_ID (8 | 2) /* 4314 QFN PCI (ARM-less) package id */ ++#define BCM4314SDIO_ARM_PKG_ID (8 | 3) /* 4314 QFN SDIO (ARM-less) package id */ ++#define BCM4314SDIO_FPBGA_PKG_ID (8 | 4) /* 4314 
FpBGA SDIO package id */ ++#define BCM4314DEV_PKG_ID (8 | 6) /* 4314 Developement package id */ ++ ++#define BCM4707_PKG_ID 1 /* 4707 package id */ ++#define BCM4708_PKG_ID 2 /* 4708 package id */ ++#define BCM4709_PKG_ID 0 /* 4709 package id */ ++ ++#define PCIXX21_FLASHMEDIA0_ID 0x8033 /* TI PCI xx21 Standard Host Controller */ ++#define PCIXX21_SDIOH0_ID 0x8034 /* TI PCI xx21 Standard Host Controller */ ++ ++#define BCM4335_WLCSP_PKG_ID (0x0) /* WLCSP Module/Mobile SDIO/HSIC. */ ++#define BCM4335_FCBGA_PKG_ID (0x1) /* FCBGA PC/Embeded/Media PCIE/SDIO */ ++#define BCM4335_WLBGA_PKG_ID (0x2) /* WLBGA COB/Mobile SDIO/HSIC. */ ++#define BCM4335_FCBGAD_PKG_ID (0x3) /* FCBGA Debug Debug/Dev All if's. */ ++#define BCM4335_PKG_MASK (0x3) ++ ++/* boardflags */ ++#define BFL_BTC2WIRE 0x00000001 /* old 2wire Bluetooth coexistence, OBSOLETE */ ++#define BFL_BTCOEX 0x00000001 /* Board supports BTCOEX */ ++#define BFL_PACTRL 0x00000002 /* Board has gpio 9 controlling the PA */ ++#define BFL_AIRLINEMODE 0x00000004 /* Board implements gpio 13 radio disable indication, UNUSED */ ++#define BFL_ADCDIV 0x00000008 /* Board has the rssi ADC divider */ ++#define BFL_ENETROBO 0x00000010 /* Board has robo switch or core */ ++#define BFL_NOPLLDOWN 0x00000020 /* Not ok to power down the chip pll and oscillator */ ++#define BFL_CCKHIPWR 0x00000040 /* Can do high-power CCK transmission */ ++#define BFL_ENETADM 0x00000080 /* Board has ADMtek switch */ ++#define BFL_ENETVLAN 0x00000100 /* Board has VLAN capability */ ++#define BFL_UNUSED 0x00000200 ++#define BFL_NOPCI 0x00000400 /* Board leaves PCI floating */ ++#define BFL_FEM 0x00000800 /* Board supports the Front End Module */ ++#define BFL_EXTLNA 0x00001000 /* Board has an external LNA in 2.4GHz band */ ++#define BFL_HGPA 0x00002000 /* Board has a high gain PA */ ++#define BFL_BTC2WIRE_ALTGPIO 0x00004000 /* Board's BTC 2wire is in the alternate gpios */ ++#define BFL_ALTIQ 0x00008000 /* Alternate I/Q settings */ ++#define BFL_NOPA 
0x00010000 /* Board has no PA */ ++#define BFL_RSSIINV 0x00020000 /* Board's RSSI uses positive slope(not TSSI) */ ++#define BFL_PAREF 0x00040000 /* Board uses the PARef LDO */ ++#define BFL_3TSWITCH 0x00080000 /* Board uses a triple throw switch shared with BT */ ++#define BFL_PHASESHIFT 0x00100000 /* Board can support phase shifter */ ++#define BFL_BUCKBOOST 0x00200000 /* Power topology uses BUCKBOOST */ ++#define BFL_FEM_BT 0x00400000 /* Board has FEM and switch to share antenna w/ BT */ ++#define BFL_NOCBUCK 0x00800000 /* Power topology doesn't use CBUCK */ ++#define BFL_CCKFAVOREVM 0x01000000 /* Favor CCK EVM over spectral mask */ ++#define BFL_PALDO 0x02000000 /* Power topology uses PALDO */ ++#define BFL_LNLDO2_2P5 0x04000000 /* Select 2.5V as LNLDO2 output voltage */ ++#define BFL_FASTPWR 0x08000000 ++#define BFL_UCPWRCTL_MININDX 0x08000000 /* Enforce min power index to avoid FEM damage */ ++#define BFL_EXTLNA_5GHz 0x10000000 /* Board has an external LNA in 5GHz band */ ++#define BFL_TRSW_1by2 0x20000000 /* Board has 2 TRSW's in 1by2 designs */ ++#define BFL_LO_TRSW_R_5GHz 0x40000000 /* In 5G do not throw TRSW to T for clipLO gain */ ++#define BFL_ELNA_GAINDEF 0x80000000 /* Backoff InitGain based on elna_2g/5g field ++ * when this flag is set ++ */ ++#define BFL_EXTLNA_TX 0x20000000 /* Temp boardflag to indicate to */ ++ ++/* boardflags2 */ ++#define BFL2_RXBB_INT_REG_DIS 0x00000001 /* Board has an external rxbb regulator */ ++#define BFL2_APLL_WAR 0x00000002 /* Flag to implement alternative A-band PLL settings */ ++#define BFL2_TXPWRCTRL_EN 0x00000004 /* Board permits enabling TX Power Control */ ++#define BFL2_2X4_DIV 0x00000008 /* Board supports the 2X4 diversity switch */ ++#define BFL2_5G_PWRGAIN 0x00000010 /* Board supports 5G band power gain */ ++#define BFL2_PCIEWAR_OVR 0x00000020 /* Board overrides ASPM and Clkreq settings */ ++#define BFL2_CAESERS_BRD 0x00000040 /* Board is Caesers brd (unused by sw) */ ++#define BFL2_BTC3WIRE 0x00000080 /* Board 
support legacy 3 wire or 4 wire */ ++#define BFL2_BTCLEGACY 0x00000080 /* Board support legacy 3/4 wire, to replace ++ * BFL2_BTC3WIRE ++ */ ++#define BFL2_SKWRKFEM_BRD 0x00000100 /* 4321mcm93 board uses Skyworks FEM */ ++#define BFL2_SPUR_WAR 0x00000200 /* Board has a WAR for clock-harmonic spurs */ ++#define BFL2_GPLL_WAR 0x00000400 /* Flag to narrow G-band PLL loop b/w */ ++#define BFL2_TRISTATE_LED 0x00000800 /* Tri-state the LED */ ++#define BFL2_SINGLEANT_CCK 0x00001000 /* Tx CCK pkts on Ant 0 only */ ++#define BFL2_2G_SPUR_WAR 0x00002000 /* WAR to reduce and avoid clock-harmonic spurs in 2G */ ++#define BFL2_BPHY_ALL_TXCORES 0x00004000 /* Transmit bphy frames using all tx cores */ ++#define BFL2_FCC_BANDEDGE_WAR 0x00008000 /* Activates WAR to improve FCC bandedge performance */ ++#define BFL2_GPLL_WAR2 0x00010000 /* Flag to widen G-band PLL loop b/w */ ++#define BFL2_IPALVLSHIFT_3P3 0x00020000 ++#define BFL2_INTERNDET_TXIQCAL 0x00040000 /* Use internal envelope detector for TX IQCAL */ ++#define BFL2_XTALBUFOUTEN 0x00080000 /* Keep the buffered Xtal output from radio on */ ++ /* Most drivers will turn it off without this flag */ ++ /* to save power. */ ++ ++#define BFL2_ANAPACTRL_2G 0x00100000 /* 2G ext PAs are controlled by analog PA ctrl lines */ ++#define BFL2_ANAPACTRL_5G 0x00200000 /* 5G ext PAs are controlled by analog PA ctrl lines */ ++#define BFL2_ELNACTRL_TRSW_2G 0x00400000 /* AZW4329: 2G gmode_elna_gain controls TR Switch */ ++#define BFL2_BT_SHARE_ANT0 0x00800000 /* share core0 antenna with BT */ ++#define BFL2_TEMPSENSE_HIGHER 0x01000000 /* The tempsense threshold can sustain higher value ++ * than programmed. The exact delta is decided by ++ * driver per chip/boardtype. This can be used ++ * when tempsense qualification happens after shipment ++ */ ++#define BFL2_BTC3WIREONLY 0x02000000 /* standard 3 wire btc only. 
4 wire not supported */ ++#define BFL2_PWR_NOMINAL 0x04000000 /* 0: power reduction on, 1: no power reduction */ ++#define BFL2_EXTLNA_PWRSAVE 0x08000000 /* boardflag to enable ucode to apply power save */ ++ /* ucode control of eLNA during Tx */ ++#define BFL2_4313_RADIOREG 0x10000000 ++ /* board rework */ ++#define BFL2_SDR_EN 0x20000000 /* SDR enabled or disabled */ ++ ++/* board specific GPIO assignment, gpio 0-3 are also customer-configurable led */ ++#define BOARD_GPIO_BTC3W_IN 0x850 /* bit 4 is RF_ACTIVE, bit 6 is STATUS, bit 11 is PRI */ ++#define BOARD_GPIO_BTC3W_OUT 0x020 /* bit 5 is TX_CONF */ ++#define BOARD_GPIO_BTCMOD_IN 0x010 /* bit 4 is the alternate BT Coexistence Input */ ++#define BOARD_GPIO_BTCMOD_OUT 0x020 /* bit 5 is the alternate BT Coexistence Out */ ++#define BOARD_GPIO_BTC_IN 0x080 /* bit 7 is BT Coexistence Input */ ++#define BOARD_GPIO_BTC_OUT 0x100 /* bit 8 is BT Coexistence Out */ ++#define BOARD_GPIO_PACTRL 0x200 /* bit 9 controls the PA on new 4306 boards */ ++#define BOARD_GPIO_12 0x1000 /* gpio 12 */ ++#define BOARD_GPIO_13 0x2000 /* gpio 13 */ ++#define BOARD_GPIO_BTC4_IN 0x0800 /* gpio 11, coex4, in */ ++#define BOARD_GPIO_BTC4_BT 0x2000 /* gpio 12, coex4, bt active */ ++#define BOARD_GPIO_BTC4_STAT 0x4000 /* gpio 14, coex4, status */ ++#define BOARD_GPIO_BTC4_WLAN 0x8000 /* gpio 15, coex4, wlan active */ ++#define BOARD_GPIO_1_WLAN_PWR 0x02 /* throttle WLAN power on X21 board */ ++#define BOARD_GPIO_3_WLAN_PWR 0x08 /* throttle WLAN power on X28 board */ ++#define BOARD_GPIO_4_WLAN_PWR 0x10 /* throttle WLAN power on X19 board */ ++ ++#define GPIO_BTC4W_OUT_4312 0x010 /* bit 4 is BT_IODISABLE */ ++#define GPIO_BTC4W_OUT_43224 0x020 /* bit 5 is BT_IODISABLE */ ++#define GPIO_BTC4W_OUT_43224_SHARED 0x0e0 /* bit 5 is BT_IODISABLE */ ++#define GPIO_BTC4W_OUT_43225 0x0e0 /* bit 5 BT_IODISABLE, bit 6 SW_BT, bit 7 SW_WL */ ++#define GPIO_BTC4W_OUT_43421 0x020 /* bit 5 is BT_IODISABLE */ ++#define GPIO_BTC4W_OUT_4313 0x060 /* bit 5 SW_BT, 
bit 6 SW_WL */ ++#define GPIO_BTC4W_OUT_4331_SHARED 0x010 /* GPIO 4 */ ++ ++#define PCI_CFG_GPIO_SCS 0x10 /* PCI config space bit 4 for 4306c0 slow clock source */ ++#define PCI_CFG_GPIO_HWRAD 0x20 /* PCI config space GPIO 13 for hw radio disable */ ++#define PCI_CFG_GPIO_XTAL 0x40 /* PCI config space GPIO 14 for Xtal power-up */ ++#define PCI_CFG_GPIO_PLL 0x80 /* PCI config space GPIO 15 for PLL power-down */ ++ ++/* power control defines */ ++#define PLL_DELAY 150 /* us pll on delay */ ++#define FREF_DELAY 200 /* us fref change delay */ ++#define MIN_SLOW_CLK 32 /* us Slow clock period */ ++#define XTAL_ON_DELAY 1000 /* us crystal power-on delay */ ++ ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++/* Reference Board Types */ ++#define BU4710_BOARD 0x0400 ++#define VSIM4710_BOARD 0x0401 ++#define QT4710_BOARD 0x0402 ++ ++#define BU4309_BOARD 0x040a ++#define BCM94309CB_BOARD 0x040b ++#define BCM94309MP_BOARD 0x040c ++#define BCM4309AP_BOARD 0x040d ++ ++#define BCM94302MP_BOARD 0x040e ++ ++#define BU4306_BOARD 0x0416 ++#define BCM94306CB_BOARD 0x0417 ++#define BCM94306MP_BOARD 0x0418 ++ ++#define BCM94710D_BOARD 0x041a ++#define BCM94710R1_BOARD 0x041b ++#define BCM94710R4_BOARD 0x041c ++#define BCM94710AP_BOARD 0x041d ++ ++#define BU2050_BOARD 0x041f ++ ++#define BCM94306P50_BOARD 0x0420 ++ ++#define BCM94309G_BOARD 0x0421 ++ ++#define BU4704_BOARD 0x0423 ++#define BU4702_BOARD 0x0424 ++ ++#define BCM94306PC_BOARD 0x0425 /* pcmcia 3.3v 4306 card */ ++ ++#define MPSG4306_BOARD 0x0427 ++ ++#define BCM94702MN_BOARD 0x0428 ++ ++/* BCM4702 1U CompactPCI Board */ ++#define BCM94702CPCI_BOARD 0x0429 ++ ++/* BCM4702 with BCM95380 VLAN Router */ ++#define BCM95380RR_BOARD 0x042a ++ ++/* cb4306 with SiGe PA */ ++#define BCM94306CBSG_BOARD 0x042b ++ ++/* cb4306 with SiGe PA */ ++#define PCSG94306_BOARD 0x042d ++ ++/* bu4704 with sdram */ ++#define BU4704SD_BOARD 0x042e ++ ++/* Dual 11a/11g Router */ ++#define BCM94704AGR_BOARD 0x042f ++ ++/* 11a-only minipci */ ++#define 
BCM94308MP_BOARD 0x0430 ++ ++/* 4306/gprs combo */ ++#define BCM94306GPRS_BOARD 0x0432 ++ ++/* BCM5365/BCM4704 FPGA Bringup Board */ ++#define BU5365_FPGA_BOARD 0x0433 ++ ++#define BU4712_BOARD 0x0444 ++#define BU4712SD_BOARD 0x045d ++#define BU4712L_BOARD 0x045f ++ ++/* BCM4712 boards */ ++#define BCM94712AP_BOARD 0x0445 ++#define BCM94712P_BOARD 0x0446 ++ ++/* BCM4318 boards */ ++#define BU4318_BOARD 0x0447 ++#define CB4318_BOARD 0x0448 ++#define MPG4318_BOARD 0x0449 ++#define MP4318_BOARD 0x044a ++#define SD4318_BOARD 0x044b ++ ++/* BCM4313 boards */ ++#define BCM94313BU_BOARD 0x050f ++#define BCM94313HM_BOARD 0x0510 ++#define BCM94313EPA_BOARD 0x0511 ++#define BCM94313HMG_BOARD 0x051C ++ ++/* BCM63XX boards */ ++#define BCM96338_BOARD 0x6338 ++#define BCM96348_BOARD 0x6348 ++#define BCM96358_BOARD 0x6358 ++#define BCM96368_BOARD 0x6368 ++ ++/* Another mp4306 with SiGe */ ++#define BCM94306P_BOARD 0x044c ++ ++/* mp4303 */ ++#define BCM94303MP_BOARD 0x044e ++ ++/* mpsgh4306 */ ++#define BCM94306MPSGH_BOARD 0x044f ++ ++/* BRCM 4306 w/ Front End Modules */ ++#define BCM94306MPM 0x0450 ++#define BCM94306MPL 0x0453 ++ ++/* 4712agr */ ++#define BCM94712AGR_BOARD 0x0451 ++ ++/* pcmcia 4303 */ ++#define PC4303_BOARD 0x0454 ++ ++/* 5350K */ ++#define BCM95350K_BOARD 0x0455 ++ ++/* 5350R */ ++#define BCM95350R_BOARD 0x0456 ++ ++/* 4306mplna */ ++#define BCM94306MPLNA_BOARD 0x0457 ++ ++/* 4320 boards */ ++#define BU4320_BOARD 0x0458 ++#define BU4320S_BOARD 0x0459 ++#define BCM94320PH_BOARD 0x045a ++ ++/* 4306mph */ ++#define BCM94306MPH_BOARD 0x045b ++ ++/* 4306pciv */ ++#define BCM94306PCIV_BOARD 0x045c ++ ++#define BU4712SD_BOARD 0x045d ++ ++#define BCM94320PFLSH_BOARD 0x045e ++ ++#define BU4712L_BOARD 0x045f ++#define BCM94712LGR_BOARD 0x0460 ++#define BCM94320R_BOARD 0x0461 ++ ++#define BU5352_BOARD 0x0462 ++ ++#define BCM94318MPGH_BOARD 0x0463 ++ ++#define BU4311_BOARD 0x0464 ++#define BCM94311MC_BOARD 0x0465 ++#define BCM94311MCAG_BOARD 0x0466 ++ ++#define 
BCM95352GR_BOARD 0x0467 ++ ++/* bcm95351agr */ ++#define BCM95351AGR_BOARD 0x0470 ++ ++/* bcm94704mpcb */ ++#define BCM94704MPCB_BOARD 0x0472 ++ ++/* 4785 boards */ ++#define BU4785_BOARD 0x0478 ++ ++/* 4321 boards */ ++#define BU4321_BOARD 0x046b ++#define BU4321E_BOARD 0x047c ++#define MP4321_BOARD 0x046c ++#define CB2_4321_BOARD 0x046d ++#define CB2_4321_AG_BOARD 0x0066 ++#define MC4321_BOARD 0x046e ++ ++/* 4328 boards */ ++#define BU4328_BOARD 0x0481 ++#define BCM4328SDG_BOARD 0x0482 ++#define BCM4328SDAG_BOARD 0x0483 ++#define BCM4328UG_BOARD 0x0484 ++#define BCM4328UAG_BOARD 0x0485 ++#define BCM4328PC_BOARD 0x0486 ++#define BCM4328CF_BOARD 0x0487 ++ ++/* 4325 boards */ ++#define BCM94325DEVBU_BOARD 0x0490 ++#define BCM94325BGABU_BOARD 0x0491 ++ ++#define BCM94325SDGWB_BOARD 0x0492 ++ ++#define BCM94325SDGMDL_BOARD 0x04aa ++#define BCM94325SDGMDL2_BOARD 0x04c6 ++#define BCM94325SDGMDL3_BOARD 0x04c9 ++ ++#define BCM94325SDABGWBA_BOARD 0x04e1 ++ ++/* 4322 boards */ ++#define BCM94322MC_SSID 0x04a4 ++#define BCM94322USB_SSID 0x04a8 /* dualband */ ++#define BCM94322HM_SSID 0x04b0 ++#define BCM94322USB2D_SSID 0x04bf /* single band discrete front end */ ++ ++/* 4312 boards */ ++#define BCM4312MCGSG_BOARD 0x04b5 ++ ++/* 4315 boards */ ++#define BCM94315DEVBU_SSID 0x04c2 ++#define BCM94315USBGP_SSID 0x04c7 ++#define BCM94315BGABU_SSID 0x04ca ++#define BCM94315USBGP41_SSID 0x04cb ++ ++/* 4319 boards */ ++#define BCM94319DEVBU_SSID 0X04e5 ++#define BCM94319USB_SSID 0X04e6 ++#define BCM94319SD_SSID 0X04e7 ++ ++/* 4716 boards */ ++#define BCM94716NR2_SSID 0x04cd ++ ++/* 4319 boards */ ++#define BCM94319DEVBU_SSID 0X04e5 ++#define BCM94319USBNP4L_SSID 0X04e6 ++#define BCM94319WLUSBN4L_SSID 0X04e7 ++#define BCM94319SDG_SSID 0X04ea ++#define BCM94319LCUSBSDN4L_SSID 0X04eb ++#define BCM94319USBB_SSID 0x04ee ++#define BCM94319LCSDN4L_SSID 0X0507 ++#define BCM94319LSUSBN4L_SSID 0X0508 ++#define BCM94319SDNA4L_SSID 0X0517 ++#define BCM94319SDELNA4L_SSID 0X0518 ++#define 
BCM94319SDELNA6L_SSID 0X0539 ++#define BCM94319ARCADYAN_SSID 0X0546 ++#define BCM94319WINDSOR_SSID 0x0561 ++#define BCM94319MLAP_SSID 0x0562 ++#define BCM94319SDNA_SSID 0x058b ++#define BCM94319BHEMU3_SSID 0x0563 ++#define BCM94319SDHMB_SSID 0x058c ++#define BCM94319SDBREF_SSID 0x05a1 ++#define BCM94319USBSDB_SSID 0x05a2 ++ ++ ++/* 4329 boards */ ++#define BCM94329AGB_SSID 0X04b9 ++#define BCM94329TDKMDL1_SSID 0X04ba ++#define BCM94329TDKMDL11_SSID 0X04fc ++#define BCM94329OLYMPICN18_SSID 0X04fd ++#define BCM94329OLYMPICN90_SSID 0X04fe ++#define BCM94329OLYMPICN90U_SSID 0X050c ++#define BCM94329OLYMPICN90M_SSID 0X050b ++#define BCM94329AGBF_SSID 0X04ff ++#define BCM94329OLYMPICX17_SSID 0X0504 ++#define BCM94329OLYMPICX17M_SSID 0X050a ++#define BCM94329OLYMPICX17U_SSID 0X0509 ++#define BCM94329OLYMPICUNO_SSID 0X0564 ++#define BCM94329MOTOROLA_SSID 0X0565 ++#define BCM94329OLYMPICLOCO_SSID 0X0568 ++/* 4336 SDIO board types */ ++#define BCM94336SD_WLBGABU_SSID 0x0511 ++#define BCM94336SD_WLBGAREF_SSID 0x0519 ++#define BCM94336SDGP_SSID 0x0538 ++#define BCM94336SDG_SSID 0x0519 ++#define BCM94336SDGN_SSID 0x0538 ++#define BCM94336SDGFC_SSID 0x056B ++ ++/* 4330 SDIO board types */ ++#define BCM94330SDG_SSID 0x0528 ++#define BCM94330SD_FCBGABU_SSID 0x052e ++#define BCM94330SD_WLBGABU_SSID 0x052f ++#define BCM94330SD_FCBGA_SSID 0x0530 ++#define BCM94330FCSDAGB_SSID 0x0532 ++#define BCM94330OLYMPICAMG_SSID 0x0549 ++#define BCM94330OLYMPICAMGEPA_SSID 0x054F ++#define BCM94330OLYMPICUNO3_SSID 0x0551 ++#define BCM94330WLSDAGB_SSID 0x0547 ++#define BCM94330CSPSDAGBB_SSID 0x054A ++ ++/* 43224 boards */ ++#define BCM943224X21 0x056e ++#define BCM943224X21_FCC 0x00d1 ++#define BCM943224X21B 0x00e9 ++#define BCM943224M93 0x008b ++#define BCM943224M93A 0x0090 ++#define BCM943224X16 0x0093 ++#define BCM94322X9 0x008d ++#define BCM94322M35e 0x008e ++ ++/* 43228 Boards */ ++#define BCM943228BU8_SSID 0x0540 ++#define BCM943228BU9_SSID 0x0541 ++#define BCM943228BU_SSID 0x0542 ++#define 
BCM943227HM4L_SSID 0x0543 ++#define BCM943227HMB_SSID 0x0544 ++#define BCM943228HM4L_SSID 0x0545 ++#define BCM943228SD_SSID 0x0573 ++ ++/* 43239 Boards */ ++#define BCM943239MOD_SSID 0x05ac ++#define BCM943239REF_SSID 0x05aa ++ ++/* 4331 boards */ ++#define BCM94331X19 0x00D6 /* X19B */ ++#define BCM94331X28 0x00E4 /* X28 */ ++#define BCM94331X28B 0x010E /* X28B */ ++#define BCM94331PCIEBT3Ax_SSID BCM94331X28 ++#define BCM94331X12_2G_SSID 0x00EC /* X12 2G */ ++#define BCM94331X12_5G_SSID 0x00ED /* X12 5G */ ++#define BCM94331X29B 0x00EF /* X29B */ ++#define BCM94331CSAX_SSID BCM94331X29B ++#define BCM94331X19C 0x00F5 /* X19C */ ++#define BCM94331X33 0x00F4 /* X33 */ ++#define BCM94331BU_SSID 0x0523 ++#define BCM94331S9BU_SSID 0x0524 ++#define BCM94331MC_SSID 0x0525 ++#define BCM94331MCI_SSID 0x0526 ++#define BCM94331PCIEBT4_SSID 0x0527 ++#define BCM94331HM_SSID 0x0574 ++#define BCM94331PCIEDUAL_SSID 0x059B ++#define BCM94331MCH5_SSID 0x05A9 ++#define BCM94331CS_SSID 0x05C6 ++#define BCM94331CD_SSID 0x05DA ++ ++/* 4314 Boards */ ++#define BCM94314BU_SSID 0x05b1 ++ ++/* 53572 Boards */ ++#define BCM953572BU_SSID 0x058D ++#define BCM953572NR2_SSID 0x058E ++#define BCM947188NR2_SSID 0x058F ++#define BCM953572SDRNR2_SSID 0x0590 ++ ++/* 43236 boards */ ++#define BCM943236OLYMPICSULLEY_SSID 0x594 ++#define BCM943236PREPROTOBLU2O3_SSID 0x5b9 ++#define BCM943236USBELNA_SSID 0x5f8 ++ ++/* 4314 Boards */ ++#define BCM94314BUSDIO_SSID 0x05c8 ++#define BCM94314BGABU_SSID 0x05c9 ++#define BCM94314HMEPA_SSID 0x05ca ++#define BCM94314HMEPABK_SSID 0x05cb ++#define BCM94314SUHMEPA_SSID 0x05cc ++#define BCM94314SUHM_SSID 0x05cd ++#define BCM94314HM_SSID 0x05d1 ++ ++/* 4334 Boards */ ++#define BCM94334FCAGBI_SSID 0x05df ++#define BCM94334WLAGBI_SSID 0x05dd ++ ++/* 43217 Boards */ ++#define BCM943217BU_SSID 0x05d5 ++#define BCM943217HM2L_SSID 0x05d6 ++#define BCM943217HMITR2L_SSID 0x05d7 ++ ++/* 43142 Boards */ ++#define BCM943142HM_SSID 0x05e0 ++#endif /* LINUX_POSTMOGRIFY_REMOVAL */ 
++ ++/* # of GPIO pins */ ++#define GPIO_NUMPINS 32 ++ ++/* These values are used by dhd host driver. */ ++#define RDL_RAM_BASE_4319 0x60000000 ++#define RDL_RAM_BASE_4329 0x60000000 ++#define RDL_RAM_SIZE_4319 0x48000 ++#define RDL_RAM_SIZE_4329 0x48000 ++#define RDL_RAM_SIZE_43236 0x70000 ++#define RDL_RAM_BASE_43236 0x60000000 ++#define RDL_RAM_SIZE_4328 0x60000 ++#define RDL_RAM_BASE_4328 0x80000000 ++#define RDL_RAM_SIZE_4322 0x60000 ++#define RDL_RAM_BASE_4322 0x60000000 ++#define RDL_RAM_SIZE_4360 0xE0000 ++#define RDL_RAM_BASE_4360 0x60000000 ++ ++/* generic defs for nvram "muxenab" bits ++* Note: these differ for 4335a0. refer bcmchipc.h for specific mux options. ++*/ ++#define MUXENAB_UART 0x00000001 ++#define MUXENAB_GPIO 0x00000002 ++#define MUXENAB_ERCX 0x00000004 ++#define MUXENAB_JTAG 0x00000008 ++#define MUXENAB_HOST_WAKE 0x00000010 ++ ++/* Boot flags */ ++#define FLASH_KERNEL_NFLASH 0x00000001 ++#define FLASH_BOOT_NFLASH 0x00000002 ++ ++#endif /* _BCMDEVS_H */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmendian.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmendian.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmendian.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmendian.h 2017-11-09 17:53:43.921293000 +0800 +@@ -0,0 +1,324 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Byte order utilities ++ * ++ * $Id: bcmendian.h 241182 2011-02-17 21:50:03Z $ ++ * ++ * This file by default provides proper behavior on little-endian architectures. ++ * On big-endian architectures, IL_BIGENDIAN should be defined. ++ */ ++ ++#ifndef _BCMENDIAN_H_ ++#define _BCMENDIAN_H_ ++ ++#include ++ ++/* Reverse the bytes in a 16-bit value */ ++#define BCMSWAP16(val) \ ++ ((uint16)((((uint16)(val) & (uint16)0x00ffU) << 8) | \ ++ (((uint16)(val) & (uint16)0xff00U) >> 8))) ++ ++/* Reverse the bytes in a 32-bit value */ ++#define BCMSWAP32(val) \ ++ ((uint32)((((uint32)(val) & (uint32)0x000000ffU) << 24) | \ ++ (((uint32)(val) & (uint32)0x0000ff00U) << 8) | \ ++ (((uint32)(val) & (uint32)0x00ff0000U) >> 8) | \ ++ (((uint32)(val) & (uint32)0xff000000U) >> 24))) ++ ++/* Reverse the two 16-bit halves of a 32-bit value */ ++#define BCMSWAP32BY16(val) \ ++ ((uint32)((((uint32)(val) & (uint32)0x0000ffffU) << 16) | \ ++ (((uint32)(val) & (uint32)0xffff0000U) >> 16))) ++ ++/* Byte swapping macros ++ * Host <=> Network (Big Endian) for 16- and 32-bit values ++ * Host <=> Little-Endian for 16- and 32-bit values ++ */ ++#ifndef hton16 ++#ifndef IL_BIGENDIAN ++#define HTON16(i) BCMSWAP16(i) ++#define hton16(i) bcmswap16(i) ++#define HTON32(i) BCMSWAP32(i) ++#define hton32(i) bcmswap32(i) ++#define NTOH16(i) BCMSWAP16(i) ++#define ntoh16(i) bcmswap16(i) ++#define NTOH32(i) BCMSWAP32(i) ++#define ntoh32(i) bcmswap32(i) ++#define LTOH16(i) (i) ++#define ltoh16(i) (i) ++#define LTOH32(i) (i) ++#define ltoh32(i) (i) ++#define HTOL16(i) (i) ++#define htol16(i) (i) ++#define HTOL32(i) (i) ++#define htol32(i) (i) ++#else /* IL_BIGENDIAN */ 
++#define HTON16(i) (i) ++#define hton16(i) (i) ++#define HTON32(i) (i) ++#define hton32(i) (i) ++#define NTOH16(i) (i) ++#define ntoh16(i) (i) ++#define NTOH32(i) (i) ++#define ntoh32(i) (i) ++#define LTOH16(i) BCMSWAP16(i) ++#define ltoh16(i) bcmswap16(i) ++#define LTOH32(i) BCMSWAP32(i) ++#define ltoh32(i) bcmswap32(i) ++#define HTOL16(i) BCMSWAP16(i) ++#define htol16(i) bcmswap16(i) ++#define HTOL32(i) BCMSWAP32(i) ++#define htol32(i) bcmswap32(i) ++#endif /* IL_BIGENDIAN */ ++#endif /* hton16 */ ++ ++#ifndef IL_BIGENDIAN ++#define ltoh16_buf(buf, i) ++#define htol16_buf(buf, i) ++#else ++#define ltoh16_buf(buf, i) bcmswap16_buf((uint16 *)(buf), (i)) ++#define htol16_buf(buf, i) bcmswap16_buf((uint16 *)(buf), (i)) ++#endif /* IL_BIGENDIAN */ ++ ++/* Unaligned loads and stores in host byte order */ ++#ifndef IL_BIGENDIAN ++#define load32_ua(a) ltoh32_ua(a) ++#define store32_ua(a, v) htol32_ua_store(v, a) ++#define load16_ua(a) ltoh16_ua(a) ++#define store16_ua(a, v) htol16_ua_store(v, a) ++#else ++#define load32_ua(a) ntoh32_ua(a) ++#define store32_ua(a, v) hton32_ua_store(v, a) ++#define load16_ua(a) ntoh16_ua(a) ++#define store16_ua(a, v) hton16_ua_store(v, a) ++#endif /* IL_BIGENDIAN */ ++ ++#define _LTOH16_UA(cp) ((cp)[0] | ((cp)[1] << 8)) ++#define _LTOH32_UA(cp) ((cp)[0] | ((cp)[1] << 8) | ((cp)[2] << 16) | ((cp)[3] << 24)) ++#define _NTOH16_UA(cp) (((cp)[0] << 8) | (cp)[1]) ++#define _NTOH32_UA(cp) (((cp)[0] << 24) | ((cp)[1] << 16) | ((cp)[2] << 8) | (cp)[3]) ++ ++#define ltoh_ua(ptr) \ ++ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ ++ sizeof(*(ptr)) == sizeof(uint16) ? _LTOH16_UA((const uint8 *)(ptr)) : \ ++ sizeof(*(ptr)) == sizeof(uint32) ? _LTOH32_UA((const uint8 *)(ptr)) : \ ++ *(uint8 *)0) ++ ++#define ntoh_ua(ptr) \ ++ (sizeof(*(ptr)) == sizeof(uint8) ? *(const uint8 *)(ptr) : \ ++ sizeof(*(ptr)) == sizeof(uint16) ? _NTOH16_UA((const uint8 *)(ptr)) : \ ++ sizeof(*(ptr)) == sizeof(uint32) ? 
_NTOH32_UA((const uint8 *)(ptr)) : \ ++ *(uint8 *)0) ++ ++#ifdef __GNUC__ ++ ++/* GNU macro versions avoid referencing the argument multiple times, while also ++ * avoiding the -fno-inline used in ROM builds. ++ */ ++ ++#define bcmswap16(val) ({ \ ++ uint16 _val = (val); \ ++ BCMSWAP16(_val); \ ++}) ++ ++#define bcmswap32(val) ({ \ ++ uint32 _val = (val); \ ++ BCMSWAP32(_val); \ ++}) ++ ++#define bcmswap32by16(val) ({ \ ++ uint32 _val = (val); \ ++ BCMSWAP32BY16(_val); \ ++}) ++ ++#define bcmswap16_buf(buf, len) ({ \ ++ uint16 *_buf = (uint16 *)(buf); \ ++ uint _wds = (len) / 2; \ ++ while (_wds--) { \ ++ *_buf = bcmswap16(*_buf); \ ++ _buf++; \ ++ } \ ++}) ++ ++#define htol16_ua_store(val, bytes) ({ \ ++ uint16 _val = (val); \ ++ uint8 *_bytes = (uint8 *)(bytes); \ ++ _bytes[0] = _val & 0xff; \ ++ _bytes[1] = _val >> 8; \ ++}) ++ ++#define htol32_ua_store(val, bytes) ({ \ ++ uint32 _val = (val); \ ++ uint8 *_bytes = (uint8 *)(bytes); \ ++ _bytes[0] = _val & 0xff; \ ++ _bytes[1] = (_val >> 8) & 0xff; \ ++ _bytes[2] = (_val >> 16) & 0xff; \ ++ _bytes[3] = _val >> 24; \ ++}) ++ ++#define hton16_ua_store(val, bytes) ({ \ ++ uint16 _val = (val); \ ++ uint8 *_bytes = (uint8 *)(bytes); \ ++ _bytes[0] = _val >> 8; \ ++ _bytes[1] = _val & 0xff; \ ++}) ++ ++#define hton32_ua_store(val, bytes) ({ \ ++ uint32 _val = (val); \ ++ uint8 *_bytes = (uint8 *)(bytes); \ ++ _bytes[0] = _val >> 24; \ ++ _bytes[1] = (_val >> 16) & 0xff; \ ++ _bytes[2] = (_val >> 8) & 0xff; \ ++ _bytes[3] = _val & 0xff; \ ++}) ++ ++#define ltoh16_ua(bytes) ({ \ ++ const uint8 *_bytes = (const uint8 *)(bytes); \ ++ _LTOH16_UA(_bytes); \ ++}) ++ ++#define ltoh32_ua(bytes) ({ \ ++ const uint8 *_bytes = (const uint8 *)(bytes); \ ++ _LTOH32_UA(_bytes); \ ++}) ++ ++#define ntoh16_ua(bytes) ({ \ ++ const uint8 *_bytes = (const uint8 *)(bytes); \ ++ _NTOH16_UA(_bytes); \ ++}) ++ ++#define ntoh32_ua(bytes) ({ \ ++ const uint8 *_bytes = (const uint8 *)(bytes); \ ++ _NTOH32_UA(_bytes); \ ++}) ++ ++#else /* 
!__GNUC__ */ ++ ++/* Inline versions avoid referencing the argument multiple times */ ++static INLINE uint16 ++bcmswap16(uint16 val) ++{ ++ return BCMSWAP16(val); ++} ++ ++static INLINE uint32 ++bcmswap32(uint32 val) ++{ ++ return BCMSWAP32(val); ++} ++ ++static INLINE uint32 ++bcmswap32by16(uint32 val) ++{ ++ return BCMSWAP32BY16(val); ++} ++ ++/* Reverse pairs of bytes in a buffer (not for high-performance use) */ ++/* buf - start of buffer of shorts to swap */ ++/* len - byte length of buffer */ ++static INLINE void ++bcmswap16_buf(uint16 *buf, uint len) ++{ ++ len = len / 2; ++ ++ while (len--) { ++ *buf = bcmswap16(*buf); ++ buf++; ++ } ++} ++ ++/* ++ * Store 16-bit value to unaligned little-endian byte array. ++ */ ++static INLINE void ++htol16_ua_store(uint16 val, uint8 *bytes) ++{ ++ bytes[0] = val & 0xff; ++ bytes[1] = val >> 8; ++} ++ ++/* ++ * Store 32-bit value to unaligned little-endian byte array. ++ */ ++static INLINE void ++htol32_ua_store(uint32 val, uint8 *bytes) ++{ ++ bytes[0] = val & 0xff; ++ bytes[1] = (val >> 8) & 0xff; ++ bytes[2] = (val >> 16) & 0xff; ++ bytes[3] = val >> 24; ++} ++ ++/* ++ * Store 16-bit value to unaligned network-(big-)endian byte array. ++ */ ++static INLINE void ++hton16_ua_store(uint16 val, uint8 *bytes) ++{ ++ bytes[0] = val >> 8; ++ bytes[1] = val & 0xff; ++} ++ ++/* ++ * Store 32-bit value to unaligned network-(big-)endian byte array. ++ */ ++static INLINE void ++hton32_ua_store(uint32 val, uint8 *bytes) ++{ ++ bytes[0] = val >> 24; ++ bytes[1] = (val >> 16) & 0xff; ++ bytes[2] = (val >> 8) & 0xff; ++ bytes[3] = val & 0xff; ++} ++ ++/* ++ * Load 16-bit value from unaligned little-endian byte array. ++ */ ++static INLINE uint16 ++ltoh16_ua(const void *bytes) ++{ ++ return _LTOH16_UA((const uint8 *)bytes); ++} ++ ++/* ++ * Load 32-bit value from unaligned little-endian byte array. 
++ */ ++static INLINE uint32 ++ltoh32_ua(const void *bytes) ++{ ++ return _LTOH32_UA((const uint8 *)bytes); ++} ++ ++/* ++ * Load 16-bit value from unaligned big-(network-)endian byte array. ++ */ ++static INLINE uint16 ++ntoh16_ua(const void *bytes) ++{ ++ return _NTOH16_UA((const uint8 *)bytes); ++} ++ ++/* ++ * Load 32-bit value from unaligned big-(network-)endian byte array. ++ */ ++static INLINE uint32 ++ntoh32_ua(const void *bytes) ++{ ++ return _NTOH32_UA((const uint8 *)bytes); ++} ++ ++#endif /* !__GNUC__ */ ++#endif /* !_BCMENDIAN_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetmib.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetmib.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetmib.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetmib.h 2017-11-09 17:53:43.922294000 +0800 +@@ -0,0 +1,88 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Hardware-specific MIB definition for ++ * Broadcom Home Networking Division ++ * BCM44XX and BCM47XX 10/100 Mbps Ethernet cores. 
++ * ++ * $Id: bcmenetmib.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _bcmenetmib_h_ ++#define _bcmenetmib_h_ ++ ++/* cpp contortions to concatenate w/arg prescan */ ++#ifndef PAD ++#define _PADLINE(line) pad ## line ++#define _XSTR(line) _PADLINE(line) ++#define PAD _XSTR(__LINE__) ++#endif /* PAD */ ++ ++/* ++ * EMAC MIB Registers ++ */ ++typedef volatile struct { ++ uint32 tx_good_octets; ++ uint32 tx_good_pkts; ++ uint32 tx_octets; ++ uint32 tx_pkts; ++ uint32 tx_broadcast_pkts; ++ uint32 tx_multicast_pkts; ++ uint32 tx_len_64; ++ uint32 tx_len_65_to_127; ++ uint32 tx_len_128_to_255; ++ uint32 tx_len_256_to_511; ++ uint32 tx_len_512_to_1023; ++ uint32 tx_len_1024_to_max; ++ uint32 tx_jabber_pkts; ++ uint32 tx_oversize_pkts; ++ uint32 tx_fragment_pkts; ++ uint32 tx_underruns; ++ uint32 tx_total_cols; ++ uint32 tx_single_cols; ++ uint32 tx_multiple_cols; ++ uint32 tx_excessive_cols; ++ uint32 tx_late_cols; ++ uint32 tx_defered; ++ uint32 tx_carrier_lost; ++ uint32 tx_pause_pkts; ++ uint32 PAD[8]; ++ ++ uint32 rx_good_octets; ++ uint32 rx_good_pkts; ++ uint32 rx_octets; ++ uint32 rx_pkts; ++ uint32 rx_broadcast_pkts; ++ uint32 rx_multicast_pkts; ++ uint32 rx_len_64; ++ uint32 rx_len_65_to_127; ++ uint32 rx_len_128_to_255; ++ uint32 rx_len_256_to_511; ++ uint32 rx_len_512_to_1023; ++ uint32 rx_len_1024_to_max; ++ uint32 rx_jabber_pkts; ++ uint32 rx_oversize_pkts; ++ uint32 rx_fragment_pkts; ++ uint32 rx_missed_pkts; ++ uint32 rx_crc_align_errs; ++ uint32 rx_undersize; ++ uint32 rx_crc_errs; ++ uint32 rx_align_errs; ++ uint32 rx_symbol_errs; ++ uint32 rx_pause_pkts; ++ uint32 rx_nonpause_pkts; ++} bcmenetmib_t; ++ ++#endif /* _bcmenetmib_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetphy.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetphy.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetphy.h 
1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetphy.h 2017-11-09 17:53:43.923292000 +0800 +@@ -0,0 +1,86 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Misc Broadcom BCM47XX MDC/MDIO enet phy definitions. 
++ * ++ * $Id: bcmenetphy.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _bcmenetphy_h_ ++#define _bcmenetphy_h_ ++ ++/* phy address */ ++#define MAXEPHY 32 /* mdio phy addresses are 5bit quantities */ ++#define EPHY_MASK 0x1f /* phy mask */ ++#define EPHY_NONE 31 /* nvram: no phy present at all */ ++#define EPHY_NOREG 30 /* nvram: no local phy regs */ ++ ++#define MAXPHYREG 32 /* max 32 registers per phy */ ++ ++/* just a few phy registers */ ++#define CTL_RESET (1 << 15) /* reset */ ++#define CTL_LOOP (1 << 14) /* loopback */ ++#define CTL_SPEED (1 << 13) /* speed selection lsb 0=10, 1=100 */ ++#define CTL_ANENAB (1 << 12) /* autonegotiation enable */ ++#define CTL_RESTART (1 << 9) /* restart autonegotiation */ ++#define CTL_DUPLEX (1 << 8) /* duplex mode 0=half, 1=full */ ++#define CTL_SPEED_MSB (1 << 6) /* speed selection msb */ ++ ++#define CTL_SPEED_10 ((0 << 6) | (0 << 13)) /* speed selection CTL.6=0, CTL.13=0 */ ++#define CTL_SPEED_100 ((0 << 6) | (1 << 13)) /* speed selection CTL.6=0, CTL.13=1 */ ++#define CTL_SPEED_1000 ((1 << 6) | (0 << 13)) /* speed selection CTL.6=1, CTL.13=0 */ ++ ++#define ADV_10FULL (1 << 6) /* autonegotiate advertise 10full */ ++#define ADV_10HALF (1 << 5) /* autonegotiate advertise 10half */ ++#define ADV_100FULL (1 << 8) /* autonegotiate advertise 100full */ ++#define ADV_100HALF (1 << 7) /* autonegotiate advertise 100half */ ++#define ADV_PAUSE (1 << 10) /* autonegotiate advertise pause */ ++ ++/* link partner ability register */ ++#define LPA_SLCT 0x001f /* same as advertise selector */ ++#define LPA_10HALF 0x0020 /* can do 10mbps half-duplex */ ++#define LPA_10FULL 0x0040 /* can do 10mbps full-duplex */ ++#define LPA_100HALF 0x0080 /* can do 100mbps half-duplex */ ++#define LPA_100FULL 0x0100 /* can do 100mbps full-duplex */ ++#define LPA_100BASE4 0x0200 /* can do 100mbps 4k packets */ ++#define LPA_RFAULT 0x2000 /* link partner faulted */ ++#define LPA_LPACK 0x4000 /* link partner acked us */ ++#define LPA_NPAGE 0x8000 /* 
next page bit */ ++ ++#define LPA_DUPLEX (LPA_10FULL | LPA_100FULL) ++#define LPA_100 (LPA_100FULL | LPA_100HALF | LPA_100BASE4) ++ ++/* 1000BASE-T control register */ ++#define ADV_1000HALF 0x0100 /* advertise 1000BASE-T half duplex */ ++#define ADV_1000FULL 0x0200 /* advertise 1000BASE-T full duplex */ ++ ++/* 1000BASE-T status register */ ++#define LPA_1000HALF 0x0400 /* link partner 1000BASE-T half duplex */ ++#define LPA_1000FULL 0x0800 /* link partner 1000BASE-T full duplex */ ++ ++/* 1000BASE-T extended status register */ ++#define EST_1000THALF 0x1000 /* 1000BASE-T half duplex capable */ ++#define EST_1000TFULL 0x2000 /* 1000BASE-T full duplex capable */ ++#define EST_1000XHALF 0x4000 /* 1000BASE-X half duplex capable */ ++#define EST_1000XFULL 0x8000 /* 1000BASE-X full duplex capable */ ++ ++#define STAT_REMFAULT (1 << 4) /* remote fault */ ++#define STAT_LINK (1 << 2) /* link status */ ++#define STAT_JAB (1 << 1) /* jabber detected */ ++#define AUX_FORCED (1 << 2) /* forced 10/100 */ ++#define AUX_SPEED (1 << 1) /* speed 0=10mbps 1=100mbps */ ++#define AUX_DUPLEX (1 << 0) /* duplex 0=half 1=full */ ++ ++#endif /* _bcmenetphy_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetrxh.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetrxh.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetrxh.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmenetrxh.h 2017-11-09 17:53:43.927297000 +0800 +@@ -0,0 +1,50 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Hardware-specific Receive Data Header for the ++ * Broadcom Home Networking Division ++ * BCM44XX and BCM47XX 10/100 Mbps Ethernet cores. ++ * ++ * $Id: bcmenetrxh.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _bcmenetrxh_h_ ++#define _bcmenetrxh_h_ ++ ++/* ++ * The Ethernet MAC core returns an 8-byte Receive Frame Data Header ++ * with every frame consisting of ++ * 16bits of frame length, followed by ++ * 16bits of EMAC rx descriptor info, followed by 32bits of undefined. 
++ */ ++typedef volatile struct { ++ uint16 len; ++ uint16 flags; ++ uint16 pad[12]; ++} bcmenetrxh_t; ++ ++#define RXHDR_LEN 28 /* Header length */ ++ ++#define RXF_L ((uint16)1 << 11) /* last buffer in a frame */ ++#define RXF_MISS ((uint16)1 << 7) /* received due to promisc mode */ ++#define RXF_BRDCAST ((uint16)1 << 6) /* dest is broadcast address */ ++#define RXF_MULT ((uint16)1 << 5) /* dest is multicast address */ ++#define RXF_LG ((uint16)1 << 4) /* frame length > rxmaxlength */ ++#define RXF_NO ((uint16)1 << 3) /* odd number of nibbles */ ++#define RXF_RXER ((uint16)1 << 2) /* receive symbol error */ ++#define RXF_CRC ((uint16)1 << 1) /* crc error */ ++#define RXF_OV ((uint16)1 << 0) /* fifo overflow */ ++ ++#endif /* _bcmenetrxh_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmgmacmib.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmgmacmib.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmgmacmib.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmgmacmib.h 2017-11-09 17:53:43.928296000 +0800 +@@ -0,0 +1,117 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Hardware-specific MIB definition for ++ * Broadcom Home Networking Division ++ * GbE Unimac core ++ * ++ * $Id: bcmgmacmib.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _bcmgmacmib_h_ ++#define _bcmgmacmib_h_ ++ ++ ++/* cpp contortions to concatenate w/arg prescan */ ++#ifndef PAD ++#define _PADLINE(line) pad ## line ++#define _XSTR(line) _PADLINE(line) ++#define PAD _XSTR(__LINE__) ++#endif /* PAD */ ++ ++/* GMAC MIB structure */ ++ ++typedef struct _gmacmib { ++ uint32 tx_good_octets; /* 0x300 */ ++ uint32 tx_good_octets_high; /* 0x304 */ ++ uint32 tx_good_pkts; /* 0x308 */ ++ uint32 tx_octets; /* 0x30c */ ++ uint32 tx_octets_high; /* 0x310 */ ++ uint32 tx_pkts; /* 0x314 */ ++ uint32 tx_broadcast_pkts; /* 0x318 */ ++ uint32 tx_multicast_pkts; /* 0x31c */ ++ uint32 tx_len_64; /* 0x320 */ ++ uint32 tx_len_65_to_127; /* 0x324 */ ++ uint32 tx_len_128_to_255; /* 0x328 */ ++ uint32 tx_len_256_to_511; /* 0x32c */ ++ uint32 tx_len_512_to_1023; /* 0x330 */ ++ uint32 tx_len_1024_to_1522; /* 0x334 */ ++ uint32 tx_len_1523_to_2047; /* 0x338 */ ++ uint32 tx_len_2048_to_4095; /* 0x33c */ ++ uint32 tx_len_4095_to_8191; /* 0x340 */ ++ uint32 tx_len_8192_to_max; /* 0x344 */ ++ uint32 tx_jabber_pkts; /* 0x348 */ ++ uint32 tx_oversize_pkts; /* 0x34c */ ++ uint32 tx_fragment_pkts; /* 0x350 */ ++ uint32 tx_underruns; /* 0x354 */ ++ uint32 tx_total_cols; /* 0x358 */ ++ uint32 tx_single_cols; /* 0x35c */ ++ uint32 tx_multiple_cols; /* 0x360 */ ++ uint32 tx_excessive_cols; /* 0x364 */ ++ uint32 tx_late_cols; /* 0x368 */ ++ uint32 tx_defered; /* 0x36c */ ++ uint32 tx_carrier_lost; /* 0x370 */ ++ uint32 tx_pause_pkts; /* 0x374 */ ++ uint32 
tx_uni_pkts; /* 0x378 */ ++ uint32 tx_q0_pkts; /* 0x37c */ ++ uint32 tx_q0_octets; /* 0x380 */ ++ uint32 tx_q0_octets_high; /* 0x384 */ ++ uint32 tx_q1_pkts; /* 0x388 */ ++ uint32 tx_q1_octets; /* 0x38c */ ++ uint32 tx_q1_octets_high; /* 0x390 */ ++ uint32 tx_q2_pkts; /* 0x394 */ ++ uint32 tx_q2_octets; /* 0x398 */ ++ uint32 tx_q2_octets_high; /* 0x39c */ ++ uint32 tx_q3_pkts; /* 0x3a0 */ ++ uint32 tx_q3_octets; /* 0x3a4 */ ++ uint32 tx_q3_octets_high; /* 0x3a8 */ ++ uint32 PAD; ++ uint32 rx_good_octets; /* 0x3b0 */ ++ uint32 rx_good_octets_high; /* 0x3b4 */ ++ uint32 rx_good_pkts; /* 0x3b8 */ ++ uint32 rx_octets; /* 0x3bc */ ++ uint32 rx_octets_high; /* 0x3c0 */ ++ uint32 rx_pkts; /* 0x3c4 */ ++ uint32 rx_broadcast_pkts; /* 0x3c8 */ ++ uint32 rx_multicast_pkts; /* 0x3cc */ ++ uint32 rx_len_64; /* 0x3d0 */ ++ uint32 rx_len_65_to_127; /* 0x3d4 */ ++ uint32 rx_len_128_to_255; /* 0x3d8 */ ++ uint32 rx_len_256_to_511; /* 0x3dc */ ++ uint32 rx_len_512_to_1023; /* 0x3e0 */ ++ uint32 rx_len_1024_to_1522; /* 0x3e4 */ ++ uint32 rx_len_1523_to_2047; /* 0x3e8 */ ++ uint32 rx_len_2048_to_4095; /* 0x3ec */ ++ uint32 rx_len_4095_to_8191; /* 0x3f0 */ ++ uint32 rx_len_8192_to_max; /* 0x3f4 */ ++ uint32 rx_jabber_pkts; /* 0x3f8 */ ++ uint32 rx_oversize_pkts; /* 0x3fc */ ++ uint32 rx_fragment_pkts; /* 0x400 */ ++ uint32 rx_missed_pkts; /* 0x404 */ ++ uint32 rx_crc_align_errs; /* 0x408 */ ++ uint32 rx_undersize; /* 0x40c */ ++ uint32 rx_crc_errs; /* 0x410 */ ++ uint32 rx_align_errs; /* 0x414 */ ++ uint32 rx_symbol_errs; /* 0x418 */ ++ uint32 rx_pause_pkts; /* 0x41c */ ++ uint32 rx_nonpause_pkts; /* 0x420 */ ++ uint32 rx_sachanges; /* 0x424 */ ++ uint32 rx_uni_pkts; /* 0x428 */ ++} gmacmib_t; ++ ++#define GM_MIB_BASE 0x300 ++#define GM_MIB_LIMIT 0x800 ++ ++#endif /* _bcmgmacmib_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmgmacrxh.h 
b/drivers/net/ethernet/broadcom/gmac/src/include/bcmgmacrxh.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmgmacrxh.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmgmacrxh.h 2017-11-09 17:53:43.929307000 +0800 +@@ -0,0 +1,53 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Hardware-specific Receive Data Header for the ++ * Broadcom Home Networking Division ++ * BCM47XX GbE cores. ++ * ++ * $Id: bcmgmacrxh.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _bcmgmacrxh_h_ ++#define _bcmgmacrxh_h_ ++ ++/* ++ * The Ethernet GMAC core returns an 8-byte Receive Frame Data Header ++ * with every frame consisting of ++ * 16 bits of frame length, followed by ++ * 16 bits of GMAC rx descriptor info, followed by 32bits of undefined. 
++ */ ++typedef volatile struct { ++ uint16 len; ++ uint16 flags; ++ uint16 pad[12]; ++} bcmgmacrxh_t; ++ ++#define RXHDR_LEN 28 /* Header length */ ++ ++#define GRXF_DT_MASK ((uint16)0xf) /* data type */ ++#define GRXF_DT_SHIFT 12 ++#define GRXF_DC_MASK ((uint16)0xf) /* (num descr to xfer the frame) - 1 */ ++#define GRXF_DC_SHIFT 8 ++#define GRXF_OVF ((uint16)1 << 7) /* overflow error occured */ ++#define GRXF_CTFERR ((uint16)1 << 6) /* overflow error occured */ ++#define GRXF_OVERSIZE ((uint16)1 << 4) /* frame size > rxmaxlength */ ++#define GRXF_CRC ((uint16)1 << 3) /* crc error */ ++#define GRXF_VLAN ((uint16)1 << 2) /* vlan tag detected */ ++#define GRXF_PT_MASK ((uint16)3) /* packet type 0 - Unicast, ++ * 1 - Multicast, 2 - Broadcast ++ */ ++ ++#endif /* _bcmgmacrxh_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_egphy28.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_egphy28.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_egphy28.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_egphy28.h 2017-11-09 17:53:43.930299000 +0800 +@@ -0,0 +1,68 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * These routines provide access to the external phy ++ * ++ */ ++ ++#ifndef _EGPHY28_H_ ++#define _EGPHY28_H_ ++ ++#define EGPHY28_REG_RDB_ADDR 0x1e ++#define EGPHY28_REG_RDB_DATA 0x1f ++ ++#define EGPHY28_RDB_ACCESS_ADDR_1 0x17 ++#define EGPHY28_RDB_ACCESS_DATA_1 0x0f7e ++#define EGPHY28_RDB_ACCESS_ADDR_2 0x15 ++#define EGPHY28_RDB_ACCESS_DATA_2 0x0000 ++ ++/* Generic MII registers */ ++#define EGPHY28_COPPER_MII_CTRL 0x00 ++#define EGPHY28_PHY_ID_MSB 0x02 /* PHY ID MSB */ ++#define EGPHY28_PHY_ID_LSB 0x03 /* PHY ID LSB */ ++#define EGPHY28_MII_ADVERTISE 0x04 /* Advertisement control reg */ ++#define EGPHY28_MII_CTRL1000 0x09 /* 1000BASE-T control */ ++#define EGPGY28_MII_ECONTROL 0x10 /* Extended Control */ ++#define EGPHY28_COPPER_MISC_CTRL 0x2f /* COPPER MISC CONTROL */ ++ ++/* For EGPHY28_COPPER_MII_CTRL(0x00) */ ++#define BMCR_FULLDPLX 0x0100 /* Full duplex */ ++#define BMCR_SPEED1000 0x0040 /* MSB of Speed (1000) */ ++#define BMCR_ANENABLE 0x1000 /* Enable auto negotiation */ ++#define BMCR_ANRESTART 0x0200 /* Auto negotiation restart */ ++#define BMCR_PDOWN 0x0800 /* Powerdown EGPHY28 */ ++#define BMCR_RESET 0x8000 /* Reset EGPHY28 */ ++ ++/* For EGPHY28_MII_ADVERTISE(0x04) */ ++#define ADVERTISE_1000HALF 0x0100 /* Advertise 1000BASE-T half duplex */ ++#define ADVERTISE_CSMA 0x0001 /* Only selector supported */ ++#define ADVERTISE_100FULL 0x0100 /* Try for 100mbps full-duplex */ ++#define ADVERTISE_10FULL 0x0040 /* Try for 10mbps full-duplex */ ++#define ADVERTISE_100HALF 0x0080 /* Try for 100mbps half-duplex */ ++#define ADVERTISE_10HALF 0x0020 /* Try for 10mbps half-duplex */ ++ ++/* For EGPHY28_MII_CTRL1000(0x09) 
*/ ++#define ADVERTISE_1000FULL 0x0200 /* Advertise 1000BASE-T full duplex */ ++#define REPEATER_DTE 0x0400 /* Repeater/switch or DTE port type */ ++ ++/* For EGPHY28_COPPER_MISC_CTRL(0x2f) */ ++#define BMCR_FORCE_AUTO_MDIX 0x0200 ++ ++extern int egphy28_enable_set(u32 phy_addr, int enable); ++extern int egphy28_init(void __iomem *base, u32 phy_addr); ++extern int egphy28_enable_set(u32 phy_addr, int enable); ++extern int egphy28_force_auto_mdix(u32 phy_addr, int enable); ++ ++#endif +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy.h 2017-11-09 17:53:43.931300000 +0800 +@@ -0,0 +1,290 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * These routines provide access to the external phy ++ * ++ */ ++ ++#ifndef _bcm_iproc_phy_h_ ++#define _bcm_iproc_phy_h_ ++ ++ ++#define PHY_ADDR_MASK 0x00001F ++#define PHY_BUS_MASK 0x000020 ++#define PHY_BANK_MASK 0x001F00 ++#define PHY_FLAG_MASK 0xFF0000 ++#define PHY_ADDR(_flag, _bank, _bus, _addr) \ ++ (((_flag << 16) & PHY_FLAG_MASK) | ((_bank << 8) & PHY_BANK_MASK) | \ ++ ((_bus << 5) & PHY_BUS_MASK) | ((_addr << 0) & PHY_ADDR_MASK)) ++ ++#define PHY_REG_ADDR(_addr) ((_addr & PHY_ADDR_MASK) >> 0) ++#define PHY_REG_BUS(_addr) ((_addr & PHY_BUS_MASK) >> 5) ++#define PHY_REG_BANK(_addr) ((_addr & PHY_BANK_MASK) >> 8) ++#define PHY_REG_FLAGS(_addr) ((_addr & PHY_FLAG_MASK) >> 16) ++ ++/* Flags for phy register */ ++#define PHY_REG_FLAGS_NONE (0) ++#define PHY_REG_FLAGS_1000X (1 << 0) ++#define PHY_REG_FLAGS_PRI_SERDES (1 << 1) ++#define PHY_REG_FLAGS_RDB (1 << 2) ++#define PHY_REG_FLAGS_QSGMII (1 << 3) ++#define PHY_REG_FLAGS_FIBER (PHY_REG_FLAGS_1000X | \ ++ PHY_REG_FLAGS_PRI_SERDES) ++ ++typedef enum { ++ SOC_E_NONE = 0, ++ SOC_E_INTERNAL = -1, ++ SOC_E_MEMORY = -2, ++ SOC_E_UNIT = -3, ++ SOC_E_PARAM = -4, ++ SOC_E_EMPTY = -5, ++ SOC_E_FULL = -6, ++ SOC_E_NOT_FOUND = -7, ++ SOC_E_EXISTS = -8, ++ SOC_E_TIMEOUT = -9, ++ SOC_E_BUSY = -10, ++ SOC_E_FAIL = -11, ++ SOC_E_DISABLED = -12, ++ SOC_E_BADID = -13, ++ SOC_E_RESOURCE = -14, ++ SOC_E_CONFIG = -15, ++ SOC_E_UNAVAIL = -16, ++ SOC_E_INIT = -17, ++ SOC_E_PORT = -18, ++ ++ SOC_E_LIMIT = -19 /* Must come last */ ++} soc_error_t; ++ ++#define SOC_SUCCESS(rv) ((rv) >= 0) ++#define SOC_FAILURE(rv) ((rv) < 0) ++ ++typedef enum _soc_port_if_e { ++ SOC_PORT_IF_NOCXN, /* No physical connection */ ++ SOC_PORT_IF_NULL, /* Pass-through connection without PHY */ ++ SOC_PORT_IF_MII, ++ SOC_PORT_IF_GMII, ++ SOC_PORT_IF_SGMII, ++ SOC_PORT_IF_TBI, ++ SOC_PORT_IF_XGMII, ++ SOC_PORT_IF_RGMII, ++ SOC_PORT_IF_RvMII, ++ SOC_PORT_IF_SFI, ++ SOC_PORT_IF_XFI, ++ SOC_PORT_IF_KR, ++ SOC_PORT_IF_KR4, ++ SOC_PORT_IF_CR, ++ 
SOC_PORT_IF_CR4, ++ SOC_PORT_IF_XLAUI, ++ SOC_PORT_IF_SR, ++ SOC_PORT_IF_RXAUI, ++ SOC_PORT_IF_XAUI, ++ SOC_PORT_IF_SPAUI, ++ SOC_PORT_IF_QSGMII, ++ SOC_PORT_IF_ILKN, ++ SOC_PORT_IF_RCY, ++ SOC_PORT_IF_FAT_PIPE, ++ SOC_PORT_IF_CGMII, ++ SOC_PORT_IF_CAUI, ++ SOC_PORT_IF_LR, ++ SOC_PORT_IF_LR4, ++ SOC_PORT_IF_SR4, ++ SOC_PORT_IF_KX, ++ SOC_PORT_IF_CPU, ++ SOC_PORT_IF_OLP, ++ SOC_PORT_IF_OAMP, ++ SOC_PORT_IF_ERP, ++ SOC_PORT_IF_COUNT /* last, please */ ++} _soc_port_if_t; ++typedef _soc_port_if_t soc_port_if_t; ++ ++ ++/* 1000BASE-T/100BASE-TX/10BASE-T MII Control Register (Addr 00h) */ ++#define PHY_MII_CTRLr_FLAGS 0x00 ++#define PHY_MII_CTRLr_BANK 0x0000 ++#define PHY_MII_CTRLr_ADDR 0x00 ++/* 1000BASE-T/100BASE-TX/10BASE-T MII Status Register (ADDR 01h) */ ++#define PHY_MII_STATr_FLAGS 0x00 ++#define PHY_MII_STATr_BANK 0x0000 ++#define PHY_MII_STATr_ADDR 0x01 ++/* 1000BASE-T/100BASE-TX/10BASE-T PHY Identifier Register (ADDR 02h) */ ++#define PHY_MII_PHY_ID0r_FLAGS _SOC_PHY_REG_DIRECT ++#define PHY_MII_PHY_ID0r_BANK 0x0000 ++#define PHY_MII_PHY_ID0r_ADDR 0x02 ++/* 1000BASE-T/100BASE-TX/10BASE-T PHY Identifier Register (ADDR 03h) */ ++#define PHY_MII_PHY_ID1r_FLAGS _SOC_PHY_REG_DIRECT ++#define PHY_MII_PHY_ID1r_BANK 0x0000 ++#define PHY_MII_PHY_ID1r_ADDR 0x03 ++/* 1000BASE-T/100BASE-TX/10BASE-T Auto-neg Advertisment Register (ADDR 04h) */ ++#define PHY_MII_ANAr_FLAGS 0x00 ++#define PHY_MII_ANAr_BANK 0x0000 ++#define PHY_MII_ANAr_ADDR 0x04 ++/* 1000BASE-T/100BASE-TX/10BASE-T Auto-neg Link Partner Ability (ADDR 05h) */ ++#define PHY_MII_ANPr_FLAGS 0x00 ++#define PHY_MII_ANPr_BANK 0x0000 ++#define PHY_MII_ANPr_ADDR 0x05 ++/* 1000BASE-T Control Register (ADDR 09h) */ ++#define PHY_MII_GB_CTRLr_FLAGS 0x00 ++#define PHY_MII_GB_CTRLr_BANK 0x0000 ++#define PHY_MII_GB_CTRLr_ADDR 0x09 ++/* 1000BASE-T Status Register (ADDR 0ah) */ ++#define PHY_MII_GB_STATr_FLAGS 0x00 ++#define PHY_MII_GB_STATr_BANK 0x0000 ++#define PHY_MII_GB_STATr_ADDR 0x0a ++/* 1000BASE-T/100BASE-TX/10BASE-T 
IEEE Extended Status Register (ADDR 0fh) */ ++#define PHY_MII_ESRr_FLAGS 0x00 ++#define PHY_MII_ESRr_BANK 0x0000 ++#define PHY_MII_ESRr_ADDR 0x0f ++/* 1000BASE-T/100BASE-TX/10BASE-T PHY Extended Control Register (ADDR 10h) */ ++#define PHY_MII_ECRr_FLAGS 0x00 ++#define PHY_MII_ECRr_BANK 0x0000 ++#define PHY_MII_ECRr_ADDR 0x10 ++/* 1000BASE-T/100BASE-TX/10BASE-T Auxiliary Control Reg (ADDR 18h Shadow 000)*/ ++#define PHY_MII_AUX_CTRLr_FLAGS 0x00 ++#define PHY_MII_AUX_CTRLr_BANK 0x0000 ++#define PHY_MII_AUX_CTRLr_ADDR 0x18 ++/* 1000BASE-T/100BASE-TX/10BASE-T Power/MII Control Reg (ADDR 18h Shadow 010)*/ ++#define PHY_MII_POWER_CTRLr_FLAGS 0x00 ++#define PHY_MII_POWER_CTRLr_BANK 0x0002 ++#define PHY_MII_POWER_CTRLr_ADDR 0x18 ++/* 1000BASE-T/100BASE-TX/10BASE-T Misc Control Reg (ADDR 18h Shadow 111)*/ ++#define PHY_MII_MISC_CTRLr_FLAGS 0x00 ++#define PHY_MII_MISC_CTRLr_BANK 0x0007 ++#define PHY_MII_MISC_CTRLr_ADDR 0x18 ++/* Auxiliary 1000BASE-X Control Reg (ADDR 1ch shadow 11011) */ ++#define PHY_AUX_1000X_CTRLr_FLAGS 0x00 ++#define PHY_AUX_1000X_CTRLr_BANK 0x001B ++#define PHY_AUX_1000X_CTRLr_ADDRS 0x1c ++/* Mode Control Reg (ADDR 1ch shadow 11111) */ ++#define PHY_MODE_CTRLr_FLAGS 0x00 ++#define PHY_MODE_CTRLr_BANK 0x001F ++#define PHY_MODE_CTRLr_ADDR 0x1c ++ ++/* ++ * Primary SerDes Registers ++ */ ++/* 1000BASE-X MII Control Register (Addr 00h) */ ++#define PHY_1000X_MII_CTRLr_FLAGS SOC_PHY_REG_1000X ++#define PHY_1000X_MII_CTRLr_BANK 0x0000 ++#define PHY_1000X_MII_CTRLr_ADDR 0x00 ++ ++ ++/* MII Control Register: bit definitions */ ++#define MII_CTRL_FS_2500 (1 << 5) /* Force speed to 2500 Mbps */ ++#define MII_CTRL_SS_MSB (1 << 6) /* Speed select, MSb */ ++#define MII_CTRL_CST (1 << 7) /* Collision Signal test */ ++#define MII_CTRL_FD (1 << 8) /* Full Duplex */ ++#define MII_CTRL_RAN (1 << 9) /* Restart Autonegotiation */ ++#define MII_CTRL_IP (1 << 10) /* Isolate Phy */ ++#define MII_CTRL_PD (1 << 11) /* Power Down */ ++#define MII_CTRL_AE (1 << 12) /* 
Autonegotiation enable */ ++#define MII_CTRL_SS_LSB (1 << 13) /* Speed select, LSb */ ++#define MII_CTRL_LE (1 << 14) /* Loopback enable */ ++#define MII_CTRL_RESET (1 << 15) /* PHY reset */ ++ ++#define MII_CTRL_SS(_x) ((_x) & (MII_CTRL_SS_LSB|MII_CTRL_SS_MSB)) ++#define MII_CTRL_SS_10 0 ++#define MII_CTRL_SS_100 (MII_CTRL_SS_LSB) ++#define MII_CTRL_SS_1000 (MII_CTRL_SS_MSB) ++#define MII_CTRL_SS_INVALID (MII_CTRL_SS_LSB | MII_CTRL_SS_MSB) ++#define MII_CTRL_SS_MASK (MII_CTRL_SS_LSB | MII_CTRL_SS_MSB) ++ ++/* ++ * MII Status Register: See 802.3, 1998 pg 544 ++ */ ++#define MII_STAT_EXT (1 << 0) /* Extended Registers */ ++#define MII_STAT_JBBR (1 << 1) /* Jabber Detected */ ++#define MII_STAT_LA (1 << 2) /* Link Active */ ++#define MII_STAT_AN_CAP (1 << 3) /* Autoneg capable */ ++#define MII_STAT_RF (1 << 4) /* Remote Fault */ ++#define MII_STAT_AN_DONE (1 << 5) /* Autoneg complete */ ++#define MII_STAT_MF_PS (1 << 6) /* Preamble suppression */ ++#define MII_STAT_ES (1 << 8) /* Extended status (R15) */ ++#define MII_STAT_HD_100_T2 (1 << 9) /* Half duplex 100Mb/s supported */ ++#define MII_STAT_FD_100_T2 (1 << 10)/* Full duplex 100Mb/s supported */ ++#define MII_STAT_HD_10 (1 << 11)/* Half duplex 100Mb/s supported */ ++#define MII_STAT_FD_10 (1 << 12)/* Full duplex 100Mb/s supported */ ++#define MII_STAT_HD_100 (1 << 13)/* Half duplex 100Mb/s supported */ ++#define MII_STAT_FD_100 (1 << 14)/* Full duplex 100Mb/s supported */ ++#define MII_STAT_100_T4 (1 << 15)/* Full duplex 100Mb/s supported */ ++ ++/* ++ * MII Link Advertisment ++ */ ++#define MII_ANA_ASF (1 << 0)/* Advertise Selector Field */ ++#define MII_ANA_HD_10 (1 << 5)/* Half duplex 10Mb/s supported */ ++#define MII_ANA_FD_1000X (1 << 5)/* Full duplex for 1000BASE-X */ ++#define MII_ANA_FD_10 (1 << 6)/* Full duplex 10Mb/s supported */ ++#define MII_ANA_HD_1000X (1 << 6)/* Half duplex for 1000BASE-X */ ++#define MII_ANA_HD_100 (1 << 7)/* Half duplex 100Mb/s supported */ ++#define MII_ANA_1000X_PAUSE (1 << 
7)/* Pause supported for 1000BASE-X */ ++#define MII_ANA_FD_100 (1 << 8)/* Full duplex 100Mb/s supported */ ++#define MII_ANA_1000X_ASYM_PAUSE (1 << 8)/* Asymmetric pause supported for 1000BASE-X */ ++#define MII_ANA_T4 (1 << 9)/* T4 */ ++#define MII_ANA_PAUSE (1 << 10)/* Pause supported */ ++#define MII_ANA_ASYM_PAUSE (1 << 11)/* Asymmetric pause supported */ ++#define MII_ANA_RF (1 << 13)/* Remote fault */ ++#define MII_ANA_NP (1 << 15)/* Next Page */ ++ ++#define MII_ANA_ASF_802_3 (1) /* 802.3 PHY */ ++ ++/* ++ * 1000Base-T Control Register ++ */ ++#define MII_GB_CTRL_MS_MAN (1 << 12) /* Manual Master/Slave mode */ ++#define MII_GB_CTRL_MS (1 << 11) /* Master/Slave negotiation mode */ ++#define MII_GB_CTRL_PT (1 << 10) /* Port type */ ++#define MII_GB_CTRL_ADV_1000FD (1 << 9) /* Advertise 1000Base-T FD */ ++#define MII_GB_CTRL_ADV_1000HD (1 << 8) /* Advertise 1000Base-T HD */ ++ ++/* ++ * 1000Base-T Status Register ++ */ ++#define MII_GB_STAT_MS_FAULT (1 << 15) /* Master/Slave Fault */ ++#define MII_GB_STAT_MS (1 << 14) /* Master/Slave, 1 == Master */ ++#define MII_GB_STAT_LRS (1 << 13) /* Local receiver status */ ++#define MII_GB_STAT_RRS (1 << 12) /* Remote receiver status */ ++#define MII_GB_STAT_LP_1000FD (1 << 11) /* Link partner 1000FD capable */ ++#define MII_GB_STAT_LP_1000HD (1 << 10) /* Link partner 1000HD capable */ ++#define MII_GB_STAT_IDE (0xff << 0) /* Idle error count */ ++ ++/* ++ * IEEE Extended Status Register ++ */ ++#define MII_ESR_1000_X_FD (1 << 15) /* 1000Base-T FD capable */ ++#define MII_ESR_1000_X_HD (1 << 14) /* 1000Base-T HD capable */ ++#define MII_ESR_1000_T_FD (1 << 13) /* 1000Base-T FD capable */ ++#define MII_ESR_1000_T_HD (1 << 12) /* 1000Base-T FD capable */ ++ ++/* MII Extended Control Register (BROADCOM) */ ++#define MII_ECR_FE (1 << 0) /* FIFO Elasticity */ ++#define MII_ECR_TLLM (1 << 1) /* Three link LED mode */ ++#define MII_ECR_ET_IPG (1 << 2) /* Extended XMIT IPG mode */ ++#define MII_ECR_FLED_OFF (1 << 3) /* Force LED 
off */ ++#define MII_ECR_FLED_ON (1 << 4) /* Force LED on */ ++#define MII_ECR_ELT (1 << 5) /* Enable LED traffic */ ++#define MII_ECR_RS (1 << 6) /* Reset Scrambler */ ++#define MII_ECR_BRSA (1 << 7) /* Bypass Receive Sym. align */ ++#define MII_ECR_BMLT3 (1 << 8) /* Bypass MLT3 Encoder/Decoder */ ++#define MII_ECR_BSD (1 << 9) /* Bypass Scramble/Descramble */ ++#define MII_ECR_B4B5B (1 << 10) /* Bypass 4B/5B Encode/Decode */ ++#define MII_ECR_FI (1 << 11) /* Force Interrupt */ ++#define MII_ECR_ID (1 << 12) /* Interrupt Disable */ ++#define MII_ECR_TD (1 << 13) /* XMIT Disable */ ++#define MII_ECR_DAMC (1 << 14) /* DIsable Auto-MDI Crossover */ ++#define MII_ECR_10B (1 << 15) /* 1 == 10B, 0 == GMII */ ++ ++/* MISC Control Register (Addr 18h, Shadow Value 111) */ ++#define MII_FORCED_AUTO_MDIX (1 << 9) /* 1 == AUTO-MDIX enabled when AN disabled */ ++#endif /* _bcm_iproc_phy_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5221.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5221.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5221.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5221.h 2017-11-09 17:53:43.932294000 +0800 +@@ -0,0 +1,45 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * These routines provide access to the external phy ++ * ++ */ ++ ++#ifndef _bcm_iproc_phy5221_h_ ++#define _bcm_iproc_phy5221_h_ ++ ++ ++/* ---- Include Files ---------------------------------------------------- */ ++#include ++ ++#define PHY_AUX_MULTIPLE_PHYr_BANK 0x0000 ++#define PHY_AUX_MULTIPLE_PHYr_ADDR 0x1e ++ ++#define PHY522X_SUPER_ISOLATE_MODE (1<<3) ++ ++/* ---- External Function Prototypes ------------------------------------- */ ++ ++extern int phy5221_wr_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data); ++extern int phy5221_rd_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data); ++extern int phy5221_mod_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 data, uint16 mask); ++extern int phy5221_init(uint eth_num, uint phyaddr); ++extern int phy5221_link_get(uint eth_num, uint phyaddr, int *link); ++extern int phy5221_enable_set(uint eth_num, uint phyaddr, int enable); ++extern int phy5221_speed_get(uint eth_num, uint phyaddr, int *speed, int *duplex); ++ ++#endif /* _bcm_iproc_phy5221_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5461s.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5461s.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5461s.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5461s.h 2017-11-09 17:53:43.933291000 +0800 +@@ -0,0 +1,46 @@ ++/* ++ * Copyright (C) 2013, Broadcom 
Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * These routines provide access to the external phy ++ * ++ */ ++ ++#ifndef _bcm_iproc_phy5461s_h_ ++#define _bcm_iproc_phy5461s_h_ ++ ++ ++/* ---- Include Files ---------------------------------------------------- */ ++#include ++ ++/* Indirect PHY register address flags */ ++#define SOC_PHY_REG_RESERVE_ACCESS 0x20000000 ++#define SOC_PHY_REG_1000X 0x40000000 ++#define SOC_PHY_REG_INDIRECT 0x80000000 ++#define _SOC_PHY_REG_DIRECT ((SOC_PHY_REG_1000X << 1) | (SOC_PHY_REG_1000X >> 1)) ++ ++/* ---- External Function Prototypes ------------------------------------- */ ++ ++extern int phy5461_wr_reg(uint eth_num, uint phyaddr, uint32 flags, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data); ++extern int phy5461_rd_reg(uint eth_num, uint phyaddr, uint32 flags, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data); ++extern int phy5461_mod_reg(uint eth_num, uint phyaddr, uint32 flags, uint16 reg_bank, ++ uint8 reg_addr, uint16 data, uint16 mask); ++extern int phy5461_init(uint eth_num, uint phyaddr); ++extern int phy5461_link_get(uint eth_num, uint phyaddr, int *link); ++extern int phy5461_enable_set(uint eth_num, uint phyaddr, int enable); ++extern int phy5461_speed_get(uint eth_num, uint 
phyaddr, int *speed, int *duplex); ++ ++#endif /* _bcm_iproc_phy5461s_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5481.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5481.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5481.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_phy5481.h 2017-11-09 17:53:43.933313000 +0800 +@@ -0,0 +1,45 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * These routines provide access to the external phy ++ * ++ */ ++ ++#ifndef _bcm_iproc_phy5481_h_ ++#define _bcm_iproc_phy5481_h_ ++ ++ ++/* ---- Include Files ---------------------------------------------------- */ ++#include ++ ++// #define PHY_AUX_MULTIPLE_PHYr_BANK 0x0000 ++// #define PHY_AUX_MULTIPLE_PHYr_ADDR 0x1e ++ ++#define PHY5481_SUPER_ISOLATE_MODE (1U<<5) ++ ++/* ---- External Function Prototypes ------------------------------------- */ ++ ++extern int phy5481_wr_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data); ++extern int phy5481_rd_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data); ++extern int phy5481_mod_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 data, uint16 mask); ++extern int phy5481_init(uint eth_num, uint phyaddr); ++extern int phy5481_link_get(uint eth_num, uint phyaddr, int *link); ++extern int phy5481_enable_set(uint eth_num, uint phyaddr, int enable); ++extern int phy5481_speed_get(uint eth_num, uint phyaddr, int *speed, int *duplex); ++ ++#endif /* _bcm_iproc_phy5481_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_serdes.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_serdes.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_serdes.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_serdes.h 2017-11-09 17:53:43.934297000 +0800 +@@ -0,0 +1,78 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * These routines provide access to the serdes ++ * ++ */ ++ ++#ifndef _bcm_iproc_serdes_h_ ++#define _bcm_iproc_serdes_h_ ++ ++ ++/* ---- Include Files ---------------------------------------------------- */ ++#include ++ ++#define PHY_REG_BLK_ADDR 0x001f /* GLOBAL BLOCK ADDRESS REGISTER */ ++ ++/* ++ * MII Link Advertisment (Clause 37) ++ */ ++#define MII_ANA_C37_NP (1 << 15) /* Next Page */ ++#define MII_ANA_C37_RF_OK (0 << 12) /* No error, link OK */ ++#define MII_ANA_C37_RF_LINK_FAIL (1 << 12) /* Offline */ ++#define MII_ANA_C37_RF_OFFLINE (2 << 12) /* Link failure */ ++#define MII_ANA_C37_RF_AN_ERR (3 << 12) /* Auto-Negotiation Error */ ++#define MII_ANA_C37_PAUSE (1 << 7) /* Symmetric Pause */ ++#define MII_ANA_C37_ASYM_PAUSE (1 << 8) /* Asymmetric Pause */ ++#define MII_ANA_C37_HD (1 << 6) /* Half duplex */ ++#define MII_ANA_C37_FD (1 << 5) /* Full duplex */ ++ ++/* MII Control Register: bit definitions */ ++ ++#define MII_CTRL_FS_2500 (1 << 5) /* Force speed to 2500 Mbps */ ++#define MII_CTRL_SS_MSB (1 << 6) /* Speed select, MSb */ ++#define MII_CTRL_CST (1 << 7) /* Collision Signal test */ ++#define MII_CTRL_FD (1 << 8) /* Full Duplex */ ++#define MII_CTRL_RAN (1 << 9) /* Restart Autonegotiation */ ++#define MII_CTRL_IP (1 << 10) /* Isolate Phy */ ++#define MII_CTRL_PD (1 << 11) /* Power Down */ ++#define MII_CTRL_AE (1 << 12) /* Autonegotiation enable */ ++#define MII_CTRL_SS_LSB (1 << 13) /* Speed select, LSb */ ++#define 
MII_CTRL_LE (1 << 14) /* Loopback enable */ ++#define MII_CTRL_RESET (1 << 15) /* PHY reset */ ++ ++#define MII_CTRL_SS(_x) ((_x) & (MII_CTRL_SS_LSB|MII_CTRL_SS_MSB)) ++#define MII_CTRL_SS_10 0 ++#define MII_CTRL_SS_100 (MII_CTRL_SS_LSB) ++#define MII_CTRL_SS_1000 (MII_CTRL_SS_MSB) ++#define MII_CTRL_SS_INVALID (MII_CTRL_SS_LSB | MII_CTRL_SS_MSB) ++#define MII_CTRL_SS_MASK (MII_CTRL_SS_LSB | MII_CTRL_SS_MSB) ++ ++/* ---- External Function Prototypes ------------------------------------- */ ++ ++extern void serdes_set_blk(uint eth_num, uint phyaddr, uint blk); ++extern void serdes_wr_reg(uint eth_num, uint phyaddr, uint reg, uint data); ++extern uint16 serdes_rd_reg(uint eth_num, uint phyaddr, uint reg); ++extern uint16 serdes_get_id(uint eth_num, uint phyaddr, uint off); ++extern void serdes_reset(uint eth_num, uint phyaddr); ++extern int serdes_reset_core(uint eth_num, uint phyaddr); ++extern int serdes_start_pll(uint eth_num, uint phyaddr); ++extern int serdes_init(uint eth_num, uint phyaddr); ++#if defined(CONFIG_SERDES_ASYMMETRIC_MODE) ++extern int serdes_speeddpx_set(uint eth_num, uint phyaddr, int speed, int fulldpx); ++extern int serdes_set_asym_mode(uint eth_num, uint phyaddr); ++#endif /* (defined(CONFIG_SERDES_ASYMMETRIC_MODE)) */ ++ ++#endif /* _bcm_iproc_serdes_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_serdes_def.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_serdes_def.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_serdes_def.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmiproc_serdes_def.h 2017-11-09 17:53:43.935301000 +0800 +@@ -0,0 +1,328 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. 
++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * These are serdes defines ++ * ++ */ ++ ++#ifndef _PHY_XGXS16G_H_ ++#define _PHY_XGXS16G_H_ ++ ++/* macros */ ++ ++/* Macros ONLY used after initialization */ ++#define XGXS16G_2p5G_ID(id2) ((id2 & 0xff) == 0xf) ++ ++ ++/****************************************************************************/ ++/***** Starting below is auto-generated register macros from RDB files *****/ ++/****************************************************************************/ ++ ++/**************************************************************************** ++ * Core Enums. 
++ ***************************************************************************/ ++ ++#define XGXS16G_IEEE0BLK_IEEECONTROL0r 0x00000000 ++#define XGXS16G_XGXSBLK0_XGXSCONTROLr 0x00008000 ++#define XGXS16G_XGXSBLK0_XGXSSTATUSr 0x00008001 ++#define XGXS16G_XGXSBLK0_MMDSELECTr 0x0000800d ++#define XGXS16G_XGXSBLK0_MISCCONTROL1r 0x0000800e ++#define XGXS16G_XGXSBLK1_LANECTRL0r 0x00008015 ++#define XGXS16G_XGXSBLK1_LANECTRL1r 0x00008016 ++#define XGXS16G_XGXSBLK1_LANECTRL3r 0x00008018 ++#define XGXS16G_TX0_TX_ACONTROL0r 0x00008061 ++#define XGXS16G_RX0_RX_CONTROLr 0x000080b1 ++#define XGXS16G_AN73_PDET_PARDET10GCONTROLr 0x00008131 ++#define XGXS16G_XGXSBLK7_EEECONTROLr 0x00008150 ++#define XGXS16G_TX_LN_SWAP1r 0x00008169 ++#define XGXS16G_SERDESDIGITAL_CONTROL1000X1r 0x00008300 ++#define XGXS16G_SERDESDIGITAL_CONTROL1000X2r 0x00008301 ++#define XGXS16G_SERDESDIGITAL_CONTROL1000X3r 0x00008302 ++#define XGXS16G_SERDESDIGITAL_STATUS1000X1r 0x00008304 ++#define XGXS16G_SERDESDIGITAL_MISC1r 0x00008308 ++#define XGXS16G_SERDESID_SERDESID0r 0x00008310 ++#define XGXS16G_SERDESID_SERDESID1r 0x00008311 ++#define XGXS16G_SERDESID_SERDESID2r 0x00008312 ++#define XGXS16G_SERDESID_SERDESID3r 0x00008313 ++#define XGXS16G_REMOTEPHY_MISC3r 0x0000833c ++#define XGXS16G_REMOTEPHY_MISC5r 0x0000833e ++#define XGXS16G_BAM_NEXTPAGE_MP5_NEXTPAGECTRLr 0x00008350 ++#define XGXS16G_BAM_NEXTPAGE_UD_FIELDr 0x00008357 ++#define XGXS16G_COMBO_IEEE0_MIICNTLr 0x0000ffe0 ++#define XGXS16G_COMBO_IEEE0_AUTONEGADVr 0x0000ffe4 ++ ++#define WC40_DIGITAL4_MISC3r 0x0000833c ++ ++/* Digital4 :: Misc3 :: laneDisable [06:06] */ ++#define DIGITAL4_MISC3_LANEDISABLE_MASK 0x0040 ++#define DIGITAL4_MISC3_LANEDISABLE_ALIGN 0 ++#define DIGITAL4_MISC3_LANEDISABLE_BITS 1 ++#define DIGITAL4_MISC3_LANEDISABLE_SHIFT 6 ++ ++ ++/**************************************************************************** ++ * XGXS16G_IEEE_ieee0Blk ++ ***************************************************************************/ 
++/**************************************************************************** ++ * ieee0Blk :: ieeeControl0 ++ ***************************************************************************/ ++/* ieee0Blk :: ieeeControl0 :: rst_hw [15:15] */ ++#define IEEE0BLK_IEEECONTROL0_RST_HW_MASK 0x8000 ++#define IEEE0BLK_IEEECONTROL0_RST_HW_ALIGN 0 ++#define IEEE0BLK_IEEECONTROL0_RST_HW_BITS 1 ++#define IEEE0BLK_IEEECONTROL0_RST_HW_SHIFT 15 ++ ++/* ieee0Blk :: ieeeControl0 :: gloopback [14:14] */ ++#define IEEE0BLK_IEEECONTROL0_GLOOPBACK_MASK 0x4000 ++#define IEEE0BLK_IEEECONTROL0_GLOOPBACK_ALIGN 0 ++#define IEEE0BLK_IEEECONTROL0_GLOOPBACK_BITS 1 ++#define IEEE0BLK_IEEECONTROL0_GLOOPBACK_SHIFT 14 ++ ++ ++/**************************************************************************** ++ * XGXS16G_USER_XgxsBlk0 ++ ***************************************************************************/ ++/**************************************************************************** ++ * XgxsBlk0 :: xgxsControl ++ ***************************************************************************/ ++/* XgxsBlk0 :: xgxsControl :: start_sequencer [13:13] */ ++#define XGXSBLK0_XGXSCONTROL_START_SEQUENCER_MASK 0x2000 ++#define XGXSBLK0_XGXSCONTROL_START_SEQUENCER_ALIGN 0 ++#define XGXSBLK0_XGXSCONTROL_START_SEQUENCER_BITS 1 ++#define XGXSBLK0_XGXSCONTROL_START_SEQUENCER_SHIFT 13 ++ ++/* XgxsBlk0 :: xgxsControl :: mode_10g [11:08] */ ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_MASK 0x0f00 ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_ALIGN 0 ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_BITS 4 ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_SHIFT 8 ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_XGXS 0 ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_XGXS_noCC 1 ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_IndLane 6 ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_XGXS_noLss 8 ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_XGXS_noLss_noCC 9 ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_protBypass 10 ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_protBypass_noDsk 11 ++#define 
XGXSBLK0_XGXSCONTROL_MODE_10G_ComboCoreMode 12 ++#define XGXSBLK0_XGXSCONTROL_MODE_10G_ClocksOff 15 ++ ++/* XgxsBlk0 :: xgxsControl :: hstl [05:05] */ ++#define XGXSBLK0_XGXSCONTROL_HSTL_MASK 0x0020 ++#define XGXSBLK0_XGXSCONTROL_HSTL_ALIGN 0 ++#define XGXSBLK0_XGXSCONTROL_HSTL_BITS 1 ++#define XGXSBLK0_XGXSCONTROL_HSTL_SHIFT 5 ++ ++/* XgxsBlk0 :: xgxsControl :: cdet_en [03:03] */ ++#define XGXSBLK0_XGXSCONTROL_CDET_EN_MASK 0x0008 ++#define XGXSBLK0_XGXSCONTROL_CDET_EN_ALIGN 0 ++#define XGXSBLK0_XGXSCONTROL_CDET_EN_BITS 1 ++#define XGXSBLK0_XGXSCONTROL_CDET_EN_SHIFT 3 ++ ++/* XgxsBlk0 :: xgxsControl :: eden [02:02] */ ++#define XGXSBLK0_XGXSCONTROL_EDEN_MASK 0x0004 ++#define XGXSBLK0_XGXSCONTROL_EDEN_ALIGN 0 ++#define XGXSBLK0_XGXSCONTROL_EDEN_BITS 1 ++#define XGXSBLK0_XGXSCONTROL_EDEN_SHIFT 2 ++ ++/* XgxsBlk0 :: xgxsControl :: afrst_en [01:01] */ ++#define XGXSBLK0_XGXSCONTROL_AFRST_EN_MASK 0x0002 ++#define XGXSBLK0_XGXSCONTROL_AFRST_EN_ALIGN 0 ++#define XGXSBLK0_XGXSCONTROL_AFRST_EN_BITS 1 ++#define XGXSBLK0_XGXSCONTROL_AFRST_EN_SHIFT 1 ++ ++/* XgxsBlk0 :: xgxsControl :: txcko_div [00:00] */ ++#define XGXSBLK0_XGXSCONTROL_TXCKO_DIV_MASK 0x0001 ++#define XGXSBLK0_XGXSCONTROL_TXCKO_DIV_ALIGN 0 ++#define XGXSBLK0_XGXSCONTROL_TXCKO_DIV_BITS 1 ++#define XGXSBLK0_XGXSCONTROL_TXCKO_DIV_SHIFT 0 ++ ++ ++/**************************************************************************** ++ * XgxsBlk0 :: xgxsStatus ++ ***************************************************************************/ ++/* XgxsBlk0 :: xgxsStatus :: txpll_lock [11:11] */ ++#define XGXSBLK0_XGXSSTATUS_TXPLL_LOCK_MASK 0x0800 ++#define XGXSBLK0_XGXSSTATUS_TXPLL_LOCK_ALIGN 0 ++#define XGXSBLK0_XGXSSTATUS_TXPLL_LOCK_BITS 1 ++#define XGXSBLK0_XGXSSTATUS_TXPLL_LOCK_SHIFT 11 ++ ++ ++/**************************************************************************** ++ * XgxsBlk0 :: miscControl1 ++ ***************************************************************************/ ++/* XgxsBlk0 :: miscControl1 :: 
PCS_dev_en_override [10:10] */ ++#define XGXSBLK0_MISCCONTROL1_PCS_DEV_EN_OVERRIDE_MASK 0x0400 ++#define XGXSBLK0_MISCCONTROL1_PCS_DEV_EN_OVERRIDE_ALIGN 0 ++#define XGXSBLK0_MISCCONTROL1_PCS_DEV_EN_OVERRIDE_BITS 1 ++#define XGXSBLK0_MISCCONTROL1_PCS_DEV_EN_OVERRIDE_SHIFT 10 ++ ++/* XgxsBlk0 :: miscControl1 :: PMD_dev_en_override [09:09] */ ++#define XGXSBLK0_MISCCONTROL1_PMD_DEV_EN_OVERRIDE_MASK 0x0200 ++#define XGXSBLK0_MISCCONTROL1_PMD_DEV_EN_OVERRIDE_ALIGN 0 ++#define XGXSBLK0_MISCCONTROL1_PMD_DEV_EN_OVERRIDE_BITS 1 ++#define XGXSBLK0_MISCCONTROL1_PMD_DEV_EN_OVERRIDE_SHIFT 9 ++ ++/* XgxsBlk0 :: miscControl1 :: ieee_blksel_autodet [01:01] */ ++#define XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_AUTODET_MASK 0x0002 ++#define XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_AUTODET_ALIGN 0 ++#define XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_AUTODET_BITS 1 ++#define XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_AUTODET_SHIFT 1 ++ ++/* XgxsBlk0 :: miscControl1 :: ieee_blksel_val [00:00] */ ++#define XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_VAL_MASK 0x0001 ++#define XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_VAL_ALIGN 0 ++#define XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_VAL_BITS 1 ++#define XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_VAL_SHIFT 0 ++ ++ ++/**************************************************************************** ++ * XGXS16G_USER_XgxsBlk1 ++ ***************************************************************************/ ++/**************************************************************************** ++ * XgxsBlk1 :: laneCtrl0 ++ ***************************************************************************/ ++/* XgxsBlk1 :: laneCtrl0 :: cl36_pcs_en_rx [07:04] */ ++#define XGXSBLK1_LANECTRL0_CL36_PCS_EN_RX_MASK 0x00f0 ++#define XGXSBLK1_LANECTRL0_CL36_PCS_EN_RX_ALIGN 0 ++#define XGXSBLK1_LANECTRL0_CL36_PCS_EN_RX_BITS 4 ++#define XGXSBLK1_LANECTRL0_CL36_PCS_EN_RX_SHIFT 4 ++ ++/* XgxsBlk1 :: laneCtrl0 :: cl36_pcs_en_tx [03:00] */ ++#define XGXSBLK1_LANECTRL0_CL36_PCS_EN_TX_MASK 0x000f ++#define XGXSBLK1_LANECTRL0_CL36_PCS_EN_TX_ALIGN 0 
++#define XGXSBLK1_LANECTRL0_CL36_PCS_EN_TX_BITS 4 ++#define XGXSBLK1_LANECTRL0_CL36_PCS_EN_TX_SHIFT 0 ++ ++ ++/**************************************************************************** ++ * XGXS16G_USER_TX0 ++ ***************************************************************************/ ++/**************************************************************************** ++ * TX0 :: Tx_AControl0 ++ ***************************************************************************/ ++/* TX0 :: Tx_AControl0 :: txpol_flip [05:05] */ ++#define TX0_TX_ACONTROL0_TXPOL_FLIP_MASK 0x0020 ++#define TX0_TX_ACONTROL0_TXPOL_FLIP_ALIGN 0 ++#define TX0_TX_ACONTROL0_TXPOL_FLIP_BITS 1 ++#define TX0_TX_ACONTROL0_TXPOL_FLIP_SHIFT 5 ++ ++ ++/**************************************************************************** ++ * XGXS16G_USER_dsc_2_0 ++ ***************************************************************************/ ++/**************************************************************************** ++ * dsc_2_0 :: dsc_ctrl0 ++ ***************************************************************************/ ++/* dsc_2_0 :: dsc_ctrl0 :: rxSeqStart [15:15] */ ++#define DSC_2_0_DSC_CTRL0_RXSEQSTART_MASK 0x8000 ++#define DSC_2_0_DSC_CTRL0_RXSEQSTART_ALIGN 0 ++#define DSC_2_0_DSC_CTRL0_RXSEQSTART_BITS 1 ++#define DSC_2_0_DSC_CTRL0_RXSEQSTART_SHIFT 15 ++ ++ ++/**************************************************************************** ++ * XGXS16G_USER_SerdesDigital ++ ***************************************************************************/ ++/**************************************************************************** ++ * SerdesDigital :: Control1000X1 ++ ***************************************************************************/ ++/* SerdesDigital :: Control1000X1 :: crc_checker_disable [07:07] */ ++#define SERDESDIGITAL_CONTROL1000X1_CRC_CHECKER_DISABLE_MASK 0x0080 ++#define SERDESDIGITAL_CONTROL1000X1_CRC_CHECKER_DISABLE_ALIGN 0 ++#define SERDESDIGITAL_CONTROL1000X1_CRC_CHECKER_DISABLE_BITS 1 
++#define SERDESDIGITAL_CONTROL1000X1_CRC_CHECKER_DISABLE_SHIFT 7 ++ ++/* SerdesDigital :: Control1000X1 :: disable_pll_pwrdwn [06:06] */ ++#define SERDESDIGITAL_CONTROL1000X1_DISABLE_PLL_PWRDWN_MASK 0x0040 ++#define SERDESDIGITAL_CONTROL1000X1_DISABLE_PLL_PWRDWN_ALIGN 0 ++#define SERDESDIGITAL_CONTROL1000X1_DISABLE_PLL_PWRDWN_BITS 1 ++#define SERDESDIGITAL_CONTROL1000X1_DISABLE_PLL_PWRDWN_SHIFT 6 ++ ++/* SerdesDigital :: Control1000X1 :: fiber_mode_1000X [00:00] */ ++#define SERDESDIGITAL_CONTROL1000X1_FIBER_MODE_1000X_MASK 0x0001 ++#define SERDESDIGITAL_CONTROL1000X1_FIBER_MODE_1000X_ALIGN 0 ++#define SERDESDIGITAL_CONTROL1000X1_FIBER_MODE_1000X_BITS 1 ++#define SERDESDIGITAL_CONTROL1000X1_FIBER_MODE_1000X_SHIFT 0 ++ ++/**************************************************************************** ++ * SerdesDigital :: Control1000X3 ++ ***************************************************************************/ ++/* SerdesDigital :: Control1000X3 :: fifo_elasicity_tx_rx [02:01] */ ++#define SERDESDIGITAL_CONTROL1000X3_FIFO_ELASICITY_TX_RX_MASK 0x0006 ++#define SERDESDIGITAL_CONTROL1000X3_FIFO_ELASICITY_TX_RX_ALIGN 0 ++#define SERDESDIGITAL_CONTROL1000X3_FIFO_ELASICITY_TX_RX_BITS 2 ++#define SERDESDIGITAL_CONTROL1000X3_FIFO_ELASICITY_TX_RX_SHIFT 1 ++ ++/* SerdesDigital :: Control1000X3 :: tx_fifo_rst [00:00] */ ++#define SERDESDIGITAL_CONTROL1000X3_TX_FIFO_RST_MASK 0x0001 ++#define SERDESDIGITAL_CONTROL1000X3_TX_FIFO_RST_ALIGN 0 ++#define SERDESDIGITAL_CONTROL1000X3_TX_FIFO_RST_BITS 1 ++#define SERDESDIGITAL_CONTROL1000X3_TX_FIFO_RST_SHIFT 0 ++ ++/**************************************************************************** ++ * SerdesDigital :: Status1000X1 ++ ***************************************************************************/ ++/* SerdesDigital :: Status1000X1 :: speed_status [04:03] */ ++#define SERDESDIGITAL_STATUS1000X1_SPEED_STATUS_MASK 0x0018 ++#define SERDESDIGITAL_STATUS1000X1_SPEED_STATUS_ALIGN 0 ++#define 
SERDESDIGITAL_STATUS1000X1_SPEED_STATUS_BITS 2 ++#define SERDESDIGITAL_STATUS1000X1_SPEED_STATUS_SHIFT 3 ++ ++/**************************************************************************** ++ * SerdesDigital :: Misc1 ++ ***************************************************************************/ ++/* SerdesDigital :: Misc1 :: refclk_sel [15:13] */ ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_MASK 0xe000 ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_ALIGN 0 ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_BITS 3 ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_SHIFT 13 ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_clk_25MHz 0 ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_clk_100MHz 1 ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_clk_125MHz 2 ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_clk_156p25MHz 3 ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_clk_187p5MHz 4 ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_clk_161p25Mhz 5 ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_clk_50Mhz 6 ++#define SERDESDIGITAL_MISC1_REFCLK_SEL_clk_106p25Mhz 7 ++ ++/* SerdesDigital :: Misc1 :: force_speed_sel [04:04] */ ++#define SERDESDIGITAL_MISC1_FORCE_SPEED_SEL_MASK 0x0010 ++#define SERDESDIGITAL_MISC1_FORCE_SPEED_SEL_ALIGN 0 ++#define SERDESDIGITAL_MISC1_FORCE_SPEED_SEL_BITS 1 ++#define SERDESDIGITAL_MISC1_FORCE_SPEED_SEL_SHIFT 4 ++ ++/* SerdesDigital :: Misc1 :: force_speed [03:00] */ ++#define SERDESDIGITAL_MISC1_FORCE_SPEED_MASK 0x000f ++#define SERDESDIGITAL_MISC1_FORCE_SPEED_ALIGN 0 ++#define SERDESDIGITAL_MISC1_FORCE_SPEED_BITS 4 ++#define SERDESDIGITAL_MISC1_FORCE_SPEED_SHIFT 0 ++ ++ ++/**************************************************************************** ++ * CL73_UserB0 :: CL73_BAMCtrl1 ++ ***************************************************************************/ ++/* CL73_UserB0 :: CL73_BAMCtrl1 :: CL73_bamEn [15:15] */ ++#define CL73_USERB0_CL73_BAMCTRL1_CL73_BAMEN_MASK 0x8000 ++#define CL73_USERB0_CL73_BAMCTRL1_CL73_BAMEN_ALIGN 0 ++#define CL73_USERB0_CL73_BAMCTRL1_CL73_BAMEN_BITS 1 ++#define 
CL73_USERB0_CL73_BAMCTRL1_CL73_BAMEN_SHIFT 15 ++ ++ ++/**************************************************************************** ++ * Datatype Definitions. ++ ***************************************************************************/ ++#endif /* _PHY_XGXS16G_H_ */ ++ ++/* End of File */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmnvram.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmnvram.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmnvram.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmnvram.h 2017-11-09 17:53:43.936309000 +0800 +@@ -0,0 +1,290 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * NVRAM variable manipulation ++ * ++ * $Id: bcmnvram.h 325984 2012-04-05 08:51:37Z $ ++ */ ++ ++#ifndef _bcmnvram_h_ ++#define _bcmnvram_h_ ++ ++#ifndef _LANGUAGE_ASSEMBLY ++ ++#include ++#include ++#include ++ ++struct nvram_header { ++ uint32 magic; ++ uint32 len; ++ uint32 crc_ver_init; /* 0:7 crc, 8:15 ver, 16:31 sdram_init */ ++ uint32 config_refresh; /* 0:15 sdram_config, 16:31 sdram_refresh */ ++ uint32 config_ncdl; /* ncdl values for memc */ ++}; ++ ++struct nvram_otphdr { ++ struct nvram_header nvh; ++ uint16 flags_swmacm_gpio_phya; /* otp flags, switch/gmac mode, gpio, phyaddr */ ++ struct ether_addr mac; ++ uint32 clkfreq; ++}; ++ ++struct nvram_tuple { ++ char *name; ++ char *value; ++ struct nvram_tuple *next; ++}; ++ ++/* ++ * Get default value for an NVRAM variable ++ */ ++extern char *nvram_default_get(const char *name); ++ ++/* ++ * Initialize NVRAM access. May be unnecessary or undefined on certain ++ * platforms. ++ */ ++extern int nvram_init(void *sih); ++ ++/* ++ * Append a chunk of nvram variables to the global list ++ */ ++extern int nvram_append(void *si, char *vars, uint varsz); ++ ++extern void nvram_get_global_vars(char **varlst, uint *varsz); ++ ++ ++/* ++ * Check for reset button press for restoring factory defaults. ++ */ ++extern int nvram_reset(void *sih); ++ ++/* ++ * Disable NVRAM access. May be unnecessary or undefined on certain ++ * platforms. ++ */ ++extern void nvram_exit(void *sih); ++ ++/* ++ * Get the value of an NVRAM variable. The pointer returned may be ++ * invalid after a set. ++ * @param name name of variable to get ++ * @return value of variable or NULL if undefined ++ */ ++extern char * nvram_get(const char *name); ++ ++/* ++ * Read the reset GPIO value from the nvram and set the GPIO ++ * as input ++ */ ++extern int BCMINITFN(nvram_resetgpio_init)(void *sih); ++ ++/* ++ * Get the value of an NVRAM variable. 
++ * @param name name of variable to get ++ * @return value of variable or NUL if undefined ++ */ ++static INLINE char * ++nvram_safe_get(const char *name) ++{ ++ char *p = nvram_get(name); ++ return p ? p : ""; ++} ++ ++/* ++ * Match an NVRAM variable. ++ * @param name name of variable to match ++ * @param match value to compare against value of variable ++ * @return TRUE if variable is defined and its value is string equal ++ * to match or FALSE otherwise ++ */ ++static INLINE int ++nvram_match(char *name, char *match) ++{ ++ const char *value = nvram_get(name); ++ return (value && !strcmp(value, match)); ++} ++ ++/* ++ * Inversely match an NVRAM variable. ++ * @param name name of variable to match ++ * @param match value to compare against value of variable ++ * @return TRUE if variable is defined and its value is not string ++ * equal to invmatch or FALSE otherwise ++ */ ++static INLINE int ++nvram_invmatch(char *name, char *invmatch) ++{ ++ const char *value = nvram_get(name); ++ return (value && strcmp(value, invmatch)); ++} ++ ++/* ++ * Set the value of an NVRAM variable. The name and value strings are ++ * copied into private storage. Pointers to previously set values ++ * may become invalid. The new value may be immediately ++ * retrieved but will not be permanently stored until a commit. ++ * @param name name of variable to set ++ * @param value value of variable ++ * @return 0 on success and errno on failure ++ */ ++extern int nvram_set(const char *name, const char *value); ++ ++/* ++ * Unset an NVRAM variable. Pointers to previously set values ++ * remain valid until a set. ++ * @param name name of variable to unset ++ * @return 0 on success and errno on failure ++ * NOTE: use nvram_commit to commit this change to flash. ++ */ ++extern int nvram_unset(const char *name); ++ ++/* ++ * NVRAM is based of FLASH or OTP. ++ * @return From FLASH: TRUE ++ * From OTP: FALSE ++ */ ++extern bool nvram_inotp(void); ++ ++/* ++ * Commit NVRAM header to OTP. 
All pointers to values ++ * may be invalid after a commit. ++ * NVRAM values are undefined after a commit. ++ * @return 0 on success and errno on failure ++ */ ++extern int nvram_otpcommit(void *sih); ++ ++/* ++ * Commit NVRAM variables to permanent storage. All pointers to values ++ * may be invalid after a commit. ++ * NVRAM values are undefined after a commit. ++ * @return 0 on success and errno on failure ++ */ ++extern int nvram_commit(void); ++ ++/* ++ * Get all NVRAM variables (format name=value\0 ... \0\0). ++ * @param buf buffer to store variables ++ * @param count size of buffer in bytes ++ * @return 0 on success and errno on failure ++ */ ++extern int nvram_getall(char *nvram_buf, int count); ++ ++/* ++ * returns the crc value of the nvram ++ * @param nvh nvram header pointer ++ */ ++uint8 nvram_calc_crc(struct nvram_header * nvh); ++ ++#endif /* _LANGUAGE_ASSEMBLY */ ++ ++/* The NVRAM version number stored as an NVRAM variable */ ++#define NVRAM_SOFTWARE_VERSION "1" ++ ++#define NVRAM_MAGIC 0x48534C46 /* 'FLSH' */ ++#define NVRAM_CLEAR_MAGIC 0x0 ++#define NVRAM_INVALID_MAGIC 0xFFFFFFFF ++#define NVRAM_VERSION 1 ++#define NVRAM_HEADER_SIZE 20 ++#define NVRAM_SPACE 0x8000 ++#define ENVRAM_SPACE 0x1000 ++ ++#define NVRAM_MAX_VALUE_LEN 255 ++#define NVRAM_MAX_PARAM_LEN 64 ++ ++#define NVRAM_CRC_START_POSITION 9 /* magic, len, crc8 to be skipped */ ++#define NVRAM_CRC_VER_MASK 0xffffff00 /* for crc_ver_init */ ++ ++/* Incase of nvram header(in OTP), we save 16bit after nvram header ++ * o 0:0 Switch Present ++ * o 1:4 Switch and gmac mode ++ * o 5:10 robo reset GPIO pin number ++ * o 11:15 phyaddr ++ */ ++#define OTPNVRAM_SWITCH_PRESENT 0x1 ++ ++#define OTPNVRAM_FLAGS_MASK 0x1 ++#define OTPNVRAM_SMACMODE_MASK 0x1e ++#define OTPNVRAM_GPIO_MASK 0x7e0 ++#define OTPNVRAM_PHYADDR_MASK 0xf800 ++ ++#define OTPNVRAM_SMACMODE_SHIFT 1 ++#define OTPNVRAM_GPIO_SHIFT 5 ++#define OTPNVRAM_PHYADDR_SHIFT 11 ++ ++/* clkfreq is saved in following format in OTP nvram data ++ * 
9:0 pci clock ++ * 20:10 si clock ++ * 31:21 mips clock ++ */ ++ ++#define NVRAM_PCI_CLKMASK 0x3ff ++#define NVRAM_SI_CLKMASK 0x1ffc00 ++#define NVRAM_SI_CLKSHIFT 10 ++#define NVRAM_CPUCLK_SHIFT 21 ++ ++/* Offsets to embedded nvram area */ ++#define NVRAM_START_COMPRESSED 0x400 ++#define NVRAM_START 0x1000 ++ ++#define BCM_JUMBO_NVRAM_DELIMIT '\n' ++#define BCM_JUMBO_START "Broadcom Jumbo Nvram file" ++ ++#if (defined(FAILSAFE_UPGRADE) || defined(CONFIG_FAILSAFE_UPGRADE) || \ ++ defined(__CONFIG_FAILSAFE_UPGRADE_SUPPORT__)) ++#define IMAGE_SIZE "image_size" ++#define BOOTPARTITION "bootpartition" ++#define IMAGE_BOOT BOOTPARTITION ++#define PARTIALBOOTS "partialboots" ++#define MAXPARTIALBOOTS "maxpartialboots" ++#define IMAGE_1ST_FLASH_TRX "flash0.trx" ++#define IMAGE_1ST_FLASH_OS "flash0.os" ++#define IMAGE_2ND_FLASH_TRX "flash0.trx2" ++#define IMAGE_2ND_FLASH_OS "flash0.os2" ++#define IMAGE_FIRST_OFFSET "image_first_offset" ++#define IMAGE_SECOND_OFFSET "image_second_offset" ++#define LINUX_FIRST "linux" ++#define LINUX_SECOND "linux2" ++#endif ++ ++#if (defined(DUAL_IMAGE) || defined(CONFIG_DUAL_IMAGE) || \ ++ defined(__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__)) ++/* Shared by all: CFE, Linux Kernel, and Ap */ ++#define IMAGE_BOOT "image_boot" ++#define BOOTPARTITION IMAGE_BOOT ++/* CFE variables */ ++#define IMAGE_1ST_FLASH_TRX "flash0.trx" ++#define IMAGE_1ST_FLASH_OS "flash0.os" ++#define IMAGE_2ND_FLASH_TRX "flash0.trx2" ++#define IMAGE_2ND_FLASH_OS "flash0.os2" ++#define IMAGE_SIZE "image_size" ++ ++/* CFE and Linux Kernel shared variables */ ++#define IMAGE_FIRST_OFFSET "image_first_offset" ++#define IMAGE_SECOND_OFFSET "image_second_offset" ++ ++/* Linux application variables */ ++#define LINUX_FIRST "linux" ++#define LINUX_SECOND "linux2" ++#define POLICY_TOGGLE "toggle" ++#define LINUX_PART_TO_FLASH "linux_to_flash" ++#define LINUX_FLASH_POLICY "linux_flash_policy" ++ ++#endif /* defined(DUAL_IMAGE||CONFIG_DUAL_IMAGE)||__CONFIG_DUAL_IMAGE_FLASH_SUPPORT__ */ 
++ ++int nvram_env_gmac_name(int gmac, char *name); ++ ++#endif /* _bcmnvram_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmparams.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmparams.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmparams.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmparams.h 2017-11-09 17:53:43.937301000 +0800 +@@ -0,0 +1,32 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Misc system wide parameters. ++ * ++ * $Id: bcmparams.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _bcmparams_h_ ++#define _bcmparams_h_ ++ ++#define VLAN_MAXVID 15 /* Max. VLAN ID supported/allowed */ ++ ++#define VLAN_NUMPRIS 8 /* # of prio, start from 0 */ ++ ++#define DEV_NUMIFS 16 /* Max. 
# of devices/interfaces supported */ ++ ++#define WL_MAXBSSCFG 16 /* maximum number of BSS Configs we can configure */ ++ ++#endif +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmperf.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmperf.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmperf.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmperf.h 2017-11-09 17:53:43.938295000 +0800 +@@ -0,0 +1,40 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Performance counters software interface. 
++ * ++ * $Id: bcmperf.h 241182 2011-02-17 21:50:03Z $ ++ */ ++/* essai */ ++#ifndef _BCMPERF_H_ ++#define _BCMPERF_H_ ++/* get cache hits and misses */ ++#if defined(BCMMIPS) && defined(BCMPERFSTATS) ++#include ++#define BCMPERF_ENABLE_INSTRCOUNT() hndmips_perf_instrcount_enable() ++#define BCMPERF_ENABLE_ICACHE_MISS() hndmips_perf_icache_miss_enable() ++#define BCMPERF_ENABLE_ICACHE_HIT() hndmips_perf_icache_hit_enable() ++#define BCMPERF_GETICACHE_MISS(x) ((x) = hndmips_perf_read_cache_miss()) ++#define BCMPERF_GETICACHE_HIT(x) ((x) = hndmips_perf_read_cache_hit()) ++#define BCMPERF_GETINSTRCOUNT(x) ((x) = hndmips_perf_read_instrcount()) ++#else ++#define BCMPERF_ENABLE_INSTRCOUNT() ++#define BCMPERF_ENABLE_ICACHE_MISS() ++#define BCMPERF_ENABLE_ICACHE_HIT() ++#define BCMPERF_GETICACHE_MISS(x) ((x) = 0) ++#define BCMPERF_GETICACHE_HIT(x) ((x) = 0) ++#define BCMPERF_GETINSTRCOUNT(x) ((x) = 0) ++#endif /* defined(mips) */ ++#endif /* _BCMPERF_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmsdpcm.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmsdpcm.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmsdpcm.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmsdpcm.h 2017-11-09 17:53:43.939297000 +0800 +@@ -0,0 +1,268 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom SDIO/PCMCIA ++ * Software-specific definitions shared between device and host side ++ * ++ * $Id: bcmsdpcm.h 314495 2012-02-12 07:56:39Z $ ++ */ ++ ++#ifndef _bcmsdpcm_h_ ++#define _bcmsdpcm_h_ ++ ++/* ++ * Software allocation of To SB Mailbox resources ++ */ ++ ++/* intstatus bits */ ++#define I_SMB_NAK I_SMB_SW0 /* To SB Mailbox Frame NAK */ ++#define I_SMB_INT_ACK I_SMB_SW1 /* To SB Mailbox Host Interrupt ACK */ ++#define I_SMB_USE_OOB I_SMB_SW2 /* To SB Mailbox Use OOB Wakeup */ ++#define I_SMB_DEV_INT I_SMB_SW3 /* To SB Mailbox Miscellaneous Interrupt */ ++ ++#define I_TOSBMAIL (I_SMB_NAK | I_SMB_INT_ACK | I_SMB_USE_OOB | I_SMB_DEV_INT) ++ ++/* tosbmailbox bits corresponding to intstatus bits */ ++#define SMB_NAK (1 << 0) /* To SB Mailbox Frame NAK */ ++#define SMB_INT_ACK (1 << 1) /* To SB Mailbox Host Interrupt ACK */ ++#define SMB_USE_OOB (1 << 2) /* To SB Mailbox Use OOB Wakeup */ ++#define SMB_DEV_INT (1 << 3) /* To SB Mailbox Miscellaneous Interrupt */ ++#define SMB_MASK 0x0000000f /* To SB Mailbox Mask */ ++ ++/* tosbmailboxdata */ ++#define SMB_DATA_VERSION_MASK 0x00ff0000 /* host protocol version (sent with F2 enable) */ ++#define SMB_DATA_VERSION_SHIFT 16 /* host protocol version (sent with F2 enable) */ ++ ++/* ++ * Software allocation of To Host Mailbox resources ++ */ ++ ++/* intstatus bits */ ++#define I_HMB_FC_STATE I_HMB_SW0 /* To Host Mailbox Flow Control State */ ++#define I_HMB_FC_CHANGE I_HMB_SW1 /* To Host Mailbox Flow Control State Changed */ ++#define I_HMB_FRAME_IND I_HMB_SW2 /* To Host Mailbox Frame Indication */ ++#define I_HMB_HOST_INT I_HMB_SW3 /* To Host Mailbox Miscellaneous 
Interrupt */ ++ ++#define I_TOHOSTMAIL (I_HMB_FC_CHANGE | I_HMB_FRAME_IND | I_HMB_HOST_INT) ++ ++/* tohostmailbox bits corresponding to intstatus bits */ ++#define HMB_FC_ON (1 << 0) /* To Host Mailbox Flow Control State */ ++#define HMB_FC_CHANGE (1 << 1) /* To Host Mailbox Flow Control State Changed */ ++#define HMB_FRAME_IND (1 << 2) /* To Host Mailbox Frame Indication */ ++#define HMB_HOST_INT (1 << 3) /* To Host Mailbox Miscellaneous Interrupt */ ++#define HMB_MASK 0x0000000f /* To Host Mailbox Mask */ ++ ++/* tohostmailboxdata */ ++#define HMB_DATA_NAKHANDLED 0x01 /* we're ready to retransmit NAK'd frame to host */ ++#define HMB_DATA_DEVREADY 0x02 /* we're ready to to talk to host after enable */ ++#define HMB_DATA_FC 0x04 /* per prio flowcontrol update flag to host */ ++#define HMB_DATA_FWREADY 0x08 /* firmware is ready for protocol activity */ ++#define HMB_DATA_FWHALT 0x10 /* firmware has halted operation */ ++ ++#define HMB_DATA_FCDATA_MASK 0xff000000 /* per prio flowcontrol data */ ++#define HMB_DATA_FCDATA_SHIFT 24 /* per prio flowcontrol data */ ++ ++#define HMB_DATA_VERSION_MASK 0x00ff0000 /* device protocol version (with devready) */ ++#define HMB_DATA_VERSION_SHIFT 16 /* device protocol version (with devready) */ ++ ++/* ++ * Software-defined protocol header ++ */ ++ ++/* Current protocol version */ ++#define SDPCM_PROT_VERSION 4 ++ ++/* SW frame header */ ++#define SDPCM_SEQUENCE_MASK 0x000000ff /* Sequence Number Mask */ ++#define SDPCM_PACKET_SEQUENCE(p) (((uint8 *)p)[0] & 0xff) /* p starts w/SW Header */ ++ ++#define SDPCM_CHANNEL_MASK 0x00000f00 /* Channel Number Mask */ ++#define SDPCM_CHANNEL_SHIFT 8 /* Channel Number Shift */ ++#define SDPCM_PACKET_CHANNEL(p) (((uint8 *)p)[1] & 0x0f) /* p starts w/SW Header */ ++ ++#define SDPCM_FLAGS_MASK 0x0000f000 /* Mask of flag bits */ ++#define SDPCM_FLAGS_SHIFT 12 /* Flag bits shift */ ++#define SDPCM_PACKET_FLAGS(p) ((((uint8 *)p)[1] & 0xf0) >> 4) /* p starts w/SW Header */ ++ ++/* Next Read Len: 
lookahead length of next frame, in 16-byte units (rounded up) */ ++#define SDPCM_NEXTLEN_MASK 0x00ff0000 /* Next Read Len Mask */ ++#define SDPCM_NEXTLEN_SHIFT 16 /* Next Read Len Shift */ ++#define SDPCM_NEXTLEN_VALUE(p) ((((uint8 *)p)[2] & 0xff) << 4) /* p starts w/SW Header */ ++#define SDPCM_NEXTLEN_OFFSET 2 ++ ++/* Data Offset from SOF (HW Tag, SW Tag, Pad) */ ++#define SDPCM_DOFFSET_OFFSET 3 /* Data Offset */ ++#define SDPCM_DOFFSET_VALUE(p) (((uint8 *)p)[SDPCM_DOFFSET_OFFSET] & 0xff) ++#define SDPCM_DOFFSET_MASK 0xff000000 ++#define SDPCM_DOFFSET_SHIFT 24 ++ ++#define SDPCM_FCMASK_OFFSET 4 /* Flow control */ ++#define SDPCM_FCMASK_VALUE(p) (((uint8 *)p)[SDPCM_FCMASK_OFFSET ] & 0xff) ++#define SDPCM_WINDOW_OFFSET 5 /* Credit based fc */ ++#define SDPCM_WINDOW_VALUE(p) (((uint8 *)p)[SDPCM_WINDOW_OFFSET] & 0xff) ++#define SDPCM_VERSION_OFFSET 6 /* Version # */ ++#define SDPCM_VERSION_VALUE(p) (((uint8 *)p)[SDPCM_VERSION_OFFSET] & 0xff) ++#define SDPCM_UNUSED_OFFSET 7 /* Spare */ ++#define SDPCM_UNUSED_VALUE(p) (((uint8 *)p)[SDPCM_UNUSED_OFFSET] & 0xff) ++ ++#define SDPCM_SWHEADER_LEN 8 /* SW header is 64 bits */ ++ ++/* logical channel numbers */ ++#define SDPCM_CONTROL_CHANNEL 0 /* Control Request/Response Channel Id */ ++#define SDPCM_EVENT_CHANNEL 1 /* Asyc Event Indication Channel Id */ ++#define SDPCM_DATA_CHANNEL 2 /* Data Xmit/Recv Channel Id */ ++#define SDPCM_GLOM_CHANNEL 3 /* For coalesced packets (superframes) */ ++#define SDPCM_TEST_CHANNEL 15 /* Reserved for test/debug packets */ ++#define SDPCM_MAX_CHANNEL 15 ++ ++#define SDPCM_SEQUENCE_WRAP 256 /* wrap-around val for eight-bit frame seq number */ ++ ++#define SDPCM_FLAG_RESVD0 0x01 ++#define SDPCM_FLAG_RESVD1 0x02 ++#define SDPCM_FLAG_GSPI_TXENAB 0x04 ++#define SDPCM_FLAG_GLOMDESC 0x08 /* Superframe descriptor mask */ ++ ++/* For GLOM_CHANNEL frames, use a flag to indicate descriptor frame */ ++#define SDPCM_GLOMDESC_FLAG (SDPCM_FLAG_GLOMDESC << SDPCM_FLAGS_SHIFT) ++ ++#define SDPCM_GLOMDESC(p) 
(((uint8 *)p)[1] & 0x80) ++ ++/* For TEST_CHANNEL packets, define another 4-byte header */ ++#define SDPCM_TEST_HDRLEN 4 /* Generally: Cmd(1), Ext(1), Len(2); ++ * Semantics of Ext byte depend on command. ++ * Len is current or requested frame length, not ++ * including test header; sent little-endian. ++ */ ++#define SDPCM_TEST_DISCARD 0x01 /* Receiver discards. Ext is a pattern id. */ ++#define SDPCM_TEST_ECHOREQ 0x02 /* Echo request. Ext is a pattern id. */ ++#define SDPCM_TEST_ECHORSP 0x03 /* Echo response. Ext is a pattern id. */ ++#define SDPCM_TEST_BURST 0x04 /* Receiver to send a burst. Ext is a frame count */ ++#define SDPCM_TEST_SEND 0x05 /* Receiver sets send mode. Ext is boolean on/off */ ++ ++/* Handy macro for filling in datagen packets with a pattern */ ++#define SDPCM_TEST_FILL(byteno, id) ((uint8)(id + byteno)) ++ ++/* ++ * Software counters (first part matches hardware counters) ++ */ ++ ++typedef volatile struct { ++ uint32 cmd52rd; /* Cmd52RdCount, SDIO: cmd52 reads */ ++ uint32 cmd52wr; /* Cmd52WrCount, SDIO: cmd52 writes */ ++ uint32 cmd53rd; /* Cmd53RdCount, SDIO: cmd53 reads */ ++ uint32 cmd53wr; /* Cmd53WrCount, SDIO: cmd53 writes */ ++ uint32 abort; /* AbortCount, SDIO: aborts */ ++ uint32 datacrcerror; /* DataCrcErrorCount, SDIO: frames w/CRC error */ ++ uint32 rdoutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Rd Frm out of sync */ ++ uint32 wroutofsync; /* RdOutOfSyncCount, SDIO/PCMCIA: Wr Frm out of sync */ ++ uint32 writebusy; /* WriteBusyCount, SDIO: device asserted "busy" */ ++ uint32 readwait; /* ReadWaitCount, SDIO: no data ready for a read cmd */ ++ uint32 readterm; /* ReadTermCount, SDIO: read frame termination cmds */ ++ uint32 writeterm; /* WriteTermCount, SDIO: write frames termination cmds */ ++ uint32 rxdescuflo; /* receive descriptor underflows */ ++ uint32 rxfifooflo; /* receive fifo overflows */ ++ uint32 txfifouflo; /* transmit fifo underflows */ ++ uint32 runt; /* runt (too short) frames recv'd from bus */ ++ uint32 
badlen; /* frame's rxh len does not match its hw tag len */ ++ uint32 badcksum; /* frame's hw tag chksum doesn't agree with len value */ ++ uint32 seqbreak; /* break in sequence # space from one rx frame to the next */ ++ uint32 rxfcrc; /* frame rx header indicates crc error */ ++ uint32 rxfwoos; /* frame rx header indicates write out of sync */ ++ uint32 rxfwft; /* frame rx header indicates write frame termination */ ++ uint32 rxfabort; /* frame rx header indicates frame aborted */ ++ uint32 woosint; /* write out of sync interrupt */ ++ uint32 roosint; /* read out of sync interrupt */ ++ uint32 rftermint; /* read frame terminate interrupt */ ++ uint32 wftermint; /* write frame terminate interrupt */ ++} sdpcmd_cnt_t; ++ ++/* ++ * Register Access Macros ++ */ ++ ++#define SDIODREV_IS(var, val) ((var) == (val)) ++#define SDIODREV_GE(var, val) ((var) >= (val)) ++#define SDIODREV_GT(var, val) ((var) > (val)) ++#define SDIODREV_LT(var, val) ((var) < (val)) ++#define SDIODREV_LE(var, val) ((var) <= (val)) ++ ++#define SDIODDMAREG32(h, dir, chnl) \ ++ ((dir) == DMA_TX ? \ ++ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].xmt) : \ ++ (void *)(uintptr)&((h)->regs->dma.sdiod32.dma32regs[chnl].rcv)) ++ ++#define SDIODDMAREG64(h, dir, chnl) \ ++ ((dir) == DMA_TX ? \ ++ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].xmt) : \ ++ (void *)(uintptr)&((h)->regs->dma.sdiod64.dma64regs[chnl].rcv)) ++ ++#define SDIODDMAREG(h, dir, chnl) \ ++ (SDIODREV_LT((h)->corerev, 1) ? \ ++ SDIODDMAREG32((h), (dir), (chnl)) : \ ++ SDIODDMAREG64((h), (dir), (chnl))) ++ ++#define PCMDDMAREG(h, dir, chnl) \ ++ ((dir) == DMA_TX ? \ ++ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.xmt) : \ ++ (void *)(uintptr)&((h)->regs->dma.pcm32.dmaregs.rcv)) ++ ++#define SDPCMDMAREG(h, dir, chnl, coreid) \ ++ ((coreid) == SDIOD_CORE_ID ? \ ++ SDIODDMAREG(h, dir, chnl) : \ ++ PCMDDMAREG(h, dir, chnl)) ++ ++#define SDIODFIFOREG(h, corerev) \ ++ (SDIODREV_LT((corerev), 1) ? 
\ ++ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod32.dmafifo)) : \ ++ ((dma32diag_t *)(uintptr)&((h)->regs->dma.sdiod64.dmafifo))) ++ ++#define PCMDFIFOREG(h) \ ++ ((dma32diag_t *)(uintptr)&((h)->regs->dma.pcm32.dmafifo)) ++ ++#define SDPCMFIFOREG(h, coreid, corerev) \ ++ ((coreid) == SDIOD_CORE_ID ? \ ++ SDIODFIFOREG(h, corerev) : \ ++ PCMDFIFOREG(h)) ++ ++/* ++ * Shared structure between dongle and the host. ++ * The structure contains pointers to trap or assert information. ++ */ ++#define SDPCM_SHARED_VERSION 0x0001 ++#define SDPCM_SHARED_VERSION_MASK 0x00FF ++#define SDPCM_SHARED_ASSERT_BUILT 0x0100 ++#define SDPCM_SHARED_ASSERT 0x0200 ++#define SDPCM_SHARED_TRAP 0x0400 ++#define SDPCM_SHARED_IN_BRPT 0x0800 ++#define SDPCM_SHARED_SET_BRPT 0x1000 ++#define SDPCM_SHARED_PENDING_BRPT 0x2000 ++ ++typedef struct { ++ uint32 flags; ++ uint32 trap_addr; ++ uint32 assert_exp_addr; ++ uint32 assert_file_addr; ++ uint32 assert_line; ++ uint32 console_addr; /* Address of hndrte_cons_t */ ++ uint32 msgtrace_addr; ++ uint32 fwid; ++} sdpcm_shared_t; ++ ++extern sdpcm_shared_t sdpcm_shared; ++ ++/* Function can be used to notify host of FW halt */ ++extern void sdpcmd_fwhalt(void); ++ ++#endif /* _bcmsdpcm_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmstdlib.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmstdlib.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmstdlib.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmstdlib.h 2017-11-09 17:53:43.940291000 +0800 +@@ -0,0 +1,128 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * prototypes for functions defined in bcmstdlib.c ++ * ++ * $Id: bcmstdlib.h 289936 2011-10-14 21:06:33Z $: ++ */ ++ ++/* ++ * bcmstdlib.h file should be used only to construct an OSL or alone without any OSL ++ * It should not be used with any orbitarary OSL's as there could be a conflict ++ * with some of the routines defined here. ++*/ ++ ++#ifndef _BCMSTDLIB_H ++#define _BCMSTDLIB_H ++ ++#include ++#include ++#include ++ ++#ifndef INT_MAX ++#define INT_MAX 2147483647 /* from limits.h */ ++#endif ++ ++ ++/* For backwards compatibility, define "BWL_NO_INTERNAL_STDLIB_SUPPORT" to ++ * exclude support for the BRCM stdlib APIs. This should be cleaned-up such ++ * that platforms that require the BRCM stdlib API should simply define ++ * "BWL_INTERNAL_STDLIB_SUPPORT". This would eliminate the need for the ++ * following #ifndef check. ++ */ ++#ifndef BWL_NO_INTERNAL_STDLIB_SUPPORT ++#define BWL_INTERNAL_STDLIB_SUPPORT ++#endif ++ ++#ifdef BWL_INTERNAL_STDLIB_SUPPORT ++/* This should be cleaned-up such that platforms that require the BRCM stdlib ++ * API should simply define "BWL_INTERNAL_STDLIB_SUPPORT". This would eliminate ++ * the need for the following #ifdef check. 
++ */ ++#if !defined(_WIN32) && !defined(_CFE_) && !defined(EFI) ++ ++typedef int FILE; ++#define stdout ((FILE *)1) ++#define stderr ((FILE *)2) ++ ++/* i/o functions */ ++extern int fputc(int c, FILE *stream); ++extern void putc(int c); ++/* extern int putc(int c, FILE *stream); */ ++#define putchar(c) putc(c) ++extern int fputs(const char *s, FILE *stream); ++extern int puts(const char *s); ++extern int getc(void); ++extern bool keypressed(void); ++ ++/* bcopy, bcmp, and bzero */ ++#define bcopy(src, dst, len) memcpy((dst), (src), (len)) ++#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) ++#define bzero(b, len) memset((b), '\0', (len)) ++ ++extern unsigned long rand(void); ++ ++#define atoi(s) ((int)(strtoul((s), NULL, 10))) ++ ++#endif ++ ++#if !defined(_WIN32) || defined(EFI) ++/* string functions */ ++#define PRINTF_BUFLEN 512 ++extern int printf(const char *fmt, ...) ++ __attribute__ ((format (__printf__, 1, 2))); ++extern int BCMROMFN(sprintf)(char *buf, const char *fmt, ...) ++ __attribute__ ((format (__printf__, 2, 3))); ++ ++extern int BCMROMFN(strcmp)(const char *s1, const char *s2); ++extern size_t BCMROMFN(strlen)(const char *s); ++extern char *BCMROMFN(strcpy)(char *dest, const char *src); ++extern char *BCMROMFN(strstr)(const char *s, const char *find); ++extern char *BCMROMFN(strncpy)(char *dest, const char *src, size_t n); ++extern char *BCMROMFN(strcat)(char *d, const char *s); ++ ++extern int BCMROMFN(strncmp)(const char *s1, const char *s2, size_t n); ++extern char *BCMROMFN(strchr)(const char *str, int c); ++extern char *BCMROMFN(strrchr)(const char *str, int c); ++extern size_t BCMROMFN(strspn)(const char *s1, const char *s2); ++extern size_t BCMROMFN(strcspn)(const char *s1, const char *s2); ++extern unsigned long BCMROMFN(strtoul)(const char *cp, char **endp, int base); ++#define strtol(nptr, endptr, base) ((long)strtoul((nptr), (endptr), (base))) ++ ++extern void *BCMROMFN(memmove)(void *dest, const void *src, size_t n); ++extern void 
*BCMROMFN(memchr)(const void *s, int c, size_t n); ++ ++extern int BCMROMFN(vsprintf)(char *buf, const char *fmt, va_list ap); ++/* mem functions */ ++/* For EFI, using EFIDriverLib versions */ ++/* Cannot use memmem in ROM because of character array initialization wiht "" in gcc */ ++extern void *memset(void *dest, int c, size_t n); ++/* Cannot use memcpy in ROM because of structure assignmnets in gcc */ ++extern void *memcpy(void *dest, const void *src, size_t n); ++extern int BCMROMFN(memcmp)(const void *s1, const void *s2, size_t n); ++ ++#endif /* !_WIN32 || EFI */ ++#endif /* BWL_INTERNAL_STDLIB_SUPPORT */ ++ ++#if !defined(_WIN32) || defined(EFI) ++extern int BCMROMFN(snprintf)(char *str, size_t n, char const *fmt, ...) ++ __attribute__ ((format (__printf__, 3, 4))); ++#else ++extern int BCMROMFN(snprintf)(char *str, size_t n, char const *fmt, ...); ++#endif ++ ++extern int BCMROMFN(vsnprintf)(char *buf, size_t size, const char *fmt, va_list ap); ++ ++#endif /* _BCMSTDLIB_H */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmutils.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmutils.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmutils.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmutils.h 2017-11-09 17:53:43.941299000 +0800 +@@ -0,0 +1,864 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Misc useful os-independent macros and functions. ++ * ++ * $Id: bcmutils.h 325951 2012-04-05 06:03:27Z $ ++ */ ++ ++#ifndef _bcmutils_h_ ++#define _bcmutils_h_ ++ ++#if defined(UNDER_CE) ++#include ++#else ++#define bcm_strcpy_s(dst, noOfElements, src) strcpy((dst), (src)) ++#define bcm_strncpy_s(dst, noOfElements, src, count) strncpy((dst), (src), (count)) ++#define bcm_strcat_s(dst, noOfElements, src) strcat((dst), (src)) ++#endif ++ ++#ifdef __cplusplus ++extern "C" { ++#endif ++ ++#ifdef PKTQ_LOG ++#include ++#endif ++ ++/* ctype replacement */ ++#define _BCM_U 0x01 /* upper */ ++#define _BCM_L 0x02 /* lower */ ++#define _BCM_D 0x04 /* digit */ ++#define _BCM_C 0x08 /* cntrl */ ++#define _BCM_P 0x10 /* punct */ ++#define _BCM_S 0x20 /* white space (space/lf/tab) */ ++#define _BCM_X 0x40 /* hex digit */ ++#define _BCM_SP 0x80 /* hard space (0x20) */ ++ ++#if defined(BCMROMBUILD) ++extern const unsigned char BCMROMDATA(bcm_ctype)[]; ++#else ++extern const unsigned char bcm_ctype[]; ++#endif ++#define bcm_ismask(x) (bcm_ctype[(int)(unsigned char)(x)]) ++ ++#define bcm_isalnum(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L|_BCM_D)) != 0) ++#define bcm_isalpha(c) ((bcm_ismask(c)&(_BCM_U|_BCM_L)) != 0) ++#define bcm_iscntrl(c) ((bcm_ismask(c)&(_BCM_C)) != 0) ++#define bcm_isdigit(c) ((bcm_ismask(c)&(_BCM_D)) != 0) ++#define bcm_isgraph(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D)) != 0) ++#define bcm_islower(c) ((bcm_ismask(c)&(_BCM_L)) != 0) ++#define bcm_isprint(c) ((bcm_ismask(c)&(_BCM_P|_BCM_U|_BCM_L|_BCM_D|_BCM_SP)) != 0) ++#define bcm_ispunct(c) ((bcm_ismask(c)&(_BCM_P)) != 0) ++#define bcm_isspace(c) ((bcm_ismask(c)&(_BCM_S)) 
!= 0) ++#define bcm_isupper(c) ((bcm_ismask(c)&(_BCM_U)) != 0) ++#define bcm_isxdigit(c) ((bcm_ismask(c)&(_BCM_D|_BCM_X)) != 0) ++#define bcm_tolower(c) (bcm_isupper((c)) ? ((c) + 'a' - 'A') : (c)) ++#define bcm_toupper(c) (bcm_islower((c)) ? ((c) + 'A' - 'a') : (c)) ++ ++/* Buffer structure for collecting string-formatted data ++* using bcm_bprintf() API. ++* Use bcm_binit() to initialize before use ++*/ ++ ++struct bcmstrbuf { ++ char *buf; /* pointer to current position in origbuf */ ++ unsigned int size; /* current (residual) size in bytes */ ++ char *origbuf; /* unmodified pointer to orignal buffer */ ++ unsigned int origsize; /* unmodified orignal buffer size in bytes */ ++}; ++ ++/* ** driver-only section ** */ ++#ifdef BCMDRIVER ++#ifdef EFI ++/* forward declare structyre type */ ++struct spktq; ++#endif ++#include ++ ++#define GPIO_PIN_NOTDEFINED 0x20 /* Pin not defined */ ++ ++/* ++ * Spin at most 'us' microseconds while 'exp' is true. ++ * Caller should explicitly test 'exp' when this completes ++ * and take appropriate error action if 'exp' is still true. 
++ */ ++#define SPINWAIT(exp, us) { \ ++ uint countdown = (us) + 9; \ ++ while ((exp) && (countdown >= 10)) {\ ++ OSL_DELAY(10); \ ++ countdown -= 10; \ ++ } \ ++} ++ ++/* osl multi-precedence packet queue */ ++#ifndef PKTQ_LEN_DEFAULT ++#define PKTQ_LEN_DEFAULT 128 /* Max 128 packets */ ++#endif ++#ifndef PKTQ_MAX_PREC ++#define PKTQ_MAX_PREC 16 /* Maximum precedence levels */ ++#endif ++ ++typedef struct pktq_prec { ++ void *head; /* first packet to dequeue */ ++ void *tail; /* last packet to dequeue */ ++ uint16 len; /* number of queued packets */ ++ uint16 max; /* maximum number of queued packets */ ++} pktq_prec_t; ++ ++#ifdef PKTQ_LOG ++typedef struct { ++ uint32 requested; /* packets requested to be stored */ ++ uint32 stored; /* packets stored */ ++ uint32 saved; /* packets saved, ++ because a lowest priority queue has given away one packet ++ */ ++ uint32 selfsaved; /* packets saved, ++ because an older packet from the same queue has been dropped ++ */ ++ uint32 full_dropped; /* packets dropped, ++ because pktq is full with higher precedence packets ++ */ ++ uint32 dropped; /* packets dropped because pktq per that precedence is full */ ++ uint32 sacrificed; /* packets dropped, ++ in order to save one from a queue of a highest priority ++ */ ++ uint32 busy; /* packets droped because of hardware/transmission error */ ++ uint32 retry; /* packets re-sent because they were not received */ ++ uint32 ps_retry; /* packets retried again prior to moving power save mode */ ++ uint32 retry_drop; /* packets finally dropped after retry limit */ ++ uint32 max_avail; /* the high-water mark of the queue capacity for packets - ++ goes to zero as queue fills ++ */ ++ uint32 max_used; /* the high-water mark of the queue utilisation for packets - ++ increases with use ('inverse' of max_avail) ++ */ ++ uint32 queue_capacity; /* the maximum capacity of the queue */ ++} pktq_counters_t; ++#endif /* PKTQ_LOG */ ++ ++ ++#define PKTQ_COMMON \ ++ uint16 num_prec; /* number of 
precedences in use */ \ ++ uint16 hi_prec; /* rapid dequeue hint (>= highest non-empty prec) */ \ ++ uint16 max; /* total max packets */ \ ++ uint16 len; /* total number of packets */ ++ ++/* multi-priority pkt queue */ ++struct pktq { ++ PKTQ_COMMON ++ /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */ ++ struct pktq_prec q[PKTQ_MAX_PREC]; ++#ifdef PKTQ_LOG ++ pktq_counters_t _prec_cnt[PKTQ_MAX_PREC]; /* Counters per queue */ ++#endif ++}; ++ ++/* simple, non-priority pkt queue */ ++struct spktq { ++ PKTQ_COMMON ++ /* q array must be last since # of elements can be either PKTQ_MAX_PREC or 1 */ ++ struct pktq_prec q[1]; ++}; ++ ++#define PKTQ_PREC_ITER(pq, prec) for (prec = (pq)->num_prec - 1; prec >= 0; prec--) ++ ++/* fn(pkt, arg). return true if pkt belongs to if */ ++typedef bool (*ifpkt_cb_t)(void*, int); ++ ++#ifdef BCMPKTPOOL ++#define POOL_ENAB(pool) ((pool) && (pool)->inited) ++#if defined(BCM4329C0) ++#define SHARED_POOL (pktpool_shared_ptr) ++#else ++#define SHARED_POOL (pktpool_shared) ++#endif /* BCM4329C0 */ ++#else /* BCMPKTPOOL */ ++#define POOL_ENAB(bus) 0 ++#define SHARED_POOL ((struct pktpool *)NULL) ++#endif /* BCMPKTPOOL */ ++ ++#ifndef PKTPOOL_LEN_MAX ++#define PKTPOOL_LEN_MAX 40 ++#endif /* PKTPOOL_LEN_MAX */ ++#define PKTPOOL_CB_MAX 3 ++ ++struct pktpool; ++typedef void (*pktpool_cb_t)(struct pktpool *pool, void *arg); ++typedef struct { ++ pktpool_cb_t cb; ++ void *arg; ++} pktpool_cbinfo_t; ++ ++#ifdef BCMDBG_POOL ++/* pkt pool debug states */ ++#define POOL_IDLE 0 ++#define POOL_RXFILL 1 ++#define POOL_RXDH 2 ++#define POOL_RXD11 3 ++#define POOL_TXDH 4 ++#define POOL_TXD11 5 ++#define POOL_AMPDU 6 ++#define POOL_TXENQ 7 ++ ++typedef struct { ++ void *p; ++ uint32 cycles; ++ uint32 dur; ++} pktpool_dbg_t; ++ ++typedef struct { ++ uint8 txdh; /* tx to host */ ++ uint8 txd11; /* tx to d11 */ ++ uint8 enq; /* waiting in q */ ++ uint8 rxdh; /* rx from host */ ++ uint8 rxd11; /* rx from d11 */ ++ uint8 rxfill; /* 
dma_rxfill */ ++ uint8 idle; /* avail in pool */ ++} pktpool_stats_t; ++#endif /* BCMDBG_POOL */ ++ ++typedef struct pktpool { ++ bool inited; ++ uint16 r; ++ uint16 w; ++ uint16 len; ++ uint16 maxlen; ++ uint16 plen; ++ bool istx; ++ bool empty; ++ uint8 cbtoggle; ++ uint8 cbcnt; ++ uint8 ecbcnt; ++ bool emptycb_disable; ++ pktpool_cbinfo_t *availcb_excl; ++ pktpool_cbinfo_t cbs[PKTPOOL_CB_MAX]; ++ pktpool_cbinfo_t ecbs[PKTPOOL_CB_MAX]; ++ void *q[PKTPOOL_LEN_MAX + 1]; ++ ++#ifdef BCMDBG_POOL ++ uint8 dbg_cbcnt; ++ pktpool_cbinfo_t dbg_cbs[PKTPOOL_CB_MAX]; ++ uint16 dbg_qlen; ++ pktpool_dbg_t dbg_q[PKTPOOL_LEN_MAX + 1]; ++#endif ++} pktpool_t; ++ ++#if defined(BCM4329C0) ++extern pktpool_t *pktpool_shared_ptr; ++#else ++extern pktpool_t *pktpool_shared; ++#endif /* BCM4329C0 */ ++ ++extern int pktpool_init(osl_t *osh, pktpool_t *pktp, int *pktplen, int plen, bool istx); ++extern int pktpool_deinit(osl_t *osh, pktpool_t *pktp); ++extern int pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal); ++extern void* pktpool_get(pktpool_t *pktp); ++extern void pktpool_free(pktpool_t *pktp, void *p); ++extern int pktpool_add(pktpool_t *pktp, void *p); ++extern uint16 pktpool_avail(pktpool_t *pktp); ++extern int pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp); ++extern int pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb); ++extern int pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); ++extern int pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); ++extern int pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen); ++extern int pktpool_setmaxlen_strict(osl_t *osh, pktpool_t *pktp, uint16 maxlen); ++extern void pktpool_emptycb_disable(pktpool_t *pktp, bool disable); ++extern bool pktpool_emptycb_disabled(pktpool_t *pktp); ++ ++#define POOLPTR(pp) ((pktpool_t *)(pp)) ++#define pktpool_len(pp) (POOLPTR(pp)->len - 1) ++#define pktpool_plen(pp) (POOLPTR(pp)->plen) ++#define pktpool_maxlen(pp) 
(POOLPTR(pp)->maxlen) ++ ++#ifdef BCMDBG_POOL ++extern int pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg); ++extern int pktpool_start_trigger(pktpool_t *pktp, void *p); ++extern int pktpool_dbg_dump(pktpool_t *pktp); ++extern int pktpool_dbg_notify(pktpool_t *pktp); ++extern int pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats); ++#endif /* BCMDBG_POOL */ ++ ++/* forward definition of ether_addr structure used by some function prototypes */ ++ ++struct ether_addr; ++ ++extern int ether_isbcast(const void *ea); ++extern int ether_isnulladdr(const void *ea); ++ ++/* operations on a specific precedence in packet queue */ ++ ++#define pktq_psetmax(pq, prec, _max) ((pq)->q[prec].max = (_max)) ++#define pktq_pmax(pq, prec) ((pq)->q[prec].max) ++#define pktq_plen(pq, prec) ((pq)->q[prec].len) ++#define pktq_pavail(pq, prec) ((pq)->q[prec].max - (pq)->q[prec].len) ++#define pktq_pfull(pq, prec) ((pq)->q[prec].len >= (pq)->q[prec].max) ++#define pktq_pempty(pq, prec) ((pq)->q[prec].len == 0) ++ ++#define pktq_ppeek(pq, prec) ((pq)->q[prec].head) ++#define pktq_ppeek_tail(pq, prec) ((pq)->q[prec].tail) ++ ++extern void *pktq_penq(struct pktq *pq, int prec, void *p); ++extern void *pktq_penq_head(struct pktq *pq, int prec, void *p); ++extern void *pktq_pdeq(struct pktq *pq, int prec); ++extern void *pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p); ++extern void *pktq_pdeq_tail(struct pktq *pq, int prec); ++/* Empty the queue at particular precedence level */ ++extern void pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ++ ifpkt_cb_t fn, int arg); ++/* Remove a specified packet from its queue */ ++extern bool pktq_pdel(struct pktq *pq, void *p, int prec); ++ ++/* operations on a set of precedences in packet queue */ ++ ++extern int pktq_mlen(struct pktq *pq, uint prec_bmp); ++extern void *pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out); ++extern void *pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out); ++ ++/* 
operations on packet queue as a whole */ ++ ++#define pktq_len(pq) ((int)(pq)->len) ++#define pktq_max(pq) ((int)(pq)->max) ++#define pktq_avail(pq) ((int)((pq)->max - (pq)->len)) ++#define pktq_full(pq) ((pq)->len >= (pq)->max) ++#define pktq_empty(pq) ((pq)->len == 0) ++ ++/* operations for single precedence queues */ ++#define pktenq(pq, p) pktq_penq(((struct pktq *)(void *)pq), 0, (p)) ++#define pktenq_head(pq, p) pktq_penq_head(((struct pktq *)(void *)pq), 0, (p)) ++#define pktdeq(pq) pktq_pdeq(((struct pktq *)(void *)pq), 0) ++#define pktdeq_tail(pq) pktq_pdeq_tail(((struct pktq *)(void *)pq), 0) ++#define pktqinit(pq, len) pktq_init(((struct pktq *)(void *)pq), 1, len) ++ ++extern void pktq_init(struct pktq *pq, int num_prec, int max_len); ++extern void pktq_set_max_plen(struct pktq *pq, int prec, int max_len); ++ ++/* prec_out may be NULL if caller is not interested in return value */ ++extern void *pktq_deq(struct pktq *pq, int *prec_out); ++extern void *pktq_deq_tail(struct pktq *pq, int *prec_out); ++extern void *pktq_peek(struct pktq *pq, int *prec_out); ++extern void *pktq_peek_tail(struct pktq *pq, int *prec_out); ++extern void pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg); ++ ++/* externs */ ++/* packet */ ++extern uint pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf); ++extern uint pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf); ++extern uint pkttotlen(osl_t *osh, void *p); ++extern void *pktlast(osl_t *osh, void *p); ++extern uint pktsegcnt(osl_t *osh, void *p); ++extern uint pktsegcnt_war(osl_t *osh, void *p); ++extern uint8 *pktdataoffset(osl_t *osh, void *p, uint offset); ++extern void *pktoffset(osl_t *osh, void *p, uint offset); ++ ++/* Get priority from a packet and pass it back in scb (or equiv) */ ++#define PKTPRIO_VDSCP 0x100 /* DSCP prio found after VLAN tag */ ++#define PKTPRIO_VLAN 0x200 /* VLAN prio found */ ++#define PKTPRIO_UPD 0x400 /* DSCP used to update VLAN prio */ 
++#define PKTPRIO_DSCP 0x800 /* DSCP prio found */ ++ ++extern uint pktsetprio(void *pkt, bool update_vtag); ++ ++/* string */ ++extern int BCMROMFN(bcm_atoi)(const char *s); ++extern ulong BCMROMFN(bcm_strtoul)(const char *cp, char **endp, uint base); ++extern char *BCMROMFN(bcmstrstr)(const char *haystack, const char *needle); ++extern char *BCMROMFN(bcmstrcat)(char *dest, const char *src); ++extern char *BCMROMFN(bcmstrncat)(char *dest, const char *src, uint size); ++extern ulong wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen); ++char* bcmstrtok(char **string, const char *delimiters, char *tokdelim); ++int bcmstricmp(const char *s1, const char *s2); ++int bcmstrnicmp(const char* s1, const char* s2, int cnt); ++ ++ ++/* ethernet address */ ++extern char *bcm_ether_ntoa(const struct ether_addr *ea, char *buf); ++extern int BCMROMFN(bcm_ether_atoe)(const char *p, struct ether_addr *ea); ++ ++/* ip address */ ++struct ipv4_addr; ++extern char *bcm_ip_ntoa(struct ipv4_addr *ia, char *buf); ++ ++/* delay */ ++extern void bcm_mdelay(uint ms); ++/* variable access */ ++#if defined(DONGLEBUILD) && !defined(WLTEST) ++#ifdef BCMDBG ++#define NVRAM_RECLAIM_CHECK(name) \ ++ if (attach_part_reclaimed == TRUE) { \ ++ printf("%s: NVRAM already reclaimed, %s\n", __FUNCTION__, (name)); \ ++ *(char*) 0 = 0; /* TRAP */ \ ++ return NULL; \ ++ } ++#else /* BCMDBG */ ++#define NVRAM_RECLAIM_CHECK(name) \ ++ if (attach_part_reclaimed == TRUE) { \ ++ *(char*) 0 = 0; /* TRAP */ \ ++ return NULL; \ ++ } ++#endif /* BCMDBG */ ++#else /* DONGLEBUILD && !WLTEST && !BCMINTERNAL && !BCMDBG_DUMP */ ++#define NVRAM_RECLAIM_CHECK(name) ++#endif ++ ++extern char *getvar(char *vars, const char *name); ++extern int getintvar(char *vars, const char *name); ++extern int getintvararray(char *vars, const char *name, int index); ++extern int getintvararraysize(char *vars, const char *name); ++extern uint getgpiopin(char *vars, char *pin_name, uint def_pin); ++extern int 
getwanport(void); ++extern int getbrcmtag(void); ++#ifdef BCMDBG ++extern void prpkt(const char *msg, osl_t *osh, void *p0); ++#endif /* BCMDBG */ ++#ifdef BCMPERFSTATS ++extern void bcm_perf_enable(void); ++extern void bcmstats(char *fmt); ++extern void bcmlog(char *fmt, uint a1, uint a2); ++extern void bcmdumplog(char *buf, int size); ++extern int bcmdumplogent(char *buf, uint idx); ++#else ++#define bcm_perf_enable() ++#define bcmstats(fmt) ++#define bcmlog(fmt, a1, a2) ++#define bcmdumplog(buf, size) *buf = '\0' ++#define bcmdumplogent(buf, idx) -1 ++#endif /* BCMPERFSTATS */ ++ ++#if defined(BCMTSTAMPEDLOGS) ++#define TSF_TICKS_PER_MS 1024 ++/* Store a TSF timestamp and a log line in the log buffer */ ++extern void bcmtslog(uint32 tstamp, char *fmt, uint a1, uint a2); ++/* Print out the log buffer with timestamps */ ++extern void bcmprinttslogs(void); ++/* Print out a microsecond timestamp as "sec.ms.us " */ ++extern void bcmprinttstamp(uint32 us); ++/* Dump to buffer a microsecond timestamp as "sec.ms.us " */ ++extern void bcmdumptslog(char *buf, int size); ++#else ++#define bcmtslog(tstamp, fmt, a1, a2) ++#define bcmprinttslogs() ++#define bcmprinttstamp(us) ++#define bcmdumptslog(buf, size) ++#endif /* BCMTSTAMPEDLOGS */ ++ ++extern char *bcm_nvram_vars(uint *length); ++extern int bcm_nvram_cache(void *sih); ++ ++/* Support for sharing code across in-driver iovar implementations. ++ * The intent is that a driver use this structure to map iovar names ++ * to its (private) iovar identifiers, and the lookup function to ++ * find the entry. Macros are provided to map ids and get/set actions ++ * into a single number space for a switch statement. 
++ */ ++ ++/* iovar structure */ ++typedef struct bcm_iovar { ++ const char *name; /* name for lookup and display */ ++ uint16 varid; /* id for switch */ ++ uint16 flags; /* driver-specific flag bits */ ++ uint16 type; /* base type of argument */ ++ uint16 minlen; /* min length for buffer vars */ ++} bcm_iovar_t; ++ ++/* varid definitions are per-driver, may use these get/set bits */ ++ ++/* IOVar action bits for id mapping */ ++#define IOV_GET 0 /* Get an iovar */ ++#define IOV_SET 1 /* Set an iovar */ ++ ++/* Varid to actionid mapping */ ++#define IOV_GVAL(id) ((id) * 2) ++#define IOV_SVAL(id) ((id) * 2 + IOV_SET) ++#define IOV_ISSET(actionid) ((actionid & IOV_SET) == IOV_SET) ++#define IOV_ID(actionid) (actionid >> 1) ++ ++/* flags are per-driver based on driver attributes */ ++ ++extern const bcm_iovar_t *bcm_iovar_lookup(const bcm_iovar_t *table, const char *name); ++extern int bcm_iovar_lencheck(const bcm_iovar_t *table, void *arg, int len, bool set); ++#if defined(WLTINYDUMP) || defined(BCMDBG) || defined(WLMSG_INFORM) || \ ++ defined(WLMSG_ASSOC) || defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) ++extern int bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len); ++#endif /* WLTINYDUMP || BCMDBG || WLMSG_INFORM || WLMSG_ASSOC || WLMSG_PRPKT */ ++#endif /* BCMDRIVER */ ++ ++/* Base type definitions */ ++#define IOVT_VOID 0 /* no value (implictly set only) */ ++#define IOVT_BOOL 1 /* any value ok (zero/nonzero) */ ++#define IOVT_INT8 2 /* integer values are range-checked */ ++#define IOVT_UINT8 3 /* unsigned int 8 bits */ ++#define IOVT_INT16 4 /* int 16 bits */ ++#define IOVT_UINT16 5 /* unsigned int 16 bits */ ++#define IOVT_INT32 6 /* int 32 bits */ ++#define IOVT_UINT32 7 /* unsigned int 32 bits */ ++#define IOVT_BUFFER 8 /* buffer is size-checked as per minlen */ ++#define BCM_IOVT_VALID(type) (((unsigned int)(type)) <= IOVT_BUFFER) ++ ++/* Initializer for IOV type strings */ ++#define BCM_IOV_TYPE_INIT { \ ++ "void", \ ++ "bool", \ ++ "int8", \ ++ 
"uint8", \ ++ "int16", \ ++ "uint16", \ ++ "int32", \ ++ "uint32", \ ++ "buffer", \ ++ "" } ++ ++#define BCM_IOVT_IS_INT(type) (\ ++ (type == IOVT_BOOL) || \ ++ (type == IOVT_INT8) || \ ++ (type == IOVT_UINT8) || \ ++ (type == IOVT_INT16) || \ ++ (type == IOVT_UINT16) || \ ++ (type == IOVT_INT32) || \ ++ (type == IOVT_UINT32)) ++ ++/* ** driver/apps-shared section ** */ ++ ++#define BCME_STRLEN 64 /* Max string length for BCM errors */ ++#define VALID_BCMERROR(e) ((e <= 0) && (e >= BCME_LAST)) ++ ++ ++/* ++ * error codes could be added but the defined ones shouldn't be changed/deleted ++ * these error codes are exposed to the user code ++ * when ever a new error code is added to this list ++ * please update errorstring table with the related error string and ++ * update osl files with os specific errorcode map ++*/ ++ ++#define BCME_OK 0 /* Success */ ++#define BCME_ERROR -1 /* Error generic */ ++#define BCME_BADARG -2 /* Bad Argument */ ++#define BCME_BADOPTION -3 /* Bad option */ ++#define BCME_NOTUP -4 /* Not up */ ++#define BCME_NOTDOWN -5 /* Not down */ ++#define BCME_NOTAP -6 /* Not AP */ ++#define BCME_NOTSTA -7 /* Not STA */ ++#define BCME_BADKEYIDX -8 /* BAD Key Index */ ++#define BCME_RADIOOFF -9 /* Radio Off */ ++#define BCME_NOTBANDLOCKED -10 /* Not band locked */ ++#define BCME_NOCLK -11 /* No Clock */ ++#define BCME_BADRATESET -12 /* BAD Rate valueset */ ++#define BCME_BADBAND -13 /* BAD Band */ ++#define BCME_BUFTOOSHORT -14 /* Buffer too short */ ++#define BCME_BUFTOOLONG -15 /* Buffer too long */ ++#define BCME_BUSY -16 /* Busy */ ++#define BCME_NOTASSOCIATED -17 /* Not Associated */ ++#define BCME_BADSSIDLEN -18 /* Bad SSID len */ ++#define BCME_OUTOFRANGECHAN -19 /* Out of Range Channel */ ++#define BCME_BADCHAN -20 /* Bad Channel */ ++#define BCME_BADADDR -21 /* Bad Address */ ++#define BCME_NORESOURCE -22 /* Not Enough Resources */ ++#define BCME_UNSUPPORTED -23 /* Unsupported */ ++#define BCME_BADLEN -24 /* Bad length */ ++#define 
BCME_NOTREADY -25 /* Not Ready */ ++#define BCME_EPERM -26 /* Not Permitted */ ++#define BCME_NOMEM -27 /* No Memory */ ++#define BCME_ASSOCIATED -28 /* Associated */ ++#define BCME_RANGE -29 /* Not In Range */ ++#define BCME_NOTFOUND -30 /* Not Found */ ++#define BCME_WME_NOT_ENABLED -31 /* WME Not Enabled */ ++#define BCME_TSPEC_NOTFOUND -32 /* TSPEC Not Found */ ++#define BCME_ACM_NOTSUPPORTED -33 /* ACM Not Supported */ ++#define BCME_NOT_WME_ASSOCIATION -34 /* Not WME Association */ ++#define BCME_SDIO_ERROR -35 /* SDIO Bus Error */ ++#define BCME_DONGLE_DOWN -36 /* Dongle Not Accessible */ ++#define BCME_VERSION -37 /* Incorrect version */ ++#define BCME_TXFAIL -38 /* TX failure */ ++#define BCME_RXFAIL -39 /* RX failure */ ++#define BCME_NODEVICE -40 /* Device not present */ ++#define BCME_NMODE_DISABLED -41 /* NMODE disabled */ ++#define BCME_NONRESIDENT -42 /* access to nonresident overlay */ ++#define BCME_LAST BCME_NONRESIDENT ++ ++/* These are collection of BCME Error strings */ ++#define BCMERRSTRINGTABLE { \ ++ "OK", \ ++ "Undefined error", \ ++ "Bad Argument", \ ++ "Bad Option", \ ++ "Not up", \ ++ "Not down", \ ++ "Not AP", \ ++ "Not STA", \ ++ "Bad Key Index", \ ++ "Radio Off", \ ++ "Not band locked", \ ++ "No clock", \ ++ "Bad Rate valueset", \ ++ "Bad Band", \ ++ "Buffer too short", \ ++ "Buffer too long", \ ++ "Busy", \ ++ "Not Associated", \ ++ "Bad SSID len", \ ++ "Out of Range Channel", \ ++ "Bad Channel", \ ++ "Bad Address", \ ++ "Not Enough Resources", \ ++ "Unsupported", \ ++ "Bad length", \ ++ "Not Ready", \ ++ "Not Permitted", \ ++ "No Memory", \ ++ "Associated", \ ++ "Not In Range", \ ++ "Not Found", \ ++ "WME Not Enabled", \ ++ "TSPEC Not Found", \ ++ "ACM Not Supported", \ ++ "Not WME Association", \ ++ "SDIO Bus Error", \ ++ "Dongle Not Accessible", \ ++ "Incorrect version", \ ++ "TX Failure", \ ++ "RX Failure", \ ++ "Device Not Present", \ ++ "NMODE Disabled", \ ++ "Nonresident overlay access", \ ++} ++ ++#ifndef ABS ++#define 
ABS(a) (((a) < 0) ? -(a) : (a)) ++#endif /* ABS */ ++ ++#ifndef MIN ++#define MIN(a, b) (((a) < (b)) ? (a) : (b)) ++#endif /* MIN */ ++ ++#ifndef MAX ++#define MAX(a, b) (((a) > (b)) ? (a) : (b)) ++#endif /* MAX */ ++ ++#define CEIL(x, y) (((x) + ((y) - 1)) / (y)) ++#define ROUNDUP(x, y) ((((x) + ((y) - 1)) / (y)) * (y)) ++#define ISALIGNED(a, x) (((uintptr)(a) & ((x) - 1)) == 0) ++#define ALIGN_ADDR(addr, boundary) (void *)(((uintptr)(addr) + (boundary) - 1) \ ++ & ~((boundary) - 1)) ++#define ALIGN_SIZE(size, boundary) (((size) + (boundary) - 1) \ ++ & ~((boundary) - 1)) ++#define ISPOWEROF2(x) ((((x) - 1) & (x)) == 0) ++#define VALID_MASK(mask) !((mask) & ((mask) + 1)) ++ ++#ifndef OFFSETOF ++#ifdef __ARMCC_VERSION ++/* ++ * The ARM RVCT compiler complains when using OFFSETOF where a constant ++ * expression is expected, such as an initializer for a static object. ++ * offsetof from the runtime library doesn't have that problem. ++ */ ++#include ++#define OFFSETOF(type, member) offsetof(type, member) ++#else ++#define OFFSETOF(type, member) ((uint)(uintptr)&((type *)0)->member) ++#endif /* __ARMCC_VERSION */ ++#endif /* OFFSETOF */ ++ ++#ifndef ARRAYSIZE ++#define ARRAYSIZE(a) (sizeof(a) / sizeof(a[0])) ++#endif ++ ++/* Reference a function; used to prevent a static function from being optimized out */ ++extern void *_bcmutils_dummy_fn; ++#define REFERENCE_FUNCTION(f) (_bcmutils_dummy_fn = (void *)(f)) ++ ++/* bit map related macros */ ++#ifndef setbit ++#ifndef NBBY /* the BSD family defines NBBY */ ++#define NBBY 8 /* 8 bits per byte */ ++#endif /* #ifndef NBBY */ ++#define setbit(a, i) (((uint8 *)a)[(i) / NBBY] |= 1 << ((i) % NBBY)) ++#define clrbit(a, i) (((uint8 *)a)[(i) / NBBY] &= ~(1 << ((i) % NBBY))) ++#define isset(a, i) (((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) ++#define isclr(a, i) ((((const uint8 *)a)[(i) / NBBY] & (1 << ((i) % NBBY))) == 0) ++#endif /* setbit */ ++ ++#define NBITS(type) (sizeof(type) * 8) ++#define NBITVAL(nbits) (1 << 
(nbits)) ++#define MAXBITVAL(nbits) ((1 << (nbits)) - 1) ++#define NBITMASK(nbits) MAXBITVAL(nbits) ++#define MAXNBVAL(nbyte) MAXBITVAL((nbyte) * 8) ++ ++/* basic mux operation - can be optimized on several architectures */ ++#define MUX(pred, true, false) ((pred) ? (true) : (false)) ++ ++/* modulo inc/dec - assumes x E [0, bound - 1] */ ++#define MODDEC(x, bound) MUX((x) == 0, (bound) - 1, (x) - 1) ++#define MODINC(x, bound) MUX((x) == (bound) - 1, 0, (x) + 1) ++ ++/* modulo inc/dec, bound = 2^k */ ++#define MODDEC_POW2(x, bound) (((x) - 1) & ((bound) - 1)) ++#define MODINC_POW2(x, bound) (((x) + 1) & ((bound) - 1)) ++ ++/* modulo add/sub - assumes x, y E [0, bound - 1] */ ++#define MODADD(x, y, bound) \ ++ MUX((x) + (y) >= (bound), (x) + (y) - (bound), (x) + (y)) ++#define MODSUB(x, y, bound) \ ++ MUX(((int)(x)) - ((int)(y)) < 0, (x) - (y) + (bound), (x) - (y)) ++ ++/* module add/sub, bound = 2^k */ ++#define MODADD_POW2(x, y, bound) (((x) + (y)) & ((bound) - 1)) ++#define MODSUB_POW2(x, y, bound) (((x) - (y)) & ((bound) - 1)) ++ ++/* crc defines */ ++#define CRC8_INIT_VALUE 0xff /* Initial CRC8 checksum value */ ++#define CRC8_GOOD_VALUE 0x9f /* Good final CRC8 checksum value */ ++#define CRC16_INIT_VALUE 0xffff /* Initial CRC16 checksum value */ ++#define CRC16_GOOD_VALUE 0xf0b8 /* Good final CRC16 checksum value */ ++#define CRC32_INIT_VALUE 0xffffffff /* Initial CRC32 checksum value */ ++#define CRC32_GOOD_VALUE 0xdebb20e3 /* Good final CRC32 checksum value */ ++ ++/* use for direct output of MAC address in printf etc */ ++#define MACF "%02x:%02x:%02x:%02x:%02x:%02x" ++#define ETHERP_TO_MACF(ea) ((struct ether_addr *) (ea))->octet[0], \ ++ ((struct ether_addr *) (ea))->octet[1], \ ++ ((struct ether_addr *) (ea))->octet[2], \ ++ ((struct ether_addr *) (ea))->octet[3], \ ++ ((struct ether_addr *) (ea))->octet[4], \ ++ ((struct ether_addr *) (ea))->octet[5] ++ ++#define ETHER_TO_MACF(ea) (ea).octet[0], \ ++ (ea).octet[1], \ ++ (ea).octet[2], \ ++ (ea).octet[3], 
\ ++ (ea).octet[4], \ ++ (ea).octet[5] ++ ++/* bcm_format_flags() bit description structure */ ++typedef struct bcm_bit_desc { ++ uint32 bit; ++ const char* name; ++} bcm_bit_desc_t; ++ ++/* tag_ID/length/value_buffer tuple */ ++typedef struct bcm_tlv { ++ uint8 id; ++ uint8 len; ++ uint8 data[1]; ++} bcm_tlv_t; ++ ++/* Check that bcm_tlv_t fits into the given buflen */ ++#define bcm_valid_tlv(elt, buflen) ((buflen) >= 2 && (int)(buflen) >= (int)(2 + (elt)->len)) ++ ++/* buffer length for ethernet address from bcm_ether_ntoa() */ ++#define ETHER_ADDR_STR_LEN 18 /* 18-bytes of Ethernet address buffer length */ ++ ++/* crypto utility function */ ++/* 128-bit xor: *dst = *src1 xor *src2. dst1, src1 and src2 may have any alignment */ ++static INLINE void ++xor_128bit_block(const uint8 *src1, const uint8 *src2, uint8 *dst) ++{ ++ if ( ++#ifdef __i386__ ++ 1 || ++#endif ++ (((uintptr)src1 | (uintptr)src2 | (uintptr)dst) & 3) == 0) { ++ /* ARM CM3 rel time: 1229 (727 if alignment check could be omitted) */ ++ /* x86 supports unaligned. This version runs 6x-9x faster on x86. 
*/ ++ ((uint32 *)dst)[0] = ((const uint32 *)src1)[0] ^ ((const uint32 *)src2)[0]; ++ ((uint32 *)dst)[1] = ((const uint32 *)src1)[1] ^ ((const uint32 *)src2)[1]; ++ ((uint32 *)dst)[2] = ((const uint32 *)src1)[2] ^ ((const uint32 *)src2)[2]; ++ ((uint32 *)dst)[3] = ((const uint32 *)src1)[3] ^ ((const uint32 *)src2)[3]; ++ } else { ++ /* ARM CM3 rel time: 4668 (4191 if alignment check could be omitted) */ ++ int k; ++ for (k = 0; k < 16; k++) ++ dst[k] = src1[k] ^ src2[k]; ++ } ++} ++ ++/* externs */ ++/* crc */ ++extern uint8 BCMROMFN(hndcrc8)(uint8 *p, uint nbytes, uint8 crc); ++extern uint16 BCMROMFN(hndcrc16)(uint8 *p, uint nbytes, uint16 crc); ++extern uint32 BCMROMFN(hndcrc32)(uint8 *p, uint nbytes, uint32 crc); ++ ++/* format/print */ ++#if defined(BCMDBG) || defined(DHD_DEBUG) || defined(BCMDBG_ERR) || \ ++ defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) ++extern int bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len); ++#endif ++ ++#if defined(BCMDBG) || defined(DHD_DEBUG) || defined(BCMDBG_ERR) || \ ++ defined(WLMSG_PRHDRS) || defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || \ ++ defined(WLMEDIA_PEAKRATE) ++extern int bcm_format_hex(char *str, const void *bytes, int len); ++#endif ++ ++#ifdef BCMDBG ++extern void deadbeef(void *p, size_t len); ++#endif ++extern const char *bcm_crypto_algo_name(uint algo); ++extern char *bcm_chipname(uint chipid, char *buf, uint len); ++extern char *bcm_brev_str(uint32 brev, char *buf); ++extern void printbig(char *buf); ++extern void prhex(const char *msg, uchar *buf, uint len); ++ ++/* IE parsing */ ++extern bcm_tlv_t *BCMROMFN(bcm_next_tlv)(bcm_tlv_t *elt, int *buflen); ++extern bcm_tlv_t *BCMROMFN(bcm_parse_tlvs)(void *buf, int buflen, uint key); ++extern bcm_tlv_t *BCMROMFN(bcm_parse_ordered_tlvs)(void *buf, int buflen, uint key); ++ ++/* bcmerror */ ++extern const char *bcmerrorstr(int bcmerror); ++extern bcm_tlv_t *BCMROMFN(bcm_parse_tlvs)(void *buf, int buflen, uint key); 
++ ++/* multi-bool data type: set of bools, mbool is true if any is set */ ++typedef uint32 mbool; ++#define mboolset(mb, bit) ((mb) |= (bit)) /* set one bool */ ++#define mboolclr(mb, bit) ((mb) &= ~(bit)) /* clear one bool */ ++#define mboolisset(mb, bit) (((mb) & (bit)) != 0) /* TRUE if one bool is set */ ++#define mboolmaskset(mb, mask, val) ((mb) = (((mb) & ~(mask)) | (val))) ++ ++/* generic datastruct to help dump routines */ ++struct fielddesc { ++ const char *nameandfmt; ++ uint32 offset; ++ uint32 len; ++}; ++ ++extern void bcm_binit(struct bcmstrbuf *b, char *buf, uint size); ++extern void bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len); ++ ++extern void bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount); ++extern int bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes); ++extern void bcm_print_bytes(const char *name, const uchar *cdata, int len); ++ ++typedef uint32 (*bcmutl_rdreg_rtn)(void *arg0, uint arg1, uint32 offset); ++extern uint bcmdumpfields(bcmutl_rdreg_rtn func_ptr, void *arg0, uint arg1, struct fielddesc *str, ++ char *buf, uint32 bufsize); ++extern uint BCMROMFN(bcm_bitcount)(uint8 *bitmap, uint bytelength); ++ ++extern int bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...); ++ ++/* power conversion */ ++extern uint16 BCMROMFN(bcm_qdbm_to_mw)(uint8 qdbm); ++extern uint8 BCMROMFN(bcm_mw_to_qdbm)(uint16 mw); ++ ++extern int32 exthdr_validate(char *ptr, uint size); ++extern uint bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint len); ++ ++unsigned int process_nvram_vars(char *varbuf, unsigned int len); ++ ++#ifdef __cplusplus ++ } ++#endif ++ ++#endif /* _bcmutils_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/bcmwifi.h b/drivers/net/ethernet/broadcom/gmac/src/include/bcmwifi.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/bcmwifi.h 
1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/bcmwifi.h 2017-11-09 17:53:43.942304000 +0800 +@@ -0,0 +1,456 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Misc utility routines for WL and Apps ++ * This header file housing the define and function prototype use by ++ * both the wl driver, tools & Apps. ++ * ++ * $Id: bcmwifi.h 293848 2011-11-03 12:31:04Z $ ++ */ ++ ++#ifndef _bcmwifi_h_ ++#define _bcmwifi_h_ ++ ++ ++/* A chanspec holds the channel number, band, bandwidth and control sideband */ ++typedef uint16 chanspec_t; ++ ++/* channel defines */ ++#define CH_UPPER_SB 0x01 ++#define CH_LOWER_SB 0x02 ++#define CH_EWA_VALID 0x04 ++#define CH_80MHZ_APART 16 ++#define CH_40MHZ_APART 8 ++#define CH_20MHZ_APART 4 ++#define CH_10MHZ_APART 2 ++#define CH_5MHZ_APART 1 /* 2G band channels are 5 Mhz apart */ ++#define CH_MAX_2G_CHANNEL 14 /* Max channel in 2G band */ ++#define MAXCHANNEL 224 /* max # supported channels. The max channel no is 216, ++ * this is that + 1 rounded up to a multiple of NBBY (8). 
++ * DO NOT MAKE it > 255: channels are uint8's all over ++ */ ++#define CHSPEC_CTLOVLP(sp1, sp2, sep) ABS(wf_chspec_ctlchan(sp1) - wf_chspec_ctlchan(sp2)) < (sep) ++ ++#ifndef D11AC_IOTYPES ++ ++#define WL_CHANSPEC_CHAN_MASK 0x00ff ++#define WL_CHANSPEC_CHAN_SHIFT 0 ++ ++#define WL_CHANSPEC_CTL_SB_MASK 0x0300 ++#define WL_CHANSPEC_CTL_SB_SHIFT 8 ++#define WL_CHANSPEC_CTL_SB_LOWER 0x0100 ++#define WL_CHANSPEC_CTL_SB_UPPER 0x0200 ++#define WL_CHANSPEC_CTL_SB_NONE 0x0300 ++ ++#define WL_CHANSPEC_BW_MASK 0x0C00 ++#define WL_CHANSPEC_BW_SHIFT 10 ++#define WL_CHANSPEC_BW_10 0x0400 ++#define WL_CHANSPEC_BW_20 0x0800 ++#define WL_CHANSPEC_BW_40 0x0C00 ++ ++#define WL_CHANSPEC_BAND_MASK 0xf000 ++#define WL_CHANSPEC_BAND_SHIFT 12 ++#define WL_CHANSPEC_BAND_5G 0x1000 ++#define WL_CHANSPEC_BAND_2G 0x2000 ++#define INVCHANSPEC 255 ++ ++/* channel defines */ ++#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? ((channel) - CH_10MHZ_APART) : 0) ++#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? \ ++ ((channel) + CH_10MHZ_APART) : 0) ++#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX) ++#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \ ++ WL_CHANSPEC_CTL_SB_NONE | (((channel) <= CH_MAX_2G_CHANNEL) ? \ ++ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) ++#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \ ++ ((channel) + CH_20MHZ_APART) : 0) ++#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ ++ ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \ ++ ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \ ++ WL_CHANSPEC_BAND_5G)) ++#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK)) ++#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) ++ ++/* chanspec stores radio channel & flags to indicate control channel location, i.e. 
upper/lower */ ++#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK) ++#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK) ++ ++#ifdef WL11N_20MHZONLY ++ ++#define CHSPEC_IS10(chspec) 0 ++#define CHSPEC_IS20(chspec) 1 ++#ifndef CHSPEC_IS40 ++#define CHSPEC_IS40(chspec) 0 ++#endif ++ ++#else /* !WL11N_20MHZONLY */ ++ ++#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10) ++#define CHSPEC_IS20(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) ++#ifndef CHSPEC_IS40 ++#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40) ++#endif ++ ++#endif /* !WL11N_20MHZONLY */ ++ ++#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G) ++#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G) ++#define CHSPEC_SB_NONE(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_NONE) ++#define CHSPEC_SB_UPPER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) ++#define CHSPEC_SB_LOWER(chspec) (((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) ++#define CHSPEC_CTL_CHAN(chspec) ((CHSPEC_SB_LOWER(chspec)) ? \ ++ (LOWER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK))) : \ ++ (UPPER_20_SB(((chspec) & WL_CHANSPEC_CHAN_MASK)))) ++#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? 
WLC_BAND_5G : WLC_BAND_2G) ++ ++#define CHANSPEC_STR_LEN 8 ++ ++#else /* D11AC_IOTYPES */ ++ ++#define WL_CHANSPEC_CHAN_MASK 0x00ff ++#define WL_CHANSPEC_CHAN_SHIFT 0 ++#define WL_CHANSPEC_CHAN1_MASK 0x000f ++#define WL_CHANSPEC_CHAN1_SHIFT 0 ++#define WL_CHANSPEC_CHAN2_MASK 0x00f0 ++#define WL_CHANSPEC_CHAN2_SHIFT 4 ++ ++#define WL_CHANSPEC_CTL_SB_MASK 0x0700 ++#define WL_CHANSPEC_CTL_SB_SHIFT 8 ++#define WL_CHANSPEC_CTL_SB_LLL 0x0000 ++#define WL_CHANSPEC_CTL_SB_LLU 0x0100 ++#define WL_CHANSPEC_CTL_SB_LUL 0x0200 ++#define WL_CHANSPEC_CTL_SB_LUU 0x0300 ++#define WL_CHANSPEC_CTL_SB_ULL 0x0400 ++#define WL_CHANSPEC_CTL_SB_ULU 0x0500 ++#define WL_CHANSPEC_CTL_SB_UUL 0x0600 ++#define WL_CHANSPEC_CTL_SB_UUU 0x0700 ++#define WL_CHANSPEC_CTL_SB_LL WL_CHANSPEC_CTL_SB_LLL ++#define WL_CHANSPEC_CTL_SB_LU WL_CHANSPEC_CTL_SB_LLU ++#define WL_CHANSPEC_CTL_SB_UL WL_CHANSPEC_CTL_SB_LUL ++#define WL_CHANSPEC_CTL_SB_UU WL_CHANSPEC_CTL_SB_LUU ++#define WL_CHANSPEC_CTL_SB_L WL_CHANSPEC_CTL_SB_LLL ++#define WL_CHANSPEC_CTL_SB_U WL_CHANSPEC_CTL_SB_LLU ++#define WL_CHANSPEC_CTL_SB_LOWER WL_CHANSPEC_CTL_SB_LLL ++#define WL_CHANSPEC_CTL_SB_UPPER WL_CHANSPEC_CTL_SB_LLU ++ ++#define WL_CHANSPEC_BW_MASK 0x3800 ++#define WL_CHANSPEC_BW_SHIFT 11 ++#define WL_CHANSPEC_BW_5 0x0000 ++#define WL_CHANSPEC_BW_10 0x0800 ++#define WL_CHANSPEC_BW_20 0x1000 ++#define WL_CHANSPEC_BW_40 0x1800 ++#define WL_CHANSPEC_BW_80 0x2000 ++#define WL_CHANSPEC_BW_160 0x2800 ++#define WL_CHANSPEC_BW_8080 0x3000 ++ ++#define WL_CHANSPEC_BAND_MASK 0xc000 ++#define WL_CHANSPEC_BAND_SHIFT 14 ++#define WL_CHANSPEC_BAND_2G 0x0000 ++#define WL_CHANSPEC_BAND_3G 0x4000 ++#define WL_CHANSPEC_BAND_4G 0x8000 ++#define WL_CHANSPEC_BAND_5G 0xc000 ++#define INVCHANSPEC 255 ++ ++/* channel defines */ ++#define LOWER_20_SB(channel) (((channel) > CH_10MHZ_APART) ? \ ++ ((channel) - CH_10MHZ_APART) : 0) ++#define UPPER_20_SB(channel) (((channel) < (MAXCHANNEL - CH_10MHZ_APART)) ? 
\ ++ ((channel) + CH_10MHZ_APART) : 0) ++#define CHSPEC_WLCBANDUNIT(chspec) (CHSPEC_IS5G(chspec) ? BAND_5G_INDEX : BAND_2G_INDEX) ++#define CH20MHZ_CHSPEC(channel) (chanspec_t)((chanspec_t)(channel) | WL_CHANSPEC_BW_20 | \ ++ (((channel) <= CH_MAX_2G_CHANNEL) ? \ ++ WL_CHANSPEC_BAND_2G : WL_CHANSPEC_BAND_5G)) ++#define NEXT_20MHZ_CHAN(channel) (((channel) < (MAXCHANNEL - CH_20MHZ_APART)) ? \ ++ ((channel) + CH_20MHZ_APART) : 0) ++#define CH40MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ ++ ((channel) | (ctlsb) | WL_CHANSPEC_BW_40 | \ ++ ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \ ++ WL_CHANSPEC_BAND_5G)) ++#define CH80MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ ++ ((channel) | (ctlsb) | WL_CHANSPEC_BW_80 | \ ++ ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \ ++ WL_CHANSPEC_BAND_5G)) ++#define CH160MHZ_CHSPEC(channel, ctlsb) (chanspec_t) \ ++ ((channel) | (ctlsb) | WL_CHANSPEC_BW_160 | \ ++ ((channel) <= CH_MAX_2G_CHANNEL ? WL_CHANSPEC_BAND_2G : \ ++ WL_CHANSPEC_BAND_5G)) ++ ++/* simple MACROs to get different fields of chanspec */ ++#define CHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_CHANSPEC_CHAN_MASK)) ++#define CHSPEC_CHAN1(chspec) ((chspec) & WL_CHANSPEC_CHAN1_MASK) ++#define CHSPEC_CHAN2(chspec) ((chspec) & WL_CHANSPEC_CHAN2_MASK) ++#define CHSPEC_BAND(chspec) ((chspec) & WL_CHANSPEC_BAND_MASK) ++#define CHSPEC_CTL_SB(chspec) ((chspec) & WL_CHANSPEC_CTL_SB_MASK) ++#define CHSPEC_BW(chspec) ((chspec) & WL_CHANSPEC_BW_MASK) ++ ++#ifdef WL11N_20MHZONLY ++ ++#define CHSPEC_IS10(chspec) 0 ++#define CHSPEC_IS20(chspec) 1 ++#ifndef CHSPEC_IS40 ++#define CHSPEC_IS40(chspec) 0 ++#endif ++#ifndef CHSPEC_IS80 ++#define CHSPEC_IS160(chspec) 0 ++#endif ++#ifndef CHSPEC_IS160 ++#define CHSPEC_IS160(chspec) 0 ++#endif ++#ifndef CHSPEC_IS8080 ++#define CHSPEC_IS8080(chspec) 0 ++#endif ++ ++#else /* !WL11N_20MHZONLY */ ++ ++#define CHSPEC_IS10(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_10) ++#define CHSPEC_IS20(chspec) (((chspec) & 
WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_20) ++#ifndef CHSPEC_IS40 ++#define CHSPEC_IS40(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40) ++#endif ++#ifndef CHSPEC_IS80 ++#define CHSPEC_IS80(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_80) ++#endif ++#ifndef CHSPEC_IS160 ++#define CHSPEC_IS160(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_160) ++#endif ++#ifndef CHSPEC_IS8080 ++#define CHSPEC_IS8080(chspec) (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_8080) ++#endif ++ ++#endif /* !WL11N_20MHZONLY */ ++ ++#define CHSPEC_IS5G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_5G) ++#define CHSPEC_IS2G(chspec) (((chspec) & WL_CHANSPEC_BAND_MASK) == WL_CHANSPEC_BAND_2G) ++#define CHSPEC_SB_UPPER(chspec) \ ++ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_UPPER) && \ ++ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)) ++#define CHSPEC_SB_LOWER(chspec) \ ++ ((((chspec) & WL_CHANSPEC_CTL_SB_MASK) == WL_CHANSPEC_CTL_SB_LOWER) && \ ++ (((chspec) & WL_CHANSPEC_BW_MASK) == WL_CHANSPEC_BW_40)) ++#define CHSPEC2WLC_BAND(chspec) (CHSPEC_IS5G(chspec) ? WLC_BAND_5G : WLC_BAND_2G) ++ ++/** ++ * Number of chars needed for wf_chspec_ntoa() destination character buffer. 
++ */ ++#define CHANSPEC_STR_LEN 20 ++ ++ ++/* Legacy Chanspec defines ++ * These are the defines for the previous format of the chanspec_t ++ */ ++#define WL_LCHANSPEC_CHAN_MASK 0x00ff ++#define WL_LCHANSPEC_CHAN_SHIFT 0 ++ ++#define WL_LCHANSPEC_CTL_SB_MASK 0x0300 ++#define WL_LCHANSPEC_CTL_SB_SHIFT 8 ++#define WL_LCHANSPEC_CTL_SB_LOWER 0x0100 ++#define WL_LCHANSPEC_CTL_SB_UPPER 0x0200 ++#define WL_LCHANSPEC_CTL_SB_NONE 0x0300 ++ ++#define WL_LCHANSPEC_BW_MASK 0x0C00 ++#define WL_LCHANSPEC_BW_SHIFT 10 ++#define WL_LCHANSPEC_BW_10 0x0400 ++#define WL_LCHANSPEC_BW_20 0x0800 ++#define WL_LCHANSPEC_BW_40 0x0C00 ++ ++#define WL_LCHANSPEC_BAND_MASK 0xf000 ++#define WL_LCHANSPEC_BAND_SHIFT 12 ++#define WL_LCHANSPEC_BAND_5G 0x1000 ++#define WL_LCHANSPEC_BAND_2G 0x2000 ++ ++#define LCHSPEC_CHANNEL(chspec) ((uint8)((chspec) & WL_LCHANSPEC_CHAN_MASK)) ++#define LCHSPEC_BAND(chspec) ((chspec) & WL_LCHANSPEC_BAND_MASK) ++#define LCHSPEC_CTL_SB(chspec) ((chspec) & WL_LCHANSPEC_CTL_SB_MASK) ++#define LCHSPEC_BW(chspec) ((chspec) & WL_LCHANSPEC_BW_MASK) ++#define LCHSPEC_IS10(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_10) ++#define LCHSPEC_IS20(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_20) ++#define LCHSPEC_IS40(chspec) (((chspec) & WL_LCHANSPEC_BW_MASK) == WL_LCHANSPEC_BW_40) ++#define LCHSPEC_IS5G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_5G) ++#define LCHSPEC_IS2G(chspec) (((chspec) & WL_LCHANSPEC_BAND_MASK) == WL_LCHANSPEC_BAND_2G) ++ ++#define LCHSPEC_CREATE(chan, band, bw, sb) ((uint16)((chan) | (sb) | (bw) | (band))) ++ ++#endif /* D11AC_IOTYPES */ ++ ++/* ++ * WF_CHAN_FACTOR_* constants are used to calculate channel frequency ++ * given a channel number. ++ * chan_freq = chan_factor * 500Mhz + chan_number * 5 ++ */ ++ ++/** ++ * Channel Factor for the starting frequence of 2.4 GHz channels. ++ * The value corresponds to 2407 MHz. 
++ */ ++#define WF_CHAN_FACTOR_2_4_G 4814 /* 2.4 GHz band, 2407 MHz */ ++ ++/** ++ * Channel Factor for the starting frequence of 5 GHz channels. ++ * The value corresponds to 5000 MHz. ++ */ ++#define WF_CHAN_FACTOR_5_G 10000 /* 5 GHz band, 5000 MHz */ ++ ++/** ++ * Channel Factor for the starting frequence of 4.9 GHz channels. ++ * The value corresponds to 4000 MHz. ++ */ ++#define WF_CHAN_FACTOR_4_G 8000 /* 4.9 GHz band for Japan */ ++ ++/* defined rate in 500kbps */ ++#define WLC_MAXRATE 108 /* in 500kbps units */ ++#define WLC_RATE_1M 2 /* in 500kbps units */ ++#define WLC_RATE_2M 4 /* in 500kbps units */ ++#define WLC_RATE_5M5 11 /* in 500kbps units */ ++#define WLC_RATE_11M 22 /* in 500kbps units */ ++#define WLC_RATE_6M 12 /* in 500kbps units */ ++#define WLC_RATE_9M 18 /* in 500kbps units */ ++#define WLC_RATE_12M 24 /* in 500kbps units */ ++#define WLC_RATE_18M 36 /* in 500kbps units */ ++#define WLC_RATE_24M 48 /* in 500kbps units */ ++#define WLC_RATE_36M 72 /* in 500kbps units */ ++#define WLC_RATE_48M 96 /* in 500kbps units */ ++#define WLC_RATE_54M 108 /* in 500kbps units */ ++ ++#define WLC_2G_25MHZ_OFFSET 5 /* 2.4GHz band channel offset */ ++ ++/** ++ * Convert chanspec to ascii string ++ * ++ * @param chspec chanspec format ++ * @param buf ascii string of chanspec ++ * ++ * @return pointer to buf with room for at least CHANSPEC_STR_LEN bytes ++ * ++ * @see CHANSPEC_STR_LEN ++ */ ++extern char * wf_chspec_ntoa(chanspec_t chspec, char *buf); ++ ++/** ++ * Convert ascii string to chanspec ++ * ++ * @param a pointer to input string ++ * ++ * @return >= 0 if successful or 0 otherwise ++ */ ++extern chanspec_t wf_chspec_aton(const char *a); ++ ++/** ++ * Verify the chanspec fields are valid. ++ * ++ * Verify the chanspec is using a legal set field values, i.e. that the chanspec ++ * specified a band, bw, ctl_sb and channel and that the combination could be ++ * legal given some set of circumstances. 
++ * ++ * @param chanspec input chanspec to verify ++ * ++ * @return TRUE if the chanspec is malformed, FALSE if it looks good. ++ */ ++extern bool wf_chspec_malformed(chanspec_t chanspec); ++ ++/** ++ * Verify the chanspec specifies a valid channel according to 802.11. ++ * ++ * @param chanspec input chanspec to verify ++ * ++ * @return TRUE if the chanspec is a valid 802.11 channel ++ */ ++extern bool wf_chspec_valid(chanspec_t chanspec); ++ ++/** ++ * Return the primary (control) channel. ++ * ++ * This function returns the channel number of the primary 20MHz channel. For ++ * 20MHz channels this is just the channel number. For 40MHz or wider channels ++ * it is the primary 20MHz channel specified by the chanspec. ++ * ++ * @param chspec input chanspec ++ * ++ * @return Returns the channel number of the primary 20MHz channel ++ */ ++extern uint8 wf_chspec_ctlchan(chanspec_t chspec); ++ ++/** ++ * Return the primary (control) chanspec. ++ * ++ * This function returns the chanspec of the primary 20MHz channel. For 20MHz ++ * channels this is just the chanspec. For 40MHz or wider channels it is the ++ * chanspec of the primary 20MHZ channel specified by the chanspec. ++ * ++ * @param chspec input chanspec ++ * ++ * @return Returns the chanspec of the primary 20MHz channel ++ */ ++extern chanspec_t wf_chspec_ctlchspec(chanspec_t chspec); ++ ++/** ++ * Return a channel number corresponding to a frequency. ++ * ++ * Return the channel number for a given frequency and base frequency. ++ * The returned channel number is relative to the given base frequency. ++ * If the given base frequency is zero, a base frequency of 5 GHz is assumed for ++ * frequencies from 5 - 6 GHz, and 2.407 GHz is assumed for 2.4 - 2.5 GHz. ++ * ++ * Frequency is specified in MHz. ++ * The base frequency is specified as (start_factor * 500 kHz). ++ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for ++ * 2.4 GHz and 5 GHz bands. 
++ * ++ * The returned channel will be in the range [1, 14] in the 2.4 GHz band ++ * and [0, 200] otherwise. ++ * -1 is returned if the start_factor is WF_CHAN_FACTOR_2_4_G and the ++ * frequency is not a 2.4 GHz channel, or if the frequency is not and even ++ * multiple of 5 MHz from the base frequency to the base plus 1 GHz. ++ * ++ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2 ++ * ++ * @param freq frequency in MHz ++ * @param start_factor base frequency in 500 kHz units, e.g. 10000 for 5 GHz ++ * ++ * @return Returns a channel number ++ * ++ * @see WF_CHAN_FACTOR_2_4_G ++ * @see WF_CHAN_FACTOR_5_G ++ */ ++extern int wf_mhz2channel(uint freq, uint start_factor); ++ ++/** ++ * Return the center frequency in MHz of the given channel and base frequency. ++ * ++ * Return the center frequency in MHz of the given channel and base frequency. ++ * The channel number is interpreted relative to the given base frequency. ++ * ++ * The valid channel range is [1, 14] in the 2.4 GHz band and [0, 200] otherwise. ++ * The base frequency is specified as (start_factor * 500 kHz). ++ * Constants WF_CHAN_FACTOR_2_4_G, WF_CHAN_FACTOR_5_G are defined for ++ * 2.4 GHz and 5 GHz bands. ++ * The channel range of [1, 14] is only checked for a start_factor of ++ * WF_CHAN_FACTOR_2_4_G (4814). ++ * Odd start_factors produce channels on .5 MHz boundaries, in which case ++ * the answer is rounded down to an integral MHz. ++ * -1 is returned for an out of range channel. ++ * ++ * Reference 802.11 REVma, section 17.3.8.3, and 802.11B section 18.4.6.2 ++ * ++ * @param channel input channel number ++ * @param start_factor base frequency in 500 kHz units, e.g. 
10000 for 5 GHz ++ * ++ * @return Returns a frequency in MHz ++ * ++ * @see WF_CHAN_FACTOR_2_4_G ++ * @see WF_CHAN_FACTOR_5_G ++ */ ++extern int wf_channel2mhz(uint channel, uint start_factor); ++ ++#endif /* _bcmwifi_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/compvers.sh b/drivers/net/ethernet/broadcom/gmac/src/include/compvers.sh +--- a/drivers/net/ethernet/broadcom/gmac/src/include/compvers.sh 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/compvers.sh 2017-11-09 17:53:43.943304000 +0800 +@@ -0,0 +1,133 @@ ++#!/bin/bash ++# ++# Given a list of components, generate _version.h ++# from version.h.in in 's directory ++# ++# Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++# ++# Permission to use, copy, modify, and/or distribute this software for any ++# purpose with or without fee is hereby granted, provided that the above ++# copyright notice and this permission notice appear in all copies. ++# ++# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++# ++# ++ ++# Optional argument ++ACTION=$1 ++[ -n "$VERBOSE" ] && export VERBOSE ++ ++SRCBASE=.. ++ ++# List of components ++# TODO: In the long term component versioning model, following list ++# TODO: or table of components will come from a central file ++COMPONENTS=( \ ++ upnp \ ++ phy \ ++ router \ ++ wps \ ++) ++ ++# Component dirs. 
Need one entry for each of above COMPONENTS ++COMPONENT_DIR_upnp=${SRCBASE}/router/libupnp/include ++COMPONENT_DIR_phy=${SRCBASE}/wl/phy ++COMPONENT_DIR_router=${SRCBASE}/router/shared ++COMPONENT_DIR_wps=${SRCBASE}/wps/common/include ++ ++# For a given component, query automerger for a different ++# path than COMPONENT_DIR_. ++# Force router component to be pointing to local branch or tag. ++COMPONENT_QUERY_router=src_force_local_component ++ ++ ++ ++# ===== DO NOT CHANGE ANYTHING BELOW THIS LINE ===== ++ ++NULL=/dev/null ++MKCOMPVER=${SRCBASE}/tools/release/mkversion.sh ++MERGERLOG=${SRCBASE}/../merger_sources.log ++ ++# TODO: Post svn transition, network paths will be taken away ++GETCOMPVER=getcompver.py ++GETCOMPVER_NET=/projects/hnd_software/gallery/src/tools/build/$GETCOMPVER ++GETCOMPVER_NET_WIN=Z:${GETCOMPVER_NET} ++ ++# ++# If there is a local copy GETCOMPVER use it ahead of network copy ++# ++if [ -s "$GETCOMPVER" ]; then ++ GETCOMPVER_PATH="$GETCOMPVER" ++elif [ -s "${SRCBASE}/../src/tools/build/$GETCOMPVER" ]; then ++ GETCOMPVER_PATH="${SRCBASE}/../src/tools/build/$GETCOMPVER" ++elif [ -s "$GETCOMPVER_NET" ]; then ++ GETCOMPVER_PATH="$GETCOMPVER_NET" ++elif [ -s "$GETCOMPVER_NET_WIN" ]; then ++ GETCOMPVER_PATH="$GETCOMPVER_NET_WIN" ++fi ++ ++# ++# If $GETCOMPVER isn't found, fetch it from SVN ++# (this is very rare) ++# ++if [ ! -s "$GETCOMPVER_PATH" ]; then ++ svn export -q \ ++ ^/proj/trunk/src/tools/build/${GETCOMPVER} \ ++ ${GETCOMPVER} 2> $NULL ++ GETCOMPVER_PATH=$GETCOMPVER ++fi ++ ++# ++# Now walk through each specified component to generate its ++# component_version.h file from version.h.in template ++# ++for component in ${COMPONENTS[*]} ++do ++ # Get relative path of component from current dir ++ tmp="COMPONENT_DIR_$component" ++ eval rel_path=\$$tmp ++ ++ # Get query path for component ++ tmp="COMPONENT_QUERY_$component" ++ eval query_path=\$$tmp ++ ++ if [ ! 
-d "$rel_path" ]; then ++ continue ++ fi ++ ++ if [ "$query_path" != "" ]; then ++ abs_path=$(echo $query_path | sed -e "s%\.\.%src%g") ++ else ++ abs_path=$(echo $rel_path | sed -e "s%\.\.%src%g") ++ fi ++ ++ [ -n "$VERBOSE" ] && \ ++ echo "DBG: python $GETCOMPVER_PATH $MERGERLOG $abs_path" ++ ++ tag=$(python $GETCOMPVER_PATH $MERGERLOG $abs_path 2> $NULL | sed -e 's/[[:space:]]*//g') ++ ++ template=$rel_path/version.h.in ++ verfile=$rel_path/${component}_version.h ++ ++ if [ "$ACTION" == "clean" ]; then ++ rm -fv $verfile ++ continue ++ fi ++ ++ # MKCOMPVER always has defaults if tag isn't set correctly ++ if [ ! -f "$verfile" -o "$FORCE" != "" ]; then ++ echo "" ++ echo ">>> Generate $abs_path/${component}_version.h from $tag" ++ ++ [ -n "$VERBOSE" ] && \ ++ echo "DBG: bash $MKCOMPVER $template $verfile $tag" ++ ++ bash $MKCOMPVER $template $verfile $tag ++ fi ++done +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/epivers.h b/drivers/net/ethernet/broadcom/gmac/src/include/epivers.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/epivers.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/epivers.h 2017-11-09 17:53:43.944299000 +0800 +@@ -0,0 +1,45 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $ ++*/ ++ ++#ifndef _epivers_h_ ++#define _epivers_h_ ++ ++#define EPI_MAJOR_VERSION 6 ++ ++#define EPI_MINOR_VERSION 30 ++ ++#define EPI_RC_NUMBER 40 ++ ++#define EPI_INCREMENTAL_NUMBER 0 ++ ++#define EPI_BUILD_NUMBER 2 ++ ++#define EPI_VERSION 6, 30, 40, 0 ++ ++#define EPI_VERSION_NUM 0x061e2800 ++ ++#define EPI_VERSION_DEV 6.30.40 ++ ++/* Driver Version String, ASCII, 32 chars max */ ++#ifdef WLTEST ++#define EPI_VERSION_STR "6.30.40 (TOB) (r WLTEST)" ++#else ++#define EPI_VERSION_STR "6.30.40 (TOB) (r)" ++#endif ++ ++#endif /* _epivers_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/epivers.h.in b/drivers/net/ethernet/broadcom/gmac/src/include/epivers.h.in +--- a/drivers/net/ethernet/broadcom/gmac/src/include/epivers.h.in 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/epivers.h.in 2017-11-09 17:53:43.945295000 +0800 +@@ -0,0 +1,46 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * $Id: epivers.h.in,v 13.33 2010-09-08 22:08:53 $ ++ * ++*/ ++ ++#ifndef _epivers_h_ ++#define _epivers_h_ ++ ++#define EPI_MAJOR_VERSION @EPI_MAJOR_VERSION@ ++ ++#define EPI_MINOR_VERSION @EPI_MINOR_VERSION@ ++ ++#define EPI_RC_NUMBER @EPI_RC_NUMBER@ ++ ++#define EPI_INCREMENTAL_NUMBER @EPI_INCREMENTAL_NUMBER@ ++ ++#define EPI_BUILD_NUMBER @EPI_BUILD_NUMBER@ ++ ++#define EPI_VERSION @EPI_VERSION@ ++ ++#define EPI_VERSION_NUM @EPI_VERSION_NUM@ ++ ++#define EPI_VERSION_DEV @EPI_VERSION_DEV@ ++ ++/* Driver Version String, ASCII, 32 chars max */ ++#ifdef WLTEST ++#define EPI_VERSION_STR "@EPI_VERSION_STR@@EPI_VERSION_TYPE@ (@VC_VERSION_NUM@ WLTEST)" ++#else ++#define EPI_VERSION_STR "@EPI_VERSION_STR@@EPI_VERSION_TYPE@ (@VC_VERSION_NUM@)" ++#endif ++ ++#endif /* _epivers_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/epivers.sh b/drivers/net/ethernet/broadcom/gmac/src/include/epivers.sh +--- a/drivers/net/ethernet/broadcom/gmac/src/include/epivers.sh 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/epivers.sh 2017-11-09 17:53:43.946291000 +0800 +@@ -0,0 +1,309 @@ ++#! /bin/bash ++# ++# Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++# ++# Permission to use, copy, modify, and/or distribute this software for any ++# purpose with or without fee is hereby granted, provided that the above ++# copyright notice and this permission notice appear in all copies. 
++# ++# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++# ++# Create the epivers.h file from epivers.h.in ++# ++# Epivers.h generation mechanism supports svn based checkouts ++# ++# $Id: epivers.sh 299409 2011-11-30 00:52:43Z $ ++# ++# GetCompVer.py return value and action needed ++# i. trunk => use current date as version string ++# ii. local => use SVNURL expanded by HeadURL keyword ++# iii. => use it as as is ++# (some components can override and say give me native ver) ++# iv. empty => ++# a) If TAG is specified use it ++# a) If no TAG is specified use date ++# ++ ++# If the version header file already exists, increment its build number. ++# Otherwise, create a new file. ++if [ -f epivers.h ]; then ++ ++ # If REUSE_VERSION is set, epivers iteration is not incremented ++ # This can be used precommit and continuous integration projects ++ if [ -n "$REUSE_VERSION" ]; then ++ echo "Previous epivers.h exists. Skipping version increment" ++ exit 0 ++ fi ++ ++ build=`grep EPI_BUILD_NUMBER epivers.h | sed -e "s,.*BUILD_NUMBER[ ]*,,"` ++ build=`expr ${build} + 1` ++ echo build=${build} ++ sed -e "s,.*_BUILD_NUMBER.*,#define EPI_BUILD_NUMBER ${build}," \ ++ < epivers.h > epivers.h.new ++ mv epivers.h epivers.h.prev ++ mv epivers.h.new epivers.h ++ exit 0 ++ ++else # epivers.h doesn't exist ++ ++ NULL="/dev/null" ++ svncmd="svn --non-interactive" ++ ++ # Check for the in file, if not there we're in the wrong directory ++ if [ ! 
-f epivers.h.in ]; then ++ echo "ERROR: No epivers.h.in found" ++ exit 1 ++ fi ++ ++ # Following SVNURL should be expanded on checkout ++ SVNURL='$HeadURL: http://svn.sj.broadcom.com/svn/wlansvn/users/kenlo/northstar/AARDVARK_TWIG_6_30_40/src/include/epivers.sh $' ++ ++ # If SVNURL isn't expanded, extract it from svn info ++ if echo "$SVNURL" | grep -vq '$.*HeadURL.*epivers.sh.*$'; then ++ [ -n "$VERBOSE" ] && \ ++ echo "DBG: SVN URL wasn't expanded. Getting it from svn info" ++ SVNURL=$($svncmd info epivers.sh 2> $NULL | egrep "^URL:") ++ fi ++ ++ if echo "${TAG}" | grep -q "_BRANCH_\|_TWIG_"; then ++ branchtag=$TAG ++ else ++ branchtag="" ++ fi ++ ++ # If this is a tagged build, use the tag to supply the numbers ++ # Tag should be in the form ++ # _REL__ ++ # or ++ # _REL___RC ++ # or ++ # _REL___RC_ ++ ++ SRCBASE=.. ++ MERGERLOG=${SRCBASE}/../merger_sources.log ++ GETCOMPVER=getcompver.py ++ GETCOMPVER_NET=/projects/hnd_software/gallery/src/tools/build/$GETCOMPVER ++ GETCOMPVER_NET_WIN=Z:${GETCOMPVER_NET} ++ ++ # ++ # If there is a local copy GETCOMPVER use it ahead of network copy ++ # ++ if [ -s "$GETCOMPVER" ]; then ++ GETCOMPVER_PATH="$GETCOMPVER" ++ elif [ -s "${SRCBASE}/../src/tools/build/$GETCOMPVER" ]; then ++ GETCOMPVER_PATH="${SRCBASE}/../src/tools/build/$GETCOMPVER" ++ elif [ -s "$GETCOMPVER_NET" ]; then ++ GETCOMPVER_PATH="$GETCOMPVER_NET" ++ elif [ -s "$GETCOMPVER_NET_WIN" ]; then ++ GETCOMPVER_PATH="$GETCOMPVER_NET_WIN" ++ fi ++ ++ # ++ # If $GETCOMPVER isn't found, fetch it from SVN ++ # (this should be very rare) ++ # ++ if [ ! 
-s "$GETCOMPVER_PATH" ]; then ++ [ -n "$VERBOSE" ] && \ ++ echo "DBG: Fetching $GETCOMPVER from trunk" ++ ++ $svncmd export -q \ ++ ^/proj/trunk/src/tools/build/${GETCOMPVER} \ ++ ${GETCOMPVER} 2> $NULL ++ ++ GETCOMPVER_PATH=$GETCOMPVER ++ fi ++ ++ # Now get tag for src/include from automerger log ++ [ -n "$VERBOSE" ] && \ ++ echo "DBG: python $GETCOMPVER_PATH $MERGERLOG src/include" ++ ++ COMPTAG=$(python $GETCOMPVER_PATH $MERGERLOG src/include 2> $NULL | sed -e 's/[[:space:]]*//g') ++ ++ echo "DBG: Component Tag String Derived = $COMPTAG" ++ ++ # Process COMPTAG values ++ # Rule: ++ # If trunk is returned, use date as component tag ++ # If LOCAL_COMPONENT is returned, use SVN URL to get native tag ++ # If component is returned or empty, assign it to SVNTAG ++ # GetCompVer.py return value and action needed ++ # i. trunk => use current date as version string ++ # ii. local => use SVNURL expanded by HeadURL keyword ++ # iii. => use it as as is ++ # iv. empty => ++ # a) If TAG is specified use it ++ # a) If no TAG is specified use SVNURL from HeadURL ++ ++ SVNURL_VER=false ++ ++ if [ "$COMPTAG" == "" ]; then ++ SVNURL_VER=true ++ elif [ "$COMPTAG" == "LOCAL_COMPONENT" ]; then ++ SVNURL_VER=true ++ elif [ "$COMPTAG" == "trunk" ]; then ++ SVNTAG=$(date '+TRUNKCOMP_REL_%Y_%m_%d') ++ else ++ SVNTAG=$COMPTAG ++ fi ++ ++ # Given SVNURL path conventions or naming conventions, derive SVNTAG ++ # TO-DO: SVNTAG derivation logic can move to a central common API ++ # TO-DO: ${SRCBASE}/tools/build/svnurl2tag.sh ++ if [ "$SVNURL_VER" == "true" ]; then ++ case "${SVNURL}" in ++ */branches/*) ++ SVNTAG=$(echo $SVNURL | sed -e 's%.*/branches/\(.*\)/src.*%\1%g' | xargs printf "%s") ++ ;; ++ *_BRANCH_*) ++ SVNTAG=$(echo $SVNURL | sed -e 's%/%\n%g' | egrep _BRANCH_ | xargs printf "%s") ++ ;; ++ *_TWIG_*) ++ SVNTAG=$(echo $SVNURL | sed -e 's%/%\n%g' | egrep _TWIG_ | xargs printf "%s") ++ ;; ++ */tags/*) ++ SVNTAG=$(echo $SVNURL | sed -e 's%.*/tags/.*/\(.*\)/src.*%\1%g' | xargs printf 
"%s") ++ ;; ++ *_REL_*) ++ SVNTAG=$(echo $SVNURL | sed -e 's%/%\n%g' | egrep _REL_ | xargs printf "%s") ++ ;; ++ */trunk/*) ++ SVNTAG=$(date '+TRUNKURL_REL_%Y_%m_%d') ++ ;; ++ *) ++ SVNTAG=$(date '+OTHER_REL_%Y_%m_%d') ++ ;; ++ esac ++ echo "DBG: Native Tag String Derived from URL: $SVNTAG" ++ else ++ echo "DBG: Native Tag String Derived: $SVNTAG" ++ fi ++ ++ TAG=${SVNTAG} ++ ++ # Normalize the branch name portion to "D11" in case it has underscores in it ++ branch_name=`expr match "$TAG" '\(.*\)_\(BRANCH\|TWIG\|REL\)_.*'` ++ TAG=`echo $TAG | sed -e "s%^$branch_name%D11%"` ++ ++ # Split the tag into an array on underbar or whitespace boundaries. ++ IFS="_ " tag=(${TAG}) ++ unset IFS ++ ++ tagged=1 ++ if [ ${#tag[*]} -eq 0 ]; then ++ tag=(`date '+TOT REL %Y %m %d 0 %y'`); ++ # reconstruct a TAG from the date ++ TAG=${tag[0]}_${tag[1]}_${tag[2]}_${tag[3]}_${tag[4]}_${tag[5]} ++ tagged=0 ++ fi ++ ++ # Allow environment variable to override values. ++ # Missing values default to 0 ++ # ++ maj=${EPI_MAJOR_VERSION:-${tag[2]:-0}} ++ min=${EPI_MINOR_VERSION:-${tag[3]:-0}} ++ rcnum=${EPI_RC_NUMBER:-${tag[4]:-0}} ++ ++ # If increment field is 0, set it to date suffix if on TOB ++ if [ -n "$branchtag" ]; then ++ [ "${tag[5]:-0}" -eq 0 ] && echo "Using date suffix for incr" ++ today=`date '+%Y%m%d'` ++ incremental=${EPI_INCREMENTAL_NUMBER:-${tag[5]:-${today:-0}}} ++ else ++ incremental=${EPI_INCREMENTAL_NUMBER:-${tag[5]:-0}} ++ fi ++ origincr=${EPI_INCREMENTAL_NUMBER:-${tag[5]:-0}} ++ build=${EPI_BUILD_NUMBER:-0} ++ ++ # Strip 'RC' from front of rcnum if present ++ rcnum=${rcnum/#RC/} ++ ++ # strip leading zero off the number (otherwise they look like octal) ++ maj=${maj/#0/} ++ min=${min/#0/} ++ rcnum=${rcnum/#0/} ++ incremental=${incremental/#0/} ++ origincr=${origincr/#0/} ++ build=${build/#0/} ++ ++ # some numbers may now be null. replace with with zero. 
++ maj=${maj:-0} ++ min=${min:-0} ++ ++ rcnum=${rcnum:-0} ++ incremental=${incremental:-0} ++ origincr=${origincr:-0} ++ build=${build:-0} ++ ++ if [ ${tagged} -eq 1 ]; then ++ # vernum is 32chars max ++ vernum=`printf "0x%02x%02x%02x%02x" ${maj} ${min} ${rcnum} ${origincr}` ++ else ++ vernum=`printf "0x00%02x%02x%02x" ${tag[7]} ${min} ${rcnum}` ++ fi ++ ++ # make sure the size of vernum is under 32 bits. ++ # Otherwise, truncate. The string will keep full information. ++ vernum=${vernum:0:10} ++ ++ # build the string directly from the tag, irrespective of its length ++ # remove the name , the tag type, then replace all _ by . ++ tag_ver_str=${TAG/${tag[0]}_} ++ tag_ver_str=${tag_ver_str/${tag[1]}_} ++ tag_ver_str=${tag_ver_str//_/.} ++ ++ # record tag type ++ tagtype= ++ ++ if [ "${tag[1]}" = "BRANCH" -o "${tag[1]}" = "TWIG" ]; then ++ tagtype=" (TOB)" ++ echo "tag type: $tagtype" ++ fi ++ ++ echo "Effective version string: $tag_ver_str" ++ ++ if [ "$(uname -s)" == "Darwin" ]; then ++ # Mac does not like 2-digit numbers so convert the number to single ++ # digit. 
5.100 becomes 5.1 ++ if [ $min -gt 99 ]; then ++ minmac=`expr $min / 100` ++ else ++ minmac=$min ++ fi ++ epi_ver_dev="${maj}.${minmac}.0" ++ else ++ epi_ver_dev="${maj}.${min}.${rcnum}" ++ fi ++ ++ # Finally get version control revision number of (if any) ++ vc_version_num=$($svncmd info ${SRCBASE} 2> $NULL | awk -F': ' '/^Revision: /{printf "%s", $2}') ++ ++ # OK, go do it ++ echo "maj=${maj}, min=${min}, rc=${rcnum}, inc=${incremental}, build=${build}" ++ ++ sed \ ++ -e "s;@EPI_MAJOR_VERSION@;${maj};" \ ++ -e "s;@EPI_MINOR_VERSION@;${min};" \ ++ -e "s;@EPI_RC_NUMBER@;${rcnum};" \ ++ -e "s;@EPI_INCREMENTAL_NUMBER@;${incremental};" \ ++ -e "s;@EPI_BUILD_NUMBER@;${build};" \ ++ -e "s;@EPI_VERSION@;${maj}, ${min}, ${rcnum}, ${incremental};" \ ++ -e "s;@EPI_VERSION_STR@;${tag_ver_str};" \ ++ -e "s;@EPI_VERSION_TYPE@;${tagtype};" \ ++ -e "s;@VERSION_TYPE@;${tagtype};" \ ++ -e "s;@EPI_VERSION_NUM@;${vernum};" \ ++ -e "s;@EPI_VERSION_DEV@;${epi_ver_dev};" \ ++ -e "s;@VC_VERSION_NUM@;r${vc_version_num};" \ ++ < epivers.h.in > epivers.h ++ ++ # In shared workspaces across different platforms, ensure that ++ # windows generated file is made platform neutral without CRLF ++ if uname -s | egrep -i -q "cygwin"; then ++ dos2unix epivers.h > $NULL 2>&1 ++ fi ++fi # epivers.h +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/etioctl.h b/drivers/net/ethernet/broadcom/gmac/src/include/etioctl.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/etioctl.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/etioctl.h 2017-11-09 17:53:43.951293000 +0800 +@@ -0,0 +1,158 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. 
++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * BCM44XX Ethernet Windows device driver custom OID definitions. ++ * ++ * $Id: etioctl.h 322208 2012-03-20 01:53:23Z $ ++ */ ++ ++#ifndef _etioctl_h_ ++#define _etioctl_h_ ++ ++/* ++ * Minor kludge alert: ++ * Duplicate a few definitions that irelay requires from epiioctl.h here ++ * so caller doesn't have to include this file and epiioctl.h . ++ * If this grows any more, it would be time to move these irelay-specific ++ * definitions out of the epiioctl.h and into a separate driver common file. 
++ */ ++#ifndef EPICTRL_COOKIE ++#define EPICTRL_COOKIE 0xABADCEDE ++#endif ++ ++/* common ioctl definitions */ ++#define ETCUP 0 ++#define ETCDOWN 1 ++#define ETCLOOP 2 ++#define ETCDUMP 3 ++#define ETCSETMSGLEVEL 4 ++#define ETCPROMISC 5 ++#define ETCVAR 6 ++#define ETCSPEED 7 ++#define ETCPHYRD 9 ++#define ETCPHYWR 10 ++#define ETCQOS 11 ++#define ETCPHYRD2 12 ++#define ETCPHYWR2 13 ++#define ETCROBORD 14 ++#define ETCROBOWR 15 ++ ++/* ++ * A set of iovars defined for ET set/get ++ */ ++#define IOV_ET_POWER_SAVE_MODE 1 ++#define IOV_ET_CLEAR_DUMP 2 ++#define IOV_ET_ROBO_DEVID 3 ++#define IOV_PKTC 4 ++#define IOV_PKTCBND 5 ++#define IOV_COUNTERS 6 ++#define IOV_DUMP_CTF 7 ++ ++#if defined(linux) || defined(__ECOS) ++#define SIOCSETCUP (SIOCDEVPRIVATE + ETCUP) ++#define SIOCSETCDOWN (SIOCDEVPRIVATE + ETCDOWN) ++#define SIOCSETCLOOP (SIOCDEVPRIVATE + ETCLOOP) ++#define SIOCGETCDUMP (SIOCDEVPRIVATE + ETCDUMP) ++#define SIOCSETCSETMSGLEVEL (SIOCDEVPRIVATE + ETCSETMSGLEVEL) ++#define SIOCSETCPROMISC (SIOCDEVPRIVATE + ETCPROMISC) ++#define SIOCSETGETVAR (SIOCDEVPRIVATE + ETCVAR) ++#define SIOCSETCSPEED (SIOCDEVPRIVATE + ETCSPEED) ++#define SIOCTXGEN (SIOCDEVPRIVATE + 8) ++#define SIOCGETCPHYRD (SIOCDEVPRIVATE + ETCPHYRD) ++#define SIOCSETCPHYWR (SIOCDEVPRIVATE + ETCPHYWR) ++#define SIOCSETCQOS (SIOCDEVPRIVATE + ETCQOS) ++#define SIOCGETCPHYRD2 (SIOCDEVPRIVATE + ETCPHYRD2) ++#define SIOCSETCPHYWR2 (SIOCDEVPRIVATE + ETCPHYWR2) ++#define SIOCGETCROBORD (SIOCDEVPRIVATE + ETCROBORD) ++#define SIOCSETCROBOWR (SIOCDEVPRIVATE + ETCROBOWR) ++ ++/* structure to send a generic var set/get */ ++typedef struct et_var_s { ++ uint cmd; ++ uint set; ++ void *buf; ++ uint len; ++} et_var_t; ++ ++/* arg to SIOCTXGEN */ ++struct txg { ++ uint32 num; /* number of frames to send */ ++ uint32 delay; /* delay in microseconds between sending each */ ++ uint32 size; /* size of ether frame to send */ ++ uchar buf[1514]; /* starting ether frame data */ ++}; ++#endif /* linux */ ++ ++ ++#if 
defined(__NetBSD__) ++#define SIOCSETCUP _IOW('e', 0, struct ifreq) ++#define SIOCSETCDOWN _IOW('e', 1, struct ifreq) ++#define SIOCSETCLOOP _IOW('e', 2, struct ifreq) ++#define SIOCGETCDUMP _IOWR('e', 3, struct ifreq) ++#define SIOCSETCSETMSGLEVEL _IOW('e', 4, struct ifreq) ++#define SIOCSETCPROMISC _IOW('e', 5, struct ifreq) ++#define SIOCSETCTXDOWN _IOW('e', 6, struct ifreq) /* obsolete */ ++#define SIOCSETCSPEED _IOW('e', 7, struct ifreq) ++#define SIOCTXGEN _IOW('e', 8, struct ifreq) ++#define SIOCGETCPHYRD _IOWR('e', 9, struct ifreq) ++#define SIOCSETCPHYWR _IOW('e', 10, struct ifreq) ++#define SIOCSETCQOS _IOW('e', 11, struct ifreq) ++#define SIOCGETCPHYRD2 _IOWR('e', 12, struct ifreq) ++#define SIOCSETCPHYWR2 _IOW('e', 13, struct ifreq) ++#define SIOCGETCROBORD _IOWR('e', 14, struct ifreq) ++#define SIOCSETCROBOWR _IOW('e', 15, struct ifreq) ++ ++/* arg to SIOCTXGEN */ ++struct txg { ++ uint32 num; /* number of frames to send */ ++ uint32 delay; /* delay in microseconds between sending each */ ++ uint32 size; /* size of ether frame to send */ ++ uchar buf[1514]; /* starting ether frame data */ ++}; ++#endif /* __NetBSD__ */ ++ ++/* ++ * custom OID support ++ * ++ * 0xFF - implementation specific OID ++ * 0xE4 - first byte of Broadcom PCI vendor ID ++ * 0x14 - second byte of Broadcom PCI vendor ID ++ * 0xXX - the custom OID number ++ */ ++#define ET_OID_BASE 0xFFE41400 /* OID Base for ET */ ++ ++#define OID_ET_UP (ET_OID_BASE + ETCUP) ++#define OID_ET_DOWN (ET_OID_BASE + ETCDOWN) ++#define OID_ET_LOOP (ET_OID_BASE + ETCLOOP) ++#define OID_ET_DUMP (ET_OID_BASE + ETCDUMP) ++#define OID_ET_SETMSGLEVEL (ET_OID_BASE + ETCSETMSGLEVEL) ++#define OID_ET_PROMISC (ET_OID_BASE + ETCPROMISC) ++#define OID_ET_TXDOWN (ET_OID_BASE + 6) ++#define OID_ET_SPEED (ET_OID_BASE + ETCSPEED) ++#define OID_ET_GETINSTANCE (ET_OID_BASE + 8) ++#define OID_ET_SETCALLBACK (ET_OID_BASE + 9) ++#define OID_ET_UNSETCALLBACK (ET_OID_BASE + 10) ++ ++#define IS_ET_OID(oid) (((oid) & 0xFFFFFF00) 
== 0xFFE41400) ++ ++#define ET_ISQUERYOID(oid) ((oid == OID_ET_DUMP) || (oid == OID_ET_GETINSTANCE)) ++ ++/* OID_ET_SETCALLBACK data type */ ++typedef struct et_cb { ++ void (*fn)(void *, int); /* Callback function */ ++ void *context; /* Passed to callback function */ ++} et_cb_t; ++ ++#endif /* _etioctl_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/gmac_common.h b/drivers/net/ethernet/broadcom/gmac/src/include/gmac_common.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/gmac_common.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/gmac_common.h 2017-11-09 17:53:43.959311000 +0800 +@@ -0,0 +1,560 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * gmacdefs - Broadcom gmac (Unimac) specific definitions ++ * ++ * $Id: gmac_common.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _gmac_common_core_h_ ++#define _gmac_common_core_h_ ++ ++#ifndef PAD ++#define _PADLINE(line) pad ## line ++#define _XSTR(line) _PADLINE(line) ++#define PAD XSTR(__LINE__) ++#endif ++ ++typedef volatile struct _gmac_commonregs { ++ uint32 stag0; ++ uint32 stag1; ++ uint32 stag2; ++ uint32 stag3; ++ uint32 PAD[4]; ++ uint32 parsercontrol; ++ uint32 mib_max_len; ++ uint32 PAD[54]; ++ uint32 phyaccess; ++ uint32 phycontrol; ++ uint32 PAD[2]; ++ uint32 gmac0_rgmii_cntl; ++ uint32 PAD[59]; ++ uint32 cfp_access; ++ uint32 PAD[3]; ++ uint32 cfp_tcam_data0; ++ uint32 cfp_tcam_data1; ++ uint32 cfp_tcam_data2; ++ uint32 cfp_tcam_data3; ++ uint32 cfp_tcam_data4; ++ uint32 cfp_tcam_data5; ++ uint32 cfp_tcam_data6; ++ uint32 cfp_tcam_data7; ++ uint32 cfp_tcam_mask0; ++ uint32 cfp_tcam_mask1; ++ uint32 cfp_tcam_mask2; ++ uint32 cfp_tcam_mask3; ++ uint32 cfp_tcam_mask4; ++ uint32 cfp_tcam_mask5; ++ uint32 cfp_tcam_mask6; ++ uint32 cfp_tcam_mask7; ++ uint32 cfp_action_data; ++ uint32 PAD[19]; ++ uint32 tcam_bist_cntl; ++ uint32 tcam_bist_status; ++ uint32 tcam_cmp_status; ++ uint32 tcam_disable; ++ uint32 PAD[16]; ++ uint32 tcam_test_cntl; ++ uint32 PAD[3]; ++ uint32 udf_0_a3_a0; ++ uint32 udf_0_a7_a4; ++ uint32 udf_0_a8; ++ uint32 PAD[1]; ++ uint32 udf_1_a3_a0; ++ uint32 udf_1_a7_a4; ++ uint32 udf_1_a8; ++ uint32 PAD[1]; ++ uint32 udf_2_a3_a0; ++ uint32 udf_2_a7_a4; ++ uint32 udf_2_a8; ++ uint32 PAD[1]; ++ uint32 udf_0_b3_b0; ++ uint32 udf_0_b7_b4; ++ uint32 udf_0_b8; ++ uint32 PAD[1]; ++ uint32 udf_1_b3_b0; ++ uint32 udf_1_b7_b4; ++ uint32 udf_1_b8; ++ uint32 PAD[1]; ++ uint32 udf_2_b3_b0; ++ uint32 udf_2_b7_b4; ++ uint32 udf_2_b8; ++ uint32 PAD[1]; ++ uint32 udf_0_c3_c0; ++ uint32 udf_0_c7_c4; ++ uint32 udf_0_c8; ++ uint32 PAD[1]; ++ uint32 udf_1_c3_c0; ++ uint32 udf_1_c7_c4; ++ uint32 udf_1_c8; ++ uint32 PAD[1]; ++ uint32 udf_2_c3_c0; 
++ uint32 udf_2_c7_c4; ++ uint32 udf_2_c8; ++ uint32 PAD[1]; ++ uint32 udf_0_d3_d0; ++ uint32 udf_0_d7_d4; ++ uint32 udf_0_d11_d8; ++} gmac_commonregs_t; ++ ++/* stag0 offset0x0 */ ++#define STAG0_TPID_SHIFT 0 ++#define STAG0_TPID_MASK 0xffff ++ ++/* stag1 offset0x4 */ ++#define STAG1_TPID_SHIFT 0 ++#define STAG1_TPID_MASK 0xffff ++ ++/* stag2 offset0x8 */ ++#define STAG2_TPID_SHIFT 0 ++#define STAG2_TPID_MASK 0xffff ++ ++/* stag3 offset0xc */ ++#define STAG3_TPID_SHIFT 0 ++#define STAG3_TPID_MASK 0xffff ++ ++/* parsercontrol offset0x20 */ ++#define PARSERCONTROL_MAX_PARSER_LEN_TH_SHIFT 0 ++#define PARSERCONTROL_MAX_PARSER_LEN_TH_MASK 0x3fff ++ ++/* mib_max_len offset0x24 */ ++#define MIB_MAX_LEN_MIB_MAX_LEN_SHIFT 0 ++#define MIB_MAX_LEN_MIB_MAX_LEN_MASK 0x3fff ++ ++/* phyaccess offset0x100 */ ++#define PHYACCESS_TRIGGER_SHIFT 30 ++#define PHYACCESS_TRIGGER_MASK 0x40000000 ++#define PHYACCESS_WR_CMD_SHIFT 29 ++#define PHYACCESS_WR_CMD_MASK 0x20000000 ++#define PHYACCESS_CPU_REG_ADDR_SHIFT 24 ++#define PHYACCESS_CPU_REG_ADDR_MASK 0x1f000000 ++#define PHYACCESS_CPU_PHY_ADDR_SHIFT 16 ++#define PHYACCESS_CPU_PHY_ADDR_MASK 0x1f0000 ++#define PHYACCESS_ACC_DATA_SHIFT 0 ++#define PHYACCESS_ACC_DATA_MASK 0xffff ++ ++/* phycontrol offset0x104 */ ++#define PHYCONTROL_SD_ACCESS_EN_SHIFT 25 ++#define PHYCONTROL_SD_ACCESS_EN_MASK 0x2000000 ++#define PHYCONTROL_NWAY_AUTO_POLLING_EN_SHIFT 24 ++#define PHYCONTROL_NWAY_AUTO_POLLING_EN_MASK 0x1000000 ++#define PHYCONTROL_MDC_TRANSITION_EN_SHIFT 23 ++#define PHYCONTROL_MDC_TRANSITION_EN_MASK 0x800000 ++#define PHYCONTROL_MDC_CYCLE_TH_SHIFT 16 ++#define PHYCONTROL_MDC_CYCLE_TH_MASK 0x7f0000 ++#define PHYCONTROL_EXT_PHY_ADDR_SHIFT 0 ++#define PHYCONTROL_EXT_PHY_ADDR_MASK 0x1f ++ ++/* gmac0_rgmii_cntl offset0x110 */ ++#define GMAC0_RGMII_CNTL_TIMING_SEL_SHIFT 0 ++#define GMAC0_RGMII_CNTL_TIMING_SEL_MASK 0x1 ++#define GMAC0_RGMII_CNTL_RGMII_DLL_RXC_BYPASS_SHIFT 1 ++#define GMAC0_RGMII_CNTL_RGMII_DLL_RXC_BYPASS_MASK 0x2 ++#define 
GMAC0_RGMII_CNTL_BYPASS_2NS_DEL_SHIFT 2 ++#define GMAC0_RGMII_CNTL_BYPASS_2NS_DEL_MASK 0x4 ++#define GMAC0_RGMII_CNTL_DEL_STRB_SHIFT 3 ++#define GMAC0_RGMII_CNTL_DEL_STRB_MASK 0x8 ++#define GMAC0_RGMII_CNTL_DEL_VALUE_SHIFT 4 ++#define GMAC0_RGMII_CNTL_DEL_VALUE_MASK 0x70 ++#define GMAC0_RGMII_CNTL_DEL_ADDR_SHIFT 7 ++#define GMAC0_RGMII_CNTL_DEL_ADDR_MASK 0x780 ++ ++/* cfp_access offset0x200 */ ++#define CFP_ACCESS_OP_START_DONE_SHIFT 0 ++#define CFP_ACCESS_OP_START_DONE_MASK 0x1 ++#define CFP_ACCESS_OP_SEL_SHIFT 1 ++#define CFP_ACCESS_OP_SEL_MASK 0xe ++#define CFP_ACCESS_CFP_RAM_CLEAR_SHIFT 4 ++#define CFP_ACCESS_CFP_RAM_CLEAR_MASK 0x10 ++#define CFP_ACCESS_RESERVED1_SHIFT 5 ++#define CFP_ACCESS_RESERVED1_MASK 0x3e0 ++#define CFP_ACCESS_RAM_SEL_SHIFT 10 ++#define CFP_ACCESS_RAM_SEL_MASK 0x7c00 ++#define CFP_ACCESS_TCAM_RESET_SHIFT 15 ++#define CFP_ACCESS_TCAM_RESET_MASK 0x8000 ++#define CFP_ACCESS_XCESS_ADDR_SHIFT 16 ++#define CFP_ACCESS_XCESS_ADDR_MASK 0x1ff0000 ++#define CFP_ACCESS_RESERVED0_SHIFT 25 ++#define CFP_ACCESS_RESERVED0_MASK 0xe000000 ++#define CFP_ACCESS_RD_STATUS_SHIFT 28 ++#define CFP_ACCESS_RD_STATUS_MASK 0xf0000000 ++ ++/* cfp_tcam_data0 offset0x210 */ ++#define CFP_TCAM_DATA0_DATA_SHIFT 0 ++#define CFP_TCAM_DATA0_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_data1 offset0x214 */ ++#define CFP_TCAM_DATA1_DATA_SHIFT 0 ++#define CFP_TCAM_DATA1_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_data2 offset0x218 */ ++#define CFP_TCAM_DATA2_DATA_SHIFT 0 ++#define CFP_TCAM_DATA2_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_data3 offset0x21c */ ++#define CFP_TCAM_DATA3_DATA_SHIFT 0 ++#define CFP_TCAM_DATA3_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_data4 offset0x220 */ ++#define CFP_TCAM_DATA4_DATA_SHIFT 0 ++#define CFP_TCAM_DATA4_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_data5 offset0x224 */ ++#define CFP_TCAM_DATA5_DATA_SHIFT 0 ++#define CFP_TCAM_DATA5_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_data6 offset0x228 */ ++#define CFP_TCAM_DATA6_DATA_SHIFT 0 ++#define CFP_TCAM_DATA6_DATA_MASK 
0xffffffff ++ ++/* cfp_tcam_data7 offset0x22c */ ++#define CFP_TCAM_DATA7_DATA_SHIFT 0 ++#define CFP_TCAM_DATA7_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_mask0 offset0x230 */ ++#define CFP_TCAM_MASK0_DATA_SHIFT 0 ++#define CFP_TCAM_MASK0_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_mask1 offset0x234 */ ++#define CFP_TCAM_MASK1_DATA_SHIFT 0 ++#define CFP_TCAM_MASK1_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_mask2 offset0x238 */ ++#define CFP_TCAM_MASK2_DATA_SHIFT 0 ++#define CFP_TCAM_MASK2_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_mask3 offset0x23c */ ++#define CFP_TCAM_MASK3_DATA_SHIFT 0 ++#define CFP_TCAM_MASK3_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_mask4 offset0x240 */ ++#define CFP_TCAM_MASK4_DATA_SHIFT 0 ++#define CFP_TCAM_MASK4_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_mask5 offset0x244 */ ++#define CFP_TCAM_MASK5_DATA_SHIFT 0 ++#define CFP_TCAM_MASK5_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_mask6 offset0x248 */ ++#define CFP_TCAM_MASK6_DATA_SHIFT 0 ++#define CFP_TCAM_MASK6_DATA_MASK 0xffffffff ++ ++/* cfp_tcam_mask7 offset0x24c */ ++#define CFP_TCAM_MASK7_DATA_SHIFT 0 ++#define CFP_TCAM_MASK7_DATA_MASK 0xffffffff ++ ++/* cfp_action_data offset0x250 */ ++#define CFP_ACTION_DATA_CHAINID_SHIFT 0 ++#define CFP_ACTION_DATA_CHAINID_MASK 0xff ++#define CFP_ACTION_DATA_CHANNELID_SHIFT 8 ++#define CFP_ACTION_DATA_CHANNELID_MASK 0xf00 ++#define CFP_ACTION_DATA_DROP_SHIFT 12 ++#define CFP_ACTION_DATA_DROP_MASK 0x1000 ++#define CFP_ACTION_DATA_RESERVED_SHIFT 13 ++#define CFP_ACTION_DATA_RESERVED_MASK 0xffffe000 ++ ++/* tcam_bist_cntl offset0x2a0 */ ++#define TCAM_BIST_CNTL_TCAM_BIST_EN_SHIFT 0 ++#define TCAM_BIST_CNTL_TCAM_BIST_EN_MASK 0x1 ++#define TCAM_BIST_CNTL_TCAM_BIST_TCAM_SEL_SHIFT 1 ++#define TCAM_BIST_CNTL_TCAM_BIST_TCAM_SEL_MASK 0x6 ++#define TCAM_BIST_CNTL_RESERVED1_SHIFT 3 ++#define TCAM_BIST_CNTL_RESERVED1_MASK 0x8 ++#define TCAM_BIST_CNTL_TCAM_BIST_STATUS_SEL_SHIFT 4 ++#define TCAM_BIST_CNTL_TCAM_BIST_STATUS_SEL_MASK 0xf0 ++#define TCAM_BIST_CNTL_TCAM_BIST_SKIP_ERR_CNT_SHIFT 8 
++#define TCAM_BIST_CNTL_TCAM_BIST_SKIP_ERR_CNT_MASK 0xff00 ++#define TCAM_BIST_CNTL_TCAM_TEST_COMPARE_SHIFT 16 ++#define TCAM_BIST_CNTL_TCAM_TEST_COMPARE_MASK 0x10000 ++#define TCAM_BIST_CNTL_RESERVED_SHIFT 17 ++#define TCAM_BIST_CNTL_RESERVED_MASK 0x7ffe0000 ++#define TCAM_BIST_CNTL_TCAM_BIST_DONE_SHIFT 31 ++#define TCAM_BIST_CNTL_TCAM_BIST_DONE_MASK 0x80000000 ++ ++/* tcam_bist_status offset0x2a4 */ ++#define TCAM_BIST_STATUS_TCAM_BIST_STATUS_SHIFT 0 ++#define TCAM_BIST_STATUS_TCAM_BIST_STATUS_MASK 0xffff ++#define TCAM_BIST_STATUS_RESERVED_SHIFT 16 ++#define TCAM_BIST_STATUS_RESERVED_MASK 0xffff0000 ++ ++/* tcam_cmp_status offset0x2a8 */ ++#define TCAM_CMP_STATUS_TCAM_HIT_ADDR_SHIFT 0 ++#define TCAM_CMP_STATUS_TCAM_HIT_ADDR_MASK 0x1ff ++#define TCAM_CMP_STATUS_RESERVED2_SHIFT 9 ++#define TCAM_CMP_STATUS_RESERVED2_MASK 0x7e00 ++#define TCAM_CMP_STATUS_TCAM_HIT_SHIFT 15 ++#define TCAM_CMP_STATUS_TCAM_HIT_MASK 0x8000 ++#define TCAM_CMP_STATUS_RESERVED1_SHIFT 16 ++#define TCAM_CMP_STATUS_RESERVED1_MASK 0xffff0000 ++ ++/* tcam_disable offset0x2ac */ ++#define TCAM_DISABLE_TCAM_DISABLE_SHIFT 0 ++#define TCAM_DISABLE_TCAM_DISABLE_MASK 0xf ++#define TCAM_DISABLE_RESERVED_SHIFT 4 ++#define TCAM_DISABLE_RESERVED_MASK 0xfffffff0 ++ ++/* tcam_test_cntl offset0x2f0 */ ++#define TCAM_TEST_CNTL_TCAM_TEST_CNTL_SHIFT 0 ++#define TCAM_TEST_CNTL_TCAM_TEST_CNTL_MASK 0x7ff ++#define TCAM_TEST_CNTL_RESERVED_SHIFT 11 ++#define TCAM_TEST_CNTL_RESERVED_MASK 0xfffff800 ++ ++/* udf_0_a3_a0 offset0x300 */ ++#define UDF_0_A3_A0_CFG_UDF_0_A0_SHIFT 0 ++#define UDF_0_A3_A0_CFG_UDF_0_A0_MASK 0xff ++#define UDF_0_A3_A0_CFG_UDF_0_A1_SHIFT 8 ++#define UDF_0_A3_A0_CFG_UDF_0_A1_MASK 0xff00 ++#define UDF_0_A3_A0_CFG_UDF_0_A2_SHIFT 16 ++#define UDF_0_A3_A0_CFG_UDF_0_A2_MASK 0xff0000 ++#define UDF_0_A3_A0_CFG_UDF_0_A3_SHIFT 24 ++#define UDF_0_A3_A0_CFG_UDF_0_A3_MASK 0xff000000 ++ ++/* udf_0_a7_a4 offset0x304 */ ++#define UDF_0_A7_A4_CFG_UDF_0_A4_SHIFT 0 ++#define UDF_0_A7_A4_CFG_UDF_0_A4_MASK 0xff 
++#define UDF_0_A7_A4_CFG_UDF_0_A5_SHIFT 8 ++#define UDF_0_A7_A4_CFG_UDF_0_A5_MASK 0xff00 ++#define UDF_0_A7_A4_CFG_UDF_0_A6_SHIFT 16 ++#define UDF_0_A7_A4_CFG_UDF_0_A6_MASK 0xff0000 ++#define UDF_0_A7_A4_CFG_UDF_0_A7_SHIFT 24 ++#define UDF_0_A7_A4_CFG_UDF_0_A7_MASK 0xff000000 ++ ++/* udf_0_a8 offset0x308 */ ++#define UDF_0_A8_CFG_UDF_0_A8_SHIFT 0 ++#define UDF_0_A8_CFG_UDF_0_A8_MASK 0xff ++ ++/* udf_1_a3_a0 offset0x310 */ ++#define UDF_1_A3_A0_CFG_UDF_1_A0_SHIFT 0 ++#define UDF_1_A3_A0_CFG_UDF_1_A0_MASK 0xff ++#define UDF_1_A3_A0_CFG_UDF_1_A1_SHIFT 8 ++#define UDF_1_A3_A0_CFG_UDF_1_A1_MASK 0xff00 ++#define UDF_1_A3_A0_CFG_UDF_1_A2_SHIFT 16 ++#define UDF_1_A3_A0_CFG_UDF_1_A2_MASK 0xff0000 ++#define UDF_1_A3_A0_CFG_UDF_1_A3_SHIFT 24 ++#define UDF_1_A3_A0_CFG_UDF_1_A3_MASK 0xff000000 ++ ++/* udf_1_a7_a4 offset0x314 */ ++#define UDF_1_A7_A4_CFG_UDF_1_A4_SHIFT 0 ++#define UDF_1_A7_A4_CFG_UDF_1_A4_MASK 0xff ++#define UDF_1_A7_A4_CFG_UDF_1_A5_SHIFT 8 ++#define UDF_1_A7_A4_CFG_UDF_1_A5_MASK 0xff00 ++#define UDF_1_A7_A4_CFG_UDF_1_A6_SHIFT 16 ++#define UDF_1_A7_A4_CFG_UDF_1_A6_MASK 0xff0000 ++#define UDF_1_A7_A4_CFG_UDF_1_A7_SHIFT 24 ++#define UDF_1_A7_A4_CFG_UDF_1_A7_MASK 0xff000000 ++ ++/* udf_1_a8 offset0x318 */ ++#define UDF_1_A8_CFG_UDF_1_A8_SHIFT 0 ++#define UDF_1_A8_CFG_UDF_1_A8_MASK 0xff ++ ++/* udf_2_a3_a0 offset0x320 */ ++#define UDF_2_A3_A0_CFG_UDF_2_A0_SHIFT 0 ++#define UDF_2_A3_A0_CFG_UDF_2_A0_MASK 0xff ++#define UDF_2_A3_A0_CFG_UDF_2_A1_SHIFT 8 ++#define UDF_2_A3_A0_CFG_UDF_2_A1_MASK 0xff00 ++#define UDF_2_A3_A0_CFG_UDF_2_A2_SHIFT 16 ++#define UDF_2_A3_A0_CFG_UDF_2_A2_MASK 0xff0000 ++#define UDF_2_A3_A0_CFG_UDF_2_A3_SHIFT 24 ++#define UDF_2_A3_A0_CFG_UDF_2_A3_MASK 0xff000000 ++ ++/* udf_2_a7_a4 offset0x324 */ ++#define UDF_2_A7_A4_CFG_UDF_2_A4_SHIFT 0 ++#define UDF_2_A7_A4_CFG_UDF_2_A4_MASK 0xff ++#define UDF_2_A7_A4_CFG_UDF_2_A5_SHIFT 8 ++#define UDF_2_A7_A4_CFG_UDF_2_A5_MASK 0xff00 ++#define UDF_2_A7_A4_CFG_UDF_2_A6_SHIFT 16 ++#define 
UDF_2_A7_A4_CFG_UDF_2_A6_MASK 0xff0000 ++#define UDF_2_A7_A4_CFG_UDF_2_A7_SHIFT 24 ++#define UDF_2_A7_A4_CFG_UDF_2_A7_MASK 0xff000000 ++ ++/* udf_2_a8 offset0x328 */ ++#define UDF_2_A8_CFG_UDF_2_A8_SHIFT 0 ++#define UDF_2_A8_CFG_UDF_2_A8_MASK 0xff ++ ++/* udf_0_b3_b0 offset0x330 */ ++#define UDF_0_B3_B0_CFG_UDF_0_B0_SHIFT 0 ++#define UDF_0_B3_B0_CFG_UDF_0_B0_MASK 0xff ++#define UDF_0_B3_B0_CFG_UDF_0_B1_SHIFT 8 ++#define UDF_0_B3_B0_CFG_UDF_0_B1_MASK 0xff00 ++#define UDF_0_B3_B0_CFG_UDF_0_B2_SHIFT 16 ++#define UDF_0_B3_B0_CFG_UDF_0_B2_MASK 0xff0000 ++#define UDF_0_B3_B0_CFG_UDF_0_B3_SHIFT 24 ++#define UDF_0_B3_B0_CFG_UDF_0_B3_MASK 0xff000000 ++ ++/* udf_0_b7_b4 offset0x334 */ ++#define UDF_0_B7_B4_CFG_UDF_0_B4_SHIFT 0 ++#define UDF_0_B7_B4_CFG_UDF_0_B4_MASK 0xff ++#define UDF_0_B7_B4_CFG_UDF_0_B5_SHIFT 8 ++#define UDF_0_B7_B4_CFG_UDF_0_B5_MASK 0xff00 ++#define UDF_0_B7_B4_CFG_UDF_0_B6_SHIFT 16 ++#define UDF_0_B7_B4_CFG_UDF_0_B6_MASK 0xff0000 ++#define UDF_0_B7_B4_CFG_UDF_0_B7_SHIFT 24 ++#define UDF_0_B7_B4_CFG_UDF_0_B7_MASK 0xff000000 ++ ++/* udf_0_b8 offset0x338 */ ++#define UDF_0_B8_CFG_UDF_0_B8_SHIFT 0 ++#define UDF_0_B8_CFG_UDF_0_B8_MASK 0xff ++ ++/* udf_1_b3_b0 offset0x340 */ ++#define UDF_1_B3_B0_CFG_UDF_1_B0_SHIFT 0 ++#define UDF_1_B3_B0_CFG_UDF_1_B0_MASK 0xff ++#define UDF_1_B3_B0_CFG_UDF_1_B1_SHIFT 8 ++#define UDF_1_B3_B0_CFG_UDF_1_B1_MASK 0xff00 ++#define UDF_1_B3_B0_CFG_UDF_1_B2_SHIFT 16 ++#define UDF_1_B3_B0_CFG_UDF_1_B2_MASK 0xff0000 ++#define UDF_1_B3_B0_CFG_UDF_1_B3_SHIFT 24 ++#define UDF_1_B3_B0_CFG_UDF_1_B3_MASK 0xff000000 ++ ++/* udf_1_b7_b4 offset0x344 */ ++#define UDF_1_B7_B4_CFG_UDF_1_B4_SHIFT 0 ++#define UDF_1_B7_B4_CFG_UDF_1_B4_MASK 0xff ++#define UDF_1_B7_B4_CFG_UDF_1_B5_SHIFT 8 ++#define UDF_1_B7_B4_CFG_UDF_1_B5_MASK 0xff00 ++#define UDF_1_B7_B4_CFG_UDF_1_B6_SHIFT 16 ++#define UDF_1_B7_B4_CFG_UDF_1_B6_MASK 0xff0000 ++#define UDF_1_B7_B4_CFG_UDF_1_B7_SHIFT 24 ++#define UDF_1_B7_B4_CFG_UDF_1_B7_MASK 0xff000000 ++ ++/* udf_1_b8 offset0x348 */ 
++#define UDF_1_B8_CFG_UDF_1_B8_SHIFT 0 ++#define UDF_1_B8_CFG_UDF_1_B8_MASK 0xff ++ ++/* udf_2_b3_b0 offset0x350 */ ++#define UDF_2_B3_B0_CFG_UDF_2_B0_SHIFT 0 ++#define UDF_2_B3_B0_CFG_UDF_2_B0_MASK 0xff ++#define UDF_2_B3_B0_CFG_UDF_2_B1_SHIFT 8 ++#define UDF_2_B3_B0_CFG_UDF_2_B1_MASK 0xff00 ++#define UDF_2_B3_B0_CFG_UDF_2_B2_SHIFT 16 ++#define UDF_2_B3_B0_CFG_UDF_2_B2_MASK 0xff0000 ++#define UDF_2_B3_B0_CFG_UDF_2_B3_SHIFT 24 ++#define UDF_2_B3_B0_CFG_UDF_2_B3_MASK 0xff000000 ++ ++/* udf_2_b7_b4 offset0x354 */ ++#define UDF_2_B7_B4_CFG_UDF_2_B4_SHIFT 0 ++#define UDF_2_B7_B4_CFG_UDF_2_B4_MASK 0xff ++#define UDF_2_B7_B4_CFG_UDF_2_B5_SHIFT 8 ++#define UDF_2_B7_B4_CFG_UDF_2_B5_MASK 0xff00 ++#define UDF_2_B7_B4_CFG_UDF_2_B6_SHIFT 16 ++#define UDF_2_B7_B4_CFG_UDF_2_B6_MASK 0xff0000 ++#define UDF_2_B7_B4_CFG_UDF_2_B7_SHIFT 24 ++#define UDF_2_B7_B4_CFG_UDF_2_B7_MASK 0xff000000 ++ ++/* udf_2_b8 offset0x358 */ ++#define UDF_2_B8_CFG_UDF_2_B8_SHIFT 0 ++#define UDF_2_B8_CFG_UDF_2_B8_MASK 0xff ++ ++/* udf_0_c3_c0 offset0x360 */ ++#define UDF_0_C3_C0_CFG_UDF_0_C0_SHIFT 0 ++#define UDF_0_C3_C0_CFG_UDF_0_C0_MASK 0xff ++#define UDF_0_C3_C0_CFG_UDF_0_C1_SHIFT 8 ++#define UDF_0_C3_C0_CFG_UDF_0_C1_MASK 0xff00 ++#define UDF_0_C3_C0_CFG_UDF_0_C2_SHIFT 16 ++#define UDF_0_C3_C0_CFG_UDF_0_C2_MASK 0xff0000 ++#define UDF_0_C3_C0_CFG_UDF_0_C3_SHIFT 24 ++#define UDF_0_C3_C0_CFG_UDF_0_C3_MASK 0xff000000 ++ ++/* udf_0_c7_c4 offset0x364 */ ++#define UDF_0_C7_C4_CFG_UDF_0_C4_SHIFT 0 ++#define UDF_0_C7_C4_CFG_UDF_0_C4_MASK 0xff ++#define UDF_0_C7_C4_CFG_UDF_0_C5_SHIFT 8 ++#define UDF_0_C7_C4_CFG_UDF_0_C5_MASK 0xff00 ++#define UDF_0_C7_C4_CFG_UDF_0_C6_SHIFT 16 ++#define UDF_0_C7_C4_CFG_UDF_0_C6_MASK 0xff0000 ++#define UDF_0_C7_C4_CFG_UDF_0_C7_SHIFT 24 ++#define UDF_0_C7_C4_CFG_UDF_0_C7_MASK 0xff000000 ++ ++/* udf_0_c8 offset0x368 */ ++#define UDF_0_C8_CFG_UDF_0_C8_SHIFT 0 ++#define UDF_0_C8_CFG_UDF_0_C8_MASK 0xff ++ ++/* udf_1_c3_c0 offset0x370 */ ++#define UDF_1_C3_C0_CFG_UDF_1_C0_SHIFT 0 
++#define UDF_1_C3_C0_CFG_UDF_1_C0_MASK 0xff ++#define UDF_1_C3_C0_CFG_UDF_1_C1_SHIFT 8 ++#define UDF_1_C3_C0_CFG_UDF_1_C1_MASK 0xff00 ++#define UDF_1_C3_C0_CFG_UDF_1_C2_SHIFT 16 ++#define UDF_1_C3_C0_CFG_UDF_1_C2_MASK 0xff0000 ++#define UDF_1_C3_C0_CFG_UDF_1_C3_SHIFT 24 ++#define UDF_1_C3_C0_CFG_UDF_1_C3_MASK 0xff000000 ++ ++/* udf_1_c7_c4 offset0x374 */ ++#define UDF_1_C7_C4_CFG_UDF_1_C4_SHIFT 0 ++#define UDF_1_C7_C4_CFG_UDF_1_C4_MASK 0xff ++#define UDF_1_C7_C4_CFG_UDF_1_C5_SHIFT 8 ++#define UDF_1_C7_C4_CFG_UDF_1_C5_MASK 0xff00 ++#define UDF_1_C7_C4_CFG_UDF_1_C6_SHIFT 16 ++#define UDF_1_C7_C4_CFG_UDF_1_C6_MASK 0xff0000 ++#define UDF_1_C7_C4_CFG_UDF_1_C7_SHIFT 24 ++#define UDF_1_C7_C4_CFG_UDF_1_C7_MASK 0xff000000 ++ ++/* udf_1_c8 offset0x378 */ ++#define UDF_1_C8_CFG_UDF_1_C8_SHIFT 0 ++#define UDF_1_C8_CFG_UDF_1_C8_MASK 0xff ++ ++/* udf_2_c3_c0 offset0x380 */ ++#define UDF_2_C3_C0_CFG_UDF_2_C0_SHIFT 0 ++#define UDF_2_C3_C0_CFG_UDF_2_C0_MASK 0xff ++#define UDF_2_C3_C0_CFG_UDF_2_C1_SHIFT 8 ++#define UDF_2_C3_C0_CFG_UDF_2_C1_MASK 0xff00 ++#define UDF_2_C3_C0_CFG_UDF_2_C2_SHIFT 16 ++#define UDF_2_C3_C0_CFG_UDF_2_C2_MASK 0xff0000 ++#define UDF_2_C3_C0_CFG_UDF_2_C3_SHIFT 24 ++#define UDF_2_C3_C0_CFG_UDF_2_C3_MASK 0xff000000 ++ ++/* udf_2_c7_c4 offset0x384 */ ++#define UDF_2_C7_C4_CFG_UDF_2_C4_SHIFT 0 ++#define UDF_2_C7_C4_CFG_UDF_2_C4_MASK 0xff ++#define UDF_2_C7_C4_CFG_UDF_2_C5_SHIFT 8 ++#define UDF_2_C7_C4_CFG_UDF_2_C5_MASK 0xff00 ++#define UDF_2_C7_C4_CFG_UDF_2_C6_SHIFT 16 ++#define UDF_2_C7_C4_CFG_UDF_2_C6_MASK 0xff0000 ++#define UDF_2_C7_C4_CFG_UDF_2_C7_SHIFT 24 ++#define UDF_2_C7_C4_CFG_UDF_2_C7_MASK 0xff000000 ++ ++/* udf_2_c8 offset0x388 */ ++#define UDF_2_C8_CFG_UDF_2_C8_SHIFT 0 ++#define UDF_2_C8_CFG_UDF_2_C8_MASK 0xff ++ ++/* udf_0_d3_d0 offset0x390 */ ++#define UDF_0_D3_D0_CFG_UDF_0_D0_SHIFT 0 ++#define UDF_0_D3_D0_CFG_UDF_0_D0_MASK 0xff ++#define UDF_0_D3_D0_CFG_UDF_0_D1_SHIFT 8 ++#define UDF_0_D3_D0_CFG_UDF_0_D1_MASK 0xff00 ++#define 
UDF_0_D3_D0_CFG_UDF_0_D2_SHIFT 16 ++#define UDF_0_D3_D0_CFG_UDF_0_D2_MASK 0xff0000 ++#define UDF_0_D3_D0_CFG_UDF_0_D3_SHIFT 24 ++#define UDF_0_D3_D0_CFG_UDF_0_D3_MASK 0xff000000 ++ ++/* udf_0_d7_d4 offset0x394 */ ++#define UDF_0_D7_D4_CFG_UDF_0_D4_SHIFT 0 ++#define UDF_0_D7_D4_CFG_UDF_0_D4_MASK 0xff ++#define UDF_0_D7_D4_CFG_UDF_0_D5_SHIFT 8 ++#define UDF_0_D7_D4_CFG_UDF_0_D5_MASK 0xff00 ++#define UDF_0_D7_D4_CFG_UDF_0_D6_SHIFT 16 ++#define UDF_0_D7_D4_CFG_UDF_0_D6_MASK 0xff0000 ++#define UDF_0_D7_D4_CFG_UDF_0_D7_SHIFT 24 ++#define UDF_0_D7_D4_CFG_UDF_0_D7_MASK 0xff000000 ++ ++/* udf_0_d11_d8 offset0x398 */ ++#define UDF_0_D11_D8_CFG_UDF_0_D8_SHIFT 0 ++#define UDF_0_D11_D8_CFG_UDF_0_D8_MASK 0xff ++#define UDF_0_D11_D8_CFG_UDF_0_D9_SHIFT 8 ++#define UDF_0_D11_D8_CFG_UDF_0_D9_MASK 0xff00 ++#define UDF_0_D11_D8_CFG_UDF_0_D10_SHIFT 16 ++#define UDF_0_D11_D8_CFG_UDF_0_D10_MASK 0xff0000 ++#define UDF_0_D11_D8_CFG_UDF_0_D11_SHIFT 24 ++#define UDF_0_D11_D8_CFG_UDF_0_D11_MASK 0xff000000 ++ ++#endif /* _gmac_common_core_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/gmac_core.h b/drivers/net/ethernet/broadcom/gmac/src/include/gmac_core.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/gmac_core.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/gmac_core.h 2017-11-09 17:53:43.960313000 +0800 +@@ -0,0 +1,304 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * gmacdefs - Broadcom gmac (Unimac) specific definitions ++ * ++ * $Id: gmac_core.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _gmac_core_h_ ++#define _gmac_core_h_ ++ ++ ++/* cpp contortions to concatenate w/arg prescan */ ++#ifndef PAD ++#define _PADLINE(line) pad ## line ++#define _XSTR(line) _PADLINE(line) ++#define PAD _XSTR(__LINE__) ++#endif /* PAD */ ++ ++/* We have 4 DMA TX channels */ ++#define GMAC_NUM_DMA_TX 4 ++ ++typedef volatile struct { ++ dma64regs_t dmaxmt; /* dma tx */ ++ uint32 PAD[2]; ++ dma64regs_t dmarcv; /* dma rx */ ++ uint32 PAD[2]; ++} dma64_t; ++ ++/* ++ * Host Interface Registers ++ */ ++typedef volatile struct _gmacregs { ++ uint32 devcontrol; /* 0x000 */ ++ uint32 devstatus; /* 0x004 */ ++ uint32 PAD; ++ uint32 biststatus; /* 0x00c */ ++ uint32 PAD[4]; ++ uint32 intstatus; /* 0x020 */ ++ uint32 intmask; /* 0x024 */ ++ uint32 gptimer; /* 0x028 */ ++ uint32 PAD[53]; ++ uint32 intrecvlazy; /* 0x100 */ ++ uint32 flowctlthresh; /* 0x104 */ ++ uint32 wrrthresh; /* 0x108 */ ++ uint32 gmac_idle_cnt_thresh; /* 0x10c */ ++ uint32 PAD[28]; ++ uint32 phyaccess; /* 0x180 */ ++ uint32 PAD; ++ uint32 phycontrol; /* 0x188 */ ++ uint32 txqctl; /* 0x18c */ ++ uint32 rxqctl; /* 0x190 */ ++ uint32 gpioselect; /* 0x194 */ ++ uint32 gpio_output_en; /* 0x198 */ ++ uint32 PAD; /* 0x19c */ ++ uint32 txq_rxq_mem_ctl; /* 0x1a0 */ ++ uint32 memory_ecc_status; /* 0x1a4 */ ++ uint32 serdes_ctl; /* 0x1a8 */ ++ uint32 serdes_status0; /* 0x1ac */ ++ uint32 serdes_status1; /* 0x1b0 */ ++ uint32 PAD[11]; /* 0x1b4-1dc */ ++ uint32 clk_ctl_st; /* 0x1e0 */ ++ uint32 hw_war; /* 0x1e4 */ ++ uint32 pwrctl; /* 0x1e8 */ ++ uint32 
PAD[5]; ++ ++ dma64_t dmaregs[GMAC_NUM_DMA_TX]; ++ ++ /* GAMC MIB counters */ ++ gmacmib_t mib; ++ uint32 PAD[245]; ++ ++ uint32 unimacversion; /* 0x800 */ ++ uint32 hdbkpctl; /* 0x804 */ ++ uint32 cmdcfg; /* 0x808 */ ++ uint32 macaddrhigh; /* 0x80c */ ++ uint32 macaddrlow; /* 0x810 */ ++ uint32 rxmaxlength; /* 0x814 */ ++ uint32 pausequanta; /* 0x818 */ ++ uint32 PAD[10]; ++ uint32 macmode; /* 0x844 */ ++ uint32 outertag; /* 0x848 */ ++ uint32 innertag; /* 0x84c */ ++ uint32 PAD[3]; ++ uint32 txipg; /* 0x85c */ ++ uint32 PAD[180]; ++ uint32 pausectl; /* 0xb30 */ ++ uint32 txflush; /* 0xb34 */ ++ uint32 rxstatus; /* 0xb38 */ ++ uint32 txstatus; /* 0xb3c */ ++} gmacregs_t; ++ ++#define GM_MIB_BASE 0x300 ++#define GM_MIB_LIMIT 0x800 ++ ++/* ++ * register-specific flag definitions ++ */ ++ ++/* device control */ ++#define DC_TSM 0x00000002 ++#define DC_CFCO 0x00000004 ++#define DC_RLSS 0x00000008 ++#define DC_MROR 0x00000010 ++#define DC_FCM_MASK 0x00000060 ++#define DC_FCM_SHIFT 5 ++#define DC_NAE 0x00000080 ++#define DC_TF 0x00000100 ++#define DC_RDS_MASK 0x00030000 ++#define DC_RDS_SHIFT 16 ++#define DC_TDS_MASK 0x000c0000 ++#define DC_TDS_SHIFT 18 ++ ++/* device status */ ++#define DS_RBF 0x00000001 ++#define DS_RDF 0x00000002 ++#define DS_RIF 0x00000004 ++#define DS_TBF 0x00000008 ++#define DS_TDF 0x00000010 ++#define DS_TIF 0x00000020 ++#define DS_PO 0x00000040 ++#define DS_MM_MASK 0x00000300 ++#define DS_MM_SHIFT 8 ++ ++/* bist status */ ++#define BS_MTF 0x00000001 ++#define BS_MRF 0x00000002 ++#define BS_TDB 0x00000004 ++#define BS_TIB 0x00000008 ++#define BS_TBF 0x00000010 ++#define BS_RDB 0x00000020 ++#define BS_RIB 0x00000040 ++#define BS_RBF 0x00000080 ++#define BS_URTF 0x00000100 ++#define BS_UTF 0x00000200 ++#define BS_URF 0x00000400 ++ ++/* interrupt status and mask registers */ ++#define I_MRO 0x00000001 ++#define I_MTO 0x00000002 ++#define I_TFD 0x00000004 ++#define I_LS 0x00000008 ++#define I_MDIO 0x00000010 ++#define I_MR 0x00000020 ++#define I_MT 
0x00000040 ++#define I_TO 0x00000080 ++#define I_PDEE 0x00000400 ++#define I_PDE 0x00000800 ++#define I_DE 0x00001000 ++#define I_RDU 0x00002000 ++#define I_RFO 0x00004000 ++#define I_XFU 0x00008000 ++#define I_RI 0x00010000 ++#define I_XI0 0x01000000 ++#define I_XI1 0x02000000 ++#define I_XI2 0x04000000 ++#define I_XI3 0x08000000 ++#define I_INTMASK 0x0f01fcff ++#define I_ERRMASK 0x0000fc00 ++ ++/* interrupt receive lazy */ ++#define IRL_TO_MASK 0x00ffffff ++#define IRL_FC_MASK 0xff000000 ++#define IRL_FC_SHIFT 24 ++ ++/* flow control thresholds */ ++#define FCT_TT_MASK 0x00000fff ++#define FCT_RT_MASK 0x0fff0000 ++#define FCT_RT_SHIFT 16 ++ ++/* txq aribter wrr thresholds */ ++#define WRRT_Q0T_MASK 0x000000ff ++#define WRRT_Q1T_MASK 0x0000ff00 ++#define WRRT_Q1T_SHIFT 8 ++#define WRRT_Q2T_MASK 0x00ff0000 ++#define WRRT_Q2T_SHIFT 16 ++#define WRRT_Q3T_MASK 0xff000000 ++#define WRRT_Q3T_SHIFT 24 ++ ++/* phy access */ ++#define PA_DATA_MASK 0x0000ffff ++#define PA_ADDR_MASK 0x001f0000 ++#define PA_ADDR_SHIFT 16 ++#define PA_REG_MASK 0x1f000000 ++#define PA_REG_SHIFT 24 ++#define PA_WRITE 0x20000000 ++#define PA_START 0x40000000 ++ ++/* phy control */ ++#define PC_EPA_MASK 0x0000001f ++#define PC_MCT_MASK 0x007f0000 ++#define PC_MCT_SHIFT 16 ++#define PC_MTE 0x00800000 ++ ++/* rxq control */ ++#define RC_DBT_MASK 0x00000fff ++#define RC_DBT_SHIFT 0 ++#define RC_PTE 0x00001000 ++#define RC_MDP_MASK 0x3f000000 ++#define RC_MDP_SHIFT 24 ++ ++#define RC_MAC_DATA_PERIOD 9 ++ ++/* txq control */ ++#define TC_DBT_MASK 0x00000fff ++#define TC_DBT_SHIFT 0 ++ ++/* gpio select */ ++#define GS_GSC_MASK 0x0000000f ++#define GS_GSC_SHIFT 0 ++ ++/* gpio output enable */ ++#define GS_GOE_MASK 0x0000ffff ++#define GS_GOE_SHIFT 0 ++ ++/* gpio output enable */ ++#define SC_TX1G_FIFO_RST_MASK 0x00f00000 ++#define SC_TX1G_FIFO_RST_VAL 0x00f00000 ++#define SC_FORCE_SPD_STRAP_MASK 0x00060000 ++#define SC_FORCE_SPD_STRAP_VAL 0x00040000 ++#define SC_FORCE_SPD_100M_VAL 0x00020000 ++#define 
SC_FORCE_SPD_1G_VAL 0x00040000 ++#define SC_REF_TERM_SEL_MASK 0x00001000 ++#define SC_REFSEL_MASK 0x00000c00 ++#define SC_REFSEL_VAL 0x00000400 ++#define SC_REFDIV_MASK 0x00000300 ++#define SC_REFDIV_VAL 0x00000000 ++#define SC_LCREF_EN_MASK 0x00000040 ++#define SC_RSTB_PLL_MASK 0x00000010 ++#define SC_RSTB_MDIOREGS_MASK 0x00000008 ++#define SC_RSTB_HW_MASK 0x00000004 ++#define SC_IDDQ_MASK 0x00000002 ++#define SC_PWR_DOWN_MASK 0x00000001 ++ ++/* clk control status */ ++#define CS_FA 0x00000001 ++#define CS_FH 0x00000002 ++#define CS_FI 0x00000004 ++#define CS_AQ 0x00000008 ++#define CS_HQ 0x00000010 ++#define CS_FC 0x00000020 ++#define CS_ER 0x00000100 ++#define CS_AA 0x00010000 ++#define CS_HA 0x00020000 ++#define CS_BA 0x00040000 ++#define CS_BH 0x00080000 ++#define CS_ES 0x01000000 ++ ++/* command config */ ++#define CC_TE 0x00000001 ++#define CC_RE 0x00000002 ++#define CC_ES_MASK 0x0000000c ++#define CC_ES_SHIFT 2 ++#define CC_PROM 0x00000010 ++#define CC_PAD_EN 0x00000020 ++#define CC_CF 0x00000040 ++#define CC_PF 0x00000080 ++#define CC_RPI 0x00000100 ++#define CC_TAI 0x00000200 ++#define CC_HD 0x00000400 ++#define CC_HD_SHIFT 10 ++#define CC_SR 0x00002000 ++#define CC_ML 0x00008000 ++#define CC_OT 0x00020000 ++#define CC_OR 0x00040000 ++#define CC_AE 0x00400000 ++#define CC_CFE 0x00800000 ++#define CC_NLC 0x01000000 ++#define CC_RL 0x02000000 ++#define CC_RED 0x04000000 ++#define CC_PE 0x08000000 ++#define CC_TPI 0x10000000 ++#define CC_AT 0x20000000 ++ ++/* mac addr high */ ++#define MH_HI_MASK 0xffff ++#define MH_HI_SHIFT 16 ++#define MH_MID_MASK 0xffff ++#define MH_MID_SHIFT 0 ++ ++/* mac addr low */ ++#define ML_LO_MASK 0xffff ++#define ML_LO_SHIFT 0 ++ ++/* Core specific control flags */ ++#define SICF_SWCLKE 0x0004 ++#define SICF_SWRST 0x0008 ++ ++/* Core specific status flags */ ++#define SISF_SW_ATTACHED 0x0800 ++ ++#endif /* _gmac_core_h_ */ +diff -uprN -EbwB --no-dereference -X 
/projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/hndarm.h b/drivers/net/ethernet/broadcom/gmac/src/include/hndarm.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/hndarm.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/hndarm.h 2017-11-09 17:53:43.961300000 +0800 +@@ -0,0 +1,96 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * HND SiliconBackplane ARM core software interface. 
++ * ++ * $Id: hndarm.h 325951 2012-04-05 06:03:27Z $ ++ */ ++ ++#ifndef _hndarm_h_ ++#define _hndarm_h_ ++ ++#include ++ ++extern void *hndarm_armr; ++extern uint32 hndarm_rev; ++ ++ ++extern void si_arm_init(si_t *sih); ++ ++#ifdef __ARM_ARCH_7A__ ++extern uint32 si_memc_get_ncdl(si_t *sih); ++#endif ++ ++extern void enable_arm_ints(uint32 which); ++extern void disable_arm_ints(uint32 which); ++ ++extern uint32 get_arm_cyclecount(void); ++extern void set_arm_cyclecount(uint32 ticks); ++ ++#ifdef __ARM_ARCH_7R__ ++extern uint32 get_arm_perfcount_enable(void); ++extern void set_arm_perfcount_enable(uint32 which); ++extern uint32 set_arm_perfcount_disable(void); ++ ++extern uint32 get_arm_perfcount_sel(void); ++extern void set_arm_perfcount_sel(uint32 which); ++ ++extern uint32 get_arm_perfcount_event(void); ++extern void set_arm_perfcount_event(uint32 which); ++ ++extern uint32 get_arm_perfcount(void); ++extern void set_arm_perfcount(uint32 which); ++ ++extern void enable_arm_cyclecount(void); ++extern void disable_arm_cyclecount(void); ++#endif /* __ARM_ARCH_7R__ */ ++ ++extern uint32 get_arm_inttimer(void); ++extern void set_arm_inttimer(uint32 ticks); ++ ++extern uint32 get_arm_intmask(void); ++extern void set_arm_intmask(uint32 ticks); ++ ++extern uint32 get_arm_intstatus(void); ++extern void set_arm_intstatus(uint32 ticks); ++ ++extern uint32 get_arm_firqmask(void); ++extern void set_arm_firqmask(uint32 ticks); ++ ++extern uint32 get_arm_firqstatus(void); ++extern void set_arm_firqstatus(uint32 ticks); ++ ++extern void arm_wfi(si_t *sih); ++extern void arm_jumpto(void *addr); ++ ++extern void traptest(void); ++ ++#ifdef BCMOVLHW ++#define BCMOVLHW_ENAB(sih) TRUE ++ ++extern int si_arm_ovl_remap(si_t *sih, void *virt, void *phys, uint size); ++extern int si_arm_ovl_reset(si_t *sih); ++extern bool si_arm_ovl_vaildaddr(si_t *sih, void *virt); ++extern bool si_arm_ovl_isenab(si_t *sih); ++extern bool si_arm_ovl_int(si_t *sih, uint32 pc); ++#else ++#define 
BCMOVLHW_ENAB(sih) FALSE ++ ++#define si_arm_ovl_remap(a, b, c, d) do {} while (0) ++#define si_arm_ovl_reset(a) do {} while (0) ++#define si_arm_ovl_int(a, b) FALSE ++#endif ++ ++#endif /* _hndarm_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/hndchipc.h b/drivers/net/ethernet/broadcom/gmac/src/include/hndchipc.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/hndchipc.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/hndchipc.h 2017-11-09 17:53:43.962303000 +0800 +@@ -0,0 +1,38 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * HND SiliconBackplane chipcommon support. 
++ * ++ * $Id: hndchipc.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _hndchipc_h_ ++#define _hndchipc_h_ ++ ++typedef void (*si_serial_init_fn)(void *regs, uint irq, uint baud_base, uint reg_shift); ++ ++extern void si_serial_init(si_t *sih, si_serial_init_fn add); ++ ++extern void *hnd_jtagm_init(si_t *sih, uint clkd, bool exttap); ++extern void hnd_jtagm_disable(si_t *sih, void *h); ++extern uint32 jtag_scan(si_t *sih, void *h, uint irsz, uint32 ir0, uint32 ir1, ++ uint drsz, uint32 dr0, uint32 *dr1, bool rti); ++ ++typedef void (*cc_isr_fn)(void* cbdata, uint32 ccintst); ++ ++extern bool si_cc_register_isr(si_t *sih, cc_isr_fn isr, uint32 ccintmask, void *cbdata); ++extern void si_cc_isr(si_t *sih, chipcregs_t *regs); ++ ++#endif /* _hndchipc_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/hndcpu.h b/drivers/net/ethernet/broadcom/gmac/src/include/hndcpu.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/hndcpu.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/hndcpu.h 2017-11-09 17:53:43.963297000 +0800 +@@ -0,0 +1,40 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * HND SiliconBackplane MIPS/ARM cores software interface. ++ * ++ * $Id: hndcpu.h 258983 2011-05-11 09:59:25Z $ ++ */ ++ ++#ifndef _hndcpu_h_ ++#define _hndcpu_h_ ++ ++#if defined(mips) ++#include ++#elif defined(__arm__) || defined(__thumb__) || defined(__thumb2__) ++#include ++#endif ++ ++extern uint si_irq(si_t *sih); ++extern uint32 si_cpu_clock(si_t *sih); ++extern uint32 si_mem_clock(si_t *sih); ++extern void hnd_cpu_wait(si_t *sih); ++extern void hnd_cpu_jumpto(void *addr); ++extern void hnd_cpu_reset(si_t *sih); ++extern void hnd_cpu_deadman_timer(si_t *sih, uint32 val); ++extern void si_router_coma(si_t *sih, int reset, int delay); ++extern void si_dmc_phyctl(si_t *sih, uint32 phyctl_val); ++ ++#endif /* _hndcpu_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/hnddma.h b/drivers/net/ethernet/broadcom/gmac/src/include/hnddma.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/hnddma.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/hnddma.h 2017-11-09 17:53:43.964295000 +0800 +@@ -0,0 +1,317 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Generic Broadcom Home Networking Division (HND) DMA engine SW interface ++ * This supports the following chips: BCM42xx, 44xx, 47xx . ++ * ++ * $Id: hnddma.h 321146 2012-03-14 08:27:23Z $ ++ */ ++ ++#ifndef _hnddma_h_ ++#define _hnddma_h_ ++ ++#ifndef _hnddma_pub_ ++#define _hnddma_pub_ ++typedef const struct hnddma_pub hnddma_t; ++#endif /* _hnddma_pub_ */ ++ ++/* range param for dma_getnexttxp() and dma_txreclaim */ ++typedef enum txd_range { ++ HNDDMA_RANGE_ALL = 1, ++ HNDDMA_RANGE_TRANSMITTED, ++ HNDDMA_RANGE_TRANSFERED ++} txd_range_t; ++ ++/* dma parameters id */ ++enum dma_param_id { ++ HNDDMA_PID_TX_MULTI_OUTSTD_RD = 0, ++ HNDDMA_PID_TX_PREFETCH_CTL, ++ HNDDMA_PID_TX_PREFETCH_THRESH, ++ HNDDMA_PID_TX_BURSTLEN, ++ ++ HNDDMA_PID_RX_PREFETCH_CTL = 0x100, ++ HNDDMA_PID_RX_PREFETCH_THRESH, ++ HNDDMA_PID_RX_BURSTLEN ++}; ++ ++/* dma function type */ ++typedef void (*di_detach_t)(hnddma_t *dmah); ++typedef bool (*di_txreset_t)(hnddma_t *dmah); ++typedef bool (*di_rxreset_t)(hnddma_t *dmah); ++typedef bool (*di_rxidle_t)(hnddma_t *dmah); ++typedef void (*di_txinit_t)(hnddma_t *dmah); ++typedef bool (*di_txenabled_t)(hnddma_t *dmah); ++typedef void (*di_rxinit_t)(hnddma_t *dmah); ++typedef void (*di_txsuspend_t)(hnddma_t *dmah); ++typedef void (*di_txresume_t)(hnddma_t *dmah); ++typedef bool (*di_txsuspended_t)(hnddma_t *dmah); ++typedef bool (*di_txsuspendedidle_t)(hnddma_t *dmah); ++#ifdef WL_MULTIQUEUE ++typedef void 
(*di_txflush_t)(hnddma_t *dmah); ++typedef void (*di_txflush_clear_t)(hnddma_t *dmah); ++#endif /* WL_MULTIQUEUE */ ++typedef int (*di_txfast_t)(hnddma_t *dmah, void *p, bool commit); ++typedef int (*di_txunframed_t)(hnddma_t *dmah, void *p, uint len, bool commit); ++typedef int (*di_rxunframed_t)(hnddma_t *dmah, void *p, uint len, bool commit); ++typedef void* (*di_getpos_t)(hnddma_t *di, bool direction); ++typedef void (*di_fifoloopbackenable_t)(hnddma_t *dmah); ++typedef bool (*di_txstopped_t)(hnddma_t *dmah); ++typedef bool (*di_rxstopped_t)(hnddma_t *dmah); ++typedef bool (*di_rxenable_t)(hnddma_t *dmah); ++typedef bool (*di_rxenabled_t)(hnddma_t *dmah); ++typedef void* (*di_rx_t)(hnddma_t *dmah); ++typedef bool (*di_rxfill_t)(hnddma_t *dmah); ++typedef void (*di_txreclaim_t)(hnddma_t *dmah, txd_range_t range); ++typedef void (*di_rxreclaim_t)(hnddma_t *dmah); ++typedef uintptr (*di_getvar_t)(hnddma_t *dmah, const char *name); ++typedef void* (*di_getnexttxp_t)(hnddma_t *dmah, txd_range_t range); ++typedef void* (*di_getnextrxp_t)(hnddma_t *dmah, bool forceall); ++typedef void* (*di_peeknexttxp_t)(hnddma_t *dmah); ++typedef void* (*di_peekntxp_t)(hnddma_t *dmah, int *len, void *txps[], txd_range_t range); ++typedef void* (*di_peeknextrxp_t)(hnddma_t *dmah); ++typedef void (*di_rxparam_get_t)(hnddma_t *dmah, uint16 *rxoffset, uint16 *rxbufsize); ++typedef void (*di_txblock_t)(hnddma_t *dmah); ++typedef void (*di_txunblock_t)(hnddma_t *dmah); ++typedef uint (*di_txactive_t)(hnddma_t *dmah); ++typedef void (*di_txrotate_t)(hnddma_t *dmah); ++typedef void (*di_counterreset_t)(hnddma_t *dmah); ++typedef uint (*di_ctrlflags_t)(hnddma_t *dmah, uint mask, uint flags); ++typedef char* (*di_dump_t)(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring); ++typedef char* (*di_dumptx_t)(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring); ++typedef char* (*di_dumprx_t)(hnddma_t *dmah, struct bcmstrbuf *b, bool dumpring); ++typedef uint (*di_rxactive_t)(hnddma_t *dmah); 
++typedef uint (*di_txpending_t)(hnddma_t *dmah); ++typedef uint (*di_txcommitted_t)(hnddma_t *dmah); ++typedef int (*di_pktpool_set_t)(hnddma_t *dmah, pktpool_t *pool); ++typedef bool (*di_rxtxerror_t)(hnddma_t *dmah, bool istx); ++typedef void (*di_burstlen_set_t)(hnddma_t *dmah, uint8 rxburstlen, uint8 txburstlen); ++typedef uint (*di_avoidancecnt_t)(hnddma_t *dmah); ++typedef void (*di_param_set_t)(hnddma_t *dmah, uint16 paramid, uint16 paramval); ++typedef bool (*dma_glom_enable_t) (hnddma_t *dmah, uint32 val); ++typedef uint (*dma_active_rxbuf_t) (hnddma_t *dmah); ++/* dma opsvec */ ++typedef struct di_fcn_s { ++ di_detach_t detach; ++ di_txinit_t txinit; ++ di_txreset_t txreset; ++ di_txenabled_t txenabled; ++ di_txsuspend_t txsuspend; ++ di_txresume_t txresume; ++ di_txsuspended_t txsuspended; ++ di_txsuspendedidle_t txsuspendedidle; ++#ifdef WL_MULTIQUEUE ++ di_txflush_t txflush; ++ di_txflush_clear_t txflush_clear; ++#endif /* WL_MULTIQUEUE */ ++ di_txfast_t txfast; ++ di_txunframed_t txunframed; ++ di_getpos_t getpos; ++ di_txstopped_t txstopped; ++ di_txreclaim_t txreclaim; ++ di_getnexttxp_t getnexttxp; ++ di_peeknexttxp_t peeknexttxp; ++ di_peekntxp_t peekntxp; ++ di_txblock_t txblock; ++ di_txunblock_t txunblock; ++ di_txactive_t txactive; ++ di_txrotate_t txrotate; ++ ++ di_rxinit_t rxinit; ++ di_rxreset_t rxreset; ++ di_rxidle_t rxidle; ++ di_rxstopped_t rxstopped; ++ di_rxenable_t rxenable; ++ di_rxenabled_t rxenabled; ++ di_rx_t rx; ++ di_rxfill_t rxfill; ++ di_rxreclaim_t rxreclaim; ++ di_getnextrxp_t getnextrxp; ++ di_peeknextrxp_t peeknextrxp; ++ di_rxparam_get_t rxparam_get; ++ ++ di_fifoloopbackenable_t fifoloopbackenable; ++ di_getvar_t d_getvar; ++ di_counterreset_t counterreset; ++ di_ctrlflags_t ctrlflags; ++ di_dump_t dump; ++ di_dumptx_t dumptx; ++ di_dumprx_t dumprx; ++ di_rxactive_t rxactive; ++ di_txpending_t txpending; ++ di_txcommitted_t txcommitted; ++ di_pktpool_set_t pktpool_set; ++ di_rxtxerror_t rxtxerror; ++ 
di_burstlen_set_t burstlen_set; ++ di_avoidancecnt_t avoidancecnt; ++ di_param_set_t param_set; ++ dma_glom_enable_t glom_enab; ++ di_rxunframed_t rxunframed; ++ dma_active_rxbuf_t dma_activerxbuf; ++ uint endnum; ++} di_fcn_t; ++ ++/* ++ * Exported data structure (read-only) ++ */ ++/* export structure */ ++struct hnddma_pub { ++ const di_fcn_t *di_fn; /* DMA function pointers */ ++ uint txavail; /* # free tx descriptors */ ++ uint dmactrlflags; /* dma control flags */ ++ ++ /* rx error counters */ ++ uint rxgiants; /* rx giant frames */ ++ uint rxnobuf; /* rx out of dma descriptors */ ++ /* tx error counters */ ++ uint txnobuf; /* tx out of dma descriptors */ ++ uint txnodesc; /* tx out of dma descriptors running count */ ++}; ++ ++ ++extern hnddma_t * dma_attach(osl_t *osh, const char *name, si_t *sih, ++ volatile void *dmaregstx, volatile void *dmaregsrx, ++ uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost, ++ uint rxoffset, uint *msg_level); ++#ifdef BCMDMA32 ++ ++#define dma_detach(di) ((di)->di_fn->detach(di)) ++#define dma_txreset(di) ((di)->di_fn->txreset(di)) ++#define dma_rxreset(di) ((di)->di_fn->rxreset(di)) ++#define dma_rxidle(di) ((di)->di_fn->rxidle(di)) ++#define dma_txinit(di) ((di)->di_fn->txinit(di)) ++#define dma_txenabled(di) ((di)->di_fn->txenabled(di)) ++#define dma_rxinit(di) ((di)->di_fn->rxinit(di)) ++#define dma_txsuspend(di) ((di)->di_fn->txsuspend(di)) ++#define dma_txresume(di) ((di)->di_fn->txresume(di)) ++#define dma_txsuspended(di) ((di)->di_fn->txsuspended(di)) ++#define dma_txsuspendedidle(di) ((di)->di_fn->txsuspendedidle(di)) ++#ifdef WL_MULTIQUEUE ++#define dma_txflush(di) ((di)->di_fn->txflush(di)) ++#define dma_txflush_clear(di) ((di)->di_fn->txflush_clear(di)) ++#endif /* WL_MULTIQUEUE */ ++#define dma_txfast(di, p, commit) ((di)->di_fn->txfast(di, p, commit)) ++#define dma_fifoloopbackenable(di) ((di)->di_fn->fifoloopbackenable(di)) ++#define dma_txstopped(di) ((di)->di_fn->txstopped(di)) ++#define 
dma_rxstopped(di) ((di)->di_fn->rxstopped(di)) ++#define dma_rxenable(di) ((di)->di_fn->rxenable(di)) ++#define dma_rxenabled(di) ((di)->di_fn->rxenabled(di)) ++#define dma_rx(di) ((di)->di_fn->rx(di)) ++#define dma_rxfill(di) ((di)->di_fn->rxfill(di)) ++#define dma_txreclaim(di, range) ((di)->di_fn->txreclaim(di, range)) ++#define dma_rxreclaim(di) ((di)->di_fn->rxreclaim(di)) ++#define dma_getvar(di, name) ((di)->di_fn->d_getvar(di, name)) ++#define dma_getnexttxp(di, range) ((di)->di_fn->getnexttxp(di, range)) ++#define dma_getnextrxp(di, forceall) ((di)->di_fn->getnextrxp(di, forceall)) ++#define dma_peeknexttxp(di) ((di)->di_fn->peeknexttxp(di)) ++#define dma_peekntxp(di, l, t, r) ((di)->di_fn->peekntxp(di, l, t, r)) ++#define dma_peeknextrxp(di) ((di)->di_fn->peeknextrxp(di)) ++#define dma_rxparam_get(di, off, bufs) ((di)->di_fn->rxparam_get(di, off, bufs)) ++ ++#define dma_txblock(di) ((di)->di_fn->txblock(di)) ++#define dma_txunblock(di) ((di)->di_fn->txunblock(di)) ++#define dma_txactive(di) ((di)->di_fn->txactive(di)) ++#define dma_rxactive(di) ((di)->di_fn->rxactive(di)) ++#define dma_txrotate(di) ((di)->di_fn->txrotate(di)) ++#define dma_counterreset(di) ((di)->di_fn->counterreset(di)) ++#define dma_ctrlflags(di, mask, flags) ((di)->di_fn->ctrlflags((di), (mask), (flags))) ++#define dma_txpending(di) ((di)->di_fn->txpending(di)) ++#define dma_txcommitted(di) ((di)->di_fn->txcommitted(di)) ++#define dma_pktpool_set(di, pool) ((di)->di_fn->pktpool_set((di), (pool))) ++#if defined(BCMDBG) ++#define dma_dump(di, buf, dumpring) ((di)->di_fn->dump(di, buf, dumpring)) ++#define dma_dumptx(di, buf, dumpring) ((di)->di_fn->dumptx(di, buf, dumpring)) ++#define dma_dumprx(di, buf, dumpring) ((di)->di_fn->dumprx(di, buf, dumpring)) ++#endif ++#define dma_rxtxerror(di, istx) ((di)->di_fn->rxtxerror(di, istx)) ++#define dma_burstlen_set(di, rxlen, txlen) ((di)->di_fn->burstlen_set(di, rxlen, txlen)) ++#define dma_avoidance_cnt(di) ((di)->di_fn->avoidancecnt(di)) 
++#define dma_param_set(di, paramid, paramval) ((di)->di_fn->param_set(di, paramid, paramval)) ++#define dma_activerxbuf(di) ((di)->di_fn->dma_activerxbuf(di)) ++ ++#else /* BCMDMA32 */ ++extern const di_fcn_t dma64proc; ++ ++#define dma_detach(di) (dma64proc.detach(di)) ++#define dma_txreset(di) (dma64proc.txreset(di)) ++#define dma_rxreset(di) (dma64proc.rxreset(di)) ++#define dma_rxidle(di) (dma64proc.rxidle(di)) ++#define dma_txinit(di) (dma64proc.txinit(di)) ++#define dma_txenabled(di) (dma64proc.txenabled(di)) ++#define dma_rxinit(di) (dma64proc.rxinit(di)) ++#define dma_txsuspend(di) (dma64proc.txsuspend(di)) ++#define dma_txresume(di) (dma64proc.txresume(di)) ++#define dma_txsuspended(di) (dma64proc.txsuspended(di)) ++#define dma_txsuspendedidle(di) (dma64proc.txsuspendedidle(di)) ++#ifdef WL_MULTIQUEUE ++#define dma_txflush(di) (dma64proc.txflush(di)) ++#define dma_txflush_clear(di) (dma64proc.txflush_clear(di)) ++#endif /* WL_MULTIQUEUE */ ++#define dma_txfast(di, p, commit) (dma64proc.txfast(di, p, commit)) ++#define dma_txunframed(di, p, l, commit)(dma64proc.txunframed(di, p, l, commit)) ++#define dma_getpos(di, dir) (dma64proc.getpos(di, dir)) ++#define dma_fifoloopbackenable(di) (dma64proc.fifoloopbackenable(di)) ++#define dma_txstopped(di) (dma64proc.txstopped(di)) ++#define dma_rxstopped(di) (dma64proc.rxstopped(di)) ++#define dma_rxenable(di) (dma64proc.rxenable(di)) ++#define dma_rxenabled(di) (dma64proc.rxenabled(di)) ++#define dma_rx(di) (dma64proc.rx(di)) ++#define dma_rxfill(di) (dma64proc.rxfill(di)) ++#define dma_txreclaim(di, range) (dma64proc.txreclaim(di, range)) ++#define dma_rxreclaim(di) (dma64proc.rxreclaim(di)) ++#define dma_getvar(di, name) (dma64proc.d_getvar(di, name)) ++#define dma_getnexttxp(di, range) (dma64proc.getnexttxp(di, range)) ++#define dma_getnextrxp(di, forceall) (dma64proc.getnextrxp(di, forceall)) ++#define dma_peeknexttxp(di) (dma64proc.peeknexttxp(di)) ++#define dma_peekntxp(di, l, t, r) (dma64proc.peekntxp(di, l, 
t, r)) ++#define dma_peeknextrxp(di) (dma64proc.peeknextrxp(di)) ++#define dma_rxparam_get(di, off, bufs) (dma64proc.rxparam_get(di, off, bufs)) ++ ++#define dma_txblock(di) (dma64proc.txblock(di)) ++#define dma_txunblock(di) (dma64proc.txunblock(di)) ++#define dma_txactive(di) (dma64proc.txactive(di)) ++#define dma_rxactive(di) (dma64proc.rxactive(di)) ++#define dma_txrotate(di) (dma64proc.txrotate(di)) ++#define dma_counterreset(di) (dma64proc.counterreset(di)) ++#define dma_ctrlflags(di, mask, flags) (dma64proc.ctrlflags((di), (mask), (flags))) ++#define dma_txpending(di) (dma64proc.txpending(di)) ++#define dma_txcommitted(di) (dma64proc.txcommitted(di)) ++#define dma_pktpool_set(di, pool) (dma64proc.pktpool_set((di), (pool))) ++#define dma_rxunframed(di, p, l, commit)(dma64proc.rxunframed(di, p, l, commit)) ++#if defined(BCMDBG) ++#define dma_dump(di, buf, dumpring) (dma64proc.dump(di, buf, dumpring)) ++#define dma_dumptx(di, buf, dumpring) (dma64proc.dumptx(di, buf, dumpring)) ++#define dma_dumprx(di, buf, dumpring) (dma64proc.dumprx(di, buf, dumpring)) ++#endif ++#define dma_rxtxerror(di, istx) (dma64proc.rxtxerror(di, istx)) ++#define dma_burstlen_set(di, rxlen, txlen) (dma64proc.burstlen_set(di, rxlen, txlen)) ++#define dma_avoidance_cnt(di) (dma64proc.avoidancecnt(di)) ++#define dma_param_set(di, paramid, paramval) (dma64proc.param_set(di, paramid, paramval)) ++ ++#define dma_glom_enable(di, val) (dma64proc.glom_enab(di, val)) ++#define dma_activerxbuf(di) (dma64proc.dma_activerxbuf(di)) ++ ++#endif /* BCMDMA32 */ ++ ++/* return addresswidth allowed ++ * This needs to be done after SB attach but before dma attach. 
++ * SB attach provides ability to probe backplane and dma core capabilities ++ * This info is needed by DMA_ALLOC_CONSISTENT in dma attach ++ */ ++extern uint dma_addrwidth(si_t *sih, void *dmaregs); ++ ++/* pio helpers */ ++extern void dma_txpioloopback(osl_t *osh, dma32regs_t *); ++ ++#endif /* _hnddma_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/hndsoc.h b/drivers/net/ethernet/broadcom/gmac/src/include/hndsoc.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/hndsoc.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/hndsoc.h 2017-11-09 17:53:43.965312000 +0800 +@@ -0,0 +1,259 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom HND chip & on-chip-interconnect-related definitions. ++ * ++ * $Id: hndsoc.h 325951 2012-04-05 06:03:27Z $ ++ */ ++ ++#ifndef _HNDSOC_H ++#define _HNDSOC_H ++ ++/* Include the soci specific files */ ++#include ++#include ++ ++/* ++ * SOC Interconnect Address Map. ++ * All regions may not exist on all chips. 
++ */ ++#define SI_SDRAM_BASE 0x00000000 /* Physical SDRAM */ ++#define SI_PCI_MEM 0x08000000 /* Host Mode sb2pcitranslation0 (64 MB) */ ++#define SI_PCI_MEM_SZ (64 * 1024 * 1024) ++#define SI_PCI_CFG 0x0c000000 /* Host Mode sb2pcitranslation1 (64 MB) */ ++#define SI_SDRAM_SWAPPED 0x10000000 /* Byteswapped Physical SDRAM */ ++#define SI_SDRAM_R2 0x80000000 /* Region 2 for sdram (512 MB) */ ++#define SI_ENUM_BASE 0x18000000 /* Enumeration space base */ ++#define SI_WRAP_BASE 0x18100000 /* Wrapper space base */ ++#define SI_CORE_SIZE 0x1000 /* each core gets 4Kbytes for registers */ ++#define SI_MAXCORES 20 /* Max cores (this is arbitrary, for software ++ * convenience and could be changed if we ++ * make any larger chips ++ */ ++ ++#define SI_FASTRAM 0x19000000 /* On-chip RAM on chips that also have DDR */ ++#define SI_FASTRAM_SWAPPED 0x19800000 ++ ++#define SI_FLASH2 0x1c000000 /* Flash Region 2 (region 1 shadowed here) */ ++#define SI_FLASH2_SZ 0x02000000 /* Size of Flash Region 2 */ ++#define SI_ARMCM3_ROM 0x1e000000 /* ARM Cortex-M3 ROM */ ++#define SI_FLASH1 0x1fc00000 /* MIPS Flash Region 1 */ ++#define SI_FLASH1_SZ 0x00400000 /* MIPS Size of Flash Region 1 */ ++ ++#define SI_NS_NANDFLASH 0x1c000000 /* NorthStar NAND flash base */ ++#define SI_NS_NORFLASH 0x1e000000 /* NorthStar NOR flash base */ ++#define SI_NS_NORFLASH_SZ 0x02000000 /* Size of NorthStar NOR flash region */ ++#define SI_NS_ROM 0xfffd0000 /* NorthStar ROM */ ++ ++#define SI_ARM7S_ROM 0x20000000 /* ARM7TDMI-S ROM */ ++#define SI_ARMCR4_ROM 0x000f0000 /* ARM Cortex-R4 ROM */ ++#define SI_ARMCM3_SRAM2 0x60000000 /* ARM Cortex-M3 SRAM Region 2 */ ++#define SI_ARM7S_SRAM2 0x80000000 /* ARM7TDMI-S SRAM Region 2 */ ++#define SI_ARM_FLASH1 0xffff0000 /* ARM Flash Region 1 */ ++#define SI_ARM_FLASH1_SZ 0x00010000 /* ARM Size of Flash Region 1 */ ++ ++#define SI_PCI_DMA 0x40000000 /* Client Mode sb2pcitranslation2 (1 GB) */ ++#define SI_PCI_DMA2 0x80000000 /* Client Mode sb2pcitranslation2 (1 GB) */ 
++#define SI_PCI_DMA_SZ 0x40000000 /* Client Mode sb2pcitranslation2 size in bytes */ ++#define SI_PCIE_DMA_L32 0x00000000 /* PCIE Client Mode sb2pcitranslation2 ++ * (2 ZettaBytes), low 32 bits ++ */ ++#define SI_PCIE_DMA_H32 0x80000000 /* PCIE Client Mode sb2pcitranslation2 ++ * (2 ZettaBytes), high 32 bits ++ */ ++#define SI_NS_CUR 0x1800B000 /* NorthStar CUR base */ ++ ++#define SI_NS_CHIPCB_SRAB 0x18036000 /* NorthStar+ Chip Common B SRAB base */ ++ ++/* core codes */ ++#define NODEV_CORE_ID 0x700 /* Invalid coreid */ ++#define CC_CORE_ID 0x800 /* chipcommon core */ ++#define ILINE20_CORE_ID 0x801 /* iline20 core */ ++#define SRAM_CORE_ID 0x802 /* sram core */ ++#define SDRAM_CORE_ID 0x803 /* sdram core */ ++#define PCI_CORE_ID 0x804 /* pci core */ ++#define MIPS_CORE_ID 0x805 /* mips core */ ++#define ENET_CORE_ID 0x806 /* enet mac core */ ++#define CODEC_CORE_ID 0x807 /* v90 codec core */ ++#define USB_CORE_ID 0x808 /* usb 1.1 host/device core */ ++#define ADSL_CORE_ID 0x809 /* ADSL core */ ++#define ILINE100_CORE_ID 0x80a /* iline100 core */ ++#define IPSEC_CORE_ID 0x80b /* ipsec core */ ++#define UTOPIA_CORE_ID 0x80c /* utopia core */ ++#define PCMCIA_CORE_ID 0x80d /* pcmcia core */ ++#define SOCRAM_CORE_ID 0x80e /* internal memory core */ ++#define MEMC_CORE_ID 0x80f /* memc sdram core */ ++#define OFDM_CORE_ID 0x810 /* OFDM phy core */ ++#define EXTIF_CORE_ID 0x811 /* external interface core */ ++#define D11_CORE_ID 0x812 /* 802.11 MAC core */ ++#define APHY_CORE_ID 0x813 /* 802.11a phy core */ ++#define BPHY_CORE_ID 0x814 /* 802.11b phy core */ ++#define GPHY_CORE_ID 0x815 /* 802.11g phy core */ ++#define MIPS33_CORE_ID 0x816 /* mips3302 core */ ++#define USB11H_CORE_ID 0x817 /* usb 1.1 host core */ ++#define USB11D_CORE_ID 0x818 /* usb 1.1 device core */ ++#define USB20H_CORE_ID 0x819 /* usb 2.0 host core */ ++#define USB20D_CORE_ID 0x81a /* usb 2.0 device core */ ++#define SDIOH_CORE_ID 0x81b /* sdio host core */ ++#define ROBO_CORE_ID 0x81c /* 
roboswitch core */ ++#define ATA100_CORE_ID 0x81d /* parallel ATA core */ ++#define SATAXOR_CORE_ID 0x81e /* serial ATA & XOR DMA core */ ++#define GIGETH_CORE_ID 0x81f /* gigabit ethernet core */ ++#define PCIE_CORE_ID 0x820 /* pci express core */ ++#define NPHY_CORE_ID 0x821 /* 802.11n 2x2 phy core */ ++#define SRAMC_CORE_ID 0x822 /* SRAM controller core */ ++#define MINIMAC_CORE_ID 0x823 /* MINI MAC/phy core */ ++#define ARM11_CORE_ID 0x824 /* ARM 1176 core */ ++#define ARM7S_CORE_ID 0x825 /* ARM7tdmi-s core */ ++#define LPPHY_CORE_ID 0x826 /* 802.11a/b/g phy core */ ++#define PMU_CORE_ID 0x827 /* PMU core */ ++#define SSNPHY_CORE_ID 0x828 /* 802.11n single-stream phy core */ ++#define SDIOD_CORE_ID 0x829 /* SDIO device core */ ++#define ARMCM3_CORE_ID 0x82a /* ARM Cortex M3 core */ ++#define HTPHY_CORE_ID 0x82b /* 802.11n 4x4 phy core */ ++#define MIPS74K_CORE_ID 0x82c /* mips 74k core */ ++#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ ++#define DMEMC_CORE_ID 0x82e /* DDR1/2 memory controller core */ ++#define PCIERC_CORE_ID 0x82f /* PCIE Root Complex core */ ++#define OCP_CORE_ID 0x830 /* OCP2OCP bridge core */ ++#define SC_CORE_ID 0x831 /* shared common core */ ++#define AHB_CORE_ID 0x832 /* OCP2AHB bridge core */ ++#define SPIH_CORE_ID 0x833 /* SPI host core */ ++#define I2S_CORE_ID 0x834 /* I2S core */ ++#define DMEMS_CORE_ID 0x835 /* SDR/DDR1 memory controller core */ ++#define DEF_SHIM_COMP 0x837 /* SHIM component in ubus/6362 */ ++ ++#define ACPHY_CORE_ID 0x83b /* Dot11 ACPHY */ ++#define PCIE2_CORE_ID 0x83c /* pci express Gen2 core */ ++#define USB30D_CORE_ID 0x83d /* usb 3.0 device core */ ++#define ARMCR4_CORE_ID 0x83e /* ARM CR4 CPU */ ++#define APB_BRIDGE_CORE_ID 0x135 /* APB bridge core ID */ ++#define AXI_CORE_ID 0x301 /* AXI/GPV core ID */ ++#define EROM_CORE_ID 0x366 /* EROM core ID */ ++#define OOB_ROUTER_CORE_ID 0x367 /* OOB router core ID */ ++#define DEF_AI_COMP 0xfff /* Default component, in ai chips it maps all ++ * unused address 
ranges ++ */ ++ ++#define CC_4706_CORE_ID 0x500 /* chipcommon core */ ++#define NS_PCIEG2_CORE_ID 0x501 /* PCIE Gen 2 core */ ++#define NS_DMA_CORE_ID 0x502 /* DMA core */ ++#define NS_SDIO3_CORE_ID 0x503 /* SDIO3 core */ ++#define NS_USB20_CORE_ID 0x504 /* USB2.0 core */ ++#define NS_USB30_CORE_ID 0x505 /* USB3.0 core */ ++#define NS_A9JTAG_CORE_ID 0x506 /* ARM Cortex A9 JTAG core */ ++#define NS_DDR23_CORE_ID 0x507 /* Denali DDR2/DDR3 memory controller */ ++#define NS_ROM_CORE_ID 0x508 /* ROM core */ ++#define NS_NAND_CORE_ID 0x509 /* NAND flash controller core */ ++#define NS_QSPI_CORE_ID 0x50a /* SPI flash controller core */ ++#define NS_CCB_CORE_ID 0x50b /* ChipcommonB core */ ++#define SOCRAM_4706_CORE_ID 0x50e /* internal memory core */ ++#define NS_SOCRAM_CORE_ID SOCRAM_4706_CORE_ID ++#define ARMCA9_CORE_ID 0x510 /* ARM Cortex A9 core (ihost) */ ++#define NS_IHOST_CORE_ID ARMCA9_CORE_ID /* ARM Cortex A9 core (ihost) */ ++#define GMAC_COMMON_4706_CORE_ID 0x5dc /* Gigabit MAC core */ ++#define GMAC_4706_CORE_ID 0x52d /* Gigabit MAC core */ ++#define AMEMC_CORE_ID 0x52e /* DDR1/2 memory controller core */ ++#define ALTA_CORE_ID 0x534 /* I2S core */ ++#define DDR23_PHY_CORE_ID 0x5dd ++ ++#define SI_PCI1_MEM 0x40000000 /* Host Mode sb2pcitranslation0 (64 MB) */ ++#define SI_PCI1_CFG 0x44000000 /* Host Mode sb2pcitranslation1 (64 MB) */ ++#define SI_PCIE1_DMA_H32 0xc0000000 /* PCIE Client Mode sb2pcitranslation2 ++ * (2 ZettaBytes), high 32 bits ++ */ ++#define CC_4706B0_CORE_REV 0x8000001f /* chipcommon core */ ++#define SOCRAM_4706B0_CORE_REV 0x80000005 /* internal memory core */ ++#define GMAC_4706B0_CORE_REV 0x80000000 /* Gigabit MAC core */ ++ ++/* There are TWO constants on all HND chips: SI_ENUM_BASE above, ++ * and chipcommon being the first core: ++ */ ++#define SI_CC_IDX 0 ++ ++/* SOC Interconnect types (aka chip types) */ ++#define SOCI_SB 0 ++#define SOCI_AI 1 ++#define SOCI_UBUS 2 ++#define SOCI_NS 3 ++ ++/* Common core control flags */ ++#define 
SICF_BIST_EN 0x8000 ++#define SICF_PME_EN 0x4000 ++#define SICF_CORE_BITS 0x3ffc ++#define SICF_FGC 0x0002 ++#define SICF_CLOCK_EN 0x0001 ++ ++/* Common core status flags */ ++#define SISF_BIST_DONE 0x8000 ++#define SISF_BIST_ERROR 0x4000 ++#define SISF_GATED_CLK 0x2000 ++#define SISF_DMA64 0x1000 ++#define SISF_CORE_BITS 0x0fff ++ ++/* Norstar core status flags */ ++#define SISF_NS_BOOTDEV_MASK 0x0003 /* ROM core */ ++#define SISF_NS_BOOTDEV_NOR 0x0000 /* ROM core */ ++#define SISF_NS_BOOTDEV_NAND 0x0001 /* ROM core */ ++#define SISF_NS_BOOTDEV_ROM 0x0002 /* ROM core */ ++#define SISF_NS_BOOTDEV_OFFLOAD 0x0003 /* ROM core */ ++#define SISF_NS_SKUVEC_MASK 0x000c /* ROM core */ ++ ++/* A register that is common to all cores to ++ * communicate w/PMU regarding clock control. ++ */ ++#define SI_CLK_CTL_ST 0x1e0 /* clock control and status */ ++ ++/* clk_ctl_st register */ ++#define CCS_FORCEALP 0x00000001 /* force ALP request */ ++#define CCS_FORCEHT 0x00000002 /* force HT request */ ++#define CCS_FORCEILP 0x00000004 /* force ILP request */ ++#define CCS_ALPAREQ 0x00000008 /* ALP Avail Request */ ++#define CCS_HTAREQ 0x00000010 /* HT Avail Request */ ++#define CCS_FORCEHWREQOFF 0x00000020 /* Force HW Clock Request Off */ ++#define CCS_HQCLKREQ 0x00000040 /* HQ Clock Required */ ++#define CCS_USBCLKREQ 0x00000100 /* USB Clock Req */ ++#define CCS_ERSRC_REQ_MASK 0x00000700 /* external resource requests */ ++#define CCS_ERSRC_REQ_SHIFT 8 ++#define CCS_ALPAVAIL 0x00010000 /* ALP is available */ ++#define CCS_HTAVAIL 0x00020000 /* HT is available */ ++#define CCS_BP_ON_APL 0x00040000 /* RO: Backplane is running on ALP clock */ ++#define CCS_BP_ON_HT 0x00080000 /* RO: Backplane is running on HT clock */ ++#define CCS_ERSRC_STS_MASK 0x07000000 /* external resource status */ ++#define CCS_ERSRC_STS_SHIFT 24 ++ ++#define CCS0_HTAVAIL 0x00010000 /* HT avail in chipc and pcmcia on 4328a0 */ ++#define CCS0_ALPAVAIL 0x00020000 /* ALP avail in chipc and pcmcia on 4328a0 */ ++ ++/* 
Not really related to SOC Interconnect, but a couple of software ++ * conventions for the use the flash space: ++ */ ++ ++/* Minumum amount of flash we support */ ++#define FLASH_MIN 0x00020000 /* Minimum flash size */ ++ ++/* A boot/binary may have an embedded block that describes its size */ ++#define BISZ_OFFSET 0x3e0 /* At this offset into the binary */ ++#define BISZ_MAGIC 0x4249535a /* Marked with this value: 'BISZ' */ ++#define BISZ_MAGIC_IDX 0 /* Word 0: magic */ ++#define BISZ_TXTST_IDX 1 /* 1: text start */ ++#define BISZ_TXTEND_IDX 2 /* 2: text end */ ++#define BISZ_DATAST_IDX 3 /* 3: data start */ ++#define BISZ_DATAEND_IDX 4 /* 4: data end */ ++#define BISZ_BSSST_IDX 5 /* 5: bss start */ ++#define BISZ_BSSEND_IDX 6 /* 6: bss end */ ++#define BISZ_SIZE 7 /* descriptor size in 32-bit integers */ ++ ++#endif /* _HNDSOC_H */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/hndtcam.h b/drivers/net/ethernet/broadcom/gmac/src/include/hndtcam.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/hndtcam.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/hndtcam.h 2017-11-09 17:53:43.966304000 +0800 +@@ -0,0 +1,95 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * HND SOCRAM TCAM software interface. ++ * ++ * $Id: hndtcam.h 317281 2012-02-27 11:23:27Z $ ++ */ ++#ifndef _hndtcam_h_ ++#define _hndtcam_h_ ++ ++/* ++ * 0 - 1 ++ * 1 - 2 Consecutive locations are patched ++ * 2 - 4 Consecutive locations are patched ++ * 3 - 8 Consecutive locations are patched ++ * 4 - 16 Consecutive locations are patched ++ * Define default to patch 2 locations ++ */ ++ ++#ifdef PATCHCOUNT ++#define SRPC_PATCHCOUNT PATCHCOUNT ++#else ++#define PATCHCOUNT 0 ++#define SRPC_PATCHCOUNT PATCHCOUNT ++#endif ++ ++#if defined(__ARM_ARCH_7R__) ++#ifndef PATCHCOUNT ++#define PATCHCOUNT 1 ++#endif ++#define ARMCR4_TCAMPATCHCOUNT PATCHCOUNT ++#define ARMCR4_TCAMADDR_MASK (~((1 << (ARMCR4_TCAMPATCHCOUNT + 2))-1)) ++#define ARMCR4_PATCHNLOC (1 << ARMCR4_TCAMPATCHCOUNT) ++#endif /* defined(__ARM_ARCH_7R__) */ ++ ++/* N Consecutive location to patch */ ++#define SRPC_PATCHNLOC (1 << (SRPC_PATCHCOUNT)) ++ ++#define PATCHHDR(_p) __attribute__ ((__section__ (".patchhdr."#_p))) _p ++#define PATCHENTRY(_p) __attribute__ ((__section__ (".patchentry."#_p))) _p ++ ++#if defined(__ARM_ARCH_7R__) ++typedef struct { ++ uint32 data[ARMCR4_PATCHNLOC]; ++} patch_entry_t; ++#else ++typedef struct { ++ uint32 data[SRPC_PATCHNLOC]; ++} patch_entry_t; ++#endif ++ ++typedef struct { ++ void *addr; /* patch address */ ++ uint32 len; /* bytes to patch in entry */ ++ patch_entry_t *entry; /* patch entry data */ ++} patch_hdr_t; ++ ++/* patch values and address structure */ ++typedef struct patchaddrvalue { ++ uint32 addr; ++ uint32 value; ++} patchaddrvalue_t; ++ ++extern void *socram_regs; ++extern uint32 socram_rev; ++ ++extern void 
*arm_regs; ++ ++extern void hnd_patch_init(void *srp); ++extern void hnd_tcam_write(void *srp, uint16 idx, uint32 data); ++extern void hnd_tcam_read(void *srp, uint16 idx, uint32 *content); ++void * hnd_tcam_init(void *srp, int no_addrs); ++extern void hnd_tcam_disablepatch(void *srp); ++extern void hnd_tcam_enablepatch(void *srp); ++#ifdef CONFIG_XIP ++extern void hnd_tcam_bootloader_load(void *srp, char *pvars); ++#else ++extern void hnd_tcam_load(void *srp, const patchaddrvalue_t *patchtbl); ++#endif /* CONFIG_XIP */ ++extern void BCMATTACHFN(hnd_tcam_load_default)(void); ++extern void hnd_tcam_reclaim(void); ++ ++#endif /* _hndtcam_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/linux_osl.h b/drivers/net/ethernet/broadcom/gmac/src/include/linux_osl.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/linux_osl.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/linux_osl.h 2017-11-09 17:53:43.968293000 +0800 +@@ -0,0 +1,737 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * Linux OS Independent Layer ++ * ++ * $Id: linux_osl.h 329351 2012-04-25 01:48:39Z $ ++ */ ++ ++#ifndef _linux_osl_h_ ++#define _linux_osl_h_ ++ ++#include ++ ++/* Linux Kernel: File Operations: start */ ++extern void * osl_os_open_image(char * filename); ++extern int osl_os_get_image_block(char * buf, int len, void * image); ++extern void osl_os_close_image(void * image); ++extern int osl_os_image_size(void *image); ++/* Linux Kernel: File Operations: end */ ++ ++#ifdef BCMDRIVER ++ ++/* OSL initialization */ ++extern osl_t *osl_attach(void *pdev, uint bustype, bool pkttag); ++extern void osl_detach(osl_t *osh); ++ ++/* Global ASSERT type */ ++extern uint32 g_assert_type; ++ ++/* ASSERT */ ++ #ifdef __GNUC__ ++ #define GCC_VERSION \ ++ (__GNUC__ * 10000 + __GNUC_MINOR__ * 100 + __GNUC_PATCHLEVEL__) ++ #if GCC_VERSION > 30100 ++ #define ASSERT(exp) do {} while (0) ++ #else ++ /* ASSERT could cause segmentation fault on GCC3.1, use empty instead */ ++ #define ASSERT(exp) ++ #endif /* GCC_VERSION > 30100 */ ++ #endif /* __GNUC__ */ ++ ++/* microsecond delay */ ++#define OSL_DELAY(usec) osl_delay(usec) ++extern void osl_delay(uint usec); ++ ++#define OSL_PCMCIA_READ_ATTR(osh, offset, buf, size) \ ++ osl_pcmcia_read_attr((osh), (offset), (buf), (size)) ++#define OSL_PCMCIA_WRITE_ATTR(osh, offset, buf, size) \ ++ osl_pcmcia_write_attr((osh), (offset), (buf), (size)) ++extern void osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size); ++extern void osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size); ++ ++/* PCI configuration space access macros */ ++#define OSL_PCI_READ_CONFIG(osh, offset, size) \ ++ osl_pci_read_config((osh), (offset), (size)) ++#define OSL_PCI_WRITE_CONFIG(osh, offset, size, val) \ ++ osl_pci_write_config((osh), (offset), (size), (val)) ++extern uint32 osl_pci_read_config(osl_t *osh, uint offset, uint size); ++extern void osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val); ++ ++/* PCI device bus # 
and slot # */ ++#define OSL_PCI_BUS(osh) osl_pci_bus(osh) ++#define OSL_PCI_SLOT(osh) osl_pci_slot(osh) ++extern uint osl_pci_bus(osl_t *osh); ++extern uint osl_pci_slot(osl_t *osh); ++extern struct pci_dev *osl_pci_device(osl_t *osh); ++ ++/* Pkttag flag should be part of public information */ ++typedef struct { ++ bool pkttag; ++ bool mmbus; /* Bus supports memory-mapped register accesses */ ++ pktfree_cb_fn_t tx_fn; /* Callback function for PKTFREE */ ++ void *tx_ctx; /* Context to the callback function */ ++#ifdef OSLREGOPS ++ osl_rreg_fn_t rreg_fn; /* Read Register function */ ++ osl_wreg_fn_t wreg_fn; /* Write Register function */ ++ void *reg_ctx; /* Context to the reg callback functions */ ++#else ++ void *unused[3]; ++#endif ++} osl_pubinfo_t; ++ ++#define PKTFREESETCB(osh, _tx_fn, _tx_ctx) \ ++ do { \ ++ ((osl_pubinfo_t*)osh)->tx_fn = _tx_fn; \ ++ ((osl_pubinfo_t*)osh)->tx_ctx = _tx_ctx; \ ++ } while (0) ++ ++#ifdef OSLREGOPS ++#define REGOPSSET(osh, rreg, wreg, ctx) \ ++ do { \ ++ ((osl_pubinfo_t*)osh)->rreg_fn = rreg; \ ++ ((osl_pubinfo_t*)osh)->wreg_fn = wreg; \ ++ ((osl_pubinfo_t*)osh)->reg_ctx = ctx; \ ++ } while (0) ++#endif /* OSLREGOPS */ ++ ++/* host/bus architecture-specific byte swap */ ++// #define BUS_SWAP32(v) (v) /* JIRA:LINUXDEV-16 */ ++#ifdef IL_BIGENDIAN ++#define BUS_SWAP32(v) bcmswap32(v) ++#else ++#define BUS_SWAP32(v) (v) ++#endif /* IL_BIGENDIAN */ ++ ++ #define MALLOC(osh, size) osl_malloc((osh), (size)) ++ #define MFREE(osh, addr, size) osl_mfree((osh), (addr), (size)) ++ #define MALLOCED(osh) osl_malloced((osh)) ++ extern void *osl_malloc(osl_t *osh, uint size); ++ extern void osl_mfree(osl_t *osh, void *addr, uint size); ++ extern uint osl_malloced(osl_t *osh); ++ ++#define NATIVE_MALLOC(osh, size) kmalloc(size, GFP_ATOMIC) ++#define NATIVE_MFREE(osh, addr, size) kfree(addr) ++#ifdef USBAP ++#include ++#define VMALLOC(osh, size) vmalloc(size) ++#define VFREE(osh, addr, size) vfree(addr) ++#endif /* USBAP */ ++ ++#define 
MALLOC_FAILED(osh) osl_malloc_failed((osh)) ++extern uint osl_malloc_failed(osl_t *osh); ++ ++/* allocate/free shared (dma-able) consistent memory */ ++#define DMA_CONSISTENT_ALIGN osl_dma_consistent_align() ++#define DMA_ALLOC_CONSISTENT(osh, size, align, tot, pap, dmah) \ ++ osl_dma_alloc_consistent((osh), (size), (align), (tot), (pap)) ++#define DMA_FREE_CONSISTENT(osh, va, size, pa, dmah) \ ++ osl_dma_free_consistent((osh), (void*)(va), (size), (pa)) ++extern uint osl_dma_consistent_align(void); ++extern void *osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align, uint *tot, ulong *pap); ++extern void osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa); ++ ++/* map/unmap direction */ ++#define DMA_TX 1 /* TX direction for DMA */ ++#define DMA_RX 2 /* RX direction for DMA */ ++ ++/* map/unmap shared (dma-able) memory */ ++#define DMA_UNMAP(osh, pa, size, direction, p, dmah) \ ++ osl_dma_unmap((osh), (pa), (size), (direction)) ++extern uint osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah); ++extern void osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction); ++ ++/* API for DMA addressing capability */ ++#define OSL_DMADDRWIDTH(osh, addrwidth) do {} while (0) ++ ++#define SELECT_BUS_WRITE(osh, mmap_op, bus_op) mmap_op ++#define SELECT_BUS_READ(osh, mmap_op, bus_op) mmap_op ++ ++#define OSL_ERROR(bcmerror) osl_error(bcmerror) ++extern int osl_error(int bcmerror); ++ ++/* the largest reasonable packet buffer driver uses for ethernet MTU in bytes */ ++#define PKTBUFSZ 2048 /* largest reasonable packet buffer, driver uses for ethernet MTU */ ++ ++/* ++ * BINOSL selects the slightly slower function-call-based binary compatible osl. ++ * Macros expand to calls to functions defined in linux_osl.c . 
++ */ ++#ifndef BINOSL ++#include /* use current 2.4.x calling conventions */ ++#include /* for vsn/printf's */ ++#include /* for mem*, str* */ ++ ++#define OSL_SYSUPTIME() ((uint32)jiffies * (1000 / HZ)) ++#define printf(fmt, args...) printk(fmt , ## args) ++#include /* for vsn/printf's */ ++#include /* for mem*, str* */ ++/* bcopy's: Linux kernel doesn't provide these (anymore) */ ++#define bcopy(src, dst, len) memcpy((dst), (src), (len)) ++#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) ++#define bzero(b, len) memset((b), '\0', (len)) ++ ++/* register access macros */ ++#if defined(OSLREGOPS) ++#define R_REG(osh, r) (\ ++ sizeof(*(r)) == sizeof(uint8) ? osl_readb((osh), (volatile uint8*)(r)) : \ ++ sizeof(*(r)) == sizeof(uint16) ? osl_readw((osh), (volatile uint16*)(r)) : \ ++ osl_readl((osh), (volatile uint32*)(r)) \ ++) ++#define W_REG(osh, r, v) do { \ ++ switch (sizeof(*(r))) { \ ++ case sizeof(uint8): osl_writeb((osh), (volatile uint8*)(r), (uint8)(v)); break; \ ++ case sizeof(uint16): osl_writew((osh), (volatile uint16*)(r), (uint16)(v)); break; \ ++ case sizeof(uint32): osl_writel((osh), (volatile uint32*)(r), (uint32)(v)); break; \ ++ } \ ++} while (0) ++ ++extern uint8 osl_readb(osl_t *osh, volatile uint8 *r); ++extern uint16 osl_readw(osl_t *osh, volatile uint16 *r); ++extern uint32 osl_readl(osl_t *osh, volatile uint32 *r); ++extern void osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v); ++extern void osl_writew(osl_t *osh, volatile uint16 *r, uint16 v); ++extern void osl_writel(osl_t *osh, volatile uint32 *r, uint32 v); ++#else /* OSLREGOPS */ ++ ++#ifndef IL_BIGENDIAN ++#ifndef __mips__ ++#define R_REG(osh, r) (\ ++ SELECT_BUS_READ(osh, \ ++ ({ \ ++ __typeof(*(r)) __osl_v; \ ++ BCM_REFERENCE(osh); \ ++ switch (sizeof(*(r))) { \ ++ case sizeof(uint8): __osl_v = \ ++ readb((volatile uint8*)(r)); break; \ ++ case sizeof(uint16): __osl_v = \ ++ readw((volatile uint16*)(r)); break; \ ++ case sizeof(uint32): __osl_v = \ ++ readl((volatile 
uint32*)(r)); break; \ ++ } \ ++ __osl_v; \ ++ }), \ ++ OSL_READ_REG(osh, r)) \ ++) ++#else /* __mips__ */ ++#define R_REG(osh, r) (\ ++ SELECT_BUS_READ(osh, \ ++ ({ \ ++ __typeof(*(r)) __osl_v; \ ++ BCM_REFERENCE(osh); \ ++ __asm__ __volatile__("sync"); \ ++ switch (sizeof(*(r))) { \ ++ case sizeof(uint8): __osl_v = \ ++ readb((volatile uint8*)(r)); break; \ ++ case sizeof(uint16): __osl_v = \ ++ readw((volatile uint16*)(r)); break; \ ++ case sizeof(uint32): __osl_v = \ ++ readl((volatile uint32*)(r)); break; \ ++ } \ ++ __asm__ __volatile__("sync"); \ ++ __osl_v; \ ++ }), \ ++ ({ \ ++ __typeof(*(r)) __osl_v; \ ++ __asm__ __volatile__("sync"); \ ++ __osl_v = OSL_READ_REG(osh, r); \ ++ __asm__ __volatile__("sync"); \ ++ __osl_v; \ ++ })) \ ++) ++#endif /* __mips__ */ ++ ++#define W_REG(osh, r, v) do { \ ++ BCM_REFERENCE(osh); \ ++ SELECT_BUS_WRITE(osh, \ ++ switch (sizeof(*(r))) { \ ++ case sizeof(uint8): writeb((uint8)(v), (volatile uint8*)(r)); break; \ ++ case sizeof(uint16): writew((uint16)(v), (volatile uint16*)(r)); break; \ ++ case sizeof(uint32): writel((uint32)(v), (volatile uint32*)(r)); break; \ ++ }, \ ++ (OSL_WRITE_REG(osh, r, v))); \ ++ } while (0) ++#else /* IL_BIGENDIAN */ ++#define R_REG(osh, r) (\ ++ SELECT_BUS_READ(osh, \ ++ ({ \ ++ __typeof(*(r)) __osl_v; \ ++ BCM_REFERENCE(osh); \ ++ switch (sizeof(*(r))) { \ ++ case sizeof(uint8): __osl_v = \ ++ readb((volatile uint8*)((uintptr)(r)^3)); break; \ ++ case sizeof(uint16): __osl_v = \ ++ readw((volatile uint16*)((uintptr)(r)^2)); break; \ ++ case sizeof(uint32): __osl_v = \ ++ readl((volatile uint32*)(r)); break; \ ++ } \ ++ __osl_v; \ ++ }), \ ++ OSL_READ_REG(osh, r)) \ ++) ++#define W_REG(osh, r, v) do { \ ++ BCM_REFERENCE(osh); \ ++ SELECT_BUS_WRITE(osh, \ ++ switch (sizeof(*(r))) { \ ++ case sizeof(uint8): writeb((uint8)(v), \ ++ (volatile uint8*)((uintptr)(r)^3)); break; \ ++ case sizeof(uint16): writew((uint16)(v), \ ++ (volatile uint16*)((uintptr)(r)^2)); break; \ ++ case sizeof(uint32): 
writel((uint32)(v), \ ++ (volatile uint32*)(r)); break; \ ++ }, \ ++ (OSL_WRITE_REG(osh, r, v))); \ ++ } while (0) ++#endif /* IL_BIGENDIAN */ ++#endif /* OSLREGOPS */ ++ ++#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) ++#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) ++ ++/* bcopy, bcmp, and bzero functions */ ++#define bcopy(src, dst, len) memcpy((dst), (src), (len)) ++#define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) ++#define bzero(b, len) memset((b), '\0', (len)) ++ ++/* uncached/cached virtual address */ ++#ifdef __mips__ ++#include ++#define OSL_UNCACHED(va) ((void *)KSEG1ADDR((va))) ++#define OSL_CACHED(va) ((void *)KSEG0ADDR((va))) ++#else ++#define OSL_UNCACHED(va) ((void *)va) ++#define OSL_CACHED(va) ((void *)va) ++ ++/* ARM NorthStar */ ++#define OSL_CACHE_FLUSH(va, len) ++ ++#endif /* mips */ ++ ++#ifdef __mips__ ++#define OSL_PREF_RANGE_LD(va, sz) prefetch_range_PREF_LOAD_RETAINED(va, sz) ++#define OSL_PREF_RANGE_ST(va, sz) prefetch_range_PREF_STORE_RETAINED(va, sz) ++#else /* __mips__ */ ++#define OSL_PREF_RANGE_LD(va, sz) ++#define OSL_PREF_RANGE_ST(va, sz) ++#endif /* __mips__ */ ++ ++/* get processor cycle count */ ++#if defined(mips) ++#define OSL_GETCYCLES(x) ((x) = read_c0_count() * 2) ++#elif defined(__i386__) ++#define OSL_GETCYCLES(x) rdtscl((x)) ++#else ++#define OSL_GETCYCLES(x) ((x) = 0) ++#endif /* defined(mips) */ ++ ++/* dereference an address that may cause a bus exception */ ++#ifdef mips ++#if defined(MODULE) && (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 17)) ++#define BUSPROBE(val, addr) panic("get_dbe() will not fixup a bus exception when compiled into"\ ++ " a module") ++#else ++#define BUSPROBE(val, addr) get_dbe((val), (addr)) ++#include ++#endif /* defined(MODULE) && (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 17)) */ ++#else ++#define BUSPROBE(val, addr) ({ (val) = R_REG(NULL, (addr)); 0; }) ++#endif /* mips */ ++ ++/* map/unmap physical to virtual I/O */ ++#if !defined(CONFIG_MMC_MSM7X00A) 
++#define REG_MAP(pa, size) ioremap_nocache((unsigned long)(pa), (unsigned long)(size)) ++#else ++#define REG_MAP(pa, size) (void *)(0) ++#endif /* !defined(CONFIG_MMC_MSM7X00A */ ++#define REG_UNMAP(va) iounmap((va)) ++ ++/* shared (dma-able) memory access macros */ ++#define R_SM(r) *(r) ++#define W_SM(r, v) (*(r) = (v)) ++#define BZERO_SM(r, len) memset((r), '\0', (len)) ++ ++/* Because the non BINOSL implemenation of the PKT OSL routines are macros (for ++ * performance reasons), we need the Linux headers. ++ */ ++#include /* use current 2.4.x calling conventions */ ++ ++/* packet primitives */ ++#define PKTGET(osh, len, send) osl_pktget((osh), (len)) ++#define PKTDUP(osh, skb) osl_pktdup((osh), (skb)) ++#define PKTLIST_DUMP(osh, buf) ++#define PKTDBG_TRACE(osh, pkt, bit) ++#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send)) ++#ifdef DHD_USE_STATIC_BUF ++#define PKTGET_STATIC(osh, len, send) osl_pktget_static((osh), (len)) ++#define PKTFREE_STATIC(osh, skb, send) osl_pktfree_static((osh), (skb), (send)) ++#endif /* DHD_USE_STATIC_BUF */ ++#define PKTDATA(osh, skb) (((struct sk_buff*)(skb))->data) ++#define PKTLEN(osh, skb) (((struct sk_buff*)(skb))->len) ++#define PKTHEADROOM(osh, skb) (PKTDATA(osh, skb)-(((struct sk_buff*)(skb))->head)) ++#define PKTTAILROOM(osh, skb) ((((struct sk_buff*)(skb))->end)-(((struct sk_buff*)(skb))->tail)) ++#define PKTNEXT(osh, skb) (((struct sk_buff*)(skb))->next) ++#define PKTSETNEXT(osh, skb, x) (((struct sk_buff*)(skb))->next = (struct sk_buff*)(x)) ++#define PKTSETLEN(osh, skb, len) __pskb_trim((struct sk_buff*)(skb), (len)) ++#define PKTPUSH(osh, skb, bytes) skb_push((struct sk_buff*)(skb), (bytes)) ++#define PKTPULL(osh, skb, bytes) skb_pull((struct sk_buff*)(skb), (bytes)) ++#define PKTTAG(skb) ((void*)(((struct sk_buff*)(skb))->cb)) ++#define PKTSETPOOL(osh, skb, x, y) do {} while (0) ++#define PKTPOOL(osh, skb) FALSE ++#define PKTSHRINK(osh, m) (m) ++ ++#define FASTBUF (1 << 16) ++#define CTFBUF (1 << 17) 
++#define PKTSETFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) |= FASTBUF) ++#define PKTCLRFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) &= (~FASTBUF)) ++#define PKTSETCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) |= CTFBUF) ++#define PKTCLRCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) &= (~CTFBUF)) ++#define PKTISFAST(osh, skb) ((((struct sk_buff*)(skb))->mac_len) & FASTBUF) ++#define PKTISCTF(osh, skb) ((((struct sk_buff*)(skb))->mac_len) & CTFBUF) ++#define PKTFAST(osh, skb) (((struct sk_buff*)(skb))->mac_len) ++ ++#define PKTSETSKIPCT(osh, skb) ++#define PKTCLRSKIPCT(osh, skb) ++#define PKTSKIPCT(osh, skb) ++#define PKTCLRCHAINED(osh, skb) ++#define PKTSETCHAINED(osh, skb) ++#define CTF_MARK(m) 0 ++ ++#ifdef BCMFA ++#ifdef BCMFA_HW_HASH ++#define PKTSETFAHIDX(skb, idx) (((struct sk_buff*)(skb))->napt_idx = idx) ++#else ++#define PKTSETFAHIDX(skb, idx) ++#endif /* BCMFA_SW_HASH */ ++#define PKTGETFAHIDX(skb) (((struct sk_buff*)(skb))->napt_idx) ++#define PKTSETFADEV(skb, imp) (((struct sk_buff*)(skb))->dev = imp) ++#define PKTSETRXDEV(skb) (((struct sk_buff*)(skb))->rxdev = ((struct sk_buff*)(skb))->dev) ++ ++#define AUX_TCP_FIN_RST (1 << 0) ++#define AUX_FREED (1 << 1) ++#define PKTSETFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_TCP_FIN_RST) ++#define PKTCLRFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_TCP_FIN_RST)) ++#define PKTISFAAUX(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_TCP_FIN_RST) ++#define PKTSETFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags |= AUX_FREED) ++#define PKTCLRFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags &= (~AUX_FREED)) ++#define PKTISFAFREED(skb) (((struct sk_buff*)(skb))->napt_flags & AUX_FREED) ++#define PKTISFABRIDGED(skb) PKTISFAAUX(skb) ++#else ++#define PKTISFAAUX(skb) (FALSE) ++#define PKTISFABRIDGED(skb) (FALSE) ++#define PKTISFAFREED(skb) (FALSE) ++ ++#define PKTCLRFAAUX(skb) ++#define PKTSETFAFREED(skb) ++#define PKTCLRFAFREED(skb) ++#endif /* BCMFA */ ++ ++extern 
void osl_pktfree(osl_t *osh, void *skb, bool send); ++extern void *osl_pktget_static(osl_t *osh, uint len); ++extern void osl_pktfree_static(osl_t *osh, void *skb, bool send); ++ ++extern void *osl_pkt_frmnative(osl_t *osh, void *skb); ++extern void *osl_pktget(osl_t *osh, uint len); ++extern void *osl_pktdup(osl_t *osh, void *skb); ++extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt); ++#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative(((osl_t *)osh), (struct sk_buff*)(skb)) ++#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osl_t *)(osh), (pkt)) ++ ++#define PKTLINK(skb) (((struct sk_buff*)(skb))->prev) ++#define PKTSETLINK(skb, x) (((struct sk_buff*)(skb))->prev = (struct sk_buff*)(x)) ++#define PKTPRIO(skb) (((struct sk_buff*)(skb))->priority) ++#define PKTSETPRIO(skb, x) (((struct sk_buff*)(skb))->priority = (x)) ++#define PKTSUMNEEDED(skb) (((struct sk_buff*)(skb))->ip_summed == CHECKSUM_HW) ++#define PKTSETSUMGOOD(skb, x) (((struct sk_buff*)(skb))->ip_summed = \ ++ ((x) ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE)) ++/* PKTSETSUMNEEDED and PKTSUMGOOD are not possible because skb->ip_summed is overloaded */ ++#define PKTSHARED(skb) (((struct sk_buff*)(skb))->cloned) ++ ++#ifdef CONFIG_NF_CONNTRACK_MARK ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) ++#define PKTMARK(p) (((struct sk_buff *)(p))->mark) ++#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->mark = (m) ++#else /* !2.6.0 */ ++#define PKTMARK(p) (((struct sk_buff *)(p))->nfmark) ++#define PKTSETMARK(p, m) ((struct sk_buff *)(p))->nfmark = (m) ++#endif /* 2.6.0 */ ++#else /* CONFIG_NF_CONNTRACK_MARK */ ++#define PKTMARK(p) 0 ++#define PKTSETMARK(p, m) ++#endif /* CONFIG_NF_CONNTRACK_MARK */ ++ ++#else /* BINOSL */ ++ ++/* Where to get the declarations for mem, str, printf, bcopy's? Two basic approaches. ++ * ++ * First, use the Linux header files and the C standard library replacmenent versions ++ * built-in to the kernel. 
Use this approach when compiling non hybrid code or compling ++ * the OS port files. The second approach is to use our own defines/prototypes and ++ * functions we have provided in the Linux OSL, i.e. linux_osl.c. Use this approach when ++ * compiling the files that make up the hybrid binary. We are ensuring we ++ * don't directly link to the kernel replacement routines from the hybrid binary. ++ * ++ * NOTE: The issue we are trying to avoid is any questioning of whether the ++ * hybrid binary is derived from Linux. The wireless common code (wlc) is designed ++ * to be OS independent through the use of the OSL API and thus the hybrid binary doesn't ++ * derive from the Linux kernel at all. But since we defined our OSL API to include ++ * a small collection of standard C library routines and these routines are provided in ++ * the kernel we want to avoid even the appearance of deriving at all even though clearly ++ * usage of a C standard library API doesn't represent a derivation from Linux. Lastly ++ * note at the time of this checkin 4 references to memcpy/memset could not be eliminated ++ * from the binary because they are created internally by GCC as part of things like ++ * structure assignment. I don't think the compiler should be doing this but there is ++ * no options to disable it on Intel architectures (there is for MIPS so somebody must ++ * agree with me). I may be able to even remove these references eventually with ++ * a GNU binutil such as objcopy via a symbol rename (i.e. memcpy to osl_memcpy). ++ */ ++#if !defined(LINUX_HYBRID) || defined(LINUX_PORT) ++ #define printf(fmt, args...) 
printk(fmt , ## args) ++ #include /* for vsn/printf's */ ++ #include /* for mem*, str* */ ++ /* bcopy's: Linux kernel doesn't provide these (anymore) */ ++ #define bcopy(src, dst, len) memcpy((dst), (src), (len)) ++ #define bcmp(b1, b2, len) memcmp((b1), (b2), (len)) ++ #define bzero(b, len) memset((b), '\0', (len)) ++ ++ /* These are provided only because when compiling linux_osl.c there ++ * must be an explicit prototype (separate from the definition) because ++ * we are compiling with GCC option -Wstrict-prototypes. Conversely ++ * these could be placed directly in linux_osl.c. ++ */ ++ extern int osl_printf(const char *format, ...); ++ extern int osl_sprintf(char *buf, const char *format, ...); ++ extern int osl_snprintf(char *buf, size_t n, const char *format, ...); ++ extern int osl_vsprintf(char *buf, const char *format, va_list ap); ++ extern int osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap); ++ extern int osl_strcmp(const char *s1, const char *s2); ++ extern int osl_strncmp(const char *s1, const char *s2, uint n); ++ extern int osl_strlen(const char *s); ++ extern char* osl_strcpy(char *d, const char *s); ++ extern char* osl_strncpy(char *d, const char *s, uint n); ++ extern char* osl_strchr(const char *s, int c); ++ extern char* osl_strrchr(const char *s, int c); ++ extern void *osl_memset(void *d, int c, size_t n); ++ extern void *osl_memcpy(void *d, const void *s, size_t n); ++ extern void *osl_memmove(void *d, const void *s, size_t n); ++ extern int osl_memcmp(const void *s1, const void *s2, size_t n); ++#else ++ ++ /* In the below defines we undefine the macro first in case it is ++ * defined. This shouldn't happen because we are not using Linux ++ * header files but because our Linux 2.4 make includes modversions.h ++ * through a GCC -include compile option, they get defined to point ++ * at the appropriate versioned symbol name. Note this doesn't ++ * happen with our Linux 2.6 makes. 
++ */ ++ ++ /* *printf functions */ ++ #include /* va_list needed for v*printf */ ++ #include /* size_t needed for *nprintf */ ++ #undef printf ++ #undef sprintf ++ #undef snprintf ++ #undef vsprintf ++ #undef vsnprintf ++ #define printf(fmt, args...) osl_printf((fmt) , ## args) ++ #define sprintf(buf, fmt, args...) osl_sprintf((buf), (fmt) , ## args) ++ #define snprintf(buf, n, fmt, args...) osl_snprintf((buf), (n), (fmt) , ## args) ++ #define vsprintf(buf, fmt, ap) osl_vsprintf((buf), (fmt), (ap)) ++ #define vsnprintf(buf, n, fmt, ap) osl_vsnprintf((buf), (n), (fmt), (ap)) ++ extern int osl_printf(const char *format, ...); ++ extern int osl_sprintf(char *buf, const char *format, ...); ++ extern int osl_snprintf(char *buf, size_t n, const char *format, ...); ++ extern int osl_vsprintf(char *buf, const char *format, va_list ap); ++ extern int osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap); ++ ++ /* str* functions */ ++ #undef strcmp ++ #undef strncmp ++ #undef strlen ++ #undef strcpy ++ #undef strncpy ++ #undef strchr ++ #undef strrchr ++ #define strcmp(s1, s2) osl_strcmp((s1), (s2)) ++ #define strncmp(s1, s2, n) osl_strncmp((s1), (s2), (n)) ++ #define strlen(s) osl_strlen((s)) ++ #define strcpy(d, s) osl_strcpy((d), (s)) ++ #define strncpy(d, s, n) osl_strncpy((d), (s), (n)) ++ #define strchr(s, c) osl_strchr((s), (c)) ++ #define strrchr(s, c) osl_strrchr((s), (c)) ++ extern int osl_strcmp(const char *s1, const char *s2); ++ extern int osl_strncmp(const char *s1, const char *s2, uint n); ++ extern int osl_strlen(const char *s); ++ extern char* osl_strcpy(char *d, const char *s); ++ extern char* osl_strncpy(char *d, const char *s, uint n); ++ extern char* osl_strchr(const char *s, int c); ++ extern char* osl_strrchr(const char *s, int c); ++ ++ /* mem* functions */ ++ #undef memset ++ #undef memcpy ++ #undef memcmp ++ #define memset(d, c, n) osl_memset((d), (c), (n)) ++ #define memcpy(d, s, n) osl_memcpy((d), (s), (n)) ++ #define memmove(d, s, 
n) osl_memmove((d), (s), (n)) ++ #define memcmp(s1, s2, n) osl_memcmp((s1), (s2), (n)) ++ extern void *osl_memset(void *d, int c, size_t n); ++ extern void *osl_memcpy(void *d, const void *s, size_t n); ++ extern void *osl_memmove(void *d, const void *s, size_t n); ++ extern int osl_memcmp(const void *s1, const void *s2, size_t n); ++ ++ /* bcopy, bcmp, and bzero functions */ ++ #undef bcopy ++ #undef bcmp ++ #undef bzero ++ #define bcopy(src, dst, len) osl_memcpy((dst), (src), (len)) ++ #define bcmp(b1, b2, len) osl_memcmp((b1), (b2), (len)) ++ #define bzero(b, len) osl_memset((b), '\0', (len)) ++#endif /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */ ++ ++/* register access macros */ ++#define R_REG(osh, r) (\ ++ sizeof(*(r)) == sizeof(uint8) ? osl_readb((volatile uint8*)(r)) : \ ++ sizeof(*(r)) == sizeof(uint16) ? osl_readw((volatile uint16*)(r)) : \ ++ osl_readl((volatile uint32*)(r)) \ ++) ++#define W_REG(osh, r, v) do { \ ++ switch (sizeof(*(r))) { \ ++ case sizeof(uint8): osl_writeb((uint8)(v), (volatile uint8*)(r)); break; \ ++ case sizeof(uint16): osl_writew((uint16)(v), (volatile uint16*)(r)); break; \ ++ case sizeof(uint32): osl_writel((uint32)(v), (volatile uint32*)(r)); break; \ ++ } \ ++} while (0) ++ ++#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) ++#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) ++extern uint8 osl_readb(volatile uint8 *r); ++extern uint16 osl_readw(volatile uint16 *r); ++extern uint32 osl_readl(volatile uint32 *r); ++extern void osl_writeb(uint8 v, volatile uint8 *r); ++extern void osl_writew(uint16 v, volatile uint16 *r); ++extern void osl_writel(uint32 v, volatile uint32 *r); ++ ++/* system up time in ms */ ++#define OSL_SYSUPTIME() osl_sysuptime() ++extern uint32 osl_sysuptime(void); ++ ++/* uncached/cached virtual address */ ++#define OSL_UNCACHED(va) osl_uncached((va)) ++extern void *osl_uncached(void *va); ++#define OSL_CACHED(va) osl_cached((va)) ++extern void *osl_cached(void *va); ++ 
++#define OSL_PREF_RANGE_LD(va, sz) ++#define OSL_PREF_RANGE_ST(va, sz) ++ ++/* get processor cycle count */ ++#define OSL_GETCYCLES(x) ((x) = osl_getcycles()) ++extern uint osl_getcycles(void); ++ ++/* dereference an address that may target abort */ ++#define BUSPROBE(val, addr) osl_busprobe(&(val), (addr)) ++extern int osl_busprobe(uint32 *val, uint32 addr); ++ ++/* map/unmap physical to virtual */ ++#define REG_MAP(pa, size) osl_reg_map((pa), (size)) ++#define REG_UNMAP(va) osl_reg_unmap((va)) ++extern void *osl_reg_map(uint32 pa, uint size); ++extern void osl_reg_unmap(void *va); ++ ++/* shared (dma-able) memory access macros */ ++#define R_SM(r) *(r) ++#define W_SM(r, v) (*(r) = (v)) ++#define BZERO_SM(r, len) bzero((r), (len)) ++ ++/* packet primitives */ ++#define PKTGET(osh, len, send) osl_pktget((osh), (len)) ++#define PKTDUP(osh, skb) osl_pktdup((osh), (skb)) ++#define PKTFRMNATIVE(osh, skb) osl_pkt_frmnative((osh), (skb)) ++#define PKTLIST_DUMP(osh, buf) ++#define PKTDBG_TRACE(osh, pkt, bit) ++#define PKTFREE(osh, skb, send) osl_pktfree((osh), (skb), (send)) ++#define PKTDATA(osh, skb) osl_pktdata((osh), (skb)) ++#define PKTLEN(osh, skb) osl_pktlen((osh), (skb)) ++#define PKTHEADROOM(osh, skb) osl_pktheadroom((osh), (skb)) ++#define PKTTAILROOM(osh, skb) osl_pkttailroom((osh), (skb)) ++#define PKTNEXT(osh, skb) osl_pktnext((osh), (skb)) ++#define PKTSETNEXT(osh, skb, x) osl_pktsetnext((skb), (x)) ++#define PKTSETLEN(osh, skb, len) osl_pktsetlen((osh), (skb), (len)) ++#define PKTPUSH(osh, skb, bytes) osl_pktpush((osh), (skb), (bytes)) ++#define PKTPULL(osh, skb, bytes) osl_pktpull((osh), (skb), (bytes)) ++#define PKTTAG(skb) osl_pkttag((skb)) ++#define PKTTONATIVE(osh, pkt) osl_pkt_tonative((osh), (pkt)) ++#define PKTLINK(skb) osl_pktlink((skb)) ++#define PKTSETLINK(skb, x) osl_pktsetlink((skb), (x)) ++#define PKTPRIO(skb) osl_pktprio((skb)) ++#define PKTSETPRIO(skb, x) osl_pktsetprio((skb), (x)) ++#define PKTSHARED(skb) osl_pktshared((skb)) ++#define 
PKTSETPOOL(osh, skb, x, y) do {} while (0) ++#define PKTPOOL(osh, skb) FALSE ++ ++extern void *osl_pktget(osl_t *osh, uint len); ++extern void *osl_pktdup(osl_t *osh, void *skb); ++extern void *osl_pkt_frmnative(osl_t *osh, void *skb); ++extern void osl_pktfree(osl_t *osh, void *skb, bool send); ++extern uchar *osl_pktdata(osl_t *osh, void *skb); ++extern uint osl_pktlen(osl_t *osh, void *skb); ++extern uint osl_pktheadroom(osl_t *osh, void *skb); ++extern uint osl_pkttailroom(osl_t *osh, void *skb); ++extern void *osl_pktnext(osl_t *osh, void *skb); ++extern void osl_pktsetnext(void *skb, void *x); ++extern void osl_pktsetlen(osl_t *osh, void *skb, uint len); ++extern uchar *osl_pktpush(osl_t *osh, void *skb, int bytes); ++extern uchar *osl_pktpull(osl_t *osh, void *skb, int bytes); ++extern void *osl_pkttag(void *skb); ++extern void *osl_pktlink(void *skb); ++extern void osl_pktsetlink(void *skb, void *x); ++extern uint osl_pktprio(void *skb); ++extern void osl_pktsetprio(void *skb, uint x); ++extern struct sk_buff *osl_pkt_tonative(osl_t *osh, void *pkt); ++extern bool osl_pktshared(void *skb); ++ ++ ++#endif /* BINOSL */ ++ ++#define PKTALLOCED(osh) osl_pktalloced(osh) ++extern uint osl_pktalloced(osl_t *osh); ++ ++#define DMA_MAP(osh, va, size, direction, p, dmah) \ ++ osl_dma_map((osh), (va), (size), (direction), (p), (dmah)) ++ ++#else /* ! BCMDRIVER */ ++ ++ ++/* ASSERT */ ++ #define ASSERT(exp) do {} while (0) ++ ++/* MALLOC and MFREE */ ++#define MALLOC(o, l) malloc(l) ++#define MFREE(o, p, l) free(p) ++#include ++ ++/* str* and mem* functions */ ++#include ++ ++/* *printf functions */ ++#include ++ ++/* bcopy, bcmp, and bzero */ ++extern void bcopy(const void *src, void *dst, size_t len); ++extern int bcmp(const void *b1, const void *b2, size_t len); ++extern void bzero(void *b, size_t len); ++#endif /* ! 
BCMDRIVER */ ++ ++#endif /* _linux_osl_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/linuxver.h b/drivers/net/ethernet/broadcom/gmac/src/include/linuxver.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/linuxver.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/linuxver.h 2017-11-09 17:53:43.969290000 +0800 +@@ -0,0 +1,662 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Linux-specific abstractions to gain some independence from linux kernel versions. ++ * Pave over some 2.2 versus 2.4 versus 2.6 kernel differences. 
++ * ++ * $Id: linuxver.h 312774 2012-02-03 22:20:14Z $ ++ */ ++ ++#ifndef _linuxver_h_ ++#define _linuxver_h_ ++ ++#include ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) ++#include ++#else ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 33)) ++#include ++#else ++#include ++#endif ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ ++#include ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0)) ++/* __NO_VERSION__ must be defined for all linkables except one in 2.2 */ ++#ifdef __UNDEF_NO_VERSION__ ++#undef __NO_VERSION__ ++#else ++#define __NO_VERSION__ ++#endif ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 0) */ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 0) ++#define module_param(_name_, _type_, _perm_) MODULE_PARM(_name_, "i") ++#define module_param_string(_name_, _string_, _size_, _perm_) \ ++ MODULE_PARM(_string_, "c" __MODULE_STRING(_size_)) ++#endif ++ ++/* linux/malloc.h is deprecated, use linux/slab.h instead. */ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 9)) ++#include ++#else ++#include ++#endif ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) ++#include ++#else ++#include ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) */ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) ++#undef IP_TOS ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 28)) */ ++#include ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41)) ++#include ++#else ++#include ++#ifndef work_struct ++#define work_struct tq_struct ++#endif ++#ifndef INIT_WORK ++#define INIT_WORK(_work, _func, _data) INIT_TQUEUE((_work), (_func), (_data)) ++#endif ++#ifndef schedule_work ++#define schedule_work(_work) schedule_task((_work)) ++#endif ++#ifndef flush_scheduled_work ++#define flush_scheduled_work() flush_scheduled_tasks() ++#endif ++#endif /* LINUX_VERSION_CODE > KERNEL_VERSION(2, 5, 41) */ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) 
++#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func) ++#else ++#define MY_INIT_WORK(_work, _func) INIT_WORK(_work, _func, _work) ++typedef void (*work_func_t)(void *work); ++#endif /* >= 2.6.20 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) ++/* Some distributions have their own 2.6.x compatibility layers */ ++#ifndef IRQ_NONE ++typedef void irqreturn_t; ++#define IRQ_NONE ++#define IRQ_HANDLED ++#define IRQ_RETVAL(x) ++#endif ++#else ++typedef irqreturn_t(*FN_ISR) (int irq, void *dev_id, struct pt_regs *ptregs); ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0) */ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 18) ++#define IRQF_SHARED SA_SHIRQ ++#endif /* < 2.6.18 */ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 17) ++#ifdef CONFIG_NET_RADIO ++#define CONFIG_WIRELESS_EXT ++#endif ++#endif /* < 2.6.17 */ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) ++#define MOD_INC_USE_COUNT ++#define MOD_DEC_USE_COUNT ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 5, 67) */ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 32) ++#include ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) ++#include ++#endif ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 29) ++#include ++#else ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 14) ++#include ++#endif ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 30) */ ++ ++#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) ++#include ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 27) */ ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 36) ++#include ++#include ++#endif ++#include ++#include ++#include ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 69)) ++/* In 2.5 (as of 2.5.69 at least) there is a cs_error exported which ++ * does this, but it's not in 2.4 so we do our own for now. 
++ */ ++static inline void ++cs_error(client_handle_t handle, int func, int ret) ++{ ++ error_info_t err = { func, ret }; ++ CardServices(ReportError, handle, &err); ++} ++#endif ++ ++#if (LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 16)) ++ ++typedef struct pcmcia_device dev_link_t; ++ ++#endif ++ ++#endif /* CONFIG_PCMCIA */ ++ ++#ifndef __exit ++#define __exit ++#endif ++#ifndef __devexit ++#define __devexit ++#endif ++#ifndef __devinit ++#define __devinit __init ++#endif ++#ifndef __devinitdata ++#define __devinitdata ++#endif ++#ifndef __devexit_p ++#define __devexit_p(x) x ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 0)) ++ ++#define pci_get_drvdata(dev) (dev)->sysdata ++#define pci_set_drvdata(dev, value) (dev)->sysdata = (value) ++ ++/* ++ * New-style (2.4.x) PCI/hot-pluggable PCI/CardBus registration ++ */ ++ ++struct pci_device_id { ++ unsigned int vendor, device; /* Vendor and device ID or PCI_ANY_ID */ ++ unsigned int subvendor, subdevice; /* Subsystem ID's or PCI_ANY_ID */ ++ unsigned int class, class_mask; /* (class,subclass,prog-if) triplet */ ++ unsigned long driver_data; /* Data private to the driver */ ++}; ++ ++struct pci_driver { ++ struct list_head node; ++ char *name; ++ const struct pci_device_id *id_table; /* NULL if wants all devices */ ++ int (*probe)(struct pci_dev *dev, ++ const struct pci_device_id *id); /* New device inserted */ ++ void (*remove)(struct pci_dev *dev); /* Device removed (NULL if not a hot-plug ++ * capable driver) ++ */ ++ void (*suspend)(struct pci_dev *dev); /* Device suspended */ ++ void (*resume)(struct pci_dev *dev); /* Device woken up */ ++}; ++ ++#define MODULE_DEVICE_TABLE(type, name) ++#define PCI_ANY_ID (~0) ++ ++/* compatpci.c */ ++#define pci_module_init pci_register_driver ++extern int pci_register_driver(struct pci_driver *drv); ++extern void pci_unregister_driver(struct pci_driver *drv); ++ ++#endif /* PCI registration */ ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 18)) ++#define 
pci_module_init pci_register_driver ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18)) ++#ifdef MODULE ++#define module_init(x) int init_module(void) { return x(); } ++#define module_exit(x) void cleanup_module(void) { x(); } ++#else ++#define module_init(x) __initcall(x); ++#define module_exit(x) __exitcall(x); ++#endif ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 2, 18) */ ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) ++#define WL_USE_NETDEV_OPS ++#else ++#undef WL_USE_NETDEV_OPS ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) && defined(CONFIG_RFKILL) ++#define WL_CONFIG_RFKILL ++#else ++#undef WL_CONFIG_RFKILL ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 48)) ++#define list_for_each(pos, head) \ ++ for (pos = (head)->next; pos != (head); pos = pos->next) ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 13)) ++#define pci_resource_start(dev, bar) ((dev)->base_address[(bar)]) ++#elif (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 44)) ++#define pci_resource_start(dev, bar) ((dev)->resource[(bar)].start) ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 23)) ++#define pci_enable_device(dev) do { } while (0) ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 14)) ++#define net_device device ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 42)) ++ ++/* ++ * DMA mapping ++ * ++ * See linux/Documentation/DMA-mapping.txt ++ */ ++ ++#ifndef PCI_DMA_TODEVICE ++#define PCI_DMA_TODEVICE 1 ++#define PCI_DMA_FROMDEVICE 2 ++#endif ++ ++typedef u32 dma_addr_t; ++ ++/* Pure 2^n version of get_order */ ++static inline int get_order(unsigned long size) ++{ ++ int order; ++ ++ size = (size-1) >> (PAGE_SHIFT-1); ++ order = -1; ++ do { ++ size >>= 1; ++ order++; ++ } while (size); ++ return order; ++} ++ ++static inline void *pci_alloc_consistent(struct pci_dev *hwdev, size_t size, ++ dma_addr_t *dma_handle) ++{ ++ void *ret; ++ int gfp = GFP_ATOMIC | GFP_DMA; ++ ++ ret = (void 
*)__get_free_pages(gfp, get_order(size)); ++ ++ if (ret != NULL) { ++ memset(ret, 0, size); ++ *dma_handle = virt_to_bus(ret); ++ } ++ return ret; ++} ++static inline void pci_free_consistent(struct pci_dev *hwdev, size_t size, ++ void *vaddr, dma_addr_t dma_handle) ++{ ++ free_pages((unsigned long)vaddr, get_order(size)); ++} ++#ifdef ILSIM ++extern uint pci_map_single(void *dev, void *va, uint size, int direction); ++extern void pci_unmap_single(void *dev, uint pa, uint size, int direction); ++#else ++#define pci_map_single(cookie, address, size, dir) virt_to_bus(address) ++#define pci_unmap_single(cookie, address, size, dir) ++#endif ++ ++#endif /* DMA mapping */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 3, 43)) ++ ++#define dev_kfree_skb_any(a) dev_kfree_skb(a) ++#define netif_down(dev) do { (dev)->start = 0; } while (0) ++ ++/* pcmcia-cs provides its own netdevice compatibility layer */ ++#ifndef _COMPAT_NETDEVICE_H ++ ++/* ++ * SoftNet ++ * ++ * For pre-softnet kernels we need to tell the upper layer not to ++ * re-enter start_xmit() while we are in there. However softnet ++ * guarantees not to enter while we are in there so there is no need ++ * to do the netif_stop_queue() dance unless the transmit queue really ++ * gets stuck. This should also improve performance according to tests ++ * done by Aman Singla. 
++ */ ++ ++#define dev_kfree_skb_irq(a) dev_kfree_skb(a) ++#define netif_wake_queue(dev) \ ++ do { clear_bit(0, &(dev)->tbusy); mark_bh(NET_BH); } while (0) ++#define netif_stop_queue(dev) set_bit(0, &(dev)->tbusy) ++ ++static inline void netif_start_queue(struct net_device *dev) ++{ ++ dev->tbusy = 0; ++ dev->interrupt = 0; ++ dev->start = 1; ++} ++ ++#define netif_queue_stopped(dev) (dev)->tbusy ++#define netif_running(dev) (dev)->start ++ ++#endif /* _COMPAT_NETDEVICE_H */ ++ ++#define netif_device_attach(dev) netif_start_queue(dev) ++#define netif_device_detach(dev) netif_stop_queue(dev) ++ ++/* 2.4.x renamed bottom halves to tasklets */ ++#define tasklet_struct tq_struct ++static inline void tasklet_schedule(struct tasklet_struct *tasklet) ++{ ++ queue_task(tasklet, &tq_immediate); ++ mark_bh(IMMEDIATE_BH); ++} ++ ++static inline void tasklet_init(struct tasklet_struct *tasklet, ++ void (*func)(unsigned long), ++ unsigned long data) ++{ ++ tasklet->next = NULL; ++ tasklet->sync = 0; ++ tasklet->routine = (void (*)(void *))func; ++ tasklet->data = (void *)data; ++} ++#define tasklet_kill(tasklet) { do {} while (0); } ++ ++/* 2.4.x introduced del_timer_sync() */ ++#define del_timer_sync(timer) del_timer(timer) ++ ++#else ++ ++#define netif_down(dev) ++ ++#endif /* SoftNet */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3)) ++ ++/* ++ * Emit code to initialise a tq_struct's routine and data pointers ++ */ ++#define PREPARE_TQUEUE(_tq, _routine, _data) \ ++ do { \ ++ (_tq)->routine = _routine; \ ++ (_tq)->data = _data; \ ++ } while (0) ++ ++/* ++ * Emit code to initialise all of a tq_struct ++ */ ++#define INIT_TQUEUE(_tq, _routine, _data) \ ++ do { \ ++ INIT_LIST_HEAD(&(_tq)->list); \ ++ (_tq)->sync = 0; \ ++ PREPARE_TQUEUE((_tq), (_routine), (_data)); \ ++ } while (0) ++ ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 3) */ ++ ++/* Power management related macro & routines */ ++#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 9) ++#define 
PCI_SAVE_STATE(a, b) pci_save_state(a) ++#define PCI_RESTORE_STATE(a, b) pci_restore_state(a) ++#else ++#define PCI_SAVE_STATE(a, b) pci_save_state(a, b) ++#define PCI_RESTORE_STATE(a, b) pci_restore_state(a, b) ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 6)) ++static inline int ++pci_save_state(struct pci_dev *dev, u32 *buffer) ++{ ++ int i; ++ if (buffer) { ++ for (i = 0; i < 16; i++) ++ pci_read_config_dword(dev, i * 4, &buffer[i]); ++ } ++ return 0; ++} ++ ++static inline int ++pci_restore_state(struct pci_dev *dev, u32 *buffer) ++{ ++ int i; ++ ++ if (buffer) { ++ for (i = 0; i < 16; i++) ++ pci_write_config_dword(dev, i * 4, buffer[i]); ++ } ++ /* ++ * otherwise, write the context information we know from bootup. ++ * This works around a problem where warm-booting from Windows ++ * combined with a D3(hot)->D0 transition causes PCI config ++ * header data to be forgotten. ++ */ ++ else { ++ for (i = 0; i < 6; i ++) ++ pci_write_config_dword(dev, ++ PCI_BASE_ADDRESS_0 + (i * 4), ++ pci_resource_start(dev, i)); ++ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, dev->irq); ++ } ++ return 0; ++} ++#endif /* PCI power management */ ++ ++/* Old cp0 access macros deprecated in 2.4.19 */ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 4, 19)) ++#define read_c0_count() read_32bit_cp0_register(CP0_COUNT) ++#endif ++ ++/* Module refcount handled internally in 2.6.x */ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) ++#ifndef SET_MODULE_OWNER ++#define SET_MODULE_OWNER(dev) do {} while (0) ++#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT ++#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT ++#else ++#define OLD_MOD_INC_USE_COUNT do {} while (0) ++#define OLD_MOD_DEC_USE_COUNT do {} while (0) ++#endif ++#else /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */ ++#ifndef SET_MODULE_OWNER ++#define SET_MODULE_OWNER(dev) do {} while (0) ++#endif ++#ifndef MOD_INC_USE_COUNT ++#define MOD_INC_USE_COUNT do {} while (0) ++#endif ++#ifndef MOD_DEC_USE_COUNT 
++#define MOD_DEC_USE_COUNT do {} while (0) ++#endif ++#define OLD_MOD_INC_USE_COUNT MOD_INC_USE_COUNT ++#define OLD_MOD_DEC_USE_COUNT MOD_DEC_USE_COUNT ++#endif /* LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24) */ ++ ++#ifndef SET_NETDEV_DEV ++#define SET_NETDEV_DEV(net, pdev) do {} while (0) ++#endif ++ ++#ifndef HAVE_FREE_NETDEV ++#define free_netdev(dev) kfree(dev) ++#endif ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) ++/* struct packet_type redefined in 2.6.x */ ++#define af_packet_priv data ++#endif ++ ++/* suspend args */ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 11) ++#define DRV_SUSPEND_STATE_TYPE pm_message_t ++#else ++#define DRV_SUSPEND_STATE_TYPE uint32 ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19) ++#define CHECKSUM_HW CHECKSUM_PARTIAL ++#endif ++ ++typedef struct { ++ void *parent; /* some external entity that the thread supposed to work for */ ++ struct task_struct *p_task; ++ long thr_pid; ++ int prio; /* priority */ ++ struct semaphore sema; ++ int terminated; ++ struct completion completed; ++} tsk_ctl_t; ++ ++ ++/* requires tsk_ctl_t tsk argument, the caller's priv data is passed in owner ptr */ ++/* note this macro assumes there may be only one context waiting on thread's completion */ ++#ifdef DHD_DEBUG ++#define DBG_THR(x) printk x ++#else ++#define DBG_THR(x) ++#endif ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) ++#define SMP_RD_BARRIER_DEPENDS(x) smp_read_barrier_depends(x) ++#else ++#define SMP_RD_BARRIER_DEPENDS(x) smp_rmb(x) ++#endif ++ ++ ++#define PROC_START(thread_func, owner, tsk_ctl, flags) \ ++{ \ ++ sema_init(&((tsk_ctl)->sema), 0); \ ++ init_completion(&((tsk_ctl)->completed)); \ ++ (tsk_ctl)->parent = owner; \ ++ (tsk_ctl)->terminated = FALSE; \ ++ (tsk_ctl)->thr_pid = kernel_thread(thread_func, tsk_ctl, flags); \ ++ if ((tsk_ctl)->thr_pid > 0) \ ++ wait_for_completion(&((tsk_ctl)->completed)); \ ++ DBG_THR(("%s thr:%lx started\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \ ++} ++ 
++#define PROC_STOP(tsk_ctl) \ ++{ \ ++ (tsk_ctl)->terminated = TRUE; \ ++ smp_wmb(); \ ++ up(&((tsk_ctl)->sema)); \ ++ wait_for_completion(&((tsk_ctl)->completed)); \ ++ DBG_THR(("%s thr:%lx terminated OK\n", __FUNCTION__, (tsk_ctl)->thr_pid)); \ ++ (tsk_ctl)->thr_pid = -1; \ ++} ++ ++/* ----------------------- */ ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31)) ++#define KILL_PROC(nr, sig) \ ++{ \ ++struct task_struct *tsk; \ ++struct pid *pid; \ ++pid = find_get_pid((pid_t)nr); \ ++tsk = pid_task(pid, PIDTYPE_PID); \ ++if (tsk) send_sig(sig, tsk, 1); \ ++} ++#else ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 27)) && (LINUX_VERSION_CODE <= \ ++ KERNEL_VERSION(2, 6, 30)) ++#define KILL_PROC(pid, sig) \ ++{ \ ++ struct task_struct *tsk; \ ++ tsk = find_task_by_vpid(pid); \ ++ if (tsk) send_sig(sig, tsk, 1); \ ++} ++#else ++#define KILL_PROC(pid, sig) \ ++{ \ ++ kill_proc(pid, sig, 1); \ ++} ++#endif ++#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 31) */ ++ ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) ++#include ++#include ++#else ++#include ++ ++#define __wait_event_interruptible_timeout(wq, condition, ret) \ ++do { \ ++ wait_queue_t __wait; \ ++ init_waitqueue_entry(&__wait, current); \ ++ \ ++ add_wait_queue(&wq, &__wait); \ ++ for (;;) { \ ++ set_current_state(TASK_INTERRUPTIBLE); \ ++ if (condition) \ ++ break; \ ++ if (!signal_pending(current)) { \ ++ ret = schedule_timeout(ret); \ ++ if (!ret) \ ++ break; \ ++ continue; \ ++ } \ ++ ret = -ERESTARTSYS; \ ++ break; \ ++ } \ ++ current->state = TASK_RUNNING; \ ++ remove_wait_queue(&wq, &__wait); \ ++} while (0) ++ ++#define wait_event_interruptible_timeout(wq, condition, timeout) \ ++({ \ ++ long __ret = timeout; \ ++ if (!(condition)) \ ++ __wait_event_interruptible_timeout(wq, condition, __ret); \ ++ __ret; \ ++}) ++ ++#endif /* (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 0)) */ ++ ++/* ++For < 2.6.24, wl creates its own netdev but doesn't ++align the priv area like the genuine 
alloc_netdev(). ++Since netdev_priv() always gives us the aligned address, it will ++not match our unaligned address for < 2.6.24 ++*/ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 24)) ++#define DEV_PRIV(dev) (dev->priv) ++#else ++#define DEV_PRIV(dev) netdev_priv(dev) ++#endif ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 20) ++#define WL_ISR(i, d, p) wl_isr((i), (d)) ++#else ++#define WL_ISR(i, d, p) wl_isr((i), (d), (p)) ++#endif /* < 2.6.20 */ ++ ++#if (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) ++#define netdev_priv(dev) dev->priv ++#endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 0)) */ ++ ++#endif /* _linuxver_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/osl.h b/drivers/net/ethernet/broadcom/gmac/src/include/osl.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/osl.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/osl.h 2017-11-09 17:53:43.969308000 +0800 +@@ -0,0 +1,143 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * OS Abstraction Layer ++ * ++ * $Id: osl.h 321101 2012-03-14 02:53:01Z $ ++ */ ++ ++#ifndef _osl_h_ ++#define _osl_h_ ++ ++/* osl handle type forward declaration */ ++typedef struct osl_info osl_t; ++typedef struct osl_dmainfo osldma_t; ++ ++#define OSL_PKTTAG_SZ 32 /* Size of PktTag */ ++ ++/* Drivers use PKTFREESETCB to register a callback function when a packet is freed by OSL */ ++typedef void (*pktfree_cb_fn_t)(void *ctx, void *pkt, unsigned int status); ++ ++/* Drivers use REGOPSSET() to register register read/write funcitons */ ++typedef unsigned int (*osl_rreg_fn_t)(void *ctx, volatile void *reg, unsigned int size); ++typedef void (*osl_wreg_fn_t)(void *ctx, volatile void *reg, unsigned int val, unsigned int size); ++ ++#ifdef __mips__ ++#define PREF_LOAD 0 ++#define PREF_STORE 1 ++#define PREF_LOAD_STREAMED 4 ++#define PREF_STORE_STREAMED 5 ++#define PREF_LOAD_RETAINED 6 ++#define PREF_STORE_RETAINED 7 ++#define PREF_WBACK_INV 25 ++#define PREF_PREPARE4STORE 30 ++ ++ ++#define MAKE_PREFETCH_FN(hint) \ ++static inline void prefetch_##hint(const void *addr) \ ++{ \ ++ __asm__ __volatile__(\ ++ " .set mips4 \n" \ ++ " pref %0, (%1) \n" \ ++ " .set mips0 \n" \ ++ : \ ++ : "i" (hint), "r" (addr)); \ ++} ++ ++#define MAKE_PREFETCH_RANGE_FN(hint) \ ++static inline void prefetch_range_##hint(const void *addr, int len) \ ++{ \ ++ int size = len; \ ++ while (size > 0) { \ ++ prefetch_##hint(addr); \ ++ size -= 32; \ ++ } \ ++} ++ ++MAKE_PREFETCH_FN(PREF_LOAD) ++MAKE_PREFETCH_RANGE_FN(PREF_LOAD) ++MAKE_PREFETCH_FN(PREF_STORE) ++MAKE_PREFETCH_RANGE_FN(PREF_STORE) ++MAKE_PREFETCH_FN(PREF_LOAD_STREAMED) ++MAKE_PREFETCH_RANGE_FN(PREF_LOAD_STREAMED) ++MAKE_PREFETCH_FN(PREF_STORE_STREAMED) ++MAKE_PREFETCH_RANGE_FN(PREF_STORE_STREAMED) ++MAKE_PREFETCH_FN(PREF_LOAD_RETAINED) ++MAKE_PREFETCH_RANGE_FN(PREF_LOAD_RETAINED) ++MAKE_PREFETCH_FN(PREF_STORE_RETAINED) ++MAKE_PREFETCH_RANGE_FN(PREF_STORE_RETAINED) ++#endif /* __mips__ */ ++ ++#if defined(linux) ++#include 
++#else ++#error "Unsupported OSL requested" ++#endif ++ ++#ifndef PKTDBG_TRACE ++#define PKTDBG_TRACE(osh, pkt, bit) ++#endif ++ ++#ifndef PKTCTFMAP ++#define PKTCTFMAP(osh, p) ++#endif /* PKTCTFMAP */ ++ ++/* -------------------------------------------------------------------------- ++** Register manipulation macros. ++*/ ++ ++#define SET_REG(osh, r, mask, val) W_REG((osh), (r), ((R_REG((osh), r) & ~(mask)) | (val))) ++ ++#ifndef AND_REG ++#define AND_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) & (v)) ++#endif /* !AND_REG */ ++ ++#ifndef OR_REG ++#define OR_REG(osh, r, v) W_REG(osh, (r), R_REG(osh, r) | (v)) ++#endif /* !OR_REG */ ++ ++#if !defined(OSL_SYSUPTIME) ++#define OSL_SYSUPTIME() (0) ++#define OSL_SYSUPTIME_SUPPORT FALSE ++#else ++#define OSL_SYSUPTIME_SUPPORT TRUE ++#endif /* OSL_SYSUPTIME */ ++ ++#define PKTCGETATTR(s) (0) ++#define PKTCSETATTR(skb, f, p, b) ++#define PKTCCLRATTR(skb) ++#define PKTCCNT(skb) (1) ++#define PKTCLEN(skb) PKTLEN(NULL, skb) ++#define PKTCGETFLAGS(skb) (0) ++#define PKTCSETFLAGS(skb, f) ++#define PKTCCLRFLAGS(skb) ++#define PKTCFLAGS(skb) (0) ++#define PKTCSETCNT(skb, c) ++#define PKTCINCRCNT(skb) ++#define PKTCADDCNT(skb, c) ++#define PKTCSETLEN(skb, l) ++#define PKTCADDLEN(skb, l) ++#define PKTCSETFLAG(skb, fb) ++#define PKTCCLRFLAG(skb, fb) ++#define PKTCLINK(skb) NULL ++#define PKTSETCLINK(skb, x) ++#undef PKTISCHAINED ++#define PKTISCHAINED(skb) FALSE ++#define FOREACH_CHAINED_PKT(skb, nskb) \ ++ for ((nskb) = NULL; (skb) != NULL; (skb) = (nskb)) ++#define PKTCFREE PKTFREE ++ ++ ++#endif /* _osl_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/packed_section_end.h b/drivers/net/ethernet/broadcom/gmac/src/include/packed_section_end.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/packed_section_end.h 1970-01-01 08:00:00.000000000 +0800 ++++ 
b/drivers/net/ethernet/broadcom/gmac/src/include/packed_section_end.h 2017-11-09 17:53:43.970305000 +0800 +@@ -0,0 +1,71 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Declare directives for structure packing. No padding will be provided ++ * between the members of packed structures, and therefore, there is no ++ * guarantee that structure members will be aligned. ++ * ++ * Declaring packed structures is compiler specific. In order to handle all ++ * cases, packed structures should be delared as: ++ * ++ * #include ++ * ++ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { ++ * some_struct_members; ++ * } BWL_POST_PACKED_STRUCT foobar_t; ++ * ++ * #include ++ * ++ * ++ * $Id: packed_section_end.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++ ++/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h ++ * and undefined in packed_section_end.h. If it is NOT defined at this ++ * point, then there is a missing include of packed_section_start.h. ++ */ ++#ifdef BWL_PACKED_SECTION ++ #undef BWL_PACKED_SECTION ++#else ++ #error "BWL_PACKED_SECTION is NOT defined!" ++#endif ++ ++ ++#if defined(_MSC_VER) ++ /* Disable compiler warning about pragma pack changing alignment. 
*/ ++ #pragma warning(disable:4103) ++ ++ /* The Microsoft compiler uses pragmas for structure packing. Other ++ * compilers use structure attribute modifiers. Refer to ++ * BWL_PRE_PACKED_STRUCT and BWL_POST_PACKED_STRUCT defined in ++ * typedefs.h ++ */ ++ #if defined(BWL_DEFAULT_PACKING) ++ /* require default structure packing */ ++ #pragma pack(pop) ++ #undef BWL_DEFAULT_PACKING ++ #else /* BWL_PACKED_SECTION */ ++ #pragma pack() ++ #endif /* BWL_PACKED_SECTION */ ++#endif /* _MSC_VER */ ++ ++ ++/* Compiler-specific directives for structure packing are declared in ++ * packed_section_start.h. This marks the end of the structure packing section, ++ * so, undef them here. ++ */ ++#undef BWL_PRE_PACKED_STRUCT ++#undef BWL_POST_PACKED_STRUCT +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/packed_section_start.h b/drivers/net/ethernet/broadcom/gmac/src/include/packed_section_start.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/packed_section_start.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/packed_section_start.h 2017-11-09 17:53:43.971302000 +0800 +@@ -0,0 +1,76 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Declare directives for structure packing. No padding will be provided ++ * between the members of packed structures, and therefore, there is no ++ * guarantee that structure members will be aligned. ++ * ++ * Declaring packed structures is compiler specific. In order to handle all ++ * cases, packed structures should be delared as: ++ * ++ * #include ++ * ++ * typedef BWL_PRE_PACKED_STRUCT struct foobar_t { ++ * some_struct_members; ++ * } BWL_POST_PACKED_STRUCT foobar_t; ++ * ++ * #include ++ * ++ * ++ * $Id: packed_section_start.h 286783 2011-09-29 06:18:57Z $ ++ */ ++ ++ ++/* Error check - BWL_PACKED_SECTION is defined in packed_section_start.h ++ * and undefined in packed_section_end.h. If it is already defined at this ++ * point, then there is a missing include of packed_section_end.h. ++ */ ++#ifdef BWL_PACKED_SECTION ++ #error "BWL_PACKED_SECTION is already defined!" ++#else ++ #define BWL_PACKED_SECTION ++#endif ++ ++ ++#if defined(_MSC_VER) ++ /* Disable compiler warning about pragma pack changing alignment. */ ++ #pragma warning(disable:4103) ++ ++ /* The Microsoft compiler uses pragmas for structure packing. Other ++ * compilers use structure attribute modifiers. Refer to ++ * BWL_PRE_PACKED_STRUCT and BWL_POST_PACKED_STRUCT defined below. ++ */ ++ #if defined(BWL_DEFAULT_PACKING) ++ /* Default structure packing */ ++ #pragma pack(push, 8) ++ #else /* BWL_PACKED_SECTION */ ++ #pragma pack(1) ++ #endif /* BWL_PACKED_SECTION */ ++#endif /* _MSC_VER */ ++ ++ ++/* Declare compiler-specific directives for structure packing. 
*/ ++#if defined(_MSC_VER) ++ #define BWL_PRE_PACKED_STRUCT ++ #define BWL_POST_PACKED_STRUCT ++#elif defined(__GNUC__) || defined(__lint) ++ #define BWL_PRE_PACKED_STRUCT ++ #define BWL_POST_PACKED_STRUCT __attribute__ ((packed)) ++#elif defined(__CC_ARM) ++ #define BWL_PRE_PACKED_STRUCT __packed ++ #define BWL_POST_PACKED_STRUCT ++#else ++ #error "Unknown compiler!" ++#endif +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/pcicfg.h b/drivers/net/ethernet/broadcom/gmac/src/include/pcicfg.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/pcicfg.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/pcicfg.h 2017-11-09 17:53:43.972306000 +0800 +@@ -0,0 +1,569 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * pcicfg.h: PCI configuration constants and structures. 
++ * ++ * $Id: pcicfg.h 316716 2012-02-23 04:39:13Z $ ++ */ ++ ++#ifndef _h_pcicfg_ ++#define _h_pcicfg_ ++ ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++/* The following inside ifndef's so we don't collide with NTDDK.H */ ++#ifndef PCI_MAX_BUS ++#define PCI_MAX_BUS 0x100 ++#endif ++#ifndef PCI_MAX_DEVICES ++#define PCI_MAX_DEVICES 0x20 ++#endif ++#ifndef PCI_MAX_FUNCTION ++#define PCI_MAX_FUNCTION 0x8 ++#endif ++ ++#ifndef PCI_INVALID_VENDORID ++#define PCI_INVALID_VENDORID 0xffff ++#endif ++#ifndef PCI_INVALID_DEVICEID ++#define PCI_INVALID_DEVICEID 0xffff ++#endif ++ ++ ++/* Convert between bus-slot-function-register and config addresses */ ++ ++#define PCICFG_BUS_SHIFT 16 /* Bus shift */ ++#define PCICFG_SLOT_SHIFT 11 /* Slot shift */ ++#define PCICFG_FUN_SHIFT 8 /* Function shift */ ++#define PCICFG_OFF_SHIFT 0 /* Register shift */ ++ ++#define PCICFG_BUS_MASK 0xff /* Bus mask */ ++#define PCICFG_SLOT_MASK 0x1f /* Slot mask */ ++#define PCICFG_FUN_MASK 7 /* Function mask */ ++#define PCICFG_OFF_MASK 0xff /* Bus mask */ ++ ++#define PCI_CONFIG_ADDR(b, s, f, o) \ ++ ((((b) & PCICFG_BUS_MASK) << PCICFG_BUS_SHIFT) \ ++ | (((s) & PCICFG_SLOT_MASK) << PCICFG_SLOT_SHIFT) \ ++ | (((f) & PCICFG_FUN_MASK) << PCICFG_FUN_SHIFT) \ ++ | (((o) & PCICFG_OFF_MASK) << PCICFG_OFF_SHIFT)) ++ ++#define PCI_CONFIG_BUS(a) (((a) >> PCICFG_BUS_SHIFT) & PCICFG_BUS_MASK) ++#define PCI_CONFIG_SLOT(a) (((a) >> PCICFG_SLOT_SHIFT) & PCICFG_SLOT_MASK) ++#define PCI_CONFIG_FUN(a) (((a) >> PCICFG_FUN_SHIFT) & PCICFG_FUN_MASK) ++#define PCI_CONFIG_OFF(a) (((a) >> PCICFG_OFF_SHIFT) & PCICFG_OFF_MASK) ++ ++/* PCIE Config space accessing MACROS */ ++ ++#define PCIECFG_BUS_SHIFT 24 /* Bus shift */ ++#define PCIECFG_SLOT_SHIFT 19 /* Slot/Device shift */ ++#define PCIECFG_FUN_SHIFT 16 /* Function shift */ ++#define PCIECFG_OFF_SHIFT 0 /* Register shift */ ++ ++#define PCIECFG_BUS_MASK 0xff /* Bus mask */ ++#define PCIECFG_SLOT_MASK 0x1f /* Slot/Device mask */ ++#define PCIECFG_FUN_MASK 7 /* Function mask */ 
++#define PCIECFG_OFF_MASK 0xfff /* Register mask */ ++ ++#define PCIE_CONFIG_ADDR(b, s, f, o) \ ++ ((((b) & PCIECFG_BUS_MASK) << PCIECFG_BUS_SHIFT) \ ++ | (((s) & PCIECFG_SLOT_MASK) << PCIECFG_SLOT_SHIFT) \ ++ | (((f) & PCIECFG_FUN_MASK) << PCIECFG_FUN_SHIFT) \ ++ | (((o) & PCIECFG_OFF_MASK) << PCIECFG_OFF_SHIFT)) ++ ++#define PCIE_CONFIG_BUS(a) (((a) >> PCIECFG_BUS_SHIFT) & PCIECFG_BUS_MASK) ++#define PCIE_CONFIG_SLOT(a) (((a) >> PCIECFG_SLOT_SHIFT) & PCIECFG_SLOT_MASK) ++#define PCIE_CONFIG_FUN(a) (((a) >> PCIECFG_FUN_SHIFT) & PCIECFG_FUN_MASK) ++#define PCIE_CONFIG_OFF(a) (((a) >> PCIECFG_OFF_SHIFT) & PCIECFG_OFF_MASK) ++ ++/* The actual config space */ ++ ++#define PCI_BAR_MAX 6 ++ ++#define PCI_ROM_BAR 8 ++ ++#define PCR_RSVDA_MAX 2 ++ ++/* Bits in PCI bars' flags */ ++ ++#define PCIBAR_FLAGS 0xf ++#define PCIBAR_IO 0x1 ++#define PCIBAR_MEM1M 0x2 ++#define PCIBAR_MEM64 0x4 ++#define PCIBAR_PREFETCH 0x8 ++#define PCIBAR_MEM32_MASK 0xFFFFFF80 ++ ++/* pci config status reg has a bit to indicate that capability ptr is present */ ++ ++#define PCI_CAPPTR_PRESENT 0x0010 ++ ++typedef struct _pci_config_regs { ++ uint16 vendor; ++ uint16 device; ++ uint16 command; ++ uint16 status; ++ uint8 rev_id; ++ uint8 prog_if; ++ uint8 sub_class; ++ uint8 base_class; ++ uint8 cache_line_size; ++ uint8 latency_timer; ++ uint8 header_type; ++ uint8 bist; ++ uint32 base[PCI_BAR_MAX]; ++ uint32 cardbus_cis; ++ uint16 subsys_vendor; ++ uint16 subsys_id; ++ uint32 baserom; ++ uint32 rsvd_a[PCR_RSVDA_MAX]; ++ uint8 int_line; ++ uint8 int_pin; ++ uint8 min_gnt; ++ uint8 max_lat; ++ uint8 dev_dep[192]; ++} pci_config_regs; ++ ++#define SZPCR (sizeof (pci_config_regs)) ++#define MINSZPCR 64 /* offsetof (dev_dep[0] */ ++ ++#endif /* !LINUX_POSTMOGRIFY_REMOVAL */ ++/* A structure for the config registers is nice, but in most ++ * systems the config space is not memory mapped, so we need ++ * field offsetts. 
:-( ++ */ ++#define PCI_CFG_VID 0 ++#define PCI_CFG_DID 2 ++#define PCI_CFG_CMD 4 ++#define PCI_CFG_STAT 6 ++#define PCI_CFG_REV 8 ++#define PCI_CFG_PROGIF 9 ++#define PCI_CFG_SUBCL 0xa ++#define PCI_CFG_BASECL 0xb ++#define PCI_CFG_CLSZ 0xc ++#define PCI_CFG_LATTIM 0xd ++#define PCI_CFG_HDR 0xe ++#define PCI_CFG_BIST 0xf ++#define PCI_CFG_BAR0 0x10 ++#define PCI_CFG_BAR1 0x14 ++#define PCI_CFG_BAR2 0x18 ++#define PCI_CFG_BAR3 0x1c ++#define PCI_CFG_BAR4 0x20 ++#define PCI_CFG_BAR5 0x24 ++#define PCI_CFG_CIS 0x28 ++#define PCI_CFG_SVID 0x2c ++#define PCI_CFG_SSID 0x2e ++#define PCI_CFG_ROMBAR 0x30 ++#define PCI_CFG_CAPPTR 0x34 ++#define PCI_CFG_INT 0x3c ++#define PCI_CFG_PIN 0x3d ++#define PCI_CFG_MINGNT 0x3e ++#define PCI_CFG_MAXLAT 0x3f ++#define PCI_CFG_DEVCTRL 0xd8 ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++ ++#ifdef __NetBSD__ ++#undef PCI_CLASS_DISPLAY ++#undef PCI_CLASS_MEMORY ++#undef PCI_CLASS_BRIDGE ++#undef PCI_CLASS_INPUT ++#undef PCI_CLASS_DOCK ++#endif /* __NetBSD__ */ ++ ++#ifdef EFI ++#undef PCI_CLASS_BRIDGE ++#undef PCI_CLASS_OLD ++#undef PCI_CLASS_DISPLAY ++#undef PCI_CLASS_SERIAL ++#undef PCI_CLASS_SATELLITE ++#endif /* EFI */ ++ ++/* Classes and subclasses */ ++ ++typedef enum { ++ PCI_CLASS_OLD = 0, ++ PCI_CLASS_DASDI, ++ PCI_CLASS_NET, ++ PCI_CLASS_DISPLAY, ++ PCI_CLASS_MMEDIA, ++ PCI_CLASS_MEMORY, ++ PCI_CLASS_BRIDGE, ++ PCI_CLASS_COMM, ++ PCI_CLASS_BASE, ++ PCI_CLASS_INPUT, ++ PCI_CLASS_DOCK, ++ PCI_CLASS_CPU, ++ PCI_CLASS_SERIAL, ++ PCI_CLASS_INTELLIGENT = 0xe, ++ PCI_CLASS_SATELLITE, ++ PCI_CLASS_CRYPT, ++ PCI_CLASS_DSP, ++ PCI_CLASS_XOR = 0xfe ++} pci_classes; ++ ++typedef enum { ++ PCI_DASDI_SCSI, ++ PCI_DASDI_IDE, ++ PCI_DASDI_FLOPPY, ++ PCI_DASDI_IPI, ++ PCI_DASDI_RAID, ++ PCI_DASDI_OTHER = 0x80 ++} pci_dasdi_subclasses; ++ ++typedef enum { ++ PCI_NET_ETHER, ++ PCI_NET_TOKEN, ++ PCI_NET_FDDI, ++ PCI_NET_ATM, ++ PCI_NET_OTHER = 0x80 ++} pci_net_subclasses; ++ ++typedef enum { ++ PCI_DISPLAY_VGA, ++ PCI_DISPLAY_XGA, ++ PCI_DISPLAY_3D, ++ 
PCI_DISPLAY_OTHER = 0x80 ++} pci_display_subclasses; ++ ++typedef enum { ++ PCI_MMEDIA_VIDEO, ++ PCI_MMEDIA_AUDIO, ++ PCI_MMEDIA_PHONE, ++ PCI_MEDIA_OTHER = 0x80 ++} pci_mmedia_subclasses; ++ ++typedef enum { ++ PCI_MEMORY_RAM, ++ PCI_MEMORY_FLASH, ++ PCI_MEMORY_OTHER = 0x80 ++} pci_memory_subclasses; ++ ++typedef enum { ++ PCI_BRIDGE_HOST, ++ PCI_BRIDGE_ISA, ++ PCI_BRIDGE_EISA, ++ PCI_BRIDGE_MC, ++ PCI_BRIDGE_PCI, ++ PCI_BRIDGE_PCMCIA, ++ PCI_BRIDGE_NUBUS, ++ PCI_BRIDGE_CARDBUS, ++ PCI_BRIDGE_RACEWAY, ++ PCI_BRIDGE_OTHER = 0x80 ++} pci_bridge_subclasses; ++ ++typedef enum { ++ PCI_COMM_UART, ++ PCI_COMM_PARALLEL, ++ PCI_COMM_MULTIUART, ++ PCI_COMM_MODEM, ++ PCI_COMM_OTHER = 0x80 ++} pci_comm_subclasses; ++ ++typedef enum { ++ PCI_BASE_PIC, ++ PCI_BASE_DMA, ++ PCI_BASE_TIMER, ++ PCI_BASE_RTC, ++ PCI_BASE_PCI_HOTPLUG, ++ PCI_BASE_OTHER = 0x80 ++} pci_base_subclasses; ++ ++typedef enum { ++ PCI_INPUT_KBD, ++ PCI_INPUT_PEN, ++ PCI_INPUT_MOUSE, ++ PCI_INPUT_SCANNER, ++ PCI_INPUT_GAMEPORT, ++ PCI_INPUT_OTHER = 0x80 ++} pci_input_subclasses; ++ ++typedef enum { ++ PCI_DOCK_GENERIC, ++ PCI_DOCK_OTHER = 0x80 ++} pci_dock_subclasses; ++ ++typedef enum { ++ PCI_CPU_386, ++ PCI_CPU_486, ++ PCI_CPU_PENTIUM, ++ PCI_CPU_ALPHA = 0x10, ++ PCI_CPU_POWERPC = 0x20, ++ PCI_CPU_MIPS = 0x30, ++ PCI_CPU_COPROC = 0x40, ++ PCI_CPU_OTHER = 0x80 ++} pci_cpu_subclasses; ++ ++typedef enum { ++ PCI_SERIAL_IEEE1394, ++ PCI_SERIAL_ACCESS, ++ PCI_SERIAL_SSA, ++ PCI_SERIAL_USB, ++ PCI_SERIAL_FIBER, ++ PCI_SERIAL_SMBUS, ++ PCI_SERIAL_OTHER = 0x80 ++} pci_serial_subclasses; ++ ++typedef enum { ++ PCI_INTELLIGENT_I2O ++} pci_intelligent_subclasses; ++ ++typedef enum { ++ PCI_SATELLITE_TV, ++ PCI_SATELLITE_AUDIO, ++ PCI_SATELLITE_VOICE, ++ PCI_SATELLITE_DATA, ++ PCI_SATELLITE_OTHER = 0x80 ++} pci_satellite_subclasses; ++ ++typedef enum { ++ PCI_CRYPT_NETWORK, ++ PCI_CRYPT_ENTERTAINMENT, ++ PCI_CRYPT_OTHER = 0x80 ++} pci_crypt_subclasses; ++ ++typedef enum { ++ PCI_DSP_DPIO, ++ PCI_DSP_OTHER = 0x80 ++} 
pci_dsp_subclasses; ++ ++typedef enum { ++ PCI_XOR_QDMA, ++ PCI_XOR_OTHER = 0x80 ++} pci_xor_subclasses; ++ ++/* Header types */ ++#define PCI_HEADER_MULTI 0x80 ++#define PCI_HEADER_MASK 0x7f ++typedef enum { ++ PCI_HEADER_NORMAL, ++ PCI_HEADER_BRIDGE, ++ PCI_HEADER_CARDBUS ++} pci_header_types; ++ ++ ++/* Overlay for a PCI-to-PCI bridge */ ++ ++#define PPB_RSVDA_MAX 2 ++#define PPB_RSVDD_MAX 8 ++ ++typedef struct _ppb_config_regs { ++ uint16 vendor; ++ uint16 device; ++ uint16 command; ++ uint16 status; ++ uint8 rev_id; ++ uint8 prog_if; ++ uint8 sub_class; ++ uint8 base_class; ++ uint8 cache_line_size; ++ uint8 latency_timer; ++ uint8 header_type; ++ uint8 bist; ++ uint32 rsvd_a[PPB_RSVDA_MAX]; ++ uint8 prim_bus; ++ uint8 sec_bus; ++ uint8 sub_bus; ++ uint8 sec_lat; ++ uint8 io_base; ++ uint8 io_lim; ++ uint16 sec_status; ++ uint16 mem_base; ++ uint16 mem_lim; ++ uint16 pf_mem_base; ++ uint16 pf_mem_lim; ++ uint32 pf_mem_base_hi; ++ uint32 pf_mem_lim_hi; ++ uint16 io_base_hi; ++ uint16 io_lim_hi; ++ uint16 subsys_vendor; ++ uint16 subsys_id; ++ uint32 rsvd_b; ++ uint8 rsvd_c; ++ uint8 int_pin; ++ uint16 bridge_ctrl; ++ uint8 chip_ctrl; ++ uint8 diag_ctrl; ++ uint16 arb_ctrl; ++ uint32 rsvd_d[PPB_RSVDD_MAX]; ++ uint8 dev_dep[192]; ++} ppb_config_regs; ++ ++ ++/* PCI CAPABILITY DEFINES */ ++#define PCI_CAP_POWERMGMTCAP_ID 0x01 ++#define PCI_CAP_MSICAP_ID 0x05 ++#define PCI_CAP_VENDSPEC_ID 0x09 ++#define PCI_CAP_PCIECAP_ID 0x10 ++ ++/* Data structure to define the Message Signalled Interrupt facility ++ * Valid for PCI and PCIE configurations ++ */ ++typedef struct _pciconfig_cap_msi { ++ uint8 capID; ++ uint8 nextptr; ++ uint16 msgctrl; ++ uint32 msgaddr; ++} pciconfig_cap_msi; ++ ++/* Data structure to define the Power managment facility ++ * Valid for PCI and PCIE configurations ++ */ ++typedef struct _pciconfig_cap_pwrmgmt { ++ uint8 capID; ++ uint8 nextptr; ++ uint16 pme_cap; ++ uint16 pme_sts_ctrl; ++ uint8 pme_bridge_ext; ++ uint8 data; ++} 
pciconfig_cap_pwrmgmt; ++ ++#define PME_CAP_PM_STATES (0x1f << 27) /* Bits 31:27 states that can generate PME */ ++#define PME_CSR_OFFSET 0x4 /* 4-bytes offset */ ++#define PME_CSR_PME_EN (1 << 8) /* Bit 8 Enable generating of PME */ ++#define PME_CSR_PME_STAT (1 << 15) /* Bit 15 PME got asserted */ ++ ++/* Data structure to define the PCIE capability */ ++typedef struct _pciconfig_cap_pcie { ++ uint8 capID; ++ uint8 nextptr; ++ uint16 pcie_cap; ++ uint32 dev_cap; ++ uint16 dev_ctrl; ++ uint16 dev_status; ++ uint32 link_cap; ++ uint16 link_ctrl; ++ uint16 link_status; ++ uint32 slot_cap; ++ uint16 slot_ctrl; ++ uint16 slot_status; ++ uint16 root_ctrl; ++ uint16 root_cap; ++ uint32 root_status; ++} pciconfig_cap_pcie; ++ ++/* PCIE Enhanced CAPABILITY DEFINES */ ++#define PCIE_EXTCFG_OFFSET 0x100 ++#define PCIE_ADVERRREP_CAPID 0x0001 ++#define PCIE_VC_CAPID 0x0002 ++#define PCIE_DEVSNUM_CAPID 0x0003 ++#define PCIE_PWRBUDGET_CAPID 0x0004 ++ ++/* PCIE Extended configuration */ ++#define PCIE_ADV_CORR_ERR_MASK 0x114 ++#define CORR_ERR_RE (1 << 0) /* Receiver */ ++#define CORR_ERR_BT (1 << 6) /* Bad TLP */ ++#define CORR_ERR_BD (1 << 7) /* Bad DLLP */ ++#define CORR_ERR_RR (1 << 8) /* REPLAY_NUM rollover */ ++#define CORR_ERR_RT (1 << 12) /* Reply timer timeout */ ++#define ALL_CORR_ERRORS (CORR_ERR_RE | CORR_ERR_BT | CORR_ERR_BD | \ ++ CORR_ERR_RR | CORR_ERR_RT) ++ ++/* PCIE Root Control Register bits (Host mode only) */ ++#define PCIE_RC_CORR_SERR_EN 0x0001 ++#define PCIE_RC_NONFATAL_SERR_EN 0x0002 ++#define PCIE_RC_FATAL_SERR_EN 0x0004 ++#define PCIE_RC_PME_INT_EN 0x0008 ++#define PCIE_RC_CRS_EN 0x0010 ++ ++/* PCIE Root Capability Register bits (Host mode only) */ ++#define PCIE_RC_CRS_VISIBILITY 0x0001 ++ ++/* Header to define the PCIE specific capabilities in the extended config space */ ++typedef struct _pcie_enhanced_caphdr { ++ uint16 capID; ++ uint16 cap_ver : 4; ++ uint16 next_ptr : 12; ++} pcie_enhanced_caphdr; ++ ++ ++/* Everything below is BRCM HND 
proprietary */ ++ ++ ++/* Brcm PCI configuration registers */ ++#define cap_list rsvd_a[0] ++#define bar0_window dev_dep[0x80 - 0x40] ++#define bar1_window dev_dep[0x84 - 0x40] ++#define sprom_control dev_dep[0x88 - 0x40] ++#endif /* LINUX_POSTMOGRIFY_REMOVAL */ ++#define PCI_BAR0_WIN 0x80 /* backplane addres space accessed by BAR0 */ ++#define PCI_BAR1_WIN 0x84 /* backplane addres space accessed by BAR1 */ ++#define PCI_SPROM_CONTROL 0x88 /* sprom property control */ ++#define PCI_BAR1_CONTROL 0x8c /* BAR1 region burst control */ ++#define PCI_INT_STATUS 0x90 /* PCI and other cores interrupts */ ++#define PCI_INT_MASK 0x94 /* mask of PCI and other cores interrupts */ ++#define PCI_TO_SB_MB 0x98 /* signal backplane interrupts */ ++#define PCI_BACKPLANE_ADDR 0xa0 /* address an arbitrary location on the system backplane */ ++#define PCI_BACKPLANE_DATA 0xa4 /* data at the location specified by above address */ ++#define PCI_CLK_CTL_ST 0xa8 /* pci config space clock control/status (>=rev14) */ ++#define PCI_BAR0_WIN2 0xac /* backplane addres space accessed by second 4KB of BAR0 */ ++#define PCI_GPIO_IN 0xb0 /* pci config space gpio input (>=rev3) */ ++#define PCI_GPIO_OUT 0xb4 /* pci config space gpio output (>=rev3) */ ++#define PCI_GPIO_OUTEN 0xb8 /* pci config space gpio output enable (>=rev3) */ ++ ++#define PCI_BAR0_SHADOW_OFFSET (2 * 1024) /* bar0 + 2K accesses sprom shadow (in pci core) */ ++#define PCI_BAR0_SPROM_OFFSET (4 * 1024) /* bar0 + 4K accesses external sprom */ ++#define PCI_BAR0_PCIREGS_OFFSET (6 * 1024) /* bar0 + 6K accesses pci core registers */ ++#define PCI_BAR0_PCISBR_OFFSET (4 * 1024) /* pci core SB registers are at the end of the ++ * 8KB window, so their address is the "regular" ++ * address plus 4K ++ */ ++/* ++ * PCIE GEN2 changed some of the above locations for ++ * Bar0WrapperBase, SecondaryBAR0Window and SecondaryBAR0WrapperBase ++ * BAR0 maps 32K of register space ++*/ ++#define PCIE2_BAR0_WIN2 0x70 /* backplane addres space accessed by 
second 4KB of BAR0 */ ++#define PCIE2_BAR0_CORE2_WIN 0x74 /* backplane addres space accessed by second 4KB of BAR0 */ ++#define PCIE2_BAR0_CORE2_WIN2 0x78 /* backplane addres space accessed by second 4KB of BAR0 */ ++ ++#define PCI_BAR0_WINSZ (16 * 1024) /* bar0 window size Match with corerev 13 */ ++/* On pci corerev >= 13 and all pcie, the bar0 is now 16KB and it maps: */ ++#define PCI_16KB0_PCIREGS_OFFSET (8 * 1024) /* bar0 + 8K accesses pci/pcie core registers */ ++#define PCI_16KB0_CCREGS_OFFSET (12 * 1024) /* bar0 + 12K accesses chipc core registers */ ++#define PCI_16KBB0_WINSZ (16 * 1024) /* bar0 window size */ ++ ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++/* On AI chips we have a second window to map DMP regs are mapped: */ ++#define PCI_16KB0_WIN2_OFFSET (4 * 1024) /* bar0 + 4K is "Window 2" */ ++ ++/* PCI_INT_STATUS */ ++#define PCI_SBIM_STATUS_SERR 0x4 /* backplane SBErr interrupt status */ ++ ++/* PCI_INT_MASK */ ++#define PCI_SBIM_SHIFT 8 /* backplane core interrupt mask bits offset */ ++#define PCI_SBIM_MASK 0xff00 /* backplane core interrupt mask */ ++#define PCI_SBIM_MASK_SERR 0x4 /* backplane SBErr interrupt mask */ ++ ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++/* PCI_SPROM_CONTROL */ ++#define SPROM_SZ_MSK 0x02 /* SPROM Size Mask */ ++#define SPROM_LOCKED 0x08 /* SPROM Locked */ ++#define SPROM_BLANK 0x04 /* indicating a blank SPROM */ ++#define SPROM_WRITEEN 0x10 /* SPROM write enable */ ++#define SPROM_BOOTROM_WE 0x20 /* external bootrom write enable */ ++#define SPROM_BACKPLANE_EN 0x40 /* Enable indirect backplane access */ ++#define SPROM_OTPIN_USE 0x80 /* device OTP In use */ ++#endif /* LINUX_POSTMOGRIFY_REMOVAL */ ++ ++/* Bits in PCI command and status regs */ ++#define PCI_CMD_IO 0x00000001 /* I/O enable */ ++#define PCI_CMD_MEMORY 0x00000002 /* Memory enable */ ++#define PCI_CMD_MASTER 0x00000004 /* Master enable */ ++#define PCI_CMD_SPECIAL 0x00000008 /* Special cycles enable */ ++#define PCI_CMD_INVALIDATE 0x00000010 /* Invalidate? 
*/ ++#define PCI_CMD_VGA_PAL 0x00000040 /* VGA Palate */ ++#define PCI_STAT_TA 0x08000000 /* target abort status */ ++#endif /* LINUX_POSTMOGRIFY_REMOVAL */ ++ ++#define PCI_CONFIG_SPACE_SIZE 256 ++#endif /* _h_pcicfg_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/phy542xx.h b/drivers/net/ethernet/broadcom/gmac/src/include/phy542xx.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/phy542xx.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/phy542xx.h 2017-11-09 17:53:43.973296000 +0800 +@@ -0,0 +1,98 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * These routines provide access to the external phy ++ * ++ */ ++ ++#ifndef _PHY542XX_H_ ++#define _PHY542XX_H_ ++ ++#include ++ ++/* Broadcom BCM542xx */ ++#define phy542xx_rd_reg phy542xx_reg_read ++#define phy542xx_wr_reg phy542xx_reg_write ++ ++#define BCM542XX_REG_EXP_SEL 0x17 ++#define BCM542XX_REG_EXP_SELECT_7E 0x0F7E ++#define BCM542XX_REG_EXP_DATA 0x15 ++#define BCM542XX_REG_EXP_RDB_EN 0x0000 ++ ++#define BCM542XX_REG_RDB_ADDR 0x1e ++#define BCM542XX_REG_RDB_DATA 0x1f ++ ++#define MIIM_BCM542xx_RDB_AUXSTATUS 0x09 ++#define MIIM_BCM542xx_AUXSTATUS_LINKMODE_MASK 0x0700 ++#define MIIM_BCM542xx_AUXSTATUS_LINKMODE_SHIFT 8 ++ ++#define BCM542XX_REG_RDB_MII_MISC_CTRL 0x02f ++ ++#define BCM542XX_REG_RDB_EXT_SERDES_CTRL 0x234 ++#define BCM542XX_REG_EXT_SERDES_AUTO_FX (1 << 6) ++#define BCM542XX_REG_EXT_SERDES_FX_FD (1 << 5) ++#define BCM542XX_REG_EXT_SERDES_FX (1 << 4) ++#define BCM542XX_REG_EXT_SERDES_LED (1 << 3) ++#define BCM542XX_REG_EXT_SEL_SYNC_ST (1 << 2) ++#define BCM542XX_REG_EXT_SELECT_SD (1 << 1) ++#define BCM542XX_REG_EXT_SERDES_SEL (1 << 0) ++#define BCM542XX_REG_EXT_SERDES_FX_MASK (BCM542XX_REG_EXT_SERDES_FX | \ ++ BCM542XX_REG_EXT_SERDES_AUTO_FX) ++ ++#define BCM542XX_REG_RDB_SGMII_SLAVE 0x235 ++#define BCM542XX_REG_SGMII_SLAVE_AUTO (1 << 0) ++ ++#define MIIM_BCM542xx_RDB_AUTO_DETECT_MEDIUM 0x23e ++#define BCM542XX_REG_MII_AUTO_DET_MED_2ND_SERDES (1 << 9) ++#define BCM542XX_REG_MII_INV_FIBER_SD (1 << 8) ++#define BCM542XX_REG_MII_FIBER_IN_USE_LED (1 << 7) ++#define BCM542XX_REG_MII_FIBER_LED (1 << 6) ++#define BCM542XX_REG_MII_FIBER_SD_SYNC (1 << 5) ++#define BCM542XX_REG_MII_FIBER_AUTO_PWRDN (1 << 4) ++#define BCM542XX_REG_MII_SD_en_ov (1 << 3) ++#define BCM542XX_REG_MII_AUTO_DET_MED_DEFAULT (1 << 2) ++#define BCM542XX_REG_MII_AUTO_DET_MED_PRI (1 << 1) ++#define BCM542XX_REG_MII_AUTO_DET_MED_EN (1 << 0) ++#define BCM542XX_REG_MII_AUTO_DET_MASK 0x033f ++ ++#define BCM542XX_REG_RDB_MODE_CTRL 0x021 ++#define BCM542XX_REG_MODE_CTRL_COPPER_LINK 
(1 << 7) ++#define BCM542XX_REG_MODE_CTRL_SERDES_LINK (1 << 6) ++#define BCM542XX_REG_MODE_CTRL_COPPER_ENERGY_DET (1 << 5) ++#define BCM542XX_REG_MODE_CNTL_MODE_SEL_2 (1 << 2) ++#define BCM542XX_REG_MODE_CNTL_MODE_SEL_1 (1 << 1) ++#define BCM542XX_REG_MODE_CTRL_1000X_EN (1 << 0) ++ ++#define BCM542XX_REG_RDB_COPPER_MISC_CTRL 0x02f ++#define BCM542XX_REG_MISC_CTRL_FORCE_AUTO_MDIX (1 << 9) ++ ++#define BCM542XX_REG_MODE_SEL_COPPER_2_SGMII (0x0) ++#define BCM542XX_REG_MODE_SEL_FIBER_2_SGMII (BCM542XX_REG_MODE_CNTL_MODE_SEL_1) ++#define BCM542XX_REG_MODE_SEL_SGMII_2_COPPER (BCM542XX_REG_MODE_CNTL_MODE_SEL_2) ++#define BCM542XX_REG_MODE_SEL_GBIC (BCM542XX_REG_MODE_CNTL_MODE_SEL_1 | \ ++ BCM542XX_REG_MODE_CNTL_MODE_SEL_2) ++ ++#define BCM542XX_REG_RDB_2ND_SERDES_BASE 0xb00 ++#define BCM542XX_REG_RDB_2ND_SERDES_MISC_1000X 0xb17 ++ ++ ++extern int phy542xx_reg_read(u32 phy_addr, u32 flags, int reg_addr, u16 *data); ++extern int phy542xx_reg_write(u32 phy_addr, u32 flags, int reg_addr, u16 data); ++extern int phy542xx_reset_setup(u32 phy_addr); ++extern int phy542xx_init(u32 phy_addr); ++extern int phy542xx_enable_set(u32 phy_addr, int enable); ++extern int phy542xx_force_auto_mdix(u32 phy_addr, int enable); ++ ++#endif /* _PHY542XX_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/proto/802.11.h b/drivers/net/ethernet/broadcom/gmac/src/include/proto/802.11.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/proto/802.11.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/proto/802.11.h 2017-11-09 17:53:43.976303000 +0800 +@@ -0,0 +1,2356 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. 
++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Fundamental types and constants relating to 802.11 ++ * ++ * $Id: 802.11.h 308961 2012-01-18 03:01:00Z $ ++ */ ++ ++#ifndef _802_11_H_ ++#define _802_11_H_ ++ ++#ifndef _TYPEDEFS_H_ ++#include ++#endif ++ ++#ifndef _NET_ETHERNET_H_ ++#include ++#endif ++ ++#include ++ ++/* This marks the start of a packed structure section. 
*/ ++#include ++ ++ ++#define DOT11_TU_TO_US 1024 /* 802.11 Time Unit is 1024 microseconds */ ++ ++/* Generic 802.11 frame constants */ ++#define DOT11_A3_HDR_LEN 24 /* d11 header length with A3 */ ++#define DOT11_A4_HDR_LEN 30 /* d11 header length with A4 */ ++#define DOT11_MAC_HDR_LEN DOT11_A3_HDR_LEN /* MAC header length */ ++#define DOT11_FCS_LEN 4 /* d11 FCS length */ ++#define DOT11_ICV_LEN 4 /* d11 ICV length */ ++#define DOT11_ICV_AES_LEN 8 /* d11 ICV/AES length */ ++#define DOT11_QOS_LEN 2 /* d11 QoS length */ ++#define DOT11_HTC_LEN 4 /* d11 HT Control field length */ ++ ++#define DOT11_KEY_INDEX_SHIFT 6 /* d11 key index shift */ ++#define DOT11_IV_LEN 4 /* d11 IV length */ ++#define DOT11_IV_TKIP_LEN 8 /* d11 IV TKIP length */ ++#define DOT11_IV_AES_OCB_LEN 4 /* d11 IV/AES/OCB length */ ++#define DOT11_IV_AES_CCM_LEN 8 /* d11 IV/AES/CCM length */ ++#define DOT11_IV_MAX_LEN 8 /* maximum iv len for any encryption */ ++ ++/* Includes MIC */ ++#define DOT11_MAX_MPDU_BODY_LEN 2304 /* max MPDU body length */ ++/* A4 header + QoS + CCMP + PDU + ICV + FCS = 2352 */ ++#define DOT11_MAX_MPDU_LEN (DOT11_A4_HDR_LEN + \ ++ DOT11_QOS_LEN + \ ++ DOT11_IV_AES_CCM_LEN + \ ++ DOT11_MAX_MPDU_BODY_LEN + \ ++ DOT11_ICV_LEN + \ ++ DOT11_FCS_LEN) /* d11 max MPDU length */ ++ ++#define DOT11_MAX_SSID_LEN 32 /* d11 max ssid length */ ++ ++/* dot11RTSThreshold */ ++#define DOT11_DEFAULT_RTS_LEN 2347 /* d11 default RTS length */ ++#define DOT11_MAX_RTS_LEN 2347 /* d11 max RTS length */ ++ ++/* dot11FragmentationThreshold */ ++#define DOT11_MIN_FRAG_LEN 256 /* d11 min fragmentation length */ ++#define DOT11_MAX_FRAG_LEN 2346 /* Max frag is also limited by aMPDUMaxLength ++ * of the attached PHY ++ */ ++#define DOT11_DEFAULT_FRAG_LEN 2346 /* d11 default fragmentation length */ ++ ++/* dot11BeaconPeriod */ ++#define DOT11_MIN_BEACON_PERIOD 1 /* d11 min beacon period */ ++#define DOT11_MAX_BEACON_PERIOD 0xFFFF /* d11 max beacon period */ ++ ++/* dot11DTIMPeriod */ ++#define 
DOT11_MIN_DTIM_PERIOD 1 /* d11 min DTIM period */ ++#define DOT11_MAX_DTIM_PERIOD 0xFF /* d11 max DTIM period */ ++ ++/* 802.2 LLC/SNAP header used by 802.11 per 802.1H */ ++#define DOT11_LLC_SNAP_HDR_LEN 8 /* d11 LLC/SNAP header length */ ++#define DOT11_OUI_LEN 3 /* d11 OUI length */ ++BWL_PRE_PACKED_STRUCT struct dot11_llc_snap_header { ++ uint8 dsap; /* always 0xAA */ ++ uint8 ssap; /* always 0xAA */ ++ uint8 ctl; /* always 0x03 */ ++ uint8 oui[DOT11_OUI_LEN]; /* RFC1042: 0x00 0x00 0x00 ++ * Bridge-Tunnel: 0x00 0x00 0xF8 ++ */ ++ uint16 type; /* ethertype */ ++} BWL_POST_PACKED_STRUCT; ++ ++/* RFC1042 header used by 802.11 per 802.1H */ ++#define RFC1042_HDR_LEN (ETHER_HDR_LEN + DOT11_LLC_SNAP_HDR_LEN) /* RCF1042 header length */ ++ ++/* Generic 802.11 MAC header */ ++/* ++ * N.B.: This struct reflects the full 4 address 802.11 MAC header. ++ * The fields are defined such that the shorter 1, 2, and 3 ++ * address headers just use the first k fields. ++ */ ++BWL_PRE_PACKED_STRUCT struct dot11_header { ++ uint16 fc; /* frame control */ ++ uint16 durid; /* duration/ID */ ++ struct ether_addr a1; /* address 1 */ ++ struct ether_addr a2; /* address 2 */ ++ struct ether_addr a3; /* address 3 */ ++ uint16 seq; /* sequence control */ ++ struct ether_addr a4; /* address 4 */ ++} BWL_POST_PACKED_STRUCT; ++ ++/* Control frames */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_rts_frame { ++ uint16 fc; /* frame control */ ++ uint16 durid; /* duration/ID */ ++ struct ether_addr ra; /* receiver address */ ++ struct ether_addr ta; /* transmitter address */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_RTS_LEN 16 /* d11 RTS frame length */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_cts_frame { ++ uint16 fc; /* frame control */ ++ uint16 durid; /* duration/ID */ ++ struct ether_addr ra; /* receiver address */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_CTS_LEN 10 /* d11 CTS frame length */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_ack_frame { ++ uint16 fc; /* frame control */ ++ uint16 
durid; /* duration/ID */ ++ struct ether_addr ra; /* receiver address */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_ACK_LEN 10 /* d11 ACK frame length */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_ps_poll_frame { ++ uint16 fc; /* frame control */ ++ uint16 durid; /* AID */ ++ struct ether_addr bssid; /* receiver address, STA in AP */ ++ struct ether_addr ta; /* transmitter address */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_PS_POLL_LEN 16 /* d11 PS poll frame length */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_cf_end_frame { ++ uint16 fc; /* frame control */ ++ uint16 durid; /* duration/ID */ ++ struct ether_addr ra; /* receiver address */ ++ struct ether_addr bssid; /* transmitter address, STA in AP */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_CS_END_LEN 16 /* d11 CF-END frame length */ ++ ++/* RWL wifi protocol: The Vendor Specific Action frame is defined for vendor-specific signaling ++* category+OUI+vendor specific content ( this can be variable) ++*/ ++BWL_PRE_PACKED_STRUCT struct dot11_action_wifi_vendor_specific { ++ uint8 category; ++ uint8 OUI[3]; ++ uint8 type; ++ uint8 subtype; ++ uint8 data[1040]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_action_wifi_vendor_specific dot11_action_wifi_vendor_specific_t; ++ ++/* generic vender specific action frame with variable length */ ++BWL_PRE_PACKED_STRUCT struct dot11_action_vs_frmhdr { ++ uint8 category; ++ uint8 OUI[3]; ++ uint8 type; ++ uint8 subtype; ++ uint8 data[1]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_action_vs_frmhdr dot11_action_vs_frmhdr_t; ++#define DOT11_ACTION_VS_HDR_LEN 6 ++ ++#define BCM_ACTION_OUI_BYTE0 0x00 ++#define BCM_ACTION_OUI_BYTE1 0x90 ++#define BCM_ACTION_OUI_BYTE2 0x4c ++ ++/* BA/BAR Control parameters */ ++#define DOT11_BA_CTL_POLICY_NORMAL 0x0000 /* normal ack */ ++#define DOT11_BA_CTL_POLICY_NOACK 0x0001 /* no ack */ ++#define DOT11_BA_CTL_POLICY_MASK 0x0001 /* ack policy mask */ ++ ++#define DOT11_BA_CTL_MTID 0x0002 /* multi tid BA */ ++#define 
DOT11_BA_CTL_COMPRESSED 0x0004 /* compressed bitmap */ ++ ++#define DOT11_BA_CTL_NUMMSDU_MASK 0x0FC0 /* num msdu in bitmap mask */ ++#define DOT11_BA_CTL_NUMMSDU_SHIFT 6 /* num msdu in bitmap shift */ ++ ++#define DOT11_BA_CTL_TID_MASK 0xF000 /* tid mask */ ++#define DOT11_BA_CTL_TID_SHIFT 12 /* tid shift */ ++ ++/* control frame header (BA/BAR) */ ++BWL_PRE_PACKED_STRUCT struct dot11_ctl_header { ++ uint16 fc; /* frame control */ ++ uint16 durid; /* duration/ID */ ++ struct ether_addr ra; /* receiver address */ ++ struct ether_addr ta; /* transmitter address */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_CTL_HDR_LEN 16 /* control frame hdr len */ ++ ++/* BAR frame payload */ ++BWL_PRE_PACKED_STRUCT struct dot11_bar { ++ uint16 bar_control; /* BAR Control */ ++ uint16 seqnum; /* Starting Sequence control */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_BAR_LEN 4 /* BAR frame payload length */ ++ ++#define DOT11_BA_BITMAP_LEN 128 /* bitmap length */ ++#define DOT11_BA_CMP_BITMAP_LEN 8 /* compressed bitmap length */ ++/* BA frame payload */ ++BWL_PRE_PACKED_STRUCT struct dot11_ba { ++ uint16 ba_control; /* BA Control */ ++ uint16 seqnum; /* Starting Sequence control */ ++ uint8 bitmap[DOT11_BA_BITMAP_LEN]; /* Block Ack Bitmap */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_BA_LEN 4 /* BA frame payload len (wo bitmap) */ ++ ++/* Management frame header */ ++BWL_PRE_PACKED_STRUCT struct dot11_management_header { ++ uint16 fc; /* frame control */ ++ uint16 durid; /* duration/ID */ ++ struct ether_addr da; /* receiver address */ ++ struct ether_addr sa; /* transmitter address */ ++ struct ether_addr bssid; /* BSS ID */ ++ uint16 seq; /* sequence control */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_MGMT_HDR_LEN 24 /* d11 management header length */ ++ ++/* Management frame payloads */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_bcn_prb { ++ uint32 timestamp[2]; ++ uint16 beacon_interval; ++ uint16 capability; ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_BCN_PRB_LEN 12 /* 802.11 
beacon/probe frame fixed length */ ++#define DOT11_BCN_PRB_FIXED_LEN 12 /* 802.11 beacon/probe frame fixed length */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_auth { ++ uint16 alg; /* algorithm */ ++ uint16 seq; /* sequence control */ ++ uint16 status; /* status code */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_AUTH_FIXED_LEN 6 /* length of auth frame without challenge IE */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_assoc_req { ++ uint16 capability; /* capability information */ ++ uint16 listen; /* listen interval */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_ASSOC_REQ_FIXED_LEN 4 /* length of assoc frame without info elts */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_reassoc_req { ++ uint16 capability; /* capability information */ ++ uint16 listen; /* listen interval */ ++ struct ether_addr ap; /* Current AP address */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_REASSOC_REQ_FIXED_LEN 10 /* length of assoc frame without info elts */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_assoc_resp { ++ uint16 capability; /* capability information */ ++ uint16 status; /* status code */ ++ uint16 aid; /* association ID */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_ASSOC_RESP_FIXED_LEN 6 /* length of assoc resp frame without info elts */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_action_measure { ++ uint8 category; ++ uint8 action; ++ uint8 token; ++ uint8 data[1]; ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_ACTION_MEASURE_LEN 3 /* d11 action measurement header length */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_action_ht_ch_width { ++ uint8 category; ++ uint8 action; ++ uint8 ch_width; ++} BWL_POST_PACKED_STRUCT; ++ ++BWL_PRE_PACKED_STRUCT struct dot11_action_ht_mimops { ++ uint8 category; ++ uint8 action; ++ uint8 control; ++} BWL_POST_PACKED_STRUCT; ++ ++BWL_PRE_PACKED_STRUCT struct dot11_action_sa_query { ++ uint8 category; ++ uint8 action; ++ uint16 id; ++} BWL_POST_PACKED_STRUCT; ++ ++#define SM_PWRSAVE_ENABLE 1 ++#define SM_PWRSAVE_MODE 2 ++ ++/* ************* 802.11h related 
definitions. ************* */ ++BWL_PRE_PACKED_STRUCT struct dot11_power_cnst { ++ uint8 id; ++ uint8 len; ++ uint8 power; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_power_cnst dot11_power_cnst_t; ++ ++BWL_PRE_PACKED_STRUCT struct dot11_power_cap { ++ uint8 min; ++ uint8 max; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_power_cap dot11_power_cap_t; ++ ++BWL_PRE_PACKED_STRUCT struct dot11_tpc_rep { ++ uint8 id; ++ uint8 len; ++ uint8 tx_pwr; ++ uint8 margin; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_tpc_rep dot11_tpc_rep_t; ++#define DOT11_MNG_IE_TPC_REPORT_LEN 2 /* length of IE data, not including 2 byte header */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_supp_channels { ++ uint8 id; ++ uint8 len; ++ uint8 first_channel; ++ uint8 num_channels; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_supp_channels dot11_supp_channels_t; ++ ++/* Extension Channel Offset IE: 802.11n-D1.0 spec. added sideband ++ * offset for 40MHz operation. The possible 3 values are: ++ * 1 = above control channel ++ * 3 = below control channel ++ * 0 = no extension channel ++ */ ++BWL_PRE_PACKED_STRUCT struct dot11_extch { ++ uint8 id; /* IE ID, 62, DOT11_MNG_EXT_CHANNEL_OFFSET */ ++ uint8 len; /* IE length */ ++ uint8 extch; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_extch dot11_extch_ie_t; ++ ++BWL_PRE_PACKED_STRUCT struct dot11_brcm_extch { ++ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ ++ uint8 len; /* IE length */ ++ uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */ ++ uint8 type; /* type inidicates what follows */ ++ uint8 extch; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_brcm_extch dot11_brcm_extch_ie_t; ++ ++#define BRCM_EXTCH_IE_LEN 5 ++#define BRCM_EXTCH_IE_TYPE 53 /* 802.11n ID not yet assigned */ ++#define DOT11_EXTCH_IE_LEN 1 ++#define DOT11_EXT_CH_MASK 0x03 /* extension channel mask */ ++#define DOT11_EXT_CH_UPPER 0x01 /* ext. ch. on upper sb */ ++#define DOT11_EXT_CH_LOWER 0x03 /* ext. ch. 
on lower sb */ ++#define DOT11_EXT_CH_NONE 0x00 /* no extension ch. */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_action_frmhdr { ++ uint8 category; ++ uint8 action; ++ uint8 data[1]; ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_ACTION_FRMHDR_LEN 2 ++ ++/* CSA IE data structure */ ++BWL_PRE_PACKED_STRUCT struct dot11_channel_switch { ++ uint8 id; /* id DOT11_MNG_CHANNEL_SWITCH_ID */ ++ uint8 len; /* length of IE */ ++ uint8 mode; /* mode 0 or 1 */ ++ uint8 channel; /* channel switch to */ ++ uint8 count; /* number of beacons before switching */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_channel_switch dot11_chan_switch_ie_t; ++ ++#define DOT11_SWITCH_IE_LEN 3 /* length of IE data, not including 2 byte header */ ++/* CSA mode - 802.11h-2003 $7.3.2.20 */ ++#define DOT11_CSA_MODE_ADVISORY 0 /* no DOT11_CSA_MODE_NO_TX restriction imposed */ ++#define DOT11_CSA_MODE_NO_TX 1 /* no transmission upon receiving CSA frame. */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_action_switch_channel { ++ uint8 category; ++ uint8 action; ++ dot11_chan_switch_ie_t chan_switch_ie; /* for switch IE */ ++ dot11_brcm_extch_ie_t extch_ie; /* extension channel offset */ ++} BWL_POST_PACKED_STRUCT; ++ ++BWL_PRE_PACKED_STRUCT struct dot11_csa_body { ++ uint8 mode; /* mode 0 or 1 */ ++ uint8 reg; /* regulatory class */ ++ uint8 channel; /* channel switch to */ ++ uint8 count; /* number of beacons before switching */ ++} BWL_POST_PACKED_STRUCT; ++ ++/* 11n Extended Channel Switch IE data structure */ ++BWL_PRE_PACKED_STRUCT struct dot11_ext_csa { ++ uint8 id; /* id DOT11_MNG_EXT_CHANNEL_SWITCH_ID */ ++ uint8 len; /* length of IE */ ++ struct dot11_csa_body b; /* body of the ie */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_ext_csa dot11_ext_csa_ie_t; ++#define DOT11_EXT_CSA_IE_LEN 4 /* length of extended channel switch IE body */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_action_ext_csa { ++ uint8 category; ++ uint8 action; ++ dot11_ext_csa_ie_t chan_switch_ie; /* for switch IE */ ++} 
BWL_POST_PACKED_STRUCT; ++ ++BWL_PRE_PACKED_STRUCT struct dot11y_action_ext_csa { ++ uint8 category; ++ uint8 action; ++ struct dot11_csa_body b; /* body of the ie */ ++} BWL_POST_PACKED_STRUCT; ++ ++BWL_PRE_PACKED_STRUCT struct dot11_obss_coex { ++ uint8 id; ++ uint8 len; ++ uint8 info; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_obss_coex dot11_obss_coex_t; ++#define DOT11_OBSS_COEXINFO_LEN 1 /* length of OBSS Coexistence INFO IE */ ++ ++#define DOT11_OBSS_COEX_INFO_REQ 0x01 ++#define DOT11_OBSS_COEX_40MHZ_INTOLERANT 0x02 ++#define DOT11_OBSS_COEX_20MHZ_WIDTH_REQ 0x04 ++ ++BWL_PRE_PACKED_STRUCT struct dot11_obss_chanlist { ++ uint8 id; ++ uint8 len; ++ uint8 regclass; ++ uint8 chanlist[1]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_obss_chanlist dot11_obss_chanlist_t; ++#define DOT11_OBSS_CHANLIST_FIXED_LEN 1 /* fixed length of regclass */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_extcap_ie { ++ uint8 id; ++ uint8 len; ++ uint8 cap[1]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_extcap_ie dot11_extcap_ie_t; ++ ++#define DOT11_EXTCAP_LEN_MAX 7 ++#define DOT11_EXTCAP_LEN_COEX 1 ++#define DOT11_EXTCAP_LEN_BT 3 ++#define DOT11_EXTCAP_LEN_IW 4 ++#define DOT11_EXTCAP_LEN_SI 6 ++ ++#define DOT11_EXTCAP_LEN_TDLS 5 ++BWL_PRE_PACKED_STRUCT struct dot11_extcap { ++ uint8 extcap[DOT11_EXTCAP_LEN_TDLS]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_extcap dot11_extcap_t; ++ ++/* TDLS Capabilities */ ++#define TDLS_CAP_TDLS 37 /* TDLS support */ ++#define TDLS_CAP_PU_BUFFER_STA 28 /* TDLS Peer U-APSD buffer STA support */ ++#define TDLS_CAP_PEER_PSM 20 /* TDLS Peer PSM support */ ++#define TDLS_CAP_CH_SW 30 /* TDLS Channel switch */ ++#define TDLS_CAP_PROH 38 /* TDLS prohibited */ ++#define TDLS_CAP_CH_SW_PROH 39 /* TDLS Channel switch prohibited */ ++ ++#define TDLS_CAP_MAX_BIT 39 /* TDLS max bit defined in ext cap */ ++ ++/* 802.11h/802.11k Measurement Request/Report IEs */ ++/* Measurement Type field */ ++#define DOT11_MEASURE_TYPE_BASIC 0 /* d11 
measurement basic type */ ++#define DOT11_MEASURE_TYPE_CCA 1 /* d11 measurement CCA type */ ++#define DOT11_MEASURE_TYPE_RPI 2 /* d11 measurement RPI type */ ++#define DOT11_MEASURE_TYPE_CHLOAD 3 /* d11 measurement Channel Load type */ ++#define DOT11_MEASURE_TYPE_NOISE 4 /* d11 measurement Noise Histogram type */ ++#define DOT11_MEASURE_TYPE_BEACON 5 /* d11 measurement Beacon type */ ++#define DOT11_MEASURE_TYPE_FRAME 6 /* d11 measurement Frame type */ ++#define DOT11_MEASURE_TYPE_STATS 7 /* d11 measurement STA Statistics type */ ++#define DOT11_MEASURE_TYPE_LCI 8 /* d11 measurement LCI type */ ++#define DOT11_MEASURE_TYPE_TXSTREAM 9 /* d11 measurement TX Stream type */ ++#define DOT11_MEASURE_TYPE_PAUSE 255 /* d11 measurement pause type */ ++ ++/* Measurement Request Modes */ ++#define DOT11_MEASURE_MODE_PARALLEL (1<<0) /* d11 measurement parallel */ ++#define DOT11_MEASURE_MODE_ENABLE (1<<1) /* d11 measurement enable */ ++#define DOT11_MEASURE_MODE_REQUEST (1<<2) /* d11 measurement request */ ++#define DOT11_MEASURE_MODE_REPORT (1<<3) /* d11 measurement report */ ++#define DOT11_MEASURE_MODE_DUR (1<<4) /* d11 measurement dur mandatory */ ++/* Measurement Report Modes */ ++#define DOT11_MEASURE_MODE_LATE (1<<0) /* d11 measurement late */ ++#define DOT11_MEASURE_MODE_INCAPABLE (1<<1) /* d11 measurement incapable */ ++#define DOT11_MEASURE_MODE_REFUSED (1<<2) /* d11 measurement refuse */ ++/* Basic Measurement Map bits */ ++#define DOT11_MEASURE_BASIC_MAP_BSS ((uint8)(1<<0)) /* d11 measurement basic map BSS */ ++#define DOT11_MEASURE_BASIC_MAP_OFDM ((uint8)(1<<1)) /* d11 measurement map OFDM */ ++#define DOT11_MEASURE_BASIC_MAP_UKNOWN ((uint8)(1<<2)) /* d11 measurement map unknown */ ++#define DOT11_MEASURE_BASIC_MAP_RADAR ((uint8)(1<<3)) /* d11 measurement map radar */ ++#define DOT11_MEASURE_BASIC_MAP_UNMEAS ((uint8)(1<<4)) /* d11 measurement map unmeasuremnt */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_meas_req { ++ uint8 id; ++ uint8 len; ++ uint8 token; ++ uint8 
mode; ++ uint8 type; ++ uint8 channel; ++ uint8 start_time[8]; ++ uint16 duration; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_meas_req dot11_meas_req_t; ++#define DOT11_MNG_IE_MREQ_LEN 14 /* d11 measurement request IE length */ ++/* length of Measure Request IE data not including variable len */ ++#define DOT11_MNG_IE_MREQ_FIXED_LEN 3 /* d11 measurement request IE fixed length */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_meas_rep { ++ uint8 id; ++ uint8 len; ++ uint8 token; ++ uint8 mode; ++ uint8 type; ++ BWL_PRE_PACKED_STRUCT union ++ { ++ BWL_PRE_PACKED_STRUCT struct { ++ uint8 channel; ++ uint8 start_time[8]; ++ uint16 duration; ++ uint8 map; ++ } BWL_POST_PACKED_STRUCT basic; ++ uint8 data[1]; ++ } BWL_POST_PACKED_STRUCT rep; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_meas_rep dot11_meas_rep_t; ++ ++/* length of Measure Report IE data not including variable len */ ++#define DOT11_MNG_IE_MREP_FIXED_LEN 3 /* d11 measurement response IE fixed length */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_meas_rep_basic { ++ uint8 channel; ++ uint8 start_time[8]; ++ uint16 duration; ++ uint8 map; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_meas_rep_basic dot11_meas_rep_basic_t; ++#define DOT11_MEASURE_BASIC_REP_LEN 12 /* d11 measurement basic report length */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_quiet { ++ uint8 id; ++ uint8 len; ++ uint8 count; /* TBTTs until beacon interval in quiet starts */ ++ uint8 period; /* Beacon intervals between periodic quiet periods ? 
*/ ++ uint16 duration; /* Length of quiet period, in TU's */ ++ uint16 offset; /* TU's offset from TBTT in Count field */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_quiet dot11_quiet_t; ++ ++BWL_PRE_PACKED_STRUCT struct chan_map_tuple { ++ uint8 channel; ++ uint8 map; ++} BWL_POST_PACKED_STRUCT; ++typedef struct chan_map_tuple chan_map_tuple_t; ++ ++BWL_PRE_PACKED_STRUCT struct dot11_ibss_dfs { ++ uint8 id; ++ uint8 len; ++ uint8 eaddr[ETHER_ADDR_LEN]; ++ uint8 interval; ++ chan_map_tuple_t map[1]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_ibss_dfs dot11_ibss_dfs_t; ++ ++/* WME Elements */ ++#define WME_OUI "\x00\x50\xf2" /* WME OUI */ ++#define WME_OUI_LEN 3 ++#define WME_OUI_TYPE 2 /* WME type */ ++#define WME_TYPE 2 /* WME type, deprecated */ ++#define WME_SUBTYPE_IE 0 /* Information Element */ ++#define WME_SUBTYPE_PARAM_IE 1 /* Parameter Element */ ++#define WME_SUBTYPE_TSPEC 2 /* Traffic Specification */ ++#define WME_VER 1 /* WME version */ ++ ++/* WME Access Category Indices (ACIs) */ ++#define AC_BE 0 /* Best Effort */ ++#define AC_BK 1 /* Background */ ++#define AC_VI 2 /* Video */ ++#define AC_VO 3 /* Voice */ ++#define AC_COUNT 4 /* number of ACs */ ++ ++typedef uint8 ac_bitmap_t; /* AC bitmap of (1 << AC_xx) */ ++ ++#define AC_BITMAP_NONE 0x0 /* No ACs */ ++#define AC_BITMAP_ALL 0xf /* All ACs */ ++#define AC_BITMAP_TST(ab, ac) (((ab) & (1 << (ac))) != 0) ++#define AC_BITMAP_SET(ab, ac) (((ab) |= (1 << (ac)))) ++#define AC_BITMAP_RESET(ab, ac) (((ab) &= ~(1 << (ac)))) ++ ++/* WME Information Element (IE) */ ++BWL_PRE_PACKED_STRUCT struct wme_ie { ++ uint8 oui[3]; ++ uint8 type; ++ uint8 subtype; ++ uint8 version; ++ uint8 qosinfo; ++} BWL_POST_PACKED_STRUCT; ++typedef struct wme_ie wme_ie_t; ++#define WME_IE_LEN 7 /* WME IE length */ ++ ++BWL_PRE_PACKED_STRUCT struct edcf_acparam { ++ uint8 ACI; ++ uint8 ECW; ++ uint16 TXOP; /* stored in network order (ls octet first) */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct edcf_acparam 
edcf_acparam_t; ++ ++/* WME Parameter Element (PE) */ ++BWL_PRE_PACKED_STRUCT struct wme_param_ie { ++ uint8 oui[3]; ++ uint8 type; ++ uint8 subtype; ++ uint8 version; ++ uint8 qosinfo; ++ uint8 rsvd; ++ edcf_acparam_t acparam[AC_COUNT]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct wme_param_ie wme_param_ie_t; ++#define WME_PARAM_IE_LEN 24 /* WME Parameter IE length */ ++ ++/* QoS Info field for IE as sent from AP */ ++#define WME_QI_AP_APSD_MASK 0x80 /* U-APSD Supported mask */ ++#define WME_QI_AP_APSD_SHIFT 7 /* U-APSD Supported shift */ ++#define WME_QI_AP_COUNT_MASK 0x0f /* Parameter set count mask */ ++#define WME_QI_AP_COUNT_SHIFT 0 /* Parameter set count shift */ ++ ++/* QoS Info field for IE as sent from STA */ ++#define WME_QI_STA_MAXSPLEN_MASK 0x60 /* Max Service Period Length mask */ ++#define WME_QI_STA_MAXSPLEN_SHIFT 5 /* Max Service Period Length shift */ ++#define WME_QI_STA_APSD_ALL_MASK 0xf /* APSD all AC bits mask */ ++#define WME_QI_STA_APSD_ALL_SHIFT 0 /* APSD all AC bits shift */ ++#define WME_QI_STA_APSD_BE_MASK 0x8 /* APSD AC_BE mask */ ++#define WME_QI_STA_APSD_BE_SHIFT 3 /* APSD AC_BE shift */ ++#define WME_QI_STA_APSD_BK_MASK 0x4 /* APSD AC_BK mask */ ++#define WME_QI_STA_APSD_BK_SHIFT 2 /* APSD AC_BK shift */ ++#define WME_QI_STA_APSD_VI_MASK 0x2 /* APSD AC_VI mask */ ++#define WME_QI_STA_APSD_VI_SHIFT 1 /* APSD AC_VI shift */ ++#define WME_QI_STA_APSD_VO_MASK 0x1 /* APSD AC_VO mask */ ++#define WME_QI_STA_APSD_VO_SHIFT 0 /* APSD AC_VO shift */ ++ ++/* ACI */ ++#define EDCF_AIFSN_MIN 1 /* AIFSN minimum value */ ++#define EDCF_AIFSN_MAX 15 /* AIFSN maximum value */ ++#define EDCF_AIFSN_MASK 0x0f /* AIFSN mask */ ++#define EDCF_ACM_MASK 0x10 /* ACM mask */ ++#define EDCF_ACI_MASK 0x60 /* ACI mask */ ++#define EDCF_ACI_SHIFT 5 /* ACI shift */ ++#define EDCF_AIFSN_SHIFT 12 /* 4 MSB(0xFFF) in ifs_ctl for AC idx */ ++ ++/* ECW */ ++#define EDCF_ECW_MIN 0 /* cwmin/cwmax exponent minimum value */ ++#define EDCF_ECW_MAX 15 /* cwmin/cwmax exponent 
maximum value */ ++#define EDCF_ECW2CW(exp) ((1 << (exp)) - 1) ++#define EDCF_ECWMIN_MASK 0x0f /* cwmin exponent form mask */ ++#define EDCF_ECWMAX_MASK 0xf0 /* cwmax exponent form mask */ ++#define EDCF_ECWMAX_SHIFT 4 /* cwmax exponent form shift */ ++ ++/* TXOP */ ++#define EDCF_TXOP_MIN 0 /* TXOP minimum value */ ++#define EDCF_TXOP_MAX 65535 /* TXOP maximum value */ ++#define EDCF_TXOP2USEC(txop) ((txop) << 5) ++ ++/* Default BE ACI value for non-WME connection STA */ ++#define NON_EDCF_AC_BE_ACI_STA 0x02 ++ ++/* Default EDCF parameters that AP advertises for STA to use; WMM draft Table 12 */ ++#define EDCF_AC_BE_ACI_STA 0x03 /* STA ACI value for best effort AC */ ++#define EDCF_AC_BE_ECW_STA 0xA4 /* STA ECW value for best effort AC */ ++#define EDCF_AC_BE_TXOP_STA 0x0000 /* STA TXOP value for best effort AC */ ++#define EDCF_AC_BK_ACI_STA 0x27 /* STA ACI value for background AC */ ++#define EDCF_AC_BK_ECW_STA 0xA4 /* STA ECW value for background AC */ ++#define EDCF_AC_BK_TXOP_STA 0x0000 /* STA TXOP value for background AC */ ++#define EDCF_AC_VI_ACI_STA 0x42 /* STA ACI value for video AC */ ++#define EDCF_AC_VI_ECW_STA 0x43 /* STA ECW value for video AC */ ++#define EDCF_AC_VI_TXOP_STA 0x005e /* STA TXOP value for video AC */ ++#define EDCF_AC_VO_ACI_STA 0x62 /* STA ACI value for audio AC */ ++#define EDCF_AC_VO_ECW_STA 0x32 /* STA ECW value for audio AC */ ++#define EDCF_AC_VO_TXOP_STA 0x002f /* STA TXOP value for audio AC */ ++ ++/* Default EDCF parameters that AP uses; WMM draft Table 14 */ ++#define EDCF_AC_BE_ACI_AP 0x03 /* AP ACI value for best effort AC */ ++#define EDCF_AC_BE_ECW_AP 0x64 /* AP ECW value for best effort AC */ ++#define EDCF_AC_BE_TXOP_AP 0x0000 /* AP TXOP value for best effort AC */ ++#define EDCF_AC_BK_ACI_AP 0x27 /* AP ACI value for background AC */ ++#define EDCF_AC_BK_ECW_AP 0xA4 /* AP ECW value for background AC */ ++#define EDCF_AC_BK_TXOP_AP 0x0000 /* AP TXOP value for background AC */ ++#define EDCF_AC_VI_ACI_AP 0x41 /* AP ACI 
value for video AC */ ++#define EDCF_AC_VI_ECW_AP 0x43 /* AP ECW value for video AC */ ++#define EDCF_AC_VI_TXOP_AP 0x005e /* AP TXOP value for video AC */ ++#define EDCF_AC_VO_ACI_AP 0x61 /* AP ACI value for audio AC */ ++#define EDCF_AC_VO_ECW_AP 0x32 /* AP ECW value for audio AC */ ++#define EDCF_AC_VO_TXOP_AP 0x002f /* AP TXOP value for audio AC */ ++ ++/* EDCA Parameter IE */ ++BWL_PRE_PACKED_STRUCT struct edca_param_ie { ++ uint8 qosinfo; ++ uint8 rsvd; ++ edcf_acparam_t acparam[AC_COUNT]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct edca_param_ie edca_param_ie_t; ++#define EDCA_PARAM_IE_LEN 18 /* EDCA Parameter IE length */ ++ ++/* QoS Capability IE */ ++BWL_PRE_PACKED_STRUCT struct qos_cap_ie { ++ uint8 qosinfo; ++} BWL_POST_PACKED_STRUCT; ++typedef struct qos_cap_ie qos_cap_ie_t; ++ ++BWL_PRE_PACKED_STRUCT struct dot11_qbss_load_ie { ++ uint8 id; /* 11, DOT11_MNG_QBSS_LOAD_ID */ ++ uint8 length; ++ uint16 station_count; /* total number of STAs associated */ ++ uint8 channel_utilization; /* % of time, normalized to 255, QAP sensed medium busy */ ++ uint16 aac; /* available admission capacity */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_qbss_load_ie dot11_qbss_load_ie_t; ++#define BSS_LOAD_IE_SIZE 7 /* BSS load IE size */ ++ ++/* nom_msdu_size */ ++#define FIXED_MSDU_SIZE 0x8000 /* MSDU size is fixed */ ++#define MSDU_SIZE_MASK 0x7fff /* (Nominal or fixed) MSDU size */ ++ ++/* surplus_bandwidth */ ++/* Represented as 3 bits of integer, binary point, 13 bits fraction */ ++#define INTEGER_SHIFT 13 /* integer shift */ ++#define FRACTION_MASK 0x1FFF /* fraction mask */ ++ ++/* Management Notification Frame */ ++BWL_PRE_PACKED_STRUCT struct dot11_management_notification { ++ uint8 category; /* DOT11_ACTION_NOTIFICATION */ ++ uint8 action; ++ uint8 token; ++ uint8 status; ++ uint8 data[1]; /* Elements */ ++} BWL_POST_PACKED_STRUCT; ++#define DOT11_MGMT_NOTIFICATION_LEN 4 /* Fixed length */ ++ ++/* Timeout Interval IE */ ++BWL_PRE_PACKED_STRUCT struct 
ti_ie { ++ uint8 ti_type; ++ uint32 ti_val; ++} BWL_POST_PACKED_STRUCT; ++typedef struct ti_ie ti_ie_t; ++#define TI_TYPE_REASSOC_DEADLINE 1 ++#define TI_TYPE_KEY_LIFETIME 2 ++ ++/* WME Action Codes */ ++#define WME_ADDTS_REQUEST 0 /* WME ADDTS request */ ++#define WME_ADDTS_RESPONSE 1 /* WME ADDTS response */ ++#define WME_DELTS_REQUEST 2 /* WME DELTS request */ ++ ++/* WME Setup Response Status Codes */ ++#define WME_ADMISSION_ACCEPTED 0 /* WME admission accepted */ ++#define WME_INVALID_PARAMETERS 1 /* WME invalide parameters */ ++#define WME_ADMISSION_REFUSED 3 /* WME admission refused */ ++ ++/* Macro to take a pointer to a beacon or probe response ++ * body and return the char* pointer to the SSID info element ++ */ ++#define BCN_PRB_SSID(body) ((char*)(body) + DOT11_BCN_PRB_LEN) ++ ++/* Authentication frame payload constants */ ++#define DOT11_OPEN_SYSTEM 0 /* d11 open authentication */ ++#define DOT11_SHARED_KEY 1 /* d11 shared authentication */ ++#define DOT11_FAST_BSS 2 /* d11 fast bss authentication */ ++#define DOT11_CHALLENGE_LEN 128 /* d11 challenge text length */ ++ ++/* Frame control macros */ ++#define FC_PVER_MASK 0x3 /* PVER mask */ ++#define FC_PVER_SHIFT 0 /* PVER shift */ ++#define FC_TYPE_MASK 0xC /* type mask */ ++#define FC_TYPE_SHIFT 2 /* type shift */ ++#define FC_SUBTYPE_MASK 0xF0 /* subtype mask */ ++#define FC_SUBTYPE_SHIFT 4 /* subtype shift */ ++#define FC_TODS 0x100 /* to DS */ ++#define FC_TODS_SHIFT 8 /* to DS shift */ ++#define FC_FROMDS 0x200 /* from DS */ ++#define FC_FROMDS_SHIFT 9 /* from DS shift */ ++#define FC_MOREFRAG 0x400 /* more frag. */ ++#define FC_MOREFRAG_SHIFT 10 /* more frag. 
shift */ ++#define FC_RETRY 0x800 /* retry */ ++#define FC_RETRY_SHIFT 11 /* retry shift */ ++#define FC_PM 0x1000 /* PM */ ++#define FC_PM_SHIFT 12 /* PM shift */ ++#define FC_MOREDATA 0x2000 /* more data */ ++#define FC_MOREDATA_SHIFT 13 /* more data shift */ ++#define FC_WEP 0x4000 /* WEP */ ++#define FC_WEP_SHIFT 14 /* WEP shift */ ++#define FC_ORDER 0x8000 /* order */ ++#define FC_ORDER_SHIFT 15 /* order shift */ ++ ++/* sequence control macros */ ++#define SEQNUM_SHIFT 4 /* seq. number shift */ ++#define SEQNUM_MAX 0x1000 /* max seqnum + 1 */ ++#define FRAGNUM_MASK 0xF /* frag. number mask */ ++ ++/* Frame Control type/subtype defs */ ++ ++/* FC Types */ ++#define FC_TYPE_MNG 0 /* management type */ ++#define FC_TYPE_CTL 1 /* control type */ ++#define FC_TYPE_DATA 2 /* data type */ ++ ++/* Management Subtypes */ ++#define FC_SUBTYPE_ASSOC_REQ 0 /* assoc. request */ ++#define FC_SUBTYPE_ASSOC_RESP 1 /* assoc. response */ ++#define FC_SUBTYPE_REASSOC_REQ 2 /* reassoc. request */ ++#define FC_SUBTYPE_REASSOC_RESP 3 /* reassoc. response */ ++#define FC_SUBTYPE_PROBE_REQ 4 /* probe request */ ++#define FC_SUBTYPE_PROBE_RESP 5 /* probe response */ ++#define FC_SUBTYPE_BEACON 8 /* beacon */ ++#define FC_SUBTYPE_ATIM 9 /* ATIM */ ++#define FC_SUBTYPE_DISASSOC 10 /* disassoc. 
*/ ++#define FC_SUBTYPE_AUTH 11 /* authentication */ ++#define FC_SUBTYPE_DEAUTH 12 /* de-authentication */ ++#define FC_SUBTYPE_ACTION 13 /* action */ ++#define FC_SUBTYPE_ACTION_NOACK 14 /* action no-ack */ ++ ++/* Control Subtypes */ ++#define FC_SUBTYPE_CTL_WRAPPER 7 /* Control Wrapper */ ++#define FC_SUBTYPE_BLOCKACK_REQ 8 /* Block Ack Req */ ++#define FC_SUBTYPE_BLOCKACK 9 /* Block Ack */ ++#define FC_SUBTYPE_PS_POLL 10 /* PS poll */ ++#define FC_SUBTYPE_RTS 11 /* RTS */ ++#define FC_SUBTYPE_CTS 12 /* CTS */ ++#define FC_SUBTYPE_ACK 13 /* ACK */ ++#define FC_SUBTYPE_CF_END 14 /* CF-END */ ++#define FC_SUBTYPE_CF_END_ACK 15 /* CF-END ACK */ ++ ++/* Data Subtypes */ ++#define FC_SUBTYPE_DATA 0 /* Data */ ++#define FC_SUBTYPE_DATA_CF_ACK 1 /* Data + CF-ACK */ ++#define FC_SUBTYPE_DATA_CF_POLL 2 /* Data + CF-Poll */ ++#define FC_SUBTYPE_DATA_CF_ACK_POLL 3 /* Data + CF-Ack + CF-Poll */ ++#define FC_SUBTYPE_NULL 4 /* Null */ ++#define FC_SUBTYPE_CF_ACK 5 /* CF-Ack */ ++#define FC_SUBTYPE_CF_POLL 6 /* CF-Poll */ ++#define FC_SUBTYPE_CF_ACK_POLL 7 /* CF-Ack + CF-Poll */ ++#define FC_SUBTYPE_QOS_DATA 8 /* QoS Data */ ++#define FC_SUBTYPE_QOS_DATA_CF_ACK 9 /* QoS Data + CF-Ack */ ++#define FC_SUBTYPE_QOS_DATA_CF_POLL 10 /* QoS Data + CF-Poll */ ++#define FC_SUBTYPE_QOS_DATA_CF_ACK_POLL 11 /* QoS Data + CF-Ack + CF-Poll */ ++#define FC_SUBTYPE_QOS_NULL 12 /* QoS Null */ ++#define FC_SUBTYPE_QOS_CF_POLL 14 /* QoS CF-Poll */ ++#define FC_SUBTYPE_QOS_CF_ACK_POLL 15 /* QoS CF-Ack + CF-Poll */ ++ ++/* Data Subtype Groups */ ++#define FC_SUBTYPE_ANY_QOS(s) (((s) & 8) != 0) ++#define FC_SUBTYPE_ANY_NULL(s) (((s) & 4) != 0) ++#define FC_SUBTYPE_ANY_CF_POLL(s) (((s) & 2) != 0) ++#define FC_SUBTYPE_ANY_CF_ACK(s) (((s) & 1) != 0) ++ ++/* Type/Subtype Combos */ ++#define FC_KIND_MASK (FC_TYPE_MASK | FC_SUBTYPE_MASK) /* FC kind mask */ ++ ++#define FC_KIND(t, s) (((t) << FC_TYPE_SHIFT) | ((s) << FC_SUBTYPE_SHIFT)) /* FC kind */ ++ ++#define FC_SUBTYPE(fc) (((fc) & FC_SUBTYPE_MASK) 
>> FC_SUBTYPE_SHIFT) /* Subtype from FC */ ++#define FC_TYPE(fc) (((fc) & FC_TYPE_MASK) >> FC_TYPE_SHIFT) /* Type from FC */ ++ ++#define FC_ASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_REQ) /* assoc. request */ ++#define FC_ASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ASSOC_RESP) /* assoc. response */ ++#define FC_REASSOC_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_REQ) /* reassoc. request */ ++#define FC_REASSOC_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_REASSOC_RESP) /* reassoc. response */ ++#define FC_PROBE_REQ FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_REQ) /* probe request */ ++#define FC_PROBE_RESP FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_PROBE_RESP) /* probe response */ ++#define FC_BEACON FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_BEACON) /* beacon */ ++#define FC_DISASSOC FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DISASSOC) /* disassoc */ ++#define FC_AUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_AUTH) /* authentication */ ++#define FC_DEAUTH FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_DEAUTH) /* deauthentication */ ++#define FC_ACTION FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION) /* action */ ++#define FC_ACTION_NOACK FC_KIND(FC_TYPE_MNG, FC_SUBTYPE_ACTION_NOACK) /* action no-ack */ ++ ++#define FC_CTL_WRAPPER FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTL_WRAPPER) /* Control Wrapper */ ++#define FC_BLOCKACK_REQ FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK_REQ) /* Block Ack Req */ ++#define FC_BLOCKACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_BLOCKACK) /* Block Ack */ ++#define FC_PS_POLL FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_PS_POLL) /* PS poll */ ++#define FC_RTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_RTS) /* RTS */ ++#define FC_CTS FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CTS) /* CTS */ ++#define FC_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_ACK) /* ACK */ ++#define FC_CF_END FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END) /* CF-END */ ++#define FC_CF_END_ACK FC_KIND(FC_TYPE_CTL, FC_SUBTYPE_CF_END_ACK) /* CF-END ACK */ ++ ++#define FC_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA) /* data */ ++#define FC_NULL_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_NULL) /* null data */ ++#define 
FC_DATA_CF_ACK FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_DATA_CF_ACK) /* data CF ACK */ ++#define FC_QOS_DATA FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_DATA) /* QoS data */ ++#define FC_QOS_NULL FC_KIND(FC_TYPE_DATA, FC_SUBTYPE_QOS_NULL) /* QoS null */ ++ ++/* QoS Control Field */ ++ ++/* 802.1D Priority */ ++#define QOS_PRIO_SHIFT 0 /* QoS priority shift */ ++#define QOS_PRIO_MASK 0x0007 /* QoS priority mask */ ++#define QOS_PRIO(qos) (((qos) & QOS_PRIO_MASK) >> QOS_PRIO_SHIFT) /* QoS priority */ ++ ++/* Traffic Identifier */ ++#define QOS_TID_SHIFT 0 /* QoS TID shift */ ++#define QOS_TID_MASK 0x000f /* QoS TID mask */ ++#define QOS_TID(qos) (((qos) & QOS_TID_MASK) >> QOS_TID_SHIFT) /* QoS TID */ ++ ++/* End of Service Period (U-APSD) */ ++#define QOS_EOSP_SHIFT 4 /* QoS End of Service Period shift */ ++#define QOS_EOSP_MASK 0x0010 /* QoS End of Service Period mask */ ++#define QOS_EOSP(qos) (((qos) & QOS_EOSP_MASK) >> QOS_EOSP_SHIFT) /* Qos EOSP */ ++ ++/* Ack Policy */ ++#define QOS_ACK_NORMAL_ACK 0 /* Normal Ack */ ++#define QOS_ACK_NO_ACK 1 /* No Ack (eg mcast) */ ++#define QOS_ACK_NO_EXP_ACK 2 /* No Explicit Ack */ ++#define QOS_ACK_BLOCK_ACK 3 /* Block Ack */ ++#define QOS_ACK_SHIFT 5 /* QoS ACK shift */ ++#define QOS_ACK_MASK 0x0060 /* QoS ACK mask */ ++#define QOS_ACK(qos) (((qos) & QOS_ACK_MASK) >> QOS_ACK_SHIFT) /* QoS ACK */ ++ ++/* A-MSDU flag */ ++#define QOS_AMSDU_SHIFT 7 /* AMSDU shift */ ++#define QOS_AMSDU_MASK 0x0080 /* AMSDU mask */ ++ ++/* Management Frames */ ++ ++/* Management Frame Constants */ ++ ++/* Fixed fields */ ++#define DOT11_MNG_AUTH_ALGO_LEN 2 /* d11 management auth. algo. length */ ++#define DOT11_MNG_AUTH_SEQ_LEN 2 /* d11 management auth. seq. length */ ++#define DOT11_MNG_BEACON_INT_LEN 2 /* d11 management beacon interval length */ ++#define DOT11_MNG_CAP_LEN 2 /* d11 management cap. 
length */ ++#define DOT11_MNG_AP_ADDR_LEN 6 /* d11 management AP address length */ ++#define DOT11_MNG_LISTEN_INT_LEN 2 /* d11 management listen interval length */ ++#define DOT11_MNG_REASON_LEN 2 /* d11 management reason length */ ++#define DOT11_MNG_AID_LEN 2 /* d11 management AID length */ ++#define DOT11_MNG_STATUS_LEN 2 /* d11 management status length */ ++#define DOT11_MNG_TIMESTAMP_LEN 8 /* d11 management timestamp length */ ++ ++/* DUR/ID field in assoc resp is 0xc000 | AID */ ++#define DOT11_AID_MASK 0x3fff /* d11 AID mask */ ++ ++/* Reason Codes */ ++#define DOT11_RC_RESERVED 0 /* d11 RC reserved */ ++#define DOT11_RC_UNSPECIFIED 1 /* Unspecified reason */ ++#define DOT11_RC_AUTH_INVAL 2 /* Previous authentication no longer valid */ ++#define DOT11_RC_DEAUTH_LEAVING 3 /* Deauthenticated because sending station ++ * is leaving (or has left) IBSS or ESS ++ */ ++#define DOT11_RC_INACTIVITY 4 /* Disassociated due to inactivity */ ++#define DOT11_RC_BUSY 5 /* Disassociated because AP is unable to handle ++ * all currently associated stations ++ */ ++#define DOT11_RC_INVAL_CLASS_2 6 /* Class 2 frame received from ++ * nonauthenticated station ++ */ ++#define DOT11_RC_INVAL_CLASS_3 7 /* Class 3 frame received from ++ * nonassociated station ++ */ ++#define DOT11_RC_DISASSOC_LEAVING 8 /* Disassociated because sending station is ++ * leaving (or has left) BSS ++ */ ++#define DOT11_RC_NOT_AUTH 9 /* Station requesting (re)association is not ++ * authenticated with responding station ++ */ ++#define DOT11_RC_BAD_PC 10 /* Unacceptable power capability element */ ++#define DOT11_RC_BAD_CHANNELS 11 /* Unacceptable supported channels element */ ++/* 12 is unused */ ++ ++/* 32-39 are QSTA specific reasons added in 11e */ ++#define DOT11_RC_UNSPECIFIED_QOS 32 /* unspecified QoS-related reason */ ++#define DOT11_RC_INSUFFCIENT_BW 33 /* QAP lacks sufficient bandwidth */ ++#define DOT11_RC_EXCESSIVE_FRAMES 34 /* excessive number of frames need ack */ ++#define 
DOT11_RC_TX_OUTSIDE_TXOP 35 /* transmitting outside the limits of txop */ ++#define DOT11_RC_LEAVING_QBSS 36 /* QSTA is leaving the QBSS (or restting) */ ++#define DOT11_RC_BAD_MECHANISM 37 /* does not want to use the mechanism */ ++#define DOT11_RC_SETUP_NEEDED 38 /* mechanism needs a setup */ ++#define DOT11_RC_TIMEOUT 39 /* timeout */ ++ ++#define DOT11_RC_MAX 23 /* Reason codes > 23 are reserved */ ++ ++#define DOT11_RC_TDLS_PEER_UNREACH 25 ++#define DOT11_RC_TDLS_DOWN_UNSPECIFIED 26 ++ ++/* Status Codes */ ++#define DOT11_SC_SUCCESS 0 /* Successful */ ++#define DOT11_SC_FAILURE 1 /* Unspecified failure */ ++#define DOT11_SC_TDLS_WAKEUP_SCH_ALT 2 /* TDLS wakeup schedule rejected but alternative */ ++ /* schedule provided */ ++#define DOT11_SC_TDLS_WAKEUP_SCH_REJ 3 /* TDLS wakeup schedule rejected */ ++#define DOT11_SC_TDLS_SEC_DISABLED 5 /* TDLS Security disabled */ ++#define DOT11_SC_LIFETIME_REJ 6 /* Unacceptable lifetime */ ++#define DOT11_SC_NOT_SAME_BSS 7 /* Not in same BSS */ ++#define DOT11_SC_CAP_MISMATCH 10 /* Cannot support all requested ++ * capabilities in the Capability ++ * Information field ++ */ ++#define DOT11_SC_REASSOC_FAIL 11 /* Reassociation denied due to inability ++ * to confirm that association exists ++ */ ++#define DOT11_SC_ASSOC_FAIL 12 /* Association denied due to reason ++ * outside the scope of this standard ++ */ ++#define DOT11_SC_AUTH_MISMATCH 13 /* Responding station does not support ++ * the specified authentication ++ * algorithm ++ */ ++#define DOT11_SC_AUTH_SEQ 14 /* Received an Authentication frame ++ * with authentication transaction ++ * sequence number out of expected ++ * sequence ++ */ ++#define DOT11_SC_AUTH_CHALLENGE_FAIL 15 /* Authentication rejected because of ++ * challenge failure ++ */ ++#define DOT11_SC_AUTH_TIMEOUT 16 /* Authentication rejected due to timeout ++ * waiting for next frame in sequence ++ */ ++#define DOT11_SC_ASSOC_BUSY_FAIL 17 /* Association denied because AP is ++ * unable to handle additional 
++ * associated stations ++ */ ++#define DOT11_SC_ASSOC_RATE_MISMATCH 18 /* Association denied due to requesting ++ * station not supporting all of the ++ * data rates in the BSSBasicRateSet ++ * parameter ++ */ ++#define DOT11_SC_ASSOC_SHORT_REQUIRED 19 /* Association denied due to requesting ++ * station not supporting the Short ++ * Preamble option ++ */ ++#define DOT11_SC_ASSOC_PBCC_REQUIRED 20 /* Association denied due to requesting ++ * station not supporting the PBCC ++ * Modulation option ++ */ ++#define DOT11_SC_ASSOC_AGILITY_REQUIRED 21 /* Association denied due to requesting ++ * station not supporting the Channel ++ * Agility option ++ */ ++#define DOT11_SC_ASSOC_SPECTRUM_REQUIRED 22 /* Association denied because Spectrum ++ * Management capability is required. ++ */ ++#define DOT11_SC_ASSOC_BAD_POWER_CAP 23 /* Association denied because the info ++ * in the Power Cap element is ++ * unacceptable. ++ */ ++#define DOT11_SC_ASSOC_BAD_SUP_CHANNELS 24 /* Association denied because the info ++ * in the Supported Channel element is ++ * unacceptable ++ */ ++#define DOT11_SC_ASSOC_SHORTSLOT_REQUIRED 25 /* Association denied due to requesting ++ * station not supporting the Short Slot ++ * Time option ++ */ ++#define DOT11_SC_ASSOC_ERPBCC_REQUIRED 26 /* Association denied due to requesting ++ * station not supporting the ER-PBCC ++ * Modulation option ++ */ ++#define DOT11_SC_ASSOC_DSSOFDM_REQUIRED 27 /* Association denied due to requesting ++ * station not supporting the DSS-OFDM ++ * option ++ */ ++#define DOT11_SC_ASSOC_R0KH_UNREACHABLE 28 /* Association denied due to AP ++ * being unable to reach the R0 Key Holder ++ */ ++#define DOT11_SC_ASSOC_TRY_LATER 30 /* Association denied temporarily, try again later ++ */ ++#define DOT11_SC_ASSOC_MFP_VIOLATION 31 /* Association denied due to Robust Management ++ * frame policy violation ++ */ ++ ++#define DOT11_SC_DECLINED 37 /* request declined */ ++#define DOT11_SC_INVALID_PARAMS 38 /* One or more params have 
invalid values */ ++#define DOT11_SC_INVALID_PAIRWISE_CIPHER 42 /* invalid pairwise cipher */ ++#define DOT11_SC_INVALID_AKMP 43 /* Association denied due to invalid AKMP */ ++#define DOT11_SC_INVALID_RSNIE_CAP 45 /* invalid RSN IE capabilities */ ++#define DOT11_SC_DLS_NOT_ALLOWED 48 /* DLS is not allowed in the BSS by policy */ ++#define DOT11_SC_INVALID_PMKID 53 /* Association denied due to invalid PMKID */ ++#define DOT11_SC_INVALID_MDID 54 /* Association denied due to invalid MDID */ ++#define DOT11_SC_INVALID_FTIE 55 /* Association denied due to invalid FTIE */ ++ ++#define DOT11_SC_UNEXP_MSG 70 /* Unexpected message */ ++#define DOT11_SC_INVALID_SNONCE 71 /* Invalid SNonce */ ++#define DOT11_SC_INVALID_RSNIE 72 /* Invalid contents of RSNIE */ ++ ++/* Info Elts, length of INFORMATION portion of Info Elts */ ++#define DOT11_MNG_DS_PARAM_LEN 1 /* d11 management DS parameter length */ ++#define DOT11_MNG_IBSS_PARAM_LEN 2 /* d11 management IBSS parameter length */ ++ ++/* TIM Info element has 3 bytes fixed info in INFORMATION field, ++ * followed by 1 to 251 bytes of Partial Virtual Bitmap ++ */ ++#define DOT11_MNG_TIM_FIXED_LEN 3 /* d11 management TIM fixed length */ ++#define DOT11_MNG_TIM_DTIM_COUNT 0 /* d11 management DTIM count */ ++#define DOT11_MNG_TIM_DTIM_PERIOD 1 /* d11 management DTIM period */ ++#define DOT11_MNG_TIM_BITMAP_CTL 2 /* d11 management TIM BITMAP control */ ++#define DOT11_MNG_TIM_PVB 3 /* d11 management TIM PVB */ ++ ++/* TLV defines */ ++#define TLV_TAG_OFF 0 /* tag offset */ ++#define TLV_LEN_OFF 1 /* length offset */ ++#define TLV_HDR_LEN 2 /* header length */ ++#define TLV_BODY_OFF 2 /* body offset */ ++ ++/* Management Frame Information Element IDs */ ++#define DOT11_MNG_SSID_ID 0 /* d11 management SSID id */ ++#define DOT11_MNG_RATES_ID 1 /* d11 management rates id */ ++#define DOT11_MNG_FH_PARMS_ID 2 /* d11 management FH parameter id */ ++#define DOT11_MNG_DS_PARMS_ID 3 /* d11 management DS parameter id */ ++#define 
DOT11_MNG_CF_PARMS_ID 4 /* d11 management CF parameter id */ ++#define DOT11_MNG_TIM_ID 5 /* d11 management TIM id */ ++#define DOT11_MNG_IBSS_PARMS_ID 6 /* d11 management IBSS parameter id */ ++#define DOT11_MNG_COUNTRY_ID 7 /* d11 management country id */ ++#define DOT11_MNG_HOPPING_PARMS_ID 8 /* d11 management hopping parameter id */ ++#define DOT11_MNG_HOPPING_TABLE_ID 9 /* d11 management hopping table id */ ++#define DOT11_MNG_REQUEST_ID 10 /* d11 management request id */ ++#define DOT11_MNG_QBSS_LOAD_ID 11 /* d11 management QBSS Load id */ ++#define DOT11_MNG_EDCA_PARAM_ID 12 /* 11E EDCA Parameter id */ ++#define DOT11_MNG_CHALLENGE_ID 16 /* d11 management chanllenge id */ ++#define DOT11_MNG_PWR_CONSTRAINT_ID 32 /* 11H PowerConstraint */ ++#define DOT11_MNG_PWR_CAP_ID 33 /* 11H PowerCapability */ ++#define DOT11_MNG_TPC_REQUEST_ID 34 /* 11H TPC Request */ ++#define DOT11_MNG_TPC_REPORT_ID 35 /* 11H TPC Report */ ++#define DOT11_MNG_SUPP_CHANNELS_ID 36 /* 11H Supported Channels */ ++#define DOT11_MNG_CHANNEL_SWITCH_ID 37 /* 11H ChannelSwitch Announcement */ ++#define DOT11_MNG_MEASURE_REQUEST_ID 38 /* 11H MeasurementRequest */ ++#define DOT11_MNG_MEASURE_REPORT_ID 39 /* 11H MeasurementReport */ ++#define DOT11_MNG_QUIET_ID 40 /* 11H Quiet */ ++#define DOT11_MNG_IBSS_DFS_ID 41 /* 11H IBSS_DFS */ ++#define DOT11_MNG_ERP_ID 42 /* d11 management ERP id */ ++#define DOT11_MNG_TS_DELAY_ID 43 /* d11 management TS Delay id */ ++#define DOT11_MNG_HT_CAP 45 /* d11 mgmt HT cap id */ ++#define DOT11_MNG_QOS_CAP_ID 46 /* 11E QoS Capability id */ ++#define DOT11_MNG_NONERP_ID 47 /* d11 management NON-ERP id */ ++#define DOT11_MNG_RSN_ID 48 /* d11 management RSN id */ ++#define DOT11_MNG_EXT_RATES_ID 50 /* d11 management ext. 
rates id */ ++#define DOT11_MNG_AP_CHREP_ID 51 /* 11k AP Channel report id */ ++#define DOT11_MNG_NBR_REP_ID 52 /* 11k Neighbor report id */ ++#define DOT11_MNG_MDIE_ID 54 /* 11r Mobility domain id */ ++#define DOT11_MNG_FTIE_ID 55 /* 11r Fast Bss Transition id */ ++#define DOT11_MNG_FT_TI_ID 56 /* 11r Timeout Interval id */ ++#define DOT11_MNG_REGCLASS_ID 59 /* d11 management regulatory class id */ ++#define DOT11_MNG_EXT_CSA_ID 60 /* d11 Extended CSA */ ++#define DOT11_MNG_HT_ADD 61 /* d11 mgmt additional HT info */ ++#define DOT11_MNG_EXT_CHANNEL_OFFSET 62 /* d11 mgmt ext channel offset */ ++#define DOT11_MNG_WAPI_ID 68 /* d11 management WAPI id */ ++#define DOT11_MNG_TIME_ADVERTISE_ID 69 /* 11p time advertisement */ ++#define DOT11_MNG_RRM_CAP_ID 70 /* 11k radio measurement capability */ ++#define DOT11_MNG_HT_BSS_COEXINFO_ID 72 /* d11 mgmt OBSS Coexistence INFO */ ++#define DOT11_MNG_HT_BSS_CHANNEL_REPORT_ID 73 /* d11 mgmt OBSS Intolerant Channel list */ ++#define DOT11_MNG_HT_OBSS_ID 74 /* d11 mgmt OBSS HT info */ ++#define DOT11_MNG_CHANNEL_USAGE 97 /* 11v channel usage */ ++#define DOT11_MNG_TIME_ZONE_ID 98 /* 11v time zone */ ++#define DOT11_MNG_LINK_IDENTIFIER_ID 101 /* 11z TDLS Link Identifier IE */ ++#define DOT11_MNG_WAKEUP_SCHEDULE_ID 102 /* 11z TDLS Wakeup Schedule IE */ ++#define DOT11_MNG_CHANNEL_SWITCH_TIMING_ID 104 /* 11z TDLS Channel Switch Timing IE */ ++#define DOT11_MNG_PTI_CONTROL_ID 105 /* 11z TDLS PTI Control IE */ ++#define DOT11_MNG_PU_BUFFER_STATUS_ID 106 /* 11z TDLS PU Buffer Status IE */ ++#define DOT11_MNG_INTERWORKING_ID 107 /* 11u interworking */ ++#define DOT11_MNG_ADVERTISEMENT_ID 108 /* 11u advertisement protocol */ ++#define DOT11_MNG_EXP_BW_REQ_ID 109 /* 11u expedited bandwith request */ ++#define DOT11_MNG_QOS_MAP_ID 110 /* 11u QoS map set */ ++#define DOT11_MNG_ROAM_CONSORT_ID 111 /* 11u roaming consortium */ ++#define DOT11_MNG_EMERGCY_ALERT_ID 112 /* 11u emergency alert identifier */ ++#define DOT11_MNG_EXT_CAP_ID 127 /* 
d11 mgmt ext capability */ ++#define DOT11_MNG_VHT_CAP_ID 191 /* d11 mgmt VHT cap id */ ++#define DOT11_MNG_VHT_OPERATION_ID 192 /* d11 mgmt VHT op id */ ++ ++#define DOT11_MNG_WPA_ID 221 /* d11 management WPA id */ ++#define DOT11_MNG_PROPR_ID 221 /* d11 management proprietary id */ ++/* should start using this one instead of above two */ ++#define DOT11_MNG_VS_ID 221 /* d11 management Vendor Specific IE */ ++ ++/* Rate element Basic flag and rate mask */ ++#define DOT11_RATE_BASIC 0x80 /* flag for a Basic Rate */ ++#define DOT11_RATE_MASK 0x7F /* mask for numeric part of rate */ ++ ++/* ERP info element bit values */ ++#define DOT11_MNG_ERP_LEN 1 /* ERP is currently 1 byte long */ ++#define DOT11_MNG_NONERP_PRESENT 0x01 /* NonERP (802.11b) STAs are present ++ *in the BSS ++ */ ++#define DOT11_MNG_USE_PROTECTION 0x02 /* Use protection mechanisms for ++ *ERP-OFDM frames ++ */ ++#define DOT11_MNG_BARKER_PREAMBLE 0x04 /* Short Preambles: 0 == allowed, ++ * 1 == not allowed ++ */ ++/* TS Delay element offset & size */ ++#define DOT11_MGN_TS_DELAY_LEN 4 /* length of TS DELAY IE */ ++#define TS_DELAY_FIELD_SIZE 4 /* TS DELAY field size */ ++ ++/* Capability Information Field */ ++#define DOT11_CAP_ESS 0x0001 /* d11 cap. ESS */ ++#define DOT11_CAP_IBSS 0x0002 /* d11 cap. IBSS */ ++#define DOT11_CAP_POLLABLE 0x0004 /* d11 cap. pollable */ ++#define DOT11_CAP_POLL_RQ 0x0008 /* d11 cap. poll request */ ++#define DOT11_CAP_PRIVACY 0x0010 /* d11 cap. privacy */ ++#define DOT11_CAP_SHORT 0x0020 /* d11 cap. short */ ++#define DOT11_CAP_PBCC 0x0040 /* d11 cap. PBCC */ ++#define DOT11_CAP_AGILITY 0x0080 /* d11 cap. agility */ ++#define DOT11_CAP_SPECTRUM 0x0100 /* d11 cap. spectrum */ ++#define DOT11_CAP_SHORTSLOT 0x0400 /* d11 cap. shortslot */ ++#define DOT11_CAP_RRM 0x1000 /* d11 cap. 11k radio measurement */ ++#define DOT11_CAP_CCK_OFDM 0x2000 /* d11 cap. 
CCK/OFDM */ ++ ++/* Extended capabilities IE bitfields */ ++/* 20/40 BSS Coexistence Management support bit position */ ++#define DOT11_EXT_CAP_OBSS_COEX_MGMT 0 ++/* scheduled PSMP support bit position */ ++#define DOT11_EXT_CAP_SPSMP 6 ++/* BSS Transition Management support bit position */ ++#define DOT11_EXT_CAP_BSS_TRANSITION_MGMT 19 ++/* Interworking support bit position */ ++#define DOT11_EXT_CAP_IW 31 ++/* service Interval granularity bit position and mask */ ++#define DOT11_EXT_CAP_SI 41 ++#define DOT11_EXT_CAP_SI_MASK 0x0E ++ ++/* ++ * Action Frame Constants ++ */ ++#define DOT11_ACTION_HDR_LEN 2 /* action frame category + action field */ ++#define DOT11_ACTION_CAT_OFF 0 /* category offset */ ++#define DOT11_ACTION_ACT_OFF 1 /* action offset */ ++ ++/* Action Category field (sec 7.3.1.11) */ ++#define DOT11_ACTION_CAT_ERR_MASK 0x80 /* category error mask */ ++#define DOT11_ACTION_CAT_MASK 0x7F /* category mask */ ++#define DOT11_ACTION_CAT_SPECT_MNG 0 /* category spectrum management */ ++#define DOT11_ACTION_CAT_QOS 1 /* category QoS */ ++#define DOT11_ACTION_CAT_DLS 2 /* category DLS */ ++#define DOT11_ACTION_CAT_BLOCKACK 3 /* category block ack */ ++#define DOT11_ACTION_CAT_PUBLIC 4 /* category public */ ++#define DOT11_ACTION_CAT_RRM 5 /* category radio measurements */ ++#define DOT11_ACTION_CAT_FBT 6 /* category fast bss transition */ ++#define DOT11_ACTION_CAT_HT 7 /* category for HT */ ++#define DOT11_ACTION_CAT_SA_QUERY 8 /* security association query */ ++#define DOT11_ACTION_CAT_PDPA 9 /* protected dual of public action */ ++#define DOT11_ACTION_CAT_BSSMGMT 10 /* category for BSS transition management */ ++#define DOT11_ACTION_NOTIFICATION 17 ++#define DOT11_ACTION_CAT_VSP 126 /* protected vendor specific */ ++#define DOT11_ACTION_CAT_VS 127 /* category Vendor Specific */ ++ ++/* Spectrum Management Action IDs (sec 7.4.1) */ ++#define DOT11_SM_ACTION_M_REQ 0 /* d11 action measurement request */ ++#define DOT11_SM_ACTION_M_REP 1 /* d11 action 
measurement response */ ++#define DOT11_SM_ACTION_TPC_REQ 2 /* d11 action TPC request */ ++#define DOT11_SM_ACTION_TPC_REP 3 /* d11 action TPC response */ ++#define DOT11_SM_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */ ++#define DOT11_SM_ACTION_EXT_CSA 5 /* d11 extened CSA for 11n */ ++ ++/* HT action ids */ ++#define DOT11_ACTION_ID_HT_CH_WIDTH 0 /* notify channel width action id */ ++#define DOT11_ACTION_ID_HT_MIMO_PS 1 /* mimo ps action id */ ++ ++/* Public action ids */ ++#define DOT11_PUB_ACTION_BSS_COEX_MNG 0 /* 20/40 Coexistence Management action id */ ++#define DOT11_PUB_ACTION_CHANNEL_SWITCH 4 /* d11 action channel switch */ ++ ++/* Block Ack action types */ ++#define DOT11_BA_ACTION_ADDBA_REQ 0 /* ADDBA Req action frame type */ ++#define DOT11_BA_ACTION_ADDBA_RESP 1 /* ADDBA Resp action frame type */ ++#define DOT11_BA_ACTION_DELBA 2 /* DELBA action frame type */ ++ ++/* ADDBA action parameters */ ++#define DOT11_ADDBA_PARAM_AMSDU_SUP 0x0001 /* AMSDU supported under BA */ ++#define DOT11_ADDBA_PARAM_POLICY_MASK 0x0002 /* policy mask(ack vs delayed) */ ++#define DOT11_ADDBA_PARAM_POLICY_SHIFT 1 /* policy shift */ ++#define DOT11_ADDBA_PARAM_TID_MASK 0x003c /* tid mask */ ++#define DOT11_ADDBA_PARAM_TID_SHIFT 2 /* tid shift */ ++#define DOT11_ADDBA_PARAM_BSIZE_MASK 0xffc0 /* buffer size mask */ ++#define DOT11_ADDBA_PARAM_BSIZE_SHIFT 6 /* buffer size shift */ ++ ++#define DOT11_ADDBA_POLICY_DELAYED 0 /* delayed BA policy */ ++#define DOT11_ADDBA_POLICY_IMMEDIATE 1 /* immediate BA policy */ ++ ++/* Fast Transition action types */ ++#define DOT11_FT_ACTION_FT_RESERVED 0 ++#define DOT11_FT_ACTION_FT_REQ 1 /* FBT request - for over-the-DS FBT */ ++#define DOT11_FT_ACTION_FT_RES 2 /* FBT response - for over-the-DS FBT */ ++#define DOT11_FT_ACTION_FT_CON 3 /* FBT confirm - for OTDS with RRP */ ++#define DOT11_FT_ACTION_FT_ACK 4 /* FBT ack */ ++ ++/* DLS action types */ ++#define DOT11_DLS_ACTION_REQ 0 /* DLS Request */ ++#define DOT11_DLS_ACTION_RESP 1 
/* DLS Response */ ++#define DOT11_DLS_ACTION_TD 2 /* DLS Teardown */ ++ ++/* Wireless Network Management (WNM) action types */ ++#define DOT11_WNM_ACTION_EVENT_REQ 0 ++#define DOT11_WNM_ACTION_EVENT_REP 1 ++#define DOT11_WNM_ACTION_DIAG_REQ 2 ++#define DOT11_WNM_ACTION_DIAG_REP 3 ++#define DOT11_WNM_ACTION_LOC_CFG_REQ 4 ++#define DOT11_WNM_ACTION_LOC_RFG_RESP 5 ++#define DOT11_WNM_ACTION_BSS_TRANS_QURY 6 ++#define DOT11_WNM_ACTION_BSS_TRANS_REQ 7 ++#define DOT11_WNM_ACTION_BSS_TRANS_RESP 8 ++#define DOT11_WNM_ACTION_FMS_REQ 9 ++#define DOT11_WNM_ACTION_FMS_RESP 10 ++#define DOT11_WNM_ACTION_COL_INTRFRNCE_REQ 11 ++#define DOT11_WNM_ACTION_COL_INTRFRNCE_REP 12 ++#define DOT11_WNM_ACTION_TFS_REQ 13 ++#define DOT11_WNM_ACTION_TFS_RESP 14 ++#define DOT11_WNM_ACTION_TFS_NOTIFY 15 ++#define DOT11_WNM_ACTION_WNM_SLEEP_REQ 16 ++#define DOT11_WNM_ACTION_WNM_SLEEP_RESP 17 ++#define DOT11_WNM_ACTION_TIM_BCAST_REQ 18 ++#define DOT11_WNM_ACTION_TIM_BCAST_RESP 19 ++#define DOT11_WNM_ACTION_QOS_TRFC_CAP_UPD 20 ++#define DOT11_WNM_ACTION_CHAN_USAGE_REQ 21 ++#define DOT11_WNM_ACTION_CHAN_USAGE_RESP 22 ++#define DOT11_WNM_ACTION_DMS_REQ 23 ++#define DOT11_WNM_ACTION_DMS_RESP 24 ++#define DOT11_WNM_ACTION_TMNG_MEASUR_REQ 25 ++#define DOT11_WNM_ACTION_NOTFCTN_REQ 26 ++#define DOT11_WNM_ACTION_NOTFCTN_RES 27 ++ ++#define DOT11_MNG_COUNTRY_ID_LEN 3 ++ ++/* DLS Request frame header */ ++BWL_PRE_PACKED_STRUCT struct dot11_dls_req { ++ uint8 category; /* category of action frame (2) */ ++ uint8 action; /* DLS action: req (0) */ ++ struct ether_addr da; /* destination address */ ++ struct ether_addr sa; /* source address */ ++ uint16 cap; /* capability */ ++ uint16 timeout; /* timeout value */ ++ uint8 data[1]; /* IE:support rate, extend support rate, HT cap */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_dls_req dot11_dls_req_t; ++#define DOT11_DLS_REQ_LEN 18 /* Fixed length */ ++ ++/* DLS response frame header */ ++BWL_PRE_PACKED_STRUCT struct dot11_dls_resp { ++ uint8 category; /* 
category of action frame (2) */ ++ uint8 action; /* DLS action: req (0) */ ++ uint16 status; /* status code field */ ++ struct ether_addr da; /* destination address */ ++ struct ether_addr sa; /* source address */ ++ uint8 data[1]; /* optional: capability, rate ... */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_dls_resp dot11_dls_resp_t; ++#define DOT11_DLS_RESP_LEN 16 /* Fixed length */ ++ ++ ++/* BSS Management Transition Query frame header */ ++BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_query { ++ uint8 category; /* category of action frame (10) */ ++ uint8 action; /* WNM action: trans_query (6) */ ++ uint8 token; /* dialog token */ ++ uint8 reason; /* transition query reason */ ++ uint8 data[1]; /* Elements */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_bss_trans_query dot11_bss_trans_query_t; ++#define DOT11_BSS_TRANS_QUERY_LEN 4 /* Fixed length */ ++ ++/* BSS Management Transition Request frame header */ ++BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_req { ++ uint8 category; /* category of action frame (10) */ ++ uint8 action; /* WNM action: trans_req (7) */ ++ uint8 token; /* dialog token */ ++ uint8 reqmode; /* transition request mode */ ++ uint16 disassoc_tmr; /* disassociation timer */ ++ uint8 validity_intrvl; /* validity interval */ ++ uint8 data[1]; /* optional: BSS term duration, ... 
*/ ++ /* ...session info URL, list */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_bss_trans_req dot11_bss_trans_req_t; ++#define DOT11_BSS_TRANS_REQ_LEN 7 /* Fixed length */ ++ ++#define DOT11_BSS_TERM_DUR_LEN 12 /* Fixed length if present */ ++ ++ ++/* BSS Mgmt Transition Request Mode Field - 802.11v */ ++#define DOT11_BSS_TRNS_REQMODE_PREF_LIST_INCL 0x01 ++#define DOT11_BSS_TRNS_REQMODE_ABRIDGED 0x02 ++#define DOT11_BSS_TRNS_REQMODE_DISASSOC_IMMINENT 0x04 ++#define DOT11_BSS_TRNS_REQMODE_BSS_TERM_INCL 0x08 ++#define DOT11_BSS_TRNS_REQMODE_ESS_DISASSOC_IMNT 0x10 ++ ++ ++/* BSS Management transition response frame header */ ++BWL_PRE_PACKED_STRUCT struct dot11_bss_trans_res { ++ uint8 category; /* category of action frame (10) */ ++ uint8 action; /* WNM action: trans_res (8) */ ++ uint8 token; /* dialog token */ ++ uint8 status; /* transition status */ ++ uint8 term_delay; /* validity interval */ ++ uint8 data[1]; /* optional: BSS term duration, ... */ ++ /* ...session info URL, list */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_bss_trans_res dot11_bss_trans_res_t; ++#define DOT11_BSS_TRANS_RES_LEN 5 /* Fixed length */ ++ ++/* BSS Mgmt Transition Response Status Field */ ++#define DOT11_BSS_TRNS_RES_STATUS_ACCEPT 0 ++#define DOT11_BSS_TRNS_RES_STATUS_REJECT 1 ++#define DOT11_BSS_TRNS_RES_STATUS_REJ_INSUFF_BCN 2 ++#define DOT11_BSS_TRNS_RES_STATUS_REJ_INSUFF_CAP 3 ++#define DOT11_BSS_TRNS_RES_STATUS_REJ_TERM_UNDESIRED 4 ++#define DOT11_BSS_TRNS_RES_STATUS_REJ_TERM_DELAY_REQ 5 ++#define DOT11_BSS_TRNS_RES_STATUS_REJ_BSS_LIST_PROVIDED 6 ++#define DOT11_BSS_TRNS_RES_STATUS_REJ_NO_SUITABLE_BSS 7 ++#define DOT11_BSS_TRNS_RES_STATUS_REJ_LEAVING_ESS 8 ++ ++ ++/* Neighbor Report BSSID Information Field */ ++#define DOT11_NBR_RPRT_BSSID_INFO_REACHABILTY 0x0003 ++#define DOT11_NBR_RPRT_BSSID_INFO_SEC 0x0004 ++#define DOT11_NBR_RPRT_BSSID_INFO_KEY_SCOPE 0x0008 ++#define DOT11_NBR_RPRT_BSSID_INFO_CAP 0x03f0 ++ ++#define DOT11_NBR_RPRT_BSSID_INFO_CAP_SPEC_MGMT 
0x0010 ++#define DOT11_NBR_RPRT_BSSID_INFO_CAP_QOS 0x0020 ++#define DOT11_NBR_RPRT_BSSID_INFO_CAP_APSD 0x0040 ++#define DOT11_NBR_RPRT_BSSID_INFO_CAP_RDIO_MSMT 0x0080 ++#define DOT11_NBR_RPRT_BSSID_INFO_CAP_DEL_BA 0x0100 ++#define DOT11_NBR_RPRT_BSSID_INFO_CAP_IMM_BA 0x0200 ++ ++/* Neighbor Report Subelements */ ++#define DOT11_NBR_RPRT_SUBELEM_BSS_CANDDT_PREF_ID 3 ++ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_addba_req { ++ uint8 category; /* category of action frame (3) */ ++ uint8 action; /* action: addba req */ ++ uint8 token; /* identifier */ ++ uint16 addba_param_set; /* parameter set */ ++ uint16 timeout; /* timeout in seconds */ ++ uint16 start_seqnum; /* starting sequence number */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_addba_req dot11_addba_req_t; ++#define DOT11_ADDBA_REQ_LEN 9 /* length of addba req frame */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_addba_resp { ++ uint8 category; /* category of action frame (3) */ ++ uint8 action; /* action: addba resp */ ++ uint8 token; /* identifier */ ++ uint16 status; /* status of add request */ ++ uint16 addba_param_set; /* negotiated parameter set */ ++ uint16 timeout; /* negotiated timeout in seconds */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_addba_resp dot11_addba_resp_t; ++#define DOT11_ADDBA_RESP_LEN 9 /* length of addba resp frame */ ++ ++/* DELBA action parameters */ ++#define DOT11_DELBA_PARAM_INIT_MASK 0x0800 /* initiator mask */ ++#define DOT11_DELBA_PARAM_INIT_SHIFT 11 /* initiator shift */ ++#define DOT11_DELBA_PARAM_TID_MASK 0xf000 /* tid mask */ ++#define DOT11_DELBA_PARAM_TID_SHIFT 12 /* tid shift */ ++ ++BWL_PRE_PACKED_STRUCT struct dot11_delba { ++ uint8 category; /* category of action frame (3) */ ++ uint8 action; /* action: addba req */ ++ uint16 delba_param_set; /* paarmeter set */ ++ uint16 reason; /* reason for dellba */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_delba dot11_delba_t; ++#define DOT11_DELBA_LEN 6 /* length of delba frame */ ++ ++/* SA Query action 
field value */ ++#define SA_QUERY_REQUEST 0 ++#define SA_QUERY_RESPONSE 1 ++ ++/* ************* 802.11r related definitions. ************* */ ++ ++/* Over-the-DS Fast Transition Request frame header */ ++BWL_PRE_PACKED_STRUCT struct dot11_ft_req { ++ uint8 category; /* category of action frame (6) */ ++ uint8 action; /* action: ft req */ ++ uint8 sta_addr[ETHER_ADDR_LEN]; ++ uint8 tgt_ap_addr[ETHER_ADDR_LEN]; ++ uint8 data[1]; /* Elements */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_ft_req dot11_ft_req_t; ++#define DOT11_FT_REQ_FIXED_LEN 14 ++ ++/* Over-the-DS Fast Transition Response frame header */ ++BWL_PRE_PACKED_STRUCT struct dot11_ft_res { ++ uint8 category; /* category of action frame (6) */ ++ uint8 action; /* action: ft resp */ ++ uint8 sta_addr[ETHER_ADDR_LEN]; ++ uint8 tgt_ap_addr[ETHER_ADDR_LEN]; ++ uint16 status; /* status code */ ++ uint8 data[1]; /* Elements */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_ft_res dot11_ft_res_t; ++#define DOT11_FT_RES_FIXED_LEN 16 ++ ++ ++/* ************* 802.11k related definitions. 
************* */ ++ ++/* Radio measurements enabled capability ie */ ++ ++#define DOT11_RRM_CAP_LEN 5 /* length of rrm cap bitmap */ ++BWL_PRE_PACKED_STRUCT struct dot11_rrm_cap_ie { ++ uint8 cap[DOT11_RRM_CAP_LEN]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_rrm_cap_ie dot11_rrm_cap_ie_t; ++ ++/* Bitmap definitions for cap ie */ ++#define DOT11_RRM_CAP_LINK 0 ++#define DOT11_RRM_CAP_NEIGHBOR_REPORT 1 ++#define DOT11_RRM_CAP_PARALLEL 2 ++#define DOT11_RRM_CAP_REPEATED 3 ++#define DOT11_RRM_CAP_BCN_PASSIVE 4 ++#define DOT11_RRM_CAP_BCN_ACTIVE 5 ++#define DOT11_RRM_CAP_BCN_TABLE 6 ++#define DOT11_RRM_CAP_BCN_REP_COND 7 ++#define DOT11_RRM_CAP_AP_CHANREP 16 ++ ++ ++/* Operating Class (formerly "Regulatory Class") definitions */ ++#define DOT11_OP_CLASS_NONE 255 ++ ++ ++/* Radio Measurements action ids */ ++#define DOT11_RM_ACTION_RM_REQ 0 /* Radio measurement request */ ++#define DOT11_RM_ACTION_RM_REP 1 /* Radio measurement report */ ++#define DOT11_RM_ACTION_LM_REQ 2 /* Link measurement request */ ++#define DOT11_RM_ACTION_LM_REP 3 /* Link measurement report */ ++#define DOT11_RM_ACTION_NR_REQ 4 /* Neighbor report request */ ++#define DOT11_RM_ACTION_NR_REP 5 /* Neighbor report response */ ++ ++/* Generic radio measurement action frame header */ ++BWL_PRE_PACKED_STRUCT struct dot11_rm_action { ++ uint8 category; /* category of action frame (5) */ ++ uint8 action; /* radio measurement action */ ++ uint8 token; /* dialog token */ ++ uint8 data[1]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_rm_action dot11_rm_action_t; ++#define DOT11_RM_ACTION_LEN 3 ++ ++BWL_PRE_PACKED_STRUCT struct dot11_rmreq { ++ uint8 category; /* category of action frame (5) */ ++ uint8 action; /* radio measurement action */ ++ uint8 token; /* dialog token */ ++ uint16 reps; /* no. 
of repetitions */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_rmreq dot11_rmreq_t; ++#define DOT11_RMREQ_LEN 5 ++ ++BWL_PRE_PACKED_STRUCT struct dot11_rm_ie { ++ uint8 id; ++ uint8 len; ++ uint8 token; ++ uint8 mode; ++ uint8 type; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_rm_ie dot11_rm_ie_t; ++#define DOT11_RM_IE_LEN 5 ++ ++/* Definitions for "mode" bits in rm req */ ++#define DOT11_RMREQ_MODE_PARALLEL 1 ++#define DOT11_RMREQ_MODE_ENABLE 2 ++#define DOT11_RMREQ_MODE_REQUEST 4 ++#define DOT11_RMREQ_MODE_REPORT 8 ++#define DOT11_RMREQ_MODE_DURMAND 0x10 /* Duration Mandatory */ ++ ++/* Definitions for "mode" bits in rm rep */ ++#define DOT11_RMREP_MODE_LATE 1 ++#define DOT11_RMREP_MODE_INCAPABLE 2 ++#define DOT11_RMREP_MODE_REFUSED 4 ++ ++BWL_PRE_PACKED_STRUCT struct dot11_rmreq_bcn { ++ uint8 id; ++ uint8 len; ++ uint8 token; ++ uint8 mode; ++ uint8 type; ++ uint8 reg; ++ uint8 channel; ++ uint16 interval; ++ uint16 duration; ++ uint8 bcn_mode; ++ struct ether_addr bssid; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_rmreq_bcn dot11_rmreq_bcn_t; ++#define DOT11_RMREQ_BCN_LEN 18 ++ ++BWL_PRE_PACKED_STRUCT struct dot11_rmrep_bcn { ++ uint8 reg; ++ uint8 channel; ++ uint32 starttime[2]; ++ uint16 duration; ++ uint8 frame_info; ++ uint8 rcpi; ++ uint8 rsni; ++ struct ether_addr bssid; ++ uint8 antenna_id; ++ uint32 parent_tsf; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_rmrep_bcn dot11_rmrep_bcn_t; ++#define DOT11_RMREP_BCN_LEN 26 ++ ++/* Beacon request measurement mode */ ++#define DOT11_RMREQ_BCN_PASSIVE 0 ++#define DOT11_RMREQ_BCN_ACTIVE 1 ++#define DOT11_RMREQ_BCN_TABLE 2 ++ ++/* Sub-element IDs for Beacon Request */ ++#define DOT11_RMREQ_BCN_SSID_ID 0 ++#define DOT11_RMREQ_BCN_REPINFO_ID 1 ++#define DOT11_RMREQ_BCN_REPDET_ID 2 ++#define DOT11_RMREQ_BCN_REQUEST_ID 10 ++#define DOT11_RMREQ_BCN_APCHREP_ID 51 ++ ++/* Reporting Detail element definition */ ++#define DOT11_RMREQ_BCN_REPDET_FIXED 0 /* Fixed length fields only */ ++#define 
DOT11_RMREQ_BCN_REPDET_REQUEST 1 /* + requested information elems */ ++#define DOT11_RMREQ_BCN_REPDET_ALL 2 /* All fields */ ++ ++/* Sub-element IDs for Beacon Report */ ++#define DOT11_RMREP_BCN_FRM_BODY 1 ++ ++/* Neighbor measurement report */ ++BWL_PRE_PACKED_STRUCT struct dot11_rmrep_nbr { ++ struct ether_addr bssid; ++ uint32 bssid_info; ++ uint8 reg; ++ uint8 channel; ++ uint8 phytype; ++ uchar sub_elements[1]; /* Variable size data */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_rmrep_nbr dot11_rmrep_nbr_t; ++#define DOT11_RMREP_NBR_LEN 13 ++ ++/* MLME Enumerations */ ++#define DOT11_BSSTYPE_INFRASTRUCTURE 0 /* d11 infrastructure */ ++#define DOT11_BSSTYPE_INDEPENDENT 1 /* d11 independent */ ++#define DOT11_BSSTYPE_ANY 2 /* d11 any BSS type */ ++#define DOT11_SCANTYPE_ACTIVE 0 /* d11 scan active */ ++#define DOT11_SCANTYPE_PASSIVE 1 /* d11 scan passive */ ++ ++/* Link Measurement */ ++BWL_PRE_PACKED_STRUCT struct dot11_lmreq { ++ uint8 category; /* category of action frame (5) */ ++ uint8 action; /* radio measurement action */ ++ uint8 token; /* dialog token */ ++ uint8 txpwr; /* Transmit Power Used */ ++ uint8 maxtxpwr; /* Max Transmit Power */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_lmreq dot11_lmreq_t; ++#define DOT11_LMREQ_LEN 5 ++ ++BWL_PRE_PACKED_STRUCT struct dot11_lmrep { ++ uint8 category; /* category of action frame (5) */ ++ uint8 action; /* radio measurement action */ ++ uint8 token; /* dialog token */ ++ dot11_tpc_rep_t tpc; /* TPC element */ ++ uint8 rxant; /* Receive Antenna ID */ ++ uint8 txant; /* Transmit Antenna ID */ ++ uint8 rcpi; /* RCPI */ ++ uint8 rsni; /* RSNI */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_lmrep dot11_lmrep_t; ++#define DOT11_LMREP_LEN 11 ++ ++/* 802.11 BRCM "Compromise" Pre N constants */ ++#define PREN_PREAMBLE 24 /* green field preamble time */ ++#define PREN_MM_EXT 12 /* extra mixed mode preamble time */ ++#define PREN_PREAMBLE_EXT 4 /* extra preamble (multiply by unique_streams-1) */ ++ 
++/* 802.11N PHY constants */ ++#define RIFS_11N_TIME 2 /* NPHY RIFS time */ ++ ++/* 802.11 HT PLCP format 802.11n-2009, sec 20.3.9.4.3 ++ * HT-SIG is composed of two 24 bit parts, HT-SIG1 and HT-SIG2 ++ */ ++/* HT-SIG1 */ ++#define HT_SIG1_MCS_MASK 0x00007F ++#define HT_SIG1_CBW 0x000080 ++#define HT_SIG1_HT_LENGTH 0xFFFF00 ++ ++/* HT-SIG2 */ ++#define HT_SIG2_SMOOTHING 0x000001 ++#define HT_SIG2_NOT_SOUNDING 0x000002 ++#define HT_SIG2_RESERVED 0x000004 ++#define HT_SIG2_AGGREGATION 0x000008 ++#define HT_SIG2_STBC_MASK 0x000030 ++#define HT_SIG2_STBC_SHIFT 4 ++#define HT_SIG2_FEC_CODING 0x000040 ++#define HT_SIG2_SHORT_GI 0x000080 ++#define HT_SIG2_ESS_MASK 0x000300 ++#define HT_SIG2_ESS_SHIFT 8 ++#define HT_SIG2_CRC 0x03FC00 ++#define HT_SIG2_TAIL 0x1C0000 ++ ++/* 802.11 A PHY constants */ ++#define APHY_SLOT_TIME 9 /* APHY slot time */ ++#define APHY_SIFS_TIME 16 /* APHY SIFS time */ ++#define APHY_DIFS_TIME (APHY_SIFS_TIME + (2 * APHY_SLOT_TIME)) /* APHY DIFS time */ ++#define APHY_PREAMBLE_TIME 16 /* APHY preamble time */ ++#define APHY_SIGNAL_TIME 4 /* APHY signal time */ ++#define APHY_SYMBOL_TIME 4 /* APHY symbol time */ ++#define APHY_SERVICE_NBITS 16 /* APHY service nbits */ ++#define APHY_TAIL_NBITS 6 /* APHY tail nbits */ ++#define APHY_CWMIN 15 /* APHY cwmin */ ++ ++/* 802.11 B PHY constants */ ++#define BPHY_SLOT_TIME 20 /* BPHY slot time */ ++#define BPHY_SIFS_TIME 10 /* BPHY SIFS time */ ++#define BPHY_DIFS_TIME 50 /* BPHY DIFS time */ ++#define BPHY_PLCP_TIME 192 /* BPHY PLCP time */ ++#define BPHY_PLCP_SHORT_TIME 96 /* BPHY PLCP short time */ ++#define BPHY_CWMIN 31 /* BPHY cwmin */ ++ ++/* 802.11 G constants */ ++#define DOT11_OFDM_SIGNAL_EXTENSION 6 /* d11 OFDM signal extension */ ++ ++#define PHY_CWMAX 1023 /* PHY cwmax */ ++ ++#define DOT11_MAXNUMFRAGS 16 /* max # fragments per MSDU */ ++ ++/* 802.11 AC (VHT) constants */ ++ ++typedef int vht_group_id_t; ++ ++/* for VHT-A1 */ ++/* SIG-A1 reserved bits */ ++#define VHT_SIGA1_CONST_MASK 0x800004 
++ ++#define VHT_SIGA1_20MHZ_VAL 0x000000 ++#define VHT_SIGA1_40MHZ_VAL 0x000001 ++#define VHT_SIGA1_80MHZ_VAL 0x000002 ++#define VHT_SIGA1_160MHZ_VAL 0x000003 ++ ++#define VHT_SIGA1_STBC 0x000008 ++ ++#define VHT_SIGA1_GID_MAX_GID 0x3f ++#define VHT_SIGA1_GID_SHIFT 4 ++#define VHT_SIGA1_GID_TO_AP 0x00 ++#define VHT_SIGA1_GID_NOT_TO_AP 0x3f ++ ++#define VHT_SIGA1_NSTS_SHIFT 10 ++#define VHT_SIGA1_NSTS_SHIFT_MASK_USER0 0x001C00 ++ ++#define VHT_SIGA1_PARTIAL_AID_SHIFT 13 ++ ++/* for VHT-A2 */ ++#define VHT_SIGA2_GI_NONE 0x000000 ++#define VHT_SIGA2_GI_SHORT 0x000001 ++#define VHT_SIGA2_GI_W_MOD10 0x000002 ++#define VHT_SIGA2_CODING_LDPC 0x000004 ++#define VHT_SIGA2_BEAMFORM_ENABLE 0x000100 ++#define VHT_SIGA2_MCS_SHIFT 4 ++ ++#define VHT_SIGA2_B9_RESERVED 0x000200 ++#define VHT_SIGA2_TAIL_MASK 0xfc0000 ++#define VHT_SIGA2_TAIL_VALUE 0x000000 ++ ++#define VHT_SIGA2_SVC_BITS 16 ++#define VHT_SIGA2_TAIL_BITS 6 ++ ++ ++/* dot11Counters Table - 802.11 spec., Annex D */ ++typedef struct d11cnt { ++ uint32 txfrag; /* dot11TransmittedFragmentCount */ ++ uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ ++ uint32 txfail; /* dot11FailedCount */ ++ uint32 txretry; /* dot11RetryCount */ ++ uint32 txretrie; /* dot11MultipleRetryCount */ ++ uint32 rxdup; /* dot11FrameduplicateCount */ ++ uint32 txrts; /* dot11RTSSuccessCount */ ++ uint32 txnocts; /* dot11RTSFailureCount */ ++ uint32 txnoack; /* dot11ACKFailureCount */ ++ uint32 rxfrag; /* dot11ReceivedFragmentCount */ ++ uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ ++ uint32 rxcrc; /* dot11FCSErrorCount */ ++ uint32 txfrmsnt; /* dot11TransmittedFrameCount */ ++ uint32 rxundec; /* dot11WEPUndecryptableCount */ ++} d11cnt_t; ++ ++/* OUI for BRCM proprietary IE */ ++#define BRCM_PROP_OUI "\x00\x90\x4C" /* Broadcom proprietary OUI */ ++ ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++/* The following BRCM_PROP_OUI types are currently in use (defined in ++ * relevant subsections). 
Each of them will be in a separate proprietary(221) IE ++ * #define SES_VNDR_IE_TYPE 1 (defined in src/ses/shared/ses.h) ++ * #define DPT_IE_TYPE 2 ++ * #define HT_CAP_IE_TYPE 51 ++ * #define HT_ADD_IE_TYPE 52 ++ * #define BRCM_EXTCH_IE_TYPE 53 ++ */ ++ ++/* Following is the generic structure for brcm_prop_ie (uses BRCM_PROP_OUI). ++ * DPT uses this format with type set to DPT_IE_TYPE ++ */ ++BWL_PRE_PACKED_STRUCT struct brcm_prop_ie_s { ++ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ ++ uint8 len; /* IE length */ ++ uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */ ++ uint8 type; /* type of this IE */ ++ uint16 cap; /* DPT capabilities */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct brcm_prop_ie_s brcm_prop_ie_t; ++ ++#define BRCM_PROP_IE_LEN 6 /* len of fixed part of brcm_prop ie */ ++ ++#define DPT_IE_TYPE 2 ++#define WET_TUNNEL_IE_TYPE 3 ++#endif /* LINUX_POSTMOGRIFY_REMOVAL */ ++ ++/* BRCM OUI: Used in the proprietary(221) IE in all broadcom devices */ ++#define BRCM_OUI "\x00\x10\x18" /* Broadcom OUI */ ++ ++/* BRCM info element */ ++BWL_PRE_PACKED_STRUCT struct brcm_ie { ++ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ ++ uint8 len; /* IE length */ ++ uint8 oui[3]; /* Proprietary OUI, BRCM_OUI */ ++ uint8 ver; /* type/ver of this IE */ ++ uint8 assoc; /* # of assoc STAs */ ++ uint8 flags; /* misc flags */ ++ uint8 flags1; /* misc flags */ ++ uint16 amsdu_mtu_pref; /* preferred A-MSDU MTU */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct brcm_ie brcm_ie_t; ++#define BRCM_IE_LEN 11 /* BRCM IE length */ ++#define BRCM_IE_VER 2 /* BRCM IE version */ ++#define BRCM_IE_LEGACY_AES_VER 1 /* BRCM IE legacy AES version */ ++ ++/* brcm_ie flags */ ++#define BRF_LZWDS 0x4 /* lazy wds enabled */ ++#define BRF_BLOCKACK 0x8 /* BlockACK capable */ ++ ++/* brcm_ie flags1 */ ++#define BRF1_AMSDU 0x1 /* A-MSDU capable */ ++#define BRF1_WMEPS 0x4 /* AP is capable of handling WME + PS w/o APSD */ ++#define BRF1_PSOFIX 0x8 /* AP has fixed PS mode out-of-order packets */ 
++#define BRF1_RX_LARGE_AGG 0x10 /* device can rx large aggregates */ ++#define BRF1_RFAWARE_DCS 0x20 /* RFAWARE dynamic channel selection (DCS) */ ++#define BRF1_SOFTAP 0x40 /* Configure as Broadcom SOFTAP */ ++ ++/* Vendor IE structure */ ++BWL_PRE_PACKED_STRUCT struct vndr_ie { ++ uchar id; ++ uchar len; ++ uchar oui [3]; ++ uchar data [1]; /* Variable size data */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct vndr_ie vndr_ie_t; ++ ++#define VNDR_IE_HDR_LEN 2 /* id + len field */ ++#define VNDR_IE_MIN_LEN 3 /* size of the oui field */ ++#define VNDR_IE_MAX_LEN 256 /* verdor IE max length */ ++ ++/* ************* HT definitions. ************* */ ++#define MCSSET_LEN 16 /* 16-bits per 8-bit set to give 128-bits bitmap of MCS Index */ ++#define MAX_MCS_NUM (128) /* max mcs number = 128 */ ++ ++BWL_PRE_PACKED_STRUCT struct ht_cap_ie { ++ uint16 cap; ++ uint8 params; ++ uint8 supp_mcs[MCSSET_LEN]; ++ uint16 ext_htcap; ++ uint32 txbf_cap; ++ uint8 as_cap; ++} BWL_POST_PACKED_STRUCT; ++typedef struct ht_cap_ie ht_cap_ie_t; ++ ++/* CAP IE: HT 1.0 spec. simply stole a 802.11 IE, we use our prop. 
IE until this is resolved */ ++/* the capability IE is primarily used to convey this nodes abilities */ ++BWL_PRE_PACKED_STRUCT struct ht_prop_cap_ie { ++ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ ++ uint8 len; /* IE length */ ++ uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */ ++ uint8 type; /* type inidicates what follows */ ++ ht_cap_ie_t cap_ie; ++} BWL_POST_PACKED_STRUCT; ++typedef struct ht_prop_cap_ie ht_prop_cap_ie_t; ++ ++#define HT_PROP_IE_OVERHEAD 4 /* overhead bytes for prop oui ie */ ++#define HT_CAP_IE_LEN 26 /* HT capability len (based on .11n d2.0) */ ++#define HT_CAP_IE_TYPE 51 ++ ++#define HT_CAP_LDPC_CODING 0x0001 /* Support for rx of LDPC coded pkts */ ++#define HT_CAP_40MHZ 0x0002 /* FALSE:20Mhz, TRUE:20/40MHZ supported */ ++#define HT_CAP_MIMO_PS_MASK 0x000C /* Mimo PS mask */ ++#define HT_CAP_MIMO_PS_SHIFT 0x0002 /* Mimo PS shift */ ++#define HT_CAP_MIMO_PS_OFF 0x0003 /* Mimo PS, no restriction */ ++#define HT_CAP_MIMO_PS_RTS 0x0001 /* Mimo PS, send RTS/CTS around MIMO frames */ ++#define HT_CAP_MIMO_PS_ON 0x0000 /* Mimo PS, MIMO disallowed */ ++#define HT_CAP_GF 0x0010 /* Greenfield preamble support */ ++#define HT_CAP_SHORT_GI_20 0x0020 /* 20MHZ short guard interval support */ ++#define HT_CAP_SHORT_GI_40 0x0040 /* 40Mhz short guard interval support */ ++#define HT_CAP_TX_STBC 0x0080 /* Tx STBC support */ ++#define HT_CAP_RX_STBC_MASK 0x0300 /* Rx STBC mask */ ++#define HT_CAP_RX_STBC_SHIFT 8 /* Rx STBC shift */ ++#define HT_CAP_DELAYED_BA 0x0400 /* delayed BA support */ ++#define HT_CAP_MAX_AMSDU 0x0800 /* Max AMSDU size in bytes , 0=3839, 1=7935 */ ++ ++#define HT_CAP_DSSS_CCK 0x1000 /* DSSS/CCK supported by the BSS */ ++#define HT_CAP_PSMP 0x2000 /* Power Save Multi Poll support */ ++#define HT_CAP_40MHZ_INTOLERANT 0x4000 /* 40MHz Intolerant */ ++#define HT_CAP_LSIG_TXOP 0x8000 /* L-SIG TXOP protection support */ ++ ++#define HT_CAP_RX_STBC_NO 0x0 /* no rx STBC support */ ++#define HT_CAP_RX_STBC_ONE_STREAM 0x1 /* rx STBC support 
of 1 spatial stream */ ++#define HT_CAP_RX_STBC_TWO_STREAM 0x2 /* rx STBC support of 1-2 spatial streams */ ++#define HT_CAP_RX_STBC_THREE_STREAM 0x3 /* rx STBC support of 1-3 spatial streams */ ++ ++#define VHT_MAX_MPDU 11454 /* max mpdu size for now (bytes) */ ++#define VHT_MPDU_MSDU_DELTA 56 /* Difference in spec - vht mpdu, amsdu len */ ++/* Max AMSDU len - per spec */ ++#define VHT_MAX_AMSDU (VHT_MAX_MPDU - VHT_MPDU_MSDU_DELTA) ++ ++#define HT_MAX_AMSDU 7935 /* max amsdu size (bytes) per the HT spec */ ++#define HT_MIN_AMSDU 3835 /* min amsdu size (bytes) per the HT spec */ ++ ++#define HT_PARAMS_RX_FACTOR_MASK 0x03 /* ampdu rcv factor mask */ ++#define HT_PARAMS_DENSITY_MASK 0x1C /* ampdu density mask */ ++#define HT_PARAMS_DENSITY_SHIFT 2 /* ampdu density shift */ ++ ++/* HT/AMPDU specific define */ ++#define AMPDU_MAX_MPDU_DENSITY 7 /* max mpdu density; in 1/8 usec units */ ++#define AMPDU_RX_FACTOR_8K 0 /* max rcv ampdu len (8kb) */ ++#define AMPDU_RX_FACTOR_16K 1 /* max rcv ampdu len (16kb) */ ++#define AMPDU_RX_FACTOR_32K 2 /* max rcv ampdu len (32kb) */ ++#define AMPDU_RX_FACTOR_64K 3 /* max rcv ampdu len (64kb) */ ++#define AMPDU_RX_FACTOR_BASE 8*1024 /* ampdu factor base for rx len */ ++ ++#define AMPDU_DELIMITER_LEN 4 /* length of ampdu delimiter */ ++#define AMPDU_DELIMITER_LEN_MAX 63 /* max length of ampdu delimiter(enforced in HW) */ ++ ++#define HT_CAP_EXT_PCO 0x0001 ++#define HT_CAP_EXT_PCO_TTIME_MASK 0x0006 ++#define HT_CAP_EXT_PCO_TTIME_SHIFT 1 ++#define HT_CAP_EXT_MCS_FEEDBACK_MASK 0x0300 ++#define HT_CAP_EXT_MCS_FEEDBACK_SHIFT 8 ++#define HT_CAP_EXT_HTC 0x0400 ++#define HT_CAP_EXT_RD_RESP 0x0800 ++ ++BWL_PRE_PACKED_STRUCT struct ht_add_ie { ++ uint8 ctl_ch; /* control channel number */ ++ uint8 byte1; /* ext ch,rec. ch. 
width, RIFS support */ ++ uint16 opmode; /* operation mode */ ++ uint16 misc_bits; /* misc bits */ ++ uint8 basic_mcs[MCSSET_LEN]; /* required MCS set */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct ht_add_ie ht_add_ie_t; ++ ++/* ADD IE: HT 1.0 spec. simply stole a 802.11 IE, we use our prop. IE until this is resolved */ ++/* the additional IE is primarily used to convey the current BSS configuration */ ++BWL_PRE_PACKED_STRUCT struct ht_prop_add_ie { ++ uint8 id; /* IE ID, 221, DOT11_MNG_PROPR_ID */ ++ uint8 len; /* IE length */ ++ uint8 oui[3]; /* Proprietary OUI, BRCM_PROP_OUI */ ++ uint8 type; /* indicates what follows */ ++ ht_add_ie_t add_ie; ++} BWL_POST_PACKED_STRUCT; ++typedef struct ht_prop_add_ie ht_prop_add_ie_t; ++ ++#define HT_ADD_IE_LEN 22 ++#define HT_ADD_IE_TYPE 52 ++ ++/* byte1 defn's */ ++#define HT_BW_ANY 0x04 /* set, STA can use 20 or 40MHz */ ++#define HT_RIFS_PERMITTED 0x08 /* RIFS allowed */ ++ ++/* opmode defn's */ ++#define HT_OPMODE_MASK 0x0003 /* protection mode mask */ ++#define HT_OPMODE_SHIFT 0 /* protection mode shift */ ++#define HT_OPMODE_PURE 0x0000 /* protection mode PURE */ ++#define HT_OPMODE_OPTIONAL 0x0001 /* protection mode optional */ ++#define HT_OPMODE_HT20IN40 0x0002 /* protection mode 20MHz HT in 40MHz BSS */ ++#define HT_OPMODE_MIXED 0x0003 /* protection mode Mixed Mode */ ++#define HT_OPMODE_NONGF 0x0004 /* protection mode non-GF */ ++#define DOT11N_TXBURST 0x0008 /* Tx burst limit */ ++#define DOT11N_OBSS_NONHT 0x0010 /* OBSS Non-HT STA present */ ++ ++/* misc_bites defn's */ ++#define HT_BASIC_STBC_MCS 0x007f /* basic STBC MCS */ ++#define HT_DUAL_STBC_PROT 0x0080 /* Dual STBC Protection */ ++#define HT_SECOND_BCN 0x0100 /* Secondary beacon support */ ++#define HT_LSIG_TXOP 0x0200 /* L-SIG TXOP Protection full support */ ++#define HT_PCO_ACTIVE 0x0400 /* PCO active */ ++#define HT_PCO_PHASE 0x0800 /* PCO phase */ ++#define HT_DUALCTS_PROTECTION 0x0080 /* DUAL CTS protection needed */ ++ ++/* Tx Burst Limits */ 
++#define DOT11N_2G_TXBURST_LIMIT 6160 /* 2G band Tx burst limit per 802.11n Draft 1.10 (usec) */ ++#define DOT11N_5G_TXBURST_LIMIT 3080 /* 5G band Tx burst limit per 802.11n Draft 1.10 (usec) */ ++ ++/* Macros for opmode */ ++#define GET_HT_OPMODE(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ ++ >> HT_OPMODE_SHIFT) ++#define HT_MIXEDMODE_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ ++ == HT_OPMODE_MIXED) /* mixed mode present */ ++#define HT_HT20_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ ++ == HT_OPMODE_HT20IN40) /* 20MHz HT present */ ++#define HT_OPTIONAL_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_MASK) \ ++ == HT_OPMODE_OPTIONAL) /* Optional protection present */ ++#define HT_USE_PROTECTION(add_ie) (HT_HT20_PRESENT((add_ie)) || \ ++ HT_MIXEDMODE_PRESENT((add_ie))) /* use protection */ ++#define HT_NONGF_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & HT_OPMODE_NONGF) \ ++ == HT_OPMODE_NONGF) /* non-GF present */ ++#define DOT11N_TXBURST_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_TXBURST) \ ++ == DOT11N_TXBURST) /* Tx Burst present */ ++#define DOT11N_OBSS_NONHT_PRESENT(add_ie) ((ltoh16_ua(&add_ie->opmode) & DOT11N_OBSS_NONHT) \ ++ == DOT11N_OBSS_NONHT) /* OBSS Non-HT present */ ++ ++BWL_PRE_PACKED_STRUCT struct obss_params { ++ uint16 passive_dwell; ++ uint16 active_dwell; ++ uint16 bss_widthscan_interval; ++ uint16 passive_total; ++ uint16 active_total; ++ uint16 chanwidth_transition_dly; ++ uint16 activity_threshold; ++} BWL_POST_PACKED_STRUCT; ++typedef struct obss_params obss_params_t; ++ ++BWL_PRE_PACKED_STRUCT struct dot11_obss_ie { ++ uint8 id; ++ uint8 len; ++ obss_params_t obss_params; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_obss_ie dot11_obss_ie_t; ++#define DOT11_OBSS_SCAN_IE_LEN sizeof(obss_params_t) /* HT OBSS len (based on 802.11n d3.0) */ ++ ++/* HT control field */ ++#define HT_CTRL_LA_TRQ 0x00000002 /* sounding request */ ++#define HT_CTRL_LA_MAI 0x0000003C 
/* MCS request or antenna selection indication */ ++#define HT_CTRL_LA_MAI_SHIFT 2 ++#define HT_CTRL_LA_MAI_MRQ 0x00000004 /* MCS request */ ++#define HT_CTRL_LA_MAI_MSI 0x00000038 /* MCS request sequence identifier */ ++#define HT_CTRL_LA_MFSI 0x000001C0 /* MFB sequence identifier */ ++#define HT_CTRL_LA_MFSI_SHIFT 6 ++#define HT_CTRL_LA_MFB_ASELC 0x0000FE00 /* MCS feedback, antenna selection command/data */ ++#define HT_CTRL_LA_MFB_ASELC_SH 9 ++#define HT_CTRL_LA_ASELC_CMD 0x00000C00 /* ASEL command */ ++#define HT_CTRL_LA_ASELC_DATA 0x0000F000 /* ASEL data */ ++#define HT_CTRL_CAL_POS 0x00030000 /* Calibration position */ ++#define HT_CTRL_CAL_SEQ 0x000C0000 /* Calibration sequence */ ++#define HT_CTRL_CSI_STEERING 0x00C00000 /* CSI/Steering */ ++#define HT_CTRL_CSI_STEER_SHIFT 22 ++#define HT_CTRL_CSI_STEER_NFB 0 /* no fedback required */ ++#define HT_CTRL_CSI_STEER_CSI 1 /* CSI, H matrix */ ++#define HT_CTRL_CSI_STEER_NCOM 2 /* non-compressed beamforming */ ++#define HT_CTRL_CSI_STEER_COM 3 /* compressed beamforming */ ++#define HT_CTRL_NDP_ANNOUNCE 0x01000000 /* NDP announcement */ ++#define HT_CTRL_AC_CONSTRAINT 0x40000000 /* AC Constraint */ ++#define HT_CTRL_RDG_MOREPPDU 0x80000000 /* RDG/More PPDU */ ++ ++#define HT_OPMODE_OPTIONAL 0x0001 /* protection mode optional */ ++#define HT_OPMODE_HT20IN40 0x0002 /* protection mode 20MHz HT in 40MHz BSS */ ++#define HT_OPMODE_MIXED 0x0003 /* protection mode Mixed Mode */ ++#define HT_OPMODE_NONGF 0x0004 /* protection mode non-GF */ ++#define DOT11N_TXBURST 0x0008 /* Tx burst limit */ ++#define DOT11N_OBSS_NONHT 0x0010 /* OBSS Non-HT STA present */ ++ ++/* ************* VHT definitions. 
************* */ ++ ++BWL_PRE_PACKED_STRUCT struct vht_cap_ie { ++ uint32 vht_cap_info; ++ /* supported MCS set - 64 bit field */ ++ uint16 rx_mcs_map; ++ uint16 rx_max_rate; ++ uint16 tx_mcs_map; ++ uint16 tx_max_rate; ++} BWL_POST_PACKED_STRUCT; ++typedef struct vht_cap_ie vht_cap_ie_t; ++/* 4B cap_info + 8B supp_mcs */ ++#define VHT_CAP_IE_LEN 12 ++/* 32bit - cap info */ ++#define VHT_CAP_INFO_MAX_MPDU_LEN_MASK 0x00000003 ++#define VHT_CAP_INFO_SUPP_CHAN_WIDTH_MASK 0x0000000c ++#define VHT_CAP_INFO_LDPC 0x00000010 ++#define VHT_CAP_INFO_SGI_80MHZ 0x00000020 ++#define VHT_CAP_INFO_SGI_160MHZ 0x00000040 ++#define VHT_CAP_INFO_TX_STBC 0x00000080 ++#define VHT_CAP_INFO_RX_STBC 0x00000700 ++ ++#define VHT_CAP_INFO_RX_STBC_MASK 0x00000700 ++#define VHT_CAP_INFO_RX_STBC_SHIFT 8 ++#define VHT_CAP_INFO_SU_BEAMFMR 0x00000800 ++#define VHT_CAP_INFO_SU_BEAMFMEE 0x00001000 ++#define VHT_CAP_INFO_NUM_BMFMR_ANT_MASK 0x0000e000 ++#define VHT_CAP_INFO_NUM_BMFMR_ANT_SHIFT 13 ++ ++#define VHT_CAP_INFO_NUM_SOUNDING_DIM_MASK 0x00070000 ++#define VHT_CAP_INFO_NUM_SOUNDING_DIM_SHIFT 16 ++#define VHT_CAP_INFO_MU_BEAMFMR 0x00080000 ++#define VHT_CAP_INFO_MU_BEAMFMEE 0x00100000 ++#define VHT_CAP_INFO_TXOPPS 0x00200000 ++#define VHT_CAP_INFO_HTCVHT 0x00400000 ++#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_MASK 0x03800000 ++#define VHT_CAP_INFO_AMPDU_MAXLEN_EXP_SHIFT 23 ++ ++#define VHT_CAP_INFO_LINK_ADAPT_CAP_MASK 0x0c000000 ++#define VHT_CAP_INFO_LINK_ADAPT_CAP_SHIFT 26 ++ ++/* 64-bit Supp MCS. 
*/ ++#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_MASK 0x1fff ++#define VHT_CAP_SUPP_MCS_RX_HIGHEST_RATE_SHIFT 0 ++ ++#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_MASK 0x1fff ++#define VHT_CAP_SUPP_MCS_TX_HIGHEST_RATE_SHIFT 0 ++ ++#define VHT_CAP_MCS_MAP_0_7 0 ++#define VHT_CAP_MCS_MAP_0_8 1 ++#define VHT_CAP_MCS_MAP_0_9 2 ++#define VHT_CAP_MCS_MAP_NONE 3 ++ ++#define VHT_CAP_MCS_MAP_NSS_MAX 8 ++ ++/* VHT Capabilities Supported Channel Width */ ++typedef enum vht_cap_chan_width { ++ VHT_CAP_CHAN_WIDTH_20_40 = 0x00, ++ VHT_CAP_CHAN_WIDTH_80 = 0x04, ++ VHT_CAP_CHAN_WIDTH_160 = 0x08 ++} vht_cap_chan_width_t; ++ ++/* VHT Capabilities Supported max MPDU LEN */ ++typedef enum vht_cap_max_mpdu_len { ++ VHT_CAP_MPDU_MAX_4K = 0x00, ++ VHT_CAP_MPDU_MAX_8K = 0x01, ++ VHT_CAP_MPDU_MAX_11K = 0x02 ++} vht_cap_max_mpdu_len_t; ++ ++/* VHT Operation Element */ ++BWL_PRE_PACKED_STRUCT struct vht_op_ie { ++ uint8 chan_width; ++ uint8 chan1; ++ uint8 chan2; ++ uint16 supp_mcs; /* same def as above in vht cap */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct vht_op_ie vht_op_ie_t; ++/* 3B VHT Op info + 2B Basic MCS */ ++#define VHT_OP_IE_LEN 5 ++ ++typedef enum vht_op_chan_width { ++ VHT_OP_CHAN_WIDTH_20_40 = 0, ++ VHT_OP_CHAN_WIDTH_80 = 1, ++ VHT_OP_CHAN_WIDTH_160 = 2, ++ VHT_OP_CHAN_WIDTH_80_80 = 3 ++} vht_op_chan_width_t; ++ ++/* Def for rx & tx basic mcs maps - ea ss num has 2 bits of info */ ++#define VHT_MCS_MAP_GET_SS_IDX(nss) (((nss)-1)*2) ++#define VHT_MCS_MAP_GET_MCS_PER_SS(nss, mcsMap) \ ++ (((mcsMap) >> VHT_MCS_MAP_GET_SS_IDX(nss)) & 0x3) ++#define VHT_MCS_MAP_SET_MCS_PER_SS(nss, numMcs, mcsMap) \ ++ ((mcsMap) |= (((numMcs) & 0x3) << VHT_MCS_MAP_GET_SS_IDX(nss))) ++ ++/* ************* WPA definitions. 
************* */ ++#define WPA_OUI "\x00\x50\xF2" /* WPA OUI */ ++#define WPA_OUI_LEN 3 /* WPA OUI length */ ++#define WPA_OUI_TYPE 1 ++#define WPA_VERSION 1 /* WPA version */ ++#define WPA2_OUI "\x00\x0F\xAC" /* WPA2 OUI */ ++#define WPA2_OUI_LEN 3 /* WPA2 OUI length */ ++#define WPA2_VERSION 1 /* WPA2 version */ ++#define WPA2_VERSION_LEN 2 /* WAP2 version length */ ++ ++/* ************* WPS definitions. ************* */ ++#define WPS_OUI "\x00\x50\xF2" /* WPS OUI */ ++#define WPS_OUI_LEN 3 /* WPS OUI length */ ++#define WPS_OUI_TYPE 4 ++ ++/* ************* WFA definitions. ************* */ ++#if defined(MACOSX) ++#define MAC_OUI "\x00\x17\xF2" /* MACOSX OUI */ ++#define MAC_OUI_TYPE_P2P 5 ++#endif /* MACOSX */ ++ ++#if defined(MACOSX) && !defined(WLP2P_NEW_WFA_OUI) ++#define WFA_OUI WPS_OUI /* WFA OUI */ ++#else ++#ifdef P2P_IE_OVRD ++#define WFA_OUI MAC_OUI ++#else ++#define WFA_OUI "\x50\x6F\x9A" /* WFA OUI */ ++#endif /* P2P_IE_OVRD */ ++#endif /* MACOSX && !WLP2P_NEW_WFA_OUI */ ++#define WFA_OUI_LEN 3 /* WFA OUI length */ ++#ifdef P2P_IE_OVRD ++#define WFA_OUI_TYPE_P2P MAC_OUI_TYPE_P2P ++#else ++#define WFA_OUI_TYPE_P2P 9 ++#endif ++ ++#define WFA_OUI_TYPE_TPC 8 ++ ++/* RSN authenticated key managment suite */ ++#define RSN_AKM_NONE 0 /* None (IBSS) */ ++#define RSN_AKM_UNSPECIFIED 1 /* Over 802.1x */ ++#define RSN_AKM_PSK 2 /* Pre-shared Key */ ++#define RSN_AKM_FBT_1X 3 /* Fast Bss transition using 802.1X */ ++#define RSN_AKM_FBT_PSK 4 /* Fast Bss transition using Pre-shared Key */ ++#define RSN_AKM_MFP_1X 5 /* SHA256 key derivation, using 802.1X */ ++#define RSN_AKM_MFP_PSK 6 /* SHA256 key derivation, using Pre-shared Key */ ++#define RSN_AKM_TPK 7 /* TPK(TDLS Peer Key) handshake */ ++ ++/* Key related defines */ ++#define DOT11_MAX_DEFAULT_KEYS 4 /* number of default keys */ ++#define DOT11_MAX_KEY_SIZE 32 /* max size of any key */ ++#define DOT11_MAX_IV_SIZE 16 /* max size of any IV */ ++#define DOT11_EXT_IV_FLAG (1<<5) /* flag to indicate IV is > 4 
bytes */ ++#define DOT11_WPA_KEY_RSC_LEN 8 /* WPA RSC key len */ ++ ++#define WEP1_KEY_SIZE 5 /* max size of any WEP key */ ++#define WEP1_KEY_HEX_SIZE 10 /* size of WEP key in hex. */ ++#define WEP128_KEY_SIZE 13 /* max size of any WEP key */ ++#define WEP128_KEY_HEX_SIZE 26 /* size of WEP key in hex. */ ++#define TKIP_MIC_SIZE 8 /* size of TKIP MIC */ ++#define TKIP_EOM_SIZE 7 /* max size of TKIP EOM */ ++#define TKIP_EOM_FLAG 0x5a /* TKIP EOM flag byte */ ++#define TKIP_KEY_SIZE 32 /* size of any TKIP key */ ++#define TKIP_MIC_AUTH_TX 16 /* offset to Authenticator MIC TX key */ ++#define TKIP_MIC_AUTH_RX 24 /* offset to Authenticator MIC RX key */ ++#define TKIP_MIC_SUP_RX TKIP_MIC_AUTH_TX /* offset to Supplicant MIC RX key */ ++#define TKIP_MIC_SUP_TX TKIP_MIC_AUTH_RX /* offset to Supplicant MIC TX key */ ++#define AES_KEY_SIZE 16 /* size of AES key */ ++#define AES_MIC_SIZE 8 /* size of AES MIC */ ++#define BIP_KEY_SIZE 16 /* size of BIP key */ ++ ++/* WCN */ ++#define WCN_OUI "\x00\x50\xf2" /* WCN OUI */ ++#define WCN_TYPE 4 /* WCN type */ ++ ++ ++/* 802.11r protocol definitions */ ++ ++/* Mobility Domain IE */ ++BWL_PRE_PACKED_STRUCT struct dot11_mdid_ie { ++ uint8 id; ++ uint8 len; ++ uint16 mdid; /* Mobility Domain Id */ ++ uint8 cap; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_mdid_ie dot11_mdid_ie_t; ++ ++#define FBT_MDID_CAP_OVERDS 0x01 /* Fast Bss transition over the DS support */ ++#define FBT_MDID_CAP_RRP 0x02 /* Resource request protocol support */ ++ ++/* Fast Bss Transition IE */ ++BWL_PRE_PACKED_STRUCT struct dot11_ft_ie { ++ uint8 id; ++ uint8 len; ++ uint16 mic_control; /* Mic Control */ ++ uint8 mic[16]; ++ uint8 anonce[32]; ++ uint8 snonce[32]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_ft_ie dot11_ft_ie_t; ++ ++#define TIE_TYPE_RESERVED 0 ++#define TIE_TYPE_REASSOC_DEADLINE 1 ++#define TIE_TYPE_KEY_LIEFTIME 2 ++#define TIE_TYPE_ASSOC_COMEBACK 3 ++BWL_PRE_PACKED_STRUCT struct dot11_timeout_ie { ++ uint8 id; ++ uint8 len; ++ 
uint8 type; /* timeout interval type */ ++ uint32 value; /* timeout interval value */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_timeout_ie dot11_timeout_ie_t; ++ ++/* GTK ie */ ++BWL_PRE_PACKED_STRUCT struct dot11_gtk_ie { ++ uint8 id; ++ uint8 len; ++ uint16 key_info; ++ uint8 key_len; ++ uint8 rsc[8]; ++ uint8 data[1]; ++} BWL_POST_PACKED_STRUCT; ++typedef struct dot11_gtk_ie dot11_gtk_ie_t; ++ ++#define BSSID_INVALID "\x00\x00\x00\x00\x00\x00" ++#define BSSID_BROADCAST "\xFF\xFF\xFF\xFF\xFF\xFF" ++ ++ ++/* ************* WMM Parameter definitions. ************* */ ++#define WMM_OUI "\x00\x50\xF2" /* WNN OUI */ ++#define WMM_OUI_LEN 3 /* WMM OUI length */ ++#define WMM_OUI_TYPE 2 /* WMM OUT type */ ++#define WMM_VERSION 1 ++#define WMM_VERSION_LEN 1 ++ ++/* WMM OUI subtype */ ++#define WMM_OUI_SUBTYPE_PARAMETER 1 ++#define WMM_PARAMETER_IE_LEN 24 ++ ++/* Link Identifier Element */ ++BWL_PRE_PACKED_STRUCT struct link_id_ie { ++ uint8 id; ++ uint8 len; ++ struct ether_addr bssid; ++ struct ether_addr tdls_init_mac; ++ struct ether_addr tdls_resp_mac; ++} BWL_POST_PACKED_STRUCT; ++typedef struct link_id_ie link_id_ie_t; ++#define TDLS_LINK_ID_IE_LEN 18 ++ ++/* Link Wakeup Schedule Element */ ++BWL_PRE_PACKED_STRUCT struct wakeup_sch_ie { ++ uint8 id; ++ uint8 len; ++ uint32 offset; /* in ms between TSF0 and start of 1st Awake Window */ ++ uint32 interval; /* in ms bwtween the start of 2 Awake Windows */ ++ uint32 awake_win_slots; /* in backof slots, duration of Awake Window */ ++ uint32 max_wake_win; /* in ms, max duration of Awake Window */ ++ uint16 idle_cnt; /* number of consecutive Awake Windows */ ++} BWL_POST_PACKED_STRUCT; ++typedef struct wakeup_sch_ie wakeup_sch_ie_t; ++#define TDLS_WAKEUP_SCH_IE_LEN 18 ++ ++/* Channel Switch Timing Element */ ++BWL_PRE_PACKED_STRUCT struct channel_switch_timing_ie { ++ uint8 id; ++ uint8 len; ++ uint16 switch_time; /* in ms, time to switch channels */ ++ uint16 switch_timeout; /* in ms */ ++} 
BWL_POST_PACKED_STRUCT; ++typedef struct channel_switch_timing_ie channel_switch_timing_ie_t; ++#define TDLS_CHANNEL_SWITCH_TIMING_IE_LEN 4 ++ ++/* PTI Control Element */ ++BWL_PRE_PACKED_STRUCT struct pti_control_ie { ++ uint8 id; ++ uint8 len; ++ uint8 tid; ++ uint16 seq_control; ++} BWL_POST_PACKED_STRUCT; ++typedef struct pti_control_ie pti_control_ie_t; ++#define TDLS_PTI_CONTROL_IE_LEN 3 ++ ++/* PU Buffer Status Element */ ++BWL_PRE_PACKED_STRUCT struct pu_buffer_status_ie { ++ uint8 id; ++ uint8 len; ++ uint8 status; ++} BWL_POST_PACKED_STRUCT; ++typedef struct pu_buffer_status_ie pu_buffer_status_ie_t; ++#define TDLS_PU_BUFFER_STATUS_IE_LEN 1 ++#define TDLS_PU_BUFFER_STATUS_AC_BK 1 ++#define TDLS_PU_BUFFER_STATUS_AC_BE 2 ++#define TDLS_PU_BUFFER_STATUS_AC_VI 4 ++#define TDLS_PU_BUFFER_STATUS_AC_VO 8 ++ ++/* This marks the end of a packed structure section. */ ++#include ++ ++#endif /* _802_11_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/proto/802.1d.h b/drivers/net/ethernet/broadcom/gmac/src/include/proto/802.1d.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/proto/802.1d.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/proto/802.1d.h 2017-11-09 17:53:43.977301000 +0800 +@@ -0,0 +1,44 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Fundamental types and constants relating to 802.1D ++ * ++ * $Id: 802.1d.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _802_1_D_ ++#define _802_1_D_ ++ ++/* 802.1D priority defines */ ++#define PRIO_8021D_NONE 2 /* None = - */ ++#define PRIO_8021D_BK 1 /* BK - Background */ ++#define PRIO_8021D_BE 0 /* BE - Best-effort */ ++#define PRIO_8021D_EE 3 /* EE - Excellent-effort */ ++#define PRIO_8021D_CL 4 /* CL - Controlled Load */ ++#define PRIO_8021D_VI 5 /* Vi - Video */ ++#define PRIO_8021D_VO 6 /* Vo - Voice */ ++#define PRIO_8021D_NC 7 /* NC - Network Control */ ++#define MAXPRIO 7 /* 0-7 */ ++#define NUMPRIO (MAXPRIO + 1) ++ ++#define ALLPRIO -1 /* All prioirty */ ++ ++/* Converts prio to precedence since the numerical value of ++ * PRIO_8021D_BE and PRIO_8021D_NONE are swapped. ++ */ ++#define PRIO2PREC(prio) \ ++ (((prio) == PRIO_8021D_NONE || (prio) == PRIO_8021D_BE) ? 
((prio^2)) : (prio)) ++ ++#endif /* _802_1_D__ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/proto/BOM b/drivers/net/ethernet/broadcom/gmac/src/include/proto/BOM +--- a/drivers/net/ethernet/broadcom/gmac/src/include/proto/BOM 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/proto/BOM 2017-11-09 17:53:43.978294000 +0800 +@@ -0,0 +1,4 @@ ++# Created by mkbom ++# $Id: BOM,v 9.0 1998-07-30 23:19:02 $ ++ ++File 1.46 vip.h +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/proto/Makefile b/drivers/net/ethernet/broadcom/gmac/src/include/proto/Makefile +--- a/drivers/net/ethernet/broadcom/gmac/src/include/proto/Makefile 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/proto/Makefile 2017-11-09 17:53:43.979288000 +0800 +@@ -0,0 +1,21 @@ ++# ++# include/proto/Makefile ++# ++# Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++# ++# Permission to use, copy, modify, and/or distribute this software for any ++# purpose with or without fee is hereby granted, provided that the above ++# copyright notice and this permission notice appear in all copies. ++# ++# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++# ++# $Id: Makefile 241182 2011-02-17 21:50:03Z $ ++# ++ ++# build etags +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmeth.h b/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmeth.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmeth.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmeth.h 2017-11-09 17:53:43.979307000 +0800 +@@ -0,0 +1,106 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom Ethernettype protocol definitions ++ * ++ * $Id: bcmeth.h 294352 2011-11-06 19:23:00Z $ ++ */ ++ ++/* ++ * Broadcom Ethernet protocol defines ++ */ ++ ++#ifndef _BCMETH_H_ ++#define _BCMETH_H_ ++ ++#ifndef _TYPEDEFS_H_ ++#include ++#endif ++ ++/* This marks the start of a packed structure section. 
*/ ++#include ++ ++/* ETHER_TYPE_BRCM is defined in ethernet.h */ ++ ++/* ++ * Following the 2byte BRCM ether_type is a 16bit BRCM subtype field ++ * in one of two formats: (only subtypes 32768-65535 are in use now) ++ * ++ * subtypes 0-32767: ++ * 8 bit subtype (0-127) ++ * 8 bit length in bytes (0-255) ++ * ++ * subtypes 32768-65535: ++ * 16 bit big-endian subtype ++ * 16 bit big-endian length in bytes (0-65535) ++ * ++ * length is the number of additional bytes beyond the 4 or 6 byte header ++ * ++ * Reserved values: ++ * 0 reserved ++ * 5-15 reserved for iLine protocol assignments ++ * 17-126 reserved, assignable ++ * 127 reserved ++ * 32768 reserved ++ * 32769-65534 reserved, assignable ++ * 65535 reserved ++ */ ++ ++/* ++ * While adding the subtypes and their specific processing code make sure ++ * bcmeth_bcm_hdr_t is the first data structure in the user specific data structure definition ++ */ ++ ++#define BCMILCP_SUBTYPE_RATE 1 ++#define BCMILCP_SUBTYPE_LINK 2 ++#define BCMILCP_SUBTYPE_CSA 3 ++#define BCMILCP_SUBTYPE_LARQ 4 ++#define BCMILCP_SUBTYPE_VENDOR 5 ++#define BCMILCP_SUBTYPE_FLH 17 ++ ++#define BCMILCP_SUBTYPE_VENDOR_LONG 32769 ++#define BCMILCP_SUBTYPE_CERT 32770 ++#define BCMILCP_SUBTYPE_SES 32771 ++ ++ ++#define BCMILCP_BCM_SUBTYPE_RESERVED 0 ++#define BCMILCP_BCM_SUBTYPE_EVENT 1 ++#define BCMILCP_BCM_SUBTYPE_SES 2 ++/* ++ * The EAPOL type is not used anymore. 
Instead EAPOL messages are now embedded ++ * within BCMILCP_BCM_SUBTYPE_EVENT type messages ++ */ ++/* #define BCMILCP_BCM_SUBTYPE_EAPOL 3 */ ++#define BCMILCP_BCM_SUBTYPE_DPT 4 ++ ++#define BCMILCP_BCM_SUBTYPEHDR_MINLENGTH 8 ++#define BCMILCP_BCM_SUBTYPEHDR_VERSION 0 ++ ++/* These fields are stored in network order */ ++typedef BWL_PRE_PACKED_STRUCT struct bcmeth_hdr ++{ ++ uint16 subtype; /* Vendor specific..32769 */ ++ uint16 length; ++ uint8 version; /* Version is 0 */ ++ uint8 oui[3]; /* Broadcom OUI */ ++ /* user specific Data */ ++ uint16 usr_subtype; ++} BWL_POST_PACKED_STRUCT bcmeth_hdr_t; ++ ++ ++/* This marks the end of a packed structure section. */ ++#include ++ ++#endif /* _BCMETH_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmevent.h b/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmevent.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmevent.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmevent.h 2017-11-09 17:53:43.988295000 +0800 +@@ -0,0 +1,313 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom Event protocol definitions ++ * ++ * Dependencies: proto/bcmeth.h ++ * ++ * $Id: bcmevent.h 315348 2012-02-16 07:32:51Z $ ++ * ++ */ ++ ++/* ++ * Broadcom Ethernet Events protocol defines ++ * ++ */ ++ ++#ifndef _BCMEVENT_H_ ++#define _BCMEVENT_H_ ++ ++#ifndef _TYPEDEFS_H_ ++#include ++#endif ++ ++/* This marks the start of a packed structure section. */ ++#include ++ ++#define BCM_EVENT_MSG_VERSION 2 /* wl_event_msg_t struct version */ ++#define BCM_MSG_IFNAME_MAX 16 /* max length of interface name */ ++ ++/* flags */ ++#define WLC_EVENT_MSG_LINK 0x01 /* link is up */ ++#define WLC_EVENT_MSG_FLUSHTXQ 0x02 /* flush tx queue on MIC error */ ++#define WLC_EVENT_MSG_GROUP 0x04 /* group MIC error */ ++#define WLC_EVENT_MSG_UNKBSS 0x08 /* unknown source bsscfg */ ++#define WLC_EVENT_MSG_UNKIF 0x10 /* unknown source OS i/f */ ++ ++/* these fields are stored in network order */ ++ ++/* version 1 */ ++typedef BWL_PRE_PACKED_STRUCT struct ++{ ++ uint16 version; ++ uint16 flags; /* see flags below */ ++ uint32 event_type; /* Message (see below) */ ++ uint32 status; /* Status code (see below) */ ++ uint32 reason; /* Reason code (if applicable) */ ++ uint32 auth_type; /* WLC_E_AUTH */ ++ uint32 datalen; /* data buf */ ++ struct ether_addr addr; /* Station address (if applicable) */ ++ char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */ ++} BWL_POST_PACKED_STRUCT wl_event_msg_v1_t; ++ ++/* the current version */ ++typedef BWL_PRE_PACKED_STRUCT struct ++{ ++ uint16 version; ++ uint16 flags; /* see flags below */ ++ uint32 event_type; /* Message (see below) */ ++ uint32 status; /* Status code (see below) 
*/ ++ uint32 reason; /* Reason code (if applicable) */ ++ uint32 auth_type; /* WLC_E_AUTH */ ++ uint32 datalen; /* data buf */ ++ struct ether_addr addr; /* Station address (if applicable) */ ++ char ifname[BCM_MSG_IFNAME_MAX]; /* name of the packet incoming interface */ ++ uint8 ifidx; /* destination OS i/f index */ ++ uint8 bsscfgidx; /* source bsscfg index */ ++} BWL_POST_PACKED_STRUCT wl_event_msg_t; ++ ++/* used by driver msgs */ ++typedef BWL_PRE_PACKED_STRUCT struct bcm_event { ++ struct ether_header eth; ++ bcmeth_hdr_t bcm_hdr; ++ wl_event_msg_t event; ++ /* data portion follows */ ++} BWL_POST_PACKED_STRUCT bcm_event_t; ++ ++#define BCM_MSG_LEN (sizeof(bcm_event_t) - sizeof(bcmeth_hdr_t) - sizeof(struct ether_header)) ++ ++/* Event messages */ ++#define WLC_E_SET_SSID 0 /* indicates status of set SSID */ ++#define WLC_E_JOIN 1 /* differentiates join IBSS from found (WLC_E_START) IBSS */ ++#define WLC_E_START 2 /* STA founded an IBSS or AP started a BSS */ ++#define WLC_E_AUTH 3 /* 802.11 AUTH request */ ++#define WLC_E_AUTH_IND 4 /* 802.11 AUTH indication */ ++#define WLC_E_DEAUTH 5 /* 802.11 DEAUTH request */ ++#define WLC_E_DEAUTH_IND 6 /* 802.11 DEAUTH indication */ ++#define WLC_E_ASSOC 7 /* 802.11 ASSOC request */ ++#define WLC_E_ASSOC_IND 8 /* 802.11 ASSOC indication */ ++#define WLC_E_REASSOC 9 /* 802.11 REASSOC request */ ++#define WLC_E_REASSOC_IND 10 /* 802.11 REASSOC indication */ ++#define WLC_E_DISASSOC 11 /* 802.11 DISASSOC request */ ++#define WLC_E_DISASSOC_IND 12 /* 802.11 DISASSOC indication */ ++#define WLC_E_QUIET_START 13 /* 802.11h Quiet period started */ ++#define WLC_E_QUIET_END 14 /* 802.11h Quiet period ended */ ++#define WLC_E_BEACON_RX 15 /* BEACONS received/lost indication */ ++#define WLC_E_LINK 16 /* generic link indication */ ++#define WLC_E_MIC_ERROR 17 /* TKIP MIC error occurred */ ++#define WLC_E_NDIS_LINK 18 /* NDIS style link indication */ ++#define WLC_E_ROAM 19 /* roam attempt occurred: indicate status & reason */ 
++#define WLC_E_TXFAIL 20 /* change in dot11FailedCount (txfail) */ ++#define WLC_E_PMKID_CACHE 21 /* WPA2 pmkid cache indication */ ++#define WLC_E_RETROGRADE_TSF 22 /* current AP's TSF value went backward */ ++#define WLC_E_PRUNE 23 /* AP was pruned from join list for reason */ ++#define WLC_E_AUTOAUTH 24 /* report AutoAuth table entry match for join attempt */ ++#define WLC_E_EAPOL_MSG 25 /* Event encapsulating an EAPOL message */ ++#define WLC_E_SCAN_COMPLETE 26 /* Scan results are ready or scan was aborted */ ++#define WLC_E_ADDTS_IND 27 /* indicate to host addts fail/success */ ++#define WLC_E_DELTS_IND 28 /* indicate to host delts fail/success */ ++#define WLC_E_BCNSENT_IND 29 /* indicate to host of beacon transmit */ ++#define WLC_E_BCNRX_MSG 30 /* Send the received beacon up to the host */ ++#define WLC_E_BCNLOST_MSG 31 /* indicate to host loss of beacon */ ++#define WLC_E_ROAM_PREP 32 /* before attempting to roam */ ++#define WLC_E_PFN_NET_FOUND 33 /* PFN network found event */ ++#define WLC_E_PFN_NET_LOST 34 /* PFN network lost event */ ++#define WLC_E_RESET_COMPLETE 35 ++#define WLC_E_JOIN_START 36 ++#define WLC_E_ROAM_START 37 ++#define WLC_E_ASSOC_START 38 ++#define WLC_E_IBSS_ASSOC 39 ++#define WLC_E_RADIO 40 ++#define WLC_E_PSM_WATCHDOG 41 /* PSM microcode watchdog fired */ ++#define WLC_E_PROBREQ_MSG 44 /* probe request received */ ++#define WLC_E_SCAN_CONFIRM_IND 45 ++#define WLC_E_PSK_SUP 46 /* WPA Handshake fail */ ++#define WLC_E_COUNTRY_CODE_CHANGED 47 ++#define WLC_E_EXCEEDED_MEDIUM_TIME 48 /* WMMAC excedded medium time */ ++#define WLC_E_ICV_ERROR 49 /* WEP ICV error occurred */ ++#define WLC_E_UNICAST_DECODE_ERROR 50 /* Unsupported unicast encrypted frame */ ++#define WLC_E_MULTICAST_DECODE_ERROR 51 /* Unsupported multicast encrypted frame */ ++#define WLC_E_TRACE 52 ++#define WLC_E_IF 54 /* I/F change (for dongle host notification) */ ++#define WLC_E_P2P_DISC_LISTEN_COMPLETE 55 /* listen state expires */ ++#define WLC_E_RSSI 56 /* indicate 
RSSI change based on configured levels */ ++#define WLC_E_PFN_SCAN_COMPLETE 57 /* PFN completed scan of network list */ ++#define WLC_E_EXTLOG_MSG 58 ++#define WLC_E_ACTION_FRAME 59 /* Action frame Rx */ ++#define WLC_E_ACTION_FRAME_COMPLETE 60 /* Action frame Tx complete */ ++#define WLC_E_PRE_ASSOC_IND 61 /* assoc request received */ ++#define WLC_E_PRE_REASSOC_IND 62 /* re-assoc request received */ ++#define WLC_E_CHANNEL_ADOPTED 63 ++#define WLC_E_AP_STARTED 64 /* AP started */ ++#define WLC_E_DFS_AP_STOP 65 /* AP stopped due to DFS */ ++#define WLC_E_DFS_AP_RESUME 66 /* AP resumed due to DFS */ ++#define WLC_E_WAI_STA_EVENT 67 /* WAI stations event */ ++#define WLC_E_WAI_MSG 68 /* event encapsulating an WAI message */ ++#define WLC_E_ESCAN_RESULT 69 /* escan result event */ ++#define WLC_E_ACTION_FRAME_OFF_CHAN_COMPLETE 70 /* action frame off channel complete */ ++#define WLC_E_PROBRESP_MSG 71 /* probe response received */ ++#define WLC_E_P2P_PROBREQ_MSG 72 /* P2P Probe request received */ ++#define WLC_E_DCS_REQUEST 73 ++ ++#define WLC_E_FIFO_CREDIT_MAP 74 /* credits for D11 FIFOs. 
[AC0,AC1,AC2,AC3,BC_MC,ATIM] */ ++ ++#define WLC_E_ACTION_FRAME_RX 75 /* Received action frame event WITH ++ * wl_event_rx_frame_data_t header ++ */ ++#define WLC_E_WAKE_EVENT 76 /* Wake Event timer fired, used for wake WLAN test mode */ ++#define WLC_E_RM_COMPLETE 77 /* Radio measurement complete */ ++#define WLC_E_HTSFSYNC 78 /* Synchronize TSF with the host */ ++#define WLC_E_OVERLAY_REQ 79 /* request an overlay IOCTL/iovar from the host */ ++#define WLC_E_CSA_COMPLETE_IND 80 /* 802.11 CHANNEL SWITCH ACTION completed */ ++#define WLC_E_EXCESS_PM_WAKE_EVENT 81 /* excess PM Wake Event to inform host */ ++#define WLC_E_PFN_SCAN_NONE 82 /* no PFN networks around */ ++#define WLC_E_PFN_SCAN_ALLGONE 83 /* last found PFN network gets lost */ ++#define WLC_E_GTK_PLUMBED 84 ++#define WLC_E_ASSOC_IND_NDIS 85 /* 802.11 ASSOC indication for NDIS only */ ++#define WLC_E_REASSOC_IND_NDIS 86 /* 802.11 REASSOC indication for NDIS only */ ++#define WLC_E_ASSOC_REQ_IE 87 ++#define WLC_E_ASSOC_RESP_IE 88 ++ ++#define WLC_E_LAST 89 /* highest val + 1 for range checking */ ++ ++/* Table of event name strings for UIs and debugging dumps */ ++typedef struct { ++ uint event; ++ const char *name; ++} bcmevent_name_t; ++ ++extern const bcmevent_name_t bcmevent_names[]; ++extern const int bcmevent_names_size; ++ ++/* Event status codes */ ++#define WLC_E_STATUS_SUCCESS 0 /* operation was successful */ ++#define WLC_E_STATUS_FAIL 1 /* operation failed */ ++#define WLC_E_STATUS_TIMEOUT 2 /* operation timed out */ ++#define WLC_E_STATUS_NO_NETWORKS 3 /* failed due to no matching network found */ ++#define WLC_E_STATUS_ABORT 4 /* operation was aborted */ ++#define WLC_E_STATUS_NO_ACK 5 /* protocol failure: packet not ack'd */ ++#define WLC_E_STATUS_UNSOLICITED 6 /* AUTH or ASSOC packet was unsolicited */ ++#define WLC_E_STATUS_ATTEMPT 7 /* attempt to assoc to an auto auth configuration */ ++#define WLC_E_STATUS_PARTIAL 8 /* scan results are incomplete */ ++#define WLC_E_STATUS_NEWSCAN 9 /* 
scan aborted by another scan */ ++#define WLC_E_STATUS_NEWASSOC 10 /* scan aborted due to assoc in progress */ ++#define WLC_E_STATUS_11HQUIET 11 /* 802.11h quiet period started */ ++#define WLC_E_STATUS_SUPPRESS 12 /* user disabled scanning (WLC_SET_SCANSUPPRESS) */ ++#define WLC_E_STATUS_NOCHANS 13 /* no allowable channels to scan */ ++#define WLC_E_STATUS_CS_ABORT 15 /* abort channel select */ ++#define WLC_E_STATUS_ERROR 16 /* request failed due to error */ ++ ++/* roam reason codes */ ++#define WLC_E_REASON_INITIAL_ASSOC 0 /* initial assoc */ ++#define WLC_E_REASON_LOW_RSSI 1 /* roamed due to low RSSI */ ++#define WLC_E_REASON_DEAUTH 2 /* roamed due to DEAUTH indication */ ++#define WLC_E_REASON_DISASSOC 3 /* roamed due to DISASSOC indication */ ++#define WLC_E_REASON_BCNS_LOST 4 /* roamed due to lost beacons */ ++#define WLC_E_REASON_MINTXRATE 9 /* roamed because at mintxrate for too long */ ++#define WLC_E_REASON_TXFAIL 10 /* We can hear AP, but AP can't hear us */ ++ ++/* Roam codes used primarily by CCX */ ++#define WLC_E_REASON_FAST_ROAM_FAILED 5 /* roamed due to fast roam failure */ ++#define WLC_E_REASON_DIRECTED_ROAM 6 /* roamed due to request by AP */ ++#define WLC_E_REASON_TSPEC_REJECTED 7 /* roamed due to TSPEC rejection */ ++#define WLC_E_REASON_BETTER_AP 8 /* roamed due to finding better AP */ ++ ++#define WLC_E_REASON_REQUESTED_ROAM 11 /* roamed due to BSS Mgmt Transition request by AP */ ++ ++/* prune reason codes */ ++#define WLC_E_PRUNE_ENCR_MISMATCH 1 /* encryption mismatch */ ++#define WLC_E_PRUNE_BCAST_BSSID 2 /* AP uses a broadcast BSSID */ ++#define WLC_E_PRUNE_MAC_DENY 3 /* STA's MAC addr is in AP's MAC deny list */ ++#define WLC_E_PRUNE_MAC_NA 4 /* STA's MAC addr is not in AP's MAC allow list */ ++#define WLC_E_PRUNE_REG_PASSV 5 /* AP not allowed due to regulatory restriction */ ++#define WLC_E_PRUNE_SPCT_MGMT 6 /* AP does not support STA locale spectrum mgmt */ ++#define WLC_E_PRUNE_RADAR 7 /* AP is on a radar channel of STA locale */ 
++#define WLC_E_RSN_MISMATCH 8 /* STA does not support AP's RSN */ ++#define WLC_E_PRUNE_NO_COMMON_RATES 9 /* No rates in common with AP */ ++#define WLC_E_PRUNE_BASIC_RATES 10 /* STA does not support all basic rates of BSS */ ++#define WLC_E_PRUNE_CIPHER_NA 12 /* BSS's cipher not supported */ ++#define WLC_E_PRUNE_KNOWN_STA 13 /* AP is already known to us as a STA */ ++#define WLC_E_PRUNE_WDS_PEER 15 /* AP is already known to us as a WDS peer */ ++#define WLC_E_PRUNE_QBSS_LOAD 16 /* QBSS LOAD - AAC is too low */ ++#define WLC_E_PRUNE_HOME_AP 17 /* prune home AP */ ++ ++/* WPA failure reason codes carried in the WLC_E_PSK_SUP event */ ++#define WLC_E_SUP_OTHER 0 /* Other reason */ ++#define WLC_E_SUP_DECRYPT_KEY_DATA 1 /* Decryption of key data failed */ ++#define WLC_E_SUP_BAD_UCAST_WEP128 2 /* Illegal use of ucast WEP128 */ ++#define WLC_E_SUP_BAD_UCAST_WEP40 3 /* Illegal use of ucast WEP40 */ ++#define WLC_E_SUP_UNSUP_KEY_LEN 4 /* Unsupported key length */ ++#define WLC_E_SUP_PW_KEY_CIPHER 5 /* Unicast cipher mismatch in pairwise key */ ++#define WLC_E_SUP_MSG3_TOO_MANY_IE 6 /* WPA IE contains > 1 RSN IE in key msg 3 */ ++#define WLC_E_SUP_MSG3_IE_MISMATCH 7 /* WPA IE mismatch in key message 3 */ ++#define WLC_E_SUP_NO_INSTALL_FLAG 8 /* INSTALL flag unset in 4-way msg */ ++#define WLC_E_SUP_MSG3_NO_GTK 9 /* encapsulated GTK missing from msg 3 */ ++#define WLC_E_SUP_GRP_KEY_CIPHER 10 /* Multicast cipher mismatch in group key */ ++#define WLC_E_SUP_GRP_MSG1_NO_GTK 11 /* encapsulated GTK missing from group msg 1 */ ++#define WLC_E_SUP_GTK_DECRYPT_FAIL 12 /* GTK decrypt failure */ ++#define WLC_E_SUP_SEND_FAIL 13 /* message send failure */ ++#define WLC_E_SUP_DEAUTH 14 /* received FC_DEAUTH */ ++#define WLC_E_SUP_WPA_PSK_TMO 15 /* WPA PSK 4-way handshake timeout */ ++ ++/* Event data for events that include frames received over the air */ ++/* WLC_E_PROBRESP_MSG ++ * WLC_E_P2P_PROBREQ_MSG ++ * WLC_E_ACTION_FRAME_RX ++ */ ++typedef BWL_PRE_PACKED_STRUCT struct 
wl_event_rx_frame_data { ++ uint16 version; ++ uint16 channel; /* Matches chanspec_t format from bcmwifi_channels.h */ ++ int32 rssi; ++ uint32 mactime; ++ uint32 rate; ++} BWL_POST_PACKED_STRUCT wl_event_rx_frame_data_t; ++ ++#define BCM_RX_FRAME_DATA_VERSION 1 ++ ++/* WLC_E_IF event data */ ++typedef struct wl_event_data_if { ++ uint8 ifidx; /* RTE virtual device index (for dongle) */ ++ uint8 opcode; /* see I/F opcode */ ++ uint8 reserved; ++ uint8 bssidx; /* bsscfg index */ ++ uint8 role; /* see I/F role */ ++} wl_event_data_if_t; ++ ++/* opcode in WLC_E_IF event */ ++#define WLC_E_IF_ADD 1 /* bsscfg add */ ++#define WLC_E_IF_DEL 2 /* bsscfg delete */ ++#define WLC_E_IF_CHANGE 3 /* bsscfg role change */ ++ ++/* I/F role code in WLC_E_IF event */ ++#define WLC_E_IF_ROLE_STA 0 /* Infra STA */ ++#define WLC_E_IF_ROLE_AP 1 /* Access Point */ ++#define WLC_E_IF_ROLE_WDS 2 /* WDS link */ ++#define WLC_E_IF_ROLE_P2P_GO 3 /* P2P Group Owner */ ++#define WLC_E_IF_ROLE_P2P_CLIENT 4 /* P2P Client */ ++ ++/* Reason codes for LINK */ ++#define WLC_E_LINK_BCN_LOSS 1 /* Link down because of beacon loss */ ++#define WLC_E_LINK_DISASSOC 2 /* Link down because of disassoc */ ++#define WLC_E_LINK_ASSOC_REC 3 /* Link down because assoc recreate failed */ ++#define WLC_E_LINK_BSSCFG_DIS 4 /* Link down due to bsscfg down */ ++ ++/* reason codes for WLC_E_OVERLAY_REQ event */ ++#define WLC_E_OVL_DOWNLOAD 0 /* overlay download request */ ++#define WLC_E_OVL_UPDATE_IND 1 /* device indication of host overlay update */ ++ ++/* This marks the end of a packed structure section. 
*/ ++#include ++ ++#endif /* _BCMEVENT_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmip.h b/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmip.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmip.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmip.h 2017-11-09 17:53:43.989292000 +0800 +@@ -0,0 +1,205 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Fundamental constants relating to IP Protocol ++ * ++ * $Id: bcmip.h 324300 2012-03-28 20:29:37Z $ ++ */ ++ ++#ifndef _bcmip_h_ ++#define _bcmip_h_ ++ ++#ifndef _TYPEDEFS_H_ ++#include ++#endif ++ ++/* This marks the start of a packed structure section. 
*/ ++#include ++ ++ ++/* IPV4 and IPV6 common */ ++#define IP_VER_OFFSET 0x0 /* offset to version field */ ++#define IP_VER_MASK 0xf0 /* version mask */ ++#define IP_VER_SHIFT 4 /* version shift */ ++#define IP_VER_4 4 /* version number for IPV4 */ ++#define IP_VER_6 6 /* version number for IPV6 */ ++ ++#define IP_VER(ip_body) \ ++ ((((uint8 *)(ip_body))[IP_VER_OFFSET] & IP_VER_MASK) >> IP_VER_SHIFT) ++ ++#define IP_PROT_ICMP 0x1 /* ICMP protocol */ ++#define IP_PROT_IGMP 0x2 /* IGMP protocol */ ++#define IP_PROT_TCP 0x6 /* TCP protocol */ ++#define IP_PROT_UDP 0x11 /* UDP protocol type */ ++#define IP_PROT_ICMP6 0x3a /* ICMPv6 protocol type */ ++ ++/* IPV4 field offsets */ ++#define IPV4_VER_HL_OFFSET 0 /* version and ihl byte offset */ ++#define IPV4_TOS_OFFSET 1 /* type of service offset */ ++#define IPV4_PKTLEN_OFFSET 2 /* packet length offset */ ++#define IPV4_PKTFLAG_OFFSET 6 /* more-frag,dont-frag flag offset */ ++#define IPV4_PROT_OFFSET 9 /* protocol type offset */ ++#define IPV4_CHKSUM_OFFSET 10 /* IP header checksum offset */ ++#define IPV4_SRC_IP_OFFSET 12 /* src IP addr offset */ ++#define IPV4_DEST_IP_OFFSET 16 /* dest IP addr offset */ ++#define IPV4_OPTIONS_OFFSET 20 /* IP options offset */ ++#define IPV4_MIN_HEADER_LEN 20 /* Minimum size for an IP header (no options) */ ++ ++/* IPV4 field decodes */ ++#define IPV4_VER_MASK 0xf0 /* IPV4 version mask */ ++#define IPV4_VER_SHIFT 4 /* IPV4 version shift */ ++ ++#define IPV4_HLEN_MASK 0x0f /* IPV4 header length mask */ ++#define IPV4_HLEN(ipv4_body) (4 * (((uint8 *)(ipv4_body))[IPV4_VER_HL_OFFSET] & IPV4_HLEN_MASK)) ++ ++#define IPV4_ADDR_LEN 4 /* IPV4 address length */ ++ ++#define IPV4_ADDR_NULL(a) ((((uint8 *)(a))[0] | ((uint8 *)(a))[1] | \ ++ ((uint8 *)(a))[2] | ((uint8 *)(a))[3]) == 0) ++ ++#define IPV4_ADDR_BCAST(a) ((((uint8 *)(a))[0] & ((uint8 *)(a))[1] & \ ++ ((uint8 *)(a))[2] & ((uint8 *)(a))[3]) == 0xff) ++ ++#define IPV4_TOS_DSCP_MASK 0xfc /* DiffServ codepoint mask */ ++#define 
IPV4_TOS_DSCP_SHIFT 2 /* DiffServ codepoint shift */ ++ ++#define IPV4_TOS(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_TOS_OFFSET]) ++ ++#define IPV4_TOS_PREC_MASK 0xe0 /* Historical precedence mask */ ++#define IPV4_TOS_PREC_SHIFT 5 /* Historical precedence shift */ ++ ++#define IPV4_TOS_LOWDELAY 0x10 /* Lowest delay requested */ ++#define IPV4_TOS_THROUGHPUT 0x8 /* Best throughput requested */ ++#define IPV4_TOS_RELIABILITY 0x4 /* Most reliable delivery requested */ ++ ++#define IPV4_PROT(ipv4_body) (((uint8 *)(ipv4_body))[IPV4_PROT_OFFSET]) ++ ++#define IPV4_FRAG_RESV 0x8000 /* Reserved */ ++#define IPV4_FRAG_DONT 0x4000 /* Don't fragment */ ++#define IPV4_FRAG_MORE 0x2000 /* More fragments */ ++#define IPV4_FRAG_OFFSET_MASK 0x1fff /* Fragment offset */ ++ ++#define IPV4_ADDR_STR_LEN 16 /* Max IP address length in string format */ ++ ++/* IPV4 packet formats */ ++BWL_PRE_PACKED_STRUCT struct ipv4_addr { ++ uint8 addr[IPV4_ADDR_LEN]; ++} BWL_POST_PACKED_STRUCT; ++ ++BWL_PRE_PACKED_STRUCT struct ipv4_hdr { ++ uint8 version_ihl; /* Version and Internet Header Length */ ++ uint8 tos; /* Type Of Service */ ++ uint16 tot_len; /* Number of bytes in packet (max 65535) */ ++ uint16 id; ++ uint16 frag; /* 3 flag bits and fragment offset */ ++ uint8 ttl; /* Time To Live */ ++ uint8 prot; /* Protocol */ ++ uint16 hdr_chksum; /* IP header checksum */ ++ uint8 src_ip[IPV4_ADDR_LEN]; /* Source IP Address */ ++ uint8 dst_ip[IPV4_ADDR_LEN]; /* Destination IP Address */ ++} BWL_POST_PACKED_STRUCT; ++ ++/* IPV6 field offsets */ ++#define IPV6_PAYLOAD_LEN_OFFSET 4 /* payload length offset */ ++#define IPV6_NEXT_HDR_OFFSET 6 /* next header/protocol offset */ ++#define IPV6_HOP_LIMIT_OFFSET 7 /* hop limit offset */ ++#define IPV6_SRC_IP_OFFSET 8 /* src IP addr offset */ ++#define IPV6_DEST_IP_OFFSET 24 /* dst IP addr offset */ ++ ++/* IPV6 field decodes */ ++#define IPV6_TRAFFIC_CLASS(ipv6_body) \ ++ (((((uint8 *)(ipv6_body))[0] & 0x0f) << 4) | \ ++ ((((uint8 *)(ipv6_body))[1] & 0xf0) >> 
4)) ++ ++#define IPV6_FLOW_LABEL(ipv6_body) \ ++ (((((uint8 *)(ipv6_body))[1] & 0x0f) << 16) | \ ++ (((uint8 *)(ipv6_body))[2] << 8) | \ ++ (((uint8 *)(ipv6_body))[3])) ++ ++#define IPV6_PAYLOAD_LEN(ipv6_body) \ ++ ((((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 0] << 8) | \ ++ ((uint8 *)(ipv6_body))[IPV6_PAYLOAD_LEN_OFFSET + 1]) ++ ++#define IPV6_NEXT_HDR(ipv6_body) \ ++ (((uint8 *)(ipv6_body))[IPV6_NEXT_HDR_OFFSET]) ++ ++#define IPV6_PROT(ipv6_body) IPV6_NEXT_HDR(ipv6_body) ++ ++#define IPV6_ADDR_LEN 16 /* IPV6 address length */ ++ ++/* IPV4 TOS or IPV6 Traffic Classifier or 0 */ ++#define IP_TOS46(ip_body) \ ++ (IP_VER(ip_body) == IP_VER_4 ? IPV4_TOS(ip_body) : \ ++ IP_VER(ip_body) == IP_VER_6 ? IPV6_TRAFFIC_CLASS(ip_body) : 0) ++ ++/* IPV6 extension headers (options) */ ++#define IPV6_EXTHDR_HOP 0 ++#define IPV6_EXTHDR_ROUTING 43 ++#define IPV6_EXTHDR_FRAGMENT 44 ++#define IPV6_EXTHDR_AUTH 51 ++#define IPV6_EXTHDR_NONE 59 ++#define IPV6_EXTHDR_DEST 60 ++ ++#define IPV6_EXTHDR(prot) (((prot) == IPV6_EXTHDR_HOP) || \ ++ ((prot) == IPV6_EXTHDR_ROUTING) || \ ++ ((prot) == IPV6_EXTHDR_FRAGMENT) || \ ++ ((prot) == IPV6_EXTHDR_AUTH) || \ ++ ((prot) == IPV6_EXTHDR_NONE) || \ ++ ((prot) == IPV6_EXTHDR_DEST)) ++ ++#define IPV6_MIN_HLEN 40 ++ ++#define IPV6_EXTHDR_LEN(eh) ((((struct ipv6_exthdr *)(eh))->hdrlen + 1) << 3) ++ ++BWL_PRE_PACKED_STRUCT struct ipv6_exthdr { ++ uint8 nexthdr; ++ uint8 hdrlen; ++} BWL_POST_PACKED_STRUCT; ++ ++BWL_PRE_PACKED_STRUCT struct ipv6_exthdr_frag { ++ uint8 nexthdr; ++ uint8 rsvd; ++ uint16 frag_off; ++ uint32 ident; ++} BWL_POST_PACKED_STRUCT; ++ ++static INLINE int32 ++ipv6_exthdr_len(uint8 *h, uint8 *proto) ++{ ++ uint16 len = 0, hlen; ++ struct ipv6_exthdr *eh = (struct ipv6_exthdr *)h; ++ ++ while (IPV6_EXTHDR(eh->nexthdr)) { ++ if (eh->nexthdr == IPV6_EXTHDR_NONE) ++ return -1; ++ else if (eh->nexthdr == IPV6_EXTHDR_FRAGMENT) ++ hlen = 8; ++ else if (eh->nexthdr == IPV6_EXTHDR_AUTH) ++ hlen = (eh->hdrlen + 2) << 2; ++ else ++ 
hlen = IPV6_EXTHDR_LEN(eh); ++ ++ len += hlen; ++ eh = (struct ipv6_exthdr *)(h + len); ++ } ++ ++ *proto = eh->nexthdr; ++ return len; ++} ++ ++/* This marks the end of a packed structure section. */ ++#include ++ ++#endif /* _bcmip_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmipv6.h b/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmipv6.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmipv6.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/proto/bcmipv6.h 2017-11-09 17:53:43.990289000 +0800 +@@ -0,0 +1,101 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Fundamental constants relating to Neighbor Discovery Protocol ++ * ++ * $Id: bcmipv6.h 305568 2011-12-29 20:21:17Z $ ++ */ ++ ++#ifndef _bcmipv6_h_ ++#define _bcmipv6_h_ ++ ++#ifndef _TYPEDEFS_H_ ++#include ++#endif ++ ++/* This marks the start of a packed structure section. 
*/ ++#include ++ ++#define ICMPV6_HEADER_TYPE 0x3A ++#define ICMPV6_PKT_TYPE_NS 135 ++#define ICMPV6_PKT_TYPE_NA 136 ++ ++#define ICMPV6_ND_OPT_TYPE_TARGET_MAC 2 ++#define ICMPV6_ND_OPT_TYPE_SRC_MAC 1 ++ ++#define IPV6_VERSION 6 ++#define IPV6_HOP_LIMIT 255 ++ ++#define IPV6_ADDR_NULL(a) ((a[0] | a[1] | a[2] | a[3] | a[4] | \ ++ a[5] | a[6] | a[7] | a[8] | a[9] | \ ++ a[10] | a[11] | a[12] | a[13] | \ ++ a[14] | a[15]) == 0) ++ ++/* IPV6 address */ ++BWL_PRE_PACKED_STRUCT struct ipv6_addr { ++ uint8 addr[16]; ++} BWL_POST_PACKED_STRUCT; ++ ++#ifndef IL_BIGENDIAN ++ ++/* ICMPV6 Header */ ++BWL_PRE_PACKED_STRUCT struct icmp6_hdr { ++ uint8 icmp6_type; ++ uint8 icmp6_code; ++ uint16 icmp6_cksum; ++ BWL_PRE_PACKED_STRUCT union { ++ uint32 reserved; ++ BWL_PRE_PACKED_STRUCT struct nd_advt { ++ uint32 reserved1:5, ++ override:1, ++ solicited:1, ++ router:1, ++ reserved2:24; ++ } BWL_POST_PACKED_STRUCT nd_advt; ++ } BWL_POST_PACKED_STRUCT opt; ++} BWL_POST_PACKED_STRUCT; ++ ++/* Ipv6 Header Format */ ++BWL_PRE_PACKED_STRUCT struct ipv6_hdr { ++ uint8 priority:4, ++ version:4; ++ uint8 flow_lbl[3]; ++ uint16 payload_len; ++ uint8 nexthdr; ++ uint8 hop_limit; ++ struct ipv6_addr saddr; ++ struct ipv6_addr daddr; ++} BWL_POST_PACKED_STRUCT; ++ ++/* Neighbor Advertisement/Solicitation Packet Structure */ ++BWL_PRE_PACKED_STRUCT struct nd_msg { ++ struct icmp6_hdr icmph; ++ struct ipv6_addr target; ++} BWL_POST_PACKED_STRUCT; ++ ++ ++/* Neighibor Solicitation/Advertisement Optional Structure */ ++BWL_PRE_PACKED_STRUCT struct nd_msg_opt { ++ uint8 type; ++ uint8 len; ++ uint8 mac_addr[ETHER_ADDR_LEN]; ++} BWL_POST_PACKED_STRUCT; ++ ++#endif /* IL_BIGENDIAN */ ++ ++/* This marks the end of a packed structure section. 
*/ ++#include ++ ++#endif /* !defined(_bcmipv6_h_) */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/proto/ethernet.h b/drivers/net/ethernet/broadcom/gmac/src/include/proto/ethernet.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/proto/ethernet.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/proto/ethernet.h 2017-11-09 17:53:43.991291000 +0800 +@@ -0,0 +1,202 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * From FreeBSD 2.2.7: Fundamental constants relating to ethernet. ++ * ++ * $Id: ethernet.h 316696 2012-02-23 03:29:35Z $ ++ */ ++ ++#ifndef _NET_ETHERNET_H_ /* use native BSD ethernet.h when available */ ++#define _NET_ETHERNET_H_ ++ ++#ifndef _TYPEDEFS_H_ ++#include "typedefs.h" ++#endif ++ ++/* This marks the start of a packed structure section. */ ++#include ++ ++ ++/* ++ * The number of bytes in an ethernet (MAC) address. ++ */ ++#define ETHER_ADDR_LEN 6 ++ ++/* ++ * The number of bytes in the type field. ++ */ ++#define ETHER_TYPE_LEN 2 ++ ++/* ++ * The number of bytes in the trailing CRC field. 
++ */ ++#define ETHER_CRC_LEN 4 ++ ++/* ++ * The length of the combined header. ++ */ ++#define ETHER_HDR_LEN (ETHER_ADDR_LEN * 2 + ETHER_TYPE_LEN) ++ ++/* ++ * The minimum packet length. ++ */ ++#define ETHER_MIN_LEN 64 ++ ++/* ++ * The minimum packet user data length. ++ */ ++#define ETHER_MIN_DATA 46 ++ ++/* ++ * The maximum packet length. ++ */ ++#define ETHER_MAX_LEN 1518 ++ ++/* ++ * The maximum packet user data length. ++ */ ++#define ETHER_MAX_DATA 1500 ++ ++/* ether types */ ++#define ETHER_TYPE_MIN 0x0600 /* Anything less than MIN is a length */ ++#define ETHER_TYPE_IP 0x0800 /* IP */ ++#define ETHER_TYPE_ARP 0x0806 /* ARP */ ++#define ETHER_TYPE_8021Q 0x8100 /* 802.1Q */ ++#define ETHER_TYPE_IPV6 0x86dd /* IPv6 */ ++#define ETHER_TYPE_BRCM 0x886c /* Broadcom Corp. */ ++#define ETHER_TYPE_802_1X 0x888e /* 802.1x */ ++#define ETHER_TYPE_802_1X_PREAUTH 0x88c7 /* 802.1x preauthentication */ ++#define ETHER_TYPE_WAI 0x88b4 /* WAI */ ++#define ETHER_TYPE_89_0D 0x890d /* 89-0d frame for TDLS */ ++ ++#define ETHER_TYPE_PPP_SES 0x8864 /* PPPoE Session */ ++ ++/* Broadcom subtype follows ethertype; First 2 bytes are reserved; Next 2 are subtype; */ ++#define ETHER_BRCM_SUBTYPE_LEN 4 /* Broadcom 4 byte subtype */ ++ ++/* ether header */ ++#define ETHER_DEST_OFFSET (0 * ETHER_ADDR_LEN) /* dest address offset */ ++#define ETHER_SRC_OFFSET (1 * ETHER_ADDR_LEN) /* src address offset */ ++#define ETHER_TYPE_OFFSET (2 * ETHER_ADDR_LEN) /* ether type offset */ ++ ++/* ++ * A macro to validate a length with ++ */ ++#define ETHER_IS_VALID_LEN(foo) \ ++ ((foo) >= ETHER_MIN_LEN && (foo) <= ETHER_MAX_LEN) ++ ++#define ETHER_FILL_MCAST_ADDR_FROM_IP(ea, mgrp_ip) { \ ++ ((uint8 *)ea)[0] = 0x01; \ ++ ((uint8 *)ea)[1] = 0x00; \ ++ ((uint8 *)ea)[2] = 0x5e; \ ++ ((uint8 *)ea)[3] = ((mgrp_ip) >> 16) & 0x7f; \ ++ ((uint8 *)ea)[4] = ((mgrp_ip) >> 8) & 0xff; \ ++ ((uint8 *)ea)[5] = ((mgrp_ip) >> 0) & 0xff; \ ++} ++ ++#ifndef __INCif_etherh /* Quick and ugly hack for VxWorks */ ++/* ++ * 
Structure of a 10Mb/s Ethernet header. ++ */ ++BWL_PRE_PACKED_STRUCT struct ether_header { ++ uint8 ether_dhost[ETHER_ADDR_LEN]; ++ uint8 ether_shost[ETHER_ADDR_LEN]; ++ uint16 ether_type; ++} BWL_POST_PACKED_STRUCT; ++ ++/* ++ * Structure of a 48-bit Ethernet address. ++ */ ++BWL_PRE_PACKED_STRUCT struct ether_addr { ++ uint8 octet[ETHER_ADDR_LEN]; ++} BWL_POST_PACKED_STRUCT; ++#endif /* !__INCif_etherh Quick and ugly hack for VxWorks */ ++ ++/* ++ * Takes a pointer, set, test, clear, toggle locally admininistered ++ * address bit in the 48-bit Ethernet address. ++ */ ++#define ETHER_SET_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] | 2)) ++#define ETHER_IS_LOCALADDR(ea) (((uint8 *)(ea))[0] & 2) ++#define ETHER_CLR_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & 0xfd)) ++#define ETHER_TOGGLE_LOCALADDR(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] ^ 2)) ++ ++/* Takes a pointer, marks unicast address bit in the MAC address */ ++#define ETHER_SET_UNICAST(ea) (((uint8 *)(ea))[0] = (((uint8 *)(ea))[0] & ~1)) ++ ++/* ++ * Takes a pointer, returns true if a 48-bit multicast address ++ * (including broadcast, since it is all ones) ++ */ ++#define ETHER_ISMULTI(ea) (((const uint8 *)(ea))[0] & 1) ++ ++/* Copy an ethernet address in reverse order */ ++#define ether_rcopy(s, d) \ ++do { \ ++ ((uint16 *)(d))[2] = ((uint16 *)(s))[2]; \ ++ ((uint16 *)(d))[1] = ((uint16 *)(s))[1]; \ ++ ((uint16 *)(d))[0] = ((uint16 *)(s))[0]; \ ++} while (0) ++ ++/* compare two ethernet addresses - assumes the pointers can be referenced as shorts */ ++#define eacmp(a, b) ((((uint16 *)(a))[0] ^ ((uint16 *)(b))[0]) | \ ++ (((uint16 *)(a))[1] ^ ((uint16 *)(b))[1]) | \ ++ (((uint16 *)(a))[2] ^ ((uint16 *)(b))[2])) ++ ++#define ether_cmp(a, b) eacmp(a, b) ++ ++/* copy an ethernet address - assumes the pointers can be referenced as shorts */ ++#define eacopy(s, d) \ ++do { \ ++ ((uint16 *)(d))[0] = ((const uint16 *)(s))[0]; \ ++ ((uint16 *)(d))[1] = ((const uint16 *)(s))[1]; \ ++ 
((uint16 *)(d))[2] = ((const uint16 *)(s))[2]; \ ++} while (0) ++ ++#define ether_copy(s, d) eacopy(s, d) ++ ++ ++static const struct ether_addr ether_bcast = {{255, 255, 255, 255, 255, 255}}; ++static const struct ether_addr ether_null = {{0, 0, 0, 0, 0, 0}}; ++ ++#define ETHER_ISBCAST(ea) ((((const uint8 *)(ea))[0] & \ ++ ((const uint8 *)(ea))[1] & \ ++ ((const uint8 *)(ea))[2] & \ ++ ((const uint8 *)(ea))[3] & \ ++ ((const uint8 *)(ea))[4] & \ ++ ((const uint8 *)(ea))[5]) == 0xff) ++#define ETHER_ISNULLADDR(ea) ((((const uint8 *)(ea))[0] | \ ++ ((const uint8 *)(ea))[1] | \ ++ ((const uint8 *)(ea))[2] | \ ++ ((const uint8 *)(ea))[3] | \ ++ ((const uint8 *)(ea))[4] | \ ++ ((const uint8 *)(ea))[5]) == 0) ++ ++#define ETHER_ISNULLDEST(da) ((((const uint16 *)(da))[0] | \ ++ ((const uint16 *)(da))[1] | \ ++ ((const uint16 *)(da))[2]) == 0) ++ ++ ++#define ETHER_MOVE_HDR(d, s) \ ++do { \ ++ struct ether_header t; \ ++ t = *(struct ether_header *)(s); \ ++ *(struct ether_header *)(d) = t; \ ++} while (0) ++ ++/* This marks the end of a packed structure section. */ ++#include ++ ++#endif /* _NET_ETHERNET_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/proto/vlan.h b/drivers/net/ethernet/broadcom/gmac/src/include/proto/vlan.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/proto/vlan.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/proto/vlan.h 2017-11-09 17:53:43.991302000 +0800 +@@ -0,0 +1,67 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * 802.1Q VLAN protocol definitions ++ * ++ * $Id: vlan.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _vlan_h_ ++#define _vlan_h_ ++ ++#ifndef _TYPEDEFS_H_ ++#include ++#endif ++ ++/* This marks the start of a packed structure section. */ ++#include ++ ++#ifndef VLAN_VID_MASK ++#define VLAN_VID_MASK 0xfff /* low 12 bits are vlan id */ ++#endif ++#define VLAN_CFI_SHIFT 12 /* canonical format indicator bit */ ++#define VLAN_PRI_SHIFT 13 /* user priority */ ++ ++#define VLAN_PRI_MASK 7 /* 3 bits of priority */ ++ ++#define VLAN_TCI_OFFSET 14 /* offset of tag ctrl info field */ ++ ++#define VLAN_TAG_LEN 4 ++#define VLAN_TAG_OFFSET (2 * ETHER_ADDR_LEN) /* offset in Ethernet II packet only */ ++ ++#define VLAN_TPID 0x8100 /* VLAN ethertype/Tag Protocol ID */ ++ ++struct ethervlan_header { ++ uint8 ether_dhost[ETHER_ADDR_LEN]; ++ uint8 ether_shost[ETHER_ADDR_LEN]; ++ uint16 vlan_type; /* 0x8100 */ ++ uint16 vlan_tag; /* priority, cfi and vid */ ++ uint16 ether_type; ++}; ++ ++#define ETHERVLAN_HDR_LEN (ETHER_HDR_LEN + VLAN_TAG_LEN) ++ ++ ++/* This marks the end of a packed structure section. 
*/ ++#include ++ ++#define ETHERVLAN_MOVE_HDR(d, s) \ ++do { \ ++ struct ethervlan_header t; \ ++ t = *(struct ethervlan_header *)(s); \ ++ *(struct ethervlan_header *)(d) = t; \ ++} while (0) ++ ++#endif /* _vlan_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/proto/wpa.h b/drivers/net/ethernet/broadcom/gmac/src/include/proto/wpa.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/proto/wpa.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/proto/wpa.h 2017-11-09 17:53:43.992308000 +0800 +@@ -0,0 +1,169 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Fundamental types and constants relating to WPA ++ * ++ * $Id: wpa.h 261155 2011-05-23 23:51:32Z $ ++ */ ++ ++#ifndef _proto_wpa_h_ ++#define _proto_wpa_h_ ++ ++#include ++#include ++ ++ ++/* This marks the start of a packed structure section. */ ++#include ++ ++/* Reason Codes */ ++ ++/* 13 through 23 taken from IEEE Std 802.11i-2004 */ ++#define DOT11_RC_INVALID_WPA_IE 13 /* Invalid info. 
element */ ++#define DOT11_RC_MIC_FAILURE 14 /* Michael failure */ ++#define DOT11_RC_4WH_TIMEOUT 15 /* 4-way handshake timeout */ ++#define DOT11_RC_GTK_UPDATE_TIMEOUT 16 /* Group key update timeout */ ++#define DOT11_RC_WPA_IE_MISMATCH 17 /* WPA IE in 4-way handshake differs from ++ * (re-)assoc. request/probe response ++ */ ++#define DOT11_RC_INVALID_MC_CIPHER 18 /* Invalid multicast cipher */ ++#define DOT11_RC_INVALID_UC_CIPHER 19 /* Invalid unicast cipher */ ++#define DOT11_RC_INVALID_AKMP 20 /* Invalid authenticated key management protocol */ ++#define DOT11_RC_BAD_WPA_VERSION 21 /* Unsupported WPA version */ ++#define DOT11_RC_INVALID_WPA_CAP 22 /* Invalid WPA IE capabilities */ ++#define DOT11_RC_8021X_AUTH_FAIL 23 /* 802.1X authentication failure */ ++ ++#define WPA2_PMKID_LEN 16 ++ ++/* WPA IE fixed portion */ ++typedef BWL_PRE_PACKED_STRUCT struct ++{ ++ uint8 tag; /* TAG */ ++ uint8 length; /* TAG length */ ++ uint8 oui[3]; /* IE OUI */ ++ uint8 oui_type; /* OUI type */ ++ BWL_PRE_PACKED_STRUCT struct { ++ uint8 low; ++ uint8 high; ++ } BWL_POST_PACKED_STRUCT version; /* IE version */ ++} BWL_POST_PACKED_STRUCT wpa_ie_fixed_t; ++#define WPA_IE_OUITYPE_LEN 4 ++#define WPA_IE_FIXED_LEN 8 ++#define WPA_IE_TAG_FIXED_LEN 6 ++ ++typedef BWL_PRE_PACKED_STRUCT struct { ++ uint8 tag; /* TAG */ ++ uint8 length; /* TAG length */ ++ BWL_PRE_PACKED_STRUCT struct { ++ uint8 low; ++ uint8 high; ++ } BWL_POST_PACKED_STRUCT version; /* IE version */ ++} BWL_POST_PACKED_STRUCT wpa_rsn_ie_fixed_t; ++#define WPA_RSN_IE_FIXED_LEN 4 ++#define WPA_RSN_IE_TAG_FIXED_LEN 2 ++typedef uint8 wpa_pmkid_t[WPA2_PMKID_LEN]; ++ ++/* WPA suite/multicast suite */ ++typedef BWL_PRE_PACKED_STRUCT struct ++{ ++ uint8 oui[3]; ++ uint8 type; ++} BWL_POST_PACKED_STRUCT wpa_suite_t, wpa_suite_mcast_t; ++#define WPA_SUITE_LEN 4 ++ ++/* WPA unicast suite list/key management suite list */ ++typedef BWL_PRE_PACKED_STRUCT struct ++{ ++ BWL_PRE_PACKED_STRUCT struct { ++ uint8 low; ++ uint8 high; ++ } 
BWL_POST_PACKED_STRUCT count; ++ wpa_suite_t list[1]; ++} BWL_POST_PACKED_STRUCT wpa_suite_ucast_t, wpa_suite_auth_key_mgmt_t; ++#define WPA_IE_SUITE_COUNT_LEN 2 ++typedef BWL_PRE_PACKED_STRUCT struct ++{ ++ BWL_PRE_PACKED_STRUCT struct { ++ uint8 low; ++ uint8 high; ++ } BWL_POST_PACKED_STRUCT count; ++ wpa_pmkid_t list[1]; ++} BWL_POST_PACKED_STRUCT wpa_pmkid_list_t; ++ ++/* WPA cipher suites */ ++#define WPA_CIPHER_NONE 0 /* None */ ++#define WPA_CIPHER_WEP_40 1 /* WEP (40-bit) */ ++#define WPA_CIPHER_TKIP 2 /* TKIP: default for WPA */ ++#define WPA_CIPHER_AES_OCB 3 /* AES (OCB) */ ++#define WPA_CIPHER_AES_CCM 4 /* AES (CCM) */ ++#define WPA_CIPHER_WEP_104 5 /* WEP (104-bit) */ ++#define WPA_CIPHER_BIP 6 /* WEP (104-bit) */ ++#define WPA_CIPHER_TPK 7 /* Group addressed traffic not allowed */ ++ ++ ++#define IS_WPA_CIPHER(cipher) ((cipher) == WPA_CIPHER_NONE || \ ++ (cipher) == WPA_CIPHER_WEP_40 || \ ++ (cipher) == WPA_CIPHER_WEP_104 || \ ++ (cipher) == WPA_CIPHER_TKIP || \ ++ (cipher) == WPA_CIPHER_AES_OCB || \ ++ (cipher) == WPA_CIPHER_AES_CCM || \ ++ (cipher) == WPA_CIPHER_TPK) ++ ++ ++/* WPA TKIP countermeasures parameters */ ++#define WPA_TKIP_CM_DETECT 60 /* multiple MIC failure window (seconds) */ ++#define WPA_TKIP_CM_BLOCK 60 /* countermeasures active window (seconds) */ ++ ++/* RSN IE defines */ ++#define RSN_CAP_LEN 2 /* Length of RSN capabilities field (2 octets) */ ++ ++/* RSN Capabilities defined in 802.11i */ ++#define RSN_CAP_PREAUTH 0x0001 ++#define RSN_CAP_NOPAIRWISE 0x0002 ++#define RSN_CAP_PTK_REPLAY_CNTR_MASK 0x000C ++#define RSN_CAP_PTK_REPLAY_CNTR_SHIFT 2 ++#define RSN_CAP_GTK_REPLAY_CNTR_MASK 0x0030 ++#define RSN_CAP_GTK_REPLAY_CNTR_SHIFT 4 ++#define RSN_CAP_1_REPLAY_CNTR 0 ++#define RSN_CAP_2_REPLAY_CNTRS 1 ++#define RSN_CAP_4_REPLAY_CNTRS 2 ++#define RSN_CAP_16_REPLAY_CNTRS 3 ++#ifdef MFP ++#define RSN_CAP_MFPR 0x0040 ++#define RSN_CAP_MFPC 0x0080 ++#endif ++ ++/* WPA capabilities defined in 802.11i */ ++#define WPA_CAP_4_REPLAY_CNTRS 
RSN_CAP_4_REPLAY_CNTRS ++#define WPA_CAP_16_REPLAY_CNTRS RSN_CAP_16_REPLAY_CNTRS ++#define WPA_CAP_REPLAY_CNTR_SHIFT RSN_CAP_PTK_REPLAY_CNTR_SHIFT ++#define WPA_CAP_REPLAY_CNTR_MASK RSN_CAP_PTK_REPLAY_CNTR_MASK ++ ++/* WPA capabilities defined in 802.11zD9.0 */ ++#define WPA_CAP_PEER_KEY_ENABLE (0x1 << 1) /* bit 9 */ ++ ++/* WPA Specific defines */ ++#define WPA_CAP_LEN RSN_CAP_LEN /* Length of RSN capabilities in RSN IE (2 octets) */ ++#define WPA_PMKID_CNT_LEN 2 /* Length of RSN PMKID count (2 octests) */ ++ ++#define WPA_CAP_WPA2_PREAUTH RSN_CAP_PREAUTH ++ ++#define WPA2_PMKID_COUNT_LEN 2 ++ ++ ++/* This marks the end of a packed structure section. */ ++#include ++ ++#endif /* _proto_wpa_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/sbchipc.h b/drivers/net/ethernet/broadcom/gmac/src/include/sbchipc.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/sbchipc.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/sbchipc.h 2017-11-09 17:53:44.004291000 +0800 +@@ -0,0 +1,2515 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * SiliconBackplane Chipcommon core hardware definitions. ++ * ++ * The chipcommon core provides chip identification, SB control, ++ * JTAG, 0/1/2 UARTs, clock frequency control, a watchdog interrupt timer, ++ * GPIO interface, extbus, and support for serial and parallel flashes. ++ * ++ * $Id: sbchipc.h 328955 2012-04-23 09:06:12Z $ ++ */ ++ ++#ifndef _SBCHIPC_H ++#define _SBCHIPC_H ++ ++#ifndef _LANGUAGE_ASSEMBLY ++ ++/* cpp contortions to concatenate w/arg prescan */ ++#ifndef PAD ++#define _PADLINE(line) pad ## line ++#define _XSTR(line) _PADLINE(line) ++#define PAD _XSTR(__LINE__) ++#endif /* PAD */ ++ ++typedef struct eci_prerev35 { ++ uint32 eci_output; ++ uint32 eci_control; ++ uint32 eci_inputlo; ++ uint32 eci_inputmi; ++ uint32 eci_inputhi; ++ uint32 eci_inputintpolaritylo; ++ uint32 eci_inputintpolaritymi; ++ uint32 eci_inputintpolarityhi; ++ uint32 eci_intmasklo; ++ uint32 eci_intmaskmi; ++ uint32 eci_intmaskhi; ++ uint32 eci_eventlo; ++ uint32 eci_eventmi; ++ uint32 eci_eventhi; ++ uint32 eci_eventmasklo; ++ uint32 eci_eventmaskmi; ++ uint32 eci_eventmaskhi; ++ uint32 PAD[3]; ++} eci_prerev35_t; ++ ++typedef struct eci_rev35 { ++ uint32 eci_outputlo; ++ uint32 eci_outputhi; ++ uint32 eci_controllo; ++ uint32 eci_controlhi; ++ uint32 eci_inputlo; ++ uint32 eci_inputhi; ++ uint32 eci_inputintpolaritylo; ++ uint32 eci_inputintpolarityhi; ++ uint32 eci_intmasklo; ++ uint32 eci_intmaskhi; ++ uint32 eci_eventlo; ++ uint32 eci_eventhi; ++ uint32 eci_eventmasklo; ++ uint32 eci_eventmaskhi; ++ uint32 eci_auxtx; ++ uint32 eci_auxrx; ++ uint32 eci_datatag; ++ uint32 eci_uartescvalue; ++ uint32 eci_autobaudctr; ++ uint32 eci_uartfifolevel; ++} eci_rev35_t; ++ ++typedef struct flash_config { ++ uint32 PAD[19]; ++ /* Flash struct configuration registers (0x18c) for BCM4706 (corerev = 31) */ ++ uint32 flashstrconfig; ++} flash_config_t; ++ ++typedef volatile struct { ++ uint32 chipid; /* 0x0 */ ++ uint32 capabilities; ++ uint32 corecontrol; /* corerev >= 1 */ 
++ uint32 bist; ++ ++ /* OTP */ ++ uint32 otpstatus; /* 0x10, corerev >= 10 */ ++ uint32 otpcontrol; ++ uint32 otpprog; ++ uint32 otplayout; /* corerev >= 23 */ ++ ++ /* Interrupt control */ ++ uint32 intstatus; /* 0x20 */ ++ uint32 intmask; ++ ++ /* Chip specific regs */ ++ uint32 chipcontrol; /* 0x28, rev >= 11 */ ++ uint32 chipstatus; /* 0x2c, rev >= 11 */ ++ ++ /* Jtag Master */ ++ uint32 jtagcmd; /* 0x30, rev >= 10 */ ++ uint32 jtagir; ++ uint32 jtagdr; ++ uint32 jtagctrl; ++ ++ /* serial flash interface registers */ ++ uint32 flashcontrol; /* 0x40 */ ++ uint32 flashaddress; ++ uint32 flashdata; ++ uint32 otplayoutextension; /* rev >= 35 */ ++ ++ /* Silicon backplane configuration broadcast control */ ++ uint32 broadcastaddress; /* 0x50 */ ++ uint32 broadcastdata; ++ ++ /* gpio - cleared only by power-on-reset */ ++ uint32 gpiopullup; /* 0x58, corerev >= 20 */ ++ uint32 gpiopulldown; /* 0x5c, corerev >= 20 */ ++ uint32 gpioin; /* 0x60 */ ++ uint32 gpioout; /* 0x64 */ ++ uint32 gpioouten; /* 0x68 */ ++ uint32 gpiocontrol; /* 0x6C */ ++ uint32 gpiointpolarity; /* 0x70 */ ++ uint32 gpiointmask; /* 0x74 */ ++ ++ /* GPIO events corerev >= 11 */ ++ uint32 gpioevent; ++ uint32 gpioeventintmask; ++ ++ /* Watchdog timer */ ++ uint32 watchdog; /* 0x80 */ ++ ++ /* GPIO events corerev >= 11 */ ++ uint32 gpioeventintpolarity; ++ ++ /* GPIO based LED powersave registers corerev >= 16 */ ++ uint32 gpiotimerval; /* 0x88 */ ++ uint32 gpiotimeroutmask; ++ ++ /* clock control */ ++ uint32 clockcontrol_n; /* 0x90 */ ++ uint32 clockcontrol_sb; /* aka m0 */ ++ uint32 clockcontrol_pci; /* aka m1 */ ++ uint32 clockcontrol_m2; /* mii/uart/mipsref */ ++ uint32 clockcontrol_m3; /* cpu */ ++ uint32 clkdiv; /* corerev >= 3 */ ++ uint32 gpiodebugsel; /* corerev >= 28 */ ++ uint32 capabilities_ext; /* 0xac */ ++ ++ /* pll delay registers (corerev >= 4) */ ++ uint32 pll_on_delay; /* 0xb0 */ ++ uint32 fref_sel_delay; ++ uint32 slow_clk_ctl; /* 5 < corerev < 10 */ ++ uint32 PAD; ++ ++ /* 
Instaclock registers (corerev >= 10) */ ++ uint32 system_clk_ctl; /* 0xc0 */ ++ uint32 clkstatestretch; ++ uint32 PAD[2]; ++ ++ /* Indirect backplane access (corerev >= 22) */ ++ uint32 bp_addrlow; /* 0xd0 */ ++ uint32 bp_addrhigh; ++ uint32 bp_data; ++ uint32 PAD; ++ uint32 bp_indaccess; ++ /* SPI registers, corerev >= 37 */ ++ uint32 gsioctrl; ++ uint32 gsioaddress; ++ uint32 gsiodata; ++ ++ /* More clock dividers (corerev >= 32) */ ++ uint32 clkdiv2; ++ /* FAB ID (corerev >= 40) */ ++ uint32 otpcontrol1; ++ uint32 fabid; /* 0xf8 */ ++ ++ /* In AI chips, pointer to erom */ ++ uint32 eromptr; /* 0xfc */ ++ ++ /* ExtBus control registers (corerev >= 3) */ ++ uint32 pcmcia_config; /* 0x100 */ ++ uint32 pcmcia_memwait; ++ uint32 pcmcia_attrwait; ++ uint32 pcmcia_iowait; ++ uint32 ide_config; ++ uint32 ide_memwait; ++ uint32 ide_attrwait; ++ uint32 ide_iowait; ++ uint32 prog_config; ++ uint32 prog_waitcount; ++ uint32 flash_config; ++ uint32 flash_waitcount; ++ uint32 SECI_config; /* 0x130 SECI configuration */ ++ uint32 SECI_status; ++ uint32 SECI_statusmask; ++ uint32 SECI_rxnibchanged; ++ ++ union { /* 0x140 */ ++ /* Enhanced Coexistence Interface (ECI) registers (corerev >= 21) */ ++ struct eci_prerev35 lt35; ++ struct eci_rev35 ge35; ++ /* Other interfaces */ ++ struct flash_config flashconf; ++ uint32 PAD[20]; ++ } eci; ++ ++ /* SROM interface (corerev >= 32) */ ++ uint32 sromcontrol; /* 0x190 */ ++ uint32 sromaddress; ++ uint32 sromdata; ++ uint32 PAD[1]; /* 0x19C */ ++ /* NAND flash registers for BCM4706 (corerev = 31) */ ++ uint32 nflashctrl; /* 0x1a0 */ ++ uint32 nflashconf; ++ uint32 nflashcoladdr; ++ uint32 nflashrowaddr; ++ uint32 nflashdata; ++ uint32 nflashwaitcnt0; /* 0x1b4 */ ++ uint32 PAD[2]; ++ ++ uint32 seci_uart_data; /* 0x1C0 */ ++ uint32 seci_uart_bauddiv; ++ uint32 seci_uart_fcr; ++ uint32 seci_uart_lcr; ++ uint32 seci_uart_mcr; ++ uint32 seci_uart_lsr; ++ uint32 seci_uart_msr; ++ uint32 seci_uart_baudadj; ++ /* Clock control and hardware 
workarounds (corerev >= 20) */ ++ uint32 clk_ctl_st; /* 0x1e0 */ ++ uint32 hw_war; ++ uint32 PAD[70]; ++ ++ /* UARTs */ ++ uint8 uart0data; /* 0x300 */ ++ uint8 uart0imr; ++ uint8 uart0fcr; ++ uint8 uart0lcr; ++ uint8 uart0mcr; ++ uint8 uart0lsr; ++ uint8 uart0msr; ++ uint8 uart0scratch; ++ uint8 PAD[248]; /* corerev >= 1 */ ++ ++ uint8 uart1data; /* 0x400 */ ++ uint8 uart1imr; ++ uint8 uart1fcr; ++ uint8 uart1lcr; ++ uint8 uart1mcr; ++ uint8 uart1lsr; ++ uint8 uart1msr; ++ uint8 uart1scratch; ++ uint32 PAD[126]; ++ ++ /* PMU registers (corerev >= 20) */ ++ /* Note: all timers driven by ILP clock are updated asynchronously to HT/ALP. ++ * The CPU must read them twice, compare, and retry if different. ++ */ ++ uint32 pmucontrol; /* 0x600 */ ++ uint32 pmucapabilities; ++ uint32 pmustatus; ++ uint32 res_state; ++ uint32 res_pending; ++ uint32 pmutimer; ++ uint32 min_res_mask; ++ uint32 max_res_mask; ++ uint32 res_table_sel; ++ uint32 res_dep_mask; ++ uint32 res_updn_timer; ++ uint32 res_timer; ++ uint32 clkstretch; ++ uint32 pmuwatchdog; ++ uint32 gpiosel; /* 0x638, rev >= 1 */ ++ uint32 gpioenable; /* 0x63c, rev >= 1 */ ++ uint32 res_req_timer_sel; ++ uint32 res_req_timer; ++ uint32 res_req_mask; ++ uint32 PAD; ++ uint32 chipcontrol_addr; /* 0x650 */ ++ uint32 chipcontrol_data; /* 0x654 */ ++ uint32 regcontrol_addr; ++ uint32 regcontrol_data; ++ uint32 pllcontrol_addr; ++ uint32 pllcontrol_data; ++ uint32 pmustrapopt; /* 0x668, corerev >= 28 */ ++ uint32 pmu_xtalfreq; /* 0x66C, pmurev >= 10 */ ++ uint32 PAD[100]; ++ uint16 sromotp[512]; /* 0x800 */ ++#ifdef NFLASH_SUPPORT ++ /* Nand flash MLC controller registers (corerev >= 38) */ ++ uint32 nand_revision; /* 0xC00 */ ++ uint32 nand_cmd_start; ++ uint32 nand_cmd_addr_x; ++ uint32 nand_cmd_addr; ++ uint32 nand_cmd_end_addr; ++ uint32 nand_cs_nand_select; ++ uint32 nand_cs_nand_xor; ++ uint32 PAD; ++ uint32 nand_spare_rd0; ++ uint32 nand_spare_rd4; ++ uint32 nand_spare_rd8; ++ uint32 nand_spare_rd12; ++ uint32 
nand_spare_wr0; ++ uint32 nand_spare_wr4; ++ uint32 nand_spare_wr8; ++ uint32 nand_spare_wr12; ++ uint32 nand_acc_control; ++ uint32 PAD; ++ uint32 nand_config; ++ uint32 PAD; ++ uint32 nand_timing_1; ++ uint32 nand_timing_2; ++ uint32 nand_semaphore; ++ uint32 PAD; ++ uint32 nand_devid; ++ uint32 nand_devid_x; ++ uint32 nand_block_lock_status; ++ uint32 nand_intfc_status; ++ uint32 nand_ecc_corr_addr_x; ++ uint32 nand_ecc_corr_addr; ++ uint32 nand_ecc_unc_addr_x; ++ uint32 nand_ecc_unc_addr; ++ uint32 nand_read_error_count; ++ uint32 nand_corr_stat_threshold; ++ uint32 PAD[2]; ++ uint32 nand_read_addr_x; ++ uint32 nand_read_addr; ++ uint32 nand_page_program_addr_x; ++ uint32 nand_page_program_addr; ++ uint32 nand_copy_back_addr_x; ++ uint32 nand_copy_back_addr; ++ uint32 nand_block_erase_addr_x; ++ uint32 nand_block_erase_addr; ++ uint32 nand_inv_read_addr_x; ++ uint32 nand_inv_read_addr; ++ uint32 PAD[2]; ++ uint32 nand_blk_wr_protect; ++ uint32 PAD[3]; ++ uint32 nand_acc_control_cs1; ++ uint32 nand_config_cs1; ++ uint32 nand_timing_1_cs1; ++ uint32 nand_timing_2_cs1; ++ uint32 PAD[20]; ++ uint32 nand_spare_rd16; ++ uint32 nand_spare_rd20; ++ uint32 nand_spare_rd24; ++ uint32 nand_spare_rd28; ++ uint32 nand_cache_addr; ++ uint32 nand_cache_data; ++ uint32 nand_ctrl_config; ++ uint32 nand_ctrl_status; ++#endif /* NFLASH_SUPPORT */ ++ uint32 gci_corecaps0; /* GCI starting at 0xC00 */ ++ uint32 gci_corecaps1; ++ uint32 gci_corecaps2; ++ uint32 gci_corectrl; ++ uint32 gci_corestat; /* 0xC10 */ ++ uint32 PAD[11]; ++ uint32 gci_indirect_addr; /* 0xC40 */ ++ uint32 PAD[111]; ++ uint32 gci_chipctrl; /* 0xE00 */ ++} chipcregs_t; ++ ++#endif /* _LANGUAGE_ASSEMBLY */ ++ ++#if defined(IL_BIGENDIAN) && defined(BCMHND74K) ++/* Selective swapped defines for those registers we need in ++ * big-endian code. 
++ */ ++#define CC_CHIPID 4 ++#define CC_CAPABILITIES 0 ++#define CC_CHIPST 0x28 ++#define CC_EROMPTR 0xf8 ++ ++#else /* !IL_BIGENDIAN || !BCMHND74K */ ++ ++#define CC_CHIPID 0 ++#define CC_CAPABILITIES 4 ++#define CC_CHIPST 0x2c ++#define CC_EROMPTR 0xfc ++ ++#endif /* IL_BIGENDIAN && BCMHND74K */ ++ ++#define CC_OTPST 0x10 ++#define CC_JTAGCMD 0x30 ++#define CC_JTAGIR 0x34 ++#define CC_JTAGDR 0x38 ++#define CC_JTAGCTRL 0x3c ++#define CC_GPIOPU 0x58 ++#define CC_GPIOPD 0x5c ++#define CC_GPIOIN 0x60 ++#define CC_GPIOOUT 0x64 ++#define CC_GPIOOUTEN 0x68 ++#define CC_GPIOCTRL 0x6c ++#define CC_GPIOPOL 0x70 ++#define CC_GPIOINTM 0x74 ++#define CC_WATCHDOG 0x80 ++#define CC_CLKC_N 0x90 ++#define CC_CLKC_M0 0x94 ++#define CC_CLKC_M1 0x98 ++#define CC_CLKC_M2 0x9c ++#define CC_CLKC_M3 0xa0 ++#define CC_CLKDIV 0xa4 ++#define CC_SYS_CLK_CTL 0xc0 ++#define CC_CLK_CTL_ST SI_CLK_CTL_ST ++#define PMU_CTL 0x600 ++#define PMU_CAP 0x604 ++#define PMU_ST 0x608 ++#define PMU_RES_STATE 0x60c ++#define PMU_TIMER 0x614 ++#define PMU_MIN_RES_MASK 0x618 ++#define PMU_MAX_RES_MASK 0x61c ++#define CC_CHIPCTL_ADDR 0x650 ++#define CC_CHIPCTL_DATA 0x654 ++#define PMU_REG_CONTROL_ADDR 0x658 ++#define PMU_REG_CONTROL_DATA 0x65C ++#define PMU_PLL_CONTROL_ADDR 0x660 ++#define PMU_PLL_CONTROL_DATA 0x664 ++#define CC_SROM_CTRL 0x190 ++#define CC_SROM_OTP 0x800 /* SROM/OTP address space */ ++#define CC_GCI_INDIRECT_ADDR_REG 0xC40 ++#define CC_GCI_CHIP_CTRL_REG 0xE00 ++#define CC_GCI_CC_OFFSET_2 2 ++#define CC_GCI_CC_OFFSET_5 5 ++ ++#ifdef NFLASH_SUPPORT ++/* NAND flash support */ ++#define CC_NAND_REVISION 0xC00 ++#define CC_NAND_CMD_START 0xC04 ++#define CC_NAND_CMD_ADDR 0xC0C ++#define CC_NAND_SPARE_RD_0 0xC20 ++#define CC_NAND_SPARE_RD_4 0xC24 ++#define CC_NAND_SPARE_RD_8 0xC28 ++#define CC_NAND_SPARE_RD_C 0xC2C ++#define CC_NAND_CONFIG 0xC48 ++#define CC_NAND_DEVID 0xC60 ++#define CC_NAND_DEVID_EXT 0xC64 ++#define CC_NAND_INTFC_STATUS 0xC6C ++#endif /* NFLASH_SUPPORT */ ++ ++/* chipid */ 
++#define CID_ID_MASK 0x0000ffff /* Chip Id mask */ ++#define CID_REV_MASK 0x000f0000 /* Chip Revision mask */ ++#define CID_REV_SHIFT 16 /* Chip Revision shift */ ++#define CID_PKG_MASK 0x00f00000 /* Package Option mask */ ++#define CID_PKG_SHIFT 20 /* Package Option shift */ ++#define CID_CC_MASK 0x0f000000 /* CoreCount (corerev >= 4) */ ++#define CID_CC_SHIFT 24 ++#define CID_TYPE_MASK 0xf0000000 /* Chip Type */ ++#define CID_TYPE_SHIFT 28 ++ ++/* capabilities */ ++#define CC_CAP_UARTS_MASK 0x00000003 /* Number of UARTs */ ++#define CC_CAP_MIPSEB 0x00000004 /* MIPS is in big-endian mode */ ++#define CC_CAP_UCLKSEL 0x00000018 /* UARTs clock select */ ++#define CC_CAP_UINTCLK 0x00000008 /* UARTs are driven by internal divided clock */ ++#define CC_CAP_UARTGPIO 0x00000020 /* UARTs own GPIOs 15:12 */ ++#define CC_CAP_EXTBUS_MASK 0x000000c0 /* External bus mask */ ++#define CC_CAP_EXTBUS_NONE 0x00000000 /* No ExtBus present */ ++#define CC_CAP_EXTBUS_FULL 0x00000040 /* ExtBus: PCMCIA, IDE & Prog */ ++#define CC_CAP_EXTBUS_PROG 0x00000080 /* ExtBus: ProgIf only */ ++#define CC_CAP_FLASH_MASK 0x00000700 /* Type of flash */ ++#define CC_CAP_PLL_MASK 0x00038000 /* Type of PLL */ ++#define CC_CAP_PWR_CTL 0x00040000 /* Power control */ ++#define CC_CAP_OTPSIZE 0x00380000 /* OTP Size (0 = none) */ ++#define CC_CAP_OTPSIZE_SHIFT 19 /* OTP Size shift */ ++#define CC_CAP_OTPSIZE_BASE 5 /* OTP Size base */ ++#define CC_CAP_JTAGP 0x00400000 /* JTAG Master Present */ ++#define CC_CAP_ROM 0x00800000 /* Internal boot rom active */ ++#define CC_CAP_BKPLN64 0x08000000 /* 64-bit backplane */ ++#define CC_CAP_PMU 0x10000000 /* PMU Present, rev >= 20 */ ++#define CC_CAP_ECI 0x20000000 /* ECI Present, rev >= 21 */ ++#define CC_CAP_SROM 0x40000000 /* Srom Present, rev >= 32 */ ++#define CC_CAP_NFLASH 0x80000000 /* Nand flash present, rev >= 35 */ ++ ++#define CC_CAP2_SECI 0x00000001 /* SECI Present, rev >= 36 */ ++#define CC_CAP2_GSIO 0x00000002 /* GSIO (spi/i2c) present, rev >= 37 */ ++ 
++/* capabilities extension */ ++#define CC_CAP_EXT_SECI_PRESENT 0x00000001 /* SECI present */ ++ ++/* PLL type */ ++#define PLL_NONE 0x00000000 ++#define PLL_TYPE1 0x00010000 /* 48MHz base, 3 dividers */ ++#define PLL_TYPE2 0x00020000 /* 48MHz, 4 dividers */ ++#define PLL_TYPE3 0x00030000 /* 25MHz, 2 dividers */ ++#define PLL_TYPE4 0x00008000 /* 48MHz, 4 dividers */ ++#define PLL_TYPE5 0x00018000 /* 25MHz, 4 dividers */ ++#define PLL_TYPE6 0x00028000 /* 100/200 or 120/240 only */ ++#define PLL_TYPE7 0x00038000 /* 25MHz, 4 dividers */ ++ ++/* ILP clock */ ++#define ILP_CLOCK 32000 ++ ++/* ALP clock on pre-PMU chips */ ++#define ALP_CLOCK 20000000 ++#define NS_ALP_CLOCK 125000000 ++#define NS_SLOW_ALP_CLOCK 100000000 ++#define NS_CPU_CLOCK 1000000000 ++#define NS_SLOW_CPU_CLOCK 800000000 ++#define NS_SI_CLOCK 250000000 ++#define NS_SLOW_SI_CLOCK 200000000 ++#define NS_FAST_MEM_CLOCK 800000000 ++#define NS_MEM_CLOCK 533000000 ++#define NS_SLOW_MEM_CLOCK 400000000 ++ ++/* HT clock */ ++#define HT_CLOCK 80000000 ++ ++/* corecontrol */ ++#define CC_UARTCLKO 0x00000001 /* Drive UART with internal clock */ ++#define CC_SE 0x00000002 /* sync clk out enable (corerev >= 3) */ ++#define CC_ASYNCGPIO 0x00000004 /* 1=generate GPIO interrupt without backplane clock */ ++#define CC_UARTCLKEN 0x00000008 /* enable UART Clock (corerev > = 21 */ ++ ++/* 4321 chipcontrol */ ++#define CHIPCTRL_4321A0_DEFAULT 0x3a4 ++#define CHIPCTRL_4321A1_DEFAULT 0x0a4 ++#define CHIPCTRL_4321_PLL_DOWN 0x800000 /* serdes PLL down override */ ++ ++/* Fields in the otpstatus register in rev >= 21 */ ++#define OTPS_OL_MASK 0x000000ff ++#define OTPS_OL_MFG 0x00000001 /* manuf row is locked */ ++#define OTPS_OL_OR1 0x00000002 /* otp redundancy row 1 is locked */ ++#define OTPS_OL_OR2 0x00000004 /* otp redundancy row 2 is locked */ ++#define OTPS_OL_GU 0x00000008 /* general use region is locked */ ++#define OTPS_GUP_MASK 0x00000f00 ++#define OTPS_GUP_SHIFT 8 ++#define OTPS_GUP_HW 0x00000100 /* h/w subregion 
is programmed */ ++#define OTPS_GUP_SW 0x00000200 /* s/w subregion is programmed */ ++#define OTPS_GUP_CI 0x00000400 /* chipid/pkgopt subregion is programmed */ ++#define OTPS_GUP_FUSE 0x00000800 /* fuse subregion is programmed */ ++#define OTPS_READY 0x00001000 ++#define OTPS_RV(x) (1 << (16 + (x))) /* redundancy entry valid */ ++#define OTPS_RV_MASK 0x0fff0000 ++#define OTPS_PROGOK 0x40000000 ++ ++/* Fields in the otpcontrol register in rev >= 21 */ ++#define OTPC_PROGSEL 0x00000001 ++#define OTPC_PCOUNT_MASK 0x0000000e ++#define OTPC_PCOUNT_SHIFT 1 ++#define OTPC_VSEL_MASK 0x000000f0 ++#define OTPC_VSEL_SHIFT 4 ++#define OTPC_TMM_MASK 0x00000700 ++#define OTPC_TMM_SHIFT 8 ++#define OTPC_ODM 0x00000800 ++#define OTPC_PROGEN 0x80000000 ++ ++/* Fields in the 40nm otpcontrol register in rev >= 40 */ ++#define OTPC_40NM_PROGSEL_SHIFT 0 ++#define OTPC_40NM_PCOUNT_SHIFT 1 ++#define OTPC_40NM_PCOUNT_WR 0xA ++#define OTPC_40NM_PCOUNT_V1X 0xB ++#define OTPC_40NM_REGCSEL_SHIFT 5 ++#define OTPC_40NM_REGCSEL_DEF 0x4 ++#define OTPC_40NM_PROGIN_SHIFT 8 ++#define OTPC_40NM_R2X_SHIFT 10 ++#define OTPC_40NM_ODM_SHIFT 11 ++#define OTPC_40NM_DF_SHIFT 15 ++#define OTPC_40NM_VSEL_SHIFT 16 ++#define OTPC_40NM_VSEL_WR 0xA ++#define OTPC_40NM_VSEL_V1X 0xA ++#define OTPC_40NM_VSEL_R1X 0x5 ++#define OTPC_40NM_COFAIL_SHIFT 30 ++ ++#define OTPC1_CPCSEL_SHIFT 0 ++#define OTPC1_CPCSEL_DEF 6 ++#define OTPC1_TM_SHIFT 8 ++#define OTPC1_TM_WR 0x84 ++#define OTPC1_TM_V1X 0x84 ++#define OTPC1_TM_R1X 0x4 ++ ++/* Fields in otpprog in rev >= 21 and HND OTP */ ++#define OTPP_COL_MASK 0x000000ff ++#define OTPP_COL_SHIFT 0 ++#define OTPP_ROW_MASK 0x0000ff00 ++#define OTPP_ROW_SHIFT 8 ++#define OTPP_OC_MASK 0x0f000000 ++#define OTPP_OC_SHIFT 24 ++#define OTPP_READERR 0x10000000 ++#define OTPP_VALUE_MASK 0x20000000 ++#define OTPP_VALUE_SHIFT 29 ++#define OTPP_START_BUSY 0x80000000 ++#define OTPP_READ 0x40000000 /* HND OTP */ ++ ++/* Fields in otplayout register */ ++#define OTPL_HWRGN_OFF_MASK 0x00000FFF 
++#define OTPL_HWRGN_OFF_SHIFT 0 ++#define OTPL_WRAP_REVID_MASK 0x00F80000 ++#define OTPL_WRAP_REVID_SHIFT 19 ++#define OTPL_WRAP_TYPE_MASK 0x00070000 ++#define OTPL_WRAP_TYPE_SHIFT 16 ++#define OTPL_WRAP_TYPE_65NM 0 ++#define OTPL_WRAP_TYPE_40NM 1 ++ ++/* otplayout reg corerev >= 36 */ ++#define OTP_CISFORMAT_NEW 0x80000000 ++ ++/* Opcodes for OTPP_OC field */ ++#define OTPPOC_READ 0 ++#define OTPPOC_BIT_PROG 1 ++#define OTPPOC_VERIFY 3 ++#define OTPPOC_INIT 4 ++#define OTPPOC_SET 5 ++#define OTPPOC_RESET 6 ++#define OTPPOC_OCST 7 ++#define OTPPOC_ROW_LOCK 8 ++#define OTPPOC_PRESCN_TEST 9 ++ ++/* Opcodes for OTPP_OC field (40NM) */ ++#define OTPPOC_READ_40NM 0 ++#define OTPPOC_PROG_ENABLE_40NM 1 ++#define OTPPOC_PROG_DISABLE_40NM 2 ++#define OTPPOC_VERIFY_40NM 3 ++#define OTPPOC_WORD_VERIFY_1_40NM 4 ++#define OTPPOC_ROW_LOCK_40NM 5 ++#define OTPPOC_STBY_40NM 6 ++#define OTPPOC_WAKEUP_40NM 7 ++#define OTPPOC_WORD_VERIFY_0_40NM 8 ++#define OTPPOC_PRESCN_TEST_40NM 9 ++#define OTPPOC_BIT_PROG_40NM 10 ++#define OTPPOC_WORDPROG_40NM 11 ++#define OTPPOC_BURNIN_40NM 12 ++#define OTPPOC_AUTORELOAD_40NM 13 ++#define OTPPOC_OVST_READ_40NM 14 ++#define OTPPOC_OVST_PROG_40NM 15 ++ ++/* Fields in otplayoutextension */ ++#define OTPLAYOUTEXT_FUSE_MASK 0x3FF ++ ++ ++/* Jtagm characteristics that appeared at a given corerev */ ++#define JTAGM_CREV_OLD 10 /* Old command set, 16bit max IR */ ++#define JTAGM_CREV_IRP 22 /* Able to do pause-ir */ ++#define JTAGM_CREV_RTI 28 /* Able to do return-to-idle */ ++ ++/* jtagcmd */ ++#define JCMD_START 0x80000000 ++#define JCMD_BUSY 0x80000000 ++#define JCMD_STATE_MASK 0x60000000 ++#define JCMD_STATE_TLR 0x00000000 /* Test-logic-reset */ ++#define JCMD_STATE_PIR 0x20000000 /* Pause IR */ ++#define JCMD_STATE_PDR 0x40000000 /* Pause DR */ ++#define JCMD_STATE_RTI 0x60000000 /* Run-test-idle */ ++#define JCMD0_ACC_MASK 0x0000f000 ++#define JCMD0_ACC_IRDR 0x00000000 ++#define JCMD0_ACC_DR 0x00001000 ++#define JCMD0_ACC_IR 0x00002000 ++#define 
JCMD0_ACC_RESET 0x00003000 ++#define JCMD0_ACC_IRPDR 0x00004000 ++#define JCMD0_ACC_PDR 0x00005000 ++#define JCMD0_IRW_MASK 0x00000f00 ++#define JCMD_ACC_MASK 0x000f0000 /* Changes for corerev 11 */ ++#define JCMD_ACC_IRDR 0x00000000 ++#define JCMD_ACC_DR 0x00010000 ++#define JCMD_ACC_IR 0x00020000 ++#define JCMD_ACC_RESET 0x00030000 ++#define JCMD_ACC_IRPDR 0x00040000 ++#define JCMD_ACC_PDR 0x00050000 ++#define JCMD_ACC_PIR 0x00060000 ++#define JCMD_ACC_IRDR_I 0x00070000 /* rev 28: return to run-test-idle */ ++#define JCMD_ACC_DR_I 0x00080000 /* rev 28: return to run-test-idle */ ++#define JCMD_IRW_MASK 0x00001f00 ++#define JCMD_IRW_SHIFT 8 ++#define JCMD_DRW_MASK 0x0000003f ++ ++/* jtagctrl */ ++#define JCTRL_FORCE_CLK 4 /* Force clock */ ++#define JCTRL_EXT_EN 2 /* Enable external targets */ ++#define JCTRL_EN 1 /* Enable Jtag master */ ++ ++/* Fields in clkdiv */ ++#define CLKD_SFLASH 0x0f000000 ++#define CLKD_SFLASH_SHIFT 24 ++#define CLKD_OTP 0x000f0000 ++#define CLKD_OTP_SHIFT 16 ++#define CLKD_JTAG 0x00000f00 ++#define CLKD_JTAG_SHIFT 8 ++#define CLKD_UART 0x000000ff ++ ++#define CLKD2_SROM 0x00000003 ++ ++/* intstatus/intmask */ ++#define CI_GPIO 0x00000001 /* gpio intr */ ++#define CI_EI 0x00000002 /* extif intr (corerev >= 3) */ ++#define CI_TEMP 0x00000004 /* temp. 
ctrl intr (corerev >= 15) */ ++#define CI_SIRQ 0x00000008 /* serial IRQ intr (corerev >= 15) */ ++#define CI_ECI 0x00000010 /* eci intr (corerev >= 21) */ ++#define CI_PMU 0x00000020 /* pmu intr (corerev >= 21) */ ++#define CI_UART 0x00000040 /* uart intr (corerev >= 21) */ ++#define CI_WDRESET 0x80000000 /* watchdog reset occurred */ ++ ++/* slow_clk_ctl */ ++#define SCC_SS_MASK 0x00000007 /* slow clock source mask */ ++#define SCC_SS_LPO 0x00000000 /* source of slow clock is LPO */ ++#define SCC_SS_XTAL 0x00000001 /* source of slow clock is crystal */ ++#define SCC_SS_PCI 0x00000002 /* source of slow clock is PCI */ ++#define SCC_LF 0x00000200 /* LPOFreqSel, 1: 160Khz, 0: 32KHz */ ++#define SCC_LP 0x00000400 /* LPOPowerDown, 1: LPO is disabled, ++ * 0: LPO is enabled ++ */ ++#define SCC_FS 0x00000800 /* ForceSlowClk, 1: sb/cores running on slow clock, ++ * 0: power logic control ++ */ ++#define SCC_IP 0x00001000 /* IgnorePllOffReq, 1/0: power logic ignores/honors ++ * PLL clock disable requests from core ++ */ ++#define SCC_XC 0x00002000 /* XtalControlEn, 1/0: power logic does/doesn't ++ * disable crystal when appropriate ++ */ ++#define SCC_XP 0x00004000 /* XtalPU (RO), 1/0: crystal running/disabled */ ++#define SCC_CD_MASK 0xffff0000 /* ClockDivider (SlowClk = 1/(4+divisor)) */ ++#define SCC_CD_SHIFT 16 ++ ++/* system_clk_ctl */ ++#define SYCC_IE 0x00000001 /* ILPen: Enable Idle Low Power */ ++#define SYCC_AE 0x00000002 /* ALPen: Enable Active Low Power */ ++#define SYCC_FP 0x00000004 /* ForcePLLOn */ ++#define SYCC_AR 0x00000008 /* Force ALP (or HT if ALPen is not set */ ++#define SYCC_HR 0x00000010 /* Force HT */ ++#define SYCC_CD_MASK 0xffff0000 /* ClkDiv (ILP = 1/(4 * (divisor + 1)) */ ++#define SYCC_CD_SHIFT 16 ++ ++/* Indirect backplane access */ ++#define BPIA_BYTEEN 0x0000000f ++#define BPIA_SZ1 0x00000001 ++#define BPIA_SZ2 0x00000003 ++#define BPIA_SZ4 0x00000007 ++#define BPIA_SZ8 0x0000000f ++#define BPIA_WRITE 0x00000100 ++#define BPIA_START 
0x00000200 ++#define BPIA_BUSY 0x00000200 ++#define BPIA_ERROR 0x00000400 ++ ++/* pcmcia/prog/flash_config */ ++#define CF_EN 0x00000001 /* enable */ ++#define CF_EM_MASK 0x0000000e /* mode */ ++#define CF_EM_SHIFT 1 ++#define CF_EM_FLASH 0 /* flash/asynchronous mode */ ++#define CF_EM_SYNC 2 /* synchronous mode */ ++#define CF_EM_PCMCIA 4 /* pcmcia mode */ ++#define CF_DS 0x00000010 /* destsize: 0=8bit, 1=16bit */ ++#define CF_BS 0x00000020 /* byteswap */ ++#define CF_CD_MASK 0x000000c0 /* clock divider */ ++#define CF_CD_SHIFT 6 ++#define CF_CD_DIV2 0x00000000 /* backplane/2 */ ++#define CF_CD_DIV3 0x00000040 /* backplane/3 */ ++#define CF_CD_DIV4 0x00000080 /* backplane/4 */ ++#define CF_CE 0x00000100 /* clock enable */ ++#define CF_SB 0x00000200 /* size/bytestrobe (synch only) */ ++ ++/* pcmcia_memwait */ ++#define PM_W0_MASK 0x0000003f /* waitcount0 */ ++#define PM_W1_MASK 0x00001f00 /* waitcount1 */ ++#define PM_W1_SHIFT 8 ++#define PM_W2_MASK 0x001f0000 /* waitcount2 */ ++#define PM_W2_SHIFT 16 ++#define PM_W3_MASK 0x1f000000 /* waitcount3 */ ++#define PM_W3_SHIFT 24 ++ ++/* pcmcia_attrwait */ ++#define PA_W0_MASK 0x0000003f /* waitcount0 */ ++#define PA_W1_MASK 0x00001f00 /* waitcount1 */ ++#define PA_W1_SHIFT 8 ++#define PA_W2_MASK 0x001f0000 /* waitcount2 */ ++#define PA_W2_SHIFT 16 ++#define PA_W3_MASK 0x1f000000 /* waitcount3 */ ++#define PA_W3_SHIFT 24 ++ ++/* pcmcia_iowait */ ++#define PI_W0_MASK 0x0000003f /* waitcount0 */ ++#define PI_W1_MASK 0x00001f00 /* waitcount1 */ ++#define PI_W1_SHIFT 8 ++#define PI_W2_MASK 0x001f0000 /* waitcount2 */ ++#define PI_W2_SHIFT 16 ++#define PI_W3_MASK 0x1f000000 /* waitcount3 */ ++#define PI_W3_SHIFT 24 ++ ++/* prog_waitcount */ ++#define PW_W0_MASK 0x0000001f /* waitcount0 */ ++#define PW_W1_MASK 0x00001f00 /* waitcount1 */ ++#define PW_W1_SHIFT 8 ++#define PW_W2_MASK 0x001f0000 /* waitcount2 */ ++#define PW_W2_SHIFT 16 ++#define PW_W3_MASK 0x1f000000 /* waitcount3 */ ++#define PW_W3_SHIFT 24 ++ ++#define PW_W0 
0x0000000c ++#define PW_W1 0x00000a00 ++#define PW_W2 0x00020000 ++#define PW_W3 0x01000000 ++ ++/* flash_waitcount */ ++#define FW_W0_MASK 0x0000003f /* waitcount0 */ ++#define FW_W1_MASK 0x00001f00 /* waitcount1 */ ++#define FW_W1_SHIFT 8 ++#define FW_W2_MASK 0x001f0000 /* waitcount2 */ ++#define FW_W2_SHIFT 16 ++#define FW_W3_MASK 0x1f000000 /* waitcount3 */ ++#define FW_W3_SHIFT 24 ++ ++/* When Srom support present, fields in sromcontrol */ ++#define SRC_START 0x80000000 ++#define SRC_BUSY 0x80000000 ++#define SRC_OPCODE 0x60000000 ++#define SRC_OP_READ 0x00000000 ++#define SRC_OP_WRITE 0x20000000 ++#define SRC_OP_WRDIS 0x40000000 ++#define SRC_OP_WREN 0x60000000 ++#define SRC_OTPSEL 0x00000010 ++#define SRC_LOCK 0x00000008 ++#define SRC_SIZE_MASK 0x00000006 ++#define SRC_SIZE_1K 0x00000000 ++#define SRC_SIZE_4K 0x00000002 ++#define SRC_SIZE_16K 0x00000004 ++#define SRC_SIZE_SHIFT 1 ++#define SRC_PRESENT 0x00000001 ++ ++/* Fields in pmucontrol */ ++#define PCTL_ILP_DIV_MASK 0xffff0000 ++#define PCTL_ILP_DIV_SHIFT 16 ++#define PCTL_PLL_PLLCTL_UPD 0x00000400 /* rev 2 */ ++#define PCTL_NOILP_ON_WAIT 0x00000200 /* rev 1 */ ++#define PCTL_HT_REQ_EN 0x00000100 ++#define PCTL_ALP_REQ_EN 0x00000080 ++#define PCTL_XTALFREQ_MASK 0x0000007c ++#define PCTL_XTALFREQ_SHIFT 2 ++#define PCTL_ILP_DIV_EN 0x00000002 ++#define PCTL_LPO_SEL 0x00000001 ++ ++/* Fields in clkstretch */ ++#define CSTRETCH_HT 0xffff0000 ++#define CSTRETCH_ALP 0x0000ffff ++ ++/* gpiotimerval */ ++#define GPIO_ONTIME_SHIFT 16 ++ ++/* clockcontrol_n */ ++#define CN_N1_MASK 0x3f /* n1 control */ ++#define CN_N2_MASK 0x3f00 /* n2 control */ ++#define CN_N2_SHIFT 8 ++#define CN_PLLC_MASK 0xf0000 /* pll control */ ++#define CN_PLLC_SHIFT 16 ++ ++/* clockcontrol_sb/pci/uart */ ++#define CC_M1_MASK 0x3f /* m1 control */ ++#define CC_M2_MASK 0x3f00 /* m2 control */ ++#define CC_M2_SHIFT 8 ++#define CC_M3_MASK 0x3f0000 /* m3 control */ ++#define CC_M3_SHIFT 16 ++#define CC_MC_MASK 0x1f000000 /* mux control */ 
++#define CC_MC_SHIFT 24 ++ ++/* N3M Clock control magic field values */ ++#define CC_F6_2 0x02 /* A factor of 2 in */ ++#define CC_F6_3 0x03 /* 6-bit fields like */ ++#define CC_F6_4 0x05 /* N1, M1 or M3 */ ++#define CC_F6_5 0x09 ++#define CC_F6_6 0x11 ++#define CC_F6_7 0x21 ++ ++#define CC_F5_BIAS 5 /* 5-bit fields get this added */ ++ ++#define CC_MC_BYPASS 0x08 ++#define CC_MC_M1 0x04 ++#define CC_MC_M1M2 0x02 ++#define CC_MC_M1M2M3 0x01 ++#define CC_MC_M1M3 0x11 ++ ++/* Type 2 Clock control magic field values */ ++#define CC_T2_BIAS 2 /* n1, n2, m1 & m3 bias */ ++#define CC_T2M2_BIAS 3 /* m2 bias */ ++ ++#define CC_T2MC_M1BYP 1 ++#define CC_T2MC_M2BYP 2 ++#define CC_T2MC_M3BYP 4 ++ ++/* Type 6 Clock control magic field values */ ++#define CC_T6_MMASK 1 /* bits of interest in m */ ++#define CC_T6_M0 120000000 /* sb clock for m = 0 */ ++#define CC_T6_M1 100000000 /* sb clock for m = 1 */ ++#define SB2MIPS_T6(sb) (2 * (sb)) ++ ++/* Common clock base */ ++#define CC_CLOCK_BASE1 24000000 /* Half the clock freq */ ++#define CC_CLOCK_BASE2 12500000 /* Alternate crystal on some PLLs */ ++ ++/* Clock control values for 200MHz in 5350 */ ++#define CLKC_5350_N 0x0311 ++#define CLKC_5350_M 0x04020009 ++ ++/* Flash types in the chipcommon capabilities register */ ++#define FLASH_NONE 0x000 /* No flash */ ++#define SFLASH_ST 0x100 /* ST serial flash */ ++#define SFLASH_AT 0x200 /* Atmel serial flash */ ++#define NFLASH 0x300 ++#define PFLASH 0x700 /* Parallel flash */ ++#define QSPIFLASH_ST 0x800 ++#define QSPIFLASH_AT 0x900 ++ ++/* Bits in the ExtBus config registers */ ++#define CC_CFG_EN 0x0001 /* Enable */ ++#define CC_CFG_EM_MASK 0x000e /* Extif Mode */ ++#define CC_CFG_EM_ASYNC 0x0000 /* Async/Parallel flash */ ++#define CC_CFG_EM_SYNC 0x0002 /* Synchronous */ ++#define CC_CFG_EM_PCMCIA 0x0004 /* PCMCIA */ ++#define CC_CFG_EM_IDE 0x0006 /* IDE */ ++#define CC_CFG_DS 0x0010 /* Data size, 0=8bit, 1=16bit */ ++#define CC_CFG_CD_MASK 0x00e0 /* Sync: Clock divisor, rev >= 
20 */ ++#define CC_CFG_CE 0x0100 /* Sync: Clock enable, rev >= 20 */ ++#define CC_CFG_SB 0x0200 /* Sync: Size/Bytestrobe, rev >= 20 */ ++#define CC_CFG_IS 0x0400 /* Extif Sync Clk Select, rev >= 20 */ ++ ++/* ExtBus address space */ ++#define CC_EB_BASE 0x1a000000 /* Chipc ExtBus base address */ ++#define CC_EB_PCMCIA_MEM 0x1a000000 /* PCMCIA 0 memory base address */ ++#define CC_EB_PCMCIA_IO 0x1a200000 /* PCMCIA 0 I/O base address */ ++#define CC_EB_PCMCIA_CFG 0x1a400000 /* PCMCIA 0 config base address */ ++#define CC_EB_IDE 0x1a800000 /* IDE memory base */ ++#define CC_EB_PCMCIA1_MEM 0x1a800000 /* PCMCIA 1 memory base address */ ++#define CC_EB_PCMCIA1_IO 0x1aa00000 /* PCMCIA 1 I/O base address */ ++#define CC_EB_PCMCIA1_CFG 0x1ac00000 /* PCMCIA 1 config base address */ ++#define CC_EB_PROGIF 0x1b000000 /* ProgIF Async/Sync base address */ ++ ++ ++/* Start/busy bit in flashcontrol */ ++#define SFLASH_OPCODE 0x000000ff ++#define SFLASH_ACTION 0x00000700 ++#define SFLASH_CS_ACTIVE 0x00001000 /* Chip Select Active, rev >= 20 */ ++#define SFLASH_START 0x80000000 ++#define SFLASH_BUSY SFLASH_START ++ ++/* flashcontrol action codes */ ++#define SFLASH_ACT_OPONLY 0x0000 /* Issue opcode only */ ++#define SFLASH_ACT_OP1D 0x0100 /* opcode + 1 data byte */ ++#define SFLASH_ACT_OP3A 0x0200 /* opcode + 3 addr bytes */ ++#define SFLASH_ACT_OP3A1D 0x0300 /* opcode + 3 addr & 1 data bytes */ ++#define SFLASH_ACT_OP3A4D 0x0400 /* opcode + 3 addr & 4 data bytes */ ++#define SFLASH_ACT_OP3A4X4D 0x0500 /* opcode + 3 addr, 4 don't care & 4 data bytes */ ++#define SFLASH_ACT_OP3A1X4D 0x0700 /* opcode + 3 addr, 1 don't care & 4 data bytes */ ++ ++/* flashcontrol action+opcodes for ST flashes */ ++#define SFLASH_ST_WREN 0x0006 /* Write Enable */ ++#define SFLASH_ST_WRDIS 0x0004 /* Write Disable */ ++#define SFLASH_ST_RDSR 0x0105 /* Read Status Register */ ++#define SFLASH_ST_WRSR 0x0101 /* Write Status Register */ ++#define SFLASH_ST_READ 0x0303 /* Read Data Bytes */ ++#define 
SFLASH_ST_PP 0x0302 /* Page Program */ ++#define SFLASH_ST_SE 0x02d8 /* Sector Erase */ ++#define SFLASH_ST_BE 0x00c7 /* Bulk Erase */ ++#define SFLASH_ST_DP 0x00b9 /* Deep Power-down */ ++#define SFLASH_ST_RES 0x03ab /* Read Electronic Signature */ ++#define SFLASH_ST_CSA 0x1000 /* Keep chip select asserted */ ++#define SFLASH_ST_SSE 0x0220 /* Sub-sector Erase */ ++ ++#define SFLASH_MXIC_RDID 0x0390 /* Read Manufacture ID */ ++#define SFLASH_MXIC_MFID 0xc2 /* MXIC Manufacture ID */ ++ ++/* Status register bits for ST flashes */ ++#define SFLASH_ST_WIP 0x01 /* Write In Progress */ ++#define SFLASH_ST_WEL 0x02 /* Write Enable Latch */ ++#define SFLASH_ST_BP_MASK 0x1c /* Block Protect */ ++#define SFLASH_ST_BP_SHIFT 2 ++#define SFLASH_ST_SRWD 0x80 /* Status Register Write Disable */ ++ ++/* flashcontrol action+opcodes for Atmel flashes */ ++#define SFLASH_AT_READ 0x07e8 ++#define SFLASH_AT_PAGE_READ 0x07d2 ++#define SFLASH_AT_BUF1_READ ++#define SFLASH_AT_BUF2_READ ++#define SFLASH_AT_STATUS 0x01d7 ++#define SFLASH_AT_BUF1_WRITE 0x0384 ++#define SFLASH_AT_BUF2_WRITE 0x0387 ++#define SFLASH_AT_BUF1_ERASE_PROGRAM 0x0283 ++#define SFLASH_AT_BUF2_ERASE_PROGRAM 0x0286 ++#define SFLASH_AT_BUF1_PROGRAM 0x0288 ++#define SFLASH_AT_BUF2_PROGRAM 0x0289 ++#define SFLASH_AT_PAGE_ERASE 0x0281 ++#define SFLASH_AT_BLOCK_ERASE 0x0250 ++#define SFLASH_AT_BUF1_WRITE_ERASE_PROGRAM 0x0382 ++#define SFLASH_AT_BUF2_WRITE_ERASE_PROGRAM 0x0385 ++#define SFLASH_AT_BUF1_LOAD 0x0253 ++#define SFLASH_AT_BUF2_LOAD 0x0255 ++#define SFLASH_AT_BUF1_COMPARE 0x0260 ++#define SFLASH_AT_BUF2_COMPARE 0x0261 ++#define SFLASH_AT_BUF1_REPROGRAM 0x0258 ++#define SFLASH_AT_BUF2_REPROGRAM 0x0259 ++ ++/* Status register bits for Atmel flashes */ ++#define SFLASH_AT_READY 0x80 ++#define SFLASH_AT_MISMATCH 0x40 ++#define SFLASH_AT_ID_MASK 0x38 ++#define SFLASH_AT_ID_SHIFT 3 ++ ++/* SPI register bits, corerev >= 37 */ ++#define GSIO_START 0x80000000 ++#define GSIO_BUSY GSIO_START ++ ++/* ++ * These are the UART 
port assignments, expressed as offsets from the base ++ * register. These assignments should hold for any serial port based on ++ * a 8250, 16450, or 16550(A). ++ */ ++ ++#define UART_RX 0 /* In: Receive buffer (DLAB=0) */ ++#define UART_TX 0 /* Out: Transmit buffer (DLAB=0) */ ++#define UART_DLL 0 /* Out: Divisor Latch Low (DLAB=1) */ ++#define UART_IER 1 /* In/Out: Interrupt Enable Register (DLAB=0) */ ++#define UART_DLM 1 /* Out: Divisor Latch High (DLAB=1) */ ++#define UART_IIR 2 /* In: Interrupt Identity Register */ ++#define UART_FCR 2 /* Out: FIFO Control Register */ ++#define UART_LCR 3 /* Out: Line Control Register */ ++#define UART_MCR 4 /* Out: Modem Control Register */ ++#define UART_LSR 5 /* In: Line Status Register */ ++#define UART_MSR 6 /* In: Modem Status Register */ ++#define UART_SCR 7 /* I/O: Scratch Register */ ++#define UART_LCR_DLAB 0x80 /* Divisor latch access bit */ ++#define UART_LCR_WLEN8 0x03 /* Word length: 8 bits */ ++#define UART_MCR_OUT2 0x08 /* MCR GPIO out 2 */ ++#define UART_MCR_LOOP 0x10 /* Enable loopback test mode */ ++#define UART_LSR_RX_FIFO 0x80 /* Receive FIFO error */ ++#define UART_LSR_TDHR 0x40 /* Data-hold-register empty */ ++#define UART_LSR_THRE 0x20 /* Transmit-hold-register empty */ ++#define UART_LSR_BREAK 0x10 /* Break interrupt */ ++#define UART_LSR_FRAMING 0x08 /* Framing error */ ++#define UART_LSR_PARITY 0x04 /* Parity error */ ++#define UART_LSR_OVERRUN 0x02 /* Overrun error */ ++#define UART_LSR_RXRDY 0x01 /* Receiver ready */ ++#define UART_FCR_FIFO_ENABLE 1 /* FIFO control register bit controlling FIFO enable/disable */ ++ ++/* Interrupt Identity Register (IIR) bits */ ++#define UART_IIR_FIFO_MASK 0xc0 /* IIR FIFO disable/enabled mask */ ++#define UART_IIR_INT_MASK 0xf /* IIR interrupt ID source */ ++#define UART_IIR_MDM_CHG 0x0 /* Modem status changed */ ++#define UART_IIR_NOINT 0x1 /* No interrupt pending */ ++#define UART_IIR_THRE 0x2 /* THR empty */ ++#define UART_IIR_RCVD_DATA 0x4 /* Received data 
available */ ++#define UART_IIR_RCVR_STATUS 0x6 /* Receiver status */ ++#define UART_IIR_CHAR_TIME 0xc /* Character time */ ++ ++/* Interrupt Enable Register (IER) bits */ ++#define UART_IER_EDSSI 8 /* enable modem status interrupt */ ++#define UART_IER_ELSI 4 /* enable receiver line status interrupt */ ++#define UART_IER_ETBEI 2 /* enable transmitter holding register empty interrupt */ ++#define UART_IER_ERBFI 1 /* enable data available interrupt */ ++ ++/* pmustatus */ ++#define PST_EXTLPOAVAIL 0x0100 ++#define PST_WDRESET 0x0080 ++#define PST_INTPEND 0x0040 ++#define PST_SBCLKST 0x0030 ++#define PST_SBCLKST_ILP 0x0010 ++#define PST_SBCLKST_ALP 0x0020 ++#define PST_SBCLKST_HT 0x0030 ++#define PST_ALPAVAIL 0x0008 ++#define PST_HTAVAIL 0x0004 ++#define PST_RESINIT 0x0003 ++ ++/* pmucapabilities */ ++#define PCAP_REV_MASK 0x000000ff ++#define PCAP_RC_MASK 0x00001f00 ++#define PCAP_RC_SHIFT 8 ++#define PCAP_TC_MASK 0x0001e000 ++#define PCAP_TC_SHIFT 13 ++#define PCAP_PC_MASK 0x001e0000 ++#define PCAP_PC_SHIFT 17 ++#define PCAP_VC_MASK 0x01e00000 ++#define PCAP_VC_SHIFT 21 ++#define PCAP_CC_MASK 0x1e000000 ++#define PCAP_CC_SHIFT 25 ++#define PCAP5_PC_MASK 0x003e0000 /* PMU corerev >= 5 */ ++#define PCAP5_PC_SHIFT 17 ++#define PCAP5_VC_MASK 0x07c00000 ++#define PCAP5_VC_SHIFT 22 ++#define PCAP5_CC_MASK 0xf8000000 ++#define PCAP5_CC_SHIFT 27 ++ ++/* PMU Resource Request Timer registers */ ++/* This is based on PmuRev0 */ ++#define PRRT_TIME_MASK 0x03ff ++#define PRRT_INTEN 0x0400 ++#define PRRT_REQ_ACTIVE 0x0800 ++#define PRRT_ALP_REQ 0x1000 ++#define PRRT_HT_REQ 0x2000 ++#define PRRT_HQ_REQ 0x4000 ++ ++/* PMU resource bit position */ ++#define PMURES_BIT(bit) (1 << (bit)) ++ ++/* PMU resource number limit */ ++#define PMURES_MAX_RESNUM 30 ++ ++/* PMU chip control0 register */ ++#define PMU_CHIPCTL0 0 ++ ++/* clock req types */ ++#define PMU_CC1_CLKREQ_TYPE_SHIFT 19 ++#define PMU_CC1_CLKREQ_TYPE_MASK (1 << PMU_CC1_CLKREQ_TYPE_SHIFT) ++ ++#define 
CLKREQ_TYPE_CONFIG_OPENDRAIN 0 ++#define CLKREQ_TYPE_CONFIG_PUSHPULL 1 ++ ++/* PMU chip control1 register */ ++#define PMU_CHIPCTL1 1 ++#define PMU_CC1_RXC_DLL_BYPASS 0x00010000 ++ ++#define PMU_CC1_IF_TYPE_MASK 0x00000030 ++#define PMU_CC1_IF_TYPE_RMII 0x00000000 ++#define PMU_CC1_IF_TYPE_MII 0x00000010 ++#define PMU_CC1_IF_TYPE_RGMII 0x00000020 ++ ++#define PMU_CC1_SW_TYPE_MASK 0x000000c0 ++#define PMU_CC1_SW_TYPE_EPHY 0x00000000 ++#define PMU_CC1_SW_TYPE_EPHYMII 0x00000040 ++#define PMU_CC1_SW_TYPE_EPHYRMII 0x00000080 ++#define PMU_CC1_SW_TYPE_RGMII 0x000000c0 ++ ++/* PMU chip control2 register */ ++#define PMU_CHIPCTL2 2 ++ ++/* PMU chip control3 register */ ++#define PMU_CHIPCTL3 3 ++ ++#define PMU_CC3_ENABLE_SDIO_WAKEUP_SHIFT 19 ++#define PMU_CC3_ENABLE_RF_SHIFT 22 ++#define PMU_CC3_RF_DISABLE_IVALUE_SHIFT 23 ++ ++ ++/* PMU corerev and chip specific PLL controls. ++ * PMU_PLL_XX where is PMU corerev and is an arbitrary number ++ * to differentiate different PLLs controlled by the same PMU rev. 
++ */ ++/* pllcontrol registers */ ++/* PDIV, div_phy, div_arm, div_adc, dith_sel, ioff, kpd_scale, lsb_sel, mash_sel, lf_c & lf_r */ ++#define PMU0_PLL0_PLLCTL0 0 ++#define PMU0_PLL0_PC0_PDIV_MASK 1 ++#define PMU0_PLL0_PC0_PDIV_FREQ 25000 ++#define PMU0_PLL0_PC0_DIV_ARM_MASK 0x00000038 ++#define PMU0_PLL0_PC0_DIV_ARM_SHIFT 3 ++#define PMU0_PLL0_PC0_DIV_ARM_BASE 8 ++ ++/* PC0_DIV_ARM for PLLOUT_ARM */ ++#define PMU0_PLL0_PC0_DIV_ARM_110MHZ 0 ++#define PMU0_PLL0_PC0_DIV_ARM_97_7MHZ 1 ++#define PMU0_PLL0_PC0_DIV_ARM_88MHZ 2 ++#define PMU0_PLL0_PC0_DIV_ARM_80MHZ 3 /* Default */ ++#define PMU0_PLL0_PC0_DIV_ARM_73_3MHZ 4 ++#define PMU0_PLL0_PC0_DIV_ARM_67_7MHZ 5 ++#define PMU0_PLL0_PC0_DIV_ARM_62_9MHZ 6 ++#define PMU0_PLL0_PC0_DIV_ARM_58_6MHZ 7 ++ ++/* Wildcard base, stop_mod, en_lf_tp, en_cal & lf_r2 */ ++#define PMU0_PLL0_PLLCTL1 1 ++#define PMU0_PLL0_PC1_WILD_INT_MASK 0xf0000000 ++#define PMU0_PLL0_PC1_WILD_INT_SHIFT 28 ++#define PMU0_PLL0_PC1_WILD_FRAC_MASK 0x0fffff00 ++#define PMU0_PLL0_PC1_WILD_FRAC_SHIFT 8 ++#define PMU0_PLL0_PC1_STOP_MOD 0x00000040 ++ ++/* Wildcard base, vco_calvar, vco_swc, vco_var_selref, vso_ical & vco_sel_avdd */ ++#define PMU0_PLL0_PLLCTL2 2 ++#define PMU0_PLL0_PC2_WILD_INT_MASK 0xf ++#define PMU0_PLL0_PC2_WILD_INT_SHIFT 4 ++ ++/* pllcontrol registers */ ++/* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ ++#define PMU1_PLL0_PLLCTL0 0 ++#define PMU1_PLL0_PC0_P1DIV_MASK 0x00f00000 ++#define PMU1_PLL0_PC0_P1DIV_SHIFT 20 ++#define PMU1_PLL0_PC0_P2DIV_MASK 0x0f000000 ++#define PMU1_PLL0_PC0_P2DIV_SHIFT 24 ++ ++/* mdiv */ ++#define PMU1_PLL0_PLLCTL1 1 ++#define PMU1_PLL0_PC1_M1DIV_MASK 0x000000ff ++#define PMU1_PLL0_PC1_M1DIV_SHIFT 0 ++#define PMU1_PLL0_PC1_M2DIV_MASK 0x0000ff00 ++#define PMU1_PLL0_PC1_M2DIV_SHIFT 8 ++#define PMU1_PLL0_PC1_M3DIV_MASK 0x00ff0000 ++#define PMU1_PLL0_PC1_M3DIV_SHIFT 16 ++#define PMU1_PLL0_PC1_M4DIV_MASK 0xff000000 ++#define PMU1_PLL0_PC1_M4DIV_SHIFT 24 ++#define PMU1_PLL0_PC1_M4DIV_BY_9 
9 ++#define PMU1_PLL0_PC1_M4DIV_BY_18 0x12 ++#define PMU1_PLL0_PC1_M4DIV_BY_36 0x24 ++ ++#define DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT 8 ++#define DOT11MAC_880MHZ_CLK_DIVISOR_MASK (0xFF << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) ++#define DOT11MAC_880MHZ_CLK_DIVISOR_VAL (0xE << DOT11MAC_880MHZ_CLK_DIVISOR_SHIFT) ++ ++/* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ ++#define PMU1_PLL0_PLLCTL2 2 ++#define PMU1_PLL0_PC2_M5DIV_MASK 0x000000ff ++#define PMU1_PLL0_PC2_M5DIV_SHIFT 0 ++#define PMU1_PLL0_PC2_M5DIV_BY_12 0xc ++#define PMU1_PLL0_PC2_M5DIV_BY_18 0x12 ++#define PMU1_PLL0_PC2_M5DIV_BY_36 0x24 ++#define PMU1_PLL0_PC2_M6DIV_MASK 0x0000ff00 ++#define PMU1_PLL0_PC2_M6DIV_SHIFT 8 ++#define PMU1_PLL0_PC2_M6DIV_BY_18 0x12 ++#define PMU1_PLL0_PC2_M6DIV_BY_36 0x24 ++#define PMU1_PLL0_PC2_NDIV_MODE_MASK 0x000e0000 ++#define PMU1_PLL0_PC2_NDIV_MODE_SHIFT 17 ++#define PMU1_PLL0_PC2_NDIV_MODE_MASH 1 ++#define PMU1_PLL0_PC2_NDIV_MODE_MFB 2 /* recommended for 4319 */ ++#define PMU1_PLL0_PC2_NDIV_INT_MASK 0x1ff00000 ++#define PMU1_PLL0_PC2_NDIV_INT_SHIFT 20 ++ ++/* ndiv_frac */ ++#define PMU1_PLL0_PLLCTL3 3 ++#define PMU1_PLL0_PC3_NDIV_FRAC_MASK 0x00ffffff ++#define PMU1_PLL0_PC3_NDIV_FRAC_SHIFT 0 ++ ++/* pll_ctrl */ ++#define PMU1_PLL0_PLLCTL4 4 ++ ++/* pll_ctrl, vco_rng, clkdrive_ch */ ++#define PMU1_PLL0_PLLCTL5 5 ++#define PMU1_PLL0_PC5_CLK_DRV_MASK 0xffffff00 ++#define PMU1_PLL0_PC5_CLK_DRV_SHIFT 8 ++ ++/* PMU rev 2 control words */ ++#define PMU2_PHY_PLL_PLLCTL 4 ++#define PMU2_SI_PLL_PLLCTL 10 ++ ++/* PMU rev 2 */ ++/* pllcontrol registers */ ++/* ndiv_pwrdn, pwrdn_ch, refcomp_pwrdn, dly_ch, p1div, p2div, _bypass_sdmod */ ++#define PMU2_PLL_PLLCTL0 0 ++#define PMU2_PLL_PC0_P1DIV_MASK 0x00f00000 ++#define PMU2_PLL_PC0_P1DIV_SHIFT 20 ++#define PMU2_PLL_PC0_P2DIV_MASK 0x0f000000 ++#define PMU2_PLL_PC0_P2DIV_SHIFT 24 ++ ++/* mdiv */ ++#define PMU2_PLL_PLLCTL1 1 ++#define PMU2_PLL_PC1_M1DIV_MASK 0x000000ff ++#define PMU2_PLL_PC1_M1DIV_SHIFT 0 ++#define PMU2_PLL_PC1_M2DIV_MASK 
0x0000ff00 ++#define PMU2_PLL_PC1_M2DIV_SHIFT 8 ++#define PMU2_PLL_PC1_M3DIV_MASK 0x00ff0000 ++#define PMU2_PLL_PC1_M3DIV_SHIFT 16 ++#define PMU2_PLL_PC1_M4DIV_MASK 0xff000000 ++#define PMU2_PLL_PC1_M4DIV_SHIFT 24 ++ ++/* mdiv, ndiv_dither_mfb, ndiv_mode, ndiv_int */ ++#define PMU2_PLL_PLLCTL2 2 ++#define PMU2_PLL_PC2_M5DIV_MASK 0x000000ff ++#define PMU2_PLL_PC2_M5DIV_SHIFT 0 ++#define PMU2_PLL_PC2_M6DIV_MASK 0x0000ff00 ++#define PMU2_PLL_PC2_M6DIV_SHIFT 8 ++#define PMU2_PLL_PC2_NDIV_MODE_MASK 0x000e0000 ++#define PMU2_PLL_PC2_NDIV_MODE_SHIFT 17 ++#define PMU2_PLL_PC2_NDIV_INT_MASK 0x1ff00000 ++#define PMU2_PLL_PC2_NDIV_INT_SHIFT 20 ++ ++/* ndiv_frac */ ++#define PMU2_PLL_PLLCTL3 3 ++#define PMU2_PLL_PC3_NDIV_FRAC_MASK 0x00ffffff ++#define PMU2_PLL_PC3_NDIV_FRAC_SHIFT 0 ++ ++/* pll_ctrl */ ++#define PMU2_PLL_PLLCTL4 4 ++ ++/* pll_ctrl, vco_rng, clkdrive_ch */ ++#define PMU2_PLL_PLLCTL5 5 ++#define PMU2_PLL_PC5_CLKDRIVE_CH1_MASK 0x00000f00 ++#define PMU2_PLL_PC5_CLKDRIVE_CH1_SHIFT 8 ++#define PMU2_PLL_PC5_CLKDRIVE_CH2_MASK 0x0000f000 ++#define PMU2_PLL_PC5_CLKDRIVE_CH2_SHIFT 12 ++#define PMU2_PLL_PC5_CLKDRIVE_CH3_MASK 0x000f0000 ++#define PMU2_PLL_PC5_CLKDRIVE_CH3_SHIFT 16 ++#define PMU2_PLL_PC5_CLKDRIVE_CH4_MASK 0x00f00000 ++#define PMU2_PLL_PC5_CLKDRIVE_CH4_SHIFT 20 ++#define PMU2_PLL_PC5_CLKDRIVE_CH5_MASK 0x0f000000 ++#define PMU2_PLL_PC5_CLKDRIVE_CH5_SHIFT 24 ++#define PMU2_PLL_PC5_CLKDRIVE_CH6_MASK 0xf0000000 ++#define PMU2_PLL_PC5_CLKDRIVE_CH6_SHIFT 28 ++ ++/* PMU rev 5 (& 6) */ ++#define PMU5_PLL_P1P2_OFF 0 ++#define PMU5_PLL_P1_MASK 0x0f000000 ++#define PMU5_PLL_P1_SHIFT 24 ++#define PMU5_PLL_P2_MASK 0x00f00000 ++#define PMU5_PLL_P2_SHIFT 20 ++#define PMU5_PLL_M14_OFF 1 ++#define PMU5_PLL_MDIV_MASK 0x000000ff ++#define PMU5_PLL_MDIV_WIDTH 8 ++#define PMU5_PLL_NM5_OFF 2 ++#define PMU5_PLL_NDIV_MASK 0xfff00000 ++#define PMU5_PLL_NDIV_SHIFT 20 ++#define PMU5_PLL_NDIV_MODE_MASK 0x000e0000 ++#define PMU5_PLL_NDIV_MODE_SHIFT 17 ++#define PMU5_PLL_FMAB_OFF 3 
++#define PMU5_PLL_MRAT_MASK 0xf0000000 ++#define PMU5_PLL_MRAT_SHIFT 28 ++#define PMU5_PLL_ABRAT_MASK 0x08000000 ++#define PMU5_PLL_ABRAT_SHIFT 27 ++#define PMU5_PLL_FDIV_MASK 0x07ffffff ++#define PMU5_PLL_PLLCTL_OFF 4 ++#define PMU5_PLL_PCHI_OFF 5 ++#define PMU5_PLL_PCHI_MASK 0x0000003f ++ ++/* pmu XtalFreqRatio */ ++#define PMU_XTALFREQ_REG_ILPCTR_MASK 0x00001FFF ++#define PMU_XTALFREQ_REG_MEASURE_MASK 0x80000000 ++#define PMU_XTALFREQ_REG_MEASURE_SHIFT 31 ++ ++/* Divider allocation in 4716/47162/5356/5357 */ ++#define PMU5_MAINPLL_CPU 1 ++#define PMU5_MAINPLL_MEM 2 ++#define PMU5_MAINPLL_SI 3 ++ ++/* 4706 PMU */ ++#define PMU4706_MAINPLL_PLL0 0 ++#define PMU6_4706_PROCPLL_OFF 4 /* The CPU PLL */ ++#define PMU6_4706_PROC_P2DIV_MASK 0x000f0000 ++#define PMU6_4706_PROC_P2DIV_SHIFT 16 ++#define PMU6_4706_PROC_P1DIV_MASK 0x0000f000 ++#define PMU6_4706_PROC_P1DIV_SHIFT 12 ++#define PMU6_4706_PROC_NDIV_INT_MASK 0x00000ff8 ++#define PMU6_4706_PROC_NDIV_INT_SHIFT 3 ++#define PMU6_4706_PROC_NDIV_MODE_MASK 0x00000007 ++#define PMU6_4706_PROC_NDIV_MODE_SHIFT 0 ++ ++#define PMU7_PLL_PLLCTL7 7 ++#define PMU7_PLL_CTL7_M4DIV_MASK 0xff000000 ++#define PMU7_PLL_CTL7_M4DIV_SHIFT 24 ++#define PMU7_PLL_CTL7_M4DIV_BY_6 6 ++#define PMU7_PLL_CTL7_M4DIV_BY_12 0xc ++#define PMU7_PLL_CTL7_M4DIV_BY_24 0x18 ++#define PMU7_PLL_PLLCTL8 8 ++#define PMU7_PLL_CTL8_M5DIV_MASK 0x000000ff ++#define PMU7_PLL_CTL8_M5DIV_SHIFT 0 ++#define PMU7_PLL_CTL8_M5DIV_BY_8 8 ++#define PMU7_PLL_CTL8_M5DIV_BY_12 0xc ++#define PMU7_PLL_CTL8_M5DIV_BY_24 0x18 ++#define PMU7_PLL_CTL8_M6DIV_MASK 0x0000ff00 ++#define PMU7_PLL_CTL8_M6DIV_SHIFT 8 ++#define PMU7_PLL_CTL8_M6DIV_BY_12 0xc ++#define PMU7_PLL_CTL8_M6DIV_BY_24 0x18 ++#define PMU7_PLL_PLLCTL11 11 ++#define PMU7_PLL_PLLCTL11_MASK 0xffffff00 ++#define PMU7_PLL_PLLCTL11_VAL 0x22222200 ++ ++/* PMU rev 15 */ ++#define PMU15_PLL_PLLCTL0 0 ++#define PMU15_PLL_PC0_CLKSEL_MASK 0x00000003 ++#define PMU15_PLL_PC0_CLKSEL_SHIFT 0 ++#define PMU15_PLL_PC0_FREQTGT_MASK 
0x003FFFFC ++#define PMU15_PLL_PC0_FREQTGT_SHIFT 2 ++#define PMU15_PLL_PC0_PRESCALE_MASK 0x00C00000 ++#define PMU15_PLL_PC0_PRESCALE_SHIFT 22 ++#define PMU15_PLL_PC0_KPCTRL_MASK 0x07000000 ++#define PMU15_PLL_PC0_KPCTRL_SHIFT 24 ++#define PMU15_PLL_PC0_FCNTCTRL_MASK 0x38000000 ++#define PMU15_PLL_PC0_FCNTCTRL_SHIFT 27 ++#define PMU15_PLL_PC0_FDCMODE_MASK 0x40000000 ++#define PMU15_PLL_PC0_FDCMODE_SHIFT 30 ++#define PMU15_PLL_PC0_CTRLBIAS_MASK 0x80000000 ++#define PMU15_PLL_PC0_CTRLBIAS_SHIFT 31 ++ ++#define PMU15_PLL_PLLCTL1 1 ++#define PMU15_PLL_PC1_BIAS_CTLM_MASK 0x00000060 ++#define PMU15_PLL_PC1_BIAS_CTLM_SHIFT 5 ++#define PMU15_PLL_PC1_BIAS_CTLM_RST_MASK 0x00000040 ++#define PMU15_PLL_PC1_BIAS_CTLM_RST_SHIFT 6 ++#define PMU15_PLL_PC1_BIAS_SS_DIVR_MASK 0x0001FF80 ++#define PMU15_PLL_PC1_BIAS_SS_DIVR_SHIFT 7 ++#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_MASK 0x03FE0000 ++#define PMU15_PLL_PC1_BIAS_SS_RSTVAL_SHIFT 17 ++#define PMU15_PLL_PC1_BIAS_INTG_BW_MASK 0x0C000000 ++#define PMU15_PLL_PC1_BIAS_INTG_BW_SHIFT 26 ++#define PMU15_PLL_PC1_BIAS_INTG_BYP_MASK 0x10000000 ++#define PMU15_PLL_PC1_BIAS_INTG_BYP_SHIFT 28 ++#define PMU15_PLL_PC1_OPENLP_EN_MASK 0x40000000 ++#define PMU15_PLL_PC1_OPENLP_EN_SHIFT 30 ++ ++#define PMU15_PLL_PLLCTL2 2 ++#define PMU15_PLL_PC2_CTEN_MASK 0x00000001 ++#define PMU15_PLL_PC2_CTEN_SHIFT 0 ++ ++#define PMU15_PLL_PLLCTL3 3 ++#define PMU15_PLL_PC3_DITHER_EN_MASK 0x00000001 ++#define PMU15_PLL_PC3_DITHER_EN_SHIFT 0 ++#define PMU15_PLL_PC3_DCOCTLSP_MASK 0xFE000000 ++#define PMU15_PLL_PC3_DCOCTLSP_SHIFT 25 ++#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_MASK 0x01 ++#define PMU15_PLL_PC3_DCOCTLSP_DIV2EN_SHIFT 0 ++#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_MASK 0x02 ++#define PMU15_PLL_PC3_DCOCTLSP_CH0EN_SHIFT 1 ++#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_MASK 0x04 ++#define PMU15_PLL_PC3_DCOCTLSP_CH1EN_SHIFT 2 ++#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_MASK 0x18 ++#define PMU15_PLL_PC3_DCOCTLSP_CH0SEL_SHIFT 3 ++#define PMU15_PLL_PC3_DCOCTLSP_CH1SEL_MASK 0x60 ++#define 
PMU15_PLL_PC3_DCOCTLSP_CH1SEL_SHIFT 5 ++#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV1 0 ++#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV2 1 ++#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV3 2 ++#define PMU15_PLL_PC3_DCOCTLSP_CHSEL_OUTP_DIV5 3 ++ ++#define PMU15_PLL_PLLCTL4 4 ++#define PMU15_PLL_PC4_FLLCLK1_DIV_MASK 0x00000007 ++#define PMU15_PLL_PC4_FLLCLK1_DIV_SHIFT 0 ++#define PMU15_PLL_PC4_FLLCLK2_DIV_MASK 0x00000038 ++#define PMU15_PLL_PC4_FLLCLK2_DIV_SHIFT 3 ++#define PMU15_PLL_PC4_FLLCLK3_DIV_MASK 0x000001C0 ++#define PMU15_PLL_PC4_FLLCLK3_DIV_SHIFT 6 ++#define PMU15_PLL_PC4_DBGMODE_MASK 0x00000E00 ++#define PMU15_PLL_PC4_DBGMODE_SHIFT 9 ++#define PMU15_PLL_PC4_FLL480_CTLSP_LK_MASK 0x00001000 ++#define PMU15_PLL_PC4_FLL480_CTLSP_LK_SHIFT 12 ++#define PMU15_PLL_PC4_FLL480_CTLSP_MASK 0x000FE000 ++#define PMU15_PLL_PC4_FLL480_CTLSP_SHIFT 13 ++#define PMU15_PLL_PC4_DINPOL_MASK 0x00100000 ++#define PMU15_PLL_PC4_DINPOL_SHIFT 20 ++#define PMU15_PLL_PC4_CLKOUT_PD_MASK 0x00200000 ++#define PMU15_PLL_PC4_CLKOUT_PD_SHIFT 21 ++#define PMU15_PLL_PC4_CLKDIV2_PD_MASK 0x00400000 ++#define PMU15_PLL_PC4_CLKDIV2_PD_SHIFT 22 ++#define PMU15_PLL_PC4_CLKDIV4_PD_MASK 0x00800000 ++#define PMU15_PLL_PC4_CLKDIV4_PD_SHIFT 23 ++#define PMU15_PLL_PC4_CLKDIV8_PD_MASK 0x01000000 ++#define PMU15_PLL_PC4_CLKDIV8_PD_SHIFT 24 ++#define PMU15_PLL_PC4_CLKDIV16_PD_MASK 0x02000000 ++#define PMU15_PLL_PC4_CLKDIV16_PD_SHIFT 25 ++#define PMU15_PLL_PC4_TEST_EN_MASK 0x04000000 ++#define PMU15_PLL_PC4_TEST_EN_SHIFT 26 ++ ++#define PMU15_PLL_PLLCTL5 5 ++#define PMU15_PLL_PC5_FREQTGT_MASK 0x000FFFFF ++#define PMU15_PLL_PC5_FREQTGT_SHIFT 0 ++#define PMU15_PLL_PC5_DCOCTLSP_MASK 0x07F00000 ++#define PMU15_PLL_PC5_DCOCTLSP_SHIFT 20 ++#define PMU15_PLL_PC5_PRESCALE_MASK 0x18000000 ++#define PMU15_PLL_PC5_PRESCALE_SHIFT 27 ++ ++#define PMU15_PLL_PLLCTL6 6 ++#define PMU15_PLL_PC6_FREQTGT_MASK 0x000FFFFF ++#define PMU15_PLL_PC6_FREQTGT_SHIFT 0 ++#define PMU15_PLL_PC6_DCOCTLSP_MASK 0x07F00000 ++#define 
PMU15_PLL_PC6_DCOCTLSP_SHIFT 20 ++#define PMU15_PLL_PC6_PRESCALE_MASK 0x18000000 ++#define PMU15_PLL_PC6_PRESCALE_SHIFT 27 ++ ++#define PMU15_FREQTGT_480_DEFAULT 0x19AB1 ++#define PMU15_FREQTGT_492_DEFAULT 0x1A4F5 ++#define PMU15_ARM_96MHZ 96000000 /* 96 Mhz */ ++#define PMU15_ARM_98MHZ 98400000 /* 98.4 Mhz */ ++#define PMU15_ARM_97MHZ 97000000 /* 97 Mhz */ ++ ++ ++#define PMU17_PLLCTL2_NDIVTYPE_MASK 0x00000070 ++#define PMU17_PLLCTL2_NDIVTYPE_SHIFT 4 ++ ++#define PMU17_PLLCTL2_NDIV_MODE_INT 0 ++#define PMU17_PLLCTL2_NDIV_MODE_INT1B8 1 ++#define PMU17_PLLCTL2_NDIV_MODE_MASH111 2 ++#define PMU17_PLLCTL2_NDIV_MODE_MASH111B8 3 ++ ++#define PMU17_PLLCTL0_BBPLL_PWRDWN 0 ++#define PMU17_PLLCTL0_BBPLL_DRST 3 ++#define PMU17_PLLCTL0_BBPLL_DISBL_CLK 8 ++ ++/* PLL usage in 4716/47162 */ ++#define PMU4716_MAINPLL_PLL0 12 ++ ++/* PLL usage in 5356/5357 */ ++#define PMU5356_MAINPLL_PLL0 0 ++#define PMU5357_MAINPLL_PLL0 0 ++ ++/* 4716/47162 resources */ ++#define RES4716_PROC_PLL_ON 0x00000040 ++#define RES4716_PROC_HT_AVAIL 0x00000080 ++ ++/* 4716/4717/4718 Chip specific ChipControl register bits */ ++#define CCTRL_471X_I2S_PINS_ENABLE 0x0080 /* I2S pins off by default, shared w/ pflash */ ++ ++/* 5357 Chip specific ChipControl register bits */ ++/* 2nd - 32-bit reg */ ++#define CCTRL_5357_I2S_PINS_ENABLE 0x00040000 /* I2S pins enable */ ++#define CCTRL_5357_I2CSPI_PINS_ENABLE 0x00080000 /* I2C/SPI pins enable */ ++ ++/* 5354 resources */ ++#define RES5354_EXT_SWITCHER_PWM 0 /* 0x00001 */ ++#define RES5354_BB_SWITCHER_PWM 1 /* 0x00002 */ ++#define RES5354_BB_SWITCHER_BURST 2 /* 0x00004 */ ++#define RES5354_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */ ++#define RES5354_ILP_REQUEST 4 /* 0x00010 */ ++#define RES5354_RADIO_SWITCHER_PWM 5 /* 0x00020 */ ++#define RES5354_RADIO_SWITCHER_BURST 6 /* 0x00040 */ ++#define RES5354_ROM_SWITCH 7 /* 0x00080 */ ++#define RES5354_PA_REF_LDO 8 /* 0x00100 */ ++#define RES5354_RADIO_LDO 9 /* 0x00200 */ ++#define RES5354_AFE_LDO 10 /* 0x00400 */ ++#define 
RES5354_PLL_LDO 11 /* 0x00800 */ ++#define RES5354_BG_FILTBYP 12 /* 0x01000 */ ++#define RES5354_TX_FILTBYP 13 /* 0x02000 */ ++#define RES5354_RX_FILTBYP 14 /* 0x04000 */ ++#define RES5354_XTAL_PU 15 /* 0x08000 */ ++#define RES5354_XTAL_EN 16 /* 0x10000 */ ++#define RES5354_BB_PLL_FILTBYP 17 /* 0x20000 */ ++#define RES5354_RF_PLL_FILTBYP 18 /* 0x40000 */ ++#define RES5354_BB_PLL_PU 19 /* 0x80000 */ ++ ++/* 5357 Chip specific ChipControl register bits */ ++#define CCTRL5357_EXTPA (1<<14) /* extPA in ChipControl 1, bit 14 */ ++#define CCTRL5357_ANT_MUX_2o3 (1<<15) /* 2o3 in ChipControl 1, bit 15 */ ++#define CCTRL5357_NFLASH (1<<16) /* Nandflash in ChipControl 1, bit 16 */ ++ ++/* 4328 resources */ ++#define RES4328_EXT_SWITCHER_PWM 0 /* 0x00001 */ ++#define RES4328_BB_SWITCHER_PWM 1 /* 0x00002 */ ++#define RES4328_BB_SWITCHER_BURST 2 /* 0x00004 */ ++#define RES4328_BB_EXT_SWITCHER_BURST 3 /* 0x00008 */ ++#define RES4328_ILP_REQUEST 4 /* 0x00010 */ ++#define RES4328_RADIO_SWITCHER_PWM 5 /* 0x00020 */ ++#define RES4328_RADIO_SWITCHER_BURST 6 /* 0x00040 */ ++#define RES4328_ROM_SWITCH 7 /* 0x00080 */ ++#define RES4328_PA_REF_LDO 8 /* 0x00100 */ ++#define RES4328_RADIO_LDO 9 /* 0x00200 */ ++#define RES4328_AFE_LDO 10 /* 0x00400 */ ++#define RES4328_PLL_LDO 11 /* 0x00800 */ ++#define RES4328_BG_FILTBYP 12 /* 0x01000 */ ++#define RES4328_TX_FILTBYP 13 /* 0x02000 */ ++#define RES4328_RX_FILTBYP 14 /* 0x04000 */ ++#define RES4328_XTAL_PU 15 /* 0x08000 */ ++#define RES4328_XTAL_EN 16 /* 0x10000 */ ++#define RES4328_BB_PLL_FILTBYP 17 /* 0x20000 */ ++#define RES4328_RF_PLL_FILTBYP 18 /* 0x40000 */ ++#define RES4328_BB_PLL_PU 19 /* 0x80000 */ ++ ++/* 4325 A0/A1 resources */ ++#define RES4325_BUCK_BOOST_BURST 0 /* 0x00000001 */ ++#define RES4325_CBUCK_BURST 1 /* 0x00000002 */ ++#define RES4325_CBUCK_PWM 2 /* 0x00000004 */ ++#define RES4325_CLDO_CBUCK_BURST 3 /* 0x00000008 */ ++#define RES4325_CLDO_CBUCK_PWM 4 /* 0x00000010 */ ++#define RES4325_BUCK_BOOST_PWM 5 /* 0x00000020 */ 
++#define RES4325_ILP_REQUEST 6 /* 0x00000040 */ ++#define RES4325_ABUCK_BURST 7 /* 0x00000080 */ ++#define RES4325_ABUCK_PWM 8 /* 0x00000100 */ ++#define RES4325_LNLDO1_PU 9 /* 0x00000200 */ ++#define RES4325_OTP_PU 10 /* 0x00000400 */ ++#define RES4325_LNLDO3_PU 11 /* 0x00000800 */ ++#define RES4325_LNLDO4_PU 12 /* 0x00001000 */ ++#define RES4325_XTAL_PU 13 /* 0x00002000 */ ++#define RES4325_ALP_AVAIL 14 /* 0x00004000 */ ++#define RES4325_RX_PWRSW_PU 15 /* 0x00008000 */ ++#define RES4325_TX_PWRSW_PU 16 /* 0x00010000 */ ++#define RES4325_RFPLL_PWRSW_PU 17 /* 0x00020000 */ ++#define RES4325_LOGEN_PWRSW_PU 18 /* 0x00040000 */ ++#define RES4325_AFE_PWRSW_PU 19 /* 0x00080000 */ ++#define RES4325_BBPLL_PWRSW_PU 20 /* 0x00100000 */ ++#define RES4325_HT_AVAIL 21 /* 0x00200000 */ ++ ++/* 4325 B0/C0 resources */ ++#define RES4325B0_CBUCK_LPOM 1 /* 0x00000002 */ ++#define RES4325B0_CBUCK_BURST 2 /* 0x00000004 */ ++#define RES4325B0_CBUCK_PWM 3 /* 0x00000008 */ ++#define RES4325B0_CLDO_PU 4 /* 0x00000010 */ ++ ++/* 4325 C1 resources */ ++#define RES4325C1_LNLDO2_PU 12 /* 0x00001000 */ ++ ++/* 4325 chip-specific ChipStatus register bits */ ++#define CST4325_SPROM_OTP_SEL_MASK 0x00000003 ++#define CST4325_DEFCIS_SEL 0 /* OTP is powered up, use def. 
CIS, no SPROM */ ++#define CST4325_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ ++#define CST4325_OTP_SEL 2 /* OTP is powered up, no SPROM */ ++#define CST4325_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */ ++#define CST4325_SDIO_USB_MODE_MASK 0x00000004 ++#define CST4325_SDIO_USB_MODE_SHIFT 2 ++#define CST4325_RCAL_VALID_MASK 0x00000008 ++#define CST4325_RCAL_VALID_SHIFT 3 ++#define CST4325_RCAL_VALUE_MASK 0x000001f0 ++#define CST4325_RCAL_VALUE_SHIFT 4 ++#define CST4325_PMUTOP_2B_MASK 0x00000200 /* 1 for 2b, 0 for to 2a */ ++#define CST4325_PMUTOP_2B_SHIFT 9 ++ ++#define RES4329_RESERVED0 0 /* 0x00000001 */ ++#define RES4329_CBUCK_LPOM 1 /* 0x00000002 */ ++#define RES4329_CBUCK_BURST 2 /* 0x00000004 */ ++#define RES4329_CBUCK_PWM 3 /* 0x00000008 */ ++#define RES4329_CLDO_PU 4 /* 0x00000010 */ ++#define RES4329_PALDO_PU 5 /* 0x00000020 */ ++#define RES4329_ILP_REQUEST 6 /* 0x00000040 */ ++#define RES4329_RESERVED7 7 /* 0x00000080 */ ++#define RES4329_RESERVED8 8 /* 0x00000100 */ ++#define RES4329_LNLDO1_PU 9 /* 0x00000200 */ ++#define RES4329_OTP_PU 10 /* 0x00000400 */ ++#define RES4329_RESERVED11 11 /* 0x00000800 */ ++#define RES4329_LNLDO2_PU 12 /* 0x00001000 */ ++#define RES4329_XTAL_PU 13 /* 0x00002000 */ ++#define RES4329_ALP_AVAIL 14 /* 0x00004000 */ ++#define RES4329_RX_PWRSW_PU 15 /* 0x00008000 */ ++#define RES4329_TX_PWRSW_PU 16 /* 0x00010000 */ ++#define RES4329_RFPLL_PWRSW_PU 17 /* 0x00020000 */ ++#define RES4329_LOGEN_PWRSW_PU 18 /* 0x00040000 */ ++#define RES4329_AFE_PWRSW_PU 19 /* 0x00080000 */ ++#define RES4329_BBPLL_PWRSW_PU 20 /* 0x00100000 */ ++#define RES4329_HT_AVAIL 21 /* 0x00200000 */ ++ ++#define CST4329_SPROM_OTP_SEL_MASK 0x00000003 ++#define CST4329_DEFCIS_SEL 0 /* OTP is powered up, use def. 
CIS, no SPROM */ ++#define CST4329_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ ++#define CST4329_OTP_SEL 2 /* OTP is powered up, no SPROM */ ++#define CST4329_OTP_PWRDN 3 /* OTP is powered down, SPROM is present */ ++#define CST4329_SPI_SDIO_MODE_MASK 0x00000004 ++#define CST4329_SPI_SDIO_MODE_SHIFT 2 ++ ++/* 4312 chip-specific ChipStatus register bits */ ++#define CST4312_SPROM_OTP_SEL_MASK 0x00000003 ++#define CST4312_DEFCIS_SEL 0 /* OTP is powered up, use def. CIS, no SPROM */ ++#define CST4312_SPROM_SEL 1 /* OTP is powered up, SPROM is present */ ++#define CST4312_OTP_SEL 2 /* OTP is powered up, no SPROM */ ++#define CST4312_OTP_BAD 3 /* OTP is broken, SPROM is present */ ++ ++/* 4312 resources (all PMU chips with little memory constraint) */ ++#define RES4312_SWITCHER_BURST 0 /* 0x00000001 */ ++#define RES4312_SWITCHER_PWM 1 /* 0x00000002 */ ++#define RES4312_PA_REF_LDO 2 /* 0x00000004 */ ++#define RES4312_CORE_LDO_BURST 3 /* 0x00000008 */ ++#define RES4312_CORE_LDO_PWM 4 /* 0x00000010 */ ++#define RES4312_RADIO_LDO 5 /* 0x00000020 */ ++#define RES4312_ILP_REQUEST 6 /* 0x00000040 */ ++#define RES4312_BG_FILTBYP 7 /* 0x00000080 */ ++#define RES4312_TX_FILTBYP 8 /* 0x00000100 */ ++#define RES4312_RX_FILTBYP 9 /* 0x00000200 */ ++#define RES4312_XTAL_PU 10 /* 0x00000400 */ ++#define RES4312_ALP_AVAIL 11 /* 0x00000800 */ ++#define RES4312_BB_PLL_FILTBYP 12 /* 0x00001000 */ ++#define RES4312_RF_PLL_FILTBYP 13 /* 0x00002000 */ ++#define RES4312_HT_AVAIL 14 /* 0x00004000 */ ++ ++/* 4322 resources */ ++#define RES4322_RF_LDO 0 ++#define RES4322_ILP_REQUEST 1 ++#define RES4322_XTAL_PU 2 ++#define RES4322_ALP_AVAIL 3 ++#define RES4322_SI_PLL_ON 4 ++#define RES4322_HT_SI_AVAIL 5 ++#define RES4322_PHY_PLL_ON 6 ++#define RES4322_HT_PHY_AVAIL 7 ++#define RES4322_OTP_PU 8 ++ ++/* 4322 chip-specific ChipStatus register bits */ ++#define CST4322_XTAL_FREQ_20_40MHZ 0x00000020 ++#define CST4322_SPROM_OTP_SEL_MASK 0x000000c0 ++#define CST4322_SPROM_OTP_SEL_SHIFT 6 
++#define CST4322_NO_SPROM_OTP 0 /* no OTP, no SPROM */ ++#define CST4322_SPROM_PRESENT 1 /* SPROM is present */ ++#define CST4322_OTP_PRESENT 2 /* OTP is present */ ++#define CST4322_PCI_OR_USB 0x00000100 ++#define CST4322_BOOT_MASK 0x00000600 ++#define CST4322_BOOT_SHIFT 9 ++#define CST4322_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */ ++#define CST4322_BOOT_FROM_ROM 1 /* boot from ROM */ ++#define CST4322_BOOT_FROM_FLASH 2 /* boot from FLASH */ ++#define CST4322_BOOT_FROM_INVALID 3 ++#define CST4322_ILP_DIV_EN 0x00000800 ++#define CST4322_FLASH_TYPE_MASK 0x00001000 ++#define CST4322_FLASH_TYPE_SHIFT 12 ++#define CST4322_FLASH_TYPE_SHIFT_ST 0 /* ST serial FLASH */ ++#define CST4322_FLASH_TYPE_SHIFT_ATMEL 1 /* ATMEL flash */ ++#define CST4322_ARM_TAP_SEL 0x00002000 ++#define CST4322_RES_INIT_MODE_MASK 0x0000c000 ++#define CST4322_RES_INIT_MODE_SHIFT 14 ++#define CST4322_RES_INIT_MODE_ILPAVAIL 0 /* resinitmode: ILP available */ ++#define CST4322_RES_INIT_MODE_ILPREQ 1 /* resinitmode: ILP request */ ++#define CST4322_RES_INIT_MODE_ALPAVAIL 2 /* resinitmode: ALP available */ ++#define CST4322_RES_INIT_MODE_HTAVAIL 3 /* resinitmode: HT available */ ++#define CST4322_PCIPLLCLK_GATING 0x00010000 ++#define CST4322_CLK_SWITCH_PCI_TO_ALP 0x00020000 ++#define CST4322_PCI_CARDBUS_MODE 0x00040000 ++ ++/* 43224 chip-specific ChipControl register bits */ ++#define CCTRL43224_GPIO_TOGGLE 0x8000 /* gpio[3:0] pins as btcoex or s/w gpio */ ++#define CCTRL_43224A0_12MA_LED_DRIVE 0x00F000F0 /* 12 mA drive strength */ ++#define CCTRL_43224B0_12MA_LED_DRIVE 0xF0 /* 12 mA drive strength for later 43224s */ ++ ++/* 43236 resources */ ++#define RES43236_REGULATOR 0 ++#define RES43236_ILP_REQUEST 1 ++#define RES43236_XTAL_PU 2 ++#define RES43236_ALP_AVAIL 3 ++#define RES43236_SI_PLL_ON 4 ++#define RES43236_HT_SI_AVAIL 5 ++ ++/* 43236 chip-specific ChipControl register bits */ ++#define CCTRL43236_BT_COEXIST (1<<0) /* 0 disable */ ++#define CCTRL43236_SECI (1<<1) /* 0 SECI is disabled 
(JATG functional) */ ++#define CCTRL43236_EXT_LNA (1<<2) /* 0 disable */ ++#define CCTRL43236_ANT_MUX_2o3 (1<<3) /* 2o3 mux, chipcontrol bit 3 */ ++#define CCTRL43236_GSIO (1<<4) /* 0 disable */ ++ ++/* 43236 Chip specific ChipStatus register bits */ ++#define CST43236_SFLASH_MASK 0x00000040 ++#define CST43236_OTP_SEL_MASK 0x00000080 ++#define CST43236_OTP_SEL_SHIFT 7 ++#define CST43236_HSIC_MASK 0x00000100 /* USB/HSIC */ ++#define CST43236_BP_CLK 0x00000200 /* 120/96Mbps */ ++#define CST43236_BOOT_MASK 0x00001800 ++#define CST43236_BOOT_SHIFT 11 ++#define CST43236_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */ ++#define CST43236_BOOT_FROM_ROM 1 /* boot from ROM */ ++#define CST43236_BOOT_FROM_FLASH 2 /* boot from FLASH */ ++#define CST43236_BOOT_FROM_INVALID 3 ++ ++/* 43237 resources */ ++#define RES43237_REGULATOR 0 ++#define RES43237_ILP_REQUEST 1 ++#define RES43237_XTAL_PU 2 ++#define RES43237_ALP_AVAIL 3 ++#define RES43237_SI_PLL_ON 4 ++#define RES43237_HT_SI_AVAIL 5 ++ ++/* 43237 chip-specific ChipControl register bits */ ++#define CCTRL43237_BT_COEXIST (1<<0) /* 0 disable */ ++#define CCTRL43237_SECI (1<<1) /* 0 SECI is disabled (JATG functional) */ ++#define CCTRL43237_EXT_LNA (1<<2) /* 0 disable */ ++#define CCTRL43237_ANT_MUX_2o3 (1<<3) /* 2o3 mux, chipcontrol bit 3 */ ++#define CCTRL43237_GSIO (1<<4) /* 0 disable */ ++ ++/* 43237 Chip specific ChipStatus register bits */ ++#define CST43237_SFLASH_MASK 0x00000040 ++#define CST43237_OTP_SEL_MASK 0x00000080 ++#define CST43237_OTP_SEL_SHIFT 7 ++#define CST43237_HSIC_MASK 0x00000100 /* USB/HSIC */ ++#define CST43237_BP_CLK 0x00000200 /* 120/96Mbps */ ++#define CST43237_BOOT_MASK 0x00001800 ++#define CST43237_BOOT_SHIFT 11 ++#define CST43237_BOOT_FROM_SRAM 0 /* boot from SRAM, ARM in reset */ ++#define CST43237_BOOT_FROM_ROM 1 /* boot from ROM */ ++#define CST43237_BOOT_FROM_FLASH 2 /* boot from FLASH */ ++#define CST43237_BOOT_FROM_INVALID 3 ++ ++/* 43239 resources */ ++#define RES43239_OTP_PU 9 
++#define RES43239_MACPHY_CLKAVAIL 23 ++#define RES43239_HT_AVAIL 24 ++ ++/* 43239 Chip specific ChipStatus register bits */ ++#define CST43239_SPROM_MASK 0x00000002 ++#define CST43239_SFLASH_MASK 0x00000004 ++#define CST43239_RES_INIT_MODE_SHIFT 7 ++#define CST43239_RES_INIT_MODE_MASK 0x000001f0 ++#define CST43239_CHIPMODE_SDIOD(cs) ((cs) & (1 << 15)) /* SDIO || gSPI */ ++#define CST43239_CHIPMODE_USB20D(cs) (~(cs) & (1 << 15)) /* USB || USBDA */ ++#define CST43239_CHIPMODE_SDIO(cs) (((cs) & (1 << 0)) == 0) /* SDIO */ ++#define CST43239_CHIPMODE_GSPI(cs) (((cs) & (1 << 0)) == (1 << 0)) /* gSPI */ ++ ++/* 4324 resources */ ++#define RES4324_OTP_PU 10 ++#define RES4324_HT_AVAIL 29 ++#define RES4324_MACPHY_CLKAVAIL 30 ++ ++/* 4324 Chip specific ChipStatus register bits */ ++#define CST4324_SPROM_MASK 0x00000080 ++#define CST4324_SFLASH_MASK 0x00400000 ++#define CST4324_RES_INIT_MODE_SHIFT 10 ++#define CST4324_RES_INIT_MODE_MASK 0x00000c00 ++#define CST4324_CHIPMODE_MASK 0x7 ++#define CST4324_CHIPMODE_SDIOD(cs) ((~(cs)) & (1 << 2)) /* SDIO || gSPI */ ++#define CST4324_CHIPMODE_USB20D(cs) (((cs) & CST4324_CHIPMODE_MASK) == 0x6) /* USB || USBDA */ ++ ++/* 4331 resources */ ++#define RES4331_REGULATOR 0 ++#define RES4331_ILP_REQUEST 1 ++#define RES4331_XTAL_PU 2 ++#define RES4331_ALP_AVAIL 3 ++#define RES4331_SI_PLL_ON 4 ++#define RES4331_HT_SI_AVAIL 5 ++ ++/* 4331 chip-specific ChipControl register bits */ ++#define CCTRL4331_BT_COEXIST (1<<0) /* 0 disable */ ++#define CCTRL4331_SECI (1<<1) /* 0 SECI is disabled (JATG functional) */ ++#define CCTRL4331_EXT_LNA_G (1<<2) /* 0 disable */ ++#define CCTRL4331_SPROM_GPIO13_15 (1<<3) /* sprom/gpio13-15 mux */ ++#define CCTRL4331_EXTPA_EN (1<<4) /* 0 ext pa disable, 1 ext pa enabled */ ++#define CCTRL4331_GPIOCLK_ON_SPROMCS (1<<5) /* set drive out GPIO_CLK on sprom_cs pin */ ++#define CCTRL4331_PCIE_MDIO_ON_SPROMCS (1<<6) /* use sprom_cs pin as PCIE mdio interface */ ++#define CCTRL4331_EXTPA_ON_GPIO2_5 (1<<7) /* aband extpa 
will be at gpio2/5 and sprom_dout */ ++#define CCTRL4331_OVR_PIPEAUXCLKEN (1<<8) /* override core control on pipe_AuxClkEnable */ ++#define CCTRL4331_OVR_PIPEAUXPWRDOWN (1<<9) /* override core control on pipe_AuxPowerDown */ ++#define CCTRL4331_PCIE_AUXCLKEN (1<<10) /* pcie_auxclkenable */ ++#define CCTRL4331_PCIE_PIPE_PLLDOWN (1<<11) /* pcie_pipe_pllpowerdown */ ++#define CCTRL4331_EXTPA_EN2 (1<<12) /* 0 ext pa disable, 1 ext pa enabled */ ++#define CCTRL4331_EXT_LNA_A (1<<13) /* 0 disable */ ++#define CCTRL4331_BT_SHD0_ON_GPIO4 (1<<16) /* enable bt_shd0 at gpio4 */ ++#define CCTRL4331_BT_SHD1_ON_GPIO5 (1<<17) /* enable bt_shd1 at gpio5 */ ++#define CCTRL4331_EXTPA_ANA_EN (1<<24) /* 0 ext pa disable, 1 ext pa enabled */ ++ ++/* 4331 Chip specific ChipStatus register bits */ ++#define CST4331_XTAL_FREQ 0x00000001 /* crystal frequency 20/40Mhz */ ++#define CST4331_SPROM_OTP_SEL_MASK 0x00000006 ++#define CST4331_SPROM_OTP_SEL_SHIFT 1 ++#define CST4331_SPROM_PRESENT 0x00000002 ++#define CST4331_OTP_PRESENT 0x00000004 ++#define CST4331_LDO_RF 0x00000008 ++#define CST4331_LDO_PAR 0x00000010 ++ ++/* 4315 resource */ ++#define RES4315_CBUCK_LPOM 1 /* 0x00000002 */ ++#define RES4315_CBUCK_BURST 2 /* 0x00000004 */ ++#define RES4315_CBUCK_PWM 3 /* 0x00000008 */ ++#define RES4315_CLDO_PU 4 /* 0x00000010 */ ++#define RES4315_PALDO_PU 5 /* 0x00000020 */ ++#define RES4315_ILP_REQUEST 6 /* 0x00000040 */ ++#define RES4315_LNLDO1_PU 9 /* 0x00000200 */ ++#define RES4315_OTP_PU 10 /* 0x00000400 */ ++#define RES4315_LNLDO2_PU 12 /* 0x00001000 */ ++#define RES4315_XTAL_PU 13 /* 0x00002000 */ ++#define RES4315_ALP_AVAIL 14 /* 0x00004000 */ ++#define RES4315_RX_PWRSW_PU 15 /* 0x00008000 */ ++#define RES4315_TX_PWRSW_PU 16 /* 0x00010000 */ ++#define RES4315_RFPLL_PWRSW_PU 17 /* 0x00020000 */ ++#define RES4315_LOGEN_PWRSW_PU 18 /* 0x00040000 */ ++#define RES4315_AFE_PWRSW_PU 19 /* 0x00080000 */ ++#define RES4315_BBPLL_PWRSW_PU 20 /* 0x00100000 */ ++#define RES4315_HT_AVAIL 21 /* 0x00200000 
*/ ++ ++/* 4315 chip-specific ChipStatus register bits */ ++#define CST4315_SPROM_OTP_SEL_MASK 0x00000003 /* gpio [7:6], SDIO CIS selection */ ++#define CST4315_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */ ++#define CST4315_SPROM_SEL 0x00000001 /* use SPROM, OTP is powered up */ ++#define CST4315_OTP_SEL 0x00000002 /* use OTP, OTP is powered up */ ++#define CST4315_OTP_PWRDN 0x00000003 /* use SPROM, OTP is powered down */ ++#define CST4315_SDIO_MODE 0x00000004 /* gpio [8], sdio/usb mode */ ++#define CST4315_RCAL_VALID 0x00000008 ++#define CST4315_RCAL_VALUE_MASK 0x000001f0 ++#define CST4315_RCAL_VALUE_SHIFT 4 ++#define CST4315_PALDO_EXTPNP 0x00000200 /* PALDO is configured with external PNP */ ++#define CST4315_CBUCK_MODE_MASK 0x00000c00 ++#define CST4315_CBUCK_MODE_BURST 0x00000400 ++#define CST4315_CBUCK_MODE_LPBURST 0x00000c00 ++ ++/* 4319 resources */ ++#define RES4319_CBUCK_LPOM 1 /* 0x00000002 */ ++#define RES4319_CBUCK_BURST 2 /* 0x00000004 */ ++#define RES4319_CBUCK_PWM 3 /* 0x00000008 */ ++#define RES4319_CLDO_PU 4 /* 0x00000010 */ ++#define RES4319_PALDO_PU 5 /* 0x00000020 */ ++#define RES4319_ILP_REQUEST 6 /* 0x00000040 */ ++#define RES4319_LNLDO1_PU 9 /* 0x00000200 */ ++#define RES4319_OTP_PU 10 /* 0x00000400 */ ++#define RES4319_LNLDO2_PU 12 /* 0x00001000 */ ++#define RES4319_XTAL_PU 13 /* 0x00002000 */ ++#define RES4319_ALP_AVAIL 14 /* 0x00004000 */ ++#define RES4319_RX_PWRSW_PU 15 /* 0x00008000 */ ++#define RES4319_TX_PWRSW_PU 16 /* 0x00010000 */ ++#define RES4319_RFPLL_PWRSW_PU 17 /* 0x00020000 */ ++#define RES4319_LOGEN_PWRSW_PU 18 /* 0x00040000 */ ++#define RES4319_AFE_PWRSW_PU 19 /* 0x00080000 */ ++#define RES4319_BBPLL_PWRSW_PU 20 /* 0x00100000 */ ++#define RES4319_HT_AVAIL 21 /* 0x00200000 */ ++ ++/* 4319 chip-specific ChipStatus register bits */ ++#define CST4319_SPI_CPULESSUSB 0x00000001 ++#define CST4319_SPI_CLK_POL 0x00000002 ++#define CST4319_SPI_CLK_PH 0x00000008 ++#define CST4319_SPROM_OTP_SEL_MASK 0x000000c0 /* gpio 
[7:6], SDIO CIS selection */ ++#define CST4319_SPROM_OTP_SEL_SHIFT 6 ++#define CST4319_DEFCIS_SEL 0x00000000 /* use default CIS, OTP is powered up */ ++#define CST4319_SPROM_SEL 0x00000040 /* use SPROM, OTP is powered up */ ++#define CST4319_OTP_SEL 0x00000080 /* use OTP, OTP is powered up */ ++#define CST4319_OTP_PWRDN 0x000000c0 /* use SPROM, OTP is powered down */ ++#define CST4319_SDIO_USB_MODE 0x00000100 /* gpio [8], sdio/usb mode */ ++#define CST4319_REMAP_SEL_MASK 0x00000600 ++#define CST4319_ILPDIV_EN 0x00000800 ++#define CST4319_XTAL_PD_POL 0x00001000 ++#define CST4319_LPO_SEL 0x00002000 ++#define CST4319_RES_INIT_MODE 0x0000c000 ++#define CST4319_PALDO_EXTPNP 0x00010000 /* PALDO is configured with external PNP */ ++#define CST4319_CBUCK_MODE_MASK 0x00060000 ++#define CST4319_CBUCK_MODE_BURST 0x00020000 ++#define CST4319_CBUCK_MODE_LPBURST 0x00060000 ++#define CST4319_RCAL_VALID 0x01000000 ++#define CST4319_RCAL_VALUE_MASK 0x3e000000 ++#define CST4319_RCAL_VALUE_SHIFT 25 ++ ++#define PMU1_PLL0_CHIPCTL0 0 ++#define PMU1_PLL0_CHIPCTL1 1 ++#define PMU1_PLL0_CHIPCTL2 2 ++#define CCTL_4319USB_XTAL_SEL_MASK 0x00180000 ++#define CCTL_4319USB_XTAL_SEL_SHIFT 19 ++#define CCTL_4319USB_48MHZ_PLL_SEL 1 ++#define CCTL_4319USB_24MHZ_PLL_SEL 2 ++ ++/* PMU resources for 4336 */ ++#define RES4336_CBUCK_LPOM 0 ++#define RES4336_CBUCK_BURST 1 ++#define RES4336_CBUCK_LP_PWM 2 ++#define RES4336_CBUCK_PWM 3 ++#define RES4336_CLDO_PU 4 ++#define RES4336_DIS_INT_RESET_PD 5 ++#define RES4336_ILP_REQUEST 6 ++#define RES4336_LNLDO_PU 7 ++#define RES4336_LDO3P3_PU 8 ++#define RES4336_OTP_PU 9 ++#define RES4336_XTAL_PU 10 ++#define RES4336_ALP_AVAIL 11 ++#define RES4336_RADIO_PU 12 ++#define RES4336_BG_PU 13 ++#define RES4336_VREG1p4_PU_PU 14 ++#define RES4336_AFE_PWRSW_PU 15 ++#define RES4336_RX_PWRSW_PU 16 ++#define RES4336_TX_PWRSW_PU 17 ++#define RES4336_BB_PWRSW_PU 18 ++#define RES4336_SYNTH_PWRSW_PU 19 ++#define RES4336_MISC_PWRSW_PU 20 ++#define RES4336_LOGEN_PWRSW_PU 21 
++#define RES4336_BBPLL_PWRSW_PU 22 ++#define RES4336_MACPHY_CLKAVAIL 23 ++#define RES4336_HT_AVAIL 24 ++#define RES4336_RSVD 25 ++ ++/* 4336 chip-specific ChipStatus register bits */ ++#define CST4336_SPI_MODE_MASK 0x00000001 ++#define CST4336_SPROM_PRESENT 0x00000002 ++#define CST4336_OTP_PRESENT 0x00000004 ++#define CST4336_ARMREMAP_0 0x00000008 ++#define CST4336_ILPDIV_EN_MASK 0x00000010 ++#define CST4336_ILPDIV_EN_SHIFT 4 ++#define CST4336_XTAL_PD_POL_MASK 0x00000020 ++#define CST4336_XTAL_PD_POL_SHIFT 5 ++#define CST4336_LPO_SEL_MASK 0x00000040 ++#define CST4336_LPO_SEL_SHIFT 6 ++#define CST4336_RES_INIT_MODE_MASK 0x00000180 ++#define CST4336_RES_INIT_MODE_SHIFT 7 ++#define CST4336_CBUCK_MODE_MASK 0x00000600 ++#define CST4336_CBUCK_MODE_SHIFT 9 ++ ++/* 4336 Chip specific PMU ChipControl register bits */ ++#define PCTL_4336_SERIAL_ENAB (1 << 24) ++ ++/* 4330 resources */ ++#define RES4330_CBUCK_LPOM 0 ++#define RES4330_CBUCK_BURST 1 ++#define RES4330_CBUCK_LP_PWM 2 ++#define RES4330_CBUCK_PWM 3 ++#define RES4330_CLDO_PU 4 ++#define RES4330_DIS_INT_RESET_PD 5 ++#define RES4330_ILP_REQUEST 6 ++#define RES4330_LNLDO_PU 7 ++#define RES4330_LDO3P3_PU 8 ++#define RES4330_OTP_PU 9 ++#define RES4330_XTAL_PU 10 ++#define RES4330_ALP_AVAIL 11 ++#define RES4330_RADIO_PU 12 ++#define RES4330_BG_PU 13 ++#define RES4330_VREG1p4_PU_PU 14 ++#define RES4330_AFE_PWRSW_PU 15 ++#define RES4330_RX_PWRSW_PU 16 ++#define RES4330_TX_PWRSW_PU 17 ++#define RES4330_BB_PWRSW_PU 18 ++#define RES4330_SYNTH_PWRSW_PU 19 ++#define RES4330_MISC_PWRSW_PU 20 ++#define RES4330_LOGEN_PWRSW_PU 21 ++#define RES4330_BBPLL_PWRSW_PU 22 ++#define RES4330_MACPHY_CLKAVAIL 23 ++#define RES4330_HT_AVAIL 24 ++#define RES4330_5gRX_PWRSW_PU 25 ++#define RES4330_5gTX_PWRSW_PU 26 ++#define RES4330_5g_LOGEN_PWRSW_PU 27 ++ ++/* 4330 chip-specific ChipStatus register bits */ ++#define CST4330_CHIPMODE_SDIOD(cs) (((cs) & 0x7) < 6) /* SDIO || gSPI */ ++#define CST4330_CHIPMODE_USB20D(cs) (((cs) & 0x7) >= 6) /* USB || 
USBDA */ ++#define CST4330_CHIPMODE_SDIO(cs) (((cs) & 0x4) == 0) /* SDIO */ ++#define CST4330_CHIPMODE_GSPI(cs) (((cs) & 0x6) == 4) /* gSPI */ ++#define CST4330_CHIPMODE_USB(cs) (((cs) & 0x7) == 6) /* USB packet-oriented */ ++#define CST4330_CHIPMODE_USBDA(cs) (((cs) & 0x7) == 7) /* USB Direct Access */ ++#define CST4330_OTP_PRESENT 0x00000010 ++#define CST4330_LPO_AUTODET_EN 0x00000020 ++#define CST4330_ARMREMAP_0 0x00000040 ++#define CST4330_SPROM_PRESENT 0x00000080 /* takes priority over OTP if both set */ ++#define CST4330_ILPDIV_EN 0x00000100 ++#define CST4330_LPO_SEL 0x00000200 ++#define CST4330_RES_INIT_MODE_SHIFT 10 ++#define CST4330_RES_INIT_MODE_MASK 0x00000c00 ++#define CST4330_CBUCK_MODE_SHIFT 12 ++#define CST4330_CBUCK_MODE_MASK 0x00003000 ++#define CST4330_CBUCK_POWER_OK 0x00004000 ++#define CST4330_BB_PLL_LOCKED 0x00008000 ++#define SOCDEVRAM_BP_ADDR 0x1E000000 ++#define SOCDEVRAM_ARM_ADDR 0x00800000 ++ ++/* 4330 Chip specific PMU ChipControl register bits */ ++#define PCTL_4330_SERIAL_ENAB (1 << 24) ++ ++/* 4330 Chip specific ChipControl register bits */ ++#define CCTRL_4330_GPIO_SEL 0x00000001 /* 1=select GPIOs to be muxed out */ ++#define CCTRL_4330_ERCX_SEL 0x00000002 /* 1=select ERCX BT coex to be muxed out */ ++#define CCTRL_4330_SDIO_HOST_WAKE 0x00000004 /* SDIO: 1=configure GPIO0 for host wake */ ++#define CCTRL_4330_JTAG_DISABLE 0x00000008 /* 1=disable JTAG interface on mux'd pins */ ++ ++/* 4334 resources */ ++#define RES4334_LPLDO_PU 0 ++#define RES4334_RESET_PULLDN_DIS 1 ++#define RES4334_PMU_BG_PU 2 ++#define RES4334_HSIC_LDO_PU 3 ++#define RES4334_CBUCK_LPOM_PU 4 ++#define RES4334_CBUCK_PFM_PU 5 ++#define RES4334_CLDO_PU 6 ++#define RES4334_LPLDO2_LVM 7 ++#define RES4334_LNLDO_PU 8 ++#define RES4334_LDO3P3_PU 9 ++#define RES4334_OTP_PU 10 ++#define RES4334_XTAL_PU 11 ++#define RES4334_WL_PWRSW_PU 12 ++#define RES4334_LQ_AVAIL 13 ++#define RES4334_LOGIC_RET 14 ++#define RES4334_MEM_SLEEP 15 ++#define RES4334_MACPHY_RET 16 ++#define 
RES4334_WL_CORE_READY 17 ++#define RES4334_ILP_REQ 18 ++#define RES4334_ALP_AVAIL 19 ++#define RES4334_MISC_PWRSW_PU 20 ++#define RES4334_SYNTH_PWRSW_PU 21 ++#define RES4334_RX_PWRSW_PU 22 ++#define RES4334_RADIO_PU 23 ++#define RES4334_WL_PMU_PU 24 ++#define RES4334_VCO_LDO_PU 25 ++#define RES4334_AFE_LDO_PU 26 ++#define RES4334_RX_LDO_PU 27 ++#define RES4334_TX_LDO_PU 28 ++#define RES4334_HT_AVAIL 29 ++#define RES4334_MACPHY_CLK_AVAIL 30 ++ ++/* 4334 chip-specific ChipStatus register bits */ ++#define CST4334_CHIPMODE_MASK 7 ++#define CST4334_SDIO_MODE 0x00000000 ++#define CST4334_SPI_MODE 0x00000004 ++#define CST4334_HSIC_MODE 0x00000006 ++#define CST4334_BLUSB_MODE 0x00000007 ++#define CST4334_CHIPMODE_HSIC(cs) (((cs) & CST4334_CHIPMODE_MASK) == CST4334_HSIC_MODE) ++#define CST4334_OTP_PRESENT 0x00000010 ++#define CST4334_LPO_AUTODET_EN 0x00000020 ++#define CST4334_ARMREMAP_0 0x00000040 ++#define CST4334_SPROM_PRESENT 0x00000080 ++#define CST4334_ILPDIV_EN_MASK 0x00000100 ++#define CST4334_ILPDIV_EN_SHIFT 8 ++#define CST4334_LPO_SEL_MASK 0x00000200 ++#define CST4334_LPO_SEL_SHIFT 9 ++#define CST4334_RES_INIT_MODE_MASK 0x00000C00 ++#define CST4334_RES_INIT_MODE_SHIFT 10 ++ ++/* 4334 Chip specific PMU ChipControl register bits */ ++#define PCTL_4334_GPIO3_ENAB (1 << 3) ++ ++/* 4334 Chip control */ ++#define CCTRL4334_HSIC_LDO_PU (1 << 23) ++ ++/* 4324 Chip specific ChipControl1 register bits */ ++#define CCTRL1_4324_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */ ++#define CCTRL1_4324_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */ ++ ++ ++/* 4313 resources */ ++#define RES4313_BB_PU_RSRC 0 ++#define RES4313_ILP_REQ_RSRC 1 ++#define RES4313_XTAL_PU_RSRC 2 ++#define RES4313_ALP_AVAIL_RSRC 3 ++#define RES4313_RADIO_PU_RSRC 4 ++#define RES4313_BG_PU_RSRC 5 ++#define RES4313_VREG1P4_PU_RSRC 6 ++#define RES4313_AFE_PWRSW_RSRC 7 ++#define RES4313_RX_PWRSW_RSRC 8 ++#define RES4313_TX_PWRSW_RSRC 9 ++#define RES4313_BB_PWRSW_RSRC 10 ++#define 
RES4313_SYNTH_PWRSW_RSRC 11 ++#define RES4313_MISC_PWRSW_RSRC 12 ++#define RES4313_BB_PLL_PWRSW_RSRC 13 ++#define RES4313_HT_AVAIL_RSRC 14 ++#define RES4313_MACPHY_CLK_AVAIL_RSRC 15 ++ ++/* 4313 chip-specific ChipStatus register bits */ ++#define CST4313_SPROM_PRESENT 1 ++#define CST4313_OTP_PRESENT 2 ++#define CST4313_SPROM_OTP_SEL_MASK 0x00000002 ++#define CST4313_SPROM_OTP_SEL_SHIFT 0 ++ ++/* 4313 Chip specific ChipControl register bits */ ++#define CCTRL_4313_12MA_LED_DRIVE 0x00000007 /* 12 mA drive strengh for later 4313 */ ++ ++/* PMU respources for 4314 */ ++#define RES4314_LPLDO_PU 0 ++#define RES4314_PMU_SLEEP_DIS 1 ++#define RES4314_PMU_BG_PU 2 ++#define RES4314_CBUCK_LPOM_PU 3 ++#define RES4314_CBUCK_PFM_PU 4 ++#define RES4314_CLDO_PU 5 ++#define RES4314_LPLDO2_LVM 6 ++#define RES4314_WL_PMU_PU 7 ++#define RES4314_LNLDO_PU 8 ++#define RES4314_LDO3P3_PU 9 ++#define RES4314_OTP_PU 10 ++#define RES4314_XTAL_PU 11 ++#define RES4314_WL_PWRSW_PU 12 ++#define RES4314_LQ_AVAIL 13 ++#define RES4314_LOGIC_RET 14 ++#define RES4314_MEM_SLEEP 15 ++#define RES4314_MACPHY_RET 16 ++#define RES4314_WL_CORE_READY 17 ++#define RES4314_ILP_REQ 18 ++#define RES4314_ALP_AVAIL 19 ++#define RES4314_MISC_PWRSW_PU 20 ++#define RES4314_SYNTH_PWRSW_PU 21 ++#define RES4314_RX_PWRSW_PU 22 ++#define RES4314_RADIO_PU 23 ++#define RES4314_VCO_LDO_PU 24 ++#define RES4314_AFE_LDO_PU 25 ++#define RES4314_RX_LDO_PU 26 ++#define RES4314_TX_LDO_PU 27 ++#define RES4314_HT_AVAIL 28 ++#define RES4314_MACPHY_CLK_AVAIL 29 ++ ++/* 4314 chip-specific ChipStatus register bits */ ++#define CST4314_OTP_ENABLED 0x00200000 ++ ++/* 43228 resources */ ++#define RES43228_NOT_USED 0 ++#define RES43228_ILP_REQUEST 1 ++#define RES43228_XTAL_PU 2 ++#define RES43228_ALP_AVAIL 3 ++#define RES43228_PLL_EN 4 ++#define RES43228_HT_PHY_AVAIL 5 ++ ++/* 43228 chipstatus reg bits */ ++#define CST43228_ILP_DIV_EN 0x1 ++#define CST43228_OTP_PRESENT 0x2 ++#define CST43228_SERDES_REFCLK_PADSEL 0x4 ++#define 
CST43228_SDIO_MODE 0x8 ++#define CST43228_SDIO_OTP_PRESENT 0x10 ++#define CST43228_SDIO_RESET 0x20 ++ ++/* 4706 chipstatus reg bits */ ++#define CST4706_PKG_OPTION (1<<0) /* 0: full-featured package 1: low-cost package */ ++#define CST4706_SFLASH_PRESENT (1<<1) /* 0: parallel, 1: serial flash is present */ ++#define CST4706_SFLASH_TYPE (1<<2) /* 0: 8b-p/ST-s flash, 1: 16b-p/Atmal-s flash */ ++#define CST4706_MIPS_BENDIAN (1<<3) /* 0: little, 1: big endian */ ++#define CST4706_PCIE1_DISABLE (1<<5) /* PCIE1 enable strap pin */ ++ ++/* 4706 flashstrconfig reg bits */ ++#define FLSTRCF4706_MASK 0x000000ff ++#define FLSTRCF4706_SF1 0x00000001 /* 2nd serial flash present */ ++#define FLSTRCF4706_PF1 0x00000002 /* 2nd parallel flash present */ ++#define FLSTRCF4706_SF1_TYPE 0x00000004 /* 2nd serial flash type : 0 : ST, 1 : Atmel */ ++#define FLSTRCF4706_NF1 0x00000008 /* 2nd NAND flash present */ ++#define FLSTRCF4706_1ST_MADDR_SEG_MASK 0x000000f0 /* Valid value mask */ ++#define FLSTRCF4706_1ST_MADDR_SEG_4MB 0x00000010 /* 4MB */ ++#define FLSTRCF4706_1ST_MADDR_SEG_8MB 0x00000020 /* 8MB */ ++#define FLSTRCF4706_1ST_MADDR_SEG_16MB 0x00000030 /* 16MB */ ++#define FLSTRCF4706_1ST_MADDR_SEG_32MB 0x00000040 /* 32MB */ ++#define FLSTRCF4706_1ST_MADDR_SEG_64MB 0x00000050 /* 64MB */ ++#define FLSTRCF4706_1ST_MADDR_SEG_128MB 0x00000060 /* 128MB */ ++#define FLSTRCF4706_1ST_MADDR_SEG_256MB 0x00000070 /* 256MB */ ++ ++/* 4360 Chip specific ChipControl register bits */ ++#define CCTRL4360_SECI_MODE (1 << 2) ++#define CCTRL4360_BTSWCTRL_MODE (1 << 3) ++#define CCTRL4360_EXTRA_FEMCTRL_MODE (1 << 8) ++#define CCTRL4360_BT_LGCY_MODE (1 << 9) ++#define CCTRL4360_CORE2FEMCTRL4_ON (1 << 21) ++ ++/* 4360 PMU resources and chip status bits */ ++#define RES4360_REGULATOR 0 ++#define RES4360_ILP_AVAIL 1 ++#define RES4360_ILP_REQ 2 ++#define RES4360_XTAL_PU 3 ++#define RES4360_ALP_AVAIL 4 ++#define RES4360_BBPLLPWRSW_PU 5 ++#define RES4360_HT_AVAIL 6 ++#define RES4360_OTP_PU 7 ++#define 
RES4360_USBLDO_PU 8 ++#define RES4360_USBPLL_PWRSW_PU 9 ++#define RES4360_LQ_AVAIL 10 ++ ++#define CST4360_XTAL_40MZ 0x00000001 ++#define CST4360_SFLASH 0x00000002 ++#define CST4360_SPROM_PRESENT 0x00000004 ++#define CST4360_SFLASH_TYPE 0x00000004 ++#define CST4360_OTP_ENABLED 0x00000008 ++#define CST4360_REMAP_ROM 0x00000010 ++#define CST4360_RSRC_INIT_MODE_MASK 0x00000060 ++#define CST4360_RSRC_INIT_MODE_SHIFT 5 ++#define CST4360_ILP_DIVEN 0x00000080 ++#define CST4360_MODE_USB 0x00000100 ++#define CST4360_SPROM_SIZE_MASK 0x00000600 ++#define CST4360_SPROM_SIZE_SHIFT 9 ++#define CST4360_BBPLL_LOCK 0x00000800 ++#define CST4360_AVBBPLL_LOCK 0x00001000 ++#define CST4360_USBBBPLL_LOCK 0x00002000 ++ ++#define CCTL_4360_UART_SEL 2 ++ ++/* 4335 resources */ ++#define RES4335_LPLDO_PO 0 ++#define RES4335_PMU_BG_PU 1 ++#define RES4335_PMU_SLEEP 2 ++#define RES4335_RSVD_3 3 ++#define RES4335_CBUCK_LPOM_PU 4 ++#define RES4335_CBUCK_PFM_PU 5 ++#define RES4335_RSVD_6 6 ++#define RES4335_RSVD_7 7 ++#define RES4335_LNLDO_PU 8 ++#define RES4335_XTALLDO_PU 9 ++#define RES4335_LDO3P3_PU 10 ++#define RES4335_OTP_PU 11 ++#define RES4335_XTAL_PU 12 ++#define RES4335_SR_CLK_START 13 ++#define RES4335_LQ_AVAIL 14 ++#define RES4335_LQ_START 15 ++#define RES4335_RSVD_16 16 ++#define RES4335_WL_CORE_RDY 17 ++#define RES4335_ILP_REQ 18 ++#define RES4335_ALP_AVAIL 19 ++#define RES4335_MINI_PMU 20 ++#define RES4335_RADIO_PU 21 ++#define RES4335_SR_CLK_STABLE 22 ++#define RES4335_SR_SAVE_RESTORE 23 ++#define RES4335_SR_PHY_PWRSW 24 ++#define RES4335_SR_VDDM_PWRSW 25 ++#define RES4335_SR_SUBCORE_PWRSW 26 ++#define RES4335_SR_SLEEP 27 ++#define RES4335_HT_START 28 ++#define RES4335_HT_AVAIL 29 ++#define RES4335_MACPHY_CLKAVAIL 30 ++ ++/* 4335 Chip specific ChipStatus register bits */ ++#define CST4335_SPROM_MASK 0x00000020 ++#define CST4335_SFLASH_MASK 0x00000040 ++#define CST4335_RES_INIT_MODE_SHIFT 7 ++#define CST4335_RES_INIT_MODE_MASK 0x00000180 ++#define CST4335_CHIPMODE_MASK 0xF ++#define 
CST4335_CHIPMODE_SDIOD(cs) (((cs) & (1 << 0)) != 0) /* SDIO */ ++#define CST4335_CHIPMODE_GSPI(cs) (((cs) & (1 << 1)) != 0) /* gSPI */ ++#define CST4335_CHIPMODE_USB20D(cs) (((cs) & (1 << 2)) != 0) /* USB || USBDA */ ++#define CST4335_CHIPMODE_PCIE(cs) (((cs) & (1 << 3)) != 0) /* PCIE */ ++ ++/* 4335 Chip specific ChipControl1 register bits */ ++#define CCTRL1_4335_GPIO_SEL (1 << 0) /* 1=select GPIOs to be muxed out */ ++#define CCTRL1_4335_SDIO_HOST_WAKE (1 << 2) /* SDIO: 1=configure GPIO0 for host wake */ ++ ++ ++#define CR4_RAM_BASE (0x180000) ++#define PATCHTBL_SIZE (0x800) ++ ++ ++/* 4335 resources--END */ ++ ++/* GCI chipcontrol register indices */ ++#define CC_GCI_CHIPCTRL_00 (0) ++#define CC_GCI_CHIPCTRL_01 (1) ++#define CC_GCI_CHIPCTRL_02 (2) ++#define CC_GCI_CHIPCTRL_03 (3) ++#define CC_GCI_CHIPCTRL_04 (4) ++#define CC_GCI_CHIPCTRL_05 (5) ++#define CC_GCI_CHIPCTRL_06 (6) ++#define CC_GCI_CHIPCTRL_07 (7) ++#define CC_GCI_CHIPCTRL_08 (8) ++ ++#define CC_GCI_NUMCHIPCTRLREGS(cap1) ((cap1 & 0xF00) >> 8) ++ ++/* 4335 pins ++* note: only the values set as default/used are added here. 
++*/ ++#define CC4335_PIN_GPIO_00 (0) ++#define CC4335_PIN_GPIO_01 (1) ++#define CC4335_PIN_GPIO_02 (2) ++#define CC4335_PIN_GPIO_03 (3) ++#define CC4335_PIN_GPIO_04 (4) ++#define CC4335_PIN_GPIO_05 (5) ++#define CC4335_PIN_GPIO_06 (6) ++#define CC4335_PIN_GPIO_07 (7) ++#define CC4335_PIN_GPIO_08 (8) ++#define CC4335_PIN_GPIO_09 (9) ++#define CC4335_PIN_GPIO_10 (10) ++#define CC4335_PIN_GPIO_11 (11) ++#define CC4335_PIN_GPIO_12 (12) ++#define CC4335_PIN_GPIO_13 (13) ++#define CC4335_PIN_GPIO_14 (14) ++#define CC4335_PIN_GPIO_15 (15) ++#define CC4335_PIN_SDIO_CLK (16) ++#define CC4335_PIN_SDIO_CMD (17) ++#define CC4335_PIN_SDIO_DATA0 (18) ++#define CC4335_PIN_SDIO_DATA1 (19) ++#define CC4335_PIN_SDIO_DATA2 (20) ++#define CC4335_PIN_SDIO_DATA3 (21) ++#define CC4335_PIN_RF_SW_CTRL_0 (22) ++#define CC4335_PIN_RF_SW_CTRL_1 (23) ++#define CC4335_PIN_RF_SW_CTRL_2 (24) ++#define CC4335_PIN_RF_SW_CTRL_3 (25) ++#define CC4335_PIN_RF_SW_CTRL_4 (26) ++#define CC4335_PIN_RF_SW_CTRL_5 (27) ++#define CC4335_PIN_RF_SW_CTRL_6 (28) ++#define CC4335_PIN_RF_SW_CTRL_7 (29) ++#define CC4335_PIN_RF_SW_CTRL_8 (30) ++#define CC4335_PIN_RF_SW_CTRL_9 (31) ++ ++/* 4335 GCI function sel values ++*/ ++#define CC4335_FNSEL_HWDEF (0) ++#define CC4335_FNSEL_SAMEASPIN (1) ++#define CC4335_FNSEL_GPIO0 (2) ++#define CC4335_FNSEL_GPIO1 (3) ++#define CC4335_FNSEL_GCI0 (4) ++#define CC4335_FNSEL_GCI1 (5) ++#define CC4335_FNSEL_UART (6) ++#define CC4335_FNSEL_SFLASH (7) ++#define CC4335_FNSEL_SPROM (8) ++#define CC4335_FNSEL_MISC0 (9) ++#define CC4335_FNSEL_MISC1 (10) ++#define CC4335_FNSEL_MISC2 (11) ++#define CC4335_FNSEL_IND (12) ++#define CC4335_FNSEL_PDN (13) ++#define CC4335_FNSEL_PUP (14) ++#define CC4335_FNSEL_TRI (15) ++ ++/* find the 4 bit mask given the bit position */ ++#define GCIMASK(pos) (((uint32)0xF) << pos) ++ ++/* get the value which can be used to directly OR with chipcontrol reg */ ++#define GCIPOSVAL(val, pos) ((((uint32)val) << pos) & GCIMASK(pos)) ++ ++/* 4335 MUX options. 
each nibble belongs to a setting. Non-zero value specifies a logic ++* for now only UART for bootloader. ++*/ ++#define MUXENAB4335_UART_MASK (0x0000000f) ++ ++ ++/* defines to detect active host interface in use */ ++#define CHIP_HOSTIF_USB(sih) (si_chip_hostif(sih) & CST4360_MODE_USB) ++ ++/* ++* Maximum delay for the PMU state transition in us. ++* This is an upper bound intended for spinwaits etc. ++*/ ++#define PMU_MAX_TRANSITION_DLY 20000 ++ ++/* PMU resource up transition time in ILP cycles */ ++#define PMURES_UP_TRANSITION 2 ++ ++/* ++* Information from BT to WLAN over eci_inputlo, eci_inputmi & ++* eci_inputhi register. Rev >=21 ++*/ ++/* Fields in eci_inputlo register - [0:31] */ ++#define ECI_INLO_TASKTYPE_MASK 0x0000000f /* [3:0] - 4 bits */ ++#define ECI_INLO_TASKTYPE_SHIFT 0 ++#define ECI_INLO_PKTDUR_MASK 0x000000f0 /* [7:4] - 4 bits */ ++#define ECI_INLO_PKTDUR_SHIFT 4 ++#define ECI_INLO_ROLE_MASK 0x00000100 /* [8] - 1 bits */ ++#define ECI_INLO_ROLE_SHIFT 8 ++#define ECI_INLO_MLP_MASK 0x00000e00 /* [11:9] - 3 bits */ ++#define ECI_INLO_MLP_SHIFT 9 ++#define ECI_INLO_TXPWR_MASK 0x000ff000 /* [19:12] - 8 bits */ ++#define ECI_INLO_TXPWR_SHIFT 12 ++#define ECI_INLO_RSSI_MASK 0x0ff00000 /* [27:20] - 8 bits */ ++#define ECI_INLO_RSSI_SHIFT 20 ++#define ECI_INLO_VAD_MASK 0x10000000 /* [28] - 1 bits */ ++#define ECI_INLO_VAD_SHIFT 28 ++ ++/* ++* Register eci_inputlo bitfield values. 
++* - BT packet type information bits [7:0] ++*/ ++/* [3:0] - Task (link) type */ ++#define BT_ACL 0x00 ++#define BT_SCO 0x01 ++#define BT_eSCO 0x02 ++#define BT_A2DP 0x03 ++#define BT_SNIFF 0x04 ++#define BT_PAGE_SCAN 0x05 ++#define BT_INQUIRY_SCAN 0x06 ++#define BT_PAGE 0x07 ++#define BT_INQUIRY 0x08 ++#define BT_MSS 0x09 ++#define BT_PARK 0x0a ++#define BT_RSSISCAN 0x0b ++#define BT_MD_ACL 0x0c ++#define BT_MD_eSCO 0x0d ++#define BT_SCAN_WITH_SCO_LINK 0x0e ++#define BT_SCAN_WITHOUT_SCO_LINK 0x0f ++/* [7:4] = packet duration code */ ++/* [8] - Master / Slave */ ++#define BT_MASTER 0 ++#define BT_SLAVE 1 ++/* [11:9] - multi-level priority */ ++#define BT_LOWEST_PRIO 0x0 ++#define BT_HIGHEST_PRIO 0x3 ++/* [19:12] - BT transmit power */ ++/* [27:20] - BT RSSI */ ++/* [28] - VAD silence */ ++/* [31:29] - Undefined */ ++/* Register eci_inputmi values - [32:63] - none defined */ ++/* [63:32] - Undefined */ ++ ++/* Information from WLAN to BT over eci_output register. */ ++/* Fields in eci_output register - [0:31] */ ++#define ECI48_OUT_MASKMAGIC_HIWORD 0x55550000 ++#define ECI_OUT_CHANNEL_MASK(ccrev) ((ccrev) < 35 ? 0xf : (ECI48_OUT_MASKMAGIC_HIWORD | 0xf000)) ++#define ECI_OUT_CHANNEL_SHIFT(ccrev) ((ccrev) < 35 ? 0 : 12) ++#define ECI_OUT_BW_MASK(ccrev) ((ccrev) < 35 ? 0x70 : (ECI48_OUT_MASKMAGIC_HIWORD | 0xe00)) ++#define ECI_OUT_BW_SHIFT(ccrev) ((ccrev) < 35 ? 4 : 9) ++#define ECI_OUT_ANTENNA_MASK(ccrev) ((ccrev) < 35 ? 0x80 : (ECI48_OUT_MASKMAGIC_HIWORD | 0x100)) ++#define ECI_OUT_ANTENNA_SHIFT(ccrev) ((ccrev) < 35 ? 7 : 8) ++#define ECI_OUT_SIMUL_TXRX_MASK(ccrev) \ ++ ((ccrev) < 35 ? 0x10000 : (ECI48_OUT_MASKMAGIC_HIWORD | 0x80)) ++#define ECI_OUT_SIMUL_TXRX_SHIFT(ccrev) ((ccrev) < 35 ? 16 : 7) ++#define ECI_OUT_FM_DISABLE_MASK(ccrev) \ ++ ((ccrev) < 35 ? 0x40000 : (ECI48_OUT_MASKMAGIC_HIWORD | 0x40)) ++#define ECI_OUT_FM_DISABLE_SHIFT(ccrev) ((ccrev) < 35 ? 18 : 6) ++ ++/* Indicate control of ECI bits between s/w and dot11mac. 
++ * 0 => FW control, 1=> MAC/ucode control ++ ++ * Current assignment (ccrev >= 35): ++ * 0 - TxConf (ucode) ++ * 38 - FM disable (wl) ++ * 39 - Allow sim rx (ucode) ++ * 40 - Num antennas (wl) ++ * 43:41 - WLAN channel exclusion BW (wl) ++ * 47:44 - WLAN channel (wl) ++ * ++ * (ccrev < 35) ++ * 15:0 - wl ++ * 16 - ++ * 18 - FM disable ++ * 30 - wl interrupt ++ * 31 - ucode interrupt ++ * others - unassigned (presumed to be with dot11mac/ucode) ++ */ ++#define ECI_MACCTRL_BITS 0xbffb0000 ++#define ECI_MACCTRLLO_BITS 0x1 ++#define ECI_MACCTRLHI_BITS 0xFF ++ ++/* SECI configuration */ ++#define SECI_MODE_UART 0x0 ++#define SECI_MODE_SECI 0x1 ++#define SECI_MODE_LEGACY_3WIRE_BT 0x2 ++#define SECI_MODE_LEGACY_3WIRE_WLAN 0x3 ++#define SECI_MODE_HALF_SECI 0x4 ++ ++#define SECI_RESET (1 << 0) ++#define SECI_RESET_BAR_UART (1 << 1) ++#define SECI_ENAB_SECI_ECI (1 << 2) ++#define SECI_ENAB_SECIOUT_DIS (1 << 3) ++#define SECI_MODE_MASK 0x7 ++#define SECI_MODE_SHIFT 4 /* (bits 5, 6, 7) */ ++#define SECI_UPD_SECI (1 << 7) ++ ++#define SECI_SIGNOFF_0 0xDB ++#define SECI_SIGNOFF_1 0 ++ ++/* seci clk_ctl_st bits */ ++#define CLKCTL_STS_SECI_CLK_REQ (1 << 8) ++#define CLKCTL_STS_SECI_CLK_AVAIL (1 << 24) ++ ++#define SECI_UART_MSR_CTS_STATE (1 << 0) ++#define SECI_UART_MSR_RTS_STATE (1 << 1) ++#define SECI_UART_SECI_IN_STATE (1 << 2) ++#define SECI_UART_SECI_IN2_STATE (1 << 3) ++ ++/* SECI UART LCR/MCR register bits */ ++#define SECI_UART_LCR_STOP_BITS (1 << 0) /* 0 - 1bit, 1 - 2bits */ ++#define SECI_UART_LCR_PARITY_EN (1 << 1) ++#define SECI_UART_LCR_PARITY (1 << 2) /* 0 - odd, 1 - even */ ++#define SECI_UART_LCR_RX_EN (1 << 3) ++#define SECI_UART_LCR_LBRK_CTRL (1 << 4) /* 1 => SECI_OUT held low */ ++#define SECI_UART_LCR_TXO_EN (1 << 5) ++#define SECI_UART_LCR_RTSO_EN (1 << 6) ++#define SECI_UART_LCR_SLIPMODE_EN (1 << 7) ++#define SECI_UART_LCR_RXCRC_CHK (1 << 8) ++#define SECI_UART_LCR_TXCRC_INV (1 << 9) ++#define SECI_UART_LCR_TXCRC_LSBF (1 << 10) ++#define 
SECI_UART_LCR_TXCRC_EN (1 << 11) ++ ++#define SECI_UART_MCR_TX_EN (1 << 0) ++#define SECI_UART_MCR_PRTS (1 << 1) ++#define SECI_UART_MCR_SWFLCTRL_EN (1 << 2) ++#define SECI_UART_MCR_HIGHRATE_EN (1 << 3) ++#define SECI_UART_MCR_LOOPBK_EN (1 << 4) ++#define SECI_UART_MCR_AUTO_RTS (1 << 5) ++#define SECI_UART_MCR_AUTO_TX_DIS (1 << 6) ++#define SECI_UART_MCR_BAUD_ADJ_EN (1 << 7) ++#define SECI_UART_MCR_XONOFF_RPT (1 << 9) ++ ++/* WLAN channel numbers - used from wifi.h */ ++ ++/* WLAN BW */ ++#define ECI_BW_20 0x0 ++#define ECI_BW_25 0x1 ++#define ECI_BW_30 0x2 ++#define ECI_BW_35 0x3 ++#define ECI_BW_40 0x4 ++#define ECI_BW_45 0x5 ++#define ECI_BW_50 0x6 ++#define ECI_BW_ALL 0x7 ++ ++/* WLAN - number of antenna */ ++#define WLAN_NUM_ANT1 TXANT_0 ++#define WLAN_NUM_ANT2 TXANT_1 ++ ++#endif /* _SBCHIPC_H */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/sbconfig.h b/drivers/net/ethernet/broadcom/gmac/src/include/sbconfig.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/sbconfig.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/sbconfig.h 2017-11-09 17:53:44.005292000 +0800 +@@ -0,0 +1,276 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom SiliconBackplane hardware register definitions. ++ * ++ * $Id: sbconfig.h 241182 2011-02-17 21:50:03Z $ ++ */ ++ ++#ifndef _SBCONFIG_H ++#define _SBCONFIG_H ++ ++/* cpp contortions to concatenate w/arg prescan */ ++#ifndef PAD ++#define _PADLINE(line) pad ## line ++#define _XSTR(line) _PADLINE(line) ++#define PAD _XSTR(__LINE__) ++#endif ++ ++/* enumeration in SB is based on the premise that cores are contiguos in the ++ * enumeration space. ++ */ ++#define SB_BUS_SIZE 0x10000 /* Each bus gets 64Kbytes for cores */ ++#define SB_BUS_BASE(b) (SI_ENUM_BASE + (b) * SB_BUS_SIZE) ++#define SB_BUS_MAXCORES (SB_BUS_SIZE / SI_CORE_SIZE) /* Max cores per bus */ ++ ++/* ++ * Sonics Configuration Space Registers. ++ */ ++#define SBCONFIGOFF 0xf00 /* core sbconfig regs are top 256bytes of regs */ ++#define SBCONFIGSIZE 256 /* sizeof (sbconfig_t) */ ++ ++#define SBIPSFLAG 0x08 ++#define SBTPSFLAG 0x18 ++#define SBTMERRLOGA 0x48 /* sonics >= 2.3 */ ++#define SBTMERRLOG 0x50 /* sonics >= 2.3 */ ++#define SBADMATCH3 0x60 ++#define SBADMATCH2 0x68 ++#define SBADMATCH1 0x70 ++#define SBIMSTATE 0x90 ++#define SBINTVEC 0x94 ++#define SBTMSTATELOW 0x98 ++#define SBTMSTATEHIGH 0x9c ++#define SBBWA0 0xa0 ++#define SBIMCONFIGLOW 0xa8 ++#define SBIMCONFIGHIGH 0xac ++#define SBADMATCH0 0xb0 ++#define SBTMCONFIGLOW 0xb8 ++#define SBTMCONFIGHIGH 0xbc ++#define SBBCONFIG 0xc0 ++#define SBBSTATE 0xc8 ++#define SBACTCNFG 0xd8 ++#define SBFLAGST 0xe8 ++#define SBIDLOW 0xf8 ++#define SBIDHIGH 0xfc ++ ++/* All the previous registers are above SBCONFIGOFF, but with Sonics 2.3, we have ++ * a few registers *below* that line. 
I think it would be very confusing to try ++ * and change the value of SBCONFIGOFF, so I'm definig them as absolute offsets here, ++ */ ++ ++#define SBIMERRLOGA 0xea8 ++#define SBIMERRLOG 0xeb0 ++#define SBTMPORTCONNID0 0xed8 ++#define SBTMPORTLOCK0 0xef8 ++ ++#ifndef _LANGUAGE_ASSEMBLY ++ ++typedef volatile struct _sbconfig { ++ uint32 PAD[2]; ++ uint32 sbipsflag; /* initiator port ocp slave flag */ ++ uint32 PAD[3]; ++ uint32 sbtpsflag; /* target port ocp slave flag */ ++ uint32 PAD[11]; ++ uint32 sbtmerrloga; /* (sonics >= 2.3) */ ++ uint32 PAD; ++ uint32 sbtmerrlog; /* (sonics >= 2.3) */ ++ uint32 PAD[3]; ++ uint32 sbadmatch3; /* address match3 */ ++ uint32 PAD; ++ uint32 sbadmatch2; /* address match2 */ ++ uint32 PAD; ++ uint32 sbadmatch1; /* address match1 */ ++ uint32 PAD[7]; ++ uint32 sbimstate; /* initiator agent state */ ++ uint32 sbintvec; /* interrupt mask */ ++ uint32 sbtmstatelow; /* target state */ ++ uint32 sbtmstatehigh; /* target state */ ++ uint32 sbbwa0; /* bandwidth allocation table0 */ ++ uint32 PAD; ++ uint32 sbimconfiglow; /* initiator configuration */ ++ uint32 sbimconfighigh; /* initiator configuration */ ++ uint32 sbadmatch0; /* address match0 */ ++ uint32 PAD; ++ uint32 sbtmconfiglow; /* target configuration */ ++ uint32 sbtmconfighigh; /* target configuration */ ++ uint32 sbbconfig; /* broadcast configuration */ ++ uint32 PAD; ++ uint32 sbbstate; /* broadcast state */ ++ uint32 PAD[3]; ++ uint32 sbactcnfg; /* activate configuration */ ++ uint32 PAD[3]; ++ uint32 sbflagst; /* current sbflags */ ++ uint32 PAD[3]; ++ uint32 sbidlow; /* identification */ ++ uint32 sbidhigh; /* identification */ ++} sbconfig_t; ++ ++#endif /* _LANGUAGE_ASSEMBLY */ ++ ++/* sbipsflag */ ++#define SBIPS_INT1_MASK 0x3f /* which sbflags get routed to mips interrupt 1 */ ++#define SBIPS_INT1_SHIFT 0 ++#define SBIPS_INT2_MASK 0x3f00 /* which sbflags get routed to mips interrupt 2 */ ++#define SBIPS_INT2_SHIFT 8 ++#define SBIPS_INT3_MASK 0x3f0000 /* which sbflags 
get routed to mips interrupt 3 */ ++#define SBIPS_INT3_SHIFT 16 ++#define SBIPS_INT4_MASK 0x3f000000 /* which sbflags get routed to mips interrupt 4 */ ++#define SBIPS_INT4_SHIFT 24 ++ ++/* sbtpsflag */ ++#define SBTPS_NUM0_MASK 0x3f /* interrupt sbFlag # generated by this core */ ++#define SBTPS_F0EN0 0x40 /* interrupt is always sent on the backplane */ ++ ++/* sbtmerrlog */ ++#define SBTMEL_CM 0x00000007 /* command */ ++#define SBTMEL_CI 0x0000ff00 /* connection id */ ++#define SBTMEL_EC 0x0f000000 /* error code */ ++#define SBTMEL_ME 0x80000000 /* multiple error */ ++ ++/* sbimstate */ ++#define SBIM_PC 0xf /* pipecount */ ++#define SBIM_AP_MASK 0x30 /* arbitration policy */ ++#define SBIM_AP_BOTH 0x00 /* use both timeslaces and token */ ++#define SBIM_AP_TS 0x10 /* use timesliaces only */ ++#define SBIM_AP_TK 0x20 /* use token only */ ++#define SBIM_AP_RSV 0x30 /* reserved */ ++#define SBIM_IBE 0x20000 /* inbanderror */ ++#define SBIM_TO 0x40000 /* timeout */ ++#define SBIM_BY 0x01800000 /* busy (sonics >= 2.3) */ ++#define SBIM_RJ 0x02000000 /* reject (sonics >= 2.3) */ ++ ++/* sbtmstatelow */ ++#define SBTML_RESET 0x0001 /* reset */ ++#define SBTML_REJ_MASK 0x0006 /* reject field */ ++#define SBTML_REJ 0x0002 /* reject */ ++#define SBTML_TMPREJ 0x0004 /* temporary reject, for error recovery */ ++ ++#define SBTML_SICF_SHIFT 16 /* Shift to locate the SI control flags in sbtml */ ++ ++/* sbtmstatehigh */ ++#define SBTMH_SERR 0x0001 /* serror */ ++#define SBTMH_INT 0x0002 /* interrupt */ ++#define SBTMH_BUSY 0x0004 /* busy */ ++#define SBTMH_TO 0x0020 /* timeout (sonics >= 2.3) */ ++ ++#define SBTMH_SISF_SHIFT 16 /* Shift to locate the SI status flags in sbtmh */ ++ ++/* sbbwa0 */ ++#define SBBWA_TAB0_MASK 0xffff /* lookup table 0 */ ++#define SBBWA_TAB1_MASK 0xffff /* lookup table 1 */ ++#define SBBWA_TAB1_SHIFT 16 ++ ++/* sbimconfiglow */ ++#define SBIMCL_STO_MASK 0x7 /* service timeout */ ++#define SBIMCL_RTO_MASK 0x70 /* request timeout */ ++#define 
SBIMCL_RTO_SHIFT 4 ++#define SBIMCL_CID_MASK 0xff0000 /* connection id */ ++#define SBIMCL_CID_SHIFT 16 ++ ++/* sbimconfighigh */ ++#define SBIMCH_IEM_MASK 0xc /* inband error mode */ ++#define SBIMCH_TEM_MASK 0x30 /* timeout error mode */ ++#define SBIMCH_TEM_SHIFT 4 ++#define SBIMCH_BEM_MASK 0xc0 /* bus error mode */ ++#define SBIMCH_BEM_SHIFT 6 ++ ++/* sbadmatch0 */ ++#define SBAM_TYPE_MASK 0x3 /* address type */ ++#define SBAM_AD64 0x4 /* reserved */ ++#define SBAM_ADINT0_MASK 0xf8 /* type0 size */ ++#define SBAM_ADINT0_SHIFT 3 ++#define SBAM_ADINT1_MASK 0x1f8 /* type1 size */ ++#define SBAM_ADINT1_SHIFT 3 ++#define SBAM_ADINT2_MASK 0x1f8 /* type2 size */ ++#define SBAM_ADINT2_SHIFT 3 ++#define SBAM_ADEN 0x400 /* enable */ ++#define SBAM_ADNEG 0x800 /* negative decode */ ++#define SBAM_BASE0_MASK 0xffffff00 /* type0 base address */ ++#define SBAM_BASE0_SHIFT 8 ++#define SBAM_BASE1_MASK 0xfffff000 /* type1 base address for the core */ ++#define SBAM_BASE1_SHIFT 12 ++#define SBAM_BASE2_MASK 0xffff0000 /* type2 base address for the core */ ++#define SBAM_BASE2_SHIFT 16 ++ ++/* sbtmconfiglow */ ++#define SBTMCL_CD_MASK 0xff /* clock divide */ ++#define SBTMCL_CO_MASK 0xf800 /* clock offset */ ++#define SBTMCL_CO_SHIFT 11 ++#define SBTMCL_IF_MASK 0xfc0000 /* interrupt flags */ ++#define SBTMCL_IF_SHIFT 18 ++#define SBTMCL_IM_MASK 0x3000000 /* interrupt mode */ ++#define SBTMCL_IM_SHIFT 24 ++ ++/* sbtmconfighigh */ ++#define SBTMCH_BM_MASK 0x3 /* busy mode */ ++#define SBTMCH_RM_MASK 0x3 /* retry mode */ ++#define SBTMCH_RM_SHIFT 2 ++#define SBTMCH_SM_MASK 0x30 /* stop mode */ ++#define SBTMCH_SM_SHIFT 4 ++#define SBTMCH_EM_MASK 0x300 /* sb error mode */ ++#define SBTMCH_EM_SHIFT 8 ++#define SBTMCH_IM_MASK 0xc00 /* int mode */ ++#define SBTMCH_IM_SHIFT 10 ++ ++/* sbbconfig */ ++#define SBBC_LAT_MASK 0x3 /* sb latency */ ++#define SBBC_MAX0_MASK 0xf0000 /* maxccntr0 */ ++#define SBBC_MAX0_SHIFT 16 ++#define SBBC_MAX1_MASK 0xf00000 /* maxccntr1 */ ++#define 
SBBC_MAX1_SHIFT 20 ++ ++/* sbbstate */ ++#define SBBS_SRD 0x1 /* st reg disable */ ++#define SBBS_HRD 0x2 /* hold reg disable */ ++ ++/* sbidlow */ ++#define SBIDL_CS_MASK 0x3 /* config space */ ++#define SBIDL_AR_MASK 0x38 /* # address ranges supported */ ++#define SBIDL_AR_SHIFT 3 ++#define SBIDL_SYNCH 0x40 /* sync */ ++#define SBIDL_INIT 0x80 /* initiator */ ++#define SBIDL_MINLAT_MASK 0xf00 /* minimum backplane latency */ ++#define SBIDL_MINLAT_SHIFT 8 ++#define SBIDL_MAXLAT 0xf000 /* maximum backplane latency */ ++#define SBIDL_MAXLAT_SHIFT 12 ++#define SBIDL_FIRST 0x10000 /* this initiator is first */ ++#define SBIDL_CW_MASK 0xc0000 /* cycle counter width */ ++#define SBIDL_CW_SHIFT 18 ++#define SBIDL_TP_MASK 0xf00000 /* target ports */ ++#define SBIDL_TP_SHIFT 20 ++#define SBIDL_IP_MASK 0xf000000 /* initiator ports */ ++#define SBIDL_IP_SHIFT 24 ++#define SBIDL_RV_MASK 0xf0000000 /* sonics backplane revision code */ ++#define SBIDL_RV_SHIFT 28 ++#define SBIDL_RV_2_2 0x00000000 /* version 2.2 or earlier */ ++#define SBIDL_RV_2_3 0x10000000 /* version 2.3 */ ++ ++/* sbidhigh */ ++#define SBIDH_RC_MASK 0x000f /* revision code */ ++#define SBIDH_RCE_MASK 0x7000 /* revision code extension field */ ++#define SBIDH_RCE_SHIFT 8 ++#define SBCOREREV(sbidh) \ ++ ((((sbidh) & SBIDH_RCE_MASK) >> SBIDH_RCE_SHIFT) | ((sbidh) & SBIDH_RC_MASK)) ++#define SBIDH_CC_MASK 0x8ff0 /* core code */ ++#define SBIDH_CC_SHIFT 4 ++#define SBIDH_VC_MASK 0xffff0000 /* vendor code */ ++#define SBIDH_VC_SHIFT 16 ++ ++#define SB_COMMIT 0xfd8 /* update buffered registers value */ ++ ++/* vendor codes */ ++#define SB_VEND_BCM 0x4243 /* Broadcom's SB vendor code */ ++ ++#endif /* _SBCONFIG_H */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/sbhndarm.h b/drivers/net/ethernet/broadcom/gmac/src/include/sbhndarm.h +--- 
a/drivers/net/ethernet/broadcom/gmac/src/include/sbhndarm.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/sbhndarm.h 2017-11-09 17:53:44.005307000 +0800 +@@ -0,0 +1,293 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * Broadcom SiliconBackplane ARM definitions ++ * ++ * $Id: sbhndarm.h 325951 2012-04-05 06:03:27Z $ ++ */ ++ ++#ifndef _sbhndarm_h_ ++#define _sbhndarm_h_ ++ ++#include ++#include ++ ++/* register offsets */ ++#define ARM7_CORECTL 0 ++ ++/* bits in corecontrol */ ++#define ACC_FORCED_RST 0x1 ++#define ACC_SERRINT 0x2 ++#define ACC_NOTSLEEPINGCLKREQ_SHIFT 24 ++ ++/* arm resetlog */ ++#define SBRESETLOG 0x1 ++#define SERRORLOG 0x2 ++ ++/* arm core-specific control flags */ ++#define SICF_REMAP_MSK 0x001c ++#define SICF_REMAP_NONE 0 ++#define SICF_REMAP_ROM 0x0004 ++#define SIFC_REMAP_FLASH 0x0008 ++ ++/* misc core-specific defines */ ++#if defined(__ARM_ARCH_4T__) ++/* arm7tdmi-s */ ++/* backplane related stuff */ ++#define ARM_CORE_ID ARM7S_CORE_ID /* arm coreid */ ++#define SI_ARM_ROM SI_ARM7S_ROM /* ROM backplane/system address */ ++#define SI_ARM_SRAM2 SI_ARM7S_SRAM2 /* RAM backplane address when remap is 1 or 2 */ ++#elif defined(__ARM_ARCH_7M__) ++/* cortex-m3 */ ++/* backplane related stuff */ ++#define ARM_CORE_ID ARMCM3_CORE_ID /* arm coreid */ ++#define SI_ARM_ROM SI_ARMCM3_ROM /* ROM backplane/system address */ ++#define SI_ARM_SRAM2 SI_ARMCM3_SRAM2 /* RAM backplane address when remap is 1 or 2 */ ++/* core registers offsets */ ++#define ARMCM3_CYCLECNT 0x90 /* Cortex-M3 core registers offsets */ ++#define ARMCM3_INTTIMER 0x94 ++#define ARMCM3_INTMASK 0x98 ++#define ARMCM3_INTSTATUS 0x9c ++/* interrupt/exception */ ++#define ARMCM3_NUMINTS 16 /* # of external interrupts */ ++#define ARMCM3_INTALL ((1 << ARMCM3_NUMINTS) - 1) /* Interrupt mask */ ++#define ARMCM3_FAULTMASK 0x40000000 /* Master fault enable/disable */ ++#define ARMCM3_PRIMASK 0x80000000 /* Master interrupt enable/disable */ ++#define ARMCM3_SHARED_INT 0 /* Interrupt shared by multiple cores */ ++#define ARMCM3_INT(i) (1 << (i)) /* Individual interrupt enable/disable */ ++/* compatible with arm7tdmi-s */ ++#define PS_I ARMCM3_PRIMASK ++#define PS_F ARMCM3_FAULTMASK ++/* 
intmask/intstatus bits */ ++#define ARMCM3_INTMASK_TIMER 0x1 ++#define ARMCM3_INTMASK_SYSRESET 0x4 ++#define ARMCM3_INTMASK_LOCKUP 0x8 ++ ++/* ++ * Overlay Support in Rev 5 ++ */ ++#define ARMCM3_OVL_VALID_SHIFT 0 ++#define ARMCM3_OVL_VALID 1 ++#define ARMCM3_OVL_SZ_SHIFT 1 ++#define ARMCM3_OVL_SZ_MASK 0x0000000e ++#define ARMCM3_OVL_SZ_512B 0 /* 512B */ ++#define ARMCM3_OVL_SZ_1KB 1 /* 1KB */ ++#define ARMCM3_OVL_SZ_2KB 2 /* 2KB */ ++#define ARMCM3_OVL_SZ_4KB 3 /* 4KB */ ++#define ARMCM3_OVL_SZ_8KB 4 /* 8KB */ ++#define ARMCM3_OVL_SZ_16KB 5 /* 16KB */ ++#define ARMCM3_OVL_SZ_32KB 6 /* 32KB */ ++#define ARMCM3_OVL_SZ_64KB 7 /* 64KB */ ++#define ARMCM3_OVL_ADDR_SHIFT 9 ++#define ARMCM3_OVL_ADDR_MASK 0x003FFE00 ++#define ARMCM3_OVL_MAX 16 ++ ++#elif defined(__ARM_ARCH_7R__) ++/* cortex-r4 */ ++/* backplane related stuff */ ++#define ARM_CORE_ID ARMCR4_CORE_ID /* arm coreid */ ++#define SI_ARM_ROM SI_ARMCR4_ROM /* ROM backplane/system address */ ++#define SI_ARM_SRAM2 0x0 /* In the cr4 the RAM is just not available ++ * when remap is 1 ++ */ ++ ++/* core registers offsets */ ++#define ARMCR4_CORECTL 0 ++#define ARMCR4_CORECAP 4 ++#define ARMCR4_COREST 8 ++ ++#define ARMCR4_FIQRSTATUS 0x10 ++#define ARMCR4_FIQMASK 0x14 ++#define ARMCR4_IRQMASK 0x18 ++ ++#define ARMCR4_INTSTATUS 0x20 ++#define ARMCR4_INTMASK 0x24 ++#define ARMCR4_CYCLECNT 0x28 ++#define ARMCR4_INTTIMER 0x2c ++ ++#define ARMCR4_GPIOSEL 0x30 ++#define ARMCR4_GPIOEN 0x34 ++ ++#define ARMCR4_BANKIDX 0x40 ++#define ARMCR4_BANKINFO 0x44 ++#define ARMCR4_BANKSTBY 0x48 ++#define ARMCR4_BANKPDA 0x4c ++ ++#define ARMCR4_TCAMPATCHCTRL 0x68 ++#define ARMCR4_TCAMPATCHTBLBASEADDR 0x6C ++#define ARMCR4_TCAMCMDREG 0x70 ++#define ARMCR4_TCAMDATAREG 0x74 ++#define ARMCR4_TCAMBANKXMASKREG 0x78 ++ ++#define ARMCR4_ROMNB_MASK 0xf00 ++#define ARMCR4_ROMNB_SHIFT 8 ++#define ARMCR4_TCBBNB_MASK 0xf0 ++#define ARMCR4_TCBBNB_SHIFT 4 ++#define ARMCR4_TCBANB_MASK 0xf ++#define ARMCR4_TCBANB_SHIFT 0 ++ ++#define ARMCR4_MT_MASK 0x300 
++#define ARMCR4_MT_SHIFT 8 ++#define ARMCR4_MT_ROM 0x100 ++#define ARMCR4_MT_RAM 0 ++ ++#define ARMCR4_BSZ_MASK 0x3f ++#define ARMCR4_BSZ_MULT 8192 ++ ++#define ARMCR4_TCAM_ENABLE (1 << 31) ++#define ARMCR4_TCAM_CLKENAB (1 << 30) ++#define ARMCR4_TCAM_PATCHCNT_MASK 0xf ++ ++#define ARMCR4_TCAM_CMD_DONE (1 << 31) ++#define ARMCR4_TCAM_MATCH (1 << 24) ++#define ARMCR4_TCAM_OPCODE_MASK (3 << 16) ++#define ARMCR4_TCAM_OPCODE_SHIFT 16 ++#define ARMCR4_TCAM_ADDR_MASK 0xffff ++#define ARMCR4_TCAM_NONE (0 << ARMCR4_TCAM_OPCODE_SHIFT) ++#define ARMCR4_TCAM_READ (1 << ARMCR4_TCAM_OPCODE_SHIFT) ++#define ARMCR4_TCAM_WRITE (2 << ARMCR4_TCAM_OPCODE_SHIFT) ++#define ARMCR4_TCAM_COMPARE (3 << ARMCR4_TCAM_OPCODE_SHIFT) ++#define ARMCR4_TCAM_CMD_DONE_DLY 1000 ++ ++#define ARMCR4_DATA_MASK (~0x7) ++#define ARMCR4_DATA_VALID (1 << 0) ++ ++ ++/* arm core-specific conrol flags */ ++#define SICF_CPUHALT 0x0020 ++#define SICF_UPDATEFW 0x0040 ++ ++/* arm core-specific status flags */ ++#define SISF_SDRENABLE 0x0001 ++#define SISF_TCMPROT 0x0002 ++ ++#define CHIP_SDRENABLE(sih) (sih->boardflags2 & BFL2_SDR_EN) ++#define CHIP_TCMPROTENAB(sih) (si_arm_sflags(sih) & SISF_TCMPROT) ++ ++#elif defined(__ARM_ARCH_7A__) ++/* backplane related stuff */ ++#define ARM_CORE_ID ARMCA9_CORE_ID /* arm coreid */ ++ ++#else /* !__ARM_ARCH_4T__ && !__ARM_ARCH_7M__ && !__ARM_ARCH_7R__ */ ++#error Unrecognized ARM Architecture ++#endif /* !__ARM_ARCH_4T__ && !__ARM_ARCH_7M__ && !__ARM_ARCH_7R__ */ ++ ++#ifndef _LANGUAGE_ASSEMBLY ++ ++/* cpp contortions to concatenate w/arg prescan */ ++#ifndef PAD ++#define _PADLINE(line) pad ## line ++#define _XSTR(line) _PADLINE(line) ++#define PAD _XSTR(__LINE__) ++#endif /* PAD */ ++ ++#if defined(__ARM_ARCH_4T__) ++/* arm7tdmi-s */ ++typedef volatile struct { ++ uint32 corecontrol; /* 0 */ ++ uint32 sleepcontrol; /* 4 */ ++ uint32 PAD; ++ uint32 biststatus; /* 0xc */ ++ uint32 firqstatus; /* 0x10 */ ++ uint32 fiqmask; /* 0x14 */ ++ uint32 irqmask; /* 0x18 */ ++ uint32 
PAD; ++ uint32 resetlog; /* 0x20 */ ++ uint32 gpioselect; /* 0x24 */ ++ uint32 gpioenable; /* 0x28 */ ++ uint32 PAD; ++ uint32 bpaddrlo; /* 0x30 */ ++ uint32 bpaddrhi; /* 0x34 */ ++ uint32 bpdata; /* 0x38 */ ++ uint32 bpindaccess; /* 0x3c */ ++ uint32 PAD[104]; ++ uint32 clk_ctl_st; /* 0x1e0 */ ++ uint32 hw_war; /* 0x1e4 */ ++} armregs_t; ++#define ARMREG(regs, reg) (&((armregs_t *)regs)->reg) ++#endif /* __ARM_ARCH_4T__ */ ++ ++#if defined(__ARM_ARCH_7M__) ++/* cortex-m3 */ ++typedef volatile struct { ++ uint32 corecontrol; /* 0x0 */ ++ uint32 corestatus; /* 0x4 */ ++ uint32 PAD[1]; ++ uint32 biststatus; /* 0xc */ ++ uint32 nmiisrst; /* 0x10 */ ++ uint32 nmimask; /* 0x14 */ ++ uint32 isrmask; /* 0x18 */ ++ uint32 PAD[1]; ++ uint32 resetlog; /* 0x20 */ ++ uint32 gpioselect; /* 0x24 */ ++ uint32 gpioenable; /* 0x28 */ ++ uint32 PAD[1]; ++ uint32 bpaddrlo; /* 0x30 */ ++ uint32 bpaddrhi; /* 0x34 */ ++ uint32 bpdata; /* 0x38 */ ++ uint32 bpindaccess; /* 0x3c */ ++ uint32 ovlidx; /* 0x40 */ ++ uint32 ovlmatch; /* 0x44 */ ++ uint32 ovladdr; /* 0x48 */ ++ uint32 PAD[13]; ++ uint32 bwalloc; /* 0x80 */ ++ uint32 PAD[3]; ++ uint32 cyclecnt; /* 0x90 */ ++ uint32 inttimer; /* 0x94 */ ++ uint32 intmask; /* 0x98 */ ++ uint32 intstatus; /* 0x9c */ ++ uint32 PAD[80]; ++ uint32 clk_ctl_st; /* 0x1e0 */ ++} cm3regs_t; ++#define ARMREG(regs, reg) (&((cm3regs_t *)regs)->reg) ++#endif /* __ARM_ARCH_7M__ */ ++ ++#if defined(__ARM_ARCH_7R__) ++/* cortex-R4 */ ++typedef volatile struct { ++ uint32 corecontrol; /* 0x0 */ ++ uint32 corecapabilities; /* 0x4 */ ++ uint32 corestatus; /* 0x8 */ ++ uint32 biststatus; /* 0xc */ ++ uint32 nmiisrst; /* 0x10 */ ++ uint32 nmimask; /* 0x14 */ ++ uint32 isrmask; /* 0x18 */ ++ uint32 PAD[1]; ++ uint32 intstatus; /* 0x20 */ ++ uint32 intmask; /* 0x24 */ ++ uint32 cyclecnt; /* 0x28 */ ++ uint32 inttimer; /* 0x2c */ ++ uint32 gpioselect; /* 0x30 */ ++ uint32 gpioenable; /* 0x34 */ ++ uint32 PAD[2]; ++ uint32 bankidx; /* 0x40 */ ++ uint32 bankinfo; /* 0x44 
*/ ++ uint32 bankstbyctl; /* 0x48 */ ++ uint32 bankpda; /* 0x4c */ ++ uint32 PAD[6]; ++ uint32 tcampatchctrl; /* 0x68 */ ++ uint32 tcampatchtblbaseaddr; /* 0x6c */ ++ uint32 tcamcmdreg; /* 0x70 */ ++ uint32 tcamdatareg; /* 0x74 */ ++ uint32 tcambankxmaskreg; /* 0x78 */ ++ uint32 PAD[89]; ++ uint32 clk_ctl_st; /* 0x1e0 */ ++} cr4regs_t; ++#define ARMREG(regs, reg) (&((cr4regs_t *)regs)->reg) ++#endif /* __ARM_ARCH_7R__ */ ++ ++#endif /* _LANGUAGE_ASSEMBLY */ ++ ++#endif /* _sbhndarm_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/sbhnddma.h b/drivers/net/ethernet/broadcom/gmac/src/include/sbhnddma.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/sbhnddma.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/sbhnddma.h 2017-11-09 17:53:44.006300000 +0800 +@@ -0,0 +1,403 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Generic Broadcom Home Networking Division (HND) DMA engine HW interface ++ * This supports the following chips: BCM42xx, 44xx, 47xx . 
++ * ++ * $Id: sbhnddma.h 321146 2012-03-14 08:27:23Z $ ++ */ ++ ++#ifndef _sbhnddma_h_ ++#define _sbhnddma_h_ ++ ++/* DMA structure: ++ * support two DMA engines: 32 bits address or 64 bit addressing ++ * basic DMA register set is per channel(transmit or receive) ++ * a pair of channels is defined for convenience ++ */ ++ ++ ++/* 32 bits addressing */ ++ ++/* dma registers per channel(xmt or rcv) */ ++typedef volatile struct { ++ uint32 control; /* enable, et al */ ++ uint32 addr; /* descriptor ring base address (4K aligned) */ ++ uint32 ptr; /* last descriptor posted to chip */ ++ uint32 status; /* current active descriptor, et al */ ++} dma32regs_t; ++ ++typedef volatile struct { ++ dma32regs_t xmt; /* dma tx channel */ ++ dma32regs_t rcv; /* dma rx channel */ ++} dma32regp_t; ++ ++typedef volatile struct { /* diag access */ ++ uint32 fifoaddr; /* diag address */ ++ uint32 fifodatalow; /* low 32bits of data */ ++ uint32 fifodatahigh; /* high 32bits of data */ ++ uint32 pad; /* reserved */ ++} dma32diag_t; ++ ++/* ++ * DMA Descriptor ++ * Descriptors are only read by the hardware, never written back. ++ */ ++typedef volatile struct { ++ uint32 ctrl; /* misc control bits & bufcount */ ++ uint32 addr; /* data buffer address */ ++} dma32dd_t; ++ ++/* ++ * Each descriptor ring must be 4096byte aligned, and fit within a single 4096byte page. 
++ */ ++#define D32RINGALIGN_BITS 12 ++#define D32MAXRINGSZ (1 << D32RINGALIGN_BITS) ++#define D32RINGALIGN (1 << D32RINGALIGN_BITS) ++ ++#define D32MAXDD (D32MAXRINGSZ / sizeof (dma32dd_t)) ++ ++/* transmit channel control */ ++#define XC_XE ((uint32)1 << 0) /* transmit enable */ ++#define XC_SE ((uint32)1 << 1) /* transmit suspend request */ ++#define XC_LE ((uint32)1 << 2) /* loopback enable */ ++#define XC_FL ((uint32)1 << 4) /* flush request */ ++#define XC_MR_MASK 0x000000C0 /* Multiple outstanding reads */ ++#define XC_MR_SHIFT 6 ++#define XC_PD ((uint32)1 << 11) /* parity check disable */ ++#define XC_AE ((uint32)3 << 16) /* address extension bits */ ++#define XC_AE_SHIFT 16 ++#define XC_BL_MASK 0x001C0000 /* BurstLen bits */ ++#define XC_BL_SHIFT 18 ++#define XC_PC_MASK 0x00E00000 /* Prefetch control */ ++#define XC_PC_SHIFT 21 ++#define XC_PT_MASK 0x03000000 /* Prefetch threshold */ ++#define XC_PT_SHIFT 24 ++ ++/* Multiple outstanding reads */ ++#define DMA_MR_1 0 ++#define DMA_MR_2 1 ++/* 2, 3: reserved */ ++ ++/* DMA Burst Length in bytes */ ++#define DMA_BL_16 0 ++#define DMA_BL_32 1 ++#define DMA_BL_64 2 ++#define DMA_BL_128 3 ++#define DMA_BL_256 4 ++#define DMA_BL_512 5 ++#define DMA_BL_1024 6 ++ ++/* Prefetch control */ ++#define DMA_PC_0 0 ++#define DMA_PC_4 1 ++#define DMA_PC_8 2 ++#define DMA_PC_16 3 ++/* others: reserved */ ++ ++/* Prefetch threshold */ ++#define DMA_PT_1 0 ++#define DMA_PT_2 1 ++#define DMA_PT_4 2 ++#define DMA_PT_8 3 ++ ++/* transmit descriptor table pointer */ ++#define XP_LD_MASK 0xfff /* last valid descriptor */ ++ ++/* transmit channel status */ ++#define XS_CD_MASK 0x0fff /* current descriptor pointer */ ++#define XS_XS_MASK 0xf000 /* transmit state */ ++#define XS_XS_SHIFT 12 ++#define XS_XS_DISABLED 0x0000 /* disabled */ ++#define XS_XS_ACTIVE 0x1000 /* active */ ++#define XS_XS_IDLE 0x2000 /* idle wait */ ++#define XS_XS_STOPPED 0x3000 /* stopped */ ++#define XS_XS_SUSP 0x4000 /* suspend pending */ ++#define 
XS_XE_MASK 0xf0000 /* transmit errors */ ++#define XS_XE_SHIFT 16 ++#define XS_XE_NOERR 0x00000 /* no error */ ++#define XS_XE_DPE 0x10000 /* descriptor protocol error */ ++#define XS_XE_DFU 0x20000 /* data fifo underrun */ ++#define XS_XE_BEBR 0x30000 /* bus error on buffer read */ ++#define XS_XE_BEDA 0x40000 /* bus error on descriptor access */ ++#define XS_AD_MASK 0xfff00000 /* active descriptor */ ++#define XS_AD_SHIFT 20 ++ ++/* receive channel control */ ++#define RC_RE ((uint32)1 << 0) /* receive enable */ ++#define RC_RO_MASK 0xfe /* receive frame offset */ ++#define RC_RO_SHIFT 1 ++#define RC_FM ((uint32)1 << 8) /* direct fifo receive (pio) mode */ ++#define RC_SH ((uint32)1 << 9) /* separate rx header descriptor enable */ ++#define RC_OC ((uint32)1 << 10) /* overflow continue */ ++#define RC_PD ((uint32)1 << 11) /* parity check disable */ ++#define RC_AE ((uint32)3 << 16) /* address extension bits */ ++#define RC_AE_SHIFT 16 ++#define RC_BL_MASK 0x001C0000 /* BurstLen bits */ ++#define RC_BL_SHIFT 18 ++#define RC_PC_MASK 0x00E00000 /* Prefetch control */ ++#define RC_PC_SHIFT 21 ++#define RC_PT_MASK 0x03000000 /* Prefetch threshold */ ++#define RC_PT_SHIFT 24 ++ ++/* receive descriptor table pointer */ ++#define RP_LD_MASK 0xfff /* last valid descriptor */ ++ ++/* receive channel status */ ++#define RS_CD_MASK 0x0fff /* current descriptor pointer */ ++#define RS_RS_MASK 0xf000 /* receive state */ ++#define RS_RS_SHIFT 12 ++#define RS_RS_DISABLED 0x0000 /* disabled */ ++#define RS_RS_ACTIVE 0x1000 /* active */ ++#define RS_RS_IDLE 0x2000 /* idle wait */ ++#define RS_RS_STOPPED 0x3000 /* reserved */ ++#define RS_RE_MASK 0xf0000 /* receive errors */ ++#define RS_RE_SHIFT 16 ++#define RS_RE_NOERR 0x00000 /* no error */ ++#define RS_RE_DPE 0x10000 /* descriptor protocol error */ ++#define RS_RE_DFO 0x20000 /* data fifo overflow */ ++#define RS_RE_BEBW 0x30000 /* bus error on buffer write */ ++#define RS_RE_BEDA 0x40000 /* bus error on descriptor access */ 
++#define RS_AD_MASK 0xfff00000 /* active descriptor */ ++#define RS_AD_SHIFT 20 ++ ++/* fifoaddr */ ++#define FA_OFF_MASK 0xffff /* offset */ ++#define FA_SEL_MASK 0xf0000 /* select */ ++#define FA_SEL_SHIFT 16 ++#define FA_SEL_XDD 0x00000 /* transmit dma data */ ++#define FA_SEL_XDP 0x10000 /* transmit dma pointers */ ++#define FA_SEL_RDD 0x40000 /* receive dma data */ ++#define FA_SEL_RDP 0x50000 /* receive dma pointers */ ++#define FA_SEL_XFD 0x80000 /* transmit fifo data */ ++#define FA_SEL_XFP 0x90000 /* transmit fifo pointers */ ++#define FA_SEL_RFD 0xc0000 /* receive fifo data */ ++#define FA_SEL_RFP 0xd0000 /* receive fifo pointers */ ++#define FA_SEL_RSD 0xe0000 /* receive frame status data */ ++#define FA_SEL_RSP 0xf0000 /* receive frame status pointers */ ++ ++/* descriptor control flags */ ++#define CTRL_BC_MASK 0x00001fff /* buffer byte count, real data len must <= 4KB */ ++#define CTRL_AE ((uint32)3 << 16) /* address extension bits */ ++#define CTRL_AE_SHIFT 16 ++#define CTRL_PARITY ((uint32)3 << 18) /* parity bit */ ++#define CTRL_EOT ((uint32)1 << 28) /* end of descriptor table */ ++#define CTRL_IOC ((uint32)1 << 29) /* interrupt on completion */ ++#define CTRL_EOF ((uint32)1 << 30) /* end of frame */ ++#define CTRL_SOF ((uint32)1 << 31) /* start of frame */ ++ ++/* control flags in the range [27:20] are core-specific and not defined here */ ++#define CTRL_CORE_MASK 0x0ff00000 ++ ++/* 64 bits addressing */ ++ ++/* dma registers per channel(xmt or rcv) */ ++typedef volatile struct { ++ uint32 control; /* enable, et al */ ++ uint32 ptr; /* last descriptor posted to chip */ ++ uint32 addrlow; /* descriptor ring base address low 32-bits (8K aligned) */ ++ uint32 addrhigh; /* descriptor ring base address bits 63:32 (8K aligned) */ ++ uint32 status0; /* current descriptor, xmt state */ ++ uint32 status1; /* active descriptor, xmt error */ ++} dma64regs_t; ++ ++typedef volatile struct { ++ dma64regs_t tx; /* dma64 tx channel */ ++ dma64regs_t rx; /* dma64 
rx channel */ ++} dma64regp_t; ++ ++typedef volatile struct { /* diag access */ ++ uint32 fifoaddr; /* diag address */ ++ uint32 fifodatalow; /* low 32bits of data */ ++ uint32 fifodatahigh; /* high 32bits of data */ ++ uint32 pad; /* reserved */ ++} dma64diag_t; ++ ++/* ++ * DMA Descriptor ++ * Descriptors are only read by the hardware, never written back. ++ */ ++typedef volatile struct { ++ uint32 ctrl1; /* misc control bits */ ++ uint32 ctrl2; /* buffer count and address extension */ ++ uint32 addrlow; /* memory address of the date buffer, bits 31:0 */ ++ uint32 addrhigh; /* memory address of the date buffer, bits 63:32 */ ++} dma64dd_t; ++ ++/* ++ * Each descriptor ring must be 8kB aligned, and fit within a contiguous 8kB physical addresss. ++ */ ++#define D64RINGALIGN_BITS 13 ++#define D64MAXRINGSZ (1 << D64RINGALIGN_BITS) ++#define D64RINGBOUNDARY (1 << D64RINGALIGN_BITS) ++ ++#define D64MAXDD (D64MAXRINGSZ / sizeof (dma64dd_t)) ++ ++/* for cores with large descriptor ring support, descriptor ring size can be up to 4096 */ ++#define D64MAXDD_LARGE ((1 << 16) / sizeof (dma64dd_t)) ++ ++/* for cores with large descriptor ring support (4k descriptors), descriptor ring cannot cross ++ * 64K boundary ++ */ ++#define D64RINGBOUNDARY_LARGE (1 << 16) ++ ++/* ++ * Default DMA Burstlen values for USBRev >= 12 and SDIORev >= 11. ++ * When this field contains the value N, the burst length is 2**(N + 4) bytes. 
++ */ ++#define D64_DEF_USBBURSTLEN 2 ++#define D64_DEF_SDIOBURSTLEN 1 ++ ++ ++#ifndef D64_USBBURSTLEN ++#define D64_USBBURSTLEN DMA_BL_64 ++#endif ++#ifndef D64_SDIOBURSTLEN ++#define D64_SDIOBURSTLEN DMA_BL_32 ++#endif ++ ++/* transmit channel control */ ++#define D64_XC_XE 0x00000001 /* transmit enable */ ++#define D64_XC_SE 0x00000002 /* transmit suspend request */ ++#define D64_XC_LE 0x00000004 /* loopback enable */ ++#define D64_XC_FL 0x00000010 /* flush request */ ++#define D64_XC_MR_MASK 0x000000C0 /* Multiple outstanding reads */ ++#define D64_XC_MR_SHIFT 6 ++#define D64_XC_PD 0x00000800 /* parity check disable */ ++#define D64_XC_AE 0x00030000 /* address extension bits */ ++#define D64_XC_AE_SHIFT 16 ++#define D64_XC_BL_MASK 0x001C0000 /* BurstLen bits */ ++#define D64_XC_BL_SHIFT 18 ++#define D64_XC_PC_MASK 0x00E00000 /* Prefetch control */ ++#define D64_XC_PC_SHIFT 21 ++#define D64_XC_PT_MASK 0x03000000 /* Prefetch threshold */ ++#define D64_XC_PT_SHIFT 24 ++ ++/* transmit descriptor table pointer */ ++#define D64_XP_LD_MASK 0x00001fff /* last valid descriptor */ ++ ++/* transmit channel status */ ++#define D64_XS0_CD_MASK 0x00001fff /* current descriptor pointer */ ++#define D64_XS0_XS_MASK 0xf0000000 /* transmit state */ ++#define D64_XS0_XS_SHIFT 28 ++#define D64_XS0_XS_DISABLED 0x00000000 /* disabled */ ++#define D64_XS0_XS_ACTIVE 0x10000000 /* active */ ++#define D64_XS0_XS_IDLE 0x20000000 /* idle wait */ ++#define D64_XS0_XS_STOPPED 0x30000000 /* stopped */ ++#define D64_XS0_XS_SUSP 0x40000000 /* suspend pending */ ++ ++#define D64_XS1_AD_MASK 0x00001fff /* active descriptor */ ++#define D64_XS1_XE_MASK 0xf0000000 /* transmit errors */ ++#define D64_XS1_XE_SHIFT 28 ++#define D64_XS1_XE_NOERR 0x00000000 /* no error */ ++#define D64_XS1_XE_DPE 0x10000000 /* descriptor protocol error */ ++#define D64_XS1_XE_DFU 0x20000000 /* data fifo underrun */ ++#define D64_XS1_XE_DTE 0x30000000 /* data transfer error */ ++#define D64_XS1_XE_DESRE 0x40000000 /* 
descriptor read error */ ++#define D64_XS1_XE_COREE 0x50000000 /* core error */ ++ ++/* receive channel control */ ++#define D64_RC_RE 0x00000001 /* receive enable */ ++#define D64_RC_RO_MASK 0x000000fe /* receive frame offset */ ++#define D64_RC_RO_SHIFT 1 ++#define D64_RC_FM 0x00000100 /* direct fifo receive (pio) mode */ ++#define D64_RC_SH 0x00000200 /* separate rx header descriptor enable */ ++#define D64_RC_OC 0x00000400 /* overflow continue */ ++#define D64_RC_PD 0x00000800 /* parity check disable */ ++#define D64_RC_GE 0x00004000 /* Glom enable */ ++#define D64_RC_AE 0x00030000 /* address extension bits */ ++#define D64_RC_AE_SHIFT 16 ++#define D64_RC_BL_MASK 0x001C0000 /* BurstLen bits */ ++#define D64_RC_BL_SHIFT 18 ++#define D64_RC_PC_MASK 0x00E00000 /* Prefetch control */ ++#define D64_RC_PC_SHIFT 21 ++#define D64_RC_PT_MASK 0x03000000 /* Prefetch threshold */ ++#define D64_RC_PT_SHIFT 24 ++ ++/* flags for dma controller */ ++#define DMA_CTRL_PEN (1 << 0) /* partity enable */ ++#define DMA_CTRL_ROC (1 << 1) /* rx overflow continue */ ++#define DMA_CTRL_RXMULTI (1 << 2) /* allow rx scatter to multiple descriptors */ ++#define DMA_CTRL_UNFRAMED (1 << 3) /* Unframed Rx/Tx data */ ++#define DMA_CTRL_USB_BOUNDRY4KB_WAR (1 << 4) ++#define DMA_CTRL_DMA_AVOIDANCE_WAR (1 << 5) /* DMA avoidance WAR for 4331 */ ++#define DMA_CTRL_RXSINGLE (1 << 6) /* always single buffer */ ++ ++/* receive descriptor table pointer */ ++#define D64_RP_LD_MASK 0x00001fff /* last valid descriptor */ ++ ++/* receive channel status */ ++#define D64_RS0_CD_MASK 0x00001fff /* current descriptor pointer */ ++#define D64_RS0_RS_MASK 0xf0000000 /* receive state */ ++#define D64_RS0_RS_SHIFT 28 ++#define D64_RS0_RS_DISABLED 0x00000000 /* disabled */ ++#define D64_RS0_RS_ACTIVE 0x10000000 /* active */ ++#define D64_RS0_RS_IDLE 0x20000000 /* idle wait */ ++#define D64_RS0_RS_STOPPED 0x30000000 /* stopped */ ++#define D64_RS0_RS_SUSP 0x40000000 /* suspend pending */ ++ ++#define D64_RS1_AD_MASK 
0x0001ffff /* active descriptor */ ++#define D64_RS1_RE_MASK 0xf0000000 /* receive errors */ ++#define D64_RS1_RE_SHIFT 28 ++#define D64_RS1_RE_NOERR 0x00000000 /* no error */ ++#define D64_RS1_RE_DPO 0x10000000 /* descriptor protocol error */ ++#define D64_RS1_RE_DFU 0x20000000 /* data fifo overflow */ ++#define D64_RS1_RE_DTE 0x30000000 /* data transfer error */ ++#define D64_RS1_RE_DESRE 0x40000000 /* descriptor read error */ ++#define D64_RS1_RE_COREE 0x50000000 /* core error */ ++ ++/* fifoaddr */ ++#define D64_FA_OFF_MASK 0xffff /* offset */ ++#define D64_FA_SEL_MASK 0xf0000 /* select */ ++#define D64_FA_SEL_SHIFT 16 ++#define D64_FA_SEL_XDD 0x00000 /* transmit dma data */ ++#define D64_FA_SEL_XDP 0x10000 /* transmit dma pointers */ ++#define D64_FA_SEL_RDD 0x40000 /* receive dma data */ ++#define D64_FA_SEL_RDP 0x50000 /* receive dma pointers */ ++#define D64_FA_SEL_XFD 0x80000 /* transmit fifo data */ ++#define D64_FA_SEL_XFP 0x90000 /* transmit fifo pointers */ ++#define D64_FA_SEL_RFD 0xc0000 /* receive fifo data */ ++#define D64_FA_SEL_RFP 0xd0000 /* receive fifo pointers */ ++#define D64_FA_SEL_RSD 0xe0000 /* receive frame status data */ ++#define D64_FA_SEL_RSP 0xf0000 /* receive frame status pointers */ ++ ++/* descriptor control flags 1 */ ++#define D64_CTRL_COREFLAGS 0x0ff00000 /* core specific flags */ ++#define D64_CTRL1_EOT ((uint32)1 << 28) /* end of descriptor table */ ++#define D64_CTRL1_IOC ((uint32)1 << 29) /* interrupt on completion */ ++#define D64_CTRL1_EOF ((uint32)1 << 30) /* end of frame */ ++#define D64_CTRL1_SOF ((uint32)1 << 31) /* start of frame */ ++ ++/* descriptor control flags 2 */ ++#define D64_CTRL2_BC_MASK 0x00007fff /* buffer byte count. 
real data len must <= 16KB */ ++#define D64_CTRL2_AE 0x00030000 /* address extension bits */ ++#define D64_CTRL2_AE_SHIFT 16 ++#define D64_CTRL2_PARITY 0x00040000 /* parity bit */ ++ ++/* control flags in the range [27:20] are core-specific and not defined here */ ++#define D64_CTRL_CORE_MASK 0x0ff00000 ++ ++#define D64_RX_FRM_STS_LEN 0x0000ffff /* frame length mask */ ++#define D64_RX_FRM_STS_OVFL 0x00800000 /* RxOverFlow */ ++#define D64_RX_FRM_STS_DSCRCNT 0x0f000000 /* no. of descriptors used - 1, d11corerev >= 22 */ ++#define D64_RX_FRM_STS_DATATYPE 0xf0000000 /* core-dependent data type */ ++ ++/* receive frame status */ ++typedef volatile struct { ++ uint16 len; ++ uint16 flags; ++} dma_rxh_t; ++ ++#endif /* _sbhnddma_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/sbsocram.h b/drivers/net/ethernet/broadcom/gmac/src/include/sbsocram.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/sbsocram.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/sbsocram.h 2017-11-09 17:53:44.007303000 +0800 +@@ -0,0 +1,193 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * BCM47XX Sonics SiliconBackplane embedded ram core ++ * ++ * $Id: sbsocram.h 271781 2011-07-13 20:00:06Z $ ++ */ ++ ++#ifndef _SBSOCRAM_H ++#define _SBSOCRAM_H ++ ++#ifndef _LANGUAGE_ASSEMBLY ++ ++/* cpp contortions to concatenate w/arg prescan */ ++#ifndef PAD ++#define _PADLINE(line) pad ## line ++#define _XSTR(line) _PADLINE(line) ++#define PAD _XSTR(__LINE__) ++#endif /* PAD */ ++ ++/* Memcsocram core registers */ ++typedef volatile struct sbsocramregs { ++ uint32 coreinfo; ++ uint32 bwalloc; ++ uint32 extracoreinfo; ++ uint32 biststat; ++ uint32 bankidx; ++ uint32 standbyctrl; ++ ++ uint32 errlogstatus; /* rev 6 */ ++ uint32 errlogaddr; /* rev 6 */ ++ /* used for patching rev 3 & 5 */ ++ uint32 cambankidx; ++ uint32 cambankstandbyctrl; ++ uint32 cambankpatchctrl; ++ uint32 cambankpatchtblbaseaddr; ++ uint32 cambankcmdreg; ++ uint32 cambankdatareg; ++ uint32 cambankmaskreg; ++ uint32 PAD[1]; ++ uint32 bankinfo; /* corev 8 */ ++ uint32 PAD[15]; ++ uint32 extmemconfig; ++ uint32 extmemparitycsr; ++ uint32 extmemparityerrdata; ++ uint32 extmemparityerrcnt; ++ uint32 extmemwrctrlandsize; ++ uint32 PAD[84]; ++ uint32 workaround; ++ uint32 pwrctl; /* corerev >= 2 */ ++ uint32 PAD[133]; ++ uint32 sr_control; /* corerev >= 15 */ ++ uint32 sr_status; /* corerev >= 15 */ ++ uint32 sr_address; /* corerev >= 15 */ ++ uint32 sr_data; /* corerev >= 15 */ ++} sbsocramregs_t; ++ ++#endif /* _LANGUAGE_ASSEMBLY */ ++ ++/* Register offsets */ ++#define SR_COREINFO 0x00 ++#define SR_BWALLOC 0x04 ++#define SR_BISTSTAT 0x0c ++#define SR_BANKINDEX 0x10 ++#define SR_BANKSTBYCTL 0x14 ++#define SR_PWRCTL 0x1e8 ++ ++/* Coreinfo register */ 
++#define SRCI_PT_MASK 0x00070000 /* corerev >= 6; port type[18:16] */ ++#define SRCI_PT_SHIFT 16 ++/* port types : SRCI_PT__ */ ++#define SRCI_PT_OCP_OCP 0 ++#define SRCI_PT_AXI_OCP 1 ++#define SRCI_PT_ARM7AHB_OCP 2 ++#define SRCI_PT_CM3AHB_OCP 3 ++#define SRCI_PT_AXI_AXI 4 ++#define SRCI_PT_AHB_AXI 5 ++/* corerev >= 3 */ ++#define SRCI_LSS_MASK 0x00f00000 ++#define SRCI_LSS_SHIFT 20 ++#define SRCI_LRS_MASK 0x0f000000 ++#define SRCI_LRS_SHIFT 24 ++ ++/* In corerev 0, the memory size is 2 to the power of the ++ * base plus 16 plus to the contents of the memsize field plus 1. ++ */ ++#define SRCI_MS0_MASK 0xf ++#define SR_MS0_BASE 16 ++ ++/* ++ * In corerev 1 the bank size is 2 ^ the bank size field plus 14, ++ * the memory size is number of banks times bank size. ++ * The same applies to rom size. ++ */ ++#define SRCI_ROMNB_MASK 0xf000 ++#define SRCI_ROMNB_SHIFT 12 ++#define SRCI_ROMBSZ_MASK 0xf00 ++#define SRCI_ROMBSZ_SHIFT 8 ++#define SRCI_SRNB_MASK 0xf0 ++#define SRCI_SRNB_SHIFT 4 ++#define SRCI_SRBSZ_MASK 0xf ++#define SRCI_SRBSZ_SHIFT 0 ++ ++#define SR_BSZ_BASE 14 ++ ++/* Standby control register */ ++#define SRSC_SBYOVR_MASK 0x80000000 ++#define SRSC_SBYOVR_SHIFT 31 ++#define SRSC_SBYOVRVAL_MASK 0x60000000 ++#define SRSC_SBYOVRVAL_SHIFT 29 ++#define SRSC_SBYEN_MASK 0x01000000 /* rev >= 3 */ ++#define SRSC_SBYEN_SHIFT 24 ++ ++/* Power control register */ ++#define SRPC_PMU_STBYDIS_MASK 0x00000010 /* rev >= 3 */ ++#define SRPC_PMU_STBYDIS_SHIFT 4 ++#define SRPC_STBYOVRVAL_MASK 0x00000008 ++#define SRPC_STBYOVRVAL_SHIFT 3 ++#define SRPC_STBYOVR_MASK 0x00000007 ++#define SRPC_STBYOVR_SHIFT 0 ++ ++/* Extra core capability register */ ++#define SRECC_NUM_BANKS_MASK 0x000000F0 ++#define SRECC_NUM_BANKS_SHIFT 4 ++#define SRECC_BANKSIZE_MASK 0x0000000F ++#define SRECC_BANKSIZE_SHIFT 0 ++ ++#define SRECC_BANKSIZE(value) (1 << (value)) ++ ++/* CAM bank patch control */ ++#define SRCBPC_PATCHENABLE 0x80000000 ++ ++#define SRP_ADDRESS 0x0001FFFC ++#define SRP_VALID 0x8000 
++ ++/* CAM bank command reg */ ++#define SRCMD_WRITE 0x00020000 ++#define SRCMD_READ 0x00010000 ++#define SRCMD_DONE 0x80000000 ++ ++#define SRCMD_DONE_DLY 1000 ++ ++/* bankidx and bankinfo reg defines corerev >= 8 */ ++#define SOCRAM_BANKINFO_SZMASK 0x7f ++#define SOCRAM_BANKIDX_ROM_MASK 0x100 ++ ++#define SOCRAM_BANKIDX_MEMTYPE_SHIFT 8 ++/* socram bankinfo memtype */ ++#define SOCRAM_MEMTYPE_RAM 0 ++#define SOCRAM_MEMTYPE_R0M 1 ++#define SOCRAM_MEMTYPE_DEVRAM 2 ++ ++#define SOCRAM_BANKINFO_REG 0x40 ++#define SOCRAM_BANKIDX_REG 0x10 ++#define SOCRAM_BANKINFO_STDBY_MASK 0x400 ++#define SOCRAM_BANKINFO_STDBY_TIMER 0x800 ++ ++/* bankinfo rev >= 10 */ ++#define SOCRAM_BANKINFO_DEVRAMSEL_SHIFT 13 ++#define SOCRAM_BANKINFO_DEVRAMSEL_MASK 0x2000 ++#define SOCRAM_BANKINFO_DEVRAMPRO_SHIFT 14 ++#define SOCRAM_BANKINFO_DEVRAMPRO_MASK 0x4000 ++#define SOCRAM_BANKINFO_SLPSUPP_SHIFT 15 ++#define SOCRAM_BANKINFO_SLPSUPP_MASK 0x8000 ++#define SOCRAM_BANKINFO_RETNTRAM_SHIFT 16 ++#define SOCRAM_BANKINFO_RETNTRAM_MASK 0x00010000 ++#define SOCRAM_BANKINFO_PDASZ_SHIFT 17 ++#define SOCRAM_BANKINFO_PDASZ_MASK 0x003E0000 ++#define SOCRAM_BANKINFO_DEVRAMREMAP_SHIFT 24 ++#define SOCRAM_BANKINFO_DEVRAMREMAP_MASK 0x01000000 ++ ++/* extracoreinfo register */ ++#define SOCRAM_DEVRAMBANK_MASK 0xF000 ++#define SOCRAM_DEVRAMBANK_SHIFT 12 ++ ++/* bank info to calculate bank size */ ++#define SOCRAM_BANKINFO_SZBASE 8192 ++#define SOCRAM_BANKSIZE_SHIFT 13 /* SOCRAM_BANKINFO_SZBASE */ ++ ++ ++#endif /* _SBSOCRAM_H */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/sgmiiplus2_serdes.h b/drivers/net/ethernet/broadcom/gmac/src/include/sgmiiplus2_serdes.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/sgmiiplus2_serdes.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/sgmiiplus2_serdes.h 2017-11-09 17:53:44.008297000 +0800 +@@ -0,0 
+1,28 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * These routines provide access to the serdes ++ * ++ */ ++ ++#ifndef _SGMIIPLUS2_SERDES_H_ ++#define _SGMIIPLUS2_SERDES_H_ ++ ++#include ++ ++extern void sgmiiplus2_serdes_reset(uint eth_num, uint phyaddr); ++extern int sgmiiplus2_serdes_init(uint eth_num, uint phyaddr); ++ ++#endif /* _SGMIIPLUS2_SERDES_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/siutils.h b/drivers/net/ethernet/broadcom/gmac/src/include/siutils.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/siutils.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/siutils.h 2017-11-09 17:53:44.009295000 +0800 +@@ -0,0 +1,243 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Misc utility routines for accessing the SOC Interconnects ++ * of Broadcom HNBU chips. ++ * ++ * $Id: siutils.h 323456 2012-03-24 07:17:39Z $ ++ */ ++ ++#ifndef _siutils_h_ ++#define _siutils_h_ ++ ++#if defined(WLC_HIGH) && !defined(WLC_LOW) ++#include "bcm_rpc.h" ++#endif ++/* ++ * Data structure to export all chip specific common variables ++ * public (read-only) portion of siutils handle returned by si_attach() ++ */ ++struct si_pub { ++ uint socitype; /* SOCI_SB, SOCI_AI */ ++ ++ uint bustype; /* SI_BUS, PCI_BUS */ ++ uint buscoretype; /* PCI_CORE_ID, PCIE_CORE_ID, PCMCIA_CORE_ID */ ++ uint buscorerev; /* buscore rev */ ++ uint buscoreidx; /* buscore index */ ++ int ccrev; /* chip common core rev */ ++ uint32 cccaps; /* chip common capabilities */ ++ uint32 cccaps_ext; /* chip common capabilities extension */ ++ int pmurev; /* pmu core rev */ ++ uint32 pmucaps; /* pmu capabilities */ ++ uint boardtype; /* board type */ ++ uint boardrev; /* board rev */ ++ uint boardvendor; /* board vendor */ ++ uint boardflags; /* board flags */ ++ uint boardflags2; /* board flags2 */ ++ uint chip; /* chip number */ ++ uint chiprev; /* chip revision */ ++ uint chippkg; /* chip package option */ ++ uint32 chipst; /* chip status */ ++ bool issim; /* chip is in simulation or emulation */ ++ uint socirev; /* SOC interconnect rev */ ++ bool pci_pr32414; ++ spinlock_t sih_lock; ++ ++#if defined(WLC_HIGH) && !defined(WLC_LOW) ++ rpc_info_t *rpc; ++#endif 
++}; ++ ++/* for HIGH_ONLY driver, the si_t must be writable to allow states sync from BMAC to HIGH driver ++ * for monolithic driver, it is readonly to prevent accident change ++ */ ++#if defined(WLC_HIGH) && !defined(WLC_LOW) ++typedef struct si_pub si_t; ++#else ++typedef const struct si_pub si_t; ++#endif ++ ++#ifdef ATE_BUILD ++typedef struct _ate_params { ++ void* wl; ++ uint8 gpio_input; ++ uint8 gpio_output; ++ bool cmd_proceed; ++ uint16 cmd_idx; ++ bool ate_cmd_done; ++} ate_params_t; ++#endif /* ATE_BUILD */ ++ ++/* ++ * Many of the routines below take an 'sih' handle as their first arg. ++ * Allocate this by calling si_attach(). Free it by calling si_detach(). ++ * At any one time, the sih is logically focused on one particular si core ++ * (the "current core"). ++ * Use si_setcore() or si_setcoreidx() to change the association to another core. ++ */ ++#define BADIDX (SI_MAXCORES + 1) ++ ++/* clkctl xtal what flags */ ++#define XTAL 0x1 /* primary crystal oscillator (2050) */ ++#define PLL 0x2 /* main chip pll */ ++ ++/* clkctl clk mode */ ++#define CLK_FAST 0 /* force fast (pll) clock */ ++#define CLK_DYNAMIC 2 /* enable dynamic clock control */ ++ ++/* GPIO usage priorities */ ++#define GPIO_DRV_PRIORITY 0 /* Driver */ ++#define GPIO_APP_PRIORITY 1 /* Application */ ++#define GPIO_HI_PRIORITY 2 /* Highest priority. 
Ignore GPIO reservation */ ++ ++/* GPIO pull up/down */ ++#define GPIO_PULLUP 0 ++#define GPIO_PULLDN 1 ++ ++/* GPIO event regtype */ ++#define GPIO_REGEVT 0 /* GPIO register event */ ++#define GPIO_REGEVT_INTMSK 1 /* GPIO register event int mask */ ++#define GPIO_REGEVT_INTPOL 2 /* GPIO register event int polarity */ ++ ++/* device path */ ++#define SI_DEVPATH_BUFSZ 16 /* min buffer size in bytes */ ++ ++/* SI routine enumeration: to be used by update function with multiple hooks */ ++#define SI_DOATTACH 1 ++#define SI_PCIDOWN 2 ++#define SI_PCIUP 3 ++ ++#if defined(BCMQT) ++#define ISSIM_ENAB(sih) ((sih)->issim) ++#else ++#define ISSIM_ENAB(sih) 0 ++#endif ++ ++/* PMU clock/power control */ ++#if defined(BCMPMUCTL) ++#define PMUCTL_ENAB(sih) (BCMPMUCTL) ++#else ++#define PMUCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PMU) ++#endif ++ ++/* chipcommon clock/power control (exclusive with PMU's) */ ++#if defined(BCMPMUCTL) && BCMPMUCTL ++#define CCCTL_ENAB(sih) (0) ++#define CCPLL_ENAB(sih) (0) ++#else ++#define CCCTL_ENAB(sih) ((sih)->cccaps & CC_CAP_PWR_CTL) ++#define CCPLL_ENAB(sih) ((sih)->cccaps & CC_CAP_PLL_MASK) ++#endif ++ ++typedef void (*gpio_handler_t)(uint32 stat, void *arg); ++/* External BT Coex enable mask */ ++#define CC_BTCOEX_EN_MASK 0x01 ++/* External PA enable mask */ ++#define GPIO_CTRL_EPA_EN_MASK 0x40 ++/* WL/BT control enable mask */ ++#define GPIO_CTRL_5_6_EN_MASK 0x60 ++#define GPIO_CTRL_7_6_EN_MASK 0xC0 ++#define GPIO_OUT_7_EN_MASK 0x80 ++ ++ ++/* === exported functions === */ ++extern si_t *si_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, ++ void *sdh, char **vars, uint *varsz); ++extern void si_detach(si_t *sih); ++ ++extern uint si_corelist(si_t *sih, uint coreid[]); ++extern uint si_coreid(si_t *sih); ++extern uint si_flag(si_t *sih); ++extern uint si_intflag(si_t *sih); ++extern uint si_coreidx(si_t *sih); ++extern uint si_coreunit(si_t *sih); ++extern uint si_corevendor(si_t *sih); ++extern uint si_corerev(si_t *sih); ++extern 
void *si_osh(si_t *sih); ++extern void si_setosh(si_t *sih, osl_t *osh); ++extern uint si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); ++extern void *si_coreregs(si_t *sih); ++extern uint si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val); ++extern uint32 si_core_cflags(si_t *sih, uint32 mask, uint32 val); ++extern void si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); ++extern uint32 si_core_sflags(si_t *sih, uint32 mask, uint32 val); ++#ifdef WLC_HIGH_ONLY ++extern bool wlc_bmac_iscoreup(si_t *sih); ++#define si_iscoreup(sih) wlc_bmac_iscoreup(sih) ++#else ++extern bool si_iscoreup(si_t *sih); ++#endif /* __CONFIG_USBAP__ */ ++extern uint si_findcoreidx(si_t *sih, uint coreid, uint coreunit); ++extern void *si_setcoreidx(si_t *sih, uint coreidx); ++extern void *si_setcore(si_t *sih, uint coreid, uint coreunit); ++extern void *si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val); ++extern void si_restore_core(si_t *sih, uint coreid, uint intr_val); ++extern int si_numaddrspaces(si_t *sih); ++extern uint32 si_addrspace(si_t *sih, uint asidx); ++extern uint32 si_addrspacesize(si_t *sih, uint asidx); ++extern void si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size); ++extern int si_corebist(si_t *sih); ++extern void si_core_reset(si_t *sih, uint32 bits, uint32 resetbits); ++extern void si_core_disable(si_t *sih, uint32 bits); ++extern uint32 si_clock_rate(uint32 pll_type, uint32 n, uint32 m); ++extern uint32 si_clock(si_t *sih); ++extern void si_setint(si_t *sih, int siflag); ++extern bool si_backplane64(si_t *sih); ++extern void si_clkctl_init(si_t *sih); ++extern bool si_clkctl_cc(si_t *sih, uint mode); ++extern int si_clkctl_xtal(si_t *sih, uint what, bool on); ++ ++extern uint32 si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority); ++extern uint32 si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority); ++ ++/* Wake-on-wireless-LAN (WOWL) */ ++extern bool 
si_pci_pmecap(si_t *sih); ++struct osl_info; ++extern bool si_pci_fastpmecap(struct osl_info *osh); ++ ++/* Fab-id information */ ++#define DEFAULT_FAB 0x0 /* Original/first fab used for this chip */ ++#define CSM_FAB7 0x1 /* CSM Fab7 chip */ ++#define TSMC_FAB12 0x2 /* TSMC Fab12/Fab14 chip */ ++#define SMIC_FAB4 0x3 /* SMIC Fab4 chip */ ++ ++/* ++ * Build device path. Path size must be >= SI_DEVPATH_BUFSZ. ++ * The returned path is NULL terminated and has trailing '/'. ++ * Return 0 on success, nonzero otherwise. ++ */ ++extern int si_devpath(si_t *sih, char *path, int size); ++/* Read variable with prepending the devpath to the name */ ++extern int si_getdevpathintvar(si_t *sih, const char *name); ++extern char *si_coded_devpathvar(si_t *sih, char *varname, int var_len, const char *name); ++ ++ ++extern void si_war42780_clkreq(si_t *sih, bool clkreq); ++extern void si_pcie_extendL1timer(si_t *sih, bool extend); ++ ++/* === debug routines === */ ++ ++#ifdef BCMDBG ++extern void si_view(si_t *sih, bool verbose); ++extern void si_viewall(si_t *sih, bool verbose); ++#endif ++ ++#if defined(BCMDBG) ++struct bcmstrbuf; ++extern void si_dumpregs(si_t *sih, struct bcmstrbuf *b); ++#endif ++ ++ ++#endif /* _siutils_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/trxhdr.h b/drivers/net/ethernet/broadcom/gmac/src/include/trxhdr.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/trxhdr.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/trxhdr.h 2017-11-09 17:53:44.010293000 +0800 +@@ -0,0 +1,86 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * TRX image file header format. ++ * ++ * $Id: trxhdr.h 314841 2012-02-14 18:28:33Z $ ++ */ ++ ++#ifndef _TRX_HDR_H ++#define _TRX_HDR_H ++ ++#include ++ ++#define TRX_MAGIC 0x30524448 /* "HDR0" */ ++#define TRX_MAX_LEN 0x3B0000 /* Max length */ ++#define TRX_NO_HEADER 1 /* Do not write TRX header */ ++#define TRX_GZ_FILES 0x2 /* Contains up to TRX_MAX_OFFSET individual gzip files */ ++#define TRX_EMBED_UCODE 0x8 /* Trx contains embedded ucode image */ ++#define TRX_ROMSIM_IMAGE 0x10 /* Trx contains ROM simulation image */ ++#define TRX_UNCOMP_IMAGE 0x20 /* Trx contains uncompressed rtecdc.bin image */ ++#define TRX_BOOTLOADER 0x40 /* the image is a bootloader */ ++ ++#define TRX_V1 1 ++#define TRX_V1_MAX_OFFSETS 3 /* V1: Max number of individual files */ ++ ++#ifndef BCMTRXV2 ++#define TRX_VERSION TRX_V1 /* Version 1 */ ++#define TRX_MAX_OFFSET TRX_V1_MAX_OFFSETS ++#endif ++ ++/* BMAC Host driver/application like bcmdl need to support both Ver 1 as well as ++ * Ver 2 of trx header. To make it generic, trx_header is structure is modified ++ * as below where size of "offsets" field will vary as per the TRX version. ++ * Currently, BMAC host driver and bcmdl are modified to support TRXV2 as well. ++ * To make sure, other applications like "dhdl" which are yet to be enhanced to support ++ * TRXV2 are not broken, new macro and structure defintion take effect only when BCMTRXV2 ++ * is defined. 
++ */ ++struct trx_header { ++ uint32 magic; /* "HDR0" */ ++ uint32 len; /* Length of file including header */ ++ uint32 crc32; /* 32-bit CRC from flag_version to end of file */ ++ uint32 flag_version; /* 0:15 flags, 16:31 version */ ++#ifndef BCMTRXV2 ++ uint32 offsets[TRX_MAX_OFFSET]; /* Offsets of partitions from start of header */ ++#else ++ uint32 offsets[1]; /* Offsets of partitions from start of header */ ++#endif ++}; ++ ++#ifdef BCMTRXV2 ++#define TRX_VERSION TRX_V2 /* Version 2 */ ++#define TRX_MAX_OFFSET TRX_V2_MAX_OFFSETS ++ ++#define TRX_V2 2 ++/* V2: Max number of individual files ++ * To support SDR signature + Config data region ++ */ ++#define TRX_V2_MAX_OFFSETS 5 ++#define SIZEOF_TRXHDR_V1 (sizeof(struct trx_header)+(TRX_V1_MAX_OFFSETS-1)*sizeof(uint32)) ++#define SIZEOF_TRXHDR_V2 (sizeof(struct trx_header)+(TRX_V2_MAX_OFFSETS-1)*sizeof(uint32)) ++#define TRX_VER(trx) (trx->flag_version>>16) ++#define ISTRX_V1(trx) (TRX_VER(trx) == TRX_V1) ++#define ISTRX_V2(trx) (TRX_VER(trx) == TRX_V2) ++/* For V2, return size of V2 size: others, return V1 size */ ++#define SIZEOF_TRX(trx) (ISTRX_V2(trx) ? SIZEOF_TRXHDR_V2: SIZEOF_TRXHDR_V1) ++#else ++#define SIZEOF_TRX(trx) (sizeof(struct trx_header)) ++#endif /* BCMTRXV2 */ ++ ++/* Compatibility */ ++typedef struct trx_header TRXHDR, *PTRXHDR; ++ ++#endif /* _TRX_HDR_H */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/typedefs.h b/drivers/net/ethernet/broadcom/gmac/src/include/typedefs.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/typedefs.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/typedefs.h 2017-11-09 17:53:44.011290000 +0800 +@@ -0,0 +1,447 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. 
++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * $Id: typedefs.h 286783 2011-09-29 06:18:57Z $ ++ */ ++ ++#ifndef _TYPEDEFS_H_ ++#define _TYPEDEFS_H_ ++ ++#ifdef SITE_TYPEDEFS ++ ++/* ++ * Define SITE_TYPEDEFS in the compile to include a site-specific ++ * typedef file "site_typedefs.h". ++ * ++ * If SITE_TYPEDEFS is not defined, then the code section below makes ++ * inferences about the compile environment based on defined symbols and ++ * possibly compiler pragmas. ++ * ++ * Following these two sections is the Default Typedefs section. ++ * This section is only processed if USE_TYPEDEF_DEFAULTS is ++ * defined. This section has a default set of typedefs and a few ++ * preprocessor symbols (TRUE, FALSE, NULL, ...). ++ */ ++ ++#include "site_typedefs.h" ++ ++#else ++ ++/* ++ * Infer the compile environment based on preprocessor symbols and pragmas. ++ * Override type definitions as needed, and include configuration-dependent ++ * header files to define types. ++ */ ++ ++#ifdef __cplusplus ++ ++#define TYPEDEF_BOOL ++#ifndef FALSE ++#define FALSE false ++#endif ++#ifndef TRUE ++#define TRUE true ++#endif ++ ++#else /* ! 
__cplusplus */ ++ ++#if defined(_WIN32) ++ ++#define TYPEDEF_BOOL ++typedef unsigned char bool; /* consistent w/BOOL */ ++ ++#endif /* _WIN32 */ ++ ++#endif /* ! __cplusplus */ ++ ++#if defined(_WIN64) && !defined(EFI) ++/* use the Windows ULONG_PTR type when compiling for 64 bit */ ++#include ++#define TYPEDEF_UINTPTR ++typedef ULONG_PTR uintptr; ++#elif defined(__x86_64__) ++#define TYPEDEF_UINTPTR ++typedef unsigned long long int uintptr; ++#endif ++ ++ ++#if defined(_MINOSL_) ++#define _NEED_SIZE_T_ ++#endif ++ ++#if defined(EFI) && !defined(_WIN64) ++#define _NEED_SIZE_T_ ++#endif ++ ++#if defined(TARGETOS_nucleus) ++/* for 'size_t' type */ ++#include ++ ++/* float_t types conflict with the same typedefs from the standard ANSI-C ++** math.h header file. Don't re-typedef them here. ++*/ ++#define TYPEDEF_FLOAT_T ++#endif /* TARGETOS_nucleus */ ++ ++#if defined(_NEED_SIZE_T_) ++typedef long unsigned int size_t; ++#endif ++ ++#ifdef _MSC_VER /* Microsoft C */ ++#define TYPEDEF_INT64 ++#define TYPEDEF_UINT64 ++typedef signed __int64 int64; ++typedef unsigned __int64 uint64; ++#endif ++ ++#if defined(MACOSX) ++#define TYPEDEF_BOOL ++#endif ++ ++#if defined(__NetBSD__) ++#define TYPEDEF_BOOL ++#ifndef _KERNEL ++#include ++#endif ++#define TYPEDEF_UINT ++#define TYPEDEF_USHORT ++#define TYPEDEF_ULONG ++#endif /* defined(__NetBSD__) */ ++ ++#if defined(__sparc__) ++#define TYPEDEF_ULONG ++#endif ++ ++ ++#ifdef linux ++/* ++ * If this is either a Linux hybrid build or the per-port code of a hybrid build ++ * then use the Linux header files to get some of the typedefs. Otherwise, define ++ * them entirely in this file. We can't always define the types because we get ++ * a duplicate typedef error; there is no way to "undefine" a typedef. ++ * We know when it's per-port code because each file defines LINUX_PORT at the top. 
++ */ ++#if !defined(LINUX_HYBRID) || defined(LINUX_PORT) ++#define TYPEDEF_UINT ++#ifndef TARGETENV_android ++#define TYPEDEF_USHORT ++#define TYPEDEF_ULONG ++#endif /* TARGETENV_android */ ++#ifdef __KERNEL__ ++#include ++#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 19)) ++#define TYPEDEF_BOOL ++#endif /* >= 2.6.19 */ ++/* special detection for 2.6.18-128.7.1.0.1.el5 */ ++#if (LINUX_VERSION_CODE == KERNEL_VERSION(2, 6, 18)) ++#include ++#ifdef noinline_for_stack ++#define TYPEDEF_BOOL ++#endif ++#endif /* == 2.6.18 */ ++#endif /* __KERNEL__ */ ++#endif /* !defined(LINUX_HYBRID) || defined(LINUX_PORT) */ ++#endif /* linux */ ++ ++#if defined(__ECOS) ++#define TYPEDEF_UCHAR ++#define TYPEDEF_UINT ++#define TYPEDEF_USHORT ++#define TYPEDEF_ULONG ++#define TYPEDEF_BOOL ++#endif ++ ++#if !defined(linux) && !defined(_WIN32) && !defined(_CFE_) && !defined(_MINOSL_) && \ ++ !defined(__DJGPP__) && !defined(__ECOS) && !defined(__BOB__) && \ ++ !defined(TARGETOS_nucleus) && !defined(EFI) && !defined(__FreeBSD__) ++#define TYPEDEF_UINT ++#define TYPEDEF_USHORT ++#endif ++ ++ ++/* Do not support the (u)int64 types with strict ansi for GNU C */ ++#if defined(__GNUC__) && defined(__STRICT_ANSI__) ++#define TYPEDEF_INT64 ++#define TYPEDEF_UINT64 ++#endif ++ ++/* ICL accepts unsigned 64 bit type only, and complains in ANSI mode ++ * for signed or unsigned ++ */ ++#if defined(__ICL) ++ ++#define TYPEDEF_INT64 ++ ++#if defined(__STDC__) ++#define TYPEDEF_UINT64 ++#endif ++ ++#endif /* __ICL */ ++ ++#if !defined(_WIN32) && !defined(_CFE_) && !defined(_MINOSL_) && !defined(__DJGPP__) && \ ++ !defined(__BOB__) && !defined(TARGETOS_nucleus) && !defined(EFI) ++ ++/* pick up ushort & uint from standard types.h */ ++#if defined(linux) && defined(__KERNEL__) ++ ++/* See note above */ ++#if !defined(LINUX_HYBRID) || defined(LINUX_PORT) ++#ifdef USER_MODE ++#include ++#else ++#include /* sys/types.h and linux/types.h are oil and water */ ++#endif /* USER_MODE */ ++#endif /* 
!defined(LINUX_HYBRID) || defined(LINUX_PORT) */ ++ ++#else ++ ++#if defined(__ECOS) ++#include ++#include ++#include ++#endif ++ ++#include ++ ++#endif /* linux && __KERNEL__ */ ++ ++#endif ++ ++#ifdef CONFIG_CPU_BIG_ENDIAN ++#define IL_BIGENDIAN ++#else ++#ifdef IL_BIGENDIAN ++#error "IL_BIGENDIAN was defined for a little-endian compile" ++#endif ++#endif /* CONFIG_CPU_BIG_ENDIAN */ ++ ++#if defined(MACOSX) ++ ++#ifdef __BIG_ENDIAN__ ++#define IL_BIGENDIAN ++#else ++#ifdef IL_BIGENDIAN ++#error "IL_BIGENDIAN was defined for a little-endian compile" ++#endif ++#endif /* __BIG_ENDIAN__ */ ++ ++#if !defined(__cplusplus) ++ ++#if defined(__i386__) ++typedef unsigned char bool; ++#else ++typedef unsigned int bool; ++#endif ++#define TYPE_BOOL 1 ++enum { ++ false = 0, ++ true = 1 ++}; ++ ++#if defined(KERNEL) ++#include ++#endif /* KERNEL */ ++ ++#endif /* __cplusplus */ ++ ++#endif /* MACOSX */ ++ ++ ++/* use the default typedefs in the next section of this file */ ++#define USE_TYPEDEF_DEFAULTS ++ ++#endif /* SITE_TYPEDEFS */ ++ ++ ++/* ++ * Default Typedefs ++ */ ++ ++#ifdef USE_TYPEDEF_DEFAULTS ++#undef USE_TYPEDEF_DEFAULTS ++ ++#ifndef TYPEDEF_BOOL ++typedef /* @abstract@ */ unsigned char bool; ++#endif ++ ++/* define uchar, ushort, uint, ulong */ ++ ++#ifndef TYPEDEF_UCHAR ++typedef unsigned char uchar; ++#endif ++ ++#ifndef TYPEDEF_USHORT ++typedef unsigned short ushort; ++#endif ++ ++#ifndef TYPEDEF_UINT ++typedef unsigned int uint; ++#endif ++ ++#ifndef TYPEDEF_ULONG ++typedef unsigned long ulong; ++#endif ++ ++/* define [u]int8/16/32/64, uintptr */ ++ ++#ifndef TYPEDEF_UINT8 ++typedef unsigned char uint8; ++#endif ++ ++#ifndef TYPEDEF_UINT16 ++typedef unsigned short uint16; ++#endif ++ ++#ifndef TYPEDEF_UINT32 ++typedef unsigned int uint32; ++#endif ++ ++#ifndef TYPEDEF_UINT64 ++typedef unsigned long long uint64; ++#endif ++ ++#ifndef TYPEDEF_UINTPTR ++typedef unsigned int uintptr; ++#endif ++ ++#ifndef TYPEDEF_INT8 ++typedef signed char int8; ++#endif ++ 
++#ifndef TYPEDEF_INT16 ++typedef signed short int16; ++#endif ++ ++#ifndef TYPEDEF_INT32 ++typedef signed int int32; ++#endif ++ ++#ifndef TYPEDEF_INT64 ++typedef signed long long int64; ++#endif ++ ++/* define float32/64, float_t */ ++ ++#ifndef TYPEDEF_FLOAT32 ++typedef float float32; ++#endif ++ ++#ifndef TYPEDEF_FLOAT64 ++typedef double float64; ++#endif ++ ++/* ++ * abstracted floating point type allows for compile time selection of ++ * single or double precision arithmetic. Compiling with -DFLOAT32 ++ * selects single precision; the default is double precision. ++ */ ++ ++#ifndef TYPEDEF_FLOAT_T ++ ++#if defined(FLOAT32) ++typedef float32 float_t; ++#else /* default to double precision floating point */ ++typedef float64 float_t; ++#endif ++ ++#endif /* TYPEDEF_FLOAT_T */ ++ ++/* define macro values */ ++ ++#ifndef FALSE ++#define FALSE 0 ++#endif ++ ++#ifndef TRUE ++#define TRUE 1 /* TRUE */ ++#endif ++ ++#ifndef NULL ++#define NULL 0 ++#endif ++ ++#ifndef OFF ++#define OFF 0 ++#endif ++ ++#ifndef ON ++#define ON 1 /* ON = 1 */ ++#endif ++ ++#define AUTO (-1) /* Auto = -1 */ ++ ++/* define PTRSZ, INLINE */ ++ ++#ifndef PTRSZ ++#define PTRSZ sizeof(char*) ++#endif ++ ++ ++/* Detect compiler type. */ ++#ifdef _MSC_VER ++ #define BWL_COMPILER_MICROSOFT ++#elif defined(__GNUC__) || defined(__lint) ++ #define BWL_COMPILER_GNU ++#elif defined(__CC_ARM) && __CC_ARM ++ #define BWL_COMPILER_ARMCC ++#else ++ #error "Unknown compiler!" 
++#endif /* _MSC_VER */ ++ ++ ++#ifndef INLINE ++ #if defined(BWL_COMPILER_MICROSOFT) ++ #define INLINE __inline ++ #elif defined(BWL_COMPILER_GNU) ++ #define INLINE __inline__ ++ #elif defined(BWL_COMPILER_ARMCC) ++ #define INLINE __inline ++ #else ++ #define INLINE ++ #endif /* _MSC_VER */ ++#endif /* INLINE */ ++ ++#undef TYPEDEF_BOOL ++#undef TYPEDEF_UCHAR ++#undef TYPEDEF_USHORT ++#undef TYPEDEF_UINT ++#undef TYPEDEF_ULONG ++#undef TYPEDEF_UINT8 ++#undef TYPEDEF_UINT16 ++#undef TYPEDEF_UINT32 ++#undef TYPEDEF_UINT64 ++#undef TYPEDEF_UINTPTR ++#undef TYPEDEF_INT8 ++#undef TYPEDEF_INT16 ++#undef TYPEDEF_INT32 ++#undef TYPEDEF_INT64 ++#undef TYPEDEF_FLOAT32 ++#undef TYPEDEF_FLOAT64 ++#undef TYPEDEF_FLOAT_T ++ ++#endif /* USE_TYPEDEF_DEFAULTS */ ++ ++/* Suppress unused parameter warning */ ++#define UNUSED_PARAMETER(x) (void)(x) ++ ++/* Avoid warning for discarded const or volatile qualifier in special cases (-Wcast-qual) */ ++#define DISCARD_QUAL(ptr, type) ((type *)(uintptr)(ptr)) ++ ++/* ++ * Including the bcmdefs.h here, to make sure everyone including typedefs.h ++ * gets this automatically ++*/ ++#include ++#endif /* _TYPEDEFS_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/include/wlioctl.h b/drivers/net/ethernet/broadcom/gmac/src/include/wlioctl.h +--- a/drivers/net/ethernet/broadcom/gmac/src/include/wlioctl.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/include/wlioctl.h 2017-11-09 17:53:44.016288000 +0800 +@@ -0,0 +1,4877 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Custom OID/ioctl definitions for ++ * Broadcom 802.11abg Networking Device Driver ++ * ++ * Definitions subject to change without notice. ++ * ++ * $Id: wlioctl.h 324203 2012-03-28 09:55:17Z $ ++ */ ++ ++#ifndef _wlioctl_h_ ++#define _wlioctl_h_ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++#ifdef __NetBSD__ ++/* NetBSD 2.0 does not have SIOCDEVPRIVATE. */ ++#define SIOCDEVPRIVATE _IOWR('i', 139, struct ifreq) ++#endif ++ ++#ifndef INTF_NAME_SIZ ++#define INTF_NAME_SIZ 16 ++#endif ++ ++/* Used to send ioctls over the transport pipe */ ++typedef struct remote_ioctl { ++ cdc_ioctl_t msg; ++ uint data_len; ++#ifndef OLYMPIC_RWL ++ char intf_name[INTF_NAME_SIZ]; ++#endif ++} rem_ioctl_t; ++#define REMOTE_SIZE sizeof(rem_ioctl_t) ++#ifdef EFI ++#define BCMWL_IOCTL_GUID \ ++ {0xB4910A35, 0x88C5, 0x4328, { 0x90, 0x08, 0x9F, 0xB2, 0x00, 0x00, 0x0, 0x0 } } ++#endif /* EFI */ ++ ++#define ACTION_FRAME_SIZE 1800 ++ ++typedef struct wl_action_frame { ++ struct ether_addr da; ++ uint16 len; ++ uint32 packetId; ++ uint8 data[ACTION_FRAME_SIZE]; ++} wl_action_frame_t; ++ ++#define WL_WIFI_ACTION_FRAME_SIZE sizeof(struct wl_action_frame) ++ ++typedef struct ssid_info ++{ ++ uint8 ssid_len; /* the length of SSID */ ++ uint8 ssid[32]; /* SSID string */ ++} ssid_info_t; ++ ++typedef struct wl_af_params { ++ uint32 channel; ++ int32 dwell_time; ++ struct ether_addr BSSID; ++ wl_action_frame_t action_frame; ++} 
wl_af_params_t; ++ ++#define WL_WIFI_AF_PARAMS_SIZE sizeof(struct wl_af_params) ++ ++#define MFP_TEST_FLAG_NORMAL 0 ++#define MFP_TEST_FLAG_ANY_KEY 1 ++typedef struct wl_sa_query { ++ uint32 flag; ++ uint8 action; ++ uint16 id; ++ struct ether_addr da; ++} wl_sa_query_t; ++ ++ ++/* require default structure packing */ ++#define BWL_DEFAULT_PACKING ++#include ++ ++ ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++/* Legacy structure to help keep backward compatible wl tool and tray app */ ++ ++#define LEGACY_WL_BSS_INFO_VERSION 107 /* older version of wl_bss_info struct */ ++ ++typedef struct wl_bss_info_107 { ++ uint32 version; /* version field */ ++ uint32 length; /* byte length of data in this record, ++ * starting at version and including IEs ++ */ ++ struct ether_addr BSSID; ++ uint16 beacon_period; /* units are Kusec */ ++ uint16 capability; /* Capability information */ ++ uint8 SSID_len; ++ uint8 SSID[32]; ++ struct { ++ uint count; /* # rates in this set */ ++ uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */ ++ } rateset; /* supported rates */ ++ uint8 channel; /* Channel no. */ ++ uint16 atim_window; /* units are Kusec */ ++ uint8 dtim_period; /* DTIM period */ ++ int16 RSSI; /* receive signal strength (in dBm) */ ++ int8 phy_noise; /* noise (in dBm) */ ++ uint32 ie_length; /* byte length of Information Elements */ ++ /* variable length Information Elements */ ++} wl_bss_info_107_t; ++#endif /* LINUX_POSTMOGRIFY_REMOVAL */ ++ ++/* ++ * Per-BSS information structure. 
++ */ ++ ++#define LEGACY2_WL_BSS_INFO_VERSION 108 /* old version of wl_bss_info struct */ ++ ++/* BSS info structure ++ * Applications MUST CHECK ie_offset field and length field to access IEs and ++ * next bss_info structure in a vector (in wl_scan_results_t) ++ */ ++typedef struct wl_bss_info_108 { ++ uint32 version; /* version field */ ++ uint32 length; /* byte length of data in this record, ++ * starting at version and including IEs ++ */ ++ struct ether_addr BSSID; ++ uint16 beacon_period; /* units are Kusec */ ++ uint16 capability; /* Capability information */ ++ uint8 SSID_len; ++ uint8 SSID[32]; ++ struct { ++ uint count; /* # rates in this set */ ++ uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */ ++ } rateset; /* supported rates */ ++ chanspec_t chanspec; /* chanspec for bss */ ++ uint16 atim_window; /* units are Kusec */ ++ uint8 dtim_period; /* DTIM period */ ++ int16 RSSI; /* receive signal strength (in dBm) */ ++ int8 phy_noise; /* noise (in dBm) */ ++ ++ uint8 n_cap; /* BSS is 802.11N Capable */ ++ uint32 nbss_cap; /* 802.11N BSS Capabilities (based on HT_CAP_*) */ ++ uint8 ctl_ch; /* 802.11N BSS control channel number */ ++ uint32 reserved32[1]; /* Reserved for expansion of BSS properties */ ++ uint8 flags; /* flags */ ++ uint8 reserved[3]; /* Reserved for expansion of BSS properties */ ++ uint8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */ ++ ++ uint16 ie_offset; /* offset at which IEs start, from beginning */ ++ uint32 ie_length; /* byte length of Information Elements */ ++ /* Add new fields here */ ++ /* variable length Information Elements */ ++} wl_bss_info_108_t; ++ ++#define WL_BSS_INFO_VERSION 109 /* current version of wl_bss_info struct */ ++ ++/* BSS info structure ++ * Applications MUST CHECK ie_offset field and length field to access IEs and ++ * next bss_info structure in a vector (in wl_scan_results_t) ++ */ ++typedef struct wl_bss_info { ++ uint32 version; /* version field */ ++ uint32 length; /* byte 
length of data in this record, ++ * starting at version and including IEs ++ */ ++ struct ether_addr BSSID; ++ uint16 beacon_period; /* units are Kusec */ ++ uint16 capability; /* Capability information */ ++ uint8 SSID_len; ++ uint8 SSID[32]; ++ struct { ++ uint count; /* # rates in this set */ ++ uint8 rates[16]; /* rates in 500kbps units w/hi bit set if basic */ ++ } rateset; /* supported rates */ ++ chanspec_t chanspec; /* chanspec for bss */ ++ uint16 atim_window; /* units are Kusec */ ++ uint8 dtim_period; /* DTIM period */ ++ int16 RSSI; /* receive signal strength (in dBm) */ ++ int8 phy_noise; /* noise (in dBm) */ ++ ++ uint8 n_cap; /* BSS is 802.11N Capable */ ++ uint32 nbss_cap; /* 802.11N BSS Capabilities (based on HT_CAP_*) */ ++ uint8 ctl_ch; /* 802.11N BSS control channel number */ ++ uint16 vht_rxmcsmap; /* VHT rx mcs map */ ++ uint16 vht_txmcsmap; /* VHT tx mcs map */ ++ uint8 flags; /* flags */ ++ uint8 vht_cap; /* BSS is vht capable */ ++ uint8 reserved[2]; /* Reserved for expansion of BSS properties */ ++ uint8 basic_mcs[MCSSET_LEN]; /* 802.11N BSS required MCS set */ ++ ++ uint16 ie_offset; /* offset at which IEs start, from beginning */ ++ uint32 ie_length; /* byte length of Information Elements */ ++ int16 SNR; /* average SNR of during frame reception */ ++ /* Add new fields here */ ++ /* variable length Information Elements */ ++} wl_bss_info_t; ++ ++typedef struct wl_bsscfg { ++ uint32 wsec; ++ uint32 WPA_auth; ++ uint32 wsec_index; ++ uint32 associated; ++ uint32 BSS; ++ uint32 phytest_on; ++ struct ether_addr prev_BSSID; ++ struct ether_addr BSSID; ++} wl_bsscfg_t; ++ ++typedef struct wl_bss_config { ++ uint32 atim_window; ++ uint32 beacon_period; ++ uint32 chanspec; ++} wl_bss_config_t; ++ ++#define DLOAD_HANDLER_VER 1 /* Downloader version */ ++#define DLOAD_FLAG_VER_MASK 0xf000 /* Downloader version mask */ ++#define DLOAD_FLAG_VER_SHIFT 12 /* Downloader version shift */ ++ ++#define DL_CRC_NOT_INUSE 0x0001 ++ ++/* generic download 
types & flags */ ++enum { ++ DL_TYPE_UCODE = 1, ++ DL_TYPE_CLM = 2 ++}; ++ ++/* ucode type values */ ++enum { ++ UCODE_FW, ++ INIT_VALS, ++ BS_INIT_VALS ++}; ++ ++struct wl_dload_data { ++ uint16 flag; ++ uint16 dload_type; ++ uint32 len; ++ uint32 crc; ++ uint8 data[1]; ++}; ++typedef struct wl_dload_data wl_dload_data_t; ++ ++struct wl_ucode_info { ++ uint32 ucode_type; ++ uint32 num_chunks; ++ uint32 chunk_len; ++ uint32 chunk_num; ++ uint8 data_chunk[1]; ++}; ++typedef struct wl_ucode_info wl_ucode_info_t; ++ ++struct wl_clm_dload_info { ++ uint32 ds_id; ++ uint32 clm_total_len; ++ uint32 num_chunks; ++ uint32 chunk_len; ++ uint32 chunk_offset; ++ uint8 data_chunk[1]; ++}; ++typedef struct wl_clm_dload_info wl_clm_dload_info_t; ++ ++typedef struct wlc_ssid { ++ uint32 SSID_len; ++ uchar SSID[32]; ++} wlc_ssid_t; ++ ++#define MAX_PREFERRED_AP_NUM 5 ++typedef struct wlc_fastssidinfo { ++ uint32 SSID_channel[MAX_PREFERRED_AP_NUM]; ++ wlc_ssid_t SSID_info[MAX_PREFERRED_AP_NUM]; ++} wlc_fastssidinfo_t; ++ ++typedef BWL_PRE_PACKED_STRUCT struct wnm_url { ++ uint8 len; ++ uint8 data[1]; ++} BWL_POST_PACKED_STRUCT wnm_url_t; ++ ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++typedef struct chan_scandata { ++ uint8 txpower; ++ uint8 pad; ++ chanspec_t channel; /* Channel num, bw, ctrl_sb and band */ ++ uint32 channel_mintime; ++ uint32 channel_maxtime; ++} chan_scandata_t; ++ ++typedef enum wl_scan_type { ++ EXTDSCAN_FOREGROUND_SCAN, ++ EXTDSCAN_BACKGROUND_SCAN, ++ EXTDSCAN_FORCEDBACKGROUND_SCAN ++} wl_scan_type_t; ++ ++#define WLC_EXTDSCAN_MAX_SSID 5 ++ ++#define WL_BSS_FLAGS_FROM_BEACON 0x01 /* bss_info derived from beacon */ ++#define WL_BSS_FLAGS_FROM_CACHE 0x02 /* bss_info collected from cache */ ++#define WL_BSS_FLAGS_RSSI_ONCHANNEL 0x04 /* rssi info was received on channel (vs offchannel) */ ++ ++typedef struct wl_extdscan_params { ++ int8 nprobes; /* 0, passive, otherwise active */ ++ int8 split_scan; /* split scan */ ++ int8 band; /* band */ ++ int8 pad; ++ wlc_ssid_t 
ssid[WLC_EXTDSCAN_MAX_SSID]; /* ssid list */ ++ uint32 tx_rate; /* in 500ksec units */ ++ wl_scan_type_t scan_type; /* enum */ ++ int32 channel_num; ++ chan_scandata_t channel_list[1]; /* list of chandata structs */ ++} wl_extdscan_params_t; ++ ++#define WL_EXTDSCAN_PARAMS_FIXED_SIZE (sizeof(wl_extdscan_params_t) - sizeof(chan_scandata_t)) ++#endif /* LINUX_POSTMOGRIFY_REMOVAL */ ++ ++#define WL_BSSTYPE_INFRA 1 ++#define WL_BSSTYPE_INDEP 0 ++#define WL_BSSTYPE_ANY 2 ++ ++/* Bitmask for scan_type */ ++#define WL_SCANFLAGS_PASSIVE 0x01 /* force passive scan */ ++#define WL_SCANFLAGS_RESERVED 0x02 /* Reserved */ ++#define WL_SCANFLAGS_PROHIBITED 0x04 /* allow scanning prohibited channels */ ++ ++#define WL_SCAN_PARAMS_SSID_MAX 10 ++ ++typedef struct wl_scan_params { ++ wlc_ssid_t ssid; /* default: {0, ""} */ ++ struct ether_addr bssid; /* default: bcast */ ++ int8 bss_type; /* default: any, ++ * DOT11_BSSTYPE_ANY/INFRASTRUCTURE/INDEPENDENT ++ */ ++ uint8 scan_type; /* flags, 0 use default */ ++ int32 nprobes; /* -1 use default, number of probes per channel */ ++ int32 active_time; /* -1 use default, dwell time per channel for ++ * active scanning ++ */ ++ int32 passive_time; /* -1 use default, dwell time per channel ++ * for passive scanning ++ */ ++ int32 home_time; /* -1 use default, dwell time for the home channel ++ * between channel scans ++ */ ++ int32 channel_num; /* count of channels and ssids that follow ++ * ++ * low half is count of channels in channel_list, 0 ++ * means default (use all available channels) ++ * ++ * high half is entries in wlc_ssid_t array that ++ * follows channel_list, aligned for int32 (4 bytes) ++ * meaning an odd channel count implies a 2-byte pad ++ * between end of channel_list and first ssid ++ * ++ * if ssid count is zero, single ssid in the fixed ++ * parameter portion is assumed, otherwise ssid in ++ * the fixed portion is ignored ++ */ ++ uint16 channel_list[1]; /* list of chanspecs */ ++} wl_scan_params_t; ++ ++/* size of 
wl_scan_params not including variable length array */ ++#define WL_SCAN_PARAMS_FIXED_SIZE 64 ++ ++/* masks for channel and ssid count */ ++#define WL_SCAN_PARAMS_COUNT_MASK 0x0000ffff ++#define WL_SCAN_PARAMS_NSSID_SHIFT 16 ++ ++#define WL_SCAN_ACTION_START 1 ++#define WL_SCAN_ACTION_CONTINUE 2 ++#define WL_SCAN_ACTION_ABORT 3 ++ ++#define ISCAN_REQ_VERSION 1 ++ ++/* incremental scan struct */ ++typedef struct wl_iscan_params { ++ uint32 version; ++ uint16 action; ++ uint16 scan_duration; ++ wl_scan_params_t params; ++} wl_iscan_params_t; ++ ++/* 3 fields + size of wl_scan_params, not including variable length array */ ++#define WL_ISCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_iscan_params_t, params) + sizeof(wlc_ssid_t)) ++ ++typedef struct wl_scan_results { ++ uint32 buflen; ++ uint32 version; ++ uint32 count; ++ wl_bss_info_t bss_info[1]; ++} wl_scan_results_t; ++ ++/* size of wl_scan_results not including variable length array */ ++#define WL_SCAN_RESULTS_FIXED_SIZE (sizeof(wl_scan_results_t) - sizeof(wl_bss_info_t)) ++ ++/* wl_iscan_results status values */ ++#define WL_SCAN_RESULTS_SUCCESS 0 ++#define WL_SCAN_RESULTS_PARTIAL 1 ++#define WL_SCAN_RESULTS_PENDING 2 ++#define WL_SCAN_RESULTS_ABORTED 3 ++#define WL_SCAN_RESULTS_NO_MEM 4 ++ ++/* Used in EXT_STA */ ++#define DNGL_RXCTXT_SIZE 45 ++ ++#if defined(SIMPLE_ISCAN) ++#define ISCAN_RETRY_CNT 5 ++#define ISCAN_STATE_IDLE 0 ++#define ISCAN_STATE_SCANING 1 ++#define ISCAN_STATE_PENDING 2 ++ ++/* the buf lengh can be WLC_IOCTL_MAXLEN (8K) to reduce iteration */ ++#define WLC_IW_ISCAN_MAXLEN 2048 ++typedef struct iscan_buf { ++ struct iscan_buf * next; ++ char iscan_buf[WLC_IW_ISCAN_MAXLEN]; ++} iscan_buf_t; ++#endif /* SIMPLE_ISCAN */ ++ ++#define ESCAN_REQ_VERSION 1 ++ ++typedef struct wl_escan_params { ++ uint32 version; ++ uint16 action; ++ uint16 sync_id; ++ wl_scan_params_t params; ++} wl_escan_params_t; ++ ++#define WL_ESCAN_PARAMS_FIXED_SIZE (OFFSETOF(wl_escan_params_t, params) + sizeof(wlc_ssid_t)) ++ ++typedef 
struct wl_escan_result { ++ uint32 buflen; ++ uint32 version; ++ uint16 sync_id; ++ uint16 bss_count; ++ wl_bss_info_t bss_info[1]; ++} wl_escan_result_t; ++ ++#define WL_ESCAN_RESULTS_FIXED_SIZE (sizeof(wl_escan_result_t) - sizeof(wl_bss_info_t)) ++ ++/* incremental scan results struct */ ++typedef struct wl_iscan_results { ++ uint32 status; ++ wl_scan_results_t results; ++} wl_iscan_results_t; ++ ++/* size of wl_iscan_results not including variable length array */ ++#define WL_ISCAN_RESULTS_FIXED_SIZE \ ++ (WL_SCAN_RESULTS_FIXED_SIZE + OFFSETOF(wl_iscan_results_t, results)) ++ ++typedef struct wl_probe_params { ++ wlc_ssid_t ssid; ++ struct ether_addr bssid; ++ struct ether_addr mac; ++} wl_probe_params_t; ++ ++#define WL_MAXRATES_IN_SET 16 /* max # of rates in a rateset */ ++typedef struct wl_rateset { ++ uint32 count; /* # rates in this set */ ++ uint8 rates[WL_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */ ++} wl_rateset_t; ++ ++typedef struct wl_rateset_args { ++ uint32 count; /* # rates in this set */ ++ uint8 rates[WL_MAXRATES_IN_SET]; /* rates in 500kbps units w/hi bit set if basic */ ++ uint8 mcs[MCSSET_LEN]; /* supported mcs index bit map */ ++} wl_rateset_args_t; ++ ++/* uint32 list */ ++typedef struct wl_uint32_list { ++ /* in - # of elements, out - # of entries */ ++ uint32 count; ++ /* variable length uint32 list */ ++ uint32 element[1]; ++} wl_uint32_list_t; ++ ++/* used for association with a specific BSSID and chanspec list */ ++typedef struct wl_assoc_params { ++ struct ether_addr bssid; /* 00:00:00:00:00:00: broadcast scan */ ++ int32 chanspec_num; /* 0: all available channels, ++ * otherwise count of chanspecs in chanspec_list ++ */ ++ chanspec_t chanspec_list[1]; /* list of chanspecs */ ++} wl_assoc_params_t; ++#define WL_ASSOC_PARAMS_FIXED_SIZE OFFSETOF(wl_assoc_params_t, chanspec_list) ++ ++/* used for reassociation/roam to a specific BSSID and channel */ ++typedef wl_assoc_params_t wl_reassoc_params_t; ++#define 
WL_REASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE ++ ++/* used for association to a specific BSSID and channel */ ++typedef wl_assoc_params_t wl_join_assoc_params_t; ++#define WL_JOIN_ASSOC_PARAMS_FIXED_SIZE WL_ASSOC_PARAMS_FIXED_SIZE ++ ++/* used for join with or without a specific bssid and channel list */ ++typedef struct wl_join_params { ++ wlc_ssid_t ssid; ++ wl_assoc_params_t params; /* optional field, but it must include the fixed portion ++ * of the wl_assoc_params_t struct when it does present. ++ */ ++} wl_join_params_t; ++#define WL_JOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_join_params_t, params) + \ ++ WL_ASSOC_PARAMS_FIXED_SIZE) ++/* scan params for extended join */ ++typedef struct wl_join_scan_params { ++ uint8 scan_type; /* 0 use default, active or passive scan */ ++ int32 nprobes; /* -1 use default, number of probes per channel */ ++ int32 active_time; /* -1 use default, dwell time per channel for ++ * active scanning ++ */ ++ int32 passive_time; /* -1 use default, dwell time per channel ++ * for passive scanning ++ */ ++ int32 home_time; /* -1 use default, dwell time for the home channel ++ * between channel scans ++ */ ++} wl_join_scan_params_t; ++ ++/* extended join params */ ++typedef struct wl_extjoin_params { ++ wlc_ssid_t ssid; /* {0, ""}: wildcard scan */ ++ wl_join_scan_params_t scan; ++ wl_join_assoc_params_t assoc; /* optional field, but it must include the fixed portion ++ * of the wl_join_assoc_params_t struct when it does ++ * present. 
++ */ ++} wl_extjoin_params_t; ++#define WL_EXTJOIN_PARAMS_FIXED_SIZE (OFFSETOF(wl_extjoin_params_t, assoc) + \ ++ WL_JOIN_ASSOC_PARAMS_FIXED_SIZE) ++ ++/* All builds use the new 11ac ratespec/chanspec */ ++#undef D11AC_IOTYPES ++#define D11AC_IOTYPES ++ ++#ifndef D11AC_IOTYPES ++ ++/* defines used by the nrate iovar */ ++#define NRATE_MCS_INUSE 0x00000080 /* MSC in use,indicates b0-6 holds an mcs */ ++#define NRATE_RATE_MASK 0x0000007f /* rate/mcs value */ ++#define NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */ ++#define NRATE_STF_SHIFT 8 /* stf mode shift */ ++#define NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */ ++#define NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */ ++#define NRATE_SGI_MASK 0x00800000 /* sgi mode */ ++#define NRATE_SGI_SHIFT 23 /* sgi mode */ ++#define NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */ ++#define NRATE_LDPC_SHIFT 22 /* ldpc shift */ ++ ++#define NRATE_STF_SISO 0 /* stf mode SISO */ ++#define NRATE_STF_CDD 1 /* stf mode CDD */ ++#define NRATE_STF_STBC 2 /* stf mode STBC */ ++#define NRATE_STF_SDM 3 /* stf mode SDM */ ++ ++#else /* D11AC_IOTYPES */ ++ ++/* WL_RSPEC defines for rate information */ ++#define WL_RSPEC_RATE_MASK 0x000000FF /* rate or HT MCS value */ ++#define WL_RSPEC_VHT_MCS_MASK 0x0000000F /* VHT MCS value */ ++#define WL_RSPEC_VHT_NSS_MASK 0x000000F0 /* VHT Nss value */ ++#define WL_RSPEC_VHT_NSS_SHIFT 4 /* VHT Nss value shift */ ++#define WL_RSPEC_TXEXP_MASK 0x00000300 ++#define WL_RSPEC_TXEXP_SHIFT 8 ++#define WL_RSPEC_BW_MASK 0x00070000 /* bandwidth mask */ ++#define WL_RSPEC_BW_SHIFT 16 /* bandwidth shift */ ++#define WL_RSPEC_STBC 0x00100000 /* STBC encoding, Nsts = 2 x Nss */ ++#define WL_RSPEC_LDPC 0x00400000 /* bit indicates adv coding in use */ ++#define WL_RSPEC_SGI 0x00800000 /* Short GI mode */ ++#define WL_RSPEC_ENCODING_MASK 0x03000000 /* Encoding of Rate/MCS field */ ++#define WL_RSPEC_OVERRIDE_RATE 
0x40000000 /* bit indicate to override mcs only */ ++#define WL_RSPEC_OVERRIDE_MODE 0x80000000 /* bit indicates override both rate & mode */ ++ ++/* WL_RSPEC_ENCODING field defs */ ++#define WL_RSPEC_ENCODE_RATE 0x00000000 /* Legacy rate is stored in RSPEC_RATE_MASK */ ++#define WL_RSPEC_ENCODE_HT 0x01000000 /* HT MCS is stored in RSPEC_RATE_MASK */ ++#define WL_RSPEC_ENCODE_VHT 0x02000000 /* VHT MCS and Nss is stored in RSPEC_RATE_MASK */ ++ ++/* WL_RSPEC_BW field defs */ ++#define WL_RSPEC_BW_UNSPECIFIED 0 ++#define WL_RSPEC_BW_20MHZ 0x00010000 ++#define WL_RSPEC_BW_40MHZ 0x00020000 ++#define WL_RSPEC_BW_80MHZ 0x00030000 ++#define WL_RSPEC_BW_160MHZ 0x00040000 ++ ++/* Legacy defines for the nrate iovar */ ++#define OLD_NRATE_MCS_INUSE 0x00000080 /* MSC in use,indicates b0-6 holds an mcs */ ++#define OLD_NRATE_RATE_MASK 0x0000007f /* rate/mcs value */ ++#define OLD_NRATE_STF_MASK 0x0000ff00 /* stf mode mask: siso, cdd, stbc, sdm */ ++#define OLD_NRATE_STF_SHIFT 8 /* stf mode shift */ ++#define OLD_NRATE_OVERRIDE 0x80000000 /* bit indicates override both rate & mode */ ++#define OLD_NRATE_OVERRIDE_MCS_ONLY 0x40000000 /* bit indicate to override mcs only */ ++#define OLD_NRATE_SGI 0x00800000 /* sgi mode */ ++#define OLD_NRATE_LDPC_CODING 0x00400000 /* bit indicates adv coding in use */ ++ ++#define OLD_NRATE_STF_SISO 0 /* stf mode SISO */ ++#define OLD_NRATE_STF_CDD 1 /* stf mode CDD */ ++#define OLD_NRATE_STF_STBC 2 /* stf mode STBC */ ++#define OLD_NRATE_STF_SDM 3 /* stf mode SDM */ ++ ++#endif /* D11AC_IOTYPES */ ++ ++#define ANTENNA_NUM_1 1 /* total number of antennas to be used */ ++#define ANTENNA_NUM_2 2 ++#define ANTENNA_NUM_3 3 ++#define ANTENNA_NUM_4 4 ++ ++#define ANT_SELCFG_AUTO 0x80 /* bit indicates antenna sel AUTO */ ++#define ANT_SELCFG_MASK 0x33 /* antenna configuration mask */ ++#define ANT_SELCFG_MAX 4 /* max number of antenna configurations */ ++#define ANT_SELCFG_TX_UNICAST 0 /* unicast tx antenna configuration */ ++#define ANT_SELCFG_RX_UNICAST 
1 /* unicast rx antenna configuration */ ++#define ANT_SELCFG_TX_DEF 2 /* default tx antenna configuration */ ++#define ANT_SELCFG_RX_DEF 3 /* default rx antenna configuration */ ++ ++#define MAX_STREAMS_SUPPORTED 4 /* max number of streams supported */ ++ ++typedef struct { ++ uint8 ant_config[ANT_SELCFG_MAX]; /* antenna configuration */ ++ uint8 num_antcfg; /* number of available antenna configurations */ ++} wlc_antselcfg_t; ++ ++#define HIGHEST_SINGLE_STREAM_MCS 7 /* MCS values greater than this enable multiple streams */ ++ ++#define MAX_CCA_CHANNELS 38 /* Max number of 20 Mhz wide channels */ ++#define MAX_CCA_SECS 60 /* CCA keeps this many seconds history */ ++ ++#define IBSS_MED 15 /* Mediom in-bss congestion percentage */ ++#define IBSS_HI 25 /* Hi in-bss congestion percentage */ ++#define OBSS_MED 12 ++#define OBSS_HI 25 ++#define INTERFER_MED 5 ++#define INTERFER_HI 10 ++ ++#define CCA_FLAG_2G_ONLY 0x01 /* Return a channel from 2.4 Ghz band */ ++#define CCA_FLAG_5G_ONLY 0x02 /* Return a channel from 2.4 Ghz band */ ++#define CCA_FLAG_IGNORE_DURATION 0x04 /* Ignore dwell time for each channel */ ++#define CCA_FLAGS_PREFER_1_6_11 0x10 ++#define CCA_FLAG_IGNORE_INTERFER 0x20 /* do not exlude channel based on interfer level */ ++ ++#define CCA_ERRNO_BAND 1 /* After filtering for band pref, no choices left */ ++#define CCA_ERRNO_DURATION 2 /* After filtering for duration, no choices left */ ++#define CCA_ERRNO_PREF_CHAN 3 /* After filtering for chan pref, no choices left */ ++#define CCA_ERRNO_INTERFER 4 /* After filtering for interference, no choices left */ ++#define CCA_ERRNO_TOO_FEW 5 /* Only 1 channel was input */ ++ ++typedef struct { ++ uint32 duration; /* millisecs spent sampling this channel */ ++ uint32 congest_ibss; /* millisecs in our bss (presumably this traffic will */ ++ /* move if cur bss moves channels) */ ++ uint32 congest_obss; /* traffic not in our bss */ ++ uint32 interference; /* millisecs detecting a non 802.11 interferer. 
*/ ++ uint32 timestamp; /* second timestamp */ ++} cca_congest_t; ++ ++typedef struct { ++ chanspec_t chanspec; /* Which channel? */ ++ uint8 num_secs; /* How many secs worth of data */ ++ cca_congest_t secs[1]; /* Data */ ++} cca_congest_channel_req_t; ++ ++/* interference source detection and identification mode */ ++#define ITFR_MODE_DISABLE 0 /* disable feature */ ++#define ITFR_MODE_MANUAL_ENABLE 1 /* enable manual detection */ ++#define ITFR_MODE_AUTO_ENABLE 2 /* enable auto detection */ ++ ++/* interference sources */ ++enum interference_source { ++ ITFR_NONE = 0, /* interference */ ++ ITFR_PHONE, /* wireless phone */ ++ ITFR_VIDEO_CAMERA, /* wireless video camera */ ++ ITFR_MICROWAVE_OVEN, /* microwave oven */ ++ ITFR_BABY_MONITOR, /* wireless baby monitor */ ++ ITFR_BLUETOOTH, /* bluetooth */ ++ ITFR_VIDEO_CAMERA_OR_BABY_MONITOR, /* wireless camera or baby monitor */ ++ ITFR_BLUETOOTH_OR_BABY_MONITOR, /* bluetooth or baby monitor */ ++ ITFR_VIDEO_CAMERA_OR_PHONE, /* video camera or phone */ ++ ITFR_UNIDENTIFIED /* interference from unidentified source */ ++}; ++ ++/* structure for interference source report */ ++typedef struct { ++ uint32 flags; /* flags. bit definitions below */ ++ uint32 source; /* last detected interference source */ ++ uint32 timestamp; /* second timestamp on interferenced flag change */ ++} interference_source_rep_t; ++ ++/* bit definitions for flags in interference source report */ ++#define ITFR_INTERFERENCED 1 /* interference detected */ ++#define ITFR_HOME_CHANNEL 2 /* home channel has interference */ ++#define ITFR_NOISY_ENVIRONMENT 4 /* noisy environemnt so feature stopped */ ++ ++#define WLC_CNTRY_BUF_SZ 4 /* Country string is 3 bytes + NUL */ ++ ++typedef struct wl_country { ++ char country_abbrev[WLC_CNTRY_BUF_SZ]; /* nul-terminated country code used in ++ * the Country IE ++ */ ++ int32 rev; /* revision specifier for ccode ++ * on set, -1 indicates unspecified. 
++ * on get, rev >= 0 ++ */ ++ char ccode[WLC_CNTRY_BUF_SZ]; /* nul-terminated built-in country code. ++ * variable length, but fixed size in ++ * struct allows simple allocation for ++ * expected country strings <= 3 chars. ++ */ ++} wl_country_t; ++ ++typedef struct wl_channels_in_country { ++ uint32 buflen; ++ uint32 band; ++ char country_abbrev[WLC_CNTRY_BUF_SZ]; ++ uint32 count; ++ uint32 channel[1]; ++} wl_channels_in_country_t; ++ ++typedef struct wl_country_list { ++ uint32 buflen; ++ uint32 band_set; ++ uint32 band; ++ uint32 count; ++ char country_abbrev[1]; ++} wl_country_list_t; ++ ++#define WL_NUM_RPI_BINS 8 ++#define WL_RM_TYPE_BASIC 1 ++#define WL_RM_TYPE_CCA 2 ++#define WL_RM_TYPE_RPI 3 ++ ++#define WL_RM_FLAG_PARALLEL (1<<0) ++ ++#define WL_RM_FLAG_LATE (1<<1) ++#define WL_RM_FLAG_INCAPABLE (1<<2) ++#define WL_RM_FLAG_REFUSED (1<<3) ++ ++typedef struct wl_rm_req_elt { ++ int8 type; ++ int8 flags; ++ chanspec_t chanspec; ++ uint32 token; /* token for this measurement */ ++ uint32 tsf_h; /* TSF high 32-bits of Measurement start time */ ++ uint32 tsf_l; /* TSF low 32-bits */ ++ uint32 dur; /* TUs */ ++} wl_rm_req_elt_t; ++ ++typedef struct wl_rm_req { ++ uint32 token; /* overall measurement set token */ ++ uint32 count; /* number of measurement requests */ ++ void *cb; /* completion callback function: may be NULL */ ++ void *cb_arg; /* arg to completion callback function */ ++ wl_rm_req_elt_t req[1]; /* variable length block of requests */ ++} wl_rm_req_t; ++#define WL_RM_REQ_FIXED_LEN OFFSETOF(wl_rm_req_t, req) ++ ++typedef struct wl_rm_rep_elt { ++ int8 type; ++ int8 flags; ++ chanspec_t chanspec; ++ uint32 token; /* token for this measurement */ ++ uint32 tsf_h; /* TSF high 32-bits of Measurement start time */ ++ uint32 tsf_l; /* TSF low 32-bits */ ++ uint32 dur; /* TUs */ ++ uint32 len; /* byte length of data block */ ++ uint8 data[1]; /* variable length data block */ ++} wl_rm_rep_elt_t; ++#define WL_RM_REP_ELT_FIXED_LEN 24 /* length excluding 
data block */ ++ ++#define WL_RPI_REP_BIN_NUM 8 ++typedef struct wl_rm_rpi_rep { ++ uint8 rpi[WL_RPI_REP_BIN_NUM]; ++ int8 rpi_max[WL_RPI_REP_BIN_NUM]; ++} wl_rm_rpi_rep_t; ++ ++typedef struct wl_rm_rep { ++ uint32 token; /* overall measurement set token */ ++ uint32 len; /* length of measurement report block */ ++ wl_rm_rep_elt_t rep[1]; /* variable length block of reports */ ++} wl_rm_rep_t; ++#define WL_RM_REP_FIXED_LEN 8 ++ ++ ++#if defined(BCMSUP_PSK) ++typedef enum sup_auth_status { ++ /* Basic supplicant authentication states */ ++ WLC_SUP_DISCONNECTED = 0, ++ WLC_SUP_CONNECTING, ++ WLC_SUP_IDREQUIRED, ++ WLC_SUP_AUTHENTICATING, ++ WLC_SUP_AUTHENTICATED, ++ WLC_SUP_KEYXCHANGE, ++ WLC_SUP_KEYED, ++ WLC_SUP_TIMEOUT, ++ WLC_SUP_LAST_BASIC_STATE, ++ ++ /* Extended supplicant authentication states */ ++ /* Waiting to receive handshake msg M1 */ ++ WLC_SUP_KEYXCHANGE_WAIT_M1 = WLC_SUP_AUTHENTICATED, ++ /* Preparing to send handshake msg M2 */ ++ WLC_SUP_KEYXCHANGE_PREP_M2 = WLC_SUP_KEYXCHANGE, ++ /* Waiting to receive handshake msg M3 */ ++ WLC_SUP_KEYXCHANGE_WAIT_M3 = WLC_SUP_LAST_BASIC_STATE, ++ WLC_SUP_KEYXCHANGE_PREP_M4, /* Preparing to send handshake msg M4 */ ++ WLC_SUP_KEYXCHANGE_WAIT_G1, /* Waiting to receive handshake msg G1 */ ++ WLC_SUP_KEYXCHANGE_PREP_G2 /* Preparing to send handshake msg G2 */ ++} sup_auth_status_t; ++#endif ++ ++/* Enumerate crypto algorithms */ ++#define CRYPTO_ALGO_OFF 0 ++#define CRYPTO_ALGO_WEP1 1 ++#define CRYPTO_ALGO_TKIP 2 ++#define CRYPTO_ALGO_WEP128 3 ++#define CRYPTO_ALGO_AES_CCM 4 ++#define CRYPTO_ALGO_AES_OCB_MSDU 5 ++#define CRYPTO_ALGO_AES_OCB_MPDU 6 ++#define CRYPTO_ALGO_NALG 7 ++#define CRYPTO_ALGO_PMK 12 /* for 802.1x supp to set PMK before 4-way */ ++ ++#define WSEC_GEN_MIC_ERROR 0x0001 ++#define WSEC_GEN_REPLAY 0x0002 ++#define WSEC_GEN_ICV_ERROR 0x0004 ++#define WSEC_GEN_MFP_ACT_ERROR 0x0008 ++#define WSEC_GEN_MFP_DISASSOC_ERROR 0x0010 ++#define WSEC_GEN_MFP_DEAUTH_ERROR 0x0020 ++ ++#define WL_SOFT_KEY (1 << 0) /* 
Indicates this key is using soft encrypt */ ++#define WL_PRIMARY_KEY (1 << 1) /* Indicates this key is the primary (ie tx) key */ ++#define WL_KF_RES_4 (1 << 4) /* Reserved for backward compat */ ++#define WL_KF_RES_5 (1 << 5) /* Reserved for backward compat */ ++#define WL_IBSS_PEER_GROUP_KEY (1 << 6) /* Indicates a group key for a IBSS PEER */ ++ ++typedef struct wl_wsec_key { ++ uint32 index; /* key index */ ++ uint32 len; /* key length */ ++ uint8 data[DOT11_MAX_KEY_SIZE]; /* key data */ ++ uint32 pad_1[18]; ++ uint32 algo; /* CRYPTO_ALGO_AES_CCM, CRYPTO_ALGO_WEP128, etc */ ++ uint32 flags; /* misc flags */ ++ uint32 pad_2[2]; ++ int pad_3; ++ int iv_initialized; /* has IV been initialized already? */ ++ int pad_4; ++ /* Rx IV */ ++ struct { ++ uint32 hi; /* upper 32 bits of IV */ ++ uint16 lo; /* lower 16 bits of IV */ ++ } rxiv; ++ uint32 pad_5[2]; ++ struct ether_addr ea; /* per station */ ++} wl_wsec_key_t; ++ ++#define WSEC_MIN_PSK_LEN 8 ++#define WSEC_MAX_PSK_LEN 64 ++ ++/* Flag for key material needing passhash'ing */ ++#define WSEC_PASSPHRASE (1<<0) ++ ++/* receptacle for WLC_SET_WSEC_PMK parameter */ ++typedef struct { ++ ushort key_len; /* octets in key material */ ++ ushort flags; /* key handling qualification */ ++ uint8 key[WSEC_MAX_PSK_LEN]; /* PMK material */ ++} wsec_pmk_t; ++ ++/* wireless security bitvec */ ++#define WEP_ENABLED 0x0001 ++#define TKIP_ENABLED 0x0002 ++#define AES_ENABLED 0x0004 ++#define WSEC_SWFLAG 0x0008 ++#define SES_OW_ENABLED 0x0040 /* to go into transition mode without setting wep */ ++ ++/* wsec macros for operating on the above definitions */ ++#define WSEC_WEP_ENABLED(wsec) ((wsec) & WEP_ENABLED) ++#define WSEC_TKIP_ENABLED(wsec) ((wsec) & TKIP_ENABLED) ++#define WSEC_AES_ENABLED(wsec) ((wsec) & AES_ENABLED) ++ ++#define WSEC_ENABLED(wsec) ((wsec) & (WEP_ENABLED | TKIP_ENABLED | AES_ENABLED)) ++#define WSEC_SES_OW_ENABLED(wsec) ((wsec) & SES_OW_ENABLED) ++ ++#ifdef MFP ++#define MFP_CAPABLE 0x0200 ++#define 
MFP_REQUIRED 0x0400 ++#define MFP_SHA256 0x0800 /* a special configuration for STA for WIFI test tool */ ++#endif /* MFP */ ++ ++/* WPA authentication mode bitvec */ ++#define WPA_AUTH_DISABLED 0x0000 /* Legacy (i.e., non-WPA) */ ++#define WPA_AUTH_NONE 0x0001 /* none (IBSS) */ ++#define WPA_AUTH_UNSPECIFIED 0x0002 /* over 802.1x */ ++#define WPA_AUTH_PSK 0x0004 /* Pre-shared key */ ++/* #define WPA_AUTH_8021X 0x0020 */ /* 802.1x, reserved */ ++#define WPA2_AUTH_UNSPECIFIED 0x0040 /* over 802.1x */ ++#define WPA2_AUTH_PSK 0x0080 /* Pre-shared key */ ++#define BRCM_AUTH_PSK 0x0100 /* BRCM specific PSK */ ++#define BRCM_AUTH_DPT 0x0200 /* DPT PSK without group keys */ ++#define WPA2_AUTH_MFP 0x1000 /* MFP (11w) in contrast to CCX */ ++#define WPA2_AUTH_TPK 0x2000 /* TDLS Peer Key */ ++#define WPA2_AUTH_FT 0x4000 /* Fast Transition. */ ++#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */ ++ ++/* pmkid */ ++#define MAXPMKID 16 ++ ++typedef struct _pmkid { ++ struct ether_addr BSSID; ++ uint8 PMKID[WPA2_PMKID_LEN]; ++} pmkid_t; ++ ++typedef struct _pmkid_list { ++ uint32 npmkid; ++ pmkid_t pmkid[1]; ++} pmkid_list_t; ++ ++typedef struct _pmkid_cand { ++ struct ether_addr BSSID; ++ uint8 preauth; ++} pmkid_cand_t; ++ ++typedef struct _pmkid_cand_list { ++ uint32 npmkid_cand; ++ pmkid_cand_t pmkid_cand[1]; ++} pmkid_cand_list_t; ++ ++typedef struct wl_assoc_info { ++ uint32 req_len; ++ uint32 resp_len; ++ uint32 flags; ++ struct dot11_assoc_req req; ++ struct ether_addr reassoc_bssid; /* used in reassoc's */ ++ struct dot11_assoc_resp resp; ++} wl_assoc_info_t; ++ ++/* flags */ ++#define WLC_ASSOC_REQ_IS_REASSOC 0x01 /* assoc req was actually a reassoc */ ++ ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++typedef struct wl_led_info { ++ uint32 index; /* led index */ ++ uint32 behavior; ++ uint8 activehi; ++} wl_led_info_t; ++ ++ ++/* srom read/write struct passed through ioctl */ ++typedef struct { ++ uint byteoff; /* byte offset */ ++ uint nbytes; /* number of 
bytes */ ++ uint16 buf[1]; ++} srom_rw_t; ++ ++/* similar cis (srom or otp) struct [iovar: may not be aligned] */ ++typedef struct { ++ uint32 source; /* cis source */ ++ uint32 byteoff; /* byte offset */ ++ uint32 nbytes; /* number of bytes */ ++ /* data follows here */ ++} cis_rw_t; ++ ++#define WLC_CIS_DEFAULT 0 /* built-in default */ ++#define WLC_CIS_SROM 1 /* source is sprom */ ++#define WLC_CIS_OTP 2 /* source is otp */ ++ ++/* R_REG and W_REG struct passed through ioctl */ ++typedef struct { ++ uint32 byteoff; /* byte offset of the field in d11regs_t */ ++ uint32 val; /* read/write value of the field */ ++ uint32 size; /* sizeof the field */ ++ uint band; /* band (optional) */ ++} rw_reg_t; ++ ++/* Structure used by GET/SET_ATTEN ioctls - it controls power in b/g-band */ ++/* PCL - Power Control Loop */ ++/* current gain setting is replaced by user input */ ++#define WL_ATTEN_APP_INPUT_PCL_OFF 0 /* turn off PCL, apply supplied input */ ++#define WL_ATTEN_PCL_ON 1 /* turn on PCL */ ++/* current gain setting is maintained */ ++#define WL_ATTEN_PCL_OFF 2 /* turn off PCL. */ ++ ++typedef struct { ++ uint16 auto_ctrl; /* WL_ATTEN_XX */ ++ uint16 bb; /* Baseband attenuation */ ++ uint16 radio; /* Radio attenuation */ ++ uint16 txctl1; /* Radio TX_CTL1 value */ ++} atten_t; ++ ++/* Per-AC retry parameters */ ++struct wme_tx_params_s { ++ uint8 short_retry; ++ uint8 short_fallback; ++ uint8 long_retry; ++ uint8 long_fallback; ++ uint16 max_rate; /* In units of 512 Kbps */ ++}; ++ ++typedef struct wme_tx_params_s wme_tx_params_t; ++ ++#define WL_WME_TX_PARAMS_IO_BYTES (sizeof(wme_tx_params_t) * AC_COUNT) ++ ++/* defines used by poweridx iovar - it controls power in a-band */ ++/* current gain setting is maintained */ ++#define WL_PWRIDX_PCL_OFF -2 /* turn off PCL. 
*/ ++#define WL_PWRIDX_PCL_ON -1 /* turn on PCL */ ++#define WL_PWRIDX_LOWER_LIMIT -2 /* lower limit */ ++#define WL_PWRIDX_UPPER_LIMIT 63 /* upper limit */ ++/* value >= 0 causes ++ * - input to be set to that value ++ * - PCL to be off ++ */ ++ ++/* Used to get specific link/ac parameters */ ++typedef struct { ++ int ac; ++ uint8 val; ++ struct ether_addr ea; ++} link_val_t; ++ ++#define BCM_MAC_STATUS_INDICATION (0x40010200L) ++#endif /* LINUX_POSTMOGRIFY_REMOVAL */ ++ ++typedef struct { ++ uint16 ver; /* version of this struct */ ++ uint16 len; /* length in bytes of this structure */ ++ uint16 cap; /* sta's advertised capabilities */ ++ uint32 flags; /* flags defined below */ ++ uint32 idle; /* time since data pkt rx'd from sta */ ++ struct ether_addr ea; /* Station address */ ++ wl_rateset_t rateset; /* rateset in use */ ++ uint32 in; /* seconds elapsed since associated */ ++ uint32 listen_interval_inms; /* Min Listen interval in ms for this STA */ ++ uint32 tx_pkts; /* # of packets transmitted */ ++ uint32 tx_failures; /* # of packets failed */ ++ uint32 rx_ucast_pkts; /* # of unicast packets received */ ++ uint32 rx_mcast_pkts; /* # of multicast packets received */ ++ uint32 tx_rate; /* Rate of last successful tx frame */ ++ uint32 rx_rate; /* Rate of last successful rx frame */ ++ uint32 rx_decrypt_succeeds; /* # of packet decrypted successfully */ ++ uint32 rx_decrypt_failures; /* # of packet decrypted unsuccessfully */ ++} sta_info_t; ++ ++#define WL_OLD_STAINFO_SIZE OFFSETOF(sta_info_t, tx_pkts) ++ ++#define WL_STA_VER 3 ++ ++/* Flags for sta_info_t indicating properties of STA */ ++#define WL_STA_BRCM 0x1 /* Running a Broadcom driver */ ++#define WL_STA_WME 0x2 /* WMM association */ ++#define WL_STA_UNUSED 0x4 ++#define WL_STA_AUTHE 0x8 /* Authenticated */ ++#define WL_STA_ASSOC 0x10 /* Associated */ ++#define WL_STA_AUTHO 0x20 /* Authorized */ ++#define WL_STA_WDS 0x40 /* Wireless Distribution System */ ++#define WL_STA_WDS_LINKUP 0x80 /* WDS 
traffic/probes flowing properly */ ++#define WL_STA_PS 0x100 /* STA is in power save mode from AP's viewpoint */ ++#define WL_STA_APSD_BE 0x200 /* APSD delv/trigger for AC_BE is default enabled */ ++#define WL_STA_APSD_BK 0x400 /* APSD delv/trigger for AC_BK is default enabled */ ++#define WL_STA_APSD_VI 0x800 /* APSD delv/trigger for AC_VI is default enabled */ ++#define WL_STA_APSD_VO 0x1000 /* APSD delv/trigger for AC_VO is default enabled */ ++#define WL_STA_N_CAP 0x2000 /* STA 802.11n capable */ ++#define WL_STA_SCBSTATS 0x4000 /* Per STA debug stats */ ++ ++#define WL_WDS_LINKUP WL_STA_WDS_LINKUP /* deprecated */ ++ ++/* Values for TX Filter override mode */ ++#define WLC_TXFILTER_OVERRIDE_DISABLED 0 ++#define WLC_TXFILTER_OVERRIDE_ENABLED 1 ++ ++/* Used to get specific STA parameters */ ++typedef struct { ++ uint32 val; ++ struct ether_addr ea; ++} scb_val_t; ++ ++/* Used by iovar versions of some ioctls, i.e. WLC_SCB_AUTHORIZE et al */ ++typedef struct { ++ uint32 code; ++ scb_val_t ioctl_args; ++} authops_t; ++ ++/* channel encoding */ ++typedef struct channel_info { ++ int hw_channel; ++ int target_channel; ++ int scan_channel; ++} channel_info_t; ++ ++/* For ioctls that take a list of MAC addresses */ ++struct maclist { ++ uint count; /* number of MAC addresses */ ++ struct ether_addr ea[1]; /* variable length array of MAC addresses */ ++}; ++ ++/* get pkt count struct passed through ioctl */ ++typedef struct get_pktcnt { ++ uint rx_good_pkt; ++ uint rx_bad_pkt; ++ uint tx_good_pkt; ++ uint tx_bad_pkt; ++ uint rx_ocast_good_pkt; /* unicast packets destined for others */ ++} get_pktcnt_t; ++ ++/* NINTENDO2 */ ++#define LQ_IDX_MIN 0 ++#define LQ_IDX_MAX 1 ++#define LQ_IDX_AVG 2 ++#define LQ_IDX_SUM 2 ++#define LQ_IDX_LAST 3 ++#define LQ_STOP_MONITOR 0 ++#define LQ_START_MONITOR 1 ++ ++/* Get averages RSSI, Rx PHY rate and SNR values */ ++typedef struct { ++ int rssi[LQ_IDX_LAST]; /* Array to keep min, max, avg rssi */ ++ int snr[LQ_IDX_LAST]; /* Array to 
keep min, max, avg snr */ ++ int isvalid; /* Flag indicating whether above data is valid */ ++} wl_lq_t; /* Link Quality */ ++ ++typedef enum wl_wakeup_reason_type { ++ LCD_ON = 1, ++ LCD_OFF, ++ DRC1_WAKE, ++ DRC2_WAKE, ++ REASON_LAST ++} wl_wr_type_t; ++ ++typedef struct { ++/* Unique filter id */ ++ uint32 id; ++ ++/* stores the reason for the last wake up */ ++ uint8 reason; ++} wl_wr_t; ++ ++/* Get MAC specific rate histogram command */ ++typedef struct { ++ struct ether_addr ea; /* MAC Address */ ++ uint8 ac_cat; /* Access Category */ ++ uint8 num_pkts; /* Number of packet entries to be averaged */ ++} wl_mac_ratehisto_cmd_t; /* MAC Specific Rate Histogram command */ ++ ++/* Get MAC rate histogram response */ ++typedef struct { ++ uint32 rate[WLC_MAXRATE + 1]; /* Rates */ ++ uint32 mcs[WL_RATESET_SZ_HT_MCS * WL_TX_CHAINS_MAX]; /* MCS counts */ ++ uint32 vht[WL_RATESET_SZ_VHT_MCS][WL_TX_CHAINS_MAX]; /* VHT counts */ ++ uint32 tsf_timer[2][2]; /* Start and End time for 8bytes value */ ++} wl_mac_ratehisto_res_t; /* MAC Specific Rate Histogram Response */ ++ ++/* Values for TX Filter override mode */ ++#define WLC_TXFILTER_OVERRIDE_DISABLED 0 ++#define WLC_TXFILTER_OVERRIDE_ENABLED 1 ++ ++#define WL_IOCTL_ACTION_GET 0x0 ++#define WL_IOCTL_ACTION_SET 0x1 ++#define WL_IOCTL_ACTION_OVL_IDX_MASK 0x1e ++#define WL_IOCTL_ACTION_OVL_RSV 0x20 ++#define WL_IOCTL_ACTION_OVL 0x40 ++#define WL_IOCTL_ACTION_MASK 0x7e ++#define WL_IOCTL_ACTION_OVL_SHIFT 1 ++ ++/* Linux network driver ioctl encoding */ ++typedef struct wl_ioctl { ++ uint cmd; /* common ioctl definition */ ++ void *buf; /* pointer to user buffer */ ++ uint len; /* length of user buffer */ ++ uint8 set; /* 1=set IOCTL; 0=query IOCTL */ ++ uint used; /* bytes read or written (optional) */ ++ uint needed; /* bytes needed (optional) */ ++} wl_ioctl_t; ++ ++/* reference to wl_ioctl_t struct used by usermode driver */ ++#define ioctl_subtype set /* subtype param */ ++#define ioctl_pid used /* pid param */ ++#define 
ioctl_status needed /* status param */ ++ ++/* ++ * Structure for passing hardware and software ++ * revision info up from the driver. ++ */ ++typedef struct wlc_rev_info { ++ uint vendorid; /* PCI vendor id */ ++ uint deviceid; /* device id of chip */ ++ uint radiorev; /* radio revision */ ++ uint chiprev; /* chip revision */ ++ uint corerev; /* core revision */ ++ uint boardid; /* board identifier (usu. PCI sub-device id) */ ++ uint boardvendor; /* board vendor (usu. PCI sub-vendor id) */ ++ uint boardrev; /* board revision */ ++ uint driverrev; /* driver version */ ++ uint ucoderev; /* microcode version */ ++ uint bus; /* bus type */ ++ uint chipnum; /* chip number */ ++ uint phytype; /* phy type */ ++ uint phyrev; /* phy revision */ ++ uint anarev; /* anacore rev */ ++ uint chippkg; /* chip package info */ ++} wlc_rev_info_t; ++ ++#define WL_REV_INFO_LEGACY_LENGTH 48 ++ ++#define WL_BRAND_MAX 10 ++typedef struct wl_instance_info { ++ uint instance; ++ char brand[WL_BRAND_MAX]; ++} wl_instance_info_t; ++ ++/* structure to change size of tx fifo */ ++typedef struct wl_txfifo_sz { ++ uint16 magic; ++ uint16 fifo; ++ uint16 size; ++} wl_txfifo_sz_t; ++/* magic pattern used for mismatch driver and wl */ ++#define WL_TXFIFO_SZ_MAGIC 0xa5a5 ++ ++/* Transfer info about an IOVar from the driver */ ++/* Max supported IOV name size in bytes, + 1 for nul termination */ ++#define WLC_IOV_NAME_LEN 30 ++typedef struct wlc_iov_trx_s { ++ uint8 module; ++ uint8 type; ++ char name[WLC_IOV_NAME_LEN]; ++} wlc_iov_trx_t; ++ ++/* check this magic number */ ++#define WLC_IOCTL_MAGIC 0x14e46c77 ++ ++/* bump this number if you change the ioctl interface */ ++#ifdef D11AC_IOTYPES ++#define WLC_IOCTL_VERSION 2 ++#define WLC_IOCTL_VERSION_LEGACY_IOTYPES 1 ++#else ++#define WLC_IOCTL_VERSION 1 ++#endif /* D11AC_IOTYPES */ ++ ++#define WLC_IOCTL_MAXLEN 8192 /* max length ioctl buffer required */ ++#define WLC_IOCTL_SMLEN 256 /* "small" length ioctl buffer required */ ++#define 
WLC_IOCTL_MEDLEN 1536 /* "med" length ioctl buffer required */ ++#ifdef WLC_HIGH_ONLY ++#define WLC_SAMPLECOLLECT_MAXLEN 1024 /* limit sample size for bmac */ ++#else ++#if defined(LCNCONF) || defined(LCN40CONF) ++#define WLC_SAMPLECOLLECT_MAXLEN 8192 /* Max Sample Collect buffer */ ++#else ++#define WLC_SAMPLECOLLECT_MAXLEN 10240 /* Max Sample Collect buffer for two cores */ ++#endif ++#endif /* WLC_HIGH_ONLY */ ++ ++/* common ioctl definitions */ ++#define WLC_GET_MAGIC 0 ++#define WLC_GET_VERSION 1 ++#define WLC_UP 2 ++#define WLC_DOWN 3 ++#define WLC_GET_LOOP 4 ++#define WLC_SET_LOOP 5 ++#define WLC_DUMP 6 ++#define WLC_GET_MSGLEVEL 7 ++#define WLC_SET_MSGLEVEL 8 ++#define WLC_GET_PROMISC 9 ++#define WLC_SET_PROMISC 10 ++/* #define WLC_OVERLAY_IOCTL 11 */ /* not supported */ ++#define WLC_GET_RATE 12 ++#define WLC_GET_MAX_RATE 13 ++#define WLC_GET_INSTANCE 14 ++/* #define WLC_GET_FRAG 15 */ /* no longer supported */ ++/* #define WLC_SET_FRAG 16 */ /* no longer supported */ ++/* #define WLC_GET_RTS 17 */ /* no longer supported */ ++/* #define WLC_SET_RTS 18 */ /* no longer supported */ ++#define WLC_GET_INFRA 19 ++#define WLC_SET_INFRA 20 ++#define WLC_GET_AUTH 21 ++#define WLC_SET_AUTH 22 ++#define WLC_GET_BSSID 23 ++#define WLC_SET_BSSID 24 ++#define WLC_GET_SSID 25 ++#define WLC_SET_SSID 26 ++#define WLC_RESTART 27 ++#define WLC_TERMINATED 28 ++/* #define WLC_DUMP_SCB 28 */ /* no longer supported */ ++#define WLC_GET_CHANNEL 29 ++#define WLC_SET_CHANNEL 30 ++#define WLC_GET_SRL 31 ++#define WLC_SET_SRL 32 ++#define WLC_GET_LRL 33 ++#define WLC_SET_LRL 34 ++#define WLC_GET_PLCPHDR 35 ++#define WLC_SET_PLCPHDR 36 ++#define WLC_GET_RADIO 37 ++#define WLC_SET_RADIO 38 ++#define WLC_GET_PHYTYPE 39 ++#define WLC_DUMP_RATE 40 ++#define WLC_SET_RATE_PARAMS 41 ++#define WLC_GET_FIXRATE 42 ++#define WLC_SET_FIXRATE 43 ++/* #define WLC_GET_WEP 42 */ /* no longer supported */ ++/* #define WLC_SET_WEP 43 */ /* no longer supported */ ++#define WLC_GET_KEY 44 ++#define 
WLC_SET_KEY 45 ++#define WLC_GET_REGULATORY 46 ++#define WLC_SET_REGULATORY 47 ++#define WLC_GET_PASSIVE_SCAN 48 ++#define WLC_SET_PASSIVE_SCAN 49 ++#define WLC_SCAN 50 ++#define WLC_SCAN_RESULTS 51 ++#define WLC_DISASSOC 52 ++#define WLC_REASSOC 53 ++#define WLC_GET_ROAM_TRIGGER 54 ++#define WLC_SET_ROAM_TRIGGER 55 ++#define WLC_GET_ROAM_DELTA 56 ++#define WLC_SET_ROAM_DELTA 57 ++#define WLC_GET_ROAM_SCAN_PERIOD 58 ++#define WLC_SET_ROAM_SCAN_PERIOD 59 ++#define WLC_EVM 60 /* diag */ ++#define WLC_GET_TXANT 61 ++#define WLC_SET_TXANT 62 ++#define WLC_GET_ANTDIV 63 ++#define WLC_SET_ANTDIV 64 ++/* #define WLC_GET_TXPWR 65 */ /* no longer supported */ ++/* #define WLC_SET_TXPWR 66 */ /* no longer supported */ ++#define WLC_GET_CLOSED 67 ++#define WLC_SET_CLOSED 68 ++#define WLC_GET_MACLIST 69 ++#define WLC_SET_MACLIST 70 ++#define WLC_GET_RATESET 71 ++#define WLC_SET_RATESET 72 ++/* #define WLC_GET_LOCALE 73 */ /* no longer supported */ ++#define WLC_LONGTRAIN 74 ++#define WLC_GET_BCNPRD 75 ++#define WLC_SET_BCNPRD 76 ++#define WLC_GET_DTIMPRD 77 ++#define WLC_SET_DTIMPRD 78 ++#define WLC_GET_SROM 79 ++#define WLC_SET_SROM 80 ++#define WLC_GET_WEP_RESTRICT 81 ++#define WLC_SET_WEP_RESTRICT 82 ++#define WLC_GET_COUNTRY 83 ++#define WLC_SET_COUNTRY 84 ++#define WLC_GET_PM 85 ++#define WLC_SET_PM 86 ++#define WLC_GET_WAKE 87 ++#define WLC_SET_WAKE 88 ++/* #define WLC_GET_D11CNTS 89 */ /* -> "counters" iovar */ ++#define WLC_GET_FORCELINK 90 /* ndis only */ ++#define WLC_SET_FORCELINK 91 /* ndis only */ ++#define WLC_FREQ_ACCURACY 92 /* diag */ ++#define WLC_CARRIER_SUPPRESS 93 /* diag */ ++#define WLC_GET_PHYREG 94 ++#define WLC_SET_PHYREG 95 ++#define WLC_GET_RADIOREG 96 ++#define WLC_SET_RADIOREG 97 ++#define WLC_GET_REVINFO 98 ++#define WLC_GET_UCANTDIV 99 ++#define WLC_SET_UCANTDIV 100 ++#define WLC_R_REG 101 ++#define WLC_W_REG 102 ++/* #define WLC_DIAG_LOOPBACK 103 old tray diag */ ++/* #define WLC_RESET_D11CNTS 104 */ /* -> "reset_d11cnts" iovar */ ++#define 
WLC_GET_MACMODE 105 ++#define WLC_SET_MACMODE 106 ++#define WLC_GET_MONITOR 107 ++#define WLC_SET_MONITOR 108 ++#define WLC_GET_GMODE 109 ++#define WLC_SET_GMODE 110 ++#define WLC_GET_LEGACY_ERP 111 ++#define WLC_SET_LEGACY_ERP 112 ++#define WLC_GET_RX_ANT 113 ++#define WLC_GET_CURR_RATESET 114 /* current rateset */ ++#define WLC_GET_SCANSUPPRESS 115 ++#define WLC_SET_SCANSUPPRESS 116 ++#define WLC_GET_AP 117 ++#define WLC_SET_AP 118 ++#define WLC_GET_EAP_RESTRICT 119 ++#define WLC_SET_EAP_RESTRICT 120 ++#define WLC_SCB_AUTHORIZE 121 ++#define WLC_SCB_DEAUTHORIZE 122 ++#define WLC_GET_WDSLIST 123 ++#define WLC_SET_WDSLIST 124 ++#define WLC_GET_ATIM 125 ++#define WLC_SET_ATIM 126 ++#define WLC_GET_RSSI 127 ++#define WLC_GET_PHYANTDIV 128 ++#define WLC_SET_PHYANTDIV 129 ++#define WLC_AP_RX_ONLY 130 ++#define WLC_GET_TX_PATH_PWR 131 ++#define WLC_SET_TX_PATH_PWR 132 ++#define WLC_GET_WSEC 133 ++#define WLC_SET_WSEC 134 ++#define WLC_GET_PHY_NOISE 135 ++#define WLC_GET_BSS_INFO 136 ++#define WLC_GET_PKTCNTS 137 ++#define WLC_GET_LAZYWDS 138 ++#define WLC_SET_LAZYWDS 139 ++#define WLC_GET_BANDLIST 140 ++#define WLC_GET_BAND 141 ++#define WLC_SET_BAND 142 ++#define WLC_SCB_DEAUTHENTICATE 143 ++#define WLC_GET_SHORTSLOT 144 ++#define WLC_GET_SHORTSLOT_OVERRIDE 145 ++#define WLC_SET_SHORTSLOT_OVERRIDE 146 ++#define WLC_GET_SHORTSLOT_RESTRICT 147 ++#define WLC_SET_SHORTSLOT_RESTRICT 148 ++#define WLC_GET_GMODE_PROTECTION 149 ++#define WLC_GET_GMODE_PROTECTION_OVERRIDE 150 ++#define WLC_SET_GMODE_PROTECTION_OVERRIDE 151 ++#define WLC_UPGRADE 152 ++/* #define WLC_GET_MRATE 153 */ /* no longer supported */ ++/* #define WLC_SET_MRATE 154 */ /* no longer supported */ ++#define WLC_GET_IGNORE_BCNS 155 ++#define WLC_SET_IGNORE_BCNS 156 ++#define WLC_GET_SCB_TIMEOUT 157 ++#define WLC_SET_SCB_TIMEOUT 158 ++#define WLC_GET_ASSOCLIST 159 ++#define WLC_GET_CLK 160 ++#define WLC_SET_CLK 161 ++#define WLC_GET_UP 162 ++#define WLC_OUT 163 ++#define WLC_GET_WPA_AUTH 164 ++#define 
WLC_SET_WPA_AUTH 165 ++#define WLC_GET_UCFLAGS 166 ++#define WLC_SET_UCFLAGS 167 ++#define WLC_GET_PWRIDX 168 ++#define WLC_SET_PWRIDX 169 ++#define WLC_GET_TSSI 170 ++#define WLC_GET_SUP_RATESET_OVERRIDE 171 ++#define WLC_SET_SUP_RATESET_OVERRIDE 172 ++/* #define WLC_SET_FAST_TIMER 173 */ /* no longer supported */ ++/* #define WLC_GET_FAST_TIMER 174 */ /* no longer supported */ ++/* #define WLC_SET_SLOW_TIMER 175 */ /* no longer supported */ ++/* #define WLC_GET_SLOW_TIMER 176 */ /* no longer supported */ ++/* #define WLC_DUMP_PHYREGS 177 */ /* no longer supported */ ++#define WLC_GET_PROTECTION_CONTROL 178 ++#define WLC_SET_PROTECTION_CONTROL 179 ++#define WLC_GET_PHYLIST 180 ++#define WLC_ENCRYPT_STRENGTH 181 /* ndis only */ ++#define WLC_DECRYPT_STATUS 182 /* ndis only */ ++#define WLC_GET_KEY_SEQ 183 ++#define WLC_GET_SCAN_CHANNEL_TIME 184 ++#define WLC_SET_SCAN_CHANNEL_TIME 185 ++#define WLC_GET_SCAN_UNASSOC_TIME 186 ++#define WLC_SET_SCAN_UNASSOC_TIME 187 ++#define WLC_GET_SCAN_HOME_TIME 188 ++#define WLC_SET_SCAN_HOME_TIME 189 ++#define WLC_GET_SCAN_NPROBES 190 ++#define WLC_SET_SCAN_NPROBES 191 ++#define WLC_GET_PRB_RESP_TIMEOUT 192 ++#define WLC_SET_PRB_RESP_TIMEOUT 193 ++#define WLC_GET_ATTEN 194 ++#define WLC_SET_ATTEN 195 ++#define WLC_GET_SHMEM 196 /* diag */ ++#define WLC_SET_SHMEM 197 /* diag */ ++/* #define WLC_GET_GMODE_PROTECTION_CTS 198 */ /* no longer supported */ ++/* #define WLC_SET_GMODE_PROTECTION_CTS 199 */ /* no longer supported */ ++#define WLC_SET_WSEC_TEST 200 ++#define WLC_SCB_DEAUTHENTICATE_FOR_REASON 201 ++#define WLC_TKIP_COUNTERMEASURES 202 ++#define WLC_GET_PIOMODE 203 ++#define WLC_SET_PIOMODE 204 ++#define WLC_SET_ASSOC_PREFER 205 ++#define WLC_GET_ASSOC_PREFER 206 ++#define WLC_SET_ROAM_PREFER 207 ++#define WLC_GET_ROAM_PREFER 208 ++#define WLC_SET_LED 209 ++#define WLC_GET_LED 210 ++#define WLC_GET_INTERFERENCE_MODE 211 ++#define WLC_SET_INTERFERENCE_MODE 212 ++#define WLC_GET_CHANNEL_QA 213 ++#define WLC_START_CHANNEL_QA 214 
++#define WLC_GET_CHANNEL_SEL 215 ++#define WLC_START_CHANNEL_SEL 216 ++#define WLC_GET_VALID_CHANNELS 217 ++#define WLC_GET_FAKEFRAG 218 ++#define WLC_SET_FAKEFRAG 219 ++#define WLC_GET_PWROUT_PERCENTAGE 220 ++#define WLC_SET_PWROUT_PERCENTAGE 221 ++#define WLC_SET_BAD_FRAME_PREEMPT 222 ++#define WLC_GET_BAD_FRAME_PREEMPT 223 ++#define WLC_SET_LEAP_LIST 224 ++#define WLC_GET_LEAP_LIST 225 ++#define WLC_GET_CWMIN 226 ++#define WLC_SET_CWMIN 227 ++#define WLC_GET_CWMAX 228 ++#define WLC_SET_CWMAX 229 ++#define WLC_GET_WET 230 ++#define WLC_SET_WET 231 ++#define WLC_GET_PUB 232 ++/* #define WLC_SET_GLACIAL_TIMER 233 */ /* no longer supported */ ++/* #define WLC_GET_GLACIAL_TIMER 234 */ /* no longer supported */ ++#define WLC_GET_KEY_PRIMARY 235 ++#define WLC_SET_KEY_PRIMARY 236 ++/* #define WLC_DUMP_RADIOREGS 237 */ /* no longer supported */ ++#define WLC_GET_ACI_ARGS 238 ++#define WLC_SET_ACI_ARGS 239 ++#define WLC_UNSET_CALLBACK 240 ++#define WLC_SET_CALLBACK 241 ++#define WLC_GET_RADAR 242 ++#define WLC_SET_RADAR 243 ++#define WLC_SET_SPECT_MANAGMENT 244 ++#define WLC_GET_SPECT_MANAGMENT 245 ++#define WLC_WDS_GET_REMOTE_HWADDR 246 /* handled in wl_linux.c/wl_vx.c */ ++#define WLC_WDS_GET_WPA_SUP 247 ++#define WLC_SET_CS_SCAN_TIMER 248 ++#define WLC_GET_CS_SCAN_TIMER 249 ++#define WLC_MEASURE_REQUEST 250 ++#define WLC_INIT 251 ++#define WLC_SEND_QUIET 252 ++#define WLC_KEEPALIVE 253 ++#define WLC_SEND_PWR_CONSTRAINT 254 ++#define WLC_UPGRADE_STATUS 255 ++#define WLC_CURRENT_PWR 256 ++#define WLC_GET_SCAN_PASSIVE_TIME 257 ++#define WLC_SET_SCAN_PASSIVE_TIME 258 ++#define WLC_LEGACY_LINK_BEHAVIOR 259 ++#define WLC_GET_CHANNELS_IN_COUNTRY 260 ++#define WLC_GET_COUNTRY_LIST 261 ++#define WLC_GET_VAR 262 /* get value of named variable */ ++#define WLC_SET_VAR 263 /* set named variable to value */ ++#define WLC_NVRAM_GET 264 /* deprecated */ ++#define WLC_NVRAM_SET 265 ++#define WLC_NVRAM_DUMP 266 ++#define WLC_REBOOT 267 ++#define WLC_SET_WSEC_PMK 268 ++#define 
WLC_GET_AUTH_MODE 269 ++#define WLC_SET_AUTH_MODE 270 ++#define WLC_GET_WAKEENTRY 271 ++#define WLC_SET_WAKEENTRY 272 ++#define WLC_NDCONFIG_ITEM 273 /* currently handled in wl_oid.c */ ++#define WLC_NVOTPW 274 ++#define WLC_OTPW 275 ++#define WLC_IOV_BLOCK_GET 276 ++#define WLC_IOV_MODULES_GET 277 ++#define WLC_SOFT_RESET 278 ++#define WLC_GET_ALLOW_MODE 279 ++#define WLC_SET_ALLOW_MODE 280 ++#define WLC_GET_DESIRED_BSSID 281 ++#define WLC_SET_DESIRED_BSSID 282 ++#define WLC_DISASSOC_MYAP 283 ++#define WLC_GET_NBANDS 284 /* for Dongle EXT_STA support */ ++#define WLC_GET_BANDSTATES 285 /* for Dongle EXT_STA support */ ++#define WLC_GET_WLC_BSS_INFO 286 /* for Dongle EXT_STA support */ ++#define WLC_GET_ASSOC_INFO 287 /* for Dongle EXT_STA support */ ++#define WLC_GET_OID_PHY 288 /* for Dongle EXT_STA support */ ++#define WLC_SET_OID_PHY 289 /* for Dongle EXT_STA support */ ++#define WLC_SET_ASSOC_TIME 290 /* for Dongle EXT_STA support */ ++#define WLC_GET_DESIRED_SSID 291 /* for Dongle EXT_STA support */ ++#define WLC_GET_CHANSPEC 292 /* for Dongle EXT_STA support */ ++#define WLC_GET_ASSOC_STATE 293 /* for Dongle EXT_STA support */ ++#define WLC_SET_PHY_STATE 294 /* for Dongle EXT_STA support */ ++#define WLC_GET_SCAN_PENDING 295 /* for Dongle EXT_STA support */ ++#define WLC_GET_SCANREQ_PENDING 296 /* for Dongle EXT_STA support */ ++#define WLC_GET_PREV_ROAM_REASON 297 /* for Dongle EXT_STA support */ ++#define WLC_SET_PREV_ROAM_REASON 298 /* for Dongle EXT_STA support */ ++#define WLC_GET_BANDSTATES_PI 299 /* for Dongle EXT_STA support */ ++#define WLC_GET_PHY_STATE 300 /* for Dongle EXT_STA support */ ++#define WLC_GET_BSS_WPA_RSN 301 /* for Dongle EXT_STA support */ ++#define WLC_GET_BSS_WPA2_RSN 302 /* for Dongle EXT_STA support */ ++#define WLC_GET_BSS_BCN_TS 303 /* for Dongle EXT_STA support */ ++#define WLC_GET_INT_DISASSOC 304 /* for Dongle EXT_STA support */ ++#define WLC_SET_NUM_PEERS 305 /* for Dongle EXT_STA support */ ++#define WLC_GET_NUM_BSS 306 
/* for Dongle EXT_STA support */ ++#define WLC_PHY_SAMPLE_COLLECT 307 /* phy sample collect mode */ ++/* #define WLC_UM_PRIV 308 */ /* Deprecated: usermode driver */ ++#define WLC_GET_CMD 309 ++/* #define WLC_LAST 310 */ /* Never used - can be reused */ ++#define WLC_SET_INTERFERENCE_OVERRIDE_MODE 311 /* set inter mode override */ ++#define WLC_GET_INTERFERENCE_OVERRIDE_MODE 312 /* get inter mode override */ ++/* #define WLC_GET_WAI_RESTRICT 313 */ /* for WAPI, deprecated use iovar instead */ ++/* #define WLC_SET_WAI_RESTRICT 314 */ /* for WAPI, deprecated use iovar instead */ ++/* #define WLC_SET_WAI_REKEY 315 */ /* for WAPI, deprecated use iovar instead */ ++#define WLC_SET_NAT_CONFIG 316 /* for configuring NAT filter driver */ ++#define WLC_GET_NAT_STATE 317 ++#define WLC_LAST 318 ++ ++#ifndef EPICTRL_COOKIE ++#define EPICTRL_COOKIE 0xABADCEDE ++#endif ++ ++/* vx wlc ioctl's offset */ ++#define CMN_IOCTL_OFF 0x180 ++ ++/* ++ * custom OID support ++ * ++ * 0xFF - implementation specific OID ++ * 0xE4 - first byte of Broadcom PCI vendor ID ++ * 0x14 - second byte of Broadcom PCI vendor ID ++ * 0xXX - the custom OID number ++ */ ++ ++/* begin 0x1f values beyond the start of the ET driver range. 
*/ ++#define WL_OID_BASE 0xFFE41420 ++ ++/* NDIS overrides */ ++#define OID_WL_GETINSTANCE (WL_OID_BASE + WLC_GET_INSTANCE) ++#define OID_WL_GET_FORCELINK (WL_OID_BASE + WLC_GET_FORCELINK) ++#define OID_WL_SET_FORCELINK (WL_OID_BASE + WLC_SET_FORCELINK) ++#define OID_WL_ENCRYPT_STRENGTH (WL_OID_BASE + WLC_ENCRYPT_STRENGTH) ++#define OID_WL_DECRYPT_STATUS (WL_OID_BASE + WLC_DECRYPT_STATUS) ++#define OID_LEGACY_LINK_BEHAVIOR (WL_OID_BASE + WLC_LEGACY_LINK_BEHAVIOR) ++#define OID_WL_NDCONFIG_ITEM (WL_OID_BASE + WLC_NDCONFIG_ITEM) ++ ++/* EXT_STA Dongle suuport */ ++#define OID_STA_CHANSPEC (WL_OID_BASE + WLC_GET_CHANSPEC) ++#define OID_STA_NBANDS (WL_OID_BASE + WLC_GET_NBANDS) ++#define OID_STA_GET_PHY (WL_OID_BASE + WLC_GET_OID_PHY) ++#define OID_STA_SET_PHY (WL_OID_BASE + WLC_SET_OID_PHY) ++#define OID_STA_ASSOC_TIME (WL_OID_BASE + WLC_SET_ASSOC_TIME) ++#define OID_STA_DESIRED_SSID (WL_OID_BASE + WLC_GET_DESIRED_SSID) ++#define OID_STA_SET_PHY_STATE (WL_OID_BASE + WLC_SET_PHY_STATE) ++#define OID_STA_SCAN_PENDING (WL_OID_BASE + WLC_GET_SCAN_PENDING) ++#define OID_STA_SCANREQ_PENDING (WL_OID_BASE + WLC_GET_SCANREQ_PENDING) ++#define OID_STA_GET_ROAM_REASON (WL_OID_BASE + WLC_GET_PREV_ROAM_REASON) ++#define OID_STA_SET_ROAM_REASON (WL_OID_BASE + WLC_SET_PREV_ROAM_REASON) ++#define OID_STA_GET_PHY_STATE (WL_OID_BASE + WLC_GET_PHY_STATE) ++#define OID_STA_INT_DISASSOC (WL_OID_BASE + WLC_GET_INT_DISASSOC) ++#define OID_STA_SET_NUM_PEERS (WL_OID_BASE + WLC_SET_NUM_PEERS) ++#define OID_STA_GET_NUM_BSS (WL_OID_BASE + WLC_GET_NUM_BSS) ++ ++/* NAT filter driver support */ ++#define OID_NAT_SET_CONFIG (WL_OID_BASE + WLC_SET_NAT_CONFIG) ++#define OID_NAT_GET_STATE (WL_OID_BASE + WLC_GET_NAT_STATE) ++ ++#define WL_DECRYPT_STATUS_SUCCESS 1 ++#define WL_DECRYPT_STATUS_FAILURE 2 ++#define WL_DECRYPT_STATUS_UNKNOWN 3 ++ ++/* allows user-mode app to poll the status of USB image upgrade */ ++#define WLC_UPGRADE_SUCCESS 0 ++#define WLC_UPGRADE_PENDING 1 ++ ++#ifdef 
CONFIG_USBRNDIS_RETAIL ++/* struct passed in for WLC_NDCONFIG_ITEM */ ++typedef struct { ++ char *name; ++ void *param; ++} ndconfig_item_t; ++#endif ++ ++ ++/* WLC_GET_AUTH, WLC_SET_AUTH values */ ++#define WL_AUTH_OPEN_SYSTEM 0 /* d11 open authentication */ ++#define WL_AUTH_SHARED_KEY 1 /* d11 shared authentication */ ++#define WL_AUTH_OPEN_SHARED 2 /* try open, then shared if open failed w/rc 13 */ ++ ++/* Bit masks for radio disabled status - returned by WL_GET_RADIO */ ++#define WL_RADIO_SW_DISABLE (1<<0) ++#define WL_RADIO_HW_DISABLE (1<<1) ++#define WL_RADIO_MPC_DISABLE (1<<2) ++#define WL_RADIO_COUNTRY_DISABLE (1<<3) /* some countries don't support any channel */ ++ ++#define WL_SPURAVOID_OFF 0 ++#define WL_SPURAVOID_ON1 1 ++#define WL_SPURAVOID_ON2 2 ++ ++/* Override bit for WLC_SET_TXPWR. if set, ignore other level limits */ ++#define WL_TXPWR_OVERRIDE (1U<<31) ++#define WL_TXPWR_NEG (1U<<30) ++ ++#define WL_PHY_PAVARS_LEN 32 /* Phy type, Band range, chain, a1[0], b0[0], b1[0] ... 
*/ ++ ++#define WL_PHY_PAVARS2_NUM 3 /* a1, b0, b1 */ ++#define WL_PHY_PAVAR_VER 1 /* pavars version */ ++typedef struct wl_pavars2 { ++ uint16 ver; /* version of this struct */ ++ uint16 len; /* len of this structure */ ++ uint16 inuse; /* driver return 1 for a1,b0,b1 in current band range */ ++ uint16 phy_type; /* phy type */ ++ uint16 bandrange; ++ uint16 chain; ++ uint16 inpa[WL_PHY_PAVARS2_NUM]; /* phy pavars for one band range */ ++} wl_pavars2_t; ++ ++typedef struct wl_po { ++ uint16 phy_type; /* Phy type */ ++ uint16 band; ++ uint16 cckpo; ++ uint32 ofdmpo; ++ uint16 mcspo[8]; ++} wl_po_t; ++ ++/* a large TX Power as an init value to factor out of MIN() calculations, ++ * keep low enough to fit in an int8, units are .25 dBm ++ */ ++#define WLC_TXPWR_MAX (127) /* ~32 dBm = 1,500 mW */ ++ ++/* "diag" iovar argument and error code */ ++#define WL_DIAG_INTERRUPT 1 /* d11 loopback interrupt test */ ++#define WL_DIAG_LOOPBACK 2 /* d11 loopback data test */ ++#define WL_DIAG_MEMORY 3 /* d11 memory test */ ++#define WL_DIAG_LED 4 /* LED test */ ++#define WL_DIAG_REG 5 /* d11/phy register test */ ++#define WL_DIAG_SROM 6 /* srom read/crc test */ ++#define WL_DIAG_DMA 7 /* DMA test */ ++#define WL_DIAG_LOOPBACK_EXT 8 /* enhenced d11 loopback data test */ ++ ++#define WL_DIAGERR_SUCCESS 0 ++#define WL_DIAGERR_FAIL_TO_RUN 1 /* unable to run requested diag */ ++#define WL_DIAGERR_NOT_SUPPORTED 2 /* diag requested is not supported */ ++#define WL_DIAGERR_INTERRUPT_FAIL 3 /* loopback interrupt test failed */ ++#define WL_DIAGERR_LOOPBACK_FAIL 4 /* loopback data test failed */ ++#define WL_DIAGERR_SROM_FAIL 5 /* srom read failed */ ++#define WL_DIAGERR_SROM_BADCRC 6 /* srom crc failed */ ++#define WL_DIAGERR_REG_FAIL 7 /* d11/phy register test failed */ ++#define WL_DIAGERR_MEMORY_FAIL 8 /* d11 memory test failed */ ++#define WL_DIAGERR_NOMEM 9 /* diag test failed due to no memory */ ++#define WL_DIAGERR_DMA_FAIL 10 /* DMA test failed */ ++ ++#define 
WL_DIAGERR_MEMORY_TIMEOUT 11 /* d11 memory test didn't finish in time */ ++#define WL_DIAGERR_MEMORY_BADPATTERN 12 /* d11 memory test result in bad pattern */ ++ ++/* band types */ ++#define WLC_BAND_AUTO 0 /* auto-select */ ++#define WLC_BAND_5G 1 /* 5 Ghz */ ++#define WLC_BAND_2G 2 /* 2.4 Ghz */ ++#define WLC_BAND_ALL 3 /* all bands */ ++ ++/* band range returned by band_range iovar */ ++#define WL_CHAN_FREQ_RANGE_2G 0 ++#define WL_CHAN_FREQ_RANGE_5GL 1 ++#define WL_CHAN_FREQ_RANGE_5GM 2 ++#define WL_CHAN_FREQ_RANGE_5GH 3 ++ ++#define WL_CHAN_FREQ_RANGE_5GLL_5BAND 4 ++#define WL_CHAN_FREQ_RANGE_5GLH_5BAND 5 ++#define WL_CHAN_FREQ_RANGE_5GML_5BAND 6 ++#define WL_CHAN_FREQ_RANGE_5GMH_5BAND 7 ++#define WL_CHAN_FREQ_RANGE_5GH_5BAND 8 ++ ++#define WL_CHAN_FREQ_RANGE_5G_BAND0 1 ++#define WL_CHAN_FREQ_RANGE_5G_BAND1 2 ++#define WL_CHAN_FREQ_RANGE_5G_BAND2 3 ++#define WL_CHAN_FREQ_RANGE_5G_BAND3 4 ++ ++#define WL_CHAN_FREQ_RANGE_5G_4BAND 5 ++ ++/* phy types (returned by WLC_GET_PHYTPE) */ ++#define WLC_PHY_TYPE_A 0 ++#define WLC_PHY_TYPE_B 1 ++#define WLC_PHY_TYPE_G 2 ++#define WLC_PHY_TYPE_N 4 ++#define WLC_PHY_TYPE_LP 5 ++#define WLC_PHY_TYPE_SSN 6 ++#define WLC_PHY_TYPE_HT 7 ++#define WLC_PHY_TYPE_LCN 8 ++#define WLC_PHY_TYPE_LCN40 10 ++#define WLC_PHY_TYPE_AC 11 ++#define WLC_PHY_TYPE_NULL 0xf ++ ++/* MAC list modes */ ++#define WLC_MACMODE_DISABLED 0 /* MAC list disabled */ ++#define WLC_MACMODE_DENY 1 /* Deny specified (i.e. allow unspecified) */ ++#define WLC_MACMODE_ALLOW 2 /* Allow specified (i.e. 
deny unspecified) */ ++ ++/* ++ * 54g modes (basic bits may still be overridden) ++ * ++ * GMODE_LEGACY_B Rateset: 1b, 2b, 5.5, 11 ++ * Preamble: Long ++ * Shortslot: Off ++ * GMODE_AUTO Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54 ++ * Extended Rateset: 6, 9, 12, 48 ++ * Preamble: Long ++ * Shortslot: Auto ++ * GMODE_ONLY Rateset: 1b, 2b, 5.5b, 11b, 18, 24b, 36, 54 ++ * Extended Rateset: 6b, 9, 12b, 48 ++ * Preamble: Short required ++ * Shortslot: Auto ++ * GMODE_B_DEFERRED Rateset: 1b, 2b, 5.5b, 11b, 18, 24, 36, 54 ++ * Extended Rateset: 6, 9, 12, 48 ++ * Preamble: Long ++ * Shortslot: On ++ * GMODE_PERFORMANCE Rateset: 1b, 2b, 5.5b, 6b, 9, 11b, 12b, 18, 24b, 36, 48, 54 ++ * Preamble: Short required ++ * Shortslot: On and required ++ * GMODE_LRS Rateset: 1b, 2b, 5.5b, 11b ++ * Extended Rateset: 6, 9, 12, 18, 24, 36, 48, 54 ++ * Preamble: Long ++ * Shortslot: Auto ++ */ ++#define GMODE_LEGACY_B 0 ++#define GMODE_AUTO 1 ++#define GMODE_ONLY 2 ++#define GMODE_B_DEFERRED 3 ++#define GMODE_PERFORMANCE 4 ++#define GMODE_LRS 5 ++#define GMODE_MAX 6 ++ ++/* values for PLCPHdr_override */ ++#define WLC_PLCP_AUTO -1 ++#define WLC_PLCP_SHORT 0 ++#define WLC_PLCP_LONG 1 ++ ++/* values for g_protection_override and n_protection_override */ ++#define WLC_PROTECTION_AUTO -1 ++#define WLC_PROTECTION_OFF 0 ++#define WLC_PROTECTION_ON 1 ++#define WLC_PROTECTION_MMHDR_ONLY 2 ++#define WLC_PROTECTION_CTS_ONLY 3 ++ ++/* values for g_protection_control and n_protection_control */ ++#define WLC_PROTECTION_CTL_OFF 0 ++#define WLC_PROTECTION_CTL_LOCAL 1 ++#define WLC_PROTECTION_CTL_OVERLAP 2 ++ ++/* values for n_protection */ ++#define WLC_N_PROTECTION_OFF 0 ++#define WLC_N_PROTECTION_OPTIONAL 1 ++#define WLC_N_PROTECTION_20IN40 2 ++#define WLC_N_PROTECTION_MIXEDMODE 3 ++ ++/* values for n_preamble_type */ ++#define WLC_N_PREAMBLE_MIXEDMODE 0 ++#define WLC_N_PREAMBLE_GF 1 ++#define WLC_N_PREAMBLE_GF_BRCM 2 ++ ++/* values for band specific 40MHz capabilities (deprecated) */ ++#define 
WLC_N_BW_20ALL 0 ++#define WLC_N_BW_40ALL 1 ++#define WLC_N_BW_20IN2G_40IN5G 2 ++ ++#define WLC_BW_20MHZ_BIT (1<<0) ++#define WLC_BW_40MHZ_BIT (1<<1) ++#define WLC_BW_80MHZ_BIT (1<<2) ++ ++/* Bandwidth capabilities */ ++#define WLC_BW_CAP_20MHZ (WLC_BW_20MHZ_BIT) ++#define WLC_BW_CAP_40MHZ (WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) ++#define WLC_BW_CAP_80MHZ (WLC_BW_80MHZ_BIT|WLC_BW_40MHZ_BIT|WLC_BW_20MHZ_BIT) ++#define WLC_BW_CAP_UNRESTRICTED 0xFF ++ ++#define WL_BW_CAP_20MHZ(bw_cap) (((bw_cap) & WLC_BW_20MHZ_BIT) ? TRUE : FALSE) ++#define WL_BW_CAP_40MHZ(bw_cap) (((bw_cap) & WLC_BW_40MHZ_BIT) ? TRUE : FALSE) ++#define WL_BW_CAP_80MHZ(bw_cap) (((bw_cap) & WLC_BW_80MHZ_BIT) ? TRUE : FALSE) ++ ++/* values to force tx/rx chain */ ++#define WLC_N_TXRX_CHAIN0 0 ++#define WLC_N_TXRX_CHAIN1 1 ++ ++/* bitflags for SGI support (sgi_rx iovar) */ ++#define WLC_N_SGI_20 0x01 ++#define WLC_N_SGI_40 0x02 ++ ++/* when sgi_tx==WLC_SGI_ALL, bypass rate selection, enable sgi for all mcs */ ++#define WLC_SGI_ALL 0x02 ++ ++/* Values for PM */ ++#define PM_OFF 0 ++#define PM_MAX 1 ++#define PM_FAST 2 ++#define PM_FORCE_OFF 3 /* use this bit to force PM off even bt is active */ ++ ++#define LISTEN_INTERVAL 10 ++/* interference mitigation options */ ++#define INTERFERE_OVRRIDE_OFF -1 /* interference override off */ ++#define INTERFERE_NONE 0 /* off */ ++#define NON_WLAN 1 /* foreign/non 802.11 interference, no auto detect */ ++#define WLAN_MANUAL 2 /* ACI: no auto detection */ ++#define WLAN_AUTO 3 /* ACI: auto detect */ ++#define WLAN_AUTO_W_NOISE 4 /* ACI: auto - detect and non 802.11 interference */ ++#define AUTO_ACTIVE (1 << 7) /* Auto is currently active */ ++ ++typedef struct wl_aci_args { ++ int enter_aci_thresh; /* Trigger level to start detecting ACI */ ++ int exit_aci_thresh; /* Trigger level to exit ACI mode */ ++ int usec_spin; /* microsecs to delay between rssi samples */ ++ int glitch_delay; /* interval between ACI scans when glitch count is consistently high */ ++ uint16 
nphy_adcpwr_enter_thresh; /* ADC power to enter ACI mitigation mode */ ++ uint16 nphy_adcpwr_exit_thresh; /* ADC power to exit ACI mitigation mode */ ++ uint16 nphy_repeat_ctr; /* Number of tries per channel to compute power */ ++ uint16 nphy_num_samples; /* Number of samples to compute power on one channel */ ++ uint16 nphy_undetect_window_sz; /* num of undetects to exit ACI Mitigation mode */ ++ uint16 nphy_b_energy_lo_aci; /* low ACI power energy threshold for bphy */ ++ uint16 nphy_b_energy_md_aci; /* mid ACI power energy threshold for bphy */ ++ uint16 nphy_b_energy_hi_aci; /* high ACI power energy threshold for bphy */ ++ uint16 nphy_noise_noassoc_glitch_th_up; /* wl interference 4 */ ++ uint16 nphy_noise_noassoc_glitch_th_dn; ++ uint16 nphy_noise_assoc_glitch_th_up; ++ uint16 nphy_noise_assoc_glitch_th_dn; ++ uint16 nphy_noise_assoc_aci_glitch_th_up; ++ uint16 nphy_noise_assoc_aci_glitch_th_dn; ++ uint16 nphy_noise_assoc_enter_th; ++ uint16 nphy_noise_noassoc_enter_th; ++ uint16 nphy_noise_assoc_rx_glitch_badplcp_enter_th; ++ uint16 nphy_noise_noassoc_crsidx_incr; ++ uint16 nphy_noise_assoc_crsidx_incr; ++ uint16 nphy_noise_crsidx_decr; ++} wl_aci_args_t; ++ ++#define TRIGGER_NOW 0 ++#define TRIGGER_CRS 0x01 ++#define TRIGGER_CRSDEASSERT 0x02 ++#define TRIGGER_GOODFCS 0x04 ++#define TRIGGER_BADFCS 0x08 ++#define TRIGGER_BADPLCP 0x10 ++#define TRIGGER_CRSGLITCH 0x20 ++#define WL_ACI_ARGS_LEGACY_LENGTH 16 /* bytes of pre NPHY aci args */ ++#define WL_SAMPLECOLLECT_T_VERSION 2 /* version of wl_samplecollect_args_t struct */ ++typedef struct wl_samplecollect_args { ++ /* version 0 fields */ ++ uint8 coll_us; ++ int cores; ++ /* add'l version 1 fields */ ++ uint16 version; /* see definition of WL_SAMPLECOLLECT_T_VERSION */ ++ uint16 length; /* length of entire structure */ ++ int8 trigger; ++ uint16 timeout; ++ uint16 mode; ++ uint32 pre_dur; ++ uint32 post_dur; ++ uint8 gpio_sel; ++ bool downsamp; ++ bool be_deaf; ++ bool agc; /* loop from init gain and going 
down */ ++ bool filter; /* override high pass corners to lowest */ ++ /* add'l version 2 fields */ ++ uint8 trigger_state; ++ uint8 module_sel1; ++ uint8 module_sel2; ++ uint16 nsamps; ++} wl_samplecollect_args_t; ++ ++#define WL_SAMPLEDATA_HEADER_TYPE 1 ++#define WL_SAMPLEDATA_HEADER_SIZE 80 /* sample collect header size (bytes) */ ++#define WL_SAMPLEDATA_TYPE 2 ++#define WL_SAMPLEDATA_SEQ 0xff /* sequence # */ ++#define WL_SAMPLEDATA_MORE_DATA 0x100 /* more data mask */ ++#define WL_SAMPLEDATA_T_VERSION 1 /* version of wl_samplecollect_args_t struct */ ++/* version for unpacked sample data, int16 {(I,Q),Core(0..N)} */ ++#define WL_SAMPLEDATA_T_VERSION_SPEC_AN 2 ++ ++typedef struct wl_sampledata { ++ uint16 version; /* structure version */ ++ uint16 size; /* size of structure */ ++ uint16 tag; /* Header/Data */ ++ uint16 length; /* data length */ ++ uint32 flag; /* bit def */ ++} wl_sampledata_t; ++ ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++/* wl_radar_args_t */ ++typedef struct { ++ int npulses; /* required number of pulses at n * t_int */ ++ int ncontig; /* required number of pulses at t_int */ ++ int min_pw; /* minimum pulse width (20 MHz clocks) */ ++ int max_pw; /* maximum pulse width (20 MHz clocks) */ ++ uint16 thresh0; /* Radar detection, thresh 0 */ ++ uint16 thresh1; /* Radar detection, thresh 1 */ ++ uint16 blank; /* Radar detection, blank control */ ++ uint16 fmdemodcfg; /* Radar detection, fmdemod config */ ++ int npulses_lp; /* Radar detection, minimum long pulses */ ++ int min_pw_lp; /* Minimum pulsewidth for long pulses */ ++ int max_pw_lp; /* Maximum pulsewidth for long pulses */ ++ int min_fm_lp; /* Minimum fm for long pulses */ ++ int max_span_lp; /* Maximum deltat for long pulses */ ++ int min_deltat; /* Minimum spacing between pulses */ ++ int max_deltat; /* Maximum spacing between pulses */ ++ uint16 autocorr; /* Radar detection, autocorr on or off */ ++ uint16 st_level_time; /* Radar detection, start_timing level */ ++ uint16 t2_min; /* minimum 
clocks needed to remain in state 2 */ ++ uint32 version; /* version */ ++ uint32 fra_pulse_err; /* sample error margin for detecting French radar pulsed */ ++ int npulses_fra; /* Radar detection, minimum French pulses set */ ++ int npulses_stg2; /* Radar detection, minimum staggered-2 pulses set */ ++ int npulses_stg3; /* Radar detection, minimum staggered-3 pulses set */ ++ uint16 percal_mask; /* defines which period cal is masked from radar detection */ ++ int quant; /* quantization resolution to pulse positions */ ++ uint32 min_burst_intv_lp; /* minimum burst to burst interval for bin3 radar */ ++ uint32 max_burst_intv_lp; /* maximum burst to burst interval for bin3 radar */ ++ int nskip_rst_lp; /* number of skipped pulses before resetting lp buffer */ ++ int max_pw_tol; /* maximum tollerance allowed in detected pulse width for radar detection */ ++ uint16 feature_mask; /* 16-bit mask to specify enabled features */ ++} wl_radar_args_t; ++ ++#define WL_RADAR_ARGS_VERSION 2 ++ ++typedef struct { ++ uint32 version; /* version */ ++ uint16 thresh0_20_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 20MHz */ ++ uint16 thresh1_20_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 20MHz */ ++ uint16 thresh0_40_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 40MHz */ ++ uint16 thresh1_40_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 40MHz */ ++ uint16 thresh0_80_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 80MHz */ ++ uint16 thresh1_80_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 80MHz */ ++ uint16 thresh0_160_lo; /* Radar detection, thresh 0 (range 5250-5350MHz) for BW 160MHz */ ++ uint16 thresh1_160_lo; /* Radar detection, thresh 1 (range 5250-5350MHz) for BW 160MHz */ ++ uint16 thresh0_20_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 20MHz */ ++ uint16 thresh1_20_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 20MHz */ ++ uint16 thresh0_40_hi; /* Radar 
detection, thresh 0 (range 5470-5725MHz) for BW 40MHz */ ++ uint16 thresh1_40_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 40MHz */ ++ uint16 thresh0_80_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 80MHz */ ++ uint16 thresh1_80_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 80MHz */ ++ uint16 thresh0_160_hi; /* Radar detection, thresh 0 (range 5470-5725MHz) for BW 160MHz */ ++ uint16 thresh1_160_hi; /* Radar detection, thresh 1 (range 5470-5725MHz) for BW 160MHz */ ++} wl_radar_thr_t; ++ ++#define WL_RADAR_THR_VERSION 2 ++#define WL_THRESHOLD_LO_BAND 70 /* range from 5250MHz - 5350MHz */ ++ ++/* radar iovar SET defines */ ++#define WL_RADAR_DETECTOR_OFF 0 /* radar detector off */ ++#define WL_RADAR_DETECTOR_ON 1 /* radar detector on */ ++#define WL_RADAR_SIMULATED 2 /* force radar detector to declare ++ * detection once ++ */ ++#define WL_RSSI_ANT_VERSION 1 /* current version of wl_rssi_ant_t */ ++#define WL_ANT_RX_MAX 2 /* max 2 receive antennas */ ++#define WL_ANT_HT_RX_MAX 3 /* max 3 receive antennas/cores */ ++#define WL_ANT_IDX_1 0 /* antenna index 1 */ ++#define WL_ANT_IDX_2 1 /* antenna index 2 */ ++ ++#ifndef WL_RSSI_ANT_MAX ++#define WL_RSSI_ANT_MAX 4 /* max possible rx antennas */ ++#elif WL_RSSI_ANT_MAX != 4 ++#error "WL_RSSI_ANT_MAX does not match" ++#endif ++ ++/* RSSI per antenna */ ++typedef struct { ++ uint32 version; /* version field */ ++ uint32 count; /* number of valid antenna rssi */ ++ int8 rssi_ant[WL_RSSI_ANT_MAX]; /* rssi per antenna */ ++} wl_rssi_ant_t; ++ ++/* dfs_status iovar-related defines */ ++ ++/* cac - channel availability check, ++ * ism - in-service monitoring ++ * csa - channel switching announcement ++ */ ++ ++/* cac state values */ ++#define WL_DFS_CACSTATE_IDLE 0 /* state for operating in non-radar channel */ ++#define WL_DFS_CACSTATE_PREISM_CAC 1 /* CAC in progress */ ++#define WL_DFS_CACSTATE_ISM 2 /* ISM in progress */ ++#define WL_DFS_CACSTATE_CSA 3 /* csa */ ++#define 
WL_DFS_CACSTATE_POSTISM_CAC 4 /* ISM CAC */ ++#define WL_DFS_CACSTATE_PREISM_OOC 5 /* PREISM OOC */ ++#define WL_DFS_CACSTATE_POSTISM_OOC 6 /* POSTISM OOC */ ++#define WL_DFS_CACSTATES 7 /* this many states exist */ ++ ++/* data structure used in 'dfs_status' wl interface, which is used to query dfs status */ ++typedef struct { ++ uint state; /* noted by WL_DFS_CACSTATE_XX. */ ++ uint duration; /* time spent in ms in state. */ ++ /* as dfs enters ISM state, it removes the operational channel from quiet channel ++ * list and notes the channel in channel_cleared. set to 0 if no channel is cleared ++ */ ++ chanspec_t chanspec_cleared; ++ /* chanspec cleared used to be a uint, add another to uint16 to maintain size */ ++ uint16 pad; ++} wl_dfs_status_t; ++ ++#define NUM_PWRCTRL_RATES 12 ++ ++typedef struct { ++ uint8 txpwr_band_max[NUM_PWRCTRL_RATES]; /* User set target */ ++ uint8 txpwr_limit[NUM_PWRCTRL_RATES]; /* reg and local power limit */ ++ uint8 txpwr_local_max; /* local max according to the AP */ ++ uint8 txpwr_local_constraint; /* local constraint according to the AP */ ++ uint8 txpwr_chan_reg_max; /* Regulatory max for this channel */ ++ uint8 txpwr_target[2][NUM_PWRCTRL_RATES]; /* Latest target for 2.4 and 5 Ghz */ ++ uint8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */ ++ uint8 txpwr_opo[NUM_PWRCTRL_RATES]; /* On G phy, OFDM power offset */ ++ uint8 txpwr_bphy_cck_max[NUM_PWRCTRL_RATES]; /* Max CCK power for this band (SROM) */ ++ uint8 txpwr_bphy_ofdm_max; /* Max OFDM power for this band (SROM) */ ++ uint8 txpwr_aphy_max[NUM_PWRCTRL_RATES]; /* Max power for A band (SROM) */ ++ int8 txpwr_antgain[2]; /* Ant gain for each band - from SROM */ ++ uint8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */ ++} tx_power_legacy_t; ++ ++#define WL_TX_POWER_RATES_LEGACY 45 ++#define WL_TX_POWER_MCS20_FIRST 12 ++#define WL_TX_POWER_MCS20_NUM 16 ++#define WL_TX_POWER_MCS40_FIRST 28 ++#define WL_TX_POWER_MCS40_NUM 17 ++ ++typedef struct { ++ uint32 flags; 
++ chanspec_t chanspec; /* txpwr report for this channel */ ++ chanspec_t local_chanspec; /* channel on which we are associated */ ++ uint8 local_max; /* local max according to the AP */ ++ uint8 local_constraint; /* local constraint according to the AP */ ++ int8 antgain[2]; /* Ant gain for each band - from SROM */ ++ uint8 rf_cores; /* count of RF Cores being reported */ ++ uint8 est_Pout[4]; /* Latest tx power out estimate per RF ++ * chain without adjustment ++ */ ++ uint8 est_Pout_cck; /* Latest CCK tx power out estimate */ ++ uint8 user_limit[WL_TX_POWER_RATES_LEGACY]; /* User limit */ ++ uint8 reg_limit[WL_TX_POWER_RATES_LEGACY]; /* Regulatory power limit */ ++ uint8 board_limit[WL_TX_POWER_RATES_LEGACY]; /* Max power board can support (SROM) */ ++ uint8 target[WL_TX_POWER_RATES_LEGACY]; /* Latest target power */ ++} tx_power_legacy2_t; ++ ++/* TX Power index defines */ ++#define WL_NUM_RATES_CCK 4 /* 1, 2, 5.5, 11 Mbps */ ++#define WL_NUM_RATES_OFDM 8 /* 6, 9, 12, 18, 24, 36, 48, 54 Mbps SISO/CDD */ ++#define WL_NUM_RATES_MCS_1STREAM 8 /* MCS 0-7 1-stream rates - SISO/CDD/STBC/MCS */ ++#define WL_NUM_RATES_EXTRA_VHT 2 /* Additional VHT 11AC rates */ ++#define WL_NUM_RATES_VHT 10 ++#define WL_NUM_RATES_MCS32 1 ++ ++#define WLC_NUM_RATES_CCK WL_NUM_RATES_CCK ++#define WLC_NUM_RATES_OFDM WL_NUM_RATES_OFDM ++#define WLC_NUM_RATES_MCS_1_STREAM WL_NUM_RATES_MCS_1STREAM ++#define WLC_NUM_RATES_MCS_2_STREAM WL_NUM_RATES_MCS_1STREAM ++#define WLC_NUM_RATES_MCS32 WL_NUM_RATES_MCS32 ++#define WL_TX_POWER_CCK_NUM WL_NUM_RATES_CCK ++#define WL_TX_POWER_OFDM_NUM WL_NUM_RATES_OFDM ++#define WL_TX_POWER_MCS_1_STREAM_NUM WL_NUM_RATES_MCS_1STREAM ++#define WL_TX_POWER_MCS_2_STREAM_NUM WL_NUM_RATES_MCS_1STREAM ++#define WL_TX_POWER_MCS_32_NUM WL_NUM_RATES_MCS32 ++ ++#define WL_NUM_2x2_ELEMENTS 4 ++#define WL_NUM_3x3_ELEMENTS 6 ++ ++typedef struct txppr { ++ /* start of 20MHz tx power limits */ ++ uint8 b20_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 
b20_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */ ++ uint8 b20_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */ ++ ++ uint8 b20_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b20_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */ ++ uint8 b20_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */ ++ uint8 b20_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b20_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */ ++ ++ uint8 b20_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b20_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */ ++ uint8 b20_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */ ++ uint8 b20_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b20_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */ ++ uint8 b20_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */ ++ ++ uint8 b20_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */ ++ uint8 b20_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */ ++ uint8 b20_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */ ++ uint8 b20_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */ ++ uint8 b20_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */ ++ uint8 b20_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */ ++ uint8 b20_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */ ++ uint8 b20_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */ ++ ++ /* start of 40MHz tx power limits */ ++ uint8 b40_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b40_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */ ++ uint8 b40_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */ ++ ++ uint8 b40_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b40_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */ ++ uint8 b40_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */ ++ uint8 
b40_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b40_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */ ++ ++ uint8 b40_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b40_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */ ++ uint8 b40_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */ ++ uint8 b40_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b40_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */ ++ uint8 b40_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */ ++ ++ uint8 b40_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */ ++ uint8 b40_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */ ++ uint8 b40_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */ ++ uint8 b40_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */ ++ uint8 b40_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */ ++ uint8 b40_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */ ++ uint8 b40_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */ ++ uint8 b40_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */ ++ ++ /* start of 20in40MHz tx power limits */ ++ uint8 b20in40_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b20in40_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */ ++ uint8 b20in40_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */ ++ ++ uint8 b20in40_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b20in40_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */ ++ uint8 b20in40_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */ ++ uint8 b20in40_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b20in40_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */ ++ ++ uint8 b20in40_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b20in40_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* 20 in 40 MHz Legacy OFDM CDD */ ++ uint8 b20in40_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts 
to 3 Tx Chain */ ++ uint8 b20in40_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b20in40_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */ ++ uint8 b20in40_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */ ++ ++ uint8 b20in40_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */ ++ uint8 b20in40_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */ ++ uint8 b20in40_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */ ++ uint8 b20in40_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */ ++ uint8 b20in40_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */ ++ uint8 b20in40_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */ ++ uint8 b20in40_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */ ++ uint8 b20in40_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */ ++ ++ /* start of 80MHz tx power limits */ ++ uint8 b80_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */ ++ uint8 b80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */ ++ ++ uint8 b80_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */ ++ uint8 b80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */ ++ uint8 b80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */ ++ ++ uint8 b80_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b80_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */ ++ uint8 b80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */ ++ uint8 b80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */ ++ uint8 b80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */ ++ ++ uint8 b80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */ ++ uint8 
b80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */ ++ uint8 b80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */ ++ uint8 b80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */ ++ uint8 b80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */ ++ uint8 b80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */ ++ uint8 b80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */ ++ uint8 b80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */ ++ ++ /* start of 20in80MHz tx power limits */ ++ uint8 b20in80_1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b20in80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */ ++ uint8 b20in80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */ ++ ++ uint8 b20in80_1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b20in80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */ ++ uint8 b20in80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */ ++ uint8 b20in80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b20in80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */ ++ ++ uint8 b20in80_1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b20in80_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */ ++ uint8 b20in80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */ ++ uint8 b20in80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b20in80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */ ++ uint8 b20in80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */ ++ ++ uint8 b20in80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */ ++ uint8 b20in80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */ ++ uint8 b20in80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */ ++ uint8 b20in80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */ ++ uint8 b20in80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */ ++ uint8 
b20in80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */ ++ uint8 b20in80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */ ++ uint8 b20in80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */ ++ ++ /* start of 40in80MHz tx power limits */ ++ uint8 b40in80_dummy1x1dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b40in80_1x1ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM transmission */ ++ uint8 b40in80_1x1mcs0[WL_NUM_RATES_MCS_1STREAM]; /* SISO MCS 0-7 */ ++ ++ uint8 b40in80_dummy1x2dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b40in80_1x2cdd_ofdm[WL_NUM_RATES_OFDM]; /* Legacy OFDM CDD transmission */ ++ uint8 b40in80_1x2cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* CDD MCS 0-7 */ ++ uint8 b40in80_2x2stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b40in80_2x2sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* MCS 8-15 */ ++ ++ uint8 b40in80_dummy1x3dsss[WL_NUM_RATES_CCK]; /* Legacy CCK/DSSS */ ++ uint8 b40in80_1x3cdd_ofdm[WL_NUM_RATES_OFDM]; /* MHz Legacy OFDM CDD */ ++ uint8 b40in80_1x3cdd_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* 1 Nsts to 3 Tx Chain */ ++ uint8 b40in80_2x3stbc_mcs0[WL_NUM_RATES_MCS_1STREAM]; /* STBC MCS 0-7 */ ++ uint8 b40in80_2x3sdm_mcs8[WL_NUM_RATES_MCS_1STREAM]; /* 2 Nsts to 3 Tx Chain */ ++ uint8 b40in80_3x3sdm_mcs16[WL_NUM_RATES_MCS_1STREAM]; /* 3 Nsts to 3 Tx Chain */ ++ ++ uint8 b40in80_1x1vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1 */ ++ uint8 b40in80_1x2cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD1 */ ++ uint8 b40in80_2x2stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC */ ++ uint8 b40in80_2x2sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2 */ ++ uint8 b40in80_1x3cdd_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_CDD2 */ ++ uint8 b40in80_2x3stbc_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS1_STBC_SPEXP1 */ ++ uint8 b40in80_2x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS2_SPEXP1 */ ++ uint8 b40in80_3x3sdm_vht[WL_NUM_RATES_EXTRA_VHT]; /* VHT8_9SS3 */ ++ ++ uint8 mcs32; /* C_CHECK - THIS NEEDS TO BE REMOVED THROUGHOUT THE 
CODE */ ++} txppr_t; ++ ++/* 20MHz */ ++#define WL_TX_POWER_CCK_FIRST OFFSETOF(txppr_t, b20_1x1dsss) ++#define WL_TX_POWER_OFDM20_FIRST OFFSETOF(txppr_t, b20_1x1ofdm) ++#define WL_TX_POWER_MCS20_SISO_FIRST OFFSETOF(txppr_t, b20_1x1mcs0) ++#define WL_TX_POWER_20_S1x1_FIRST OFFSETOF(txppr_t, b20_1x1mcs0) ++ ++#define WL_TX_POWER_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20_1x2dsss) ++#define WL_TX_POWER_OFDM20_CDD_FIRST OFFSETOF(txppr_t, b20_1x2cdd_ofdm) ++#define WL_TX_POWER_MCS20_CDD_FIRST OFFSETOF(txppr_t, b20_1x2cdd_mcs0) ++#define WL_TX_POWER_20_S1x2_FIRST OFFSETOF(txppr_t, b20_1x2cdd_mcs0) ++#define WL_TX_POWER_MCS20_STBC_FIRST OFFSETOF(txppr_t, b20_2x2stbc_mcs0) ++#define WL_TX_POWER_MCS20_SDM_FIRST OFFSETOF(txppr_t, b20_2x2sdm_mcs8) ++#define WL_TX_POWER_20_S2x2_FIRST OFFSETOF(txppr_t, b20_2x2sdm_mcs8) ++ ++#define WL_TX_POWER_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3dsss) ++#define WL_TX_POWER_OFDM20_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3cdd_ofdm) ++#define WL_TX_POWER_20_S1x3_FIRST OFFSETOF(txppr_t, b20_1x3cdd_mcs0) ++#define WL_TX_POWER_20_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20_2x3stbc_mcs0) ++#define WL_TX_POWER_20_S2x3_FIRST OFFSETOF(txppr_t, b20_2x3sdm_mcs8) ++#define WL_TX_POWER_20_S3x3_FIRST OFFSETOF(txppr_t, b20_3x3sdm_mcs16) ++ ++#define WL_TX_POWER_20_S1X1_VHT OFFSETOF(txppr_t, b20_1x1vht) ++#define WL_TX_POWER_20_S1X2_CDD_VHT OFFSETOF(txppr_t, b20_1x2cdd_vht) ++#define WL_TX_POWER_20_S2X2_STBC_VHT OFFSETOF(txppr_t, b20_2x2stbc_vht) ++#define WL_TX_POWER_20_S2X2_VHT OFFSETOF(txppr_t, b20_2x2sdm_vht) ++#define WL_TX_POWER_20_S1X3_CDD_VHT OFFSETOF(txppr_t, b20_1x3cdd_vht) ++#define WL_TX_POWER_20_S2X3_STBC_VHT OFFSETOF(txppr_t, b20_2x3stbc_vht) ++#define WL_TX_POWER_20_S2X3_VHT OFFSETOF(txppr_t, b20_2x3sdm_vht) ++#define WL_TX_POWER_20_S3X3_VHT OFFSETOF(txppr_t, b20_3x3sdm_vht) ++ ++/* 40MHz */ ++#define WL_TX_POWER_40_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b40_dummy1x1dsss) ++#define WL_TX_POWER_OFDM40_FIRST OFFSETOF(txppr_t, b40_1x1ofdm) ++#define 
WL_TX_POWER_MCS40_SISO_FIRST OFFSETOF(txppr_t, b40_1x1mcs0) ++#define WL_TX_POWER_40_S1x1_FIRST OFFSETOF(txppr_t, b40_1x1mcs0) ++ ++#define WL_TX_POWER_40_DUMMY_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b40_dummy1x2dsss) ++#define WL_TX_POWER_OFDM40_CDD_FIRST OFFSETOF(txppr_t, b40_1x2cdd_ofdm) ++#define WL_TX_POWER_MCS40_CDD_FIRST OFFSETOF(txppr_t, b40_1x2cdd_mcs0) ++#define WL_TX_POWER_40_S1x2_FIRST OFFSETOF(txppr_t, b40_1x2cdd_mcs0) ++#define WL_TX_POWER_MCS40_STBC_FIRST OFFSETOF(txppr_t, b40_2x2stbc_mcs0) ++#define WL_TX_POWER_MCS40_SDM_FIRST OFFSETOF(txppr_t, b40_2x2sdm_mcs8) ++#define WL_TX_POWER_40_S2x2_FIRST OFFSETOF(txppr_t, b40_2x2sdm_mcs8) ++ ++#define WL_TX_POWER_40_DUMMY_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40_dummy1x3dsss) ++#define WL_TX_POWER_OFDM40_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40_1x3cdd_ofdm) ++#define WL_TX_POWER_40_S1x3_FIRST OFFSETOF(txppr_t, b40_1x3cdd_mcs0) ++#define WL_TX_POWER_40_STBC_S2x3_FIRST OFFSETOF(txppr_t, b40_2x3stbc_mcs0) ++#define WL_TX_POWER_40_S2x3_FIRST OFFSETOF(txppr_t, b40_2x3sdm_mcs8) ++#define WL_TX_POWER_40_S3x3_FIRST OFFSETOF(txppr_t, b40_3x3sdm_mcs16) ++ ++#define WL_TX_POWER_40_S1X1_VHT OFFSETOF(txppr_t, b40_1x1vht) ++#define WL_TX_POWER_40_S1X2_CDD_VHT OFFSETOF(txppr_t, b40_1x2cdd_vht) ++#define WL_TX_POWER_40_S2X2_STBC_VHT OFFSETOF(txppr_t, b40_2x2stbc_vht) ++#define WL_TX_POWER_40_S2X2_VHT OFFSETOF(txppr_t, b40_2x2sdm_vht) ++#define WL_TX_POWER_40_S1X3_CDD_VHT OFFSETOF(txppr_t, b40_1x3cdd_vht) ++#define WL_TX_POWER_40_S2X3_STBC_VHT OFFSETOF(txppr_t, b40_2x3stbc_vht) ++#define WL_TX_POWER_40_S2X3_VHT OFFSETOF(txppr_t, b40_2x3sdm_vht) ++#define WL_TX_POWER_40_S3X3_VHT OFFSETOF(txppr_t, b40_3x3sdm_vht) ++ ++/* 20 in 40MHz */ ++#define WL_TX_POWER_20UL_CCK_FIRST OFFSETOF(txppr_t, b20in40_1x1dsss) ++#define WL_TX_POWER_20UL_OFDM_FIRST OFFSETOF(txppr_t, b20in40_1x1ofdm) ++#define WL_TX_POWER_20UL_S1x1_FIRST OFFSETOF(txppr_t, b20in40_1x1mcs0) ++ ++#define WL_TX_POWER_CCK_20U_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20in40_1x2dsss) 
++#define WL_TX_POWER_20UL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b20in40_1x2cdd_ofdm) ++#define WL_TX_POWER_20UL_S1x2_FIRST OFFSETOF(txppr_t, b20in40_1x2cdd_mcs0) ++#define WL_TX_POWER_20UL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b20in40_2x2stbc_mcs0) ++#define WL_TX_POWER_20UL_S2x2_FIRST OFFSETOF(txppr_t, b20in40_2x2sdm_mcs8) ++ ++#define WL_TX_POWER_CCK_20U_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3dsss) ++#define WL_TX_POWER_20UL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3cdd_ofdm) ++#define WL_TX_POWER_20UL_S1x3_FIRST OFFSETOF(txppr_t, b20in40_1x3cdd_mcs0) ++#define WL_TX_POWER_20UL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20in40_2x3stbc_mcs0) ++#define WL_TX_POWER_20UL_S2x3_FIRST OFFSETOF(txppr_t, b20in40_2x3sdm_mcs8) ++#define WL_TX_POWER_20UL_S3x3_FIRST OFFSETOF(txppr_t, b20in40_3x3sdm_mcs16) ++ ++#define WL_TX_POWER_20UL_S1X1_VHT OFFSETOF(txppr_t, b20in40_1x1vht) ++#define WL_TX_POWER_20UL_S1X2_CDD_VHT OFFSETOF(txppr_t, b20in40_1x2cdd_vht) ++#define WL_TX_POWER_20UL_S2X2_STBC_VHT OFFSETOF(txppr_t, b20in40_2x2stbc_vht) ++#define WL_TX_POWER_20UL_S2X2_VHT OFFSETOF(txppr_t, b20in40_2x2sdm_vht) ++#define WL_TX_POWER_20UL_S1X3_CDD_VHT OFFSETOF(txppr_t, b20in40_1x3cdd_vht) ++#define WL_TX_POWER_20UL_S2X3_STBC_VHT OFFSETOF(txppr_t, b20in40_2x3stbc_vht) ++#define WL_TX_POWER_20UL_S2X3_VHT OFFSETOF(txppr_t, b20in40_2x3sdm_vht) ++#define WL_TX_POWER_20UL_S3X3_VHT OFFSETOF(txppr_t, b20in40_3x3sdm_vht) ++ ++/* 80MHz */ ++#define WL_TX_POWER_80_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b80_dummy1x1dsss) ++#define WL_TX_POWER_OFDM80_FIRST OFFSETOF(txppr_t, b80_1x1ofdm) ++#define WL_TX_POWER_MCS80_SISO_FIRST OFFSETOF(txppr_t, b80_1x1mcs0) ++#define WL_TX_POWER_80_S1x1_FIRST OFFSETOF(txppr_t, b80_1x1mcs0) ++ ++#define WL_TX_POWER_80_DUMMY_CCK_CDD_S1x2_FIRST OFFSETOF(txppr_t, b80_dummy1x2dsss) ++#define WL_TX_POWER_OFDM80_CDD_FIRST OFFSETOF(txppr_t, b80_1x2cdd_ofdm) ++#define WL_TX_POWER_MCS80_CDD_FIRST OFFSETOF(txppr_t, b80_1x2cdd_mcs0) ++#define WL_TX_POWER_80_S1x2_FIRST OFFSETOF(txppr_t, 
b80_1x2cdd_mcs0) ++#define WL_TX_POWER_MCS80_STBC_FIRST OFFSETOF(txppr_t, b80_2x2stbc_mcs0) ++#define WL_TX_POWER_MCS80_SDM_FIRST OFFSETOF(txppr_t, b80_2x2sdm_mcs8) ++#define WL_TX_POWER_80_S2x2_FIRST OFFSETOF(txppr_t, b80_2x2sdm_mcs8) ++ ++#define WL_TX_POWER_80_DUMMY_CCK_CDD_S1x3_FIRST OFFSETOF(txppr_t, b80_dummy1x3dsss) ++#define WL_TX_POWER_OFDM80_CDD_S1x3_FIRST OFFSETOF(txppr_t, b80_1x3cdd_ofdm) ++#define WL_TX_POWER_80_S1x3_FIRST OFFSETOF(txppr_t, b80_1x3cdd_mcs0) ++#define WL_TX_POWER_80_STBC_S2x3_FIRST OFFSETOF(txppr_t, b80_2x3stbc_mcs0) ++#define WL_TX_POWER_80_S2x3_FIRST OFFSETOF(txppr_t, b80_2x3sdm_mcs8) ++#define WL_TX_POWER_80_S3x3_FIRST OFFSETOF(txppr_t, b80_3x3sdm_mcs16) ++ ++#define WL_TX_POWER_80_S1X1_VHT OFFSETOF(txppr_t, b80_1x1vht) ++#define WL_TX_POWER_80_S1X2_CDD_VHT OFFSETOF(txppr_t, b80_1x2cdd_vht) ++#define WL_TX_POWER_80_S2X2_STBC_VHT OFFSETOF(txppr_t, b80_2x2stbc_vht) ++#define WL_TX_POWER_80_S2X2_VHT OFFSETOF(txppr_t, b80_2x2sdm_vht) ++#define WL_TX_POWER_80_S1X3_CDD_VHT OFFSETOF(txppr_t, b80_1x3cdd_vht) ++#define WL_TX_POWER_80_S2X3_STBC_VHT OFFSETOF(txppr_t, b80_2x3stbc_vht) ++#define WL_TX_POWER_80_S2X3_VHT OFFSETOF(txppr_t, b80_2x3sdm_vht) ++#define WL_TX_POWER_80_S3X3_VHT OFFSETOF(txppr_t, b80_3x3sdm_vht) ++ ++/* 20 in 80MHz */ ++#define WL_TX_POWER_20UUL_CCK_FIRST OFFSETOF(txppr_t, b20in80_1x1dsss) ++#define WL_TX_POWER_20UUL_OFDM_FIRST OFFSETOF(txppr_t, b20in80_1x1ofdm) ++#define WL_TX_POWER_20UUL_S1x1_FIRST OFFSETOF(txppr_t, b20in80_1x1mcs0) ++ ++#define WL_TX_POWER_CCK_20UU_CDD_S1x2_FIRST OFFSETOF(txppr_t, b20in80_1x2dsss) ++#define WL_TX_POWER_20UUL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b20in80_1x2cdd_ofdm) ++#define WL_TX_POWER_20UUL_S1x2_FIRST OFFSETOF(txppr_t, b20in80_1x2cdd_mcs0) ++#define WL_TX_POWER_20UUL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b20in80_2x2stbc_mcs0) ++#define WL_TX_POWER_20UUL_S2x2_FIRST OFFSETOF(txppr_t, b20in80_2x2sdm_mcs8) ++ ++#define WL_TX_POWER_CCK_20UU_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3dsss) 
++#define WL_TX_POWER_20UUL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3cdd_ofdm) ++#define WL_TX_POWER_20UUL_S1x3_FIRST OFFSETOF(txppr_t, b20in80_1x3cdd_mcs0) ++#define WL_TX_POWER_20UUL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b20in80_2x3stbc_mcs0) ++#define WL_TX_POWER_20UUL_S2x3_FIRST OFFSETOF(txppr_t, b20in80_2x3sdm_mcs8) ++#define WL_TX_POWER_20UUL_S3x3_FIRST OFFSETOF(txppr_t, b20in80_3x3sdm_mcs16) ++ ++#define WL_TX_POWER_20UUL_S1X1_VHT OFFSETOF(txppr_t, b20in80_1x1vht) ++#define WL_TX_POWER_20UUL_S1X2_CDD_VHT OFFSETOF(txppr_t, b20in80_1x2cdd_vht) ++#define WL_TX_POWER_20UUL_S2X2_STBC_VHT OFFSETOF(txppr_t, b20in80_2x2stbc_vht) ++#define WL_TX_POWER_20UUL_S2X2_VHT OFFSETOF(txppr_t, b20in80_2x2sdm_vht) ++#define WL_TX_POWER_20UUL_S1X3_CDD_VHT OFFSETOF(txppr_t, b20in80_1x3cdd_vht) ++#define WL_TX_POWER_20UUL_S2X3_STBC_VHT OFFSETOF(txppr_t, b20in80_2x3stbc_vht) ++#define WL_TX_POWER_20UUL_S2X3_VHT OFFSETOF(txppr_t, b20in80_2x3sdm_vht) ++#define WL_TX_POWER_20UUL_S3X3_VHT OFFSETOF(txppr_t, b20in80_3x3sdm_vht) ++ ++/* 40 in 80MHz */ ++#define WL_TX_POWER_40UUL_DUMMY_CCK_FIRST OFFSETOF(txppr_t, b40in80_dummy1x1dsss) ++#define WL_TX_POWER_40UUL_OFDM_FIRST OFFSETOF(txppr_t, b40in80_1x1ofdm) ++#define WL_TX_POWER_40UUL_S1x1_FIRST OFFSETOF(txppr_t, b40in80_1x1mcs0) ++ ++#define WL_TX_POWER_CCK_40UU_DUMMY_CDD_S1x2_FIRST OFFSETOF(txppr_t, b40in80_dummy1x2dsss) ++#define WL_TX_POWER_40UUL_OFDM_CDD_FIRST OFFSETOF(txppr_t, b40in80_1x2cdd_ofdm) ++#define WL_TX_POWER_40UUL_S1x2_FIRST OFFSETOF(txppr_t, b40in80_1x2cdd_mcs0) ++#define WL_TX_POWER_40UUL_STBC_S2x2_FIRST OFFSETOF(txppr_t, b40in80_2x2stbc_mcs0) ++#define WL_TX_POWER_40UUL_S2x2_FIRST OFFSETOF(txppr_t, b40in80_2x2sdm_mcs8) ++ ++#define WL_TX_POWER_CCK_40UU_DUMMY_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40in80_dummy1x3dsss) ++#define WL_TX_POWER_40UUL_OFDM_CDD_S1x3_FIRST OFFSETOF(txppr_t, b40in80_1x3cdd_ofdm) ++#define WL_TX_POWER_40UUL_S1x3_FIRST OFFSETOF(txppr_t, b40in80_1x3cdd_mcs0) ++#define 
WL_TX_POWER_40UUL_STBC_S2x3_FIRST OFFSETOF(txppr_t, b40in80_2x3stbc_mcs0) ++#define WL_TX_POWER_40UUL_S2x3_FIRST OFFSETOF(txppr_t, b40in80_2x3sdm_mcs8) ++#define WL_TX_POWER_40UUL_S3x3_FIRST OFFSETOF(txppr_t, b40in80_3x3sdm_mcs16) ++ ++#define WL_TX_POWER_40UUL_S1X1_VHT OFFSETOF(txppr_t, b40in80_1x1vht) ++#define WL_TX_POWER_40UUL_S1X2_CDD_VHT OFFSETOF(txppr_t, b40in80_1x2cdd_vht) ++#define WL_TX_POWER_40UUL_S2X2_STBC_VHT OFFSETOF(txppr_t, b40in80_2x2stbc_vht) ++#define WL_TX_POWER_40UUL_S2X2_VHT OFFSETOF(txppr_t, b40in80_2x2sdm_vht) ++#define WL_TX_POWER_40UUL_S1X3_CDD_VHT OFFSETOF(txppr_t, b40in80_1x3cdd_vht) ++#define WL_TX_POWER_40UUL_S2X3_STBC_VHT OFFSETOF(txppr_t, b40in80_2x3stbc_vht) ++#define WL_TX_POWER_40UUL_S2X3_VHT OFFSETOF(txppr_t, b40in80_2x3sdm_vht) ++#define WL_TX_POWER_40UUL_S3X3_VHT OFFSETOF(txppr_t, b40in80_3x3sdm_vht) ++ ++#define WL_TX_POWER_MCS_32 OFFSETOF(txppr_t, mcs32) /* C_CHECK remove later */ ++ ++#define WL_TX_POWER_RATES sizeof(struct txppr) ++ ++/* sslpnphy specifics */ ++#define WL_TX_POWER_MCS20_SISO_FIRST_SSN WL_TX_POWER_MCS20_SISO_FIRST ++#define WL_TX_POWER_MCS40_SISO_FIRST_SSN WL_TX_POWER_MCS40_SISO_FIRST ++ ++typedef struct { ++ uint16 ver; /* version of this struct */ ++ uint16 len; /* length in bytes of this structure */ ++ uint32 flags; ++ chanspec_t chanspec; /* txpwr report for this channel */ ++ chanspec_t local_chanspec; /* channel on which we are associated */ ++ uint8 ppr[WL_TX_POWER_RATES]; /* Latest target power */ ++} wl_txppr_t; ++ ++#define WL_TXPPR_VERSION 0 ++#define WL_TXPPR_LENGTH (sizeof(wl_txppr_t)) ++#define TX_POWER_T_VERSION 43 ++ ++/* Defines used with channel_bandwidth for curpower */ ++#define WL_BW_20MHZ 0 ++#define WL_BW_40MHZ 1 ++#define WL_BW_80MHZ 2 ++ ++/* tx_power_t.flags bits */ ++#ifdef PPR_API ++#define WL_TX_POWER2_F_ENABLED 1 ++#define WL_TX_POWER2_F_HW 2 ++#define WL_TX_POWER2_F_MIMO 4 ++#define WL_TX_POWER2_F_SISO 8 ++#define WL_TX_POWER2_F_HT 0x10 ++#else ++#define WL_TX_POWER_F_ENABLED 
1 ++#define WL_TX_POWER_F_HW 2 ++#define WL_TX_POWER_F_MIMO 4 ++#define WL_TX_POWER_F_SISO 8 ++#define WL_TX_POWER_F_HT 0x10 ++#endif ++ ++typedef struct { ++ uint32 flags; ++ chanspec_t chanspec; /* txpwr report for this channel */ ++ chanspec_t local_chanspec; /* channel on which we are associated */ ++ uint8 local_max; /* local max according to the AP */ ++ uint8 local_constraint; /* local constraint according to the AP */ ++ int8 antgain[2]; /* Ant gain for each band - from SROM */ ++ uint8 rf_cores; /* count of RF Cores being reported */ ++ uint8 est_Pout[4]; /* Latest tx power out estimate per RF chain */ ++ uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain w/o adjustment */ ++ uint8 est_Pout_cck; /* Latest CCK tx power out estimate */ ++ uint8 tx_power_max[4]; /* Maximum target power among all rates */ ++ uint tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */ ++ uint8 user_limit[WL_TX_POWER_RATES]; /* User limit */ ++ int8 board_limit[WL_TX_POWER_RATES]; /* Max power board can support (SROM) */ ++ int8 target[WL_TX_POWER_RATES]; /* Latest target power */ ++ int8 clm_limits[WL_NUMRATES]; /* regulatory limits - 20, 40 or 80MHz */ ++ int8 clm_limits_subchan1[WL_NUMRATES]; /* regulatory limits - 20in40 or 40in80 */ ++ int8 clm_limits_subchan2[WL_NUMRATES]; /* regulatory limits - 20in80MHz */ ++ int8 sar; /* SAR limit for display by wl executable */ ++ int8 channel_bandwidth; /* 20, 40 or 80 MHz bandwidth? 
*/ ++ uint8 version; /* Version of the data format wlu <--> driver */ ++ uint8 display_core; /* Displayed curpower core */ ++#ifdef PPR_API ++} tx_power_new_t; ++#else ++} tx_power_t; ++#endif ++ ++typedef struct tx_inst_power { ++ uint8 txpwr_est_Pout[2]; /* Latest estimate for 2.4 and 5 Ghz */ ++ uint8 txpwr_est_Pout_gofdm; /* Pwr estimate for 2.4 OFDM */ ++} tx_inst_power_t; ++ ++ ++typedef struct { ++ uint32 flags; ++ chanspec_t chanspec; /* txpwr report for this channel */ ++ chanspec_t local_chanspec; /* channel on which we are associated */ ++ uint8 local_max; /* local max according to the AP */ ++ uint8 local_constraint; /* local constraint according to the AP */ ++ int8 antgain[2]; /* Ant gain for each band - from SROM */ ++ uint8 rf_cores; /* count of RF Cores being reported */ ++ uint8 est_Pout[4]; /* Latest tx power out estimate per RF chain */ ++ uint8 est_Pout_act[4]; /* Latest tx power out estimate per RF chain ++ * without adjustment ++ */ ++ uint8 est_Pout_cck; /* Latest CCK tx power out estimate */ ++ uint8 tx_power_max[4]; /* Maximum target power among all rates */ ++ uint tx_power_max_rate_ind[4]; /* Index of the rate with the max target power */ ++ txppr_t user_limit; /* User limit */ ++ txppr_t reg_limit; /* Regulatory power limit */ ++ txppr_t board_limit; /* Max power board can support (SROM) */ ++ txppr_t target; /* Latest target power */ ++} wl_txpwr_t; ++ ++#define WL_NUM_TXCHAIN_MAX 4 ++typedef struct wl_txchain_pwr_offsets { ++ int8 offset[WL_NUM_TXCHAIN_MAX]; /* quarter dBm signed offset for each chain */ ++} wl_txchain_pwr_offsets_t; ++ ++/* 802.11h measurement types */ ++#define WLC_MEASURE_TPC 1 ++#define WLC_MEASURE_CHANNEL_BASIC 2 ++#define WLC_MEASURE_CHANNEL_CCA 3 ++#define WLC_MEASURE_CHANNEL_RPI 4 ++ ++/* regulatory enforcement levels */ ++#define SPECT_MNGMT_OFF 0 /* both 11h and 11d disabled */ ++#define SPECT_MNGMT_LOOSE_11H 1 /* allow non-11h APs in scan lists */ ++#define SPECT_MNGMT_STRICT_11H 2 /* prune out non-11h APs 
from scan list */ ++#define SPECT_MNGMT_STRICT_11D 3 /* switch to 802.11D mode */ ++/* SPECT_MNGMT_LOOSE_11H_D - same as SPECT_MNGMT_LOOSE with the exception that Country IE ++ * adoption is done regardless of capability spectrum_management ++ */ ++#define SPECT_MNGMT_LOOSE_11H_D 4 /* operation defined above */ ++ ++#define WL_CHAN_VALID_HW (1 << 0) /* valid with current HW */ ++#define WL_CHAN_VALID_SW (1 << 1) /* valid with current country setting */ ++#define WL_CHAN_BAND_5G (1 << 2) /* 5GHz-band channel */ ++#define WL_CHAN_RADAR (1 << 3) /* radar sensitive channel */ ++#define WL_CHAN_INACTIVE (1 << 4) /* temporarily inactive due to radar */ ++#define WL_CHAN_PASSIVE (1 << 5) /* channel is in passive mode */ ++#define WL_CHAN_RESTRICTED (1 << 6) /* restricted use channel */ ++ ++/* BTC mode used by "btc_mode" iovar */ ++#define WL_BTC_DISABLE 0 /* disable BT coexistence */ ++#define WL_BTC_FULLTDM 1 /* full TDM COEX */ ++#define WL_BTC_ENABLE 1 /* full TDM COEX to maintain backward compatiblity */ ++#define WL_BTC_PREMPT 2 /* full TDM COEX with preemption */ ++#define WL_BTC_LITE 3 /* light weight coex for large isolation platform */ ++#define WL_BTC_PARALLEL 4 /* BT and WLAN run in parallel with separate antenna */ ++#define WL_BTC_HYBRID 5 /* hybrid coex, only ack is allowed to transmit in BT slot */ ++#define WL_BTC_DEFAULT 8 /* set the default mode for the device */ ++#define WL_INF_BTC_DISABLE 0 ++#define WL_INF_BTC_ENABLE 1 ++#define WL_INF_BTC_AUTO 3 ++ ++/* BTC wire used by "btc_wire" iovar */ ++#define WL_BTC_DEFWIRE 0 /* use default wire setting */ ++#define WL_BTC_2WIRE 2 /* use 2-wire BTC */ ++#define WL_BTC_3WIRE 3 /* use 3-wire BTC */ ++#define WL_BTC_4WIRE 4 /* use 4-wire BTC */ ++ ++/* BTC flags: BTC configuration that can be set by host */ ++#define WL_BTC_FLAG_PREMPT (1 << 0) ++#define WL_BTC_FLAG_BT_DEF (1 << 1) ++#define WL_BTC_FLAG_ACTIVE_PROT (1 << 2) ++#define WL_BTC_FLAG_SIM_RSP (1 << 3) ++#define WL_BTC_FLAG_PS_PROTECT (1 << 4) 
++#define WL_BTC_FLAG_SIM_TX_LP (1 << 5) ++#define WL_BTC_FLAG_ECI (1 << 6) ++#define WL_BTC_FLAG_LIGHT (1 << 7) ++#define WL_BTC_FLAG_PARALLEL (1 << 8) ++#endif /* !defined(LINUX_POSTMOGRIFY_REMOVAL) */ ++ ++/* Message levels */ ++#define WL_ERROR_VAL 0x00000001 ++#define WL_TRACE_VAL 0x00000002 ++#define WL_PRHDRS_VAL 0x00000004 ++#define WL_PRPKT_VAL 0x00000008 ++#define WL_INFORM_VAL 0x00000010 ++#define WL_TMP_VAL 0x00000020 ++#define WL_OID_VAL 0x00000040 ++#define WL_RATE_VAL 0x00000080 ++#define WL_ASSOC_VAL 0x00000100 ++#define WL_PRUSR_VAL 0x00000200 ++#define WL_PS_VAL 0x00000400 ++#define WL_TXPWR_VAL 0x00000800 /* retired in TOT on 6/10/2009 */ ++#define WL_PORT_VAL 0x00001000 ++#define WL_DUAL_VAL 0x00002000 ++#define WL_WSEC_VAL 0x00004000 ++#define WL_WSEC_DUMP_VAL 0x00008000 ++#define WL_LOG_VAL 0x00010000 ++#define WL_NRSSI_VAL 0x00020000 /* retired in TOT on 6/10/2009 */ ++#define WL_LOFT_VAL 0x00040000 /* retired in TOT on 6/10/2009 */ ++#define WL_REGULATORY_VAL 0x00080000 ++#define WL_PHYCAL_VAL 0x00100000 /* retired in TOT on 6/10/2009 */ ++#define WL_RADAR_VAL 0x00200000 /* retired in TOT on 6/10/2009 */ ++#define WL_MPC_VAL 0x00400000 ++#define WL_APSTA_VAL 0x00800000 ++#define WL_DFS_VAL 0x01000000 ++#define WL_BA_VAL 0x02000000 /* retired in TOT on 6/14/2010 */ ++#define WL_ACI_VAL 0x04000000 ++#define WL_MBSS_VAL 0x04000000 ++#define WL_CAC_VAL 0x08000000 ++#define WL_AMSDU_VAL 0x10000000 ++#define WL_AMPDU_VAL 0x20000000 ++#define WL_FFPLD_VAL 0x40000000 ++ ++/* wl_msg_level is full. 
For new bits take the next one and AND with ++ * wl_msg_level2 in wl_dbg.h ++ */ ++#define WL_DPT_VAL 0x00000001 ++#define WL_SCAN_VAL 0x00000002 ++#define WL_WOWL_VAL 0x00000004 ++#define WL_COEX_VAL 0x00000008 ++#define WL_RTDC_VAL 0x00000010 ++#define WL_PROTO_VAL 0x00000020 ++#define WL_BTA_VAL 0x00000040 ++#define WL_CHANINT_VAL 0x00000080 ++#define WL_THERMAL_VAL 0x00000100 /* retired in TOT on 6/10/2009 */ ++#define WL_P2P_VAL 0x00000200 ++#define WL_ITFR_VAL 0x00000400 ++#define WL_MCHAN_VAL 0x00000800 ++#define WL_TDLS_VAL 0x00001000 ++#define WL_MCNX_VAL 0x00002000 ++#define WL_PROT_VAL 0x00004000 ++#define WL_PSTA_VAL 0x00008000 ++#define WL_TSO_VAL 0x00010000 ++/* use top-bit for WL_TIME_STAMP_VAL because this is a modifier ++ * rather than a message-type of its own ++ */ ++#define WL_TIMESTAMP_VAL 0x80000000 ++ ++/* max # of leds supported by GPIO (gpio pin# == led index#) */ ++#define WL_LED_NUMGPIO 32 /* gpio 0-31 */ ++ ++/* led per-pin behaviors */ ++#define WL_LED_OFF 0 /* always off */ ++#define WL_LED_ON 1 /* always on */ ++#define WL_LED_ACTIVITY 2 /* activity */ ++#define WL_LED_RADIO 3 /* radio enabled */ ++#define WL_LED_ARADIO 4 /* 5 Ghz radio enabled */ ++#define WL_LED_BRADIO 5 /* 2.4Ghz radio enabled */ ++#define WL_LED_BGMODE 6 /* on if gmode, off if bmode */ ++#define WL_LED_WI1 7 ++#define WL_LED_WI2 8 ++#define WL_LED_WI3 9 ++#define WL_LED_ASSOC 10 /* associated state indicator */ ++#define WL_LED_INACTIVE 11 /* null behavior (clears default behavior) */ ++#define WL_LED_ASSOCACT 12 /* on when associated; blink fast for activity */ ++#define WL_LED_WI4 13 ++#define WL_LED_WI5 14 ++#define WL_LED_BLINKSLOW 15 /* blink slow */ ++#define WL_LED_BLINKMED 16 /* blink med */ ++#define WL_LED_BLINKFAST 17 /* blink fast */ ++#define WL_LED_BLINKCUSTOM 18 /* blink custom */ ++#define WL_LED_BLINKPERIODIC 19 /* blink periodic (custom 1000ms / off 400ms) */ ++#define WL_LED_ASSOC_WITH_SEC 20 /* when connected with security */ ++ /* keep on for 
300 sec */ ++#define WL_LED_START_OFF 21 /* off upon boot, could be turned on later */ ++#define WL_LED_NUMBEHAVIOR 22 ++ ++/* led behavior numeric value format */ ++#define WL_LED_BEH_MASK 0x7f /* behavior mask */ ++#define WL_LED_AL_MASK 0x80 /* activelow (polarity) bit */ ++ ++/* maximum channels returned by the get valid channels iovar */ ++#define WL_NUMCHANNELS 64 ++ ++/* max number of chanspecs (used by the iovar to calc. buf space) */ ++#define WL_NUMCHANSPECS 110 ++ ++/* WDS link local endpoint WPA role */ ++#define WL_WDS_WPA_ROLE_AUTH 0 /* authenticator */ ++#define WL_WDS_WPA_ROLE_SUP 1 /* supplicant */ ++#define WL_WDS_WPA_ROLE_AUTO 255 /* auto, based on mac addr value */ ++ ++/* number of bytes needed to define a 128-bit mask for MAC event reporting */ ++#define WL_EVENTING_MASK_LEN 16 ++ ++/* ++ * Join preference iovar value is an array of tuples. Each tuple has a one-byte type, ++ * a one-byte length, and a variable length value. RSSI type tuple must be present ++ * in the array. ++ * ++ * Types are defined in "join preference types" section. ++ * ++ * Length is the value size in octets. It is reserved for WL_JOIN_PREF_WPA type tuple ++ * and must be set to zero. ++ * ++ * Values are defined below. ++ * ++ * 1. RSSI - 2 octets ++ * offset 0: reserved ++ * offset 1: reserved ++ * ++ * 2. WPA - 2 + 12 * n octets (n is # tuples defined below) ++ * offset 0: reserved ++ * offset 1: # of tuples ++ * offset 2: tuple 1 ++ * offset 14: tuple 2 ++ * ... ++ * offset 2 + 12 * (n - 1) octets: tuple n ++ * ++ * struct wpa_cfg_tuple { ++ * uint8 akm[DOT11_OUI_LEN+1]; akm suite ++ * uint8 ucipher[DOT11_OUI_LEN+1]; unicast cipher suite ++ * uint8 mcipher[DOT11_OUI_LEN+1]; multicast cipher suite ++ * }; ++ * ++ * multicast cipher suite can be specified as a specific cipher suite or WL_WPA_ACP_MCS_ANY. ++ * ++ * 3. BAND - 2 octets ++ * offset 0: reserved ++ * offset 1: see "band preference" and "band types" ++ * ++ * 4. 
BAND RSSI - 2 octets ++ * offset 0: band types ++ * offset 1: +ve RSSI boost balue in dB ++ */ ++ ++/* join preference types */ ++#define WL_JOIN_PREF_RSSI 1 /* by RSSI */ ++#define WL_JOIN_PREF_WPA 2 /* by akm and ciphers */ ++#define WL_JOIN_PREF_BAND 3 /* by 802.11 band */ ++#define WL_JOIN_PREF_RSSI_DELTA 4 /* by 802.11 band only if RSSI delta condition matches */ ++#define WL_JOIN_PREF_TRANS_PREF 5 /* defined by requesting AP */ ++ ++/* band preference */ ++#define WLJP_BAND_ASSOC_PREF 255 /* use what WLC_SET_ASSOC_PREFER ioctl specifies */ ++ ++/* any multicast cipher suite */ ++#define WL_WPA_ACP_MCS_ANY "\x00\x00\x00\x00" ++ ++struct tsinfo_arg { ++ uint8 octets[3]; ++}; ++ ++#define NFIFO 6 /* # tx/rx fifopairs */ ++ ++#define WL_CNT_T_VERSION 8 /* current version of wl_cnt_t struct */ ++ ++typedef struct { ++ uint16 version; /* see definition of WL_CNT_T_VERSION */ ++ uint16 length; /* length of entire structure */ ++ ++ /* transmit stat counters */ ++ uint32 txframe; /* tx data frames */ ++ uint32 txbyte; /* tx data bytes */ ++ uint32 txretrans; /* tx mac retransmits */ ++ uint32 txerror; /* tx data errors (derived: sum of others) */ ++ uint32 txctl; /* tx management frames */ ++ uint32 txprshort; /* tx short preamble frames */ ++ uint32 txserr; /* tx status errors */ ++ uint32 txnobuf; /* tx out of buffers errors */ ++ uint32 txnoassoc; /* tx discard because we're not associated */ ++ uint32 txrunt; /* tx runt frames */ ++ uint32 txchit; /* tx header cache hit (fastpath) */ ++ uint32 txcmiss; /* tx header cache miss (slowpath) */ ++ ++ /* transmit chip error counters */ ++ uint32 txuflo; /* tx fifo underflows */ ++ uint32 txphyerr; /* tx phy errors (indicated in tx status) */ ++ uint32 txphycrs; ++ ++ /* receive stat counters */ ++ uint32 rxframe; /* rx data frames */ ++ uint32 rxbyte; /* rx data bytes */ ++ uint32 rxerror; /* rx data errors (derived: sum of others) */ ++ uint32 rxctl; /* rx management frames */ ++ uint32 rxnobuf; /* rx out of buffers 
errors */ ++ uint32 rxnondata; /* rx non data frames in the data channel errors */ ++ uint32 rxbadds; /* rx bad DS errors */ ++ uint32 rxbadcm; /* rx bad control or management frames */ ++ uint32 rxfragerr; /* rx fragmentation errors */ ++ uint32 rxrunt; /* rx runt frames */ ++ uint32 rxgiant; /* rx giant frames */ ++ uint32 rxnoscb; /* rx no scb error */ ++ uint32 rxbadproto; /* rx invalid frames */ ++ uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */ ++ uint32 rxbadda; /* rx frames tossed for invalid da */ ++ uint32 rxfilter; /* rx frames filtered out */ ++ ++ /* receive chip error counters */ ++ uint32 rxoflo; /* rx fifo overflow errors */ ++ uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */ ++ ++ uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */ ++ uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */ ++ uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */ ++ ++ /* misc counters */ ++ uint32 dmade; /* tx/rx dma descriptor errors */ ++ uint32 dmada; /* tx/rx dma data errors */ ++ uint32 dmape; /* tx/rx dma descriptor protocol errors */ ++ uint32 reset; /* reset count */ ++ uint32 tbtt; /* cnts the TBTT int's */ ++ uint32 txdmawar; ++ uint32 pkt_callback_reg_fail; /* callbacks register failure */ ++ ++ /* MAC counters: 32-bit version of d11.h's macstat_t */ ++ uint32 txallfrm; /* total number of frames sent, incl. 
Data, ACK, RTS, CTS, ++ * Control Management (includes retransmissions) ++ */ ++ uint32 txrtsfrm; /* number of RTS sent out by the MAC */ ++ uint32 txctsfrm; /* number of CTS sent out by the MAC */ ++ uint32 txackfrm; /* number of ACK frames sent out */ ++ uint32 txdnlfrm; /* Not used */ ++ uint32 txbcnfrm; /* beacons transmitted */ ++ uint32 txfunfl[8]; /* per-fifo tx underflows */ ++ uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS ++ * or BCN) ++ */ ++ uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for ++ * driver enqueued frames ++ */ ++ uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */ ++ uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */ ++ uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not ++ * data/control/management ++ */ ++ uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */ ++ uint32 rxbadplcp; /* parity check of the PLCP header failed */ ++ uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */ ++ uint32 rxstrt; /* Number of received frames with a good PLCP ++ * (i.e. 
passing parity check) ++ */ ++ uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ ++ uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ ++ uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */ ++ uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */ ++ uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */ ++ uint32 rxackucast; /* number of ucast ACKS received (good FCS) */ ++ uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */ ++ uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */ ++ uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */ ++ uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */ ++ uint32 rxctsocast; /* number of received CTS not addressed to the MAC */ ++ uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */ ++ uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */ ++ uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC ++ * (unlikely to see these) ++ */ ++ uint32 rxbeaconmbss; /* beacons received from member of BSS */ ++ uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from ++ * other BSS (WDS FRAME) ++ */ ++ uint32 rxbeaconobss; /* beacons received from other BSS */ ++ uint32 rxrsptmout; /* Number of response timeouts for transmitted frames ++ * expecting a response ++ */ ++ uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */ ++ uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */ ++ uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */ ++ uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */ ++ uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */ ++ uint32 pmqovfl; /* 
Number of PMQ overflows */ ++ uint32 rxcgprqfrm; /* Number of received Probe requests that made it into ++ * the PRQ fifo ++ */ ++ uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */ ++ uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did ++ * not get ACK ++ */ ++ uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */ ++ uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ ++ * fifo because a probe response could not be sent out within ++ * the time limit defined in M_PRS_MAXTIME ++ */ ++ uint32 rxnack; /* obsolete */ ++ uint32 frmscons; /* obsolete */ ++ uint32 txnack; /* obsolete */ ++ uint32 txglitch_nack; /* obsolete */ ++ uint32 txburst; /* obsolete */ ++ ++ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ ++ uint32 txfrag; /* dot11TransmittedFragmentCount */ ++ uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ ++ uint32 txfail; /* dot11FailedCount */ ++ uint32 txretry; /* dot11RetryCount */ ++ uint32 txretrie; /* dot11MultipleRetryCount */ ++ uint32 rxdup; /* dot11FrameduplicateCount */ ++ uint32 txrts; /* dot11RTSSuccessCount */ ++ uint32 txnocts; /* dot11RTSFailureCount */ ++ uint32 txnoack; /* dot11ACKFailureCount */ ++ uint32 rxfrag; /* dot11ReceivedFragmentCount */ ++ uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ ++ uint32 rxcrc; /* dot11FCSErrorCount */ ++ uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) 
*/ ++ uint32 rxundec; /* dot11WEPUndecryptableCount */ ++ ++ /* WPA2 counters (see rxundec for DecryptFailureCount) */ ++ uint32 tkipmicfaill; /* TKIPLocalMICFailures */ ++ uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */ ++ uint32 tkipreplay; /* TKIPReplays */ ++ uint32 ccmpfmterr; /* CCMPFormatErrors */ ++ uint32 ccmpreplay; /* CCMPReplays */ ++ uint32 ccmpundec; /* CCMPDecryptErrors */ ++ uint32 fourwayfail; /* FourWayHandshakeFailures */ ++ uint32 wepundec; /* dot11WEPUndecryptableCount */ ++ uint32 wepicverr; /* dot11WEPICVErrorCount */ ++ uint32 decsuccess; /* DecryptSuccessCount */ ++ uint32 tkipicverr; /* TKIPICVErrorCount */ ++ uint32 wepexcluded; /* dot11WEPExcludedCount */ ++ ++ uint32 txchanrej; /* Tx frames suppressed due to channel rejection */ ++ uint32 psmwds; /* Count PSM watchdogs */ ++ uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */ ++ ++ /* MBSS counters, AP only */ ++ uint32 prq_entries_handled; /* PRQ entries read in */ ++ uint32 prq_undirected_entries; /* which were bcast bss & ssid */ ++ uint32 prq_bad_entries; /* which could not be translated to info */ ++ uint32 atim_suppress_count; /* TX suppressions on ATIM fifo */ ++ uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... 
*/ ++ uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ ++ uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */ ++ ++ /* per-rate receive stat counters */ ++ uint32 rx1mbps; /* packets rx at 1Mbps */ ++ uint32 rx2mbps; /* packets rx at 2Mbps */ ++ uint32 rx5mbps5; /* packets rx at 5.5Mbps */ ++ uint32 rx6mbps; /* packets rx at 6Mbps */ ++ uint32 rx9mbps; /* packets rx at 9Mbps */ ++ uint32 rx11mbps; /* packets rx at 11Mbps */ ++ uint32 rx12mbps; /* packets rx at 12Mbps */ ++ uint32 rx18mbps; /* packets rx at 18Mbps */ ++ uint32 rx24mbps; /* packets rx at 24Mbps */ ++ uint32 rx36mbps; /* packets rx at 36Mbps */ ++ uint32 rx48mbps; /* packets rx at 48Mbps */ ++ uint32 rx54mbps; /* packets rx at 54Mbps */ ++ uint32 rx108mbps; /* packets rx at 108mbps */ ++ uint32 rx162mbps; /* packets rx at 162mbps */ ++ uint32 rx216mbps; /* packets rx at 216 mbps */ ++ uint32 rx270mbps; /* packets rx at 270 mbps */ ++ uint32 rx324mbps; /* packets rx at 324 mbps */ ++ uint32 rx378mbps; /* packets rx at 378 mbps */ ++ uint32 rx432mbps; /* packets rx at 432 mbps */ ++ uint32 rx486mbps; /* packets rx at 486 mbps */ ++ uint32 rx540mbps; /* packets rx at 540 mbps */ ++ ++ /* pkteng rx frame stats */ ++ uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ ++ uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ ++ ++ uint32 rfdisable; /* count of radio disables */ ++ uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */ ++ ++ uint32 txexptime; /* Tx frames suppressed due to timer expiration */ ++ ++ uint32 txmpdu_sgi; /* count for sgi transmit */ ++ uint32 rxmpdu_sgi; /* count for sgi received */ ++ uint32 txmpdu_stbc; /* count for stbc transmit */ ++ uint32 rxmpdu_stbc; /* count for stbc received */ ++ ++ uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */ ++ ++ /* WPA2 counters (see rxundec for DecryptFailureCount) */ ++ uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */ ++ uint32 tkipcntrmsr_mcst; /* 
TKIPCounterMeasuresInvoked */ ++ uint32 tkipreplay_mcst; /* TKIPReplays */ ++ uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */ ++ uint32 ccmpreplay_mcst; /* CCMPReplays */ ++ uint32 ccmpundec_mcst; /* CCMPDecryptErrors */ ++ uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */ ++ uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */ ++ uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */ ++ uint32 decsuccess_mcst; /* DecryptSuccessCount */ ++ uint32 tkipicverr_mcst; /* TKIPICVErrorCount */ ++ uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */ ++ ++ uint32 dma_hang; /* count for dma hang */ ++ uint32 reinit; /* count for reinit */ ++ ++ uint32 pstatxucast; /* count of ucast frames xmitted on all psta assoc */ ++ uint32 pstatxnoassoc; /* count of txnoassoc frames xmitted on all psta assoc */ ++ uint32 pstarxucast; /* count of ucast frames received on all psta assoc */ ++ uint32 pstarxbcmc; /* count of bcmc frames received on all psta */ ++ uint32 pstatxbcmc; /* count of bcmc frames transmitted on all psta */ ++ ++ uint32 cso_passthrough; /* hw cso required but passthrough */ ++ uint32 cso_normal; /* hw cso hdr for normal process */ ++ uint32 chained; /* number of frames chained */ ++ uint32 chainedsz1; /* number of chain size 1 frames */ ++ uint32 unchained; /* number of frames not chained */ ++ uint32 maxchainsz; /* max chain size so far */ ++ uint32 currchainsz; /* current chain size */ ++} wl_cnt_t; ++ ++typedef struct { ++ uint16 version; /* see definition of WL_CNT_T_VERSION */ ++ uint16 length; /* length of entire structure */ ++ ++ /* transmit stat counters */ ++ uint32 txframe; /* tx data frames */ ++ uint32 txbyte; /* tx data bytes */ ++ uint32 txretrans; /* tx mac retransmits */ ++ uint32 txerror; /* tx data errors (derived: sum of others) */ ++ uint32 txctl; /* tx management frames */ ++ uint32 txprshort; /* tx short preamble frames */ ++ uint32 txserr; /* tx status errors */ ++ uint32 txnobuf; /* tx out of buffers errors */ ++ uint32 txnoassoc; /* tx 
discard because we're not associated */ ++ uint32 txrunt; /* tx runt frames */ ++ uint32 txchit; /* tx header cache hit (fastpath) */ ++ uint32 txcmiss; /* tx header cache miss (slowpath) */ ++ ++ /* transmit chip error counters */ ++ uint32 txuflo; /* tx fifo underflows */ ++ uint32 txphyerr; /* tx phy errors (indicated in tx status) */ ++ uint32 txphycrs; ++ ++ /* receive stat counters */ ++ uint32 rxframe; /* rx data frames */ ++ uint32 rxbyte; /* rx data bytes */ ++ uint32 rxerror; /* rx data errors (derived: sum of others) */ ++ uint32 rxctl; /* rx management frames */ ++ uint32 rxnobuf; /* rx out of buffers errors */ ++ uint32 rxnondata; /* rx non data frames in the data channel errors */ ++ uint32 rxbadds; /* rx bad DS errors */ ++ uint32 rxbadcm; /* rx bad control or management frames */ ++ uint32 rxfragerr; /* rx fragmentation errors */ ++ uint32 rxrunt; /* rx runt frames */ ++ uint32 rxgiant; /* rx giant frames */ ++ uint32 rxnoscb; /* rx no scb error */ ++ uint32 rxbadproto; /* rx invalid frames */ ++ uint32 rxbadsrcmac; /* rx frames with Invalid Src Mac */ ++ uint32 rxbadda; /* rx frames tossed for invalid da */ ++ uint32 rxfilter; /* rx frames filtered out */ ++ ++ /* receive chip error counters */ ++ uint32 rxoflo; /* rx fifo overflow errors */ ++ uint32 rxuflo[NFIFO]; /* rx dma descriptor underflow errors */ ++ ++ uint32 d11cnt_txrts_off; /* d11cnt txrts value when reset d11cnt */ ++ uint32 d11cnt_rxcrc_off; /* d11cnt rxcrc value when reset d11cnt */ ++ uint32 d11cnt_txnocts_off; /* d11cnt txnocts value when reset d11cnt */ ++ ++ /* misc counters */ ++ uint32 dmade; /* tx/rx dma descriptor errors */ ++ uint32 dmada; /* tx/rx dma data errors */ ++ uint32 dmape; /* tx/rx dma descriptor protocol errors */ ++ uint32 reset; /* reset count */ ++ uint32 tbtt; /* cnts the TBTT int's */ ++ uint32 txdmawar; ++ uint32 pkt_callback_reg_fail; /* callbacks register failure */ ++ ++ /* MAC counters: 32-bit version of d11.h's macstat_t */ ++ uint32 txallfrm; /* 
total number of frames sent, incl. Data, ACK, RTS, CTS, ++ * Control Management (includes retransmissions) ++ */ ++ uint32 txrtsfrm; /* number of RTS sent out by the MAC */ ++ uint32 txctsfrm; /* number of CTS sent out by the MAC */ ++ uint32 txackfrm; /* number of ACK frames sent out */ ++ uint32 txdnlfrm; /* Not used */ ++ uint32 txbcnfrm; /* beacons transmitted */ ++ uint32 txfunfl[8]; /* per-fifo tx underflows */ ++ uint32 txtplunfl; /* Template underflows (mac was too slow to transmit ACK/CTS ++ * or BCN) ++ */ ++ uint32 txphyerror; /* Transmit phy error, type of error is reported in tx-status for ++ * driver enqueued frames ++ */ ++ uint32 rxfrmtoolong; /* Received frame longer than legal limit (2346 bytes) */ ++ uint32 rxfrmtooshrt; /* Received frame did not contain enough bytes for its frame type */ ++ uint32 rxinvmachdr; /* Either the protocol version != 0 or frame type not ++ * data/control/management ++ */ ++ uint32 rxbadfcs; /* number of frames for which the CRC check failed in the MAC */ ++ uint32 rxbadplcp; /* parity check of the PLCP header failed */ ++ uint32 rxcrsglitch; /* PHY was able to correlate the preamble but not the header */ ++ uint32 rxstrt; /* Number of received frames with a good PLCP ++ * (i.e. 
passing parity check) ++ */ ++ uint32 rxdfrmucastmbss; /* Number of received DATA frames with good FCS and matching RA */ ++ uint32 rxmfrmucastmbss; /* number of received mgmt frames with good FCS and matching RA */ ++ uint32 rxcfrmucast; /* number of received CNTRL frames with good FCS and matching RA */ ++ uint32 rxrtsucast; /* number of unicast RTS addressed to the MAC (good FCS) */ ++ uint32 rxctsucast; /* number of unicast CTS addressed to the MAC (good FCS) */ ++ uint32 rxackucast; /* number of ucast ACKS received (good FCS) */ ++ uint32 rxdfrmocast; /* number of received DATA frames (good FCS and not matching RA) */ ++ uint32 rxmfrmocast; /* number of received MGMT frames (good FCS and not matching RA) */ ++ uint32 rxcfrmocast; /* number of received CNTRL frame (good FCS and not matching RA) */ ++ uint32 rxrtsocast; /* number of received RTS not addressed to the MAC */ ++ uint32 rxctsocast; /* number of received CTS not addressed to the MAC */ ++ uint32 rxdfrmmcast; /* number of RX Data multicast frames received by the MAC */ ++ uint32 rxmfrmmcast; /* number of RX Management multicast frames received by the MAC */ ++ uint32 rxcfrmmcast; /* number of RX Control multicast frames received by the MAC ++ * (unlikely to see these) ++ */ ++ uint32 rxbeaconmbss; /* beacons received from member of BSS */ ++ uint32 rxdfrmucastobss; /* number of unicast frames addressed to the MAC from ++ * other BSS (WDS FRAME) ++ */ ++ uint32 rxbeaconobss; /* beacons received from other BSS */ ++ uint32 rxrsptmout; /* Number of response timeouts for transmitted frames ++ * expecting a response ++ */ ++ uint32 bcntxcancl; /* transmit beacons canceled due to receipt of beacon (IBSS) */ ++ uint32 rxf0ovfl; /* Number of receive fifo 0 overflows */ ++ uint32 rxf1ovfl; /* Number of receive fifo 1 overflows (obsolete) */ ++ uint32 rxf2ovfl; /* Number of receive fifo 2 overflows (obsolete) */ ++ uint32 txsfovfl; /* Number of transmit status fifo overflows (obsolete) */ ++ uint32 pmqovfl; /* 
Number of PMQ overflows */ ++ uint32 rxcgprqfrm; /* Number of received Probe requests that made it into ++ * the PRQ fifo ++ */ ++ uint32 rxcgprsqovfl; /* Rx Probe Request Que overflow in the AP */ ++ uint32 txcgprsfail; /* Tx Probe Response Fail. AP sent probe response but did ++ * not get ACK ++ */ ++ uint32 txcgprssuc; /* Tx Probe Response Success (ACK was received) */ ++ uint32 prs_timeout; /* Number of probe requests that were dropped from the PRQ ++ * fifo because a probe response could not be sent out within ++ * the time limit defined in M_PRS_MAXTIME ++ */ ++ uint32 rxnack; ++ uint32 frmscons; ++ uint32 txnack; ++ uint32 txglitch_nack; /* obsolete */ ++ uint32 txburst; /* obsolete */ ++ ++ /* 802.11 MIB counters, pp. 614 of 802.11 reaff doc. */ ++ uint32 txfrag; /* dot11TransmittedFragmentCount */ ++ uint32 txmulti; /* dot11MulticastTransmittedFrameCount */ ++ uint32 txfail; /* dot11FailedCount */ ++ uint32 txretry; /* dot11RetryCount */ ++ uint32 txretrie; /* dot11MultipleRetryCount */ ++ uint32 rxdup; /* dot11FrameduplicateCount */ ++ uint32 txrts; /* dot11RTSSuccessCount */ ++ uint32 txnocts; /* dot11RTSFailureCount */ ++ uint32 txnoack; /* dot11ACKFailureCount */ ++ uint32 rxfrag; /* dot11ReceivedFragmentCount */ ++ uint32 rxmulti; /* dot11MulticastReceivedFrameCount */ ++ uint32 rxcrc; /* dot11FCSErrorCount */ ++ uint32 txfrmsnt; /* dot11TransmittedFrameCount (bogus MIB?) 
*/ ++ uint32 rxundec; /* dot11WEPUndecryptableCount */ ++ ++ /* WPA2 counters (see rxundec for DecryptFailureCount) */ ++ uint32 tkipmicfaill; /* TKIPLocalMICFailures */ ++ uint32 tkipcntrmsr; /* TKIPCounterMeasuresInvoked */ ++ uint32 tkipreplay; /* TKIPReplays */ ++ uint32 ccmpfmterr; /* CCMPFormatErrors */ ++ uint32 ccmpreplay; /* CCMPReplays */ ++ uint32 ccmpundec; /* CCMPDecryptErrors */ ++ uint32 fourwayfail; /* FourWayHandshakeFailures */ ++ uint32 wepundec; /* dot11WEPUndecryptableCount */ ++ uint32 wepicverr; /* dot11WEPICVErrorCount */ ++ uint32 decsuccess; /* DecryptSuccessCount */ ++ uint32 tkipicverr; /* TKIPICVErrorCount */ ++ uint32 wepexcluded; /* dot11WEPExcludedCount */ ++ ++ uint32 rxundec_mcst; /* dot11WEPUndecryptableCount */ ++ ++ /* WPA2 counters (see rxundec for DecryptFailureCount) */ ++ uint32 tkipmicfaill_mcst; /* TKIPLocalMICFailures */ ++ uint32 tkipcntrmsr_mcst; /* TKIPCounterMeasuresInvoked */ ++ uint32 tkipreplay_mcst; /* TKIPReplays */ ++ uint32 ccmpfmterr_mcst; /* CCMPFormatErrors */ ++ uint32 ccmpreplay_mcst; /* CCMPReplays */ ++ uint32 ccmpundec_mcst; /* CCMPDecryptErrors */ ++ uint32 fourwayfail_mcst; /* FourWayHandshakeFailures */ ++ uint32 wepundec_mcst; /* dot11WEPUndecryptableCount */ ++ uint32 wepicverr_mcst; /* dot11WEPICVErrorCount */ ++ uint32 decsuccess_mcst; /* DecryptSuccessCount */ ++ uint32 tkipicverr_mcst; /* TKIPICVErrorCount */ ++ uint32 wepexcluded_mcst; /* dot11WEPExcludedCount */ ++ ++ uint32 txchanrej; /* Tx frames suppressed due to channel rejection */ ++ uint32 txexptime; /* Tx frames suppressed due to timer expiration */ ++ uint32 psmwds; /* Count PSM watchdogs */ ++ uint32 phywatchdog; /* Count Phy watchdogs (triggered by ucode) */ ++ ++ /* MBSS counters, AP only */ ++ uint32 prq_entries_handled; /* PRQ entries read in */ ++ uint32 prq_undirected_entries; /* which were bcast bss & ssid */ ++ uint32 prq_bad_entries; /* which could not be translated to info */ ++ uint32 atim_suppress_count; /* TX 
suppressions on ATIM fifo */ ++ uint32 bcn_template_not_ready; /* Template marked in use on send bcn ... */ ++ uint32 bcn_template_not_ready_done; /* ...but "DMA done" interrupt rcvd */ ++ uint32 late_tbtt_dpc; /* TBTT DPC did not happen in time */ ++ ++ /* per-rate receive stat counters */ ++ uint32 rx1mbps; /* packets rx at 1Mbps */ ++ uint32 rx2mbps; /* packets rx at 2Mbps */ ++ uint32 rx5mbps5; /* packets rx at 5.5Mbps */ ++ uint32 rx6mbps; /* packets rx at 6Mbps */ ++ uint32 rx9mbps; /* packets rx at 9Mbps */ ++ uint32 rx11mbps; /* packets rx at 11Mbps */ ++ uint32 rx12mbps; /* packets rx at 12Mbps */ ++ uint32 rx18mbps; /* packets rx at 18Mbps */ ++ uint32 rx24mbps; /* packets rx at 24Mbps */ ++ uint32 rx36mbps; /* packets rx at 36Mbps */ ++ uint32 rx48mbps; /* packets rx at 48Mbps */ ++ uint32 rx54mbps; /* packets rx at 54Mbps */ ++ uint32 rx108mbps; /* packets rx at 108mbps */ ++ uint32 rx162mbps; /* packets rx at 162mbps */ ++ uint32 rx216mbps; /* packets rx at 216 mbps */ ++ uint32 rx270mbps; /* packets rx at 270 mbps */ ++ uint32 rx324mbps; /* packets rx at 324 mbps */ ++ uint32 rx378mbps; /* packets rx at 378 mbps */ ++ uint32 rx432mbps; /* packets rx at 432 mbps */ ++ uint32 rx486mbps; /* packets rx at 486 mbps */ ++ uint32 rx540mbps; /* packets rx at 540 mbps */ ++ ++ /* pkteng rx frame stats */ ++ uint32 pktengrxducast; /* unicast frames rxed by the pkteng code */ ++ uint32 pktengrxdmcast; /* multicast frames rxed by the pkteng code */ ++ ++ uint32 rfdisable; /* count of radio disables */ ++ uint32 bphy_rxcrsglitch; /* PHY count of bphy glitches */ ++ ++ uint32 txmpdu_sgi; /* count for sgi transmit */ ++ uint32 rxmpdu_sgi; /* count for sgi received */ ++ uint32 txmpdu_stbc; /* count for stbc transmit */ ++ uint32 rxmpdu_stbc; /* count for stbc received */ ++} wl_cnt_ver_six_t; ++ ++ ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++#define WL_DELTA_STATS_T_VERSION 1 /* current version of wl_delta_stats_t struct */ ++ ++typedef struct { ++ uint16 version; /* see 
definition of WL_DELTA_STATS_T_VERSION */ ++ uint16 length; /* length of entire structure */ ++ ++ /* transmit stat counters */ ++ uint32 txframe; /* tx data frames */ ++ uint32 txbyte; /* tx data bytes */ ++ uint32 txretrans; /* tx mac retransmits */ ++ uint32 txfail; /* tx failures */ ++ ++ /* receive stat counters */ ++ uint32 rxframe; /* rx data frames */ ++ uint32 rxbyte; /* rx data bytes */ ++ ++ /* per-rate receive stat counters */ ++ uint32 rx1mbps; /* packets rx at 1Mbps */ ++ uint32 rx2mbps; /* packets rx at 2Mbps */ ++ uint32 rx5mbps5; /* packets rx at 5.5Mbps */ ++ uint32 rx6mbps; /* packets rx at 6Mbps */ ++ uint32 rx9mbps; /* packets rx at 9Mbps */ ++ uint32 rx11mbps; /* packets rx at 11Mbps */ ++ uint32 rx12mbps; /* packets rx at 12Mbps */ ++ uint32 rx18mbps; /* packets rx at 18Mbps */ ++ uint32 rx24mbps; /* packets rx at 24Mbps */ ++ uint32 rx36mbps; /* packets rx at 36Mbps */ ++ uint32 rx48mbps; /* packets rx at 48Mbps */ ++ uint32 rx54mbps; /* packets rx at 54Mbps */ ++ uint32 rx108mbps; /* packets rx at 108mbps */ ++ uint32 rx162mbps; /* packets rx at 162mbps */ ++ uint32 rx216mbps; /* packets rx at 216 mbps */ ++ uint32 rx270mbps; /* packets rx at 270 mbps */ ++ uint32 rx324mbps; /* packets rx at 324 mbps */ ++ uint32 rx378mbps; /* packets rx at 378 mbps */ ++ uint32 rx432mbps; /* packets rx at 432 mbps */ ++ uint32 rx486mbps; /* packets rx at 486 mbps */ ++ uint32 rx540mbps; /* packets rx at 540 mbps */ ++} wl_delta_stats_t; ++#endif /* LINUX_POSTMOGRIFY_REMOVAL */ ++ ++#define WL_WME_CNT_VERSION 1 /* current version of wl_wme_cnt_t */ ++ ++typedef struct { ++ uint32 packets; ++ uint32 bytes; ++} wl_traffic_stats_t; ++ ++typedef struct { ++ uint16 version; /* see definition of WL_WME_CNT_VERSION */ ++ uint16 length; /* length of entire structure */ ++ ++ wl_traffic_stats_t tx[AC_COUNT]; /* Packets transmitted */ ++ wl_traffic_stats_t tx_failed[AC_COUNT]; /* Packets dropped or failed to transmit */ ++ wl_traffic_stats_t rx[AC_COUNT]; /* Packets 
received */ ++ wl_traffic_stats_t rx_failed[AC_COUNT]; /* Packets failed to receive */ ++ ++ wl_traffic_stats_t forward[AC_COUNT]; /* Packets forwarded by AP */ ++ ++ wl_traffic_stats_t tx_expired[AC_COUNT]; /* packets dropped due to lifetime expiry */ ++ ++} wl_wme_cnt_t; ++ ++struct wl_msglevel2 { ++ uint32 low; ++ uint32 high; ++}; ++ ++typedef struct wl_mkeep_alive_pkt { ++ uint16 version; /* Version for mkeep_alive */ ++ uint16 length; /* length of fixed parameters in the structure */ ++ uint32 period_msec; ++ uint16 len_bytes; ++ uint8 keep_alive_id; /* 0 - 3 for N = 4 */ ++ uint8 data[1]; ++} wl_mkeep_alive_pkt_t; ++ ++#define WL_MKEEP_ALIVE_VERSION 1 ++#define WL_MKEEP_ALIVE_FIXED_LEN OFFSETOF(wl_mkeep_alive_pkt_t, data) ++#define WL_MKEEP_ALIVE_PRECISION 500 ++ ++#ifndef LINUX_POSTMOGRIFY_REMOVAL ++#ifdef WLBA ++ ++#define WLC_BA_CNT_VERSION 1 /* current version of wlc_ba_cnt_t */ ++ ++/* block ack related stats */ ++typedef struct wlc_ba_cnt { ++ uint16 version; /* WLC_BA_CNT_VERSION */ ++ uint16 length; /* length of entire structure */ ++ ++ /* transmit stat counters */ ++ uint32 txpdu; /* pdus sent */ ++ uint32 txsdu; /* sdus sent */ ++ uint32 txfc; /* tx side flow controlled packets */ ++ uint32 txfci; /* tx side flow control initiated */ ++ uint32 txretrans; /* retransmitted pdus */ ++ uint32 txbatimer; /* ba resend due to timer */ ++ uint32 txdrop; /* dropped packets */ ++ uint32 txaddbareq; /* addba req sent */ ++ uint32 txaddbaresp; /* addba resp sent */ ++ uint32 txdelba; /* delba sent */ ++ uint32 txba; /* ba sent */ ++ uint32 txbar; /* bar sent */ ++ uint32 txpad[4]; /* future */ ++ ++ /* receive side counters */ ++ uint32 rxpdu; /* pdus recd */ ++ uint32 rxqed; /* pdus buffered before sending up */ ++ uint32 rxdup; /* duplicate pdus */ ++ uint32 rxnobuf; /* pdus discarded due to no buf */ ++ uint32 rxaddbareq; /* addba req recd */ ++ uint32 rxaddbaresp; /* addba resp recd */ ++ uint32 rxdelba; /* delba recd */ ++ uint32 rxba; /* ba recd */ ++ 
uint32 rxbar; /* bar recd */ ++ uint32 rxinvba; /* invalid ba recd */ ++ uint32 rxbaholes; /* ba recd with holes */ ++ uint32 rxunexp; /* unexpected packets */ ++ uint32 rxpad[4]; /* future */ ++} wlc_ba_cnt_t; ++#endif /* WLBA */ ++ ++/* structure for per-tid ampdu control */ ++struct ampdu_tid_control { ++ uint8 tid; /* tid */ ++ uint8 enable; /* enable/disable */ ++}; ++ ++/* structure for identifying ea/tid for sending addba/delba */ ++struct ampdu_ea_tid { ++ struct ether_addr ea; /* Station address */ ++ uint8 tid; /* tid */ ++}; ++/* structure for identifying retry/tid for retry_limit_tid/rr_retry_limit_tid */ ++struct ampdu_retry_tid { ++ uint8 tid; /* tid */ ++ uint8 retry; /* retry value */ ++}; ++ ++/* Different discovery modes for dpt */ ++#define DPT_DISCOVERY_MANUAL 0x01 /* manual discovery mode */ ++#define DPT_DISCOVERY_AUTO 0x02 /* auto discovery mode */ ++#define DPT_DISCOVERY_SCAN 0x04 /* scan-based discovery mode */ ++ ++/* different path selection values */ ++#define DPT_PATHSEL_AUTO 0 /* auto mode for path selection */ ++#define DPT_PATHSEL_DIRECT 1 /* always use direct DPT path */ ++#define DPT_PATHSEL_APPATH 2 /* always use AP path */ ++ ++/* different ops for deny list */ ++#define DPT_DENY_LIST_ADD 1 /* add to dpt deny list */ ++#define DPT_DENY_LIST_REMOVE 2 /* remove from dpt deny list */ ++ ++/* different ops for manual end point */ ++#define DPT_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */ ++#define DPT_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */ ++#define DPT_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */ ++ ++/* structure for dpt iovars */ ++typedef struct dpt_iovar { ++ struct ether_addr ea; /* Station address */ ++ uint8 mode; /* mode: depends on iovar */ ++ uint32 pad; /* future */ ++} dpt_iovar_t; ++ ++/* flags to indicate DPT status */ ++#define DPT_STATUS_ACTIVE 0x01 /* link active (though may be suspended) */ ++#define DPT_STATUS_AES 0x02 /* link secured through AES encryption */ ++#define DPT_STATUS_FAILED 
0x04 /* DPT link failed */ ++ ++#define DPT_FNAME_LEN 48 /* Max length of friendly name */ ++ ++typedef struct dpt_status { ++ uint8 status; /* flags to indicate status */ ++ uint8 fnlen; /* length of friendly name */ ++ uchar name[DPT_FNAME_LEN]; /* friendly name */ ++ uint32 rssi; /* RSSI of the link */ ++ sta_info_t sta; /* sta info */ ++} dpt_status_t; ++ ++/* structure for dpt list */ ++typedef struct dpt_list { ++ uint32 num; /* number of entries in struct */ ++ dpt_status_t status[1]; /* per station info */ ++} dpt_list_t; ++ ++/* structure for dpt friendly name */ ++typedef struct dpt_fname { ++ uint8 len; /* length of friendly name */ ++ uchar name[DPT_FNAME_LEN]; /* friendly name */ ++} dpt_fname_t; ++ ++#define BDD_FNAME_LEN 32 /* Max length of friendly name */ ++typedef struct bdd_fname { ++ uint8 len; /* length of friendly name */ ++ uchar name[BDD_FNAME_LEN]; /* friendly name */ ++} bdd_fname_t; ++ ++/* structure for addts arguments */ ++/* For ioctls that take a list of TSPEC */ ++struct tslist { ++ int count; /* number of tspecs */ ++ struct tsinfo_arg tsinfo[1]; /* variable length array of tsinfo */ ++}; ++ ++#ifdef WLTDLS ++/* different ops for manual end point */ ++#define TDLS_MANUAL_EP_CREATE 1 /* create manual dpt endpoint */ ++#define TDLS_MANUAL_EP_MODIFY 2 /* modify manual dpt endpoint */ ++#define TDLS_MANUAL_EP_DELETE 3 /* delete manual dpt endpoint */ ++#define TDLS_MANUAL_EP_PM 4 /* put dpt endpoint in PM mode */ ++#define TDLS_MANUAL_EP_WAKE 5 /* wake up dpt endpoint from PM */ ++#define TDLS_MANUAL_EP_DISCOVERY 6 /* discover if endpoint is TDLS capable */ ++#define TDLS_MANUAL_EP_CHSW 7 /* channel switch */ ++ ++/* structure for tdls iovars */ ++typedef struct tdls_iovar { ++ struct ether_addr ea; /* Station address */ ++ uint8 mode; /* mode: depends on iovar */ ++ chanspec_t chanspec; ++ uint32 pad; /* future */ ++} tdls_iovar_t; ++#endif /* WLTDLS */ ++ ++/* structure for addts/delts arguments */ ++typedef struct tspec_arg { ++ 
uint16 version; /* see definition of TSPEC_ARG_VERSION */ ++ uint16 length; /* length of entire structure */ ++ uint flag; /* bit field */ ++ /* TSPEC Arguments */ ++ struct tsinfo_arg tsinfo; /* TS Info bit field */ ++ uint16 nom_msdu_size; /* (Nominal or fixed) MSDU Size (bytes) */ ++ uint16 max_msdu_size; /* Maximum MSDU Size (bytes) */ ++ uint min_srv_interval; /* Minimum Service Interval (us) */ ++ uint max_srv_interval; /* Maximum Service Interval (us) */ ++ uint inactivity_interval; /* Inactivity Interval (us) */ ++ uint suspension_interval; /* Suspension Interval (us) */ ++ uint srv_start_time; /* Service Start Time (us) */ ++ uint min_data_rate; /* Minimum Data Rate (bps) */ ++ uint mean_data_rate; /* Mean Data Rate (bps) */ ++ uint peak_data_rate; /* Peak Data Rate (bps) */ ++ uint max_burst_size; /* Maximum Burst Size (bytes) */ ++ uint delay_bound; /* Delay Bound (us) */ ++ uint min_phy_rate; /* Minimum PHY Rate (bps) */ ++ uint16 surplus_bw; /* Surplus Bandwidth Allowance (range 1.0 to 8.0) */ ++ uint16 medium_time; /* Medium Time (32 us/s periods) */ ++ uint8 dialog_token; /* dialog token */ ++} tspec_arg_t; ++ ++/* tspec arg for desired station */ ++typedef struct tspec_per_sta_arg { ++ struct ether_addr ea; ++ struct tspec_arg ts; ++} tspec_per_sta_arg_t; ++ ++/* structure for max bandwidth for each access category */ ++typedef struct wme_max_bandwidth { ++ uint32 ac[AC_COUNT]; /* max bandwidth for each access category */ ++} wme_max_bandwidth_t; ++ ++#define WL_WME_MBW_PARAMS_IO_BYTES (sizeof(wme_max_bandwidth_t)) ++ ++/* current version of wl_tspec_arg_t struct */ ++#define TSPEC_ARG_VERSION 2 /* current version of wl_tspec_arg_t struct */ ++#define TSPEC_ARG_LENGTH 55 /* argument length from tsinfo to medium_time */ ++#define TSPEC_DEFAULT_DIALOG_TOKEN 42 /* default dialog token */ ++#define TSPEC_DEFAULT_SBW_FACTOR 0x3000 /* default surplus bw */ ++ ++ ++#define WL_WOWL_KEEPALIVE_MAX_PACKET_SIZE 80 ++#define WLC_WOWL_MAX_KEEPALIVE 2 ++ ++/* 
define for flag */ ++#define TSPEC_PENDING 0 /* TSPEC pending */ ++#define TSPEC_ACCEPTED 1 /* TSPEC accepted */ ++#define TSPEC_REJECTED 2 /* TSPEC rejected */ ++#define TSPEC_UNKNOWN 3 /* TSPEC unknown */ ++#define TSPEC_STATUS_MASK 7 /* TSPEC status mask */ ++ ++ ++/* Software feature flag defines used by wlfeatureflag */ ++#ifdef WLAFTERBURNER ++#define WL_SWFL_ABBFL 0x0001 /* Allow Afterburner on systems w/o hardware BFL */ ++#define WL_SWFL_ABENCORE 0x0002 /* Allow AB on non-4318E chips */ ++#endif /* WLAFTERBURNER */ ++#define WL_SWFL_NOHWRADIO 0x0004 ++#define WL_SWFL_FLOWCONTROL 0x0008 /* Enable backpressure to OS stack */ ++#define WL_SWFL_WLBSSSORT 0x0010 /* Per-port supports sorting of BSS */ ++ ++#define WL_LIFETIME_MAX 0xFFFF /* Max value in ms */ ++ ++/* Packet lifetime configuration per ac */ ++typedef struct wl_lifetime { ++ uint32 ac; /* access class */ ++ uint32 lifetime; /* Packet lifetime value in ms */ ++} wl_lifetime_t; ++ ++/* Channel Switch Announcement param */ ++typedef struct wl_chan_switch { ++ uint8 mode; /* value 0 or 1 */ ++ uint8 count; /* count # of beacons before switching */ ++ chanspec_t chspec; /* chanspec */ ++ uint8 reg; /* regulatory class */ ++} wl_chan_switch_t; ++#endif /* LINUX_POSTMOGRIFY_REMOVAL */ ++ ++/* Roaming trigger definitions for WLC_SET_ROAM_TRIGGER. ++ * ++ * (-100 < value < 0) value is used directly as a roaming trigger in dBm ++ * (0 <= value) value specifies a logical roaming trigger level from ++ * the list below ++ * ++ * WLC_GET_ROAM_TRIGGER always returns roaming trigger value in dBm, never ++ * the logical roam trigger value. ++ */ ++#define WLC_ROAM_TRIGGER_DEFAULT 0 /* default roaming trigger */ ++#define WLC_ROAM_TRIGGER_BANDWIDTH 1 /* optimize for bandwidth roaming trigger */ ++#define WLC_ROAM_TRIGGER_DISTANCE 2 /* optimize for distance roaming trigger */ ++#define WLC_ROAM_TRIGGER_AUTO 3 /* auto-detect environment */ ++#define WLC_ROAM_TRIGGER_MAX_VALUE 3 /* max. 
valid value */ ++ ++#define WLC_ROAM_NEVER_ROAM_TRIGGER (-100) /* Avoid Roaming by setting a large value */ ++ ++/* Preferred Network Offload (PNO, formerly PFN) defines */ ++#define WPA_AUTH_PFN_ANY 0xffffffff /* for PFN, match only ssid */ ++ ++enum { ++ PFN_LIST_ORDER, ++ PFN_RSSI ++}; ++ ++enum { ++ DISABLE, ++ ENABLE ++}; ++ ++enum { ++ OFF_ADAPT, ++ SMART_ADAPT, ++ STRICT_ADAPT, ++ SLOW_ADAPT ++}; ++ ++#define SORT_CRITERIA_BIT 0 ++#define AUTO_NET_SWITCH_BIT 1 ++#define ENABLE_BKGRD_SCAN_BIT 2 ++#define IMMEDIATE_SCAN_BIT 3 ++#define AUTO_CONNECT_BIT 4 ++#define ENABLE_BD_SCAN_BIT 5 ++#define ENABLE_ADAPTSCAN_BIT 6 ++#define IMMEDIATE_EVENT_BIT 8 ++ ++#define SORT_CRITERIA_MASK 0x0001 ++#define AUTO_NET_SWITCH_MASK 0x0002 ++#define ENABLE_BKGRD_SCAN_MASK 0x0004 ++#define IMMEDIATE_SCAN_MASK 0x0008 ++#define AUTO_CONNECT_MASK 0x0010 ++ ++#define ENABLE_BD_SCAN_MASK 0x0020 ++#define ENABLE_ADAPTSCAN_MASK 0x00c0 ++#define IMMEDIATE_EVENT_MASK 0x0100 ++ ++#define PFN_VERSION 2 ++#define PFN_SCANRESULT_VERSION 1 ++#define MAX_PFN_LIST_COUNT 16 ++ ++#define PFN_COMPLETE 1 ++#define PFN_INCOMPLETE 0 ++ ++#define DEFAULT_BESTN 2 ++#define DEFAULT_MSCAN 0 ++#define DEFAULT_REPEAT 10 ++#define DEFAULT_EXP 2 ++ ++/* PFN network info structure */ ++typedef struct wl_pfn_subnet_info { ++ struct ether_addr BSSID; ++ uint8 channel; /* channel number only */ ++ uint8 SSID_len; ++ uint8 SSID[32]; ++} wl_pfn_subnet_info_t; ++ ++typedef struct wl_pfn_net_info { ++ wl_pfn_subnet_info_t pfnsubnet; ++ int16 RSSI; /* receive signal strength (in dBm) */ ++ uint16 timestamp; /* age in seconds */ ++} wl_pfn_net_info_t; ++ ++typedef struct wl_pfn_scanresults { ++ uint32 version; ++ uint32 status; ++ uint32 count; ++ wl_pfn_net_info_t netinfo[1]; ++} wl_pfn_scanresults_t; ++ ++/* PFN data structure */ ++typedef struct wl_pfn_param { ++ int32 version; /* PNO parameters version */ ++ int32 scan_freq; /* Scan frequency */ ++ int32 lost_network_timeout; /* Timeout in sec. 
to declare ++ * discovered network as lost ++ */ ++ int16 flags; /* Bit field to control features ++ * of PFN such as sort criteria auto ++ * enable switch and background scan ++ */ ++ int16 rssi_margin; /* Margin to avoid jitter for choosing a ++ * PFN based on RSSI sort criteria ++ */ ++ uint8 bestn; /* number of best networks in each scan */ ++ uint8 mscan; /* number of scans recorded */ ++ uint8 repeat; /* Minimum number of scan intervals ++ *before scan frequency changes in adaptive scan ++ */ ++ uint8 exp; /* Exponent of 2 for maximum scan interval */ ++#if !defined(WLC_PATCH) || !defined(BCM43362A2) ++ int32 slow_freq; /* slow scan period */ ++#endif /* !WLC_PATCH || !BCM43362A2 */ ++} wl_pfn_param_t; ++ ++typedef struct wl_pfn { ++ wlc_ssid_t ssid; /* ssid name and its length */ ++ int32 bss_type; /* IBSS or infrastructure */ ++ int32 infra; /* BSS Vs IBSS */ ++ int32 auth; /* Open Vs Closed */ ++ int32 wpa_auth; /* WPA type */ ++ int32 wsec; /* wsec value */ ++} wl_pfn_t; ++#define WL_PFN_HIDDEN_BIT 2 ++#define PNO_SCAN_MAX_FW 508*1000 /* max time scan time in msec */ ++#define PNO_SCAN_MAX_FW_SEC PNO_SCAN_MAX_FW/1000 /* max time scan time in SEC */ ++#define PNO_SCAN_MIN_FW_SEC 10 /* min time scan time in SEC */ ++#define WL_PFN_HIDDEN_MASK 0x4 ++ ++/* TCP Checksum Offload defines */ ++#define TOE_TX_CSUM_OL 0x00000001 ++#define TOE_RX_CSUM_OL 0x00000002 ++ ++/* TCP Checksum Offload error injection for testing */ ++#define TOE_ERRTEST_TX_CSUM 0x00000001 ++#define TOE_ERRTEST_RX_CSUM 0x00000002 ++#define TOE_ERRTEST_RX_CSUM2 0x00000004 ++ ++struct toe_ol_stats_t { ++ /* Num of tx packets that don't need to be checksummed */ ++ uint32 tx_summed; ++ ++ /* Num of tx packets where checksum is filled by offload engine */ ++ uint32 tx_iph_fill; ++ uint32 tx_tcp_fill; ++ uint32 tx_udp_fill; ++ uint32 tx_icmp_fill; ++ ++ /* Num of rx packets where toe finds out if checksum is good or bad */ ++ uint32 rx_iph_good; ++ uint32 rx_iph_bad; ++ uint32 rx_tcp_good; ++ 
uint32 rx_tcp_bad; ++ uint32 rx_udp_good; ++ uint32 rx_udp_bad; ++ uint32 rx_icmp_good; ++ uint32 rx_icmp_bad; ++ ++ /* Num of tx packets in which csum error is injected */ ++ uint32 tx_tcp_errinj; ++ uint32 tx_udp_errinj; ++ uint32 tx_icmp_errinj; ++ ++ /* Num of rx packets in which csum error is injected */ ++ uint32 rx_tcp_errinj; ++ uint32 rx_udp_errinj; ++ uint32 rx_icmp_errinj; ++}; ++ ++/* ARP Offload feature flags for arp_ol iovar */ ++#define ARP_OL_AGENT 0x00000001 ++#define ARP_OL_SNOOP 0x00000002 ++#define ARP_OL_HOST_AUTO_REPLY 0x00000004 ++#define ARP_OL_PEER_AUTO_REPLY 0x00000008 ++ ++/* ARP Offload error injection */ ++#define ARP_ERRTEST_REPLY_PEER 0x1 ++#define ARP_ERRTEST_REPLY_HOST 0x2 ++ ++#define ARP_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */ ++#define ND_MULTIHOMING_MAX 8 /* Maximum local host IP addresses */ ++ ++/* Arp offload statistic counts */ ++struct arp_ol_stats_t { ++ uint32 host_ip_entries; /* Host IP table addresses (more than one if multihomed) */ ++ uint32 host_ip_overflow; /* Host IP table additions skipped due to overflow */ ++ ++ uint32 arp_table_entries; /* ARP table entries */ ++ uint32 arp_table_overflow; /* ARP table additions skipped due to overflow */ ++ ++ uint32 host_request; /* ARP requests from host */ ++ uint32 host_reply; /* ARP replies from host */ ++ uint32 host_service; /* ARP requests from host serviced by ARP Agent */ ++ ++ uint32 peer_request; /* ARP requests received from network */ ++ uint32 peer_request_drop; /* ARP requests from network that were dropped */ ++ uint32 peer_reply; /* ARP replies received from network */ ++ uint32 peer_reply_drop; /* ARP replies from network that were dropped */ ++ uint32 peer_service; /* ARP request from host serviced by ARP Agent */ ++}; ++ ++/* NS offload statistic counts */ ++struct nd_ol_stats_t { ++ uint32 host_ip_entries; /* Host IP table addresses (more than one if multihomed) */ ++ uint32 host_ip_overflow; /* Host IP table additions skipped due to 
overflow */ ++ uint32 peer_request; /* NS requests received from network */ ++ uint32 peer_request_drop; /* NS requests from network that were dropped */ ++ uint32 peer_reply_drop; /* NA replies from network that were dropped */ ++ uint32 peer_service; /* NS request from host serviced by firmware */ ++}; ++ ++/* ++ * Keep-alive packet offloading. ++ */ ++ ++/* NAT keep-alive packets format: specifies the re-transmission period, the packet ++ * length, and packet contents. ++ */ ++typedef struct wl_keep_alive_pkt { ++ uint32 period_msec; /* Retransmission period (0 to disable packet re-transmits) */ ++ uint16 len_bytes; /* Size of packet to transmit (0 to disable packet re-transmits) */ ++ uint8 data[1]; /* Variable length packet to transmit. Contents should include ++ * entire ethernet packet (enet header, IP header, UDP header, ++ * and UDP payload) in network byte order. ++ */ ++} wl_keep_alive_pkt_t; ++ ++#define WL_KEEP_ALIVE_FIXED_LEN OFFSETOF(wl_keep_alive_pkt_t, data) ++ ++/* ++ * Dongle pattern matching filter. ++ */ ++ ++/* Packet filter types. Currently, only pattern matching is supported. */ ++typedef enum wl_pkt_filter_type { ++ WL_PKT_FILTER_TYPE_PATTERN_MATCH /* Pattern matching filter */ ++} wl_pkt_filter_type_t; ++ ++#define WL_PKT_FILTER_TYPE wl_pkt_filter_type_t ++ ++/* Pattern matching filter. Specifies an offset within received packets to ++ * start matching, the pattern to match, the size of the pattern, and a bitmask ++ * that indicates which bits within the pattern should be matched. ++ */ ++typedef struct wl_pkt_filter_pattern { ++ uint32 offset; /* Offset within received packet to start pattern matching. ++ * Offset '0' is the first byte of the ethernet header. ++ */ ++ uint32 size_bytes; /* Size of the pattern. Bitmask must be the same size. */ ++ uint8 mask_and_pattern[1]; /* Variable length mask and pattern data. mask starts ++ * at offset 0. Pattern immediately follows mask. 
++ */ ++} wl_pkt_filter_pattern_t; ++ ++/* IOVAR "pkt_filter_add" parameter. Used to install packet filters. */ ++typedef struct wl_pkt_filter { ++ uint32 id; /* Unique filter id, specified by app. */ ++ uint32 type; /* Filter type (WL_PKT_FILTER_TYPE_xxx). */ ++ uint32 negate_match; /* Negate the result of filter matches */ ++ union { /* Filter definitions */ ++ wl_pkt_filter_pattern_t pattern; /* Pattern matching filter */ ++ } u; ++} wl_pkt_filter_t; ++ ++#define WL_PKT_FILTER_FIXED_LEN OFFSETOF(wl_pkt_filter_t, u) ++#define WL_PKT_FILTER_PATTERN_FIXED_LEN OFFSETOF(wl_pkt_filter_pattern_t, mask_and_pattern) ++ ++/* IOVAR "pkt_filter_enable" parameter. */ ++typedef struct wl_pkt_filter_enable { ++ uint32 id; /* Unique filter id */ ++ uint32 enable; /* Enable/disable bool */ ++} wl_pkt_filter_enable_t; ++ ++/* IOVAR "pkt_filter_list" parameter. Used to retrieve a list of installed filters. */ ++typedef struct wl_pkt_filter_list { ++ uint32 num; /* Number of installed packet filters */ ++ wl_pkt_filter_t filter[1]; /* Variable array of packet filters. */ ++} wl_pkt_filter_list_t; ++ ++#define WL_PKT_FILTER_LIST_FIXED_LEN OFFSETOF(wl_pkt_filter_list_t, filter) ++ ++/* IOVAR "pkt_filter_stats" parameter. Used to retrieve debug statistics. */ ++typedef struct wl_pkt_filter_stats { ++ uint32 num_pkts_matched; /* # filter matches for specified filter id */ ++ uint32 num_pkts_forwarded; /* # packets fwded from dongle to host for all filters */ ++ uint32 num_pkts_discarded; /* # packets discarded by dongle for all filters */ ++} wl_pkt_filter_stats_t; ++ ++/* Sequential Commands ioctl */ ++typedef struct wl_seq_cmd_ioctl { ++ uint32 cmd; /* common ioctl definition */ ++ uint32 len; /* length of user buffer */ ++} wl_seq_cmd_ioctl_t; ++ ++#define WL_SEQ_CMD_ALIGN_BYTES 4 ++ ++/* These are the set of get IOCTLs that should be allowed when using ++ * IOCTL sequence commands. These are issued implicitly by wl.exe each time ++ * it is invoked. 
We never want to buffer these, or else wl.exe will stop working. ++ */ ++#define WL_SEQ_CMDS_GET_IOCTL_FILTER(cmd) \ ++ (((cmd) == WLC_GET_MAGIC) || \ ++ ((cmd) == WLC_GET_VERSION) || \ ++ ((cmd) == WLC_GET_AP) || \ ++ ((cmd) == WLC_GET_INSTANCE)) ++ ++/* ++ * Packet engine interface ++ */ ++ ++#define WL_PKTENG_PER_TX_START 0x01 ++#define WL_PKTENG_PER_TX_STOP 0x02 ++#define WL_PKTENG_PER_RX_START 0x04 ++#define WL_PKTENG_PER_RX_WITH_ACK_START 0x05 ++#define WL_PKTENG_PER_TX_WITH_ACK_START 0x06 ++#define WL_PKTENG_PER_RX_STOP 0x08 ++#define WL_PKTENG_PER_MASK 0xff ++ ++#define WL_PKTENG_SYNCHRONOUS 0x100 /* synchronous flag */ ++ ++typedef struct wl_pkteng { ++ uint32 flags; ++ uint32 delay; /* Inter-packet delay */ ++ uint32 nframes; /* Number of frames */ ++ uint32 length; /* Packet length */ ++ uint8 seqno; /* Enable/disable sequence no. */ ++ struct ether_addr dest; /* Destination address */ ++ struct ether_addr src; /* Source address */ ++} wl_pkteng_t; ++ ++#define NUM_80211b_RATES 4 ++#define NUM_80211ag_RATES 8 ++#define NUM_80211n_RATES 32 ++#define NUM_80211_RATES (NUM_80211b_RATES+NUM_80211ag_RATES+NUM_80211n_RATES) ++typedef struct wl_pkteng_stats { ++ uint32 lostfrmcnt; /* RX PER test: no of frames lost (skip seqno) */ ++ int32 rssi; /* RSSI */ ++ int32 snr; /* signal to noise ratio */ ++ uint16 rxpktcnt[NUM_80211_RATES+1]; ++} wl_pkteng_stats_t; ++ ++typedef struct wl_sslpnphy_papd_debug_data { ++ uint8 psat_pwr; ++ uint8 psat_indx; ++ uint8 final_idx; ++ uint8 start_idx; ++ int32 min_phase; ++ int32 voltage; ++ int8 temperature; ++} wl_sslpnphy_papd_debug_data_t; ++typedef struct wl_sslpnphy_debug_data { ++ int16 papdcompRe [64]; ++ int16 papdcompIm [64]; ++} wl_sslpnphy_debug_data_t; ++typedef struct wl_sslpnphy_spbdump_data { ++ uint16 tbl_length; ++ int16 spbreal[256]; ++ int16 spbimg[256]; ++} wl_sslpnphy_spbdump_data_t; ++typedef struct wl_sslpnphy_percal_debug_data { ++ uint cur_idx; ++ uint tx_drift; ++ uint8 prev_cal_idx; ++ uint percal_ctr; 
++ int nxt_cal_idx; ++ uint force_1idxcal; ++ uint onedxacl_req; ++ int32 last_cal_volt; ++ int8 last_cal_temp; ++ uint vbat_ripple; ++ uint exit_route; ++ int32 volt_winner; ++} wl_sslpnphy_percal_debug_data_t; ++ ++#define WL_WOWL_MAGIC (1 << 0) /* Wakeup on Magic packet */ ++#define WL_WOWL_NET (1 << 1) /* Wakeup on Netpattern */ ++#define WL_WOWL_DIS (1 << 2) /* Wakeup on loss-of-link due to Disassoc/Deauth */ ++#define WL_WOWL_RETR (1 << 3) /* Wakeup on retrograde TSF */ ++#define WL_WOWL_BCN (1 << 4) /* Wakeup on loss of beacon */ ++#define WL_WOWL_TST (1 << 5) /* Wakeup after test */ ++#define WL_WOWL_M1 (1 << 6) /* Wakeup after PTK refresh */ ++#define WL_WOWL_EAPID (1 << 7) /* Wakeup after receipt of EAP-Identity Req */ ++#define WL_WOWL_PME_GPIO (1 << 8) /* Wakeind via PME(0) or GPIO(1) */ ++#define WL_WOWL_NEEDTKIP1 (1 << 9) /* need tkip phase 1 key to be updated by the driver */ ++#define WL_WOWL_GTK_FAILURE (1 << 10) /* enable wakeup if GTK fails */ ++#define WL_WOWL_EXTMAGPAT (1 << 11) /* support extended magic packets */ ++#define WL_WOWL_ARPOFFLOAD (1 << 12) /* support ARP/NS/keepalive offloading */ ++#define WL_WOWL_WPA2 (1 << 13) /* read protocol version for EAPOL frames */ ++#define WL_WOWL_KEYROT (1 << 14) /* If the bit is set, use key rotaton */ ++#define WL_WOWL_BCAST (1 << 15) /* If the bit is set, frm received was bcast frame */ ++ ++#define MAGIC_PKT_MINLEN 102 /* Magic pkt min length is 6 * 0xFF + 16 * ETHER_ADDR_LEN */ ++ ++#define WOWL_PATTEN_TYPE_ARP (1 << 0) /* ARP offload Pattern */ ++#define WOWL_PATTEN_TYPE_NA (1 << 1) /* NA offload Pattern */ ++ ++typedef struct { ++ uint32 masksize; /* Size of the mask in #of bytes */ ++ uint32 offset; /* Offset to start looking for the packet in # of bytes */ ++ uint32 patternoffset; /* Offset of start of pattern in the structure */ ++ uint32 patternsize; /* Size of the pattern itself in #of bytes */ ++ uint32 id; /* id */ ++ uint32 reasonsize; /* Size of the wakeup reason code */ ++ uint32 
flags; /* Flags to tell the pattern type and other properties */ ++ /* Mask follows the structure above */ ++ /* Pattern follows the mask is at 'patternoffset' from the start */ ++} wl_wowl_pattern_t; ++ ++typedef struct { ++ uint count; ++ wl_wowl_pattern_t pattern[1]; ++} wl_wowl_pattern_list_t; ++ ++typedef struct { ++ uint8 pci_wakeind; /* Whether PCI PMECSR PMEStatus bit was set */ ++ uint16 ucode_wakeind; /* What wakeup-event indication was set by ucode */ ++} wl_wowl_wakeind_t; ++ ++ ++/* per AC rate control related data structure */ ++typedef struct wl_txrate_class { ++ uint8 init_rate; ++ uint8 min_rate; ++ uint8 max_rate; ++} wl_txrate_class_t; ++ ++ ++/* Overlap BSS Scan parameters default, minimum, maximum */ ++#define WLC_OBSS_SCAN_PASSIVE_DWELL_DEFAULT 20 /* unit TU */ ++#define WLC_OBSS_SCAN_PASSIVE_DWELL_MIN 5 /* unit TU */ ++#define WLC_OBSS_SCAN_PASSIVE_DWELL_MAX 1000 /* unit TU */ ++#define WLC_OBSS_SCAN_ACTIVE_DWELL_DEFAULT 10 /* unit TU */ ++#define WLC_OBSS_SCAN_ACTIVE_DWELL_MIN 10 /* unit TU */ ++#define WLC_OBSS_SCAN_ACTIVE_DWELL_MAX 1000 /* unit TU */ ++#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_DEFAULT 300 /* unit Sec */ ++#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MIN 10 /* unit Sec */ ++#define WLC_OBSS_SCAN_WIDTHSCAN_INTERVAL_MAX 900 /* unit Sec */ ++#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_DEFAULT 5 ++#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MIN 5 ++#define WLC_OBSS_SCAN_CHANWIDTH_TRANSITION_DLY_MAX 100 ++#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_DEFAULT 200 /* unit TU */ ++#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MIN 200 /* unit TU */ ++#define WLC_OBSS_SCAN_PASSIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */ ++#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_DEFAULT 20 /* unit TU */ ++#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MIN 20 /* unit TU */ ++#define WLC_OBSS_SCAN_ACTIVE_TOTAL_PER_CHANNEL_MAX 10000 /* unit TU */ ++#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_DEFAULT 25 /* unit percent */ ++#define 
WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MIN 0 /* unit percent */ ++#define WLC_OBSS_SCAN_ACTIVITY_THRESHOLD_MAX 100 /* unit percent */ ++ ++/* structure for Overlap BSS scan arguments */ ++typedef struct wl_obss_scan_arg { ++ int16 passive_dwell; ++ int16 active_dwell; ++ int16 bss_widthscan_interval; ++ int16 passive_total; ++ int16 active_total; ++ int16 chanwidth_transition_delay; ++ int16 activity_threshold; ++} wl_obss_scan_arg_t; ++ ++#define WL_OBSS_SCAN_PARAM_LEN sizeof(wl_obss_scan_arg_t) ++#define WL_MIN_NUM_OBSS_SCAN_ARG 7 /* minimum number of arguments required for OBSS Scan */ ++ ++#define WL_COEX_INFO_MASK 0x07 ++#define WL_COEX_INFO_REQ 0x01 ++#define WL_COEX_40MHZ_INTOLERANT 0x02 ++#define WL_COEX_WIDTH20 0x04 ++ ++#define WLC_RSSI_INVALID 0 /* invalid RSSI value */ ++ ++#define MAX_RSSI_LEVELS 8 ++ ++/* RSSI event notification configuration. */ ++typedef struct wl_rssi_event { ++ uint32 rate_limit_msec; /* # of events posted to application will be limited to ++ * one per specified period (0 to disable rate limit). ++ */ ++ uint8 num_rssi_levels; /* Number of entries in rssi_levels[] below */ ++ int8 rssi_levels[MAX_RSSI_LEVELS]; /* Variable number of RSSI levels. An event ++ * will be posted each time the RSSI of received ++ * beacons/packets crosses a level. 
++ */ ++} wl_rssi_event_t; ++ ++typedef struct wl_action_obss_coex_req { ++ uint8 info; ++ uint8 num; ++ uint8 ch_list[1]; ++} wl_action_obss_coex_req_t; ++ ++ ++/* IOVar parameter block for small MAC address array with type indicator */ ++#define WL_IOV_MAC_PARAM_LEN 4 ++ ++#define WL_IOV_PKTQ_LOG_PRECS 16 ++ ++typedef struct { ++ uint32 num_addrs; ++ char addr_type[WL_IOV_MAC_PARAM_LEN]; ++ struct ether_addr ea[WL_IOV_MAC_PARAM_LEN]; ++} wl_iov_mac_params_t; ++ ++ ++/* Parameter block for PKTQ_LOG statistics */ ++typedef struct { ++ uint32 requested; /* packets requested to be stored */ ++ uint32 stored; /* packets stored */ ++ uint32 saved; /* packets saved, ++ because a lowest priority queue has given away one packet ++ */ ++ uint32 selfsaved; /* packets saved, ++ because an older packet from the same queue has been dropped ++ */ ++ uint32 full_dropped; /* packets dropped, ++ because pktq is full with higher precedence packets ++ */ ++ uint32 dropped; /* packets dropped because pktq per that precedence is full */ ++ uint32 sacrificed; /* packets dropped, ++ in order to save one from a queue of a highest priority ++ */ ++ uint32 busy; /* packets droped because of hardware/transmission error */ ++ uint32 retry; /* packets re-sent because they were not received */ ++ uint32 ps_retry; /* packets retried again prior to moving power save mode */ ++ uint32 retry_drop; /* packets finally dropped after retry limit */ ++ uint32 max_avail; /* the high-water mark of the queue capacity for packets - ++ goes to zero as queue fills ++ */ ++ uint32 max_used; /* the high-water mark of the queue utilisation for packets - ++ increases with use ('inverse' of max_avail) ++ */ ++ uint32 queue_capacity; /* the maximum capacity of the queue */ ++} pktq_log_counters_v01_t; ++ ++#define sacrified sacrificed ++ ++typedef struct { ++ uint8 num_prec[WL_IOV_MAC_PARAM_LEN]; ++ pktq_log_counters_v01_t counters[WL_IOV_MAC_PARAM_LEN][WL_IOV_PKTQ_LOG_PRECS]; ++ char headings[1]; ++} 
pktq_log_format_v01_t; ++ ++ ++typedef struct { ++ uint32 version; ++ wl_iov_mac_params_t params; ++ union { ++ pktq_log_format_v01_t v01; ++ } pktq_log; ++} wl_iov_pktq_log_t; ++ ++ ++/* **** EXTLOG **** */ ++#define EXTLOG_CUR_VER 0x0100 ++ ++#define MAX_ARGSTR_LEN 18 /* At least big enough for storing ETHER_ADDR_STR_LEN */ ++ ++/* log modules (bitmap) */ ++#define LOG_MODULE_COMMON 0x0001 ++#define LOG_MODULE_ASSOC 0x0002 ++#define LOG_MODULE_EVENT 0x0004 ++#define LOG_MODULE_MAX 3 /* Update when adding module */ ++ ++/* log levels */ ++#define WL_LOG_LEVEL_DISABLE 0 ++#define WL_LOG_LEVEL_ERR 1 ++#define WL_LOG_LEVEL_WARN 2 ++#define WL_LOG_LEVEL_INFO 3 ++#define WL_LOG_LEVEL_MAX WL_LOG_LEVEL_INFO /* Update when adding level */ ++ ++/* flag */ ++#define LOG_FLAG_EVENT 1 ++ ++/* log arg_type */ ++#define LOG_ARGTYPE_NULL 0 ++#define LOG_ARGTYPE_STR 1 /* %s */ ++#define LOG_ARGTYPE_INT 2 /* %d */ ++#define LOG_ARGTYPE_INT_STR 3 /* %d...%s */ ++#define LOG_ARGTYPE_STR_INT 4 /* %s...%d */ ++ ++typedef struct wlc_extlog_cfg { ++ int max_number; ++ uint16 module; /* bitmap */ ++ uint8 level; ++ uint8 flag; ++ uint16 version; ++} wlc_extlog_cfg_t; ++ ++typedef struct log_record { ++ uint32 time; ++ uint16 module; ++ uint16 id; ++ uint8 level; ++ uint8 sub_unit; ++ uint8 seq_num; ++ int32 arg; ++ char str[MAX_ARGSTR_LEN]; ++} log_record_t; ++ ++typedef struct wlc_extlog_req { ++ uint32 from_last; ++ uint32 num; ++} wlc_extlog_req_t; ++ ++typedef struct wlc_extlog_results { ++ uint16 version; ++ uint16 record_len; ++ uint32 num; ++ log_record_t logs[1]; ++} wlc_extlog_results_t; ++ ++typedef struct log_idstr { ++ uint16 id; ++ uint16 flag; ++ uint8 arg_type; ++ const char *fmt_str; ++} log_idstr_t; ++ ++#define FMTSTRF_USER 1 ++ ++/* flat ID definitions ++ * New definitions HAVE TO BE ADDED at the end of the table. 
Otherwise, it will ++ * affect backward compatibility with pre-existing apps ++ */ ++typedef enum { ++ FMTSTR_DRIVER_UP_ID = 0, ++ FMTSTR_DRIVER_DOWN_ID = 1, ++ FMTSTR_SUSPEND_MAC_FAIL_ID = 2, ++ FMTSTR_NO_PROGRESS_ID = 3, ++ FMTSTR_RFDISABLE_ID = 4, ++ FMTSTR_REG_PRINT_ID = 5, ++ FMTSTR_EXPTIME_ID = 6, ++ FMTSTR_JOIN_START_ID = 7, ++ FMTSTR_JOIN_COMPLETE_ID = 8, ++ FMTSTR_NO_NETWORKS_ID = 9, ++ FMTSTR_SECURITY_MISMATCH_ID = 10, ++ FMTSTR_RATE_MISMATCH_ID = 11, ++ FMTSTR_AP_PRUNED_ID = 12, ++ FMTSTR_KEY_INSERTED_ID = 13, ++ FMTSTR_DEAUTH_ID = 14, ++ FMTSTR_DISASSOC_ID = 15, ++ FMTSTR_LINK_UP_ID = 16, ++ FMTSTR_LINK_DOWN_ID = 17, ++ FMTSTR_RADIO_HW_OFF_ID = 18, ++ FMTSTR_RADIO_HW_ON_ID = 19, ++ FMTSTR_EVENT_DESC_ID = 20, ++ FMTSTR_PNP_SET_POWER_ID = 21, ++ FMTSTR_RADIO_SW_OFF_ID = 22, ++ FMTSTR_RADIO_SW_ON_ID = 23, ++ FMTSTR_PWD_MISMATCH_ID = 24, ++ FMTSTR_FATAL_ERROR_ID = 25, ++ FMTSTR_AUTH_FAIL_ID = 26, ++ FMTSTR_ASSOC_FAIL_ID = 27, ++ FMTSTR_IBSS_FAIL_ID = 28, ++ FMTSTR_EXTAP_FAIL_ID = 29, ++ FMTSTR_MAX_ID ++} log_fmtstr_id_t; ++ ++#ifdef DONGLEOVERLAYS ++typedef struct { ++ uint32 flags_idx; /* lower 8 bits: overlay index; upper 24 bits: flags */ ++ uint32 offset; /* offset into overlay region to write code */ ++ uint32 len; /* overlay code len */ ++ /* overlay code follows this struct */ ++} wl_ioctl_overlay_t; ++ ++#define OVERLAY_IDX_MASK 0x000000ff ++#define OVERLAY_IDX_SHIFT 0 ++#define OVERLAY_FLAGS_MASK 0xffffff00 ++#define OVERLAY_FLAGS_SHIFT 8 ++/* overlay written to device memory immediately after loading the base image */ ++#define OVERLAY_FLAG_POSTLOAD 0x100 ++/* defer overlay download until the device responds w/WLC_E_OVL_DOWNLOAD event */ ++#define OVERLAY_FLAG_DEFER_DL 0x200 ++/* overlay downloaded prior to the host going to sleep */ ++#define OVERLAY_FLAG_PRESLEEP 0x400 ++ ++#define OVERLAY_DOWNLOAD_CHUNKSIZE 1024 ++#endif /* DONGLEOVERLAYS */ ++ ++/* no default structure packing */ ++#include ++ ++/* require strict packing */ ++#include ++/* 
Structures and constants used for "vndr_ie" IOVar interface */ ++#define VNDR_IE_CMD_LEN 4 /* length of the set command string: ++ * "add", "del" (+ NUL) ++ */ ++ ++/* 802.11 Mgmt Packet flags */ ++#define VNDR_IE_BEACON_FLAG 0x1 ++#define VNDR_IE_PRBRSP_FLAG 0x2 ++#define VNDR_IE_ASSOCRSP_FLAG 0x4 ++#define VNDR_IE_AUTHRSP_FLAG 0x8 ++#define VNDR_IE_PRBREQ_FLAG 0x10 ++#define VNDR_IE_ASSOCREQ_FLAG 0x20 ++#define VNDR_IE_IWAPID_FLAG 0x40 /* vendor IE in IW advertisement protocol ID field */ ++#define VNDR_IE_CUSTOM_FLAG 0x100 /* allow custom IE id */ ++ ++#define VNDR_IE_INFO_HDR_LEN (sizeof(uint32)) ++ ++typedef BWL_PRE_PACKED_STRUCT struct { ++ uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ ++ vndr_ie_t vndr_ie_data; /* vendor IE data */ ++} BWL_POST_PACKED_STRUCT vndr_ie_info_t; ++ ++typedef BWL_PRE_PACKED_STRUCT struct { ++ int iecount; /* number of entries in the vndr_ie_list[] array */ ++ vndr_ie_info_t vndr_ie_list[1]; /* variable size list of vndr_ie_info_t structs */ ++} BWL_POST_PACKED_STRUCT vndr_ie_buf_t; ++ ++typedef BWL_PRE_PACKED_STRUCT struct { ++ char cmd[VNDR_IE_CMD_LEN]; /* vndr_ie IOVar set command : "add", "del" + NUL */ ++ vndr_ie_buf_t vndr_ie_buffer; /* buffer containing Vendor IE list information */ ++} BWL_POST_PACKED_STRUCT vndr_ie_setbuf_t; ++ ++/* tag_ID/length/value_buffer tuple */ ++typedef BWL_PRE_PACKED_STRUCT struct { ++ uint8 id; ++ uint8 len; ++ uint8 data[1]; ++} BWL_POST_PACKED_STRUCT tlv_t; ++ ++typedef BWL_PRE_PACKED_STRUCT struct { ++ uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ ++ tlv_t ie_data; /* IE data */ ++} BWL_POST_PACKED_STRUCT ie_info_t; ++ ++typedef BWL_PRE_PACKED_STRUCT struct { ++ int iecount; /* number of entries in the ie_list[] array */ ++ ie_info_t ie_list[1]; /* variable size list of ie_info_t structs */ ++} BWL_POST_PACKED_STRUCT ie_buf_t; ++ ++typedef BWL_PRE_PACKED_STRUCT struct { ++ char cmd[VNDR_IE_CMD_LEN]; /* ie IOVar set command : "add" + NUL */ 
++ ie_buf_t ie_buffer; /* buffer containing IE list information */ ++} BWL_POST_PACKED_STRUCT ie_setbuf_t; ++ ++typedef BWL_PRE_PACKED_STRUCT struct { ++ uint32 pktflag; /* bitmask indicating which packet(s) contain this IE */ ++ uint8 id; /* IE type */ ++} BWL_POST_PACKED_STRUCT ie_getbuf_t; ++ ++/* structures used to define format of wps ie data from probe requests */ ++/* passed up to applications via iovar "prbreq_wpsie" */ ++typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_hdr { ++ struct ether_addr staAddr; ++ uint16 ieLen; ++} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_hdr_t; ++ ++typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_data { ++ sta_prbreq_wps_ie_hdr_t hdr; ++ uint8 ieData[1]; ++} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_data_t; ++ ++typedef BWL_PRE_PACKED_STRUCT struct sta_prbreq_wps_ie_list { ++ uint32 totLen; ++ uint8 ieDataList[1]; ++} BWL_POST_PACKED_STRUCT sta_prbreq_wps_ie_list_t; ++ ++ ++#ifdef WLMEDIA_TXFAILEVENT ++typedef BWL_PRE_PACKED_STRUCT struct { ++ char dest[ETHER_ADDR_LEN]; /* destination MAC */ ++ uint8 prio; /* Packet Priority */ ++ uint8 flags; /* Flags */ ++ uint32 tsf_l; /* TSF timer low */ ++ uint32 tsf_h; /* TSF timer high */ ++ uint16 rates; /* Main Rates */ ++ uint16 txstatus; /* TX Status */ ++} BWL_POST_PACKED_STRUCT txfailinfo_t; ++#endif /* WLMEDIA_TXFAILEVENT */ ++ ++/* no strict structure packing */ ++#include ++ ++/* Global ASSERT Logging */ ++#define ASSERTLOG_CUR_VER 0x0100 ++#define MAX_ASSRTSTR_LEN 64 ++ ++typedef struct assert_record { ++ uint32 time; ++ uint8 seq_num; ++ char str[MAX_ASSRTSTR_LEN]; ++} assert_record_t; ++ ++typedef struct assertlog_results { ++ uint16 version; ++ uint16 record_len; ++ uint32 num; ++ assert_record_t logs[1]; ++} assertlog_results_t; ++ ++#define LOGRRC_FIX_LEN 8 ++#define IOBUF_ALLOWED_NUM_OF_LOGREC(type, len) ((len - LOGRRC_FIX_LEN)/sizeof(type)) ++ ++ ++/* channel interference measurement (chanim) related defines */ ++ ++/* chanim mode */ ++#define CHANIM_DISABLE 0 
/* disabled */ ++#define CHANIM_DETECT 1 /* detection only */ ++#define CHANIM_EXT 2 /* external state machine */ ++#define CHANIM_ACT 3 /* full internal state machine, detect + act */ ++#define CHANIM_MODE_MAX 4 ++ ++/* define for apcs reason code */ ++#define APCS_INIT 0 ++#define APCS_IOCTL 1 ++#define APCS_CHANIM 2 ++#define APCS_CSTIMER 3 ++#define APCS_BTA 4 ++ ++/* number of ACS record entries */ ++#define CHANIM_ACS_RECORD 10 ++ ++/* CHANIM */ ++#define CCASTATS_TXDUR 0 ++#define CCASTATS_INBSS 1 ++#define CCASTATS_OBSS 2 ++#define CCASTATS_NOCTG 3 ++#define CCASTATS_NOPKT 4 ++#define CCASTATS_DOZE 5 ++#define CCASTATS_TXOP 6 ++#define CCASTATS_GDTXDUR 7 ++#define CCASTATS_BDTXDUR 8 ++#define CCASTATS_MAX 9 ++ ++/* chanim acs record */ ++typedef struct { ++ bool valid; ++ uint8 trigger; ++ chanspec_t selected_chspc; ++ int8 bgnoise; ++ uint32 glitch_cnt; ++ uint8 ccastats; ++ uint timestamp; ++} chanim_acs_record_t; ++ ++typedef struct { ++ chanim_acs_record_t acs_record[CHANIM_ACS_RECORD]; ++ uint8 count; ++ uint timestamp; ++} wl_acs_record_t; ++ ++typedef struct chanim_stats { ++ uint32 glitchcnt; /* normalized as per second count */ ++ uint32 badplcp; /* normalized as per second count */ ++ uint8 ccastats[CCASTATS_MAX]; /* normalized as 0-255 */ ++ int8 bgnoise; /* background noise level (in dBm) */ ++ chanspec_t chanspec; ++ uint32 timestamp; ++} chanim_stats_t; ++ ++#define WL_CHANIM_STATS_VERSION 1 ++#define WL_CHANIM_COUNT_ALL 0xff ++#define WL_CHANIM_COUNT_ONE 0x1 ++ ++typedef struct { ++ uint32 buflen; ++ uint32 version; ++ uint32 count; ++ chanim_stats_t stats[1]; ++} wl_chanim_stats_t; ++ ++#define WL_CHANIM_STATS_FIXED_LEN OFFSETOF(wl_chanim_stats_t, stats) ++ ++/* Noise measurement metrics. 
*/ ++#define NOISE_MEASURE_KNOISE 0x1 ++ ++/* scb probe parameter */ ++typedef struct { ++ uint32 scb_timeout; ++ uint32 scb_activity_time; ++ uint32 scb_max_probe; ++} wl_scb_probe_t; ++ ++/* ap tpc modes */ ++#define AP_TPC_OFF 0 ++#define AP_TPC_BSS_PWR 1 /* BSS power control */ ++#define AP_TPC_AP_PWR 2 /* AP power control */ ++#define AP_TPC_AP_BSS_PWR 3 /* Both AP and BSS power control */ ++#define AP_TPC_MAX_LINK_MARGIN 127 ++ ++/* structure/defines for selective mgmt frame (smf) stats support */ ++ ++#define SMFS_VERSION 1 ++/* selected mgmt frame (smf) stats element */ ++typedef struct wl_smfs_elem { ++ uint32 count; ++ uint16 code; /* SC or RC code */ ++} wl_smfs_elem_t; ++ ++typedef struct wl_smf_stats { ++ uint32 version; ++ uint16 length; /* reserved for future usage */ ++ uint8 type; ++ uint8 codetype; ++ uint32 ignored_cnt; ++ uint32 malformed_cnt; ++ uint32 count_total; /* count included the interested group */ ++ wl_smfs_elem_t elem[1]; ++} wl_smf_stats_t; ++ ++#define WL_SMFSTATS_FIXED_LEN OFFSETOF(wl_smf_stats_t, elem); ++ ++enum { ++ SMFS_CODETYPE_SC, ++ SMFS_CODETYPE_RC ++}; ++ ++/* reuse two number in the sc/rc space */ ++#define SMFS_CODE_MALFORMED 0xFFFE ++#define SMFS_CODE_IGNORED 0xFFFD ++ ++typedef enum smfs_type { ++ SMFS_TYPE_AUTH, ++ SMFS_TYPE_ASSOC, ++ SMFS_TYPE_REASSOC, ++ SMFS_TYPE_DISASSOC_TX, ++ SMFS_TYPE_DISASSOC_RX, ++ SMFS_TYPE_DEAUTH_TX, ++ SMFS_TYPE_DEAUTH_RX, ++ SMFS_TYPE_MAX ++} smfs_type_t; ++ ++#ifdef PHYMON ++ ++#define PHYMON_VERSION 1 ++ ++typedef struct wl_phycal_core_state { ++ /* Tx IQ/LO calibration coeffs */ ++ int16 tx_iqlocal_a; ++ int16 tx_iqlocal_b; ++ int8 tx_iqlocal_ci; ++ int8 tx_iqlocal_cq; ++ int8 tx_iqlocal_di; ++ int8 tx_iqlocal_dq; ++ int8 tx_iqlocal_ei; ++ int8 tx_iqlocal_eq; ++ int8 tx_iqlocal_fi; ++ int8 tx_iqlocal_fq; ++ ++ /* Rx IQ calibration coeffs */ ++ int16 rx_iqcal_a; ++ int16 rx_iqcal_b; ++ ++ uint8 tx_iqlocal_pwridx; /* Tx Power Index for Tx IQ/LO calibration */ ++ uint32 
papd_epsilon_table[64]; /* PAPD epsilon table */ ++ int16 papd_epsilon_offset; /* PAPD epsilon offset */ ++ uint8 curr_tx_pwrindex; /* Tx power index */ ++ int8 idle_tssi; /* Idle TSSI */ ++ int8 est_tx_pwr; /* Estimated Tx Power (dB) */ ++ int8 est_rx_pwr; /* Estimated Rx Power (dB) from RSSI */ ++ uint16 rx_gaininfo; /* Rx gain applied on last Rx pkt */ ++ uint16 init_gaincode; /* initgain required for ACI */ ++ int8 estirr_tx; ++ int8 estirr_rx; ++ ++} wl_phycal_core_state_t; ++ ++typedef struct wl_phycal_state { ++ int version; ++ int8 num_phy_cores; /* number of cores */ ++ int8 curr_temperature; /* on-chip temperature sensor reading */ ++ chanspec_t chspec; /* channspec for this state */ ++ bool aci_state; /* ACI state: ON/OFF */ ++ uint16 crsminpower; /* crsminpower required for ACI */ ++ uint16 crsminpowerl; /* crsminpowerl required for ACI */ ++ uint16 crsminpoweru; /* crsminpoweru required for ACI */ ++ wl_phycal_core_state_t phycal_core[1]; ++} wl_phycal_state_t; ++ ++#define WL_PHYCAL_STAT_FIXED_LEN OFFSETOF(wl_phycal_state_t, phycal_core) ++#endif /* PHYMON */ ++ ++/* discovery state */ ++typedef struct wl_p2p_disc_st { ++ uint8 state; /* see state */ ++ chanspec_t chspec; /* valid in listen state */ ++ uint16 dwell; /* valid in listen state, in ms */ ++} wl_p2p_disc_st_t; ++ ++/* state */ ++#define WL_P2P_DISC_ST_SCAN 0 ++#define WL_P2P_DISC_ST_LISTEN 1 ++#define WL_P2P_DISC_ST_SEARCH 2 ++ ++/* scan request */ ++typedef struct wl_p2p_scan { ++ uint8 type; /* 'S' for WLC_SCAN, 'E' for "escan" */ ++ uint8 reserved[3]; ++ /* scan or escan parms... 
*/ ++} wl_p2p_scan_t; ++ ++/* i/f request */ ++typedef struct wl_p2p_if { ++ struct ether_addr addr; ++ uint8 type; /* see i/f type */ ++ chanspec_t chspec; /* for p2p_ifadd GO */ ++} wl_p2p_if_t; ++ ++/* i/f type */ ++#define WL_P2P_IF_CLIENT 0 ++#define WL_P2P_IF_GO 1 ++#define WL_P2P_IF_DYNBCN_GO 2 ++#define WL_P2P_IF_DEV 3 ++ ++/* i/f query */ ++typedef struct wl_p2p_ifq { ++ uint bsscfgidx; ++ char ifname[BCM_MSG_IFNAME_MAX]; ++} wl_p2p_ifq_t; ++ ++/* OppPS & CTWindow */ ++typedef struct wl_p2p_ops { ++ uint8 ops; /* 0: disable 1: enable */ ++ uint8 ctw; /* >= 10 */ ++} wl_p2p_ops_t; ++ ++/* absence and presence request */ ++typedef struct wl_p2p_sched_desc { ++ uint32 start; ++ uint32 interval; ++ uint32 duration; ++ uint32 count; /* see count */ ++} wl_p2p_sched_desc_t; ++ ++/* count */ ++#define WL_P2P_SCHED_RSVD 0 ++#define WL_P2P_SCHED_REPEAT 255 /* anything > 255 will be treated as 255 */ ++ ++typedef struct wl_p2p_sched { ++ uint8 type; /* see schedule type */ ++ uint8 action; /* see schedule action */ ++ uint8 option; /* see schedule option */ ++ wl_p2p_sched_desc_t desc[1]; ++} wl_p2p_sched_t; ++#define WL_P2P_SCHED_FIXED_LEN 3 ++ ++/* schedule type */ ++#define WL_P2P_SCHED_TYPE_ABS 0 /* Scheduled Absence */ ++#define WL_P2P_SCHED_TYPE_REQ_ABS 1 /* Requested Absence */ ++ ++/* schedule action during absence periods (for WL_P2P_SCHED_ABS type) */ ++#define WL_P2P_SCHED_ACTION_NONE 0 /* no action */ ++#define WL_P2P_SCHED_ACTION_DOZE 1 /* doze */ ++/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */ ++#define WL_P2P_SCHED_ACTION_GOOFF 2 /* turn off GO beacon/prbrsp functions */ ++/* schedule option - WL_P2P_SCHED_TYPE_XXX */ ++#define WL_P2P_SCHED_ACTION_RESET 255 /* reset */ ++ ++/* schedule option - WL_P2P_SCHED_TYPE_ABS */ ++#define WL_P2P_SCHED_OPTION_NORMAL 0 /* normal start/interval/duration/count */ ++#define WL_P2P_SCHED_OPTION_BCNPCT 1 /* percentage of beacon interval */ ++/* schedule option - WL_P2P_SCHED_TYPE_REQ_ABS */ ++#define 
WL_P2P_SCHED_OPTION_TSFOFS 2 /* normal start/internal/duration/count with ++ * start being an offset of the 'current' TSF ++ */ ++ ++/* feature flags */ ++#define WL_P2P_FEAT_GO_CSA (1 << 0) /* GO moves with the STA using CSA method */ ++#define WL_P2P_FEAT_GO_NOLEGACY (1 << 1) /* GO does not probe respond to non-p2p probe ++ * requests ++ */ ++#define WL_P2P_FEAT_RESTRICT_DEV_RESP (1 << 2) /* Restrict p2p dev interface from responding */ ++ ++/* RFAWARE def */ ++#define BCM_ACTION_RFAWARE 0x77 ++#define BCM_ACTION_RFAWARE_DCS 0x01 ++ ++/* DCS reason code define */ ++#define BCM_DCS_IOVAR 0x1 ++#define BCM_DCS_UNKNOWN 0xFF ++ ++typedef struct wl_bcmdcs_data { ++ uint reason; ++ chanspec_t chspec; ++} wl_bcmdcs_data_t; ++ ++/* n-mode support capability */ ++/* 2x2 includes both 1x1 & 2x2 devices ++ * reserved #define 2 for future when we want to separate 1x1 & 2x2 and ++ * control it independently ++ */ ++#define WL_11N_2x2 1 ++#define WL_11N_3x3 3 ++#define WL_11N_4x4 4 ++ ++/* define 11n feature disable flags */ ++#define WLFEATURE_DISABLE_11N 0x00000001 ++#define WLFEATURE_DISABLE_11N_STBC_TX 0x00000002 ++#define WLFEATURE_DISABLE_11N_STBC_RX 0x00000004 ++#define WLFEATURE_DISABLE_11N_SGI_TX 0x00000008 ++#define WLFEATURE_DISABLE_11N_SGI_RX 0x00000010 ++#define WLFEATURE_DISABLE_11N_AMPDU_TX 0x00000020 ++#define WLFEATURE_DISABLE_11N_AMPDU_RX 0x00000040 ++#define WLFEATURE_DISABLE_11N_GF 0x00000080 ++ ++/* Proxy STA modes */ ++#define PSTA_MODE_DISABLED 0 ++#define PSTA_MODE_PROXY 1 ++#define PSTA_MODE_REPEATER 2 ++ ++ ++/* NAT configuration */ ++typedef struct { ++ uint32 ipaddr; /* interface ip address */ ++ uint32 ipaddr_mask; /* interface ip address mask */ ++ uint32 ipaddr_gateway; /* gateway ip address */ ++ uint8 mac_gateway[6]; /* gateway mac address */ ++ uint32 ipaddr_dns; /* DNS server ip address, valid only for public if */ ++ uint8 mac_dns[6]; /* DNS server mac address, valid only for public if */ ++ uint8 GUID[38]; /* interface GUID */ ++} 
nat_if_info_t; ++ ++typedef struct { ++ uint op; /* operation code */ ++ bool pub_if; /* set for public if, clear for private if */ ++ nat_if_info_t if_info; /* interface info */ ++} nat_cfg_t; ++ ++/* op code in nat_cfg */ ++#define NAT_OP_ENABLE 1 /* enable NAT on given interface */ ++#define NAT_OP_DISABLE 2 /* disable NAT on given interface */ ++#define NAT_OP_DISABLE_ALL 3 /* disable NAT on all interfaces */ ++ ++/* NAT state */ ++#define NAT_STATE_ENABLED 1 /* NAT is enabled */ ++#define NAT_STATE_DISABLED 2 /* NAT is disabled */ ++ ++typedef struct { ++ int state; /* NAT state returned */ ++} nat_state_t; ++ ++#ifdef PROP_TXSTATUS ++/* Bit definitions for tlv iovar */ ++/* ++ * enable RSSI signals: ++ * WLFC_CTL_TYPE_RSSI ++ */ ++#define WLFC_FLAGS_RSSI_SIGNALS 1 ++ ++/* enable (if/mac_open, if/mac_close,, mac_add, mac_del) signals: ++ * ++ * WLFC_CTL_TYPE_MAC_OPEN ++ * WLFC_CTL_TYPE_MAC_CLOSE ++ * ++ * WLFC_CTL_TYPE_INTERFACE_OPEN ++ * WLFC_CTL_TYPE_INTERFACE_CLOSE ++ * ++ * WLFC_CTL_TYPE_MACDESC_ADD ++ * WLFC_CTL_TYPE_MACDESC_DEL ++ * ++ */ ++#define WLFC_FLAGS_XONXOFF_SIGNALS 2 ++ ++/* enable (status, fifo_credit, mac_credit) signals ++ * WLFC_CTL_TYPE_MAC_REQUEST_CREDIT ++ * WLFC_CTL_TYPE_TXSTATUS ++ * WLFC_CTL_TYPE_FIFO_CREDITBACK ++ */ ++#define WLFC_FLAGS_CREDIT_STATUS_SIGNALS 4 ++ ++#define WLFC_FLAGS_HOST_PROPTXSTATUS_ACTIVE 8 ++#define WLFC_FLAGS_PSQ_GENERATIONFSM_ENABLE 16 ++#define WLFC_FLAGS_PSQ_ZERO_BUFFER_ENABLE 32 ++#endif /* PROP_TXSTATUS */ ++ ++#define BTA_STATE_LOG_SZ 64 ++ ++/* BTAMP Statemachine states */ ++enum { ++ HCIReset = 1, ++ HCIReadLocalAMPInfo, ++ HCIReadLocalAMPASSOC, ++ HCIWriteRemoteAMPASSOC, ++ HCICreatePhysicalLink, ++ HCIAcceptPhysicalLinkRequest, ++ HCIDisconnectPhysicalLink, ++ HCICreateLogicalLink, ++ HCIAcceptLogicalLink, ++ HCIDisconnectLogicalLink, ++ HCILogicalLinkCancel, ++ HCIAmpStateChange, ++ HCIWriteLogicalLinkAcceptTimeout ++}; ++ ++typedef struct flush_txfifo { ++ uint32 txfifobmp; ++ uint32 hwtxfifoflush; 
++ struct ether_addr ea; ++} flush_txfifo_t; ++ ++#define CHANNEL_5G_LOW_START 36 /* 5G low (36..48) CDD enable/disable bit mask */ ++#define CHANNEL_5G_MID_START 52 /* 5G mid (52..64) CDD enable/disable bit mask */ ++#define CHANNEL_5G_HIGH_START 100 /* 5G high (100..140) CDD enable/disable bit mask */ ++#define CHANNEL_5G_UPPER_START 149 /* 5G upper (149..161) CDD enable/disable bit mask */ ++ ++enum { ++ SPATIAL_MODE_2G_IDX = 0, ++ SPATIAL_MODE_5G_LOW_IDX, ++ SPATIAL_MODE_5G_MID_IDX, ++ SPATIAL_MODE_5G_HIGH_IDX, ++ SPATIAL_MODE_5G_UPPER_IDX, ++ SPATIAL_MODE_MAX_IDX ++}; ++ ++/* IOVAR "mempool" parameter. Used to retrieve a list of memory pool statistics. */ ++typedef struct wl_mempool_stats { ++ int num; /* Number of memory pools */ ++ bcm_mp_stats_t s[1]; /* Variable array of memory pool stats. */ ++} wl_mempool_stats_t; ++ ++/* Network Offload Engine */ ++#define NWOE_OL_ENABLE 0x00000001 ++ ++typedef struct { ++ uint32 ipaddr; ++ uint32 ipaddr_netmask; ++ uint32 ipaddr_gateway; ++} nwoe_ifconfig_t; ++ ++/* ++ * Traffic management structures/defines. 
++ */ ++ ++/* Traffic management bandwidth parameters */ ++#define TRF_MGMT_MAX_PRIORITIES 3 ++ ++#define TRF_MGMT_FLAG_ADD_DSCP 0x0001 /* Add DSCP to IP TOS field */ ++#define TRF_MGMT_FLAG_DISABLE_SHAPING 0x0002 /* Only support traffic clasification */ ++ ++ ++/* Traffic management priority classes */ ++typedef enum trf_mgmt_priority_class { ++ trf_mgmt_priority_low = 0, /* Maps to 802.1p BK */ ++ trf_mgmt_priority_medium = 1, /* Maps to 802.1p BE */ ++ trf_mgmt_priority_high = 2, /* Maps to 802.1p VI */ ++ trf_mgmt_priority_invalid = (trf_mgmt_priority_high + 1) ++} trf_mgmt_priority_class_t; ++ ++/* Traffic management configuration parameters */ ++typedef struct trf_mgmt_config { ++ uint32 trf_mgmt_enabled; /* 0 - disabled, 1 - enabled */ ++ uint32 flags; /* See TRF_MGMT_FLAG_xxx defines */ ++ uint32 host_ip_addr; ++ uint32 host_subnet_mask; ++ uint32 downlink_bandwidth; /* In units of kbps */ ++ uint32 uplink_bandwidth; /* In units of kbps */ ++ uint32 min_tx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; ++ uint32 min_rx_bandwidth[TRF_MGMT_MAX_PRIORITIES]; ++} trf_mgmt_config_t; ++ ++/* Traffic management filter */ ++typedef struct trf_mgmt_filter { ++ uint32 dst_ip_addr; /* His IP address */ ++ uint16 dst_port; /* His L4 port */ ++ uint16 src_port; /* My L4 port */ ++ uint16 prot; /* L4 protocol (only TCP or UDP protocols) */ ++ uint16 flags; /* TBD. For now, this must be zero. 
*/ ++ trf_mgmt_priority_class_t priority; /* 802.1p priority for filtered packets */ ++} trf_mgmt_filter_t; ++ ++/* Traffic management filter list (variable length) */ ++typedef struct trf_mgmt_filter_list { ++ uint32 num_filters; ++ trf_mgmt_filter_t filter[1]; ++} trf_mgmt_filter_list_t; ++ ++/* Traffic management shaping info */ ++typedef struct trf_mgmt_shaping_info { ++ uint32 max_bps; /* Max bytes consumed or produced per second */ ++ uint32 max_bytes_per_sampling_period; /* Max bytes consumed or produced per sample */ ++ uint32 shaping_delay_threshold; /* Theshold for starting traffic delays */ ++ uint32 num_bytes_produced_per_sec; /* Bytes produced over the sampling period */ ++ uint32 num_bytes_consumed_per_sec; /* Bytes consumed over the sampling period */ ++} trf_mgmt_shaping_info_t; ++ ++/* Traffic management shaping info array */ ++typedef struct trf_mgmt_shaping_info_array { ++ trf_mgmt_shaping_info_t tx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES]; ++ trf_mgmt_shaping_info_t rx_queue_shaping_info[TRF_MGMT_MAX_PRIORITIES]; ++} trf_mgmt_shaping_info_array_t; ++ ++ ++/* Traffic management statistical counters */ ++typedef struct trf_mgmt_stats { ++ uint32 num_processed_packets; /* Number of packets processed */ ++ uint32 num_processed_bytes; /* Number of bytes processed */ ++ uint32 num_queued_packets; /* Number of packets in queue */ ++ uint32 num_queued_bytes; /* Number of bytes in queue */ ++ uint32 num_discarded_packets; /* Number of packets discarded from queue */ ++} trf_mgmt_stats_t; ++ ++/* Traffic management statisics array */ ++typedef struct trf_mgmt_stats_array { ++ trf_mgmt_stats_t tx_queue_stats[TRF_MGMT_MAX_PRIORITIES]; ++ trf_mgmt_stats_t rx_queue_stats[TRF_MGMT_MAX_PRIORITIES]; ++} trf_mgmt_stats_array_t; ++ ++#endif /* _wlioctl_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/aiutils.c 
b/drivers/net/ethernet/broadcom/gmac/src/shared/aiutils.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/aiutils.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/aiutils.c 2017-11-09 17:53:44.023292000 +0800 +@@ -0,0 +1,1028 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * Misc utility routines for accessing chip-specific features ++ * of the SiliconBackplane-based Broadcom chips. ++ * ++ * $Id: aiutils.c 327582 2012-04-14 05:02:37Z kenlo $ ++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "siutils_priv.h" ++#if defined(CONFIG_MACH_HX4) ++#include "hx4_erom.h" ++#elif defined(CONFIG_MACH_HR2) ++#include "hr2_erom.h" ++#elif defined(CONFIG_MACH_KT2) ++#include "kt2_erom.h" ++#elif defined(CONFIG_MACH_GH) ++#include "gh_erom.h" ++#elif defined(CONFIG_MACH_SB2) ++#include "sb2_erom.h" ++#elif defined(CONFIG_MACH_HR3) ++#include "hr3_erom.h" ++#elif defined(CONFIG_MACH_GH2) ++#include "gh2_erom.h" ++#endif ++ ++/* EROM parsing */ ++static uint32 ++get_erom_ent(si_t *sih, uint32 **eromptr, uint32 mask, uint32 match) ++{ ++ uint32 ent; ++ uint inv = 0, nom = 0; ++ ++ while (TRUE) { ++ ent = **eromptr; ++ ++ (*eromptr)++; ++ ++ if (mask == 0) { ++ break; ++ } ++ ++ if ((ent & ER_VALID) == 0) { ++ inv++; ++ continue; ++ } ++ ++ if (ent == (ER_END | ER_VALID)) { ++ break; ++ } ++ ++ if ((ent & mask) == match) { ++ break; ++ } ++ ++ nom++; ++ } ++ ++ SI_VMSG(("%s: Returning ent 0x%08x\n", __FUNCTION__, ent)); ++ if (inv + nom) { ++ SI_VMSG((" after %d invalid and %d non-matching entries\n", inv, nom)); ++ } ++ return ent; ++} ++ ++static uint32 ++get_asd(si_t *sih, uint32 **eromptr, uint sp, uint ad, uint st, uint32 *addrl, uint32 *addrh, ++ uint32 *sizel, uint32 *sizeh) ++{ ++ uint32 asd, sz, szd; ++ ++ asd = get_erom_ent(sih, eromptr, ER_VALID, ER_VALID); ++ if (((asd & ER_TAG1) != ER_ADD) || ++ (((asd & AD_SP_MASK) >> AD_SP_SHIFT) != sp) || ++ 
((asd & AD_ST_MASK) != st)) { ++ /* This is not what we want, "push" it back */ ++ (*eromptr)--; ++ return 0; ++ } ++ *addrl = asd & AD_ADDR_MASK; ++ if (asd & AD_AG32) { ++ *addrh = get_erom_ent(sih, eromptr, 0, 0); ++ } else { ++ *addrh = 0; ++ } ++ *sizeh = 0; ++ sz = asd & AD_SZ_MASK; ++ if (sz == AD_SZ_SZD) { ++ szd = get_erom_ent(sih, eromptr, 0, 0); ++ *sizel = szd & SD_SZ_MASK; ++ if (szd & SD_SG32) { ++ *sizeh = get_erom_ent(sih, eromptr, 0, 0); ++ } ++ } else { ++ *sizel = AD_SZ_BASE << (sz >> AD_SZ_SHIFT); ++ } ++ ++ SI_VMSG((" SP %d, ad %d: st = %d, 0x%08x_0x%08x @ 0x%08x_0x%08x\n", ++ sp, ad, st, *sizeh, *sizel, *addrh, *addrl)); ++ ++ return asd; ++} ++ ++/* parse the enumeration rom to identify all cores */ ++void ++BCMATTACHFN(ai_scan)(si_t *sih, void *regs, uint devid) ++{ ++ si_info_t *sii = SI_INFO(sih); ++ chipcregs_t *cc = (chipcregs_t *)regs; ++ uint32 erombase, *eromptr, *eromlim; ++ ++ erombase = R_REG(sii->osh, &cc->eromptr); ++ ++ switch (BUSTYPE(sih->bustype)) { ++ case SI_BUS: ++#if defined(CONFIG_MACH_HX4) ++ eromptr = hx4_erom; ++#elif defined(CONFIG_MACH_HR2) ++ eromptr = hr2_erom; ++#elif defined(CONFIG_MACH_KT2) ++ eromptr = kt2_erom; ++#elif defined(CONFIG_MACH_GH) ++ eromptr = gh_erom; ++#elif defined(CONFIG_MACH_SB2) ++ eromptr = sb2_erom; ++#elif defined(CONFIG_MACH_HR3) ++ eromptr = hr3_erom; ++#elif defined(CONFIG_MACH_GH2) ++ eromptr = gh2_erom; ++#endif ++ break; ++ ++ case PCI_BUS: ++ /* Set wrappers address */ ++ sii->curwrap = (void *)((uintptr)regs + SI_CORE_SIZE); ++ ++ /* Now point the window at the erom */ ++ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, erombase); ++ eromptr = regs; ++ break; ++ ++ case PCMCIA_BUS: ++ default: ++ SI_ERROR(("Don't know how to do AXI enumertion on bus %d\n", sih->bustype)); ++ ASSERT(0); ++ return; ++ } ++ eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); ++ ++ SI_VMSG(("ai_scan: regs = 0x%p, erombase = 0x%08x, eromptr = 0x%p, eromlim = 0x%p\n", ++ regs, erombase, eromptr, 
eromlim)); ++ while (eromptr < eromlim) { ++ uint32 cia, cib, cid, mfg, crev, nmw, nsw, nmp, nsp; ++ uint32 mpd, asd, addrl, addrh, sizel, sizeh; ++ uint i, j, idx; ++ bool br; ++ ++ br = FALSE; ++ ++ /* Grok a component */ ++ cia = get_erom_ent(sih, &eromptr, ER_TAG, ER_CI); ++ if (cia == (ER_END | ER_VALID)) { ++ SI_VMSG(("Found END of erom after %d cores\n", sii->numcores)); ++ return; ++ } ++ ++ cib = get_erom_ent(sih, &eromptr, 0, 0); ++ ++ if ((cib & ER_TAG) != ER_CI) { ++ SI_ERROR(("CIA not followed by CIB\n")); ++ goto error; ++ } ++ ++ cid = (cia & CIA_CID_MASK) >> CIA_CID_SHIFT; ++ mfg = (cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT; ++ crev = (cib & CIB_REV_MASK) >> CIB_REV_SHIFT; ++ nmw = (cib & CIB_NMW_MASK) >> CIB_NMW_SHIFT; ++ nsw = (cib & CIB_NSW_MASK) >> CIB_NSW_SHIFT; ++ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; ++ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; ++ ++#ifdef BCMDBG_SI ++ SI_VMSG(("Found component 0x%04x/0x%04x rev %d at erom addr 0x%p, with nmw = %d, " ++ "nsw = %d, nmp = %d & nsp = %d\n", ++ mfg, cid, crev, eromptr - 1, nmw, nsw, nmp, nsp)); ++#else ++ BCM_REFERENCE(crev); ++#endif ++ ++ if (((mfg == MFGID_ARM) && (cid == DEF_AI_COMP)) || (nsp == 0)) { ++ continue; ++ } ++ ++ if ((nmw + nsw == 0)) { ++ /* A component which is not a core */ ++ /* XXX: Should record some info */ ++ if (cid == OOB_ROUTER_CORE_ID) { ++ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, ++ &addrl, &addrh, &sizel, &sizeh); ++ if (asd != 0) { ++ sii->oob_router = addrl; ++ } ++ } ++ if (cid != GMAC_COMMON_4706_CORE_ID) { ++ continue; ++ } ++ } ++ ++ idx = sii->numcores; ++ ++ sii->cia[idx] = cia; ++ sii->cib[idx] = cib; ++ sii->coreid[idx] = cid; ++ ++ for (i = 0; i < nmp; i++) { ++ mpd = get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); ++ if ((mpd & ER_TAG) != ER_MP) { ++ SI_ERROR(("Not enough MP entries for component 0x%x\n", cid)); ++ goto error; ++ } ++ /* XXX: Record something? 
*/ ++ SI_VMSG((" Master port %d, mp: %d id: %d\n", i, ++ (mpd & MPD_MP_MASK) >> MPD_MP_SHIFT, ++ (mpd & MPD_MUI_MASK) >> MPD_MUI_SHIFT)); ++ } ++ ++ /* First Slave Address Descriptor should be port 0: ++ * the main register space for the core ++ */ ++ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); ++ if (asd == 0) { ++ do { ++ /* Try again to see if it is a bridge */ ++ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, ++ &sizel, &sizeh); ++ if (asd != 0) { ++ br = TRUE; ++ } else { ++ if (br == TRUE) { ++ break; ++ } else if ((addrh != 0) || (sizeh != 0) || ++ (sizel != SI_CORE_SIZE)) { ++ /* XXX: Could we have sizel != 4KB? */ ++ SI_ERROR(("addrh = 0x%x\t sizeh = 0x%x\t size1 =" ++ "0x%x\n", addrh, sizeh, sizel)); ++ SI_ERROR(("First Slave ASD for" ++ "core 0x%04x malformed " ++ "(0x%08x)\n", cid, asd)); ++ goto error; ++ } ++ } ++ } while (1); ++ } ++ sii->coresba[idx] = addrl; ++ sii->coresba_size[idx] = sizel; ++ /* Get any more ASDs in port 0 */ ++ j = 1; ++ do { ++ asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, ++ &sizel, &sizeh); ++ if ((asd != 0) && (j == 1) && (sizel == SI_CORE_SIZE)) { ++ sii->coresba2[idx] = addrl; ++ sii->coresba2_size[idx] = sizel; ++ } ++ j++; ++ } while (asd != 0); ++ ++ /* Go through the ASDs for other slave ports */ ++ for (i = 1; i < nsp; i++) { ++ j = 0; ++ do { ++ asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh, ++ &sizel, &sizeh); ++ /* XXX: Should record them so we can do error recovery later */ ++ ++ if (asd == 0) ++ break; ++ j++; ++ } while (1); ++ if (j == 0) { ++ SI_ERROR((" SP %d has no address descriptors\n", i)); ++ goto error; ++ } ++ } ++ ++ /* Now get master wrappers */ ++ for (i = 0; i < nmw; i++) { ++ asd = get_asd(sih, &eromptr, i, 0, AD_ST_MWRAP, &addrl, &addrh, ++ &sizel, &sizeh); ++ if (asd == 0) { ++ SI_ERROR(("Missing descriptor for MW %d\n", i)); ++ goto error; ++ } ++ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { ++ 
SI_ERROR(("Master wrapper %d is not 4KB\n", i)); ++ goto error; ++ } ++ if (i == 0) { ++ sii->wrapba[idx] = addrl; ++ } ++ } ++ ++ /* And finally slave wrappers */ ++ for (i = 0; i < nsw; i++) { ++ uint fwp = (nsp == 1) ? 0 : 1; ++ asd = get_asd(sih, &eromptr, fwp + i, 0, AD_ST_SWRAP, &addrl, &addrh, ++ &sizel, &sizeh); ++ if (asd == 0) { ++ SI_ERROR(("Missing descriptor for SW %d\n", i)); ++ goto error; ++ } ++ if ((sizeh != 0) || (sizel != SI_CORE_SIZE)) { ++ SI_ERROR(("Slave wrapper %d is not 4KB\n", i)); ++ goto error; ++ } ++ if ((nmw == 0) && (i == 0)) { ++ sii->wrapba[idx] = addrl; ++ } ++ } ++ ++ /* Don't record bridges */ ++ if (br) { ++ continue; ++ } ++ ++ /* Done with core */ ++ sii->numcores++; ++ } ++ ++ SI_ERROR(("Reached end of erom without finding END")); ++ ++error: ++ sii->numcores = 0; ++ return; ++} ++ ++/* This function changes the logical "focus" to the indicated core. ++ * Return the current core's virtual address. ++ */ ++void * ++ai_setcoreidx(si_t *sih, uint coreidx) ++{ ++ si_info_t *sii = SI_INFO(sih); ++ uint32 addr, wrap; ++ void *regs; ++ ++ if (coreidx >= MIN(sii->numcores, SI_MAXCORES)) { ++ return (NULL); ++ } ++ ++ addr = sii->coresba[coreidx]; ++ wrap = sii->wrapba[coreidx]; ++ ++ /* ++ * If the user has provided an interrupt mask enabled function, ++ * then assert interrupts are disabled before switching the core. 
++ */ ++ ASSERT((sii->intrsenabled_fn == NULL) || !(*(sii)->intrsenabled_fn)((sii)->intr_arg)); ++ ++ switch (BUSTYPE(sih->bustype)) { ++ case SI_BUS: ++ /* map new one */ ++ if (!sii->regs[coreidx]) { ++ sii->regs[coreidx] = REG_MAP(addr, SI_CORE_SIZE); ++ ASSERT(GOODREGS(sii->regs[coreidx])); ++ } ++ sii->curmap = regs = sii->regs[coreidx]; ++ if (!sii->wrappers[coreidx]) { ++ sii->wrappers[coreidx] = REG_MAP(wrap, SI_CORE_SIZE); ++ ASSERT(GOODREGS(sii->wrappers[coreidx])); ++ } ++ sii->curwrap = sii->wrappers[coreidx]; ++ break; ++ ++ case PCI_BUS: ++ /* point bar0 window */ ++ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN, 4, addr); ++ regs = sii->curmap; ++ /* point bar0 2nd 4KB window to the primary wrapper */ ++ if (PCIE_GEN2(sii)) { ++ OSL_PCI_WRITE_CONFIG(sii->osh, PCIE2_BAR0_WIN2, 4, wrap); ++ } else { ++ OSL_PCI_WRITE_CONFIG(sii->osh, PCI_BAR0_WIN2, 4, wrap); ++ } ++ break; ++ ++ case PCMCIA_BUS: ++ default: ++ ASSERT(0); ++ regs = NULL; ++ break; ++ } ++ ++ sii->curmap = regs; ++ sii->curidx = coreidx; ++ ++ return regs; ++} ++ ++void ++ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size) ++{ ++ si_info_t *sii = SI_INFO(sih); ++ chipcregs_t *cc = NULL; ++ uint32 erombase, *eromptr, *eromlim; ++ uint i, j, cidx; ++ uint32 cia, cib, nmp, nsp; ++ uint32 asd, addrl, addrh, sizel, sizeh; ++ ++ for (i = 0; i < sii->numcores; i++) { ++ if (sii->coreid[i] == CC_CORE_ID) { ++ cc = (chipcregs_t *)sii->regs[i]; ++ break; ++ } ++ } ++ if (cc == NULL) { ++ goto error; ++ } ++ ++ erombase = R_REG(sii->osh, &cc->eromptr); ++ eromptr = (uint32 *)REG_MAP(erombase, SI_CORE_SIZE); ++ eromlim = eromptr + (ER_REMAPCONTROL / sizeof(uint32)); ++ ++ cidx = sii->curidx; ++ cia = sii->cia[cidx]; ++ cib = sii->cib[cidx]; ++ ++ nmp = (cib & CIB_NMP_MASK) >> CIB_NMP_SHIFT; ++ nsp = (cib & CIB_NSP_MASK) >> CIB_NSP_SHIFT; ++ ++ /* scan for cores */ ++ while (eromptr < eromlim) { ++ if ((get_erom_ent(sih, &eromptr, ER_TAG, ER_CI) == cia) && ++ (get_erom_ent(sih, 
&eromptr, 0, 0) == cib)) { ++ break; ++ } ++ } ++ ++ /* skip master ports */ ++ for (i = 0; i < nmp; i++) { ++ get_erom_ent(sih, &eromptr, ER_VALID, ER_VALID); ++ } ++ ++ /* Skip ASDs in port 0 */ ++ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_SLAVE, &addrl, &addrh, &sizel, &sizeh); ++ if (asd == 0) { ++ /* Try again to see if it is a bridge */ ++ asd = get_asd(sih, &eromptr, 0, 0, AD_ST_BRIDGE, &addrl, &addrh, ++ &sizel, &sizeh); ++ } ++ ++ j = 1; ++ do { ++ asd = get_asd(sih, &eromptr, 0, j, AD_ST_SLAVE, &addrl, &addrh, ++ &sizel, &sizeh); ++ j++; ++ } while (asd != 0); ++ ++ /* Go through the ASDs for other slave ports */ ++ for (i = 1; i < nsp; i++) { ++ j = 0; ++ do { ++ asd = get_asd(sih, &eromptr, i, j, AD_ST_SLAVE, &addrl, &addrh, ++ &sizel, &sizeh); ++ if (asd == 0) { ++ break; ++ } ++ ++ if (!asidx--) { ++ *addr = addrl; ++ *size = sizel; ++ return; ++ } ++ j++; ++ } while (1); ++ ++ if (j == 0) { ++ SI_ERROR((" SP %d has no address descriptors\n", i)); ++ break; ++ } ++ } ++ ++error: ++ *size = 0; ++ return; ++} ++ ++/* Return the number of address spaces in current core */ ++int ++ai_numaddrspaces(si_t *sih) ++{ ++ /* XXX: Either save ot or parse the EROM on demand */ ++ return 2; ++} ++ ++/* Return the address of the nth address space in the current core */ ++uint32 ++ai_addrspace(si_t *sih, uint asidx) ++{ ++ si_info_t *sii; ++ uint cidx; ++ ++ sii = SI_INFO(sih); ++ cidx = sii->curidx; ++ ++ if (asidx == 0) { ++ return sii->coresba[cidx]; ++ } else if (asidx == 1) { ++ return sii->coresba2[cidx]; ++ } else { ++ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", ++ __FUNCTION__, asidx)); ++ return 0; ++ } ++} ++ ++/* Return the size of the nth address space in the current core */ ++uint32 ++ai_addrspacesize(si_t *sih, uint asidx) ++{ ++ si_info_t *sii; ++ uint cidx; ++ ++ sii = SI_INFO(sih); ++ cidx = sii->curidx; ++ ++ if (asidx == 0) { ++ return sii->coresba_size[cidx]; ++ } else if (asidx == 1) { ++ return sii->coresba2_size[cidx]; 
++ } else { ++ SI_ERROR(("%s: Need to parse the erom again to find addr space %d\n", ++ __FUNCTION__, asidx)); ++ return 0; ++ } ++} ++ ++uint ++ai_flag(si_t *sih) ++{ ++ si_info_t *sii; ++ aidmp_t *ai; ++ ++ sii = SI_INFO(sih); ++ ai = sii->curwrap; ++ return (R_REG(sii->osh, &ai->oobselouta30) & 0x1f); ++} ++ ++void ++ai_setint(si_t *sih, int siflag) ++{ ++ /* XXX: Figure out OOB stuff */ ++} ++ ++uint ++ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val) ++{ ++ si_info_t *sii = SI_INFO(sih); ++ uint32 *map = (uint32 *) sii->curwrap; ++ ++ if (mask || val) { ++ uint32 w = R_REG(sii->osh, map+(offset/4)); ++ w &= ~mask; ++ w |= val; ++ W_REG(sii->osh, map+(offset/4), val); ++ } ++ ++ return (R_REG(sii->osh, map+(offset/4))); ++} ++ ++uint ++ai_corevendor(si_t *sih) ++{ ++ si_info_t *sii; ++ uint32 cia; ++ ++ sii = SI_INFO(sih); ++ cia = sii->cia[sii->curidx]; ++ return ((cia & CIA_MFG_MASK) >> CIA_MFG_SHIFT); ++} ++ ++uint ++ai_corerev(si_t *sih) ++{ ++ si_info_t *sii; ++ uint32 cib; ++ ++ sii = SI_INFO(sih); ++ cib = sii->cib[sii->curidx]; ++ return ((cib & CIB_REV_MASK) >> CIB_REV_SHIFT); ++} ++ ++bool ++ai_iscoreup(si_t *sih) ++{ ++ si_info_t *sii; ++ aidmp_t *ai; ++ ++ sii = SI_INFO(sih); ++ ai = sii->curwrap; ++ ++ return (((R_REG(sii->osh, &ai->ioctrl) & (SICF_FGC | SICF_CLOCK_EN)) == SICF_CLOCK_EN) && ++ ((R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) == 0)); ++} ++ ++/* ++ * Switch to 'coreidx', issue a single arbitrary 32bit register mask&set operation, ++ * switch back to the original core, and return the new value. ++ * ++ * When using the silicon backplane, no fiddling with interrupts or core switches is needed. ++ * ++ * Also, when using pci/pcie, we can optimize away the core switching for pci registers ++ * and (on newer pci cores) chipcommon registers. 
++ */ ++uint ++ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) ++{ ++ uint origidx = 0; ++ uint32 *r = NULL; ++ uint w; ++ uint intr_val = 0; ++ bool fast = FALSE; ++ si_info_t *sii; ++ ++ sii = SI_INFO(sih); ++ ++ ASSERT(GOODIDX(coreidx)); ++ ASSERT(regoff < SI_CORE_SIZE); ++ ASSERT((val & ~mask) == 0); ++ ++ if (coreidx >= SI_MAXCORES) { ++ return 0; ++ } ++ ++ if (BUSTYPE(sih->bustype) == SI_BUS) { ++ /* If internal bus, we can always get at everything */ ++ fast = TRUE; ++ /* map if does not exist */ ++ if (!sii->regs[coreidx]) { ++ sii->regs[coreidx] = REG_MAP(sii->coresba[coreidx], ++ SI_CORE_SIZE); ++ ASSERT(GOODREGS(sii->regs[coreidx])); ++ } ++ r = (uint32 *)((uchar *)sii->regs[coreidx] + regoff); ++ } else if (BUSTYPE(sih->bustype) == PCI_BUS) { ++ /* If pci/pcie, we can get at pci/pcie regs and on newer cores to chipc */ ++ ++ if ((sii->coreid[coreidx] == CC_CORE_ID) && SI_FAST(sii)) { ++ /* Chipc registers are mapped at 12KB */ ++ ++ fast = TRUE; ++ r = (uint32 *)((char *)sii->curmap + PCI_16KB0_CCREGS_OFFSET + regoff); ++ } else if (sii->pub.buscoreidx == coreidx) { ++ /* pci registers are at either in the last 2KB of an 8KB window ++ * or, in pcie and pci rev 13 at 8KB ++ */ ++ fast = TRUE; ++ if (SI_FAST(sii)) { ++ r = (uint32 *)((char *)sii->curmap + ++ PCI_16KB0_PCIREGS_OFFSET + regoff); ++ } else { ++ r = (uint32 *)((char *)sii->curmap + ++ ((regoff >= SBCONFIGOFF) ? 
++ PCI_BAR0_PCISBR_OFFSET : PCI_BAR0_PCIREGS_OFFSET) + ++ regoff); ++ } ++ } ++ } ++ ++ if (!fast) { ++ INTR_OFF(sii, intr_val); ++ ++ /* save current core index */ ++ origidx = si_coreidx(&sii->pub); ++ ++ /* switch core */ ++ r = (uint32*) ((uchar*) ai_setcoreidx(&sii->pub, coreidx) + regoff); ++ } ++ ASSERT(r != NULL); ++ ++ /* mask and set */ ++ if (mask || val) { ++ w = (R_REG(sii->osh, r) & ~mask) | val; ++ W_REG(sii->osh, r, w); ++ } ++ ++ /* readback */ ++ w = R_REG(sii->osh, r); ++ ++ if (!fast) { ++ /* restore core index */ ++ if (origidx != coreidx) { ++ ai_setcoreidx(&sii->pub, origidx); ++ } ++ ++ INTR_RESTORE(sii, intr_val); ++ } ++ ++ return (w); ++} ++ ++void ++ai_core_disable(si_t *sih, uint32 bits) ++{ ++ si_info_t *sii; ++ volatile uint32 dummy; ++ uint32 status; ++ aidmp_t *ai; ++ ++ sii = SI_INFO(sih); ++ ++ ASSERT(GOODREGS(sii->curwrap)); ++ ai = sii->curwrap; ++ ++ /* if core is already in reset, just return */ ++ if (R_REG(sii->osh, &ai->resetctrl) & AIRC_RESET) { ++ return; ++ } ++ ++ /* ensure there are no pending backplane operations */ ++ SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 300); ++ ++ /* if pending backplane ops still, try waiting longer */ ++ if (status != 0) { ++ /* 300usecs was sufficient to allow backplane ops to clear for big hammer */ ++ /* during driver load we may need more time */ ++ SPINWAIT(((status = R_REG(sii->osh, &ai->resetstatus)) != 0), 10000); ++ /* if still pending ops, continue on and try disable anyway */ ++ /* this is in big hammer path, so don't call wl_reinit in this case... 
*/ ++#ifdef BCMDBG ++ if (status != 0) { ++ printf("%s: WARN: resetstatus=%0x on core disable\n", __FUNCTION__, status); ++ } ++#endif ++ } ++ ++ W_REG(sii->osh, &ai->ioctrl, bits); ++ dummy = R_REG(sii->osh, &ai->ioctrl); ++ BCM_REFERENCE(dummy); ++ OSL_DELAY(10); ++ ++ W_REG(sii->osh, &ai->resetctrl, AIRC_RESET); ++ dummy = R_REG(sii->osh, &ai->resetctrl); ++ BCM_REFERENCE(dummy); ++ OSL_DELAY(1); ++} ++ ++/* reset and re-enable a core ++ * inputs: ++ * bits - core specific bits that are set during and after reset sequence ++ * resetbits - core specific bits that are set only during reset sequence ++ */ ++void ++ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits) ++{ ++ si_info_t *sii; ++ aidmp_t *ai; ++ volatile uint32 dummy; ++ ++ sii = SI_INFO(sih); ++ ASSERT(GOODREGS(sii->curwrap)); ++ ai = sii->curwrap; ++ ++#ifdef CONFIG_BCM_IPROC_GMAC_ACP ++ bits = resetbits = R_REG(sii->osh, &ai->ioctrl) & 0xFFFFFFFC; ++#endif /* CONFIG_BCM_IPROC_GMAC_ACP */ ++ ++ /* ++ * Must do the disable sequence first to work for arbitrary current core state. ++ */ ++ ai_core_disable(sih, (bits | resetbits)); ++ ++ /* ++ * Now do the initialization sequence. 
++ */ ++ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_FGC | SICF_CLOCK_EN)); ++ dummy = R_REG(sii->osh, &ai->ioctrl); ++ BCM_REFERENCE(dummy); ++ ++ W_REG(sii->osh, &ai->resetctrl, 0); ++ dummy = R_REG(sii->osh, &ai->resetctrl); ++ BCM_REFERENCE(dummy); ++ OSL_DELAY(1); ++ ++ W_REG(sii->osh, &ai->ioctrl, (bits | SICF_CLOCK_EN)); ++ dummy = R_REG(sii->osh, &ai->ioctrl); ++ BCM_REFERENCE(dummy); ++ OSL_DELAY(1); ++} ++ ++void ++ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) ++{ ++ si_info_t *sii; ++ aidmp_t *ai; ++ uint32 w; ++ ++ sii = SI_INFO(sih); ++ ++ ASSERT(GOODREGS(sii->curwrap)); ++ ai = sii->curwrap; ++ ++ ASSERT((val & ~mask) == 0); ++ ++ if (mask || val) { ++ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); ++ W_REG(sii->osh, &ai->ioctrl, w); ++ } ++} ++ ++uint32 ++ai_core_cflags(si_t *sih, uint32 mask, uint32 val) ++{ ++ si_info_t *sii; ++ aidmp_t *ai; ++ uint32 w; ++ ++ sii = SI_INFO(sih); ++ ++ ASSERT(GOODREGS(sii->curwrap)); ++ ai = sii->curwrap; ++ ++ ASSERT((val & ~mask) == 0); ++ ++ if (mask || val) { ++ w = ((R_REG(sii->osh, &ai->ioctrl) & ~mask) | val); ++ W_REG(sii->osh, &ai->ioctrl, w); ++ } ++ ++ return R_REG(sii->osh, &ai->ioctrl); ++} ++ ++uint32 ++ai_core_sflags(si_t *sih, uint32 mask, uint32 val) ++{ ++ si_info_t *sii; ++ aidmp_t *ai; ++ uint32 w; ++ ++ sii = SI_INFO(sih); ++ ++ ASSERT(GOODREGS(sii->curwrap)); ++ ai = sii->curwrap; ++ ++ ASSERT((val & ~mask) == 0); ++ ASSERT((mask & ~SISF_CORE_BITS) == 0); ++ ++ if (mask || val) { ++ w = ((R_REG(sii->osh, &ai->iostatus) & ~mask) | val); ++ W_REG(sii->osh, &ai->iostatus, w); ++ } ++ ++ return R_REG(sii->osh, &ai->iostatus); ++} ++ ++#if defined(BCMDBG) || defined(BCMDBG_DUMP) ++/* print interesting aidmp registers */ ++void ++ai_dumpregs(si_t *sih, struct bcmstrbuf *b) ++{ ++ si_info_t *sii; ++ osl_t *osh; ++ aidmp_t *ai; ++ uint i; ++ ++ sii = SI_INFO(sih); ++ osh = sii->osh; ++ ++ for (i = 0; i < sii->numcores; i++) { ++ si_setcoreidx(&sii->pub, i); ++ ai = sii->curwrap; ++ ++ 
bcm_bprintf(b, "core 0x%x: \n", sii->coreid[i]); ++ bcm_bprintf(b, "ioctrlset 0x%x ioctrlclear 0x%x ioctrl 0x%x iostatus 0x%x" ++ "ioctrlwidth 0x%x iostatuswidth 0x%x\n" ++ "resetctrl 0x%x resetstatus 0x%x resetreadid 0x%x resetwriteid 0x%x\n" ++ "errlogctrl 0x%x errlogdone 0x%x errlogstatus 0x%x" ++ "errlogaddrlo 0x%x errlogaddrhi 0x%x\n" ++ "errlogid 0x%x errloguser 0x%x errlogflags 0x%x\n" ++ "intstatus 0x%x config 0x%x itcr 0x%x\n", ++ R_REG(osh, &ai->ioctrlset), ++ R_REG(osh, &ai->ioctrlclear), ++ R_REG(osh, &ai->ioctrl), ++ R_REG(osh, &ai->iostatus), ++ R_REG(osh, &ai->ioctrlwidth), ++ R_REG(osh, &ai->iostatuswidth), ++ R_REG(osh, &ai->resetctrl), ++ R_REG(osh, &ai->resetstatus), ++ R_REG(osh, &ai->resetreadid), ++ R_REG(osh, &ai->resetwriteid), ++ R_REG(osh, &ai->errlogctrl), ++ R_REG(osh, &ai->errlogdone), ++ R_REG(osh, &ai->errlogstatus), ++ R_REG(osh, &ai->errlogaddrlo), ++ R_REG(osh, &ai->errlogaddrhi), ++ R_REG(osh, &ai->errlogid), ++ R_REG(osh, &ai->errloguser), ++ R_REG(osh, &ai->errlogflags), ++ R_REG(osh, &ai->intstatus), ++ R_REG(osh, &ai->config), ++ R_REG(osh, &ai->itcr)); ++ } ++} ++#endif /* BCMDBG || BCMDBG_DUMP */ ++ ++#ifdef BCMDBG ++static void ++_ai_view(osl_t *osh, aidmp_t *ai, uint32 cid, uint32 addr, bool verbose) ++{ ++ uint32 config; ++ ++ config = R_REG(osh, &ai->config); ++ SI_ERROR(("\nCore ID: 0x%x, addr 0x%x, config 0x%x\n", cid, addr, config)); ++ ++ if (config & AICFG_RST) { ++ SI_ERROR(("resetctrl 0x%x, resetstatus 0x%x, resetreadid 0x%x, resetwriteid 0x%x\n", ++ R_REG(osh, &ai->resetctrl), R_REG(osh, &ai->resetstatus), ++ R_REG(osh, &ai->resetreadid), R_REG(osh, &ai->resetwriteid))); ++ } ++ ++ if (config & AICFG_IOC) { ++ SI_ERROR(("ioctrl 0x%x, width %d\n", R_REG(osh, &ai->ioctrl), ++ R_REG(osh, &ai->ioctrlwidth))); ++ } ++ ++ if (config & AICFG_IOS) { ++ SI_ERROR(("iostatus 0x%x, width %d\n", R_REG(osh, &ai->iostatus), ++ R_REG(osh, &ai->iostatuswidth))); ++ } ++ ++ if (config & AICFG_ERRL) { ++ SI_ERROR(("errlogctrl 0x%x, 
errlogdone 0x%x, errlogstatus 0x%x, intstatus 0x%x\n", ++ R_REG(osh, &ai->errlogctrl), R_REG(osh, &ai->errlogdone), ++ R_REG(osh, &ai->errlogstatus), R_REG(osh, &ai->intstatus))); ++ SI_ERROR(("errlogid 0x%x, errloguser 0x%x, errlogflags 0x%x, errlogaddr " ++ "0x%x/0x%x\n", ++ R_REG(osh, &ai->errlogid), R_REG(osh, &ai->errloguser), ++ R_REG(osh, &ai->errlogflags), R_REG(osh, &ai->errlogaddrhi), ++ R_REG(osh, &ai->errlogaddrlo))); ++ } ++ ++ if (verbose && (config & AICFG_OOB)) { ++ SI_ERROR(("oobselina30 0x%x, oobselina74 0x%x\n", ++ R_REG(osh, &ai->oobselina30), R_REG(osh, &ai->oobselina74))); ++ SI_ERROR(("oobselinb30 0x%x, oobselinb74 0x%x\n", ++ R_REG(osh, &ai->oobselinb30), R_REG(osh, &ai->oobselinb74))); ++ SI_ERROR(("oobselinc30 0x%x, oobselinc74 0x%x\n", ++ R_REG(osh, &ai->oobselinc30), R_REG(osh, &ai->oobselinc74))); ++ SI_ERROR(("oobselind30 0x%x, oobselind74 0x%x\n", ++ R_REG(osh, &ai->oobselind30), R_REG(osh, &ai->oobselind74))); ++ SI_ERROR(("oobselouta30 0x%x, oobselouta74 0x%x\n", ++ R_REG(osh, &ai->oobselouta30), R_REG(osh, &ai->oobselouta74))); ++ SI_ERROR(("oobseloutb30 0x%x, oobseloutb74 0x%x\n", ++ R_REG(osh, &ai->oobseloutb30), R_REG(osh, &ai->oobseloutb74))); ++ SI_ERROR(("oobseloutc30 0x%x, oobseloutc74 0x%x\n", ++ R_REG(osh, &ai->oobseloutc30), R_REG(osh, &ai->oobseloutc74))); ++ SI_ERROR(("oobseloutd30 0x%x, oobseloutd74 0x%x\n", ++ R_REG(osh, &ai->oobseloutd30), R_REG(osh, &ai->oobseloutd74))); ++ SI_ERROR(("oobsynca 0x%x, oobseloutaen 0x%x\n", ++ R_REG(osh, &ai->oobsynca), R_REG(osh, &ai->oobseloutaen))); ++ SI_ERROR(("oobsyncb 0x%x, oobseloutben 0x%x\n", ++ R_REG(osh, &ai->oobsyncb), R_REG(osh, &ai->oobseloutben))); ++ SI_ERROR(("oobsyncc 0x%x, oobseloutcen 0x%x\n", ++ R_REG(osh, &ai->oobsyncc), R_REG(osh, &ai->oobseloutcen))); ++ SI_ERROR(("oobsyncd 0x%x, oobseloutden 0x%x\n", ++ R_REG(osh, &ai->oobsyncd), R_REG(osh, &ai->oobseloutden))); ++ SI_ERROR(("oobaextwidth 0x%x, oobainwidth 0x%x, oobaoutwidth 0x%x\n", ++ R_REG(osh, 
&ai->oobaextwidth), R_REG(osh, &ai->oobainwidth), ++ R_REG(osh, &ai->oobaoutwidth))); ++ SI_ERROR(("oobbextwidth 0x%x, oobbinwidth 0x%x, oobboutwidth 0x%x\n", ++ R_REG(osh, &ai->oobbextwidth), R_REG(osh, &ai->oobbinwidth), ++ R_REG(osh, &ai->oobboutwidth))); ++ SI_ERROR(("oobcextwidth 0x%x, oobcinwidth 0x%x, oobcoutwidth 0x%x\n", ++ R_REG(osh, &ai->oobcextwidth), R_REG(osh, &ai->oobcinwidth), ++ R_REG(osh, &ai->oobcoutwidth))); ++ SI_ERROR(("oobdextwidth 0x%x, oobdinwidth 0x%x, oobdoutwidth 0x%x\n", ++ R_REG(osh, &ai->oobdextwidth), R_REG(osh, &ai->oobdinwidth), ++ R_REG(osh, &ai->oobdoutwidth))); ++ } ++} ++ ++void ++ai_view(si_t *sih, bool verbose) ++{ ++ si_info_t *sii; ++ osl_t *osh; ++ aidmp_t *ai; ++ uint32 cid, addr; ++ ++ sii = SI_INFO(sih); ++ ai = sii->curwrap; ++ osh = sii->osh; ++ ++ cid = sii->coreid[sii->curidx]; ++ addr = sii->wrapba[sii->curidx]; ++ _ai_view(osh, ai, cid, addr, verbose); ++} ++ ++void ++ai_viewall(si_t *sih, bool verbose) ++{ ++ si_info_t *sii; ++ osl_t *osh; ++ aidmp_t *ai; ++ uint32 cid, addr; ++ uint i; ++ ++ sii = SI_INFO(sih); ++ osh = sii->osh; ++ for (i = 0; i < sii->numcores; i++) { ++ si_setcoreidx(sih, i); ++ ++ ai = sii->curwrap; ++ cid = sii->coreid[sii->curidx]; ++ addr = sii->wrapba[sii->curidx]; ++ _ai_view(osh, ai, cid, addr, verbose); ++ } ++} ++#endif /* BCMDBG */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_egphy28.c b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_egphy28.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_egphy28.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_egphy28.c 2017-11-09 17:53:44.024296000 +0800 +@@ -0,0 +1,352 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * These routines provide access to the external phy ++ * ++ */ ++#include ++#include ++#include 
"../../../mdio/iproc_mdio.h" ++#include "bcmiproc_phy.h" ++#include "bcmiproc_egphy28.h" ++ ++/* debug/trace */ ++//#define BCMDBG ++//#define BCMDBG_ERR ++#ifdef BCMDBG ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) printf args ++#elif defined(BCMDBG_ERR) ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) ++#else ++#define NET_ERROR(args) ++#define NET_TRACE(args) ++#endif /* BCMDBG */ ++#define NET_REG_TRACE(args) ++ ++extern u32 cmicd_schan_write(void __iomem *base, u32 ctrl, u32 addr, u32 val); ++extern u32 cmicd_schan_read(void __iomem *base, u32 ctrl, u32 addr); ++ ++ ++static int ++egphy28_rdb_reg_read(u32 phy_addr, u32 reg_addr, u16 *data) ++{ ++ int rv = SOC_E_NONE; ++ ++ /* MDIO write the RDB reg. address to reg.0x1E = */ ++ iproc_mii_write(MII_DEV_LOCAL, phy_addr, EGPHY28_REG_RDB_ADDR, ++ (0xffff & reg_addr)); ++ ++ /* MDIO read from reg.0x1F to get the RDB register's value as */ ++ iproc_mii_read(MII_DEV_LOCAL, phy_addr, EGPHY28_REG_RDB_DATA, data); ++ ++ return rv; ++} ++ ++static int ++egphy28_rdb_reg_write(u32 phy_addr, u32 reg_addr, u16 data) ++{ ++ int rv = SOC_E_NONE; ++ ++ /* MDIO write the RDB reg. 
address to reg.0x1E = */ ++ iproc_mii_write(MII_DEV_LOCAL, phy_addr, EGPHY28_REG_RDB_ADDR, ++ (0xffff & reg_addr)); ++ ++ /* MDIO write to reg.0x1F to set the RDB resister's value as */ ++ iproc_mii_write(MII_DEV_LOCAL, phy_addr, EGPHY28_REG_RDB_DATA, data); ++ ++ return rv; ++} ++ ++static void ++egphy28_rdb_reg_modify(u32 phy_addr, int reg_addr, u16 data, u16 mask) ++{ ++ u16 ori_data; ++ ++ egphy28_rdb_reg_read(phy_addr, reg_addr, &ori_data); ++ ori_data &= ~mask; ++ ori_data |= (data & mask); ++ egphy28_rdb_reg_write(phy_addr, reg_addr, ori_data); ++} ++ ++int ++egphy28_reg_read(u32 phy_addr, int reg_addr, u16 *data) ++{ ++ int rv = SOC_E_NONE; ++ iproc_mii_read(MII_DEV_LOCAL, phy_addr, reg_addr, data); ++ ++ return rv; ++} ++ ++int ++egphy28_reg_write(u32 phy_addr, int reg_addr, u16 data) ++{ ++ int rv = SOC_E_NONE; ++ iproc_mii_write(MII_DEV_LOCAL, phy_addr, reg_addr, data); ++ ++ return rv; ++} ++ ++static void ++egphy28_reg_modify(u32 phy_addr, int reg_addr, u16 data, u16 mask) ++{ ++ u16 ori_data; ++ ++ egphy28_reg_read(phy_addr, reg_addr, &ori_data); ++ ori_data &= ~mask; ++ ori_data |= (data & mask); ++ egphy28_reg_write(phy_addr, reg_addr, ori_data); ++} ++ ++static int ++egphy28_ge_reset(u32 phy_addr) ++{ ++ int rv = SOC_E_NONE; ++ u16 val; ++ ++ NET_TRACE(("%s: phy_addr %d\n", __FUNCTION__, phy_addr)); ++ ++ /* Reset the PHY */ ++ egphy28_reg_read(phy_addr, EGPHY28_COPPER_MII_CTRL, &val); ++ val |= BMCR_RESET; ++ egphy28_reg_write(phy_addr, EGPHY28_COPPER_MII_CTRL, val); ++ ++ SPINWAIT((!egphy28_reg_read(phy_addr, EGPHY28_COPPER_MII_CTRL, &val) && ++ (val & BMCR_RESET)), 100000); ++ ++ /* Check if out of reset */ ++ egphy28_reg_read(phy_addr, EGPHY28_COPPER_MII_CTRL, &val); ++ if (val & BMCR_RESET) { ++ NET_ERROR(("%s reset not complete\n", __FUNCTION__)); ++ rv = SOC_E_TIMEOUT; ++ } else { ++ NET_TRACE(("%s reset complete\n", __FUNCTION__)); ++ } ++ ++ return rv; ++} ++ ++ ++#if 0 ++static void ++cmid_schan_modify(void __iomem *base, u32 ctrl, u32 
addr, u32 val, u32 mask) ++{ ++ u32 ori_val; ++ ++ ori_val = cmicd_schan_read(base, ctrl, addr); ++ ori_val &= ~mask; ++ ori_val |= (val & mask); ++ cmicd_schan_write(base, ctrl, addr, ori_val); ++} ++#endif ++ ++ ++static int ++egphy28_ge_init(void __iomem *base, u32 phy_addr) ++{ ++ int rv = SOC_E_NONE; ++ /* ==== Power up PHY ==== */ ++#if 0 ++ /* Give initial value */ ++ /* TOP_QGPHY_CTRL_0.EXT_PWRDOWN[23:20] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033800, 0x0, 0x00F00000); ++ /* TOP_QGPHY_CTRL_2.GPHY_IDDQ_GLOBAL_PWR[18] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x40000, 0x040000); ++ /* TOP_QGPHY_CTRL_2.IDDQ_BIAS[5] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x0, 0x20); ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x0, 0x200000); ++ ++ /* TOP_QGPHY_CTRL_2.GPHY_IDDQ_GLOBAL_PWR[18] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x0, 0x040000); ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x200000, 0x200000); ++ ++ /* ==== Partial Power down other 3 PHYs ==== */ ++ ++ /* Give initial value */ ++ /* TOP_QGPHY_CTRL_0.EXT_PWRDOWN[22:20] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033800, 0x0, 0x00700000); ++ /* TOP_QGPHY_CTRL_2.GPHY_IDDQ_GLOBAL_PWR[18] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x40000, 0x040000); ++ /* TOP_QGPHY_CTRL_2.IDDQ_BIAS[5] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x0, 0x20); ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x0, 0x200000); ++ ++ /* TOP_QGPHY_CTRL_0.EXT_PWRDOWN[22:20] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033800, 0x00700000, 0x00700000); ++ /* TOP_QGPHY_CTRL_2.IDDQ_BIAS[5] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x20, 0x20); ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = HIGH */ ++ cmid_schan_modify(base, 
0x2c800200, 0x02030400, 0x200000, 0x200000); ++ ++ /* TOP_QGPHY_CTRL_2.GPHY_IDDQ_GLOBAL_PWR[18] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x0, 0x040000); ++ /* TOP_QGPHY_CTRL_2.IDDQ_BIAS[5] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x0, 0x20); ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x0, 0x200000); ++ ++ /* TOP_QGPHY_CTRL_2.GPHY_IDDQ_GLOBAL_PWR[18] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x040000, 0x040000); ++ /* TOP_QGPHY_CTRL_2.IDDQ_BIAS[5] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x20, 0x20); ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x200000, 0x200000); ++ ++ /* TOP_QGPHY_CTRL_0.EXT_PWRDOWN[23] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033800, 0x0, 0x00800000); ++ /* TOP_QGPHY_CTRL_2.GPHY_IDDQ_GLOBAL_PWR[18] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x0, 0x040000); ++ /* TOP_QGPHY_CTRL_2.IDDQ_BIAS[5] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02033400, 0x0, 0x20); ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = LOW */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x0, 0x200000); ++ ++ /* TOP_SOFT_RESET_REG.TOP_QGPHY_RST_L[21] = HIGH */ ++ cmid_schan_modify(base, 0x2c800200, 0x02030400, 0x200000, 0x200000); ++ ++ /* Reset the PHY (Register[0x00], bit 15 = 1) */ ++ egphy28_reg_modify(phy_addr, EGPHY28_COPPER_MII_CTRL, BMCR_RESET, BMCR_RESET); ++ ++ /* ++ * Enable direct RDB addressing mode, write to Expansion register ++ * 0x7E = 0x0000 ++ * - MDIO write to reg 0x17 = 0x0F7E ++ * - MDIO write to reg 0x15 = 0x0000 ++ */ ++ egphy28_reg_write(phy_addr, EGPHY28_RDB_ACCESS_ADDR_1, EGPHY28_RDB_ACCESS_DATA_1); ++ egphy28_reg_write(phy_addr, EGPHY28_RDB_ACCESS_ADDR_2, EGPHY28_RDB_ACCESS_DATA_2); ++ ++ /* Clear Reset the PHY (Register[0x00], bit 15 = 0) */ ++ egphy28_reg_modify(phy_addr, EGPHY28_COPPER_MII_CTRL, 0x0, BMCR_RESET); 
++ ++ /* Set MAC PHY interface to be GMII mode */ ++ egphy28_reg_modify(phy_addr, EGPGY28_MII_ECONTROL, 0x0, (1 << 15)); ++ ++ /* Set 1000BASE-T full-duplex and switch device port (Register[0x09], bit 9,10 = 1) */ ++ egphy28_reg_write(phy_addr, EGPHY28_MII_CTRL1000, ADVERTISE_1000FULL | REPEATER_DTE); ++ ++ /* Set Full-duplex, 1000BASE-T, Auto-Negoatiation, Restartr-AN ++ * (Register[0x00], bit 8, 6, 12, 9 = 1) ++ */ ++ egphy28_reg_write(phy_addr, EGPHY28_COPPER_MII_CTRL, (BMCR_FULLDPLX | BMCR_SPEED1000 | ++ BMCR_ANENABLE | BMCR_ANRESTART)); ++ ++ /* Disable super-isolate (RDB Register[0x02a], bit 5 = 0). in default */ ++ egphy28_rdb_reg_modify(phy_addr, 0x2a, 0x0, (1 << 5)); ++ ++ /* Remove power down (Register[0x00], bit 11 = 0). in default */ ++ egphy28_reg_modify(phy_addr, EGPHY28_COPPER_MII_CTRL, ~BMCR_PDOWN, BMCR_PDOWN); ++ ++ /* Enable LEDs to indicate traffic status (Register[0x10], bit 5 = 1) */ ++ egphy28_reg_modify(phy_addr, 0x10, (1 << 5), (1 << 5)); ++ ++ /* Enable extended packet length (4.5k through 25k) (RDB Register[0x28], bit 14 = 1) */ ++ egphy28_rdb_reg_modify(phy_addr, 0x28, (1 << 14), (1 << 14)); ++ ++ egphy28_rdb_reg_modify(phy_addr, 0x16, (1 << 0), (1 << 0)); ++ egphy28_rdb_reg_modify(phy_addr, 0x1b, (1 << 1), (1 << 1)); ++ ++ /* Configure LED selectors */ ++ /* Disable carrier extension */ ++ /* IEEE compliance setup */ ++ egphy28_rdb_reg_write(phy_addr, 0x1E4, 0x00C0); ++ egphy28_rdb_reg_write(phy_addr, 0x1E7, 0xB008); ++ egphy28_rdb_reg_write(phy_addr, 0x1E2, 0x02E3); ++ egphy28_rdb_reg_write(phy_addr, 0x1E0, 0x0D11); ++ egphy28_rdb_reg_write(phy_addr, 0x1E3, 0x7FC0); ++ egphy28_rdb_reg_write(phy_addr, 0x1EB, 0x6B40); ++ egphy28_rdb_reg_write(phy_addr, 0x1E8, 0x0213); ++ egphy28_rdb_reg_write(phy_addr, 0x1E9, 0x0020); ++ egphy28_rdb_reg_write(phy_addr, 0x28, 0x4C30); ++ egphy28_rdb_reg_write(phy_addr, 0x125, 0x211B); ++ egphy28_rdb_reg_write(phy_addr, 0xE, 0x0013); ++ egphy28_rdb_reg_write(phy_addr, 0xB0, 0x000C); ++ 
egphy28_rdb_reg_write(phy_addr, 0xB0, 0x0000); ++ ++ /* Set Full-duplex, 1000BASE-T, Auto-Negoatiation, Restartr-AN ++ * (Register[0x00], bit 8, 6, 12, 9 = 1) ++ */ ++ egphy28_reg_write(phy_addr, EGPHY28_COPPER_MII_CTRL, (BMCR_FULLDPLX | BMCR_SPEED1000 | ++ BMCR_ANENABLE | BMCR_ANRESTART)); ++ ++ /* Automatic Master/Slave configuration (Register[0x09], bit 12 = 0) in default */ ++ egphy28_reg_modify(phy_addr, EGPHY28_MII_CTRL1000, 0x0, (1 << 12)); ++ ++ /* Ability advert set : IEEE 802.3, 10HD, 100HD, 10FD, 100FD */ ++ /* (Register[0x04] bit 0, 5, 7, 6, 8 = 1 */ ++ egphy28_reg_modify(phy_addr, EGPHY28_MII_ADVERTISE, ++ (ADVERTISE_10HALF | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_100FULL | ADVERTISE_CSMA), ++ (ADVERTISE_10HALF | ADVERTISE_100HALF | ADVERTISE_10FULL | ADVERTISE_100FULL | ADVERTISE_CSMA)); ++ /* Ability advert set : switch device port, 1000BASE-T FD, non-1000BASE-T HD */ ++ /* (Register[0x09] bit 10, 9 = 1, bit 8 = 0 */ ++ egphy28_reg_modify(phy_addr, EGPHY28_MII_CTRL1000, ++ (REPEATER_DTE | ADVERTISE_1000FULL), (REPEATER_DTE | ADVERTISE_1000FULL | ADVERTISE_1000HALF)); ++ ++ /* Set Auto-Negoatiation, Restartr-AN (Register[0x00], bit 12, 9 = 1) */ ++ egphy28_reg_modify(phy_addr, EGPHY28_COPPER_MII_CTRL, (BMCR_ANENABLE | BMCR_ANRESTART), ++ (BMCR_ANENABLE | BMCR_ANRESTART)); ++ ++ /* Clear bit 14 for automatic MDI crossover (Register[RDB 0x00] bit 14 = 0) in default */ ++ egphy28_rdb_reg_modify(phy_addr, 0x00, 0x0, (1 << 14)); ++ ++ /* Clear bit 9 to disable forced auto MDI xover */ ++ egphy28_rdb_reg_modify(phy_addr, 0x2f, 0x0, (1 << 9)); ++#endif ++ ++ return rv; ++} ++ ++ ++int ++egphy28_reset_setup(void __iomem *base, u32 phy_addr) ++{ ++ int rv = SOC_E_NONE; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ rv = egphy28_ge_reset(phy_addr); ++ if (SOC_SUCCESS(rv)) { ++ rv = egphy28_ge_init(base, phy_addr); ++ } ++ ++ return rv; ++} ++ ++int ++egphy28_init(void __iomem *base, u32 phy_addr) ++{ ++ u16 phyid0, phyid1; ++ ++ NET_TRACE(("%s: 
phy_addr %d\n", __FUNCTION__, phy_addr)); ++ ++ egphy28_reg_read(phy_addr, EGPHY28_PHY_ID_MSB, &phyid0); ++ egphy28_reg_read(phy_addr, EGPHY28_PHY_ID_LSB, &phyid1); ++ ++ printf("%s Phy ChipID: 0x%04x:0x%04x\n", __FUNCTION__, phyid1, phyid0); ++ //egphy28_reset_setup(base, phy_addr); ++ ++ return 0; ++} ++ ++int ++egphy28_enable_set(u32 phy_addr, int enable) ++{ ++ u16 val; ++ ++ NET_TRACE(("%s: phy_addr %d\n", __FUNCTION__, phy_addr)); ++ ++ egphy28_reg_read(phy_addr, EGPHY28_COPPER_MII_CTRL, &val); ++ if (enable) { ++ val &= ~BMCR_PDOWN; ++ } else { ++ val |= BMCR_PDOWN; ++ } ++ egphy28_reg_write(phy_addr, EGPHY28_COPPER_MII_CTRL, val); ++ ++ return SOC_E_NONE; ++} ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5221.c b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5221.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5221.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5221.c 2017-11-09 17:53:44.025293000 +0800 +@@ -0,0 +1,657 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * These routines provide access to the external phy ++ * ++ */ ++ ++/* ---- Include Files ---------------------------------------------------- */ ++#include ++#include ++#include "../../../mdio/iproc_mdio.h" ++#include "bcmiproc_phy.h" ++#include "bcmiproc_phy5221.h" ++ ++/* ---- External Variable Declarations ----------------------------------- */ ++/* ---- External Function Prototypes ------------------------------------- */ ++/* ---- Public Variables ------------------------------------------------- */ ++/* ---- Private Constants and Types -------------------------------------- */ ++/* ---- Private Variables ------------------------------------------------ */ ++ ++/* debug/trace */ ++//#define BCMDBG ++//#define BCMDBG_ERR ++#ifdef BCMDBG ++#define 
NET_ERROR(args) printf args ++#define NET_TRACE(args) printf args ++#elif defined(BCMDBG_ERR) ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) ++#else ++#define NET_ERROR(args) ++#define NET_TRACE(args) ++#endif /* BCMDBG */ ++#define NET_REG_TRACE(args) ++ ++ ++#ifndef ASSERT ++#define ASSERT(exp) ++#endif ++ ++ ++/* ==== Public Functions ================================================= */ ++ ++int ++phy5221_wr_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data) ++{ ++ uint16 wr_data=*data; ++ uint16 test_reg; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ NET_REG_TRACE(("%s going to write phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, wr_data)); ++ ++ if (reg_bank) { ++ iproc_mii_read(MII_DEV_EXT, phyaddr, 0x1f, &test_reg); ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1f, (test_reg | 0x0080)); ++ ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, wr_data); ++ ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1f, test_reg); ++ } else { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, wr_data); ++ } ++ return SOC_E_NONE; ++} ++ ++ ++int ++phy5221_rd_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data) ++{ ++ uint16 test_reg; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ NET_REG_TRACE(("%s going to read phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr)); ++ ++ if (reg_bank) { ++ iproc_mii_read(MII_DEV_EXT, phyaddr, 0x1f, &test_reg); ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1f, (test_reg | 0x0080)); ++ ++ iproc_mii_read(MII_DEV_EXT, phyaddr, reg_addr, data); ++ ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1f, test_reg); ++ } else { ++ iproc_mii_read(MII_DEV_EXT, phyaddr, reg_addr, data); ++ } ++ NET_REG_TRACE(("%s rd phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, *data)); ++ ++ return SOC_E_NONE; ++} ++ ++ ++int 
++phy5221_mod_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 data, uint16 mask) ++{ ++ uint16 test_reg; ++ uint16 org_data, rd_data; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ NET_REG_TRACE(("%s going to modify phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x) mask(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, data, mask)); ++ ++ if (reg_bank) { ++ iproc_mii_read(MII_DEV_EXT, phyaddr, 0x1f, &test_reg); ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1f, (test_reg | 0x0080)); ++ ++ iproc_mii_read(MII_DEV_EXT, phyaddr, reg_addr, &rd_data); ++ NET_REG_TRACE(("%s rd phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rd_data)); ++ org_data = rd_data; ++ rd_data &= ~(mask); ++ rd_data |= data; ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, rd_data); ++ NET_REG_TRACE(("%s wrt phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rd_data)); ++ ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1f, test_reg); ++ } else { ++ iproc_mii_read(MII_DEV_EXT, phyaddr, reg_addr, &rd_data); ++ NET_REG_TRACE(("%s rd phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rd_data)); ++ org_data = rd_data; ++ rd_data &= ~(mask); ++ rd_data |= data; ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, rd_data); ++ NET_REG_TRACE(("%s wrt phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rd_data)); ++ } ++ ++ return SOC_E_NONE; ++} ++ ++ ++void ++phy5221_fe_reset(uint eth_num, uint phyaddr) ++{ ++ uint16 ctrl; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ /* set reset flag */ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl); ++ ctrl |= MII_CTRL_RESET; ++ phy5221_wr_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl); ++ ++ SPINWAIT( 
(!phy5221_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl) ++ && (ctrl & MII_CTRL_RESET)), 100000); ++ /* check if out of reset */ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl); ++ if (ctrl & MII_CTRL_RESET) { ++ /* timeout */ ++ NET_ERROR(("et%d: %s reset not complete\n", eth_num, __FUNCTION__)); ++ } else { ++ NET_ERROR(("et%d: %s reset complete\n", eth_num, __FUNCTION__)); ++ } ++ ++ return; ++} ++ ++ ++/* ++ * Function: ++ * phy5221_fe_init ++ * Purpose: ++ * Initialize the PHY (MII mode) to a known good state. ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * Returns: ++ * SOC_E_XXX ++ ++ * Notes: ++ * No synchronization performed at this level. ++ */ ++int ++phy5221_fe_init(uint eth_num, uint phyaddr) ++{ ++ uint16 mii_ana, mii_ctrl; ++ ++ /* Reset PHY */ ++ phy5221_fe_reset(eth_num, phyaddr); ++ ++ mii_ana = MII_ANA_HD_10 | MII_ANA_FD_10 | MII_ANA_HD_100 | ++ MII_ANA_FD_100 | MII_ANA_ASF_802_3; ++ mii_ctrl = MII_CTRL_FD | MII_CTRL_SS_100 | MII_CTRL_AE | MII_CTRL_RAN; ++ ++ phy5221_wr_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ phy5221_wr_reg(eth_num, phyaddr, PHY_MII_ANAr_BANK, PHY_MII_ANAr_ADDR, &mii_ana); ++ ++ return SOC_E_NONE; ++} ++ ++ ++#ifdef BCMINTERNAL ++/* ++ * Function: ++ * phy5221_fe_speed_set ++ * Purpose: ++ * Set the current operating speed (forced). ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * duplex - (OUT) Boolean, true indicates full duplex, false ++ * indicates half. ++ * Returns: ++ * SOC_E_XXX ++ * Notes: ++ * No synchronization performed at this level. Autonegotiation is ++ * not manipulated. 
++ */ ++int ++phy5221_fe_speed_set(uint eth_num, uint phyaddr, int speed) ++{ ++ uint16 mii_ctrl; ++ ++ if (speed == 0) { ++ return SOC_E_NONE; ++ } ++ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ mii_ctrl &= ~(MII_CTRL_SS_LSB | MII_CTRL_SS_MSB); ++ switch(speed) { ++ case 10: ++ mii_ctrl |= MII_CTRL_SS_10; ++ break; ++ case 100: ++ mii_ctrl |= MII_CTRL_SS_100; ++ break; ++ case 1000: ++ mii_ctrl |= MII_CTRL_SS_1000; ++ break; ++ default: ++ return SOC_E_CONFIG; ++ } ++ ++ phy5221_wr_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ return SOC_E_NONE; ++} ++#endif /* BCMINTERNAL */ ++ ++ ++/* ++ * Function: ++ * phy5221_init ++ * Purpose: ++ * Initialize xgxs6 phys ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * Returns: ++ * 0 ++ */ ++int ++phy5221_init(uint eth_num, uint phyaddr) ++{ ++ uint16 phyid0, phyid1; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID0r_BANK, PHY_MII_PHY_ID0r_ADDR, &phyid0); ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID1r_BANK, PHY_MII_PHY_ID1r_ADDR, &phyid1); ++ ++ NET_TRACE(("%s phyaddr(0x%x) Phy ChipID: 0x%04x:0x%04x\n", __FUNCTION__, phyaddr, phyid1, phyid0)); ++ ++ phy5221_fe_init(eth_num, phyaddr); ++ ++ return 0; ++} ++ ++/* ++ * Function: ++ * phy5221_link_get ++ * Purpose: ++ * Determine the current link up/down status ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * link - (OUT) Boolean, true indicates link established. ++ * Returns: ++ * SOC_E_XXX ++ * Notes: ++ * No synchronization performed at this level. 
++ */ ++int ++phy5221_link_get(uint eth_num, uint phyaddr, int *link) ++{ ++ uint16 mii_ctrl, mii_stat; ++ uint32 wait; ++ ++ *link = FALSE; /* Default */ ++ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ /* the first read of status register will not show link up, second read will show link up */ ++ if (!(mii_stat & MII_STAT_LA) ) { ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ } ++ ++ if (!(mii_stat & MII_STAT_LA) || (mii_stat == 0xffff)) { ++ /* mii_stat == 0xffff check is to handle removable PHY daughter cards */ ++ return SOC_E_NONE; ++ } ++ ++ /* Link appears to be up; we are done if autoneg is off. */ ++ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ if (!(mii_ctrl & MII_CTRL_AE)) { ++ *link = TRUE; ++ return SOC_E_NONE; ++ } ++ ++ /* ++ * If link appears to be up but autonegotiation is still in ++ * progress, wait for it to complete. For BCM5228, autoneg can ++ * still be busy up to about 200 usec after link is indicated. Also ++ * continue to check link state in case it goes back down. 
++ * wait 500ms (500000us/10us = 50000 ) ++ */ ++ for (wait=0; wait<50000; wait++) { ++ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ ++ if (!(mii_stat & MII_STAT_LA)) { ++ /* link is down */ ++ return SOC_E_NONE; ++ } ++ ++ if (mii_stat & MII_STAT_AN_DONE) { ++ /* AutoNegotiation done */ ++ break; ++ } ++ ++ OSL_DELAY(10); ++ } ++ if (wait>=50000) { ++ /* timeout */ ++ return SOC_E_BUSY; ++ } ++ ++ /* Return link state at end of polling */ ++ *link = ((mii_stat & MII_STAT_LA) != 0); ++ ++ return SOC_E_NONE; ++} ++ ++ ++/* ++ * Function: ++ * phy5221_enable_set ++ * Purpose: ++ * Enable/Disable phy ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * enable - on/off state to set ++ * Returns: ++ * 0 ++ */ ++int ++phy5221_enable_set(uint eth_num, uint phyaddr, int enable) ++{ ++ uint16 data; /* New value to write to PHY register */ ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ data = enable ? 0 : MII_ECR_TD; /* Transmitt enable/disable */ ++ phy5221_mod_reg(eth_num, phyaddr, PHY_MII_ECRr_BANK, PHY_MII_ECRr_ADDR, data, MII_ECR_TD); ++ ++ data = enable ? 
0 : PHY522X_SUPER_ISOLATE_MODE; ++ /* Device needs to be put in super-isolate mode in order to disable ++ * the link in 10BaseT mode ++ */ ++ phy5221_mod_reg(eth_num, phyaddr, PHY_AUX_MULTIPLE_PHYr_BANK, PHY_AUX_MULTIPLE_PHYr_ADDR, ++ data, PHY522X_SUPER_ISOLATE_MODE); ++ ++ return SOC_E_NONE; ++} ++ ++ ++#ifdef BCMINTERNAL ++/* ++ * Function: ++ * phy5221_speed_set ++ * Purpose: ++ * Set PHY speed ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * speed - link speed in Mbps ++ * Returns: ++ * 0 ++ */ ++int ++phy5221_speed_set(uint eth_num, uint phyaddr, int speed) ++{ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ phy5221_fe_speed_set(eth_num, phyaddr, speed); ++ ++ return 0; ++} ++#endif /* BCMINTERNAL */ ++ ++ ++/* ++ * Function: ++ * phy5221_auto_negotiate_gcd (greatest common denominator). ++ * Purpose: ++ * Determine the current greatest common denominator between ++ * two ends of a link ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * speed - (OUT) greatest common speed. ++ * duplex - (OUT) greatest common duplex. ++ * link - (OUT) Boolean, true indicates link established. ++ * Returns: ++ * SOC_E_XXX ++ * Notes: ++ * No synchronization performed at this level. ++ */ ++static int ++phy5221_auto_negotiate_gcd(uint eth_num, uint phyaddr, int *speed, int *duplex) ++{ ++ int t_speed, t_duplex; ++ uint16 mii_ana, mii_anp, mii_stat; ++ uint16 mii_gb_stat, mii_esr, mii_gb_ctrl; ++ ++ mii_gb_stat = 0; /* Start off 0 */ ++ mii_gb_ctrl = 0; /* Start off 0 */ ++ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_ANAr_BANK, PHY_MII_ANAr_ADDR, &mii_ana); ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_ANPr_BANK, PHY_MII_ANPr_ADDR, &mii_anp); ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ ++ if (mii_stat & MII_STAT_ES) { /* Supports extended status */ ++ /* ++ * If the PHY supports extended status, check if it is 1000MB ++ * capable. 
If it is, check the 1000Base status register to see ++ * if 1000MB negotiated. ++ */ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_ESRr_BANK, PHY_MII_ESRr_ADDR, &mii_esr); ++ ++ if (mii_esr & (MII_ESR_1000_X_FD | MII_ESR_1000_X_HD | ++ MII_ESR_1000_T_FD | MII_ESR_1000_T_HD)) { ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_GB_STATr_BANK, PHY_MII_GB_STATr_ADDR, &mii_gb_stat); ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_GB_CTRLr_BANK, PHY_MII_GB_CTRLr_ADDR, &mii_gb_ctrl); ++ } ++ } ++ ++ /* ++ * At this point, if we did not see Gig status, one of mii_gb_stat or ++ * mii_gb_ctrl will be 0. This will cause the first 2 cases below to ++ * fail and fall into the default 10/100 cases. ++ */ ++ ++ mii_ana &= mii_anp; ++ ++ if ((mii_gb_ctrl & MII_GB_CTRL_ADV_1000FD) && ++ (mii_gb_stat & MII_GB_STAT_LP_1000FD)) { ++ t_speed = 1000; ++ t_duplex = 1; ++ } else if ((mii_gb_ctrl & MII_GB_CTRL_ADV_1000HD) && ++ (mii_gb_stat & MII_GB_STAT_LP_1000HD)) { ++ t_speed = 1000; ++ t_duplex = 0; ++ } else if (mii_ana & MII_ANA_FD_100) { /* [a] */ ++ t_speed = 100; ++ t_duplex = 1; ++ } else if (mii_ana & MII_ANA_T4) { /* [b] */ ++ t_speed = 100; ++ t_duplex = 0; ++ } else if (mii_ana & MII_ANA_HD_100) { /* [c] */ ++ t_speed = 100; ++ t_duplex = 0; ++ } else if (mii_ana & MII_ANA_FD_10) { /* [d] */ ++ t_speed = 10; ++ t_duplex = 1 ; ++ } else if (mii_ana & MII_ANA_HD_10) { /* [e] */ ++ t_speed = 10; ++ t_duplex = 0; ++ } else { ++ return(SOC_E_FAIL); ++ } ++ ++ if (speed) *speed = t_speed; ++ if (duplex) *duplex = t_duplex; ++ ++ return(SOC_E_NONE); ++} ++ ++ ++/* ++ * Function: ++ * phy5221_speed_get ++ * Purpose: ++ * Get PHY speed ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * speed - current link speed in Mbps ++ * Returns: ++ * 0 ++ */ ++int ++phy5221_speed_get(uint eth_num, uint phyaddr, int *speed, int *duplex) ++{ ++ int rv; ++ uint16 mii_ctrl, mii_stat; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ 
phy5221_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ ++ *speed = 0; ++ *duplex = 0; ++ if (mii_ctrl & MII_CTRL_AE) { /* Auto-negotiation enabled */ ++ if (!(mii_stat & MII_STAT_AN_DONE)) { /* Auto-neg NOT complete */ ++ rv = SOC_E_NONE; ++ } else { ++ rv = phy5221_auto_negotiate_gcd(eth_num, phyaddr, speed, duplex); ++ } ++ } else { /* Auto-negotiation disabled */ ++ /* ++ * Simply pick up the values we force in CTRL register. ++ */ ++ if (mii_ctrl & MII_CTRL_FD) ++ *duplex = 1; ++ ++ switch(MII_CTRL_SS(mii_ctrl)) { ++ case MII_CTRL_SS_10: ++ *speed = 10; ++ break; ++ case MII_CTRL_SS_100: ++ *speed = 100; ++ break; ++ case MII_CTRL_SS_1000: ++ *speed = 1000; ++ break; ++ default: /* Just pass error back */ ++ return(SOC_E_UNAVAIL); ++ } ++ rv = SOC_E_NONE; ++ } ++ ++ return(rv); ++} ++ ++ ++#ifdef BCMINTERNAL ++int ++phy5221_lb_set(uint eth_num, uint phyaddr, int enable) ++{ ++ uint16 mii_ctrl; ++ ++ /* set reset flag */ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ mii_ctrl &= ~MII_CTRL_LE; ++ mii_ctrl |= enable ? 
MII_CTRL_LE : 0; ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ return 0; ++} ++#endif /* BCMINTERNAL */ ++ ++ ++#ifdef BCMINTERNAL ++void ++phy5221_disp_status(uint eth_num, uint phyaddr) ++{ ++ uint16 tmp0, tmp1, tmp2; ++ int speed, duplex; ++ ++ printf("et%d: %s: phyaddr:%d\n", eth_num, __FUNCTION__, phyaddr); ++ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &tmp0); ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &tmp1); ++ printf(" MII-Control: 0x%x; MII-Status: 0x%x\n", tmp0, tmp1); ++ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID0r_BANK, PHY_MII_PHY_ID0r_ADDR, &tmp0); ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID1r_BANK, PHY_MII_PHY_ID1r_ADDR, &tmp1); ++ printf(" Phy ChipID: 0x%04x:0x%04x\n", tmp0, tmp1); ++ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_ANAr_BANK, PHY_MII_ANAr_ADDR, &tmp0); ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_ANPr_BANK, PHY_MII_ANPr_ADDR, &tmp1); ++ phy5221_speed_get(eth_num, phyaddr, &speed, &duplex); ++ printf(" AutoNeg Ad: 0x%x; AutoNeg Partner: 0x%x; speed:%d; duplex:%d\n", tmp0, tmp1, speed, duplex); ++ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_ESRr_BANK, PHY_MII_ESRr_ADDR, &tmp0); ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_ECRr_BANK, PHY_MII_ECRr_ADDR, &tmp1); ++ phy5221_rd_reg(eth_num, phyaddr, 0x0000, 0x11, &tmp2); ++ printf(" Reg0x0f: 0x%x; 100Base-X AUX ctrl: 0x%x; 100Base-X AUX stat: 0x%x\n", tmp0, tmp1, tmp2); ++ ++ phy5221_rd_reg(eth_num, phyaddr, 0x0000, 0x12, &tmp0); ++ phy5221_rd_reg(eth_num, phyaddr, 0x0000, 0x13, &tmp1); ++ phy5221_rd_reg(eth_num, phyaddr, 0x0000, 0x14, &tmp2); ++ printf(" 100Base-X RCV ERR: 0x%x; 100Base-X FALSE CARRIER: 0x%x; 100Base-X DISCON: 0x%x\n", tmp0, tmp1, tmp2); ++} ++#endif /* BCMINTERNAL */ ++ ++ ++#ifdef BCMINTERNAL ++void ++phy5221_chk_err(uint eth_num, uint phyaddr) ++{ ++ uint16 tmp0; ++ ++ phy5221_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &tmp0); 
++ if (!(tmp0 & MII_STAT_LA)) { ++ printf("ERROR: reg 0x01 (LINK down): 0x%x\n", tmp0); ++ } ++ if (tmp0 & (MII_STAT_JBBR|MII_STAT_RF)) { ++ printf("ERROR: reg 0x01: 0x%x\n", tmp0); ++ } ++ ++ phy5221_rd_reg(eth_num, phyaddr, 0, 0x11, &tmp0); ++ if (!(tmp0 & 0x100)) { ++ printf("ERROR: reg 0x11 (LINK down): 0x%x\n", tmp0); ++ } ++ if (tmp0 & 0x8bf) { ++ printf("ERROR: reg 0x11: 0x%x\n", tmp0); ++ } ++ ++ phy5221_rd_reg(eth_num, phyaddr, 0, 0x12, &tmp0); ++ if (tmp0) { ++ printf("ERROR: reg 0x12 (RCV ERR CNT): 0x%x\n", tmp0); ++ } ++ ++ phy5221_rd_reg(eth_num, phyaddr, 0, 0x13, &tmp0); ++ if (tmp0) { ++ printf("ERROR: reg 0x13 (FALSE CARRIER CNT): 0x%x\n", tmp0); ++ } ++ ++ phy5221_rd_reg(eth_num, phyaddr, 0, 0x14, &tmp0); ++ if (tmp0 & 0xc000) { ++ printf("ERROR: reg 0x14: 0x%x\n", tmp0); ++ } ++ ++ phy5221_rd_reg(eth_num, phyaddr, 0, 0x19, &tmp0); ++ if (!(tmp0 & 0x4)) { ++ printf("ERROR: reg 0x19 (LINK down): 0x%x\n", tmp0); ++ } ++ if (tmp0 & 0xc0) { ++ printf("ERROR: reg 0x19: 0x%x\n", tmp0); ++ } ++} ++#endif /* BCMINTERNAL */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5461s.c b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5461s.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5461s.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5461s.c 2017-11-09 17:53:44.027292000 +0800 +@@ -0,0 +1,896 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * These routines provide access to the external phy ++ * ++ */ ++ ++/* ---- Include Files ---------------------------------------------------- */ ++#include ++#include ++#include "../../../mdio/iproc_mdio.h" ++#include "bcmiproc_phy.h" ++#include "bcmiproc_phy5461s.h" ++ ++/* ---- External Variable Declarations ----------------------------------- */ ++/* ---- External Function Prototypes 
------------------------------------- */ ++/* ---- Public Variables ------------------------------------------------- */ ++/* ---- Private Constants and Types -------------------------------------- */ ++/* ---- Private Variables ------------------------------------------------ */ ++ ++/* debug/trace */ ++//#define BCMDBG ++//#define BCMDBG_ERR ++#ifdef BCMDBG ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) printf args ++#elif defined(BCMDBG_ERR) ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) ++#else ++#define NET_ERROR(args) ++#define NET_TRACE(args) ++#endif /* BCMDBG */ ++#define NET_REG_TRACE(args) ++ ++ ++#ifndef ASSERT ++#define ASSERT(exp) ++#endif ++ ++ ++/* ==== Public Functions ================================================= */ ++ ++int ++phy5461_wr_reg(uint eth_num, uint phyaddr, uint32 flags, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data) ++{ ++ int rv = SOC_E_NONE; ++ uint16 wr_data=*data; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ NET_REG_TRACE(("%s going to write phyaddr(0x%x) flags(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, flags, reg_bank, reg_addr, wr_data)); ++ //printf("%s phyaddr(0x%x) flags(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ // __FUNCTION__, phyaddr, flags, reg_bank, reg_addr, wr_data); ++ ++ if (flags & SOC_PHY_REG_1000X) { ++ if (reg_addr <= 0x000f) { ++ uint16 blk_sel; ++ ++ /* Map 1000X page */ ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1c, 0x7c00); ++ ++ iproc_mii_read(MII_DEV_EXT, phyaddr, 0x1c, &blk_sel); ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1c, blk_sel | 0x8001); ++ ++ /* write 1000X IEEE register */ ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, wr_data); ++ ++ /* Restore IEEE mapping */ ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1c, (blk_sel & 0xfffe) | 0x8000); ++ } else if (flags & _SOC_PHY_REG_DIRECT) { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, wr_data); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ } else { ++ 
switch(reg_addr) { ++ /* Map shadow registers */ ++#ifdef BCMINTERNAL ++ case 0x15: ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x17, reg_bank); ++ break; ++#endif /* BCMINTERNAL */ ++ case 0x18: ++ if (reg_bank <= 0x0007) { ++ if (reg_bank == 0x0007) { ++ wr_data |= 0x8000; ++ } ++ wr_data = (wr_data & ~(0x0007)) | reg_bank; ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ case 0x1C: ++ if (reg_bank <= 0x001F) { ++ wr_data = 0x8000 | (reg_bank << 10) | (wr_data & 0x03FF); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++#ifdef BCMINTERNAL ++ case 0x1D: ++ if (reg_bank == 0x0000) { ++ wr_data = wr_data & 0x07FFF; ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++#endif /* BCMINTERNAL */ ++ default: ++ if (!(flags & SOC_PHY_REG_RESERVE_ACCESS)) { ++ /* Must not write to reserved registers */ ++ if (reg_addr > 0x001e) { ++ rv = SOC_E_PARAM; ++ } ++ } ++ break; ++ } ++ if (SOC_SUCCESS(rv)) { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, wr_data); ++ } ++ } ++ if (SOC_FAILURE(rv)) { ++ NET_ERROR(("%s ERROR phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) rv(%d)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rv)); ++ } ++ return rv; ++} ++ ++ ++int ++phy5461_rd_reg(uint eth_num, uint phyaddr, uint32 flags, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data) ++{ ++ int rv = SOC_E_NONE; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ NET_REG_TRACE(("%s going to read phyaddr(0x%x) flags(0x%x) reg_bank(0x%x) reg_addr(0x%x)\n", ++ __FUNCTION__, phyaddr, flags, reg_bank, reg_addr)); ++ if (flags & SOC_PHY_REG_1000X) { ++ if (reg_addr <= 0x000f) { ++ uint16 blk_sel; ++ ++ /* Map 1000X page */ ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1c, 0x7c00); ++ iproc_mii_read(MII_DEV_EXT, phyaddr, 0x1c, &blk_sel); ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1c, blk_sel | 0x8001); ++ ++ /* Read 1000X IEEE register */ ++ iproc_mii_read(MII_DEV_EXT, phyaddr, reg_addr, data); ++ NET_REG_TRACE(("%s rd phyaddr(0x%x) flags(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, 
phyaddr, flags, reg_bank, reg_addr, *data)); ++ ++ /* Restore IEEE mapping */ ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1c, (blk_sel & 0xfffe) | 0x8000); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ } else { ++ switch(reg_addr) { ++ /* Map shadow registers */ ++#ifdef BCMINTERNAL ++ case 0x15: ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x17, reg_bank); ++ break; ++#endif /* BCMINTERNAL */ ++ case 0x18: ++ if (reg_bank <= 0x0007) { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, (reg_bank << 12) | 0x7); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ case 0x1C: ++ if (reg_bank <= 0x001F) { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, (reg_bank << 10)); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++#ifdef BCMINTERNAL ++ case 0x1D: ++ if (reg_bank <= 0x0001) { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, (reg_bank << 15)); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++#endif /* BCMINTERNAL */ ++ default: ++ if (!(flags & SOC_PHY_REG_RESERVE_ACCESS)) { ++ /* Must not read from reserved registers */ ++ if (reg_addr > 0x001e) { ++ rv = SOC_E_PARAM; ++ } ++ } ++ break; ++ } ++ if (SOC_SUCCESS(rv)) { ++ iproc_mii_read(MII_DEV_EXT, phyaddr, reg_addr, data); ++ NET_REG_TRACE(("%s rd phyaddr(0x%x) flags(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, flags, reg_bank, reg_addr, *data)); ++ } ++ } ++ if (SOC_FAILURE(rv)) { ++ NET_ERROR(("%s ERROR phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) rv(%d)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rv)); ++ } else { ++ //printf("%s phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ // __FUNCTION__, phyaddr, reg_bank, reg_addr, *data); ++ } ++ ++ return rv; ++} ++ ++ ++int ++phy5461_mod_reg(uint eth_num, uint phyaddr, uint32 flags, uint16 reg_bank, ++ uint8 reg_addr, uint16 data, uint16 mask) ++{ ++ int rv = SOC_E_NONE; ++ uint16 org_data, rd_data; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ NET_REG_TRACE(("%s going to modify phyaddr(0x%x) flags(0x%x) 
reg_bank(0x%x) reg_addr(0x%x) data(0x%x) mask(0x%x)\n", ++ __FUNCTION__, phyaddr, flags, reg_bank, reg_addr, data, mask)); ++ ++ if (flags & SOC_PHY_REG_1000X) { ++ if (reg_addr <= 0x000f) { ++ uint16 blk_sel; ++ ++ /* Map 1000X page */ ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1c, 0x7c00); ++ iproc_mii_read(MII_DEV_EXT, phyaddr, 0x1c, &blk_sel); ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1c, blk_sel | 0x8001); ++ ++ /* Modify 1000X IEEE register */ ++ iproc_mii_read(MII_DEV_EXT, phyaddr, reg_addr, &rd_data); ++ NET_REG_TRACE(("%s rd phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rd_data)); ++ org_data = rd_data; ++ rd_data &= ~(mask); ++ rd_data |= data; ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, rd_data); ++ NET_REG_TRACE(("%s wrt phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rd_data)); ++ ++ /* Restore IEEE mapping */ ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x1c, (blk_sel & 0xfffe) | 0x8000); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ } else { ++ switch(reg_addr) { ++ /* Map shadow registers */ ++#ifdef BCMINTERNAL ++ case 0x15: ++ iproc_mii_write(MII_DEV_EXT, phyaddr, 0x17, reg_bank); ++ break; ++#endif /* BCMINTERNAL */ ++ case 0x18: ++ if (reg_bank <= 0x0007) { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, (reg_bank << 12) | 0x7); ++ ++ if (reg_bank == 0x0007) { ++ data |= 0x8000; ++ mask |= 0x8000; ++ } ++ mask &= ~(0x0007); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ case 0x1C: ++ if (reg_bank <= 0x001F) { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, (reg_bank << 10)); ++ data |= 0x8000; ++ mask |= 0x8000; ++ mask &= ~(0x1F << 10); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++#ifdef BCMINTERNAL ++ case 0x1D: ++ if (reg_bank == 0x0000) { ++ mask &= 0x07FFF; ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++#endif /* BCMINTERNAL */ ++ default: ++ if (!(flags & SOC_PHY_REG_RESERVE_ACCESS)) { ++ /* Must not write to 
reserved registers */ ++ if (reg_addr > 0x001e) { ++ rv = SOC_E_PARAM; ++ } ++ } ++ break; ++ } ++ if (SOC_SUCCESS(rv)) { ++ iproc_mii_read(MII_DEV_EXT, phyaddr, reg_addr, &rd_data); ++ NET_REG_TRACE(("%s rd phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rd_data)); ++ org_data = rd_data; ++ rd_data &= ~(mask); ++ rd_data |= data; ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, rd_data); ++ NET_REG_TRACE(("%s wrt phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rd_data)); ++ } ++ } ++ ++ if (SOC_FAILURE(rv)) { ++ NET_ERROR(("%s ERROR phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) rv(%d)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rv)); ++ } else { ++ //printf("%s modified(0x%x to 0x%x at phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x)\n", ++ // __FUNCTION__, org_data, rd_data, phyaddr, reg_bank, reg_addr); ++ } ++ ++ return rv; ++} ++ ++ ++void ++phy5461_ge_reset(uint eth_num, uint phyaddr) ++{ ++ uint16 ctrl; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ /* set reset flag */ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl); ++ ctrl |= MII_CTRL_RESET; ++ phy5461_wr_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl); ++ ++ SPINWAIT( (!phy5461_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl) ++ && (ctrl & MII_CTRL_RESET)), 100000); ++ /* check if out of reset */ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl); ++ if (ctrl & MII_CTRL_RESET) { ++ /* timeout */ ++ NET_ERROR(("et%d: %s reset not complete\n", eth_num, __FUNCTION__)); ++ } else { ++ NET_TRACE(("et%d: %s reset complete\n", eth_num, __FUNCTION__)); ++ } ++} ++ ++ ++/* ++ * Function: ++ * phy5461_ge_interface_set ++ * Purpose: ++ * Set the current operating mode of the 
PHY. ++ * (Pertaining to the MAC/PHY interface, not the line interface). ++ * For example: TBI or MII/GMII. ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * pif - one of SOC_PORT_IF_* ++ * Returns: ++ * SOC_E_XXX ++ */ ++int ++phy5461_ge_interface_set(uint eth_num, uint phyaddr, soc_port_if_t pif) ++{ ++ uint16 mii_ecr; ++ int mii; /* MII if true, TBI otherwise */ ++ ++ switch (pif) { ++ case SOC_PORT_IF_MII: ++ case SOC_PORT_IF_GMII: ++ case SOC_PORT_IF_SGMII: ++ mii = TRUE; ++ break; ++ case SOC_PORT_IF_NOCXN: ++ return (SOC_E_NONE); ++ case SOC_PORT_IF_TBI: ++ mii = FALSE; ++ break; ++ default: ++ return SOC_E_UNAVAIL; ++ } ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_ECRr_FLAGS, PHY_MII_ECRr_BANK, PHY_MII_ECRr_ADDR, &mii_ecr); ++ ++ if (mii) { ++ mii_ecr &= ~MII_ECR_10B; ++ } else { ++ mii_ecr |= MII_ECR_10B; ++ } ++ ++ phy5461_wr_reg(eth_num, phyaddr, PHY_MII_ECRr_FLAGS, PHY_MII_ECRr_BANK, PHY_MII_ECRr_ADDR, &mii_ecr); ++ ++ return(SOC_E_NONE); ++} ++ ++ ++/* ++ * Function: ++ * phy5461_ge_init ++ * Purpose: ++ * Initialize the PHY (MII mode) to a known good state. ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * Returns: ++ * SOC_E_XXX ++ ++ * Notes: ++ * No synchronization performed at this level. 
++ */ ++int ++phy5461_ge_init(uint eth_num, uint phyaddr) ++{ ++ uint16 mii_ctrl, mii_gb_ctrl; ++ uint16 mii_ana; ++ soc_port_if_t pif; ++ ++ /* Reset PHY */ ++ phy5461_ge_reset(eth_num, phyaddr); ++ ++ /* set advertized bits */ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_ANAr_FLAGS, PHY_MII_ANAr_BANK, PHY_MII_ANAr_ADDR, &mii_ana); ++ mii_ana |= MII_ANA_FD_100 | MII_ANA_FD_10; ++ mii_ana |= MII_ANA_HD_100 | MII_ANA_HD_10; ++ phy5461_wr_reg(eth_num, phyaddr, PHY_MII_ANAr_FLAGS, PHY_MII_ANAr_BANK, PHY_MII_ANAr_ADDR, &mii_ana); ++ ++ mii_ctrl = MII_CTRL_FD | MII_CTRL_SS_1000 | MII_CTRL_AE | MII_CTRL_RAN; ++ mii_gb_ctrl = MII_GB_CTRL_ADV_1000FD | MII_GB_CTRL_PT; ++ ++ pif = SOC_PORT_IF_GMII; ++ ++ phy5461_ge_interface_set(eth_num, phyaddr, pif); ++ ++ phy5461_wr_reg(eth_num, phyaddr, PHY_MII_GB_CTRLr_FLAGS, PHY_MII_GB_CTRLr_BANK, PHY_MII_GB_CTRLr_ADDR, &mii_gb_ctrl); ++ phy5461_wr_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ return(SOC_E_NONE); ++} ++ ++ ++#ifdef BCMINTERNAL ++/* ++ * Function: ++ * phy5461_ge_speed_set ++ * Purpose: ++ * Set the current operating speed (forced). ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * duplex - (OUT) Boolean, true indicates full duplex, false ++ * indicates half. ++ * Returns: ++ * SOC_E_XXX ++ * Notes: ++ * No synchronization performed at this level. Autonegotiation is ++ * not manipulated. 
++ */ ++int ++phy5461_ge_speed_set(uint eth_num, uint phyaddr, int speed) ++{ ++ uint16 mii_ctrl; ++ ++ if (speed == 0) { ++ return SOC_E_NONE; ++ } ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ mii_ctrl &= ~(MII_CTRL_SS_LSB | MII_CTRL_SS_MSB); ++ switch(speed) { ++ case 10: ++ mii_ctrl |= MII_CTRL_SS_10; ++ break; ++ case 100: ++ mii_ctrl |= MII_CTRL_SS_100; ++ break; ++ case 1000: ++ mii_ctrl |= MII_CTRL_SS_1000; ++ break; ++ default: ++ return SOC_E_CONFIG; ++ } ++ ++ phy5461_wr_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ return SOC_E_NONE; ++} ++#endif /* BCMINTERNAL */ ++ ++ ++void ++phy5461_reset_setup(uint eth_num, uint phyaddr) ++{ ++ uint16 tmp; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ phy5461_ge_init(eth_num, phyaddr); ++ ++ /* copper regs */ ++ /* remove power down */ ++ phy5461_mod_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, 0, MII_CTRL_PD); ++ /* Disable super-isolate */ ++ phy5461_mod_reg(eth_num, phyaddr, PHY_MII_POWER_CTRLr_FLAGS, PHY_MII_POWER_CTRLr_BANK, PHY_MII_POWER_CTRLr_ADDR, 0, 1U<<5); ++ /* Enable extended packet length */ ++ phy5461_mod_reg(eth_num, phyaddr, PHY_MII_AUX_CTRLr_FLAGS, PHY_MII_AUX_CTRLr_BANK, PHY_MII_AUX_CTRLr_ADDR, 0x4000, 0x4000); ++ ++ /* Configure interface to MAC */ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_1000X_MII_CTRLr_FLAGS, PHY_1000X_MII_CTRLr_BANK, PHY_1000X_MII_CTRLr_ADDR, &tmp); ++ /* phy5461_ge_init has reset the phy, powering down the unstrapped interface */ ++ /* make sure enabled interfaces are powered up */ ++ /* SGMII (passthrough fiber) or GMII fiber regs */ ++ tmp &= ~MII_CTRL_PD; /* remove power down */ ++ /* ++ * Enable SGMII autonegotiation on the switch side so that the ++ * link status changes are reflected in the switch. 
++ * On Bradley devices, LAG failover feature depends on the SerDes ++ * link staus to activate failover recovery. ++ */ ++ tmp |= MII_CTRL_AE; ++ phy5461_wr_reg(eth_num, phyaddr, PHY_1000X_MII_CTRLr_FLAGS, PHY_1000X_MII_CTRLr_BANK, PHY_1000X_MII_CTRLr_ADDR, &tmp); ++ ++ return; ++} ++ ++ ++/* ++ * Function: ++ * phy5461_init ++ * Purpose: ++ * Initialize xgxs6 phys ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * Returns: ++ * 0 ++ */ ++int ++phy5461_init(uint eth_num, uint phyaddr) ++{ ++ uint16 phyid0, phyid1; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID0r_FLAGS, PHY_MII_PHY_ID0r_BANK, PHY_MII_PHY_ID0r_ADDR, &phyid0); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID1r_FLAGS, PHY_MII_PHY_ID1r_BANK, PHY_MII_PHY_ID1r_ADDR, &phyid1); ++ ++ printf("%s Phy ChipID: 0x%04x:0x%04x\n", __FUNCTION__, phyid1, phyid0); ++ ++ phy5461_reset_setup(eth_num, phyaddr); ++ ++ return 0; ++} ++ ++ ++/* ++ * Function: ++ * phy5461_link_get ++ * Purpose: ++ * Determine the current link up/down status ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * link - (OUT) Boolean, true indicates link established. ++ * Returns: ++ * SOC_E_XXX ++ * Notes: ++ * No synchronization performed at this level. 
++ */ ++int ++phy5461_link_get(uint eth_num, uint phyaddr, int *link) ++{ ++ uint16 mii_ctrl, mii_stat; ++ uint32 wait; ++ ++ *link = FALSE; /* Default */ ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_STATr_FLAGS, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ /* the first read of status register will not show link up, second read will show link up */ ++ if (!(mii_stat & MII_STAT_LA) ) { ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_STATr_FLAGS, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ } ++ ++ if (!(mii_stat & MII_STAT_LA) || (mii_stat == 0xffff)) { ++ /* mii_stat == 0xffff check is to handle removable PHY daughter cards */ ++ return SOC_E_NONE; ++ } ++ ++ /* Link appears to be up; we are done if autoneg is off. */ ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ if (!(mii_ctrl & MII_CTRL_AE)) { ++ *link = TRUE; ++ return SOC_E_NONE; ++ } ++ ++ /* ++ * If link appears to be up but autonegotiation is still in ++ * progress, wait for it to complete. For BCM5228, autoneg can ++ * still be busy up to about 200 usec after link is indicated. Also ++ * continue to check link state in case it goes back down. 
++ */ ++ for (wait=0; wait<50000; wait++) { ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_STATr_FLAGS, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ ++ if (!(mii_stat & MII_STAT_LA)) { ++ /* link is down */ ++ return SOC_E_NONE; ++ } ++ ++ if (mii_stat & MII_STAT_AN_DONE) { ++ /* AutoNegotiation done */ ++ break; ++ } ++ ++ OSL_DELAY(10); ++ } ++ if (wait>=50000) { ++ /* timeout */ ++ return SOC_E_BUSY; ++ } ++ ++ /* Return link state at end of polling */ ++ *link = ((mii_stat & MII_STAT_LA) != 0); ++ ++ return SOC_E_NONE; ++} ++ ++ ++/* ++ * Function: ++ * phy5461_enable_set ++ * Purpose: ++ * Enable/Disable phy ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * enable - on/off state to set ++ * Returns: ++ * 0 ++ */ ++int ++phy5461_enable_set(uint eth_num, uint phyaddr, int enable) ++{ ++ uint16 power_down; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ power_down = (enable) ? 0 : MII_CTRL_PD; ++ ++ phy5461_mod_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, power_down, MII_CTRL_PD); ++ ++ return SOC_E_NONE; ++} ++ ++ ++#ifdef BCMINTERNAL ++/* ++ * Function: ++ * phy5461_speed_set ++ * Purpose: ++ * Set PHY speed ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * speed - link speed in Mbps ++ * Returns: ++ * 0 ++ */ ++int ++phy5461_speed_set(uint eth_num, uint phyaddr, int speed) ++{ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ phy5461_ge_speed_set(eth_num, phyaddr, speed); ++ ++ return 0; ++} ++#endif /* BCMINTERNAL */ ++ ++ ++/* ++ * Function: ++ * phy5461_auto_negotiate_gcd (greatest common denominator). ++ * Purpose: ++ * Determine the current greatest common denominator between ++ * two ends of a link ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * speed - (OUT) greatest common speed. ++ * duplex - (OUT) greatest common duplex. 
++ * link - (OUT) Boolean, true indicates link established. ++ * Returns: ++ * SOC_E_XXX ++ * Notes: ++ * No synchronization performed at this level. ++ */ ++static int ++phy5461_auto_negotiate_gcd(uint eth_num, uint phyaddr, int *speed, int *duplex) ++{ ++ int t_speed, t_duplex; ++ uint16 mii_ana, mii_anp, mii_stat; ++ uint16 mii_gb_stat, mii_esr, mii_gb_ctrl; ++ ++ mii_gb_stat = 0; /* Start off 0 */ ++ mii_gb_ctrl = 0; /* Start off 0 */ ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_ANAr_FLAGS, PHY_MII_ANAr_BANK, PHY_MII_ANAr_ADDR, &mii_ana); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_ANPr_FLAGS, PHY_MII_ANPr_BANK, PHY_MII_ANPr_ADDR, &mii_anp); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_STATr_FLAGS, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ ++ if (mii_stat & MII_STAT_ES) { /* Supports extended status */ ++ /* ++ * If the PHY supports extended status, check if it is 1000MB ++ * capable. If it is, check the 1000Base status register to see ++ * if 1000MB negotiated. ++ */ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_ESRr_FLAGS, PHY_MII_ESRr_BANK, PHY_MII_ESRr_ADDR, &mii_esr); ++ ++ if (mii_esr & (MII_ESR_1000_X_FD | MII_ESR_1000_X_HD | ++ MII_ESR_1000_T_FD | MII_ESR_1000_T_HD)) { ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_GB_STATr_FLAGS, PHY_MII_GB_STATr_BANK, PHY_MII_GB_STATr_ADDR, &mii_gb_stat); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_GB_CTRLr_FLAGS, PHY_MII_GB_CTRLr_BANK, PHY_MII_GB_CTRLr_ADDR, &mii_gb_ctrl); ++ } ++ } ++ ++ /* ++ * At this point, if we did not see Gig status, one of mii_gb_stat or ++ * mii_gb_ctrl will be 0. This will cause the first 2 cases below to ++ * fail and fall into the default 10/100 cases. 
++ */ ++ ++ mii_ana &= mii_anp; ++ ++ if ((mii_gb_ctrl & MII_GB_CTRL_ADV_1000FD) && ++ (mii_gb_stat & MII_GB_STAT_LP_1000FD)) { ++ t_speed = 1000; ++ t_duplex = 1; ++ } else if ((mii_gb_ctrl & MII_GB_CTRL_ADV_1000HD) && ++ (mii_gb_stat & MII_GB_STAT_LP_1000HD)) { ++ t_speed = 1000; ++ t_duplex = 0; ++ } else if (mii_ana & MII_ANA_FD_100) { /* [a] */ ++ t_speed = 100; ++ t_duplex = 1; ++ } else if (mii_ana & MII_ANA_T4) { /* [b] */ ++ t_speed = 100; ++ t_duplex = 0; ++ } else if (mii_ana & MII_ANA_HD_100) { /* [c] */ ++ t_speed = 100; ++ t_duplex = 0; ++ } else if (mii_ana & MII_ANA_FD_10) { /* [d] */ ++ t_speed = 10; ++ t_duplex = 1 ; ++ } else if (mii_ana & MII_ANA_HD_10) { /* [e] */ ++ t_speed = 10; ++ t_duplex = 0; ++ } else { ++ return(SOC_E_FAIL); ++ } ++ ++ if (speed) *speed = t_speed; ++ if (duplex) *duplex = t_duplex; ++ ++ return(SOC_E_NONE); ++} ++ ++ ++/* ++ * Function: ++ * phy5461_speed_get ++ * Purpose: ++ * Get PHY speed ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * speed - current link speed in Mbps ++ * Returns: ++ * 0 ++ */ ++int ++phy5461_speed_get(uint eth_num, uint phyaddr, int *speed, int *duplex) ++{ ++ int rv; ++ uint16 mii_ctrl, mii_stat; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_STATr_FLAGS, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ ++ *speed = 0; ++ *duplex = 0; ++ if (mii_ctrl & MII_CTRL_AE) { /* Auto-negotiation enabled */ ++ if (!(mii_stat & MII_STAT_AN_DONE)) { /* Auto-neg NOT complete */ ++ rv = SOC_E_NONE; ++ } else { ++ rv = phy5461_auto_negotiate_gcd(eth_num, phyaddr, speed, duplex); ++ } ++ } else { /* Auto-negotiation disabled */ ++ /* ++ * Simply pick up the values we force in CTRL register. 
++ */ ++ if (mii_ctrl & MII_CTRL_FD) ++ *duplex = 1; ++ ++ switch(MII_CTRL_SS(mii_ctrl)) { ++ case MII_CTRL_SS_10: ++ *speed = 10; ++ break; ++ case MII_CTRL_SS_100: ++ *speed = 100; ++ break; ++ case MII_CTRL_SS_1000: ++ *speed = 1000; ++ break; ++ default: /* Just pass error back */ ++ return(SOC_E_UNAVAIL); ++ } ++ rv = SOC_E_NONE; ++ } ++ ++ return(rv); ++} ++ ++ ++#ifdef BCMINTERNAL ++int ++phy5461_lb_set(uint eth_num, uint phyaddr, int enable) ++{ ++ uint16 mii_ctrl; ++ ++ /* set reset flag */ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ mii_ctrl &= ~MII_CTRL_LE; ++ mii_ctrl |= enable ? MII_CTRL_LE : 0; ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ return 0; ++} ++ ++ ++void ++phy5461_disp_status(uint eth_num, uint phyaddr) ++{ ++ uint16 tmp0, tmp1, tmp2; ++ int speed, duplex; ++ ++ printf("et%d: %s: phyaddr:%d\n", eth_num, __FUNCTION__, phyaddr); ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_FLAGS, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &tmp0); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_STATr_FLAGS, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &tmp1); ++ printf(" MII-Control: 0x%x; MII-Status: 0x%x\n", tmp0, tmp1); ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID0r_FLAGS, PHY_MII_PHY_ID0r_BANK, PHY_MII_PHY_ID0r_ADDR, &tmp0); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID1r_FLAGS, PHY_MII_PHY_ID1r_BANK, PHY_MII_PHY_ID1r_ADDR, &tmp1); ++ printf(" Phy ChipID: 0x%04x:0x%04x\n", tmp0, tmp1); ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_ANAr_FLAGS, PHY_MII_ANAr_BANK, PHY_MII_ANAr_ADDR, &tmp0); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_ANPr_FLAGS, PHY_MII_ANPr_BANK, PHY_MII_ANPr_ADDR, &tmp1); ++ phy5461_speed_get(eth_num, phyaddr, &speed, &duplex); ++ printf(" AutoNeg Ad: 0x%x; AutoNeg Partner: 0x%x; speed:%d; duplex:%d\n", tmp0, tmp1, speed, duplex); ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_GB_CTRLr_FLAGS, 
PHY_MII_GB_CTRLr_BANK, PHY_MII_GB_CTRLr_ADDR, &tmp0); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_GB_STATr_FLAGS, PHY_MII_GB_STATr_BANK, PHY_MII_GB_STATr_ADDR, &tmp1); ++ printf(" MII GB ctrl: 0x%x; MII GB stat: 0x%x\n", tmp0, tmp1); ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_ESRr_FLAGS, PHY_MII_ESRr_BANK, PHY_MII_ESRr_ADDR, &tmp0); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MII_ECRr_FLAGS, PHY_MII_ECRr_BANK, PHY_MII_ECRr_ADDR, &tmp1); ++ phy5461_rd_reg(eth_num, phyaddr, 0x00, 0x0000, 0x11, &tmp2); ++ printf(" IEEE Ext stat: 0x%x; PHY Ext ctrl: 0x%x; PHY Ext stat: 0x%x\n", tmp0, tmp1, tmp2); ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_MODE_CTRLr_FLAGS, PHY_MODE_CTRLr_BANK, PHY_MODE_CTRLr_ADDR, &tmp0); ++ printf(" Mode Control (Addr 1c shadow 1f): 0x%x\n", tmp0); ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_1000X_MII_CTRLr_FLAGS, PHY_1000X_MII_CTRLr_BANK, PHY_1000X_MII_CTRLr_ADDR, &tmp0); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_1000X_MII_CTRLr_FLAGS, PHY_1000X_MII_CTRLr_BANK, 0x01, &tmp1); ++ printf(" 1000-x MII ctrl: 0x%x; 1000-x MII stat: 0x%x\n", tmp0, tmp1); ++ ++ phy5461_rd_reg(eth_num, phyaddr, PHY_1000X_MII_CTRLr_FLAGS, PHY_1000X_MII_CTRLr_BANK, 0x04, &tmp0); ++ phy5461_rd_reg(eth_num, phyaddr, PHY_1000X_MII_CTRLr_FLAGS, PHY_1000X_MII_CTRLr_BANK, 0x05, &tmp1); ++ printf(" 1000-x AutoNeg Ad: 0x%x; 1000-x AutoNeg Partner: 0x%x\n", tmp0, tmp1); ++ ++} ++#endif /* BCMINTERNAL */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5481.c b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5481.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5481.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_phy5481.c 2017-11-09 17:53:44.028289000 +0800 +@@ -0,0 +1,728 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * These routines provide access to the external 
phy ++ * ++ */ ++ ++/* ---- Include Files ---------------------------------------------------- */ ++#include ++#include ++#include "../../../mdio/iproc_mdio.h" ++#include "bcmiproc_phy.h" ++#include "bcmiproc_phy5481.h" ++ ++/* ---- External Variable Declarations ----------------------------------- */ ++/* ---- External Function Prototypes ------------------------------------- */ ++/* ---- Public Variables ------------------------------------------------- */ ++/* ---- Private Constants and Types -------------------------------------- */ ++/* ---- Private Variables ------------------------------------------------ */ ++ ++/* debug/trace */ ++//#define BCMDBG ++//#define BCMDBG_ERR ++#ifdef BCMDBG ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) printf args ++#elif defined(BCMDBG_ERR) ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) ++#else ++#define NET_ERROR(args) ++#define NET_TRACE(args) ++#endif /* BCMDBG */ ++#define NET_REG_TRACE(args) ++ ++ ++#ifndef ASSERT ++#define ASSERT(exp) ++#endif ++ ++ ++/* ==== Public Functions ================================================= */ ++ ++int ++phy5481_wr_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data) ++{ ++ int rv = SOC_E_NONE; ++ uint16 wr_data=*data; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ NET_REG_TRACE(("%s going to write phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, wr_data)); ++ ++ switch(reg_addr) { ++ /* Map shadow registers */ ++ case 0x18: ++ if (reg_bank <= 0x0007) { ++ if (reg_bank == 0x0007) { ++ wr_data |= 0x8000; ++ } ++ wr_data = (wr_data & ~(0x0007)) | reg_bank; ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ case 0x1C: ++ if (reg_bank <= 0x000F) { ++ wr_data = 0x8000 | (reg_bank << 10) | (wr_data & 0x03FF); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ default: ++ if (reg_addr > 0x001e) { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ } ++ ++ if (SOC_SUCCESS(rv)) { ++ 
iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, wr_data); ++ } ++ ++ return rv; ++} ++ ++ ++int ++phy5481_rd_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 *data) ++{ ++ int rv = SOC_E_NONE; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ NET_REG_TRACE(("%s going to read phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr)); ++ ++ switch(reg_addr) { ++ /* Map shadow registers */ ++ case 0x18: ++ if (reg_bank <= 0x0007) { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, (reg_bank << 12) | 0x7); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ case 0x1C: ++ if (reg_bank <= 0x00F) { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, (reg_bank << 10)); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ default: ++ if (reg_addr > 0x001e) { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ } ++ ++ if (SOC_SUCCESS(rv)) { ++ iproc_mii_read(MII_DEV_EXT, phyaddr, reg_addr, data); ++ NET_REG_TRACE(("%s rd phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, *data)); ++ } ++ ++ return rv; ++} ++ ++ ++int ++phy5481_mod_reg(uint eth_num, uint phyaddr, uint16 reg_bank, ++ uint8 reg_addr, uint16 data, uint16 mask) ++{ ++ int rv = SOC_E_NONE; ++ uint16 org_data, rd_data; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ NET_REG_TRACE(("%s going to modify phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x) mask(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, data, mask)); ++ ++ switch(reg_addr) { ++ /* Map shadow registers */ ++ case 0x18: ++ if (reg_bank <= 0x0007) { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, (reg_bank << 12) | 0x7); ++ if (reg_bank == 0x0007) { ++ data |= 0x8000; ++ mask |= 0x8000; ++ } ++ mask &= ~(0x0007); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ case 0x1C: ++ if (reg_bank <= 0x001F) { ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, (reg_bank << 10)); ++ data |= 0x8000; ++ mask |= 0x8000; ++ mask &= ~(0x1F << 
10); ++ } else { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ default: ++ if (reg_addr > 0x001e) { ++ rv = SOC_E_PARAM; ++ } ++ break; ++ } ++ ++ if (SOC_SUCCESS(rv)) { ++ iproc_mii_read(MII_DEV_EXT, phyaddr, reg_addr, &rd_data); ++ NET_REG_TRACE(("%s rd phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rd_data)); ++ org_data = rd_data; ++ rd_data &= ~(mask); ++ rd_data |= data; ++ iproc_mii_write(MII_DEV_EXT, phyaddr, reg_addr, rd_data); ++ NET_REG_TRACE(("%s wrt phyaddr(0x%x) reg_bank(0x%x) reg_addr(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg_bank, reg_addr, rd_data)); ++ } ++ ++ return rv; ++} ++ ++void ++phy5481_ge_reset(uint eth_num, uint phyaddr) ++{ ++ uint16 ctrl; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ /* set reset flag */ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl); ++ ctrl |= MII_CTRL_RESET; ++ phy5481_wr_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl); ++ ++ SPINWAIT( (!phy5481_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl) ++ && (ctrl & MII_CTRL_RESET)), 100000); ++ /* check if out of reset */ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &ctrl); ++ if (ctrl & MII_CTRL_RESET) { ++ /* timeout */ ++ NET_ERROR(("et%d: %s reset not complete\n", eth_num, __FUNCTION__)); ++ } else { ++ NET_ERROR(("et%d: %s reset complete\n", eth_num, __FUNCTION__)); ++ } ++ ++ return; ++} ++ ++ ++/* ++ * Function: ++ * phy5481_ge_init ++ * Purpose: ++ * Initialize the PHY (MII mode) to a known good state. ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * Returns: ++ * SOC_E_XXX ++ ++ * Notes: ++ * No synchronization performed at this level. 
++ */ ++int ++phy5481_ge_init(uint eth_num, uint phyaddr) ++{ ++ uint16 mii_ana, mii_ctrl, mii_gb_ctrl; ++ ++ /* Reset PHY */ ++ phy5481_ge_reset(eth_num, phyaddr); ++ ++ /* set advertized bits */ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_ANAr_BANK, PHY_MII_ANAr_ADDR, &mii_ana); ++ mii_ana |= MII_ANA_FD_100 | MII_ANA_FD_10; ++ mii_ana |= MII_ANA_HD_100 | MII_ANA_HD_10; ++ phy5481_wr_reg(eth_num, phyaddr, PHY_MII_ANAr_BANK, PHY_MII_ANAr_ADDR, &mii_ana); ++ ++ ++ mii_ctrl = MII_CTRL_FD | MII_CTRL_SS_1000 | MII_CTRL_AE | MII_CTRL_RAN; ++ mii_gb_ctrl = MII_GB_CTRL_ADV_1000FD | MII_GB_CTRL_PT; ++ ++ phy5481_wr_reg(eth_num, phyaddr, PHY_MII_GB_CTRLr_BANK, PHY_MII_GB_CTRLr_ADDR, &mii_gb_ctrl); ++ phy5481_wr_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ ++ return SOC_E_NONE; ++} ++ ++ ++#ifdef BCMINTERNAL ++/* ++ * Function: ++ * phy5481_ge_speed_set ++ * Purpose: ++ * Set the current operating speed (forced). ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * duplex - (OUT) Boolean, true indicates full duplex, false ++ * indicates half. ++ * Returns: ++ * SOC_E_XXX ++ * Notes: ++ * No synchronization performed at this level. Autonegotiation is ++ * not manipulated. 
++ */ ++int ++phy5481_ge_speed_set(uint eth_num, uint phyaddr, int speed) ++{ ++ uint16 mii_ctrl; ++ ++ if (speed == 0) { ++ return SOC_E_NONE; ++ } ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ mii_ctrl &= ~(MII_CTRL_SS_LSB | MII_CTRL_SS_MSB); ++ switch(speed) { ++ case 10: ++ mii_ctrl |= MII_CTRL_SS_10; ++ break; ++ case 100: ++ mii_ctrl |= MII_CTRL_SS_100; ++ break; ++ case 1000: ++ mii_ctrl |= MII_CTRL_SS_1000; ++ break; ++ default: ++ return SOC_E_CONFIG; ++ } ++ ++ phy5481_wr_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ return SOC_E_NONE; ++} ++#endif /* BCMINTERNAL */ ++ ++void ++phy5481_reset_setup(uint eth_num, uint phyaddr) ++{ ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ phy5481_ge_init(eth_num, phyaddr); ++ ++ /* copper regs */ ++ /* remove power down */ ++ phy5481_mod_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, 0, MII_CTRL_PD); ++ /* Disable super-isolate */ ++ phy5481_mod_reg(eth_num, phyaddr, PHY_MII_POWER_CTRLr_BANK, PHY_MII_POWER_CTRLr_ADDR, 0, PHY5481_SUPER_ISOLATE_MODE); ++ /* Enable extended packet length */ ++ phy5481_mod_reg(eth_num, phyaddr, PHY_MII_AUX_CTRLr_BANK, PHY_MII_AUX_CTRLr_ADDR, 0x4000, 0x4000); ++ ++ return; ++} ++ ++ ++/* ++ * Function: ++ * phy5481_init ++ * Purpose: ++ * Initialize xgxs6 phys ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * Returns: ++ * 0 ++ */ ++int ++phy5481_init(uint eth_num, uint phyaddr) ++{ ++ uint16 phyid0, phyid1; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID0r_BANK, PHY_MII_PHY_ID0r_ADDR, &phyid0); ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID1r_BANK, PHY_MII_PHY_ID1r_ADDR, &phyid1); ++ ++ printf("%s phyaddr(0x%x) Phy ChipID: 0x%04x:0x%04x\n", __FUNCTION__, phyaddr, phyid1, phyid0); ++ ++ phy5481_reset_setup(eth_num, phyaddr); ++ ++ return 0; ++} ++ ++ ++/* ++ * 
Function: ++ * phy5481_link_get ++ * Purpose: ++ * Determine the current link up/down status ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * link - (OUT) Boolean, true indicates link established. ++ * Returns: ++ * SOC_E_XXX ++ * Notes: ++ * No synchronization performed at this level. ++ */ ++int ++phy5481_link_get(uint eth_num, uint phyaddr, int *link) ++{ ++ uint16 mii_ctrl, mii_stat; ++ uint32 wait; ++ ++ *link = FALSE; /* Default */ ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ /* the first read of status register will not show link up, second read will show link up */ ++ if (!(mii_stat & MII_STAT_LA) ) { ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ } ++ ++ if (!(mii_stat & MII_STAT_LA) || (mii_stat == 0xffff)) { ++ /* mii_stat == 0xffff check is to handle removable PHY daughter cards */ ++ return SOC_E_NONE; ++ } ++ ++ /* Link appears to be up; we are done if autoneg is off. */ ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ if (!(mii_ctrl & MII_CTRL_AE)) { ++ *link = TRUE; ++ return SOC_E_NONE; ++ } ++ ++ /* ++ * If link appears to be up but autonegotiation is still in ++ * progress, wait for it to complete. For BCM5228, autoneg can ++ * still be busy up to about 200 usec after link is indicated. Also ++ * continue to check link state in case it goes back down. 
++ * wait 500ms (500000us/10us = 50000 ) ++ */ ++ for (wait=0; wait<50000; wait++) { ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ ++ if (!(mii_stat & MII_STAT_LA)) { ++ /* link is down */ ++ return SOC_E_NONE; ++ } ++ ++ if (mii_stat & MII_STAT_AN_DONE) { ++ /* AutoNegotiation done */ ++ break; ++ } ++ ++ OSL_DELAY(10); ++ } ++ if (wait>=50000) { ++ /* timeout */ ++ return SOC_E_BUSY; ++ } ++ ++ /* Return link state at end of polling */ ++ *link = ((mii_stat & MII_STAT_LA) != 0); ++ ++ return SOC_E_NONE; ++} ++ ++/* ++ * Function: ++ * phy5481_enable_set ++ * Purpose: ++ * Enable/Disable phy ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * enable - on/off state to set ++ * Returns: ++ * 0 ++ */ ++int ++phy5481_enable_set(uint eth_num, uint phyaddr, int enable) ++{ ++ uint16 power_down; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ power_down = (enable) ? 0 : MII_CTRL_PD; ++ ++ phy5481_mod_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, power_down, MII_CTRL_PD); ++ ++ return SOC_E_NONE; ++} ++ ++ ++#ifdef BCMINTERNAL ++ ++/* ++ * Function: ++ * phy5481_speed_set ++ * Purpose: ++ * Set PHY speed ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * speed - link speed in Mbps ++ * Returns: ++ * 0 ++ */ ++int ++phy5481_speed_set(uint eth_num, uint phyaddr, int speed) ++{ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ phy5481_ge_speed_set(eth_num, phyaddr, speed); ++ ++ return 0; ++} ++#endif /* BCMINTERNAL */ ++ ++ ++/* ++ * Function: ++ * phy5481_auto_negotiate_gcd (greatest common denominator). ++ * Purpose: ++ * Determine the current greatest common denominator between ++ * two ends of a link ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. ++ * speed - (OUT) greatest common speed. ++ * duplex - (OUT) greatest common duplex. 
++ * link - (OUT) Boolean, true indicates link established. ++ * Returns: ++ * SOC_E_XXX ++ * Notes: ++ * No synchronization performed at this level. ++ */ ++static int ++phy5481_auto_negotiate_gcd(uint eth_num, uint phyaddr, int *speed, int *duplex) ++{ ++ int t_speed, t_duplex; ++ uint16 mii_ana, mii_anp, mii_stat; ++ uint16 mii_gb_stat, mii_esr, mii_gb_ctrl; ++ ++ mii_gb_stat = 0; /* Start off 0 */ ++ mii_gb_ctrl = 0; /* Start off 0 */ ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_ANAr_BANK, PHY_MII_ANAr_ADDR, &mii_ana); ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_ANPr_BANK, PHY_MII_ANPr_ADDR, &mii_anp); ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ ++ if (mii_stat & MII_STAT_ES) { /* Supports extended status */ ++ /* ++ * If the PHY supports extended status, check if it is 1000MB ++ * capable. If it is, check the 1000Base status register to see ++ * if 1000MB negotiated. ++ */ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_ESRr_BANK, PHY_MII_ESRr_ADDR, &mii_esr); ++ ++ if (mii_esr & (MII_ESR_1000_X_FD | MII_ESR_1000_X_HD | ++ MII_ESR_1000_T_FD | MII_ESR_1000_T_HD)) { ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_GB_STATr_BANK, PHY_MII_GB_STATr_ADDR, &mii_gb_stat); ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_GB_CTRLr_BANK, PHY_MII_GB_CTRLr_ADDR, &mii_gb_ctrl); ++ } ++ } ++ ++ /* ++ * At this point, if we did not see Gig status, one of mii_gb_stat or ++ * mii_gb_ctrl will be 0. This will cause the first 2 cases below to ++ * fail and fall into the default 10/100 cases. 
++ */ ++ ++ mii_ana &= mii_anp; ++ ++ if ((mii_gb_ctrl & MII_GB_CTRL_ADV_1000FD) && ++ (mii_gb_stat & MII_GB_STAT_LP_1000FD)) { ++ t_speed = 1000; ++ t_duplex = 1; ++ } else if ((mii_gb_ctrl & MII_GB_CTRL_ADV_1000HD) && ++ (mii_gb_stat & MII_GB_STAT_LP_1000HD)) { ++ t_speed = 1000; ++ t_duplex = 0; ++ } else if (mii_ana & MII_ANA_FD_100) { /* [a] */ ++ t_speed = 100; ++ t_duplex = 1; ++ } else if (mii_ana & MII_ANA_T4) { /* [b] */ ++ t_speed = 100; ++ t_duplex = 0; ++ } else if (mii_ana & MII_ANA_HD_100) { /* [c] */ ++ t_speed = 100; ++ t_duplex = 0; ++ } else if (mii_ana & MII_ANA_FD_10) { /* [d] */ ++ t_speed = 10; ++ t_duplex = 1 ; ++ } else if (mii_ana & MII_ANA_HD_10) { /* [e] */ ++ t_speed = 10; ++ t_duplex = 0; ++ } else { ++ return(SOC_E_FAIL); ++ } ++ ++ if (speed) *speed = t_speed; ++ if (duplex) *duplex = t_duplex; ++ ++ return(SOC_E_NONE); ++} ++ ++ ++/* ++ * Function: ++ * phy5481_speed_get ++ * Purpose: ++ * Get PHY speed ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * speed - current link speed in Mbps ++ * Returns: ++ * 0 ++ */ ++int ++phy5481_speed_get(uint eth_num, uint phyaddr, int *speed, int *duplex) ++{ ++ int rv; ++ uint16 mii_ctrl, mii_stat; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &mii_stat); ++ ++ *speed = 0; ++ *duplex = 0; ++ if (mii_ctrl & MII_CTRL_AE) { /* Auto-negotiation enabled */ ++ if (!(mii_stat & MII_STAT_AN_DONE)) { /* Auto-neg NOT complete */ ++ rv = SOC_E_NONE; ++ } else { ++ rv = phy5481_auto_negotiate_gcd(eth_num, phyaddr, speed, duplex); ++ } ++ } else { /* Auto-negotiation disabled */ ++ /* ++ * Simply pick up the values we force in CTRL register. 
++ */ ++ if (mii_ctrl & MII_CTRL_FD) ++ *duplex = 1; ++ ++ switch(MII_CTRL_SS(mii_ctrl)) { ++ case MII_CTRL_SS_10: ++ *speed = 10; ++ break; ++ case MII_CTRL_SS_100: ++ *speed = 100; ++ break; ++ case MII_CTRL_SS_1000: ++ *speed = 1000; ++ break; ++ default: /* Just pass error back */ ++ return(SOC_E_UNAVAIL); ++ } ++ rv = SOC_E_NONE; ++ } ++ ++ return(rv); ++} ++ ++ ++#ifdef BCMINTERNAL ++int ++phy5481_lb_set(uint eth_num, uint phyaddr, int enable) ++{ ++ uint16 mii_ctrl; ++ ++ /* set reset flag */ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ mii_ctrl &= ~MII_CTRL_LE; ++ mii_ctrl |= enable ? MII_CTRL_LE : 0; ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &mii_ctrl); ++ ++ return 0; ++} ++#endif /* BCMINTERNAL */ ++ ++ ++#ifdef BCMINTERNAL ++void ++phy5481_disp_status(uint eth_num, uint phyaddr) ++{ ++ uint16 tmp0, tmp1, tmp2; ++ int speed, duplex; ++ ++ printf("et%d: %s: phyaddr:%d\n", eth_num, __FUNCTION__, phyaddr); ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_CTRLr_BANK, PHY_MII_CTRLr_ADDR, &tmp0); ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &tmp1); ++ printf(" MII-Control: 0x%x; MII-Status: 0x%x\n", tmp0, tmp1); ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID0r_BANK, PHY_MII_PHY_ID0r_ADDR, &tmp0); ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_PHY_ID1r_BANK, PHY_MII_PHY_ID1r_ADDR, &tmp1); ++ printf(" Phy ChipID: 0x%04x:0x%04x\n", tmp0, tmp1); ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_ANAr_BANK, PHY_MII_ANAr_ADDR, &tmp0); ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_ANPr_BANK, PHY_MII_ANPr_ADDR, &tmp1); ++ phy5481_speed_get(eth_num, phyaddr, &speed, &duplex); ++ printf(" AutoNeg Ad: 0x%x; AutoNeg Partner: 0x%x; speed:%d; duplex:%d\n", tmp0, tmp1, speed, duplex); ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_ESRr_BANK, PHY_MII_ESRr_ADDR, &tmp0); ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_ECRr_BANK, PHY_MII_ECRr_ADDR, &tmp1); ++ 
phy5481_rd_reg(eth_num, phyaddr, 0x0000, 0x11, &tmp2); ++ printf(" Reg0x0f: 0x%x; 100Base-X AUX ctrl: 0x%x; 100Base-X AUX stat: 0x%x\n", tmp0, tmp1, tmp2); ++ ++ phy5481_rd_reg(eth_num, phyaddr, 0x0000, 0x12, &tmp0); ++ phy5481_rd_reg(eth_num, phyaddr, 0x0000, 0x13, &tmp1); ++ phy5481_rd_reg(eth_num, phyaddr, 0x0000, 0x14, &tmp2); ++ printf(" 100Base-X RCV ERR: 0x%x; 100Base-X FALSE CARRIER: 0x%x; 100Base-X DISCON: 0x%x\n", tmp0, tmp1, tmp2); ++} ++#endif /* BCMINTERNAL */ ++ ++ ++#ifdef BCMINTERNAL ++void ++phy5481_chk_err(uint eth_num, uint phyaddr) ++{ ++ uint16 tmp0; ++ ++ phy5481_rd_reg(eth_num, phyaddr, PHY_MII_STATr_BANK, PHY_MII_STATr_ADDR, &tmp0); ++ if (!(tmp0 & MII_STAT_LA)) ++ printf("ERROR: reg 0x01 (LINK down): 0x%x\n", tmp0); ++ if (tmp0 & (MII_STAT_JBBR|MII_STAT_RF)) { ++ printf("ERROR: reg 0x01: 0x%x\n", tmp0); ++ } ++ ++ phy5481_rd_reg(eth_num, phyaddr, 0, 0x11, &tmp0); ++ if (!(tmp0 & 0x100)) { ++ printf("ERROR: reg 0x11 (LINK down): 0x%x\n", tmp0); ++ } ++ if (tmp0 & 0x8bf) { ++ printf("ERROR: reg 0x11: 0x%x\n", tmp0); ++ } ++ ++ phy5481_rd_reg(eth_num, phyaddr, 0, 0x12, &tmp0); ++ if (tmp0) { ++ printf("ERROR: reg 0x12 (RCV ERR CNT): 0x%x\n", tmp0); ++ } ++ ++ phy5481_rd_reg(eth_num, phyaddr, 0, 0x13, &tmp0); ++ if (tmp0) { ++ printf("ERROR: reg 0x13 (FALSE CARRIER CNT): 0x%x\n", tmp0); ++ } ++ ++ phy5481_rd_reg(eth_num, phyaddr, 0, 0x14, &tmp0); ++ if (tmp0 & 0xc000) { ++ printf("ERROR: reg 0x14: 0x%x\n", tmp0); ++ } ++ ++ phy5481_rd_reg(eth_num, phyaddr, 0, 0x19, &tmp0); ++ if (!(tmp0 & 0x4)) { ++ printf("ERROR: reg 0x19 (LINK down): 0x%x\n", tmp0); ++ } ++ if (tmp0 & 0xc0) { ++ printf("ERROR: reg 0x19: 0x%x\n", tmp0); ++ } ++} ++#endif /* BCMINTERNAL */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_serdes.c b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_serdes.c +--- 
a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_serdes.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmiproc_serdes.c 2017-11-09 17:53:44.029293000 +0800 +@@ -0,0 +1,879 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * These routines provide access to the serdes ++ * ++ */ ++ ++/* ---- Include Files ---------------------------------------------------- */ ++#include ++#include ++#include "bcmiproc_serdes.h" ++#include "bcmiproc_serdes_def.h" ++#include "../../../mdio/iproc_mdio.h" ++ ++/* ---- External Variable Declarations ----------------------------------- */ ++/* ---- External Function Prototypes ------------------------------------- */ ++/* ---- Public Variables ------------------------------------------------- */ ++/* ---- Private Constants and Types -------------------------------------- */ ++/* ---- Private Variables ------------------------------------------------ */ ++ ++/* debug/trace */ ++//#define BCMDBG ++//#define BCMDBG_ERR ++#ifdef BCMDBG ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) printf args ++#elif defined(BCMDBG_ERR) ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) ++#else ++#define NET_ERROR(args) ++#define NET_TRACE(args) ++#endif /* BCMDBG */ ++#define NET_REG_TRACE(args) ++ ++ ++#ifndef ASSERT ++#define ASSERT(exp) ++#endif ++ ++ ++#if defined(CONFIG_MACH_SB2) ++/* CL22 register access for VIPERCORE in Saber2 */ ++#define PHY_AER_REG_ADDR_AER(_addr) (((_addr) >> 16) & 0x0000FFFF) ++#define PHY_AER_REG_ADDR_BLK(_addr) (((_addr) & 0x0000FFF0)) ++#define PHY_AER_REG_ADDR_REGAD(_addr) ((((_addr) & 0x00008000) >> 11) | \ ++ ((_addr) & 0x0000000F)) ++#endif ++ ++/* ==== Public Functions ================================================= */ ++ ++void ++serdes_set_blk(uint eth_num, uint phyaddr, uint blk) ++{ ++ uint16 blkaddr; ++ uint16 destblk = (uint16)blk; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ NET_REG_TRACE(("%s phyaddr(0x%x) 
blk(0x%x)\n", ++ __FUNCTION__, phyaddr, blk)); ++ ++ /* check if need to update blk addr */ ++ iproc_mii_read(MII_DEV_LOCAL, phyaddr, PHY_REG_BLK_ADDR, &blkaddr); ++ if (blkaddr!=destblk) { ++ /* write block address */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, PHY_REG_BLK_ADDR, destblk); ++ } ++} ++ ++ ++void ++serdes_wr_reg(uint eth_num, uint phyaddr, uint reg, uint data) ++{ ++ uint16 tmpdata=(uint16)data; ++#if defined(CONFIG_MACH_SB2) ++ uint16 phy_reg_aer = 0, phy_reg_blk = 0, phy_reg_addr = 0; ++ ++ phy_reg_aer = PHY_AER_REG_ADDR_AER(reg); /* upper 16 bits */ ++ phy_reg_blk = PHY_AER_REG_ADDR_BLK(reg); /* 12 bits mask=0xfff0 */ ++ phy_reg_addr = PHY_AER_REG_ADDR_REGAD(reg); /* 5 bits {15,3,2,1,0} */ ++ ++ if (phy_reg_aer != 0) { ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0xffd0); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001e, phy_reg_aer); ++ } ++ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, phy_reg_blk); /* Map block */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, phy_reg_addr, tmpdata); /* write register */ ++ ++ if (phy_reg_aer != 0) { ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0xffd0); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001e, 0x0); ++ } ++#else ++ uint blk = reg&0x7ff0; ++ uint off = reg&0x000f; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ if (reg&0x8000) ++ off|=0x10; ++ ++ /* set block address */ ++ serdes_set_blk(eth_num, phyaddr, blk); ++ ++ NET_REG_TRACE(("%s wrt phyaddr(0x%x) reg(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg, tmpdata)); ++ //printf("%s wrt phyaddr(0x%x) reg(0x%x) data(0x%x)\n", ++ // __FUNCTION__, phyaddr, reg, tmpdata); ++ /* write register */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, off, tmpdata); ++#endif ++} ++ ++ ++uint16 ++serdes_rd_reg(uint eth_num, uint phyaddr, uint reg) ++{ ++ uint16 data; ++#if defined(CONFIG_MACH_SB2) ++ uint16 phy_reg_aer = 0, phy_reg_blk = 0, phy_reg_addr = 0; ++ ++ phy_reg_aer = PHY_AER_REG_ADDR_AER(reg); /* upper 16 bits */ ++ phy_reg_blk = 
PHY_AER_REG_ADDR_BLK(reg); /* 12 bits mask=0xfff0 */ ++ phy_reg_addr = PHY_AER_REG_ADDR_REGAD(reg); /* 5 bits {15,3,2,1,0} */ ++ ++ if (phy_reg_aer != 0) { ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0xffd0); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001e, phy_reg_aer); ++ } ++ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, phy_reg_blk); /* Map block */ ++ iproc_mii_read(MII_DEV_LOCAL, phyaddr, phy_reg_addr, &data); /* read register */ ++ ++ if (phy_reg_aer != 0) { ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0xffd0); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001e, 0x0); ++ } ++#else ++ uint blk = reg&0x7ff0; ++ uint off = reg&0x000f; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ if (reg&0x8000) ++ off|=0x10; ++ ++ /* set block address */ ++ serdes_set_blk(eth_num, phyaddr, blk); ++ ++ /* read register */ ++ iproc_mii_read(MII_DEV_LOCAL, phyaddr, off, &data); ++ NET_REG_TRACE(("%s rd phyaddr(0x%x) reg(0x%x) data(0x%x)\n", ++ __FUNCTION__, phyaddr, reg, data)); ++ //printf("%s rd phyaddr(0x%x) reg(0x%x) data(0x%x)\n", ++ // __FUNCTION__, phyaddr, reg, data); ++#endif ++ ++ return data; ++} ++ ++ ++uint16 ++serdes_get_id(uint eth_num, uint phyaddr, uint off) ++{ ++ ++ ASSERT(phyaddr < MAXEPHY); ++ ++ if (phyaddr == EPHY_NOREG) ++ return 0; ++ ++ /* read the id high */ ++ return serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESID_SERDESID0r+off); ++} ++ ++ ++void ++serdes_reset(uint eth_num, uint phyaddr) ++{ ++ uint16 ctrl; ++ ++ ASSERT(phyaddr < MAXEPHY); ++ ++ if (phyaddr == EPHY_NOREG) ++ return; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ /* set reset flag */ ++ ctrl = serdes_rd_reg(eth_num, phyaddr, XGXS16G_IEEE0BLK_IEEECONTROL0r); ++ ctrl |= IEEE0BLK_IEEECONTROL0_RST_HW_MASK; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_IEEE0BLK_IEEECONTROL0r, ctrl); ++ udelay(100); ++ /* check if out of reset */ ++ if (serdes_rd_reg(eth_num, phyaddr, XGXS16G_IEEE0BLK_IEEECONTROL0r) & 
IEEE0BLK_IEEECONTROL0_RST_HW_MASK) { ++ NET_ERROR(("et%d: %s reset not complete\n", eth_num, __FUNCTION__)); ++ } ++} ++ ++ ++int ++serdes_reset_core(uint eth_num, uint phyaddr) ++{ ++ uint16 data16; ++ uint16 serdes_id2; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ /* get serdes id */ ++ serdes_id2 = serdes_get_id(eth_num, phyaddr, 2); ++ printf("et%d %s pbyaddr(0x%x) id2(0x%x)\n", eth_num, __FUNCTION__, phyaddr, serdes_id2); ++ ++ /* unlock lane */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, WC40_DIGITAL4_MISC3r); ++ data16 &= ~(DIGITAL4_MISC3_LANEDISABLE_MASK); ++ serdes_wr_reg(eth_num, phyaddr, WC40_DIGITAL4_MISC3r, data16); ++ ++ if ( phyaddr == 1 ) { ++ /* Reset the core */ ++ /* Stop PLL Sequencer and configure the core into correct mode */ ++ data16 = (XGXSBLK0_XGXSCONTROL_MODE_10G_IndLane << ++ XGXSBLK0_XGXSCONTROL_MODE_10G_SHIFT) | ++ XGXSBLK0_XGXSCONTROL_HSTL_MASK | ++ XGXSBLK0_XGXSCONTROL_CDET_EN_MASK | ++ XGXSBLK0_XGXSCONTROL_EDEN_MASK | ++ XGXSBLK0_XGXSCONTROL_AFRST_EN_MASK | ++ XGXSBLK0_XGXSCONTROL_TXCKO_DIV_MASK; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_XGXSCONTROLr, data16); ++ ++ /* Disable IEEE block select auto-detect. ++ * The driver will select desired block as necessary. ++ * By default, the driver keeps the XAUI block in ++ * IEEE address space. ++ */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_MISCCONTROL1r); ++ if (XGXS16G_2p5G_ID(serdes_id2)) { ++ data16 &= ~( XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_AUTODET_MASK | ++ XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_VAL_MASK); ++ } else { ++ data16 &= ~( XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_AUTODET_MASK | ++ XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_VAL_MASK); ++#if (!defined(CONFIG_MACH_KT2)) ++ data16 |= XGXSBLK0_MISCCONTROL1_IEEE_BLKSEL_VAL_MASK; ++#endif /* (!defined(CONFIG_MACH_KT2)) */ ++ } ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_MISCCONTROL1r, data16); ++ ++ /* disable in-band MDIO. 
PHY-443 */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, 0x8111); ++ /* rx_inBandMdio_rst */ ++ data16 |= 1 << 3; ++ serdes_wr_reg(eth_num, phyaddr, 0x8111, data16); ++ } ++ return 0; ++} ++ ++ ++int ++serdes_start_pll(uint eth_num, uint phyaddr) ++{ ++ uint16 data16; ++ ++ if ( phyaddr == 1 ) { ++ uint32 count=250; ++ /* Start PLL Sequencer and wait for PLL to lock */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_XGXSCONTROLr); ++ data16 |= XGXSBLK0_XGXSCONTROL_START_SEQUENCER_MASK; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_XGXSCONTROLr, data16); ++ ++ /* wait for PLL to lock */ ++ while (count!=0) { ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_XGXSSTATUSr); ++ if ( data16 & XGXSBLK0_XGXSSTATUS_TXPLL_LOCK_MASK ) { ++ break; ++ } ++ /* wait 1 usec then dec counter */ ++ udelay(10); ++ count--; ++ } ++ if (count == 0) { ++ NET_ERROR(("%s TXPLL did not lock\n", __FUNCTION__)); ++ } ++ } ++ return 0; ++} ++ ++ ++/* ++ * Function: ++ * serdes_init ++ * Purpose: ++ * Initialize xgxs6 phys ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * Returns: ++ * 0 ++ */ ++int ++serdes_init(uint eth_num, uint phyaddr) ++{ ++#if defined(CONFIG_MACH_SB2) ++ ++#if 0 ++ /* Speed = 10M */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8000); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0c2f); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8300); ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0120); /* Mode = AN */ ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0000); /* Mode = Force */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8000); ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0000, 0x1100); /* Mode = AN */ ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0000, 0x0100); /* Mode = Force */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x2c2f); ++#endif ++ ++#if 0 ++ /* Speed = 100M */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8000); ++ 
iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0c2f); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8300); ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0120); /* Mode = AN */ ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0000); /* Mode = Force */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8000); ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0000, 0x3100); /* Mode = AN */ ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0000, 0x2100); /* Mode = Force */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x2c2f); ++#endif ++ ++#if 0 ++ /* Speed = 1G SGMII */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8000); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0c2f); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8300); ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0120); /* Mode = AN */ ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0000); /* Mode = Force */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8000); ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0000, 0x1140); /* Mode = AN */ ++ // iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0000, 0x0140); /* Mode = Force */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x2c3f); ++#endif ++ ++#if 1 ++ /* Auto Negotiation 10M/100M/1G ¡V SGMII Slave */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8000); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0c2f); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8300); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0100); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8000); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0000, 0x1140); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x2c3f); ++#endif ++ ++#else ++ ++ uint16 data16; ++ uint16 serdes_id0, serdes_id1, serdes_id2; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ /* get serdes id */ ++ serdes_id0 = serdes_get_id(eth_num, phyaddr, 0); ++ serdes_id1 = 
serdes_get_id(eth_num, phyaddr, 1); ++ serdes_id2 = serdes_get_id(eth_num, phyaddr, 2); ++ printf("%s phyaddr(0x%x) id0(0x%x) id1(0x%x) id2(0x%x)\n", __FUNCTION__, phyaddr, serdes_id0, serdes_id1, serdes_id2); ++ ++ /* get more ids */ ++ serdes_id0 = serdes_rd_reg(eth_num, phyaddr, 2); ++ serdes_id1 = serdes_rd_reg(eth_num, phyaddr, 3); ++ //printf("%s phyaddr(0x%x) SERDES PhyID_MS(0x%x) PhyID_LS(0x%x)\n", __FUNCTION__, phyaddr, serdes_id0, serdes_id1); ++ ++ /* unlock lane */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, WC40_DIGITAL4_MISC3r); ++ data16 &= ~(DIGITAL4_MISC3_LANEDISABLE_MASK); ++ serdes_wr_reg(eth_num, phyaddr, WC40_DIGITAL4_MISC3r, data16); ++ ++ /* disable CL73 BAM */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, 0x8372); ++ data16 &= ~(CL73_USERB0_CL73_BAMCTRL1_CL73_BAMEN_MASK); ++ serdes_wr_reg(eth_num, phyaddr, 0x8372, data16); ++ ++ /* Set Local Advertising Configuration */ ++ data16 = MII_ANA_C37_FD | MII_ANA_C37_PAUSE | MII_ANA_C37_ASYM_PAUSE; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_COMBO_IEEE0_AUTONEGADVr, data16); ++ ++ /* Disable BAM in Independent Lane mode. 
Over1G AN not supported */ ++ data16 = 0; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_BAM_NEXTPAGE_MP5_NEXTPAGECTRLr, data16); ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_BAM_NEXTPAGE_UD_FIELDr, data16); ++ ++ data16 = SERDESDIGITAL_CONTROL1000X1_CRC_CHECKER_DISABLE_MASK | ++ SERDESDIGITAL_CONTROL1000X1_DISABLE_PLL_PWRDWN_MASK; ++ /* ++ * Put the Serdes in SGMII mode ++ * bit0 = 0; in SGMII mode ++ */ ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X1r, data16); ++ ++ /* set autoneg */ ++ data16 = MII_CTRL_AE | MII_CTRL_RAN; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_COMBO_IEEE0_MIICNTLr, data16); ++ ++ /* Disable 10G parallel detect */ ++ data16 = 0; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_AN73_PDET_PARDET10GCONTROLr, data16); ++ ++ /* Disable BAM mode and Teton mode */ ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_BAM_NEXTPAGE_MP5_NEXTPAGECTRLr, data16); ++ ++ /* Enable lanes */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK1_LANECTRL0r); ++ data16 |= XGXSBLK1_LANECTRL0_CL36_PCS_EN_RX_MASK | ++ XGXSBLK1_LANECTRL0_CL36_PCS_EN_TX_MASK; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_XGXSBLK1_LANECTRL0r, data16); ++ ++ /* set elasticity fifo size to 13.5k to support 12k jumbo pkt size*/ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X3r); ++ data16 &= SERDESDIGITAL_CONTROL1000X3_FIFO_ELASICITY_TX_RX_MASK; ++ data16 |= (1 << 2); ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X3r, data16); ++ ++ /* Enabble LPI passthru' for native mode EEE */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_REMOTEPHY_MISC5r); ++ data16 |= 0xc000; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_REMOTEPHY_MISC5r, data16); ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK7_EEECONTROLr); ++ data16 |= 0x0007; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_XGXSBLK7_EEECONTROLr, data16); ++#endif ++ ++ return 0; ++} ++ ++ ++#ifdef BCMINTERNAL ++/* ++ * Function: ++ * serdes_enable_set ++ * Purpose: ++ * 
Enable/Disable phy ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * enable - on/off state to set ++ * Returns: ++ * 0 ++ */ ++int ++serdes_enable_set(uint eth_num, uint phyaddr, int enable) ++{ ++ uint16 data16, mask16; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK1_LANECTRL3r); ++ mask16 = (1 << (phyaddr-1)); /* rx lane */ ++ mask16 |= (mask16 << 4); /* add tx lane */ ++ mask16 |= 0x800; ++ if (enable) { ++ data16 &= ~(mask16); ++ } else { ++ data16 &= ~(mask16); ++ data16 |= mask16; ++ } ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_XGXSBLK1_LANECTRL3r, data16); ++ ++ return 0; ++} ++ ++ ++/* ++ * Function: ++ * serdes_speed_set ++ * Purpose: ++ * Set PHY speed ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * speed - link speed in Mbps ++ * Returns: ++ * 0 ++ */ ++int ++serdes_speed_set(uint eth_num, uint phyaddr, int speed) ++{ ++ uint16 speed_val, mask; ++ uint16 data16; ++ uint16 speed_mii; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ if (speed > 1000) { ++ return -1; ++ } ++ ++ speed_val = 0; ++ speed_mii = 0; ++ mask = SERDESDIGITAL_MISC1_FORCE_SPEED_SEL_MASK | ++ SERDESDIGITAL_MISC1_FORCE_SPEED_MASK; ++ ++ switch (speed) { ++ case 0: ++ /* Do not change speed */ ++ return 0; ++ case 10: ++ speed_mii = MII_CTRL_SS_10; ++ break; ++ case 100: ++ speed_mii = MII_CTRL_SS_100; ++ break; ++ case 1000: ++ speed_mii = MII_CTRL_SS_1000; ++ break; ++ default: ++ return -1; ++ } ++ ++ /* Hold rxSeqStart */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_RX0_RX_CONTROLr); ++ data16 |= DSC_2_0_DSC_CTRL0_RXSEQSTART_MASK; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_RX0_RX_CONTROLr, data16); ++ ++ /* hold TX FIFO in reset */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X3r); ++ data16 |= SERDESDIGITAL_CONTROL1000X3_TX_FIFO_RST_MASK; ++ 
serdes_wr_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X3r, data16); ++ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_MISC1r); ++ data16 &= ~(mask); ++ data16 |= speed_val; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_MISC1r, data16); ++ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_COMBO_IEEE0_MIICNTLr); ++ data16 &= ~(MII_CTRL_AE | MII_CTRL_SS_LSB | MII_CTRL_SS_MSB); ++ data16 |= speed_mii; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_COMBO_IEEE0_MIICNTLr, data16); ++ ++ /* release rxSeqStart */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_RX0_RX_CONTROLr); ++ data16 &= ~(DSC_2_0_DSC_CTRL0_RXSEQSTART_MASK); ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_RX0_RX_CONTROLr, data16); ++ ++ /* release TX FIFO reset */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X3r); ++ data16 &= ~(SERDESDIGITAL_CONTROL1000X3_TX_FIFO_RST_MASK); ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X3r, data16); ++ ++ return 0; ++} ++ ++ ++/* ++ * Function: ++ * serdes_speed_get ++ * Purpose: ++ * Get PHY speed ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * speed - current link speed in Mbps ++ * Returns: ++ * 0 ++ */ ++int ++serdes_speed_get(uint eth_num, uint phyaddr, int *speed) ++{ ++ uint16 data16; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_STATUS1000X1r); ++ ++ data16 &= SERDESDIGITAL_STATUS1000X1_SPEED_STATUS_MASK; ++ data16 >>= SERDESDIGITAL_STATUS1000X1_SPEED_STATUS_SHIFT; ++ ++ if (data16 == 3) { ++ *speed= 2500; ++ } else if (data16 == 2) { ++ *speed= 1000; ++ } else if (data16 == 1) { ++ *speed= 100; ++ } else { ++ *speed= 10; ++ } ++ ++ return 0; ++} ++ ++ ++/* ++ * Function: ++ * phy_xgxs16g1l_lb_set ++ * Purpose: ++ * Put XGXS6/FusionCore in PHY loopback ++ * Parameters: ++ * unit - StrataSwitch unit #. ++ * port - StrataSwitch port #. 
++ * enable - binary value for on/off (1/0) ++ * Returns: ++ * 0 ++ */ ++int ++serdes_lb_set(uint eth_num, uint phyaddr, int enable) ++{ ++ uint16 misc_ctrl, data16; ++ uint16 lb_bit; ++ uint16 lb_mask; ++ ++ /* Configure Loopback in XAUI */ ++ misc_ctrl = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_MISCCONTROL1r); ++ if (misc_ctrl & XGXSBLK0_MISCCONTROL1_PCS_DEV_EN_OVERRIDE_MASK) { ++ /* PCS */ ++ lb_bit = (enable) ? IEEE0BLK_IEEECONTROL0_GLOOPBACK_MASK : 0; ++ lb_mask = IEEE0BLK_IEEECONTROL0_GLOOPBACK_MASK; ++ } else if (misc_ctrl & XGXSBLK0_MISCCONTROL1_PMD_DEV_EN_OVERRIDE_MASK) { ++ /* PMA/PMD */ ++ lb_bit = (enable) ? 1 : 0; ++ lb_mask = 1; ++ } else { ++ /* PHY XS, DTE XS */ ++ lb_bit = (enable) ? IEEE0BLK_IEEECONTROL0_GLOOPBACK_MASK : 0; ++ lb_mask = IEEE0BLK_IEEECONTROL0_GLOOPBACK_MASK; ++ } ++ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_IEEE0BLK_IEEECONTROL0r); ++ data16 &= ~(lb_mask); ++ data16 |= lb_bit; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_IEEE0BLK_IEEECONTROL0r, data16); ++ ++ /* Configure Loopback in SerDes */ ++ lb_bit = (enable) ? 
MII_CTRL_LE : 0; ++ lb_mask = MII_CTRL_LE; ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_COMBO_IEEE0_MIICNTLr); ++ data16 &= ~(lb_mask); ++ data16 |= lb_bit; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_COMBO_IEEE0_MIICNTLr, data16); ++ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_XGXSCONTROLr); ++ data16 |= 0x10; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_XGXSCONTROLr, data16); ++ ++ data16 = serdes_rd_reg(eth_num, phyaddr, 0x8017); ++ data16 = 0xff0f; ++ serdes_wr_reg(eth_num, phyaddr, 0x8017, data16); ++ ++ return 0; ++} ++ ++void ++serdes_disp_status(uint eth_num, uint phyaddr) ++{ ++ uint16 tmp0, tmp1, tmp2, tmp3; ++ ++ printf("et%d: %s: phyaddr:%d\n", eth_num, __FUNCTION__, phyaddr); ++ ++ tmp0 = serdes_get_id(eth_num, phyaddr, 0); ++ tmp1 = serdes_get_id(eth_num, phyaddr, 1); ++ tmp2 = serdes_get_id(eth_num, phyaddr, 2); ++ printf(" id0(0x%x) id1(0x%x) id2(0x%x)\n", tmp0, tmp1, tmp2); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_IEEE0BLK_IEEECONTROL0r); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_IEEE0BLK_IEEECONTROL0r+1); ++ printf(" MII-Control(0): 0x%x; MII-Status(1): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, 2); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, 3); ++ printf(" Phy ChipID(2:3): 0x%04x:0x%04x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, 4); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, 5); ++ tmp2 = serdes_rd_reg(eth_num, phyaddr, 0xf); ++ printf(" AN AD(4): 0x%x; AN LNK PARTNER(5): 0x%x; EXT STAT(f): 0x%x\n", tmp0, tmp1, tmp2); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_XGXSCONTROLr); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_XGXSSTATUSr); ++ printf(" XGXS-Control(8000): 0x%x; XGXS-Status(8001): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_MMDSELECTr); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK0_MISCCONTROL1r); ++ printf(" XGXS BLK0 MMD Select(800d): 0x%x; XGXS BLK0 MISC 
CTRL(800e): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK1_LANECTRL0r); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK1_LANECTRL3r); ++ printf(" XGXS BLK1 LNCTRL0(8015): 0x%x; XGXS BLK1_LNCTRL3(8018): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_RX0_RX_CONTROLr); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_AN73_PDET_PARDET10GCONTROLr); ++ tmp2 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_XGXSBLK7_EEECONTROLr); ++ printf(" XGXS RX0 CTRL(80b1): 0x%x; XGXS AN73 PARDET CTRL(8131): 0x%x; XGXS BLK7 EEECTRL(8150): 0x%x\n", tmp0, tmp1, tmp2); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, 0x8111); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, 0x8372); ++ printf(" (8111): 0x%x; (8372): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X1r); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X2r); ++ tmp2 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X3r); ++ printf(" XGXS SERDES DIG CTRL 1000X1(8300): 0x%x; XGXS SERDES DIG CTRL 1000X2(8301): 0x%x; XGXS SERDES DIGITAL CTRL 1000X3r(8302): 0x%x\n", tmp0, tmp1, tmp2); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_STATUS1000X1r); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_MISC1r); ++ printf(" XGXS SERDES DIG STATUS 1000X1(8304): 0x%x; XGXS SERDES DIG MISC1(8308): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESID_SERDESID0r); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESID_SERDESID1r); ++ printf(" XGXS SERDESID0(8310): 0x%x; XGXS SERDESID1(8311): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESID_SERDESID2r); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESID_SERDESID3r); ++ printf(" XGXS SERDESID0(8312): 0x%x; XGXS SERDESID1(8313): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_REMOTEPHY_MISC3r); ++ tmp1 = 
serdes_rd_reg(eth_num, phyaddr, XGXS16G_REMOTEPHY_MISC5r); ++ printf(" XGXS REMOTEPHY MISC3(833c): 0x%x; XGXS REMOTEPHY MISC5(833e): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_BAM_NEXTPAGE_MP5_NEXTPAGECTRLr); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_BAM_NEXTPAGE_UD_FIELDr); ++ printf(" XGXS BAM MP5_NEXTPAGECTRL(8350): 0x%x; XGXS BAM NP UD FIELDr(8357): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_COMBO_IEEE0_MIICNTLr); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_COMBO_IEEE0_AUTONEGADVr); ++ printf(" XGXS COMBO IEEE0 MIICNTL(ffe0): 0x%x; XGXS COMBO IEEE0 AUTONEGADVr(ffe4): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, 0x8050); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, 0x8122); ++ printf(" (8050): 0x%x; (8122): 0x%x\n", tmp0, tmp1); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, 0x80b0); ++ tmp1 = serdes_rd_reg(eth_num, phyaddr, 0x80c0); ++ tmp2 = serdes_rd_reg(eth_num, phyaddr, 0x80d0); ++ tmp3 = serdes_rd_reg(eth_num, phyaddr, 0x80e0); ++ printf(" (80b0): 0x%x; (80c0): 0x%x; (80d0): 0x%x, (80e0): 0x%x\n", tmp0, tmp1, tmp2, tmp3); ++ ++ tmp0 = serdes_rd_reg(eth_num, phyaddr, 0xffe1); ++ printf(" (ffe1): 0x%x\n", tmp0); ++ ++} ++#endif /* BCMINTERNAL */ ++ ++ ++#if (defined(CONFIG_SERDES_ASYMMETRIC_MODE)) ++/* ++ * Function: ++ * serdes_speeddpx_set ++ * Purpose: ++ * Set serdes speed dpx ++ * Parameters: ++ * eth_num - ethernet data ++ * phyaddr - physical address ++ * speed - link speed in Mbps ++ * fulldpx - link dpx ++ * Returns: ++ * 0 ++ */ ++int ++serdes_speeddpx_set(uint eth_num, uint phyaddr, int speed, int fulldpx) ++{ ++ uint16 speed_val, mask; ++ uint16 data16; ++ uint16 speed_mii; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ if (speed > 1000) { ++ return -1; ++ } ++ ++ speed_val = 0; ++ speed_mii = 0; ++ mask = SERDESDIGITAL_MISC1_FORCE_SPEED_SEL_MASK | ++ SERDESDIGITAL_MISC1_FORCE_SPEED_MASK; ++ ++ switch (speed) { ++ 
case 0: ++ /* Do not change speed */ ++ return 0; ++ case 10: ++ speed_mii = MII_CTRL_SS_10; ++ break; ++ case 100: ++ speed_mii = MII_CTRL_SS_100; ++ break; ++ case 1000: ++ speed_mii = MII_CTRL_SS_1000; ++ break; ++ default: ++ return -1; ++ } ++ ++ if (fulldpx) ++ speed_mii |= MII_CTRL_FD; ++ ++ /* Hold rxSeqStart */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_RX0_RX_CONTROLr); ++ data16 |= DSC_2_0_DSC_CTRL0_RXSEQSTART_MASK; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_RX0_RX_CONTROLr, data16); ++ ++ /* hold TX FIFO in reset */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X3r); ++ data16 |= SERDESDIGITAL_CONTROL1000X3_TX_FIFO_RST_MASK; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X3r, data16); ++ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_MISC1r); ++ data16 &= ~(mask); ++ data16 |= speed_val; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_MISC1r, data16); ++ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_COMBO_IEEE0_MIICNTLr); ++ data16 &= ~(MII_CTRL_AE | MII_CTRL_RAN | MII_CTRL_SS_LSB | MII_CTRL_SS_MSB | MII_CTRL_FD); ++ data16 |= speed_mii; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_COMBO_IEEE0_MIICNTLr, data16); ++ ++ /* release rxSeqStart */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_RX0_RX_CONTROLr); ++ data16 &= ~(DSC_2_0_DSC_CTRL0_RXSEQSTART_MASK); ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_RX0_RX_CONTROLr, data16); ++ ++ /* release TX FIFO reset */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X3r); ++ data16 &= ~(SERDESDIGITAL_CONTROL1000X3_TX_FIFO_RST_MASK); ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_CONTROL1000X3r, data16); ++ ++ return 0; ++} ++ ++int ++serdes_set_asym_mode(uint eth_num, uint phyaddr) ++{ ++ uint16 data16; ++ uint32 txclkctrlreg[] = {0x0000, 0x8065, 0x8075, 0x8085}; ++ uint32 rxclkctrlreg[] = {0x0000, 0x80bc, 0x80cc, 0x80dc}; ++ uint32 spd[] = {0x0000, 0x7120, 0x7120, 0x7110}; ++ 
uint32 clkctrlmsk[] = {0x0000, 0x0040, 0x0040, 0x0040}; ++ uint32 clkctrlval[] = {0x0000, 0x0040, 0x0040, 0x0000}; ++ ++ NET_TRACE(("et%d: %s: phyaddr %d\n", eth_num, __FUNCTION__, phyaddr)); ++ ++ printk("et%d: %s: setting serdes asymmetrice mode\n", eth_num, __FUNCTION__); ++ ++ /* set speed */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_MISC1r); ++ //printk("et%d: %s: read 0x%x from 0x%x\n", eth_num, __FUNCTION__, data16, XGXS16G_SERDESDIGITAL_MISC1r); ++ data16 &= 0x0f00; ++ data16 |= spd[phyaddr]; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_SERDESDIGITAL_MISC1r, data16); ++ //printk("et%d: %s: write 0x%x to 0x%x\n", eth_num, __FUNCTION__, data16, XGXS16G_SERDESDIGITAL_MISC1r); ++ ++ /* Enable asymmetric mode */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, XGXS16G_TX_LN_SWAP1r); ++ //printk("et%d: %s: read 0x%x from 0x%x\n", eth_num, __FUNCTION__, data16, XGXS16G_TX_LN_SWAP1r); ++ data16 |= 0x0100; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_TX_LN_SWAP1r, data16); ++ //printk("et%d: %s: write 0x%x to 0x%x\n", eth_num, __FUNCTION__, data16, XGXS16G_TX_LN_SWAP1r); ++ ++ /* set tx clock control bit */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, txclkctrlreg[phyaddr]); ++ //printk("et%d: %s: read 0x%x from 0x%x\n", eth_num, __FUNCTION__, data16, txclkctrlreg[phyaddr]); ++ data16 &= ~(clkctrlmsk[phyaddr]); ++ data16 |= clkctrlval[phyaddr]; ++ serdes_wr_reg(eth_num, phyaddr, txclkctrlreg[phyaddr], data16); ++ //printk("et%d: %s: write 0x%x to 0x%x\n", eth_num, __FUNCTION__, data16, txclkctrlreg[phyaddr]); ++ ++ /* set rx clock control bit */ ++ data16 = serdes_rd_reg(eth_num, phyaddr, rxclkctrlreg[phyaddr]); ++ //printk("et%d: %s: read 0x%x from 0x%x\n", eth_num, __FUNCTION__, data16, rxclkctrlreg[phyaddr]); ++ data16 &= ~(clkctrlmsk[phyaddr]); ++ data16 |= clkctrlval[phyaddr]; ++ serdes_wr_reg(eth_num, phyaddr, rxclkctrlreg[phyaddr], data16); ++ //printk("et%d: %s: write 0x%x to 0x%x\n", eth_num, __FUNCTION__, data16, rxclkctrlreg[phyaddr]); ++ ++ 
data16 = 0xffff; ++ serdes_wr_reg(eth_num, phyaddr, XGXS16G_XGXSBLK1_LANECTRL1r, data16); ++ //printk("et%d: %s: write 0x%x to 0x%x\n", eth_num, __FUNCTION__, data16, XGXS16G_XGXSBLK1_LANECTRL1r); ++ ++ return 0; ++} ++ ++#endif /* (defined(CONFIG_SERDES_ASYMMETRIC_MODE)) */ ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmutils.c b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmutils.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/bcmutils.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/bcmutils.c 2017-11-09 17:53:44.032290000 +0800 +@@ -0,0 +1,3389 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * Driver O/S-independent utility routines ++ * ++ * $Id: bcmutils.c 325951 2012-04-05 06:03:27Z $ ++ */ ++ ++#include ++#include ++#include ++#if defined(__FreeBSD__) || defined(__NetBSD__) ++#include ++#else ++#include ++#endif ++#ifdef BCMDRIVER ++ ++#include ++#include ++#include ++#include ++ ++#else /* !BCMDRIVER */ ++ ++#include ++#include ++#include ++ ++#if defined(BCMEXTSUP) ++#include ++#endif ++ ++#endif /* !BCMDRIVER */ ++ ++#if defined(_WIN32) || defined(NDIS) || defined(__vxworks) || defined(_CFE_) ++#include ++#endif ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef BCMPERFSTATS ++#include ++#endif ++#include ++void *_bcmutils_dummy_fn = NULL; ++ ++#ifdef BCMDRIVER ++ ++#ifdef WLC_LOW ++/* nvram vars cache */ ++static char *nvram_vars = NULL; ++static int vars_len = -1; ++#endif /* WLC_LOW */ ++ ++int ++pktpool_init(osl_t *osh, pktpool_t *pktp, int *pplen, int plen, bool istx) ++{ ++ int i, err = BCME_OK; ++ void *p; ++ int pktplen; ++ ++ ASSERT(pktp != NULL); ++ ASSERT(osh != NULL); ++ ASSERT(pplen != NULL); ++ ++ pktplen = *pplen; ++ ++ bzero(pktp, sizeof(pktpool_t)); ++ pktp->inited = TRUE; ++ pktp->istx = istx ? 
TRUE : FALSE; ++ pktp->plen = (uint16)plen; ++ *pplen = 0; ++ ++ pktp->maxlen = PKTPOOL_LEN_MAX; ++ if (pktplen > pktp->maxlen) ++ pktplen = pktp->maxlen; ++ ++ for (i = 0; i < pktplen; i++) { ++ p = PKTGET(osh, plen, pktp->istx); ++ if (p == NULL) { ++ /* Not able to allocate all requested pkts ++ * so just return what was actually allocated ++ * We can add to the pool later ++ */ ++ if (pktp->w == 0) { ++ err = BCME_NOMEM; ++ } ++ ++ goto exit; ++ } ++ ++ PKTSETPOOL(osh, p, TRUE, pktp); ++ pktp->q[i] = p; ++ pktp->w++; ++ pktp->len++; ++#ifdef BCMDBG_POOL ++ pktp->dbg_q[pktp->dbg_qlen++].p = p; ++#endif ++ } ++ ++exit: ++ *pplen = pktp->w; ++ pktp->len++; /* Add one for end */ ++ return err; ++} ++ ++int ++pktpool_deinit(osl_t *osh, pktpool_t *pktp) ++{ ++ int i; ++ int cnt; ++ ++ ASSERT(osh != NULL); ++ ASSERT(pktp != NULL); ++ ++ cnt = pktp->len; ++ for (i = 0; i < cnt; i++) { ++ if (pktp->q[i] != NULL) { ++ PKTSETPOOL(osh, pktp->q[i], FALSE, NULL); ++ PKTFREE(osh, pktp->q[i], pktp->istx); ++ pktp->q[i] = NULL; ++ pktp->len--; ++ } ++#ifdef BCMDBG_POOL ++ if (pktp->dbg_q[i].p != NULL) { ++ pktp->dbg_q[i].p = NULL; ++ } ++#endif ++ } ++ pktp->inited = FALSE; ++ ++ /* Are there still pending pkts? */ ++ ASSERT(pktpool_len(pktp) == 0); ++ ++ return 0; ++} ++ ++int ++pktpool_fill(osl_t *osh, pktpool_t *pktp, bool minimal) ++{ ++ void *p; ++ int err = 0; ++ int len, psize, maxlen; ++ ++ ASSERT(pktpool_plen(pktp) != 0); ++ ++ maxlen = pktpool_maxlen(pktp); ++ psize = minimal ? (maxlen >> 2) : maxlen; ++ len = pktpool_len(pktp); ++ for (; len < psize; len++) { ++ p = PKTGET(osh, pktpool_plen(pktp), FALSE); ++ if (p == NULL) { ++ err = BCME_NOMEM; ++ break; ++ } ++ ++ if (pktpool_add(pktp, p) != BCME_OK) { ++ PKTFREE(osh, p, FALSE); ++ err = BCME_ERROR; ++ break; ++ } ++ } ++ ++ return err; ++} ++ ++uint16 ++pktpool_avail(pktpool_t *pktp) ++{ ++ if (pktp->w == pktp->r) { ++ return 0; ++ } ++ ++ return (pktp->w > pktp->r) ? 
(pktp->w - pktp->r) : ((pktp->len) - (pktp->r - pktp->w)); ++} ++ ++static void * ++pktpool_deq(pktpool_t *pktp) ++{ ++ void *p; ++ ++ if (pktp->r == pktp->w) { ++ return NULL; ++ } ++ ++ p = pktp->q[pktp->r]; ++ ASSERT(p != NULL); ++ ++ pktp->q[pktp->r++] = NULL; ++ pktp->r %= (pktp->len); ++ ++ return p; ++} ++ ++static void ++pktpool_enq(pktpool_t *pktp, void *p) ++{ ++ uint16 next; ++ ++ ASSERT(p != NULL); ++ ++ next = (pktp->w + 1) % (pktp->len); ++ if (next == pktp->r) { ++ /* Should not happen; otherwise pkt leak */ ++ ASSERT(0); ++ return; ++ } ++ ++ ASSERT(pktp->q[pktp->w] == NULL); ++ ++ pktp->q[pktp->w] = p; ++ pktp->w = next; ++} ++ ++int ++pktpool_avail_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) ++{ ++ int i; ++ ++ ASSERT(cb != NULL); ++ ++ i = pktp->cbcnt; ++ if (i == PKTPOOL_CB_MAX) { ++ return BCME_ERROR; ++ } ++ ++ ASSERT(pktp->cbs[i].cb == NULL); ++ pktp->cbs[i].cb = cb; ++ pktp->cbs[i].arg = arg; ++ pktp->cbcnt++; ++ ++ return 0; ++} ++ ++int ++pktpool_empty_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) ++{ ++ int i; ++ ++ ASSERT(cb != NULL); ++ ++ i = pktp->ecbcnt; ++ if (i == PKTPOOL_CB_MAX) { ++ return BCME_ERROR; ++ } ++ ++ ASSERT(pktp->ecbs[i].cb == NULL); ++ pktp->ecbs[i].cb = cb; ++ pktp->ecbs[i].arg = arg; ++ pktp->ecbcnt++; ++ ++ return 0; ++} ++ ++static int ++pktpool_empty_notify(pktpool_t *pktp) ++{ ++ int i; ++ ++ pktp->empty = TRUE; ++ for (i = 0; i < pktp->ecbcnt; i++) { ++ ASSERT(pktp->ecbs[i].cb != NULL); ++ pktp->ecbs[i].cb(pktp, pktp->ecbs[i].arg); ++ } ++ pktp->empty = FALSE; ++ ++ return 0; ++} ++ ++#ifdef BCMDBG_POOL ++int ++pktpool_dbg_register(pktpool_t *pktp, pktpool_cb_t cb, void *arg) ++{ ++ int i; ++ ++ ASSERT(cb); ++ ++ i = pktp->dbg_cbcnt; ++ if (i == PKTPOOL_CB_MAX) ++ return BCME_ERROR; ++ ++ ASSERT(pktp->dbg_cbs[i].cb == NULL); ++ pktp->dbg_cbs[i].cb = cb; ++ pktp->dbg_cbs[i].arg = arg; ++ pktp->dbg_cbcnt++; ++ ++ return 0; ++} ++ ++int pktpool_dbg_notify(pktpool_t *pktp); ++ ++int 
++pktpool_dbg_notify(pktpool_t *pktp) ++{ ++ int i; ++ ++ for (i = 0; i < pktp->dbg_cbcnt; i++) { ++ ASSERT(pktp->dbg_cbs[i].cb); ++ pktp->dbg_cbs[i].cb(pktp, pktp->dbg_cbs[i].arg); ++ } ++ ++ return 0; ++} ++ ++int ++pktpool_dbg_dump(pktpool_t *pktp) ++{ ++ int i; ++ ++ printf("pool len=%d maxlen=%d\n", pktp->dbg_qlen, pktp->maxlen); ++ for (i = 0; i < pktp->dbg_qlen; i++) { ++ ASSERT(pktp->dbg_q[i].p); ++ printf("%d, p: 0x%x dur:%lu us state:%d\n", i, ++ pktp->dbg_q[i].p, pktp->dbg_q[i].dur/100, PKTPOOLSTATE(pktp->dbg_q[i].p)); ++ } ++ ++ return 0; ++} ++ ++int ++pktpool_stats_dump(pktpool_t *pktp, pktpool_stats_t *stats) ++{ ++ int i; ++ int state; ++ ++ bzero(stats, sizeof(pktpool_stats_t)); ++ for (i = 0; i < pktp->dbg_qlen; i++) { ++ ASSERT(pktp->dbg_q[i].p != NULL); ++ ++ state = PKTPOOLSTATE(pktp->dbg_q[i].p); ++ switch (state) { ++ case POOL_TXENQ: ++ stats->enq++; break; ++ case POOL_TXDH: ++ stats->txdh++; break; ++ case POOL_TXD11: ++ stats->txd11++; break; ++ case POOL_RXDH: ++ stats->rxdh++; break; ++ case POOL_RXD11: ++ stats->rxd11++; break; ++ case POOL_RXFILL: ++ stats->rxfill++; break; ++ case POOL_IDLE: ++ stats->idle++; break; ++ } ++ } ++ ++ return 0; ++} ++ ++int ++pktpool_start_trigger(pktpool_t *pktp, void *p) ++{ ++ uint32 cycles, i; ++ ++ if (!PKTPOOL(NULL, p)) { ++ return 0; ++ } ++ ++ OSL_GETCYCLES(cycles); ++ ++ for (i = 0; i < pktp->dbg_qlen; i++) { ++ ASSERT(pktp->dbg_q[i].p != NULL); ++ ++ if (pktp->dbg_q[i].p == p) { ++ pktp->dbg_q[i].cycles = cycles; ++ break; ++ } ++ } ++ ++ return 0; ++} ++ ++int pktpool_stop_trigger(pktpool_t *pktp, void *p); ++int ++pktpool_stop_trigger(pktpool_t *pktp, void *p) ++{ ++ uint32 cycles, i; ++ ++ if (!PKTPOOL(NULL, p)) { ++ return 0; ++ } ++ ++ OSL_GETCYCLES(cycles); ++ ++ for (i = 0; i < pktp->dbg_qlen; i++) { ++ ASSERT(pktp->dbg_q[i].p != NULL); ++ ++ if (pktp->dbg_q[i].p == p) { ++ if (pktp->dbg_q[i].cycles == 0) ++ break; ++ ++ if (cycles >= pktp->dbg_q[i].cycles) ++ pktp->dbg_q[i].dur = 
cycles - pktp->dbg_q[i].cycles; ++ else ++ pktp->dbg_q[i].dur = ++ (((uint32)-1) - pktp->dbg_q[i].cycles) + cycles + 1; ++ ++ pktp->dbg_q[i].cycles = 0; ++ break; ++ } ++ } ++ ++ return 0; ++} ++#endif /* BCMDBG_POOL */ ++ ++int ++pktpool_avail_notify_normal(osl_t *osh, pktpool_t *pktp) ++{ ++ ASSERT(pktp); ++ pktp->availcb_excl = NULL; ++ return 0; ++} ++ ++int ++pktpool_avail_notify_exclusive(osl_t *osh, pktpool_t *pktp, pktpool_cb_t cb) ++{ ++ int i; ++ ++ ASSERT(pktp); ++ ASSERT(pktp->availcb_excl == NULL); ++ for (i = 0; i < pktp->cbcnt; i++) { ++ if (cb == pktp->cbs[i].cb) { ++ pktp->availcb_excl = &pktp->cbs[i]; ++ break; ++ } ++ } ++ ++ if (pktp->availcb_excl == NULL) { ++ return BCME_ERROR; ++ } else { ++ return 0; ++ } ++} ++ ++static int ++pktpool_avail_notify(pktpool_t *pktp) ++{ ++ int i, k, idx; ++ int avail; ++ ++ ASSERT(pktp); ++ if (pktp->availcb_excl != NULL) { ++ pktp->availcb_excl->cb(pktp, pktp->availcb_excl->arg); ++ return 0; ++ } ++ ++ k = pktp->cbcnt - 1; ++ for (i = 0; i < pktp->cbcnt; i++) { ++ avail = pktpool_avail(pktp); ++ ++ if (avail) { ++ if (pktp->cbtoggle) { ++ idx = i; ++ } else { ++ idx = k--; ++ } ++ ++ ASSERT(pktp->cbs[idx].cb != NULL); ++ pktp->cbs[idx].cb(pktp, pktp->cbs[idx].arg); ++ } ++ } ++ ++ /* Alternate between filling from head or tail ++ */ ++ pktp->cbtoggle ^= 1; ++ ++ return 0; ++} ++ ++void * ++pktpool_get(pktpool_t *pktp) ++{ ++ void *p; ++ ++ p = pktpool_deq(pktp); ++ ++ if (p == NULL) { ++ /* Notify and try to reclaim tx pkts */ ++ if (pktp->ecbcnt) { ++ pktpool_empty_notify(pktp); ++ } ++ ++ p = pktpool_deq(pktp); ++ } ++ ++ return p; ++} ++ ++void ++pktpool_free(pktpool_t *pktp, void *p) ++{ ++ ASSERT(p != NULL); ++ ++#ifdef BCMDBG_POOL ++ /* pktpool_stop_trigger(pktp, p); */ ++#endif ++ ++ pktpool_enq(pktp, p); ++ ++ if (pktp->emptycb_disable) { ++ return; ++ } ++ ++ if (pktp->cbcnt) { ++ if (pktp->empty == FALSE) { ++ pktpool_avail_notify(pktp); ++ } ++ } ++} ++ ++int ++pktpool_add(pktpool_t *pktp, void 
*p) ++{ ++ ASSERT(p != NULL); ++ ++ if (pktpool_len(pktp) == pktp->maxlen) { ++ return BCME_RANGE; ++ } ++ ++ ASSERT(pktpool_plen(pktp) == PKTLEN(NULL, p)); /* pkts in pool have same length */ ++ PKTSETPOOL(NULL, p, TRUE, pktp); ++ ++ pktp->len++; ++ if (pktp->r > pktp->w) { ++ /* Add to tail */ ++ ASSERT(pktp->q[pktp->len - 1] == NULL); ++ pktp->q[pktp->len - 1] = p; ++ } else { ++ pktpool_enq(pktp, p); ++ } ++ ++#ifdef BCMDBG_POOL ++ pktp->dbg_q[pktp->dbg_qlen++].p = p; ++#endif ++ ++ return 0; ++} ++ ++int ++pktpool_setmaxlen(pktpool_t *pktp, uint16 maxlen) ++{ ++ if (maxlen > PKTPOOL_LEN_MAX) ++ maxlen = PKTPOOL_LEN_MAX; ++ ++ /* if pool is already beyond maxlen, then just cap it ++ * since we currently do not reduce the pool len ++ * already allocated ++ */ ++ pktp->maxlen = (pktpool_len(pktp) > maxlen) ? pktpool_len(pktp) : maxlen; ++ ++ return pktp->maxlen; ++} ++ ++void ++pktpool_emptycb_disable(pktpool_t *pktp, bool disable) ++{ ++ ASSERT(pktp); ++ ++ pktp->emptycb_disable = disable; ++} ++ ++bool ++pktpool_emptycb_disabled(pktpool_t *pktp) ++{ ++ ASSERT(pktp); ++ return pktp->emptycb_disable; ++} ++ ++/* copy a pkt buffer chain into a buffer */ ++uint ++pktcopy(osl_t *osh, void *p, uint offset, int len, uchar *buf) ++{ ++ uint n, ret = 0; ++ ++ if (len < 0) { ++ len = 4096; /* "infinite" */ ++ } ++ ++ /* skip 'offset' bytes */ ++ for (; p && offset; p = PKTNEXT(osh, p)) { ++ if (offset < (uint)PKTLEN(osh, p)) { ++ break; ++ } ++ offset -= PKTLEN(osh, p); ++ } ++ ++ if (!p) { ++ return 0; ++ } ++ ++ /* copy the data */ ++ for (; p && len; p = PKTNEXT(osh, p)) { ++ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); ++ bcopy(PKTDATA(osh, p) + offset, buf, n); ++ buf += n; ++ len -= n; ++ ret += n; ++ offset = 0; ++ } ++ ++ return ret; ++} ++ ++/* copy a buffer into a pkt buffer chain */ ++uint ++pktfrombuf(osl_t *osh, void *p, uint offset, int len, uchar *buf) ++{ ++ uint n, ret = 0; ++ ++ /* skip 'offset' bytes */ ++ for (; p && offset; p = PKTNEXT(osh, p)) 
{ ++ if (offset < (uint)PKTLEN(osh, p)) { ++ break; ++ } ++ offset -= PKTLEN(osh, p); ++ } ++ ++ if (!p) { ++ return 0; ++ } ++ ++ /* copy the data */ ++ for (; p && len; p = PKTNEXT(osh, p)) { ++ n = MIN((uint)PKTLEN(osh, p) - offset, (uint)len); ++ bcopy(buf, PKTDATA(osh, p) + offset, n); ++ buf += n; ++ len -= n; ++ ret += n; ++ offset = 0; ++ } ++ ++ return ret; ++} ++ ++#ifdef NOTYET ++/* copy data from one pkt buffer (chain) to another */ ++uint ++pkt2pktcopy(osl_t *osh, void *p1, uint offs1, void *p2, uint offs2, int maxlen) ++{ ++ uint8 *dp1, *dp2; ++ uint len1, len2, copylen, totallen; ++ ++ for (; p1 && offs; p1 = PKTNEXT(osh, p1)) { ++ if (offs1 < (uint)PKTLEN(osh, p1)) { ++ break; ++ } ++ offs1 -= PKTLEN(osh, p1); ++ } ++ for (; p2 && offs; p2 = PKTNEXT(osh, p2)) { ++ if (offs2 < (uint)PKTLEN(osh, p2)) { ++ break; ++ } ++ offs2 -= PKTLEN(osh, p2); ++ } ++ ++ /* Heck w/it, only need the above for now */ ++} ++#endif /* NOTYET */ ++ ++ ++/* return total length of buffer chain */ ++uint BCMFASTPATH ++pkttotlen(osl_t *osh, void *p) ++{ ++ uint total; ++ int len; ++ ++ total = 0; ++ for (; p; p = PKTNEXT(osh, p)) { ++ len = PKTLEN(osh, p); ++#ifdef MACOSX ++ if (len < 0) { ++ /* Bad packet length, just drop and exit */ ++ printf("wl: pkttotlen bad (%p,%d)\n", p, len); ++ break; ++ } ++#endif /* MACOSX */ ++ total += len; ++ } ++ ++ return (total); ++} ++ ++/* return the last buffer of chained pkt */ ++void * ++pktlast(osl_t *osh, void *p) ++{ ++ for (; PKTNEXT(osh, p); p = PKTNEXT(osh, p)) { ++ ; ++ } ++ ++ return (p); ++} ++ ++/* count segments of a chained packet */ ++uint BCMFASTPATH ++pktsegcnt(osl_t *osh, void *p) ++{ ++ uint cnt; ++ ++ for (cnt = 0; p; p = PKTNEXT(osh, p)) { ++ cnt++; ++ } ++ ++ return cnt; ++} ++ ++ ++/* count segments of a chained packet */ ++uint BCMFASTPATH ++pktsegcnt_war(osl_t *osh, void *p) ++{ ++ uint cnt; ++ uint8 *pktdata; ++ uint len, remain, align64; ++ ++ for (cnt = 0; p; p = PKTNEXT(osh, p)) { ++ cnt++; ++ len = 
PKTLEN(osh, p); ++ if (len > 128) { ++ pktdata = (uint8 *)PKTDATA(osh, p); /* starting address of data */ ++ /* Check for page boundary straddle (2048B) */ ++ if (((uintptr)pktdata & ~0x7ff) != ((uintptr)(pktdata+len) & ~0x7ff)) { ++ cnt++; ++ } ++ ++ align64 = (uint)((uintptr)pktdata & 0x3f); /* aligned to 64B */ ++ align64 = (64 - align64) & 0x3f; ++ len -= align64; /* bytes from aligned 64B to end */ ++ /* if aligned to 128B, check for MOD 128 between 1 to 4B */ ++ remain = len % 128; ++ if (remain > 0 && remain <= 4) { ++ cnt++; /* add extra seg */ ++ } ++ } ++ } ++ ++ return cnt; ++} ++ ++uint8 * BCMFASTPATH ++pktdataoffset(osl_t *osh, void *p, uint offset) ++{ ++ uint total = pkttotlen(osh, p); ++ uint pkt_off = 0, len = 0; ++ uint8 *pdata = (uint8 *) PKTDATA(osh, p); ++ ++ if (offset > total) { ++ return NULL; ++ } ++ ++ for (; p; p = PKTNEXT(osh, p)) { ++ pdata = (uint8 *) PKTDATA(osh, p); ++ pkt_off = offset - len; ++ len += PKTLEN(osh, p); ++ if (len > offset) { ++ break; ++ } ++ } ++ return (uint8*) (pdata+pkt_off); ++} ++ ++ ++/* given a offset in pdata, find the pkt seg hdr */ ++void * ++pktoffset(osl_t *osh, void *p, uint offset) ++{ ++ uint total = pkttotlen(osh, p); ++ uint len = 0; ++ ++ if (offset > total) { ++ return NULL; ++ } ++ ++ for (; p; p = PKTNEXT(osh, p)) { ++ len += PKTLEN(osh, p); ++ if (len > offset) { ++ break; ++ } ++ } ++ return p; ++} ++ ++/* ++ * osl multiple-precedence packet queue ++ * hi_prec is always >= the number of the highest non-empty precedence ++ */ ++void * BCMFASTPATH ++pktq_penq(struct pktq *pq, int prec, void *p) ++{ ++ struct pktq_prec *q; ++ ++ ASSERT(prec >= 0 && prec < pq->num_prec); ++ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ ++ ++ ASSERT(!pktq_full(pq)); ++ ASSERT(!pktq_pfull(pq, prec)); ++ ++ q = &pq->q[prec]; ++ ++ if (q->head) { ++ PKTSETLINK(q->tail, p); ++ } else { ++ q->head = p; ++ } ++ ++ q->tail = p; ++ q->len++; ++ ++ pq->len++; ++ ++ if (pq->hi_prec < prec) { ++ pq->hi_prec = 
(uint8)prec; ++ } ++ ++ return p; ++} ++ ++void * BCMFASTPATH ++pktq_penq_head(struct pktq *pq, int prec, void *p) ++{ ++ struct pktq_prec *q; ++ ++ ASSERT(prec >= 0 && prec < pq->num_prec); ++ ASSERT(PKTLINK(p) == NULL); /* queueing chains not allowed */ ++ ++ ASSERT(!pktq_full(pq)); ++ ASSERT(!pktq_pfull(pq, prec)); ++ ++ q = &pq->q[prec]; ++ ++ if (q->head == NULL) { ++ q->tail = p; ++ } ++ ++ PKTSETLINK(p, q->head); ++ q->head = p; ++ q->len++; ++ ++ pq->len++; ++ ++ if (pq->hi_prec < prec) { ++ pq->hi_prec = (uint8)prec; ++ } ++ ++ return p; ++} ++ ++void * BCMFASTPATH ++pktq_pdeq(struct pktq *pq, int prec) ++{ ++ struct pktq_prec *q; ++ void *p; ++ ++ ASSERT(prec >= 0 && prec < pq->num_prec); ++ ++ q = &pq->q[prec]; ++ ++ if ((p = q->head) == NULL) { ++ return NULL; ++ } ++ ++ if ((q->head = PKTLINK(p)) == NULL) { ++ q->tail = NULL; ++ } ++ ++ q->len--; ++ ++ pq->len--; ++ ++ PKTSETLINK(p, NULL); ++ ++ return p; ++} ++ ++void * BCMFASTPATH ++pktq_pdeq_prev(struct pktq *pq, int prec, void *prev_p) ++{ ++ struct pktq_prec *q; ++ void *p; ++ ++ ASSERT(prec >= 0 && prec < pq->num_prec); ++ ++ q = &pq->q[prec]; ++ ++ if (prev_p == NULL) { ++ return NULL; ++ } ++ ++ if ((p = PKTLINK(prev_p)) == NULL) { ++ return NULL; ++ } ++ ++ q->len--; ++ ++ pq->len--; ++ ++ PKTSETLINK(prev_p, PKTLINK(p)); ++ PKTSETLINK(p, NULL); ++ ++ return p; ++} ++ ++void * BCMFASTPATH ++pktq_pdeq_tail(struct pktq *pq, int prec) ++{ ++ struct pktq_prec *q; ++ void *p, *prev; ++ ++ ASSERT(prec >= 0 && prec < pq->num_prec); ++ ++ q = &pq->q[prec]; ++ ++ if ((p = q->head) == NULL) { ++ return NULL; ++ } ++ ++ for (prev = NULL; p != q->tail; p = PKTLINK(p)) { ++ prev = p; ++ } ++ ++ if (prev) { ++ PKTSETLINK(prev, NULL); ++ } else { ++ q->head = NULL; ++ } ++ ++ q->tail = prev; ++ q->len--; ++ ++ pq->len--; ++ ++ return p; ++} ++ ++void ++pktq_pflush(osl_t *osh, struct pktq *pq, int prec, bool dir, ifpkt_cb_t fn, int arg) ++{ ++ struct pktq_prec *q; ++ void *p, *prev = NULL; ++ ++ q = 
&pq->q[prec]; ++ p = q->head; ++ while (p) { ++ if (fn == NULL || (*fn)(p, arg)) { ++ bool head = (p == q->head); ++ if (head) { ++ q->head = PKTLINK(p); ++ } else { ++ PKTSETLINK(prev, PKTLINK(p)); ++ } ++ PKTSETLINK(p, NULL); ++ PKTFREE(osh, p, dir); ++ q->len--; ++ pq->len--; ++ p = (head ? q->head : PKTLINK(prev)); ++ } else { ++ prev = p; ++ p = PKTLINK(p); ++ } ++ } ++ ++ if (q->head == NULL) { ++ ASSERT(q->len == 0); ++ q->tail = NULL; ++ } ++} ++ ++bool BCMFASTPATH ++pktq_pdel(struct pktq *pq, void *pktbuf, int prec) ++{ ++ struct pktq_prec *q; ++ void *p; ++ ++ ASSERT(prec >= 0 && prec < pq->num_prec); ++ ++ if (!pktbuf) { ++ return FALSE; ++ } ++ ++ q = &pq->q[prec]; ++ ++ if (q->head == pktbuf) { ++ if ((q->head = PKTLINK(pktbuf)) == NULL) { ++ q->tail = NULL; ++ } ++ } else { ++ for (p = q->head; p && PKTLINK(p) != pktbuf; p = PKTLINK(p)) { ++ ; ++ } ++ if (p == NULL) { ++ return FALSE; ++ } ++ ++ PKTSETLINK(p, PKTLINK(pktbuf)); ++ if (q->tail == pktbuf) { ++ q->tail = p; ++ } ++ } ++ ++ q->len--; ++ pq->len--; ++ PKTSETLINK(pktbuf, NULL); ++ return TRUE; ++} ++ ++void ++pktq_init(struct pktq *pq, int num_prec, int max_len) ++{ ++ int prec; ++ ++ ASSERT(num_prec > 0 && num_prec <= PKTQ_MAX_PREC); ++ ++ /* pq is variable size; only zero out what's requested */ ++ bzero(pq, OFFSETOF(struct pktq, q) + (sizeof(struct pktq_prec) * num_prec)); ++ ++ pq->num_prec = (uint16)num_prec; ++ ++ pq->max = (uint16)max_len; ++ ++ for (prec = 0; prec < num_prec; prec++) { ++ pq->q[prec].max = pq->max; ++ } ++} ++ ++void ++pktq_set_max_plen(struct pktq *pq, int prec, int max_len) ++{ ++ ASSERT(prec >= 0 && prec < pq->num_prec); ++ ++ if (prec < pq->num_prec) { ++ pq->q[prec].max = (uint16)max_len; ++ } ++} ++ ++void * BCMFASTPATH ++pktq_deq(struct pktq *pq, int *prec_out) ++{ ++ struct pktq_prec *q; ++ void *p; ++ int prec; ++ ++ if (pq->len == 0) { ++ return NULL; ++ } ++ ++ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) { ++ pq->hi_prec--; ++ } ++ ++ q = 
&pq->q[prec]; ++ ++ if ((p = q->head) == NULL) { ++ return NULL; ++ } ++ ++ if ((q->head = PKTLINK(p)) == NULL) { ++ q->tail = NULL; ++ } ++ ++ q->len--; ++ ++ pq->len--; ++ ++ if (prec_out) { ++ *prec_out = prec; ++ } ++ ++ PKTSETLINK(p, NULL); ++ ++ return p; ++} ++ ++void * BCMFASTPATH ++pktq_deq_tail(struct pktq *pq, int *prec_out) ++{ ++ struct pktq_prec *q; ++ void *p, *prev; ++ int prec; ++ ++ if (pq->len == 0) { ++ return NULL; ++ } ++ ++ for (prec = 0; prec < pq->hi_prec; prec++) { ++ if (pq->q[prec].head) { ++ break; ++ } ++ } ++ ++ q = &pq->q[prec]; ++ ++ if ((p = q->head) == NULL) { ++ return NULL; ++ } ++ ++ for (prev = NULL; p != q->tail; p = PKTLINK(p)) { ++ prev = p; ++ } ++ ++ if (prev) { ++ PKTSETLINK(prev, NULL); ++ } else { ++ q->head = NULL; ++ } ++ ++ q->tail = prev; ++ q->len--; ++ ++ pq->len--; ++ ++ if (prec_out) { ++ *prec_out = prec; ++ } ++ ++ PKTSETLINK(p, NULL); ++ ++ return p; ++} ++ ++void * ++pktq_peek(struct pktq *pq, int *prec_out) ++{ ++ int prec; ++ ++ if (pq->len == 0) { ++ return NULL; ++ } ++ ++ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) { ++ pq->hi_prec--; ++ } ++ ++ if (prec_out) { ++ *prec_out = prec; ++ } ++ ++ return (pq->q[prec].head); ++} ++ ++void * ++pktq_peek_tail(struct pktq *pq, int *prec_out) ++{ ++ int prec; ++ ++ if (pq->len == 0) { ++ return NULL; ++ } ++ ++ for (prec = 0; prec < pq->hi_prec; prec++) { ++ if (pq->q[prec].head) { ++ break; ++ } ++ } ++ ++ if (prec_out) { ++ *prec_out = prec; ++ } ++ ++ return (pq->q[prec].tail); ++} ++ ++void ++pktq_flush(osl_t *osh, struct pktq *pq, bool dir, ifpkt_cb_t fn, int arg) ++{ ++ int prec; ++ ++ /* Optimize flush, if pktq len = 0, just return. ++ * pktq len of 0 means pktq's prec q's are all empty. 
++ */ ++ if (pq->len == 0) { ++ return; ++ } ++ ++ for (prec = 0; prec < pq->num_prec; prec++) { ++ pktq_pflush(osh, pq, prec, dir, fn, arg); ++ } ++ if (fn == NULL) { ++ ASSERT(pq->len == 0); ++ } ++} ++ ++/* Return sum of lengths of a specific set of precedences */ ++int ++pktq_mlen(struct pktq *pq, uint prec_bmp) ++{ ++ int prec, len; ++ ++ len = 0; ++ ++ for (prec = 0; prec <= pq->hi_prec; prec++) { ++ if (prec_bmp & (1 << prec)) { ++ len += pq->q[prec].len; ++ } ++ } ++ ++ return len; ++} ++ ++/* Priority peek from a specific set of precedences */ ++void * BCMFASTPATH ++pktq_mpeek(struct pktq *pq, uint prec_bmp, int *prec_out) ++{ ++ struct pktq_prec *q; ++ void *p; ++ int prec; ++ ++ if (pq->len == 0) { ++ return NULL; ++ } ++ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) { ++ pq->hi_prec--; ++ } ++ ++ while ((prec_bmp & (1 << prec)) == 0 || pq->q[prec].head == NULL) { ++ if (prec-- == 0) { ++ return NULL; ++ } ++ } ++ ++ q = &pq->q[prec]; ++ ++ if ((p = q->head) == NULL) { ++ return NULL; ++ } ++ ++ if (prec_out) { ++ *prec_out = prec; ++ } ++ ++ return p; ++} ++/* Priority dequeue from a specific set of precedences */ ++void * BCMFASTPATH ++pktq_mdeq(struct pktq *pq, uint prec_bmp, int *prec_out) ++{ ++ struct pktq_prec *q; ++ void *p; ++ int prec; ++ ++ if (pq->len == 0) { ++ return NULL; ++ } ++ ++ while ((prec = pq->hi_prec) > 0 && pq->q[prec].head == NULL) { ++ pq->hi_prec--; ++ } ++ ++ while ((pq->q[prec].head == NULL) || ((prec_bmp & (1 << prec)) == 0)) { ++ if (prec-- == 0) { ++ return NULL; ++ } ++ } ++ ++ q = &pq->q[prec]; ++ ++ if ((p = q->head) == NULL) { ++ return NULL; ++ } ++ ++ if ((q->head = PKTLINK(p)) == NULL) { ++ q->tail = NULL; ++ } ++ ++ q->len--; ++ ++ if (prec_out) { ++ *prec_out = prec; ++ } ++ ++ pq->len--; ++ ++ PKTSETLINK(p, NULL); ++ ++ return p; ++} ++ ++#endif /* BCMDRIVER */ ++ ++#if defined(BCMROMBUILD) ++const unsigned char BCMROMDATA(bcm_ctype)[] = { ++#else ++const unsigned char bcm_ctype[] = { ++#endif ++ 
++ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 0-7 */ ++ _BCM_C, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C|_BCM_S, _BCM_C, ++ _BCM_C, /* 8-15 */ ++ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 16-23 */ ++ _BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C,_BCM_C, /* 24-31 */ ++ _BCM_S|_BCM_SP,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 32-39 */ ++ _BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 40-47 */ ++ _BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D,_BCM_D, /* 48-55 */ ++ _BCM_D,_BCM_D,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 56-63 */ ++ _BCM_P, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, _BCM_U|_BCM_X, ++ _BCM_U|_BCM_X, _BCM_U, /* 64-71 */ ++ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 72-79 */ ++ _BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U,_BCM_U, /* 80-87 */ ++ _BCM_U,_BCM_U,_BCM_U,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_P, /* 88-95 */ ++ _BCM_P, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, _BCM_L|_BCM_X, ++ _BCM_L|_BCM_X, _BCM_L, /* 96-103 */ ++ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 104-111 */ ++ _BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L,_BCM_L, /* 112-119 */ ++ _BCM_L,_BCM_L,_BCM_L,_BCM_P,_BCM_P,_BCM_P,_BCM_P,_BCM_C, /* 120-127 */ ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 128-143 */ ++ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, /* 144-159 */ ++ _BCM_S|_BCM_SP, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, ++ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 160-175 */ ++ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, ++ _BCM_P, _BCM_P, _BCM_P, _BCM_P, _BCM_P, /* 176-191 */ ++ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, ++ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, /* 192-207 */ ++ _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_U, _BCM_P, _BCM_U, _BCM_U, _BCM_U, ++ _BCM_U, 
_BCM_U, _BCM_U, _BCM_U, _BCM_L, /* 208-223 */ ++ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, ++ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, /* 224-239 */ ++ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_P, _BCM_L, _BCM_L, _BCM_L, ++ _BCM_L, _BCM_L, _BCM_L, _BCM_L, _BCM_L /* 240-255 */ ++}; ++ ++ulong ++BCMROMFN(bcm_strtoul)(const char *cp, char **endp, uint base) ++{ ++ ulong result, last_result = 0, value; ++ bool minus; ++ ++ minus = FALSE; ++ ++ while (bcm_isspace(*cp)) { ++ cp++; ++ } ++ ++ if (cp[0] == '+') { ++ cp++; ++ } else if (cp[0] == '-') { ++ minus = TRUE; ++ cp++; ++ } ++ ++ if (base == 0) { ++ if (cp[0] == '0') { ++ if ((cp[1] == 'x') || (cp[1] == 'X')) { ++ base = 16; ++ cp = &cp[2]; ++ } else { ++ base = 8; ++ cp = &cp[1]; ++ } ++ } else { ++ base = 10; ++ } ++ } else if (base == 16 && (cp[0] == '0') && ((cp[1] == 'x') || (cp[1] == 'X'))) { ++ cp = &cp[2]; ++ } ++ ++ result = 0; ++ ++ while (bcm_isxdigit(*cp) && ++ (value = bcm_isdigit(*cp) ? 
*cp-'0' : bcm_toupper(*cp)-'A'+10) < base) { ++ result = result*base + value; ++ /* Detected overflow */ ++ if (result < last_result && !minus) { ++ return (ulong)-1; ++ } ++ last_result = result; ++ cp++; ++ } ++ ++ if (minus) { ++ result = (ulong)(-(long)result); ++ } ++ ++ if (endp) { ++ *endp = DISCARD_QUAL(cp, char); ++ } ++ ++ return (result); ++} ++ ++int ++BCMROMFN(bcm_atoi)(const char *s) ++{ ++ return (int)bcm_strtoul(s, NULL, 10); ++} ++ ++/* return pointer to location of substring 'needle' in 'haystack' */ ++char * ++BCMROMFN(bcmstrstr)(const char *haystack, const char *needle) ++{ ++ int len, nlen; ++ int i; ++ ++ if ((haystack == NULL) || (needle == NULL)) { ++ return DISCARD_QUAL(haystack, char); ++ } ++ ++ nlen = strlen(needle); ++ len = strlen(haystack) - nlen + 1; ++ ++ for (i = 0; i < len; i++) { ++ if (memcmp(needle, &haystack[i], nlen) == 0) { ++ return DISCARD_QUAL(&haystack[i], char); ++ } ++ } ++ return (NULL); ++} ++ ++char * ++BCMROMFN(bcmstrcat)(char *dest, const char *src) ++{ ++ char *p; ++ ++ p = dest + strlen(dest); ++ ++ while ((*p++ = *src++) != '\0') { ++ ; ++ } ++ ++ return (dest); ++} ++ ++char * ++BCMROMFN(bcmstrncat)(char *dest, const char *src, uint size) ++{ ++ char *endp; ++ char *p; ++ ++ p = dest + strlen(dest); ++ endp = p + size; ++ ++ while (p != endp && (*p++ = *src++) != '\0') { ++ ; ++ } ++ ++ return (dest); ++} ++ ++ ++/**************************************************************************** ++* Function: bcmstrtok ++* ++* Purpose: ++* Tokenizes a string. This function is conceptually similiar to ANSI C strtok(), ++* but allows strToken() to be used by different strings or callers at the same ++* time. Each call modifies '*string' by substituting a NULL character for the ++* first delimiter that is encountered, and updates 'string' to point to the char ++* after the delimiter. Leading delimiters are skipped. ++* ++* Parameters: ++* string (mod) Ptr to string ptr, updated by token. 
++* delimiters (in) Set of delimiter characters. ++* tokdelim (out) Character that delimits the returned token. (May ++* be set to NULL if token delimiter is not required). ++* ++* Returns: Pointer to the next token found. NULL when no more tokens are found. ++***************************************************************************** ++*/ ++char * ++bcmstrtok(char **string, const char *delimiters, char *tokdelim) ++{ ++ unsigned char *str; ++ unsigned long map[8]; ++ int count; ++ char *nextoken; ++ ++ if (tokdelim != NULL) { ++ /* Prime the token delimiter */ ++ *tokdelim = '\0'; ++ } ++ ++ /* Clear control map */ ++ for (count = 0; count < 8; count++) { ++ map[count] = 0; ++ } ++ ++ /* Set bits in delimiter table */ ++ do { ++ map[*delimiters >> 5] |= (1 << (*delimiters & 31)); ++ } ++ while (*delimiters++); ++ ++ str = (unsigned char*)*string; ++ ++ /* Find beginning of token (skip over leading delimiters). Note that ++ * there is no token iff this loop sets str to point to the terminal ++ * null (*str == '\0') ++ */ ++ while (((map[*str >> 5] & (1 << (*str & 31))) && *str) || (*str == ' ')) { ++ str++; ++ } ++ ++ nextoken = (char*)str; ++ ++ /* Find the end of the token. If it is not the end of the string, ++ * put a null there. ++ */ ++ for (; *str; str++) { ++ if (map[*str >> 5] & (1 << (*str & 31))) { ++ if (tokdelim != NULL) { ++ *tokdelim = *str; ++ } ++ ++ *str++ = '\0'; ++ break; ++ } ++ } ++ ++ *string = (char*)str; ++ ++ /* Determine if a token has been found. */ ++ if (nextoken == (char *) str) { ++ return NULL; ++ } ++ else { ++ return nextoken; ++ } ++} ++ ++ ++#define xToLower(C) \ ++ ((C >= 'A' && C <= 'Z') ? (char)((int)C - (int)'A' + (int)'a') : C) ++ ++ ++/**************************************************************************** ++* Function: bcmstricmp ++* ++* Purpose: Compare to strings case insensitively. ++* ++* Parameters: s1 (in) First string to compare. ++* s2 (in) Second string to compare. 
++* ++* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if ++* t1 > t2, when ignoring case sensitivity. ++***************************************************************************** ++*/ ++int ++bcmstricmp(const char *s1, const char *s2) ++{ ++ char dc, sc; ++ ++ while (*s2 && *s1) { ++ dc = xToLower(*s1); ++ sc = xToLower(*s2); ++ if (dc < sc) return -1; ++ if (dc > sc) return 1; ++ s1++; ++ s2++; ++ } ++ ++ if (*s1 && !*s2) return 1; ++ if (!*s1 && *s2) return -1; ++ return 0; ++} ++ ++ ++/**************************************************************************** ++* Function: bcmstrnicmp ++* ++* Purpose: Compare to strings case insensitively, upto a max of 'cnt' ++* characters. ++* ++* Parameters: s1 (in) First string to compare. ++* s2 (in) Second string to compare. ++* cnt (in) Max characters to compare. ++* ++* Returns: Return 0 if the two strings are equal, -1 if t1 < t2 and 1 if ++* t1 > t2, when ignoring case sensitivity. ++***************************************************************************** ++*/ ++int ++bcmstrnicmp(const char* s1, const char* s2, int cnt) ++{ ++ char dc, sc; ++ ++ while (*s2 && *s1 && cnt) { ++ dc = xToLower(*s1); ++ sc = xToLower(*s2); ++ if (dc < sc) return -1; ++ if (dc > sc) return 1; ++ s1++; ++ s2++; ++ cnt--; ++ } ++ ++ if (!cnt) return 0; ++ if (*s1 && !*s2) return 1; ++ if (!*s1 && *s2) return -1; ++ return 0; ++} ++ ++/* parse a xx:xx:xx:xx:xx:xx format ethernet address */ ++int ++BCMROMFN(bcm_ether_atoe)(const char *p, struct ether_addr *ea) ++{ ++ int i = 0; ++ char *ep; ++ ++ for (;;) { ++ ea->octet[i++] = (char) bcm_strtoul(p, &ep, 16); ++ p = ep; ++ if (!*p++ || i == 6) ++ break; ++ } ++ ++ return (i == 6); ++} ++ ++ ++#if defined(CONFIG_USBRNDIS_RETAIL) || defined(NDIS_MINIPORT_DRIVER) ++/* registry routine buffer preparation utility functions: ++ * parameter order is like strncpy, but returns count ++ * of bytes copied. 
Minimum bytes copied is null char(1)/wchar(2) ++ */ ++ulong ++wchar2ascii(char *abuf, ushort *wbuf, ushort wbuflen, ulong abuflen) ++{ ++ ulong copyct = 1; ++ ushort i; ++ ++ if (abuflen == 0) ++ return 0; ++ ++ /* wbuflen is in bytes */ ++ wbuflen /= sizeof(ushort); ++ ++ for (i = 0; i < wbuflen; ++i) { ++ if (--abuflen == 0) { ++ break; ++ } ++ *abuf++ = (char) *wbuf++; ++ ++copyct; ++ } ++ *abuf = '\0'; ++ ++ return copyct; ++} ++#endif /* CONFIG_USBRNDIS_RETAIL || NDIS_MINIPORT_DRIVER */ ++ ++char * ++bcm_ether_ntoa(const struct ether_addr *ea, char *buf) ++{ ++ static const char hex[] = ++ { ++ '0', '1', '2', '3', '4', '5', '6', '7', ++ '8', '9', 'a', 'b', 'c', 'd', 'e', 'f' ++ }; ++ const uint8 *octet = ea->octet; ++ char *p = buf; ++ int i; ++ ++ for (i = 0; i < 6; i++, octet++) { ++ *p++ = hex[(*octet >> 4) & 0xf]; ++ *p++ = hex[*octet & 0xf]; ++ *p++ = ':'; ++ } ++ ++ *(p-1) = '\0'; ++ ++ return (buf); ++} ++ ++char * ++bcm_ip_ntoa(struct ipv4_addr *ia, char *buf) ++{ ++ snprintf(buf, 16, "%d.%d.%d.%d", ++ ia->addr[0], ia->addr[1], ia->addr[2], ia->addr[3]); ++ return (buf); ++} ++ ++#ifdef BCMDRIVER ++ ++void ++bcm_mdelay(uint ms) ++{ ++ uint i; ++ ++ for (i = 0; i < ms; i++) { ++ OSL_DELAY(1000); ++ } ++} ++ ++/* ++ * Search the name=value vars for a specific one and return its value. ++ * Returns NULL if not found. ++ */ ++char * ++getvar(char *vars, const char *name) ++{ ++#ifdef _MINOSL_ ++ return NULL; ++#else ++ char *s; ++ int len; ++ ++ if (!name) { ++ return NULL; ++ } ++ ++ len = strlen(name); ++ if (len == 0) { ++ return NULL; ++ } ++ ++ /* first look in vars[] */ ++ for (s = vars; s && *s;) { ++ if ((bcmp(s, name, len) == 0) && (s[len] == '=') && (strlen(s)==len)) { ++ return (&s[len+1]); ++ } ++ ++ while (*s++) { ++ ; ++ } ++ } ++ ++ /* then query nvram */ ++ return (nvram_get(name)); ++#endif /* defined(_MINOSL_) */ ++} ++ ++/* ++ * Search the vars for a specific one and return its value as ++ * an integer. Returns 0 if not found. 
++ */ ++int ++getintvar(char *vars, const char *name) ++{ ++#ifdef _MINOSL_ ++ return 0; ++#else ++ char *val; ++ ++ if ((val = getvar(vars, name)) == NULL) { ++ return (0); ++ } ++ ++ return (bcm_strtoul(val, NULL, 0)); ++#endif /* _MINOSL_ */ ++} ++ ++int ++getintvararray(char *vars, const char *name, int index) ++{ ++#ifdef _MINOSL_ ++ return 0; ++#else ++ char *buf, *endp; ++ int i = 0; ++ int val = 0; ++ ++ if ((buf = getvar(vars, name)) == NULL) { ++ return (0); ++ } ++ ++ /* table values are always separated by "," or " " */ ++ while (*buf != '\0') { ++ val = bcm_strtoul(buf, &endp, 0); ++ if (i == index) { ++ return val; ++ } ++ buf = endp; ++ /* delimiter is ',' */ ++ if (*buf == ',') { ++ buf++; ++ } ++ i++; ++ } ++ return 0; ++#endif /* _MINOSL_ */ ++} ++ ++int ++getintvararraysize(char *vars, const char *name) ++{ ++#ifdef _MINOSL_ ++ return 0; ++#else ++ char *buf, *endp; ++ int count = 0; ++ int val = 0; ++ ++ if ((buf = getvar(vars, name)) == NULL) { ++ return (0); ++ } ++ ++ /* table values are always separated by "," or " " */ ++ while (*buf != '\0') { ++ val = bcm_strtoul(buf, &endp, 0); ++ buf = endp; ++ /* delimiter is ',' */ ++ if (*buf == ',') { ++ buf++; ++ } ++ count++; ++ } ++ BCM_REFERENCE(val); ++ return count; ++#endif /* _MINOSL_ */ ++} ++ ++/* Search for token in comma separated token-string */ ++static int ++findmatch(const char *string, const char *name) ++{ ++ uint len; ++ char *c; ++ ++ len = strlen(name); ++ while ((c = strchr(string, ',')) != NULL) { ++ if (len == (uint)(c - string) && !strncmp(string, name, len)) { ++ return 1; ++ } ++ string = c + 1; ++ } ++ ++ return (!strcmp(string, name)); ++} ++ ++/* Return gpio pin number assigned to the named pin ++ * ++ * Variable should be in format: ++ * ++ * gpio=pin_name,pin_name ++ * ++ * This format allows multiple features to share the gpio with mutual ++ * understanding. 
++ * ++ * 'def_pin' is returned if a specific gpio is not defined for the requested functionality ++ * and if def_pin is not used by others. ++ */ ++uint ++getgpiopin(char *vars, char *pin_name, uint def_pin) ++{ ++ char name[] = "gpioXXXX"; ++ char *val; ++ uint pin; ++ ++ /* Go thru all possibilities till a match in pin name */ ++ for (pin = 0; pin < GPIO_NUMPINS; pin ++) { ++ snprintf(name, sizeof(name), "gpio%d", pin); ++ val = getvar(vars, name); ++ if (val && findmatch(val, pin_name)) { ++ return pin; ++ } ++ } ++ ++ if (def_pin != GPIO_PIN_NOTDEFINED) { ++ /* make sure the default pin is not used by someone else */ ++ snprintf(name, sizeof(name), "gpio%d", def_pin); ++ if (getvar(vars, name)) { ++ def_pin = GPIO_PIN_NOTDEFINED; ++ } ++ } ++ return def_pin; ++} ++ ++ ++/* Return the WAN port number ++ * ++ * 0 is returned if no wanport is configured. ++ */ ++int ++getwanport(void) ++{ ++ char name[] = "wanport"; ++ int retval; ++ ++ retval = getintvar(NULL, name); ++ return retval; ++} ++ ++ ++/* Return the brcmtag variable ++ * ++ * 0 is returned if no wanport is configured. ++ */ ++int ++getbrcmtag(void) ++{ ++ char name[] = "brcmtag"; ++ int retval; ++ ++ retval = getintvar(NULL, name); ++ return retval; ++} ++ ++#if defined(BCMPERFSTATS) || defined(BCMTSTAMPEDLOGS) ++#define LOGSIZE 256 /* should be power of 2 to avoid div below */ ++static struct { ++ uint cycles; ++ char *fmt; ++ uint a1; ++ uint a2; ++} logtab[LOGSIZE]; ++ ++/* last entry logged */ ++static uint logi = 0; ++/* next entry to read */ ++static uint readi = 0; ++#endif /* defined(BCMPERFSTATS) || defined(BCMTSTAMPEDLOGS) */ ++ ++#ifdef BCMPERFSTATS ++void ++bcm_perf_enable() ++{ ++ BCMPERF_ENABLE_INSTRCOUNT(); ++ BCMPERF_ENABLE_ICACHE_MISS(); ++ BCMPERF_ENABLE_ICACHE_HIT(); ++} ++ ++/* WARNING: This routine uses OSL_GETCYCLES(), which can give unexpected results on ++ * modern speed stepping CPUs. Use bcmtslog() instead in combination with TSF counter. 
++ */ ++void ++bcmlog(char *fmt, uint a1, uint a2) ++{ ++ static uint last = 0; ++ uint cycles, i; ++ OSL_GETCYCLES(cycles); ++ ++ i = logi; ++ ++ logtab[i].cycles = cycles - last; ++ logtab[i].fmt = fmt; ++ logtab[i].a1 = a1; ++ logtab[i].a2 = a2; ++ ++ logi = (i + 1) % LOGSIZE; ++ last = cycles; ++} ++ ++ ++void ++bcmstats(char *fmt) ++{ ++ static uint last = 0; ++ static uint32 ic_miss = 0; ++ static uint32 instr_count = 0; ++ uint32 ic_miss_cur; ++ uint32 instr_count_cur; ++ uint cycles, i; ++ ++ OSL_GETCYCLES(cycles); ++ BCMPERF_GETICACHE_MISS(ic_miss_cur); ++ BCMPERF_GETINSTRCOUNT(instr_count_cur); ++ ++ i = logi; ++ ++ logtab[i].cycles = cycles - last; ++ logtab[i].a1 = ic_miss_cur - ic_miss; ++ logtab[i].a2 = instr_count_cur - instr_count; ++ logtab[i].fmt = fmt; ++ ++ logi = (i + 1) % LOGSIZE; ++ ++ last = cycles; ++ instr_count = instr_count_cur; ++ ic_miss = ic_miss_cur; ++} ++ ++ ++void ++bcmdumplog(char *buf, int size) ++{ ++ char *limit; ++ int j = 0; ++ int num; ++ ++ limit = buf + size - 80; ++ *buf = '\0'; ++ ++ num = logi - readi; ++ ++ if (num < 0) { ++ num += LOGSIZE; ++ } ++ ++ /* print in chronological order */ ++ ++ for (j = 0; j < num && (buf < limit); readi = (readi + 1) % LOGSIZE, j++) { ++ if (logtab[readi].fmt == NULL) { ++ continue; ++ } ++ buf += snprintf(buf, (limit - buf), "%d\t", logtab[readi].cycles); ++ buf += snprintf(buf, (limit - buf), logtab[readi].fmt, logtab[readi].a1, ++ logtab[readi].a2); ++ buf += snprintf(buf, (limit - buf), "\n"); ++ } ++ ++} ++ ++ ++/* ++ * Dump one log entry at a time. ++ * Return index of next entry or -1 when no more . ++ */ ++int ++bcmdumplogent(char *buf, uint i) ++{ ++ bool hit; ++ ++ /* ++ * If buf is NULL, return the starting index, ++ * interpreting i as the indicator of last 'i' entries to dump. ++ */ ++ if (buf == NULL) { ++ i = ((i > 0) && (i < (LOGSIZE - 1))) ? 
i : (LOGSIZE - 1); ++ return ((logi - i) % LOGSIZE); ++ } ++ ++ *buf = '\0'; ++ ++ ASSERT(i < LOGSIZE); ++ ++ if (i == logi) { ++ return (-1); ++ } ++ ++ hit = FALSE; ++ for (; (i != logi) && !hit; i = (i + 1) % LOGSIZE) { ++ if (logtab[i].fmt == NULL) { ++ continue; ++ } ++ buf += sprintf(buf, "%d: %d\t", i, logtab[i].cycles); ++ buf += sprintf(buf, logtab[i].fmt, logtab[i].a1, logtab[i].a2); ++ buf += sprintf(buf, "\n"); ++ hit = TRUE; ++ } ++ ++ return (i); ++} ++ ++#endif /* BCMPERFSTATS */ ++ ++#if defined(BCMTSTAMPEDLOGS) ++/* Store a TSF timestamp and a log line in the log buffer */ ++void ++bcmtslog(uint32 tstamp, char *fmt, uint a1, uint a2) ++{ ++ uint i = logi; ++ bool use_delta = FALSE; ++ static uint32 last = 0; /* used only when use_delta is true */ ++ ++ logtab[i].cycles = tstamp; ++ if (use_delta) { ++ logtab[i].cycles -= last; ++ } ++ ++ logtab[i].fmt = fmt; ++ logtab[i].a1 = a1; ++ logtab[i].a2 = a2; ++ ++ if (use_delta) { ++ last = tstamp; ++ } ++ logi = (i + 1) % LOGSIZE; ++} ++ ++/* Print out a microsecond timestamp as "sec.ms.us " */ ++void ++bcmprinttstamp(uint32 ticks) ++{ ++ uint us, ms, sec; ++ ++ us = (ticks % TSF_TICKS_PER_MS) * 1000 / TSF_TICKS_PER_MS; ++ ms = ticks / TSF_TICKS_PER_MS; ++ sec = ms / 1000; ++ ms -= sec * 1000; ++ printf("%04u.%03u.%03u ", sec, ms, us); ++} ++ ++/* Print out the log buffer with timestamps */ ++void ++bcmprinttslogs(void) ++{ ++ int j = 0; ++ int num; ++ ++ num = logi - readi; ++ if (num < 0) { ++ num += LOGSIZE; ++ } ++ ++ /* Format and print the log entries directly in chronological order */ ++ for (j = 0; j < num; readi = (readi + 1) % LOGSIZE, j++) { ++ if (logtab[readi].fmt == NULL) { ++ continue; ++ } ++ bcmprinttstamp(logtab[readi].cycles); ++ printf(logtab[readi].fmt, logtab[readi].a1, logtab[readi].a2); ++ printf("\n"); ++ } ++} ++ ++void ++bcmdumptslog(char *buf, int size) ++{ ++ char *limit; ++ int j = 0; ++ int num; ++ uint us, ms, sec; ++ ++ limit = buf + size - 80; ++ *buf = '\0'; ++ ++ num = 
logi - readi; ++ ++ if (num < 0) { ++ num += LOGSIZE; ++ } ++ ++ /* print in chronological order */ ++ for (j = 0; j < num && (buf < limit); readi = (readi + 1) % LOGSIZE, j++) { ++ if (logtab[readi].fmt == NULL) { ++ continue; ++ } ++ us = (logtab[readi].cycles % TSF_TICKS_PER_MS) * 1000 / TSF_TICKS_PER_MS; ++ ms = logtab[readi].cycles / TSF_TICKS_PER_MS; ++ sec = ms / 1000; ++ ms -= sec * 1000; ++ ++ buf += snprintf(buf, (limit - buf), "%04u.%03u.%03u ", sec, ms, us); ++ /* buf += snprintf(buf, (limit - buf), "%d\t", logtab[readi].cycles); */ ++ buf += snprintf(buf, (limit - buf), logtab[readi].fmt, logtab[readi].a1, ++ logtab[readi].a2); ++ buf += snprintf(buf, (limit - buf), "\n"); ++ } ++} ++#endif /* BCMTSTAMPEDLOGS */ ++ ++#if defined(BCMDBG) || defined(DHD_DEBUG) ++/* pretty hex print a pkt buffer chain */ ++void ++prpkt(const char *msg, osl_t *osh, void *p0) ++{ ++ void *p; ++ ++ if (msg && (msg[0] != '\0')) { ++ printf("%s:\n", msg); ++ } ++ ++ for (p = p0; p; p = PKTNEXT(osh, p)) { ++ prhex(NULL, PKTDATA(osh, p), PKTLEN(osh, p)); ++ } ++} ++#endif /* BCMDBG || DHD_DEBUG */ ++ ++/* Takes an Ethernet frame and sets out-of-bound PKTPRIO. ++ * Also updates the inplace vlan tag if requested. ++ * For debugging, it returns an indication of what it did. 
++ */ ++uint BCMFASTPATH ++pktsetprio(void *pkt, bool update_vtag) ++{ ++ struct ether_header *eh; ++ struct ethervlan_header *evh; ++ uint8 *pktdata; ++ int priority = 0; ++ int rc = 0; ++ ++ pktdata = (uint8 *)PKTDATA(NULL, pkt); ++ ASSERT(ISALIGNED((uintptr)pktdata, sizeof(uint16))); ++ ++ eh = (struct ether_header *) pktdata; ++ ++ if (eh->ether_type == hton16(ETHER_TYPE_8021Q)) { ++ uint16 vlan_tag; ++ int vlan_prio, dscp_prio = 0; ++ ++ evh = (struct ethervlan_header *)eh; ++ ++ vlan_tag = ntoh16(evh->vlan_tag); ++ vlan_prio = (int) (vlan_tag >> VLAN_PRI_SHIFT) & VLAN_PRI_MASK; ++ ++ if (evh->ether_type == hton16(ETHER_TYPE_IP)) { ++ uint8 *ip_body = pktdata + sizeof(struct ethervlan_header); ++ uint8 tos_tc = IP_TOS46(ip_body); ++ dscp_prio = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); ++ } ++ ++ /* DSCP priority gets precedence over 802.1P (vlan tag) */ ++ if (dscp_prio != 0) { ++ priority = dscp_prio; ++ rc |= PKTPRIO_VDSCP; ++ } else { ++ priority = vlan_prio; ++ rc |= PKTPRIO_VLAN; ++ } ++ /* ++ * If the DSCP priority is not the same as the VLAN priority, ++ * then overwrite the priority field in the vlan tag, with the ++ * DSCP priority value. 
This is required for Linux APs because ++ * the VLAN driver on Linux, overwrites the skb->priority field ++ * with the priority value in the vlan tag ++ */ ++ if (update_vtag && (priority != vlan_prio)) { ++ vlan_tag &= ~(VLAN_PRI_MASK << VLAN_PRI_SHIFT); ++ vlan_tag |= (uint16)priority << VLAN_PRI_SHIFT; ++ evh->vlan_tag = hton16(vlan_tag); ++ rc |= PKTPRIO_UPD; ++ } ++ } else if (eh->ether_type == hton16(ETHER_TYPE_IP)) { ++ uint8 *ip_body = pktdata + sizeof(struct ether_header); ++ uint8 tos_tc = IP_TOS46(ip_body); ++ priority = (int)(tos_tc >> IPV4_TOS_PREC_SHIFT); ++ rc |= PKTPRIO_DSCP; ++ } ++ ++ ASSERT(priority >= 0 && priority <= MAXPRIO); ++ PKTSETPRIO(pkt, priority); ++ return (rc | priority); ++} ++ ++#ifndef BCM_BOOTLOADER ++ ++static char bcm_undeferrstr[32]; ++static const char *const bcmerrorstrtable[] = BCMERRSTRINGTABLE; ++ ++/* Convert the error codes into related error strings */ ++const char * ++bcmerrorstr(int bcmerror) ++{ ++ /* check if someone added a bcmerror code but forgot to add errorstring */ ++ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(bcmerrorstrtable) - 1)); ++ ++ if (bcmerror > 0 || bcmerror < BCME_LAST) { ++ snprintf(bcm_undeferrstr, sizeof(bcm_undeferrstr), "Undefined error %d", bcmerror); ++ return bcm_undeferrstr; ++ } ++ ++ ASSERT(strlen(bcmerrorstrtable[-bcmerror]) < BCME_STRLEN); ++ ++ return bcmerrorstrtable[-bcmerror]; ++} ++ ++#endif /* !BCM_BOOTLOADER */ ++ ++#ifdef WLC_LOW ++static void ++BCMINITFN(bcm_nvram_refresh)(char *flash) ++{ ++ int i; ++ int ret = 0; ++ ++ ASSERT(flash != NULL); ++ ++ /* default "empty" vars cache */ ++ bzero(flash, 2); ++ ++ if ((ret = nvram_getall(flash, NVRAM_SPACE))) { ++ return; ++ } ++ ++ /* determine nvram length */ ++ for (i = 0; i < NVRAM_SPACE; i++) { ++ if (flash[i] == '\0' && flash[i+1] == '\0') { ++ break; ++ } ++ } ++ ++ if (i > 1) { ++ vars_len = i + 2; ++ } else { ++ vars_len = 0; ++ } ++} ++ ++char * ++bcm_nvram_vars(uint *length) ++{ ++ /* cache may be stale if nvram is read/write */ 
++ if (nvram_vars) { ++ ASSERT(!bcmreclaimed); ++ bcm_nvram_refresh(nvram_vars); ++ } ++ if (length) { ++ *length = vars_len; ++ } ++ return nvram_vars; ++} ++ ++/* copy nvram vars into locally-allocated multi-string array */ ++int ++BCMINITFN(bcm_nvram_cache)(void *sih) ++{ ++ int ret = 0; ++ void *osh; ++ char *flash = NULL; ++ ++ if (vars_len >= 0) { ++ bcm_nvram_refresh(nvram_vars); ++ return 0; ++ } ++ ++ osh = si_osh((si_t *)sih); ++ ++ /* allocate memory and read in flash */ ++ if (!(flash = MALLOC(osh, NVRAM_SPACE))) { ++ ret = BCME_NOMEM; ++ goto exit; ++ } ++ ++ bcm_nvram_refresh(flash); ++ ++ /* cache must be full size of nvram if read/write */ ++ nvram_vars = flash; ++ ++exit: ++ return ret; ++} ++#endif /* WLC_LOW */ ++ ++ ++int32 ++exthdr_validate(char *ptr, uint size) ++{ ++ char *exthdr, *trx_offset; ++ uint hdrsz; ++ int trxof = 0; ++ ++ if ((exthdr = nvram_get("ext_imghdr"))) { ++ char s[] = "XXX"; ++ uint i, j; ++ ++ hdrsz = strlen(exthdr); ++ ++ if (hdrsz > size) { ++ printf("Exthdr_size(%d) > Image_size(%d)\n", hdrsz, size); ++ trxof = -1; ++ goto done; ++ } ++ ++ if (hdrsz == 0) { ++ goto match; ++ } ++ ++ for (i = 0, j = 0; i < (hdrsz >> 1); i++) { ++ sprintf(s, "%02x", (ptr[i] & 0xff)); ++ if ((exthdr[j++] != s[0]) || (exthdr[j++] != s[1])) { ++ printf("Header mismatch\n"); ++ goto done; ++ } ++ } ++ } ++ ++match: ++ if ((trx_offset = nvram_get("trx_offset"))) ++ trxof = bcm_strtoul(trx_offset, NULL, 0); ++ ++done: ++ return trxof; ++} ++ ++/* iovar table lookup */ ++const bcm_iovar_t* ++bcm_iovar_lookup(const bcm_iovar_t *table, const char *name) ++{ ++ const bcm_iovar_t *vi; ++ const char *lookup_name; ++ ++ /* skip any ':' delimited option prefixes */ ++ lookup_name = strrchr(name, ':'); ++ if (lookup_name != NULL) { ++ lookup_name++; ++ } else { ++ lookup_name = name; ++ } ++ ++ ASSERT(table != NULL); ++ ++ for (vi = table; vi->name; vi++) { ++ if (!strcmp(vi->name, lookup_name)) { ++ return vi; ++ } ++ } ++ /* ran to end of table */ ++ 
++ return NULL; /* var name not found */ ++} ++ ++int ++bcm_iovar_lencheck(const bcm_iovar_t *vi, void *arg, int len, bool set) ++{ ++ int bcmerror = 0; ++ ++ /* length check on io buf */ ++ switch (vi->type) { ++ case IOVT_BOOL: ++ case IOVT_INT8: ++ case IOVT_INT16: ++ case IOVT_INT32: ++ case IOVT_UINT8: ++ case IOVT_UINT16: ++ case IOVT_UINT32: ++ /* all integers are int32 sized args at the ioctl interface */ ++ if (len < (int)sizeof(int)) { ++ bcmerror = BCME_BUFTOOSHORT; ++ } ++ break; ++ ++ case IOVT_BUFFER: ++ /* buffer must meet minimum length requirement */ ++ if (len < vi->minlen) { ++ bcmerror = BCME_BUFTOOSHORT; ++ } ++ break; ++ ++ case IOVT_VOID: ++ if (!set) { ++ /* Cannot return nil... */ ++ bcmerror = BCME_UNSUPPORTED; ++ } else if (len) { ++ /* Set is an action w/o parameters */ ++ bcmerror = BCME_BUFTOOLONG; ++ } ++ break; ++ ++ default: ++ /* unknown type for length check in iovar info */ ++ ASSERT(0); ++ bcmerror = BCME_UNSUPPORTED; ++ } ++ ++ return bcmerror; ++} ++#endif /* BCMDRIVER */ ++ ++ ++/******************************************************************************* ++ * crc8 ++ * ++ * Computes a crc8 over the input data using the polynomial: ++ * ++ * x^8 + x^7 +x^6 + x^4 + x^2 + 1 ++ * ++ * The caller provides the initial value (either CRC8_INIT_VALUE ++ * or the previous returned value) to allow for processing of ++ * discontiguous blocks of data. When generating the CRC the ++ * caller is responsible for complementing the final return value ++ * and inserting it into the byte stream. When checking, a final ++ * return value of CRC8_GOOD_VALUE indicates a valid CRC. 
++ * ++ * Reference: Dallas Semiconductor Application Note 27 ++ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", ++ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., ++ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt ++ * ++ * **************************************************************************** ++ */ ++ ++static const uint8 crc8_table[256] = { ++ 0x00, 0xF7, 0xB9, 0x4E, 0x25, 0xD2, 0x9C, 0x6B, ++ 0x4A, 0xBD, 0xF3, 0x04, 0x6F, 0x98, 0xD6, 0x21, ++ 0x94, 0x63, 0x2D, 0xDA, 0xB1, 0x46, 0x08, 0xFF, ++ 0xDE, 0x29, 0x67, 0x90, 0xFB, 0x0C, 0x42, 0xB5, ++ 0x7F, 0x88, 0xC6, 0x31, 0x5A, 0xAD, 0xE3, 0x14, ++ 0x35, 0xC2, 0x8C, 0x7B, 0x10, 0xE7, 0xA9, 0x5E, ++ 0xEB, 0x1C, 0x52, 0xA5, 0xCE, 0x39, 0x77, 0x80, ++ 0xA1, 0x56, 0x18, 0xEF, 0x84, 0x73, 0x3D, 0xCA, ++ 0xFE, 0x09, 0x47, 0xB0, 0xDB, 0x2C, 0x62, 0x95, ++ 0xB4, 0x43, 0x0D, 0xFA, 0x91, 0x66, 0x28, 0xDF, ++ 0x6A, 0x9D, 0xD3, 0x24, 0x4F, 0xB8, 0xF6, 0x01, ++ 0x20, 0xD7, 0x99, 0x6E, 0x05, 0xF2, 0xBC, 0x4B, ++ 0x81, 0x76, 0x38, 0xCF, 0xA4, 0x53, 0x1D, 0xEA, ++ 0xCB, 0x3C, 0x72, 0x85, 0xEE, 0x19, 0x57, 0xA0, ++ 0x15, 0xE2, 0xAC, 0x5B, 0x30, 0xC7, 0x89, 0x7E, ++ 0x5F, 0xA8, 0xE6, 0x11, 0x7A, 0x8D, 0xC3, 0x34, ++ 0xAB, 0x5C, 0x12, 0xE5, 0x8E, 0x79, 0x37, 0xC0, ++ 0xE1, 0x16, 0x58, 0xAF, 0xC4, 0x33, 0x7D, 0x8A, ++ 0x3F, 0xC8, 0x86, 0x71, 0x1A, 0xED, 0xA3, 0x54, ++ 0x75, 0x82, 0xCC, 0x3B, 0x50, 0xA7, 0xE9, 0x1E, ++ 0xD4, 0x23, 0x6D, 0x9A, 0xF1, 0x06, 0x48, 0xBF, ++ 0x9E, 0x69, 0x27, 0xD0, 0xBB, 0x4C, 0x02, 0xF5, ++ 0x40, 0xB7, 0xF9, 0x0E, 0x65, 0x92, 0xDC, 0x2B, ++ 0x0A, 0xFD, 0xB3, 0x44, 0x2F, 0xD8, 0x96, 0x61, ++ 0x55, 0xA2, 0xEC, 0x1B, 0x70, 0x87, 0xC9, 0x3E, ++ 0x1F, 0xE8, 0xA6, 0x51, 0x3A, 0xCD, 0x83, 0x74, ++ 0xC1, 0x36, 0x78, 0x8F, 0xE4, 0x13, 0x5D, 0xAA, ++ 0x8B, 0x7C, 0x32, 0xC5, 0xAE, 0x59, 0x17, 0xE0, ++ 0x2A, 0xDD, 0x93, 0x64, 0x0F, 0xF8, 0xB6, 0x41, ++ 0x60, 0x97, 0xD9, 0x2E, 0x45, 0xB2, 0xFC, 0x0B, ++ 0xBE, 0x49, 0x07, 0xF0, 0x9B, 0x6C, 0x22, 0xD5, ++ 0xF4, 
0x03, 0x4D, 0xBA, 0xD1, 0x26, 0x68, 0x9F ++}; ++ ++#define CRC_INNER_LOOP(n, c, x) \ ++ (c) = ((c) >> 8) ^ crc##n##_table[((c) ^ (x)) & 0xff] ++ ++uint8 ++BCMROMFN(hndcrc8)( ++ uint8 *pdata, /* pointer to array of data to process */ ++ uint nbytes, /* number of input data bytes to process */ ++ uint8 crc /* either CRC8_INIT_VALUE or previous return value */ ++) ++{ ++ /* hard code the crc loop instead of using CRC_INNER_LOOP macro ++ * to avoid the undefined and unnecessary (uint8 >> 8) operation. ++ */ ++ while (nbytes-- > 0) { ++ crc = crc8_table[(crc ^ *pdata++) & 0xff]; ++ } ++ ++ return crc; ++} ++ ++/******************************************************************************* ++ * crc16 ++ * ++ * Computes a crc16 over the input data using the polynomial: ++ * ++ * x^16 + x^12 +x^5 + 1 ++ * ++ * The caller provides the initial value (either CRC16_INIT_VALUE ++ * or the previous returned value) to allow for processing of ++ * discontiguous blocks of data. When generating the CRC the ++ * caller is responsible for complementing the final return value ++ * and inserting it into the byte stream. When checking, a final ++ * return value of CRC16_GOOD_VALUE indicates a valid CRC. 
++ * ++ * Reference: Dallas Semiconductor Application Note 27 ++ * Williams, Ross N., "A Painless Guide to CRC Error Detection Algorithms", ++ * ver 3, Aug 1993, ross@guest.adelaide.edu.au, Rocksoft Pty Ltd., ++ * ftp://ftp.rocksoft.com/clients/rocksoft/papers/crc_v3.txt ++ * ++ * **************************************************************************** ++ */ ++ ++static const uint16 crc16_table[256] = { ++ 0x0000, 0x1189, 0x2312, 0x329B, 0x4624, 0x57AD, 0x6536, 0x74BF, ++ 0x8C48, 0x9DC1, 0xAF5A, 0xBED3, 0xCA6C, 0xDBE5, 0xE97E, 0xF8F7, ++ 0x1081, 0x0108, 0x3393, 0x221A, 0x56A5, 0x472C, 0x75B7, 0x643E, ++ 0x9CC9, 0x8D40, 0xBFDB, 0xAE52, 0xDAED, 0xCB64, 0xF9FF, 0xE876, ++ 0x2102, 0x308B, 0x0210, 0x1399, 0x6726, 0x76AF, 0x4434, 0x55BD, ++ 0xAD4A, 0xBCC3, 0x8E58, 0x9FD1, 0xEB6E, 0xFAE7, 0xC87C, 0xD9F5, ++ 0x3183, 0x200A, 0x1291, 0x0318, 0x77A7, 0x662E, 0x54B5, 0x453C, ++ 0xBDCB, 0xAC42, 0x9ED9, 0x8F50, 0xFBEF, 0xEA66, 0xD8FD, 0xC974, ++ 0x4204, 0x538D, 0x6116, 0x709F, 0x0420, 0x15A9, 0x2732, 0x36BB, ++ 0xCE4C, 0xDFC5, 0xED5E, 0xFCD7, 0x8868, 0x99E1, 0xAB7A, 0xBAF3, ++ 0x5285, 0x430C, 0x7197, 0x601E, 0x14A1, 0x0528, 0x37B3, 0x263A, ++ 0xDECD, 0xCF44, 0xFDDF, 0xEC56, 0x98E9, 0x8960, 0xBBFB, 0xAA72, ++ 0x6306, 0x728F, 0x4014, 0x519D, 0x2522, 0x34AB, 0x0630, 0x17B9, ++ 0xEF4E, 0xFEC7, 0xCC5C, 0xDDD5, 0xA96A, 0xB8E3, 0x8A78, 0x9BF1, ++ 0x7387, 0x620E, 0x5095, 0x411C, 0x35A3, 0x242A, 0x16B1, 0x0738, ++ 0xFFCF, 0xEE46, 0xDCDD, 0xCD54, 0xB9EB, 0xA862, 0x9AF9, 0x8B70, ++ 0x8408, 0x9581, 0xA71A, 0xB693, 0xC22C, 0xD3A5, 0xE13E, 0xF0B7, ++ 0x0840, 0x19C9, 0x2B52, 0x3ADB, 0x4E64, 0x5FED, 0x6D76, 0x7CFF, ++ 0x9489, 0x8500, 0xB79B, 0xA612, 0xD2AD, 0xC324, 0xF1BF, 0xE036, ++ 0x18C1, 0x0948, 0x3BD3, 0x2A5A, 0x5EE5, 0x4F6C, 0x7DF7, 0x6C7E, ++ 0xA50A, 0xB483, 0x8618, 0x9791, 0xE32E, 0xF2A7, 0xC03C, 0xD1B5, ++ 0x2942, 0x38CB, 0x0A50, 0x1BD9, 0x6F66, 0x7EEF, 0x4C74, 0x5DFD, ++ 0xB58B, 0xA402, 0x9699, 0x8710, 0xF3AF, 0xE226, 0xD0BD, 0xC134, ++ 0x39C3, 0x284A, 0x1AD1, 0x0B58, 0x7FE7, 
0x6E6E, 0x5CF5, 0x4D7C, ++ 0xC60C, 0xD785, 0xE51E, 0xF497, 0x8028, 0x91A1, 0xA33A, 0xB2B3, ++ 0x4A44, 0x5BCD, 0x6956, 0x78DF, 0x0C60, 0x1DE9, 0x2F72, 0x3EFB, ++ 0xD68D, 0xC704, 0xF59F, 0xE416, 0x90A9, 0x8120, 0xB3BB, 0xA232, ++ 0x5AC5, 0x4B4C, 0x79D7, 0x685E, 0x1CE1, 0x0D68, 0x3FF3, 0x2E7A, ++ 0xE70E, 0xF687, 0xC41C, 0xD595, 0xA12A, 0xB0A3, 0x8238, 0x93B1, ++ 0x6B46, 0x7ACF, 0x4854, 0x59DD, 0x2D62, 0x3CEB, 0x0E70, 0x1FF9, ++ 0xF78F, 0xE606, 0xD49D, 0xC514, 0xB1AB, 0xA022, 0x92B9, 0x8330, ++ 0x7BC7, 0x6A4E, 0x58D5, 0x495C, 0x3DE3, 0x2C6A, 0x1EF1, 0x0F78 ++}; ++ ++uint16 ++BCMROMFN(hndcrc16)( ++ uint8 *pdata, /* pointer to array of data to process */ ++ uint nbytes, /* number of input data bytes to process */ ++ uint16 crc /* either CRC16_INIT_VALUE or previous return value */ ++) ++{ ++ while (nbytes-- > 0) { ++ CRC_INNER_LOOP(16, crc, *pdata++); ++ } ++ return crc; ++} ++ ++static const uint32 crc32_table[256] = { ++ 0x00000000, 0x77073096, 0xEE0E612C, 0x990951BA, ++ 0x076DC419, 0x706AF48F, 0xE963A535, 0x9E6495A3, ++ 0x0EDB8832, 0x79DCB8A4, 0xE0D5E91E, 0x97D2D988, ++ 0x09B64C2B, 0x7EB17CBD, 0xE7B82D07, 0x90BF1D91, ++ 0x1DB71064, 0x6AB020F2, 0xF3B97148, 0x84BE41DE, ++ 0x1ADAD47D, 0x6DDDE4EB, 0xF4D4B551, 0x83D385C7, ++ 0x136C9856, 0x646BA8C0, 0xFD62F97A, 0x8A65C9EC, ++ 0x14015C4F, 0x63066CD9, 0xFA0F3D63, 0x8D080DF5, ++ 0x3B6E20C8, 0x4C69105E, 0xD56041E4, 0xA2677172, ++ 0x3C03E4D1, 0x4B04D447, 0xD20D85FD, 0xA50AB56B, ++ 0x35B5A8FA, 0x42B2986C, 0xDBBBC9D6, 0xACBCF940, ++ 0x32D86CE3, 0x45DF5C75, 0xDCD60DCF, 0xABD13D59, ++ 0x26D930AC, 0x51DE003A, 0xC8D75180, 0xBFD06116, ++ 0x21B4F4B5, 0x56B3C423, 0xCFBA9599, 0xB8BDA50F, ++ 0x2802B89E, 0x5F058808, 0xC60CD9B2, 0xB10BE924, ++ 0x2F6F7C87, 0x58684C11, 0xC1611DAB, 0xB6662D3D, ++ 0x76DC4190, 0x01DB7106, 0x98D220BC, 0xEFD5102A, ++ 0x71B18589, 0x06B6B51F, 0x9FBFE4A5, 0xE8B8D433, ++ 0x7807C9A2, 0x0F00F934, 0x9609A88E, 0xE10E9818, ++ 0x7F6A0DBB, 0x086D3D2D, 0x91646C97, 0xE6635C01, ++ 0x6B6B51F4, 0x1C6C6162, 0x856530D8, 0xF262004E, 
++ 0x6C0695ED, 0x1B01A57B, 0x8208F4C1, 0xF50FC457, ++ 0x65B0D9C6, 0x12B7E950, 0x8BBEB8EA, 0xFCB9887C, ++ 0x62DD1DDF, 0x15DA2D49, 0x8CD37CF3, 0xFBD44C65, ++ 0x4DB26158, 0x3AB551CE, 0xA3BC0074, 0xD4BB30E2, ++ 0x4ADFA541, 0x3DD895D7, 0xA4D1C46D, 0xD3D6F4FB, ++ 0x4369E96A, 0x346ED9FC, 0xAD678846, 0xDA60B8D0, ++ 0x44042D73, 0x33031DE5, 0xAA0A4C5F, 0xDD0D7CC9, ++ 0x5005713C, 0x270241AA, 0xBE0B1010, 0xC90C2086, ++ 0x5768B525, 0x206F85B3, 0xB966D409, 0xCE61E49F, ++ 0x5EDEF90E, 0x29D9C998, 0xB0D09822, 0xC7D7A8B4, ++ 0x59B33D17, 0x2EB40D81, 0xB7BD5C3B, 0xC0BA6CAD, ++ 0xEDB88320, 0x9ABFB3B6, 0x03B6E20C, 0x74B1D29A, ++ 0xEAD54739, 0x9DD277AF, 0x04DB2615, 0x73DC1683, ++ 0xE3630B12, 0x94643B84, 0x0D6D6A3E, 0x7A6A5AA8, ++ 0xE40ECF0B, 0x9309FF9D, 0x0A00AE27, 0x7D079EB1, ++ 0xF00F9344, 0x8708A3D2, 0x1E01F268, 0x6906C2FE, ++ 0xF762575D, 0x806567CB, 0x196C3671, 0x6E6B06E7, ++ 0xFED41B76, 0x89D32BE0, 0x10DA7A5A, 0x67DD4ACC, ++ 0xF9B9DF6F, 0x8EBEEFF9, 0x17B7BE43, 0x60B08ED5, ++ 0xD6D6A3E8, 0xA1D1937E, 0x38D8C2C4, 0x4FDFF252, ++ 0xD1BB67F1, 0xA6BC5767, 0x3FB506DD, 0x48B2364B, ++ 0xD80D2BDA, 0xAF0A1B4C, 0x36034AF6, 0x41047A60, ++ 0xDF60EFC3, 0xA867DF55, 0x316E8EEF, 0x4669BE79, ++ 0xCB61B38C, 0xBC66831A, 0x256FD2A0, 0x5268E236, ++ 0xCC0C7795, 0xBB0B4703, 0x220216B9, 0x5505262F, ++ 0xC5BA3BBE, 0xB2BD0B28, 0x2BB45A92, 0x5CB36A04, ++ 0xC2D7FFA7, 0xB5D0CF31, 0x2CD99E8B, 0x5BDEAE1D, ++ 0x9B64C2B0, 0xEC63F226, 0x756AA39C, 0x026D930A, ++ 0x9C0906A9, 0xEB0E363F, 0x72076785, 0x05005713, ++ 0x95BF4A82, 0xE2B87A14, 0x7BB12BAE, 0x0CB61B38, ++ 0x92D28E9B, 0xE5D5BE0D, 0x7CDCEFB7, 0x0BDBDF21, ++ 0x86D3D2D4, 0xF1D4E242, 0x68DDB3F8, 0x1FDA836E, ++ 0x81BE16CD, 0xF6B9265B, 0x6FB077E1, 0x18B74777, ++ 0x88085AE6, 0xFF0F6A70, 0x66063BCA, 0x11010B5C, ++ 0x8F659EFF, 0xF862AE69, 0x616BFFD3, 0x166CCF45, ++ 0xA00AE278, 0xD70DD2EE, 0x4E048354, 0x3903B3C2, ++ 0xA7672661, 0xD06016F7, 0x4969474D, 0x3E6E77DB, ++ 0xAED16A4A, 0xD9D65ADC, 0x40DF0B66, 0x37D83BF0, ++ 0xA9BCAE53, 0xDEBB9EC5, 0x47B2CF7F, 0x30B5FFE9, ++ 
0xBDBDF21C, 0xCABAC28A, 0x53B39330, 0x24B4A3A6, ++ 0xBAD03605, 0xCDD70693, 0x54DE5729, 0x23D967BF, ++ 0xB3667A2E, 0xC4614AB8, 0x5D681B02, 0x2A6F2B94, ++ 0xB40BBE37, 0xC30C8EA1, 0x5A05DF1B, 0x2D02EF8D ++}; ++ ++/* ++ * crc input is CRC32_INIT_VALUE for a fresh start, or previous return value if ++ * accumulating over multiple pieces. ++ */ ++uint32 ++BCMROMFN(hndcrc32)(uint8 *pdata, uint nbytes, uint32 crc) ++{ ++ uint8 *pend; ++#ifdef __mips__ ++ uint8 tmp[4]; ++ ulong *tptr = (ulong *)tmp; ++ ++ if (nbytes > 3) { ++ /* in case the beginning of the buffer isn't aligned */ ++ pend = (uint8 *)((uint)(pdata + 3) & ~0x3); ++ nbytes -= (pend - pdata); ++ while (pdata < pend) { ++ CRC_INNER_LOOP(32, crc, *pdata++); ++ } ++ } ++ ++ if (nbytes > 3) { ++ /* handle bulk of data as 32-bit words */ ++ pend = pdata + (nbytes & ~0x3); ++ while (pdata < pend) { ++ *tptr = *(ulong *)pdata; ++ pdata += sizeof(ulong *); ++ CRC_INNER_LOOP(32, crc, tmp[0]); ++ CRC_INNER_LOOP(32, crc, tmp[1]); ++ CRC_INNER_LOOP(32, crc, tmp[2]); ++ CRC_INNER_LOOP(32, crc, tmp[3]); ++ } ++ } ++ ++ /* 1-3 bytes at end of buffer */ ++ pend = pdata + (nbytes & 0x03); ++ while (pdata < pend) { ++ CRC_INNER_LOOP(32, crc, *pdata++); ++ } ++#else ++ pend = pdata + nbytes; ++ while (pdata < pend) { ++ CRC_INNER_LOOP(32, crc, *pdata++); ++ } ++#endif /* __mips__ */ ++ ++ return crc; ++} ++ ++#ifdef notdef ++#define CLEN 1499 /* CRC Length */ ++#define CBUFSIZ (CLEN+4) ++#define CNBUFS 5 /* # of bufs */ ++ ++void ++testcrc32(void) ++{ ++ uint j, k, l; ++ uint8 *buf; ++ uint len[CNBUFS]; ++ uint32 crcr; ++ uint32 crc32tv[CNBUFS] = ++ {0xd2cb1faa, 0xd385c8fa, 0xf5b4f3f3, 0x55789e20, 0x00343110}; ++ ++ ASSERT((buf = MALLOC(CBUFSIZ*CNBUFS)) != NULL); ++ ++ /* step through all possible alignments */ ++ for (l = 0; l <= 4; l++) { ++ for (j = 0; j < CNBUFS; j++) { ++ len[j] = CLEN; ++ for (k = 0; k < len[j]; k++) { ++ *(buf + j*CBUFSIZ + (k+l)) = (j+k) & 0xff; ++ } ++ } ++ ++ for (j = 0; j < CNBUFS; j++) { ++ crcr = 
crc32(buf + j*CBUFSIZ + l, len[j], CRC32_INIT_VALUE); ++ ASSERT(crcr == crc32tv[j]); ++ } ++ } ++ ++ MFREE(buf, CBUFSIZ*CNBUFS); ++ return; ++} ++#endif /* notdef */ ++ ++/* ++ * Advance from the current 1-byte tag/1-byte length/variable-length value ++ * triple, to the next, returning a pointer to the next. ++ * If the current or next TLV is invalid (does not fit in given buffer length), ++ * NULL is returned. ++ * *buflen is not modified if the TLV elt parameter is invalid, or is decremented ++ * by the TLV parameter's length if it is valid. ++ */ ++bcm_tlv_t * ++BCMROMFN(bcm_next_tlv)(bcm_tlv_t *elt, int *buflen) ++{ ++ int len; ++ ++ /* validate current elt */ ++ if (!bcm_valid_tlv(elt, *buflen)) { ++ return NULL; ++ } ++ ++ /* advance to next elt */ ++ len = elt->len; ++ elt = (bcm_tlv_t*)(elt->data + len); ++ *buflen -= (TLV_HDR_LEN + len); ++ ++ /* validate next elt */ ++ if (!bcm_valid_tlv(elt, *buflen)) { ++ return NULL; ++ } ++ ++ return elt; ++} ++ ++/* ++ * Traverse a string of 1-byte tag/1-byte length/variable-length value ++ * triples, returning a pointer to the substring whose first element ++ * matches tag ++ */ ++bcm_tlv_t * ++BCMROMFN(bcm_parse_tlvs)(void *buf, int buflen, uint key) ++{ ++ bcm_tlv_t *elt; ++ int totlen; ++ ++ elt = (bcm_tlv_t*)buf; ++ totlen = buflen; ++ ++ /* find tagged parameter */ ++ while (totlen >= TLV_HDR_LEN) { ++ int len = elt->len; ++ ++ /* validate remaining totlen */ ++ if ((elt->id == key) && ++ (totlen >= (len + TLV_HDR_LEN))) { ++ return (elt); ++ } ++ ++ elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN)); ++ totlen -= (len + TLV_HDR_LEN); ++ } ++ ++ return NULL; ++} ++ ++/* ++ * Traverse a string of 1-byte tag/1-byte length/variable-length value ++ * triples, returning a pointer to the substring whose first element ++ * matches tag. Stop parsing when we see an element whose ID is greater ++ * than the target key. 
++ */ ++bcm_tlv_t * ++BCMROMFN(bcm_parse_ordered_tlvs)(void *buf, int buflen, uint key) ++{ ++ bcm_tlv_t *elt; ++ int totlen; ++ ++ elt = (bcm_tlv_t*)buf; ++ totlen = buflen; ++ ++ /* find tagged parameter */ ++ while (totlen >= TLV_HDR_LEN) { ++ uint id = elt->id; ++ int len = elt->len; ++ ++ /* Punt if we start seeing IDs > than target key */ ++ if (id > key) { ++ return (NULL); ++ } ++ ++ /* validate remaining totlen */ ++ if ((id == key) && ++ (totlen >= (len + TLV_HDR_LEN))) { ++ return (elt); ++ } ++ ++ elt = (bcm_tlv_t*)((uint8*)elt + (len + TLV_HDR_LEN)); ++ totlen -= (len + TLV_HDR_LEN); ++ } ++ return NULL; ++} ++ ++#if defined(BCMDBG) || defined(BCMDBG_ERR) || defined(WLMSG_PRHDRS) || \ ++ defined(WLMSG_PRPKT) || defined(WLMSG_ASSOC) || defined(DHD_DEBUG) ++int ++bcm_format_flags(const bcm_bit_desc_t *bd, uint32 flags, char* buf, int len) ++{ ++ int i; ++ char* p = buf; ++ char hexstr[16]; ++ int slen = 0, nlen = 0; ++ uint32 bit; ++ const char* name; ++ ++ if (len < 2 || !buf) { ++ return 0; ++ } ++ ++ buf[0] = '\0'; ++ ++ for (i = 0; flags != 0; i++) { ++ bit = bd[i].bit; ++ name = bd[i].name; ++ if (bit == 0 && flags != 0) { ++ /* print any unnamed bits */ ++ snprintf(hexstr, 16, "0x%X", flags); ++ name = hexstr; ++ flags = 0; /* exit loop */ ++ } else if ((flags & bit) == 0) { ++ continue; ++ } ++ flags &= ~bit; ++ nlen = strlen(name); ++ slen += nlen; ++ /* count btwn flag space */ ++ if (flags != 0) { ++ slen += 1; ++ } ++ /* need NULL char as well */ ++ if (len <= slen) { ++ break; ++ } ++ /* copy NULL char but don't count it */ ++ strncpy(p, name, nlen + 1); ++ p += nlen; ++ /* copy btwn flag space and NULL char */ ++ if (flags != 0) { ++ p += snprintf(p, 2, " "); ++ } ++ } ++ ++ /* indicate the str was too short */ ++ if (flags != 0) { ++ if (len < 2) { ++ p -= 2 - len; /* overwrite last char */ ++ } ++ p += snprintf(p, 2, ">"); ++ } ++ ++ return (int)(p - buf); ++} ++ ++/* print bytes formatted as hex to a string. 
return the resulting string length */ ++int ++bcm_format_hex(char *str, const void *bytes, int len) ++{ ++ int i; ++ char *p = str; ++ const uint8 *src = (const uint8*)bytes; ++ ++ for (i = 0; i < len; i++) { ++ p += snprintf(p, 3, "%02X", *src); ++ src++; ++ } ++ return (int)(p - str); ++} ++#endif ++ ++/* pretty hex print a contiguous buffer */ ++void ++prhex(const char *msg, uchar *buf, uint nbytes) ++{ ++ char line[128], *p; ++ int len = sizeof(line); ++ int nchar; ++ uint i; ++ ++ if (msg && (msg[0] != '\0')) { ++ printf("%s:\n", msg); ++ } ++ ++ p = line; ++ for (i = 0; i < nbytes; i++) { ++ if (i % 16 == 0) { ++ nchar = snprintf(p, len, " %04d: ", i); /* line prefix */ ++ p += nchar; ++ len -= nchar; ++ } ++ if (len > 0) { ++ nchar = snprintf(p, len, "%02x ", buf[i]); ++ p += nchar; ++ len -= nchar; ++ } ++ ++ if (i % 16 == 15) { ++ printf("%s\n", line); /* flush line */ ++ p = line; ++ len = sizeof(line); ++ } ++ } ++ ++ /* flush last partial line */ ++ if (p != line) { ++ printf("%s\n", line); ++ } ++} ++ ++static const char *crypto_algo_names[] = { ++ "NONE", ++ "WEP1", ++ "TKIP", ++ "WEP128", ++ "AES_CCM", ++ "AES_OCB_MSDU", ++ "AES_OCB_MPDU", ++ "NALG" ++ "UNDEF", ++ "UNDEF", ++ "UNDEF", ++ "UNDEF" ++}; ++ ++const char * ++bcm_crypto_algo_name(uint algo) ++{ ++ return (algo < ARRAYSIZE(crypto_algo_names)) ? crypto_algo_names[algo] : "ERR"; ++} ++ ++#ifdef BCMDBG ++void ++deadbeef(void *p, size_t len) ++{ ++ static uint8 meat[] = { 0xde, 0xad, 0xbe, 0xef }; ++ ++ while (len-- > 0) { ++ *(uint8*)p = meat[((uintptr)p) & 3]; ++ p = (uint8*)p + 1; ++ } ++} ++#endif /* BCMDBG */ ++ ++char * ++bcm_chipname(uint chipid, char *buf, uint len) ++{ ++ const char *fmt; ++ ++ fmt = ((chipid > 0xa000) || (chipid < 0x4000)) ? 
"%d" : "%x"; ++ snprintf(buf, len, fmt, chipid); ++ return buf; ++} ++ ++/* Produce a human-readable string for boardrev */ ++char * ++bcm_brev_str(uint32 brev, char *buf) ++{ ++ if (brev < 0x100) { ++ snprintf(buf, 8, "%d.%d", (brev & 0xf0) >> 4, brev & 0xf); ++ } else { ++ snprintf(buf, 8, "%c%03x", ((brev & 0xf000) == 0x1000) ? 'P' : 'A', brev & 0xfff); ++ } ++ return (buf); ++} ++ ++#define BUFSIZE_TODUMP_ATONCE 512 /* Buffer size */ ++ ++/* dump large strings to console */ ++void ++printbig(char *buf) ++{ ++ uint len, max_len; ++ char c; ++ ++ len = strlen(buf); ++ ++ max_len = BUFSIZE_TODUMP_ATONCE; ++ ++ while (len > max_len) { ++ c = buf[max_len]; ++ buf[max_len] = '\0'; ++ printf("%s", buf); ++ buf[max_len] = c; ++ ++ buf += max_len; ++ len -= max_len; ++ } ++ /* print the remaining string */ ++ printf("%s\n", buf); ++ return; ++} ++ ++/* routine to dump fields in a fileddesc structure */ ++uint ++bcmdumpfields(bcmutl_rdreg_rtn read_rtn, void *arg0, uint arg1, struct fielddesc *fielddesc_array, ++ char *buf, uint32 bufsize) ++{ ++ uint filled_len; ++ int len; ++ struct fielddesc *cur_ptr; ++ ++ filled_len = 0; ++ cur_ptr = fielddesc_array; ++ ++ while (bufsize > 1) { ++ if (cur_ptr->nameandfmt == NULL) { ++ break; ++ } ++ len = snprintf(buf, bufsize, cur_ptr->nameandfmt, ++ read_rtn(arg0, arg1, cur_ptr->offset)); ++ /* check for snprintf overflow or error */ ++ if (len < 0 || (uint32)len >= bufsize) { ++ len = bufsize - 1; ++ } ++ buf += len; ++ bufsize -= len; ++ filled_len += len; ++ cur_ptr++; ++ } ++ return filled_len; ++} ++ ++uint ++bcm_mkiovar(char *name, char *data, uint datalen, char *buf, uint buflen) ++{ ++ uint len; ++ ++ len = strlen(name) + 1; ++ ++ if ((len + datalen) > buflen) { ++ return 0; ++ } ++ ++ strncpy(buf, name, buflen); ++ ++ /* append data onto the end of the name string */ ++ memcpy(&buf[len], data, datalen); ++ len += datalen; ++ ++ return len; ++} ++ ++/* Quarter dBm units to mW ++ * Table starts at QDBM_OFFSET, so the first 
entry is mW for qdBm=153 ++ * Table is offset so the last entry is largest mW value that fits in ++ * a uint16. ++ */ ++ ++#define QDBM_OFFSET 153 /* Offset for first entry */ ++#define QDBM_TABLE_LEN 40 /* Table size */ ++ ++/* Smallest mW value that will round up to the first table entry, QDBM_OFFSET. ++ * Value is ( mW(QDBM_OFFSET - 1) + mW(QDBM_OFFSET) ) / 2 ++ */ ++#define QDBM_TABLE_LOW_BOUND 6493 /* Low bound */ ++ ++/* Largest mW value that will round down to the last table entry, ++ * QDBM_OFFSET + QDBM_TABLE_LEN-1. ++ * Value is ( mW(QDBM_OFFSET + QDBM_TABLE_LEN - 1) + mW(QDBM_OFFSET + QDBM_TABLE_LEN) ) / 2. ++ */ ++#define QDBM_TABLE_HIGH_BOUND 64938 /* High bound */ ++ ++static const uint16 nqdBm_to_mW_map[QDBM_TABLE_LEN] = { ++/* qdBm: +0 +1 +2 +3 +4 +5 +6 +7 */ ++/* 153: */ 6683, 7079, 7499, 7943, 8414, 8913, 9441, 10000, ++/* 161: */ 10593, 11220, 11885, 12589, 13335, 14125, 14962, 15849, ++/* 169: */ 16788, 17783, 18836, 19953, 21135, 22387, 23714, 25119, ++/* 177: */ 26607, 28184, 29854, 31623, 33497, 35481, 37584, 39811, ++/* 185: */ 42170, 44668, 47315, 50119, 53088, 56234, 59566, 63096 ++}; ++ ++uint16 ++BCMROMFN(bcm_qdbm_to_mw)(uint8 qdbm) ++{ ++ uint factor = 1; ++ int idx = qdbm - QDBM_OFFSET; ++ ++ if (idx >= QDBM_TABLE_LEN) { ++ /* clamp to max uint16 mW value */ ++ return 0xFFFF; ++ } ++ ++ /* scale the qdBm index up to the range of the table 0-40 ++ * where an offset of 40 qdBm equals a factor of 10 mW. ++ */ ++ while (idx < 0) { ++ idx += 40; ++ factor *= 10; ++ } ++ ++ /* return the mW value scaled down to the correct factor of 10, ++ * adding in factor/2 to get proper rounding. 
++ */ ++ return ((nqdBm_to_mW_map[idx] + factor/2) / factor); ++} ++ ++uint8 ++BCMROMFN(bcm_mw_to_qdbm)(uint16 mw) ++{ ++ uint8 qdbm; ++ int offset; ++ uint mw_uint = mw; ++ uint boundary; ++ ++ /* handle boundary case */ ++ if (mw_uint <= 1) { ++ return 0; ++ } ++ ++ offset = QDBM_OFFSET; ++ ++ /* move mw into the range of the table */ ++ while (mw_uint < QDBM_TABLE_LOW_BOUND) { ++ mw_uint *= 10; ++ offset -= 40; ++ } ++ ++ for (qdbm = 0; qdbm < QDBM_TABLE_LEN-1; qdbm++) { ++ boundary = nqdBm_to_mW_map[qdbm] + (nqdBm_to_mW_map[qdbm+1] - ++ nqdBm_to_mW_map[qdbm])/2; ++ if (mw_uint < boundary) break; ++ } ++ ++ qdbm += (uint8)offset; ++ ++ return (qdbm); ++} ++ ++ ++uint ++BCMROMFN(bcm_bitcount)(uint8 *bitmap, uint length) ++{ ++ uint bitcount = 0, i; ++ uint8 tmp; ++ for (i = 0; i < length; i++) { ++ tmp = bitmap[i]; ++ while (tmp) { ++ bitcount++; ++ tmp &= (tmp - 1); ++ } ++ } ++ return bitcount; ++} ++ ++#ifdef BCMDRIVER ++ ++/* Initialization of bcmstrbuf structure */ ++void ++bcm_binit(struct bcmstrbuf *b, char *buf, uint size) ++{ ++ b->origsize = b->size = size; ++ b->origbuf = b->buf = buf; ++} ++ ++/* Buffer sprintf wrapper to guard against buffer overflow */ ++int ++bcm_bprintf(struct bcmstrbuf *b, const char *fmt, ...) ++{ ++ va_list ap; ++ int r; ++ ++ va_start(ap, fmt); ++ ++ r = vsnprintf(b->buf, b->size, fmt, ap); ++ ++ /* Non Ansi C99 compliant returns -1, ++ * Ansi compliant return r >= b->size, ++ * bcmstdlib returns 0, handle all ++ */ ++ /* r == 0 is also the case when strlen(fmt) is zero. ++ * typically the case when "" is passed as argument. 
++ */ ++ if ((r == -1) || (r >= (int)b->size)) { ++ b->size = 0; ++ } else { ++ b->size -= r; ++ b->buf += r; ++ } ++ ++ va_end(ap); ++ ++ return r; ++} ++ ++void ++bcm_bprhex(struct bcmstrbuf *b, const char *msg, bool newline, uint8 *buf, int len) ++{ ++ int i; ++ ++ if (msg != NULL && msg[0] != '\0') { ++ bcm_bprintf(b, "%s", msg); ++ } ++ for (i = 0; i < len; i ++) { ++ bcm_bprintf(b, "%02X", buf[i]); ++ } ++ if (newline) { ++ bcm_bprintf(b, "\n"); ++ } ++} ++ ++void ++bcm_inc_bytes(uchar *num, int num_bytes, uint8 amount) ++{ ++ int i; ++ ++ for (i = 0; i < num_bytes; i++) { ++ num[i] += amount; ++ if (num[i] >= amount) { ++ break; ++ } ++ amount = 1; ++ } ++} ++ ++int ++bcm_cmp_bytes(const uchar *arg1, const uchar *arg2, uint8 nbytes) ++{ ++ int i; ++ ++ for (i = nbytes - 1; i >= 0; i--) { ++ if (arg1[i] != arg2[i]) { ++ return (arg1[i] - arg2[i]); ++ } ++ } ++ return 0; ++} ++ ++void ++bcm_print_bytes(const char *name, const uchar *data, int len) ++{ ++ int i; ++ int per_line = 0; ++ ++ printf("%s: %d \n", name ? 
name : "", len); ++ for (i = 0; i < len; i++) { ++ printf("%02x ", *data++); ++ per_line++; ++ if (per_line == 16) { ++ per_line = 0; ++ printf("\n"); ++ } ++ } ++ printf("\n"); ++} ++#if defined(WLTINYDUMP) || defined(BCMDBG) || defined(WLMSG_INFORM) || \ ++ defined(WLMSG_ASSOC) || defined(WLMSG_PRPKT) || defined(WLMSG_WSEC) ++#define SSID_FMT_BUF_LEN ((4 * DOT11_MAX_SSID_LEN) + 1) ++ ++int ++bcm_format_ssid(char* buf, const uchar ssid[], uint ssid_len) ++{ ++ uint i, c; ++ char *p = buf; ++ char *endp = buf + SSID_FMT_BUF_LEN; ++ ++ if (ssid_len > DOT11_MAX_SSID_LEN) { ++ ssid_len = DOT11_MAX_SSID_LEN; ++ } ++ ++ for (i = 0; i < ssid_len; i++) { ++ c = (uint)ssid[i]; ++ if (c == '\\') { ++ *p++ = '\\'; ++ *p++ = '\\'; ++ } else if (bcm_isprint((uchar)c)) { ++ *p++ = (char)c; ++ } else { ++ p += snprintf(p, (endp - p), "\\x%02X", c); ++ } ++ } ++ *p = '\0'; ++ ASSERT(p < endp); ++ ++ return (int)(p - buf); ++} ++#endif /* WLTINYDUMP || BCMDBG || WLMSG_INFORM || WLMSG_ASSOC || WLMSG_PRPKT */ ++ ++#endif /* BCMDRIVER */ ++ ++/* ++ * ProcessVars:Takes a buffer of "=\n" lines read from a file and ending in a NUL. ++ * also accepts nvram files which are already in the format of =\0\=\0 ++ * Removes carriage returns, empty lines, comment lines, and converts newlines to NULs. ++ * Shortens buffer as needed and pads with NULs. End of buffer is marked by two NULs. 
++*/ ++ ++unsigned int ++process_nvram_vars(char *varbuf, unsigned int len) ++{ ++ char *dp; ++ bool findNewline; ++ int column; ++ unsigned int buf_len, n; ++ unsigned int pad = 0; ++ ++ dp = varbuf; ++ ++ findNewline = FALSE; ++ column = 0; ++ ++ for (n = 0; n < len; n++) { ++ if (varbuf[n] == '\r') { ++ continue; ++ } ++ if (findNewline && varbuf[n] != '\n') { ++ continue; ++ } ++ findNewline = FALSE; ++ if (varbuf[n] == '#') { ++ findNewline = TRUE; ++ continue; ++ } ++ if (varbuf[n] == '\n') { ++ if (column == 0) { ++ continue; ++ } ++ *dp++ = 0; ++ column = 0; ++ continue; ++ } ++ *dp++ = varbuf[n]; ++ column++; ++ } ++ buf_len = (unsigned int)(dp - varbuf); ++ if (buf_len % 4) { ++ pad = 4 - buf_len % 4; ++ if (pad && (buf_len + pad <= len)) { ++ buf_len += pad; ++ } ++ } ++ ++ while (dp < varbuf + n) { ++ *dp++ = 0; ++ } ++ ++ return buf_len; ++} +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/gh2_erom.c b/drivers/net/ethernet/broadcom/gmac/src/shared/gh2_erom.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/gh2_erom.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/gh2_erom.c 2017-11-09 17:53:44.032304000 +0800 +@@ -0,0 +1,18 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Greyhound2 sudo EROM ++ * ++ */ ++#include ++ ++uint32 gh2_erom[] = { ++ //#define CC_CORE_ID 0x800 /* chipcommon core */ ++ 0x4bf80001, 0x2a004201, 0x18000005, 0x181200c5, ++ //#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ ++ 0x4bf82d01, 0x04004211, 0x00000103, 0x18042005, 0x181100c5, ++ 0x4bf82d01, 0x04004211, 0x00000203, 0x1804a005, 0x181110c5, ++ 0x0000000f ++}; ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff 
a/drivers/net/ethernet/broadcom/gmac/src/shared/gh2_erom.h b/drivers/net/ethernet/broadcom/gmac/src/shared/gh2_erom.h +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/gh2_erom.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/gh2_erom.h 2017-11-09 17:53:44.033296000 +0800 +@@ -0,0 +1,14 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Greyhound2 sudo EROM ++ * ++ */ ++ ++#ifndef _gh2_erom_h_ ++#define _gh2_erom_h_ ++ ++extern uint32 gh2_erom[]; ++ ++#endif /* _gh2_erom_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/gh_erom.c b/drivers/net/ethernet/broadcom/gmac/src/shared/gh_erom.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/gh_erom.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/gh_erom.c 2017-11-09 17:53:44.034292000 +0800 +@@ -0,0 +1,28 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Greyhound sudo EROM ++ * ++ */ ++#include ++ ++uint32 gh_erom[] = { ++ //#define CC_CORE_ID 0x800 /* chipcommon core */ ++ 0x4bf80001, 0x2a004201, 0x18000005, 0x181200c5, ++ //#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ ++ 0x4bf82d01, 0x04004211, 0x00000103, 0x18042005, 0x181100c5, ++ 0x0000000f ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/gh_erom.h b/drivers/net/ethernet/broadcom/gmac/src/shared/gh_erom.h +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/gh_erom.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/gh_erom.h 2017-11-09 17:53:44.035289000 +0800 +@@ -0,0 +1,26 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Greyhound sudo EROM ++ * ++ */ ++ ++#ifndef _gh_erom_h_ ++#define _gh_erom_h_ ++ ++extern uint32 gh_erom[]; ++ ++#endif /* _gh_erom_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/hnddma.c b/drivers/net/ethernet/broadcom/gmac/src/shared/hnddma.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/hnddma.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/hnddma.c 2017-11-09 17:53:44.045302000 +0800 +@@ -0,0 +1,3701 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Generic Broadcom Home Networking Division (HND) DMA module. ++ * This supports the following chips: BCM42xx, 44xx, 47xx . 
++ * ++ * $Id: hnddma.c 328477 2012-04-19 10:57:54Z $ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++#include ++ ++#ifdef CONFIG_BCM_IPROC_GMAC_PREFETCH ++#include ++#endif ++ ++#ifdef CONFIG_BCM_IPROC_GMAC_RWREG_OPT ++#ifdef R_REG ++#undef R_REG ++#define R_REG(osh, r) (\ ++ sizeof(*(r)) == sizeof(uint8) ? (*(volatile unsigned char __force *)(r)) : \ ++ sizeof(*(r)) == sizeof(uint16) ? (*(volatile unsigned short __force *)(r)) : \ ++ (*(volatile unsigned int __force *)(r)) \ ++) ++#endif /* R_REG */ ++ ++#ifdef W_REG ++#undef W_REG ++#define W_REG(osh, r, v) (\ ++ sizeof(*(r)) == sizeof(uint8) ? (*(volatile unsigned char __force *)(r) = (v)) : \ ++ sizeof(*(r)) == sizeof(uint16) ? (*(volatile unsigned short __force *)(r) = (v)) : \ ++ (*(volatile unsigned int __force *)(r) = (v)) \ ++) ++#endif /* W_REG */ ++#endif /* CONFIG_BCM_IPROC_GMAC_RWREG_OPT */ ++ ++/* debug/trace */ ++#ifdef BCMDBG ++#define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args ++#define DMA_TRACE(args) if (!(*di->msg_level & 2)); else printf args ++#elif defined(BCMDBG_ERR) ++#define DMA_ERROR(args) if (!(*di->msg_level & 1)); else printf args ++#define DMA_TRACE(args) ++#else ++#define DMA_ERROR(args) ++#define DMA_TRACE(args) ++#endif /* BCMDBG */ ++ ++#define DMA_NONE(args) ++ ++ ++#define d32txregs dregs.d32_u.txregs_32 ++#define d32rxregs dregs.d32_u.rxregs_32 ++#define txd32 dregs.d32_u.txd_32 ++#define rxd32 dregs.d32_u.rxd_32 ++ ++#define d64txregs dregs.d64_u.txregs_64 ++#define d64rxregs dregs.d64_u.rxregs_64 ++#define txd64 dregs.d64_u.txd_64 ++#define rxd64 dregs.d64_u.rxd_64 ++ ++#define DBG(x...) 
printk(KERN_ERR x) ++ ++/* default dma message level (if input msg_level pointer is null in dma_attach()) */ ++#ifdef BCMDBG_ERR ++static uint dma_msg_level = 1; ++#else ++static uint dma_msg_level = 0; ++#endif /* BCMDBG_ERR */ ++ ++#define MAXNAMEL 8 /* 8 char names */ ++ ++#define DI_INFO(dmah) ((dma_info_t *)dmah) ++ ++/* dma engine software state */ ++typedef struct dma_info { ++ struct hnddma_pub hnddma; /* exported structure, don't use hnddma_t, ++ * which could be const ++ */ ++ uint *msg_level; /* message level pointer */ ++ char name[MAXNAMEL]; /* callers name for diag msgs */ ++ ++ void *osh; /* os handle */ ++ si_t *sih; /* sb handle */ ++ ++ bool dma64; /* this dma engine is operating in 64-bit mode */ ++ bool addrext; /* this dma engine supports DmaExtendedAddrChanges */ ++ ++ union { ++ struct { ++ dma32regs_t *txregs_32; /* 32-bit dma tx engine registers */ ++ dma32regs_t *rxregs_32; /* 32-bit dma rx engine registers */ ++ dma32dd_t *txd_32; /* pointer to dma32 tx descriptor ring */ ++ dma32dd_t *rxd_32; /* pointer to dma32 rx descriptor ring */ ++ } d32_u; ++ struct { ++ dma64regs_t *txregs_64; /* 64-bit dma tx engine registers */ ++ dma64regs_t *rxregs_64; /* 64-bit dma rx engine registers */ ++ dma64dd_t *txd_64; /* pointer to dma64 tx descriptor ring */ ++ dma64dd_t *rxd_64; /* pointer to dma64 rx descriptor ring */ ++ } d64_u; ++ } dregs; ++ ++ uint16 dmadesc_align; /* alignment requirement for dma descriptors */ ++ ++ uint16 ntxd; /* # tx descriptors tunable */ ++ uint16 txin; /* index of next descriptor to reclaim */ ++ uint16 txout; /* index of next descriptor to post */ ++ void **txp; /* pointer to parallel array of pointers to packets */ ++ osldma_t *tx_dmah; /* DMA TX descriptor ring handle */ ++ hnddma_seg_map_t *txp_dmah; /* DMA MAP meta-data handle */ ++ dmaaddr_t txdpa; /* Aligned physical address of descriptor ring */ ++ dmaaddr_t txdpaorig; /* Original physical address of descriptor ring */ ++ uint16 txdalign; /* #bytes added to 
alloc'd mem to align txd */ ++ uint32 txdalloc; /* #bytes allocated for the ring */ ++ uint32 xmtptrbase; /* When using unaligned descriptors, the ptr register ++ * is not just an index, it needs all 13 bits to be ++ * an offset from the addr register. ++ */ ++ ++ uint16 nrxd; /* # rx descriptors tunable */ ++ uint16 rxin; /* index of next descriptor to reclaim */ ++ uint16 rxout; /* index of next descriptor to post */ ++ void **rxp; /* pointer to parallel array of pointers to packets */ ++ osldma_t *rx_dmah; /* DMA RX descriptor ring handle */ ++ hnddma_seg_map_t *rxp_dmah; /* DMA MAP meta-data handle */ ++ dmaaddr_t rxdpa; /* Aligned physical address of descriptor ring */ ++ dmaaddr_t rxdpaorig; /* Original physical address of descriptor ring */ ++ uint16 rxdalign; /* #bytes added to alloc'd mem to align rxd */ ++ uint32 rxdalloc; /* #bytes allocated for the ring */ ++ uint32 rcvptrbase; /* Base for ptr reg when using unaligned descriptors */ ++ ++ /* tunables */ ++ uint16 rxbufsize; /* rx buffer size in bytes, ++ * not including the extra headroom ++ */ ++ uint rxextrahdrroom; /* extra rx headroom, reverseved to assist upper stack ++ * e.g. some rx pkt buffers will be bridged to tx side ++ * without byte copying. The extra headroom needs to be ++ * large enough to fit txheader needs. ++ * Some dongle driver may not need it. 
++ */ ++ uint nrxpost; /* # rx buffers to keep posted */ ++ uint rxoffset; /* rxcontrol offset */ ++ uint ddoffsetlow; /* add to get dma address of descriptor ring, low 32 bits */ ++ uint ddoffsethigh; /* high 32 bits */ ++ uint dataoffsetlow; /* add to get dma address of data buffer, low 32 bits */ ++ uint dataoffsethigh; /* high 32 bits */ ++ bool aligndesc_4k; /* descriptor base need to be aligned or not */ ++ uint8 rxburstlen; /* burstlen field for rx (for cores supporting burstlen) */ ++ uint8 txburstlen; /* burstlen field for tx (for cores supporting burstlen) */ ++ uint8 txmultioutstdrd; /* tx multiple outstanding reads */ ++ uint8 txprefetchctl; /* prefetch control for tx */ ++ uint8 txprefetchthresh; /* prefetch threshold for tx */ ++ uint8 rxprefetchctl; /* prefetch control for rx */ ++ uint8 rxprefetchthresh; /* prefetch threshold for rx */ ++ pktpool_t *pktpool; /* pktpool */ ++ uint dma_avoidance_cnt; ++ ++ uint32 d64_xs0_cd_mask; /* tx current descriptor pointer mask */ ++ uint32 d64_xs1_ad_mask; /* tx active descriptor mask */ ++ uint32 d64_rs0_cd_mask; /* rx current descriptor pointer mask */ ++ uint16 rs0cd; /* cached value of rcvstatus0 currdescr */ ++ uint16 xs0cd; /* cached value of xmtstatus0 currdescr */ ++ uint16 xs0cd_snapshot; /* snapshot of xmtstatus0 currdescr */ ++ spinlock_t des_lock; ++} dma_info_t; ++ ++/* ++ * If BCMDMA32 is defined, hnddma will support both 32-bit and 64-bit DMA engines. ++ * Otherwise it will support only 64-bit. ++ * ++ * DMA32_ENAB indicates whether hnddma is compiled with support for 32-bit DMA engines. ++ * DMA64_ENAB indicates whether hnddma is compiled with support for 64-bit DMA engines. ++ * ++ * DMA64_MODE indicates whether the current DMA engine is running as 64-bit. 
++ */ ++#ifdef BCMDMA32 ++#define DMA32_ENAB(di) 1 ++#define DMA64_ENAB(di) 1 ++#define DMA64_MODE(di) ((di)->dma64) ++#else /* !BCMDMA32 */ ++#define DMA32_ENAB(di) 0 ++#define DMA64_ENAB(di) 1 ++#define DMA64_MODE(di) 1 ++#endif /* !BCMDMA32 */ ++ ++/* DMA Scatter-gather list is supported. Note this is limited to TX direction only */ ++#ifdef BCMDMASGLISTOSL ++#define DMASGLIST_ENAB TRUE ++#else ++#define DMASGLIST_ENAB FALSE ++#endif /* BCMDMASGLISTOSL */ ++ ++/* descriptor bumping macros */ ++#define XXD(x, n) ((x) & ((n) - 1)) /* faster than %, but n must be power of 2 */ ++#define TXD(x) XXD((x), di->ntxd) ++#define RXD(x) XXD((x), di->nrxd) ++#define NEXTTXD(i) TXD((i) + 1) ++#define PREVTXD(i) TXD((i) - 1) ++#define NEXTRXD(i) RXD((i) + 1) ++#define PREVRXD(i) RXD((i) - 1) ++ ++#define NTXDACTIVE(h, t) TXD((t) - (h)) ++#define NRXDACTIVE(h, t) RXD((t) - (h)) ++ ++/* macros to convert between byte offsets and indexes */ ++#define B2I(bytes, type) ((uint16)((bytes) / sizeof(type))) ++#define I2B(index, type) ((index) * sizeof(type)) ++ ++#define PCI32ADDR_HIGH 0xc0000000 /* address[31:30] */ ++#define PCI32ADDR_HIGH_SHIFT 30 /* address[31:30] */ ++ ++#define PCI64ADDR_HIGH 0x80000000 /* address[63] */ ++#define PCI64ADDR_HIGH_SHIFT 31 /* address[63] */ ++ ++ ++#ifdef CONFIG_BCM_IPROC_GMAC_PREFETCH ++#define SKB_PREFETCH_LEN (128) ++#endif ++ ++/* Common prototypes */ ++static bool _dma_isaddrext(dma_info_t *di); ++static bool _dma_descriptor_align(dma_info_t *di); ++static bool _dma_alloc(dma_info_t *di, uint direction); ++static void _dma_detach(dma_info_t *di); ++static void _dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa); ++static void _dma_rxinit(dma_info_t *di); ++static void *_dma_rx(dma_info_t *di); ++static bool _dma_rxfill(dma_info_t *di); ++static void _dma_rxreclaim(dma_info_t *di); ++static void _dma_rxenable(dma_info_t *di); ++static void *_dma_getnextrxp(dma_info_t *di, bool forceall); ++static void _dma_rx_param_get(dma_info_t 
*di, uint16 *rxoffset, uint16 *rxbufsize); ++ ++static void _dma_txblock(dma_info_t *di); ++static void _dma_txunblock(dma_info_t *di); ++static uint _dma_txactive(dma_info_t *di); ++static uint _dma_rxactive(dma_info_t *di); ++static uint _dma_activerxbuf(dma_info_t *di); ++static uint _dma_txpending(dma_info_t *di); ++static uint _dma_txcommitted(dma_info_t *di); ++ ++static void *_dma_peeknexttxp(dma_info_t *di); ++static int _dma_peekntxp(dma_info_t *di, int *len, void *txps[], txd_range_t range); ++static void *_dma_peeknextrxp(dma_info_t *di); ++static uintptr _dma_getvar(dma_info_t *di, const char *name); ++static void _dma_counterreset(dma_info_t *di); ++static void _dma_fifoloopbackenable(dma_info_t *di); ++static uint _dma_ctrlflags(dma_info_t *di, uint mask, uint flags); ++static uint8 dma_align_sizetobits(uint size); ++static void *dma_ringalloc(osl_t *osh, uint32 boundary, uint size, uint16 *alignbits, uint* alloced, ++ dmaaddr_t *descpa, osldma_t **dmah); ++static int _dma_pktpool_set(dma_info_t *di, pktpool_t *pool); ++static bool _dma_rxtx_error(dma_info_t *di, bool istx); ++static void _dma_burstlen_set(dma_info_t *di, uint8 rxburstlen, uint8 txburstlen); ++static uint _dma_avoidancecnt(dma_info_t *di); ++static void _dma_param_set(dma_info_t *di, uint16 paramid, uint16 paramval); ++static bool _dma_glom_enable(dma_info_t *di, uint32 val); ++ ++ ++/* Prototypes for 32-bit routines */ ++static bool dma32_alloc(dma_info_t *di, uint direction); ++static bool dma32_txreset(dma_info_t *di); ++static bool dma32_rxreset(dma_info_t *di); ++static bool dma32_txsuspendedidle(dma_info_t *di); ++static int dma32_txfast(dma_info_t *di, void *p0, bool commit); ++static void *dma32_getnexttxp(dma_info_t *di, txd_range_t range); ++static void *dma32_getnextrxp(dma_info_t *di, bool forceall); ++static void dma32_txrotate(dma_info_t *di); ++static bool dma32_rxidle(dma_info_t *di); ++static void dma32_txinit(dma_info_t *di); ++static bool dma32_txenabled(dma_info_t 
*di); ++static void dma32_txsuspend(dma_info_t *di); ++static void dma32_txresume(dma_info_t *di); ++static bool dma32_txsuspended(dma_info_t *di); ++#ifdef WL_MULTIQUEUE ++static void dma32_txflush(dma_info_t *di); ++static void dma32_txflush_clear(dma_info_t *di); ++#endif /* WL_MULTIQUEUE */ ++static void dma32_txreclaim(dma_info_t *di, txd_range_t range); ++static bool dma32_txstopped(dma_info_t *di); ++static bool dma32_rxstopped(dma_info_t *di); ++static bool dma32_rxenabled(dma_info_t *di); ++#if defined(BCMDBG) ++static void dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma32dd_t *ring, uint start, ++ uint end, uint max_num); ++static void dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring); ++static void dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring); ++static void dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring); ++#endif ++ ++static bool _dma32_addrext(osl_t *osh, dma32regs_t *dma32regs); ++ ++/* Prototypes for 64-bit routines */ ++static bool dma64_alloc(dma_info_t *di, uint direction); ++static bool dma64_txreset(dma_info_t *di); ++static bool dma64_rxreset(dma_info_t *di); ++static bool dma64_txsuspendedidle(dma_info_t *di); ++static int dma64_txfast(dma_info_t *di, void *p0, bool commit); ++static int dma64_txunframed(dma_info_t *di, void *p0, uint len, bool commit); ++static void *dma64_getpos(dma_info_t *di, bool direction); ++static void *dma64_getnexttxp(dma_info_t *di, txd_range_t range); ++static void *dma64_getnextrxp(dma_info_t *di, bool forceall); ++static void dma64_txrotate(dma_info_t *di); ++ ++static bool dma64_rxidle(dma_info_t *di); ++static void dma64_txinit(dma_info_t *di); ++static bool dma64_txenabled(dma_info_t *di); ++static void dma64_txsuspend(dma_info_t *di); ++static void dma64_txresume(dma_info_t *di); ++static bool dma64_txsuspended(dma_info_t *di); ++#ifdef WL_MULTIQUEUE ++static void dma64_txflush(dma_info_t *di); ++static void dma64_txflush_clear(dma_info_t *di); 
++#endif /* WL_MULTIQUEUE */ ++static void dma64_txreclaim(dma_info_t *di, txd_range_t range); ++static bool dma64_txstopped(dma_info_t *di); ++static bool dma64_rxstopped(dma_info_t *di); ++static bool dma64_rxenabled(dma_info_t *di); ++static bool _dma64_addrext(osl_t *osh, dma64regs_t *dma64regs); ++static int dma64_rxunframed(dma_info_t *di, void *p0, uint len, bool commit); ++ ++STATIC INLINE uint32 parity32(uint32 data); ++ ++#if defined(BCMDBG) ++static void dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma64dd_t *ring, uint start, ++ uint end, uint max_num); ++static void dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring); ++static void dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring); ++static void dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring); ++#endif ++ ++ ++const di_fcn_t dma64proc = { ++ (di_detach_t)_dma_detach, ++ (di_txinit_t)dma64_txinit, ++ (di_txreset_t)dma64_txreset, ++ (di_txenabled_t)dma64_txenabled, ++ (di_txsuspend_t)dma64_txsuspend, ++ (di_txresume_t)dma64_txresume, ++ (di_txsuspended_t)dma64_txsuspended, ++ (di_txsuspendedidle_t)dma64_txsuspendedidle, ++#ifdef WL_MULTIQUEUE ++ (di_txflush_t)dma64_txflush, ++ (di_txflush_clear_t)dma64_txflush_clear, ++#endif /* WL_MULTIQUEUE */ ++ (di_txfast_t)dma64_txfast, ++ (di_txunframed_t)dma64_txunframed, ++ (di_getpos_t)dma64_getpos, ++ (di_txstopped_t)dma64_txstopped, ++ (di_txreclaim_t)dma64_txreclaim, ++ (di_getnexttxp_t)dma64_getnexttxp, ++ (di_peeknexttxp_t)_dma_peeknexttxp, ++ (di_peekntxp_t)_dma_peekntxp, ++ (di_txblock_t)_dma_txblock, ++ (di_txunblock_t)_dma_txunblock, ++ (di_txactive_t)_dma_txactive, ++ (di_txrotate_t)dma64_txrotate, ++ ++ (di_rxinit_t)_dma_rxinit, ++ (di_rxreset_t)dma64_rxreset, ++ (di_rxidle_t)dma64_rxidle, ++ (di_rxstopped_t)dma64_rxstopped, ++ (di_rxenable_t)_dma_rxenable, ++ (di_rxenabled_t)dma64_rxenabled, ++ (di_rx_t)_dma_rx, ++ (di_rxfill_t)_dma_rxfill, ++ (di_rxreclaim_t)_dma_rxreclaim, ++ 
(di_getnextrxp_t)_dma_getnextrxp, ++ (di_peeknextrxp_t)_dma_peeknextrxp, ++ (di_rxparam_get_t)_dma_rx_param_get, ++ ++ (di_fifoloopbackenable_t)_dma_fifoloopbackenable, ++ (di_getvar_t)_dma_getvar, ++ (di_counterreset_t)_dma_counterreset, ++ (di_ctrlflags_t)_dma_ctrlflags, ++ ++#if defined(BCMDBG) ++ (di_dump_t)dma64_dump, ++ (di_dumptx_t)dma64_dumptx, ++ (di_dumprx_t)dma64_dumprx, ++#else ++ NULL, ++ NULL, ++ NULL, ++#endif ++ (di_rxactive_t)_dma_rxactive, ++ (di_txpending_t)_dma_txpending, ++ (di_txcommitted_t)_dma_txcommitted, ++ (di_pktpool_set_t)_dma_pktpool_set, ++ (di_rxtxerror_t)_dma_rxtx_error, ++ (di_burstlen_set_t)_dma_burstlen_set, ++ (di_avoidancecnt_t)_dma_avoidancecnt, ++ (di_param_set_t)_dma_param_set, ++ (dma_glom_enable_t)_dma_glom_enable, ++ (di_rxunframed_t)dma64_rxunframed, ++ (dma_active_rxbuf_t)_dma_activerxbuf, ++ 40 ++}; ++ ++static const di_fcn_t dma32proc = { ++ (di_detach_t)_dma_detach, ++ (di_txinit_t)dma32_txinit, ++ (di_txreset_t)dma32_txreset, ++ (di_txenabled_t)dma32_txenabled, ++ (di_txsuspend_t)dma32_txsuspend, ++ (di_txresume_t)dma32_txresume, ++ (di_txsuspended_t)dma32_txsuspended, ++ (di_txsuspendedidle_t)dma32_txsuspendedidle, ++#ifdef WL_MULTIQUEUE ++ (di_txflush_t)dma32_txflush, ++ (di_txflush_clear_t)dma32_txflush_clear, ++#endif /* WL_MULTIQUEUE */ ++ (di_txfast_t)dma32_txfast, ++ NULL, ++ NULL, ++ (di_txstopped_t)dma32_txstopped, ++ (di_txreclaim_t)dma32_txreclaim, ++ (di_getnexttxp_t)dma32_getnexttxp, ++ (di_peeknexttxp_t)_dma_peeknexttxp, ++ (di_peekntxp_t)_dma_peekntxp, ++ (di_txblock_t)_dma_txblock, ++ (di_txunblock_t)_dma_txunblock, ++ (di_txactive_t)_dma_txactive, ++ (di_txrotate_t)dma32_txrotate, ++ ++ (di_rxinit_t)_dma_rxinit, ++ (di_rxreset_t)dma32_rxreset, ++ (di_rxidle_t)dma32_rxidle, ++ (di_rxstopped_t)dma32_rxstopped, ++ (di_rxenable_t)_dma_rxenable, ++ (di_rxenabled_t)dma32_rxenabled, ++ (di_rx_t)_dma_rx, ++ (di_rxfill_t)_dma_rxfill, ++ (di_rxreclaim_t)_dma_rxreclaim, ++ (di_getnextrxp_t)_dma_getnextrxp, ++ 
(di_peeknextrxp_t)_dma_peeknextrxp, ++ (di_rxparam_get_t)_dma_rx_param_get, ++ ++ (di_fifoloopbackenable_t)_dma_fifoloopbackenable, ++ (di_getvar_t)_dma_getvar, ++ (di_counterreset_t)_dma_counterreset, ++ (di_ctrlflags_t)_dma_ctrlflags, ++ ++#if defined(BCMDBG) ++ (di_dump_t)dma32_dump, ++ (di_dumptx_t)dma32_dumptx, ++ (di_dumprx_t)dma32_dumprx, ++#else ++ NULL, ++ NULL, ++ NULL, ++#endif ++ (di_rxactive_t)_dma_rxactive, ++ (di_txpending_t)_dma_txpending, ++ (di_txcommitted_t)_dma_txcommitted, ++ (di_pktpool_set_t)_dma_pktpool_set, ++ (di_rxtxerror_t)_dma_rxtx_error, ++ (di_burstlen_set_t)_dma_burstlen_set, ++ (di_avoidancecnt_t)_dma_avoidancecnt, ++ (di_param_set_t)_dma_param_set, ++ NULL, ++ NULL, ++ NULL, ++ 40 ++}; ++ ++EXPORT_SYMBOL(dma_attach); ++EXPORT_SYMBOL(dma64proc); ++ ++hnddma_t * ++dma_attach(osl_t *osh, const char *name, si_t *sih, ++ volatile void *dmaregstx, volatile void *dmaregsrx, ++ uint ntxd, uint nrxd, uint rxbufsize, int rxextheadroom, uint nrxpost, uint rxoffset, ++ uint *msg_level) ++{ ++ dma_info_t *di; ++ uint size; ++ uint32 mask; ++ ++ /* allocate private info structure */ ++ if ((di = MALLOC(osh, sizeof (dma_info_t))) == NULL) { ++#ifdef BCMDBG ++ DMA_ERROR(("%s: out of memory, malloced %d bytes\n", __FUNCTION__, MALLOCED(osh))); ++#endif ++ return (NULL); ++ } ++ ++ bzero(di, sizeof(dma_info_t)); ++ ++ di->msg_level = msg_level ? 
msg_level : &dma_msg_level; ++ spin_lock_init(&di->des_lock); ++ ++ /* old chips w/o sb is no longer supported */ ++ ASSERT(sih != NULL); ++ ++ if (DMA64_ENAB(di)) { ++ di->dma64 = ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64); ++ } else { ++ di->dma64 = 0; ++ } ++ ++ /* check arguments */ ++ ASSERT(ISPOWEROF2(ntxd)); ++ ASSERT(ISPOWEROF2(nrxd)); ++ ++ if (nrxd == 0) { ++ ASSERT(dmaregsrx == NULL); ++ } ++ if (ntxd == 0) { ++ ASSERT(dmaregstx == NULL); ++ } ++ ++ /* init dma reg pointer */ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ di->d64txregs = (dma64regs_t *)dmaregstx; ++ di->d64rxregs = (dma64regs_t *)dmaregsrx; ++ di->hnddma.di_fn = (const di_fcn_t *)&dma64proc; ++ } else if (DMA32_ENAB(di)) { ++ ASSERT(ntxd <= D32MAXDD); ++ ASSERT(nrxd <= D32MAXDD); ++ di->d32txregs = (dma32regs_t *)dmaregstx; ++ di->d32rxregs = (dma32regs_t *)dmaregsrx; ++ di->hnddma.di_fn = (const di_fcn_t *)&dma32proc; ++ } else { ++ DMA_ERROR(("%s: driver doesn't support 32-bit DMA\n", __FUNCTION__)); ++ ASSERT(0); ++ goto fail; ++ } ++ ++ /* Default flags (which can be changed by the driver calling dma_ctrlflags ++ * before enable): For backwards compatibility both Rx Overflow Continue ++ * and Parity are DISABLED. ++ * supports it. ++ */ ++ di->hnddma.di_fn->ctrlflags(&di->hnddma, DMA_CTRL_ROC | DMA_CTRL_PEN, 0); ++ ++ DMA_TRACE(("%s: %s: %s osh %p flags 0x%x ntxd %d nrxd %d rxbufsize %d " ++ "rxextheadroom %d nrxpost %d rxoffset %d dmaregstx %p dmaregsrx %p\n", ++ name, __FUNCTION__, (DMA64_MODE(di) ? 
"DMA64" : "DMA32"), ++ osh, di->hnddma.dmactrlflags, ntxd, nrxd, ++ rxbufsize, rxextheadroom, nrxpost, rxoffset, dmaregstx, dmaregsrx)); ++ ++ /* make a private copy of our callers name */ ++ strncpy(di->name, name, MAXNAMEL); ++ di->name[MAXNAMEL-1] = '\0'; ++ ++ di->osh = osh; ++ di->sih = sih; ++ ++ /* save tunables */ ++ di->ntxd = (uint16)ntxd; ++ di->nrxd = (uint16)nrxd; ++ ++ /* the actual dma size doesn't include the extra headroom */ ++ di->rxextrahdrroom = (rxextheadroom == -1) ? BCMEXTRAHDROOM : rxextheadroom; ++ if (rxbufsize > BCMEXTRAHDROOM) { ++ di->rxbufsize = (uint16)(rxbufsize - di->rxextrahdrroom); ++ } else { ++ di->rxbufsize = (uint16)rxbufsize; ++ } ++ ++ di->nrxpost = (uint16)nrxpost; ++ di->rxoffset = (uint8)rxoffset; ++ ++ /* Get the default values (POR) of the burstlen. This can be overridden by the modules ++ * if this has to be different. Otherwise this value will be used to program the control ++ * register after the reset or during the init. ++ */ ++ if (dmaregsrx) { ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ /* detect the dma descriptor address mask, ++ * should be 0x1fff before 4360B0, 0xffff start from 4360B0 ++ */ ++ W_REG(di->osh, &di->d64rxregs->addrlow, 0xffffffff); ++ mask = R_REG(di->osh, &di->d64rxregs->addrlow); ++ ++ if (mask & 0xfff) { ++ mask = R_REG(di->osh, &di->d64rxregs->ptr) | 0xf; ++ } else { ++ mask = 0x1fff; ++ } ++ ++ DMA_TRACE(("%s: dma_rx_mask: %08x\n", di->name, mask)); ++ di->d64_rs0_cd_mask = mask; ++ ++ if (mask == 0x1fff) { ++ ASSERT(nrxd <= D64MAXDD); ++ } else { ++ ASSERT(nrxd <= D64MAXDD_LARGE); ++ } ++ ++ di->rxburstlen = (R_REG(di->osh, ++ &di->d64rxregs->control) & D64_RC_BL_MASK) >> D64_RC_BL_SHIFT; ++ di->rxprefetchctl = (R_REG(di->osh, ++ &di->d64rxregs->control) & D64_RC_PC_MASK) >> D64_RC_PC_SHIFT; ++ di->rxprefetchthresh = (R_REG(di->osh, ++ &di->d64rxregs->control) & D64_RC_PT_MASK) >> D64_RC_PT_SHIFT; ++ } else if (DMA32_ENAB(di)) { ++ di->rxburstlen = (R_REG(di->osh, ++ 
&di->d32rxregs->control) & RC_BL_MASK) >> RC_BL_SHIFT; ++ di->rxprefetchctl = (R_REG(di->osh, ++ &di->d32rxregs->control) & RC_PC_MASK) >> RC_PC_SHIFT; ++ di->rxprefetchthresh = (R_REG(di->osh, ++ &di->d32rxregs->control) & RC_PT_MASK) >> RC_PT_SHIFT; ++ } ++ } ++ if (dmaregstx) { ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ ++ /* detect the dma descriptor address mask, ++ * should be 0x1fff before 4360B0, 0xffff start from 4360B0 ++ */ ++ W_REG(di->osh, &di->d64txregs->addrlow, 0xffffffff); ++ mask = R_REG(di->osh, &di->d64txregs->addrlow); ++ ++ if (mask & 0xfff) { ++ mask = R_REG(di->osh, &di->d64txregs->ptr) | 0xf; ++ } else { ++ mask = 0x1fff; ++ } ++ ++ DMA_TRACE(("%s: dma_tx_mask: %08x\n", di->name, mask)); ++ di->d64_xs0_cd_mask = mask; ++ di->d64_xs1_ad_mask = mask; ++ ++ if (mask == 0x1fff) { ++ ASSERT(ntxd <= D64MAXDD); ++ } else { ++ ASSERT(ntxd <= D64MAXDD_LARGE); ++ } ++ ++ di->txburstlen = (R_REG(di->osh, ++ &di->d64txregs->control) & D64_XC_BL_MASK) >> D64_XC_BL_SHIFT; ++ di->txmultioutstdrd = (R_REG(di->osh, ++ &di->d64txregs->control) & D64_XC_MR_MASK) >> D64_XC_MR_SHIFT; ++ di->txprefetchctl = (R_REG(di->osh, ++ &di->d64txregs->control) & D64_XC_PC_MASK) >> D64_XC_PC_SHIFT; ++ di->txprefetchthresh = (R_REG(di->osh, ++ &di->d64txregs->control) & D64_XC_PT_MASK) >> D64_XC_PT_SHIFT; ++ } else if (DMA32_ENAB(di)) { ++ di->txburstlen = (R_REG(di->osh, ++ &di->d32txregs->control) & XC_BL_MASK) >> XC_BL_SHIFT; ++ di->txmultioutstdrd = (R_REG(di->osh, ++ &di->d32txregs->control) & XC_MR_MASK) >> XC_MR_SHIFT; ++ di->txprefetchctl = (R_REG(di->osh, ++ &di->d32txregs->control) & XC_PC_MASK) >> XC_PC_SHIFT; ++ di->txprefetchthresh = (R_REG(di->osh, ++ &di->d32txregs->control) & XC_PT_MASK) >> XC_PT_SHIFT; ++ } ++ } ++ ++ /* force burstlen to 3 */ ++ di->rxburstlen = 3; ++ di->txburstlen = 3; ++ /* ++ * figure out the DMA physical address offset for dd and data ++ * Other bus: use zero ++ */ ++ di->ddoffsetlow = 0; ++ di->dataoffsetlow = 0; ++ ++ /* set addr 
ext fields */ ++ di->addrext = _dma_isaddrext(di); ++ ++ /* does the descriptors need to be aligned and if yes, on 4K/8K or not */ ++ di->aligndesc_4k = _dma_descriptor_align(di); ++ if (di->aligndesc_4k) { ++ if (DMA64_MODE(di)) { ++ di->dmadesc_align = D64RINGALIGN_BITS; ++ if ((ntxd < D64MAXDD / 2) && (nrxd < D64MAXDD / 2)) { ++ /* for smaller dd table, HW relax the alignment requirement */ ++ di->dmadesc_align = D64RINGALIGN_BITS - 1; ++ } ++ } else { ++ di->dmadesc_align = D32RINGALIGN_BITS; ++ } ++ } else { ++ /* The start address of descriptor table should be algined to cache line size, ++ * or other structure may share a cache line with it, which can lead to memory ++ * overlapping due to cache write-back operation. In the case of MIPS 74k, the ++ * cache line size is 32 bytes. ++ */ ++#ifdef __mips__ ++ di->dmadesc_align = 5; /* 32 byte alignment */ ++#else ++ di->dmadesc_align = 4; /* 16 byte alignment */ ++#endif ++ } ++ ++ DMA_NONE(("DMA descriptor align_needed %d, align %d\n", ++ di->aligndesc_4k, di->dmadesc_align)); ++ ++ /* allocate tx packet pointer vector */ ++ if (ntxd) { ++ size = ntxd * sizeof(void *); ++ if ((di->txp = MALLOC(osh, size)) == NULL) { ++ DMA_ERROR(("%s: %s: out of tx memory, malloced %d bytes\n", ++ di->name, __FUNCTION__, MALLOCED(osh))); ++ goto fail; ++ } ++ bzero(di->txp, size); ++ } ++ ++ /* allocate rx packet pointer vector */ ++ if (nrxd) { ++ size = nrxd * sizeof(void *); ++ if ((di->rxp = MALLOC(osh, size)) == NULL) { ++ DMA_ERROR(("%s: %s: out of rx memory, malloced %d bytes\n", ++ di->name, __FUNCTION__, MALLOCED(osh))); ++ goto fail; ++ } ++ bzero(di->rxp, size); ++ } ++ ++ /* allocate transmit descriptor ring, only need ntxd descriptors but it must be aligned */ ++ if (ntxd) { ++ if (!_dma_alloc(di, DMA_TX)) { ++ goto fail; ++ } ++ } ++ ++ /* allocate receive descriptor ring, only need nrxd descriptors but it must be aligned */ ++ if (nrxd) { ++ if (!_dma_alloc(di, DMA_RX)) { ++ goto fail; ++ } ++ } ++ ++ if 
((di->ddoffsetlow != 0) && !di->addrext) { ++ if (PHYSADDRLO(di->txdpa) > SI_PCI_DMA_SZ) { ++ DMA_ERROR(("%s: %s: txdpa 0x%x: addrext not supported\n", ++ di->name, __FUNCTION__, (uint32)PHYSADDRLO(di->txdpa))); ++ goto fail; ++ } ++ if (PHYSADDRLO(di->rxdpa) > SI_PCI_DMA_SZ) { ++ DMA_ERROR(("%s: %s: rxdpa 0x%x: addrext not supported\n", ++ di->name, __FUNCTION__, (uint32)PHYSADDRLO(di->rxdpa))); ++ goto fail; ++ } ++ } ++ ++ DMA_TRACE(("ddoffsetlow 0x%x ddoffsethigh 0x%x dataoffsetlow 0x%x dataoffsethigh " ++ "0x%x addrext %d\n", di->ddoffsetlow, di->ddoffsethigh, di->dataoffsetlow, ++ di->dataoffsethigh, di->addrext)); ++ ++ /* allocate DMA mapping vectors */ ++ if (DMASGLIST_ENAB) { ++ if (ntxd) { ++ size = ntxd * sizeof(hnddma_seg_map_t); ++ if ((di->txp_dmah = (hnddma_seg_map_t *)MALLOC(osh, size)) == NULL) { ++ goto fail; ++ } ++ bzero(di->txp_dmah, size); ++ } ++ ++ if (nrxd) { ++ size = nrxd * sizeof(hnddma_seg_map_t); ++ if ((di->rxp_dmah = (hnddma_seg_map_t *)MALLOC(osh, size)) == NULL) { ++ goto fail; ++ } ++ bzero(di->rxp_dmah, size); ++ } ++ } ++ ++ return ((hnddma_t *)di); ++ ++fail: ++ _dma_detach(di); ++ return (NULL); ++} ++ ++/* init the tx or rx descriptor */ ++static INLINE void ++dma32_dd_upd(dma_info_t *di, dma32dd_t *ddring, dmaaddr_t pa, uint outidx, uint32 *flags, ++ uint32 bufcount) ++{ ++ /* dma32 uses 32-bit control to fit both flags and bufcounter */ ++ *flags = *flags | (bufcount & CTRL_BC_MASK); ++ ++ if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) { ++ W_SM(&ddring[outidx].addr, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow)); ++ W_SM(&ddring[outidx].ctrl, BUS_SWAP32(*flags)); ++ } else { ++ /* address extension */ ++ uint32 ae; ++ ASSERT(di->addrext); ++ ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT; ++ PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH; ++ ++ *flags |= (ae << CTRL_AE_SHIFT); ++ W_SM(&ddring[outidx].addr, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow)); ++ W_SM(&ddring[outidx].ctrl, 
BUS_SWAP32(*flags)); ++ } ++} ++ ++/* Check for odd number of 1's */ ++STATIC INLINE uint32 parity32(uint32 data) ++{ ++ data ^= data >> 16; ++ data ^= data >> 8; ++ data ^= data >> 4; ++ data ^= data >> 2; ++ data ^= data >> 1; ++ ++ return (data & 1); ++} ++ ++#define DMA64_DD_PARITY(dd) parity32((dd)->addrlow ^ (dd)->addrhigh ^ (dd)->ctrl1 ^ (dd)->ctrl2) ++ ++static INLINE void ++dma64_dd_upd(dma_info_t *di, dma64dd_t *ddring, dmaaddr_t pa, uint outidx, uint32 *flags, ++ uint32 bufcount) ++{ ++ uint32 ctrl2 = bufcount & D64_CTRL2_BC_MASK; ++ ++ /* PCI bus with big(>1G) physical address, use address extension */ ++#if defined(__mips__) && defined(IL_BIGENDIAN) ++ if ((di->dataoffsetlow == SI_SDRAM_SWAPPED) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) { ++#else ++ if ((di->dataoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) { ++#endif /* defined(__mips__) && defined(IL_BIGENDIAN) */ ++ ASSERT((PHYSADDRHI(pa) & PCI64ADDR_HIGH) == 0); ++ ++ W_SM(&ddring[outidx].addrlow, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow)); ++ W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(PHYSADDRHI(pa) + di->dataoffsethigh)); ++ W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags)); ++ W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2)); ++ } else { ++ /* address extension for 32-bit PCI */ ++ uint32 ae; ++ ASSERT(di->addrext); ++ ++ ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT; ++ PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH; ++ ASSERT(PHYSADDRHI(pa) == 0); ++ ++ ctrl2 |= (ae << D64_CTRL2_AE_SHIFT) & D64_CTRL2_AE; ++ W_SM(&ddring[outidx].addrlow, BUS_SWAP32(PHYSADDRLO(pa) + di->dataoffsetlow)); ++ W_SM(&ddring[outidx].addrhigh, BUS_SWAP32(0 + di->dataoffsethigh)); ++ W_SM(&ddring[outidx].ctrl1, BUS_SWAP32(*flags)); ++ W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2)); ++ } ++ if (di->hnddma.dmactrlflags & DMA_CTRL_PEN) { ++ if (DMA64_DD_PARITY(&ddring[outidx])) { ++ W_SM(&ddring[outidx].ctrl2, BUS_SWAP32(ctrl2 | D64_CTRL2_PARITY)); ++ } ++ } ++ ++#ifndef CONFIG_BCM_IPROC_GMAC_ACP ++/* Test */ 
++#if defined(__arm__) ++ OSL_CACHE_FLUSH((uint)OSL_CACHED(&ddring[outidx]), sizeof(dma64dd_t)); ++#endif ++#endif /* ! CONFIG_BCM_IPROC_GMAC_ACP */ ++} ++ ++static bool ++_dma32_addrext(osl_t *osh, dma32regs_t *dma32regs) ++{ ++ uint32 w; ++ ++ OR_REG(osh, &dma32regs->control, XC_AE); ++ w = R_REG(osh, &dma32regs->control); ++ AND_REG(osh, &dma32regs->control, ~XC_AE); ++ return ((w & XC_AE) == XC_AE); ++} ++ ++static bool ++_dma_alloc(dma_info_t *di, uint direction) ++{ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ return dma64_alloc(di, direction); ++ } else if (DMA32_ENAB(di)) { ++ return dma32_alloc(di, direction); ++ } else { ++ ASSERT(0); ++ } ++} ++ ++/* !! may be called with core in reset */ ++static void ++_dma_detach(dma_info_t *di) ++{ ++ ++ DMA_TRACE(("%s: dma_detach\n", di->name)); ++ ++ /* shouldn't be here if descriptors are unreclaimed */ ++ ASSERT(di->txin == di->txout); ++ ASSERT(di->rxin == di->rxout); ++ ++ /* free dma descriptor rings */ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ if (di->txd64) { ++ DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->txd64 - di->txdalign), ++ di->txdalloc, (di->txdpaorig), &di->tx_dmah); ++ } ++ if (di->rxd64) { ++ DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->rxd64 - di->rxdalign), ++ di->rxdalloc, (di->rxdpaorig), &di->rx_dmah); ++ } ++ } else if (DMA32_ENAB(di)) { ++ if (di->txd32) { ++ DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->txd32 - di->txdalign), ++ di->txdalloc, (di->txdpaorig), &di->tx_dmah); ++ } ++ if (di->rxd32) { ++ DMA_FREE_CONSISTENT(di->osh, ((int8 *)(uintptr)di->rxd32 - di->rxdalign), ++ di->rxdalloc, (di->rxdpaorig), &di->rx_dmah); ++ } ++ } else ++ ASSERT(0); ++ ++ /* free packet pointer vectors */ ++ if (di->txp) { ++ MFREE(di->osh, (void *)di->txp, (di->ntxd * sizeof(void *))); ++ } ++ if (di->rxp) { ++ MFREE(di->osh, (void *)di->rxp, (di->nrxd * sizeof(void *))); ++ } ++ ++ /* free tx packet DMA handles */ ++ if (di->txp_dmah) { ++ MFREE(di->osh, (void *)di->txp_dmah, 
di->ntxd * sizeof(hnddma_seg_map_t)); ++ } ++ ++ /* free rx packet DMA handles */ ++ if (di->rxp_dmah) { ++ MFREE(di->osh, (void *)di->rxp_dmah, di->nrxd * sizeof(hnddma_seg_map_t)); ++ } ++ ++ /* free our private info structure */ ++ MFREE(di->osh, (void *)di, sizeof(dma_info_t)); ++ ++} ++ ++static bool ++_dma_descriptor_align(dma_info_t *di) ++{ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ uint32 addrl; ++ ++ /* Check to see if the descriptors need to be aligned on 4K/8K or not */ ++ if (di->d64txregs != NULL) { ++ W_REG(di->osh, &di->d64txregs->addrlow, 0xff0); ++ addrl = R_REG(di->osh, &di->d64txregs->addrlow); ++ if (addrl != 0) { ++ return FALSE; ++ } ++ } else if (di->d64rxregs != NULL) { ++ W_REG(di->osh, &di->d64rxregs->addrlow, 0xff0); ++ addrl = R_REG(di->osh, &di->d64rxregs->addrlow); ++ if (addrl != 0) { ++ return FALSE; ++ } ++ } ++ } ++ return TRUE; ++} ++ ++/* return TRUE if this dma engine supports DmaExtendedAddrChanges, otherwise FALSE */ ++static bool ++_dma_isaddrext(dma_info_t *di) ++{ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ /* DMA64 supports full 32- or 64-bit operation. 
AE is always valid */ ++ ++ /* not all tx or rx channel are available */ ++ if (di->d64txregs != NULL) { ++ if (!_dma64_addrext(di->osh, di->d64txregs)) { ++ DMA_ERROR(("%s: _dma_isaddrext: DMA64 tx doesn't have AE set\n", ++ di->name)); ++ ASSERT(0); ++ } ++ return TRUE; ++ } else if (di->d64rxregs != NULL) { ++ if (!_dma64_addrext(di->osh, di->d64rxregs)) { ++ DMA_ERROR(("%s: _dma_isaddrext: DMA64 rx doesn't have AE set\n", ++ di->name)); ++ ASSERT(0); ++ } ++ return TRUE; ++ } ++ return FALSE; ++ } else if (DMA32_ENAB(di)) { ++ if (di->d32txregs) { ++ return (_dma32_addrext(di->osh, di->d32txregs)); ++ } else if (di->d32rxregs) { ++ return (_dma32_addrext(di->osh, di->d32rxregs)); ++ } ++ } else { ++ ASSERT(0); ++ } ++ ++ return FALSE; ++} ++ ++/* initialize descriptor table base address */ ++static void ++_dma_ddtable_init(dma_info_t *di, uint direction, dmaaddr_t pa) ++{ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ if (!di->aligndesc_4k) { ++ if (direction == DMA_TX) { ++ di->xmtptrbase = PHYSADDRLO(pa); ++ } else { ++ di->rcvptrbase = PHYSADDRLO(pa); ++ } ++ } ++ ++ if ((di->ddoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) { ++ if (direction == DMA_TX) { ++ W_REG(di->osh, &di->d64txregs->addrlow, (PHYSADDRLO(pa) + ++ di->ddoffsetlow)); ++ W_REG(di->osh, &di->d64txregs->addrhigh, (PHYSADDRHI(pa) + ++ di->ddoffsethigh)); ++ } else { ++ W_REG(di->osh, &di->d64rxregs->addrlow, (PHYSADDRLO(pa) + ++ di->ddoffsetlow)); ++ W_REG(di->osh, &di->d64rxregs->addrhigh, (PHYSADDRHI(pa) + ++ di->ddoffsethigh)); ++ } ++ } else { ++ /* DMA64 32bits address extension */ ++ uint32 ae; ++ ASSERT(di->addrext); ++ ASSERT(PHYSADDRHI(pa) == 0); ++ ++ /* shift the high bit(s) from pa to ae */ ++ ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT; ++ PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH; ++ ++ if (direction == DMA_TX) { ++ W_REG(di->osh, &di->d64txregs->addrlow, (PHYSADDRLO(pa) + ++ di->ddoffsetlow)); ++ W_REG(di->osh, &di->d64txregs->addrhigh, di->ddoffsethigh); ++ 
SET_REG(di->osh, &di->d64txregs->control, D64_XC_AE, ++ (ae << D64_XC_AE_SHIFT)); ++ } else { ++ W_REG(di->osh, &di->d64rxregs->addrlow, (PHYSADDRLO(pa) + ++ di->ddoffsetlow)); ++ W_REG(di->osh, &di->d64rxregs->addrhigh, di->ddoffsethigh); ++ SET_REG(di->osh, &di->d64rxregs->control, D64_RC_AE, ++ (ae << D64_RC_AE_SHIFT)); ++ } ++ } ++ ++ } else if (DMA32_ENAB(di)) { ++ ASSERT(PHYSADDRHI(pa) == 0); ++ if ((di->ddoffsetlow == 0) || !(PHYSADDRLO(pa) & PCI32ADDR_HIGH)) { ++ if (direction == DMA_TX) { ++ W_REG(di->osh, &di->d32txregs->addr, (PHYSADDRLO(pa) + ++ di->ddoffsetlow)); ++ } else { ++ W_REG(di->osh, &di->d32rxregs->addr, (PHYSADDRLO(pa) + ++ di->ddoffsetlow)); ++ } ++ } else { ++ /* dma32 address extension */ ++ uint32 ae; ++ ASSERT(di->addrext); ++ ++ /* shift the high bit(s) from pa to ae */ ++ ae = (PHYSADDRLO(pa) & PCI32ADDR_HIGH) >> PCI32ADDR_HIGH_SHIFT; ++ PHYSADDRLO(pa) &= ~PCI32ADDR_HIGH; ++ ++ if (direction == DMA_TX) { ++ W_REG(di->osh, &di->d32txregs->addr, (PHYSADDRLO(pa) + ++ di->ddoffsetlow)); ++ SET_REG(di->osh, &di->d32txregs->control, XC_AE, ae <osh, &di->d32rxregs->addr, (PHYSADDRLO(pa) + ++ di->ddoffsetlow)); ++ SET_REG(di->osh, &di->d32rxregs->control, RC_AE, ae <name)); ++ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ OR_REG(di->osh, &di->d64txregs->control, D64_XC_LE); ++ } else if (DMA32_ENAB(di)) { ++ OR_REG(di->osh, &di->d32txregs->control, XC_LE); ++ } else { ++ ASSERT(0); ++ } ++} ++ ++static void ++_dma_rxinit(dma_info_t *di) ++{ ++ DMA_TRACE(("%s: dma_rxinit\n", di->name)); ++ ++ if (di->nrxd == 0) { ++ return; ++ } ++ ++ /* During the reset procedure, the active rxd may not be zero if pktpool is ++ * enabled, we need to reclaim active rxd to avoid rxd being leaked. 
++ */ ++ if ((POOL_ENAB(di->pktpool)) && (NRXDACTIVE(di->rxin, di->rxout))) { ++ _dma_rxreclaim(di); ++ } ++ ++ ASSERT(di->rxin == di->rxout); ++ di->rxin = di->rxout = di->rs0cd = 0; ++ ++ /* clear rx descriptor ring */ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ BZERO_SM((void *)(uintptr)di->rxd64, (di->nrxd * sizeof(dma64dd_t))); ++ ++ /* DMA engine with out alignment requirement requires table to be inited ++ * before enabling the engine ++ */ ++ if (!di->aligndesc_4k) { ++ _dma_ddtable_init(di, DMA_RX, di->rxdpa); ++ } ++ ++ _dma_rxenable(di); ++ ++ if (di->aligndesc_4k) { ++ _dma_ddtable_init(di, DMA_RX, di->rxdpa); ++ } ++ } else if (DMA32_ENAB(di)) { ++ BZERO_SM((void *)(uintptr)di->rxd32, (di->nrxd * sizeof(dma32dd_t))); ++ _dma_rxenable(di); ++ _dma_ddtable_init(di, DMA_RX, di->rxdpa); ++ } else { ++ ASSERT(0); ++ } ++} ++ ++static void ++_dma_rxenable(dma_info_t *di) ++{ ++ uint dmactrlflags = di->hnddma.dmactrlflags; ++ ++ DMA_TRACE(("%s: dma_rxenable\n", di->name)); ++ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ uint32 control = (R_REG(di->osh, &di->d64rxregs->control) & D64_RC_AE) | D64_RC_RE; ++ ++ if ((dmactrlflags & DMA_CTRL_PEN) == 0) { ++ control |= D64_RC_PD; ++ } ++ ++ if (dmactrlflags & DMA_CTRL_ROC) { ++ control |= D64_RC_OC; ++ } ++ ++ /* These bits 20:18 (burstLen) of control register can be written but will take ++ * effect only if these bits are valid. So this will not affect previous versions ++ * of the DMA. They will continue to have those bits set to 0. 
++ */ ++ control &= ~D64_RC_BL_MASK; ++ control |= (di->rxburstlen << D64_RC_BL_SHIFT); ++ ++ control &= ~D64_RC_PC_MASK; ++ control |= (di->rxprefetchctl << D64_RC_PC_SHIFT); ++ ++ control &= ~D64_RC_PT_MASK; ++ control |= (di->rxprefetchthresh << D64_RC_PT_SHIFT); ++ ++ W_REG(di->osh, &di->d64rxregs->control, ++ ((di->rxoffset << D64_RC_RO_SHIFT) | control)); ++ } else if (DMA32_ENAB(di)) { ++ uint32 control = (R_REG(di->osh, &di->d32rxregs->control) & RC_AE) | RC_RE; ++ ++ if ((dmactrlflags & DMA_CTRL_PEN) == 0) { ++ control |= RC_PD; ++ } ++ ++ if (dmactrlflags & DMA_CTRL_ROC) { ++ control |= RC_OC; ++ } ++ ++ /* These bits 20:18 (burstLen) of control register can be written but will take ++ * effect only if these bits are valid. So this will not affect previous versions ++ * of the DMA. They will continue to have those bits set to 0. ++ */ ++ control &= ~RC_BL_MASK; ++ control |= (di->rxburstlen << RC_BL_SHIFT); ++ ++ control &= ~RC_PC_MASK; ++ control |= (di->rxprefetchctl << RC_PC_SHIFT); ++ ++ control &= ~RC_PT_MASK; ++ control |= (di->rxprefetchthresh << RC_PT_SHIFT); ++ ++ W_REG(di->osh, &di->d32rxregs->control, ++ ((di->rxoffset << RC_RO_SHIFT) | control)); ++ } else { ++ ASSERT(0); ++ } ++} ++ ++static void ++_dma_rx_param_get(dma_info_t *di, uint16 *rxoffset, uint16 *rxbufsize) ++{ ++ /* the normal values fit into 16 bits */ ++ *rxoffset = (uint16)di->rxoffset; ++ *rxbufsize = (uint16)di->rxbufsize; ++} ++ ++/* !! rx entry routine ++ * returns a pointer to the next frame received, or NULL if there are no more ++ * if DMA_CTRL_RXMULTI is defined, DMA scattering(multiple buffers) is supported ++ * with pkts chain ++ * otherwise, it's treated as giant pkt and will be tossed. ++ * The DMA scattering starts with normal DMA header, followed by first buffer data. 
++ * After it reaches the max size of buffer, the data continues in next DMA descriptor ++ * buffer WITHOUT DMA header ++ */ ++static void * BCMFASTPATH ++_dma_rx(dma_info_t *di) ++{ ++ void *p, *head, *tail; ++ uint len; ++ uint pkt_len; ++ int resid = 0; ++#ifdef BCM4335 ++ dma64regs_t *dregs = di->d64rxregs; ++#endif ++ ++next_frame: ++ head = _dma_getnextrxp(di, FALSE); ++ if (head == NULL) { ++ return (NULL); ++ } ++ ++ len = ltoh16(*(uint16 *)(PKTDATA(di->osh, head))); ++ DMA_TRACE(("%s: dma_rx len %d\n", di->name, len)); ++ ++ /* set actual length */ ++ pkt_len = MIN((di->rxoffset + len), di->rxbufsize); ++ PKTSETLEN(di->osh, head, pkt_len); ++ resid = len - (di->rxbufsize - di->rxoffset); ++ ++ /* check for single or multi-buffer rx */ ++ if (resid <= 0) { ++ /* Single frame, all good */ ++ } else if (di->hnddma.dmactrlflags & DMA_CTRL_RXSINGLE) { ++ DMA_TRACE(("%s: dma_rx: corrupted length (%d)\n", di->name, len)); ++ PKTFREE(di->osh, head, FALSE); ++ di->hnddma.rxgiants++; ++ goto next_frame; ++ } else { ++ /* multi-buffer rx */ ++#ifdef BCMDBG ++ p = NULL; /* get rid of compiler warning */ ++#endif /* BCMDBG */ ++ tail = head; ++ while ((resid > 0) && (p = _dma_getnextrxp(di, FALSE))) { ++ PKTSETNEXT(di->osh, tail, p); ++ pkt_len = MIN(resid, (int)di->rxbufsize); ++ PKTSETLEN(di->osh, p, pkt_len); ++ ++ tail = p; ++ resid -= di->rxbufsize; ++ } ++ ++#ifdef BCMDBG ++ if (resid > 0) { ++ uint16 cur; ++ ASSERT(p == NULL); ++ cur = (DMA64_ENAB(di) && DMA64_MODE(di)) ? 
++ B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) - ++ di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t) : ++ B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, ++ dma32dd_t); ++ DMA_ERROR(("_dma_rx, rxin %d rxout %d, hw_curr %d\n", ++ di->rxin, di->rxout, cur)); ++ } ++#endif /* BCMDBG */ ++ ++ if ((di->hnddma.dmactrlflags & DMA_CTRL_RXMULTI) == 0) { ++ DMA_ERROR(("%s: dma_rx: bad frame length (%d)\n", di->name, len)); ++ PKTFREE(di->osh, head, FALSE); ++ di->hnddma.rxgiants++; ++ goto next_frame; ++ } ++ } ++ ++ return (head); ++} ++ ++/* post receive buffers ++ * return FALSE is refill failed completely and ring is empty ++ * this will stall the rx dma and user might want to call rxfill again asap ++ * This unlikely happens on memory-rich NIC, but often on memory-constrained dongle ++ */ ++static bool BCMFASTPATH ++_dma_rxfill(dma_info_t *di) ++{ ++ void *p; ++ uint16 rxin, rxout; ++ uint32 flags = 0; ++ uint n; ++ uint i; ++ dmaaddr_t pa; ++ uint extra_offset = 0, extra_pad; ++ bool ring_empty; ++ uint alignment_req = (di->hnddma.dmactrlflags & DMA_CTRL_USB_BOUNDRY4KB_WAR) ? ++ 16 : 1; /* MUST BE POWER of 2 */ ++ ++ ring_empty = FALSE; ++ ++ /* ++ * Determine how many receive buffers we're lacking ++ * from the full complement, allocate, initialize, ++ * and post them, then update the chip rx lastdscr. 
++ */ ++ ++ rxin = di->rxin; ++ rxout = di->rxout; ++ ++ n = di->nrxpost - NRXDACTIVE(rxin, rxout); ++ ++ if (di->rxbufsize > BCMEXTRAHDROOM) { ++ extra_offset = di->rxextrahdrroom; ++ } ++ ++ DMA_TRACE(("%s: dma_rxfill: post %d\n", di->name, n)); ++ ++ for (i = 0; i < n; i++) { ++ /* the di->rxbufsize doesn't include the extra headroom, we need to add it to the ++ size to be allocated ++ */ ++ if (POOL_ENAB(di->pktpool)) { ++ ASSERT(di->pktpool); ++ p = pktpool_get(di->pktpool); ++#ifdef BCMDBG_POOL ++ if (p) { ++ PKTPOOLSETSTATE(p, POOL_RXFILL); ++ } ++#endif /* BCMDBG_POOL */ ++ } ++ else { ++ p = PKTGET(di->osh, (di->rxbufsize + extra_offset + alignment_req - 1), FALSE); ++ } ++ if (p == NULL) { ++ DMA_TRACE(("%s: dma_rxfill: out of rxbufs\n", di->name)); ++ if (i == 0) { ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ if (dma64_rxidle(di)) { ++ DMA_TRACE(("%s: rxfill64: ring is empty !\n", di->name)); ++ ring_empty = TRUE; ++ } ++ } else if (DMA32_ENAB(di)) { ++ if (dma32_rxidle(di)) { ++ DMA_TRACE(("%s: rxfill32: ring is empty !\n", di->name)); ++ ring_empty = TRUE; ++ } ++ } else { ++ ASSERT(0); ++ } ++ } ++ di->hnddma.rxnobuf++; ++ break; ++ } ++ /* reserve an extra headroom, if applicable */ ++ if (di->hnddma.dmactrlflags & DMA_CTRL_USB_BOUNDRY4KB_WAR) { ++ extra_pad = ((alignment_req - (uint)(((unsigned long)PKTDATA(di->osh, p) - ++ (unsigned long)(uchar *)0))) & (alignment_req - 1)); ++ } else { ++ extra_pad = 0; ++ } ++ ++ if (extra_offset + extra_pad) { ++ PKTPULL(di->osh, p, extra_offset + extra_pad); ++ } ++ ++ /* Do a cached write instead of uncached write since DMA_MAP ++ * will flush the cache. 
++ */ ++ *(uint16 *)(PKTDATA(di->osh, p)) = 0; ++ ++ if (DMASGLIST_ENAB) { ++ bzero(&di->rxp_dmah[rxout], sizeof(hnddma_seg_map_t)); ++ } ++ ++#if defined(CONFIG_BCM_IPROC_GMAC_ACP) && !defined(BCMDMASGLISTOSL) ++ pa = virt_to_phys(PKTDATA(di->osh, p)); ++#else ++ pa = DMA_MAP(di->osh, PKTDATA(di->osh, p), ++ di->rxbufsize, DMA_RX, p, ++ &di->rxp_dmah[rxout]); ++#endif /* defined(CONFIG_BCM_IPROC_GMAC_ACP) && !defined(BCMDMASGLISTOSL) */ ++ ++ ASSERT(ISALIGNED(PHYSADDRLO(pa), 4)); ++ ++ /* save the free packet pointer */ ++ ASSERT(di->rxp[rxout] == NULL); ++ di->rxp[rxout] = p; ++ ++ /* reset flags for each descriptor */ ++ flags = 0; ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ if (rxout == (di->nrxd - 1)) { ++ flags = D64_CTRL1_EOT; ++ } ++ ++ dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, di->rxbufsize); ++ } else if (DMA32_ENAB(di)) { ++ if (rxout == (di->nrxd - 1)) { ++ flags = CTRL_EOT; ++ } ++ ++ ASSERT(PHYSADDRHI(pa) == 0); ++ dma32_dd_upd(di, di->rxd32, pa, rxout, &flags, di->rxbufsize); ++ } else { ++ ASSERT(0); ++ } ++ rxout = NEXTRXD(rxout); ++ } ++ ++ di->rxout = rxout; ++ ++ /* update the chip lastdscr pointer */ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ W_REG(di->osh, &di->d64rxregs->ptr, di->rcvptrbase + I2B(rxout, dma64dd_t)); ++ } else if (DMA32_ENAB(di)) { ++ W_REG(di->osh, &di->d32rxregs->ptr, I2B(rxout, dma32dd_t)); ++ } else { ++ ASSERT(0); ++ } ++ ++ return ring_empty; ++} ++ ++/* like getnexttxp but no reclaim */ ++static void * ++_dma_peeknexttxp(dma_info_t *di) ++{ ++ uint16 end, i; ++ ++ if (di->ntxd == 0) { ++ return (NULL); ++ } ++ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ end = (uint16)B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) - ++ di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t); ++ di->xs0cd = end; ++ } else if (DMA32_ENAB(di)) { ++ end = (uint16)B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t); ++ di->xs0cd = end; ++ } else { ++ ASSERT(0); ++ } ++ ++ for (i = di->txin; i != end; i = 
NEXTTXD(i)) { ++ if (di->txp[i]) { ++ return (di->txp[i]); ++ } ++ } ++ ++ return (NULL); ++} ++ ++int ++_dma_peekntxp(dma_info_t *di, int *len, void *txps[], txd_range_t range) ++{ ++ uint16 start, end, i; ++ uint act; ++ void *txp = NULL; ++ int k, len_max; ++ ++ DMA_TRACE(("%s: dma_peekntxp\n", di->name)); ++ ++ ASSERT(len); ++ ASSERT(txps); ++ ASSERT(di); ++ if (di->ntxd == 0) { ++ *len = 0; ++ return BCME_ERROR; ++ } ++ ++ len_max = *len; ++ *len = 0; ++ ++ start = di->txin; ++ ++ if (range == HNDDMA_RANGE_ALL) { ++ end = di->txout; ++ } else { ++ if (DMA64_ENAB(di)) { ++ end = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) - ++ di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t); ++ ++ act = (uint)(R_REG(di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK); ++ act = (act - di->xmtptrbase) & D64_XS0_CD_MASK; ++ act = (uint)B2I(act, dma64dd_t); ++ } else { ++ end = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t); ++ ++ act = (uint)((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >> ++ XS_AD_SHIFT); ++ act = (uint)B2I(act, dma32dd_t); ++ } ++ ++ di->xs0cd = end; ++ if (end != act) { ++ end = PREVTXD(act); ++ } ++ } ++ ++ if ((start == 0) && (end > di->txout)) { ++ return BCME_ERROR; ++ } ++ ++ k = 0; ++ for (i = start; i != end; i = NEXTTXD(i)) { ++ txp = di->txp[i]; ++ if (txp != NULL) { ++ if (k < len_max) { ++ txps[k++] = txp; ++ } else { ++ break; ++ } ++ } ++ } ++ *len = k; ++ ++ return BCME_OK; ++} ++ ++/* like getnextrxp but not take off the ring */ ++static void * ++_dma_peeknextrxp(dma_info_t *di) ++{ ++ uint16 end, i; ++ ++ if (di->nrxd == 0) { ++ return (NULL); ++ } ++ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ end = (uint16)B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) - ++ di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t); ++ di->rs0cd = end; ++ } else if (DMA32_ENAB(di)) { ++ end = (uint16)B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t); ++ di->rs0cd = end; ++ } else { ++ 
ASSERT(0); ++ } ++ ++ for (i = di->rxin; i != end; i = NEXTRXD(i)) { ++ if (di->rxp[i]) { ++ return (di->rxp[i]); ++ } ++ } ++ ++ return (NULL); ++} ++ ++static void ++_dma_rxreclaim(dma_info_t *di) ++{ ++ void *p; ++ bool origcb = TRUE; ++ ++#ifndef EFI ++ /* "unused local" warning suppression for OSLs that ++ * define PKTFREE() without using the di->osh arg ++ */ ++ di = di; ++#endif /* EFI */ ++ ++ DMA_TRACE(("%s: dma_rxreclaim\n", di->name)); ++ ++ if (POOL_ENAB(di->pktpool) && ++ ((origcb = pktpool_emptycb_disabled(di->pktpool)) == FALSE)) { ++ pktpool_emptycb_disable(di->pktpool, TRUE); ++ } ++ ++ while ((p = _dma_getnextrxp(di, TRUE))) { ++ PKTFREE(di->osh, p, FALSE); ++ } ++ ++ if (origcb == FALSE) { ++ pktpool_emptycb_disable(di->pktpool, FALSE); ++ } ++} ++ ++static void * BCMFASTPATH ++_dma_getnextrxp(dma_info_t *di, bool forceall) ++{ ++ if (di->nrxd == 0) { ++ return (NULL); ++ } ++ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ return dma64_getnextrxp(di, forceall); ++ } else if (DMA32_ENAB(di)) { ++ return dma32_getnextrxp(di, forceall); ++ } else { ++ ASSERT(0); ++ } ++} ++ ++static void ++_dma_txblock(dma_info_t *di) ++{ ++ di->hnddma.txavail = 0; ++} ++ ++static void ++_dma_txunblock(dma_info_t *di) ++{ ++ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; ++} ++ ++static uint ++_dma_txactive(dma_info_t *di) ++{ ++ return NTXDACTIVE(di->txin, di->txout); ++} ++ ++static uint ++_dma_txpending(dma_info_t *di) ++{ ++ uint16 curr; ++ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ curr = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) - ++ di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t); ++ di->xs0cd = curr; ++ } else if (DMA32_ENAB(di)) { ++ curr = B2I(R_REG(di->osh, &di->d32txregs->status) & XS_CD_MASK, dma32dd_t); ++ di->xs0cd = curr; ++ } else { ++ ASSERT(0); ++ } ++ ++ return NTXDACTIVE(curr, di->txout); ++} ++ ++static uint ++_dma_txcommitted(dma_info_t *di) ++{ ++ uint16 ptr; ++ uint txin = di->txin; ++ ++ if (txin 
== di->txout) { ++ return 0; ++ } ++ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ ptr = B2I(R_REG(di->osh, &di->d64txregs->ptr), dma64dd_t); ++ } else if (DMA32_ENAB(di)) { ++ ptr = B2I(R_REG(di->osh, &di->d32txregs->ptr), dma32dd_t); ++ } else { ++ ASSERT(0); ++ } ++ ++ return NTXDACTIVE(di->txin, ptr); ++} ++ ++static uint ++_dma_rxactive(dma_info_t *di) ++{ ++ return NRXDACTIVE(di->rxin, di->rxout); ++} ++ ++static uint ++_dma_activerxbuf(dma_info_t *di) ++{ ++ uint16 curr, ptr; ++ curr = B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) - ++ di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t); ++ ptr = B2I(((R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK) - ++ di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t); ++ return NRXDACTIVE(curr, ptr); ++} ++ ++ ++static void ++_dma_counterreset(dma_info_t *di) ++{ ++ /* reset all software counter */ ++ di->hnddma.rxgiants = 0; ++ di->hnddma.rxnobuf = 0; ++ di->hnddma.txnobuf = 0; ++} ++ ++static uint ++_dma_ctrlflags(dma_info_t *di, uint mask, uint flags) ++{ ++ uint dmactrlflags; ++ ++ if (!di) { ++ DMA_ERROR(("_dma_ctrlflags: NULL dma handle\n")); ++ return (0); ++ } ++ ++ dmactrlflags = di->hnddma.dmactrlflags; ++ ASSERT((flags & ~mask) == 0); ++ ++ dmactrlflags &= ~mask; ++ dmactrlflags |= flags; ++ ++ /* If trying to enable parity, check if parity is actually supported */ ++ if (dmactrlflags & DMA_CTRL_PEN) { ++ uint32 control; ++ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ control = R_REG(di->osh, &di->d64txregs->control); ++ W_REG(di->osh, &di->d64txregs->control, control | D64_XC_PD); ++ if (R_REG(di->osh, &di->d64txregs->control) & D64_XC_PD) { ++ /* We *can* disable it so it is supported, ++ * restore control register ++ */ ++ W_REG(di->osh, &di->d64txregs->control, control); ++ } else { ++ /* Not supported, don't allow it to be enabled */ ++ dmactrlflags &= ~DMA_CTRL_PEN; ++ } ++ } else if (DMA32_ENAB(di)) { ++ control = R_REG(di->osh, &di->d32txregs->control); ++ W_REG(di->osh, 
&di->d32txregs->control, control | XC_PD); ++ if (R_REG(di->osh, &di->d32txregs->control) & XC_PD) { ++ W_REG(di->osh, &di->d32txregs->control, control); ++ } else { ++ /* Not supported, don't allow it to be enabled */ ++ dmactrlflags &= ~DMA_CTRL_PEN; ++ } ++ } else { ++ ASSERT(0); ++ } ++ } ++ ++ di->hnddma.dmactrlflags = dmactrlflags; ++ ++ return (dmactrlflags); ++} ++ ++/* get the address of the var in order to change later */ ++static uintptr ++_dma_getvar(dma_info_t *di, const char *name) ++{ ++ if (!strcmp(name, "&txavail")) { ++ return ((uintptr) &(di->hnddma.txavail)); ++ } else { ++ ASSERT(0); ++ } ++ return (0); ++} ++ ++static uint ++_dma_avoidancecnt(dma_info_t *di) ++{ ++ return (di->dma_avoidance_cnt); ++} ++ ++void ++dma_txpioloopback(osl_t *osh, dma32regs_t *regs) ++{ ++ OR_REG(osh, ®s->control, XC_LE); ++} ++ ++static ++uint8 dma_align_sizetobits(uint size) ++{ ++ uint8 bitpos = 0; ++ ASSERT(size); ++ ASSERT(!(size & (size-1))); ++ while (size >>= 1) { ++ bitpos ++; ++ } ++ return (bitpos); ++} ++ ++/* This function ensures that the DMA descriptor ring will not get allocated ++ * across Page boundary. If the allocation is done across the page boundary ++ * at the first time, then it is freed and the allocation is done at ++ * descriptor ring size aligned location. 
This will ensure that the ring will ++ * not cross page boundary ++ */ ++static void * ++dma_ringalloc(osl_t *osh, uint32 boundary, uint size, uint16 *alignbits, uint* alloced, ++ dmaaddr_t *descpa, osldma_t **dmah) ++{ ++ void * va; ++ uint32 desc_strtaddr; ++ uint32 alignbytes = 1 << *alignbits; ++ ++ if ((va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa, dmah)) == NULL) { ++ return NULL; ++ } ++ ++ /* printk("%s va(0x%x)\n", __FUNCTION__, va); */ ++ desc_strtaddr = (uint32)ROUNDUP((uint)PHYSADDRLO(*descpa), alignbytes); ++ if (((desc_strtaddr + size - 1) & boundary) != ++ (desc_strtaddr & boundary)) { ++ *alignbits = dma_align_sizetobits(size); ++ DMA_FREE_CONSISTENT(osh, va, ++ size, *descpa, dmah); ++ va = DMA_ALLOC_CONSISTENT(osh, size, *alignbits, alloced, descpa, dmah); ++ } ++ return va; ++} ++ ++#if defined(BCMDBG) ++static void ++dma32_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma32dd_t *ring, uint start, uint end, ++ uint max_num) ++{ ++ uint i; ++ ++ for (i = start; i != end; i = XXD((i + 1), max_num)) { ++ /* in the format of high->low 8 bytes */ ++ bcm_bprintf(b, "ring index %d: 0x%x %x\n", ++ i, R_SM(&ring[i].addr), R_SM(&ring[i].ctrl)); ++ } ++} ++ ++static void ++dma32_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring) ++{ ++ if (di->ntxd == 0) { ++ return; ++ } ++ ++ bcm_bprintf(b, "DMA32: txd32 %p txdpa 0x%lx txp %p txin %d txout %d " ++ "txavail %d txnodesc %d\n", di->txd32, PHYSADDRLO(di->txdpa), di->txp, di->txin, ++ di->txout, di->hnddma.txavail, di->hnddma.txnodesc); ++ ++ bcm_bprintf(b, "xmtcontrol 0x%x xmtaddr 0x%x xmtptr 0x%x xmtstatus 0x%x\n", ++ R_REG(di->osh, &di->d32txregs->control), ++ R_REG(di->osh, &di->d32txregs->addr), ++ R_REG(di->osh, &di->d32txregs->ptr), ++ R_REG(di->osh, &di->d32txregs->status)); ++ ++ if (dumpring && di->txd32) { ++ dma32_dumpring(di, b, di->txd32, di->txin, di->txout, di->ntxd); ++ } ++} ++ ++static void ++dma32_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring) ++{ ++ 
if (di->nrxd == 0) { ++ return; ++ } ++ ++ bcm_bprintf(b, "DMA32: rxd32 %p rxdpa 0x%lx rxp %p rxin %d rxout %d\n", ++ di->rxd32, PHYSADDRLO(di->rxdpa), di->rxp, di->rxin, di->rxout); ++ ++ bcm_bprintf(b, "rcvcontrol 0x%x rcvaddr 0x%x rcvptr 0x%x rcvstatus 0x%x\n", ++ R_REG(di->osh, &di->d32rxregs->control), ++ R_REG(di->osh, &di->d32rxregs->addr), ++ R_REG(di->osh, &di->d32rxregs->ptr), ++ R_REG(di->osh, &di->d32rxregs->status)); ++ if (di->rxd32 && dumpring) { ++ dma32_dumpring(di, b, di->rxd32, di->rxin, di->rxout, di->nrxd); ++ } ++} ++ ++static void ++dma32_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring) ++{ ++ dma32_dumptx(di, b, dumpring); ++ dma32_dumprx(di, b, dumpring); ++} ++ ++static void ++dma64_dumpring(dma_info_t *di, struct bcmstrbuf *b, dma64dd_t *ring, uint start, uint end, ++ uint max_num) ++{ ++ uint i; ++ ++ for (i = start; i != end; i = XXD((i + 1), max_num)) { ++ /* in the format of high->low 16 bytes */ ++ bcm_bprintf(b, "ring index %d: 0x%x %x %x %x\n", ++ i, R_SM(&ring[i].addrhigh), R_SM(&ring[i].addrlow), ++ R_SM(&ring[i].ctrl2), R_SM(&ring[i].ctrl1)); ++ } ++} ++ ++static void ++dma64_dumptx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring) ++{ ++ if (di->ntxd == 0) { ++ return; ++ } ++ ++ bcm_bprintf(b, "DMA64: txd64 %p txdpa 0x%lx txdpahi 0x%lx txp %p txin %d txout %d " ++ "txavail %d txnodesc %d\n", di->txd64, PHYSADDRLO(di->txdpa), ++ PHYSADDRHI(di->txdpaorig), di->txp, di->txin, di->txout, di->hnddma.txavail, ++ di->hnddma.txnodesc); ++ ++ bcm_bprintf(b, "xmtcontrol 0x%x xmtaddrlow 0x%x xmtaddrhigh 0x%x " ++ "xmtptr 0x%x xmtstatus0 0x%x xmtstatus1 0x%x\n", ++ R_REG(di->osh, &di->d64txregs->control), ++ R_REG(di->osh, &di->d64txregs->addrlow), ++ R_REG(di->osh, &di->d64txregs->addrhigh), ++ R_REG(di->osh, &di->d64txregs->ptr), ++ R_REG(di->osh, &di->d64txregs->status0), ++ R_REG(di->osh, &di->d64txregs->status1)); ++ ++ bcm_bprintf(b, "DMA64: DMA avoidance applied %d\n", di->dma_avoidance_cnt); ++ ++ if (dumpring && 
di->txd64) { ++ dma64_dumpring(di, b, di->txd64, di->txin, di->txout, di->ntxd); ++ } ++} ++ ++static void ++dma64_dumprx(dma_info_t *di, struct bcmstrbuf *b, bool dumpring) ++{ ++ if (di->nrxd == 0) { ++ return; ++ } ++ ++ bcm_bprintf(b, "DMA64: rxd64 %p rxdpa 0x%lx rxdpahi 0x%lx rxp %p rxin %d rxout %d\n", ++ di->rxd64, PHYSADDRLO(di->rxdpa), PHYSADDRHI(di->rxdpaorig), di->rxp, ++ di->rxin, di->rxout); ++ ++ bcm_bprintf(b, "rcvcontrol 0x%x rcvaddrlow 0x%x rcvaddrhigh 0x%x rcvptr " ++ "0x%x rcvstatus0 0x%x rcvstatus1 0x%x\n", ++ R_REG(di->osh, &di->d64rxregs->control), ++ R_REG(di->osh, &di->d64rxregs->addrlow), ++ R_REG(di->osh, &di->d64rxregs->addrhigh), ++ R_REG(di->osh, &di->d64rxregs->ptr), ++ R_REG(di->osh, &di->d64rxregs->status0), ++ R_REG(di->osh, &di->d64rxregs->status1)); ++ if (di->rxd64 && dumpring) { ++ dma64_dumpring(di, b, di->rxd64, di->rxin, di->rxout, di->nrxd); ++ } ++} ++ ++static void ++dma64_dump(dma_info_t *di, struct bcmstrbuf *b, bool dumpring) ++{ ++ dma64_dumptx(di, b, dumpring); ++ dma64_dumprx(di, b, dumpring); ++} ++#endif ++ ++ ++/* 32-bit DMA functions */ ++ ++static void ++dma32_txinit(dma_info_t *di) ++{ ++ uint32 control = XC_XE; ++ ++ DMA_TRACE(("%s: dma_txinit\n", di->name)); ++ ++ if (di->ntxd == 0) { ++ return; ++ } ++ ++ di->txin = di->txout = di->xs0cd = 0; ++ di->hnddma.txavail = di->ntxd - 1; ++ ++ /* clear tx descriptor ring */ ++ BZERO_SM(DISCARD_QUAL(di->txd32, void), (di->ntxd * sizeof(dma32dd_t))); ++ ++ /* These bits 20:18 (burstLen) of control register can be written but will take ++ * effect only if these bits are valid. So this will not affect previous versions ++ * of the DMA. They will continue to have those bits set to 0. 
++ */ ++ control |= (di->txburstlen << XC_BL_SHIFT); ++ control |= (di->txmultioutstdrd << XC_MR_SHIFT); ++ control |= (di->txprefetchctl << XC_PC_SHIFT); ++ control |= (di->txprefetchthresh << XC_PT_SHIFT); ++ ++ if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0) { ++ control |= XC_PD; ++ } ++ W_REG(di->osh, &di->d32txregs->control, control); ++ _dma_ddtable_init(di, DMA_TX, di->txdpa); ++} ++ ++static bool ++dma32_txenabled(dma_info_t *di) ++{ ++ uint32 xc; ++ ++ /* If the chip is dead, it is not enabled :-) */ ++ xc = R_REG(di->osh, &di->d32txregs->control); ++ return ((xc != 0xffffffff) && (xc & XC_XE)); ++} ++ ++static void ++dma32_txsuspend(dma_info_t *di) ++{ ++ DMA_TRACE(("%s: dma_txsuspend\n", di->name)); ++ ++ if (di->ntxd == 0) { ++ return; ++ } ++ ++ OR_REG(di->osh, &di->d32txregs->control, XC_SE); ++} ++ ++static void ++dma32_txresume(dma_info_t *di) ++{ ++ DMA_TRACE(("%s: dma_txresume\n", di->name)); ++ ++ if (di->ntxd == 0) { ++ return; ++ } ++ ++ AND_REG(di->osh, &di->d32txregs->control, ~XC_SE); ++} ++ ++static bool ++dma32_txsuspended(dma_info_t *di) ++{ ++ return (di->ntxd == 0) || ((R_REG(di->osh, &di->d32txregs->control) & XC_SE) == XC_SE); ++} ++ ++#ifdef WL_MULTIQUEUE ++static void ++dma32_txflush(dma_info_t *di) ++{ ++ DMA_TRACE(("%s: dma_txflush\n", di->name)); ++ ++ if (di->ntxd == 0) { ++ return; ++ } ++ ++ OR_REG(di->osh, &di->d32txregs->control, XC_SE | XC_FL); ++} ++ ++static void ++dma32_txflush_clear(dma_info_t *di) ++{ ++ uint32 status; ++ ++ DMA_TRACE(("%s: dma_txflush_clear\n", di->name)); ++ ++ if (di->ntxd == 0) { ++ return; ++ } ++ ++ SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK)) ++ != XS_XS_DISABLED) && ++ (status != XS_XS_IDLE) && ++ (status != XS_XS_STOPPED), ++ (10000)); ++ AND_REG(di->osh, &di->d32txregs->control, ~XC_FL); ++} ++#endif /* WL_MULTIQUEUE */ ++ ++static void ++dma32_txreclaim(dma_info_t *di, txd_range_t range) ++{ ++ void *p; ++ ++ DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, ++ 
(range == HNDDMA_RANGE_ALL) ? "all" : ++ ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered"))); ++ ++ if (di->txin == di->txout) { ++ return; ++ } ++ ++ while ((p = dma32_getnexttxp(di, range))) { ++ PKTFREE(di->osh, p, TRUE); ++ } ++} ++ ++static bool ++dma32_txstopped(dma_info_t *di) ++{ ++ return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_STOPPED); ++} ++ ++static bool ++dma32_rxstopped(dma_info_t *di) ++{ ++ return ((R_REG(di->osh, &di->d32rxregs->status) & RS_RS_MASK) == RS_RS_STOPPED); ++} ++ ++static bool ++dma32_alloc(dma_info_t *di, uint direction) ++{ ++ uint size; ++ uint ddlen; ++ void *va; ++ uint alloced; ++ uint16 align; ++ uint16 align_bits; ++ ++ ddlen = sizeof(dma32dd_t); ++ ++ size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen); ++ ++ alloced = 0; ++ align_bits = di->dmadesc_align; ++ align = (1 << align_bits); ++ ++ if (direction == DMA_TX) { ++ if ((va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits, &alloced, ++ &di->txdpaorig, &di->tx_dmah)) == NULL) { ++ DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", ++ di->name)); ++ return FALSE; ++ } ++ ++ PHYSADDRHISET(di->txdpa, 0); ++ ASSERT(PHYSADDRHI(di->txdpaorig) == 0); ++ di->txd32 = (dma32dd_t *)ROUNDUP((uintptr)va, align); ++ di->txdalign = (uint)((int8 *)(uintptr)di->txd32 - (int8 *)va); ++ ++ PHYSADDRLOSET(di->txdpa, PHYSADDRLO(di->txdpaorig) + di->txdalign); ++ /* Make sure that alignment didn't overflow */ ++ ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig)); ++ ++ di->txdalloc = alloced; ++ ASSERT(ISALIGNED(di->txd32, align)); ++ } else { ++ if ((va = dma_ringalloc(di->osh, D32RINGALIGN, size, &align_bits, &alloced, ++ &di->rxdpaorig, &di->rx_dmah)) == NULL) { ++ DMA_ERROR(("%s: dma_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", ++ di->name)); ++ return FALSE; ++ } ++ ++ PHYSADDRHISET(di->rxdpa, 0); ++ ASSERT(PHYSADDRHI(di->rxdpaorig) == 0); ++ di->rxd32 = (dma32dd_t *)ROUNDUP((uintptr)va, align); ++ 
di->rxdalign = (uint)((int8 *)(uintptr)di->rxd32 - (int8 *)va); ++ ++ PHYSADDRLOSET(di->rxdpa, PHYSADDRLO(di->rxdpaorig) + di->rxdalign); ++ /* Make sure that alignment didn't overflow */ ++ ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig)); ++ di->rxdalloc = alloced; ++ ASSERT(ISALIGNED(di->rxd32, align)); ++ } ++ ++ return TRUE; ++} ++ ++static bool ++dma32_txreset(dma_info_t *di) ++{ ++ uint32 status; ++ ++ if (di->ntxd == 0) { ++ return TRUE; ++ } ++ ++ /* suspend tx DMA first */ ++ W_REG(di->osh, &di->d32txregs->control, XC_SE); ++ SPINWAIT(((status = (R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK)) ++ != XS_XS_DISABLED) && ++ (status != XS_XS_IDLE) && ++ (status != XS_XS_STOPPED), ++ (10000)); ++ ++ W_REG(di->osh, &di->d32txregs->control, 0); ++ SPINWAIT(((status = (R_REG(di->osh, ++ &di->d32txregs->status) & XS_XS_MASK)) != XS_XS_DISABLED), ++ 10000); ++ ++ /* We should be disabled at this point */ ++ if (status != XS_XS_DISABLED) { ++ DMA_ERROR(("%s: status != D64_XS0_XS_DISABLED 0x%x\n", __FUNCTION__, status)); ++ ASSERT(status == XS_XS_DISABLED); ++ OSL_DELAY(300); ++ } ++ ++ return (status == XS_XS_DISABLED); ++} ++ ++static bool ++dma32_rxidle(dma_info_t *di) ++{ ++ DMA_TRACE(("%s: dma_rxidle\n", di->name)); ++ ++ if (di->nrxd == 0) { ++ return TRUE; ++ } ++ ++ return ((R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK) == ++ R_REG(di->osh, &di->d32rxregs->ptr)); ++} ++ ++static bool ++dma32_rxreset(dma_info_t *di) ++{ ++ uint32 status; ++ ++ if (di->nrxd == 0) { ++ return TRUE; ++ } ++ ++ W_REG(di->osh, &di->d32rxregs->control, 0); ++ SPINWAIT(((status = (R_REG(di->osh, ++ &di->d32rxregs->status) & RS_RS_MASK)) != RS_RS_DISABLED), ++ 10000); ++ ++ return (status == RS_RS_DISABLED); ++} ++ ++static bool ++dma32_rxenabled(dma_info_t *di) ++{ ++ uint32 rc; ++ ++ rc = R_REG(di->osh, &di->d32rxregs->control); ++ return ((rc != 0xffffffff) && (rc & RC_RE)); ++} ++ ++static bool ++dma32_txsuspendedidle(dma_info_t *di) ++{ ++ if (di->ntxd == 
0) { ++ return TRUE; ++ } ++ ++ if (!(R_REG(di->osh, &di->d32txregs->control) & XC_SE)) { ++ return 0; ++ } ++ ++ if ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) != XS_XS_IDLE) { ++ return 0; ++ } ++ ++ OSL_DELAY(2); ++ return ((R_REG(di->osh, &di->d32txregs->status) & XS_XS_MASK) == XS_XS_IDLE); ++} ++ ++/* !! tx entry routine ++ * supports full 32bit dma engine buffer addressing so ++ * dma buffers can cross 4 Kbyte page boundaries. ++ * ++ * WARNING: call must check the return value for error. ++ * the error(toss frames) could be fatal and cause many subsequent hard to debug problems ++ */ ++static int ++dma32_txfast(dma_info_t *di, void *p0, bool commit) ++{ ++ void *p, *next; ++ uchar *data; ++ uint len; ++ uint16 txout; ++ uint32 flags = 0; ++ dmaaddr_t pa; ++ ++ DMA_TRACE(("%s: dma_txfast\n", di->name)); ++ ++ txout = di->txout; ++ ++ /* ++ * Walk the chain of packet buffers ++ * allocating and initializing transmit descriptor entries. ++ */ ++ for (p = p0; p; p = next) { ++ uint nsegs, j; ++ hnddma_seg_map_t *map; ++ ++ data = PKTDATA(di->osh, p); ++ len = PKTLEN(di->osh, p); ++#ifdef BCM_DMAPAD ++ len += PKTDMAPAD(di->osh, p); ++#endif ++ next = PKTNEXT(di->osh, p); ++ ++ /* return nonzero if out of tx descriptors */ ++ if (NEXTTXD(txout) == di->txin) { ++ goto outoftxd; ++ } ++ ++ if (len == 0) { ++ continue; ++ } ++ ++ if (DMASGLIST_ENAB) { ++ bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t)); ++ } ++ ++ /* get physical address of buffer start */ ++ pa = DMA_MAP(di->osh, data, len, DMA_TX, p, &di->txp_dmah[txout]); ++ ++ if (DMASGLIST_ENAB) { ++ map = &di->txp_dmah[txout]; ++ ++ /* See if all the segments can be accounted for */ ++ if (map->nsegs > (uint)(di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1)) { ++ goto outoftxd; ++ } ++ ++ nsegs = map->nsegs; ++ } else { ++ nsegs = 1; ++ } ++ ++ for (j = 1; j <= nsegs; j++) { ++ flags = 0; ++ if (p == p0 && j == 1) { ++ flags |= CTRL_SOF; ++ } ++ ++ /* With a DMA segment list, Descriptor table is 
filled ++ * using the segment list instead of looping over ++ * buffers in multi-chain DMA. Therefore, EOF for SGLIST is when ++ * end of segment list is reached. ++ */ ++ if ((!DMASGLIST_ENAB && next == NULL) || ++ (DMASGLIST_ENAB && j == nsegs)) { ++ flags |= (CTRL_IOC | CTRL_EOF); ++ } ++ if (txout == (di->ntxd - 1)) { ++ flags |= CTRL_EOT; ++ } ++ ++ if (DMASGLIST_ENAB) { ++ len = map->segs[j - 1].length; ++ pa = map->segs[j - 1].addr; ++ } ++ ASSERT(PHYSADDRHI(pa) == 0); ++ ++ dma32_dd_upd(di, di->txd32, pa, txout, &flags, len); ++ ASSERT(di->txp[txout] == NULL); ++ ++ txout = NEXTTXD(txout); ++ } ++ ++ /* See above. No need to loop over individual buffers */ ++ if (DMASGLIST_ENAB) { ++ break; ++ } ++ } ++ ++ /* if last txd eof not set, fix it */ ++ if (!(flags & CTRL_EOF)) { ++ W_SM(&di->txd32[PREVTXD(txout)].ctrl, BUS_SWAP32(flags | CTRL_IOC | CTRL_EOF)); ++ } ++ ++ /* save the packet */ ++ di->txp[PREVTXD(txout)] = p0; ++ ++ /* bump the tx descriptor index */ ++ di->txout = txout; ++ ++ /* kick the chip */ ++ if (commit) { ++ W_REG(di->osh, &di->d32txregs->ptr, I2B(txout, dma32dd_t)); ++ } ++ ++ /* tx flow control */ ++ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; ++ ++ return (0); ++ ++outoftxd: ++ DMA_ERROR(("%s: dma_txfast: out of txds\n", di->name)); ++ PKTFREE(di->osh, p0, TRUE); ++ di->hnddma.txavail = 0; ++ di->hnddma.txnobuf++; ++ di->hnddma.txnodesc++; ++ return (-1); ++} ++ ++/* ++ * Reclaim next completed txd (txds if using chained buffers) in the range ++ * specified and return associated packet. ++ * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have be ++ * transmitted as noted by the hardware "CurrDescr" pointer. ++ * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have be ++ * transfered by the DMA as noted by the hardware "ActiveDescr" pointer. 
++ * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and ++ * return associated packet regardless of the value of hardware pointers. ++ */ ++static void * ++dma32_getnexttxp(dma_info_t *di, txd_range_t range) ++{ ++ uint16 start, end, i; ++ uint16 active_desc; ++ void *txp; ++ ++ DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, ++ (range == HNDDMA_RANGE_ALL) ? "all" : ++ ((range == HNDDMA_RANGE_TRANSMITTED) ? "transmitted" : "transfered"))); ++ ++ if (di->ntxd == 0) { ++ return (NULL); ++ } ++ ++ txp = NULL; ++ ++ start = di->txin; ++ if (range == HNDDMA_RANGE_ALL) { ++ end = di->txout; ++ } else { ++ dma32regs_t *dregs = di->d32txregs; ++ ++ if (di->txin == di->xs0cd) { ++ end = (uint16)B2I(R_REG(di->osh, &dregs->status) & XS_CD_MASK, dma32dd_t); ++ di->xs0cd = end; ++ } else { ++ end = di->xs0cd; ++ } ++ ++ if (range == HNDDMA_RANGE_TRANSFERED) { ++ active_desc = (uint16)((R_REG(di->osh, &dregs->status) & XS_AD_MASK) >> ++ XS_AD_SHIFT); ++ active_desc = (uint16)B2I(active_desc, dma32dd_t); ++ if (end != active_desc) { ++ end = PREVTXD(active_desc); ++ } ++ } ++ } ++ ++ if ((start == 0) && (end > di->txout)) { ++ goto bogus; ++ } ++ ++ for (i = start; i != end && !txp; i = NEXTTXD(i)) { ++ dmaaddr_t pa; ++ hnddma_seg_map_t *map = NULL; ++ uint size, j, nsegs; ++ ++ PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->txd32[i].addr)) - di->dataoffsetlow)); ++ PHYSADDRHISET(pa, 0); ++ ++ if (DMASGLIST_ENAB) { ++ map = &di->txp_dmah[i]; ++ size = map->origsize; ++ nsegs = map->nsegs; ++ } else { ++ size = (BUS_SWAP32(R_SM(&di->txd32[i].ctrl)) & CTRL_BC_MASK); ++ nsegs = 1; ++ } ++ ++ for (j = nsegs; j > 0; j--) { ++ W_SM(&di->txd32[i].addr, 0xdeadbeef); ++ ++ txp = di->txp[i]; ++ di->txp[i] = NULL; ++ if (j > 1) { ++ i = NEXTTXD(i); ++ } ++ } ++ ++#ifndef CONFIG_BCM_IPROC_GMAC_ACP ++ DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map); ++#endif /* ! 
CONFIG_BCM_IPROC_GMAC_ACP */ ++ } ++ ++ di->txin = i; ++ ++ /* tx flow control */ ++ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; ++ ++ return (txp); ++ ++bogus: ++ DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", ++ start, end, di->txout, forceall)); ++ return (NULL); ++} ++ ++static void * ++dma32_getnextrxp(dma_info_t *di, bool forceall) ++{ ++ uint16 i, curr; ++ void *rxp; ++ dmaaddr_t pa; ++ /* if forcing, dma engine must be disabled */ ++ ASSERT(!forceall || !dma32_rxenabled(di)); ++ ++ i = di->rxin; ++ ++ /* return if no packets posted */ ++ if (i == di->rxout) { ++ return (NULL); ++ } ++ ++ if (di->rxin == di->rs0cd) { ++ curr = (uint16)B2I(R_REG(di->osh, &di->d32rxregs->status) & RS_CD_MASK, dma32dd_t); ++ di->rs0cd = curr; ++ } else { ++ curr = di->rs0cd; ++ } ++ ++ /* ignore curr if forceall */ ++ if (!forceall && (i == curr)) { ++ return (NULL); ++ } ++ ++ /* get the packet pointer that corresponds to the rx descriptor */ ++ rxp = di->rxp[i]; ++ ASSERT(rxp); ++ di->rxp[i] = NULL; ++ ++ PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->rxd32[i].addr)) - di->dataoffsetlow)); ++ PHYSADDRHISET(pa, 0); ++ ++ /* clear this packet from the descriptor ring */ ++#ifndef CONFIG_BCM_IPROC_GMAC_ACP ++ DMA_UNMAP(di->osh, pa, ++ di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]); ++#endif /* ! CONFIG_BCM_IPROC_GMAC_ACP */ ++ ++ W_SM(&di->rxd32[i].addr, 0xdeadbeef); ++ ++ di->rxin = NEXTRXD(i); ++ ++ return (rxp); ++} ++ ++/* ++ * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin). 
++ */ ++static void ++dma32_txrotate(dma_info_t *di) ++{ ++ uint16 ad; ++ uint nactive; ++ uint rot; ++ uint16 old, new; ++ uint32 w; ++ uint16 first, last; ++ ++ ASSERT(dma32_txsuspendedidle(di)); ++ ++ nactive = _dma_txactive(di); ++ ad = B2I(((R_REG(di->osh, &di->d32txregs->status) & XS_AD_MASK) >> XS_AD_SHIFT), dma32dd_t); ++ rot = TXD(ad - di->txin); ++ ++ ASSERT(rot < di->ntxd); ++ ++ /* full-ring case is a lot harder - don't worry about this */ ++ if (rot >= (di->ntxd - nactive)) { ++ DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name)); ++ return; ++ } ++ ++ first = di->txin; ++ last = PREVTXD(di->txout); ++ ++ /* move entries starting at last and moving backwards to first */ ++ for (old = last; old != PREVTXD(first); old = PREVTXD(old)) { ++ new = TXD(old + rot); ++ ++ /* ++ * Move the tx dma descriptor. ++ * EOT is set only in the last entry in the ring. ++ */ ++ w = BUS_SWAP32(R_SM(&di->txd32[old].ctrl)) & ~CTRL_EOT; ++ if (new == (di->ntxd - 1)) { ++ w |= CTRL_EOT; ++ } ++ W_SM(&di->txd32[new].ctrl, BUS_SWAP32(w)); ++ W_SM(&di->txd32[new].addr, R_SM(&di->txd32[old].addr)); ++ ++ /* zap the old tx dma descriptor address field */ ++ W_SM(&di->txd32[old].addr, BUS_SWAP32(0xdeadbeef)); ++ ++ /* move the corresponding txp[] entry */ ++ ASSERT(di->txp[new] == NULL); ++ di->txp[new] = di->txp[old]; ++ ++ /* Move the segment map as well */ ++ if (DMASGLIST_ENAB) { ++ bcopy(&di->txp_dmah[old], &di->txp_dmah[new], sizeof(hnddma_seg_map_t)); ++ bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t)); ++ } ++ ++ di->txp[old] = NULL; ++ } ++ ++ /* update txin and txout */ ++ di->txin = ad; ++ di->txout = TXD(di->txout + rot); ++ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; ++ ++ /* kick the chip */ ++ W_REG(di->osh, &di->d32txregs->ptr, I2B(di->txout, dma32dd_t)); ++} ++ ++/* 64-bit DMA functions */ ++ ++static void ++dma64_txinit(dma_info_t *di) ++{ ++ uint32 control; ++ ++ DMA_TRACE(("%s: dma_txinit\n", di->name)); ++ ++ if (di->ntxd 
== 0) { ++ return; ++ } ++ ++ di->txin = di->txout = di->xs0cd = di->xs0cd_snapshot = 0; ++ di->hnddma.txavail = di->ntxd - 1; ++ ++ /* clear tx descriptor ring */ ++ BZERO_SM((void *)(uintptr)di->txd64, (di->ntxd * sizeof(dma64dd_t))); ++ ++ /* These bits 20:18 (burstLen) of control register can be written but will take ++ * effect only if these bits are valid. So this will not affect previous versions ++ * of the DMA. They will continue to have those bits set to 0. ++ */ ++ control = R_REG(di->osh, &di->d64txregs->control); ++ control = (control & ~D64_XC_BL_MASK) | (di->txburstlen << D64_XC_BL_SHIFT); ++ control = (control & ~D64_XC_MR_MASK) | (di->txmultioutstdrd << D64_XC_MR_SHIFT); ++ control = (control & ~D64_XC_PC_MASK) | (di->txprefetchctl << D64_XC_PC_SHIFT); ++ control = (control & ~D64_XC_PT_MASK) | (di->txprefetchthresh << D64_XC_PT_SHIFT); ++ W_REG(di->osh, &di->d64txregs->control, control); ++ ++ control = D64_XC_XE; ++ /* DMA engine with out alignment requirement requires table to be inited ++ * before enabling the engine ++ */ ++ if (!di->aligndesc_4k) { ++ _dma_ddtable_init(di, DMA_TX, di->txdpa); ++ } ++ ++ if ((di->hnddma.dmactrlflags & DMA_CTRL_PEN) == 0) { ++ control |= D64_XC_PD; ++ } ++ OR_REG(di->osh, &di->d64txregs->control, control); ++ ++ /* DMA engine with alignment requirement requires table to be inited ++ * before enabling the engine ++ */ ++ if (di->aligndesc_4k) { ++ _dma_ddtable_init(di, DMA_TX, di->txdpa); ++ } ++} ++ ++static bool ++dma64_txenabled(dma_info_t *di) ++{ ++ uint32 xc; ++ ++ /* If the chip is dead, it is not enabled :-) */ ++ xc = R_REG(di->osh, &di->d64txregs->control); ++ return ((xc != 0xffffffff) && (xc & D64_XC_XE)); ++} ++ ++static void ++dma64_txsuspend(dma_info_t *di) ++{ ++ DMA_TRACE(("%s: dma_txsuspend\n", di->name)); ++ ++ if (di->ntxd == 0) { ++ return; ++ } ++ ++ OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE); ++} ++ ++static void ++dma64_txresume(dma_info_t *di) ++{ ++ DMA_TRACE(("%s: 
dma_txresume\n", di->name)); ++ ++ if (di->ntxd == 0) { ++ return; ++ } ++ ++ AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_SE); ++} ++ ++static bool ++dma64_txsuspended(dma_info_t *di) ++{ ++ return (di->ntxd == 0) || ++ ((R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE) == D64_XC_SE); ++} ++ ++#ifdef WL_MULTIQUEUE ++static void ++dma64_txflush(dma_info_t *di) ++{ ++ DMA_TRACE(("%s: dma_txflush\n", di->name)); ++ ++ if (di->ntxd == 0) { ++ return; ++ } ++ ++ OR_REG(di->osh, &di->d64txregs->control, D64_XC_SE | D64_XC_FL); ++} ++ ++static void ++dma64_txflush_clear(dma_info_t *di) ++{ ++ uint32 status; ++ ++ DMA_TRACE(("%s: dma_txflush_clear\n", di->name)); ++ ++ if (di->ntxd == 0) { ++ return; ++ } ++ ++ SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) != ++ D64_XS0_XS_DISABLED) && ++ (status != D64_XS0_XS_IDLE) && ++ (status != D64_XS0_XS_STOPPED), ++ 10000); ++ AND_REG(di->osh, &di->d64txregs->control, ~D64_XC_FL); ++} ++#endif /* WL_MULTIQUEUE */ ++ ++static void BCMFASTPATH ++dma64_txreclaim(dma_info_t *di, txd_range_t range) ++{ ++ void *p; ++ ++ DMA_TRACE(("%s: dma_txreclaim %s\n", di->name, ++ (range == HNDDMA_RANGE_ALL) ? "all" : ++ ((range == HNDDMA_RANGE_TRANSMITTED) ? 
"transmitted" : "transfered"))); ++ ++ if (di->txin == di->txout) { ++ return; ++ } ++ ++ while ((p = dma64_getnexttxp(di, range))) { ++ /* For unframed data, we don't have any packets to free */ ++ if (!(di->hnddma.dmactrlflags & DMA_CTRL_UNFRAMED)) { ++ PKTFREE(di->osh, p, TRUE); ++ } ++ } ++} ++ ++static bool ++dma64_txstopped(dma_info_t *di) ++{ ++ return ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_STOPPED); ++} ++ ++static bool ++dma64_rxstopped(dma_info_t *di) ++{ ++ return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK) == D64_RS0_RS_STOPPED); ++} ++ ++static bool ++dma64_alloc(dma_info_t *di, uint direction) ++{ ++ uint32 size; ++ uint ddlen; ++ void *va; ++ uint alloced = 0; ++ uint32 align; ++ uint16 align_bits; ++ ++ ddlen = sizeof(dma64dd_t); ++ ++ size = (direction == DMA_TX) ? (di->ntxd * ddlen) : (di->nrxd * ddlen); ++ align_bits = di->dmadesc_align; ++ align = (1 << align_bits); ++ ++ if (direction == DMA_TX) { ++ if ((va = dma_ringalloc(di->osh, ++ (di->d64_xs0_cd_mask == 0x1fff) ? 
D64RINGBOUNDARY : D64RINGBOUNDARY_LARGE, ++ size, &align_bits, &alloced, ++ &di->txdpaorig, &di->tx_dmah)) == NULL) { ++ DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(ntxd) failed\n", ++ di->name)); ++ return FALSE; ++ } ++ align = (1 << align_bits); ++ ++ /* adjust the pa by rounding up to the alignment */ ++ PHYSADDRLOSET(di->txdpa, ROUNDUP(PHYSADDRLO(di->txdpaorig), align)); ++ PHYSADDRHISET(di->txdpa, PHYSADDRHI(di->txdpaorig)); ++ ++ /* Make sure that alignment didn't overflow */ ++ ASSERT(PHYSADDRLO(di->txdpa) >= PHYSADDRLO(di->txdpaorig)); ++ ++ /* find the alignment offset that was used */ ++ di->txdalign = (uint)(PHYSADDRLO(di->txdpa) - PHYSADDRLO(di->txdpaorig)); ++ ++ /* adjust the va by the same offset */ ++ di->txd64 = (dma64dd_t *)((uintptr)va + di->txdalign); ++ ++ /* printk("%s di->txd64(0x%x-0x%x) \n", __FUNCTION__, PHYSADDRHI(di->txd64), PHYSADDRLO(di->txd64)); */ ++ /* printk("%s di->txdpa(0x%x-0x%x) \n", __FUNCTION__, PHYSADDRHI(di->txdpa), PHYSADDRLO(di->txdpa)); */ ++ di->txdalloc = alloced; ++ ASSERT(ISALIGNED(PHYSADDRLO(di->txdpa), align)); ++ } else { ++ if ((va = dma_ringalloc(di->osh, ++ (di->d64_rs0_cd_mask == 0x1fff) ? 
D64RINGBOUNDARY : D64RINGBOUNDARY_LARGE, ++ size, &align_bits, &alloced, ++ &di->rxdpaorig, &di->rx_dmah)) == NULL) { ++ DMA_ERROR(("%s: dma64_alloc: DMA_ALLOC_CONSISTENT(nrxd) failed\n", ++ di->name)); ++ return FALSE; ++ } ++ align = (1 << align_bits); ++ ++ /* adjust the pa by rounding up to the alignment */ ++ PHYSADDRLOSET(di->rxdpa, ROUNDUP(PHYSADDRLO(di->rxdpaorig), align)); ++ PHYSADDRHISET(di->rxdpa, PHYSADDRHI(di->rxdpaorig)); ++ ++ /* Make sure that alignment didn't overflow */ ++ ASSERT(PHYSADDRLO(di->rxdpa) >= PHYSADDRLO(di->rxdpaorig)); ++ ++ /* find the alignment offset that was used */ ++ di->rxdalign = (uint)(PHYSADDRLO(di->rxdpa) - PHYSADDRLO(di->rxdpaorig)); ++ ++ /* adjust the va by the same offset */ ++ di->rxd64 = (dma64dd_t *)((uintptr)va + di->rxdalign); ++ ++ /* printk("%s di->rxd64(0x%x-0x%x) \n", __FUNCTION__, PHYSADDRHI(di->rxd64), PHYSADDRLO(di->rxd64)); */ ++ /* printk("%s di->rxdpa(0x%x-0x%x) \n", __FUNCTION__, PHYSADDRHI(di->rxdpa), PHYSADDRLO(di->rxdpa)); */ ++ di->rxdalloc = alloced; ++ ASSERT(ISALIGNED(PHYSADDRLO(di->rxdpa), align)); ++ } ++ ++ return TRUE; ++} ++ ++static bool ++dma64_txreset(dma_info_t *di) ++{ ++ uint32 status; ++ ++ if (di->ntxd == 0) { ++ return TRUE; ++ } ++ ++ /* suspend tx DMA first */ ++ W_REG(di->osh, &di->d64txregs->control, D64_XC_SE); ++ ++ SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) != ++ D64_XS0_XS_DISABLED) && ++ (status != D64_XS0_XS_IDLE) && ++ (status != D64_XS0_XS_STOPPED), ++ 10000); ++ ++ W_REG(di->osh, &di->d64txregs->control, 0); ++ ++ SPINWAIT(((status = (R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK)) != ++ D64_XS0_XS_DISABLED), ++ 10000); ++ ++ /* We should be disabled at this point */ ++ if (status != D64_XS0_XS_DISABLED) { ++ DMA_ERROR(("%s: status != D64_XS0_XS_DISABLED 0x%x\n", __FUNCTION__, status)); ++ ASSERT(status == D64_XS0_XS_DISABLED); ++ OSL_DELAY(300); ++ } ++ ++ return (status == D64_XS0_XS_DISABLED); ++} ++ ++static bool 
++dma64_rxidle(dma_info_t *di) ++{ ++ DMA_TRACE(("%s: dma_rxidle\n", di->name)); ++ ++ if (di->nrxd == 0) { ++ return TRUE; ++ } ++ ++ return ((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) == ++ (R_REG(di->osh, &di->d64rxregs->ptr) & D64_RS0_CD_MASK)); ++} ++ ++static bool ++dma64_rxreset(dma_info_t *di) ++{ ++ uint32 status; ++ ++ if (di->nrxd == 0) { ++ return TRUE; ++ } ++ ++ W_REG(di->osh, &di->d64rxregs->control, 0); ++ ++ SPINWAIT(((status = (R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_RS_MASK)) != ++ D64_RS0_RS_DISABLED), 10000); ++ ++ return (status == D64_RS0_RS_DISABLED); ++} ++ ++static bool ++dma64_rxenabled(dma_info_t *di) ++{ ++ uint32 rc; ++ ++ rc = R_REG(di->osh, &di->d64rxregs->control); ++ return ((rc != 0xffffffff) && (rc & D64_RC_RE)); ++} ++ ++static bool ++dma64_txsuspendedidle(dma_info_t *di) ++{ ++ ++ if (di->ntxd == 0) { ++ return TRUE; ++ } ++ ++ if (!(R_REG(di->osh, &di->d64txregs->control) & D64_XC_SE)) { ++ return 0; ++ } ++ ++ if ((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_XS_MASK) == D64_XS0_XS_IDLE) { ++ return 1; ++ } ++ ++ return 0; ++} ++ ++/* Useful when sending unframed data. This allows us to get a progress report from the DMA. ++ * We return a pointer to the beginning of the data buffer of the current descriptor. ++ * If DMA is idle, we return NULL. 
++ */ ++static void* ++dma64_getpos(dma_info_t *di, bool direction) ++{ ++ void *va; ++ bool idle; ++ uint16 cur_idx; ++ ++ if (direction == DMA_TX) { ++ cur_idx = B2I(((R_REG(di->osh, &di->d64txregs->status0) & D64_XS0_CD_MASK) - ++ di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t); ++ idle = !NTXDACTIVE(di->txin, di->txout); ++ va = di->txp[cur_idx]; ++ } else { ++ cur_idx = B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) - ++ di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t); ++ idle = !NRXDACTIVE(di->rxin, di->rxout); ++ va = di->rxp[cur_idx]; ++ } ++ ++ /* If DMA is IDLE, return NULL */ ++ if (idle) { ++ DMA_TRACE(("%s: DMA idle, return NULL\n", __FUNCTION__)); ++ va = NULL; ++ } ++ ++ return va; ++} ++ ++/* TX of unframed data ++ * ++ * Adds a DMA ring descriptor for the data pointed to by "buf". ++ * This is for DMA of a buffer of data and is unlike other hnddma TX functions ++ * that take a pointer to a "packet" ++ * Each call to this is results in a single descriptor being added for "len" bytes of ++ * data starting at "buf", it doesn't handle chained buffers. 
++ */ ++static int ++dma64_txunframed(dma_info_t *di, void *buf, uint len, bool commit) ++{ ++ uint16 txout; ++ uint32 flags = 0; ++ dmaaddr_t pa; /* phys addr */ ++ ++ txout = di->txout; ++ ++ /* return nonzero if out of tx descriptors */ ++ if (NEXTTXD(txout) == di->txin) { ++ goto outoftxd; ++ } ++ ++ if (len == 0) { ++ return 0; ++ } ++ ++#if defined(CONFIG_BCM_IPROC_GMAC_ACP) && !defined(BCMDMASGLISTOSL) ++ pa = virt_to_phys(buf); ++#else ++ pa = DMA_MAP(di->osh, buf, len, DMA_TX, NULL, &di->txp_dmah[txout]); ++#endif /* defined(CONFIG_BCM_IPROC_GMAC_ACP) && !defined(BCMDMASGLISTOSL) */ ++ ++ flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF); ++ ++ if (txout == (di->ntxd - 1)) { ++ flags |= D64_CTRL1_EOT; ++ } ++ ++ dma64_dd_upd(di, di->txd64, pa, txout, &flags, len); ++ ASSERT(di->txp[txout] == NULL); ++ ++ /* save the buffer pointer - used by dma_getpos */ ++ di->txp[txout] = buf; ++ ++ txout = NEXTTXD(txout); ++ /* bump the tx descriptor index */ ++ di->txout = txout; ++ ++ /* kick the chip */ ++ if (commit) { ++ W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(txout, dma64dd_t)); ++ } ++ ++ /* tx flow control */ ++ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; ++ ++ return (0); ++ ++outoftxd: ++ DMA_ERROR(("%s: %s: out of txds !!!\n", di->name, __FUNCTION__)); ++ di->hnddma.txavail = 0; ++ di->hnddma.txnobuf++; ++ return (-1); ++} ++ ++/* RX of unframed data ++ * ++ * Adds a DMA ring descriptor for the data pointed to by "buf". ++ * This is for DMA of a buffer of data and is unlike other hnddma TX functions ++ * that take a pointer to a "packet" ++ * Each call to this is results in a single descriptor being added for "len" bytes of ++ * data starting at "buf", it doesn't handle chained buffers. 
++ */ ++static int ++dma64_rxunframed(dma_info_t *di, void *buf, uint len, bool commit) ++{ ++ uint16 rxout; ++ uint32 flags = 0; ++ dmaaddr_t pa; /* phys addr */ ++ ++ rxout = di->rxout; ++ ++ /* return nonzero if out of rx descriptors */ ++ if (NEXTRXD(rxout) == di->rxin) { ++ goto outofrxd; ++ } ++ ++ if (len == 0) { ++ return 0; ++ } ++ ++#if defined(CONFIG_BCM_IPROC_GMAC_ACP) && !defined(BCMDMASGLISTOSL) ++ pa = virt_to_phys(buf); ++#else ++ pa = DMA_MAP(di->osh, buf, len, DMA_RX, NULL, &di->rxp_dmah[rxout]); ++#endif /* defined(CONFIG_BCM_IPROC_GMAC_ACP) && !defined(BCMDMASGLISTOSL) */ ++ ++ flags = (D64_CTRL1_SOF | D64_CTRL1_IOC | D64_CTRL1_EOF); ++ ++ if (rxout == (di->nrxd - 1)) { ++ flags |= D64_CTRL1_EOT; ++ } ++ ++ dma64_dd_upd(di, di->rxd64, pa, rxout, &flags, len); ++ ASSERT(di->rxp[rxout] == NULL); ++ ++ /* save the buffer pointer - used by dma_getpos */ ++ di->rxp[rxout] = buf; ++ ++ rxout = NEXTRXD(rxout); ++ /* bump the tx descriptor index */ ++ di->rxout = rxout; ++ ++ /* kick the chip */ ++ if (commit) { ++ W_REG(di->osh, &di->d64rxregs->ptr, di->rcvptrbase + I2B(rxout, dma64dd_t)); ++ //DBG("%s (Control Reg)W_REG: 0x%x Value: 0x%x\n", __FUNCTION__, &di->d64rxregs->ptr, di->rcvptrbase + I2B(rxout, dma64dd_t)); ++ } ++ ++ /* tx flow control */ ++ //di->hnddma.rxavail = di->nrxd - NRXDACTIVE(di->rxin, di->rxout) - 1; ++ ++ return (0); ++ ++outofrxd: ++ DMA_ERROR(("%s: %s: out of rxds !!!\n", di->name, __FUNCTION__)); ++ //di->hnddma.rxavail = 0; ++ di->hnddma.rxnobuf++; ++ return (-1); ++} ++ ++/* !! tx entry routine ++ * WARNING: call must check the return value for error. 
++ * the error(toss frames) could be fatal and cause many subsequent hard to debug problems ++ */ ++static int BCMFASTPATH ++dma64_txfast(dma_info_t *di, void *p0, bool commit) ++{ ++ void *p, *next; ++ uchar *data; ++ uint len; ++ uint16 txout; ++ uint32 flags = 0; ++ dmaaddr_t pa; ++ bool war; ++ ++ DMA_TRACE(("%s: dma_txfast\n", di->name)); ++ ++ txout = di->txout; ++ war = (di->hnddma.dmactrlflags & DMA_CTRL_DMA_AVOIDANCE_WAR) ? TRUE : FALSE; ++ ++ /* ++ * Walk the chain of packet buffers ++ * allocating and initializing transmit descriptor entries. ++ */ ++ for (p = p0; p; p = next) { ++ uint nsegs, j, segsadd; ++ hnddma_seg_map_t *map = NULL; ++ ++ data = PKTDATA(di->osh, p); ++ len = PKTLEN(di->osh, p); ++#ifdef BCM_DMAPAD ++ len += PKTDMAPAD(di->osh, p); ++#endif /* BCM_DMAPAD */ ++ next = PKTNEXT(di->osh, p); ++ ++#ifdef CONFIG_BCM_IPROC_GMAC_PREFETCH ++ prefetch_range(next, SKB_PREFETCH_LEN); ++#endif ++ ++ /* return nonzero if out of tx descriptors */ ++ if (NEXTTXD(txout) == di->txin) { ++ goto outoftxd; ++ } ++ ++ if (len == 0) { ++ continue; ++ } ++ ++ /* get physical address of buffer start */ ++ if (DMASGLIST_ENAB) { ++ bzero(&di->txp_dmah[txout], sizeof(hnddma_seg_map_t)); ++ } ++ ++#if defined(CONFIG_BCM_IPROC_GMAC_ACP) && !defined(BCMDMASGLISTOSL) ++ pa = virt_to_phys(data); ++#else ++ pa = DMA_MAP(di->osh, data, len, DMA_TX, p, &di->txp_dmah[txout]); ++#endif /* defined(CONFIG_BCM_IPROC_GMAC_ACP) && !defined(BCMDMASGLISTOSL) */ ++ ++ ++ if (DMASGLIST_ENAB) { ++ map = &di->txp_dmah[txout]; ++ ++ /* See if all the segments can be accounted for */ ++ if (map->nsegs > (uint)(di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1)) { ++ goto outoftxd; ++ } ++ ++ nsegs = map->nsegs; ++ } else { ++ nsegs = 1; ++ } ++ ++ segsadd = 0; ++ for (j = 1; j <= nsegs; j++) { ++ flags = 0; ++ if (p == p0 && j == 1) { ++ flags |= D64_CTRL1_SOF; ++ } ++ ++ /* With a DMA segment list, Descriptor table is filled ++ * using the segment list instead of looping over ++ * 
buffers in multi-chain DMA. Therefore, EOF for SGLIST is when ++ * end of segment list is reached. ++ */ ++ if ((!DMASGLIST_ENAB && next == NULL) || ++ (DMASGLIST_ENAB && j == nsegs)) { ++ flags |= (D64_CTRL1_IOC | D64_CTRL1_EOF); ++ } ++ if (txout == (di->ntxd - 1)) { ++ flags |= D64_CTRL1_EOT; ++ } ++ ++ if (DMASGLIST_ENAB) { ++ len = map->segs[j - 1].length; ++ pa = map->segs[j - 1].addr; ++ if (len > 128 && war) { ++ uint remain, new_len, align64; ++ /* check for 64B aligned of pa */ ++ align64 = (uint)(PHYSADDRLO(pa) & 0x3f); ++ align64 = (64 - align64) & 0x3f; ++ new_len = len - align64; ++ remain = new_len % 128; ++ if (remain > 0 && remain <= 4) { ++ uint32 buf_addr_lo; ++ uint32 tmp_flags = ++ flags & (~(D64_CTRL1_EOF | D64_CTRL1_IOC)); ++ flags &= ~(D64_CTRL1_SOF | D64_CTRL1_EOT); ++ remain += 64; ++ dma64_dd_upd(di, di->txd64, pa, txout, ++ &tmp_flags, len-remain); ++ ASSERT(di->txp[txout] == NULL); ++ txout = NEXTTXD(txout); ++ /* return nonzero if out of tx descriptors */ ++ if (txout == di->txin) { ++ DMA_ERROR(("%s: dma_txfast: Out-of-DMA" ++ " descriptors (txin %d txout %d" ++ " nsegs %d)\n", __FUNCTION__, ++ di->txin, di->txout, nsegs)); ++ goto outoftxd; ++ } ++ if (txout == (di->ntxd - 1)) { ++ flags |= D64_CTRL1_EOT; ++ } ++ buf_addr_lo = PHYSADDRLO(pa); ++ PHYSADDRLOSET(pa, (PHYSADDRLO(pa) + (len-remain))); ++ if (PHYSADDRLO(pa) < buf_addr_lo) { ++ PHYSADDRHISET(pa, (PHYSADDRHI(pa) + 1)); ++ } ++ len = remain; ++ segsadd++; ++ di->dma_avoidance_cnt++; ++ } ++ } ++ } ++ dma64_dd_upd(di, di->txd64, pa, txout, &flags, len); ++ ASSERT(di->txp[txout] == NULL); ++ ++ txout = NEXTTXD(txout); ++ /* return nonzero if out of tx descriptors */ ++ if (txout == di->txin) { ++ DMA_ERROR(("%s: dma_txfast: Out-of-DMA descriptors" ++ " (txin %d txout %d nsegs %d)\n", __FUNCTION__, ++ di->txin, di->txout, nsegs)); ++ goto outoftxd; ++ } ++ } ++ if (segsadd && DMASGLIST_ENAB) { ++ map->nsegs += segsadd; ++ } ++ ++ /* See above. 
No need to loop over individual buffers */ ++ if (DMASGLIST_ENAB) { ++ break; ++ } ++ } ++ ++ /* if last txd eof not set, fix it */ ++ if (!(flags & D64_CTRL1_EOF)) { ++ W_SM(&di->txd64[PREVTXD(txout)].ctrl1, ++ BUS_SWAP32(flags | D64_CTRL1_IOC | D64_CTRL1_EOF)); ++ } ++ ++ /* save the packet */ ++ di->txp[PREVTXD(txout)] = p0; ++ ++ /* bump the tx descriptor index */ ++ di->txout = txout; ++ ++ /* Spin lock to prevent TX discriptor protocol errors when using SG lists */ ++ spin_lock(&di->des_lock); ++ spin_unlock(&di->des_lock); ++ ++ /* kick the chip */ ++ if (commit) { ++ W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(txout, dma64dd_t)); ++ } ++ ++ /* tx flow control */ ++ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; ++ ++ return (0); ++ ++outoftxd: ++ DMA_ERROR(("%s: dma_txfast: out of txds !!!\n", di->name)); ++ PKTFREE(di->osh, p0, TRUE); ++ di->hnddma.txavail = 0; ++ di->hnddma.txnobuf++; ++ return (-1); ++} ++ ++/* ++ * Reclaim next completed txd (txds if using chained buffers) in the range ++ * specified and return associated packet. ++ * If range is HNDDMA_RANGE_TRANSMITTED, reclaim descriptors that have be ++ * transmitted as noted by the hardware "CurrDescr" pointer. ++ * If range is HNDDMA_RANGE_TRANSFERED, reclaim descriptors that have be ++ * transfered by the DMA as noted by the hardware "ActiveDescr" pointer. ++ * If range is HNDDMA_RANGE_ALL, reclaim all txd(s) posted to the ring and ++ * return associated packet regardless of the value of hardware pointers. ++ */ ++static void * BCMFASTPATH ++dma64_getnexttxp(dma_info_t *di, txd_range_t range) ++{ ++ uint16 start, end, i; ++ uint16 active_desc; ++ void *txp; ++ ++ DMA_TRACE(("%s: dma_getnexttxp %s\n", di->name, ++ (range == HNDDMA_RANGE_ALL) ? "all" : ++ ((range == HNDDMA_RANGE_TRANSMITTED) ? 
"transmitted" : "transfered"))); ++ ++ if (di->ntxd == 0) { ++ return (NULL); ++ } ++ ++ txp = NULL; ++ ++ start = di->txin; ++ if (range == HNDDMA_RANGE_ALL) { ++ end = di->txout; ++ } else { ++ dma64regs_t *dregs = di->d64txregs; ++ ++ if (di->txin == di->xs0cd) { ++ end = (uint16)(B2I(((R_REG(di->osh, &dregs->status0) & D64_XS0_CD_MASK) - ++ di->xmtptrbase) & D64_XS0_CD_MASK, dma64dd_t)); ++ di->xs0cd = end; ++ } else { ++ end = di->xs0cd; ++ } ++ ++ if (range == HNDDMA_RANGE_TRANSFERED) { ++ active_desc = (uint16)(R_REG(di->osh, &dregs->status1) & D64_XS1_AD_MASK); ++ active_desc = (active_desc - di->xmtptrbase) & D64_XS0_CD_MASK; ++ active_desc = B2I(active_desc, dma64dd_t); ++ if (end != active_desc) { ++ end = PREVTXD(active_desc); ++ } ++ } ++ } ++ ++ if ((start == 0) && (end > di->txout)) { ++ goto bogus; ++ } ++ ++ for (i = start; i != end && !txp; i = NEXTTXD(i)) { ++ dmaaddr_t pa; ++ hnddma_seg_map_t *map = NULL; ++ uint size, j, nsegs; ++ ++#ifdef CONFIG_BCM_IPROC_GMAC_PREFETCH ++ prefetch_range(di->txp[NEXTTXD(i)], SKB_PREFETCH_LEN); ++#endif ++ ++ PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->txd64[i].addrlow)) - di->dataoffsetlow)); ++ PHYSADDRHISET(pa, (BUS_SWAP32(R_SM(&di->txd64[i].addrhigh)) - di->dataoffsethigh)); ++ ++ if (DMASGLIST_ENAB) { ++ map = &di->txp_dmah[i]; ++ size = map->origsize; ++ nsegs = map->nsegs; ++ if (nsegs > (uint)NTXDACTIVE(i, end)) { ++ di->xs0cd = i; ++ break; ++ } ++ } else { ++ size = (BUS_SWAP32(R_SM(&di->txd64[i].ctrl2)) & D64_CTRL2_BC_MASK); ++ nsegs = 1; ++ } ++ ++ for (j = nsegs; j > 0; j--) { ++ W_SM(&di->txd64[i].addrlow, 0xdeadbeef); ++ W_SM(&di->txd64[i].addrhigh, 0xdeadbeef); ++ ++ txp = di->txp[i]; ++ di->txp[i] = NULL; ++ if (j > 1) { ++ i = NEXTTXD(i); ++ } ++ } ++#ifndef CONFIG_BCM_IPROC_GMAC_ACP ++ DMA_UNMAP(di->osh, pa, size, DMA_TX, txp, map); ++#endif /* ! 
CONFIG_BCM_IPROC_GMAC_ACP */ ++ } ++ ++ di->txin = i; ++ ++ /* tx flow control */ ++ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; ++ ++ return (txp); ++ ++bogus: ++ DMA_NONE(("dma_getnexttxp: bogus curr: start %d end %d txout %d force %d\n", ++ start, end, di->txout, forceall)); ++ return (NULL); ++} ++ ++static void * BCMFASTPATH ++dma64_getnextrxp(dma_info_t *di, bool forceall) ++{ ++ uint16 i, curr; ++ void *rxp; ++ dmaaddr_t pa; ++ ++ /* if forcing, dma engine must be disabled */ ++ ASSERT(!forceall || !dma64_rxenabled(di)); ++ ++ i = di->rxin; ++ ++ /* return if no packets posted */ ++ if (i == di->rxout) { ++ return (NULL); ++ } ++ ++#ifdef CONFIG_BCM_IPROC_GMAC_PREFETCH ++ prefetch_range(di->rxp[NEXTRXD(i)], SKB_PREFETCH_LEN); ++#endif ++ ++ if (di->rxin == di->rs0cd) { ++ curr = (uint16)B2I(((R_REG(di->osh, &di->d64rxregs->status0) & D64_RS0_CD_MASK) - ++ di->rcvptrbase) & D64_RS0_CD_MASK, dma64dd_t); ++ di->rs0cd = curr; ++ } else { ++ curr = di->rs0cd; ++ } ++ ++ /* ignore curr if forceall */ ++ if (!forceall && (i == curr)) { ++ return (NULL); ++ } ++ ++ /* get the packet pointer that corresponds to the rx descriptor */ ++ rxp = di->rxp[i]; ++ ASSERT(rxp); ++ di->rxp[i] = NULL; ++ ++ PHYSADDRLOSET(pa, (BUS_SWAP32(R_SM(&di->rxd64[i].addrlow)) - di->dataoffsetlow)); ++ PHYSADDRHISET(pa, (BUS_SWAP32(R_SM(&di->rxd64[i].addrhigh)) - di->dataoffsethigh)); ++ ++ /* clear this packet from the descriptor ring */ ++#ifndef CONFIG_BCM_IPROC_GMAC_ACP ++ DMA_UNMAP(di->osh, pa, ++ di->rxbufsize, DMA_RX, rxp, &di->rxp_dmah[i]); ++#endif /* ! 
CONFIG_BCM_IPROC_GMAC_ACP */ ++ ++ W_SM(&di->rxd64[i].addrlow, 0xdeadbeef); ++ W_SM(&di->rxd64[i].addrhigh, 0xdeadbeef); ++ ++ di->rxin = NEXTRXD(i); ++ ++ return (rxp); ++} ++ ++static bool ++_dma64_addrext(osl_t *osh, dma64regs_t *dma64regs) ++{ ++ uint32 w; ++ OR_REG(osh, &dma64regs->control, D64_XC_AE); ++ w = R_REG(osh, &dma64regs->control); ++ AND_REG(osh, &dma64regs->control, ~D64_XC_AE); ++ return ((w & D64_XC_AE) == D64_XC_AE); ++} ++ ++/* ++ * Rotate all active tx dma ring entries "forward" by (ActiveDescriptor - txin). ++ */ ++static void ++dma64_txrotate(dma_info_t *di) ++{ ++ uint16 ad; ++ uint nactive; ++ uint rot; ++ uint16 old, new; ++ uint32 w; ++ uint16 first, last; ++ ++ ASSERT(dma64_txsuspendedidle(di)); ++ ++ nactive = _dma_txactive(di); ++ ad = B2I((((R_REG(di->osh, &di->d64txregs->status1) & D64_XS1_AD_MASK) ++ - di->xmtptrbase) & D64_XS1_AD_MASK), dma64dd_t); ++ rot = TXD(ad - di->txin); ++ ++ ASSERT(rot < di->ntxd); ++ ++ /* full-ring case is a lot harder - don't worry about this */ ++ if (rot >= (di->ntxd - nactive)) { ++ DMA_ERROR(("%s: dma_txrotate: ring full - punt\n", di->name)); ++ return; ++ } ++ ++ first = di->txin; ++ last = PREVTXD(di->txout); ++ ++ /* move entries starting at last and moving backwards to first */ ++ for (old = last; old != PREVTXD(first); old = PREVTXD(old)) { ++ new = TXD(old + rot); ++ ++ /* ++ * Move the tx dma descriptor. ++ * EOT is set only in the last entry in the ring. 
++ */ ++ w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl1)) & ~D64_CTRL1_EOT; ++ if (new == (di->ntxd - 1)) { ++ w |= D64_CTRL1_EOT; ++ } ++ W_SM(&di->txd64[new].ctrl1, BUS_SWAP32(w)); ++ ++ w = BUS_SWAP32(R_SM(&di->txd64[old].ctrl2)); ++ W_SM(&di->txd64[new].ctrl2, BUS_SWAP32(w)); ++ ++ W_SM(&di->txd64[new].addrlow, R_SM(&di->txd64[old].addrlow)); ++ W_SM(&di->txd64[new].addrhigh, R_SM(&di->txd64[old].addrhigh)); ++ ++ /* zap the old tx dma descriptor address field */ ++ W_SM(&di->txd64[old].addrlow, BUS_SWAP32(0xdeadbeef)); ++ W_SM(&di->txd64[old].addrhigh, BUS_SWAP32(0xdeadbeef)); ++ ++ /* move the corresponding txp[] entry */ ++ ASSERT(di->txp[new] == NULL); ++ di->txp[new] = di->txp[old]; ++ ++ /* Move the map */ ++ if (DMASGLIST_ENAB) { ++ bcopy(&di->txp_dmah[old], &di->txp_dmah[new], sizeof(hnddma_seg_map_t)); ++ bzero(&di->txp_dmah[old], sizeof(hnddma_seg_map_t)); ++ } ++ ++ di->txp[old] = NULL; ++ } ++ ++ /* update txin and txout */ ++ di->txin = ad; ++ di->txout = TXD(di->txout + rot); ++ di->hnddma.txavail = di->ntxd - NTXDACTIVE(di->txin, di->txout) - 1; ++ ++ /* kick the chip */ ++ W_REG(di->osh, &di->d64txregs->ptr, di->xmtptrbase + I2B(di->txout, dma64dd_t)); ++} ++ ++uint ++BCMATTACHFN(dma_addrwidth)(si_t *sih, void *dmaregs) ++{ ++ dma32regs_t *dma32regs; ++ osl_t *osh; ++ ++ osh = si_osh(sih); ++ ++ /* Perform 64-bit checks only if we want to advertise 64-bit (> 32bit) capability) */ ++ /* DMA engine is 64-bit capable */ ++ if ((si_core_sflags(sih, 0, 0) & SISF_DMA64) == SISF_DMA64) { ++ /* backplane are 64-bit capable */ ++ if (si_backplane64(sih)) { ++ /* If bus is System Backplane or PCIE then we can access 64-bits */ ++ if ((BUSTYPE(sih->bustype) == SI_BUS) || ++ ((BUSTYPE(sih->bustype) == PCI_BUS) && ++ ((sih->buscoretype == PCIE_CORE_ID) || ++ (sih->buscoretype == PCIE2_CORE_ID)))) { ++ return (DMADDRWIDTH_64); ++ } ++ } ++ ++ /* DMA64 is always 32-bit capable, AE is always TRUE */ ++ ASSERT(_dma64_addrext(osh, (dma64regs_t *)dmaregs)); ++ ++ 
return (DMADDRWIDTH_32); ++ } ++ ++ /* Start checking for 32-bit / 30-bit addressing */ ++ dma32regs = (dma32regs_t *)dmaregs; ++ ++ /* For System Backplane, PCIE bus or addrext feature, 32-bits ok */ ++ if ((BUSTYPE(sih->bustype) == SI_BUS) || ++ ((BUSTYPE(sih->bustype) == PCI_BUS) && ++ ((sih->buscoretype == PCIE_CORE_ID) || ++ (sih->buscoretype == PCIE2_CORE_ID))) || ++ (_dma32_addrext(osh, dma32regs))) { ++ return (DMADDRWIDTH_32); ++ } ++ ++ /* Fallthru */ ++ return (DMADDRWIDTH_30); ++} ++ ++static int ++_dma_pktpool_set(dma_info_t *di, pktpool_t *pool) ++{ ++ ASSERT(di); ++ ASSERT(di->pktpool == NULL); ++ di->pktpool = pool; ++ return 0; ++} ++ ++static bool ++_dma_rxtx_error(dma_info_t *di, bool istx) ++{ ++ uint32 status1 = 0; ++ uint16 curr; ++ ++ if (DMA64_ENAB(di) && DMA64_MODE(di)) { ++ if (istx) { ++ status1 = R_REG(di->osh, &di->d64txregs->status1); ++ ++ if ((status1 & D64_XS1_XE_MASK) != D64_XS1_XE_NOERR) { ++ return TRUE; ++ } else if (si_coreid(di->sih) == GMAC_CORE_ID && si_corerev(di->sih) >= 4) { ++ curr = (uint16)(B2I(((R_REG(di->osh, &di->d64txregs->status0) & ++ D64_XS0_CD_MASK) - di->xmtptrbase) & ++ D64_XS0_CD_MASK, dma64dd_t)); ++ ++ if (NTXDACTIVE(di->txin, di->txout) != 0 && ++ curr == di->xs0cd_snapshot) { ++ ++ /* suspicious */ ++ return TRUE; ++ } ++ di->xs0cd_snapshot = di->xs0cd = curr; ++ ++ return FALSE; ++ } else { ++ return FALSE; ++ } ++ } ++ else { ++ ++ status1 = R_REG(di->osh, &di->d64rxregs->status1); ++ ++ if ((status1 & D64_RS1_RE_MASK) != D64_RS1_RE_NOERR) { ++ return TRUE; ++ } else { ++ return FALSE; ++ } ++ } ++ } else if (DMA32_ENAB(di)) { ++ return FALSE; ++ } else { ++ ASSERT(0); ++ return FALSE; ++ } ++ ++} ++ ++void ++_dma_burstlen_set(dma_info_t *di, uint8 rxburstlen, uint8 txburstlen) ++{ ++ di->rxburstlen = rxburstlen; ++ di->txburstlen = txburstlen; ++} ++ ++void ++_dma_param_set(dma_info_t *di, uint16 paramid, uint16 paramval) ++{ ++ switch (paramid) { ++ case HNDDMA_PID_TX_MULTI_OUTSTD_RD: ++ 
di->txmultioutstdrd = (uint8)paramval; ++ break; ++ ++ case HNDDMA_PID_TX_PREFETCH_CTL: ++ di->txprefetchctl = (uint8)paramval; ++ break; ++ ++ case HNDDMA_PID_TX_PREFETCH_THRESH: ++ di->txprefetchthresh = (uint8)paramval; ++ break; ++ ++ case HNDDMA_PID_TX_BURSTLEN: ++ di->txburstlen = (uint8)paramval; ++ break; ++ ++ case HNDDMA_PID_RX_PREFETCH_CTL: ++ di->rxprefetchctl = (uint8)paramval; ++ break; ++ ++ case HNDDMA_PID_RX_PREFETCH_THRESH: ++ di->rxprefetchthresh = (uint8)paramval; ++ break; ++ ++ case HNDDMA_PID_RX_BURSTLEN: ++ di->rxburstlen = (uint8)paramval; ++ break; ++ ++ default: ++ break; ++ } ++} ++ ++static bool ++_dma_glom_enable(dma_info_t *di, uint32 val) ++{ ++ dma64regs_t *dregs = di->d64rxregs; ++ bool ret = TRUE; ++ if (val) { ++ OR_REG(di->osh, &dregs->control, D64_RC_GE); ++ if (!(R_REG(di->osh, &dregs->control) & D64_RC_GE)) ++ ret = FALSE; ++ } else { ++ AND_REG(di->osh, &dregs->control, ~D64_RC_GE); ++ } ++ return ret; ++} +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/hr2_erom.c b/drivers/net/ethernet/broadcom/gmac/src/shared/hr2_erom.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/hr2_erom.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/hr2_erom.c 2017-11-09 17:53:44.051300000 +0800 +@@ -0,0 +1,64 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Helix4 sudo EROM ++ * ++ */ ++#include ++ ++uint32 hr2_erom[] = { ++ //#define CC_CORE_ID 0x800 /* chipcommon core */ ++ 0x4bf80001, 0x2a004201, 0x18000005, 0x181200c5, ++ //#define NS_CCB_CORE_ID 0x50b /* ChipcommonB core */ ++ 0x4bf50b01, 0x01000201, 0x18001005, 0x18002005, 0x18003005, 0x18004005, 0x18005005, 0x18006005, 0x18007005, 0x18008005, 0x18009005, ++ //#define NS_DMA_CORE_ID 0x502 /* DMA core */ ++ 0x4bf50201, 0x01004211, 0x00000003, 0x1802c005, 0x181140c5, ++ //#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ ++ 0x4bf82d01, 0x04004211, 0x00000103, 0x18022005, 0x181100c5, ++ //#define NS_PCIEG2_CORE_ID 0x501 /* PCIE Gen 2 core */ ++ 0x4bf50101, 0x01084411, 0x00000503, 0x18012005, 0x08000135, 0x08000000, 0x181010c5, 0x1810a185, ++ 0x4bf50101, 0x01084411, 0x00000603, 0x18013005, 0x40000135, 0x08000000, 0x181020c5, 0x1810b185, ++ 0x4bf50101, 0x01084411, 0x00000703, 0x18014005, 0x48000135, 0x08000000, 0x181030c5, 0x1810c185, ++ //#define ARMCA9_CORE_ID 0x510 /* ARM Cortex A9 core (ihost) */ ++ 0x4bf51001, 0x01104611, 0x00000803, 0x1800b005, 0x1800c005, 0x19000135, 0x00020000, 0x19020235, 0x00003000, 0x181000c5, 0x18106185, 0x18107285, ++ //#define NS_USB20_CORE_ID 0x504 /* USB2.0 core */ ++ 0x4bf50401, 0x01004211, 0x00000903, 0x18021005, 0x18022005, 0x181150c5, ++ //#define NS_USB30_CORE_ID 0x505 /* USB3.0 core */ ++ 0x4bf50501, 0x01004211, 0x00000a03, 0x18023005, 0x181050c5, ++ //#define NS_SDIO3_CORE_ID 0x503 /* SDIO3 core */ ++ 0x4bf50301, 0x01004211, 0x00000b03, 0x18020005, 0x181160c5, ++ //#define I2S_CORE_ID 0x834 /* I2S core */ ++ 0x4bf83401, 
0x03004211, 0x00000c03, 0x1802a005, 0x181170c5, ++ //#define NS_A9JTAG_CORE_ID 0x506 /* ARM Cortex A9 JTAG core */ ++ 0x4bf50601, 0x01084211, 0x00000d03, 0x18210035, 0x00010000, 0x181180c5, 0x1811c085, ++ //#define NS_DDR23_CORE_ID 0x507 /* Denali DDR2/DDR3 memory controller */ ++ 0x4bf50701, 0x01100601, 0x18010005, 0x00000135, 0x08000000, 0x80000135, 0x30000000, 0xb0000235, 0x10000000, 0x18108185, 0x18109285, ++ //#define NS_ROM_CORE_ID 0x508 /* ROM core */ ++ 0x4bf50801, 0x01080201, 0xfffd0035, 0x00030000, 0x1810d085, ++ //#define NS_NAND_CORE_ID 0x509 /* NAND flash controller core */ ++ 0x4bf50901, 0x01080401, 0x18028005, 0x1c000135, 0x02000000, 0x1811a185, ++ //#define NS_QSPI_CORE_ID 0x50a /* SPI flash controller core */ ++ 0x4bf50a01, 0x01080401, 0x18029005, 0x1e000135, 0x02000000, 0x1811b185, ++ //#define EROM_CORE_ID 0x366 /* EROM core ID */ ++ 0x43b36601, 0x00000201, 0x18130005, ++ 0x43b13501, 0x00080201, 0x18000075, 0x00010000, 0x18121085, ++ 0x43b30101, 0x01000201, 0x1a000035, 0x00100000, ++ 0x43bfff01, 0x00280a01, 0x10000035, 0x08000000, 0x18011005, 0x18015035, 0x0000b000, 0x1802b105, 0x1802d135, 0x000d3000, 0x18104105, 0x1810e215, ++ 0x18119205, 0x1811d235, 0x00003000, 0x18122335, 0x0000e000, 0x18131305, 0x18137335, 0x000d9000, 0x18220335, 0x000de000, 0x19023335, ++ 0x00fdd000, 0x1a100335, 0x01f00000, 0x20000435, 0x20000000, 0x50000435, 0x30000000, 0xc0000435, 0x3ffd0000, 0x18132085, 0x18133185, ++ 0x18134285, 0x18135385, 0x18136485, ++ 0x0000000f ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/hr2_erom.h b/drivers/net/ethernet/broadcom/gmac/src/shared/hr2_erom.h +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/hr2_erom.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/hr2_erom.h 2017-11-09 17:53:44.052298000 +0800 +@@ -0,0 +1,26 @@ ++/* ++ * Copyright (C) 2013, Broadcom 
Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Helix4 sudo EROM ++ * ++ */ ++ ++#ifndef _hr2_erom_h_ ++#define _hr2_erom_h_ ++ ++extern uint32 hr2_erom[]; ++ ++#endif //_hr2_erom_h_ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/hr3_erom.c b/drivers/net/ethernet/broadcom/gmac/src/shared/hr3_erom.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/hr3_erom.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/hr3_erom.c 2017-11-09 17:53:44.053293000 +0800 +@@ -0,0 +1,28 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Hurricane3 sudo EROM ++ * ++ */ ++#include ++ ++uint32 hr3_erom[] = { ++ //#define CC_CORE_ID 0x800 /* chipcommon core */ ++ 0x4bf80001, 0x2a004201, 0x18000005, 0x181200c5, ++ //#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ ++ 0x4bf82d01, 0x04004211, 0x00000103, 0x18042005, 0x181100c5, ++ 0x0000000f ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/hr3_erom.h b/drivers/net/ethernet/broadcom/gmac/src/shared/hr3_erom.h +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/hr3_erom.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/hr3_erom.h 2017-11-09 17:53:44.054288000 +0800 +@@ -0,0 +1,26 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Hurricane3 sudo EROM ++ * ++ */ ++ ++#ifndef _hr3_erom_h_ ++#define _hr3_erom_h_ ++ ++extern uint32 hr3_erom[]; ++ ++#endif /* _hr3_erom_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/hx4_erom.c b/drivers/net/ethernet/broadcom/gmac/src/shared/hx4_erom.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/hx4_erom.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/hx4_erom.c 2017-11-09 17:53:44.054324000 +0800 +@@ -0,0 +1,65 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Helix4 sudo EROM ++ * ++ */ ++#include ++ ++uint32 hx4_erom[] = { ++ //#define CC_CORE_ID 0x800 /* chipcommon core */ ++ 0x4bf80001, 0x2a004201, 0x18000005, 0x181200c5, ++ //#define NS_CCB_CORE_ID 0x50b /* ChipcommonB core */ ++ 0x4bf50b01, 0x01000201, 0x18001005, 0x18002005, 0x18003005, 0x18004005, 0x18005005, 0x18006005, 0x18007005, 0x18008005, 0x18009005, ++ //#define NS_DMA_CORE_ID 0x502 /* DMA core */ ++ 0x4bf50201, 0x01004211, 0x00000003, 0x1802c005, 0x181140c5, ++ //#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ ++ 0x4bf82d01, 0x04004211, 0x00000103, 0x18022005, 0x181100c5, ++ 0x4bf82d01, 0x04004211, 0x00000203, 0x18023005, 0x181110c5, ++ //#define NS_PCIEG2_CORE_ID 0x501 /* PCIE Gen 2 core */ ++ 0x4bf50101, 0x01084411, 0x00000503, 0x18012005, 0x08000135, 0x08000000, 0x181010c5, 0x1810a185, ++ 0x4bf50101, 0x01084411, 0x00000603, 0x18013005, 0x40000135, 0x08000000, 0x181020c5, 0x1810b185, ++ 0x4bf50101, 0x01084411, 0x00000703, 0x18014005, 0x48000135, 0x08000000, 0x181030c5, 0x1810c185, ++ //#define ARMCA9_CORE_ID 0x510 /* ARM Cortex A9 core (ihost) */ ++ 0x4bf51001, 0x01104611, 0x00000803, 0x1800b005, 0x1800c005, 0x19000135, 0x00020000, 0x19020235, 0x00003000, 0x181000c5, 0x18106185, 0x18107285, ++ //#define NS_USB20_CORE_ID 0x504 /* USB2.0 core */ ++ 0x4bf50401, 0x01004211, 0x00000903, 0x18021005, 0x18022005, 0x181150c5, ++ //#define NS_USB30_CORE_ID 0x505 /* USB3.0 core */ ++ 0x4bf50501, 0x01004211, 0x00000a03, 0x18023005, 0x181050c5, ++ //#define NS_SDIO3_CORE_ID 0x503 /* SDIO3 core */ ++ 0x4bf50301, 0x01004211, 0x00000b03, 0x18020005, 0x181160c5, ++ //#define I2S_CORE_ID 0x834 /* I2S core */ ++ 0x4bf83401, 0x03004211, 0x00000c03, 0x1802a005, 0x181170c5, ++ //#define NS_A9JTAG_CORE_ID 0x506 /* ARM Cortex A9 JTAG core */ ++ 0x4bf50601, 0x01084211, 0x00000d03, 0x18210035, 0x00010000, 0x181180c5, 0x1811c085, ++ //#define NS_DDR23_CORE_ID 0x507 /* Denali DDR2/DDR3 memory controller */ 
++ 0x4bf50701, 0x01100601, 0x18010005, 0x00000135, 0x08000000, 0x80000135, 0x30000000, 0xb0000235, 0x10000000, 0x18108185, 0x18109285, ++ //#define NS_ROM_CORE_ID 0x508 /* ROM core */ ++ 0x4bf50801, 0x01080201, 0xfffd0035, 0x00030000, 0x1810d085, ++ //#define NS_NAND_CORE_ID 0x509 /* NAND flash controller core */ ++ 0x4bf50901, 0x01080401, 0x18028005, 0x1c000135, 0x02000000, 0x1811a185, ++ //#define NS_QSPI_CORE_ID 0x50a /* SPI flash controller core */ ++ 0x4bf50a01, 0x01080401, 0x18029005, 0x1e000135, 0x02000000, 0x1811b185, ++ //#define EROM_CORE_ID 0x366 /* EROM core ID */ ++ 0x43b36601, 0x00000201, 0x18130005, ++ 0x43b13501, 0x00080201, 0x18000075, 0x00010000, 0x18121085, ++ 0x43b30101, 0x01000201, 0x1a000035, 0x00100000, ++ 0x43bfff01, 0x00280a01, 0x10000035, 0x08000000, 0x18011005, 0x18015035, 0x0000b000, 0x1802b105, 0x1802d135, 0x000d3000, 0x18104105, 0x1810e215, ++ 0x18119205, 0x1811d235, 0x00003000, 0x18122335, 0x0000e000, 0x18131305, 0x18137335, 0x000d9000, 0x18220335, 0x000de000, 0x19023335, ++ 0x00fdd000, 0x1a100335, 0x01f00000, 0x20000435, 0x20000000, 0x50000435, 0x30000000, 0xc0000435, 0x3ffd0000, 0x18132085, 0x18133185, ++ 0x18134285, 0x18135385, 0x18136485, ++ 0x0000000f ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/hx4_erom.h b/drivers/net/ethernet/broadcom/gmac/src/shared/hx4_erom.h +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/hx4_erom.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/hx4_erom.h 2017-11-09 17:53:44.055299000 +0800 +@@ -0,0 +1,26 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Helix4 sudo EROM ++ * ++ */ ++ ++#ifndef _hx4_erom_h_ ++#define _hx4_erom_h_ ++ ++extern uint32 hx4_erom[]; ++ ++#endif //_hx4_erom_h_ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/kt2_erom.c b/drivers/net/ethernet/broadcom/gmac/src/shared/kt2_erom.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/kt2_erom.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/kt2_erom.c 2017-11-09 17:53:44.056295000 +0800 +@@ -0,0 +1,65 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Helix4 sudo EROM ++ * ++ */ ++#include ++ ++uint32 kt2_erom[] = { ++ //#define CC_CORE_ID 0x800 /* chipcommon core */ ++ 0x4bf80001, 0x2a004201, 0x18000005, 0x181200c5, ++ //#define NS_CCB_CORE_ID 0x50b /* ChipcommonB core */ ++ 0x4bf50b01, 0x01000201, 0x18001005, 0x18002005, 0x18003005, 0x18004005, 0x18005005, 0x18006005, 0x18007005, 0x18008005, 0x18009005, ++ //#define NS_DMA_CORE_ID 0x502 /* DMA core */ ++ 0x4bf50201, 0x01004211, 0x00000003, 0x1802c005, 0x181140c5, ++ //#define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ ++ 0x4bf82d01, 0x04004211, 0x00000103, 0x18022005, 0x181100c5, ++ 0x4bf82d01, 0x04004211, 0x00000203, 0x18023005, 0x181110c5, ++ //#define NS_PCIEG2_CORE_ID 0x501 /* PCIE Gen 2 core */ ++ 0x4bf50101, 0x01084411, 0x00000503, 0x18012005, 0x08000135, 0x08000000, 0x181010c5, 0x1810a185, ++ 0x4bf50101, 0x01084411, 0x00000603, 0x18013005, 0x40000135, 0x08000000, 0x181020c5, 0x1810b185, ++ 0x4bf50101, 0x01084411, 0x00000703, 0x18014005, 0x48000135, 0x08000000, 0x181030c5, 0x1810c185, ++ //#define ARMCA9_CORE_ID 0x510 /* ARM Cortex A9 core (ihost) */ ++ 0x4bf51001, 0x01104611, 0x00000803, 0x1800b005, 0x1800c005, 0x19000135, 0x00020000, 0x19020235, 0x00003000, 0x181000c5, 0x18106185, 0x18107285, ++ //#define NS_USB20_CORE_ID 0x504 /* USB2.0 core */ ++ 0x4bf50401, 0x01004211, 0x00000903, 0x18021005, 0x18022005, 0x181150c5, ++ //#define NS_USB30_CORE_ID 0x505 /* USB3.0 core */ ++ 0x4bf50501, 0x01004211, 0x00000a03, 0x18023005, 0x181050c5, ++ //#define NS_SDIO3_CORE_ID 0x503 /* SDIO3 core */ ++ 0x4bf50301, 0x01004211, 0x00000b03, 0x18020005, 0x181160c5, ++ 
//#define I2S_CORE_ID 0x834 /* I2S core */ ++ 0x4bf83401, 0x03004211, 0x00000c03, 0x1802a005, 0x181170c5, ++ //#define NS_A9JTAG_CORE_ID 0x506 /* ARM Cortex A9 JTAG core */ ++ 0x4bf50601, 0x01084211, 0x00000d03, 0x18210035, 0x00010000, 0x181180c5, 0x1811c085, ++ //#define NS_DDR23_CORE_ID 0x507 /* Denali DDR2/DDR3 memory controller */ ++ 0x4bf50701, 0x01100601, 0x18010005, 0x00000135, 0x08000000, 0x80000135, 0x30000000, 0xb0000235, 0x10000000, 0x18108185, 0x18109285, ++ //#define NS_ROM_CORE_ID 0x508 /* ROM core */ ++ 0x4bf50801, 0x01080201, 0xfffd0035, 0x00030000, 0x1810d085, ++ //#define NS_NAND_CORE_ID 0x509 /* NAND flash controller core */ ++ 0x4bf50901, 0x01080401, 0x18028005, 0x1c000135, 0x02000000, 0x1811a185, ++ //#define NS_QSPI_CORE_ID 0x50a /* SPI flash controller core */ ++ 0x4bf50a01, 0x01080401, 0x18029005, 0x1e000135, 0x02000000, 0x1811b185, ++ //#define EROM_CORE_ID 0x366 /* EROM core ID */ ++ 0x43b36601, 0x00000201, 0x18130005, ++ 0x43b13501, 0x00080201, 0x18000075, 0x00010000, 0x18121085, ++ 0x43b30101, 0x01000201, 0x1a000035, 0x00100000, ++ 0x43bfff01, 0x00280a01, 0x10000035, 0x08000000, 0x18011005, 0x18015035, 0x0000b000, 0x1802b105, 0x1802d135, 0x000d3000, 0x18104105, 0x1810e215, ++ 0x18119205, 0x1811d235, 0x00003000, 0x18122335, 0x0000e000, 0x18131305, 0x18137335, 0x000d9000, 0x18220335, 0x000de000, 0x19023335, ++ 0x00fdd000, 0x1a100335, 0x01f00000, 0x20000435, 0x20000000, 0x50000435, 0x30000000, 0xc0000435, 0x3ffd0000, 0x18132085, 0x18133185, ++ 0x18134285, 0x18135385, 0x18136485, ++ 0x0000000f ++}; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/kt2_erom.h b/drivers/net/ethernet/broadcom/gmac/src/shared/kt2_erom.h +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/kt2_erom.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/kt2_erom.h 2017-11-09 17:53:44.057293000 +0800 +@@ -0,0 
+1,26 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Helix4 sudo EROM ++ * ++ */ ++ ++#ifndef _kt2_erom_h_ ++#define _kt2_erom_h_ ++ ++extern uint32 kt2_erom[]; ++ ++#endif //_kt2_erom_h_ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/linux_osl.c b/drivers/net/ethernet/broadcom/gmac/src/shared/linux_osl.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/linux_osl.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/linux_osl.c 2017-11-09 17:53:44.058299000 +0800 +@@ -0,0 +1,1266 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. 
++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Linux OS Independent Layer ++ * ++ * $Id: linux_osl.c 322208 2012-03-20 01:53:23Z $ ++ */ ++ ++#define LINUX_PORT ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#ifdef mips ++#include ++#endif /* mips */ ++#include ++ ++ ++#include ++#include ++ ++#define PCI_CFG_RETRY 10 ++ ++#define OS_HANDLE_MAGIC 0x1234abcd /* Magic # to recognize osh */ ++#define BCM_MEM_FILENAME_LEN 24 /* Mem. filename length */ ++ ++#ifdef DHD_USE_STATIC_BUF ++#define STATIC_BUF_MAX_NUM 16 ++#define STATIC_BUF_SIZE (PAGE_SIZE*2) ++#define STATIC_BUF_TOTAL_LEN (STATIC_BUF_MAX_NUM * STATIC_BUF_SIZE) ++ ++typedef struct bcm_static_buf { ++ struct semaphore static_sem; ++ unsigned char *buf_ptr; ++ unsigned char buf_use[STATIC_BUF_MAX_NUM]; ++} bcm_static_buf_t; ++ ++static bcm_static_buf_t *bcm_static_buf = 0; ++ ++#define STATIC_PKT_MAX_NUM 8 ++ ++typedef struct bcm_static_pkt { ++ struct sk_buff *skb_4k[STATIC_PKT_MAX_NUM]; ++ struct sk_buff *skb_8k[STATIC_PKT_MAX_NUM]; ++ struct semaphore osl_pkt_sem; ++ unsigned char pkt_use[STATIC_PKT_MAX_NUM * 2]; ++} bcm_static_pkt_t; ++ ++static bcm_static_pkt_t *bcm_static_skb = 0; ++#endif /* DHD_USE_STATIC_BUF */ ++ ++typedef struct bcm_mem_link { ++ struct bcm_mem_link *prev; ++ struct bcm_mem_link *next; ++ uint size; ++ int line; ++ void *osh; ++ char file[BCM_MEM_FILENAME_LEN]; ++} bcm_mem_link_t; ++ ++struct osl_info { ++ osl_pubinfo_t pub; ++ uint magic; ++ void *pdev; ++ atomic_t 
malloced; ++ atomic_t pktalloced; /* Number of allocated packet buffers */ ++ uint failed; ++ uint bustype; ++ bcm_mem_link_t *dbgmem_list; ++ spinlock_t dbgmem_lock; ++ spinlock_t pktalloc_lock; ++}; ++ ++#define OSL_PKTTAG_CLEAR(p) \ ++do { \ ++ struct sk_buff *s = (struct sk_buff *)(p); \ ++ ASSERT(OSL_PKTTAG_SZ == 32); \ ++ *(uint32 *)(&s->cb[0]) = 0; *(uint32 *)(&s->cb[4]) = 0; \ ++ *(uint32 *)(&s->cb[8]) = 0; *(uint32 *)(&s->cb[12]) = 0; \ ++ *(uint32 *)(&s->cb[16]) = 0; *(uint32 *)(&s->cb[20]) = 0; \ ++ *(uint32 *)(&s->cb[24]) = 0; *(uint32 *)(&s->cb[28]) = 0; \ ++} while (0) ++ ++/* PCMCIA attribute space access macros */ ++#if defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) ++struct pcmcia_dev { ++ dev_link_t link; /* PCMCIA device pointer */ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 35) ++ dev_node_t node; /* PCMCIA node structure */ ++#endif ++ void *base; /* Mapped attribute memory window */ ++ size_t size; /* Size of window */ ++ void *drv; /* Driver data */ ++}; ++#endif /* defined(CONFIG_PCMCIA) || defined(CONFIG_PCMCIA_MODULE) */ ++ ++/* Global ASSERT type flag */ ++uint32 g_assert_type = FALSE; ++ ++static int16 linuxbcmerrormap[] = ++{ 0, /* 0 */ ++ -EINVAL, /* BCME_ERROR */ ++ -EINVAL, /* BCME_BADARG */ ++ -EINVAL, /* BCME_BADOPTION */ ++ -EINVAL, /* BCME_NOTUP */ ++ -EINVAL, /* BCME_NOTDOWN */ ++ -EINVAL, /* BCME_NOTAP */ ++ -EINVAL, /* BCME_NOTSTA */ ++ -EINVAL, /* BCME_BADKEYIDX */ ++ -EINVAL, /* BCME_RADIOOFF */ ++ -EINVAL, /* BCME_NOTBANDLOCKED */ ++ -EINVAL, /* BCME_NOCLK */ ++ -EINVAL, /* BCME_BADRATESET */ ++ -EINVAL, /* BCME_BADBAND */ ++ -E2BIG, /* BCME_BUFTOOSHORT */ ++ -E2BIG, /* BCME_BUFTOOLONG */ ++ -EBUSY, /* BCME_BUSY */ ++ -EINVAL, /* BCME_NOTASSOCIATED */ ++ -EINVAL, /* BCME_BADSSIDLEN */ ++ -EINVAL, /* BCME_OUTOFRANGECHAN */ ++ -EINVAL, /* BCME_BADCHAN */ ++ -EFAULT, /* BCME_BADADDR */ ++ -ENOMEM, /* BCME_NORESOURCE */ ++ -EOPNOTSUPP, /* BCME_UNSUPPORTED */ ++ -EMSGSIZE, /* BCME_BADLENGTH */ ++ -EINVAL, /* 
BCME_NOTREADY */ ++ -EPERM, /* BCME_EPERM */ ++ -ENOMEM, /* BCME_NOMEM */ ++ -EINVAL, /* BCME_ASSOCIATED */ ++ -ERANGE, /* BCME_RANGE */ ++ -EINVAL, /* BCME_NOTFOUND */ ++ -EINVAL, /* BCME_WME_NOT_ENABLED */ ++ -EINVAL, /* BCME_TSPEC_NOTFOUND */ ++ -EINVAL, /* BCME_ACM_NOTSUPPORTED */ ++ -EINVAL, /* BCME_NOT_WME_ASSOCIATION */ ++ -EIO, /* BCME_SDIO_ERROR */ ++ -ENODEV, /* BCME_DONGLE_DOWN */ ++ -EINVAL, /* BCME_VERSION */ ++ -EIO, /* BCME_TXFAIL */ ++ -EIO, /* BCME_RXFAIL */ ++ -ENODEV, /* BCME_NODEVICE */ ++ -EINVAL, /* BCME_NMODE_DISABLED */ ++ -ENODATA, /* BCME_NONRESIDENT */ ++ ++/* When an new error code is added to bcmutils.h, add os ++ * specific error translation here as well ++ */ ++/* check if BCME_LAST changed since the last time this function was updated */ ++#if BCME_LAST != -42 ++#error "You need to add a OS error translation in the linuxbcmerrormap \ ++ for new error code defined in bcmutils.h" ++#endif ++}; ++ ++/* translate bcmerrors into linux errors */ ++int ++osl_error(int bcmerror) ++{ ++ if (bcmerror > 0) ++ bcmerror = 0; ++ else if (bcmerror < BCME_LAST) ++ bcmerror = BCME_ERROR; ++ ++ /* Array bounds covered by ASSERT in osl_attach */ ++ return linuxbcmerrormap[-bcmerror]; ++} ++ ++extern uint8* dhd_os_prealloc(void *osh, int section, int size); ++ ++EXPORT_SYMBOL(osl_attach); ++osl_t * ++osl_attach(void *pdev, uint bustype, bool pkttag) ++{ ++ osl_t *osh; ++ ++ osh = kmalloc(sizeof(osl_t), GFP_ATOMIC); ++ ASSERT(osh); ++ ++ bzero(osh, sizeof(osl_t)); ++ ++ /* Check that error map has the right number of entries in it */ ++ ASSERT(ABS(BCME_LAST) == (ARRAYSIZE(linuxbcmerrormap) - 1)); ++ ++ osh->magic = OS_HANDLE_MAGIC; ++ atomic_set(&osh->malloced, 0); ++ osh->failed = 0; ++ osh->dbgmem_list = NULL; ++ spin_lock_init(&(osh->dbgmem_lock)); ++ osh->pdev = pdev; ++ osh->pub.pkttag = pkttag; ++ osh->bustype = bustype; ++ ++ switch (bustype) { ++ case PCI_BUS: ++ case SI_BUS: ++ case PCMCIA_BUS: ++ osh->pub.mmbus = TRUE; ++ break; ++ case 
JTAG_BUS: ++ case SDIO_BUS: ++ case USB_BUS: ++ case SPI_BUS: ++ case RPC_BUS: ++ osh->pub.mmbus = FALSE; ++ break; ++ default: ++ ASSERT(FALSE); ++ break; ++ } ++ ++#if defined(DHD_USE_STATIC_BUF) ++ if (!bcm_static_buf) { ++ if (!(bcm_static_buf = (bcm_static_buf_t *)dhd_os_prealloc(osh, 3, STATIC_BUF_SIZE+ ++ STATIC_BUF_TOTAL_LEN))) { ++ printk("can not alloc static buf!\n"); ++ } ++ else ++ printk("alloc static buf at %x!\n", (unsigned int)bcm_static_buf); ++ ++ ++ sema_init(&bcm_static_buf->static_sem, 1); ++ ++ bcm_static_buf->buf_ptr = (unsigned char *)bcm_static_buf + STATIC_BUF_SIZE; ++ } ++ ++ if (!bcm_static_skb) { ++ int i; ++ void *skb_buff_ptr = 0; ++ bcm_static_skb = (bcm_static_pkt_t *)((char *)bcm_static_buf + 2048); ++ skb_buff_ptr = dhd_os_prealloc(osh, 4, 0); ++ ++ bcopy(skb_buff_ptr, bcm_static_skb, sizeof(struct sk_buff *)*16); ++ for (i = 0; i < STATIC_PKT_MAX_NUM * 2; i++) ++ bcm_static_skb->pkt_use[i] = 0; ++ ++ sema_init(&bcm_static_skb->osl_pkt_sem, 1); ++ } ++#endif /* DHD_USE_STATIC_BUF */ ++ ++ spin_lock_init(&(osh->pktalloc_lock)); ++ ++#ifdef BCMDBG ++ if (pkttag) { ++ struct sk_buff *skb; ++ ASSERT(OSL_PKTTAG_SZ <= sizeof(skb->cb)); ++ } ++#endif ++ return osh; ++} ++ ++void ++osl_detach(osl_t *osh) ++{ ++ if (osh == NULL) ++ return; ++ ++#ifdef DHD_USE_STATIC_BUF ++ if (bcm_static_buf) { ++ bcm_static_buf = 0; ++ } ++ if (bcm_static_skb) { ++ bcm_static_skb = 0; ++ } ++#endif ++ ++ ASSERT(osh->magic == OS_HANDLE_MAGIC); ++ kfree(osh); ++} ++ ++static struct sk_buff *osl_alloc_skb(unsigned int len) ++{ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(2, 6, 25) ++ gfp_t flags = GFP_ATOMIC; ++ ++ return __dev_alloc_skb(len, flags); ++#else ++ return dev_alloc_skb(len); ++#endif ++} ++ ++/* Convert a driver packet to native(OS) packet ++ * In the process, packettag is zeroed out before sending up ++ * IP code depends on skb->cb to be setup correctly with various options ++ * In our case, that means it should be 0 ++ */ ++struct sk_buff * 
BCMFASTPATH ++osl_pkt_tonative(osl_t *osh, void *pkt) ++{ ++ struct sk_buff *nskb; ++ ++ if (osh->pub.pkttag) ++ OSL_PKTTAG_CLEAR(pkt); ++ ++ /* Decrement the packet counter */ ++ for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) { ++ atomic_sub(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced); ++ } ++ return (struct sk_buff *)pkt; ++} ++ ++/* Convert a native(OS) packet to driver packet. ++ * In the process, native packet is destroyed, there is no copying ++ * Also, a packettag is zeroed out ++ */ ++void * BCMFASTPATH ++osl_pkt_frmnative(osl_t *osh, void *pkt) ++{ ++ struct sk_buff *nskb; ++ ++ if (osh->pub.pkttag) ++ OSL_PKTTAG_CLEAR(pkt); ++ ++ /* Increment the packet counter */ ++ for (nskb = (struct sk_buff *)pkt; nskb; nskb = nskb->next) { ++ atomic_add(PKTISCHAINED(nskb) ? PKTCCNT(nskb) : 1, &osh->pktalloced); ++ } ++ return (void *)pkt; ++} ++ ++/* Return a new packet. zero out pkttag */ ++void * BCMFASTPATH ++osl_pktget(osl_t *osh, uint len) ++{ ++ struct sk_buff *skb; ++ ++ if ((skb = osl_alloc_skb(len))) { ++ skb_put(skb, len); ++ skb->priority = 0; ++ ++ atomic_inc(&osh->pktalloced); ++ } ++ ++ PKTSETCLINK(skb, NULL); ++ ++ return ((void*) skb); ++} ++ ++/* Free the driver packet. 
Free the tag if present */ ++void BCMFASTPATH ++osl_pktfree(osl_t *osh, void *p, bool send) ++{ ++ struct sk_buff *skb, *nskb; ++ ++ skb = (struct sk_buff*) p; ++ ++ if (send && osh->pub.tx_fn) ++ osh->pub.tx_fn(osh->pub.tx_ctx, p, 0); ++ ++ PKTDBG_TRACE(osh, (void *) skb, PKTLIST_PKTFREE); ++ ++ /* perversion: we use skb->next to chain multi-skb packets */ ++ while (skb) { ++ nskb = skb->next; ++ skb->next = NULL; ++ ++ ++ { ++ if (skb->destructor) ++ /* cannot kfree_skb() on hard IRQ (net/core/skbuff.c) if ++ * destructor exists ++ */ ++ dev_kfree_skb_any(skb); ++ else ++ /* can free immediately (even in_irq()) if destructor ++ * does not exist ++ */ ++ dev_kfree_skb(skb); ++ } ++ atomic_dec(&osh->pktalloced); ++ skb = nskb; ++ } ++} ++ ++#ifdef DHD_USE_STATIC_BUF ++void* ++osl_pktget_static(osl_t *osh, uint len) ++{ ++ int i = 0; ++ struct sk_buff *skb; ++ ++ if (len > (PAGE_SIZE*2)) { ++ printk("%s: attempt to allocate huge packet (0x%x)\n", __FUNCTION__, len); ++ return osl_pktget(osh, len); ++ } ++ ++ down(&bcm_static_skb->osl_pkt_sem); ++ ++ if (len <= PAGE_SIZE) { ++ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { ++ if (bcm_static_skb->pkt_use[i] == 0) ++ break; ++ } ++ ++ if (i != STATIC_PKT_MAX_NUM) { ++ bcm_static_skb->pkt_use[i] = 1; ++ up(&bcm_static_skb->osl_pkt_sem); ++ skb = bcm_static_skb->skb_4k[i]; ++ skb->tail = skb->data + len; ++ skb->len = len; ++ return skb; ++ } ++ } ++ ++ ++ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { ++ if (bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] == 0) ++ break; ++ } ++ ++ if (i != STATIC_PKT_MAX_NUM) { ++ bcm_static_skb->pkt_use[i+STATIC_PKT_MAX_NUM] = 1; ++ up(&bcm_static_skb->osl_pkt_sem); ++ skb = bcm_static_skb->skb_8k[i]; ++ skb->tail = skb->data + len; ++ skb->len = len; ++ return skb; ++ } ++ ++ up(&bcm_static_skb->osl_pkt_sem); ++ printk("%s: all static pkt in use!\n", __FUNCTION__); ++ return osl_pktget(osh, len); ++} ++ ++void ++osl_pktfree_static(osl_t *osh, void *p, bool send) ++{ ++ int i; ++ ++ for (i = 0; 
i < STATIC_PKT_MAX_NUM; i++) { ++ if (p == bcm_static_skb->skb_4k[i]) { ++ down(&bcm_static_skb->osl_pkt_sem); ++ bcm_static_skb->pkt_use[i] = 0; ++ up(&bcm_static_skb->osl_pkt_sem); ++ return; ++ } ++ } ++ ++ for (i = 0; i < STATIC_PKT_MAX_NUM; i++) { ++ if (p == bcm_static_skb->skb_8k[i]) { ++ down(&bcm_static_skb->osl_pkt_sem); ++ bcm_static_skb->pkt_use[i + STATIC_PKT_MAX_NUM] = 0; ++ up(&bcm_static_skb->osl_pkt_sem); ++ return; ++ } ++ } ++ ++ return osl_pktfree(osh, p, send); ++} ++#endif /* DHD_USE_STATIC_BUF */ ++ ++uint32 ++osl_pci_read_config(osl_t *osh, uint offset, uint size) ++{ ++ uint val = 0; ++ uint retry = PCI_CFG_RETRY; ++ ++ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); ++ ++ /* only 4byte access supported */ ++ ASSERT(size == 4); ++ ++ do { ++ pci_read_config_dword(osh->pdev, offset, &val); ++ if (val != 0xffffffff) ++ break; ++ } while (retry--); ++ ++#ifdef BCMDBG ++ if (retry < PCI_CFG_RETRY) ++ printk("PCI CONFIG READ access to %d required %d retries\n", offset, ++ (PCI_CFG_RETRY - retry)); ++#endif /* BCMDBG */ ++ ++ return (val); ++} ++ ++void ++osl_pci_write_config(osl_t *osh, uint offset, uint size, uint val) ++{ ++ uint retry = PCI_CFG_RETRY; ++ ++ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); ++ ++ /* only 4byte access supported */ ++ ASSERT(size == 4); ++ ++ do { ++ pci_write_config_dword(osh->pdev, offset, val); ++ if (offset != PCI_BAR0_WIN) ++ break; ++ if (osl_pci_read_config(osh, offset, size) == val) ++ break; ++ } while (retry--); ++ ++#ifdef BCMDBG ++ if (retry < PCI_CFG_RETRY) ++ printk("PCI CONFIG WRITE access to %d required %d retries\n", offset, ++ (PCI_CFG_RETRY - retry)); ++#endif /* BCMDBG */ ++} ++ ++/* return bus # for the pci device pointed by osh->pdev */ ++uint ++osl_pci_bus(osl_t *osh) ++{ ++ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); ++ ++ return ((struct pci_dev *)osh->pdev)->bus->number; ++} ++ ++/* return slot # for the pci device pointed by osh->pdev */ ++uint ++osl_pci_slot(osl_t 
*osh) ++{ ++ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); ++ ++ return PCI_SLOT(((struct pci_dev *)osh->pdev)->devfn); ++} ++ ++/* return the pci device pointed by osh->pdev */ ++struct pci_dev * ++osl_pci_device(osl_t *osh) ++{ ++ ASSERT(osh && (osh->magic == OS_HANDLE_MAGIC) && osh->pdev); ++ ++ return osh->pdev; ++} ++ ++static void ++osl_pcmcia_attr(osl_t *osh, uint offset, char *buf, int size, bool write) ++{ ++} ++ ++void ++osl_pcmcia_read_attr(osl_t *osh, uint offset, void *buf, int size) ++{ ++ osl_pcmcia_attr(osh, offset, (char *) buf, size, FALSE); ++} ++ ++void ++osl_pcmcia_write_attr(osl_t *osh, uint offset, void *buf, int size) ++{ ++ osl_pcmcia_attr(osh, offset, (char *) buf, size, TRUE); ++} ++ ++void * ++osl_malloc(osl_t *osh, uint size) ++{ ++ void *addr; ++ ++ /* only ASSERT if osh is defined */ ++ if (osh) ++ ASSERT(osh->magic == OS_HANDLE_MAGIC); ++ ++#ifdef DHD_USE_STATIC_BUF ++ if (bcm_static_buf) ++ { ++ int i = 0; ++ if ((size >= PAGE_SIZE)&&(size <= STATIC_BUF_SIZE)) ++ { ++ down(&bcm_static_buf->static_sem); ++ ++ for (i = 0; i < STATIC_BUF_MAX_NUM; i++) ++ { ++ if (bcm_static_buf->buf_use[i] == 0) ++ break; ++ } ++ ++ if (i == STATIC_BUF_MAX_NUM) ++ { ++ up(&bcm_static_buf->static_sem); ++ printk("all static buff in use!\n"); ++ goto original; ++ } ++ ++ bcm_static_buf->buf_use[i] = 1; ++ up(&bcm_static_buf->static_sem); ++ ++ bzero(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i, size); ++ if (osh) ++ atomic_add(size, &osh->malloced); ++ ++ return ((void *)(bcm_static_buf->buf_ptr+STATIC_BUF_SIZE*i)); ++ } ++ } ++original: ++#endif /* DHD_USE_STATIC_BUF */ ++ ++ if ((addr = kmalloc(size, GFP_ATOMIC)) == NULL) { ++ if (osh) ++ osh->failed++; ++ return (NULL); ++ } ++ if (osh) ++ atomic_add(size, &osh->malloced); ++ ++ return (addr); ++} ++ ++void ++osl_mfree(osl_t *osh, void *addr, uint size) ++{ ++#ifdef DHD_USE_STATIC_BUF ++ if (bcm_static_buf) ++ { ++ if ((addr > (void *)bcm_static_buf) && ((unsigned char *)addr ++ <= 
((unsigned char *)bcm_static_buf + STATIC_BUF_TOTAL_LEN))) ++ { ++ int buf_idx = 0; ++ ++ buf_idx = ((unsigned char *)addr - bcm_static_buf->buf_ptr)/STATIC_BUF_SIZE; ++ ++ down(&bcm_static_buf->static_sem); ++ bcm_static_buf->buf_use[buf_idx] = 0; ++ up(&bcm_static_buf->static_sem); ++ ++ if (osh) { ++ ASSERT(osh->magic == OS_HANDLE_MAGIC); ++ atomic_sub(size, &osh->malloced); ++ } ++ return; ++ } ++ } ++#endif /* DHD_USE_STATIC_BUF */ ++ if (osh) { ++ ASSERT(osh->magic == OS_HANDLE_MAGIC); ++ atomic_sub(size, &osh->malloced); ++ } ++ kfree(addr); ++} ++ ++uint ++osl_malloced(osl_t *osh) ++{ ++ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); ++ return (atomic_read(&osh->malloced)); ++} ++ ++uint ++osl_malloc_failed(osl_t *osh) ++{ ++ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); ++ return (osh->failed); ++} ++ ++ ++uint ++osl_dma_consistent_align(void) ++{ ++ return (PAGE_SIZE); ++} ++ ++void* ++osl_dma_alloc_consistent(osl_t *osh, uint size, uint16 align_bits, uint *alloced, ulong *pap) ++{ ++#ifdef CONFIG_BCM_IPROC_GMAC_ACP ++ void *va; ++ uint16 align = (1 << align_bits); ++ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); ++ ++ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align)) ++ size += align; ++ *alloced = size; ++ ++ va = kmalloc(size, GFP_ATOMIC | __GFP_ZERO); ++ if (va) ++ *pap = (ulong)__virt_to_phys((ulong)va); ++ return va; ++ ++#else ++ void *ret; ++// int gfp = GFP_KERNEL; //GFP_ATOMIC | GFP_DMA; ++ /* platform device reference */ ++ struct platform_device *pdev; ++ ++ uint16 align = (1 << align_bits); ++ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); ++ ++ if (!ISALIGNED(DMA_CONSISTENT_ALIGN, align)) ++ size += align; ++ *alloced = size; ++ ++// ret = (void *)__get_free_pages(gfp, get_order(size)); ++// if (ret != NULL) { ++// memset(ret, 0, size); ++// *pap = virt_to_phys(ret); ++// } ++ pdev = (struct platform_device *)osh->pdev; ++ ret = dma_alloc_coherent(&pdev->dev, size, (dma_addr_t*)pap, GFP_KERNEL); ++ return ret; ++ ++#endif /* 
CONFIG_BCM_IPROC_GMAC_ACP */ ++} ++ ++void ++osl_dma_free_consistent(osl_t *osh, void *va, uint size, ulong pa) ++{ ++#ifdef CONFIG_BCM_IPROC_GMAC_ACP ++ kfree(va); ++#else ++ /* platform device reference */ ++ struct platform_device *pdev; ++ ++ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); ++ ++// free_pages((unsigned long)va, get_order(size)); ++ pdev = (struct platform_device *)osh->pdev; ++ dma_free_coherent(&pdev->dev, size, va, (dma_addr_t)pa); ++#endif /* CONFIG_BCM_IPROC_GMAC_ACP */ ++} ++ ++uint BCMFASTPATH ++osl_dma_map(osl_t *osh, void *va, uint size, int direction, void *p, hnddma_seg_map_t *dmah) ++{ ++ int dir; ++ /* platform device reference */ ++ struct platform_device *pdev; ++ ++ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); ++ pdev = (struct platform_device *)osh->pdev; ++ dir = (direction == DMA_TX)? DMA_TO_DEVICE: DMA_FROM_DEVICE; ++ ++#if defined(BCMDMASGLISTOSL) ++ if (dmah != NULL) { ++ int32 nsegs, i, totsegs = 0, totlen = 0; ++ struct scatterlist *sg, _sg[MAX_DMA_SEGS * 2]; ++ struct sk_buff *skb; ++ for (skb = (struct sk_buff *)p; skb != NULL; skb = PKTNEXT(osh, skb)) { ++ sg = &_sg[totsegs]; ++ if (skb_is_nonlinear(skb)) { ++ nsegs = skb_to_sgvec(skb, sg, 0, PKTLEN(osh, skb)); ++ ASSERT((nsegs > 0) && (totsegs + nsegs <= MAX_DMA_SEGS)); ++ #ifndef CONFIG_BCM_IPROC_GMAC_ACP ++ dma_map_sg(&pdev->dev, sg, nsegs, dir); ++ #endif /* CONFIG_BCM_IPROC_GMAC_ACP */ ++ } else { ++ nsegs = 1; ++ ASSERT(totsegs + nsegs <= MAX_DMA_SEGS); ++ sg->page_link = 0; ++ sg_set_buf(sg, PKTDATA(osh, skb), PKTLEN(osh, skb)); ++ #ifndef CONFIG_BCM_IPROC_GMAC_ACP ++ dma_map_single(&pdev->dev, PKTDATA(osh, skb), PKTLEN(osh, skb), dir); ++ #endif /* CONFIG_BCM_IPROC_GMAC_ACP */ ++ } ++ totsegs += nsegs; ++ totlen += PKTLEN(osh, skb); ++ } ++ dmah->nsegs = totsegs; ++ dmah->origsize = totlen; ++ for (i = 0, sg = _sg; i < totsegs; i++, sg++) { ++ dmah->segs[i].addr = sg_phys(sg); ++ dmah->segs[i].length = sg->length; ++ } ++ #ifdef CONFIG_BCM_IPROC_GMAC_ACP 
++ return virt_to_phys(va); ++ #else ++ return dmah->segs[0].addr; ++ #endif /* CONFIG_BCM_IPROC_GMAC_ACP */ ++ } ++#endif /* defined(BCMDMASGLISTOSL) */ ++ ++#ifdef CONFIG_BCM_IPROC_GMAC_ACP ++ return virt_to_phys(va); ++#else ++ return dma_map_single(&pdev->dev, va, size, dir); ++#endif /* CONFIG_BCM_IPROC_GMAC_ACP */ ++} ++ ++void BCMFASTPATH ++osl_dma_unmap(osl_t *osh, uint pa, uint size, int direction) ++{ ++#ifndef CONFIG_BCM_IPROC_GMAC_ACP ++ int dir; ++ /* platform device reference */ ++ struct platform_device *pdev; ++ ++ ASSERT((osh && (osh->magic == OS_HANDLE_MAGIC))); ++ pdev = (struct platform_device *)osh->pdev; ++ dir = (direction == DMA_TX)? DMA_TO_DEVICE: DMA_FROM_DEVICE; ++ dma_unmap_single(&pdev->dev, (uint32)pa, size, dir); ++#endif /* ! CONFIG_BCM_IPROC_GMAC_ACP */ ++} ++ ++ ++void ++osl_delay(uint usec) ++{ ++ uint d; ++ ++ while (usec > 0) { ++ d = MIN(usec, 1000); ++ udelay(d); ++ usec -= d; ++ } ++} ++ ++/* Clone a packet. ++ * The pkttag contents are NOT cloned. ++ */ ++void * ++osl_pktdup(osl_t *osh, void *skb) ++{ ++ void * p; ++ ++ /* clear the CTFBUF flag if set and map the rest of the buffer ++ * before cloning. ++ */ ++ PKTCTFMAP(osh, skb); ++ ++ if ((p = skb_clone((struct sk_buff *)skb, GFP_ATOMIC)) == NULL) ++ return NULL; ++ ++ /* skb_clone copies skb->cb.. 
we don't want that */ ++ if (osh->pub.pkttag) ++ OSL_PKTTAG_CLEAR(p); ++ ++ /* Increment the packet counter */ ++ atomic_inc(&osh->pktalloced); ++ return (p); ++} ++ ++ ++/* ++ * OSLREGOPS specifies the use of osl_XXX routines to be used for register access ++ */ ++#ifdef OSLREGOPS ++uint8 ++osl_readb(osl_t *osh, volatile uint8 *r) ++{ ++ osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn; ++ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx; ++ ++ return (uint8)((rreg)(ctx, (void*)r, sizeof(uint8))); ++} ++ ++ ++uint16 ++osl_readw(osl_t *osh, volatile uint16 *r) ++{ ++ osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn; ++ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx; ++ ++ return (uint16)((rreg)(ctx, (void*)r, sizeof(uint16))); ++} ++ ++uint32 ++osl_readl(osl_t *osh, volatile uint32 *r) ++{ ++ osl_rreg_fn_t rreg = ((osl_pubinfo_t*)osh)->rreg_fn; ++ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx; ++ ++ return (uint32)((rreg)(ctx, (void*)r, sizeof(uint32))); ++} ++ ++void ++osl_writeb(osl_t *osh, volatile uint8 *r, uint8 v) ++{ ++ osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn; ++ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx; ++ ++ ((wreg)(ctx, (void*)r, v, sizeof(uint8))); ++} ++ ++ ++void ++osl_writew(osl_t *osh, volatile uint16 *r, uint16 v) ++{ ++ osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn; ++ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx; ++ ++ ((wreg)(ctx, (void*)r, v, sizeof(uint16))); ++} ++ ++void ++osl_writel(osl_t *osh, volatile uint32 *r, uint32 v) ++{ ++ osl_wreg_fn_t wreg = ((osl_pubinfo_t*)osh)->wreg_fn; ++ void *ctx = ((osl_pubinfo_t*)osh)->reg_ctx; ++ ++ ((wreg)(ctx, (void*)r, v, sizeof(uint32))); ++} ++#endif /* OSLREGOPS */ ++ ++/* ++ * BINOSL selects the slightly slower function-call-based binary compatible osl. ++ */ ++#ifdef BINOSL ++ ++uint32 ++osl_sysuptime(void) ++{ ++ return ((uint32)jiffies * (1000 / HZ)); ++} ++ ++int ++osl_printf(const char *format, ...) 
++{ ++ va_list args; ++ static char printbuf[1024]; ++ int len; ++ ++ /* sprintf into a local buffer because there *is* no "vprintk()".. */ ++ va_start(args, format); ++ len = vsnprintf(printbuf, 1024, format, args); ++ va_end(args); ++ ++ if (len > sizeof(printbuf)) { ++ printk("osl_printf: buffer overrun\n"); ++ return (0); ++ } ++ ++ return (printk("%s", printbuf)); ++} ++ ++int ++osl_sprintf(char *buf, const char *format, ...) ++{ ++ va_list args; ++ int rc; ++ ++ va_start(args, format); ++ rc = vsprintf(buf, format, args); ++ va_end(args); ++ return (rc); ++} ++ ++int ++osl_snprintf(char *buf, size_t n, const char *format, ...) ++{ ++ va_list args; ++ int rc; ++ ++ va_start(args, format); ++ rc = vsnprintf(buf, n, format, args); ++ va_end(args); ++ return (rc); ++} ++ ++int ++osl_vsprintf(char *buf, const char *format, va_list ap) ++{ ++ return (vsprintf(buf, format, ap)); ++} ++ ++int ++osl_vsnprintf(char *buf, size_t n, const char *format, va_list ap) ++{ ++ return (vsnprintf(buf, n, format, ap)); ++} ++ ++int ++osl_strcmp(const char *s1, const char *s2) ++{ ++ return (strcmp(s1, s2)); ++} ++ ++int ++osl_strncmp(const char *s1, const char *s2, uint n) ++{ ++ return (strncmp(s1, s2, n)); ++} ++ ++int ++osl_strlen(const char *s) ++{ ++ return (strlen(s)); ++} ++ ++char* ++osl_strcpy(char *d, const char *s) ++{ ++ return (strcpy(d, s)); ++} ++ ++char* ++osl_strncpy(char *d, const char *s, uint n) ++{ ++ return (strncpy(d, s, n)); ++} ++ ++char* ++osl_strchr(const char *s, int c) ++{ ++ return (strchr(s, c)); ++} ++ ++char* ++osl_strrchr(const char *s, int c) ++{ ++ return (strrchr(s, c)); ++} ++ ++void* ++osl_memset(void *d, int c, size_t n) ++{ ++ return memset(d, c, n); ++} ++ ++void* ++osl_memcpy(void *d, const void *s, size_t n) ++{ ++ return memcpy(d, s, n); ++} ++ ++void* ++osl_memmove(void *d, const void *s, size_t n) ++{ ++ return memmove(d, s, n); ++} ++ ++int ++osl_memcmp(const void *s1, const void *s2, size_t n) ++{ ++ return memcmp(s1, s2, n); ++} 
++ ++uint32 ++osl_readl(volatile uint32 *r) ++{ ++ return (readl(r)); ++} ++ ++uint16 ++osl_readw(volatile uint16 *r) ++{ ++ return (readw(r)); ++} ++ ++uint8 ++osl_readb(volatile uint8 *r) ++{ ++ return (readb(r)); ++} ++ ++void ++osl_writel(uint32 v, volatile uint32 *r) ++{ ++ writel(v, r); ++} ++ ++void ++osl_writew(uint16 v, volatile uint16 *r) ++{ ++ writew(v, r); ++} ++ ++void ++osl_writeb(uint8 v, volatile uint8 *r) ++{ ++ writeb(v, r); ++} ++ ++void * ++osl_uncached(void *va) ++{ ++#ifdef mips ++ return ((void*)KSEG1ADDR(va)); ++#else ++ return ((void*)va); ++#endif /* mips */ ++} ++ ++void * ++osl_cached(void *va) ++{ ++#ifdef mips ++ return ((void*)KSEG0ADDR(va)); ++#else ++ return ((void*)va); ++#endif /* mips */ ++} ++ ++uint ++osl_getcycles(void) ++{ ++ uint cycles; ++ ++#if defined(mips) ++ cycles = read_c0_count() * 2; ++#elif defined(__i386__) ++ rdtscl(cycles); ++#else ++ cycles = 0; ++#endif /* defined(mips) */ ++ return cycles; ++} ++ ++void * ++osl_reg_map(uint32 pa, uint size) ++{ ++ return (ioremap_nocache((unsigned long)pa, (unsigned long)size)); ++} ++ ++void ++osl_reg_unmap(void *va) ++{ ++ iounmap(va); ++} ++ ++int ++osl_busprobe(uint32 *val, uint32 addr) ++{ ++#ifdef mips ++ return get_dbe(*val, (uint32 *)addr); ++#else ++ *val = readl((uint32 *)(uintptr)addr); ++ return 0; ++#endif /* mips */ ++} ++ ++bool ++osl_pktshared(void *skb) ++{ ++ return (((struct sk_buff*)skb)->cloned); ++} ++ ++uchar* ++osl_pktdata(osl_t *osh, void *skb) ++{ ++ return (((struct sk_buff*)skb)->data); ++} ++ ++uint ++osl_pktlen(osl_t *osh, void *skb) ++{ ++ return (((struct sk_buff*)skb)->len); ++} ++ ++uint ++osl_pktheadroom(osl_t *osh, void *skb) ++{ ++ return (uint) skb_headroom((struct sk_buff *) skb); ++} ++ ++uint ++osl_pkttailroom(osl_t *osh, void *skb) ++{ ++ return (uint) skb_tailroom((struct sk_buff *) skb); ++} ++ ++void* ++osl_pktnext(osl_t *osh, void *skb) ++{ ++ return (((struct sk_buff*)skb)->next); ++} ++ ++void ++osl_pktsetnext(void *skb, void 
*x) ++{ ++ ((struct sk_buff*)skb)->next = (struct sk_buff*)x; ++} ++ ++void ++osl_pktsetlen(osl_t *osh, void *skb, uint len) ++{ ++ __pskb_trim((struct sk_buff*)skb, len); ++} ++ ++uchar* ++osl_pktpush(osl_t *osh, void *skb, int bytes) ++{ ++ return (skb_push((struct sk_buff*)skb, bytes)); ++} ++ ++uchar* ++osl_pktpull(osl_t *osh, void *skb, int bytes) ++{ ++ return (skb_pull((struct sk_buff*)skb, bytes)); ++} ++ ++void* ++osl_pkttag(void *skb) ++{ ++ return ((void*)(((struct sk_buff*)skb)->cb)); ++} ++ ++void* ++osl_pktlink(void *skb) ++{ ++ return (((struct sk_buff*)skb)->prev); ++} ++ ++void ++osl_pktsetlink(void *skb, void *x) ++{ ++ ((struct sk_buff*)skb)->prev = (struct sk_buff*)x; ++} ++ ++uint ++osl_pktprio(void *skb) ++{ ++ return (((struct sk_buff*)skb)->priority); ++} ++ ++void ++osl_pktsetprio(void *skb, uint x) ++{ ++ ((struct sk_buff*)skb)->priority = x; ++} ++#endif /* BINOSL */ ++ ++uint ++osl_pktalloced(osl_t *osh) ++{ ++ return (atomic_read(&osh->pktalloced)); ++} ++ ++/* Linux Kernel: File Operations: start */ ++void * ++osl_os_open_image(char *filename) ++{ ++ struct file *fp; ++ ++ fp = filp_open(filename, O_RDONLY, 0); ++ /* ++ * 2.6.11 (FC4) supports filp_open() but later revs don't? ++ * Alternative: ++ * fp = open_namei(AT_FDCWD, filename, O_RD, 0); ++ * ??? 
++ */ ++ if (IS_ERR(fp)) ++ fp = NULL; ++ ++ return fp; ++} ++ ++int ++osl_os_get_image_block(char *buf, int len, void *image) ++{ ++ struct file *fp = (struct file *)image; ++ int rdlen; ++ ++ if (!image) ++ return 0; ++ ++ rdlen = kernel_read(fp, fp->f_pos, buf, len); ++ if (rdlen > 0) ++ fp->f_pos += rdlen; ++ ++ return rdlen; ++} ++ ++void ++osl_os_close_image(void *image) ++{ ++ if (image) ++ filp_close((struct file *)image, NULL); ++} ++/* Linux Kernel: File Operations: end */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/nvramstubs.c b/drivers/net/ethernet/broadcom/gmac/src/shared/nvramstubs.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/nvramstubs.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/nvramstubs.c 2017-11-09 17:53:44.059297000 +0800 +@@ -0,0 +1,341 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * Stubs for NVRAM functions for platforms without flash ++ * ++ * $Id: nvramstubs.c 325991 2012-04-05 10:16:42Z kenlo $ ++ */ ++ ++#include ++#include ++#include ++#undef strcmp ++#define strcmp(s1,s2) 0 /* always match */ ++#include ++ ++int ++nvram_init(void *sih) ++{ ++ return 0; ++} ++ ++int ++nvram_append(void *sb, char *vars, uint varsz) ++{ ++ return 0; ++} ++ ++void ++nvram_exit(void *sih) ++{ ++} ++ ++/* fake nvram tuples */ ++typedef struct { ++ char *name; ++ char *value; ++} nvram_t; ++ ++static nvram_t fake_nvram[] = { ++ {"boardtype", "0x058d"}, ++ {"boardnum", "0x010"}, ++ {"boardrev", "0x1100"}, ++ {"boardflags", "0x710"}, ++ {"boardflags2", "0"}, ++ {"sromrev", "8"}, ++ {"clkfreq", "133,133,133"}, ++ {"xtalfreq", "125000"}, ++ {"et_txq_thresh", "1024"}, ++ {"et_rx_rate_limit","1"}, ++ {"sdram_config", "0x103"}, ++ {"swgmacet", "et2"}, ++ {"brcmtag", "1"}, ++ //{"ethaddr", "00:90:4c:06:a5:72"}, ++#ifdef FOUR_PORT_CONFIG ++ {"vlan1hwname", "et2"}, ++ 
{"vlan1ports", "0 1 2 8*"}, ++ {"vlan2hwname", "et2"}, ++ {"vlan2ports", "3 8*"}, ++ {"wanport", "3"}, ++#else ++ {"vlan1hwname", "et2"}, ++ {"vlan1ports", "0 1 2 3 8*"}, ++ {"vlan2hwname", "et2"}, ++ {"vlan2ports", "4 8*"}, ++ {"wanport", "4"}, ++#endif ++ {"landevs", "vlan1"}, ++ {"wandevs", "et0"}, ++ {"lan_ipaddr", "192.168.1.1"}, ++ {"lan_netmask", "255.255.255.0"}, ++ {"boot_wait", "on"}, ++ {"wait_time", "3"}, ++ {"watchdog", "0"}, ++ {"et_msglevel", "0xFFFFFFFF"} ++}; ++#define fake_nvram_size sizeof(fake_nvram)/sizeof(fake_nvram[0]) ++ ++#ifndef FAKE_NVRAM ++ ++#if defined(CONFIG_MACH_IPROC_P7) ++#define CONFIG_SPI_BASE 0xf0000000 ++#else ++#define CONFIG_SPI_BASE 0x1e000000 ++#endif /* CONFIG_MACH_IPROC_P7 */ ++#define CONFIG_ENV_OFFSET 0xc0000 /* 30000-b0000 - use last 10000 for env */ ++#define CONFIG_ENV_SIZE 0x10000 /* 64K */ ++#define CONFIG_ENV_MAX_ENTRIES 512 ++ ++#define UBOOT_ENV_ADDR CONFIG_SPI_BASE+CONFIG_ENV_OFFSET ++#define UBOOT_ENV_SIZE CONFIG_ENV_SIZE ++#define UBOOT_ENV_MAX_NUM CONFIG_ENV_MAX_ENTRIES ++ ++static uint8 u_boot_env[UBOOT_ENV_SIZE]; ++static bool u_boot_env_loaded=false; ++static nvram_t env_list[UBOOT_ENV_MAX_NUM]; ++static int uboot_vars_start = UBOOT_ENV_ADDR; ++static int uboot_nvram_max = UBOOT_ENV_SIZE; ++ ++/* pass envaddr= in bootargs */ ++static int __init envaddr_setup(char *str) ++{ ++ int ret =0; ++ unsigned long ul=0; ++ ++ ret = kstrtoul(str, 16, &ul); ++ ++ if (!ret) { ++ uboot_vars_start = ul; ++ printk("NVRAM: assign 0x%08x\n", uboot_vars_start); ++ } ++ ++ return !ret; ++} ++__setup("envaddr=", envaddr_setup); ++ ++/* ++APIs for access into uboot env vars ++*/ ++ ++int ++nvram_env_init(void) ++{ ++ volatile void *envbuf; ++ char *dp, *sp, *name, *value, *dp_end; ++ char sep = '\0'; ++ int idx=0; ++ ++ ++ printk("NVRAM: map 0x%08x\n", uboot_vars_start); ++ ++ /* map uboot env */ ++ if ((envbuf = (uint8*)ioremap(uboot_vars_start, UBOOT_ENV_SIZE)) == NULL) { ++ printk("%s: ioremap() failed\n", __FUNCTION__); ++ 
return -ENOMEM; ++ } ++ ++ /* copy memory into buffer */ ++ memcpy((void*)u_boot_env, (void *) envbuf, uboot_nvram_max); ++ ++ /* clear fake entry set */ ++ memset(env_list, 0, sizeof(env_list)); ++ ++ /* load uboot fake nvram buffer */ ++ /* point to first data */ ++ dp = (char*)u_boot_env; ++ /* point to data buffer */ ++ dp += 4; ++ dp_end = (char*)((uint32)u_boot_env+UBOOT_ENV_SIZE); ++ ++ /* point to first data */ ++ do { ++ /* skip leading white space */ ++ while ((*dp == ' ') || (*dp == '\t')) { ++ ++dp; ++ } ++ ++ /* skip comment lines */ ++ if (*dp == '#') { ++ while (*dp && (*dp != sep)) { ++ ++dp; ++ } ++ ++dp; ++ continue; ++ } ++ ++ /* parse name */ ++ for (name = dp; *dp != '=' && *dp && *dp != sep; ++dp) { ++ ; ++ } ++ ++ *dp++ = '\0'; /* terminate name */ ++ ++ /* parse value; deal with escapes */ ++ for (value = sp = dp; *dp && (*dp != sep); ++dp) { ++ if ((*dp == '\\') && *(dp + 1)) { ++ ++dp; ++ } ++ *sp++ = *dp; ++ } ++ *sp++ = '\0'; /* terminate value */ ++ ++dp; ++ ++ /* enter into hash table */ ++ env_list[idx].name = name; ++ env_list[idx].value = value; ++ //printk("entry%d %s=%s\n", idx, name, value); ++ idx++; ++ ++ /* check if table is full */ ++ if (idx >= UBOOT_ENV_MAX_NUM ) { ++ printk("%s: WARNING - UBoot environment table is full\n", __FUNCTION__); ++ break; ++ } ++ /* check if end of table */ ++ } while ((dp < dp_end) && *dp); /* size check needed for text */ ++ ++ u_boot_env_loaded = true; ++ ++ /* unmap uboot env */ ++ iounmap(envbuf); ++ ++ return 0; ++} ++#endif ++ ++int ++nvram_env_gmac_name(int gmac, char *name) ++{ ++ int ret=0; ++ switch (gmac) ++ { ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_GH2)) ++ case 0: ++ strcpy(name, "ethaddr"); ++ break; ++ case 1: ++ sprintf(name, "eth1addr"); ++ break; ++#elif (defined(CONFIG_MACH_HR2) || defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3)) ++ case 0: ++ strcpy(name, "ethaddr"); ++ break; ++#endif ++ default: ++ 
strcpy(name, "unknown"); ++ ret = -1; ++ break; ++ } ++ return ret; ++ ++} ++ ++char * ++nvram_get(const char *name) ++{ ++ int i, len; ++ nvram_t *tuple; ++ int num_entries; ++ ++ if (!name) { ++ return (char *) 0; ++ } ++ ++ len = strlen(name); ++ if (len == 0) { ++ return (char *) 0; ++ } ++ ++#if defined(CONFIG_MACH_SB2) && defined(CONFIG_MACH_IPROC_EMULATION) ++ static char macAddr[17]; /* = "d4:ae:52:bc:a5:09" */ ++ ++ if (name[0] == 'e' && name[1] == 't' && name[2] == 'h' && name[3] == 'a' && ++ name[4] == 'd' && name[5] == 'd' && name[6] == 'r') ++ { ++ macAddr[0] = 'd'; macAddr[1] = '4'; macAddr[2] = ':'; ++ macAddr[3] = 'a'; macAddr[4] = 'e'; macAddr[5] = ':'; ++ macAddr[6] = '5'; macAddr[7] = '2'; macAddr[8] = ':'; ++ macAddr[9] = 'b'; macAddr[10] = 'c'; macAddr[11] = ':'; ++ macAddr[12] = 'a'; macAddr[13] = '5'; macAddr[14] = ':'; ++ macAddr[15] = '0'; macAddr[16] = '9'; ++ return((char *)macAddr); ++ } /* else if (name[0] == 'e' && name[1] == 't' && name[2] == 'h' && name[3] == '1' && ++ name[4] == 'a' && name[5] == 'd' && name[6] == 'd' && name[7] == 'r') ++ { ++ macAddr[0] = 'd'; macAddr[1] = '5'; macAddr[2] = ':'; ++ macAddr[3] = 'a'; macAddr[4] = 'e'; macAddr[5] = ':'; ++ macAddr[6] = '5'; macAddr[7] = '3'; macAddr[8] = ':'; ++ macAddr[9] = 'b'; macAddr[10] = 'c'; macAddr[11] = ':'; ++ macAddr[12] = 'a'; macAddr[13] = '6'; macAddr[14] = ':'; ++ macAddr[15] = '0'; macAddr[16] = 'a'; ++ return((char *)macAddr); ++ } */ ++#endif ++ ++#ifndef FAKE_NVRAM ++ tuple = &env_list[0]; ++ num_entries = sizeof(env_list)/sizeof(nvram_t); ++ ++ if (!u_boot_env_loaded) { ++ nvram_env_init(); ++ } ++ ++ /* first check the uboot NVRAM variables */ ++ for (i = 0; i < num_entries; i++) { ++ if (tuple->name && (bcmp(tuple->name, name, len) == 0) && (strlen(tuple->name)==len)) { ++ /*printf("%s (NVRAM) %s: %s\n", __FUNCTION__, name, tuple->value);*/ ++ return tuple->value; ++ } ++ tuple++; ++ } ++#endif ++ ++ /* if cant find then check fake table above */ ++ tuple = 
&fake_nvram[0]; ++ num_entries = fake_nvram_size; ++ for (i = 0; i < num_entries; i++) { ++ if (tuple->name && (bcmp(tuple->name, name, len) == 0) && (strlen(tuple->name)==len)) { ++ /*printf("%s (STUBS) %s: %s\n", __FUNCTION__, name, tuple->value);*/ ++ return tuple->value; ++ } ++ tuple++; ++ } ++ ++ return (char *) 0; ++} ++ ++int ++nvram_set(const char *name, const char *value) ++{ ++ return 0; ++} ++ ++int ++nvram_unset(const char *name) ++{ ++ return 0; ++} ++ ++int ++nvram_commit(void) ++{ ++ return 0; ++} ++ ++int ++nvram_getall(char *buf, int count) ++{ ++ /* add null string as terminator */ ++ if (count < 1) { ++ return BCME_BUFTOOSHORT; ++ } ++ *buf = '\0'; ++ return 0; ++} ++ ++static int __init iproc_nvram_init(void) ++{ ++ nvram_env_init(); ++ return 0; ++} ++subsys_initcall(iproc_nvram_init); +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/phy542xx.c b/drivers/net/ethernet/broadcom/gmac/src/shared/phy542xx.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/phy542xx.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/phy542xx.c 2017-11-09 17:53:44.061289000 +0800 +@@ -0,0 +1,336 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * These routines provide access to the external phy ++ * ++ */ ++#include ++#include ++#include "../../../mdio/iproc_mdio.h" ++#include "bcmiproc_phy.h" ++#include "phy542xx.h" ++ ++/* debug/trace */ ++//#define BCMDBG ++//#define BCMDBG_ERR ++#ifdef BCMDBG ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) printf args ++#elif defined(BCMDBG_ERR) ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) ++#else ++#define NET_ERROR(args) ++#define NET_TRACE(args) ++#endif /* BCMDBG */ ++#define NET_REG_TRACE(args) ++ ++ ++#ifndef ASSERT ++#define ASSERT(exp) ++#endif ++ ++#define PHY542XX_RDB_REG_RD 
phy542xx_rdb_reg_read ++#define PHY542XX_RDB_REG_WR phy542xx_rdb_reg_write ++#define PHY542XX_REG_RD phy542xx_reg_read ++#define PHY542XX_REG_WR phy542xx_reg_write ++ ++static int ++phy542xx_rdb_reg_read(u32 phy_addr, u32 reg_addr, u16 *data) ++{ ++ int rv = SOC_E_NONE; ++ ++ /* MDIO write the RDB reg. address to reg.0x1E = */ ++ iproc_mii_write(MII_DEV_EXT, phy_addr, BCM542XX_REG_RDB_ADDR, ++ (0xffff & reg_addr)); ++ ++ /* MDIO read from reg.0x1F to get the RDB register's value as */ ++ iproc_mii_read(MII_DEV_EXT, phy_addr, BCM542XX_REG_RDB_DATA, data); ++ ++ return rv; ++} ++ ++static int ++phy542xx_rdb_reg_write(u32 phy_addr, u32 reg_addr, u16 data) ++{ ++ int rv = SOC_E_NONE; ++ ++ /* MDIO write the RDB reg. address to reg.0x1E = */ ++ iproc_mii_write(MII_DEV_EXT, phy_addr, BCM542XX_REG_RDB_ADDR, ++ (0xffff & reg_addr)); ++ ++ /* MDIO write to reg.0x1F to set the RDB resister's value as */ ++ iproc_mii_write(MII_DEV_EXT, phy_addr, BCM542XX_REG_RDB_DATA, data); ++ ++ return rv; ++} ++ ++int ++phy542xx_reg_read(u32 phy_addr, u32 flags, int reg_addr, u16 *data) ++{ ++ int rv = SOC_E_NONE; ++ u16 val; ++ ++ if (flags & PHY_REG_FLAGS_FIBER) { /* fiber registers */ ++ if (reg_addr <= 0x0f) { ++ if (flags & PHY_REG_FLAGS_1000X) { ++ /* 54220 fiber is controlled by Secondary SerDes */ ++ PHY542XX_RDB_REG_RD(phy_addr, ++ (reg_addr | BCM542XX_REG_RDB_2ND_SERDES_BASE), data); ++ } else { ++ /* Map 1000X page */ ++ PHY542XX_RDB_REG_RD(phy_addr, BCM542XX_REG_RDB_MODE_CTRL, &val); ++ val |= BCM542XX_REG_MODE_CTRL_1000X_EN; ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_MODE_CTRL, val); ++ ++ /* Read 1000X IEEE register */ ++ iproc_mii_read(MII_DEV_EXT, phy_addr, reg_addr, data); ++ ++ /* Restore IEEE mapping */ ++ PHY542XX_RDB_REG_RD(phy_addr, BCM542XX_REG_RDB_MODE_CTRL, &val); ++ val &= ~BCM542XX_REG_MODE_CTRL_1000X_EN; ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_MODE_CTRL, val); ++ } ++ } ++ } else if (flags & PHY_REG_FLAGS_RDB) { ++ PHY542XX_RDB_REG_RD(phy_addr, 
reg_addr, data); ++ } else { ++ iproc_mii_read(MII_DEV_EXT, phy_addr, reg_addr, data); ++ } ++ ++ return rv; ++} ++ ++ ++int ++phy542xx_reg_write(u32 phy_addr, u32 flags, int reg_addr, u16 data) ++{ ++ int rv = SOC_E_NONE; ++ u16 val; ++ ++ if (flags & PHY_REG_FLAGS_FIBER) { /* fiber registers */ ++ if (reg_addr <= 0x0f) { ++ if (flags & PHY_REG_FLAGS_1000X) { ++ /* 54220 fiber is controlled by Secondary SerDes */ ++ PHY542XX_RDB_REG_WR(phy_addr, ++ (reg_addr | BCM542XX_REG_RDB_2ND_SERDES_BASE), data); ++ } else { ++ /* Map 1000X page */ ++ PHY542XX_RDB_REG_RD(phy_addr, BCM542XX_REG_RDB_MODE_CTRL, &val); ++ val |= BCM542XX_REG_MODE_CTRL_1000X_EN; ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_MODE_CTRL, val); ++ ++ /* Write 1000X IEEE register */ ++ iproc_mii_write(MII_DEV_EXT, phy_addr, reg_addr, data); ++ ++ /* Restore IEEE mapping */ ++ PHY542XX_RDB_REG_RD(phy_addr, BCM542XX_REG_RDB_MODE_CTRL, &val); ++ val &= ~BCM542XX_REG_MODE_CTRL_1000X_EN; ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_MODE_CTRL, val); ++ } ++ } ++ } else if (flags & PHY_REG_FLAGS_RDB) { ++ PHY542XX_RDB_REG_WR(phy_addr, reg_addr, data); ++ } else { ++ iproc_mii_write(MII_DEV_EXT, phy_addr, reg_addr, data); ++ } ++ ++ return rv; ++} ++ ++static int ++phy542xx_ge_reset(u32 phy_addr) ++{ ++ int rv = SOC_E_NONE; ++ u16 val; ++ ++ NET_TRACE(("%s: phy_addr %d\n", __FUNCTION__, phy_addr)); ++ ++ /* Reset the PHY */ ++ PHY542XX_REG_RD(phy_addr, PHY_REG_FLAGS_NONE, PHY_MII_CTRLr_ADDR, &val); ++ val |= MII_CTRL_RESET; ++ PHY542XX_REG_WR(phy_addr, PHY_REG_FLAGS_NONE, PHY_MII_CTRLr_ADDR, val); ++ ++ SPINWAIT((!PHY542XX_REG_RD(phy_addr, PHY_REG_FLAGS_NONE, PHY_MII_CTRLr_ADDR, &val) && ++ (val & MII_CTRL_RESET)), 100000); ++ ++ /* Check if out of reset */ ++ PHY542XX_REG_RD(phy_addr, PHY_REG_FLAGS_NONE, PHY_MII_CTRLr_ADDR, &val); ++ if (val & MII_CTRL_RESET) { ++ NET_ERROR(("%s reset not complete\n", __FUNCTION__)); ++ rv = SOC_E_TIMEOUT; ++ } else { ++ NET_TRACE(("%s reset complete\n", 
__FUNCTION__)); ++ } ++ ++ return rv; ++} ++ ++static int ++phy542xx_ge_init(u32 phy_addr) ++{ ++ int rv = SOC_E_NONE; ++ u16 val; ++ ++ /* ++ * Enable direct RDB addressing mode, write to Expansion register ++ * 0x7E = 0x0000 ++ * - MDIO write to reg 0x17 = 0x0F7E ++ * - MDIO write to reg 0x15 = 0x0000 ++ */ ++ PHY542XX_REG_WR(phy_addr, PHY_REG_FLAGS_NONE, ++ BCM542XX_REG_EXP_SEL, BCM542XX_REG_EXP_SELECT_7E); ++ PHY542XX_REG_WR(phy_addr, PHY_REG_FLAGS_NONE, ++ BCM542XX_REG_EXP_DATA, BCM542XX_REG_EXP_RDB_EN); ++ ++ /* Configure auto-detect Medium */ ++ PHY542XX_RDB_REG_RD(phy_addr, MIIM_BCM542xx_RDB_AUTO_DETECT_MEDIUM, &val); ++ val &= ~BCM542XX_REG_MII_AUTO_DET_MASK; ++ /* Enable dual serdes auto-detect medium */ ++ val |= (BCM542XX_REG_MII_AUTO_DET_MED_2ND_SERDES | ++ BCM542XX_REG_MII_FIBER_IN_USE_LED | ++ BCM542XX_REG_MII_FIBER_LED | ++ BCM542XX_REG_MII_FIBER_SD_SYNC); ++ /* Enable auto-detect medium */ ++ val |= BCM542XX_REG_MII_AUTO_DET_MED_EN; ++ /* Fiber selected when both media are active */ ++ val |= (BCM542XX_REG_MII_AUTO_DET_MED_PRI | ++ BCM542XX_REG_MII_AUTO_DET_MED_DEFAULT); ++ PHY542XX_RDB_REG_WR(phy_addr, MIIM_BCM542xx_RDB_AUTO_DETECT_MEDIUM, val); ++ ++ /* Power up primary SerDes, Fiber MII_CONTROL Reg. 
bit[11]*/ ++ PHY542XX_REG_RD(phy_addr, PHY_REG_FLAGS_PRI_SERDES, PHY_MII_CTRLr_ADDR, &val); ++ val &= ~MII_CTRL_PD; ++ PHY542XX_REG_WR(phy_addr, PHY_REG_FLAGS_PRI_SERDES, PHY_MII_CTRLr_ADDR, val); ++ ++ /* MODE_CONTROL register, DIGX_SHD_1C_1F, RDB_0x21 */ ++ PHY542XX_RDB_REG_RD(phy_addr, BCM542XX_REG_RDB_MODE_CTRL, &val); ++ val &= ~(BCM542XX_REG_MODE_CNTL_MODE_SEL_1 | ++ BCM542XX_REG_MODE_CNTL_MODE_SEL_2); ++ val |= BCM542XX_REG_MODE_SEL_SGMII_2_COPPER; ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_MODE_CTRL, val); ++ ++ /* COPPER INTERFACE */ ++ PHY542XX_REG_RD(phy_addr, PHY_REG_FLAGS_NONE, PHY_MII_CTRLr_ADDR, &val); ++ val &= ~MII_CTRL_PD; ++ PHY542XX_REG_WR(phy_addr, PHY_REG_FLAGS_NONE, PHY_MII_CTRLr_ADDR, val); ++ ++ PHY542XX_REG_WR(phy_addr, PHY_REG_FLAGS_NONE, MII_CTRL1000, ADVERTISE_1000FULL); ++ PHY542XX_REG_WR(phy_addr, PHY_REG_FLAGS_NONE, PHY_MII_CTRLr_ADDR, ++ (BMCR_FULLDPLX | BMCR_SPEED100 | BMCR_ANENABLE | BMCR_ANRESTART)); ++ ++ /* Enable/disable auto-detection between SGMII-slave and 1000BASE-X */ ++ /* External Serdes Control Reg., DIGX_SHD_1C_14, RDB_0x234 */ ++ PHY542XX_RDB_REG_RD(phy_addr, BCM542XX_REG_RDB_EXT_SERDES_CTRL, &val); ++ val &= ~(BCM542XX_REG_EXT_SERDES_FX_MASK); ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_EXT_SERDES_CTRL, val); ++ ++ /* SGMII Slave Control Register, DIGX_SHD_1C_15, RDB_0x235 */ ++ PHY542XX_RDB_REG_RD(phy_addr, BCM542XX_REG_RDB_SGMII_SLAVE, &val); ++ val &= ~(BCM542XX_REG_SGMII_SLAVE_AUTO); ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_SGMII_SLAVE, val); ++ ++ /* FIBER INTERFACE */ ++ /* Remove power down of SerDes */ ++ PHY542XX_REG_RD(phy_addr, PHY_REG_FLAGS_1000X, PHY_MII_CTRLr_ADDR, &val); ++ val &= ~MII_CTRL_PD; ++ PHY542XX_REG_WR(phy_addr, PHY_REG_FLAGS_1000X, PHY_MII_CTRLr_ADDR, val); ++ ++ /* Set the advertisement of serdes */ ++ PHY542XX_REG_RD(phy_addr, PHY_REG_FLAGS_1000X, PHY_MII_ANAr_ADDR, &val); ++ val |= (MII_ANA_FD_1000X | MII_ANA_HD_1000X | ++ MII_ANA_1000X_PAUSE | 
MII_ANA_1000X_ASYM_PAUSE); ++ PHY542XX_REG_WR(phy_addr, PHY_REG_FLAGS_1000X, PHY_MII_ANAr_ADDR, val); ++ ++ /* Enable auto-detection between SGMII-slave and 1000BASE-X */ ++ /* External Serdes Control Reg., DIGX_SHD_1C_14, RDB_0x234 */ ++ val = (BCM542XX_REG_EXT_SERDES_LED | BCM542XX_REG_EXT_SEL_SYNC_ST | ++ BCM542XX_REG_EXT_SELECT_SD | BCM542XX_REG_EXT_SERDES_SEL); ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_EXT_SERDES_CTRL, val); ++ ++ /* SGMII Slave Control Register, DIGX_SHD_1C_15, RDB_0x235 */ ++ PHY542XX_RDB_REG_RD(phy_addr, BCM542XX_REG_RDB_SGMII_SLAVE, &val); ++ val &= ~(BCM542XX_REG_SGMII_SLAVE_AUTO); ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_SGMII_SLAVE, val); ++ ++ /* Miscellanous Control Reg., CORE_SHD18_111, RDB_0x02f */ ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_MII_MISC_CTRL, 0x2007); ++ ++ /* Second SERDES 100-FX CONTROL Register, RDB_0xb17 */ ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_2ND_SERDES_MISC_1000X, 0x0); ++ ++ /* Default SerDes config & restart autonegotiation */ ++ PHY542XX_REG_WR(phy_addr, PHY_REG_FLAGS_1000X, PHY_MII_CTRLr_ADDR, ++ (BMCR_FULLDPLX | BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART)); ++ ++ return rv; ++} ++ ++int ++phy542xx_reset_setup(u32 phy_addr) ++{ ++ int rv = SOC_E_NONE; ++ ++ NET_TRACE(("%s enter\n", __FUNCTION__)); ++ ++ rv = phy542xx_ge_reset(phy_addr); ++ ++ if (SOC_SUCCESS(rv)) { ++ rv = phy542xx_ge_init(phy_addr); ++ } ++ ++ return rv; ++} ++ ++int ++phy542xx_init(u32 phy_addr) ++{ ++ u16 phyid0, phyid1; ++ ++ NET_TRACE(("%s: phy_addr %d\n", __FUNCTION__, phy_addr)); ++ ++ PHY542XX_REG_RD(phy_addr, PHY_REG_FLAGS_NONE, PHY_MII_PHY_ID0r_ADDR, &phyid0); ++ PHY542XX_REG_RD(phy_addr, PHY_REG_FLAGS_NONE, PHY_MII_PHY_ID1r_ADDR, &phyid1); ++ ++ printf("%s Phy ChipID: 0x%04x:0x%04x\n", __FUNCTION__, phyid1, phyid0); ++ ++ phy542xx_reset_setup(phy_addr); ++ ++ return 0; ++} ++ ++int ++phy542xx_enable_set(u32 phy_addr, int enable) ++{ ++ u16 val; ++ ++ NET_TRACE(("%s: phy_addr %d\n", 
__FUNCTION__, phy_addr)); ++ ++ PHY542XX_REG_RD(phy_addr, PHY_REG_FLAGS_PRI_SERDES, PHY_MII_CTRLr_ADDR, &val); ++ if (enable) { ++ val &= ~MII_CTRL_PD; ++ } else { ++ val |= MII_CTRL_PD; ++ } ++ PHY542XX_REG_WR(phy_addr, PHY_REG_FLAGS_PRI_SERDES, PHY_MII_CTRLr_ADDR, val); ++ ++ return SOC_E_NONE; ++} ++ ++int ++phy542xx_force_auto_mdix(u32 phy_addr, int enable) ++{ ++ u16 val; ++ ++ NET_TRACE(("%s: phy_addr %d\n", __FUNCTION__, phy_addr)); ++ ++ PHY542XX_RDB_REG_RD(phy_addr, BCM542XX_REG_RDB_COPPER_MISC_CTRL, &val); ++ if (enable) { ++ val |= BCM542XX_REG_MISC_CTRL_FORCE_AUTO_MDIX; ++ } else { ++ val &= ~BCM542XX_REG_MISC_CTRL_FORCE_AUTO_MDIX; ++ } ++ PHY542XX_RDB_REG_WR(phy_addr, BCM542XX_REG_RDB_COPPER_MISC_CTRL, val); ++ ++ return SOC_E_NONE; ++} +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/sb2_erom.c b/drivers/net/ethernet/broadcom/gmac/src/shared/sb2_erom.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/sb2_erom.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/sb2_erom.c 2017-11-09 17:53:44.061307000 +0800 +@@ -0,0 +1,81 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Saber2 sudo EROM ++ * ++ */ ++#include ++ ++uint32 sb2_erom[] = { ++ // LOOP 1 : #define MFGID_BRCM 0x4bf /* Manufacturer Ids (mfg) */ ++ // #define CC_CORE_ID 0x800 /* chipcommon core (cid) */ ++ 0x4bf80001, ++ // crev = 0x2a, nsw = 0x0 (mask = 0x0x00f80000), nmw = 0x1 (mask = 0x0007c000), nsp = 0x1 (mask = 0x00003e00), nmp = 0x0 (mask = 0x000001f0) ++ 0x2a004201, ++ // First Slave Address Descriptor should be port 0: the main register space for the core ++ // addr1 = 0x18000000 (mask = 0xfffff000), addrh = 0x0 (AD_AG32 = 0x00000008), sizeh = 0x0, ++ // sz = 0x0 (AD_SZ_MASK = 0x00000030), sizel = 0x00001000 (0x00001000 << (sz >> 4)) ++ 0x18000005, ++ // Now get master wrappers (i = 0) ++ // addr1 = 0x18120000 (AD_ADDR_MASK = 0xfffff000), addrh = 0x0 (AD_AG32 = 0x00000008), sizeh = 0x0, ++ // sz = 0x0 (AD_SZ_MASK = 0x00000030), sizel = 0x00001000 (0x00001000 << (sz >> 4)) ++ 0x181200c5, ++ ++ // LOOP 2 : #define MFGID_BRCM 0x4bf /* Manufacturer Ids (mfg) */ ++ // #define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ ++ // cia = 0x4bf82d01 ++ // cid = 0x82d (cia & CIA_CID_MASK(0x000fff00)) >> CIA_CID_SHIFT(8), mfg = 0x4bf (cia & CIA_MFG_MASK(0xfff00000)) >> CIA_MFG_SHIFT(20) ++ 0x4bf82d01, ++ // cib = 0x04004211 ++ // crev = 0x04 (cib & CIB_REV_MASK(0xff000000)) >> CIB_REV_SHIFT(24), nmw = 0x01 (cib & CIB_NMW_MASK(0x0007c000)) >> CIB_NMW_SHIFT(14) ++ // nsw = 0x0 (cib & CIB_NSW_MASK(0x00f80000)) >> CIB_NSW_SHIFT(19), nmp = 0x01 (cib & CIB_NMP_MASK(0x000001f0)) >> CIB_NMP_SHIFT(4) ++ // nsp = 0x01 (cib & CIB_NSP_MASK(0x00003e00)) >> CIB_NSP_SHIFT(9) ++ 0x04004211, ++ 
// mpd = 0x00000103 ++ 0x00000103, ++ // First Slave Address Descriptor should be port 0: the main register space for the core ++ // addr1 = 0x18042000 (AD_ADDR_MASK = 0xfffff000), addrh = 0x0 (AD_AG32 = 0x00000008), sizeh = 0x0, ++ // sz = 0x0 (AD_SZ_MASK = 0x00000030), sizel = 0x00001000 (0x00001000 << (sz >> 4)) ++ 0x18042005, ++ // Now get master wrappers (i = 0) ++ // addr1 = 0x18110000 (AD_ADDR_MASK = 0xfffff000), addrh = 0x0 (AD_AG32 = 0x00000008), sizeh = 0x0, ++ // sz = 0x0 (AD_SZ_MASK = 0x00000030), sizel = 0x00001000 (0x00001000 << (sz >> 4)) ++ 0x181100c5, ++ ++ // LOOP 3 : #define MFGID_BRCM 0x4bf /* Manufacturer Ids (mfg) */ ++ // #define GMAC_CORE_ID 0x82d /* Gigabit MAC core */ ++ // cia = 0x4bf82d01 ++ // cid = 0x82d (cia & CIA_CID_MASK(0x000fff00)) >> CIA_CID_SHIFT(8), mfg = 0x4bf (cia & CIA_MFG_MASK(0xfff00000)) >> CIA_MFG_SHIFT(20) ++ 0x4bf82d01, ++ // cib = 0x04004211 ++ // crev = 0x04 (cib & CIB_REV_MASK(0xff000000)) >> CIB_REV_SHIFT(24), nmw = 0x01 (cib & CIB_NMW_MASK(0x0007c000)) >> CIB_NMW_SHIFT(14) ++ // nsw = 0x0 (cib & CIB_NSW_MASK(0x00f80000)) >> CIB_NSW_SHIFT(19), nmp = 0x01 (cib & CIB_NMP_MASK(0x000001f0)) >> CIB_NMP_SHIFT(4) ++ // nsp = 0x01 (cib & CIB_NSP_MASK(0x00003e00)) >> CIB_NSP_SHIFT(9) ++ 0x04004211, ++ // mpd = 0x00000203 ++ 0x00000203, ++ // First Slave Address Descriptor should be port 0: the main register space for the core ++ // addr1 = 0x1804a000 (AD_ADDR_MASK = 0xfffff000), addrh = 0x0 (AD_AG32 = 0x00000008), sizeh = 0x0, ++ // sz = 0x0 (AD_SZ_MASK = 0x00000030), sizel = 0x00001000 (0x00001000 << (sz >> 4)) ++ 0x1804a005, ++ // Now get master wrappers (i = 0) ++ // addr1 = 0x18110000 (AD_ADDR_MASK = 0xfffff000), addrh = 0x0 (AD_AG32 = 0x00000008), sizeh = 0x0, ++ // sz = 0x0 (AD_SZ_MASK = 0x00000030), sizel = 0x00001000 (0x00001000 << (sz >> 4)) ++ 0x181110c5, ++ ++ // END of parse loop 0x0f = (ER_END(0x0e) | ER_VALID(0x01)) ++ 0x0000000f ++}; +diff -uprN -EbwB --no-dereference -X 
/projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/sb2_erom.h b/drivers/net/ethernet/broadcom/gmac/src/shared/sb2_erom.h +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/sb2_erom.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/sb2_erom.h 2017-11-09 17:53:44.062302000 +0800 +@@ -0,0 +1,26 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ * ++ * Broadcom Home Networking Division 10/100 Mbit/s Ethernet ++ * Saber2 sudo EROM ++ * ++ */ ++ ++#ifndef _sb2_erom_h_ ++#define _sb2_erom_h_ ++ ++extern uint32 sb2_erom[]; ++ ++#endif /* _sb2_erom_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/sgmiiplus2_serdes.c b/drivers/net/ethernet/broadcom/gmac/src/shared/sgmiiplus2_serdes.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/sgmiiplus2_serdes.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/sgmiiplus2_serdes.c 2017-11-09 17:53:44.063293000 +0800 +@@ -0,0 +1,102 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ * These routines provide access to the serdes ++ * ++ */ ++ ++/* ---- Include Files ---------------------------------------------------- */ ++#include ++#include ++#include "bcmiproc_serdes.h" ++#include "bcmiproc_serdes_def.h" ++#include "../../../mdio/iproc_mdio.h" ++ ++/* ---- External Variable Declarations ----------------------------------- */ ++/* ---- External Function Prototypes ------------------------------------- */ ++/* ---- Public Variables ------------------------------------------------- */ ++/* ---- Private Constants and Types -------------------------------------- */ ++/* ---- Private Variables ------------------------------------------------ */ ++ ++/* debug/trace */ ++//#define BCMDBG ++//#define BCMDBG_ERR ++#ifdef BCMDBG ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) printf args ++#elif defined(BCMDBG_ERR) ++#define NET_ERROR(args) printf args ++#define NET_TRACE(args) ++#else ++#define NET_ERROR(args) ++#define NET_TRACE(args) ++#endif /* BCMDBG */ ++#define NET_REG_TRACE(args) ++ ++#ifndef ASSERT ++#define ASSERT(exp) ++#endif ++ ++void ++sgmiiplus2_serdes_reset(uint eth_num, uint phyaddr) ++{ ++ uint16 ctrl; ++ ++ ASSERT(phyaddr < MAXEPHY); ++ ++ if (phyaddr == EPHY_NOREG) 
++ return; ++ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x1f, 0x0); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0, 0x8000); ++ ++ udelay(100); ++ ++ iproc_mii_read(MII_DEV_LOCAL, phyaddr, 0x0, &ctrl); ++ if (ctrl & 0x8000) ++ NET_ERROR(("et%d: %s serdes reset not complete\n", eth_num, __FUNCTION__)); ++ ++} ++ ++int ++sgmiiplus2_serdes_init(uint eth_num, uint phyaddr) ++{ ++ u16 id1, id2; ++ ++ iproc_mii_read(MII_DEV_LOCAL, phyaddr, 0x0002, &id1); ++ iproc_mii_read(MII_DEV_LOCAL, phyaddr, 0x0003, &id2); ++ printf("Internal phyaddr %d: Get PHY ID0:%.4x, ID1:%.4x\n", phyaddr, id1, id2); ++ ++ /* Disable PLL */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0xffd0); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001e, 0x0000); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8000); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x062f); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001e, 0x0000); ++ ++ /* Disable lmtcal (broadcast to all lanes) */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0xffd0); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001e, 0x001f); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8480); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0012, 0x83f8); ++ ++ /* Auto negotiation 10/100/1G - SGMII Slave (broadcast to all lanes) */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8300); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x0100); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8340); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001a, 0x0003); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x0000); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0000, 0x1140); ++ ++ /* Change PLL calibration threshold to 0xc */ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0xffd0); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001e, 0x0000); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x001f, 0x8180); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0011, 0x0600); ++ ++ /* Enable PLL */ ++ iproc_mii_write(MII_DEV_LOCAL, 
phyaddr, 0x001f, 0x8000); ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x0010, 0x262f); ++ ++ return 0; ++} +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/siutils.c b/drivers/net/ethernet/broadcom/gmac/src/shared/siutils.c +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/siutils.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/siutils.c 2017-11-09 17:53:44.065294000 +0800 +@@ -0,0 +1,1409 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Misc utility routines for accessing chip-specific features ++ * of the SiliconBackplane-based Broadcom chips. 
++ * ++ * $Id: siutils.c 328955 2012-04-23 09:06:12Z $ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "siutils_priv.h" ++ ++/* local prototypes */ ++static si_info_t *si_doattach(si_info_t *sii, uint devid, osl_t *osh, void *regs, ++ uint bustype, void *sdh, char **vars, uint *varsz); ++static bool si_buscore_setup(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, ++ uint *origidx, void *regs); ++ ++static void si_nvram_process(si_info_t *sii, char *pvars); ++/* dev path concatenation util */ ++static char *si_devpathvar(si_t *sih, char *var, int len, const char *name); ++static bool _si_clkctl_cc(si_info_t *sii, uint mode); ++ ++/* global variable to indicate reservation/release of gpio's */ ++static uint32 si_gpioreservation = 0; ++ ++/* global flag to prevent shared resources from being initialized multiple times in si_attach() */ ++ ++/* ++ * Allocate a si handle. ++ * devid - pci device id (used to determine chip#) ++ * osh - opaque OS handle ++ * regs - virtual address of initial core registers ++ * bustype - pci/pcmcia/sb/sdio/etc ++ * vars - pointer to a pointer area for "environment" variables ++ * varsz - pointer to int to return the size of the vars ++ */ ++si_t * ++BCMATTACHFN(si_attach)(uint devid, osl_t *osh, void *regs, ++ uint bustype, void *sdh, char **vars, uint *varsz) ++{ ++ si_info_t *sii; ++ si_t *sih; ++ ++ /* alloc si_info_t */ ++ if ((sii = MALLOC(osh, sizeof (si_info_t))) == NULL) { ++ SI_ERROR(("si_attach: malloc failed! malloced %d bytes\n", MALLOCED(osh))); ++ return (NULL); ++ } ++ ++ if (si_doattach(sii, devid, osh, regs, bustype, sdh, vars, varsz) == NULL) { ++ MFREE(osh, sii, sizeof(si_info_t)); ++ SI_ERROR(("%s si_doattach() failed\n", __FUNCTION__)); ++ return (NULL); ++ } ++ sii->vars = vars ? *vars : NULL; ++ sii->varsz = varsz ? 
*varsz : 0; ++ ++ sih = (si_t*)sii; ++ printk("%s socitype(0x%x) chip(0x%x) chiprev(0x%x) chippkg(0x%x)\n", ++ __FUNCTION__, sih->socitype, sih->chip, sih->chiprev, sih->chippkg); ++ ++ return (si_t *)sii; ++} ++ ++static bool ++BCMATTACHFN(si_buscore_setup)(si_info_t *sii, chipcregs_t *cc, uint bustype, uint32 savewin, ++ uint *origidx, void *regs) ++{ ++ bool pci, pcie; ++ uint i; ++ uint pciidx, pcieidx, pcirev, pcierev; ++ ++ cc = si_setcoreidx(&sii->pub, SI_CC_IDX); ++ ASSERT((uintptr)cc); ++ ++ /* get chipcommon rev */ ++ sii->pub.ccrev = (int)si_corerev(&sii->pub); ++ ++ /* get chipcommon chipstatus */ ++ if (sii->pub.ccrev >= 11) { ++ sii->pub.chipst = R_REG(sii->osh, &cc->chipstatus); ++ } ++ ++ /* get chipcommon capabilites */ ++ sii->pub.cccaps = R_REG(sii->osh, &cc->capabilities); ++ /* get chipcommon extended capabilities */ ++ if (sii->pub.ccrev >= 35) { ++ sii->pub.cccaps_ext = R_REG(sii->osh, &cc->capabilities_ext); ++ } ++ ++ /* get pmu rev and caps */ ++ if (sii->pub.cccaps & CC_CAP_PMU) { ++ sii->pub.pmucaps = R_REG(sii->osh, &cc->pmucapabilities); ++ sii->pub.pmurev = sii->pub.pmucaps & PCAP_REV_MASK; ++ } ++ ++ SI_MSG(("Chipc: rev %d, caps 0x%x, chipst 0x%x pmurev %d, pmucaps 0x%x\n", ++ sii->pub.ccrev, sii->pub.cccaps, sii->pub.chipst, sii->pub.pmurev, ++ sii->pub.pmucaps)); ++ ++ /* figure out bus/orignal core idx */ ++ sii->pub.buscoretype = NODEV_CORE_ID; ++ sii->pub.buscorerev = (uint)NOREV; ++ sii->pub.buscoreidx = BADIDX; ++ ++ pci = pcie = FALSE; ++ pcirev = pcierev = (uint)NOREV; ++ pciidx = pcieidx = BADIDX; ++ ++ for (i = 0; i < sii->numcores; i++) { ++ uint cid, crev; ++ ++ si_setcoreidx(&sii->pub, i); ++ cid = si_coreid(&sii->pub); ++ crev = si_corerev(&sii->pub); ++ ++ /* Display cores found */ ++ SI_VMSG(("CORE[%d]: id 0x%x rev %d base 0x%x regs 0x%p\n", ++ i, cid, crev, sii->coresba[i], sii->regs[i])); ++ ++ /* find the core idx before entering this func. 
*/ ++ if ((savewin && (savewin == sii->coresba[i])) || ++ (regs == sii->regs[i])) { ++ *origidx = i; ++ } ++ } ++ ++ SI_VMSG(("Buscore id/type/rev %d/0x%x/%d\n", sii->pub.buscoreidx, sii->pub.buscoretype, ++ sii->pub.buscorerev)); ++ ++ /* return to the original core */ ++ si_setcoreidx(&sii->pub, *origidx); ++ ++ return TRUE; ++} ++ ++static void ++BCMATTACHFN(si_nvram_process)(si_info_t *sii, char *pvars) ++{ ++ /* get boardtype and boardrev */ ++ switch (BUSTYPE(sii->pub.bustype)) { ++ case SI_BUS: ++ sii->pub.boardvendor = VENDOR_BROADCOM; ++ if (pvars == NULL || ((sii->pub.boardtype = getintvar(pvars, "prodid")) == 0)) { ++ if ((sii->pub.boardtype = getintvar(NULL, "boardtype")) == 0) { ++ sii->pub.boardtype = 0xffff; ++ } ++ } ++ break; ++ } ++ ++ if (sii->pub.boardtype == 0) { ++ SI_ERROR(("si_doattach: unknown board type\n")); ++ ASSERT(sii->pub.boardtype); ++ } ++ ++ sii->pub.boardrev = getintvar(pvars, "boardrev"); ++ sii->pub.boardflags = getintvar(pvars, "boardflags"); ++} ++ ++static si_info_t * ++BCMATTACHFN(si_doattach)(si_info_t *sii, uint devid, osl_t *osh, void *regs, ++ uint bustype, void *sdh, char **vars, uint *varsz) ++{ ++ struct si_pub *sih = &sii->pub; ++ uint32 w, savewin; ++ chipcregs_t *cc; ++ char *pvars = NULL; ++ uint origidx; ++ ASSERT(GOODREGS(regs)); ++ ++ bzero((uchar*)sii, sizeof(si_info_t)); ++ ++ savewin = 0; ++ sih->buscoreidx = BADIDX; ++ sii->curmap = regs; ++ sii->sdh = sdh; ++ sii->osh = osh; ++ ++ /* find Chipcommon address */ ++ cc = (chipcregs_t *)REG_MAP(SI_ENUM_BASE, SI_CORE_SIZE); ++ ++ sih->bustype = bustype; ++ if (bustype != BUSTYPE(bustype)) { ++ SI_ERROR(("si_doattach: bus type %d does not match configured bus type %d\n", ++ bustype, BUSTYPE(bustype))); ++ return NULL; ++ } ++ ++ /* ChipID recognition. ++ * We assume we can read chipid at offset 0 from the regs arg. ++ * If we add other chiptypes (or if we need to support old sdio hosts w/o chipcommon), ++ * some way of recognizing them needs to be added here. 
++ */ ++ if (!cc) { ++ SI_ERROR(("%s: chipcommon register space is null \n", __FUNCTION__)); ++ return NULL; ++ } ++ w = R_REG(osh, &cc->chipid); ++ printk("%s chipid: 0x%x\n", __FUNCTION__, w); ++#if defined(CONFIG_MACH_IPROC_P7) ++ sih->socitype = SOCI_AI; ++ /* get chip id rev & pkg */ ++ sih->chip = w & 0xfffff; ++ sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT; ++ w = R_REG(osh, &cc->capabilities); ++ sih->chiprev = w & 0xff; ++#else ++ sih->socitype = (w & CID_TYPE_MASK) >> CID_TYPE_SHIFT; ++ /* Might as wll fill in chip id rev & pkg */ ++ sih->chip = w & CID_ID_MASK; ++ sih->chiprev = (w & CID_REV_MASK) >> CID_REV_SHIFT; ++ sih->chippkg = (w & CID_PKG_MASK) >> CID_PKG_SHIFT; ++ /* printk("%s chip: 0x%x; chiprev: 0x%x; chippkg: 0x%x\n", __FUNCTION__, sih->chip, sih->chiprev, sih->chippkg); */ ++#endif /* CONFIG_MACH_IPROC_P7 */ ++ ++ sih->issim = IS_SIM(sih->chippkg); ++ ++ /* scan for cores */ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ SI_MSG(("Found chip type SB (0x%08x)\n", w)); ++ sb_scan(sih, regs, devid); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ SI_MSG(("Found chip type %s (0x%08x)\n", (CHIPTYPE(sih->socitype) == SOCI_AI) ? 
"AI" : "NS", w)); ++ ai_scan(sih, (void *)(uintptr)cc, devid); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ SI_MSG(("Found chip type UBUS (0x%08x), chip id = 0x%4x\n", w, sih->chip)); ++ ub_scan(sih, (void *)(uintptr)cc, devid); ++ } else { ++ SI_ERROR(("Found chip of unknown type (0x%08x)\n", w)); ++ return NULL; ++ } ++ ++ /* no cores found, bail out */ ++ if (sii->numcores == 0) { ++ SI_ERROR(("si_doattach: could not find any cores\n")); ++ return NULL; ++ } ++ /* bus/core/clk setup */ ++ origidx = SI_CC_IDX; ++ if (!si_buscore_setup(sii, cc, bustype, savewin, &origidx, regs)) { ++ SI_ERROR(("si_doattach: si_buscore_setup failed\n")); ++ goto exit; ++ } ++ ++ spin_lock_init(&sih->sih_lock); ++ ++ /* Init nvram from flash if it exists */ ++ nvram_init((void *)sih); ++ ++ pvars = vars ? *vars : NULL; ++ si_nvram_process(sii, pvars); ++ ++ /* bootloader should retain default pulls */ ++#ifndef BCM_BOOTLOADER ++ if (sih->ccrev >= 20) { ++ uint32 gpiopullup = 0, gpiopulldown = 0; ++ cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0); ++ ASSERT(cc != NULL); ++ ++ W_REG(osh, &cc->gpiopullup, gpiopullup); ++ W_REG(osh, &cc->gpiopulldown, gpiopulldown); ++ si_setcoreidx(sih, origidx); ++ } ++#endif /* !BCM_BOOTLOADER */ ++ ++ ++ /* setup the GPIO based LED powersave register */ ++ if (sih->ccrev >= 16) { ++ if ((w = getintvar(pvars, "leddc")) == 0) { ++ w = DEFAULT_GPIOTIMERVAL; ++ } ++ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, gpiotimerval), ~0, w); ++ } ++ ++#if !defined(_CFE_) || defined(CFG_WL) ++ /* enable GPIO interrupts when clocks are off */ ++ if (sih->ccrev >= 21) { ++ uint32 corecontrol; ++ corecontrol = si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, corecontrol), ++ 0, 0); ++ corecontrol |= CC_ASYNCGPIO; ++ si_corereg(sih, SI_CC_IDX, OFFSETOF(chipcregs_t, corecontrol), ++ corecontrol, corecontrol); ++ } ++#endif /* !_CFE_ || CFG_WL */ ++ ++ return (sii); ++exit: ++ return NULL; ++} ++ ++/* may be called with core in reset */ ++void 
++BCMATTACHFN(si_detach)(si_t *sih) ++{ ++ si_info_t *sii; ++ uint idx; ++ ++ sii = SI_INFO(sih); ++ if (sii == NULL) { ++ return; ++ } ++ ++ if (BUSTYPE(sih->bustype) == SI_BUS) { ++ for (idx = 0; idx < SI_MAXCORES; idx++) { ++ if (sii->regs[idx]) { ++ REG_UNMAP(sii->regs[idx]); ++ sii->regs[idx] = NULL; ++ } ++ } ++ } ++ ++ MFREE(sii->osh, sii, sizeof(si_info_t)); ++} ++ ++void * ++si_osh(si_t *sih) ++{ ++ si_info_t *sii; ++ ++ sii = SI_INFO(sih); ++ return sii->osh; ++} ++ ++void ++si_setosh(si_t *sih, osl_t *osh) ++{ ++ si_info_t *sii; ++ ++ sii = SI_INFO(sih); ++ if (sii->osh != NULL) { ++ SI_ERROR(("osh is already set....\n")); ++ ASSERT(!sii->osh); ++ } ++ sii->osh = osh; ++} ++ ++uint ++si_intflag(si_t *sih) ++{ ++ si_info_t *sii = SI_INFO(sih); ++ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_intflag(sih); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return R_REG(sii->osh, ((uint32 *)(uintptr) ++ (sii->oob_router + OOB_STATUSA))); ++ } ++ ++ ASSERT(0); ++ return 0; ++} ++ ++uint ++si_flag(si_t *sih) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_flag(sih); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_flag(sih); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_flag(sih); ++ } ++ ++ ASSERT(0); ++ return 0; ++} ++ ++void ++si_setint(si_t *sih, int siflag) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ sb_setint(sih, siflag); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ ai_setint(sih, siflag); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ ub_setint(sih, siflag); ++ } else { ++ ASSERT(0); ++ } ++} ++ ++uint ++si_coreid(si_t *sih) ++{ ++ si_info_t *sii; ++ ++ sii = SI_INFO(sih); ++ return sii->coreid[sii->curidx]; ++} ++ ++uint ++si_coreidx(si_t *sih) ++{ ++ si_info_t *sii; ++ ++ sii = SI_INFO(sih); ++ return sii->curidx; ++} ++ ++/* 
return the core-type instantiation # of the current core */ ++uint ++si_coreunit(si_t *sih) ++{ ++ si_info_t *sii; ++ uint idx; ++ uint coreid; ++ uint coreunit; ++ uint i; ++ ++ sii = SI_INFO(sih); ++ coreunit = 0; ++ ++ idx = sii->curidx; ++ ++ ASSERT(GOODREGS(sii->curmap)); ++ coreid = si_coreid(sih); ++ ++ /* count the cores of our type */ ++ for (i = 0; i < idx; i++) { ++ if (sii->coreid[i] == coreid) { ++ coreunit++; ++ } ++ } ++ ++ return (coreunit); ++} ++ ++uint ++si_corevendor(si_t *sih) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_corevendor(sih); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_corevendor(sih); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_corevendor(sih); ++ } ++ ++ ASSERT(0); ++ return 0; ++} ++ ++bool ++si_backplane64(si_t *sih) ++{ ++ return ((sih->cccaps & CC_CAP_BKPLN64) != 0); ++} ++ ++uint ++si_corerev(si_t *sih) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_corerev(sih); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_corerev(sih); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_corerev(sih); ++ } ++ ++ ASSERT(0); ++ return 0; ++} ++ ++/* return index of coreid or BADIDX if not found */ ++uint ++si_findcoreidx(si_t *sih, uint coreid, uint coreunit) ++{ ++ si_info_t *sii; ++ uint found; ++ uint i; ++ ++ sii = SI_INFO(sih); ++ ++ found = 0; ++ ++ for (i = 0; i < sii->numcores; i++) { ++ if (sii->coreid[i] == coreid) { ++ if (found == coreunit) ++ return (i); ++ found++; ++ } ++ } ++ ++ return (BADIDX); ++} ++ ++/* return list of found cores */ ++uint ++si_corelist(si_t *sih, uint coreid[]) ++{ ++ si_info_t *sii; ++ ++ sii = SI_INFO(sih); ++ ++ bcopy((uchar*)sii->coreid, (uchar*)coreid, (sii->numcores * sizeof(uint))); ++ return (sii->numcores); ++} ++ ++/* return current register mapping */ ++void * ++si_coreregs(si_t *sih) ++{ ++ si_info_t 
*sii; ++ ++ sii = SI_INFO(sih); ++ ASSERT(GOODREGS(sii->curmap)); ++ ++ return (sii->curmap); ++} ++ ++/* ++ * This function changes logical "focus" to the indicated core; ++ * must be called with interrupts off. ++ * Moreover, callers should keep interrupts off during switching out of and back to d11 core ++ */ ++void * ++si_setcore(si_t *sih, uint coreid, uint coreunit) ++{ ++ uint idx; ++ ++ idx = si_findcoreidx(sih, coreid, coreunit); ++ if (!GOODIDX(idx)) { ++ return (NULL); ++ } ++ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_setcoreidx(sih, idx); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_setcoreidx(sih, idx); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_setcoreidx(sih, idx); ++ } ++ ++ ASSERT(0); ++ return NULL; ++} ++ ++void * ++si_setcoreidx(si_t *sih, uint coreidx) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_setcoreidx(sih, coreidx); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_setcoreidx(sih, coreidx); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_setcoreidx(sih, coreidx); ++ } ++ ++ ASSERT(0); ++ return 0; ++} ++ ++/* Turn off interrupt as required by sb_setcore, before switch core */ ++void * ++si_switch_core(si_t *sih, uint coreid, uint *origidx, uint *intr_val) ++{ ++ void *cc; ++ si_info_t *sii; ++ ++ sii = SI_INFO(sih); ++ ++ if (SI_FAST(sii)) { ++ /* Overloading the origidx variable to remember the coreid, ++ * this works because the core ids cannot be confused with ++ * core indices. 
++ */ ++ *origidx = coreid; ++ if (coreid == CC_CORE_ID) { ++ return (void *)CCREGS_FAST(sii); ++ } else if (coreid == sih->buscoretype) { ++ return (void *)PCIEREGS(sii); ++ } ++ } ++ INTR_OFF(sii, *intr_val); ++ *origidx = sii->curidx; ++ cc = si_setcore(sih, coreid, 0); ++ ASSERT(cc != NULL); ++ ++ return cc; ++} ++ ++/* restore coreidx and restore interrupt */ ++void ++si_restore_core(si_t *sih, uint coreid, uint intr_val) ++{ ++ si_info_t *sii; ++ ++ sii = SI_INFO(sih); ++ if (SI_FAST(sii) && ((coreid == CC_CORE_ID) || (coreid == sih->buscoretype))) { ++ return; ++ } ++ ++ si_setcoreidx(sih, coreid); ++ INTR_RESTORE(sii, intr_val); ++} ++ ++int ++si_numaddrspaces(si_t *sih) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_numaddrspaces(sih); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_numaddrspaces(sih); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_numaddrspaces(sih); ++ } ++ ++ ASSERT(0); ++ return 0; ++} ++ ++uint32 ++si_addrspace(si_t *sih, uint asidx) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_addrspace(sih, asidx); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_addrspace(sih, asidx); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_addrspace(sih, asidx); ++ } ++ ++ ASSERT(0); ++ return 0; ++} ++ ++uint32 ++si_addrspacesize(si_t *sih, uint asidx) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_addrspacesize(sih, asidx); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_addrspacesize(sih, asidx); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_addrspacesize(sih, asidx); ++ } ++ ++ ASSERT(0); ++ return 0; ++} ++ ++void ++si_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size) ++{ ++ /* Only supported for SOCI_AI */ ++ if ((CHIPTYPE(sih->socitype) == SOCI_AI) || 
(CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ ai_coreaddrspaceX(sih, asidx, addr, size); ++ } else { ++ *size = 0; ++ } ++} ++ ++uint32 ++si_core_cflags(si_t *sih, uint32 mask, uint32 val) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_core_cflags(sih, mask, val); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_core_cflags(sih, mask, val); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_core_cflags(sih, mask, val); ++ } ++ ++ ASSERT(0); ++ return 0; ++} ++ ++void ++si_core_cflags_wo(si_t *sih, uint32 mask, uint32 val) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ sb_core_cflags_wo(sih, mask, val); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ ai_core_cflags_wo(sih, mask, val); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ ub_core_cflags_wo(sih, mask, val); ++ } else { ++ ASSERT(0); ++ } ++} ++ ++uint32 ++si_core_sflags(si_t *sih, uint32 mask, uint32 val) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_core_sflags(sih, mask, val); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_core_sflags(sih, mask, val); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_core_sflags(sih, mask, val); ++ } ++ ++ ASSERT(0); ++ return 0; ++} ++ ++bool ++si_iscoreup(si_t *sih) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_iscoreup(sih); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_iscoreup(sih); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_iscoreup(sih); ++ } ++ ++ ASSERT(0); ++ return FALSE; ++} ++ ++uint ++si_wrapperreg(si_t *sih, uint32 offset, uint32 mask, uint32 val) ++{ ++ /* only for AI back plane chips */ ++ if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return (ai_wrap_reg(sih, offset, mask, 
val)); ++ } ++ return 0; ++} ++ ++uint ++si_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ return sb_corereg(sih, coreidx, regoff, mask, val); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ return ai_corereg(sih, coreidx, regoff, mask, val); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ return ub_corereg(sih, coreidx, regoff, mask, val); ++ } ++ ++ ASSERT(0); ++ return 0; ++} ++ ++void ++si_core_disable(si_t *sih, uint32 bits) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ sb_core_disable(sih, bits); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ ai_core_disable(sih, bits); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ ub_core_disable(sih, bits); ++ } ++} ++ ++void ++si_core_reset(si_t *sih, uint32 bits, uint32 resetbits) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ sb_core_reset(sih, bits, resetbits); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ ai_core_reset(sih, bits, resetbits); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ ub_core_reset(sih, bits, resetbits); ++ } ++} ++ ++/* Run bist on current core. 
Caller needs to take care of core-specific bist hazards */ ++int ++si_corebist(si_t *sih) ++{ ++ uint32 cflags; ++ int result = 0; ++ ++ /* Read core control flags */ ++ cflags = si_core_cflags(sih, 0, 0); ++ ++ /* Set bist & fgc */ ++ si_core_cflags(sih, ~0, (SICF_BIST_EN | SICF_FGC)); ++ ++ /* Wait for bist done */ ++ SPINWAIT(((si_core_sflags(sih, 0, 0) & SISF_BIST_DONE) == 0), 100000); ++ ++ if (si_core_sflags(sih, 0, 0) & SISF_BIST_ERROR) { ++ result = BCME_ERROR; ++ } ++ ++ /* Reset core control flags */ ++ si_core_cflags(sih, 0xffff, cflags); ++ ++ return result; ++} ++ ++static uint32 ++BCMINITFN(factor6)(uint32 x) ++{ ++ switch (x) { ++ case CC_F6_2: return 2; ++ case CC_F6_3: return 3; ++ case CC_F6_4: return 4; ++ case CC_F6_5: return 5; ++ case CC_F6_6: return 6; ++ case CC_F6_7: return 7; ++ default: return 0; ++ } ++} ++ ++/* calculate the speed the SI would run at given a set of clockcontrol values */ ++uint32 ++BCMINITFN(si_clock_rate)(uint32 pll_type, uint32 n, uint32 m) ++{ ++ uint32 n1, n2, clock, m1, m2, m3, mc; ++ ++ n1 = n & CN_N1_MASK; ++ n2 = (n & CN_N2_MASK) >> CN_N2_SHIFT; ++ ++ if (pll_type == PLL_TYPE6) { ++ if (m & CC_T6_MMASK) { ++ return CC_T6_M1; ++ } else { ++ return CC_T6_M0; ++ } ++ } else if ((pll_type == PLL_TYPE1) || ++ (pll_type == PLL_TYPE3) || ++ (pll_type == PLL_TYPE4) || ++ (pll_type == PLL_TYPE7)) { ++ n1 = factor6(n1); ++ n2 += CC_F5_BIAS; ++ } else if (pll_type == PLL_TYPE2) { ++ n1 += CC_T2_BIAS; ++ n2 += CC_T2_BIAS; ++ ASSERT((n1 >= 2) && (n1 <= 7)); ++ ASSERT((n2 >= 5) && (n2 <= 23)); ++ } else if (pll_type == PLL_TYPE5) { ++ return (100000000); ++ } else { ++ ASSERT(0); ++ } ++ ++ /* PLL types 3 and 7 use BASE2 (25Mhz) */ ++ if ((pll_type == PLL_TYPE3) || ++ (pll_type == PLL_TYPE7)) { ++ clock = CC_CLOCK_BASE2 * n1 * n2; ++ } else { ++ clock = CC_CLOCK_BASE1 * n1 * n2; ++ } ++ ++ if (clock == 0) { ++ return 0; ++ } ++ ++ m1 = m & CC_M1_MASK; ++ m2 = (m & CC_M2_MASK) >> CC_M2_SHIFT; ++ m3 = (m & CC_M3_MASK) >> 
CC_M3_SHIFT; ++ mc = (m & CC_MC_MASK) >> CC_MC_SHIFT; ++ ++ if ((pll_type == PLL_TYPE1) || ++ (pll_type == PLL_TYPE3) || ++ (pll_type == PLL_TYPE4) || ++ (pll_type == PLL_TYPE7)) { ++ m1 = factor6(m1); ++ if ((pll_type == PLL_TYPE1) || (pll_type == PLL_TYPE3)) { ++ m2 += CC_F5_BIAS; ++ } else { ++ m2 = factor6(m2); ++ } ++ m3 = factor6(m3); ++ ++ switch (mc) { ++ case CC_MC_BYPASS: return (clock); ++ case CC_MC_M1: return (clock / m1); ++ case CC_MC_M1M2: return (clock / (m1 * m2)); ++ case CC_MC_M1M2M3: return (clock / (m1 * m2 * m3)); ++ case CC_MC_M1M3: return (clock / (m1 * m3)); ++ default: return (0); ++ } ++ } else { ++ ASSERT(pll_type == PLL_TYPE2); ++ ++ m1 += CC_T2_BIAS; ++ m2 += CC_T2M2_BIAS; ++ m3 += CC_T2_BIAS; ++ ASSERT((m1 >= 2) && (m1 <= 7)); ++ ASSERT((m2 >= 3) && (m2 <= 10)); ++ ASSERT((m3 >= 2) && (m3 <= 7)); ++ ++ if ((mc & CC_T2MC_M1BYP) == 0) { ++ clock /= m1; ++ } ++ if ((mc & CC_T2MC_M2BYP) == 0) { ++ clock /= m2; ++ } ++ if ((mc & CC_T2MC_M3BYP) == 0) { ++ clock /= m3; ++ } ++ ++ return (clock); ++ } ++} ++ ++uint32 ++BCMINITFN(si_clock)(si_t *sih) ++{ ++ if (sih->chippkg == BCM4709_PKG_ID) { ++ return NS_SI_CLOCK; ++ } ++ return NS_SLOW_SI_CLOCK; ++} ++ ++#if defined(BCMDBG) ++/* print interesting sbconfig registers */ ++void ++si_dumpregs(si_t *sih, struct bcmstrbuf *b) ++{ ++ si_info_t *sii; ++ uint origidx, intr_val = 0; ++ ++ sii = SI_INFO(sih); ++ origidx = sii->curidx; ++ ++ INTR_OFF(sii, intr_val); ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ sb_dumpregs(sih, b); ++ } else if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ ai_dumpregs(sih, b); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ ub_dumpregs(sih, b); ++ } else { ++ ASSERT(0); ++ } ++ ++ si_setcoreidx(sih, origidx); ++ INTR_RESTORE(sii, intr_val); ++} ++#endif ++ ++#ifdef BCMDBG ++void ++si_view(si_t *sih, bool verbose) ++{ ++ if (CHIPTYPE(sih->socitype) == SOCI_SB) { ++ sb_view(sih, verbose); ++ } else if 
((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ ai_view(sih, verbose); ++ } else if (CHIPTYPE(sih->socitype) == SOCI_UBUS) { ++ ub_view(sih, verbose); ++ } else { ++ ASSERT(0); ++ } ++} ++ ++void ++si_viewall(si_t *sih, bool verbose) ++{ ++ si_info_t *sii; ++ uint curidx, i; ++ uint intr_val = 0; ++ ++ sii = SI_INFO(sih); ++ curidx = sii->curidx; ++ ++ INTR_OFF(sii, intr_val); ++ if ((CHIPTYPE(sih->socitype) == SOCI_AI) || (CHIPTYPE(sih->socitype) == SOCI_NS)) { ++ ai_viewall(sih, verbose); ++ } else { ++ SI_ERROR(("si_viewall: num_cores %d\n", sii->numcores)); ++ for (i = 0; i < sii->numcores; i++) { ++ si_setcoreidx(sih, i); ++ si_view(sih, verbose); ++ } ++ } ++ si_setcoreidx(sih, curidx); ++ INTR_RESTORE(sii, intr_val); ++} ++#endif /* BCMDBG */ ++ ++/* return the slow clock source - LPO, XTAL, or PCI */ ++static uint ++si_slowclk_src(si_info_t *sii) ++{ ++ chipcregs_t *cc; ++ ++ ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); ++ ++ if (sii->pub.ccrev < 6) { ++ return (SCC_SS_XTAL); ++ } else if (sii->pub.ccrev < 10) { ++ cc = (chipcregs_t *)si_setcoreidx(&sii->pub, sii->curidx); ++ return (R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_SS_MASK); ++ } else { /* Insta-clock */ ++ return (SCC_SS_XTAL); ++ } ++} ++ ++/* return the ILP (slowclock) min or max frequency */ ++static uint ++si_slowclk_freq(si_info_t *sii, bool max_freq, chipcregs_t *cc) ++{ ++ uint32 slowclk; ++ uint div; ++ ++ ASSERT(SI_FAST(sii) || si_coreid(&sii->pub) == CC_CORE_ID); ++ ++ /* shouldn't be here unless we've established the chip has dynamic clk control */ ++ ASSERT(R_REG(sii->osh, &cc->capabilities) & CC_CAP_PWR_CTL); ++ ++ slowclk = si_slowclk_src(sii); ++ if (sii->pub.ccrev < 6) { ++ if (slowclk == SCC_SS_PCI) { ++ return (max_freq ? (PCIMAXFREQ / 64) : (PCIMINFREQ / 64)); ++ } else { ++ return (max_freq ? 
(XTALMAXFREQ / 32) : (XTALMINFREQ / 32)); ++ } ++ } else if (sii->pub.ccrev < 10) { ++ div = 4 * ++ (((R_REG(sii->osh, &cc->slow_clk_ctl) & SCC_CD_MASK) >> SCC_CD_SHIFT) + 1); ++ if (slowclk == SCC_SS_LPO) { ++ return (max_freq ? LPOMAXFREQ : LPOMINFREQ); ++ } else if (slowclk == SCC_SS_XTAL) { ++ return (max_freq ? (XTALMAXFREQ / div) : (XTALMINFREQ / div)); ++ } else if (slowclk == SCC_SS_PCI) { ++ return (max_freq ? (PCIMAXFREQ / div) : (PCIMINFREQ / div)); ++ } else { ++ ASSERT(0); ++ } ++ } else { ++ /* Chipc rev 10 is InstaClock */ ++ div = R_REG(sii->osh, &cc->system_clk_ctl) >> SYCC_CD_SHIFT; ++ div = 4 * (div + 1); ++ return (max_freq ? XTALMAXFREQ : (XTALMINFREQ / div)); ++ } ++ return (0); ++} ++ ++static void ++BCMINITFN(si_clkctl_setdelay)(si_info_t *sii, void *chipcregs) ++{ ++ chipcregs_t *cc = (chipcregs_t *)chipcregs; ++ uint slowmaxfreq, pll_delay, slowclk; ++ uint pll_on_delay, fref_sel_delay; ++ ++ pll_delay = PLL_DELAY; ++ ++ /* If the slow clock is not sourced by the xtal then add the xtal_on_delay ++ * since the xtal will also be powered down by dynamic clk control logic. ++ */ ++ ++ slowclk = si_slowclk_src(sii); ++ if (slowclk != SCC_SS_XTAL) { ++ pll_delay += XTAL_ON_DELAY; ++ } ++ ++ /* Starting with 4318 it is ILP that is used for the delays */ ++ slowmaxfreq = si_slowclk_freq(sii, (sii->pub.ccrev >= 10) ? 
FALSE : TRUE, cc); ++ ++ pll_on_delay = ((slowmaxfreq * pll_delay) + 999999) / 1000000; ++ fref_sel_delay = ((slowmaxfreq * FREF_DELAY) + 999999) / 1000000; ++ ++ W_REG(sii->osh, &cc->pll_on_delay, pll_on_delay); ++ W_REG(sii->osh, &cc->fref_sel_delay, fref_sel_delay); ++} ++ ++/* initialize power control delay registers */ ++void ++BCMINITFN(si_clkctl_init)(si_t *sih) ++{ ++ si_info_t *sii; ++ uint origidx = 0; ++ chipcregs_t *cc; ++ bool fast; ++ ++ if (!CCCTL_ENAB(sih)) { ++ return; ++ } ++ ++ sii = SI_INFO(sih); ++ fast = SI_FAST(sii); ++ if (!fast) { ++ origidx = sii->curidx; ++ if ((cc = (chipcregs_t *)si_setcore(sih, CC_CORE_ID, 0)) == NULL) { ++ return; ++ } ++ } else if ((cc = (chipcregs_t *)CCREGS_FAST(sii)) == NULL) { ++ return; ++ } ++ ASSERT(cc != NULL); ++ ++ /* set all Instaclk chip ILP to 1 MHz */ ++ if (sih->ccrev >= 10) { ++ SET_REG(sii->osh, &cc->system_clk_ctl, SYCC_CD_MASK, ++ (ILP_DIV_1MHZ << SYCC_CD_SHIFT)); ++ } ++ ++ si_clkctl_setdelay(sii, (void *)(uintptr)cc); ++ ++ if (!fast) { ++ si_setcoreidx(sih, origidx); ++ } ++} ++ ++/* turn primary xtal and/or pll off/on */ ++int ++si_clkctl_xtal(si_t *sih, uint what, bool on) ++{ ++ switch (BUSTYPE(sih->bustype)) { ++ ++ default: ++ return (-1); ++ } ++ ++} ++ ++/* ++ * clock control policy function throught chipcommon ++ * ++ * set dynamic clk control mode (forceslow, forcefast, dynamic) ++ * returns true if we are forcing fast clock ++ * this is a wrapper over the next internal function ++ * to allow flexible policy settings for outside caller ++ */ ++bool ++si_clkctl_cc(si_t *sih, uint mode) ++{ ++ si_info_t *sii; ++ ++ sii = SI_INFO(sih); ++ ++ /* chipcommon cores prior to rev6 don't support dynamic clock control */ ++ if (sih->ccrev < 6) { ++ return FALSE; ++ } ++ ++ return _si_clkctl_cc(sii, mode); ++} ++ ++/* clk control mechanism through chipcommon, no policy checking */ ++static bool ++_si_clkctl_cc(si_info_t *sii, uint mode) ++{ ++ uint origidx = 0; ++ chipcregs_t *cc; ++ uint32 scc; ++ 
uint intr_val = 0; ++ bool fast = SI_FAST(sii); ++ ++ /* chipcommon cores prior to rev6 don't support dynamic clock control */ ++ if (sii->pub.ccrev < 6) { ++ return (FALSE); ++ } ++ ++ /* Chips with ccrev 10 are EOL and they don't have SYCC_HR which we use below */ ++ ASSERT(sii->pub.ccrev != 10); ++ ++ if (!fast) { ++ INTR_OFF(sii, intr_val); ++ origidx = sii->curidx; ++ ++ if ((BUSTYPE(sii->pub.bustype) == SI_BUS) && ++ si_setcore(&sii->pub, MIPS33_CORE_ID, 0) && ++ (si_corerev(&sii->pub) <= 7) && (sii->pub.ccrev >= 10)) { ++ goto done; ++ } ++ ++ cc = (chipcregs_t *) si_setcore(&sii->pub, CC_CORE_ID, 0); ++ } else if ((cc = (chipcregs_t *) CCREGS_FAST(sii)) == NULL) { ++ goto done; ++ } ++ ++ ASSERT(cc != NULL); ++ ++ if (!CCCTL_ENAB(&sii->pub) && (sii->pub.ccrev < 20)) { ++ goto done; ++ } ++ ++ switch (mode) { ++ case CLK_FAST: /* FORCEHT, fast (pll) clock */ ++ if (sii->pub.ccrev < 10) { ++ /* don't forget to force xtal back on before we clear SCC_DYN_XTAL.. */ ++ si_clkctl_xtal(&sii->pub, XTAL, ON); ++ SET_REG(sii->osh, &cc->slow_clk_ctl, (SCC_XC | SCC_FS | SCC_IP), SCC_IP); ++ } else if (sii->pub.ccrev < 20) { ++ OR_REG(sii->osh, &cc->system_clk_ctl, SYCC_HR); ++ } else { ++ OR_REG(sii->osh, &cc->clk_ctl_st, CCS_FORCEHT); ++ } ++ ++ /* wait for the PLL */ ++ if (PMUCTL_ENAB(&sii->pub)) { ++ uint32 htavail = CCS_HTAVAIL; ++ ++ SPINWAIT(((R_REG(sii->osh, &cc->clk_ctl_st) & htavail) == 0), ++ PMU_MAX_TRANSITION_DLY); ++ ASSERT(R_REG(sii->osh, &cc->clk_ctl_st) & htavail); ++ } else { ++ OSL_DELAY(PLL_DELAY); ++ } ++ break; ++ ++ case CLK_DYNAMIC: /* enable dynamic clock control */ ++ if (sii->pub.ccrev < 10) { ++ scc = R_REG(sii->osh, &cc->slow_clk_ctl); ++ scc &= ~(SCC_FS | SCC_IP | SCC_XC); ++ if ((scc & SCC_SS_MASK) != SCC_SS_XTAL) { ++ scc |= SCC_XC; ++ } ++ W_REG(sii->osh, &cc->slow_clk_ctl, scc); ++ ++ /* for dynamic control, we have to release our xtal_pu "force on" */ ++ if (scc & SCC_XC) { ++ si_clkctl_xtal(&sii->pub, XTAL, OFF); ++ } ++ } else if 
(sii->pub.ccrev < 20) { ++ /* Instaclock */ ++ AND_REG(sii->osh, &cc->system_clk_ctl, ~SYCC_HR); ++ } else { ++ AND_REG(sii->osh, &cc->clk_ctl_st, ~CCS_FORCEHT); ++ } ++ break; ++ ++ default: ++ ASSERT(0); ++ } ++ ++done: ++ if (!fast) { ++ si_setcoreidx(&sii->pub, origidx); ++ INTR_RESTORE(sii, intr_val); ++ } ++ return (mode == CLK_FAST); ++} ++ ++/* Build device path. Support SI, PCI, and JTAG for now. */ ++int ++BCMNMIATTACHFN(si_devpath)(si_t *sih, char *path, int size) ++{ ++ int slen; ++ ++ ASSERT(path != NULL); ++ ASSERT(size >= SI_DEVPATH_BUFSZ); ++ ++ if (!path || size <= 0) ++ return -1; ++ ++ switch (BUSTYPE(sih->bustype)) { ++ case SI_BUS: ++ slen = snprintf(path, (size_t)size, "sb/%u/", si_coreidx(sih)); ++ break; ++ default: ++ slen = -1; ++ ASSERT(0); ++ break; ++ } ++ ++ if (slen < 0 || slen >= size) { ++ path[0] = '\0'; ++ return -1; ++ } ++ ++ return 0; ++} ++ ++char * ++BCMATTACHFN(si_coded_devpathvar)(si_t *sih, char *varname, int var_len, const char *name) ++{ ++ char pathname[SI_DEVPATH_BUFSZ + 32]; ++ char devpath[SI_DEVPATH_BUFSZ + 32]; ++ char *p; ++ int idx; ++ int len; ++ ++ /* try to get compact devpath if it exist */ ++ if (si_devpath(sih, devpath, SI_DEVPATH_BUFSZ) == 0) { ++ len = strlen(devpath); ++ devpath[len - 1] = '\0'; ++ for (idx = 0; idx < SI_MAXCORES; idx++) { ++ snprintf(pathname, SI_DEVPATH_BUFSZ, "devpath%d", idx); ++ if ((p = getvar(NULL, pathname)) == NULL) { ++ continue; ++ } ++ ++ if (strncmp(p, devpath, len) == 0) { ++ snprintf(varname, var_len, "%d:%s", idx, name); ++ return varname; ++ } ++ } ++ } ++ ++ return NULL; ++} ++ ++/* Get a variable, but only if it has a devpath prefix */ ++int ++BCMATTACHFN(si_getdevpathintvar)(si_t *sih, const char *name) ++{ ++#if defined(BCMBUSTYPE) && (BCMBUSTYPE == SI_BUS) ++ return (getintvar(NULL, name)); ++#else ++ char varname[SI_DEVPATH_BUFSZ + 32]; ++ int val; ++ ++ si_devpathvar(sih, varname, sizeof(varname), name); ++ ++ if ((val = getintvar(NULL, varname)) != 0) { ++ return 
val; ++ } ++ ++ /* try to get compact devpath if it exist */ ++ if (si_coded_devpathvar(sih, varname, sizeof(varname), name) == NULL) { ++ return 0; ++ } ++ ++ return (getintvar(NULL, varname)); ++#endif /* BCMBUSTYPE && BCMBUSTYPE == SI_BUS */ ++} ++ ++/* Concatenate the dev path with a varname into the given 'var' buffer ++ * and return the 'var' pointer. ++ * Nothing is done to the arguments if len == 0 or var is NULL, var is still returned. ++ * On overflow, the first char will be set to '\0'. ++ */ ++static char * ++BCMATTACHFN(si_devpathvar)(si_t *sih, char *var, int len, const char *name) ++{ ++ uint path_len; ++ ++ if (!var || len <= 0) { ++ return var; ++ } ++ ++ if (si_devpath(sih, var, len) == 0) { ++ path_len = strlen(var); ++ ++ if (strlen(name) + 1 > (uint)(len - path_len)) { ++ var[0] = '\0'; ++ } else { ++ strncpy(var + path_len, name, len - path_len - 1); ++ } ++ } ++ ++ return var; ++} ++ ++ ++#if defined(BCMDBG) ++#endif ++ ++/* mask&set gpio output enable bits */ ++uint32 ++si_gpioouten(si_t *sih, uint32 mask, uint32 val, uint8 priority) ++{ ++ uint regoff; ++ ++ regoff = 0; ++ ++ /* gpios could be shared on router platforms ++ * ignore reservation if it's high priority (e.g., test apps) ++ */ ++ if ((priority != GPIO_HI_PRIORITY) && ++ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { ++ mask = priority ? (si_gpioreservation & mask) : ++ ((si_gpioreservation | mask) & ~(si_gpioreservation)); ++ val &= mask; ++ } ++ ++ regoff = OFFSETOF(chipcregs_t, gpioouten); ++ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); ++} ++ ++/* mask&set gpio output bits */ ++uint32 ++si_gpioout(si_t *sih, uint32 mask, uint32 val, uint8 priority) ++{ ++ uint regoff; ++ ++ regoff = 0; ++ ++ /* gpios could be shared on router platforms ++ * ignore reservation if it's high priority (e.g., test apps) ++ */ ++ if ((priority != GPIO_HI_PRIORITY) && ++ (BUSTYPE(sih->bustype) == SI_BUS) && (val || mask)) { ++ mask = priority ? 
(si_gpioreservation & mask) : ++ ((si_gpioreservation | mask) & ~(si_gpioreservation)); ++ val &= mask; ++ } ++ ++ regoff = OFFSETOF(chipcregs_t, gpioout); ++ return (si_corereg(sih, SI_CC_IDX, regoff, mask, val)); ++} +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/gmac/src/shared/siutils_priv.h b/drivers/net/ethernet/broadcom/gmac/src/shared/siutils_priv.h +--- a/drivers/net/ethernet/broadcom/gmac/src/shared/siutils_priv.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/gmac/src/shared/siutils_priv.h 2017-11-09 17:53:44.066299000 +0800 +@@ -0,0 +1,236 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ * ++ * Include file private to the SOC Interconnect support files. 
++ * ++ * $Id: siutils_priv.h 302333 2011-12-11 01:47:49Z $ ++ */ ++ ++#ifndef _siutils_priv_h_ ++#define _siutils_priv_h_ ++ ++#ifdef BCMDBG_ERR ++#define SI_ERROR(args) printf args ++#else ++#define SI_ERROR(args) ++#endif /* BCMDBG_ERR */ ++ ++#ifdef BCMDBG ++#define SI_MSG(args) printf args ++#else ++#define SI_MSG(args) ++#endif /* BCMDBG */ ++ ++#ifdef BCMDBG_SI ++#define SI_VMSG(args) printf args ++#else ++#define SI_VMSG(args) ++#endif ++ ++#define IS_SIM(chippkg) ((chippkg == HDLSIM_PKG_ID) || (chippkg == HWSIM_PKG_ID)) ++ ++typedef uint32 (*si_intrsoff_t)(void *intr_arg); ++typedef void (*si_intrsrestore_t)(void *intr_arg, uint32 arg); ++typedef bool (*si_intrsenabled_t)(void *intr_arg); ++ ++typedef struct gpioh_item { ++ void *arg; ++ bool level; ++ gpio_handler_t handler; ++ uint32 event; ++ struct gpioh_item *next; ++} gpioh_item_t; ++ ++/* misc si info needed by some of the routines */ ++typedef struct si_info { ++ struct si_pub pub; /* back plane public state (must be first field) */ ++ ++ void *osh; /* osl os handle */ ++ void *sdh; /* bcmsdh handle */ ++ ++ uint dev_coreid; /* the core provides driver functions */ ++ void *intr_arg; /* interrupt callback function arg */ ++ si_intrsoff_t intrsoff_fn; /* turns chip interrupts off */ ++ si_intrsrestore_t intrsrestore_fn; /* restore chip interrupts */ ++ si_intrsenabled_t intrsenabled_fn; /* check if interrupts are enabled */ ++ ++ void *pch; /* PCI/E core handle */ ++ gpioh_item_t *gpioh_head; /* GPIO event handlers list */ ++ bool memseg; /* flag to toggle MEM_SEG register */ ++ char *vars; ++ uint varsz; ++ ++ void *curmap; /* current regs va */ ++ void *regs[SI_MAXCORES]; /* other regs va */ ++ ++ uint curidx; /* current core index */ ++ uint numcores; /* # discovered cores */ ++ uint coreid[SI_MAXCORES]; /* id of each core */ ++ uint32 coresba[SI_MAXCORES]; /* backplane address of each core */ ++ void *regs2[SI_MAXCORES]; /* va of each core second register set (usbh20) */ ++ uint32 
coresba2[SI_MAXCORES]; /* address of each core second register set (usbh20) */ ++ uint32 coresba_size[SI_MAXCORES]; /* backplane address space size */ ++ uint32 coresba2_size[SI_MAXCORES]; /* second address space size */ ++ ++ void *curwrap; /* current wrapper va */ ++ void *wrappers[SI_MAXCORES]; /* other cores wrapper va */ ++ uint32 wrapba[SI_MAXCORES]; /* address of controlling wrapper */ ++ ++ uint32 cia[SI_MAXCORES]; /* erom cia entry for each core */ ++ uint32 cib[SI_MAXCORES]; /* erom cia entry for each core */ ++ uint32 oob_router; /* oob router registers for axi */ ++} si_info_t; ++ ++#define SI_INFO(sih) (si_info_t *)(uintptr)sih ++ ++#define GOODCOREADDR(x, b) (((x) >= (b)) && ((x) < ((b) + SI_MAXCORES * SI_CORE_SIZE)) && \ ++ ISALIGNED((x), SI_CORE_SIZE)) ++#define GOODREGS(regs) ((regs) != NULL && ISALIGNED((uintptr)(regs), SI_CORE_SIZE)) ++#define BADCOREADDR 0 ++#define GOODIDX(idx) (((uint)idx) < SI_MAXCORES) ++#define NOREV -1 /* Invalid rev */ ++ ++#define PCI(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ ++ ((si)->pub.buscoretype == PCI_CORE_ID)) ++ ++#define PCIE_GEN1(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ ++ ((si)->pub.buscoretype == PCIE_CORE_ID)) ++ ++#define PCIE_GEN2(si) ((BUSTYPE((si)->pub.bustype) == PCI_BUS) && \ ++ ((si)->pub.buscoretype == PCIE2_CORE_ID)) ++ ++#define PCIE(si) (PCIE_GEN1(si) || PCIE_GEN2(si)) ++ ++#define PCMCIA(si) ((BUSTYPE((si)->pub.bustype) == PCMCIA_BUS) && ((si)->memseg == TRUE)) ++ ++/* Newer chips can access PCI/PCIE and CC core without requiring to change ++ * PCI BAR0 WIN ++ */ ++#define SI_FAST(si) (PCIE(si) || (PCI(si) && ((si)->pub.buscorerev >= 13))) ++ ++#define PCIEREGS(si) (((char *)((si)->curmap) + PCI_16KB0_PCIREGS_OFFSET)) ++#define CCREGS_FAST(si) (((char *)((si)->curmap) + PCI_16KB0_CCREGS_OFFSET)) ++ ++/* ++ * Macros to disable/restore function core(D11, ENET, ILINE20, etc) interrupts before/ ++ * after core switching to avoid invalid register accesss inside ISR. 
++ */ ++#define INTR_OFF(si, intr_val) \ ++ if ((si)->intrsoff_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \ ++ intr_val = (*(si)->intrsoff_fn)((si)->intr_arg); } ++#define INTR_RESTORE(si, intr_val) \ ++ if ((si)->intrsrestore_fn && (si)->coreid[(si)->curidx] == (si)->dev_coreid) { \ ++ (*(si)->intrsrestore_fn)((si)->intr_arg, intr_val); } ++ ++/* dynamic clock control defines */ ++#define LPOMINFREQ 25000 /* low power oscillator min */ ++#define LPOMAXFREQ 43000 /* low power oscillator max */ ++#define XTALMINFREQ 19800000 /* 20 MHz - 1% */ ++#define XTALMAXFREQ 20200000 /* 20 MHz + 1% */ ++#define PCIMINFREQ 25000000 /* 25 MHz */ ++#define PCIMAXFREQ 34000000 /* 33 MHz + fudge */ ++ ++#define ILP_DIV_5MHZ 0 /* ILP = 5 MHz */ ++#define ILP_DIV_1MHZ 4 /* ILP = 1 MHz */ ++ ++/* GPIO Based LED powersave defines */ ++#define DEFAULT_GPIO_ONTIME 10 /* Default: 10% on */ ++#define DEFAULT_GPIO_OFFTIME 90 /* Default: 10% on */ ++#ifndef DEFAULT_GPIOTIMERVAL ++#define DEFAULT_GPIOTIMERVAL ((DEFAULT_GPIO_ONTIME << GPIO_ONTIME_SHIFT) | DEFAULT_GPIO_OFFTIME) ++#endif ++ ++#define sb_scan(a, b, c) do {} while (0) ++#define sb_coreid(a) (0) ++#define sb_intflag(a) (0) ++#define sb_flag(a) (0) ++#define sb_setint(a, b) do {} while (0) ++#define sb_corevendor(a) (0) ++#define sb_corerev(a) (0) ++#define sb_corereg(a, b, c, d, e) (0) ++#define sb_iscoreup(a) (false) ++#define sb_setcoreidx(a, b) (0) ++#define sb_core_cflags(a, b, c) (0) ++#define sb_core_cflags_wo(a, b, c) do {} while (0) ++#define sb_core_sflags(a, b, c) (0) ++#define sb_commit(a) do {} while (0) ++#define sb_base(a) (0) ++#define sb_size(a) (0) ++#define sb_core_reset(a, b, c) do {} while (0) ++#define sb_core_disable(a, b) do {} while (0) ++#define sb_addrspace(a, b) (0) ++#define sb_addrspacesize(a, b) (0) ++#define sb_numaddrspaces(a) (0) ++#define sb_set_initiator_to(a, b, c) (0) ++#define sb_taclear(a, b) (false) ++#define sb_view(a, b) do {} while (0) ++#define sb_viewall(a, b) do {} while 
(0) ++#define sb_dump(a, b) do {} while (0) ++#define sb_dumpregs(a, b) do {} while (0) ++ ++ ++/* AMBA Interconnect exported externs */ ++extern si_t *ai_attach(uint pcidev, osl_t *osh, void *regs, uint bustype, ++ void *sdh, char **vars, uint *varsz); ++extern si_t *ai_kattach(osl_t *osh); ++extern void ai_scan(si_t *sih, void *regs, uint devid); ++extern uint ai_flag(si_t *sih); ++extern void ai_setint(si_t *sih, int siflag); ++extern uint ai_coreidx(si_t *sih); ++extern uint ai_corevendor(si_t *sih); ++extern uint ai_corerev(si_t *sih); ++extern bool ai_iscoreup(si_t *sih); ++extern void *ai_setcoreidx(si_t *sih, uint coreidx); ++extern uint32 ai_core_cflags(si_t *sih, uint32 mask, uint32 val); ++extern void ai_core_cflags_wo(si_t *sih, uint32 mask, uint32 val); ++extern uint32 ai_core_sflags(si_t *sih, uint32 mask, uint32 val); ++extern uint ai_corereg(si_t *sih, uint coreidx, uint regoff, uint mask, uint val); ++extern void ai_core_reset(si_t *sih, uint32 bits, uint32 resetbits); ++extern void ai_core_disable(si_t *sih, uint32 bits); ++extern int ai_numaddrspaces(si_t *sih); ++extern uint32 ai_addrspace(si_t *sih, uint asidx); ++extern uint32 ai_addrspacesize(si_t *sih, uint asidx); ++extern void ai_coreaddrspaceX(si_t *sih, uint asidx, uint32 *addr, uint32 *size); ++extern uint ai_wrap_reg(si_t *sih, uint32 offset, uint32 mask, uint32 val); ++#ifdef BCMDBG ++extern void ai_view(si_t *sih, bool verbose); ++extern void ai_viewall(si_t *sih, bool verbose); ++#endif /* BCMDBG */ ++#if defined(BCMDBG) ++extern void ai_dumpregs(si_t *sih, struct bcmstrbuf *b); ++#endif ++ ++ ++#define ub_scan(a, b, c) do {} while (0) ++#define ub_flag(a) (0) ++#define ub_setint(a, b) do {} while (0) ++#define ub_coreidx(a) (0) ++#define ub_corevendor(a) (0) ++#define ub_corerev(a) (0) ++#define ub_iscoreup(a) (0) ++#define ub_setcoreidx(a, b) (0) ++#define ub_core_cflags(a, b, c) (0) ++#define ub_core_cflags_wo(a, b, c) do {} while (0) ++#define ub_core_sflags(a, b, c) (0) 
++#define ub_corereg(a, b, c, d, e) (0) ++#define ub_core_reset(a, b, c) do {} while (0) ++#define ub_core_disable(a, b) do {} while (0) ++#define ub_numaddrspaces(a) (0) ++#define ub_addrspace(a, b) (0) ++#define ub_addrspacesize(a, b) (0) ++#define ub_view(a, b) do {} while (0) ++#define ub_dumpregs(a, b) do {} while (0) ++ ++#endif /* _siutils_priv_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/mdio/Kconfig b/drivers/net/ethernet/broadcom/mdio/Kconfig +--- a/drivers/net/ethernet/broadcom/mdio/Kconfig 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/mdio/Kconfig 2017-11-09 17:53:44.076300000 +0800 +@@ -0,0 +1,23 @@ ++# ++# Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++# ++# Permission to use, copy, modify, and/or distribute this software for any ++# purpose with or without fee is hereby granted, provided that the above ++# copyright notice and this permission notice appear in all copies. ++# ++# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++# ++config MDIO_XGS_IPROC ++ tristate "BRCM XGS iProc MDIO support" ++ depends on ARCH_XGS_IPROC ++ default n ++ help ++ MDIO support ++ ++ If unsure, say N. 
+diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/mdio/Makefile b/drivers/net/ethernet/broadcom/mdio/Makefile +--- a/drivers/net/ethernet/broadcom/mdio/Makefile 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/mdio/Makefile 2017-11-09 17:53:44.077310000 +0800 +@@ -0,0 +1,17 @@ ++# ++# Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++# ++# Permission to use, copy, modify, and/or distribute this software for any ++# purpose with or without fee is hereby granted, provided that the above ++# copyright notice and this permission notice appear in all copies. ++# ++# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++# SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++# OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++# CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++# ++obj-$(CONFIG_MDIO_XGS_IPROC) := iproc_mii.o ++iproc_mii-objs := ccb_mdio.o cmicd_mdio.o ccg_mdio.o iproc_mdio.o +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/mdio/ccb_mdio.c b/drivers/net/ethernet/broadcom/mdio/ccb_mdio.c +--- a/drivers/net/ethernet/broadcom/mdio/ccb_mdio.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/mdio/ccb_mdio.c 2017-11-09 17:53:44.079291000 +0800 +@@ -0,0 +1,873 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include ++ ++#include ++#include ++ ++#include "iproc_mdio.h" ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "iproc_mdio_dev.h" ++ ++#ifdef CONFIG_OF_MDIO ++#include ++#include ++#include ++static struct iproc_mdiobus_data iproc_mdiobus_data; ++#define __devinit /* */ ++#define __devexit /* */ ++#endif /* CONFIG_OF_MDIO */ ++ ++#ifndef CONFIG_OF_MDIO ++#define CCB_MDIO_BASE_ADDRESS IPROC_MII_MGMT_CTL ++#endif ++ ++#define R_REG(reg) ioread32(ccb_mdio->base + (reg&0x0fff)) ++#define W_REG(reg, val) iowrite32(val, ccb_mdio->base + (reg&0x0fff)) ++ ++#define MII_ERR_VAL 0x0001 ++#define MII_MSG_VAL 0x0002 ++#define MII_DBG_VAL 0x0004 ++//static u32 mii_msg_level = MII_ERR_VAL; ++ ++#if defined(BCMDBG) || defined(BCMDBG_ERR) ++#define MII_ERR(args) do {if (mii_msg_level & MII_ERR_VAL) printk args;} while (0) ++#else ++#define MII_ERR(args) ++#endif ++ ++#ifdef BCMDBG ++#define MII_MSG(args) do {if (mii_msg_level & MII_MSG_VAL) printk args;} while (0) ++#define MII_DBG(args) do {if (mii_msg_level & MII_DBG_VAL) printk args;} while (0) ++#else ++#define MII_MSG(args) ++#define MII_DBG(args) ++#endif ++ ++#define MII_EN_CHK \ ++ {\ ++ if (!ccb_mdio->base) { \ ++ return MII_ERR_INIT; \ ++ } \ ++ if 
(!(R_REG(MII_MGMT) & 0x7f)) { \ ++ return MII_ERR_INTERNAL; \ ++ } \ ++ } ++ ++#define MII_TRIES 100000 ++#define MII_POLL_USEC 20 ++ ++struct mdio_device_data { ++ mdio_info_t *mdio; ++ int init; ++}; ++ ++static struct mdio_device_data mdio_devices={0}; ++static uint32_t ccb_mdio_clk_rate; ++ ++#define DRIVER_VERSION "0.01" ++#define DRIVER_NAME "iproc mdio" ++ ++static int mdio_major; ++static struct cdev mdio_cdev; ++ ++#define MDIO_IOC_OP_EXTERNAL_READ 0 ++#define MDIO_IOC_OP_EXTERNAL_WRITE 1 ++#define MDIO_IOC_OP_LOCAL_READ 2 ++#define MDIO_IOC_OP_LOCAL_WRITE 3 ++ ++ ++struct ccb_mdio_ctrl { ++ void __iomem *base; ++ int ref_cnt; ++}; ++ ++struct ccb_mdiobus_private { ++ /* iproc_mdiobus_data field have to be placed at the beginning of ++ * mdiobus private data */ ++ struct iproc_mdiobus_data bus_data; ++ struct ccb_mdio_ctrl *hw_ctrl; ++}; ++ ++static struct ccb_mdio_ctrl *ccb_mdio = NULL; ++ ++#ifndef CONFIG_OF_MDIO ++static struct resource ccb_mdio_mem = { ++ .name = "ccb_mdio", ++ .start = CCB_MDIO_BASE_ADDRESS, ++ .end = CCB_MDIO_BASE_ADDRESS + 0x1000 - 1, ++ .flags = IORESOURCE_MEM, ++}; ++#endif ++ ++/* Function : ccb_mii_read ++ * - Read operation. 
++ * Return : ++ * Note : ++ */ ++int ++ccb_mii_read(int dev_type, int phy_addr, int reg_off, uint16_t *data) ++{ ++ int i; ++ uint32_t ctrl = 0; ++ unsigned long flags; ++ mdio_info_t *mdio = NULL; ++ ++ MII_EN_CHK; ++ ++ mdio = mdio_devices.mdio; ++ ++ spin_lock_irqsave(&mdio->lock, flags); ++ ++ ctrl = R_REG(MII_MGMT); ++ if (dev_type == MII_DEV_LOCAL) { ++ ctrl &= ~MII_MGMT_EXP_MASK; ++ } else { ++ ctrl |= MII_MGMT_EXP_MASK; ++ } ++ W_REG(MII_MGMT, ctrl); ++ MII_DBG(("MII READ: write(0x%x)=0x%x\n",MII_MGMT, ctrl)); ++ ++ for (i = 0; i < MII_TRIES; i++) { ++ ctrl = R_REG(MII_MGMT); ++ if (!(ctrl & MII_MGMT_BSY_MASK)) { ++ break; ++ } ++ udelay(MII_POLL_USEC); ++ } ++ if (i >= MII_TRIES) { ++ MII_ERR(("\n%s: BUSY stuck: ctrl=0x%x, count=%d\n", __FUNCTION__, ctrl, i)); ++ spin_unlock_irqrestore(&mdio->lock, flags); ++ return -1; ++ } ++ ++ ctrl = (((1 << MII_CMD_DATA_SB_SHIFT) & MII_CMD_DATA_SB_MASK) | ++ ((2 << MII_CMD_DATA_OP_SHIFT) & MII_CMD_DATA_OP_MASK) | ++ ((phy_addr << MII_CMD_DATA_PA_SHIFT) & MII_CMD_DATA_PA_MASK) | ++ ((reg_off << MII_CMD_DATA_RA_SHIFT) & MII_CMD_DATA_RA_MASK) | ++ ((2 << MII_CMD_DATA_TA_SHIFT) & MII_CMD_DATA_TA_MASK)); ++ W_REG(MII_CMD_DATA, ctrl); ++ MII_DBG(("MII READ: write(0x%x)=0x%x\n",MII_CMD_DATA, ctrl)); ++ ++ for (i = 0; i < MII_TRIES; i++) { ++ ctrl = R_REG(MII_MGMT); ++ if (!(ctrl & MII_MGMT_BSY_MASK)) { ++ break; ++ } ++ udelay(MII_POLL_USEC); ++ } ++ if (i >= MII_TRIES) { ++ MII_ERR(("\n%s: BUSY stuck: ctrl=0x%x, count=%d\n", __FUNCTION__, ctrl, i)); ++ spin_unlock_irqrestore(&mdio->lock, flags); ++ return -1; ++ } ++ ++ ctrl = R_REG(MII_CMD_DATA); ++ ++ MII_DBG(("MDIO READ: addr=%x off=%x value=%x\n", phy_addr, reg_off, ctrl)); ++ ++ spin_unlock_irqrestore(&mdio->lock, flags); ++ ++ *data = (ctrl & 0xffff); ++ return 0; ++} ++ ++/* Function : ccb_mii_write ++ * - Write operation. 
++ * Return : ++ * Note : ++ */ ++int ++ccb_mii_write(int dev_type, int phy_addr, int reg_off, uint16_t data) ++{ ++ int i; ++ uint32_t ctrl = 0; ++ unsigned long flags; ++ mdio_info_t *mdio = NULL; ++ ++ MII_DBG(("MDIO WRITE: addr=%x off=%x\n", phy_addr, reg_off)); ++ ++ MII_EN_CHK; ++ ++ mdio = mdio_devices.mdio; ++ ++ spin_lock_irqsave(&mdio->lock, flags); ++ ++ ctrl = R_REG(MII_MGMT); ++ if (dev_type == MII_DEV_LOCAL) { ++ ctrl &= ~MII_MGMT_EXP_MASK; ++ } else { ++ ctrl |= MII_MGMT_EXP_MASK; ++ } ++ W_REG(MII_MGMT, ctrl); ++ MII_DBG(("MII WRITE: write(0x%x)=0x%x\n",MII_MGMT, ctrl)); ++ ++ for (i = 0; i < MII_TRIES; i++) { ++ ctrl = R_REG(MII_MGMT); ++ if (!(ctrl & MII_MGMT_BSY_MASK)) { ++ break; ++ } ++ udelay(MII_POLL_USEC); ++ } ++ if (i >= MII_TRIES) { ++ MII_ERR(("\n%s: BUSY stuck: ctrl=0x%x, count=%d\n", __FUNCTION__, ctrl, i)); ++ spin_unlock_irqrestore(&mdio->lock, flags); ++ return -1; ++ } ++ ++ ctrl = (((1 << MII_CMD_DATA_SB_SHIFT) & MII_CMD_DATA_SB_MASK) | ++ ((1 << MII_CMD_DATA_OP_SHIFT) & MII_CMD_DATA_OP_MASK) | ++ ((phy_addr << MII_CMD_DATA_PA_SHIFT) & MII_CMD_DATA_PA_MASK) | ++ ((reg_off << MII_CMD_DATA_RA_SHIFT) & MII_CMD_DATA_RA_MASK) | ++ ((2 << MII_CMD_DATA_TA_SHIFT) & MII_CMD_DATA_TA_MASK) | ++ ((data << MII_CMD_DATA_DATA_SHIFT) & MII_CMD_DATA_DATA_MASK)); ++ W_REG(MII_CMD_DATA, ctrl); ++ MII_DBG(("MII WRITE: write(0x%x)=0x%x\n",MII_CMD_DATA, ctrl)); ++ ++ ++ for (i = 0; i < MII_TRIES; i++) { ++ ctrl = R_REG(MII_MGMT); ++ if (!(ctrl & MII_MGMT_BSY_MASK)) { ++ break; ++ } ++ udelay(MII_POLL_USEC); ++ } ++ if (i >= MII_TRIES) { ++ MII_ERR(("\n%s: BUSY stuck: ctrl=0x%x, count=%d\n", __FUNCTION__, ctrl, i)); ++ spin_unlock_irqrestore(&mdio->lock, flags); ++ return -1; ++ } ++ ++ spin_unlock_irqrestore(&mdio->lock, flags); ++ ++ return MII_ERR_NONE; ++} ++ ++/* Function : ccb_mii_freq_set ++ * - Set MII management interface frequency. 
++ * Return : ++ * Note : ++ * ++ */ ++int ++ccb_mii_freq_set(int speed_khz) ++{ ++ int rv = MII_ERR_NONE; ++ uint32_t divider = 0; ++ uint32_t mgmt = 0; ++ ++ MII_DBG(("MDIO FREQ SET: %d KHz\n", speed_khz)); ++ ++ /* host clock 66MHz device value the MDCDIV field */ ++ /* resultant MDIO clock should not exceed 2.5MHz */ ++ ++ if (speed_khz > 2500) { ++ MII_ERR(("\n%s: Maximum MDIO frequency is 2.5MHz\n", __FUNCTION__)); ++ return MII_ERR_PARAM; ++ } ++ ++ divider = ccb_mdio_clk_rate / (1000*speed_khz); ++ divider = (divider & MII_MGMT_MDCDIV_MASK); ++ if (divider > 0x7f) { ++ /* make sure the minimum configurable frequency */ ++ divider = 0x7f; ++ } ++ mgmt = R_REG(MII_MGMT); ++ mgmt &= ~MII_MGMT_MDCDIV_MASK; ++ mgmt |= divider; ++ ++ W_REG(MII_MGMT, mgmt); ++ MII_DBG(("MII FREQ(%d KHz): write(0x%x)=0x%x\n",speed_khz, MII_MGMT, mgmt)); ++ ++ return rv; ++} ++ ++static int ++mdio_open(struct inode *inode, struct file *filp) ++{ ++ filp->private_data = mdio_devices.mdio; ++ return 0; ++} ++ ++static int ++mdio_release(struct inode *inode, struct file *filp) ++{ ++ return 0; ++} ++ ++static int mdio_message(mdio_info_t *mdio, ++ struct mdio_ioc_transfer *u_xfers, unsigned n_xfers, int op) ++{ ++ ++ uint8_t pa, ra; ++ uint16_t regval; ++ ++ pa = u_xfers->pa; ++ ra = u_xfers->ra; ++ ++ MII_DBG(("mdio_message: op = %d\n", op)); ++ ++ if (op == MDIO_IOC_OP_LOCAL_READ) { ++ iproc_mii_read(MII_DEV_LOCAL, pa, ra, ®val); ++ u_xfers->rx_buf = regval; ++ } ++ ++ if (op == MDIO_IOC_OP_LOCAL_WRITE) { ++ iproc_mii_write(MII_DEV_LOCAL, pa, ra, u_xfers->tx_buf); ++ } ++ ++ if (op == MDIO_IOC_OP_EXTERNAL_READ) { ++ iproc_mii_read(MII_DEV_EXT, pa, ra, ®val); ++ u_xfers->rx_buf = regval; ++ } ++ ++ if (op == MDIO_IOC_OP_EXTERNAL_WRITE) { ++ iproc_mii_write(MII_DEV_EXT, pa, ra, u_xfers->tx_buf); ++ } ++ return 0; ++} ++ ++static long ++mdio_ioctl(struct file *filp, ++ unsigned int cmd, unsigned long arg) ++{ ++ int err = 0; ++ int retval = 0; ++ int ioc_op = 0; ++ uint32_t tmp; ++ 
unsigned n_ioc; ++ struct mdio_ioc_transfer *ioc, *uf; ++ mdio_info_t *mdio; ++ ++ MII_DBG(("mdio_ioctl: cmd = %d\n", cmd)); ++ ++ /* Check type and command number */ ++ if (_IOC_TYPE(cmd) != MDIO_IOC_MAGIC){ ++ return -ENOTTY; ++ } ++ ++ /* Check access direction once here; don't repeat below. ++ * IOC_DIR is from the user perspective, while access_ok is ++ * from the kernel perspective; so they look reversed. ++ */ ++ if (_IOC_DIR(cmd) & _IOC_READ) { ++ err = !access_ok(VERIFY_WRITE, (void __user *)arg, _IOC_SIZE(cmd)); ++ } ++ if (err == 0 && _IOC_DIR(cmd) & _IOC_WRITE) { ++ err = !access_ok(VERIFY_READ, (void __user *)arg, _IOC_SIZE(cmd)); ++ } ++ if (err) { ++ return -EFAULT; ++ } ++ ++ mdio = (mdio_info_t *)filp->private_data; ++ ++ switch (cmd) { ++ case MDIO_IOC_EXTERNAL_R_REG: ++ ioc_op = MDIO_IOC_OP_EXTERNAL_READ; ++ break; ++ case MDIO_IOC_EXTERNAL_W_REG: ++ ioc_op = MDIO_IOC_OP_EXTERNAL_WRITE; ++ break; ++ case MDIO_IOC_LOCAL_R_REG: ++ ioc_op = MDIO_IOC_OP_LOCAL_READ; ++ break; ++ case MDIO_IOC_LOCAL_W_REG: ++ ioc_op = MDIO_IOC_OP_LOCAL_WRITE; ++ break; ++ } ++ ++ tmp = _IOC_SIZE(cmd); ++ if ((tmp % sizeof(struct mdio_ioc_transfer)) != 0) { ++ retval = -EINVAL; ++ return retval; ++ } ++ n_ioc = tmp / sizeof(struct mdio_ioc_transfer); ++ if (n_ioc == 0) { ++ return 0; ++ } ++ ++ /* copy into scratch area */ ++ ioc = kmalloc(tmp, GFP_KERNEL); ++ if (!ioc) { ++ retval = -ENOMEM; ++ return retval; ++ } ++ if (__copy_from_user(ioc, (void __user *)arg, tmp)) { ++ kfree(ioc); ++ retval = -EFAULT; ++ return retval; ++ } ++ /* translate to mdio_message, execute */ ++ retval = mdio_message(mdio, ioc, n_ioc, ioc_op); ++ ++ if ((ioc_op == MDIO_IOC_OP_EXTERNAL_READ) || (ioc_op == MDIO_IOC_OP_LOCAL_READ)) { ++ uf = (struct mdio_ioc_transfer *)arg; ++ if (__copy_to_user((u8 __user *)&uf->rx_buf, (uint8_t *)&ioc->rx_buf, 2)) { ++ kfree(ioc); ++ retval = -EFAULT; ++ return retval; ++ } ++ } ++ kfree(ioc); ++ ++ return 0; ++} ++ ++static const struct file_operations 
mdio_fops = { ++ .open = mdio_open, ++ .release = mdio_release, ++ .unlocked_ioctl = mdio_ioctl, ++ .owner = THIS_MODULE, ++}; ++ ++static int _mdio_handler_init(void) ++{ ++ mdio_info_t *mdio = NULL; ++ ++ mdio = kmalloc(sizeof(mdio_info_t), GFP_KERNEL); ++ if (mdio == NULL) { ++ MII_ERR(("mdio_init: out of memory\n")); ++ return -ENOMEM; ++ } ++ memset(mdio, 0, sizeof(mdio_info_t)); ++ ++ /* Initialize lock */ ++ spin_lock_init(&mdio->lock); ++ ++ mdio_devices.mdio = mdio; ++ mdio_devices.init = 1; ++ ++ return 0; ++} ++ ++ ++static int ccb_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) ++{ ++ struct ccb_mdiobus_private *bus_priv = bus->priv; ++ struct iproc_mdiobus_data *bus_data = &bus_priv->bus_data; ++ uint16_t data; ++ int dev_type = 0; ++ int err; ++ ++ if (IPROC_MDIOBUS_TYPE_INTERNAL == bus_data->phybus_type) { ++ dev_type = MII_DEV_LOCAL; ++ } else if (IPROC_MDIOBUS_TYPE_EXTERNAL == bus_data->phybus_type) { ++ dev_type = MII_DEV_EXT; ++ } else { ++ return -EINVAL; ++ } ++ err = ccb_mii_read(dev_type, phy_id, regnum, &data); ++ if (err < 0) { ++ return err; ++ } else { ++ return data; ++ } ++} ++ ++static int ccb_mdiobus_write(struct mii_bus *bus, int phy_id, ++ int regnum, u16 val) ++{ ++ struct ccb_mdiobus_private *bus_priv = bus->priv; ++ struct iproc_mdiobus_data *bus_data = &bus_priv->bus_data; ++ int dev_type = 0; ++ ++ if (IPROC_MDIOBUS_TYPE_INTERNAL == bus_data->phybus_type) { ++ dev_type = MII_DEV_LOCAL; ++ } else if (IPROC_MDIOBUS_TYPE_EXTERNAL == bus_data->phybus_type) { ++ dev_type = MII_DEV_EXT; ++ } else { ++ return -EINVAL; ++ } ++ ++ return ccb_mii_write(dev_type, phy_id, regnum, val); ++} ++ ++static void __maybe_unused ccb_mdiobus_test(struct mii_bus *mii_bus) ++{ ++ int i, nRet1, nRet2; ++ u16 data1 = 0, data2 = 0; ++ struct phy_device *phy_dev; ++ struct ccb_mdiobus_private *bus_priv = mii_bus->priv; ++ struct iproc_mdiobus_data *bus_data = &bus_priv->bus_data; ++ ++ dev_info(mii_bus->parent, "%s : %s phy bus num[%d], 
type[%d]\n", ++ __func__, mii_bus->id, bus_data->phybus_num, bus_data->phybus_type); ++ ++ /* Check if mdiobus_read works fine */ ++ for (i = 0; i < PHY_MAX_ADDR; i++) { ++ phy_dev = mii_bus->phy_map[i]; ++ if (phy_dev) { ++ dev_info(mii_bus->parent, "phy[%d] id=0x%08x, addr = %d\n", ++ i, phy_dev->phy_id, phy_dev->addr); ++ nRet1 = phy_read(phy_dev, 2); ++ nRet2 = phy_read(phy_dev, 3); ++ if ((nRet1 < 0) || (nRet2 < 0)) { ++ dev_info(mii_bus->parent, ++ "phy_read failed!, %s, nRet1 = %d, nRet2 = %d\n", ++ dev_name(&phy_dev->dev), nRet1, nRet2); ++ } else { ++ dev_info(mii_bus->parent, ++ "%s: reg2 = 0x%x, reg3 = 0x%x\n", ++ dev_name(&phy_dev->dev), nRet1, nRet2); ++ } ++ } ++ } ++ ++ /* Check if general interface function for mdiobus read works fine */ ++ for (i = 0; i < PHY_MAX_ADDR; i++) { ++ data1 = mii_bus->read(mii_bus, i, 2); ++ data2 = mii_bus->read(mii_bus, i, 3); ++ if ((data1 < 0) || (data2 < 0)) { ++ dev_info(mii_bus->parent, ++ "iproc_mdiobus_read failed!, %s phy bus num[%d], type[%d], phyaddr = %d, nRet1 = %d, nRet2 = %d\n", ++ mii_bus->id, bus_data->phybus_num, bus_data->phybus_type, i, data1, data2); ++ } else { ++ dev_info(mii_bus->parent, ++ "read %s phy bus num[%d] type[%d] phyaddr[%d], reg2 = 0x%x, reg3 = 0x%x\n", ++ mii_bus->id, bus_data->phybus_num, bus_data->phybus_type, i, data1, data2); ++ } ++ } ++} ++ ++static struct ccb_mdio_ctrl *ccb_mdio_res_alloc(void) ++{ ++ if (!ccb_mdio) { ++ ccb_mdio = kzalloc(sizeof(*ccb_mdio), GFP_KERNEL); ++ if (!ccb_mdio) { ++ return NULL; ++ } ++ ccb_mdio->ref_cnt = 1; ++ } else { ++ ccb_mdio->ref_cnt++; ++ } ++ ++ return ccb_mdio; ++} ++ ++static void ccb_mdio_res_free(struct ccb_mdio_ctrl *ctrl) ++{ ++ if (ctrl) { ++ ctrl->ref_cnt --; ++ if (ctrl->ref_cnt == 0) { ++ iounmap(ctrl->base); ++ kfree(ctrl); ++ ccb_mdio = NULL; ++ } ++ } ++} ++ ++void ++ccb_mii_init(struct ccb_mdio_ctrl *ccb_mii) ++{ ++ if (ccb_mii->ref_cnt == 1) { ++ /* Set preamble */ ++ W_REG(MII_MGMT, MII_MGMT_PRE_MASK); ++ ++ /* Set the MII 
default clock 1MHz */ ++ ccb_mii_freq_set(1000); /* KHZ */ ++ } ++} ++ ++#ifdef CONFIG_OF_MDIO ++static int __devinit ccb_mii_probe(struct platform_device *pdev) ++{ ++ int ret = -ENODEV; ++ struct device_node *dn = pdev->dev.of_node; ++ struct mii_bus *mii_bus; ++ struct ccb_mdiobus_private *bus_priv; ++ struct iproc_mdiobus_data *bus_data; ++ u32 mdio_bus_id; ++ const char *mdio_bus_type; ++ struct ccb_mdio_ctrl *ccb_ctrl; ++ struct clk *clk=NULL; ++ ++ if (!of_device_is_available(dn)) ++ return -ENODEV; ++ ++ ccb_ctrl = ccb_mdio_res_alloc(); ++ if (!ccb_ctrl) { ++ printk(KERN_ERR "ccb mdio res alloc failed\n"); ++ return -ENOMEM; ++ ++ } ++ ++ /* Get register base address */ ++ ccb_ctrl->base = (void *)of_iomap(dn, 0); ++ MII_DBG(("MDIO INIT: Base Addr %x\n", ccb_ctrl->base)); ++ ++ clk = of_clk_get (dn, 0); ++ if (clk) ++ ccb_mdio_clk_rate = clk_get_rate(clk)/2; /* used by ccb_mii_freq_set() */ ++ else { ++ printk("No CCB MDIO Clock available from DT, use default clock rate: 62.5MHz\n"); ++ ccb_mdio_clk_rate = 62500000; ++ } ++ ++ ccb_mii_init(ccb_ctrl); ++ ++ if (of_property_read_u32(dn, "#bus-id", &mdio_bus_id)) { ++ mdio_bus_id = 0; /* no property available, use default: 0 */ ++ } ++ if (of_property_read_string(dn, "bus-type", &mdio_bus_type)) { ++ mdio_bus_type = "internal"; /* no property available, use default: "internal" */ ++ } ++ iproc_mdiobus_data.phybus_num = (u8) mdio_bus_id; ++ if (!strcmp(mdio_bus_type, "internal")) ++ iproc_mdiobus_data.phybus_type = IPROC_MDIOBUS_TYPE_INTERNAL; ++ else ++ iproc_mdiobus_data.phybus_type = IPROC_MDIOBUS_TYPE_EXTERNAL; ++ /* Note: this applies to CCB/CCG MDIO, but not for CMICD MDIO */ ++ iproc_mdiobus_data.logbus_num = iproc_mdiobus_data.phybus_num; ++ iproc_mdiobus_data.logbus_type = iproc_mdiobus_data.phybus_type; ++ ++ bus_data = &iproc_mdiobus_data; ++ ++ mii_bus = mdiobus_alloc_size(sizeof(*bus_priv)); ++ if (!mii_bus) ++ return -ENOMEM; ++ ++ mii_bus->name = "iproc_ccb_mdiobus"; ++ snprintf(mii_bus->id, 
MII_BUS_ID_SIZE, IPROC_MDIO_ID_FMT, ++ bus_data->logbus_num, bus_data->logbus_type); ++ mii_bus->parent = &pdev->dev; ++ mii_bus->read = ccb_mdiobus_read; ++ mii_bus->write = ccb_mdiobus_write; ++ ++ bus_priv = mii_bus->priv; ++ memcpy(&bus_priv->bus_data, bus_data, sizeof(struct iproc_mdiobus_data)); ++ bus_priv->hw_ctrl = ccb_ctrl; ++ ++ ret = mdiobus_register(mii_bus); ++ if (ret) { ++ dev_err(&pdev->dev, "mdiobus_register failed\n"); ++ goto err_exit; ++ } ++ ++ platform_set_drvdata(pdev, mii_bus); ++ ++ /* ccb_mdiobus_test(mii_bus); */ ++ ++ return 0; ++ ++err_exit: ++ kfree(mii_bus); ++ return ret; ++} ++ ++int ccb_mii_remove(struct platform_device *pdev) ++{ ++ struct mii_bus *mii_bus = platform_get_drvdata(pdev); ++ struct ccb_mdiobus_private *bus_priv; ++ ++ if (mii_bus) { ++ bus_priv = mii_bus->priv; ++ ++ mdiobus_unregister(mii_bus); ++ if (bus_priv) { ++ ccb_mdio_res_free(bus_priv->hw_ctrl); ++ } ++ mdiobus_free(mii_bus); ++ } ++ ++ return 0; ++} ++ ++ ++static const struct of_device_id bcm_iproc_dt_ids[] = { ++ { .compatible = "brcm,iproc-ccb-mdio"}, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, bcm_iproc_dt_ids); ++ ++static struct platform_driver iproc_ccb_mdiobus_driver = { ++ .probe = ccb_mii_probe, ++ .remove = ccb_mii_remove, ++ .driver = { ++ .name = DRIVER_NAME, ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(bcm_iproc_dt_ids), ++ }, ++}; ++/*module_platform_driver(iproc_ccb_mdiobus_driver);*/ ++ ++#else /* CONFIG_OF_MDIO */ ++ ++static int __devinit ccb_mdiobus_probe(struct platform_device *pdev) ++{ ++ struct mii_bus *mii_bus; ++ struct ccb_mdiobus_private *bus_priv; ++ struct iproc_mdiobus_data *bus_data = pdev->dev.platform_data; ++ struct ccb_mdio_ctrl *ccb_ctrl; ++ int ret; ++ ++ /* Get register base address */ ++ ccb_ctrl = ccb_mdio_res_alloc(); ++ if (!ccb_ctrl) { ++ printk(KERN_ERR "ccb mdio res alloc failed\n"); ++ ret = -ENOMEM; ++ goto error_exit; ++ } ++ ++ ccb_mii_init(ccb_ctrl); ++ ++ mii_bus = 
mdiobus_alloc_size(sizeof(*bus_priv)); ++ if (!mii_bus) { ++ dev_err(&pdev->dev, "mdiobus alloc filed\n"); ++ ret = -ENOMEM; ++ goto error_free_ctrl; ++ } ++ ++ mii_bus->name = "iproc_ccb_mdiobus"; ++ snprintf(mii_bus->id, MII_BUS_ID_SIZE, IPROC_MDIO_ID_FMT, ++ bus_data->logbus_num, bus_data->logbus_type); ++ mii_bus->parent = &pdev->dev; ++ mii_bus->read = ccb_mdiobus_read; ++ mii_bus->write = ccb_mdiobus_write; ++ ++ bus_priv = mii_bus->priv; ++ memcpy(&bus_priv->bus_data, bus_data, sizeof(struct iproc_mdiobus_data)); ++ bus_priv->hw_ctrl = ccb_ctrl; ++ ++ ret = mdiobus_register(mii_bus); ++ if (ret) { ++ dev_err(&pdev->dev, "mdiobus_register failed\n"); ++ goto error_free_bus; ++ } ++ ++ platform_set_drvdata(pdev, mii_bus); ++ ++ return 0; ++ ++error_free_bus: ++ kfree(mii_bus); ++error_free_ctrl: ++ ccb_mdio_res_free(ccb_ctrl); ++error_exit: ++ return ret; ++} ++ ++static int __devexit ccb_mdiobus_remove(struct platform_device *pdev) ++{ ++ struct mii_bus *mii_bus = platform_get_drvdata(pdev); ++ struct ccb_mdiobus_private *bus_priv; ++ ++ if (mii_bus) { ++ bus_priv = mii_bus->priv; ++ ++ mdiobus_unregister(mii_bus); ++ if (bus_priv) { ++ ccb_mdio_res_free(bus_priv->hw_ctrl); ++ } ++ mdiobus_free(mii_bus); ++ } ++ ++ return 0; ++} ++ ++static struct platform_driver iproc_ccb_mdiobus_driver = ++{ ++ .driver = { ++ .name = "iproc_ccb_mdio", ++ .owner = THIS_MODULE, ++ }, ++ .probe = ccb_mdiobus_probe, ++ .remove = ccb_mdiobus_remove, ++}; ++#endif /* CONFIG_OF_MDIO */ ++ ++int ++ccb_mdio_init(void) ++{ ++ int ret = -ENODEV; ++ dev_t mdio_dev; ++ mdio_info_t *mdio = NULL; ++ ++ ret = _mdio_handler_init(); ++ if(ret != 0) { ++ ret = -ENOMEM; ++ goto error_exit; ++ } ++ ++ mdio = mdio_devices.mdio; ++ ++ if (mdio_major) { ++ mdio_dev = MKDEV(mdio_major, 0); ++ ret = register_chrdev_region(mdio_dev, 1, "mdio"); ++ } else { ++ ret = alloc_chrdev_region(&mdio_dev, 0, 1, "mdio"); ++ mdio_major = MAJOR(mdio_dev); ++ } ++ if (ret) { ++ goto error_exit; ++ } ++ ++ 
cdev_init(&mdio_cdev, &mdio_fops); ++ ret = cdev_add(&mdio_cdev, mdio_dev, 1); ++ if (ret) { ++ printk(KERN_ERR "Fail to add mdio char dev!\n"); ++ goto error_region; ++ } ++ ++ platform_driver_register(&iproc_ccb_mdiobus_driver); ++ ++ return 0; ++ ++error_region: ++ unregister_chrdev_region(mdio_dev, 1); ++error_exit: ++ kfree(mdio); ++ return ret; ++} ++ ++void ++ccb_mdio_exit(void) ++{ ++ mdio_info_t *mdio = NULL; ++ ++ mdio = mdio_devices.mdio; ++ kfree(mdio); ++ ++ mdio_devices.mdio = NULL; ++ mdio_devices.init = 0; ++ unregister_chrdev_region(MKDEV(mdio_major, 0), 1); ++ ++ platform_driver_unregister(&iproc_ccb_mdiobus_driver); ++} ++ ++ ++//module_init(ccb_mdio_init); ++subsys_initcall(ccb_mdio_init); ++module_exit(ccb_mdio_exit); ++ ++ ++EXPORT_SYMBOL(ccb_mii_init); ++EXPORT_SYMBOL(ccb_mii_freq_set); ++EXPORT_SYMBOL(ccb_mii_read); ++EXPORT_SYMBOL(ccb_mii_write); ++ ++MODULE_AUTHOR("Broadcom"); ++MODULE_DESCRIPTION("BCM5301X MDIO Device Driver"); ++MODULE_LICENSE("GPL"); ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/mdio/ccg_mdio.c b/drivers/net/ethernet/broadcom/mdio/ccg_mdio.c +--- a/drivers/net/ethernet/broadcom/mdio/ccg_mdio.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/mdio/ccg_mdio.c 2017-11-09 17:53:44.080295000 +0800 +@@ -0,0 +1,488 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "iproc_mdio.h" ++ ++#include ++#include ++#include ++#include ++ ++static struct iproc_mdiobus_data iproc_mdiobus_data; ++/*#define CCG_MDIO_BASE_ADDRESS IPROC_MII_MGMT_CTL*/ ++ ++#define MGMT_CTL_REG 0x000 ++#define MGMT_CTL__BYP_SHIFT 10 ++#define MGMT_CTL__BYP_WIDTH 1 ++#define 
MGMT_CTL__BYP_MASK ((1 << MGMT_CTL__BYP_WIDTH) - 1) ++#define MGMT_CTL__EXT_SHIFT 9 ++#define MGMT_CTL__EXT_WIDTH 1 ++#define MGMT_CTL__EXT_MASK ((1 << MGMT_CTL__EXT_WIDTH) - 1) ++#define MGMT_CTL__BSY_SHIFT 8 ++#define MGMT_CTL__BSY_WIDTH 1 ++#define MGMT_CTL__BSY_MASK ((1 << MGMT_CTL__BSY_WIDTH) - 1) ++#define MGMT_CTL__PRE_SHIFT 7 ++#define MGMT_CTL__PRE_WIDTH 1 ++#define MGMT_CTL__PRE_MASK ((1 << MGMT_CTL__BSY_WIDTH) - 1) ++#define MGMT_CTL__MDCDIV_SHIFT 0 ++#define MGMT_CTL__MDCDIV_WIDTH 7 ++#define MGMT_CTL__MDCDIV_MASK ((1 << MGMT_CTL__MDCDIV_WIDTH) - 1) ++ ++#define MGMT_CMD_DATA_REG 0x004 ++#define MGMT_CMD_DATA__SB_SHIFT 30 ++#define MGMT_CMD_DATA__SB_WIDTH 2 ++#define MGMT_CMD_DATA__SB_MASK ((1 << MGMT_CMD_DATA__SB_WIDTH) - 1) ++#define MGMT_CMD_DATA__OP_SHIFT 28 ++#define MGMT_CMD_DATA__OP_WIDTH 2 ++#define MGMT_CMD_DATA__OP_MASK ((1 << MGMT_CMD_DATA__OP_WIDTH) - 1) ++#define MGMT_CMD_DATA__PA_SHIFT 23 ++#define MGMT_CMD_DATA__PA_WIDTH 5 ++#define MGMT_CMD_DATA__PA_MASK ((1 << MGMT_CMD_DATA__PA_WIDTH) - 1) ++#define MGMT_CMD_DATA__RA_SHIFT 18 ++#define MGMT_CMD_DATA__RA_WIDTH 5 ++#define MGMT_CMD_DATA__RA_MASK ((1 << MGMT_CMD_DATA__RA_WIDTH) - 1) ++#define MGMT_CMD_DATA__TA_SHIFT 16 ++#define MGMT_CMD_DATA__TA_WIDTH 2 ++#define MGMT_CMD_DATA__TA_MASK ((1 << MGMT_CMD_DATA__TA_WIDTH) - 1) ++#define MGMT_CMD_DATA__DATA_SHIFT 0 ++#define MGMT_CMD_DATA__DATA_WIDTH 16 ++#define MGMT_CMD_DATA__DATA_MASK ((1 << MGMT_CMD_DATA__DATA_WIDTH) - 1) ++ ++ ++#define SET_REG_FIELD(reg_value, fshift, fmask, fvalue) \ ++ (reg_value) = ((reg_value) & ~((fmask) << (fshift))) | \ ++ (((fvalue) & (fmask)) << (fshift)) ++#define ISET_REG_FIELD(reg_value, fshift, fmask, fvalue) \ ++ (reg_value) = (reg_value) | (((fvalue) & (fmask)) << (fshift)) ++#define GET_REG_FIELD(reg_value, fshift, fmask) \ ++ (((reg_value) & ((fmask) << (fshift))) >> (fshift)) ++ ++#define MII_OP_MAX_HALT_USEC 500 ++#define MII_OP_HALT_USEC 10 ++ ++enum { ++ MII_OP_MODE_READ, ++ MII_OP_MODE_WRITE, ++ 
MII_OP_MODE_MAX ++}; ++ ++/** ++ * struct cmicd_mdio: cmicd mdio structure ++ * @resource: resource of cmicd cmc2 ++ * @base: base address of cmicd cmc2 ++ * @lock: spin lock protecting io access ++ */ ++struct ccg_mdio_ctrl { ++ void __iomem *base; ++ /* Use spinlock to co-operate that the caller might be in interrupt context */ ++ /* struct mutex lock; */ ++ spinlock_t lock; ++ int ref_cnt; ++}; ++ ++struct ccg_mdiobus_private { ++ /* iproc_mdiobus_data field have to be placed at the beginning of ++ * mdiobus private data */ ++ struct iproc_mdiobus_data bus_data; ++ struct ccg_mdio_ctrl *hw_ctrl; ++}; ++ ++struct ccg_mii_cmd { ++ int bus_id; ++ int ext_sel; ++ int phy_id; ++ int regnum; ++ u16 op_mode; ++ u16 val; ++}; ++ ++static struct ccg_mdio_ctrl *ccg_mdio = NULL; ++static uint32_t ccg_mdio_clk_rate; ++ ++ ++static void __maybe_unused ccg_mdiobus_test(struct mii_bus *mii_bus) ++{ ++ int i; ++ u16 data1 = 0, data2 = 0; ++ struct phy_device *phy_dev; ++ struct ccg_mdiobus_private *bus_priv = mii_bus->priv; ++ struct iproc_mdiobus_data *bus_data = &bus_priv->bus_data; ++ ++ dev_info(mii_bus->parent, "%s : %s phy bus num[%d], type[%d]\n", ++ __func__, mii_bus->id, bus_data->phybus_num, bus_data->phybus_type); ++ ++ /* Check if mdiobus_read works fine */ ++ for (i = 0; i < PHY_MAX_ADDR; i++) { ++ phy_dev = mii_bus->phy_map[i]; ++ if (phy_dev) ++ dev_info(mii_bus->parent, "phy[%d] id=0x%08x, addr = %d\n", ++ i, phy_dev->phy_id, phy_dev->addr); ++ } ++ ++ /* Check if general interface function for mdiobus read works fine */ ++ for (i = 0; i < PHY_MAX_ADDR; i++) { ++ data1 = mii_bus->read(mii_bus, i, 2); ++ data2 = mii_bus->read(mii_bus, i, 3); ++ if ((data1 < 0) || (data2 < 0)) { ++ dev_info(mii_bus->parent, ++ "iproc_mdiobus_read failed!, %s phy bus num[%d], type[%d], phyaddr = %d, nRet1 = %d, nRet2 = %d\n", ++ mii_bus->id, bus_data->phybus_num, bus_data->phybus_type, i, data1, data2); ++ } else { ++ dev_info(mii_bus->parent, ++ "read %s phy bus num[%d] type[%d] 
phyaddr[%d], reg2 = 0x%x, reg3 = 0x%x\n", ++ mii_bus->id, bus_data->phybus_num, bus_data->phybus_type, i, data1, data2); ++ } ++ } ++ ++} ++ ++static inline u32 ccg_mii_reg_read(struct ccg_mdio_ctrl *ccg_mii, u32 reg) ++{ ++ return readl(ccg_mii->base + reg); ++} ++ ++static inline void ccg_mii_reg_write(struct ccg_mdio_ctrl *ccg_mii, u32 reg, u32 data) ++{ ++ writel(data, ccg_mii->base + reg); ++} ++ ++static inline int ccg_mii_busy(struct ccg_mdio_ctrl *ccg_mii, int to_usec) ++{ ++ do { ++ if(!GET_REG_FIELD(ccg_mii_reg_read(ccg_mii, MGMT_CTL_REG), ++ MGMT_CTL__BSY_SHIFT, MGMT_CTL__BSY_MASK)) ++ return 0; ++ udelay(MII_OP_HALT_USEC); ++ to_usec -= MII_OP_HALT_USEC; ++ } while (to_usec > 0); ++ ++ return 1; ++} ++ ++static int do_ccg_mii_op(struct ccg_mdio_ctrl *ccg_mii, struct ccg_mii_cmd *cmd) ++{ ++ u32 cmd_data = 0, mgt_ctrl; ++ unsigned long flags; ++ int ret = 0; ++ ++ if (MII_OP_MODE_WRITE == cmd->op_mode) { ++ ISET_REG_FIELD(cmd_data, MGMT_CMD_DATA__OP_SHIFT, ++ MGMT_CMD_DATA__OP_MASK, 1); ++ ISET_REG_FIELD(cmd_data, MGMT_CMD_DATA__DATA_SHIFT, ++ MGMT_CMD_DATA__DATA_MASK, cmd->val); ++ } ++ else if (MII_OP_MODE_READ == cmd->op_mode) { ++ ISET_REG_FIELD(cmd_data, MGMT_CMD_DATA__OP_SHIFT, ++ MGMT_CMD_DATA__OP_MASK, 2); ++ } ++ else { ++ printk(KERN_ERR "%s : invald operation %d\n", __func__, cmd->op_mode); ++ return -EINVAL; ++ } ++ ++ ISET_REG_FIELD(cmd_data, MGMT_CMD_DATA__PA_SHIFT, ++ MGMT_CMD_DATA__PA_MASK, cmd->phy_id); ++ ISET_REG_FIELD(cmd_data, MGMT_CMD_DATA__RA_SHIFT, ++ MGMT_CMD_DATA__RA_MASK, cmd->regnum); ++ ISET_REG_FIELD(cmd_data, MGMT_CMD_DATA__TA_SHIFT, ++ MGMT_CMD_DATA__TA_MASK, 2); ++ ISET_REG_FIELD(cmd_data, MGMT_CMD_DATA__SB_SHIFT, ++ MGMT_CMD_DATA__SB_MASK, 1); ++ ++ /* mutex_lock(&ccg_mii->lock); */ ++ spin_lock_irqsave(&ccg_mii->lock, flags); ++ ++ if (ccg_mii_busy(ccg_mii, MII_OP_MAX_HALT_USEC)) { ++ ret = -EBUSY; ++ printk(KERN_ERR "%s : bus busy (1)\n", __func__); ++ goto err_exit_unlock; ++ } ++ ++ mgt_ctrl = 
ccg_mii_reg_read(ccg_mii, MGMT_CTL_REG); ++ if (cmd->ext_sel != GET_REG_FIELD(mgt_ctrl, MGMT_CTL__EXT_SHIFT, ++ MGMT_CTL__EXT_MASK)) { ++ SET_REG_FIELD(mgt_ctrl, MGMT_CTL__EXT_SHIFT, MGMT_CTL__EXT_MASK, cmd->ext_sel); ++ ccg_mii_reg_write(ccg_mii, MGMT_CTL_REG, mgt_ctrl); ++ } ++ ++ ccg_mii_reg_write(ccg_mii, MGMT_CMD_DATA_REG, cmd_data); ++ ++ if (ccg_mii_busy(ccg_mii, MII_OP_MAX_HALT_USEC)) { ++ ret = -EBUSY; ++ printk(KERN_ERR "%s : bus busy (2)\n", __func__); ++ goto err_exit_unlock; ++ } ++ ++ if (MII_OP_MODE_READ == cmd->op_mode) { ++ ret = GET_REG_FIELD(ccg_mii_reg_read(ccg_mii, MGMT_CMD_DATA_REG), ++ MGMT_CMD_DATA__DATA_SHIFT, MGMT_CMD_DATA__DATA_MASK); ++ } ++ ++ /* mutex_unlock(&ccg_mii->lock); */ ++ spin_unlock_irqrestore(&ccg_mii->lock, flags); ++ ++ return ret; ++ ++err_exit_unlock: ++ /* mutex_unlock(&ccg_mii->lock); */ ++ spin_unlock_irqrestore(&ccg_mii->lock, flags); ++ return ret; ++} ++ ++static int ccg_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) ++{ ++ struct ccg_mdiobus_private *bus_priv = bus->priv; ++ struct iproc_mdiobus_data *bus_data = &bus_priv->bus_data; ++ struct ccg_mii_cmd cmd = {0}; ++ ++ cmd.bus_id = bus_data->phybus_num; ++ if (IPROC_MDIOBUS_TYPE_EXTERNAL == bus_data->phybus_type) ++ cmd.ext_sel = 1; ++ cmd.phy_id = phy_id; ++ cmd.regnum = regnum; ++ cmd.op_mode = MII_OP_MODE_READ; ++ ++ return do_ccg_mii_op(bus_priv->hw_ctrl, &cmd); ++} ++ ++static int ccg_mdiobus_write(struct mii_bus *bus, int phy_id, ++ int regnum, u16 val) ++{ ++ struct ccg_mdiobus_private *bus_priv = bus->priv; ++ struct iproc_mdiobus_data *bus_data = &bus_priv->bus_data; ++ struct ccg_mii_cmd cmd = {0}; ++ ++ cmd.bus_id = bus_data->phybus_num; ++ if (IPROC_MDIOBUS_TYPE_EXTERNAL == bus_data->phybus_type) ++ cmd.ext_sel = 1; ++ cmd.phy_id = phy_id; ++ cmd.regnum = regnum; ++ cmd.op_mode = MII_OP_MODE_WRITE; ++ cmd.val = val; ++ ++ return do_ccg_mii_op(bus_priv->hw_ctrl, &cmd); ++} ++ ++static struct ccg_mdio_ctrl * ccg_mdio_res_alloc(void) ++{ ++ 
if (!ccg_mdio) { ++ ccg_mdio = kzalloc(sizeof(*ccg_mdio), GFP_KERNEL); ++ if (!ccg_mdio) ++ return NULL; ++ ++ /* mutex_init(&ccg_mdio->lock); */ ++ spin_lock_init(&ccg_mdio->lock); ++ ccg_mdio->ref_cnt = 1; ++ } ++ else ++ ccg_mdio->ref_cnt ++; ++ ++ return ccg_mdio; ++} ++ ++static void ccg_mdio_res_free(struct ccg_mdio_ctrl *ctrl) ++{ ++ if (ctrl) { ++ ctrl->ref_cnt --; ++ if (ctrl->ref_cnt == 0) { ++ iounmap(ctrl->base); ++ kfree(ctrl); ++ ccg_mdio = NULL; ++ } ++ } ++} ++ ++static void ccg_mii_init(struct ccg_mdio_ctrl *ccg_mii) ++{ ++ u32 clk_rate, val = 0; ++ ++ if(ccg_mii->ref_cnt == 1) { ++ /* Set preamble enabled */ ++ ISET_REG_FIELD(val, MGMT_CTL__PRE_SHIFT, MGMT_CTL__PRE_MASK, 1); ++ ++ clk_rate = ccg_mdio_clk_rate; ++ ++ /* ++ * MII Mgt Clock (MDC) Divisor. 0x0: Disable output of the MDC ++ * Non-zero: Output the MDC with a frequency that is ++ * PCLK/(2* the value of this field). ++ */ ++ ISET_REG_FIELD(val, MGMT_CTL__MDCDIV_SHIFT, MGMT_CTL__MDCDIV_MASK, ++ clk_rate/(1000000)); /* Set the MII default clock to 1MHz: */ ++ ++ ccg_mii_reg_write(ccg_mii, MGMT_CTL_REG, val); ++ } ++} ++ ++static int ccg_mdiobus_probe(struct platform_device *pdev) ++{ ++ struct mii_bus *mii_bus; ++ struct device_node *dn = pdev->dev.of_node; ++ struct ccg_mdiobus_private *bus_priv; ++ struct iproc_mdiobus_data *bus_data; ++ struct ccg_mdio_ctrl *ccg_ctrl; ++ u32 mdio_bus_id; ++ const char *mdio_bus_type; ++ struct clk *clk=NULL; ++ int ret; ++ ++ if (!of_device_is_available(dn)) ++ return -ENODEV; ++ ++ ccg_ctrl = ccg_mdio_res_alloc(); ++ if (!ccg_ctrl) { ++ dev_err(&pdev->dev, "ccg mdio res alloc failed\n"); ++ ret = -ENOMEM; ++ goto err_exit; ++ } ++ ++ /* Get register base address */ ++ ccg_ctrl->base = (void *)of_iomap(dn, 0); ++ ++ clk = of_clk_get (dn, 0); ++ if (clk) ++ ccg_mdio_clk_rate = clk_get_rate(clk)/2; /* used by ccg_mii_init */ ++ else { ++ printk("No CCG MDIO Clock available from DT, use default clock rate: 50MHz\n"); ++ ccg_mdio_clk_rate = 50000000; ++ } 
++ ++ ccg_mii_init(ccg_ctrl); ++ ++ if (of_property_read_u32(dn, "#bus-id", &mdio_bus_id)) ++ mdio_bus_id = 0; /* default: 0 */ ++ ++ if (of_property_read_string(dn, "bus-type", &mdio_bus_type)) ++ mdio_bus_type = "internal"; /* default: "internal" */ ++ ++ iproc_mdiobus_data.phybus_num = (u8) mdio_bus_id; ++ if (!strcmp(mdio_bus_type, "internal")) ++ iproc_mdiobus_data.phybus_type = IPROC_MDIOBUS_TYPE_INTERNAL; ++ else ++ iproc_mdiobus_data.phybus_type = IPROC_MDIOBUS_TYPE_EXTERNAL; ++ ++ /* Note: this applies to CCB/CCG MDIO, but not for CMICD MDIO */ ++ iproc_mdiobus_data.logbus_num = iproc_mdiobus_data.phybus_num; ++ iproc_mdiobus_data.logbus_type = iproc_mdiobus_data.phybus_type; ++ ++ bus_data = &iproc_mdiobus_data; ++ ++ mii_bus = mdiobus_alloc_size(sizeof(*bus_priv)); ++ if (!mii_bus) { ++ dev_err(&pdev->dev, "mdiobus alloc filed\n"); ++ ret = -ENOMEM; ++ goto err_free_ctrl; ++ } ++ ++ mii_bus->name = "iproc_ccg_mdiobus"; ++ snprintf(mii_bus->id, MII_BUS_ID_SIZE, IPROC_MDIO_ID_FMT, ++ bus_data->logbus_num, bus_data->logbus_type); ++ mii_bus->parent = &pdev->dev; ++ mii_bus->read = ccg_mdiobus_read; ++ mii_bus->write = ccg_mdiobus_write; ++ ++ bus_priv = mii_bus->priv; ++ memcpy(&bus_priv->bus_data, bus_data, sizeof(struct iproc_mdiobus_data)); ++ bus_priv->hw_ctrl = ccg_ctrl; ++ ++ if (IS_ENABLED(CONFIG_MACH_GH2) || IS_ENABLED(CONFIG_MACH_WH2)) ++ ret = of_mdiobus_register(mii_bus, dn); ++ else ++ ret = mdiobus_register(mii_bus); ++ ++ if (ret) { ++ dev_err(&pdev->dev, "mdiobus_register failed\n"); ++ goto err_free_bus; ++ } ++ ++ platform_set_drvdata(pdev, mii_bus); ++ ++#if 0 ++ ccg_mdiobus_test(mii_bus); ++#endif ++ ++ return 0; ++ ++err_free_bus: ++ kfree(mii_bus); ++err_free_ctrl: ++ ccg_mdio_res_free(ccg_ctrl); ++err_exit: ++ return ret; ++} ++ ++static int ccg_mdiobus_remove(struct platform_device *pdev) ++{ ++ struct mii_bus *mii_bus = platform_get_drvdata(pdev); ++ struct ccg_mdiobus_private *bus_priv; ++ ++ if (mii_bus) { ++ bus_priv = 
mii_bus->priv; ++ ++ mdiobus_unregister(mii_bus); ++ if (bus_priv) ++ ccg_mdio_res_free(bus_priv->hw_ctrl); ++ mdiobus_free(mii_bus); ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id bcm_iproc_dt_ids[] = { ++ { .compatible = "brcm,iproc-ccg-mdio"}, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, bcm_iproc_dt_ids); ++ ++ ++static struct platform_driver iproc_ccg_mdiobus_driver = ++{ ++ .driver = { ++ .name = "iproc_ccg_mdio", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(bcm_iproc_dt_ids), ++ }, ++ .probe = ccg_mdiobus_probe, ++ .remove = ccg_mdiobus_remove, ++}; ++ ++static int __init ccg_mdio_init(void) ++{ ++ return platform_driver_register(&iproc_ccg_mdiobus_driver); ++} ++ ++static void __exit ccg_mdio_exit(void) ++{ ++ platform_driver_unregister(&iproc_ccg_mdiobus_driver); ++} ++ ++//module_init(ccg_mdio_init); ++subsys_initcall(ccg_mdio_init); ++module_exit(ccg_mdio_exit); ++ ++MODULE_AUTHOR("Broadcom Corporation"); ++MODULE_DESCRIPTION("iProc CCG mdio driver"); ++MODULE_LICENSE("GPL"); ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/mdio/cmicd_mdio.c b/drivers/net/ethernet/broadcom/mdio/cmicd_mdio.c +--- a/drivers/net/ethernet/broadcom/mdio/cmicd_mdio.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/mdio/cmicd_mdio.c 2017-11-09 17:53:44.081297000 +0800 +@@ -0,0 +1,606 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "iproc_mdio.h" ++ ++#include ++#include ++#include ++#include ++ ++static struct iproc_mdiobus_data iproc_mdiobus_data; ++ ++/* CMICD MDIO */ ++#define CMIC_COMMON_MIIM_PARAM_OFFSET 0x080 ++#define CMIC_COMMON_MIIM_PARAM__MIIM_CYCLE_L 31 ++#define 
CMIC_COMMON_MIIM_PARAM__MIIM_CYCLE_R 29 ++#define CMIC_COMMON_MIIM_PARAM__MIIM_CYCLE_WIDTH 3 ++#define CMIC_COMMON_MIIM_PARAM__MIIM_CYCLE_RESETVALUE 0x0 ++#define CMIC_COMMON_MIIM_PARAM__INTERNAL_SEL 25 ++#define CMIC_COMMON_MIIM_PARAM__INTERNAL_SEL_WIDTH 1 ++#define CMIC_COMMON_MIIM_PARAM__INTERNAL_SEL_RESETVALUE 0x0 ++#define CMIC_COMMON_MIIM_PARAM__BUS_ID_L 24 ++#define CMIC_COMMON_MIIM_PARAM__BUS_ID_R 22 ++#define CMIC_COMMON_MIIM_PARAM__BUS_ID_WIDTH 3 ++#define CMIC_COMMON_MIIM_PARAM__BUS_ID_RESETVALUE 0x0 ++#define CMIC_COMMON_MIIM_PARAM__C45_SEL 21 ++#define CMIC_COMMON_MIIM_PARAM__C45_SEL_WIDTH 1 ++#define CMIC_COMMON_MIIM_PARAM__C45_SEL_RESETVALUE 0x0 ++#define CMIC_COMMON_MIIM_PARAM__PHY_ID_L 20 ++#define CMIC_COMMON_MIIM_PARAM__PHY_ID_R 16 ++#define CMIC_COMMON_MIIM_PARAM__PHY_ID_WIDTH 5 ++#define CMIC_COMMON_MIIM_PARAM__PHY_ID_RESETVALUE 0x0 ++#define CMIC_COMMON_MIIM_PARAM__PHY_DATA_L 15 ++#define CMIC_COMMON_MIIM_PARAM__PHY_DATA_R 0 ++#define CMIC_COMMON_MIIM_PARAM__PHY_DATA_WIDTH 16 ++#define CMIC_COMMON_MIIM_PARAM__PHY_DATA_RESETVALUE 0x0000 ++#define CMIC_COMMON_MIIM_PARAM__RESERVED_L 28 ++#define CMIC_COMMON_MIIM_PARAM__RESERVED_R 26 ++#define CMIC_COMMON_MIIM_PARAM_WIDTH 32 ++#define CMIC_COMMON_MIIM_PARAM__WIDTH 32 ++#define CMIC_COMMON_MIIM_PARAM_ALL_L 31 ++#define CMIC_COMMON_MIIM_PARAM_ALL_R 0 ++#define CMIC_COMMON_MIIM_PARAM__ALL_L 31 ++#define CMIC_COMMON_MIIM_PARAM__ALL_R 0 ++#define CMIC_COMMON_MIIM_PARAM_DATAMASK 0xe3ffffff ++#define CMIC_COMMON_MIIM_PARAM_RDWRMASK 0x1c000000 ++#define CMIC_COMMON_MIIM_PARAM_RESETVALUE 0x0 ++ ++#define CMIC_COMMON_MIIM_READ_DATA_OFFSET 0x084 ++#define CMIC_COMMON_MIIM_READ_DATA__DATA_L 15 ++#define CMIC_COMMON_MIIM_READ_DATA__DATA_R 0 ++#define CMIC_COMMON_MIIM_READ_DATA__DATA_WIDTH 16 ++#define CMIC_COMMON_MIIM_READ_DATA__DATA_RESETVALUE 0x0000 ++#define CMIC_COMMON_MIIM_READ_DATA__RESERVED_L 31 ++#define CMIC_COMMON_MIIM_READ_DATA__RESERVED_R 16 ++#define CMIC_COMMON_MIIM_READ_DATA_WIDTH 16 ++#define 
CMIC_COMMON_MIIM_READ_DATA__WIDTH 16 ++#define CMIC_COMMON_MIIM_READ_DATA_ALL_L 15 ++#define CMIC_COMMON_MIIM_READ_DATA_ALL_R 0 ++#define CMIC_COMMON_MIIM_READ_DATA__ALL_L 15 ++#define CMIC_COMMON_MIIM_READ_DATA__ALL_R 0 ++#define CMIC_COMMON_MIIM_READ_DATA_DATAMASK 0x0000ffff ++#define CMIC_COMMON_MIIM_READ_DATA_RDWRMASK 0xffff0000 ++#define CMIC_COMMON_MIIM_READ_DATA_RESETVALUE 0x0 ++ ++#define CMIC_COMMON_MIIM_ADDRESS_OFFSET 0x088 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_DTYPE_L 20 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_DTYPE_R 16 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_DTYPE_WIDTH 5 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_DTYPE_RESETVALUE 0x0 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_REGADR_L 15 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_REGADR_R 0 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_REGADR_WIDTH 16 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_REGADR_RESETVALUE 0x0000 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_22_REGADR_L 4 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_22_REGADR_R 0 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_22_REGADR_WIDTH 5 ++#define CMIC_COMMON_MIIM_ADDRESS__CLAUSE_22_REGADR_RESETVALUE 0x0 ++#define CMIC_COMMON_MIIM_ADDRESS__RESERVED_L 31 ++#define CMIC_COMMON_MIIM_ADDRESS__RESERVED_R 21 ++#define CMIC_COMMON_MIIM_ADDRESS_WIDTH 21 ++#define CMIC_COMMON_MIIM_ADDRESS__WIDTH 21 ++#define CMIC_COMMON_MIIM_ADDRESS_ALL_L 20 ++#define CMIC_COMMON_MIIM_ADDRESS_ALL_R 0 ++#define CMIC_COMMON_MIIM_ADDRESS__ALL_L 20 ++#define CMIC_COMMON_MIIM_ADDRESS__ALL_R 0 ++#define CMIC_COMMON_MIIM_ADDRESS_DATAMASK 0x001fffff ++#define CMIC_COMMON_MIIM_ADDRESS_RDWRMASK 0xffe00000 ++#define CMIC_COMMON_MIIM_ADDRESS_RESETVALUE 0x0 ++ ++#define CMIC_COMMON_MIIM_CTRL_OFFSET 0x08c ++#define CMIC_COMMON_MIIM_CTRL__MIIM_RD_START 1 ++#define CMIC_COMMON_MIIM_CTRL__MIIM_RD_START_WIDTH 1 ++#define CMIC_COMMON_MIIM_CTRL__MIIM_RD_START_RESETVALUE 0x0 ++#define CMIC_COMMON_MIIM_CTRL__MIIM_WR_START 0 ++#define 
CMIC_COMMON_MIIM_CTRL__MIIM_WR_START_WIDTH 1 ++#define CMIC_COMMON_MIIM_CTRL__MIIM_WR_START_RESETVALUE 0x0 ++#define CMIC_COMMON_MIIM_CTRL__RESERVED_L 31 ++#define CMIC_COMMON_MIIM_CTRL__RESERVED_R 2 ++#define CMIC_COMMON_MIIM_CTRL_WIDTH 2 ++#define CMIC_COMMON_MIIM_CTRL__WIDTH 2 ++#define CMIC_COMMON_MIIM_CTRL_ALL_L 1 ++#define CMIC_COMMON_MIIM_CTRL_ALL_R 0 ++#define CMIC_COMMON_MIIM_CTRL__ALL_L 1 ++#define CMIC_COMMON_MIIM_CTRL__ALL_R 0 ++#define CMIC_COMMON_MIIM_CTRL_DATAMASK 0x00000003 ++#define CMIC_COMMON_MIIM_CTRL_RDWRMASK 0xfffffffc ++#define CMIC_COMMON_MIIM_CTRL_RESETVALUE 0x0 ++ ++#define CMIC_COMMON_MIIM_STAT_OFFSET 0x090 ++#define CMIC_COMMON_MIIM_STAT__MIIM_OPN_DONE 0 ++#define CMIC_COMMON_MIIM_STAT__MIIM_OPN_DONE_WIDTH 1 ++#define CMIC_COMMON_MIIM_STAT__MIIM_OPN_DONE_RESETVALUE 0x0 ++#define CMIC_COMMON_MIIM_STAT__RESERVED_L 31 ++#define CMIC_COMMON_MIIM_STAT__RESERVED_R 1 ++#define CMIC_COMMON_MIIM_STAT_WIDTH 1 ++#define CMIC_COMMON_MIIM_STAT__WIDTH 1 ++#define CMIC_COMMON_MIIM_STAT_ALL_L 0 ++#define CMIC_COMMON_MIIM_STAT_ALL_R 0 ++#define CMIC_COMMON_MIIM_STAT__ALL_L 0 ++#define CMIC_COMMON_MIIM_STAT__ALL_R 0 ++#define CMIC_COMMON_MIIM_STAT_DATAMASK 0x00000001 ++#define CMIC_COMMON_MIIM_STAT_RDWRMASK 0xfffffffe ++#define CMIC_COMMON_MIIM_STAT_RESETVALUE 0x0 ++ ++#define CMIC_COMMON_UC0_PIO_ENDIANESS 0x1F0 ++ ++#define MIIM_PARAM_REG CMIC_COMMON_MIIM_PARAM_OFFSET ++#define MIIM_PARAM__MIIM_CYCLE_SHIFT CMIC_COMMON_MIIM_PARAM__MIIM_CYCLE_R ++#define MIIM_PARAM__MIIM_CYCLE_MASK ((1 << CMIC_COMMON_MIIM_PARAM__MIIM_CYCLE_WIDTH) - 1) ++#define MIIM_PARAM__INTERNAL_SEL_SHIFT CMIC_COMMON_MIIM_PARAM__INTERNAL_SEL ++#define MIIM_PARAM__INTERNAL_SEL_MASK ((1 << CMIC_COMMON_MIIM_PARAM__INTERNAL_SEL_WIDTH) - 1) ++#define MIIM_PARAM__BUS_ID_SHIFT CMIC_COMMON_MIIM_PARAM__BUS_ID_R ++#define MIIM_PARAM__BUS_ID_MASK ((1 << CMIC_COMMON_MIIM_PARAM__BUS_ID_WIDTH) - 1) ++#define MIIM_PARAM__C45_SEL_SHIFT CMIC_COMMON_MIIM_PARAM__C45_SEL ++#define MIIM_PARAM__C45_SEL_MASK 
((1 << CMIC_COMMON_MIIM_PARAM__C45_SEL_WIDTH) - 1) ++#define MIIM_PARAM__PHY_ID_SHIFT CMIC_COMMON_MIIM_PARAM__PHY_ID_R ++#define MIIM_PARAM__PHY_ID_MASK ((1 << CMIC_COMMON_MIIM_PARAM__PHY_ID_WIDTH) - 1) ++#define MIIM_PARAM__PHY_DATA_SHIFT CMIC_COMMON_MIIM_PARAM__PHY_DATA_R ++#define MIIM_PARAM__PHY_DATA_MASK ((1 << CMIC_COMMON_MIIM_PARAM__PHY_DATA_WIDTH) - 1) ++ ++#define MIIM_READ_DATA_REG CMIC_COMMON_MIIM_READ_DATA_OFFSET ++#define MIIM_READ_DATA__DATA_SHIFT CMIC_COMMON_MIIM_READ_DATA__DATA_R ++#define MIIM_READ_DATA__DATA_MASK ((1 << CMIC_COMMON_MIIM_READ_DATA__DATA_WIDTH) - 1) ++ ++#define MIIM_ADDRESS_REG CMIC_COMMON_MIIM_ADDRESS_OFFSET ++#define MIIM_ADDRESS__CLAUSE_45_DTYPE_SHIFT CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_DTYPE_R ++#define MIIM_ADDRESS__CLAUSE_45_DTYPE_MASK ((1 << CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_DTYPE_WIDTH) - 1) ++#define MIIM_ADDRESS__CLAUSE_45_REGADR_SHIFT CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_REGADR_R ++#define MIIM_ADDRESS__CLAUSE_45_REGADR_MASK ((1 << CMIC_COMMON_MIIM_ADDRESS__CLAUSE_45_REGADR_WIDTH) - 1) ++#define MIIM_ADDRESS__CLAUSE_22_REGADR_SHIFT CMIC_COMMON_MIIM_ADDRESS__CLAUSE_22_REGADR_R ++#define MIIM_ADDRESS__CLAUSE_22_REGADR_MASK ((1 << CMIC_COMMON_MIIM_ADDRESS__CLAUSE_22_REGADR_WIDTH) - 1) ++ ++#define MIIM_CTRL_REG CMIC_COMMON_MIIM_CTRL_OFFSET ++#define MIIM_CTRL__MIIM_RD_START_SHIFT CMIC_COMMON_MIIM_CTRL__MIIM_RD_START ++#define MIIM_CTRL__MIIM_RD_START_MASK ((1 << CMIC_COMMON_MIIM_CTRL__MIIM_RD_START_WIDTH) - 1) ++#define MIIM_CTRL__MIIM_WR_START_SHIFT CMIC_COMMON_MIIM_CTRL__MIIM_WR_START ++#define MIIM_CTRL__MIIM_WR_START_MASK ((1 << CMIC_COMMON_MIIM_CTRL__MIIM_WR_START_WIDTH) - 1) ++ ++#define MIIM_STAT_REG CMIC_COMMON_MIIM_STAT_OFFSET ++#define MIIM_STAT__MIIM_OPN_DONE_SHIFT CMIC_COMMON_MIIM_STAT__MIIM_OPN_DONE ++#define MIIM_STAT__MIIM_OPN_DONE_MASK ((1 << CMIC_COMMON_MIIM_STAT__MIIM_OPN_DONE_WIDTH) - 1) ++ ++#define SET_REG_FIELD(reg_value, fshift, fmask, fvalue) \ ++ (reg_value) = ((reg_value) & ~((fmask) << (fshift))) 
| \ ++ (((fvalue) & (fmask)) << (fshift)) ++#define ISET_REG_FIELD(reg_value, fshift, fmask, fvalue) \ ++ (reg_value) = (reg_value) | (((fvalue) & (fmask)) << (fshift)) ++#define GET_REG_FIELD(reg_value, fshift, fmask) \ ++ (((reg_value) & ((fmask) << (fshift))) >> (fshift)) ++ ++#define MIIM_OP_MAX_HALT_USEC 500 ++ ++enum { ++ MIIM_OP_MODE_READ, ++ MIIM_OP_MODE_WRITE, ++ MIIM_OP_MODE_MAX ++}; ++ ++/** ++ * struct cmicd_mdio: cmicd mdio structure ++ * @base: base address of cmic_common ++ * @lock: spin lock protecting io access ++ */ ++struct cmicd_mdio_ctrl { ++ void __iomem *base; ++ /* Use spinlock to co-operate that the caller might be in interrupt context */ ++ /* struct mutex lock; */ ++ spinlock_t lock; ++ int ref_cnt; ++}; ++ ++struct cmicd_mdiobus_private { ++ /* iproc_mdiobus_data field have to be placed at the beginning of ++ * mdiobus private data */ ++ struct iproc_mdiobus_data bus_data; ++ struct cmicd_mdio_ctrl *hw_ctrl; ++}; ++ ++struct cmicd_miim_cmd { ++ int bus_id; ++ int int_sel; ++ int phy_id; ++ int regnum; ++ int c45_sel; ++ u16 op_mode; ++ u16 val; ++}; ++ ++static struct cmicd_mdio_ctrl *cmic_common = NULL; ++ ++ ++static void __maybe_unused cmicd_mdiobus_test(struct mii_bus *mii_bus) ++{ ++ int i; ++ u16 data1 = 0, data2 = 0; ++ struct phy_device *phy_dev; ++ struct cmicd_mdiobus_private *bus_priv = mii_bus->priv; ++ struct iproc_mdiobus_data *bus_data = &bus_priv->bus_data; ++ ++ dev_info(mii_bus->parent, "%s : %s phy bus num[%d], type[%d]\n", ++ __func__, mii_bus->id, bus_data->phybus_num, bus_data->phybus_type); ++ ++ /* Check if mdiobus_read works fine */ ++ for (i = 0; i < PHY_MAX_ADDR; i++) { ++ phy_dev = mii_bus->phy_map[i]; ++ if (phy_dev) ++ dev_info(mii_bus->parent, "phy[%d] id=0x%08x, addr = %d\n", ++ i, phy_dev->phy_id, phy_dev->addr); ++ } ++ ++ /* Check if general interface function for mdiobus read works fine */ ++ for (i = 0; i < PHY_MAX_ADDR; i++) { ++ data1 = mii_bus->read(mii_bus, i, 2); ++ data2 = mii_bus->read(mii_bus, 
i, 3); ++ if ((data1 < 0) || (data2 < 0)) { ++ dev_info(mii_bus->parent, ++ "iproc_mdiobus_read failed!, %s phy bus num[%d], type[%d], phyaddr = %d, nRet1 = %d, nRet2 = %d\n", ++ mii_bus->id, bus_data->phybus_num, bus_data->phybus_type, i, data1, data2); ++ } else { ++ dev_info(mii_bus->parent, ++ "read %s phy bus num[%d] type[%d] phyaddr[%d], reg2 = 0x%x, reg3 = 0x%x\n", ++ mii_bus->id, bus_data->phybus_num, bus_data->phybus_type, i, data1, data2); ++ } ++ } ++} ++ ++static inline u32 cmicd_miim_reg_read(struct cmicd_mdio_ctrl *cmic_mdio, u32 reg) ++{ ++ u32 value = readl(cmic_mdio->base + reg); ++#ifdef __BIG_ENDIAN ++ if (readl(cmic_mdio->base + CMIC_COMMON_UC0_PIO_ENDIANESS) != 0) ++ { ++ /* CMICD is in big-endian mode */ ++ value = swab32(value); ++ } ++#endif ++ return value; ++} ++ ++static inline void cmicd_miim_reg_write(struct cmicd_mdio_ctrl *cmic_mdio, u32 reg, u32 data) ++{ ++#ifdef __BIG_ENDIAN ++ if (readl(cmic_mdio->base + CMIC_COMMON_UC0_PIO_ENDIANESS) != 0) ++ { ++ /* CMICD is in big-endian mode */ ++ writel(swab32(data), cmic_mdio->base + reg); ++ return; ++ } ++#endif ++ writel(data, cmic_mdio->base + reg); ++} ++ ++static inline void cmicd_miim_set_op_read(u32 *data, u32 set) ++{ ++ SET_REG_FIELD(*data, MIIM_CTRL__MIIM_RD_START_SHIFT, ++ MIIM_CTRL__MIIM_RD_START_MASK, set); ++} ++ ++static inline void cmicd_miim_set_op_write(u32 *data, u32 set) ++{ ++ SET_REG_FIELD(*data, MIIM_CTRL__MIIM_WR_START_SHIFT, ++ MIIM_CTRL__MIIM_WR_START_MASK, set); ++} ++ ++static inline int do_cmicd_miim_op(struct cmicd_mdio_ctrl *cmic_mdio, u32 op, u32 param, u32 addr) ++{ ++ u32 val, op_done; ++ unsigned long flags; ++ int ret = 0; ++ int usec = MIIM_OP_MAX_HALT_USEC; ++ ++ if (op >= MIIM_OP_MODE_MAX) { ++ printk(KERN_ERR "%s : invalid op code %d\n", __func__, op); ++ return -EINVAL; ++ } ++ ++ /* mutex_lock(&cmic_mdio->lock); */ ++ spin_lock_irqsave(&cmic_mdio->lock, flags); ++ ++ cmicd_miim_reg_write(cmic_mdio, MIIM_PARAM_REG, param); ++ 
cmicd_miim_reg_write(cmic_mdio, MIIM_ADDRESS_REG, addr); ++ val = cmicd_miim_reg_read(cmic_mdio, MIIM_CTRL_REG); ++ if(op == MIIM_OP_MODE_READ) ++ cmicd_miim_set_op_read(&val, 1); ++ else ++ cmicd_miim_set_op_write(&val, 1); ++ cmicd_miim_reg_write(cmic_mdio, MIIM_CTRL_REG, val); ++ ++ do { ++ op_done = GET_REG_FIELD(cmicd_miim_reg_read(cmic_mdio, MIIM_STAT_REG), ++ MIIM_STAT__MIIM_OPN_DONE_SHIFT, MIIM_STAT__MIIM_OPN_DONE_MASK); ++ if (op_done) ++ break; ++ ++ udelay(1); ++ usec--; ++ } while (usec > 0); ++ ++ if (op_done) { ++ if(op == MIIM_OP_MODE_READ) ++ ret = cmicd_miim_reg_read(cmic_mdio, MIIM_READ_DATA_REG); ++ } ++ else ++ ret = -ETIME; ++ ++ val = cmicd_miim_reg_read(cmic_mdio, MIIM_CTRL_REG); ++ if(op == MIIM_OP_MODE_READ) ++ cmicd_miim_set_op_read(&val, 0); ++ else ++ cmicd_miim_set_op_write(&val, 0); ++ cmicd_miim_reg_write(cmic_mdio, MIIM_CTRL_REG, val); ++ ++ /* mutex_unlock(&cmic_mdio->lock); */ ++ spin_unlock_irqrestore(&cmic_mdio->lock, flags); ++ ++ return ret; ++} ++ ++ ++static int cmicd_miim_op(struct cmicd_mdio_ctrl *cmic_mdio, struct cmicd_miim_cmd *cmd) ++{ ++ u32 miim_param =0, miim_addr = 0; ++ ++ ISET_REG_FIELD(miim_param, MIIM_PARAM__BUS_ID_SHIFT, ++ MIIM_PARAM__BUS_ID_MASK, cmd->bus_id); ++ ++ if (cmd->int_sel) ++ ISET_REG_FIELD(miim_param, MIIM_PARAM__INTERNAL_SEL_SHIFT, ++ MIIM_PARAM__INTERNAL_SEL_MASK, 1); ++ ++ ISET_REG_FIELD(miim_param, MIIM_PARAM__PHY_ID_SHIFT, ++ MIIM_PARAM__PHY_ID_MASK, cmd->phy_id); ++ ++ if (cmd->op_mode == MIIM_OP_MODE_WRITE) ++ ISET_REG_FIELD(miim_param, MIIM_PARAM__PHY_DATA_SHIFT, ++ MIIM_PARAM__PHY_DATA_MASK, cmd->val); ++ ++ if (cmd->c45_sel) { ++ ISET_REG_FIELD(miim_param, MIIM_PARAM__C45_SEL_SHIFT, ++ MIIM_PARAM__C45_SEL_MASK, 1); ++ ++ ISET_REG_FIELD(miim_addr, MIIM_ADDRESS__CLAUSE_45_REGADR_SHIFT, ++ MIIM_ADDRESS__CLAUSE_45_REGADR_MASK, cmd->regnum); ++ ISET_REG_FIELD(miim_addr, MIIM_ADDRESS__CLAUSE_45_DTYPE_SHIFT, ++ MIIM_ADDRESS__CLAUSE_45_REGADR_MASK, cmd->regnum >> 16); ++ } ++ else { ++ 
ISET_REG_FIELD(miim_addr, MIIM_ADDRESS__CLAUSE_22_REGADR_SHIFT, ++ MIIM_ADDRESS__CLAUSE_22_REGADR_MASK, cmd->regnum); ++ } ++ ++ return do_cmicd_miim_op(cmic_mdio, cmd->op_mode, miim_param, miim_addr); ++} ++ ++ ++static int cmicd_mdiobus_read(struct mii_bus *bus, int phy_id, int regnum) ++{ ++ struct cmicd_mdiobus_private *bus_priv = bus->priv; ++ struct iproc_mdiobus_data *bus_data = &bus_priv->bus_data; ++ struct cmicd_miim_cmd cmd = {0}; ++ ++ cmd.bus_id = bus_data->phybus_num; ++ if (IPROC_MDIOBUS_TYPE_INTERNAL == bus_data->phybus_type) ++ cmd.int_sel = 1; ++ ++ cmd.phy_id = phy_id; ++ cmd.regnum = regnum; ++ ++ if (regnum & MII_ADDR_C45) ++ cmd.c45_sel = 1; ++ ++ cmd.op_mode = MIIM_OP_MODE_READ; ++ ++ return cmicd_miim_op(bus_priv->hw_ctrl, &cmd); ++} ++ ++static int cmicd_mdiobus_write(struct mii_bus *bus, int phy_id, ++ int regnum, u16 val) ++{ ++ struct cmicd_mdiobus_private *bus_priv = bus->priv; ++ struct iproc_mdiobus_data *bus_data = &bus_priv->bus_data; ++ struct cmicd_miim_cmd cmd = {0}; ++ ++ cmd.bus_id = bus_data->phybus_num; ++ if (IPROC_MDIOBUS_TYPE_INTERNAL == bus_data->phybus_type) ++ cmd.int_sel = 1; ++ ++ cmd.phy_id = phy_id; ++ cmd.regnum = regnum; ++ cmd.val = val; ++ ++ if (regnum & MII_ADDR_C45) ++ cmd.c45_sel = 1; ++ ++ cmd.op_mode = MIIM_OP_MODE_WRITE; ++ ++ return cmicd_miim_op(bus_priv->hw_ctrl, &cmd); ++} ++ ++static struct cmicd_mdio_ctrl * cmicd_mdio_res_alloc(void) ++{ ++ if (!cmic_common) { ++ cmic_common = kzalloc(sizeof(*cmic_common), GFP_KERNEL); ++ if (!cmic_common) ++ return NULL; ++ /* mutex_init(&cmic_common->lock); */ ++ spin_lock_init(&cmic_common->lock); ++ cmic_common->ref_cnt = 1; ++ } ++ else ++ cmic_common->ref_cnt ++; ++ ++ return cmic_common; ++} ++ ++static void cmicd_mdio_res_free(struct cmicd_mdio_ctrl *ctrl) ++{ ++ if (ctrl) { ++ ctrl->ref_cnt --; ++ if (ctrl->ref_cnt == 0) { ++ iounmap(ctrl->base); ++ kfree(ctrl); ++ cmic_common = NULL; ++ } ++ } ++} ++ ++static int cmicd_mdiobus_probe(struct platform_device 
*pdev) ++{ ++ struct mii_bus *mii_bus; ++ struct device_node *dn = pdev->dev.of_node; ++ struct cmicd_mdiobus_private *bus_priv; ++ struct iproc_mdiobus_data *bus_data; ++ struct cmicd_mdio_ctrl *cmicd_ctrl; ++ u32 mdio_bus_id; ++ u32 logical_mdio_bus_id; ++ const char *mdio_bus_type; ++ int ret; ++ ++ if (!of_device_is_available(dn)) ++ return -ENODEV; ++ ++ cmicd_ctrl = cmicd_mdio_res_alloc(); ++ if (!cmicd_ctrl) { ++ dev_err(&pdev->dev, "cmicd mdio rese alloc failed\n"); ++ ret = -ENOMEM; ++ goto err_exit; ++ } ++ ++ /* Get register base address */ ++ cmicd_ctrl->base = (void *)of_iomap(dn, 0); /*cmic_common: 0x03210000*/ ++ ++ if (of_property_read_u32(dn, "#bus-id", &mdio_bus_id)) { ++ mdio_bus_id = 2; /* no property available, use default: 2 */ ++ } ++ if (of_property_read_u32(dn, "#logical-bus-id", &logical_mdio_bus_id)) { ++ logical_mdio_bus_id = 0; /*use default:0 */ ++ } ++ if (of_property_read_string(dn, "bus-type", &mdio_bus_type)) { ++ mdio_bus_type = "external"; /* use default: "external" */ ++ } ++ ++ iproc_mdiobus_data.phybus_num = (u8) mdio_bus_id; ++ iproc_mdiobus_data.logbus_num = (u8) logical_mdio_bus_id; ++ if (!strcmp(mdio_bus_type, "internal")) ++ iproc_mdiobus_data.phybus_type = IPROC_MDIOBUS_TYPE_INTERNAL; ++ else ++ iproc_mdiobus_data.phybus_type = IPROC_MDIOBUS_TYPE_EXTERNAL; ++ iproc_mdiobus_data.logbus_type = iproc_mdiobus_data.phybus_type; ++ bus_data = &iproc_mdiobus_data; ++ ++ mii_bus = mdiobus_alloc_size(sizeof(*bus_priv)); ++ if (!mii_bus) { ++ dev_err(&pdev->dev, "mdiobus_alloc failed\n"); ++ ret = -ENOMEM; ++ goto err_ctrl_free; ++ } ++ ++ mii_bus->name = "iproc_cmicd_mdiobus"; ++ snprintf(mii_bus->id, MII_BUS_ID_SIZE, IPROC_MDIO_ID_FMT, ++ bus_data->logbus_num, bus_data->logbus_type); ++ mii_bus->parent = &pdev->dev; ++ mii_bus->read = cmicd_mdiobus_read; ++ mii_bus->write = cmicd_mdiobus_write; ++ ++ bus_priv = mii_bus->priv; ++ memcpy(&bus_priv->bus_data, bus_data, sizeof(struct iproc_mdiobus_data)); ++ bus_priv->hw_ctrl = 
cmicd_ctrl; ++ ++ if (IS_ENABLED(CONFIG_MACH_GH2) || IS_ENABLED(CONFIG_MACH_WH2)) ++ ret = of_mdiobus_register(mii_bus, dn); ++ else ++ ret = mdiobus_register(mii_bus); ++ if (ret) { ++ dev_err(&pdev->dev, "mdiobus_register failed\n"); ++ goto err_bus_free; ++ } ++ ++ platform_set_drvdata(pdev, mii_bus); ++ ++#if 0 ++ cmicd_mdiobus_test(mii_bus); ++#endif ++ ++ return 0; ++ ++err_bus_free: ++ kfree(mii_bus); ++err_ctrl_free: ++ cmicd_mdio_res_free(cmicd_ctrl); ++err_exit: ++ return ret; ++} ++ ++static int cmicd_mdiobus_remove(struct platform_device *pdev) ++{ ++ struct mii_bus *mii_bus = platform_get_drvdata(pdev); ++ struct cmicd_mdiobus_private *bus_priv; ++ ++ if (mii_bus) { ++ bus_priv = mii_bus->priv; ++ ++ mdiobus_unregister(mii_bus); ++ if (bus_priv) ++ cmicd_mdio_res_free(bus_priv->hw_ctrl); ++ mdiobus_free(mii_bus); ++ } ++ ++ return 0; ++} ++ ++static const struct of_device_id bcm_iproc_dt_ids[] = { ++ { .compatible = "brcm,iproc-cmicd-mdio"}, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, bcm_iproc_dt_ids); ++ ++static struct platform_driver iproc_cmicd_mdiobus_driver = ++{ ++ .driver = { ++ .name = "iproc_cmicd_mdio", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(bcm_iproc_dt_ids), ++ }, ++ .probe = cmicd_mdiobus_probe, ++ .remove = cmicd_mdiobus_remove, ++}; ++ ++static int __init cmicd_mdio_init(void) ++{ ++ return platform_driver_register(&iproc_cmicd_mdiobus_driver); ++} ++ ++static void __exit cmicd_mdio_exit(void) ++{ ++ platform_driver_unregister(&iproc_cmicd_mdiobus_driver); ++} ++ ++//module_init(cmicd_mdio_init); ++subsys_initcall(cmicd_mdio_init); ++module_exit(cmicd_mdio_exit); ++ ++MODULE_AUTHOR("Broadcom Corporation"); ++MODULE_DESCRIPTION("iProc CMICd mdio driver"); ++MODULE_LICENSE("GPL"); ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/mdio/iproc_mdio.c b/drivers/net/ethernet/broadcom/mdio/iproc_mdio.c +--- 
a/drivers/net/ethernet/broadcom/mdio/iproc_mdio.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/mdio/iproc_mdio.c 2017-11-09 17:53:44.082291000 +0800 +@@ -0,0 +1,141 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include "iproc_mdio.h" ++ ++/* Only one MDIO bus has been supported for each type */ ++static struct mii_bus *iproc_mdiobus[IPROC_MDIOBUS_TYPE_MAX] = {0}; ++ ++static struct mii_bus* ++get_iproc_mdiobus(int bustype, int phy_addr) ++{ ++ struct device *d; ++ char bus_id[MII_BUS_ID_SIZE]; ++ char phy_id[20]; ++ struct phy_device *phy_dev; ++ int idx; ++ ++ if (bustype < 0 || bustype >= IPROC_MDIOBUS_TYPE_MAX) { ++ return NULL; ++ } ++ ++ /* ++ * To support more than one bus for internal bus type on GH2, the following ++ * "if (NULL == iproc_mdiobus[bustype])" should be commented out. ++ * Note: The multi-bus support is based on the assumption that the phy_dev ++ * addresses are different for the internal bus_type bus. 
++ */ ++#if !(defined(CONFIG_MACH_GH2) || (defined(CONFIG_MACH_HR3) && defined(CONFIG_MACH_WH2))) ++ if (NULL == iproc_mdiobus[bustype]) { ++#endif ++ for (idx = 0; idx < IPROC_MDIOBUS_NUM_MAX; idx++) { ++ snprintf(bus_id, MII_BUS_ID_SIZE, IPROC_MDIO_ID_FMT, idx, bustype); ++ snprintf(phy_id, 20, PHY_ID_FMT, bus_id, phy_addr); ++ d = bus_find_device_by_name(&mdio_bus_type, NULL, phy_id); ++ if (d) { ++ phy_dev = to_phy_device(d); ++ iproc_mdiobus[bustype] = phy_dev->bus; ++ idx = IPROC_MDIOBUS_NUM_MAX; ++ } ++ } ++#if !(defined(CONFIG_MACH_GH2) || (defined(CONFIG_MACH_HR3) && defined(CONFIG_MACH_WH2))) ++ } ++#endif ++ return iproc_mdiobus[bustype]; ++} ++ ++ ++/** ++ * iproc_mii_read - General iProc interface function for reading a given PHY register ++ if not registered PHY interface by phy_driver_register ++ * @busnum: currently we're using busnum value 0 ++ * @bustype: the mdio bus type, coud be IPROC_MDIOBUS_TYPE_INTERNAL or IPROC_MDIOBUS_TYPE_EXTERNAL ++ * @phy_addr: the phy address ++ * @regnum: register number to read, if MII_ADDR_C45 == (@regnum & MII_ADDR_C45), means a C45 request ++ * @val: the address to store read value if the read operation is successful ++ * ++ * Returns 0 on success, or a negative value on error. 
++ */ ++int iproc_mii_read(int dev_type, int phy_addr, u32 reg_off, u16 *data) ++{ ++ struct mii_bus *mii_bus; ++ int bustype; ++ int err = -1; ++ ++ if (MII_DEV_LOCAL == dev_type) { ++ bustype = IPROC_MDIOBUS_TYPE_INTERNAL; ++ } else if (MII_DEV_EXT == dev_type) { ++ bustype = IPROC_MDIOBUS_TYPE_EXTERNAL; ++ } else { ++ return -EINVAL; ++ } ++ ++ mii_bus = get_iproc_mdiobus(bustype, phy_addr); ++ if (mii_bus) { ++ err = mii_bus->read(mii_bus, phy_addr, reg_off); ++ if (err >= 0) { ++ *data = err; ++ } ++ } else { ++ pr_err("%s : mdiobus:%d:%d is invalid!\n", __func__, 0, bustype); ++ } ++ ++ return err; ++} ++EXPORT_SYMBOL(iproc_mii_read); ++ ++/** ++ * iproc_mii_write - General iProc interface function for writing a given PHY register ++ if not registered PHY interface by phy_driver_register ++ * @busnum: currently we're using busnum value 0 ++ * @bustype: the mdio bus type, coud be IPROC_MDIOBUS_TYPE_INTERNAL or IPROC_MDIOBUS_TYPE_EXTERNAL ++ * @phy_addr: the phy address ++ * @regnum: register number to write, if MII_ADDR_C45 == (@regnum & MII_ADDR_C45), means a C45 request ++ * @val: value to write to @regnum ++ * ++ * Returns 0 on success, or a negative value on error. 
++ */ ++int iproc_mii_write(int dev_type, int phy_addr, u32 reg_off, u16 data) ++{ ++ struct mii_bus *mii_bus; ++ int bustype; ++ int err = -1; ++ ++ if (MII_DEV_LOCAL == dev_type) { ++ bustype = IPROC_MDIOBUS_TYPE_INTERNAL; ++ } else if (MII_DEV_EXT == dev_type) { ++ bustype = IPROC_MDIOBUS_TYPE_EXTERNAL; ++ } else { ++ return -EINVAL; ++ } ++ ++ mii_bus = get_iproc_mdiobus(bustype, phy_addr); ++ if (mii_bus) { ++ err = mii_bus->write(mii_bus, phy_addr, reg_off, data); ++ } else { ++ pr_err("%s : mdiobus:%d:%d is invalid!\n", __func__, 0, bustype); ++ } ++ ++ return err; ++} ++EXPORT_SYMBOL(iproc_mii_write); ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/mdio/iproc_mdio.h b/drivers/net/ethernet/broadcom/mdio/iproc_mdio.h +--- a/drivers/net/ethernet/broadcom/mdio/iproc_mdio.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/mdio/iproc_mdio.h 2017-11-09 17:53:44.082312000 +0800 +@@ -0,0 +1,97 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++ ++#ifndef _bcm5301x_ccb_mii_h_ ++#define _bcm5301x_ccb_mii_h_ ++ ++#include ++ ++typedef struct _mdio_info_s { ++ void *h; /* dev handle */ ++ spinlock_t lock; ++} mdio_info_t; ++ ++/* reutrn value for MII driver */ ++#define MII_ERR_NONE 0 ++#define MII_ERR_TIMEOUT -1 ++#define MII_ERR_INTERNAL -2 ++#define MII_ERR_PARAM -3 ++#define MII_ERR_UNAVAIL -4 ++#define MII_ERR_UNKNOW -5 ++#define MII_ERR_INIT -6 ++ ++/* device type */ ++#define MII_DEV_LOCAL 0 ++#define MII_DEV_EXT 1 ++ ++/* MII register definition */ ++#define MII_MGMT 0x18003000 ++#define MII_MGMT_BASE 0x000 ++#define MII_MGMT_DATAMASK 0x000007ff ++#define MII_CMD_DATA 0x18003004 ++#define MII_CMD_DATA_BASE 0x004 ++#define MII_CMD_DATA_DATAMASK 0xffffffff ++ ++/* fields in MII_MGMT */ ++#define MII_MGMT_BYP_MASK 0x00000400 ++#define MII_MGMT_BYP_SHIFT 10 ++#define MII_MGMT_EXP_MASK 0x00000200 ++#define 
MII_MGMT_EXP_SHIFT 9 ++#define MII_MGMT_BSY_MASK 0x00000100 ++#define MII_MGMT_BSY_SHIFT 8 ++#define MII_MGMT_PRE_MASK 0x00000080 ++#define MII_MGMT_PRE_SHIFT 7 ++#define MII_MGMT_MDCDIV_MASK 0x0000007f ++#define MII_MGMT_MDCDIV_SHIFT 0 ++/* fields in MII_CMD_DATA */ ++#define MII_CMD_DATA_SB_MASK 0xc0000000 ++#define MII_CMD_DATA_SB_SHIFT 30 ++#define MII_CMD_DATA_OP_MASK 0x30000000 ++#define MII_CMD_DATA_OP_SHIFT 28 ++#define MII_CMD_DATA_PA_MASK 0x0f800000 ++#define MII_CMD_DATA_PA_SHIFT 23 ++#define MII_CMD_DATA_RA_MASK 0x007c0000 ++#define MII_CMD_DATA_RA_SHIFT 18 ++#define MII_CMD_DATA_TA_MASK 0x00030000 ++#define MII_CMD_DATA_TA_SHIFT 16 ++#define MII_CMD_DATA_DATA_MASK 0x0000ffff ++#define MII_CMD_DATA_DATA_SHIFT 0 ++ ++ ++/****** iProc General Interface for mdio bus support ******/ ++struct iproc_mdiobus_data { ++ /* the mdio bus num and type from chip view */ ++ u8 logbus_num; ++ u8 logbus_type; ++ /* the actual bus num and type that mdio bus comes from */ ++ u8 phybus_num; ++ u8 phybus_type; ++ /* Note : ++ * Usually the logbus_num and logbus_type are the same as phybus_num and ++ * phybus_type, but they may be different on some special cases. For example, ++ * we may use cmicd mdio external bus 2 for the iProc mdio external bus 0, ++ * this configuration could be described as phybus_num=2, phybus_type=external, ++ * logbus_num=0, logbus_type=external. From iProc's view, the Phy devices ++ * for iProc AMAC should use mdiobus by logbus_num and logbus_type. But internally ++ * we'll configure the mdio core by phybus_num and phybus_type. 
++ */ ++}; ++ ++#define IPROC_MDIOBUS_TYPE_INTERNAL 0 ++#define IPROC_MDIOBUS_TYPE_EXTERNAL 1 ++ ++#define IPROC_MDIOBUS_NUM_MAX 8 ++#define IPROC_MDIOBUS_TYPE_MAX 2 ++ ++/* iproc_mii:[bus_num]:[bus_type] */ ++#define IPROC_MDIO_ID_FMT "iproc_mii:%01x:%01x" ++ ++ ++/* General interface for iProc mdio bus read/write function */ ++extern int iproc_mii_read(int dev_type, int phy_addr, u32 reg_off, u16 *data); ++extern int iproc_mii_write(int dev_type, int phy_addr, u32 reg_off, u16 data); ++/****** iProc General Interface for mdio bus support ******/ ++ ++#endif /* _bcm5301x_ccb_mii_h_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/net/ethernet/broadcom/mdio/iproc_mdio_dev.h b/drivers/net/ethernet/broadcom/mdio/iproc_mdio_dev.h +--- a/drivers/net/ethernet/broadcom/mdio/iproc_mdio_dev.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/net/ethernet/broadcom/mdio/iproc_mdio_dev.h 2017-11-09 17:53:44.083307000 +0800 +@@ -0,0 +1,32 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++ ++#ifndef _IPROC_MDIO_DEV_H ++#define _IPROC_MDIO_DEV_H ++ ++/* IOCTL commands */ ++ ++#define MDIO_IOC_MAGIC 'm' ++ ++struct mdio_ioc_transfer { ++ uint8_t pa; /* phy address */ ++ uint8_t ra; /* register address */ ++ uint16_t tx_buf; ++ uint16_t rx_buf; ++}; ++ ++#define MDIO_MSGSIZE(N) \ ++ ((((N)*(sizeof (struct mdio_ioc_transfer))) < (1 << _IOC_SIZEBITS)) \ ++ ? 
((N)*(sizeof (struct mdio_ioc_transfer))) : 0) ++ ++#define MDIO_IOC_MESSAGE(N) _IOW(MDIO_IOC_MAGIC, 0, char[MDIO_MSGSIZE(N)]) ++ ++#define MDIO_IOC_EXTERNAL_R_REG _IOWR(MDIO_IOC_MAGIC, 0, char[MDIO_MSGSIZE(1)]) ++#define MDIO_IOC_EXTERNAL_W_REG _IOW(MDIO_IOC_MAGIC, 1, char[MDIO_MSGSIZE(1)]) ++#define MDIO_IOC_LOCAL_R_REG _IOWR(MDIO_IOC_MAGIC, 2, char[MDIO_MSGSIZE(1)]) ++#define MDIO_IOC_LOCAL_W_REG _IOW(MDIO_IOC_MAGIC, 3, char[MDIO_MSGSIZE(1)]) ++ ++ ++#endif +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/pci/host/Kconfig b/drivers/pci/host/Kconfig +--- a/drivers/pci/host/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/pci/host/Kconfig 2017-11-09 17:53:50.841348000 +0800 +@@ -118,6 +118,15 @@ config PCI_VERSATILE + bool "ARM Versatile PB PCI controller" + depends on ARCH_VERSATILE + ++config PCIE_XGS_IPROC ++ tristate "Broadcom XGS iProc PCIe controller" ++ select PCI_DOMAINS ++ depends on ARCH_XGS_IPROC ++ default n ++ help ++ This enables the XGS iProc PCIe core controller support for Broadcom's ++ iProc family of SoCs. 
++ + config PCIE_IPROC + tristate "Broadcom iProc PCIe controller" + depends on OF && (ARM || ARM64) +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/pci/host/Makefile b/drivers/pci/host/Makefile +--- a/drivers/pci/host/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/pci/host/Makefile 2017-11-09 17:53:50.842362000 +0800 +@@ -16,6 +16,7 @@ obj-$(CONFIG_PCI_LAYERSCAPE) += pci-laye + obj-$(CONFIG_PCI_VERSATILE) += pci-versatile.o + obj-$(CONFIG_PCIE_IPROC) += pcie-iproc.o + obj-$(CONFIG_PCIE_IPROC_PLATFORM) += pcie-iproc-platform.o ++obj-$(CONFIG_PCIE_XGS_IPROC) += pcie-xgs-iproc.o + obj-$(CONFIG_PCIE_IPROC_BCMA) += pcie-iproc-bcma.o + obj-$(CONFIG_PCIE_ALTERA) += pcie-altera.o + obj-$(CONFIG_PCIE_ALTERA_MSI) += pcie-altera-msi.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/pci/host/pcie-xgs-iproc.c b/drivers/pci/host/pcie-xgs-iproc.c +--- a/drivers/pci/host/pcie-xgs-iproc.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/pci/host/pcie-xgs-iproc.c 2017-11-09 17:53:50.893339000 +0800 +@@ -0,0 +1,469 @@ ++/* ++ * Copyright (C) 2014 Hauke Mehrtens ++ * Copyright (C) 2015 Broadcom Corporation ++ * ++ * This program is free software; you can redistribute it and/or ++ * modify it under the terms of the GNU General Public License as ++ * published by the Free Software Foundation version 2. ++ * ++ * This program is distributed "as is" WITHOUT ANY WARRANTY of any ++ * kind, whether express or implied; without even the implied warranty ++ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. 
++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#define CLK_CONTROL_OFFSET 0x000 ++ ++#define CFG_IND_ADDR_OFFSET 0x120 ++#define CFG_IND_ADDR_MASK 0x00001ffc ++#define CFG_IND_DATA_OFFSET 0x124 ++ ++#define CFG_ADDR_OFFSET 0x1f8 ++#define CFG_ADDR_BUS_NUM_SHIFT 20 ++#define CFG_ADDR_BUS_NUM_MASK 0x0ff00000 ++#define CFG_ADDR_DEV_NUM_SHIFT 15 ++#define CFG_ADDR_DEV_NUM_MASK 0x000f8000 ++#define CFG_ADDR_FUNC_NUM_SHIFT 12 ++#define CFG_ADDR_FUNC_NUM_MASK 0x00007000 ++#define CFG_ADDR_REG_NUM_SHIFT 2 ++#define CFG_ADDR_REG_NUM_MASK 0x00000ffc ++#define CFG_ADDR_CFG_TYPE_SHIFT 0 ++#define CFG_ADDR_CFG_TYPE_MASK 0x00000003 ++ ++#define CFG_DATA_OFFSET 0x1fc ++ ++#define SYS_RC_INTX_EN 0x330 ++#define SYS_RC_INTX_MASK 0xf ++ ++#define IPROC_PCIE_MAX_NUM_IRQS 6 ++ ++/** ++ * iProc PCIe device ++ * @dev: pointer to device data structure ++ * @base: PCIe host controller I/O register base ++ * @resources: linked list of all PCI resources ++ * @sysdata: Per PCI controller data (ARM-specific) ++ * @root_bus: pointer to root bus ++ * @phy: optional PHY device that controls the Serdes ++ * @irqs: interrupt IDs ++ */ ++struct iproc_pcie { ++ struct device *dev; ++ void __iomem *base; ++#ifdef CONFIG_ARM ++ struct pci_sys_data sysdata; ++#endif ++ struct pci_bus *root_bus; ++ struct phy *phy; ++ int irqs[IPROC_PCIE_MAX_NUM_IRQS]; ++ int (*map_irq)(const struct pci_dev *, u8, u8); ++}; ++ ++#if (defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3) \ ++ || defined(CONFIG_MACH_GH2)) ++#define PCI_PERST_SWR (1) ++#define PCI_CONFIG_SWR (1) ++#else ++#define PCI_PERST_SWR (0) ++#define PCI_CONFIG_SWR (0) ++#endif /* defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3) || defined(CONFIG_MACH_GH2)*/ ++ ++#if PCI_CONFIG_SWR ++extern char * nvram_get(const char *name); ++#endif /* PCI_CONFIG_SWR */ ++ ++#define MII_DEV_LOCAL 0 ++extern int iproc_mii_write(int 
dev_type, int phy_addr, u32 reg_off, u16 data); ++ ++static inline struct iproc_pcie *iproc_pcie_data(struct pci_bus *bus) ++{ ++ struct iproc_pcie *pcie; ++#ifdef CONFIG_ARM ++ struct pci_sys_data *sys = bus->sysdata; ++ ++ pcie = sys->private_data; ++#else ++ pcie = bus->sysdata; ++#endif ++ return pcie; ++} ++ ++/** ++ * Note access to the configuration registers are protected at the higher layer ++ * by 'pci_lock' in drivers/pci/access.c ++ */ ++static void __iomem *iproc_pcie_map_cfg_bus(struct pci_bus *bus, ++ unsigned int devfn, ++ int where) ++{ ++ struct iproc_pcie *pcie = iproc_pcie_data(bus); ++ unsigned slot = PCI_SLOT(devfn); ++ unsigned fn = PCI_FUNC(devfn); ++ unsigned busno = bus->number; ++ u32 val; ++ ++ /* root complex access */ ++ if (busno == 0) { ++ if (slot >= 1) ++ return NULL; ++ writel(where & CFG_IND_ADDR_MASK, ++ pcie->base + CFG_IND_ADDR_OFFSET); ++ return (pcie->base + CFG_IND_DATA_OFFSET); ++ } ++ ++ if (fn > 1) ++ return NULL; ++ ++ /* EP device access */ ++ val = (busno << CFG_ADDR_BUS_NUM_SHIFT) | ++ (slot << CFG_ADDR_DEV_NUM_SHIFT) | ++ (fn << CFG_ADDR_FUNC_NUM_SHIFT) | ++ (where & CFG_ADDR_REG_NUM_MASK) | ++ (1 & CFG_ADDR_CFG_TYPE_MASK); ++ writel(val, pcie->base + CFG_ADDR_OFFSET); ++ ++ return (pcie->base + CFG_DATA_OFFSET); ++} ++ ++static struct pci_ops iproc_pcie_ops = { ++ .map_bus = iproc_pcie_map_cfg_bus, ++ .read = pci_generic_config_read32, ++ .write = pci_generic_config_write32, ++}; ++ ++static void iproc_pcie_reset(struct iproc_pcie *pcie) ++{ ++ /* Configure the PCIe controller as root complex and send a downstream reset */ ++ writel(0, pcie->base + CLK_CONTROL_OFFSET); ++ mdelay(1); ++ writel(1, pcie->base + CLK_CONTROL_OFFSET); ++ mdelay(100); ++} ++ ++#if (defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_HR3) || defined(CONFIG_MACH_GH2)) ++static int pcie_serdes_reg_write(int phyaddr, int reg, u16 val) ++{ ++ iproc_mii_write(MII_DEV_LOCAL, phyaddr, 0x1f, reg & 0xfff0); ++ 
iproc_mii_write(MII_DEV_LOCAL, phyaddr, reg & 0xf, val); ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++ ++static int pcie_rc_war(struct iproc_pcie * pcie) ++{ ++ /* Setting for PCIe Serdes PLL output */ ++ pcie_serdes_reg_write(2, 0x2103, 0x2b1c); ++ pcie_serdes_reg_write(2, 0x1300, 0x000b); ++ mdelay(100); ++ ++#if PCI_PERST_SWR ++ iproc_pcie_reset(pcie); ++#endif /* PCI_PERST_SWR */ ++ ++ return PCIBIOS_SUCCESSFUL; ++} ++#endif /* CONFIG_MACH_GH || CONFIG_MACH_SB2) || CONFIG_MACH_HR3 || defined(CONFIG_MACH_GH2) */ ++ ++static int iproc_pcie_check_link(struct iproc_pcie *pcie, struct pci_bus *bus) ++{ ++ u8 hdr_type; ++ u32 link_ctrl; ++ u16 pos, link_status; ++ bool link_is_active = false; ++ u32 class; ++#if PCI_CONFIG_SWR ++ u32 tmp32, devfn = 0; ++ char *pcie_configs = NULL; ++#endif /* PCI_CONFIG_SWR */ ++ ++ ++ /* make sure we are not in EP mode */ ++ pci_bus_read_config_byte(bus, 0, PCI_HEADER_TYPE, &hdr_type); ++ if ((hdr_type & 0x7f) != PCI_HEADER_TYPE_BRIDGE) { ++ dev_err(pcie->dev, "in EP mode, hdr=%#02x\n", hdr_type); ++ return -EFAULT; ++ } ++ ++#if (defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_SB2) || defined(CONFIG_MACH_HR3) || defined(CONFIG_MACH_GH2)) ++ pcie_rc_war(pcie); ++#endif ++ ++#if PCI_CONFIG_SWR ++ pcie_configs = nvram_get("pcie_configs"); ++ if (pcie_configs) { ++ if (!strcmp(pcie_configs, "tx-de-emp")) { ++ pci_bus_read_config_dword(bus, devfn, 0xdc, &tmp32); ++ tmp32 |= (0x1 << 6); ++ pci_bus_write_config_dword(bus, devfn, 0xdc, tmp32); ++ pci_bus_read_config_dword(bus, devfn, 0xdc, &tmp32); ++ } ++ } ++#endif /* PCI_CONFIG_SWR */ ++ ++ /* force class to PCI_CLASS_BRIDGE_PCI (0x0604) */ ++ /* ++ * After this modification, the CLASS code in configuration space would be ++ * read as PCI_CLASS_BRIDGE_PCI(0x0604) instead of network interface(0x0200) ++ */ ++#define PCI_BRIDGE_CTRL_REG_OFFSET 0x43c ++#define PCI_CLASS_BRIDGE_MASK 0xffff00 ++#define PCI_CLASS_BRIDGE_SHIFT 8 ++ pci_bus_read_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, 
&class); ++ class &= ~PCI_CLASS_BRIDGE_MASK; ++ class |= (PCI_CLASS_BRIDGE_PCI << PCI_CLASS_BRIDGE_SHIFT); ++ pci_bus_write_config_dword(bus, 0, PCI_BRIDGE_CTRL_REG_OFFSET, class); ++ ++ /* check link status to see if link is active */ ++ pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP); ++ pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, &link_status); ++ if (link_status & PCI_EXP_LNKSTA_NLW) ++ link_is_active = true; ++ ++ if (!link_is_active) { ++ /* try GEN 1 link speed */ ++#define PCI_LINK_STATUS_CTRL_2_OFFSET 0x0dc ++#define PCI_TARGET_LINK_SPEED_MASK 0xf ++#define PCI_TARGET_LINK_SPEED_GEN2 0x2 ++#define PCI_TARGET_LINK_SPEED_GEN1 0x1 ++ pci_bus_read_config_dword(bus, 0, ++ PCI_LINK_STATUS_CTRL_2_OFFSET, ++ &link_ctrl); ++ if ((link_ctrl & PCI_TARGET_LINK_SPEED_MASK) == ++ PCI_TARGET_LINK_SPEED_GEN2) { ++ link_ctrl &= ~PCI_TARGET_LINK_SPEED_MASK; ++ link_ctrl |= PCI_TARGET_LINK_SPEED_GEN1; ++ pci_bus_write_config_dword(bus, 0, ++ PCI_LINK_STATUS_CTRL_2_OFFSET, ++ link_ctrl); ++ msleep(100); ++ ++ pos = pci_bus_find_capability(bus, 0, PCI_CAP_ID_EXP); ++ pci_bus_read_config_word(bus, 0, pos + PCI_EXP_LNKSTA, ++ &link_status); ++ if (link_status & PCI_EXP_LNKSTA_NLW) ++ link_is_active = true; ++ } ++ } ++ ++ dev_info(pcie->dev, "link: %s\n", link_is_active ? "UP" : "DOWN"); ++ ++ return link_is_active ? 
0 : -ENODEV; ++} ++ ++static void iproc_pcie_enable(struct iproc_pcie *pcie) ++{ ++ writel(SYS_RC_INTX_MASK, pcie->base + SYS_RC_INTX_EN); ++} ++ ++int iproc_pcie_setup(struct iproc_pcie *pcie, struct list_head *res) ++{ ++ int ret; ++ void *sysdata; ++ struct pci_bus *bus; ++ ++ if (!pcie || !pcie->dev || !pcie->base) ++ return -EINVAL; ++ ++ ret = phy_init(pcie->phy); ++ if (ret) { ++ dev_err(pcie->dev, "unable to initialize PCIe PHY\n"); ++ return ret; ++ } ++ ++ ret = phy_power_on(pcie->phy); ++ if (ret) { ++ dev_err(pcie->dev, "unable to power on PCIe PHY\n"); ++ goto err_exit_phy; ++ } ++ ++ iproc_pcie_reset(pcie); ++ ++#ifdef CONFIG_ARM ++ pcie->sysdata.private_data = pcie; ++ sysdata = &pcie->sysdata; ++#else ++ sysdata = pcie; ++#endif ++ ++ bus = pci_create_root_bus(pcie->dev, 0, &iproc_pcie_ops, sysdata, res); ++ if (!bus) { ++ dev_err(pcie->dev, "unable to create PCI root bus\n"); ++ ret = -ENOMEM; ++ goto err_power_off_phy; ++ } ++ pcie->root_bus = bus; ++ ++ ret = iproc_pcie_check_link(pcie, bus); ++ if (ret) { ++ dev_err(pcie->dev, "no PCIe EP device detected\n"); ++ goto err_rm_root_bus; ++ } ++ ++ iproc_pcie_enable(pcie); ++ ++ pci_scan_child_bus(bus); ++ pci_assign_unassigned_bus_resources(bus); ++#ifdef CONFIG_ARM ++ pci_fixup_irqs(pci_common_swizzle, pcie->map_irq); ++#endif ++ pci_bus_add_devices(bus); ++ ++ return 0; ++ ++err_rm_root_bus: ++ pci_stop_root_bus(bus); ++ pci_remove_root_bus(bus); ++ ++err_power_off_phy: ++ phy_power_off(pcie->phy); ++err_exit_phy: ++ phy_exit(pcie->phy); ++ return ret; ++} ++EXPORT_SYMBOL(iproc_pcie_setup); ++ ++ ++#if ( defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_HR2) ) ++static void WrongPCIGen2TemplateWAR(int port, u32 reg, u16 val) ++{ ++ /* port = phy addr */ ++ iproc_mii_write(MII_DEV_LOCAL, port, 0x1f, 0x8630); ++ iproc_mii_write(MII_DEV_LOCAL, port, reg, val); ++} ++#endif /* defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_SKT2) || defined(CONFIG_MACH_HR2) */ ++ ++ ++int 
iproc_pcie_map_irq(const struct pci_dev *pdev, u8 slot, u8 pin) ++{ ++ struct iproc_pcie *pcie = iproc_pcie_data(pdev->bus); ++ int irq = pcie->irqs[4]; ++ ++ return irq; ++} ++ ++static int iproc_pcie_probe(struct platform_device *pdev) ++{ ++ struct iproc_pcie *pcie; ++ struct device_node *np = pdev->dev.of_node; ++ struct resource reg; ++ resource_size_t iobase = 0; ++ LIST_HEAD(res); ++ int ret; ++ //u32 irqs_total; ++ int i; ++ ++ pcie = devm_kzalloc(&pdev->dev, sizeof(struct iproc_pcie), GFP_KERNEL); ++ if (!pcie) ++ return -ENOMEM; ++ ++ pcie->dev = &pdev->dev; ++ platform_set_drvdata(pdev, pcie); ++ ++ ret = of_address_to_resource(np, 0, ®); ++ if (ret < 0) { ++ dev_err(pcie->dev, "unable to obtain controller resources\n"); ++ return ret; ++ } ++ ++ pcie->base = devm_ioremap_resource(pcie->dev, ®); ++ if (IS_ERR(pcie->base)) ++ return PTR_ERR(pcie->base); ++ ++ /* PHY use is optional */ ++ pcie->phy = devm_phy_get(&pdev->dev, "pcie-phy"); ++ if (IS_ERR(pcie->phy)) { ++ if (PTR_ERR(pcie->phy) == -EPROBE_DEFER) ++ return -EPROBE_DEFER; ++ pcie->phy = NULL; ++ } ++ ++ ret = of_pci_get_host_bridge_resources(np, 0, 0xff, &res, &iobase); ++ if (ret) { ++ dev_err(pcie->dev, ++ "unable to get PCI host bridge resources\n"); ++ return ret; ++ } ++ ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || defined(CONFIG_MACH_HR2)) ++// if (of_device_is_compatible(np, "iproc-p2") || of_device_is_compatible(np, "iproc-p6")) { ++ WrongPCIGen2TemplateWAR(of_get_pci_domain_nr(np) + 7, 0x13, 0x190); ++ WrongPCIGen2TemplateWAR(of_get_pci_domain_nr(np) + 7, 0x19, 0x191); ++// } ++#endif ++ ++ /*Parse IRQ*/ ++ /* of_irq_count is not exported for module to call */ ++#if 0 ++ irqs_total = of_irq_count(np); ++ if ( !irqs_total || (irqs_total > IPROC_PCIE_MAX_NUM_IRQS) ) ++ return -EINVAL; ++#endif ++ ++ for (i = 0; i < IPROC_PCIE_MAX_NUM_IRQS; i++) { ++ pcie->irqs[i] = irq_of_parse_and_map(np, i); ++ if (!pcie->irqs[i]) { ++ dev_err(&pdev->dev, "unable to parse or map irq 
index:%d\n", i); ++ return -EINVAL; ++ } ++ } ++ ++ //pcie->map_irq = of_irq_parse_and_map_pci; ++ pcie->map_irq = iproc_pcie_map_irq; ++ ++ ret = iproc_pcie_setup(pcie, &res); ++ if (ret) ++ dev_err(pcie->dev, "PCIe controller setup failed\n"); ++ ++ pci_free_resource_list(&res); ++ ++ return ret; ++} ++ ++static int iproc_pcie_remove(struct platform_device *pdev) ++{ ++ struct iproc_pcie *pcie = platform_get_drvdata(pdev); ++ ++ pci_stop_root_bus(pcie->root_bus); ++ pci_remove_root_bus(pcie->root_bus); ++ ++ phy_power_off(pcie->phy); ++ phy_exit(pcie->phy); ++ ++ return 0; ++} ++ ++static const struct of_device_id iproc_pcie_of_match_table[] = { ++ { .compatible = "brcm,iproc-pcie", }, ++ { /* sentinel */ } ++}; ++MODULE_DEVICE_TABLE(of, iproc_pcie_of_match_table); ++ ++static struct platform_driver iproc_pcie_pltfm_driver = { ++ .driver = { ++ .name = "iproc-pcie", ++ .of_match_table = of_match_ptr(iproc_pcie_of_match_table), ++ }, ++ .probe = iproc_pcie_probe, ++ .remove = iproc_pcie_remove, ++}; ++ ++module_platform_driver(iproc_pcie_pltfm_driver); ++ ++MODULE_DESCRIPTION("Broadcom XGS iProc PCIe driver"); ++MODULE_LICENSE("GPL v2"); +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/soc/Kconfig b/drivers/soc/Kconfig +--- a/drivers/soc/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/soc/Kconfig 2017-11-09 17:53:55.652386000 +0800 +@@ -1,6 +1,7 @@ + menu "SOC (System On Chip) specific Drivers" + + source "drivers/soc/brcmstb/Kconfig" ++source "drivers/soc/bcm/Kconfig" + source "drivers/soc/mediatek/Kconfig" + source "drivers/soc/qcom/Kconfig" + source "drivers/soc/rockchip/Kconfig" +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/soc/Makefile b/drivers/soc/Makefile +--- a/drivers/soc/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/soc/Makefile 
2017-11-09 17:53:55.653381000 +0800 +@@ -2,6 +2,7 @@ + # Makefile for the Linux Kernel SOC specific device drivers. + # + ++obj-$(CONFIG_SOC_XGS_IPROC) += bcm/ + obj-$(CONFIG_SOC_BRCMSTB) += brcmstb/ + obj-$(CONFIG_MACH_DOVE) += dove/ + obj-$(CONFIG_ARCH_MEDIATEK) += mediatek/ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/soc/bcm/Kconfig b/drivers/soc/bcm/Kconfig +--- a/drivers/soc/bcm/Kconfig 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/soc/bcm/Kconfig 2017-11-09 17:53:55.664395000 +0800 +@@ -0,0 +1,8 @@ ++menuconfig SOC_XGS_IPROC ++ bool "Broadcom XGS iProc IDM/DMU/PMU drivers" ++ depends on ARCH_XGS_IPROC ++ default ARCH_XGS_IPROC ++ help ++ This option enables XGS iProc IDM/DMU/PMU related drivers. ++ ++ If unsure, say Y. +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/soc/bcm/Makefile b/drivers/soc/bcm/Makefile +--- a/drivers/soc/bcm/Makefile 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/soc/bcm/Makefile 2017-11-09 17:53:55.665375000 +0800 +@@ -0,0 +1 @@ ++obj-$(CONFIG_SOC_XGS_IPROC) += xgs-iproc-wrap-idm-dmu.o xgs-iproc-idm.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/soc/bcm/xgs-iproc-idm.c b/drivers/soc/bcm/xgs-iproc-idm.c +--- a/drivers/soc/bcm/xgs-iproc-idm.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/soc/bcm/xgs-iproc-idm.c 2017-11-09 17:53:55.666387000 +0800 +@@ -0,0 +1,842 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++extern void __iomem *get_iproc_idm_base(int); ++extern void __iomem *get_iproc_idm_base_phys(int); ++ ++#if defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || \ ++ defined(CONFIG_MACH_HR2) ++#define IHOST_S1_IDM_ERROR_LOG_CONTROL 0x18107900 
++#define IHOST_S1_IDM_ERROR_LOG_COMPLETE 0x18107904 ++#define IHOST_S1_IDM_ERROR_LOG_STATUS 0x18107908 ++#define IHOST_S1_IDM_ERROR_LOG_ADDR_LSB 0x1810790c ++#define IHOST_S1_IDM_ERROR_LOG_ID 0x18107914 ++#define IHOST_S1_IDM_ERROR_LOG_FLAGS 0x1810791c ++#define IHOST_S1_IDM_INTERRUPT_STATUS 0x18107a00 ++#define IHOST_S0_IDM_ERROR_LOG_CONTROL 0x18108900 ++#define IHOST_S0_IDM_ERROR_LOG_COMPLETE 0x18108904 ++#define IHOST_S0_IDM_ERROR_LOG_STATUS 0x18108908 ++#define IHOST_S0_IDM_ERROR_LOG_ADDR_LSB 0x1810890c ++#define IHOST_S0_IDM_ERROR_LOG_ID 0x18108914 ++#define IHOST_S0_IDM_ERROR_LOG_FLAGS 0x1810891c ++#define IHOST_S0_IDM_INTERRUPT_STATUS 0x18108a00 ++#define DDR_S1_IDM_ERROR_LOG_CONTROL 0x18109900 ++#define DDR_S1_IDM_ERROR_LOG_COMPLETE 0x18109904 ++#define DDR_S1_IDM_ERROR_LOG_STATUS 0x18109908 ++#define DDR_S1_IDM_ERROR_LOG_ADDR_LSB 0x1810990c ++#define DDR_S1_IDM_ERROR_LOG_ID 0x18109914 ++#define DDR_S1_IDM_ERROR_LOG_FLAGS 0x1810991c ++#define DDR_S1_IDM_INTERRUPT_STATUS 0x18109a00 ++#define DDR_S2_IDM_ERROR_LOG_CONTROL 0x1810a900 ++#define DDR_S2_IDM_ERROR_LOG_COMPLETE 0x1810a904 ++#define DDR_S2_IDM_ERROR_LOG_STATUS 0x1810a908 ++#define DDR_S2_IDM_ERROR_LOG_ADDR_LSB 0x1810a90c ++#define DDR_S2_IDM_ERROR_LOG_ID 0x1810a914 ++#define DDR_S2_IDM_ERROR_LOG_FLAGS 0x1810a91c ++#define DDR_S2_IDM_INTERRUPT_STATUS 0x1810aa00 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_CONTROL 0x1810b900 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_COMPLETE 0x1810b904 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_STATUS 0x1810b908 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1810b90c ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ID 0x1810b914 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_FLAGS 0x1810b91c ++#define AXI_PCIE_S0_IDM_IDM_INTERRUPT_STATUS 0x1810ba00 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_CONTROL 0x1810d900 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_COMPLETE 0x1810d904 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_STATUS 0x1810d908 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1810d90c ++#define 
CMICD_S0_IDM_IDM_ERROR_LOG_ID 0x1810d914 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_FLAGS 0x1810d91c ++#define CMICD_S0_IDM_IDM_INTERRUPT_STATUS 0x1810da00 ++#define APBY_S0_IDM_IDM_ERROR_LOG_CONTROL 0x1810f900 ++#define APBY_S0_IDM_IDM_ERROR_LOG_COMPLETE 0x1810f904 ++#define APBY_S0_IDM_IDM_ERROR_LOG_STATUS 0x1810f908 ++#define APBY_S0_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1810f90c ++#define APBY_S0_IDM_IDM_ERROR_LOG_ID 0x1810f914 ++#define APBY_S0_IDM_IDM_ERROR_LOG_FLAGS 0x1810f91c ++#define APBY_S0_IDM_IDM_INTERRUPT_STATUS 0x1810fa00 ++#define ROM_S0_IDM_ERROR_LOG_CONTROL 0x1811a900 ++#define ROM_S0_IDM_ERROR_LOG_COMPLETE 0x1811a904 ++#define ROM_S0_IDM_ERROR_LOG_STATUS 0x1811a908 ++#define ROM_S0_IDM_ERROR_LOG_ADDR_LSB 0x1811a90c ++#define ROM_S0_IDM_ERROR_LOG_ID 0x1811a914 ++#define ROM_S0_IDM_ERROR_LOG_FLAGS 0x1811a91c ++#define ROM_S0_IDM_INTERRUPT_STATUS 0x1811aa00 ++#define NAND_IDM_IDM_ERROR_LOG_CONTROL 0x1811b900 ++#define NAND_IDM_IDM_ERROR_LOG_COMPLETE 0x1811b904 ++#define NAND_IDM_IDM_ERROR_LOG_STATUS 0x1811b908 ++#define NAND_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1811b90c ++#define NAND_IDM_IDM_ERROR_LOG_ID 0x1811b914 ++#define NAND_IDM_IDM_ERROR_LOG_FLAGS 0x1811b91c ++#define NAND_IDM_IDM_INTERRUPT_STATUS 0x1811ba00 ++#define QSPI_IDM_IDM_ERROR_LOG_CONTROL 0x1811c900 ++#define QSPI_IDM_IDM_ERROR_LOG_COMPLETE 0x1811c904 ++#define QSPI_IDM_IDM_ERROR_LOG_STATUS 0x1811c908 ++#define QSPI_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1811c90c ++#define QSPI_IDM_IDM_ERROR_LOG_ID 0x1811c914 ++#define QSPI_IDM_IDM_ERROR_LOG_FLAGS 0x1811c91c ++#define QSPI_IDM_IDM_INTERRUPT_STATUS 0x1811ca00 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_CONTROL 0x1811d900 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_COMPLETE 0x1811d904 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_STATUS 0x1811d908 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1811d90c ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_ID 0x1811d914 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_FLAGS 0x1811d91c ++#define A9JTAG_S0_IDM_IDM_INTERRUPT_STATUS 0x1811da00 ++#define 
SRAM_S0_IDM_ERROR_LOG_CONTROL 0x18120900 ++#define SRAM_S0_IDM_ERROR_LOG_COMPLETE 0x18120904 ++#define SRAM_S0_IDM_ERROR_LOG_STATUS 0x18120908 ++#define SRAM_S0_IDM_ERROR_LOG_ADDR_LSB 0x1812090c ++#define SRAM_S0_IDM_ERROR_LOG_ID 0x18120914 ++#define SRAM_S0_IDM_ERROR_LOG_FLAGS 0x1812091c ++#define SRAM_S0_IDM_INTERRUPT_STATUS 0x18120a00 ++#define APBZ_S0_IDM_IDM_ERROR_LOG_CONTROL 0x18121900 ++#define APBZ_S0_IDM_IDM_ERROR_LOG_COMPLETE 0x18121904 ++#define APBZ_S0_IDM_IDM_ERROR_LOG_STATUS 0x18121908 ++#define APBZ_S0_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1812190c ++#define APBZ_S0_IDM_IDM_ERROR_LOG_ID 0x18121914 ++#define APBZ_S0_IDM_IDM_ERROR_LOG_FLAGS 0x1812191c ++#define APBZ_S0_IDM_IDM_INTERRUPT_STATUS 0x18121a00 ++#define AXIIC_DS_3_IDM_ERROR_LOG_CONTROL 0x18123900 ++#define AXIIC_DS_3_IDM_ERROR_LOG_COMPLETE 0x18123904 ++#define AXIIC_DS_3_IDM_ERROR_LOG_STATUS 0x18123908 ++#define AXIIC_DS_3_IDM_ERROR_LOG_ADDR_LSB 0x1812390c ++#define AXIIC_DS_3_IDM_ERROR_LOG_ID 0x18123914 ++#define AXIIC_DS_3_IDM_ERROR_LOG_FLAGS 0x1812391c ++#define AXIIC_DS_3_IDM_INTERRUPT_STATUS 0x18123a00 ++#define APBW_IDM_IDM_ERROR_LOG_CONTROL 0x18131900 ++#define APBW_IDM_IDM_ERROR_LOG_COMPLETE 0x18131904 ++#define APBW_IDM_IDM_ERROR_LOG_STATUS 0x18131908 ++#define APBW_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1813190c ++#define APBW_IDM_IDM_ERROR_LOG_ID 0x18131914 ++#define APBW_IDM_IDM_ERROR_LOG_FLAGS 0x1813191c ++#define APBW_IDM_IDM_INTERRUPT_STATUS 0x18131a00 ++#define APBX_IDM_IDM_ERROR_LOG_CONTROL 0x18132900 ++#define APBX_IDM_IDM_ERROR_LOG_COMPLETE 0x18132904 ++#define APBX_IDM_IDM_ERROR_LOG_STATUS 0x18132908 ++#define APBX_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1813290c ++#define APBX_IDM_IDM_ERROR_LOG_ID 0x18132914 ++#define APBX_IDM_IDM_ERROR_LOG_FLAGS 0x1813291c ++#define APBX_IDM_IDM_INTERRUPT_STATUS 0x18132a00 ++#define AXIIC_DS_0_IDM_ERROR_LOG_CONTROL 0x18141900 ++#define AXIIC_DS_0_IDM_ERROR_LOG_COMPLETE 0x18141904 ++#define AXIIC_DS_0_IDM_ERROR_LOG_STATUS 0x18141908 ++#define 
AXIIC_DS_0_IDM_ERROR_LOG_ADDR_LSB 0x1814190c ++#define AXIIC_DS_0_IDM_ERROR_LOG_ID 0x18141914 ++#define AXIIC_DS_0_IDM_ERROR_LOG_FLAGS 0x1814191c ++#define AXIIC_DS_0_IDM_INTERRUPT_STATUS 0x18141a00 ++ ++#elif defined(CONFIG_MACH_HR3) ++ ++#define IHOST_S0_IDM_ERROR_LOG_CONTROL 0x18107900 ++#define IHOST_S0_IDM_ERROR_LOG_COMPLETE 0x18107904 ++#define IHOST_S0_IDM_ERROR_LOG_STATUS 0x18107908 ++#define IHOST_S0_IDM_ERROR_LOG_ADDR_LSB 0x1810790c ++#define IHOST_S0_IDM_ERROR_LOG_ID 0x18107914 ++#define IHOST_S0_IDM_ERROR_LOG_FLAGS 0x1810791c ++#define IHOST_S0_IDM_INTERRUPT_STATUS 0x18107a00 ++#define IHOST_S1_IDM_ERROR_LOG_CONTROL 0x18106900 ++#define IHOST_S1_IDM_ERROR_LOG_COMPLETE 0x18106904 ++#define IHOST_S1_IDM_ERROR_LOG_STATUS 0x18106908 ++#define IHOST_S1_IDM_ERROR_LOG_ADDR_LSB 0x1810690c ++#define IHOST_S1_IDM_ERROR_LOG_ID 0x18106914 ++#define IHOST_S1_IDM_ERROR_LOG_FLAGS 0x1810691c ++#define IHOST_S1_IDM_INTERRUPT_STATUS 0x18106a00 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_CONTROL 0x18108900 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_COMPLETE 0x18108904 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_STATUS 0x18108908 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1810890c ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ID 0x18108914 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_FLAGS 0x1810891c ++#define AXI_PCIE_S0_IDM_IDM_INTERRUPT_STATUS 0x18108a00 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_CONTROL 0x1810a900 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_COMPLETE 0x1810a904 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_STATUS 0x1810a908 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1810a90c ++#define CMICD_S0_IDM_IDM_ERROR_LOG_ID 0x1810a914 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_FLAGS 0x1810a91c ++#define CMICD_S0_IDM_IDM_INTERRUPT_STATUS 0x1810aa00 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_CONTROL 0x18119900 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_COMPLETE 0x18119904 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_STATUS 0x18119908 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1811990c ++#define 
A9JTAG_S0_IDM_IDM_ERROR_LOG_ID 0x18119914 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_FLAGS 0x1811991c ++#define A9JTAG_S0_IDM_IDM_INTERRUPT_STATUS 0x18119a00 ++#define APBX_IDM_IDM_ERROR_LOG_CONTROL 0x18130900 ++#define APBX_IDM_IDM_ERROR_LOG_COMPLETE 0x18130904 ++#define APBX_IDM_IDM_ERROR_LOG_STATUS 0x18130908 ++#define APBX_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1813090c ++#define APBX_IDM_IDM_ERROR_LOG_ID 0x18130914 ++#define APBX_IDM_IDM_ERROR_LOG_FLAGS 0x1813091c ++#define APBX_IDM_IDM_INTERRUPT_STATUS 0x18130a00 ++#define DDR_S1_IDM_ERROR_LOG_CONTROL 0x18104900 ++#define DDR_S1_IDM_ERROR_LOG_COMPLETE 0x18104904 ++#define DDR_S1_IDM_ERROR_LOG_STATUS 0x18104908 ++#define DDR_S1_IDM_ERROR_LOG_ADDR_LSB 0x1810490c ++#define DDR_S1_IDM_ERROR_LOG_ID 0x18104914 ++#define DDR_S1_IDM_ERROR_LOG_FLAGS 0x1810491c ++#define DDR_S1_IDM_INTERRUPT_STATUS 0x18104a00 ++#define DDR_S2_IDM_ERROR_LOG_CONTROL 0x18105900 ++#define DDR_S2_IDM_ERROR_LOG_COMPLETE 0x18105904 ++#define DDR_S2_IDM_ERROR_LOG_STATUS 0x18105908 ++#define DDR_S2_IDM_ERROR_LOG_ADDR_LSB 0x1810590c ++#define DDR_S2_IDM_ERROR_LOG_ID 0x18105914 ++#define DDR_S2_IDM_ERROR_LOG_FLAGS 0x1810591c ++#define DDR_S2_IDM_INTERRUPT_STATUS 0x18105a00 ++#define ROM_S0_IDM_ERROR_LOG_CONTROL 0x1811a900 ++#define ROM_S0_IDM_ERROR_LOG_COMPLETE 0x1811a904 ++#define ROM_S0_IDM_ERROR_LOG_STATUS 0x1811a908 ++#define ROM_S0_IDM_ERROR_LOG_ADDR_LSB 0x1811a90c ++#define ROM_S0_IDM_ERROR_LOG_ID 0x1811a914 ++#define ROM_S0_IDM_ERROR_LOG_FLAGS 0x1811a91c ++#define ROM_S0_IDM_INTERRUPT_STATUS 0x1811aa00 ++#define NAND_IDM_IDM_ERROR_LOG_CONTROL 0x1811d900 ++#define NAND_IDM_IDM_ERROR_LOG_COMPLETE 0x1811d904 ++#define NAND_IDM_IDM_ERROR_LOG_STATUS 0x1811d908 ++#define NAND_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1811d90c ++#define NAND_IDM_IDM_ERROR_LOG_ID 0x1811d914 ++#define NAND_IDM_IDM_ERROR_LOG_FLAGS 0x1811d91c ++#define NAND_IDM_IDM_INTERRUPT_STATUS 0x1811da00 ++#define QSPI_IDM_IDM_ERROR_LOG_CONTROL 0x1811f900 ++#define QSPI_IDM_IDM_ERROR_LOG_COMPLETE 
0x1811f904 ++#define QSPI_IDM_IDM_ERROR_LOG_STATUS 0x1811f908 ++#define QSPI_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1811f90c ++#define QSPI_IDM_IDM_ERROR_LOG_ID 0x1811f914 ++#define QSPI_IDM_IDM_ERROR_LOG_FLAGS 0x1811f91c ++#define QSPI_IDM_IDM_INTERRUPT_STATUS 0x1811fa00 ++#define AXIIC_DS_0_IDM_ERROR_LOG_CONTROL 0x18120900 ++#define AXIIC_DS_0_IDM_ERROR_LOG_COMPLETE 0x18120904 ++#define AXIIC_DS_0_IDM_ERROR_LOG_STATUS 0x18120908 ++#define AXIIC_DS_0_IDM_ERROR_LOG_ADDR_LSB 0x1812090c ++#define AXIIC_DS_0_IDM_ERROR_LOG_ID 0x18120914 ++#define AXIIC_DS_0_IDM_ERROR_LOG_FLAGS 0x1812091c ++#define AXIIC_DS_0_IDM_INTERRUPT_STATUS 0x18120a00 ++ ++#elif defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_SB2) || \ ++ defined(CONFIG_MACH_GH2) ++ ++#define IHOST_S0_IDM_ERROR_LOG_CONTROL 0x18107900 ++#define IHOST_S0_IDM_ERROR_LOG_COMPLETE 0x18107904 ++#define IHOST_S0_IDM_ERROR_LOG_STATUS 0x18107908 ++#define IHOST_S0_IDM_ERROR_LOG_ADDR_LSB 0x1810790c ++#define IHOST_S0_IDM_ERROR_LOG_ID 0x18107914 ++#define IHOST_S0_IDM_ERROR_LOG_FLAGS 0x1810791c ++#define IHOST_S0_IDM_INTERRUPT_STATUS 0x18107a00 ++#define IHOST_S1_IDM_ERROR_LOG_CONTROL 0x18106900 ++#define IHOST_S1_IDM_ERROR_LOG_COMPLETE 0x18106904 ++#define IHOST_S1_IDM_ERROR_LOG_STATUS 0x18106908 ++#define IHOST_S1_IDM_ERROR_LOG_ADDR_LSB 0x1810690c ++#define IHOST_S1_IDM_ERROR_LOG_ID 0x18106914 ++#define IHOST_S1_IDM_ERROR_LOG_FLAGS 0x1810691c ++#define IHOST_S1_IDM_INTERRUPT_STATUS 0x18106a00 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_CONTROL 0x18108900 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_COMPLETE 0x18108904 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_STATUS 0x18108908 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1810890c ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ID 0x18108914 ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_FLAGS 0x1810891c ++#define AXI_PCIE_S0_IDM_IDM_INTERRUPT_STATUS 0x18108a00 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_CONTROL 0x1810a900 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_COMPLETE 0x1810a904 ++#define 
CMICD_S0_IDM_IDM_ERROR_LOG_STATUS 0x1810a908 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1810a90c ++#define CMICD_S0_IDM_IDM_ERROR_LOG_ID 0x1810a914 ++#define CMICD_S0_IDM_IDM_ERROR_LOG_FLAGS 0x1810a91c ++#define CMICD_S0_IDM_IDM_INTERRUPT_STATUS 0x1810aa00 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_CONTROL 0x18119900 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_COMPLETE 0x18119904 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_STATUS 0x18119908 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1811990c ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_ID 0x18119914 ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_FLAGS 0x1811991c ++#define A9JTAG_S0_IDM_IDM_INTERRUPT_STATUS 0x18119a00 ++#define SRAM_S0_IDM_ERROR_LOG_CONTROL 0x1811b900 ++#define SRAM_S0_IDM_ERROR_LOG_COMPLETE 0x1811b904 ++#define SRAM_S0_IDM_ERROR_LOG_STATUS 0x1811b908 ++#define SRAM_S0_IDM_ERROR_LOG_ADDR_LSB 0x1811b90c ++#define SRAM_S0_IDM_ERROR_LOG_ID 0x1811b914 ++#define SRAM_S0_IDM_ERROR_LOG_FLAGS 0x1811b91c ++#define SRAM_S0_IDM_INTERRUPT_STATUS 0x1811ba00 ++#define APBX_IDM_IDM_ERROR_LOG_CONTROL 0x18130900 ++#define APBX_IDM_IDM_ERROR_LOG_COMPLETE 0x18130904 ++#define APBX_IDM_IDM_ERROR_LOG_STATUS 0x18130908 ++#define APBX_IDM_IDM_ERROR_LOG_ADDR_LSB 0x1813090c ++#define APBX_IDM_IDM_ERROR_LOG_ID 0x18130914 ++#define APBX_IDM_IDM_ERROR_LOG_FLAGS 0x1813091c ++#define APBX_IDM_IDM_INTERRUPT_STATUS 0x18130a00 ++#define DDR_S1_IDM_ERROR_LOG_CONTROL 0xf8102900 ++#define DDR_S1_IDM_ERROR_LOG_COMPLETE 0xf8102904 ++#define DDR_S1_IDM_ERROR_LOG_STATUS 0xf8102908 ++#define DDR_S1_IDM_ERROR_LOG_ADDR_LSB 0xf810290c ++#define DDR_S1_IDM_ERROR_LOG_ID 0xf8102914 ++#define DDR_S1_IDM_ERROR_LOG_FLAGS 0xf810291c ++#define DDR_S1_IDM_INTERRUPT_STATUS 0xf8102a00 ++#define DDR_S2_IDM_ERROR_LOG_CONTROL 0xf8103900 ++#define DDR_S2_IDM_ERROR_LOG_COMPLETE 0xf8103904 ++#define DDR_S2_IDM_ERROR_LOG_STATUS 0xf8103908 ++#define DDR_S2_IDM_ERROR_LOG_ADDR_LSB 0xf810390c ++#define DDR_S2_IDM_ERROR_LOG_ID 0xf8103914 ++#define DDR_S2_IDM_ERROR_LOG_FLAGS 0xf810391c 
++#define DDR_S2_IDM_INTERRUPT_STATUS 0xf8103a00 ++#define ROM_S0_IDM_ERROR_LOG_CONTROL 0xf8104900 ++#define ROM_S0_IDM_ERROR_LOG_COMPLETE 0xf8104904 ++#define ROM_S0_IDM_ERROR_LOG_STATUS 0xf8104908 ++#define ROM_S0_IDM_ERROR_LOG_ADDR_LSB 0xf810490c ++#define ROM_S0_IDM_ERROR_LOG_ID 0xf8104914 ++#define ROM_S0_IDM_ERROR_LOG_FLAGS 0xf810491c ++#define ROM_S0_IDM_INTERRUPT_STATUS 0xf8104a00 ++#define NAND_IDM_IDM_ERROR_LOG_CONTROL 0xf8105900 ++#define NAND_IDM_IDM_ERROR_LOG_COMPLETE 0xf8105904 ++#define NAND_IDM_IDM_ERROR_LOG_STATUS 0xf8105908 ++#define NAND_IDM_IDM_ERROR_LOG_ADDR_LSB 0xf810590c ++#define NAND_IDM_IDM_ERROR_LOG_ID 0xf8105914 ++#define NAND_IDM_IDM_ERROR_LOG_FLAGS 0xf810591c ++#define NAND_IDM_IDM_INTERRUPT_STATUS 0xf8105a00 ++#define QSPI_IDM_IDM_ERROR_LOG_CONTROL 0xf8106900 ++#define QSPI_IDM_IDM_ERROR_LOG_COMPLETE 0xf8106904 ++#define QSPI_IDM_IDM_ERROR_LOG_STATUS 0xf8106908 ++#define QSPI_IDM_IDM_ERROR_LOG_ADDR_LSB 0xf810690c ++#define QSPI_IDM_IDM_ERROR_LOG_ID 0xf8106914 ++#define QSPI_IDM_IDM_ERROR_LOG_FLAGS 0xf810691c ++#define QSPI_IDM_IDM_INTERRUPT_STATUS 0xf8106a00 ++#define AXIIC_DS_0_IDM_ERROR_LOG_CONTROL 0x18120900 ++#define AXIIC_DS_0_IDM_ERROR_LOG_COMPLETE 0x18120904 ++#define AXIIC_DS_0_IDM_ERROR_LOG_STATUS 0x18120908 ++#define AXIIC_DS_0_IDM_ERROR_LOG_ADDR_LSB 0x1812090c ++#define AXIIC_DS_0_IDM_ERROR_LOG_ID 0x18120914 ++#define AXIIC_DS_0_IDM_ERROR_LOG_FLAGS 0x1812091c ++#define AXIIC_DS_0_IDM_INTERRUPT_STATUS 0x18120a00 ++#if !defined(CONFIG_MACH_GH2) ++#define AXIIC_DS_3_IDM_ERROR_LOG_CONTROL 0x1811e900 ++#define AXIIC_DS_3_IDM_ERROR_LOG_COMPLETE 0x1811e904 ++#define AXIIC_DS_3_IDM_ERROR_LOG_STATUS 0x1811e908 ++#define AXIIC_DS_3_IDM_ERROR_LOG_ADDR_LSB 0x1811e90c ++#define AXIIC_DS_3_IDM_ERROR_LOG_ID 0x1811e914 ++#define AXIIC_DS_3_IDM_ERROR_LOG_FLAGS 0x1811e91c ++#define AXIIC_DS_3_IDM_INTERRUPT_STATUS 0x1811ea00 ++#endif /* !defined(CONFIG_MACH_GH2) */ ++ ++#endif /* defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) || \ ++ 
defined(CONFIG_MACH_HR2) */ ++ ++#if defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_SB2) || \ ++ defined(CONFIG_MACH_GH2) ++#define IDM_IO_PHYS_TO_VIRT(x) (void __iomem *) \ ++ (((x & 0xFFF00000) == (int)get_iproc_idm_base_phys(0))? \ ++ get_iproc_idm_base(0) + x - get_iproc_idm_base_phys(0) : \ ++ get_iproc_idm_base(1) + x - get_iproc_idm_base_phys(1)) ++#else ++#define IDM_IO_PHYS_TO_VIRT(x) (void __iomem *) \ ++ (get_iproc_idm_base(0) + x - get_iproc_idm_base_phys(0)) ++#endif ++ ++ ++#define IHOST_S1_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(IHOST_S1_IDM_ERROR_LOG_CONTROL) ++#define IHOST_S1_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(IHOST_S1_IDM_ERROR_LOG_COMPLETE) ++#define IHOST_S1_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(IHOST_S1_IDM_ERROR_LOG_STATUS) ++#define IHOST_S1_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(IHOST_S1_IDM_ERROR_LOG_ADDR_LSB) ++#define IHOST_S1_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(IHOST_S1_IDM_ERROR_LOG_ID) ++#define IHOST_S1_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(IHOST_S1_IDM_ERROR_LOG_FLAGS) ++ ++#define IHOST_S0_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(IHOST_S0_IDM_ERROR_LOG_CONTROL) ++#define IHOST_S0_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(IHOST_S0_IDM_ERROR_LOG_COMPLETE) ++#define IHOST_S0_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(IHOST_S0_IDM_ERROR_LOG_STATUS) ++#define IHOST_S0_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(IHOST_S0_IDM_ERROR_LOG_ADDR_LSB) ++#define IHOST_S0_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(IHOST_S0_IDM_ERROR_LOG_ID) ++#define IHOST_S0_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(IHOST_S0_IDM_ERROR_LOG_FLAGS) ++ ++#define DDR_S1_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(DDR_S1_IDM_ERROR_LOG_CONTROL) ++#define DDR_S1_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(DDR_S1_IDM_ERROR_LOG_COMPLETE) ++#define DDR_S1_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(DDR_S1_IDM_ERROR_LOG_STATUS) ++#define DDR_S1_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(DDR_S1_IDM_ERROR_LOG_ADDR_LSB) 
++#define DDR_S1_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(DDR_S1_IDM_ERROR_LOG_ID) ++#define DDR_S1_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(DDR_S1_IDM_ERROR_LOG_FLAGS) ++ ++#define DDR_S2_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(DDR_S2_IDM_ERROR_LOG_CONTROL) ++#define DDR_S2_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(DDR_S2_IDM_ERROR_LOG_COMPLETE) ++#define DDR_S2_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(DDR_S2_IDM_ERROR_LOG_STATUS) ++#define DDR_S2_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(DDR_S2_IDM_ERROR_LOG_ADDR_LSB) ++#define DDR_S2_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(DDR_S2_IDM_ERROR_LOG_ID) ++#define DDR_S2_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(DDR_S2_IDM_ERROR_LOG_FLAGS) ++ ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(AXI_PCIE_S0_IDM_IDM_ERROR_LOG_CONTROL) ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(AXI_PCIE_S0_IDM_IDM_ERROR_LOG_COMPLETE) ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(AXI_PCIE_S0_IDM_IDM_ERROR_LOG_STATUS) ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ADDR_LSB) ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ID) ++#define AXI_PCIE_S0_IDM_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(AXI_PCIE_S0_IDM_IDM_ERROR_LOG_FLAGS) ++ ++#define CMICD_S0_IDM_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(CMICD_S0_IDM_IDM_ERROR_LOG_CONTROL) ++#define CMICD_S0_IDM_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(CMICD_S0_IDM_IDM_ERROR_LOG_COMPLETE) ++#define CMICD_S0_IDM_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(CMICD_S0_IDM_IDM_ERROR_LOG_STATUS) ++#define CMICD_S0_IDM_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(CMICD_S0_IDM_IDM_ERROR_LOG_ADDR_LSB) ++#define CMICD_S0_IDM_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(CMICD_S0_IDM_IDM_ERROR_LOG_ID) ++#define CMICD_S0_IDM_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(CMICD_S0_IDM_IDM_ERROR_LOG_FLAGS) ++ ++#define 
APBY_S0_IDM_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(APBY_S0_IDM_IDM_ERROR_LOG_CONTROL) ++#define APBY_S0_IDM_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(APBY_S0_IDM_IDM_ERROR_LOG_COMPLETE) ++#define APBY_S0_IDM_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(APBY_S0_IDM_IDM_ERROR_LOG_STATUS) ++#define APBY_S0_IDM_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(APBY_S0_IDM_IDM_ERROR_LOG_ADDR_LSB) ++#define APBY_S0_IDM_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(APBY_S0_IDM_IDM_ERROR_LOG_ID) ++#define APBY_S0_IDM_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(APBY_S0_IDM_IDM_ERROR_LOG_FLAGS) ++ ++#define ROM_S0_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(ROM_S0_IDM_ERROR_LOG_CONTROL) ++#define ROM_S0_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(ROM_S0_IDM_ERROR_LOG_COMPLETE) ++#define ROM_S0_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(ROM_S0_IDM_ERROR_LOG_STATUS) ++#define ROM_S0_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(ROM_S0_IDM_ERROR_LOG_ADDR_LSB) ++#define ROM_S0_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(ROM_S0_IDM_ERROR_LOG_ID) ++#define ROM_S0_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(ROM_S0_IDM_ERROR_LOG_FLAGS) ++ ++#define NAND_IDM_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(NAND_IDM_IDM_ERROR_LOG_CONTROL) ++#define NAND_IDM_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(NAND_IDM_IDM_ERROR_LOG_COMPLETE) ++#define NAND_IDM_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(NAND_IDM_IDM_ERROR_LOG_STATUS) ++#define NAND_IDM_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(NAND_IDM_IDM_ERROR_LOG_ADDR_LSB) ++#define NAND_IDM_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(NAND_IDM_IDM_ERROR_LOG_ID) ++#define NAND_IDM_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(NAND_IDM_IDM_ERROR_LOG_FLAGS) ++ ++#define QSPI_IDM_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(QSPI_IDM_IDM_ERROR_LOG_CONTROL) ++#define QSPI_IDM_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(QSPI_IDM_IDM_ERROR_LOG_COMPLETE) ++#define QSPI_IDM_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(QSPI_IDM_IDM_ERROR_LOG_STATUS) 
++#define QSPI_IDM_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(QSPI_IDM_IDM_ERROR_LOG_ADDR_LSB) ++#define QSPI_IDM_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(QSPI_IDM_IDM_ERROR_LOG_ID) ++#define QSPI_IDM_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(QSPI_IDM_IDM_ERROR_LOG_FLAGS) ++ ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(A9JTAG_S0_IDM_IDM_ERROR_LOG_CONTROL) ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(A9JTAG_S0_IDM_IDM_ERROR_LOG_COMPLETE) ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(A9JTAG_S0_IDM_IDM_ERROR_LOG_STATUS) ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(A9JTAG_S0_IDM_IDM_ERROR_LOG_ADDR_LSB) ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(A9JTAG_S0_IDM_IDM_ERROR_LOG_ID) ++#define A9JTAG_S0_IDM_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(A9JTAG_S0_IDM_IDM_ERROR_LOG_FLAGS) ++ ++#define SRAM_S0_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(SRAM_S0_IDM_ERROR_LOG_CONTROL) ++#define SRAM_S0_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(SRAM_S0_IDM_ERROR_LOG_COMPLETE) ++#define SRAM_S0_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(SRAM_S0_IDM_ERROR_LOG_STATUS) ++#define SRAM_S0_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(SRAM_S0_IDM_ERROR_LOG_ADDR_LSB) ++#define SRAM_S0_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(SRAM_S0_IDM_ERROR_LOG_ID) ++#define SRAM_S0_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(SRAM_S0_IDM_ERROR_LOG_FLAGS) ++ ++#define APBZ_S0_IDM_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(APBZ_S0_IDM_IDM_ERROR_LOG_CONTROL) ++#define APBZ_S0_IDM_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(APBZ_S0_IDM_IDM_ERROR_LOG_COMPLETE) ++#define APBZ_S0_IDM_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(APBZ_S0_IDM_IDM_ERROR_LOG_STATUS) ++#define APBZ_S0_IDM_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(APBZ_S0_IDM_IDM_ERROR_LOG_ADDR_LSB) ++#define APBZ_S0_IDM_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(APBZ_S0_IDM_IDM_ERROR_LOG_ID) ++#define 
APBZ_S0_IDM_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(APBZ_S0_IDM_IDM_ERROR_LOG_FLAGS) ++ ++#define AXIIC_DS_3_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(AXIIC_DS_3_IDM_ERROR_LOG_CONTROL) ++#define AXIIC_DS_3_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(AXIIC_DS_3_IDM_ERROR_LOG_COMPLETE) ++#define AXIIC_DS_3_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(AXIIC_DS_3_IDM_ERROR_LOG_STATUS) ++#define AXIIC_DS_3_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(AXIIC_DS_3_IDM_ERROR_LOG_ADDR_LSB) ++#define AXIIC_DS_3_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(AXIIC_DS_3_IDM_ERROR_LOG_ID) ++#define AXIIC_DS_3_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(AXIIC_DS_3_IDM_ERROR_LOG_FLAGS) ++ ++#define APBW_IDM_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(APBW_IDM_IDM_ERROR_LOG_CONTROL) ++#define APBW_IDM_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(APBW_IDM_IDM_ERROR_LOG_COMPLETE) ++#define APBW_IDM_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(APBW_IDM_IDM_ERROR_LOG_STATUS) ++#define APBW_IDM_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(APBW_IDM_IDM_ERROR_LOG_ADDR_LSB) ++#define APBW_IDM_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(APBW_IDM_IDM_ERROR_LOG_ID) ++#define APBW_IDM_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(APBW_IDM_IDM_ERROR_LOG_FLAGS) ++ ++#define APBX_IDM_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(APBX_IDM_IDM_ERROR_LOG_CONTROL) ++#define APBX_IDM_IDM_ERROR_LOG_COMPLETE_VA IDM_IO_PHYS_TO_VIRT(APBX_IDM_IDM_ERROR_LOG_COMPLETE) ++#define APBX_IDM_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(APBX_IDM_IDM_ERROR_LOG_STATUS) ++#define APBX_IDM_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(APBX_IDM_IDM_ERROR_LOG_ADDR_LSB) ++#define APBX_IDM_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(APBX_IDM_IDM_ERROR_LOG_ID) ++#define APBX_IDM_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(APBX_IDM_IDM_ERROR_LOG_FLAGS) ++ ++#define AXIIC_DS_0_IDM_ERROR_LOG_CONTROL_VA IDM_IO_PHYS_TO_VIRT(AXIIC_DS_0_IDM_ERROR_LOG_CONTROL) ++#define AXIIC_DS_0_IDM_ERROR_LOG_COMPLETE_VA 
IDM_IO_PHYS_TO_VIRT(AXIIC_DS_0_IDM_ERROR_LOG_COMPLETE) ++#define AXIIC_DS_0_IDM_ERROR_LOG_STATUS_VA IDM_IO_PHYS_TO_VIRT(AXIIC_DS_0_IDM_ERROR_LOG_STATUS) ++#define AXIIC_DS_0_IDM_ERROR_LOG_ADDR_LSB_VA IDM_IO_PHYS_TO_VIRT(AXIIC_DS_0_IDM_ERROR_LOG_ADDR_LSB) ++#define AXIIC_DS_0_IDM_ERROR_LOG_ID_VA IDM_IO_PHYS_TO_VIRT(AXIIC_DS_0_IDM_ERROR_LOG_ID) ++#define AXIIC_DS_0_IDM_ERROR_LOG_FLAGS_VA IDM_IO_PHYS_TO_VIRT(AXIIC_DS_0_IDM_ERROR_LOG_FLAGS) ++ ++#define IDM_ERROR_LOG_ENABLE 0x33A ++#define IDM_ERROR_LOG_CLEAR 0x3 ++ ++#ifdef CONFIG_MACH_IPROC_P7 ++ ++#if defined(CONFIG_MACH_HR3) || defined(CONFIG_MACH_GH2) ++#define IHOST_S0_IDM_IRQ 52 ++#define DDR_S1_IDM_IRQ 53 ++#define DDR_S2_IDM_IRQ 54 ++#define AXI_PCIE_S0_IDM_IRQ 55 ++#define ROM_S0_IDM_IRQ 56 ++#define NAND_IDM_IRQ 57 ++#define QSPI_IDM_IRQ 58 ++#define PNOR_IDM_IRQ 59 ++#define SRAM_S0_IDM_IRQ 60 ++#define A9JTAG_S0_IDM_IRQ 61 ++#define APX_IDM_IRQ 64 ++#define CMICD_S0_IDM_IRQ 67 ++#define AXIIC_DS_0_IDM_IRQ 68 ++#define AXIIC_DS_1_IDM_IRQ 69 ++#define AXIIC_DS_2_IDM_IRQ 70 ++#else ++#define IHOST_S0_IDM_IRQ 52 ++#define DDR_S1_IDM_IRQ 54 ++#define DDR_S2_IDM_IRQ 55 ++#define AXI_PCIE_S0_IDM_IRQ 56 ++#define AXI_PCIE_S1_IDM_IRQ 57 ++#define ROM_S0_IDM_IRQ 58 ++#define NAND_IDM_IRQ 59 ++#define QSPI_IDM_IRQ 60 ++#define SRAM_S0_IDM_IRQ 62 ++#define A9JTAG_S0_IDM_IRQ 64 ++#define APX_IDM_IRQ 68 ++#define CMICD_S0_IDM_IRQ 71 ++#define AXIIC_DS_0_IDM_IRQ 78 ++#define AXIIC_DS_1_IDM_IRQ 79 ++#define AXIIC_DS_2_IDM_IRQ 80 ++#define AXIIC_DS_3_IDM_IRQ 81 ++#endif /* defined(CONFIG_MACH_HR3) || defined(CONFIG_MACH_GH2) */ ++ ++#else /* CONFIG_MACH_IPROC_P7 */ ++ ++#define IHOST_S1_IDM_IRQ 62 ++#define IHOST_S0_IDM_IRQ 63 ++#define DDR_S1_IDM_IRQ 64 ++#define DDR_S2_IDM_IRQ 65 ++#define AXI_PCIE_S0_IDM_IRQ 66 ++#define AXI_PCIE_S1_IDM_IRQ 67 ++#define CMICD_S0_IDM_IRQ 68 ++#define ROM_S0_IDM_IRQ 69 ++#define NAND_IDM_IRQ 70 ++#define QSPI_IDM_IRQ 71 ++#define A9JTAG_S0_IDM_IRQ 73 ++#define SRAM_S0_IDM_IRQ 74 ++#define 
APW_IDM_IRQ 75 ++#define APX_IDM_IRQ 76 ++#define APBY_S0_IDM_IRQ 77 ++#define APBZ_S0_IDM_IRQ 78 ++#define AXIIC_DS_0_IDM_IRQ 79 ++#define AXIIC_DS_1_IDM_IRQ 80 ++#define AXIIC_DS_2_IDM_IRQ 81 ++#define AXIIC_DS_3_IDM_IRQ 82 ++#define AXIIC_DS_4_IDM_IRQ 83 ++ ++#endif /* CONFIG_MACH_IPROC_P7 */ ++ ++static irqreturn_t idm_timeout_handler(int val, void *ptr) ++{ ++ u32 errval; ++ ++ printk(KERN_DEBUG "%s: %d, %d entry\n", __func__, __LINE__, val); ++ errval = readl(IHOST_S1_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(IHOST_S1_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(IHOST_S1_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(IHOST_S1_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, IHOST_S1_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(IHOST_S1_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++ errval = readl(IHOST_S0_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(IHOST_S0_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(IHOST_S0_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(IHOST_S0_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, IHOST_S0_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(IHOST_S0_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++ errval = readl(DDR_S1_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = 
readl(DDR_S1_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(DDR_S1_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(DDR_S1_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, DDR_S1_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(DDR_S1_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++ errval = readl(DDR_S2_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(DDR_S2_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(DDR_S2_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(DDR_S2_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, DDR_S2_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(DDR_S2_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++ errval = readl(AXI_PCIE_S0_IDM_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(AXI_PCIE_S0_IDM_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(AXI_PCIE_S0_IDM_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, AXI_PCIE_S0_IDM_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(AXI_PCIE_S0_IDM_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++ errval = 
readl(CMICD_S0_IDM_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(CMICD_S0_IDM_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(CMICD_S0_IDM_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(CMICD_S0_IDM_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, CMICD_S0_IDM_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(CMICD_S0_IDM_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++#if !defined(CONFIG_MACH_IPROC_P7) ++ errval = readl(APBY_S0_IDM_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(APBY_S0_IDM_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(APBY_S0_IDM_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(APBY_S0_IDM_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, APBY_S0_IDM_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(APBY_S0_IDM_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++#endif ++ errval = readl(ROM_S0_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(ROM_S0_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(ROM_S0_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(ROM_S0_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, 
ROM_S0_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(ROM_S0_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++ errval = readl(NAND_IDM_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(NAND_IDM_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(NAND_IDM_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(NAND_IDM_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, NAND_IDM_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(NAND_IDM_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++ errval = readl(QSPI_IDM_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(QSPI_IDM_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(QSPI_IDM_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(QSPI_IDM_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, QSPI_IDM_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(QSPI_IDM_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++ errval = readl(A9JTAG_S0_IDM_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(A9JTAG_S0_IDM_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(A9JTAG_S0_IDM_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = 
readl(A9JTAG_S0_IDM_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, A9JTAG_S0_IDM_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(A9JTAG_S0_IDM_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++#if !defined(CONFIG_MACH_IPROC_P7) ++ errval = readl(SRAM_S0_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(SRAM_S0_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(SRAM_S0_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(SRAM_S0_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, SRAM_S0_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(SRAM_S0_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++ errval = readl(APBZ_S0_IDM_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(APBZ_S0_IDM_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(APBZ_S0_IDM_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(APBZ_S0_IDM_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, APBZ_S0_IDM_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(APBZ_S0_IDM_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++#endif ++ ++#if !defined(CONFIG_MACH_HR3) && !defined(CONFIG_MACH_GH2) ++ errval = readl(AXIIC_DS_3_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = 
readl(AXIIC_DS_3_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(AXIIC_DS_3_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(AXIIC_DS_3_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, AXIIC_DS_3_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(AXIIC_DS_3_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++#endif /* !defined(CONFIG_MACH_HR3) && !defined(CONFIG_MACH_GH2)*/ ++ ++#if !defined(CONFIG_MACH_IPROC_P7) ++ errval = readl(APBW_IDM_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(APBW_IDM_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(APBW_IDM_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(APBW_IDM_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, APBW_IDM_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(APBW_IDM_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++#endif ++ errval = readl(APBX_IDM_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(APBX_IDM_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(APBX_IDM_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(APBX_IDM_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, APBX_IDM_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(APBX_IDM_IDM_ERROR_LOG_STATUS_VA); ++ 
printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++ errval = readl(AXIIC_DS_0_IDM_ERROR_LOG_STATUS_VA); ++ if (errval > 0) ++ { ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ errval = readl(AXIIC_DS_0_IDM_ERROR_LOG_ADDR_LSB_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(AXIIC_DS_0_IDM_ERROR_LOG_ID_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ errval = readl(AXIIC_DS_0_IDM_ERROR_LOG_FLAGS_VA); ++ printk(KERN_DEBUG "%s: %d, %08x\n", __func__, __LINE__, errval); ++ writel(IDM_ERROR_LOG_CLEAR, AXIIC_DS_0_IDM_ERROR_LOG_COMPLETE_VA); ++ errval = readl(AXIIC_DS_0_IDM_ERROR_LOG_STATUS_VA); ++ printk(KERN_DEBUG "%s: %d, %d\n", __func__, __LINE__, errval); ++ } ++ ++ return IRQ_HANDLED; ++} ++ ++void init_request_idm_timeout(void) ++{ ++ /* clear all pending idm interrupts */ ++ idm_timeout_handler(0, NULL); ++ ++ /* enable idm error log for all slaves */ ++ ++ writel(IDM_ERROR_LOG_ENABLE, IHOST_S1_IDM_ERROR_LOG_CONTROL_VA); ++ writel(IDM_ERROR_LOG_ENABLE, IHOST_S0_IDM_ERROR_LOG_COMPLETE_VA); ++ writel(IDM_ERROR_LOG_ENABLE, DDR_S1_IDM_ERROR_LOG_COMPLETE_VA); ++ writel(IDM_ERROR_LOG_ENABLE, DDR_S2_IDM_ERROR_LOG_COMPLETE_VA); ++ writel(IDM_ERROR_LOG_ENABLE, AXI_PCIE_S0_IDM_IDM_ERROR_LOG_CONTROL_VA); ++ writel(IDM_ERROR_LOG_ENABLE, CMICD_S0_IDM_IDM_ERROR_LOG_CONTROL_VA); ++ ++#if !defined(CONFIG_MACH_HR3) && !defined(CONFIG_MACH_GH2) ++ writel(IDM_ERROR_LOG_ENABLE, SRAM_S0_IDM_ERROR_LOG_CONTROL_VA); ++#endif /* !defined(CONFIG_MACH_HR3) && !defined(CONFIG_MACH_GH2)*/ ++ ++#ifndef CONFIG_MACH_IPROC_P7 ++ writel(IDM_ERROR_LOG_ENABLE, APBY_S0_IDM_IDM_ERROR_LOG_CONTROL_VA); ++ writel(IDM_ERROR_LOG_ENABLE, APBZ_S0_IDM_IDM_ERROR_LOG_CONTROL_VA); ++ writel(IDM_ERROR_LOG_ENABLE, APBW_IDM_IDM_ERROR_LOG_CONTROL_VA); ++#endif ++ ++ writel(IDM_ERROR_LOG_ENABLE, ROM_S0_IDM_ERROR_LOG_CONTROL_VA); ++ writel(IDM_ERROR_LOG_ENABLE, NAND_IDM_IDM_ERROR_LOG_CONTROL_VA); ++ 
writel(IDM_ERROR_LOG_ENABLE, QSPI_IDM_IDM_ERROR_LOG_CONTROL_VA); ++ writel(IDM_ERROR_LOG_ENABLE, A9JTAG_S0_IDM_IDM_ERROR_LOG_CONTROL_VA); ++#if !defined(CONFIG_MACH_HR3) && !defined(CONFIG_MACH_GH2) ++ writel(IDM_ERROR_LOG_ENABLE, AXIIC_DS_3_IDM_ERROR_LOG_COMPLETE_VA); ++#endif /* !defined(CONFIG_MACH_HR3) && !defined(CONFIG_MACH_GH2) */ ++ writel(IDM_ERROR_LOG_ENABLE, APBX_IDM_IDM_ERROR_LOG_CONTROL_VA); ++ writel(IDM_ERROR_LOG_ENABLE, AXIIC_DS_0_IDM_ERROR_LOG_CONTROL_VA); ++} ++ ++int request_idm_timeout_interrupts(struct device_node *np) ++{ ++ int i, ret, irq; ++ unsigned int irqs_total; ++ ++ init_request_idm_timeout(); ++ ++ irqs_total = of_irq_count(np); ++ if (!irqs_total) ++ return -EINVAL; ++ ++ for (i=0; i ++#include ++#include ++#include ++ ++#define IPROC_DMU_PCU_COMPATIBLE "brcm,iproc-dmu-pcu" ++#define IPROC_WRAP_CTRL_COMPATIBLE "brcm,iproc-wrap-ctrl" ++#define IPROC_IDM_COMPATIBLE "brcm,iproc-idm" ++#define MAX_IDM_NUM 2 ++ ++static void __iomem *iproc_dmu_pcu_base=NULL; ++static void __iomem *iproc_wrap_ctrl_base=NULL; ++static void __iomem *iproc_idm_base[MAX_IDM_NUM]={NULL}; ++static void __iomem *iproc_idm_base_phys[MAX_IDM_NUM]={NULL}; ++ ++ ++extern void request_idm_timeout_interrupts(struct device_node *); ++ ++void inline __iomem *get_iproc_dmu_pcu_base(void) { ++ return iproc_dmu_pcu_base; ++} ++ ++void inline __iomem *get_iproc_wrap_ctrl_base(void) { ++ return iproc_wrap_ctrl_base; ++} ++ ++void inline __iomem *get_iproc_idm_base(int index) { ++ return iproc_idm_base[index]; ++} ++ ++void inline __iomem *get_iproc_idm_base_phys(int index) { ++ return iproc_idm_base_phys[index]; ++} ++ ++int xgs_iproc_wrap_idm_dmu_base_reg_setup(void) ++{ ++ struct device_node *np; ++ ++ /* Get DMU/PCU base addr */ ++ np = of_find_compatible_node(NULL, NULL, IPROC_DMU_PCU_COMPATIBLE); ++ if (!np) { ++ pr_err("%s: No dmu/pcu node found\n", __func__); ++ return -ENODEV ; ++ } ++ iproc_dmu_pcu_base = of_iomap(np, 0); ++ if (!iproc_dmu_pcu_base) ++ return 
-ENOMEM; ++ ++ /* Get WRAP CTRL base addr */ ++ np = of_find_compatible_node(NULL, NULL, IPROC_WRAP_CTRL_COMPATIBLE); ++ if (!np) { ++ pr_err("%s: No wrap ctrl node found\n", __func__); ++ return -ENODEV; ++ } ++ iproc_wrap_ctrl_base = of_iomap(np, 0); ++ if (!iproc_wrap_ctrl_base) ++ return -ENOMEM; ++ ++ /* Get IDM base addr */ ++ np = of_find_compatible_node(NULL, NULL, IPROC_IDM_COMPATIBLE); ++ if (!np) { ++ pr_err("%s: No IDM node found\n", __func__); ++ return -ENODEV; ++ } ++ iproc_idm_base[0] = of_iomap(np, 0); ++ if (!iproc_idm_base[0]) ++ return -ENOMEM; ++ ++ /* Second IDM base addr required for GH/SB2/GH2 IDM timeout handling. ++ * For other devices, the second IDM base addr is not used. So, it is ++ * fine even the addr is NULL. ++ */ ++ iproc_idm_base[1] = of_iomap(np, 1); ++ ++ return 1; ++} ++ ++void xgs_iproc_idm_timeout_handler_setup(void) ++{ ++ struct device_node *np; ++ struct platform_device *pdev=NULL; ++ struct resource *res_mem; ++ ++ /* To get IDM phys addr */ ++ np = of_find_compatible_node(NULL, NULL, IPROC_IDM_COMPATIBLE); ++ if (!np) { ++ pr_warn("%s: No IDM node found\n", __func__); ++ return; ++ } ++ pdev = of_find_device_by_node(np); ++ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res_mem) { ++ pr_warn("%s: No resource found\n", __func__); ++ return; ++ } ++ iproc_idm_base_phys[0] = (void __iomem *)res_mem->start; ++ ++ res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ /* Only GH/SB2/GH2 has second IDM base addr */ ++ if (res_mem) ++ iproc_idm_base_phys[1] = (void __iomem *)res_mem->start; ++ ++ /* register IDM timeout interrupt handler */ ++ request_idm_timeout_interrupts(np); ++} +\ No newline at end of file +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/spi/Kconfig b/drivers/spi/Kconfig +--- a/drivers/spi/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/spi/Kconfig 2017-11-09 17:53:55.798375000 +0800 
+@@ -654,6 +654,74 @@ config SPI_NUC900 + help + SPI driver for Nuvoton NUC900 series ARM SoCs + ++config SPI_XGS_IPROC ++ tristate "BRCM XGS iProc QSPI support" ++ depends on ARCH_XGS_IPROC ++ default n ++ help ++ This selects a driver for the iProc QSPI Controller (for serial flash). ++ ++ If unsure, say N. ++ ++if SPI_XGS_IPROC ++ ++choice ++ prompt "Multi I/O SPI support" ++ default IPROC_QSPI_SINGLE_MODE ++ help ++ Number of (multi I/O) data lanes supported by the SPI flash. ++ ++config IPROC_QSPI_SINGLE_MODE ++ bool "Single lane" ++ help ++ Single lane. ++ ++config IPROC_QSPI_DUAL_MODE ++ bool "Dual mode" ++ help ++ Dual mode. ++ ++config IPROC_QSPI_QUAD_MODE ++ bool "Quad mode" ++ help ++ Quad mode. ++ ++endchoice ++ ++config IPROC_QSPI_MULTI_LANE_ADDR ++ bool "Use multi lanes also for address" ++ depends on IPROC_QSPI_DUAL_MODE || IPROC_QSPI_QUAD_MODE ++ default y ++ help ++ Use multi lanes also for address. ++ ++config IPROC_QSPI_READ_CMD ++ hex "Flash opcode for multi I/O read" ++ depends on IPROC_QSPI_DUAL_MODE || IPROC_QSPI_QUAD_MODE ++ range 0x00 0xff ++ default 0xbb if IPROC_QSPI_DUAL_MODE ++ default 0xeb ++ help ++ Flash opcode to send to flash for multip I/O read. ++ ++config IPROC_QSPI_READ_DUMMY_CYCLES ++ int "Dummy cycles for multi I/O read operation" ++ depends on IPROC_QSPI_DUAL_MODE || IPROC_QSPI_QUAD_MODE ++ range 0 255 ++ default 8 if IPROC_QSPI_DUAL_MODE ++ default 10 ++ help ++ Dummy cycles for flash read operation ++ ++config IPROC_QSPI_MAX_HZ ++ int "Maximal SPI clock in HZ" ++ range 1 1000000000 ++ default 62500000 ++ help ++ The maximal SPI clock (in Hz) supported by the flash. 
++ ++endif # SPI_XGS_IPROC ++ + # + # Add new SPI master controllers in alphabetical order above this line + # +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/spi/Makefile b/drivers/spi/Makefile +--- a/drivers/spi/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/spi/Makefile 2017-11-09 17:53:55.799375000 +0800 +@@ -93,3 +93,5 @@ obj-$(CONFIG_SPI_XILINX) += spi-xilinx. + obj-$(CONFIG_SPI_XLP) += spi-xlp.o + obj-$(CONFIG_SPI_XTENSA_XTFPGA) += spi-xtensa-xtfpga.o + obj-$(CONFIG_SPI_ZYNQMP_GQSPI) += spi-zynqmp-gqspi.o ++obj-$(CONFIG_SPI_XGS_IPROC) += spi-xgs-iproc.o ++ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/spi/spi-xgs-iproc.c b/drivers/spi/spi-xgs-iproc.c +--- a/drivers/spi/spi-xgs-iproc.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/spi/spi-xgs-iproc.c 2017-11-09 17:53:55.932382000 +0800 +@@ -0,0 +1,1982 @@ ++/* ++ * Copyright (C) 2013, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 
++ */ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#ifdef CONFIG_OF ++#include ++#include ++#include ++#include ++#endif ++ ++#define DBG(...) /* */ ++ ++/* ++ * Interrupts ++ */ ++ ++#define QSPI_INTR_COUNT (7) ++ ++#define QSPI_INTR_MSPI_HALTED_MASK (0x00000040) ++#define QSPI_INTR_MSPI_DONE_MASK (0x00000020) ++#define QSPI_INTR_BSPI_LR_OVERREAD_MASK (0x00000010) ++#define QSPI_INTR_BSPI_LR_SESSION_DONE_MASK (0x00000008) ++#define QSPI_INTR_BSPI_LR_IMPATIENT_MASK (0x00000004) ++#define QSPI_INTR_BSPI_LR_SESSION_ABORTED_MASK (0x00000002) ++#define QSPI_INTR_BSPI_LR_FULLNESS_REACHED_MASK (0x00000001) ++ ++#define BSPI_LR_INTERRUPTS_DATA \ ++ (QSPI_INTR_BSPI_LR_SESSION_DONE_MASK | \ ++ QSPI_INTR_BSPI_LR_FULLNESS_REACHED_MASK) ++ ++#define BSPI_LR_INTERRUPTS_ERROR \ ++ (QSPI_INTR_BSPI_LR_OVERREAD_MASK | \ ++ QSPI_INTR_BSPI_LR_IMPATIENT_MASK | \ ++ QSPI_INTR_BSPI_LR_SESSION_ABORTED_MASK) ++ ++#define BSPI_LR_INTERRUPTS_ALL \ ++ (BSPI_LR_INTERRUPTS_ERROR | \ ++ BSPI_LR_INTERRUPTS_DATA) ++ ++#define SPBR_MIN 8U ++#define SPBR_MAX 255U ++#define DEFAULT_SPEED_HZ 25000000UL ++#define MSPI_REFCLK_SOURCE "c_clk125" /* To be doubled */ ++#define MSPI_REFCLK_SOURCE_DEVID "iproc_slow" ++ ++/* ++ * Flash opcode and parameters ++ */ ++#define OPCODE_RDID 0x9f ++#define OPCODE_WREN 0x06 ++#define OPCODE_WRDI 0x04 ++#define OPCODE_WRR 0x01 ++#define OPCODE_RCR 0x35 ++#define OPCODE_READ 0x03 ++#define OPCODE_RDSR 0x05 ++#define OPCODE_WRSR 0x01 ++#define OPCODE_RDFSR 0x70 ++#define OPCODE_FAST_READ 0x0B ++#define OPCODE_FAST_READ_4B 0x0C ++#define OPCODE_EN4B 0xB7 ++#define OPCODE_EX4B 0xE9 ++#define OPCODE_BRWR 0x17 ++ ++#define BSPI_WIDTH_1BIT 1 ++#define BSPI_WIDTH_2BIT 2 ++#define BSPI_WIDTH_4BIT 4 ++ ++#define BSPI_ADDRLEN_3BYTES 3 ++#define BSPI_ADDRLEN_4BYTES 4 ++ ++#define BSPI_FLASH_TYPE_SPANSION 0 ++#define BSPI_FLASH_TYPE_MACRONIX 1 ++#define 
BSPI_FLASH_TYPE_NUMONYX 2 ++#define BSPI_FLASH_TYPE_SST 3 ++#define BSPI_FLASH_TYPE_UNKNOWN -1 ++ ++/* ++ * Register masks/fields/values ++ */ ++#define QSPI_BSPI_RAF_STATUS_FIFO_EMPTY_MASK (0x00000002) ++#define QSPI_BSPI_RAF_CONTROL_START_MASK (0x00000001) ++#define QSPI_BSPI_RAF_CONTROL_CLEAR_MASK (0x00000002) ++#define QSPI_BSPI_BPP_ADDR_BPP_SELECT_MASK (0x00010000) ++#define QSPI_BSPI_BPP_MODE_BPP_MASK (0x00000100) ++#define QSPI_BSPI_FLEX_MODE_ENABLE_MASK (0x00000001) ++ ++ ++/* ++ * Module parameters ++ */ ++ ++/* Mulit I/O for read: 0 - single, 1 - dual, 2 - quad */ ++#ifdef CONFIG_IPROC_QSPI_SINGLE_MODE ++static int io_mode = 0; ++#else /* !CONFIG_IPROC_QSPI_SINGLE_MODE */ ++#ifdef CONFIG_IPROC_QSPI_DUAL_MODE ++static int io_mode = 1; ++#else /* !CONFIG_IPROC_QSPI_DUAL_MODE */ ++static int io_mode = 2; ++#endif /* !CONFIG_IPROC_QSPI_DUAL_MODE */ ++#endif /* !CONFIG_IPROC_QSPI_SINGLE_MODE */ ++module_param(io_mode, int, 0444); ++ ++/* Multi I/O for address (only if not in single mode) */ ++#ifdef CONFIG_IPROC_QSPI_MULTI_LANE_ADDR ++static int addr_multi = 1; ++#else /* !CONFIG_IPROC_QSPI_MULTI_LANE_ADDR */ ++static int addr_multi = 0; ++#endif /* !CONFIG_IPROC_QSPI_MULTI_LANE_ADDR */ ++module_param(addr_multi, int, 0444); ++ ++/* Read opcode (only if not in single mode) */ ++#ifdef CONFIG_IPROC_QSPI_SINGLE_MODE ++static int read_opcode = OPCODE_FAST_READ; ++#else /* !CONFIG_IPROC_QSPI_SINGLE_MODE */ ++static int read_opcode = CONFIG_IPROC_QSPI_READ_CMD; ++#endif /* !CONFIG_IPROC_QSPI_SINGLE_MODE */ ++module_param(read_opcode, int, 0444); ++ ++/* Dummy cycles for read (only if not in single mode) */ ++#ifdef CONFIG_IPROC_QSPI_SINGLE_MODE ++static int dummy_cycles = 8; ++#else /* !CONFIG_IPROC_QSPI_SINGLE_MODE */ ++static int dummy_cycles = CONFIG_IPROC_QSPI_READ_DUMMY_CYCLES; ++#endif /* !CONFIG_IPROC_QSPI_SINGLE_MODE */ ++module_param(dummy_cycles, int, 0444); ++ ++/* Max SPI clock HZ */ ++static int max_hz = 0; ++module_param(max_hz, int, 0444); ++ ++/* 
Spansion high performance mode */ ++static int bspi_hp; ++module_param(bspi_hp, int, 0444); ++ ++struct brcmspi_platform_data { ++ int flash_cs; ++}; ++ ++struct bcmspi_parms { ++ u32 speed_hz; ++ u8 chip_select; ++ u8 mode; ++ u8 bits_per_word; ++}; ++ ++struct position { ++ struct spi_message *msg; ++ struct spi_transfer *trans; ++ int byte; ++ int mspi_16bit; ++}; ++ ++#define NUM_TXRAM 32 ++#define NUM_RXRAM 32 ++#define NUM_CDRAM 16 ++ ++struct bcm_mspi_hw { ++ u32 spcr0_lsb; /* 0x000 */ ++ u32 spcr0_msb; /* 0x004 */ ++ u32 spcr1_lsb; /* 0x008 */ ++ u32 spcr1_msb; /* 0x00c */ ++ u32 newqp; /* 0x010 */ ++ u32 endqp; /* 0x014 */ ++ u32 spcr2; /* 0x018 */ ++ u32 reserved0; /* 0x01c */ ++ u32 mspi_status; /* 0x020 */ ++ u32 cptqp; /* 0x024 */ ++ u32 reserved1[6]; /* 0x028 */ ++ u32 txram[NUM_TXRAM]; /* 0x040 */ ++ u32 rxram[NUM_RXRAM]; /* 0x0c0 */ ++ u32 cdram[NUM_CDRAM]; /* 0x140 */ ++ u32 write_lock; /* 0x180 */ ++ u32 disable_flush_gen; /* 0x184 */ ++}; ++ ++struct bcm_bspi_hw { ++ u32 revision_id; /* 0x000 */ ++ u32 scratch; /* 0x004 */ ++ u32 mast_n_boot_ctrl; /* 0x008 */ ++ u32 busy_status; /* 0x00c */ ++ u32 intr_status; /* 0x010 */ ++ u32 b0_status; /* 0x014 */ ++ u32 b0_ctrl; /* 0x018 */ ++ u32 b1_status; /* 0x01c */ ++ u32 b1_ctrl; /* 0x020 */ ++ u32 strap_override_ctrl; /* 0x024 */ ++ u32 flex_mode_enable; /* 0x028 */ ++ u32 bits_per_cycle; /* 0x02C */ ++ u32 bits_per_phase; /* 0x030 */ ++ u32 cmd_and_mode_byte; /* 0x034 */ ++ u32 flash_upper_addr_byte; /* 0x038 */ ++ u32 xor_value; /* 0x03C */ ++ u32 xor_enable; /* 0x040 */ ++ u32 pio_mode_enable; /* 0x044 */ ++ u32 pio_iodir; /* 0x048 */ ++ u32 pio_data; /* 0x04C */ ++}; ++ ++struct bcm_bspi_raf { ++ u32 start_address; /* 0x00 */ ++ u32 num_words; /* 0x04 */ ++ u32 ctrl; /* 0x08 */ ++ u32 fullness; /* 0x0C */ ++ u32 watermark; /* 0x10 */ ++ u32 status; /* 0x14 */ ++ u32 read_data; /* 0x18 */ ++ u32 word_cnt; /* 0x1C */ ++ u32 curr_addr; /* 0x20 */ ++}; ++ ++struct bcm_idm_qspi_ctrl { ++ u32 
io_ctrl_direct; ++}; ++ ++struct bcm_cru_control { ++ u32 cru_control; ++}; ++ ++struct bcm_flex_mode { ++ int width; ++ int addrlen; ++ int hp; ++}; ++ ++#define STATE_IDLE 0 ++#define STATE_RUNNING 1 ++#define STATE_SHUTDOWN 2 ++ ++struct bcmspi_priv { ++ struct platform_device *pdev; ++ struct spi_master *master; ++ spinlock_t lock; ++ struct bcmspi_parms last_parms; ++ struct position pos; ++ struct list_head msg_queue; ++ int state; ++ int outstanding_bytes; ++ int next_udelay; ++ int cs_change; ++ unsigned int mspi_refclk; ++ unsigned int max_speed_hz; ++ volatile struct bcm_mspi_hw *mspi_hw; ++ int irq; ++ struct tasklet_struct tasklet; ++ int curr_cs; ++ ++ /* BSPI */ ++ volatile struct bcm_bspi_hw *bspi_hw; ++ volatile struct bcm_cru_control *cru_hw; ++ int bspi_enabled; ++ /* all chip selects controlled by BSPI */ ++ int bspi_chip_select; ++ ++ /* LR */ ++ volatile struct bcm_bspi_raf *bspi_hw_raf; ++ struct spi_transfer *cur_xfer; ++ u32 cur_xfer_idx; ++ u32 cur_xfer_len; ++ u32 xfer_status; ++ struct spi_message *cur_msg; ++ u32 actual_length; ++ u32 raf_next_addr; ++ u32 raf_next_len; ++ ++ /* Interrupts */ ++ volatile u32 *qspi_intr; ++ volatile struct bcm_idm_qspi_ctrl *idm_qspi; ++ ++ /* current flex mode settings */ ++ struct bcm_flex_mode flex_mode; ++}; ++ ++static void bcmspi_enable_interrupt(struct bcmspi_priv *priv, u32 mask) ++{ ++ priv->idm_qspi->io_ctrl_direct |= cpu_to_le32(mask << 2); ++} ++ ++static void bcmspi_disable_interrupt(struct bcmspi_priv *priv, u32 mask) ++{ ++ priv->idm_qspi->io_ctrl_direct &= cpu_to_le32(~(mask << 2)); ++} ++ ++static void bcmspi_clear_interrupt(struct bcmspi_priv *priv, u32 mask) ++{ ++ int i; ++ ++ for(i=0; iqspi_intr[i] = cpu_to_le32(1); ++ } ++ } ++} ++ ++static u32 bcmspi_read_interrupt(struct bcmspi_priv *priv) ++{ ++ int i; ++ u32 status = 0; ++ ++ for(i=0; iqspi_intr[i] & cpu_to_le32(1)) { ++ status |= 1UL << i; ++ } ++ } ++ ++ return status; ++} ++ ++static void bcmspi_flush_prefetch_buffers(struct 
bcmspi_priv *priv) ++{ ++ priv->bspi_hw->b0_ctrl = 0; ++ priv->bspi_hw->b1_ctrl = 0; ++ priv->bspi_hw->b0_ctrl = cpu_to_le32(1); ++ priv->bspi_hw->b1_ctrl = cpu_to_le32(1); ++} ++ ++static int bcmspi_lr_is_fifo_empty(struct bcmspi_priv *priv) ++{ ++ return priv->bspi_hw_raf->status & cpu_to_le32(QSPI_BSPI_RAF_STATUS_FIFO_EMPTY_MASK); ++} ++ ++static inline u32 bcmspi_lr_read_fifo(struct bcmspi_priv *priv) ++{ ++ /* for performance reasons return the raw data, rather than ++ * byte-swapped data. This works because the caller writes ++ * values 32-bits at a time to the destination buffer, giving ++ * an automatic byte-swap on big-endian machines. */ ++ ++ return priv->bspi_hw_raf->read_data; ++} ++ ++static inline void bcmspi_lr_start(struct bcmspi_priv *priv) ++{ ++ priv->bspi_hw_raf->ctrl = cpu_to_le32(QSPI_BSPI_RAF_CONTROL_START_MASK); ++} ++ ++static inline void bcmspi_lr_clear(struct bcmspi_priv *priv) ++{ ++ priv->bspi_hw_raf->ctrl = cpu_to_le32(QSPI_BSPI_RAF_CONTROL_CLEAR_MASK); ++ bcmspi_flush_prefetch_buffers(priv); ++} ++ ++static inline int bcmspi_is_4_byte_mode(struct bcmspi_priv *priv) ++{ ++ return priv->flex_mode.addrlen == BSPI_ADDRLEN_4BYTES; ++} ++ ++static int bcmbspi_flash_type(struct bcmspi_priv *priv); ++ ++static int bcmspi_set_flex_mode(struct bcmspi_priv *priv, ++ int width, int addrlen, int hp) ++{ ++ int bpc = 0, bpp = dummy_cycles, command = read_opcode; ++ int flex_mode = 1, error = 0; ++ ++ switch (width) { ++ case BSPI_WIDTH_1BIT: ++ if (addrlen == BSPI_ADDRLEN_3BYTES) { ++ /* default mode, does not need flex_cmd */ ++ flex_mode = 0; ++ } else { ++ bpp = 8; /* dummy cycles */ ++ //if (bcmbspi_flash_type(priv) == BSPI_FLASH_TYPE_SPANSION) ++ // command = OPCODE_FAST_READ_4B; ++ //else ++ command = OPCODE_FAST_READ; ++ } ++ break; ++ case BSPI_WIDTH_2BIT: ++ bpc = 0x00000001; /* only data is 2-bit */ ++ if (addr_multi) { ++ bpc |= 0x00010000; ++ } ++ if (hp) { ++ bpc |= 0x00010100; /* address and mode are 2-bit too */ ++ bpp |= 
QSPI_BSPI_BPP_MODE_BPP_MASK; ++ } ++ break; ++ case BSPI_WIDTH_4BIT: ++ bpc = 0x00000002; /* only data is 4-bit */ ++ if (addr_multi) { ++ bpc |= 0x00020000; ++ } ++ if (hp) { ++ bpc |= 0x00020200; /* address and mode are 4-bit too */ ++ bpp |= QSPI_BSPI_BPP_MODE_BPP_MASK; ++ } ++ break; ++ default: ++ error = 1; ++ break; ++ } ++ ++ if (addrlen == BSPI_ADDRLEN_4BYTES) { ++ bpp |= QSPI_BSPI_BPP_ADDR_BPP_SELECT_MASK; ++ } ++ ++ if (!error) { ++ priv->bspi_hw->flex_mode_enable = 0; ++ priv->bspi_hw->bits_per_cycle = cpu_to_le32(bpc); ++ priv->bspi_hw->bits_per_phase = cpu_to_le32(bpp); ++ priv->bspi_hw->cmd_and_mode_byte = cpu_to_le32(command); ++ priv->bspi_hw->flex_mode_enable = flex_mode ? ++ cpu_to_le32(QSPI_BSPI_FLEX_MODE_ENABLE_MASK) ++ : 0; ++ DBG("%s: width=%d addrlen=%d hp=%d\n", ++ __func__, width, addrlen, hp); ++ DBG("%s: fme=%08x bpc=%08x bpp=%08x cmd=%08x\n", __func__, ++ le32_to_cpu(priv->bspi_hw->flex_mode_enable), ++ le32_to_cpu(priv->bspi_hw->bits_per_cycle), ++ le32_to_cpu(priv->bspi_hw->bits_per_phase), ++ le32_to_cpu(priv->bspi_hw->cmd_and_mode_byte)); ++ } ++ ++ return error; ++} ++ ++static void bcmspi_set_mode(struct bcmspi_priv *priv, ++ int width, int addrlen, int hp) ++{ ++ int error = 0; ++ int show_info = 0; ++ ++ if ((width != -1 && width != priv->flex_mode.width) || ++ (hp != -1 && hp != priv->flex_mode.hp)) { ++ /* Don't print things if only for address mode change because it ++ * could be very frequent. */ ++ show_info = 1; ++ } ++ if (width == -1) ++ width = priv->flex_mode.width; ++ if (addrlen == -1) ++ addrlen = priv->flex_mode.addrlen; ++ if (hp == -1) ++ hp = priv->flex_mode.hp; ++ ++ error = bcmspi_set_flex_mode(priv, width, addrlen, hp); ++ ++ if (!error) { ++ priv->flex_mode.width = width; ++ priv->flex_mode.addrlen = addrlen; ++ priv->flex_mode.hp = hp; ++ if (show_info) { ++ dev_info(&priv->pdev->dev, ++ "%d-lane output, %d-byte address%s\n", ++ priv->flex_mode.width, ++ priv->flex_mode.addrlen, ++ priv->flex_mode.hp ? 
", high-performance mode" : ""); ++ } ++ } else ++ dev_warn(&priv->pdev->dev, ++ "INVALID COMBINATION: width=%d addrlen=%d hp=%d\n", ++ width, addrlen, hp); ++} ++ ++static void bcmspi_set_chip_select(struct bcmspi_priv *priv, int cs) ++{ ++ if (priv->curr_cs != cs) { ++ DBG("Switching CS%1d => CS%1d\n", ++ priv->curr_cs, cs); ++ ++ /* We don't have multiple chip selects for now */ ++ } ++ priv->curr_cs = cs; ++ ++} ++ ++static inline int is_bspi_chip_select(struct bcmspi_priv *priv, u8 cs) ++{ ++ return priv->bspi_chip_select & (1 << cs); ++} ++ ++static void bcmspi_disable_bspi(struct bcmspi_priv *priv) ++{ ++ int i; ++ ++ if (!priv->bspi_hw || !priv->bspi_enabled) ++ return; ++ if ((priv->bspi_hw->mast_n_boot_ctrl & cpu_to_le32(1)) == 1) { ++ priv->bspi_enabled = 0; ++ return; ++ } ++ ++ DBG("disabling bspi\n"); ++ for (i = 0; i < 1000; i++) { ++ if ((priv->bspi_hw->busy_status & cpu_to_le32(1)) == 0) { ++ priv->bspi_hw->mast_n_boot_ctrl = cpu_to_le32(1); ++ priv->bspi_enabled = 0; ++ udelay(1); ++ return; ++ } ++ udelay(1); ++ } ++ dev_warn(&priv->pdev->dev, "timeout setting MSPI mode\n"); ++} ++ ++static void bcmspi_enable_bspi(struct bcmspi_priv *priv) ++{ ++ if (!priv->bspi_hw || priv->bspi_enabled) ++ return; ++ if ((priv->bspi_hw->mast_n_boot_ctrl & cpu_to_le32(1)) == 0) { ++ priv->bspi_enabled = 1; ++ return; ++ } ++ ++ DBG("enabling bspi\n"); ++ priv->bspi_hw->mast_n_boot_ctrl = 0; ++ priv->bspi_enabled = 1; ++} ++ ++static void bcmspi_hw_set_parms(struct bcmspi_priv *priv, ++ const struct bcmspi_parms *xp) ++{ ++ if (xp->speed_hz) { ++ unsigned int spbr = priv->mspi_refclk / (2 * xp->speed_hz); ++ ++ priv->mspi_hw->spcr0_lsb = cpu_to_le32(max(min(spbr, SPBR_MAX), SPBR_MIN)); ++ } else { ++ priv->mspi_hw->spcr0_lsb = cpu_to_le32(SPBR_MIN); ++ } ++ ++ if (priv->pos.msg == NULL || xp->bits_per_word > 8) { ++ /* Global hw init for 16bit spi_transfer */ ++ int bits = xp->bits_per_word; ++ bits = bits? (bits == 16? 
0 : bits) : 8; ++ priv->mspi_hw->spcr0_msb = cpu_to_le32(0x80 | /* Master */ ++ (bits << 2) | ++ (xp->mode & 3)); ++ } else { ++ /* Configure for a new 8-bit spi_transfer */ ++ if (priv->pos.byte == 0) { ++ /* Use 16-bit MSPI transfer for performance if applicable */ ++ if (priv->pos.mspi_16bit ^ (!(priv->pos.trans->len & 1))) { ++ /* Update it only if needed */ ++ priv->pos.mspi_16bit = !priv->pos.mspi_16bit; ++ priv->mspi_hw->spcr0_msb = cpu_to_le32(0x80 | /* Master */ ++ ((priv->pos.mspi_16bit? 0 : 8) << 2) | ++ (xp->mode & 3)); ++ } ++ } ++ } ++ priv->last_parms = *xp; ++} ++ ++#define PARMS_NO_OVERRIDE 0 ++#define PARMS_OVERRIDE 1 ++ ++static int bcmspi_update_parms(struct bcmspi_priv *priv, ++ struct spi_device *spidev, struct spi_transfer *trans, int override) ++{ ++ struct bcmspi_parms xp; ++ ++ xp.speed_hz = min(trans->speed_hz ? trans->speed_hz : ++ (spidev->max_speed_hz ? spidev->max_speed_hz : DEFAULT_SPEED_HZ), ++ DEFAULT_SPEED_HZ); ++ xp.chip_select = spidev->chip_select; ++ xp.mode = spidev->mode; ++ xp.bits_per_word = trans->bits_per_word ? trans->bits_per_word : ++ (spidev->bits_per_word ? 
spidev->bits_per_word : 8); ++ ++ if ((override == PARMS_OVERRIDE) || ++ ((xp.speed_hz == priv->last_parms.speed_hz) && ++ (xp.chip_select == priv->last_parms.chip_select) && ++ (xp.mode == priv->last_parms.mode) && ++ (xp.bits_per_word == priv->last_parms.bits_per_word))) { ++ bcmspi_hw_set_parms(priv, &xp); ++ return 0; ++ } ++ /* no override, and parms do not match */ ++ return 1; ++} ++ ++ ++static int bcmspi_setup(struct spi_device *spi) ++{ ++ struct bcmspi_parms *xp; ++ struct bcmspi_priv *priv = spi_master_get_devdata(spi->master); ++ unsigned int speed_hz; ++ ++ DBG("%s\n", __func__); ++ ++ if (spi->bits_per_word > 16) ++ return -EINVAL; ++ ++ /* Module parameter override */ ++ if (max_hz != 0) { ++ speed_hz = max_hz; ++ } else { ++ speed_hz = spi->max_speed_hz; ++ } ++ ++ xp = spi_get_ctldata(spi); ++ if (!xp) { ++ xp = kzalloc(sizeof(struct bcmspi_parms), GFP_KERNEL); ++ if (!xp) ++ return -ENOMEM; ++ spi_set_ctldata(spi, xp); ++ } ++ if (speed_hz < priv->max_speed_hz) ++ xp->speed_hz = speed_hz; ++ else ++ xp->speed_hz = 0; ++ ++ priv->cru_hw->cru_control &= cpu_to_le32(~0x00000006); ++ (void)priv->cru_hw->cru_control; /* Need to read back */ ++ if (speed_hz >= 62500000) { ++ priv->cru_hw->cru_control |= cpu_to_le32(0x00000006); ++ } else if (speed_hz >= 50000000) { ++ priv->cru_hw->cru_control |= cpu_to_le32(0x00000002); ++ } else if (speed_hz >= 31250000) { ++ priv->cru_hw->cru_control |= cpu_to_le32(0x00000004); ++ } ++ (void)priv->cru_hw->cru_control; /* Need to read back */ ++ ++ xp->chip_select = spi->chip_select; ++ xp->mode = spi->mode; ++ xp->bits_per_word = spi->bits_per_word ? 
spi->bits_per_word : 8; ++ ++ return 0; ++} ++ ++/* stop at end of transfer, no other reason */ ++#define FNB_BREAK_NONE 0 ++/* stop at end of spi_message */ ++#define FNB_BREAK_EOM 1 ++/* stop at end of spi_transfer if delay */ ++#define FNB_BREAK_DELAY 2 ++/* stop at end of spi_transfer if cs_change */ ++#define FNB_BREAK_CS_CHANGE 4 ++/* stop if we run out of bytes */ ++#define FNB_BREAK_NO_BYTES 8 ++/* stop at end of spi_transfer */ ++#define FNB_BREAK_EOT 16 ++ ++/* events that make us stop filling TX slots */ ++#define FNB_BREAK_TX (FNB_BREAK_EOM | FNB_BREAK_DELAY | \ ++ FNB_BREAK_CS_CHANGE) ++ ++/* events that make us deassert CS */ ++#define FNB_BREAK_DESELECT (FNB_BREAK_EOM | FNB_BREAK_CS_CHANGE) ++ ++ ++static int find_next_byte(struct bcmspi_priv *priv, struct position *p, ++ struct list_head *completed, int flags) ++{ ++ int ret = FNB_BREAK_NONE; ++ ++ p->byte++; ++ ++ while (p->byte >= p->trans->len) { ++ /* we're at the end of the spi_transfer */ ++ ++ /* in TX mode, need to pause for a delay or CS change */ ++ if (p->trans->delay_usecs && (flags & FNB_BREAK_DELAY)) ++ ret |= FNB_BREAK_DELAY; ++ if (p->trans->cs_change && (flags & FNB_BREAK_CS_CHANGE)) ++ ret |= FNB_BREAK_CS_CHANGE; ++ if (ret) ++ return ret; ++ ++ /* advance to next spi_message? 
*/ ++ if (list_is_last(&p->trans->transfer_list, ++ &p->msg->transfers)) { ++ struct spi_message *next_msg = NULL; ++ ++ /* TX breaks at the end of each message as well */ ++ if (!completed || (flags & FNB_BREAK_EOM)) { ++ DBG("find_next_byte: advance msg exit\n"); ++ return FNB_BREAK_EOM; ++ } ++ if (!list_is_last(&p->msg->queue, &priv->msg_queue)) { ++ next_msg = list_entry(p->msg->queue.next, ++ struct spi_message, queue); ++ } ++ /* delete from run queue, add to completion queue */ ++ list_del(&p->msg->queue); ++ list_add_tail(&p->msg->queue, completed); ++ ++ p->msg = next_msg; ++ p->byte = 0; ++ if (p->msg == NULL) { ++ p->trans = NULL; ++ ret = FNB_BREAK_NO_BYTES; ++ break; ++ } ++ ++ /* ++ * move on to the first spi_transfer of the new ++ * spi_message ++ */ ++ p->trans = list_entry(p->msg->transfers.next, ++ struct spi_transfer, transfer_list); ++ } else { ++ /* or just advance to the next spi_transfer */ ++ p->trans = list_entry(p->trans->transfer_list.next, ++ struct spi_transfer, transfer_list); ++ p->byte = 0; ++ ++ /* Separate spi_transfers into MSPI transfers */ ++ ret = FNB_BREAK_EOT; ++ } ++ } ++ DBG("find_next_byte: msg %p trans %p len %d byte %d ret %x\n", ++ p->msg, p->trans, p->trans ? p->trans->len : 0, p->byte, ret); ++ return ret; ++} ++ ++static void read_from_hw(struct bcmspi_priv *priv, struct list_head *completed) ++{ ++ struct position p; ++ int slot = 0, n = priv->outstanding_bytes; ++ ++ DBG("%s\n", __func__); ++ ++ p = priv->pos; ++ ++ while (n > 0) { ++ BUG_ON(p.msg == NULL); ++ ++ if (p.trans->bits_per_word <= 8) { ++ u8 *buf = p.trans->rx_buf; ++ ++ if (buf) { ++ ++ if (p.mspi_16bit) { ++ /* Using 16-bit SPI transfers for performance */ ++ buf[p.byte] = ++ le32_to_cpu(priv->mspi_hw->rxram[(slot << 1) + 0]) & 0xff; ++ DBG("RD %02x\n", buf ? buf[p.byte] : 0xff); ++ buf[p.byte + 1] = ++ le32_to_cpu(priv->mspi_hw->rxram[(slot << 1) + 1]) & 0xff; ++ DBG("RD %02x\n", buf ? 
buf[p.byte + 1] : 0xff); ++ } else { ++ buf[p.byte] = ++ le32_to_cpu(priv->mspi_hw->rxram[(slot << 1) + 1]) & 0xff; ++ DBG("RD %02x\n", buf ? buf[p.byte] : 0xff); ++ } ++ } ++ } else { ++ u16 *buf = p.trans->rx_buf; ++ ++ if (buf) { ++ buf[p.byte] = ++ ((le32_to_cpu(priv->mspi_hw->rxram[(slot << 1) + 1]) & 0xff) << 0) | ++ ((le32_to_cpu(priv->mspi_hw->rxram[(slot << 1) + 0] & 0xff)) << 8); ++ DBG("RD %04x\n", buf ? buf[p.byte] : 0xffff); ++ } ++ } ++ slot++; ++ n--; ++ p.msg->actual_length++; ++ if (p.mspi_16bit) { ++ p.byte++; ++ p.msg->actual_length++; ++ } ++ ++ find_next_byte(priv, &p, completed, FNB_BREAK_NONE); ++ } ++ ++ priv->pos = p; ++ priv->outstanding_bytes = 0; ++} ++ ++static void write_to_hw(struct bcmspi_priv *priv) ++{ ++ struct position p; ++ int slot = 0, fnb = 0; ++ struct spi_message *msg = NULL; ++ ++ DBG("%s\n", __func__); ++ ++ bcmspi_disable_bspi(priv); ++ ++ p = priv->pos; ++ ++ while (1) { ++ if (p.msg == NULL) ++ break; ++ if (!msg) { ++ msg = p.msg; ++ bcmspi_update_parms(priv, msg->spi, p.trans, ++ PARMS_OVERRIDE); ++ } else { ++ /* break if the speed, bits, etc. changed */ ++ if (bcmspi_update_parms(priv, msg->spi, p.trans, ++ PARMS_NO_OVERRIDE)) { ++ DBG("parms don't match, breaking\n"); ++ break; ++ } ++ } ++ if (p.trans->bits_per_word <= 8) { ++ const u8 *buf = p.trans->tx_buf; ++ ++ priv->mspi_hw->txram[slot << 1] = ++ cpu_to_le32(buf ? (buf[p.byte] & 0xff) : 0xff); ++ DBG("WR %02x\n", buf ? buf[p.byte] : 0xff); ++ ++ if (priv->pos.mspi_16bit) { ++ /* Using 16-bit SPI transfers for performance */ ++ p.byte++; ++ priv->mspi_hw->txram[(slot << 1) + 1] = ++ cpu_to_le32(buf ? (buf[p.byte] & 0xff) : 0xff); ++ DBG("WR %02x\n", buf ? buf[p.byte] : 0xff); ++ priv->mspi_hw->cdram[slot] = cpu_to_le32(0xce); ++ } else { ++ priv->mspi_hw->cdram[slot] = cpu_to_le32(0x8e); ++ } ++ ++ } else { ++ const u16 *buf = p.trans->tx_buf; ++ ++ priv->mspi_hw->txram[(slot << 1) + 0] = ++ cpu_to_le32(buf ? 
(buf[p.byte] >> 8) : 0xff); ++ priv->mspi_hw->txram[(slot << 1) + 1] = ++ cpu_to_le32(buf ? (buf[p.byte] & 0xff) : 0xff); ++ DBG("WR %04x\n", buf ? buf[p.byte] : 0xffff); ++ priv->mspi_hw->cdram[slot] = cpu_to_le32(0xce); ++ } ++ slot++; ++ ++ fnb = find_next_byte(priv, &p, NULL, FNB_BREAK_TX); ++ ++ if (fnb & FNB_BREAK_CS_CHANGE) ++ priv->cs_change = 1; ++ if (fnb & FNB_BREAK_DELAY) ++ priv->next_udelay = p.trans->delay_usecs; ++ if (fnb || (slot == NUM_CDRAM)) ++ break; ++ } ++ ++ if (slot) { ++ DBG("submitting %d slots\n", slot); ++ priv->mspi_hw->newqp = 0; ++ priv->mspi_hw->endqp = cpu_to_le32(slot - 1); ++ ++ /* deassert CS on the final byte */ ++ if (fnb & FNB_BREAK_DESELECT) ++ priv->mspi_hw->cdram[slot - 1] &= cpu_to_le32(~0x80); ++ ++ /* tell HIF_MSPI which CS to use */ ++ bcmspi_set_chip_select(priv, msg->spi->chip_select); ++ ++ priv->mspi_hw->write_lock = cpu_to_le32(1); ++ priv->mspi_hw->spcr2 = cpu_to_le32(0xe0); /* cont | spe | spifie */ ++ ++ priv->state = STATE_RUNNING; ++ priv->outstanding_bytes = slot; ++ } else { ++ priv->mspi_hw->write_lock = 0; ++ priv->state = STATE_IDLE; ++ } ++} ++ ++#define DWORD_ALIGNED(a) (!(((unsigned long)(a)) & 3)) ++#define ACROSS_16MB(a, l) (((a) ^ ((a) + (l) - 1)) & 0xFF000000) ++ ++static int bcmspi_emulate_flash_read(struct bcmspi_priv *priv, ++ struct spi_message *msg) ++{ ++ u32 addr, len; ++ int idx = 0; /* Also used for checking continuation */ ++ unsigned long flags = 0; ++ ++ /* Check if it's a continuation */ ++ if (priv->raf_next_len != 0) { ++ ++ /* Continuation (read across 16MB boundary) */ ++ addr = priv->raf_next_addr; ++ len = priv->raf_next_len; ++ ++ /* Update upper address byte */ ++ if (bcmspi_is_4_byte_mode(priv)) { ++ priv->bspi_hw->flash_upper_addr_byte = cpu_to_le32(addr & 0xFF000000); ++ /* Flush prefecth buffers since upper byte changed */ ++ bcmspi_flush_prefetch_buffers(priv); ++ } ++ ++ } else { ++ ++ /* It's the first session of this transfer */ ++ struct spi_transfer *trans; ++ u8 
*buf; ++ ++ /* acquire lock when the MSPI is idle */ ++ while (1) { ++ spin_lock_irqsave(&priv->lock, flags); ++ if (priv->state == STATE_IDLE) ++ break; ++ spin_unlock_irqrestore(&priv->lock, flags); ++ if (priv->state == STATE_SHUTDOWN) ++ return -EIO; ++ udelay(1); ++ } ++ bcmspi_set_chip_select(priv, msg->spi->chip_select); ++ ++ /* first transfer - OPCODE_READ + 3-byte address */ ++ trans = list_entry(msg->transfers.next, struct spi_transfer, ++ transfer_list); ++ buf = (void *)trans->tx_buf; ++ ++ idx = 1; ++ ++ /* Check upper address byte for 4-byte mode */ ++ if (bcmspi_is_4_byte_mode(priv)) { ++ addr = buf[idx++] << 24; ++ } else { ++ addr = 0; ++ } ++ ++ /* ++ * addr coming into this function is a raw flash offset ++ * we need to convert it to the BSPI address ++ */ ++ addr |= (buf[idx] << 16) | (buf[idx+1] << 8) | buf[idx+2]; ++ ++ /* second transfer - read result into buffer */ ++ trans = list_entry(msg->transfers.next->next, struct spi_transfer, ++ transfer_list); ++ ++ buf = (void *)trans->rx_buf; ++ ++ len = trans->len; ++ ++ /* non-aligned and very short transfers are handled by MSPI */ ++ if (unlikely(!DWORD_ALIGNED(addr) || ++ !DWORD_ALIGNED(buf) || ++ len < sizeof(u32) || ++ !priv->bspi_hw_raf)) { ++ spin_unlock_irqrestore(&priv->lock, flags); ++ return -1; ++ } ++ ++ /* Flush prefetch buffers only if upper address byte changed */ ++ if ((addr & 0xFF000000) != le32_to_cpu(priv->bspi_hw->flash_upper_addr_byte)) { ++ bcmspi_flush_prefetch_buffers(priv); ++ /* Update upper address byte */ ++ priv->bspi_hw->flash_upper_addr_byte = cpu_to_le32(addr & 0xFF000000); ++ } ++ ++ /* Switching to BSPI */ ++ bcmspi_enable_bspi(priv); ++ ++ DBG("%s: dst %p src %p len %x addr BSPI %06x\n", ++ __func__, buf, addr, len, addr); ++ ++ /* initialize software parameters */ ++ priv->xfer_status = 0; ++ priv->cur_xfer = trans; ++ priv->cur_xfer_idx = 0; ++ priv->cur_msg = msg; ++ priv->actual_length = idx + 4 + trans->len; ++ } ++ ++ if (bcmspi_is_4_byte_mode(priv) && 
ACROSS_16MB(addr, len)) { ++ ++ /* Size for the first session */ ++ u32 bytes = 0x1000000 - (addr & 0x00FFFFFF); ++ ++ /* Address and size for remaining sessions */ ++ priv->raf_next_addr = addr + bytes; ++ priv->raf_next_len = len - bytes; ++ ++ len = bytes; ++ ++ } else { ++ priv->raf_next_len = 0; ++ } ++ ++ /* Length for this session */ ++ priv->cur_xfer_len = len; ++ ++ /* setup hardware */ ++ /* address must be 4-byte aligned */ ++ priv->bspi_hw_raf->start_address = cpu_to_le32(addr & 0x00FFFFFF); ++ priv->bspi_hw_raf->num_words = cpu_to_le32((len + 3) >> 2); ++ priv->bspi_hw_raf->watermark = 0; ++ ++ DBG("READ: %08x %08x (%08x)\n", addr, ((len + 3) >> 2), len); ++ ++ bcmspi_clear_interrupt(priv, 0xffffffff); ++ bcmspi_enable_interrupt(priv, BSPI_LR_INTERRUPTS_ALL); ++ bcmspi_lr_start(priv); ++ ++ if (idx) { ++ spin_unlock_irqrestore(&priv->lock, flags); ++ } ++ ++ return 0; ++} ++ ++/* ++ * m25p80_read() calls wait_till_ready() before each read to check ++ * the flash status register for pending writes. ++ * ++ * This can be safely skipped if our last transaction was just an ++ * emulated BSPI read. 
++ */ ++static int bcmspi_emulate_flash_rdsr(struct bcmspi_priv *priv, ++ struct spi_message *msg) ++{ ++ u8 *buf; ++ struct spi_transfer *trans; ++ ++ if (priv->bspi_enabled == 0) ++ return 1; ++ ++ trans = list_entry(msg->transfers.next->next, struct spi_transfer, ++ transfer_list); ++ ++ buf = (void *)trans->rx_buf; ++ *buf = 0x00; ++ ++ msg->actual_length = 2; ++ msg->status = 0; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) ++ spi_finalize_current_message(priv->master); ++#else ++ msg->complete(msg->context); ++#endif ++ ++ return 0; ++} ++ ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) ++static int bcmspi_prepare_transfer(struct spi_master *master) ++{ ++ return 0; ++} ++ ++static int bcmspi_unprepare_transfer(struct spi_master *master) ++{ ++ return 0; ++} ++#endif ++ ++static int bcmspi_transfer_one(struct spi_master *master, struct spi_message *msg) ++{ ++ struct bcmspi_priv *priv = spi_master_get_devdata(master); ++ unsigned long flags; ++ ++ DBG("%s\n", __func__); ++ ++ if (is_bspi_chip_select(priv, msg->spi->chip_select)) { ++ struct spi_transfer *trans; ++ ++ trans = list_entry(msg->transfers.next, ++ struct spi_transfer, transfer_list); ++ if (trans && trans->len && trans->tx_buf) { ++ u8 command = ((u8 *)trans->tx_buf)[0]; ++ switch (command) { ++ case OPCODE_FAST_READ: ++ if (bcmspi_emulate_flash_read(priv, msg) == 0) ++ return 0; ++ break; ++ case OPCODE_RDSR: ++ if (bcmspi_emulate_flash_rdsr(priv, msg) == 0) ++ return 0; ++ break; ++ case OPCODE_EN4B: ++ DBG("ENABLE 4-BYTE MODE\n"); ++ bcmspi_set_mode(priv, -1, BSPI_ADDRLEN_4BYTES, -1); ++ break; ++ case OPCODE_EX4B: ++ DBG("DISABLE 4-BYTE MODE\n"); ++ bcmspi_set_mode(priv, -1, BSPI_ADDRLEN_3BYTES, -1); ++ break; ++ case OPCODE_BRWR: ++ { ++ u8 enable = ((u8 *)trans->tx_buf)[1]; ++ DBG("%s 4-BYTE MODE\n", enable ? "ENABLE" : "DISABLE"); ++ bcmspi_set_mode(priv, -1, ++ enable ? 
BSPI_ADDRLEN_4BYTES : ++ BSPI_ADDRLEN_3BYTES, -1); ++ } ++ break; ++ default: ++ break; ++ } ++ ++ /* Mark prefetch buffers dirty (by using upper byte) if needed */ ++ switch(command) { ++ case OPCODE_RDID: ++ case OPCODE_WREN: ++ case OPCODE_WRDI: ++ case OPCODE_RCR: ++ case OPCODE_READ: ++ case OPCODE_RDSR: ++ case OPCODE_WRSR: ++ case OPCODE_RDFSR: ++ case OPCODE_FAST_READ: ++ case OPCODE_FAST_READ_4B: ++ case OPCODE_EN4B: ++ case OPCODE_EX4B: ++ case OPCODE_BRWR: ++ /* These are known opcodes that are not writing/erasing */ ++ break; ++ default: ++ /* Could be writing/erasing; mark buffers dirty */ ++ priv->bspi_hw->flash_upper_addr_byte = cpu_to_le32(0xff000000); ++ break; ++ } ++ } ++ } ++ ++ spin_lock_irqsave(&priv->lock, flags); ++ ++ if (priv->state == STATE_SHUTDOWN) { ++ spin_unlock_irqrestore(&priv->lock, flags); ++ return -EIO; ++ } ++ ++ msg->actual_length = 0; ++ ++ list_add_tail(&msg->queue, &priv->msg_queue); ++ ++ if (priv->state == STATE_IDLE) { ++ BUG_ON(priv->pos.msg != NULL); ++ priv->pos.msg = msg; ++ priv->pos.trans = list_entry(msg->transfers.next, ++ struct spi_transfer, transfer_list); ++ priv->pos.byte = 0; ++ ++ write_to_hw(priv); ++ } ++ spin_unlock_irqrestore(&priv->lock, flags); ++ ++ return 0; ++} ++ ++#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 4, 0) ++static int bcmspi_transfer(struct spi_device *spi, struct spi_message *msg) ++{ ++ return bcmspi_transfer_one(spi->master, msg); ++} ++#endif ++ ++static void bcmspi_cleanup(struct spi_device *spi) ++{ ++ struct bcmspi_parms *xp = spi_get_ctldata(spi); ++ ++ DBG("%s\n", __func__); ++ ++ kfree(xp); ++} ++ ++static irqreturn_t bcmspi_interrupt(int irq, void *dev_id) ++{ ++ struct bcmspi_priv *priv = dev_id; ++ ++ if (priv->bspi_enabled && priv->cur_xfer) { ++ int done = 0; ++ u32 status = bcmspi_read_interrupt(priv); ++ u32 *buf = (u32 *)priv->cur_xfer->rx_buf; ++ if (status & BSPI_LR_INTERRUPTS_DATA) { ++ while (!bcmspi_lr_is_fifo_empty(priv)) { ++ u32 data = bcmspi_lr_read_fifo(priv); 
++ if (likely(priv->cur_xfer_len >= 4)) { ++ buf[priv->cur_xfer_idx++] = data; ++ priv->cur_xfer_len -= 4; ++ } else { ++ /* ++ * Read out remaining bytes, make sure ++ * we do not cross the buffer boundary ++ */ ++ u8 *cbuf = ++ (u8 *)&buf[priv->cur_xfer_idx]; ++ data = cpu_to_le32(data); ++ while (priv->cur_xfer_len) { ++ *cbuf++ = (u8)data; ++ data >>= 8; ++ priv->cur_xfer_len--; ++ } ++ } ++ } ++ } ++ if (status & BSPI_LR_INTERRUPTS_ERROR) { ++ dev_err(&priv->pdev->dev, "ERROR %02x\n", status); ++ priv->xfer_status = -EIO; ++ } else if ((status & QSPI_INTR_BSPI_LR_SESSION_DONE_MASK) && ++ priv->cur_xfer_len == 0) { ++ ++ if (priv->raf_next_len) { ++ ++ /* Continuation for reading across 16MB boundary */ ++ bcmspi_disable_interrupt(priv, BSPI_LR_INTERRUPTS_ALL); ++ bcmspi_emulate_flash_read(priv, NULL); ++ return IRQ_HANDLED; ++ ++ } else { ++ done = 1; ++ } ++ } ++ ++ if (done) { ++ priv->cur_xfer = NULL; ++ bcmspi_disable_interrupt(priv, BSPI_LR_INTERRUPTS_ALL); ++ ++ if (priv->xfer_status) { ++ bcmspi_lr_clear(priv); ++ } else { ++ if (priv->cur_msg) { ++ priv->cur_msg->actual_length = priv->actual_length; ++ priv->cur_msg->status = 0; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) ++ spi_finalize_current_message(priv->master); ++#else ++ priv->cur_msg->complete(priv->cur_msg->context); ++#endif ++ } ++ } ++ priv->cur_msg = NULL; ++ } ++ bcmspi_clear_interrupt(priv, status); ++ return IRQ_HANDLED; ++ } ++ ++ if (priv->mspi_hw->mspi_status & cpu_to_le32(1)) { ++ /* clear interrupt */ ++ priv->mspi_hw->mspi_status &= cpu_to_le32(~1); ++ bcmspi_clear_interrupt(priv, QSPI_INTR_MSPI_DONE_MASK); ++ ++ tasklet_schedule(&priv->tasklet); ++ return IRQ_HANDLED; ++ } else ++ return IRQ_NONE; ++} ++ ++static void bcmspi_complete(void *arg) ++{ ++ complete(arg); ++} ++ ++static void bcmspi_tasklet(unsigned long param) ++{ ++ struct bcmspi_priv *priv = (void *)param; ++ struct list_head completed; ++ struct spi_message *msg; ++ unsigned long flags; ++ ++ 
INIT_LIST_HEAD(&completed); ++ spin_lock_irqsave(&priv->lock, flags); ++ ++ if (priv->next_udelay) { ++ udelay(priv->next_udelay); ++ priv->next_udelay = 0; ++ } ++ ++ msg = priv->pos.msg; ++ ++ read_from_hw(priv, &completed); ++ if (priv->cs_change) { ++ udelay(10); ++ priv->cs_change = 0; ++ } ++ ++ write_to_hw(priv); ++ spin_unlock_irqrestore(&priv->lock, flags); ++ ++ while (!list_empty(&completed)) { ++ msg = list_first_entry(&completed, struct spi_message, queue); ++ list_del(&msg->queue); ++ msg->status = 0; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) ++ if (msg->complete == bcmspi_complete) ++ msg->complete(msg->context); ++ else ++ spi_finalize_current_message(priv->master); ++#else ++ if (msg->complete) ++ msg->complete(msg->context); ++#endif ++ ++ } ++} ++ ++static struct spi_master *default_master; ++ ++static int bcmspi_simple_transaction(struct bcmspi_parms *xp, ++ const void *tx_buf, int tx_len, void *rx_buf, int rx_len) ++{ ++ DECLARE_COMPLETION_ONSTACK(fini); ++ struct spi_message m; ++ struct spi_transfer t_tx, t_rx; ++ struct spi_device spi; ++ int ret; ++ ++ memset(&spi, 0, sizeof(spi)); ++ spi.max_speed_hz = xp->speed_hz; ++ spi.chip_select = xp->chip_select; ++ spi.mode = xp->mode; ++ spi.bits_per_word = xp->bits_per_word; ++ spi.master = default_master; ++ ++ spi_message_init(&m); ++ m.complete = bcmspi_complete; ++ m.context = &fini; ++ m.spi = &spi; ++ ++ memset(&t_tx, 0, sizeof(t_tx)); ++ memset(&t_rx, 0, sizeof(t_rx)); ++ t_tx.tx_buf = tx_buf; ++ t_tx.len = tx_len; ++ t_rx.rx_buf = rx_buf; ++ t_rx.len = rx_len; ++ ++ if (tx_len) ++ spi_message_add_tail(&t_tx, &m); ++ if (rx_len) ++ spi_message_add_tail(&t_rx, &m); ++ ++ ret = bcmspi_transfer_one(default_master, &m); ++ if (!ret) ++ wait_for_completion(&fini); ++ return ret; ++} ++ ++static void bcmspi_hw_init(struct bcmspi_priv *priv) ++{ ++ const struct bcmspi_parms bcmspi_default_parms_cs0 = { ++ .speed_hz = DEFAULT_SPEED_HZ, ++ .chip_select = 0, ++ .mode = SPI_MODE_3, ++ 
.bits_per_word = 8, ++ }; ++ ++ priv->mspi_hw->spcr1_lsb = 0; ++ priv->mspi_hw->spcr1_msb = 0; ++ priv->mspi_hw->newqp = 0; ++ priv->mspi_hw->endqp = 0; ++ priv->mspi_hw->spcr2 = cpu_to_le32(0x20); /* spifie */ ++ ++ bcmspi_hw_set_parms(priv, &bcmspi_default_parms_cs0); ++ ++ priv->bspi_enabled = 1; ++ bcmspi_disable_bspi(priv); ++} ++ ++static void bcmspi_hw_uninit(struct bcmspi_priv *priv) ++{ ++ priv->mspi_hw->spcr2 = 0x0; /* disable irq and enable bits */ ++ bcmspi_enable_bspi(priv); ++} ++ ++static int bcmbspi_flash_type(struct bcmspi_priv *priv) ++{ ++ char tx_buf[4]; ++ unsigned char jedec_id[5] = {0}; ++ int bspi_flash; ++ ++ /* Read ID */ ++ tx_buf[0] = OPCODE_RDID; ++ bcmspi_simple_transaction(&priv->last_parms, tx_buf, 1, &jedec_id, 5); ++ ++ switch (jedec_id[0]) { ++ case 0x01: /* Spansion */ ++ case 0xef: ++ bspi_flash = BSPI_FLASH_TYPE_SPANSION; ++ break; ++ case 0xc2: /* Macronix */ ++ bspi_flash = BSPI_FLASH_TYPE_MACRONIX; ++ break; ++ case 0xbf: /* SST */ ++ bspi_flash = BSPI_FLASH_TYPE_SST; ++ break; ++ case 0x89: /* Numonyx */ ++ bspi_flash = BSPI_FLASH_TYPE_NUMONYX; ++ break; ++ default: ++ bspi_flash = BSPI_FLASH_TYPE_UNKNOWN; ++ break; ++ } ++ return bspi_flash; ++} ++ ++static int bcmspi_set_quad_mode(struct bcmspi_priv *priv, int _enable) ++{ ++ char tx_buf[4]; ++ unsigned char cfg_reg, sts_reg; ++ ++ switch (bcmbspi_flash_type(priv)) { ++ case BSPI_FLASH_TYPE_SPANSION: ++ /* RCR */ ++ tx_buf[0] = OPCODE_RCR; ++ bcmspi_simple_transaction(&priv->last_parms, ++ tx_buf, 1, &cfg_reg, 1); ++ if (_enable) ++ cfg_reg |= 0x2; ++ else ++ cfg_reg &= ~0x2; ++ /* WREN */ ++ tx_buf[0] = OPCODE_WREN; ++ bcmspi_simple_transaction(&priv->last_parms, ++ tx_buf, 1, NULL, 0); ++ /* WRR */ ++ tx_buf[0] = OPCODE_WRR; ++ tx_buf[1] = 0; /* status register */ ++ tx_buf[2] = cfg_reg; /* configuration register */ ++ bcmspi_simple_transaction(&priv->last_parms, ++ tx_buf, 3, NULL, 0); ++ /* wait till ready */ ++ do { ++ tx_buf[0] = OPCODE_RDSR; ++ 
bcmspi_simple_transaction(&priv->last_parms, ++ tx_buf, 1, &sts_reg, 1); ++ udelay(1); ++ } while (sts_reg & 1); ++ break; ++ case BSPI_FLASH_TYPE_MACRONIX: ++ /* RDSR */ ++ tx_buf[0] = OPCODE_RDSR; ++ bcmspi_simple_transaction(&priv->last_parms, ++ tx_buf, 1, &cfg_reg, 1); ++ if (_enable) ++ cfg_reg |= 0x40; ++ else ++ cfg_reg &= ~0x40; ++ /* WREN */ ++ tx_buf[0] = OPCODE_WREN; ++ bcmspi_simple_transaction(&priv->last_parms, ++ tx_buf, 1, NULL, 0); ++ /* WRSR */ ++ tx_buf[0] = OPCODE_WRSR; ++ tx_buf[1] = cfg_reg; /* status register */ ++ bcmspi_simple_transaction(&priv->last_parms, ++ tx_buf, 2, NULL, 0); ++ /* wait till ready */ ++ do { ++ tx_buf[0] = OPCODE_RDSR; ++ bcmspi_simple_transaction(&priv->last_parms, ++ tx_buf, 1, &sts_reg, 1); ++ udelay(1); ++ } while (sts_reg & 1); ++ /* RDSR */ ++ tx_buf[0] = OPCODE_RDSR; ++ bcmspi_simple_transaction(&priv->last_parms, ++ tx_buf, 1, &cfg_reg, 1); ++ break; ++ case BSPI_FLASH_TYPE_SST: ++ case BSPI_FLASH_TYPE_NUMONYX: ++ /* TODO - send Quad mode control command */ ++ break; ++ default: ++ return _enable ? 
-1 : 0; ++ } ++ ++ return 0; ++} ++ ++static int bcmspi_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct brcmspi_platform_data *pdata; ++ struct bcmspi_priv *priv; ++ struct spi_master *master; ++ struct resource *res; ++ struct clk *clk; ++ int ret; ++ u32 irq; ++#ifdef CONFIG_OF ++ struct device_node *dn = pdev->dev.of_node; ++ u32 qspi_bus_id; ++ u32 qspi_cs; ++ int i, irqs_total; ++#endif ++ ++ DBG("bcmspi_probe\n"); ++ ++ pdata = (struct brcmspi_platform_data *)pdev->dev.platform_data; ++ ++ master = spi_alloc_master(dev, sizeof(struct bcmspi_priv)); ++ if (!master) { ++ dev_err(&pdev->dev, "error allocating spi_master\n"); ++ return -ENOMEM; ++ } ++ ++ priv = spi_master_get_devdata(master); ++ ++ priv->pdev = pdev; ++ priv->state = STATE_IDLE; ++ priv->pos.msg = NULL; ++ priv->pos.mspi_16bit = 0; ++ priv->master = master; ++ priv->raf_next_len = 0; ++ ++#ifndef CONFIG_OF ++ master->bus_num = pdev->id; ++#else ++ if (of_property_read_u32(dn, "#bus-id", &qspi_bus_id)) { ++ dev_warn(&pdev->dev, ++ "missing #bus-id property (default to 1)\n"); ++ qspi_bus_id = 1; ++ } ++ master->bus_num = qspi_bus_id; ++ pdev->id = qspi_bus_id; ++#endif ++ master->num_chipselect = 1; ++ master->mode_bits = SPI_MODE_3; ++ ++ master->setup = bcmspi_setup; ++ master->cleanup = bcmspi_cleanup; ++#if LINUX_VERSION_CODE >= KERNEL_VERSION(3, 4, 0) ++ master->prepare_transfer_hardware = bcmspi_prepare_transfer; ++ master->unprepare_transfer_hardware = bcmspi_unprepare_transfer; ++ master->transfer_one_message = bcmspi_transfer_one; ++ master->transfer = NULL; ++#else ++ master->transfer = bcmspi_transfer; ++#endif ++/* needed for supporting child SPI devices*/ ++#ifdef CONFIG_OF ++ master->dev.of_node = pdev->dev.of_node; ++#endif ++ ++ priv->mspi_hw = NULL; ++ priv->bspi_hw = NULL; ++ priv->bspi_hw_raf = NULL; ++ priv->qspi_intr = NULL; ++ priv->idm_qspi = NULL; ++ priv->irq = -1; ++ ++ /* Get MSPI reference clock and max speed hz */ ++#ifndef 
CONFIG_OF ++ clk = clk_get_sys(MSPI_REFCLK_SOURCE_DEVID, MSPI_REFCLK_SOURCE); ++#else ++ clk = of_clk_get (dn, 0); ++#endif /* CONFIG_OF */ ++ if (!clk) { ++ dev_err(&pdev->dev, "can't get reference clock frequency by %s\n", ++ MSPI_REFCLK_SOURCE); ++ ret = -EIO; ++ goto err2; ++ } ++ priv->mspi_refclk = (unsigned int)clk_get_rate(clk) * 2; ++ priv->max_speed_hz = priv->mspi_refclk / (2 * SPBR_MIN); ++ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "can't get resource 0\n"); ++ ret = -EIO; ++ goto err2; ++ } ++ /* MSPI register range */ ++ priv->mspi_hw = (volatile void *)ioremap(res->start, ++ res->end - res->start); ++ if (!priv->mspi_hw) { ++ dev_err(&pdev->dev, "can't ioremap\n"); ++ ret = -EIO; ++ goto err2; ++ } ++ DBG("priv->mspi_hw=%p\n", priv->mspi_hw); ++ ++ /* BSPI register range */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 1); ++ if (res) { ++ priv->bspi_hw = (volatile void *)ioremap(res->start, ++ res->end - res->start); ++ if (!priv->bspi_hw) { ++ dev_err(&pdev->dev, "can't ioremap BSPI range\n"); ++ ret = -EIO; ++ goto err2; ++ } ++ } else ++ priv->bspi_hw = NULL; ++ DBG("priv->bspi_hw=%p\n", priv->bspi_hw); ++ ++ /* BSPI_RAF register range */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 2); ++ if (res) { ++ priv->bspi_hw_raf = (volatile void *)ioremap(res->start, ++ res->end - res->start); ++ if (!priv->bspi_hw_raf) { ++ dev_err(&pdev->dev, "can't ioremap BSPI_RAF range\n"); ++ ret = -EIO; ++ goto err2; ++ } ++ } else ++ priv->bspi_hw_raf = NULL; ++ DBG("priv->bspi_hw_raf=%p\n", priv->bspi_hw_raf); ++ ++ /* QSPI interrupt register range */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 3); ++ if (res) { ++ priv->qspi_intr = (volatile void *)ioremap(res->start, ++ res->end - res->start); ++ if (!priv->qspi_intr) { ++ dev_err(&pdev->dev, "can't ioremap QSPI interrupt range\n"); ++ ret = -EIO; ++ goto err2; ++ } ++ } else { ++ dev_err(&pdev->dev, "can't get resource 3\n"); ++ ret 
= -EIO; ++ goto err2; ++ } ++ DBG("priv->qspi_intr=%p\n", priv->qspi_intr); ++ ++ /* IDM QSPI io ctrl register range */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 4); ++ if (res) { ++ priv->idm_qspi = (volatile void *)ioremap(res->start, ++ res->end - res->start); ++ if (!priv->idm_qspi) { ++ dev_err(&pdev->dev, "can't ioremap IDM QSPI range\n"); ++ ret = -EIO; ++ goto err2; ++ } ++ } else { ++ dev_err(&pdev->dev, "can't get resource 4\n"); ++ ret = -EIO; ++ goto err2; ++ } ++ DBG("priv->idm_qspi=%p\n", priv->idm_qspi); ++ ++ /* CRU control register */ ++ res = platform_get_resource(pdev, IORESOURCE_MEM, 5); ++ if (res) { ++ priv->cru_hw = (volatile void *)ioremap(res->start, ++ res->end - res->start); ++ if (!priv->cru_hw) { ++ dev_err(&pdev->dev, "can't ioremap CRU range\n"); ++ ret = -EIO; ++ goto err2; ++ } ++ } else { ++ dev_err(&pdev->dev, "can't get resource 4\n"); ++ ret = -EIO; ++ goto err2; ++ } ++ DBG("priv->cru_hw=%p\n", priv->cru_hw); ++ ++ /* IRQ */ ++#ifndef CONFIG_OF ++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (!res) { ++ dev_err(&pdev->dev, "no IRQ defined\n"); ++ ret = -ENODEV; ++ goto err2; ++ } ++#else ++ irqs_total = of_irq_count(dn); ++#endif /*CONFIG_OF*/ ++ ++ /* Basic initialization (before enabling interrupts) */ ++ priv->bspi_hw->mast_n_boot_ctrl = cpu_to_le32(1); ++ bcmspi_disable_interrupt(priv, 0xffffffff); ++ bcmspi_clear_interrupt(priv, 0xffffffff); ++ bcmspi_enable_interrupt(priv, QSPI_INTR_MSPI_DONE_MASK); ++ ++ /* Request all IRQs */ ++#ifndef CONFIG_OF ++ for(irq=(u32)res->start; irq<=(u32)res->end; irq++) { ++#else ++ for (i=0; idev, "unable to allocate IRQ\n"); ++ goto err1; ++ } ++ } ++ ++ bcmspi_hw_init(priv); ++ priv->curr_cs = -1; ++ ++#ifdef CONFIG_OF ++ if (of_property_read_u32(dn, "#chip-select", &qspi_cs)) { ++ dev_warn(&pdev->dev, ++ "missing #chip-selects property (default to 0)\n"); ++ qspi_cs = 0; ++ } ++ priv->bspi_chip_select = (1 << qspi_cs); ++ if (pdata == 0) { ++ struct 
brcmspi_platform_data platformdata; ++ memset(&platformdata, 0, sizeof(platformdata)); ++ platformdata.flash_cs = qspi_cs; ++ platform_device_add_data(pdev, &platformdata, sizeof(platformdata)); ++ pdata = (struct brcmspi_platform_data *)pdev->dev.platform_data; ++ } ++#else ++ priv->bspi_chip_select = (priv->bspi_hw && pdata) ? (1 << pdata->flash_cs) : 0; ++#endif /* CONFIG_OF */ ++ ++ INIT_LIST_HEAD(&priv->msg_queue); ++ spin_lock_init(&priv->lock); ++ ++ platform_set_drvdata(pdev, priv); ++ ++ tasklet_init(&priv->tasklet, bcmspi_tasklet, (unsigned long)priv); ++ ++ if (!default_master) ++ default_master = master; ++ ++ ++ if (priv->bspi_chip_select) { ++ int bspi_width = BSPI_WIDTH_1BIT; ++ ++ /* Module parameter validation */ ++ if (io_mode != 0) { ++ if (read_opcode < 0 || read_opcode > 255) { ++ dev_err(&pdev->dev, "invalid read_opcode\n"); ++ io_mode = 0; ++ } else if (dummy_cycles < 0 || dummy_cycles > 255) { ++ dev_err(&pdev->dev, "invalid dummy_cycles\n"); ++ io_mode = 0; ++ } ++ } ++ if (io_mode == 2) { ++ bspi_width = BSPI_WIDTH_4BIT; ++ } else if (io_mode == 1) { ++ bspi_width = BSPI_WIDTH_2BIT; ++ } else if (io_mode != 0) { ++ dev_err(&pdev->dev, "invalid io_mode (0/1/2)\n"); ++ } ++ ++ if (io_mode == 2) ++ bcmspi_set_quad_mode(priv, 1); ++ ++ bcmspi_set_mode(priv, bspi_width, BSPI_ADDRLEN_3BYTES, bspi_hp); ++ } ++ ++ ret = spi_register_master(master); ++ if (ret < 0) { ++ dev_err(&pdev->dev, "can't register master\n"); ++ goto err0; ++ } ++ ++ return 0; ++ ++err0: ++ bcmspi_hw_uninit(priv); ++err1: ++#ifdef CONFIG_OF ++ while ( i-- ) { ++ irq = irq_of_parse_and_map(dn, i-1); ++ free_irq(irq, priv); ++ } ++#endif ++err2: ++ if (priv->idm_qspi) { ++ iounmap(priv->idm_qspi); ++ } ++ if (priv->qspi_intr) { ++ iounmap(priv->qspi_intr); ++ } ++ if (priv->bspi_hw_raf) { ++ iounmap(priv->bspi_hw_raf); ++ } ++ if (priv->bspi_hw) { ++ iounmap(priv->bspi_hw); ++ } ++ if (priv->mspi_hw) { ++ iounmap(priv->mspi_hw); ++ } ++ spi_master_put(master); ++ return ret; 
++} ++ ++static int bcmspi_remove(struct platform_device *pdev) ++{ ++ struct bcmspi_priv *priv = platform_get_drvdata(pdev); ++ unsigned long flags; ++ u32 irq; ++#ifdef CONFIG_OF ++ struct device_node *dn = pdev->dev.of_node; ++ u32 irq_start=0, irq_end=0; ++#else ++ struct resource *res; ++#endif /* CONFIG_OF */ ++ ++ /* acquire lock when the MSPI is idle */ ++ while (1) { ++ spin_lock_irqsave(&priv->lock, flags); ++ if (priv->state == STATE_IDLE) ++ break; ++ spin_unlock_irqrestore(&priv->lock, flags); ++ udelay(100); ++ } ++ priv->state = STATE_SHUTDOWN; ++ spin_unlock_irqrestore(&priv->lock, flags); ++ ++ tasklet_kill(&priv->tasklet); ++ platform_set_drvdata(pdev, NULL); ++ bcmspi_hw_uninit(priv); ++ if (priv->bspi_hw_raf) ++ iounmap(priv->bspi_hw_raf); ++ if (priv->bspi_hw) ++ iounmap((volatile void __iomem *)priv->bspi_hw); ++#ifdef CONFIG_OF ++ irq_start = irq_of_parse_and_map(dn, 0); ++ irq_end = irq_of_parse_and_map(dn, 1); ++ if (irq_start && irq_end) { ++ for(irq=irq_start; irq<=irq_end; irq++) ++ free_irq(irq, priv); ++ } ++#else ++ res = platform_get_resource(pdev, IORESOURCE_IRQ, 0); ++ if (res) { ++ for(irq=(u32)res->start; irq<=(u32)res->end; irq++) { ++ free_irq(irq, priv); ++ } ++ } ++#endif ++ ++ iounmap((volatile void __iomem *)priv->mspi_hw); ++ spi_unregister_master(priv->master); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++static int bcmspi_suspend(struct device *dev) ++{ ++ struct bcmspi_priv *priv = dev_get_drvdata(dev); ++ int ret; ++ ++ if (priv == NULL || priv->master == NULL) { ++ return -EINVAL; ++ } ++ ++ /* Do nothing if it's not yet initialized */ ++ if (!priv->bspi_hw) ++ return 0; ++ ++ /* Flush transactions and stop the queue */ ++ ret = spi_master_suspend(priv->master); ++ if (ret) { ++ dev_warn(dev, "cannot suspend master\n"); ++ return ret; ++ } ++ ++ /* Disable flex mode */ ++ priv->bspi_hw->flex_mode_enable = 0; ++ ++ /* Clear upper byte */ ++ priv->bspi_hw->flash_upper_addr_byte = 0; ++ ++ /* Ensure BSPI read is clean */ 
++ bcmspi_flush_prefetch_buffers(priv); ++ ++ /* Switch to BSPI for waking up from boot code */ ++ if (!priv->bspi_enabled) ++ priv->bspi_hw->mast_n_boot_ctrl = 0; ++ ++ return 0; ++}; ++ ++static int bcmspi_resume(struct device *dev) ++{ ++ struct bcmspi_priv *priv = dev_get_drvdata(dev); ++ int ret; ++ ++ if (priv == NULL || priv->master == NULL) ++ return -EINVAL; ++ ++ /* Do nothing if it's not yet initialized */ ++ if (!priv->bspi_hw) ++ return 0; ++ ++ /* Restore MSPI/BSPI mode */ ++ priv->bspi_enabled = !priv->bspi_enabled; ++ if (priv->bspi_enabled) ++ bcmspi_disable_bspi(priv); ++ else ++ bcmspi_enable_bspi(priv); ++ ++ /* Restore controller configuration */ ++ bcmspi_hw_set_parms(priv, &priv->last_parms); ++ ++ /* Restore flex mode configuration */ ++ bcmspi_set_mode(priv, ++ priv->flex_mode.width, priv->flex_mode.addrlen, priv->flex_mode.hp); ++ ++ ++ /* Restore interrupts */ ++ bcmspi_disable_interrupt(priv, 0xffffffff); ++ bcmspi_clear_interrupt(priv, 0xffffffff); ++ bcmspi_enable_interrupt(priv, QSPI_INTR_MSPI_DONE_MASK); ++ ++ /* Ensure BSPI read is clean */ ++ bcmspi_flush_prefetch_buffers(priv); ++ ++ /* Start the queue running */ ++ ret = spi_master_resume(priv->master); ++ if (ret) ++ dev_err(dev, "problem starting queue (%d)\n", ret); ++ ++ return ret; ++} ++ ++static const struct dev_pm_ops bcmspi_pm_ops = { ++ .suspend = bcmspi_suspend, ++ .resume = bcmspi_resume, ++}; ++#endif /* CONFIG_PM */ ++ ++ ++#ifdef CONFIG_OF ++static const struct of_device_id qspi_iproc_dt_ids[] = { ++ {.compatible = "brcm,iproc-qspi"}, ++ {}, ++}; ++ ++MODULE_DEVICE_TABLE(of, qspi_iproc_dt_ids); ++ ++ ++static struct platform_driver qspi_iproc_driver = { ++ .driver = { ++ .name = "iproc-qspi", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(qspi_iproc_dt_ids), ++#ifdef CONFIG_PM ++ .pm = &bcmspi_pm_ops, ++#endif ++ }, ++ .probe = bcmspi_probe, ++ .remove = bcmspi_remove, ++}; ++ ++module_platform_driver(qspi_iproc_driver); ++ ++#else /*CONFIG_OF*/ ++ 
++static struct platform_driver driver = { ++ .driver = { ++ .name = "qspi_iproc", ++ .bus = &platform_bus_type, ++ .owner = THIS_MODULE, ++#ifdef CONFIG_PM ++ .pm = &bcmspi_pm_ops, ++#endif ++ }, ++ .probe = bcmspi_probe, ++ .remove = __devexit_p(bcmspi_remove), ++}; ++ ++static int __init bcmspi_spi_init(void) ++{ ++ platform_driver_register(&driver); ++ return 0; ++} ++ ++static void __exit bcmspi_spi_exit(void) ++{ ++ platform_driver_unregister(&driver); ++} ++ ++module_init(bcmspi_spi_init); ++module_exit(bcmspi_spi_exit); ++#endif /*CONFIG_OF*/ ++ ++MODULE_AUTHOR("Broadcom Corporation"); ++MODULE_DESCRIPTION("iProc QSPI driver"); ++MODULE_LICENSE("GPL"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/gadget/legacy/serial.c b/drivers/usb/gadget/legacy/serial.c +--- a/drivers/usb/gadget/legacy/serial.c 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/usb/gadget/legacy/serial.c 2017-11-09 17:54:01.552429000 +0800 +@@ -250,7 +250,7 @@ static int __init init(void) + */ + if (use_acm) { + serial_config_driver.label = "CDC ACM config"; +- serial_config_driver.bConfigurationValue = 2; ++ serial_config_driver.bConfigurationValue = 1; + device_desc.bDeviceClass = USB_CLASS_COMM; + device_desc.idProduct = + cpu_to_le16(GS_CDC_PRODUCT_ID); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig +--- a/drivers/usb/gadget/udc/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/usb/gadget/udc/Kconfig 2017-11-09 17:54:01.588427000 +0800 +@@ -373,6 +373,17 @@ config USB_GADGET_XILINX + dynamically linked module called "udc-xilinx" and force all + gadget drivers to also be dynamically linked. 
+ ++config USB_XGS_IPROC_UDC ++ tristate "Broadcom XGS IPROC USB Device driver" ++ depends on ARCH_XGS_IPROC && USB_GADGET ++ default n ++ help ++ USB peripheral controller driver for Broadcom XGS IPROC USB 2 device. ++ ++ Say "y" to link the driver statically, or "m" to build a dynamically ++ linked module called "xgs_iproc_udc" and force all gadget drivers to ++ also be dynamically linked. ++ + # + # LAST -- dummy/emulated controller + # +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile +--- a/drivers/usb/gadget/udc/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/usb/gadget/udc/Makefile 2017-11-09 17:54:01.589418000 +0800 +@@ -30,4 +30,5 @@ obj-$(CONFIG_USB_FOTG210_UDC) += fotg210 + obj-$(CONFIG_USB_MV_U3D) += mv_u3d_core.o + obj-$(CONFIG_USB_GR_UDC) += gr_udc.o + obj-$(CONFIG_USB_GADGET_XILINX) += udc-xilinx.o ++obj-$(CONFIG_USB_XGS_IPROC_UDC) += xgs_iproc_udc.o + obj-$(CONFIG_USB_BDC_UDC) += bdc/ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/gadget/udc/xgs_iproc_udc.c b/drivers/usb/gadget/udc/xgs_iproc_udc.c +--- a/drivers/usb/gadget/udc/xgs_iproc_udc.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/usb/gadget/udc/xgs_iproc_udc.c 2017-11-09 17:54:01.736429000 +0800 +@@ -0,0 +1,2114 @@ ++/***************************************************************************** ++* Copyright 2006 - 2010 Broadcom Corporation. All rights reserved. ++* ++* Unless you and Broadcom execute a separate written software license ++* agreement governing use of this software, this software is licensed to you ++* under the terms of the GNU General Public License version 2, available at ++* http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). 
++* ++* Notwithstanding the above, under no circumstances may you combine this ++* software in any way with any other Broadcom software provided under a ++* license other than the GPL, without Broadcom's express prior written ++* consent. ++*****************************************************************************/ ++/****************************************************************************/ ++/** ++* @file bcm_dwc_udc.c ++* ++* @brief Broadcom Linux driver for DWC USB 2.0 Device Controller (UDC) ++* ++* This driver implements the Linux Gadget driver API as defined in usb_gadget.h ++* ++* @note ++* ++* This driver was written with the intent of being able to support any ++* variations on how this block is integrated into different Broadcom chips. ++* ++* There is a requirement on how the DWC UDC is configured. In particular, this ++* driver requires that the following options be defined and enabled in the ++* UDC core. ++* ++* UDC20AHB_CNAK_CLR_ENH_CC ++* UDC20AHB_STALL_SET_ENH_CC ++* UDC20AHB_SNAK_ENH_CC ++* ++* Some other UDC attributes can be supported by setting compile time options ++* or with some minor modifications to the source code. Ideally these would ++* be run-time info that is provided by the device instance to the driver. ++* These attributes include the following. 
++* ++* IPROC_UDC_EP_CNT ++* IPROC_UDC_EP_MAX_PKG_SIZE ++* Type of each endpoint: Control, IN, OUT, or Bidirectional ++*/ ++/****************************************************************************/ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++#include "xgs_iproc_udc.h" ++ ++#define XGS_IPROC_UDC_NAME "iproc-udc" ++/* Would be nice if DMA_ADDR_INVALID or similar was defined in dma-mapping.h */ ++#define DMA_ADDR_INVALID (~(dma_addr_t)0) ++/* ++ * FRAME_NUM_INVALID is used for ISOC IN transfers for frame alignment. ++ * The device specifies the interval at which it wants to do transfers, ++ * but the host initiates all transfers. If the interval is some multiple ++ * number of frames, the device has no idea which frame in an interval ++ * window the host is going to start transfers. This could even be at a ++ * point many frames beyond the current window, as the starting point ++ * can be very application dependant and subject to an indeterminate ++ * amount of latency. 
++ */ ++#define FRAME_NUM_INVALID (~(uint)0) ++/* Would be nice if ENOERROR or similar was defined in errno.h */ ++#define ENOERROR 0 ++ ++/* ---- Private Function Prototypes -------------------------------------- */ ++#ifdef IPROC_UDC_DEBUG ++static void iproc_dbg_dma_dump(struct iproc_udc *udc); ++static void iproc_dbg_dma_dump_desc(char *label, struct iproc_udc_dma_desc *virt, struct iproc_udc_dma_desc *phys); ++static void iproc_dbg_dma_dump_ep(struct iproc_ep *ep); ++#endif /* IPROC_UDC_DEBUG */ ++ ++static void iproc_ep_setup_init(struct iproc_ep *ep, int status); ++static void iproc_ep_setup_process(struct iproc_ep *ep, struct usb_ctrlrequest *setup); ++ ++static void iproc_dma_ep_init(struct iproc_ep *ep); ++static void iproc_dma_data_init(struct iproc_ep *ep); ++static void iproc_dma_data_finish(struct iproc_ep *ep); ++static void iproc_dma_data_add_ready(struct iproc_ep *ep); ++static void iproc_dma_data_rm_done(struct iproc_ep *ep); ++ ++static int iproc_platform_dma_alloc(struct platform_device *platformDevP, struct iproc_udc *udc); ++static void iproc_platform_dma_free(struct platform_device *platformDevP, struct iproc_udc *udc); ++ ++static void iproc_udc_req_queue_flush(struct iproc_ep *ep, int status); ++static void iproc_udc_req_xfer_error(struct iproc_ep *ep, int status); ++static void iproc_udc_req_xfer_done(struct iproc_ep *ep, struct iproc_ep_req *req, int status); ++static void iproc_udc_req_xfer_process(struct iproc_ep *ep); ++static void iproc_udc_req_xfer_add(struct iproc_ep *ep, struct iproc_ep_req *req); ++ ++static void iproc_udc_ops_finish(struct iproc_udc *udc); ++static void iproc_udc_ops_init(struct iproc_udc *udc); ++static void iproc_udc_ops_stop(struct iproc_udc *udc); ++static void iproc_udc_ops_start(struct iproc_udc *udc); ++static void iproc_udc_ops_disconnect(struct iproc_udc *udc); ++static void iproc_udc_ops_shutdown(struct iproc_udc *udc); ++ ++static int xgs_iproc_ep_enable(struct usb_ep *ep, const struct 
usb_endpoint_descriptor *desc); ++static int xgs_iproc_ep_disable(struct usb_ep *ep); ++static struct usb_request *xgs_iproc_ep_alloc_request(struct usb_ep *ep, uint gfp_flags); ++static void xgs_iproc_ep_free_request(struct usb_ep *ep, struct usb_request *req); ++static int xgs_iproc_ep_queue(struct usb_ep *ep, struct usb_request *req, uint gfp_flags); ++static int xgs_iproc_ep_dequeue(struct usb_ep *ep, struct usb_request *req); ++static int xgs_iproc_ep_set_halt(struct usb_ep *ep, int value); ++static int xgs_iproc_ep_fifo_status(struct usb_ep *ep); ++static void xgs_iproc_ep_fifo_flush(struct usb_ep *ep); ++ ++static int xgs_iproc_udc_start(struct usb_gadget *, struct usb_gadget_driver *); ++static int xgs_iproc_udc_stop(struct usb_gadget *); ++ ++static int xgs_iproc_udc_probe(struct platform_device *pdev); ++static int xgs_iproc_udc_remove(struct platform_device *pdev); ++ ++static void xgs_iproc_udc_proc_create(void); ++static void xgs_iproc_udc_proc_remove(void); ++ ++/* ---- Private Variables ------------------------------------------------ */ ++static const struct { ++ const char *name; ++ const int type; ++ const int msize; ++ const struct usb_ep_caps caps; ++} xgs_iproc_ep_info[] = { ++#define EP_INFO(_name, _type, _size, _caps) \ ++ { \ ++ .name = _name, \ ++ .type = _type, \ ++ .msize = _size, \ ++ .caps = _caps, \ ++ } ++ ++ EP_INFO("ep0", USB_ENDPOINT_XFER_CONTROL, IPROC_UDC_CTRL_MAX_PKG_SIZE, ++ USB_EP_CAPS(USB_EP_CAPS_TYPE_CONTROL, USB_EP_CAPS_DIR_ALL)), ++ EP_INFO("ep1in", USB_ENDPOINT_XFER_ISOC, IPROC_UDC_EP_MAX_PKG_SIZE, ++ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_IN)), ++ EP_INFO("ep2out", USB_ENDPOINT_XFER_ISOC, IPROC_UDC_EP_MAX_PKG_SIZE, ++ USB_EP_CAPS(USB_EP_CAPS_TYPE_ISO, USB_EP_CAPS_DIR_OUT)), ++ EP_INFO("ep3in", USB_ENDPOINT_XFER_BULK, IPROC_UDC_EP_MAX_PKG_SIZE, ++ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, USB_EP_CAPS_DIR_IN)), ++ EP_INFO("ep4out", USB_ENDPOINT_XFER_BULK, IPROC_UDC_EP_MAX_PKG_SIZE, ++ USB_EP_CAPS(USB_EP_CAPS_TYPE_BULK, 
USB_EP_CAPS_DIR_OUT)), ++ EP_INFO("ep5in", USB_ENDPOINT_XFER_INT, IPROC_UDC_EP_MAX_PKG_SIZE, ++ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_IN)), ++ EP_INFO("ep6out", USB_ENDPOINT_XFER_INT, IPROC_UDC_EP_MAX_PKG_SIZE, ++ USB_EP_CAPS(USB_EP_CAPS_TYPE_INT, USB_EP_CAPS_DIR_OUT)), ++#undef EP_INFO ++}; ++ ++static struct usb_gadget_ops xgs_iproc_udc_ops = { ++ .udc_start = xgs_iproc_udc_start, ++ .udc_stop = xgs_iproc_udc_stop, ++}; ++ ++static struct usb_ep_ops xgs_iproc_udc_ep_ops = { ++ .enable = xgs_iproc_ep_enable, ++ .disable = xgs_iproc_ep_disable, ++ .alloc_request = xgs_iproc_ep_alloc_request, ++ .free_request = xgs_iproc_ep_free_request, ++ .queue = xgs_iproc_ep_queue, ++ .dequeue = xgs_iproc_ep_dequeue, ++ .set_halt = xgs_iproc_ep_set_halt, ++ .fifo_status = xgs_iproc_ep_fifo_status, ++ .fifo_flush = xgs_iproc_ep_fifo_flush, ++}; ++ ++/*********************************************************************** ++ * Convenience functions ++ ***********************************************************************/ ++static inline struct iproc_udc *gadget_to_udc(struct usb_gadget *g) ++{ ++ return container_of(g, struct iproc_udc, gadget); ++} ++ ++static inline struct iproc_ep *our_ep(struct usb_ep *ep) ++{ ++ return container_of(ep, struct iproc_ep, usb_ep); ++} ++ ++static inline struct iproc_ep_req *our_req(struct usb_request *req) ++{ ++ return container_of(req, struct iproc_ep_req, usb_req); ++} ++ ++/**************************************************************************** ++ * DMA descriptor chain routines. ++ * ++ * dma_desc_chain_reset - Initialize chain in preparation for transfer ++ * dma_desc_chain_full - Indicates if no descriptors in chain for available for use. ++ * dma_desc_chain_alloc - Get next free descriptor for use. Have to check if chain not full first. ++ * dma_desc_chain_empty - Indicates if no descriptors in the chain are being used. ++ * dma_desc_chain_head - Pointer to 1st entry in chain. Have to check if chain not empty first. 
++ * dma_desc_chain_free - Frees up 1st entry for use. Only do this if DMA for this descriptor has completed. ++ * ++ ***************************************************************************/ ++static inline struct iproc_udc_dma_desc *dma_desc_chain_alloc(struct iproc_ep *ep) ++{ ++ uint idx; ++ ++ idx = ep->dma.add_idx++; ++ ++ return &ep->dma.vir_addr->desc[IPROC_EP_DMA_DESC_IDX(idx)]; ++} ++ ++static inline int dma_desc_chain_empty(struct iproc_ep *ep) ++{ ++ return ep->dma.add_idx == ep->dma.rm_idx; ++} ++ ++static inline void dma_desc_chain_free(struct iproc_ep *ep) ++{ ++ ep->dma.rm_idx++; ++} ++ ++static inline int dma_desc_chain_full(struct iproc_ep *ep) ++{ ++ return (!dma_desc_chain_empty(ep) && (IPROC_EP_DMA_DESC_IDX(ep->dma.add_idx) == IPROC_EP_DMA_DESC_IDX(ep->dma.rm_idx))); ++} ++ ++static inline struct iproc_udc_dma_desc *dma_desc_chain_head(struct iproc_ep *ep) ++{ ++ return (&ep->dma.vir_addr->desc[IPROC_EP_DMA_DESC_IDX(ep->dma.rm_idx)]); ++} ++ ++static inline void dma_desc_chain_reset(struct iproc_ep *ep) ++{ ++ ep->dma.add_idx = 0; ++ ep->dma.rm_idx = 0; ++} ++ ++ ++/**************************************************************************** ++ * APIs used by a Gadget driver to attach / detach from the UDC driver. 
++ ***************************************************************************/ ++static int xgs_iproc_udc_start(struct usb_gadget *gadget, ++ struct usb_gadget_driver *gadget_driver) ++{ ++ struct iproc_udc *udc = gadget_to_udc(gadget); ++ ulong flags; ++ ++ if (!udc) { ++ dev_err(udc->dev, "UDC driver not initialized\n"); ++ return -ENODEV; ++ } ++ ++ if (!gadget_driver || !gadget_driver->setup || ++ gadget_driver->max_speed < USB_SPEED_FULL) { ++ dev_err(udc->dev, "invalid gadget driver\n" ); ++ return -EINVAL; ++ } ++ ++ spin_lock_irqsave(&udc->lock, flags); ++ ++ if (udc->gadget_driver) { ++ spin_unlock_irqrestore(&udc->lock, flags); ++ dev_err(udc->dev, "UDC driver busy\n"); ++ return -EBUSY; ++ } ++ ++ /* Hook up the gadget driver to the UDC controller driver */ ++ gadget_driver->driver.bus = NULL; ++ udc->gadget_driver = gadget_driver; ++ udc->gadget.dev.driver = &gadget_driver->driver; ++ udc->pullup_on = 1; ++ ++ iproc_udc_ops_start(udc); ++ /* un-stop the control endpoint */ ++ udc->ep[0].stopped = 0; ++ iproc_usbd_bus_conn(udc->usbd_regs); ++ ++ iproc_usbd_setup_done(udc->usbd_regs); ++ iproc_usbd_dma_en(udc->usbd_regs); ++ ++ spin_unlock_irqrestore(&udc->lock, flags); ++ ++ return ENOERROR; ++} ++ ++static int xgs_iproc_udc_stop(struct usb_gadget *gadget) ++{ ++ ulong flags; ++ struct iproc_udc *udc = gadget_to_udc(gadget); ++ ++ if (!udc) { ++ dev_err(udc->dev, "UDC driver not initialized\n"); ++ return -ENODEV; ++ } ++ ++ spin_lock_irqsave(&udc->lock, flags); ++ ++ udc->ep[0].stopped = 1; ++ iproc_udc_ops_stop(udc); ++ udelay(20); ++ udc->pullup_on = 0; ++ iproc_usbd_bus_disconn(udc->usbd_regs); ++ iproc_udc_ops_shutdown(udc); ++ spin_unlock_irqrestore(&udc->lock, flags); ++ ++ return ENOERROR; ++} ++ ++/**************************************************************************** ++ * ++ * Platform device level alloc / free of memory used for DMA descriptors. ++ * A single block of memory static in size is used for DMA descriptors. 
++ * Each endpoint has a small number of descriptors for its exclusive use. ++ * These are chained in a loop. See bcm_udc_dwc.h and iproc_dma_ep_init() for more ++ * details. ++ * ++ ***************************************************************************/ ++static int iproc_platform_dma_alloc(struct platform_device *platformDevP, struct iproc_udc *udc) ++{ ++ udc->dma.vir_addr = dma_alloc_coherent(&platformDevP->dev, sizeof(struct iproc_udc_dma), ++ (dma_addr_t *)&udc->dma.phy_addr, GFP_KERNEL); ++ ++ if (!udc->dma.vir_addr) { ++ dev_err(udc->dev, "dma_alloc_coherent() failed\n"); ++ return -ENOMEM; ++ } ++ ++ return ENOERROR; ++} ++ ++static void iproc_platform_dma_free(struct platform_device *platformDevP, struct iproc_udc *udc) ++{ ++ int idx; ++ ++ dma_free_coherent(&platformDevP->dev, sizeof(struct iproc_udc_dma), udc->dma.vir_addr, ++ (dma_addr_t)udc->dma.phy_addr); ++ ++ for (idx = 0; idx < IPROC_UDC_EP_CNT; idx ++) { ++ if (udc->ep[idx].dma.align_buff) { ++ dma_free_coherent(NULL, udc->ep[idx].dma.align_len, ++ udc->ep[idx].dma.align_buff, ++ udc->ep[idx].dma.align_addr); ++ udc->ep[idx].dma.align_buff = NULL; ++ } ++ } ++} ++ ++/**************************************************************************** ++ * Linux Gadget endpoint operations. See usb_ep_ops in usb_gadget.h. ++ ***************************************************************************/ ++static int xgs_iproc_ep_enable(struct usb_ep *usb_ep, const struct usb_endpoint_descriptor *desc) ++{ ++ struct iproc_ep *ep = our_ep(usb_ep); ++ struct iproc_udc *udc = ep->udc; ++ ulong flags; ++ uint xferType; ++ int ret = ENOERROR; ++ ++ if (!usb_ep || (ep->beq_addr != desc->bEndpointAddress)) { ++ dev_err(udc->dev, "invalid endpoint (%p)\n", usb_ep); ++ return -EINVAL; ++ } ++ ++ if (!desc || (desc->bDescriptorType != USB_DT_ENDPOINT)) { ++ dev_err(udc->dev, "ep%d: invalid descriptor=%p type=%d\n", ep->num, desc, desc ? 
desc->bDescriptorType : -1); ++ return -EINVAL; ++ } ++ ++ if (desc == ep->desc) { ++ dev_warn(udc->dev, "ep%d: already enabled with same descriptor\n", ep->num); ++ return -EEXIST; ++ } ++ ++ if (ep->desc) { ++ dev_warn(udc->dev, "ep%d: already enabled with another descriptor\n", ep->num); ++ return -EBUSY; ++ } ++ ++ if (!udc->gadget_driver || (udc->gadget.speed == USB_SPEED_UNKNOWN)) { ++ dev_warn(udc->dev, "%s: invalid device state\n", ep->usb_ep.name); ++ return -ESHUTDOWN; ++ } ++ ++ xferType = desc->bmAttributes & USB_ENDPOINT_XFERTYPE_MASK; ++ if ((ep->dir == USB_DIR_IN) && (xferType == USB_ENDPOINT_XFER_ISOC)) { ++ if ((desc->bInterval < 1) || (desc->bInterval > 16)) { ++ dev_err(udc->dev, "%s: invalid ISOC bInterval=%u\n", ep->usb_ep.name, desc->bInterval); ++ return -ERANGE; ++ } ++ ++ /* ++ * We don't know when the host will send the first ISO IN request, so we need to set up ++ * to capture that event so we can align subsequent transfers to that particular frame ++ * number. Also set the frame number increment. The endpoint descriptor specifies this ++ * as a power of 2 (2**(n-1)). Translate this into a specific number of frames. ++ */ ++ ep->dma.frame_num = FRAME_NUM_INVALID; ++ ep->dma.frame_incr = 1 << (desc->bInterval - 1); ++ } ++ ++ spin_lock_irqsave(&udc->lock, flags); ++ ++ ep->desc = desc; ++ ep->stopped = 0; ++ ++ /** @todo Rework the UdcEpCfg() so it includes iproc_usbd_ep_cfg_set() ... 
*/ ++ iproc_usbd_ep_cfg_set(udc->usbd_regs, ep->num, iproc_usbd_cfg_num(udc->usbd_regs)); ++ ++ spin_unlock_irqrestore(&udc->lock, flags); ++ ++ return ret; ++} ++ ++static int xgs_iproc_ep_disable(struct usb_ep *usb_ep) ++{ ++ struct iproc_ep *ep = our_ep(usb_ep); ++ struct iproc_udc *udc = ep->udc; ++ ulong flags; ++ int ret = ENOERROR; ++ ++ if (!usb_ep) { ++ dev_err(udc->dev, "invalid endpoint\n"); ++ return -EINVAL; ++ } ++ ++ if (!ep->desc) { ++ dev_warn(udc->dev, "%s: already disabled\n", ep->usb_ep.name); ++ return ENOERROR; ++ } ++ ++ spin_lock_irqsave(&udc->lock, flags); ++ ++ iproc_udc_req_queue_flush(ep, -ESHUTDOWN); ++ iproc_usbd_ep_irq_dis(udc->usbd_regs, ep->num, ep->dir); ++ ep->desc = NULL; ++ ++ spin_unlock_irqrestore(&udc->lock, flags); ++ ++ return ret; ++} ++ ++struct usb_request * xgs_iproc_ep_alloc_request(struct usb_ep *usb_ep, uint gfp_flags) ++{ ++ struct iproc_ep_req *req; ++ ++ if (!usb_ep) { ++ return NULL; ++ } ++ ++ if ((req = kzalloc(sizeof(*req), gfp_flags)) != NULL) { ++ /* ++ * Set the usb_req.dma to DMA_ADDR_INVALID so it can be determined if the usb_req.buf needs ++ * to be mapped when the request is subsequently queued. 
++ */ ++ INIT_LIST_HEAD(&req->list_node); ++ req->usb_req.dma = DMA_ADDR_INVALID; ++ ++ return &req->usb_req; ++ } ++ ++ return NULL; ++} ++ ++static void xgs_iproc_ep_free_request(struct usb_ep *usb_ep, struct usb_request *usb_req) ++{ ++ struct iproc_ep_req *req = our_req(usb_req); ++ ++ if (usb_req) { ++ kfree(req); ++ } ++} ++ ++static int xgs_iproc_ep_queue(struct usb_ep *usb_ep, struct usb_request *usb_req, uint gfp_flags) ++{ ++ struct iproc_ep *ep = our_ep(usb_ep); ++ struct iproc_udc *udc = ep->udc; ++ struct iproc_ep_req *req = our_req(usb_req); ++ ulong flags; ++ int ret = ENOERROR; ++ ++ if (!usb_ep || !usb_req || !req->usb_req.complete || !req->usb_req.buf || !list_empty(&req->list_node)) { ++ dev_err(udc->dev, "invalid request\n"); ++ return -EINVAL; ++ } ++ ++ if (!ep->desc && (ep->num != 0)) { ++ dev_err(udc->dev, "%s: invalid EP state\n", ep->usb_ep.name); ++ return -EFAULT; ++ } ++ ++ if ((ep->type == USB_ENDPOINT_XFER_CONTROL) && !list_empty(&ep->list_queue)) { ++ dev_err(udc->dev, "%s: CTRL EP queue not empty\n", ep->usb_ep.name); ++ return -EPERM; ++ } ++ ++ if (usb_req->length > 16384 /* FSG_BUFLEN */) { ++ dev_err(udc->dev, "%s: request too big, length=%u\n", ep->usb_ep.name, usb_req->length); ++ return -E2BIG; ++ } ++ ++ /* ++ * Restrict ISOC IN requests to the max packet size. Assumption is that it does not make ++ * much sense to have more than one interval's (scheduled bandwidth's) worth of data. 
++ */ ++ if ((ep->type == USB_ENDPOINT_XFER_ISOC) && (ep->dir == USB_DIR_IN) && (usb_req->length > ep->usb_ep.maxpacket)) { ++ dev_err(udc->dev, "%s: request > scheduled bandwidth, length=%u\n", ep->usb_ep.name, usb_req->length); ++ return -EFBIG; ++ } ++ ++ if (!udc->gadget_driver || (udc->gadget.speed == USB_SPEED_UNKNOWN)) { ++ dev_warn(udc->dev, "%s: invalid device state\n", ep->usb_ep.name); ++ return -ESHUTDOWN; ++ } ++ ++ if (((ulong)req->usb_req.buf) & 0x3UL) { ++ /* ++ * The DMA buffer does not have the alignment required by the hardware. We keep an endpoint level ++ * buffer available to handle this situation if it arises. If we don't currently have one available ++ * for this purpose, or if the current one is not large enough, then allocate a new one. Since ++ * we only have one buffer, we won't copy into the buffer until we are ready to do the DMA transfer. ++ * Mark the request as needing this alignment (copy). ++ */ ++ if ((ep->dma.align_buff != NULL) && (ep->dma.align_len < req->usb_req.length)) { ++ dma_free_coherent(NULL, ep->dma.align_len, ep->dma.align_buff, ep->dma.align_addr); ++ ep->dma.align_buff = NULL; ++ } ++ ++ if (ep->dma.align_buff == NULL) { ++ ep->dma.align_len = req->usb_req.length; ++ ep->dma.align_buff = dma_alloc_coherent(NULL, ep->dma.align_len, &(ep->dma.align_addr), GFP_KERNEL); ++ } ++ ++ if (ep->dma.align_buff == NULL) { ++ dev_err(udc->dev, "%s: dma_alloc_coherent() failed, length=%u\n", ep->usb_ep.name, usb_req->length); ++ return -ENOMEM; ++ } ++ ++ req->dma_aligned = 1; ++ } else if ((req->usb_req.dma == DMA_ADDR_INVALID) || (req->usb_req.dma == 0)) { ++ /* A physical address was not provided for the DMA buffer, so request it. */ ++ req->dma_mapped = 1; ++ req->usb_req.dma = dma_map_single(udc->gadget.dev.parent, ++ req->usb_req.buf, ++ req->usb_req.length, ++ (ep->dir == USB_DIR_IN ? 
DMA_TO_DEVICE : DMA_FROM_DEVICE)); ++ } ++ ++ spin_lock_irqsave(&udc->lock, flags); ++ ++ req->usb_req.status = -EINPROGRESS; ++ req->usb_req.actual = 0; ++ ++ if ((ep->type == USB_ENDPOINT_XFER_CONTROL) && (ep->dir == USB_DIR_OUT) && (req->usb_req.length == 0)) { ++ /* ++ * This might happen if gadget driver decides to send zero length packet (ZLP) during STATUS phase ++ * of a control transfer. This may happen for the cases where there is not a DATA phase. Just consider ++ * things complete. ZLP will be issued by hardware. See the handling of SETUP packets for more details ++ * on control transfer processing. ++ */ ++ iproc_udc_req_xfer_done(ep, req, ENOERROR); ++ } else { ++ if (req->usb_req.length == 0) { ++ req->usb_req.zero = 1; ++ } ++ iproc_udc_req_xfer_add(ep, req); ++ } ++ ++ spin_unlock_irqrestore(&udc->lock, flags); ++ ++ return ret; ++} ++ ++static int xgs_iproc_ep_dequeue(struct usb_ep *usb_ep, struct usb_request *usb_req) ++{ ++ struct iproc_ep *ep = our_ep(usb_ep); ++ struct iproc_udc *udc = ep->udc; ++ struct iproc_ep_req *req = our_req(usb_req); ++ ulong flags; ++ int ret = ENOERROR; ++ ++ if (!usb_ep || !usb_req) { ++ dev_err(udc->dev, "invalid request\n"); ++ return -EINVAL; ++ } ++ ++ spin_lock_irqsave(&udc->lock, flags); ++ ++ /* Make sure it's actually queued on this endpoint */ ++ list_for_each_entry(req, &ep->list_queue, list_node) { ++ if (&req->usb_req == usb_req) { ++ break; ++ } ++ } ++ ++ if (&req->usb_req != usb_req) { ++ spin_unlock_irqrestore(&udc->lock, flags); ++ dev_err(udc->dev, "%s: request not queued\n", ep->usb_ep.name); ++ return -ENOLINK; ++ } ++ ++ /** @todo Handle case where the request is in progress, or completed but not dequeued */ ++ ++ iproc_udc_req_xfer_done(ep, req, -ECONNRESET); ++ spin_unlock_irqrestore(&udc->lock, flags); ++ ++ return ret; ++} ++ ++static int xgs_iproc_ep_set_halt(struct usb_ep *usb_ep, int enable) ++{ ++ struct iproc_ep *ep = our_ep(usb_ep); ++ struct iproc_udc *udc = ep->udc; ++ ulong flags; 
++ int ret = ENOERROR; ++ ++ if (!usb_ep) { ++ dev_err(udc->dev, "invalid request\n"); ++ return -EINVAL; ++ } ++ ++ if (ep->type == USB_ENDPOINT_XFER_ISOC) { ++ dev_warn(udc->dev, "%s: ISO HALT operations not supported\n", ep->usb_ep.name); ++ return -EOPNOTSUPP; ++ } ++ ++ if (enable && (ep->dir == USB_DIR_IN) && !list_empty(&ep->list_queue)) { ++ /* Only allow halt on an IN EP if its queue is empty */ ++ dev_err(udc->dev, "%s: IN queue not empty\n", ep->usb_ep.name); ++ return -EAGAIN; ++ } ++ ++ if (!enable && (ep->type == USB_ENDPOINT_XFER_CONTROL)) { ++ /* ++ * Halt clear for a control EP should only be handled as part of the subsequent SETUP ++ * exchange that occurs after the Halt was set. ++ */ ++ dev_warn(udc->dev, "%s: CTRL HALT clear\n", ep->usb_ep.name); ++ return -EPROTO; ++ } ++ ++ spin_lock_irqsave(&udc->lock, flags); ++ ++ if (!enable) { ++ iproc_usbd_ep_stall_dis(udc->usbd_regs, ep->num, ep->dir); ++ } else if (ep->type != USB_ENDPOINT_XFER_CONTROL) { ++ iproc_usbd_ep_stall_en(udc->usbd_regs, ep->num, ep->dir); ++ } else { ++ iproc_usbd_ep_stall_en(udc->usbd_regs, ep->num, USB_DIR_IN); ++ iproc_usbd_ep_stall_en(udc->usbd_regs, ep->num, USB_DIR_OUT); ++ } ++ ++ spin_unlock_irqrestore(&udc->lock, flags); ++ mdelay(2); ++ ++ return ret; ++} ++ ++static int xgs_iproc_ep_fifo_status(struct usb_ep *usb_ep) ++{ ++ /* ++ * The DWC UDC core doesn't have a mechanism for determining the number of bytes ++ * currently in a FIFO. The best that can be done is determine whether or not a ++ * FIFO is empty. However, for the situation where a single Rx FIFO is being ++ * used for all endpoints, if cannot be determined which OUT and CTRL EP's are ++ * affected if the Rx FIFO is not empty. 
++ */ ++ return -EOPNOTSUPP; ++} ++ ++static void xgs_iproc_ep_fifo_flush(struct usb_ep *usb_ep) ++{ ++ struct iproc_ep *ep = our_ep(usb_ep); ++ struct iproc_udc *udc = ep->udc; ++ ulong flags; ++ ++ if (!usb_ep) { ++ dev_err(udc->dev, "invalid request\n"); ++ return; ++ } ++ ++ /* ++ * FIFO flush for a control EP does not make any sense. The SETUP protocol ++ * should eliminate the need to flush. ++ */ ++ if (ep->type == USB_ENDPOINT_XFER_CONTROL) { ++ dev_warn(udc->dev, "%s: CTRL FIFO flush\n", ep->usb_ep.name); ++ return; ++ } ++ ++ if (iproc_usbd_ep_fifo_empty(udc->usbd_regs, ep->num, ep->dir)) { ++ dev_warn(udc->dev, "%s: FIFO empty\n", ep->usb_ep.name); ++ return; ++ } ++ ++ spin_lock_irqsave(&udc->lock, flags); ++ ++ iproc_usbd_ep_fifo_flush_en(udc->usbd_regs, ep->num, ep->dir); ++ ++ spin_unlock_irqrestore(&udc->lock, flags); ++} ++ ++/*************************************************************************** ++ * Routines for debug dump of DMA descriptors ++ **************************************************************************/ ++#ifdef IPROC_UDC_DEBUG ++static void iproc_dbg_dma_dump(struct iproc_udc *udc) ++{ ++ int idx; ++ ++ for (idx = 0; idx < IPROC_UDC_EP_CNT; idx++) { ++ iproc_dbg_dma_dump_ep(&udc->ep[idx]); ++ } ++} ++ ++static void iproc_dbg_dma_dump_desc(char *label, struct iproc_udc_dma_desc *virt, struct iproc_udc_dma_desc *phys) ++{ ++ printk("%s virt=0x%p phys=0x%p: 0x%08x 0x%08x 0x%08x", label, virt, phys, virt->status, virt->reserved, virt->buf_addr); ++} ++ ++static void iproc_dbg_dma_dump_ep(struct iproc_ep *ep) ++{ ++ int idx; ++ ++ printk("EP %d DMA\n", ep->num); ++ printk(" setup\n"); ++ iproc_dbg_dma_dump_desc(" ", (struct iproc_udc_dma_desc *)&ep->dma.vir_addr->setup, (struct iproc_udc_dma_desc *)&ep->dma.phy_addr->setup); ++ printk(" desc\n"); ++ ++ for (idx = 0; idx < IPROC_EP_DMA_DESC_CNT; idx++) { ++ iproc_dbg_dma_dump_desc(" ", &ep->dma.vir_addr->desc[idx], &ep->dma.phy_addr->desc[idx]); ++ ++ /* Don't bother displaying 
entries beyond the last. */ ++ if (IPROC_USBD_READ(ep->dma.vir_addr->desc[idx].status) & IPROC_USBD_REG_DMA_STAT_LAST_DESC) { ++ break; ++ } ++ } ++} ++#endif /* IPROC_UDC_DEBUG */ ++ ++/**************************************************************************** ++ * Initialization of DMA descriptors at the endpoint level. ++ ***************************************************************************/ ++static void iproc_dma_ep_init(struct iproc_ep *ep) ++{ ++ struct iproc_udc *udc = ep->udc; ++ int idx; ++ ++ /** @todo shorten names to virtAddr physAddr?? */ ++ ep->dma.vir_addr = &udc->dma.vir_addr->ep[ep->num]; ++ ep->dma.phy_addr = &udc->dma.phy_addr->ep[ep->num]; ++ ++ /* ++ * Control endpoints only do setup in the OUT direction, so only need to set the ++ * buffer address for that direction. The buffer is set, even if not a control ++ * endpoint, just to simplify things. There's no harm with this. ++ */ ++ ep->dma.vir_addr->setup.status = cpu_to_le32(IPROC_USBD_REG_DMA_STAT_BUF_HOST_BUSY); ++ wmb(); ++ iproc_usbd_ep_dma_buf_addr_set(udc->usbd_regs, ep->num, USB_DIR_OUT, &ep->dma.phy_addr->setup); ++ ++ /* ++ * Take ownership of the DMA descriptors, and chain them in a loop. This allows a small number ++ * descriptors to be used for requests. Need to have the DWC DMA Descriptor Update option enabled ++ * in the device control register in order to do this. When a transfer for a descriptor completes, ++ * the descriptor will get re-used if there's still data left in a request to transfer. See the ++ * iproc_dma_data_rm_done() and iproc_dma_data_add_ready() routines. ++ */ ++ /** @todo Put these in endpoint context?? 
*/ ++ for (idx = 0; idx < IPROC_EP_DMA_DESC_CNT; idx++) { ++ ep->dma.vir_addr->desc[idx].status = cpu_to_le32(IPROC_USBD_REG_DMA_STAT_BUF_HOST_BUSY); ++ wmb(); ++ ep->dma.vir_addr->desc[idx].next_addr = cpu_to_le32((uint)&ep->dma.phy_addr->desc[idx+1]); ++ } ++ ep->dma.vir_addr->desc[(IPROC_EP_DMA_DESC_CNT - 1)].next_addr = cpu_to_le32((uint)&ep->dma.phy_addr->desc[0]); ++ ++ /* ++ * To simplify things, register the descriptor chain in both directions. Control endpoints are the ++ * only type that will be transferring in both directions, but they will only be transferring in one ++ * direction at a time, so should not be any issues with using the same descriptor set for both directions. ++ * For single direction endpoints, the other direction will not be used. ++ */ ++ ++ iproc_usbd_ep_dma_desc_addr_set(udc->usbd_regs, ep->num, USB_DIR_OUT, &ep->dma.phy_addr->desc[0]); ++ iproc_usbd_ep_dma_desc_addr_set(udc->usbd_regs, ep->num, USB_DIR_IN, &ep->dma.phy_addr->desc[0]); ++} ++ ++/**************************************************************************** ++ * DMA data routines. ++ * ++ * A gadget usb_request buf is used for the data. The entire buf contents may ++ * or may not fit into the descriptor chain at once. When the DMA transfer ++ * associated with a descriptor completes, the descriptor is re-used to add ++ * more segments of the usb_request to the chain as necessary. ++ * ++ * iproc_dma_data_init - Initialization in preparation for DMA of usb_request. 
++ * iproc_dma_data_add_ready - Adds usb_request segments into DMA chain until full or no segments left ++ * iproc_dma_data_rm_done - Removes usb_request segments from DMA chain that have completed transfer ++ * iproc_dma_data_finish - Final stage of DMA of the usb_request ++ * ++ ***************************************************************************/ ++static void iproc_dma_data_init(struct iproc_ep *ep) ++{ ++ struct iproc_ep_req *req; ++ struct iproc_udc *udc = ep->udc; ++ ++ req = list_first_entry(&ep->list_queue, struct iproc_ep_req, list_node); ++ ++ if (req->dma_aligned) { ++ /* ++ * This buffer needs to be aligned in order to DMA. We do this by copying into a special buffer we ++ * have for this purpose. Save the original DMA physical address so it can be restored later. ++ * This may not be used, but we'll do it anyways. Then set the DMA address to the aligned buffer ++ * address. Only the DMA physical address is used for the transfers, so the original buffer virtual ++ * address does not need to be changed. Then copy the data into the aligned buffer. ++ */ ++ /** @todo Really only need to do the memcpy for IN data */ ++ ++ req->orig_dma_addr = req->usb_req.dma; ++ req->usb_req.dma = ep->dma.align_addr; ++ memcpy(ep->dma.align_buff, req->usb_req.buf, req->usb_req.length); ++ } ++ ++ ep->dma.done = 0; ++ ep->dma.done_len = 0; ++ ep->dma.todo_len = ep->dma.usb_req->length; ++ ep->dma.buf_addr = ep->dma.usb_req->dma; ++ ep->dma.status = IPROC_USBD_REG_DMA_STAT_RX_SUCCESS; ++ ++ if ((ep->dir == USB_DIR_IN) && (ep->type != USB_ENDPOINT_XFER_ISOC)) { ++ /* ++ * For IN transfers, do not need to segment the buffer into max packet portions ++ * for the DMA descriptors. The hardware will automatically segment into max ++ * packet sizes as necessary. ++ */ ++ ep->dma.max_buf_len = ep->usb_ep.maxpacket; ++ ++ /* ++ * If the request is of zero length, then force the zero flag so iproc_dma_data_add_ready() ++ * will queue the request. 
Conversely, if the gadget has set the zero flag, leave ++ * it set only if it is needed (request length is a multiple of maxpacket) ++ */ ++ if (ep->dma.usb_req->length == 0) { ++ ep->dma.usb_req->zero = 1; ++ } else if (ep->dma.usb_req->zero) { ++ ep->dma.usb_req->zero = (ep->dma.usb_req->length % ep->usb_ep.maxpacket) ? 0 : 1; ++ } ++ } else { ++ ep->dma.max_buf_len = ep->usb_ep.maxpacket; ++ } ++ ++ dma_desc_chain_reset(ep); ++ ++ iproc_usbd_ep_irq_en(udc->usbd_regs, ep->num, ep->dir); ++} ++ ++static void iproc_dma_data_finish(struct iproc_ep *ep) ++{ ++ struct iproc_ep_req *req; ++ struct iproc_udc *udc = ep->udc; ++ ++ iproc_usbd_ep_irq_dis(udc->usbd_regs, ep->num, ep->dir); ++ iproc_usbd_ep_dma_dis(udc->usbd_regs, ep->num, ep->dir); ++ ++ req = list_first_entry(&ep->list_queue, struct iproc_ep_req, list_node); ++ ++ if (req->dma_aligned) { ++ /* ++ * The original request buffer was not aligned properly, so a special buffer was used ++ * for the transfer. Copy the aligned buffer contents into the original. Also restore ++ * the original dma physical address. ++ */ ++ /** @todo Really only need to do the memcpy for OUT setup/data */ ++ memcpy(req->usb_req.buf, ep->dma.align_buff, req->usb_req.length); ++ req->usb_req.dma = req->orig_dma_addr; ++ } ++} ++ ++static void iproc_dma_data_add_ready(struct iproc_ep *ep) ++{ ++ struct iproc_udc *udc = ep->udc; ++ volatile struct iproc_udc_dma_desc *dma_desc = NULL; ++ uint status; ++ uint len; ++ int enable_dma = 0; ++ ++ /* ++ * DMA must be disabled while this loop is running, as multi-descriptor transfers ++ * will have the descriptor chain in an intermediate state until the last descriptor ++ * is written and the chain terminated. ++ */ ++ if (iproc_usbd_dma_status(udc->usbd_regs)) { ++ enable_dma = 1; ++ iproc_usbd_dma_dis(udc->usbd_regs); ++ } ++ ++ if (!ep->dma.todo_len) { ++ ep->dma.usb_req->zero = 1; ++ } ++ ++ /* ++ * Will only have one request in the chain at a time. 
Add request segments to the ++ * chain until all parts of the request have been put in the chain or the chain ++ * has no more room. ++ */ ++ while (!dma_desc_chain_full(ep) && (ep->dma.todo_len || ep->dma.usb_req->zero)) { ++ /* ++ * Get the next descriptor in the chain, and then fill the descriptor contents as needed. ++ * Do not set the descriptor buffer status to ready until last to ensure there's no ++ * contention with the hardware. ++ */ ++ dma_desc = dma_desc_chain_alloc(ep); ++ ++ len = ep->dma.todo_len < ep->dma.max_buf_len ? ep->dma.todo_len : ep->dma.max_buf_len; ++ ep->dma.todo_len -= len; ++ ++ status = 0; ++ ++ if (len < ep->dma.max_buf_len) { ++ /* ++ * If this segment is less than the max, then it is the last segment. There's no need to ++ * send a closing ZLP, although this segment might be a ZLP. Regardless, clear the ZLP flag ++ * to ensure that the processing of this request finishes. Also set the end of the descriptor ++ * chain. ++ */ ++ ep->dma.usb_req->zero = 0; ++ status |= IPROC_USBD_REG_DMA_STAT_LAST_DESC; ++ } else if ((ep->dma.todo_len == 0) && !ep->dma.usb_req->zero) { ++ /* ++ * Segment is of the max packet length. Since there's nothing left, it has to also be the last ++ * last segment. No closing ZLP segment requested, just set the end of the descriptor chain. ++ */ ++ status |= IPROC_USBD_REG_DMA_STAT_LAST_DESC; ++ } ++ ++ if ((ep->dir == USB_DIR_IN) && (ep->type == USB_ENDPOINT_XFER_ISOC)) { ++ /* ++ * Increment the frame number for transmit, then use it for the next packet. The frame number ++ * may get larger than its 13-bit size, but the mask will handle the wrap-around so we don't ++ * need to add checks for this condition. E.g. 0x7ff + 1 = 0x800. 0x800 & 0x7ff = 0 which ++ * is the next number in the sequence. ++ */ ++ /** @todo Handle ISOC PIDs and frame numbers used with HS high bandwidth transfers */ ++ /** @todo Might not need to set the last descriptor status. 
Currently restricting ++ * IN ISOC transfers to the max packet size. ++ */ ++ status |= IPROC_USBD_REG_DMA_STAT_LAST_DESC; ++ ++ ep->dma.frame_num += ep->dma.frame_incr; ++ status |= ((ep->dma.frame_num << IPROC_USBD_REG_DMA_STAT_FRAME_NUM_SHIFT) & IPROC_USBD_REG_DMA_STAT_FRAME_NUM_MASK); ++ } ++ ++ IPROC_USBD_WRITE(dma_desc->buf_addr, ep->dma.buf_addr); ++ status |= (len << IPROC_USBD_REG_DMA_STAT_BYTE_CNT_SHIFT); ++ IPROC_USBD_WRITE(dma_desc->status, status | IPROC_USBD_REG_DMA_STAT_BUF_HOST_READY); ++ wmb(); ++ ep->dma.buf_addr += len; ++ ++ if ((ep->dir == USB_DIR_IN) && (ep->type == USB_ENDPOINT_XFER_ISOC)) { ++ /* With ISOC transfers, only enable one DMA descriptors at a time. ++ */ ++ /** @todo Determine if FIFO will overflow. If it does not, then can remove this check. ++ * This may not even be an issue if the buffer size is restricted to the max packet size ++ * when a request is submitted to the endpoint. ++ */ ++ break; ++ } ++ } ++ /* Set LAST bit on last descriptor we've configured */ ++ if (dma_desc) { ++ IPROC_USBD_BITS_SET(dma_desc->status, IPROC_USBD_REG_DMA_STAT_LAST_DESC); ++ } ++ ++ if (enable_dma) { ++ iproc_usbd_dma_en(udc->usbd_regs); ++ } ++} ++ ++static void iproc_dma_data_rm_done(struct iproc_ep *ep) ++{ ++ struct iproc_udc *udc = ep->udc; ++ volatile struct iproc_udc_dma_desc *dma_desc; ++ uint status; ++ uint len; ++ ++ /* ++ * Will only have one request in the chain at a time. Remove any completed ++ * request segments from the chain so any segments awaiting transfer can ++ * be put in the chain. ++ */ ++ while (!dma_desc_chain_empty(ep)) { ++ /* ++ * Examine the first entry in the chain. If its status is not done, then there's ++ * nothing to remove. ++ */ ++ dma_desc = dma_desc_chain_head(ep); ++ ++ if ((IPROC_USBD_READ(dma_desc->status) & IPROC_USBD_REG_DMA_STAT_BUF_MASK) != IPROC_USBD_REG_DMA_STAT_BUF_DMA_DONE) { ++ break; ++ } ++ ++ /* ++ * The transfer of this request segment has completed. 
Save the status info and then ++ * take ownership of the descriptor. It is simpler to do this than modifying parts of ++ * the descriptor in order to take ownership. Don't put the descriptor back in the chain ++ * until all info affected by the status has been updated, just to be safe. ++ */ ++ status = IPROC_USBD_READ(dma_desc->status); ++ IPROC_USBD_WRITE(dma_desc->status, IPROC_USBD_REG_DMA_STAT_BUF_HOST_BUSY); ++ wmb(); ++ ++ len = (status & IPROC_USBD_REG_DMA_STAT_NON_ISO_BYTE_CNT_MASK) >> IPROC_USBD_REG_DMA_STAT_NON_ISO_BYTE_CNT_SHIFT; ++ ++ /* RX: For multiple descriptors, len is cumulative, not absolute. ++ * RX: So only adjust the dma fields when we get to the last descriptor ++ * TX: Each descriptor entry is absolute, count them all ++ */ ++ if ((ep->dir == USB_DIR_IN) || (status & IPROC_USBD_REG_DMA_STAT_LAST_DESC)) { ++ ep->dma.done_len += len; ++ ep->dma.usb_req->actual += len; ++ } ++ ++ if ((status & IPROC_USBD_REG_DMA_STAT_RX_MASK) != IPROC_USBD_REG_DMA_STAT_RX_SUCCESS) { ++ ep->dma.status = status & IPROC_USBD_REG_DMA_STAT_RX_MASK; ++ ep->dma.usb_req->status = -EIO; ++ dev_warn(udc->dev, "%s: DMA error: desc=0x%p status=0x%x len=%d add=0x%x remove=0x%x\n", ++ ep->usb_ep.name, dma_desc, status, len, ep->dma.add_idx, ep->dma.rm_idx); ++ } ++ ++ if ((ep->dir == USB_DIR_IN) && (ep->type == USB_ENDPOINT_XFER_ISOC)){ ++ /** @todo Determine if this special processing needs to be done. May not to do this if the ++ * buffer size is restricted to the max packet size when a request is submitted to the endpoint. 
++ */ ++ if (ep->dma.usb_req->actual == ep->dma.usb_req->length) { ++ ep->dma.usb_req->status = ENOERROR; ++ } ++ dma_desc_chain_reset(ep); ++ } else { ++ dma_desc_chain_free(ep); ++ } ++ } ++ ++ /* When last segment processed, update status if there has not been an error */ ++ if (!ep->dma.todo_len && (ep->dma.usb_req->status == -EINPROGRESS)) { ++ ep->dma.usb_req->status = ENOERROR; ++ } ++} ++ ++/*************************************************************************** ++ * UDC Operations routines. ++ * iproc_udc_ops_init - Initialization of the UDC in preparation for use by Gadget driver. ++ * iproc_udc_ops_start - Start UDC operations. Happens after a Gadget driver attaches. ++ * iproc_udc_ops_stop - Stop UDC operations. Happens after a Gadget driver detaches. ++ * iproc_udc_ops_finish - Finish / terminate all UDC operations ++ ***************************************************************************/ ++static void iproc_udc_ops_finish(struct iproc_udc *udc) ++{ ++ /* do nothing */ ++ return; ++} ++ ++static void iproc_udc_ops_init(struct iproc_udc *udc) ++{ ++ int idx; ++ struct iproc_ep *ep; ++ ++ iproc_usbd_ops_init(udc->usbd_regs); ++ ++ /* ++ * See usb/gadget/epautoconf.c for endpoint naming conventions. ++ * Control endpoints are bi-directional, but initial transfer (SETUP stage) is always OUT. ++ */ ++ /** @todo Really should make the non endpoint 0 init attributes configurable by the chip specific part ++ * of the driver, idx.e. the device instantiation. The settings below are for a chip specific DWG UDC ++ * core configuration. Also should incorporate the DWG UDC endpoint type attribute as part of this, ++ * which can be control, IN, OUT, or bidirectional. ++ */ ++ INIT_LIST_HEAD(&udc->gadget.ep_list); ++ for (idx = 0; idx < IPROC_UDC_EP_CNT; idx++) { ++ ep = &udc->ep[idx]; ++ ++ ep->udc = udc; ++ ep->num = idx; ++ ++ ep->dir = (xgs_iproc_ep_info[idx].caps.dir_in) ? 
USB_DIR_IN : USB_DIR_OUT;; ++ ep->beq_addr = idx | ep->dir; ++ ep->stopped = 0; ++ ep->type = xgs_iproc_ep_info[idx].type; ++ ++ ep->usb_ep.name = xgs_iproc_ep_info[idx].name; ++ ep->usb_ep.caps = xgs_iproc_ep_info[idx].caps; ++ ep->usb_ep.ops = &xgs_iproc_udc_ep_ops; ++ list_add_tail(&ep->usb_ep.ep_list, &udc->gadget.ep_list); ++ usb_ep_set_maxpacket_limit(&ep->usb_ep, xgs_iproc_ep_info[idx].msize); ++ ep->usb_ep.desc = NULL; ++ INIT_LIST_HEAD(&ep->list_queue); ++ ++ iproc_usbd_ep_ops_init(udc->usbd_regs, ep->num, ep->type, ep->dir, xgs_iproc_ep_info[idx].msize); ++ ++ iproc_dma_ep_init(ep); ++ } ++ ++ udc->gadget.ep0 = &udc->ep[0].usb_ep; ++ list_del(&udc->ep[0].usb_ep.ep_list); ++ ++ iproc_usbd_self_pwr_en(udc->usbd_regs); ++} ++ ++static void iproc_udc_ops_start(struct iproc_udc *udc) ++{ ++ int idx; ++ ++ /* ++ * Just enable interrupts for now. Endpoint 0 will get enabled once the speed enumeration ++ * has completed. The Device DMA enable is global in scope. There's endpoint specific ++ * DMA enables that will happen later. 
++ */ ++ iproc_usbd_irq_en(udc->usbd_regs, (IPROC_USBD_IRQ_SPEED_ENUM_DONE | ++ IPROC_USBD_IRQ_BUS_SUSPEND | ++ IPROC_USBD_IRQ_BUS_IDLE | ++ IPROC_USBD_IRQ_BUS_RESET | ++ IPROC_USBD_IRQ_SET_INTF | ++ IPROC_USBD_IRQ_SET_CFG)); ++ iproc_usbd_dma_en(udc->usbd_regs); ++ ++ /* Enable interrupts for all configured endpoints */ ++ for (idx = 0; idx < IPROC_UDC_EP_CNT; ++idx) { ++ if (udc->ep[idx].usb_ep.name) { ++ iproc_usbd_ep_irq_en(udc->usbd_regs, udc->ep[idx].num, USB_DIR_OUT); ++ iproc_usbd_ep_irq_en(udc->usbd_regs, udc->ep[idx].num, USB_DIR_IN); ++ } ++ } ++ iproc_usbd_nak_response_dis(udc->usbd_regs); ++} ++ ++static void iproc_udc_ops_stop(struct iproc_udc *udc) ++{ ++ struct iproc_ep *ep; ++ ++ iproc_usbd_dma_dis(udc->usbd_regs); ++ iproc_usbd_irq_dis(udc->usbd_regs, IPROC_USBD_IRQ_ALL); ++ iproc_usbd_irq_clear(udc->usbd_regs, IPROC_USBD_IRQ_ALL); ++ ++ udc->gadget.speed = USB_SPEED_UNKNOWN; ++ ++ iproc_udc_req_queue_flush(&udc->ep[0], -ESHUTDOWN); ++ list_for_each_entry(ep, &udc->gadget.ep_list, usb_ep.ep_list) { ++ iproc_udc_req_queue_flush(ep, -ESHUTDOWN); ++ } ++} ++ ++static void iproc_udc_ops_disconnect(struct iproc_udc *udc) ++{ ++ struct iproc_ep *ep; ++ int idx; ++ ++ for (idx = 0; idx < IPROC_UDC_EP_CNT; idx++) { ++ ep = &udc->ep[idx]; ++ ++ if (ep->dma.usb_req) { ++ /* Flush DMA, reqeust still pending */ ++ iproc_usbd_ep_fifo_flush_en(udc->usbd_regs, 0, IPROC_USBD_EP_DIR_IN); ++ iproc_usbd_ep_fifo_flush_dis(udc->usbd_regs, 0, IPROC_USBD_EP_DIR_IN); ++ ++ iproc_udc_req_xfer_process(ep); ++ } ++ } ++} ++ ++static void iproc_udc_ops_shutdown(struct iproc_udc *udc) ++{ ++ struct iproc_ep *ep; ++ ++ udc->ep[0].desc = NULL; ++ list_for_each_entry(ep, &udc->gadget.ep_list, usb_ep.ep_list) { ++ ep->desc = NULL; ++ } ++ ++ udc->gadget.dev.driver = NULL; ++ udc->gadget_driver = NULL; ++} ++ ++ ++/**************************************************************************** ++ * Control Endpoint SETUP related routines. 
++ * ++ * iproc_ep_setup_init - Prepares for next SETUP Rx. Status indicates if STALL req'd. ++ * iproc_ep_setup_process - Handle Rx of a SETUP. ++ ***************************************************************************/ ++static void iproc_ep_setup_init(struct iproc_ep *ep, int status) ++{ ++ struct iproc_udc *udc = ep->udc; ++ ++ /* Re-enable transfers to the SETUP buffer, clear IN and OUT NAKs, and re-enable OUT interrupts. */ ++ ep->dma.vir_addr->setup.status = cpu_to_le32(IPROC_USBD_REG_DMA_STAT_BUF_HOST_READY); ++ ep->dir = USB_DIR_OUT; ++ ep->stopped = 0; ++ ++ if (status == ENOERROR) { ++ /* Handling of previous SETUP was OK. Just clear any NAKs. */ ++ ++ iproc_usbd_ep_nak_clear(udc->usbd_regs, ep->num, USB_DIR_OUT); ++ iproc_usbd_ep_nak_clear(udc->usbd_regs, ep->num, USB_DIR_IN); ++ } else { ++ /* ++ * Handling of previous SETUP failed. Set the STALL. This will get cleared ++ * when the next SETUP is rx'd. ++ */ ++ iproc_usbd_ep_stall_en(udc->usbd_regs, ep->num, USB_DIR_IN); ++ iproc_usbd_ep_stall_en(udc->usbd_regs, ep->num, USB_DIR_OUT); ++ } ++ ++ iproc_usbd_ep_irq_en(udc->usbd_regs, ep->num, USB_DIR_OUT); ++ iproc_usbd_ep_dma_en(udc->usbd_regs, ep->num, USB_DIR_OUT); ++} ++ ++void iproc_ep_setup_process(struct iproc_ep *ep, struct usb_ctrlrequest *setup) ++{ ++ struct iproc_udc *udc = ep->udc; ++ uint value; ++ uint index; ++ uint length; ++ int status; ++ ++ value = le16_to_cpu(setup->wValue); ++ index = le16_to_cpu(setup->wIndex); ++ length = le16_to_cpu(setup->wLength); ++ ++ /* ++ * Any SETUP packets appearing here need to be handled by the gadget driver. Some SETUPs may have ++ * already been silently handled and acknowledged by the DWC UDC. The exceptions to this rule are the ++ * USB_REQ_SET_CONFIGURATION and USB_REQ_SET_INTERFACE, which have been only partially handled with ++ * the expectation that some additional software processing is required in order to complete these requests. ++ * Thus, they have not been acknowledged by the DWC UDC. 
There is no DATA stage for these requests. ++ */ ++ ++ /* ++ * Set the direction of the subsequent DATA stage of a control transfer. This is an ++ * optional stage. It may not exist for all control transfers. If there is a DATA ++ * stage, this info is used for DMA operations for any requests received from the ++ * Gadget driver. ++ */ ++ ++ ep->dir = setup->bRequestType & USB_ENDPOINT_DIR_MASK; ++ ++ if (ep->num != 0) { ++ /** @todo Make changes here if the Linux USB gadget ever supports a control endpoint other ++ * than endpoint 0. The DWC UDC supports multiple control endpoints, and this driver has ++ * been written with this in mind. To make things work, really need to change the Gadget ++ * setup() callback parameters to provide an endpoint context, or add something similar ++ * to the usb_ep structure, or possibly use a usb_request to hold a setup data packet. ++ */ ++ ++ dev_err(udc->dev, "%s: control transfer not supported\n", ep->usb_ep.name); ++ status = -EOPNOTSUPP; ++ } else { ++ /* ++ * Forward the SETUP to the gadget driver for processing. The appropriate directional ++ * interrupt and NAK clear will happen when the DATA stage request is queued. ++ */ ++ spin_unlock(&udc->lock); ++ status = udc->gadget_driver->setup(&udc->gadget, setup); ++ spin_lock(&udc->lock); ++ } ++ ++ if (status < 0) { ++ /* ++ * Error occurred during the processing of the SETUP, so enable STALL. This condition ++ * can only be cleared with the RX of another SETUP, so prepare for that event. ++ */ ++ dev_err(udc->dev, "%s: SETUP %02x.%02x STALL; status=%d\n", ++ ep->usb_ep.name, setup->bRequestType, setup->bRequest, status); ++ ++ iproc_ep_setup_init(ep, status); ++ } else if (length == 0) { ++ /* No DATA stage. Just need to prepare for the next SETUP. */ ++ iproc_ep_setup_init(ep, ENOERROR); ++ } else { ++ /* ++ * The SETUP stage processing has completed OK, and there may or may not be a request queued ++ * for the DATA stage. 
When the DATA stage completes, preparation for the RX of the next ++ * SETUP will be done. ++ */ ++ } ++} ++ ++ ++/**************************************************************************** ++ * IRQ routines. ++ * ++ * xgs_iproc_udc_isr - top level entry point. ++ * iproc_cfg_isr - device (endpoint 0) set config interrupt handler ++ * iproc_inf_isr - device (endpoint 0) set interface interrupt handler ++ * iproc_speed_isr - device speed enumeration done interrupt handler ++ * iproc_ep_in_isr - top level IN endpoint related interrupt handler ++ * iproc_ep_out_isr - top level OUT endpoint related interrupt handler ++ * iproc_ep_out_setup_isr - Control endpoint SETUP Rx handler. This may get ++ * called directly as the result of an endpoint OUT interrupt, or ++ * indirectly as the result of device SET_CFG or SET_INTF. ++ ***************************************************************************/ ++static void iproc_cfg_isr(struct iproc_udc *udc) ++{ ++ struct usb_ctrlrequest setup; ++ int idx; ++ u16 cfg; ++ ++ /* ++ * Device Configuration SETUP has been received. This is not placed in the SETUP ++ * DMA buffer. The packet has to be re-created here so it can be forwarded to the ++ * gadget driver to act upon. ++ */ ++ ++ cfg = (u16) iproc_usbd_cfg_num(udc->usbd_regs); ++ ++ setup.bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE; ++ setup.bRequest = USB_REQ_SET_CONFIGURATION; ++ setup.wValue = cpu_to_le16(cfg); ++ setup.wIndex = 0; ++ setup.wLength = 0; ++ ++ /* ++ * Setting the configuration number before the gadget responds is a bit presumptious, but should ++ * not be fatal. ++ */ ++ /** @todo Do not set endpoint 0? Or is it a don't care? 
*/ ++ for (idx = 0; idx < IPROC_UDC_EP_CNT; idx++) { ++ iproc_usbd_ep_cfg_set(udc->usbd_regs, idx, cfg); ++ } ++ ++ printk(KERN_INFO "SET CFG=%d\n", cfg); ++ ++ iproc_ep_setup_process(&udc->ep[0], &setup); ++ iproc_usbd_setup_done(udc->usbd_regs); ++} ++ ++static void iproc_inf_isr(struct iproc_udc *udc) ++{ ++ struct usb_ctrlrequest setup; ++ uint idx; ++ u16 intf; ++ u16 alt; ++ ++ /* ++ * Device Interface SETUP has been received. This is not placed in the SETUP ++ * DMA buffer. The packet has to be re-created here so it can be forwarded to the ++ * gadget driver to act upon. ++ */ ++ intf = (u16)iproc_usbd_intf_num(udc->usbd_regs); ++ alt = (u16)iproc_usbd_alt_num(udc->usbd_regs); ++ ++ setup.bRequestType = USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_INTERFACE; ++ setup.bRequest = USB_REQ_SET_INTERFACE; ++ setup.wValue = cpu_to_le16(alt); ++ setup.wIndex = cpu_to_le16(intf); ++ setup.wLength = 0; ++ ++ /* ++ * Setting the interface numbers before the gadget responds is a bit presumptious, but should ++ * not be fatal. 
++ */ ++ for (idx = 0; idx < IPROC_UDC_EP_CNT; idx++) { ++ iproc_usbd_ep_alt_set(udc->usbd_regs, idx, alt); ++ iproc_usbd_ep_intf_set(udc->usbd_regs, idx, intf); ++ } ++ ++ iproc_ep_setup_process(&udc->ep[0], &setup); ++ iproc_usbd_setup_done(udc->usbd_regs); ++} ++ ++static void iproc_speed_isr(struct iproc_udc *udc) ++{ ++ uint speed; ++ ++ speed = udc->gadget.speed; ++ ++ switch(iproc_usbd_speed_get(udc->usbd_regs)) { ++ case IPROC_USBD_SPEED_HIGH: ++ printk(KERN_INFO "HIGH SPEED\n"); ++ udc->gadget.speed = USB_SPEED_HIGH; ++ break; ++ case IPROC_USBD_SPEED_FULL: ++ printk(KERN_INFO "FULL SPEED\n"); ++ udc->gadget.speed = USB_SPEED_FULL; ++ break; ++ case IPROC_USBD_SPEED_LOW: ++ dev_warn(udc->dev, "low speed not supported\n"); ++ udc->gadget.speed = USB_SPEED_LOW; ++ break; ++ default: ++ dev_err(udc->dev, "unknown speed=0x%x\n", iproc_usbd_speed_get(udc->usbd_regs)); ++ break; ++ } ++ ++ if ((speed == USB_SPEED_UNKNOWN) && (udc->gadget.speed != USB_SPEED_UNKNOWN)) { ++ /* ++ * Speed has not been enumerated before, so now we can initialize transfers on endpoint 0. ++ * Also have to disable the NAKs at a global level, which has been in place while waiting ++ * for enumeration to complete. ++ */ ++ iproc_ep_setup_init(&udc->ep[0], ENOERROR); ++ iproc_usbd_nak_response_dis(udc->usbd_regs); ++ } ++} ++ ++static void iproc_ep_in_isr(struct iproc_ep *ep) ++{ ++ struct iproc_udc *udc = ep->udc; ++ uint status; ++ ++ status = iproc_usbd_ep_stat_active(udc->usbd_regs, ep->num, USB_DIR_IN); ++ iproc_usbd_ep_stat_clear(udc->usbd_regs, ep->num, USB_DIR_IN, status); ++ ++ if (!status) { ++ return; ++ } ++ ++ /** @todo check might only be for direction... 
*/ ++ if ((ep->dir != USB_DIR_IN) && (ep->type != USB_ENDPOINT_XFER_CONTROL)) { ++ dev_err(udc->dev, "%s: unexpected IN interrupt\n", ep->usb_ep.name); ++ return; ++ } ++ ++ if (ep->dir != USB_DIR_IN) { ++ /* This probably should not be happening */ ++ dev_warn(udc->dev, "%s: CTRL dir OUT\n", ep->usb_ep.name); ++ } ++ ++ if ((ep->type == USB_ENDPOINT_XFER_ISOC) && ++ (status & (IPROC_USBD_EP_STAT_IN_XFER_DONE | IPROC_USBD_EP_STAT_DMA_BUF_UNAVAIL))) { ++ dev_warn(udc->dev, "%s: ISOC IN unexpected status=0x%x\n", ep->usb_ep.name, status); ++ } ++ ++ if (status & IPROC_USBD_EP_STAT_IN_TOKEN_RX) { ++ /* ++ * If there's any IN requests, the DMA should be setup and ready to go if ++ * the endpoint is not an ISOC. Nothing to do in this case. However, if ++ * this is an ISOC endpoint, then this interrupt implies there was no ++ * data available for this frame number. This will happen if the gadget ++ * does not have any data queued to send in this frame, or we have been ++ * waiting for this event to occur so we can get alignment with the host ++ * for the interval. This alignment is necessary when the interval is ++ * greater than one frame / uframe. E.g. for an audio stream sending ++ * samples @ 5ms intervals on a FS link, this corresponds to a period ++ * of 5 frames. Samples with be queued for every 5th frame number after ++ * the frame number in which this interrupt occurred. ++ */ ++ status &= ~IPROC_USBD_EP_STAT_IN_TOKEN_RX; ++ iproc_usbd_ep_nak_clear(udc->usbd_regs, ep->num, USB_DIR_IN); ++ ++ if ((ep->type == USB_ENDPOINT_XFER_ISOC)) { ++ /* Always align to the current frame number for subsequent transfers. */ ++ ep->dma.frame_num = iproc_usbd_last_rx_frame_num(udc->usbd_regs); ++ if (ep->dma.usb_req != NULL) { ++ /* ++ * Might have something queued when waiting for alignment. If something is queued, ++ * it is already too late for the current transfer point. 
It will also have been ++ * placed in the queue at some point before this interrupt, and it will be stale ++ * if we try to transmit at the next transfer point. ++ */ ++ ep->dma.usb_req->status = -EREMOTEIO; ++ iproc_udc_req_xfer_process(ep); ++ } ++ } ++ } ++ ++ if (status & IPROC_USBD_EP_STAT_IN_DMA_DONE) { ++ /* ++ * DMA has completed, but cannot start next transfer until IPROC_USBD_EP_STAT_IN_XFER_DONE. ++ * To avoid race conditions and other issues, do not release the current transfer until both ++ * interrupts have arrived. Normally this interrupt will arrive at or before the IN_XFER_DONE, ++ * but there have been situations when the system is under load that this interrupt might ++ * arrive after the IN_XFER_DONE, in which case we will need to do the processing now. ++ * The exception to this rule is for ISOC endpoints. They will only get this interrupt to ++ * indicate that DMA has completed. ++ */ ++ status &= ~IPROC_USBD_EP_STAT_IN_DMA_DONE; ++ ++ if ((ep->type == USB_ENDPOINT_XFER_ISOC)) { ++ iproc_udc_req_xfer_process(ep); ++ } else if (ep->dma.done & IPROC_USBD_EP_STAT_IN_XFER_DONE) { ++ /* ++ * Did not receive the IN_DMA_DONE interrupt for this request before or ++ * at the same time as the IN_XFER_DONE interrupt, so the request ++ * processing was postponed until the IN_DMA_DONE interrupt arrived. ++ * See handling of IN_XFER_DONE status below. ++ */ ++ iproc_udc_req_xfer_process(ep); ++ } else { ++ /* ++ * IN_DMA_DONE received. Save this info so request processing will be ++ * done when the IN_XFER_DONE interrupt is received. This may happen ++ * immediately, idx.e. both IN_DMA_DONE and IN_XFER_DONE status are ++ * set when the interrupt processing takes place. 
++ */ ++ ep->dma.done = IPROC_USBD_EP_STAT_IN_DMA_DONE; ++ } ++ } ++ ++ if (status & IPROC_USBD_EP_STAT_IN_XFER_DONE) { ++ status &= ~(IPROC_USBD_EP_STAT_IN_XFER_DONE); ++ status &= ~(IPROC_USBD_EP_STAT_IN_FIFO_EMPTY); ++ ++ if (ep->dma.done & IPROC_USBD_EP_STAT_IN_DMA_DONE) { ++ /* ++ * Have received both the IN_DMA_DONE and IN_XFER_DONE interrupts ++ * for this request. OK to process the request (remove the request ++ * and start the next one). ++ */ ++ iproc_udc_req_xfer_process(ep); ++ } else { ++ /* ++ * Have not received the IN_DMA_DONE interrupt for this request. ++ * Need to postpone processing of the request until the IN_DMA_DONE ++ * interrupt occurs. See handling of IN_DMA_DONE status above. ++ */ ++ ep->dma.done = IPROC_USBD_EP_STAT_IN_XFER_DONE; ++ } ++ } ++ ++ /* Clear the FIFO EMPTY bit, not to print error message */ ++ status &= ~(IPROC_USBD_EP_STAT_IN_FIFO_EMPTY); ++ ++ if (status & IPROC_USBD_EP_STAT_DMA_BUF_UNAVAIL) { ++ dev_err(udc->dev, "%s: DMA BUF NOT AVAIL\n", ep->usb_ep.name); ++ status &= ~(IPROC_USBD_EP_STAT_DMA_BUF_UNAVAIL); ++ iproc_udc_req_xfer_process(ep); ++ } ++ ++ if (status & IPROC_USBD_EP_STAT_DMA_ERROR) { ++ status &= ~IPROC_USBD_EP_STAT_DMA_ERROR; ++ dev_err(udc->dev, "%s: DMA ERROR\n", ep->usb_ep.name); ++ iproc_udc_req_xfer_error(ep, -EIO); ++ } ++ ++ if (status) { ++ dev_err(udc->dev, "exit: %s %s: unknown status=0x%x\n", __func__, ep->usb_ep.name, status); ++ } ++} ++ ++static void iproc_ep_out_setup_isr(struct iproc_ep *ep) ++{ ++ struct iproc_udc *udc = ep->udc; ++ struct iproc_udc_dma_setup *dma; ++ ++ dma = &ep->dma.vir_addr->setup; ++ if ((IPROC_USBD_READ(dma->status) & IPROC_USBD_REG_DMA_STAT_BUF_MASK) != IPROC_USBD_REG_DMA_STAT_BUF_DMA_DONE) { ++ dev_err(udc->dev, "%s: unexpected DMA buf status=0x%x\n", ep->usb_ep.name, (IPROC_USBD_READ(dma->status) & IPROC_USBD_REG_DMA_STAT_BUF_MASK)); ++ iproc_ep_setup_init(ep, ENOERROR); ++ } else if ((IPROC_USBD_READ(dma->status) & IPROC_USBD_REG_DMA_STAT_RX_MASK) != 
IPROC_USBD_REG_DMA_STAT_RX_SUCCESS) { ++ dev_err(udc->dev, "%s: unexpected DMA rx status=0x%x\n", ep->usb_ep.name, (IPROC_USBD_READ(dma->status) & IPROC_USBD_REG_DMA_STAT_RX_MASK)); ++ iproc_ep_setup_init(ep, ENOERROR); ++ } else { ++ if (ep->num != 0) { ++ /** @todo Handle the cfg / intf / alt fields of the DMA status. This will only be any issue ++ * once the Linux Gadget driver framework supports control transfers on an endpoint other ++ * than 0. ++ */ ++ dev_warn(udc->dev, "%s: CTRL xfr support not complete\n", ep->usb_ep.name); ++ } ++ /* ++ * Take ownership of the descriptor while processing the request. Ownership will be released ++ * when ready to Rx SETUP again. ++ */ ++ IPROC_USBD_BITS_MODIFY(dma->status, IPROC_USBD_REG_DMA_STAT_BUF_MASK, IPROC_USBD_REG_DMA_STAT_BUF_HOST_BUSY); ++ iproc_ep_setup_process(ep, (struct usb_ctrlrequest *)&dma->data1); ++ } ++} ++ ++static void iproc_ep_out_isr(struct iproc_ep *ep) ++{ ++ struct iproc_udc *udc = ep->udc; ++ uint status; ++ ++ status = iproc_usbd_ep_stat_active(udc->usbd_regs, ep->num, USB_DIR_OUT); ++ iproc_usbd_ep_stat_clear(udc->usbd_regs, ep->num, USB_DIR_OUT, status); ++ ++ /* ++ * Remove the Rx packet size field from the status. The datasheet states this field is not used ++ * in DMA mode, but that is not true. 
++ */ ++ status &= IPROC_USBD_EP_STAT_ALL; ++ ++ if (!status) { ++ return; ++ } ++ ++ if ((ep->dir != USB_DIR_OUT) && (ep->type != USB_ENDPOINT_XFER_CONTROL)) { ++ dev_err(udc->dev, "%s: unexpected OUT interrupt\n", ep->usb_ep.name); ++ return; ++ } ++ ++ if (ep->dir != USB_DIR_OUT) { ++ /* This probably should not be happening */ ++ dev_err(udc->dev, "%s: CTRL dir IN\n", ep->usb_ep.name); ++ } ++ ++ if (status & IPROC_USBD_EP_STAT_OUT_DMA_DATA_DONE) { ++ status &= ~IPROC_USBD_EP_STAT_OUT_DMA_DATA_DONE; ++ iproc_udc_req_xfer_process(ep); ++ } ++ ++ if (status & IPROC_USBD_EP_STAT_OUT_DMA_SETUP_DONE) { ++ status &= ~IPROC_USBD_EP_STAT_OUT_DMA_SETUP_DONE; ++ iproc_ep_out_setup_isr(ep); ++ } ++ ++ if (status & IPROC_USBD_EP_STAT_DMA_BUF_UNAVAIL) { ++ /** @todo Verify under what situations this can happen. Should be when chain has emptied but last desc not reached */ ++ /** @todo status for desc updates */ ++ ++ status &= ~IPROC_USBD_EP_STAT_DMA_BUF_UNAVAIL; ++ dev_err(udc->dev, "%s: DMA BUF NOT AVAIL\n", ep->usb_ep.name); ++ iproc_udc_req_xfer_process(ep); ++ } ++ ++ if (status & IPROC_USBD_EP_STAT_DMA_ERROR) { ++ status &= ~IPROC_USBD_EP_STAT_DMA_ERROR; ++ dev_err(udc->dev, "%s: DMA ERROR\n", ep->usb_ep.name); ++ /** @todo merge XferError and XferProcess?? 
*/ ++ iproc_udc_req_xfer_error(ep, -EIO); ++ } ++ ++ if (status) { ++ dev_err(udc->dev, "%s: unknown status=0x%x\n", ep->usb_ep.name, status); ++ } ++} ++ ++irqreturn_t xgs_iproc_udc_isr(int irq, void *context) ++{ ++ struct iproc_udc *udc = NULL; ++ ulong flags; ++ uint stat, epin_stat, epout_stat; ++ int idx; ++ ++ udc = (struct iproc_udc *)context; ++ ++ spin_lock_irqsave(&udc->lock, flags); ++ ++ if (!udc || !udc->gadget_driver) { ++ dev_err(udc->dev, "Invalid context or no driver registered: irq dev=0x%x\n", iproc_usbd_irq_active(udc->usbd_regs)); ++ ++ iproc_usbd_irq_clear(udc->usbd_regs, IPROC_USBD_IRQ_ALL); ++ iproc_usbd_ep_irq_list_clear(udc->usbd_regs, USB_DIR_IN, ~0); ++ iproc_usbd_ep_irq_list_clear(udc->usbd_regs, USB_DIR_OUT, ~0); ++ ++ spin_unlock_irqrestore(&udc->lock, flags); ++ return IRQ_HANDLED; ++ } ++ ++ stat = iproc_usbd_irq_active(udc->usbd_regs); ++ epin_stat = iproc_usbd_ep_irq_list_active(udc->usbd_regs, USB_DIR_IN); ++ epout_stat = iproc_usbd_ep_irq_list_active(udc->usbd_regs, USB_DIR_OUT); ++ ++ if (!(stat || epin_stat || epout_stat)) { ++ return IRQ_NONE; ++ } ++ ++ iproc_usbd_irq_clear(udc->usbd_regs, stat); ++ iproc_usbd_ep_irq_list_clear(udc->usbd_regs, USB_DIR_IN, epin_stat); ++ iproc_usbd_ep_irq_list_clear(udc->usbd_regs, USB_DIR_OUT, epout_stat); ++ ++ /* ++ * Handle the SET_CFG and SET_INTF interrupts after the endpoint and other device interrupts. ++ * There can be some race conditions where we have an endpoint 0 interrupt pending for the ++ * completion of a previous endpoint 0 transfer (e.g. a GET config) when a SETUP arrives ++ * corresponding to the SET_CFG and SET_INTF. Need to complete the processing of the previous ++ * transfer before handling the next one, idx.e. the SET_CFG or SET_INTF. 
++ */ ++ if (stat & IPROC_USBD_IRQ_BUS_RESET) { ++ printk(KERN_INFO "BUS reset\n"); ++ } ++ if (stat & IPROC_USBD_IRQ_BUS_SUSPEND) { ++ dev_dbg(udc->dev, "BUS suspend\n"); ++ } ++ if (stat & IPROC_USBD_IRQ_BUS_IDLE) { ++ dev_dbg(udc->dev, "BUS idle\n"); ++ iproc_udc_ops_disconnect(udc); ++ } ++ if (stat & IPROC_USBD_IRQ_SPEED_ENUM_DONE) { ++ dev_dbg(udc->dev, "BUS speed enum done\n"); ++ iproc_speed_isr(udc); ++ } ++ ++ /* endpoint interrupts handler */ ++ for (idx = 0; idx < IPROC_UDC_EP_CNT; idx++) { ++ if (epin_stat & (1 << idx)) { ++ iproc_ep_in_isr(&udc->ep[idx]); ++ } ++ if (epout_stat & (1 << idx)) { ++ iproc_ep_out_isr(&udc->ep[idx]); ++ } ++ } ++ ++ /* SET_CFG and SET_INTF interrupts handler */ ++ if (stat & IPROC_USBD_IRQ_SET_CFG) { ++ iproc_cfg_isr(udc); ++ } ++ if (stat & IPROC_USBD_IRQ_SET_INTF) { ++ iproc_inf_isr(udc); ++ } ++ ++ spin_unlock_irqrestore(&udc->lock, flags); ++ ++ return IRQ_HANDLED; ++} ++ ++/*************************************************************************** ++* Endpoint request operations ++***************************************************************************/ ++static void iproc_udc_req_queue_flush(struct iproc_ep *ep, int status) ++{ ++ struct iproc_udc *udc = ep->udc; ++ struct iproc_ep_req *req; ++ ++ ep->stopped = 1; ++ iproc_usbd_ep_ops_finish(udc->usbd_regs, ep->num); ++ ++ while (!list_empty(&ep->list_queue)) { ++ req = list_first_entry(&ep->list_queue, struct iproc_ep_req, list_node); ++ iproc_udc_req_xfer_done(ep, req, status); ++ } ++ ep->dma.usb_req = NULL; ++} ++ ++ ++static void iproc_udc_req_xfer_add(struct iproc_ep *ep, struct iproc_ep_req *req) ++{ ++ struct iproc_udc *udc = ep->udc; ++ list_add_tail(&req->list_node, &ep->list_queue); ++ ++ /** @todo Is this necessary?? Stopped happens as a result of a halt, complete(), dequeue(), nuke(). ++ * nuke() is called when ep disabled, during setup processing, and by udc_queisce(). 
The latter is ++ * called during vbus state change (cable insert/remove), USB reset interrupt, and gadget deregister. ++ */ ++ if (ep->stopped) { ++ return; ++ } ++ ++ if ((ep->dir == USB_DIR_IN) && (ep->type == USB_ENDPOINT_XFER_ISOC) && ep->dma.usb_req && (ep->dma.frame_num == FRAME_NUM_INVALID)) { ++ /* ++ * Gadget has a request already queued, but still have not received an IN token from the host ++ * and the interval window is not aligned. Queued packet is now very stale, so remove it. ++ */ ++ ++ iproc_dma_data_finish(ep); ++ /** @todo Move set of ep->dma.usb_req to iproc_dma_data_init() and iproc_dma_data_finish() routines. */ ++ ep->dma.usb_req = NULL; ++ iproc_udc_req_xfer_done(ep, list_first_entry(&ep->list_queue, struct iproc_ep_req, list_node), -EREMOTEIO); ++ } ++ ++ /** @todo Current transfer is always the queue head. Do we need a separate pointer? Maybe just a pointer to usb_request ++ * need to know if the queue head has already been loaded. Maybe that's the point of the "stopped". ++ */ ++ if (!ep->dma.usb_req) { ++ if ((ep->dir == USB_DIR_IN) && (ep->type == USB_ENDPOINT_XFER_ISOC) && ++ (ep->dma.frame_num == FRAME_NUM_INVALID)) { ++ /* ++ * Delay any ISOC IN DMA operations until it is known what frame number the host ++ * is going to start transfers with. Normally might just return requests until ++ * this event occurs. However, the zero gadget does not submit requests based on ++ * its own timer or similar, so if the request is returned right away things are ++ * going to thrash, as another request will be immediately submitted. 
++ */ ++ ep->dma.usb_req = &(list_first_entry(&ep->list_queue, struct iproc_ep_req, list_node))->usb_req; ++ iproc_dma_data_init(ep); ++ iproc_usbd_ep_nak_clear(udc->usbd_regs, ep->num, ep->dir); ++ iproc_usbd_ep_irq_en(udc->usbd_regs, ep->num, ep->dir); ++ } else { ++ req = list_first_entry(&ep->list_queue, struct iproc_ep_req, list_node); ++ ep->dma.usb_req = &req->usb_req; ++ iproc_dma_data_init(ep); ++ iproc_dma_data_add_ready(ep); ++ iproc_usbd_ep_nak_clear(udc->usbd_regs, ep->num, ep->dir); ++ iproc_usbd_ep_dma_en(udc->usbd_regs, ep->num, ep->dir); ++ ++ /* needed for gadget commands to complete correctly - possible locking issue */ ++ mdelay(3); ++ } ++ } ++} ++ ++static void iproc_udc_req_xfer_done(struct iproc_ep *ep, struct iproc_ep_req *req, int status) ++{ ++ struct iproc_udc *udc = ep->udc; ++ uint stopped; ++ ++ list_del_init(&req->list_node); ++ ++ if (req->usb_req.status == -EINPROGRESS) { ++ req->usb_req.status = status; ++ } ++ ++ if (req->dma_aligned) { ++ req->dma_aligned = 0; ++ } else if (req->dma_mapped) { ++ /* ++ * A physical address was not provided for the DMA buffer. Release any resources ++ * that were requested by the driver. ++ */ ++ dma_unmap_single(udc->gadget.dev.parent, req->usb_req.dma, req->usb_req.length, ++ (ep->dir == USB_DIR_IN ? DMA_TO_DEVICE : DMA_FROM_DEVICE)); ++ ++ req->dma_mapped = 0; ++ req->usb_req.dma = DMA_ADDR_INVALID; ++ } ++ ++ /* ++ * Disable DMA operations during completion callback. The callback may cause requests to be ++ * added to the queue, but we don't want to change the state of the queue head. 
++ */ ++ stopped = ep->stopped; ++ ep->stopped = 1; ++ spin_unlock(&udc->lock); ++ req->usb_req.complete(&ep->usb_ep, &req->usb_req); ++ spin_lock(&udc->lock); ++ ep->stopped = stopped; ++} ++ ++static void iproc_udc_req_xfer_error(struct iproc_ep *ep, int status) ++{ ++ struct iproc_udc *udc = ep->udc; ++ ++ if (!ep->dma.usb_req) { ++ dev_err(udc->dev, "%s: No request being transferred\n", ep->usb_ep.name); ++ return; ++ } ++ ++ /** @todo abort current DMA, start next transfer if there is one. */ ++ ep->dma.usb_req->status = status; ++ iproc_udc_req_xfer_process(ep); ++} ++ ++static void iproc_udc_req_xfer_process(struct iproc_ep *ep) ++{ ++ struct iproc_udc *udc = ep->udc; ++ struct iproc_ep_req *req; ++ ++ /** @todo Current transfer is always the queue head. Do we need a separate pointer? Maybe just a pointer to usb_request */ ++ if (!ep->dma.usb_req) { ++ dev_err(udc->dev, "%s: No request being transferred\n", ep->usb_ep.name); ++ return; ++ } ++ ++ iproc_usbd_ep_dma_dis(udc->usbd_regs, ep->num, ep->dir); ++ iproc_dma_data_rm_done(ep); ++ ++ if (ep->dma.usb_req->status != -EINPROGRESS) { ++ /* ++ * Current transfer stage has finished. This may or may not be with error. ++ * Complete the transfer as needed before starting the next one, if any. ++ */ ++ iproc_dma_data_finish(ep); ++ ++ if ((ep->type == USB_ENDPOINT_XFER_CONTROL) && (ep->dir == USB_DIR_IN) && (ep->dma.usb_req->status == ENOERROR)) { ++ /* ++ * For the status phase of control IN transfers, the hardware requires that an OUT DMA transfer ++ * actually takes place. This should be just an OUT ZLP, and we will re-use the IN buffer that ++ * just completed transfer for this purpose. There should be no harm in doing this, even if the ++ * OUT status is more than a ZLP. ++ */ ++ ep->dir = USB_DIR_OUT; ++ iproc_dma_data_init(ep); ++ } else { ++ /* ++ * All transfer stages have completed. Return the request to the gadget driver, and then ++ * setup for the next transfer. 
++ */ ++ iproc_udc_req_xfer_done(ep, list_first_entry(&ep->list_queue, struct iproc_ep_req, list_node), ENOERROR); ++ ++ if (ep->type == USB_ENDPOINT_XFER_CONTROL) { ++ iproc_ep_setup_init(ep, ENOERROR); ++ } ++ ++ if (list_empty(&ep->list_queue)) { ++ /** @todo Probably should more closely bind this to iproc_dma_data_finish. */ ++ ep->dma.usb_req = NULL; ++ } else { ++ req = list_first_entry(&ep->list_queue, struct iproc_ep_req, list_node); ++ ep->dma.usb_req = &req->usb_req; ++ iproc_dma_data_init(ep); ++ } ++ } ++ } ++ ++ if (ep->dma.usb_req != NULL) { ++ iproc_dma_data_add_ready(ep); ++ iproc_usbd_ep_dma_en(udc->usbd_regs, ep->num, ep->dir); ++ iproc_usbd_ep_nak_clear(udc->usbd_regs, ep->num, ep->dir); ++ } ++} ++ ++ ++/*************************************************************************** ++ * Linux proc file system functions ++ ***************************************************************************/ ++#ifdef CONFIG_USB_GADGET_DEBUG_FILES ++#include ++ ++static const char udc_proc_file_name[] = "driver/" XGS_IPROC_UDC_NAME; ++ ++static int proc_file_show(struct seq_file *s, void *_) ++{ ++ return(0); ++} ++ ++static int proc_file_open(struct inode *inode, struct file *file) ++{ ++ return(single_open(file, proc_file_show, NULL)); ++} ++ ++static struct file_operations udc_proc_file_ops = ++{ ++ .open = proc_file_open, ++ .read = seq_read, ++ .llseek = seq_lseek, ++ .release = single_release, ++}; ++ ++static void xgs_iproc_udc_proc_create(void) ++{ ++ struct proc_dir_entry *pde; ++ ++ pde = create_proc_entry (udc_proc_file_name, 0, NULL); ++ if (pde) { ++ pde->proc_fops = &udc_proc_file_ops; ++ } ++} ++ ++static void xgs_iproc_udc_proc_remove(void) ++{ ++ remove_proc_entry(udc_proc_file_name, NULL); ++} ++ ++#else ++ ++static void xgs_iproc_udc_proc_create(void) {} ++static void xgs_iproc_udc_proc_remove(void) {} ++ ++#endif ++ ++static const struct of_device_id xgs_iproc_udc_ids[] = { ++ { .compatible = "brcm,usbd,hx4", }, ++ { .compatible = 
"brcm,usbd,kt2", }, ++ { .compatible = "brcm,usbd,gh", }, ++ { .compatible = "brcm,usbd,sb2", }, ++ { .compatible = "brcm,usbd,hr3", }, ++ { .compatible = "brcm,usbd,gh2", }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, xgs_iproc_udc_ids); ++ ++/**************************************************************************** ++ ***************************************************************************/ ++static int xgs_iproc_udc_probe(struct platform_device *pdev) ++{ ++ int ret = ENOERROR; ++ struct device *dev = &pdev->dev; ++ struct device_node *dn = dev->of_node; ++ const struct of_device_id *match; ++ struct iproc_udc *udc = NULL; ++ struct usb_phy *phy; ++ int irq; ++ ++ phy = devm_usb_get_phy_by_phandle(dev, "usb-phy", 0); ++ if (IS_ERR(phy)) { ++ dev_err(dev, "unable to find transceiver\n"); ++ return PTR_ERR(phy); ++ } ++ ++ if (phy->flags != IPROC_USB_MODE_DEVICE) ++ return -ENODEV; ++ ++ match = of_match_device(xgs_iproc_udc_ids, dev); ++ if (!match) { ++ dev_err(dev, "failed to find USBD in DT\n"); ++ return -ENODEV; ++ } ++ ++ irq = (uint)irq_of_parse_and_map(dn, 0); ++ ++ udc = devm_kzalloc(dev, sizeof(*udc), GFP_KERNEL); ++ if (!udc) { ++ dev_err(dev, "devm_kzalloc() failed\n" ); ++ ret = -ENOMEM; ++ goto err1; ++ } ++ platform_set_drvdata(pdev, udc); ++ ++ udc->dev = dev; ++ spin_lock_init(&udc->lock); ++ ++ udc->usbd_regs = (struct iproc_usbd_regs *)of_iomap(dn, 0); ++ if (!udc->usbd_regs) { ++ dev_err(dev, "unable to iomap USB2D base address\n"); ++ ret = -ENXIO; ++ goto err1; ++ } ++ ++ ret = usb_phy_init(phy); ++ if (ret < 0) { ++ dev_err(dev, "initial usb transceiver failed.\n"); ++ goto err1; ++ } ++ ++ ret = iproc_platform_dma_alloc(pdev, udc); ++ if (ret < 0) { ++ dev_err(dev, "iproc_platform_dma_alloc() failed\n"); ++ goto err1; ++ } ++ ++ /* gadget init */ ++ udc->gadget.name = XGS_IPROC_UDC_NAME; ++ udc->gadget.speed = USB_SPEED_UNKNOWN; ++ udc->gadget.max_speed = USB_SPEED_HIGH; ++ udc->gadget.ops = &xgs_iproc_udc_ops; ++ ++ 
iproc_udc_ops_init(udc); ++ ++ iproc_usbd_irq_dis(udc->usbd_regs, IPROC_USBD_IRQ_ALL); ++ iproc_usbd_irq_clear(udc->usbd_regs, IPROC_USBD_IRQ_ALL); ++ ++ ret = devm_request_irq(dev, irq, xgs_iproc_udc_isr, 0, ++ XGS_IPROC_UDC_NAME, (void *)udc); ++ if (ret < 0) { ++ dev_err(dev, "error requesting IRQ #%d\n", irq); ++ goto err2; ++ } ++ ++ ret = usb_add_gadget_udc(dev, &udc->gadget); ++ if (ret < 0) { ++ dev_err(dev, "usb_add_gadget_udc() failed\n"); ++ goto err3; ++ } ++ ++ xgs_iproc_udc_proc_create(); ++ ++ return ENOERROR; ++ ++ ++err3: ++ devm_free_irq(dev, irq, udc); ++err2: ++ iproc_platform_dma_free(pdev, udc); ++err1: ++ if (udc->usbd_regs) { ++ iounmap(udc->usbd_regs); ++ udc->usbd_regs = NULL; ++ } ++ if (udc) { ++ kfree(udc); ++ } ++ ++ return ret; ++} ++ ++static int xgs_iproc_udc_remove(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct iproc_udc *udc = platform_get_drvdata(pdev); ++ struct device_node *dn = pdev->dev.of_node; ++ int irq = (uint)irq_of_parse_and_map(dn, 0); ++ ++ if (udc) { ++ xgs_iproc_udc_proc_remove(); ++ ++ usb_del_gadget_udc(&udc->gadget); ++ iproc_udc_ops_finish(udc); ++ ++ platform_set_drvdata(pdev, NULL); ++ iproc_platform_dma_free(pdev, udc); ++ devm_free_irq(dev, irq, udc); ++ ++ if (udc->usbd_regs) { ++ iounmap(udc->usbd_regs); ++ udc->usbd_regs = NULL; ++ } ++ ++ kfree(udc); ++ } ++ ++ return ENOERROR; ++} ++ ++/* ++ * Generic platform device driver definition. 
++ */ ++static struct platform_driver xgs_iproc_udc_driver = ++{ ++ .probe = xgs_iproc_udc_probe, ++ .remove = xgs_iproc_udc_remove, ++ .driver = { ++ .name = XGS_IPROC_UDC_NAME, ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(xgs_iproc_udc_ids), ++ }, ++}; ++ ++module_platform_driver(xgs_iproc_udc_driver); ++ ++MODULE_DESCRIPTION("Broadcom USB Device Controller(UDC) driver"); ++MODULE_LICENSE("GPL"); ++MODULE_VERSION("1.0.0"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/gadget/udc/xgs_iproc_udc.h b/drivers/usb/gadget/udc/xgs_iproc_udc.h +--- a/drivers/usb/gadget/udc/xgs_iproc_udc.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/usb/gadget/udc/xgs_iproc_udc.h 2017-11-09 17:54:01.737429000 +0800 +@@ -0,0 +1,158 @@ ++/***************************************************************************** ++* Copyright 2006 - 2010 Broadcom Corporation. All rights reserved. ++* ++* Unless you and Broadcom execute a separate written software license ++* agreement governing use of this software, this software is licensed to you ++* under the terms of the GNU General Public License version 2, available at ++* http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). ++* ++* Notwithstanding the above, under no circumstances may you combine this ++* software in any way with any other Broadcom software provided under a ++* license other than the GPL, without Broadcom's express prior written ++* consent. ++*****************************************************************************/ ++#ifndef _XGS_IPROC_UDC_H_ ++#define _XGS_IPROC_UDC_H_ ++ ++#include ++#include "xgs_usbd_regs.h" ++ ++#define IPROC_UDC_EP_CNT 7 ++#define IPROC_UDC_CTRL_MAX_PKG_SIZE 64 ++#define IPROC_UDC_EP_MAX_PKG_SIZE 512 ++ ++/* ++ * Some unsigned number trickery for indexing into DMA descriptor chain. 
If the ++ * decriptor count is some power of 2, then we can use the mask to extract ++ * an index and not worry about wrap around as the unsigned variables are ++ * incremented. E.g. in following, IDX(0), IDX(4), IDX(8), ..., IDX(0xffffc) ++ * all produce the same result, i.e. 0. ++ */ ++#define IPROC_EP_DMA_DESC_CNT 1 ++#define IPROC_EP_DMA_DESC_IDX_MASK (IPROC_EP_DMA_DESC_CNT - 1) ++#define IPROC_EP_DMA_DESC_IDX(_idx) ((_idx) & IPROC_EP_DMA_DESC_IDX_MASK) ++ ++/* Some DWC UDC DMA descriptor layout definitions. See datasheet for details. */ ++ ++struct iproc_udc_dma_setup { ++ unsigned int status; ++ unsigned int reserved; ++ unsigned int data1; ++ unsigned int data2; ++}; ++ ++struct iproc_udc_dma_desc { ++ unsigned int status; ++ unsigned int reserved; ++ unsigned int buf_addr; ++ unsigned int next_addr; ++}; ++ ++/* ++ * Common DMA descriptor layout used for all endpoints. Only control endpoints ++ * need the setup descriptor, but in order to simply things it is defined for ++ * all. It may be possible to omit this altogether, and just use one of data ++ * descriptors for setup instead. The control transfer protocol should allow ++ * this to be done. ++ */ ++struct iproc_ep_dma { ++ struct iproc_udc_dma_setup setup; ++ struct iproc_udc_dma_desc desc[IPROC_EP_DMA_DESC_CNT]; ++}; ++ ++/* Structure used for DMA descriptor allocation. Not really necessary but convenient. */ ++ ++struct iproc_udc_dma { ++ struct iproc_ep_dma ep[IPROC_UDC_EP_CNT]; ++}; ++ ++/* ++ * Structure used to hold endpoint specific information. There's one of these for ++ * each endpoint. ++ * ++ * The Rx/Tx FIFO sizes are used for RAM allocation purposes. Each transfer ++ * direction has its own RAM that is used for all the FIFOs in that direction. ++ * The RAM gets segmented (allocated) as each endpoint gets enabled. This dynamic ++ * allocation FIFO sizes gives flexibility, and does not require that an ++ * endpoint's size be fixed at run-time or during compilation. 
If there's not ++ * enough FIFO RAM as required by a gadget's endpoint definitions, then an ++ * error will occur for the enabling of any endpoints after the FIFO RAM has ++ * become exhausted. ++ * ++ * The DMA virtual address is used for all descriptor operations. The DMA ++ * physical address is for convenience (setting hardware registers, obtaining ++ * addresses for descriptor chaining, etc.). The DMA descriptors are not ++ * allocated on a per-endpoint basis. These are just pointers into the ++ * large block that was allocated for all endpoints. ++ */ ++struct iproc_ep { ++ struct usb_ep usb_ep; /* usb_gadget.h */ ++ const struct usb_endpoint_descriptor *desc; /* usb/ch9.h */ ++ struct list_head list_queue; /* active BCM_UDC_EP_REQ's for the endpoint */ ++ struct iproc_udc *udc; /* endpoint owner (UDC controller) */ ++ unsigned int num; ++ unsigned int dir; /* USB_DIR_xxx (direction) */ ++ unsigned int type; /* USB_ENDPOINT_XFER_xxx */ ++ unsigned int beq_addr; /* dirn | type */ ++ unsigned int stopped : 1; ++ struct { ++ struct iproc_ep_dma *vir_addr; ++ struct iproc_ep_dma *phy_addr; ++ struct usb_request *usb_req; /* Current request being DMA'd */ ++ ++ /** @todo Some of the below are duplicates of usb_request elements. Use usb_request instead. */ ++ unsigned int max_buf_len; /* Max buffer length to use with a descriptor */ ++ unsigned int done_len; /* Length of request DMA'd so far */ ++ unsigned int todo_len; /* Length of request left to DMA */ ++ unsigned int add_idx; /* descriptor chain index */ ++ unsigned int rm_idx; /* descriptor chain index */ ++ unsigned int buf_addr; /* Location in request to DMA */ ++ unsigned int frame_num; /* Frame number for ISOC transfers */ ++ unsigned int frame_incr; /* Frame number increment (period) */ ++ unsigned int status; ++ unsigned int done; /* DMA and USB transfer completion indication (IN_DMA_DONE and IN_XFER_DONE) */ ++ void *align_buff; /* Aligned buffer. Only used if usb_req buffer not aligned properly. 
*/ ++ dma_addr_t align_addr; /* Aligned buffer physical address */ ++ unsigned int align_len; /* Aligned buffer length */ ++ } dma; ++}; ++ ++/* ++ * Structure used to hold controller information. There should be one of these ++ * for each controller. Most likely there's only one. ++ * ++ * The Rx/Tx FIFO space are used for RAM allocation purposes. These track how ++ * much RAM is available for use as a FIFO. When an endpoint is enabled, these ++ * are check to see if there's enough RAM for a FIFO of the desired length as ++ * implied by the max packet size. ++ */ ++struct iproc_udc { ++ struct usb_gadget gadget; /* usb_gadget.h */ ++ struct usb_gadget_driver *gadget_driver; /* usb_gadget.h */ ++ struct completion *dev_release; /* Used for coordination during device removal */ ++ spinlock_t lock; ++ struct device *dev; ++ unsigned int irq_num; ++ struct iproc_ep ep[IPROC_UDC_EP_CNT]; ++ struct iproc_usbd_regs *usbd_regs; ++ struct { ++ struct iproc_udc_dma *vir_addr; ++ struct iproc_udc_dma *phy_addr; ++ } dma; ++ unsigned int vbus_active : 1; /* Indicates if VBUS is present */ ++ unsigned int pullup_on : 1; /* Indicates if pull up is on */ ++}; ++ ++/* ++ * Structure used to hold an endpoint transfer request. Can be any number of ++ * these for an endpoint. ++ */ ++struct iproc_ep_req { ++ struct usb_request usb_req; /* usb_gadget.h */ ++ struct list_head list_node; /* For linking in the BCM_UDC_EP request queue */ ++ dma_addr_t orig_dma_addr; /* Original buffer DMA address (physical). */ ++ unsigned dma_mapped : 1; /* Indicates if address mapping req'd. See usb_gadget.h */ ++ unsigned dma_aligned : 1; /* Indicates if buffer duplication done for alignment. 
*/ ++}; ++ ++#endif /* _XGS_IPROC_UDC_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/gadget/udc/xgs_usbd_regs.h b/drivers/usb/gadget/udc/xgs_usbd_regs.h +--- a/drivers/usb/gadget/udc/xgs_usbd_regs.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/usb/gadget/udc/xgs_usbd_regs.h 2017-11-09 17:54:01.738446000 +0800 +@@ -0,0 +1,995 @@ ++/***************************************************************************** ++* Copyright 2003 - 2009 Broadcom Corporation. All rights reserved. ++* ++* Unless you and Broadcom execute a separate written software license ++* agreement governing use of this software, this software is licensed to you ++* under the terms of the GNU General Public License version 2, available at ++* http://www.broadcom.com/licenses/GPLv2.php (the "GPL"). ++* ++* Notwithstanding the above, under no circumstances may you combine this ++* software in any way with any other Broadcom software provided under a ++* license other than the GPL, without Broadcom's express prior written ++* consent. 
++*****************************************************************************/ ++#ifndef _USBD_REGS_H_ ++#define _USBD_REGS_H_ ++ ++#include ++ ++#define IPROC_USBD_MULTI_RX_FIFO 0 ++ ++#define IPROC_USBD_EP_CFG_CNT 10 ++#define IPROC_USBD_REG_EP_CNT 16 ++ ++#define IPROC_USBD_SPEED_UNKNOWN 0 ++#define IPROC_USBD_SPEED_LOW 1 ++#define IPROC_USBD_SPEED_FULL 2 ++#define IPROC_USBD_SPEED_HIGH 3 ++ ++#define IPROC_USBD_EP_DIR_IN 0x80 ++#define IPROC_USBD_EP_DIR_OUT 0x00 ++ ++#define IPROC_USBD_EP_TYPE_CTRL 0 ++#define IPROC_USBD_EP_TYPE_ISOC 1 ++#define IPROC_USBD_EP_TYPE_BULK 2 ++#define IPROC_USBD_EP_TYPE_INTR 3 ++ ++#define IPROC_USBD_IRQ_REMOTEWAKEUP_DELTA IPROC_USBD_REG_INTR_REMOTE_WAKEUP_DELTA ++#define IPROC_USBD_IRQ_SPEED_ENUM_DONE IPROC_USBD_REG_INTR_SPD_ENUM_DONE ++#define IPROC_USBD_IRQ_SOF_DETECTED IPROC_USBD_REG_INTR_SOF_RX ++#define IPROC_USBD_IRQ_BUS_SUSPEND IPROC_USBD_REG_INTR_BUS_SUSPEND ++#define IPROC_USBD_IRQ_BUS_RESET IPROC_USBD_REG_INTR_BUS_RESET ++#define IPROC_USBD_IRQ_BUS_IDLE IPROC_USBD_REG_INTR_BUS_IDLE ++#define IPROC_USBD_IRQ_SET_INTF IPROC_USBD_REG_INTR_SET_INTF_RX ++#define IPROC_USBD_IRQ_SET_CFG IPROC_USBD_REG_INTR_SET_CFG_RX ++#define IPROC_USBD_IRQ_ALL (IPROC_USBD_IRQ_REMOTEWAKEUP_DELTA | \ ++ IPROC_USBD_IRQ_SPEED_ENUM_DONE | \ ++ IPROC_USBD_IRQ_SOF_DETECTED | \ ++ IPROC_USBD_IRQ_BUS_SUSPEND | \ ++ IPROC_USBD_IRQ_BUS_RESET | \ ++ IPROC_USBD_IRQ_BUS_IDLE | \ ++ IPROC_USBD_IRQ_SET_INTF | \ ++ IPROC_USBD_IRQ_SET_CFG) ++ ++#define IPROC_USBD_EP_STAT_DMA_ERROR IPROC_USBD_REG_EP_FIFO_STATUS_AHB_BUS_ERROR ++#define IPROC_USBD_EP_STAT_DMA_BUF_UNAVAIL IPROC_USBD_REG_EP_FIFO_STATUS_DMA_BUF_NOT_AVAIL ++#define IPROC_USBD_EP_STAT_IN_TOKEN_RX IPROC_USBD_REG_EP_FIFO_STATUS_IN_TOKEN_RX ++#define IPROC_USBD_EP_STAT_IN_DMA_DONE IPROC_USBD_REG_EP_FIFO_STATUS_IN_DMA_DONE ++#define IPROC_USBD_EP_STAT_IN_FIFO_EMPTY IPROC_USBD_REG_EP_FIFO_STATUS_IN_FIFO_EMPTY ++#define IPROC_USBD_EP_STAT_IN_XFER_DONE IPROC_USBD_REG_EP_FIFO_STATUS_IN_XFER_DONE ++#define 
IPROC_USBD_EP_STAT_OUT_DMA_DATA_DONE IPROC_USBD_REG_EP_FIFO_STATUS_OUT_DMA_DATA_DONE ++#define IPROC_USBD_EP_STAT_OUT_DMA_SETUP_DONE IPROC_USBD_REG_EP_FIFO_STATUS_OUT_DMA_SETUP_DONE ++#define IPROC_USBD_EP_STAT_ALL (IPROC_USBD_EP_STAT_DMA_ERROR | \ ++ IPROC_USBD_EP_STAT_DMA_BUF_UNAVAIL | \ ++ IPROC_USBD_EP_STAT_IN_TOKEN_RX | \ ++ IPROC_USBD_EP_STAT_IN_DMA_DONE | \ ++ IPROC_USBD_EP_STAT_IN_XFER_DONE | \ ++ IPROC_USBD_EP_STAT_OUT_DMA_DATA_DONE | \ ++ IPROC_USBD_EP_STAT_OUT_DMA_SETUP_DONE) ++ ++ ++#define REG8_RSVD(start, end) uint8_t rsvd_##start[(end - start) / sizeof(uint8_t)] ++#define REG16_RSVD(start, end) uint16_t rsvd_##start[(end - start) / sizeof(uint16_t)] ++#define REG32_RSVD(start, end) uint rsvd_##start[(end - start) / sizeof(uint)] ++ ++struct iproc_usbd_ep_fifo_regs { ++ uint ctrl; ++ uint status; ++ uint size1; ++ uint size2; /* Buf Size OUT/Max PKT SIZE */ ++ uint buf_addr; ++ uint desc_addr; ++ REG32_RSVD(0x18, 0x20); ++}; ++ ++struct iproc_usbd_regs { ++ struct iproc_usbd_ep_fifo_regs ep_fifo_in[IPROC_USBD_REG_EP_CNT]; ++ struct iproc_usbd_ep_fifo_regs ep_fifo_out[IPROC_USBD_REG_EP_CNT]; ++ uint dev_cfg; ++ uint dev_ctrl; ++ uint dev_status; ++ uint dev_irq_status; ++ uint dev_irq_mask; ++ uint ep_irq_status; ++ uint ep_irq_mask; ++ uint test_mode; ++ uint rel_num; ++ REG32_RSVD(0x424, 0x500); ++ REG32_RSVD(0x500, 0x504); ++ uint ep_cfg[IPROC_USBD_REG_EP_CNT]; ++ REG32_RSVD(0x544, 0x800); ++ uint rx_fifo[256]; ++ uint tx_fifo[256]; ++ uint strap; ++}; ++ ++ ++struct iproc_usbd_idm_regs { ++ REG32_RSVD(0x000, 0x408); ++ uint io_ctrl; ++ REG32_RSVD(0x40C, 0x500); ++ uint io_status; ++ REG32_RSVD(0x504, 0x800); ++ uint reset_ctrl; ++ uint reset_status; ++ REG32_RSVD(0x808, 0xA00); ++ uint irq_status; ++}; ++ ++/* ++ * The endpoint type field in the FIFO control register has the same enumeration ++ * as the USB protocol. Not going to define it here. 
++ */ ++#define IPROC_USBD_REG_EP_FIFO_CTRL_OUT_FLUSH_ENABLE (1 << 12) ++#define IPROC_USBD_REG_EP_FIFO_CTRL_OUT_CLOSE_DESC (1 << 11) ++#define IPROC_USBD_REG_EP_FIFO_CTRL_IN_SEND_NULL (1 << 10) ++#define IPROC_USBD_REG_EP_FIFO_CTRL_OUT_DMA_ENABLE (1 << 9) ++#define IPROC_USBD_REG_EP_FIFO_CTRL_NAK_CLEAR (1 << 8) ++#define IPROC_USBD_REG_EP_FIFO_CTRL_NAK_SET (1 << 7) ++#define IPROC_USBD_REG_EP_FIFO_CTRL_NAK_IN_PROGRESS (1 << 6) ++#define IPROC_USBD_REG_EP_FIFO_CTRL_TYPE_SHIFT 4 ++#define IPROC_USBD_REG_EP_FIFO_CTRL_TYPE_MASK (3 << IPROC_USBD_REG_EP_FIFO_CTRL_TYPE_SHIFT) ++#define IPROC_USBD_REG_EP_FIFO_CTRL_IN_DMA_ENABLE (1 << 3) ++#define IPROC_USBD_REG_EP_FIFO_CTRL_SNOOP_ENABLE (1 << 2) ++#define IPROC_USBD_REG_EP_FIFO_CTRL_IN_FLUSH_ENABLE (1 << 1) ++#define IPROC_USBD_REG_EP_FIFO_CTRL_STALL_ENABLE (1 << 0) ++ ++#define IPROC_USBD_REG_EP_FIFO_STATUS_CLOSE_DESC_CLEAR (1 << 28) ++#define IPROC_USBD_REG_EP_FIFO_STATUS_IN_XFER_DONE (1 << 27) ++#define IPROC_USBD_REG_EP_FIFO_STATUS_STALL_SET_RX (1 << 26) ++#define IPROC_USBD_REG_EP_FIFO_STATUS_STALL_CLEAR_RX (1 << 25) ++#define IPROC_USBD_REG_EP_FIFO_STATUS_IN_FIFO_EMPTY (1 << 24) ++#define IPROC_USBD_REG_EP_FIFO_STATUS_IN_DMA_DONE (1 << 10) ++#define IPROC_USBD_REG_EP_FIFO_STATUS_AHB_BUS_ERROR (1 << 9) ++#define IPROC_USBD_REG_EP_FIFO_STATUS_OUT_FIFO_EMPTY (1 << 8) ++#define IPROC_USBD_REG_EP_FIFO_STATUS_DMA_BUF_NOT_AVAIL (1 << 7) ++#define IPROC_USBD_REG_EP_FIFO_STATUS_IN_TOKEN_RX (1 << 6) ++#define IPROC_USBD_REG_EP_FIFO_STATUS_OUT_DMA_SETUP_DONE (1 << 5) ++#define IPROC_USBD_REG_EP_FIFO_STATUS_OUT_DMA_DATA_DONE (1 << 4) ++ ++#define IPROC_USBD_REG_EP_FIFO_SIZE1_OUT_ISOC_PID_SHIFT 16 ++#define IPROC_USBD_REG_EP_FIFO_SIZE1_OUT_ISOC_PID_MASK (3 << IPROC_USBD_REG_EP_FIFO_SIZE1_OUT_ISOC_PID_SHIFT) ++#define IPROC_USBD_REG_EP_FIFO_SIZE1_IN_DEPTH_SHIFT 0 ++#define IPROC_USBD_REG_EP_FIFO_SIZE1_IN_DEPTH_MASK (0xffff << IPROC_USBD_REG_EP_FIFO_SIZE1_IN_DEPTH_SHIFT) ++#define IPROC_USBD_REG_EP_FIFO_SIZE1_OUT_FRAME_NUM_SHIFT 
IPROC_USBD_REG_EP_FIFO_SIZE1_IN_DEPTH_SHIFT ++#define IPROC_USBD_REG_EP_FIFO_SIZE1_OUT_FRAME_NUM_MASK IPROC_USBD_REG_EP_FIFO_SIZE1_IN_DEPTH_MASK ++ ++#define IPROC_USBD_REG_EP_FIFO_SIZE2_OUT_DEPTH_SHIFT 16 ++#define IPROC_USBD_REG_EP_FIFO_SIZE2_OUT_DEPTH_MASK (0xffff << IPROC_USBD_REG_EP_FIFO_SIZE2_OUT_DEPTH_SHIFT) ++#define IPROC_USBD_REG_EP_FIFO_SIZE2_PKT_MAX_SHIFT 0 ++#define IPROC_USBD_REG_EP_FIFO_SIZE2_PKT_MAX_MASK (0xffff << IPROC_USBD_REG_EP_FIFO_SIZE2_PKT_MAX_SHIFT) ++ ++/* ++ * The endpoint type field in the config register has the same enumeration ++ * as the USB protocol. Not going to define it here. ++ */ ++#define IPROC_USBD_REG_EP_CFG_PKT_MAX_SHIFT 19 ++#define IPROC_USBD_REG_EP_CFG_PKT_MAX_MASK (0x7ff << IPROC_USBD_REG_EP_CFG_PKT_MAX_SHIFT) ++#define IPROC_USBD_REG_EP_CFG_ALT_NUM_SHIFT 15 ++#define IPROC_USBD_REG_EP_CFG_ALT_NUM_MASK (0xf << IPROC_USBD_REG_EP_CFG_ALT_NUM_SHIFT) ++#define IPROC_USBD_REG_EP_CFG_INTF_NUM_SHIFT 11 ++#define IPROC_USBD_REG_EP_CFG_INTF_NUM_MASK (0xf << IPROC_USBD_REG_EP_CFG_INTF_NUM_SHIFT) ++#define IPROC_USBD_REG_EP_CFG_CFG_NUM_SHIFT 7 ++#define IPROC_USBD_REG_EP_CFG_CFG_NUM_MASK (0xf << IPROC_USBD_REG_EP_CFG_CFG_NUM_SHIFT) ++#define IPROC_USBD_REG_EP_CFG_TYPE_SHIFT 5 ++#define IPROC_USBD_REG_EP_CFG_TYPE_MASK (0x3 << IPROC_USBD_REG_EP_CFG_TYPE_SHIFT) ++#define IPROC_USBD_REG_EP_CFG_DIRN_IN (1 << 4) ++#define IPROC_USBD_REG_EP_CFG_DIRN_OUT 0 ++#define IPROC_USBD_REG_EP_CFG_FIFO_NUM_SHIFT 0 ++#define IPROC_USBD_REG_EP_CFG_FIFO_NUM_MASK (0xf << IPROC_USBD_REG_EP_CFG_FIFO_NUM_SHIFT) ++ ++/* Endpoint Interrupt register definitions */ ++#define IPROC_USBD_REG_EP_INTR_OUT_SHIFT 16 ++#define IPROC_USBD_REG_EP_INTR_OUT_MASK (0xffff << IPROC_USBD_REG_EP_INTR_OUT_SHIFT) ++#define IPROC_USBD_REG_EP_INTR_IN_SHIFT 0 ++#define IPROC_USBD_REG_EP_INTR_IN_MASK (0xffff << IPROC_USBD_REG_EP_INTR_IN_SHIFT) ++ ++/* Device Controller register definitions */ ++#define IPROC_USBD_REG_CFG_ULPI_DDR_ENABLE (1 << 19) ++#define 
IPROC_USBD_REG_CFG_SET_DESCRIPTOR_ENABLE (1 << 18) ++#define IPROC_USBD_REG_CFG_CSR_PROGRAM_ENABLE (1 << 17) ++#define IPROC_USBD_REG_CFG_HALT_STALL_ENABLE (1 << 16) ++#define IPROC_USBD_REG_CFG_HS_TIMEOUT_CALIB_SHIFT 13 ++#define IPROC_USBD_REG_CFG_HS_TIMEOUT_CALIB_MASK (7 << IPROC_USBD_REG_CFG_HS_TIMEOUT_CALIB_SHIFT) ++#define IPROC_USBD_REG_CFG_FS_TIMEOUT_CALIB_SHIFT 10 ++#define IPROC_USBD_REG_CFG_FS_TIMEOUT_CALIB_MASK (7 << IPROC_USBD_REG_CFG_FS_TIMEOUT_CALIB_SHIFT) ++#define IPROC_USBD_REG_CFG_STATUS_1_ENABLE (1 << 8) ++#define IPROC_USBD_REG_CFG_STATUS_ENABLE (1 << 7) ++#define IPROC_USBD_REG_CFG_UTMI_BI_DIRN_ENABLE (1 << 6) ++#define IPROC_USBD_REG_CFG_UTMI_8BIT_ENABLE (1 << 5) ++#define IPROC_USBD_REG_CFG_SYNC_FRAME_ENABLE (1 << 4) ++#define IPROC_USBD_REG_CFG_SELF_PWR_ENABLE (1 << 3) ++#define IPROC_USBD_REG_CFG_REMOTE_WAKEUP_ENABLE (1 << 2) ++#define IPROC_USBD_REG_CFG_SPD_SHIFT 0 ++#define IPROC_USBD_REG_CFG_SPD_MASK (3 << IPROC_USBD_REG_CFG_SPD_SHIFT) ++#define IPROC_USBD_REG_CFG_SPD_HS (0 << IPROC_USBD_REG_CFG_SPD_SHIFT) ++#define IPROC_USBD_REG_CFG_SPD_FS (1 << IPROC_USBD_REG_CFG_SPD_SHIFT) ++#define IPROC_USBD_REG_CFG_SPD_LS (2 << IPROC_USBD_REG_CFG_SPD_SHIFT) ++#define IPROC_USBD_REG_CFG_SPD_FS_48MHZ (3 << IPROC_USBD_REG_CFG_SPD_SHIFT) ++ ++#define IPROC_USBD_REG_CTRL_DMA_OUT_THRESHOLD_LEN_SHIFT 24 ++#define IPROC_USBD_REG_CTRL_DMA_OUT_THRESHOLD_LEN_MASK (0xff << IPROC_USBD_REG_CTRL_DMA_OUT_THRESHOLD_LEN_SHIFT) ++#define IPROC_USBD_REG_CTRL_DMA_BURST_LEN_SHIFT 16 ++#define IPROC_USBD_REG_CTRL_DMA_BURST_LEN_MASK (0xff << IPROC_USBD_REG_CTRL_DMA_BURST_LEN_SHIFT) ++#define IPROC_USBD_REG_CTRL_OUT_FIFO_FLUSH_ENABLE (1 << 14) ++#define IPROC_USBD_REG_CTRL_CSR_DONE (1 << 13) ++#define IPROC_USBD_REG_CTRL_OUT_NAK_ALL_ENABLE (1 << 12) ++#define IPROC_USBD_REG_CTRL_DISCONNECT_ENABLE (1 << 10) ++#define IPROC_USBD_REG_CTRL_DMA_MODE_ENABLE (1 << 9) ++#define IPROC_USBD_REG_CTRL_DMA_BURST_ENABLE (1 << 8) ++#define IPROC_USBD_REG_CTRL_DMA_OUT_THRESHOLD_ENABLE 
(1 << 7) ++#define IPROC_USBD_REG_CTRL_DMA_BUFF_FILL_MODE_ENABLE (1 << 6) ++#define IPROC_USBD_REG_CTRL_ENDIAN_BIG_ENABLE (1 << 5) ++#define IPROC_USBD_REG_CTRL_DMA_DESC_UPDATE_ENABLE (1 << 4) ++#define IPROC_USBD_REG_CTRL_DMA_IN_ENABLE (1 << 3) /*TX DMA Enable */ ++#define IPROC_USBD_REG_CTRL_DMA_OUT_ENABLE (1 << 2) /*RX DMA Enable */ ++#define IPROC_USBD_REG_CTRL_RESUME_SIGNAL_ENABLE (1 << 0) ++#define IPROC_USBD_REG_CTRL_LE_ENABLE 0 /*^BCM5892 */ ++ ++#define IPROC_USBD_REG_STAT_SOF_FRAME_NUM_SHIFT 18 ++#define IPROC_USBD_REG_STAT_SOF_FRAME_NUM_MASK (0x3ffff << IPROC_USBD_REG_STAT_SOF_FRAME_NUM_SHIFT) ++#define IPROC_USBD_REG_STAT_REMOTE_WAKEUP_ALLOWED (1 << 17) ++#define IPROC_USBD_REG_STAT_PHY_ERROR (1 << 16) ++#define IPROC_USBD_REG_STAT_OUT_FIFO_EMPTY (1 << 15) ++#define IPROC_USBD_REG_STAT_SPD_SHIFT 13 ++#define IPROC_USBD_REG_STAT_SPD_MASK (3 << IPROC_USBD_REG_STAT_SPD_SHIFT) ++#define IPROC_USBD_REG_STAT_SPD_HS (0 << IPROC_USBD_REG_STAT_SPD_SHIFT) ++#define IPROC_USBD_REG_STAT_SPD_FS (1 << IPROC_USBD_REG_STAT_SPD_SHIFT) ++#define IPROC_USBD_REG_STAT_SPD_LS (2 << IPROC_USBD_REG_STAT_SPD_SHIFT) ++#define IPROC_USBD_REG_STAT_SPD_FS_48MHZ (3 << IPROC_USBD_REG_STAT_SPD_SHIFT) ++#define IPROC_USBD_REG_STAT_BUS_SUSPENDED (1 << 12) ++#define IPROC_USBD_REG_STAT_ALT_NUM_SHIFT 8 ++#define IPROC_USBD_REG_STAT_ALT_NUM_MASK (0xf << IPROC_USBD_REG_STAT_ALT_NUM_SHIFT) ++#define IPROC_USBD_REG_STAT_INTF_NUM_SHIFT 4 ++#define IPROC_USBD_REG_STAT_INTF_NUM_MASK (0xf << IPROC_USBD_REG_STAT_INTF_NUM_SHIFT) ++#define IPROC_USBD_REG_STAT_CFG_NUM_SHIFT 0 ++#define IPROC_USBD_REG_STAT_CFG_NUM_MASK (0xf << IPROC_USBD_REG_STAT_CFG_NUM_SHIFT) ++ ++#define IPROC_USBD_REG_INTR_REMOTE_WAKEUP_DELTA (1 << 7) /*Remote Wakeup Delta*/ ++#define IPROC_USBD_REG_INTR_SPD_ENUM_DONE (1 << 6) /*ENUM Speed Completed*/ ++#define IPROC_USBD_REG_INTR_SOF_RX (1 << 5) /*SOF Token Detected */ ++#define IPROC_USBD_REG_INTR_BUS_SUSPEND (1 << 4) /*SUSPEND State Detected*/ ++#define 
IPROC_USBD_REG_INTR_BUS_RESET (1 << 3) /*RESET State Detected */ ++#define IPROC_USBD_REG_INTR_BUS_IDLE (1 << 2) /*IDLE State Detected*/ ++#define IPROC_USBD_REG_INTR_SET_INTF_RX (1 << 1) /*Received SET_INTERFACE CMD*/ ++#define IPROC_USBD_REG_INTR_SET_CFG_RX (1 << 0) /*Received SET_CONFIG CMD*/ ++ ++/* DMA Descriptor definitions */ ++#define IPROC_USBD_REG_DMA_STAT_BUF_SHIFT 30 ++#define IPROC_USBD_REG_DMA_STAT_BUF_HOST_READY (0 << IPROC_USBD_REG_DMA_STAT_BUF_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_BUF_DMA_BUSY (1 << IPROC_USBD_REG_DMA_STAT_BUF_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_BUF_DMA_DONE (2 << IPROC_USBD_REG_DMA_STAT_BUF_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_BUF_HOST_BUSY (3 << IPROC_USBD_REG_DMA_STAT_BUF_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_BUF_MASK (3 << IPROC_USBD_REG_DMA_STAT_BUF_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_RX_SHIFT 28 ++#define IPROC_USBD_REG_DMA_STAT_RX_SUCCESS (0 << IPROC_USBD_REG_DMA_STAT_RX_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_RX_ERR_DESC (1 << IPROC_USBD_REG_DMA_STAT_RX_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_RX_ERR_BUF (3 << IPROC_USBD_REG_DMA_STAT_RX_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_RX_MASK (3 << IPROC_USBD_REG_DMA_STAT_RX_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_CFG_NUM_SHIFT 24 ++#define IPROC_USBD_REG_DMA_STAT_CFG_NUM_MASK (0xf << IPROC_USBD_REG_DMA_STAT_CFG_NUM_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_INTF_NUM_SHIFT 20 ++#define IPROC_USBD_REG_DMA_STAT_INTF_NUM_MASK (0xf << IPROC_USBD_REG_DMA_STAT_INTF_NUM_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_ALT_NUM_SHIFT 16 ++#define IPROC_USBD_REG_DMA_STAT_ALT_NUM_MASK (0xf << IPROC_USBD_REG_DMA_STAT_ALT_NUM_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_LAST_DESC (1 << 27) ++#define IPROC_USBD_REG_DMA_STAT_FRAME_NUM_SHIFT 16 ++#define IPROC_USBD_REG_DMA_STAT_FRAME_NUM_MASK (0x7ff << IPROC_USBD_REG_DMA_STAT_FRAME_NUM_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_BYTE_CNT_SHIFT 0 ++#define IPROC_USBD_REG_DMA_STAT_ISO_PID_SHIFT 14 ++#define IPROC_USBD_REG_DMA_STAT_ISO_PID_MASK 
(0x3 << IPROC_USBD_REG_DMA_STAT_ISO_PID_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_ISO_BYTE_CNT_SHIFT IPROC_USBD_REG_DMA_STAT_BYTE_CNT_SHIFT ++#define IPROC_USBD_REG_DMA_STAT_ISO_BYTE_CNT_MASK (0x3fff << IPROC_USBD_REG_DMA_STAT_ISO_BYTE_CNT_SHIFT) ++#define IPROC_USBD_REG_DMA_STAT_NON_ISO_BYTE_CNT_SHIFT IPROC_USBD_REG_DMA_STAT_BYTE_CNT_SHIFT ++#define IPROC_USBD_REG_DMA_STAT_NON_ISO_BYTE_CNT_MASK (0xffff << IPROC_USBD_REG_DMA_STAT_NON_ISO_BYTE_CNT_SHIFT) ++ ++/* USB2D IDM definitions */ ++#define IPROC_USB2D_IDM_REG_IO_CTRL_DIRECT_CLK_ENABLE (1 << 0) ++#define IPROC_USB2D_IDM_REG_RESET_CTRL_RESET (1 << 0) ++ ++/* Inline Function Definitions */ ++static inline uint ++usbd_reg32_read(volatile uint *reg) ++{ ++ return (le32_to_cpu(*reg)); ++} ++ ++static inline void ++usbd_reg32_write(volatile uint *reg, uint value) ++{ ++ *reg = cpu_to_le32(value); ++} ++ ++static inline void ++usbd_reg32_bits_set(volatile uint *reg, uint bits) ++{ ++ uint tmp; ++ tmp = usbd_reg32_read(reg); ++ tmp |= bits; ++ usbd_reg32_write(reg, tmp); ++} ++ ++static inline void ++usbd_reg32_bits_clear(volatile uint *reg, uint bits) ++{ ++ uint tmp; ++ tmp = usbd_reg32_read(reg); ++ tmp &= ~bits; ++ usbd_reg32_write(reg, tmp); ++} ++ ++static inline void ++usbd_reg32_bits_modify(volatile uint *reg, uint mask, uint value) ++{ ++ uint tmp; ++ tmp = usbd_reg32_read(reg); ++ tmp &= ~mask; ++ tmp |= value; ++ usbd_reg32_write(reg, tmp); ++} ++ ++#define IPROC_USBD_READ(_r) usbd_reg32_read(&_r) ++#define IPROC_USBD_WRITE(_r, _v) usbd_reg32_write(&_r, _v) ++#define IPROC_USBD_BITS_SET(_r, _b) usbd_reg32_bits_set(&_r, _b) ++#define IPROC_USBD_BITS_CLEAR(_r, _b) usbd_reg32_bits_clear(&_r, _b) ++#define IPROC_USBD_BITS_MODIFY(_r, _m, _v) usbd_reg32_bits_modify(&_r, _m, _v) ++ ++/***************************************************************************** ++* @brief Connect / Disconnect to USB BUS ++*****************************************************************************/ ++static inline void 
iproc_usbd_bus_conn(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_CLEAR(base->dev_ctrl, IPROC_USBD_REG_CTRL_DISCONNECT_ENABLE); ++} ++ ++static inline void iproc_usbd_bus_disconn(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_SET(base->dev_ctrl, IPROC_USBD_REG_CTRL_DISCONNECT_ENABLE); ++} ++ ++/***************************************************************************** ++* @brief USB BUS suspend status ++* @return ++* true : BUS is in suspend state ++* false : BUS is not in suspend state ++*****************************************************************************/ ++static inline bool iproc_usbd_bus_suspend(struct iproc_usbd_regs *base) ++{ ++ return (IPROC_USBD_READ(base->dev_status) & IPROC_USBD_REG_STAT_BUS_SUSPENDED) ? true : false; ++} ++ ++/***************************************************************************** ++* @brief Retrieve setting numbers from last Rx'd SET_CONFIGURATION or ++* SET_INTERFACE request ++* @return ++* Setting Number ++*****************************************************************************/ ++static inline uint iproc_usbd_alt_num(struct iproc_usbd_regs *base) ++{ ++ return ((IPROC_USBD_READ(base->dev_status) & IPROC_USBD_REG_STAT_ALT_NUM_MASK) >> IPROC_USBD_REG_STAT_ALT_NUM_SHIFT); ++} ++ ++static inline uint iproc_usbd_cfg_num(struct iproc_usbd_regs *base) ++{ ++ return ((IPROC_USBD_READ(base->dev_status) & IPROC_USBD_REG_STAT_CFG_NUM_MASK) >> IPROC_USBD_REG_STAT_CFG_NUM_SHIFT); ++} ++ ++static inline uint iproc_usbd_intf_num(struct iproc_usbd_regs *base) ++{ ++ return ((IPROC_USBD_READ(base->dev_status) & IPROC_USBD_REG_STAT_INTF_NUM_MASK) >> IPROC_USBD_REG_STAT_INTF_NUM_SHIFT); ++} ++ ++ ++/***************************************************************************** ++* @brief Disable / Enable DMA operations at the device level (all endpoints) ++*****************************************************************************/ ++static inline void iproc_usbd_dma_dis(struct iproc_usbd_regs *base) ++{ ++ 
IPROC_USBD_BITS_CLEAR(base->dev_ctrl, (IPROC_USBD_REG_CTRL_DMA_IN_ENABLE | IPROC_USBD_REG_CTRL_DMA_OUT_ENABLE)); ++} ++ ++static inline void iproc_usbd_dma_en(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_SET(base->dev_ctrl, (IPROC_USBD_REG_CTRL_DMA_IN_ENABLE | IPROC_USBD_REG_CTRL_DMA_OUT_ENABLE)); ++} ++ ++static inline bool iproc_usbd_dma_status(struct iproc_usbd_regs *base) ++{ ++ return (IPROC_USBD_READ(base->dev_ctrl) & IPROC_USBD_REG_CTRL_DMA_OUT_ENABLE ? true : false); ++} ++ ++/***************************************************************************** ++* @brief Retrieve Frame number contained in last Rx'd SOF packet ++* @return ++* Frame Number in the following format. ++* bits[13:3] milli-second frame number ++* bits[2:0] micro-frame number ++* @note ++* For full and low speed connections, the microframe number will be zero. ++*****************************************************************************/ ++static inline uint iproc_usbd_last_rx_frame_num(struct iproc_usbd_regs *base) ++{ ++ return((IPROC_USBD_READ(base->dev_status) & IPROC_USBD_REG_STAT_SOF_FRAME_NUM_MASK) >> IPROC_USBD_REG_STAT_SOF_FRAME_NUM_SHIFT); ++} ++ ++/***************************************************************************** ++* @brief Device level interrupt operations ++* @note ++* Use the IPROC_USBD_IRQ_xxx definitions with these routines. These ++* definitions are bit-wise, and allow operations on multiple interrupts ++* by OR'ing the definitions together. ++* DeviceIrqClear(), DeviceIrqDisable(), DeviceIrqEnable() use their mask ++* parameter to operate only on the interrupts set in the mask. E.g. ++* DeviceIrqEnable( DEVICE_IRQ_SET_INTF ); ++* DeviceIrqEnable( DEVICE_IRQ_SET_CFG ); ++* and ++* DeviceIrqEnable( DEVICE_IRQ_SET_INTF | DEVICE_IRQ_SET_CFG ); ++* are equivalent. ++* DeviceIrqMask() returns a mask of all the interrupts that are enabled. ++* DeviceIrqStatus() returns a mask of all the interrupts that have an active status. 
++*****************************************************************************/ ++static inline uint iproc_usbd_irq_active(struct iproc_usbd_regs *base) ++{ ++ return(IPROC_USBD_READ(base->dev_irq_status)); ++} ++ ++static inline void iproc_usbd_irq_clear(struct iproc_usbd_regs *base, uint mask) ++{ ++ IPROC_USBD_WRITE(base->dev_irq_status, mask); ++} ++ ++static inline void iproc_usbd_irq_dis(struct iproc_usbd_regs *base, uint mask) ++{ ++ IPROC_USBD_BITS_SET(base->dev_irq_mask, mask); ++} ++ ++static inline void iproc_usbd_irq_en(struct iproc_usbd_regs *base, uint mask) ++{ ++ IPROC_USBD_BITS_CLEAR(base->dev_irq_mask, mask); ++} ++static inline uint iproc_usbd_irq_mask(struct iproc_usbd_regs *base) ++{ ++ return((~IPROC_USBD_READ(base->dev_irq_mask)) & IPROC_USBD_IRQ_ALL); ++} ++ ++/***************************************************************************** ++* @brief Disable / Enable NAK responses for all OUT endpoints. ++*****************************************************************************/ ++static inline void iproc_usbd_nak_response_dis(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_CLEAR(base->dev_ctrl, IPROC_USBD_REG_CTRL_OUT_NAK_ALL_ENABLE); ++} ++ ++static inline void iproc_usbd_nak_response_en(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_SET(base->dev_ctrl, IPROC_USBD_REG_CTRL_OUT_NAK_ALL_ENABLE); ++} ++ ++/***************************************************************************** ++* @brief PHY error detected ++*****************************************************************************/ ++static inline bool iproc_usbd_phy_err_detect(struct iproc_usbd_regs *base) ++{ ++ return(IPROC_USBD_READ(base->dev_status) & IPROC_USBD_REG_STAT_PHY_ERROR ? true : false); ++} ++ ++/***************************************************************************** ++* @brief Remote Wakeup operations. ++* DeviceRemoteWakeupEnable() and DeviceRemoteWakeupDisable() are used to ++* specify device if is going to attempt this. 
++* DeviceRemoteWakeupAllowed() indicates if host has enabled this feature. ++* The associated DEVICE_IRQ_REMOTEWAKEUP_DELTA can be used to determine ++* changes to the status of this feature. ++* DeviceRemoteWakeupStart(); delayMsec(1); DeviceRemoteWakeupStop(); is ++* used for controlling the wakeup signalling. ++*****************************************************************************/ ++static inline bool iproc_usbd_wakeup_allow(struct iproc_usbd_regs *base) ++{ ++ return(IPROC_USBD_READ(base->dev_status) & IPROC_USBD_REG_STAT_REMOTE_WAKEUP_ALLOWED ? true : false); ++} ++ ++static inline void iproc_usbd_wakeup_dis(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_CLEAR(base->dev_cfg, IPROC_USBD_REG_CFG_REMOTE_WAKEUP_ENABLE); ++} ++ ++static inline void iproc_usbd_wakeup_en(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_SET(base->dev_cfg, IPROC_USBD_REG_CFG_REMOTE_WAKEUP_ENABLE); ++} ++ ++static inline void iproc_usbd_wakeup_start(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_SET(base->dev_ctrl, IPROC_USBD_REG_CTRL_RESUME_SIGNAL_ENABLE); ++} ++ ++static inline void iproc_usbd_wakeup_stop(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_CLEAR(base->dev_ctrl, IPROC_USBD_REG_CTRL_RESUME_SIGNAL_ENABLE); ++} ++ ++/***************************************************************************** ++* @brief Control whether or not device advertises itself as self-powered. ++*****************************************************************************/ ++static inline void iproc_usbd_self_pwr_dis(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_CLEAR(base->dev_cfg, IPROC_USBD_REG_CFG_SELF_PWR_ENABLE); ++} ++ ++static inline void iproc_usbd_self_pwr_en(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_SET(base->dev_cfg, IPROC_USBD_REG_CFG_SELF_PWR_ENABLE); ++} ++ ++/***************************************************************************** ++* @brief Control whether or not device SET DESCRIPTOR support is enabled. 
++* If disabled, STALL will be issued upon receipt of a SET DESCRIPTOR request. ++*****************************************************************************/ ++static inline void iproc_usbd_set_desc_dis(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_CLEAR(base->dev_cfg, IPROC_USBD_REG_CFG_SET_DESCRIPTOR_ENABLE); ++} ++ ++static inline void iproc_usbd_set_desc_en(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_SET(base->dev_cfg, IPROC_USBD_REG_CFG_SET_DESCRIPTOR_ENABLE); ++} ++ ++/***************************************************************************** ++* @brief Device SET configuration or SET interface has completed. ++* If disabled, STALL will be issued upon receipt of a SET DESCRIPTOR request. ++*****************************************************************************/ ++static inline void iproc_usbd_setup_done(struct iproc_usbd_regs *base) ++{ ++ IPROC_USBD_BITS_SET(base->dev_ctrl, IPROC_USBD_REG_CTRL_CSR_DONE); ++} ++ ++/***************************************************************************** ++* @brief Link speed routines. ++* Use the usbDevHw_DEVICE_SPEED_xxx definitions with these routines. These ++* DeviceSpeedRequested() indicates the desired link speed. ++* DeviceSpeedEnumerated() returns the speed negotiated with the host. ++* The associated DEVICE_IRQ_SPEED_ENUM_DONE can be used to determine ++* when speed negotiation has completed. 
++*****************************************************************************/ ++static inline uint iproc_usbd_speed_get(struct iproc_usbd_regs *base) ++{ ++ switch(IPROC_USBD_READ(base->dev_status) & IPROC_USBD_REG_STAT_SPD_MASK) { ++ case IPROC_USBD_REG_STAT_SPD_LS: ++ return(IPROC_USBD_SPEED_LOW); ++ ++ case IPROC_USBD_REG_STAT_SPD_HS: ++ return(IPROC_USBD_SPEED_HIGH); ++ ++ case IPROC_USBD_REG_STAT_SPD_FS: ++ case IPROC_USBD_REG_STAT_SPD_FS_48MHZ: ++ return(IPROC_USBD_SPEED_FULL); ++ } ++ ++ return IPROC_USBD_SPEED_FULL; ++} ++ ++static inline void iproc_usbd_speed_req(struct iproc_usbd_regs *base, uint speed) ++{ ++ IPROC_USBD_BITS_CLEAR(base->dev_cfg, IPROC_USBD_REG_CFG_SPD_MASK); ++ ++ switch(speed) { ++ case IPROC_USBD_SPEED_LOW: ++ IPROC_USBD_BITS_SET(base->dev_cfg, IPROC_USBD_REG_CFG_SPD_LS); ++ break; ++ ++ case IPROC_USBD_SPEED_HIGH: ++ IPROC_USBD_BITS_SET(base->dev_cfg, IPROC_USBD_REG_CFG_SPD_HS); ++ break; ++ ++ case IPROC_USBD_SPEED_FULL: ++ default: ++ IPROC_USBD_BITS_SET(base->dev_cfg, IPROC_USBD_REG_CFG_SPD_FS); ++ break; ++ } ++} ++ ++/***************************************************************************** ++* @brief Finalize (terminate) / Initialize Endpoint operations ++* @param num - Endpoint number ++* @param dirn - Endpoint direction. See ENDPT_DIRN_xxx definitions ++* @param dirn - Endpoint type. See ENDPT_TYPE_xxx definitions ++* @param dirn - Endpoint max packet size. 
++*****************************************************************************/ ++static inline void iproc_usbd_ep_ops_finish(struct iproc_usbd_regs *base, uint num) ++{ ++} ++ ++static inline void iproc_usbd_ep_ops_init(struct iproc_usbd_regs *base, uint num, uint type, uint dirn, uint maxPktSize) ++{ ++ if ((type == IPROC_USBD_EP_TYPE_CTRL) || (dirn == IPROC_USBD_EP_DIR_OUT)) { ++ IPROC_USBD_WRITE(base->ep_fifo_out[num].ctrl, (type << IPROC_USBD_REG_EP_FIFO_CTRL_TYPE_SHIFT)); ++ IPROC_USBD_WRITE(base->ep_fifo_out[num].status, IPROC_USBD_READ(base->ep_fifo_out[num].status)); ++ IPROC_USBD_WRITE(base->ep_fifo_out[num].size1, 0); ++ IPROC_USBD_WRITE(base->ep_fifo_out[num].size2, ((maxPktSize >> 2) << 16) | maxPktSize); ++#if IPROC_USBD_MULTI_RX_FIFO ++ IPROC_USBD_BITS_SET(base->ep_fifo_out[num].size2, ((maxPktSize + 3) >> 2) << IPROC_USBD_REG_EP_FIFO_SIZE2_OUT_DEPTH_SHIFT)); ++#endif ++ } ++ if ((type == IPROC_USBD_EP_TYPE_CTRL) || (dirn == IPROC_USBD_EP_DIR_IN)) { ++ IPROC_USBD_WRITE(base->ep_fifo_in[num].ctrl, (type << IPROC_USBD_REG_EP_FIFO_CTRL_TYPE_SHIFT)); ++ IPROC_USBD_WRITE(base->ep_fifo_in[num].size2, (maxPktSize << IPROC_USBD_REG_EP_FIFO_SIZE2_PKT_MAX_SHIFT)); ++ IPROC_USBD_WRITE(base->ep_fifo_in[num].size1, (maxPktSize >> 2)); ++ IPROC_USBD_BITS_SET(base->ep_fifo_in[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_IN_FLUSH_ENABLE); ++ IPROC_USBD_BITS_CLEAR(base->ep_fifo_in[num].ctrl, (IPROC_USBD_REG_EP_FIFO_CTRL_NAK_SET | IPROC_USBD_REG_EP_FIFO_CTRL_IN_FLUSH_ENABLE)); ++ } ++ IPROC_USBD_WRITE(base->ep_cfg[num], (num << IPROC_USBD_REG_EP_CFG_FIFO_NUM_SHIFT) | ++ (type << IPROC_USBD_REG_EP_CFG_TYPE_SHIFT) | ++ (maxPktSize << IPROC_USBD_REG_EP_CFG_PKT_MAX_SHIFT) | ++ (dirn == IPROC_USBD_EP_DIR_OUT ? 
IPROC_USBD_REG_EP_CFG_DIRN_OUT : IPROC_USBD_REG_EP_CFG_DIRN_IN)); ++} ++ ++/***************************************************************************** ++* @brief Endpoint Configuration / Interface / Alternate number operations ++* @param num - Endpoint number ++* @param cfg - Configuration number ++* @param intf - Interface number ++* @param alt - Alternate number ++*****************************************************************************/ ++static inline void iproc_usbd_ep_alt_set(struct iproc_usbd_regs *base, uint num, uint alt) ++{ ++ IPROC_USBD_BITS_MODIFY(base->ep_cfg[num], IPROC_USBD_REG_EP_CFG_ALT_NUM_MASK, (alt << IPROC_USBD_REG_EP_CFG_ALT_NUM_SHIFT)); ++} ++ ++static inline void iproc_usbd_ep_cfg_set(struct iproc_usbd_regs *base, uint num, uint cfg) ++{ ++ IPROC_USBD_BITS_MODIFY(base->ep_cfg[num], IPROC_USBD_REG_EP_CFG_CFG_NUM_MASK, (cfg << IPROC_USBD_REG_EP_CFG_CFG_NUM_SHIFT)); ++} ++ ++static inline void iproc_usbd_ep_intf_set(struct iproc_usbd_regs *base, uint num, uint intf) ++{ ++ IPROC_USBD_BITS_MODIFY(base->ep_cfg[num], IPROC_USBD_REG_EP_CFG_INTF_NUM_MASK, (intf << IPROC_USBD_REG_EP_CFG_INTF_NUM_SHIFT)); ++} ++ ++ ++/***************************************************************************** ++* @brief Endpoint DMA routines ++* @param num - Endpoint number ++* @param addr - physical address of buffer or descriptor ++*****************************************************************************/ ++static inline void iproc_usbd_ep_dma_dis(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++#if IPROC_USBD_MULTI_RX_FIFO ++ IPROC_USBD_BITS_CLEAR(base->ep_fifo_out[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_OUT_DMA_ENABLE); ++#else ++ /* ++ * With a single RX FIFO, do not want to do anything, as there might be another OUT capable ++ * endpoint still active and wanting DMA enabled. 
If theory this should be OK, as long as ++ * the DMA descriptor buffer status fields are the last thing updated before being set to ++ * HOST ready, or the first thing updated when being set to HOST busy. Hopefully no ++ * situations arise such that there's contention with the hardware with doing this. ++ */ ++#endif ++ } else { ++ IPROC_USBD_BITS_CLEAR(base->ep_fifo_in[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_IN_DMA_ENABLE); ++ } ++} ++ ++static inline void iproc_usbd_ep_dma_en(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++#if IPROC_USBD_MULTI_RX_FIFO ++ IPROC_USBD_BITS_SET(base->ep_fifo_out[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_OUT_DMA_ENABLE); ++#else ++ IPROC_USBD_BITS_SET(base->dev_ctrl, IPROC_USBD_REG_CTRL_DMA_OUT_ENABLE); ++#endif ++ } else { ++ /* Set the Poll bit in the control register */ ++ IPROC_USBD_BITS_SET(base->ep_fifo_in[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_IN_DMA_ENABLE); ++ } ++} ++ ++static inline void iproc_usbd_ep_dma_buf_addr_set(struct iproc_usbd_regs *base, uint num, uint dirn, void *addr) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_WRITE(base->ep_fifo_out[num].buf_addr, (uint)addr); ++ } ++} ++ ++static inline void iproc_usbd_ep_dma_desc_addr_set(struct iproc_usbd_regs *base, uint num, uint dirn, void *addr) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_WRITE(base->ep_fifo_out[num].desc_addr, (uint)addr); ++ } ++ else { ++ IPROC_USBD_WRITE(base->ep_fifo_in[num].desc_addr, (uint)addr); ++ } ++} ++ ++/***************************************************************************** ++* @brief Endpoint FIFO routines ++* @param num - Endpoint number ++* @note The flush operation is a state. Once enabled, FIFO contents are discared ++* until disabled. Usually enable upon endpoint termination or error, and ++* then disable once operations are to resume normally. 
++*****************************************************************************/ ++static inline bool iproc_usbd_ep_fifo_empty(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++#if IPROC_USBD_MULTI_RX_FIFO ++ return(base->ep_fifo_out[num].status & IPROC_USBD_REG_EP_FIFO_STATUS_OUT_FIFO_EMPTY ? true : false); ++#else ++ return(base->dev_status & IPROC_USBD_REG_STAT_OUT_FIFO_EMPTY ? true : false); ++#endif ++ } ++ return(base->ep_fifo_in[num].status & IPROC_USBD_REG_EP_FIFO_STATUS_IN_FIFO_EMPTY ? true : false); ++} ++ ++static inline void iproc_usbd_ep_fifo_flush_dis(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++#if IPROC_USBD_MULTI_RX_FIFO ++ IPROC_USBD_BITS_CLEAR(base->ep_fifo_out[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_OUT_FLUSH_ENABLE); ++#else ++ IPROC_USBD_BITS_CLEAR(base->dev_ctrl, IPROC_USBD_REG_CTRL_OUT_FIFO_FLUSH_ENABLE); ++#endif ++ } ++ else { ++ IPROC_USBD_BITS_CLEAR(base->ep_fifo_in[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_IN_FLUSH_ENABLE); ++ } ++} ++ ++static inline void iproc_usbd_ep_fifo_flush_en(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++#if IPROC_USBD_MULTI_RX_FIFO ++ IPROC_USBD_BITS_SET(base->ep_fifo_out[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_OUT_FLUSH_ENABLE); ++#else ++ IPROC_USBD_BITS_SET(base->dev_ctrl, IPROC_USBD_REG_CTRL_OUT_FIFO_FLUSH_ENABLE); ++#endif ++ } else { ++ IPROC_USBD_BITS_SET(base->ep_fifo_in[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_IN_FLUSH_ENABLE); ++ } ++} ++ ++/***************************************************************************** ++* @brief Endpoint Frame Number routines ++* @param num - Endpoint number ++* @return Frame number of last packet received on the endpoint, and in the following format. ++* bits[13:3] milli-second frame number ++* bits[2:0] micro-frame number ++* @note Really only applicable to OUT endpoints. IN will always return 0. 
++*****************************************************************************/ ++static inline uint iproc_usbd_ep_frame_num(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ return((IPROC_USBD_READ(base->ep_fifo_out[num].size1) & IPROC_USBD_REG_EP_FIFO_SIZE1_OUT_FRAME_NUM_MASK) >> IPROC_USBD_REG_EP_FIFO_SIZE1_OUT_FRAME_NUM_SHIFT); ++ } ++ return(0); ++} ++ ++/***************************************************************************** ++* @brief Endpoint IRQ / status routines ++* @param num - Endpoint number ++* @note ++* Cannot set specific status for Endpoint interrupts. Can only do operations ++* in a global sense. Once an interrupt occurs for an endpoint, the endpoint ++* status has to be checked for the particular type of interrupt that occurred. ++* ++* The iproc_usbd_ep_irq_en() and iproc_usbd_ep_irq_dis() are used for ++* operations on a specific endpoint. These routines may or may not be used in ++* the context of interrupt processing. ++* ++* Use the usbDevHw_EndptIrqListXxx() routines for operations using a bit-wise ++* list of endpoints (bit 0 for endpoint 0, etc.). Typical use would be for ++* interrupt processing. ++* ++* Use the IPROC_USBD_EP_STAT_xxx definitions with the status routines. These ++* definitions are bit-wise, and allow operations on multiple conditions ++* by OR'ing the definitions together. 
++*****************************************************************************/ ++static inline void iproc_usbd_ep_irq_clear(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_WRITE(base->ep_irq_status, (1 << num) << IPROC_USBD_REG_EP_INTR_OUT_SHIFT); ++ } else { ++ IPROC_USBD_WRITE(base->ep_irq_status, (1 << num) << IPROC_USBD_REG_EP_INTR_IN_SHIFT); ++ } ++} ++ ++static inline void iproc_usbd_ep_irq_dis(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_BITS_SET(base->ep_irq_mask, ((1 << num) << IPROC_USBD_REG_EP_INTR_OUT_SHIFT)); ++ } else { ++ IPROC_USBD_BITS_SET(base->ep_irq_mask, ((1 << num) << IPROC_USBD_REG_EP_INTR_IN_SHIFT)); ++ } ++} ++ ++static inline void iproc_usbd_ep_irq_en(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_BITS_CLEAR(base->ep_irq_mask, ((1 << num) << IPROC_USBD_REG_EP_INTR_OUT_SHIFT)); ++ } else { ++ IPROC_USBD_BITS_CLEAR(base->ep_irq_mask, ((1 << num) << IPROC_USBD_REG_EP_INTR_IN_SHIFT)); ++ } ++} ++ ++static inline uint iproc_usbd_ep_irq_list_active(struct iproc_usbd_regs *base, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ return((IPROC_USBD_READ(base->ep_irq_status) & IPROC_USBD_REG_EP_INTR_OUT_MASK) >> IPROC_USBD_REG_EP_INTR_OUT_SHIFT); ++ } ++ return((IPROC_USBD_READ(base->ep_irq_status) & IPROC_USBD_REG_EP_INTR_IN_MASK) >> IPROC_USBD_REG_EP_INTR_IN_SHIFT); ++} ++ ++static inline void iproc_usbd_ep_irq_list_clear(struct iproc_usbd_regs *base, uint dirn, uint mask) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_WRITE(base->ep_irq_status, (mask << IPROC_USBD_REG_EP_INTR_OUT_SHIFT)); /*strat from bit 16 */ ++ } else { ++ IPROC_USBD_WRITE(base->ep_irq_status, (mask << IPROC_USBD_REG_EP_INTR_IN_SHIFT)); /* start from bit 0 */ ++ } ++} ++ ++static inline uint iproc_usbd_ep_stat_active(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ 
++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ return(IPROC_USBD_READ(base->ep_fifo_out[num].status)); /* End Point Status register */ ++ } ++ return(IPROC_USBD_READ(base->ep_fifo_in[num].status)); ++} ++ ++static inline void iproc_usbd_ep_stat_clear(struct iproc_usbd_regs *base, uint num, uint dirn, uint mask) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_WRITE(base->ep_fifo_out[num].status, mask); ++ } else { ++ IPROC_USBD_WRITE(base->ep_fifo_in[num].status, mask); ++ } ++} ++ ++/***************************************************************************** ++* @brief Endpoint NAK routines ++* @param num - Endpoint number ++* @note A NAK response can be enabled by the application by the EndptNakEnable(). ++* The EndptNakInProgress() is used to determine if the controller is ++* currently actively sending NAKs. This may have been a result of the ++* EndptNakEnable() or automatically by the controller under certain ++* conditions. The EndptNakClear() must be used to terminate the NAKs. 
++*****************************************************************************/ ++static inline void iproc_usbd_ep_nak_clear(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_BITS_SET(base->ep_fifo_out[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_NAK_CLEAR); ++ } else { ++ IPROC_USBD_BITS_SET(base->ep_fifo_in[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_NAK_CLEAR); ++ } ++} ++ ++static inline void iproc_usbd_ep_nak_en(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_BITS_SET(base->ep_fifo_out[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_NAK_SET); ++ } else { ++ IPROC_USBD_BITS_SET(base->ep_fifo_in[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_NAK_SET); ++ } ++} ++ ++static inline void iproc_usbd_ep_nak_dis(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_BITS_CLEAR(base->ep_fifo_out[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_NAK_SET); ++ } else { ++ IPROC_USBD_BITS_CLEAR(base->ep_fifo_in[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_NAK_SET); ++ } ++} ++ ++static inline bool iproc_usbd_ep_nak_progress(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ return (IPROC_USBD_READ(base->ep_fifo_out[num].ctrl) & IPROC_USBD_REG_EP_FIFO_CTRL_NAK_IN_PROGRESS) ? true : false; ++ } ++ return (IPROC_USBD_READ(base->ep_fifo_in[num].ctrl) & IPROC_USBD_REG_EP_FIFO_CTRL_NAK_IN_PROGRESS) ? true : false; ++} ++ ++/***************************************************************************** ++* @brief Endpoint Stall routines ++* Disable / Enable STALL responses (halt feature) on a given endpoint. 
++* @param num - Endpoint number ++*****************************************************************************/ ++static inline void iproc_usbd_ep_stall_dis(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_BITS_CLEAR(base->ep_fifo_out[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_STALL_ENABLE); ++ } else { ++ IPROC_USBD_BITS_CLEAR(base->ep_fifo_in[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_STALL_ENABLE); ++ } ++} ++ ++static inline void iproc_usbd_ep_stall_en(struct iproc_usbd_regs *base, uint num, uint dirn) ++{ ++#if IPROC_USBD_MULTI_RX_FIFO ++ if (!(IPROC_USBD_READ(base->ep_fifo_out[num].status) & IPROC_USBD_REG_EP_FIFO_STATUS_OUT_FIFO_EMPTY)) ++#else ++ if (!(IPROC_USBD_READ(base->dev_status) & IPROC_USBD_REG_STAT_OUT_FIFO_EMPTY)) ++#endif ++ return; ++ ++ if (dirn == IPROC_USBD_EP_DIR_OUT) { ++ IPROC_USBD_BITS_SET(base->ep_fifo_out[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_STALL_ENABLE); ++ } else { ++ IPROC_USBD_BITS_SET(base->ep_fifo_in[num].ctrl, IPROC_USBD_REG_EP_FIFO_CTRL_STALL_ENABLE); ++ } ++} ++ ++ ++/***************************************************************************** ++* @brief Initialize device controller operations ++*****************************************************************************/ ++static inline void iproc_usbd_ops_init(struct iproc_usbd_regs *base) ++{ ++ int idx; ++ ++ iproc_usbd_dma_dis(base); ++ iproc_usbd_irq_dis(base, IPROC_USBD_IRQ_ALL); ++ iproc_usbd_irq_clear(base, IPROC_USBD_IRQ_ALL); ++ ++ /* @todo Create and use usbDevHw_EndptIrqListDisable?? 
*/ ++ for (idx = 0; idx < IPROC_USBD_EP_CFG_CNT; idx++) { ++ iproc_usbd_ep_irq_dis(base, idx, IPROC_USBD_EP_DIR_IN); ++ iproc_usbd_ep_irq_clear(base, idx, IPROC_USBD_EP_DIR_IN); ++ iproc_usbd_ep_stat_clear(base, idx, IPROC_USBD_EP_DIR_IN, iproc_usbd_ep_stat_active(base, idx, IPROC_USBD_EP_DIR_IN)); ++ ++ iproc_usbd_ep_irq_dis(base, idx, IPROC_USBD_EP_DIR_OUT); ++ iproc_usbd_ep_irq_clear(base, idx, IPROC_USBD_EP_DIR_OUT); ++ iproc_usbd_ep_stat_clear(base, idx, IPROC_USBD_EP_DIR_OUT, iproc_usbd_ep_stat_active(base, idx, IPROC_USBD_EP_DIR_OUT)); ++ } ++ ++ IPROC_USBD_WRITE(base->dev_cfg, (IPROC_USBD_REG_CFG_SET_DESCRIPTOR_ENABLE | ++ IPROC_USBD_REG_CFG_UTMI_8BIT_ENABLE | ++ IPROC_USBD_REG_CFG_CSR_PROGRAM_ENABLE | ++ IPROC_USBD_REG_CFG_SPD_HS)); ++ ++ IPROC_USBD_WRITE(base->dev_ctrl, (IPROC_USBD_REG_CTRL_LE_ENABLE | ++ IPROC_USBD_REG_CTRL_DISCONNECT_ENABLE | ++ IPROC_USBD_REG_CTRL_DMA_MODE_ENABLE | ++ IPROC_USBD_REG_CTRL_DMA_IN_ENABLE | ++ IPROC_USBD_REG_CTRL_DMA_OUT_ENABLE | ++ IPROC_USBD_REG_CTRL_DMA_DESC_UPDATE_ENABLE | ++ IPROC_USBD_REG_CTRL_OUT_NAK_ALL_ENABLE | ++ IPROC_USBD_REG_CTRL_DMA_OUT_THRESHOLD_LEN_MASK | ++ IPROC_USBD_REG_CTRL_DMA_BURST_LEN_MASK | ++#if !IPROC_USBD_MULTI_RX_FIFO ++ IPROC_USBD_REG_CTRL_OUT_FIFO_FLUSH_ENABLE | ++#endif ++ IPROC_USBD_REG_CTRL_DMA_BURST_ENABLE)); ++ ++ IPROC_USBD_WRITE(base->dev_irq_mask, (IPROC_USBD_REG_INTR_BUS_IDLE | ++ IPROC_USBD_REG_INTR_SOF_RX)); ++ IPROC_USBD_WRITE(base->ep_irq_mask,0); ++} ++ ++/***************************************************************************** ++* @brief Disable / Enable USB device ++*****************************************************************************/ ++static inline void iproc_usbd_dis(struct iproc_usbd_idm_regs *idm_base) ++{ ++ /* reset usb device */ ++ IPROC_USBD_BITS_SET(idm_base->reset_ctrl, IPROC_USB2D_IDM_REG_RESET_CTRL_RESET); ++ ++ /* disable usb device clock */ ++ IPROC_USBD_BITS_CLEAR(idm_base->io_ctrl, IPROC_USB2D_IDM_REG_IO_CTRL_DIRECT_CLK_ENABLE); ++ mdelay(10); ++} 
++ ++static inline void iproc_usbd_en(struct iproc_usbd_idm_regs *idm_base) ++{ ++ /* enable usb device clock */ ++ IPROC_USBD_BITS_SET(idm_base->io_ctrl, IPROC_USB2D_IDM_REG_IO_CTRL_DIRECT_CLK_ENABLE); ++ mdelay(10); ++ ++ /* get usb device out of reset */ ++ IPROC_USBD_BITS_CLEAR(idm_base->reset_ctrl, IPROC_USB2D_IDM_REG_RESET_CTRL_RESET); ++ mdelay(100); ++} ++ ++ ++ ++#endif /* _USBD_REGS_H_ */ +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig +--- a/drivers/usb/host/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/usb/host/Kconfig 2017-11-09 17:54:01.760430000 +0800 +@@ -298,6 +298,13 @@ config USB_EHCI_HCD_PLATFORM + + If unsure, say N. + ++config USB_EHCI_XGS_IPROC ++ bool "BRCM XGS iProc EHCI patch" ++ depends on (ARCH_XGS_IPROC && USB_EHCI_HCD_PLATFORM) ++ default n ++ ---help--- ++ This option is for BRCM XGS iProc EHCI patch ++ + config USB_OCTEON_EHCI + bool "Octeon on-chip EHCI support (DEPRECATED)" + depends on CAVIUM_OCTEON_SOC +@@ -561,6 +568,13 @@ config USB_OHCI_HCD_PLATFORM + + If unsure, say N. 
+ ++config USB_OHCI_XGS_IPROC ++ bool "BRCM XGS iProc OHCI patch" ++ depends on (ARCH_XGS_IPROC && USB_OHCI_HCD_PLATFORM) ++ default n ++ ---help--- ++ This option is for BRCM XGS iProc OHCI patch ++ + config USB_OCTEON_OHCI + bool "Octeon on-chip OHCI support (DEPRECATED)" + depends on CAVIUM_OCTEON_SOC +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c +--- a/drivers/usb/host/ehci-platform.c 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/usb/host/ehci-platform.c 2017-11-09 17:54:01.791421000 +0800 +@@ -41,6 +41,14 @@ + #define EHCI_MAX_CLKS 3 + #define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv) + ++ ++#ifdef CONFIG_USB_EHCI_XGS_IPROC ++#include ++#include ++ ++#define BCM_USB_FIFO_THRESHOLD 0x00800040 ++#endif /* CONFIG_USB_EHCI_XGS_IPROC */ ++ + struct ehci_platform_priv { + struct clk *clks[EHCI_MAX_CLKS]; + struct reset_control *rst; +@@ -150,10 +158,24 @@ static int ehci_platform_probe(struct pl + struct ehci_platform_priv *priv; + struct ehci_hcd *ehci; + int err, irq, phy_num, clk = 0; ++#ifdef CONFIG_USB_EHCI_XGS_IPROC ++ struct usb_phy *phy; ++#endif /* CONFIG_USB_EHCI_XGS_IPROC */ + + if (usb_disabled()) + return -ENODEV; + ++#ifdef CONFIG_USB_EHCI_XGS_IPROC ++ phy = devm_usb_get_phy_by_phandle(&dev->dev, "usb-phy", 0); ++ if (IS_ERR(phy)) { ++ dev_err(&dev->dev, "unable to find transceiver\n"); ++ return PTR_ERR(phy); ++ } ++ ++ if (phy->flags != IPROC_USB_MODE_HOST) ++ return -ENODEV; ++#endif ++ + /* + * Use reasonable defaults so platforms don't have to provide these + * with DT probing on ARM. 
+@@ -292,6 +314,9 @@ static int ehci_platform_probe(struct pl + goto err_power; + + device_wakeup_enable(hcd->self.controller); ++#ifdef CONFIG_USB_EHCI_XGS_IPROC ++ ehci_writel(ehci, BCM_USB_FIFO_THRESHOLD, &ehci->regs->reserved4[6]); ++#endif + platform_set_drvdata(dev, hcd); + + return err; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/host/ohci-platform.c b/drivers/usb/host/ohci-platform.c +--- a/drivers/usb/host/ohci-platform.c 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/usb/host/ohci-platform.c 2017-11-09 17:54:01.874447000 +0800 +@@ -35,6 +35,14 @@ + #define OHCI_MAX_CLKS 3 + #define hcd_to_ohci_priv(h) ((struct ohci_platform_priv *)hcd_to_ohci(h)->priv) + ++#ifdef CONFIG_USB_OHCI_XGS_IPROC ++#include ++#include ++ ++#define UHCRHDA_REG_OFFSET 0x48 ++#define UHCRHDA_OCPM (1 << 11) ++#endif ++ + struct ohci_platform_priv { + struct clk *clks[OHCI_MAX_CLKS]; + struct reset_control *rst; +@@ -118,10 +126,24 @@ static int ohci_platform_probe(struct pl + struct ohci_platform_priv *priv; + struct ohci_hcd *ohci; + int err, irq, phy_num, clk = 0; ++#ifdef CONFIG_USB_OHCI_XGS_IPROC ++ struct usb_phy *phy; ++#endif /* CONFIG_USB_OHCI_XGS_IPROC */ + + if (usb_disabled()) + return -ENODEV; + ++#ifdef CONFIG_USB_OHCI_XGS_IPROC ++ phy = devm_usb_get_phy_by_phandle(&dev->dev, "usb-phy", 0); ++ if (IS_ERR(phy)) { ++ dev_err(&dev->dev, "unable to find transceiver\n"); ++ return PTR_ERR(phy); ++ } ++ ++ if (phy->flags != IPROC_USB_MODE_HOST) ++ return -ENODEV; ++#endif /* CONFIG_USB_OHCI_XGS_IPROC */ ++ + /* + * Use reasonable defaults so platforms don't have to provide these + * with DT probing on ARM. 
+@@ -251,6 +273,12 @@ static int ohci_platform_probe(struct pl + hcd->rsrc_start = res_mem->start; + hcd->rsrc_len = resource_size(res_mem); + ++#ifdef CONFIG_USB_OHCI_XGS_IPROC ++#if defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3) || defined(CONFIG_MACH_GH2) ++ writel(readl(hcd->regs + UHCRHDA_REG_OFFSET) | UHCRHDA_OCPM, hcd->regs + UHCRHDA_REG_OFFSET); ++#endif ++#endif ++ + err = usb_add_hcd(hcd, irq, IRQF_SHARED); + if (err) + goto err_power; +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/phy/Kconfig b/drivers/usb/phy/Kconfig +--- a/drivers/usb/phy/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/usb/phy/Kconfig 2017-11-09 17:54:02.191425000 +0800 +@@ -213,4 +213,11 @@ config USB_ULPI_VIEWPORT + Provides read/write operations to the ULPI phy register set for + controllers with a viewport register (e.g. Chipidea/ARC controllers). + ++config USBPHY_XGS_IPROC ++ tristate "BRCM iProc USB PHY" ++ depends on ARCH_XGS_IPROC ++ select USB_PHY ++ help ++ BRCM iProc USB PHY driver ++ + endmenu +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/phy/Makefile b/drivers/usb/phy/Makefile +--- a/drivers/usb/phy/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/usb/phy/Makefile 2017-11-09 17:54:02.191443000 +0800 +@@ -27,3 +27,4 @@ obj-$(CONFIG_USB_RCAR_PHY) += phy-rcar- + obj-$(CONFIG_USB_ULPI) += phy-ulpi.o + obj-$(CONFIG_USB_ULPI_VIEWPORT) += phy-ulpi-viewport.o + obj-$(CONFIG_KEYSTONE_USB_PHY) += phy-keystone.o ++obj-$(CONFIG_USBPHY_XGS_IPROC) += phy-xgs-iproc.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/usb/phy/phy-xgs-iproc.c b/drivers/usb/phy/phy-xgs-iproc.c +--- a/drivers/usb/phy/phy-xgs-iproc.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/usb/phy/phy-xgs-iproc.c 2017-11-09 
17:54:02.236439000 +0800 +@@ -0,0 +1,745 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ * ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++ ++#define USB2D_IDM_IDM_IO_CONTROL_DIRECT_ADDR(base) (base + 0x408) ++#define USB2D_IDM_IDM_RESET_CONTROL_ADDR(base) (base + 0x800) ++#define IPROC_WRAP_MISC_STATUS__USBPHY_PLL_LOCK 1 ++#define USB2D_IDM_IDM_RESET_CONTROL__RESET 0 ++#define USB2D_IDM_IDM_IO_CONTROL_DIRECT__clk_enable 0 ++ ++#if defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) ++ ++#define IPROC_CCB_MDIO_MII_CTRL_ADDR(base) (base + 0x0) ++#define IPROC_CCB_MDIO_MII_DATA_ADDR(base) (base + 0x4) ++#define IPROC_CCB_MDIO_COMPATIBLE "brcm,iproc-ccb-mdio" ++ ++#if defined(CONFIG_MACH_HX4) ++#define IPROC_WRAP_IPROC_XGPLL_CTRL_0_ADDR(base) (base + 0x1c) ++#define IPROC_WRAP_IPROC_XGPLL_CTRL_4_ADDR(base) (base + 0x2c) ++#define IPROC_WRAP_USBPHY_CTRL_ADDR(base) (base + 0x34) ++#define IPROC_WRAP_MISC_STATUS_ADDR(base) (base + 0x38) ++#define IPROC_CLK_NDIV_40 0x80 ++#define IPROC_CLK_NDIV_20 0x8C ++#define USB_CLK_NDIV_MASK 0xFE7FFE00 ++#define USB_CLK_PLL_RESET_MASK 0xFF7FFE00 ++#define USB_CLK_PHY_RESET_MASK 0xFFFFFE00 ++#define USB_CLK_NDIV_40 0x30 ++#define USB_CLK_NDIV_20 0x60 ++#define IPROC_WRAP_IPROC_XGPLL_CTRL_4__NDIV_INT_R 0 ++#define IPROC_WRAP_IPROC_XGPLL_CTRL_4__NDIV_INT_WIDTH 8 ++#define IPROC_WRAP_IPROC_XGPLL_CTRL_0__CH3_MDIV_R 8 ++#define IPROC_WRAP_IPROC_XGPLL_CTRL_0__CH3_MDIV_WIDTH 8 ++#else ++#define IPROC_DDR_PLL_CTRL_REGISTER_3_ADDR(base) (base + 0x0c) ++#define IPROC_DDR_PLL_CTRL_REGISTER_5_ADDR(base) (base + 0x14) ++#define IPROC_WRAP_USBPHY_CTRL_ADDR(base) (base + 0x20) ++#define IPROC_WRAP_MISC_STATUS_ADDR(base) (base + 0x28) ++#define IPROC_DDR_PLL_CTRL_REGISTER_3__NDIV_INT_R 0 ++#define IPROC_DDR_PLL_CTRL_REGISTER_3__NDIV_INT_WIDTH 10 ++#define 
IPROC_DDR_PLL_CTRL_REGISTER_5__CH1_MDIV_R 0 ++#define IPROC_DDR_PLL_CTRL_REGISTER_5__CH1_MDIV_WIDTH 8 ++#endif /* defined(CONFIG_MACH_HX4) */ ++ ++#else /* defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) */ ++ ++#define IPROC_WRAP_USBPHY_CTRL_0__PHY_IDDQ 26 ++#define IPROC_WRAP_USBPHY_CTRL_0__PLL_RESETB 25 ++#define IPROC_WRAP_USBPHY_CTRL_0__RESETB 24 ++#define IPROC_WRAP_USBPHY_CTRL_2__PHY_ISO 17 ++#define IPROC_WRAP_USBPHY_CTRL_2__P1CTL_B0 0 ++#define IPROC_WRAP_USBPHY_CTRL_2__P1CTL_B11 11 ++ ++#if defined(CONFIG_MACH_SB2) ++#define IPROC_WRAP_USBPHY_CTRL_0_ADDR(base) (base + 0x28) ++#define IPROC_WRAP_USBPHY_CTRL_2_ADDR(base) (base + 0x30) ++#define IPROC_WRAP_MISC_STATUS_ADDR(base) (base + 0x44) ++#define IPROC_WRAP_TOP_STRAP_CTRL_ADDR(base) (base + 0x70) ++#define IPROC_WRAP_TOP_STRAP_CTRL__USB_DEVICE 10 ++ ++#elif defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3) || \ ++ defined(CONFIG_MACH_GH2) ++#define IPROC_WRAP_USBPHY_CTRL_0_ADDR(base) (base + 0x44) ++#define IPROC_WRAP_USBPHY_CTRL_2_ADDR(base) (base + 0x4c) ++#define IPROC_WRAP_MISC_STATUS_ADDR(base) (base + 0x58) ++#define IPROC_WRAP_TOP_STRAP_STATUS_ADDR(base) (base + 0xa4) ++#define IPROC_WRAP_TOP_STRAP_STATUS__USB2_SEL 17 ++#if defined(CONFIG_MACH_GH2) ++#define USBH_Utmi_p0Ctl(base) (base + 0x10) ++static void __iomem *USBH_Utmi_base = NULL; ++#endif /* defined(CONFIG_MACH_GH2) */ ++#endif ++ ++#endif /* defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2) */ ++ ++struct iproc_usb_priv { ++ struct usb_phy phy; ++ struct device *dev; ++ struct device_node *dn; ++ void __iomem *wrap_base; ++ void __iomem *idm_base; ++ uint usb_mode; ++}; ++ ++extern void __iomem *get_iproc_wrap_ctrl_base(void); ++ ++/*************************************************************************** ++**************************************************************************** ++***************************************************************************/ ++static const struct of_device_id xgs_iproc_usb_phy_dt_ids[] = 
{ ++ { .compatible = "brcm,usb-phy,hx4", }, ++ { .compatible = "brcm,usb-phy,kt2", }, ++ { .compatible = "brcm,usb-phy,gh", }, ++ { .compatible = "brcm,usb-phy,sb2", }, ++ { .compatible = "brcm,usb-phy,hr3", }, ++ { } ++}; ++MODULE_DEVICE_TABLE(of, xgs_iproc_usb_phy_dt_ids); ++ ++ ++static int xgs_iproc_usb_phy_mode(struct iproc_usb_priv *iproc_usb_data) ++{ ++ void __iomem *wrap_base = iproc_usb_data->wrap_base; ++ struct device *dev = iproc_usb_data->dev; ++ int usb_mode = IPROC_USB_MODE_HOST; ++ ulong val; ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++ int gpio_pin, ret; ++#endif /* (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) */ ++ ++ if (!wrap_base) { ++ return -EINVAL; ++ } ++ ++#if (defined(CONFIG_MACH_HX4) || defined(CONFIG_MACH_KT2)) ++ /* gpio pin 4 to control host/device mode */ ++ gpio_pin = of_get_named_gpio(dev->of_node, "usbdev-gpio", 0); ++ ++ if (gpio_pin < 0) { ++ dev_warn(dev, "No gpio pin set for USB device detection(default to 4)\n"); ++ gpio_pin = 4; ++ } ++ ++ ret = gpio_request(gpio_pin, "usbdev-gpio"); ++ if (ret != 0) { ++ dev_err(dev, "request gpio #%d error.\n", gpio_pin); ++ return ret; ++ } ++ ++ val = __gpio_get_value(gpio_pin); ++ ++ gpio_free(gpio_pin); ++ ++ if (val & 1) { ++ usb_mode = IPROC_USB_MODE_DEVICE; ++ } ++#elif defined(CONFIG_MACH_SB2) ++ /* u-boot enable this bit to indicate usb in host mode */ ++ val = readl_relaxed(IPROC_WRAP_TOP_STRAP_CTRL_ADDR(wrap_base)); ++ if (!(val & (1 << IPROC_WRAP_TOP_STRAP_CTRL__USB_DEVICE))) { ++ usb_mode = IPROC_USB_MODE_DEVICE; ++ } ++#else ++ /* u-boot enable this bit to indicate usb in host mode */ ++ val = readl_relaxed(IPROC_WRAP_TOP_STRAP_STATUS_ADDR(wrap_base)); ++ if (!(val & (1 << IPROC_WRAP_TOP_STRAP_STATUS__USB2_SEL))) { ++ usb_mode = IPROC_USB_MODE_DEVICE; ++ } ++#endif ++ ++ dev_info(dev, "usb mode: %s\n", (usb_mode == IPROC_USB_MODE_DEVICE) ? 
"DEVICE" : "HOST"); ++ ++ return usb_mode; ++} ++ ++#if (defined (CONFIG_MACH_HX4) || defined (CONFIG_MACH_KT2)) ++/* Returns USB PHY PLL ref clock in MHz */ ++static uint _get_usb_clk(void __iomem *wrap_base) ++{ ++ uint ndiv, mdiv, refclk; ++ ulong val; ++ ++#if defined(CONFIG_MACH_HX4) ++ val = readl_relaxed(IPROC_WRAP_IPROC_XGPLL_CTRL_4_ADDR(wrap_base)); ++ ndiv = ((val >> IPROC_WRAP_IPROC_XGPLL_CTRL_4__NDIV_INT_R) & ++ ~(0xFFFFFFFF << IPROC_WRAP_IPROC_XGPLL_CTRL_4__NDIV_INT_WIDTH)); ++ ++ val = readl_relaxed(IPROC_WRAP_IPROC_XGPLL_CTRL_0_ADDR(wrap_base)); ++ mdiv = ((val >> IPROC_WRAP_IPROC_XGPLL_CTRL_0__CH3_MDIV_R) & ++ ~(0xFFFFFFFF << IPROC_WRAP_IPROC_XGPLL_CTRL_0__CH3_MDIV_WIDTH)); ++#else ++ val = readl_relaxed(IPROC_DDR_PLL_CTRL_REGISTER_3_ADDR(wrap_base)); ++ ndiv = ((val >> IPROC_DDR_PLL_CTRL_REGISTER_3__NDIV_INT_R) & ++ ~(0xFFFFFFFF << IPROC_DDR_PLL_CTRL_REGISTER_3__NDIV_INT_WIDTH)); ++ ++ /* read channel 1 mdiv */ ++ val = readl_relaxed(IPROC_DDR_PLL_CTRL_REGISTER_5_ADDR(wrap_base)); ++ mdiv = ((val >> IPROC_DDR_PLL_CTRL_REGISTER_5__CH1_MDIV_R) & ++ ~(0xFFFFFFFF << IPROC_DDR_PLL_CTRL_REGISTER_5__CH1_MDIV_WIDTH)); ++#endif ++ ++ refclk = (25 * ndiv) / mdiv; ++ ++ return refclk; ++} ++#endif /* (defined (CONFIG_MACH_HX4) || defined (CONFIG_MACH_KT2)) */ ++ ++static int iproc_usb_phy_hx4_config(struct iproc_usb_priv *iproc_usb_data) ++{ ++#if (defined (CONFIG_MACH_HX4) || defined (CONFIG_MACH_KT2)) ++ void __iomem *wrap_base = iproc_usb_data->wrap_base; ++ void __iomem *ccb_mdio_base = NULL; ++ struct device_node *np; ++ ulong ndiv, precmd, miicmd, miidata; ++ ulong val, mask; ++ uint count = 0; ++ ++ if (!wrap_base) ++ return -EINVAL; ++ ++ if (iproc_usb_data->usb_mode == IPROC_USB_MODE_DEVICE) { ++ np = of_find_compatible_node(NULL, NULL, IPROC_CCB_MDIO_COMPATIBLE); ++ if (!np) { ++ printk(KERN_ERR "Failed to find CCB MDIO defined in DT\n"); ++ return -ENODEV; ++ } ++ ++ ccb_mdio_base = of_iomap(np, 0); ++ if (!ccb_mdio_base) { ++ printk(KERN_ERR 
"Unable to iomap CCB MDIO base address\n"); ++ return -ENXIO; ++ } ++ ++ ndiv = 1920 / _get_usb_clk(wrap_base); ++ ++ /* Construct precmd with Start Bit, PHY address and turnaround time */ ++ /* SB | PA | TA */ ++ precmd = 1 << 30 | 6 << 23 | 2 << 16; ++ ++ /* Connect MDIO interface to onchip PHY */ ++ writel_relaxed(0x9A, IPROC_CCB_MDIO_MII_CTRL_ADDR(ccb_mdio_base)); ++ mdelay(10); ++ ++ /* Program NDIV and PDIV into 0x1C register */ ++ miicmd = precmd | (0x1 << 28) | (0x1C << 18); ++ miidata = 1 << 12 | ndiv; ++ /* 0x53721040 */ ++ writel_relaxed(miicmd | miidata, IPROC_CCB_MDIO_MII_DATA_ADDR(ccb_mdio_base)); ++ mdelay(10); ++ ++ /* Program other PLL parameters into 0x1D register, disable suspend and put PHY into reset */ ++ miicmd = precmd | (0x1 << 28) | (0x1D << 18); ++ miidata = 1 << 13 | 3 << 8 | 3 << 4 | 0xa; ++ /* 0x5376233a */ ++ writel_relaxed(miicmd | miidata, IPROC_CCB_MDIO_MII_DATA_ADDR(ccb_mdio_base)); ++ mdelay(10); ++ ++ /* Program register 0x15, USB device mode set and get PHY out of reset */ ++ miicmd = precmd | (0x1 << 28) | (0x15 << 18); ++ miidata = 1 << 2 | 1 << 1; ++ /* 0x53560006 */ ++ writel_relaxed(miicmd | miidata, IPROC_CCB_MDIO_MII_DATA_ADDR(ccb_mdio_base)); ++ mdelay(10); ++ ++ /* Program register 0x19, set mdio mode */ ++ miicmd = precmd | (0x1 << 28) | (0x19 << 18); ++ miidata = 1 << 7; ++ /* 0x53660080 */ ++ writel_relaxed(miicmd | miidata, IPROC_CCB_MDIO_MII_DATA_ADDR(ccb_mdio_base)); ++ mdelay(10); ++ ++ /* get the PLL out of reset */ ++ miicmd = precmd | (0x2 << 28) | (0x1D << 18); ++ miidata = 0; ++ writel_relaxed(miicmd | miidata, IPROC_CCB_MDIO_MII_DATA_ADDR(ccb_mdio_base)); ++ mdelay(10); ++ miidata = readl_relaxed(IPROC_CCB_MDIO_MII_DATA_ADDR(ccb_mdio_base)); ++ miicmd = precmd | (0x1 << 28) | (0x1D << 18); ++ miidata |= (1 << 12); ++ /* 0x5376333a */ ++ writel_relaxed(miicmd | miidata, IPROC_CCB_MDIO_MII_DATA_ADDR(ccb_mdio_base)); ++ mdelay(10); ++ ++ if (ccb_mdio_base) { ++ iounmap(ccb_mdio_base); ++ ccb_mdio_base = NULL; 
++ } ++ } else { ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ val |= 0x01000000; /* 24:PLL_RESETB = 1 */ ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ ++ mdelay(20); ++ ++ /* check pll_lock */ ++ mask = (1 << IPROC_WRAP_MISC_STATUS__USBPHY_PLL_LOCK); ++ do { ++ val = readl_relaxed(IPROC_WRAP_MISC_STATUS_ADDR(wrap_base)); ++ if ((val & mask) == mask) { ++ break; ++ } else { ++ udelay(10); ++ count ++; ++ } ++ } while(count <= 10); ++ if (count > 10) { ++ printk(KERN_WARNING "%s : PLL not lock! IPROC_WRAP_MISC_STATUS = 0x%08lx\n", ++ __FUNCTION__, val); ++ } ++ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ val &= ~0x00800000; /* 23:RESETB = 0 */ ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ mdelay(100); ++ ++#if defined(CONFIG_MACH_HX4) ++ val = readl_relaxed(IPROC_WRAP_IPROC_XGPLL_CTRL_4_ADDR(wrap_base)); ++ ndiv = ((val >> IPROC_WRAP_IPROC_XGPLL_CTRL_4__NDIV_INT_R) & ++ ~(0xFFFFFFFF << IPROC_WRAP_IPROC_XGPLL_CTRL_4__NDIV_INT_WIDTH)); ++ if (ndiv == IPROC_CLK_NDIV_40) { ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ val = (val & USB_CLK_NDIV_MASK) | USB_CLK_NDIV_40; ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ udelay(10); ++ val = (val & USB_CLK_PLL_RESET_MASK) | USB_CLK_NDIV_40; ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ udelay(10); ++ val = (val & USB_CLK_PHY_RESET_MASK) | USB_CLK_NDIV_40; ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ udelay(10); ++ } else if (ndiv == IPROC_CLK_NDIV_20) { ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ val = (val & USB_CLK_NDIV_MASK) | USB_CLK_NDIV_20; ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ udelay(10); ++ val = (val & USB_CLK_PLL_RESET_MASK) | USB_CLK_NDIV_20; ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ udelay(10); ++ val = (val & USB_CLK_PHY_RESET_MASK) | USB_CLK_NDIV_20; ++ 
writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ udelay(10); ++ } ++#endif /* CONFIG_MACH_HX4 */ ++ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ val |= 0x00800000; /* 23:RESETB = 1 */ ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_ADDR(wrap_base)); ++ udelay(100); ++ } ++#endif /* (defined (CONFIG_MACH_HX4) || defined (CONFIG_MACH_KT2)) */ ++ ++ return 0; ++} ++ ++static int iproc_usb_phy_sb2_config(struct iproc_usb_priv *iproc_usb_data) ++{ ++#if defined(CONFIG_MACH_SB2) ++ void __iomem *wrap_base = iproc_usb_data->wrap_base; ++ ulong val, mask, count = 0; ++ ++ if (!wrap_base) { ++ return -EINVAL; ++ } ++ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ val |= 0x0c000000; /* 27:PHY_ISO & 26:PLL_SUSPEND_EN = 1 */ ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ val &= ~0x03000000; /* 25:PLL_RESETB & 24:RESETB = 0 */ ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ val &= ~0x03000000; /* 25:AFE_BG_PWRDWNB & 24:AFE_LDO_PWRDWNB = 0 */ ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ udelay(10); ++ val |= 0x02000000; /* 25:AFE_BG_PWRDWNB = 1 */ ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ udelay(150); ++ val |= 0x01000000; /* 24:AFE_LDO_PWRDWNB = 1 */ ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ udelay(160); ++ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ val &= ~0x08000000; /* 27:PHY_ISO = 0 */ ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ udelay(20); ++ val |= 0x02000000; /* 25:PLL_RESETB = 1 */ ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ ++ mdelay(20); ++ ++ /* check pll_lock */ ++ mask = (1 << IPROC_WRAP_MISC_STATUS__USBPHY_PLL_LOCK); ++ do { ++ val = readl_relaxed(IPROC_WRAP_MISC_STATUS_ADDR(wrap_base)); ++ if ((val & mask) == mask) { ++ break; ++ } else { ++ 
udelay(10); ++ count ++; ++ } ++ } while(count <= 10); ++ ++ if (count > 10) ++ printk(KERN_WARNING "%s : PLL not lock! IPROC_WRAP_MISC_STATUS = 0x%08lx\n", ++ __FUNCTION__, val); ++ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ val |= 0x01000000; /* 24:RESETB = 1 */ ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ udelay(2); ++#endif /* defined(CONFIG_MACH_SB2) */ ++ ++ return 0; ++} ++ ++static int iproc_usb_phy_gh_config(struct iproc_usb_priv *iproc_usb_data) ++{ ++#if defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3) || \ ++ defined (CONFIG_MACH_GH2) ++ void __iomem *wrap_base = iproc_usb_data->wrap_base; ++ ulong val, mask, count = 0; ++ ++ if (!wrap_base) ++ return -EINVAL; ++ ++#if !defined(CONFIG_MACH_GH2) ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ val |= (1 << IPROC_WRAP_USBPHY_CTRL_2__PHY_ISO); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ val |= (1 << IPROC_WRAP_USBPHY_CTRL_0__PHY_IDDQ); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ val |= (1 << IPROC_WRAP_USBPHY_CTRL_2__P1CTL_B0); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ ++ /* set phy_resetb to 0, pll_resetb to 0 */ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ val &= ~(1 << IPROC_WRAP_USBPHY_CTRL_0__RESETB); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ val &= ~(1 << IPROC_WRAP_USBPHY_CTRL_0__PLL_RESETB); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ ++ /* set p1ctl[11] to 0 */ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ val &= ~(1 << IPROC_WRAP_USBPHY_CTRL_2__P1CTL_B11); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ ++ /* set phy_iso to 0 */ 
++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ val &= ~(1 << IPROC_WRAP_USBPHY_CTRL_2__PHY_ISO); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ ++ /* set phy_iddq to 0 */ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ val &= ~(1 << IPROC_WRAP_USBPHY_CTRL_0__PHY_IDDQ); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ ++ mdelay(1); ++ ++ /* set pll_resetb to 1, phy_resetb to 1 */ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ val |= (1 << IPROC_WRAP_USBPHY_CTRL_0__PLL_RESETB); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++ val |= (1 << IPROC_WRAP_USBPHY_CTRL_0__RESETB); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_0_ADDR(wrap_base)); ++#else /* !defined(CONFIG_MACH_GH2) */ ++ /* This value is from the designer to set Internal Power Sequence Mode */ ++ val = readl_relaxed(IPROC_WRAP_TOP_STRAP_STATUS_ADDR(wrap_base)); ++ if (!(val & (1 << IPROC_WRAP_TOP_STRAP_STATUS__USB2_SEL))) { ++ /* device mode */ ++ writel_relaxed(0x0806, IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ } else { ++ /* host mode */ ++ writel_relaxed(0x0802, USBH_Utmi_p0Ctl(USBH_Utmi_base)); ++ } ++#endif /* !defined(CONFIG_MACH_GH2) */ ++ mdelay(20); ++ ++ /* check pll_lock */ ++ mask = (1 << IPROC_WRAP_MISC_STATUS__USBPHY_PLL_LOCK); ++ do { ++ val = readl_relaxed(IPROC_WRAP_MISC_STATUS_ADDR(wrap_base)); ++ if ((val & mask) == mask) { ++ break; ++ } else { ++ udelay(10); ++ count ++; ++ } ++ } while(count <= 10); ++ ++ if (count > 10) { ++ printk(KERN_WARNING "%s : PLL not lock! 
IPROC_WRAP_MISC_STATUS = 0x%08lx\n", ++ __FUNCTION__, val); ++ } ++ ++#if !defined(CONFIG_MACH_GH2) ++ /* set non_drving to 0 */ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ val &= ~(1 << IPROC_WRAP_USBPHY_CTRL_2__P1CTL_B0); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ ++ /* set p1ctl[11] to 1 */ ++ val = readl_relaxed(IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++ val |= (1 << IPROC_WRAP_USBPHY_CTRL_2__P1CTL_B11); ++ writel_relaxed(val, IPROC_WRAP_USBPHY_CTRL_2_ADDR(wrap_base)); ++#endif /* !defined(CONFIG_MACH_GH2) */ ++#endif /* defined(CONFIG_MACH_GH) || defined(CONFIG_MACH_HR3) || defined (CONFI ++G_MACH_GH2) */ ++ ++ return 0; ++} ++ ++static int iproc_usb_phy_init(struct usb_phy *phy) ++{ ++ struct iproc_usb_priv *iproc_usb_data = container_of(phy, struct iproc_usb_priv, phy); ++ struct device *dev = iproc_usb_data->dev; ++ const struct of_device_id *match; ++ int ret = 0; ++ uint val; ++ ++ if (!iproc_usb_data->wrap_base || !iproc_usb_data->idm_base) { ++ return -EINVAL; ++ } ++ ++ match = of_match_device(xgs_iproc_usb_phy_dt_ids, dev); ++ if (!match) { ++ dev_err(dev, "failed to find USB PHY in DT\n"); ++ return -ENODEV; ++ } ++ ++ /* Put USBD controller into reset state and disable clock via IDM registers */ ++ val = readl_relaxed(USB2D_IDM_IDM_RESET_CONTROL_ADDR(iproc_usb_data->idm_base)); ++ val |= (1 << USB2D_IDM_IDM_RESET_CONTROL__RESET); ++ writel_relaxed(val, USB2D_IDM_IDM_RESET_CONTROL_ADDR(iproc_usb_data->idm_base)); ++ ++ val = readl_relaxed(USB2D_IDM_IDM_IO_CONTROL_DIRECT_ADDR(iproc_usb_data->idm_base)); ++ val &= ~(1 << USB2D_IDM_IDM_IO_CONTROL_DIRECT__clk_enable); ++ writel_relaxed(val, USB2D_IDM_IDM_IO_CONTROL_DIRECT_ADDR(iproc_usb_data->idm_base)); ++ ++ if (strstr(match->compatible, "hx4") || ++ strstr(match->compatible, "kt2")) ++ ret = iproc_usb_phy_hx4_config(iproc_usb_data); ++ else if (strstr(match->compatible, "sb2")) ++ ret = iproc_usb_phy_sb2_config(iproc_usb_data); ++#if 
!defined(CONFIG_MACH_GH2) ++ else ++ ret = iproc_usb_phy_gh_config(iproc_usb_data); ++#endif ++ ++ /* Enable clock to USBD and get the USBD out of reset */ ++ val = readl_relaxed(USB2D_IDM_IDM_IO_CONTROL_DIRECT_ADDR(iproc_usb_data->idm_base)); ++ val |= (1 << USB2D_IDM_IDM_IO_CONTROL_DIRECT__clk_enable); ++ writel_relaxed(val, USB2D_IDM_IDM_IO_CONTROL_DIRECT_ADDR(iproc_usb_data->idm_base)); ++ ++ mdelay(10); ++ val = readl_relaxed(USB2D_IDM_IDM_RESET_CONTROL_ADDR(iproc_usb_data->idm_base)); ++ val &= ~(1 << USB2D_IDM_IDM_RESET_CONTROL__RESET); ++ writel_relaxed(val, USB2D_IDM_IDM_RESET_CONTROL_ADDR(iproc_usb_data->idm_base)); ++ ++#if defined(CONFIG_MACH_GH2) ++ /* In GH2, it must init PHY after RESET */ ++ mdelay(100); ++ ret = iproc_usb_phy_gh_config(iproc_usb_data); ++#endif ++ ++ return ret; ++} ++ ++static int xgs_iproc_usb_phy_probe(struct platform_device *pdev) ++{ ++ struct device *dev = &pdev->dev; ++ struct device_node *dn = pdev->dev.of_node; ++ struct iproc_usb_priv *iproc_usb_data; ++ int gpio_pin; ++ enum of_gpio_flags flags; ++ u32 gpio_active_low; ++ u32 __maybe_unused val; ++ int ret, usb_mode; ++ ++ if (!of_device_is_available(dn)) ++ return -ENODEV; ++ ++ iproc_usb_data = devm_kzalloc(dev, sizeof(*iproc_usb_data), GFP_KERNEL); ++ if (!iproc_usb_data) { ++ dev_err(dev, "devm_kzalloc() failed\n" ); ++ return -ENOMEM; ++ } ++ memset(iproc_usb_data, 0, sizeof(*iproc_usb_data)); ++ platform_set_drvdata(pdev, iproc_usb_data); ++ ++ iproc_usb_data->dev = dev; ++ iproc_usb_data->dn = dn; ++ ++ iproc_usb_data->wrap_base = get_iproc_wrap_ctrl_base(); ++ if (!iproc_usb_data->wrap_base) { ++ dev_err(&pdev->dev, "can't iomap usb phy base address\n"); ++ ret = -ENOMEM; ++ goto err1; ++ } ++ ++ gpio_pin = of_get_named_gpio_flags(dn, "vbus-gpio", 0, &flags); ++ ++ if (gpio_pin < 0) { ++ dev_err(&pdev->dev, "Error: no gpio pin set for USB power\n"); ++ return gpio_pin; ++ } ++ ++ gpio_active_low = flags & OF_GPIO_ACTIVE_LOW; ++ ++ ret = gpio_request(gpio_pin, 
"usbphy-vbus"); ++ if (ret != 0) { ++ dev_err(dev, "request gpio #%d error.\n", gpio_pin); ++ goto err1; ++ } ++ ++ usb_mode = xgs_iproc_usb_phy_mode(iproc_usb_data); ++ ++ iproc_usb_data->usb_mode = usb_mode; ++ iproc_usb_data->phy.dev = dev; ++ iproc_usb_data->phy.flags = usb_mode; ++ iproc_usb_data->phy.init = iproc_usb_phy_init; ++ iproc_usb_data->phy.type = USB_PHY_TYPE_USB2; ++ ++ if (usb_mode == IPROC_USB_MODE_DEVICE) { ++ iproc_usb_data->idm_base = (void *)of_iomap(dn, 1); ++ if (!iproc_usb_data->idm_base) { ++ dev_err(&pdev->dev, "can't iomap usb2d idm base address 1\n"); ++ ret = -ENOMEM; ++ goto err2; ++ } ++ ++ gpio_direction_input(gpio_pin); ++ } else { ++ ++#if defined(CONFIG_MACH_GH2) ++ USBH_Utmi_base = (void *)of_iomap(dn, 2); ++ if (!USBH_Utmi_base) { ++ dev_err(&pdev->dev, "can't iomap usb2h idm base address 2\n"); ++ ret = -ENOMEM; ++ goto err2; ++ } ++#endif ++ iproc_usb_data->idm_base = (void *)of_iomap(dn, 0); ++ if (!iproc_usb_data->idm_base) { ++ dev_err(&pdev->dev, "can't iomap usb2h idm base address 0\n"); ++ ret = -ENOMEM; ++ goto err2; ++ } ++ ++ gpio_direction_output(gpio_pin, 1); ++ ++ /*turn off the power: if active low for power, then set 1 to turn off*/ ++ if (gpio_active_low) ++ __gpio_set_value(gpio_pin, 1); ++ else ++ __gpio_set_value(gpio_pin, 0); ++ ++ /* ++ Initial usb phy for usb host mode. For the device mode, ++ the iproc_usb_phy_init will be called when usb udc start. 
++ */ ++ ret = iproc_usb_phy_init(&iproc_usb_data->phy); ++ if (ret < 0) ++ goto err2; ++ } ++ ++ ret = usb_add_phy_dev(&iproc_usb_data->phy); ++ if (ret) ++ goto err2; ++ ++ /* supply power for USB device connected to the host */ ++ if (usb_mode != IPROC_USB_MODE_DEVICE) { ++ if (gpio_active_low) ++ __gpio_set_value(gpio_pin, 0); ++ else ++ __gpio_set_value(gpio_pin, 1); ++ } ++ gpio_free(gpio_pin); ++ ++ return 0; ++ ++err2: ++ gpio_free(gpio_pin); ++err1: ++ if (iproc_usb_data->idm_base) { ++ iounmap(iproc_usb_data->idm_base); ++ } ++ if (iproc_usb_data) { ++ iounmap(iproc_usb_data); ++ } ++#if defined(CONFIG_MACH_GH2) ++ if (USBH_Utmi_base) { ++ iounmap(USBH_Utmi_base); ++ USBH_Utmi_base = NULL; ++ } ++#endif ++ return ret; ++} ++ ++static int xgs_iproc_usb_phy_remove(struct platform_device *pdev) ++{ ++ struct iproc_usb_priv *iproc_usb_data = platform_get_drvdata(pdev); ++ ++ platform_set_drvdata(pdev, NULL); ++ if (iproc_usb_data->idm_base) { ++ iounmap(iproc_usb_data->idm_base); ++ usb_remove_phy(&iproc_usb_data->phy); ++ } ++ ++ if (iproc_usb_data) ++ iounmap(iproc_usb_data); ++ ++#if defined(CONFIG_MACH_GH2) ++ if (USBH_Utmi_base) { ++ iounmap(USBH_Utmi_base); ++ USBH_Utmi_base = NULL; ++ } ++#endif ++ ++ return 0; ++} ++ ++static struct platform_driver xgs_iproc_usb_phy_driver = ++{ ++ .driver = { ++ .name = "usb-phy", ++ .owner = THIS_MODULE, ++ .of_match_table = of_match_ptr(xgs_iproc_usb_phy_dt_ids), ++ }, ++ .probe = xgs_iproc_usb_phy_probe, ++ .remove = xgs_iproc_usb_phy_remove, ++}; ++ ++module_platform_driver(xgs_iproc_usb_phy_driver); ++ ++MODULE_AUTHOR("Broadcom"); ++MODULE_DESCRIPTION("Broadcom USB phy driver"); ++MODULE_LICENSE("GPL"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig +--- a/drivers/watchdog/Kconfig 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/watchdog/Kconfig 2017-11-09 17:54:04.039439000 +0800 
+@@ -578,6 +578,14 @@ config LPC18XX_WATCHDOG + To compile this driver as a module, choose M here: the + module will be called lpc18xx_wdt. + ++config XGS_IPROC_SP805_WDT ++ tristate "BRCM XGS iProc watchdog based on SP805" ++ depends on (ARCH_XGS_IPROC || COMPILE_TEST) ++ select WATCHDOG_CORE ++ help ++ Say Y here to include support for the watchdog timer ++ embedded in BRCM XGS iProc SoCs. ++ + # AVR32 Architecture + + config AT32AP700X_WDT +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile +--- a/drivers/watchdog/Makefile 2016-12-16 00:49:34.000000000 +0800 ++++ b/drivers/watchdog/Makefile 2017-11-09 17:54:04.040435000 +0800 +@@ -69,6 +69,7 @@ obj-$(CONFIG_MEDIATEK_WATCHDOG) += mtk_w + obj-$(CONFIG_DIGICOLOR_WATCHDOG) += digicolor_wdt.o + obj-$(CONFIG_LPC18XX_WATCHDOG) += lpc18xx_wdt.o + obj-$(CONFIG_BCM7038_WDT) += bcm7038_wdt.o ++obj-$(CONFIG_XGS_IPROC_SP805_WDT) += xgs_iproc_sp805_wdt.o + + # AVR32 Architecture + obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/drivers/watchdog/xgs_iproc_sp805_wdt.c b/drivers/watchdog/xgs_iproc_sp805_wdt.c +--- a/drivers/watchdog/xgs_iproc_sp805_wdt.c 1970-01-01 08:00:00.000000000 +0800 ++++ b/drivers/watchdog/xgs_iproc_sp805_wdt.c 2017-11-09 17:54:04.252451000 +0800 +@@ -0,0 +1,381 @@ ++/* ++ * Copyright (C) 2015, Broadcom Corporation. All Rights Reserved. ++ * ++ * Permission to use, copy, modify, and/or distribute this software for any ++ * purpose with or without fee is hereby granted, provided that the above ++ * copyright notice and this permission notice appear in all copies. ++ * ++ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES ++ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF ++ * MERCHANTABILITY AND FITNESS. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY ++ * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES ++ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION ++ * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN ++ * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* default timeout in seconds */ ++#define DEFAULT_TIMEOUT 60 ++ ++#define MODULE_NAME "iproc-sp805-wdt" ++ ++/* watchdog register offsets and masks */ ++#define WDTLOAD 0x000 ++ #define LOAD_MIN 0x00000001 ++ #define LOAD_MAX 0xFFFFFFFF ++#define WDTVALUE 0x004 ++#define WDTCONTROL 0x008 ++ /* control register masks */ ++ #define INT_ENABLE (1 << 0) ++ #define RESET_ENABLE (1 << 1) ++#define WDTINTCLR 0x00C ++#define WDTRIS 0x010 ++#define WDTMIS 0x014 ++ #define INT_MASK (1 << 0) ++#define WDTLOCK 0xC00 ++ #define UNLOCK 0x1ACCE551 ++ #define LOCK 0x00000001 ++ ++/** ++ * struct sp805_wdt: sp805 wdt device structure ++ * @wdd: instance of struct watchdog_device ++ * @lock: spin lock protecting dev structure and io access ++ * @base: base address of wdt ++ * @clk: clock structure of wdt ++ * @adev: amba device structure of wdt ++ * @load_val: load value to be set for current timeout ++ */ ++struct sp805_wdt { ++ struct watchdog_device wdd; ++ spinlock_t lock; ++ void __iomem *base; ++ struct clk *clk; ++ struct amba_device *adev; ++ unsigned int load_val; ++}; ++ ++static bool nowayout = WATCHDOG_NOWAYOUT; ++module_param(nowayout, bool, 0); ++MODULE_PARM_DESC(nowayout, ++ "Set to 1 to keep watchdog running after device release"); ++ ++/* This routine get boot status to indicate if the last boot is from WDT */ ++static unsigned int wdt_get_clear_bootstatus( ++ void __iomem *wdt_bootstatus, ++ 
unsigned int wdt_bootstatus_bit) ++{ ++ unsigned int reg; ++ unsigned int bootstatus = 0; ++ ++ reg = readl_relaxed(wdt_bootstatus); ++ bootstatus = reg & (1 << wdt_bootstatus_bit); ++ ++ if (bootstatus) ++ /* write 1 to clear boot status bit */ ++ writel_relaxed(reg, wdt_bootstatus); ++ ++ return bootstatus; ++} ++ ++/* This routine finds load value that will reset system in required timout */ ++static int wdt_setload(struct watchdog_device *wdd, unsigned int timeout) ++{ ++ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd); ++ u64 load, rate; ++ ++ rate = clk_get_rate(wdt->clk); ++ ++ /* ++ * sp805 runs counter with given value twice, after the end of first ++ * counter it gives an interrupt and then starts counter again. If ++ * interrupt already occurred then it resets the system. This is why ++ * load is half of what should be required. ++ */ ++ load = div_u64(rate, 2) * timeout - 1; ++ ++ load = (load > LOAD_MAX) ? LOAD_MAX : load; ++ load = (load < LOAD_MIN) ? LOAD_MIN : load; ++ ++ spin_lock(&wdt->lock); ++ wdt->load_val = load; ++ /* roundup timeout to closest positive integer value */ ++ wdd->timeout = div_u64((load + 1) * 2 + (rate / 2), rate); ++ spin_unlock(&wdt->lock); ++ ++ return 0; ++} ++ ++/* returns number of seconds left for reset to occur */ ++static unsigned int wdt_timeleft(struct watchdog_device *wdd) ++{ ++ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd); ++ u64 load, rate; ++ ++ rate = clk_get_rate(wdt->clk); ++ ++ spin_lock(&wdt->lock); ++ load = readl_relaxed(wdt->base + WDTVALUE); ++ ++ /* If the interrupt is inactive then time left is WDTValue + WDTLoad. 
*/ ++ if (!(readl_relaxed(wdt->base + WDTRIS) & INT_MASK)) ++ load += wdt->load_val + 1; ++ spin_unlock(&wdt->lock); ++ ++ return div_u64(load, rate); ++} ++ ++static int wdt_config(struct watchdog_device *wdd, bool ping) ++{ ++ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd); ++ int ret; ++ ++ if (!ping) { ++ ret = clk_prepare(wdt->clk); ++ if (ret) { ++ dev_err(&wdt->adev->dev, "clock prepare fail"); ++ return ret; ++ } ++ ++ ret = clk_enable(wdt->clk); ++ if (ret) { ++ dev_err(&wdt->adev->dev, "clock enable fail"); ++ clk_unprepare(wdt->clk); ++ return ret; ++ } ++ } ++ ++ spin_lock(&wdt->lock); ++ ++ writel_relaxed(UNLOCK, wdt->base + WDTLOCK); ++ writel_relaxed(wdt->load_val, wdt->base + WDTLOAD); ++ writel_relaxed(INT_MASK, wdt->base + WDTINTCLR); ++ ++ if (!ping) ++ writel_relaxed(INT_ENABLE | RESET_ENABLE, wdt->base + ++ WDTCONTROL); ++ ++ writel_relaxed(LOCK, wdt->base + WDTLOCK); ++ ++ /* Flush posted writes. */ ++ readl_relaxed(wdt->base + WDTLOCK); ++ spin_unlock(&wdt->lock); ++ ++ return 0; ++} ++ ++static int wdt_ping(struct watchdog_device *wdd) ++{ ++ return wdt_config(wdd, true); ++} ++ ++/* enables watchdog timers reset */ ++static int wdt_enable(struct watchdog_device *wdd) ++{ ++ return wdt_config(wdd, false); ++} ++ ++/* disables watchdog timers reset */ ++static int wdt_disable(struct watchdog_device *wdd) ++{ ++ struct sp805_wdt *wdt = watchdog_get_drvdata(wdd); ++ ++ spin_lock(&wdt->lock); ++ ++ writel_relaxed(UNLOCK, wdt->base + WDTLOCK); ++ writel_relaxed(0, wdt->base + WDTCONTROL); ++ writel_relaxed(LOCK, wdt->base + WDTLOCK); ++ ++ /* Flush posted writes. 
*/ ++ readl_relaxed(wdt->base + WDTLOCK); ++ spin_unlock(&wdt->lock); ++ ++ clk_disable(wdt->clk); ++ clk_unprepare(wdt->clk); ++ ++ return 0; ++} ++ ++static const struct watchdog_info wdt_info = { ++ .options = WDIOF_MAGICCLOSE | WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING, ++ .identity = MODULE_NAME, ++}; ++ ++static const struct watchdog_ops wdt_ops = { ++ .owner = THIS_MODULE, ++ .start = wdt_enable, ++ .stop = wdt_disable, ++ .ping = wdt_ping, ++ .set_timeout = wdt_setload, ++ .get_timeleft = wdt_timeleft, ++}; ++ ++static int sp805_wdt_probe(struct amba_device *adev, const struct amba_id *id) ++{ ++ struct sp805_wdt *wdt; ++ int ret; ++ struct device_node *dnode = adev->dev.of_node; ++ void __iomem *wdt_bootstatus = NULL; ++ unsigned int bootstatus_bit = 0; ++ ++ wdt = devm_kzalloc(&adev->dev, sizeof(*wdt), GFP_KERNEL); ++ if (!wdt) { ++ dev_err(&adev->dev, "Kzalloc failed\n"); ++ ret = -ENOMEM; ++ goto error1; ++ } ++ ++ wdt->base = of_iomap(dnode, 0); ++ if (!wdt->base) { ++ ret = -ENOMEM; ++ dev_err(&adev->dev, "of_iomap failed\n"); ++ goto error2; ++ } ++ ++ wdt_bootstatus = of_iomap(dnode, 1); ++ if (!wdt_bootstatus) { ++ ret = -ENOMEM; ++ dev_err(&adev->dev, "of_iomap failed\n"); ++ goto error2; ++ } ++ ++ wdt->clk = of_clk_get(dnode, 0); ++ if (IS_ERR(wdt->clk)) { ++ dev_err(&adev->dev, "Clock not found\n"); ++ ret = PTR_ERR(wdt->clk); ++ goto error2; ++ } ++ ++ wdt->adev = adev; ++ wdt->wdd.info = &wdt_info; ++ wdt->wdd.ops = &wdt_ops; ++ ++ spin_lock_init(&wdt->lock); ++ watchdog_set_nowayout(&wdt->wdd, nowayout); ++ watchdog_set_drvdata(&wdt->wdd, wdt); ++ wdt_setload(&wdt->wdd, DEFAULT_TIMEOUT); ++ ++ ret = watchdog_register_device(&wdt->wdd); ++ if (ret) { ++ dev_err(&adev->dev, "watchdog_register_device() failed: %d\n", ++ ret); ++ goto error3; ++ } ++ ++ amba_set_drvdata(adev, wdt); ++ ++ ret = of_property_read_u32(dnode, "wdt_boot_status_bit", ++ &bootstatus_bit); ++ if (ret) { ++ dev_err(&adev->dev, "failed getting DT bootstatus bit\n"); ++ goto 
error3; ++ } ++ ++ wdt->wdd.bootstatus = wdt_get_clear_bootstatus( ++ wdt_bootstatus, ++ bootstatus_bit); ++ ++ dev_info(&adev->dev, "registration successful\n"); ++ dev_info(&adev->dev, "timeout=%d sec, nowayout=%d\n", ++ DEFAULT_TIMEOUT, nowayout); ++ ++ return 0; ++ ++error3: ++ clk_put(wdt->clk); ++ ++error2: ++ if (wdt->base) ++ iounmap(wdt->base); ++ if (wdt_bootstatus) ++ iounmap(wdt_bootstatus); ++ kfree(wdt); ++ ++error1: ++ dev_err(&adev->dev, "Probe Failed!!!\n"); ++ ++ return ret; ++} ++ ++static int sp805_wdt_remove(struct amba_device *adev) ++{ ++ struct sp805_wdt *wdt = amba_get_drvdata(adev); ++ ++ watchdog_unregister_device(&wdt->wdd); ++ amba_set_drvdata(adev, NULL); ++ watchdog_set_drvdata(&wdt->wdd, NULL); ++ clk_put(wdt->clk); ++ ++ return 0; ++} ++ ++#ifdef CONFIG_PM ++static int sp805_wdt_suspend(struct device *dev) ++{ ++ struct sp805_wdt *wdt = dev_get_drvdata(dev); ++ ++ if (watchdog_active(&wdt->wdd)) ++ return wdt_disable(&wdt->wdd); ++ ++ return 0; ++} ++ ++static int sp805_wdt_resume(struct device *dev) ++{ ++ struct sp805_wdt *wdt = dev_get_drvdata(dev); ++ ++ if (watchdog_active(&wdt->wdd)) ++ return wdt_enable(&wdt->wdd); ++ ++ return 0; ++} ++#endif /* CONFIG_PM */ ++ ++static SIMPLE_DEV_PM_OPS(sp805_wdt_dev_pm_ops, sp805_wdt_suspend, ++ sp805_wdt_resume); ++ ++static struct amba_id sp805_wdt_ids[] = { ++ { ++ .id = 0x00141805, ++ .mask = 0x00ffffff, ++ }, ++ { 0, 0 }, ++}; ++MODULE_DEVICE_TABLE(amba, sp805_wdt_ids); ++ ++static struct amba_driver sp805_wdt_driver = { ++ .drv = { ++ .name = MODULE_NAME, ++ .pm = &sp805_wdt_dev_pm_ops, ++ }, ++ .id_table = sp805_wdt_ids, ++ .probe = sp805_wdt_probe, ++ .remove = sp805_wdt_remove, ++}; ++ ++module_amba_driver(sp805_wdt_driver); ++ ++MODULE_AUTHOR("Viresh Kumar "); ++MODULE_DESCRIPTION("ARM SP805 Watchdog Driver"); ++MODULE_LICENSE("GPL"); +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff 
a/include/linux/mtd/spi-nor.h b/include/linux/mtd/spi-nor.h +--- a/include/linux/mtd/spi-nor.h 2016-12-16 00:49:34.000000000 +0800 ++++ b/include/linux/mtd/spi-nor.h 2017-11-09 17:54:11.415490000 +0800 +@@ -183,6 +183,9 @@ struct spi_nor { + int (*flash_is_locked)(struct spi_nor *nor, loff_t ofs, uint64_t len); + + void *priv; ++#ifdef CONFIG_M25PXX_STAY_IN_3BYTE_MODE ++ void *priv1; ++#endif + }; + + /** +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/include/linux/soc/bcm/xgs-iproc-wrap-idm-dmu.h b/include/linux/soc/bcm/xgs-iproc-wrap-idm-dmu.h +--- a/include/linux/soc/bcm/xgs-iproc-wrap-idm-dmu.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/include/linux/soc/bcm/xgs-iproc-wrap-idm-dmu.h 2017-11-09 17:54:12.456502000 +0800 +@@ -0,0 +1,15 @@ ++/* ++ * $Copyright Open Broadcom Corporation$ ++ */ ++ ++#ifndef XGS_IPROC_WRAP_IDM_DMU_H ++#define XGS_IPROC_WRAP_IDM_DMU_H ++ ++extern int xgs_iproc_wrap_idm_dmu_base_reg_setup(void); ++extern void xgs_iproc_idm_timeout_handler_setup(void); ++extern void __iomem *get_iproc_wrap_ctrl_base(void); ++extern void __iomem *get_iproc_dmu_pcu_base(void); ++extern void __iomem *get_iproc_idm_base(int index); ++extern void __iomem *get_iproc_idm_base_phys(int index); ++ ++#endif +diff -uprN -EbwB --no-dereference -X /projects/ntsw-sw7/home/chena/TEMP/XLDK_releasebuild12/INTERNAL/release/tools/dontdiff a/include/linux/usb/iproc_usb.h b/include/linux/usb/iproc_usb.h +--- a/include/linux/usb/iproc_usb.h 1970-01-01 08:00:00.000000000 +0800 ++++ b/include/linux/usb/iproc_usb.h 2017-11-09 17:54:12.826524000 +0800 +@@ -0,0 +1,20 @@ ++/* ++ * iproc_usb.h -- USB defines for XGS IPROC USB drivers ++ * ++ * Copyright (C) 2015 Broadcom Limited. ++ * ++ * This software is distributed under the terms of the GNU General ++ * Public License ("GPL") as published by the Free Software Foundation, ++ * version 2 of that License. 
++ */ ++ ++#ifndef __LINUX_USB_IPROC_USB_H ++#define __LINUX_USB_IPROC_USB_H ++ ++/* USB Flags */ ++ ++#define IPROC_USB_MODE_HOST (0) ++#define IPROC_USB_MODE_DEVICE (1) ++ ++#endif /* __LINUX_USB_IPROC_USB_H */ ++ diff --git a/packages/base/any/kernels/4.4-lts/patches/series b/packages/base/any/kernels/4.4-lts/patches/series new file mode 100644 index 00000000..db86ea76 --- /dev/null +++ b/packages/base/any/kernels/4.4-lts/patches/series @@ -0,0 +1 @@ +kernel-4.4-brcm-iproc.patch diff --git a/packages/base/any/kernels/4.9-lts/configs/x86_64-all/.gitignore b/packages/base/any/kernels/4.9-lts/configs/x86_64-all/.gitignore index 5dbdc5b9..5540b78d 100644 --- a/packages/base/any/kernels/4.9-lts/configs/x86_64-all/.gitignore +++ b/packages/base/any/kernels/4.9-lts/configs/x86_64-all/.gitignore @@ -1,3 +1,3 @@ -kernel-3.16* +kernel-* linux-* - +lib/ diff --git a/packages/base/any/kernels/4.9-lts/configs/x86_64-all/x86_64-all.config b/packages/base/any/kernels/4.9-lts/configs/x86_64-all/x86_64-all.config index 29bb9441..423e0d63 100644 --- a/packages/base/any/kernels/4.9-lts/configs/x86_64-all/x86_64-all.config +++ b/packages/base/any/kernels/4.9-lts/configs/x86_64-all/x86_64-all.config @@ -2428,6 +2428,7 @@ CONFIG_SENSORS_CORETEMP=y # CONFIG_SENSORS_MAX1668 is not set # CONFIG_SENSORS_MAX197 is not set # CONFIG_SENSORS_MAX31722 is not set +CONFIG_SENSORS_MAX6620=y # CONFIG_SENSORS_MAX6639 is not set # CONFIG_SENSORS_MAX6642 is not set # CONFIG_SENSORS_MAX6650 is not set diff --git a/packages/base/any/kernels/4.9-lts/patches/driver-add-the-support-max6620.patch b/packages/base/any/kernels/4.9-lts/patches/driver-add-the-support-max6620.patch new file mode 100644 index 00000000..124b1105 --- /dev/null +++ b/packages/base/any/kernels/4.9-lts/patches/driver-add-the-support-max6620.patch @@ -0,0 +1,740 @@ +From 63fff0670cf4fea6815aba404ba9ae7c19f2f3ff Mon Sep 17 00:00:00 2001 +From: "shaohua.xiong" +Date: Thu, 19 Apr 2018 16:50:29 +0800 +Subject: [PATCH] add the support 
max6620 driver + +--- + drivers/hwmon/Kconfig | 10 + + drivers/hwmon/Makefile | 1 + + drivers/hwmon/max6620.c | 686 ++++++++++++++++++++++++++++++++++++++++++++++++ + 3 files changed, 697 insertions(+) + create mode 100644 drivers/hwmon/max6620.c + +diff --git a/drivers/hwmon/Kconfig b/drivers/hwmon/Kconfig +index 45cef3d..5c33957 100644 +--- a/drivers/hwmon/Kconfig ++++ b/drivers/hwmon/Kconfig +@@ -844,6 +844,16 @@ tristate "MAX31722 temperature sensor" + This driver can also be built as a module. If so, the module + will be called max31722. + ++config SENSORS_MAX6620 ++ tristate "Maxim MAX6620 sensor chip" ++ depends on I2C ++ help ++ If you say yes here you get support for the MAX6620 ++ sensor chips. ++ ++ This driver can also be built as a module. If so, the module ++ will be called max6620 ++ + config SENSORS_MAX6639 + tristate "Maxim MAX6639 sensor chip" + depends on I2C +diff --git a/drivers/hwmon/Makefile b/drivers/hwmon/Makefile +index aecf4ba..724a1f7 100644 +--- a/drivers/hwmon/Makefile ++++ b/drivers/hwmon/Makefile +@@ -115,6 +115,7 @@ obj-$(CONFIG_SENSORS_MAX1619) += max1619.o + obj-$(CONFIG_SENSORS_MAX1668) += max1668.o + obj-$(CONFIG_SENSORS_MAX197) += max197.o + obj-$(CONFIG_SENSORS_MAX31722) += max31722.o ++obj-$(CONFIG_SENSORS_MAX6620) += max6620.o + obj-$(CONFIG_SENSORS_MAX6639) += max6639.o + obj-$(CONFIG_SENSORS_MAX6642) += max6642.o + obj-$(CONFIG_SENSORS_MAX6650) += max6650.o +diff --git a/drivers/hwmon/max6620.c b/drivers/hwmon/max6620.c +new file mode 100644 +index 0000000..fb49195 +--- /dev/null ++++ b/drivers/hwmon/max6620.c +@@ -0,0 +1,686 @@ ++/* ++ * max6620.c - Linux Kernel module for hardware monitoring. ++ * ++ * (C) 2012 by L. Grunenberg ++ * ++ * based on code written by : ++ * 2007 by Hans J. Koch ++ * John Morris ++ * Copyright (c) 2003 Spirent Communications ++ * and Claus Gindhart ++ * ++ * This module has only been tested with the MAX6620 chip. 
++ * ++ * The datasheet was last seen at: ++ * ++ * http://pdfserv.maxim-ic.com/en/ds/MAX6620.pdf ++ * ++ * This program is free software; you can redistribute it and/or modify ++ * it under the terms of the GNU General Public License as published by ++ * the Free Software Foundation; either version 2 of the License, or ++ * (at your option) any later version. ++ * ++ * This program is distributed in the hope that it will be useful, ++ * but WITHOUT ANY WARRANTY; without even the implied warranty of ++ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the ++ * GNU General Public License for more details. ++ * ++ * You should have received a copy of the GNU General Public License ++ * along with this program; if not, write to the Free Software ++ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. ++ */ ++ ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++#include ++ ++/* ++ * Insmod parameters ++ */ ++ ++ ++/* clock: The clock frequency of the chip the driver should assume */ ++static int clock = 8192; ++static u32 sr = 2; ++static u32 np = 2; ++ ++module_param(clock, int, S_IRUGO); ++ ++static const unsigned short normal_i2c[] = {0x0a, 0x1a, 0x2a, I2C_CLIENT_END}; ++ ++/* ++ * MAX 6620 registers ++ */ ++ ++#define MAX6620_REG_CONFIG 0x00 ++#define MAX6620_REG_FAULT 0x01 ++#define MAX6620_REG_CONF_FAN0 0x02 ++#define MAX6620_REG_CONF_FAN1 0x03 ++#define MAX6620_REG_CONF_FAN2 0x04 ++#define MAX6620_REG_CONF_FAN3 0x05 ++#define MAX6620_REG_DYN_FAN0 0x06 ++#define MAX6620_REG_DYN_FAN1 0x07 ++#define MAX6620_REG_DYN_FAN2 0x08 ++#define MAX6620_REG_DYN_FAN3 0x09 ++#define MAX6620_REG_TACH0 0x10 ++#define MAX6620_REG_TACH1 0x12 ++#define MAX6620_REG_TACH2 0x14 ++#define MAX6620_REG_TACH3 0x16 ++#define MAX6620_REG_VOLT0 0x18 ++#define MAX6620_REG_VOLT1 0x1A ++#define MAX6620_REG_VOLT2 0x1C ++#define MAX6620_REG_VOLT3 0x1E ++#define MAX6620_REG_TAR0 0x20 ++#define MAX6620_REG_TAR1 0x22 ++#define MAX6620_REG_TAR2 0x24 ++#define 
MAX6620_REG_TAR3 0x26 ++#define MAX6620_REG_DAC0 0x28 ++#define MAX6620_REG_DAC1 0x2A ++#define MAX6620_REG_DAC2 0x2C ++#define MAX6620_REG_DAC3 0x2E ++ ++/* ++ * Config register bits ++ */ ++ ++#define MAX6620_CFG_RUN 0x80 ++#define MAX6620_CFG_POR 0x40 ++#define MAX6620_CFG_TIMEOUT 0x20 ++#define MAX6620_CFG_FULLFAN 0x10 ++#define MAX6620_CFG_OSC 0x08 ++#define MAX6620_CFG_WD_MASK 0x06 ++#define MAX6620_CFG_WD_2 0x02 ++#define MAX6620_CFG_WD_6 0x04 ++#define MAX6620_CFG_WD10 0x06 ++#define MAX6620_CFG_WD 0x01 ++ ++ ++/* ++ * Failure status register bits ++ */ ++ ++#define MAX6620_FAIL_TACH0 0x10 ++#define MAX6620_FAIL_TACH1 0x20 ++#define MAX6620_FAIL_TACH2 0x40 ++#define MAX6620_FAIL_TACH3 0x80 ++#define MAX6620_FAIL_MASK0 0x01 ++#define MAX6620_FAIL_MASK1 0x02 ++#define MAX6620_FAIL_MASK2 0x04 ++#define MAX6620_FAIL_MASK3 0x08 ++ ++ ++/* Minimum and maximum values of the FAN-RPM */ ++#define FAN_RPM_MIN 240 ++#define FAN_RPM_MAX 30000 ++ ++#define DIV_FROM_REG(reg) (1 << ((reg & 0xE0) >> 5)) ++ ++static int max6620_probe(struct i2c_client *client, const struct i2c_device_id *id); ++static int max6620_init_client(struct i2c_client *client); ++static int max6620_remove(struct i2c_client *client); ++static struct max6620_data *max6620_update_device(struct device *dev); ++ ++static const u8 config_reg[] = { ++ MAX6620_REG_CONF_FAN0, ++ MAX6620_REG_CONF_FAN1, ++ MAX6620_REG_CONF_FAN2, ++ MAX6620_REG_CONF_FAN3, ++}; ++ ++static const u8 dyn_reg[] = { ++ MAX6620_REG_DYN_FAN0, ++ MAX6620_REG_DYN_FAN1, ++ MAX6620_REG_DYN_FAN2, ++ MAX6620_REG_DYN_FAN3, ++}; ++ ++static const u8 tach_reg[] = { ++ MAX6620_REG_TACH0, ++ MAX6620_REG_TACH1, ++ MAX6620_REG_TACH2, ++ MAX6620_REG_TACH3, ++}; ++ ++static const u8 volt_reg[] = { ++ MAX6620_REG_VOLT0, ++ MAX6620_REG_VOLT1, ++ MAX6620_REG_VOLT2, ++ MAX6620_REG_VOLT3, ++}; ++ ++static const u8 target_reg[] = { ++ MAX6620_REG_TAR0, ++ MAX6620_REG_TAR1, ++ MAX6620_REG_TAR2, ++ MAX6620_REG_TAR3, ++}; ++ ++static const u8 dac_reg[] = { 
++ MAX6620_REG_DAC0, ++ MAX6620_REG_DAC1, ++ MAX6620_REG_DAC2, ++ MAX6620_REG_DAC3, ++}; ++ ++/* ++ * Driver data (common to all clients) ++ */ ++ ++static const struct i2c_device_id max6620_id[] = { ++ { "max6620", 0 }, ++ { } ++}; ++MODULE_DEVICE_TABLE(i2c, max6620_id); ++ ++static struct i2c_driver max6620_driver = { ++ .class = I2C_CLASS_HWMON, ++ .driver = { ++ .name = "max6620", ++ }, ++ .probe = max6620_probe, ++ .remove = max6620_remove, ++ .id_table = max6620_id, ++ .address_list = normal_i2c, ++}; ++ ++/* ++ * Client data (each client gets its own) ++ */ ++ ++struct max6620_data { ++ struct device *hwmon_dev; ++ struct mutex update_lock; ++ int nr_fans; ++ char valid; /* zero until following fields are valid */ ++ unsigned long last_updated; /* in jiffies */ ++ ++ /* register values */ ++ u8 speed[4]; ++ u8 config; ++ u8 fancfg[4]; ++ u8 fandyn[4]; ++ u8 tach[4]; ++ u8 volt[4]; ++ u8 target[4]; ++ u8 dac[4]; ++ u8 fault; ++}; ++ ++static ssize_t get_fan(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = max6620_update_device(dev); ++ struct i2c_client *client = to_i2c_client(dev); ++ u32 rpm = 0; ++ u32 tach = 0; ++ u32 tach1 = 0; ++ u32 tach2 = 0; ++ ++ tach1 = i2c_smbus_read_byte_data(client, tach_reg[attr->index]); ++ tach1 = (tach1 << 3) & 0x7f8; ++ tach2 = i2c_smbus_read_byte_data(client, tach_reg[attr->index] + 1); ++ tach2 = (tach2 >> 5) & 0x7; ++ tach = tach1 | tach2; ++ if (tach == 0) { ++ rpm = 0; ++ } else { ++ rpm = (60 * sr * clock)/(tach * np); ++ } ++ ++ return sprintf(buf, "%d\n", rpm); ++} ++ ++static ssize_t get_target(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = max6620_update_device(dev); ++ struct i2c_client *client = to_i2c_client(dev); ++ u32 rpm; ++ u32 target; ++ u32 target1; ++ u32 
target2; ++ ++ target1 = i2c_smbus_read_byte_data(client, target_reg[attr->index]); ++ target1 = (target1 << 3) & 0x7f8; ++ target2 = i2c_smbus_read_byte_data(client, target_reg[attr->index] + 1); ++ target2 = (target2 >> 5) & 0x7; ++ target = target1 | target2; ++ if (target == 0) { ++ rpm = 0; ++ } else { ++ rpm = (60 * sr * clock)/(target * np); ++ } ++ return sprintf(buf, "%d\n", rpm); ++} ++ ++static ssize_t set_target(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { ++ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = i2c_get_clientdata(client); ++ unsigned long rpm; ++ int err; ++ unsigned long target; ++ unsigned long target1; ++ unsigned long target2; ++ ++ err = kstrtoul(buf, 10, &rpm); ++ if (err) ++ return err; ++ ++ rpm = clamp_val(rpm, FAN_RPM_MIN, FAN_RPM_MAX); ++ ++ mutex_lock(&data->update_lock); ++ ++ target = (60 * sr * 8192)/(rpm * np); ++ target1 = (target >> 3) & 0xff; ++ target2 = (target << 5) & 0xe0; ++ i2c_smbus_write_byte_data(client, target_reg[attr->index], target1); ++ i2c_smbus_write_byte_data(client, target_reg[attr->index] + 1, target2); ++ ++ mutex_unlock(&data->update_lock); ++ ++ return count; ++} ++ ++/* ++ * Get/set the fan speed in open loop mode using pwm1 sysfs file. ++ * Speed is given as a relative value from 0 to 255, where 255 is maximum ++ * speed. Note that this is done by writing directly to the chip's DAC, ++ * it won't change the closed loop speed set by fan1_target. ++ * Also note that due to rounding errors it is possible that you don't read ++ * back exactly the value you have set. 
++ */ ++ ++static ssize_t get_pwm(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ int pwm; ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = max6620_update_device(dev); ++ ++ /* ++ * Useful range for dac is 0-180 for 12V fans and 0-76 for 5V fans. ++ * Lower DAC values mean higher speeds. ++ */ ++ pwm = ((int)data->volt[attr->index]); ++ ++ if (pwm < 0) ++ pwm = 0; ++ ++ return sprintf(buf, "%d\n", pwm); ++} ++ ++static ssize_t set_pwm(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { ++ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = i2c_get_clientdata(client); ++ unsigned long pwm; ++ int err; ++ ++ err = kstrtoul(buf, 10, &pwm); ++ if (err) ++ return err; ++ ++ pwm = clamp_val(pwm, 0, 255); ++ ++ mutex_lock(&data->update_lock); ++ ++ data->dac[attr->index] = pwm; ++ ++ ++ i2c_smbus_write_byte_data(client, dac_reg[attr->index], data->dac[attr->index]); ++ i2c_smbus_write_byte_data(client, dac_reg[attr->index]+1, 0x00); ++ ++ mutex_unlock(&data->update_lock); ++ ++ return count; ++} ++ ++/* ++ * Get/Set controller mode: ++ * Possible values: ++ * 0 = Fan always on ++ * 1 = Open loop, Voltage is set according to speed, not regulated. 
++ * 2 = Closed loop, RPM for all fans regulated by fan1 tachometer ++ */ ++ ++static ssize_t get_enable(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ struct max6620_data *data = max6620_update_device(dev); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ int mode = (data->fancfg[attr->index] & 0x80 ) >> 7; ++ int sysfs_modes[2] = {1, 2}; ++ ++ return sprintf(buf, "%d\n", sysfs_modes[mode]); ++} ++ ++static ssize_t set_enable(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { ++ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct max6620_data *data = i2c_get_clientdata(client); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ int max6620_modes[3] = {0, 1, 0}; ++ unsigned long mode; ++ int err; ++ ++ err = kstrtoul(buf, 10, &mode); ++ if (err) ++ return err; ++ ++ if (mode > 2) ++ return -EINVAL; ++ ++ mutex_lock(&data->update_lock); ++ ++ data->fancfg[attr->index] = i2c_smbus_read_byte_data(client, config_reg[attr->index]); ++ data->fancfg[attr->index] = (data->fancfg[attr->index] & ~0x80) ++ | (max6620_modes[mode] << 7); ++ ++ i2c_smbus_write_byte_data(client, config_reg[attr->index], data->fancfg[attr->index]); ++ ++ mutex_unlock(&data->update_lock); ++ ++ return count; ++} ++ ++/* ++ * Read/write functions for fan1_div sysfs file. The MAX6620 has no such ++ * divider. We handle this by converting between divider and counttime: ++ * ++ * (counttime == k) <==> (divider == 2^k), k = 0, 1, 2, 3, 4 or 5 ++ * ++ * Lower values of k allow to connect a faster fan without the risk of ++ * counter overflow. The price is lower resolution. You can also set counttime ++ * using the module parameter. Note that the module parameter "prescaler" also ++ * influences the behaviour. Unfortunately, there's no sysfs attribute ++ * defined for that. See the data sheet for details. 
++ */ ++ ++static ssize_t get_div(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ struct max6620_data *data = max6620_update_device(dev); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ ++ return sprintf(buf, "%d\n", DIV_FROM_REG(data->fandyn[attr->index])); ++} ++ ++static ssize_t set_div(struct device *dev, struct device_attribute *devattr, const char *buf, size_t count) { ++ ++ struct i2c_client *client = to_i2c_client(dev); ++ struct max6620_data *data = i2c_get_clientdata(client); ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ unsigned long div; ++ int err; ++ u8 div_bin; ++ ++ err = kstrtoul(buf, 10, &div); ++ if (err) ++ return err; ++ ++ mutex_lock(&data->update_lock); ++ switch (div) { ++ case 1: ++ div_bin = 0; ++ break; ++ case 2: ++ div_bin = 1; ++ break; ++ case 4: ++ div_bin = 2; ++ break; ++ case 8: ++ div_bin = 3; ++ break; ++ case 16: ++ div_bin = 4; ++ break; ++ case 32: ++ div_bin = 5; ++ break; ++ default: ++ mutex_unlock(&data->update_lock); ++ return -EINVAL; ++ } ++ data->fandyn[attr->index] &= 0x1F; ++ data->fandyn[attr->index] |= div_bin << 5; ++ i2c_smbus_write_byte_data(client, dyn_reg[attr->index], data->fandyn[attr->index]); ++ mutex_unlock(&data->update_lock); ++ ++ return count; ++} ++ ++/* ++ * Get alarm stati: ++ * Possible values: ++ * 0 = no alarm ++ * 1 = alarm ++ */ ++ ++static ssize_t get_alarm(struct device *dev, struct device_attribute *devattr, char *buf) { ++ ++ struct sensor_device_attribute *attr = to_sensor_dev_attr(devattr); ++ struct max6620_data *data = max6620_update_device(dev); ++ struct i2c_client *client = to_i2c_client(dev); ++ int alarm = 0; ++ ++ if (data->fault & (1 << attr->index)) { ++ mutex_lock(&data->update_lock); ++ alarm = 1; ++ data->fault &= ~(1 << attr->index); ++ data->fault |= i2c_smbus_read_byte_data(client, ++ MAX6620_REG_FAULT); ++ mutex_unlock(&data->update_lock); ++ } ++ ++ return sprintf(buf, "%d\n", alarm); ++} ++ 
++static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, get_fan, NULL, 0); ++static SENSOR_DEVICE_ATTR(fan2_input, S_IRUGO, get_fan, NULL, 1); ++static SENSOR_DEVICE_ATTR(fan3_input, S_IRUGO, get_fan, NULL, 2); ++static SENSOR_DEVICE_ATTR(fan4_input, S_IRUGO, get_fan, NULL, 3); ++static SENSOR_DEVICE_ATTR(fan1_target, S_IWUSR | S_IRUGO, get_target, set_target, 0); ++static SENSOR_DEVICE_ATTR(fan1_div, S_IWUSR | S_IRUGO, get_div, set_div, 0); ++// static SENSOR_DEVICE_ATTR(pwm1_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 0); ++static SENSOR_DEVICE_ATTR(pwm1, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 0); ++static SENSOR_DEVICE_ATTR(fan2_target, S_IWUSR | S_IRUGO, get_target, set_target, 1); ++static SENSOR_DEVICE_ATTR(fan2_div, S_IWUSR | S_IRUGO, get_div, set_div, 1); ++// static SENSOR_DEVICE_ATTR(pwm2_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 1); ++static SENSOR_DEVICE_ATTR(pwm2, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 1); ++static SENSOR_DEVICE_ATTR(fan3_target, S_IWUSR | S_IRUGO, get_target, set_target, 2); ++static SENSOR_DEVICE_ATTR(fan3_div, S_IWUSR | S_IRUGO, get_div, set_div, 2); ++// static SENSOR_DEVICE_ATTR(pwm3_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 2); ++static SENSOR_DEVICE_ATTR(pwm3, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 2); ++static SENSOR_DEVICE_ATTR(fan4_target, S_IWUSR | S_IRUGO, get_target, set_target, 3); ++static SENSOR_DEVICE_ATTR(fan4_div, S_IWUSR | S_IRUGO, get_div, set_div, 3); ++// static SENSOR_DEVICE_ATTR(pwm4_enable, S_IWUSR | S_IRUGO, get_enable, set_enable, 3); ++static SENSOR_DEVICE_ATTR(pwm4, S_IWUSR | S_IRUGO, get_pwm, set_pwm, 3); ++ ++static struct attribute *max6620_attrs[] = { ++ &sensor_dev_attr_fan1_input.dev_attr.attr, ++ &sensor_dev_attr_fan2_input.dev_attr.attr, ++ &sensor_dev_attr_fan3_input.dev_attr.attr, ++ &sensor_dev_attr_fan4_input.dev_attr.attr, ++ &sensor_dev_attr_fan1_target.dev_attr.attr, ++ &sensor_dev_attr_fan1_div.dev_attr.attr, ++// &sensor_dev_attr_pwm1_enable.dev_attr.attr, ++ 
&sensor_dev_attr_pwm1.dev_attr.attr, ++ &sensor_dev_attr_fan2_target.dev_attr.attr, ++ &sensor_dev_attr_fan2_div.dev_attr.attr, ++// &sensor_dev_attr_pwm2_enable.dev_attr.attr, ++ &sensor_dev_attr_pwm2.dev_attr.attr, ++ &sensor_dev_attr_fan3_target.dev_attr.attr, ++ &sensor_dev_attr_fan3_div.dev_attr.attr, ++// &sensor_dev_attr_pwm3_enable.dev_attr.attr, ++ &sensor_dev_attr_pwm3.dev_attr.attr, ++ &sensor_dev_attr_fan4_target.dev_attr.attr, ++ &sensor_dev_attr_fan4_div.dev_attr.attr, ++// &sensor_dev_attr_pwm4_enable.dev_attr.attr, ++ &sensor_dev_attr_pwm4.dev_attr.attr, ++ NULL ++}; ++ ++static struct attribute_group max6620_attr_grp = { ++ .attrs = max6620_attrs, ++}; ++ ++ ++/* ++ * Real code ++ */ ++ ++static int max6620_probe(struct i2c_client *client, const struct i2c_device_id *id) { ++ ++ struct max6620_data *data; ++ int err; ++ ++ data = devm_kzalloc(&client->dev, sizeof(struct max6620_data), GFP_KERNEL); ++ if (!data) { ++ dev_err(&client->dev, "out of memory.\n"); ++ return -ENOMEM; ++ } ++ ++ i2c_set_clientdata(client, data); ++ mutex_init(&data->update_lock); ++ data->nr_fans = id->driver_data; ++ ++ /* ++ * Initialize the max6620 chip ++ */ ++ dev_info(&client->dev, "About to initialize module\n"); ++ ++ err = max6620_init_client(client); ++ if (err) ++ return err; ++ dev_info(&client->dev, "Module initialized\n"); ++ ++ err = sysfs_create_group(&client->dev.kobj, &max6620_attr_grp); ++ if (err) ++ return err; ++dev_info(&client->dev, "Sysfs entries created\n"); ++ ++ data->hwmon_dev = hwmon_device_register(&client->dev); ++ if (!IS_ERR(data->hwmon_dev)) ++ return 0; ++ ++ err = PTR_ERR(data->hwmon_dev); ++ dev_err(&client->dev, "error registering hwmon device.\n"); ++ ++ sysfs_remove_group(&client->dev.kobj, &max6620_attr_grp); ++ return err; ++} ++ ++static int max6620_remove(struct i2c_client *client) { ++ ++ struct max6620_data *data = i2c_get_clientdata(client); ++ ++ hwmon_device_unregister(data->hwmon_dev); ++ ++ 
sysfs_remove_group(&client->dev.kobj, &max6620_attr_grp); ++ return 0; ++} ++ ++static int max6620_init_client(struct i2c_client *client) { ++ ++ struct max6620_data *data = i2c_get_clientdata(client); ++ int config; ++ int err = -EIO; ++ int i; ++ ++ config = i2c_smbus_read_byte_data(client, MAX6620_REG_CONFIG); ++ ++ if (config < 0) { ++ dev_err(&client->dev, "Error reading config, aborting.\n"); ++ return err; ++ } ++ ++ /* ++ * Set bit 4, disable other fans from going full speed on a fail ++ * failure. ++ */ ++ if (i2c_smbus_write_byte_data(client, MAX6620_REG_CONFIG, config | 0x10)) { ++ dev_err(&client->dev, "Config write error, aborting.\n"); ++ return err; ++ } ++ ++ data->config = config; ++ for (i = 0; i < 4; i++) { ++ data->fancfg[i] = i2c_smbus_read_byte_data(client, config_reg[i]); ++ data->fancfg[i] |= 0xa8; // enable TACH monitoring ++ i2c_smbus_write_byte_data(client, config_reg[i], data->fancfg[i]); ++ data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]); ++ /* 2 counts (001) and Rate change 100 (0.125 secs) */ ++ data-> fandyn[i] = 0x30; ++ i2c_smbus_write_byte_data(client, dyn_reg[i], data->fandyn[i]); ++ data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); ++ data->volt[i] = i2c_smbus_read_byte_data(client, volt_reg[i]); ++ data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]); ++ data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]); ++ } ++ return 0; ++} ++ ++static struct max6620_data *max6620_update_device(struct device *dev) ++{ ++ int i; ++ struct i2c_client *client = to_i2c_client(dev); ++ struct max6620_data *data = i2c_get_clientdata(client); ++ ++ mutex_lock(&data->update_lock); ++ ++ if (time_after(jiffies, data->last_updated + HZ) || !data->valid) { ++ ++ for (i = 0; i < 4; i++) { ++ data->fancfg[i] = i2c_smbus_read_byte_data(client, config_reg[i]); ++ data->fandyn[i] = i2c_smbus_read_byte_data(client, dyn_reg[i]); ++ data->tach[i] = i2c_smbus_read_byte_data(client, tach_reg[i]); ++ data->volt[i] 
= i2c_smbus_read_byte_data(client, volt_reg[i]); ++ data->target[i] = i2c_smbus_read_byte_data(client, target_reg[i]); ++ data->dac[i] = i2c_smbus_read_byte_data(client, dac_reg[i]); ++ } ++ ++ ++ /* ++ * Alarms are cleared on read in case the condition that ++ * caused the alarm is removed. Keep the value latched here ++ * for providing the register through different alarm files. ++ */ ++ u8 fault_reg; ++ fault_reg = i2c_smbus_read_byte_data(client, MAX6620_REG_FAULT); ++ data->fault |= (fault_reg >> 4) & (fault_reg & 0x0F); ++ ++ data->last_updated = jiffies; ++ data->valid = 1; ++ } ++ ++ mutex_unlock(&data->update_lock); ++ ++ return data; ++} ++ ++static int __init max6620_init(void) ++{ ++ return i2c_add_driver(&max6620_driver); ++} ++module_init(max6620_init); ++ ++/** ++ * sht21_init() - clean up driver ++ * ++ * Called when module is removed. ++ */ ++static void __exit max6620_exit(void) ++{ ++ i2c_del_driver(&max6620_driver); ++} ++module_exit(max6620_exit); ++ ++MODULE_AUTHOR("Lucas Grunenberg"); ++MODULE_DESCRIPTION("MAX6620 sensor driver"); ++MODULE_LICENSE("GPL"); +-- +2.7.4 + diff --git a/packages/base/any/kernels/4.9-lts/patches/driver-i2c-i2c-core.patch b/packages/base/any/kernels/4.9-lts/patches/driver-i2c-i2c-core.patch new file mode 100644 index 00000000..0e6a3118 --- /dev/null +++ b/packages/base/any/kernels/4.9-lts/patches/driver-i2c-i2c-core.patch @@ -0,0 +1,39 @@ +diff --git a/drivers/i2c/i2c-core.c b/drivers/i2c/i2c-core.c +index 35c0bc6..25878eb 100644 +--- a/drivers/i2c/i2c-core.c ++++ b/drivers/i2c/i2c-core.c +@@ -62,6 +62,8 @@ + static DEFINE_MUTEX(core_lock); + static DEFINE_IDR(i2c_adapter_idr); + ++static char i2c_dev_auto_detect = 1; ++ + static struct device_type i2c_client_type; + static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver); + +@@ -2011,6 +2013,9 @@ static int i2c_detect(struct i2c_adapter *adapter, struct i2c_driver *driver) + if (!driver->detect || !address_list) + return 0; + ++ if 
(i2c_dev_auto_detect == 0) ++ return 0; ++ + /* Stop here if the classes do not match */ + if (!(adapter->class & driver->class)) + return 0; +@@ -2617,6 +2622,15 @@ trace: + } + EXPORT_SYMBOL(i2c_smbus_xfer); + ++static int __init __i2c_dev_auto_detect(char *str) ++{ ++ if (str[0] == '0') ++ i2c_dev_auto_detect = 0; ++ ++ return 1; ++} ++__setup("i2c_dev_auto_detect=", __i2c_dev_auto_detect); ++ + MODULE_AUTHOR("Simon G. Vogl "); + MODULE_DESCRIPTION("I2C-Bus main module"); + MODULE_LICENSE("GPL"); diff --git a/packages/base/any/kernels/4.9-lts/patches/series b/packages/base/any/kernels/4.9-lts/patches/series index e83cb1cb..1d79cd29 100644 --- a/packages/base/any/kernels/4.9-lts/patches/series +++ b/packages/base/any/kernels/4.9-lts/patches/series @@ -11,3 +11,5 @@ driver-support-intel-igb-bcm5461-phy.patch 0010-platform-mellanox-mlxreg-hotplug-driver-add-check-fo.patch 0011-platform-x86-mlx-platform-new-features.patch 0012-i2c-busses-Add-capabilities-to-i2c-mlxcpld.patch +driver-i2c-i2c-core.patch +driver-add-the-support-max6620.patch diff --git a/packages/base/any/onlp/src/sff/module/auto/sff.yml b/packages/base/any/onlp/src/sff/module/auto/sff.yml index 69ef9e50..ae5a01d6 100644 --- a/packages/base/any/onlp/src/sff/module/auto/sff.yml +++ b/packages/base/any/onlp/src/sff/module/auto/sff.yml @@ -55,6 +55,8 @@ sff_module_types: &sff_module_types desc: "100G-CWDM4" - 100G_PSM4: desc: "100G-PSM4" +- 100G_SWDM4: + desc: "100G-SWDM4" - 40G_BASE_CR4: desc: "40GBASE-CR4" - 40G_BASE_SR4: @@ -103,6 +105,8 @@ sff_module_types: &sff_module_types desc: "1GBASE-SX" - 1G_BASE_LX: desc: "1GBASE-LX" +- 1G_BASE_ZX: + desc: "1GBASE-ZX" - 1G_BASE_CX: desc: "1GBASE-CX" - 1G_BASE_T: diff --git a/packages/base/any/onlp/src/sff/module/inc/sff/8472.h b/packages/base/any/onlp/src/sff/module/inc/sff/8472.h index 0e2f73d0..5a985f3b 100644 --- a/packages/base/any/onlp/src/sff/module/inc/sff/8472.h +++ b/packages/base/any/onlp/src/sff/module/inc/sff/8472.h @@ -209,6 +209,9 @@ #define 
SFF8472_MEDIA_XGE_ER(idprom) \ ((idprom[3] & SFF8472_CC3_XGE_BASE_ER) != 0) +#define SFF8472_MEDIA_LENGTH_ZX(idprom) \ + (idprom[14] >= 70) + /* * some CR cables identify as infiniband copper * some CR cables identify as FC twinax diff --git a/packages/base/any/onlp/src/sff/module/inc/sff/8636.h b/packages/base/any/onlp/src/sff/module/inc/sff/8636.h index 4f87c170..eafc5f69 100644 --- a/packages/base/any/onlp/src/sff/module/inc/sff/8636.h +++ b/packages/base/any/onlp/src/sff/module/inc/sff/8636.h @@ -135,6 +135,8 @@ #define SFF8636_CC192_100GE_CR4 0x0B #define SFF8636_CC192_25GE_CR_S 0x0C #define SFF8636_CC192_25GE_CR_N 0x0D +#define SFF8636_CC192_40GE_SWDM4 0x1F +#define SFF8636_CC192_100GE_SWDM4 0x20 #define SFF8636_MEDIA_100GE_AOC(idprom) \ (idprom[192] == SFF8636_CC192_100GE_AOC) @@ -148,6 +150,10 @@ (idprom[192] == SFF8636_CC192_100GE_PSM4) #define SFF8636_MEDIA_100GE_CR4(idprom) \ (idprom[192] == SFF8636_CC192_100GE_CR4) +#define SFF8636_MEDIA_100GE_SWDM4(idprom) \ + (idprom[192] == SFF8636_CC192_100GE_SWDM4) +#define SFF8636_MEDIA_40GE_SWDM4(idprom) \ + (idprom[192] == SFF8636_CC192_40GE_SWDM4) #define SFF8636_MEDIA_25GE_CR_S(idprom) \ (idprom[192] == SFF8636_CC192_25GE_CR_S) #define SFF8636_MEDIA_25GE_CR_N(idprom) \ diff --git a/packages/base/any/onlp/src/sff/module/inc/sff/sff.h b/packages/base/any/onlp/src/sff/module/inc/sff/sff.h index 8971a36d..0101cfb3 100644 --- a/packages/base/any/onlp/src/sff/module/inc/sff/sff.h +++ b/packages/base/any/onlp/src/sff/module/inc/sff/sff.h @@ -103,6 +103,7 @@ typedef enum sff_module_type_e { SFF_MODULE_TYPE_100G_BASE_LR4, SFF_MODULE_TYPE_100G_CWDM4, SFF_MODULE_TYPE_100G_PSM4, + SFF_MODULE_TYPE_100G_SWDM4, SFF_MODULE_TYPE_40G_BASE_CR4, SFF_MODULE_TYPE_40G_BASE_SR4, SFF_MODULE_TYPE_40G_BASE_LR4, @@ -127,6 +128,7 @@ typedef enum sff_module_type_e { SFF_MODULE_TYPE_10G_BASE_SRL, SFF_MODULE_TYPE_1G_BASE_SX, SFF_MODULE_TYPE_1G_BASE_LX, + SFF_MODULE_TYPE_1G_BASE_ZX, SFF_MODULE_TYPE_1G_BASE_CX, SFF_MODULE_TYPE_1G_BASE_T, 
SFF_MODULE_TYPE_100_BASE_LX, @@ -146,6 +148,7 @@ typedef enum sff_module_type_e { "100G_BASE_LR4", \ "100G_CWDM4", \ "100G_PSM4", \ + "100G_SWDM4", \ "40G_BASE_CR4", \ "40G_BASE_SR4", \ "40G_BASE_LR4", \ @@ -170,6 +173,7 @@ typedef enum sff_module_type_e { "10G_BASE_SRL", \ "1G_BASE_SX", \ "1G_BASE_LX", \ + "1G_BASE_ZX", \ "1G_BASE_CX", \ "1G_BASE_T", \ "100_BASE_LX", \ diff --git a/packages/base/any/onlp/src/sff/module/inc/sff/sff.x b/packages/base/any/onlp/src/sff/module/inc/sff/sff.x index 40499305..0b211647 100644 --- a/packages/base/any/onlp/src/sff/module/inc/sff/sff.x +++ b/packages/base/any/onlp/src/sff/module/inc/sff/sff.x @@ -13,6 +13,7 @@ SFF_MEDIA_TYPE_ENTRY(100G_BASE_SR4, 100GBASE-SR4) SFF_MEDIA_TYPE_ENTRY(100G_BASE_LR4, 100GBASE-LR4) SFF_MEDIA_TYPE_ENTRY(100G_CWDM4, 100G-CWDM4) SFF_MEDIA_TYPE_ENTRY(100G_PSM4, 100G-PSM4) +SFF_MEDIA_TYPE_ENTRY(100G_SWDM4, 100G-SWDM4) SFF_MEDIA_TYPE_ENTRY(40G_BASE_CR4, 40GBASE-CR4) SFF_MEDIA_TYPE_ENTRY(40G_BASE_SR4, 40GBASE-SR4) SFF_MEDIA_TYPE_ENTRY(40G_BASE_LR4, 40GBASE-LR4) @@ -37,6 +38,7 @@ SFF_MEDIA_TYPE_ENTRY(10G_BASE_ZR, 10GBASE-ZR) SFF_MEDIA_TYPE_ENTRY(10G_BASE_SRL, 10GBASE-SRL) SFF_MEDIA_TYPE_ENTRY(1G_BASE_SX, 1GBASE-SX) SFF_MEDIA_TYPE_ENTRY(1G_BASE_LX, 1GBASE-LX) +SFF_MEDIA_TYPE_ENTRY(1G_BASE_ZX, 1GBASE-ZX) SFF_MEDIA_TYPE_ENTRY(1G_BASE_CX, 1GBASE-CX) SFF_MEDIA_TYPE_ENTRY(1G_BASE_T, 1GBASE-T) SFF_MEDIA_TYPE_ENTRY(100_BASE_LX, 100BASE-LX) @@ -62,6 +64,7 @@ SFF_MODULE_TYPE_ENTRY(100G_BASE_SR4, 100GBASE-SR4) SFF_MODULE_TYPE_ENTRY(100G_BASE_LR4, 100GBASE-LR4) SFF_MODULE_TYPE_ENTRY(100G_CWDM4, 100G-CWDM4) SFF_MODULE_TYPE_ENTRY(100G_PSM4, 100G-PSM4) +SFF_MODULE_TYPE_ENTRY(100G_SWDM4, 100G-SWDM4) SFF_MODULE_TYPE_ENTRY(40G_BASE_CR4, 40GBASE-CR4) SFF_MODULE_TYPE_ENTRY(40G_BASE_SR4, 40GBASE-SR4) SFF_MODULE_TYPE_ENTRY(40G_BASE_LR4, 40GBASE-LR4) @@ -86,6 +89,7 @@ SFF_MODULE_TYPE_ENTRY(10G_BASE_ZR, 10GBASE-ZR) SFF_MODULE_TYPE_ENTRY(10G_BASE_SRL, 10GBASE-SRL) SFF_MODULE_TYPE_ENTRY(1G_BASE_SX, 1GBASE-SX) 
SFF_MODULE_TYPE_ENTRY(1G_BASE_LX, 1GBASE-LX) +SFF_MODULE_TYPE_ENTRY(1G_BASE_ZX, 1GBASE-ZX) SFF_MODULE_TYPE_ENTRY(1G_BASE_CX, 1GBASE-CX) SFF_MODULE_TYPE_ENTRY(1G_BASE_T, 1GBASE-T) SFF_MODULE_TYPE_ENTRY(100_BASE_LX, 100BASE-LX) diff --git a/packages/base/any/onlp/src/sff/module/python/onlp/sff/enums.py b/packages/base/any/onlp/src/sff/module/python/onlp/sff/enums.py index 31ddcda5..bba20c17 100644 --- a/packages/base/any/onlp/src/sff/module/python/onlp/sff/enums.py +++ b/packages/base/any/onlp/src/sff/module/python/onlp/sff/enums.py @@ -33,35 +33,37 @@ class SFF_MODULE_TYPE(Enumeration): _100G_BASE_LR4 = 3 _100G_CWDM4 = 4 _100G_PSM4 = 5 - _40G_BASE_CR4 = 6 - _40G_BASE_SR4 = 7 - _40G_BASE_LR4 = 8 - _40G_BASE_LM4 = 9 - _40G_BASE_ACTIVE = 10 - _40G_BASE_CR = 11 - _40G_BASE_SR2 = 12 - _40G_BASE_SM4 = 13 - _40G_BASE_ER4 = 14 - _25G_BASE_CR = 15 - _25G_BASE_SR = 16 - _25G_BASE_LR = 17 - _25G_BASE_AOC = 18 - _10G_BASE_SR = 19 - _10G_BASE_LR = 20 - _10G_BASE_LRM = 21 - _10G_BASE_ER = 22 - _10G_BASE_CR = 23 - _10G_BASE_SX = 24 - _10G_BASE_LX = 25 - _10G_BASE_ZR = 26 - _10G_BASE_SRL = 27 - _1G_BASE_SX = 28 - _1G_BASE_LX = 29 - _1G_BASE_CX = 30 - _1G_BASE_T = 31 - _100_BASE_LX = 32 - _100_BASE_FX = 33 - _4X_MUX = 34 + _100G_SWDM4 = 6 + _40G_BASE_CR4 = 7 + _40G_BASE_SR4 = 8 + _40G_BASE_LR4 = 9 + _40G_BASE_LM4 = 10 + _40G_BASE_ACTIVE = 11 + _40G_BASE_CR = 12 + _40G_BASE_SR2 = 13 + _40G_BASE_SM4 = 14 + _40G_BASE_ER4 = 15 + _25G_BASE_CR = 16 + _25G_BASE_SR = 17 + _25G_BASE_LR = 18 + _25G_BASE_AOC = 19 + _10G_BASE_SR = 20 + _10G_BASE_LR = 21 + _10G_BASE_LRM = 22 + _10G_BASE_ER = 23 + _10G_BASE_CR = 24 + _10G_BASE_SX = 25 + _10G_BASE_LX = 26 + _10G_BASE_ZR = 27 + _10G_BASE_SRL = 28 + _1G_BASE_SX = 29 + _1G_BASE_LX = 30 + _1G_BASE_ZX = 31 + _1G_BASE_CX = 32 + _1G_BASE_T = 33 + _100_BASE_LX = 34 + _100_BASE_FX = 35 + _4X_MUX = 36 class SFF_SFP_TYPE(Enumeration): diff --git a/packages/base/any/onlp/src/sff/module/src/sff.c b/packages/base/any/onlp/src/sff/module/src/sff.c index 
b21ace8f..ab437441 100644 --- a/packages/base/any/onlp/src/sff/module/src/sff.c +++ b/packages/base/any/onlp/src/sff/module/src/sff.c @@ -82,6 +82,11 @@ sff_module_type_get(const uint8_t* eeprom) && SFF8636_MEDIA_100GE_PSM4(eeprom)) return SFF_MODULE_TYPE_100G_PSM4; + if (SFF8636_MODULE_QSFP28(eeprom) + && SFF8636_MEDIA_EXTENDED(eeprom) + && SFF8636_MEDIA_100GE_SWDM4(eeprom)) + return SFF_MODULE_TYPE_100G_SWDM4; + if (SFF8436_MODULE_QSFP_PLUS_V2(eeprom) && SFF8436_MEDIA_40GE_CR4(eeprom)) return SFF_MODULE_TYPE_40G_BASE_CR4; @@ -208,8 +213,13 @@ sff_module_type_get(const uint8_t* eeprom) return SFF_MODULE_TYPE_1G_BASE_SX; if (SFF8472_MODULE_SFP(eeprom) - && SFF8472_MEDIA_GBE_LX(eeprom)) - return SFF_MODULE_TYPE_1G_BASE_LX; + && SFF8472_MEDIA_GBE_LX(eeprom)) { + if (SFF8472_MEDIA_LENGTH_ZX(eeprom)) { + return SFF_MODULE_TYPE_1G_BASE_ZX; + } else { + return SFF_MODULE_TYPE_1G_BASE_LX; + } + } if (SFF8472_MODULE_SFP(eeprom) && SFF8472_MEDIA_GBE_CX(eeprom)) @@ -219,10 +229,6 @@ sff_module_type_get(const uint8_t* eeprom) && SFF8472_MEDIA_GBE_T(eeprom)) return SFF_MODULE_TYPE_1G_BASE_T; - if (SFF8472_MODULE_SFP(eeprom) - && SFF8472_MEDIA_GBE_LX(eeprom)) - return SFF_MODULE_TYPE_1G_BASE_LX; - if (SFF8472_MODULE_SFP(eeprom) && SFF8472_MEDIA_CBE_LX(eeprom)) return SFF_MODULE_TYPE_100_BASE_LX; @@ -263,6 +269,7 @@ sff_media_type_get(sff_module_type_t mt) case SFF_MODULE_TYPE_100G_BASE_LR4: case SFF_MODULE_TYPE_100G_CWDM4: case SFF_MODULE_TYPE_100G_PSM4: + case SFF_MODULE_TYPE_100G_SWDM4: case SFF_MODULE_TYPE_40G_BASE_SR4: case SFF_MODULE_TYPE_40G_BASE_LR4: case SFF_MODULE_TYPE_40G_BASE_LM4: @@ -283,6 +290,7 @@ sff_media_type_get(sff_module_type_t mt) case SFF_MODULE_TYPE_10G_BASE_SRL: case SFF_MODULE_TYPE_1G_BASE_SX: case SFF_MODULE_TYPE_1G_BASE_LX: + case SFF_MODULE_TYPE_1G_BASE_ZX: case SFF_MODULE_TYPE_100_BASE_LX: case SFF_MODULE_TYPE_100_BASE_FX: case SFF_MODULE_TYPE_4X_MUX: @@ -312,6 +320,7 @@ sff_module_caps_get(sff_module_type_t mt, uint32_t *caps) case 
SFF_MODULE_TYPE_100G_BASE_CR4: case SFF_MODULE_TYPE_100G_CWDM4: case SFF_MODULE_TYPE_100G_PSM4: + case SFF_MODULE_TYPE_100G_SWDM4: *caps |= SFF_MODULE_CAPS_F_100G; return 0; @@ -347,6 +356,7 @@ sff_module_caps_get(sff_module_type_t mt, uint32_t *caps) case SFF_MODULE_TYPE_1G_BASE_SX: case SFF_MODULE_TYPE_1G_BASE_LX: + case SFF_MODULE_TYPE_1G_BASE_ZX: case SFF_MODULE_TYPE_1G_BASE_CX: case SFF_MODULE_TYPE_1G_BASE_T: *caps |= SFF_MODULE_CAPS_F_1G; @@ -664,7 +674,7 @@ sff_eeprom_parse_nonstandard__(sff_eeprom_t* se, uint8_t* eeprom) } if (strncmp(se->info.vendor, "Amphenol", 8) == 0 && - strncmp(se->info.model, "625960001", 9) == 0 && + (strncmp(se->info.model, "625960001", 9) == 0 || strncmp(se->info.model, "659900001", 9) == 0) && (se->eeprom[240] == 0x0f) && (se->eeprom[241] == 0x10) && ((se->eeprom[243] & 0xF0) == 0xE0)) { @@ -716,6 +726,7 @@ sff_info_init(sff_info_t* info, sff_module_type_t mt, case SFF_MODULE_TYPE_100G_BASE_LR4: case SFF_MODULE_TYPE_100G_CWDM4: case SFF_MODULE_TYPE_100G_PSM4: + case SFF_MODULE_TYPE_100G_SWDM4: info->sfp_type = SFF_SFP_TYPE_QSFP28; info->media_type = SFF_MEDIA_TYPE_FIBER; info->caps = SFF_MODULE_CAPS_F_100G; @@ -782,6 +793,7 @@ sff_info_init(sff_info_t* info, sff_module_type_t mt, case SFF_MODULE_TYPE_1G_BASE_SX: case SFF_MODULE_TYPE_1G_BASE_LX: + case SFF_MODULE_TYPE_1G_BASE_ZX: info->sfp_type = SFF_SFP_TYPE_SFP; info->media_type = SFF_MEDIA_TYPE_FIBER; info->caps = SFF_MODULE_CAPS_F_1G; diff --git a/packages/base/any/onlp/src/sff/module/src/sff_db.c b/packages/base/any/onlp/src/sff/module/src/sff_db.c index 15f69fbf..279e8ef4 100644 --- a/packages/base/any/onlp/src/sff/module/src/sff_db.c +++ b/packages/base/any/onlp/src/sff/module/src/sff_db.c @@ -72,6 +72,8 @@ #define SFF_100G_PSM4_PROPERTIES \ SFF_SFP_TYPE_QSFP28, "QSFP28", SFF_MODULE_TYPE_100G_PSM4, "100G-PSM4", SFF_MEDIA_TYPE_FIBER, "Fiber", SFF_MODULE_CAPS_F_100G +#define SFF_100G_SWDM4_PROPERTIES \ + SFF_SFP_TYPE_QSFP28, "QSFP28", SFF_MODULE_TYPE_100G_SWDM4, "100G-SWDM4", 
SFF_MEDIA_TYPE_FIBER, "Fiber", SFF_MODULE_CAPS_F_100G static sff_db_entry_t sff_database__[] = { diff --git a/packages/base/any/onlp/src/sff/module/src/sff_enums.c b/packages/base/any/onlp/src/sff/module/src/sff_enums.c index d499582c..deeeb50d 100644 --- a/packages/base/any/onlp/src/sff/module/src/sff_enums.c +++ b/packages/base/any/onlp/src/sff/module/src/sff_enums.c @@ -137,6 +137,7 @@ aim_map_si_t sff_module_type_map[] = { "100G_BASE_LR4", SFF_MODULE_TYPE_100G_BASE_LR4 }, { "100G_CWDM4", SFF_MODULE_TYPE_100G_CWDM4 }, { "100G_PSM4", SFF_MODULE_TYPE_100G_PSM4 }, + { "100G_SWDM4", SFF_MODULE_TYPE_100G_SWDM4 }, { "40G_BASE_CR4", SFF_MODULE_TYPE_40G_BASE_CR4 }, { "40G_BASE_SR4", SFF_MODULE_TYPE_40G_BASE_SR4 }, { "40G_BASE_LR4", SFF_MODULE_TYPE_40G_BASE_LR4 }, @@ -161,6 +162,7 @@ aim_map_si_t sff_module_type_map[] = { "10G_BASE_SRL", SFF_MODULE_TYPE_10G_BASE_SRL }, { "1G_BASE_SX", SFF_MODULE_TYPE_1G_BASE_SX }, { "1G_BASE_LX", SFF_MODULE_TYPE_1G_BASE_LX }, + { "1G_BASE_ZX", SFF_MODULE_TYPE_1G_BASE_ZX }, { "1G_BASE_CX", SFF_MODULE_TYPE_1G_BASE_CX }, { "1G_BASE_T", SFF_MODULE_TYPE_1G_BASE_T }, { "100_BASE_LX", SFF_MODULE_TYPE_100_BASE_LX }, @@ -177,6 +179,7 @@ aim_map_si_t sff_module_type_desc_map[] = { "100GBASE-LR4", SFF_MODULE_TYPE_100G_BASE_LR4 }, { "100G-CWDM4", SFF_MODULE_TYPE_100G_CWDM4 }, { "100G-PSM4", SFF_MODULE_TYPE_100G_PSM4 }, + { "100G-SWDM4", SFF_MODULE_TYPE_100G_SWDM4 }, { "40GBASE-CR4", SFF_MODULE_TYPE_40G_BASE_CR4 }, { "40GBASE-SR4", SFF_MODULE_TYPE_40G_BASE_SR4 }, { "40GBASE-LR4", SFF_MODULE_TYPE_40G_BASE_LR4 }, @@ -201,6 +204,7 @@ aim_map_si_t sff_module_type_desc_map[] = { "10GBASE-SRL", SFF_MODULE_TYPE_10G_BASE_SRL }, { "1GBASE-SX", SFF_MODULE_TYPE_1G_BASE_SX }, { "1GBASE-LX", SFF_MODULE_TYPE_1G_BASE_LX }, + { "1GBASE-ZX", SFF_MODULE_TYPE_1G_BASE_ZX }, { "1GBASE-CX", SFF_MODULE_TYPE_1G_BASE_CX }, { "1GBASE-T", SFF_MODULE_TYPE_1G_BASE_T }, { "100BASE-LX", SFF_MODULE_TYPE_100_BASE_LX }, diff --git 
a/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/Makefile b/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/PKG.yml b/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/PKG.yml new file mode 100644 index 00000000..1d1f5758 --- /dev/null +++ b/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/PKG.yml @@ -0,0 +1,18 @@ + +common: + arch: armel + version: 1.0.0 + copyright: Copyright 2013, 2014, 2015 Big Switch Networks + maintainer: support@bigswitch.com + support: opennetworklinux@googlegroups.com + +packages: + - name: onl-kernel-4.4-lts-arm-iproc-all + version: 1.0.0 + summary: Open Network Linux Kernel 4.4-LTS for ARM Integrated Processor Platforms. + + files: + builds/kernel-4.4-lts-arm-iproc-all.bin.gz : $$PKG_INSTALL/ + builds/linux-4.4.*-mbuild : $$PKG_INSTALL/mbuilds + + changelog: Change changes changes., diff --git a/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/builds/.gitignore b/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/builds/.gitignore new file mode 100644 index 00000000..9a71a737 --- /dev/null +++ b/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/builds/.gitignore @@ -0,0 +1,3 @@ +linux-* +kernel-* +lib/ diff --git a/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/builds/Makefile b/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/builds/Makefile new file mode 100644 index 00000000..642762a8 --- /dev/null +++ b/packages/base/armel/kernels/kernel-4.4-lts-arm-iproc-all/builds/Makefile @@ -0,0 +1,21 @@ +# -*- Makefile -*- +############################################################ +# +# +# Copyright 2013, 2014 BigSwitch Networks, Inc. 
+# +# +# +# +############################################################ +THIS_DIR := $(abspath $(dir $(lastword $(MAKEFILE_LIST)))) + +include $(ONL)/make/config.mk + +#export MODSYNCLIST_EXTRA := arch/arm/plat-iproc/include + +kernel: + $(MAKE) -C $(ONL)/packages/base/any/kernels/4.4-lts/configs/arm-iproc-all K_TARGET_DIR=$(THIS_DIR) $(ONL_MAKE_PARALLEL) + +clean: + rm -rf kernel-* linux-4.4.* diff --git a/packages/platforms/accton/x86-64/x86-64-accton-wedge100bf-32x/onlp/builds/src/module/src/ledi.c b/packages/platforms/accton/x86-64/x86-64-accton-wedge100bf-32x/onlp/builds/src/module/src/ledi.c index c04342a2..1442f9a8 100644 --- a/packages/platforms/accton/x86-64/x86-64-accton-wedge100bf-32x/onlp/builds/src/module/src/ledi.c +++ b/packages/platforms/accton/x86-64/x86-64-accton-wedge100bf-32x/onlp/builds/src/module/src/ledi.c @@ -67,12 +67,12 @@ static led_mode_info_t led_mode_info[] = { {ONLP_LED_MODE_OFF, 0x0}, {ONLP_LED_MODE_OFF, 0x8}, - {ONLP_LED_MODE_RED, 0x1}, - {ONLP_LED_MODE_RED_BLINKING, 0x9}, + {ONLP_LED_MODE_BLUE, 0x1}, + {ONLP_LED_MODE_BLUE_BLINKING, 0x9}, {ONLP_LED_MODE_GREEN, 0x2}, {ONLP_LED_MODE_GREEN_BLINKING, 0xa}, - {ONLP_LED_MODE_BLUE, 0x4}, - {ONLP_LED_MODE_BLUE_BLINKING, 0xc}, + {ONLP_LED_MODE_RED, 0x4}, + {ONLP_LED_MODE_RED_BLINKING, 0xc}, }; /* diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/.gitignore b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/.gitignore new file mode 100644 index 00000000..dcc1a780 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/.gitignore @@ -0,0 +1,2 @@ +*x86*64*delta*ag8032*.mk +onlpdump.mk diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/Makefile b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git 
a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/Makefile b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/PKG.yml b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/PKG.yml new file mode 100644 index 00000000..90eeedcf --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-modules.yml VENDOR=delta BASENAME=x86-64-delta-ag8032 ARCH=amd64 KERNELS="onl-kernel-4.9-lts-x86-64-all:amd64" diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/.gitignore b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/.gitignore new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/.gitignore @@ -0,0 +1 @@ +lib diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/Makefile b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/Makefile new file mode 100644 index 00000000..89f852cb --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/Makefile @@ -0,0 +1,6 @@ +KERNELS := onl-kernel-4.9-lts-x86-64-all:amd64 +KMODULES := $(wildcard *.c) +VENDOR := delta +BASENAME := x86-64-delta-ag8032 +ARCH := x86_64 +include $(ONL)/make/kmodule.mk diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/x86-64-delta-ag8032-i2c-mux-cpld.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/x86-64-delta-ag8032-i2c-mux-cpld.c new file mode 100644 index 00000000..a0ad8bd4 --- /dev/null +++ 
b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/x86-64-delta-ag8032-i2c-mux-cpld.c @@ -0,0 +1,207 @@ + +/* + * + * Copyright (C) 2017 Delta Networks, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#if 0 +#include "x86-64-delta-ag8032-i2c-mux-cpld.h" +#else +struct i2c_mux_cpld_platform_data +{ + u8 cpld_bus; + u8 cpld_addr; + u8 cpld_reg; + + u8 parent_bus; + + u8 base_nr; + + const u8 *values; + int n_values; + bool idle_in_use; + u8 idle; + + void *ctrl_adap; +}; + + +#endif + +//static DEFINE_MUTEX(locker); + +struct cpldmux { + struct i2c_mux_cpld_platform_data data; +}; + +static int i2c_mux_cpld_set(const struct cpldmux *mux, unsigned int chan_id) +{ + unsigned long orig_jiffies; + unsigned short flags; + union i2c_smbus_data data; + struct i2c_adapter *ctrl_adap; + int try; + s32 res = -EIO; + + data.byte = chan_id; + flags = 0; + + ctrl_adap = mux->data.ctrl_adap; + if (!ctrl_adap) + return res; + + // try to lock it + if (ctrl_adap->algo->smbus_xfer) { + /* Retry automatically on arbitration loss */ + orig_jiffies = jiffies; + for (res = 0, try = 0; try <= ctrl_adap->retries; try++) { + + // modify the register + res = ctrl_adap->algo->smbus_xfer(ctrl_adap, + mux->data.cpld_addr, flags, + 
I2C_SMBUS_WRITE, mux->data.cpld_reg, + I2C_SMBUS_BYTE_DATA, &data); + if (res && res != -EAGAIN) + break; + if (time_after(jiffies, + orig_jiffies + ctrl_adap->timeout)) + break; + } + } + + return 0; +} + +static int i2c_mux_cpld_select (struct i2c_mux_core *muxc, u32 chan) +{ + struct cpldmux *mux = i2c_mux_priv(muxc); + + return i2c_mux_cpld_set(mux, chan); +} + +static int i2c_mux_cpld_deselect (struct i2c_mux_core *muxc, u32 chan) +{ + struct cpldmux *mux = i2c_mux_priv(muxc); + + if (mux->data.idle_in_use) + return i2c_mux_cpld_set(mux, mux->data.idle); + + return 0; +} + +static int i2c_mux_cpld_probe(struct platform_device *pdev) +{ + struct i2c_mux_core *muxc; + struct cpldmux *mux; + struct i2c_adapter *parent; + struct i2c_adapter *ctrl; + int i, ret, nr; + + mux = devm_kzalloc(&pdev->dev, sizeof(*mux), GFP_KERNEL); + if (!mux) + return -ENOMEM; + + ctrl = NULL; + parent = NULL; + if (dev_get_platdata(&pdev->dev)) { + memcpy(&mux->data, dev_get_platdata(&pdev->dev), + sizeof(mux->data)); + + parent = i2c_get_adapter(mux->data.parent_bus); + if (!parent) + return -EPROBE_DEFER; + ctrl = i2c_get_adapter(mux->data.cpld_bus); + if (!ctrl) { + i2c_put_adapter(parent); + return -EPROBE_DEFER; + } + + } + + muxc = i2c_mux_alloc (parent, &pdev->dev, + mux->data.n_values, + 0, 0, + i2c_mux_cpld_select, + i2c_mux_cpld_deselect); + + if (!muxc) + return -ENOMEM; + muxc->priv = mux; + mux->data.ctrl_adap = ctrl; + + platform_set_drvdata(pdev, muxc); + + for (i = 0; i < mux->data.n_values; i++) { + nr = mux->data.base_nr ? 
(mux->data.base_nr + i) : 0; + + ret = i2c_mux_add_adapter (muxc, nr, mux->data.values[i], 0); + if (ret) { + dev_err(&pdev->dev, "Failed to add adapter %d\n", i); + goto add_adapter_failed; + } + } + + dev_dbg(&pdev->dev, "%d port mux on %s adapter\n", + mux->data.n_values, parent->name); + + return 0; + +add_adapter_failed: + + i2c_put_adapter(ctrl); + i2c_put_adapter(parent); + i2c_mux_del_adapters(muxc); + + return ret; +} + +static int i2c_mux_cpld_remove(struct platform_device *pdev) +{ + struct i2c_mux_core *muxc = platform_get_drvdata(pdev); + struct cpldmux *mux = i2c_mux_priv(muxc); + + i2c_mux_del_adapters(muxc); + i2c_put_adapter(mux->data.ctrl_adap); + i2c_put_adapter(muxc->parent); + + return 0; +} + +static struct platform_driver i2c_mux_cpld_driver = { + .probe = i2c_mux_cpld_probe, + .remove = i2c_mux_cpld_remove, + .driver = { + .name = "i2c-mux-cpld", + }, +}; + +module_platform_driver(i2c_mux_cpld_driver); + +MODULE_AUTHOR("Dave Hu "); +MODULE_DESCRIPTION("I2C CPLD mux driver"); +MODULE_LICENSE("GPL"); +MODULE_ALIAS("platform:i2c-mux-cpld"); + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/x86-64-delta-ag8032-i2c-mux-cpld.h b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/x86-64-delta-ag8032-i2c-mux-cpld.h new file mode 100644 index 00000000..acda4089 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/x86-64-delta-ag8032-i2c-mux-cpld.h @@ -0,0 +1,43 @@ + +/* + * + * Copyright (C) 2017 Delta Networks, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef __DNI_I2C_MUX_CPLD_H__ +#define __DNI_I2C_MUX_CPLD_H__ + +struct i2c_mux_cpld_platform_data +{ + u8 cpld_bus; + u8 cpld_addr; + u8 cpld_reg; + + u8 parent_bus; + + u8 base_nr; + + const u8 *values; + int n_values; + bool idle_in_use; + u8 idle; + + void *ctrl_adap; +}; + +#endif // __DNI_I2C_MUX_CPLD_H__ + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/x86-64-delta-ag8032-i2c-mux-setting.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/x86-64-delta-ag8032-i2c-mux-setting.c new file mode 100644 index 00000000..38f1a382 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/modules/builds/x86-64-delta-ag8032-i2c-mux-setting.c @@ -0,0 +1,177 @@ + +/* + * + * Copyright (C) 2017 Delta Networks, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include + +#if 0 +#include "x86-64-delta-ag8032-i2c-mux-cpld.h" +#else +struct i2c_mux_cpld_platform_data +{ + u8 cpld_bus; + u8 cpld_addr; + u8 cpld_reg; + + u8 parent_bus; + + u8 base_nr; + + const u8 *values; + int n_values; + bool idle_in_use; + u8 idle; + + void *ctrl_adap; +}; +#endif + +static const u8 subbus_mux_values[] = { + 0xe0, 0xe1, 0xe2, 0xe3 +}; +static struct i2c_mux_cpld_platform_data subbus_mux = { + .cpld_bus = 0, + .cpld_addr= 0x2e, + .cpld_reg = 0x15, + + .parent_bus = 1, + + .values = subbus_mux_values, + .n_values = 4, +}; +static const u8 qsfp_mux_values_1_8[] = { + 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f +}; +static struct i2c_mux_cpld_platform_data qsfp_mux_1_8 = { + .cpld_bus = 3, + .cpld_addr= 0x30, + .cpld_reg = 0x05, + + .parent_bus = 3, + + .values = qsfp_mux_values_1_8, + .n_values = 8, + .idle_in_use = 1, + .idle = 0xff, +}; +static const u8 qsfp_mux_values_9_16[] = { + 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f +}; +static struct i2c_mux_cpld_platform_data qsfp_mux_9_16 = { + .cpld_bus = 3, + .cpld_addr= 0x30, + .cpld_reg = 0x04, + + .parent_bus = 3, + + .values = qsfp_mux_values_9_16, + .n_values = 8, + .idle_in_use = 1, + .idle = 0xff, + +}; + +static const u8 qsfp_mux_values_17_24[] = { + 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f +}; +static struct i2c_mux_cpld_platform_data qsfp_mux_17_24 = { + .cpld_bus = 3, + .cpld_addr= 0x30, + .cpld_reg = 0x03, + + .parent_bus = 3, + + .values = qsfp_mux_values_17_24, + .n_values = 8, + .idle_in_use = 1, + .idle = 0xff, + +}; + +static const u8 qsfp_mux_values_25_32[] = { + 0xfe, 0xfd, 0xfb, 0xf7, 0xef, 0xdf, 0xbf, 0x7f +}; +static struct i2c_mux_cpld_platform_data qsfp_mux_25_32 = { + .cpld_bus = 3, + .cpld_addr= 0x30, + .cpld_reg = 0x02, + + .parent_bus = 3, + + .values = qsfp_mux_values_25_32, + .n_values = 8, + .idle_in_use = 1, + .idle = 0xff, +}; + + +static int add_ag8032_platform_cpld_mux_devices(void) +{ + struct 
platform_device *pdev; + + pdev = platform_device_register_simple("i2c-mux-cpld", 1, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + platform_device_add_data (pdev, &subbus_mux, sizeof(subbus_mux)); + + pdev = platform_device_register_simple("i2c-mux-cpld", 2, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + platform_device_add_data (pdev, &qsfp_mux_1_8, sizeof(qsfp_mux_1_8)); + + pdev = platform_device_register_simple("i2c-mux-cpld", 3, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + platform_device_add_data (pdev, &qsfp_mux_9_16, sizeof(qsfp_mux_9_16)); + + pdev = platform_device_register_simple("i2c-mux-cpld", 4, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + platform_device_add_data (pdev, &qsfp_mux_17_24, sizeof(qsfp_mux_17_24)); + + pdev = platform_device_register_simple("i2c-mux-cpld", 5, NULL, 0); + if (IS_ERR(pdev)) + return PTR_ERR(pdev); + platform_device_add_data (pdev, &qsfp_mux_25_32, sizeof(qsfp_mux_25_32)); + + return 0; +} + +static int __init ag8032_platform_init(void) +{ + add_ag8032_platform_cpld_mux_devices (); + + return 0; +} + +static void __exit ag8032_platform_exit(void) +{ +} + +MODULE_AUTHOR("Dave Hu "); +MODULE_DESCRIPTION("Delta AG8032"); +MODULE_LICENSE("GPL"); + +module_init(ag8032_platform_init); +module_exit(ag8032_platform_exit); + + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/Makefile b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/PKG.yml b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/PKG.yml new file mode 100644 index 00000000..11834e3e --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/PKG.yml @@ -0,0 +1 @@ +!include 
$ONL_TEMPLATES/onlp-platform-any.yml PLATFORM=x86-64-delta-ag8032 ARCH=amd64 TOOLCHAIN=x86_64-linux-gnu diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/Makefile b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/Makefile new file mode 100644 index 00000000..e7437cb2 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/Makefile @@ -0,0 +1,2 @@ +FILTER=src +include $(ONL)/make/subdirs.mk diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/lib/Makefile b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/lib/Makefile new file mode 100644 index 00000000..0657b68b --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/lib/Makefile @@ -0,0 +1,45 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. 
+# +# +############################################################ +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +MODULE := libonlp-x86-64-delta-ag8032 +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF x86_64_delta_ag8032 onlplib +DEPENDMODULE_HEADERS := sff + +include $(BUILDER)/dependmodules.mk + +SHAREDLIB := libonlp-x86-64-delta-ag8032.so +$(SHAREDLIB)_TARGETS := $(ALL_TARGETS) +include $(BUILDER)/so.mk +.DEFAULT_GOAL := $(SHAREDLIB) + +GLOBAL_CFLAGS += -I$(onlp_BASEDIR)/module/inc +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -fPIC +GLOBAL_LINK_LIBS += -lpthread + +include $(BUILDER)/targets.mk + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/onlpdump/Makefile b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/onlpdump/Makefile new file mode 100644 index 00000000..833ae998 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/onlpdump/Makefile @@ -0,0 +1,46 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. 
+# +# +############################################################ +# +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +.DEFAULT_GOAL := onlpdump + +MODULE := onlpdump +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF onlp x86_64_delta_ag8032 onlplib onlp_platform_defaults sff cjson cjson_util timer_wheel OS + +include $(BUILDER)/dependmodules.mk + +BINARY := onlpdump +$(BINARY)_LIBRARIES := $(LIBRARY_TARGETS) +include $(BUILDER)/bin.mk + +GLOBAL_CFLAGS += -DAIM_CONFIG_AIM_MAIN_FUNCTION=onlpdump_main +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MAIN=1 +GLOBAL_LINK_LIBS += -lpthread -lm + +include $(BUILDER)/targets.mk + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/.module b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/.module new file mode 100644 index 00000000..f6df00c3 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/.module @@ -0,0 +1 @@ +name: x86_64_delta_ag8032 diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/Makefile b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/Makefile new file mode 100644 index 00000000..a3469613 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/Makefile @@ -0,0 +1,9 @@ +############################################################################### +# +# +# +############################################################################### +include ../../init.mk +MODULE := x86_64_delta_ag8032 +AUTOMODULE := x86_64_delta_ag8032 +include $(BUILDER)/definemodule.mk diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/README b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/README new file mode 100644 index 00000000..178070bb --- /dev/null +++ 
b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/README @@ -0,0 +1,6 @@ +############################################################################### +# +# x86_64_delta_ag8032 README +# +############################################################################### + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/auto/make.mk b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/auto/make.mk new file mode 100644 index 00000000..32eab713 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/auto/make.mk @@ -0,0 +1,9 @@ +############################################################################### +# +# x86_64_delta_ag8032 Autogeneration +# +############################################################################### +x86_64_delta_ag8032_AUTO_DEFS := module/auto/x86_64_delta_ag8032.yml +x86_64_delta_ag8032_AUTO_DIRS := module/inc/x86_64_delta_ag8032 module/src +include $(BUILDER)/auto.mk + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/auto/x86_64_delta_ag8032.yml b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/auto/x86_64_delta_ag8032.yml new file mode 100644 index 00000000..1c6ea4f4 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/auto/x86_64_delta_ag8032.yml @@ -0,0 +1,50 @@ +############################################################################### +# +# x86_64_delta_ag8032 Autogeneration Definitions. +# +############################################################################### + +cdefs: &cdefs +- X86_64_DELTA_AG8032_CONFIG_INCLUDE_LOGGING: + doc: "Include or exclude logging." + default: 1 +- X86_64_DELTA_AG8032_CONFIG_LOG_OPTIONS_DEFAULT: + doc: "Default enabled log options." + default: AIM_LOG_OPTIONS_DEFAULT +- X86_64_DELTA_AG8032_CONFIG_LOG_BITS_DEFAULT: + doc: "Default enabled log bits." 
+ default: AIM_LOG_BITS_DEFAULT +- X86_64_DELTA_AG8032_CONFIG_LOG_CUSTOM_BITS_DEFAULT: + doc: "Default enabled custom log bits." + default: 0 +- X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB: + doc: "Default all porting macros to use the C standard libraries." + default: 1 +- X86_64_DELTA_AG8032_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS: + doc: "Include standard library headers for stdlib porting macros." + default: X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB +- X86_64_DELTA_AG8032_CONFIG_INCLUDE_UCLI: + doc: "Include generic uCli support." + default: 0 +- X86_64_DELTA_AG8032_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION: + doc: "Assume chassis fan direction is the same as the PSU fan direction." + default: 0 + + +definitions: + cdefs: + X86_64_DELTA_AG8032_CONFIG_HEADER: + defs: *cdefs + basename: x86_64_delta_ag8032_config + + portingmacro: + x86_64_delta_ag8032: + macros: + - malloc + - free + - memset + - memcpy + - strncpy + - vsnprintf + - snprintf + - strlen diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032.x b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032.x new file mode 100644 index 00000000..f75707cf --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032.x @@ -0,0 +1,14 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +/* <--auto.start.xmacro(ALL).define> */ +/* */ + +/* <--auto.start.xenum(ALL).define> */ +/* */ + + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032_config.h b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032_config.h new file mode 
100644 index 00000000..100cd6ba --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032_config.h @@ -0,0 +1,137 @@ +/**************************************************************************//** + * + * @file + * @brief x86_64_delta_ag8032 Configuration Header + * + * @addtogroup x86_64_delta_ag8032-config + * @{ + * + *****************************************************************************/ +#ifndef __X86_64_DELTA_AG8032_CONFIG_H__ +#define __X86_64_DELTA_AG8032_CONFIG_H__ + +#ifdef GLOBAL_INCLUDE_CUSTOM_CONFIG +#include +#endif +#ifdef X86_64_DELTA_AG8032_INCLUDE_CUSTOM_CONFIG +#include +#endif + +/* */ +#include +/** + * X86_64_DELTA_AG8032_CONFIG_INCLUDE_LOGGING + * + * Include or exclude logging. */ + + +#ifndef X86_64_DELTA_AG8032_CONFIG_INCLUDE_LOGGING +#define X86_64_DELTA_AG8032_CONFIG_INCLUDE_LOGGING 1 +#endif + +/** + * X86_64_DELTA_AG8032_CONFIG_LOG_OPTIONS_DEFAULT + * + * Default enabled log options. */ + + +#ifndef X86_64_DELTA_AG8032_CONFIG_LOG_OPTIONS_DEFAULT +#define X86_64_DELTA_AG8032_CONFIG_LOG_OPTIONS_DEFAULT AIM_LOG_OPTIONS_DEFAULT +#endif + +/** + * X86_64_DELTA_AG8032_CONFIG_LOG_BITS_DEFAULT + * + * Default enabled log bits. */ + + +#ifndef X86_64_DELTA_AG8032_CONFIG_LOG_BITS_DEFAULT +#define X86_64_DELTA_AG8032_CONFIG_LOG_BITS_DEFAULT AIM_LOG_BITS_DEFAULT +#endif + +/** + * X86_64_DELTA_AG8032_CONFIG_LOG_CUSTOM_BITS_DEFAULT + * + * Default enabled custom log bits. */ + + +#ifndef X86_64_DELTA_AG8032_CONFIG_LOG_CUSTOM_BITS_DEFAULT +#define X86_64_DELTA_AG8032_CONFIG_LOG_CUSTOM_BITS_DEFAULT 0 +#endif + +/** + * X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB + * + * Default all porting macros to use the C standard libraries. 
*/ + + +#ifndef X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB +#define X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB 1 +#endif + +/** + * X86_64_DELTA_AG8032_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + * + * Include standard library headers for stdlib porting macros. */ + + +#ifndef X86_64_DELTA_AG8032_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS +#define X86_64_DELTA_AG8032_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB +#endif + +/** + * X86_64_DELTA_AG8032_CONFIG_INCLUDE_UCLI + * + * Include generic uCli support. */ + + +#ifndef X86_64_DELTA_AG8032_CONFIG_INCLUDE_UCLI +#define X86_64_DELTA_AG8032_CONFIG_INCLUDE_UCLI 0 +#endif + +/** + * X86_64_DELTA_AG8032_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION + * + * Assume chassis fan direction is the same as the PSU fan direction. */ + + +#ifndef X86_64_DELTA_AG8032_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION +#define X86_64_DELTA_AG8032_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION 0 +#endif + + + +/** + * All compile time options can be queried or displayed + */ + +/** Configuration settings structure. */ +typedef struct x86_64_delta_ag8032_config_settings_s { + /** name */ + const char* name; + /** value */ + const char* value; +} x86_64_delta_ag8032_config_settings_t; + +/** Configuration settings table. */ +/** x86_64_delta_ag8032_config_settings table. */ +extern x86_64_delta_ag8032_config_settings_t x86_64_delta_ag8032_config_settings[]; + +/** + * @brief Lookup a configuration setting. + * @param setting The name of the configuration option to lookup. + */ +const char* x86_64_delta_ag8032_config_lookup(const char* setting); + +/** + * @brief Show the compile-time configuration. + * @param pvs The output stream. 
+ */ +int x86_64_delta_ag8032_config_show(struct aim_pvs_s* pvs); + +/* */ + +#include "x86_64_delta_ag8032_porting.h" + +#endif /* __X86_64_DELTA_AG8032_CONFIG_H__ */ +/* @} */ diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032_dox.h b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032_dox.h new file mode 100644 index 00000000..da2c1539 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032_dox.h @@ -0,0 +1,26 @@ +/**************************************************************************//** + * + * x86_64_delta_ag8032 Doxygen Header + * + *****************************************************************************/ +#ifndef __X86_64_DELTA_AG8032_DOX_H__ +#define __X86_64_DELTA_AG8032_DOX_H__ + +/** + * @defgroup x86_64_delta_ag8032 x86_64_delta_ag8032 - x86_64_delta_ag8032 Description + * + +The documentation overview for this module should go here. 
+ + * + * @{ + * + * @defgroup x86_64_delta_ag8032-x86_64_delta_ag8032 Public Interface + * @defgroup x86_64_delta_ag8032-config Compile Time Configuration + * @defgroup x86_64_delta_ag8032-porting Porting Macros + * + * @} + * + */ + +#endif /* __X86_64_DELTA_AG8032_DOX_H__ */ diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032_porting.h b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032_porting.h new file mode 100644 index 00000000..afd2ea88 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/inc/x86_64_delta_ag8032/x86_64_delta_ag8032_porting.h @@ -0,0 +1,107 @@ +/**************************************************************************//** + * + * @file + * @brief x86_64_delta_ag8032 Porting Macros. + * + * @addtogroup x86_64_delta_ag8032-porting + * @{ + * + *****************************************************************************/ +#ifndef __X86_64_DELTA_AG8032_PORTING_H__ +#define __X86_64_DELTA_AG8032_PORTING_H__ + + +/* */ +#if X86_64_DELTA_AG8032_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS == 1 +#include +#include +#include +#include +#include +#endif + +#ifndef x86_64_delta_ag8032_MALLOC + #if defined(GLOBAL_MALLOC) + #define x86_64_delta_ag8032_MALLOC GLOBAL_MALLOC + #elif X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB == 1 + #define x86_64_delta_ag8032_MALLOC malloc + #else + #error The macro x86_64_delta_ag8032_MALLOC is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_delta_ag8032_FREE + #if defined(GLOBAL_FREE) + #define x86_64_delta_ag8032_FREE GLOBAL_FREE + #elif X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB == 1 + #define x86_64_delta_ag8032_FREE free + #else + #error The macro x86_64_delta_ag8032_FREE is required but cannot be defined. 
+ #endif +#endif + +#ifndef x86_64_delta_ag8032_MEMSET + #if defined(GLOBAL_MEMSET) + #define x86_64_delta_ag8032_MEMSET GLOBAL_MEMSET + #elif X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB == 1 + #define x86_64_delta_ag8032_MEMSET memset + #else + #error The macro x86_64_delta_ag8032_MEMSET is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_delta_ag8032_MEMCPY + #if defined(GLOBAL_MEMCPY) + #define x86_64_delta_ag8032_MEMCPY GLOBAL_MEMCPY + #elif X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB == 1 + #define x86_64_delta_ag8032_MEMCPY memcpy + #else + #error The macro x86_64_delta_ag8032_MEMCPY is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_delta_ag8032_STRNCPY + #if defined(GLOBAL_STRNCPY) + #define x86_64_delta_ag8032_STRNCPY GLOBAL_STRNCPY + #elif X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB == 1 + #define x86_64_delta_ag8032_STRNCPY strncpy + #else + #error The macro x86_64_delta_ag8032_STRNCPY is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_delta_ag8032_VSNPRINTF + #if defined(GLOBAL_VSNPRINTF) + #define x86_64_delta_ag8032_VSNPRINTF GLOBAL_VSNPRINTF + #elif X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB == 1 + #define x86_64_delta_ag8032_VSNPRINTF vsnprintf + #else + #error The macro x86_64_delta_ag8032_VSNPRINTF is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_delta_ag8032_SNPRINTF + #if defined(GLOBAL_SNPRINTF) + #define x86_64_delta_ag8032_SNPRINTF GLOBAL_SNPRINTF + #elif X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB == 1 + #define x86_64_delta_ag8032_SNPRINTF snprintf + #else + #error The macro x86_64_delta_ag8032_SNPRINTF is required but cannot be defined. + #endif +#endif + +#ifndef x86_64_delta_ag8032_STRLEN + #if defined(GLOBAL_STRLEN) + #define x86_64_delta_ag8032_STRLEN GLOBAL_STRLEN + #elif X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB == 1 + #define x86_64_delta_ag8032_STRLEN strlen + #else + #error The macro x86_64_delta_ag8032_STRLEN is required but cannot be defined. 
+ #endif +#endif + +/* */ + + +#endif /* __X86_64_DELTA_AG8032_PORTING_H__ */ +/* @} */ diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/make.mk b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/make.mk new file mode 100644 index 00000000..3840b537 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/make.mk @@ -0,0 +1,10 @@ +############################################################################### +# +# +# +############################################################################### +THIS_DIR := $(dir $(lastword $(MAKEFILE_LIST))) +x86_64_delta_ag8032_INCLUDES := -I $(THIS_DIR)inc +x86_64_delta_ag8032_INTERNAL_INCLUDES := -I $(THIS_DIR)src +x86_64_delta_ag8032_DEPENDMODULE_ENTRIES := init:x86_64_delta_ag8032 ucli:x86_64_delta_ag8032 + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/Makefile b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/Makefile new file mode 100644 index 00000000..d5e69996 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/Makefile @@ -0,0 +1,9 @@ +############################################################################### +# +# Local source generation targets. 
+# +############################################################################### + +ucli: + @../../../../tools/uclihandlers.py x86_64_delta_ag8032_ucli.c + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/debug.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/debug.c new file mode 100644 index 00000000..4a84af85 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/debug.c @@ -0,0 +1,44 @@ + +#if X86_64_DELTA_AG8032_CONFIG_INCLUDE_DEBUG == 1 + +#include + +static char help__[] = + "Usage: debug [options]\n" + " -c CPLD Versions\n" + " -h Help\n" + ; + +int +x86_64_delta_ag8032_debug_main(int argc, char* argv[]) +{ + int c = 0; + int help = 0; + int rv = 0; + + while( (c = getopt(argc, argv, "ch")) != -1) { + switch(c) + { + case 'c': c = 1; break; + case 'h': help = 1; rv = 0; break; + default: help = 1; rv = 1; break; + } + + } + + if(help || argc == 1) { + printf("%s", help__); + return rv; + } + + if(c) { + printf("Not implemented.\n"); + } + + + return 0; +} + +#endif + + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_drv.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_drv.c new file mode 100644 index 00000000..cdbee7b4 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_drv.c @@ -0,0 +1,43 @@ + +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * Copyright 2017 (C) Delta Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include "platform_lib.h" +#include "eeprom_drv.h" + +int eeprom_read (int bus, uint8_t addr, uint8_t offset, uint8_t *buff, int len) +{ + int i,r; + + for (i = 0 ; i < len ; i ++) { + r = onlp_i2c_readb(bus,addr,offset+i,0); + if (r < 0) { + return -1; + } + buff[i] = (uint8_t)r; + } + return 0; +} + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_drv.h b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_drv.h new file mode 100644 index 00000000..9fd29b4d --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_drv.h @@ -0,0 +1,33 @@ + +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * Copyright 2017 (C) Delta Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#ifndef __EEPROM_DRV_H__ +#define __EEPROM_DRV_H__ + +int eeprom_read (int bus, uint8_t addr, uint8_t offset, uint8_t *buff, int len); +#endif // __EEPROM_DRV_H__ + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_info.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_info.c new file mode 100644 index 00000000..c780578f --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_info.c @@ -0,0 +1,118 @@ + +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * Copyright 2017 (C) Delta Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include "eeprom_info.h" +#include + +#define PSU_MODEL_LEN 11 +#define PSU_SERIES_LEN 14 + +/* + function: + search the eeprom with the key + variables: + eeprom: the eeprom data; + key : the searching key string + mode : 1 means model search, 0 means series search + return: + success, return index which points to the finding string + failed, return -1 +*/ + +int eeprom_info_find(char *eeprom, int len, const char *key,int mode) +{ + int index=0; + int found=0; + int key_len=0; + if(!eeprom || !key) + return -1; + + key_len=strlen(key); + + while(index < len-key_len){ + if (!strncmp(&eeprom[index], key, key_len)){ + found=1; + break; + } + index++; + } + if(found){ + /*mode is 1 means the model search and mode is 0 means the series search*/ + if((mode == 1) && (index < len-PSU_MODEL_LEN)) + return index; + else if ((mode == 0) && (index < len-PSU_SERIES_LEN)) + return index; + else + return -1; + } + + return -1; + +} + +int eeprom_info_get(uint8_t *eeprom, int len, char *type, char *v) +{ + const char psu_model_key[]="DPS"; + const char psu_460_series_key[]="DZRD"; + const char psu_550_series_key[]="GVVD"; + int index=0; + char model[PSU_MODEL_LEN+1]={'\0'}; + char * eep=NULL; + if(!eeprom || !type ||!v) + return -1; + eep=(char *)eeprom; + /*fan eeprom is not now*/ + if((strcmp(type, "fan_model")==0) ||(strcmp(type,"fan_series"))==0) + return 0; + /*first get the psu tpye*/ + index = eeprom_info_find(eep,len,psu_model_key,1); + if(index <0) + return -1; + strncpy(model,&eep[index],PSU_MODEL_LEN); + + if((strcmp(type,"psu_model"))==0){ + strncpy(v,model,PSU_MODEL_LEN); + } + else if ((strcmp(type,"psu_series"))==0){ + if(strstr(model,"460")){ + index = eeprom_info_find(eep,len,psu_460_series_key,0); + if(index <0) + return -1; + strncpy(v,&eep[index],PSU_SERIES_LEN); + } + else if(strstr(model,"550")){ + index = 
eeprom_info_find(eep,len,psu_550_series_key,0); + if(index <0) + return -1; + strncpy(v,&eep[index],PSU_SERIES_LEN); + } + } + else + return -1; + + return 0; +} + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_info.h b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_info.h new file mode 100644 index 00000000..8a370f6f --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/eeprom_info.h @@ -0,0 +1,36 @@ + +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * Copyright 2017 (C) Delta Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#ifndef __EEPROM_INFO__ +#define __EEPROM_INFO__ + +#include "onlp/onlp_config.h" + +extern int eeprom_info_get(uint8_t *eeprom, int len, char *type, char *v); + +#endif // __EEPROM_INFO__ + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/fani.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/fani.c new file mode 100644 index 00000000..ef6ec17e --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/fani.c @@ -0,0 +1,448 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * Copyright 2017 (C) Delta Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include +#include +#include "platform_lib.h" +#include "eeprom_info.h" + +#define AG8032_FAN_TRAY_DEF_CAP \ + (ONLP_FAN_CAPS_F2B) + +static int _fan_tray_present (void *e); +static int _psu_fan_present (void *e); +static int _fan_event_cb (void *e, int ev); + +static cpld_reg_t _fan_tray_present_reg[] = { + [PLAT_LED_ID_5] = CPLD_REG(CPLD_CPUPLD, 0x11, 0, 1), + [PLAT_LED_ID_6] = CPLD_REG(CPLD_CPUPLD, 0x11, 1, 1), + [PLAT_LED_ID_7] = CPLD_REG(CPLD_CPUPLD, 0x11, 2, 1), +}; + + +static plat_fan_t plat_fans[] = { + [PLAT_FAN_ID_1] = { + .name = "FanTray1-1", + .present = _fan_tray_present, + .present_data = &_fan_tray_present_reg[PLAT_LED_ID_5], + .rpm_get_path = "/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-0029/fan1_input", + .rpm_set_path = "/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-0029/fan1_target", + .def_rpm = 7000, + .per_get_path = NULL, + .per_set_path = NULL, + .caps = AG8032_FAN_TRAY_DEF_CAP, + + .eeprom_path = "/sys/devices/platform/*/*/2-0051/eeprom", + + .event_callback = _fan_event_cb, + PLAT_FAN_INTERNAL_DEF, + }, + [PLAT_FAN_ID_2] = { + .name = "FanTray1-2", + .present = _fan_tray_present, + .present_data = &_fan_tray_present_reg[PLAT_LED_ID_5], + .rpm_get_path = "/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-0029/fan2_input", + .rpm_set_path = "/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-0029/fan2_target", + .def_rpm = 7000, + .per_get_path = NULL, + .per_set_path = NULL, + .caps = AG8032_FAN_TRAY_DEF_CAP, + + .eeprom_path = "/sys/devices/platform/*/*/2-0051/eeprom", + + .event_callback = _fan_event_cb, + PLAT_FAN_INTERNAL_DEF, + }, + [PLAT_FAN_ID_3] = { + .name = "FanTray2-1", + .present = _fan_tray_present, + .present_data = &_fan_tray_present_reg[PLAT_LED_ID_6], + .rpm_get_path = "/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-0029/fan3_input", + .rpm_set_path = 
"/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-0029/fan3_target", + .def_rpm = 7000, + .per_get_path = NULL, + .per_set_path = NULL, + .caps = AG8032_FAN_TRAY_DEF_CAP, + + .eeprom_path = "/sys/devices/platform/*/*/2-0052/eeprom", + + .event_callback = _fan_event_cb, + PLAT_FAN_INTERNAL_DEF, + }, + [PLAT_FAN_ID_4] = { + .name = "FanTray2-2", + .present = _fan_tray_present, + .present_data = &_fan_tray_present_reg[PLAT_LED_ID_6], + .rpm_get_path = "/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-0029/fan4_input", + .rpm_set_path = "/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-0029/fan4_target", + .def_rpm = 7000, + .per_get_path = NULL, + .per_set_path = NULL, + .caps = AG8032_FAN_TRAY_DEF_CAP, + + .eeprom_path = "/sys/devices/platform/*/*/2-0052/eeprom", + + .event_callback = _fan_event_cb, + PLAT_FAN_INTERNAL_DEF, + }, + [PLAT_FAN_ID_5] = { + .name = "FanTray3-1", + .present = _fan_tray_present, + .present_data = &_fan_tray_present_reg[PLAT_LED_ID_7], + .rpm_get_path = "/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-002a/fan1_input", + .rpm_set_path = "/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-002a/fan1_target", + .def_rpm = 7000, + .per_get_path = NULL, + .per_set_path = NULL, + .caps = AG8032_FAN_TRAY_DEF_CAP, + + .eeprom_path = "/sys/devices/platform/*/*/2-0053/eeprom", + + .event_callback = _fan_event_cb, + PLAT_FAN_INTERNAL_DEF, + }, + [PLAT_FAN_ID_6] = { + .name = "FanTray3-2", + .present = _fan_tray_present, + .present_data = &_fan_tray_present_reg[PLAT_LED_ID_7], + .rpm_get_path = "/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-002a/fan2_input", + .rpm_set_path = "/sys/devices/pci0000:00/0000:00:13.0/i2c-1/i2c-2/2-002a/fan2_target", + .def_rpm = 7000, + .per_get_path = NULL, + .per_set_path = NULL, + .caps = AG8032_FAN_TRAY_DEF_CAP, + + .eeprom_path = "/sys/devices/platform/*/*/2-0053/eeprom", + + .event_callback = _fan_event_cb, + PLAT_FAN_INTERNAL_DEF, + }, + [PLAT_FAN_ID_7] = { + .name = "PSU1 Fan", + .present = 
_psu_fan_present, + .rpm_get_path = "/sys/bus/i2c/devices/i2c-4/4-0058/hwmon/*/fan1_input", + .rpm_set_path = NULL, + .per_get_path = NULL, + .per_set_path = NULL, + .caps = AG8032_FAN_TRAY_DEF_CAP, + + PLAT_FAN_INTERNAL_DEF, + }, + [PLAT_FAN_ID_8] = { + .name = "PSU2 Fan", + .present = _psu_fan_present, + .rpm_get_path = "/sys/bus/i2c/devices/i2c-4/4-0059/hwmon/*/fan1_input", + .rpm_set_path = NULL, + .per_get_path = NULL, + .per_set_path = NULL, + .caps = AG8032_FAN_TRAY_DEF_CAP, + + PLAT_FAN_INTERNAL_DEF, + }, +}; + +static int _psu_fan_present (void *e) +{ + plat_fan_t *fan = e; + return plat_os_file_is_existed (fan->rpm_get_path) ? 1 : 0; +} + + +static int _fan_event_cb (void *e, int ev) +{ + int len; + plat_fan_t *fan = e; + + switch (ev) { + case PLAT_FAN_EVENT_PLUGIN: + // reflush fan setting + if (fan->rpm_set_value > 0 && fan->rpm_set_path) + plat_os_file_write_int (fan->rpm_set_value, fan->rpm_set_path, NULL); + + if (fan->per_set_value > 0 && fan->per_set_path) + plat_os_file_write_int (fan->per_set_value, fan->per_set_path, NULL); + + // read eeprom info + if (fan->eeprom_path && (plat_os_file_read (fan->eeprom, sizeof(fan->eeprom), &len, + fan->eeprom_path, NULL) == ONLP_STATUS_OK)) { + break; + } + + memset (fan->eeprom, 0xff, sizeof(fan->eeprom)); + break; + case PLAT_FAN_EVENT_UNPLUG: + + // clear eeprom info + memset (fan->eeprom, 0xff, sizeof(fan->eeprom)); + break; + default: + break; + } + + return 0; +} + +static int _fan_tray_present (void *e) +{ + plat_fan_t *fan = e; + return cpld_reg_get (fan->present_data) == 0 ? 
1 : 0; +} + +static int plat_fan_is_valid (int id) +{ + if (id > PLAT_FAN_ID_INVALID && id < PLAT_FAN_ID_MAX) { + if (plat_fans[id].name) + return 1; + } + return 0; +} + + +int onlp_fani_init(void) +{ +#if 0 + plat_fan_t *fan; + int i; + + for (i = PLAT_FAN_ID_1 ; i < PLAT_FAN_ID_MAX ; i ++) { + + if (!plat_fan_is_valid(i)) + continue; + fan = &plat_fans[i]; + + if (fan->def_rpm && fan->rpm_set_path) + onlp_fani_rpm_set (ONLP_FAN_ID_CREATE(i), fan->def_rpm); + + if (fan->def_per && fan->per_set_path) + onlp_fani_percentage_set(ONLP_FAN_ID_CREATE(i), fan->def_per); + } +#endif + + return ONLP_STATUS_OK; +} + +int onlp_fani_info_get(onlp_oid_t id, onlp_fan_info_t* info) +{ + int error; + plat_fan_t *fan; + int fid; + int present = 1; + + if (!ONLP_OID_IS_FAN(id)) + return ONLP_STATUS_E_INVALID; + + fid = ONLP_OID_ID_GET(id); + + if (!plat_fan_is_valid(fid)) + return ONLP_STATUS_E_INVALID; + + fan = &plat_fans[fid]; + + plat_fan_state_update (fan); + + present = fan->state == PLAT_FAN_STATE_PRESENT ? 
1 : 0; + + memset (info, 0, sizeof(*info)); + + info->hdr.id = id; + if (fan->name) + snprintf (info->hdr.description, sizeof(info->hdr.description), "%s", fan->name); + + info->caps = fan->caps; + if (fan->rpm_get_path) info->caps |= ONLP_FAN_CAPS_GET_RPM; + if (fan->rpm_set_path) info->caps |= ONLP_FAN_CAPS_SET_RPM; + if (fan->per_get_path) info->caps |= ONLP_FAN_CAPS_GET_PERCENTAGE; + if (fan->per_set_path) info->caps |= ONLP_FAN_CAPS_SET_PERCENTAGE; + + error = 0; + if (present) { + info->status |= ONLP_FAN_STATUS_PRESENT; + if (info->caps & ONLP_FAN_CAPS_GET_RPM) { + if (plat_os_file_read_int(&info->rpm, fan->rpm_get_path, NULL) < 0) { + error ++; + } else { + if (fan->rpm_set_value > 0) { + if (info->rpm < ((fan->rpm_set_value * 80) / 100)) { + info->status |= ONLP_FAN_STATUS_FAILED; + } + } + } + } + if (info->caps & ONLP_FAN_CAPS_GET_PERCENTAGE) { + if (plat_os_file_read_int(&info->percentage, fan->per_get_path, NULL) < 0) { + error ++; + } else { + if (fan->per_set_value > 0) { + if (info->percentage < ((fan->per_set_value * 80) / 100)) { + info->status |= ONLP_FAN_STATUS_FAILED; + } + } + } + } + + // get fan info + eeprom_info_get (fan->eeprom, sizeof(fan->eeprom), "fan_model", info->model); + eeprom_info_get (fan->eeprom, sizeof(fan->eeprom), "fan_series", info->serial); + } + + if ((info->caps & (ONLP_FAN_CAPS_B2F | ONLP_FAN_CAPS_B2F)) == (ONLP_FAN_CAPS_B2F | ONLP_FAN_CAPS_B2F)) { + // should do check it auto + // TODO + } else if (info->caps & ONLP_FAN_CAPS_B2F) { + info->status |= ONLP_FAN_STATUS_B2F; + } else if (info->caps & ONLP_FAN_CAPS_F2B) { + info->status |= ONLP_FAN_STATUS_F2B; + } + return error ? ONLP_STATUS_E_INTERNAL : ONLP_STATUS_OK; +} + +/* + * This function sets the speed of the given fan in RPM. + * + * This function will only be called if the fan supprots the RPM_SET + * capability. + * + * It is optional if you have no fans at all with this feature. 
+ */ +int +onlp_fani_rpm_set(onlp_oid_t id, int rpm) +{ + int error; + plat_fan_t *fan; + int fid; + + if (!ONLP_OID_IS_FAN(id)) + return ONLP_STATUS_E_INVALID; + + fid = ONLP_OID_ID_GET(id); + + if (!plat_fan_is_valid(fid)) + return ONLP_STATUS_E_INVALID; + + fan = &plat_fans[fid]; + + if (fan->rpm_set_path) { + if (fan->rpm_set_value != rpm) { + error = plat_os_file_write_int (rpm, fan->rpm_set_path, NULL); + } + } else + return ONLP_STATUS_E_UNSUPPORTED; + + if (error < 0) { + return ONLP_STATUS_E_PARAM; + } + + fan->rpm_set_value = rpm; + + return ONLP_STATUS_OK; +} + +int onlp_fani_percentage_set(onlp_oid_t id, int p) +{ + int error; + plat_fan_t *fan; + int fid; + + if (!ONLP_OID_IS_FAN(id)) + return ONLP_STATUS_E_INVALID; + + fid = ONLP_OID_ID_GET(id); + + if (!plat_fan_is_valid(fid)) + return ONLP_STATUS_E_INVALID; + + fan = &plat_fans[fid]; + + if (fan->per_set_path) { + if (fan->per_set_value != p) { + error = plat_os_file_write_int (p, fan->per_set_path, NULL); + } + } else + return ONLP_STATUS_E_UNSUPPORTED; + + if (error < 0) { + return ONLP_STATUS_E_PARAM; + } + + fan->per_set_value = p; + + return ONLP_STATUS_OK; + +} + +/* + * This function sets the fan speed of the given OID as per + * the predefined ONLP fan speed modes: off, slow, normal, fast, max. + * + * Interpretation of these modes is up to the platform. + * + */ +int +onlp_fani_mode_set(onlp_oid_t id, onlp_fan_mode_t mode) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * This function sets the fan direction of the given OID. + * + * This function is only relevant if the fan OID supports both direction + * capabilities. + * + * This function is optional unless the functionality is available. 
+ */ +int +onlp_fani_dir_set(onlp_oid_t id, onlp_fan_dir_t dir) +{ + plat_fan_t *fan; + int fid; + + if (!ONLP_OID_IS_FAN(id)) + return ONLP_STATUS_E_INVALID; + + fid = ONLP_OID_ID_GET(id); + + if (!plat_fan_is_valid(fid)) + return ONLP_STATUS_E_INVALID; + + fan = &plat_fans[fid]; + + if ((fan->caps & (ONLP_FAN_CAPS_B2F | ONLP_FAN_CAPS_B2F)) == (ONLP_FAN_CAPS_B2F | ONLP_FAN_CAPS_B2F)) { + // TODO + } + + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * Generic fan ioctl. Optional. + */ +int +onlp_fani_ioctl(onlp_oid_t id, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/ledi.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/ledi.c new file mode 100644 index 00000000..39b4e70a --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/ledi.c @@ -0,0 +1,347 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * Copyright 2017 Delta Networks, Inc + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include + +#include "platform_lib.h" + +static int _fan_tray_present (void *e); + +static cpld_reg_t _fan_tray_present_reg[] = { + [PLAT_LED_ID_5] = CPLD_REG (CPLD_CPUPLD, 0x11, 0, 1), + [PLAT_LED_ID_6] = CPLD_REG (CPLD_CPUPLD, 0x11, 1, 1), + [PLAT_LED_ID_7] = CPLD_REG (CPLD_CPUPLD, 0x11, 2, 1), +}; + +static plat_led_t plat_leds[] = { + ///////////////////////////////////////////////////////////// + [PLAT_LED_ID_1] = { + .name = "sys", + .hw = CPLD_REG (CPLD_CPUPLD, 0x0e, 2, 2), + .mode = { + PLAT_LED_MODE(ONLP_LED_MODE_RED, 0), + PLAT_LED_MODE(ONLP_LED_MODE_GREEN, 1), + PLAT_LED_MODE(ONLP_LED_MODE_GREEN_BLINKING, 2), + PLAT_LED_MODE(ONLP_LED_MODE_RED_BLINKING, 3), + PLAT_LED_MODE_END, + }, + PLAT_LED_INTERNAL_DEF, + }, + ///////////////////////////////////////////////////////////// + [PLAT_LED_ID_2] = { + .name = "fans", + .hw = CPLD_REG (CPLD_CPUPLD, 0x0c, 0, 2), + .mode = { + PLAT_LED_MODE(ONLP_LED_MODE_OFF, 0), + PLAT_LED_MODE(ONLP_LED_MODE_GREEN, 1), + PLAT_LED_MODE(ONLP_LED_MODE_ORANGE, 3), + PLAT_LED_MODE_END, + }, + PLAT_LED_INTERNAL_DEF, + }, + ///////////////////////////////////////////////////////////// + [PLAT_LED_ID_4] = { + .name = "pwr2", + .hw = CPLD_REG (CPLD_CPUPLD, 0x0c, 4, 2), + .mode = { + PLAT_LED_MODE(ONLP_LED_MODE_OFF, 0), + PLAT_LED_MODE(ONLP_LED_MODE_GREEN, 1), + PLAT_LED_MODE(ONLP_LED_MODE_ORANGE, 2), + PLAT_LED_MODE_END, + }, + PLAT_LED_INTERNAL_DEF, + }, + ///////////////////////////////////////////////////////////// + [PLAT_LED_ID_3] = { + .name = "pwr1", + .hw = CPLD_REG (CPLD_CPUPLD, 0x0c, 6, 2), + .mode = { + PLAT_LED_MODE(ONLP_LED_MODE_OFF, 0), + PLAT_LED_MODE(ONLP_LED_MODE_GREEN, 1), + PLAT_LED_MODE(ONLP_LED_MODE_ORANGE, 2), + PLAT_LED_MODE_END, + }, + PLAT_LED_INTERNAL_DEF, + }, + ///////////////////////////////////////////////////////////// + 
[PLAT_LED_ID_5] = { + .name = "fan1", + .hw = CPLD_REG (CPLD_CPUPLD, 0x0d, 0, 2), + .present = _fan_tray_present, + .present_data = &_fan_tray_present_reg[PLAT_LED_ID_5], + .mode = { + PLAT_LED_MODE(ONLP_LED_MODE_OFF, 0), + PLAT_LED_MODE(ONLP_LED_MODE_GREEN, 1), + PLAT_LED_MODE(ONLP_LED_MODE_ORANGE, 2), + PLAT_LED_MODE_END, + }, + PLAT_LED_INTERNAL_DEF, + }, + ///////////////////////////////////////////////////////////// + [PLAT_LED_ID_6] = { + .name = "fan2", + .hw = CPLD_REG (CPLD_CPUPLD, 0x0d, 2, 2), + .present = _fan_tray_present, + .present_data = &_fan_tray_present_reg[PLAT_LED_ID_6], + .mode = { + PLAT_LED_MODE(ONLP_LED_MODE_OFF, 0), + PLAT_LED_MODE(ONLP_LED_MODE_GREEN, 1), + PLAT_LED_MODE(ONLP_LED_MODE_ORANGE, 2), + PLAT_LED_MODE_END, + }, + PLAT_LED_INTERNAL_DEF, + }, + ///////////////////////////////////////////////////////////// + [PLAT_LED_ID_7] = { + .name = "fan3", + .hw = CPLD_REG (CPLD_CPUPLD, 0x0d, 4, 2), + .present = _fan_tray_present, + .present_data = &_fan_tray_present_reg[PLAT_LED_ID_7], + .mode = { + PLAT_LED_MODE(ONLP_LED_MODE_OFF, 0), + PLAT_LED_MODE(ONLP_LED_MODE_GREEN, 1), + PLAT_LED_MODE(ONLP_LED_MODE_ORANGE, 2), + PLAT_LED_MODE_END, + }, + PLAT_LED_INTERNAL_DEF, + }, +}; +#define plat_leds_size (sizeof(plat_leds)/sizeof(plat_leds[0])) + +static int plat_led_is_valid (int id) +{ + if (id > 0 && id < plat_leds_size) { + if (plat_leds[id].name) + return 1; + } + return 0; +} + +static int __hw_to_onlp_val (int id, int hv) +{ + plat_led_t *led = &plat_leds[id]; + led_mode_t *mod = &led->mode[0]; + + while (mod->hw_val >= 0) { + if (mod->hw_val == hv) + return mod->onlp_val; + mod ++; + } + return -1; +} + +static int __onlp_to_hw_val (int id, int ov) +{ + plat_led_t *led = &plat_leds[id]; + led_mode_t *mod = &led->mode[0]; + + while (mod->onlp_val >= 0) { + if (mod->onlp_val == ov) + return mod->hw_val; + mod ++; + } + return -1; +} + +static uint32_t _onlp_cap_create (led_mode_t *mod) +{ + uint32_t cap = 0; + + while (mod->onlp_val >= 0) { 
+ switch (mod->onlp_val) { + case ONLP_LED_MODE_OFF: cap |= ONLP_LED_CAPS_ON_OFF; break; + case ONLP_LED_MODE_ON: cap |= ONLP_LED_CAPS_ON_OFF; break; + case ONLP_LED_MODE_RED: cap |= ONLP_LED_CAPS_RED; break; + case ONLP_LED_MODE_RED_BLINKING: cap |= ONLP_LED_CAPS_RED_BLINKING; break; + case ONLP_LED_MODE_ORANGE: cap |= ONLP_LED_CAPS_ORANGE; break; + case ONLP_LED_MODE_ORANGE_BLINKING: cap |= ONLP_LED_CAPS_ORANGE_BLINKING; break; + case ONLP_LED_MODE_YELLOW: cap |= ONLP_LED_CAPS_YELLOW; break; + case ONLP_LED_MODE_YELLOW_BLINKING: cap |= ONLP_LED_CAPS_YELLOW_BLINKING; break; + case ONLP_LED_MODE_GREEN: cap |= ONLP_LED_CAPS_GREEN; break; + case ONLP_LED_MODE_GREEN_BLINKING: cap |= ONLP_LED_CAPS_GREEN_BLINKING; break; + case ONLP_LED_MODE_BLUE: cap |= ONLP_LED_CAPS_BLUE; break; + case ONLP_LED_MODE_BLUE_BLINKING: cap |= ONLP_LED_CAPS_BLUE_BLINKING; break; + case ONLP_LED_MODE_PURPLE: cap |= ONLP_LED_CAPS_PURPLE; break; + case ONLP_LED_MODE_PURPLE_BLINKING: cap |= ONLP_LED_CAPS_PURPLE_BLINKING; break; + case ONLP_LED_MODE_AUTO: cap |= ONLP_LED_CAPS_AUTO; break; + case ONLP_LED_MODE_AUTO_BLINKING: cap |= ONLP_LED_CAPS_AUTO_BLINKING; break; + } + mod ++; + } + return cap; +} + +static int _fan_tray_present (void *e) +{ + plat_led_t *led = e; + return cpld_reg_get (led->present_data) == 0 ? 1 : 0; +} + +/* + * This function will be called prior to any other onlp_ledi_* functions. 
+ */ +int +onlp_ledi_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_ledi_info_get(onlp_oid_t id, onlp_led_info_t* info) +{ + int lid; + plat_led_t *led = &plat_leds[id]; + led_mode_t *mod = &led->mode[0]; + int present = 1; + + + if (!ONLP_OID_IS_LED(id)) + return ONLP_STATUS_E_INVALID; + + lid = ONLP_OID_ID_GET(id); + + if (!plat_led_is_valid (lid)) + return ONLP_STATUS_E_INVALID; + + /* Set the onlp_oid_hdr_t and capabilities */ + led = &plat_leds[lid]; + mod = &led->mode[0]; + + memset (info, 0, sizeof(*info)); + info->hdr.id = id; + if (led->name) + snprintf (info->hdr.description, sizeof(info->hdr.description), "%s", led->name); + + info->caps = _onlp_cap_create (mod); + + if (led->present) { + present = led->present(led) ? 1 : 0; + } + + if (present) { + int mode; + + if (led->hw_val_run < 0) + led->hw_val_run = cpld_reg_get (&led->hw); + + mode = __hw_to_onlp_val (lid, led->hw_val_run); + + info->status |= ONLP_LED_STATUS_PRESENT; + + if (mode < 0) { + info->mode = ONLP_LED_MODE_OFF; + info->status |= ONLP_LED_STATUS_FAILED; + } else { + info->mode = mode; + info->status |= ONLP_LED_STATUS_ON; + } + + switch (info->mode) { + case ONLP_LED_MODE_OFF: + info->status &= ~ONLP_LED_STATUS_ON; + break; + default: + break; + } + } else { + info->mode = ONLP_LED_MODE_OFF; + info->status &= ~ONLP_LED_STATUS_ON; + } + + return ONLP_STATUS_OK; +} + +/* + * This function puts the LED into the given mode. It is a more functional + * interface for multimode LEDs. + * + * Only modes reported in the LED's capabilities will be attempted. 
+ */ +int +onlp_ledi_mode_set(onlp_oid_t id, onlp_led_mode_t mode) +{ + int lid; + plat_led_t *led = &plat_leds[id]; + int hw_val; + + if (!ONLP_OID_IS_LED(id)) + return ONLP_STATUS_E_INVALID; + + lid = ONLP_OID_ID_GET(id); + + if (!plat_led_is_valid (lid)) + return ONLP_STATUS_E_INVALID; + + led = &plat_leds[lid]; + + + hw_val = __onlp_to_hw_val (lid, mode); + if (hw_val < 0) + return ONLP_STATUS_E_UNSUPPORTED; + + if (led->hw_val_run == hw_val) + return ONLP_STATUS_OK; + + if (cpld_reg_set (&led->hw, (uint8_t)hw_val)){ + return ONLP_STATUS_E_INTERNAL; + } + + led->hw_val_run = hw_val; + + return ONLP_STATUS_OK; +} + +/* + * Turn an LED on or off. + * + * This function will only be called if the LED OID supports the ONOFF + * capability. + * + * What 'on' means in terms of colors or modes for multimode LEDs is + * up to the platform to decide. This is intended as baseline toggle mechanism. + */ +int +onlp_ledi_set(onlp_oid_t id, int on_or_off) +{ + if (!on_or_off) { + return onlp_ledi_mode_set(id, ONLP_LED_MODE_OFF); + } + + return ONLP_STATUS_E_UNSUPPORTED; +} + + +/* + * Generic LED ioctl interface. 
+ */ +int +onlp_ledi_ioctl(onlp_oid_t id, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/make.mk b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/make.mk new file mode 100644 index 00000000..0529075d --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/make.mk @@ -0,0 +1,9 @@ +############################################################################### +# +# +# +############################################################################### + +LIBRARY := x86_64_delta_ag8032 +$(LIBRARY)_SUBDIR := $(dir $(lastword $(MAKEFILE_LIST))) +include $(BUILDER)/lib.mk diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/platform_lib.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/platform_lib.c new file mode 100644 index 00000000..b75649e0 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/platform_lib.c @@ -0,0 +1,386 @@ + +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * Copyright 2017 (C) Delta Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include "platform_lib.h" + +//////////////////////////////////////////////////////////////// +// PLAT DEV CONFIG +//////////////////////////////////////////////////////////////// +static plat_dev_desc_t plat_devs[] = { + [PLAT_DEV_ID_INVALID] = { + .name = NULL, + }, + + // valid dev desc start + [PLAT_DEV_ID_1] = { + .name = "CPUPLD", + // i2c dev + .bus = 0, + .addr = 0x2e, + }, + [PLAT_DEV_ID_2] = { + .name = "SWPLD", + // i2c dev + .bus = 3, + .addr = 0x30, + }, + // valid dev desc end + + // END + [PLAT_DEV_ID_MAX] = { + .name = NULL, + }, +}; + +//////////////////////////////////////////////////////////////// +// CPLD CONFIG +//////////////////////////////////////////////////////////////// +static plat_cpld_t plat_cplds[] = { + [PLAT_CPLD_ID_INVALID] = { + .id = PLAT_DEV_ID_INVALID, + }, + + + [PLAT_CPLD_ID_1] = { + .id = PLAT_DEV_ID_1, + .cached = { + [0x11] = 1, + [0x1a] = 1, + }, + }, + [PLAT_CPLD_ID_2] = { + .id = PLAT_DEV_ID_2, + .cached = { + [0x0a] = 1, + [0x0b] = 1, + [0x0c] = 1, + [0x0d] = 1, + }, + }, + + // END + [PLAT_CPLD_ID_MAX] = { + .id = PLAT_DEV_ID_INVALID, + }, + +}; + +//////////////////////////////////////////////////////////////// +// PLAT DEV ROUTINE + +int plat_dev_id_is_valid (plat_dev_id_t id) +{ + if (id > PLAT_DEV_ID_INVALID && id < PLAT_DEV_ID_MAX) { + if (plat_devs[id].name) { + return 1; + } + } + return 0; +} + +int plat_dev_get_byte (plat_dev_id_t id, uint8_t reg) +{ + if (!plat_dev_id_is_valid(id)) { + return -1; + } + return onlp_i2c_readb (plat_devs[id].bus, plat_devs[id].addr, reg, 0); +} + +int plat_dev_set_byte (plat_dev_id_t id, uint8_t reg, uint8_t val) +{ + if (!plat_dev_id_is_valid(id)) + return -1; + return onlp_i2c_writeb(plat_devs[id].bus, plat_devs[id].addr, reg, val, 0); +} + 
+//////////////////////////////////////////////////////////////// +// CPLD PLAT ROUTINE +int cpld_id_is_valid (plat_cpld_id_t id) +{ + if (id > PLAT_CPLD_ID_INVALID && id < PLAT_CPLD_ID_MAX) { + if (plat_dev_id_is_valid(plat_cplds[id].id)) + return 1; + } + return 0; +} + +int cpld_get (plat_cpld_id_t id, uint8_t reg) +{ + if (!cpld_id_is_valid (id)) + return -1; + return plat_dev_get_byte (plat_cplds[id].id, reg); +} + +int cpld_set (plat_cpld_id_t id, uint8_t reg, uint8_t val) +{ + if (!cpld_id_is_valid (id)) + return -1; + return plat_dev_set_byte (plat_cplds[id].id, reg, val); +} + +int cpld_field_get (plat_cpld_id_t id, uint8_t reg, uint8_t field, uint8_t len) +{ + int val; + int i; + uint8_t mask = 0; + + val = cpld_get (id, reg); + if (val < 0) { + return val; + } + + // make mask; + for (i = 0 ; i < len ; i ++) { + mask |= (1 << (i)); + } + + val = ((val >> field) & mask); + + return val; +} + + +int cpld_field_set (plat_cpld_id_t id, uint8_t reg, uint8_t field, uint8_t len, uint8_t val) +{ + int _val; + int i; + uint8_t mask = 0; + + _val = cpld_get (id, reg); + if (_val < 0) + return val; + + // make mask; + for (i = 0 ; i < len ; i ++) { + mask |= (1 << (field + i)); + } + val = ((val << field) & mask); + _val = (_val & ~mask); + + return cpld_set (id, reg, val | (uint8_t)_val); +} + +//////////////////////////////////////////////////////////////// +// CPLD REG PLAT ROUTINE +int cpld_reg_is_valid (cpld_reg_t *r) +{ + if (!r) + return 0; + if (r->valid) + return 1; + return 0; +} + +int cpld_reg_get (cpld_reg_t *r) +{ + if (!r) + return -1; + + return cpld_field_get (r->id, r->reg, r->field, r->len); +} + +int cpld_reg_set (cpld_reg_t *r, uint8_t val) +{ + if (!r) + return -1; + + return cpld_field_set (r->id, r->reg, r->field, r->len, val); +} + +int present_on_board_always (void *e) +{ + return 1; +} + +//////////////////////////////////////////////////////////////// +// PSU PLAT ROUTINE +int plat_fan_state_update (plat_fan_t *fan) +{ + int present ; + 
plat_fan_state_t old_state; + + do { + old_state = fan->state; + + present = 1; + if (fan->present) { + present = fan->present(fan) ? 1 : 0; + } + + switch (fan->state) { + case PLAT_FAN_STATE_UNPRESENT: + if (present) { + fan->state = PLAT_FAN_STATE_PRESENT; + if (fan->event_callback) + fan->event_callback(fan, PLAT_FAN_EVENT_PLUGIN); + break; + } + break; + case PLAT_FAN_STATE_PRESENT: + if (!present) { + fan->state = PLAT_FAN_STATE_UNPRESENT; + if (fan->event_callback) + fan->event_callback(fan, PLAT_FAN_EVENT_UNPLUG); + break; + } + break; + default: + break; + } + } while (old_state != fan->state); + + return 0; +} + + +//////////////////////////////////////////////////////////////// +// PSU PLAT ROUTINE + +int plat_psu_state_update (plat_psu_t *psu) +{ + int present ; + plat_psu_state_t old_state; + + do { + old_state = psu->state; + present = 1; + if (psu->present) { + present = psu->present(psu) ? 1 : 0; + } + + switch (psu->state) { + case PLAT_PSU_STATE_UNPRESENT: + if (present) { + psu->state = PLAT_PSU_STATE_PRESENT; + if (psu->event_callback) + psu->event_callback(psu, PLAT_PSU_EVENT_PLUGIN); + } + break; + case PLAT_PSU_STATE_PRESENT: + if (!present) { + psu->state = PLAT_PSU_STATE_UNPRESENT; + if (psu->event_callback) + psu->event_callback(psu, PLAT_PSU_EVENT_UNPLUG); + + break; + } + + if (onlp_i2c_readb (psu->pmbus_bus, psu->pmbus_addr, 0x00, + ONLP_I2C_F_FORCE | ONLP_I2C_F_DISABLE_READ_RETRIES) >= 0) { + + psu->state = PLAT_PSU_STATE_PMBUS_READY; + if (!plat_os_file_is_existed(psu->pmbus_ready_path) && psu->pmbus_insert_cmd) { + system (psu->pmbus_insert_cmd); + } + if (psu->event_callback) + psu->event_callback(psu, PLAT_PSU_PMBUS_CONNECT); + } + break; + case PLAT_PSU_STATE_PMBUS_READY: + + // If unplug, remove kernel module + if (!present) { + psu->state = PLAT_PSU_STATE_UNPRESENT; + if (psu->pmbus_remove_cmd) { + system (psu->pmbus_remove_cmd); + } + if (psu->event_callback) + psu->event_callback(psu, PLAT_PSU_EVENT_UNPLUG); + break; + } + // 
If pmbus interface is not ok, remove kernel module + if (onlp_i2c_readb (psu->pmbus_bus, psu->pmbus_addr, 0x00, + ONLP_I2C_F_FORCE | ONLP_I2C_F_DISABLE_READ_RETRIES) < 0) { + + psu->state = PLAT_PSU_STATE_PRESENT; + if (psu->pmbus_remove_cmd) { + system (psu->pmbus_remove_cmd); + } + if (psu->event_callback) + psu->event_callback(psu, PLAT_PSU_PMBUS_DISCONNECT); + + break; + } + + break; + default: + break; + } + } while (old_state != psu->state); + + return 0; +} + +//////////////////////////////////////////////////////////////// +// OS HELP ROUTINE +static char *plat_os_path_complete (char *path_pattern, char *buff, int len) +{ + FILE *fp; + + snprintf (buff, len, "realpath -z %s 2>/dev/null", path_pattern); + fp = popen (buff, "r"); + if (fp) { + fgets (buff, len, fp); + pclose (fp); + } else { + snprintf (buff, len, "%s", path_pattern); + } + return buff; +} + +int plat_os_file_is_existed (char *path) +{ + char buff[1024]; + + if (path) + return access (plat_os_path_complete(path, buff, sizeof(buff)), F_OK) == 0 ? 1 : 0; + return 0; +} + +int plat_os_file_read (uint8_t *data, int max, int *len, char *path, ...) +{ + char buff[1024]; + return onlp_file_read (data, max, len, plat_os_path_complete(path, buff, sizeof(buff)), NULL); +} + +int plat_os_file_read_int (int *val, char *path, ...) +{ + char buff[1024]; + return onlp_file_read_int (val, plat_os_path_complete(path, buff, sizeof(buff)), NULL); +} + +int plat_os_file_write_int(int val, char *path, ...) 
+{ + char buff[1024]; + return onlp_file_write_int (val, plat_os_path_complete(path, buff, sizeof(buff)), NULL); +} + + + + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/platform_lib.h b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/platform_lib.h new file mode 100644 index 00000000..3148de35 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/platform_lib.h @@ -0,0 +1,334 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * Copyright 2017 (C) Delta Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#ifndef __PLAT_LIB_H__ +#define __PLAT_LIB_H__ + +#include + +#define ONIE_EEPROM_LOCATION "/sys/bus/i2c/devices/i2c-5/5-0054/eeprom" + +typedef int (*hook_present)(void *e); +typedef int (*hook_event )(void *e, int ev); + +extern int present_on_board_always (void *e); + +//////////////////////////////////////////////////////////////// +// PLAT DEV ROUTINE +typedef enum plat_dev_id { + PLAT_DEV_ID_INVALID = 0, + PLAT_DEV_ID_1, + PLAT_DEV_ID_2, + PLAT_DEV_ID_3, + PLAT_DEV_ID_4, + PLAT_DEV_ID_5, + // .... 
+ PLAT_DEV_ID_MAX = 128, +} plat_dev_id_t; + +typedef struct plat_dev_desc { + char *name; + // i2c dev + int bus; + uint8_t addr; +} plat_dev_desc_t; + +extern int plat_dev_id_is_valid (plat_dev_id_t id); +extern int plat_dev_get_byte (plat_dev_id_t id, uint8_t reg); +extern int plat_dev_set_byte (plat_dev_id_t id, uint8_t reg, uint8_t val); + +//////////////////////////////////////////////////////////////// +// CPLD PLAT ROUTINE +typedef enum plat_cpld_id { + PLAT_CPLD_ID_INVALID = 0, + PLAT_CPLD_ID_1, + PLAT_CPLD_ID_2, + PLAT_CPLD_ID_3, + PLAT_CPLD_ID_4, + PLAT_CPLD_ID_MAX, +} plat_cpld_id_t ; + +typedef struct plat_cpld { + plat_dev_id_t id; + uint8_t cached[256]; + uint8_t cache[256]; +} plat_cpld_t; + +extern int cpld_id_is_valid (plat_cpld_id_t id); +extern int cpld_get (plat_cpld_id_t id, uint8_t reg); +extern int cpld_set (plat_cpld_id_t id, uint8_t reg, uint8_t val); +extern int cpld_field_get (plat_cpld_id_t id, uint8_t reg, uint8_t field, uint8_t len); +extern int cpld_field_set (plat_cpld_id_t id, uint8_t reg, uint8_t field, uint8_t len, uint8_t val); + +#define CPLD_CPUPLD PLAT_CPLD_ID_1 +#define CPLD_SWPLD PLAT_CPLD_ID_2 + +//////////////////////////////////////////////////////////////// +// CPLD REG PLAT ROUTINE +typedef struct cpld_reg { + plat_cpld_id_t id; + uint8_t reg; + uint8_t field; + uint8_t len; + char valid; +} cpld_reg_t; + +#define CPLD_REG(i,r,f,l) {.valid=1, .id=i,.reg=r,.field=f,.len=l,} +extern int cpld_reg_is_valid (cpld_reg_t *r); +extern int cpld_reg_get (cpld_reg_t *r); +extern int cpld_reg_set (cpld_reg_t *r, uint8_t val); + +//////////////////////////////////////////////////////////////// +// THERMAL PLAT ROUTINE +typedef enum plat_thermal_id { + PLAT_THERMAL_ID_INVALID, + PLAT_THERMAL_ID_1 = 1, + PLAT_THERMAL_ID_2, + PLAT_THERMAL_ID_3, + PLAT_THERMAL_ID_4, + PLAT_THERMAL_ID_5, + PLAT_THERMAL_ID_6, + PLAT_THERMAL_ID_7, + PLAT_THERMAL_ID_MAX +} plat_thermal_id_t; + +typedef struct plat_thermal { + char *desc; + + hook_present 
present; + void *present_data; + + char *temp_get_path; + + char *warnning_set_path; + int def_warnning; + + char *critical_set_path; + int def_critical; + + char *shutdown_set_path; + int def_shutdown; + +} plat_thermal_t ; + +//////////////////////////////////////////////////////////////// +// LED PLAT ROUTINE +typedef enum plat_led_id { + PLAT_LED_ID_INVALID = 0, + PLAT_LED_ID_1, + PLAT_LED_ID_2, + PLAT_LED_ID_3, + PLAT_LED_ID_4, + PLAT_LED_ID_5, + PLAT_LED_ID_6, + PLAT_LED_ID_7, + PLAT_LED_ID_MAX +} plat_led_id_t ; + +typedef struct led_mode { + int onlp_val; + int hw_val; +} led_mode_t; +#define PLAT_LED_MODE_MAX 16 +#define PLAT_LED_MODE(o,h) { .onlp_val = o, .hw_val = h, } +#define PLAT_LED_MODE_END { .onlp_val = -1, .hw_val = -1, } + +#define PLAT_LED_INTERNAL_DEF \ + .hw_val_run = -1 + + +typedef struct plat_led { + char *name; + hook_present present; + void *present_data; + cpld_reg_t hw; + int hw_val_run; + led_mode_t mode[PLAT_LED_MODE_MAX]; +} plat_led_t; + +//////////////////////////////////////////////////////////////// +// FAN PLAT ROUTINE +typedef enum plat_fan_id { + PLAT_FAN_ID_INVALID = 0, + PLAT_FAN_ID_1, + PLAT_FAN_ID_2, + PLAT_FAN_ID_3, + PLAT_FAN_ID_4, + PLAT_FAN_ID_5, + PLAT_FAN_ID_6, + PLAT_FAN_ID_7, + PLAT_FAN_ID_8, + PLAT_FAN_ID_MAX, +} plat_fan_id_t ; + +typedef enum plat_fan_state { + PLAT_FAN_STATE_UNPRESENT = 0, + PLAT_FAN_STATE_PRESENT, +} plat_fan_state_t; + +typedef enum plat_fan_event { + PLAT_FAN_EVENT_UNPLUG = 0, + PLAT_FAN_EVENT_PLUGIN, +} plat_fan_event_t ; + +typedef struct plat_fan { + char *name; + + hook_present present; + void *present_data; + + char *rpm_get_path; + char *rpm_set_path; + int def_rpm; + char *per_get_path; + char *per_set_path; + int def_per; + + char *eeprom_path; + + uint32_t caps; + + // internal use + int rpm_set_value; + int per_set_value; + + plat_fan_state_t state; + hook_event event_callback; + + uint8_t eeprom[256]; + +} plat_fan_t; + +#define PLAT_FAN_INTERNAL_DEF \ + .rpm_set_value = -1,\ + 
.per_set_value = -1,\ + .state = PLAT_FAN_STATE_UNPRESENT + +extern int plat_fan_state_update (plat_fan_t *fan); + +//////////////////////////////////////////////////////////////// +// SFP PLAT ROUTINE +typedef enum plat_sff_id { + PLAT_SFF_ID_MIN = 1, + PLAT_SFF_ID_MAX = 32, +} plat_sff_id_t; + +typedef int (*hook_sff_control)(void *e, int sval, int *gval, int *sup); + +typedef struct plat_sff { + char valid; + + hook_present present; + cpld_reg_t present_cpld_reg; + + hook_sff_control reset; + cpld_reg_t reset_cpld_reg; + + hook_sff_control lpmode; + cpld_reg_t lpmode_cpld_reg; + + hook_sff_control rxlos; + cpld_reg_t rxlos_cpld_reg; + + hook_sff_control txfault; + cpld_reg_t txfault_cpld_reg; + + hook_sff_control txdisable; + cpld_reg_t txdisable_cpld_reg; + + + int bus; +} plat_sff_t; + +//////////////////////////////////////////////////////////////// +// PSU PLAT ROUTINE +typedef enum plat_psu_id { + PLAT_PSU_ID_INVALID = 0, + PLAT_PSU_ID_1, + PLAT_PSU_ID_2, + PLAT_PSU_ID_MAX +} plat_psu_id_t; + +typedef enum plat_psu_state { + PLAT_PSU_STATE_UNPRESENT = 0, + PLAT_PSU_STATE_PRESENT, + PLAT_PSU_STATE_PMBUS_READY, + PLAT_PSU_STATE_MAX +} plat_psu_state_t; + +typedef enum plat_psu_event { + PLAT_PSU_EVENT_UNPLUG = 0, + PLAT_PSU_EVENT_PLUGIN, + PLAT_PSU_PMBUS_CONNECT, + PLAT_PSU_PMBUS_DISCONNECT, +LAT_PSU_EVENT_ +} plat_psu_event_t; + +typedef struct plat_psu { + + char *name; + + hook_present present; + cpld_reg_t present_cpld_reg; + + char *vin_path; + char *vout_path; + char *iin_path; + char *iout_path; + char *pin_path; + char *pout_path; + + char *vin_max_path; + char *vin_min_path; + + hook_event event_callback; + + char eeprom_bus; + char eeprom_addr; + + // use for probe and insmod + plat_psu_state_t state; + char *pmbus_insert_cmd; + char *pmbus_remove_cmd; + char *pmbus_ready_path; + uint8_t pmbus_bus; + uint8_t pmbus_addr; + + uint8_t eeprom[256]; + +} plat_psu_t; + +extern int plat_psu_state_update (plat_psu_t *psu); + 
+//////////////////////////////////////////////////////////////// +// OS HELP ROUTINE +extern int plat_os_file_is_existed (char *path); +extern int plat_os_file_read (uint8_t *data, int max, int *len, char *path, ...); +extern int plat_os_file_read_int (int *val, char *path, ...); +extern int plat_os_file_write_int(int val, char *path, ...); + +#endif // __PLAT_LIB_H__ + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/psui.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/psui.c new file mode 100644 index 00000000..16d6cf1b --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/psui.c @@ -0,0 +1,283 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * Copyright 2017 Delta Networks, Inc + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include "eeprom_drv.h" +#include "eeprom_info.h" +#include "platform_lib.h" + +static int _psu_present (void *e); +static int _psu_event (void *e, int ev); + +static plat_psu_t plat_psus[] = { + [PLAT_PSU_ID_1] = { + .name = "PSU1", + .present = _psu_present, + .present_cpld_reg = CPLD_REG (CPLD_CPUPLD, 0x1a, 6, 1), + + .vin_path = "/sys/bus/i2c/devices/4-0058/hwmon/*/in1_input", + .iin_path = "/sys/bus/i2c/devices/4-0058/hwmon/*/curr1_input", + .pin_path = "/sys/bus/i2c/devices/4-0058/hwmon/*/power1_input", + + .vout_path = "/sys/bus/i2c/devices/4-0058/hwmon/*/in2_input", + .iout_path = "/sys/bus/i2c/devices/4-0058/hwmon/*/curr2_input", + .pout_path = "/sys/bus/i2c/devices/4-0058/hwmon/*/power2_input", + + .vin_max_path = "/sys/bus/i2c/devices/4-0058/hwmon/*/in1_max", + .vin_min_path = "/sys/bus/i2c/devices/4-0058/hwmon/*/in1_min", + + .eeprom_bus = 4, + .eeprom_addr= 0x50, + .event_callback = _psu_event, + + .state = PLAT_PSU_STATE_UNPRESENT, + .pmbus_insert_cmd = "echo pmbus 0x58 > /sys/bus/i2c/devices/i2c-4/new_device", + .pmbus_remove_cmd = "echo 0x58 > /sys/bus/i2c/devices/i2c-4/delete_device", + .pmbus_ready_path = "/sys/bus/i2c/devices/4-0058/hwmon", + .pmbus_bus = 4, + .pmbus_addr = 0x58, + }, + [PLAT_PSU_ID_2] = { + .name = "PSU2", + .present = _psu_present, + .present_cpld_reg = CPLD_REG (CPLD_CPUPLD, 0x1a, 7, 1), + + .vin_path = "/sys/bus/i2c/devices/4-0059/hwmon/*/in1_input", + .iin_path = "/sys/bus/i2c/devices/4-0059/hwmon/*/curr1_input", + .pin_path = "/sys/bus/i2c/devices/4-0059/hwmon/*/power1_input", + + .vout_path = "/sys/bus/i2c/devices/4-0059/hwmon/*/in2_input", + .iout_path = "/sys/bus/i2c/devices/4-0059/hwmon/*/curr2_input", + .pout_path = "/sys/bus/i2c/devices/4-0059/hwmon/*/power2_input", + + .vin_max_path = "/sys/bus/i2c/devices/4-0059/hwmon/*/in1_max", + .vin_min_path = 
"/sys/bus/i2c/devices/4-0059/hwmon/*/in1_min", + + .eeprom_bus = 4, + .eeprom_addr= 0x51, + .event_callback = _psu_event, + + .state = PLAT_PSU_STATE_UNPRESENT, + .pmbus_insert_cmd = "echo pmbus 0x59 > /sys/bus/i2c/devices/i2c-4/new_device", + .pmbus_remove_cmd = "echo 0x59 > /sys/bus/i2c/devices/i2c-4/delete_device", + .pmbus_ready_path = "/sys/bus/i2c/devices/4-0059/hwmon", + .pmbus_bus = 4, + .pmbus_addr = 0x59, + }, +}; + +#define plat_psus_size (sizeof(plat_psus)/sizeof(plat_psus[0])) + +static int plat_psu_is_valid (int id) +{ + plat_psu_t *psu; + + if (id < 0 && id >= plat_psus_size) + return 0; + + psu = &plat_psus[id]; + if (psu->name) + return 1; + + return 0; +} + +static int _psu_present (void *e) +{ + plat_psu_t *psu = e; + return cpld_reg_get (&psu->present_cpld_reg) == 0 ? 1 : 0; +} + +static int _psu_event (void *e, int ev) +{ + plat_psu_t *psu = e; + + switch (ev) { + case PLAT_PSU_PMBUS_CONNECT: + if (eeprom_read (psu->eeprom_bus, psu->eeprom_addr, 0, psu->eeprom, sizeof(psu->eeprom))) + memset (psu->eeprom, 0xff, sizeof(psu->eeprom)); + break; + case PLAT_PSU_PMBUS_DISCONNECT: + memset (psu->eeprom, 0xff, sizeof(psu->eeprom)); + break; + case PLAT_PSU_EVENT_UNPLUG: + case PLAT_PSU_EVENT_PLUGIN: + default: + break; + } + + return 0; +} + +static uint32_t _psu_vin_type_guess (plat_psu_t *psu) +{ + uint32_t ret; + int vmax = -1; + int vmin = -1; + + if ((psu->vin_max_path) && + (plat_os_file_read_int (&vmax, psu->vin_max_path, NULL) < 0)) + vmax = -1; + if ((psu->vin_min_path) && + (plat_os_file_read_int (&vmin, psu->vin_min_path, NULL) < 0)) + vmin = -1; + + ret = 0; + if (12000 > vmin && 12000 < vmax) + ret |= ONLP_PSU_CAPS_DC12; + if (48000 > vmin && 48000 < vmax) + ret |= ONLP_PSU_CAPS_DC48; + if (110000 > vmin && 110000 < vmax) + ret |= ONLP_PSU_CAPS_AC; + + if (!ret) + ret |= ONLP_PSU_CAPS_AC; + + return ret; +} + +int +onlp_psui_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_psui_info_get(onlp_oid_t id, onlp_psu_info_t* info) +{ + int 
error ; + plat_psu_t *psu; + int present; + int pid= ONLP_OID_ID_GET(id); + + if (!ONLP_OID_IS_PSU(id)) + return ONLP_STATUS_E_INVALID; + + if (!plat_psu_is_valid(pid)) + return ONLP_STATUS_E_INVALID; + + psu = &plat_psus[pid]; + + memset(info, 0, sizeof(onlp_psu_info_t)); + + info->hdr.id = id; + if (psu->name) + snprintf (info->hdr.description, sizeof(info->hdr.description), "%s", psu->name); + + + plat_psu_state_update (psu); + + // check present; + present = psu->state != PLAT_PSU_STATE_UNPRESENT ? 1 : 0; + + if (present) { + info->status |= ONLP_PSU_STATUS_PRESENT; + info->status &= ~ONLP_PSU_STATUS_UNPLUGGED; + } else { + info->status |= ONLP_PSU_STATUS_UNPLUGGED; + info->status &= ~ONLP_PSU_STATUS_PRESENT; + } + + // unpresent will return directly + if (info->status & ONLP_PSU_STATUS_UNPLUGGED) { + return ONLP_STATUS_OK; + } + + /////////////////////////////////////////////////////////////// + // get caps + if (psu->vin_path && plat_os_file_is_existed(psu->vin_path)) info->caps |= ONLP_PSU_CAPS_VIN; + if (psu->iin_path && plat_os_file_is_existed(psu->iin_path)) info->caps |= ONLP_PSU_CAPS_IIN; + if (psu->pin_path && plat_os_file_is_existed(psu->pin_path)) info->caps |= ONLP_PSU_CAPS_PIN; + if (psu->vout_path && plat_os_file_is_existed(psu->vout_path)) info->caps |= ONLP_PSU_CAPS_VOUT; + if (psu->iout_path && plat_os_file_is_existed(psu->iout_path)) info->caps |= ONLP_PSU_CAPS_IOUT; + if (psu->pout_path && plat_os_file_is_existed(psu->pout_path)) info->caps |= ONLP_PSU_CAPS_POUT; + + //// TODO : auto detect AC / DC type + // we do a guess + info->caps |= _psu_vin_type_guess (psu); + + // get psu info + eeprom_info_get (psu->eeprom, sizeof(psu->eeprom), "psu_model", info->model); + eeprom_info_get (psu->eeprom, sizeof(psu->eeprom), "psu_series", info->serial); + + /////////////////////////////////////////////////////////////// + // get and check value + error = 0; + if (info->caps & ONLP_PSU_CAPS_VIN) { + + if (psu->state != PLAT_PSU_STATE_PMBUS_READY) { + 
info->status |= ONLP_PSU_STATUS_FAILED; + } else { + + if (plat_os_file_read_int(&info->mvin, psu->vin_path, NULL) < 0) { + error ++; + } else { + if (info->mvin < 2) + info->status |= ONLP_PSU_STATUS_FAILED; + } + } + } + + //// If VIN is not ok, skip other + if ((info->status & ONLP_PSU_STATUS_FAILED) == 0) { + + if (info->caps & ONLP_PSU_CAPS_IIN) { + if (plat_os_file_read_int(&info->miin, psu->iin_path, NULL) < 0) + error ++; + } + if (info->caps & ONLP_PSU_CAPS_PIN) { + if (plat_os_file_read_int(&info->mpin, psu->pin_path, NULL) < 0) + error ++; + else + info->mpin = info->mpin / 1000; + } + if (info->caps & ONLP_PSU_CAPS_VOUT) { + if (plat_os_file_read_int(&info->mvout, psu->vout_path, NULL) < 0) { + error ++; + } else { + if (info->mvout < 2) + info->status |= ONLP_PSU_STATUS_FAILED; + } + } + if (info->caps & ONLP_PSU_CAPS_IOUT) { + if (plat_os_file_read_int(&info->miout, psu->iout_path, NULL) < 0) + error ++; + } + if (info->caps & ONLP_PSU_CAPS_POUT) { + if (plat_os_file_read_int(&info->mpout, psu->pout_path, NULL) < 0) + error ++; + else + info->mpout = info->mpout / 1000; + } + } // if ((info->status & ONLP_PSU_STATUS_FAILED) == 0) + + return error ? ONLP_STATUS_E_INTERNAL : ONLP_STATUS_OK; +} + +int +onlp_psui_ioctl(onlp_oid_t pid, va_list vargs) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/sfpi.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/sfpi.c new file mode 100644 index 00000000..8f7ed37b --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/sfpi.c @@ -0,0 +1,501 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * Copyright 2017 Delta Networks, Inc + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include "platform_lib.h" + +static int _sff_present (void *e); +static int _sff_lpmode (void *e, int sval, int *gval, int *sup); +static int _sff_reset (void *e, int sval, int *gval, int *sup); + +static int _sff8636_txdisable (void *e, int sval, int *gval, int *sup); + +static plat_sff_t plat_sffs[] = { + [PLAT_SFF_ID_MIN] = { + .valid = 1, + .present = _sff_present, + .present_cpld_reg = CPLD_REG(CPLD_SWPLD, 0x0d, 0, 1), + .lpmode = _sff_lpmode, + .lpmode_cpld_reg = CPLD_REG(CPLD_SWPLD, 0x09, 0, 1), + .reset = _sff_reset, + .reset_cpld_reg = CPLD_REG(CPLD_SWPLD, 0x11, 0, 1), + .txdisable = _sff8636_txdisable, + .bus = 6, + }, + // total 32 port .... init in onlp_sfpi_init + [PLAT_SFF_ID_MAX] = { + .valid = 1, + }, +}; + +static int _sff_present (void *e) +{ + int val; + plat_sff_t *sff = e; + + val = cpld_reg_get (&sff->present_cpld_reg); + + /* + * Return 1 if present. + * Return 0 if not present. + * Return < 0 if error. + */ + + if (val < 0) + return val; + return val ? 0 : 1; +} + +static int _sff_lpmode (void *e, int sval, int *gval, int *sup) +{ + plat_sff_t *sff = e; + + if (sup) { + if (cpld_reg_is_valid(&sff->lpmode_cpld_reg)) + *sup = 1; + } + if (gval) + *gval = cpld_reg_get (&sff->lpmode_cpld_reg); + + if (sval >= 0) + cpld_reg_set (&sff->lpmode_cpld_reg, sval ? 
1 : 0); + + return ONLP_STATUS_OK; +} + +static int _sff_reset (void *e, int sval, int *gval, int *sup) +{ + plat_sff_t *sff = e; + + if (sup) { + if (cpld_reg_is_valid(&sff->reset_cpld_reg)) + *sup = 1; + } + + if (gval) { + *gval = cpld_reg_get (&sff->reset_cpld_reg); + *gval = *gval ? 0 : 1; + } + + if (sval >= 0) { + cpld_reg_set (&sff->reset_cpld_reg, sval ? 0 : 1); + } + + return ONLP_STATUS_OK; +} + +static int _sff8636_txdisable (void *e, int sval, int *gval, int *sup) +{ + int ret = ONLP_STATUS_OK; + plat_sff_t *sff = e; + + if (sup) { + *sup = 1; + } + + if (gval) { + if (cpld_reg_is_valid(&sff->txdisable_cpld_reg)) { + *gval = cpld_reg_get (&sff->reset_cpld_reg); + } else { + // following sff8636 spec + ret = onlp_i2c_readb (sff->bus, 0x50, 86, ONLP_I2C_F_DISABLE_READ_RETRIES); + if (ret >= 0) + *gval = ret; + } + } + + if (sval >= 0) { + if (cpld_reg_is_valid(&sff->txdisable_cpld_reg)) { + cpld_reg_set (&sff->reset_cpld_reg, sval ? 1 : 0); + } else { + // following sff8636 spec + ret = onlp_i2c_writeb (sff->bus, 0x50, 86, sval, 0); + } + } + + return ret; +} + +static int _sff_is_valid (int p) +{ + plat_sff_t *sff; + + if (p >= PLAT_SFF_ID_MIN && p <= PLAT_SFF_ID_MAX) { + sff = &plat_sffs[p]; + if (sff->valid) + return 1; + } + + return 0; +} + + +/************************************************************ + * + * SFPI Entry Points + * + ***********************************************************/ +int +onlp_sfpi_init(void) +{ + int p; + plat_sff_t *base = &plat_sffs[PLAT_SFF_ID_MIN]; + plat_sff_t *sff; + + for (p = PLAT_SFF_ID_MIN + 1; p <= PLAT_SFF_ID_MAX ; p ++) { + sff = &plat_sffs[p]; + + sff->valid = 1; + + // .present_cpld_reg + sff->present = base->present; + sff->present_cpld_reg.id = base->present_cpld_reg.id; + sff->present_cpld_reg.reg = base->present_cpld_reg.reg - (p - 1) / 8; + sff->present_cpld_reg.field = base->present_cpld_reg.field + (p - 1) % 8; + sff->present_cpld_reg.len = base->present_cpld_reg.len; + sff->present_cpld_reg.valid 
= 1, + + // .lpmode + sff->lpmode = base->lpmode; + sff->lpmode_cpld_reg.id = base->lpmode_cpld_reg.id; + sff->lpmode_cpld_reg.reg = base->lpmode_cpld_reg.reg - (p - 1) / 8; + sff->lpmode_cpld_reg.field = base->lpmode_cpld_reg.field + (p - 1) % 8; + sff->lpmode_cpld_reg.len = base->lpmode_cpld_reg.len; + sff->lpmode_cpld_reg.valid = 1, + + // .reset + sff->reset = base->reset; + sff->reset_cpld_reg.id = base->reset_cpld_reg.id; + sff->reset_cpld_reg.reg = base->reset_cpld_reg.reg - (p - 1) / 8; + sff->reset_cpld_reg.field = base->reset_cpld_reg.field + (p - 1) % 8; + sff->reset_cpld_reg.len = base->reset_cpld_reg.len; + sff->reset_cpld_reg.valid = 1, + + //.txdisable + sff->txdisable = base->txdisable; + + // bus + sff->bus = base->bus + p - 1; + } + /* Called at initialization time */ + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_bitmap_get(onlp_sfp_bitmap_t* bmap) +{ + int p; + + for (p = PLAT_SFF_ID_MIN ; p <= PLAT_SFF_ID_MAX ; p++) { + if (_sff_is_valid (p)) + AIM_BITMAP_SET(bmap, p); + } + + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_is_present(int port) +{ + plat_sff_t *sff; + /* + * Return 1 if present. + * Return 0 if not present. + * Return < 0 if error. + */ + if (!_sff_is_valid (port)) + return -1; + sff = &plat_sffs[port]; + + if (sff->present == NULL) { + // If not present, it means it is present always. + return 1; + } + + return sff->present (sff); +} + +static int _sff_read_eeprom (int port, uint8_t devaddr, uint8_t *data) +{ + /* + * Read the SFP eeprom into data[] + * + * Return MISSING if SFP is missing. 
+ * Return OK if eeprom is read + */ + plat_sff_t *sff; + int i; + + memset(data, 0, 256); + + if (!_sff_is_valid(port)) { + return ONLP_STATUS_E_INVALID; + } + + if (onlp_sfpi_is_present(port) <= 0) + { + return ONLP_STATUS_E_MISSING; + } + + sff = &plat_sffs[port]; + + for (i = 0 ; i < (256/ONLPLIB_CONFIG_I2C_BLOCK_SIZE); i++) { + if (onlp_i2c_block_read (sff->bus, devaddr, + ONLPLIB_CONFIG_I2C_BLOCK_SIZE * i, + ONLPLIB_CONFIG_I2C_BLOCK_SIZE, + &data [ONLPLIB_CONFIG_I2C_BLOCK_SIZE * i], + 0) < 0) + { + return ONLP_STATUS_E_INTERNAL; + } + } + return ONLP_STATUS_OK; + +} + +int +onlp_sfpi_eeprom_read(int port, uint8_t data[256]) +{ + return _sff_read_eeprom (port, 0x50, data); +} + +int +onlp_sfpi_dom_read(int port, uint8_t data[256]) +{ + return _sff_read_eeprom (port, 0x51, data); +} + +int onlp_sfpi_dev_readb(int port, uint8_t devaddr, uint8_t addr) +{ + plat_sff_t *sff; + + if (!_sff_is_valid(port)) { + return ONLP_STATUS_E_INVALID; + } + + if (onlp_sfpi_is_present(port) <= 0) + { + return ONLP_STATUS_E_MISSING; + } + + sff = &plat_sffs[port]; + + return onlp_i2c_readb (sff->bus, devaddr, addr, 0); +} + +int onlp_sfpi_dev_writeb(int port, uint8_t devaddr, uint8_t addr, uint8_t value) +{ + plat_sff_t *sff; + + if (!_sff_is_valid(port)) { + return ONLP_STATUS_E_INVALID; + } + + if (onlp_sfpi_is_present(port) <= 0) + { + return ONLP_STATUS_E_MISSING; + } + + sff = &plat_sffs[port]; + + return onlp_i2c_writeb (sff->bus, devaddr, addr, value, 0); +} + +int onlp_sfpi_dev_readw(int port, uint8_t devaddr, uint8_t addr) +{ + plat_sff_t *sff; + + if (!_sff_is_valid(port)) { + return ONLP_STATUS_E_INVALID; + } + + if (onlp_sfpi_is_present(port) <= 0) + { + return ONLP_STATUS_E_MISSING; + } + + sff = &plat_sffs[port]; + + return onlp_i2c_readw (sff->bus, devaddr, addr, 0); +} + +int onlp_sfpi_dev_writew(int port, uint8_t devaddr, uint8_t addr, uint16_t value) +{ + plat_sff_t *sff; + + if (!_sff_is_valid(port)) { + return ONLP_STATUS_E_INVALID; + } + + if 
(onlp_sfpi_is_present(port) <= 0) + { + return ONLP_STATUS_E_MISSING; + } + + sff = &plat_sffs[port]; + + return onlp_i2c_readw (sff->bus, devaddr, addr, 0); +} + +int onlp_sfpi_presence_bitmap_get (onlp_sfp_bitmap_t* dst) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int onlp_sfpi_rx_los_bitmap_get(onlp_sfp_bitmap_t* dst) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +int onlp_sfpi_control_supported(int port, onlp_sfp_control_t control, int* rv) +{ + plat_sff_t *sff; + + *rv = 0; + if (control > ONLP_SFP_CONTROL_LAST) + return ONLP_STATUS_OK; + + if (!_sff_is_valid(port)) { + return ONLP_STATUS_E_INVALID; + } + + sff = &plat_sffs[port]; + + switch (control) { + case ONLP_SFP_CONTROL_RESET_STATE: + case ONLP_SFP_CONTROL_RESET: + if (sff->reset) sff->reset (sff, -1, NULL, rv); + break; + + case ONLP_SFP_CONTROL_RX_LOS: + if (sff->rxlos) sff->rxlos (sff, -1, NULL, rv); + break; + + case ONLP_SFP_CONTROL_TX_FAULT: + if (sff->txfault) sff->txfault (sff, -1, NULL, rv); + break; + + case ONLP_SFP_CONTROL_TX_DISABLE_CHANNEL: + case ONLP_SFP_CONTROL_TX_DISABLE: + if (sff->txdisable) sff->txdisable (sff, -1, NULL, rv); + break; + + case ONLP_SFP_CONTROL_LP_MODE: + if (sff->lpmode) sff->lpmode (sff, -1, NULL, rv); + break; + + case ONLP_SFP_CONTROL_POWER_OVERRIDE: + default: + break; + } + + return ONLP_STATUS_OK; + +} + +int +onlp_sfpi_control_set(int port, onlp_sfp_control_t control, int value) +{ + plat_sff_t *sff; + + if (control > ONLP_SFP_CONTROL_LAST) + return ONLP_STATUS_E_UNSUPPORTED; + + if (!_sff_is_valid(port)) { + return ONLP_STATUS_E_INVALID; + } + + sff = &plat_sffs[port]; + + switch (control) { + case ONLP_SFP_CONTROL_RESET: + if (sff->reset) return sff->reset (sff, value, NULL, NULL); + break; + + case ONLP_SFP_CONTROL_TX_DISABLE_CHANNEL: + case ONLP_SFP_CONTROL_TX_DISABLE: + if (sff->txdisable) return sff->txdisable (sff, value, NULL, NULL); + break; + + case ONLP_SFP_CONTROL_LP_MODE: + if (sff->lpmode) return sff->lpmode (sff, value, NULL, NULL); + break; + + 
default: + break; + } + + return ONLP_STATUS_E_UNSUPPORTED; +} + +int +onlp_sfpi_control_get(int port, onlp_sfp_control_t control, int* value) +{ + plat_sff_t *sff; + + if (control > ONLP_SFP_CONTROL_LAST) + return ONLP_STATUS_E_UNSUPPORTED; + + if (!_sff_is_valid(port)) { + return ONLP_STATUS_E_INVALID; + } + + sff = &plat_sffs[port]; + + switch (control) { + case ONLP_SFP_CONTROL_RESET_STATE: + case ONLP_SFP_CONTROL_RESET: + if (sff->reset) return sff->reset (sff, -1, value, NULL); + break; + + case ONLP_SFP_CONTROL_TX_DISABLE_CHANNEL: + case ONLP_SFP_CONTROL_TX_DISABLE: + if (sff->txdisable) return sff->txdisable (sff, -1, value, NULL); + break; + + case ONLP_SFP_CONTROL_LP_MODE: + if (sff->lpmode) return sff->lpmode (sff, -1, value, NULL); + break; + + case ONLP_SFP_CONTROL_RX_LOS: + if (sff->rxlos) return sff->rxlos (sff, -1, value, NULL); + break; + + case ONLP_SFP_CONTROL_TX_FAULT: + if (sff->txfault) return sff->txfault (sff, -1, value, NULL); + break; + + default: + break; + } + + return ONLP_STATUS_E_UNSUPPORTED; +} + + +int +onlp_sfpi_denit(void) +{ + return ONLP_STATUS_OK; +} diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/sysi.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/sysi.c new file mode 100644 index 00000000..4edbd31d --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/sysi.c @@ -0,0 +1,229 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * Copyright 2017 Delta Networks, Inc + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "platform_lib.h" + +//platform_id_t platform_id = PLATFORM_ID_UNKNOWN; + +//#define ONIE_PLATFORM_NAME "x86-64-delta-ag8032-r0" + +const char* +onlp_sysi_platform_get(void) +{ + return "x86-64-delta-ag8032-r0"; +} + +int +onlp_sysi_platform_info_get(onlp_platform_info_t* pi) +{ + char buff[128]; + cpld_reg_t CpuBoardHwVer = CPLD_REG(CPLD_CPUPLD, 0x02, 4, 4); + cpld_reg_t CpuBoardPldVer = CPLD_REG(CPLD_CPUPLD, 0x01, 0, 4); + cpld_reg_t MainBoardHwVer = CPLD_REG(CPLD_SWPLD, 0x00, 0, 4); + cpld_reg_t MainBoardPldVer = CPLD_REG(CPLD_SWPLD, 0x01, 0, 4); + + + snprintf (buff, sizeof(buff), "CpuBoardPldVer=%d,MainBoardPldVer=%d", + cpld_reg_get(&CpuBoardPldVer), + cpld_reg_get(&MainBoardPldVer)); + pi->cpld_versions = aim_fstrdup("%s", buff); + + snprintf (buff, sizeof(buff), "CpuBoardHwVer=%d,MainBoardHwVer=%d", + cpld_reg_get(&CpuBoardHwVer), + cpld_reg_get(&MainBoardHwVer)); + pi->other_versions = aim_fstrdup("%s", buff); + + + return 0; +} + +int +onlp_sysi_onie_data_get(uint8_t** data, int* size) +{ + uint8_t* rdata = aim_zmalloc(256); + + if(!rdata){ + return ONLP_STATUS_E_INTERNAL; + } + + *data = rdata; + if(onlp_file_read(rdata, 256, size, ONIE_EEPROM_LOCATION) == ONLP_STATUS_OK) { + if(*size == 256) { + *data = rdata; + return ONLP_STATUS_OK; + } + } + + aim_free(rdata); + *size = 0; + return 
ONLP_STATUS_E_UNSUPPORTED; +} + +void +onlp_sysi_onie_data_free(uint8_t* data) +{ + aim_free(data); +} + +int +onlp_sysi_oids_get(onlp_oid_t* table, int max) +{ + int i; + onlp_oid_t* e = table; + memset(table, 0, max*sizeof(onlp_oid_t)); + + /* Thermal sensors on the platform */ + for (i = PLAT_THERMAL_ID_1; i < PLAT_THERMAL_ID_MAX; i++) { + *e++ = ONLP_THERMAL_ID_CREATE(i); + } + + /* LEDs on the platform */ + for (i = PLAT_LED_ID_1; i < PLAT_LED_ID_MAX; i++) { + *e++ = ONLP_LED_ID_CREATE(i); + } + + /* Fans on the platform */ + for (i = PLAT_FAN_ID_1; i < PLAT_FAN_ID_MAX; i++) { + *e++ = ONLP_FAN_ID_CREATE(i); + } + + /* PSUs on the platform */ + for (i = PLAT_PSU_ID_1; i < PLAT_PSU_ID_MAX; i++) { + *e++ = ONLP_PSU_ID_CREATE(i); + } + return 0; +} + +int +onlp_sysi_onie_info_get(onlp_onie_info_t* onie) +{ + onie->platform_name = aim_strdup("x86-64-delta_ag8032-r0"); + return ONLP_STATUS_OK; +} + +int +onlp_sysi_platform_manage_fans(void) +{ + onlp_thermal_info_t thermal; + int i; + int temp_max; + int rpm; + + temp_max = 0; + for (i = PLAT_THERMAL_ID_1 ; i <= PLAT_THERMAL_ID_5; i ++) { + if (onlp_thermali_info_get (ONLP_THERMAL_ID_CREATE(i), &thermal) == ONLP_STATUS_OK) + if (thermal.mcelsius > temp_max) + temp_max = thermal.mcelsius; + } + + rpm = 7500; + if((temp_max >= 30000) && (temp_max < 40000)) rpm =10000; + if((temp_max >= 45000) && (temp_max < 55000)) rpm =13000; + if((temp_max >= 60000) && (temp_max < 75000)) rpm =16000; + if( temp_max >= 80000) rpm =19000; + + for (i = PLAT_FAN_ID_1 ; i <= PLAT_FAN_ID_6 ; i ++) { + onlp_fani_rpm_set (ONLP_FAN_ID_CREATE(i), rpm); + } + + return ONLP_STATUS_OK; +} + + +int +onlp_sysi_platform_manage_leds(void) +{ + onlp_fan_info_t fan; + onlp_psu_info_t psu; + int i; + uint32_t status; + int led_setting; + int global_fail; + + // fan tray led + global_fail = 0; + for (i = 0 ; i < 3 ; i ++) { + status = 0; + if (onlp_fani_info_get (ONLP_FAN_ID_CREATE(PLAT_FAN_ID_1 + i * 2 + 0), + &fan) == ONLP_STATUS_OK) { + status |= 
fan.status; + } + if (onlp_fani_info_get (ONLP_FAN_ID_CREATE(PLAT_FAN_ID_1 + i * 2 + 1), + &fan) == ONLP_STATUS_OK) { + status |= fan.status; + } + led_setting = ONLP_LED_MODE_GREEN; + if (status & ONLP_FAN_STATUS_FAILED) { + led_setting = ONLP_LED_MODE_ORANGE; + global_fail ++; + } else if ((status & ONLP_FAN_STATUS_PRESENT) == 0) { + led_setting = ONLP_LED_MODE_OFF; + global_fail ++; + } + onlp_ledi_mode_set (ONLP_LED_ID_CREATE (PLAT_LED_ID_5 + i), led_setting); + } + + // fans led (front fan led) + onlp_ledi_mode_set (ONLP_LED_ID_CREATE (PLAT_LED_ID_2), + global_fail ? ONLP_LED_MODE_ORANGE : ONLP_LED_MODE_GREEN); + + + // pwr1 led (front) + led_setting = ONLP_LED_MODE_ORANGE; + if (onlp_psui_info_get (ONLP_PSU_ID_CREATE(PLAT_PSU_ID_1), &psu) == ONLP_STATUS_OK) { + if (psu.status & ONLP_PSU_STATUS_FAILED) + led_setting = ONLP_LED_MODE_ORANGE; + else if ((psu.status & ONLP_PSU_STATUS_PRESENT) == 0) + led_setting = ONLP_LED_MODE_OFF; + else + led_setting = ONLP_LED_MODE_GREEN; + } + onlp_ledi_mode_set (ONLP_LED_ID_CREATE (PLAT_LED_ID_3), led_setting); + + // pwr2 led (front) + led_setting = ONLP_LED_MODE_ORANGE; + if (onlp_psui_info_get (ONLP_PSU_ID_CREATE(PLAT_PSU_ID_2), &psu) == ONLP_STATUS_OK) { + if (psu.status & ONLP_PSU_STATUS_FAILED) + led_setting = ONLP_LED_MODE_ORANGE; + else if ((psu.status & ONLP_PSU_STATUS_PRESENT) == 0) + led_setting = ONLP_LED_MODE_OFF; + else + led_setting = ONLP_LED_MODE_GREEN; + } + onlp_ledi_mode_set (ONLP_LED_ID_CREATE (PLAT_LED_ID_4), led_setting); + + // sys + return ONLP_STATUS_OK; +} + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/thermali.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/thermali.c new file mode 100644 index 00000000..a3301cc2 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/thermali.c @@ -0,0 +1,265 @@ +/************************************************************ + * + * + * 
Copyright 2014, 2015 Big Switch Networks, Inc. + * Copyright 2017 Delta Networks, Inc + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * Thermal Sensor Platform Implementation. + * + ***********************************************************/ +#include +#include +#include "platform_lib.h" + +static int _psu_thermal_present (void *e); + +//////////////////////////////////////////////////////////////// +// THERMALS PLAT CONFIG +static plat_thermal_t plat_thermals[] = { + + [PLAT_THERMAL_ID_1] = { + .desc = "Thermal Sensor %d - close to cpu", + .temp_get_path = "/sys/bus/i2c/devices/0-0048/hwmon/*/temp1_input", + .warnning_set_path = "/sys/bus/i2c/devices/0-0048/hwmon/*/temp1_max_hyst", + .critical_set_path = NULL, + .shutdown_set_path = "/sys/bus/i2c/devices/0-0048/hwmon/*/temp1_max", + + .def_warnning = ONLP_THERMAL_THRESHOLD_WARNING_DEFAULT, + .def_critical = ONLP_THERMAL_THRESHOLD_ERROR_DEFAULT, + .def_shutdown = ONLP_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT, + }, + [PLAT_THERMAL_ID_2] = { + .desc = "Thermal Sensor %d - Main Board (U54)", + .temp_get_path = "/sys/bus/i2c/devices/2-004a/hwmon/*/temp1_input", + .warnning_set_path = "/sys/bus/i2c/devices/2-004a/hwmon/*/temp1_max_hyst", + .critical_set_path = NULL, + .shutdown_set_path = "/sys/bus/i2c/devices/2-004a/hwmon/*/temp1_max", + + .def_warnning = ONLP_THERMAL_THRESHOLD_WARNING_DEFAULT, + .def_critical = 
ONLP_THERMAL_THRESHOLD_ERROR_DEFAULT, + .def_shutdown = ONLP_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT, + }, + [PLAT_THERMAL_ID_3] = { + .desc = "Thermal Sensor %d - BCM chip Bottom (U70)", + .temp_get_path = "/sys/bus/i2c/devices/2-004c/hwmon/*/temp1_input", + .warnning_set_path = "/sys/bus/i2c/devices/2-004c/hwmon/*/temp1_max_hyst", + .critical_set_path = NULL, + .shutdown_set_path = "/sys/bus/i2c/devices/2-004c/hwmon/*/temp1_max", + + .def_warnning = ONLP_THERMAL_THRESHOLD_WARNING_DEFAULT, + .def_critical = ONLP_THERMAL_THRESHOLD_ERROR_DEFAULT, + .def_shutdown = ONLP_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT, + }, + [PLAT_THERMAL_ID_4] = { + .desc = "Thermal Sensor %d - BCM chip Top (U71)", + .temp_get_path = "/sys/bus/i2c/devices/2-004d/hwmon/*/temp1_input", + .warnning_set_path = "/sys/bus/i2c/devices/2-004d/hwmon/*/temp1_max_hyst", + .critical_set_path = NULL, + .shutdown_set_path = "/sys/bus/i2c/devices/2-004d/hwmon/*/temp1_max", + + .def_warnning = ONLP_THERMAL_THRESHOLD_WARNING_DEFAULT, + .def_critical = ONLP_THERMAL_THRESHOLD_ERROR_DEFAULT, + .def_shutdown = ONLP_THERMAL_THRESHOLD_SHUTDOWN_DEFAULT, + }, + [PLAT_THERMAL_ID_5] = { + .desc = "Thermal Sensor %d - Cpu Core", + .temp_get_path = "/sys/devices/platform/coretemp.0/hwmon/*/temp2_input", + .warnning_set_path = "/sys/devices/platform/coretemp.0/hwmon/*/temp2_crit", + .critical_set_path = NULL, + .shutdown_set_path = "/sys/devices/platform/coretemp.0/hwmon/*/temp2_max", + + .def_warnning = 0, + .def_critical = 0, + .def_shutdown = 0, + }, + [PLAT_THERMAL_ID_6] = { + .desc = "Thermal Sensor %d - PSU1", + .present = _psu_thermal_present, + .temp_get_path = "/sys/bus/i2c/devices/i2c-4/4-0058/hwmon/*/temp1_input", + .shutdown_set_path = "/sys/bus/i2c/devices/i2c-4/4-0058/hwmon/*/temp1_max", + + .def_warnning = 0, + .def_critical = 0, + .def_shutdown = 0, + }, + [PLAT_THERMAL_ID_7] = { + .desc = "Thermal Sensor %d - PSU2", + .present = _psu_thermal_present, + .temp_get_path = 
"/sys/bus/i2c/devices/i2c-4/4-0059/hwmon/*/temp1_input", + .shutdown_set_path = "/sys/bus/i2c/devices/i2c-4/4-0059/hwmon/*/temp1_max", + + .def_warnning = 0, + .def_critical = 0, + .def_shutdown = 0, + }, +}; + +#define plat_thermals_size (sizeof(plat_thermals)/sizeof(plat_thermals[0])) + +static int _psu_thermal_present (void *e) +{ + plat_thermal_t *thermal = e; + return plat_os_file_is_existed (thermal->temp_get_path) ? 1 : 0; +} + +static int plat_thermal_is_valid (int id) +{ + plat_thermal_t *thermal; + + if (id < 0 && id >= plat_thermals_size) + return 0; + + thermal = &plat_thermals[id]; + if (thermal->temp_get_path || thermal->desc) + return 1; + + return 0; +} + +int onlp_thermali_init(void) +{ + int i; + plat_thermal_t *thermal; + + for (i = 0 ; i < plat_thermals_size ; i ++) { + if (!plat_thermal_is_valid (i)) + continue; + thermal = &plat_thermals[i]; + + if (thermal->warnning_set_path && thermal->def_warnning) + plat_os_file_write_int (thermal->def_warnning, thermal->warnning_set_path, NULL); + if (thermal->critical_set_path && thermal->def_critical) + plat_os_file_write_int (thermal->def_critical, thermal->critical_set_path, NULL); + if (thermal->shutdown_set_path && thermal->def_shutdown) + plat_os_file_write_int (thermal->def_shutdown, thermal->shutdown_set_path, NULL); + } + return ONLP_STATUS_OK; +} + +/* + * Retrieve the information structure for the given thermal OID. + * + * If the OID is invalid, return ONLP_E_STATUS_INVALID. + * If an unexpected error occurs, return ONLP_E_STATUS_INTERNAL. + * Otherwise, return ONLP_STATUS_OK with the OID's information. + * + * Note -- it is expected that you fill out the information + * structure even if the sensor described by the OID is not present. 
+ */ +int +onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) +{ + int tid; + int present = 1; + plat_thermal_t *thermal; + int value; + int error; + + if (!ONLP_OID_IS_THERMAL(id)) + return ONLP_STATUS_E_INVALID; + + tid = ONLP_OID_ID_GET(id); + + if (!plat_thermal_is_valid(tid)) + return ONLP_STATUS_E_INVALID; + + thermal = &plat_thermals[tid]; + + if (thermal->present) { + present = thermal->present(thermal) ? 1 : 0; + } + + memset (info, 0, sizeof(*info)); + + // fix onlp_thermal_info_t + info->hdr.id = id; + if (thermal->desc) + snprintf (info->hdr.description, sizeof(info->hdr.description), thermal->desc, tid); + + if (thermal->temp_get_path) + info->caps |= ONLP_THERMAL_CAPS_GET_TEMPERATURE; + if (thermal->warnning_set_path || thermal->def_warnning) + info->caps |= ONLP_THERMAL_CAPS_GET_WARNING_THRESHOLD; + if (thermal->critical_set_path || thermal->def_critical) + info->caps |= ONLP_THERMAL_CAPS_GET_ERROR_THRESHOLD; + if (thermal->shutdown_set_path || thermal->def_shutdown) + info->caps |= ONLP_THERMAL_CAPS_GET_SHUTDOWN_THRESHOLD; + + // Get value + error = 0; + if (info->caps & ONLP_THERMAL_CAPS_GET_TEMPERATURE) { + if (plat_os_file_read_int(&value, thermal->temp_get_path, NULL) < 0) + error ++; + else + info->mcelsius = value; + } + if (info->caps & ONLP_THERMAL_CAPS_GET_WARNING_THRESHOLD) { + if (thermal->warnning_set_path) { + if (plat_os_file_read_int(&value, thermal->warnning_set_path, NULL) < 0) + error ++; + else + info->thresholds.warning = value; + } else { + info->thresholds.warning = thermal->def_warnning; + } + } + if (info->caps & ONLP_THERMAL_CAPS_GET_ERROR_THRESHOLD) { + if (thermal->critical_set_path) { + if (plat_os_file_read_int(&value, thermal->critical_set_path, NULL) < 0) + error ++; + else + info->thresholds.error = value; + } else { + info->thresholds.error = thermal->def_critical; + } + } + if (info->caps & ONLP_THERMAL_CAPS_GET_SHUTDOWN_THRESHOLD) { + if (thermal->shutdown_set_path) { + if 
(plat_os_file_read_int(&value, thermal->shutdown_set_path, NULL) < 0) + error ++; + else + info->thresholds.shutdown = value; + } else { + info->thresholds.shutdown = thermal->def_shutdown; + } + } + + + + if (present) + info->status |= ONLP_THERMAL_STATUS_PRESENT; + + // check threshold + if (info->caps & ONLP_THERMAL_CAPS_GET_TEMPERATURE) { + if (info->caps & ONLP_THERMAL_CAPS_GET_ERROR_THRESHOLD) { + if (info->mcelsius >= info->thresholds.error) { + info->status |= ONLP_THERMAL_STATUS_FAILED; + } + } + if (info->caps & ONLP_THERMAL_CAPS_GET_SHUTDOWN_THRESHOLD) { + if (info->mcelsius >= info->thresholds.shutdown) { + info->status |= ONLP_THERMAL_STATUS_FAILED; + } + } + } + + + return error ? ONLP_STATUS_E_INTERNAL : ONLP_STATUS_OK; +} + + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_config.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_config.c new file mode 100644 index 00000000..60c05d8c --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_config.c @@ -0,0 +1,81 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +/* */ +#define __x86_64_delta_ag8032_config_STRINGIFY_NAME(_x) #_x +#define __x86_64_delta_ag8032_config_STRINGIFY_VALUE(_x) __x86_64_delta_ag8032_config_STRINGIFY_NAME(_x) +x86_64_delta_ag8032_config_settings_t x86_64_delta_ag8032_config_settings[] = +{ +#ifdef X86_64_DELTA_AG8032_CONFIG_INCLUDE_LOGGING + { __x86_64_delta_ag8032_config_STRINGIFY_NAME(X86_64_DELTA_AG8032_CONFIG_INCLUDE_LOGGING), __x86_64_delta_ag8032_config_STRINGIFY_VALUE(X86_64_DELTA_AG8032_CONFIG_INCLUDE_LOGGING) }, +#else +{ X86_64_DELTA_AG8032_CONFIG_INCLUDE_LOGGING(__x86_64_delta_ag8032_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef 
X86_64_DELTA_AG8032_CONFIG_LOG_OPTIONS_DEFAULT + { __x86_64_delta_ag8032_config_STRINGIFY_NAME(X86_64_DELTA_AG8032_CONFIG_LOG_OPTIONS_DEFAULT), __x86_64_delta_ag8032_config_STRINGIFY_VALUE(X86_64_DELTA_AG8032_CONFIG_LOG_OPTIONS_DEFAULT) }, +#else +{ X86_64_DELTA_AG8032_CONFIG_LOG_OPTIONS_DEFAULT(__x86_64_delta_ag8032_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_DELTA_AG8032_CONFIG_LOG_BITS_DEFAULT + { __x86_64_delta_ag8032_config_STRINGIFY_NAME(X86_64_DELTA_AG8032_CONFIG_LOG_BITS_DEFAULT), __x86_64_delta_ag8032_config_STRINGIFY_VALUE(X86_64_DELTA_AG8032_CONFIG_LOG_BITS_DEFAULT) }, +#else +{ X86_64_DELTA_AG8032_CONFIG_LOG_BITS_DEFAULT(__x86_64_delta_ag8032_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_DELTA_AG8032_CONFIG_LOG_CUSTOM_BITS_DEFAULT + { __x86_64_delta_ag8032_config_STRINGIFY_NAME(X86_64_DELTA_AG8032_CONFIG_LOG_CUSTOM_BITS_DEFAULT), __x86_64_delta_ag8032_config_STRINGIFY_VALUE(X86_64_DELTA_AG8032_CONFIG_LOG_CUSTOM_BITS_DEFAULT) }, +#else +{ X86_64_DELTA_AG8032_CONFIG_LOG_CUSTOM_BITS_DEFAULT(__x86_64_delta_ag8032_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB + { __x86_64_delta_ag8032_config_STRINGIFY_NAME(X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB), __x86_64_delta_ag8032_config_STRINGIFY_VALUE(X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB) }, +#else +{ X86_64_DELTA_AG8032_CONFIG_PORTING_STDLIB(__x86_64_delta_ag8032_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_DELTA_AG8032_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + { __x86_64_delta_ag8032_config_STRINGIFY_NAME(X86_64_DELTA_AG8032_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS), __x86_64_delta_ag8032_config_STRINGIFY_VALUE(X86_64_DELTA_AG8032_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS) }, +#else +{ X86_64_DELTA_AG8032_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS(__x86_64_delta_ag8032_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_DELTA_AG8032_CONFIG_INCLUDE_UCLI + { 
__x86_64_delta_ag8032_config_STRINGIFY_NAME(X86_64_DELTA_AG8032_CONFIG_INCLUDE_UCLI), __x86_64_delta_ag8032_config_STRINGIFY_VALUE(X86_64_DELTA_AG8032_CONFIG_INCLUDE_UCLI) }, +#else +{ X86_64_DELTA_AG8032_CONFIG_INCLUDE_UCLI(__x86_64_delta_ag8032_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_DELTA_AG8032_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION + { __x86_64_delta_ag8032_config_STRINGIFY_NAME(X86_64_DELTA_AG8032_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION), __x86_64_delta_ag8032_config_STRINGIFY_VALUE(X86_64_DELTA_AG8032_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION) }, +#else +{ X86_64_DELTA_AG8032_CONFIG_INCLUDE_DEFAULT_FAN_DIRECTION(__x86_64_delta_ag8032_config_STRINGIFY_NAME), "__undefined__" }, +#endif + { NULL, NULL } +}; +#undef __x86_64_delta_ag8032_config_STRINGIFY_VALUE +#undef __x86_64_delta_ag8032_config_STRINGIFY_NAME + +const char* +x86_64_delta_ag8032_config_lookup(const char* setting) +{ + int i; + for(i = 0; x86_64_delta_ag8032_config_settings[i].name; i++) { + if(strcmp(x86_64_delta_ag8032_config_settings[i].name, setting)) { + return x86_64_delta_ag8032_config_settings[i].value; + } + } + return NULL; +} + +int +x86_64_delta_ag8032_config_show(struct aim_pvs_s* pvs) +{ + int i; + for(i = 0; x86_64_delta_ag8032_config_settings[i].name; i++) { + aim_printf(pvs, "%s = %s\n", x86_64_delta_ag8032_config_settings[i].name, x86_64_delta_ag8032_config_settings[i].value); + } + return i; +} + +/* */ + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_enums.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_enums.c new file mode 100644 index 00000000..5168132e --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_enums.c @@ -0,0 +1,10 @@ +/**************************************************************************//** + * + * + * + 
*****************************************************************************/ +#include + +/* <--auto.start.enum(ALL).source> */ +/* */ + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_int.h b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_int.h new file mode 100644 index 00000000..b5041fda --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_int.h @@ -0,0 +1,12 @@ +/**************************************************************************//** + * + * x86_64_delta_ag8032 Internal Header + * + *****************************************************************************/ +#ifndef __x86_64_delta_ag8032_INT_H__ +#define __x86_64_delta_ag8032_INT_H__ + +#include + + +#endif /* __x86_64_delta_ag8032_INT_H__ */ diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_log.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_log.c new file mode 100644 index 00000000..a4ca271a --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_log.c @@ -0,0 +1,18 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#include "x86_64_delta_ag8032_log.h" +/* + * x86_64_delta_ag8032 log struct. 
+ */ +AIM_LOG_STRUCT_DEFINE( + X86_64_DELTA_AG8032_CONFIG_LOG_OPTIONS_DEFAULT, + X86_64_DELTA_AG8032_CONFIG_LOG_BITS_DEFAULT, + NULL, /* Custom log map */ + X86_64_DELTA_AG8032_CONFIG_LOG_CUSTOM_BITS_DEFAULT + ); + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_log.h b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_log.h new file mode 100644 index 00000000..9e1970bf --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_log.h @@ -0,0 +1,12 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#ifndef __x86_64_delta_ag8032_LOG_H__ +#define __x86_64_delta_ag8032_LOG_H__ + +#define AIM_LOG_MODULE_NAME x86_64_delta_ag8032 +#include + +#endif /* __x86_64_delta_ag8032_LOG_H__ */ diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_module.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_module.c new file mode 100644 index 00000000..1f955e54 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_module.c @@ -0,0 +1,24 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#include "x86_64_delta_ag8032_log.h" + +static int +datatypes_init__(void) +{ +#define x86_64_delta_ag8032_ENUMERATION_ENTRY(_enum_name, _desc) AIM_DATATYPE_MAP_REGISTER(_enum_name, _enum_name##_map, _desc, AIM_LOG_INTERNAL); +#include + return 0; +} + +void __x86_64_delta_ag8032_module_init__(void) +{ + AIM_LOG_STRUCT_REGISTER(); + datatypes_init__(); +} + +int __onlp_platform_version__ = 1; diff 
--git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_ucli.c b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_ucli.c new file mode 100644 index 00000000..b164d8ac --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/onlp/builds/src/module/src/x86_64_delta_ag8032_ucli.c @@ -0,0 +1,50 @@ +/**************************************************************************//** + * + * + * + *****************************************************************************/ +#include + +#if X86_64_DELTA_AG8032_CONFIG_INCLUDE_UCLI == 1 + +#include +#include +#include + +static ucli_status_t +x86_64_delta_ag8032_ucli_ucli__config__(ucli_context_t* uc) +{ + UCLI_HANDLER_MACRO_MODULE_CONFIG(x86_64_delta_ag8032) +} + +/* */ +/* */ + +static ucli_module_t +x86_64_delta_ag8032_ucli_module__ = + { + "x86_64_delta_ag8032_ucli", + NULL, + x86_64_delta_ag8032_ucli_ucli_handlers__, + NULL, + NULL, + }; + +ucli_node_t* +x86_64_delta_ag8032_ucli_node_create(void) +{ + ucli_node_t* n; + ucli_module_init(&x86_64_delta_ag8032_ucli_module__); + n = ucli_node_create("x86_64_delta_ag8032", NULL, &x86_64_delta_ag8032_ucli_module__); + ucli_node_subnode_add(n, ucli_module_log_node_create("x86_64_delta_ag8032")); + return n; +} + +#else +void* +x86_64_delta_ag8032_ucli_node_create(void) +{ + return NULL; +} +#endif + diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/Makefile b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/Makefile new file mode 100644 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/Makefile b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/Makefile new file mode 100644 
index 00000000..003238cf --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/PKG.yml b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/PKG.yml new file mode 100644 index 00000000..562030ca --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=delta BASENAME=x86-64-delta-ag8032 REVISION=r0 diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/src/lib/x86-64-delta-ag8032-r0.yml b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/src/lib/x86-64-delta-ag8032-r0.yml new file mode 100644 index 00000000..4412aab4 --- /dev/null +++ b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/src/lib/x86-64-delta-ag8032-r0.yml @@ -0,0 +1,34 @@ +--- + +###################################################################### +# +# platform-config for AG8032 +# +###################################################################### + +x86-64-delta-ag8032-r0: + + grub: + + serial: >- + --port=0x2f8 + --speed=115200 + --word=8 + --parity=no + --stop=1 + + kernel: + <<: *kernel-4-9 + + args: >- + nopat + acpi=off + console=ttyS1,115200n8 + i2c_dev_auto_detect=0 + tsc=reliable + + ##network + ## interfaces: + ## ma1: + ## name: ~ + ## syspath: pci0000:00/0000:00:14.0 diff --git a/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/src/python/x86_64_delta_ag8032_r0/__init__.py b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/src/python/x86_64_delta_ag8032_r0/__init__.py new file mode 100644 index 00000000..645ca5ec --- /dev/null +++ 
b/packages/platforms/delta/x86-64/x86-64-delta-ag8032/platform-config/r0/src/python/x86_64_delta_ag8032_r0/__init__.py @@ -0,0 +1,34 @@ +from onl.platform.base import * +from onl.platform.delta import * + +class OnlPlatform_x86_64_delta_ag8032_r0(OnlPlatformDelta,OnlPlatformPortConfig_32x40): + + PLATFORM='x86-64-delta-ag8032-r0' + MODEL="AG8032" + SYS_OBJECT_ID=".8032.1" + + def baseconfig(self): + self.insmod('/lib/modules/4.9.75-OpenNetworkLinux/kernel/drivers/i2c/busses/i2c-ismt.ko') + self.insmod('/lib/modules/4.9.75-OpenNetworkLinux/kernel/drivers/misc/eeprom/at24.ko') + self.insmod('x86-64-delta-ag8032-i2c-mux-setting.ko') + self.insmod('x86-64-delta-ag8032-i2c-mux-cpld.ko') + + + self.new_i2c_devices( + [ + ('tmp75', 0x48, 0), + ('tmp75', 0x4a, 2), + ('tmp75', 0x4c, 2), + ('tmp75', 0x4d, 2), + ('24c02', 0x51, 2), + ('24c02', 0x52, 2), + ('24c02', 0x53, 2), + ('24c08', 0x54, 5), + ('max6620', 0x29, 2), + ('max6620', 0x2a, 2), + ('at24c02', 0x50, 4), + ] + ) + + + return True diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/.gitignore b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/.gitignore new file mode 100755 index 00000000..9f7b1342 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/.gitignore @@ -0,0 +1 @@ +onlpdump.mk diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/Makefile new file mode 100755 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/modules/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/modules/Makefile new file mode 100755 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/modules/Makefile @@ 
-0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/modules/PKG.yml b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/modules/PKG.yml new file mode 100755 index 00000000..23adefed --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/no-platform-modules.yml ARCH=amd64 VENDOR=ingrasys BASENAME=x86-64-ingrasys-s9180-32x KERNELS="onl-kernel-3.16-lts-x86-64-all:amd64" diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/Makefile new file mode 100755 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/PKG.yml b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/PKG.yml new file mode 100755 index 00000000..c87e02d4 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/onlp-platform-any.yml PLATFORM=x86-64-ingrasys-s9180-32x ARCH=amd64 TOOLCHAIN=x86_64-linux-gnu diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/Makefile new file mode 100755 index 00000000..e7437cb2 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/Makefile @@ -0,0 +1,2 @@ +FILTER=src +include $(ONL)/make/subdirs.mk diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/lib/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/lib/Makefile new file mode 100755 index 00000000..cafc27df --- /dev/null 
+++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/lib/Makefile @@ -0,0 +1,45 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. +# +# +############################################################ +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +MODULE := libonlp-x86-64-ingrasys-s9180-32x +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF x86_64_ingrasys_s9180_32x onlplib +DEPENDMODULE_HEADERS := sff + +include $(BUILDER)/dependmodules.mk + +SHAREDLIB := libonlp-x86-64-ingrasys-s9180-32x.so +$(SHAREDLIB)_TARGETS := $(ALL_TARGETS) +include $(BUILDER)/so.mk +.DEFAULT_GOAL := $(SHAREDLIB) + +GLOBAL_CFLAGS += -I$(onlp_BASEDIR)/module/inc +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -fPIC +GLOBAL_LINK_LIBS += -lpthread + +include $(BUILDER)/targets.mk + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/onlpdump/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/onlpdump/Makefile new file mode 100755 index 00000000..399c4b02 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/onlpdump/Makefile @@ -0,0 +1,45 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. 
+# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. +# +# +############################################################ +# +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +.DEFAULT_GOAL := onlpdump + +MODULE := onlpdump +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF onlp x86_64_ingrasys_s9180_32x onlplib onlp_platform_defaults sff cjson cjson_util timer_wheel OS + +include $(BUILDER)/dependmodules.mk + +BINARY := onlpdump +$(BINARY)_LIBRARIES := $(LIBRARY_TARGETS) +include $(BUILDER)/bin.mk + +GLOBAL_CFLAGS += -DAIM_CONFIG_AIM_MAIN_FUNCTION=onlpdump_main +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MAIN=1 +GLOBAL_LINK_LIBS += -lpthread -lm + +include $(BUILDER)/targets.mk diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/.gitignore b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/.gitignore new file mode 100755 index 00000000..bb569e2c --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/.gitignore @@ -0,0 +1,2 @@ +/x86_64_ingrasys_s9180_32x.mk +/doc diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/.module 
b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/.module new file mode 100755 index 00000000..88300c89 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/.module @@ -0,0 +1 @@ +name: x86_64_ingrasys_s9180_32x diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/Makefile new file mode 100755 index 00000000..9e00628a --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/Makefile @@ -0,0 +1,10 @@ +############################################################ +# +# +# +############################################################ +include $(ONL)/make/config.mk + +MODULE := x86_64_ingrasys_s9180_32x +AUTOMODULE := x86_64_ingrasys_s9180_32x +include $(BUILDER)/definemodule.mk diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/auto/make.mk b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/auto/make.mk new file mode 100755 index 00000000..61665f86 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/auto/make.mk @@ -0,0 +1,7 @@ +# +# x86_64_ingrasys_s9180_32x Autogeneration +# +############################################################################### +x86-64-ingrasys-s9180-32x_AUTO_DEFS := module/auto/x86-64-ingrasys-s9180-32x.yml +x86-64-ingrasys-s9180-32x_AUTO_DIRS := module/inc/x86-64-ingrasys-s9180-32x module/src +include $(BUILDER)/auto.mk diff --git 
a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/auto/x86-64-ingrasys-s9180-32x.yml b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/auto/x86-64-ingrasys-s9180-32x.yml new file mode 100755 index 00000000..1c6422b4 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/auto/x86-64-ingrasys-s9180-32x.yml @@ -0,0 +1,47 @@ +############################################################################### +# +# X86_64_INGRASYS_S9180_32X Autogeneration Definitions. +# +############################################################################### + +cdefs: &cdefs +- X86_64_INGRASYS_S9180_32X_CONFIG_INCLUDE_LOGGING: + doc: "Include or exclude logging." + default: 1 +- X86_64_INGRASYS_S9180_32X_CONFIG_LOG_OPTIONS_DEFAULT: + doc: "Default enabled log options." + default: AIM_LOG_OPTIONS_DEFAULT +- X86_64_INGRASYS_S9180_32X_CONFIG_LOG_BITS_DEFAULT: + doc: "Default enabled log bits." + default: AIM_LOG_BITS_DEFAULT +- X86_64_INGRASYS_S9180_32X_CONFIG_LOG_CUSTOM_BITS_DEFAULT: + doc: "Default enabled custom log bits." + default: 0 +- X86_64_INGRASYS_S9180_32X_CONFIG_PORTING_STDLIB: + doc: "Default all porting macros to use the C standard libraries." + default: 1 +- X86_64_INGRASYS_S9180_32X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS: + doc: "Include standard library headers for stdlib porting macros." + default: X86_64_INGRASYS_S9180_32X_CONFIG_PORTING_STDLIB +- X86_64_INGRASYS_S9180_32X_CONFIG_INCLUDE_UCLI: + doc: "Include generic uCli support." 
+ default: 0 + + +definitions: + cdefs: + X86_64_INGRASYS_S9180_32X_CONFIG_HEADER: + defs: *cdefs + basename: x86_64_ingrasys_s9180_32x_config + + portingmacro: + X86_64_INGRASYS_S9180_32X: + macros: + - malloc + - free + - memset + - memcpy + - strncpy + - vsnprintf + - snprintf + - strlen diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x.x b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x.x new file mode 100755 index 00000000..e13d404c --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x.x @@ -0,0 +1,34 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +/* <--auto.start.xmacro(ALL).define> */ +/* */ + +/* <--auto.start.xenum(ALL).define> */ +/* */ + + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x_config.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x_config.h new file mode 100755 index 00000000..db82c6fc --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x_config.h @@ -0,0 +1,162 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +/**************************************************************************//** + * + * @file + * @brief x86_64_ingrasys_s9180_32x Configuration Header + * + * @addtogroup x86_64_ingrasys_s9180_32x-config + * @{ + * + *****************************************************************************/ +#ifndef __X86_64_INGRAYSYS_S9180_32X_CONFIG_H__ +#define __X86_64_INGRAYSYS_S9180_32X_CONFIG_H__ + +#ifdef GLOBAL_INCLUDE_CUSTOM_CONFIG +#include +#endif +#ifdef X86_64_INGRAYSYS_S9180_32X_INCLUDE_CUSTOM_CONFIG +#include +#endif + +/* */ +#include +/** + * X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_LOGGING + * + * Include or exclude logging. */ + + +#ifndef X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_LOGGING +#define X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_LOGGING 1 +#endif + +/** + * X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_OPTIONS_DEFAULT + * + * Default enabled log options. */ + + +#ifndef X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_OPTIONS_DEFAULT +#define X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_OPTIONS_DEFAULT AIM_LOG_OPTIONS_DEFAULT +#endif + +/** + * X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_BITS_DEFAULT + * + * Default enabled log bits. */ + + +#ifndef X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_BITS_DEFAULT +#define X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_BITS_DEFAULT AIM_LOG_BITS_DEFAULT +#endif + +/** + * X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_CUSTOM_BITS_DEFAULT + * + * Default enabled custom log bits. */ + + +#ifndef X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_CUSTOM_BITS_DEFAULT +#define X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_CUSTOM_BITS_DEFAULT 0 +#endif + +/** + * X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB + * + * Default all porting macros to use the C standard libraries. 
*/ + + +#ifndef X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB +#define X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB 1 +#endif + +/** + * X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + * + * Include standard library headers for stdlib porting macros. */ + + +#ifndef X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS +#define X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB +#endif + +/** + * X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_UCLI + * + * Include generic uCli support. */ + + +#ifndef X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_UCLI +#define X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_UCLI 0 +#endif + +/** + * X86_64_INGRAYSYS_S9180_32X_CONFIG_SFP_COUNT + * + * SFP Count. */ + + +#ifndef X86_64_INGRAYSYS_S9180_32X_CONFIG_SFP_COUNT +#define X86_64_INGRAYSYS_S9180_32X_CONFIG_SFP_COUNT 0 +#endif + + + +/** + * All compile time options can be queried or displayed + */ + +/** Configuration settings structure. */ +typedef struct x86_64_ingrasys_s9180_32x_config_settings_s { + /** name */ + const char* name; + /** value */ + const char* value; +} x86_64_ingrasys_s9180_32x_config_settings_t; + +/** Configuration settings table. */ +/** x86_64_ingrasys_s9180_32x_config_settings table. */ +extern x86_64_ingrasys_s9180_32x_config_settings_t x86_64_ingrasys_s9180_32x_config_settings[]; + +/** + * @brief Lookup a configuration setting. + * @param setting The name of the configuration option to lookup. + */ +const char* x86_64_ingrasys_s9180_32x_config_lookup(const char* setting); + +/** + * @brief Show the compile-time configuration. + * @param pvs The output stream. 
+ */ +int x86_64_ingrasys_s9180_32x_config_show(struct aim_pvs_s* pvs); + +/* */ + +#include "x86_64_ingrasys_s9180_32x_porting.h" + +#endif /* __X86_64_INGRAYSYS_S9180_32X_CONFIG_H__ */ +/* @} */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x_dox.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x_dox.h new file mode 100755 index 00000000..76067c8e --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x_dox.h @@ -0,0 +1,26 @@ +/**************************************************************************//** + * + * x86_64_ingrasys_s9180_32x Doxygen Header + * + *****************************************************************************/ +#ifndef __x86_64_ingrasys_s9180_32x_DOX_H__ +#define __x86_64_ingrasys_s9180_32x_DOX_H__ + +/** + * @defgroup x86_64_ingrasys_s9180_32x x86_64_ingrasys_s9180_32x - x86_64_ingrasys_s9180_32x Description + * + +The documentation overview for this module should go here. 
+ + * + * @{ + * + * @defgroup x86_64_ingrasys_s9180_32x-x86_64_ingrasys_s9180_32x Public Interface + * @defgroup x86_64_ingrasys_s9180_32x-config Compile Time Configuration + * @defgroup x86_64_ingrasys_s9180_32x-porting Porting Macros + * + * @} + * + */ + +#endif /* __x86_64_ingrasys_s9180_32x_DOX_H__ */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x_porting.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x_porting.h new file mode 100755 index 00000000..1d3fb7c1 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/inc/x86_64_ingrasys_s9180_32x/x86_64_ingrasys_s9180_32x_porting.h @@ -0,0 +1,106 @@ +/********************************************************//** + * + * @file + * @brief x86_64_Ingrasys_s9180_32x Porting Macros. + * + * @addtogroup x86_64_Ingrasys_s9180_32x-porting + * @{ + * + ***********************************************************/ +#ifndef __X86_64_INGRAYSYS_S9180_32X_PORTING_H__ +#define __X86_64_INGRAYSYS_S9180_32X_PORTING_H__ + +/* */ +#if X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS == 1 +#include +#include +#include +#include +#include +#endif + +#ifndef X86_64_INGRAYSYS_S9180_32X_MALLOC + #if defined(GLOBAL_MALLOC) + #define X86_64_INGRAYSYS_S9180_32X_MALLOC GLOBAL_MALLOC + #elif X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9180_32X_MALLOC malloc + #else + #error The macro X86_64_INGRAYSYS_S9180_32X_MALLOC is required but cannot be defined. 
+ #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9180_32X_FREE + #if defined(GLOBAL_FREE) + #define X86_64_INGRAYSYS_S9180_32X_FREE GLOBAL_FREE + #elif X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9180_32X_FREE free + #else + #error The macro X86_64_INGRAYSYS_S9180_32X_FREE is required but cannot be defined. + #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9180_32X_MEMSET + #if defined(GLOBAL_MEMSET) + #define X86_64_INGRAYSYS_S9180_32X_MEMSET GLOBAL_MEMSET + #elif X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9180_32X_MEMSET memset + #else + #error The macro X86_64_INGRAYSYS_S9180_32X_MEMSET is required but cannot be defined. + #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9180_32X_MEMCPY + #if defined(GLOBAL_MEMCPY) + #define X86_64_INGRAYSYS_S9180_32X_MEMCPY GLOBAL_MEMCPY + #elif X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9180_32X_MEMCPY memcpy + #else + #error The macro X86_64_INGRAYSYS_S9180_32X_MEMCPY is required but cannot be defined. + #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9180_32X_STRNCPY + #if defined(GLOBAL_STRNCPY) + #define X86_64_INGRAYSYS_S9180_32X_STRNCPY GLOBAL_STRNCPY + #elif X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9180_32X_STRNCPY strncpy + #else + #error The macro X86_64_INGRAYSYS_S9180_32X_STRNCPY is required but cannot be defined. + #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9180_32X_VSNPRINTF + #if defined(GLOBAL_VSNPRINTF) + #define X86_64_INGRAYSYS_S9180_32X_VSNPRINTF GLOBAL_VSNPRINTF + #elif X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9180_32X_VSNPRINTF vsnprintf + #else + #error The macro X86_64_INGRAYSYS_S9180_32X_VSNPRINTF is required but cannot be defined. 
+ #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9180_32X_SNPRINTF + #if defined(GLOBAL_SNPRINTF) + #define X86_64_INGRAYSYS_S9180_32X_SNPRINTF GLOBAL_SNPRINTF + #elif X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9180_32X_SNPRINTF snprintf + #else + #error The macro X86_64_INGRAYSYS_S9180_32X_SNPRINTF is required but cannot be defined. + #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9180_32X_STRLEN + #if defined(GLOBAL_STRLEN) + #define X86_64_INGRAYSYS_S9180_32X_STRLEN GLOBAL_STRLEN + #elif X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9180_32X_STRLEN strlen + #else + #error The macro X86_64_INGRAYSYS_S9180_32X_STRLEN is required but cannot be defined. + #endif +#endif + +/* */ + + +#endif /* __X86_64_INGRAYSYS_S9180_32X_PORTING_H__ */ +/* @} */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/make.mk b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/make.mk new file mode 100755 index 00000000..03592ef6 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/make.mk @@ -0,0 +1,29 @@ +############################################################ +# +# +# Copyright 2014, 2015 Big Switch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. 
+# +# +############################################################ +# +# +# +############################################################ +THIS_DIR := $(dir $(lastword $(MAKEFILE_LIST))) +x86_64_ingrasys_s9180_32x_INCLUDES := -I $(THIS_DIR)inc +x86_64_ingrasys_s9180_32x_INTERNAL_INCLUDES := -I $(THIS_DIR)src +x86_64_ingrasys_s9180_32x_DEPENDMODULE_ENTRIES := init:x86_64_ingrasys_s9180_32x ucli:x86_64_ingrasys_s9180_32x + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/Makefile new file mode 100755 index 00000000..fe84a5d1 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/Makefile @@ -0,0 +1,30 @@ +############################################################ +# +# +# Copyright 2014, 2015 Big Switch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. +# +# +############################################################ +# +# Local source generation targets. 
+# +############################################################ + +include ../../../../init.mk + +ucli: + $(SUBMODULE_BIGCODE)/tools/uclihandlers.py x86_64_ingrasys_s9180_32x_ucli.c + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/fani.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/fani.c new file mode 100755 index 00000000..a08c504d --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/fani.c @@ -0,0 +1,327 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * Fan Platform Implementation Defaults. 
+ * + ***********************************************************/ +#include +#include "x86_64_ingrasys_s9180_32x_int.h" +#include +#include +#include "platform_lib.h" + +onlp_fan_info_t fan_info[] = { + { }, /* Not used */ + { + { FAN_OID_FAN1, "FANTRAY 1-A", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_FAN2, "FANTRAY 1-B", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_FAN3, "FANTRAY 2-A", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_FAN4, "FANTRAY 2-B", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_FAN5, "FANTRAY 3-A", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_FAN6, "FANTRAY 3-B", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_FAN7, "FANTRAY 4-A", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_FAN8, "FANTRAY 4-B", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_PSU_FAN1, "PSU-1 FAN", 0 }, + ONLP_FAN_STATUS_PRESENT, + }, + { + { FAN_OID_PSU_FAN2, "PSU-2 FAN", 0 }, + ONLP_FAN_STATUS_PRESENT, + } +}; + +/* + * This function will be called prior to all of onlp_fani_* functions. 
+ */ +int +onlp_fani_init(void) +{ + return ONLP_STATUS_OK; +} + +int sys_fan_present_get(onlp_fan_info_t* info, int id) +{ + int rv, fan_presence, i2c_bus, offset, fan_reg_mask; + + /* get fan presence*/ + i2c_bus = I2C_BUS_59; + switch (id) + { + case FAN_ID_FAN1: + case FAN_ID_FAN2: + offset = 1; + fan_reg_mask = FAN_1_2_PRESENT_MASK; + break; + case FAN_ID_FAN3: + case FAN_ID_FAN4: + offset = 1; + fan_reg_mask = FAN_3_4_PRESENT_MASK; + break; + case FAN_ID_FAN5: + case FAN_ID_FAN6: + offset = 0; + fan_reg_mask = FAN_5_6_PRESENT_MASK; + break; + case FAN_ID_FAN7: + case FAN_ID_FAN8: + offset = 0; + fan_reg_mask = FAN_7_8_PRESENT_MASK; + break; + default: + return ONLP_STATUS_E_INVALID; + } + + rv = onlp_i2c_readb(i2c_bus, FAN_GPIO_ADDR, offset, ONLP_I2C_F_FORCE); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + fan_presence = (rv & fan_reg_mask) ? 0 : 1; + + if (!fan_presence) { + info->status &= ~ONLP_FAN_STATUS_PRESENT; + } else { + info->status |= ONLP_FAN_STATUS_PRESENT; + } + + return ONLP_STATUS_OK; +} + +int +sys_fan_info_get(onlp_fan_info_t* info, int id) +{ + int rv, fan_status, fan_rpm, perc_val, percentage; + int max_fan_speed = 22000; + fan_status = 0; + fan_rpm = 0; + + rv = sys_fan_present_get(info, id); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + rv = onlp_file_read_int(&fan_status, SYS_FAN_PREFIX "fan%d_alarm", id); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + /* fan status > 1, means failure */ + if (fan_status > 0) { + info->status |= ONLP_FAN_STATUS_FAILED; + return ONLP_STATUS_OK; + } + + rv = onlp_file_read_int(&fan_rpm, SYS_FAN_PREFIX "fan%d_input", id); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + info->rpm = fan_rpm; + + /* get speed percentage*/ + switch (id) + { + case FAN_ID_FAN1: + case FAN_ID_FAN2: + case FAN_ID_FAN3: + case FAN_ID_FAN4: + rv = onlp_file_read_int(&perc_val, SYS_FAN_PREFIX "pwm%d", + FAN_CTRL_SET1); + break; + case FAN_ID_FAN5: + case FAN_ID_FAN6: + case FAN_ID_FAN7: + case 
FAN_ID_FAN8: + rv = onlp_file_read_int(&perc_val, SYS_FAN_PREFIX "pwm%d", + FAN_CTRL_SET2); + break; + default: + return ONLP_STATUS_E_INVALID; + } + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + percentage = (info->rpm*100)/max_fan_speed; + info->percentage = percentage; + + return ONLP_STATUS_OK; +} + +int +sys_fan_rpm_percent_set(int perc) +{ + int rc; + rc = onlp_file_write_int(perc, SYS_FAN_PREFIX "pwm%d", FAN_CTRL_SET1); + + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + rc = onlp_file_write_int(perc, SYS_FAN_PREFIX "pwm%d", FAN_CTRL_SET2); + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +onlp_fani_rpm_set(onlp_oid_t id, int rpm) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * This function sets the fan speed of the given OID as a percentage. + * + * This will only be called if the OID has the PERCENTAGE_SET + * capability. + * + * It is optional if you have no fans at all with this feature. + */ +int +onlp_fani_percentage_set(onlp_oid_t id, int percentage) +{ + int fid, perc_val, rc; + fid = ONLP_OID_ID_GET(id); + + /* + * Set fan speed + * Driver accept value in range between 128 and 255. + * Value 128 is 50%. + * Value 200 is 80%. + * Value 255 is 100%. 
+ */ + if (percentage == 100) { + perc_val = 255; + } else if (percentage == 80) { + perc_val = 200; + } else if (percentage == 50) { + perc_val = 128; + } else { + return ONLP_STATUS_E_INVALID; + } + + switch (fid) + { + case FAN_ID_FAN1: + case FAN_ID_FAN2: + case FAN_ID_FAN3: + case FAN_ID_FAN4: + case FAN_ID_FAN5: + case FAN_ID_FAN6: + case FAN_ID_FAN7: + case FAN_ID_FAN8: + rc = sys_fan_rpm_percent_set(perc_val); + break; + default: + return ONLP_STATUS_E_INVALID; + } + return rc; +} + +int +onlp_fani_info_get(onlp_oid_t id, onlp_fan_info_t* rv) +{ + int fan_id ,rc; + + fan_id = ONLP_OID_ID_GET(id); + *rv = fan_info[fan_id]; + rv->caps |= ONLP_FAN_CAPS_GET_RPM; + + switch (fan_id) { + case FAN_ID_FAN1: + case FAN_ID_FAN2: + case FAN_ID_FAN3: + case FAN_ID_FAN4: + case FAN_ID_FAN5: + case FAN_ID_FAN6: + case FAN_ID_FAN7: + case FAN_ID_FAN8: + rc = sys_fan_info_get(rv, fan_id); + break; + case FAN_ID_PSU_FAN1: + case FAN_ID_PSU_FAN2: + rc = psu_fan_info_get(rv, fan_id); + break; + default: + return ONLP_STATUS_E_INTERNAL; + break; + } + + return rc; +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/ledi.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/ledi.c new file mode 100755 index 00000000..ea4bc275 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/ledi.c @@ -0,0 +1,228 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include + +#include "platform_lib.h" + +/* + * Get the information for the given LED OID. + */ +static onlp_led_info_t led_info[] = +{ + { }, /* Not used */ + { + { LED_OID_SYSTEM, "Chassis LED 1 (SYS LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN, + }, + { + { LED_OID_FAN, "Chassis LED 2 (FAN LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + }, + { + { LED_OID_PSU1, "Chassis LED 3 (PSU1 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + }, + { + { LED_OID_PSU2, "Chassis LED 4 (PSU2 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + }, + { + { LED_OID_FAN_TRAY1, "Rear LED 1 (FAN TRAY1 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + }, + { + { LED_OID_FAN_TRAY2, "Rear LED 2 (FAN TRAY2 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + }, + { + { LED_OID_FAN_TRAY3, "Rear LED 3 (FAN TRAY3 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + 
}, + { + { LED_OID_FAN_TRAY4, "Rear LED 4 (FAN TRAY4 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + } +}; + +extern int sys_fan_info_get(onlp_fan_info_t* info, int id); + +/* + * This function will be called prior to any other onlp_ledi_* functions. + */ +int +onlp_ledi_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_ledi_info_get(onlp_oid_t id, onlp_led_info_t* info) +{ + int led_id, pw_exist, pw_good, rc, psu_mask, fan_id; + int exist_offset, good_offset, i2c_bus; + onlp_fan_info_t fan_info; + + memset(&fan_info, 0, sizeof(onlp_fan_info_t)); + led_id = ONLP_OID_ID_GET(id); + + *info = led_info[led_id]; + + if (id == LED_OID_PSU1 || id == LED_OID_PSU2) { + + psu_mask = PSU_MUX_MASK; + + if (id == LED_OID_PSU1) { + i2c_bus = I2C_BUS_PSU1; + exist_offset = PSU1_PRESENT_OFFSET; + good_offset = PSU1_PWGOOD_OFFSET; + } else { + i2c_bus = I2C_BUS_PSU2; + exist_offset = PSU2_PRESENT_OFFSET; + good_offset = PSU2_PWGOOD_OFFSET; + } + /* check psu status */ + if ((rc = psu_present_get(&pw_exist, exist_offset, i2c_bus, psu_mask)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + if ((rc = psu_pwgood_get(&pw_good, good_offset, i2c_bus, psu_mask)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + /* psu not present */ + if (pw_exist != PSU_STATUS_PRESENT) { + info->status &= ~ONLP_LED_STATUS_ON; + info->mode = ONLP_LED_MODE_OFF; + } else if (pw_good != PSU_STATUS_POWER_GOOD) { + info->status |= ONLP_LED_STATUS_ON; + info->mode |= ONLP_LED_MODE_ORANGE; + } else { + info->status |= ONLP_LED_STATUS_ON; + info->mode |= ONLP_LED_MODE_GREEN; + } + } else if (id == LED_OID_FAN) { + info->status |= ONLP_LED_STATUS_ON; + info->mode |= ONLP_LED_MODE_GREEN; + for (fan_id=FAN_ID_FAN1; fan_id<=FAN_ID_FAN8; ++fan_id) { + rc = sys_fan_info_get(&fan_info, fan_id); + if (rc != ONLP_STATUS_OK || fan_info.status & ONLP_FAN_STATUS_FAILED) { + info->mode &= ~ONLP_LED_MODE_GREEN; + 
info->mode |= ONLP_LED_MODE_ORANGE; + break; + } + } + } else if (id == LED_OID_SYSTEM) { + info->status |= ONLP_LED_STATUS_ON; + info->mode |= ONLP_LED_MODE_GREEN; + } else { + info->status |= ONLP_LED_STATUS_ON; + info->mode |= ONLP_LED_MODE_ON; + } + + return ONLP_STATUS_OK; +} + +/* + * Turn an LED on or off. + * + * This function will only be called if the LED OID supports the ONOFF + * capability. + * + * What 'on' means in terms of colors or modes for multimode LEDs is + * up to the platform to decide. This is intended as baseline toggle mechanism. + */ +int +onlp_ledi_set(onlp_oid_t id, int on_or_off) +{ + if (!on_or_off) { + return onlp_ledi_mode_set(id, ONLP_LED_MODE_OFF); + } + + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * This function puts the LED into the given mode. It is a more functional + * interface for multimode LEDs. + * + * Only modes reported in the LED's capabilities will be attempted. + */ +int +onlp_ledi_mode_set(onlp_oid_t id, onlp_led_mode_t mode) +{ + int led_id, rc; + + led_id = ONLP_OID_ID_GET(id); + switch (led_id) { + case LED_SYSTEM_LED: + rc = system_led_set(mode); + break; + case LED_FAN_LED: + rc = fan_led_set(mode); + break; + case LED_PSU1_LED: + rc = psu1_led_set(mode); + break; + case LED_PSU2_LED: + rc = psu2_led_set(mode); + break; + case LED_FAN_TRAY1: + case LED_FAN_TRAY2: + case LED_FAN_TRAY3: + case LED_FAN_TRAY4: + rc = fan_tray_led_set(id, mode); + break; + default: + return ONLP_STATUS_E_INTERNAL; + break; + } + + return rc; +} + +int +onlp_ledi_ioctl(onlp_oid_t id, va_list vargs) +{ + return ONLP_STATUS_OK; +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/make.mk b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/make.mk new file mode 100755 index 00000000..e34082b6 --- /dev/null +++ 
b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/make.mk @@ -0,0 +1,29 @@ +############################################################ +# +# +# Copyright 2014, 2015 Big Switch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. +# +# +############################################################ +# +# +# +############################################################ + +LIBRARY := x86_64_ingrasys_s9180_32x +$(LIBRARY)_SUBDIR := $(dir $(lastword $(MAKEFILE_LIST))) +#$(LIBRARY)_LAST := 1 +include $(BUILDER)/lib.mk diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/platform_lib.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/platform_lib.c new file mode 100755 index 00000000..aedcd64a --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/platform_lib.c @@ -0,0 +1,624 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * Copyright 2013 Accton Technology Corporation. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "platform_lib.h" + +int +psu_thermal_get(onlp_thermal_info_t* info, int thermal_id) +{ + int pw_exist, pw_good, exist_offset, good_offset; + int offset, i2c_bus, rc; + int value, buf, psu_mask; + unsigned int y_value = 0; + unsigned char n_value = 0; + unsigned int temp = 0; + char result[32]; + + if (thermal_id == THERMAL_ID_PSU1_1) { + i2c_bus = I2C_BUS_PSU1; + offset = PSU_THERMAL1_OFFSET; + exist_offset = PSU1_PRESENT_OFFSET; + good_offset = PSU1_PWGOOD_OFFSET; + } else if (thermal_id == THERMAL_ID_PSU1_2) { + i2c_bus = I2C_BUS_PSU1; + offset = PSU_THERMAL2_OFFSET; + exist_offset = PSU1_PRESENT_OFFSET; + good_offset = PSU1_PWGOOD_OFFSET; + } else if (thermal_id == THERMAL_ID_PSU2_1) { + i2c_bus = I2C_BUS_PSU2; + offset = PSU_THERMAL1_OFFSET; + exist_offset = PSU2_PRESENT_OFFSET; + good_offset = PSU2_PWGOOD_OFFSET; + } else if (thermal_id == THERMAL_ID_PSU2_2) { + i2c_bus = I2C_BUS_PSU2; + offset = PSU_THERMAL2_OFFSET; + exist_offset = PSU2_PRESENT_OFFSET; + good_offset = PSU2_PWGOOD_OFFSET; + } + + psu_mask = PSU_MUX_MASK; + + /* check psu status */ + if ((rc = psu_present_get(&pw_exist, exist_offset, I2C_BUS_0, psu_mask)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_exist != PSU_STATUS_PRESENT) { + info->mcelsius = 0; + info->status &= ~ONLP_THERMAL_STATUS_PRESENT; + return 
ONLP_STATUS_OK; + } else { + info->status |= ONLP_THERMAL_STATUS_PRESENT; + } + + if ((rc = psu_pwgood_get(&pw_good, good_offset, I2C_BUS_0, psu_mask)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_good != PSU_STATUS_POWER_GOOD) { + info->mcelsius = 0; + return ONLP_STATUS_OK; + } + + value = onlp_i2c_readw(i2c_bus, PSU_REG, offset, ONLP_I2C_F_FORCE); + + y_value = (value & 0x07FF); + if ((value & 0x8000)&&(y_value)) { + n_value = 0xF0 + (((value) >> 11) & 0x0F); + n_value = (~n_value) +1; + temp = (unsigned int)(1<> 11) & 0x0F); + snprintf(result, sizeof(result), "%d", (y_value*(1<mcelsius = (int)(buf * 1000); + + return ONLP_STATUS_OK; +} + +int +psu_fan_info_get(onlp_fan_info_t* info, int id) +{ + int pw_exist, pw_good, exist_offset, good_offset; + int i2c_bus, psu_mask, rc; + unsigned int tmp_fan_rpm, fan_rpm; + + if (id == FAN_ID_PSU_FAN1) { + i2c_bus = I2C_BUS_PSU1; + exist_offset = PSU1_PRESENT_OFFSET; + good_offset = PSU1_PWGOOD_OFFSET; + } else if (id == FAN_ID_PSU_FAN2) { + i2c_bus = I2C_BUS_PSU2; + exist_offset = PSU2_PRESENT_OFFSET; + good_offset = PSU2_PWGOOD_OFFSET; + } else { + return ONLP_STATUS_E_INTERNAL; + } + + psu_mask = PSU_MUX_MASK; + + /* check psu status */ + if ((rc = psu_present_get(&pw_exist, exist_offset, I2C_BUS_0, psu_mask)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_exist != PSU_STATUS_PRESENT) { + info->rpm = 0; + info->status &= ~ONLP_FAN_STATUS_PRESENT; + return ONLP_STATUS_OK; + } else { + info->status |= ONLP_FAN_STATUS_PRESENT; + } + + if ((rc = psu_pwgood_get(&pw_good, good_offset, I2C_BUS_0, psu_mask)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_good != PSU_STATUS_POWER_GOOD) { + info->rpm = 0; + return ONLP_STATUS_OK; + } + + tmp_fan_rpm = onlp_i2c_readw(i2c_bus, PSU_REG, PSU_FAN_RPM_OFFSET, ONLP_I2C_F_FORCE); + + fan_rpm = (unsigned int)tmp_fan_rpm; + fan_rpm = (fan_rpm & 0x07FF) * (1 << ((fan_rpm >> 11) & 0x1F)); + info->rpm = (int)fan_rpm; + + 
return ONLP_STATUS_OK; +} + +int +psu_vout_get(onlp_psu_info_t* info, int i2c_bus) +{ + int v_value = 0; + int n_value = 0; + unsigned int temp = 0; + char result[32]; + double dvalue; + memset(result, 0, sizeof(result)); + + n_value = onlp_i2c_readb(i2c_bus, PSU_REG, PSU_VOUT_OFFSET1, ONLP_I2C_F_FORCE); + if (n_value < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + v_value = onlp_i2c_readw(i2c_bus, PSU_REG, PSU_VOUT_OFFSET2, ONLP_I2C_F_FORCE); + if (v_value < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + if (n_value & 0x10) { + n_value = 0xF0 + (n_value & 0x0F); + n_value = (~n_value) +1; + temp = (unsigned int)(1< 0.0) { + info->caps |= ONLP_PSU_CAPS_VOUT; + info->mvout = (int)(dvalue * 1000); + } + + return ONLP_STATUS_OK; +} + +int +psu_iout_get(onlp_psu_info_t* info, int i2c_bus) +{ + int value; + unsigned int y_value = 0; + unsigned char n_value = 0; + unsigned int temp = 0; + char result[32]; + memset(result, 0, sizeof(result)); + double dvalue; + + value = onlp_i2c_readw(i2c_bus, PSU_REG, PSU_IOUT_OFFSET, ONLP_I2C_F_FORCE); + if (value < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + y_value = (value & 0x07FF); + if ((value & 0x8000)&&(y_value)) + { + n_value = 0xF0 + (((value) >> 11) & 0x0F); + n_value = (~n_value) +1; + temp = (unsigned int)(1<> 11) & 0x0F); + snprintf(result, sizeof(result), "%d", (y_value*(1< 0.0) { + info->caps |= ONLP_PSU_CAPS_IOUT; + info->miout = (int)(dvalue * 1000); + } + + return ONLP_STATUS_OK; +} + +int +psu_pout_get(onlp_psu_info_t* info, int i2c_bus) +{ + int value; + unsigned int y_value = 0; + unsigned char n_value = 0; + unsigned int temp = 0; + char result[32]; + memset(result, 0, sizeof(result)); + double dvalue; + + value = onlp_i2c_readw(i2c_bus, PSU_REG, PSU_POUT_OFFSET, ONLP_I2C_F_FORCE); + if (value < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + y_value = (value & 0x07FF); + if ((value & 0x8000)&&(y_value)) + { + n_value = 0xF0 + (((value) >> 11) & 0x0F); + n_value = (~n_value) +1; + temp = (unsigned int)(1<> 11) & 
0x0F); + snprintf(result, sizeof(result), "%d", (y_value*(1< 0.0) { + info->caps |= ONLP_PSU_CAPS_POUT; + info->mpout = (int)(dvalue * 1000); + } + + return ONLP_STATUS_OK; +} + +int +psu_pin_get(onlp_psu_info_t* info, int i2c_bus) +{ + int value; + unsigned int y_value = 0; + unsigned char n_value = 0; + unsigned int temp = 0; + char result[32]; + memset(result, 0, sizeof(result)); + double dvalue; + + value = onlp_i2c_readw(i2c_bus, PSU_REG, PSU_PIN_OFFSET, ONLP_I2C_F_FORCE); + if (value < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + y_value = (value & 0x07FF); + if ((value & 0x8000)&&(y_value)) + { + n_value = 0xF0 + (((value) >> 11) & 0x0F); + n_value = (~n_value) +1; + temp = (unsigned int)(1<> 11) & 0x0F); + snprintf(result, sizeof(result), "%d", (y_value*(1< 0.0) { + info->caps |= ONLP_PSU_CAPS_PIN; + info->mpin = (int)(dvalue * 1000); + } + + return ONLP_STATUS_OK; +} + +int +psu_eeprom_get(onlp_psu_info_t* info, int id) +{ + uint8_t data[256]; + char eeprom_path[128]; + int data_len, i, rc; + memset(data, 0, sizeof(data)); + memset(eeprom_path, 0, sizeof(eeprom_path)); + + if (id == PSU_ID_PSU1) { + rc = onlp_file_read(data, sizeof(data), &data_len, PSU1_EEPROM_PATH); + } else { + rc = onlp_file_read(data, sizeof(data), &data_len, PSU2_EEPROM_PATH); + } + + if (rc == ONLP_STATUS_OK) + { + i = 11; + + /* Manufacturer Name */ + data_len = (data[i]&0x0f); + i++; + i += data_len; + + /* Product Name */ + data_len = (data[i]&0x0f); + i++; + memcpy(info->model, (char *) &(data[i]), data_len); + i += data_len; + + /* Product part,model number */ + data_len = (data[i]&0x0f); + i++; + i += data_len; + + /* Product Version */ + data_len = (data[i]&0x0f); + i++; + i += data_len; + + /* Product Serial Number */ + data_len = (data[i]&0x0f); + i++; + memcpy(info->serial, (char *) &(data[i]), data_len); + } else { + strcpy(info->model, "Missing"); + strcpy(info->serial, "Missing"); + } + + return ONLP_STATUS_OK; +} + + +int +psu_present_get(int *pw_exist, int 
exist_offset, int i2c_bus, int psu_mask) +{ + int psu_pres; + + psu_pres = onlp_i2c_readb(i2c_bus, PSU_STATE_REG, 0x0, + ONLP_I2C_F_FORCE); + if (psu_pres < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + *pw_exist = (((psu_pres >> exist_offset) & psu_mask) ? 0 : 1); + return ONLP_STATUS_OK; +} + +int +psu_pwgood_get(int *pw_good, int good_offset, int i2c_bus, int psu_mask) +{ + int psu_pwgood; + + psu_pwgood = onlp_i2c_readb(i2c_bus, PSU_STATE_REG, 0x0, + ONLP_I2C_F_FORCE); + if (psu_pwgood < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + *pw_good = (((psu_pwgood >> good_offset) & psu_mask) ? 1 : 0); + return ONLP_STATUS_OK; +} + +int +qsfp_present_get(int port, int *pres_val) +{ + int reg_addr, val, offset; + + if (port >= 1 && port <= 8) { + reg_addr = QSFP_PRES_REG1; + offset = QSFP_PRES_OFFSET1; + } else if (port >= 9 && port <= 16) { + reg_addr = QSFP_PRES_REG1; + offset = QSFP_PRES_OFFSET2; + } else if (port >= 17 && port <= 24) { + reg_addr = QSFP_PRES_REG2; + offset = QSFP_PRES_OFFSET1; + } else if (port >= 25 && port <= 32) { + reg_addr = QSFP_PRES_REG2; + offset = QSFP_PRES_OFFSET2; + } else { + return ONLP_STATUS_E_INTERNAL; + } + + val = onlp_i2c_readb(I2C_BUS_6, reg_addr, offset, ONLP_I2C_F_FORCE); + if (val < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + *pres_val = val; + + return ONLP_STATUS_OK; +} + + +int +system_led_set(onlp_led_mode_t mode) +{ + int rc; + if(mode == ONLP_LED_MODE_GREEN) { + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_OFFSET, LED_SYS_AND_MASK, + LED_SYS_GMASK, ONLP_I2C_F_FORCE); + } + else if(mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_OFFSET, LED_SYS_AND_MASK, + LED_SYS_YMASK, ONLP_I2C_F_FORCE); + } else { + return ONLP_STATUS_E_INTERNAL; + } + + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +fan_led_set(onlp_led_mode_t mode) +{ + int rc; + + if(mode == ONLP_LED_MODE_GREEN ) { + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_OFFSET, LED_FAN_AND_MASK, 
+ LED_FAN_GMASK, ONLP_I2C_F_FORCE); + } + else if(mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_OFFSET, LED_FAN_AND_MASK, + LED_FAN_YMASK, ONLP_I2C_F_FORCE); + } else { + return ONLP_STATUS_E_INTERNAL; + } + + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +psu1_led_set(onlp_led_mode_t mode) +{ + int rc; + if(mode == ONLP_LED_MODE_GREEN) { + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_PWOK_OFFSET, + LED_PSU1_ON_AND_MASK, LED_PSU1_ON_OR_MASK, + ONLP_I2C_F_FORCE); + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_OFFSET, + LED_PSU1_AND_MASK, LED_PSU1_GMASK, + ONLP_I2C_F_FORCE); + } else if(mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_PWOK_OFFSET, + LED_PSU1_ON_AND_MASK, LED_PSU1_ON_OR_MASK, + ONLP_I2C_F_FORCE); + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_OFFSET, + LED_PSU1_AND_MASK, LED_PSU1_YMASK, + ONLP_I2C_F_FORCE); + } else if(mode == ONLP_LED_MODE_OFF) { + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_PWOK_OFFSET, + LED_PSU1_OFF_AND_MASK, LED_PSU1_OFF_OR_MASK, + ONLP_I2C_F_FORCE); + } else { + return ONLP_STATUS_E_INTERNAL; + } + + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +psu2_led_set(onlp_led_mode_t mode) +{ + int rc; + if(mode == ONLP_LED_MODE_GREEN) { + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_PWOK_OFFSET, + LED_PSU2_ON_AND_MASK, LED_PSU2_ON_OR_MASK, + ONLP_I2C_F_FORCE); + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_OFFSET, + LED_PSU2_AND_MASK, LED_PSU2_GMASK, + ONLP_I2C_F_FORCE); + } else if(mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_PWOK_OFFSET, + LED_PSU2_ON_AND_MASK, LED_PSU2_ON_OR_MASK, + ONLP_I2C_F_FORCE); + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_OFFSET, + LED_PSU2_AND_MASK, LED_PSU2_YMASK, + ONLP_I2C_F_FORCE); + } else if(mode == ONLP_LED_MODE_OFF) { + rc = onlp_i2c_modifyb(I2C_BUS_50, LED_REG, LED_PWOK_OFFSET, + 
LED_PSU2_OFF_AND_MASK, LED_PSU2_OFF_OR_MASK, + ONLP_I2C_F_FORCE); + } else { + return ONLP_STATUS_E_INTERNAL; + } + + + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +fan_tray_led_set(onlp_oid_t id, onlp_led_mode_t mode) +{ + int rc, temp_id; + int fan_tray_id, offset; + + temp_id = ONLP_OID_ID_GET(id); + switch (temp_id) { + case 5: + fan_tray_id = 1; + offset = 2; + break; + case 6: + fan_tray_id = 2; + offset = 2; + break; + case 7: + fan_tray_id = 3; + offset = 3; + break; + case 8: + fan_tray_id = 4; + offset = 3; + break; + default: + return ONLP_STATUS_E_INTERNAL; + break; + } + if (fan_tray_id == 1 || fan_tray_id == 3) { + if (mode == ONLP_LED_MODE_GREEN) { + rc = onlp_i2c_modifyb(I2C_BUS_59, FAN_GPIO_ADDR, offset, 0xFC, + 0x02, ONLP_I2C_F_FORCE); + } else if (mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_59, FAN_GPIO_ADDR, offset, 0xFC, + 0x01, ONLP_I2C_F_FORCE); + } + } else if (fan_tray_id == 2 || fan_tray_id == 4) { + if (mode == ONLP_LED_MODE_GREEN) { + rc = onlp_i2c_modifyb(I2C_BUS_59, FAN_GPIO_ADDR, offset, 0xCF, + 0x20, ONLP_I2C_F_FORCE); + } else if (mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_59, FAN_GPIO_ADDR, offset, 0xCF, + 0x10, ONLP_I2C_F_FORCE); + } + } + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +sysi_platform_info_get(onlp_platform_info_t* pi) +{ + int cpld_release, cpld_version, cpld_rev; + + cpld_rev = onlp_i2c_readb(I2C_BUS_44, CPLD_REG, CPLD_VER_OFFSET, ONLP_I2C_F_FORCE); + if (cpld_rev < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + cpld_release = (((cpld_rev) >> 6 & 0x01)); + cpld_version = (((cpld_rev) & 0x3F)); + + pi->cpld_versions = aim_fstrdup( + "CPLD is %d version(0:RD 1:Release), Revision is 0x%02x\n", + cpld_release, cpld_version); + + return ONLP_STATUS_OK; +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/platform_lib.h 
b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/platform_lib.h new file mode 100755 index 00000000..e7bde010 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/platform_lib.h @@ -0,0 +1,306 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * Copyright 2013 Accton Technology Corporation. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#ifndef __PLATFORM_LIB_H__ +#define __PLATFORM_LIB_H__ + +#include +#include +#include +#include +#include +#include "x86_64_ingrasys_s9180_32x_int.h" +#include "x86_64_ingrasys_s9180_32x_log.h" + +#include +#define SYS_CPU_TEMP_PREFIX "/sys/class/hwmon/hwmon0/" +#define SYS_CORE_TEMP_PREFIX "/sys/class/hwmon/hwmon2/" +#define SYS_CPU_BOARD_TEMP_PREFIX "/sys/class/hwmon/hwmon3/" +#define SYS_PSU1_NEAR_TEMP_PREFIX "/sys/class/hwmon/hwmon4/" +#define SYS_PSU2_NEAR_TEMP_PREFIX "/sys/class/hwmon/hwmon7/" +#define SYS_MAC_REAR_TEMP_PREFIX "/sys/class/hwmon/hwmon5/" +#define SYS_QSFP_NEAR_TEMP_PREFIX "/sys/class/hwmon/hwmon6/" +#define SYS_FAN_PREFIX "/sys/class/hwmon/hwmon1/device/" +#define SYS_EEPROM_PATH "/sys/bus/i2c/devices/0-0055/eeprom" +#define PSU1_EEPROM_PATH "/sys/bus/i2c/devices/58-0050/eeprom" +#define PSU2_EEPROM_PATH "/sys/bus/i2c/devices/57-0050/eeprom" +#define PSU_STATUS_PRESENT 1 +#define PSU_STATUS_POWER_GOOD 1 +#define FAN_PRESENT 0 +#define FAN_CTRL_SET1 1 +#define FAN_CTRL_SET2 2 +#define MAX_SYS_FAN_NUM 8 +#define BOARD_THERMAL_NUM 6 +#define SYS_FAN_NUM 8 +//#define QSFP_NUM 32 +#define PORT_NUM 34 + +#define THERMAL_NUM 15 +#define LED_NUM 4 +#define FAN_NUM 10 + + + +#define THERMAL_SHUTDOWN_DEFAULT 105000 + +#define THERMAL_ERROR_DEFAULT 95000 +#define THERMAL_ERROR_FAN_PERC 100 + +#define THERMAL_WARNING_DEFAULT 77000 +#define THERMAL_WARNING_FAN_PERC 80 + +#define THERMAL_NORMAL_DEFAULT 72000 +#define THERMAL_NORMAL_FAN_PERC 50 + +/* I2C bus */ +#define I2C_BUS_0 0 +#define I2C_BUS_1 1 +#define I2C_BUS_2 2 +#define I2C_BUS_3 3 +#define I2C_BUS_4 4 +#define I2C_BUS_5 5 +#define I2C_BUS_6 6 +#define I2C_BUS_7 7 +#define I2C_BUS_8 8 +#define I2C_BUS_9 9 +#define I2C_BUS_44 44 /* cpld */ +#define I2C_BUS_50 50 /* SYS LED */ +#define I2C_BUS_57 (57) /* PSU2 */ +#define I2C_BUS_58 (58) /* PSU1 */ 
+#define I2C_BUS_59 59 /* FRU */ + +#define I2C_BUS_PSU1 I2C_BUS_58 /* PSU1 */ +#define I2C_BUS_PSU2 I2C_BUS_57 /* PSU2 */ + +/* PSU */ +#define PSU_MUX_MASK 0x01 + +#define PSU_THERMAL1_OFFSET 0x8D +#define PSU_THERMAL2_OFFSET 0x8E +#define PSU_THERMAL_REG 0x58 +#define PSU_FAN_RPM_REG 0x58 +#define PSU_FAN_RPM_OFFSET 0x90 +#define PSU_REG 0x58 +#define PSU_VOUT_OFFSET1 0x20 +#define PSU_VOUT_OFFSET2 0x8B +#define PSU_IOUT_OFFSET 0x8C +#define PSU_POUT_OFFSET 0x96 +#define PSU_PIN_OFFSET 0x97 + +#define PSU_STATE_REG 0x25 +#define PSU1_PRESENT_OFFSET 0x04 +#define PSU2_PRESENT_OFFSET 0x01 +#define PSU1_PWGOOD_OFFSET 0x03 +#define PSU2_PWGOOD_OFFSET 0x00 + +/* LED */ +#define LED_REG 0x75 +#define LED_OFFSET 0x02 +#define LED_PWOK_OFFSET 0x03 + +#define LED_SYS_AND_MASK 0xFE +#define LED_SYS_GMASK 0x01 +#define LED_SYS_YMASK 0x00 + +#define LED_FAN_AND_MASK 0xF9 +#define LED_FAN_GMASK 0x02 +#define LED_FAN_YMASK 0x06 + +#define LED_PSU2_AND_MASK 0xEF +#define LED_PSU2_GMASK 0x00 +#define LED_PSU2_YMASK 0x10 +#define LED_PSU2_ON_AND_MASK 0xFD +#define LED_PSU2_ON_OR_MASK 0x02 +#define LED_PSU2_OFF_AND_MASK 0xFD +#define LED_PSU2_OFF_OR_MASK 0x00 +#define LED_PSU1_AND_MASK 0xF7 +#define LED_PSU1_GMASK 0x00 +#define LED_PSU1_YMASK 0x08 +#define LED_PSU1_ON_AND_MASK 0xFE +#define LED_PSU1_ON_OR_MASK 0x01 +#define LED_PSU1_OFF_AND_MASK 0xFE +#define LED_PSU1_OFF_OR_MASK 0x00 +#define LED_SYS_ON_MASK 0x00 +#define LED_SYS_OFF_MASK 0x33 + +/* SYS */ +#define CPLD_REG 0x33 +#define CPLD_VER_OFFSET 0x01 + +/* QSFP */ +#define QSFP_PRES_REG1 0x20 +#define QSFP_PRES_REG2 0x21 +#define QSFP_PRES_OFFSET1 0x00 +#define QSFP_PRES_OFFSET2 0x01 + +/* FAN */ +#define FAN_GPIO_ADDR 0x20 +#define FAN_1_2_PRESENT_MASK 0x40 +#define FAN_3_4_PRESENT_MASK 0x04 +#define FAN_5_6_PRESENT_MASK 0x40 +#define FAN_7_8_PRESENT_MASK 0x04 + +/** led_oid */ +typedef enum led_oid_e { + LED_OID_SYSTEM = ONLP_LED_ID_CREATE(1), + LED_OID_FAN = ONLP_LED_ID_CREATE(2), + LED_OID_PSU1 = 
ONLP_LED_ID_CREATE(3), + LED_OID_PSU2 = ONLP_LED_ID_CREATE(4), + LED_OID_FAN_TRAY1 = ONLP_LED_ID_CREATE(5), + LED_OID_FAN_TRAY2 = ONLP_LED_ID_CREATE(6), + LED_OID_FAN_TRAY3 = ONLP_LED_ID_CREATE(7), + LED_OID_FAN_TRAY4 = ONLP_LED_ID_CREATE(8), +} led_oid_t; + +/** led_id */ +typedef enum led_id_e { + LED_SYSTEM_LED = 1, + LED_FAN_LED = 2, + LED_PSU1_LED = 3, + LED_PSU2_LED = 4, + LED_FAN_TRAY1 = 5, + LED_FAN_TRAY2 = 6, + LED_FAN_TRAY3 = 7, + LED_FAN_TRAY4 = 8, +} led_id_t; + +/** Thermal_oid */ +typedef enum thermal_oid_e { + THERMAL_OID_FRONT_MAC = ONLP_THERMAL_ID_CREATE(1), + THERMAL_OID_ASIC = ONLP_THERMAL_ID_CREATE(2), + THERMAL_OID_CPU1 = ONLP_THERMAL_ID_CREATE(3), + THERMAL_OID_CPU2 = ONLP_THERMAL_ID_CREATE(4), + THERMAL_OID_CPU3 = ONLP_THERMAL_ID_CREATE(5), + THERMAL_OID_CPU4 = ONLP_THERMAL_ID_CREATE(6), + THERMAL_OID_PSU1_1 = ONLP_THERMAL_ID_CREATE(7), + THERMAL_OID_PSU1_2 = ONLP_THERMAL_ID_CREATE(8), + THERMAL_OID_PSU2_1 = ONLP_THERMAL_ID_CREATE(9), + THERMAL_OID_PSU2_2 = ONLP_THERMAL_ID_CREATE(10), + THERMAL_OID_CPU_BOARD = ONLP_THERMAL_ID_CREATE(11), + THERMAL_OID_PSU1_NEAR = ONLP_THERMAL_ID_CREATE(12), + THERMAL_OID_PSU2_NEAR = ONLP_THERMAL_ID_CREATE(13), + THERMAL_OID_MAC_REAR = ONLP_THERMAL_ID_CREATE(14), + THERMAL_OID_QSFP_NEAR = ONLP_THERMAL_ID_CREATE(15), +} thermal_oid_t; + +/** thermal_id */ +typedef enum thermal_id_e { + THERMAL_ID_FRONT_MAC = 1, + THERMAL_ID_ASIC = 2, + THERMAL_ID_CPU1 = 3, + THERMAL_ID_CPU2 = 4, + THERMAL_ID_CPU3 = 5, + THERMAL_ID_CPU4 = 6, + THERMAL_ID_PSU1_1 = 7, + THERMAL_ID_PSU1_2 = 8, + THERMAL_ID_PSU2_1 = 9, + THERMAL_ID_PSU2_2 = 10, + THERMAL_ID_CPU_BOARD = 11, + THERMAL_ID_PSU1_NEAR = 12, + THERMAL_ID_PSU2_NEAR = 13, + THERMAL_ID_MAC_REAR = 14, + THERMAL_ID_QSFP_NEAR = 15, +} thermal_id_t; + +/* Shortcut for CPU thermal threshold value. 
*/ +#define THERMAL_THRESHOLD_INIT_DEFAULTS \ + { THERMAL_WARNING_DEFAULT, \ + THERMAL_ERROR_DEFAULT, \ + THERMAL_SHUTDOWN_DEFAULT } + +/** Fan_oid */ +typedef enum fan_oid_e { + FAN_OID_FAN1 = ONLP_FAN_ID_CREATE(1), + FAN_OID_FAN2 = ONLP_FAN_ID_CREATE(2), + FAN_OID_FAN3 = ONLP_FAN_ID_CREATE(3), + FAN_OID_FAN4 = ONLP_FAN_ID_CREATE(4), + FAN_OID_FAN5 = ONLP_FAN_ID_CREATE(5), + FAN_OID_FAN6 = ONLP_FAN_ID_CREATE(6), + FAN_OID_FAN7 = ONLP_FAN_ID_CREATE(7), + FAN_OID_FAN8 = ONLP_FAN_ID_CREATE(8), + FAN_OID_PSU_FAN1 = ONLP_FAN_ID_CREATE(9), + FAN_OID_PSU_FAN2 = ONLP_FAN_ID_CREATE(10) +} fan_oid_t; + +/** fan_id */ +typedef enum fan_id_e { + FAN_ID_FAN1 = 1, + FAN_ID_FAN2 = 2, + FAN_ID_FAN3 = 3, + FAN_ID_FAN4 = 4, + FAN_ID_FAN5 = 5, + FAN_ID_FAN6 = 6, + FAN_ID_FAN7 = 7, + FAN_ID_FAN8 = 8, + FAN_ID_PSU_FAN1 = 9, + FAN_ID_PSU_FAN2 = 10 +} fan_id_t; + +/** led_oid */ +typedef enum psu_oid_e { + PSU_OID_PSU1 = ONLP_PSU_ID_CREATE(1), + PSU_OID_PSU2 = ONLP_PSU_ID_CREATE(2) +} psu_oid_t; + +/** fan_id */ +typedef enum psu_id_e { + PSU_ID_PSU1 = 1, + PSU_ID_PSU2 = 2 +} psu_id_t; + +int psu_thermal_get(onlp_thermal_info_t* info, int id); + +int psu_fan_info_get(onlp_fan_info_t* info, int id); + +int psu_vout_get(onlp_psu_info_t* info, int i2c_bus); + +int psu_iout_get(onlp_psu_info_t* info, int i2c_bus); + +int psu_pout_get(onlp_psu_info_t* info, int i2c_bus); + +int psu_pin_get(onlp_psu_info_t* info, int i2c_bus); + +int psu_eeprom_get(onlp_psu_info_t* info, int id); + +int psu_present_get(int *pw_exist, int exist_offset, + int i2c_bus, int psu_mask); + +int psu_pwgood_get(int *pw_good, int good_offset, int i2c_bus, int psu_mask); + +int psu2_led_set(onlp_led_mode_t mode); + +int psu1_led_set(onlp_led_mode_t mode); + +int fan_led_set(onlp_led_mode_t mode); + +int system_led_set(onlp_led_mode_t mode); + +int fan_tray_led_set(onlp_oid_t id, onlp_led_mode_t mode); + +int sysi_platform_info_get(onlp_platform_info_t* pi); + +int qsfp_present_get(int port, int *pres_val); + +#endif /* 
__PLATFORM_LIB_H__ */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/psui.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/psui.c new file mode 100755 index 00000000..6ef1c98f --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/psui.c @@ -0,0 +1,160 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include + +#include "platform_lib.h" + +static onlp_psu_info_t pinfo[] = +{ + { }, /* Not used */ + { + { + PSU_OID_PSU1, + "PSU-1", + 0, + { + FAN_OID_PSU_FAN1, + }, + } + }, + { + { + PSU_OID_PSU2, + "PSU-2", + 0, + { + FAN_OID_PSU_FAN2, + }, + } + } +}; + +int +onlp_psui_init(void) +{ + return ONLP_STATUS_OK; +} + +int +psu_status_info_get(int id, onlp_psu_info_t *info) +{ + int pw_exist, exist_offset; + int pw_good, good_offset; + int rc, psu_mask, i2c_bus; + + if (id == PSU_ID_PSU1) { + i2c_bus = I2C_BUS_PSU1; + exist_offset = PSU1_PRESENT_OFFSET; + good_offset = PSU1_PWGOOD_OFFSET; + } else if (id == PSU_ID_PSU2) { + i2c_bus = I2C_BUS_PSU2; + exist_offset = PSU2_PRESENT_OFFSET; + good_offset = PSU2_PWGOOD_OFFSET; + } else { + return ONLP_STATUS_E_INTERNAL; + } + + psu_mask = PSU_MUX_MASK; + + /* Get power present status */ + if ((rc = psu_present_get(&pw_exist, exist_offset, I2C_BUS_0, psu_mask)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_exist != PSU_STATUS_PRESENT) { + info->status &= ~ONLP_PSU_STATUS_PRESENT; + info->status |= ONLP_PSU_STATUS_FAILED; + return ONLP_STATUS_OK; + } + info->status |= ONLP_PSU_STATUS_PRESENT; + + /* Get power good status */ + if ((rc = psu_pwgood_get(&pw_good, good_offset, I2C_BUS_0, psu_mask)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_good != PSU_STATUS_POWER_GOOD) { + info->status |= ONLP_PSU_STATUS_UNPLUGGED; + return ONLP_STATUS_OK; + } else { + info->status &= ~ONLP_PSU_STATUS_UNPLUGGED; + } + + /* Get power eeprom status */ + if ((rc = psu_eeprom_get(info, id)) != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Get power iout status */ + if ((rc = psu_iout_get(info, i2c_bus)) != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Get power pout status */ + if ((rc = psu_pout_get(info, 
i2c_bus)) != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Get power pin status */ + if ((rc = psu_pin_get(info, i2c_bus)) != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Get power vout status */ + if ((rc = psu_vout_get(info, i2c_bus)) != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +onlp_psui_info_get(onlp_oid_t id, onlp_psu_info_t* info) +{ + int pid; + + pid = ONLP_OID_ID_GET(id); + memset(info, 0, sizeof(onlp_psu_info_t)); + + /* Set the onlp_oid_hdr_t */ + *info = pinfo[pid]; + + switch (pid) { + case PSU_ID_PSU1: + case PSU_ID_PSU2: + return psu_status_info_get(pid, info); + break; + default: + return ONLP_STATUS_E_UNSUPPORTED; + break; + } + + return ONLP_STATUS_OK; + + +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/sfpi.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/sfpi.c new file mode 100755 index 00000000..a8b68f3a --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/sfpi.c @@ -0,0 +1,171 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include "x86_64_ingrasys_s9180_32x_log.h" +#include "platform_lib.h" + +int +onlp_sfpi_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_bitmap_get(onlp_sfp_bitmap_t* bmap) +{ + int p; + for(p = 1; p <= PORT_NUM; p++) { + AIM_BITMAP_SET(bmap, p); + } + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_is_present(int port) +{ + int status, rc, gpio_num; + + if (port >= 1 && port <= 16) { + gpio_num = 496 + ((port - 1) ^ 1); + } else if (port >= 17 && port <= 32) { + gpio_num = 464 + ((port - 1) ^ 1); + } else if (port == 33) { + gpio_num = 432; + } else if (port == 34) { + gpio_num = 433; + } + + if ((rc = onlp_file_read_int(&status, "/sys/class/gpio/gpio%d/value", + gpio_num)) != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + return status; +} + + +int +onlp_sfpi_presence_bitmap_get(onlp_sfp_bitmap_t* dst) +{ + int p = 1; + int rc = 0; + + for (p = 1; p <= PORT_NUM; p++) { + rc = onlp_sfpi_is_present(p); + AIM_BITMAP_MOD(dst, p, (1 == rc) ? 1 : 0); + } + + return ONLP_STATUS_OK; +} + +/* + * This function reads the SFPs idrom and returns in + * in the data buffer provided. 
+ */ +int +onlp_sfpi_eeprom_read(int port, uint8_t data[256]) +{ + int eeprombusidx, eeprombus, eeprombusbase; + char eeprom_path[512], eeprom_addr[32]; + memset(eeprom_path, 0, sizeof(eeprom_path)); + memset(eeprom_addr, 0, sizeof(eeprom_addr)); + strncpy(eeprom_addr, "0050", sizeof(eeprom_addr)); + + memset(data, 0, 256); + + if (port >= 1 && port <= 8) { + eeprombusbase = 9; + } else if (port >= 9 && port <= 16) { + eeprombusbase = 17; + } else if (port >= 17 && port <= 24) { + eeprombusbase = 25; + } else if (port >= 25 && port <= 32) { + eeprombusbase = 33; + } else if (port == 33) { + eeprombus = 45; + } else if (port == 34) { + eeprombus = 46; + } else { + return 0; + } + + /* port 33 and 34 doesn't need to swap */ + if (port >=1 && port <= 32) { + eeprombusidx = port % 8; + switch (eeprombusidx) { + case 1: + eeprombus = eeprombusbase + 1; + break; + case 2: + eeprombus = eeprombusbase + 0; + break; + case 3: + eeprombus = eeprombusbase + 3; + break; + case 4: + eeprombus = eeprombusbase + 2; + break; + case 5: + eeprombus = eeprombusbase + 5; + break; + case 6: + eeprombus = eeprombusbase + 4; + break; + case 7: + eeprombus = eeprombusbase + 7; + break; + case 0: + eeprombus = eeprombusbase + 6; + break; + default: + return ONLP_STATUS_E_INTERNAL; + break; + } + } + + snprintf(eeprom_path, sizeof(eeprom_path), + "/sys/bus/i2c/devices/%d-%s/eeprom", eeprombus, eeprom_addr); + + if (onlplib_sfp_eeprom_read_file(eeprom_path, data) != 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * De-initialize the SFPI subsystem. 
+ */ +int +onlp_sfpi_denit(void) +{ + return ONLP_STATUS_OK; +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/sysi.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/sysi.c new file mode 100755 index 00000000..08a81c60 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/sysi.c @@ -0,0 +1,346 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "platform_lib.h" + +const char* +onlp_sysi_platform_get(void) +{ + return "x86-64-ingrasys-s9180-32x-r0"; +} + +int +onlp_sysi_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_sysi_onie_data_get(uint8_t** data, int* size) +{ + uint8_t* rdata = aim_zmalloc(256); + if(onlp_file_read(rdata, 256, size, SYS_EEPROM_PATH) == ONLP_STATUS_OK) { + if(*size == 256) { + *data = rdata; + return ONLP_STATUS_OK; + } + } + + AIM_LOG_INFO("Unable to get data from eeprom \n"); + aim_free(rdata); + *size = 0; + return ONLP_STATUS_E_INTERNAL; +} + +int +onlp_sysi_oids_get(onlp_oid_t* table, int max) +{ + onlp_oid_t* e = table; + memset(table, 0, max*sizeof(onlp_oid_t)); + int i; + + /* 2 PSUs */ + *e++ = ONLP_PSU_ID_CREATE(1); + *e++ = ONLP_PSU_ID_CREATE(2); + + /* LEDs Item */ + for (i=1; i<=LED_NUM; i++) { + *e++ = ONLP_LED_ID_CREATE(i); + } + + /* THERMALs Item */ + for (i=1; i<=THERMAL_NUM; i++) { + *e++ = ONLP_THERMAL_ID_CREATE(i); + } + + /* Fans Item */ + for (i=1; i<=FAN_NUM; i++) { + *e++ = ONLP_FAN_ID_CREATE(i); + } + + return ONLP_STATUS_OK; +} + +int +decide_fan_percentage(int is_up, int new_temp) +{ + int new_perc; + if (is_up) { + if (new_temp >= THERMAL_ERROR_DEFAULT) { + new_perc = THERMAL_ERROR_FAN_PERC; + } else if (new_temp >= THERMAL_WARNING_DEFAULT) { + new_perc = THERMAL_WARNING_FAN_PERC; + } else { + new_perc = THERMAL_NORMAL_FAN_PERC; + } + } else { + if (new_temp <= THERMAL_NORMAL_DEFAULT) { + new_perc = THERMAL_NORMAL_FAN_PERC; + } else if (new_temp <= THERMAL_WARNING_DEFAULT) { + new_perc = THERMAL_WARNING_FAN_PERC; + } else { + new_perc = THERMAL_ERROR_FAN_PERC; + } + } + + return new_perc; +} + +int +platform_thermal_temp_get(int *thermal_temp) +{ + int i, temp, max_temp, rc; + 
onlp_thermal_info_t thermal_info; + memset(&thermal_info, 0, sizeof(thermal_info)); + uint32_t thermal_arr[] = { THERMAL_OID_FRONT_MAC, + THERMAL_OID_ASIC, + THERMAL_OID_CPU1, + THERMAL_OID_CPU2, + THERMAL_OID_CPU3, + THERMAL_OID_CPU4 }; + max_temp = 0; + + for (i=0; i max_temp) { + max_temp = temp; + } + } + *thermal_temp = max_temp; + + return ONLP_STATUS_OK; +} + +int +onlp_sysi_platform_manage_fans(void) +{ + int rc, is_up ,new_temp, thermal_temp, diff; + static int new_perc = 0, ori_perc = 0; + static int ori_temp = 0; + onlp_thermal_info_t thermal_info; + memset(&thermal_info, 0, sizeof(thermal_info)); + + /* get new temperature */ + if ((rc = platform_thermal_temp_get(&thermal_temp)) != ONLP_STATUS_OK) { + goto _EXIT; + } + + new_temp = thermal_temp; + diff = new_temp - ori_temp; + + if (diff == 0) { + goto _EXIT; + } else { + is_up = (diff > 0 ? 1 : 0); + } + + new_perc = decide_fan_percentage(is_up, new_temp); + + if (ori_perc == new_perc) { + goto _EXIT; + } + + + AIM_LOG_INFO("The Fan Speeds Percent are now at %d%%", new_perc); + + if ((rc = onlp_fani_percentage_set(THERMAL_OID_ASIC, new_perc)) != ONLP_STATUS_OK) { + goto _EXIT; + } + + /* update */ + ori_perc = new_perc; + ori_temp = new_temp; + +_EXIT : + return rc; +} + +int +onlp_sysi_platform_manage_leds(void) +{ + int psu1_status, psu2_status, rc, i; + static int pre_psu1_status = 0, pre_psu2_status = 0, pre_fan_status = 0; + + //------------------------------- + static int pre_fan_tray_status[4] = {0}; + int fan_tray_id, sum, total = 0; + onlp_led_status_t fan_tray_status[SYS_FAN_NUM]; + //------------------------------- + + onlp_psu_info_t psu_info; + onlp_fan_info_t fan_info; + + //-------- ----------------------- + memset(&fan_tray_status, 0, sizeof(fan_tray_status)); + //------------------------------- + + memset(&psu_info, 0, sizeof(onlp_psu_info_t)); + memset(&fan_info, 0, sizeof(onlp_fan_info_t)); + uint32_t fan_arr[] = { FAN_OID_FAN1, + FAN_OID_FAN2, + FAN_OID_FAN3, + FAN_OID_FAN4, + 
FAN_OID_FAN5, + FAN_OID_FAN6, + FAN_OID_FAN7, + FAN_OID_FAN8, }; + + /* PSU LED CTRL */ + if ((rc = onlp_psui_info_get(PSU_OID_PSU1, &psu_info)) != ONLP_STATUS_OK) { + goto _EXIT; + } + + psu1_status = psu_info.status; + if (psu1_status != pre_psu1_status) { + if((psu1_status & ONLP_PSU_STATUS_PRESENT) == 0) { + rc = onlp_ledi_mode_set(LED_OID_PSU1, ONLP_LED_MODE_OFF); + } + else if(psu1_status != ONLP_PSU_STATUS_PRESENT) { + rc = onlp_ledi_mode_set(LED_OID_PSU1, ONLP_LED_MODE_ORANGE); + } else { + rc = onlp_ledi_mode_set(LED_OID_PSU1, ONLP_LED_MODE_GREEN); + } + + if (rc != ONLP_STATUS_OK) { + goto _EXIT; + } + pre_psu1_status = psu1_status; + } + + if ((rc = onlp_psui_info_get(PSU_OID_PSU2, &psu_info)) != ONLP_STATUS_OK) { + goto _EXIT; + } + + psu2_status = psu_info.status; + if( psu2_status != pre_psu2_status) { + if((psu2_status & ONLP_PSU_STATUS_PRESENT) == 0) { + rc = onlp_ledi_mode_set(LED_OID_PSU2, ONLP_LED_MODE_OFF); + } + else if(psu2_status != ONLP_PSU_STATUS_PRESENT) { + rc = onlp_ledi_mode_set(LED_OID_PSU2, ONLP_LED_MODE_ORANGE); + } else { + rc = onlp_ledi_mode_set(LED_OID_PSU2, ONLP_LED_MODE_GREEN); + } + + if (rc != ONLP_STATUS_OK) { + goto _EXIT; + } + pre_psu2_status = psu2_status; + } + + /* FAN LED CTRL */ + for (i=0; i ONLP_LED_STATUS_FAILED) { + rc = onlp_ledi_mode_set(fan_tray_id, ONLP_LED_MODE_ORANGE); + + } else { + rc = onlp_ledi_mode_set(fan_tray_id, ONLP_LED_MODE_GREEN); + } + + if (rc != ONLP_STATUS_OK) { + goto _EXIT; + } + + pre_fan_tray_status[fan_tray_id - 5] = sum; + } + } + } + + + if (total != pre_fan_status) { + if (total == (ONLP_LED_STATUS_PRESENT * 8)) { + rc = onlp_ledi_mode_set(LED_OID_FAN, ONLP_LED_MODE_GREEN); + } else { + rc = onlp_ledi_mode_set(LED_OID_FAN, ONLP_LED_MODE_ORANGE); + } + + if (rc != ONLP_STATUS_OK) { + goto _EXIT; + } + + pre_fan_status = total; + } + +_EXIT : + return rc; +} + +int +onlp_sysi_platform_info_get(onlp_platform_info_t* pi) +{ + int rc; + if ((rc = sysi_platform_info_get(pi)) != 
ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/thermali.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/thermali.c new file mode 100755 index 00000000..95bebcc7 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/thermali.c @@ -0,0 +1,301 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * Thermal Sensor Platform Implementation. 
+ * + ***********************************************************/ +#include +#include +#include "x86_64_ingrasys_s9180_32x_log.h" +#include "platform_lib.h" + +static onlp_thermal_info_t thermal_info[] = { + { }, /* Not used */ + { { THERMAL_OID_FRONT_MAC, "Front MAC", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_ASIC, "ASIC Core Temp", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_CPU1, "CPU Thermal 1", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_CPU2, "CPU Thermal 2", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_CPU3, "CPU Thermal 3", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_CPU4, "CPU Thermal 4", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_PSU1_1, "PSU-1 Thermal 1", PSU_OID_PSU1}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_PSU1_2, "PSU-1 Thermal 2", PSU_OID_PSU1}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_PSU2_1, "PSU-2 Thermal 1", PSU_OID_PSU2}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_PSU2_2, "PSU-2 Thermal 2", PSU_OID_PSU2}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_CPU_BOARD, "CPU Board", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_PSU1_NEAR, "Near PSU 1", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_PSU2_NEAR, "Near PSU 2", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { 
THERMAL_OID_MAC_REAR, "Rear MAC", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_QSFP_NEAR, "Near QSFP Port", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + } +}; + +/* + * This will be called to intiialize the thermali subsystem. + */ +int +onlp_thermali_init(void) +{ + return ONLP_STATUS_OK; +} + +static int +sys_thermal_info_get(onlp_thermal_info_t* info, int id) +{ + int rv; + + rv = onlp_file_read_int(&info->mcelsius, + SYS_CORE_TEMP_PREFIX "temp%d_input", id); + + if(rv == ONLP_STATUS_E_INTERNAL) { + return rv; + } + + if(rv == ONLP_STATUS_E_MISSING) { + info->status &= ~1; + return 0; + } + + return ONLP_STATUS_OK; +} + +static int +cpu_thermal_info_get(onlp_thermal_info_t* info, int id) +{ + int rv; + int offset; + + offset = 1; + id = id - offset; + rv = onlp_file_read_int(&info->mcelsius, + SYS_CPU_TEMP_PREFIX "temp%d_input", id); + + if(rv == ONLP_STATUS_E_INTERNAL) { + return rv; + } + + if(rv == ONLP_STATUS_E_MISSING) { + info->status &= ~1; + return 0; + } + + return ONLP_STATUS_OK; +} + +int +psu_thermal_info_get(onlp_thermal_info_t* info, int id) +{ + int rv; + + rv = psu_thermal_get(info, id); + if(rv == ONLP_STATUS_E_INTERNAL) { + return rv; + } + + return ONLP_STATUS_OK; +} + +static int +cpu_board_thermal_info_get(onlp_thermal_info_t* info) +{ + int rv; + + rv = onlp_file_read_int(&info->mcelsius, + SYS_CPU_BOARD_TEMP_PREFIX "temp1_input"); + + if (rv == ONLP_STATUS_E_INTERNAL) { + return rv; + } + + if (rv == ONLP_STATUS_E_MISSING) { + info->status &= ~1; + return 0; + } + + return ONLP_STATUS_OK; +} + +static int +psu_near_thermal_info_get(onlp_thermal_info_t* info, int id) +{ + int rv; + + if (id == THERMAL_ID_PSU1_NEAR) { + rv = onlp_file_read_int(&info->mcelsius, + SYS_PSU1_NEAR_TEMP_PREFIX "temp1_input"); + } else if (id == THERMAL_ID_PSU2_NEAR) { + rv = onlp_file_read_int(&info->mcelsius, + SYS_PSU2_NEAR_TEMP_PREFIX "temp1_input"); + } else { + return 
ONLP_STATUS_E_INTERNAL; + } + + if (rv == ONLP_STATUS_E_INTERNAL) { + return rv; + } + + if (rv == ONLP_STATUS_E_MISSING) { + info->status &= ~1; + return 0; + } + + return ONLP_STATUS_OK; +} + +static int +mac_rear_thermal_info_get(onlp_thermal_info_t* info) +{ + int rv; + + rv = onlp_file_read_int(&info->mcelsius, + SYS_MAC_REAR_TEMP_PREFIX "temp1_input"); + + if (rv == ONLP_STATUS_E_INTERNAL) { + return rv; + } + + if (rv == ONLP_STATUS_E_MISSING) { + info->status &= ~1; + return 0; + } + + return ONLP_STATUS_OK; +} + +static int +qsfp_near_thermal_info_get(onlp_thermal_info_t* info) +{ + int rv; + + rv = onlp_file_read_int(&info->mcelsius, + SYS_QSFP_NEAR_TEMP_PREFIX "temp1_input"); + + if (rv == ONLP_STATUS_E_INTERNAL) { + return rv; + } + + if (rv == ONLP_STATUS_E_MISSING) { + info->status &= ~1; + return 0; + } + + return ONLP_STATUS_OK; +} + +/* + * Retrieve the information structure for the given thermal OID. + * + * If the OID is invalid, return ONLP_E_STATUS_INVALID. + * If an unexpected error occurs, return ONLP_E_STATUS_INTERNAL. + * Otherwise, return ONLP_STATUS_OK with the OID's information. + * + * Note -- it is expected that you fill out the information + * structure even if the sensor described by the OID is not present. 
+ */ +int +onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) +{ + int sensor_id, rc; + sensor_id = ONLP_OID_ID_GET(id); + + *info = thermal_info[sensor_id]; + info->caps |= ONLP_THERMAL_CAPS_GET_TEMPERATURE; + + switch (sensor_id) { + case THERMAL_ID_ASIC: + case THERMAL_ID_FRONT_MAC: + rc = sys_thermal_info_get(info, sensor_id); + break; + case THERMAL_ID_CPU1: + case THERMAL_ID_CPU2: + case THERMAL_ID_CPU3: + case THERMAL_ID_CPU4: + rc = cpu_thermal_info_get(info, sensor_id); + break; + case THERMAL_ID_PSU1_1: + case THERMAL_ID_PSU1_2: + case THERMAL_ID_PSU2_1: + case THERMAL_ID_PSU2_2: + rc = psu_thermal_info_get(info, sensor_id); + break; + case THERMAL_ID_CPU_BOARD: + rc = cpu_board_thermal_info_get(info); + break; + case THERMAL_ID_PSU1_NEAR: + case THERMAL_ID_PSU2_NEAR: + rc = psu_near_thermal_info_get(info, sensor_id); + break; + case THERMAL_ID_MAC_REAR: + rc = mac_rear_thermal_info_get(info); + break; + case THERMAL_ID_QSFP_NEAR: + rc = qsfp_near_thermal_info_get(info); + break; + default: + return ONLP_STATUS_E_INTERNAL; + break; + } + + return rc; +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_config.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_config.c new file mode 100755 index 00000000..d0dab1c8 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_config.c @@ -0,0 +1,101 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +/* */ +#define __x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME(_x) #_x +#define __x86_64_ingrasys_s9180_32x_config_STRINGIFY_VALUE(_x) __x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME(_x) +x86_64_ingrasys_s9180_32x_config_settings_t x86_64_ingrasys_s9180_32x_config_settings[] = +{ +#ifdef X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_LOGGING + { __x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_LOGGING), __x86_64_ingrasys_s9180_32x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_LOGGING) }, +#else +{ X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_LOGGING(__x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_OPTIONS_DEFAULT + { __x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_OPTIONS_DEFAULT), __x86_64_ingrasys_s9180_32x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_OPTIONS_DEFAULT) }, +#else +{ X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_OPTIONS_DEFAULT(__x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_BITS_DEFAULT + { __x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_BITS_DEFAULT), __x86_64_ingrasys_s9180_32x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_BITS_DEFAULT) }, +#else +{ 
X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_BITS_DEFAULT(__x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_CUSTOM_BITS_DEFAULT + { __x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_CUSTOM_BITS_DEFAULT), __x86_64_ingrasys_s9180_32x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_CUSTOM_BITS_DEFAULT) }, +#else +{ X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_CUSTOM_BITS_DEFAULT(__x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB + { __x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB), __x86_64_ingrasys_s9180_32x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB) }, +#else +{ X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_STDLIB(__x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + { __x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS), __x86_64_ingrasys_s9180_32x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS) }, +#else +{ X86_64_INGRAYSYS_S9180_32X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS(__x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_UCLI + { __x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_UCLI), __x86_64_ingrasys_s9180_32x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_UCLI) }, +#else +{ X86_64_INGRAYSYS_S9180_32X_CONFIG_INCLUDE_UCLI(__x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9180_32X_CONFIG_SFP_COUNT + { __x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9180_32X_CONFIG_SFP_COUNT), 
__x86_64_ingrasys_s9180_32x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9180_32X_CONFIG_SFP_COUNT) }, +#else +{ X86_64_INGRAYSYS_S9180_32X_CONFIG_SFP_COUNT(__x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME), "__undefined__" }, +#endif + { NULL, NULL } +}; +#undef __x86_64_ingrasys_s9180_32x_config_STRINGIFY_VALUE +#undef __x86_64_ingrasys_s9180_32x_config_STRINGIFY_NAME + +const char* +x86_64_ingrasys_s9180_32x_config_lookup(const char* setting) +{ + int i; + for(i = 0; x86_64_ingrasys_s9180_32x_config_settings[i].name; i++) { + if(strcmp(x86_64_ingrasys_s9180_32x_config_settings[i].name, setting)) { + return x86_64_ingrasys_s9180_32x_config_settings[i].value; + } + } + return NULL; +} + +int +x86_64_ingrasys_s9180_32x_config_show(struct aim_pvs_s* pvs) +{ + int i; + for(i = 0; x86_64_ingrasys_s9180_32x_config_settings[i].name; i++) { + aim_printf(pvs, "%s = %s\n", x86_64_ingrasys_s9180_32x_config_settings[i].name, x86_64_ingrasys_s9180_32x_config_settings[i].value); + } + return i; +} + +/* */ + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_enums.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_enums.c new file mode 100755 index 00000000..30d7f908 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_enums.c @@ -0,0 +1,30 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +/* <--auto.start.enum(ALL).source> */ +/* */ + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_int.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_int.h new file mode 100755 index 00000000..54e126e4 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_int.h @@ -0,0 +1,29 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#ifndef __x86_64_ingrasys_s9180_32x_INT_H__ +#define __x86_64_ingrasys_s9180_32x_INT_H__ + +#endif /* __x86_64_ingrasys_s9180_32x_INT_H__ */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_log.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_log.c new file mode 100755 index 00000000..c1e2fcaf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_log.c @@ -0,0 +1,38 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +#include "x86_64_ingrasys_s9180_32x_log.h" +/* + * x86_64_ingrasys_s9180_32x log struct. 
+ */ +AIM_LOG_STRUCT_DEFINE( + X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_OPTIONS_DEFAULT, + X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_BITS_DEFAULT, + NULL, /* Custom log map */ + X86_64_INGRAYSYS_S9180_32X_CONFIG_LOG_CUSTOM_BITS_DEFAULT + ); + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_log.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_log.h new file mode 100755 index 00000000..f69147bf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_log.h @@ -0,0 +1,32 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#ifndef __x86_64_ingrasys_s9180_32x_LOG_H__ +#define __x86_64_ingrasys_s9180_32x_LOG_H__ + +#define AIM_LOG_MODULE_NAME x86_64_ingrasys_s9180_32x +#include + +#endif /* __x86_64_ingrasys_s9180_32x_LOG_H__ */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_module.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_module.c new file mode 100755 index 00000000..35ffa630 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_module.c @@ -0,0 +1,44 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +#include "x86_64_ingrasys_s9180_32x_log.h" + +static int +datatypes_init__(void) +{ +#define INGRAYSYS_S9180_32X_ENUMERATION_ENTRY(_enum_name, _desc) AIM_DATATYPE_MAP_REGISTER(_enum_name, _enum_name##_map, _desc, AIM_LOG_INTERNAL); +#include + return 0; +} + +void __x86_64_ingrasys_s9180_32x_module_init__(void) +{ + AIM_LOG_STRUCT_REGISTER(); + datatypes_init__(); +} + +int __onlp_platform_version__ = 1; diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_ucli.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_ucli.c new file mode 100755 index 00000000..dee69a62 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/onlp/builds/src/x86_64_ingrasys_s9180_32x/module/src/x86_64_ingrasys_s9180_32x_ucli.c @@ -0,0 +1,82 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +#if ONLPSIM_CONFIG_INCLUDE_UCLI == 1 + +#include +#include +#include + +static ucli_status_t +x86_64_ingrasys_s9180_32x_ucli_ucli__config__(ucli_context_t* uc) +{ + UCLI_HANDLER_MACRO_MODULE_CONFIG(x86_64_ingrasys_s9180_32x) +} + +/* */ +/****************************************************************************** + * + * These handler table(s) were autogenerated from the symbols in this + * source file. + * + *****************************************************************************/ +static ucli_command_handler_f x86_64_ingrasys_s9180_32x_ucli_ucli_handlers__[] = +{ + x86_64_ingrasys_s9180_32x_ucli_ucli__config__, + NULL +}; +/******************************************************************************/ +/* */ + +static ucli_module_t +x86_64_ingrasys_s9180_32x_ucli_module__ = + { + "x86_64_ingrasys_s9180_32x_ucli", + NULL, + x86_64_ingrasys_s9180_32x_ucli_ucli_handlers__, + NULL, + NULL, + }; + +ucli_node_t* +x86_64_ingrasys_s9180_32x_ucli_node_create(void) +{ + ucli_node_t* n; + ucli_module_init(&x86_64_ingrasys_s9180_32x_ucli_module__); + n = ucli_node_create("x86_64_ingrasys_s9180_32x", NULL, &x86_64_ingrasys_s9180_32x_ucli_module__); + ucli_node_subnode_add(n, ucli_module_log_node_create("x86_64_ingrasys_s9180_32x")); + return n; +} + +#else +void* +x86_64_ingrasys_s9180_32x_ucli_node_create(void) +{ + return NULL; +} +#endif + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/Makefile new file mode 100755 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git 
a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/Makefile new file mode 100755 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/PKG.yml b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/PKG.yml new file mode 100755 index 00000000..8054deba --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=ingrasys BASENAME=x86-64-ingrasys-s9180-32x REVISION=r0 diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/src/lib/x86-64-ingrasys-s9180-32x-r0.yml b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/src/lib/x86-64-ingrasys-s9180-32x-r0.yml new file mode 100755 index 00000000..1e2b0902 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/src/lib/x86-64-ingrasys-s9180-32x-r0.yml @@ -0,0 +1,30 @@ +--- + +###################################################################### +# +# platform-config for x86-64-ingrasys-s9180-32x +# +###################################################################### + +x86-64-ingrasys-s9180-32x-r0: + + grub: + + serial: >- + --port=0x3f8 + --speed=115200 + --word=8 + --parity=no + --stop=1 + + kernel: + <<: *kernel-3-16 + + args: >- + console=ttyS0,115200n8 + + ##network + ## interfaces: + ## ma1: + ## name: ~ + ## syspath: pci0000:00/0000:00:03.0 diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/src/python/x86_64_ingrasys_s9180_32x_r0/__init__.py 
b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/src/python/x86_64_ingrasys_s9180_32x_r0/__init__.py new file mode 100755 index 00000000..002c1774 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9180-32x/platform-config/r0/src/python/x86_64_ingrasys_s9180_32x_r0/__init__.py @@ -0,0 +1,287 @@ +from onl.platform.base import * +from onl.platform.ingrasys import * +import os + +class OnlPlatform_x86_64_ingrasys_s9180_32x_r0(OnlPlatformIngrasys): + PLATFORM='x86-64-ingrasys-s9180-32x-r0' + MODEL="S9180-32X" + SYS_OBJECT_ID=".9180.32" + + def baseconfig(self): + + self.insmod("eeprom_mb") + # init SYS EEPROM devices + self.new_i2c_device('mb_eeprom', 0x55, 0) + + os.system("modprobe w83795") + os.system("modprobe eeprom") + + ########### initialize I2C bus 0 ########### + # init PCA9548 + self.new_i2c_devices( + [ + ('pca9548', 0x70, 0), + ('pca9548', 0x71, 1), + ('pca9548', 0x71, 2), + ('pca9548', 0x71, 3), + ('pca9548', 0x71, 4), + ('pca9548', 0x71, 7), + ('pca9548', 0x76, 0), + ] + ) + + # init PCA9545 + self.new_i2c_device('pca9545', 0x72, 0) + + # Golden Finger to show CPLD + os.system("i2cget -y 44 0x74 2") + + # Reset BMC Dummy Board + os.system("i2cset -y -r 0 0x26 4 0x00") + os.system("i2cset -y -r 0 0x26 5 0x00") + os.system("i2cset -y -r 0 0x26 2 0x3F") + os.system("i2cset -y -r 0 0x26 3 0x1F") + os.system("i2cset -y -r 0 0x26 6 0xC0") + os.system("i2cset -y -r 0 0x26 7 0x00") + + # CPU Baord + os.system("i2cset -y -r 0 0x77 6 0xFF") + os.system("i2cset -y -r 0 0x77 7 0xFF") + + # init SMBUS1 ABS + os.system("i2cset -y -r 5 0x20 4 0x00") + os.system("i2cset -y -r 5 0x20 5 0x00") + os.system("i2cset -y -r 5 0x20 6 0xFF") + os.system("i2cset -y -r 5 0x20 7 0xFF") + + os.system("i2cset -y -r 5 0x21 4 0x00") + os.system("i2cset -y -r 5 0x21 5 0x00") + os.system("i2cset -y -r 5 0x21 6 0xFF") + os.system("i2cset -y -r 5 0x21 7 0xFF") + + os.system("i2cset -y -r 5 0x22 4 0x00") + os.system("i2cset -y -r 5 0x22 
5 0x00") + os.system("i2cset -y -r 5 0x22 6 0xFF") + os.system("i2cset -y -r 5 0x22 7 0xFF") + + os.system("i2cset -y -r 5 0x23 4 0x00") + os.system("i2cset -y -r 5 0x23 5 0x00") + os.system("i2cset -y -r 5 0x23 2 0xCF") + os.system("i2cset -y -r 5 0x23 3 0xF0") + os.system("i2cset -y -r 5 0x23 6 0xCF") + os.system("i2cset -y -r 5 0x23 7 0xF0") + + # init SFP + os.system("i2cset -y -r 5 0x27 4 0x00") + os.system("i2cset -y -r 5 0x27 5 0x00") + os.system("i2cset -y -r 5 0x27 2 0x00") + os.system("i2cset -y -r 5 0x27 3 0x00") + os.system("i2cset -y -r 5 0x27 6 0xCF") + os.system("i2cset -y -r 5 0x27 7 0xF0") + + # set ZQSFP LP_MODE = 0 + os.system("i2cset -y -r 6 0x20 4 0x00") + os.system("i2cset -y -r 6 0x20 5 0x00") + os.system("i2cset -y -r 6 0x20 2 0x00") + os.system("i2cset -y -r 6 0x20 3 0x00") + os.system("i2cset -y -r 6 0x20 6 0x00") + os.system("i2cset -y -r 6 0x20 7 0x00") + + os.system("i2cset -y -r 6 0x21 4 0x00") + os.system("i2cset -y -r 6 0x21 5 0x00") + os.system("i2cset -y -r 6 0x21 2 0x00") + os.system("i2cset -y -r 6 0x21 3 0x00") + os.system("i2cset -y -r 6 0x21 6 0x00") + os.system("i2cset -y -r 6 0x21 7 0x00") + + # set ZQSFP RST = 1 + os.system("i2cset -y -r 6 0x22 4 0x00") + os.system("i2cset -y -r 6 0x22 5 0x00") + os.system("i2cset -y -r 6 0x22 2 0xFF") + os.system("i2cset -y -r 6 0x22 3 0xFF") + os.system("i2cset -y -r 6 0x22 6 0x00") + os.system("i2cset -y -r 6 0x22 7 0x00") + + os.system("i2cset -y -r 6 0x23 4 0x00") + os.system("i2cset -y -r 6 0x23 5 0x00") + os.system("i2cset -y -r 6 0x23 2 0xFF") + os.system("i2cset -y -r 6 0x23 3 0xFF") + os.system("i2cset -y -r 6 0x23 6 0x00") + os.system("i2cset -y -r 6 0x23 7 0x00") + + # init Host GPIO + os.system("i2cset -y -r 0 0x74 4 0x00") + os.system("i2cset -y -r 0 0x74 5 0x00") + os.system("i2cset -y -r 0 0x74 2 0x0F") + os.system("i2cset -y -r 0 0x74 3 0xDF") + os.system("i2cset -y -r 0 0x74 6 0x08") + os.system("i2cset -y -r 0 0x74 7 0x1F") + + # init LED board after PVT 
(BAREFOOT_IO_EXP_LED_ID) + #os.system("i2cset -y -r 50 0x75 4 0x00") + #os.system("i2cset -y -r 50 0x75 5 0x00") + #os.system("i2cset -y -r 50 0x75 6 0x00") + #os.system("i2cset -y -r 50 0x75 7 0xFF") + + # init Board ID + os.system("i2cset -y -r 51 0x27 4 0x00") + os.system("i2cset -y -r 51 0x27 5 0x00") + os.system("i2cset -y -r 51 0x27 6 0xFF") + os.system("i2cset -y -r 51 0x27 7 0xFF") + + # init Board ID of Dummy BMC Board + os.system("i2cset -y -r 0 0x24 4 0x00") + os.system("i2cset -y -r 0 0x24 5 0x00") + os.system("i2cset -y -r 0 0x24 6 0xFF") + os.system("i2cset -y -r 0 0x24 7 0xFF") + + # init PSU I/O (BAREFOOT_IO_EXP_PSU_ID) + os.system("i2cset -y -r 0 0x25 4 0x00") + os.system("i2cset -y -r 0 0x25 5 0x00") + os.system("i2cset -y -r 0 0x25 2 0x00") + os.system("i2cset -y -r 0 0x25 3 0x1D") + os.system("i2cset -y -r 0 0x25 6 0xDB") + os.system("i2cset -y -r 0 0x25 7 0x03") + + # init FAN I/O (BAREFOOT_IO_EXP_FAN_ID) + os.system("i2cset -y -r 59 0x20 4 0x00") + os.system("i2cset -y -r 59 0x20 5 0x00") + os.system("i2cset -y -r 59 0x20 2 0x11") + os.system("i2cset -y -r 59 0x20 3 0x11") + os.system("i2cset -y -r 59 0x20 6 0xCC") + os.system("i2cset -y -r 59 0x20 7 0xCC") + + # init Fan + # select bank 0 + os.system("i2cset -y -r 56 0x2F 0x00 0x80") + # enable FANIN 1-8 + os.system("i2cset -y -r 56 0x2F 0x06 0xFF") + # disable FANIN 9-14 + os.system("i2cset -y -r 56 0x2F 0x07 0x00") + # select bank 2 + os.system("i2cset -y -r 56 0x2F 0x00 0x82") + # set PWM mode in FOMC + os.system("i2cset -y -r 56 0x2F 0x0F 0x00") + + # init VOLMON + os.system("i2cset -y -r 56 0x2F 0x00 0x80") + os.system("i2cset -y -r 56 0x2F 0x01 0x1C") + os.system("i2cset -y -r 56 0x2F 0x00 0x80") + os.system("i2cset -y -r 56 0x2F 0x02 0xFF") + os.system("i2cset -y -r 56 0x2F 0x03 0x50") + os.system("i2cset -y -r 56 0x2F 0x04 0x0A") + os.system("i2cset -y -r 56 0x2F 0x00 0x80") + os.system("i2cset -y -r 56 0x2F 0x01 0x1D") + self.new_i2c_device('w83795adg', 0x2F, 56) + + # init Fan Speed 
+ os.system("echo 120 > /sys/class/hwmon/hwmon1/device/pwm1") + os.system("echo 120 > /sys/class/hwmon/hwmon1/device/pwm2") + + # init Temperature + self.new_i2c_devices( + [ + # ASIC Coretemp and Front MAC + ('lm86', 0x4C, 41), + + # CPU Board + ('tmp75', 0x4F, 0), + + # Near PSU1 + ('tmp75', 0x48, 41), + + # Rear MAC + ('tmp75', 0x4A, 41), + + # Near Port 32 + ('tmp75', 0x4B, 41), + + # Near PSU2 + ('tmp75', 0x4D, 41), + ] + ) + + # init GPIO, ABS Port 0-15 + self.new_i2c_device('pca9535', 0x20, 5) + for i in range(496, 512): + os.system("echo %d > /sys/class/gpio/export" % i) + os.system("echo 1 > /sys/class/gpio/gpio%d/active_low" % i) + + # init GPIO, ABS Port 16-31 + self.new_i2c_device('pca9535', 0x21, 5) + for i in range(480, 496): + os.system("echo %d > /sys/class/gpio/export" % i) + os.system("echo 1 > /sys/class/gpio/gpio%d/active_low" % i) + + # init GPIO, INT Port 0-15 + self.new_i2c_device('pca9535', 0x22, 5) + for i in range(464, 480): + os.system("echo %d > /sys/class/gpio/export" % i) + os.system("echo 1 > /sys/class/gpio/gpio%d/active_low" % i) + + # init GPIO, INT Port 16-31 + self.new_i2c_device('pca9535', 0x23, 5) + for i in range(448, 464): + os.system("echo %d > /sys/class/gpio/export" % i) + os.system("echo 1 > /sys/class/gpio/gpio%d/active_low" % i) + + # init GPIO, SFP + self.new_i2c_device('pca9535', 0x27, 5) + for i in range(432, 448): + os.system("echo %d > /sys/class/gpio/export" % i) + if i == 180 or i == 181 or i == 184 or \ + i == 185 or i == 186 or i == 187: + os.system("echo out > /sys/class/gpio/gpio%d/direction" % i) + else: + os.system("echo 1 > /sys/class/gpio/gpio%d/active_low" % i) + + # init GPIO, LP Mode Port 0-15 + self.new_i2c_device('pca9535', 0x20, 6) + for i in range(416, 432): + os.system("echo %d > /sys/class/gpio/export" % i) + os.system("echo out > /sys/class/gpio/gpio%d/direction" % i) + + # init GPIO, LP Mode Port 16-31 + self.new_i2c_device('pca9535', 0x21, 6) + for i in range(400, 416): + os.system("echo %d > 
/sys/class/gpio/export" % i) + os.system("echo out > /sys/class/gpio/gpio%d/direction" % i) + + # init GPIO, RST Port 0-15 + self.new_i2c_device('pca9535', 0x22, 6) + for i in range(384, 400): + os.system("echo %d > /sys/class/gpio/export" % i) + os.system("echo out > /sys/class/gpio/gpio%d/direction" % i) + os.system("echo 1 > /sys/class/gpio/gpio%d/active_low" % i) + os.system("echo 0 > /sys/class/gpio/gpio%d/value" % i) + + # init GPIO, RST Port 16-31 + self.new_i2c_device('pca9535', 0x23, 6) + for i in range(368, 384): + os.system("echo %d > /sys/class/gpio/export" % i) + os.system("echo out > /sys/class/gpio/gpio%d/direction" % i) + os.system("echo 1 > /sys/class/gpio/gpio%d/active_low" % i) + os.system("echo 0 > /sys/class/gpio/gpio%d/value" % i) + + # init QSFP EEPROM + for port in range(1, 33): + self.new_i2c_device('sff8436', 0x50, port + 8) + + # init SFP(0/1) EEPROM + self.new_i2c_device('sff8436', 0x50, 45) + self.new_i2c_device('sff8436', 0x50, 46) + + # init PSU(0/1) EEPROM devices + self.new_i2c_device('eeprom', 0x50, 57) + self.new_i2c_device('eeprom', 0x50, 58) + + # init SYS LED + os.system("i2cset -y -r 50 0x75 2 0x01") + os.system("i2cset -y -r 50 0x75 4 0x00") + os.system("i2cset -y -r 50 0x75 5 0x00") + os.system("i2cset -y -r 50 0x75 6 0x00") + os.system("i2cset -y -r 50 0x75 7 0x00") + + return True + + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/.gitignore b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/.gitignore new file mode 100755 index 00000000..9f7b1342 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/.gitignore @@ -0,0 +1 @@ +onlpdump.mk diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/Makefile new file mode 100755 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/Makefile @@ -0,0 +1 @@ +include 
$(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/Makefile new file mode 100755 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/PKG.yml b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/PKG.yml new file mode 100755 index 00000000..a9d3a2df --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-modules.yml ARCH=amd64 VENDOR=ingrasys BASENAME=x86-64-ingrasys-s9280-64x KERNELS="onl-kernel-3.16-lts-x86-64-all:amd64" diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/.gitignore b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/.gitignore new file mode 100644 index 00000000..a65b4177 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/.gitignore @@ -0,0 +1 @@ +lib diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/Makefile new file mode 100644 index 00000000..00649dbb --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/Makefile @@ -0,0 +1,6 @@ +KERNELS := onl-kernel-3.16-lts-x86-64-all:amd64 +KMODULES := $(wildcard *.c) +VENDOR := ingrasys +BASENAME := x86-64-ingrasys-s9280-64x +ARCH := x86_64 +include $(ONL)/make/kmodule.mk diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_i2c_cpld.c 
b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_i2c_cpld.c new file mode 100644 index 00000000..588e9473 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_i2c_cpld.c @@ -0,0 +1,1927 @@ +/* + * A i2c cpld driver for the ingrasys_s9280_64x + * + * Copyright (C) 2017 Ingrasys Technology Corporation. + * Leo Lin + * + * Based on ad7414.c + * Copyright 2006 Stefan Roese , DENX Software Engineering + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +/* header file for i2c cpld driver of ingrasys_s9280_64x + * + * Copyright (C) 2017 Ingrasys Technology Corporation. + * Leo Lin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. + */ + +#ifndef INGRASYS_S9280_64X_I2C_CPLD_H +#define INGRASYS_S9280_64X_I2C_CPLD_H + +/* CPLD device index value */ +enum cpld_id { + cpld1, + cpld2, + cpld3, + cpld4, + cpld5 +}; + +enum LED_BLINK { + BLINK, + NOBLINK, +}; + +enum LED_YELLOW { + YELLOW_OFF, + YELLOW_ON, +}; + +enum LED_GREEN { + GREEN_OFF, + GREEN_ON, +}; + +/* port number on CPLD */ +#define CPLD_1_PORT_NUM 12 +#define CPLD_2_PORT_NUM 13 + +/* QSFP port number */ +#define QSFP_MAX_PORT_NUM 64 +#define QSFP_MIN_PORT_NUM 1 + +/* SFP+ port number */ +#define SFP_MAX_PORT_NUM 2 +#define SFP_MIN_PORT_NUM 1 + + +/* CPLD registers */ +#define CPLD_BOARD_TYPE_REG 0x0 +#define CPLD_EXT_BOARD_TYPE_REG 0x7 +#define CPLD_VERSION_REG 0x1 +#define CPLD_ID_REG 0x2 +#define CPLD_QSFP_PORT_STATUS_BASE_REG 0x20 +#define CPLD_QSFP_PORT_CONFIG_BASE_REG 0x30 +#define CPLD_QSFP_PORT_INTERRUPT_REG 0x40 +#define CPLD_SFP_PORT_STATUS_REG 0x2F +#define CPLD_SFP_PORT_CONFIG_REG 0x3F +#define CPLD_QSFP_PORT_INTERRUPT_REG 0x40 +#define CPLD_10GMUX_CONFIG_REG 0x41 +#define CPLD_BMC_STATUS_REG 0x42 +#define CPLD_BMC_WATCHDOG_REG 0x43 +#define CPLD_USB_STATUS_REG 0x44 +#define CPLD_RESET_CONTROL_REG 0x4A +#define CPLD_SFP_LED_REG 0x80 +#define CPLD_SFP_LED_BLINK_REG 0x90 +#define CPLD_QSFP_LED_BASE_REG 0x80 +#define CPLD_QSFP_LED_BLINK_BASE_REG 0x90 +#define CPLD_RTMR_RESET_REG 0x4B + +/* bit definition for register value */ +enum CPLD_QSFP_PORT_STATUS_BITS { + CPLD_QSFP_PORT_STATUS_INT_BIT, + CPLD_QSFP_PORT_STATUS_ABS_BIT, +}; +enum CPLD_QSFP_PORT_CONFIG_BITS { + CPLD_QSFP_PORT_CONFIG_RESET_BIT, + CPLD_QSFP_PORT_CONFIG_RESERVE_BIT, + CPLD_QSFP_PORT_CONFIG_LPMODE_BIT, +}; +enum CPLD_SFP_PORT_STATUS_BITS { + CPLD_SFP0_PORT_STATUS_PRESENT_BIT, + CPLD_SFP0_PORT_STATUS_TXFAULT_BIT, + CPLD_SFP0_PORT_STATUS_RXLOS_BIT, + 
CPLD_SFP_PORT_STATUS_DUMMY, + CPLD_SFP1_PORT_STATUS_PRESENT_BIT, + CPLD_SFP1_PORT_STATUS_TXFAULT_BIT, + CPLD_SFP1_PORT_STATUS_RXLOS_BIT, +}; +enum CPLD_SFP_PORT_CONFIG_BITS { + CPLD_SFP0_PORT_CONFIG_TXDIS_BIT, + CPLD_SFP0_PORT_CONFIG_RS_BIT, + CPLD_SFP0_PORT_CONFIG_TS_BIT, + CPLD_SFP_PORT_CONFIG_DUMMY, + CPLD_SFP1_PORT_CONFIG_TXDIS_BIT, + CPLD_SFP1_PORT_CONFIG_RS_BIT, + CPLD_SFP1_PORT_CONFIG_TS_BIT, + +}; +enum CPLD_10GMUX_CONFIG_BITS { + CPLD_10GMUX_CONFIG_ENSMB_BIT, + CPLD_10GMUX_CONFIG_ENINPUT_BIT, + CPLD_10GMUX_CONFIG_SEL1_BIT, + CPLD_10GMUX_CONFIG_SEL0_BIT, +}; +enum CPLD_BMC_WATCHDOG_BITS { + CPLD_10GMUX_CONFIG_ENTIMER_BIT, + CPLD_10GMUX_CONFIG_TIMEOUT_BIT, +}; +enum CPLD_RESET_CONTROL_BITS { + CPLD_RESET_CONTROL_SWRST_BIT, + CPLD_RESET_CONTROL_CP2104RST_BIT, + CPLD_RESET_CONTROL_82P33814RST_BIT, + CPLD_RESET_CONTROL_BMCRST_BIT, +}; +enum CPLD_SFP_LED_BITS { + CPLD_SFP_LED_SFP0_GREEN_BIT, + CPLD_SFP_LED_SFP0_YELLOW_BIT, + CPLD_SFP_LED_SFP1_GREEN_BIT, + CPLD_SFP_LED_SFP1_YELLOW_BIT, +}; +enum CPLD_SFP_LED_BLINK_BITS { + CPLD_SFP_LED_BLINK_SFP0_BIT, + CPLD_SFP_LED_BLINK_SFP1_BIT, +}; +enum CPLD_QSFP_LED_BITS { + CPLD_QSFP_LED_CHAN_0_GREEN_BIT, + CPLD_QSFP_LED_CHAN_0_YELLOW_BIT, + CPLD_QSFP_LED_CHAN_1_GREEN_BIT, + CPLD_QSFP_LED_CHAN_1_YELLOW_BIT, + CPLD_QSFP_LED_CHAN_2_GREEN_BIT, + CPLD_QSFP_LED_CHAN_2_YELLOW_BIT, + CPLD_QSFP_LED_CHAN_3_GREEN_BIT, + CPLD_QSFP_LED_CHAN_3_YELLOW_BIT, + +}; +enum CPLD_QSFP_LED_BLINK_BITS { + CPLD_QSFP_LED_BLINK_X_CHAN0_BIT, + CPLD_QSFP_LED_BLINK_X_CHAN1_BIT, + CPLD_QSFP_LED_BLINK_X_CHAN2_BIT, + CPLD_QSFP_LED_BLINK_X_CHAN3_BIT, + CPLD_QSFP_LED_BLINK_XPLUS_CHAN0_BIT, + CPLD_QSFP_LED_BLINK_XPLUS_CHAN1_BIT, + CPLD_QSFP_LED_BLINK_XPLUS_CHAN2_BIT, + CPLD_QSFP_LED_BLINK_XPLUS_CHAN3_BIT, +}; + +/* bit field structure for register value */ +struct cpld_reg_board_type_t { + u8 build_rev:2; + u8 hw_rev:2; + u8 board_id:4; +}; + +struct cpld_reg_version_t { + u8 revision:6; + u8 release:1; + u8 reserve:1; +}; + +struct cpld_reg_id_t { + u8 
/* common manipulation */
#define INVALID(i, min, max) ((i < min) || (i > max) ? 1u : 0u)
/* NOTE(review): the bodies of READ_BIT/SET_BIT/CLEAR_BIT/READ_BF were
 * truncated by text extraction ("(1<" ... "bf_name").  Reconstructed to
 * the standard forms implied by their call sites -- TODO confirm against
 * the upstream source. */
#define READ_BIT(val, bit) ((0u == (val & (1 << bit))) ? 0u : 1u)
#define SET_BIT(val, bit) (val |= (1 << bit))
#define CLEAR_BIT(val, bit) (val &= ~(1 << bit))
/* reinterpret a raw register byte through one of the cpld_reg_*_t
 * bit-field structs and pull out a single field */
#define READ_BF(bf_struct, val, bf_name, bf_value) \
    (bf_value = ((struct bf_struct *)&(val))->bf_name)
#define READ_BF_1(bf_struct, val, bf_name, bf_value) \
    bf_struct bf; \
    bf.data = val; \
    bf_value = bf.bf_name
#define BOARD_TYPE_BUILD_REV_GET(val, res) \
    READ_BF(cpld_reg_board_type_t, val, build_rev, res)
#define BOARD_TYPE_HW_REV_GET(val, res) \
    READ_BF(cpld_reg_board_type_t, val, hw_rev, res)
#define BOARD_TYPE_BOARD_ID_GET(val, res) \
    READ_BF(cpld_reg_board_type_t, val, board_id, res)
#define CPLD_VERSION_REV_GET(val, res) \
    READ_BF(cpld_reg_version_t, val, revision, res)
#define CPLD_VERSION_REL_GET(val, res) \
    READ_BF(cpld_reg_version_t, val, release, res)
#define CPLD_ID_ID_GET(val, res) \
    READ_BF(cpld_reg_id_t, val, id, res)
#define CPLD_ID_REL_GET(val, res) \
    READ_BF(cpld_reg_id_t, val, release, res)
/* SFP/QSFP port led registers manipulation:
 * SFP LEDs always live on cpld1; QSFP LEDs are spread 16 ports per CPLD
 * starting at cpld2, one LED reg per port, one blink reg per port pair. */
#define SFP_LED_TO_CPLD_IDX(sfp_port) cpld1
#define SFP_LED_REG(sfp_port) CPLD_SFP_LED_REG
#define SFP_LED_BLINK_REG(sfp_port) CPLD_SFP_LED_BLINK_REG
#define QSFP_LED_TO_CPLD_IDX(qsfp_port) \
    ((qsfp_port - 1) / 16 + 2)
#define QSFP_LED_REG(qsfp_port) \
    ((qsfp_port - 1) % 16 + CPLD_QSFP_LED_BASE_REG)
#define QSFP_LED_BLINK_REG(qsfp_port) \
    (((qsfp_port - 1) % 16) / 2 + CPLD_QSFP_LED_BLINK_BASE_REG)
/* QSFP/SFP port status registers manipulation:
 * map a 1-based front-panel QSFP port onto (cpld index, 0-based port
 * offset within that CPLD); out-of-range ports map to (0, 0). */
#define QSFP_TO_CPLD_IDX(qsfp_port, cpld_index, cpld_port) \
{ \
    if (QSFP_MIN_PORT_NUM <= qsfp_port && qsfp_port <= CPLD_1_PORT_NUM) { \
        cpld_index = cpld1; \
        cpld_port = qsfp_port - 1; \
    } else if (CPLD_1_PORT_NUM < qsfp_port \
        && qsfp_port <= QSFP_MAX_PORT_NUM) { \
        cpld_index = cpld2 + (qsfp_port - 1 - CPLD_1_PORT_NUM) \
            / CPLD_2_PORT_NUM; \
        cpld_port = (qsfp_port - 1 - CPLD_1_PORT_NUM) % \
            CPLD_2_PORT_NUM; \
    } else { \
        cpld_index = 0; \
        cpld_port = 0; \
    } \
}
#define QSFP_PORT_STATUS_REG(cpld_port) \
    (CPLD_QSFP_PORT_STATUS_BASE_REG + cpld_port)
#define QSFP_PORT_CONFIG_REG(cpld_port) \
    (CPLD_QSFP_PORT_CONFIG_BASE_REG + cpld_port)
#define QSFP_PORT_INT_BIT_GET(port_status_value) \
    READ_BIT(port_status_value, CPLD_QSFP_PORT_STATUS_INT_BIT)
#define QSFP_PORT_ABS_BIT_GET(port_status_value) \
    READ_BIT(port_status_value, CPLD_QSFP_PORT_STATUS_ABS_BIT)
#define QSFP_PORT_RESET_BIT_GET(port_config_value) \
    READ_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_RESET_BIT)
#define QSFP_PORT_LPMODE_BIT_GET(port_config_value) \
    READ_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_LPMODE_BIT)
#define QSFP_PORT_RESET_BIT_SET(port_config_value) \
    SET_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_RESET_BIT)
#define QSFP_PORT_RESET_BIT_CLEAR(port_config_value) \
    CLEAR_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_RESET_BIT)
#define QSFP_PORT_LPMODE_BIT_SET(port_config_value) \
    SET_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_LPMODE_BIT)
#define QSFP_PORT_LPMODE_BIT_CLEAR(port_config_value) \
    CLEAR_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_LPMODE_BIT)
#define SFP_PORT_PRESENT_BIT_GET(sfp_port, port_status_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        READ_BIT(port_status_value, CPLD_SFP0_PORT_STATUS_PRESENT_BIT); \
    } else { \
        READ_BIT(port_status_value, CPLD_SFP1_PORT_STATUS_PRESENT_BIT); \
    }
#define SFP_PORT_TXFAULT_BIT_GET(sfp_port, port_status_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        READ_BIT(port_status_value, CPLD_SFP0_PORT_STATUS_TXFAULT_BIT); \
    } else { \
        READ_BIT(port_status_value, CPLD_SFP1_PORT_STATUS_TXFAULT_BIT); \
    }
#define SFP_PORT_RXLOS_BIT_GET(sfp_port, port_status_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        READ_BIT(port_status_value, CPLD_SFP0_PORT_STATUS_RXLOS_BIT); \
    } else { \
        READ_BIT(port_status_value, CPLD_SFP1_PORT_STATUS_RXLOS_BIT); \
    }
#define SFP_PORT_TXDIS_BIT_GET(sfp_port, port_config_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        READ_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TXDIS_BIT); \
    } else { \
        /* fix: was CPLD_SFP1_PORT_STATUS_RXLOS_BIT (copy-paste) -- this
         * macro reads the config register, not the status register */ \
        READ_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_TXDIS_BIT); \
    }
#define SFP_PORT_RS_BIT_GET(sfp_port, port_config_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        READ_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_RS_BIT); \
    } else { \
        READ_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_RS_BIT); \
    }
#define SFP_PORT_TS_BIT_GET(sfp_port, port_config_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        READ_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TS_BIT); \
    } else { \
        /* fix: was CPLD_SFP0_PORT_CONFIG_TS_BIT (copy-paste) -- the
         * else branch must read the SFP1 bit */ \
        READ_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_TS_BIT); \
    }
#define SFP_PORT_TXDIS_BIT_SET(sfp_port, port_config_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        SET_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TXDIS_BIT); \
    } else { \
        SET_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_TXDIS_BIT); \
    }
#define SFP_PORT_TXDIS_BIT_CLEAR(sfp_port, port_config_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        CLEAR_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TXDIS_BIT); \
    } else { \
        CLEAR_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_TXDIS_BIT); \
    }
#define SFP_PORT_RS_BIT_SET(sfp_port, port_config_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        SET_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_RS_BIT); \
    } else { \
        SET_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_RS_BIT); \
    }
#define SFP_PORT_RS_BIT_CLEAR(sfp_port, port_config_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        CLEAR_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_RS_BIT); \
    } else { \
        CLEAR_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_RS_BIT); \
    }
#define SFP_PORT_TS_BIT_SET(sfp_port, port_config_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        SET_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TS_BIT); \
    } else { \
        SET_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_TS_BIT); \
    }
#define SFP_PORT_TS_BIT_CLEAR(sfp_port, port_config_value) \
    if (sfp_port == SFP_MIN_PORT_NUM) { \
        CLEAR_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TS_BIT); \
    } else { \
        CLEAR_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_TS_BIT); \
    }
/* CPLD access functions exported to other platform modules */
extern int ingrasys_i2c_cpld_get_qsfp_port_status_val(u8 port_num);
extern int ingrasys_i2c_cpld_get_qsfp_port_config_val(u8 port_num);
extern int ingrasys_i2c_cpld_set_qsfp_port_config_val(u8 port_num, u8 reg_val);
extern int ingrasys_i2c_cpld_get_sfp_port_status_val(void);
extern int ingrasys_i2c_cpld_get_sfp_port_config_val(void);
extern int ingrasys_i2c_cpld_set_sfp_port_config_val(u8 reg_val);
extern u8 fp_port_to_phy_port(u8 fp_port);
#endif

#ifdef DEBUG
#define DEBUG_PRINT(fmt, args...) \
    printk(KERN_INFO "%s:%s[%d]: " fmt "\r\n", \
            __FILE__, __func__, __LINE__, ##args)
#else
#define DEBUG_PRINT(fmt, args...)
#endif

/* serialize one smbus byte read under the given mutex; result in `ret` */
#define I2C_READ_BYTE_DATA(ret, lock, i2c_client, reg) \
{ \
    mutex_lock(lock); \
    ret = i2c_smbus_read_byte_data(i2c_client, reg); \
    mutex_unlock(lock); \
}
/* serialize one smbus byte write under the given mutex; result in `ret` */
#define I2C_WRITE_BYTE_DATA(ret, lock, i2c_client, reg, val) \
{ \
    mutex_lock(lock); \
    ret = i2c_smbus_write_byte_data(i2c_client, reg, val); \
    mutex_unlock(lock); \
}

/* CPLD sysfs attributes index: each show/store handler dispatches on
 * one of these values (stored in sensor_device_attribute.index) */
enum s9280_64x_cpld_sysfs_attributes {
    CPLD_ACCESS_REG,
    CPLD_REGISTER_VAL,
    CPLD_PORT_START,
    CPLD_PORTS,
    CPLD_VERSION,
    CPLD_ID,
    CPLD_BOARD_TYPE,
    CPLD_EXT_BOARD_TYPE,
    CPLD_QSFP_PORT_STATUS_1,
    CPLD_QSFP_PORT_STATUS_2,
    CPLD_QSFP_PORT_STATUS_3,
    CPLD_QSFP_PORT_STATUS_4,
    CPLD_QSFP_PORT_STATUS_5,
    CPLD_QSFP_PORT_STATUS_6,
    CPLD_QSFP_PORT_STATUS_7,
    CPLD_QSFP_PORT_STATUS_8,
    CPLD_QSFP_PORT_STATUS_9,
    CPLD_QSFP_PORT_STATUS_10,
    CPLD_QSFP_PORT_STATUS_11,
    CPLD_QSFP_PORT_STATUS_12,
    CPLD_QSFP_PORT_STATUS_13,
    CPLD_QSFP_PORT_CONFIG_1,
    CPLD_QSFP_PORT_CONFIG_2,
    CPLD_QSFP_PORT_CONFIG_3,
    CPLD_QSFP_PORT_CONFIG_4,
    CPLD_QSFP_PORT_CONFIG_5,
    CPLD_QSFP_PORT_CONFIG_6,
    CPLD_QSFP_PORT_CONFIG_7,
    CPLD_QSFP_PORT_CONFIG_8,
    CPLD_QSFP_PORT_CONFIG_9,
    CPLD_QSFP_PORT_CONFIG_10,
    CPLD_QSFP_PORT_CONFIG_11,
    CPLD_QSFP_PORT_CONFIG_12,
    CPLD_QSFP_PORT_CONFIG_13,
    CPLD_QSFP_PORT_INTERRUPT,
    CPLD_SFP_PORT_STATUS,
    CPLD_SFP_PORT_CONFIG,
    CPLD_10GMUX_CONFIG,
    CPLD_BMC_STATUS,
    CPLD_BMC_WATCHDOG,
    CPLD_USB_STATUS,
    CPLD_RESET_CONTROL,
    CPLD_SFP_LED,
    CPLD_SFP_LED_BLINK,
    CPLD_QSFP_LED_1,
    CPLD_QSFP_LED_2,
    CPLD_QSFP_LED_3,
    CPLD_QSFP_LED_4,
    CPLD_QSFP_LED_5,
    CPLD_QSFP_LED_6,
    CPLD_QSFP_LED_7,
    CPLD_QSFP_LED_8,
    CPLD_QSFP_LED_9,
    CPLD_QSFP_LED_10,
    CPLD_QSFP_LED_11,
    CPLD_QSFP_LED_12,
    CPLD_QSFP_LED_13,
    CPLD_QSFP_LED_14,
    CPLD_QSFP_LED_15,
    CPLD_QSFP_LED_16,
    CPLD_QSFP_LED_BLINK,
    CPLD_RTMR_RESET,

};

/* CPLD sysfs attributes hook functions (show/store handlers) */
static ssize_t read_access_register(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_access_register(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static ssize_t read_register_value(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_register_value(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static ssize_t get_qsfp_port_start(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t get_qsfp_ports(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t read_cpld_version(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t read_cpld_id(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t read_board_type(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t read_ext_board_type(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t read_qsfp_port_status(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t read_qsfp_port_config(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_qsfp_port_config(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static ssize_t read_qsfp_port_interrupt(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t read_sfp_port_status(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t read_sfp_port_config(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_sfp_port_config(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static ssize_t read_10gmux_config(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_10gmux_config(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static ssize_t read_bmc_status(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t read_bmc_watchdog(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_bmc_watchdog(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static ssize_t read_usb_status(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t read_reset_control(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_reset_control(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static ssize_t read_sfp_led(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_sfp_led(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static ssize_t read_sfp_led_blink(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_sfp_led_blink(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static ssize_t read_qsfp_led(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_qsfp_led(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static ssize_t read_qsfp_led_blink(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_qsfp_led_blink(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static ssize_t read_rtmr_reset(struct device *dev,
        struct device_attribute *da, char *buf);
static ssize_t write_rtmr_reset(struct device *dev,
        struct device_attribute *da, const char *buf, size_t count);
static LIST_HEAD(cpld_client_list);  /* client list for cpld */
static struct mutex list_lock;       /* mutex for client list */

/* one node per probed CPLD i2c client, chained on cpld_client_list */
struct cpld_client_node {
    struct i2c_client *client;
    struct list_head list;
};

/* per-device driver state, stored via i2c_set_clientdata() */
struct cpld_data {
    int index;                  /* CPLD index */
    struct mutex access_lock;   /* mutex for cpld access */
    u8 access_reg;              /* register to access */
};

/* CPLD device id and data: name -> cpld index */
static const struct i2c_device_id ingrasys_i2c_cpld_id[] = {
    { "ingrasys_cpld1", cpld1 },
    { "ingrasys_cpld2", cpld2 },
    { "ingrasys_cpld3", cpld3 },
    { "ingrasys_cpld4", cpld4 },
    { "ingrasys_cpld5", cpld5 },
    {}
};

/* Addresses scanned for ingrasys_i2c_cpld */
static const unsigned short cpld_i2c_addr[] = { 0x33, I2C_CLIENT_END };

/* define all support register access of cpld in attribute */
static SENSOR_DEVICE_ATTR(cpld_access_register, S_IWUSR | S_IRUGO,
        read_access_register, write_access_register, CPLD_ACCESS_REG);
static SENSOR_DEVICE_ATTR(cpld_register_value, S_IWUSR | S_IRUGO,
        read_register_value, write_register_value, CPLD_REGISTER_VAL);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_start, S_IRUGO,
        get_qsfp_port_start, NULL, CPLD_PORT_START);
static SENSOR_DEVICE_ATTR(cpld_qsfp_ports, S_IRUGO,
        get_qsfp_ports, NULL, CPLD_PORTS);
static SENSOR_DEVICE_ATTR(cpld_version, S_IRUGO,
        read_cpld_version, NULL, CPLD_VERSION);
static SENSOR_DEVICE_ATTR(cpld_id, S_IRUGO, read_cpld_id, NULL, CPLD_ID);
static SENSOR_DEVICE_ATTR(cpld_board_type, S_IRUGO,
        read_board_type, NULL, CPLD_BOARD_TYPE);
static SENSOR_DEVICE_ATTR(cpld_ext_board_type, S_IRUGO,
        read_ext_board_type, NULL, CPLD_EXT_BOARD_TYPE);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_1, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_1);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_2, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_2);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_3, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_3);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_4, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_4);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_5, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_5);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_6, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_6);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_7, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_7);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_8, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_8);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_9, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_9);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_10, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_10);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_11, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_11);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_12, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_12);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_status_13, S_IRUGO,
        read_qsfp_port_status, NULL, CPLD_QSFP_PORT_STATUS_13);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_1, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_1);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_2, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_2);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_3, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_3);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_4, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_4);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_5, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_5);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_6, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_6);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_7, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_7);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_8, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_8);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_9, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_9);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_10, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_10);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_11, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_11);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_12, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_12);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_config_13, S_IWUSR | S_IRUGO,
        read_qsfp_port_config, write_qsfp_port_config,
        CPLD_QSFP_PORT_CONFIG_13);
static SENSOR_DEVICE_ATTR(cpld_qsfp_port_interrupt, S_IRUGO,
        read_qsfp_port_interrupt, NULL, CPLD_QSFP_PORT_INTERRUPT);
static SENSOR_DEVICE_ATTR(cpld_sfp_port_status, S_IRUGO,
        read_sfp_port_status, NULL, CPLD_SFP_PORT_STATUS);
static SENSOR_DEVICE_ATTR(cpld_sfp_port_config, S_IWUSR | S_IRUGO,
        read_sfp_port_config, write_sfp_port_config, CPLD_SFP_PORT_CONFIG);
CPLD_10GMUX_CONFIG); +static SENSOR_DEVICE_ATTR(cpld_bmc_status, S_IRUGO, + read_bmc_status, NULL, CPLD_BMC_STATUS); +static SENSOR_DEVICE_ATTR(cpld_bmc_watchdog, S_IWUSR | S_IRUGO, + read_bmc_watchdog, write_bmc_watchdog, + CPLD_BMC_WATCHDOG); +static SENSOR_DEVICE_ATTR(cpld_usb_status, S_IRUGO, + read_usb_status, NULL, CPLD_USB_STATUS); +static SENSOR_DEVICE_ATTR(cpld_reset_control, S_IWUSR | S_IRUGO, + read_reset_control, write_reset_control, + CPLD_BMC_WATCHDOG); +static SENSOR_DEVICE_ATTR(cpld_sfp_led, S_IWUSR | S_IRUGO, + read_sfp_led, write_sfp_led, CPLD_SFP_LED); +static SENSOR_DEVICE_ATTR(cpld_sfp_led_blink, S_IWUSR | S_IRUGO, + read_sfp_led_blink, write_sfp_led_blink, CPLD_SFP_LED_BLINK); +static SENSOR_DEVICE_ATTR(cpld_qsfp_led_1, S_IWUSR | S_IRUGO, + read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_1); +static SENSOR_DEVICE_ATTR(cpld_qsfp_led_2, S_IWUSR | S_IRUGO, + read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_2); +static SENSOR_DEVICE_ATTR(cpld_qsfp_led_3, S_IWUSR | S_IRUGO, + read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_3); +static SENSOR_DEVICE_ATTR(cpld_qsfp_led_4, S_IWUSR | S_IRUGO, + read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_4); +static SENSOR_DEVICE_ATTR(cpld_qsfp_led_5, S_IWUSR | S_IRUGO, + read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_5); +static SENSOR_DEVICE_ATTR(cpld_qsfp_led_6, S_IWUSR | S_IRUGO, + read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_6); +static SENSOR_DEVICE_ATTR(cpld_qsfp_led_7, S_IWUSR | S_IRUGO, + read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_7); +static SENSOR_DEVICE_ATTR(cpld_qsfp_led_8, S_IWUSR | S_IRUGO, + read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_8); +static SENSOR_DEVICE_ATTR(cpld_qsfp_led_9, S_IWUSR | S_IRUGO, + read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_9); +static SENSOR_DEVICE_ATTR(cpld_qsfp_led_10, S_IWUSR | S_IRUGO, + read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_10); +static SENSOR_DEVICE_ATTR(cpld_qsfp_led_11, S_IWUSR | S_IRUGO, + read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_11); +static 
static SENSOR_DEVICE_ATTR(cpld_qsfp_led_12, S_IWUSR | S_IRUGO,
        read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_12);
static SENSOR_DEVICE_ATTR(cpld_qsfp_led_13, S_IWUSR | S_IRUGO,
        read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_13);
static SENSOR_DEVICE_ATTR(cpld_qsfp_led_14, S_IWUSR | S_IRUGO,
        read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_14);
static SENSOR_DEVICE_ATTR(cpld_qsfp_led_15, S_IWUSR | S_IRUGO,
        read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_15);
static SENSOR_DEVICE_ATTR(cpld_qsfp_led_16, S_IWUSR | S_IRUGO,
        read_qsfp_led, write_qsfp_led, CPLD_QSFP_LED_16);
static SENSOR_DEVICE_ATTR(cpld_qsfp_led_blink, S_IWUSR | S_IRUGO,
        read_qsfp_led_blink, write_qsfp_led_blink, CPLD_QSFP_LED_BLINK);
static SENSOR_DEVICE_ATTR(cpld_rtmr_reset, S_IWUSR | S_IRUGO,
        read_rtmr_reset, write_rtmr_reset, CPLD_RTMR_RESET);

/* define support attributes of cpldx , total 5 */
/* cpld 1: 12 QSFP ports plus the SFP+, mux, BMC, USB, reset and retimer
 * registers that only exist on the first CPLD */
static struct attribute *s9280_64x_cpld1_attributes[] = {
    &sensor_dev_attr_cpld_access_register.dev_attr.attr,
    &sensor_dev_attr_cpld_register_value.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_start.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_ports.dev_attr.attr,
    &sensor_dev_attr_cpld_version.dev_attr.attr,
    &sensor_dev_attr_cpld_id.dev_attr.attr,
    &sensor_dev_attr_cpld_board_type.dev_attr.attr,
    &sensor_dev_attr_cpld_ext_board_type.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_1.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_2.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_3.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_4.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_5.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_6.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_7.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_8.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_9.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_10.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_11.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_12.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_1.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_2.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_3.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_4.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_5.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_6.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_7.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_8.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_9.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_10.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_11.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_12.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_interrupt.dev_attr.attr,
    &sensor_dev_attr_cpld_sfp_port_status.dev_attr.attr,
    &sensor_dev_attr_cpld_sfp_port_config.dev_attr.attr,
    &sensor_dev_attr_cpld_10gmux_config.dev_attr.attr,
    &sensor_dev_attr_cpld_bmc_status.dev_attr.attr,
    &sensor_dev_attr_cpld_bmc_watchdog.dev_attr.attr,
    &sensor_dev_attr_cpld_usb_status.dev_attr.attr,
    &sensor_dev_attr_cpld_reset_control.dev_attr.attr,
    &sensor_dev_attr_cpld_sfp_led.dev_attr.attr,
    &sensor_dev_attr_cpld_sfp_led_blink.dev_attr.attr,
    &sensor_dev_attr_cpld_rtmr_reset.dev_attr.attr,
    NULL
};

/* cpld 2 / cpld 3 / cpld 4 / cpld 5: 13 QSFP ports each plus the QSFP
 * LED registers */
static struct attribute *s9280_64x_cpld2345_attributes[] = {
    &sensor_dev_attr_cpld_access_register.dev_attr.attr,
    &sensor_dev_attr_cpld_register_value.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_start.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_ports.dev_attr.attr,
    &sensor_dev_attr_cpld_version.dev_attr.attr,
    &sensor_dev_attr_cpld_id.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_1.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_2.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_3.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_4.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_5.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_6.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_7.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_8.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_9.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_10.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_11.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_12.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_status_13.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_1.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_2.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_3.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_4.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_5.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_6.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_7.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_8.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_9.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_10.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_11.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_12.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_config_13.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_port_interrupt.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_1.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_2.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_3.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_4.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_5.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_6.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_7.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_8.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_9.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_10.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_11.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_12.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_13.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_14.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_15.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_16.dev_attr.attr,
    &sensor_dev_attr_cpld_qsfp_led_blink.dev_attr.attr,
    NULL
};
&sensor_dev_attr_cpld_qsfp_led_14.dev_attr.attr, + &sensor_dev_attr_cpld_qsfp_led_15.dev_attr.attr, + &sensor_dev_attr_cpld_qsfp_led_16.dev_attr.attr, + &sensor_dev_attr_cpld_qsfp_led_blink.dev_attr.attr, + NULL +}; + +/* cpld 1 attributes group */ +static const struct attribute_group s9280_64x_cpld1_group = { + .attrs = s9280_64x_cpld1_attributes, +}; +/* cpld 2/3/4/5 attributes group */ +static const struct attribute_group s9280_64x_cpld2345_group = { + .attrs = s9280_64x_cpld2345_attributes, +}; + +/* read access register from cpld data */ +static ssize_t read_access_register(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg = data->access_reg; + + return sprintf(buf, "0x%x\n", reg); +} + +/* write access register to cpld data */ +static ssize_t write_access_register(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + + if (kstrtou8(buf, 0, ®) < 0) + return -EINVAL; + + data->access_reg = reg; + return count; +} + +/* read the value of access register in cpld data */ +static ssize_t read_register_value(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg = data->access_reg; + int reg_val; + + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + + if (reg_val < 0) + return -1; + + return sprintf(buf, "0x%x\n", reg_val); +} + +/* wrtie the value to access register in cpld data */ +static ssize_t write_register_value(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + int ret = -EIO; + u8 reg = 
data->access_reg; + u8 reg_val; + + if (kstrtou8(buf, 0, ®_val) < 0) + return -EINVAL; + + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, client, reg, reg_val); + + return count; +} + +/* get qsfp port start number of the cpld device */ +/* the start number use to tranlate qsfp port to cpld port */ +/* the cpld port use to access the qsfp port register in cpld */ +static ssize_t get_qsfp_port_start(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + int port_base_num; + + if (attr->index == CPLD_PORT_START) { + if (data->index == cpld1) { + port_base_num = 1; + } else { + port_base_num = CPLD_1_PORT_NUM + + CPLD_2_PORT_NUM*(data->index - 1) + 1; + } + return sprintf(buf, "%d\n", port_base_num); + } + return -1; +} + +/* get total qsfp port which contain register in the cpld device */ +static ssize_t get_qsfp_ports(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + int ports; + + if (attr->index == CPLD_PORTS) { + if (data->index == cpld1) + ports = CPLD_1_PORT_NUM; + else + ports = CPLD_2_PORT_NUM; + return sprintf(buf, "%d\n", ports); + } + return -1; +} + +/* get cpdl version register value */ +static ssize_t read_cpld_version(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_VERSION) { + reg = CPLD_VERSION_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return 
-1; +} + +/* get cpdl id register value */ +static ssize_t read_cpld_id(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_ID) { + reg = CPLD_ID_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* get board type register value */ +static ssize_t read_board_type(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_BOARD_TYPE) { + reg = CPLD_BOARD_TYPE_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* get extend board type register value */ +static ssize_t read_ext_board_type(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_EXT_BOARD_TYPE) { + reg = CPLD_EXT_BOARD_TYPE_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* get qsfp port status register value */ +static ssize_t read_qsfp_port_status(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = 
i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index >= CPLD_QSFP_PORT_STATUS_1 && + attr->index <= CPLD_QSFP_PORT_STATUS_13) { + reg = CPLD_QSFP_PORT_STATUS_BASE_REG + + (attr->index - CPLD_QSFP_PORT_STATUS_1); + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* get qsfp port config register value */ +static ssize_t read_qsfp_port_config(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index >= CPLD_QSFP_PORT_CONFIG_1 && + attr->index <= CPLD_QSFP_PORT_CONFIG_13) { + reg = CPLD_QSFP_PORT_CONFIG_BASE_REG + + (attr->index - CPLD_QSFP_PORT_CONFIG_1); + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* set value to qsfp port config register */ +static ssize_t write_qsfp_port_config(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg, reg_val; + int ret; + + if (kstrtou8(buf, 0, ®_val) < 0) + return -EINVAL; + + if (attr->index >= CPLD_QSFP_PORT_CONFIG_1 && + attr->index <= CPLD_QSFP_PORT_CONFIG_13) { + reg = CPLD_QSFP_PORT_CONFIG_BASE_REG + + (attr->index - CPLD_QSFP_PORT_CONFIG_1); + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, + client, reg, reg_val); + } + return count; +} + +/* get qsfp port interrupt register value */ +static ssize_t read_qsfp_port_interrupt(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = 
to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_QSFP_PORT_INTERRUPT) { + reg = CPLD_QSFP_PORT_INTERRUPT_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* get sfp port status register value */ +static ssize_t read_sfp_port_status(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_SFP_PORT_STATUS) { + reg = CPLD_SFP_PORT_STATUS_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* get sfp port config register value */ +static ssize_t read_sfp_port_config(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_SFP_PORT_CONFIG) { + reg = CPLD_SFP_PORT_CONFIG_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* set value to sfp port config register */ +static ssize_t write_sfp_port_config(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg, reg_val; + int ret; + + if (kstrtou8(buf, 0, ®_val) < 0) + return -EINVAL; 
+ + if (attr->index == CPLD_SFP_PORT_CONFIG) { + reg = CPLD_SFP_PORT_CONFIG_REG; + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, + client, reg, reg_val); + } + return count; +} + +/* get 10g mux config register value */ +static ssize_t read_10gmux_config(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_10GMUX_CONFIG) { + reg = CPLD_10GMUX_CONFIG_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* set value to 10g mux config register */ +static ssize_t write_10gmux_config(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg, reg_val; + int ret; + + if (kstrtou8(buf, 0, ®_val) < 0) + return -EINVAL; + + if (attr->index == CPLD_10GMUX_CONFIG) { + reg = CPLD_10GMUX_CONFIG_REG; + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, + client, reg, reg_val); + } + return count; +} + +/* get bmc status register value */ +static ssize_t read_bmc_status(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_BMC_STATUS) { + reg = CPLD_BMC_STATUS_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* get bmc watchdog register value */ +static ssize_t read_bmc_watchdog(struct 
device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_BMC_WATCHDOG) { + reg = CPLD_BMC_WATCHDOG_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* set value to bmc watchdog register */ +static ssize_t write_bmc_watchdog(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg, reg_val; + int ret; + + if (kstrtou8(buf, 0, ®_val) < 0) + return -EINVAL; + + if (attr->index == CPLD_BMC_WATCHDOG) { + reg = CPLD_BMC_WATCHDOG_REG; + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, + client, reg, reg_val); + } + return count; +} + +/* get usb status register value */ +static ssize_t read_usb_status(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_USB_STATUS) { + reg = CPLD_USB_STATUS_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* get reset control register value */ +static ssize_t read_reset_control(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if 
(attr->index == CPLD_RESET_CONTROL) { + reg = CPLD_RESET_CONTROL_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* set value to reset control register */ +static ssize_t write_reset_control(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg, reg_val; + int ret; + + if (kstrtou8(buf, 0, ®_val) < 0) + return -EINVAL; + + if (attr->index == CPLD_RESET_CONTROL) { + reg = CPLD_RESET_CONTROL_REG; + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, + client, reg, reg_val); + } + return count; +} + +/* get sfp port 0/1 led register */ +static ssize_t read_sfp_led(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_SFP_LED) { + reg = CPLD_SFP_LED_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* set value to sfp 0/1 port led register */ +static ssize_t write_sfp_led(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg, reg_val; + int ret; + + if (kstrtou8(buf, 0, ®_val) < 0) + return -EINVAL; + + if (attr->index == CPLD_SFP_LED) { + reg = CPLD_SFP_LED_REG; + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, + client, reg, reg_val); + } + return count; +} + +/* get sfp port 0/1 led blink 
register */ +static ssize_t read_sfp_led_blink(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_SFP_LED_BLINK) { + reg = CPLD_SFP_LED_BLINK_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* set value to sfp port 0/1 led blink register */ +static ssize_t write_sfp_led_blink(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg, reg_val; + int ret; + + if (kstrtou8(buf, 0, ®_val) < 0) + return -EINVAL; + + if (attr->index == CPLD_SFP_LED_BLINK) { + reg = CPLD_SFP_LED_BLINK_REG; + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, + client, reg, reg_val); + } + return count; +} + +/* get qsfp port led register */ +static ssize_t read_qsfp_led(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index >= CPLD_QSFP_LED_1 && + attr->index <= CPLD_QSFP_LED_16) { + reg = CPLD_QSFP_LED_BASE_REG + (attr->index - CPLD_QSFP_LED_1); + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* set value to qsfp port led register */ +static ssize_t write_qsfp_led(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = 
to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg, reg_val; + int ret; + + if (kstrtou8(buf, 0, ®_val) < 0) + return -EINVAL; + + if (attr->index >= CPLD_QSFP_LED_1 && + attr->index <= CPLD_QSFP_LED_16) { + reg = CPLD_QSFP_LED_BASE_REG + (attr->index - CPLD_QSFP_LED_1); + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, + client, reg, reg_val); + } + return count; +} + +/* get qsfp 16 port led blink register value in 64 bit map */ +/* + each register for 2 port, each port has 4 channel for led blink + bit 64 56 48 40 32 24 16 8 0 + port 16/15 14/13 12/11 10/9 8/7 6/5 4/3 2/1 + */ +static ssize_t read_qsfp_led_blink(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val, i; + u64 bitmap = 0; + + if (attr->index == CPLD_QSFP_LED_BLINK) { + for (i = 0; i <= 7; i++) { + reg = CPLD_QSFP_LED_BLINK_BASE_REG + i; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + reg_val = reg_val & 0xff; + bitmap = bitmap | (reg_val<<(i*8)); + } + return sprintf(buf, "0x%llx\n", bitmap); + } + return -1; +} + +/* set 64 bit map value to qsfp port led blink register */ +/* + each register for 2 port, each port has 4 channel for led blink + bit 63 56 48 40 32 24 16 8 0 + port 16/15 14/13 12/11 10/9 8/7 6/5 4/3 2/1 + */ +static ssize_t write_qsfp_led_blink(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg, reg_val, i; + int ret; + u64 bitmap = 0; + + if (kstrtou64(buf, 0, &bitmap) < 0) + return -EINVAL; + + if (attr->index == CPLD_QSFP_LED_BLINK) 
{ + for (i = 0; i <= 7; i++) { + reg = CPLD_QSFP_LED_BLINK_BASE_REG + i; + reg_val = (u8)((bitmap >> i*8) & 0xFF); + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, + client, reg, reg_val); + } + } + return count; +} + +/* get retimer reset register */ +static ssize_t read_rtmr_reset(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg; + int reg_val; + + if (attr->index == CPLD_RTMR_RESET) { + reg = CPLD_RTMR_RESET_REG; + I2C_READ_BYTE_DATA(reg_val, &data->access_lock, client, reg); + if (reg_val < 0) + return -1; + return sprintf(buf, "0x%02x\n", reg_val); + } + return -1; +} + +/* set value to retimer reset register */ +static ssize_t write_rtmr_reset(struct device *dev, + struct device_attribute *da, + const char *buf, + size_t count) +{ + struct sensor_device_attribute *attr = to_sensor_dev_attr(da); + struct i2c_client *client = to_i2c_client(dev); + struct cpld_data *data = i2c_get_clientdata(client); + u8 reg, reg_val; + int ret; + + if (kstrtou8(buf, 0, ®_val) < 0) + return -EINVAL; + + if (attr->index == CPLD_RTMR_RESET) { + reg = CPLD_RTMR_RESET_REG; + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, + client, reg, reg_val); + } + return count; +} + +/* add valid cpld client to list */ +static void ingrasys_i2c_cpld_add_client(struct i2c_client *client) +{ + struct cpld_client_node *node = NULL; + + node = kzalloc(sizeof(struct cpld_client_node), GFP_KERNEL); + if (!node) { + dev_info(&client->dev, + "Can't allocate cpld_client_node for index %d\n", + client->addr); + return; + } + + node->client = client; + + mutex_lock(&list_lock); + list_add(&node->list, &cpld_client_list); + mutex_unlock(&list_lock); +} + +/* remove exist cpld client in list */ +static void ingrasys_i2c_cpld_remove_client(struct i2c_client *client) +{ + struct list_head *list_node = NULL; + struct 
cpld_client_node *cpld_node = NULL; + int found = 0; + + mutex_lock(&list_lock); + list_for_each(list_node, &cpld_client_list) { + cpld_node = list_entry(list_node, + struct cpld_client_node, list); + + if (cpld_node->client == client) { + found = 1; + break; + } + } + + if (found) { + list_del(list_node); + kfree(cpld_node); + } + mutex_unlock(&list_lock); +} + +/* cpld drvier probe */ +static int ingrasys_i2c_cpld_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + int status; + struct cpld_data *data = NULL; + int ret = -EPERM; + int idx; + + data = kzalloc(sizeof(struct cpld_data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + /* init cpld data for client */ + i2c_set_clientdata(client, data); + mutex_init(&data->access_lock); + + if (!i2c_check_functionality(client->adapter, + I2C_FUNC_SMBUS_BYTE_DATA)) { + dev_info(&client->dev, + "i2c_check_functionality failed (0x%x)\n", + client->addr); + status = -EIO; + goto exit; + } + + /* get cpld id from device */ + ret = i2c_smbus_read_byte_data(client, CPLD_ID_REG); + + if (ret < 0) { + dev_info(&client->dev, + "fail to get cpld id (0x%x) at addr (0x%x)\n", + CPLD_ID_REG, client->addr); + status = -EIO; + goto exit; + } + + CPLD_ID_ID_GET(ret, idx); + + if (INVALID(idx, cpld1, cpld5)) { + dev_info(&client->dev, + "cpld id %d(device) not valid\n", idx); + //status = -EPERM; + //goto exit; + } + +#if 0 + /* change client name for each cpld with index */ + snprintf(client->name, sizeof(client->name), "%s_%d", client->name, + data->index); +#endif + + data->index = dev_id->driver_data; + + /* register sysfs hooks for different cpld group */ + dev_info(&client->dev, "probe cpld with index %d\n", data->index); + switch (data->index) { + case cpld1: + status = sysfs_create_group(&client->dev.kobj, + &s9280_64x_cpld1_group); + break; + case cpld2: + case cpld3: + case cpld4: + case cpld5: + status = sysfs_create_group(&client->dev.kobj, + &s9280_64x_cpld2345_group); + break; + default: + status = 
-EINVAL; + } + + if (status) + goto exit; + + dev_info(&client->dev, "chip found\n"); + + /* add probe chip to client list */ + ingrasys_i2c_cpld_add_client(client); + + return 0; +exit: + sysfs_remove_group(&client->dev.kobj, &s9280_64x_cpld2345_group); + return status; +} + +/* cpld drvier remove */ +static int ingrasys_i2c_cpld_remove(struct i2c_client *client) +{ + struct cpld_data *data = i2c_get_clientdata(client); + + switch (data->index) { + case cpld1: + sysfs_remove_group(&client->dev.kobj, &s9280_64x_cpld1_group); + break; + case cpld2: + case cpld3: + case cpld4: + case cpld5: + sysfs_remove_group(&client->dev.kobj, + &s9280_64x_cpld2345_group); + break; + } + + ingrasys_i2c_cpld_remove_client(client); + return 0; +} + +MODULE_DEVICE_TABLE(i2c, ingrasys_i2c_cpld_id); + +static struct i2c_driver ingrasys_i2c_cpld_driver = { + .class = I2C_CLASS_HWMON, + .driver = { + .name = "ingrasys_i2c_cpld", + }, + .probe = ingrasys_i2c_cpld_probe, + .remove = ingrasys_i2c_cpld_remove, + .id_table = ingrasys_i2c_cpld_id, + .address_list = cpld_i2c_addr, +}; + +/* provid cpld register read */ +/* cpld_idx indicate the index of cpld device */ +int ingrasys_i2c_cpld_read(u8 cpld_idx, + u8 reg) +{ + struct list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EPERM; + struct cpld_data *data; + + list_for_each(list_node, &cpld_client_list) { + cpld_node = list_entry(list_node, + struct cpld_client_node, list); + data = i2c_get_clientdata(cpld_node->client); + if (data->index == cpld_idx) { + DEBUG_PRINT("cpld_idx=%d, read reg 0x%02x", + cpld_idx, reg); + I2C_READ_BYTE_DATA(ret, &data->access_lock, + cpld_node->client, reg); + DEBUG_PRINT("cpld_idx=%d, read reg 0x%02x = 0x%02x", + cpld_idx, reg, ret); + break; + } + } + + return ret; +} +EXPORT_SYMBOL(ingrasys_i2c_cpld_read); + +/* provid cpld register write */ +/* cpld_idx indicate the index of cpld device */ +int ingrasys_i2c_cpld_write(u8 cpld_idx, + u8 reg, + u8 value) +{ + struct 
list_head *list_node = NULL; + struct cpld_client_node *cpld_node = NULL; + int ret = -EIO; + struct cpld_data *data; + + list_for_each(list_node, &cpld_client_list) { + cpld_node = list_entry(list_node, + struct cpld_client_node, list); + data = i2c_get_clientdata(cpld_node->client); + + if (data->index == cpld_idx) { + I2C_WRITE_BYTE_DATA(ret, &data->access_lock, + cpld_node->client, + reg, value); + DEBUG_PRINT("cpld_idx=%d, write reg 0x%02x val 0x%02x, ret=%d", + cpld_idx, reg, value, ret); + break; + } + } + + return ret; +} +EXPORT_SYMBOL(ingrasys_i2c_cpld_write); + +/* provid qsfp port status register read */ +/* port_num indicate the front panel qsfp port number */ +int ingrasys_i2c_cpld_get_qsfp_port_status_val(u8 port_num) +{ + u8 cpld_idx, cpld_port, reg; + int reg_val; + + if (INVALID(port_num, QSFP_MIN_PORT_NUM, QSFP_MAX_PORT_NUM)) { + DEBUG_PRINT("invalid input value %d", port_num); + return -1; + } + QSFP_TO_CPLD_IDX(port_num, cpld_idx, cpld_port); + reg = QSFP_PORT_STATUS_REG(cpld_port); + DEBUG_PRINT("port_num=%d, cpld_idx=%d, cpld_port=%d, reg=0x%x", + port_num, cpld_idx, cpld_port, reg); + reg_val = ingrasys_i2c_cpld_read(cpld_idx, reg); + return reg_val; +} +EXPORT_SYMBOL(ingrasys_i2c_cpld_get_qsfp_port_status_val); + +/* provid qsfp port config register read */ +/* port_num indicate the front panel qsfp port number */ +int ingrasys_i2c_cpld_get_qsfp_port_config_val(u8 port_num) +{ + u8 cpld_idx, cpld_port, reg; + int reg_val; + + if (INVALID(port_num, QSFP_MIN_PORT_NUM, QSFP_MAX_PORT_NUM)) { + DEBUG_PRINT("invalid input value %d", port_num); + return -1; + } + QSFP_TO_CPLD_IDX(port_num, cpld_idx, cpld_port); + reg = QSFP_PORT_CONFIG_REG(cpld_port); + DEBUG_PRINT("port_num=%d, cpld_idx=%d, cpld_port=%d, reg=0x%x", + port_num, cpld_idx, cpld_port, reg); + reg_val = ingrasys_i2c_cpld_read(cpld_idx, reg); + return reg_val; +} +EXPORT_SYMBOL(ingrasys_i2c_cpld_get_qsfp_port_config_val); + +/* provid qsfp port config register write */ +/* port_num 
indicate the front panel qsfp port number */ +int ingrasys_i2c_cpld_set_qsfp_port_config_val(u8 port_num, + u8 reg_val) +{ + u8 cpld_idx, cpld_port, reg, ret; + + if (INVALID(port_num, QSFP_MIN_PORT_NUM, QSFP_MAX_PORT_NUM)) { + DEBUG_PRINT("invalid input value %d", port_num); + return -1; + } + QSFP_TO_CPLD_IDX(port_num, cpld_idx, cpld_port); + reg = QSFP_PORT_CONFIG_REG(cpld_port); + DEBUG_PRINT("port_num=%d, cpld_idx=%d, cpld_port=%d, reg=0x%x", + port_num, cpld_idx, cpld_port, reg); + ret = ingrasys_i2c_cpld_write(cpld_idx, reg, reg_val); + return ret; +} +EXPORT_SYMBOL(ingrasys_i2c_cpld_set_qsfp_port_config_val); + +/* provid sfp port 0/1 status register read */ +int ingrasys_i2c_cpld_get_sfp_port_status_val(void) +{ + u8 cpld_idx, reg; + int reg_val; + + cpld_idx = cpld1; + reg = CPLD_SFP_PORT_STATUS_REG; + DEBUG_PRINT("cpld_idx=%d, reg=0x%x", + cpld_idx, reg); + reg_val = ingrasys_i2c_cpld_read(cpld_idx, reg); + return reg_val; +} +EXPORT_SYMBOL(ingrasys_i2c_cpld_get_sfp_port_status_val); + +/* provid qsfp port config register read */ +/* port_num indicate the front panel qsfp port number */ +int ingrasys_i2c_cpld_get_sfp_port_config_val(void) +{ + u8 cpld_idx, reg; + int reg_val; + + cpld_idx = cpld1; + reg = CPLD_SFP_PORT_CONFIG_REG; + DEBUG_PRINT("cpld_idx=%d, reg=0x%x", + cpld_idx, reg); + reg_val = ingrasys_i2c_cpld_read(cpld_idx, reg); + return reg_val; +} +EXPORT_SYMBOL(ingrasys_i2c_cpld_get_sfp_port_config_val); + +/* provid qsfp port config register write */ +/* port_num indicate the front panel qsfp port number */ +int ingrasys_i2c_cpld_set_sfp_port_config_val(u8 reg_val) +{ + u8 cpld_idx, reg, ret; + + cpld_idx = cpld1; + reg = CPLD_SFP_PORT_CONFIG_REG; + DEBUG_PRINT("cpld_idx=%d, reg=0x%x", + cpld_idx, reg); + ret = ingrasys_i2c_cpld_write(cpld_idx, reg, reg_val); + return ret; +} +EXPORT_SYMBOL(ingrasys_i2c_cpld_set_sfp_port_config_val); + +static int __init ingrasys_i2c_cpld_init(void) +{ + mutex_init(&list_lock); + return 
i2c_add_driver(&ingrasys_i2c_cpld_driver); +} + +static void __exit ingrasys_i2c_cpld_exit(void) +{ + i2c_del_driver(&ingrasys_i2c_cpld_driver); +} + +MODULE_AUTHOR("Leo Lin "); +MODULE_DESCRIPTION("ingrasys_i2c_cpld driver"); +MODULE_LICENSE("GPL"); + +module_init(ingrasys_i2c_cpld_init); +module_exit(ingrasys_i2c_cpld_exit); + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_i2c_cpld.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_i2c_cpld.h new file mode 100644 index 00000000..08db7138 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_i2c_cpld.h @@ -0,0 +1,337 @@ +/* header file for i2c cpld driver of ingrasys_s9280_64x + * + * Copyright (C) 2017 Ingrasys Technology Corporation. + * Leo Lin + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#ifndef INGRASYS_S9280_64X_I2C_CPLD_H +#define INGRASYS_S9280_64X_I2C_CPLD_H + +/* CPLD device index value */ +enum cpld_id { + cpld1, + cpld2, + cpld3, + cpld4, + cpld5 +}; + +enum LED_BLINK { + BLINK, + NOBLINK, +}; + +enum LED_YELLOW { + YELLOW_OFF, + YELLOW_ON, +}; + +enum LED_GREEN { + GREEN_OFF, + GREEN_ON, +}; + +/* port number on CPLD */ +#define CPLD_1_PORT_NUM 12 +#define CPLD_2_PORT_NUM 13 + +/* QSFP port number */ +#define QSFP_MAX_PORT_NUM 64 +#define QSFP_MIN_PORT_NUM 1 + +/* SFP+ port number */ +#define SFP_MAX_PORT_NUM 2 +#define SFP_MIN_PORT_NUM 1 + + +/* CPLD registers */ +#define CPLD_BOARD_TYPE_REG 0x0 +#define CPLD_EXT_BOARD_TYPE_REG 0x7 +#define CPLD_VERSION_REG 0x1 +#define CPLD_ID_REG 0x2 +#define CPLD_QSFP_PORT_STATUS_BASE_REG 0x20 +#define CPLD_QSFP_PORT_CONFIG_BASE_REG 0x30 +#define CPLD_QSFP_PORT_INTERRUPT_REG 0x40 +#define CPLD_SFP_PORT_STATUS_REG 0x2F +#define CPLD_SFP_PORT_CONFIG_REG 0x3F +#define CPLD_QSFP_PORT_INTERRUPT_REG 0x40 +#define CPLD_10GMUX_CONFIG_REG 0x41 +#define CPLD_BMC_STATUS_REG 0x42 +#define CPLD_BMC_WATCHDOG_REG 0x43 +#define CPLD_USB_STATUS_REG 0x44 +#define CPLD_RESET_CONTROL_REG 0x4A +#define CPLD_SFP_LED_REG 0x80 +#define CPLD_SFP_LED_BLINK_REG 0x90 +#define CPLD_QSFP_LED_BASE_REG 0x80 +#define CPLD_QSFP_LED_BLINK_BASE_REG 0x90 +#define CPLD_RTMR_RESET_REG 0x4B + +/* bit definition for register value */ +enum CPLD_QSFP_PORT_STATUS_BITS { + CPLD_QSFP_PORT_STATUS_INT_BIT, + CPLD_QSFP_PORT_STATUS_ABS_BIT, +}; +enum CPLD_QSFP_PORT_CONFIG_BITS { + CPLD_QSFP_PORT_CONFIG_RESET_BIT, + CPLD_QSFP_PORT_CONFIG_RESERVE_BIT, + CPLD_QSFP_PORT_CONFIG_LPMODE_BIT, +}; +enum CPLD_SFP_PORT_STATUS_BITS { + CPLD_SFP0_PORT_STATUS_PRESENT_BIT, + CPLD_SFP0_PORT_STATUS_TXFAULT_BIT, + CPLD_SFP0_PORT_STATUS_RXLOS_BIT, + CPLD_SFP_PORT_STATUS_DUMMY, + CPLD_SFP1_PORT_STATUS_PRESENT_BIT, + CPLD_SFP1_PORT_STATUS_TXFAULT_BIT, + CPLD_SFP1_PORT_STATUS_RXLOS_BIT, +}; +enum CPLD_SFP_PORT_CONFIG_BITS { + CPLD_SFP0_PORT_CONFIG_TXDIS_BIT, + 
CPLD_SFP0_PORT_CONFIG_RS_BIT, + CPLD_SFP0_PORT_CONFIG_TS_BIT, + CPLD_SFP_PORT_CONFIG_DUMMY, + CPLD_SFP1_PORT_CONFIG_TXDIS_BIT, + CPLD_SFP1_PORT_CONFIG_RS_BIT, + CPLD_SFP1_PORT_CONFIG_TS_BIT, + +}; +enum CPLD_10GMUX_CONFIG_BITS { + CPLD_10GMUX_CONFIG_ENSMB_BIT, + CPLD_10GMUX_CONFIG_ENINPUT_BIT, + CPLD_10GMUX_CONFIG_SEL1_BIT, + CPLD_10GMUX_CONFIG_SEL0_BIT, +}; +enum CPLD_BMC_WATCHDOG_BITS { + CPLD_10GMUX_CONFIG_ENTIMER_BIT, + CPLD_10GMUX_CONFIG_TIMEOUT_BIT, +}; +enum CPLD_RESET_CONTROL_BITS { + CPLD_RESET_CONTROL_SWRST_BIT, + CPLD_RESET_CONTROL_CP2104RST_BIT, + CPLD_RESET_CONTROL_82P33814RST_BIT, + CPLD_RESET_CONTROL_BMCRST_BIT, +}; +enum CPLD_SFP_LED_BITS { + CPLD_SFP_LED_SFP0_GREEN_BIT, + CPLD_SFP_LED_SFP0_YELLOW_BIT, + CPLD_SFP_LED_SFP1_GREEN_BIT, + CPLD_SFP_LED_SFP1_YELLOW_BIT, +}; +enum CPLD_SFP_LED_BLINK_BITS { + CPLD_SFP_LED_BLINK_SFP0_BIT, + CPLD_SFP_LED_BLINK_SFP1_BIT, +}; +enum CPLD_QSFP_LED_BITS { + CPLD_QSFP_LED_CHAN_0_GREEN_BIT, + CPLD_QSFP_LED_CHAN_0_YELLOW_BIT, + CPLD_QSFP_LED_CHAN_1_GREEN_BIT, + CPLD_QSFP_LED_CHAN_1_YELLOW_BIT, + CPLD_QSFP_LED_CHAN_2_GREEN_BIT, + CPLD_QSFP_LED_CHAN_2_YELLOW_BIT, + CPLD_QSFP_LED_CHAN_3_GREEN_BIT, + CPLD_QSFP_LED_CHAN_3_YELLOW_BIT, + +}; +enum CPLD_QSFP_LED_BLINK_BITS { + CPLD_QSFP_LED_BLINK_X_CHAN0_BIT, + CPLD_QSFP_LED_BLINK_X_CHAN1_BIT, + CPLD_QSFP_LED_BLINK_X_CHAN2_BIT, + CPLD_QSFP_LED_BLINK_X_CHAN3_BIT, + CPLD_QSFP_LED_BLINK_XPLUS_CHAN0_BIT, + CPLD_QSFP_LED_BLINK_XPLUS_CHAN1_BIT, + CPLD_QSFP_LED_BLINK_XPLUS_CHAN2_BIT, + CPLD_QSFP_LED_BLINK_XPLUS_CHAN3_BIT, +}; + +/* bit field structure for register value */ +struct cpld_reg_board_type_t { + u8 build_rev:2; + u8 hw_rev:2; + u8 board_id:4; +}; + +struct cpld_reg_version_t { + u8 revision:6; + u8 release:1; + u8 reserve:1; +}; + +struct cpld_reg_id_t { + u8 id:3; + u8 release:5; +}; + +/* common manipulation */ +#define INVALID(i, min, max) ((i < min) || (i > max) ? 
1u : 0u) +#define READ_BIT(val, bit) ((0u == (val & (1<bf_name) +#define READ_BF_1(bf_struct, val, bf_name, bf_value) \ + bf_struct bf; \ + bf.data = val; \ + bf_value = bf.bf_name +#define BOARD_TYPE_BUILD_REV_GET(val, res) \ + READ_BF(cpld_reg_board_type_t, val, build_rev, res) +#define BOARD_TYPE_HW_REV_GET(val, res) \ + READ_BF(cpld_reg_board_type_t, val, hw_rev, res) +#define BOARD_TYPE_BOARD_ID_GET(val, res) \ + READ_BF(cpld_reg_board_type_t, val, board_id, res) +#define CPLD_VERSION_REV_GET(val, res) \ + READ_BF(cpld_reg_version_t, val, revision, res) +#define CPLD_VERSION_REL_GET(val, res) \ + READ_BF(cpld_reg_version_t, val, release, res) +#define CPLD_ID_ID_GET(val, res) \ + READ_BF(cpld_reg_id_t, val, id, res) +#define CPLD_ID_REL_GET(val, res) \ + READ_BF(cpld_reg_id_t, val, release, res) +/* SFP/QSFP port led registers manipulation */ +#define SFP_LED_TO_CPLD_IDX(sfp_port) cpld1 +#define SFP_LED_REG(sfp_port) CPLD_SFP_LED_REG +#define SFP_LED_BLINK_REG(sfp_port) CPLD_SFP_LED_BLINK_REG +#define QSFP_LED_TO_CPLD_IDX(qsfp_port) \ + ((qsfp_port - 1) / 16 + 2) +#define QSFP_LED_REG(qsfp_port) \ + ((qsfp_port - 1) % 16 + CPLD_QSFP_LED_BASE_REG) +#define QSFP_LED_BLINK_REG(qsfp_port) \ + (((qsfp_port - 1) % 16) / 2 + CPLD_QSFP_LED_BLINK_BASE_REG) +/* QSFP/SFP port status registers manipulation */ +#define QSFP_TO_CPLD_IDX(qsfp_port, cpld_index, cpld_port) \ +{ \ + if (QSFP_MIN_PORT_NUM <= qsfp_port && qsfp_port <= CPLD_1_PORT_NUM) { \ + cpld_index = cpld1; \ + cpld_port = qsfp_port - 1; \ + } else if (CPLD_1_PORT_NUM < qsfp_port \ + && qsfp_port <= QSFP_MAX_PORT_NUM) { \ + cpld_index = cpld2 + (qsfp_port - 1 - CPLD_1_PORT_NUM) \ + / CPLD_2_PORT_NUM; \ + cpld_port = (qsfp_port - 1 - CPLD_1_PORT_NUM) % \ + CPLD_2_PORT_NUM; \ + } else { \ + cpld_index = 0; \ + cpld_port = 0; \ + } \ +} +#define QSFP_PORT_STATUS_REG(cpld_port) \ + (CPLD_QSFP_PORT_STATUS_BASE_REG + cpld_port) +#define QSFP_PORT_CONFIG_REG(cpld_port) \ + (CPLD_QSFP_PORT_CONFIG_BASE_REG + cpld_port) 
+#define QSFP_PORT_INT_BIT_GET(port_status_value) \ + READ_BIT(port_status_value, CPLD_QSFP_PORT_STATUS_INT_BIT) +#define QSFP_PORT_ABS_BIT_GET(port_status_value) \ + READ_BIT(port_status_value, CPLD_QSFP_PORT_STATUS_ABS_BIT) +#define QSFP_PORT_RESET_BIT_GET(port_config_value) \ + READ_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_RESET_BIT) +#define QSFP_PORT_LPMODE_BIT_GET(port_config_value) \ + READ_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_LPMODE_BIT) +#define QSFP_PORT_RESET_BIT_SET(port_config_value) \ + SET_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_RESET_BIT) +#define QSFP_PORT_RESET_BIT_CLEAR(port_config_value) \ + CLEAR_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_RESET_BIT) +#define QSFP_PORT_LPMODE_BIT_SET(port_config_value) \ + SET_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_LPMODE_BIT) +#define QSFP_PORT_LPMODE_BIT_CLEAR(port_config_value) \ + CLEAR_BIT(port_config_value, CPLD_QSFP_PORT_CONFIG_LPMODE_BIT) +#define SFP_PORT_PRESENT_BIT_GET(sfp_port, port_status_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + READ_BIT(port_status_value, CPLD_SFP0_PORT_STATUS_PRESENT_BIT); \ + } else { \ + READ_BIT(port_status_value, CPLD_SFP1_PORT_STATUS_PRESENT_BIT); \ + } +#define SFP_PORT_TXFAULT_BIT_GET(sfp_port, port_status_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + READ_BIT(port_status_value, CPLD_SFP0_PORT_STATUS_TXFAULT_BIT); \ + } else { \ + READ_BIT(port_status_value, CPLD_SFP1_PORT_STATUS_TXFAULT_BIT); \ + } +#define SFP_PORT_RXLOS_BIT_GET(sfp_port, port_status_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + READ_BIT(port_status_value, CPLD_SFP0_PORT_STATUS_RXLOS_BIT); \ + } else { \ + READ_BIT(port_status_value, CPLD_SFP1_PORT_STATUS_RXLOS_BIT); \ + } +#define SFP_PORT_TXDIS_BIT_GET(sfp_port, port_config_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + READ_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TXDIS_BIT); \ + } else { \ + READ_BIT(port_config_value, CPLD_SFP1_PORT_STATUS_RXLOS_BIT); \ + } +#define SFP_PORT_RS_BIT_GET(sfp_port, 
port_config_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + READ_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_RS_BIT); \ + } else { \ + READ_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_RS_BIT); \ + } +#define SFP_PORT_TS_BIT_GET(sfp_port, port_config_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + READ_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TS_BIT); \ + } else { \ + READ_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TS_BIT); \ + } +#define SFP_PORT_TXDIS_BIT_SET(sfp_port, port_config_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + SET_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TXDIS_BIT); \ + } else { \ + SET_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_TXDIS_BIT); \ + } +#define SFP_PORT_TXDIS_BIT_CLEAR(sfp_port, port_config_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + CLEAR_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TXDIS_BIT); \ + } else { \ + CLEAR_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_TXDIS_BIT); \ + } +#define SFP_PORT_RS_BIT_SET(sfp_port, port_config_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + SET_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_RS_BIT); \ + } else { \ + SET_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_RS_BIT); \ + } +#define SFP_PORT_RS_BIT_CLEAR(sfp_port, port_config_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + CLEAR_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_RS_BIT); \ + } else { \ + CLEAR_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_RS_BIT); \ + } +#define SFP_PORT_TS_BIT_SET(sfp_port, port_config_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + SET_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TS_BIT); \ + } else { \ + SET_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_TS_BIT); \ + } +#define SFP_PORT_TS_BIT_CLEAR(sfp_port, port_config_value) \ + if (sfp_port == SFP_MIN_PORT_NUM) { \ + CLEAR_BIT(port_config_value, CPLD_SFP0_PORT_CONFIG_TS_BIT); \ + } else { \ + CLEAR_BIT(port_config_value, CPLD_SFP1_PORT_CONFIG_TS_BIT); \ + } + +/* CPLD access functions */ +extern int 
ingrasys_i2c_cpld_get_qsfp_port_status_val(u8 port_num); +extern int ingrasys_i2c_cpld_get_qsfp_port_config_val(u8 port_num); +extern int ingrasys_i2c_cpld_set_qsfp_port_config_val(u8 port_num, u8 reg_val); +extern int ingrasys_i2c_cpld_get_sfp_port_status_val(void); +extern int ingrasys_i2c_cpld_get_sfp_port_config_val(void); +extern int ingrasys_i2c_cpld_set_sfp_port_config_val(u8 reg_val); +extern u8 fp_port_to_phy_port(u8 fp_port); +#endif + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_platform.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_platform.h new file mode 100644 index 00000000..08f14c0b --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_platform.h @@ -0,0 +1,158 @@ +#ifndef _S9230_64X_PLATFORM_H +#define _S9230_64X_PLATFORM_H + +#include + +// remove debug before release +#define DEBUG + +enum bus_order { + I2C_BUS_MAIN, + MUX_9548_0_CH0, + MUX_9548_0_CH1, + MUX_9548_0_CH2, + MUX_9548_0_CH3, + MUX_9548_0_CH4, + MUX_9548_0_CH5, + MUX_9548_0_CH6, + MUX_9548_0_CH7, + MUX_9548_1_CH0, + MUX_9548_1_CH1, + MUX_9548_1_CH2, + MUX_9548_1_CH3, + MUX_9548_1_CH4, + MUX_9548_1_CH5, + MUX_9548_1_CH6, + MUX_9548_1_CH7, + MUX_9548_2_CH0, + MUX_9548_2_CH1, + MUX_9548_2_CH2, + MUX_9548_2_CH3, + MUX_9548_2_CH4, + MUX_9548_2_CH5, + MUX_9548_2_CH6, + MUX_9548_2_CH7, + MUX_9546_0_CH0, + MUX_9546_0_CH1, + MUX_9546_0_CH2, + MUX_9546_0_CH3, + MUX_9546_1_CH0, + MUX_9546_1_CH1, + MUX_9546_1_CH2, + MUX_9546_1_CH3, + MUX_9548_11_CH0, + MUX_9548_11_CH1, + MUX_9548_11_CH2, + MUX_9548_11_CH3, + MUX_9548_11_CH4, + MUX_9548_11_CH5, + MUX_9548_11_CH6, + MUX_9548_11_CH7, + MUX_9548_3_CH0, + MUX_9548_3_CH1, + MUX_9548_3_CH2, + MUX_9548_3_CH3, + MUX_9548_3_CH4, + MUX_9548_3_CH5, + MUX_9548_3_CH6, + MUX_9548_3_CH7, + MUX_9548_4_CH0, + MUX_9548_4_CH1, + MUX_9548_4_CH2, + MUX_9548_4_CH3, + MUX_9548_4_CH4, + 
MUX_9548_4_CH5, + MUX_9548_4_CH6, + MUX_9548_4_CH7, + MUX_9548_5_CH0, + MUX_9548_5_CH1, + MUX_9548_5_CH2, + MUX_9548_5_CH3, + MUX_9548_5_CH4, + MUX_9548_5_CH5, + MUX_9548_5_CH6, + MUX_9548_5_CH7, + MUX_9548_6_CH0, + MUX_9548_6_CH1, + MUX_9548_6_CH2, + MUX_9548_6_CH3, + MUX_9548_6_CH4, + MUX_9548_6_CH5, + MUX_9548_6_CH6, + MUX_9548_6_CH7, + MUX_9548_7_CH0, + MUX_9548_7_CH1, + MUX_9548_7_CH2, + MUX_9548_7_CH3, + MUX_9548_7_CH4, + MUX_9548_7_CH5, + MUX_9548_7_CH6, + MUX_9548_7_CH7, + MUX_9548_8_CH0, + MUX_9548_8_CH1, + MUX_9548_8_CH2, + MUX_9548_8_CH3, + MUX_9548_8_CH4, + MUX_9548_8_CH5, + MUX_9548_8_CH6, + MUX_9548_8_CH7, + MUX_9548_9_CH0, + MUX_9548_9_CH1, + MUX_9548_9_CH2, + MUX_9548_9_CH3, + MUX_9548_9_CH4, + MUX_9548_9_CH5, + MUX_9548_9_CH6, + MUX_9548_9_CH7, + MUX_9548_10_CH0, + MUX_9548_10_CH1, + MUX_9548_10_CH2, + MUX_9548_10_CH3, + MUX_9548_10_CH4, + MUX_9548_10_CH5, + MUX_9548_10_CH6, + MUX_9548_10_CH7, +}; + +#define I2C_ADDR_MUX_9555_0 (0x20) +#define I2C_ADDR_MUX_9555_1 (0x24) +#define I2C_ADDR_MUX_9555_2 (0x25) +#define I2C_ADDR_MUX_9555_3 (0x26) +#define I2C_ADDR_MUX_9539_0 (0x76) +#define I2C_ADDR_MUX_9539_1 (0x76) +#define I2C_BUS_FAN_STATUS (I2C_BUS_MAIN) +#define I2C_BUS_SYS_LED (MUX_9548_1_CH1) +#define I2C_BUS_PSU_STATUS (I2C_BUS_MAIN) +#define I2C_ADDR_PSU_STATUS (I2C_ADDR_MUX_9555_2) + +#define NUM_OF_I2C_MUX (11) +#define NUM_OF_CPLD (5) +#define NUM_OF_QSFP_PORT (64) +#define NUM_OF_SFP_PORT (2) +#define QSFP_EEPROM_I2C_ADDR (0x50) + +enum gpio_reg { + REG_PORT0_IN, + REG_PORT1_IN, + REG_PORT0_OUT, + REG_PORT1_OUT, + REG_PORT0_POL, + REG_PORT1_POL, + REG_PORT0_DIR, + REG_PORT1_DIR, +}; + +struct ing_i2c_board_info { + int ch; + int size; + struct i2c_board_info *board_info; +}; + +struct i2c_init_data { + __u16 ch; + __u16 addr; + __u8 reg; + __u8 value; +}; + +#endif diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_psu.c 
b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_psu.c new file mode 100644 index 00000000..d92b6b9a --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/modules/builds/ingrasys_s9280_64x_psu.c @@ -0,0 +1,552 @@ +/* + * S9280-64x PSU driver + * + * Copyright (C) 2017 Ingrasys, Inc. + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#ifndef _S9230_64X_PLATFORM_H +#define _S9230_64X_PLATFORM_H + +#include + +// remove debug before release +#define DEBUG + +enum bus_order { + I2C_BUS_MAIN, + MUX_9548_0_CH0, + MUX_9548_0_CH1, + MUX_9548_0_CH2, + MUX_9548_0_CH3, + MUX_9548_0_CH4, + MUX_9548_0_CH5, + MUX_9548_0_CH6, + MUX_9548_0_CH7, + MUX_9548_1_CH0, + MUX_9548_1_CH1, + MUX_9548_1_CH2, + MUX_9548_1_CH3, + MUX_9548_1_CH4, + MUX_9548_1_CH5, + MUX_9548_1_CH6, + MUX_9548_1_CH7, + MUX_9548_2_CH0, + MUX_9548_2_CH1, + MUX_9548_2_CH2, + MUX_9548_2_CH3, + MUX_9548_2_CH4, + MUX_9548_2_CH5, + MUX_9548_2_CH6, + MUX_9548_2_CH7, + MUX_9546_0_CH0, + MUX_9546_0_CH1, + MUX_9546_0_CH2, + MUX_9546_0_CH3, + MUX_9546_1_CH0, + MUX_9546_1_CH1, + MUX_9546_1_CH2, + MUX_9546_1_CH3, + MUX_9548_11_CH0, + MUX_9548_11_CH1, + MUX_9548_11_CH2, + MUX_9548_11_CH3, + MUX_9548_11_CH4, + MUX_9548_11_CH5, + MUX_9548_11_CH6, + MUX_9548_11_CH7, + MUX_9548_3_CH0, + MUX_9548_3_CH1, + MUX_9548_3_CH2, + MUX_9548_3_CH3, + MUX_9548_3_CH4, + MUX_9548_3_CH5, + MUX_9548_3_CH6, + MUX_9548_3_CH7, + MUX_9548_4_CH0, + MUX_9548_4_CH1, + MUX_9548_4_CH2, + MUX_9548_4_CH3, + MUX_9548_4_CH4, + MUX_9548_4_CH5, + MUX_9548_4_CH6, + MUX_9548_4_CH7, + MUX_9548_5_CH0, + MUX_9548_5_CH1, + MUX_9548_5_CH2, + MUX_9548_5_CH3, + MUX_9548_5_CH4, + MUX_9548_5_CH5, + MUX_9548_5_CH6, + MUX_9548_5_CH7, + MUX_9548_6_CH0, + MUX_9548_6_CH1, + MUX_9548_6_CH2, + MUX_9548_6_CH3, + MUX_9548_6_CH4, + MUX_9548_6_CH5, + MUX_9548_6_CH6, + MUX_9548_6_CH7, + MUX_9548_7_CH0, + MUX_9548_7_CH1, + MUX_9548_7_CH2, + MUX_9548_7_CH3, + MUX_9548_7_CH4, + MUX_9548_7_CH5, + MUX_9548_7_CH6, + MUX_9548_7_CH7, + MUX_9548_8_CH0, + MUX_9548_8_CH1, + MUX_9548_8_CH2, + MUX_9548_8_CH3, + MUX_9548_8_CH4, + MUX_9548_8_CH5, + MUX_9548_8_CH6, + MUX_9548_8_CH7, + MUX_9548_9_CH0, + MUX_9548_9_CH1, + MUX_9548_9_CH2, + MUX_9548_9_CH3, + MUX_9548_9_CH4, + MUX_9548_9_CH5, + MUX_9548_9_CH6, + 
MUX_9548_9_CH7, + MUX_9548_10_CH0, + MUX_9548_10_CH1, + MUX_9548_10_CH2, + MUX_9548_10_CH3, + MUX_9548_10_CH4, + MUX_9548_10_CH5, + MUX_9548_10_CH6, + MUX_9548_10_CH7, +}; + +#define I2C_ADDR_MUX_9555_0 (0x20) +#define I2C_ADDR_MUX_9555_1 (0x24) +#define I2C_ADDR_MUX_9555_2 (0x25) +#define I2C_ADDR_MUX_9555_3 (0x26) +#define I2C_ADDR_MUX_9539_0 (0x76) +#define I2C_ADDR_MUX_9539_1 (0x76) +#define I2C_BUS_FAN_STATUS (I2C_BUS_MAIN) +#define I2C_BUS_SYS_LED (MUX_9548_1_CH1) +#define I2C_BUS_PSU_STATUS (I2C_BUS_MAIN) +#define I2C_ADDR_PSU_STATUS (I2C_ADDR_MUX_9555_2) + +#define NUM_OF_I2C_MUX (11) +#define NUM_OF_CPLD (5) +#define NUM_OF_QSFP_PORT (64) +#define NUM_OF_SFP_PORT (2) +#define QSFP_EEPROM_I2C_ADDR (0x50) + +enum gpio_reg { + REG_PORT0_IN, + REG_PORT1_IN, + REG_PORT0_OUT, + REG_PORT1_OUT, + REG_PORT0_POL, + REG_PORT1_POL, + REG_PORT0_DIR, + REG_PORT1_DIR, +}; + +struct ing_i2c_board_info { + int ch; + int size; + struct i2c_board_info *board_info; +}; + +struct i2c_init_data { + __u16 ch; + __u16 addr; + __u8 reg; + __u8 value; +}; + +#endif + +static ssize_t show_psu_eeprom(struct device *dev, + struct device_attribute *da, + char *buf); +static struct s9280_psu_data *s9280_psu_update_status(struct device *dev); +static struct s9280_psu_data *s9280_psu_update_eeprom(struct device *dev); +static int s9280_psu_read_block(struct i2c_client *client, + u8 command, + u8 *data, + int data_len); + + +#define DRIVER_NAME "psu" + +// Addresses scanned +static const unsigned short normal_i2c[] = { 0x50, I2C_CLIENT_END }; + +/* PSU EEPROM SIZE */ +#define EEPROM_SZ 256 +#define READ_EEPROM 1 +#define NREAD_EEPROM 0 + +static struct i2c_client pca9555_client; + +/* pca9555 gpio pin mapping */ +#define PSU2_PWROK 0 +#define PSU2_PRSNT_L 1 +#define PSU2_PWRON_L 2 +#define PSU1_PWROK 3 +#define PSU1_PRSNT_L 4 +#define PSU1_PWRON_L 5 +#define TMP_75_INT_L 6 + +/* Driver Private Data */ +struct s9280_psu_data { + struct mutex lock; + char valid; /* !=0 if registers are valid 
*/ + unsigned long last_updated; /* In jiffies */ + u8 index; /* PSU index */ + s32 status; /* IO expander value */ + char eeprom[EEPROM_SZ]; /* psu eeprom data */ + char psuABS; /* PSU absent */ + char psuPG; /* PSU power good */ +}; + +enum psu_index +{ + s9280_psu1, + s9280_psu2 +}; + +/* + * display power good attribute + */ +static ssize_t +show_psu_pg(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct s9280_psu_data *data = s9280_psu_update_status(dev); + unsigned int value; + + mutex_lock(&data->lock); + value = data->psuPG; + mutex_unlock(&data->lock); + + return sprintf(buf, "%d\n", value); +} + +/* + * display power absent attribute + */ +static ssize_t +show_psu_abs(struct device *dev, + struct device_attribute *devattr, + char *buf) +{ + struct s9280_psu_data *data = s9280_psu_update_status(dev); + unsigned int value; + + mutex_lock(&data->lock); + value = data->psuABS; + mutex_unlock(&data->lock); + + return sprintf(buf, "%d\n", value); +} + + +/* + * sysfs attributes for psu + */ +static DEVICE_ATTR(psu_pg, S_IRUGO, show_psu_pg, NULL); +static DEVICE_ATTR(psu_abs, S_IRUGO, show_psu_abs, NULL); +static DEVICE_ATTR(psu_eeprom, S_IRUGO, show_psu_eeprom, NULL); + +static struct attribute *s9280_psu_attributes[] = { + &dev_attr_psu_pg.attr, + &dev_attr_psu_abs.attr, + &dev_attr_psu_eeprom.attr, + NULL +}; + +/* + * display psu eeprom content + */ +static ssize_t +show_psu_eeprom(struct device *dev, + struct device_attribute *da, + char *buf) +{ + struct s9280_psu_data *data = s9280_psu_update_eeprom(dev); + + memcpy(buf, (char *)data->eeprom, EEPROM_SZ); + return EEPROM_SZ; +} + +static const struct attribute_group s9280_psu_group = { + .attrs = s9280_psu_attributes, +}; + +/* + * check gpio expander is accessible + */ +static int +pca9555_detect(struct i2c_client *client) +{ + if (i2c_smbus_read_byte_data(client, REG_PORT0_DIR) < 0) { + return -ENODEV; + } + + return 0; +} + +/* + * client init + */ +static void 
+i2c_devices_client_address_init(struct i2c_client *client) +{ + pca9555_client = *client; + + /* get i2c adapter for the target */ + pca9555_client.adapter = i2c_get_adapter(I2C_BUS_PSU_STATUS); + + /* get the i2c addr for the target */ + pca9555_client.addr = I2C_ADDR_PSU_STATUS; +} + +static int +s9280_psu_probe(struct i2c_client *client, + const struct i2c_device_id *dev_id) +{ + struct s9280_psu_data *data; + int status, err; + + if (!i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_I2C_BLOCK)) { + status = -EIO; + goto exit; + } + + data = kzalloc(sizeof(struct s9280_psu_data), GFP_KERNEL); + if (!data) { + status = -ENOMEM; + goto exit; + } + memset(data, 0, sizeof(struct s9280_psu_data)); + i2c_set_clientdata(client, data); + data->valid = 0; + data->index = dev_id->driver_data; + mutex_init(&data->lock); + + i2c_devices_client_address_init(client); + + err = pca9555_detect(&pca9555_client); + if (err) { + return err; + } + + dev_info(&client->dev, "chip found\n"); + + /* Register sysfs hooks */ + status = sysfs_create_group(&client->dev.kobj, &s9280_psu_group); + if (status) { + goto exit_free; + } + + return 0; + +exit_free: + kfree(data); +exit: + + return status; +} + +static int +s9280_psu_remove(struct i2c_client *client) +{ + struct s9280_psu_data *data = i2c_get_clientdata(client); + + sysfs_remove_group(&client->dev.kobj, &s9280_psu_group); + kfree(data); + + /* free i2c adapter */ + i2c_put_adapter(pca9555_client.adapter); + + return 0; +} + + +/* + * psu eeprom read utility + */ +static int +s9280_psu_read_block(struct i2c_client *client, + u8 command, + u8 *data, + int data_len) +{ + int i=0, ret=0; + int blk_max = 32; //max block read size + + /* read eeprom, 32 * 8 = 256 bytes */ + for (i=0; i < EEPROM_SZ/blk_max; i++) { + ret = i2c_smbus_read_i2c_block_data(client, (i*blk_max), blk_max, + data + (i*blk_max)); + if (ret < 0) { + return ret; + } + } + return ret; +} + +/* + * update eeprom content + */ +static struct s9280_psu_data 
+*s9280_psu_update_eeprom(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct s9280_psu_data *data = i2c_get_clientdata(client); + s32 status = 0; + int psu_pwrok = 0; + int psu_prsnt_l = 0; + + mutex_lock(&data->lock); + + if (time_after(jiffies, data->last_updated + 300 * HZ) + || !data->valid) { + + /* Read psu status */ + + status = i2c_smbus_read_word_data(&(pca9555_client), REG_PORT0_IN); + data->status = status; + + /*read psu status from io expander*/ + + if (data->index == s9280_psu1) { + psu_pwrok = PSU1_PWROK; + psu_prsnt_l = PSU1_PRSNT_L; + } else { + psu_pwrok = PSU2_PWROK; + psu_prsnt_l = PSU2_PRSNT_L; + } + data->psuPG = (status >> psu_pwrok) & 0x1; + data->psuABS = (status >> psu_prsnt_l) & 0x1; + + /* Read eeprom */ + if (!data->psuABS) { + //clear local eeprom data + memset(data->eeprom, 0, EEPROM_SZ); + + //read eeprom + status = s9280_psu_read_block(client, 0, data->eeprom, + ARRAY_SIZE(data->eeprom)); + + if (status < 0) { + memset(data->eeprom, 0, EEPROM_SZ); + dev_err(&client->dev, "Read eeprom failed, status=(%d)\n", status); + } else { + data->valid = 1; + } + } else { + memset(data->eeprom, 0, EEPROM_SZ); + } + data->last_updated = jiffies; + } + + mutex_unlock(&data->lock); + + return data; +} + +/* + * update psu status + */ +static struct s9280_psu_data +*s9280_psu_update_status(struct device *dev) +{ + struct i2c_client *client = to_i2c_client(dev); + struct s9280_psu_data *data = i2c_get_clientdata(client); + s32 status = 0; + int psu_pwrok = 0; + int psu_prsnt_l = 0; + + mutex_lock(&data->lock); + + /* Read psu status */ + + status = i2c_smbus_read_word_data(&(pca9555_client), REG_PORT0_IN); + data->status = status; + + /*read psu status from io expander*/ + + if (data->index == s9280_psu1) { + psu_pwrok = PSU1_PWROK; + psu_prsnt_l = PSU1_PRSNT_L; + } else { + psu_pwrok = PSU2_PWROK; + psu_prsnt_l = PSU2_PRSNT_L; + } + data->psuPG = (status >> psu_pwrok) & 0x1; + data->psuABS = (status >> psu_prsnt_l) & 
0x1; + + mutex_unlock(&data->lock); + + return data; +} + +static const struct i2c_device_id s9280_psu_id[] = { + { "psu1", s9280_psu1 }, + { "psu2", s9280_psu2 }, + {} +}; + +MODULE_DEVICE_TABLE(i2c, s9280_psu_id); + +static struct i2c_driver s9280_psu_driver = { + .driver = { + .name = DRIVER_NAME, + }, + .probe = s9280_psu_probe, + .remove = s9280_psu_remove, + .id_table = s9280_psu_id, + .address_list = normal_i2c, +}; + +static int __init s9280_psu_init(void) +{ + return i2c_add_driver(&s9280_psu_driver); +} + +static void __exit s9280_psu_exit(void) +{ + i2c_del_driver(&s9280_psu_driver); +} + +module_init(s9280_psu_init); +module_exit(s9280_psu_exit); + +MODULE_AUTHOR("Leo Lin "); +MODULE_DESCRIPTION("S9280-64X psu driver"); +MODULE_LICENSE("GPL"); diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/Makefile new file mode 100755 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/PKG.yml b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/PKG.yml new file mode 100755 index 00000000..c488b9fa --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/onlp-platform-any.yml PLATFORM=x86-64-ingrasys-s9280-64x ARCH=amd64 TOOLCHAIN=x86_64-linux-gnu diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/Makefile new file mode 100755 index 00000000..e7437cb2 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/Makefile @@ -0,0 +1,2 @@ +FILTER=src +include $(ONL)/make/subdirs.mk diff --git 
a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/lib/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/lib/Makefile new file mode 100755 index 00000000..a47f76a4 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/lib/Makefile @@ -0,0 +1,45 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. 
+# +# +############################################################ +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +MODULE := libonlp-x86-64-ingrasys-s9280-64x +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF x86_64_ingrasys_s9280_64x onlplib +DEPENDMODULE_HEADERS := sff + +include $(BUILDER)/dependmodules.mk + +SHAREDLIB := libonlp-x86-64-ingrasys-s9280-64x.so +$(SHAREDLIB)_TARGETS := $(ALL_TARGETS) +include $(BUILDER)/so.mk +.DEFAULT_GOAL := $(SHAREDLIB) + +GLOBAL_CFLAGS += -I$(onlp_BASEDIR)/module/inc +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -fPIC +GLOBAL_LINK_LIBS += -lpthread + +include $(BUILDER)/targets.mk + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/onlpdump/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/onlpdump/Makefile new file mode 100755 index 00000000..84222f31 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/onlpdump/Makefile @@ -0,0 +1,45 @@ +############################################################ +# +# +# Copyright 2014 BigSwitch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. 
+# +# +############################################################ +# +# +# +############################################################ +include $(ONL)/make/config.amd64.mk + +.DEFAULT_GOAL := onlpdump + +MODULE := onlpdump +include $(BUILDER)/standardinit.mk + +DEPENDMODULES := AIM IOF onlp x86_64_ingrasys_s9280_64x onlplib onlp_platform_defaults sff cjson cjson_util timer_wheel OS + +include $(BUILDER)/dependmodules.mk + +BINARY := onlpdump +$(BINARY)_LIBRARIES := $(LIBRARY_TARGETS) +include $(BUILDER)/bin.mk + +GLOBAL_CFLAGS += -DAIM_CONFIG_AIM_MAIN_FUNCTION=onlpdump_main +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MODULES_INIT=1 +GLOBAL_CFLAGS += -DAIM_CONFIG_INCLUDE_MAIN=1 +GLOBAL_LINK_LIBS += -lpthread -lm + +include $(BUILDER)/targets.mk diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/.gitignore b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/.gitignore new file mode 100755 index 00000000..3c410127 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/.gitignore @@ -0,0 +1,2 @@ +/x86_64_ingrasys_s9280_64x.mk +/doc diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/.module b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/.module new file mode 100755 index 00000000..7599a81f --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/.module @@ -0,0 +1 @@ +name: x86_64_ingrasys_s9280_64x diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/Makefile new file mode 100755 index 00000000..a514eac9 --- /dev/null +++ 
b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/Makefile @@ -0,0 +1,10 @@ +############################################################ +# +# +# +############################################################ +include $(ONL)/make/config.mk + +MODULE := x86_64_ingrasys_s9280_64x +AUTOMODULE := x86_64_ingrasys_s9280_64x +include $(BUILDER)/definemodule.mk diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/auto/make.mk b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/auto/make.mk new file mode 100755 index 00000000..cd187a52 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/auto/make.mk @@ -0,0 +1,7 @@ +# +# x86_64_ingrasys_s9280_64x Autogeneration +# +############################################################################### +x86-64-ingrasys-s9280-64x_AUTO_DEFS := module/auto/x86-64-ingrasys-s9280-64x.yml +x86-64-ingrasys-s9280-64x_AUTO_DIRS := module/inc/x86-64-ingrasys-s9280-64x module/src +include $(BUILDER)/auto.mk diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/auto/x86-64-ingrasys-s9280-64x.yml b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/auto/x86-64-ingrasys-s9280-64x.yml new file mode 100755 index 00000000..2436efb8 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/auto/x86-64-ingrasys-s9280-64x.yml @@ -0,0 +1,47 @@ +############################################################################### +# +# X86_64_INGRASYS_S9280_64X Autogeneration Definitions. 
+# +############################################################################### + +cdefs: &cdefs +- X86_64_INGRASYS_S9280_64X_CONFIG_INCLUDE_LOGGING: + doc: "Include or exclude logging." + default: 1 +- X86_64_INGRASYS_S9280_64X_CONFIG_LOG_OPTIONS_DEFAULT: + doc: "Default enabled log options." + default: AIM_LOG_OPTIONS_DEFAULT +- X86_64_INGRASYS_S9280_64X_CONFIG_LOG_BITS_DEFAULT: + doc: "Default enabled log bits." + default: AIM_LOG_BITS_DEFAULT +- X86_64_INGRASYS_S9280_64X_CONFIG_LOG_CUSTOM_BITS_DEFAULT: + doc: "Default enabled custom log bits." + default: 0 +- X86_64_INGRASYS_S9280_64X_CONFIG_PORTING_STDLIB: + doc: "Default all porting macros to use the C standard libraries." + default: 1 +- X86_64_INGRASYS_S9280_64X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS: + doc: "Include standard library headers for stdlib porting macros." + default: X86_64_INGRASYS_S9280_64X_CONFIG_PORTING_STDLIB +- X86_64_INGRASYS_S9280_64X_CONFIG_INCLUDE_UCLI: + doc: "Include generic uCli support." + default: 0 + + +definitions: + cdefs: + X86_64_INGRASYS_S9280_64X_CONFIG_HEADER: + defs: *cdefs + basename: x86_64_ingrasys_s9280_64x_config + + portingmacro: + X86_64_INGRASYS_S9280_64X: + macros: + - malloc + - free + - memset + - memcpy + - strncpy + - vsnprintf + - snprintf + - strlen diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x.x b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x.x new file mode 100755 index 00000000..8f9330a4 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x.x @@ -0,0 +1,34 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch 
Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +/* <--auto.start.xmacro(ALL).define> */ +/* */ + +/* <--auto.start.xenum(ALL).define> */ +/* */ + + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x_config.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x_config.h new file mode 100755 index 00000000..19e6e7a6 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x_config.h @@ -0,0 +1,162 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ + +/**************************************************************************//** + * + * @file + * @brief x86_64_ingrasys_s9280_64x Configuration Header + * + * @addtogroup x86_64_ingrasys_s9280_64x-config + * @{ + * + *****************************************************************************/ +#ifndef __X86_64_INGRAYSYS_S9280_64X_CONFIG_H__ +#define __X86_64_INGRAYSYS_S9280_64X_CONFIG_H__ + +#ifdef GLOBAL_INCLUDE_CUSTOM_CONFIG +#include +#endif +#ifdef X86_64_INGRAYSYS_S9280_64X_INCLUDE_CUSTOM_CONFIG +#include +#endif + +/* */ +#include +/** + * X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_LOGGING + * + * Include or exclude logging. */ + + +#ifndef X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_LOGGING +#define X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_LOGGING 1 +#endif + +/** + * X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_OPTIONS_DEFAULT + * + * Default enabled log options. */ + + +#ifndef X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_OPTIONS_DEFAULT +#define X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_OPTIONS_DEFAULT AIM_LOG_OPTIONS_DEFAULT +#endif + +/** + * X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_BITS_DEFAULT + * + * Default enabled log bits. */ + + +#ifndef X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_BITS_DEFAULT +#define X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_BITS_DEFAULT AIM_LOG_BITS_DEFAULT +#endif + +/** + * X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_CUSTOM_BITS_DEFAULT + * + * Default enabled custom log bits. 
*/ + + +#ifndef X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_CUSTOM_BITS_DEFAULT +#define X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_CUSTOM_BITS_DEFAULT 0 +#endif + +/** + * X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB + * + * Default all porting macros to use the C standard libraries. */ + + +#ifndef X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB +#define X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB 1 +#endif + +/** + * X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + * + * Include standard library headers for stdlib porting macros. */ + + +#ifndef X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS +#define X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB +#endif + +/** + * X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_UCLI + * + * Include generic uCli support. */ + + +#ifndef X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_UCLI +#define X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_UCLI 0 +#endif + +/** + * X86_64_INGRAYSYS_S9280_64X_CONFIG_SFP_COUNT + * + * SFP Count. */ + + +#ifndef X86_64_INGRAYSYS_S9280_64X_CONFIG_SFP_COUNT +#define X86_64_INGRAYSYS_S9280_64X_CONFIG_SFP_COUNT 0 +#endif + + + +/** + * All compile time options can be queried or displayed + */ + +/** Configuration settings structure. */ +typedef struct x86_64_ingrasys_s9280_64x_config_settings_s { + /** name */ + const char* name; + /** value */ + const char* value; +} x86_64_ingrasys_s9280_64x_config_settings_t; + +/** Configuration settings table. */ +/** x86_64_ingrasys_s9280_64x_config_settings table. */ +extern x86_64_ingrasys_s9280_64x_config_settings_t x86_64_ingrasys_s9280_64x_config_settings[]; + +/** + * @brief Lookup a configuration setting. + * @param setting The name of the configuration option to lookup. + */ +const char* x86_64_ingrasys_s9280_64x_config_lookup(const char* setting); + +/** + * @brief Show the compile-time configuration. + * @param pvs The output stream. 
+ */ +int x86_64_ingrasys_s9280_64x_config_show(struct aim_pvs_s* pvs); + +/* */ + +#include "x86_64_ingrasys_s9280_64x_porting.h" + +#endif /* __X86_64_INGRAYSYS_S9280_64X_CONFIG_H__ */ +/* @} */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x_dox.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x_dox.h new file mode 100755 index 00000000..0d3f6cf2 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x_dox.h @@ -0,0 +1,26 @@ +/**************************************************************************//** + * + * x86_64_ingrasys_s9280_64x Doxygen Header + * + *****************************************************************************/ +#ifndef __x86_64_ingrasys_s9280_64x_DOX_H__ +#define __x86_64_ingrasys_s9280_64x_DOX_H__ + +/** + * @defgroup x86_64_ingrasys_s9280_64x x86_64_ingrasys_s9280_64x - x86_64_ingrasys_s9280_64x Description + * + +The documentation overview for this module should go here. 
+ + * + * @{ + * + * @defgroup x86_64_ingrasys_s9280_64x-x86_64_ingrasys_s9280_64x Public Interface + * @defgroup x86_64_ingrasys_s9280_64x-config Compile Time Configuration + * @defgroup x86_64_ingrasys_s9280_64x-porting Porting Macros + * + * @} + * + */ + +#endif /* __x86_64_ingrasys_s9280_64x_DOX_H__ */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x_porting.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x_porting.h new file mode 100755 index 00000000..92f6b757 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/inc/x86_64_ingrasys_s9280_64x/x86_64_ingrasys_s9280_64x_porting.h @@ -0,0 +1,106 @@ +/********************************************************//** + * + * @file + * @brief x86_64_Ingrasys_s9280_64x Porting Macros. + * + * @addtogroup x86_64_Ingrasys_s9280_64x-porting + * @{ + * + ***********************************************************/ +#ifndef __X86_64_INGRAYSYS_S9280_64X_PORTING_H__ +#define __X86_64_INGRAYSYS_S9280_64X_PORTING_H__ + +/* */ +#if X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS == 1 +#include +#include +#include +#include +#include +#endif + +#ifndef X86_64_INGRAYSYS_S9280_64X_MALLOC + #if defined(GLOBAL_MALLOC) + #define X86_64_INGRAYSYS_S9280_64X_MALLOC GLOBAL_MALLOC + #elif X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9280_64X_MALLOC malloc + #else + #error The macro X86_64_INGRAYSYS_S9280_64X_MALLOC is required but cannot be defined. 
+ #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9280_64X_FREE + #if defined(GLOBAL_FREE) + #define X86_64_INGRAYSYS_S9280_64X_FREE GLOBAL_FREE + #elif X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9280_64X_FREE free + #else + #error The macro X86_64_INGRAYSYS_S9280_64X_FREE is required but cannot be defined. + #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9280_64X_MEMSET + #if defined(GLOBAL_MEMSET) + #define X86_64_INGRAYSYS_S9280_64X_MEMSET GLOBAL_MEMSET + #elif X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9280_64X_MEMSET memset + #else + #error The macro X86_64_INGRAYSYS_S9280_64X_MEMSET is required but cannot be defined. + #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9280_64X_MEMCPY + #if defined(GLOBAL_MEMCPY) + #define X86_64_INGRAYSYS_S9280_64X_MEMCPY GLOBAL_MEMCPY + #elif X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9280_64X_MEMCPY memcpy + #else + #error The macro X86_64_INGRAYSYS_S9280_64X_MEMCPY is required but cannot be defined. + #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9280_64X_STRNCPY + #if defined(GLOBAL_STRNCPY) + #define X86_64_INGRAYSYS_S9280_64X_STRNCPY GLOBAL_STRNCPY + #elif X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9280_64X_STRNCPY strncpy + #else + #error The macro X86_64_INGRAYSYS_S9280_64X_STRNCPY is required but cannot be defined. + #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9280_64X_VSNPRINTF + #if defined(GLOBAL_VSNPRINTF) + #define X86_64_INGRAYSYS_S9280_64X_VSNPRINTF GLOBAL_VSNPRINTF + #elif X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9280_64X_VSNPRINTF vsnprintf + #else + #error The macro X86_64_INGRAYSYS_S9280_64X_VSNPRINTF is required but cannot be defined. 
+ #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9280_64X_SNPRINTF + #if defined(GLOBAL_SNPRINTF) + #define X86_64_INGRAYSYS_S9280_64X_SNPRINTF GLOBAL_SNPRINTF + #elif X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9280_64X_SNPRINTF snprintf + #else + #error The macro X86_64_INGRAYSYS_S9280_64X_SNPRINTF is required but cannot be defined. + #endif +#endif + +#ifndef X86_64_INGRAYSYS_S9280_64X_STRLEN + #if defined(GLOBAL_STRLEN) + #define X86_64_INGRAYSYS_S9280_64X_STRLEN GLOBAL_STRLEN + #elif X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB == 1 + #define X86_64_INGRAYSYS_S9280_64X_STRLEN strlen + #else + #error The macro X86_64_INGRAYSYS_S9280_64X_STRLEN is required but cannot be defined. + #endif +#endif + +/* */ + + +#endif /* __X86_64_INGRAYSYS_S9280_64X_PORTING_H__ */ +/* @} */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/make.mk b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/make.mk new file mode 100755 index 00000000..ee80af11 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/make.mk @@ -0,0 +1,29 @@ +############################################################ +# +# +# Copyright 2014, 2015 Big Switch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. 
+# +# +############################################################ +# +# +# +############################################################ +THIS_DIR := $(dir $(lastword $(MAKEFILE_LIST))) +x86_64_ingrasys_s9280_64x_INCLUDES := -I $(THIS_DIR)inc +x86_64_ingrasys_s9280_64x_INTERNAL_INCLUDES := -I $(THIS_DIR)src +x86_64_ingrasys_s9280_64x_DEPENDMODULE_ENTRIES := init:x86_64_ingrasys_s9280_64x ucli:x86_64_ingrasys_s9280_64x + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/Makefile new file mode 100755 index 00000000..20eafcde --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/Makefile @@ -0,0 +1,30 @@ +############################################################ +# +# +# Copyright 2014, 2015 Big Switch Networks, Inc. +# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. +# +# +############################################################ +# +# Local source generation targets. 
+# +############################################################ + +include ../../../../init.mk + +ucli: + $(SUBMODULE_BIGCODE)/tools/uclihandlers.py x86_64_ingrasys_s9280_64x_ucli.c + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/fani.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/fani.c new file mode 100755 index 00000000..65344acc --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/fani.c @@ -0,0 +1,272 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * Fan Platform Implementation Defaults. 
+ * + ***********************************************************/ +#include +#include "x86_64_ingrasys_s9280_64x_int.h" +#include +#include +#include "platform_lib.h" + +onlp_fan_info_t fan_info[] = { + { }, /* Not used */ + { + { FAN_OID_FAN1, "FANTRAY 1", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_FAN2, "FANTRAY 2", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_FAN3, "FANTRAY 3", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_FAN4, "FANTRAY 4", 0 }, + ONLP_FAN_STATUS_PRESENT, + ONLP_FAN_CAPS_GET_RPM | ONLP_FAN_CAPS_GET_PERCENTAGE, + 0, + 0, + ONLP_FAN_MODE_INVALID, + }, + { + { FAN_OID_PSU_FAN1, "PSU-1 FAN", 0 }, + ONLP_FAN_STATUS_PRESENT, + }, + { + { FAN_OID_PSU_FAN2, "PSU-2 FAN", 0 }, + ONLP_FAN_STATUS_PRESENT, + } +}; + +/* + * This function will be called prior to all of onlp_fani_* functions. + */ +int +onlp_fani_init(void) +{ + return ONLP_STATUS_OK; +} + +int sys_fan_present_get(onlp_fan_info_t* info, int id) +{ + int rv, fan_presence, i2c_bus, offset, fan_reg_mask; + + /* get fan presence*/ + i2c_bus = I2C_BUS_FANTRAY_LED; + switch (id) + { + case FAN_ID_FAN1: + offset = 0; + fan_reg_mask = FAN_1_PRESENT_MASK; + break; + case FAN_ID_FAN2: + offset = 0; + fan_reg_mask = FAN_2_PRESENT_MASK; + break; + case FAN_ID_FAN3: + offset = 1; + fan_reg_mask = FAN_3_PRESENT_MASK; + break; + case FAN_ID_FAN4: + offset = 1; + fan_reg_mask = FAN_4_PRESENT_MASK; + break; + default: + return ONLP_STATUS_E_INVALID; + } + + rv = onlp_i2c_readb(i2c_bus, FAN_GPIO_ADDR, offset, ONLP_I2C_F_FORCE); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + fan_presence = (rv & fan_reg_mask) ? 
0 : 1; + + if (!fan_presence) { + info->status &= ~ONLP_FAN_STATUS_PRESENT; + + } else { + info->status |= ONLP_FAN_STATUS_PRESENT; + } + + return ONLP_STATUS_OK; +} + +int +sys_fan_info_get(onlp_fan_info_t* info, int id) +{ + int rv, fan_status, fan_rpm, perc_val, percentage; + int max_fan_speed = 10000; + fan_status = 0; + fan_rpm = 0; + + rv = sys_fan_present_get(info, id); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + rv = onlp_file_read_int(&fan_status, SYS_FAN_PREFIX "fan%d_alarm", 2 * id - 1); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + /* fan status > 1, means failure */ + if (fan_status > 0) { + info->status |= ONLP_FAN_STATUS_FAILED; + return ONLP_STATUS_OK; + } + + rv = onlp_file_read_int(&fan_rpm, SYS_FAN_PREFIX "fan%d_input", 2 * id - 1); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + info->rpm = fan_rpm; + + /* get speed percentage*/ + switch (id) + { + case FAN_ID_FAN1: + case FAN_ID_FAN2: + case FAN_ID_FAN3: + case FAN_ID_FAN4: + rv = onlp_file_read_int(&perc_val, SYS_FAN_PREFIX "pwm%d", + FAN_CTRL_SET2); + break; + default: + return ONLP_STATUS_E_INVALID; + } + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + percentage = (info->rpm*100)/max_fan_speed; + info->percentage = percentage; + + return ONLP_STATUS_OK; +} + +int +sys_fan_rpm_percent_set(int perc) +{ + int rc; + + rc = onlp_file_write_int(perc, SYS_FAN_PREFIX "pwm%d", FAN_CTRL_SET2); + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +onlp_fani_rpm_set(onlp_oid_t id, int rpm) +{ + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * This function sets the fan speed of the given OID as a percentage. + * + * This will only be called if the OID has the PERCENTAGE_SET + * capability. + * + * It is optional if you have no fans at all with this feature. 
+ */ +int +onlp_fani_percentage_set(onlp_oid_t id, int percentage) +{ + int fid, perc_val, rc; + fid = ONLP_OID_ID_GET(id); + + /* + * Set fan speed + * Driver accept value in range between 128 and 255. + * Value 128 is 50%. + * Value 200 is 80%. + * Value 255 is 100%. + */ + if (percentage == 100) { + perc_val = 255; + } else if (percentage == 80) { + perc_val = 200; + } else if (percentage == 50) { + perc_val = 128; + } else { + return ONLP_STATUS_E_INVALID; + } + + switch (fid) + { + case FAN_ID_FAN1: + case FAN_ID_FAN2: + case FAN_ID_FAN3: + case FAN_ID_FAN4: + rc = sys_fan_rpm_percent_set(perc_val); + break; + default: + return ONLP_STATUS_E_INVALID; + } + return rc; +} + +int +onlp_fani_info_get(onlp_oid_t id, onlp_fan_info_t* rv) +{ + int fan_id ,rc; + + fan_id = ONLP_OID_ID_GET(id); + *rv = fan_info[fan_id]; + rv->caps |= ONLP_FAN_CAPS_GET_RPM; + + switch (fan_id) { + case FAN_ID_FAN1: + case FAN_ID_FAN2: + case FAN_ID_FAN3: + case FAN_ID_FAN4: + rc = sys_fan_info_get(rv, fan_id); + break; + case FAN_ID_PSU_FAN1: + case FAN_ID_PSU_FAN2: + rc = psu_fan_info_get(rv, fan_id); + break; + default: + return ONLP_STATUS_E_INTERNAL; + break; + } + + return rc; +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/ledi.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/ledi.c new file mode 100755 index 00000000..36f7612a --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/ledi.c @@ -0,0 +1,223 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. 
You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include + +#include "platform_lib.h" + +/* + * Get the information for the given LED OID. + */ +static onlp_led_info_t led_info[] = +{ + { }, /* Not used */ + { + { LED_OID_SYSTEM, "Chassis LED 1 (SYS LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_GREEN, + }, + { + { LED_OID_FAN, "Chassis LED 2 (FAN LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + }, + { + { LED_OID_PSU1, "Chassis LED 3 (PSU1 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + }, + { + { LED_OID_PSU2, "Chassis LED 4 (PSU2 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + }, + { + { LED_OID_FAN_TRAY1, "Rear LED 1 (FAN TRAY1 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + }, + { + { LED_OID_FAN_TRAY2, "Rear LED 2 (FAN TRAY2 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + }, + { + { LED_OID_FAN_TRAY3, "Rear LED 3 (FAN TRAY3 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + 
}, + { + { LED_OID_FAN_TRAY4, "Rear LED 4 (FAN TRAY4 LED)", 0 }, + ONLP_LED_STATUS_PRESENT, + ONLP_LED_CAPS_ON_OFF | ONLP_LED_CAPS_ORANGE | + ONLP_LED_CAPS_GREEN | ONLP_LED_CAPS_AUTO, + } +}; + +extern int sys_fan_info_get(onlp_fan_info_t* info, int id); + +/* + * This function will be called prior to any other onlp_ledi_* functions. + */ +int +onlp_ledi_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_ledi_info_get(onlp_oid_t id, onlp_led_info_t* info) +{ + int led_id, pw_exist, pw_good, rc, fan_id; + onlp_fan_info_t fan_info; + char *sys_psu_prefix = NULL; + + memset(&fan_info, 0, sizeof(onlp_fan_info_t)); + led_id = ONLP_OID_ID_GET(id); + + *info = led_info[led_id]; + + if (id == LED_OID_PSU1 || id == LED_OID_PSU2) { + + if (id == LED_OID_PSU1) { + sys_psu_prefix = SYS_PSU1_PREFIX; + + } else { + sys_psu_prefix = SYS_PSU2_PREFIX; + } + /* check psu status */ + if ((rc = psu_present_get(&pw_exist, sys_psu_prefix)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + if ((rc = psu_pwgood_get(&pw_good, sys_psu_prefix)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + /* psu not present */ + if (pw_exist != PSU_STATUS_PRESENT) { + info->status &= ~ONLP_LED_STATUS_ON; + info->mode = ONLP_LED_MODE_OFF; + } else if (pw_good != PSU_STATUS_POWER_GOOD) { + info->status |= ONLP_LED_STATUS_ON; + info->mode |= ONLP_LED_MODE_ORANGE; + } else { + info->status |= ONLP_LED_STATUS_ON; + info->mode |= ONLP_LED_MODE_GREEN; + } + } else if (id == LED_OID_FAN) { + info->status |= ONLP_LED_STATUS_ON; + info->mode |= ONLP_LED_MODE_GREEN; + for (fan_id=FAN_ID_FAN1; fan_id<=FAN_ID_FAN4; ++fan_id) { + rc = sys_fan_info_get(&fan_info, fan_id); + if (rc != ONLP_STATUS_OK || fan_info.status & ONLP_FAN_STATUS_FAILED) { + info->mode &= ~ONLP_LED_MODE_GREEN; + info->mode |= ONLP_LED_MODE_ORANGE; + break; + } + } + } else if (id == LED_OID_SYSTEM) { + info->status |= ONLP_LED_STATUS_ON; + info->mode |= ONLP_LED_MODE_GREEN; + } else { + info->status |= 
ONLP_LED_STATUS_ON; + info->mode |= ONLP_LED_MODE_ON; + } + + return ONLP_STATUS_OK; +} + +/* + * Turn an LED on or off. + * + * This function will only be called if the LED OID supports the ONOFF + * capability. + * + * What 'on' means in terms of colors or modes for multimode LEDs is + * up to the platform to decide. This is intended as baseline toggle mechanism. + */ +int +onlp_ledi_set(onlp_oid_t id, int on_or_off) +{ + if (!on_or_off) { + return onlp_ledi_mode_set(id, ONLP_LED_MODE_OFF); + } + + return ONLP_STATUS_E_UNSUPPORTED; +} + +/* + * This function puts the LED into the given mode. It is a more functional + * interface for multimode LEDs. + * + * Only modes reported in the LED's capabilities will be attempted. + */ +int +onlp_ledi_mode_set(onlp_oid_t id, onlp_led_mode_t mode) +{ + int led_id, rc; + + led_id = ONLP_OID_ID_GET(id); + switch (led_id) { + case LED_SYSTEM_LED: + rc = system_led_set(mode); + break; + case LED_FAN_LED: + rc = fan_led_set(mode); + break; + case LED_PSU1_LED: + rc = psu1_led_set(mode); + break; + case LED_PSU2_LED: + rc = psu2_led_set(mode); + break; + case LED_FAN_TRAY1: + case LED_FAN_TRAY2: + case LED_FAN_TRAY3: + case LED_FAN_TRAY4: + rc = fan_tray_led_set(id, mode); + break; + default: + return ONLP_STATUS_E_INTERNAL; + break; + } + + return rc; +} + +int +onlp_ledi_ioctl(onlp_oid_t id, va_list vargs) +{ + return ONLP_STATUS_OK; +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/make.mk b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/make.mk new file mode 100755 index 00000000..73ac18ec --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/make.mk @@ -0,0 +1,29 @@ +############################################################ +# +# +# Copyright 2014, 2015 Big Switch Networks, Inc. 
+# +# Licensed under the Eclipse Public License, Version 1.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.eclipse.org/legal/epl-v10.html +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, +# either express or implied. See the License for the specific +# language governing permissions and limitations under the +# License. +# +# +############################################################ +# +# +# +############################################################ + +LIBRARY := x86_64_ingrasys_s9280_64x +$(LIBRARY)_SUBDIR := $(dir $(lastword $(MAKEFILE_LIST))) +#$(LIBRARY)_LAST := 1 +include $(BUILDER)/lib.mk diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/platform_lib.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/platform_lib.c new file mode 100755 index 00000000..d10616bf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/platform_lib.c @@ -0,0 +1,576 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * Copyright 2013 Accton Technology Corporation. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. 
See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "platform_lib.h" + +int +psu_thermal_get(onlp_thermal_info_t* info, int thermal_id) +{ + int pw_exist, pw_good; + int offset, i2c_bus, rc; + int value, buf; + unsigned int y_value = 0; + unsigned char n_value = 0; + unsigned int temp = 0; + char result[32]; + char *sys_psu_prefix = NULL; + + if (thermal_id == THERMAL_ID_PSU1_1) { + i2c_bus = I2C_BUS_PSU1; + offset = PSU_THERMAL1_OFFSET; + sys_psu_prefix = SYS_PSU1_PREFIX; + } else if (thermal_id == THERMAL_ID_PSU1_2) { + i2c_bus = I2C_BUS_PSU1; + offset = PSU_THERMAL2_OFFSET; + sys_psu_prefix = SYS_PSU1_PREFIX; + } else if (thermal_id == THERMAL_ID_PSU2_1) { + i2c_bus = I2C_BUS_PSU2; + offset = PSU_THERMAL1_OFFSET; + sys_psu_prefix = SYS_PSU2_PREFIX; + } else if (thermal_id == THERMAL_ID_PSU2_2) { + i2c_bus = I2C_BUS_PSU2; + offset = PSU_THERMAL2_OFFSET; + sys_psu_prefix = SYS_PSU2_PREFIX; + } + + /* check psu status */ + if ((rc = psu_present_get(&pw_exist, sys_psu_prefix)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_exist != PSU_STATUS_PRESENT) { + info->mcelsius = 0; + info->status &= ~ONLP_THERMAL_STATUS_PRESENT; + return ONLP_STATUS_OK; + } else { + info->status |= ONLP_THERMAL_STATUS_PRESENT; + } + + if ((rc = psu_pwgood_get(&pw_good, sys_psu_prefix)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_good != PSU_STATUS_POWER_GOOD) { + info->mcelsius = 0; + return ONLP_STATUS_OK; + } + + value = onlp_i2c_readw(i2c_bus, PSU_REG, offset, ONLP_I2C_F_FORCE); + + y_value = (value & 0x07FF); + if ((value & 0x8000)&&(y_value)) { + n_value = 0xF0 + (((value) >> 11) & 0x0F); + n_value = (~n_value) +1; + temp = (unsigned 
int)(1<> 11) & 0x0F); + snprintf(result, sizeof(result), "%d", (y_value*(1<mcelsius = (int)(buf * 1000); + + return ONLP_STATUS_OK; +} + +int +psu_fan_info_get(onlp_fan_info_t* info, int id) +{ + int pw_exist, pw_good; + int i2c_bus, rc; + unsigned int tmp_fan_rpm, fan_rpm; + char *sys_psu_prefix = NULL; + + if (id == FAN_ID_PSU_FAN1) { + i2c_bus = I2C_BUS_PSU1; + sys_psu_prefix = SYS_PSU1_PREFIX; + } else if (id == FAN_ID_PSU_FAN2) { + i2c_bus = I2C_BUS_PSU2; + sys_psu_prefix = SYS_PSU2_PREFIX; + } else { + return ONLP_STATUS_E_INTERNAL; + } + + /* check psu status */ + if ((rc = psu_present_get(&pw_exist, sys_psu_prefix)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_exist != PSU_STATUS_PRESENT) { + info->rpm = 0; + info->status &= ~ONLP_FAN_STATUS_PRESENT; + return ONLP_STATUS_OK; + } else { + info->status |= ONLP_FAN_STATUS_PRESENT; + } + + if ((rc = psu_pwgood_get(&pw_good, sys_psu_prefix)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_good != PSU_STATUS_POWER_GOOD) { + info->rpm = 0; + return ONLP_STATUS_OK; + } + + tmp_fan_rpm = onlp_i2c_readw(i2c_bus, PSU_REG, PSU_FAN_RPM_OFFSET, ONLP_I2C_F_FORCE); + + fan_rpm = (unsigned int)tmp_fan_rpm; + fan_rpm = (fan_rpm & 0x07FF) * (1 << ((fan_rpm >> 11) & 0x1F)); + info->rpm = (int)fan_rpm; + + return ONLP_STATUS_OK; +} + +int +psu_vout_get(onlp_psu_info_t* info, int i2c_bus) +{ + int v_value = 0; + int n_value = 0; + unsigned int temp = 0; + char result[32]; + double dvalue; + memset(result, 0, sizeof(result)); + + n_value = onlp_i2c_readb(i2c_bus, PSU_REG, PSU_VOUT_OFFSET1, ONLP_I2C_F_FORCE); + if (n_value < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + v_value = onlp_i2c_readw(i2c_bus, PSU_REG, PSU_VOUT_OFFSET2, ONLP_I2C_F_FORCE); + if (v_value < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + if (n_value & 0x10) { + n_value = 0xF0 + (n_value & 0x0F); + n_value = (~n_value) +1; + temp = (unsigned int)(1< 0.0) { + info->caps |= ONLP_PSU_CAPS_VOUT; + info->mvout = 
(int)(dvalue * 1000); + } + + return ONLP_STATUS_OK; +} + +int +psu_iout_get(onlp_psu_info_t* info, int i2c_bus) +{ + int value; + unsigned int y_value = 0; + unsigned char n_value = 0; + unsigned int temp = 0; + char result[32]; + memset(result, 0, sizeof(result)); + double dvalue; + + value = onlp_i2c_readw(i2c_bus, PSU_REG, PSU_IOUT_OFFSET, ONLP_I2C_F_FORCE); + if (value < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + y_value = (value & 0x07FF); + if ((value & 0x8000)&&(y_value)) + { + n_value = 0xF0 + (((value) >> 11) & 0x0F); + n_value = (~n_value) +1; + temp = (unsigned int)(1<> 11) & 0x0F); + snprintf(result, sizeof(result), "%d", (y_value*(1< 0.0) { + info->caps |= ONLP_PSU_CAPS_IOUT; + info->miout = (int)(dvalue * 1000); + } + + return ONLP_STATUS_OK; +} + +int +psu_pout_get(onlp_psu_info_t* info, int i2c_bus) +{ + int value; + unsigned int y_value = 0; + unsigned char n_value = 0; + unsigned int temp = 0; + char result[32]; + memset(result, 0, sizeof(result)); + double dvalue; + + value = onlp_i2c_readw(i2c_bus, PSU_REG, PSU_POUT_OFFSET, ONLP_I2C_F_FORCE); + if (value < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + y_value = (value & 0x07FF); + if ((value & 0x8000)&&(y_value)) + { + n_value = 0xF0 + (((value) >> 11) & 0x0F); + n_value = (~n_value) +1; + temp = (unsigned int)(1<> 11) & 0x0F); + snprintf(result, sizeof(result), "%d", (y_value*(1< 0.0) { + info->caps |= ONLP_PSU_CAPS_POUT; + info->mpout = (int)(dvalue * 1000); + } + + return ONLP_STATUS_OK; +} + +int +psu_pin_get(onlp_psu_info_t* info, int i2c_bus) +{ + int value; + unsigned int y_value = 0; + unsigned char n_value = 0; + unsigned int temp = 0; + char result[32]; + memset(result, 0, sizeof(result)); + double dvalue; + + value = onlp_i2c_readw(i2c_bus, PSU_REG, PSU_PIN_OFFSET, ONLP_I2C_F_FORCE); + if (value < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + y_value = (value & 0x07FF); + if ((value & 0x8000)&&(y_value)) + { + n_value = 0xF0 + (((value) >> 11) & 0x0F); + n_value = (~n_value) +1; + 
temp = (unsigned int)(1<> 11) & 0x0F); + snprintf(result, sizeof(result), "%d", (y_value*(1< 0.0) { + info->caps |= ONLP_PSU_CAPS_PIN; + info->mpin = (int)(dvalue * 1000); + } + + return ONLP_STATUS_OK; +} + +int +psu_eeprom_get(onlp_psu_info_t* info, int id) +{ + uint8_t data[256]; + char eeprom_path[128]; + int data_len, i, rc; + memset(data, 0, sizeof(data)); + memset(eeprom_path, 0, sizeof(eeprom_path)); + + if (id == PSU_ID_PSU1) { + rc = onlp_file_read(data, sizeof(data), &data_len, PSU1_EEPROM_PATH); + } else { + rc = onlp_file_read(data, sizeof(data), &data_len, PSU2_EEPROM_PATH); + } + + if (rc == ONLP_STATUS_OK) + { + i = 11; + + /* Manufacturer Name */ + data_len = (data[i]&0x0f); + i++; + i += data_len; + + /* Product Name */ + data_len = (data[i]&0x0f); + i++; + memcpy(info->model, (char *) &(data[i]), data_len); + i += data_len; + + /* Product part,model number */ + data_len = (data[i]&0x0f); + i++; + i += data_len; + + /* Product Version */ + data_len = (data[i]&0x0f); + i++; + i += data_len; + + /* Product Serial Number */ + data_len = (data[i]&0x0f); + i++; + memcpy(info->serial, (char *) &(data[i]), data_len); + } else { + strcpy(info->model, "Missing"); + strcpy(info->serial, "Missing"); + } + + return ONLP_STATUS_OK; +} + + +int +psu_present_get(int *pw_exist, char *sys_psu_prefix) +{ + int rv, psu_pres; + + rv = onlp_file_read_int(&psu_pres, "%spsu_abs", sys_psu_prefix); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + *pw_exist = (psu_pres ? 0 : 1); + return ONLP_STATUS_OK; +} + +int +psu_pwgood_get(int *pw_good, char *sys_psu_prefix) +{ + int rv, psu_pwgood; + + rv = onlp_file_read_int(&psu_pwgood, "%spsu_pg", sys_psu_prefix); + if (rv < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + *pw_good = (psu_pwgood ? 
1 : 0); + return ONLP_STATUS_OK; +} + +int +system_led_set(onlp_led_mode_t mode) +{ + int rc; + if(mode == ONLP_LED_MODE_GREEN) { + rc = onlp_i2c_modifyb(I2C_BUS_SYS_LED, LED_REG, LED_OFFSET, LED_SYS_AND_MASK, + LED_SYS_GMASK, ONLP_I2C_F_FORCE); + } + else if(mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_SYS_LED, LED_REG, LED_OFFSET, LED_SYS_AND_MASK, + LED_SYS_YMASK, ONLP_I2C_F_FORCE); + } else { + return ONLP_STATUS_E_INTERNAL; + } + + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +fan_led_set(onlp_led_mode_t mode) +{ + int rc; + + if(mode == ONLP_LED_MODE_GREEN) { + rc = onlp_i2c_modifyb(I2C_BUS_SYS_LED, LED_REG, LED_OFFSET, LED_FAN_AND_MASK, + LED_FAN_GMASK, ONLP_I2C_F_FORCE); + } + else if(mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_SYS_LED, LED_REG, LED_OFFSET, LED_FAN_AND_MASK, + LED_FAN_YMASK, ONLP_I2C_F_FORCE); + } + else if(mode == ONLP_LED_MODE_OFF) { + rc = onlp_i2c_modifyb(I2C_BUS_SYS_LED, LED_REG, LED_OFFSET, LED_FAN_AND_MASK, + LED_FAN_OFFMASK, ONLP_I2C_F_FORCE); + } else { + return ONLP_STATUS_E_INTERNAL; + } + + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +psu1_led_set(onlp_led_mode_t mode) +{ + int rc; + if(mode == ONLP_LED_MODE_GREEN) { + rc = onlp_i2c_modifyb(I2C_BUS_SYS_LED, LED_REG, LED_OFFSET, + LED_PSU1_AND_MASK, LED_PSU1_GMASK, + ONLP_I2C_F_FORCE); + } else if(mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_SYS_LED, LED_REG, LED_OFFSET, + LED_PSU1_AND_MASK, LED_PSU1_YMASK, + ONLP_I2C_F_FORCE); + } else if(mode == ONLP_LED_MODE_OFF) { + rc = onlp_i2c_modifyb(I2C_BUS_SYS_LED, LED_REG, LED_OFFSET, + LED_PSU1_AND_MASK, LED_PSU1_OFFMASK, + ONLP_I2C_F_FORCE); + } else { + return ONLP_STATUS_E_INTERNAL; + } + + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +psu2_led_set(onlp_led_mode_t mode) +{ + int rc; + if(mode == ONLP_LED_MODE_GREEN) { + rc = 
onlp_i2c_modifyb(I2C_BUS_SYS_LED, LED_REG, LED_OFFSET, + LED_PSU2_AND_MASK, LED_PSU2_GMASK, + ONLP_I2C_F_FORCE); + } else if(mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_SYS_LED, LED_REG, LED_OFFSET, + LED_PSU2_AND_MASK, LED_PSU2_YMASK, + ONLP_I2C_F_FORCE); + } else if(mode == ONLP_LED_MODE_OFF) { + rc = onlp_i2c_modifyb(I2C_BUS_SYS_LED, LED_REG, LED_OFFSET, + LED_PSU2_AND_MASK, LED_PSU2_OFFMASK, + ONLP_I2C_F_FORCE); + } else { + return ONLP_STATUS_E_INTERNAL; + } + + + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +fan_tray_led_set(onlp_oid_t id, onlp_led_mode_t mode) +{ + int rc, temp_id; + int fan_tray_id, offset; + + temp_id = ONLP_OID_ID_GET(id); + switch (temp_id) { + case 5: + fan_tray_id = 1; + offset = 2; + break; + case 6: + fan_tray_id = 2; + offset = 2; + break; + case 7: + fan_tray_id = 3; + offset = 3; + break; + case 8: + fan_tray_id = 4; + offset = 3; + break; + default: + return ONLP_STATUS_E_INTERNAL; + break; + } + if (fan_tray_id == 1 || fan_tray_id == 3) { + if (mode == ONLP_LED_MODE_GREEN) { + rc = onlp_i2c_modifyb(I2C_BUS_FANTRAY_LED, FAN_GPIO_ADDR, offset, 0xFC, + 0x01, ONLP_I2C_F_FORCE); + } else if (mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_FANTRAY_LED, FAN_GPIO_ADDR, offset, 0xFC, + 0x02, ONLP_I2C_F_FORCE); + } + } else if (fan_tray_id == 2 || fan_tray_id == 4) { + if (mode == ONLP_LED_MODE_GREEN) { + rc = onlp_i2c_modifyb(I2C_BUS_FANTRAY_LED, FAN_GPIO_ADDR, offset, 0xCF, + 0x10, ONLP_I2C_F_FORCE); + } else if (mode == ONLP_LED_MODE_ORANGE) { + rc = onlp_i2c_modifyb(I2C_BUS_FANTRAY_LED, FAN_GPIO_ADDR, offset, 0xCF, + 0x20, ONLP_I2C_F_FORCE); + } + } + if (rc < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +sysi_platform_info_get(onlp_platform_info_t* pi) +{ + int cpld_release, cpld_version, cpld_rev; + int bus_i; + + for(bus_i=I2C_BUS_CPLD1; bus_i <= I2C_BUS_CPLD5; ++bus_i) { + cpld_rev = onlp_i2c_readb(bus_i, CPLD_REG, 
CPLD_VER_OFFSET, ONLP_I2C_F_FORCE); + if (cpld_rev < 0) { + return ONLP_STATUS_E_INTERNAL; + } + + cpld_release = (((cpld_rev) >> 6 & 0x01)); + cpld_version = (((cpld_rev) & 0x3F)); + + pi->cpld_versions = aim_fstrdup( + "CPLD is %d version(0:RD 1:Release), Revision is 0x%02x\n", + cpld_release, cpld_version); + } + return ONLP_STATUS_OK; +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/platform_lib.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/platform_lib.h new file mode 100755 index 00000000..ed129e09 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/platform_lib.h @@ -0,0 +1,311 @@ +/************************************************************ + * + * + * Copyright 2014 Big Switch Networks, Inc. + * Copyright 2013 Accton Technology Corporation. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#ifndef __PLATFORM_LIB_H__ +#define __PLATFORM_LIB_H__ + +#include +#include +#include +#include +#include +#include "x86_64_ingrasys_s9280_64x_int.h" +#include "x86_64_ingrasys_s9280_64x_log.h" + +#include +#define SYS_CPU_TEMP_PREFIX "/sys/class/hwmon/hwmon0/" +#define SYS_REAR_PANEL_TEMP_PREFIX "/sys/class/hwmon/hwmon2/" +#define SYS_REAR_MAC_TEMP_PREFIX "/sys/class/hwmon/hwmon3/" +#define SYS_MB_ASIC_TEMP_PREFIX "/sys/class/hwmon/hwmon4/" +#define SYS_FRONT_PANEL_PREFIX "/sys/class/hwmon/hwmon5/" +#define SYS_FRONT_MAC_PREFIX "/sys/class/hwmon/hwmon6/" +#define SYS_BMC_BOARD_PREFIX "/sys/class/hwmon/hwmon7/" +#define SYS_CPU_BOARD_PREFIX "/sys/class/hwmon/hwmon8/" +#define SYS_FAN_PREFIX "/sys/class/hwmon/hwmon1/device/" +#define SYS_PSU1_PREFIX "/sys/bus/i2c/devices/i2c-18/18-0050/" +#define SYS_PSU2_PREFIX "/sys/bus/i2c/devices/i2c-17/17-0050/" +#define SYS_EEPROM_PATH "/sys/bus/i2c/devices/0-0051/eeprom" +#define PSU1_EEPROM_PATH "/sys/bus/i2c/devices/18-0050/psu_eeprom" +#define PSU2_EEPROM_PATH "/sys/bus/i2c/devices/17-0050/psu_eeprom" +#define PSU_STATUS_PRESENT 1 +#define PSU_STATUS_POWER_GOOD 1 +#define FAN_PRESENT 0 +#define FAN_CTRL_SET1 1 +#define FAN_CTRL_SET2 2 +#define BOARD_THERMAL_NUM 6 +#define SYS_FAN_NUM 4 +#define PORT_NUM 66 + +#define THERMAL_NUM 16 +#define LED_NUM 4 +#define FAN_NUM 6 + + + +#define THERMAL_SHUTDOWN_DEFAULT 105000 + +#define THERMAL_ERROR_DEFAULT 95000 +#define THERMAL_ERROR_FAN_PERC 100 + +#define THERMAL_WARNING_DEFAULT 77000 +#define THERMAL_WARNING_FAN_PERC 80 + +#define THERMAL_NORMAL_DEFAULT 72000 +#define THERMAL_NORMAL_FAN_PERC 50 + +/* I2C bus */ +#define I2C_BUS_0 0 +#define I2C_BUS_1 1 +#define I2C_BUS_2 2 +#define I2C_BUS_3 3 +#define I2C_BUS_4 4 +#define I2C_BUS_5 5 +#define I2C_BUS_10 10 /* SYS_LED */ +#define I2C_BUS_17 (17) /* PSU2 */ +#define I2C_BUS_18 (18) 
/* PSU1 */ + +#define I2C_BUS_PSU1 I2C_BUS_18 /* PSU1 */ +#define I2C_BUS_PSU2 I2C_BUS_17 /* PSU2 */ +#define I2C_BUS_SYS_LED I2C_BUS_10 /* SYS LED */ +#define I2C_BUS_FANTRAY_LED I2C_BUS_0 /* FANTRAY LED */ +#define I2C_BUS_CPLD1 I2C_BUS_1 /* CPLD 1 */ +#define I2C_BUS_CPLD2 I2C_BUS_2 /* CPLD 2 */ +#define I2C_BUS_CPLD3 I2C_BUS_3 /* CPLD 3 */ +#define I2C_BUS_CPLD4 I2C_BUS_4 /* CPLD 4 */ +#define I2C_BUS_CPLD5 I2C_BUS_5 /* CPLD 5 */ + +/* PSU */ +#define PSU_MUX_MASK 0x01 + +#define PSU_THERMAL1_OFFSET 0x8D +#define PSU_THERMAL2_OFFSET 0x8E +#define PSU_THERMAL_REG 0x58 +#define PSU_FAN_RPM_REG 0x58 +#define PSU_FAN_RPM_OFFSET 0x90 +#define PSU_REG 0x58 +#define PSU_VOUT_OFFSET1 0x20 +#define PSU_VOUT_OFFSET2 0x8B +#define PSU_IOUT_OFFSET 0x8C +#define PSU_POUT_OFFSET 0x96 +#define PSU_PIN_OFFSET 0x97 + +#define PSU_STATE_REG 0x25 +#define PSU1_PRESENT_OFFSET 0x04 +#define PSU2_PRESENT_OFFSET 0x01 +#define PSU1_PWGOOD_OFFSET 0x03 +#define PSU2_PWGOOD_OFFSET 0x00 + +/* LED */ +#define LED_REG 0x76 +#define LED_OFFSET 0x02 +#define LED_PWOK_OFFSET 0x03 + +#define LED_SYS_AND_MASK 0x7F +#define LED_SYS_GMASK 0x80 +#define LED_SYS_YMASK 0x00 + +#define LED_FAN_AND_MASK 0x9F +#define LED_FAN_GMASK 0x40 +#define LED_FAN_YMASK 0x60 +#define LED_FAN_OFFMASK 0x00 + +#define LED_PSU2_AND_MASK 0xF9 +#define LED_PSU2_GMASK 0x04 +#define LED_PSU2_YMASK 0x06 +#define LED_PSU2_OFFMASK 0x00 + +#define LED_PSU1_AND_MASK 0xE7 +#define LED_PSU1_GMASK 0x10 +#define LED_PSU1_YMASK 0x18 +#define LED_PSU1_OFFMASK 0x00 + +#define LED_SYS_ON_MASK 0x00 +#define LED_SYS_OFF_MASK 0x33 + +/* SYS */ +#define CPLD_REG 0x33 +#define CPLD_VER_OFFSET 0x01 + +/* QSFP */ +#define QSFP_PRES_REG1 0x20 +#define QSFP_PRES_REG2 0x21 +#define QSFP_PRES_OFFSET1 0x00 +#define QSFP_PRES_OFFSET2 0x01 + +/* FANTRAY */ +#define FAN_GPIO_ADDR 0x20 +#define FAN_1_PRESENT_MASK 0x04 +#define FAN_2_PRESENT_MASK 0x40 +#define FAN_3_PRESENT_MASK 0x04 +#define FAN_4_PRESENT_MASK 0x40 + +/* CPLD */ +//#define 
CPLDx_I2C_ADDR 0x21 +#define QSFP_EEPROM_I2C_ADDR 0x50 +#define CPLD1_PORTS 12 +#define CPLDx_PORTS 13 +#define CPLD_OFFSET 1 +#define CPLD_PRES_BIT 1 +#define CPLD_SFP1_PRES_BIT 1 +#define CPLD_SFP2_PRES_BIT 4 +#define CPLD_QSFP_REG_PATH "/sys/bus/i2c/devices/%d-00%02x/%s_%d" +#define CPLD_SFP_REG_PATH "/sys/bus/i2c/devices/%d-00%02x/%s" +#define CPLD_QSFP_PORT_STATUS_KEY "cpld_qsfp_port_status" +#define CPLD_SFP_PORT_STATUS_KEY "cpld_sfp_port_status" + + +/** led_oid */ +typedef enum led_oid_e { + LED_OID_SYSTEM = ONLP_LED_ID_CREATE(1), + LED_OID_FAN = ONLP_LED_ID_CREATE(2), + LED_OID_PSU1 = ONLP_LED_ID_CREATE(3), + LED_OID_PSU2 = ONLP_LED_ID_CREATE(4), + LED_OID_FAN_TRAY1 = ONLP_LED_ID_CREATE(5), + LED_OID_FAN_TRAY2 = ONLP_LED_ID_CREATE(6), + LED_OID_FAN_TRAY3 = ONLP_LED_ID_CREATE(7), + LED_OID_FAN_TRAY4 = ONLP_LED_ID_CREATE(8), +} led_oid_t; + +/** led_id */ +typedef enum led_id_e { + LED_SYSTEM_LED = 1, + LED_FAN_LED = 2, + LED_PSU1_LED = 3, + LED_PSU2_LED = 4, + LED_FAN_TRAY1 = 5, + LED_FAN_TRAY2 = 6, + LED_FAN_TRAY3 = 7, + LED_FAN_TRAY4 = 8, +} led_id_t; + +/** Thermal_oid */ +typedef enum thermal_oid_e { + THERMAL_OID_CPU1 = ONLP_THERMAL_ID_CREATE(1), + THERMAL_OID_CPU2 = ONLP_THERMAL_ID_CREATE(2), + THERMAL_OID_CPU3 = ONLP_THERMAL_ID_CREATE(3), + THERMAL_OID_CPU4 = ONLP_THERMAL_ID_CREATE(4), + THERMAL_OID_REAR_PANEL = ONLP_THERMAL_ID_CREATE(5), + THERMAL_OID_REAR_MAC = ONLP_THERMAL_ID_CREATE(6), + THERMAL_OID_MB = ONLP_THERMAL_ID_CREATE(7), + THERMAL_OID_ASIC = ONLP_THERMAL_ID_CREATE(8), + THERMAL_OID_FRONT_PANEL = ONLP_THERMAL_ID_CREATE(9), + THERMAL_OID_FRONT_MAC = ONLP_THERMAL_ID_CREATE(10), + THERMAL_OID_BMC_BOARD = ONLP_THERMAL_ID_CREATE(11), + THERMAL_OID_CPU_BOARD = ONLP_THERMAL_ID_CREATE(12), + THERMAL_OID_PSU1_1 = ONLP_THERMAL_ID_CREATE(13), + THERMAL_OID_PSU1_2 = ONLP_THERMAL_ID_CREATE(14), + THERMAL_OID_PSU2_1 = ONLP_THERMAL_ID_CREATE(15), + THERMAL_OID_PSU2_2 = ONLP_THERMAL_ID_CREATE(16), +} thermal_oid_t; + +/** thermal_id */ +typedef enum 
thermal_id_e { + THERMAL_ID_CPU1 = 1, + THERMAL_ID_CPU2 = 2, + THERMAL_ID_CPU3 = 3, + THERMAL_ID_CPU4 = 4, + THERMAL_ID_REAR_PANEL = 5, + THERMAL_ID_REAR_MAC = 6, + THERMAL_ID_MB = 7, + THERMAL_ID_ASIC = 8, + THERMAL_ID_FRONT_PANEL = 9, + THERMAL_ID_FRONT_MAC = 10, + THERMAL_ID_BMC_BOARD = 11, + THERMAL_ID_CPU_BOARD = 12, + THERMAL_ID_PSU1_1 = 13, + THERMAL_ID_PSU1_2 = 14, + THERMAL_ID_PSU2_1 = 15, + THERMAL_ID_PSU2_2 = 16, +} thermal_id_t; + +/* Shortcut for CPU thermal threshold value. */ +#define THERMAL_THRESHOLD_INIT_DEFAULTS \ + { THERMAL_WARNING_DEFAULT, \ + THERMAL_ERROR_DEFAULT, \ + THERMAL_SHUTDOWN_DEFAULT } + +/** Fan_oid */ +typedef enum fan_oid_e { + FAN_OID_FAN1 = ONLP_FAN_ID_CREATE(1), + FAN_OID_FAN2 = ONLP_FAN_ID_CREATE(2), + FAN_OID_FAN3 = ONLP_FAN_ID_CREATE(3), + FAN_OID_FAN4 = ONLP_FAN_ID_CREATE(4), + FAN_OID_PSU_FAN1 = ONLP_FAN_ID_CREATE(5), + FAN_OID_PSU_FAN2 = ONLP_FAN_ID_CREATE(6) +} fan_oid_t; + +/** fan_id */ +typedef enum fan_id_e { + FAN_ID_FAN1 = 1, + FAN_ID_FAN2 = 2, + FAN_ID_FAN3 = 3, + FAN_ID_FAN4 = 4, + FAN_ID_PSU_FAN1 = 5, + FAN_ID_PSU_FAN2 = 6 +} fan_id_t; + +/** led_oid */ +typedef enum psu_oid_e { + PSU_OID_PSU1 = ONLP_PSU_ID_CREATE(1), + PSU_OID_PSU2 = ONLP_PSU_ID_CREATE(2) +} psu_oid_t; + +/** fan_id */ +typedef enum psu_id_e { + PSU_ID_PSU1 = 1, + PSU_ID_PSU2 = 2 +} psu_id_t; + +int psu_thermal_get(onlp_thermal_info_t* info, int id); + +int psu_fan_info_get(onlp_fan_info_t* info, int id); + +int psu_vout_get(onlp_psu_info_t* info, int i2c_bus); + +int psu_iout_get(onlp_psu_info_t* info, int i2c_bus); + +int psu_pout_get(onlp_psu_info_t* info, int i2c_bus); + +int psu_pin_get(onlp_psu_info_t* info, int i2c_bus); + +int psu_eeprom_get(onlp_psu_info_t* info, int id); + +int psu_present_get(int *pw_exist, char *sys_psu_prefix); + +int psu_pwgood_get(int *pw_good, char *sys_psu_prefix); + +int psu2_led_set(onlp_led_mode_t mode); + +int psu1_led_set(onlp_led_mode_t mode); + +int fan_led_set(onlp_led_mode_t mode); + +int 
system_led_set(onlp_led_mode_t mode); + +int fan_tray_led_set(onlp_oid_t id, onlp_led_mode_t mode); + +int sysi_platform_info_get(onlp_platform_info_t* pi); + +#endif /* __PLATFORM_LIB_H__ */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/psui.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/psui.c new file mode 100755 index 00000000..a8f385fb --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/psui.c @@ -0,0 +1,156 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include + +#include "platform_lib.h" + +static onlp_psu_info_t pinfo[] = +{ + { }, /* Not used */ + { + { + PSU_OID_PSU1, + "PSU-1", + 0, + { + FAN_OID_PSU_FAN1, + }, + } + }, + { + { + PSU_OID_PSU2, + "PSU-2", + 0, + { + FAN_OID_PSU_FAN2, + }, + } + } +}; + +int +onlp_psui_init(void) +{ + return ONLP_STATUS_OK; +} + +int +psu_status_info_get(int id, onlp_psu_info_t *info) +{ + int pw_exist, pw_good; + int rc, i2c_bus; + char *sys_psu_prefix = NULL; + + if (id == PSU_ID_PSU1) { + i2c_bus = I2C_BUS_PSU1; + sys_psu_prefix = SYS_PSU1_PREFIX; + } else if (id == PSU_ID_PSU2) { + i2c_bus = I2C_BUS_PSU2; + sys_psu_prefix = SYS_PSU2_PREFIX; + } else { + return ONLP_STATUS_E_INTERNAL; + } + + /* Get power present status */ + if ((rc = psu_present_get(&pw_exist, sys_psu_prefix)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_exist != PSU_STATUS_PRESENT) { + info->status &= ~ONLP_PSU_STATUS_PRESENT; + info->status |= ONLP_PSU_STATUS_FAILED; + return ONLP_STATUS_OK; + } + info->status |= ONLP_PSU_STATUS_PRESENT; + + /* Get power good status */ + if ((rc = psu_pwgood_get(&pw_good, sys_psu_prefix)) + != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + if (pw_good != PSU_STATUS_POWER_GOOD) { + info->status |= ONLP_PSU_STATUS_UNPLUGGED; + return ONLP_STATUS_OK; + } else { + info->status &= ~ONLP_PSU_STATUS_UNPLUGGED; + } + + /* Get power eeprom status */ + if ((rc = psu_eeprom_get(info, id)) != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Get power iout status */ + if ((rc = psu_iout_get(info, i2c_bus)) != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Get power pout status */ + if ((rc = psu_pout_get(info, i2c_bus)) != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Get power pin status */ + if ((rc = psu_pin_get(info, i2c_bus)) != 
ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + /* Get power vout status */ + if ((rc = psu_vout_get(info, i2c_bus)) != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +int +onlp_psui_info_get(onlp_oid_t id, onlp_psu_info_t* info) +{ + int pid; + + pid = ONLP_OID_ID_GET(id); + memset(info, 0, sizeof(onlp_psu_info_t)); + + /* Set the onlp_oid_hdr_t */ + *info = pinfo[pid]; + + switch (pid) { + case PSU_ID_PSU1: + case PSU_ID_PSU2: + return psu_status_info_get(pid, info); + break; + default: + return ONLP_STATUS_E_UNSUPPORTED; + break; + } + + return ONLP_STATUS_OK; + + +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/sfpi.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/sfpi.c new file mode 100755 index 00000000..59a42fa1 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/sfpi.c @@ -0,0 +1,190 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include "x86_64_ingrasys_s9280_64x_log.h" +#include "platform_lib.h" + +static int _fp2phy_port_mapping[64] = { + 0, 1, 4, 5, 8, + 9, 12, 13, 16, 17, + 20, 21, 24, 25, 28, + 29, 32, 33, 36, 37, + 40, 41, 44, 45, 48, + 49, 52, 53, 56, 57, + 60, 61, 2, 3, 6, + 7, 10, 11, 14, 15, + 18, 19, 22, 23, 26, + 27, 30, 31, 34, 35, + 38, 39, 42, 43, 46, + 47, 50, 51, 54, 55, + 58, 59, 62, 63}; + +static void +qsfp_to_cpld_index(int phy_port, int *cpld_id, int *cpld_port_index) +{ + if (phy_port < CPLD1_PORTS) { + *cpld_id = 0; + *cpld_port_index = phy_port + 1; + } else { + *cpld_id = 1 + (phy_port - CPLD1_PORTS) / CPLDx_PORTS; + *cpld_port_index = ((phy_port - CPLD1_PORTS) % CPLDx_PORTS) + 1; + } + return; +} + + +int +onlp_sfpi_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_bitmap_get(onlp_sfp_bitmap_t* bmap) +{ + int p; + for(p = 1; p <= PORT_NUM; p++) { + AIM_BITMAP_SET(bmap, p); + } + return ONLP_STATUS_OK; +} + +int +onlp_sfpi_is_present(int port) +{ + int status, phy_port; + int i2c_id, cpld_id, cpld_port_index; + char reg_path[128]; + int value, mask; + uint8_t data[8]; + int data_len; + + if (port >= 1 && port <=64) { + phy_port = _fp2phy_port_mapping[port-1]; + qsfp_to_cpld_index(phy_port, &cpld_id, &cpld_port_index); + + i2c_id = CPLD_OFFSET + cpld_id; + mask = 1 << CPLD_PRES_BIT; + + snprintf(reg_path, 128, CPLD_QSFP_REG_PATH, i2c_id, CPLD_REG, CPLD_QSFP_PORT_STATUS_KEY, cpld_port_index); + } else if (port>= 65 && port <= 66) { + cpld_port_index = 0; + i2c_id = CPLD_OFFSET; + + if (port == 65) { + mask = 1 << CPLD_SFP1_PRES_BIT; + } else { + mask = 1 << CPLD_SFP2_PRES_BIT; + } + + snprintf(reg_path, 128, CPLD_SFP_REG_PATH, i2c_id, CPLD_REG, CPLD_SFP_PORT_STATUS_KEY); + } else { + return ONLP_STATUS_E_INTERNAL; + } + + if 
(onlp_file_read(data, sizeof(data), &data_len, reg_path) == ONLP_STATUS_OK) { + //convert hex string to integer + value = (int) strtol ((char *) data, NULL, 16); + + if ( (value & mask) == 0) { + status = 1; + } else { + status = 0; + } + } else { + return ONLP_STATUS_E_INTERNAL; + } + + return status; +} + + +int +onlp_sfpi_presence_bitmap_get(onlp_sfp_bitmap_t* dst) +{ + int p = 1; + int rc = 0; + + for (p = 1; p <= PORT_NUM; p++) { + rc = onlp_sfpi_is_present(p); + AIM_BITMAP_MOD(dst, p, (1 == rc) ? 1 : 0); + } + + return ONLP_STATUS_OK; +} + +/* + * This function reads the SFPs idrom and returns in + * in the data buffer provided. + */ +int +onlp_sfpi_eeprom_read(int port, uint8_t data[256]) +{ + int eeprombus=0, eeprombusbase=0, phy_port=0, port_group=0, eeprombusshift=0; + char eeprom_path[512], eeprom_addr[32]; + memset(eeprom_path, 0, sizeof(eeprom_path)); + memset(eeprom_addr, 0, sizeof(eeprom_addr)); + strncpy(eeprom_addr, "0050", sizeof(eeprom_addr)); + + memset(data, 0, 256); + + if (port >=1 && port <= 64) { + phy_port = _fp2phy_port_mapping[port-1] + 1; + port_group = (phy_port-1)/8; + eeprombusbase = 41 + (port_group * 8); + eeprombusshift = (phy_port-1)%8; + eeprombus = eeprombusbase + eeprombusshift; + } else if (port == 65 ){ + eeprombus = 29; + } else if (port == 66 ){ + eeprombus = 30; + } else { + return ONLP_STATUS_E_INTERNAL; + } + + snprintf(eeprom_path, sizeof(eeprom_path), + "/sys/bus/i2c/devices/%d-%s/eeprom", eeprombus, eeprom_addr); + + if (onlplib_sfp_eeprom_read_file(eeprom_path, data) != 0) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + +/* + * De-initialize the SFPI subsystem. 
+ */ +int +onlp_sfpi_denit(void) +{ + return ONLP_STATUS_OK; +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/sysi.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/sysi.c new file mode 100755 index 00000000..0cd67513 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/sysi.c @@ -0,0 +1,340 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "platform_lib.h" + +const char* +onlp_sysi_platform_get(void) +{ + return "x86-64-ingrasys-s9280-64x-r0"; +} + +int +onlp_sysi_init(void) +{ + return ONLP_STATUS_OK; +} + +int +onlp_sysi_onie_data_get(uint8_t** data, int* size) +{ + uint8_t* rdata = aim_zmalloc(256); + if(onlp_file_read(rdata, 256, size, SYS_EEPROM_PATH) == ONLP_STATUS_OK) { + if(*size == 256) { + *data = rdata; + return ONLP_STATUS_OK; + } + } + + AIM_LOG_INFO("Unable to get data from eeprom \n"); + aim_free(rdata); + *size = 0; + return ONLP_STATUS_E_INTERNAL; +} + +int +onlp_sysi_oids_get(onlp_oid_t* table, int max) +{ + onlp_oid_t* e = table; + memset(table, 0, max*sizeof(onlp_oid_t)); + int i; + + /* 2 PSUs */ + *e++ = ONLP_PSU_ID_CREATE(1); + *e++ = ONLP_PSU_ID_CREATE(2); + + /* LEDs Item */ + for (i=1; i<=LED_NUM; i++) { + *e++ = ONLP_LED_ID_CREATE(i); + } + + /* THERMALs Item */ + for (i=1; i<=THERMAL_NUM; i++) { + *e++ = ONLP_THERMAL_ID_CREATE(i); + } + + /* Fans Item */ + for (i=1; i<=FAN_NUM; i++) { + *e++ = ONLP_FAN_ID_CREATE(i); + } + + return ONLP_STATUS_OK; +} + +int +decide_fan_percentage(int is_up, int new_temp) +{ + int new_perc; + if (is_up) { + if (new_temp >= THERMAL_ERROR_DEFAULT) { + new_perc = THERMAL_ERROR_FAN_PERC; + } else if (new_temp >= THERMAL_WARNING_DEFAULT) { + new_perc = THERMAL_WARNING_FAN_PERC; + } else { + new_perc = THERMAL_NORMAL_FAN_PERC; + } + } else { + if (new_temp <= THERMAL_NORMAL_DEFAULT) { + new_perc = THERMAL_NORMAL_FAN_PERC; + } else if (new_temp <= THERMAL_WARNING_DEFAULT) { + new_perc = THERMAL_WARNING_FAN_PERC; + } else { + new_perc = THERMAL_ERROR_FAN_PERC; + } + } + + return new_perc; +} + +int +platform_thermal_temp_get(int *thermal_temp) +{ + int i, temp, max_temp, rc; + 
onlp_thermal_info_t thermal_info; + memset(&thermal_info, 0, sizeof(thermal_info)); + uint32_t thermal_arr[] = { THERMAL_OID_FRONT_MAC, + THERMAL_OID_ASIC, + THERMAL_OID_CPU1, + THERMAL_OID_CPU2, + THERMAL_OID_CPU3, + THERMAL_OID_CPU4 }; + max_temp = 0; + + for (i=0; i max_temp) { + max_temp = temp; + } + } + *thermal_temp = max_temp; + + return ONLP_STATUS_OK; +} + +int +onlp_sysi_platform_manage_fans(void) +{ + int rc, is_up ,new_temp, thermal_temp, diff; + static int new_perc = 0, ori_perc = 0; + static int ori_temp = 0; + onlp_thermal_info_t thermal_info; + memset(&thermal_info, 0, sizeof(thermal_info)); + + /* get new temperature */ + if ((rc = platform_thermal_temp_get(&thermal_temp)) != ONLP_STATUS_OK) { + goto _EXIT; + } + + new_temp = thermal_temp; + diff = new_temp - ori_temp; + + if (diff == 0) { + goto _EXIT; + } else { + is_up = (diff > 0 ? 1 : 0); + } + + new_perc = decide_fan_percentage(is_up, new_temp); + + if (ori_perc == new_perc) { + goto _EXIT; + } + + + AIM_LOG_INFO("The Fan Speeds Percent are now at %d%%", new_perc); + + if ((rc = onlp_fani_percentage_set(THERMAL_OID_ASIC, new_perc)) != ONLP_STATUS_OK) { + goto _EXIT; + } + + /* update */ + ori_perc = new_perc; + ori_temp = new_temp; + +_EXIT : + return rc; +} + +int +onlp_sysi_platform_manage_leds(void) +{ + int psu1_status, psu2_status, rc, i; + static int pre_psu1_status = 0, pre_psu2_status = 0, pre_fan_status = 0; + + //------------------------------- + static int pre_fan_tray_status[4] = {0}; + int fan_tray_id, sum, total = 0; + onlp_led_status_t fan_tray_status[SYS_FAN_NUM]; + //------------------------------- + + onlp_psu_info_t psu_info; + onlp_fan_info_t fan_info; + + //-------- ----------------------- + memset(&fan_tray_status, 0, sizeof(fan_tray_status)); + //------------------------------- + + memset(&psu_info, 0, sizeof(onlp_psu_info_t)); + memset(&fan_info, 0, sizeof(onlp_fan_info_t)); + uint32_t fan_arr[] = { FAN_OID_FAN1, + FAN_OID_FAN2, + FAN_OID_FAN3, + FAN_OID_FAN4, }; + + 
/* PSU LED CTRL */ + if ((rc = onlp_psui_info_get(PSU_OID_PSU1, &psu_info)) != ONLP_STATUS_OK) { + goto _EXIT; + } + + psu1_status = psu_info.status; + if (psu1_status != pre_psu1_status) { + if((psu1_status & ONLP_PSU_STATUS_PRESENT) == 0) { + rc = onlp_ledi_mode_set(LED_OID_PSU1, ONLP_LED_MODE_OFF); + } + else if(psu1_status != ONLP_PSU_STATUS_PRESENT) { + rc = onlp_ledi_mode_set(LED_OID_PSU1, ONLP_LED_MODE_ORANGE); + } else { + rc = onlp_ledi_mode_set(LED_OID_PSU1, ONLP_LED_MODE_GREEN); + } + + if (rc != ONLP_STATUS_OK) { + goto _EXIT; + } + pre_psu1_status = psu1_status; + } + + if ((rc = onlp_psui_info_get(PSU_OID_PSU2, &psu_info)) != ONLP_STATUS_OK) { + goto _EXIT; + } + + psu2_status = psu_info.status; + if( psu2_status != pre_psu2_status) { + if((psu2_status & ONLP_PSU_STATUS_PRESENT) == 0) { + rc = onlp_ledi_mode_set(LED_OID_PSU2, ONLP_LED_MODE_OFF); + } + else if(psu2_status != ONLP_PSU_STATUS_PRESENT) { + rc = onlp_ledi_mode_set(LED_OID_PSU2, ONLP_LED_MODE_ORANGE); + } else { + rc = onlp_ledi_mode_set(LED_OID_PSU2, ONLP_LED_MODE_GREEN); + } + + if (rc != ONLP_STATUS_OK) { + goto _EXIT; + } + pre_psu2_status = psu2_status; + } + + /* FAN LED CTRL */ + for (i=0; i ONLP_LED_STATUS_FAILED) { + rc = onlp_ledi_mode_set(fan_tray_id, ONLP_LED_MODE_ORANGE); + + } else { + rc = onlp_ledi_mode_set(fan_tray_id, ONLP_LED_MODE_GREEN); + } + + if (rc != ONLP_STATUS_OK) { + goto _EXIT; + } + + pre_fan_tray_status[fan_tray_id - 5] = sum; + } + } + + if (total != pre_fan_status) { + if (total == (ONLP_LED_STATUS_PRESENT * 4)) { + rc = onlp_ledi_mode_set(LED_OID_FAN, ONLP_LED_MODE_GREEN); + } else { + rc = onlp_ledi_mode_set(LED_OID_FAN, ONLP_LED_MODE_ORANGE); + } + + if (rc != ONLP_STATUS_OK) { + goto _EXIT; + } + + pre_fan_status = total; + } + +_EXIT : + return rc; +} + +int +onlp_sysi_platform_info_get(onlp_platform_info_t* pi) +{ + int rc; + if ((rc = sysi_platform_info_get(pi)) != ONLP_STATUS_OK) { + return ONLP_STATUS_E_INTERNAL; + } + + return ONLP_STATUS_OK; +} + 
diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/thermali.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/thermali.c new file mode 100755 index 00000000..09bd1832 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/thermali.c @@ -0,0 +1,237 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * Thermal Sensor Platform Implementation. 
+ * + ***********************************************************/ +#include +#include +#include "x86_64_ingrasys_s9280_64x_log.h" +#include "platform_lib.h" + +static onlp_thermal_info_t thermal_info[] = { + { }, /* Not used */ + { { THERMAL_OID_CPU1, "CPU Thermal 1", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_CPU2, "CPU Thermal 2", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_CPU3, "CPU Thermal 3", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_CPU4, "CPU Thermal 4", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_REAR_PANEL, "Rear Panel", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_REAR_MAC, "Rear MAC", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_MB, "Mother Board", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_ASIC, "MAC Temp", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_FRONT_PANEL, "Front Panel", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_FRONT_MAC, "FRONT MAC", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_ALL, 0, THERMAL_THRESHOLD_INIT_DEFAULTS + }, + { { THERMAL_OID_BMC_BOARD, "BMC Board", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_CPU_BOARD, "CPU Board", 0}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_PSU1_1, "PSU-1 Thermal 1", PSU_OID_PSU1}, + ONLP_THERMAL_STATUS_PRESENT, + 
ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_PSU1_2, "PSU-1 Thermal 2", PSU_OID_PSU1}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_PSU2_1, "PSU-2 Thermal 1", PSU_OID_PSU2}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + }, + { { THERMAL_OID_PSU2_2, "PSU-2 Thermal 2", PSU_OID_PSU2}, + ONLP_THERMAL_STATUS_PRESENT, + ONLP_THERMAL_CAPS_GET_TEMPERATURE, 0 + } +}; + +/* + * This will be called to intiialize the thermali subsystem. + */ +int +onlp_thermali_init(void) +{ + return ONLP_STATUS_OK; +} + +static int +lm_thermal_info_get(onlp_thermal_info_t* info, int id) +{ + int rv; + char sysfs_path[64]; + + switch (id) { + case THERMAL_ID_REAR_PANEL: + snprintf(sysfs_path, sizeof(sysfs_path), SYS_REAR_PANEL_TEMP_PREFIX "temp%d_input", 1); + break; + case THERMAL_ID_REAR_MAC: + snprintf(sysfs_path, sizeof(sysfs_path), SYS_REAR_MAC_TEMP_PREFIX "temp%d_input", 1); + break; + case THERMAL_ID_MB: + snprintf(sysfs_path, sizeof(sysfs_path), SYS_MB_ASIC_TEMP_PREFIX "temp%d_input", 1); + break; + case THERMAL_ID_ASIC: + snprintf(sysfs_path, sizeof(sysfs_path), SYS_MB_ASIC_TEMP_PREFIX "temp%d_input", 2); + break; + case THERMAL_ID_FRONT_PANEL: + snprintf(sysfs_path, sizeof(sysfs_path), SYS_FRONT_PANEL_PREFIX "temp%d_input", 1); + break; + case THERMAL_ID_FRONT_MAC: + snprintf(sysfs_path, sizeof(sysfs_path), SYS_FRONT_MAC_PREFIX "temp%d_input", 1); + break; + case THERMAL_ID_BMC_BOARD: + snprintf(sysfs_path, sizeof(sysfs_path), SYS_BMC_BOARD_PREFIX "temp%d_input", 1); + break; + case THERMAL_ID_CPU_BOARD: + snprintf(sysfs_path, sizeof(sysfs_path), SYS_CPU_BOARD_PREFIX "temp%d_input", 1); + break; + } + + rv = onlp_file_read_int(&info->mcelsius, sysfs_path); + + if(rv == ONLP_STATUS_E_INTERNAL) { + return rv; + } + + if(rv == ONLP_STATUS_E_MISSING) { + info->status &= ~1; + return 0; + } + + return ONLP_STATUS_OK; +} + +static int +cpu_thermal_info_get(onlp_thermal_info_t* info, int id) +{ + int 
rv; + int offset; + + offset = 1; + id = id + offset; + rv = onlp_file_read_int(&info->mcelsius, + SYS_CPU_TEMP_PREFIX "temp%d_input", id); + + if(rv == ONLP_STATUS_E_INTERNAL) { + return rv; + } + + if(rv == ONLP_STATUS_E_MISSING) { + info->status &= ~1; + return 0; + } + + return ONLP_STATUS_OK; +} + +int +psu_thermal_info_get(onlp_thermal_info_t* info, int id) +{ + int rv; + + rv = psu_thermal_get(info, id); + if(rv == ONLP_STATUS_E_INTERNAL) { + return rv; + } + + return ONLP_STATUS_OK; +} + +/* + * Retrieve the information structure for the given thermal OID. + * + * If the OID is invalid, return ONLP_E_STATUS_INVALID. + * If an unexpected error occurs, return ONLP_E_STATUS_INTERNAL. + * Otherwise, return ONLP_STATUS_OK with the OID's information. + * + * Note -- it is expected that you fill out the information + * structure even if the sensor described by the OID is not present. + */ +int +onlp_thermali_info_get(onlp_oid_t id, onlp_thermal_info_t* info) +{ + int sensor_id, rc; + sensor_id = ONLP_OID_ID_GET(id); + + *info = thermal_info[sensor_id]; + info->caps |= ONLP_THERMAL_CAPS_GET_TEMPERATURE; + + switch (sensor_id) { + case THERMAL_ID_CPU1: + case THERMAL_ID_CPU2: + case THERMAL_ID_CPU3: + case THERMAL_ID_CPU4: + rc = cpu_thermal_info_get(info, sensor_id); + break; + case THERMAL_ID_REAR_PANEL: + case THERMAL_ID_REAR_MAC: + case THERMAL_ID_MB: + case THERMAL_ID_ASIC: + case THERMAL_ID_FRONT_PANEL: + case THERMAL_ID_FRONT_MAC: + case THERMAL_ID_BMC_BOARD: + case THERMAL_ID_CPU_BOARD: + rc = lm_thermal_info_get(info, sensor_id); + break; + case THERMAL_ID_PSU1_1: + case THERMAL_ID_PSU1_2: + case THERMAL_ID_PSU2_1: + case THERMAL_ID_PSU2_2: + rc = psu_thermal_info_get(info, sensor_id); + break; + default: + return ONLP_STATUS_E_INTERNAL; + break; + } + return rc; +} diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_config.c 
b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_config.c new file mode 100755 index 00000000..63a1f4bb --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_config.c @@ -0,0 +1,101 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +/* */ +#define __x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME(_x) #_x +#define __x86_64_ingrasys_s9280_64x_config_STRINGIFY_VALUE(_x) __x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME(_x) +x86_64_ingrasys_s9280_64x_config_settings_t x86_64_ingrasys_s9280_64x_config_settings[] = +{ +#ifdef X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_LOGGING + { __x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_LOGGING), __x86_64_ingrasys_s9280_64x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_LOGGING) }, +#else +{ X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_LOGGING(__x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_OPTIONS_DEFAULT + { __x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_OPTIONS_DEFAULT), __x86_64_ingrasys_s9280_64x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_OPTIONS_DEFAULT) }, +#else +{ X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_OPTIONS_DEFAULT(__x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_BITS_DEFAULT + { __x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_BITS_DEFAULT), __x86_64_ingrasys_s9280_64x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_BITS_DEFAULT) }, +#else +{ X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_BITS_DEFAULT(__x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_CUSTOM_BITS_DEFAULT + { __x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_CUSTOM_BITS_DEFAULT), __x86_64_ingrasys_s9280_64x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_CUSTOM_BITS_DEFAULT) }, +#else +{ 
X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_CUSTOM_BITS_DEFAULT(__x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB + { __x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB), __x86_64_ingrasys_s9280_64x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB) }, +#else +{ X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_STDLIB(__x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS + { __x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS), __x86_64_ingrasys_s9280_64x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS) }, +#else +{ X86_64_INGRAYSYS_S9280_64X_CONFIG_PORTING_INCLUDE_STDLIB_HEADERS(__x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_UCLI + { __x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_UCLI), __x86_64_ingrasys_s9280_64x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_UCLI) }, +#else +{ X86_64_INGRAYSYS_S9280_64X_CONFIG_INCLUDE_UCLI(__x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME), "__undefined__" }, +#endif +#ifdef X86_64_INGRAYSYS_S9280_64X_CONFIG_SFP_COUNT + { __x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME(X86_64_INGRAYSYS_S9280_64X_CONFIG_SFP_COUNT), __x86_64_ingrasys_s9280_64x_config_STRINGIFY_VALUE(X86_64_INGRAYSYS_S9280_64X_CONFIG_SFP_COUNT) }, +#else +{ X86_64_INGRAYSYS_S9280_64X_CONFIG_SFP_COUNT(__x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME), "__undefined__" }, +#endif + { NULL, NULL } +}; +#undef __x86_64_ingrasys_s9280_64x_config_STRINGIFY_VALUE +#undef __x86_64_ingrasys_s9280_64x_config_STRINGIFY_NAME + +const char* +x86_64_ingrasys_s9280_64x_config_lookup(const char* 
setting) +{ + int i; + for(i = 0; x86_64_ingrasys_s9280_64x_config_settings[i].name; i++) { + if(strcmp(x86_64_ingrasys_s9280_64x_config_settings[i].name, setting)) { + return x86_64_ingrasys_s9280_64x_config_settings[i].value; + } + } + return NULL; +} + +int +x86_64_ingrasys_s9280_64x_config_show(struct aim_pvs_s* pvs) +{ + int i; + for(i = 0; x86_64_ingrasys_s9280_64x_config_settings[i].name; i++) { + aim_printf(pvs, "%s = %s\n", x86_64_ingrasys_s9280_64x_config_settings[i].name, x86_64_ingrasys_s9280_64x_config_settings[i].value); + } + return i; +} + +/* */ + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_enums.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_enums.c new file mode 100755 index 00000000..ea89a96d --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_enums.c @@ -0,0 +1,30 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +/* <--auto.start.enum(ALL).source> */ +/* */ + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_int.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_int.h new file mode 100755 index 00000000..e5fc2620 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_int.h @@ -0,0 +1,29 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#ifndef __x86_64_ingrasys_s9280_64x_INT_H__ +#define __x86_64_ingrasys_s9280_64x_INT_H__ + +#endif /* __x86_64_ingrasys_s9280_64x_INT_H__ */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_log.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_log.c new file mode 100755 index 00000000..821626db --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_log.c @@ -0,0 +1,38 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. + * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +#include "x86_64_ingrasys_s9280_64x_log.h" +/* + * x86_64_ingrasys_s9280_64x log struct. 
+ */ +AIM_LOG_STRUCT_DEFINE( + X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_OPTIONS_DEFAULT, + X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_BITS_DEFAULT, + NULL, /* Custom log map */ + X86_64_INGRAYSYS_S9280_64X_CONFIG_LOG_CUSTOM_BITS_DEFAULT + ); + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_log.h b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_log.h new file mode 100755 index 00000000..e3c9a78c --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_log.h @@ -0,0 +1,32 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#ifndef __x86_64_ingrasys_s9280_64x_LOG_H__ +#define __x86_64_ingrasys_s9280_64x_LOG_H__ + +#define AIM_LOG_MODULE_NAME x86_64_ingrasys_s9280_64x +#include + +#endif /* __x86_64_ingrasys_s9280_64x_LOG_H__ */ diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_module.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_module.c new file mode 100755 index 00000000..f5f9b814 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_module.c @@ -0,0 +1,44 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +#include "x86_64_ingrasys_s9280_64x_log.h" + +static int +datatypes_init__(void) +{ +#define INGRAYSYS_S9180_32X_ENUMERATION_ENTRY(_enum_name, _desc) AIM_DATATYPE_MAP_REGISTER(_enum_name, _enum_name##_map, _desc, AIM_LOG_INTERNAL); +#include + return 0; +} + +void __x86_64_ingrasys_s9280_64x_module_init__(void) +{ + AIM_LOG_STRUCT_REGISTER(); + datatypes_init__(); +} + +int __onlp_platform_version__ = 1; diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_ucli.c b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_ucli.c new file mode 100755 index 00000000..21d7efd5 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/onlp/builds/src/x86_64_ingrasys_s9280_64x/module/src/x86_64_ingrasys_s9280_64x_ucli.c @@ -0,0 +1,82 @@ +/************************************************************ + * + * + * Copyright 2014, 2015 Big Switch Networks, Inc. + * + * Licensed under the Eclipse Public License, Version 1.0 (the + * "License"); you may not use this file except in compliance + * with the License. You may obtain a copy of the License at + * + * http://www.eclipse.org/legal/epl-v10.html + * + * Unless required by applicable law or agreed to in writing, + * software distributed under the License is distributed on an + * "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, + * either express or implied. See the License for the specific + * language governing permissions and limitations under the + * License. 
+ * + * + ************************************************************ + * + * + * + ***********************************************************/ + +#include + +#if ONLPSIM_CONFIG_INCLUDE_UCLI == 1 + +#include +#include +#include + +static ucli_status_t +x86_64_ingrasys_s9280_64x_ucli_ucli__config__(ucli_context_t* uc) +{ + UCLI_HANDLER_MACRO_MODULE_CONFIG(x86_64_ingrasys_s9280_64x) +} + +/* */ +/****************************************************************************** + * + * These handler table(s) were autogenerated from the symbols in this + * source file. + * + *****************************************************************************/ +static ucli_command_handler_f x86_64_ingrasys_s9280_64x_ucli_ucli_handlers__[] = +{ + x86_64_ingrasys_s9280_64x_ucli_ucli__config__, + NULL +}; +/******************************************************************************/ +/* */ + +static ucli_module_t +x86_64_ingrasys_s9280_64x_ucli_module__ = + { + "x86_64_ingrasys_s9280_64x_ucli", + NULL, + x86_64_ingrasys_s9280_64x_ucli_ucli_handlers__, + NULL, + NULL, + }; + +ucli_node_t* +x86_64_ingrasys_s9280_64x_ucli_node_create(void) +{ + ucli_node_t* n; + ucli_module_init(&x86_64_ingrasys_s9280_64x_ucli_module__); + n = ucli_node_create("x86_64_ingrasys_s9280_64x", NULL, &x86_64_ingrasys_s9280_64x_ucli_module__); + ucli_node_subnode_add(n, ucli_module_log_node_create("x86_64_ingrasys_s9280_64x")); + return n; +} + +#else +void* +x86_64_ingrasys_s9280_64x_ucli_node_create(void) +{ + return NULL; +} +#endif + diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/Makefile new file mode 100755 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git 
a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/Makefile b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/Makefile new file mode 100755 index 00000000..003238cf --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/Makefile @@ -0,0 +1 @@ +include $(ONL)/make/pkg.mk \ No newline at end of file diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/PKG.yml b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/PKG.yml new file mode 100755 index 00000000..30eba878 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/PKG.yml @@ -0,0 +1 @@ +!include $ONL_TEMPLATES/platform-config-platform.yml ARCH=amd64 VENDOR=ingrasys BASENAME=x86-64-ingrasys-s9280-64x REVISION=r0 diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/src/lib/x86-64-ingrasys-s9280-64x-r0.yml b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/src/lib/x86-64-ingrasys-s9280-64x-r0.yml new file mode 100755 index 00000000..6d0c6c67 --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/src/lib/x86-64-ingrasys-s9280-64x-r0.yml @@ -0,0 +1,30 @@ +--- + +###################################################################### +# +# platform-config for x86-64-ingrasys-s9280-64x +# +###################################################################### + +x86-64-ingrasys-s9280-64x-r0: + + grub: + + serial: >- + --port=0x3f8 + --speed=115200 + --word=8 + --parity=no + --stop=1 + + kernel: + <<: *kernel-3-16 + + args: >- + console=ttyS0,115200n8 + + ##network + ## interfaces: + ## ma1: + ## name: ~ + ## syspath: pci0000:00/0000:00:03.0 diff --git a/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/src/python/x86_64_ingrasys_s9280_64x_r0/__init__.py 
b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/src/python/x86_64_ingrasys_s9280_64x_r0/__init__.py new file mode 100755 index 00000000..345fcbbd --- /dev/null +++ b/packages/platforms/ingrasys/x86-64/x86-64-ingrasys-s9280-64x/platform-config/r0/src/python/x86_64_ingrasys_s9280_64x_r0/__init__.py @@ -0,0 +1,192 @@ +from onl.platform.base import * +from onl.platform.ingrasys import * +import os + +class OnlPlatform_x86_64_ingrasys_s9280_64x_r0(OnlPlatformIngrasys): + PLATFORM='x86-64-ingrasys-s9280-64x-r0' + MODEL="S9280-64X" + SYS_OBJECT_ID=".9280.64" + + def baseconfig(self): + + # fp port to phy port mapping + fp2phy_array=( 0, 1, 4, 5, 8, 9, 12, 13, 16, 17, 20, 21, 24, 25, 28, 29, + 32, 33, 36, 37, 40, 41, 44, 45, 48, 49, 52, 53, 56, 57, 60, 61, + 2, 3, 6, 7, 10, 11, 14, 15, 18, 19, 22, 23, 26, 27, 30, 31, + 34, 35, 38, 39, 42, 43, 46, 47, 50, 51, 54, 55, 58, 59, 62, 63) + # fp port to led port mapping + fp2led_array=( 1, 2, 5, 6, 9, 10, 13, 14, 1, 2, 5, 6, 9, 10, 13, 14, + 1, 2, 5, 6, 9, 10, 13, 14, 1, 2, 5, 6, 9, 10, 13, 14, + 3, 4, 7, 8, 11, 12, 15, 16, 3, 4, 7, 8, 11, 12, 15, 16, + 3, 4, 7, 8, 11, 12, 15, 16, 3, 4, 7, 8, 11, 12, 15, 16) + + self.insmod("eeprom_mb") + # init SYS EEPROM devices + self.new_i2c_devices( + [ + # _i2c_mb_eeprom_init + ('mb_eeprom', 0x55, 0), + + # _i2c_cb_eeprom_init + ('mb_eeprom', 0x51, 0), + ] + ) + + os.system("modprobe w83795") + os.system("modprobe eeprom") + + ########### initialize I2C bus 0 ########### + # init PCA9548 + self.new_i2c_devices( + [ + ('pca9548', 0x70, 0), #pca9548_0 + ('pca9548', 0x73, 0), #pca9548_1 + ('pca9546', 0x72, 0), #pca9546_0 + ('pca9548', 0x71, 19), #pca9548_2 + ('pca9546', 0x71, 20), #pca9546_1 + ('pca9548', 0x75, 0), #pca9548_11 + ('pca9548', 0x74, 21), #pca9548_3 + ('pca9548', 0x74, 22), #pca9548_4 + ('pca9548', 0x74, 23), #pca9548_5 + ('pca9548', 0x74, 24), #pca9548_6 + ('pca9548', 0x74, 25), #pca9548_7 + ('pca9548', 0x74, 26), #pca9548_8 + ('pca9548', 0x74, 
27), #pca9548_9 + ('pca9548', 0x74, 28), #pca9548_10 + ] + ) + # _i2c_hwm_init + os.system("i2cset -y -r 16 0x2f 0x00 0x80") + os.system("i2cset -y -r 16 0x2f 0x01 0x9C") + os.system("i2cset -y -r 16 0x2f 0x04 0x00") + os.system("i2cset -y -r 16 0x2f 0x06 0xFF") + os.system("i2cset -y -r 16 0x2f 0x07 0x00") + os.system("i2cset -y -r 16 0x2f 0x01 0x1C") + os.system("i2cset -y -r 16 0x2f 0x00 0x82") + os.system("i2cset -y -r 16 0x2f 0x0F 0x00") + os.system("i2cset -y -r 16 0x2f 0x18 0x84") + os.system("i2cset -y -r 16 0x2f 0x19 0x84") + + # _i2c_io_exp_init + # need to init BMC io expander first due to some io expander are reset default + # Init BMC INT & HW ID IO Expander + os.system("i2cset -y -r 0 0x24 6 0xFF") + os.system("i2cset -y -r 0 0x24 7 0xFF") + os.system("i2cset -y -r 0 0x24 4 0x00") + os.system("i2cset -y -r 0 0x24 5 0x00") + + # Init BMC PSU status IO Expander + os.system("i2cset -y -r 0 0x25 2 0x00") + os.system("i2cset -y -r 0 0x25 3 0x00") + os.system("i2cset -y -r 0 0x25 6 0xDB") + os.system("i2cset -y -r 0 0x25 7 0xE3") + os.system("i2cset -y -r 0 0x25 4 0x00") + os.system("i2cset -y -r 0 0x25 5 0x00") + + # Init BMC RST and SEL IO Expander + os.system("i2cset -y -r 0 0x26 2 0x3F") + os.system("i2cset -y -r 0 0x26 3 0x1F") + os.system("i2cset -y -r 0 0x26 6 0xD0") + os.system("i2cset -y -r 0 0x26 7 0x00") + os.system("i2cset -y -r 0 0x26 4 0x00") + os.system("i2cset -y -r 0 0x26 5 0x00") + + # Init System LED & HW ID IO Expander + os.system("i2cset -y -r 10 0x76 2 0x00") + os.system("i2cset -y -r 10 0x76 6 0x00") + os.system("i2cset -y -r 10 0x76 7 0xFF") + os.system("i2cset -y -r 10 0x76 4 0x00") + os.system("i2cset -y -r 10 0x76 5 0x00") + + # Init FAN Board Status IO Expander + os.system("i2cset -y -r 0 0x20 2 0x11") + os.system("i2cset -y -r 0 0x20 3 0x11") + os.system("i2cset -y -r 0 0x20 6 0xCC") + os.system("i2cset -y -r 0 0x20 7 0xCC") + os.system("i2cset -y -r 0 0x20 4 0x00") + os.system("i2cset -y -r 0 0x20 5 0x00") + + # Init System SEL 
and RST IO Expander + os.system("i2cset -y -r 32 0x76 2 0x04") + os.system("i2cset -y -r 32 0x76 3 0xDF") + os.system("i2cset -y -r 32 0x76 6 0x09") + os.system("i2cset -y -r 32 0x76 7 0x3F") + os.system("i2cset -y -r 32 0x76 4 0x00") + os.system("i2cset -y -r 32 0x76 5 0x00") + + # _i2c_sensors_init + + self.new_i2c_devices( + [ + # w83795, hwmon1 + ('w83795adg', 0x2F, 16), + + # lm75_1 Rear Panel, hwmon2 + ('lm75', 0x4D, 6), + + # lm75_2 Rear MAC, hwmon3 + ('lm75', 0x4E, 6), + + # lm86 , hwmon4 + ('lm86', 0x4C, 6), + + # lm75_3 Front Panel, hwmon5 + ('lm75', 0x4D, 7), + + # lm75_4 Front MAC, hwmon6 + ('lm75', 0x4E, 7), + + # tmp75 BMC board thermal, hwmon7 + ('lm75', 0x4A, 16), + + # tmp75 CPU board thermal, hwmon8 + ('tmp75', 0x4F, 0), + ] + ) + + # hwmon9 + #os.system("modprobe jc42") + + # _i2c_cpld_init + + self.insmod("ingrasys_s9280_64x_i2c_cpld") + + # add cpld 1~5 to sysfs + for i in range(1, 6): + self.new_i2c_device('ingrasys_cpld%d' % i, 0x33, i) + + # _i2c_psu_init + + self.insmod("ingrasys_s9280_64x_psu") + + # add psu 1~2 to sysfs + for i in range(1, 3): + self.new_i2c_device('psu%d' % i, 0x50, 19-i) + + # _i2c_qsfp_eeprom_init + for i in range(1, 65): + phy_port = fp2phy_array[i-1] + 1 + port_group = (phy_port-1)/8 + eeprom_busbase = 41 + (port_group * 8) + eeprom_busshift = (phy_port-1)%8 + eeprom_bus = eeprom_busbase + eeprom_busshift + self.new_i2c_device('sff8436', 0x50, eeprom_bus) + + # _i2c_sfp_eeprom_init + for i in range(1, 3): + self.new_i2c_device('sff8436', 0x50, 28+i) + + # _i2c_fan_speed_init + os.system("echo 120 > /sys/class/hwmon/hwmon1/device/pwm2") + + # _util_port_led_clear + os.system("i2cset -m 0x04 -y -r 32 0x76 2 0x00") + os.system("sleep 1") + os.system("i2cset -m 0x04 -y -r 32 0x76 2 0xFF") + + # turn on sys led + os.system("i2cset -m 0x80 -y -r 10 0x76 2 0x80") + + return True + + diff --git a/sm/bigcode b/sm/bigcode index 081f26bb..05eb4b37 160000 --- a/sm/bigcode +++ b/sm/bigcode @@ -1 +1 @@ -Subproject commit 
081f26bb5be40d51a8551d35395f06be137349cb +Subproject commit 05eb4b37f288e4b4a6119bea666bb77582eafbb9 diff --git a/tools/onl-init-pkgs.py b/tools/onl-init-pkgs.py new file mode 100755 index 00000000..a0a12fd6 --- /dev/null +++ b/tools/onl-init-pkgs.py @@ -0,0 +1,18 @@ +#!/usr/bin/python2 +############################################################ +# +# This script produces a YAML list on stdout of all +# packages necessary to support the given init system +# +import argparse + +ap = argparse.ArgumentParser(description='ONL Init Package Lister') +ap.add_argument('init', metavar='INIT-SYSTEM', choices=['sysvinit', 'systemd'], help='Init system to use') + +ops = ap.parse_args() + +if ops.init == 'sysvinit': + print '- sysvinit-core' +elif ops.init == 'systemd': + print '- systemd' + print '- systemd-sysv'